From 5834c9337a62090155e55361590eaa0c614dc89a Mon Sep 17 00:00:00 2001 From: Pingan2017 Date: Tue, 8 Aug 2017 20:20:37 +0800 Subject: [PATCH 001/794] fix the typo of wtih --- pkg/quota/resources.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/quota/resources.go b/pkg/quota/resources.go index aaf56b72a60..8c3d3d30231 100644 --- a/pkg/quota/resources.go +++ b/pkg/quota/resources.go @@ -225,7 +225,7 @@ func CalculateUsage(namespaceName string, scopes []api.ResourceQuotaScope, hardL for _, evaluator := range evaluators { potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...) } - // NOTE: the intersection just removes duplicates since the evaluator match intersects wtih hard + // NOTE: the intersection just removes duplicates since the evaluator match intersects with hard matchedResources := Intersection(hardResources, potentialResources) // sum the observed usage from each evaluator From 5d94c2014a91239f33ec19fd855f899558ca50f5 Mon Sep 17 00:00:00 2001 From: Alex Rowley Date: Fri, 18 Aug 2017 19:08:41 +0100 Subject: [PATCH 002/794] Fix typo in comment --- pkg/cloudprovider/providers/aws/aws.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index 217fbcad45f..389c8d14391 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -413,7 +413,7 @@ type CloudConfig struct { // KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources KubernetesClusterTag string - // KubernetesClusterTag is the cluster id we'll use to identify our cluster resources + // KubernetesClusterID is the cluster id we'll use to identify our cluster resources KubernetesClusterID string //The aws provider creates an inbound rule per load balancer on the node security From c578542ad7fac65a1b6c8c020dd1b5a99b4fe76c Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Thu, 
31 Aug 2017 12:13:59 +0200 Subject: [PATCH 003/794] git: Use VolumeHost.GetExec() to execute stuff in volume plugins This prepares volume plugins to run things in containers instead of running them on the host. --- pkg/volume/git_repo/BUILD | 5 +- pkg/volume/git_repo/git_repo.go | 22 ++-- pkg/volume/git_repo/git_repo_test.go | 182 +++++++++------------------ 3 files changed, 73 insertions(+), 136 deletions(-) diff --git a/pkg/volume/git_repo/BUILD b/pkg/volume/git_repo/BUILD index be57c22489e..d73f11b3fe1 100644 --- a/pkg/volume/git_repo/BUILD +++ b/pkg/volume/git_repo/BUILD @@ -13,12 +13,12 @@ go_library( "git_repo.go", ], deps = [ + "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -27,14 +27,13 @@ go_test( srcs = ["git_repo_test.go"], library = ":go_default_library", deps = [ + "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/empty_dir:go_default_library", "//pkg/volume/testing:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", - "//vendor/k8s.io/utils/exec/testing:go_default_library", ], ) diff --git a/pkg/volume/git_repo/git_repo.go b/pkg/volume/git_repo/git_repo.go index 1948971e94e..779bdc5e93c 100644 --- a/pkg/volume/git_repo/git_repo.go +++ b/pkg/volume/git_repo/git_repo.go @@ -24,10 +24,10 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/utils/exec" ) // This is 
the primary entrypoint for volume plugins. @@ -100,7 +100,8 @@ func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts vol source: spec.Volume.GitRepo.Repository, revision: spec.Volume.GitRepo.Revision, target: spec.Volume.GitRepo.Directory, - exec: exec.New(), + mounter: plugin.host.GetMounter(plugin.GetPluginName()), + exec: plugin.host.GetExec(plugin.GetPluginName()), opts: opts, }, nil } @@ -149,7 +150,8 @@ type gitRepoVolumeMounter struct { source string revision string target string - exec exec.Interface + mounter mount.Interface + exec mount.Exec opts volume.VolumeOptions } @@ -195,7 +197,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if len(b.target) != 0 { args = append(args, b.target) } - if output, err := b.execCommand("git", args, dir); err != nil { + if output, err := b.execGit(args, dir); err != nil { return fmt.Errorf("failed to exec 'git %s': %s: %v", strings.Join(args, " "), output, err) } @@ -225,10 +227,10 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return fmt.Errorf("unexpected directory contents: %v", files) } - if output, err := b.execCommand("git", []string{"checkout", b.revision}, subdir); err != nil { + if output, err := b.execGit([]string{"checkout", b.revision}, subdir); err != nil { return fmt.Errorf("failed to exec 'git checkout %s': %s: %v", b.revision, output, err) } - if output, err := b.execCommand("git", []string{"reset", "--hard"}, subdir); err != nil { + if output, err := b.execGit([]string{"reset", "--hard"}, subdir); err != nil { return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err) } @@ -242,10 +244,10 @@ func (b *gitRepoVolumeMounter) getMetaDir() string { return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(gitRepoPluginName)), b.volName) } -func (b *gitRepoVolumeMounter) execCommand(command string, args []string, dir string) ([]byte, error) { - cmd := 
b.exec.Command(command, args...) - cmd.SetDir(dir) - return cmd.CombinedOutput() +func (b *gitRepoVolumeMounter) execGit(args []string, dir string) ([]byte, error) { + // run git -C + fullArgs := append([]string{"-C", dir}, args...) + return b.exec.Run("git", fullArgs...) } // gitRepoVolumeUnmounter cleans git repo volumes. diff --git a/pkg/volume/git_repo/git_repo_test.go b/pkg/volume/git_repo/git_repo_test.go index 1bee3ad2185..4a4d857da72 100644 --- a/pkg/volume/git_repo/git_repo_test.go +++ b/pkg/volume/git_repo/git_repo_test.go @@ -28,11 +28,16 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/empty_dir" volumetest "k8s.io/kubernetes/pkg/volume/testing" - "k8s.io/utils/exec" - fakeexec "k8s.io/utils/exec/testing" +) + +const ( + gitUrl = "https://github.com/kubernetes/kubernetes.git" + revision = "2a30ce65c5ab586b98916d83385c5983edd353a1" + gitRepositoryName = "kubernetes" ) func newTestHost(t *testing.T) (string, volume.VolumeHost) { @@ -62,23 +67,18 @@ func TestCanSupport(t *testing.T) { } // Expected command -type expectedCommand struct { - // The git command - cmd []string - // The dir of git command is executed - dir string +type expectedCommand []string + +type testScenario struct { + name string + vol *v1.Volume + repositoryDir string + expecteds []expectedCommand + isExpectedFailure bool } func TestPlugin(t *testing.T) { - gitUrl := "https://github.com/kubernetes/kubernetes.git" - revision := "2a30ce65c5ab586b98916d83385c5983edd353a1" - - scenarios := []struct { - name string - vol *v1.Volume - expecteds []expectedCommand - isExpectedFailure bool - }{ + scenarios := []testScenario{ { name: "target-dir", vol: &v1.Volume{ @@ -91,19 +91,11 @@ func TestPlugin(t *testing.T) { }, }, }, + repositoryDir: "target_dir", expecteds: []expectedCommand{ - { - cmd: []string{"git", "clone", gitUrl, 
"target_dir"}, - dir: "", - }, - { - cmd: []string{"git", "checkout", revision}, - dir: "/target_dir", - }, - { - cmd: []string{"git", "reset", "--hard"}, - dir: "/target_dir", - }, + []string{"git", "-C", "volume-dir", "clone", gitUrl, "target_dir"}, + []string{"git", "-C", "volume-dir/target_dir", "checkout", revision}, + []string{"git", "-C", "volume-dir/target_dir", "reset", "--hard"}, }, isExpectedFailure: false, }, @@ -118,11 +110,9 @@ func TestPlugin(t *testing.T) { }, }, }, + repositoryDir: "target_dir", expecteds: []expectedCommand{ - { - cmd: []string{"git", "clone", gitUrl, "target_dir"}, - dir: "", - }, + []string{"git", "-C", "volume-dir", "clone", gitUrl, "target_dir"}, }, isExpectedFailure: false, }, @@ -136,11 +126,9 @@ func TestPlugin(t *testing.T) { }, }, }, + repositoryDir: "kubernetes", expecteds: []expectedCommand{ - { - cmd: []string{"git", "clone", gitUrl}, - dir: "", - }, + []string{"git", "-C", "volume-dir", "clone", gitUrl}, }, isExpectedFailure: false, }, @@ -156,19 +144,11 @@ func TestPlugin(t *testing.T) { }, }, }, + repositoryDir: "kubernetes", expecteds: []expectedCommand{ - { - cmd: []string{"git", "clone", gitUrl}, - dir: "", - }, - { - cmd: []string{"git", "checkout", revision}, - dir: "/kubernetes", - }, - { - cmd: []string{"git", "reset", "--hard"}, - dir: "/kubernetes", - }, + []string{"git", "-C", "volume-dir", "clone", gitUrl}, + []string{"git", "-C", "volume-dir/kubernetes", "checkout", revision}, + []string{"git", "-C", "volume-dir/kubernetes", "reset", "--hard"}, }, isExpectedFailure: false, }, @@ -184,19 +164,11 @@ func TestPlugin(t *testing.T) { }, }, }, + repositoryDir: "", expecteds: []expectedCommand{ - { - cmd: []string{"git", "clone", gitUrl, "."}, - dir: "", - }, - { - cmd: []string{"git", "checkout", revision}, - dir: "", - }, - { - cmd: []string{"git", "reset", "--hard"}, - dir: "", - }, + []string{"git", "-C", "volume-dir", "clone", gitUrl, "."}, + []string{"git", "-C", "volume-dir", "checkout", revision}, + 
[]string{"git", "-C", "volume-dir", "reset", "--hard"}, }, isExpectedFailure: false, }, @@ -214,12 +186,7 @@ func TestPlugin(t *testing.T) { } -func doTestPlugin(scenario struct { - name string - vol *v1.Volume - expecteds []expectedCommand - isExpectedFailure bool -}, t *testing.T) []error { +func doTestPlugin(scenario testScenario, t *testing.T) []error { allErrs := []error{} plugMgr := volume.VolumePluginMgr{} @@ -311,73 +278,42 @@ func doTestPlugin(scenario struct { return allErrs } -func doTestSetUp(scenario struct { - name string - vol *v1.Volume - expecteds []expectedCommand - isExpectedFailure bool -}, mounter volume.Mounter) []error { +func doTestSetUp(scenario testScenario, mounter volume.Mounter) []error { expecteds := scenario.expecteds allErrs := []error{} - // Construct combined outputs from expected commands - var fakeOutputs []fakeexec.FakeCombinedOutputAction - var fcmd fakeexec.FakeCmd - for _, expected := range expecteds { - if expected.cmd[1] == "clone" { - fakeOutputs = append(fakeOutputs, func() ([]byte, error) { - // git clone, it creates new dir/files - os.MkdirAll(path.Join(fcmd.Dirs[0], expected.dir), 0750) - return []byte{}, nil - }) - } else { - // git checkout || git reset, they create nothing - fakeOutputs = append(fakeOutputs, func() ([]byte, error) { - return []byte{}, nil - }) + var commandLog []expectedCommand + execCallback := func(cmd string, args ...string) ([]byte, error) { + if len(args) < 2 { + return nil, fmt.Errorf("expected at least 2 arguments, got %q", args) } + if args[0] != "-C" { + return nil, fmt.Errorf("expected the first argument to be \"-C\", got %q", args[0]) + } + // command is 'git -C + gitDir := args[1] + gitCommand := args[2] + if gitCommand == "clone" { + // Clone creates a directory + if scenario.repositoryDir != "" { + os.MkdirAll(path.Join(gitDir, scenario.repositoryDir), 0750) + } + } + // add the command to log with de-randomized gitDir + args[1] = strings.Replace(gitDir, mounter.GetPath(), 
"volume-dir", 1) + cmdline := append([]string{cmd}, args...) + commandLog = append(commandLog, cmdline) + return []byte{}, nil } - fcmd = fakeexec.FakeCmd{ - CombinedOutputScript: fakeOutputs, - } - - // Construct fake exec outputs from fcmd - var fakeAction []fakeexec.FakeCommandAction - for i := 0; i < len(expecteds); i++ { - fakeAction = append(fakeAction, func(cmd string, args ...string) exec.Cmd { - return fakeexec.InitFakeCmd(&fcmd, cmd, args...) - }) - - } - fake := fakeexec.FakeExec{ - CommandScript: fakeAction, - } - g := mounter.(*gitRepoVolumeMounter) - g.exec = &fake + g.mounter = &mount.FakeMounter{} + g.exec = mount.NewFakeExec(execCallback) g.SetUp(nil) - if fake.CommandCalls != len(expecteds) { + if !reflect.DeepEqual(expecteds, commandLog) { allErrs = append(allErrs, - fmt.Errorf("unexpected command calls in scenario: expected %d, saw: %d", len(expecteds), fake.CommandCalls)) - } - var expectedCmds [][]string - for _, expected := range expecteds { - expectedCmds = append(expectedCmds, expected.cmd) - } - if !reflect.DeepEqual(expectedCmds, fcmd.CombinedOutputLog) { - allErrs = append(allErrs, - fmt.Errorf("unexpected commands: %v, expected: %v", fcmd.CombinedOutputLog, expectedCmds)) - } - - var expectedPaths []string - for _, expected := range expecteds { - expectedPaths = append(expectedPaths, g.GetPath()+expected.dir) - } - if len(fcmd.Dirs) != len(expectedPaths) || !reflect.DeepEqual(expectedPaths, fcmd.Dirs) { - allErrs = append(allErrs, - fmt.Errorf("unexpected directories: %v, expected: %v", fcmd.Dirs, expectedPaths)) + fmt.Errorf("unexpected commands: %v, expected: %v", commandLog, expecteds)) } return allErrs From b4afb09ab91573aff59c9447064cbd3f2062eae5 Mon Sep 17 00:00:00 2001 From: Ma Shimiao Date: Tue, 12 Sep 2017 11:17:50 +0800 Subject: [PATCH 004/794] small tfix in cmd factory comment Signed-off-by: Ma Shimiao --- pkg/kubectl/cmd/util/factory.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git 
a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 992916dc5e4..a3f0c45bb39 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -69,10 +69,10 @@ var ( // Factory provides abstractions that allow the Kubectl command to be extended across multiple types // of resources and different API sets. -// The rings are here for a reason. In order for composers to be able to provide alternative factory implementations +// The rings are here for a reason. In order for composers to be able to provide alternative factory implementations // they need to provide low level pieces of *certain* functions so that when the factory calls back into itself -// it uses the custom version of the function. Rather than try to enumerate everything that someone would want to override -// we split the factory into rings, where each ring can depend on methods an earlier ring, but cannot depend +// it uses the custom version of the function. Rather than try to enumerate everything that someone would want to override +// we split the factory into rings, where each ring can depend on methods in an earlier ring, but cannot depend // upon peer methods in its own ring. // TODO: make the functions interfaces // TODO: pass the various interfaces on the factory directly into the command constructors (so the @@ -198,7 +198,7 @@ type ClientAccessFactory interface { PrintObjectSpecificMessage(obj runtime.Object, out io.Writer) } -// ObjectMappingFactory holds the second level of factory methods. These functions depend upon ClientAccessFactory methods. +// ObjectMappingFactory holds the second level of factory methods. These functions depend upon ClientAccessFactory methods. // Generally they provide object typing and functions that build requests based on the negotiated clients. type ObjectMappingFactory interface { // Returns interfaces for dealing with arbitrary runtime.Objects. 
@@ -240,7 +240,7 @@ type ObjectMappingFactory interface { OpenAPISchema() (openapi.Resources, error) } -// BuilderFactory holds the second level of factory methods. These functions depend upon ObjectMappingFactory and ClientAccessFactory methods. +// BuilderFactory holds the third level of factory methods. These functions depend upon ObjectMappingFactory and ClientAccessFactory methods. // Generally they depend upon client mapper functions type BuilderFactory interface { // PrinterForCommand returns the default printer for the command. It requires that certain options From 2aeb234c80a5b1fb9f2a0afe2a1e6bf8b4fe5bef Mon Sep 17 00:00:00 2001 From: wackxu Date: Mon, 18 Sep 2017 15:13:24 +0800 Subject: [PATCH 005/794] fix the bad code comment --- pkg/kubelet/eviction/eviction_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index d5fc2a49578..e556435a4e7 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -59,7 +59,7 @@ type managerImpl struct { killPodFunc KillPodFunc // the interface that knows how to do image gc imageGC ImageGC - // the interface that knows how to do image gc + // the interface that knows how to do container gc containerGC ContainerGC // protects access to internal state sync.RWMutex From 1c0debdd0ef5694f88c990db273d85471047aca6 Mon Sep 17 00:00:00 2001 From: Damon Wang Date: Mon, 16 Oct 2017 08:19:23 +0800 Subject: [PATCH 006/794] remove unused comment this file has in k8s.io/api. 
--- staging/src/k8s.io/api/core/v1/register.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/staging/src/k8s.io/api/core/v1/register.go b/staging/src/k8s.io/api/core/v1/register.go index 4916ee3c23c..ad52cf9b7d0 100644 --- a/staging/src/k8s.io/api/core/v1/register.go +++ b/staging/src/k8s.io/api/core/v1/register.go @@ -14,8 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -//TODO: this file is going to be moved to k8s.io/api - package v1 import ( From 75cc26fb65ad92a604160f5cef22e852c8024803 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Thu, 19 Oct 2017 14:29:58 +0200 Subject: [PATCH 007/794] Allow Ceph server some time to start Ceph server needs to create our "foo" volume on startup. It keeps the image small, however it makes the server container start slow. Add sleep before the server is usable. Without this PR, all pods that use Ceph fail to start for couple of seconds with cryptic "image foo not found" error and it clutters logs and pod logs and makes it harder to spot real errors. --- test/e2e/framework/volume_util.go | 11 +++++++++++ test/e2e/storage/volumes.go | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/test/e2e/framework/volume_util.go b/test/e2e/framework/volume_util.go index 84214a15ab4..08d392b3a2b 100644 --- a/test/e2e/framework/volume_util.go +++ b/test/e2e/framework/volume_util.go @@ -72,6 +72,9 @@ const ( MiB int64 = 1024 * KiB GiB int64 = 1024 * MiB TiB int64 = 1024 * GiB + + // Waiting period for volume server (Ceph, ...) to initialize itself. + VolumeServerPodStartupSleep = 20 * time.Second ) // Configuration of one tests. The test consist of: @@ -196,6 +199,14 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo }, } pod, ip = CreateStorageServer(cs, config) + + // Ceph server container needs some time to start. 
Tests continue working if + // this sleep is removed, however kubelet logs (and kubectl describe + // ) would be cluttered with error messages about non-existing + // image. + Logf("sleeping a bit to give ceph server time to initialize") + time.Sleep(VolumeServerPodStartupSleep) + return config, pod, ip } diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index f3bfc0f2be4..c8b505fb360 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -288,7 +288,7 @@ var _ = SIGDescribe("Volumes", func() { }() _, serverIP := framework.CreateStorageServer(cs, config) By("sleeping a bit to give ceph server time to initialize") - time.Sleep(20 * time.Second) + time.Sleep(framework.VolumeServerPodStartupSleep) // create ceph secret secret := &v1.Secret{ From 7d71129ff0f0d10873a6adcadfd92ddb876d130c Mon Sep 17 00:00:00 2001 From: foxyriver Date: Fri, 20 Oct 2017 11:40:52 +0800 Subject: [PATCH 008/794] delete archive --- test/e2e_node/runner/remote/run_remote.go | 1 + 1 file changed, 1 insertion(+) diff --git a/test/e2e_node/runner/remote/run_remote.go b/test/e2e_node/runner/remote/run_remote.go index 917c9982caf..36a7dbedd6b 100644 --- a/test/e2e_node/runner/remote/run_remote.go +++ b/test/e2e_node/runner/remote/run_remote.go @@ -354,6 +354,7 @@ func main() { if !exitOk { fmt.Printf("Failure: %d errors encountered.\n", errCount) callGubernator(*gubernator) + arc.deleteArchive() os.Exit(1) } callGubernator(*gubernator) From 11b1c373f1627767e873aa647f4395386959372d Mon Sep 17 00:00:00 2001 From: Hardik Dodiya Date: Mon, 23 Oct 2017 13:29:01 +0530 Subject: [PATCH 009/794] Bugfix: Update AddNodeaHandler error logs --- pkg/controller/node/util/controller_utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/node/util/controller_utils.go b/pkg/controller/node/util/controller_utils.go index 9fedc3bbe8f..560a9a55e5e 100644 --- a/pkg/controller/node/util/controller_utils.go +++ 
b/pkg/controller/node/util/controller_utils.go @@ -257,7 +257,7 @@ func CreateAddNodeHandler(f func(node *v1.Node) error) func(obj interface{}) { return func(originalObj interface{}) { node := originalObj.(*v1.Node).DeepCopy() if err := f(node); err != nil { - utilruntime.HandleError(fmt.Errorf("Error while processing Node Delete: %v", err)) + utilruntime.HandleError(fmt.Errorf("Error while processing Node Add: %v", err)) } } } From e067817a80ded886153538863778b8266947d849 Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Mon, 30 Oct 2017 12:07:15 -0700 Subject: [PATCH 010/794] COS: Keep the docker network checkpoint This is necessary for enabling the live-restore feature. --- cluster/gce/gci/configure-helper.sh | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 12f7b6ec770..7631504901c 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -838,23 +838,11 @@ function assemble-docker-flags { echo "DOCKER_OPTS=\"${docker_opts} ${EXTRA_DOCKER_OPTS:-}\"" > /etc/default/docker - if [[ "${use_net_plugin}" == "true" ]]; then - # If using a network plugin, extend the docker configuration to always remove - # the network checkpoint to avoid corrupt checkpoints. - # (https://github.com/docker/docker/issues/18283). - echo "Extend the docker.service configuration to remove the network checkpiont" - mkdir -p /etc/systemd/system/docker.service.d - cat </etc/systemd/system/docker.service.d/01network.conf -[Service] -ExecStartPre=/bin/sh -x -c "rm -rf /var/lib/docker/network" -EOF - fi - # Ensure TasksMax is sufficient for docker. 
# (https://github.com/kubernetes/kubernetes/issues/51977) echo "Extend the docker.service configuration to set a higher pids limit" mkdir -p /etc/systemd/system/docker.service.d - cat </etc/systemd/system/docker.service.d/02tasksmax.conf + cat </etc/systemd/system/docker.service.d/01tasksmax.conf [Service] TasksMax=infinity EOF From 655d7341387da382f768e5ce442388a731af8c76 Mon Sep 17 00:00:00 2001 From: YuxiJin-tobeyjin Date: Wed, 1 Nov 2017 10:35:50 +0800 Subject: [PATCH 011/794] Add complementary unittest for kubectl logs --- pkg/kubectl/cmd/logs_test.go | 61 ++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/pkg/kubectl/cmd/logs_test.go b/pkg/kubectl/cmd/logs_test.go index 65c06bcbdc8..b8005755c00 100644 --- a/pkg/kubectl/cmd/logs_test.go +++ b/pkg/kubectl/cmd/logs_test.go @@ -111,6 +111,11 @@ func TestValidateLogFlags(t *testing.T) { flags: map[string]string{"since": "1h", "since-time": "2006-01-02T15:04:05Z"}, expected: "at most one of `sinceTime` or `sinceSeconds` may be specified", }, + { + name: "negative since-time", + flags: map[string]string{"since": "-1s"}, + expected: "must be greater than 0", + }, { name: "negative limit-bytes", flags: map[string]string{"limit-bytes": "-100"}, @@ -142,3 +147,59 @@ func TestValidateLogFlags(t *testing.T) { } } } + +func TestLogComplete(t *testing.T) { + f, _, _, _ := cmdtesting.NewAPIFactory() + + tests := []struct { + name string + args []string + flags map[string]string + expected string + }{ + { + name: "No args case", + flags: map[string]string{"selector": ""}, + expected: "'logs (POD | TYPE/NAME) [CONTAINER_NAME]'.\nPOD or TYPE/NAME is a required argument for the logs command", + }, + { + name: "One args case", + args: []string{"foo"}, + flags: map[string]string{"selector": "foo"}, + expected: "only a selector (-l) or a POD name is allowed", + }, + { + name: "Two args case", + args: []string{"foo", "foo1"}, + flags: map[string]string{"container": "foo1"}, + expected: "only one of -c 
or an inline [CONTAINER] arg is allowed", + }, + { + name: "More than two args case", + args: []string{"foo", "foo1", "foo2"}, + flags: map[string]string{"tail": "1"}, + expected: "'logs (POD | TYPE/NAME) [CONTAINER_NAME]'.\nPOD or TYPE/NAME is a required argument for the logs command", + }, + { + name: "follow and selecter conflict", + flags: map[string]string{"selector": "foo", "follow": "true"}, + expected: "only one of follow (-f) or selector (-l) is allowed", + }, + } + for _, test := range tests { + cmd := NewCmdLogs(f, bytes.NewBuffer([]byte{})) + var err error + out := "" + for flag, value := range test.flags { + cmd.Flags().Set(flag, value) + } + // checkErr breaks tests in case of errors, plus we just + // need to check errors returned by the command validation + o := &LogsOptions{} + err = o.Complete(f, os.Stdout, cmd, test.args) + out = err.Error() + if !strings.Contains(out, test.expected) { + t.Errorf("%s: expected to find:\n\t%s\nfound:\n\t%s\n", test.name, test.expected, out) + } + } +} From cb73ab2b0717666f52753a8e76e1bf9911dbb9ed Mon Sep 17 00:00:00 2001 From: guangxuli Date: Wed, 26 Jul 2017 11:08:08 +0800 Subject: [PATCH 012/794] The printing level for node updated failed info should be used WARNING type just use Warning instead of Warningf --- .../volume/attachdetach/cache/actual_state_of_world.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go index 9bb6320c662..4d4a7523d8d 100644 --- a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go +++ b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go @@ -488,7 +488,7 @@ func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName asw.Lock() defer asw.Unlock() if err := asw.updateNodeStatusUpdateNeeded(nodeName, true); err != nil { - glog.Errorf("Failed to update statusUpdateNeeded field in actual state 
of world: %v", err) + glog.Warningf("Failed to update statusUpdateNeeded field in actual state of world: %v", err) } } From 538b0a6eacad580f66ad9cf9ebaa0de229914b91 Mon Sep 17 00:00:00 2001 From: Ghe Rivero Date: Thu, 2 Nov 2017 09:58:37 +0100 Subject: [PATCH 013/794] Fix Makefile doc for quick-release In the Makefile doc for quick-release target, it says: KUBE_FASTBUILD: Whether to cross-compile for other architectures. Set to 'true' to do so. If should be "Set to false" --- build/root/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/root/Makefile b/build/root/Makefile index 76c8af23d92..ba50e2983e7 100644 --- a/build/root/Makefile +++ b/build/root/Makefile @@ -389,7 +389,7 @@ define RELEASE_SKIP_TESTS_HELP_INFO # # Args: # KUBE_RELEASE_RUN_TESTS: Whether to run tests. Set to 'y' to run tests anyways. -# KUBE_FASTBUILD: Whether to cross-compile for other architectures. Set to 'true' to do so. +# KUBE_FASTBUILD: Whether to cross-compile for other architectures. Set to 'false' to do so. 
# # Example: # make release-skip-tests From 13a355c837eb65f3184750385ef5a2918b59f48b Mon Sep 17 00:00:00 2001 From: Di Xu Date: Mon, 31 Jul 2017 13:08:42 +0800 Subject: [PATCH 014/794] refactor method to pkg/util/node --- pkg/controller/cloud/node_controller.go | 2 +- .../volume/attachdetach/statusupdater/BUILD | 2 +- .../statusupdater/node_status_updater.go | 46 ++----------------- pkg/kubelet/kubelet_node_status.go | 8 ++-- pkg/util/node/node.go | 28 +++++++---- 5 files changed, 27 insertions(+), 59 deletions(-) diff --git a/pkg/controller/cloud/node_controller.go b/pkg/controller/cloud/node_controller.go index 7f884779947..4b2eb255141 100644 --- a/pkg/controller/cloud/node_controller.go +++ b/pkg/controller/cloud/node_controller.go @@ -183,7 +183,7 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud if !nodeAddressesChangeDetected(node.Status.Addresses, newNode.Status.Addresses) { return } - _, err = nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode) + _, _, err = nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode) if err != nil { glog.Errorf("Error patching node with cloud ip addresses = [%v]", err) } diff --git a/pkg/controller/volume/attachdetach/statusupdater/BUILD b/pkg/controller/volume/attachdetach/statusupdater/BUILD index e60d31be92e..2ef64b0f6e9 100644 --- a/pkg/controller/volume/attachdetach/statusupdater/BUILD +++ b/pkg/controller/volume/attachdetach/statusupdater/BUILD @@ -14,11 +14,11 @@ go_library( importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater", deps = [ "//pkg/controller/volume/attachdetach/cache:go_default_library", + "//pkg/util/node:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", ], diff --git a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go index ab6dd05ecc4..b5cbb224464 100644 --- a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go +++ b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go @@ -19,18 +19,15 @@ limitations under the License. package statusupdater import ( - "encoding/json" - "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" + nodeutil "k8s.io/kubernetes/pkg/util/node" ) // NodeStatusUpdater defines a set of operations for updating the @@ -100,47 +97,12 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error { func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj *v1.Node, attachedVolumes []v1.AttachedVolume) error { node := nodeObj.DeepCopy() - - // TODO: Change to pkg/util/node.UpdateNodeStatus. - oldData, err := json.Marshal(node) - if err != nil { - return fmt.Errorf( - "failed to Marshal oldData for node %q. %v", - nodeName, - err) - } - node.Status.VolumesAttached = attachedVolumes - - newData, err := json.Marshal(node) + _, patchBytes, err := nodeutil.PatchNodeStatus(nsu.kubeClient.CoreV1(), nodeName, nodeObj, node) if err != nil { - return fmt.Errorf( - "failed to Marshal newData for node %q. 
%v", - nodeName, - err) + return err } - patchBytes, err := - strategicpatch.CreateTwoWayMergePatch(oldData, newData, node) - if err != nil { - return fmt.Errorf( - "failed to CreateTwoWayMergePatch for node %q. %v", - nodeName, - err) - } - - _, err = nsu.kubeClient.CoreV1().Nodes().PatchStatus(string(nodeName), patchBytes) - if err != nil { - return fmt.Errorf( - "failed to kubeClient.CoreV1().Nodes().Patch for node %q. %v", - nodeName, - err) - } - glog.V(4).Infof( - "Updating status for node %q succeeded. patchBytes: %q VolumesAttached: %v", - nodeName, - string(patchBytes), - node.Status.VolumesAttached) - + glog.V(4).Infof("Updating status %q for node %q succeeded. VolumesAttached: %v", patchBytes, nodeName, attachedVolumes) return nil } diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index e4ba0d8555d..7122b08fa7a 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -132,8 +132,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { requiresUpdate := kl.reconcileCMADAnnotationWithExistingNode(node, existingNode) requiresUpdate = kl.updateDefaultLabels(node, existingNode) || requiresUpdate if requiresUpdate { - if _, err := nodeutil.PatchNodeStatus(kl.kubeClient.CoreV1(), types.NodeName(kl.nodeName), - originalNode, existingNode); err != nil { + if _, _, err := nodeutil.PatchNodeStatus(kl.kubeClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, existingNode); err != nil { glog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err) return false } @@ -142,8 +141,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { return true } - glog.Errorf( - "Previously node %q had externalID %q; now it is %q; will delete and recreate.", + glog.Errorf("Previously node %q had externalID %q; now it is %q; will delete and recreate.", kl.nodeName, node.Spec.ExternalID, existingNode.Spec.ExternalID, ) if err := 
kl.kubeClient.CoreV1().Nodes().Delete(node.Name, nil); err != nil { @@ -415,7 +413,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { kl.setNodeStatus(node) // Patch the current status on the API server - updatedNode, err := nodeutil.PatchNodeStatus(kl.heartbeatClient, types.NodeName(kl.nodeName), originalNode, node) + updatedNode, _, err := nodeutil.PatchNodeStatus(kl.heartbeatClient, types.NodeName(kl.nodeName), originalNode, node) if err != nil { return err } diff --git a/pkg/util/node/node.go b/pkg/util/node/node.go index 72790e4c8e8..c3b0500a866 100644 --- a/pkg/util/node/node.go +++ b/pkg/util/node/node.go @@ -151,10 +151,23 @@ func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.N } // PatchNodeStatus patches node status. -func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, error) { +func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, []byte, error) { + patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode) + if err != nil { + return nil, nil, err + } + + updatedNode, err := c.Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes, "status") + if err != nil { + return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err) + } + return updatedNode, patchBytes, nil +} + +func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) ([]byte, error) { oldData, err := json.Marshal(oldNode) if err != nil { - return nil, fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err) + return nil, fmt.Errorf("failed to Marshal oldData for node %q: %v", nodeName, err) } // Reset spec to make sure only patch for Status or ObjectMeta is generated. 
@@ -164,17 +177,12 @@ func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode newNode.Spec = oldNode.Spec newData, err := json.Marshal(newNode) if err != nil { - return nil, fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNode, nodeName, err) + return nil, fmt.Errorf("failed to Marshal newData for node %q: %v", nodeName, err) } patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) if err != nil { - return nil, fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) + return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for node %q: %v", nodeName, err) } - - updatedNode, err := c.Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes, "status") - if err != nil { - return nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err) - } - return updatedNode, nil + return patchBytes, nil } From 1a0ff90b1273579ff53da5bc0ae177df9872740a Mon Sep 17 00:00:00 2001 From: huangjiuyuan Date: Mon, 6 Nov 2017 15:40:23 +0800 Subject: [PATCH 015/794] Fix a typo in NewManager function --- pkg/kubelet/prober/results/results_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/prober/results/results_manager.go b/pkg/kubelet/prober/results/results_manager.go index 03261e353d2..2b01be7ba2f 100644 --- a/pkg/kubelet/prober/results/results_manager.go +++ b/pkg/kubelet/prober/results/results_manager.go @@ -77,7 +77,7 @@ type manager struct { var _ Manager = &manager{} -// NewManager creates ane returns an empty results manager. +// NewManager creates and returns an empty results manager. func NewManager() Manager { return &manager{ cache: make(map[kubecontainer.ContainerID]Result), From 5005a541d6b5b7d950ed621d9c9fd247abb9b4af Mon Sep 17 00:00:00 2001 From: Saksham Sharma Date: Tue, 7 Nov 2017 04:24:53 +0530 Subject: [PATCH 016/794] Use []byte in place of string in envelope.Service. 
--- .../options/encryptionconfig/config_test.go | 10 +++--- .../value/encrypt/envelope/envelope.go | 33 ++++++++++++------- .../value/encrypt/envelope/envelope_test.go | 10 +++--- 3 files changed, 31 insertions(+), 22 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go index 8233bb7eefd..ac0b3d75151 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go @@ -226,18 +226,18 @@ type testEnvelopeService struct { disabled bool } -func (t *testEnvelopeService) Decrypt(data string) ([]byte, error) { +func (t *testEnvelopeService) Decrypt(data []byte) ([]byte, error) { if t.disabled { return nil, fmt.Errorf("Envelope service was disabled") } - return base64.StdEncoding.DecodeString(data) + return base64.StdEncoding.DecodeString(string(data)) } -func (t *testEnvelopeService) Encrypt(data []byte) (string, error) { +func (t *testEnvelopeService) Encrypt(data []byte) ([]byte, error) { if t.disabled { - return "", fmt.Errorf("Envelope service was disabled") + return nil, fmt.Errorf("Envelope service was disabled") } - return base64.StdEncoding.EncodeToString(data), nil + return []byte(base64.StdEncoding.EncodeToString(data)), nil } func (t *testEnvelopeService) SetDisabledStatus(status bool) { diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go index 9782e44b66d..90f1dca42b2 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go @@ -21,6 +21,7 @@ import ( "crypto/aes" "crypto/cipher" "crypto/rand" + "encoding/base64" "encoding/binary" "fmt" @@ -34,10 +35,10 @@ const defaultCacheSize = 1000 // 
Service allows encrypting and decrypting data using an external Key Management Service. type Service interface { - // Decrypt a given data string to obtain the original byte data. - Decrypt(data string) ([]byte, error) - // Encrypt bytes to a string ciphertext. - Encrypt(data []byte) (string, error) + // Decrypt a given bytearray to obtain the original data as bytes. + Decrypt(data []byte) ([]byte, error) + // Encrypt bytes to a ciphertext. + Encrypt(data []byte) ([]byte, error) } type envelopeTransformer struct { @@ -78,15 +79,12 @@ func (t *envelopeTransformer) TransformFromStorage(data []byte, context value.Co if keyLen+2 > len(data) { return nil, false, fmt.Errorf("invalid data encountered by genvelope transformer, length longer than available bytes: %q", data) } - encKey := string(data[2 : keyLen+2]) + encKey := data[2 : keyLen+2] encData := data[2+keyLen:] - var transformer value.Transformer // Look up the decrypted DEK from cache or Envelope. - _transformer, found := t.transformers.Get(encKey) - if found { - transformer = _transformer.(value.Transformer) - } else { + transformer := t.getTransformer(encKey) + if transformer == nil { key, err := t.envelopeService.Decrypt(encKey) if err != nil { return nil, false, fmt.Errorf("error while decrypting key: %q", err) @@ -136,16 +134,27 @@ func (t *envelopeTransformer) TransformToStorage(data []byte, context value.Cont var _ value.Transformer = &envelopeTransformer{} // addTransformer inserts a new transformer to the Envelope cache of DEKs for future reads. -func (t *envelopeTransformer) addTransformer(encKey string, key []byte) (value.Transformer, error) { +func (t *envelopeTransformer) addTransformer(encKey []byte, key []byte) (value.Transformer, error) { block, err := aes.NewCipher(key) if err != nil { return nil, err } transformer := t.baseTransformerFunc(block) - t.transformers.Add(encKey, transformer) + // Use base64 of encKey as the key into the cache because hashicorp/golang-lru + // cannot hash []uint8. 
+ t.transformers.Add(base64.StdEncoding.EncodeToString(encKey), transformer) return transformer, nil } +// getTransformer fetches the transformer corresponding to encKey from cache, if it exists. +func (t *envelopeTransformer) getTransformer(encKey []byte) value.Transformer { + _transformer, found := t.transformers.Get(base64.StdEncoding.EncodeToString(encKey)) + if found { + return _transformer.(value.Transformer) + } + return nil +} + // generateKey generates a random key using system randomness. func generateKey(length int) ([]byte, error) { key := make([]byte, length) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope_test.go b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope_test.go index 0ba68afc63d..3215a22f331 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope_test.go @@ -42,22 +42,22 @@ type testEnvelopeService struct { keyVersion string } -func (t *testEnvelopeService) Decrypt(data string) ([]byte, error) { +func (t *testEnvelopeService) Decrypt(data []byte) ([]byte, error) { if t.disabled { return nil, fmt.Errorf("Envelope service was disabled") } - dataChunks := strings.SplitN(data, ":", 2) + dataChunks := strings.SplitN(string(data), ":", 2) if len(dataChunks) != 2 { return nil, fmt.Errorf("invalid data encountered for decryption: %s. 
Missing key version", data) } return base64.StdEncoding.DecodeString(dataChunks[1]) } -func (t *testEnvelopeService) Encrypt(data []byte) (string, error) { +func (t *testEnvelopeService) Encrypt(data []byte) ([]byte, error) { if t.disabled { - return "", fmt.Errorf("Envelope service was disabled") + return nil, fmt.Errorf("Envelope service was disabled") } - return t.keyVersion + ":" + base64.StdEncoding.EncodeToString(data), nil + return []byte(t.keyVersion + ":" + base64.StdEncoding.EncodeToString(data)), nil } func (t *testEnvelopeService) SetDisabledStatus(status bool) { From 0a9c42525bb7e01f2afdd979727bd3b05a83833a Mon Sep 17 00:00:00 2001 From: Michal Fojtik Date: Tue, 7 Nov 2017 10:24:04 +0100 Subject: [PATCH 017/794] Fix protobuf generator for aliases to repeated types --- .../cmd/go-to-protobuf/protobuf/generator.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go index d29e3e4751d..a76d670387d 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go @@ -513,10 +513,13 @@ func memberTypeToProtobufField(locator ProtobufLocator, field *protoField, t *ty log.Printf("failed to alias: %s %s: err %v", t.Name, t.Underlying.Name, err) return err } - if field.Extras == nil { - field.Extras = make(map[string]string) + // If this is not an alias to a slice, cast to the alias + if !field.Repeated { + if field.Extras == nil { + field.Extras = make(map[string]string) + } + field.Extras["(gogoproto.casttype)"] = strconv.Quote(locator.CastTypeName(t.Name)) } - field.Extras["(gogoproto.casttype)"] = strconv.Quote(locator.CastTypeName(t.Name)) } case types.Slice: if t.Elem.Name.Name == "byte" && len(t.Elem.Name.Package) == 0 { From d290996e7514f4a0581be75e75c5868dcbee76ab Mon Sep 17 
00:00:00 2001 From: Bartlomiej Antoniak Date: Tue, 7 Nov 2017 13:31:20 +0100 Subject: [PATCH 018/794] #50598: Added more test cases for nodeShouldRunDaemonPod --- .../daemon/daemon_controller_test.go | 124 +++++++++++++++++- 1 file changed, 119 insertions(+), 5 deletions(-) diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index b5f99c382c0..a4b85b44db1 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -518,6 +518,12 @@ func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec { } } +func resourceContainerSpec(memory, cpu string) v1.ResourceRequirements { + return v1.ResourceRequirements { + Requests: allocatableResources(memory, cpu), + } +} + func resourcePodSpecWithoutNodeName(memory, cpu string) v1.PodSpec { return v1.PodSpec{ Containers: []v1.Container{{ @@ -1400,6 +1406,7 @@ func setDaemonSetCritical(ds *extensions.DaemonSet) { func TestNodeShouldRunDaemonPod(t *testing.T) { cases := []struct { + predicateName string podsOnNode []*v1.Pod nodeCondition []v1.NodeCondition ds *extensions.DaemonSet @@ -1407,6 +1414,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { err error }{ { + predicateName: "ShouldRunDaemonPod", ds: &extensions.DaemonSet{ Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, @@ -1423,6 +1431,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { shouldContinueRunning: true, }, { + predicateName: "InsufficientResourceError", ds: &extensions.DaemonSet{ Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, @@ -1439,6 +1448,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { shouldContinueRunning: true, }, { + predicateName: "ErrPodNotMatchHostName", ds: &extensions.DaemonSet{ Spec: extensions.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, @@ -1455,6 +1465,7 @@ func 
TestNodeShouldRunDaemonPod(t *testing.T) { shouldContinueRunning: false, }, { + predicateName: "ErrPodNotFitsHostPorts", podsOnNode: []*v1.Pod{ { Spec: v1.PodSpec{ @@ -1487,13 +1498,116 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { shouldSchedule: false, shouldContinueRunning: false, }, + { + predicateName: "InsufficientResourceError", + podsOnNode: []*v1.Pod{ + { + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ + HostPort: 666, + }}, + Resources: resourceContainerSpec("50M", "0.5"), + }}, + }, + }, + }, + ds: &extensions.DaemonSet{ + Spec: extensions.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: resourcePodSpec("", "100M", "0.5"), + }, + }, + }, + wantToRun: true, + shouldSchedule: false, + shouldContinueRunning: true, + }, + { + predicateName: "ShouldRunDaemonPod", + podsOnNode: []*v1.Pod{ + { + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ + HostPort: 666, + }}, + Resources: resourceContainerSpec("50M", "0.5"), + }}, + }, + }, + }, + ds: &extensions.DaemonSet{ + Spec: extensions.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: resourcePodSpec("", "50M", "0.5"), + }, + }, + }, + wantToRun: true, + shouldSchedule: true, + shouldContinueRunning: true, + }, + { + predicateName: "ErrNodeSelectorNotMatch", + ds: &extensions.DaemonSet{ + Spec: extensions.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: v1.PodSpec{ + NodeSelector: simpleDaemonSetLabel2, + }, + }, + }, + }, + wantToRun: false, + shouldSchedule: false, + shouldContinueRunning: false, + }, + { + 
predicateName: "ShouldRunDaemonPod", + ds: &extensions.DaemonSet{ + Spec: extensions.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: v1.PodSpec{ + NodeSelector: simpleDaemonSetLabel, + }, + }, + }, + }, + wantToRun: true, + shouldSchedule: true, + shouldContinueRunning: true, + }, } for i, c := range cases { for _, strategy := range updateStrategies() { - node := newNode("test-node", nil) + node := newNode("test-node", simpleDaemonSetLabel) node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...) node.Status.Allocatable = allocatableResources("100M", "1") + + attachedVolumes := make([]v1.AttachedVolume, 1) + attachedVolumes[0] = v1.AttachedVolume{ + Name: v1.UniqueVolumeName("test"), + } + node.Status.VolumesAttached = attachedVolumes + manager, _, _ := newTestController() manager.nodeStore.Add(node) for _, p := range c.podsOnNode { @@ -1504,16 +1618,16 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds) if wantToRun != c.wantToRun { - t.Errorf("[%v] expected wantToRun: %v, got: %v", i, c.wantToRun, wantToRun) + t.Errorf("[%v] strategy: %v, predicateName: %v expected wantToRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.wantToRun, wantToRun) } if shouldSchedule != c.shouldSchedule { - t.Errorf("[%v] expected shouldSchedule: %v, got: %v", i, c.shouldSchedule, shouldSchedule) + t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldSchedule: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldSchedule, shouldSchedule) } if shouldContinueRunning != c.shouldContinueRunning { - t.Errorf("[%v] expected shouldContinueRunning: %v, got: %v", i, c.shouldContinueRunning, shouldContinueRunning) + t.Errorf("[%v] strategy: %v, predicateName: %v expected 
shouldContinueRunning: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldContinueRunning, shouldContinueRunning) } if err != c.err { - t.Errorf("[%v] expected err: %v, got: %v", i, c.err, err) + t.Errorf("[%v] strategy: %v, predicateName: %v expected err: %v, got: %v", i, c.predicateName, c.ds.Spec.UpdateStrategy.Type, c.err, err) } } } From 6b47e444a043db149e9d03588734b089c0aa005c Mon Sep 17 00:00:00 2001 From: Bartlomiej Antoniak Date: Tue, 7 Nov 2017 14:01:42 +0100 Subject: [PATCH 019/794] #50598: Removed obsolete volume mount in TestNodeShouldRunDaemonPod --- pkg/controller/daemon/daemon_controller_test.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index a4b85b44db1..e0997c03649 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -1601,13 +1601,6 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { node := newNode("test-node", simpleDaemonSetLabel) node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...) 
node.Status.Allocatable = allocatableResources("100M", "1") - - attachedVolumes := make([]v1.AttachedVolume, 1) - attachedVolumes[0] = v1.AttachedVolume{ - Name: v1.UniqueVolumeName("test"), - } - node.Status.VolumesAttached = attachedVolumes - manager, _, _ := newTestController() manager.nodeStore.Add(node) for _, p := range c.podsOnNode { From f52ae0a68f6991419cd2e5f84e1360fd9b5eed70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bart=C5=82omiej=20Antoniak?= Date: Tue, 7 Nov 2017 19:48:18 +0100 Subject: [PATCH 020/794] Update daemon_controller_test.go Formatting --- pkg/controller/daemon/daemon_controller_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index e0997c03649..220f2551271 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -1406,7 +1406,7 @@ func setDaemonSetCritical(ds *extensions.DaemonSet) { func TestNodeShouldRunDaemonPod(t *testing.T) { cases := []struct { - predicateName string + predicateName string podsOnNode []*v1.Pod nodeCondition []v1.NodeCondition ds *extensions.DaemonSet From 3b93d6a0610ecf233a61425c46ba99f5e7cfcb0d Mon Sep 17 00:00:00 2001 From: tengqm Date: Thu, 2 Nov 2017 21:43:47 +0800 Subject: [PATCH 021/794] Fix autoscaling API documentation --- api/openapi-spec/swagger.json | 4 ++-- api/swagger-spec/autoscaling_v2beta1.json | 4 ++-- docs/api-reference/autoscaling/v2beta1/definitions.html | 4 ++-- pkg/apis/autoscaling/types.go | 6 ++++-- staging/src/k8s.io/api/autoscaling/v1/generated.proto | 6 ++++-- staging/src/k8s.io/api/autoscaling/v1/types.go | 6 ++++-- .../api/autoscaling/v1/types_swagger_doc_generated.go | 4 ++-- staging/src/k8s.io/api/autoscaling/v2beta1/generated.proto | 6 ++++-- staging/src/k8s.io/api/autoscaling/v2beta1/types.go | 6 ++++-- .../api/autoscaling/v2beta1/types_swagger_doc_generated.go | 4 ++-- 10 files changed, 30 insertions(+), 
20 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index b6f08c8888d..00294467e6b 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -65461,7 +65461,7 @@ "$ref": "#/definitions/io.k8s.api.autoscaling.v2beta1.ResourceMetricSource" }, "type": { - "description": "type is the type of metric source. It should match one of the fields below.", + "description": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "type": "string" } } @@ -65485,7 +65485,7 @@ "$ref": "#/definitions/io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus" }, "type": { - "description": "type is the type of metric source. It will match one of the fields below.", + "description": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "type": "string" } } diff --git a/api/swagger-spec/autoscaling_v2beta1.json b/api/swagger-spec/autoscaling_v2beta1.json index 9b09f31b920..107dbd9f4fd 100644 --- a/api/swagger-spec/autoscaling_v2beta1.json +++ b/api/swagger-spec/autoscaling_v2beta1.json @@ -1547,7 +1547,7 @@ "properties": { "type": { "type": "string", - "description": "type is the type of metric source. It should match one of the fields below." + "description": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object." }, "object": { "$ref": "v2beta1.ObjectMetricSource", @@ -1680,7 +1680,7 @@ "properties": { "type": { "type": "string", - "description": "type is the type of metric source. It will match one of the fields below." + "description": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object." 
}, "object": { "$ref": "v2beta1.ObjectMetricStatus", diff --git a/docs/api-reference/autoscaling/v2beta1/definitions.html b/docs/api-reference/autoscaling/v2beta1/definitions.html index 76ca8a3845c..3ac0c1fe6b4 100755 --- a/docs/api-reference/autoscaling/v2beta1/definitions.html +++ b/docs/api-reference/autoscaling/v2beta1/definitions.html @@ -871,7 +871,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

type

-

type is the type of metric source. It will match one of the fields below.

+

type is the type of metric source. It will be one of "Object", "Pods" or "Resource", each corresponds to a matching field in the object.

true

string

@@ -1828,7 +1828,7 @@ Examples:

type

-

type is the type of metric source. It should match one of the fields below.

+

type is the type of metric source. It should be one of "Object", "Pods" or "Resource", each mapping to a matching field in the object.

true

string

diff --git a/pkg/apis/autoscaling/types.go b/pkg/apis/autoscaling/types.go index 7c830d9467e..ac2a968c3a9 100644 --- a/pkg/apis/autoscaling/types.go +++ b/pkg/apis/autoscaling/types.go @@ -116,7 +116,8 @@ var ( // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // Type is the type of metric source. It should match one of the fields below. + // Type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. Type MetricSourceType // Object refers to a metric describing a single kubernetes object @@ -261,7 +262,8 @@ type HorizontalPodAutoscalerCondition struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // Type is the type of metric source. It will match one of the fields below. + // Type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. Type MetricSourceType // Object refers to a metric describing a single kubernetes object diff --git a/staging/src/k8s.io/api/autoscaling/v1/generated.proto b/staging/src/k8s.io/api/autoscaling/v1/generated.proto index e41e62746bd..f5e4471317a 100644 --- a/staging/src/k8s.io/api/autoscaling/v1/generated.proto +++ b/staging/src/k8s.io/api/autoscaling/v1/generated.proto @@ -138,7 +138,8 @@ message HorizontalPodAutoscalerStatus { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should match one of the fields below. + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. 
optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -163,7 +164,8 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will match one of the fields below. + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. optional string type = 1; // object refers to a metric describing a single kubernetes object diff --git a/staging/src/k8s.io/api/autoscaling/v1/types.go b/staging/src/k8s.io/api/autoscaling/v1/types.go index e726c140399..eeadaf88475 100644 --- a/staging/src/k8s.io/api/autoscaling/v1/types.go +++ b/staging/src/k8s.io/api/autoscaling/v1/types.go @@ -166,7 +166,8 @@ var ( // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should match one of the fields below. + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -235,7 +236,8 @@ type ResourceMetricSource struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. It will match one of the fields below. + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. 
Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object diff --git a/staging/src/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 7f84c2d9348..5506b76f3df 100644 --- a/staging/src/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should match one of the fields below.", + "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", @@ -111,7 +111,7 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will match one of the fields below.", + "type": "type is the type of metric source. 
It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", diff --git a/staging/src/k8s.io/api/autoscaling/v2beta1/generated.proto b/staging/src/k8s.io/api/autoscaling/v2beta1/generated.proto index de3d2665fdc..332502fce1f 100644 --- a/staging/src/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/staging/src/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -153,7 +153,8 @@ message HorizontalPodAutoscalerStatus { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should match one of the fields below. + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -178,7 +179,8 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will match one of the fields below. + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. 
optional string type = 1; // object refers to a metric describing a single kubernetes object diff --git a/staging/src/k8s.io/api/autoscaling/v2beta1/types.go b/staging/src/k8s.io/api/autoscaling/v2beta1/types.go index 9c72ae25ca7..22e53573b2f 100644 --- a/staging/src/k8s.io/api/autoscaling/v2beta1/types.go +++ b/staging/src/k8s.io/api/autoscaling/v2beta1/types.go @@ -78,7 +78,8 @@ var ( // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should match one of the fields below. + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -210,7 +211,8 @@ type HorizontalPodAutoscalerCondition struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. It will match one of the fields below. + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. 
Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object diff --git a/staging/src/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index c7002b3d1e6..8bcf0f4b305 100644 --- a/staging/src/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -100,7 +100,7 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should match one of the fields below.", + "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", @@ -112,7 +112,7 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. 
It will match one of the fields below.", + "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", From 2ee936a0caefd209ab5147604499b27574b6a226 Mon Sep 17 00:00:00 2001 From: Bartlomiej Antoniak Date: Wed, 8 Nov 2017 09:33:09 +0100 Subject: [PATCH 022/794] #50598: Formatted code using gofmt --- pkg/controller/daemon/daemon_controller_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 220f2551271..244b9534e98 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -519,7 +519,7 @@ func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec { } func resourceContainerSpec(memory, cpu string) v1.ResourceRequirements { - return v1.ResourceRequirements { + return v1.ResourceRequirements{ Requests: allocatableResources(memory, cpu), } } @@ -1406,7 +1406,7 @@ func setDaemonSetCritical(ds *extensions.DaemonSet) { func TestNodeShouldRunDaemonPod(t *testing.T) { cases := []struct { - predicateName string + predicateName string podsOnNode []*v1.Pod nodeCondition []v1.NodeCondition ds *extensions.DaemonSet From 
2a457a941cfa3a3cd17b84bcc83eb34871184e90 Mon Sep 17 00:00:00 2001 From: Bartlomiej Antoniak Date: Wed, 8 Nov 2017 13:35:12 +0100 Subject: [PATCH 023/794] #50598: Added NodeAffinity test case for nodeShouldRunDaemonPod --- .../daemon/daemon_controller_test.go | 70 +++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 244b9534e98..14a6e96bc49 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -1594,6 +1594,76 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { shouldSchedule: true, shouldContinueRunning: true, }, + { + predicateName: "ErrPodAffinityNotMatch", + ds: &extensions.DaemonSet{ + Spec: extensions.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "type", + Operator: v1.NodeSelectorOpIn, + Values: []string{"test"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantToRun: false, + shouldSchedule: false, + shouldContinueRunning: false, + }, + { + predicateName: "ShouldRunDaemonPod", + ds: &extensions.DaemonSet{ + Spec: extensions.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "type", + Operator: 
v1.NodeSelectorOpIn, + Values: []string{"production"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantToRun: true, + shouldSchedule: true, + shouldContinueRunning: true, + }, } for i, c := range cases { From f6af1904cd0e017c6181c23d41e1281fd4a9b198 Mon Sep 17 00:00:00 2001 From: Tomas Nozicka Date: Thu, 9 Nov 2017 12:23:37 +0100 Subject: [PATCH 024/794] Make StatefulSet report an event when recreating failed pod --- pkg/controller/statefulset/stateful_set.go | 1 + pkg/controller/statefulset/stateful_set_control.go | 10 +++++++--- .../statefulset/stateful_set_control_test.go | 7 +++++-- pkg/controller/statefulset/stateful_set_test.go | 4 +++- 4 files changed, 16 insertions(+), 6 deletions(-) diff --git a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go index e9db4c10b44..d1cb293abe8 100644 --- a/pkg/controller/statefulset/stateful_set.go +++ b/pkg/controller/statefulset/stateful_set.go @@ -99,6 +99,7 @@ func NewStatefulSetController( recorder), NewRealStatefulSetStatusUpdater(kubeClient, setInformer.Lister()), history.NewHistory(kubeClient, revInformer.Lister()), + recorder, ), pvcListerSynced: pvcInformer.Informer().HasSynced, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "statefulset"), diff --git a/pkg/controller/statefulset/stateful_set_control.go b/pkg/controller/statefulset/stateful_set_control.go index 5102808c447..bddf1d6e55f 100644 --- a/pkg/controller/statefulset/stateful_set_control.go +++ b/pkg/controller/statefulset/stateful_set_control.go @@ -25,6 +25,7 @@ import ( apps "k8s.io/api/apps/v1beta1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/controller/history" ) @@ -53,14 +54,16 @@ type StatefulSetControlInterface interface { func NewDefaultStatefulSetControl( podControl StatefulPodControlInterface, statusUpdater StatefulSetStatusUpdaterInterface, - controllerHistory history.Interface) 
StatefulSetControlInterface { - return &defaultStatefulSetControl{podControl, statusUpdater, controllerHistory} + controllerHistory history.Interface, + recorder record.EventRecorder) StatefulSetControlInterface { + return &defaultStatefulSetControl{podControl, statusUpdater, controllerHistory, recorder} } type defaultStatefulSetControl struct { podControl StatefulPodControlInterface statusUpdater StatefulSetStatusUpdaterInterface controllerHistory history.Interface + recorder record.EventRecorder } // UpdateStatefulSet executes the core logic loop for a stateful set, applying the predictable and @@ -367,7 +370,8 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( for i := range replicas { // delete and recreate failed pods if isFailed(replicas[i]) { - glog.V(4).Infof("StatefulSet %s/%s is recreating failed Pod %s", + ssc.recorder.Eventf(set, v1.EventTypeWarning, "RecreatingFailedPod", + "StatefulSet %s/%s is recreating failed Pod %s", set.Namespace, set.Name, replicas[i].Name) diff --git a/pkg/controller/statefulset/stateful_set_control_test.go b/pkg/controller/statefulset/stateful_set_control_test.go index a18995d5059..12d926b15eb 100644 --- a/pkg/controller/statefulset/stateful_set_control_test.go +++ b/pkg/controller/statefulset/stateful_set_control_test.go @@ -41,6 +41,7 @@ import ( appslisters "k8s.io/client-go/listers/apps/v1beta1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/history" @@ -52,7 +53,8 @@ func setupController(client clientset.Interface) (*fakeStatefulPodControl, *fake informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) spc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), informerFactory.Apps().V1beta1().StatefulSets()) ssu := 
newFakeStatefulSetStatusUpdater(informerFactory.Apps().V1beta1().StatefulSets()) - ssc := NewDefaultStatefulSetControl(spc, ssu, history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions())) + recorder := record.NewFakeRecorder(10) + ssc := NewDefaultStatefulSetControl(spc, ssu, history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions()), recorder) stop := make(chan struct{}) informerFactory.Start(stop) @@ -452,7 +454,8 @@ func TestStatefulSetControl_getSetRevisions(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) spc := newFakeStatefulPodControl(informerFactory.Core().V1().Pods(), informerFactory.Apps().V1beta1().StatefulSets()) ssu := newFakeStatefulSetStatusUpdater(informerFactory.Apps().V1beta1().StatefulSets()) - ssc := defaultStatefulSetControl{spc, ssu, history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions())} + recorder := record.NewFakeRecorder(10) + ssc := defaultStatefulSetControl{spc, ssu, history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions()), recorder} stop := make(chan struct{}) defer close(stop) diff --git a/pkg/controller/statefulset/stateful_set_test.go b/pkg/controller/statefulset/stateful_set_test.go index f43d7b70145..632f799c531 100644 --- a/pkg/controller/statefulset/stateful_set_test.go +++ b/pkg/controller/statefulset/stateful_set_test.go @@ -28,6 +28,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/history" ) @@ -585,7 +586,8 @@ func newFakeStatefulSetController(initialObjects ...runtime.Object) (*StatefulSe ssh := history.NewFakeHistory(informerFactory.Apps().V1beta1().ControllerRevisions()) ssc.podListerSynced = alwaysReady ssc.setListerSynced = alwaysReady - ssc.control = NewDefaultStatefulSetControl(fpc, ssu, ssh) + recorder := 
record.NewFakeRecorder(10) + ssc.control = NewDefaultStatefulSetControl(fpc, ssu, ssh, recorder) return ssc, fpc } From 47a85180fdf3ab60935be29b788fcb002fdf1de8 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Fri, 10 Nov 2017 16:31:34 +0800 Subject: [PATCH 025/794] remove duplicate code --- pkg/securitycontext/util.go | 78 ------------------------------------- 1 file changed, 78 deletions(-) diff --git a/pkg/securitycontext/util.go b/pkg/securitycontext/util.go index 73d23a43131..2719e118269 100644 --- a/pkg/securitycontext/util.go +++ b/pkg/securitycontext/util.go @@ -21,7 +21,6 @@ import ( "strings" "k8s.io/api/core/v1" - api "k8s.io/kubernetes/pkg/apis/core" ) // HasPrivilegedRequest returns the value of SecurityContext.Privileged, taking into account @@ -165,83 +164,6 @@ func securityContextFromPodSecurityContext(pod *v1.Pod) *v1.SecurityContext { return synthesized } -// TODO: remove the duplicate code -func InternalDetermineEffectiveSecurityContext(pod *api.Pod, container *api.Container) *api.SecurityContext { - effectiveSc := internalSecurityContextFromPodSecurityContext(pod) - containerSc := container.SecurityContext - - if effectiveSc == nil && containerSc == nil { - return nil - } - if effectiveSc != nil && containerSc == nil { - return effectiveSc - } - if effectiveSc == nil && containerSc != nil { - return containerSc - } - - if containerSc.SELinuxOptions != nil { - effectiveSc.SELinuxOptions = new(api.SELinuxOptions) - *effectiveSc.SELinuxOptions = *containerSc.SELinuxOptions - } - - if containerSc.Capabilities != nil { - effectiveSc.Capabilities = new(api.Capabilities) - *effectiveSc.Capabilities = *containerSc.Capabilities - } - - if containerSc.Privileged != nil { - effectiveSc.Privileged = new(bool) - *effectiveSc.Privileged = *containerSc.Privileged - } - - if containerSc.RunAsUser != nil { - effectiveSc.RunAsUser = new(int64) - *effectiveSc.RunAsUser = *containerSc.RunAsUser - } - - if containerSc.RunAsNonRoot != nil { - effectiveSc.RunAsNonRoot 
= new(bool) - *effectiveSc.RunAsNonRoot = *containerSc.RunAsNonRoot - } - - if containerSc.ReadOnlyRootFilesystem != nil { - effectiveSc.ReadOnlyRootFilesystem = new(bool) - *effectiveSc.ReadOnlyRootFilesystem = *containerSc.ReadOnlyRootFilesystem - } - - if containerSc.AllowPrivilegeEscalation != nil { - effectiveSc.AllowPrivilegeEscalation = new(bool) - *effectiveSc.AllowPrivilegeEscalation = *containerSc.AllowPrivilegeEscalation - } - - return effectiveSc -} - -func internalSecurityContextFromPodSecurityContext(pod *api.Pod) *api.SecurityContext { - if pod.Spec.SecurityContext == nil { - return nil - } - - synthesized := &api.SecurityContext{} - - if pod.Spec.SecurityContext.SELinuxOptions != nil { - synthesized.SELinuxOptions = &api.SELinuxOptions{} - *synthesized.SELinuxOptions = *pod.Spec.SecurityContext.SELinuxOptions - } - if pod.Spec.SecurityContext.RunAsUser != nil { - synthesized.RunAsUser = new(int64) - *synthesized.RunAsUser = *pod.Spec.SecurityContext.RunAsUser - } - - if pod.Spec.SecurityContext.RunAsNonRoot != nil { - synthesized.RunAsNonRoot = new(bool) - *synthesized.RunAsNonRoot = *pod.Spec.SecurityContext.RunAsNonRoot - } - - return synthesized -} - // AddNoNewPrivileges returns if we should add the no_new_privs option. func AddNoNewPrivileges(sc *v1.SecurityContext) bool { if sc == nil { From 22b04c828bb5c3fed0218e198fa470cafd0b78e2 Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Thu, 26 Oct 2017 19:04:18 +0800 Subject: [PATCH 026/794] Append --feature-gates option iff TestContext.FeatureGates is not nil --- test/e2e_node/services/kubelet.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/e2e_node/services/kubelet.go b/test/e2e_node/services/kubelet.go index 0e655349075..46352df5d6e 100644 --- a/test/e2e_node/services/kubelet.go +++ b/test/e2e_node/services/kubelet.go @@ -165,14 +165,17 @@ func (e *E2EServices) startKubelet() (*server, error) { // - test/e2e_node/conformance/run_test.sh. 
"--pod-cidr", "10.100.0.0/24", "--eviction-pressure-transition-period", "30s", - // Apply test framework feature gates by default. This could also be overridden - // by kubelet-flags. - "--feature-gates", framework.TestContext.FeatureGates, "--eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", // The hard eviction thresholds. "--eviction-minimum-reclaim", "nodefs.available=5%,nodefs.inodesFree=5%", // The minimum reclaimed resources after eviction. "--v", LOG_VERBOSITY_LEVEL, "--logtostderr", ) + // Apply test framework feature gates by default. This could also be overridden + // by kubelet-flags. + if framework.TestContext.FeatureGates != "" { + cmdArgs = append(cmdArgs, "--feature-gates", framework.TestContext.FeatureGates) + } + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { // Enable dynamic config if the feature gate is enabled dynamicConfigDir, err := getDynamicConfigDir() From 4d018182c4fe418d89455c82e8309e3afa37369b Mon Sep 17 00:00:00 2001 From: yuexiao-wang Date: Thu, 9 Nov 2017 13:43:32 +0800 Subject: [PATCH 027/794] add unit test for statefulset Signed-off-by: yuexiao-wang --- pkg/kubectl/scale_test.go | 264 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 264 insertions(+) diff --git a/pkg/kubectl/scale_test.go b/pkg/kubectl/scale_test.go index 85c77a1ffbf..96250d8fe1e 100644 --- a/pkg/kubectl/scale_test.go +++ b/pkg/kubectl/scale_test.go @@ -23,10 +23,12 @@ import ( kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" testcore "k8s.io/client-go/testing" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" batchclient 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" @@ -785,3 +787,265 @@ func TestValidateDeployment(t *testing.T) { } } } + +type ErrorStatefulSets struct { + appsclient.StatefulSetInterface + conflict bool + invalid bool +} + +func (c *ErrorStatefulSets) Update(statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) { + switch { + case c.invalid: + return nil, kerrors.NewInvalid(api.Kind(statefulSet.Kind), statefulSet.Name, nil) + case c.conflict: + return nil, kerrors.NewConflict(api.Resource(statefulSet.Kind), statefulSet.Name, nil) + } + return nil, errors.New("statefulSet update failure") +} + +func (c *ErrorStatefulSets) Get(name string, options metav1.GetOptions) (*apps.StatefulSet, error) { + return &apps.StatefulSet{ + Spec: apps.StatefulSetSpec{ + Replicas: 0, + }, + }, nil +} + +type ErrorStatefulSetClient struct { + appsclient.StatefulSetsGetter + conflict bool + invalid bool +} + +func (c *ErrorStatefulSetClient) StatefulSets(namespace string) appsclient.StatefulSetInterface { + return &ErrorStatefulSets{ + StatefulSetInterface: c.StatefulSetsGetter.StatefulSets(namespace), + invalid: c.invalid, + conflict: c.conflict, + } +} + +func statefulSet() *apps.StatefulSet { + return &apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "foo", + }, + } +} + +func TestStatefulSetScale(t *testing.T) { + fake := fake.NewSimpleClientset(statefulSet()) + scaler := StatefulSetScaler{fake.Apps()} + preconditions := ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + scaler.Scale("default", name, count, &preconditions, nil, nil) + + actions := fake.Actions() + if len(actions) != 2 { + t.Errorf("unexpected actions: %v, expected 2 actions (get, 
update)", actions) + } + + if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != apps.Resource("statefulsets") || action.GetName() != name { + t.Errorf("unexpected action: %v, expected get-statefulsets %s", actions[0], name) + } + if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != apps.Resource("statefulsets") || action.GetObject().(*apps.StatefulSet).Spec.Replicas != int32(count) { + t.Errorf("unexpected action %v, expected update-statefulset with replicas = %d", actions[1], count) + } +} + +func TestStatefulSetScaleRetry(t *testing.T) { + fake := &ErrorStatefulSetClient{StatefulSetsGetter: fake.NewSimpleClientset().Apps(), conflict: true} + scaler := &StatefulSetScaler{fake} + preconditions := &ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + namespace := "default" + + scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil) + pass, err := scaleFunc() + if pass != false { + t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) + } + if err != nil { + t.Errorf("Did not expect an error on update failure, got %v", err) + } + preconditions = &ScalePrecondition{3, ""} + scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil) + pass, err = scaleFunc() + if err == nil { + t.Error("Expected error on precondition failure") + } +} + +func TestStatefulSetScaleInvalid(t *testing.T) { + fake := &ErrorStatefulSetClient{StatefulSetsGetter: fake.NewSimpleClientset().Apps(), invalid: true} + scaler := StatefulSetScaler{fake} + preconditions := ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + namespace := "default" + + scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count, nil) + pass, err := scaleFunc() + if pass { + t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) + } + e, ok := err.(ScaleError) + if err == nil || !ok || 
e.FailureType != ScaleUpdateFailure { + t.Errorf("Expected error on invalid update failure, got %v", err) + } +} + +func TestStatefulSetScaleFailsPreconditions(t *testing.T) { + fake := fake.NewSimpleClientset(&apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "foo", + }, + Spec: apps.StatefulSetSpec{ + Replicas: 10, + }, + }) + scaler := StatefulSetScaler{fake.Apps()} + preconditions := ScalePrecondition{2, ""} + count := uint(3) + name := "foo" + scaler.Scale("default", name, count, &preconditions, nil, nil) + + actions := fake.Actions() + if len(actions) != 1 { + t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions) + } + if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != apps.Resource("statefulsets") || action.GetName() != name { + t.Errorf("unexpected action: %v, expected get-statefulset %s", actions[0], name) + } +} + +func TestValidateStatefulSet(t *testing.T) { + zero, ten, twenty := int32(0), int32(10), int32(20) + tests := []struct { + preconditions ScalePrecondition + statefulset apps.StatefulSet + expectError bool + test string + }{ + { + preconditions: ScalePrecondition{-1, ""}, + expectError: false, + test: "defaults", + }, + { + preconditions: ScalePrecondition{-1, ""}, + statefulset: apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: apps.StatefulSetSpec{ + Replicas: ten, + }, + }, + expectError: false, + test: "defaults 2", + }, + { + preconditions: ScalePrecondition{0, ""}, + statefulset: apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: apps.StatefulSetSpec{ + Replicas: zero, + }, + }, + expectError: false, + test: "size matches", + }, + { + preconditions: ScalePrecondition{-1, "foo"}, + statefulset: apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: apps.StatefulSetSpec{ + Replicas: ten, + }, + }, + expectError: false, + 
test: "resource version matches", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + statefulset: apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: apps.StatefulSetSpec{ + Replicas: ten, + }, + }, + expectError: false, + test: "both match", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + statefulset: apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: apps.StatefulSetSpec{ + Replicas: twenty, + }, + }, + expectError: true, + test: "size different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + statefulset: apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + }, + expectError: true, + test: "no replicas", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + statefulset: apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: apps.StatefulSetSpec{ + Replicas: ten, + }, + }, + expectError: true, + test: "version different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + statefulset: apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: apps.StatefulSetSpec{ + Replicas: twenty, + }, + }, + expectError: true, + test: "both different", + }, + } + for _, test := range tests { + err := test.preconditions.ValidateStatefulSet(&test.statefulset) + if err != nil && !test.expectError { + t.Errorf("unexpected error: %v (%s)", err, test.test) + } + if err == nil && test.expectError { + t.Errorf("expected an error: %v (%s)", err, test.test) + } + } +} From b6c47305a24f5e4e894660829e049624b1736898 Mon Sep 17 00:00:00 2001 From: yuexiao-wang Date: Sat, 11 Nov 2017 14:51:41 +0800 Subject: [PATCH 028/794] add unit test for replicaset Signed-off-by: yuexiao-wang --- pkg/kubectl/scale_test.go | 261 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 261 insertions(+) diff --git a/pkg/kubectl/scale_test.go b/pkg/kubectl/scale_test.go index 96250d8fe1e..f4e7a3b275d 
100644 --- a/pkg/kubectl/scale_test.go +++ b/pkg/kubectl/scale_test.go @@ -1049,3 +1049,264 @@ func TestValidateStatefulSet(t *testing.T) { } } } + +type ErrorReplicaSets struct { + extensionsclient.ReplicaSetInterface + conflict bool + invalid bool +} + +func (c *ErrorReplicaSets) Update(replicaSets *extensions.ReplicaSet) (*extensions.ReplicaSet, error) { + switch { + case c.invalid: + return nil, kerrors.NewInvalid(api.Kind(replicaSets.Kind), replicaSets.Name, nil) + case c.conflict: + return nil, kerrors.NewConflict(api.Resource(replicaSets.Kind), replicaSets.Name, nil) + } + return nil, errors.New("replicaSets update failure") +} + +func (c *ErrorReplicaSets) Get(name string, options metav1.GetOptions) (*extensions.ReplicaSet, error) { + return &extensions.ReplicaSet{ + Spec: extensions.ReplicaSetSpec{ + Replicas: 0, + }, + }, nil +} + +type ErrorReplicaSetClient struct { + extensionsclient.ReplicaSetsGetter + conflict bool + invalid bool +} + +func (c *ErrorReplicaSetClient) ReplicaSets(namespace string) extensionsclient.ReplicaSetInterface { + return &ErrorReplicaSets{ + ReplicaSetInterface: c.ReplicaSetsGetter.ReplicaSets(namespace), + invalid: c.invalid, + conflict: c.conflict, + } +} + +func replicaSet() *extensions.ReplicaSet { + return &extensions.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "foo", + }, + } +} + +func TestReplicaSetScale(t *testing.T) { + fake := fake.NewSimpleClientset(replicaSet()) + scaler := ReplicaSetScaler{fake.Extensions()} + preconditions := ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + scaler.Scale("default", name, count, &preconditions, nil, nil) + + actions := fake.Actions() + if len(actions) != 2 { + t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) + } + if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != extensions.Resource("replicasets") || action.GetName() != name { + 
t.Errorf("unexpected action: %v, expected get-replicationSet %s", actions[0], name) + } + if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != extensions.Resource("replicasets") || action.GetObject().(*extensions.ReplicaSet).Spec.Replicas != int32(count) { + t.Errorf("unexpected action %v, expected update-replicaSet with replicas = %d", actions[1], count) + } +} + +func TestReplicaSetScaleRetry(t *testing.T) { + fake := &ErrorReplicaSetClient{ReplicaSetsGetter: fake.NewSimpleClientset().Extensions(), conflict: true} + scaler := &ReplicaSetScaler{fake} + preconditions := &ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + namespace := "default" + + scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil) + pass, err := scaleFunc() + if pass != false { + t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) + } + if err != nil { + t.Errorf("Did not expect an error on update failure, got %v", err) + } + preconditions = &ScalePrecondition{3, ""} + scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil) + pass, err = scaleFunc() + if err == nil { + t.Error("Expected error on precondition failure") + } +} + +func TestReplicaSetScaleInvalid(t *testing.T) { + fake := &ErrorReplicaSetClient{ReplicaSetsGetter: fake.NewSimpleClientset().Extensions(), invalid: true} + scaler := ReplicaSetScaler{fake} + preconditions := ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + namespace := "default" + + scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count, nil) + pass, err := scaleFunc() + if pass { + t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) + } + e, ok := err.(ScaleError) + if err == nil || !ok || e.FailureType != ScaleUpdateFailure { + t.Errorf("Expected error on invalid update failure, got %v", err) + } +} + +func TestReplicaSetsGetterFailsPreconditions(t *testing.T) { + 
fake := fake.NewSimpleClientset(&extensions.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "foo", + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 10, + }, + }) + scaler := ReplicaSetScaler{fake.Extensions()} + preconditions := ScalePrecondition{2, ""} + count := uint(3) + name := "foo" + scaler.Scale("default", name, count, &preconditions, nil, nil) + + actions := fake.Actions() + if len(actions) != 1 { + t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions) + } + if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != extensions.Resource("replicasets") || action.GetName() != name { + t.Errorf("unexpected action: %v, expected get-replicaSets %s", actions[0], name) + } +} + +func TestValidateReplicaSets(t *testing.T) { + zero, ten, twenty := int32(0), int32(10), int32(20) + tests := []struct { + preconditions ScalePrecondition + replicaSets extensions.ReplicaSet + expectError bool + test string + }{ + { + preconditions: ScalePrecondition{-1, ""}, + expectError: false, + test: "defaults", + }, + { + preconditions: ScalePrecondition{-1, ""}, + replicaSets: extensions.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: ten, + }, + }, + expectError: false, + test: "defaults 2", + }, + { + preconditions: ScalePrecondition{0, ""}, + replicaSets: extensions.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: zero, + }, + }, + expectError: false, + test: "size matches", + }, + { + preconditions: ScalePrecondition{-1, "foo"}, + replicaSets: extensions.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: ten, + }, + }, + expectError: false, + test: "resource version matches", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + replicaSets: 
extensions.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: ten, + }, + }, + expectError: false, + test: "both match", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + replicaSets: extensions.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: twenty, + }, + }, + expectError: true, + test: "size different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + replicaSets: extensions.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + }, + expectError: true, + test: "no replicas", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + replicaSets: extensions.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: ten, + }, + }, + expectError: true, + test: "version different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + replicaSets: extensions.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: twenty, + }, + }, + expectError: true, + test: "both different", + }, + } + for _, test := range tests { + err := test.preconditions.ValidateReplicaSet(&test.replicaSets) + if err != nil && !test.expectError { + t.Errorf("unexpected error: %v (%s)", err, test.test) + } + if err == nil && test.expectError { + t.Errorf("expected an error: %v (%s)", err, test.test) + } + } +} From 2cfdfd9f8f7ef2ff4b281eb9117a4d825e943dcc Mon Sep 17 00:00:00 2001 From: yuexiao-wang Date: Sat, 11 Nov 2017 15:00:43 +0800 Subject: [PATCH 029/794] Use Error with no value format and fix typo error messages Signed-off-by: yuexiao-wang --- pkg/kubectl/scale_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/kubectl/scale_test.go b/pkg/kubectl/scale_test.go index f4e7a3b275d..ba177982f20 100644 --- a/pkg/kubectl/scale_test.go +++ 
b/pkg/kubectl/scale_test.go @@ -323,7 +323,7 @@ func TestJobScaleRetry(t *testing.T) { scaleFunc = ScaleCondition(&scaler, &preconditions, namespace, name, count, nil) pass, err = scaleFunc() if err == nil { - t.Errorf("Expected error on precondition failure") + t.Error("Expected error on precondition failure") } } @@ -349,7 +349,7 @@ func TestJobScale(t *testing.T) { t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) } if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name { - t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) + t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name) } if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || *action.GetObject().(*batch.Job).Spec.Parallelism != int32(count) { t.Errorf("unexpected action %v, expected update-job with parallelism = %d", actions[1], count) @@ -585,7 +585,7 @@ func TestDeploymentScaleRetry(t *testing.T) { scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil) pass, err = scaleFunc() if err == nil { - t.Errorf("Expected error on precondition failure") + t.Error("Expected error on precondition failure") } } @@ -611,7 +611,7 @@ func TestDeploymentScale(t *testing.T) { t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) } if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != extensions.Resource("deployments") || action.GetName() != name { - t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) + t.Errorf("unexpected action: %v, expected get-deployment %s", actions[0], name) } if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != extensions.Resource("deployments") || 
action.GetObject().(*extensions.Deployment).Spec.Replicas != int32(count) { t.Errorf("unexpected action %v, expected update-deployment with replicas = %d", actions[1], count) From dbca4f25c4ad8b23a42c917c46d45f0ae7e12ead Mon Sep 17 00:00:00 2001 From: yuexiao-wang Date: Sat, 11 Nov 2017 15:03:54 +0800 Subject: [PATCH 030/794] update BUILD Signed-off-by: yuexiao-wang --- pkg/kubectl/BUILD | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index c1de97568d5..c76b9d86d5e 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -40,11 +40,13 @@ go_test( "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", + "//pkg/apis/apps:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", + "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", From f65fbde607216d8f291a1d1dfb730d6cfe2813ca Mon Sep 17 00:00:00 2001 From: wackxu Date: Mon, 13 Nov 2017 11:02:04 +0800 Subject: [PATCH 031/794] add apiServerCertSANs case for test GetAltNames --- cmd/kubeadm/app/phases/certs/certs_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cmd/kubeadm/app/phases/certs/certs_test.go b/cmd/kubeadm/app/phases/certs/certs_test.go index a22f2e4ae77..897eb22c2f7 100644 --- a/cmd/kubeadm/app/phases/certs/certs_test.go +++ b/cmd/kubeadm/app/phases/certs/certs_test.go @@ -262,9 
+262,10 @@ func TestGetAltNames(t *testing.T) { hostname := "valid-hostname" advertiseIP := "1.2.3.4" cfg := &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: advertiseIP}, - Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, - NodeName: hostname, + API: kubeadmapi.API{AdvertiseAddress: advertiseIP}, + Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, + NodeName: hostname, + APIServerCertSANs: []string{"10.1.245.94", "10.1.245.95"}, } altNames, err := getAltNames(cfg) @@ -287,7 +288,7 @@ func TestGetAltNames(t *testing.T) { } } - expectedIPAddresses := []string{"10.96.0.1", advertiseIP} + expectedIPAddresses := []string{"10.96.0.1", advertiseIP, "10.1.245.94", "10.1.245.95"} for _, IPAddress := range expectedIPAddresses { found := false for _, val := range altNames.IPs { From 7e973f82752b4447742b828b3a0497a345beb646 Mon Sep 17 00:00:00 2001 From: Yanqiang Miao Date: Mon, 13 Nov 2017 14:28:01 +0800 Subject: [PATCH 032/794] Simplify the sorting codes --- pkg/kubectl/util/slice/slice.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/pkg/kubectl/util/slice/slice.go b/pkg/kubectl/util/slice/slice.go index 6885c4888db..8130753c300 100644 --- a/pkg/kubectl/util/slice/slice.go +++ b/pkg/kubectl/util/slice/slice.go @@ -20,13 +20,5 @@ import ( "sort" ) -// Int64Slice attaches the methods of Interface to []int64, -// sorting in increasing order. 
-type Int64Slice []int64 - -func (p Int64Slice) Len() int { return len(p) } -func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - // Sorts []int64 in increasing order -func SortInts64(a []int64) { sort.Sort(Int64Slice(a)) } +func SortInts64(a []int64) { sort.Slice(a, func(i, j int) bool { return a[i] < a[j] }) } From aabee865cdc70b16571ecd346e35ce9008d93ad3 Mon Sep 17 00:00:00 2001 From: zhengjiajin Date: Mon, 13 Nov 2017 17:29:40 +0800 Subject: [PATCH 033/794] remove internal version api from kubectl annotate command --- pkg/kubectl/cmd/annotate_test.go | 36 ++++++++++++++++---------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/pkg/kubectl/cmd/annotate_test.go b/pkg/kubectl/cmd/annotate_test.go index 1c112d6a83f..fdd968df260 100644 --- a/pkg/kubectl/cmd/annotate_test.go +++ b/pkg/kubectl/cmd/annotate_test.go @@ -23,11 +23,11 @@ import ( "strings" "testing" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest/fake" - api "k8s.io/kubernetes/pkg/apis/core" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" ) @@ -226,7 +226,7 @@ func TestUpdateAnnotations(t *testing.T) { expectErr bool }{ { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b"}, }, @@ -235,41 +235,41 @@ func TestUpdateAnnotations(t *testing.T) { expectErr: true, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b"}, }, }, annotations: map[string]string{"a": "c"}, overwrite: true, - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "c"}, }, }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b"}, }, }, annotations: map[string]string{"c": "d"}, - expected: &api.Pod{ + 
expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b", "c": "d"}, }, }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b"}, }, }, annotations: map[string]string{"c": "d"}, version: "2", - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b", "c": "d"}, ResourceVersion: "2", @@ -277,28 +277,28 @@ func TestUpdateAnnotations(t *testing.T) { }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b"}, }, }, annotations: map[string]string{}, remove: []string{"a"}, - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{}, }, }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b", "c": "d"}, }, }, annotations: map[string]string{"e": "f"}, remove: []string{"a"}, - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "c": "d", @@ -308,14 +308,14 @@ func TestUpdateAnnotations(t *testing.T) { }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b", "c": "d"}, }, }, annotations: map[string]string{"e": "f"}, remove: []string{"g"}, - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "a": "b", @@ -326,13 +326,13 @@ func TestUpdateAnnotations(t *testing.T) { }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b", "c": "d"}, }, }, remove: []string{"e"}, - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "a": "b", @@ -342,11 +342,11 @@ func TestUpdateAnnotations(t *testing.T) { }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{}, }, annotations: map[string]string{"a": "b"}, - expected: 
&api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"a": "b"}, }, From 3fe0d596077b2d89da3d0ea316d277068eda6a1d Mon Sep 17 00:00:00 2001 From: zhengjiajin Date: Mon, 13 Nov 2017 17:30:09 +0800 Subject: [PATCH 034/794] remove internal version api from kubectl label command --- pkg/kubectl/cmd/label_test.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/pkg/kubectl/cmd/label_test.go b/pkg/kubectl/cmd/label_test.go index ff7d5649759..0aaf460e93d 100644 --- a/pkg/kubectl/cmd/label_test.go +++ b/pkg/kubectl/cmd/label_test.go @@ -23,10 +23,10 @@ import ( "strings" "testing" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest/fake" - api "k8s.io/kubernetes/pkg/apis/core" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" "k8s.io/kubernetes/pkg/kubectl/resource" ) @@ -164,7 +164,7 @@ func TestLabelFunc(t *testing.T) { expectErr bool }{ { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, }, @@ -173,41 +173,41 @@ func TestLabelFunc(t *testing.T) { expectErr: true, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, }, }, labels: map[string]string{"a": "c"}, overwrite: true, - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "c"}, }, }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, }, }, labels: map[string]string{"c": "d"}, - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b", "c": "d"}, }, }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, }, }, labels: map[string]string{"c": "d"}, version: "2", - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: 
map[string]string{"a": "b", "c": "d"}, ResourceVersion: "2", @@ -215,28 +215,28 @@ func TestLabelFunc(t *testing.T) { }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, }, }, labels: map[string]string{}, remove: []string{"a"}, - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{}, }, }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b", "c": "d"}, }, }, labels: map[string]string{"e": "f"}, remove: []string{"a"}, - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ "c": "d", @@ -246,11 +246,11 @@ func TestLabelFunc(t *testing.T) { }, }, { - obj: &api.Pod{ + obj: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{}, }, labels: map[string]string{"a": "b"}, - expected: &api.Pod{ + expected: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, }, From a31a14924ddd4ee7fe804dd9133645f5af651f57 Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Fri, 10 Nov 2017 18:22:34 +0100 Subject: [PATCH 035/794] CreatePodSecurityContext: rename; modify its arguments instead of returning a copy. --- pkg/security/podsecuritypolicy/provider.go | 30 ++++++++++--------- .../podsecuritypolicy/provider_test.go | 8 ++--- pkg/security/podsecuritypolicy/types.go | 6 ++-- .../security/podsecuritypolicy/admission.go | 4 +-- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/pkg/security/podsecuritypolicy/provider.go b/pkg/security/podsecuritypolicy/provider.go index b374b265f56..ccf897ec751 100644 --- a/pkg/security/podsecuritypolicy/provider.go +++ b/pkg/security/podsecuritypolicy/provider.go @@ -64,17 +64,16 @@ func NewSimpleProvider(psp *extensions.PodSecurityPolicy, namespace string, stra }, nil } -// Create a PodSecurityContext based on the given constraints. If a setting is already set -// on the PodSecurityContext it will not be changed. 
Validate should be used after the context -// is created to ensure it complies with the required restrictions. -func (s *simpleProvider) CreatePodSecurityContext(pod *api.Pod) (*api.PodSecurityContext, map[string]string, error) { +// DefaultPodSecurityContext sets the default values of the required but not filled fields. +// It modifies the SecurityContext and annotations of the provided pod. Validation should be +// used after the context is defaulted to ensure it complies with the required restrictions. +func (s *simpleProvider) DefaultPodSecurityContext(pod *api.Pod) error { sc := securitycontext.NewPodSecurityContextMutator(pod.Spec.SecurityContext) - annotations := maps.CopySS(pod.Annotations) if sc.SupplementalGroups() == nil { supGroups, err := s.strategies.SupplementalGroupStrategy.Generate(pod) if err != nil { - return nil, nil, err + return err } sc.SetSupplementalGroups(supGroups) } @@ -82,7 +81,7 @@ func (s *simpleProvider) CreatePodSecurityContext(pod *api.Pod) (*api.PodSecurit if sc.FSGroup() == nil { fsGroup, err := s.strategies.FSGroupStrategy.GenerateSingle(pod) if err != nil { - return nil, nil, err + return err } sc.SetFSGroup(fsGroup) } @@ -90,24 +89,27 @@ func (s *simpleProvider) CreatePodSecurityContext(pod *api.Pod) (*api.PodSecurit if sc.SELinuxOptions() == nil { seLinux, err := s.strategies.SELinuxStrategy.Generate(pod, nil) if err != nil { - return nil, nil, err + return err } sc.SetSELinuxOptions(seLinux) } // This is only generated on the pod level. Containers inherit the pod's profile. If the // container has a specific profile set then it will be caught in the validation step. 
- seccompProfile, err := s.strategies.SeccompStrategy.Generate(annotations, pod) + seccompProfile, err := s.strategies.SeccompStrategy.Generate(pod.Annotations, pod) if err != nil { - return nil, nil, err + return err } if seccompProfile != "" { - if annotations == nil { - annotations = map[string]string{} + if pod.Annotations == nil { + pod.Annotations = map[string]string{} } - annotations[api.SeccompPodAnnotationKey] = seccompProfile + pod.Annotations[api.SeccompPodAnnotationKey] = seccompProfile } - return sc.PodSecurityContext(), annotations, nil + + pod.Spec.SecurityContext = sc.PodSecurityContext() + + return nil } // Create a SecurityContext based on the given constraints. If a setting is already set on the diff --git a/pkg/security/podsecuritypolicy/provider_test.go b/pkg/security/podsecuritypolicy/provider_test.go index b0389e33aac..1c740842d8d 100644 --- a/pkg/security/podsecuritypolicy/provider_test.go +++ b/pkg/security/podsecuritypolicy/provider_test.go @@ -38,7 +38,7 @@ import ( const defaultContainerName = "test-c" -func TestCreatePodSecurityContextNonmutating(t *testing.T) { +func TestDefaultPodSecurityContextNonmutating(t *testing.T) { // Create a pod with a security context that needs filling in createPod := func() *api.Pod { return &api.Pod{ @@ -82,7 +82,7 @@ func TestCreatePodSecurityContextNonmutating(t *testing.T) { if err != nil { t.Fatalf("unable to create provider %v", err) } - _, _, err = provider.CreatePodSecurityContext(pod) + err = provider.DefaultPodSecurityContext(pod) if err != nil { t.Fatalf("unable to create psc %v", err) } @@ -91,10 +91,10 @@ func TestCreatePodSecurityContextNonmutating(t *testing.T) { // since all the strategies were permissive if !reflect.DeepEqual(createPod(), pod) { diffs := diff.ObjectDiff(createPod(), pod) - t.Errorf("pod was mutated by CreatePodSecurityContext. diff:\n%s", diffs) + t.Errorf("pod was mutated by DefaultPodSecurityContext. 
diff:\n%s", diffs) } if !reflect.DeepEqual(createPSP(), psp) { - t.Error("psp was mutated by CreatePodSecurityContext") + t.Error("psp was mutated by DefaultPodSecurityContext") } } diff --git a/pkg/security/podsecuritypolicy/types.go b/pkg/security/podsecuritypolicy/types.go index 31fcc5484d8..405ac65df74 100644 --- a/pkg/security/podsecuritypolicy/types.go +++ b/pkg/security/podsecuritypolicy/types.go @@ -32,9 +32,9 @@ import ( // Provider provides the implementation to generate a new security // context based on constraints or validate an existing security context against constraints. type Provider interface { - // Create a PodSecurityContext based on the given constraints. Also returns an updated set - // of Pod annotations for alpha feature support. - CreatePodSecurityContext(pod *api.Pod) (*api.PodSecurityContext, map[string]string, error) + // DefaultPodSecurityContext sets the default values of the required but not filled fields. + // It modifies the SecurityContext and annotations of the provided pod. + DefaultPodSecurityContext(pod *api.Pod) error // Create a container SecurityContext based on the given constraints. Also returns an updated set // of Pod annotations for alpha feature support. 
CreateContainerSecurityContext(pod *api.Pod, container *api.Container) (*api.SecurityContext, map[string]string, error) diff --git a/plugin/pkg/admission/security/podsecuritypolicy/admission.go b/plugin/pkg/admission/security/podsecuritypolicy/admission.go index da6ec45695d..c7b545ec996 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/admission.go +++ b/plugin/pkg/admission/security/podsecuritypolicy/admission.go @@ -273,12 +273,10 @@ func (c *PodSecurityPolicyPlugin) computeSecurityContext(a admission.Attributes, func assignSecurityContext(provider psp.Provider, pod *api.Pod, fldPath *field.Path) field.ErrorList { errs := field.ErrorList{} - psc, pscAnnotations, err := provider.CreatePodSecurityContext(pod) + err := provider.DefaultPodSecurityContext(pod) if err != nil { errs = append(errs, field.Invalid(field.NewPath("spec", "securityContext"), pod.Spec.SecurityContext, err.Error())) } - pod.Spec.SecurityContext = psc - pod.Annotations = pscAnnotations errs = append(errs, provider.ValidatePodSecurityContext(pod, field.NewPath("spec", "securityContext"))...) From a4a3c7938a9241210160f7a7500fadb049ade338 Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Fri, 10 Nov 2017 18:23:26 +0100 Subject: [PATCH 036/794] CreateContainerSecurityContext: rename; modify its arguments instead of returning a copy. 
--- pkg/security/podsecuritypolicy/BUILD | 1 - pkg/security/podsecuritypolicy/provider.go | 26 +++++++++---------- .../podsecuritypolicy/provider_test.go | 11 ++++---- pkg/security/podsecuritypolicy/types.go | 6 ++--- .../security/podsecuritypolicy/admission.go | 9 ++----- 5 files changed, 24 insertions(+), 29 deletions(-) diff --git a/pkg/security/podsecuritypolicy/BUILD b/pkg/security/podsecuritypolicy/BUILD index 282d256aaef..e76448d470d 100644 --- a/pkg/security/podsecuritypolicy/BUILD +++ b/pkg/security/podsecuritypolicy/BUILD @@ -27,7 +27,6 @@ go_library( "//pkg/security/podsecuritypolicy/user:go_default_library", "//pkg/security/podsecuritypolicy/util:go_default_library", "//pkg/securitycontext:go_default_library", - "//pkg/util/maps:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], diff --git a/pkg/security/podsecuritypolicy/provider.go b/pkg/security/podsecuritypolicy/provider.go index ccf897ec751..0c6bc1c71fc 100644 --- a/pkg/security/podsecuritypolicy/provider.go +++ b/pkg/security/podsecuritypolicy/provider.go @@ -25,7 +25,6 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" "k8s.io/kubernetes/pkg/securitycontext" - "k8s.io/kubernetes/pkg/util/maps" ) // used to pass in the field being validated for reusable group strategies so they @@ -112,21 +111,19 @@ func (s *simpleProvider) DefaultPodSecurityContext(pod *api.Pod) error { return nil } -// Create a SecurityContext based on the given constraints. If a setting is already set on the -// container's security context then it will not be changed. Validation should be used after -// the context is created to ensure it complies with the required restrictions. 
-func (s *simpleProvider) CreateContainerSecurityContext(pod *api.Pod, container *api.Container) (*api.SecurityContext, map[string]string, error) { +// DefaultContainerSecurityContext sets the default values of the required but not filled fields. +// It modifies the SecurityContext of the container and annotations of the pod. Validation should +// be used after the context is defaulted to ensure it complies with the required restrictions. +func (s *simpleProvider) DefaultContainerSecurityContext(pod *api.Pod, container *api.Container) error { sc := securitycontext.NewEffectiveContainerSecurityContextMutator( securitycontext.NewPodSecurityContextAccessor(pod.Spec.SecurityContext), securitycontext.NewContainerSecurityContextMutator(container.SecurityContext), ) - annotations := maps.CopySS(pod.Annotations) - if sc.RunAsUser() == nil { uid, err := s.strategies.RunAsUserStrategy.Generate(pod, container) if err != nil { - return nil, nil, err + return err } sc.SetRunAsUser(uid) } @@ -134,14 +131,14 @@ func (s *simpleProvider) CreateContainerSecurityContext(pod *api.Pod, container if sc.SELinuxOptions() == nil { seLinux, err := s.strategies.SELinuxStrategy.Generate(pod, container) if err != nil { - return nil, nil, err + return err } sc.SetSELinuxOptions(seLinux) } - annotations, err := s.strategies.AppArmorStrategy.Generate(annotations, container) + annotations, err := s.strategies.AppArmorStrategy.Generate(pod.Annotations, container) if err != nil { - return nil, nil, err + return err } // if we're using the non-root strategy set the marker that this container should not be @@ -154,7 +151,7 @@ func (s *simpleProvider) CreateContainerSecurityContext(pod *api.Pod, container caps, err := s.strategies.CapabilitiesStrategy.Generate(pod, container) if err != nil { - return nil, nil, err + return err } sc.SetCapabilities(caps) @@ -176,7 +173,10 @@ func (s *simpleProvider) CreateContainerSecurityContext(pod *api.Pod, container 
sc.SetAllowPrivilegeEscalation(&s.psp.Spec.AllowPrivilegeEscalation) } - return sc.ContainerSecurityContext(), annotations, nil + pod.Annotations = annotations + container.SecurityContext = sc.ContainerSecurityContext() + + return nil } // Ensure a pod's SecurityContext is in compliance with the given constraints. diff --git a/pkg/security/podsecuritypolicy/provider_test.go b/pkg/security/podsecuritypolicy/provider_test.go index 1c740842d8d..ee1afcd308f 100644 --- a/pkg/security/podsecuritypolicy/provider_test.go +++ b/pkg/security/podsecuritypolicy/provider_test.go @@ -98,7 +98,7 @@ func TestDefaultPodSecurityContextNonmutating(t *testing.T) { } } -func TestCreateContainerSecurityContextNonmutating(t *testing.T) { +func TestDefaultContainerSecurityContextNonmutating(t *testing.T) { untrue := false tests := []struct { security *api.SecurityContext @@ -154,7 +154,7 @@ func TestCreateContainerSecurityContextNonmutating(t *testing.T) { if err != nil { t.Fatalf("unable to create provider %v", err) } - _, _, err = provider.CreateContainerSecurityContext(pod, &pod.Spec.Containers[0]) + err = provider.DefaultContainerSecurityContext(pod, &pod.Spec.Containers[0]) if err != nil { t.Fatalf("unable to create container security context %v", err) } @@ -163,10 +163,10 @@ func TestCreateContainerSecurityContextNonmutating(t *testing.T) { // since all the strategies were permissive if !reflect.DeepEqual(createPod(), pod) { diffs := diff.ObjectDiff(createPod(), pod) - t.Errorf("pod was mutated by CreateContainerSecurityContext. diff:\n%s", diffs) + t.Errorf("pod was mutated by DefaultContainerSecurityContext. 
diff:\n%s", diffs) } if !reflect.DeepEqual(createPSP(), psp) { - t.Error("psp was mutated by CreateContainerSecurityContext") + t.Error("psp was mutated by DefaultContainerSecurityContext") } } } @@ -893,12 +893,13 @@ func TestGenerateContainerSecurityContextReadOnlyRootFS(t *testing.T) { t.Errorf("%s unable to create provider %v", k, err) continue } - sc, _, err := provider.CreateContainerSecurityContext(v.pod, &v.pod.Spec.Containers[0]) + err = provider.DefaultContainerSecurityContext(v.pod, &v.pod.Spec.Containers[0]) if err != nil { t.Errorf("%s unable to create container security context %v", k, err) continue } + sc := v.pod.Spec.Containers[0].SecurityContext if v.expected == nil && sc.ReadOnlyRootFilesystem != nil { t.Errorf("%s expected a nil ReadOnlyRootFilesystem but got %t", k, *sc.ReadOnlyRootFilesystem) } diff --git a/pkg/security/podsecuritypolicy/types.go b/pkg/security/podsecuritypolicy/types.go index 405ac65df74..1cb7b025b43 100644 --- a/pkg/security/podsecuritypolicy/types.go +++ b/pkg/security/podsecuritypolicy/types.go @@ -35,9 +35,9 @@ type Provider interface { // DefaultPodSecurityContext sets the default values of the required but not filled fields. // It modifies the SecurityContext and annotations of the provided pod. DefaultPodSecurityContext(pod *api.Pod) error - // Create a container SecurityContext based on the given constraints. Also returns an updated set - // of Pod annotations for alpha feature support. - CreateContainerSecurityContext(pod *api.Pod, container *api.Container) (*api.SecurityContext, map[string]string, error) + // DefaultContainerSecurityContext sets the default values of the required but not filled fields. + // It modifies the SecurityContext of the container and annotations of the pod. + DefaultContainerSecurityContext(pod *api.Pod, container *api.Container) error // Ensure a pod's SecurityContext is in compliance with the given constraints. 
ValidatePodSecurityContext(pod *api.Pod, fldPath *field.Path) field.ErrorList // Ensure a container's SecurityContext is in compliance with the given constraints diff --git a/plugin/pkg/admission/security/podsecuritypolicy/admission.go b/plugin/pkg/admission/security/podsecuritypolicy/admission.go index c7b545ec996..a2f8c3d6b74 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/admission.go +++ b/plugin/pkg/admission/security/podsecuritypolicy/admission.go @@ -281,25 +281,20 @@ func assignSecurityContext(provider psp.Provider, pod *api.Pod, fldPath *field.P errs = append(errs, provider.ValidatePodSecurityContext(pod, field.NewPath("spec", "securityContext"))...) for i := range pod.Spec.InitContainers { - sc, scAnnotations, err := provider.CreateContainerSecurityContext(pod, &pod.Spec.InitContainers[i]) + err := provider.DefaultContainerSecurityContext(pod, &pod.Spec.InitContainers[i]) if err != nil { errs = append(errs, field.Invalid(field.NewPath("spec", "initContainers").Index(i).Child("securityContext"), "", err.Error())) continue } - pod.Spec.InitContainers[i].SecurityContext = sc - pod.Annotations = scAnnotations errs = append(errs, provider.ValidateContainerSecurityContext(pod, &pod.Spec.InitContainers[i], field.NewPath("spec", "initContainers").Index(i).Child("securityContext"))...) } for i := range pod.Spec.Containers { - sc, scAnnotations, err := provider.CreateContainerSecurityContext(pod, &pod.Spec.Containers[i]) + err := provider.DefaultContainerSecurityContext(pod, &pod.Spec.Containers[i]) if err != nil { errs = append(errs, field.Invalid(field.NewPath("spec", "containers").Index(i).Child("securityContext"), "", err.Error())) continue } - - pod.Spec.Containers[i].SecurityContext = sc - pod.Annotations = scAnnotations errs = append(errs, provider.ValidateContainerSecurityContext(pod, &pod.Spec.Containers[i], field.NewPath("spec", "containers").Index(i).Child("securityContext"))...) 
} From 12a8848b0e12c09fdce299f1b8193b37a2ccd26a Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Tue, 14 Nov 2017 10:25:58 +0800 Subject: [PATCH 037/794] add UT for testapi.go --- pkg/api/testapi/testapi_test.go | 41 +++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/pkg/api/testapi/testapi_test.go b/pkg/api/testapi/testapi_test.go index 7de0597602a..f90cb812c66 100644 --- a/pkg/api/testapi/testapi_test.go +++ b/pkg/api/testapi/testapi_test.go @@ -25,8 +25,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// TODO these tests don't add much value for testing things that have groups - func TestResourcePathWithPrefix(t *testing.T) { testCases := []struct { prefix string @@ -46,6 +44,25 @@ func TestResourcePathWithPrefix(t *testing.T) { t.Errorf("Expected: %s, got: %s for prefix: %s, resource: %s, namespace: %s and name: %s", item.expected, actual, item.prefix, item.resource, item.namespace, item.name) } } + + testGroupCases := []struct { + prefix string + resource string + namespace string + name string + expected string + }{ + {"prefix", "resource", "mynamespace", "myresource", "/apis/" + Admission.GroupVersion().Group + "/" + Admission.GroupVersion().Version + "/prefix/namespaces/mynamespace/resource/myresource"}, + {"prefix", "resource", "", "myresource", "/apis/" + Admission.GroupVersion().Group + "/" + Admission.GroupVersion().Version + "/prefix/resource/myresource"}, + {"prefix", "resource", "mynamespace", "", "/apis/" + Admission.GroupVersion().Group + "/" + Admission.GroupVersion().Version + "/prefix/namespaces/mynamespace/resource"}, + {"prefix", "resource", "", "", "/apis/" + Admission.GroupVersion().Group + "/" + Admission.GroupVersion().Version + "/prefix/resource"}, + {"", "resource", "mynamespace", "myresource", "/apis/" + Admission.GroupVersion().Group + "/" + Admission.GroupVersion().Version + "/namespaces/mynamespace/resource/myresource"}, + } + for _, item := range testGroupCases { + if actual := 
Admission.ResourcePathWithPrefix(item.prefix, item.resource, item.namespace, item.name); actual != item.expected { + t.Errorf("Expected: %s, got: %s for prefix: %s, resource: %s, namespace: %s and name: %s", item.expected, actual, item.prefix, item.resource, item.namespace, item.name) + } + } } func TestResourcePath(t *testing.T) { @@ -67,6 +84,26 @@ func TestResourcePath(t *testing.T) { } } +func TestSubResourcePath(t *testing.T) { + testCases := []struct { + resource string + namespace string + name string + sub string + expected string + }{ + {"resource", "mynamespace", "myresource", "mysub", "/api/" + Default.GroupVersion().Version + "/namespaces/mynamespace/resource/myresource/mysub"}, + {"resource", "", "myresource", "mysub", "/api/" + Default.GroupVersion().Version + "/resource/myresource/mysub"}, + {"resource", "mynamespace", "", "mysub", "/api/" + Default.GroupVersion().Version + "/namespaces/mynamespace/resource/mysub"}, + {"resource", "", "", "mysub", "/api/" + Default.GroupVersion().Version + "/resource/mysub"}, + } + for _, item := range testCases { + if actual := Default.SubResourcePath(item.resource, item.namespace, item.name, item.sub); actual != item.expected { + t.Errorf("Expected: %s, got: %s for resource: %s, namespace: %s and name: %s", item.expected, actual, item.resource, item.namespace, item.name) + } + } +} + var status = &metav1.Status{ Status: metav1.StatusFailure, Code: 200, From f32d5995ab94e4c51e2ac96406fad3538fcf6547 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Tue, 14 Nov 2017 11:00:06 +0800 Subject: [PATCH 038/794] remove unnecessary TODO: Check ipvs version --- cmd/kube-proxy/app/server_others.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/kube-proxy/app/server_others.go b/cmd/kube-proxy/app/server_others.go index 035d0593fa3..5a27e113dae 100644 --- a/cmd/kube-proxy/app/server_others.go +++ b/cmd/kube-proxy/app/server_others.go @@ -282,8 +282,6 @@ func tryIPVSProxy(iptver iptables.IPTablesVersioner, kcompat 
iptables.KernelComp return proxyModeIPVS } - // TODO: Check ipvs version - // Try to fallback to iptables before falling back to userspace glog.V(1).Infof("Can't use ipvs proxier, trying iptables proxier") return tryIPTablesProxy(iptver, kcompat) From 0fab7c1bec589537fba9f69ae1364474c911b9f5 Mon Sep 17 00:00:00 2001 From: zhengjiajin Date: Tue, 14 Nov 2017 11:45:14 +0800 Subject: [PATCH 039/794] bug(cli):fix kubectl rollout status not recognize resource namespace --- pkg/kubectl/cmd/rollout/rollout_status.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kubectl/cmd/rollout/rollout_status.go b/pkg/kubectl/cmd/rollout/rollout_status.go index d817a81a6d1..52340f6ca6d 100644 --- a/pkg/kubectl/cmd/rollout/rollout_status.go +++ b/pkg/kubectl/cmd/rollout/rollout_status.go @@ -124,7 +124,7 @@ func RunStatus(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []stri } // check if deployment's has finished the rollout - status, done, err := statusViewer.Status(cmdNamespace, info.Name, revision) + status, done, err := statusViewer.Status(info.Namespace, info.Name, revision) if err != nil { return err } @@ -149,7 +149,7 @@ func RunStatus(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []stri return intr.Run(func() error { _, err := watch.Until(0, w, func(e watch.Event) (bool, error) { // print deployment's status - status, done, err := statusViewer.Status(cmdNamespace, info.Name, revision) + status, done, err := statusViewer.Status(info.Namespace, info.Name, revision) if err != nil { return false, err } From 4d56bc34bd44d5aaf35e703b5cacff7bd2b3e192 Mon Sep 17 00:00:00 2001 From: Guangya Liu Date: Tue, 14 Nov 2017 17:35:09 +0800 Subject: [PATCH 040/794] NC should log the whole node condition. 
--- pkg/controller/node/node_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/node/node_controller.go b/pkg/controller/node/node_controller.go index 900bbd62222..efad945d516 100644 --- a/pkg/controller/node/node_controller.go +++ b/pkg/controller/node/node_controller.go @@ -976,7 +976,7 @@ func (nc *Controller) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.Node // If ReadyCondition changed since the last time we checked, we update the transition timestamp to "now", // otherwise we leave it as it is. if savedCondition.LastTransitionTime != observedCondition.LastTransitionTime { - glog.V(3).Infof("ReadyCondition for Node %s transitioned from %v to %v", node.Name, savedCondition.Status, observedCondition) + glog.V(3).Infof("ReadyCondition for Node %s transitioned from %v to %v", node.Name, savedCondition, observedCondition) transitionTime = nc.now() } else { transitionTime = savedNodeStatus.readyTransitionTimestamp From c77d5d0f9031876409171b90128712a28a54fcb3 Mon Sep 17 00:00:00 2001 From: Marko Luksa Date: Mon, 13 Nov 2017 15:12:33 +0100 Subject: [PATCH 041/794] Kubectl explain now also prints the Kind and APIVersion of the resource --- pkg/kubectl/cmd/explain.go | 2 +- pkg/kubectl/explain/BUILD | 1 + pkg/kubectl/explain/explain.go | 5 ++-- pkg/kubectl/explain/model_printer.go | 29 +++++++++++++++++++---- pkg/kubectl/explain/model_printer_test.go | 27 +++++++++++++++------ 5 files changed, 50 insertions(+), 14 deletions(-) diff --git a/pkg/kubectl/cmd/explain.go b/pkg/kubectl/cmd/explain.go index a339df7c811..d1ecf3d65d4 100644 --- a/pkg/kubectl/cmd/explain.go +++ b/pkg/kubectl/cmd/explain.go @@ -129,5 +129,5 @@ func RunExplain(f cmdutil.Factory, out, cmdErr io.Writer, cmd *cobra.Command, ar return fmt.Errorf("Couldn't find resource for %q", gvk) } - return explain.PrintModelDescription(fieldsPath, out, schema, recursive) + return explain.PrintModelDescription(fieldsPath, out, schema, gvk, recursive) } diff --git 
a/pkg/kubectl/explain/BUILD b/pkg/kubectl/explain/BUILD index 6792dbf6bf1..cbb5f7b4bd7 100644 --- a/pkg/kubectl/explain/BUILD +++ b/pkg/kubectl/explain/BUILD @@ -16,6 +16,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", ], ) diff --git a/pkg/kubectl/explain/explain.go b/pkg/kubectl/explain/explain.go index a0dcc389a2d..4d2d1841dce 100644 --- a/pkg/kubectl/explain/explain.go +++ b/pkg/kubectl/explain/explain.go @@ -21,6 +21,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kube-openapi/pkg/util/proto" ) @@ -47,7 +48,7 @@ func SplitAndParseResourceRequest(inResource string, mapper meta.RESTMapper) (st // PrintModelDescription prints the description of a specific model or dot path. // If recursive, all components nested within the fields of the schema will be // printed. -func PrintModelDescription(fieldsPath []string, w io.Writer, schema proto.Schema, recursive bool) error { +func PrintModelDescription(fieldsPath []string, w io.Writer, schema proto.Schema, gvk schema.GroupVersionKind, recursive bool) error { fieldName := "" if len(fieldsPath) != 0 { fieldName = fieldsPath[len(fieldsPath)-1] @@ -60,5 +61,5 @@ func PrintModelDescription(fieldsPath []string, w io.Writer, schema proto.Schema } b := fieldsPrinterBuilder{Recursive: recursive} f := &Formatter{Writer: w, Wrap: 80} - return PrintModel(fieldName, f, b, schema) + return PrintModel(fieldName, f, b, schema, gvk) } diff --git a/pkg/kubectl/explain/model_printer.go b/pkg/kubectl/explain/model_printer.go index 82ad99fac42..64f5bb4487a 100644 --- a/pkg/kubectl/explain/model_printer.go +++ b/pkg/kubectl/explain/model_printer.go @@ -16,7 +16,10 @@ limitations under the License. 
package explain -import "k8s.io/kube-openapi/pkg/util/proto" +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util/proto" +) // fieldIndentLevel is the level of indentation for fields. const fieldIndentLevel = 3 @@ -33,11 +36,19 @@ type modelPrinter struct { Descriptions []string Writer *Formatter Builder fieldsPrinterBuilder + GVK schema.GroupVersionKind Error error } var _ proto.SchemaVisitor = &modelPrinter{} +func (m *modelPrinter) PrintKindAndVersion() error { + if err := m.Writer.Write("KIND: %s", m.GVK.Kind); err != nil { + return err + } + return m.Writer.Write("VERSION: %s\n", m.GVK.GroupVersion()) +} + // PrintDescription prints the description for a given schema. There // might be multiple description, since we collect descriptions when we // go through references, arrays and maps. @@ -73,6 +84,11 @@ func (m *modelPrinter) VisitArray(a *proto.Array) { // VisitKind prints a full resource with its fields. func (m *modelPrinter) VisitKind(k *proto.Kind) { + if err := m.PrintKindAndVersion(); err != nil { + m.Error = err + return + } + if m.Type == "" { m.Type = GetTypeName(k) } @@ -103,10 +119,15 @@ func (m *modelPrinter) VisitMap(om *proto.Map) { // VisitPrimitive prints a field type and its description. func (m *modelPrinter) VisitPrimitive(p *proto.Primitive) { + if err := m.PrintKindAndVersion(); err != nil { + m.Error = err + return + } + if m.Type == "" { m.Type = GetTypeName(p) } - if err := m.Writer.Write("FIELD: %s <%s>\n", m.Name, m.Type); err != nil { + if err := m.Writer.Write("FIELD: %s <%s>\n", m.Name, m.Type); err != nil { m.Error = err return } @@ -120,8 +141,8 @@ func (m *modelPrinter) VisitReference(r proto.Reference) { } // PrintModel prints the description of a schema in writer. 
-func PrintModel(name string, writer *Formatter, builder fieldsPrinterBuilder, schema proto.Schema) error { - m := &modelPrinter{Name: name, Writer: writer, Builder: builder} +func PrintModel(name string, writer *Formatter, builder fieldsPrinterBuilder, schema proto.Schema, gvk schema.GroupVersionKind) error { + m := &modelPrinter{Name: name, Writer: writer, Builder: builder, GVK: gvk} schema.Accept(m) return m.Error } diff --git a/pkg/kubectl/explain/model_printer_test.go b/pkg/kubectl/explain/model_printer_test.go index 66b9b07eafc..42c0f424723 100644 --- a/pkg/kubectl/explain/model_printer_test.go +++ b/pkg/kubectl/explain/model_printer_test.go @@ -24,11 +24,12 @@ import ( ) func TestModel(t *testing.T) { - schema := resources.LookupResource(schema.GroupVersionKind{ + gvk := schema.GroupVersionKind{ Group: "", Version: "v1", Kind: "OneKind", - }) + } + schema := resources.LookupResource(gvk) if schema == nil { t.Fatal("Couldn't find schema v1.OneKind") } @@ -38,7 +39,10 @@ func TestModel(t *testing.T) { want string }{ { - want: `DESCRIPTION: + want: `KIND: OneKind +VERSION: v1 + +DESCRIPTION: OneKind has a short description FIELDS: @@ -58,7 +62,10 @@ FIELDS: path: []string{}, }, { - want: `RESOURCE: field1 + want: `KIND: OneKind +VERSION: v1 + +RESOURCE: field1 DESCRIPTION: Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Nulla ut lacus ac @@ -90,7 +97,10 @@ FIELDS: path: []string{"field1"}, }, { - want: `FIELD: string + want: `KIND: OneKind +VERSION: v1 + +FIELD: string DESCRIPTION: This string must be a string @@ -98,7 +108,10 @@ DESCRIPTION: path: []string{"field1", "string"}, }, { - want: `FIELD: array <[]integer> + want: `KIND: OneKind +VERSION: v1 + +FIELD: array <[]integer> DESCRIPTION: This array must be an array of int @@ -111,7 +124,7 @@ DESCRIPTION: for _, test := range tests { buf := bytes.Buffer{} - if err := PrintModelDescription(test.path, &buf, schema, false); err != nil { + if err := PrintModelDescription(test.path, &buf, schema, gvk, false); err != nil { t.Fatalf("Failed to PrintModelDescription for path %v: %v", test.path, err) } got := buf.String() From 9a88f1514134c62f959eee28fdc81519f87715e6 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 14 Nov 2017 19:38:10 +0800 Subject: [PATCH 042/794] fix comment typo and use wait.Forever --- staging/src/k8s.io/apiserver/pkg/util/logs/logs.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go b/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go index 392bbc0fbad..a3909583a7c 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go +++ b/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go @@ -46,8 +46,8 @@ func (writer GlogWriter) Write(data []byte) (n int, err error) { func InitLogs() { log.SetOutput(GlogWriter{}) log.SetFlags(0) - // The default glog flush interval is 30 seconds, which is frighteningly long. - go wait.Until(glog.Flush, *logFlushFreq, wait.NeverStop) + // The default glog flush interval is 5 seconds. + go wait.Forever(glog.Flush, *logFlushFreq) } // FlushLogs flushes logs immediately. 
From 5311e9d95e81c63fd2fc701863711fcf8fed8768 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Sun, 12 Nov 2017 12:24:29 +0800 Subject: [PATCH 043/794] add UT for apk/apis/core/toleration.go --- pkg/apis/core/BUILD | 5 +- pkg/apis/core/toleration_test.go | 136 +++++++++++++++++++++++++++++++ 2 files changed, 140 insertions(+), 1 deletion(-) create mode 100644 pkg/apis/core/toleration_test.go diff --git a/pkg/apis/core/BUILD b/pkg/apis/core/BUILD index ddd461bd522..28499cf2c69 100644 --- a/pkg/apis/core/BUILD +++ b/pkg/apis/core/BUILD @@ -32,7 +32,10 @@ go_library( go_test( name = "go_default_test", - srcs = ["taint_test.go"], + srcs = [ + "taint_test.go", + "toleration_test.go", + ], importpath = "k8s.io/kubernetes/pkg/apis/core", library = ":go_default_library", ) diff --git a/pkg/apis/core/toleration_test.go b/pkg/apis/core/toleration_test.go new file mode 100644 index 00000000000..ec7a8dec13d --- /dev/null +++ b/pkg/apis/core/toleration_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package core + +import "testing" + +func TestMatchToleration(t *testing.T) { + + tolerationSeconds := int64(5) + tolerationToMatchSeconds := int64(3) + testCases := []struct { + description string + toleration *Toleration + tolerationToMatch *Toleration + expectMatch bool + }{ + { + description: "two taints with the same key,operator,value,effect should match", + toleration: &Toleration{ + Key: "foo", + Operator: "Exists", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + tolerationToMatch: &Toleration{ + Key: "foo", + Operator: "Exists", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + expectMatch: true, + }, + { + description: "two taints with the different key cannot match", + toleration: &Toleration{ + Key: "foo", + Operator: "Exists", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + tolerationToMatch: &Toleration{ + Key: "different-key", + Operator: "Exists", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + expectMatch: false, + }, + { + description: "two taints with the different operator cannot match", + toleration: &Toleration{ + Key: "foo", + Operator: "Exists", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + tolerationToMatch: &Toleration{ + Key: "foo", + Operator: "different-operator", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + expectMatch: false, + }, + { + description: "two taints with the different value cannot match", + toleration: &Toleration{ + Key: "foo", + Operator: "Exists", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + tolerationToMatch: &Toleration{ + Key: "foo", + Operator: "Exists", + Value: "different-value", + Effect: TaintEffectNoSchedule, + }, + expectMatch: false, + }, + { + description: "two taints with the different effect cannot match", + toleration: &Toleration{ + Key: "foo", + Operator: "Exists", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + tolerationToMatch: &Toleration{ + Key: "foo", + Operator: "Exists", + Value: "bar", + Effect: 
TaintEffectPreferNoSchedule, + }, + expectMatch: false, + }, + { + description: "two taints with the different tolerationSeconds should match", + toleration: &Toleration{ + Key: "foo", + Operator: "Exists", + Value: "bar", + Effect: TaintEffectNoSchedule, + TolerationSeconds: &tolerationSeconds, + }, + tolerationToMatch: &Toleration{ + Key: "foo", + Operator: "Exists", + Value: "bar", + Effect: TaintEffectNoSchedule, + TolerationSeconds: &tolerationToMatchSeconds, + }, + expectMatch: true, + }, + } + + for _, tc := range testCases { + if actual := tc.toleration.MatchToleration(tc.tolerationToMatch); actual != tc.expectMatch { + t.Errorf("[%s] expect: %v , got: %v", tc.description, tc.expectMatch, !tc.expectMatch) + } + } +} From 03470d8428a7d01916f64d15145d085ec5669f72 Mon Sep 17 00:00:00 2001 From: Norman Joyner Date: Tue, 14 Nov 2017 20:57:41 -0500 Subject: [PATCH 044/794] Fix typo --- api/openapi-spec/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/openapi-spec/README.md b/api/openapi-spec/README.md index 59fbd90100d..167f7cec938 100644 --- a/api/openapi-spec/README.md +++ b/api/openapi-spec/README.md @@ -4,7 +4,7 @@ This folder contains an [OpenAPI specification][openapi] for Kubernetes API. ## Vendor Extensions -Kuberntes extends OpenAPI using these extensions. Note the version that +Kubernetes extends OpenAPI using these extensions. Note the version that extensions has been added. ### `x-kubernetes-group-version-kind` From 4e74211aaf546271f5b7d5670b419b92efa48f3f Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Wed, 15 Nov 2017 16:47:50 +0100 Subject: [PATCH 045/794] pkg/securitycontext/util.go(InternalDetermineEffectiveSecurityContext): remove unused function. 
--- pkg/securitycontext/util.go | 78 ------------------------------------- 1 file changed, 78 deletions(-) diff --git a/pkg/securitycontext/util.go b/pkg/securitycontext/util.go index 73d23a43131..2719e118269 100644 --- a/pkg/securitycontext/util.go +++ b/pkg/securitycontext/util.go @@ -21,7 +21,6 @@ import ( "strings" "k8s.io/api/core/v1" - api "k8s.io/kubernetes/pkg/apis/core" ) // HasPrivilegedRequest returns the value of SecurityContext.Privileged, taking into account @@ -165,83 +164,6 @@ func securityContextFromPodSecurityContext(pod *v1.Pod) *v1.SecurityContext { return synthesized } -// TODO: remove the duplicate code -func InternalDetermineEffectiveSecurityContext(pod *api.Pod, container *api.Container) *api.SecurityContext { - effectiveSc := internalSecurityContextFromPodSecurityContext(pod) - containerSc := container.SecurityContext - - if effectiveSc == nil && containerSc == nil { - return nil - } - if effectiveSc != nil && containerSc == nil { - return effectiveSc - } - if effectiveSc == nil && containerSc != nil { - return containerSc - } - - if containerSc.SELinuxOptions != nil { - effectiveSc.SELinuxOptions = new(api.SELinuxOptions) - *effectiveSc.SELinuxOptions = *containerSc.SELinuxOptions - } - - if containerSc.Capabilities != nil { - effectiveSc.Capabilities = new(api.Capabilities) - *effectiveSc.Capabilities = *containerSc.Capabilities - } - - if containerSc.Privileged != nil { - effectiveSc.Privileged = new(bool) - *effectiveSc.Privileged = *containerSc.Privileged - } - - if containerSc.RunAsUser != nil { - effectiveSc.RunAsUser = new(int64) - *effectiveSc.RunAsUser = *containerSc.RunAsUser - } - - if containerSc.RunAsNonRoot != nil { - effectiveSc.RunAsNonRoot = new(bool) - *effectiveSc.RunAsNonRoot = *containerSc.RunAsNonRoot - } - - if containerSc.ReadOnlyRootFilesystem != nil { - effectiveSc.ReadOnlyRootFilesystem = new(bool) - *effectiveSc.ReadOnlyRootFilesystem = *containerSc.ReadOnlyRootFilesystem - } - - if 
containerSc.AllowPrivilegeEscalation != nil { - effectiveSc.AllowPrivilegeEscalation = new(bool) - *effectiveSc.AllowPrivilegeEscalation = *containerSc.AllowPrivilegeEscalation - } - - return effectiveSc -} - -func internalSecurityContextFromPodSecurityContext(pod *api.Pod) *api.SecurityContext { - if pod.Spec.SecurityContext == nil { - return nil - } - - synthesized := &api.SecurityContext{} - - if pod.Spec.SecurityContext.SELinuxOptions != nil { - synthesized.SELinuxOptions = &api.SELinuxOptions{} - *synthesized.SELinuxOptions = *pod.Spec.SecurityContext.SELinuxOptions - } - if pod.Spec.SecurityContext.RunAsUser != nil { - synthesized.RunAsUser = new(int64) - *synthesized.RunAsUser = *pod.Spec.SecurityContext.RunAsUser - } - - if pod.Spec.SecurityContext.RunAsNonRoot != nil { - synthesized.RunAsNonRoot = new(bool) - *synthesized.RunAsNonRoot = *pod.Spec.SecurityContext.RunAsNonRoot - } - - return synthesized -} - // AddNoNewPrivileges returns if we should add the no_new_privs option. func AddNoNewPrivileges(sc *v1.SecurityContext) bool { if sc == nil { From b7598021ee0607404a10d90ea680b4f9ae46619c Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Wed, 15 Nov 2017 17:24:04 +0800 Subject: [PATCH 046/794] remove unused code in pkg/apimachinery --- pkg/BUILD | 1 - pkg/apimachinery/tests/BUILD | 30 ----- .../tests/api_meta_scheme_test.go | 121 ------------------ 3 files changed, 152 deletions(-) delete mode 100644 pkg/apimachinery/tests/BUILD delete mode 100644 pkg/apimachinery/tests/api_meta_scheme_test.go diff --git a/pkg/BUILD b/pkg/BUILD index fc74d006478..86e113bbfdd 100644 --- a/pkg/BUILD +++ b/pkg/BUILD @@ -27,7 +27,6 @@ filegroup( "//pkg/api/v1/pod:all-srcs", "//pkg/api/v1/resource:all-srcs", "//pkg/api/v1/service:all-srcs", - "//pkg/apimachinery/tests:all-srcs", "//pkg/apis/abac:all-srcs", "//pkg/apis/admission:all-srcs", "//pkg/apis/admissionregistration:all-srcs", diff --git a/pkg/apimachinery/tests/BUILD b/pkg/apimachinery/tests/BUILD deleted file mode 
100644 index adbe4ae6907..00000000000 --- a/pkg/apimachinery/tests/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["api_meta_scheme_test.go"], - importpath = "k8s.io/kubernetes/pkg/apimachinery/tests", - deps = [ - "//pkg/api/testapi:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/apimachinery/tests/api_meta_scheme_test.go b/pkg/apimachinery/tests/api_meta_scheme_test.go deleted file mode 100644 index 42b2842cd3e..00000000000 --- a/pkg/apimachinery/tests/api_meta_scheme_test.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package tests - -import ( - "fmt" - "reflect" - "strings" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api/testapi" -) - -// These types do not follow the list convention as documented in -// docs/devel/api-convention.md -var listTypeExceptions = sets.NewString("APIGroupList", "APIResourceList") - -func validateListType(target reflect.Type) error { - // exceptions - if listTypeExceptions.Has(target.Name()) { - return nil - } - hasListSuffix := strings.HasSuffix(target.Name(), "List") - hasMetadata := false - hasItems := false - for i := 0; i < target.NumField(); i++ { - field := target.Field(i) - tag := field.Tag.Get("json") - switch { - case strings.HasPrefix(tag, "metadata"): - hasMetadata = true - case tag == "items": - hasItems = true - if field.Type.Kind() != reflect.Slice { - return fmt.Errorf("Expected items to be slice, got %s", field.Type.Kind()) - } - } - } - if hasListSuffix && !hasMetadata { - return fmt.Errorf("Expected type %s to contain \"metadata\"", target.Name()) - } - if hasListSuffix && !hasItems { - return fmt.Errorf("Expected type %s to contain \"items\"", target.Name()) - } - // if a type contains field Items with JSON tag "items", its name should end with List. - if !hasListSuffix && hasItems { - return fmt.Errorf("Type %s has Items, its name is expected to end with \"List\"", target.Name()) - } - return nil -} - -// TestListTypes verifies that no external type violates the api convention of -// list types. 
-func TestListTypes(t *testing.T) { - for groupKey, group := range testapi.Groups { - for kind, target := range group.ExternalTypes() { - t.Logf("working on %v in %v", kind, groupKey) - err := validateListType(target) - if err != nil { - t.Error(err) - } - } - } -} - -type WithoutMetaDataList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta - Items []interface{} `json:"items"` -} - -type WithoutItemsList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` -} - -type WrongItemsJSONTagList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []interface{} `json:"items,omitempty"` -} - -// If a type has Items, its name should end with "List" -type ListWithWrongName struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []interface{} `json:"items"` -} - -// TestValidateListType verifies the validateListType function reports error on -// types that violate the api convention. -func TestValidateListType(t *testing.T) { - var testTypes = []interface{}{ - WithoutMetaDataList{}, - WithoutItemsList{}, - WrongItemsJSONTagList{}, - ListWithWrongName{}, - } - for _, testType := range testTypes { - err := validateListType(reflect.TypeOf(testType)) - if err == nil { - t.Errorf("Expected error") - } - } -} From da33d6f34f9424095e16d4881aa7f96fce5f8c66 Mon Sep 17 00:00:00 2001 From: Klaus Ma Date: Mon, 31 Jul 2017 21:33:52 +0800 Subject: [PATCH 047/794] Added nodeAffinity in validation error msg. 
--- pkg/apis/core/validation/validation.go | 31 ++++++++++++--------- pkg/apis/core/validation/validation_test.go | 6 ++-- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 8fe283ddfeb..8f124caf0ae 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -2323,19 +2323,8 @@ func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorL allErrs := field.ErrorList{} if affinity != nil { - if na := affinity.NodeAffinity; na != nil { - // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) - // } - - if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - - if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) - } + if affinity.NodeAffinity != nil { + allErrs = append(allErrs, validateNodeAffinity(affinity.NodeAffinity, fldPath.Child("nodeAffinity"))...) } if affinity.PodAffinity != nil { allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...) 
@@ -2751,6 +2740,22 @@ func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *fie return allErrs } +// validateNodeAffinity tests that the specified nodeAffinity fields have valid data +func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + // } + if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) 
+ } + return allErrs +} + // validatePodAffinity tests that the specified podAffinity fields have valid data func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 3a6121715ce..01c08591044 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -5449,7 +5449,7 @@ func TestValidatePod(t *testing.T) { }, }, "invalid node selector requirement in node affinity, operator can't be null": { - expectedError: "spec.affinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator", + expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", @@ -5500,7 +5500,7 @@ func TestValidatePod(t *testing.T) { }, }, "invalid requiredDuringSchedulingIgnoredDuringExecution node selector, nodeSelectorTerms must have at least one term": { - expectedError: "spec.affinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", + expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", @@ -5516,7 +5516,7 @@ func TestValidatePod(t *testing.T) { }, }, "invalid requiredDuringSchedulingIgnoredDuringExecution node selector term, matchExpressions must have at least one node selector requirement": { - expectedError: "spec.affinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions", + expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", From 71a035023922c084c57c53c345a328c7769d3a75 Mon Sep 17 00:00:00 
2001 From: Gavin Date: Thu, 16 Nov 2017 15:03:08 +0800 Subject: [PATCH 048/794] httpserver should be close since the issue has been fixed --- plugin/pkg/scheduler/factory/factory_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugin/pkg/scheduler/factory/factory_test.go b/plugin/pkg/scheduler/factory/factory_test.go index db73370ac09..25f33d226bf 100644 --- a/plugin/pkg/scheduler/factory/factory_test.go +++ b/plugin/pkg/scheduler/factory/factory_test.go @@ -371,8 +371,7 @@ func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) { T: t, } server := httptest.NewServer(&handler) - // TODO: Uncomment when fix #19254 - // defer server.Close() + defer server.Close() client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) // factory of "default-scheduler" informerFactory := informers.NewSharedInformerFactory(client, 0) From 507b03f2ab4462de08acee86bb1dd0ff00904539 Mon Sep 17 00:00:00 2001 From: Kai Chen Date: Thu, 16 Nov 2017 11:15:48 -0800 Subject: [PATCH 049/794] Fix a typo in kubeadm/GetEtcdPodSpec --- cmd/kubeadm/app/phases/etcd/local.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/phases/etcd/local.go b/cmd/kubeadm/app/phases/etcd/local.go index 522e6e5dae9..a068030cfb1 100644 --- a/cmd/kubeadm/app/phases/etcd/local.go +++ b/cmd/kubeadm/app/phases/etcd/local.go @@ -47,7 +47,7 @@ func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.Ma } // GetEtcdPodSpec returns the etcd static Pod actualized to the context of the current MasterConfiguration -// NB. GetEtcdPodSpec methods holds the information about how kubeadm creates etcd static pod mainfests. +// NB. GetEtcdPodSpec methods holds the information about how kubeadm creates etcd static pod manifests. 
func GetEtcdPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.Pod { pathType := v1.HostPathDirectoryOrCreate etcdMounts := map[string]v1.Volume{ From 7fcf37e380d3c46470a0d1b5fdff5536f6579143 Mon Sep 17 00:00:00 2001 From: Guangya Liu Date: Fri, 17 Nov 2017 11:53:26 +0800 Subject: [PATCH 050/794] Updated a typo for ipvs document. --- pkg/proxy/ipvs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/proxy/ipvs/README.md b/pkg/proxy/ipvs/README.md index c0f616c9217..a93cf77b53a 100644 --- a/pkg/proxy/ipvs/README.md +++ b/pkg/proxy/ipvs/README.md @@ -8,7 +8,7 @@ This document shows how to use kube-proxy ipvs mode. Linux kernel. IPVS runs on a host and acts as a load balancer in front of a cluster of real servers. IPVS can direct requests for TCP -and UDP-based services to the real servers, and make services of real servers appear as irtual services on a single IP address. +and UDP-based services to the real servers, and make services of real servers appear as virtual services on a single IP address. ## How to use From d18c2f21a495ad4902ff7cc4815e64d6cb4bec79 Mon Sep 17 00:00:00 2001 From: tengqm Date: Fri, 17 Nov 2017 15:26:28 +0800 Subject: [PATCH 051/794] Fix version indication for ServiceNodeExclusion --- pkg/features/kube_features.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 860abbf55eb..41d8d0654e3 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -165,7 +165,7 @@ const ( HugePages utilfeature.Feature = "HugePages" // owner @brendandburns - // alpha: v1.8 + // alpha: v1.9 // // Enable nodes to exclude themselves from service load balancers ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion" From 03969fe0ed523db3c58dce9e3f73704d801ab180 Mon Sep 17 00:00:00 2001 From: Rohit Agarwal Date: Fri, 17 Nov 2017 01:29:09 -0800 Subject: [PATCH 052/794] Remove incorrect dead code. 
TolerationToleratesTaint was incorrect: if the toleration.Key is empty and taint is non-empty, it should not return false. It was also not used anywhere. The correct implementations are in staging/src/k8s.io/api/core/v1/toleration.go and pkg/apis/core/v1/helper/helpers.go --- pkg/apis/core/helper/helpers.go | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/pkg/apis/core/helper/helpers.go b/pkg/apis/core/helper/helpers.go index 86800d03877..3fdc603c423 100644 --- a/pkg/apis/core/helper/helpers.go +++ b/pkg/apis/core/helper/helpers.go @@ -493,37 +493,6 @@ func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool return true } -// TolerationToleratesTaint checks if the toleration tolerates the taint. -func TolerationToleratesTaint(toleration *core.Toleration, taint *core.Taint) bool { - if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { - return false - } - - if toleration.Key != taint.Key { - return false - } - // TODO: Use proper defaulting when Toleration becomes a field of PodSpec - if (len(toleration.Operator) == 0 || toleration.Operator == core.TolerationOpEqual) && toleration.Value == taint.Value { - return true - } - if toleration.Operator == core.TolerationOpExists { - return true - } - return false -} - -// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations. -func TaintToleratedByTolerations(taint *core.Taint, tolerations []core.Toleration) bool { - tolerated := false - for i := range tolerations { - if TolerationToleratesTaint(&tolerations[i], taint) { - tolerated = true - break - } - } - return tolerated -} - // GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations // and converts it to the []Taint type in core. 
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) { From f11c35eb2964229068459e0ebae76ef3203b66ea Mon Sep 17 00:00:00 2001 From: Aleksandra Malinowska Date: Fri, 17 Nov 2017 17:57:33 +0100 Subject: [PATCH 053/794] Create sig-autoscaling-maintainers alias --- OWNERS_ALIASES | 6 ++++++ test/e2e/autoscaling/OWNERS | 10 ++-------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index d5f3f3dd178..7c546c3a562 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -101,6 +101,12 @@ aliases: sig-apps-api-approvers: - erictune - smarterclayton + sig-autoscaling-maintainers: + - aleksandra-malinowska + - bskiba + - DirectXMan12 + - MaciekPytel + - mwielgus milestone-maintainers: - lavalamp - deads2k diff --git a/test/e2e/autoscaling/OWNERS b/test/e2e/autoscaling/OWNERS index 4427cbf5026..28a0c8a5593 100644 --- a/test/e2e/autoscaling/OWNERS +++ b/test/e2e/autoscaling/OWNERS @@ -1,14 +1,8 @@ reviewers: - - aleksandra-malinowska - - bskiba - jszczepkowski - - MaciekPytel - - mwielgus + - sig-autoscaling-maintainers - wasylkowski approvers: - - aleksandra-malinowska - - bskiba - jszczepkowski - - MaciekPytel - - mwielgus + - sig-autoscaling-maintainers - wasylkowski From 1a5c80240f826474eb8f0d5ae042341d6cd80a0e Mon Sep 17 00:00:00 2001 From: Connor Doyle Date: Fri, 17 Nov 2017 15:06:03 -0800 Subject: [PATCH 054/794] CPU manager no-op policy is on by default. - Mark CPU manager feature as beta. 
--- pkg/features/kube_features.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 860abbf55eb..6c3dfb660ad 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -211,7 +211,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS TaintNodesByCondition: {Default: false, PreRelease: utilfeature.Alpha}, MountPropagation: {Default: false, PreRelease: utilfeature.Alpha}, ExpandPersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha}, - CPUManager: {Default: false, PreRelease: utilfeature.Alpha}, + CPUManager: {Default: true, PreRelease: utilfeature.Beta}, ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, VolumeScheduling: {Default: false, PreRelease: utilfeature.Alpha}, From db89b46ce74e94470f38e6dc3bfd46f032a797f8 Mon Sep 17 00:00:00 2001 From: Derek Carr Date: Fri, 17 Nov 2017 22:30:49 -0500 Subject: [PATCH 055/794] kubelet summary api test updates --- test/e2e_node/summary_test.go | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/test/e2e_node/summary_test.go b/test/e2e_node/summary_test.go index fe7654ca243..dab6ae05cae 100644 --- a/test/e2e_node/summary_test.go +++ b/test/e2e_node/summary_test.go @@ -75,7 +75,11 @@ var _ = framework.KubeDescribe("Summary API", func() { maxStartAge = time.Hour * 24 * 365 // 1 year maxStatsAge = time.Minute ) - fsCapacityBounds := bounded(100*framework.Mb, 100*framework.Gb) + // fetch node so we can know proper node memory bounds for unconstrained cgroups + node := getLocalNode(f) + memoryCapacity := node.Status.Capacity["memory"] + memoryLimit := memoryCapacity.Value() + fsCapacityBounds := bounded(100*framework.Mb, 10*framework.Tb) // Expectations for system containers. 
sysContExpectations := func() types.GomegaMatcher { return gstruct.MatchAllFields(gstruct.Fields{ @@ -90,10 +94,10 @@ var _ = framework.KubeDescribe("Summary API", func() { "Time": recent(maxStatsAge), // We don't limit system container memory. "AvailableBytes": BeNil(), - "UsageBytes": bounded(1*framework.Mb, 10*framework.Gb), - "WorkingSetBytes": bounded(1*framework.Mb, 10*framework.Gb), + "UsageBytes": bounded(1*framework.Mb, memoryLimit), + "WorkingSetBytes": bounded(1*framework.Mb, memoryLimit), // this now returns /sys/fs/cgroup/memory.stat total_rss - "RSSBytes": bounded(1*framework.Mb, 1*framework.Gb), + "RSSBytes": bounded(1*framework.Mb, memoryLimit), "PageFaults": bounded(1000, 1E9), "MajorPageFaults": bounded(0, 100000), }), @@ -116,9 +120,9 @@ var _ = framework.KubeDescribe("Summary API", func() { "Time": recent(maxStatsAge), // We don't limit system container memory. "AvailableBytes": BeNil(), - "UsageBytes": bounded(100*framework.Kb, 10*framework.Gb), - "WorkingSetBytes": bounded(100*framework.Kb, 10*framework.Gb), - "RSSBytes": bounded(100*framework.Kb, 1*framework.Gb), + "UsageBytes": bounded(100*framework.Kb, memoryLimit), + "WorkingSetBytes": bounded(100*framework.Kb, memoryLimit), + "RSSBytes": bounded(100*framework.Kb, memoryLimit), "PageFaults": bounded(1000, 1E9), "MajorPageFaults": bounded(0, 100000), }) @@ -203,11 +207,11 @@ var _ = framework.KubeDescribe("Summary API", func() { }), "Memory": ptrMatchAllFields(gstruct.Fields{ "Time": recent(maxStatsAge), - "AvailableBytes": bounded(100*framework.Mb, 100*framework.Gb), - "UsageBytes": bounded(10*framework.Mb, 10*framework.Gb), - "WorkingSetBytes": bounded(10*framework.Mb, 10*framework.Gb), + "AvailableBytes": bounded(100*framework.Mb, memoryLimit), + "UsageBytes": bounded(10*framework.Mb, memoryLimit), + "WorkingSetBytes": bounded(10*framework.Mb, memoryLimit), // this now returns /sys/fs/cgroup/memory.stat total_rss - "RSSBytes": bounded(1*framework.Kb, 1*framework.Gb), + "RSSBytes": 
bounded(1*framework.Kb, memoryLimit), "PageFaults": bounded(1000, 1E9), "MajorPageFaults": bounded(0, 100000), }), From 4649a8cdd6929351af4d8b8ab1c3c773752f9f2c Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Sat, 18 Nov 2017 22:26:57 +0530 Subject: [PATCH 056/794] fix download link for fedora libvirt vagrant box Fedora 23 releases have been moved from dl.fedoraproject.org/pub/fedora/linux/releases/23/ to archives.fedoraproject.org/pub/archive/fedora/linux/releases/23/ This commit changes the URL to point to the newer location --- Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index 532211d0f5d..8743a6f34af 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -71,7 +71,7 @@ $kube_provider_boxes = { :libvirt => { 'fedora' => { :box_name => 'kube-fedora23', - :box_url => 'https://dl.fedoraproject.org/pub/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-23-20151030.x86_64.vagrant-libvirt.box' + :box_url => 'https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-23-20151030.x86_64.vagrant-libvirt.box' } }, :vmware_desktop => { From 2f752f47b944df53e958e22732805179e631f8b2 Mon Sep 17 00:00:00 2001 From: Mahdi Mohammadi Date: Sun, 12 Nov 2017 17:26:23 +0000 Subject: [PATCH 057/794] Replace type switches in Rollback with Visitor pattern --- pkg/kubectl/BUILD | 1 + pkg/kubectl/rollback.go | 48 +++++++++++++++++++++++++++++------- pkg/kubectl/rollback_test.go | 46 ++++++++++++++++++++++++++++++++++ 3 files changed, 86 insertions(+), 9 deletions(-) create mode 100644 pkg/kubectl/rollback_test.go diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index fd1a9876c1a..e0777b438d4 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -23,6 +23,7 @@ go_test( "quota_test.go", "resource_filter_test.go", "rolebinding_test.go", + "rollback_test.go", "rolling_updater_test.go", "rollout_status_test.go", "run_test.go", diff --git 
a/pkg/kubectl/rollback.go b/pkg/kubectl/rollback.go index 7f6dfa80fb1..50236ea63f9 100644 --- a/pkg/kubectl/rollback.go +++ b/pkg/kubectl/rollback.go @@ -35,13 +35,13 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/apps" api "k8s.io/kubernetes/pkg/apis/core" apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/controller/daemon" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/controller/statefulset" + kapps "k8s.io/kubernetes/pkg/kubectl/apps" sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" ) @@ -56,16 +56,46 @@ type Rollbacker interface { Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) } +type RollbackVisitor struct { + clientset kubernetes.Interface + result Rollbacker +} + +func (v *RollbackVisitor) VisitDeployment(elem kapps.GroupKindElement) { + v.result = &DeploymentRollbacker{v.clientset} +} + +func (v *RollbackVisitor) VisitStatefulSet(kind kapps.GroupKindElement) { + v.result = &StatefulSetRollbacker{v.clientset} +} + +func (v *RollbackVisitor) VisitDaemonSet(kind kapps.GroupKindElement) { + v.result = &DaemonSetRollbacker{v.clientset} +} + +func (v *RollbackVisitor) VisitJob(kind kapps.GroupKindElement) {} +func (v *RollbackVisitor) VisitPod(kind kapps.GroupKindElement) {} +func (v *RollbackVisitor) VisitReplicaSet(kind kapps.GroupKindElement) {} +func (v *RollbackVisitor) VisitReplicationController(kind kapps.GroupKindElement) {} + +// RollbackerFor returns an implementation of Rollbacker interface for the given schema kind func RollbackerFor(kind schema.GroupKind, c kubernetes.Interface) (Rollbacker, error) { - switch kind { - case extensions.Kind("Deployment"), apps.Kind("Deployment"): - return &DeploymentRollbacker{c}, nil - case 
extensions.Kind("DaemonSet"), apps.Kind("DaemonSet"): - return &DaemonSetRollbacker{c}, nil - case apps.Kind("StatefulSet"): - return &StatefulSetRollbacker{c}, nil + elem := kapps.GroupKindElement(kind) + visitor := &RollbackVisitor{ + clientset: c, } - return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind) + + err := elem.Accept(visitor) + + if err != nil { + return nil, fmt.Errorf("error retrieving rollbacker for %q, %v", kind.String(), err) + } + + if visitor.result == nil { + return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind.String()) + } + + return visitor.result, nil } type DeploymentRollbacker struct { diff --git a/pkg/kubectl/rollback_test.go b/pkg/kubectl/rollback_test.go new file mode 100644 index 00000000000..1d41650ba39 --- /dev/null +++ b/pkg/kubectl/rollback_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/fake" +) + +var rollbacktests = map[schema.GroupKind]reflect.Type{ + {Group: "apps", Kind: "DaemonSet"}: reflect.TypeOf(&DaemonSetRollbacker{}), + {Group: "apps", Kind: "StatefulSet"}: reflect.TypeOf(&StatefulSetRollbacker{}), + {Group: "apps", Kind: "Deployment"}: reflect.TypeOf(&DeploymentRollbacker{}), +} + +func TestRollbackerFor(t *testing.T) { + fakeClientset := &fake.Clientset{} + + for kind, expectedType := range rollbacktests { + result, err := RollbackerFor(kind, fakeClientset) + if err != nil { + t.Fatalf("error getting Rollbacker for a %v: %v", kind.String(), err) + } + + if reflect.TypeOf(result) != expectedType { + t.Fatalf("unexpected output type (%v was expected but got %v)", expectedType, reflect.TypeOf(result)) + } + } +} From d814a5ad5139caab29c7b697ab33a39211182915 Mon Sep 17 00:00:00 2001 From: Mahdi Mohammadi Date: Sun, 19 Nov 2017 17:51:31 +0000 Subject: [PATCH 058/794] Update for cronJob --- pkg/kubectl/rollback.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/kubectl/rollback.go b/pkg/kubectl/rollback.go index 50236ea63f9..90971aeb0c5 100644 --- a/pkg/kubectl/rollback.go +++ b/pkg/kubectl/rollback.go @@ -77,6 +77,7 @@ func (v *RollbackVisitor) VisitJob(kind kapps.GroupKindElement) func (v *RollbackVisitor) VisitPod(kind kapps.GroupKindElement) {} func (v *RollbackVisitor) VisitReplicaSet(kind kapps.GroupKindElement) {} func (v *RollbackVisitor) VisitReplicationController(kind kapps.GroupKindElement) {} +func (v *RollbackVisitor) VisitCronJob(kind kapps.GroupKindElement) {} // RollbackerFor returns an implementation of Rollbacker interface for the given schema kind func RollbackerFor(kind schema.GroupKind, c kubernetes.Interface) (Rollbacker, error) { From d246cf3864ee29a05b28199e3be4224430a99bb7 Mon Sep 17 00:00:00 2001 From: wackxu Date: Thu, 16 Nov 2017 15:10:38 +0800 Subject: [PATCH 059/794] Use 
structured generator for kubectl autoscale --- pkg/kubectl/autoscale.go | 109 ++++++++++----------------- pkg/kubectl/autoscale_test.go | 134 ++++++++++++++-------------------- 2 files changed, 96 insertions(+), 147 deletions(-) diff --git a/pkg/kubectl/autoscale.go b/pkg/kubectl/autoscale.go index dcbfadde84a..fbe826d07bf 100644 --- a/pkg/kubectl/autoscale.go +++ b/pkg/kubectl/autoscale.go @@ -18,98 +18,69 @@ package kubectl import ( "fmt" - "strconv" autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) -type HorizontalPodAutoscalerV1 struct{} - -func (HorizontalPodAutoscalerV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"default-name", true}, - {"name", false}, - {"scaleRef-kind", false}, - {"scaleRef-name", false}, - {"scaleRef-apiVersion", false}, - {"min", false}, - {"max", true}, - {"cpu-percent", false}, - } +// HorizontalPodAutoscalerV1Generator supports stable generation of a horizontal pod autoscaler. +type HorizontalPodAutoscalerGeneratorV1 struct { + Name string + ScaleRefKind string + ScaleRefName string + ScaleRefApiVersion string + MinReplicas int32 + MaxReplicas int32 + CPUPercent int32 } -func (HorizontalPodAutoscalerV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - return generateHPA(genericParams) -} +// Ensure it supports the generator pattern that uses parameters specified during construction. 
+var _ StructuredGenerator = &HorizontalPodAutoscalerGeneratorV1{} -func generateHPA(genericParams map[string]interface{}) (runtime.Object, error) { - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - - name, found := params["name"] - if !found || len(name) == 0 { - name, found = params["default-name"] - if !found || len(name) == 0 { - return nil, fmt.Errorf("'name' is a required parameter.") - } - } - minString, found := params["min"] - min := -1 - var err error - if found { - if min, err = strconv.Atoi(minString); err != nil { - return nil, err - } - } - maxString, found := params["max"] - if !found { - return nil, fmt.Errorf("'max' is a required parameter.") - } - max, err := strconv.Atoi(maxString) - if err != nil { +// StructuredGenerate outputs a horizontal pod autoscaler object using the configured fields. 
+func (s *HorizontalPodAutoscalerGeneratorV1) StructuredGenerate() (runtime.Object, error) { + if err := s.validate(); err != nil { return nil, err } - if min > max { - return nil, fmt.Errorf("'max' must be greater than or equal to 'min'.") - } - - cpuString, found := params["cpu-percent"] - cpu := -1 - if found { - if cpu, err = strconv.Atoi(cpuString); err != nil { - return nil, err - } - } - scaler := autoscalingv1.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: s.Name, }, Spec: autoscalingv1.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{ - Kind: params["scaleRef-kind"], - Name: params["scaleRef-name"], - APIVersion: params["scaleRef-apiVersion"], + Kind: s.ScaleRefKind, + Name: s.ScaleRefName, + APIVersion: s.ScaleRefApiVersion, }, - MaxReplicas: int32(max), + MaxReplicas: s.MaxReplicas, }, } - if min > 0 { - v := int32(min) + + if s.MinReplicas > 0 { + v := int32(s.MinReplicas) scaler.Spec.MinReplicas = &v } - if cpu >= 0 { - c := int32(cpu) + if s.CPUPercent >= 0 { + c := int32(s.CPUPercent) scaler.Spec.TargetCPUUtilizationPercentage = &c } + return &scaler, nil } + +// validate check if the caller has set the right fields. 
+func (s HorizontalPodAutoscalerGeneratorV1) validate() error { + if len(s.Name) == 0 { + return fmt.Errorf("name must be specified") + } + if s.MaxReplicas <= 0 { + return fmt.Errorf("'max' is a required parameter and must be greater than zero") + } + if s.MinReplicas > s.MaxReplicas { + return fmt.Errorf("'max' must be greater than or equal to 'min'") + } + return nil +} + diff --git a/pkg/kubectl/autoscale_test.go b/pkg/kubectl/autoscale_test.go index e87a7075c13..9d2a52811c2 100644 --- a/pkg/kubectl/autoscale_test.go +++ b/pkg/kubectl/autoscale_test.go @@ -26,22 +26,26 @@ import ( func TestHPAGenerate(t *testing.T) { tests := []struct { - name string - params map[string]interface{} - expected *autoscalingv1.HorizontalPodAutoscaler - expectErr bool + name string + HPAName string + scaleRefKind string + scaleRefName string + scaleRefApiVersion string + minReplicas int32 + maxReplicas int32 + CPUPercent int32 + expected *autoscalingv1.HorizontalPodAutoscaler + expectErr bool }{ { - name: "valid case", - params: map[string]interface{}{ - "name": "foo", - "min": "1", - "max": "10", - "cpu-percent": "80", - "scaleRef-kind": "kind", - "scaleRef-name": "name", - "scaleRef-apiVersion": "apiVersion", - }, + name: "valid case", + HPAName: "foo", + minReplicas: 1, + maxReplicas: 10, + CPUPercent: 80, + scaleRefKind: "kind", + scaleRefName: "name", + scaleRefApiVersion: "apiVersion", expected: &autoscalingv1.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", @@ -60,79 +64,53 @@ func TestHPAGenerate(t *testing.T) { expectErr: false, }, { - name: "'name' is a required parameter", - params: map[string]interface{}{ - "scaleRef-kind": "kind", - "scaleRef-name": "name", - "scaleRef-apiVersion": "apiVersion", - }, - expectErr: true, + name: "'name' is a required parameter", + scaleRefKind: "kind", + scaleRefName: "name", + scaleRefApiVersion: "apiVersion", + expectErr: true, }, { - name: "'max' is a required parameter", - params: map[string]interface{}{ - 
"default-name": "foo", - "scaleRef-kind": "kind", - "scaleRef-name": "name", - "scaleRef-apiVersion": "apiVersion", - }, - expectErr: true, + name: "'max' is a required parameter", + HPAName: "foo", + scaleRefKind: "kind", + scaleRefName: "name", + scaleRefApiVersion: "apiVersion", + expectErr: true, }, { - name: "'max' must be greater than or equal to 'min'", - params: map[string]interface{}{ - "name": "foo", - "min": "10", - "max": "1", - "scaleRef-kind": "kind", - "scaleRef-name": "name", - "scaleRef-apiVersion": "apiVersion", - }, - expectErr: true, + name: "'max' must be greater than or equal to 'min'", + HPAName: "foo", + minReplicas: 10, + maxReplicas: 1, + scaleRefKind: "kind", + scaleRefName: "name", + scaleRefApiVersion: "apiVersion", + expectErr: true, }, { - name: "cpu-percent must be an integer if specified", - params: map[string]interface{}{ - "name": "foo", - "min": "1", - "max": "10", - "cpu-percent": "", - "scaleRef-kind": "kind", - "scaleRef-name": "name", - "scaleRef-apiVersion": "apiVersion", - }, - expectErr: true, - }, - { - name: "'min' must be an integer if specified", - params: map[string]interface{}{ - "name": "foo", - "min": "foo", - "max": "10", - "cpu-percent": "60", - "scaleRef-kind": "kind", - "scaleRef-name": "name", - "scaleRef-apiVersion": "apiVersion", - }, - expectErr: true, - }, - { - name: "'max' must be an integer if specified", - params: map[string]interface{}{ - "name": "foo", - "min": "1", - "max": "bar", - "cpu-percent": "90", - "scaleRef-kind": "kind", - "scaleRef-name": "name", - "scaleRef-apiVersion": "apiVersion", - }, - expectErr: true, + name: "'max' must be greater than zero", + HPAName: "foo", + minReplicas: 1, + maxReplicas: -10, + scaleRefKind: "kind", + scaleRefName: "name", + scaleRefApiVersion: "apiVersion", + expectErr: true, }, } - generator := HorizontalPodAutoscalerV1{} + for _, test := range tests { - obj, err := generator.Generate(test.params) + generator := HorizontalPodAutoscalerGeneratorV1{ + Name: 
test.HPAName, + ScaleRefKind: test.scaleRefKind, + ScaleRefName: test.scaleRefName, + ScaleRefApiVersion: test.scaleRefApiVersion, + MinReplicas: test.minReplicas, + MaxReplicas: test.maxReplicas, + CPUPercent: test.CPUPercent, + } + obj, err := generator.StructuredGenerate() if test.expectErr && err != nil { continue } From 87054639a27a18bd9dd27effabac935f058f272e Mon Sep 17 00:00:00 2001 From: wackxu Date: Thu, 16 Nov 2017 17:29:47 +0800 Subject: [PATCH 060/794] refactor kubectl autoscale to use the new generator --- pkg/kubectl/autoscale.go | 5 +- pkg/kubectl/autoscale_test.go | 2 +- pkg/kubectl/cmd/autoscale.go | 50 ++++++++----------- pkg/kubectl/cmd/util/factory_client_access.go | 4 -- 4 files changed, 24 insertions(+), 37 deletions(-) diff --git a/pkg/kubectl/autoscale.go b/pkg/kubectl/autoscale.go index fbe826d07bf..39c78ca31df 100644 --- a/pkg/kubectl/autoscale.go +++ b/pkg/kubectl/autoscale.go @@ -75,12 +75,11 @@ func (s HorizontalPodAutoscalerGeneratorV1) validate() error { if len(s.Name) == 0 { return fmt.Errorf("name must be specified") } - if s.MaxReplicas <= 0 { - return fmt.Errorf("'max' is a required parameter and must be greater than zero") + if s.MaxReplicas < 1 { + return fmt.Errorf("'max' is a required parameter and must be at least 1") } if s.MinReplicas > s.MaxReplicas { return fmt.Errorf("'max' must be greater than or equal to 'min'") } return nil } - diff --git a/pkg/kubectl/autoscale_test.go b/pkg/kubectl/autoscale_test.go index 9d2a52811c2..86b6c1eab55 100644 --- a/pkg/kubectl/autoscale_test.go +++ b/pkg/kubectl/autoscale_test.go @@ -89,7 +89,7 @@ func TestHPAGenerate(t *testing.T) { expectErr: true, }, { - name: "'max' must be greater than zero", + name: "'max' must be at least 1", HPAName: "foo", minReplicas: 1, maxReplicas: -10, diff --git a/pkg/kubectl/cmd/autoscale.go b/pkg/kubectl/cmd/autoscale.go index 1166674c82d..d16d43383eb 100644 --- a/pkg/kubectl/cmd/autoscale.go +++ b/pkg/kubectl/cmd/autoscale.go @@ -64,11 +64,11 @@ func 
NewCmdAutoscale(f cmdutil.Factory, out io.Writer) *cobra.Command { ArgAliases: argAliases, } cmdutil.AddPrinterFlags(cmd) - cmd.Flags().String("generator", "horizontalpodautoscaler/v1", i18n.T("The name of the API generator to use. Currently there is only 1 generator.")) - cmd.Flags().Int("min", -1, "The lower limit for the number of pods that can be set by the autoscaler. If it's not specified or negative, the server will apply a default value.") - cmd.Flags().Int("max", -1, "The upper limit for the number of pods that can be set by the autoscaler. Required.") + cmd.Flags().String("generator", cmdutil.HorizontalPodAutoscalerV1GeneratorName, i18n.T("The name of the API generator to use. Currently there is only 1 generator.")) + cmd.Flags().Int32("min", -1, "The lower limit for the number of pods that can be set by the autoscaler. If it's not specified or negative, the server will apply a default value.") + cmd.Flags().Int32("max", -1, "The upper limit for the number of pods that can be set by the autoscaler. Required.") cmd.MarkFlagRequired("max") - cmd.Flags().Int("cpu-percent", -1, fmt.Sprintf("The target average CPU utilization (represented as a percent of requested CPU) over all the pods. If it's not specified or negative, a default autoscaling policy will be used.")) + cmd.Flags().Int32("cpu-percent", -1, fmt.Sprintf("The target average CPU utilization (represented as a percent of requested CPU) over all the pods. If it's not specified or negative, a default autoscaling policy will be used.")) cmd.Flags().String("name", "", i18n.T("The name for the newly created object. If not specified, the name of the input resource will be used.")) cmdutil.AddDryRunFlag(cmd) usage := "identifying the resource to autoscale." 
@@ -102,15 +102,6 @@ func RunAutoscale(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []s return err } - // Get the generator, setup and validate all required parameters - generatorName := cmdutil.GetFlagString(cmd, "generator") - generators := f.Generators("autoscale") - generator, found := generators[generatorName] - if !found { - return cmdutil.UsageErrorf(cmd, "generator %q not found.", generatorName) - } - names := generator.ParamNames() - count := 0 err = r.Visit(func(info *resource.Info, err error) error { if err != nil { @@ -122,24 +113,25 @@ func RunAutoscale(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []s return err } - name := info.Name - params := kubectl.MakeParams(cmd, names) - params["default-name"] = name - - params["scaleRef-kind"] = mapping.GroupVersionKind.Kind - params["scaleRef-name"] = name - params["scaleRef-apiVersion"] = mapping.GroupVersionKind.GroupVersion().String() - - if err = kubectl.ValidateParams(names, params); err != nil { - return err - } - // Check for invalid flags used against the present generator. 
- if err := kubectl.EnsureFlagsValid(cmd, generators, generatorName); err != nil { - return err + // get the generator + var generator kubectl.StructuredGenerator + switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { + case cmdutil.HorizontalPodAutoscalerV1GeneratorName: + generator = &kubectl.HorizontalPodAutoscalerGeneratorV1{ + Name: info.Name, + MinReplicas: cmdutil.GetFlagInt32(cmd, "min"), + MaxReplicas: cmdutil.GetFlagInt32(cmd, "max"), + CPUPercent: cmdutil.GetFlagInt32(cmd, "cpu-percent"), + ScaleRefName: info.Name, + ScaleRefKind: mapping.GroupVersionKind.Kind, + ScaleRefApiVersion: mapping.GroupVersionKind.GroupVersion().String(), + } + default: + return errUnsupportedGenerator(cmd, generatorName) } // Generate new object - object, err := generator.Generate(params) + object, err := generator.StructuredGenerate() if err != nil { return err } @@ -193,7 +185,7 @@ func RunAutoscale(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []s func validateFlags(cmd *cobra.Command) error { errs := []error{} - max, min := cmdutil.GetFlagInt(cmd, "max"), cmdutil.GetFlagInt(cmd, "min") + max, min := cmdutil.GetFlagInt32(cmd, "max"), cmdutil.GetFlagInt32(cmd, "min") if max < 1 { errs = append(errs, fmt.Errorf("--max=MAXPODS is required and must be at least 1, max: %d", max)) } diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index ea783b99e9f..ef389d9ff86 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -568,10 +568,6 @@ func DefaultGenerators(cmdName string) map[string]kubectl.Generator { CronJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{}, CronJobV1Beta1GeneratorName: kubectl.CronJobV1Beta1{}, } - case "autoscale": - generator = map[string]kubectl.Generator{ - HorizontalPodAutoscalerV1GeneratorName: kubectl.HorizontalPodAutoscalerV1{}, - } case "namespace": generator = map[string]kubectl.Generator{ 
NamespaceV1GeneratorName: kubectl.NamespaceGeneratorV1{}, From 99a789c668e6ba8bfb7aaf09d646422b9bd7b519 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Mon, 20 Nov 2017 16:49:53 +0800 Subject: [PATCH 061/794] remove dead code --- pkg/proxy/ipvs/proxier.go | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 9c81de66e4e..e2a93ebf5c9 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -1702,40 +1702,6 @@ func (proxier *Proxier) linkKubeServiceChain(existingNATChains map[utiliptables. return nil } -//// linkKubeIPSetsChain will Create chain KUBE-SVC-IPSETS and link the chin in KUBE-SERVICES -// -//// Chain KUBE-SERVICES (policy ACCEPT) -//// target prot opt source destination -//// KUBE-SVC-IPSETS all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-SERVICE-ACCESS dst,dst -// -//// Chain KUBE-SVC-IPSETS (1 references) -//// target prot opt source destination -//// KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst -//// ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst PHYSDEV match ! --physdev-is-in ADDRTYPE match src-type !LOCAL -//// ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst ADDRTYPE match dst-type LOCAL -//// ... -//func (proxier *Proxier) linkKubeIPSetsChain(existingNATChains map[utiliptables.Chain]string, natChains *bytes.Buffer) error { -// if _, err := proxier.iptables.EnsureChain(utiliptables.TableNAT, KubeServiceIPSetsChain); err != nil { -// return fmt.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeServiceIPSetsChain, err) -// } -// -// // TODO: iptables comment message for ipset? -// // The hash:ip,port type of sets require two src/dst parameters of the set match and SET target kernel modules. 
-// args := []string{"-m", "set", "--match-set", proxier.kubeServiceAccessSet.Name, "dst,dst", "-j", string(KubeServiceIPSetsChain)} -// if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, kubeServicesChain, args...); err != nil { -// return fmt.Errorf("Failed to ensure that ipset %s chain %s jumps to %s: %v", proxier.kubeServiceAccessSet.Name, kubeServicesChain, KubeServiceIPSetsChain, err) -// } -// -// // equal to `iptables -t nat -N KUBE-SVC-IPSETS` -// // write `:KUBE-SERVICES - [0:0]` in nat table -// if chain, ok := existingNATChains[KubeServiceIPSetsChain]; ok { -// writeLine(natChains, chain) -// } else { -// writeLine(natChains, utiliptables.MakeChainLine(KubeServiceIPSetsChain)) -// } -// return nil -//} - func (proxier *Proxier) createKubeFireWallChain(existingNATChains map[utiliptables.Chain]string, natChains *bytes.Buffer) error { // `iptables -t nat -N KUBE-FIRE-WALL` if _, err := proxier.iptables.EnsureChain(utiliptables.TableNAT, KubeFireWallChain); err != nil { From 81917374db0abfadbc50d5a65b2fe98ec0c36a20 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Mon, 20 Nov 2017 23:39:01 +0800 Subject: [PATCH 062/794] Remove kubeadm fuzzer from api testing --- pkg/api/testing/fuzzer.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go index 868b4040bcb..4779ae9ec46 100644 --- a/pkg/api/testing/fuzzer.go +++ b/pkg/api/testing/fuzzer.go @@ -27,7 +27,6 @@ import ( genericfuzzer "k8s.io/apimachinery/pkg/apis/meta/fuzzer" "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - kubeadmfuzzer "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/fuzzer" admissionregistrationfuzzer "k8s.io/kubernetes/pkg/apis/admissionregistration/fuzzer" appsfuzzer "k8s.io/kubernetes/pkg/apis/apps/fuzzer" autoscalingfuzzer "k8s.io/kubernetes/pkg/apis/autoscaling/fuzzer" @@ -98,7 +97,6 @@ var FuzzerFuncs = fuzzer.MergeFuzzerFuncs( batchfuzzer.Funcs, 
autoscalingfuzzer.Funcs, rbacfuzzer.Funcs, - kubeadmfuzzer.Funcs, policyfuzzer.Funcs, certificatesfuzzer.Funcs, admissionregistrationfuzzer.Funcs, From cdad258a54e85a9bd0d318697976edef3cdb7203 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Mon, 20 Nov 2017 23:40:23 +0800 Subject: [PATCH 063/794] Auto generated BUILD files. --- pkg/api/testing/BUILD | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/api/testing/BUILD b/pkg/api/testing/BUILD index 9c04ee34584..3db5dc87bc0 100644 --- a/pkg/api/testing/BUILD +++ b/pkg/api/testing/BUILD @@ -16,7 +16,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/api/testing", deps = [ - "//cmd/kubeadm/app/apis/kubeadm/fuzzer:go_default_library", "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/admissionregistration/fuzzer:go_default_library", "//pkg/apis/apps/fuzzer:go_default_library", From 0ee152a616b0ea67f0843cd517d859b43fae1753 Mon Sep 17 00:00:00 2001 From: zhangxiaoyu-zidif Date: Tue, 21 Nov 2017 01:44:09 +0800 Subject: [PATCH 064/794] make k8s support cephfs fuse mount --- pkg/volume/cephfs/cephfs.go | 130 +++++++++++++++++++++++++++++++++--- 1 file changed, 122 insertions(+), 8 deletions(-) diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go index 5c0fdd04a74..ec50de98012 100644 --- a/pkg/volume/cephfs/cephfs.go +++ b/pkg/volume/cephfs/cephfs.go @@ -19,6 +19,9 @@ package cephfs import ( "fmt" "os" + "os/exec" + "path" + "runtime" "strings" "github.com/golang/glog" @@ -231,15 +234,32 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { } os.MkdirAll(dir, 0750) - err = cephfsVolume.execMount(dir) - if err == nil { - return nil + // check whether it belongs to fuse, if not, default to use kernel mount. + if cephfsVolume.checkFuseMount() { + glog.V(4).Infof("CephFS fuse mount.") + err = cephfsVolume.execFuseMount(dir) + // cleanup no matter if fuse mount fail. 
+ keyringPath := cephfsVolume.GetKeyringPath() + _, StatErr := os.Stat(keyringPath) + if !os.IsNotExist(StatErr) { + os.RemoveAll(keyringPath) + } + if err == nil { + // cephfs fuse mount succeeded. + return nil + } else { + // if cephfs fuse mount failed, fallback to kernel mount. + glog.V(4).Infof("CephFS fuse mount failed: %v, fallback to kernel mount.", err) + } } - - // cleanup upon failure - util.UnmountPath(dir, cephfsVolume.mounter) - // return error - return err + glog.V(4).Infof("CephFS kernel mount.") + err = cephfsVolume.execMount(dir) + if err != nil { + // cleanup upon failure. + util.UnmountPath(dir, cephfsVolume.mounter) + return err + } + return nil } type cephfsUnmounter struct { @@ -264,6 +284,14 @@ func (cephfsVolume *cephfs) GetPath() string { return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName) } +// GetKeyringPath creates cephfuse keyring path +func (cephfsVolume *cephfs) GetKeyringPath() string { + name := cephfsPluginName + volumeDir := cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName) + volumeKeyringDir := volumeDir + "~keyring" + return volumeKeyringDir +} + func (cephfsVolume *cephfs) execMount(mountpoint string) error { // cephfs mount option ceph_opt := "" @@ -299,6 +327,92 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error { return nil } +func (cephfsMounter *cephfsMounter) checkFuseMount() bool { + execute := cephfsMounter.plugin.host.GetExec(cephfsMounter.plugin.GetPluginName()) + switch runtime.GOOS { + case "linux": + retBytes, err := execute.Run("/bin/ls", "/sbin/mount.fuse.ceph") + if err == nil && string(retBytes) == "/sbin/mount.fuse.ceph\n" { + glog.V(4).Infof("/sbin/mount.fuse.ceph exists, it should be fuse mount") + return true + } + return false + } + return false +} + +func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error { + // 
cephfs keyring file + keyring_file := "" + // override secretfile if secret is provided + if cephfsVolume.secret != "" { + // TODO: cephfs fuse currently doesn't support secret option, + // remove keyring file create once secret option is supported. + glog.V(4).Infof("cephfs mount begin using fuse.") + + keyringPath := cephfsVolume.GetKeyringPath() + os.MkdirAll(keyringPath, 0750) + + payload := make(map[string]util.FileProjection, 1) + var fileProjection util.FileProjection + + keyring := fmt.Sprintf("[client.%s]\n", cephfsVolume.id) + "key = " + cephfsVolume.secret + "\n" + + fileProjection.Data = []byte(keyring) + fileProjection.Mode = int32(0644) + fileName := cephfsVolume.id + ".keyring" + + payload[fileName] = fileProjection + + writerContext := fmt.Sprintf("cephfuse:%v.keyring", cephfsVolume.id) + writer, err := util.NewAtomicWriter(keyringPath, writerContext) + if err != nil { + glog.Errorf("failed to create atomic writer: %v", err) + return err + } + + err = writer.Write(payload) + if err != nil { + glog.Errorf("failed to write payload to dir: %v", err) + return err + } + + keyring_file = path.Join(keyringPath, fileName) + + } else { + keyring_file = cephfsVolume.secret_file + } + + // build src like mon1:6789,mon2:6789,mon3:6789:/ + hosts := cephfsVolume.mon + l := len(hosts) + // pass all monitors and let ceph randomize and fail over + i := 0 + src := "" + for i = 0; i < l-1; i++ { + src += hosts[i] + "," + } + src += hosts[i] + + mountArgs := []string{} + mountArgs = append(mountArgs, "-k") + mountArgs = append(mountArgs, keyring_file) + mountArgs = append(mountArgs, "-m") + mountArgs = append(mountArgs, src) + mountArgs = append(mountArgs, mountpoint) + mountArgs = append(mountArgs, "-r") + mountArgs = append(mountArgs, cephfsVolume.path) + + glog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs) + command := exec.Command("ceph-fuse", mountArgs...) 
+ output, err := command.CombinedOutput() + if err != nil || !(strings.Contains(string(output), "starting fuse")) { + return fmt.Errorf("Ceph-fuse failed: %v\narguments: %s\nOutput: %s\n", err, mountArgs, string(output)) + } + + return nil +} + func getVolumeSource(spec *volume.Spec) ([]string, string, string, string, bool, error) { if spec.Volume != nil && spec.Volume.CephFS != nil { mon := spec.Volume.CephFS.Monitors From ca9076cf0f639d99e19eff34a146ae1140ffe4f7 Mon Sep 17 00:00:00 2001 From: George Kraft Date: Fri, 17 Nov 2017 08:32:45 -0600 Subject: [PATCH 065/794] Add docker-logins config to kubernetes-worker --- .../juju/layers/kubernetes-worker/config.yaml | 9 +++++++ .../reactive/kubernetes_worker.py | 27 +++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/cluster/juju/layers/kubernetes-worker/config.yaml b/cluster/juju/layers/kubernetes-worker/config.yaml index 3fddf002f74..b7ddc9bba72 100644 --- a/cluster/juju/layers/kubernetes-worker/config.yaml +++ b/cluster/juju/layers/kubernetes-worker/config.yaml @@ -49,3 +49,12 @@ options: runtime-config=batch/v2alpha1=true profiling=true will result in kube-apiserver being run with the following options: --runtime-config=batch/v2alpha1=true --profiling=true + docker-logins: + type: string + default: "[]" + description: | + Docker login credentials. Setting this config allows Kubelet to pull images from + registries where auth is required. 
+ + The value for this config must be a JSON array of credential objects, like this: + [{"server": "my.registry", "username": "myUser", "password": "myPass"}] diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index d43f06768a8..76db529c523 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import os import random import shutil @@ -435,6 +436,32 @@ def extra_args_changed(): set_state('kubernetes-worker.restart-needed') +@when('config.changed.docker-logins') +def docker_logins_changed(): + config = hookenv.config() + previous_logins = config.previous('docker-logins') + logins = config['docker-logins'] + logins = json.loads(logins) + + if previous_logins: + previous_logins = json.loads(previous_logins) + next_servers = {login['server'] for login in logins} + previous_servers = {login['server'] for login in previous_logins} + servers_to_logout = previous_servers - next_servers + for server in servers_to_logout: + cmd = ['docker', 'logout', server] + subprocess.check_call(cmd) + + for login in logins: + server = login['server'] + username = login['username'] + password = login['password'] + cmd = ['docker', 'login', server, '-u', username, '-p', password] + subprocess.check_call(cmd) + + set_state('kubernetes-worker.restart-needed') + + def arch(): '''Return the package architecture as a string. 
Raise an exception if the architecture is not supported by kubernetes.''' From 2044d56decab03f81eb701c92bff532abd3c0031 Mon Sep 17 00:00:00 2001 From: Shiyang Wang Date: Fri, 17 Nov 2017 17:43:53 +0800 Subject: [PATCH 066/794] some test enhance, comments enhance and duplicate code reduce --- .../pkg/runtime/serializer/json/json.go | 50 +++++++++---------- .../pkg/runtime/serializer/json/json_test.go | 26 ++++++++++ 2 files changed, 50 insertions(+), 26 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 8a217f32e31..814df076d1f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -98,11 +98,29 @@ func init() { jsoniter.RegisterTypeDecoderFunc("interface {}", decodeNumberAsInt64IfPossible) } +// gvkWithDefaults returns group kind and version defaulting from provided default +func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { + if len(actual.Kind) == 0 { + actual.Kind = defaultGVK.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = defaultGVK.Group + actual.Version = defaultGVK.Version + } + if len(actual.Version) == 0 && actual.Group == defaultGVK.Group { + actual.Version = defaultGVK.Version + } + return actual +} + // Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then -// load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, the raw data will be -// extracted and no decoding will be performed. If into is not registered with the typer, then the object will be straight decoded using -// normal JSON/YAML unmarshalling. 
If into is provided and the original data is not fully qualified with kind/version/group, the type of -// the into will be used to alter the returned gvk. On success or most errors, the method will return the calculated schema kind. +// load that data into an object matching the desired schema kind or the provided into. +// If into is *runtime.Unknown, the raw data will be extracted and no decoding will be performed. +// If into is not registered with the typer, then the object will be straight decoded using normal JSON/YAML unmarshalling. +// If into is provided and the original data is not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. +// If into is nil or data's gvk different from into's gvk, it will generate a new Object with ObjectCreater.New(gvk) +// On success or most errors, the method will return the calculated schema kind. +// The gvk calculate priority will be originalData > default gvk > into func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { if versioned, ok := into.(*runtime.VersionedObjects); ok { into = versioned.Last() @@ -129,17 +147,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } if gvk != nil { - // apply kind and version defaulting from provided default - if len(actual.Kind) == 0 { - actual.Kind = gvk.Kind - } - if len(actual.Version) == 0 && len(actual.Group) == 0 { - actual.Group = gvk.Group - actual.Version = gvk.Version - } - if len(actual.Version) == 0 && actual.Group == gvk.Group { - actual.Version = gvk.Version - } + *actual = gvkWithDefaults(*actual, *gvk) } if unk, ok := into.(*runtime.Unknown); ok && unk != nil { @@ -161,17 +169,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i case err != nil: return nil, actual, err default: - typed := types[0] - if len(actual.Kind) == 0 { - actual.Kind = typed.Kind - } 
- if len(actual.Version) == 0 && len(actual.Group) == 0 { - actual.Group = typed.Group - actual.Version = typed.Version - } - if len(actual.Version) == 0 && actual.Group == typed.Group { - actual.Version = typed.Version - } + *actual = gvkWithDefaults(*actual, types[0]) } } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json_test.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json_test.go index 469bf3ed8df..c8ae5550c52 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json_test.go @@ -94,6 +94,32 @@ func TestDecode(t *testing.T) { expectedObject: &testDecodable{}, expectedGVK: &schema.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, }, + // group version, kind is defaulted + { + data: []byte(`{"apiVersion":"other1/blah1"}`), + defaultGVK: &schema.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + creater: &mockCreater{obj: &testDecodable{}}, + expectedObject: &testDecodable{}, + expectedGVK: &schema.GroupVersionKind{Kind: "Test", Group: "other1", Version: "blah1"}, + }, + // gvk all provided then not defaulted at all + { + data: []byte(`{"kind":"Test","apiVersion":"other/blah"}`), + defaultGVK: &schema.GroupVersionKind{Kind: "Test1", Group: "other1", Version: "blah1"}, + creater: &mockCreater{obj: &testDecodable{}}, + expectedObject: &testDecodable{}, + expectedGVK: &schema.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + }, + //gvk defaulting if kind not provided in data and defaultGVK use into's kind + { + data: []byte(`{"apiVersion":"b1/c1"}`), + into: &testDecodable{gvk: schema.GroupVersionKind{Kind: "a3", Group: "b1", Version: "c1"}}, + typer: &mockTyper{gvk: &schema.GroupVersionKind{Kind: "a3", Group: "b1", Version: "c1"}}, + defaultGVK: nil, + creater: &mockCreater{obj: &testDecodable{}}, + expectedObject: &testDecodable{gvk: schema.GroupVersionKind{Kind: "a3", 
Group: "b1", Version: "c1"}}, + expectedGVK: &schema.GroupVersionKind{Kind: "a3", Group: "b1", Version: "c1"}, + }, // accept runtime.Unknown as into and bypass creator { From 226f8b3c735feda5e3298f67b0406a5bb855c0be Mon Sep 17 00:00:00 2001 From: zhangjie Date: Tue, 21 Nov 2017 18:21:59 +0800 Subject: [PATCH 067/794] delete useless params containerized Signed-off-by: zhangjie --- cmd/kubelet/app/server.go | 3 --- pkg/kubelet/kubelet.go | 2 -- 2 files changed, 5 deletions(-) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 94dd2fc7316..8a4fc75a343 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -714,7 +714,6 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal. kubeFlags.RegisterNode, kubeFlags.RegisterWithTaints, kubeFlags.AllowedUnsafeSysctls, - kubeFlags.Containerized, kubeFlags.RemoteRuntimeEndpoint, kubeFlags.RemoteImageEndpoint, kubeFlags.ExperimentalMounterPath, @@ -787,7 +786,6 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, registerNode bool, registerWithTaints []api.Taint, allowedUnsafeSysctls []string, - containerized bool, remoteRuntimeEndpoint string, remoteImageEndpoint string, experimentalMounterPath string, @@ -820,7 +818,6 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, registerNode, registerWithTaints, allowedUnsafeSysctls, - containerized, remoteRuntimeEndpoint, remoteImageEndpoint, experimentalMounterPath, diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 025340803b1..b6085cec1ef 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -203,7 +203,6 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration, registerNode bool, registerWithTaints []api.Taint, allowedUnsafeSysctls []string, - containerized bool, remoteRuntimeEndpoint string, remoteImageEndpoint string, experimentalMounterPath string, @@ -330,7 +329,6 @@ func NewMainKubelet(kubeCfg 
*kubeletconfiginternal.KubeletConfiguration, registerNode bool, registerWithTaints []api.Taint, allowedUnsafeSysctls []string, - containerized bool, remoteRuntimeEndpoint string, remoteImageEndpoint string, experimentalMounterPath string, From 4aac6a80a3cd5b026c678dd670bfbe92b9ea7f1a Mon Sep 17 00:00:00 2001 From: edisonxiang Date: Fri, 17 Nov 2017 21:46:14 +0800 Subject: [PATCH 068/794] probeAttachedVolume improvement in Cinder --- pkg/volume/cinder/attacher.go | 2 +- pkg/volume/cinder/cinder_util.go | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go index dab97b9b9c4..1bd25712425 100644 --- a/pkg/volume/cinder/attacher.go +++ b/pkg/volume/cinder/attacher.go @@ -44,7 +44,7 @@ var _ volume.Attacher = &cinderDiskAttacher{} var _ volume.AttachableVolumePlugin = &cinderPlugin{} const ( - checkSleepDuration = 1 * time.Second + checkSleepDuration = 5 * time.Second operationFinishInitDealy = 1 * time.Second operationFinishFactor = 1.1 operationFinishSteps = 10 diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go index 661ad3f059c..15758cf31b7 100644 --- a/pkg/volume/cinder/cinder_util.go +++ b/pkg/volume/cinder/cinder_util.go @@ -224,6 +224,18 @@ func probeAttachedVolume() error { scsiHostRescan() executor := exec.New() + + // udevadm settle waits for udevd to process the device creation + // events for all hardware devices, thus ensuring that any device + // nodes have been created successfully before proceeding. + argsSettle := []string{"settle", "--timeout=1"} + cmdSettle := executor.Command("udevadm", argsSettle...) + _, errSettle := cmdSettle.CombinedOutput() + if errSettle != nil { + glog.Errorf("error running udevadm settle %v\n", errSettle) + return errSettle + } + args := []string{"trigger"} cmd := executor.Command("udevadm", args...) 
_, err := cmd.CombinedOutput() From b7f2f6e29932181255a3fc2e1f26c03f9b2a9483 Mon Sep 17 00:00:00 2001 From: edisonxiang Date: Fri, 17 Nov 2017 22:20:24 +0800 Subject: [PATCH 069/794] modify some wording --- pkg/volume/cinder/cinder_util.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go index 15758cf31b7..df258a06293 100644 --- a/pkg/volume/cinder/cinder_util.go +++ b/pkg/volume/cinder/cinder_util.go @@ -225,9 +225,9 @@ func probeAttachedVolume() error { executor := exec.New() - // udevadm settle waits for udevd to process the device creation - // events for all hardware devices, thus ensuring that any device - // nodes have been created successfully before proceeding. + // udevadm settle waits for udevd to process the device creation + // events for all hardware devices, thus ensuring that any device + // nodes have been created successfully before proceeding. argsSettle := []string{"settle", "--timeout=1"} cmdSettle := executor.Command("udevadm", argsSettle...) 
_, errSettle := cmdSettle.CombinedOutput() From 256c5c13c63e89c33970c8d472e61c6ed64255fd Mon Sep 17 00:00:00 2001 From: edisonxiang Date: Mon, 20 Nov 2017 11:33:56 +0800 Subject: [PATCH 070/794] Using exponential backoff instead of linear --- pkg/volume/cinder/attacher.go | 64 ++++++++++++++++++-------------- pkg/volume/cinder/cinder_util.go | 3 +- 2 files changed, 38 insertions(+), 29 deletions(-) diff --git a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go index 1bd25712425..95a28c82496 100644 --- a/pkg/volume/cinder/attacher.go +++ b/pkg/volume/cinder/attacher.go @@ -44,7 +44,9 @@ var _ volume.Attacher = &cinderDiskAttacher{} var _ volume.AttachableVolumePlugin = &cinderPlugin{} const ( - checkSleepDuration = 5 * time.Second + probeVolumeInitDealy = 1 * time.Second + probeVolumeFactor = 2.0 + probeVolumeSteps = 10 operationFinishInitDealy = 1 * time.Second operationFinishFactor = 1.1 operationFinishSteps = 10 @@ -221,6 +223,38 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod return volumesAttachedCheck, nil } +func (attacher *cinderDiskAttacher) waitProbeVolume(devicePath, volumeID string) (string, error) { + backoff := wait.Backoff{ + Duration: probeVolumeInitDealy, + Factor: probeVolumeFactor, + Steps: probeVolumeSteps, + } + + err := wait.ExponentialBackoff(backoff, func() (string, error) { + glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID) + probeAttachedVolume() + if !attacher.cinderProvider.ShouldTrustDevicePath() { + // Using the Cinder volume ID, find the real device path (See Issue #33128) + devicePath = attacher.cinderProvider.GetDevicePath(volumeID) + } + exists, err := volumeutil.PathExists(devicePath) + if exists && err == nil { + glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath) + return devicePath, nil + } + else { + // Log an error, and continue checking periodically + glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", 
volumeID, devicePath, err) + } + }) + + if err == wait.ErrWaitTimeout { + err = fmt.Errorf("Volume %q failed to be probed within the alloted time", volumeID) + } + + return "", err +} + func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { // NOTE: devicePath is is path as reported by Cinder, which may be incorrect and should not be used. See Issue #33128 volumeSource, _, err := getVolumeSource(spec) @@ -234,32 +268,8 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty.", volumeID) } - ticker := time.NewTicker(checkSleepDuration) - defer ticker.Stop() - timer := time.NewTimer(timeout) - defer timer.Stop() - - for { - select { - case <-ticker.C: - glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID) - probeAttachedVolume() - if !attacher.cinderProvider.ShouldTrustDevicePath() { - // Using the Cinder volume ID, find the real device path (See Issue #33128) - devicePath = attacher.cinderProvider.GetDevicePath(volumeID) - } - exists, err := volumeutil.PathExists(devicePath) - if exists && err == nil { - glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath) - return devicePath, nil - } else { - // Log an error, and continue checking periodically - glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err) - } - case <-timer.C: - return "", fmt.Errorf("Could not find attached Cinder disk %q. 
Timeout waiting for mount paths to be created.", volumeID) - } - } + // Using exponential backoff instead of linear + return attacher.waitProbeVolume(devicePath, volumeID) } func (attacher *cinderDiskAttacher) GetDeviceMountPath( diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go index df258a06293..d994dff0b1a 100644 --- a/pkg/volume/cinder/cinder_util.go +++ b/pkg/volume/cinder/cinder_util.go @@ -228,12 +228,11 @@ func probeAttachedVolume() error { // udevadm settle waits for udevd to process the device creation // events for all hardware devices, thus ensuring that any device // nodes have been created successfully before proceeding. - argsSettle := []string{"settle", "--timeout=1"} + argsSettle := []string{"settle"} cmdSettle := executor.Command("udevadm", argsSettle...) _, errSettle := cmdSettle.CombinedOutput() if errSettle != nil { glog.Errorf("error running udevadm settle %v\n", errSettle) - return errSettle } args := []string{"trigger"} From 3652474ee192f238f9d7f7f5c627632a726fefe8 Mon Sep 17 00:00:00 2001 From: edisonxiang Date: Mon, 20 Nov 2017 11:39:45 +0800 Subject: [PATCH 071/794] fix some errors --- pkg/volume/cinder/attacher.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go index 95a28c82496..fe466cc4e86 100644 --- a/pkg/volume/cinder/attacher.go +++ b/pkg/volume/cinder/attacher.go @@ -241,8 +241,7 @@ func (attacher *cinderDiskAttacher) waitProbeVolume(devicePath, volumeID string) if exists && err == nil { glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath) return devicePath, nil - } - else { + } else { // Log an error, and continue checking periodically glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err) } From 1886de83578439993a21cdb188b188de3d6a7a1f Mon Sep 17 00:00:00 2001 From: edisonxiang Date: Mon, 20 Nov 2017 12:51:39 +0800 Subject: [PATCH 072/794] 
fix ci problems --- pkg/volume/cinder/attacher.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go index fe466cc4e86..585f1abb7a0 100644 --- a/pkg/volume/cinder/attacher.go +++ b/pkg/volume/cinder/attacher.go @@ -230,7 +230,7 @@ func (attacher *cinderDiskAttacher) waitProbeVolume(devicePath, volumeID string) Steps: probeVolumeSteps, } - err := wait.ExponentialBackoff(backoff, func() (string, error) { + err := wait.ExponentialBackoff(backoff, func() (bool, error) { glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID) probeAttachedVolume() if !attacher.cinderProvider.ShouldTrustDevicePath() { @@ -240,18 +240,22 @@ func (attacher *cinderDiskAttacher) waitProbeVolume(devicePath, volumeID string) exists, err := volumeutil.PathExists(devicePath) if exists && err == nil { glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath) - return devicePath, nil + return true, nil } else { // Log an error, and continue checking periodically glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err) + return false, nil } }) - if err == wait.ErrWaitTimeout { - err = fmt.Errorf("Volume %q failed to be probed within the alloted time", volumeID) + if err != nil { + if err == wait.ErrWaitTimeout { + err = fmt.Errorf("Volume %q failed to be probed within the alloted time", volumeID) + } + return "", err } - return "", err + return devicePath, nil } func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { From 84eb6c8b9d32d9dfdf3ce35455882714ac7108a7 Mon Sep 17 00:00:00 2001 From: edisonxiang Date: Tue, 21 Nov 2017 16:25:28 +0800 Subject: [PATCH 073/794] exponential backoff with timeout --- pkg/volume/cinder/attacher.go | 69 ++++++++++++++++------------------- 1 file changed, 31 insertions(+), 38 deletions(-) diff --git 
a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go index 585f1abb7a0..f98cae71bd2 100644 --- a/pkg/volume/cinder/attacher.go +++ b/pkg/volume/cinder/attacher.go @@ -46,7 +46,6 @@ var _ volume.AttachableVolumePlugin = &cinderPlugin{} const ( probeVolumeInitDealy = 1 * time.Second probeVolumeFactor = 2.0 - probeVolumeSteps = 10 operationFinishInitDealy = 1 * time.Second operationFinishFactor = 1.1 operationFinishSteps = 10 @@ -223,41 +222,6 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod return volumesAttachedCheck, nil } -func (attacher *cinderDiskAttacher) waitProbeVolume(devicePath, volumeID string) (string, error) { - backoff := wait.Backoff{ - Duration: probeVolumeInitDealy, - Factor: probeVolumeFactor, - Steps: probeVolumeSteps, - } - - err := wait.ExponentialBackoff(backoff, func() (bool, error) { - glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID) - probeAttachedVolume() - if !attacher.cinderProvider.ShouldTrustDevicePath() { - // Using the Cinder volume ID, find the real device path (See Issue #33128) - devicePath = attacher.cinderProvider.GetDevicePath(volumeID) - } - exists, err := volumeutil.PathExists(devicePath) - if exists && err == nil { - glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath) - return true, nil - } else { - // Log an error, and continue checking periodically - glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err) - return false, nil - } - }) - - if err != nil { - if err == wait.ErrWaitTimeout { - err = fmt.Errorf("Volume %q failed to be probed within the alloted time", volumeID) - } - return "", err - } - - return devicePath, nil -} - func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { // NOTE: devicePath is is path as reported by Cinder, which may be incorrect and should not be used. 
See Issue #33128 volumeSource, _, err := getVolumeSource(spec) @@ -271,8 +235,37 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty.", volumeID) } - // Using exponential backoff instead of linear - return attacher.waitProbeVolume(devicePath, volumeID) + ticker := time.NewTicker(probeVolumeInitDealy) + defer ticker.Stop() + timer := time.NewTimer(timeout) + defer timer.Stop() + + duration := probeVolumeInitDealy + for { + select { + case <-ticker.C: + glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID) + probeAttachedVolume() + if !attacher.cinderProvider.ShouldTrustDevicePath() { + // Using the Cinder volume ID, find the real device path (See Issue #33128) + devicePath = attacher.cinderProvider.GetDevicePath(volumeID) + } + exists, err := volumeutil.PathExists(devicePath) + if exists && err == nil { + glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath) + return devicePath, nil + } else { + // Log an error, and continue checking periodically + glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err) + // Using exponential backoff instead of linear + ticker.Stop() + duration = time.Duration(float64(duration) * probeVolumeFactor) + ticker = time.NewTicker(duration) + } + case <-timer.C: + return "", fmt.Errorf("Could not find attached Cinder disk %q. 
Timeout waiting for mount paths to be created.", volumeID) + } + } } func (attacher *cinderDiskAttacher) GetDeviceMountPath( From 50e4642821eeca6881fdf5dc76566bab6bdfaa4b Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 21 Nov 2017 13:12:51 +0000 Subject: [PATCH 074/794] add mount options for azure disk --- pkg/volume/azure_dd/azure_mounter.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/volume/azure_dd/azure_mounter.go b/pkg/volume/azure_dd/azure_mounter.go index ccec2d25fb0..dfa9907e880 100644 --- a/pkg/volume/azure_dd/azure_mounter.go +++ b/pkg/volume/azure_dd/azure_mounter.go @@ -104,6 +104,10 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { options = append(options, "ro") } + if m.options.MountOptions != nil { + options = volume.JoinMountOptions(m.options.MountOptions, options) + } + glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir) isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk) From 2b86881ab3e4603d51c90513dd0b9a0eea653a00 Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Tue, 21 Nov 2017 15:24:34 +0100 Subject: [PATCH 075/794] pkg/controller/garbagecollector/garbagecollector.go: fix string format. 
--- pkg/controller/garbagecollector/garbagecollector.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/controller/garbagecollector/garbagecollector.go b/pkg/controller/garbagecollector/garbagecollector.go index b9dc2705f82..fa1733a41a0 100644 --- a/pkg/controller/garbagecollector/garbagecollector.go +++ b/pkg/controller/garbagecollector/garbagecollector.go @@ -596,9 +596,9 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m preferredResources, err := discoveryClient.ServerPreferredResources() if err != nil { if discovery.IsGroupDiscoveryFailedError(err) { - glog.Warning("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups) + glog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups) } else { - glog.Warning("failed to discover preferred resources: %v", err) + glog.Warningf("failed to discover preferred resources: %v", err) } } if preferredResources == nil { @@ -612,7 +612,7 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m for _, rl := range deletableResources { gv, err := schema.ParseGroupVersion(rl.GroupVersion) if err != nil { - glog.Warning("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err) + glog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err) continue } for i := range rl.APIResources { From 97bb6cb9c79bc314daf3a3682f28846a70870133 Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Tue, 21 Nov 2017 19:12:26 +0100 Subject: [PATCH 076/794] make quick-verify: make the output a bit more readable by showing script names without full paths. 
--- hack/make-rules/verify.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/hack/make-rules/verify.sh b/hack/make-rules/verify.sh index e6d5337fac2..97018e12f48 100755 --- a/hack/make-rules/verify.sh +++ b/hack/make-rules/verify.sh @@ -92,24 +92,26 @@ function run-checks { local -r pattern=$1 local -r runner=$2 + local t for t in $(ls ${pattern}) do + local check_name="$(basename "${t}")" if is-excluded "${t}" ; then - echo "Skipping ${t}" + echo "Skipping ${check_name}" continue fi if ${QUICK} && ! is-quick "${t}" ; then - echo "Skipping ${t} in quick mode" + echo "Skipping ${check_name} in quick mode" continue fi - echo -e "Verifying ${t}" + echo -e "Verifying ${check_name}" local start=$(date +%s) run-cmd "${runner}" "${t}" && tr=$? || tr=$? local elapsed=$(($(date +%s) - ${start})) if [[ ${tr} -eq 0 ]]; then - echo -e "${color_green}SUCCESS${color_norm} ${t}\t${elapsed}s" + echo -e "${color_green}SUCCESS${color_norm} ${check_name}\t${elapsed}s" else - echo -e "${color_red}FAILED${color_norm} ${t}\t${elapsed}s" + echo -e "${color_red}FAILED${color_norm} ${check_name}\t${elapsed}s" ret=1 FAILED_TESTS+=(${t}) fi From f3c4ef835b9baf971402073f7627046495ec5702 Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Wed, 22 Nov 2017 11:41:31 +0800 Subject: [PATCH 077/794] remove useless const --- plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go index 99d80e566d7..a2272a032bb 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -31,11 +31,8 @@ import ( ) const ( - // ClusterAutoscalerProvider defines the default autoscaler provider ClusterAutoscalerProvider = "ClusterAutoscalerProvider" - // StatefulSetKind defines the name of 'StatefulSet' kind - StatefulSetKind = 
"StatefulSet" ) func init() { From 18ef4beb84840c7d69d655ea4408a4ce24addc1a Mon Sep 17 00:00:00 2001 From: Mahdi Mohammadi Date: Wed, 22 Nov 2017 06:17:44 +0000 Subject: [PATCH 078/794] Trying to make error message similar to what is expected in tests --- pkg/kubectl/rollback.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/rollback.go b/pkg/kubectl/rollback.go index 90971aeb0c5..864d5a290dc 100644 --- a/pkg/kubectl/rollback.go +++ b/pkg/kubectl/rollback.go @@ -93,7 +93,7 @@ func RollbackerFor(kind schema.GroupKind, c kubernetes.Interface) (Rollbacker, e } if visitor.result == nil { - return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind.String()) + return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind) } return visitor.result, nil From 99a25fed196b6267e55769e7344693f500a366cf Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Wed, 22 Nov 2017 09:45:23 +0200 Subject: [PATCH 079/794] Lowercase hostnames when used as node names in k8s --- .../layers/kubernetes-worker/reactive/kubernetes_worker.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index d43f06768a8..5b875c4d25f 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -157,7 +157,7 @@ def shutdown(): ''' try: if os.path.isfile(kubeconfig_path): - kubectl('delete', 'node', gethostname()) + kubectl('delete', 'node', gethostname().lower()) except CalledProcessError: hookenv.log('Failed to unregister node.') service_stop('snap.kubelet.daemon') @@ -906,7 +906,8 @@ class ApplyNodeLabelFailed(Exception): def _apply_node_label(label, delete=False, overwrite=False): ''' Invoke kubectl to apply node label changes ''' - hostname = gethostname() + # k8s lowercases hostnames and 
uses them as node names + hostname = gethostname().lower() # TODO: Make this part of the kubectl calls instead of a special string cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}' From 400f5545bf4119a015c4f2c891b86380033a3be6 Mon Sep 17 00:00:00 2001 From: yuexiao-wang Date: Tue, 14 Nov 2017 14:44:45 +0800 Subject: [PATCH 080/794] add more test case in TestValidateStatefulSet Signed-off-by: yuexiao-wang --- pkg/apis/apps/validation/validation.go | 3 +- pkg/apis/apps/validation/validation_test.go | 215 ++++++++++++++------ 2 files changed, 155 insertions(+), 63 deletions(-) diff --git a/pkg/apis/apps/validation/validation.go b/pkg/apis/apps/validation/validation.go index fc7c029ca7a..959e7ecb932 100644 --- a/pkg/apis/apps/validation/validation.go +++ b/pkg/apis/apps/validation/validation.go @@ -94,7 +94,6 @@ func ValidateStatefulSetSpec(spec *apps.StatefulSetSpec, fldPath *field.Path) fi int64(spec.UpdateStrategy.RollingUpdate.Partition), fldPath.Child("updateStrategy").Child("rollingUpdate").Child("partition"))...) 
} - default: allErrs = append(allErrs, field.Invalid(fldPath.Child("updateStrategy"), spec.UpdateStrategy, @@ -124,7 +123,7 @@ func ValidateStatefulSetSpec(spec *apps.StatefulSetSpec, fldPath *field.Path) fi allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) } if spec.Template.Spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), spec.Template.Spec.ActiveDeadlineSeconds, "must not be specified")) + allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "spec", "activeDeadlineSeconds"), spec.Template.Spec.ActiveDeadlineSeconds, "must not be specified")) } return allErrs diff --git a/pkg/apis/apps/validation/validation_test.go b/pkg/apis/apps/validation/validation_test.go index cf5ed252c8d..4b8303bb990 100644 --- a/pkg/apis/apps/validation/validation_test.go +++ b/pkg/apis/apps/validation/validation_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package validation import ( + "strconv" "strings" "testing" @@ -41,6 +42,7 @@ func TestValidateStatefulSet(t *testing.T) { }, }, } + invalidLabels := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} invalidPodTemplate := api.PodTemplate{ Template: api.PodTemplateSpec{ @@ -53,6 +55,21 @@ func TestValidateStatefulSet(t *testing.T) { }, }, } + + invalidTime := int64(60) + invalidPodTemplate2 := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + ActiveDeadlineSeconds: &invalidTime, + }, + }, + } + successCases := []apps.StatefulSet{ { ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, @@ -105,10 +122,13 @@ func TestValidateStatefulSet(t *testing.T) { }, }, } - for _, successCase := range successCases { - if errs := ValidateStatefulSet(&successCase); len(errs) != 0 { - t.Errorf("expected success: %v", errs) - } + + for i, successCase := range successCases { + t.Run("success case "+strconv.Itoa(i), func(t *testing.T) { + if errs := ValidateStatefulSet(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + }) } errorCases := map[string]apps.StatefulSet{ @@ -260,6 +280,29 @@ func TestValidateStatefulSet(t *testing.T) { UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: "foo"}, }, }, + "empty udpate strategy": { + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, + Spec: apps.StatefulSetSpec{ + PodManagementPolicy: apps.OrderedReadyPodManagement, + Selector: &metav1.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + Replicas: 3, + UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: ""}, + }, + }, + "invalid rolling update": { + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, + Spec: apps.StatefulSetSpec{ + PodManagementPolicy: 
apps.OrderedReadyPodManagement, + Selector: &metav1.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + Replicas: 3, + UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: apps.OnDeleteStatefulSetStrategyType, + RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy { + return &apps.RollingUpdateStatefulSetStrategy{Partition: 1} + }()}, + }, + }, "negative parition": { ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{ @@ -273,31 +316,67 @@ func TestValidateStatefulSet(t *testing.T) { }()}, }, }, + "empty pod management policy": { + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, + Spec: apps.StatefulSetSpec{ + PodManagementPolicy: "", + Selector: &metav1.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + Replicas: 3, + UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType}, + }, + }, + "invalid pod management policy": { + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, + Spec: apps.StatefulSetSpec{ + PodManagementPolicy: "foo", + Selector: &metav1.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + Replicas: 3, + UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType}, + }, + }, + "set active deadline seconds": { + ObjectMeta: metav1.ObjectMeta{Name: "abc-123", Namespace: metav1.NamespaceDefault}, + Spec: apps.StatefulSetSpec{ + PodManagementPolicy: "foo", + Selector: &metav1.LabelSelector{MatchLabels: validLabels}, + Template: invalidPodTemplate2.Template, + Replicas: 3, + UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType}, + }, + }, } + for k, v := range errorCases { - errs := ValidateStatefulSet(&v) - if len(errs) == 0 { - t.Errorf("expected failure for %s", k) - } - for i := range errs { - field := 
errs[i].Field - if !strings.HasPrefix(field, "spec.template.") && - field != "metadata.name" && - field != "metadata.namespace" && - field != "spec.selector" && - field != "spec.template" && - field != "GCEPersistentDisk.ReadOnly" && - field != "spec.replicas" && - field != "spec.template.labels" && - field != "metadata.annotations" && - field != "metadata.labels" && - field != "status.replicas" && - field != "spec.updateStrategy" && - field != "spec.updateStrategy.rollingUpdate" && - field != "spec.updateStrategy.rollingUpdate.partition" { - t.Errorf("%s: missing prefix for: %v", k, errs[i]) + t.Run(k, func(t *testing.T) { + errs := ValidateStatefulSet(&v) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) } - } + + for i := range errs { + field := errs[i].Field + if !strings.HasPrefix(field, "spec.template.") && + field != "metadata.name" && + field != "metadata.namespace" && + field != "spec.selector" && + field != "spec.template" && + field != "GCEPersistentDisk.ReadOnly" && + field != "spec.replicas" && + field != "spec.template.labels" && + field != "metadata.annotations" && + field != "metadata.labels" && + field != "status.replicas" && + field != "spec.updateStrategy" && + field != "spec.updateStrategy.rollingUpdate" && + field != "spec.updateStrategy.rollingUpdate.partition" && + field != "spec.podManagementPolicy" && + field != "spec.template.spec.activeDeadlineSeconds" { + t.Errorf("%s: missing prefix for: %v", k, errs[i]) + } + } + }) } } @@ -399,19 +478,21 @@ func TestValidateStatefulSetStatus(t *testing.T) { } for _, test := range tests { - status := apps.StatefulSetStatus{ - Replicas: test.replicas, - ReadyReplicas: test.readyReplicas, - CurrentReplicas: test.currentReplicas, - UpdatedReplicas: test.updatedReplicas, - ObservedGeneration: test.observedGeneration, - CollisionCount: test.collisionCount, - } + t.Run(test.name, func(t *testing.T) { + status := apps.StatefulSetStatus{ + Replicas: test.replicas, + ReadyReplicas: 
test.readyReplicas, + CurrentReplicas: test.currentReplicas, + UpdatedReplicas: test.updatedReplicas, + ObservedGeneration: test.observedGeneration, + CollisionCount: test.collisionCount, + } - errs := ValidateStatefulSetStatus(&status, field.NewPath("status")) - if hasErr := len(errs) > 0; hasErr != test.expectedErr { - t.Errorf("%s: expected error: %t, got error: %t\nerrors: %s", test.name, test.expectedErr, hasErr, errs.ToAggregate().Error()) - } + errs := ValidateStatefulSetStatus(&status, field.NewPath("status")) + if hasErr := len(errs) > 0; hasErr != test.expectedErr { + t.Errorf("%s: expected error: %t, got error: %t\nerrors: %s", test.name, test.expectedErr, hasErr, errs.ToAggregate().Error()) + } + }) } } @@ -462,6 +543,7 @@ func TestValidateStatefulSetUpdate(t *testing.T) { }, }, } + type psUpdateTest struct { old apps.StatefulSet update apps.StatefulSet @@ -529,13 +611,17 @@ func TestValidateStatefulSetUpdate(t *testing.T) { }, }, } - for _, successCase := range successCases { - successCase.old.ObjectMeta.ResourceVersion = "1" - successCase.update.ObjectMeta.ResourceVersion = "1" - if errs := ValidateStatefulSetUpdate(&successCase.update, &successCase.old); len(errs) != 0 { - t.Errorf("expected success: %v", errs) - } + + for i, successCase := range successCases { + t.Run("success case "+strconv.Itoa(i), func(t *testing.T) { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + if errs := ValidateStatefulSetUpdate(&successCase.update, &successCase.old); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + }) } + errorCases := map[string]psUpdateTest{ "more than one read/write": { old: apps.StatefulSet{ @@ -656,10 +742,13 @@ func TestValidateStatefulSetUpdate(t *testing.T) { }, }, } + for testName, errorCase := range errorCases { - if errs := ValidateStatefulSetUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { - t.Errorf("expected failure: %s", testName) - } + t.Run(testName, 
func(t *testing.T) { + if errs := ValidateStatefulSetUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + } + }) } } @@ -715,13 +804,15 @@ func TestValidateControllerRevision(t *testing.T) { } for name, tc := range tests { - errs := ValidateControllerRevision(&tc.history) - if tc.isValid && len(errs) > 0 { - t.Errorf("%v: unexpected error: %v", name, errs) - } - if !tc.isValid && len(errs) == 0 { - t.Errorf("%v: unexpected non-error", name) - } + t.Run(name, func(t *testing.T) { + errs := ValidateControllerRevision(&tc.history) + if tc.isValid && len(errs) > 0 { + t.Errorf("%v: unexpected error: %v", name, errs) + } + if !tc.isValid && len(errs) == 0 { + t.Errorf("%v: unexpected non-error", name) + } + }) } } @@ -809,12 +900,14 @@ func TestValidateControllerRevisionUpdate(t *testing.T) { } for _, tc := range cases { - errs := ValidateControllerRevisionUpdate(&tc.newHistory, &tc.oldHistory) - if tc.isValid && len(errs) > 0 { - t.Errorf("%v: unexpected error: %v", tc.name, errs) - } - if !tc.isValid && len(errs) == 0 { - t.Errorf("%v: unexpected non-error", tc.name) - } + t.Run(tc.name, func(t *testing.T) { + errs := ValidateControllerRevisionUpdate(&tc.newHistory, &tc.oldHistory) + if tc.isValid && len(errs) > 0 { + t.Errorf("%v: unexpected error: %v", tc.name, errs) + } + if !tc.isValid && len(errs) == 0 { + t.Errorf("%v: unexpected non-error", tc.name) + } + }) } } From f0853342b2ff6ba3831fdfe9a33c4f67e938a8f0 Mon Sep 17 00:00:00 2001 From: langyenan Date: Wed, 22 Nov 2017 17:16:37 +0800 Subject: [PATCH 081/794] refactor(service-controller/gce/ensureInternalBackendService): delete unused variable --- pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go index ff14cbacdbe..d4acf9fa031 100644 --- 
a/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go @@ -525,11 +525,6 @@ func (gce *GCECloud) ensureInternalBackendService(name, description string, affi glog.V(2).Infof("ensureInternalBackendService: created backend service %v successfully", name) return nil } - // Check existing backend service - existingIGLinks := sets.NewString() - for _, be := range bs.Backends { - existingIGLinks.Insert(be.Group) - } if backendSvcEqual(expectedBS, bs) { return nil From b31cffd76d161cf63b996e7c31b222f0dbd686a1 Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Wed, 22 Nov 2017 11:15:59 +0100 Subject: [PATCH 082/794] Fix typo. --- test/e2e/scalability/density.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index f4b9c0b8825..cb7d7c4c091 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -135,7 +135,7 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC controllerMem = math.MaxUint64 schedulerCPU := math.MaxFloat32 schedulerMem = math.MaxUint64 - framework.Logf("Setting resource constraings for provider: %s", framework.TestContext.Provider) + framework.Logf("Setting resource constraints for provider: %s", framework.TestContext.Provider) if framework.ProviderIs("kubemark") { if numNodes <= 5 { apiserverCPU = 0.35 From 9c02d7e38c124495bf0561f0648e12e4b7224ab8 Mon Sep 17 00:00:00 2001 From: supereagle Date: Sat, 18 Nov 2017 15:00:21 +0800 Subject: [PATCH 083/794] use extensions client with explicit version --- pkg/controller/replication/conversion.go | 2 +- pkg/kubectl/cmd/drain.go | 6 +++--- pkg/kubectl/rollout_status.go | 4 ++-- test/e2e/apimachinery/initializers.go | 2 +- test/e2e/apps/disruption.go | 2 +- test/e2e/auth/audit.go | 14 +++++++------- .../autoscaling/custom_metrics_autoscaling.go | 10 +++++----- test/e2e/framework/deployment_util.go | 2 
+- test/e2e/scheduling/resource_quota.go | 4 ++-- test/e2e/storage/vsphere_volume_node_poweroff.go | 2 +- test/e2e/upgrades/apps/replicasets.go | 4 ++-- test/e2e/upgrades/kube_proxy_migration.go | 2 +- test/integration/apiserver/apiserver_test.go | 4 ++-- test/integration/replicaset/replicaset_test.go | 16 ++++++++-------- test/utils/deployment.go | 6 +++--- test/utils/replicaset.go | 4 ++-- 16 files changed, 42 insertions(+), 42 deletions(-) diff --git a/pkg/controller/replication/conversion.go b/pkg/controller/replication/conversion.go index 67c1847a95e..044c1b24081 100644 --- a/pkg/controller/replication/conversion.go +++ b/pkg/controller/replication/conversion.go @@ -187,7 +187,7 @@ func (c clientsetAdapter) ExtensionsV1beta1() extensionsv1beta1client.Extensions } func (c clientsetAdapter) Extensions() extensionsv1beta1client.ExtensionsV1beta1Interface { - return conversionExtensionsClient{c.Interface, c.Interface.Extensions()} + return conversionExtensionsClient{c.Interface, c.Interface.ExtensionsV1beta1()} } func (c clientsetAdapter) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 910654f6f6a..adb1e48a577 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -333,11 +333,11 @@ func (o *DrainOptions) getController(namespace string, controllerRef *metav1.Own case "ReplicationController": return o.client.Core().ReplicationControllers(namespace).Get(controllerRef.Name, metav1.GetOptions{}) case "DaemonSet": - return o.client.Extensions().DaemonSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) + return o.client.ExtensionsV1beta1().DaemonSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) case "Job": return o.client.Batch().Jobs(namespace).Get(controllerRef.Name, metav1.GetOptions{}) case "ReplicaSet": - return o.client.Extensions().ReplicaSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) + return 
o.client.ExtensionsV1beta1().ReplicaSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) case "StatefulSet": return o.client.AppsV1beta1().StatefulSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) } @@ -404,7 +404,7 @@ func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal) if controllerRef == nil || controllerRef.Kind != "DaemonSet" { return true, nil, nil } - if _, err := o.client.Extensions().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil { + if _, err := o.client.ExtensionsV1beta1().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil { return false, nil, &fatal{err.Error()} } if !o.IgnoreDaemonsets { diff --git a/pkg/kubectl/rollout_status.go b/pkg/kubectl/rollout_status.go index 2f58cb28eec..24fcdf568f5 100644 --- a/pkg/kubectl/rollout_status.go +++ b/pkg/kubectl/rollout_status.go @@ -38,9 +38,9 @@ type StatusViewer interface { func StatusViewerFor(kind schema.GroupKind, c kubernetes.Interface) (StatusViewer, error) { switch kind { case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), apps.Kind("Deployment"): - return &DeploymentStatusViewer{c.Extensions()}, nil + return &DeploymentStatusViewer{c.ExtensionsV1beta1()}, nil case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), apps.Kind("DaemonSet"): - return &DaemonSetStatusViewer{c.Extensions()}, nil + return &DaemonSetStatusViewer{c.ExtensionsV1beta1()}, nil case apps.Kind("StatefulSet"): return &StatefulSetStatusViewer{c.AppsV1beta1()}, nil } diff --git a/test/e2e/apimachinery/initializers.go b/test/e2e/apimachinery/initializers.go index 1ef358ba360..a38c6c7256b 100644 --- a/test/e2e/apimachinery/initializers.go +++ b/test/e2e/apimachinery/initializers.go @@ -405,7 +405,7 @@ func cleanupInitializer(c clientset.Interface, initializerConfigName, initialize // waits till the RS status.observedGeneration matches metadata.generation. 
func waitForRSObservedGeneration(c clientset.Interface, ns, name string, generation int64) error { return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{}) + rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index f86f83cb434..59a35edbec6 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -330,6 +330,6 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu }, } - _, err := cs.Extensions().ReplicaSets(ns).Create(rs) + _, err := cs.ExtensionsV1beta1().ReplicaSets(ns).Create(rs) framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns) } diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index 1f516c62186..51c7c4b345c 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -199,27 +199,27 @@ var _ = SIGDescribe("Advanced Audit [Feature:Audit]", func() { podLabels := map[string]string{"name": "audit-deployment-pod"} d := framework.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), extensions.RecreateDeploymentStrategyType) - _, err := f.ClientSet.Extensions().Deployments(namespace).Create(d) + _, err := f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Create(d) framework.ExpectNoError(err, "failed to create audit-deployment") - _, err = f.ClientSet.Extensions().Deployments(namespace).Get(d.Name, metav1.GetOptions{}) + _, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Get(d.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get audit-deployment") - deploymentChan, err := f.ClientSet.Extensions().Deployments(namespace).Watch(watchOptions) + deploymentChan, err := f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Watch(watchOptions) 
framework.ExpectNoError(err, "failed to create watch for deployments") for range deploymentChan.ResultChan() { } - _, err = f.ClientSet.Extensions().Deployments(namespace).Update(d) + _, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Update(d) framework.ExpectNoError(err, "failed to update audit-deployment") - _, err = f.ClientSet.Extensions().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch) + _, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch) framework.ExpectNoError(err, "failed to patch deployment") - _, err = f.ClientSet.Extensions().Deployments(namespace).List(metav1.ListOptions{}) + _, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).List(metav1.ListOptions{}) framework.ExpectNoError(err, "failed to create list deployments") - err = f.ClientSet.Extensions().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{}) + err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete deployments") }, []auditEvent{ diff --git a/test/e2e/autoscaling/custom_metrics_autoscaling.go b/test/e2e/autoscaling/custom_metrics_autoscaling.go index 24c5a09e584..9c83da1ca80 100644 --- a/test/e2e/autoscaling/custom_metrics_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_autoscaling.go @@ -113,7 +113,7 @@ func testHPA(f *framework.Framework, kubeClient clientset.Interface) { } func createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) error { - _, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.Name, 2, 100)) + _, err := cs.ExtensionsV1beta1().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.Name, 2, 100)) if err != nil { return err } @@ -121,14 +121,14 @@ func 
createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) er if err != nil { return err } - _, err = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.Name, 2, 100)) + _, err = cs.ExtensionsV1beta1().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.Name, 2, 100)) return err } func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface) { - _ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterDeployment, &metav1.DeleteOptions{}) + _ = cs.ExtensionsV1beta1().Deployments(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterDeployment, &metav1.DeleteOptions{}) _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterPod, &metav1.DeleteOptions{}) - _ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(dummyDeploymentName, &metav1.DeleteOptions{}) + _ = cs.ExtensionsV1beta1().Deployments(f.Namespace.ObjectMeta.Name).Delete(dummyDeploymentName, &metav1.DeleteOptions{}) } func createPodsHPA(f *framework.Framework, cs clientset.Interface) error { @@ -196,7 +196,7 @@ func createObjectHPA(f *framework.Framework, cs clientset.Interface) error { func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) { interval := 20 * time.Second err := wait.PollImmediate(interval, timeout, func() (bool, error) { - deployment, err := cs.Extensions().Deployments(namespace).Get(deploymentName, metav1.GetOptions{}) + deployment, err := cs.ExtensionsV1beta1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get replication controller %s: %v", deployment, err) } diff --git a/test/e2e/framework/deployment_util.go b/test/e2e/framework/deployment_util.go index 8a249d1e131..60e498697a1 100644 --- 
a/test/e2e/framework/deployment_util.go +++ b/test/e2e/framework/deployment_util.go @@ -215,7 +215,7 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) { deploymentSpec := MakeDeployment(replicas, podLabels, namespace, pvclaims, false, command) - deployment, err := client.Extensions().Deployments(namespace).Create(deploymentSpec) + deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(deploymentSpec) if err != nil { return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err) } diff --git a/test/e2e/scheduling/resource_quota.go b/test/e2e/scheduling/resource_quota.go index 21a02a6899d..b06438d1122 100644 --- a/test/e2e/scheduling/resource_quota.go +++ b/test/e2e/scheduling/resource_quota.go @@ -503,7 +503,7 @@ var _ = SIGDescribe("ResourceQuota", func() { By("Creating a ReplicaSet") replicaSet := newTestReplicaSetForQuota("test-rs", "nginx", 0) - replicaSet, err = f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(replicaSet) + replicaSet, err = f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(replicaSet) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures replicaset creation") @@ -513,7 +513,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting a ReplicaSet") - err = f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Delete(replicaSet.Name, nil) + err = f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Delete(replicaSet.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") diff --git a/test/e2e/storage/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere_volume_node_poweroff.go index b902872edce..1a2f37e7560 100644 --- 
a/test/e2e/storage/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere_volume_node_poweroff.go @@ -103,7 +103,7 @@ var _ = SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func( By("Creating a Deployment") deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, namespace, pvclaims, "") - defer client.Extensions().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{}) + defer client.ExtensionsV1beta1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{}) By("Get pod from the deployement") podList, err := framework.GetPodsForDeployment(client, deployment) diff --git a/test/e2e/upgrades/apps/replicasets.go b/test/e2e/upgrades/apps/replicasets.go index a9cea7555a0..88d46c2ae1e 100644 --- a/test/e2e/upgrades/apps/replicasets.go +++ b/test/e2e/upgrades/apps/replicasets.go @@ -54,7 +54,7 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) { By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns)) replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage) - rs, err := c.Extensions().ReplicaSets(ns).Create(replicaSet) + rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(replicaSet) framework.ExpectNoError(err) By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName)) @@ -67,7 +67,7 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) { func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { c := f.ClientSet ns := f.Namespace.Name - rsClient := c.Extensions().ReplicaSets(ns) + rsClient := c.ExtensionsV1beta1().ReplicaSets(ns) // Block until upgrade is done By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName)) diff --git a/test/e2e/upgrades/kube_proxy_migration.go b/test/e2e/upgrades/kube_proxy_migration.go index aad911144b2..e4c11c34ce3 100644 --- 
a/test/e2e/upgrades/kube_proxy_migration.go +++ b/test/e2e/upgrades/kube_proxy_migration.go @@ -216,5 +216,5 @@ func getKubeProxyStaticPods(c clientset.Interface) (*v1.PodList, error) { func getKubeProxyDaemonSet(c clientset.Interface) (*extensions.DaemonSetList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{clusterAddonLabelKey: kubeProxyLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - return c.Extensions().DaemonSets(metav1.NamespaceSystem).List(listOpts) + return c.ExtensionsV1beta1().DaemonSets(metav1.NamespaceSystem).List(listOpts) } diff --git a/test/integration/apiserver/apiserver_test.go b/test/integration/apiserver/apiserver_test.go index c876961758f..8a931e9cbbb 100644 --- a/test/integration/apiserver/apiserver_test.go +++ b/test/integration/apiserver/apiserver_test.go @@ -123,7 +123,7 @@ func Test202StatusCode(t *testing.T) { ns := framework.CreateTestingNamespace("status-code", s, t) defer framework.DeleteTestingNamespace(ns, s, t) - rsClient := clientSet.Extensions().ReplicaSets(ns.Name) + rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name) // 1. Create the resource without any finalizer and then delete it without setting DeleteOptions. // Verify that server returns 200 in this case. @@ -173,7 +173,7 @@ func TestAPIListChunking(t *testing.T) { ns := framework.CreateTestingNamespace("list-paging", s, t) defer framework.DeleteTestingNamespace(ns, s, t) - rsClient := clientSet.Extensions().ReplicaSets(ns.Name) + rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name) for i := 0; i < 4; i++ { rs := newRS(ns.Name) diff --git a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go index 91ae5ce7ffc..11dc93896e5 100644 --- a/test/integration/replicaset/replicaset_test.go +++ b/test/integration/replicaset/replicaset_test.go @@ -115,7 +115,7 @@ func newMatchingPod(podName, namespace string) *v1.Pod { // sets and pods are rsNum and podNum. 
It returns error if the // communication with the API server fails. func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) { - rsClient := clientSet.Extensions().ReplicaSets(namespace) + rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(namespace) podClient := clientSet.CoreV1().Pods(namespace) pods, err := podClient.List(metav1.ListOptions{}) if err != nil { @@ -199,7 +199,7 @@ func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.R var createdRSs []*v1beta1.ReplicaSet var createdPods []*v1.Pod for _, rs := range rss { - createdRS, err := clientSet.Extensions().ReplicaSets(rs.Namespace).Create(rs) + createdRS, err := clientSet.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Create(rs) if err != nil { t.Fatalf("Failed to create replica set %s: %v", rs.Name, err) } @@ -225,7 +225,7 @@ func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.Repli // Update .Spec.Replicas to replicas and verify .Status.Replicas is changed accordingly func scaleRS(t *testing.T, c clientset.Interface, rs *v1beta1.ReplicaSet, replicas int32) { - rsClient := c.Extensions().ReplicaSets(rs.Namespace) + rsClient := c.ExtensionsV1beta1().ReplicaSets(rs.Namespace) rs = updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) { *rs.Spec.Replicas = replicas }) @@ -360,7 +360,7 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rs *v1beta1.ReplicaSet, replicas int32) { ns := rs.Namespace - rsClient := c.Extensions().ReplicaSets(ns) + rsClient := c.ExtensionsV1beta1().ReplicaSets(ns) newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to obtain rs %s: %v", rs.Name, err) @@ -453,7 +453,7 @@ func TestAdoption(t *testing.T) { ns := framework.CreateTestingNamespace(fmt.Sprintf("rs-adoption-%d", i), s, t) defer 
framework.DeleteTestingNamespace(ns, s, t) - rsClient := clientSet.Extensions().ReplicaSets(ns.Name) + rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name) podClient := clientSet.CoreV1().Pods(ns.Name) const rsName = "rs" rs, err := rsClient.Create(newRS(rsName, ns.Name, 1)) @@ -548,7 +548,7 @@ func TestSpecReplicasChange(t *testing.T) { // Add a template annotation change to test RS's status does update // without .Spec.Replicas change - rsClient := c.Extensions().ReplicaSets(ns.Name) + rsClient := c.ExtensionsV1beta1().ReplicaSets(ns.Name) var oldGeneration int64 newRS := updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) { oldGeneration = rs.Generation @@ -818,7 +818,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) { // by setting LastTransitionTime to more than 3600 seconds ago setPodsReadyCondition(t, c, thirdPodList, v1.ConditionTrue, time.Now().Add(-120*time.Minute)) - rsClient := c.Extensions().ReplicaSets(ns.Name) + rsClient := c.ExtensionsV1beta1().ReplicaSets(ns.Name) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{}) if err != nil { @@ -897,7 +897,7 @@ func TestFullyLabeledReplicas(t *testing.T) { waitRSStable(t, c, rs) // Change RS's template labels to have extra labels, but not its selector - rsClient := c.Extensions().ReplicaSets(ns.Name) + rsClient := c.ExtensionsV1beta1().ReplicaSets(ns.Name) updateRS(t, rsClient, rs.Name, func(rs *v1beta1.ReplicaSet) { rs.Spec.Template.Labels = extraLabelMap }) diff --git a/test/utils/deployment.go b/test/utils/deployment.go index 9035c26805d..4fe4b1b1e96 100644 --- a/test/utils/deployment.go +++ b/test/utils/deployment.go @@ -259,12 +259,12 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, var updateErr error pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - if deployment, err = c.Extensions().Deployments(namespace).Get(name, 
metav1.GetOptions{}); err != nil { + if deployment, err = c.ExtensionsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(deployment) - if deployment, err = c.Extensions().Deployments(namespace).Update(deployment); err == nil { + if deployment, err = c.ExtensionsV1beta1().Deployments(namespace).Update(deployment); err == nil { logf("Updating deployment %s", name) return true, nil } @@ -279,7 +279,7 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error { return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { - return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + return c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) }, desiredGeneration, 2*time.Second, 1*time.Minute) } diff --git a/test/utils/replicaset.go b/test/utils/replicaset.go index adf30c35d06..c04a1fbdd9d 100644 --- a/test/utils/replicaset.go +++ b/test/utils/replicaset.go @@ -34,12 +34,12 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, var updateErr error pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - if rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil { + if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. 
applyUpdate(rs) - if rs, err = c.Extensions().ReplicaSets(namespace).Update(rs); err == nil { + if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Update(rs); err == nil { logf("Updating replica set %q", name) return true, nil } From a6ee55c4a7d936f3fd9a6cac612dbe1bff389a80 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Wed, 22 Nov 2017 16:55:39 +0200 Subject: [PATCH 084/794] Lowercasing the hostname on the known nodes entry --- .../layers/kubernetes-worker/reactive/kubernetes_worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index 5b875c4d25f..4e233cb8991 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -844,14 +844,14 @@ def request_kubelet_and_proxy_credentials(kube_control): # The kube-cotrol interface is created to support RBAC. 
# At this point we might as well do the right thing and return the hostname # even if it will only be used when we enable RBAC - nodeuser = 'system:node:{}'.format(gethostname()) + nodeuser = 'system:node:{}'.format(gethostname().lower()) kube_control.set_auth_request(nodeuser) @when('kube-control.connected') def catch_change_in_creds(kube_control): """Request a service restart in case credential updates were detected.""" - nodeuser = 'system:node:{}'.format(gethostname()) + nodeuser = 'system:node:{}'.format(gethostname().lower()) creds = kube_control.get_auth_credentials(nodeuser) if creds \ and data_changed('kube-control.creds', creds) \ From d0fac451e0b2aafed3210fdca25c008a44d5da46 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Wed, 22 Nov 2017 09:38:17 -0800 Subject: [PATCH 085/794] hack: fix godep license parsing for gopkg.in packages The script incorrectly thinks that `gopkg.in/square/go-jose.v2/cipher` doesn't have a license because it parses `gopkg.in/square` as the root of the repo, even though `gopkg.in/square/go-jose.v2` is the root. Add special handling for gopkg.in packages by grep'ing for the version that gopkg.in appends to the package name. --- hack/update-godep-licenses.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hack/update-godep-licenses.sh b/hack/update-godep-licenses.sh index 93f10f7df2a..8d5a38c20ac 100755 --- a/hack/update-godep-licenses.sh +++ b/hack/update-godep-licenses.sh @@ -75,6 +75,14 @@ process_content () { go4.org/*) package_root=$(echo ${package} |awk -F/ '{ print $1 }') ;; + gopkg.in/*) + # Root of gopkg.in package always ends with '.v(number)' and may contain + # more than two path elements. 
For example: + # - gopkg.in/yaml.v2 + # - gopkg.in/inf.v0 + # - gopkg.in/square/go-jose.v2 + package_root=$(echo ${package} |grep -oh '.*\.v[0-9]') + ;; *) package_root=$(echo ${package} |awk -F/ '{ print $1"/"$2 }') ;; From 4f185e6b7f27c01db42b9fe1f30d1f7e3b14187d Mon Sep 17 00:00:00 2001 From: Connor Doyle Date: Tue, 21 Nov 2017 22:12:34 -0800 Subject: [PATCH 086/794] CPU Manager panics on state initialization error. - Update unit tests accordingly. - Minor related cleanup in state_file.go --- pkg/kubelet/cm/cpumanager/state/state_file.go | 87 +++++++++--------- .../cm/cpumanager/state/state_file_test.go | 89 ++++++++++--------- 2 files changed, 93 insertions(+), 83 deletions(-) diff --git a/pkg/kubelet/cm/cpumanager/state/state_file.go b/pkg/kubelet/cm/cpumanager/state/state_file.go index 20c7763350d..6c2353cf10f 100644 --- a/pkg/kubelet/cm/cpumanager/state/state_file.go +++ b/pkg/kubelet/cm/cpumanager/state/state_file.go @@ -51,9 +51,10 @@ func NewFileState(filePath string, policyName string) State { if err := stateFile.tryRestoreState(); err != nil { // could not restore state, init new state file - glog.Infof("[cpumanager] state file: initializing empty state file - reason: \"%s\"", err) - stateFile.cache.ClearState() - stateFile.storeState() + msg := fmt.Sprintf("[cpumanager] state file: unable to restore state from disk (%s)\n", err.Error()) + + "Panicking because we cannot guarantee sane CPU affinity for existing containers.\n" + + fmt.Sprintf("Please drain this node and delete the CPU manager state file \"%s\" before restarting Kubelet.", stateFile.stateFilePath) + panic(msg) } return stateFile @@ -73,45 +74,51 @@ func (sf *stateFile) tryRestoreState() error { var content []byte - if content, err = ioutil.ReadFile(sf.stateFilePath); os.IsNotExist(err) { - // Create file - if _, err = os.Create(sf.stateFilePath); err != nil { - glog.Errorf("[cpumanager] state file: unable to create state file \"%s\":%s", sf.stateFilePath, err.Error()) - panic("[cpumanager] 
state file not created") - } - glog.Infof("[cpumanager] state file: created empty state file \"%s\"", sf.stateFilePath) - } else { - // File exists - try to read - var readState stateFileData + content, err = ioutil.ReadFile(sf.stateFilePath) - if err = json.Unmarshal(content, &readState); err != nil { - glog.Warningf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath) - return err - } - - if sf.policyName != readState.PolicyName { - return fmt.Errorf("policy configured \"%s\" != policy from state file \"%s\"", sf.policyName, readState.PolicyName) - } - - if tmpDefaultCPUSet, err = cpuset.Parse(readState.DefaultCPUSet); err != nil { - glog.Warningf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet) - return err - } - - for containerID, cpuString := range readState.Entries { - if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil { - glog.Warningf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString) - return err - } - tmpAssignments[containerID] = tmpContainerCPUSet - } - - sf.cache.SetDefaultCPUSet(tmpDefaultCPUSet) - sf.cache.SetCPUAssignments(tmpAssignments) - - glog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath) - glog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String()) + // If the state file does not exist or has zero length, write a new file. + if os.IsNotExist(err) || len(content) == 0 { + sf.storeState() + glog.Infof("[cpumanager] state file: created new state file \"%s\"", sf.stateFilePath) + return nil } + + // Fail on any other file read error. + if err != nil { + return err + } + + // File exists; try to read it. 
+ var readState stateFileData + + if err = json.Unmarshal(content, &readState); err != nil { + glog.Errorf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath) + return err + } + + if sf.policyName != readState.PolicyName { + return fmt.Errorf("policy configured \"%s\" != policy from state file \"%s\"", sf.policyName, readState.PolicyName) + } + + if tmpDefaultCPUSet, err = cpuset.Parse(readState.DefaultCPUSet); err != nil { + glog.Errorf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet) + return err + } + + for containerID, cpuString := range readState.Entries { + if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil { + glog.Errorf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString) + return err + } + tmpAssignments[containerID] = tmpContainerCPUSet + } + + sf.cache.SetDefaultCPUSet(tmpDefaultCPUSet) + sf.cache.SetCPUAssignments(tmpAssignments) + + glog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath) + glog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String()) + return nil } diff --git a/pkg/kubelet/cm/cpumanager/state/state_file_test.go b/pkg/kubelet/cm/cpumanager/state/state_file_test.go index dde616555e1..93dd3910ad2 100644 --- a/pkg/kubelet/cm/cpumanager/state/state_file_test.go +++ b/pkg/kubelet/cm/cpumanager/state/state_file_test.go @@ -77,33 +77,31 @@ func TestFileStateTryRestore(t *testing.T) { stateFileContent string policyName string expErr string + expPanic bool expectedState *stateMemory }{ { - "Invalid JSON - empty file", + "Invalid JSON - one byte file", "\n", "none", - "state file: could not unmarshal, corrupted state file", - &stateMemory{ - assignments: ContainerCPUAssignments{}, - defaultCPUSet: cpuset.NewCPUSet(), - }, + "[cpumanager] state file: unable to restore state from disk (unexpected end of 
JSON input)", + true, + &stateMemory{}, }, { "Invalid JSON - invalid content", "{", "none", - "state file: could not unmarshal, corrupted state file", - &stateMemory{ - assignments: ContainerCPUAssignments{}, - defaultCPUSet: cpuset.NewCPUSet(), - }, + "[cpumanager] state file: unable to restore state from disk (unexpected end of JSON input)", + true, + &stateMemory{}, }, { "Try restore defaultCPUSet only", `{"policyName": "none", "defaultCpuSet": "4-6"}`, "none", "", + false, &stateMemory{ assignments: ContainerCPUAssignments{}, defaultCPUSet: cpuset.NewCPUSet(4, 5, 6), @@ -113,11 +111,9 @@ func TestFileStateTryRestore(t *testing.T) { "Try restore defaultCPUSet only - invalid name", `{"policyName": "none", "defaultCpuSet" "4-6"}`, "none", - "", - &stateMemory{ - assignments: ContainerCPUAssignments{}, - defaultCPUSet: cpuset.NewCPUSet(), - }, + `[cpumanager] state file: unable to restore state from disk (invalid character '"' after object key)`, + true, + &stateMemory{}, }, { "Try restore assignments only", @@ -130,6 +126,7 @@ func TestFileStateTryRestore(t *testing.T) { }`, "none", "", + false, &stateMemory{ assignments: ContainerCPUAssignments{ "container1": cpuset.NewCPUSet(4, 5, 6), @@ -146,21 +143,17 @@ func TestFileStateTryRestore(t *testing.T) { "entries": {} }`, "B", - "policy configured \"B\" != policy from state file \"A\"", - &stateMemory{ - assignments: ContainerCPUAssignments{}, - defaultCPUSet: cpuset.NewCPUSet(), - }, + `[cpumanager] state file: unable to restore state from disk (policy configured "B" != policy from state file "A")`, + true, + &stateMemory{}, }, { "Try restore invalid assignments", `{"entries": }`, "none", - "state file: could not unmarshal, corrupted state file", - &stateMemory{ - assignments: ContainerCPUAssignments{}, - defaultCPUSet: cpuset.NewCPUSet(), - }, + "[cpumanager] state file: unable to restore state from disk (invalid character '}' looking for beginning of value)", + true, + &stateMemory{}, }, { "Try restore valid 
file", @@ -174,6 +167,7 @@ func TestFileStateTryRestore(t *testing.T) { }`, "none", "", + false, &stateMemory{ assignments: ContainerCPUAssignments{ "container1": cpuset.NewCPUSet(4, 5, 6), @@ -189,11 +183,9 @@ func TestFileStateTryRestore(t *testing.T) { "defaultCpuSet": "2-sd" }`, "none", - "state file: could not parse state file", - &stateMemory{ - assignments: ContainerCPUAssignments{}, - defaultCPUSet: cpuset.NewCPUSet(), - }, + `[cpumanager] state file: unable to restore state from disk (strconv.Atoi: parsing "sd": invalid syntax)`, + true, + &stateMemory{}, }, { "Try restore un-parsable assignments", @@ -206,17 +198,16 @@ func TestFileStateTryRestore(t *testing.T) { } }`, "none", - "state file: could not parse state file", - &stateMemory{ - assignments: ContainerCPUAssignments{}, - defaultCPUSet: cpuset.NewCPUSet(), - }, + `[cpumanager] state file: unable to restore state from disk (strconv.Atoi: parsing "p": invalid syntax)`, + true, + &stateMemory{}, }, { - "TryRestoreState creates empty state file", + "tryRestoreState creates empty state file", "", "none", "", + false, &stateMemory{ assignments: ContainerCPUAssignments{}, defaultCPUSet: cpuset.NewCPUSet(), @@ -226,11 +217,23 @@ func TestFileStateTryRestore(t *testing.T) { for idx, tc := range testCases { t.Run(tc.description, func(t *testing.T) { + defer func() { + if tc.expPanic { + r := recover() + panicMsg := r.(string) + if !strings.HasPrefix(panicMsg, tc.expErr) { + t.Fatalf(`expected panic "%s" but got "%s"`, tc.expErr, panicMsg) + } else { + t.Logf(`got expected panic "%s"`, panicMsg) + } + } + }() + sfilePath, err := ioutil.TempFile("/tmp", fmt.Sprintf("cpumanager_state_file_test_%d", idx)) if err != nil { t.Errorf("cannot create temporary file: %q", err.Error()) } - // Don't create state file, let TryRestoreState figure out that is should create + // Don't create state file, let tryRestoreState figure out that is should create if tc.stateFileContent != "" { writeToStateFile(sfilePath.Name(), 
tc.stateFileContent) } @@ -245,11 +248,11 @@ func TestFileStateTryRestore(t *testing.T) { if tc.expErr != "" { if logData.String() != "" { if !strings.Contains(logData.String(), tc.expErr) { - t.Errorf("TryRestoreState() error = %v, wantErr %v", logData.String(), tc.expErr) + t.Errorf("tryRestoreState() error = %v, wantErr %v", logData.String(), tc.expErr) return } } else { - t.Errorf("TryRestoreState() error = nil, wantErr %v", tc.expErr) + t.Errorf("tryRestoreState() error = nil, wantErr %v", tc.expErr) return } } @@ -268,7 +271,7 @@ func TestFileStateTryRestorePanic(t *testing.T) { }{ "Panic creating file", true, - "[cpumanager] state file not created", + "[cpumanager] state file not written", } t.Run(testCase.description, func(t *testing.T) { @@ -277,10 +280,10 @@ func TestFileStateTryRestorePanic(t *testing.T) { if err := recover(); err != nil { if testCase.wantPanic { if testCase.panicMessage == err { - t.Logf("TryRestoreState() got expected panic = %v", err) + t.Logf("tryRestoreState() got expected panic = %v", err) return } - t.Errorf("TryRestoreState() unexpected panic = %v, wantErr %v", err, testCase.panicMessage) + t.Errorf("tryRestoreState() unexpected panic = %v, wantErr %v", err, testCase.panicMessage) } } }() From 4a0e242b3c16382ad30d316bb2c9d157f7ab8c33 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Wed, 22 Nov 2017 15:04:58 -0800 Subject: [PATCH 087/794] Add NODE_LOCAL_SSDS_EXT to config-test --- cluster/gce/config-test.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index f19b6a6925f..159d88e28d3 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -36,6 +36,11 @@ MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)} NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard} NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB} NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0} +# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices +# Format 
of this variable will be "#,scsi/nvme,block/fs" you can specify multiple +# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block" +# is a request for 2 SCSI formatted and mounted SSDs and 1 NVMe block device SSD. +NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-} NODE_ACCELERATORS=${NODE_ACCELERATORS:-""} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true} KUBE_APISERVER_REQUEST_TIMEOUT=300 From 1fe05ac29a0e3a67d58a80d26c77829e90bb6073 Mon Sep 17 00:00:00 2001 From: supereagle Date: Thu, 23 Nov 2017 09:32:13 +0800 Subject: [PATCH 088/794] Use batch client with explicit version --- pkg/kubectl/cmd/drain.go | 2 +- test/e2e/apimachinery/garbage_collector.go | 4 ++-- test/e2e/storage/persistent_volumes-local.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 910654f6f6a..7249a521b26 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -335,7 +335,7 @@ func (o *DrainOptions) getController(namespace string, controllerRef *metav1.Own case "DaemonSet": return o.client.Extensions().DaemonSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) case "Job": - return o.client.Batch().Jobs(namespace).Get(controllerRef.Name, metav1.GetOptions{}) + return o.client.BatchV1().Jobs(namespace).Get(controllerRef.Name, metav1.GetOptions{}) case "ReplicaSet": return o.client.Extensions().ReplicaSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) case "StatefulSet": diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index e52ca37348b..4de503dba30 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -255,7 +255,7 @@ func verifyRemainingCronJobsJobsPods(f *framework.Framework, clientSet clientset By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", cjNum, len(cronJobs.Items))) } - jobs, err := 
f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list jobs: %v", err) } @@ -974,7 +974,7 @@ var _ = SIGDescribe("Garbage collector", func() { By("Wait for the CronJob to create new Job") err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) { - jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list jobs: %v", err) } diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 5dd4cba67d9..9985b766a5e 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -1000,7 +1000,7 @@ func createBootstrapperJob(config *localTestConfig) { }, }, } - job, err := config.client.Batch().Jobs(config.ns).Create(bootJob) + job, err := config.client.BatchV1().Jobs(config.ns).Create(bootJob) Expect(err).NotTo(HaveOccurred()) err = framework.WaitForJobFinish(config.client, config.ns, job.Name, 1) Expect(err).NotTo(HaveOccurred()) From 08b0883734ab39b3b789c225b14e62f5c25be660 Mon Sep 17 00:00:00 2001 From: yanxuean Date: Thu, 23 Nov 2017 09:52:56 +0800 Subject: [PATCH 089/794] missing format args in apiserver/pkg/endpoints Signed-off-by: yanxuean --- .../src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go index eda01358252..27c0b67c555 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go @@ -1958,11 +1958,11 @@ func TestGetTable(t *testing.T) { } 
obj, _, err := extractBodyObject(resp, unstructured.UnstructuredJSONScheme) if err != nil { - t.Fatalf("%d: unexpected body read error: %v", err) + t.Fatalf("%d: unexpected body read error: %v", i, err) } gvk := schema.GroupVersionKind{Version: "v1", Kind: "Status"} if obj.GetObjectKind().GroupVersionKind() != gvk { - t.Fatalf("%d: unexpected error body: %#v", obj) + t.Fatalf("%d: unexpected error body: %#v", i, obj) } return } @@ -2083,12 +2083,12 @@ func TestGetPartialObjectMetadata(t *testing.T) { } obj, _, err := extractBodyObject(resp, unstructured.UnstructuredJSONScheme) if err != nil { - t.Errorf("%d: unexpected body read error: %v", err) + t.Errorf("%d: unexpected body read error: %v", i, err) continue } gvk := schema.GroupVersionKind{Version: "v1", Kind: "Status"} if obj.GetObjectKind().GroupVersionKind() != gvk { - t.Errorf("%d: unexpected error body: %#v", obj) + t.Errorf("%d: unexpected error body: %#v", i, obj) } continue } From a90b4c8c412f3023fd3fdd892e2f892b34004708 Mon Sep 17 00:00:00 2001 From: tanshanshan Date: Mon, 23 Oct 2017 12:09:21 +0800 Subject: [PATCH 090/794] HugePages require cpu or memory --- pkg/apis/core/helper/qos/BUILD | 1 - pkg/apis/core/helper/qos/qos.go | 6 +-- pkg/apis/core/v1/helper/qos/BUILD | 2 +- pkg/apis/core/v1/helper/qos/qos.go | 7 +-- pkg/apis/core/v1/helper/qos/qos_test.go | 6 +-- pkg/apis/core/validation/validation.go | 27 +++++++++++- pkg/apis/core/validation/validation_test.go | 47 +++++++++++++++++---- 7 files changed, 75 insertions(+), 21 deletions(-) diff --git a/pkg/apis/core/helper/qos/BUILD b/pkg/apis/core/helper/qos/BUILD index a029fdb5b8f..c2a1e9010b8 100644 --- a/pkg/apis/core/helper/qos/BUILD +++ b/pkg/apis/core/helper/qos/BUILD @@ -11,7 +11,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/apis/core/helper/qos", deps = [ "//pkg/apis/core:go_default_library", - "//pkg/apis/core/helper:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], diff --git a/pkg/apis/core/helper/qos/qos.go b/pkg/apis/core/helper/qos/qos.go index 18414322c82..fad6fb24074 100644 --- a/pkg/apis/core/helper/qos/qos.go +++ b/pkg/apis/core/helper/qos/qos.go @@ -22,12 +22,12 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/core/helper" ) +var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) + func isSupportedQoSComputeResource(name core.ResourceName) bool { - supportedQoSComputeResources := sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) - return supportedQoSComputeResources.Has(string(name)) || helper.IsHugePageResourceName(name) + return supportedQoSComputeResources.Has(string(name)) } // GetPodQOS returns the QoS class of a pod. diff --git a/pkg/apis/core/v1/helper/qos/BUILD b/pkg/apis/core/v1/helper/qos/BUILD index d29fcf09e93..7dd60de18c5 100644 --- a/pkg/apis/core/v1/helper/qos/BUILD +++ b/pkg/apis/core/v1/helper/qos/BUILD @@ -26,7 +26,7 @@ go_library( srcs = ["qos.go"], importpath = "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos", deps = [ - "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/pkg/apis/core/v1/helper/qos/qos.go b/pkg/apis/core/v1/helper/qos/qos.go index 5e9dbdd7462..426f054efa6 100644 --- a/pkg/apis/core/v1/helper/qos/qos.go +++ b/pkg/apis/core/v1/helper/qos/qos.go @@ -20,15 +20,16 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" - v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/apis/core" ) +var supportedQoSComputeResources = 
sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) + // QOSList is a set of (resource name, QoS class) pairs. type QOSList map[v1.ResourceName]v1.PodQOSClass func isSupportedQoSComputeResource(name v1.ResourceName) bool { - supportedQoSComputeResources := sets.NewString(string(v1.ResourceCPU), string(v1.ResourceMemory)) - return supportedQoSComputeResources.Has(string(name)) || v1helper.IsHugePageResourceName(name) + return supportedQoSComputeResources.Has(string(name)) } // GetPodQOS returns the QoS class of a pod. diff --git a/pkg/apis/core/v1/helper/qos/qos_test.go b/pkg/apis/core/v1/helper/qos/qos_test.go index 7d14b519e79..0685d4e6559 100644 --- a/pkg/apis/core/v1/helper/qos/qos_test.go +++ b/pkg/apis/core/v1/helper/qos/qos_test.go @@ -131,10 +131,10 @@ func TestGetPodQOS(t *testing.T) { expected: v1.PodQOSBurstable, }, { - pod: newPod("burstable-hugepages", []v1.Container{ - newContainer("burstable", addResource("hugepages-2Mi", "1Gi", getResourceList("0", "0")), addResource("hugepages-2Mi", "1Gi", getResourceList("0", "0"))), + pod: newPod("best-effort-hugepages", []v1.Container{ + newContainer("best-effort", addResource("hugepages-2Mi", "1Gi", getResourceList("0", "0")), addResource("hugepages-2Mi", "1Gi", getResourceList("0", "0"))), }), - expected: v1.PodQOSBurstable, + expected: v1.PodQOSBestEffort, }, } for id, testCase := range testCases { diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index aaccd4d2ca4..8d3110fe40c 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -4252,7 +4252,13 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa allErrs := field.ErrorList{} limPath := fldPath.Child("limits") reqPath := fldPath.Child("requests") + limContainsCpuOrMemory := false + reqContainsCpuOrMemory := false + limContainsHugePages := false + reqContainsHugePages := false + supportedQoSComputeResources := 
sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) for resourceName, quantity := range requirements.Limits { + fldPath := limPath.Key(string(resourceName)) // Validate resource name. allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) @@ -4263,10 +4269,17 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa if resourceName == core.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { allErrs = append(allErrs, field.Forbidden(limPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceRequirements")) } - if helper.IsHugePageResourceName(resourceName) && !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) { - allErrs = append(allErrs, field.Forbidden(limPath, fmt.Sprintf("%s field disabled by feature-gate for ResourceRequirements", resourceName))) + if helper.IsHugePageResourceName(resourceName) { + if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) { + allErrs = append(allErrs, field.Forbidden(limPath, fmt.Sprintf("%s field disabled by feature-gate for ResourceRequirements", resourceName))) + } else { + limContainsHugePages = true + } } + if supportedQoSComputeResources.Has(string(resourceName)) { + limContainsCpuOrMemory = true + } } for resourceName, quantity := range requirements.Requests { fldPath := reqPath.Key(string(resourceName)) @@ -4287,6 +4300,16 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa } else if resourceName == core.ResourceNvidiaGPU { allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", core.ResourceNvidiaGPU))) } + if helper.IsHugePageResourceName(resourceName) { + reqContainsHugePages = true + } + if supportedQoSComputeResources.Has(string(resourceName)) { + reqContainsCpuOrMemory = true + } + + } + if !limContainsCpuOrMemory && !reqContainsCpuOrMemory && 
(reqContainsHugePages || limContainsHugePages) { + allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("HugePages require cpu or memory"))) } return allErrs diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 5583eb9a857..6ba034b5e13 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -3316,6 +3316,32 @@ func TestAlphaHugePagesIsolation(t *testing.T) { successCases := []core.Pod{ { // Basic fields. ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"}, + Spec: core.PodSpec{ + Containers: []core.Container{ + { + Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", + Resources: core.ResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceName(core.ResourceCPU): resource.MustParse("10"), + core.ResourceName(core.ResourceMemory): resource.MustParse("10G"), + core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"), + }, + Limits: core.ResourceList{ + core.ResourceName(core.ResourceCPU): resource.MustParse("10"), + core.ResourceName(core.ResourceMemory): resource.MustParse("10G"), + core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"), + }, + }, + }, + }, + RestartPolicy: core.RestartPolicyAlways, + DNSPolicy: core.DNSClusterFirst, + }, + }, + } + failureCases := []core.Pod{ + { // Basic fields. + ObjectMeta: metav1.ObjectMeta{Name: "hugepages-requireCpuOrMemory", Namespace: "ns"}, Spec: core.PodSpec{ Containers: []core.Container{ { @@ -3334,8 +3360,6 @@ func TestAlphaHugePagesIsolation(t *testing.T) { DNSPolicy: core.DNSClusterFirst, }, }, - } - failureCases := []core.Pod{ { // Basic fields. 
ObjectMeta: metav1.ObjectMeta{Name: "hugepages-shared", Namespace: "ns"}, Spec: core.PodSpec{ @@ -3344,10 +3368,14 @@ func TestAlphaHugePagesIsolation(t *testing.T) { Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{ Requests: core.ResourceList{ - core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"), + core.ResourceName(core.ResourceCPU): resource.MustParse("10"), + core.ResourceName(core.ResourceMemory): resource.MustParse("10G"), + core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"), }, Limits: core.ResourceList{ - core.ResourceName("hugepages-2Mi"): resource.MustParse("2Gi"), + core.ResourceName(core.ResourceCPU): resource.MustParse("10"), + core.ResourceName(core.ResourceMemory): resource.MustParse("10G"), + core.ResourceName("hugepages-2Mi"): resource.MustParse("2Gi"), }, }, }, @@ -3364,12 +3392,15 @@ func TestAlphaHugePagesIsolation(t *testing.T) { Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{ Requests: core.ResourceList{ - core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"), - core.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"), + core.ResourceName(core.ResourceCPU): resource.MustParse("10"), + core.ResourceName(core.ResourceMemory): resource.MustParse("10G"), + core.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"), }, Limits: core.ResourceList{ - core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"), - core.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"), + core.ResourceName(core.ResourceCPU): resource.MustParse("10"), + core.ResourceName(core.ResourceMemory): resource.MustParse("10G"), + core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"), + core.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"), }, }, }, From fb78c39cbcc92833a60abb0248a630ebca51b913 Mon Sep 17 00:00:00 2001 From: edisonxiang Date: Thu, 23 
Nov 2017 10:19:27 +0800 Subject: [PATCH 091/794] fix typos in this file --- pkg/volume/cinder/attacher.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go index f98cae71bd2..87b58dae01b 100644 --- a/pkg/volume/cinder/attacher.go +++ b/pkg/volume/cinder/attacher.go @@ -44,15 +44,15 @@ var _ volume.Attacher = &cinderDiskAttacher{} var _ volume.AttachableVolumePlugin = &cinderPlugin{} const ( - probeVolumeInitDealy = 1 * time.Second + probeVolumeInitDelay = 1 * time.Second probeVolumeFactor = 2.0 - operationFinishInitDealy = 1 * time.Second + operationFinishInitDelay = 1 * time.Second operationFinishFactor = 1.1 operationFinishSteps = 10 - diskAttachInitDealy = 1 * time.Second + diskAttachInitDelay = 1 * time.Second diskAttachFactor = 1.2 diskAttachSteps = 15 - diskDetachInitDealy = 1 * time.Second + diskDetachInitDelay = 1 * time.Second diskDetachFactor = 1.2 diskDetachSteps = 13 ) @@ -75,7 +75,7 @@ func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error { backoff := wait.Backoff{ - Duration: operationFinishInitDealy, + Duration: operationFinishInitDelay, Factor: operationFinishFactor, Steps: operationFinishSteps, } @@ -100,7 +100,7 @@ func (attacher *cinderDiskAttacher) waitOperationFinished(volumeID string) error func (attacher *cinderDiskAttacher) waitDiskAttached(instanceID, volumeID string) error { backoff := wait.Backoff{ - Duration: diskAttachInitDealy, + Duration: diskAttachInitDelay, Factor: diskAttachFactor, Steps: diskAttachSteps, } @@ -235,12 +235,12 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty.", volumeID) } - ticker := time.NewTicker(probeVolumeInitDealy) + ticker := time.NewTicker(probeVolumeInitDelay) defer ticker.Stop() timer 
:= time.NewTimer(timeout) defer timer.Stop() - duration := probeVolumeInitDealy + duration := probeVolumeInitDelay for { select { case <-ticker.C: @@ -334,7 +334,7 @@ func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) { func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error { backoff := wait.Backoff{ - Duration: operationFinishInitDealy, + Duration: operationFinishInitDelay, Factor: operationFinishFactor, Steps: operationFinishSteps, } @@ -359,7 +359,7 @@ func (detacher *cinderDiskDetacher) waitOperationFinished(volumeID string) error func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string) error { backoff := wait.Backoff{ - Duration: diskDetachInitDealy, + Duration: diskDetachInitDelay, Factor: diskDetachFactor, Steps: diskDetachSteps, } From aab6efd192b4ca9e72a317bb2df93d39573494c2 Mon Sep 17 00:00:00 2001 From: edisonxiang Date: Thu, 23 Nov 2017 10:48:23 +0800 Subject: [PATCH 092/794] Change wording in OpenStack Provider --- .../providers/openstack/openstack_loadbalancer.go | 8 ++++---- pkg/cloudprovider/providers/openstack/openstack_test.go | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 98b5b44ed77..c459aa14e2a 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -50,7 +50,7 @@ const ( // going into ACTIVE loadbalancer provisioning status. Starting with 1 // seconds, multiplying by 1.2 with each step and taking 19 steps at maximum // it will time out after 128s, which roughly corresponds to 120s - loadbalancerActiveInitDealy = 1 * time.Second + loadbalancerActiveInitDelay = 1 * time.Second loadbalancerActiveFactor = 1.2 loadbalancerActiveSteps = 19 @@ -58,7 +58,7 @@ const ( // waiting for delete operation to complete. 
Starting with 1 // seconds, multiplying by 1.2 with each step and taking 13 steps at maximum // it will time out after 32s, which roughly corresponds to 30s - loadbalancerDeleteInitDealy = 1 * time.Second + loadbalancerDeleteInitDelay = 1 * time.Second loadbalancerDeleteFactor = 1.2 loadbalancerDeleteSteps = 13 @@ -320,7 +320,7 @@ func getSecurityGroupRules(client *gophercloud.ServiceClient, opts rules.ListOpt func waitLoadbalancerActiveProvisioningStatus(client *gophercloud.ServiceClient, loadbalancerID string) (string, error) { backoff := wait.Backoff{ - Duration: loadbalancerActiveInitDealy, + Duration: loadbalancerActiveInitDelay, Factor: loadbalancerActiveFactor, Steps: loadbalancerActiveSteps, } @@ -350,7 +350,7 @@ func waitLoadbalancerActiveProvisioningStatus(client *gophercloud.ServiceClient, func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID string) error { backoff := wait.Backoff{ - Duration: loadbalancerDeleteInitDealy, + Duration: loadbalancerDeleteInitDelay, Factor: loadbalancerDeleteFactor, Steps: loadbalancerDeleteSteps, } diff --git a/pkg/cloudprovider/providers/openstack/openstack_test.go b/pkg/cloudprovider/providers/openstack/openstack_test.go index 0875185b63d..6cf0968835b 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_test.go +++ b/pkg/cloudprovider/providers/openstack/openstack_test.go @@ -45,14 +45,14 @@ const ( // waiting for specified volume status. 
Starting with 1 // seconds, multiplying by 1.2 with each step and taking 13 steps at maximum // it will time out after 32s, which roughly corresponds to 30s - volumeStatusInitDealy = 1 * time.Second + volumeStatusInitDelay = 1 * time.Second volumeStatusFactor = 1.2 volumeStatusSteps = 13 ) func WaitForVolumeStatus(t *testing.T, os *OpenStack, volumeName string, status string) { backoff := wait.Backoff{ - Duration: volumeStatusInitDealy, + Duration: volumeStatusInitDelay, Factor: volumeStatusFactor, Steps: volumeStatusSteps, } From 9727cd0636562a01b92f9a4d05a96da5bf2de114 Mon Sep 17 00:00:00 2001 From: tanshanshan Date: Thu, 23 Nov 2017 11:50:04 +0800 Subject: [PATCH 093/794] declare in front --- plugin/pkg/scheduler/algorithm/predicates/predicates.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index 917b35acf51..8931814bdcd 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -308,7 +308,8 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s continue } - if pvc.Spec.VolumeName == "" { + pvName := pvc.Spec.VolumeName + if pvName == "" { // PVC is not bound. It was either deleted and created again or // it was forcefuly unbound by admin. 
The pod can still use the // original PV where it was bound to -> log the error and count @@ -318,7 +319,6 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s continue } - pvName := pvc.Spec.VolumeName pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) if err != nil || pv == nil { // if the PV is not found, log the error From 19caa9c50dc7e3458457005f31d08ecf10685677 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Thu, 23 Nov 2017 10:01:23 +0100 Subject: [PATCH 094/794] Skip pods that refer to PVCs that are being deleted Scheduler should ignore pods that refer to PVCs that either do not exist or are being deleted. --- plugin/pkg/scheduler/algorithm/types.go | 6 ++ plugin/pkg/scheduler/core/extender_test.go | 2 +- .../pkg/scheduler/core/generic_scheduler.go | 35 +++++++- .../scheduler/core/generic_scheduler_test.go | 81 ++++++++++++++++++- plugin/pkg/scheduler/factory/factory.go | 10 ++- plugin/pkg/scheduler/scheduler_test.go | 6 +- plugin/pkg/scheduler/testing/fake_lister.go | 15 ++++ 7 files changed, 148 insertions(+), 7 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/types.go b/plugin/pkg/scheduler/algorithm/types.go index b3e34e02401..5c75d75097a 100644 --- a/plugin/pkg/scheduler/algorithm/types.go +++ b/plugin/pkg/scheduler/algorithm/types.go @@ -117,6 +117,12 @@ type ReplicaSetLister interface { GetPodReplicaSets(*v1.Pod) ([]*extensions.ReplicaSet, error) } +// PersistentVolumeClaimLister interface represents anything that can list PVCs for a scheduler. 
+type PersistentVolumeClaimLister interface { + // Gets given PVC + Get(namespace, name string) (*v1.PersistentVolumeClaim, error) +} + var _ ControllerLister = &EmptyControllerLister{} // EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data diff --git a/plugin/pkg/scheduler/core/extender_test.go b/plugin/pkg/scheduler/core/extender_test.go index 143ba795dd6..3f389aff4dc 100644 --- a/plugin/pkg/scheduler/core/extender_test.go +++ b/plugin/pkg/scheduler/core/extender_test.go @@ -317,7 +317,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { } queue := NewSchedulingQueue() scheduler := NewGenericScheduler( - cache, nil, queue, test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, extenders, nil) + cache, nil, queue, test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}) podIgnored := &v1.Pod{} machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes))) if test.expectsErr { diff --git a/plugin/pkg/scheduler/core/generic_scheduler.go b/plugin/pkg/scheduler/core/generic_scheduler.go index e14d220be3f..70b5e872999 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler.go +++ b/plugin/pkg/scheduler/core/generic_scheduler.go @@ -98,6 +98,7 @@ type genericScheduler struct { extenders []algorithm.SchedulerExtender lastNodeIndexLock sync.Mutex lastNodeIndex uint64 + pvcLister algorithm.PersistentVolumeClaimLister cachedNodeInfoMap map[string]*schedulercache.NodeInfo volumeBinder *volumebinder.VolumeBinder @@ -110,6 +111,10 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister trace := utiltrace.New(fmt.Sprintf("Scheduling %s/%s", pod.Namespace, pod.Name)) defer trace.LogIfLong(100 * time.Millisecond) + if err := podPassesBasicChecks(pod, g.pvcLister); err != nil { + 
return "", err + } + nodes, err := nodeLister.List() if err != nil { return "", err @@ -995,6 +1000,32 @@ func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedule return true } +// podPassesBasicChecks makes sanity checks on the pod if it can be scheduled. +func podPassesBasicChecks(pod *v1.Pod, pvcLister algorithm.PersistentVolumeClaimLister) error { + // Check PVCs used by the pod + namespace := pod.Namespace + manifest := &(pod.Spec) + for i := range manifest.Volumes { + volume := &manifest.Volumes[i] + if volume.PersistentVolumeClaim == nil { + // Volume is not a PVC, ignore + continue + } + pvcName := volume.PersistentVolumeClaim.ClaimName + pvc, err := pvcLister.Get(namespace, pvcName) + if err != nil { + // The error has already enough context ("persistentvolumeclaim "myclaim" not found") + return err + } + + if pvc.DeletionTimestamp != nil { + return fmt.Errorf("persistentvolumeclaim %q is being deleted", pvc.Name) + } + } + + return nil +} + func NewGenericScheduler( cache schedulercache.Cache, eCache *EquivalenceCache, @@ -1004,7 +1035,8 @@ func NewGenericScheduler( prioritizers []algorithm.PriorityConfig, priorityMetaProducer algorithm.MetadataProducer, extenders []algorithm.SchedulerExtender, - volumeBinder *volumebinder.VolumeBinder) algorithm.ScheduleAlgorithm { + volumeBinder *volumebinder.VolumeBinder, + pvcLister algorithm.PersistentVolumeClaimLister) algorithm.ScheduleAlgorithm { return &genericScheduler{ cache: cache, equivalenceCache: eCache, @@ -1016,5 +1048,6 @@ func NewGenericScheduler( extenders: extenders, cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo), volumeBinder: volumeBinder, + pvcLister: pvcLister, } } diff --git a/plugin/pkg/scheduler/core/generic_scheduler_test.go b/plugin/pkg/scheduler/core/generic_scheduler_test.go index 092f8789b3e..baa76414f09 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler_test.go +++ b/plugin/pkg/scheduler/core/generic_scheduler_test.go @@ -186,6 +186,7 @@ func 
TestGenericScheduler(t *testing.T) { predicates map[string]algorithm.FitPredicate prioritizers []algorithm.PriorityConfig nodes []string + pvcs []*v1.PersistentVolumeClaim pod *v1.Pod pods []*v1.Pod expectedHosts sets.String @@ -300,6 +301,77 @@ func TestGenericScheduler(t *testing.T) { }, }, }, + { + // Pod with existing PVC + predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, + prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, + nodes: []string{"machine1", "machine2"}, + pvcs: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC"}}}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "ignore"}, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "existingPVC", + }, + }, + }, + }, + }, + }, + expectedHosts: sets.NewString("machine1", "machine2"), + name: "existing PVC", + wErr: nil, + }, + { + // Pod with non existing PVC + predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, + prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, + nodes: []string{"machine1", "machine2"}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "ignore"}, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "unknownPVC", + }, + }, + }, + }, + }, + }, + name: "unknown PVC", + expectsErr: true, + wErr: fmt.Errorf("persistentvolumeclaim \"unknownPVC\" not found"), + }, + { + // Pod with deleting PVC + predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, + prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, + nodes: []string{"machine1", "machine2"}, + pvcs: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", DeletionTimestamp: &metav1.Time{}}}}, + pod: &v1.Pod{ + ObjectMeta: 
metav1.ObjectMeta{Name: "ignore"}, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "existingPVC", + }, + }, + }, + }, + }, + }, + name: "deleted PVC", + expectsErr: true, + wErr: fmt.Errorf("persistentvolumeclaim \"existingPVC\" is being deleted"), + }, } for _, test := range tests { cache := schedulercache.New(time.Duration(0), wait.NeverStop) @@ -309,9 +381,14 @@ func TestGenericScheduler(t *testing.T) { for _, name := range test.nodes { cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}}) } + pvcs := []*v1.PersistentVolumeClaim{} + for _, pvc := range test.pvcs { + pvcs = append(pvcs, pvc) + } + pvcLister := schedulertesting.FakePersistentVolumeClaimLister(pvcs) scheduler := NewGenericScheduler( - cache, nil, NewSchedulingQueue(), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil) + cache, nil, NewSchedulingQueue(), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, pvcLister) machine, err := scheduler.Schedule(test.pod, schedulertesting.FakeNodeLister(makeNodeList(test.nodes))) if !reflect.DeepEqual(err, test.wErr) { @@ -1190,7 +1267,7 @@ func TestPreempt(t *testing.T) { extenders = append(extenders, extender) } scheduler := NewGenericScheduler( - cache, nil, NewSchedulingQueue(), map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyMetadataProducer, extenders, nil) + cache, nil, NewSchedulingQueue(), map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, 
algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}) // Call Preempt and check the expected results. node, victims, _, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap})) if err != nil { diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index 764f449ccc4..df3e5cdebec 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -903,7 +903,7 @@ func (f *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, glog.Info("Created equivalence class cache") } - algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder) + algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder, &pvcLister{f.pVCLister}) podBackoff := util.CreateDefaultPodBackoff() return &scheduler.Config{ @@ -935,6 +935,14 @@ func (n *nodeLister) List() ([]*v1.Node, error) { return n.NodeLister.List(labels.Everything()) } +type pvcLister struct { + corelisters.PersistentVolumeClaimLister +} + +func (p *pvcLister) Get(namespace, name string) (*v1.PersistentVolumeClaim, error) { + return p.PersistentVolumeClaimLister.PersistentVolumeClaims(namespace).Get(name) +} + func (f *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) { pluginArgs, err := f.getPluginArgs() if err != nil { diff --git a/plugin/pkg/scheduler/scheduler_test.go b/plugin/pkg/scheduler/scheduler_test.go index e08397723f7..c3c8ccab9a6 100644 --- a/plugin/pkg/scheduler/scheduler_test.go +++ b/plugin/pkg/scheduler/scheduler_test.go @@ -532,7 +532,8 @@ func 
setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache. []algorithm.PriorityConfig{}, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, - nil) + nil, + schedulertesting.FakePersistentVolumeClaimLister{}) bindingChan := make(chan *v1.Binding, 1) errChan := make(chan error, 1) configurator := &FakeConfigurator{ @@ -575,7 +576,8 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc []algorithm.PriorityConfig{}, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, - nil) + nil, + schedulertesting.FakePersistentVolumeClaimLister{}) bindingChan := make(chan *v1.Binding, 2) configurator := &FakeConfigurator{ Config: &Config{ diff --git a/plugin/pkg/scheduler/testing/fake_lister.go b/plugin/pkg/scheduler/testing/fake_lister.go index 35f763087fe..75db4af4cbc 100644 --- a/plugin/pkg/scheduler/testing/fake_lister.go +++ b/plugin/pkg/scheduler/testing/fake_lister.go @@ -176,3 +176,18 @@ func (f FakeStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.Stat } return } + +// FakePersistentVolumeClaimLister implements PersistentVolumeClaimLister on []*v1.PersistentVolumeClaim for test purposes. +type FakePersistentVolumeClaimLister []*v1.PersistentVolumeClaim + +var _ PersistentVolumeClaimLister = FakePersistentVolumeClaimLister{} + +// List returns nodes as a []string. +func (f FakePersistentVolumeClaimLister) Get(namespace, name string) (*v1.PersistentVolumeClaim, error) { + for _, pvc := range f { + if pvc.Name == name && pvc.Namespace == namespace { + return pvc, nil + } + } + return nil, fmt.Errorf("persistentvolumeclaim %q not found", name) +} From 0a96a75cea2c8eb146cd5fad65e68d7368faa45e Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Thu, 23 Nov 2017 10:04:42 +0100 Subject: [PATCH 095/794] Remove PVCLister and use informer directly. 
--- plugin/pkg/scheduler/algorithm/types.go | 6 ---- plugin/pkg/scheduler/core/BUILD | 1 + .../pkg/scheduler/core/generic_scheduler.go | 9 +++--- plugin/pkg/scheduler/factory/factory.go | 10 +----- plugin/pkg/scheduler/testing/BUILD | 1 + plugin/pkg/scheduler/testing/fake_lister.go | 31 ++++++++++++++++--- 6 files changed, 34 insertions(+), 24 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/types.go b/plugin/pkg/scheduler/algorithm/types.go index 5c75d75097a..b3e34e02401 100644 --- a/plugin/pkg/scheduler/algorithm/types.go +++ b/plugin/pkg/scheduler/algorithm/types.go @@ -117,12 +117,6 @@ type ReplicaSetLister interface { GetPodReplicaSets(*v1.Pod) ([]*extensions.ReplicaSet, error) } -// PersistentVolumeClaimLister interface represents anything that can list PVCs for a scheduler. -type PersistentVolumeClaimLister interface { - // Gets given PVC - Get(namespace, name string) (*v1.PersistentVolumeClaim, error) -} - var _ ControllerLister = &EmptyControllerLister{} // EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data diff --git a/plugin/pkg/scheduler/core/BUILD b/plugin/pkg/scheduler/core/BUILD index aa14221fe52..ee42cd1fdb3 100644 --- a/plugin/pkg/scheduler/core/BUILD +++ b/plugin/pkg/scheduler/core/BUILD @@ -64,6 +64,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", + "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", diff --git a/plugin/pkg/scheduler/core/generic_scheduler.go b/plugin/pkg/scheduler/core/generic_scheduler.go index 70b5e872999..6505cbe58a4 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler.go +++ b/plugin/pkg/scheduler/core/generic_scheduler.go 
@@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/errors" utiltrace "k8s.io/apiserver/pkg/util/trace" + corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" @@ -98,10 +99,10 @@ type genericScheduler struct { extenders []algorithm.SchedulerExtender lastNodeIndexLock sync.Mutex lastNodeIndex uint64 - pvcLister algorithm.PersistentVolumeClaimLister cachedNodeInfoMap map[string]*schedulercache.NodeInfo volumeBinder *volumebinder.VolumeBinder + pvcLister corelisters.PersistentVolumeClaimLister } // Schedule tries to schedule the given pod to one of node in the node list. @@ -1001,7 +1002,7 @@ func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedule } // podPassesBasicChecks makes sanity checks on the pod if it can be scheduled. -func podPassesBasicChecks(pod *v1.Pod, pvcLister algorithm.PersistentVolumeClaimLister) error { +func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeClaimLister) error { // Check PVCs used by the pod namespace := pod.Namespace manifest := &(pod.Spec) @@ -1012,7 +1013,7 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister algorithm.PersistentVolumeClaim continue } pvcName := volume.PersistentVolumeClaim.ClaimName - pvc, err := pvcLister.Get(namespace, pvcName) + pvc, err := pvcLister.PersistentVolumeClaims(namespace).Get(pvcName) if err != nil { // The error has already enough context ("persistentvolumeclaim "myclaim" not found") return err @@ -1036,7 +1037,7 @@ func NewGenericScheduler( priorityMetaProducer algorithm.MetadataProducer, extenders []algorithm.SchedulerExtender, volumeBinder *volumebinder.VolumeBinder, - pvcLister algorithm.PersistentVolumeClaimLister) algorithm.ScheduleAlgorithm { + pvcLister corelisters.PersistentVolumeClaimLister) algorithm.ScheduleAlgorithm { return &genericScheduler{ cache: cache, 
equivalenceCache: eCache, diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index df3e5cdebec..a597cfe07f4 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -903,7 +903,7 @@ func (f *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, glog.Info("Created equivalence class cache") } - algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder, &pvcLister{f.pVCLister}) + algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder, f.pVCLister) podBackoff := util.CreateDefaultPodBackoff() return &scheduler.Config{ @@ -935,14 +935,6 @@ func (n *nodeLister) List() ([]*v1.Node, error) { return n.NodeLister.List(labels.Everything()) } -type pvcLister struct { - corelisters.PersistentVolumeClaimLister -} - -func (p *pvcLister) Get(namespace, name string) (*v1.PersistentVolumeClaim, error) { - return p.PersistentVolumeClaimLister.PersistentVolumeClaims(namespace).Get(name) -} - func (f *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) { pluginArgs, err := f.getPluginArgs() if err != nil { diff --git a/plugin/pkg/scheduler/testing/BUILD b/plugin/pkg/scheduler/testing/BUILD index 03ab8639c1e..1a3d2c07d73 100644 --- a/plugin/pkg/scheduler/testing/BUILD +++ b/plugin/pkg/scheduler/testing/BUILD @@ -22,6 +22,7 @@ go_library( "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", ], ) diff --git a/plugin/pkg/scheduler/testing/fake_lister.go 
b/plugin/pkg/scheduler/testing/fake_lister.go index 75db4af4cbc..f01457a5bcf 100644 --- a/plugin/pkg/scheduler/testing/fake_lister.go +++ b/plugin/pkg/scheduler/testing/fake_lister.go @@ -24,6 +24,7 @@ import ( extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + corelisters "k8s.io/client-go/listers/core/v1" . "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -180,14 +181,34 @@ func (f FakeStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.Stat // FakePersistentVolumeClaimLister implements PersistentVolumeClaimLister on []*v1.PersistentVolumeClaim for test purposes. type FakePersistentVolumeClaimLister []*v1.PersistentVolumeClaim -var _ PersistentVolumeClaimLister = FakePersistentVolumeClaimLister{} +var _ corelisters.PersistentVolumeClaimLister = FakePersistentVolumeClaimLister{} -// List returns nodes as a []string. -func (f FakePersistentVolumeClaimLister) Get(namespace, name string) (*v1.PersistentVolumeClaim, error) { - for _, pvc := range f { - if pvc.Name == name && pvc.Namespace == namespace { +func (f FakePersistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + return nil, fmt.Errorf("not implemented") +} + +func (f FakePersistentVolumeClaimLister) PersistentVolumeClaims(namespace string) corelisters.PersistentVolumeClaimNamespaceLister { + return &fakePersistentVolumeClaimNamespaceLister{ + pvcs: f, + namespace: namespace, + } +} + +// fakePersistentVolumeClaimNamespaceLister is implementation of PersistentVolumeClaimNamespaceLister returned by List() above. 
+type fakePersistentVolumeClaimNamespaceLister struct { + pvcs []*v1.PersistentVolumeClaim + namespace string +} + +func (f *fakePersistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) { + for _, pvc := range f.pvcs { + if pvc.Name == name && pvc.Namespace == f.namespace { return pvc, nil } } return nil, fmt.Errorf("persistentvolumeclaim %q not found", name) } + +func (f fakePersistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + return nil, fmt.Errorf("not implemented") +} From 7de8d545f344d85badb349367abce59c691835b5 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Fri, 10 Nov 2017 17:32:08 +0800 Subject: [PATCH 096/794] Add test case for RunCreateToken --- cmd/kubeadm/app/cmd/BUILD | 5 ++ cmd/kubeadm/app/cmd/token_test.go | 85 +++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+) diff --git a/cmd/kubeadm/app/cmd/BUILD b/cmd/kubeadm/app/cmd/BUILD index 8c8e3a8e2d4..bdbbfc695db 100644 --- a/cmd/kubeadm/app/cmd/BUILD +++ b/cmd/kubeadm/app/cmd/BUILD @@ -90,6 +90,11 @@ go_test( deps = [ "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/preflight:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", ], diff --git a/cmd/kubeadm/app/cmd/token_test.go b/cmd/kubeadm/app/cmd/token_test.go index 2db9cd9e0b6..c3a344d1691 100644 --- a/cmd/kubeadm/app/cmd/token_test.go +++ b/cmd/kubeadm/app/cmd/token_test.go @@ -20,6 +20,12 @@ import ( "bytes" "regexp" "testing" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + 
"k8s.io/client-go/kubernetes/fake" + core "k8s.io/client-go/testing" ) const ( @@ -44,3 +50,82 @@ func TestRunGenerateToken(t *testing.T) { t.Errorf("RunGenerateToken's output did not match expected regex; wanted: [%s], got: [%s]", TokenExpectedRegex, output) } } + +func TestRunCreateToken(t *testing.T) { + var buf bytes.Buffer + fakeClient := &fake.Clientset{} + fakeClient.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.NewNotFound(v1.Resource("secrets"), "foo") + }) + + testCases := []struct { + name string + token string + usages []string + extraGroups []string + expectedError bool + }{ + { + name: "valid: empty token", + token: "", + usages: []string{"signing", "authentication"}, + extraGroups: []string{"system:bootstrappers:foo"}, + expectedError: false, + }, + { + name: "valid: non-empty token", + token: "abcdef.1234567890123456", + usages: []string{"signing", "authentication"}, + extraGroups: []string{"system:bootstrappers:foo"}, + expectedError: false, + }, + { + name: "valid: no extraGroups", + token: "abcdef.1234567890123456", + usages: []string{"signing", "authentication"}, + extraGroups: []string{}, + expectedError: false, + }, + { + name: "invalid: incorrect token", + token: "123456.AABBCCDDEEFFGGHH", + usages: []string{"signing", "authentication"}, + extraGroups: []string{}, + expectedError: true, + }, + { + name: "invalid: incorrect extraGroups", + token: "abcdef.1234567890123456", + usages: []string{"signing", "authentication"}, + extraGroups: []string{"foo"}, + expectedError: true, + }, + { + name: "invalid: specifying --groups when --usages doesn't include authentication", + token: "abcdef.1234567890123456", + usages: []string{"signing"}, + extraGroups: []string{"foo"}, + expectedError: true, + }, + { + name: "invalid: partially incorrect usages", + token: "abcdef.1234567890123456", + usages: []string{"foo", "authentication"}, + extraGroups: 
[]string{"system:bootstrappers:foo"}, + expectedError: true, + }, + { + name: "invalid: all incorrect usages", + token: "abcdef.1234567890123456", + usages: []string{"foo", "bar"}, + extraGroups: []string{"system:bootstrappers:foo"}, + expectedError: true, + }, + } + for _, tc := range testCases { + err := RunCreateToken(&buf, fakeClient, tc.token, 0, tc.usages, tc.extraGroups, "", false, "") + if (err != nil) != tc.expectedError { + t.Errorf("Test case %s: RunCreateToken expected error: %v, saw: %v", tc.name, tc.expectedError, (err != nil)) + } + } +} From 1c96fa21d24b847432e3775e0fd45bb251693d8c Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Thu, 23 Nov 2017 20:21:20 +0800 Subject: [PATCH 097/794] Minor cleanup in kubeadm. --- cmd/kubeadm/app/BUILD | 1 - cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go | 1 - cmd/kubeadm/app/cmd/token.go | 3 +-- cmd/kubeadm/app/phases/token/BUILD | 14 -------------- cmd/kubeadm/app/phases/upgrade/postupgrade.go | 2 +- 5 files changed, 2 insertions(+), 19 deletions(-) delete mode 100644 cmd/kubeadm/app/phases/token/BUILD diff --git a/cmd/kubeadm/app/BUILD b/cmd/kubeadm/app/BUILD index a2366dc98c1..24502fd9478 100644 --- a/cmd/kubeadm/app/BUILD +++ b/cmd/kubeadm/app/BUILD @@ -44,7 +44,6 @@ filegroup( "//cmd/kubeadm/app/phases/kubelet:all-srcs", "//cmd/kubeadm/app/phases/markmaster:all-srcs", "//cmd/kubeadm/app/phases/selfhosting:all-srcs", - "//cmd/kubeadm/app/phases/token:all-srcs", "//cmd/kubeadm/app/phases/upgrade:all-srcs", "//cmd/kubeadm/app/phases/uploadconfig:all-srcs", "//cmd/kubeadm/app/preflight:all-srcs", diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go index dcf554fbabb..12e902d3ce5 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go @@ -80,7 +80,6 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { func(obj *kubeadm.NodeConfiguration, c fuzz.Continue) { c.FuzzNoCustom(obj) obj.CACertPath 
= "foo" - obj.CACertPath = "foo" obj.DiscoveryFile = "foo" obj.DiscoveryToken = "foo" obj.DiscoveryTokenAPIServers = []string{"foo"} diff --git a/cmd/kubeadm/app/cmd/token.go b/cmd/kubeadm/app/cmd/token.go index ee2aba0ffb8..87b8d077750 100644 --- a/cmd/kubeadm/app/cmd/token.go +++ b/cmd/kubeadm/app/cmd/token.go @@ -390,8 +390,7 @@ func getClientset(file string, dryRun bool) (clientset.Interface, error) { } return apiclient.NewDryRunClient(dryRunGetter, os.Stdout), nil } - client, err := kubeconfigutil.ClientSetFromFile(file) - return client, err + return kubeconfigutil.ClientSetFromFile(file) } func getJoinCommand(token string, kubeConfigFile string) (string, error) { diff --git a/cmd/kubeadm/app/phases/token/BUILD b/cmd/kubeadm/app/phases/token/BUILD deleted file mode 100644 index 7e76248ad95..00000000000 --- a/cmd/kubeadm/app/phases/token/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index 2eae65ab1a2..52613d4c4c1 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -58,7 +58,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC errs = append(errs, err) } - // Create/update RBAC rules that makes the 1.8.0+ nodes to rotate certificates and get their CSRs approved automatically + // Create/update RBAC rules that makes the nodes to rotate certificates and get their CSRs approved automatically if err := nodebootstraptoken.AutoApproveNodeCertificateRotation(client); err != nil { errs = append(errs, err) } From 235df842fafe21fe90f5691ca5eb1ab775fbe54d Mon Sep 17 00:00:00 2001 From: yuexiao-wang Date: Fri, 
24 Nov 2017 10:01:54 +0800 Subject: [PATCH 098/794] wrong number of args in apiserver/pkg Signed-off-by: yuexiao-wang --- .../k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go | 4 ++-- .../k8s.io/apiserver/pkg/server/filters/compression_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go b/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go index 11033454682..33ab5b52994 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go @@ -1321,7 +1321,7 @@ func (t *Tester) testListTableConversion(obj runtime.Object, assignFn AssignFunc t.Errorf("column %d has unexpected format: %q with type %q", j, column.Format, column.Type) } if column.Priority < 0 || column.Priority > 2 { - t.Errorf("column %d has unexpected priority", j, column.Priority) + t.Errorf("column %d has unexpected priority: %q", j, column.Priority) } if len(column.Description) == 0 { t.Errorf("column %d has no description", j) @@ -1332,7 +1332,7 @@ func (t *Tester) testListTableConversion(obj runtime.Object, assignFn AssignFunc } for i, row := range table.Rows { if len(row.Cells) != len(table.ColumnDefinitions) { - t.Errorf("row %d did not have the correct number of cells: %d in %v", len(table.ColumnDefinitions), row.Cells) + t.Errorf("row %d did not have the correct number of cells: %d in %v", i, len(table.ColumnDefinitions), row.Cells) } for j, cell := range row.Cells { // do not add to this test without discussion - may break clients diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/compression_test.go b/staging/src/k8s.io/apiserver/pkg/server/filters/compression_test.go index 931c9051542..07d46d5bf5d 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/compression_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/compression_test.go @@ -94,7 +94,7 @@ func 
TestCompression(t *testing.T) { } body, err := ioutil.ReadAll(reader) if err != nil { - t.Fatal("unexpected error: %v", err) + t.Fatalf("unexpected error: %v", err) } if !bytes.Equal(body, responseData) { t.Fatalf("Expected response body %s to equal %s", body, responseData) From 3323861ed343fd4c12faeaa157da9925f51a314d Mon Sep 17 00:00:00 2001 From: zhangxiaoyu-zidif Date: Fri, 24 Nov 2017 10:55:03 +0800 Subject: [PATCH 099/794] fix binary check for nfs.go --- pkg/volume/nfs/nfs.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/volume/nfs/nfs.go b/pkg/volume/nfs/nfs.go index 1435db92815..f61fbd25944 100644 --- a/pkg/volume/nfs/nfs.go +++ b/pkg/volume/nfs/nfs.go @@ -194,15 +194,15 @@ func (nfsMounter *nfsMounter) CanMount() error { exec := nfsMounter.plugin.host.GetExec(nfsMounter.plugin.GetPluginName()) switch runtime.GOOS { case "linux": - if _, err := exec.Run("/bin/ls", "/sbin/mount.nfs"); err != nil { + if _, err := exec.Run("test", "-x", "/sbin/mount.nfs"); err != nil { return fmt.Errorf("Required binary /sbin/mount.nfs is missing") } - if _, err := exec.Run("/bin/ls", "/sbin/mount.nfs4"); err != nil { + if _, err := exec.Run("test", "-x", "/sbin/mount.nfs4"); err != nil { return fmt.Errorf("Required binary /sbin/mount.nfs4 is missing") } return nil case "darwin": - if _, err := exec.Run("/bin/ls", "/sbin/mount_nfs"); err != nil { + if _, err := exec.Run("test", "-x", "/sbin/mount_nfs"); err != nil { return fmt.Errorf("Required binary /sbin/mount_nfs is missing") } } From 58ed69a9c87b6cfcfcb0829638f5ba5a7914b94f Mon Sep 17 00:00:00 2001 From: Gavin Date: Fri, 24 Nov 2017 15:04:19 +0800 Subject: [PATCH 100/794] put pod controllerref to metadata --- plugin/pkg/scheduler/algorithm/priorities/metadata.go | 3 +++ .../algorithm/priorities/node_prefer_avoid_pods.go | 9 ++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/plugin/pkg/scheduler/algorithm/priorities/metadata.go 
b/plugin/pkg/scheduler/algorithm/priorities/metadata.go index 1e16c4aad4d..fb561241798 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/metadata.go +++ b/plugin/pkg/scheduler/algorithm/priorities/metadata.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -47,6 +48,7 @@ type priorityMetadata struct { podTolerations []v1.Toleration affinity *v1.Affinity podSelectors []labels.Selector + controllerRef *metav1.OwnerReference } // PriorityMetadata is a MetadataProducer. Node info can be nil. @@ -62,6 +64,7 @@ func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo podTolerations: tolerationsPreferNoSchedule, affinity: pod.Spec.Affinity, podSelectors: podSelectors, + controllerRef: priorityutil.GetControllerRef(pod), } } diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go b/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go index c4311fb3ab5..7392f76b716 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" @@ -31,8 +32,14 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") } + var controllerRef *metav1.OwnerReference + if priorityMeta, ok := meta.(*priorityMetadata); ok { + controllerRef = priorityMeta.controllerRef + } else { + // We couldn't parse 
metadata - fallback to the podspec. + controllerRef = priorityutil.GetControllerRef(pod) + } - controllerRef := priorityutil.GetControllerRef(pod) if controllerRef != nil { // Ignore pods that are owned by other controller than ReplicationController // or ReplicaSet. From 26bd052ce2fec5c3c1b47ed96e1ff91ec669df37 Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Fri, 24 Nov 2017 11:42:05 +0800 Subject: [PATCH 101/794] fix comment about PodAffinityTerm in api --- pkg/apis/core/types.go | 2 +- staging/src/k8s.io/api/core/v1/generated.proto | 1 + staging/src/k8s.io/api/core/v1/types.go | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index 032e6d84c40..c452e1b7dfa 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -2338,13 +2338,13 @@ type PodAffinityTerm struct { LabelSelector *metav1.LabelSelector // namespaces specifies which namespaces the labelSelector applies to (matches against); // null or empty list means "this pod's namespace" + // +optional Namespaces []string // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the // selected pods is running. // Empty topologyKey is not allowed. 
- // +optional TopologyKey string } diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index 4f00a69e077..3fa5e2618db 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -2620,6 +2620,7 @@ message PodAffinityTerm { // namespaces specifies which namespaces the labelSelector applies to (matches against); // null or empty list means "this pod's namespace" + // +optional repeated string namespaces = 2; // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index bb5a5a104f4..a805ee85469 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -2587,6 +2587,7 @@ type PodAffinityTerm struct { LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` // namespaces specifies which namespaces the labelSelector applies to (matches against); // null or empty list means "this pod's namespace" + // +optional Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"` // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching // the labelSelector in the specified namespaces, where co-located is defined as running on a node From f55c1ea78e43b819aec6d392588e940bdc82d89d Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Fri, 24 Nov 2017 16:10:15 +0800 Subject: [PATCH 102/794] update IPVS readme --- pkg/proxy/ipvs/README.md | 57 ++++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/pkg/proxy/ipvs/README.md b/pkg/proxy/ipvs/README.md index c0f616c9217..fc24f20b939 100644 --- a/pkg/proxy/ipvs/README.md +++ b/pkg/proxy/ipvs/README.md @@ -10,32 +10,17 @@ Linux kernel. 
IPVS runs on a host and acts as a load balancer in front of a cluster of real servers. IPVS can direct requests for TCP and UDP-based services to the real servers, and make services of real servers appear as irtual services on a single IP address. -## How to use +## Run kube-proxy in ipvs mode -#### Load IPVS kernel modules +Currently, local-up scripts and kubeadm support switching IPVS proxy mode via exporting environment variables or specifying flags. -Currently the IPVS kernel module can't be loaded automatically, so first we should use the following command to load IPVS kernel -modules manually. - -```shell -modprobe ip_vs -modprobe ip_vs_rr -modprobe ip_vs_wrr -modprobe ip_vs_sh -modprobe nf_conntrack_ipv4 -``` - -After that, use `lsmod | grep ip_vs` to make sure kernel modules are loaded. - -#### Run kube-proxy in ipvs mode - -#### Local UP Cluster +### Local UP Cluster Kube-proxy will run in iptables mode by default in a [local-up cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md). Users should export the env `KUBEPROXY_MODE=ipvs` to specify the ipvs mode before deploying the cluster if want to run kube-proxy in ipvs mode. -#### Cluster Created by Kubeadm +### Cluster Created by Kubeadm Kube-proxy will run in iptables mode by default in a cluster deployed by [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/). @@ -56,12 +41,14 @@ apiVersion: kubeadm.k8s.io/v1alpha1 ... featureGates: SupportIPVSProxyMode: true -... +mode: ipvs ``` -#### Test +## Debug -Use `ipvsadm` tool to test whether the kube-proxy start succeed. By default we may get result like: +### Check IPVS proxy rules + +People can use `ipvsadm` tool to check whether kube-proxy are maintaining IPVS rules correctly. 
For example, we may get IPVS proxy rules like: ```shell # ipvsadm -ln @@ -73,3 +60,29 @@ TCP 10.0.0.1:443 rr persistent 10800 TCP 10.0.0.10:53 rr UDP 10.0.0.10:53 rr ``` + +### Why kube-proxy can't start IPVS mode + +People can do the following check list step by step: + +**1. Enable IPVS feature gateway** + +Currently IPVS-based kube-proxy is still in alpha phase, people need to enable `--feature-gates=SupportIPVSProxyMode=true` explicitly. + +**2. Specify proxy-mode=ipvs** + +Tell kube-proxy that proxy-mode=ipvs, please. + +**3. Load ipvs required kernel modules** + +The following kernel modules are required by IPVS-based kube-proxy: + +```shell +ip_vs +ip_vs_rr +ip_vs_wrr +ip_vs_sh +nf_conntrack_ipv4 +``` + +IPVS-based kube-proxy will load them automatically. If it fails to load them, please check whether they are compiled into your kernel. From 6e878767b6bcf644841bcff3a8859d9d51f6fe88 Mon Sep 17 00:00:00 2001 From: Andrew McDermott Date: Thu, 31 Aug 2017 13:01:07 +0100 Subject: [PATCH 103/794] Smoke test for OpenAPI paths in the test server Add a smoke test that checks for the existence of some representative paths from the apiextensions-server and the kube-aggregator server, both part of the delegation chain in kube-apiserver. 
--- test/integration/master/BUILD | 2 + .../integration/master/kube_apiserver_test.go | 72 ++++++++++++++++++- 2 files changed, 71 insertions(+), 3 deletions(-) diff --git a/test/integration/master/BUILD b/test/integration/master/BUILD index 2d658707950..a8d6598071c 100644 --- a/test/integration/master/BUILD +++ b/test/integration/master/BUILD @@ -29,6 +29,7 @@ go_test( "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -49,6 +50,7 @@ go_test( "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", ], ) diff --git a/test/integration/master/kube_apiserver_test.go b/test/integration/master/kube_apiserver_test.go index 73b4fc14cef..6beadcd67c5 100644 --- a/test/integration/master/kube_apiserver_test.go +++ b/test/integration/master/kube_apiserver_test.go @@ -17,21 +17,25 @@ limitations under the License. 
package master import ( + "encoding/json" + "strings" "testing" appsv1beta1 "k8s.io/api/apps/v1beta1" corev1 "k8s.io/api/core/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "k8s.io/kube-aggregator/pkg/apis/apiregistration" kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/test/integration/framework" ) func TestRun(t *testing.T) { - result := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd()) - defer result.TearDownFn() + server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd()) + defer server.TearDownFn() - client, err := kubernetes.NewForConfig(result.ClientConfig) + client, err := kubernetes.NewForConfig(server.ClientConfig) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -72,3 +76,65 @@ func TestRun(t *testing.T) { t.Fatalf("Failed to create deployment: %v", err) } } + +// TestOpenAPIDelegationChainPlumbing is a smoke test that checks for +// the existence of some representative paths from the +// apiextensions-server and the kube-aggregator server, both part of +// the delegation chain in kube-apiserver. 
+func TestOpenAPIDelegationChainPlumbing(t *testing.T) { + server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd()) + defer server.TearDownFn() + + kubeclient, err := kubernetes.NewForConfig(server.ClientConfig) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + result := kubeclient.RESTClient().Get().AbsPath("/swagger.json").Do() + status := 0 + result.StatusCode(&status) + if status != 200 { + t.Fatalf("GET /swagger.json failed: expected status=%d, got=%d", 200, status) + } + + raw, err := result.Raw() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + type openAPISchema struct { + Paths map[string]interface{} `json:"paths"` + } + + var doc openAPISchema + err = json.Unmarshal(raw, &doc) + if err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + + matchedExtension := false + extensionsPrefix := "/apis/" + apiextensions.GroupName + + matchedRegistration := false + registrationPrefix := "/apis/" + apiregistration.GroupName + + for path := range doc.Paths { + if strings.HasPrefix(path, extensionsPrefix) { + matchedExtension = true + } + if strings.HasPrefix(path, registrationPrefix) { + matchedRegistration = true + } + if matchedExtension && matchedRegistration { + return + } + } + + if !matchedExtension { + t.Errorf("missing path: %q", extensionsPrefix) + } + + if !matchedRegistration { + t.Errorf("missing path: %q", registrationPrefix) + } +} From 2f353314a75b49da652616f73cf77efa1ed03a64 Mon Sep 17 00:00:00 2001 From: Rohit Agarwal Date: Fri, 24 Nov 2017 02:37:32 -0800 Subject: [PATCH 104/794] Remove unused directories. 
--- pkg/BUILD | 1 - pkg/auth/authenticator/OWNERS | 1 - pkg/auth/group/OWNERS | 1 - pkg/auth/handlers/OWNERS | 1 - pkg/hyperkube/BUILD | 22 ---------------------- pkg/hyperkube/doc.go | 30 ------------------------------ 6 files changed, 56 deletions(-) delete mode 100755 pkg/auth/authenticator/OWNERS delete mode 100755 pkg/auth/group/OWNERS delete mode 100755 pkg/auth/handlers/OWNERS delete mode 100644 pkg/hyperkube/BUILD delete mode 100644 pkg/hyperkube/doc.go diff --git a/pkg/BUILD b/pkg/BUILD index 056e8e3304a..ce2de3dad29 100644 --- a/pkg/BUILD +++ b/pkg/BUILD @@ -85,7 +85,6 @@ filegroup( "//pkg/features:all-srcs", "//pkg/fieldpath:all-srcs", "//pkg/generated:all-srcs", - "//pkg/hyperkube:all-srcs", "//pkg/kubeapiserver:all-srcs", "//pkg/kubectl:all-srcs", "//pkg/kubelet:all-srcs", diff --git a/pkg/auth/authenticator/OWNERS b/pkg/auth/authenticator/OWNERS deleted file mode 100755 index 94487992079..00000000000 --- a/pkg/auth/authenticator/OWNERS +++ /dev/null @@ -1 +0,0 @@ -reviewers: diff --git a/pkg/auth/group/OWNERS b/pkg/auth/group/OWNERS deleted file mode 100755 index 94487992079..00000000000 --- a/pkg/auth/group/OWNERS +++ /dev/null @@ -1 +0,0 @@ -reviewers: diff --git a/pkg/auth/handlers/OWNERS b/pkg/auth/handlers/OWNERS deleted file mode 100755 index 94487992079..00000000000 --- a/pkg/auth/handlers/OWNERS +++ /dev/null @@ -1 +0,0 @@ -reviewers: diff --git a/pkg/hyperkube/BUILD b/pkg/hyperkube/BUILD deleted file mode 100644 index 3a623579705..00000000000 --- a/pkg/hyperkube/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - importpath = "k8s.io/kubernetes/pkg/hyperkube", 
-) diff --git a/pkg/hyperkube/doc.go b/pkg/hyperkube/doc.go deleted file mode 100644 index d66d3ac3c26..00000000000 --- a/pkg/hyperkube/doc.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package hyperkube is a framework for kubernetes server components. It -// allows us to combine all of the kubernetes server components into a single -// binary where the user selects which components to run in any individual -// process. -// -// Currently, only one server component can be run at once. As such there is -// no need to harmonize flags or identify logs across the various servers. In -// the future we will support launching and running many servers -- either by -// managing processes or running in-proc. -// -// This package is inspired by https://github.com/spf13/cobra. However, as -// the eventual goal is to run *multiple* servers from one call, a new package -// was needed. 
-package hyperkube // import "k8s.io/kubernetes/pkg/hyperkube" From e0edce33eff14ff1e5e6b5dc77d71b5b270851c1 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Wed, 22 Nov 2017 17:49:55 +0800 Subject: [PATCH 105/794] export ENABLE_POD_PRIORITY_PREEMPTION=true to enable Pod priority and preemption --- hack/local-up-cluster.sh | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index d2d000ee780..5fbfb08b794 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -71,6 +71,8 @@ FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=false"} STORAGE_BACKEND=${STORAGE_BACKEND:-"etcd3"} # enable swagger ui ENABLE_SWAGGER_UI=${ENABLE_SWAGGER_UI:-false} +# enable Pod priority and preemption +ENABLE_POD_PRIORITY_PREEMPTION=${ENABLE_POD_PRIORITY_PREEMPTION:-""} # enable kubernetes dashboard ENABLE_CLUSTER_DASHBOARD=${KUBE_ENABLE_CLUSTER_DASHBOARD:-false} @@ -115,11 +117,16 @@ if [ "${CLOUD_PROVIDER}" == "openstack" ]; then fi fi -#set feature gates if using ipvs mode +# set feature gates if using ipvs mode if [ "${KUBEPROXY_MODE}" == "ipvs" ]; then FEATURE_GATES="$FEATURE_GATES,SupportIPVSProxyMode=true" fi +# set feature gates if enable Pod priority and preemption +if [ "${ENABLE_POD_PRIORITY_PREEMPTION}" == true ]; then + FEATURE_GATES="$FEATURE_GATES,PodPriority=true" +fi + # warn if users are running with swap allowed if [ "${FAIL_SWAP_ON}" == "false" ]; then echo "WARNING : The kubelet is configured to not fail if swap is enabled; production deployments should disable swap." 
@@ -417,6 +424,14 @@ function start_apiserver { if [[ -n "${NODE_ADMISSION}" ]]; then security_admission=",NodeRestriction" fi + if [ "${ENABLE_POD_PRIORITY_PREEMPTION}" == true ]; then + security_admission=",Priority" + if [[ -n "${RUNTIME_CONFIG}" ]]; then + RUNTIME_CONFIG+="," + fi + RUNTIME_CONFIG+="scheduling.k8s.io/v1alpha1=true" + fi + # Admission Controllers to invoke prior to persisting objects in cluster ADMISSION_CONTROL=MutatingAdmissionWebhook,Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount${security_admission},DefaultStorageClass,DefaultTolerationSeconds,ValidatingAdmissionWebhook,ResourceQuota From 4b216f7cd9e3feab784233a4f2f3de03c5abd2f2 Mon Sep 17 00:00:00 2001 From: Rohit Agarwal Date: Fri, 24 Nov 2017 03:12:47 -0800 Subject: [PATCH 106/794] Remove redundant code in container manager. - Reuse stub implementations from unsupported implementations. - Delete test file that didn't contain any tests. --- pkg/kubelet/cm/BUILD | 9 +- .../cm/container_manager_unsupported.go | 57 +-------- .../cm/container_manager_unsupported_test.go | 116 ------------------ .../cm/pod_container_manager_unsupported.go | 30 +---- 4 files changed, 5 insertions(+), 207 deletions(-) delete mode 100644 pkg/kubelet/cm/container_manager_unsupported_test.go diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD index 127192c637d..e3c910faf2e 100644 --- a/pkg/kubelet/cm/BUILD +++ b/pkg/kubelet/cm/BUILD @@ -77,9 +77,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "container_manager_unsupported_test.go", - ] + select({ + srcs = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "cgroup_manager_linux_test.go", "cgroup_manager_test.go", @@ -91,11 +89,10 @@ go_test( }), importpath = "k8s.io/kubernetes/pkg/kubelet/cm", library = ":go_default_library", - deps = [ - "//pkg/util/mount:go_default_library", - ] + select({ + deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//pkg/kubelet/eviction/api:go_default_library", + 
"//pkg/util/mount:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/kubelet/cm/container_manager_unsupported.go b/pkg/kubelet/cm/container_manager_unsupported.go index 6453397b177..97a3a3a6337 100644 --- a/pkg/kubelet/cm/container_manager_unsupported.go +++ b/pkg/kubelet/cm/container_manager_unsupported.go @@ -25,16 +25,13 @@ import ( "k8s.io/client-go/tools/record" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/config" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) type unsupportedContainerManager struct { + containerManagerStub } var _ ContainerManager = &unsupportedContainerManager{} @@ -43,58 +40,6 @@ func (unsupportedContainerManager) Start(_ *v1.Node, _ ActivePodsFunc, _ config. 
return fmt.Errorf("Container Manager is unsupported in this build") } -func (unsupportedContainerManager) SystemCgroupsLimit() v1.ResourceList { - return v1.ResourceList{} -} - -func (unsupportedContainerManager) GetNodeConfig() NodeConfig { - return NodeConfig{} -} - -func (unsupportedContainerManager) GetMountedSubsystems() *CgroupSubsystems { - return &CgroupSubsystems{} -} - -func (unsupportedContainerManager) GetQOSContainersInfo() QOSContainersInfo { - return QOSContainersInfo{} -} - -func (unsupportedContainerManager) UpdateQOSCgroups() error { - return nil -} - -func (cm *unsupportedContainerManager) Status() Status { - return Status{} -} - -func (cm *unsupportedContainerManager) GetNodeAllocatableReservation() v1.ResourceList { - return nil -} - -func (cm *unsupportedContainerManager) GetCapacity() v1.ResourceList { - return nil -} - -func (cm *unsupportedContainerManager) GetDevicePluginResourceCapacity() (v1.ResourceList, []string) { - return nil, []string{} -} - -func (cm *unsupportedContainerManager) NewPodContainerManager() PodContainerManager { - return &unsupportedPodContainerManager{} -} - -func (cm *unsupportedContainerManager) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) { - return &kubecontainer.RunContainerOptions{}, nil -} - -func (cm *unsupportedContainerManager) UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error { - return nil -} - -func (cm *unsupportedContainerManager) InternalContainerLifecycle() InternalContainerLifecycle { - return &internalContainerLifecycleImpl{cpumanager.NewFakeManager()} -} - func NewContainerManager(_ mount.Interface, _ cadvisor.Interface, _ NodeConfig, failSwapOn bool, devicePluginEnabled bool, recorder record.EventRecorder) (ContainerManager, error) { return &unsupportedContainerManager{}, nil } diff --git a/pkg/kubelet/cm/container_manager_unsupported_test.go b/pkg/kubelet/cm/container_manager_unsupported_test.go deleted file 
mode 100644 index 592164b93f2..00000000000 --- a/pkg/kubelet/cm/container_manager_unsupported_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// +build !linux,!windows - -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/util/mount" -) - -type fakeMountInterface struct { - mountPoints []mount.MountPoint -} - -func (mi *fakeMountInterface) Mount(source string, target string, fstype string, options []string) error { - return fmt.Errorf("unsupported") -} - -func (mi *fakeMountInterface) Unmount(target string) error { - return fmt.Errorf("unsupported") -} - -func (mi *fakeMountInterface) List() ([]mount.MountPoint, error) { - return mi.mountPoints, nil -} - -func (mi *fakeMountInterface) IsMountPointMatch(mp mount.MountPoint, dir string) bool { - return (mp.Path == dir) -} - -func (mi *fakeMountInterface) IsNotMountPoint(dir string) (bool, error) { - return false, fmt.Errorf("unsupported") -} - -func (mi *fakeMountInterface) IsLikelyNotMountPoint(file string) (bool, error) { - return false, fmt.Errorf("unsupported") -} -func (mi *fakeMountInterface) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { - return "", nil -} - -func (mi *fakeMountInterface) DeviceOpened(pathname string) (bool, error) { - for _, mp := range mi.mountPoints { - if mp.Device == pathname { - return true, nil - } - } - return false, nil -} - -func (mi *fakeMountInterface) PathIsDevice(pathname string) 
(bool, error) { - return true, nil -} - -func (mi *fakeMountInterface) MakeRShared(path string) error { - return nil -} - -func (mi *fakeMountInterface) GetFileType(pathname string) (mount.FileType, error) { - return mount.FileType("fake"), nil -} - -func (mi *fakeMountInterface) MakeDir(pathname string) error { - return nil -} - -func (mi *fakeMountInterface) MakeFile(pathname string) error { - return nil -} - -func (mi *fakeMountInterface) ExistsPath(pathname string) bool { - return true -} - -func fakeContainerMgrMountInt() mount.Interface { - return &fakeMountInterface{ - []mount.MountPoint{ - { - Device: "cgroup", - Type: "cgroup", - Opts: []string{"rw", "relatime", "cpuset"}, - }, - { - Device: "cgroup", - Type: "cgroup", - Opts: []string{"rw", "relatime", "cpu"}, - }, - { - Device: "cgroup", - Type: "cgroup", - Opts: []string{"rw", "relatime", "cpuacct"}, - }, - { - Device: "cgroup", - Type: "cgroup", - Opts: []string{"rw", "relatime", "memory"}, - }, - }, - } -} diff --git a/pkg/kubelet/cm/pod_container_manager_unsupported.go b/pkg/kubelet/cm/pod_container_manager_unsupported.go index e69542b1823..d62eb7fa4e8 100644 --- a/pkg/kubelet/cm/pod_container_manager_unsupported.go +++ b/pkg/kubelet/cm/pod_container_manager_unsupported.go @@ -18,36 +18,8 @@ limitations under the License. 
package cm -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" -) - type unsupportedPodContainerManager struct { + podContainerManagerStub } var _ PodContainerManager = &unsupportedPodContainerManager{} - -func (m *unsupportedPodContainerManager) Exists(_ *v1.Pod) bool { - return true -} - -func (m *unsupportedPodContainerManager) EnsureExists(_ *v1.Pod) error { - return nil -} - -func (m *unsupportedPodContainerManager) GetPodContainerName(_ *v1.Pod) (CgroupName, string) { - return "", "" -} - -func (m *unsupportedPodContainerManager) ReduceCPULimits(_ CgroupName) error { - return nil -} - -func (m *unsupportedPodContainerManager) GetAllPodsFromCgroups() (map[types.UID]CgroupName, error) { - return nil, nil -} - -func (m *unsupportedPodContainerManager) Destroy(name CgroupName) error { - return nil -} From de358fb21f9a89d935842c21c209359970c123d6 Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Thu, 9 Nov 2017 03:50:25 -0500 Subject: [PATCH 107/794] Use file store utility for device plugin check-pointing --- pkg/kubelet/cm/deviceplugin/BUILD | 4 +++ pkg/kubelet/cm/deviceplugin/manager.go | 30 ++++++++++++++------- pkg/kubelet/cm/deviceplugin/manager_test.go | 5 ++++ 3 files changed, 30 insertions(+), 9 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/BUILD b/pkg/kubelet/cm/deviceplugin/BUILD index 9c91c2e9df2..8474da6fac7 100644 --- a/pkg/kubelet/cm/deviceplugin/BUILD +++ b/pkg/kubelet/cm/deviceplugin/BUILD @@ -24,6 +24,8 @@ go_library( "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/metrics:go_default_library", + "//pkg/kubelet/util/store:go_default_library", + "//pkg/util/filesystem:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", @@ -58,6 +60,8 @@ go_test( deps = [ "//pkg/kubelet/apis/deviceplugin/v1alpha:go_default_library", 
"//pkg/kubelet/lifecycle:go_default_library", + "//pkg/kubelet/util/store:go_default_library", + "//pkg/util/filesystem:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/pkg/kubelet/cm/deviceplugin/manager.go b/pkg/kubelet/cm/deviceplugin/manager.go index 6535479fe43..fc8af83ce51 100644 --- a/pkg/kubelet/cm/deviceplugin/manager.go +++ b/pkg/kubelet/cm/deviceplugin/manager.go @@ -19,7 +19,6 @@ package deviceplugin import ( "encoding/json" "fmt" - "io/ioutil" "net" "os" "path/filepath" @@ -38,6 +37,8 @@ import ( "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/metrics" + utilstore "k8s.io/kubernetes/pkg/kubelet/util/store" + utilfs "k8s.io/kubernetes/pkg/util/filesystem" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -80,6 +81,7 @@ type ManagerImpl struct { // podDevices contains pod to allocated device mapping. podDevices podDevices + store utilstore.Store } type sourcesReadyStub struct{} @@ -114,6 +116,11 @@ func newManagerImpl(socketPath string) (*ManagerImpl, error) { // Before that, initializes them to perform no-op operations. 
manager.activePods = func() []*v1.Pod { return []*v1.Pod{} } manager.sourcesReady = &sourcesReadyStub{} + var err error + manager.store, err = utilstore.NewFileStore(dir, utilfs.DefaultFs{}) + if err != nil { + return nil, fmt.Errorf("failed to initialize device plugin checkpointing store: %+v", err) + } return manager, nil } @@ -415,22 +422,27 @@ func (m *ManagerImpl) writeCheckpoint() error { if err != nil { return err } - filepath := m.checkpointFile() - return ioutil.WriteFile(filepath, dataJSON, 0644) + err = m.store.Write(kubeletDevicePluginCheckpoint, dataJSON) + if err != nil { + return fmt.Errorf("failed to write deviceplugin checkpoint file %q: %v", kubeletDevicePluginCheckpoint, err) + } + return nil } // Reads device to container allocation information from disk, and populates // m.allocatedDevices accordingly. func (m *ManagerImpl) readCheckpoint() error { - filepath := m.checkpointFile() - content, err := ioutil.ReadFile(filepath) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to read checkpoint file %q: %v", filepath, err) + content, err := m.store.Read(kubeletDevicePluginCheckpoint) + if err != nil { + if err == utilstore.ErrKeyNotFound { + return nil + } + return fmt.Errorf("failed to read checkpoint file %q: %v", kubeletDevicePluginCheckpoint, err) } - glog.V(2).Infof("Read checkpoint file %s\n", filepath) + glog.V(4).Infof("Read checkpoint file %s\n", kubeletDevicePluginCheckpoint) var data checkpointData if err := json.Unmarshal(content, &data); err != nil { - return fmt.Errorf("failed to unmarshal checkpoint data: %v", err) + return fmt.Errorf("failed to unmarshal deviceplugin checkpoint data: %v", err) } m.mutex.Lock() diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index 9a74ec93b50..f1139dbc8f9 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -33,6 +33,8 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" 
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" "k8s.io/kubernetes/pkg/kubelet/lifecycle" + utilstore "k8s.io/kubernetes/pkg/kubelet/util/store" + utilfs "k8s.io/kubernetes/pkg/util/filesystem" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -265,6 +267,7 @@ func TestCheckpoint(t *testing.T) { allocatedDevices: make(map[string]sets.String), podDevices: make(podDevices), } + testManager.store, _ = utilstore.NewFileStore("/tmp/", utilfs.DefaultFs{}) testManager.podDevices.insert("pod1", "con1", resourceName1, constructDevices([]string{"dev1", "dev2"}), @@ -394,6 +397,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) { activePods: podsStub.getActivePods, sourcesReady: &sourcesReadyStub{}, } + testManager.store, _ = utilstore.NewFileStore("/tmp/", utilfs.DefaultFs{}) testManager.allDevices[resourceName1] = sets.NewString() testManager.allDevices[resourceName1].Insert(devID1) @@ -557,6 +561,7 @@ func TestSanitizeNodeAllocatable(t *testing.T) { allocatedDevices: make(map[string]sets.String), podDevices: make(podDevices), } + testManager.store, _ = utilstore.NewFileStore("/tmp/", utilfs.DefaultFs{}) // require one of resource1 and one of resource2 testManager.allocatedDevices[resourceName1] = sets.NewString() testManager.allocatedDevices[resourceName1].Insert(devID1) From 2b95212ad339e00de1cffd49a0de67ce6058ddd0 Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Fri, 24 Nov 2017 17:10:39 +0100 Subject: [PATCH 108/794] admission_test.go(TestAdmitPreferNonmutating): simplify test by replacing expectedPodUser by a constant value. 
--- .../security/podsecuritypolicy/admission_test.go | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go b/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go index 2b99c0b4c29..0aae6fafec0 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go +++ b/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go @@ -352,7 +352,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { shouldPassAdmit bool shouldPassValidate bool expectMutation bool - expectedPodUser *int64 expectedContainerUser *int64 expectedPSP string }{ @@ -363,7 +362,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { shouldPassAdmit: true, shouldPassValidate: true, expectMutation: false, - expectedPodUser: nil, expectedContainerUser: nil, expectedPSP: privilegedPSP.Name, }, @@ -374,7 +372,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { shouldPassAdmit: true, shouldPassValidate: true, expectMutation: false, - expectedPodUser: nil, expectedContainerUser: nil, expectedPSP: privilegedPSP.Name, }, @@ -385,7 +382,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { shouldPassAdmit: true, shouldPassValidate: true, expectMutation: true, - expectedPodUser: nil, expectedContainerUser: &mutating1.Spec.RunAsUser.Ranges[0].Min, expectedPSP: mutating1.Name, }, @@ -397,7 +393,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { shouldPassAdmit: true, shouldPassValidate: true, expectMutation: false, - expectedPodUser: nil, expectedContainerUser: nil, expectedPSP: privilegedPSP.Name, }, @@ -409,7 +404,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { shouldPassAdmit: true, shouldPassValidate: false, expectMutation: false, - expectedPodUser: nil, expectedContainerUser: nil, expectedPSP: "", }, @@ -421,7 +415,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { shouldPassAdmit: true, shouldPassValidate: true, expectMutation: false, - expectedPodUser: nil, 
expectedContainerUser: nil, expectedPSP: "", }, @@ -433,7 +426,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { shouldPassAdmit: true, shouldPassValidate: true, expectMutation: false, - expectedPodUser: nil, expectedContainerUser: nil, expectedPSP: "", }, @@ -447,10 +439,8 @@ func TestAdmitPreferNonmutating(t *testing.T) { if v.pod.Spec.SecurityContext != nil { actualPodUser = v.pod.Spec.SecurityContext.RunAsUser } - if (actualPodUser == nil) != (v.expectedPodUser == nil) { - t.Errorf("%s expected pod user %v, got %v", k, v.expectedPodUser, actualPodUser) - } else if actualPodUser != nil && *actualPodUser != *v.expectedPodUser { - t.Errorf("%s expected pod user %v, got %v", k, *v.expectedPodUser, *actualPodUser) + if actualPodUser != nil { + t.Errorf("%s expected pod user nil, got %v", k, *actualPodUser) } actualContainerUser := (*int64)(nil) From b1ae1d67b209f445a1254eb7a53a14bdcabebf3d Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Fri, 24 Nov 2017 17:11:51 +0100 Subject: [PATCH 109/794] admission_test.go(TestAdmitPreferNonmutating): simplify test by replacing shouldPassAdmit by a constant value. 
--- .../podsecuritypolicy/admission_test.go | 42 +++++++------------ 1 file changed, 16 insertions(+), 26 deletions(-) diff --git a/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go b/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go index 0aae6fafec0..8b2c2b701ed 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go +++ b/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go @@ -349,7 +349,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { pod *kapi.Pod podBeforeUpdate *kapi.Pod psps []*extensions.PodSecurityPolicy - shouldPassAdmit bool shouldPassValidate bool expectMutation bool expectedContainerUser *int64 @@ -359,7 +358,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { operation: kadmission.Create, pod: unprivilegedRunAsAnyPod.DeepCopy(), psps: []*extensions.PodSecurityPolicy{privilegedPSP}, - shouldPassAdmit: true, shouldPassValidate: true, expectMutation: false, expectedContainerUser: nil, @@ -369,7 +367,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { operation: kadmission.Create, pod: unprivilegedRunAsAnyPod.DeepCopy(), psps: []*extensions.PodSecurityPolicy{mutating2, mutating1, privilegedPSP}, - shouldPassAdmit: true, shouldPassValidate: true, expectMutation: false, expectedContainerUser: nil, @@ -379,7 +376,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { operation: kadmission.Create, pod: unprivilegedRunAsAnyPod.DeepCopy(), psps: []*extensions.PodSecurityPolicy{mutating2, mutating1}, - shouldPassAdmit: true, shouldPassValidate: true, expectMutation: true, expectedContainerUser: &mutating1.Spec.RunAsUser.Ranges[0].Min, @@ -390,7 +386,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { pod: changedPodWithSC.DeepCopy(), podBeforeUpdate: podWithSC.DeepCopy(), psps: []*extensions.PodSecurityPolicy{mutating2, mutating1, privilegedPSP}, - shouldPassAdmit: true, shouldPassValidate: true, expectMutation: false, expectedContainerUser: nil, @@ -401,7 +396,6 @@ func 
TestAdmitPreferNonmutating(t *testing.T) { pod: changedPod.DeepCopy(), podBeforeUpdate: unprivilegedRunAsAnyPod.DeepCopy(), psps: []*extensions.PodSecurityPolicy{mutating2, mutating1}, - shouldPassAdmit: true, shouldPassValidate: false, expectMutation: false, expectedContainerUser: nil, @@ -412,7 +406,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { pod: unprivilegedRunAsAnyPod.DeepCopy(), podBeforeUpdate: unprivilegedRunAsAnyPod.DeepCopy(), psps: []*extensions.PodSecurityPolicy{mutating2, mutating1}, - shouldPassAdmit: true, shouldPassValidate: true, expectMutation: false, expectedContainerUser: nil, @@ -423,7 +416,6 @@ func TestAdmitPreferNonmutating(t *testing.T) { pod: gcChangedPod.DeepCopy(), podBeforeUpdate: unprivilegedRunAsAnyPod.DeepCopy(), psps: []*extensions.PodSecurityPolicy{mutating2, mutating1}, - shouldPassAdmit: true, shouldPassValidate: true, expectMutation: false, expectedContainerUser: nil, @@ -432,26 +424,24 @@ func TestAdmitPreferNonmutating(t *testing.T) { } for k, v := range tests { - testPSPAdmitAdvanced(k, v.operation, v.psps, nil, &user.DefaultInfo{}, v.pod, v.podBeforeUpdate, v.shouldPassAdmit, v.shouldPassValidate, v.expectMutation, v.expectedPSP, t) + testPSPAdmitAdvanced(k, v.operation, v.psps, nil, &user.DefaultInfo{}, v.pod, v.podBeforeUpdate, true, v.shouldPassValidate, v.expectMutation, v.expectedPSP, t) - if v.shouldPassAdmit { - actualPodUser := (*int64)(nil) - if v.pod.Spec.SecurityContext != nil { - actualPodUser = v.pod.Spec.SecurityContext.RunAsUser - } - if actualPodUser != nil { - t.Errorf("%s expected pod user nil, got %v", k, *actualPodUser) - } + actualPodUser := (*int64)(nil) + if v.pod.Spec.SecurityContext != nil { + actualPodUser = v.pod.Spec.SecurityContext.RunAsUser + } + if actualPodUser != nil { + t.Errorf("%s expected pod user nil, got %v", k, *actualPodUser) + } - actualContainerUser := (*int64)(nil) - if v.pod.Spec.Containers[0].SecurityContext != nil { - actualContainerUser = 
v.pod.Spec.Containers[0].SecurityContext.RunAsUser - } - if (actualContainerUser == nil) != (v.expectedContainerUser == nil) { - t.Errorf("%s expected container user %v, got %v", k, v.expectedContainerUser, actualContainerUser) - } else if actualContainerUser != nil && *actualContainerUser != *v.expectedContainerUser { - t.Errorf("%s expected container user %v, got %v", k, *v.expectedContainerUser, *actualContainerUser) - } + actualContainerUser := (*int64)(nil) + if v.pod.Spec.Containers[0].SecurityContext != nil { + actualContainerUser = v.pod.Spec.Containers[0].SecurityContext.RunAsUser + } + if (actualContainerUser == nil) != (v.expectedContainerUser == nil) { + t.Errorf("%s expected container user %v, got %v", k, v.expectedContainerUser, actualContainerUser) + } else if actualContainerUser != nil && *actualContainerUser != *v.expectedContainerUser { + t.Errorf("%s expected container user %v, got %v", k, *v.expectedContainerUser, *actualContainerUser) } } } From 8ed0bc12504b05bac0f1f4334ba26708207d9987 Mon Sep 17 00:00:00 2001 From: Ferran Rodenas Date: Thu, 19 Oct 2017 15:44:29 +0200 Subject: [PATCH 110/794] Send events on ip and port allocator repair controller errors Signed-off-by: Ferran Rodenas --- pkg/master/controller.go | 8 ++++--- pkg/master/master.go | 2 +- .../core/service/ipallocator/controller/BUILD | 3 +++ .../service/ipallocator/controller/repair.go | 24 ++++++++++++++----- .../ipallocator/controller/repair_test.go | 8 +++---- .../service/portallocator/controller/BUILD | 3 +++ .../portallocator/controller/repair.go | 23 +++++++++++++----- .../portallocator/controller/repair_test.go | 8 +++---- 8 files changed, 55 insertions(+), 24 deletions(-) diff --git a/pkg/master/controller.go b/pkg/master/controller.go index ef919b2963d..291078b2710 100644 --- a/pkg/master/controller.go +++ b/pkg/master/controller.go @@ -48,6 +48,7 @@ const kubernetesServiceName = "kubernetes" type Controller struct { ServiceClient coreclient.ServicesGetter NamespaceClient 
coreclient.NamespacesGetter + EventClient coreclient.EventsGetter ServiceClusterIPRegistry rangeallocation.RangeRegistry ServiceClusterIPInterval time.Duration @@ -77,10 +78,11 @@ type Controller struct { } // NewBootstrapController returns a controller for watching the core capabilities of the master -func (c *completedConfig) NewBootstrapController(legacyRESTStorage corerest.LegacyRESTStorage, serviceClient coreclient.ServicesGetter, nsClient coreclient.NamespacesGetter) *Controller { +func (c *completedConfig) NewBootstrapController(legacyRESTStorage corerest.LegacyRESTStorage, serviceClient coreclient.ServicesGetter, nsClient coreclient.NamespacesGetter, eventClient coreclient.EventsGetter) *Controller { return &Controller{ ServiceClient: serviceClient, NamespaceClient: nsClient, + EventClient: eventClient, EndpointReconciler: c.ExtraConfig.EndpointReconcilerConfig.Reconciler, EndpointInterval: c.ExtraConfig.EndpointReconcilerConfig.Interval, @@ -124,8 +126,8 @@ func (c *Controller) Start() { return } - repairClusterIPs := servicecontroller.NewRepair(c.ServiceClusterIPInterval, c.ServiceClient, &c.ServiceClusterIPRange, c.ServiceClusterIPRegistry) - repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceClient, c.ServiceNodePortRange, c.ServiceNodePortRegistry) + repairClusterIPs := servicecontroller.NewRepair(c.ServiceClusterIPInterval, c.ServiceClient, c.EventClient, &c.ServiceClusterIPRange, c.ServiceClusterIPRegistry) + repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceClient, c.EventClient, c.ServiceNodePortRange, c.ServiceNodePortRegistry) // run all of the controllers once prior to returning from Start. 
if err := repairClusterIPs.RunOnce(); err != nil { diff --git a/pkg/master/master.go b/pkg/master/master.go index b20a6f5e929..27353e4d9ce 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -374,7 +374,7 @@ func (m *Master) InstallLegacyAPI(c *completedConfig, restOptionsGetter generic. if c.ExtraConfig.EnableCoreControllers { controllerName := "bootstrap-controller" coreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig) - bootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient) + bootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient) m.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook) m.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook) } diff --git a/pkg/registry/core/service/ipallocator/controller/BUILD b/pkg/registry/core/service/ipallocator/controller/BUILD index b6a8f0a64d1..43498a212ec 100644 --- a/pkg/registry/core/service/ipallocator/controller/BUILD +++ b/pkg/registry/core/service/ipallocator/controller/BUILD @@ -11,15 +11,18 @@ go_library( srcs = ["repair.go"], importpath = "k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller", deps = [ + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/registry/core/rangeallocation:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + 
"//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", ], ) diff --git a/pkg/registry/core/service/ipallocator/controller/repair.go b/pkg/registry/core/service/ipallocator/controller/repair.go index fa2cae7f3a2..b4aaf1c289c 100644 --- a/pkg/registry/core/service/ipallocator/controller/repair.go +++ b/pkg/registry/core/service/ipallocator/controller/repair.go @@ -21,11 +21,14 @@ import ( "net" "time" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" + "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" @@ -54,6 +57,7 @@ type Repair struct { network *net.IPNet alloc rangeallocation.RangeRegistry leaks map[string]int // counter per leaked IP + recorder record.EventRecorder } // How many times we need to detect a leak before we clean up. This is to @@ -62,13 +66,18 @@ const numRepairsBeforeLeakCleanup = 3 // NewRepair creates a controller that periodically ensures that all clusterIPs are uniquely allocated across the cluster // and generates informational warnings for a cluster that is not in sync. 
-func NewRepair(interval time.Duration, serviceClient coreclient.ServicesGetter, network *net.IPNet, alloc rangeallocation.RangeRegistry) *Repair { +func NewRepair(interval time.Duration, serviceClient coreclient.ServicesGetter, eventClient coreclient.EventsGetter, network *net.IPNet, alloc rangeallocation.RangeRegistry) *Repair { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartRecordingToSink(&coreclient.EventSinkImpl{Interface: eventClient.Events("")}) + recorder := eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "ipallocator-repair-controller"}) + return &Repair{ interval: interval, serviceClient: serviceClient, network: network, alloc: alloc, leaks: map[string]int{}, + recorder: recorder, } } @@ -136,6 +145,7 @@ func (c *Repair) runOnce() error { ip := net.ParseIP(svc.Spec.ClusterIP) if ip == nil { // cluster IP is corrupt + c.recorder.Eventf(&svc, v1.EventTypeWarning, "ClusterIPNotValid", "Cluster IP %s is not a valid IP; please recreate service", svc.Spec.ClusterIP) runtime.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.ClusterIP, svc.Name, svc.Namespace)) continue } @@ -147,22 +157,24 @@ func (c *Repair) runOnce() error { stored.Release(ip) } else { // cluster IP doesn't seem to be allocated - runtime.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not allocated; repairing", svc.Spec.ClusterIP, svc.Name, svc.Namespace)) + c.recorder.Eventf(&svc, v1.EventTypeWarning, "ClusterIPNotAllocated", "Cluster IP %s is not allocated; repairing", ip) + runtime.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not allocated; repairing", ip, svc.Name, svc.Namespace)) } delete(c.leaks, ip.String()) // it is used, so it can't be leaked case ipallocator.ErrAllocated: - // TODO: send event // cluster IP is duplicate + c.recorder.Eventf(&svc, v1.EventTypeWarning, "ClusterIPAlreadyAllocated", "Cluster IP %s was assigned to multiple services; 
please recreate service", ip) runtime.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace)) case err.(*ipallocator.ErrNotInRange): - // TODO: send event // cluster IP is out of range + c.recorder.Eventf(&svc, v1.EventTypeWarning, "ClusterIPOutOfRange", "Cluster IP %s is not within the service CIDR %s; please recreate service", ip, c.network) runtime.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network)) case ipallocator.ErrFull: - // TODO: send event // somehow we are out of IPs - return fmt.Errorf("the service CIDR %v is full; you must widen the CIDR in order to create new services", rebuilt) + c.recorder.Eventf(&svc, v1.EventTypeWarning, "ServiceCIDRFull", "Service CIDR %s is full; you must widen the CIDR in order to create new services", c.network) + return fmt.Errorf("the service CIDR %s is full; you must widen the CIDR in order to create new services", c.network) default: + c.recorder.Eventf(&svc, v1.EventTypeWarning, "UnknownError", "Unable to allocate cluster IP %s due to an unknown error", ip) return fmt.Errorf("unable to allocate cluster IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err) } } diff --git a/pkg/registry/core/service/ipallocator/controller/repair_test.go b/pkg/registry/core/service/ipallocator/controller/repair_test.go index 51392b843a2..0bbd6f5959c 100644 --- a/pkg/registry/core/service/ipallocator/controller/repair_test.go +++ b/pkg/registry/core/service/ipallocator/controller/repair_test.go @@ -55,7 +55,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "192.168.1.0/24"}, } _, cidr, _ := net.ParseCIDR(ipregistry.item.Range) - r := NewRepair(0, fakeClient.Core(), cidr, ipregistry) + r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) if err := r.RunOnce(); err != nil { 
t.Fatal(err) @@ -68,7 +68,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "192.168.1.0/24"}, updateErr: fmt.Errorf("test error"), } - r = NewRepair(0, fakeClient.Core(), cidr, ipregistry) + r = NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) if err := r.RunOnce(); !strings.Contains(err.Error(), ": test error") { t.Fatal(err) } @@ -96,7 +96,7 @@ func TestRepairLeak(t *testing.T) { }, } - r := NewRepair(0, fakeClient.Core(), cidr, ipregistry) + r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) // Run through the "leak detection holdoff" loops. for i := 0; i < (numRepairsBeforeLeakCleanup - 1); i++ { if err := r.RunOnce(); err != nil { @@ -169,7 +169,7 @@ func TestRepairWithExisting(t *testing.T) { Data: dst.Data, }, } - r := NewRepair(0, fakeClient.Core(), cidr, ipregistry) + r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) if err := r.RunOnce(); err != nil { t.Fatal(err) } diff --git a/pkg/registry/core/service/portallocator/controller/BUILD b/pkg/registry/core/service/portallocator/controller/BUILD index 01408f6fba6..3483044bcd7 100644 --- a/pkg/registry/core/service/portallocator/controller/BUILD +++ b/pkg/registry/core/service/portallocator/controller/BUILD @@ -11,16 +11,19 @@ go_library( srcs = ["repair.go"], importpath = "k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller", deps = [ + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/registry/core/rangeallocation:go_default_library", "//pkg/registry/core/service:go_default_library", "//pkg/registry/core/service/portallocator:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", ], ) diff --git a/pkg/registry/core/service/portallocator/controller/repair.go b/pkg/registry/core/service/portallocator/controller/repair.go index e02ca3ef1e4..e7024ded053 100644 --- a/pkg/registry/core/service/portallocator/controller/repair.go +++ b/pkg/registry/core/service/portallocator/controller/repair.go @@ -20,12 +20,15 @@ import ( "fmt" "time" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" + "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/registry/core/rangeallocation" @@ -40,6 +43,7 @@ type Repair struct { portRange net.PortRange alloc rangeallocation.RangeRegistry leaks map[int]int // counter per leaked port + recorder record.EventRecorder } // How many times we need to detect a leak before we clean up. This is to @@ -48,13 +52,18 @@ const numRepairsBeforeLeakCleanup = 3 // NewRepair creates a controller that periodically ensures that all ports are uniquely allocated across the cluster // and generates informational warnings for a cluster that is not in sync. 
-func NewRepair(interval time.Duration, serviceClient coreclient.ServicesGetter, portRange net.PortRange, alloc rangeallocation.RangeRegistry) *Repair { +func NewRepair(interval time.Duration, serviceClient coreclient.ServicesGetter, eventClient coreclient.EventsGetter, portRange net.PortRange, alloc rangeallocation.RangeRegistry) *Repair { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartRecordingToSink(&coreclient.EventSinkImpl{Interface: eventClient.Events("")}) + recorder := eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "portallocator-repair-controller"}) + return &Repair{ interval: interval, serviceClient: serviceClient, portRange: portRange, alloc: alloc, leaks: map[int]int{}, + recorder: recorder, } } @@ -130,22 +139,24 @@ func (c *Repair) runOnce() error { stored.Release(port) } else { // doesn't seem to be allocated + c.recorder.Eventf(svc, v1.EventTypeWarning, "PortNotAllocated", "Port %d is not allocated; repairing", port) runtime.HandleError(fmt.Errorf("the node port %d for service %s/%s is not allocated; repairing", port, svc.Name, svc.Namespace)) } delete(c.leaks, port) // it is used, so it can't be leaked case portallocator.ErrAllocated: - // TODO: send event // port is duplicate, reallocate + c.recorder.Eventf(svc, v1.EventTypeWarning, "PortAlreadyAllocated", "Port %d was assigned to multiple services; please recreate service", port) runtime.HandleError(fmt.Errorf("the node port %d for service %s/%s was assigned to multiple services; please recreate", port, svc.Name, svc.Namespace)) case err.(*portallocator.ErrNotInRange): - // TODO: send event // port is out of range, reallocate - runtime.HandleError(fmt.Errorf("the port %d for service %s/%s is not within the port range %v; please recreate", port, svc.Name, svc.Namespace, c.portRange)) + c.recorder.Eventf(svc, v1.EventTypeWarning, "PortOutOfRange", "Port %d is not within the port range %s; please recreate service", port, c.portRange) + 
runtime.HandleError(fmt.Errorf("the port %d for service %s/%s is not within the port range %s; please recreate", port, svc.Name, svc.Namespace, c.portRange)) case portallocator.ErrFull: - // TODO: send event // somehow we are out of ports - return fmt.Errorf("the port range %v is full; you must widen the port range in order to create new services", c.portRange) + c.recorder.Eventf(svc, v1.EventTypeWarning, "PortRangeFull", "Port range %s is full; you must widen the port range in order to create new services", c.portRange) + return fmt.Errorf("the port range %s is full; you must widen the port range in order to create new services", c.portRange) default: + c.recorder.Eventf(svc, v1.EventTypeWarning, "UnknownError", "Unable to allocate port %d due to an unknown error", port) return fmt.Errorf("unable to allocate port %d for service %s/%s due to an unknown error, exiting: %v", port, svc.Name, svc.Namespace, err) } } diff --git a/pkg/registry/core/service/portallocator/controller/repair_test.go b/pkg/registry/core/service/portallocator/controller/repair_test.go index a0116062531..151c791cc39 100644 --- a/pkg/registry/core/service/portallocator/controller/repair_test.go +++ b/pkg/registry/core/service/portallocator/controller/repair_test.go @@ -55,7 +55,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "100-200"}, } pr, _ := net.ParsePortRange(registry.item.Range) - r := NewRepair(0, fakeClient.Core(), *pr, registry) + r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) if err := r.RunOnce(); err != nil { t.Fatal(err) @@ -68,7 +68,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "100-200"}, updateErr: fmt.Errorf("test error"), } - r = NewRepair(0, fakeClient.Core(), *pr, registry) + r = NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) if err := r.RunOnce(); !strings.Contains(err.Error(), ": test error") { t.Fatal(err) } @@ -96,7 +96,7 @@ func TestRepairLeak(t *testing.T) { }, } - r := 
NewRepair(0, fakeClient.Core(), *pr, registry) + r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) // Run through the "leak detection holdoff" loops. for i := 0; i < (numRepairsBeforeLeakCleanup - 1); i++ { if err := r.RunOnce(); err != nil { @@ -175,7 +175,7 @@ func TestRepairWithExisting(t *testing.T) { Data: dst.Data, }, } - r := NewRepair(0, fakeClient.Core(), *pr, registry) + r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) if err := r.RunOnce(); err != nil { t.Fatal(err) } From 032416c75deddfdd826beadccb6da2b910b6534d Mon Sep 17 00:00:00 2001 From: supereagle Date: Sun, 12 Nov 2017 19:00:21 +0800 Subject: [PATCH 111/794] use core client with explicit version fix more usage of deprecated core client --- .../resourcequota/resource_quota_controller_test.go | 2 +- .../volume/persistentvolume/pv_controller.go | 2 +- pkg/kubectl/cmd/drain.go | 8 ++++---- pkg/kubectl/cmd/top_node.go | 4 ++-- pkg/kubectl/cmd/top_pod.go | 4 ++-- pkg/kubectl/cmd/util/env/env_resolve.go | 4 ++-- pkg/volume/azure_file/azure_util.go | 4 ++-- pkg/volume/cephfs/cephfs.go | 2 +- pkg/volume/iscsi/attacher.go | 2 +- pkg/volume/iscsi/iscsi.go | 2 +- pkg/volume/rbd/rbd.go | 2 +- test/e2e/auth/pod_security_policy.go | 10 +++++----- test/e2e/framework/deployment_util.go | 2 +- .../instrumentation/logging/elasticsearch/kibana.go | 6 +++--- .../e2e/instrumentation/logging/elasticsearch/utils.go | 10 +++++----- test/e2e/multicluster/ubernetes_lite.go | 2 +- test/e2e/network/service.go | 2 +- test/e2e/upgrades/apps/daemonsets.go | 4 ++-- test/e2e/upgrades/apps/statefulset.go | 2 +- test/integration/deployment/util.go | 2 +- 20 files changed, 38 insertions(+), 38 deletions(-) diff --git a/pkg/controller/resourcequota/resource_quota_controller_test.go b/pkg/controller/resourcequota/resource_quota_controller_test.go index dcbbad8b7ba..6ed7ba9c968 100644 --- a/pkg/controller/resourcequota/resource_quota_controller_test.go +++ 
b/pkg/controller/resourcequota/resource_quota_controller_test.go @@ -89,7 +89,7 @@ func setupQuotaController(t *testing.T, kubeClient kubernetes.Interface, lister alwaysStarted := make(chan struct{}) close(alwaysStarted) resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{ - QuotaClient: kubeClient.Core(), + QuotaClient: kubeClient.CoreV1(), ResourceQuotaInformer: informerFactory.Core().V1().ResourceQuotas(), ResyncPeriod: controller.NoResyncPeriodFunc, ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc, diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index 980d960c750..9614cb65688 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -802,7 +802,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV // API server. The claim is not modified in this method! func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, claim *v1.PersistentVolumeClaim, updateCache bool) (*v1.PersistentVolume, error) { glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volumeClone.Name) - newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) if err != nil { glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimToClaimKey(claim), err) return newVol, err diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 910654f6f6a..39bb33121f5 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -331,7 +331,7 @@ func (o *DrainOptions) deleteOrEvictPodsSimple(nodeInfo *resource.Info) error { func (o *DrainOptions) getController(namespace string, controllerRef *metav1.OwnerReference) (interface{}, error) { switch controllerRef.Kind { case 
"ReplicationController": - return o.client.Core().ReplicationControllers(namespace).Get(controllerRef.Name, metav1.GetOptions{}) + return o.client.CoreV1().ReplicationControllers(namespace).Get(controllerRef.Name, metav1.GetOptions{}) case "DaemonSet": return o.client.Extensions().DaemonSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) case "Job": @@ -455,7 +455,7 @@ func (ps podStatuses) Message() string { // getPodsForDeletion receives resource info for a node, and returns all the pods from the given node that we // are planning on deleting. If there are any pods preventing us from deleting, we return that list in an error. func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev1.Pod, err error) { - podList, err := o.client.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ + podList, err := o.client.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeInfo.Name}).String()}) if err != nil { return pods, err @@ -497,7 +497,7 @@ func (o *DrainOptions) deletePod(pod corev1.Pod) error { gracePeriodSeconds := int64(o.GracePeriodSeconds) deleteOptions.GracePeriodSeconds = &gracePeriodSeconds } - return o.client.Core().Pods(pod.Namespace).Delete(pod.Name, deleteOptions) + return o.client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions) } func (o *DrainOptions) evictPod(pod corev1.Pod, policyGroupVersion string) error { @@ -533,7 +533,7 @@ func (o *DrainOptions) deleteOrEvictPods(pods []corev1.Pod) error { } getPodFn := func(namespace, name string) (*corev1.Pod, error) { - return o.client.Core().Pods(namespace).Get(name, metav1.GetOptions{}) + return o.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) } if len(policyGroupVersion) > 0 { diff --git a/pkg/kubectl/cmd/top_node.go b/pkg/kubectl/cmd/top_node.go index ab7b992cd5d..dace2918d69 100644 --- a/pkg/kubectl/cmd/top_node.go +++ b/pkg/kubectl/cmd/top_node.go @@ -122,8 +122,8 @@ 
func (o *TopNodeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [] if err != nil { return err } - o.NodeClient = clientset.Core() - o.Client = metricsutil.NewHeapsterMetricsClient(clientset.Core(), o.HeapsterOptions.Namespace, o.HeapsterOptions.Scheme, o.HeapsterOptions.Service, o.HeapsterOptions.Port) + o.NodeClient = clientset.CoreV1() + o.Client = metricsutil.NewHeapsterMetricsClient(clientset.CoreV1(), o.HeapsterOptions.Namespace, o.HeapsterOptions.Scheme, o.HeapsterOptions.Service, o.HeapsterOptions.Port) o.Printer = metricsutil.NewTopCmdPrinter(out) return nil } diff --git a/pkg/kubectl/cmd/top_pod.go b/pkg/kubectl/cmd/top_pod.go index d64c012f045..2ffdc6dff8e 100644 --- a/pkg/kubectl/cmd/top_pod.go +++ b/pkg/kubectl/cmd/top_pod.go @@ -118,8 +118,8 @@ func (o *TopPodOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s if err != nil { return err } - o.PodClient = clientset.Core() - o.Client = metricsutil.NewHeapsterMetricsClient(clientset.Core(), o.HeapsterOptions.Namespace, o.HeapsterOptions.Scheme, o.HeapsterOptions.Service, o.HeapsterOptions.Port) + o.PodClient = clientset.CoreV1() + o.Client = metricsutil.NewHeapsterMetricsClient(clientset.CoreV1(), o.HeapsterOptions.Namespace, o.HeapsterOptions.Scheme, o.HeapsterOptions.Service, o.HeapsterOptions.Port) o.Printer = metricsutil.NewTopCmdPrinter(out) return nil } diff --git a/pkg/kubectl/cmd/util/env/env_resolve.go b/pkg/kubectl/cmd/util/env/env_resolve.go index d663f730471..ede70215ad4 100644 --- a/pkg/kubectl/cmd/util/env/env_resolve.go +++ b/pkg/kubectl/cmd/util/env/env_resolve.go @@ -46,7 +46,7 @@ func getSecretRefValue(client kubernetes.Interface, namespace string, store *Res secret, ok := store.SecretStore[secretSelector.Name] if !ok { var err error - secret, err = client.Core().Secrets(namespace).Get(secretSelector.Name, metav1.GetOptions{}) + secret, err = client.CoreV1().Secrets(namespace).Get(secretSelector.Name, metav1.GetOptions{}) if err != nil { return "", err } @@ 
-64,7 +64,7 @@ func getConfigMapRefValue(client kubernetes.Interface, namespace string, store * configMap, ok := store.ConfigMapStore[configMapSelector.Name] if !ok { var err error - configMap, err = client.Core().ConfigMaps(namespace).Get(configMapSelector.Name, metav1.GetOptions{}) + configMap, err = client.CoreV1().ConfigMaps(namespace).Get(configMapSelector.Name, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/pkg/volume/azure_file/azure_util.go b/pkg/volume/azure_file/azure_util.go index 038d78518df..f1685dad4e6 100644 --- a/pkg/volume/azure_file/azure_util.go +++ b/pkg/volume/azure_file/azure_util.go @@ -50,7 +50,7 @@ func (s *azureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secret return "", "", fmt.Errorf("Cannot get kube client") } - keys, err := kubeClient.Core().Secrets(nameSpace).Get(secretName, metav1.GetOptions{}) + keys, err := kubeClient.CoreV1().Secrets(nameSpace).Get(secretName, metav1.GetOptions{}) if err != nil { return "", "", fmt.Errorf("Couldn't get secret %v/%v", nameSpace, secretName) } @@ -85,7 +85,7 @@ func (s *azureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accoun }, Type: "Opaque", } - _, err := kubeClient.Core().Secrets(nameSpace).Create(secret) + _, err := kubeClient.CoreV1().Secrets(nameSpace).Create(secret) if errors.IsAlreadyExists(err) { err = nil } diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go index 5c0fdd04a74..234ee7f25c5 100644 --- a/pkg/volume/cephfs/cephfs.go +++ b/pkg/volume/cephfs/cephfs.go @@ -100,7 +100,7 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume. 
if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err diff --git a/pkg/volume/iscsi/attacher.go b/pkg/volume/iscsi/attacher.go index c039a70db8c..b86b2f2499e 100644 --- a/pkg/volume/iscsi/attacher.go +++ b/pkg/volume/iscsi/attacher.go @@ -186,7 +186,7 @@ func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volum if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secretObj, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) return nil, err diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index a7893ec7218..4ea6e792ef8 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -118,7 +118,7 @@ func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.V if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secretObj, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) return nil, err diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index 596a19dca8b..2bad82bcdf6 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -164,7 +164,7 @@ func (plugin *rbdPlugin) createMounterFromVolumeSpecAndPod(spec *volume.Spec, po if kubeClient == nil { 
return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go index cc6b2878912..3a3849907d4 100644 --- a/test/e2e/auth/pod_security_policy.go +++ b/test/e2e/auth/pod_security_policy.go @@ -131,7 +131,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { It("should forbid pod creation when no PSP is available", func() { By("Running a restricted pod") - _, err := c.Core().Pods(ns).Create(restrictedPod(f, "restricted")) + _, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "restricted")) expectForbidden(err) }) @@ -141,12 +141,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { defer cleanup() By("Running a restricted pod") - pod, err := c.Core().Pods(ns).Create(restrictedPod(f, "allowed")) + pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed")) framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace)) testPrivilegedPods(f, func(pod *v1.Pod) { - _, err := c.Core().Pods(ns).Create(pod) + _, err := c.CoreV1().Pods(ns).Create(pod) expectForbidden(err) }) }) @@ -160,12 +160,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { defer cleanup() testPrivilegedPods(f, func(pod *v1.Pod) { - p, err := c.Core().Pods(ns).Create(pod) + p, err := c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err) framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace)) // Verify expected PSP was used. 
- p, err = c.Core().Pods(ns).Get(p.Name, metav1.GetOptions{}) + p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) validated, found := p.Annotations[psputil.ValidatedPSPAnnotation] Expect(found).To(BeTrue(), "PSP annotation not found") diff --git a/test/e2e/framework/deployment_util.go b/test/e2e/framework/deployment_util.go index 8a249d1e131..192e78a9fe4 100644 --- a/test/e2e/framework/deployment_util.go +++ b/test/e2e/framework/deployment_util.go @@ -286,7 +286,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Dep return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name) } podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { - return client.Core().Pods(namespace).List(options) + return client.CoreV1().Pods(namespace).List(options) } rsList := []*extensions.ReplicaSet{replicaSet} podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc) diff --git a/test/e2e/instrumentation/logging/elasticsearch/kibana.go b/test/e2e/instrumentation/logging/elasticsearch/kibana.go index cc4a4c9c446..da554bec093 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/kibana.go +++ b/test/e2e/instrumentation/logging/elasticsearch/kibana.go @@ -57,7 +57,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { // Check for the existence of the Kibana service. ginkgo.By("Checking the Kibana service exists.") - s := f.ClientSet.Core().Services(metav1.NamespaceSystem) + s := f.ClientSet.CoreV1().Services(metav1.NamespaceSystem) // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. 
err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) { @@ -73,7 +73,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { ginkgo.By("Checking to make sure the Kibana pods are running") label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options) + pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, pod := range pods.Items { err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod) @@ -82,7 +82,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { ginkgo.By("Checking to make sure we get a response from the Kibana UI.") err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) { - req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get()) + req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if err != nil { framework.Logf("Failed to get services proxy request: %v", err) return false, nil diff --git a/test/e2e/instrumentation/logging/elasticsearch/utils.go b/test/e2e/instrumentation/logging/elasticsearch/utils.go index dd1ca884f4b..ecc30cda3d5 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/utils.go +++ b/test/e2e/instrumentation/logging/elasticsearch/utils.go @@ -55,7 +55,7 @@ func (p *esLogProvider) Init() error { f := p.Framework // Check for the existence of the Elasticsearch service. framework.Logf("Checking the Elasticsearch service exists.") - s := f.ClientSet.Core().Services(api.NamespaceSystem) + s := f.ClientSet.CoreV1().Services(api.NamespaceSystem) // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. 
var err error @@ -73,7 +73,7 @@ func (p *esLogProvider) Init() error { framework.Logf("Checking to make sure the Elasticsearch pods are running") labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String() options := meta_v1.ListOptions{LabelSelector: labelSelector} - pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options) + pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options) if err != nil { return err } @@ -90,7 +90,7 @@ func (p *esLogProvider) Init() error { err = nil var body []byte for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue @@ -124,7 +124,7 @@ func (p *esLogProvider) Init() error { framework.Logf("Checking health of Elasticsearch service.") healthy := false for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue @@ -172,7 +172,7 @@ func (p *esLogProvider) Cleanup() { func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry { f := p.Framework - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != 
nil { framework.Logf("Failed to get services proxy request: %v", errProxy) return nil diff --git a/test/e2e/multicluster/ubernetes_lite.go b/test/e2e/multicluster/ubernetes_lite.go index 7ceace73d83..a83ad4d26a8 100644 --- a/test/e2e/multicluster/ubernetes_lite.go +++ b/test/e2e/multicluster/ubernetes_lite.go @@ -433,7 +433,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) By("Creating pods for each static PV") for _, config := range configs { podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "") - config.pod, err = c.Core().Pods(ns).Create(podConfig) + config.pod, err = c.CoreV1().Pods(ns).Create(podConfig) Expect(err).NotTo(HaveOccurred()) } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 147837e06b4..d97be344a87 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -813,7 +813,7 @@ var _ = SIGDescribe("Services", func() { tcpService := jig.CreateTCPServiceOrFail(ns, nil) defer func() { framework.Logf("Cleaning up the updating NodePorts test service") - err := cs.Core().Services(ns).Delete(serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(serviceName, nil) Expect(err).NotTo(HaveOccurred()) }() jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go index 4b9f19c5140..3550f651bdf 100644 --- a/test/e2e/upgrades/apps/daemonsets.go +++ b/test/e2e/upgrades/apps/daemonsets.go @@ -119,7 +119,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) } func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector map[string]string) (bool, error) { - nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return false, err } @@ -139,7 +139,7 @@ func checkRunningOnAllNodes(f *framework.Framework, 
namespace string, selector m func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet map[string]string, nodeNames []string) (bool, error) { selector := labels.Set(labelSet).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := f.ClientSet.Core().Pods(namespace).List(options) + podList, err := f.ClientSet.CoreV1().Pods(namespace).List(options) if err != nil { return false, err } diff --git a/test/e2e/upgrades/apps/statefulset.go b/test/e2e/upgrades/apps/statefulset.go index 6b3224290ea..e5075f6d0e2 100644 --- a/test/e2e/upgrades/apps/statefulset.go +++ b/test/e2e/upgrades/apps/statefulset.go @@ -66,7 +66,7 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) { t.tester.PauseNewPods(t.set) By("Creating service " + headlessSvcName + " in namespace " + ns) - _, err := f.ClientSet.Core().Services(ns).Create(t.service) + _, err := f.ClientSet.CoreV1().Services(ns).Create(t.service) Expect(err).NotTo(HaveOccurred()) By("Creating statefulset " + ssName + " in namespace " + ns) diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go index dc196c92a33..bb6c1d48be7 100644 --- a/test/integration/deployment/util.go +++ b/test/integration/deployment/util.go @@ -210,7 +210,7 @@ func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image str func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error { addPodConditionReady(pod, metav1.Now()) - _, err := c.Core().Pods(ns).UpdateStatus(pod) + _, err := c.CoreV1().Pods(ns).UpdateStatus(pod) return err } From 2419af51fd814b460173db1cbd24f0a75f8f4a56 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Thu, 2 Nov 2017 21:31:33 +0800 Subject: [PATCH 112/794] Improve kubeadm validation unit test coverage. 
--- .../kubeadm/validation/validation_test.go | 198 +++++++++++++++++- 1 file changed, 192 insertions(+), 6 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go index 3027970c509..7bfba1a7b53 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go @@ -17,6 +17,8 @@ limitations under the License. package validation import ( + "io/ioutil" + "os" "testing" "time" @@ -36,12 +38,13 @@ func TestValidateTokenDiscovery(t *testing.T) { f *field.Path expected bool }{ - {&kubeadm.NodeConfiguration{Token: "772ef5.6b6baab1d4a0a171", DiscoveryTokenAPIServers: []string{"192.168.122.100:9898"}}, nil, true}, - {&kubeadm.NodeConfiguration{Token: ".6b6baab1d4a0a171", DiscoveryTokenAPIServers: []string{"192.168.122.100:9898"}}, nil, false}, - {&kubeadm.NodeConfiguration{Token: "772ef5.", DiscoveryTokenAPIServers: []string{"192.168.122.100:9898"}}, nil, false}, - {&kubeadm.NodeConfiguration{Token: "772ef5.6b6baab1d4a0a171", DiscoveryTokenAPIServers: []string{"2001:db8::100:9898"}}, nil, true}, - {&kubeadm.NodeConfiguration{Token: ".6b6baab1d4a0a171", DiscoveryTokenAPIServers: []string{"2001:db8::100:9898"}}, nil, false}, - {&kubeadm.NodeConfiguration{Token: "772ef5.", DiscoveryTokenAPIServers: []string{"2001:db8::100:9898"}}, nil, false}, + {&kubeadm.NodeConfiguration{Token: "772ef5.6b6baab1d4a0a171", DiscoveryTokenAPIServers: []string{"192.168.122.100:6443"}}, nil, true}, + {&kubeadm.NodeConfiguration{Token: ".6b6baab1d4a0a171", DiscoveryTokenAPIServers: []string{"192.168.122.100:6443"}}, nil, false}, + {&kubeadm.NodeConfiguration{Token: "772ef5.", DiscoveryTokenAPIServers: []string{"192.168.122.100:6443"}}, nil, false}, + {&kubeadm.NodeConfiguration{Token: "772ef5.6b6baab1d4a0a171", DiscoveryTokenAPIServers: []string{"2001:db8::100:6443"}}, nil, true}, + {&kubeadm.NodeConfiguration{Token: ".6b6baab1d4a0a171", 
DiscoveryTokenAPIServers: []string{"2001:db8::100:6443"}}, nil, false}, + {&kubeadm.NodeConfiguration{Token: "772ef5.", DiscoveryTokenAPIServers: []string{"2001:db8::100:6443"}}, nil, false}, + {&kubeadm.NodeConfiguration{Token: "abcdef.1234567890123456@foobar", DiscoveryTokenAPIServers: []string{"192.168.122.100:6443"}}, nil, false}, } for _, rt := range tests { err := ValidateToken(rt.c.Token, rt.f).ToAggregate() @@ -330,6 +333,22 @@ func TestValidateMasterConfiguration(t *testing.T) { CertificatesDir: "/some/other/cert/dir", Token: "abcdef.0123456789abcdef", }, false}, + {"valid master configuration with incorrect IPv4 pod subnet", + &kubeadm.MasterConfiguration{ + API: kubeadm.API{ + AdvertiseAddress: "1.2.3.4", + BindPort: 6443, + }, + AuthorizationModes: []string{"Node", "RBAC"}, + Networking: kubeadm.Networking{ + ServiceSubnet: "10.96.0.1/12", + DNSDomain: "cluster.local", + PodSubnet: "10.0.1.15", + }, + CertificatesDir: "/some/other/cert/dir", + Token: "abcdef.0123456789abcdef", + NodeName: nodename, + }, false}, {"valid master configuration with IPv4 service subnet", &kubeadm.MasterConfiguration{ API: kubeadm.API{ @@ -366,6 +385,7 @@ func TestValidateMasterConfiguration(t *testing.T) { Networking: kubeadm.Networking{ ServiceSubnet: "10.96.0.1/12", DNSDomain: "cluster.local", + PodSubnet: "10.0.1.15/16", }, CertificatesDir: "/some/other/cert/dir", Token: "abcdef.0123456789abcdef", @@ -606,3 +626,169 @@ func TestValidateKubeletConfiguration(t *testing.T) { t.Errorf("failed ValidateKubeletConfiguration: expect errors but got no error") } } + +func TestValidateArgSelection(t *testing.T) { + var tests = []struct { + name string + c *kubeadm.NodeConfiguration + expected bool + }{ + { + "invalid: DiscoveryToken and DiscoveryFile cannot both be set", + &kubeadm.NodeConfiguration{ + DiscoveryFile: "https://url/file.conf", + DiscoveryToken: "abcdef.1234567890123456", + }, + false, + }, + { + "invalid: DiscoveryToken or DiscoveryFile must be set", + 
&kubeadm.NodeConfiguration{ + DiscoveryFile: "", + DiscoveryToken: "", + }, + false, + }, + { + "invalid: DiscoveryTokenAPIServers not set", + &kubeadm.NodeConfiguration{ + DiscoveryToken: "abcdef.1234567890123456", + }, + false, + }, + { + "invalid: DiscoveryTokenCACertHashes cannot be used with DiscoveryFile", + &kubeadm.NodeConfiguration{ + DiscoveryFile: "https://url/file.conf", + DiscoveryTokenCACertHashes: []string{"sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"}, + }, + false, + }, + { + "invalid: using token-based discovery without DiscoveryTokenCACertHashes and DiscoveryTokenUnsafeSkipCAVerification", + &kubeadm.NodeConfiguration{ + DiscoveryToken: "abcdef.1234567890123456", + DiscoveryTokenUnsafeSkipCAVerification: false, + DiscoveryTokenAPIServers: []string{"192.168.122.100:6443"}, + }, + false, + }, + { + "WARNING: kubeadm doesn't fully support multiple API Servers yet", + &kubeadm.NodeConfiguration{ + DiscoveryToken: "abcdef.1234567890123456", + DiscoveryTokenUnsafeSkipCAVerification: true, + DiscoveryTokenAPIServers: []string{"192.168.122.100:6443", "192.168.122.88:6443"}, + }, + true, + }, + { + "valid: DiscoveryFile with DiscoveryTokenAPIServers", + &kubeadm.NodeConfiguration{ + DiscoveryFile: "https://url/file.conf", + DiscoveryTokenAPIServers: []string{"192.168.122.100:6443"}, + }, + true, + }, + { + "valid: DiscoveryFile without DiscoveryTokenAPIServers", + &kubeadm.NodeConfiguration{ + DiscoveryFile: "https://url/file.conf", + }, + true, + }, + { + "valid: using token-based discovery with DiscoveryTokenCACertHashes", + &kubeadm.NodeConfiguration{ + DiscoveryToken: "abcdef.1234567890123456", + DiscoveryTokenAPIServers: []string{"192.168.122.100:6443"}, + DiscoveryTokenCACertHashes: []string{"sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"}, + DiscoveryTokenUnsafeSkipCAVerification: false, + }, + true, + }, + { + "valid: using token-based discovery with DiscoveryTokenCACertHashe but skip ca 
verification", + &kubeadm.NodeConfiguration{ + DiscoveryToken: "abcdef.1234567890123456", + DiscoveryTokenAPIServers: []string{"192.168.122.100:6443"}, + DiscoveryTokenCACertHashes: []string{"sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"}, + DiscoveryTokenUnsafeSkipCAVerification: true, + }, + true, + }, + } + for _, rt := range tests { + err := ValidateArgSelection(rt.c, nil).ToAggregate() + if (err == nil) != rt.expected { + t.Errorf( + "%s test case failed: ValidateArgSelection:\n\texpected: %t\n\t actual: %t", + rt.name, + rt.expected, + (err == nil), + ) + } + } +} + +func TestValidateJoinDiscoveryTokenAPIServer(t *testing.T) { + var tests = []struct { + s *kubeadm.NodeConfiguration + expected bool + }{ + { + &kubeadm.NodeConfiguration{ + DiscoveryTokenAPIServers: []string{"192.168.122.100"}, + }, + false, + }, + { + &kubeadm.NodeConfiguration{ + DiscoveryTokenAPIServers: []string{"192.168.122.100:6443"}, + }, + true, + }, + } + for _, rt := range tests { + actual := ValidateJoinDiscoveryTokenAPIServer(rt.s, nil) + if (len(actual) == 0) != rt.expected { + t.Errorf( + "failed ValidateJoinDiscoveryTokenAPIServer:\n\texpected: %t\n\t actual: %t", + rt.expected, + (len(actual) == 0), + ) + } + } +} + +func TestValidateDiscoveryFile(t *testing.T) { + tmpfile, err := ioutil.TempFile("/tmp", "test_discovery_file") + if err != nil { + t.Errorf("Error creating temporary file: %v", err) + } + defer os.Remove(tmpfile.Name()) + + var tests = []struct { + s string + expected bool + }{ + {"foo", false}, + {"/foo/bar/file_which_i_believe_not_existing.conf", false}, + {tmpfile.Name(), true}, + {"http://[::1]a", false}, + {"http://url/file.conf", false}, + {"https://u r l/file.conf", false}, + {"https://url/file.conf", true}, + } + for i, rt := range tests { + actual := ValidateDiscoveryFile(rt.s, nil) + if (len(actual) == 0) != rt.expected { + t.Errorf( + "%d: failed ValidateDiscoveryFile:\n\texpected: %t\n\t actual: %t", + i, + rt.expected, + 
(len(actual) == 0), + ) + } + } +} From b5b75997fb2ca8be8321416c46cc147353259b57 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Sat, 25 Nov 2017 12:29:48 +0800 Subject: [PATCH 113/794] Add test case for validate kube-proxy configuration. --- .../kubeadm/validation/validation_test.go | 246 ++++++++++++++++++ 1 file changed, 246 insertions(+) diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go index 7bfba1a7b53..085065f36be 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go @@ -19,6 +19,7 @@ package validation import ( "io/ioutil" "os" + "strings" "testing" "time" @@ -627,6 +628,251 @@ func TestValidateKubeletConfiguration(t *testing.T) { } } +func TestValidateKubeProxyConfiguration(t *testing.T) { + successCases := []kubeadm.MasterConfiguration{ + { + KubeProxy: kubeadm.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + BindAddress: "192.168.59.103", + HealthzBindAddress: "0.0.0.0:10256", + MetricsBindAddress: "127.0.0.1:10249", + ClusterCIDR: "192.168.59.0/24", + UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, + ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, + IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ + SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + Max: utilpointer.Int32Ptr(2), + MaxPerCore: utilpointer.Int32Ptr(1), + Min: utilpointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * 
time.Second}, + }, + }, + }, + }, + } + + for _, successCase := range successCases { + if errs := ValidateProxy(&successCase, nil); len(errs) != 0 { + t.Errorf("failed ValidateProxy: expect no errors but got %v", errs) + } + } + + errorCases := []struct { + masterConfig kubeadm.MasterConfiguration + msg string + }{ + { + masterConfig: kubeadm.MasterConfiguration{ + KubeProxy: kubeadm.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + // only BindAddress is invalid + BindAddress: "10.10.12.11:2000", + HealthzBindAddress: "0.0.0.0:10256", + MetricsBindAddress: "127.0.0.1:10249", + ClusterCIDR: "192.168.59.0/24", + UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, + ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, + IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ + SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + Max: utilpointer.Int32Ptr(2), + MaxPerCore: utilpointer.Int32Ptr(1), + Min: utilpointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, + }, + }, + msg: "not a valid textual representation of an IP address", + }, + { + masterConfig: kubeadm.MasterConfiguration{ + KubeProxy: kubeadm.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + BindAddress: "10.10.12.11", + // only HealthzBindAddress is invalid + HealthzBindAddress: "0.0.0.0", + MetricsBindAddress: "127.0.0.1:10249", + ClusterCIDR: "192.168.59.0/24", + UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, + ConfigSyncPeriod: metav1.Duration{Duration: 1 * 
time.Second}, + IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ + SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + Max: utilpointer.Int32Ptr(2), + MaxPerCore: utilpointer.Int32Ptr(1), + Min: utilpointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, + }, + }, + msg: "must be IP:port", + }, + { + masterConfig: kubeadm.MasterConfiguration{ + KubeProxy: kubeadm.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + BindAddress: "10.10.12.11", + HealthzBindAddress: "0.0.0.0:12345", + // only MetricsBindAddress is invalid + MetricsBindAddress: "127.0.0.1", + ClusterCIDR: "192.168.59.0/24", + UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, + ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, + IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ + SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + Max: utilpointer.Int32Ptr(2), + MaxPerCore: utilpointer.Int32Ptr(1), + Min: utilpointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, + }, + }, + msg: "must be IP:port", + }, + { + 
masterConfig: kubeadm.MasterConfiguration{ + KubeProxy: kubeadm.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + BindAddress: "10.10.12.11", + HealthzBindAddress: "0.0.0.0:12345", + MetricsBindAddress: "127.0.0.1:10249", + // only ClusterCIDR is invalid + ClusterCIDR: "192.168.59.0", + UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, + ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, + IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ + SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + Max: utilpointer.Int32Ptr(2), + MaxPerCore: utilpointer.Int32Ptr(1), + Min: utilpointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, + }, + }, + msg: "must be a valid CIDR block (e.g. 
10.100.0.0/16)", + }, + { + masterConfig: kubeadm.MasterConfiguration{ + KubeProxy: kubeadm.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + BindAddress: "10.10.12.11", + HealthzBindAddress: "0.0.0.0:12345", + MetricsBindAddress: "127.0.0.1:10249", + ClusterCIDR: "192.168.59.0/24", + // only UDPIdleTimeout is invalid + UDPIdleTimeout: metav1.Duration{Duration: -1 * time.Second}, + ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, + IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ + SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + Max: utilpointer.Int32Ptr(2), + MaxPerCore: utilpointer.Int32Ptr(1), + Min: utilpointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, + }, + }, + msg: "must be greater than 0", + }, + { + masterConfig: kubeadm.MasterConfiguration{ + KubeProxy: kubeadm.KubeProxy{ + Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ + BindAddress: "10.10.12.11", + HealthzBindAddress: "0.0.0.0:12345", + MetricsBindAddress: "127.0.0.1:10249", + ClusterCIDR: "192.168.59.0/24", + UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, + // only ConfigSyncPeriod is invalid + ConfigSyncPeriod: metav1.Duration{Duration: -1 * time.Second}, + IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ + SyncPeriod: 
metav1.Duration{Duration: 10 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ + Max: utilpointer.Int32Ptr(2), + MaxPerCore: utilpointer.Int32Ptr(1), + Min: utilpointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, + }, + }, + msg: "must be greater than 0", + }, + } + + for i, errorCase := range errorCases { + if errs := ValidateProxy(&errorCase.masterConfig, nil); len(errs) == 0 { + t.Errorf("%d failed ValidateProxy: expected error for %s, but got no error", i, errorCase.msg) + } else if !strings.Contains(errs[0].Error(), errorCase.msg) { + t.Errorf("%d failed ValidateProxy: unexpected error: %v, expected: %s", i, errs[0], errorCase.msg) + } + } +} + func TestValidateArgSelection(t *testing.T) { var tests = []struct { name string From 776525f5725f0c88338b6e5a1c5c033a2aa75b42 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Mon, 13 Nov 2017 17:15:55 +0800 Subject: [PATCH 114/794] Add parent PR title to cherry-picked PR title --- hack/cherry_pick_pull.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hack/cherry_pick_pull.sh b/hack/cherry_pick_pull.sh index 1a909302d47..3dab5a4947f 100755 --- a/hack/cherry_pick_pull.sh +++ b/hack/cherry_pick_pull.sh @@ -131,12 +131,13 @@ function make-a-pr() { # when we shove the heredoc at hub directly, tickling the ioctl # crash. prtext="$(mktemp -t prtext.XXXX)" # cleaned in return_to_kansas + local numandtitle=$(printf '%s\n' "${SUBJECTS[@]}") cat >"${prtext}" < Date: Sat, 25 Nov 2017 16:05:15 +0800 Subject: [PATCH 115/794] Validate IPVSConfiguration only when IPVS mode is enable. 
--- .../kubeproxyconfig/validation/validation.go | 4 +- .../validation/validation_test.go | 68 ++++++++++++------- 2 files changed, 48 insertions(+), 24 deletions(-) diff --git a/pkg/proxy/apis/kubeproxyconfig/validation/validation.go b/pkg/proxy/apis/kubeproxyconfig/validation/validation.go index 55bbe7930d5..5856aaf0f61 100644 --- a/pkg/proxy/apis/kubeproxyconfig/validation/validation.go +++ b/pkg/proxy/apis/kubeproxyconfig/validation/validation.go @@ -35,7 +35,9 @@ func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList { newPath := field.NewPath("KubeProxyConfiguration") allErrs = append(allErrs, validateKubeProxyIPTablesConfiguration(config.IPTables, newPath.Child("KubeProxyIPTablesConfiguration"))...) - allErrs = append(allErrs, validateKubeProxyIPVSConfiguration(config.IPVS, newPath.Child("KubeProxyIPVSConfiguration"))...) + if config.Mode == kubeproxyconfig.ProxyModeIPVS { + allErrs = append(allErrs, validateKubeProxyIPVSConfiguration(config.IPVS, newPath.Child("KubeProxyIPVSConfiguration"))...) + } allErrs = append(allErrs, validateKubeProxyConntrackConfiguration(config.Conntrack, newPath.Child("KubeProxyConntrackConfiguration"))...) allErrs = append(allErrs, validateProxyMode(config.Mode, newPath.Child("Mode"))...) allErrs = append(allErrs, validateClientConnectionConfiguration(config.ClientConnection, newPath.Child("ClientConnection"))...) 
diff --git a/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go b/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go index f316a304462..f17225e433e 100644 --- a/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go +++ b/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go @@ -42,6 +42,7 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, + Mode: kubeproxyconfig.ProxyModeIPVS, IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{ SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, @@ -54,6 +55,26 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, }, + { + BindAddress: "192.168.59.103", + HealthzBindAddress: "0.0.0.0:10256", + MetricsBindAddress: "127.0.0.1:10249", + ClusterCIDR: "192.168.59.0/24", + UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, + ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, + IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ + Max: pointer.Int32Ptr(2), + MaxPerCore: pointer.Int32Ptr(1), + Min: pointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + }, } for _, successCase := range successCases { @@ -80,10 +101,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, - IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{ - SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, - 
MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, - }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ Max: pointer.Int32Ptr(2), MaxPerCore: pointer.Int32Ptr(1), @@ -108,10 +125,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, - IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{ - SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, - MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, - }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ Max: pointer.Int32Ptr(2), MaxPerCore: pointer.Int32Ptr(1), @@ -136,10 +149,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, - IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{ - SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, - MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, - }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ Max: pointer.Int32Ptr(2), MaxPerCore: pointer.Int32Ptr(1), @@ -164,10 +173,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, - IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{ - SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, - MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, - }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ Max: pointer.Int32Ptr(2), MaxPerCore: pointer.Int32Ptr(1), @@ -192,10 +197,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, - IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{ - SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, - MinSyncPeriod: 
metav1.Duration{Duration: 5 * time.Second}, - }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ Max: pointer.Int32Ptr(2), MaxPerCore: pointer.Int32Ptr(1), @@ -220,10 +221,31 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, - IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{ - SyncPeriod: metav1.Duration{Duration: 10 * time.Second}, - MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ + Max: pointer.Int32Ptr(2), + MaxPerCore: pointer.Int32Ptr(1), + Min: pointer.Int32Ptr(1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, + }, + msg: "must be greater than 0", + }, + { + config: kubeproxyconfig.KubeProxyConfiguration{ + BindAddress: "192.168.59.103", + HealthzBindAddress: "0.0.0.0:10256", + MetricsBindAddress: "127.0.0.1:10249", + ClusterCIDR: "192.168.59.0/24", + UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, + ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, + IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ + MasqueradeAll: true, + SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, + MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, + }, + // not specifying valid period in IPVS mode. 
+ Mode: kubeproxyconfig.ProxyModeIPVS, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ Max: pointer.Int32Ptr(2), MaxPerCore: pointer.Int32Ptr(1), From 26626529a7f5ee7b10b50fbfdfa71d1beac58250 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Sun, 26 Nov 2017 11:03:14 +0800 Subject: [PATCH 116/794] should check return err --- cmd/cloud-controller-manager/app/controllermanager.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index c93da56dad5..258e07a03a9 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -143,9 +143,9 @@ func Run(s *options.CloudControllerManagerServer) error { clientBuilder = rootClientBuilder } - err := StartControllers(s, kubeconfig, clientBuilder, stop, recorder, cloud) - glog.Fatalf("error running controllers: %v", err) - panic("unreachable") + if err := StartControllers(s, kubeconfig, clientBuilder, stop, recorder, cloud); err != nil { + glog.Fatalf("error running controllers: %v", err) + } } if !s.LeaderElection.LeaderElect { From 3836857229cff769c4e522079faba0a949841c0c Mon Sep 17 00:00:00 2001 From: Mikkel Oscar Lyderik Larsen Date: Sun, 26 Nov 2017 20:32:49 +0100 Subject: [PATCH 117/794] e2e: Only create PSP if RBAC is enabled Using PSP in e2e tests depend on RBAC being enabled in the cluster and thus PSP should only be used when RBAC is. 
--- test/e2e/framework/psp_util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/framework/psp_util.go b/test/e2e/framework/psp_util.go index d3938c90e83..bb8d0dadc9b 100644 --- a/test/e2e/framework/psp_util.go +++ b/test/e2e/framework/psp_util.go @@ -97,7 +97,7 @@ var ( ) func CreatePrivilegedPSPBinding(f *Framework, namespace string) { - if !IsPodSecurityPolicyEnabled(f) { + if !IsPodSecurityPolicyEnabled(f) || !IsRBACEnabled(f) { return } // Create the privileged PSP & role From 185d5c1f3debec7f7c81cd713307134103038497 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Wed, 8 Nov 2017 11:43:56 +0800 Subject: [PATCH 118/794] fix typo and adjust import sequence --- staging/src/k8s.io/apiserver/pkg/admission/config.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/config.go b/staging/src/k8s.io/apiserver/pkg/admission/config.go index 72da98fe263..eb979861207 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/config.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/config.go @@ -17,6 +17,7 @@ limitations under the License. package admission import ( + "bytes" "fmt" "io" "io/ioutil" @@ -27,8 +28,6 @@ import ( "github.com/ghodss/yaml" "github.com/golang/glog" - "bytes" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -151,8 +150,8 @@ func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfi return nil, nil } -// GetAdmissionPluginConfiguration takes the admission configuration and returns a reader -// for the specified plugin. If no specific configuration is present, we return a nil reader. +// ConfigFor returns a reader for the specified plugin. +// If no specific configuration is present, we return a nil reader. 
func (p configProvider) ConfigFor(pluginName string) (io.Reader, error) { // there is no config, so there is no potential config if p.config == nil { From 888580e03257a756afc2acc9c0236bb0221ee1d8 Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Mon, 27 Nov 2017 13:11:38 +0800 Subject: [PATCH 119/794] clean up failure domain from InterPodAffinityPriority --- plugin/pkg/scheduler/algorithm/priorities/BUILD | 1 - .../algorithm/priorities/interpod_affinity.go | 11 +++-------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/priorities/BUILD b/plugin/pkg/scheduler/algorithm/priorities/BUILD index 62913a89bd4..da6cba4cedc 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/BUILD +++ b/plugin/pkg/scheduler/algorithm/priorities/BUILD @@ -26,7 +26,6 @@ go_library( importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", deps = [ "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/util/node:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go index ae168d9c763..ab781aa9d34 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -17,13 +17,11 @@ limitations under the License. 
package priorities import ( - "strings" "sync" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/workqueue" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" @@ -62,17 +60,14 @@ type podAffinityPriorityMap struct { // counts store the mapping from node name to so-far computed score of // the node. counts map[string]float64 - // failureDomains contain default failure domains keys - failureDomains priorityutil.Topologies // The first error that we faced. firstError error } func newPodAffinityPriorityMap(nodes []*v1.Node) *podAffinityPriorityMap { return &podAffinityPriorityMap{ - nodes: nodes, - counts: make(map[string]float64, len(nodes)), - failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(kubeletapis.DefaultFailureDomains, ",")}, + nodes: nodes, + counts: make(map[string]float64, len(nodes)), } } @@ -97,7 +92,7 @@ func (p *podAffinityPriorityMap) processTerm(term *v1.PodAffinityTerm, podDefini p.Lock() defer p.Unlock() for _, node := range p.nodes { - if p.failureDomains.NodesHaveSameTopologyKey(node, fixedNode, term.TopologyKey) { + if priorityutil.NodesHaveSameTopologyKey(node, fixedNode, term.TopologyKey) { p.counts[node.Name] += weight } } From a8cfd22c1669155fa57980ff9108b6b434d99a58 Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Mon, 27 Nov 2017 10:44:08 +0100 Subject: [PATCH 120/794] Log actual return code, not the default value. 
--- test/e2e/framework/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 99c37212648..9d8cb14da4b 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -2130,8 +2130,8 @@ func (b kubectlBuilder) Exec() (string, error) { if err != nil { var rc int = 127 if ee, ok := err.(*exec.ExitError); ok { - Logf("rc: %d", rc) rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus()) + Logf("rc: %d", rc) } return "", uexec.CodeExitError{ Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err), From 552e4d3a9ddb102bce50d4ddbad164880cf2f68c Mon Sep 17 00:00:00 2001 From: Szymon Scharmach Date: Wed, 18 Oct 2017 14:43:55 +0200 Subject: [PATCH 121/794] Cpu manager reconclie loop can restore state --- pkg/kubelet/cm/cpumanager/cpu_manager.go | 23 ++++++++++++++++++++-- pkg/kubelet/cm/cpumanager/policy.go | 2 ++ pkg/kubelet/cm/cpumanager/policy_static.go | 8 +++++++- 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go index 9dc6b98b4e7..6e1fd9cacb1 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -160,8 +160,8 @@ func NewManager( } func (m *manager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) { - glog.Infof("[cpumanger] starting with %s policy", m.policy.Name()) - glog.Infof("[cpumanger] reconciling every %v", m.reconcilePeriod) + glog.Infof("[cpumanager] starting with %s policy", m.policy.Name()) + glog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod) m.activePods = activePods m.podStatusProvider = podStatusProvider @@ -242,6 +242,25 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec continue } + // Check whether container is present in state, there may be 3 
reasons why it's not present: + // - policy does not want to track the container + // - kubelet has just been restarted - and there is no previous state file + // - container has been removed from state by RemoveContainer call (DeletionTimestamp is set) + if _, ok := m.state.GetCPUSet(containerID); !ok { + if status.Phase == v1.PodRunning && pod.DeletionTimestamp == nil { + glog.V(4).Infof("[cpumanager] reconcileState: container is not present in state - trying to add (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) + err := m.AddContainer(pod, &container, containerID) + if err != nil { + glog.Errorf("[cpumanager] reconcileState: failed to add container (pod: %s, container: %s, container id: %s, error: %v)", pod.Name, container.Name, containerID, err) + failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID}) + } + } else { + // if DeletionTimestamp is set, pod has already been removed from state + // skip the pod/container since it's not running and will be deleted soon + continue + } + } + cset := m.state.GetCPUSetOrDefault(containerID) if cset.IsEmpty() { // NOTE: This should not happen outside of tests. 
diff --git a/pkg/kubelet/cm/cpumanager/policy.go b/pkg/kubelet/cm/cpumanager/policy.go index 39eb76316b1..c79091659e3 100644 --- a/pkg/kubelet/cm/cpumanager/policy.go +++ b/pkg/kubelet/cm/cpumanager/policy.go @@ -25,6 +25,8 @@ import ( type Policy interface { Name() string Start(s state.State) + // AddContainer call is idempotent AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error + // RemoveContainer call is idempotent RemoveContainer(s state.State, containerID string) error } diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go index dfbb0a297d0..9a461bacb63 100644 --- a/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/pkg/kubelet/cm/cpumanager/policy_static.go @@ -156,9 +156,15 @@ func (p *staticPolicy) assignableCPUs(s state.State) cpuset.CPUSet { } func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error { - glog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) if numCPUs := guaranteedCPUs(pod, container); numCPUs != 0 { + glog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) // container belongs in an exclusively allocated pool + + if _, ok := s.GetCPUSet(containerID); ok { + glog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID) + return nil + } + cpuset, err := p.allocateCPUs(s, numCPUs) if err != nil { glog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err) From f42f79edb073798e1f40ad9867653087e66a842c Mon Sep 17 00:00:00 2001 From: Shiyang Wang Date: Fri, 24 Nov 2017 18:06:20 +0800 Subject: [PATCH 122/794] fix spaces around the / --- pkg/printers/internalversion/printers.go | 8 ++++---- 
pkg/printers/internalversion/printers_test.go | 18 +++++++++--------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index 2c52c5e22fe..9811a64f348 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -1483,20 +1483,20 @@ func formatHPAMetrics(specs []autoscaling.MetricSpec, statuses []autoscaling.Met if len(statuses) > i && statuses[i].Pods != nil { current = statuses[i].Pods.CurrentAverageValue.String() } - list = append(list, fmt.Sprintf("%s / %s", current, spec.Pods.TargetAverageValue.String())) + list = append(list, fmt.Sprintf("%s/%s", current, spec.Pods.TargetAverageValue.String())) case autoscaling.ObjectMetricSourceType: current := "" if len(statuses) > i && statuses[i].Object != nil { current = statuses[i].Object.CurrentValue.String() } - list = append(list, fmt.Sprintf("%s / %s", current, spec.Object.TargetValue.String())) + list = append(list, fmt.Sprintf("%s/%s", current, spec.Object.TargetValue.String())) case autoscaling.ResourceMetricSourceType: if spec.Resource.TargetAverageValue != nil { current := "" if len(statuses) > i && statuses[i].Resource != nil { current = statuses[i].Resource.CurrentAverageValue.String() } - list = append(list, fmt.Sprintf("%s / %s", current, spec.Resource.TargetAverageValue.String())) + list = append(list, fmt.Sprintf("%s/%s", current, spec.Resource.TargetAverageValue.String())) } else { current := "" if len(statuses) > i && statuses[i].Resource != nil && statuses[i].Resource.CurrentAverageUtilization != nil { @@ -1507,7 +1507,7 @@ func formatHPAMetrics(specs []autoscaling.MetricSpec, statuses []autoscaling.Met if spec.Resource.TargetAverageUtilization != nil { target = fmt.Sprintf("%d%%", *spec.Resource.TargetAverageUtilization) } - list = append(list, fmt.Sprintf("%s / %s", current, target)) + list = append(list, fmt.Sprintf("%s/%s", current, target)) } default: list = 
append(list, "") diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index 8662c91fc37..f20c4191815 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -2182,7 +2182,7 @@ func TestPrintHPA(t *testing.T) { DesiredReplicas: 5, }, }, - "some-hpa\tReplicationController/some-rc\t / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t/100m\t2\t10\t4\t\n", }, // pods source type { @@ -2219,7 +2219,7 @@ func TestPrintHPA(t *testing.T) { }, }, }, - "some-hpa\tReplicationController/some-rc\t50m / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t50m/100m\t2\t10\t4\t\n", }, // object source type (no current) { @@ -2251,7 +2251,7 @@ func TestPrintHPA(t *testing.T) { DesiredReplicas: 5, }, }, - "some-hpa\tReplicationController/some-rc\t / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t/100m\t2\t10\t4\t\n", }, // object source type { @@ -2296,7 +2296,7 @@ func TestPrintHPA(t *testing.T) { }, }, }, - "some-hpa\tReplicationController/some-rc\t50m / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t50m/100m\t2\t10\t4\t\n", }, // resource source type, targetVal (no current) { @@ -2324,7 +2324,7 @@ func TestPrintHPA(t *testing.T) { DesiredReplicas: 5, }, }, - "some-hpa\tReplicationController/some-rc\t / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t/100m\t2\t10\t4\t\n", }, // resource source type, targetVal { @@ -2361,7 +2361,7 @@ func TestPrintHPA(t *testing.T) { }, }, }, - "some-hpa\tReplicationController/some-rc\t50m / 100m\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t50m/100m\t2\t10\t4\t\n", }, // resource source type, targetUtil (no current) { @@ -2389,7 +2389,7 @@ func TestPrintHPA(t *testing.T) { DesiredReplicas: 5, }, }, - "some-hpa\tReplicationController/some-rc\t / 80%\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t/80%\t2\t10\t4\t\n", }, // resource 
source type, targetUtil { @@ -2427,7 +2427,7 @@ func TestPrintHPA(t *testing.T) { }, }, }, - "some-hpa\tReplicationController/some-rc\t50% / 80%\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t50%/80%\t2\t10\t4\t\n", }, // multiple specs { @@ -2486,7 +2486,7 @@ func TestPrintHPA(t *testing.T) { }, }, }, - "some-hpa\tReplicationController/some-rc\t50m / 100m, 50% / 80% + 1 more...\t2\t10\t4\t\n", + "some-hpa\tReplicationController/some-rc\t50m/100m, 50%/80% + 1 more...\t2\t10\t4\t\n", }, } From 81a5ca68a7ade40e6d841692c860b87ccb7d6905 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Fri, 24 Nov 2017 14:08:26 +0100 Subject: [PATCH 123/794] client-gen: clarify clientset-api-path --- .../src/k8s.io/code-generator/cmd/client-gen/args/args.go | 4 ++-- .../cmd/client-gen/generators/generator_for_group.go | 5 +---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go index fee31d90dc0..f2274c7772f 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go @@ -43,7 +43,7 @@ type CustomArgs struct { // ClientsetOutputPath is the path the clientset will be generated at. It's // populated from command-line arguments. ClientsetOutputPath string - // ClientsetAPIPath is the default API path for generated clients. + // ClientsetAPIPath is the default API HTTP path for generated clients. ClientsetAPIPath string // ClientsetOnly determines if we should generate the clients for groups and // types along with the clientset. It's populated from command-line @@ -58,7 +58,7 @@ func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { pflag.Var(NewGVTypesValue(&ca.IncludedTypesOverrides, []string{}), "included-types-overrides", "list of group/version/type for which client should be generated. 
By default, client is generated for all types which have genclient in types.go. This overrides that. For each groupVersion in this list, only the types mentioned here will be included. The default check of genclient will be used for other group versions.") pflag.StringVar(&ca.InputBasePath, "input-base", "k8s.io/kubernetes/pkg/apis", "base path to look for the api group.") pflag.StringVarP(&ca.ClientsetName, "clientset-name", "n", "internalclientset", "the name of the generated clientset package.") - pflag.StringVarP(&ca.ClientsetAPIPath, "clientset-api-path", "", "", "the value of default API path.") + pflag.StringVarP(&ca.ClientsetAPIPath, "clientset-api-path", "", "/apis", "the value of default API HTTP path, starting with / and without trailing /.") pflag.StringVar(&ca.ClientsetOutputPath, "clientset-path", "k8s.io/kubernetes/pkg/client/clientset_generated/", "the generated clientset will be output to /.") pflag.BoolVar(&ca.ClientsetOnly, "clientset-only", false, "when set, client-gen only generates the clientset shell, without generating the individual typed clients") pflag.BoolVar(&ca.FakeClient, "fake-clientset", true, "when set, client-gen will generate the fake clientset that can be used in tests") diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go index 3646794ef39..8e569a84f95 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go @@ -72,13 +72,10 @@ func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer sw := generator.NewSnippetWriter(w, c, "$", "$") apiPath := func(group string) string { - if len(g.apiPath) > 0 { - return `"` + g.apiPath + `"` - } if group == "core" { return `"/api"` } - return `"/apis"` + return `"` + g.apiPath + `"` } groupName := g.group From 
536522bb1e60562a4a3be92261f018106240f44f Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Sat, 25 Nov 2017 11:38:10 +0100 Subject: [PATCH 124/794] client-gen: separate input-base logic from CustomArgs --- .../cmd/client-gen/args/args.go | 22 +- .../cmd/client-gen/args/gvpackages.go | 223 ++++++++++-------- .../cmd/client-gen/args/gvpackages_test.go | 58 +++-- .../client-gen/generators/client_generator.go | 30 +-- .../generators/fake/fake_client_generator.go | 2 +- .../fake/generator_fake_for_clientset.go | 4 +- .../generators/generator_for_clientset.go | 4 +- .../generators/scheme/generator_for_scheme.go | 10 +- .../code-generator/cmd/client-gen/main.go | 12 +- .../cmd/client-gen/types/helpers.go | 20 +- .../cmd/client-gen/types/types.go | 14 +- .../cmd/informer-gen/generators/generic.go | 6 +- .../informer-gen/generators/groupinterface.go | 4 +- .../cmd/informer-gen/generators/packages.go | 20 +- 14 files changed, 229 insertions(+), 200 deletions(-) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go index f2274c7772f..5eac82806eb 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go @@ -27,12 +27,6 @@ type CustomArgs struct { // A sorted list of group versions to generate. For each of them the package path is found // in GroupVersionToInputPath. Groups []types.GroupVersions - // GroupVersionToInputPath is a map between GroupVersion and the path to the respective - // types.go, relative to InputBasePath. We still need GroupVersions in the - // struct because we need an order. - GroupVersionToInputPath map[types.GroupVersion]string - // The base for the path of GroupVersionToInputPath. - InputBasePath string // Overrides for which types should be included in the client. 
IncludedTypesOverrides map[types.GroupVersion][]string @@ -54,12 +48,24 @@ type CustomArgs struct { } func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { - pflag.Var(NewGVPackagesValue(&ca.GroupVersionToInputPath, &ca.Groups, nil), "input", "group/versions that client-gen will generate clients for. At most one version per group is allowed. Specified in the format \"group1/version1,group2/version2...\".") + gvsBuilder := NewGroupVersionsBuilder(&ca.Groups) + pflag.Var(NewGVPackagesValue(gvsBuilder, nil), "input", "group/versions that client-gen will generate clients for. At most one version per group is allowed. Specified in the format \"group1/version1,group2/version2...\".") pflag.Var(NewGVTypesValue(&ca.IncludedTypesOverrides, []string{}), "included-types-overrides", "list of group/version/type for which client should be generated. By default, client is generated for all types which have genclient in types.go. This overrides that. For each groupVersion in this list, only the types mentioned here will be included. 
The default check of genclient will be used for other group versions.") - pflag.StringVar(&ca.InputBasePath, "input-base", "k8s.io/kubernetes/pkg/apis", "base path to look for the api group.") + pflag.Var(NewInputBasePathValue(gvsBuilder, "k8s.io/kubernetes/pkg/apis"), "input-base", "base path to look for the api group.") pflag.StringVarP(&ca.ClientsetName, "clientset-name", "n", "internalclientset", "the name of the generated clientset package.") pflag.StringVarP(&ca.ClientsetAPIPath, "clientset-api-path", "", "/apis", "the value of default API HTTP path, starting with / and without trailing /.") pflag.StringVar(&ca.ClientsetOutputPath, "clientset-path", "k8s.io/kubernetes/pkg/client/clientset_generated/", "the generated clientset will be output to /.") pflag.BoolVar(&ca.ClientsetOnly, "clientset-only", false, "when set, client-gen only generates the clientset shell, without generating the individual typed clients") pflag.BoolVar(&ca.FakeClient, "fake-clientset", true, "when set, client-gen will generate the fake clientset that can be used in tests") } + +// GroupVersionPackages returns a map from GroupVersion to the package with the types.go. 
+func (ca *CustomArgs) GroupVersionPackages() map[types.GroupVersion]string { + res := map[types.GroupVersion]string{} + for _, pkg := range ca.Groups { + for _, v := range pkg.Versions { + res[types.GroupVersion{Group: pkg.Group, Version: v.Version}] = v.Package + } + } + return res +} diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go b/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go index 4ffdd32ed1e..8da71d6f9bf 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go @@ -21,24 +21,48 @@ import ( "encoding/csv" "flag" "path" - "strings" - - "path/filepath" "sort" + "strings" "k8s.io/code-generator/cmd/client-gen/types" ) -type gvPackagesValue struct { - gvToPath *map[types.GroupVersion]string - groups *[]types.GroupVersions - changed bool +type inputBasePathValue struct { + builder *groupVersionsBuilder } -func NewGVPackagesValue(gvToPath *map[types.GroupVersion]string, groups *[]types.GroupVersions, def []string) *gvPackagesValue { +var _ flag.Value = &inputBasePathValue{} + +func NewInputBasePathValue(builder *groupVersionsBuilder, def string) *inputBasePathValue { + v := &inputBasePathValue{ + builder: builder, + } + v.Set(def) + return v +} + +func (s *inputBasePathValue) Set(val string) error { + s.builder.importBasePath = val + return s.builder.update() +} + +func (s *inputBasePathValue) Type() string { + return "string" +} + +func (s *inputBasePathValue) String() string { + return s.builder.importBasePath +} + +type gvPackagesValue struct { + builder *groupVersionsBuilder + groups []string + changed bool +} + +func NewGVPackagesValue(builder *groupVersionsBuilder, def []string) *gvPackagesValue { gvp := new(gvPackagesValue) - gvp.gvToPath = gvToPath - gvp.groups = groups + gvp.builder = builder if def != nil { if err := gvp.set(def); err != nil { panic(err) @@ -49,6 +73,95 @@ func NewGVPackagesValue(gvToPath 
*map[types.GroupVersion]string, groups *[]types var _ flag.Value = &gvPackagesValue{} +func (s *gvPackagesValue) set(vs []string) error { + if s.changed { + s.groups = append(s.groups, vs...) + } else { + s.groups = append([]string(nil), vs...) + } + + s.builder.groups = s.groups + return s.builder.update() +} + +func (s *gvPackagesValue) Set(val string) error { + vs, err := readAsCSV(val) + if err != nil { + return err + } + if err := s.set(vs); err != nil { + return err + } + s.changed = true + return nil +} + +func (s *gvPackagesValue) Type() string { + return "stringSlice" +} + +func (s *gvPackagesValue) String() string { + str, _ := writeAsCSV(s.groups) + return "[" + str + "]" +} + +type groupVersionsBuilder struct { + value *[]types.GroupVersions + groups []string + importBasePath string +} + +func NewGroupVersionsBuilder(groups *[]types.GroupVersions) *groupVersionsBuilder { + return &groupVersionsBuilder{ + value: groups, + } +} + +func (p *groupVersionsBuilder) update() error { + var seenGroups = make(map[types.Group]*types.GroupVersions) + for _, v := range p.groups { + pth, gvString := parsePathGroupVersion(v) + gv, err := types.ToGroupVersion(gvString) + if err != nil { + return err + } + + versionPkg := types.PackageVersion{Package: path.Join(p.importBasePath, pth, gv.Group.NonEmpty(), gv.Version.String()), Version: gv.Version} + if group, ok := seenGroups[gv.Group]; ok { + seenGroups[gv.Group].Versions = append(group.Versions, versionPkg) + } else { + seenGroups[gv.Group] = &types.GroupVersions{ + PackageName: gv.Group.NonEmpty(), + Group: gv.Group, + Versions: []types.PackageVersion{versionPkg}, + } + } + } + + var groupNames []string + for groupName := range seenGroups { + groupNames = append(groupNames, groupName.String()) + } + sort.Strings(groupNames) + *p.value = []types.GroupVersions{} + for _, groupName := range groupNames { + *p.value = append(*p.value, *seenGroups[types.Group(groupName)]) + } + + return nil +} + +func 
parsePathGroupVersion(pgvString string) (gvPath string, gvString string) { + subs := strings.Split(pgvString, "/") + length := len(subs) + switch length { + case 0, 1, 2: + return "", pgvString + default: + return strings.Join(subs[:length-2], "/"), strings.Join(subs[length-2:], "/") + } +} + func readAsCSV(val string) ([]string, error) { if val == "" { return []string{}, nil @@ -68,93 +181,3 @@ func writeAsCSV(vals []string) (string, error) { w.Flush() return strings.TrimSuffix(b.String(), "\n"), nil } - -func (s *gvPackagesValue) set(vs []string) error { - if !s.changed { - *s.gvToPath = map[types.GroupVersion]string{} - *s.groups = []types.GroupVersions{} - } - - var seenGroups = make(map[types.Group]*types.GroupVersions) - for _, g := range *s.groups { - seenGroups[g.Group] = &g - } - - for _, v := range vs { - pth, gvString := parsePathGroupVersion(v) - gv, err := types.ToGroupVersion(gvString) - if err != nil { - return err - } - - if group, ok := seenGroups[gv.Group]; ok { - seenGroups[gv.Group].Versions = append(group.Versions, gv.Version) - } else { - seenGroups[gv.Group] = &types.GroupVersions{ - PackageName: gv.Group.NonEmpty(), - Group: gv.Group, - Versions: []types.Version{gv.Version}, - } - } - - (*s.gvToPath)[gv] = groupVersionPath(pth, gv.Group.String(), gv.Version.String()) - } - - var groupNames []string - for groupName := range seenGroups { - groupNames = append(groupNames, groupName.String()) - } - sort.Strings(groupNames) - *s.groups = []types.GroupVersions{} - for _, groupName := range groupNames { - *s.groups = append(*s.groups, *seenGroups[types.Group(groupName)]) - } - - return nil -} - -func (s *gvPackagesValue) Set(val string) error { - vs, err := readAsCSV(val) - if err != nil { - return err - } - if err := s.set(vs); err != nil { - return err - } - s.changed = true - return nil -} - -func (s *gvPackagesValue) Type() string { - return "stringSlice" -} - -func (s *gvPackagesValue) String() string { - strs := make([]string, 0, 
len(*s.gvToPath)) - for gv, pth := range *s.gvToPath { - strs = append(strs, path.Join(pth, gv.Group.String(), gv.Version.String())) - } - str, _ := writeAsCSV(strs) - return "[" + str + "]" -} - -func parsePathGroupVersion(pgvString string) (gvPath string, gvString string) { - subs := strings.Split(pgvString, "/") - length := len(subs) - switch length { - case 0, 1, 2: - return "", pgvString - default: - return strings.Join(subs[:length-2], "/"), strings.Join(subs[length-2:], "/") - } -} - -func groupVersionPath(gvPath string, group string, version string) (path string) { - // special case for the core group - if group == "api" { - path = filepath.Join("core", version) - } else { - path = filepath.Join(gvPath, group, version) - } - return -} diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages_test.go b/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages_test.go index 1a88dbf32aa..0df1633edff 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages_test.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages_test.go @@ -31,6 +31,7 @@ func TestGVPackageFlag(t *testing.T) { tests := []struct { args []string def []string + importBasePath string expected map[types.GroupVersion]string expectedGroups []types.GroupVersions parseError string @@ -42,47 +43,55 @@ func TestGVPackageFlag(t *testing.T) { }, { args: []string{"foo/bar/v1", "foo/bar/v2", "foo/bar/", "foo/v1"}, - expected: map[types.GroupVersion]string{ - {Group: "bar", Version: ""}: "foo/bar", - {Group: "bar", Version: "v1"}: "foo/bar/v1", - {Group: "bar", Version: "v2"}: "foo/bar/v2", - {Group: "foo", Version: "v1"}: "foo/v1", - }, expectedGroups: []types.GroupVersions{ - {PackageName: "bar", Group: types.Group("bar"), Versions: []types.Version{types.Version("v1"), types.Version("v2"), types.Version("")}}, - {PackageName: "foo", Group: types.Group("foo"), Versions: []types.Version{types.Version("v1")}}, + {PackageName: "bar", Group: 
types.Group("bar"), Versions: []types.PackageVersion{ + {"foo/bar/v1", types.Version("v1")}, + {"foo/bar/v2", types.Version("v2")}, + {"foo/bar", types.Version("")}, + }}, + {PackageName: "foo", Group: types.Group("foo"), Versions: []types.PackageVersion{ + {"foo/v1", types.Version("v1")}, + }}, }, }, { args: []string{"foo/bar/v1", "foo/bar/v2", "foo/bar/", "foo/v1"}, def: []string{"foo/bar/v1alpha1", "foo/v1"}, - expected: map[types.GroupVersion]string{ - {Group: "bar", Version: ""}: "foo/bar", - {Group: "bar", Version: "v1"}: "foo/bar/v1", - {Group: "bar", Version: "v2"}: "foo/bar/v2", - {Group: "foo", Version: "v1"}: "foo/v1", - }, expectedGroups: []types.GroupVersions{ - {PackageName: "bar", Group: types.Group("bar"), Versions: []types.Version{types.Version("v1"), types.Version("v2"), types.Version("")}}, - {PackageName: "foo", Group: types.Group("foo"), Versions: []types.Version{types.Version("v1")}}, + {PackageName: "bar", Group: types.Group("bar"), Versions: []types.PackageVersion{ + {"foo/bar/v1", types.Version("v1")}, + {"foo/bar/v2", types.Version("v2")}, + {"foo/bar", types.Version("")}, + }}, + {PackageName: "foo", Group: types.Group("foo"), Versions: []types.PackageVersion{ + {"foo/v1", types.Version("v1")}, + }}, }, }, { args: []string{"api/v1", "api"}, - expected: map[types.GroupVersion]string{ - {Group: "api", Version: "v1"}: "core/v1", - {Group: "api", Version: ""}: "core", - }, expectedGroups: []types.GroupVersions{ - {PackageName: "core", Group: types.Group("api"), Versions: []types.Version{types.Version("v1"), types.Version("")}}, + {PackageName: "core", Group: types.Group("api"), Versions: []types.PackageVersion{ + {"core/v1", types.Version("v1")}, + {"core", types.Version("")}, + }}, + }, + }, + { + args: []string{"foo/v1"}, + importBasePath: "k8s.io/api", + expectedGroups: []types.GroupVersions{ + {PackageName: "foo", Group: types.Group("foo"), Versions: []types.PackageVersion{ + {"k8s.io/api/foo/v1", types.Version("v1")}, + }}, }, }, } for 
i, test := range tests { fs := pflag.NewFlagSet("testGVPackage", pflag.ContinueOnError) - gvp := map[types.GroupVersion]string{} groups := []types.GroupVersions{} - fs.Var(NewGVPackagesValue(&gvp, &groups, test.def), "input", "usage") + importBasePath := test.importBasePath + fs.Var(NewGVPackagesValue(NewGroupVersionsBuilder(&groups, &importBasePath), test.def), "input", "usage") args := []string{} for _, a := range test.args { @@ -99,9 +108,6 @@ func TestGVPackageFlag(t *testing.T) { } else if err != nil { t.Errorf("%d: expected nil error, got %v", i, err) } - if !reflect.DeepEqual(gvp, test.expected) { - t.Errorf("%d: expected %+v, got %+v", i, test.expected, gvp) - } if !reflect.DeepEqual(groups, test.expectedGroups) { t.Errorf("%d: expected groups %+v, got groups %+v", i, test.expectedGroups, groups) } diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index 5d314848f0d..3042267e9dc 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -233,7 +233,7 @@ func packageForScheme(customArgs *clientgenargs.CustomArgs, clientsetPackage str NextGroup: for _, group := range customArgs.Groups { for _, v := range group.Versions { - if v == "" { + if v.String() == "" { internalClient = true break NextGroup } @@ -258,7 +258,7 @@ NextGroup: DefaultGen: generator.DefaultGen{ OptionalName: "register", }, - InputPackages: customArgs.GroupVersionToInputPath, + InputPackages: customArgs.GroupVersionPackages(), OutputPackage: schemePackage, OutputPath: filepath.Join(srcTreePath, schemePackage), Groups: customArgs.Groups, @@ -274,13 +274,13 @@ NextGroup: // applyGroupOverrides applies group name overrides to each package, if applicable. 
If there is a // comment of the form "// +groupName=somegroup" or "// +groupName=somegroup.foo.bar.io", use the -// first field (somegroup) as the name of the group when generating. +// first field (somegroup) as the name of the group in Go code, e.g. as the func name in a clientset. // // If the first field of the groupName is not unique within the clientset, use "// +groupName=unique func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.CustomArgs) { // Create a map from "old GV" to "new GV" so we know what changes we need to make. changes := make(map[clientgentypes.GroupVersion]clientgentypes.GroupVersion) - for gv, inputDir := range customArgs.GroupVersionToInputPath { + for gv, inputDir := range customArgs.GroupVersionPackages() { p := universe.Package(inputDir) if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { newGV := clientgentypes.GroupVersion{ @@ -296,7 +296,7 @@ func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.Cust for _, gvs := range customArgs.Groups { gv := clientgentypes.GroupVersion{ Group: gvs.Group, - Version: gvs.Versions[0], // we only need a version, and the first will do + Version: gvs.Versions[0].Version, // we only need a version, and the first will do } if newGV, ok := changes[gv]; ok { // There's an override, so use it. @@ -312,19 +312,6 @@ func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.Cust } } customArgs.Groups = newGroups - - // Modify customArgs.GroupVersionToInputPath based on the groupName overrides. - newGVToInputPath := make(map[clientgentypes.GroupVersion]string) - for gv, inputDir := range customArgs.GroupVersionToInputPath { - if newGV, ok := changes[gv]; ok { - // There's an override, so use it. - newGVToInputPath[newGV] = inputDir - } else { - // No override. - newGVToInputPath[gv] = inputDir - } - } - customArgs.GroupVersionToInputPath = newGVToInputPath } // Packages makes the client package definition. 
@@ -344,7 +331,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat gvToTypes := map[clientgentypes.GroupVersion][]*types.Type{} groupGoNames := make(map[clientgentypes.GroupVersion]string) - for gv, inputDir := range customArgs.GroupVersionToInputPath { + for gv, inputDir := range customArgs.GroupVersionPackages() { p := context.Universe.Package(path.Vendorless(inputDir)) // If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as @@ -398,11 +385,12 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat } orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} + gvPackages := customArgs.GroupVersionPackages() for _, group := range customArgs.Groups { for _, version := range group.Versions { - gv := clientgentypes.GroupVersion{Group: group.Group, Version: version} + gv := clientgentypes.GroupVersion{Group: group.Group, Version: version.Version} types := gvToTypes[gv] - inputPath := customArgs.GroupVersionToInputPath[gv] + inputPath := gvPackages[gv] packageList = append(packageList, packageForGroup(gv, orderer.OrderTypes(types), clientsetPackage, group.PackageName, groupGoNames[gv], customArgs.ClientsetAPIPath, arguments.OutputBase, inputPath, boilerplate)) if customArgs.FakeClient { packageList = append(packageList, fake.PackageForGroup(gv, orderer.OrderTypes(types), clientsetPackage, group.PackageName, groupGoNames[gv], inputPath, boilerplate)) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go index 941cc35b0db..277a3ce1045 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go @@ -116,7 +116,7 @@ func PackageForClientset(customArgs *clientgenargs.CustomArgs, fakeClientsetPack DefaultGen: 
generator.DefaultGen{ OptionalName: "register", }, - InputPackages: customArgs.GroupVersionToInputPath, + InputPackages: customArgs.GroupVersionPackages(), OutputPackage: fakeClientsetPackage, Groups: customArgs.Groups, GroupGoNames: groupGoNames, diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go index b6a8de3f81e..ea9ed8deb45 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -63,7 +63,7 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) { groupClientPackage := filepath.Join(g.fakeClientsetPackage, "typed", group.PackageName, version.NonEmpty()) fakeGroupClientPackage := filepath.Join(groupClientPackage, "fake") - groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{group.Group, version}]) + groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{group.Group, version.Version}]) imports = append(imports, strings.ToLower(fmt.Sprintf("%s%s \"%s\"", groupAlias, version.NonEmpty(), groupClientPackage))) imports = append(imports, strings.ToLower(fmt.Sprintf("fake%s%s \"%s\"", groupAlias, version.NonEmpty(), fakeGroupClientPackage))) } @@ -87,7 +87,7 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr // perhaps we can adapt the go2ild framework to this kind of usage. 
sw := generator.NewSnippetWriter(w, c, "$", "$") - allGroups := clientgentypes.ToGroupVersionPackages(g.groups, g.groupGoNames) + allGroups := clientgentypes.ToGroupVersionInfo(g.groups, g.groupGoNames) sw.Do(common, nil) sw.Do(checkImpl, nil) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go index a7fdf85d151..de52646600c 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -59,7 +59,7 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) { for _, group := range g.groups { for _, version := range group.Versions { typedClientPath := filepath.Join(g.clientsetPackage, "typed", group.PackageName, version.NonEmpty()) - groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{group.Group, version}]) + groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{group.Group, version.Version}]) imports = append(imports, strings.ToLower(fmt.Sprintf("%s%s \"%s\"", groupAlias, version.NonEmpty(), typedClientPath))) } } @@ -71,7 +71,7 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr // perhaps we can adapt the go2ild framework to this kind of usage. 
sw := generator.NewSnippetWriter(w, c, "$", "$") - allGroups := clientgentypes.ToGroupVersionPackages(g.groups, g.groupGoNames) + allGroups := clientgentypes.ToGroupVersionInfo(g.groups, g.groupGoNames) m := map[string]interface{}{ "allGroups": allGroups, "Config": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go index 416877c4949..f0f726cf728 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go @@ -61,18 +61,18 @@ func (g *GenScheme) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.ImportTracker.ImportLines()...) for _, group := range g.Groups { for _, version := range group.Versions { - packagePath := g.InputPackages[clientgentypes.GroupVersion{Group: group.Group, Version: version}] - groupAlias := strings.ToLower(g.GroupGoNames[clientgentypes.GroupVersion{group.Group, version}]) + packagePath := g.InputPackages[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}] + groupAlias := strings.ToLower(g.GroupGoNames[clientgentypes.GroupVersion{group.Group, version.Version}]) if g.CreateRegistry { // import the install package for internal clientsets instead of the type package with register.go - if version != "" { + if version.Version != "" { packagePath = filepath.Dir(packagePath) } packagePath = filepath.Join(packagePath, "install") imports = append(imports, strings.ToLower(fmt.Sprintf("%s \"%s\"", groupAlias, path.Vendorless(packagePath)))) break } else { - imports = append(imports, strings.ToLower(fmt.Sprintf("%s%s \"%s\"", groupAlias, version.NonEmpty(), path.Vendorless(packagePath)))) + imports = append(imports, 
strings.ToLower(fmt.Sprintf("%s%s \"%s\"", groupAlias, version.Version.NonEmpty(), path.Vendorless(packagePath)))) } } } @@ -82,7 +82,7 @@ func (g *GenScheme) Imports(c *generator.Context) (imports []string) { func (g *GenScheme) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "$", "$") - allGroupVersions := clientgentypes.ToGroupVersionPackages(g.Groups, g.GroupGoNames) + allGroupVersions := clientgentypes.ToGroupVersionInfo(g.Groups, g.GroupGoNames) allInstallGroups := clientgentypes.ToGroupInstallPackages(g.Groups, g.GroupGoNames) m := map[string]interface{}{ diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go index 72404808ec2..392284519c1 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go @@ -19,7 +19,6 @@ package main import ( "flag" - "path" "path/filepath" "github.com/golang/glog" @@ -53,12 +52,11 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - // Prefix with InputBaseDir and add client dirs as input dirs. 
- for gv, pth := range customArgs.GroupVersionToInputPath { - customArgs.GroupVersionToInputPath[gv] = path.Join(customArgs.InputBasePath, pth) - } - for _, pkg := range customArgs.GroupVersionToInputPath { - arguments.InputDirs = append(arguments.InputDirs, pkg) + // add group version package as input dirs for gengo + for _, pkg := range customArgs.Groups { + for _, v := range pkg.Versions { + arguments.InputDirs = append(arguments.InputDirs, v.Package) + } } if err := arguments.Execute( diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go b/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go index bc4d9dea306..33e6ac451bf 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go @@ -75,27 +75,27 @@ func (a sortableSliceOfVersions) Less(i, j int) bool { // Determine the default version among versions. If a user calls a group client // without specifying the version (e.g., c.Core(), instead of c.CoreV1()), the // default version will be returned. -func defaultVersion(versions []Version) Version { +func defaultVersion(versions []PackageVersion) Version { var versionStrings []string for _, version := range versions { - versionStrings = append(versionStrings, string(version)) + versionStrings = append(versionStrings, version.Version.String()) } sort.Sort(sortableSliceOfVersions(versionStrings)) return Version(versionStrings[len(versionStrings)-1]) } -// ToGroupVersionPackages is a helper function used by generators for groups. -func ToGroupVersionPackages(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupVersionPackage { - var groupVersionPackages []GroupVersionPackage +// ToGroupVersionInfo is a helper function used by generators for groups. 
+func ToGroupVersionInfo(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupVersionInfo { + var groupVersionPackages []GroupVersionInfo for _, group := range groups { defaultVersion := defaultVersion(group.Versions) for _, version := range group.Versions { - groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: version}] - groupVersionPackages = append(groupVersionPackages, GroupVersionPackage{ + groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: version.Version}] + groupVersionPackages = append(groupVersionPackages, GroupVersionInfo{ Group: Group(namer.IC(group.Group.NonEmpty())), - Version: Version(namer.IC(version.String())), - PackageAlias: strings.ToLower(groupGoName + version.NonEmpty()), - IsDefaultVersion: version == defaultVersion && version != "", + Version: Version(namer.IC(version.Version.String())), + PackageAlias: strings.ToLower(groupGoName + version.Version.NonEmpty()), + IsDefaultVersion: version.Version == defaultVersion && version.Version != "", GroupGoName: groupGoName, LowerCaseGroupGoName: namer.IL(groupGoName), }) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/types/types.go b/staging/src/k8s.io/code-generator/cmd/client-gen/types/types.go index a2a87b1e1d6..17fd6e92a70 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/types/types.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/types/types.go @@ -42,20 +42,26 @@ func (g Group) NonEmpty() string { return string(g) } +type PackageVersion struct { + Version + // The fully qualified package, e.g. k8s.io/kubernetes/pkg/apis/apps, where the types.go is found. + Package string +} + type GroupVersion struct { Group Group Version Version } type GroupVersions struct { - // The package name of the group, e.g. extensions or networking + // The name of the package for this group, e.g. apps. 
PackageName string Group Group - Versions []Version + Versions []PackageVersion } -// GroupVersionPackage contains group name, version name, and the package name client-gen will generate for this group version. -type GroupVersionPackage struct { +// GroupVersionInfo contains all the info around a group version. +type GroupVersionInfo struct { Group Group Version Version // If a user calls a group client without specifying the version (e.g., diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/generic.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/generic.go index 891cac31382..54632de0530 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/generic.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/generic.go @@ -105,10 +105,10 @@ func (g *genericGenerator) GenerateType(c *generator.Context, t *types.Type, w i Versions: []*version{}, } for _, v := range groupVersions.Versions { - gv := clientgentypes.GroupVersion{Group: groupVersions.Group, Version: v} + gv := clientgentypes.GroupVersion{Group: groupVersions.Group, Version: v.Version} version := &version{ - Name: v.NonEmpty(), - GoName: namer.IC(v.NonEmpty()), + Name: v.Version.NonEmpty(), + GoName: namer.IC(v.Version.NonEmpty()), Resources: orderer.OrderTypes(g.typesForGroupVersion[gv]), } schemeGVs[version] = c.Universe.Variable(types.Name{Package: g.typesForGroupVersion[gv][0].Name.Package, Name: "SchemeGroupVersion"}) diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go index 253b79f377b..0bba93c4b2e 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go @@ -69,11 +69,11 @@ func (g *groupInterfaceGenerator) GenerateType(c *generator.Context, t *types.Ty versions := make([]versionData, 0, 
len(g.groupVersions.Versions)) for _, version := range g.groupVersions.Versions { - gv := clientgentypes.GroupVersion{Group: g.groupVersions.Group, Version: version} + gv := clientgentypes.GroupVersion{Group: g.groupVersions.Group, Version: version.Version} versionPackage := filepath.Join(g.outputPackage, strings.ToLower(gv.Version.NonEmpty())) iface := c.Universe.Type(types.Name{Package: versionPackage, Name: "Interface"}) versions = append(versions, versionData{ - Name: namer.IC(version.NonEmpty()), + Name: namer.IC(version.Version.NonEmpty()), Interface: iface, New: c.Universe.Function(types.Name{Package: versionPackage, Name: "New"}), }) diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go index faa012188df..6e0d5c00225 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go @@ -18,6 +18,7 @@ package generators import ( "fmt" + "path" "path/filepath" "strings" @@ -158,7 +159,8 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat gv.Version = clientgentypes.Version(parts[len(parts)-1]) targetGroupVersions = externalGroupVersions } - groupPkgName := strings.ToLower(gv.Group.NonEmpty()) + groupPackageName := gv.Group.NonEmpty() + gvPackage := path.Clean(p.Path) // If there's a comment of the form "// +groupName=somegroup" or // "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the @@ -169,9 +171,9 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as // the Go group identifier in CamelCase. 
It defaults - groupGoNames[groupPkgName] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0]) + groupGoNames[groupPackageName] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0]) if override := types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { - groupGoNames[groupPkgName] = namer.IC(override[0]) + groupGoNames[groupPackageName] = namer.IC(override[0]) } var typesToGenerate []*types.Type @@ -192,23 +194,23 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat continue } - groupVersionsEntry, ok := targetGroupVersions[groupPkgName] + groupVersionsEntry, ok := targetGroupVersions[groupPackageName] if !ok { groupVersionsEntry = clientgentypes.GroupVersions{ - PackageName: groupPkgName, + PackageName: groupPackageName, Group: gv.Group, } } - groupVersionsEntry.Versions = append(groupVersionsEntry.Versions, gv.Version) - targetGroupVersions[groupPkgName] = groupVersionsEntry + groupVersionsEntry.Versions = append(groupVersionsEntry.Versions, clientgentypes.PackageVersion{Version: gv.Version, Package: gvPackage}) + targetGroupVersions[groupPackageName] = groupVersionsEntry orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} typesToGenerate = orderer.OrderTypes(typesToGenerate) if internal { - packageList = append(packageList, versionPackage(internalVersionPackagePath, groupPkgName, gv, groupGoNames[groupPkgName], boilerplate, typesToGenerate, customArgs.InternalClientSetPackage, customArgs.ListersPackage)) + packageList = append(packageList, versionPackage(internalVersionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs.InternalClientSetPackage, customArgs.ListersPackage)) } else { - packageList = append(packageList, versionPackage(externalVersionPackagePath, groupPkgName, gv, groupGoNames[groupPkgName], boilerplate, typesToGenerate, customArgs.VersionedClientSetPackage, customArgs.ListersPackage)) + packageList = append(packageList, 
versionPackage(externalVersionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs.VersionedClientSetPackage, customArgs.ListersPackage)) } } From dc2e57ba7495d70a4731238e0e8a3c0c1c1cbbb1 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Sun, 26 Nov 2017 21:47:50 +0800 Subject: [PATCH 125/794] refactor canUseIPVSMode and test it --- cmd/kube-proxy/app/server_others.go | 14 ++--- cmd/kube-proxy/app/server_test.go | 13 ++++- pkg/proxy/ipvs/proxier.go | 44 +++++++++++---- pkg/proxy/ipvs/proxier_test.go | 84 +++++++++++++++++++++++++++++ 4 files changed, 139 insertions(+), 16 deletions(-) diff --git a/cmd/kube-proxy/app/server_others.go b/cmd/kube-proxy/app/server_others.go index 380e83e2a0a..30a8ac9bbf9 100644 --- a/cmd/kube-proxy/app/server_others.go +++ b/cmd/kube-proxy/app/server_others.go @@ -83,6 +83,7 @@ func newProxyServer( var iptInterface utiliptables.Interface var ipvsInterface utilipvs.Interface + var kernelHandler ipvs.KernelHandler var ipsetInterface utilipset.Interface var dbus utildbus.Interface @@ -92,6 +93,7 @@ func newProxyServer( dbus = utildbus.New() iptInterface = utiliptables.New(execer, dbus, protocol) ipvsInterface = utilipvs.New(execer) + kernelHandler = ipvs.NewLinuxKernelHandler() ipsetInterface = utilipset.New(execer) // We omit creation of pretty much everything if we run in cleanup mode @@ -133,7 +135,7 @@ func newProxyServer( var serviceEventHandler proxyconfig.ServiceHandler var endpointsEventHandler proxyconfig.EndpointsHandler - proxyMode := getProxyMode(string(config.Mode), iptInterface, ipsetInterface, iptables.LinuxKernelCompatTester{}) + proxyMode := getProxyMode(string(config.Mode), iptInterface, kernelHandler, ipsetInterface, iptables.LinuxKernelCompatTester{}) if proxyMode == proxyModeIPTables { glog.V(0).Info("Using iptables Proxier.") nodeIP := net.ParseIP(config.BindAddress) @@ -269,7 +271,7 @@ func newProxyServer( }, nil } -func getProxyMode(proxyMode string, iptver 
iptables.IPTablesVersioner, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string { +func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, khandle ipvs.KernelHandler, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string { if proxyMode == proxyModeUserspace { return proxyModeUserspace } @@ -280,7 +282,7 @@ func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, ipsetver if utilfeature.DefaultFeatureGate.Enabled(features.SupportIPVSProxyMode) { if proxyMode == proxyModeIPVS { - return tryIPVSProxy(iptver, ipsetver, kcompat) + return tryIPVSProxy(iptver, khandle, ipsetver, kcompat) } else { glog.Warningf("Can't use ipvs proxier, trying iptables proxier") return tryIPTablesProxy(iptver, kcompat) @@ -290,10 +292,10 @@ func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, ipsetver return tryIPTablesProxy(iptver, kcompat) } -func tryIPVSProxy(iptver iptables.IPTablesVersioner, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string { +func tryIPVSProxy(iptver iptables.IPTablesVersioner, khandle ipvs.KernelHandler, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string { // guaranteed false on error, error only necessary for debugging - // IPVS Proxier relies on ipset - useIPVSProxy, err := ipvs.CanUseIPVSProxier(ipsetver) + // IPVS Proxier relies on ip_vs_* kernel modules and ipset + useIPVSProxy, err := ipvs.CanUseIPVSProxier(khandle, ipsetver) if err != nil { // Try to fallback to iptables before falling back to userspace utilruntime.HandleError(fmt.Errorf("can't determine whether to use ipvs proxy, error: %v", err)) diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go index e957e262e75..f50898ae623 100644 --- a/cmd/kube-proxy/app/server_test.go +++ b/cmd/kube-proxy/app/server_test.go @@ -72,6 +72,15 @@ func (fake *fakeKernelCompatTester) IsCompatible() error { return nil } +// fakeKernelHandler implements 
KernelHandler. +type fakeKernelHandler struct { + modules []string +} + +func (fake *fakeKernelHandler) GetModules() ([]string, error) { + return fake.modules, nil +} + func Test_getProxyMode(t *testing.T) { if runtime.GOOS != "linux" { t.Skip("skipping on non-Linux") @@ -82,6 +91,7 @@ func Test_getProxyMode(t *testing.T) { annotationVal string iptablesVersion string ipsetVersion string + kmods []string kernelCompat bool iptablesError error ipsetError error @@ -140,7 +150,8 @@ func Test_getProxyMode(t *testing.T) { versioner := &fakeIPTablesVersioner{c.iptablesVersion, c.iptablesError} kcompater := &fakeKernelCompatTester{c.kernelCompat} ipsetver := &fakeIPSetVersioner{c.ipsetVersion, c.ipsetError} - r := getProxyMode(c.flag, versioner, ipsetver, kcompater) + khandler := &fakeKernelHandler{c.kmods} + r := getProxyMode(c.flag, versioner, khandler, ipsetver, kcompater) if r != c.expected { t.Errorf("Case[%d] Expected %q, got %q", i, c.expected, r) } diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 5f5a09d2447..841ce1fd041 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -688,14 +688,28 @@ func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap) { } } -// CanUseIPVSProxier returns true if we can use the ipvs Proxier. -// This is determined by checking if all the required kernel modules can be loaded. It may -// return an error if it fails to get the kernel modules information without error, in which -// case it will also return false. -func CanUseIPVSProxier(ipsetver IPSetVersioner) (bool, error) { - // Try to load IPVS required kernel modules using modprobe +// KernelHandler can handle the current installed kernel modules. +type KernelHandler interface { + GetModules() ([]string, error) +} + +// LinuxKernelHandler implements KernelHandler interface. +type LinuxKernelHandler struct { + executor utilexec.Interface +} + +// NewLinuxKernelHandler initializes LinuxKernelHandler with exec. 
+func NewLinuxKernelHandler() *LinuxKernelHandler { + return &LinuxKernelHandler{ + executor: utilexec.New(), + } +} + +// GetModules returns all installed kernel modules. +func (handle *LinuxKernelHandler) GetModules() ([]string, error) { + // Try to load IPVS required kernel modules using modprobe first for _, kmod := range ipvsModules { - err := utilexec.New().Command("modprobe", "--", kmod).Run() + err := handle.executor.Command("modprobe", "--", kmod).Run() if err != nil { glog.Warningf("Failed to load kernel module %v with modprobe. "+ "You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", kmod) @@ -703,12 +717,24 @@ func CanUseIPVSProxier(ipsetver IPSetVersioner) (bool, error) { } // Find out loaded kernel modules - out, err := utilexec.New().Command("cut", "-f1", "-d", " ", "/proc/modules").CombinedOutput() + out, err := handle.executor.Command("cut", "-f1", "-d", " ", "/proc/modules").CombinedOutput() if err != nil { - return false, err + return nil, err } mods := strings.Split(string(out), "\n") + return mods, nil +} + +// CanUseIPVSProxier returns true if we can use the ipvs Proxier. +// This is determined by checking if all the required kernel modules can be loaded. It may +// return an error if it fails to get the kernel modules information without error, in which +// case it will also return false. +func CanUseIPVSProxier(handle KernelHandler, ipsetver IPSetVersioner) (bool, error) { + mods, err := handle.GetModules() + if err != nil { + return false, fmt.Errorf("error getting installed ipvs required kernel modules: %v", err) + } wantModules := sets.NewString() loadModules := sets.NewString() wantModules.Insert(ipvsModules...) 
diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index b490f1a37e2..b7864a8b88e 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -18,6 +18,7 @@ package ipvs import ( "bytes" + "fmt" "net" "reflect" "testing" @@ -87,6 +88,25 @@ func (fake *fakeHealthChecker) SyncEndpoints(newEndpoints map[types.NamespacedNa return nil } +// fakeKernelHandler implements KernelHandler. +type fakeKernelHandler struct { + modules []string +} + +func (fake *fakeKernelHandler) GetModules() ([]string, error) { + return fake.modules, nil +} + +// fakeKernelHandler implements KernelHandler. +type fakeIPSetVersioner struct { + version string + err error +} + +func (fake *fakeIPSetVersioner) GetVersion() (string, error) { + return fake.version, fake.err +} + func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []net.IP) *Proxier { fcmd := fakeexec.FakeCmd{ CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{ @@ -180,6 +200,70 @@ func makeTestEndpoints(namespace, name string, eptFunc func(*api.Endpoints)) *ap return ept } +func TestCanUseIPVSProxier(t *testing.T) { + testCases := []struct { + mods []string + kernelErr error + ipsetVersion string + ipsetErr error + ok bool + }{ + // case 0, kernel error + { + mods: []string{"foo", "bar", "baz"}, + kernelErr: fmt.Errorf("oops"), + ipsetVersion: "0.0", + ok: false, + }, + // case 1, ipset error + { + mods: []string{"foo", "bar", "baz"}, + ipsetVersion: MinIPSetCheckVersion, + ipsetErr: fmt.Errorf("oops"), + ok: false, + }, + // case 2, missing required kernel modules and ipset version too low + { + mods: []string{"foo", "bar", "baz"}, + ipsetVersion: "1.1", + ok: false, + }, + // case 3, missing required ip_vs_* kernel modules + { + mods: []string{"ip_vs", "a", "bc", "def"}, + ipsetVersion: MinIPSetCheckVersion, + ok: false, + }, + // case 4, ipset version too low + { + mods: []string{"ip_vs", "ip_vs_rr", "ip_vs_wrr", 
"ip_vs_sh", "nf_conntrack_ipv4"}, + ipsetVersion: "4.3.0", + ok: false, + }, + // case 5 + { + mods: []string{"ip_vs", "ip_vs_rr", "ip_vs_wrr", "ip_vs_sh", "nf_conntrack_ipv4"}, + ipsetVersion: MinIPSetCheckVersion, + ok: true, + }, + // case 6 + { + mods: []string{"ip_vs", "ip_vs_rr", "ip_vs_wrr", "ip_vs_sh", "nf_conntrack_ipv4", "foo", "bar"}, + ipsetVersion: "6.19", + ok: true, + }, + } + + for i := range testCases { + handle := &fakeKernelHandler{modules: testCases[i].mods} + versioner := &fakeIPSetVersioner{version: testCases[i].ipsetVersion, err: testCases[i].ipsetErr} + ok, _ := CanUseIPVSProxier(handle, versioner) + if ok != testCases[i].ok { + t.Errorf("Case [%d], expect %v, got %v", i, testCases[i].ok, ok) + } + } +} + func TestNodePort(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() From 7b372143c5e70fe046e95a0e23897e010e040963 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Mon, 27 Nov 2017 19:31:39 +0800 Subject: [PATCH 126/794] test ipvs proxy mode when feature gateway unset --- cmd/kube-proxy/app/server_test.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go index f50898ae623..0fc92c259c8 100644 --- a/cmd/kube-proxy/app/server_test.go +++ b/cmd/kube-proxy/app/server_test.go @@ -145,6 +145,23 @@ func Test_getProxyMode(t *testing.T) { kernelCompat: true, expected: proxyModeIPTables, }, + { // specify ipvs, feature gateway disabled, iptables version ok, kernel is compatible + flag: "ipvs", + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: true, + expected: proxyModeIPTables, + }, + { // specify ipvs, feature gateway disabled, iptables version too low + flag: "ipvs", + iptablesVersion: "0.0.0", + expected: proxyModeUserspace, + }, + { // specify ipvs, feature gateway disabled, iptables version ok, kernel is not compatible + flag: "ipvs", + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: false, + expected: 
proxyModeUserspace, + }, } for i, c := range cases { versioner := &fakeIPTablesVersioner{c.iptablesVersion, c.iptablesError} From f9ff53d06eaf62f087fd58cdd075ba129e1bbd32 Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Mon, 27 Nov 2017 14:49:17 +0100 Subject: [PATCH 127/794] Do not log trailing whitespace. --- pkg/registry/rbac/rest/storage_rbac.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/registry/rbac/rest/storage_rbac.go b/pkg/registry/rbac/rest/storage_rbac.go index b8704f8e465..67f34604c49 100644 --- a/pkg/registry/rbac/rest/storage_rbac.go +++ b/pkg/registry/rbac/rest/storage_rbac.go @@ -264,7 +264,7 @@ func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc { case result.Operation == reconciliation.ReconcileUpdate: glog.Infof("updated role.%s/%s in %v with additional permissions: %v", rbac.GroupName, role.Name, namespace, result.MissingRules) case result.Operation == reconciliation.ReconcileCreate: - glog.Infof("created role.%s/%s in %v ", rbac.GroupName, role.Name, namespace) + glog.Infof("created role.%s/%s in %v", rbac.GroupName, role.Name, namespace) } return nil }) From eed826a25ec83fac1531a788b6383f53da04d1ee Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Mon, 27 Nov 2017 21:15:10 +0800 Subject: [PATCH 128/794] fix bug when cloud is nil --- cmd/cloud-controller-manager/app/controllermanager.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index c93da56dad5..531fd81ac8d 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -93,6 +93,10 @@ func Run(s *options.CloudControllerManagerServer) error { glog.Fatalf("Cloud provider could not be initialized: %v", err) } + if cloud == nil { + glog.Fatalf("cloud provider is nil") + } + if cloud.HasClusterID() == false { if s.AllowUntaggedCloud == true { 
glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues") From 7dce7fe1eb3143b116820369c2179f28b5af232a Mon Sep 17 00:00:00 2001 From: supereagle Date: Mon, 27 Nov 2017 22:13:19 +0800 Subject: [PATCH 129/794] use authentication client with explicit version --- cmd/cloud-controller-manager/app/controllermanager.go | 2 +- cmd/kube-controller-manager/app/controllermanager.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index c93da56dad5..8b4631be157 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -136,7 +136,7 @@ func Run(s *options.CloudControllerManagerServer) error { clientBuilder = controller.SAControllerClientBuilder{ ClientConfig: restclient.AnonymousClientConfig(kubeconfig), CoreClient: kubeClient.CoreV1(), - AuthenticationClient: kubeClient.Authentication(), + AuthenticationClient: kubeClient.AuthenticationV1(), Namespace: "kube-system", } } else { diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index d08e9658a8d..d68fd8c64b1 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -139,7 +139,7 @@ func Run(s *options.CMServer) error { clientBuilder = controller.SAControllerClientBuilder{ ClientConfig: restclient.AnonymousClientConfig(kubeconfig), CoreClient: kubeClient.CoreV1(), - AuthenticationClient: kubeClient.Authentication(), + AuthenticationClient: kubeClient.AuthenticationV1(), Namespace: "kube-system", } } else { From 79352bafaa1ffca69a885b9f47d83008ca6108c2 Mon Sep 17 00:00:00 2001 From: supereagle Date: Mon, 27 Nov 2017 22:42:00 +0800 Subject: [PATCH 130/794] use policy client with explicit version --- 
pkg/kubectl/cmd/drain.go | 2 +- test/e2e/apps/disruption.go | 6 +++--- test/e2e/autoscaling/cluster_size_autoscaling.go | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 910654f6f6a..eb968010791 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -518,7 +518,7 @@ func (o *DrainOptions) evictPod(pod corev1.Pod, policyGroupVersion string) error DeleteOptions: deleteOptions, } // Remember to change change the URL manipulation func when Evction's version change - return o.client.Policy().Evictions(eviction.Namespace).Evict(eviction) + return o.client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction) } // deleteOrEvictPods deletes or evicts the pods on the api server diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index f86f83cb434..ac39720d90b 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -64,7 +64,7 @@ var _ = SIGDescribe("DisruptionController", func() { // Since disruptionAllowed starts out 0, if we see it ever become positive, // that means the controller is working. 
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - pdb, err := cs.Policy().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{}) + pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{}) if err != nil { return false, err } @@ -226,7 +226,7 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable MinAvailable: &minAvailable, }, } - _, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb) + _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb) Expect(err).NotTo(HaveOccurred()) } @@ -241,7 +241,7 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail MaxUnavailable: &maxUnavailable, }, } - _, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb) + _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb) Expect(err).NotTo(HaveOccurred()) } diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 0d5f60dc919..3eb2ccd8ace 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -964,10 +964,10 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str MinAvailable: &minAvailable, }, } - _, err = f.ClientSet.Policy().PodDisruptionBudgets(namespace).Create(pdb) + _, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb) defer func() { - f.ClientSet.Policy().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{}) + f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{}) }() framework.ExpectNoError(err) @@ -1904,7 +1904,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) { var finalErr error for _, newPdbName := range newPdbs { By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName)) - err := f.ClientSet.Policy().PodDisruptionBudgets("kube-system").Delete(newPdbName, 
&metav1.DeleteOptions{}) + err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{}) if err != nil { // log error, but attempt to remove other pdbs glog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err) @@ -1942,7 +1942,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) { MinAvailable: &minAvailable, }, } - _, err := f.ClientSet.Policy().PodDisruptionBudgets("kube-system").Create(pdb) + _, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(pdb) newPdbs = append(newPdbs, pdbName) if err != nil { From b158125dad1114599446f779e6e613b09e456631 Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Mon, 27 Nov 2017 09:24:29 -0800 Subject: [PATCH 131/794] e2e: test containers projected volume updates should not exit The mounttest container should be running until the test determines to give up (i.e., time out) and kill it. It should not exit prematurely by itself. Bump the `--retry-timeout` to a much higher value. 
--- test/e2e/common/projected.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/common/projected.go b/test/e2e/common/projected.go index 10193466735..decb9595930 100644 --- a/test/e2e/common/projected.go +++ b/test/e2e/common/projected.go @@ -1517,7 +1517,7 @@ func projectedDownwardAPIVolumePodForUpdateTest(name string, labels, annotations { Name: "client-container", Image: mountImage, - Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath}, + Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=1200", "--file_content_in_loop=" + filePath}, VolumeMounts: []v1.VolumeMount{ { Name: "podinfo", From 280b833f397f26ac687f65ad3ae19b5db2152bc7 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Mon, 27 Nov 2017 21:53:22 +0200 Subject: [PATCH 132/794] Fix master upgrade cornercases --- .../reactive/kubernetes_master.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 1547bc8bc93..4b0a94a9abe 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -102,12 +102,25 @@ def check_for_upgrade_needed(): add_rbac_roles() set_state('reconfigure.authentication.setup') remove_state('authentication.setup') + if should_reinstall_snaps(): + set_upgrade_needed() + + +def should_reinstall_snaps(): + ''' Return true if we should redeploy snaps. 
''' + # Snaps should be upgrades if: + # a) channel changed, or + # b) the Charms attached snaps (resources) changed + config = hookenv.config() + previous_channel = config.previous('channel') + new_channel = hookenv.config('channel') + if new_channel != previous_channel: + return True resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager', 'kube-scheduler', 'cdk-addons'] paths = [hookenv.resource_get(resource) for resource in resources] - if any_file_changed(paths): - set_upgrade_needed() + return any_file_changed(paths) def add_rbac_roles(): @@ -360,6 +373,7 @@ def set_app_version(): @when('cdk-addons.configured', 'kube-api-endpoint.available', 'kube-control.connected') +@when_not('kubernetes-master.upgrade-needed') def idle_status(kube_api, kube_control): ''' Signal at the end of the run that we are running. ''' if not all_kube_system_pods_running(): From 0475c8527c04b37852e7fcf24d261e81f921eb75 Mon Sep 17 00:00:00 2001 From: Saad Ali Date: Mon, 27 Nov 2017 16:23:31 -0800 Subject: [PATCH 133/794] Change `GCEDiskAlphaAPI` to `DiskAlphaAPI` Change name in comment --- pkg/cloudprovider/providers/gce/gce_disks.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/gce_disks.go b/pkg/cloudprovider/providers/gce/gce_disks.go index 6918c57c0df..7f9ad894d68 100644 --- a/pkg/cloudprovider/providers/gce/gce_disks.go +++ b/pkg/cloudprovider/providers/gce/gce_disks.go @@ -173,7 +173,7 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider( manager.gce.projectID, manager.gce.region, diskToCreateAlpha).Do() } - return nil, fmt.Errorf("The regional PD feature is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") + return nil, fmt.Errorf("The regional PD feature is only available via the GCE Alpha API. 
Enable \"DiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") } func (manager *gceServiceManager) AttachDiskOnCloudProvider( @@ -323,7 +323,7 @@ func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider( }, nil } - return nil, fmt.Errorf("The regional PD feature is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") + return nil, fmt.Errorf("The regional PD feature is only available via the GCE Alpha API. Enable \"DiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") } func (manager *gceServiceManager) DeleteDiskOnCloudProvider( @@ -346,7 +346,7 @@ func (manager *gceServiceManager) DeleteRegionalDiskOnCloudProvider( manager.gce.projectID, manager.gce.region, diskName).Do() } - return nil, fmt.Errorf("DeleteRegionalDiskOnCloudProvider is a regional PD feature and is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") + return nil, fmt.Errorf("DeleteRegionalDiskOnCloudProvider is a regional PD feature and is only available via the GCE Alpha API. Enable \"DiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") } func (manager *gceServiceManager) WaitForZoneOp( @@ -497,7 +497,7 @@ func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDis } return manager.gce.serviceAlpha.RegionDisks.Resize(manager.gce.projectID, disk.Region, disk.Name, resizeServiceRequest).Do() } - return nil, fmt.Errorf("RegionalResizeDiskOnCloudProvider is a regional PD feature and is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") + return nil, fmt.Errorf("RegionalResizeDiskOnCloudProvider is a regional PD feature and is only available via the GCE Alpha API. 
Enable \"DiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") } // Disks is interface for manipulation with GCE PDs. From 1a10652173f2fb48bc4d59a8a746313076258544 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Mon, 27 Nov 2017 19:34:59 +0800 Subject: [PATCH 134/794] test ipvs proxy mode when feature gateway set --- cmd/kube-proxy/app/server_test.go | 134 ++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go index 0fc92c259c8..ba470950556 100644 --- a/cmd/kube-proxy/app/server_test.go +++ b/cmd/kube-proxy/app/server_test.go @@ -28,8 +28,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/diff" + utilfeature "k8s.io/apiserver/pkg/util/feature" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" + "k8s.io/kubernetes/pkg/proxy/ipvs" "k8s.io/kubernetes/pkg/util/configz" "k8s.io/kubernetes/pkg/util/iptables" utilpointer "k8s.io/kubernetes/pkg/util/pointer" @@ -175,6 +177,138 @@ func Test_getProxyMode(t *testing.T) { } } +// This is a coarse test, but it offers some modicum of confidence as the code is evolved. 
+func Test_getProxyModeEnableFeatureGateway(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("skipping on non-Linux") + } + + // enable IPVS feature gateway + utilfeature.DefaultFeatureGate.Set("SupportIPVSProxyMode=true") + + var cases = []struct { + flag string + iptablesVersion string + ipsetVersion string + kernelCompat bool + iptablesError error + ipsetError error + mods []string + expected string + }{ + { // flag says userspace + flag: "userspace", + expected: proxyModeUserspace, + }, + { // flag says iptables, error detecting version + flag: "iptables", + iptablesError: fmt.Errorf("oops!"), + expected: proxyModeUserspace, + }, + { // flag says iptables, version too low + flag: "iptables", + iptablesVersion: "0.0.0", + expected: proxyModeUserspace, + }, + { // flag says iptables, version ok, kernel not compatible + flag: "iptables", + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: false, + expected: proxyModeUserspace, + }, + { // flag says iptables, version ok, kernel is compatible + flag: "iptables", + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: true, + expected: proxyModeIPTables, + }, + { // detect, error + flag: "", + iptablesError: fmt.Errorf("oops!"), + expected: proxyModeUserspace, + }, + { // detect, version too low + flag: "", + iptablesVersion: "0.0.0", + expected: proxyModeUserspace, + }, + { // detect, version ok, kernel not compatible + flag: "", + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: false, + expected: proxyModeUserspace, + }, + { // detect, version ok, kernel is compatible + flag: "", + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: true, + expected: proxyModeIPTables, + }, + { // detect, version ok, kernel is compatible + flag: "", + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: true, + expected: proxyModeIPTables, + }, + { // flag says ipvs, ipset version ok, kernel modules installed + flag: "ipvs", + mods: []string{"ip_vs", "ip_vs_rr", "ip_vs_wrr", "ip_vs_sh", 
"nf_conntrack_ipv4"}, + ipsetVersion: ipvs.MinIPSetCheckVersion, + expected: proxyModeIPVS, + }, + { // flag says ipvs, ipset version too low, fallback on iptables mode + flag: "ipvs", + mods: []string{"ip_vs", "ip_vs_rr", "ip_vs_wrr", "ip_vs_sh", "nf_conntrack_ipv4"}, + ipsetVersion: "0.0", + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: true, + expected: proxyModeIPTables, + }, + { // flag says ipvs, bad ipset version, fallback on iptables mode + flag: "ipvs", + mods: []string{"ip_vs", "ip_vs_rr", "ip_vs_wrr", "ip_vs_sh", "nf_conntrack_ipv4"}, + ipsetVersion: "a.b.c", + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: true, + expected: proxyModeIPTables, + }, + { // flag says ipvs, required kernel modules are not installed, fallback on iptables mode + flag: "ipvs", + mods: []string{"foo", "bar", "baz"}, + ipsetVersion: ipvs.MinIPSetCheckVersion, + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: true, + expected: proxyModeIPTables, + }, + { // flag says ipvs, required kernel modules are not installed, iptables version too old, fallback on userspace mode + flag: "ipvs", + mods: []string{"foo", "bar", "baz"}, + ipsetVersion: ipvs.MinIPSetCheckVersion, + iptablesVersion: "0.0.0", + kernelCompat: true, + expected: proxyModeUserspace, + }, + { // flag says ipvs, ipset version too low, iptables version too old, kernel not compatible, fallback on userspace mode + flag: "ipvs", + mods: []string{"ip_vs", "ip_vs_rr", "ip_vs_wrr", "ip_vs_sh", "nf_conntrack_ipv4"}, + ipsetVersion: "0.0", + iptablesVersion: iptables.MinCheckVersion, + kernelCompat: false, + expected: proxyModeUserspace, + }, + } + for i, c := range cases { + versioner := &fakeIPTablesVersioner{c.iptablesVersion, c.iptablesError} + kcompater := &fakeKernelCompatTester{c.kernelCompat} + ipsetver := &fakeIPSetVersioner{c.ipsetVersion, c.ipsetError} + khandle := &fakeKernelHandler{c.mods} + r := getProxyMode(c.flag, versioner, khandle, ipsetver, kcompater) + if r != c.expected { + 
t.Errorf("Case[%d] Expected %q, got %q", i, c.expected, r) + } + } +} + // This test verifies that NewProxyServer does not crash when CleanupAndExit is true. func TestProxyServerWithCleanupAndExit(t *testing.T) { // Each bind address below is a separate test case From 10aea7e88c6f57235bffc2b5d778d9cfb0cc5bc4 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Mon, 27 Nov 2017 20:10:43 +0800 Subject: [PATCH 135/794] update bazel BUILD file --- cmd/kube-proxy/app/BUILD | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/kube-proxy/app/BUILD b/cmd/kube-proxy/app/BUILD index 4a4cb061112..02bbd56e48a 100644 --- a/cmd/kube-proxy/app/BUILD +++ b/cmd/kube-proxy/app/BUILD @@ -92,12 +92,14 @@ go_test( deps = [ "//pkg/apis/core:go_default_library", "//pkg/proxy/apis/kubeproxyconfig:go_default_library", + "//pkg/proxy/ipvs:go_default_library", "//pkg/util/configz:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/pointer:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) From 50520be6495f83ffd683f0efc6101c123ac66bc5 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Tue, 28 Nov 2017 09:54:28 +0800 Subject: [PATCH 136/794] completely remove the option to use auto-detect --- cmd/kubelet/app/server.go | 6 +----- pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go | 6 ------ pkg/kubelet/kubelet.go | 8 +------- pkg/kubelet/kubelet_node_status.go | 7 ------- 4 files changed, 2 insertions(+), 25 deletions(-) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 8100b44afae..9313d3a7ae5 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -289,12 +289,8 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { } } - if s.CloudProvider == 
kubeletconfigv1alpha1.AutoDetectCloudProvider { - glog.Warning("--cloud-provider=auto-detect is deprecated. The desired cloud provider should be set explicitly") - } - if kubeDeps.Cloud == nil { - if !cloudprovider.IsExternal(s.CloudProvider) && s.CloudProvider != kubeletconfigv1alpha1.AutoDetectCloudProvider { + if !cloudprovider.IsExternal(s.CloudProvider) { cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) if err != nil { return err diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go index 90ac66813b5..94780a441f7 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go @@ -30,12 +30,6 @@ import ( const ( DefaultRootDir = "/var/lib/kubelet" - // DEPRECATED: auto detecting cloud providers goes against the initiative - // for out-of-tree cloud providers as we'll now depend on cAdvisor integrations - // with cloud providers instead of in the core repo. 
- // More details here: https://github.com/kubernetes/kubernetes/issues/50986 - AutoDetectCloudProvider = "auto-detect" - DefaultIPTablesMasqueradeBit = 14 DefaultIPTablesDropBit = 15 ) diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index fab8dcefbb6..d271d2d167b 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -57,7 +57,6 @@ import ( "k8s.io/kubernetes/pkg/features" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" - kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -521,7 +520,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, recorder: kubeDeps.Recorder, cadvisor: kubeDeps.CAdvisorInterface, cloud: kubeDeps.Cloud, - autoDetectCloudProvider: (kubeletconfigv1alpha1.AutoDetectCloudProvider == cloudProvider), externalCloudProvider: cloudprovider.IsExternal(cloudProvider), providerID: providerID, nodeRef: nodeRef, @@ -1032,11 +1030,7 @@ type Kubelet struct { // Cloud provider interface. cloud cloudprovider.Interface - // DEPRECATED: auto detecting cloud providers goes against the initiative - // for out-of-tree cloud providers as we'll now depend on cAdvisor integrations - // with cloud providers instead of in the core repo. - // More details here: https://github.com/kubernetes/kubernetes/issues/50986 - autoDetectCloudProvider bool + // Indicates that the node initialization happens in an external cloud controller externalCloudProvider bool // Reference to this node. 
diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 7e09c16815f..006283e808f 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -347,13 +347,6 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { } } else { node.Spec.ExternalID = kl.hostname - if kl.autoDetectCloudProvider { - // If no cloud provider is defined - use the one detected by cadvisor - info, err := kl.GetCachedMachineInfo() - if err == nil { - kl.updateCloudProviderFromMachineInfo(node, info) - } - } } kl.setNodeStatus(node) From 8dc1c5ea4faf3cf0483f5d4f2e8f28635419a4c3 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Tue, 28 Nov 2017 09:55:24 +0800 Subject: [PATCH 137/794] auto-generated BUILD file --- pkg/kubelet/BUILD | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index 7fb53501242..ab35f843abd 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -45,7 +45,6 @@ go_library( "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", - "//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library", "//pkg/kubelet/cadvisor:go_default_library", "//pkg/kubelet/certificate:go_default_library", "//pkg/kubelet/cm:go_default_library", From 090c67539ae2b771a81586d90289d3d62fe8ab66 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 28 Nov 2017 08:04:14 +0000 Subject: [PATCH 138/794] use ListByResourceGroup instead of List() --- pkg/cloudprovider/providers/azure/azure_blobDiskController.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index 32c009483b4..b853bf691fc 100644 --- a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -487,7 +487,7 @@ func (c 
*BlobDiskController) getDiskCount(SAName string) (int, error) { } func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) { - accountListResult, err := c.common.cloud.StorageAccountClient.List() + accountListResult, err := c.common.cloud.StorageAccountClient.ListByResourceGroup(c.common.resourceGroup) if err != nil { return nil, err } From 33f6625a843af4c9cfa53a7add8aa44c838f370f Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Tue, 28 Nov 2017 17:40:35 +0800 Subject: [PATCH 139/794] fix scheduling queue unit test This change makes sure the Pop() test finish completely. --- plugin/pkg/scheduler/core/scheduling_queue_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugin/pkg/scheduler/core/scheduling_queue_test.go b/plugin/pkg/scheduler/core/scheduling_queue_test.go index 343b7593bf5..cd3ba05db9b 100644 --- a/plugin/pkg/scheduler/core/scheduling_queue_test.go +++ b/plugin/pkg/scheduler/core/scheduling_queue_test.go @@ -18,6 +18,7 @@ package core import ( "reflect" + "sync" "testing" "k8s.io/api/core/v1" @@ -87,12 +88,16 @@ func TestPriorityQueue_Add(t *testing.T) { func TestPriorityQueue_Pop(t *testing.T) { q := NewPriorityQueue() + wg := sync.WaitGroup{} + wg.Add(1) go func() { + defer wg.Done() if p, err := q.Pop(); err != nil || p != &highPriorityPod { t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name) } }() q.Add(&highPriorityPod) + wg.Wait() } func TestPriorityQueue_Update(t *testing.T) { From 6dddafe73b2e61d476b14a3ab77ea3087a16f79d Mon Sep 17 00:00:00 2001 From: Di Xu Date: Tue, 28 Nov 2017 18:09:27 +0800 Subject: [PATCH 140/794] fix bad output format for attaching pods --- pkg/kubectl/cmd/attach.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/pkg/kubectl/cmd/attach.go b/pkg/kubectl/cmd/attach.go index 6d1b23323d1..7bc49ca06da 100644 --- a/pkg/kubectl/cmd/attach.go +++ b/pkg/kubectl/cmd/attach.go @@ -259,11 +259,6 @@ func (p *AttachOptions) Run() error 
{ } fn := func() error { - - if !p.Quiet && stderr != nil { - fmt.Fprintln(stderr, "If you don't see a command prompt, try pressing enter.") - } - restClient, err := restclient.RESTClientFor(p.Config) if err != nil { return err @@ -285,6 +280,9 @@ func (p *AttachOptions) Run() error { return p.Attach.Attach("POST", req.URL(), p.Config, p.In, p.Out, p.Err, t.Raw, sizeQueue) } + if !p.Quiet && stderr != nil { + fmt.Fprintln(stderr, "If you don't see a command prompt, try pressing enter.") + } if err := t.Safe(fn); err != nil { return err } From 1550df99eb4fb1a5c012f68105393419760a5025 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Tue, 28 Nov 2017 12:24:40 +0200 Subject: [PATCH 141/794] The change in channels will be caught config change after the upgrade. --- .../reactive/kubernetes_master.py | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 4b0a94a9abe..5cbe89d2e43 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -102,25 +102,11 @@ def check_for_upgrade_needed(): add_rbac_roles() set_state('reconfigure.authentication.setup') remove_state('authentication.setup') - if should_reinstall_snaps(): - set_upgrade_needed() - - -def should_reinstall_snaps(): - ''' Return true if we should redeploy snaps. 
''' - # Snaps should be upgrades if: - # a) channel changed, or - # b) the Charms attached snaps (resources) changed - config = hookenv.config() - previous_channel = config.previous('channel') - new_channel = hookenv.config('channel') - if new_channel != previous_channel: - return True - resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager', 'kube-scheduler', 'cdk-addons'] paths = [hookenv.resource_get(resource) for resource in resources] - return any_file_changed(paths) + if any_file_changed(paths): + set_upgrade_needed() def add_rbac_roles(): From cbf0945afca9d261cab7dc32215271853578eb87 Mon Sep 17 00:00:00 2001 From: zhangxiaoyu-zidif Date: Tue, 28 Nov 2017 19:02:56 +0800 Subject: [PATCH 142/794] fix binary check for glusterfs.go --- pkg/volume/glusterfs/glusterfs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index c42e3cdf794..4f083e990e2 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -238,7 +238,7 @@ func (b *glusterfsMounter) CanMount() error { exe := b.plugin.host.GetExec(b.plugin.GetPluginName()) switch runtime.GOOS { case "linux": - if _, err := exe.Run("/bin/ls", gciLinuxGlusterMountBinaryPath); err != nil { + if _, err := exe.Run("test", "-x", gciLinuxGlusterMountBinaryPath); err != nil { return fmt.Errorf("Required binary %s is missing", gciLinuxGlusterMountBinaryPath) } } From 607e863f85005b06b754b6c511e0f48bdca297bb Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Wed, 22 Nov 2017 16:21:37 +0100 Subject: [PATCH 143/794] WIP: extend node e2e test suite with containerized Kubelet --- test/e2e_node/services/kubelet.go | 35 ++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/test/e2e_node/services/kubelet.go b/test/e2e_node/services/kubelet.go index 0e655349075..70459348d51 100644 --- a/test/e2e_node/services/kubelet.go +++ b/test/e2e_node/services/kubelet.go @@ 
-60,9 +60,13 @@ func (a *args) Set(value string) error { // kubeletArgs is the override kubelet args specified by the test runner. var kubeletArgs args +var kubeletContainerized bool +var hyperkubeImage string func init() { flag.Var(&kubeletArgs, "kubelet-flags", "Kubelet flags passed to kubelet, this will override default kubelet flags in the test. Flags specified in multiple kubelet-flags will be concatenate.") + flag.BoolVar(&kubeletContainerized, "kubelet-containerized", false, "Run kubelet in a docker container") + flag.StringVar(&hyperkubeImage, "hyperkube-image", "", "Docker image with containerized kubelet") } // RunKubelet starts kubelet and waits for termination signal. Once receives the @@ -93,6 +97,10 @@ const ( // startKubelet starts the Kubelet in a separate process or returns an error // if the Kubelet fails to start. func (e *E2EServices) startKubelet() (*server, error) { + if kubeletContainerized && hyperkubeImage == "" { + return nil, fmt.Errorf("the --hyperkube-image option must be set") + } + glog.Info("Starting kubelet") // set feature gates so we can check which features are enabled and pass the appropriate flags @@ -125,7 +133,32 @@ func (e *E2EServices) startKubelet() (*server, error) { // sense to test it that way isSystemd = true unitName := fmt.Sprintf("kubelet-%d.service", rand.Int31()) - cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--slice=runtime.slice", "--remain-after-exit", builder.GetKubeletServerBin()) + if kubeletContainerized { + cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--slice=runtime.slice", "--remain-after-exit", + "/usr/bin/docker", "run", "--name=kubelet", + "--rm", "--privileged", "--net=host", "--pid=host", + "-e HOST=/rootfs", "-e HOST_ETC=/host-etc", + "-v", "/etc/localtime:/etc/localtime:ro", + "-v", "/etc/machine-id:/etc/machine-id:ro", + "-v", filepath.Dir(kubeconfigPath)+":/etc/kubernetes", + "-v", "/:/rootfs:ro,rslave", + "-v", "/run:/run", + "-v", "/sys/fs/cgroup:/sys/fs/cgroup:rw", 
+ "-v", "/sys:/sys:rw", + "-v", "/usr/bin/docker:/usr/bin/docker:ro", + "-v", "/var/lib/cni:/var/lib/cni", + "-v", "/var/lib/docker:/var/lib/docker", + "-v", "/var/lib/kubelet:/var/lib/kubelet:rw,rslave", + "-v", "/var/log:/var/log", + "-v", manifestPath+":"+manifestPath+":rw", + hyperkubeImage, "/hyperkube", "kubelet", + "--containerized", + ) + kubeconfigPath = "/etc/kubernetes/kubeconfig" + } else { + cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--slice=runtime.slice", "--remain-after-exit", builder.GetKubeletServerBin()) + } + killCommand = exec.Command("systemctl", "kill", unitName) restartCommand = exec.Command("systemctl", "restart", unitName) e.logs["kubelet.log"] = LogFileData{ From 5f6699ebc588e485c263219ab25b3a9e218f8abe Mon Sep 17 00:00:00 2001 From: Seth Jennings Date: Tue, 28 Nov 2017 11:56:18 -0600 Subject: [PATCH 144/794] kubelet: include runtime error in event on CreatePodSandbox failure --- pkg/kubelet/kuberuntime/kuberuntime_manager.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 06d91b7540b..c63e02dc8bc 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -645,20 +645,20 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat if err != nil { createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg) glog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err) - ref, err := ref.GetReference(legacyscheme.Scheme, pod) - if err != nil { - glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) + ref, referr := ref.GetReference(legacyscheme.Scheme, pod) + if referr != nil { + glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) } - m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed create pod sandbox.") + 
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed create pod sandbox: %v", err) return } glog.V(4).Infof("Created PodSandbox %q for pod %q", podSandboxID, format.Pod(pod)) podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID) if err != nil { - ref, err := ref.GetReference(legacyscheme.Scheme, pod) - if err != nil { - glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) + ref, referr := ref.GetReference(legacyscheme.Scheme, pod) + if referr != nil { + glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) } m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedStatusPodSandBox, "Unable to get pod sandbox status: %v", err) glog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod)) From 61d984843852661b56922c9da46e019e2a70cd9e Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Tue, 28 Nov 2017 20:47:19 +0200 Subject: [PATCH 145/794] Improve handling of snap resources --- .../reactive/kubernetes_master.py | 23 ++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 5cbe89d2e43..95f7a8a2a3b 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -62,7 +62,6 @@ nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$' os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') - def set_upgrade_needed(): set_state('kubernetes-master.upgrade-needed') config = hookenv.config() @@ -102,11 +101,28 @@ def check_for_upgrade_needed(): add_rbac_roles() set_state('reconfigure.authentication.setup') remove_state('authentication.setup') + if snap_resources_changed(): + set_upgrade_needed() + + +def snap_resources_changed(): + ''' + Check if the snapped resources have changed. 
The first time this method is + called will report no change. + + Returns: True in case a snap resource file has changed + + ''' + db = unitdata.kv() resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager', 'kube-scheduler', 'cdk-addons'] paths = [hookenv.resource_get(resource) for resource in resources] - if any_file_changed(paths): - set_upgrade_needed() + if db.get('snap.resources.fingerprint.initialised'): + return any_file_changed(paths) + else: + db.set('snap.resources.fingerprint.initialised', True) + any_file_changed(paths) + return False def add_rbac_roles(): @@ -221,6 +237,7 @@ def install_snaps(): snap.install('kube-scheduler', channel=channel) hookenv.status_set('maintenance', 'Installing cdk-addons snap') snap.install('cdk-addons', channel=channel) + snap_resources_changed() set_state('kubernetes-master.snaps.installed') remove_state('kubernetes-master.components.started') From 54662ca7fac2ac554c58e4faaed6593ffa513345 Mon Sep 17 00:00:00 2001 From: George Kudrayvtsev Date: Tue, 28 Nov 2017 18:04:51 -0800 Subject: [PATCH 146/794] Updates Kube-proxy validators to allow Windows 'kernelspace' mode. 
--- pkg/proxy/apis/kubeproxyconfig/types.go | 7 ++++--- pkg/proxy/apis/kubeproxyconfig/validation/validation.go | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/proxy/apis/kubeproxyconfig/types.go b/pkg/proxy/apis/kubeproxyconfig/types.go index d8bf19fe5f3..13c92a1a392 100644 --- a/pkg/proxy/apis/kubeproxyconfig/types.go +++ b/pkg/proxy/apis/kubeproxyconfig/types.go @@ -160,9 +160,10 @@ type KubeProxyConfiguration struct { type ProxyMode string const ( - ProxyModeUserspace ProxyMode = "userspace" - ProxyModeIPTables ProxyMode = "iptables" - ProxyModeIPVS ProxyMode = "ipvs" + ProxyModeUserspace ProxyMode = "userspace" + ProxyModeIPTables ProxyMode = "iptables" + ProxyModeIPVS ProxyMode = "ipvs" + ProxyModeKernelspace ProxyMode = "kernelspace" ) // IPVSSchedulerMethod is the algorithm for allocating TCP connections and diff --git a/pkg/proxy/apis/kubeproxyconfig/validation/validation.go b/pkg/proxy/apis/kubeproxyconfig/validation/validation.go index 55bbe7930d5..cf576e564d4 100644 --- a/pkg/proxy/apis/kubeproxyconfig/validation/validation.go +++ b/pkg/proxy/apis/kubeproxyconfig/validation/validation.go @@ -146,9 +146,10 @@ func validateProxyMode(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) fiel case kubeproxyconfig.ProxyModeUserspace: case kubeproxyconfig.ProxyModeIPTables: case kubeproxyconfig.ProxyModeIPVS: + case kubeproxyconfig.ProxyModeKernelspace: case "": default: - modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeIPTables), string(kubeproxyconfig.ProxyModeIPVS)} + modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeIPTables), string(kubeproxyconfig.ProxyModeIPVS), string(kubeproxyconfig.ProxyModeKernelspace)} errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy (currently iptables)", strings.Join(modes, ",")) allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)) } From 
38de5581b4eb66ed8f8361b3baca41c0927a4271 Mon Sep 17 00:00:00 2001 From: tianshapjq Date: Wed, 29 Nov 2017 10:47:25 +0800 Subject: [PATCH 147/794] cleanup useless functions --- pkg/apis/core/helper/helpers.go | 59 ------------------------------ pkg/apis/core/v1/helper/helpers.go | 23 ------------ 2 files changed, 82 deletions(-) diff --git a/pkg/apis/core/helper/helpers.go b/pkg/apis/core/helper/helpers.go index ca397c8eb7f..d0c004cf997 100644 --- a/pkg/apis/core/helper/helpers.go +++ b/pkg/apis/core/helper/helpers.go @@ -254,46 +254,18 @@ func IsIntegerResourceName(str string) bool { return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str)) } -// Extended and HugePages resources -func IsScalarResourceName(name core.ResourceName) bool { - return IsExtendedResourceName(name) || IsHugePageResourceName(name) -} - // this function aims to check if the service's ClusterIP is set or not // the objective is not to perform validation here func IsServiceIPSet(service *core.Service) bool { return service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP != "" } -// this function aims to check if the service's cluster IP is requested or not -func IsServiceIPRequested(service *core.Service) bool { - // ExternalName services are CNAME aliases to external ones. Ignore the IP. 
- if service.Spec.Type == core.ServiceTypeExternalName { - return false - } - return service.Spec.ClusterIP == "" -} - var standardFinalizers = sets.NewString( string(core.FinalizerKubernetes), metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents, ) -// HasAnnotation returns a bool if passed in annotation exists -func HasAnnotation(obj core.ObjectMeta, ann string) bool { - _, found := obj.Annotations[ann] - return found -} - -// SetMetaDataAnnotation sets the annotation and value -func SetMetaDataAnnotation(obj *core.ObjectMeta, ann string, value string) { - if obj.Annotations == nil { - obj.Annotations = make(map[string]string) - } - obj.Annotations[ann] = value -} - func IsStandardFinalizerName(str string) bool { return standardFinalizers.Has(str) } @@ -482,37 +454,6 @@ func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool return true } -// TolerationToleratesTaint checks if the toleration tolerates the taint. -func TolerationToleratesTaint(toleration *core.Toleration, taint *core.Taint) bool { - if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { - return false - } - - if toleration.Key != taint.Key { - return false - } - // TODO: Use proper defaulting when Toleration becomes a field of PodSpec - if (len(toleration.Operator) == 0 || toleration.Operator == core.TolerationOpEqual) && toleration.Value == taint.Value { - return true - } - if toleration.Operator == core.TolerationOpExists { - return true - } - return false -} - -// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations. -func TaintToleratedByTolerations(taint *core.Taint, tolerations []core.Toleration) bool { - tolerated := false - for i := range tolerations { - if TolerationToleratesTaint(&tolerations[i], taint) { - tolerated = true - break - } - } - return tolerated -} - // GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations // and converts it to the []Taint type in core. 
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) { diff --git a/pkg/apis/core/v1/helper/helpers.go b/pkg/apis/core/v1/helper/helpers.go index 4b21aefcfce..b90a6de1f92 100644 --- a/pkg/apis/core/v1/helper/helpers.go +++ b/pkg/apis/core/v1/helper/helpers.go @@ -89,15 +89,6 @@ func IsServiceIPSet(service *v1.Service) bool { return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != "" } -// this function aims to check if the service's cluster IP is requested or not -func IsServiceIPRequested(service *v1.Service) bool { - // ExternalName services are CNAME aliases to external ones. Ignore the IP. - if service.Spec.Type == v1.ServiceTypeExternalName { - return false - } - return service.Spec.ClusterIP == "" -} - // AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, // only if they do not already exist func AddToNodeAddresses(addresses *[]v1.NodeAddress, addAddresses ...v1.NodeAddress) { @@ -416,20 +407,6 @@ func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { return "" } -// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. -func PersistentVolumeClaimHasClass(claim *v1.PersistentVolumeClaim) bool { - // Use beta annotation first - if _, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { - return true - } - - if claim.Spec.StorageClassName != nil { - return true - } - - return false -} - // GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations // and converts it to the NodeAffinity type in api. 
// TODO: update when storage node affinity graduates to beta From e80dcba2974267f2106f53286681072920e5652b Mon Sep 17 00:00:00 2001 From: wenlxie Date: Wed, 29 Nov 2017 13:02:27 +0800 Subject: [PATCH 148/794] should check the return value of os.DiskIsAttached --- pkg/cloudprovider/providers/openstack/openstack_volumes.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/pkg/cloudprovider/providers/openstack/openstack_volumes.go index f670bce6d06..2b5cb7ba24c 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -419,7 +419,11 @@ func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) { func (os *OpenStack) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) { attached := make(map[string]bool) for _, volumeID := range volumeIDs { - isAttached, _ := os.DiskIsAttached(instanceID, volumeID) + isAttached, err := os.DiskIsAttached(instanceID, volumeID) + if err != nil && err != ErrNotFound { + attached[volumeID] = true + continue + } attached[volumeID] = isAttached } return attached, nil From 53e6c8cc5b226f9c8e9eb34fce98d8e2fc81a2d4 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 29 Nov 2017 06:07:18 +0000 Subject: [PATCH 149/794] change default azure file/dir mode to 0755 --- pkg/volume/azure_file/azure_util.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/volume/azure_file/azure_util.go b/pkg/volume/azure_file/azure_util.go index 038d78518df..7931a9adf63 100644 --- a/pkg/volume/azure_file/azure_util.go +++ b/pkg/volume/azure_file/azure_util.go @@ -30,8 +30,8 @@ const ( fileMode = "file_mode" dirMode = "dir_mode" vers = "vers" - defaultFileMode = "0700" - defaultDirMode = "0700" + defaultFileMode = "0755" + defaultDirMode = "0755" defaultVers = "3.0" ) From b9d01eb8ab4af93e68ea5ebc33b205a555a8d20e Mon Sep 17 00:00:00 2001 
From: m1093782566 Date: Wed, 29 Nov 2017 15:27:44 +0800 Subject: [PATCH 150/794] remove winkernel dead test code --- pkg/proxy/winkernel/proxier_test.go | 2031 --------------------------- 1 file changed, 2031 deletions(-) delete mode 100644 pkg/proxy/winkernel/proxier_test.go diff --git a/pkg/proxy/winkernel/proxier_test.go b/pkg/proxy/winkernel/proxier_test.go deleted file mode 100644 index 3bb2f641ff1..00000000000 --- a/pkg/proxy/winkernel/proxier_test.go +++ /dev/null @@ -1,2031 +0,0 @@ -// +build windows - -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package winkernel - -import ( - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - - "fmt" - "net" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/sets" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/proxy" - "k8s.io/kubernetes/pkg/util/async" - "k8s.io/utils/exec" - fakeexec "k8s.io/utils/exec/testing" -) - -func newFakeServiceInfo(service proxy.ServicePortName, ip net.IP, port int, protocol api.Protocol, onlyNodeLocalEndpoints bool) *serviceInfo { - return &serviceInfo{ - sessionAffinityType: api.ServiceAffinityNone, // default - stickyMaxAgeSeconds: int(api.DefaultClientIPServiceAffinitySeconds), // default - clusterIP: ip, - port: port, - protocol: protocol, - onlyNodeLocalEndpoints: onlyNodeLocalEndpoints, - } -} - -func TestDeleteEndpointConnections(t *testing.T) { - fcmd := fakeexec.FakeCmd{ - CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{ - func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil }, - func() ([]byte, error) { - return []byte(""), fmt.Errorf("conntrack v1.4.2 (conntrack-tools): 0 flow entries have been deleted.") - }, - }, - } - fexec := fakeexec.FakeExec{ - CommandScript: []fakeexec.FakeCommandAction{ - func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) }, - func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) 
}, - }, - LookPathFunc: func(cmd string) (string, error) { return cmd, nil }, - } - - serviceMap := make(map[proxy.ServicePortName]*serviceInfo) - svc1 := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: "svc1"}, Port: "p80"} - svc2 := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: "svc2"}, Port: "p80"} - serviceMap[svc1] = newFakeServiceInfo(svc1, net.IPv4(10, 20, 30, 40), 80, api.ProtocolUDP, false) - serviceMap[svc2] = newFakeServiceInfo(svc1, net.IPv4(10, 20, 30, 41), 80, api.ProtocolTCP, false) - - fakeProxier := Proxier{exec: &fexec, serviceMap: serviceMap} - - testCases := []endpointServicePair{ - { - endpoint: "10.240.0.3:80", - servicePortName: svc1, - }, - { - endpoint: "10.240.0.4:80", - servicePortName: svc1, - }, - { - endpoint: "10.240.0.5:80", - servicePortName: svc2, - }, - } - - expectCommandExecCount := 0 - for i := range testCases { - input := map[endpointServicePair]bool{testCases[i]: true} - fakeProxier.deleteEndpointConnections(input) - svcInfo := fakeProxier.serviceMap[testCases[i].servicePortName] - if svcInfo.protocol == api.ProtocolUDP { - svcIp := svcInfo.clusterIP.String() - endpointIp := strings.Split(testCases[i].endpoint, ":")[0] - expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p udp", svcIp, endpointIp) - execCommand := strings.Join(fcmd.CombinedOutputLog[expectCommandExecCount], " ") - if expectCommand != execCommand { - t.Errorf("Exepect comand: %s, but executed %s", expectCommand, execCommand) - } - expectCommandExecCount += 1 - } - - if expectCommandExecCount != fexec.CommandCalls { - t.Errorf("Exepect comand executed %d times, but got %d", expectCommandExecCount, fexec.CommandCalls) - } - } -} - -type fakeClosable struct { - closed bool -} - -func (c *fakeClosable) Close() error { - c.closed = true - return nil -} - -func TestRevertPorts(t *testing.T) { - testCases := []struct { - replacementPorts []localPort - existingPorts []localPort 
- expectToBeClose []bool - }{ - { - replacementPorts: []localPort{ - {port: 5001}, - {port: 5002}, - {port: 5003}, - }, - existingPorts: []localPort{}, - expectToBeClose: []bool{true, true, true}, - }, - { - replacementPorts: []localPort{}, - existingPorts: []localPort{ - {port: 5001}, - {port: 5002}, - {port: 5003}, - }, - expectToBeClose: []bool{}, - }, - { - replacementPorts: []localPort{ - {port: 5001}, - {port: 5002}, - {port: 5003}, - }, - existingPorts: []localPort{ - {port: 5001}, - {port: 5002}, - {port: 5003}, - }, - expectToBeClose: []bool{false, false, false}, - }, - { - replacementPorts: []localPort{ - {port: 5001}, - {port: 5002}, - {port: 5003}, - }, - existingPorts: []localPort{ - {port: 5001}, - {port: 5003}, - }, - expectToBeClose: []bool{false, true, false}, - }, - { - replacementPorts: []localPort{ - {port: 5001}, - {port: 5002}, - {port: 5003}, - }, - existingPorts: []localPort{ - {port: 5001}, - {port: 5002}, - {port: 5003}, - {port: 5004}, - }, - expectToBeClose: []bool{false, false, false}, - }, - } - - for i, tc := range testCases { - replacementPortsMap := make(map[localPort]closeable) - for _, lp := range tc.replacementPorts { - replacementPortsMap[lp] = &fakeClosable{} - } - existingPortsMap := make(map[localPort]closeable) - for _, lp := range tc.existingPorts { - existingPortsMap[lp] = &fakeClosable{} - } - revertPorts(replacementPortsMap, existingPortsMap) - for j, expectation := range tc.expectToBeClose { - if replacementPortsMap[tc.replacementPorts[j]].(*fakeClosable).closed != expectation { - t.Errorf("Expect replacement localport %v to be %v in test case %v", tc.replacementPorts[j], expectation, i) - } - } - for _, lp := range tc.existingPorts { - if existingPortsMap[lp].(*fakeClosable).closed == true { - t.Errorf("Expect existing localport %v to be false in test case %v", lp, i) - } - } - } - -} - -// fakePortOpener implements portOpener. 
-type fakePortOpener struct { - openPorts []*localPort -} - -// OpenLocalPort fakes out the listen() and bind() used by syncProxyRules -// to lock a local port. -func (f *fakePortOpener) OpenLocalPort(lp *localPort) (closeable, error) { - f.openPorts = append(f.openPorts, lp) - return nil, nil -} - -type fakeHealthChecker struct { - services map[types.NamespacedName]uint16 - endpoints map[types.NamespacedName]int -} - -func newFakeHealthChecker() *fakeHealthChecker { - return &fakeHealthChecker{ - services: map[types.NamespacedName]uint16{}, - endpoints: map[types.NamespacedName]int{}, - } -} - -func (fake *fakeHealthChecker) SyncServices(newServices map[types.NamespacedName]uint16) error { - fake.services = newServices - return nil -} - -func (fake *fakeHealthChecker) SyncEndpoints(newEndpoints map[types.NamespacedName]int) error { - fake.endpoints = newEndpoints - return nil -} - -func getFakeHnsNetwork() *hnsNetworkInfo { - return &hnsNetworkInfo{ - id: "00000000-0000-0000-0000-000000000001", - name: "fakeNetwork", - }, nil -} - -const testHostname = "test-hostname" - -func NewFakeProxier() *Proxier { - fakeHnsNetwork := getFakeHnsNetwork() - // TODO: Call NewProxier after refactoring out the goroutine - // invocation into a Run() method. 
- p := &Proxier{ - serviceMap: make(proxyServiceMap), - serviceChanges: newServiceChangeMap(), - endpointsMap: make(proxyEndpointsMap), - endpointsChanges: newEndpointsChangeMap(testHostname), - clusterCIDR: "10.0.0.0/24", - hostname: testHostname, - portsMap: make(map[localPort]closeable), - healthChecker: newFakeHealthChecker(), - network: fakeHnsNetwork, - } - p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1) - return p -} - -func errorf(msg string, rules []iptablestest.Rule, t *testing.T) { - for _, r := range rules { - t.Logf("%q", r) - } - t.Errorf("%v", msg) -} - -func TestLoadBalancer(t *testing.T) { - fp := NewFakeProxier() - svcIP := "10.20.30.41" - svcPort := 80 - svcNodePort := 3001 - svcLBIP := "1.2.3.4" - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *api.Service) { - svc.Spec.Type = "LoadBalancer" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []api.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: api.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - svc.Status.LoadBalancer.Ingress = []api.LoadBalancerIngress{{ - IP: svcLBIP, - }} - }), - ) - - epIP := "10.180.0.1" - makeEndpointsMap(fp, - makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: epIP, - }}, - Ports: []api.EndpointPort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - }}, - }} - }), - ) - - fp.syncProxyRules() - - proto := strings.ToLower(string(api.ProtocolTCP)) - fwChain := string(serviceFirewallChainName(svcPortName.String(), proto)) - svcChain := string(servicePortChainName(svcPortName.String(), proto)) - //lbChain := string(serviceLBChainName(svcPortName.String(), proto)) - - // TODO - -} - -func TestNodePort(t *testing.T) { - - fp := 
NewFakeProxier() - svcIP := "10.20.30.41" - svcPort := 80 - svcNodePort := 3001 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *api.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []api.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: api.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - }), - ) - - epIP := "10.180.0.1" - makeEndpointsMap(fp, - makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: epIP, - }}, - Ports: []api.EndpointPort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - }}, - }} - }), - ) - - fp.syncProxyRules() - - proto := strings.ToLower(string(api.ProtocolTCP)) - svcChain := string(servicePortChainName(svcPortName.String(), proto)) - - // TODO -} - -func TestExternalIPsReject(t *testing.T) { - - fp := NewFakeProxier() - svcIP := "10.20.30.41" - svcPort := 80 - svcExternalIPs := "50.60.70.81" - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *api.Service) { - svc.Spec.Type = "ClusterIP" - svc.Spec.ClusterIP = svcIP - svc.Spec.ExternalIPs = []string{svcExternalIPs} - svc.Spec.Ports = []api.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: api.ProtocolTCP, - TargetPort: intstr.FromInt(svcPort), - }} - }), - ) - makeEndpointsMap(fp) - - fp.syncProxyRules() - -} - -func TestNodePortReject(t *testing.T) { - - fp := NewFakeProxier() - svcIP := "10.20.30.41" - svcPort := 80 - svcNodePort := 3001 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, 
svcPortName.Name, func(svc *api.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []api.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: api.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - }), - ) - makeEndpointsMap(fp) - - fp.syncProxyRules() - - // TODO -} - -func strPtr(s string) *string { - return &s -} - -func TestOnlyLocalLoadBalancing(t *testing.T) { - - fp := NewFakeProxier() - svcIP := "10.20.30.41" - svcPort := 80 - svcNodePort := 3001 - svcLBIP := "1.2.3.4" - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *api.Service) { - svc.Spec.Type = "LoadBalancer" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []api.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: api.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - svc.Status.LoadBalancer.Ingress = []api.LoadBalancerIngress{{ - IP: svcLBIP, - }} - svc.Annotations[api.BetaAnnotationExternalTraffic] = api.AnnotationValueExternalTrafficLocal - }), - ) - - epIP1 := "10.180.0.1" - epIP2 := "10.180.2.1" - epStrLocal := fmt.Sprintf("%s:%d", epIP1, svcPort) - epStrNonLocal := fmt.Sprintf("%s:%d", epIP2, svcPort) - makeEndpointsMap(fp, - makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: epIP1, - NodeName: nil, - }, { - IP: epIP2, - NodeName: strPtr(testHostname), - }}, - Ports: []api.EndpointPort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - }}, - }} - }), - ) - - fp.syncProxyRules() - - proto := strings.ToLower(string(api.ProtocolTCP)) - fwChain := string(serviceFirewallChainName(svcPortName.String(), proto)) - lbChain := string(serviceLBChainName(svcPortName.String(), proto)) - - nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), 
strings.ToLower(string(api.ProtocolTCP)), epStrLocal)) - localEpChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP)), epStrNonLocal)) - - // TODO -} - -func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) { - - fp := NewFakeProxier() - // set cluster CIDR to empty before test - fp.clusterCIDR = "" - onlyLocalNodePorts(t, fp) -} - -func TestOnlyLocalNodePorts(t *testing.T) { - - fp := NewFakeProxier() - onlyLocalNodePorts(t, fp) -} - -func onlyLocalNodePorts(t *testing.T, fp *Proxier) { - shouldLBTOSVCRuleExist := len(fp.clusterCIDR) > 0 - svcIP := "10.20.30.41" - svcPort := 80 - svcNodePort := 3001 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *api.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []api.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: api.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - svc.Annotations[api.BetaAnnotationExternalTraffic] = api.AnnotationValueExternalTrafficLocal - }), - ) - - epIP1 := "10.180.0.1" - epIP2 := "10.180.2.1" - epStrLocal := fmt.Sprintf("%s:%d", epIP1, svcPort) - epStrNonLocal := fmt.Sprintf("%s:%d", epIP2, svcPort) - makeEndpointsMap(fp, - makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: epIP1, - NodeName: nil, - }, { - IP: epIP2, - NodeName: strPtr(testHostname), - }}, - Ports: []api.EndpointPort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - }}, - }} - }), - ) - - fp.syncProxyRules() - - // TODO -} - -func makeTestService(namespace, name string, svcFunc func(*api.Service)) *api.Service { - svc := &api.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: map[string]string{}, - }, - Spec: 
api.ServiceSpec{}, - Status: api.ServiceStatus{}, - } - svcFunc(svc) - return svc -} - -func addTestPort(array []api.ServicePort, name string, protocol api.Protocol, port, nodeport int32, targetPort int) []api.ServicePort { - svcPort := api.ServicePort{ - Name: name, - Protocol: protocol, - Port: port, - NodePort: nodeport, - TargetPort: intstr.FromInt(targetPort), - } - return append(array, svcPort) -} - -func TestBuildServiceMapAddRemove(t *testing.T) { - - fp := NewFakeProxier() - - services := []*api.Service{ - makeTestService("somewhere-else", "cluster-ip", func(svc *api.Service) { - svc.Spec.Type = api.ServiceTypeClusterIP - svc.Spec.ClusterIP = "172.16.55.4" - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0) - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0) - }), - makeTestService("somewhere-else", "node-port", func(svc *api.Service) { - svc.Spec.Type = api.ServiceTypeNodePort - svc.Spec.ClusterIP = "172.16.55.10" - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0) - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0) - }), - makeTestService("somewhere", "load-balancer", func(svc *api.Service) { - svc.Spec.Type = api.ServiceTypeLoadBalancer - svc.Spec.ClusterIP = "172.16.55.11" - svc.Spec.LoadBalancerIP = "5.6.7.8" - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000) - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001) - svc.Status.LoadBalancer = api.LoadBalancerStatus{ - Ingress: []api.LoadBalancerIngress{ - {IP: "10.1.2.4"}, - }, - } - }), - makeTestService("somewhere", "only-local-load-balancer", func(svc *api.Service) { - svc.ObjectMeta.Annotations = map[string]string{ - api.BetaAnnotationExternalTraffic: api.AnnotationValueExternalTrafficLocal, - api.BetaAnnotationHealthCheckNodePort: "345", - } - svc.Spec.Type = api.ServiceTypeLoadBalancer - svc.Spec.ClusterIP = 
"172.16.55.12" - svc.Spec.LoadBalancerIP = "5.6.7.8" - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002) - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003) - svc.Status.LoadBalancer = api.LoadBalancerStatus{ - Ingress: []api.LoadBalancerIngress{ - {IP: "10.1.2.3"}, - }, - } - }), - } - - for i := range services { - fp.OnServiceAdd(services[i]) - } - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) - if len(fp.serviceMap) != 8 { - t.Errorf("expected service map length 8, got %v", fp.serviceMap) - } - - // The only-local-loadbalancer ones get added - if len(result.hcServices) != 1 { - t.Errorf("expected 1 healthcheck port, got %v", result.hcServices) - } else { - nsn := makeNSN("somewhere", "only-local-load-balancer") - if port, found := result.hcServices[nsn]; !found || port != 345 { - t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.hcServices) - } - } - - if len(result.staleServices) != 0 { - // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) - } - - // Remove some stuff - // oneService is a modification of services[0] with removed first port. - oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *api.Service) { - svc.Spec.Type = api.ServiceTypeClusterIP - svc.Spec.ClusterIP = "172.16.55.4" - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0) - }) - - fp.OnServiceUpdate(services[0], oneService) - fp.OnServiceDelete(services[1]) - fp.OnServiceDelete(services[2]) - fp.OnServiceDelete(services[3]) - - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) - if len(fp.serviceMap) != 1 { - t.Errorf("expected service map length 1, got %v", fp.serviceMap) - } - - if len(result.hcServices) != 0 { - t.Errorf("expected 0 healthcheck ports, got %v", result.hcServices) - } - - // All services but one were deleted. 
While you'd expect only the ClusterIPs - // from the three deleted services here, we still have the ClusterIP for - // the not-deleted service, because one of it's ServicePorts was deleted. - expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"} - if len(result.staleServices) != len(expectedStaleUDPServices) { - t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.staleServices.List()) - } - for _, ip := range expectedStaleUDPServices { - if !result.staleServices.Has(ip) { - t.Errorf("expected stale UDP service service %s", ip) - } - } -} - -func TestBuildServiceMapServiceHeadless(t *testing.T) { - - fp := NewFakeProxier() - - makeServiceMap(fp, - makeTestService("somewhere-else", "headless", func(svc *api.Service) { - svc.Spec.Type = api.ServiceTypeClusterIP - svc.Spec.ClusterIP = api.ClusterIPNone - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0) - }), - makeTestService("somewhere-else", "headless-without-port", func(svc *api.Service) { - svc.Spec.Type = api.ServiceTypeClusterIP - svc.Spec.ClusterIP = api.ClusterIPNone - }), - ) - - // Headless service should be ignored - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) - if len(fp.serviceMap) != 0 { - t.Errorf("expected service map length 0, got %d", len(fp.serviceMap)) - } - - // No proxied services, so no healthchecks - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %d", len(result.hcServices)) - } - - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) - } -} - -func TestBuildServiceMapServiceTypeExternalName(t *testing.T) { - - fp := NewFakeProxier() - - makeServiceMap(fp, - makeTestService("somewhere-else", "external-name", func(svc *api.Service) { - svc.Spec.Type = api.ServiceTypeExternalName - svc.Spec.ClusterIP = "172.16.55.4" // Should be ignored - svc.Spec.ExternalName = 
"foo2.bar.com" - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0) - }), - ) - - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) - if len(fp.serviceMap) != 0 { - t.Errorf("expected service map length 0, got %v", fp.serviceMap) - } - // No proxied services, so no healthchecks - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) - } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", result.staleServices) - } -} - -func TestBuildServiceMapServiceUpdate(t *testing.T) { - fp := NewFakeProxier() - - servicev1 := makeTestService("somewhere", "some-service", func(svc *api.Service) { - svc.Spec.Type = api.ServiceTypeClusterIP - svc.Spec.ClusterIP = "172.16.55.4" - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0) - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0) - }) - servicev2 := makeTestService("somewhere", "some-service", func(svc *api.Service) { - svc.ObjectMeta.Annotations = map[string]string{ - api.BetaAnnotationExternalTraffic: api.AnnotationValueExternalTrafficLocal, - api.BetaAnnotationHealthCheckNodePort: "345", - } - svc.Spec.Type = api.ServiceTypeLoadBalancer - svc.Spec.ClusterIP = "172.16.55.4" - svc.Spec.LoadBalancerIP = "5.6.7.8" - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002) - svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003) - svc.Status.LoadBalancer = api.LoadBalancerStatus{ - Ingress: []api.LoadBalancerIngress{ - {IP: "10.1.2.3"}, - }, - } - }) - - fp.OnServiceAdd(servicev1) - - result := updateServiceMap(fp.serviceMap, &fp.serviceChanges) - if len(fp.serviceMap) != 2 { - t.Errorf("expected service map length 2, got %v", fp.serviceMap) - } - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) - } - if 
len(result.staleServices) != 0 { - // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) - } - - // Change service to load-balancer - fp.OnServiceUpdate(servicev1, servicev2) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) - if len(fp.serviceMap) != 2 { - t.Errorf("expected service map length 2, got %v", fp.serviceMap) - } - if len(result.hcServices) != 1 { - t.Errorf("expected healthcheck ports length 1, got %v", result.hcServices) - } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", result.staleServices.List()) - } - - // No change; make sure the service map stays the same and there are - // no health-check changes - fp.OnServiceUpdate(servicev2, servicev2) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) - if len(fp.serviceMap) != 2 { - t.Errorf("expected service map length 2, got %v", fp.serviceMap) - } - if len(result.hcServices) != 1 { - t.Errorf("expected healthcheck ports length 1, got %v", result.hcServices) - } - if len(result.staleServices) != 0 { - t.Errorf("expected stale UDP services length 0, got %v", result.staleServices.List()) - } - - // And back to ClusterIP - fp.OnServiceUpdate(servicev2, servicev1) - result = updateServiceMap(fp.serviceMap, &fp.serviceChanges) - if len(fp.serviceMap) != 2 { - t.Errorf("expected service map length 2, got %v", fp.serviceMap) - } - if len(result.hcServices) != 0 { - t.Errorf("expected healthcheck ports length 0, got %v", result.hcServices) - } - if len(result.staleServices) != 0 { - // Services only added, so nothing stale yet - t.Errorf("expected stale UDP services length 0, got %d", len(result.staleServices)) - } -} - -func Test_getLocalIPs(t *testing.T) { - testCases := []struct { - endpointsMap map[proxy.ServicePortName][]*endpointsInfo - expected map[types.NamespacedName]sets.String - }{{ - // Case[0]: nothing - endpointsMap: 
map[proxy.ServicePortName][]*endpointsInfo{}, - expected: map[types.NamespacedName]sets.String{}, - }, { - // Case[1]: unnamed port - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - expected: map[types.NamespacedName]sets.String{}, - }, { - // Case[2]: unnamed port local - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: true}, - }, - }, - expected: map[types.NamespacedName]sets.String{ - {Namespace: "ns1", Name: "ep1"}: sets.NewString("1.1.1.1"), - }, - }, { - // Case[3]: named local and non-local ports for the same IP. - endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - {endpoint: "1.1.1.2:11", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.1:12", isLocal: false}, - {endpoint: "1.1.1.2:12", isLocal: true}, - }, - }, - expected: map[types.NamespacedName]sets.String{ - {Namespace: "ns1", Name: "ep1"}: sets.NewString("1.1.1.2"), - }, - }, { - // Case[4]: named local and non-local ports for different IPs. 
- endpointsMap: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - makeServicePortName("ns2", "ep2", "p22"): { - {endpoint: "2.2.2.2:22", isLocal: true}, - {endpoint: "2.2.2.22:22", isLocal: true}, - }, - makeServicePortName("ns2", "ep2", "p23"): { - {endpoint: "2.2.2.3:23", isLocal: true}, - }, - makeServicePortName("ns4", "ep4", "p44"): { - {endpoint: "4.4.4.4:44", isLocal: true}, - {endpoint: "4.4.4.5:44", isLocal: false}, - }, - makeServicePortName("ns4", "ep4", "p45"): { - {endpoint: "4.4.4.6:45", isLocal: true}, - }, - }, - expected: map[types.NamespacedName]sets.String{ - {Namespace: "ns2", Name: "ep2"}: sets.NewString("2.2.2.2", "2.2.2.22", "2.2.2.3"), - {Namespace: "ns4", Name: "ep4"}: sets.NewString("4.4.4.4", "4.4.4.6"), - }, - }} - - for tci, tc := range testCases { - // outputs - localIPs := getLocalIPs(tc.endpointsMap) - - if !reflect.DeepEqual(localIPs, tc.expected) { - t.Errorf("[%d] expected %#v, got %#v", tci, tc.expected, localIPs) - } - } -} - -// This is a coarse test, but it offers some modicum of confidence as the code is evolved. 
-func Test_endpointsToEndpointsMap(t *testing.T) { - testCases := []struct { - newEndpoints *api.Endpoints - expected map[proxy.ServicePortName][]*endpointsInfo - }{{ - // Case[0]: nothing - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {}), - expected: map[proxy.ServicePortName][]*endpointsInfo{}, - }, { - // Case[1]: no changes, unnamed port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - }, { - // Case[2]: no changes, named port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "port", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "port"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - }, { - // Case[3]: new port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - }, { - // Case[4]: remove port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {}), - expected: map[proxy.ServicePortName][]*endpointsInfo{}, - }, { - // Case[5]: new IP and port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: 
[]api.EndpointAddress{{ - IP: "1.1.1.1", - }, { - IP: "2.2.2.2", - }}, - Ports: []api.EndpointPort{{ - Name: "p1", - Port: 11, - }, { - Name: "p2", - Port: 22, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p1"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - {endpoint: "2.2.2.2:11", isLocal: false}, - }, - makeServicePortName("ns1", "ep1", "p2"): { - {endpoint: "1.1.1.1:22", isLocal: false}, - {endpoint: "2.2.2.2:22", isLocal: false}, - }, - }, - }, { - // Case[6]: remove IP and port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p1", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p1"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - }, { - // Case[7]: rename port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p2", - Port: 11, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p2"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - }, { - // Case[8]: renumber port - newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{ - { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p1", - Port: 22, - }}, - }, - } - }), - expected: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p1"): { - {endpoint: "1.1.1.1:22", isLocal: false}, - }, - }, - }} - - for tci, tc := range testCases { - // outputs - newEndpoints := endpointsToEndpointsMap(tc.newEndpoints, "host") - - if len(newEndpoints) 
!= len(tc.expected) { - t.Errorf("[%d] expected %d new, got %d: %v", tci, len(tc.expected), len(newEndpoints), spew.Sdump(newEndpoints)) - } - for x := range tc.expected { - if len(newEndpoints[x]) != len(tc.expected[x]) { - t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(tc.expected[x]), x, len(newEndpoints[x])) - } else { - for i := range newEndpoints[x] { - if *(newEndpoints[x][i]) != *(tc.expected[x][i]) { - t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, tc.expected[x][i], *(newEndpoints[x][i])) - } - } - } - } - } -} - -func makeTestEndpoints(namespace, name string, eptFunc func(*api.Endpoints)) *api.Endpoints { - ept := &api.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } - eptFunc(ept) - return ept -} - -func makeEndpointsMap(proxier *Proxier, allEndpoints ...*api.Endpoints) { - for i := range allEndpoints { - proxier.OnEndpointsAdd(allEndpoints[i]) - } - - proxier.mu.Lock() - defer proxier.mu.Unlock() - proxier.endpointsSynced = true -} - -func makeNSN(namespace, name string) types.NamespacedName { - return types.NamespacedName{Namespace: namespace, Name: name} -} - -func makeServicePortName(ns, name, port string) proxy.ServicePortName { - return proxy.ServicePortName{ - NamespacedName: makeNSN(ns, name), - Port: port, - } -} - -func makeServiceMap(proxier *Proxier, allServices ...*api.Service) { - for i := range allServices { - proxier.OnServiceAdd(allServices[i]) - } - - proxier.mu.Lock() - defer proxier.mu.Unlock() - proxier.servicesSynced = true -} - -func compareEndpointsMaps(t *testing.T, tci int, newMap, expected map[proxy.ServicePortName][]*endpointsInfo) { - if len(newMap) != len(expected) { - t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap) - } - for x := range expected { - if len(newMap[x]) != len(expected[x]) { - t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x])) - } else { - for i := 
range expected[x] { - if *(newMap[x][i]) != *(expected[x][i]) { - t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newMap[x][i]) - } - } - } - } -} - -func Test_updateEndpointsMap(t *testing.T) { - var nodeName = testHostname - - emptyEndpoint := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{} - } - unnamedPort := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Port: 11, - }}, - }} - } - unnamedPortLocal := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Port: 11, - }}, - }} - } - namedPortLocal := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p11", - Port: 11, - }}, - }} - } - namedPort := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p11", - Port: 11, - }}, - }} - } - namedPortRenamed := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p11-2", - Port: 11, - }}, - }} - } - namedPortRenumbered := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p11", - Port: 22, - }}, - }} - } - namedPortsLocalNoLocal := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }, { - IP: "1.1.1.2", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p11", - Port: 11, - }, { - Name: "p12", - Port: 12, - }}, - }} - } - 
multipleSubsets := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p11", - Port: 11, - }}, - }, { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.2", - }}, - Ports: []api.EndpointPort{{ - Name: "p12", - Port: 12, - }}, - }} - } - multipleSubsetsWithLocal := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p11", - Port: 11, - }}, - }, { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.2", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p12", - Port: 12, - }}, - }} - } - multipleSubsetsMultiplePortsLocal := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p11", - Port: 11, - }, { - Name: "p12", - Port: 12, - }}, - }, { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.3", - }}, - Ports: []api.EndpointPort{{ - Name: "p13", - Port: 13, - }}, - }} - } - multipleSubsetsIPsPorts1 := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }, { - IP: "1.1.1.2", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p11", - Port: 11, - }, { - Name: "p12", - Port: 12, - }}, - }, { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.3", - }, { - IP: "1.1.1.4", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p13", - Port: 13, - }, { - Name: "p14", - Port: 14, - }}, - }} - } - multipleSubsetsIPsPorts2 := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "2.2.2.1", - }, { - IP: "2.2.2.2", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p21", - Port: 21, - }, { - Name: "p22", - Port: 22, - }}, - }} - } - complexBefore1 
:= func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }}, - Ports: []api.EndpointPort{{ - Name: "p11", - Port: 11, - }}, - }} - } - complexBefore2 := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "2.2.2.2", - NodeName: &nodeName, - }, { - IP: "2.2.2.22", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p22", - Port: 22, - }}, - }, { - Addresses: []api.EndpointAddress{{ - IP: "2.2.2.3", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p23", - Port: 23, - }}, - }} - } - complexBefore4 := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "4.4.4.4", - NodeName: &nodeName, - }, { - IP: "4.4.4.5", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p44", - Port: 44, - }}, - }, { - Addresses: []api.EndpointAddress{{ - IP: "4.4.4.6", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p45", - Port: 45, - }}, - }} - } - complexAfter1 := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.1", - }, { - IP: "1.1.1.11", - }}, - Ports: []api.EndpointPort{{ - Name: "p11", - Port: 11, - }}, - }, { - Addresses: []api.EndpointAddress{{ - IP: "1.1.1.2", - }}, - Ports: []api.EndpointPort{{ - Name: "p12", - Port: 12, - }, { - Name: "p122", - Port: 122, - }}, - }} - } - complexAfter3 := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "3.3.3.3", - }}, - Ports: []api.EndpointPort{{ - Name: "p33", - Port: 33, - }}, - }} - } - complexAfter4 := func(ept *api.Endpoints) { - ept.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{ - IP: "4.4.4.4", - NodeName: &nodeName, - }}, - Ports: []api.EndpointPort{{ - Name: "p44", - Port: 44, - }}, - }} - } - - testCases := []struct { - // previousEndpoints 
and currentEndpoints are used to call appropriate - // handlers OnEndpoints* (based on whether corresponding values are nil - // or non-nil) and must be of equal length. - previousEndpoints []*api.Endpoints - currentEndpoints []*api.Endpoints - oldEndpoints map[proxy.ServicePortName][]*endpointsInfo - expectedResult map[proxy.ServicePortName][]*endpointsInfo - expectedStaleEndpoints []endpointServicePair - expectedStaleServiceNames map[proxy.ServicePortName]bool - expectedHealthchecks map[types.NamespacedName]int - }{{ - // Case[0]: nothing - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedStaleEndpoints: []endpointServicePair{}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, - expectedHealthchecks: map[types.NamespacedName]int{}, - }, { - // Case[1]: no change, unnamed port - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", unnamedPort), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", unnamedPort), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, - expectedHealthchecks: map[types.NamespacedName]int{}, - }, { - // Case[2]: no change, named port, local - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPortLocal), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPortLocal), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: true}, - }, - }, - expectedResult: 
map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: true}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, - expectedHealthchecks: map[types.NamespacedName]int{ - makeNSN("ns1", "ep1"): 1, - }, - }, { - // Case[3]: no change, multiple subsets - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", multipleSubsets), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", multipleSubsets), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.2:12", isLocal: false}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.2:12", isLocal: false}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, - expectedHealthchecks: map[types.NamespacedName]int{}, - }, { - // Case[4]: no change, multiple subsets, multiple ports, local - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.1:12", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p13"): { - {endpoint: "1.1.1.3:13", isLocal: false}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - 
makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.1:12", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p13"): { - {endpoint: "1.1.1.3:13", isLocal: false}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, - expectedHealthchecks: map[types.NamespacedName]int{ - makeNSN("ns1", "ep1"): 1, - }, - }, { - // Case[5]: no change, multiple endpoints, subsets, IPs, and ports - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1), - makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1), - makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - {endpoint: "1.1.1.2:11", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.1:12", isLocal: false}, - {endpoint: "1.1.1.2:12", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p13"): { - {endpoint: "1.1.1.3:13", isLocal: false}, - {endpoint: "1.1.1.4:13", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p14"): { - {endpoint: "1.1.1.3:14", isLocal: false}, - {endpoint: "1.1.1.4:14", isLocal: true}, - }, - makeServicePortName("ns2", "ep2", "p21"): { - {endpoint: "2.2.2.1:21", isLocal: false}, - {endpoint: "2.2.2.2:21", isLocal: true}, - }, - makeServicePortName("ns2", "ep2", "p22"): { - {endpoint: "2.2.2.1:22", isLocal: false}, - {endpoint: "2.2.2.2:22", isLocal: true}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - {endpoint: "1.1.1.2:11", isLocal: true}, - }, - 
makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.1:12", isLocal: false}, - {endpoint: "1.1.1.2:12", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p13"): { - {endpoint: "1.1.1.3:13", isLocal: false}, - {endpoint: "1.1.1.4:13", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p14"): { - {endpoint: "1.1.1.3:14", isLocal: false}, - {endpoint: "1.1.1.4:14", isLocal: true}, - }, - makeServicePortName("ns2", "ep2", "p21"): { - {endpoint: "2.2.2.1:21", isLocal: false}, - {endpoint: "2.2.2.2:21", isLocal: true}, - }, - makeServicePortName("ns2", "ep2", "p22"): { - {endpoint: "2.2.2.1:22", isLocal: false}, - {endpoint: "2.2.2.2:22", isLocal: true}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, - expectedHealthchecks: map[types.NamespacedName]int{ - makeNSN("ns1", "ep1"): 2, - makeNSN("ns2", "ep2"): 1, - }, - }, { - // Case[6]: add an Endpoints - previousEndpoints: []*api.Endpoints{ - nil, - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", unnamedPortLocal), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: true}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{ - makeServicePortName("ns1", "ep1", ""): true, - }, - expectedHealthchecks: map[types.NamespacedName]int{ - makeNSN("ns1", "ep1"): 1, - }, - }, { - // Case[7]: remove an Endpoints - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", unnamedPortLocal), - }, - currentEndpoints: []*api.Endpoints{ - nil, - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: true}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, - 
expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.1:11", - servicePortName: makeServicePortName("ns1", "ep1", ""), - }}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, - expectedHealthchecks: map[types.NamespacedName]int{}, - }, { - // Case[8]: add an IP and port - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPort), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - {endpoint: "1.1.1.2:11", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.1:12", isLocal: false}, - {endpoint: "1.1.1.2:12", isLocal: true}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{ - makeServicePortName("ns1", "ep1", "p12"): true, - }, - expectedHealthchecks: map[types.NamespacedName]int{ - makeNSN("ns1", "ep1"): 1, - }, - }, { - // Case[9]: remove an IP and port - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPort), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - {endpoint: "1.1.1.2:11", isLocal: true}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.1:12", isLocal: false}, - {endpoint: "1.1.1.2:12", isLocal: true}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - 
expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.2:11", - servicePortName: makeServicePortName("ns1", "ep1", "p11"), - }, { - endpoint: "1.1.1.1:12", - servicePortName: makeServicePortName("ns1", "ep1", "p12"), - }, { - endpoint: "1.1.1.2:12", - servicePortName: makeServicePortName("ns1", "ep1", "p12"), - }}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, - expectedHealthchecks: map[types.NamespacedName]int{}, - }, { - // Case[10]: add a subset - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPort), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", multipleSubsetsWithLocal), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.2:12", isLocal: true}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{ - makeServicePortName("ns1", "ep1", "p12"): true, - }, - expectedHealthchecks: map[types.NamespacedName]int{ - makeNSN("ns1", "ep1"): 1, - }, - }, { - // Case[11]: remove a subset - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", multipleSubsets), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPort), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.2:12", isLocal: false}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - 
expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.2:12", - servicePortName: makeServicePortName("ns1", "ep1", "p12"), - }}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, - expectedHealthchecks: map[types.NamespacedName]int{}, - }, { - // Case[12]: rename a port - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPort), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPortRenamed), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11-2"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.1:11", - servicePortName: makeServicePortName("ns1", "ep1", "p11"), - }}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{ - makeServicePortName("ns1", "ep1", "p11-2"): true, - }, - expectedHealthchecks: map[types.NamespacedName]int{}, - }, { - // Case[13]: renumber a port - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPort), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", namedPortRenumbered), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:22", isLocal: false}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "1.1.1.1:11", - servicePortName: makeServicePortName("ns1", "ep1", "p11"), - }}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{}, - expectedHealthchecks: map[types.NamespacedName]int{}, - }, { - // Case[14]: complex add and remove - previousEndpoints: 
[]*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", complexBefore1), - makeTestEndpoints("ns2", "ep2", complexBefore2), - nil, - makeTestEndpoints("ns4", "ep4", complexBefore4), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", complexAfter1), - nil, - makeTestEndpoints("ns3", "ep3", complexAfter3), - makeTestEndpoints("ns4", "ep4", complexAfter4), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - makeServicePortName("ns2", "ep2", "p22"): { - {endpoint: "2.2.2.2:22", isLocal: true}, - {endpoint: "2.2.2.22:22", isLocal: true}, - }, - makeServicePortName("ns2", "ep2", "p23"): { - {endpoint: "2.2.2.3:23", isLocal: true}, - }, - makeServicePortName("ns4", "ep4", "p44"): { - {endpoint: "4.4.4.4:44", isLocal: true}, - {endpoint: "4.4.4.5:44", isLocal: true}, - }, - makeServicePortName("ns4", "ep4", "p45"): { - {endpoint: "4.4.4.6:45", isLocal: true}, - }, - }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", "p11"): { - {endpoint: "1.1.1.1:11", isLocal: false}, - {endpoint: "1.1.1.11:11", isLocal: false}, - }, - makeServicePortName("ns1", "ep1", "p12"): { - {endpoint: "1.1.1.2:12", isLocal: false}, - }, - makeServicePortName("ns1", "ep1", "p122"): { - {endpoint: "1.1.1.2:122", isLocal: false}, - }, - makeServicePortName("ns3", "ep3", "p33"): { - {endpoint: "3.3.3.3:33", isLocal: false}, - }, - makeServicePortName("ns4", "ep4", "p44"): { - {endpoint: "4.4.4.4:44", isLocal: true}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{{ - endpoint: "2.2.2.2:22", - servicePortName: makeServicePortName("ns2", "ep2", "p22"), - }, { - endpoint: "2.2.2.22:22", - servicePortName: makeServicePortName("ns2", "ep2", "p22"), - }, { - endpoint: "2.2.2.3:23", - servicePortName: makeServicePortName("ns2", "ep2", "p23"), - }, { - endpoint: "4.4.4.5:44", - servicePortName: makeServicePortName("ns4", 
"ep4", "p44"), - }, { - endpoint: "4.4.4.6:45", - servicePortName: makeServicePortName("ns4", "ep4", "p45"), - }}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{ - makeServicePortName("ns1", "ep1", "p12"): true, - makeServicePortName("ns1", "ep1", "p122"): true, - makeServicePortName("ns3", "ep3", "p33"): true, - }, - expectedHealthchecks: map[types.NamespacedName]int{ - makeNSN("ns4", "ep4"): 1, - }, - }, { - // Case[15]: change from 0 endpoint address to 1 unnamed port - previousEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", emptyEndpoint), - }, - currentEndpoints: []*api.Endpoints{ - makeTestEndpoints("ns1", "ep1", unnamedPort), - }, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ - makeServicePortName("ns1", "ep1", ""): { - {endpoint: "1.1.1.1:11", isLocal: false}, - }, - }, - expectedStaleEndpoints: []endpointServicePair{}, - expectedStaleServiceNames: map[proxy.ServicePortName]bool{ - makeServicePortName("ns1", "ep1", ""): true, - }, - expectedHealthchecks: map[types.NamespacedName]int{}, - }, - } - - for tci, tc := range testCases { - - fp := NewFakeProxier() - fp.hostname = nodeName - - // First check that after adding all previous versions of endpoints, - // the fp.oldEndpoints is as we expect. - for i := range tc.previousEndpoints { - if tc.previousEndpoints[i] != nil { - fp.OnEndpointsAdd(tc.previousEndpoints[i]) - } - } - updateEndpointsMap(fp.endpointsMap, &fp.endpointsChanges, fp.hostname) - compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints) - - // Now let's call appropriate handlers to get to state we want to be. 
- if len(tc.previousEndpoints) != len(tc.currentEndpoints) { - t.Fatalf("[%d] different lengths of previous and current endpoints", tci) - continue - } - - for i := range tc.previousEndpoints { - prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i] - switch { - case prev == nil: - fp.OnEndpointsAdd(curr) - case curr == nil: - fp.OnEndpointsDelete(prev) - default: - fp.OnEndpointsUpdate(prev, curr) - } - } - result := updateEndpointsMap(fp.endpointsMap, &fp.endpointsChanges, fp.hostname) - newMap := fp.endpointsMap - compareEndpointsMaps(t, tci, newMap, tc.expectedResult) - if len(result.staleEndpoints) != len(tc.expectedStaleEndpoints) { - t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.staleEndpoints), result.staleEndpoints) - } - for _, x := range tc.expectedStaleEndpoints { - if result.staleEndpoints[x] != true { - t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.staleEndpoints) - } - } - if len(result.staleServiceNames) != len(tc.expectedStaleServiceNames) { - t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.staleServiceNames), result.staleServiceNames) - } - for svcName := range tc.expectedStaleServiceNames { - if result.staleServiceNames[svcName] != true { - t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.staleServiceNames) - } - } - if !reflect.DeepEqual(result.hcEndpoints, tc.expectedHealthchecks) { - t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.hcEndpoints) - } - } -} - -// TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces. 
From b5aab25fe1b63f5188ea41976213a3d0ec2c1e74 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Wed, 29 Nov 2017 15:30:16 +0800 Subject: [PATCH 151/794] update bazel BUILD --- pkg/proxy/winkernel/BUILD | 29 +---------------------------- 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/pkg/proxy/winkernel/BUILD b/pkg/proxy/winkernel/BUILD index 42494391696..8fd2b1f1509 100644 --- a/pkg/proxy/winkernel/BUILD +++ b/pkg/proxy/winkernel/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -36,33 +36,6 @@ go_library( }), ) -go_test( - name = "go_default_test", - srcs = select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "proxier_test.go", - ], - "//conditions:default": [], - }), - importpath = "k8s.io/kubernetes/pkg/proxy/winkernel", - library = ":go_default_library", - deps = select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "//pkg/apis/core:go_default_library", - "//pkg/proxy:go_default_library", - "//pkg/util/async:go_default_library", - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", - "//vendor/k8s.io/utils/exec/testing:go_default_library", - ], - "//conditions:default": [], - }), -) - filegroup( name = "package-srcs", srcs = glob(["**"]), From 66c2ec32f2497616ddd6912cfc97f0b4306f9eea Mon Sep 17 00:00:00 2001 From: Shiyang Wang Date: Mon, 14 Aug 2017 16:24:30 +0800 Subject: [PATCH 152/794] Fix Content negotiation incorrect when Accept header uses type parameters --- .../handlers/negotiation/negotiate.go | 19 ++++++++++++++--- .../handlers/negotiation/negotiate_test.go | 
21 ++++++++++++++++++- 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go index 8f8a50fe3e7..7f4225a5b93 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go @@ -273,6 +273,13 @@ func acceptMediaTypeOptions(params map[string]string, accepts *AcceptedMediaType return options, true } +type candidateMediaType struct { + accepted *AcceptedMediaType + clauses goautoneg.Accept +} + +type candidateMediaTypeSlice []candidateMediaType + // NegotiateMediaTypeOptions returns the most appropriate content type given the accept header and // a list of alternatives along with the accepted media type parameters. func NegotiateMediaTypeOptions(header string, accepted []AcceptedMediaType, endpoint EndpointRestrictions) (MediaTypeOptions, bool) { @@ -282,6 +289,7 @@ func NegotiateMediaTypeOptions(header string, accepted []AcceptedMediaType, endp }, true } + var candidates candidateMediaTypeSlice clauses := goautoneg.ParseAccept(header) for _, clause := range clauses { for i := range accepted { @@ -290,12 +298,17 @@ func NegotiateMediaTypeOptions(header string, accepted []AcceptedMediaType, endp case clause.Type == accepts.Type && clause.SubType == accepts.SubType, clause.Type == accepts.Type && clause.SubType == "*", clause.Type == "*" && clause.SubType == "*": - // TODO: should we prefer the first type with no unrecognized options? Do we need to ignore unrecognized - // parameters. 
- return acceptMediaTypeOptions(clause.Params, accepts, endpoint) + candidates = append(candidates, candidateMediaType{accepted: accepts, clauses: clause}) } } } + + for _, v := range candidates { + if retVal, ret := acceptMediaTypeOptions(v.clauses.Params, v.accepted, endpoint); ret { + return retVal, true + } + } + return MediaTypeOptions{}, false } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate_test.go index 8a747ff73dc..1d11d0a3048 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate_test.go @@ -181,7 +181,26 @@ func TestNegotiate(t *testing.T) { serializer: fakeCodec, params: map[string]string{"pretty": "1"}, }, - + { + req: &http.Request{ + Header: http.Header{ + "Accept": []string{"application/json;as=BOGUS;v=v1alpha1;g=meta.k8s.io, application/json"}, + }, + }, + contentType: "application/json", + ns: &fakeNegotiater{serializer: fakeCodec, types: []string{"application/json"}}, + serializer: fakeCodec, + }, + { + req: &http.Request{ + Header: http.Header{ + "Accept": []string{"application/BOGUS, application/json"}, + }, + }, + contentType: "application/json", + ns: &fakeNegotiater{serializer: fakeCodec, types: []string{"application/json"}}, + serializer: fakeCodec, + }, // "application" is not a valid media type, so the server will reject the response during // negotiation (the server, in error, has specified an invalid media type) { From f0ace95218825c6218cba954640e830fea4498c3 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Wed, 29 Nov 2017 09:56:42 +0200 Subject: [PATCH 153/794] Handling the case of an upgrade from a non-rolling master with resource change --- .../reactive/kubernetes_master.py | 27 ++++++++++++++----- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git 
a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 95f7a8a2a3b..a56737e16d7 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -62,13 +62,14 @@ nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$' os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') -def set_upgrade_needed(): + +def set_upgrade_needed(forced=False): set_state('kubernetes-master.upgrade-needed') config = hookenv.config() previous_channel = config.previous('channel') require_manual = config.get('require-manual-upgrade') hookenv.log('set upgrade needed') - if previous_channel is None or not require_manual: + if previous_channel is None or not require_manual or forced: hookenv.log('forcing upgrade') set_state('kubernetes-master.upgrade-specified') @@ -101,16 +102,27 @@ def check_for_upgrade_needed(): add_rbac_roles() set_state('reconfigure.authentication.setup') remove_state('authentication.setup') - if snap_resources_changed(): + changed = snap_resources_changed() + if changed == 'yes': set_upgrade_needed() + elif changed == 'unknown': + # We are here on an upgrade from non-rolling master + # Since this upgrade might also include resource updates eg + # juju upgrade-charm kubernetes-master --resource kube-any=my.snap + # we take no risk and forcibly upgrade the snaps. + # Forcibly means we do not prompt the user to call the upgrade action. + set_upgrade_needed(forced=True) + def snap_resources_changed(): ''' Check if the snapped resources have changed. The first time this method is - called will report no change. + called will report "unknown". 
- Returns: True in case a snap resource file has changed + Returns: "yes" in case a snap resource file has changed, + "no" in case a snap resources are the same as last call, + "unknown" if it is the first time this method is called ''' db = unitdata.kv() @@ -118,11 +130,12 @@ def snap_resources_changed(): 'kube-scheduler', 'cdk-addons'] paths = [hookenv.resource_get(resource) for resource in resources] if db.get('snap.resources.fingerprint.initialised'): - return any_file_changed(paths) + result = 'yes' if any_file_changed(paths) else 'no' + return result else: db.set('snap.resources.fingerprint.initialised', True) any_file_changed(paths) - return False + return 'unknown' def add_rbac_roles(): From 992aee0d9933c0c54b134e1664acd44c36dfd654 Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Mon, 27 Nov 2017 00:13:05 +0530 Subject: [PATCH 154/794] add better error handling for unstructured helpers --- .../pkg/apis/meta/v1/unstructured/helpers.go | 180 ++++++++++-------- .../apis/meta/v1/unstructured/unstructured.go | 22 +-- .../v1/unstructured/unstructured_list_test.go | 5 +- 3 files changed, 112 insertions(+), 95 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 8d03525474d..fdc688f0732 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -31,147 +31,163 @@ import ( ) // NestedFieldCopy returns a deep copy of the value of a nested field. -// false is returned if the value is missing. -// nil, true is returned for a nil field. -func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool) { - val, ok := nestedFieldNoCopy(obj, fields...) - if !ok { - return nil, false +// Returns false if the value is missing. +// No error is returned for a nil field. 
+func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { + val, found, err := nestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err } - return runtime.DeepCopyJSONValue(val), true + return runtime.DeepCopyJSONValue(val), true, nil } -func nestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool) { +func nestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { var val interface{} = obj for _, field := range fields { if m, ok := val.(map[string]interface{}); ok { val, ok = m[field] if !ok { - return nil, false + return nil, false, nil } } else { - // Expected map[string]interface{}, got something else - return nil, false + return nil, false, fmt.Errorf("%v is of the type %T, expected map[string]interface{}", val, val) } } - return val, true + return val, true, nil } // NestedString returns the string value of a nested field. -// Returns false if value is not found or is not a string. -func NestedString(obj map[string]interface{}, fields ...string) (string, bool) { - val, ok := nestedFieldNoCopy(obj, fields...) - if !ok { - return "", false +// Returns false if value is not found and an error if not a string. +func NestedString(obj map[string]interface{}, fields ...string) (string, bool, error) { + val, found, err := nestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return "", found, err } s, ok := val.(string) - return s, ok + if !ok { + return "", false, fmt.Errorf("%v is of the type %T, expected string", val, val) + } + return s, true, nil } // NestedBool returns the bool value of a nested field. -// Returns false if value is not found or is not a bool. -func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool) { - val, ok := nestedFieldNoCopy(obj, fields...) - if !ok { - return false, false +// Returns false if value is not found and an error if not a bool. 
+func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error) { + val, found, err := nestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return false, found, err } b, ok := val.(bool) - return b, ok + if !ok { + return false, false, fmt.Errorf("%v is of the type %T, expected bool", val, val) + } + return b, true, nil } -// NestedFloat64 returns the bool value of a nested field. -// Returns false if value is not found or is not a float64. -func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool) { - val, ok := nestedFieldNoCopy(obj, fields...) - if !ok { - return 0, false +// NestedFloat64 returns the float64 value of a nested field. +// Returns false if value is not found and an error if not a float64. +func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) { + val, found, err := nestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return 0, found, err } f, ok := val.(float64) - return f, ok + if !ok { + return 0, false, fmt.Errorf("%v is of the type %T, expected float64", val, val) + } + return f, true, nil } // NestedInt64 returns the int64 value of a nested field. -// Returns false if value is not found or is not an int64. -func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool) { - val, ok := nestedFieldNoCopy(obj, fields...) - if !ok { - return 0, false +// Returns false if value is not found and an error if not an int64. +func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, error) { + val, found, err := nestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return 0, found, err } i, ok := val.(int64) - return i, ok + if !ok { + return 0, false, fmt.Errorf("%v is of the type %T, expected int64", val, val) + } + return i, true, nil } // NestedStringSlice returns a copy of []string value of a nested field. 
-// Returns false if value is not found, is not a []interface{} or contains non-string items in the slice. -func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool) { - val, ok := nestedFieldNoCopy(obj, fields...) +// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice. +func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) { + val, found, err := nestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + m, ok := val.([]interface{}) if !ok { - return nil, false + return nil, false, fmt.Errorf("%v is of the type %T, expected []interface{}", val, val) } - if m, ok := val.([]interface{}); ok { - strSlice := make([]string, 0, len(m)) - for _, v := range m { - if str, ok := v.(string); ok { - strSlice = append(strSlice, str) - } else { - return nil, false - } + strSlice := make([]string, 0, len(m)) + for _, v := range m { + if str, ok := v.(string); ok { + strSlice = append(strSlice, str) + } else { + return nil, false, fmt.Errorf("contains non-string key in the slice: %v is of the type %T, expected string", v, v) } - return strSlice, true } - return nil, false + return strSlice, true, nil } // NestedSlice returns a deep copy of []interface{} value of a nested field. -// Returns false if value is not found or is not a []interface{}. -func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool) { - val, ok := nestedFieldNoCopy(obj, fields...) +// Returns false if value is not found and an error if not a []interface{}. +func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool, error) { + val, found, err := nestedFieldNoCopy(obj, fields...) 
+ if !found || err != nil { + return nil, found, err + } + _, ok := val.([]interface{}) if !ok { - return nil, false + return nil, false, fmt.Errorf("%v is of the type %T, expected []interface{}", val, val) } - if _, ok := val.([]interface{}); ok { - return runtime.DeepCopyJSONValue(val).([]interface{}), true - } - return nil, false + return runtime.DeepCopyJSONValue(val).([]interface{}), true, nil } // NestedStringMap returns a copy of map[string]string value of a nested field. -// Returns false if value is not found, is not a map[string]interface{} or contains non-string values in the map. -func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool) { - m, ok := nestedMapNoCopy(obj, fields...) - if !ok { - return nil, false +// Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map. +func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) { + m, found, err := nestedMapNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err } strMap := make(map[string]string, len(m)) for k, v := range m { if str, ok := v.(string); ok { strMap[k] = str } else { - return nil, false + return nil, false, fmt.Errorf("contains non-string key in the map: %v is of the type %T, expected string", v, v) } } - return strMap, true + return strMap, true, nil } // NestedMap returns a deep copy of map[string]interface{} value of a nested field. -// Returns false if value is not found or is not a map[string]interface{}. -func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool) { - m, ok := nestedMapNoCopy(obj, fields...) - if !ok { - return nil, false +// Returns false if value is not found and an error if not a map[string]interface{}. +func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { + m, found, err := nestedMapNoCopy(obj, fields...) 
+ if !found || err != nil { + return nil, found, err } - return runtime.DeepCopyJSON(m), true + return runtime.DeepCopyJSON(m), true, nil } // nestedMapNoCopy returns a map[string]interface{} value of a nested field. -// Returns false if value is not found or is not a map[string]interface{}. -func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool) { - val, ok := nestedFieldNoCopy(obj, fields...) - if !ok { - return nil, false +// Returns false if value is not found and an error if not a map[string]interface{}. +func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { + val, found, err := nestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err } m, ok := val.(map[string]interface{}) - return m, ok + if !ok { + return nil, false, fmt.Errorf("%v is of the type %T, expected map[string]interface{}", val, val) + } + return m, true, nil } // SetNestedField sets the value of a nested field to a deep copy of the value provided. @@ -245,8 +261,8 @@ func RemoveNestedField(obj map[string]interface{}, fields ...string) { } func getNestedString(obj map[string]interface{}, fields ...string) string { - val, ok := NestedString(obj, fields...) - if !ok { + val, found, err := NestedString(obj, fields...) + if !found || err != nil { return "" } return val @@ -256,11 +272,11 @@ func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference { // though this field is a *bool, but when decoded from JSON, it's // unmarshalled as bool. 
var controllerPtr *bool - if controller, ok := NestedBool(v, "controller"); ok { + if controller, found, err := NestedBool(v, "controller"); err == nil && found { controllerPtr = &controller } var blockOwnerDeletionPtr *bool - if blockOwnerDeletion, ok := NestedBool(v, "blockOwnerDeletion"); ok { + if blockOwnerDeletion, found, err := NestedBool(v, "blockOwnerDeletion"); err == nil && found { blockOwnerDeletionPtr = &blockOwnerDeletion } return metav1.OwnerReference{ diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 36e769bd602..2a13330490a 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -138,8 +138,8 @@ func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { } func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { - field, ok := nestedFieldNoCopy(u.Object, "metadata", "ownerReferences") - if !ok { + field, found, err := nestedFieldNoCopy(u.Object, "metadata", "ownerReferences") + if !found || err != nil { return nil } original, ok := field.([]interface{}) @@ -228,8 +228,8 @@ func (u *Unstructured) SetResourceVersion(version string) { } func (u *Unstructured) GetGeneration() int64 { - val, ok := NestedInt64(u.Object, "metadata", "generation") - if !ok { + val, found, err := NestedInt64(u.Object, "metadata", "generation") + if !found || err != nil { return 0 } return val @@ -289,8 +289,8 @@ func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { } func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 { - val, ok := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds") - if !ok { + val, found, err := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds") + if !found || err != nil { return nil } return &val @@ -305,7 +305,7 @@ func (u 
*Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds } func (u *Unstructured) GetLabels() map[string]string { - m, _ := NestedStringMap(u.Object, "metadata", "labels") + m, _, _ := NestedStringMap(u.Object, "metadata", "labels") return m } @@ -314,7 +314,7 @@ func (u *Unstructured) SetLabels(labels map[string]string) { } func (u *Unstructured) GetAnnotations() map[string]string { - m, _ := NestedStringMap(u.Object, "metadata", "annotations") + m, _, _ := NestedStringMap(u.Object, "metadata", "annotations") return m } @@ -337,8 +337,8 @@ func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { } func (u *Unstructured) GetInitializers() *metav1.Initializers { - m, ok := nestedMapNoCopy(u.Object, "metadata", "initializers") - if !ok { + m, found, err := nestedMapNoCopy(u.Object, "metadata", "initializers") + if !found || err != nil { return nil } out := &metav1.Initializers{} @@ -362,7 +362,7 @@ func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { } func (u *Unstructured) GetFinalizers() []string { - val, _ := NestedStringSlice(u.Object, "metadata", "finalizers") + val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers") return val } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list_test.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list_test.go index db935774a79..04ada449b95 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list_test.go @@ -35,8 +35,9 @@ func TestUnstructuredList(t *testing.T) { content := list.UnstructuredContent() items := content["items"].([]interface{}) require.Len(t, items, 1) - val, ok := NestedFieldCopy(items[0].(map[string]interface{}), "metadata", "name") - require.True(t, ok) + val, found, err := NestedFieldCopy(items[0].(map[string]interface{}), "metadata", "name") + 
require.True(t, found) + require.NoError(t, err) assert.Equal(t, "test", val) } From e1312f2c00edf969ea55904700e55b14e9f9ba1e Mon Sep 17 00:00:00 2001 From: pospispa Date: Thu, 23 Nov 2017 16:00:35 +0100 Subject: [PATCH 155/794] Addressing Comments from Code Review Addressing comments from code review (https://github.com/kubernetes/kubernetes/pull/55824#pullrequestreview-78597250) in order to simplify the code. --- pkg/controller/volume/pvcprotection/BUILD | 1 + .../pvc_protection_controller.go | 19 +- pkg/kubelet/volumemanager/populator/BUILD | 1 - .../desired_state_of_world_populator.go | 3 +- pkg/util/slice/slice.go | 21 ++ pkg/util/slice/slice_test.go | 64 +++++ pkg/volume/util/BUILD | 2 - pkg/volume/util/finalizer.go | 46 ---- pkg/volume/util/finalizer_test.go | 231 ------------------ 9 files changed, 101 insertions(+), 287 deletions(-) delete mode 100644 pkg/volume/util/finalizer_test.go diff --git a/pkg/controller/volume/pvcprotection/BUILD b/pkg/controller/volume/pvcprotection/BUILD index 5c713a259d2..a296dd22c0c 100644 --- a/pkg/controller/volume/pvcprotection/BUILD +++ b/pkg/controller/volume/pvcprotection/BUILD @@ -8,6 +8,7 @@ go_library( deps = [ "//pkg/controller:go_default_library", "//pkg/util/metrics:go_default_library", + "//pkg/util/slice:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go index 40bf3e5de5c..8ce491ffcd3 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go @@ -33,6 +33,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" + "k8s.io/kubernetes/pkg/util/slice" volumeutil "k8s.io/kubernetes/pkg/volume/util" 
"k8s.io/kubernetes/pkg/volume/util/volumehelper" ) @@ -153,7 +154,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error { return err } - if volumeutil.IsPVCBeingDeleted(pvc) && volumeutil.IsProtectionFinalizerPresent(pvc) { + if isDeletionCandidate(pvc) { // PVC should be deleted. Check if it's used and remove finalizer if // it's not. isUsed, err := c.isBeingUsed(pvc) @@ -165,7 +166,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error { } } - if !volumeutil.IsPVCBeingDeleted(pvc) && !volumeutil.IsProtectionFinalizerPresent(pvc) { + if needToAddFinalizer(pvc) { // PVC is not being deleted -> it should have the finalizer. The // finalizer should be added by admission plugin, this is just to add // the finalizer to old PVCs that were created before the admission @@ -177,7 +178,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error { func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error { claimClone := pvc.DeepCopy() - volumeutil.AddProtectionFinalizer(claimClone) + claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer) _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone) if err != nil { glog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name) @@ -189,7 +190,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error { func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error { claimClone := pvc.DeepCopy() - volumeutil.RemoveProtectionFinalizer(claimClone) + claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil) _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone) if err != nil { glog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err) @@ -247,7 +248,7 @@ func (c *Controller) 
pvcAddedUpdated(obj interface{}) { } glog.V(4).Infof("Got event on PVC %s", key) - if (!volumeutil.IsPVCBeingDeleted(pvc) && !volumeutil.IsProtectionFinalizerPresent(pvc)) || (volumeutil.IsPVCBeingDeleted(pvc) && volumeutil.IsProtectionFinalizerPresent(pvc)) { + if needToAddFinalizer(pvc) || isDeletionCandidate(pvc) { c.queue.Add(key) } } @@ -282,3 +283,11 @@ func (c *Controller) podAddedDeletedUpdated(obj interface{}, deleted bool) { } } } + +func isDeletionCandidate(pvc *v1.PersistentVolumeClaim) bool { + return pvc.ObjectMeta.DeletionTimestamp != nil && slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil) +} + +func needToAddFinalizer(pvc *v1.PersistentVolumeClaim) bool { + return pvc.ObjectMeta.DeletionTimestamp == nil && !slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil) +} diff --git a/pkg/kubelet/volumemanager/populator/BUILD b/pkg/kubelet/volumemanager/populator/BUILD index a3bd36cc057..f8938c1ce3d 100644 --- a/pkg/kubelet/volumemanager/populator/BUILD +++ b/pkg/kubelet/volumemanager/populator/BUILD @@ -19,7 +19,6 @@ go_library( "//pkg/kubelet/util/format:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/util:go_default_library", "//pkg/volume/util/types:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index ac762a040fa..f696becd3c4 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -41,7 +41,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/volume" - volumeutil 
"k8s.io/kubernetes/pkg/volume/util" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) @@ -444,7 +443,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( // and users should not be that surprised. // It should happen only in very rare case when scheduler schedules // a pod and user deletes a PVC that's used by it at the same time. - if volumeutil.IsPVCBeingDeleted(pvc) { + if pvc.ObjectMeta.DeletionTimestamp != nil { return "", "", fmt.Errorf( "can't start pod because PVC %s/%s is being deleted", namespace, diff --git a/pkg/util/slice/slice.go b/pkg/util/slice/slice.go index b408dbae841..b9809cc2972 100644 --- a/pkg/util/slice/slice.go +++ b/pkg/util/slice/slice.go @@ -68,3 +68,24 @@ func ContainsString(slice []string, s string, modifier func(s string) string) bo } return false } + +// RemoveString returns a newly created []string that contains all items from slice that +// are not equal to s and modifier(s) in case modifier func is provided. +func RemoveString(slice []string, s string, modifier func(s string) string) []string { + newSlice := make([]string, 0) + for _, item := range slice { + if item == s { + continue + } + if modifier != nil && modifier(item) == s { + continue + } + newSlice = append(newSlice, item) + } + if len(newSlice) == 0 { + // Sanitize for unit tests so we don't need to distinguish empty array + // and nil. 
+ newSlice = nil + } + return newSlice +} diff --git a/pkg/util/slice/slice_test.go b/pkg/util/slice/slice_test.go index c39f54c1f14..19b46c227e8 100644 --- a/pkg/util/slice/slice_test.go +++ b/pkg/util/slice/slice_test.go @@ -106,3 +106,67 @@ func TestContainsString(t *testing.T) { t.Errorf("ContainsString didn't find the string by modifier") } } + +func TestRemoveString(t *testing.T) { + modifier := func(s string) string { + if s == "ab" { + return "ee" + } + return s + } + tests := []struct { + testName string + input []string + remove string + modifier func(s string) string + want []string + }{ + { + testName: "Nil input slice", + input: nil, + remove: "", + modifier: nil, + want: nil, + }, + { + testName: "Slice doesn't contain the string", + input: []string{"a", "ab", "cdef"}, + remove: "NotPresentInSlice", + modifier: nil, + want: []string{"a", "ab", "cdef"}, + }, + { + testName: "All strings removed, result is nil", + input: []string{"a"}, + remove: "a", + modifier: nil, + want: nil, + }, + { + testName: "No modifier func, one string removed", + input: []string{"a", "ab", "cdef"}, + remove: "ab", + modifier: nil, + want: []string{"a", "cdef"}, + }, + { + testName: "No modifier func, all(three) strings removed", + input: []string{"ab", "a", "ab", "cdef", "ab"}, + remove: "ab", + modifier: nil, + want: []string{"a", "cdef"}, + }, + { + testName: "Removed both the string and the modifier func result", + input: []string{"a", "cd", "ab", "ee"}, + remove: "ee", + modifier: modifier, + want: []string{"a", "cd"}, + }, + } + for _, tt := range tests { + if got := RemoveString(tt.input, tt.remove, tt.modifier); !reflect.DeepEqual(got, tt.want) { + t.Errorf("%v: RemoveString(%v, %q, %T) = %v WANT %v", tt.testName, tt.input, tt.remove, tt.modifier, got, tt.want) + } + } +} diff --git a/pkg/volume/util/BUILD b/pkg/volume/util/BUILD index 48a0b0ee6bf..7a4ed937692 100644 --- a/pkg/volume/util/BUILD +++ b/pkg/volume/util/BUILD @@ -62,7 +62,6 @@ go_library( go_test( name = 
"go_default_test", srcs = [ - "finalizer_test.go", "util_test.go", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ @@ -76,7 +75,6 @@ go_test( deps = [ "//pkg/apis/core/install:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/pkg/volume/util/finalizer.go b/pkg/volume/util/finalizer.go index 84631545060..1bc03ad8e78 100644 --- a/pkg/volume/util/finalizer.go +++ b/pkg/volume/util/finalizer.go @@ -16,53 +16,7 @@ limitations under the License. package util -import ( - "k8s.io/api/core/v1" -) - const ( // Name of finalizer on PVCs that have a running pod. PVCProtectionFinalizer = "kubernetes.io/pvc-protection" ) - -// IsPVCBeingDeleted returns: -// true: in case PVC is being deleted, i.e. ObjectMeta.DeletionTimestamp is set -// false: in case PVC is not being deleted, i.e. ObjectMeta.DeletionTimestamp is nil -func IsPVCBeingDeleted(pvc *v1.PersistentVolumeClaim) bool { - return pvc.ObjectMeta.DeletionTimestamp != nil -} - -// IsProtectionFinalizerPresent returns true in case PVCProtectionFinalizer is -// present among the pvc.Finalizers -func IsProtectionFinalizerPresent(pvc *v1.PersistentVolumeClaim) bool { - for _, finalizer := range pvc.Finalizers { - if finalizer == PVCProtectionFinalizer { - return true - } - } - return false -} - -// RemoveProtectionFinalizer returns pvc without PVCProtectionFinalizer in case -// it's present in pvc.Finalizers. It expects that pvc is writable (i.e. is not -// informer's cached copy.) 
-func RemoveProtectionFinalizer(pvc *v1.PersistentVolumeClaim) { - newFinalizers := make([]string, 0) - for _, finalizer := range pvc.Finalizers { - if finalizer != PVCProtectionFinalizer { - newFinalizers = append(newFinalizers, finalizer) - } - } - if len(newFinalizers) == 0 { - // Sanitize for unit tests so we don't need to distinguish empty array - // and nil. - newFinalizers = nil - } - pvc.Finalizers = newFinalizers -} - -// AddProtectionFinalizer adds PVCProtectionFinalizer to pvc. It expects that -// pvc is writable (i.e. is not informer's cached copy.) -func AddProtectionFinalizer(pvc *v1.PersistentVolumeClaim) { - pvc.Finalizers = append(pvc.Finalizers, PVCProtectionFinalizer) -} diff --git a/pkg/volume/util/finalizer_test.go b/pkg/volume/util/finalizer_test.go deleted file mode 100644 index 210ea3b3e63..00000000000 --- a/pkg/volume/util/finalizer_test.go +++ /dev/null @@ -1,231 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - arbitraryTime = metav1.Date(2017, 11, 1, 14, 28, 47, 0, time.FixedZone("CET", 0)) -) - -func TestIsPVCBeingDeleted(t *testing.T) { - tests := []struct { - pvc *v1.PersistentVolumeClaim - want bool - }{ - { - pvc: &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - DeletionTimestamp: nil, - }, - }, - want: false, - }, - { - pvc: &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - DeletionTimestamp: &arbitraryTime, - }, - }, - want: true, - }, - } - for _, tt := range tests { - if got := IsPVCBeingDeleted(tt.pvc); got != tt.want { - t.Errorf("IsPVCBeingDeleted(%v) = %v WANT %v", tt.pvc, got, tt.want) - } - } -} - -func TestAddProtectionFinalizer(t *testing.T) { - tests := []struct { - name string - pvc *v1.PersistentVolumeClaim - want *v1.PersistentVolumeClaim - }{ - { - "PVC without finalizer", - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - }, - }, - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - Finalizers: []string{PVCProtectionFinalizer}, - }, - }, - }, - { - "PVC with some finalizers", - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer}, - }, - }, - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer, PVCProtectionFinalizer}, - }, - }, - }, - } - for _, test := range tests { - got := test.pvc.DeepCopy() - AddProtectionFinalizer(got) - if !reflect.DeepEqual(got, test.want) { - t.Errorf("Test %q: expected:\n%s\n\ngot:\n%s", test.name, spew.Sdump(test.want), 
spew.Sdump(got)) - } - } -} - -func TestRemoveProtectionFinalizer(t *testing.T) { - tests := []struct { - name string - pvc *v1.PersistentVolumeClaim - want *v1.PersistentVolumeClaim - }{ - { - "PVC without finalizer", - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - }, - }, - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - }, - }, - }, - { - "PVC with finalizer", - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - Finalizers: []string{PVCProtectionFinalizer}, - }, - }, - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - }, - }, - }, - { - "PVC with many finalizers", - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer, PVCProtectionFinalizer}, - }, - }, - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer}, - }, - }, - }, - } - for _, test := range tests { - got := test.pvc.DeepCopy() - RemoveProtectionFinalizer(got) - if !reflect.DeepEqual(got, test.want) { - t.Errorf("Test %q: expected:\n%s\n\ngot:\n%s", test.name, spew.Sdump(test.want), spew.Sdump(got)) - } - } -} - -func TestIsProtectionFinalizerPresent(t *testing.T) { - tests := []struct { - name string - pvc *v1.PersistentVolumeClaim - want bool - }{ - { - "PVC without finalizer", - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - }, - }, - false, - }, - { - "PVC with many unrelated finalizers", - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + 
PVCProtectionFinalizer}, - }, - }, - false, - }, - { - "PVC with many finalizers", - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer, PVCProtectionFinalizer}, - }, - }, - true, - }, - { - "PVC with finalizer", - &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns", - Finalizers: []string{PVCProtectionFinalizer}, - }, - }, - true, - }, - } - for _, test := range tests { - got := IsProtectionFinalizerPresent(test.pvc) - if got != test.want { - t.Errorf("Test %q: expected %v, got %v", test.name, test.want, got) - } - } -} From 8961f69c7eb1b5cc52616153e5a79b06d1171c83 Mon Sep 17 00:00:00 2001 From: Vincent Palmer Date: Wed, 29 Nov 2017 16:01:26 +0100 Subject: [PATCH 156/794] Fixed typo --- pkg/proxy/ipvs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/proxy/ipvs/README.md b/pkg/proxy/ipvs/README.md index c0f616c9217..a93cf77b53a 100644 --- a/pkg/proxy/ipvs/README.md +++ b/pkg/proxy/ipvs/README.md @@ -8,7 +8,7 @@ This document shows how to use kube-proxy ipvs mode. Linux kernel. IPVS runs on a host and acts as a load balancer in front of a cluster of real servers. IPVS can direct requests for TCP -and UDP-based services to the real servers, and make services of real servers appear as irtual services on a single IP address. +and UDP-based services to the real servers, and make services of real servers appear as virtual services on a single IP address. ## How to use From 0512f2b44814018ad0756862639b327a63625b7a Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Wed, 29 Nov 2017 16:10:07 +0100 Subject: [PATCH 157/794] security_context_test.go(TestVerifyRunAsNonRoot): remove unused variables. 
--- .../kuberuntime/security_context_test.go | 31 +++++++------------ 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/pkg/kubelet/kuberuntime/security_context_test.go b/pkg/kubelet/kuberuntime/security_context_test.go index ae724ab314a..4f9c1c3b0c1 100644 --- a/pkg/kubelet/kuberuntime/security_context_test.go +++ b/pkg/kubelet/kuberuntime/security_context_test.go @@ -47,35 +47,29 @@ func TestVerifyRunAsNonRoot(t *testing.T) { rootUser := int64(0) runAsNonRootTrue := true runAsNonRootFalse := false - imageRootUser := int64(0) - imageNonRootUser := int64(123) for _, test := range []struct { - desc string - sc *v1.SecurityContext - imageUser int64 - fail bool + desc string + sc *v1.SecurityContext + fail bool }{ { - desc: "Pass if SecurityContext is not set", - sc: nil, - imageUser: imageRootUser, - fail: false, + desc: "Pass if SecurityContext is not set", + sc: nil, + fail: false, }, { desc: "Pass if RunAsNonRoot is not set", sc: &v1.SecurityContext{ RunAsUser: &rootUser, }, - imageUser: imageRootUser, - fail: false, + fail: false, }, { desc: "Pass if RunAsNonRoot is false (image user is root)", sc: &v1.SecurityContext{ RunAsNonRoot: &runAsNonRootFalse, }, - imageUser: imageRootUser, - fail: false, + fail: false, }, { desc: "Pass if RunAsNonRoot is false (RunAsUser is root)", @@ -83,8 +77,7 @@ func TestVerifyRunAsNonRoot(t *testing.T) { RunAsNonRoot: &runAsNonRootFalse, RunAsUser: &rootUser, }, - imageUser: imageNonRootUser, - fail: false, + fail: false, }, { desc: "Fail if container's RunAsUser is root and RunAsNonRoot is true", @@ -92,16 +85,14 @@ func TestVerifyRunAsNonRoot(t *testing.T) { RunAsNonRoot: &runAsNonRootTrue, RunAsUser: &rootUser, }, - imageUser: imageNonRootUser, - fail: true, + fail: true, }, { desc: "Fail if image's user is root and RunAsNonRoot is true", sc: &v1.SecurityContext{ RunAsNonRoot: &runAsNonRootTrue, }, - imageUser: imageRootUser, - fail: true, + fail: true, }, } { pod.Spec.Containers[0].SecurityContext = test.sc From 
7b6aa0904695f31900a751b10d2c7678a377fc07 Mon Sep 17 00:00:00 2001 From: Seth Jennings Date: Tue, 24 Oct 2017 21:08:21 -0500 Subject: [PATCH 158/794] validate container state transitions --- pkg/apis/core/validation/validation.go | 33 +++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 655d0bf2163..4e79c736b20 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -3322,6 +3322,31 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod) field.ErrorList { return allErrs } +// ValidateContainerStateTransition test to if any illegal container state transitions are being attempted +func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, restartPolicy core.RestartPolicy) field.ErrorList { + allErrs := field.ErrorList{} + // If we should always restart, containers are allowed to leave the terminated state + if restartPolicy == core.RestartPolicyAlways { + return allErrs + } + for i, oldStatus := range oldStatuses { + // Skip any container that is not terminated + if oldStatus.State.Terminated == nil { + continue + } + // Skip any container that failed but is allowed to restart + if oldStatus.State.Terminated.ExitCode != 0 && restartPolicy == core.RestartPolicyOnFailure { + continue + } + for _, newStatus := range newStatuses { + if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil { + allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state")) + } + } + } + return allErrs +} + // ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields // that cannot be changed. 
func ValidatePodStatusUpdate(newPod, oldPod *core.Pod) field.ErrorList { @@ -3329,10 +3354,16 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) + fldPath = field.NewPath("status") if newPod.Spec.NodeName != oldPod.Spec.NodeName { - allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "nodeName"), "may not be changed directly")) + allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeName"), "may not be changed directly")) } + // If pod should not restart, make sure the status update does not transition + // any terminated containers to a non-terminated state. + allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...) + allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...) + // For status update we ignore changes to pod spec. newPod.Spec = oldPod.Spec From fcfca65a54e53f5e49693c2c599e63a0dab8661e Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Wed, 29 Nov 2017 10:34:02 -0500 Subject: [PATCH 159/794] Heketi documents incorrectly about sizes in GBs Heketi documentation incorrectly says that volume size is created in GB but in fact is in GiB. Fix both resizing and create volume functions to reflect that. 
--- pkg/volume/glusterfs/glusterfs.go | 17 ++++++++--------- pkg/volume/util.go | 19 +++++++++++++++++++ 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index c42e3cdf794..25822e7957d 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -695,7 +695,7 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { glog.V(2).Infof("Allocated GID [%d] for PVC %s", gid, p.options.PVC.Name) - glusterfs, sizeGB, err := p.CreateVolume(gid) + glusterfs, sizeGiB, err := p.CreateVolume(gid) if err != nil { if releaseErr := gidTable.Release(gid); releaseErr != nil { glog.Errorf("error when releasing GID in storageclass: %s", scName) @@ -724,7 +724,7 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { } pv.Spec.Capacity = v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dG", sizeGB)), + v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGiB)), } return pv, nil } @@ -732,10 +732,9 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, err error) { var clusterIDs []string capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - volSizeBytes := capacity.Value() - // Glusterfs creates volumes in units of GBs - sz := int(volume.RoundUpSize(volSizeBytes, 1000*1000*1000)) - glog.V(2).Infof("create volume of size: %d bytes and configuration %+v", volSizeBytes, p.provisionerConfig) + // Glusterfs creates volumes in units of GiB, but heketi documentation incorrectly reports GBs + sz := int(volume.RoundUpToGiB(capacity)) + glog.V(2).Infof("create volume of size: %d GiB and configuration %+v", sz, p.provisionerConfig) if p.url == "" { glog.Errorf("REST server endpoint is empty") return nil, 0, 
fmt.Errorf("failed to create glusterfs REST client, REST URL is empty") @@ -1077,10 +1076,10 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res // Find out delta size expansionSize := (newSize.Value() - oldSize.Value()) - expansionSizeGB := int(volume.RoundUpSize(expansionSize, 1000*1000*1000)) + expansionSizeGiB := int(volume.RoundUpSize(expansionSize, volume.GIB)) // Make volume expansion request - volumeExpandReq := &gapi.VolumeExpandRequest{Size: expansionSizeGB} + volumeExpandReq := &gapi.VolumeExpandRequest{Size: expansionSizeGiB} // Expand the volume volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq) @@ -1090,6 +1089,6 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res } glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size) - newVolumeSize := resource.MustParse(fmt.Sprintf("%dG", volumeInfoRes.Size)) + newVolumeSize := resource.MustParse(fmt.Sprintf("%dGi", volumeInfoRes.Size)) return newVolumeSize, nil } diff --git a/pkg/volume/util.go b/pkg/volume/util.go index e2890516980..3f71da1b862 100644 --- a/pkg/volume/util.go +++ b/pkg/volume/util.go @@ -38,6 +38,13 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) +const ( + // GB - GigaByte size + GB = 1000 * 1000 * 1000 + // GIB - GibiByte size + GIB = 1024 * 1024 * 1024 +) + type RecycleEventRecorder func(eventtype, message string) // RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume @@ -288,6 +295,18 @@ func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes } +// RoundUpToGB rounds up given quantity to chunks of GB +func RoundUpToGB(size resource.Quantity) int64 { + requestBytes := size.Value() + return RoundUpSize(requestBytes, GB) +} + +// RoundUpToGiB rounds up given quantity upto chunks of GiB +func RoundUpToGiB(size resource.Quantity) int64 { + requestBytes := 
size.Value() + return RoundUpSize(requestBytes, GIB) +} + // GenerateVolumeName returns a PV name with clusterName prefix. The function // should be used to generate a name of GCE PD or Cinder volume. It basically // adds "-dynamic-" before the PV name, making sure the resulting From 234ce8da41effbc964a5c77658f279f88a639d0e Mon Sep 17 00:00:00 2001 From: George Kudrayvtsev Date: Wed, 29 Nov 2017 14:05:39 -0800 Subject: [PATCH 160/794] Makes modes OS-specific (+ fixes tests). --- pkg/proxy/apis/kubeproxyconfig/types.go | 12 +++++++----- .../apis/kubeproxyconfig/validation/validation.go | 14 +++++++++++--- .../kubeproxyconfig/validation/validation_test.go | 13 +++++++++---- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/pkg/proxy/apis/kubeproxyconfig/types.go b/pkg/proxy/apis/kubeproxyconfig/types.go index 13c92a1a392..2a971c120d7 100644 --- a/pkg/proxy/apis/kubeproxyconfig/types.go +++ b/pkg/proxy/apis/kubeproxyconfig/types.go @@ -152,11 +152,13 @@ type KubeProxyConfiguration struct { ConfigSyncPeriod metav1.Duration } -// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables' -// (newer, faster). If blank, use the best-available proxy (currently iptables, but may -// change in future versions). If the iptables proxy is selected, regardless of how, but -// the system's kernel or iptables versions are insufficient, this always falls back to the -// userspace proxy. +// Currently, four modes of proxying are available total: 'userspace' (older, stable), 'iptables' +// (newer, faster), 'ipvs', and 'kernelspace' (Windows only, newer). +// +// If blank, use the best-available proxy (currently iptables, but may change in +// future versions). If the iptables proxy is selected, regardless of how, but +// the system's kernel or iptables versions are insufficient, this always falls +// back to the userspace proxy. 
type ProxyMode string const ( diff --git a/pkg/proxy/apis/kubeproxyconfig/validation/validation.go b/pkg/proxy/apis/kubeproxyconfig/validation/validation.go index cf576e564d4..4edbe92af55 100644 --- a/pkg/proxy/apis/kubeproxyconfig/validation/validation.go +++ b/pkg/proxy/apis/kubeproxyconfig/validation/validation.go @@ -19,6 +19,7 @@ package validation import ( "fmt" "net" + "runtime" "strconv" "strings" @@ -146,11 +147,18 @@ func validateProxyMode(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) fiel case kubeproxyconfig.ProxyModeUserspace: case kubeproxyconfig.ProxyModeIPTables: case kubeproxyconfig.ProxyModeIPVS: - case kubeproxyconfig.ProxyModeKernelspace: case "": + case kubeproxyconfig.ProxyModeKernelspace: + if runtime.GOOS != "windows" { + errMsg := fmt.Sprintf("%s is only supported on Windows", string(kubeproxyconfig.ProxyModeKernelspace)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)) + } default: - modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeIPTables), string(kubeproxyconfig.ProxyModeIPVS), string(kubeproxyconfig.ProxyModeKernelspace)} - errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy (currently iptables)", strings.Join(modes, ",")) + modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeIPTables), string(kubeproxyconfig.ProxyModeIPVS)} + if runtime.GOOS == "windows" { + modes = append(modes, string(kubeproxyconfig.ProxyModeKernelspace)) + } + errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy [currently iptables])", strings.Join(modes, ",")) allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)) } return allErrs diff --git a/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go b/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go index f316a304462..b3498264fed 100644 --- 
a/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go +++ b/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go @@ -18,6 +18,7 @@ package validation import ( "fmt" + "runtime" "strings" "testing" "time" @@ -488,11 +489,15 @@ func TestValidateProxyMode(t *testing.T) { newPath := field.NewPath("KubeProxyConfiguration") successCases := []kubeproxyconfig.ProxyMode{ kubeproxyconfig.ProxyModeUserspace, - kubeproxyconfig.ProxyModeIPTables, - kubeproxyconfig.ProxyModeIPVS, kubeproxyconfig.ProxyMode(""), } + if runtime.GOOS == "windows" { + successCases = append(successCases, kubeproxyconfig.ProxyModeKernelspace) + } else { + successCases = append(successCases, kubeproxyconfig.ProxyModeIPTables, kubeproxyconfig.ProxyModeIPVS) + } + for _, successCase := range successCases { if errs := validateProxyMode(successCase, newPath.Child("ProxyMode")); len(errs) != 0 { t.Errorf("expected success: %v", errs) @@ -505,13 +510,13 @@ func TestValidateProxyMode(t *testing.T) { }{ { mode: kubeproxyconfig.ProxyMode("non-existing"), - msg: "or blank (blank means the best-available proxy (currently iptables)", + msg: "or blank (blank means the best-available proxy [currently iptables])", }, } for _, errorCase := range errorCases { if errs := validateProxyMode(errorCase.mode, newPath.Child("ProxyMode")); len(errs) == 0 { - t.Errorf("expected failure for %s", errorCase.msg) + t.Errorf("expected failure %s for %v", errorCase.msg, errorCase.mode) } else if !strings.Contains(errs[0].Error(), errorCase.msg) { t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg) } From 91ccdaa057eb893e9aa9e3ed76ca9a15258b92b4 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Thu, 30 Nov 2017 09:31:46 +0800 Subject: [PATCH 161/794] kubeadm: Use the v1.9 branch by default --- cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go 
b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go index 948956e46d9..bb13179a0ab 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go @@ -39,7 +39,7 @@ const ( // DefaultClusterDNSIP defines default DNS IP DefaultClusterDNSIP = "10.96.0.10" // DefaultKubernetesVersion defines default kubernetes version - DefaultKubernetesVersion = "stable-1.8" + DefaultKubernetesVersion = "stable-1.9" // DefaultAPIBindPort defines default API port DefaultAPIBindPort = 6443 // DefaultAuthorizationModes defines default authorization modes From 3ef8ab4d70658aa2c506323defd61dc98f5496dc Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Wed, 29 Nov 2017 13:46:24 +0800 Subject: [PATCH 162/794] Heap is not thread safe in scheduling queue --- plugin/pkg/scheduler/core/scheduling_queue.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/plugin/pkg/scheduler/core/scheduling_queue.go b/plugin/pkg/scheduler/core/scheduling_queue.go index 78bdf099015..f258fd1ee76 100644 --- a/plugin/pkg/scheduler/core/scheduling_queue.go +++ b/plugin/pkg/scheduler/core/scheduling_queue.go @@ -149,8 +149,6 @@ type UnschedulablePods interface { // scheduling. This is called activeQ and is a Heap. Another queue holds // pods that are already tried and are determined to be unschedulable. The latter // is called unschedulableQ. -// Heap is already thread safe, but we need to acquire another lock here to ensure -// atomicity of operations on the two data structures.. type PriorityQueue struct { lock sync.RWMutex cond sync.Cond @@ -611,7 +609,7 @@ func (h *heapData) Pop() interface{} { return item.obj } -// Heap is a thread-safe producer/consumer queue that implements a heap data structure. +// Heap is a producer/consumer queue that implements a heap data structure. // It can be used to implement priority queues and similar data structures. 
type Heap struct { // data stores objects and has a queue that keeps their ordering according From be40a40ee03c954edb1a653a17b4334574e61686 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Thu, 30 Nov 2017 14:10:54 +0800 Subject: [PATCH 163/794] fix typo --- pkg/kubeapiserver/default_storage_factory_builder.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kubeapiserver/default_storage_factory_builder.go b/pkg/kubeapiserver/default_storage_factory_builder.go index ca1a3d35b33..b315bbde843 100644 --- a/pkg/kubeapiserver/default_storage_factory_builder.go +++ b/pkg/kubeapiserver/default_storage_factory_builder.go @@ -56,7 +56,7 @@ func NewStorageFactory(storageConfig storagebackend.Config, defaultMediaType str return serverstorage.NewDefaultStorageFactory(storageConfig, defaultMediaType, serializer, resourceEncodingConfig, apiResourceConfig, specialDefaultResourcePrefixes), nil } -// Merges the given defaultResourceConfig with specifc GroupvVersionResource overrides. +// Merges the given defaultResourceConfig with specific GroupVersionResource overrides. func mergeResourceEncodingConfigs(defaultResourceEncoding *serverstorage.DefaultResourceEncodingConfig, resourceEncodingOverrides []schema.GroupVersionResource) *serverstorage.DefaultResourceEncodingConfig { resourceEncodingConfig := defaultResourceEncoding for _, gvr := range resourceEncodingOverrides { @@ -66,7 +66,7 @@ func mergeResourceEncodingConfigs(defaultResourceEncoding *serverstorage.Default return resourceEncodingConfig } -// Merges the given defaultResourceConfig with specifc GroupVersion overrides. +// Merges the given defaultResourceConfig with specific GroupVersion overrides. 
func mergeGroupEncodingConfigs(defaultResourceEncoding *serverstorage.DefaultResourceEncodingConfig, storageEncodingOverrides map[string]schema.GroupVersion) *serverstorage.DefaultResourceEncodingConfig { resourceEncodingConfig := defaultResourceEncoding for group, storageEncodingVersion := range storageEncodingOverrides { From 0cc6a4d93738a974b0e330aab57cee9807021d69 Mon Sep 17 00:00:00 2001 From: tianshapjq Date: Thu, 30 Nov 2017 14:14:59 +0800 Subject: [PATCH 164/794] new testcase to cgroup_manager_linux.go --- pkg/kubelet/cm/cgroup_manager_linux_test.go | 32 +++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/pkg/kubelet/cm/cgroup_manager_linux_test.go b/pkg/kubelet/cm/cgroup_manager_linux_test.go index ed60c4d50e8..36c5a7c30c8 100644 --- a/pkg/kubelet/cm/cgroup_manager_linux_test.go +++ b/pkg/kubelet/cm/cgroup_manager_linux_test.go @@ -99,3 +99,35 @@ func TestLibcontainerAdapterAdaptToSystemdAsCgroupFs(t *testing.T) { } } } + +func TestLibcontainerAdapterNotAdaptToSystemd(t *testing.T) { + cgroupfs := newLibcontainerAdapter(libcontainerCgroupfs) + otherAdatper := newLibcontainerAdapter(libcontainerCgroupManagerType("test")) + + testCases := []struct { + input string + expected string + }{ + { + input: "/", + expected: "/", + }, + { + input: "/Burstable", + expected: "/Burstable", + }, + { + input: "", + expected: "", + }, + } + for _, testCase := range testCases { + if actual := cgroupfs.adaptName(CgroupName(testCase.input), true); actual != testCase.expected { + t.Errorf("Unexpected result, input: %v, expected: %v, actual: %v", testCase.input, testCase.expected, actual) + } + + if actual := otherAdatper.adaptName(CgroupName(testCase.input), true); actual != testCase.expected { + t.Errorf("Unexpected result, input: %v, expected: %v, actual: %v", testCase.input, testCase.expected, actual) + } + } +} From 08f40eac6d008a3967e5dd5be5acd913ec5ffe64 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 29 Nov 2017 05:51:32 +0000 Subject: [PATCH 
165/794] return error when create azure share failed remember error info in CreateFileShare fix typo --- pkg/cloudprovider/providers/azure/azure_storage.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_storage.go b/pkg/cloudprovider/providers/azure/azure_storage.go index 81388bc428a..641fb8fd6ad 100644 --- a/pkg/cloudprovider/providers/azure/azure_storage.go +++ b/pkg/cloudprovider/providers/azure/azure_storage.go @@ -42,20 +42,24 @@ func (az *Cloud) CreateFileShare(name, storageAccount, storageType, location str // find the access key with this account key, err := az.getStorageAccesskey(account.Name) if err != nil { - glog.V(2).Infof("no key found for storage account %s", account.Name) + err = fmt.Errorf("could not get storage key for storage account %s: %v", account.Name, err) continue } err = az.createFileShare(account.Name, key, name, requestGB) if err != nil { - glog.V(2).Infof("failed to create share %s in account %s: %v", name, account.Name, err) + err = fmt.Errorf("failed to create share %s in account %s: %v", name, account.Name, err) continue } glog.V(4).Infof("created share %s in account %s", name, account.Name) return account.Name, key, err } } - return "", "", fmt.Errorf("failed to find a matching storage account") + + if err == nil { + err = fmt.Errorf("failed to find a matching storage account") + } + return "", "", err } // DeleteFileShare deletes a file share using storage account name and key From 184eb8316292efb6ee308324dfc8df768a21ff09 Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Sun, 26 Nov 2017 14:21:10 +0800 Subject: [PATCH 166/794] remove extra level check of glog --- .../algorithm/priorities/balanced_resource_allocation.go | 4 +--- .../pkg/scheduler/algorithm/priorities/interpod_affinity.go | 4 +--- plugin/pkg/scheduler/algorithm/priorities/least_requested.go | 4 +--- plugin/pkg/scheduler/algorithm/priorities/most_requested.go | 4 +--- 
.../pkg/scheduler/algorithm/priorities/selector_spreading.go | 4 +--- 5 files changed, 5 insertions(+), 15 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go index 718282da11f..791e28ee1a0 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go +++ b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go @@ -74,9 +74,7 @@ func calculateBalancedResourceAllocation(pod *v1.Pod, podRequests *schedulercach score = int((1 - diff) * float64(schedulerapi.MaxPriority)) } if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is - // not logged. There is visible performance gain from it. - glog.V(10).Infof( + glog.Infof( "%v -> %v: Balanced Resource Allocation, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d", pod.Name, node.Name, allocatableResources.MilliCPU, allocatableResources.Memory, diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go index ae168d9c763..9b88257f803 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -229,9 +229,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node } result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)}) if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is - // not logged. There is visible performance gain from it. 
- glog.V(10).Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore)) + glog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore)) } } return result, nil diff --git a/plugin/pkg/scheduler/algorithm/priorities/least_requested.go b/plugin/pkg/scheduler/algorithm/priorities/least_requested.go index 74306451638..73d5db676e9 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/least_requested.go +++ b/plugin/pkg/scheduler/algorithm/priorities/least_requested.go @@ -73,9 +73,7 @@ func calculateUnusedPriority(pod *v1.Pod, podRequests *schedulercache.Resource, cpuScore := calculateUnusedScore(totalResources.MilliCPU, allocatableResources.MilliCPU, node.Name) memoryScore := calculateUnusedScore(totalResources.Memory, allocatableResources.Memory, node.Name) if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is - // not logged. There is visible performance gain from it. - glog.V(10).Infof( + glog.Infof( "%v -> %v: Least Requested Priority, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d CPU %d memory", pod.Name, node.Name, allocatableResources.MilliCPU, allocatableResources.Memory, diff --git a/plugin/pkg/scheduler/algorithm/priorities/most_requested.go b/plugin/pkg/scheduler/algorithm/priorities/most_requested.go index 4245d4938ba..9d1697db93e 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/most_requested.go +++ b/plugin/pkg/scheduler/algorithm/priorities/most_requested.go @@ -76,9 +76,7 @@ func calculateUsedPriority(pod *v1.Pod, podRequests *schedulercache.Resource, no cpuScore := calculateUsedScore(totalResources.MilliCPU, allocatableResources.MilliCPU, node.Name) memoryScore := calculateUsedScore(totalResources.Memory, allocatableResources.Memory, node.Name) if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is - // not logged. 
There is visible performance gain from it. - glog.V(10).Infof( + glog.Infof( "%v -> %v: Most Requested Priority, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d CPU %d memory", pod.Name, node.Name, allocatableResources.MilliCPU, allocatableResources.Memory, diff --git a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go index aa195b0e4f8..7258d52ea7b 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go +++ b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go @@ -163,9 +163,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa } result[i].Score = int(fScore) if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is - // not logged. There is visible performance gain from it. - glog.V(10).Infof( + glog.Infof( "%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore), ) } From 916812ea1d6a66fe862dea538eeef1193b818bb1 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Sat, 25 Nov 2017 11:45:19 +0100 Subject: [PATCH 167/794] code-gen: uniform reusable main.go logic Split main.go into plumbing and calls to NewDefaults, AddFlags and Validate. 
--- hack/.golint_failures | 1 - .../cmd/client-gen/args/args.go | 63 +++++++++++++-- .../cmd/client-gen/args/gvpackages_test.go | 27 ++++--- .../code-generator/cmd/client-gen/main.go | 36 ++++----- .../cmd/conversion-gen/args/args.go | 81 +++++++++++++++++++ .../conversion-gen/generators/conversion.go | 34 +------- .../code-generator/cmd/conversion-gen/main.go | 38 +++++---- .../cmd/deepcopy-gen/args/args.go | 54 +++++++++++++ .../code-generator/cmd/deepcopy-gen/main.go | 31 ++++--- .../cmd/defaulter-gen/args/args.go | 54 +++++++++++++ .../code-generator/cmd/defaulter-gen/main.go | 33 ++++---- .../cmd/informer-gen/args/args.go | 77 ++++++++++++++++++ .../cmd/informer-gen/generators/customargs.go | 33 -------- .../cmd/informer-gen/generators/packages.go | 6 +- .../code-generator/cmd/informer-gen/main.go | 39 +++++---- .../cmd/lister-gen/args/args.go | 56 +++++++++++++ .../code-generator/cmd/lister-gen/main.go | 23 ++++-- .../cmd/openapi-gen/args/args.go | 53 ++++++++++++ .../code-generator/cmd/openapi-gen/main.go | 22 +++-- .../k8s.io/code-generator/pkg/util/build.go | 52 ++++++++++++ 20 files changed, 629 insertions(+), 184 deletions(-) create mode 100644 staging/src/k8s.io/code-generator/cmd/conversion-gen/args/args.go create mode 100644 staging/src/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go create mode 100644 staging/src/k8s.io/code-generator/cmd/defaulter-gen/args/args.go create mode 100644 staging/src/k8s.io/code-generator/cmd/informer-gen/args/args.go delete mode 100644 staging/src/k8s.io/code-generator/cmd/informer-gen/generators/customargs.go create mode 100644 staging/src/k8s.io/code-generator/cmd/lister-gen/args/args.go create mode 100644 staging/src/k8s.io/code-generator/cmd/openapi-gen/args/args.go create mode 100644 staging/src/k8s.io/code-generator/pkg/util/build.go diff --git a/hack/.golint_failures b/hack/.golint_failures index 6cd1db4fbb1..33a057e99fd 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -727,7 +727,6 @@ 
staging/src/k8s.io/code-generator/cmd/client-gen/generators/scheme staging/src/k8s.io/code-generator/cmd/client-gen/types staging/src/k8s.io/code-generator/cmd/conversion-gen/generators staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf -staging/src/k8s.io/code-generator/cmd/informer-gen/generators staging/src/k8s.io/code-generator/cmd/lister-gen/generators staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1 diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go index 5eac82806eb..f17608fadde 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go @@ -17,11 +17,23 @@ limitations under the License. package args import ( + "fmt" + "path" + "github.com/spf13/pflag" + "k8s.io/gengo/args" "k8s.io/code-generator/cmd/client-gen/types" + codegenutil "k8s.io/code-generator/pkg/util" ) +var DefaultInputDirs = []string{ + "k8s.io/apimachinery/pkg/fields", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/apimachinery/pkg/apimachinery/registered", +} + // ClientGenArgs is a wrapper for arguments to client-gen. type CustomArgs struct { // A sorted list of group versions to generate. 
For each of them the package path is found @@ -47,16 +59,53 @@ type CustomArgs struct { FakeClient bool } -func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{ + ClientsetName: "internalclientset", + ClientsetAPIPath: "/apis", + ClientsetOnly: false, + FakeClient: true, + } + genericArgs.CustomArgs = customArgs + genericArgs.InputDirs = DefaultInputDirs + + if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { + customArgs.ClientsetOutputPath = path.Join(pkg, "pkg/client/clientset/") + } + + return genericArgs, customArgs +} + +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet, inputBase string) { gvsBuilder := NewGroupVersionsBuilder(&ca.Groups) pflag.Var(NewGVPackagesValue(gvsBuilder, nil), "input", "group/versions that client-gen will generate clients for. At most one version per group is allowed. Specified in the format \"group1/version1,group2/version2...\".") pflag.Var(NewGVTypesValue(&ca.IncludedTypesOverrides, []string{}), "included-types-overrides", "list of group/version/type for which client should be generated. By default, client is generated for all types which have genclient in types.go. This overrides that. For each groupVersion in this list, only the types mentioned here will be included. 
The default check of genclient will be used for other group versions.") - pflag.Var(NewInputBasePathValue(gvsBuilder, "k8s.io/kubernetes/pkg/apis"), "input-base", "base path to look for the api group.") - pflag.StringVarP(&ca.ClientsetName, "clientset-name", "n", "internalclientset", "the name of the generated clientset package.") - pflag.StringVarP(&ca.ClientsetAPIPath, "clientset-api-path", "", "/apis", "the value of default API HTTP path, starting with / and without trailing /.") - pflag.StringVar(&ca.ClientsetOutputPath, "clientset-path", "k8s.io/kubernetes/pkg/client/clientset_generated/", "the generated clientset will be output to /.") - pflag.BoolVar(&ca.ClientsetOnly, "clientset-only", false, "when set, client-gen only generates the clientset shell, without generating the individual typed clients") - pflag.BoolVar(&ca.FakeClient, "fake-clientset", true, "when set, client-gen will generate the fake clientset that can be used in tests") + pflag.Var(NewInputBasePathValue(gvsBuilder, inputBase), "input-base", "base path to look for the api group.") + pflag.StringVarP(&ca.ClientsetName, "clientset-name", "n", ca.ClientsetName, "the name of the generated clientset package.") + pflag.StringVarP(&ca.ClientsetAPIPath, "clientset-api-path", "", ca.ClientsetAPIPath, "the value of default API HTTP path, starting with / and without trailing /.") + pflag.StringVar(&ca.ClientsetOutputPath, "clientset-path", ca.ClientsetOutputPath, "the generated clientset will be output to /.") + pflag.BoolVar(&ca.ClientsetOnly, "clientset-only", ca.ClientsetOnly, "when set, client-gen only generates the clientset shell, without generating the individual typed clients") + pflag.BoolVar(&ca.FakeClient, "fake-clientset", ca.FakeClient, "when set, client-gen will generate the fake clientset that can be used in tests") +} + +func Validate(genericArgs *args.GeneratorArgs) error { + customArgs := genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputPackagePath) == 0 { + return 
fmt.Errorf("output package cannot be empty") + } + if len(customArgs.ClientsetName) == 0 { + return fmt.Errorf("clientset name cannot be empty") + } + if len(customArgs.ClientsetAPIPath) == 0 { + return fmt.Errorf("clientset API path cannot be empty") + } + if len(customArgs.ClientsetOutputPath) == 0 { + return fmt.Errorf("clientset path cannot be empty") + } + + return nil } // GroupVersionPackages returns a map from GroupVersion to the package with the types.go. diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages_test.go b/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages_test.go index 0df1633edff..8154c2eec12 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages_test.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/args/gvpackages_test.go @@ -45,12 +45,12 @@ func TestGVPackageFlag(t *testing.T) { args: []string{"foo/bar/v1", "foo/bar/v2", "foo/bar/", "foo/v1"}, expectedGroups: []types.GroupVersions{ {PackageName: "bar", Group: types.Group("bar"), Versions: []types.PackageVersion{ - {"foo/bar/v1", types.Version("v1")}, - {"foo/bar/v2", types.Version("v2")}, - {"foo/bar", types.Version("")}, + {"v1", "foo/bar/v1"}, + {"v2", "foo/bar/v2"}, + {"", "foo/bar"}, }}, {PackageName: "foo", Group: types.Group("foo"), Versions: []types.PackageVersion{ - {"foo/v1", types.Version("v1")}, + {"v1", "foo/v1"}, }}, }, }, @@ -59,12 +59,12 @@ func TestGVPackageFlag(t *testing.T) { def: []string{"foo/bar/v1alpha1", "foo/v1"}, expectedGroups: []types.GroupVersions{ {PackageName: "bar", Group: types.Group("bar"), Versions: []types.PackageVersion{ - {"foo/bar/v1", types.Version("v1")}, - {"foo/bar/v2", types.Version("v2")}, - {"foo/bar", types.Version("")}, + {"v1", "foo/bar/v1"}, + {"v2", "foo/bar/v2"}, + {"", "foo/bar"}, }}, {PackageName: "foo", Group: types.Group("foo"), Versions: []types.PackageVersion{ - {"foo/v1", types.Version("v1")}, + {"v1", "foo/v1"}, }}, }, }, @@ -72,8 +72,8 @@ func TestGVPackageFlag(t 
*testing.T) { args: []string{"api/v1", "api"}, expectedGroups: []types.GroupVersions{ {PackageName: "core", Group: types.Group("api"), Versions: []types.PackageVersion{ - {"core/v1", types.Version("v1")}, - {"core", types.Version("")}, + {"v1", "core/v1"}, + {"", "core"}, }}, }, }, @@ -82,7 +82,7 @@ func TestGVPackageFlag(t *testing.T) { importBasePath: "k8s.io/api", expectedGroups: []types.GroupVersions{ {PackageName: "foo", Group: types.Group("foo"), Versions: []types.PackageVersion{ - {"k8s.io/api/foo/v1", types.Version("v1")}, + {"v1", "k8s.io/api/foo/v1"}, }}, }, }, @@ -90,8 +90,9 @@ func TestGVPackageFlag(t *testing.T) { for i, test := range tests { fs := pflag.NewFlagSet("testGVPackage", pflag.ContinueOnError) groups := []types.GroupVersions{} - importBasePath := test.importBasePath - fs.Var(NewGVPackagesValue(NewGroupVersionsBuilder(&groups, &importBasePath), test.def), "input", "usage") + builder := NewGroupVersionsBuilder(&groups) + fs.Var(NewGVPackagesValue(builder, test.def), "input", "usage") + fs.Var(NewInputBasePathValue(builder, test.importBasePath), "input-base-path", "usage") args := []string{} for _, a := range test.args { diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go index 392284519c1..40c3069819d 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go @@ -23,43 +23,37 @@ import ( "github.com/golang/glog" "github.com/spf13/pflag" - - clientgenargs "k8s.io/code-generator/cmd/client-gen/args" - "k8s.io/code-generator/cmd/client-gen/generators" "k8s.io/gengo/args" + + generatorargs "k8s.io/code-generator/cmd/client-gen/args" + "k8s.io/code-generator/cmd/client-gen/generators" ) func main() { - arguments := args.Default().WithoutDefaultFlagParsing() - - // Custom args. 
- customArgs := &clientgenargs.CustomArgs{} - customArgs.AddFlags(pflag.CommandLine) + genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. - arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") - arguments.CustomArgs = customArgs - arguments.InputDirs = []string{ - "k8s.io/apimachinery/pkg/fields", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/watch", - "k8s.io/apimachinery/pkg/apimachinery/registered", - } + // TODO: move this out of client-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + customArgs.ClientsetOutputPath = "k8s.io/kubernetes/pkg/client/clientset_generated/" - // Register default flags. We do this manually here because we have to override InputDirs below after additional - // input dirs are parse fromt he command-line. - arguments.AddFlags(pflag.CommandLine) + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of client-gen pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() + if err := generatorargs.Validate(genericArgs); err != nil { + glog.Fatalf("Error: %v", err) + } + // add group version package as input dirs for gengo for _, pkg := range customArgs.Groups { for _, v := range pkg.Versions { - arguments.InputDirs = append(arguments.InputDirs, v.Package) + genericArgs.InputDirs = append(genericArgs.InputDirs, v.Package) } } - if err := arguments.Execute( + if err := genericArgs.Execute( generators.NameSystems(), generators.DefaultNameSystem(), generators.Packages, diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/args/args.go new file mode 100644 index 00000000000..0d85222db2d --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/args/args.go 
@@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" +) + +// DefaultBasePeerDirs are the peer-dirs nearly everybody will use, i.e. those coming from +// apimachinery. +var DefaultBasePeerDirs = []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/conversion", + "k8s.io/apimachinery/pkg/runtime", +} + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct { + // Base peer dirs which nearly everybody will use, i.e. outside of Kubernetes core. + BasePeerDirs []string + + // Custom peer dirs which are application specific. Always consider these as + // last-ditch possibilities for conversions. + ExtraPeerDirs []string // + + // Skipunsafe indicates whether to generate unsafe conversions to improve the efficiency + // of these operations. The unsafe operation is a direct pointer assignment via unsafe + // (within the allowed uses of unsafe) and is equivalent to a proposed Golang change to + // allow structs that are identical to be assigned to each other. + SkipUnsafe bool +} + +// NewDefaults returns default arguments for the generator. 
+func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{ + BasePeerDirs: DefaultBasePeerDirs, + SkipUnsafe: false, + } + genericArgs.CustomArgs = customArgs + genericArgs.OutputFileBaseName = "conversion_generated" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + pflag.CommandLine.StringSliceVar(&ca.BasePeerDirs, "base-peer-dirs", ca.BasePeerDirs, + "Comma-separated list of apimachinery import paths which are considered, after tag-specified peers, for conversions. Only change these if you have very good reasons.") + pflag.CommandLine.StringSliceVar(&ca.ExtraPeerDirs, "extra-peer-dirs", ca.ExtraPeerDirs, + "Application specific comma-separated list of import paths which are considered, after tag-specified peers and base-peer-dirs, for conversions.") + pflag.CommandLine.BoolVar(&ca.SkipUnsafe, "skip-unsafe", ca.SkipUnsafe, + "If true, will not generate code using unsafe pointer conversions; resulting code may be slower.") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + + return nil +} diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go index b784950552e..aa82f150bf5 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go @@ -30,32 +30,10 @@ import ( "k8s.io/gengo/types" "github.com/golang/glog" + + conversionargs "k8s.io/code-generator/cmd/conversion-gen/args" ) -// DefaultBasePeerDirs are the peer-dirs nearly everybody will use, i.e. 
those coming from -// apimachinery. -var DefaultBasePeerDirs = []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/conversion", - "k8s.io/apimachinery/pkg/runtime", -} - -// CustomArgs is used by the gengo framework to pass args specific to this generator. -type CustomArgs struct { - // Base peer dirs which nearly everybody will use, i.e. outside of Kubernetes core. - BasePeerDirs []string - - // Custom peer dirs which are application specific. Always consider these as - // last-ditch possibilities for conversions. - ExtraPeerDirs []string // - - // Skipunsafe indicates whether to generate unsafe conversions to improve the efficiency - // of these operations. The unsafe operation is a direct pointer assignment via unsafe - // (within the allowed uses of unsafe) and is equivalent to a proposed Golang change to - // allow structs that are identical to be assigned to each other. - SkipUnsafe bool -} - // These are the comment tags that carry parameters for conversion generation. const ( // e.g., "+k8s:conversion-gen=" in doc.go, where is the @@ -264,7 +242,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat continue } skipUnsafe := false - if customArgs, ok := arguments.CustomArgs.(*CustomArgs); ok { + if customArgs, ok := arguments.CustomArgs.(*conversionargs.CustomArgs); ok { peerPkgs = append(peerPkgs, customArgs.BasePeerDirs...) peerPkgs = append(peerPkgs, customArgs.ExtraPeerDirs...) 
skipUnsafe = customArgs.SkipUnsafe @@ -593,12 +571,6 @@ func argsFromType(inType, outType *types.Type) generator.Args { } } -func defaultingArgsFromType(inType *types.Type) generator.Args { - return generator.Args{ - "inType": inType, - } -} - const nameTmpl = "Convert_$.inType|publicIT$_To_$.outType|publicIT$" func (g *genConversion) preexists(inType, outType *types.Type) (*types.Type, bool) { diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go index aab38ad1a6b..4cba1c6bdf2 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -35,37 +35,35 @@ limitations under the License. package main import ( + "flag" "path/filepath" - "k8s.io/code-generator/cmd/conversion-gen/generators" - "k8s.io/gengo/args" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/gengo/args" + + generatorargs "k8s.io/code-generator/cmd/conversion-gen/args" + "k8s.io/code-generator/cmd/conversion-gen/generators" ) func main() { - arguments := args.Default() - - // Custom args. - customArgs := &generators.CustomArgs{ - BasePeerDirs: generators.DefaultBasePeerDirs, - SkipUnsafe: false, - } - pflag.CommandLine.StringSliceVar(&customArgs.BasePeerDirs, "base-peer-dirs", customArgs.BasePeerDirs, - "Comma-separated list of apimachinery import paths which are considered, after tag-specified peers, for conversions. 
Only change these if you have very good reasons.") - pflag.CommandLine.StringSliceVar(&customArgs.ExtraPeerDirs, "extra-peer-dirs", customArgs.ExtraPeerDirs, - "Application specific comma-separated list of import paths which are considered, after tag-specified peers and base-peer-dirs, for conversions.") - pflag.CommandLine.BoolVar(&customArgs.SkipUnsafe, "skip-unsafe", customArgs.SkipUnsafe, - "If true, will not generate code using unsafe pointer conversions; resulting code may be slower.") + genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. - arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") - arguments.OutputFileBaseName = "conversion_generated" - arguments.CustomArgs = customArgs + // TODO: move this out of conversion-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + glog.Fatalf("Error: %v", err) + } // Run it. - if err := arguments.Execute( + if err := genericArgs.Execute( generators.NameSystems(), generators.DefaultNameSystem(), generators.Packages, diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go new file mode 100644 index 00000000000..789713012ad --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/deepcopy-gen/generators" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs generators.CustomArgs + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = (*generators.CustomArgs)(customArgs) // convert to upstream type to make type-casts work there + genericArgs.OutputFileBaseName = "deepcopy_generated" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + pflag.CommandLine.StringSliceVar(&ca.BoundingDirs, "bounding-dirs", ca.BoundingDirs, + "Comma-separated list of import paths which bound the types for which deep-copies will be generated.") +} + +// Validate checks the given arguments. 
+func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*generators.CustomArgs) + + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + + return nil +} diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go index 75cd29b2e73..bb506cf7f9e 100644 --- a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -43,30 +43,35 @@ limitations under the License. package main import ( + "flag" "path/filepath" - "k8s.io/gengo/args" - "k8s.io/gengo/examples/deepcopy-gen/generators" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/deepcopy-gen/generators" + + generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args" ) func main() { - arguments := args.Default() - - // Custom args. - customArgs := &generators.CustomArgs{} - pflag.CommandLine.StringSliceVar(&customArgs.BoundingDirs, "bounding-dirs", customArgs.BoundingDirs, - "Comma-separated list of import paths which bound the types for which deep-copies will be generated.") + genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. - arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") - arguments.OutputFileBaseName = "deepcopy_generated" - arguments.CustomArgs = customArgs + // TODO: move this out of deepcopy-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + glog.Fatalf("Error: %v", err) + } // Run it. 
- if err := arguments.Execute( + if err := genericArgs.Execute( generators.NameSystems(), generators.DefaultNameSystem(), generators.Packages, diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/args/args.go new file mode 100644 index 00000000000..3c5a042c7ca --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/args/args.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/defaulter-gen/generators" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs generators.CustomArgs + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = (*generators.CustomArgs)(customArgs) // convert to upstream type to make type-casts work there + genericArgs.OutputFileBaseName = "zz_generated.defaults" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. 
+func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + pflag.CommandLine.StringSliceVar(&ca.ExtraPeerDirs, "extra-peer-dirs", ca.ExtraPeerDirs, + "Comma-separated list of import paths which are considered, after tag-specified peers, for conversions.") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*generators.CustomArgs) + + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + + return nil +} diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go index 119ca119011..89d2b39bc6a 100644 --- a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -42,32 +42,35 @@ limitations under the License. package main import ( + "flag" "path/filepath" - "k8s.io/gengo/args" - "k8s.io/gengo/examples/defaulter-gen/generators" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/defaulter-gen/generators" + + generatorargs "k8s.io/code-generator/cmd/defaulter-gen/args" ) func main() { - arguments := args.Default() - - // Custom args. - customArgs := &generators.CustomArgs{ - ExtraPeerDirs: []string{}, - } - pflag.CommandLine.StringSliceVar(&customArgs.ExtraPeerDirs, "extra-peer-dirs", customArgs.ExtraPeerDirs, - "Comma-separated list of import paths which are considered, after tag-specified peers, for conversions.") + genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. 
- arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") - arguments.OutputFileBaseName = "zz_generated.defaults" - arguments.CustomArgs = customArgs + // TODO: move this out of defaulter-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + glog.Fatalf("Error: %v", err) + } // Run it. - if err := arguments.Execute( + if err := genericArgs.Execute( generators.NameSystems(), generators.DefaultNameSystem(), generators.Packages, diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/args/args.go new file mode 100644 index 00000000000..ba7f7209175 --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/args/args.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + "path" + + "github.com/spf13/pflag" + codegenutil "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. 
+type CustomArgs struct { + VersionedClientSetPackage string + InternalClientSetPackage string + ListersPackage string + SingleDirectory bool +} + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{ + SingleDirectory: false, + } + genericArgs.CustomArgs = customArgs + + if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { + genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/informers") + customArgs.VersionedClientSetPackage = path.Join(pkg, "pkg/client/clientset/versioned") + customArgs.InternalClientSetPackage = path.Join(pkg, "pkg/client/clientset/internalversion") + customArgs.ListersPackage = path.Join(pkg, "pkg/client/listers") + } + + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&ca.InternalClientSetPackage, "internal-clientset-package", ca.InternalClientSetPackage, "the full package name for the internal clientset to use") + fs.StringVar(&ca.VersionedClientSetPackage, "versioned-clientset-package", ca.VersionedClientSetPackage, "the full package name for the versioned clientset to use") + fs.StringVar(&ca.ListersPackage, "listers-package", ca.ListersPackage, "the full package name for the listers to use") + fs.BoolVar(&ca.SingleDirectory, "single-directory", ca.SingleDirectory, "if true, omit the intermediate \"internalversion\" and \"externalversions\" subdirectories") +} + +// Validate checks the given arguments. 
+func Validate(genericArgs *args.GeneratorArgs) error { + customArgs := genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + if len(customArgs.VersionedClientSetPackage) == 0 { + return fmt.Errorf("versioned clientset package cannot be empty") + } + if len(customArgs.ListersPackage) == 0 { + return fmt.Errorf("listers package cannot be empty") + } + + return nil +} diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/customargs.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/customargs.go deleted file mode 100644 index 168309a920c..00000000000 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/customargs.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package generators - -import "github.com/spf13/pflag" - -type CustomArgs struct { - VersionedClientSetPackage string - InternalClientSetPackage string - ListersPackage string - SingleDirectory bool -} - -func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { - fs.StringVar(&ca.InternalClientSetPackage, "internal-clientset-package", ca.InternalClientSetPackage, "the full package name for the internal clientset to use") - fs.StringVar(&ca.VersionedClientSetPackage, "versioned-clientset-package", ca.VersionedClientSetPackage, "the full package name for the versioned clientset to use") - fs.StringVar(&ca.ListersPackage, "listers-package", ca.ListersPackage, "the full package name for the listers to use") - fs.BoolVar(&ca.SingleDirectory, "single-directory", ca.SingleDirectory, "if true, omit the intermediate \"internalversion\" and \"externalversions\" subdirectories") -} diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go index 6e0d5c00225..2a6ef2eeb56 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go @@ -22,6 +22,7 @@ import ( "path/filepath" "strings" + "github.com/golang/glog" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" @@ -29,8 +30,7 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - - "github.com/golang/glog" + informergenargs "k8s.io/code-generator/cmd/informer-gen/args" ) // NameSystems returns the name system used by the generators in this package. @@ -113,7 +113,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat boilerplate = append(boilerplate, []byte(generatedBy())...) 
- customArgs, ok := arguments.CustomArgs.(*CustomArgs) + customArgs, ok := arguments.CustomArgs.(*informergenargs.CustomArgs) if !ok { glog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) } diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go index 78c0b3829cb..fc6feeb30bc 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go @@ -17,34 +17,39 @@ limitations under the License. package main import ( + "flag" "path/filepath" - "k8s.io/code-generator/cmd/informer-gen/generators" - "k8s.io/gengo/args" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/code-generator/cmd/informer-gen/generators" + "k8s.io/gengo/args" + + generatorargs "k8s.io/code-generator/cmd/informer-gen/args" ) func main() { - arguments := args.Default() - - // Custom arguments. - customArgs := &generators.CustomArgs{ - VersionedClientSetPackage: "k8s.io/kubernetes/pkg/client/clientset_generated/clientset", - InternalClientSetPackage: "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - ListersPackage: "k8s.io/kubernetes/pkg/client/listers", - SingleDirectory: false, - } - customArgs.AddFlags(pflag.CommandLine) + genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. 
- arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") - arguments.OutputPackagePath = "k8s.io/kubernetes/pkg/client/informers/informers_generated" - arguments.CustomArgs = customArgs + // TODO: move out of informer-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/informers/informers_generated" + customArgs.VersionedClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + customArgs.InternalClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + customArgs.ListersPackage = "k8s.io/kubernetes/pkg/client/listers" + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + glog.Fatalf("Error: %v", err) + } // Run it. - if err := arguments.Execute( + if err := genericArgs.Execute( generators.NameSystems(), generators.DefaultNameSystem(), generators.Packages, diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/args/args.go new file mode 100644 index 00000000000..34914ea8c9b --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/args/args.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + "path" + + "github.com/spf13/pflag" + codegenutil "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct{} + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = customArgs + + if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { + genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/listers") + } + + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + + return nil +} diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go index 3b305f82877..a89817f4957 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go @@ -17,23 +17,36 @@ limitations under the License. package main import ( + "flag" "path/filepath" + "github.com/golang/glog" + "github.com/spf13/pflag" "k8s.io/code-generator/cmd/lister-gen/generators" "k8s.io/gengo/args" - "github.com/golang/glog" + generatorargs "k8s.io/code-generator/cmd/lister-gen/args" ) func main() { - arguments := args.Default() + genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. 
- arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") - arguments.OutputPackagePath = "k8s.io/kubernetes/pkg/client/listers" + // TODO: move this out of lister-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/listers" + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + glog.Fatalf("Error: %v", err) + } // Run it. - if err := arguments.Execute( + if err := genericArgs.Execute( generators.NameSystems(), generators.DefaultNameSystem(), generators.Packages, diff --git a/staging/src/k8s.io/code-generator/cmd/openapi-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/openapi-gen/args/args.go new file mode 100644 index 00000000000..f9bb17e1a56 --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/openapi-gen/args/args.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct{} + +// NewDefaults returns default arguments for the generator. 
+func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = customArgs + genericArgs.OutputFileBaseName = "openapi_generated" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + + return nil +} diff --git a/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go b/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go index 67355f83494..e3c6f6c6cd8 100644 --- a/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go @@ -20,23 +20,35 @@ limitations under the License. package main import ( + "flag" "path/filepath" + "github.com/golang/glog" + "github.com/spf13/pflag" "k8s.io/gengo/args" "k8s.io/kube-openapi/pkg/generators" - "github.com/golang/glog" + generatorargs "k8s.io/code-generator/cmd/openapi-gen/args" ) func main() { - arguments := args.Default() + genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. 
- arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") - arguments.OutputFileBaseName = "openapi_generated" + // TODO: move this out of openapi-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + glog.Fatalf("Error: %v", err) + } // Run it. - if err := arguments.Execute( + if err := genericArgs.Execute( generators.NameSystems(), generators.DefaultNameSystem(), generators.Packages, diff --git a/staging/src/k8s.io/code-generator/pkg/util/build.go b/staging/src/k8s.io/code-generator/pkg/util/build.go new file mode 100644 index 00000000000..9d3e8a8e151 --- /dev/null +++ b/staging/src/k8s.io/code-generator/pkg/util/build.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + gobuild "go/build" + "path/filepath" + "strings" +) + +// CurrentPackage returns the go package of the current directory, or "" if it cannot +// be derived from the GOPATH. 
+func CurrentPackage() string { + for _, root := range gobuild.Default.SrcDirs() { + if pkg, ok := hasSubdir(root, "."); ok { + return pkg + } + } + return "" +} + +func hasSubdir(root, dir string) (rel string, ok bool) { + // ensure a tailing separator to properly compare on word-boundaries + const sep = string(filepath.Separator) + root = filepath.Clean(root) + if !strings.HasSuffix(root, sep) { + root += sep + } + + // check whether root dir starts with root + dir = filepath.Clean(dir) + if !strings.HasPrefix(dir, root) { + return "", false + } + + // cut off root + return filepath.ToSlash(dir[len(root):]), true +} From 94d2a67263b388bc59500ef20a21201dc49f029a Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Mon, 27 Nov 2017 11:09:07 +0100 Subject: [PATCH 168/794] client-gen: use --output-package instead of --clientset-path --- hack/update-codegen.sh | 2 +- .../hack/update-codegen.sh | 2 +- .../cmd/client-gen/args/args.go | 21 ++++++++++++------- .../client-gen/generators/client_generator.go | 2 +- .../generators/fake/fake_client_generator.go | 10 ++++----- .../code-generator/cmd/client-gen/main.go | 10 ++++----- .../k8s.io/code-generator/generate-groups.sh | 2 +- .../generate-internal-groups.sh | 4 ++-- .../kube-aggregator/hack/update-codegen.sh | 2 +- .../src/k8s.io/metrics/hack/update-codegen.sh | 2 +- 10 files changed, 31 insertions(+), 26 deletions(-) diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 01135e2451a..9c29807b438 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -72,7 +72,7 @@ INTERNAL_DIRS_CSV=$(IFS=',';echo "${INTERNAL_DIRS[*]// /,}";IFS=$) # This can be called with one flag, --verify-only, so it works for both the # update- and verify- scripts. 
${clientgen} --input-base="k8s.io/kubernetes/pkg/apis" --input="${INTERNAL_DIRS_CSV}" "$@" -${clientgen} --output-base "${KUBE_ROOT}/vendor" --clientset-path="k8s.io/client-go" --clientset-name="kubernetes" --input-base="k8s.io/kubernetes/vendor/k8s.io/api" --input="${GV_DIRS_CSV}" "$@" +${clientgen} --output-base "${KUBE_ROOT}/vendor" --output-package="k8s.io/client-go" --clientset-name="kubernetes" --input-base="k8s.io/kubernetes/vendor/k8s.io/api" --input="${GV_DIRS_CSV}" "$@" listergen_internal_apis=( $( diff --git a/staging/src/k8s.io/apiextensions-apiserver/hack/update-codegen.sh b/staging/src/k8s.io/apiextensions-apiserver/hack/update-codegen.sh index efc52ecd886..8b1e582bb46 100755 --- a/staging/src/k8s.io/apiextensions-apiserver/hack/update-codegen.sh +++ b/staging/src/k8s.io/apiextensions-apiserver/hack/update-codegen.sh @@ -50,7 +50,7 @@ apiextensions/ apiextensions/v1beta1 ) INPUT="--input ${INPUT_APIS[@]}" -CLIENTSET_PATH="--clientset-path k8s.io/apiextensions-apiserver/pkg/client/clientset" +CLIENTSET_PATH="--output-package k8s.io/apiextensions-apiserver/pkg/client/clientset" ${CLIENTGEN} ${INPUT_BASE} ${INPUT} ${CLIENTSET_PATH} --output-base ${SCRIPT_BASE} ${CLIENTGEN} --clientset-name="clientset" ${INPUT_BASE} --input apiextensions/v1beta1 ${CLIENTSET_PATH} --output-base ${SCRIPT_BASE} diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go index f17608fadde..6d4bc1739b1 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/args/args.go @@ -46,9 +46,6 @@ type CustomArgs struct { // ClientsetName is the name of the clientset to be generated. It's // populated from command-line arguments. ClientsetName string - // ClientsetOutputPath is the path the clientset will be generated at. It's - // populated from command-line arguments. 
- ClientsetOutputPath string // ClientsetAPIPath is the default API HTTP path for generated clients. ClientsetAPIPath string // ClientsetOnly determines if we should generate the clients for groups and @@ -71,7 +68,7 @@ func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { genericArgs.InputDirs = DefaultInputDirs if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { - customArgs.ClientsetOutputPath = path.Join(pkg, "pkg/client/clientset/") + genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/clientset") } return genericArgs, customArgs @@ -84,9 +81,11 @@ func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet, inputBase string) { pflag.Var(NewInputBasePathValue(gvsBuilder, inputBase), "input-base", "base path to look for the api group.") pflag.StringVarP(&ca.ClientsetName, "clientset-name", "n", ca.ClientsetName, "the name of the generated clientset package.") pflag.StringVarP(&ca.ClientsetAPIPath, "clientset-api-path", "", ca.ClientsetAPIPath, "the value of default API HTTP path, starting with / and without trailing /.") - pflag.StringVar(&ca.ClientsetOutputPath, "clientset-path", ca.ClientsetOutputPath, "the generated clientset will be output to /.") pflag.BoolVar(&ca.ClientsetOnly, "clientset-only", ca.ClientsetOnly, "when set, client-gen only generates the clientset shell, without generating the individual typed clients") pflag.BoolVar(&ca.FakeClient, "fake-clientset", ca.FakeClient, "when set, client-gen will generate the fake clientset that can be used in tests") + + // support old flags + fs.SetNormalizeFunc(mapFlagName("clientset-path", "output-package", fs.GetNormalizeFunc())) } func Validate(genericArgs *args.GeneratorArgs) error { @@ -101,9 +100,6 @@ func Validate(genericArgs *args.GeneratorArgs) error { if len(customArgs.ClientsetAPIPath) == 0 { return fmt.Errorf("clientset API path cannot be empty") } - if len(customArgs.ClientsetOutputPath) == 0 { - return fmt.Errorf("clientset path cannot be empty") - } return nil } @@ -118,3 +114,12 @@ func (ca 
*CustomArgs) GroupVersionPackages() map[types.GroupVersion]string { } return res } + +func mapFlagName(from, to string, old func(fs *pflag.FlagSet, name string) pflag.NormalizedName) func(fs *pflag.FlagSet, name string) pflag.NormalizedName { + return func(fs *pflag.FlagSet, name string) pflag.NormalizedName { + if name == from { + name = to + } + return old(fs, name) + } +} diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index 3042267e9dc..74af0b31d47 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -371,7 +371,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat } var packageList []generator.Package - clientsetPackage := filepath.Join(customArgs.ClientsetOutputPath, customArgs.ClientsetName) + clientsetPackage := filepath.Join(arguments.OutputPackagePath, customArgs.ClientsetName) packageList = append(packageList, packageForClientset(customArgs, clientsetPackage, groupGoNames, boilerplate)) packageList = append(packageList, packageForScheme(customArgs, clientsetPackage, arguments.OutputBase, groupGoNames, boilerplate)) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go index 277a3ce1045..ec439c2f7ae 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go @@ -84,12 +84,12 @@ func PackageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, cli } } -func PackageForClientset(customArgs *clientgenargs.CustomArgs, fakeClientsetPackage string, groupGoNames 
map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { +func PackageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { return &generator.DefaultPackage{ // TODO: we'll generate fake clientset for different release in the future. // Package name and path are hard coded for now. PackageName: "fake", - PackagePath: filepath.Join(fakeClientsetPackage, "fake"), + PackagePath: filepath.Join(clientsetPackage, "fake"), HeaderText: boilerplate, PackageDocumentation: []byte( `// This package has the automatically generated fake clientset. @@ -107,17 +107,17 @@ func PackageForClientset(customArgs *clientgenargs.CustomArgs, fakeClientsetPack }, groups: customArgs.Groups, groupGoNames: groupGoNames, - fakeClientsetPackage: fakeClientsetPackage, + fakeClientsetPackage: clientsetPackage, outputPackage: "fake", imports: generator.NewImportTracker(), - realClientsetPackage: filepath.Join(customArgs.ClientsetOutputPath, customArgs.ClientsetName), + realClientsetPackage: clientsetPackage, }, &scheme.GenScheme{ DefaultGen: generator.DefaultGen{ OptionalName: "register", }, InputPackages: customArgs.GroupVersionPackages(), - OutputPackage: fakeClientsetPackage, + OutputPackage: clientsetPackage, Groups: customArgs.Groups, GroupGoNames: groupGoNames, ImportTracker: generator.NewImportTracker(), diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go index 40c3069819d..5869d83a1d5 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go @@ -35,17 +35,13 @@ func main() { // Override defaults. 
// TODO: move this out of client-gen genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt") - customArgs.ClientsetOutputPath = "k8s.io/kubernetes/pkg/client/clientset_generated/" + genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/clientset_generated/" genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of client-gen pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) - } - // add group version package as input dirs for gengo for _, pkg := range customArgs.Groups { for _, v := range pkg.Versions { @@ -53,6 +49,10 @@ func main() { } } + if err := generatorargs.Validate(genericArgs); err != nil { + glog.Fatalf("Error: %v", err) + } + if err := genericArgs.Execute( generators.NameSystems(), generators.DefaultNameSystem(), diff --git a/staging/src/k8s.io/code-generator/generate-groups.sh b/staging/src/k8s.io/code-generator/generate-groups.sh index 145b12400b4..b92296d96d7 100755 --- a/staging/src/k8s.io/code-generator/generate-groups.sh +++ b/staging/src/k8s.io/code-generator/generate-groups.sh @@ -67,7 +67,7 @@ fi if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/clientset" - ${GOPATH}/bin/client-gen --clientset-name versioned --input-base "" --input $(codegen::join , "${FQ_APIS[@]}") --clientset-path ${OUTPUT_PKG}/clientset "$@" + ${GOPATH}/bin/client-gen --clientset-name versioned --input-base "" --input $(codegen::join , "${FQ_APIS[@]}") --output-package ${OUTPUT_PKG}/clientset "$@" fi if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then diff --git a/staging/src/k8s.io/code-generator/generate-internal-groups.sh b/staging/src/k8s.io/code-generator/generate-internal-groups.sh index 
b995dd4d56a..0de606d39af 100755 --- a/staging/src/k8s.io/code-generator/generate-internal-groups.sh +++ b/staging/src/k8s.io/code-generator/generate-internal-groups.sh @@ -87,9 +87,9 @@ fi if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/clientset" if [ -n "${INT_APIS_PKG}" ]; then - ${GOPATH}/bin/client-gen --clientset-name internalversion --input-base "" --input $(codegen::join , $(printf '%s/ ' "${INT_FQ_APIS[@]}")) --clientset-path ${OUTPUT_PKG}/clientset "$@" + ${GOPATH}/bin/client-gen --clientset-name internalversion --input-base "" --input $(codegen::join , $(printf '%s/ ' "${INT_FQ_APIS[@]}")) --output-package ${OUTPUT_PKG}/clientset "$@" fi - ${GOPATH}/bin/client-gen --clientset-name versioned --input-base "" --input $(codegen::join , "${EXT_FQ_APIS[@]}") --clientset-path ${OUTPUT_PKG}/clientset "$@" + ${GOPATH}/bin/client-gen --clientset-name versioned --input-base "" --input $(codegen::join , "${EXT_FQ_APIS[@]}") --output-package ${OUTPUT_PKG}/clientset "$@" fi if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then diff --git a/staging/src/k8s.io/kube-aggregator/hack/update-codegen.sh b/staging/src/k8s.io/kube-aggregator/hack/update-codegen.sh index 53e9889caeb..527be571b4e 100755 --- a/staging/src/k8s.io/kube-aggregator/hack/update-codegen.sh +++ b/staging/src/k8s.io/kube-aggregator/hack/update-codegen.sh @@ -51,7 +51,7 @@ apiregistration/ apiregistration/v1beta1 ) INPUT="--input ${INPUT_APIS[@]}" -CLIENTSET_PATH="--clientset-path k8s.io/kube-aggregator/pkg/client/clientset_generated" +CLIENTSET_PATH="--output-package k8s.io/kube-aggregator/pkg/client/clientset_generated" ${CLIENTGEN} ${INPUT_BASE} ${INPUT} ${CLIENTSET_PATH} --output-base ${SCRIPT_BASE} ${CLIENTGEN} --clientset-name="clientset" ${INPUT_BASE} --input apiregistration/v1beta1 ${CLIENTSET_PATH} --output-base ${SCRIPT_BASE} diff --git a/staging/src/k8s.io/metrics/hack/update-codegen.sh 
b/staging/src/k8s.io/metrics/hack/update-codegen.sh index 52ce78f437a..429a7152c44 100755 --- a/staging/src/k8s.io/metrics/hack/update-codegen.sh +++ b/staging/src/k8s.io/metrics/hack/update-codegen.sh @@ -34,7 +34,7 @@ go build -o "${CLIENTGEN}" ${CODEGEN_PKG}/cmd/client-gen PREFIX=k8s.io/metrics/pkg/apis INPUT_BASE="--input-base ${PREFIX}" -CLIENTSET_PATH="--clientset-path k8s.io/metrics/pkg/client/clientset_generated" +CLIENTSET_PATH="--output-package k8s.io/metrics/pkg/client/clientset_generated" ${CLIENTGEN} --clientset-name="clientset" ${INPUT_BASE} --input metrics/v1alpha1 --input metrics/v1beta1 ${CLIENTSET_PATH} --output-base ${SCRIPT_BASE} From d49bbb92282160e59fa778bc3deed07796a40c81 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Thu, 30 Nov 2017 10:16:49 +0100 Subject: [PATCH 169/794] conversion-gen: add godocs for peer dirs --- .../code-generator/cmd/conversion-gen/args/args.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/args/args.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/args/args.go index 0d85222db2d..3b7b0123de9 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/args/args.go +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/args/args.go @@ -33,12 +33,14 @@ var DefaultBasePeerDirs = []string{ // CustomArgs is used by the gengo framework to pass args specific to this generator. type CustomArgs struct { - // Base peer dirs which nearly everybody will use, i.e. outside of Kubernetes core. + // Base peer dirs which nearly everybody will use, i.e. outside of Kubernetes core. Peer dirs + // are declared to make the generator pick up manually written conversion funcs from external + // packages. BasePeerDirs []string - // Custom peer dirs which are application specific. Always consider these as - // last-ditch possibilities for conversions. 
- ExtraPeerDirs []string // + // Custom peer dirs which are application specific. Peer dirs are declared to make the + // generator pick up manually written conversion funcs from external packages. + ExtraPeerDirs []string // Skipunsafe indicates whether to generate unsafe conversions to improve the efficiency // of these operations. The unsafe operation is a direct pointer assignment via unsafe From a88a54bc6c5dadec6aec51388b6193af121b89b3 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Mon, 27 Nov 2017 11:38:14 +0100 Subject: [PATCH 170/794] Update bazel --- staging/BUILD | 1 + .../code-generator/cmd/client-gen/args/BUILD | 2 ++ .../code-generator/cmd/conversion-gen/BUILD | 2 ++ .../cmd/conversion-gen/args/BUILD | 26 ++++++++++++++++++ .../cmd/conversion-gen/generators/BUILD | 1 + .../code-generator/cmd/deepcopy-gen/BUILD | 6 ++++- .../cmd/deepcopy-gen/args/BUILD | 27 +++++++++++++++++++ .../code-generator/cmd/defaulter-gen/BUILD | 6 ++++- .../cmd/defaulter-gen/args/BUILD | 27 +++++++++++++++++++ .../code-generator/cmd/informer-gen/BUILD | 2 ++ .../cmd/informer-gen/args/BUILD | 27 +++++++++++++++++++ .../cmd/informer-gen/generators/BUILD | 3 +-- .../code-generator/cmd/lister-gen/BUILD | 3 +++ .../code-generator/cmd/lister-gen/args/BUILD | 27 +++++++++++++++++++ .../code-generator/cmd/openapi-gen/BUILD | 7 ++++- .../code-generator/cmd/openapi-gen/args/BUILD | 26 ++++++++++++++++++ .../src/k8s.io/code-generator/pkg/util/BUILD | 22 +++++++++++++++ 17 files changed, 210 insertions(+), 5 deletions(-) create mode 100644 staging/src/k8s.io/code-generator/cmd/conversion-gen/args/BUILD create mode 100644 staging/src/k8s.io/code-generator/cmd/deepcopy-gen/args/BUILD create mode 100644 staging/src/k8s.io/code-generator/cmd/defaulter-gen/args/BUILD create mode 100644 staging/src/k8s.io/code-generator/cmd/informer-gen/args/BUILD create mode 100644 staging/src/k8s.io/code-generator/cmd/lister-gen/args/BUILD create mode 100644 
staging/src/k8s.io/code-generator/cmd/openapi-gen/args/BUILD create mode 100644 staging/src/k8s.io/code-generator/pkg/util/BUILD diff --git a/staging/BUILD b/staging/BUILD index fe653506ce4..b7f2b8fbd08 100644 --- a/staging/BUILD +++ b/staging/BUILD @@ -210,6 +210,7 @@ filegroup( "//staging/src/k8s.io/code-generator/cmd/lister-gen:all-srcs", "//staging/src/k8s.io/code-generator/cmd/openapi-gen:all-srcs", "//staging/src/k8s.io/code-generator/cmd/set-gen:all-srcs", + "//staging/src/k8s.io/code-generator/pkg/util:all-srcs", "//staging/src/k8s.io/code-generator/third_party/forked/golang/reflect:all-srcs", "//staging/src/k8s.io/kube-aggregator:all-srcs", "//staging/src/k8s.io/metrics/pkg/apis/custom_metrics:all-srcs", diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/args/BUILD b/staging/src/k8s.io/code-generator/cmd/client-gen/args/BUILD index 03a93e447d4..fc7ed844947 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/args/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/args/BUILD @@ -17,6 +17,8 @@ go_library( deps = [ "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/types:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", + "//vendor/k8s.io/gengo/args:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD index eea6e91524e..a244fae00f8 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD @@ -19,6 +19,7 @@ go_library( deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/code-generator/cmd/conversion-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/conversion-gen/generators:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", ], @@ -35,6 +36,7 @@ 
filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/code-generator/cmd/conversion-gen/args:all-srcs", "//staging/src/k8s.io/code-generator/cmd/conversion-gen/generators:all-srcs", ], tags = ["automanaged"], diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/args/BUILD b/staging/src/k8s.io/code-generator/cmd/conversion-gen/args/BUILD new file mode 100644 index 00000000000..e06b0b1bce1 --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/args/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["args.go"], + importpath = "k8s.io/code-generator/cmd/conversion-gen/args", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/gengo/args:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD index 49290d578c3..fd9feff4027 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD @@ -11,6 +11,7 @@ go_library( importpath = "k8s.io/code-generator/cmd/conversion-gen/generators", deps = [ "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/code-generator/cmd/conversion-gen/args:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD 
b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD index d02e19af509..e399aab102c 100644 --- a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD @@ -19,6 +19,7 @@ go_library( deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/code-generator/cmd/deepcopy-gen/args:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/deepcopy-gen/generators:go_default_library", ], @@ -33,6 +34,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/code-generator/cmd/deepcopy-gen/args:all-srcs", + ], tags = ["automanaged"], ) diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/args/BUILD b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/args/BUILD new file mode 100644 index 00000000000..cf86fb77658 --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/args/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["args.go"], + importpath = "k8s.io/code-generator/cmd/deepcopy-gen/args", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/gengo/examples/deepcopy-gen/generators:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD index 947327e961d..8306dd8992a 100644 --- 
a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD @@ -19,6 +19,7 @@ go_library( deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/code-generator/cmd/defaulter-gen/args:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/defaulter-gen/generators:go_default_library", ], @@ -33,6 +34,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/code-generator/cmd/defaulter-gen/args:all-srcs", + ], tags = ["automanaged"], ) diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/args/BUILD b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/args/BUILD new file mode 100644 index 00000000000..6f9bb87c54b --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/args/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["args.go"], + importpath = "k8s.io/code-generator/cmd/defaulter-gen/args", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/gengo/examples/defaulter-gen/generators:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD index 81eeb5c6110..cd80dab10fe 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD @@ -19,6 +19,7 @@ 
go_library( deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/code-generator/cmd/informer-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/informer-gen/generators:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", ], @@ -35,6 +36,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/code-generator/cmd/informer-gen/args:all-srcs", "//staging/src/k8s.io/code-generator/cmd/informer-gen/generators:all-srcs", ], tags = ["automanaged"], diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/args/BUILD b/staging/src/k8s.io/code-generator/cmd/informer-gen/args/BUILD new file mode 100644 index 00000000000..e2233810152 --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/args/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["args.go"], + importpath = "k8s.io/code-generator/cmd/informer-gen/args", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", + "//vendor/k8s.io/gengo/args:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD index 5b1de7e77a7..9c220b55aa9 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD @@ -8,7 +8,6 @@ load( go_library( name = "go_default_library", srcs = [ - "customargs.go", "factory.go", 
"factoryinterface.go", "generic.go", @@ -22,9 +21,9 @@ go_library( importpath = "k8s.io/code-generator/cmd/informer-gen/generators", deps = [ "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/generators/util:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/types:go_default_library", + "//vendor/k8s.io/code-generator/cmd/informer-gen/args:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD index 186c410117b..5d44c37176b 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD @@ -18,6 +18,8 @@ go_library( importpath = "k8s.io/code-generator/cmd/lister-gen", deps = [ "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/code-generator/cmd/lister-gen/args:go_default_library", "//vendor/k8s.io/code-generator/cmd/lister-gen/generators:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", ], @@ -34,6 +36,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/code-generator/cmd/lister-gen/args:all-srcs", "//staging/src/k8s.io/code-generator/cmd/lister-gen/generators:all-srcs", ], tags = ["automanaged"], diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/args/BUILD b/staging/src/k8s.io/code-generator/cmd/lister-gen/args/BUILD new file mode 100644 index 00000000000..53c1eefdea6 --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/args/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["args.go"], + importpath = 
"k8s.io/code-generator/cmd/lister-gen/args", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/code-generator/pkg/util:go_default_library", + "//vendor/k8s.io/gengo/args:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD index bf4a9faef11..533b7cb5d01 100644 --- a/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD @@ -18,6 +18,8 @@ go_library( importpath = "k8s.io/code-generator/cmd/openapi-gen", deps = [ "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/code-generator/cmd/openapi-gen/args:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/generators:go_default_library", ], @@ -32,6 +34,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/code-generator/cmd/openapi-gen/args:all-srcs", + ], tags = ["automanaged"], ) diff --git a/staging/src/k8s.io/code-generator/cmd/openapi-gen/args/BUILD b/staging/src/k8s.io/code-generator/cmd/openapi-gen/args/BUILD new file mode 100644 index 00000000000..e1eb70ea719 --- /dev/null +++ b/staging/src/k8s.io/code-generator/cmd/openapi-gen/args/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["args.go"], + importpath = "k8s.io/code-generator/cmd/openapi-gen/args", + visibility = ["//visibility:public"], + deps = [ + 
"//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/gengo/args:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/code-generator/pkg/util/BUILD b/staging/src/k8s.io/code-generator/pkg/util/BUILD new file mode 100644 index 00000000000..4b6ea978cb2 --- /dev/null +++ b/staging/src/k8s.io/code-generator/pkg/util/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["build.go"], + importpath = "k8s.io/code-generator/pkg/util", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) From d23c7e1f0283d1090def726365107b62b4c7e0c6 Mon Sep 17 00:00:00 2001 From: Krzysztof Jastrzebski Date: Thu, 30 Nov 2017 10:59:11 +0100 Subject: [PATCH 171/794] Change Auto-Repair e2e test tags. 
--- test/e2e/lifecycle/node_auto_repairs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/lifecycle/node_auto_repairs.go b/test/e2e/lifecycle/node_auto_repairs.go index 162c3544461..989b7b18823 100644 --- a/test/e2e/lifecycle/node_auto_repairs.go +++ b/test/e2e/lifecycle/node_auto_repairs.go @@ -37,7 +37,7 @@ const ( repairTimeout = 20 * time.Minute ) -var _ = SIGDescribe("Node Auto Repairs [Slow] [Disruptive]", func() { +var _ = SIGDescribe("Node Auto Repairs [Serial][Disruptive]", func() { f := framework.NewDefaultFramework("lifecycle") var c clientset.Interface var originalNodes map[string]string From 85ac2dc9fa38f3b1df7456df23223b7271ce9014 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Thu, 30 Nov 2017 16:50:40 +0200 Subject: [PATCH 172/794] Remove do-restart states --- .../reactive/kubernetes_master.py | 29 ++++--------------- 1 file changed, 5 insertions(+), 24 deletions(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 1547bc8bc93..0692e1891d5 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -414,7 +414,7 @@ def start_master(etcd): configure_apiserver(etcd) configure_controller_manager() configure_scheduler() - + set_state('kubernetes-master.components.started') hookenv.open_port(6443) @@ -554,7 +554,7 @@ def kick_api_server(tls): if data_changed('cert', tls.get_server_cert()): # certificate changed, so restart the api server hookenv.log("Certificate information changed, restarting api server") - set_state('kube-apiserver.do-restart') + restart_apiserver() tls_client.reset_certificate_write_flag('server') @@ -837,42 +837,25 @@ def shutdown(): service_stop('snap.kube-scheduler.daemon') -@when('kube-apiserver.do-restart') def restart_apiserver(): prev_state, prev_msg = hookenv.status_get() 
hookenv.status_set('maintenance', 'Restarting kube-apiserver') host.service_restart('snap.kube-apiserver.daemon') hookenv.status_set(prev_state, prev_msg) - remove_state('kube-apiserver.do-restart') - set_state('kube-apiserver.started') -@when('kube-controller-manager.do-restart') def restart_controller_manager(): prev_state, prev_msg = hookenv.status_get() hookenv.status_set('maintenance', 'Restarting kube-controller-manager') host.service_restart('snap.kube-controller-manager.daemon') hookenv.status_set(prev_state, prev_msg) - remove_state('kube-controller-manager.do-restart') - set_state('kube-controller-manager.started') -@when('kube-scheduler.do-restart') def restart_scheduler(): prev_state, prev_msg = hookenv.status_get() hookenv.status_set('maintenance', 'Restarting kube-scheduler') host.service_restart('snap.kube-scheduler.daemon') hookenv.status_set(prev_state, prev_msg) - remove_state('kube-scheduler.do-restart') - set_state('kube-scheduler.started') - - -@when_all('kube-apiserver.started', - 'kube-controller-manager.started', - 'kube-scheduler.started') -@when_not('kubernetes-master.components.started') -def componenets_started(): - set_state('kubernetes-master.components.started') def arch(): @@ -1088,8 +1071,7 @@ def configure_apiserver(etcd): api_opts['admission-control'] = ','.join(admission_control) configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args') - - set_state('kube-apiserver.do-restart') + restart_apiserver() def configure_controller_manager(): @@ -1111,8 +1093,7 @@ def configure_controller_manager(): configure_kubernetes_service('kube-controller-manager', controller_opts, 'controller-manager-extra-args') - - set_state('kube-controller-manager.do-restart') + restart_controller_manager() def configure_scheduler(): @@ -1125,7 +1106,7 @@ def configure_scheduler(): configure_kubernetes_service('kube-scheduler', scheduler_opts, 'scheduler-extra-args') - set_state('kube-scheduler.do-restart') + restart_scheduler() def 
setup_basic_auth(password=None, username='admin', uid='admin', From 94b45a9e847a002bb6f6e43a11dcf83782e103df Mon Sep 17 00:00:00 2001 From: Abrar Shivani Date: Mon, 20 Nov 2017 21:38:20 -0800 Subject: [PATCH 173/794] Fix session out issue while creating volume and error message coming up while attaching the volume --- .../providers/vsphere/nodemanager.go | 23 +++++++++++++++++-- .../providers/vsphere/vsphere.go | 14 +++++++---- .../providers/vsphere/vsphere_util.go | 11 ++++++--- 3 files changed, 39 insertions(+), 9 deletions(-) diff --git a/pkg/cloudprovider/providers/vsphere/nodemanager.go b/pkg/cloudprovider/providers/vsphere/nodemanager.go index 493ea61045e..da849989421 100644 --- a/pkg/cloudprovider/providers/vsphere/nodemanager.go +++ b/pkg/cloudprovider/providers/vsphere/nodemanager.go @@ -265,14 +265,33 @@ func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error) return *nodeInfo, nil } -func (nm *NodeManager) GetNodeDetails() []NodeDetails { +func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) { nm.nodeInfoLock.RLock() defer nm.nodeInfoLock.RUnlock() var nodeDetails []NodeDetails + vsphereSessionRefreshMap := make(map[string]bool) + + // Create context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for nodeName, nodeInfo := range nm.nodeInfoMap { nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm}) + if vsphereSessionRefreshMap[nodeInfo.vcServer] { + continue + } + vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer] + if vsphereInstance == nil { + err := fmt.Errorf("vSphereInstance for vc server %q not found while looking for vm %q", nodeInfo.vcServer, nodeInfo.vm) + return nil, err + } + err := vsphereInstance.conn.Connect(ctx) + if err != nil { + return nil, err + } + vsphereSessionRefreshMap[nodeInfo.vcServer] = true } - return nodeDetails + return nodeDetails, nil } func (nm *NodeManager) addNodeInfo(nodeName string, nodeInfo *NodeInfo) { diff --git 
a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 77f80e23549..0139015fc39 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -639,7 +639,8 @@ func (vs *VSphere) InstanceID(nodeName k8stypes.NodeName) (string, error) { instanceID, err := instanceIDInternal() if err != nil { - isManagedObjectNotFoundError, err := vs.retry(nodeName, err) + var isManagedObjectNotFoundError bool + isManagedObjectNotFoundError, err = vs.retry(nodeName, err) if isManagedObjectNotFoundError { if err == nil { glog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName)) @@ -729,14 +730,17 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN requestTime := time.Now() diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName) if err != nil { - isManagedObjectNotFoundError, err := vs.retry(nodeName, err) + var isManagedObjectNotFoundError bool + isManagedObjectNotFoundError, err = vs.retry(nodeName, err) if isManagedObjectNotFoundError { if err == nil { glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName)) diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName) + glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", convertToString(nodeName), diskUUID, err) } } } + glog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. 
Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err) vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err) return diskUUID, err } @@ -792,7 +796,8 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error requestTime := time.Now() err := detachDiskInternal(volPath, nodeName) if err != nil { - isManagedObjectNotFoundError, err := vs.retry(nodeName, err) + var isManagedObjectNotFoundError bool + isManagedObjectNotFoundError, err = vs.retry(nodeName, err) if isManagedObjectNotFoundError { if err == nil { err = detachDiskInternal(volPath, nodeName) @@ -847,7 +852,8 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b requestTime := time.Now() isAttached, err := diskIsAttachedInternal(volPath, nodeName) if err != nil { - isManagedObjectNotFoundError, err := vs.retry(nodeName, err) + var isManagedObjectNotFoundError bool + isManagedObjectNotFoundError, err = vs.retry(nodeName, err) if isManagedObjectNotFoundError { if err == vclib.ErrNoVMFound { isAttached, err = false, nil diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/pkg/cloudprovider/providers/vsphere/vsphere_util.go index efedb062139..1fa88ef7430 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -187,8 +187,13 @@ func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nod // Get all datastores accessible for the virtual machine object. func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) { - nodeVmDetails := nodeManager.GetNodeDetails() - if nodeVmDetails == nil || len(nodeVmDetails) == 0 { + nodeVmDetails, err := nodeManager.GetNodeDetails() + if err != nil { + glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. 
error : %+v", err) + return nil, err + } + + if len(nodeVmDetails) == 0 { msg := fmt.Sprintf("Kubernetes node nodeVmDetail details is empty. nodeVmDetails : %+v", nodeVmDetails) glog.Error(msg) return nil, fmt.Errorf(msg) @@ -210,7 +215,7 @@ func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, } } glog.V(9).Infof("sharedDatastores : %+v", sharedDatastores) - sharedDatastores, err := getDatastoresForEndpointVC(ctx, dc, sharedDatastores) + sharedDatastores, err = getDatastoresForEndpointVC(ctx, dc, sharedDatastores) if err != nil { glog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err) return nil, err From a720dd4e23a108aff185d559ff6db1b38be2965d Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 30 Nov 2017 10:38:59 -0500 Subject: [PATCH 174/794] Add hyperkube to make quick-release Building hyperkube also used to be very slow, but that is no longer true per Jeff. So let's switch it on by default for quick-release. --- build/lib/release.sh | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/build/lib/release.sh b/build/lib/release.sh index afacffbdce9..870451601f6 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -28,11 +28,7 @@ readonly RELEASE_STAGE="${LOCAL_OUTPUT_ROOT}/release-stage" readonly RELEASE_TARS="${LOCAL_OUTPUT_ROOT}/release-tars" readonly RELEASE_IMAGES="${LOCAL_OUTPUT_ROOT}/release-images" -KUBE_BUILD_HYPERKUBE=${KUBE_BUILD_HYPERKUBE:-n} -if [[ -n "${KUBE_DOCKER_IMAGE_TAG-}" && -n "${KUBE_DOCKER_REGISTRY-}" ]]; then - # retain legacy behavior of automatically building hyperkube during releases - KUBE_BUILD_HYPERKUBE=y -fi +KUBE_BUILD_HYPERKUBE=${KUBE_BUILD_HYPERKUBE:-y} # Validate a ci version # From ac336a6eb23c6da2eaeb43bdb5113621994c60c3 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Thu, 30 Nov 2017 17:09:52 -0500 Subject: [PATCH 175/794] Add rbac policies for NetworkPolicy --- .../authorizer/rbac/bootstrappolicy/policy.go | 16 ++++++-- 
.../testdata/cluster-roles.yaml | 37 +++++++++++++++++++ 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 02c896128aa..99710269c40 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -48,6 +48,7 @@ const ( storageGroup = "storage.k8s.io" resMetricsGroup = "metrics.k8s.io" customMetricsGroup = "custom.metrics.k8s.io" + networkingGroup = "networking.k8s.io" ) func addDefaultMetadata(obj runtime.Object) { @@ -231,10 +232,13 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale", "deployments/rollback", "ingresses", - "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(), + "replicasets", "replicasets/scale", "replicationcontrollers/scale", + "networkpolicies").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), + rbac.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(), + // additional admin powers rbac.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(), @@ -267,9 +271,12 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale", "deployments/rollback", "ingresses", - "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(), + "replicasets", "replicasets/scale", "replicationcontrollers/scale", + "networkpolicies").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), + + 
rbac.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(), }, }, { @@ -295,9 +302,12 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(), rbac.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale", - "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(), + "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale", + "networkpolicies").RuleOrDie(), rbac.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), + + rbac.NewRule(Read...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(), }, }, { diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index 4db6a8a1130..26b7607aac8 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -181,6 +181,7 @@ items: - deployments/rollback - deployments/scale - ingresses + - networkpolicies - replicasets - replicasets/scale - replicationcontrollers/scale @@ -206,6 +207,19 @@ items: - patch - update - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch - apiGroups: - authorization.k8s.io resources: @@ -359,6 +373,7 @@ items: - deployments/rollback - deployments/scale - ingresses + - networkpolicies - replicasets - replicasets/scale - replicationcontrollers/scale @@ -384,6 +399,19 @@ items: - patch - update - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch - apiVersion: rbac.authorization.k8s.io/v1 kind: 
ClusterRole metadata: @@ -471,6 +499,7 @@ items: - deployments - deployments/scale - ingresses + - networkpolicies - replicasets - replicasets/scale - replicationcontrollers/scale @@ -486,6 +515,14 @@ items: - get - list - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: From d92fcd2f53a183245a1ab937283daa9497ddbdd5 Mon Sep 17 00:00:00 2001 From: supereagle Date: Fri, 1 Dec 2017 09:01:49 +0800 Subject: [PATCH 176/794] use rbac client with explicit version --- .../instrumentation/monitoring/custom_metrics_stackdriver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index cdf2c127fec..bad18e1bd33 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -104,8 +104,8 @@ func testAdapter(f *framework.Framework, kubeClient clientset.Interface, customM } defer CleanupAdapter() - _, err = kubeClient.Rbac().ClusterRoleBindings().Create(HPAPermissions) - defer kubeClient.Rbac().ClusterRoleBindings().Delete("custom-metrics-reader", &metav1.DeleteOptions{}) + _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions) + defer kubeClient.RbacV1().ClusterRoleBindings().Delete("custom-metrics-reader", &metav1.DeleteOptions{}) // Run application that exports the metric err = createSDExporterPods(f, kubeClient) From 81918177d9ebde034097c32a66a0280d965210f8 Mon Sep 17 00:00:00 2001 From: wenjgao Date: Fri, 1 Dec 2017 10:02:58 +0800 Subject: [PATCH 177/794] fix error typo of rbd volume teardown --- pkg/volume/rbd/disk_manager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/volume/rbd/disk_manager.go b/pkg/volume/rbd/disk_manager.go index 
87607370448..9610ebc2b7a 100644 --- a/pkg/volume/rbd/disk_manager.go +++ b/pkg/volume/rbd/disk_manager.go @@ -115,9 +115,9 @@ func diskTearDown(manager diskManager, c rbdUnmounter, volPath string, mounter m } notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath) - if err != nil && !os.IsNotExist(err) { + if mntErr != nil && !os.IsNotExist(mntErr) { glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err + return mntErr } if notMnt { if err := os.Remove(volPath); err != nil { From 1f2262e6b0f5bab90fecba4bde5ee8cc6f65d0ae Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Fri, 1 Dec 2017 11:24:04 +0800 Subject: [PATCH 178/794] Move some kubelet constants to a common place. --- .../apis/kubeletconfig/validation/validation.go | 10 +++++----- pkg/kubelet/cm/container_manager.go | 7 ------- pkg/kubelet/cm/node_container_manager.go | 7 ++++--- pkg/kubelet/eviction/helpers.go | 4 ++-- pkg/kubelet/eviction/helpers_test.go | 4 ++-- pkg/kubelet/types/constants.go | 5 +++++ test/e2e_node/eviction_test.go | 4 ++-- 7 files changed, 20 insertions(+), 21 deletions(-) diff --git a/pkg/kubelet/apis/kubeletconfig/validation/validation.go b/pkg/kubelet/apis/kubeletconfig/validation/validation.go index f0b243081bb..d3f95a0b88c 100644 --- a/pkg/kubelet/apis/kubeletconfig/validation/validation.go +++ b/pkg/kubelet/apis/kubeletconfig/validation/validation.go @@ -22,7 +22,7 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" utilvalidation "k8s.io/apimachinery/pkg/util/validation" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" - containermanager "k8s.io/kubernetes/pkg/kubelet/cm" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) // ValidateKubeletConfiguration validates `kc` and returns an error if it is invalid @@ -91,13 +91,13 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration) error } for _, val := range kc.EnforceNodeAllocatable { switch val { - case containermanager.NodeAllocatableEnforcementKey: - case 
containermanager.SystemReservedEnforcementKey: - case containermanager.KubeReservedEnforcementKey: + case kubetypes.NodeAllocatableEnforcementKey: + case kubetypes.SystemReservedEnforcementKey: + case kubetypes.KubeReservedEnforcementKey: continue default: allErrors = append(allErrors, fmt.Errorf("Invalid option %q specified for EnforceNodeAllocatable (--enforce-node-allocatable) setting. Valid options are %q, %q or %q", - val, containermanager.NodeAllocatableEnforcementKey, containermanager.SystemReservedEnforcementKey, containermanager.KubeReservedEnforcementKey)) + val, kubetypes.NodeAllocatableEnforcementKey, kubetypes.SystemReservedEnforcementKey, kubetypes.KubeReservedEnforcementKey)) } } return utilerrors.NewAggregate(allErrors) diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index dfdcf8d82c8..626b251d6d7 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -122,13 +122,6 @@ type Status struct { SoftRequirements error } -const ( - // Uer visible keys for managing node allocatable enforcement on the node. - NodeAllocatableEnforcementKey = "pods" - SystemReservedEnforcementKey = "system-reserved" - KubeReservedEnforcementKey = "kube-reserved" -) - // containerManager for the kubelet is currently an injected dependency. 
// We need to parse the --qos-reserve-requests option in // cmd/kubelet/app/server.go and there isn't really a good place to put diff --git a/pkg/kubelet/cm/node_container_manager.go b/pkg/kubelet/cm/node_container_manager.go index 66e0d82467e..04e5acdd1a5 100644 --- a/pkg/kubelet/cm/node_container_manager.go +++ b/pkg/kubelet/cm/node_container_manager.go @@ -32,6 +32,7 @@ import ( kubefeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/events" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) const ( @@ -62,7 +63,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { // default cpu shares on cgroups are low and can cause cpu starvation. nodeAllocatable := cm.capacity // Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable. - if cm.CgroupsPerQOS && nc.EnforceNodeAllocatable.Has(NodeAllocatableEnforcementKey) { + if cm.CgroupsPerQOS && nc.EnforceNodeAllocatable.Has(kubetypes.NodeAllocatableEnforcementKey) { nodeAllocatable = cm.getNodeAllocatableAbsolute() } @@ -101,7 +102,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { }() } // Now apply kube reserved and system reserved limits if required. 
- if nc.EnforceNodeAllocatable.Has(SystemReservedEnforcementKey) { + if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) { glog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved) if err := enforceExistingCgroup(cm.cgroupManager, nc.SystemReservedCgroupName, nc.SystemReserved); err != nil { message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err) @@ -110,7 +111,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { } cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName) } - if nc.EnforceNodeAllocatable.Has(KubeReservedEnforcementKey) { + if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) { glog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved) if err := enforceExistingCgroup(cm.cgroupManager, nc.KubeReservedCgroupName, nc.KubeReserved); err != nil { message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err) diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index f8be310b34c..8be0c49a28e 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -29,9 +29,9 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" - "k8s.io/kubernetes/pkg/kubelet/cm" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" "k8s.io/kubernetes/pkg/kubelet/server/stats" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" schedulerutils "k8s.io/kubernetes/plugin/pkg/scheduler/util" ) @@ -198,7 +198,7 @@ func parseThresholdStatement(signal evictionapi.Signal, val string) (evictionapi // getAllocatableThreshold returns the 
thresholds applicable for the allocatable configuration func getAllocatableThreshold(allocatableConfig []string) []evictionapi.Threshold { for _, key := range allocatableConfig { - if key == cm.NodeAllocatableEnforcementKey { + if key == kubetypes.NodeAllocatableEnforcementKey { return []evictionapi.Threshold{ { Signal: evictionapi.SignalAllocatableMemoryAvailable, diff --git a/pkg/kubelet/eviction/helpers_test.go b/pkg/kubelet/eviction/helpers_test.go index 7af5c795ba5..5a794112051 100644 --- a/pkg/kubelet/eviction/helpers_test.go +++ b/pkg/kubelet/eviction/helpers_test.go @@ -30,8 +30,8 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" - "k8s.io/kubernetes/pkg/kubelet/cm" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/quota" ) @@ -61,7 +61,7 @@ func TestParseThresholdConfig(t *testing.T) { expectThresholds: []evictionapi.Threshold{}, }, "all memory eviction values": { - allocatableConfig: []string{cm.NodeAllocatableEnforcementKey}, + allocatableConfig: []string{kubetypes.NodeAllocatableEnforcementKey}, evictionHard: map[string]string{"memory.available": "150Mi"}, evictionSoft: map[string]string{"memory.available": "300Mi"}, evictionSoftGracePeriod: map[string]string{"memory.available": "30s"}, diff --git a/pkg/kubelet/types/constants.go b/pkg/kubelet/types/constants.go index 65f17c4a7a6..b76b70a7578 100644 --- a/pkg/kubelet/types/constants.go +++ b/pkg/kubelet/types/constants.go @@ -24,4 +24,9 @@ const ( DockerContainerRuntime = "docker" RktContainerRuntime = "rkt" RemoteContainerRuntime = "remote" + + // User visible keys for managing node allocatable enforcement on the node. 
+ NodeAllocatableEnforcementKey = "pods" + SystemReservedEnforcementKey = "system-reserved" + KubeReservedEnforcementKey = "kube-reserved" ) diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 0d2cf113bac..064d02d7f91 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -28,8 +28,8 @@ import ( "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" - "k8s.io/kubernetes/pkg/kubelet/cm" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" @@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru initialConfig.KubeReserved = map[string]string{ string(v1.ResourceMemory): kubeReserved.String(), } - initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey} + initialConfig.EnforceNodeAllocatable = []string{kubetypes.NodeAllocatableEnforcementKey} initialConfig.CgroupsPerQOS = true }) runEvictionTest(f, pressureTimeout, expectedNodeCondition, logMemoryMetrics, []podEvictSpec{ From 8048823d0e1afed753d09d5ca3b4aabd97554280 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Fri, 1 Dec 2017 11:24:41 +0800 Subject: [PATCH 179/794] Auto generated BUILD files. 
--- pkg/kubelet/apis/kubeletconfig/validation/BUILD | 2 +- pkg/kubelet/cm/BUILD | 1 + pkg/kubelet/eviction/BUILD | 1 - 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/apis/kubeletconfig/validation/BUILD b/pkg/kubelet/apis/kubeletconfig/validation/BUILD index 7383b8a0c22..cbbd5b9f70b 100644 --- a/pkg/kubelet/apis/kubeletconfig/validation/BUILD +++ b/pkg/kubelet/apis/kubeletconfig/validation/BUILD @@ -12,7 +12,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", - "//pkg/kubelet/cm:go_default_library", + "//pkg/kubelet/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", ], diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD index 127192c637d..039c5941d22 100644 --- a/pkg/kubelet/cm/BUILD +++ b/pkg/kubelet/cm/BUILD @@ -57,6 +57,7 @@ go_library( "//pkg/kubelet/events:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/qos:go_default_library", + "//pkg/kubelet/types:go_default_library", "//pkg/util/file:go_default_library", "//pkg/util/oom:go_default_library", "//pkg/util/procfs:go_default_library", diff --git a/pkg/kubelet/eviction/BUILD b/pkg/kubelet/eviction/BUILD index 0f4d6f82099..44f167752f1 100644 --- a/pkg/kubelet/eviction/BUILD +++ b/pkg/kubelet/eviction/BUILD @@ -18,7 +18,6 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", - "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/eviction/api:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/types:go_default_library", From 2522b3601b9992c8c94bcf71a999a1db5d77c0d5 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Fri, 1 Dec 2017 14:31:12 +0800 Subject: [PATCH 180/794] Fix a typo in kubectl/diff cmd long description. 
--- pkg/kubectl/cmd/diff.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/diff.go b/pkg/kubectl/cmd/diff.go index f70303328be..e536069952d 100644 --- a/pkg/kubectl/cmd/diff.go +++ b/pkg/kubectl/cmd/diff.go @@ -44,7 +44,7 @@ var ( Diff configurations specified by filename or stdin between their local, last-applied, live and/or "merged" versions. - LOCAL and LIVE versions are diffed by default. Other availble keywords + LOCAL and LIVE versions are diffed by default. Other available keywords are MERGED and LAST. Output is always YAML. From f0d894543bf2ffd892a253dd37950e2fa41b7475 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Fri, 1 Dec 2017 15:15:31 +0800 Subject: [PATCH 181/794] remove unnecessary condition judgement --- pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 471fdc31a61..e6b2710fb33 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -1287,7 +1287,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. 
return nil } - if loadbalancer != nil && loadbalancer.VipPortID != "" { + if loadbalancer.VipPortID != "" { portID := loadbalancer.VipPortID floatingIP, err := getFloatingIPByPortID(lbaas.network, portID) if err != nil && err != ErrNotFound { From 2afba1e40b08c77238a20b517eb6361987c1400c Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 1 Dec 2017 07:45:56 +0000 Subject: [PATCH 182/794] remove time waiting after create storage account --- .../providers/azure/azure_blobDiskController.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index d502808b46c..a6eda963ef7 100644 --- a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -542,14 +542,6 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto c.addAccountState(storageAccountName, newAccountState) } - if !bExist { - // SA Accounts takes time to be provisioned - // so if this account was just created allow it sometime - // before polling - glog.V(2).Infof("azureDisk - storage account %s was just created, allowing time before polling status", storageAccountName) - time.Sleep(25 * time.Second) // as observed 25 is the average time for SA to be provisioned - } - // finally, make sure that we default container is created // before handing it back over return c.ensureDefaultContainer(storageAccountName) From dc9d9cac438365587d681bf731ccb28b56727f6b Mon Sep 17 00:00:00 2001 From: Shiyang Wang Date: Fri, 1 Dec 2017 15:25:54 +0800 Subject: [PATCH 183/794] remove deadcode --- pkg/kubectl/BUILD | 1 - pkg/kubectl/versioned_client.go | 39 --------------------------------- 2 files changed, 40 deletions(-) delete mode 100644 pkg/kubectl/versioned_client.go diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index fd1a9876c1a..ed937ba9a23 100644 --- a/pkg/kubectl/BUILD +++ 
b/pkg/kubectl/BUILD @@ -116,7 +116,6 @@ go_library( "service_basic.go", "serviceaccount.go", "sorting_printer.go", - "versioned_client.go", ], importpath = "k8s.io/kubernetes/pkg/kubectl", deps = [ diff --git a/pkg/kubectl/versioned_client.go b/pkg/kubectl/versioned_client.go deleted file mode 100644 index f381aed69d4..00000000000 --- a/pkg/kubectl/versioned_client.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - clientappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" - clientextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" -) - -// TODO: get rid of this and plumb the caller correctly -func versionedExtensionsClientV1beta1(internalClient internalclientset.Interface) clientextensionsv1beta1.ExtensionsV1beta1Interface { - if internalClient == nil { - return &clientextensionsv1beta1.ExtensionsV1beta1Client{} - } - return clientextensionsv1beta1.New(internalClient.Extensions().RESTClient()) -} - -// TODO: get rid of this and plumb the caller correctly -func versionedAppsClientV1beta1(internalClient internalclientset.Interface) clientappsv1beta1.AppsV1beta1Interface { - if internalClient == nil { - return &clientappsv1beta1.AppsV1beta1Client{} - } - return clientappsv1beta1.New(internalClient.Apps().RESTClient()) -} From 
af882afd3e04f8d7a00fc74246c54f2daf0e68e8 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Fri, 1 Dec 2017 14:41:48 +0800 Subject: [PATCH 184/794] should not ignore return messages from wait function --- .../openstack/openstack_loadbalancer.go | 86 +++++++++++++++---- 1 file changed, 69 insertions(+), 17 deletions(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 471fdc31a61..c90cb9b2a90 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -660,7 +660,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv glog.V(2).Infof("LoadBalancer %s already exists", name) } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } lbmethod := v2pools.LBMethod(lbaas.opts.LBMethod) if lbmethod == "" { @@ -685,7 +688,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv // Unknown error, retry later return nil, fmt.Errorf("error creating LB listener: %v", err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } glog.V(4).Infof("Listener for %s port %d: %s", string(port.Protocol), int(port.Port), listener.ID) @@ -711,7 +717,11 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv // Unknown error, retry later return nil, fmt.Errorf("error creating pool for listener %s: %v", listener.ID, err) } - 
waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } + } glog.V(4).Infof("Pool for listener %s: %s", listener.ID, pool.ID) @@ -742,7 +752,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv return nil, fmt.Errorf("error creating LB pool member for node: %s, %v", node.Name, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } else { // After all members have been processed, remaining members are deleted as obsolete. members = popMember(members, addr, int(port.NodePort)) @@ -758,7 +771,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } monitorID := pool.MonitorID @@ -774,7 +790,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv if err != nil { return nil, fmt.Errorf("error creating LB pool healthmonitor: %v", err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return nil, 
fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } monitorID = monitor.ID } else if lbaas.opts.CreateMonitor == false { glog.V(4).Infof("Do not create monitor for pool %s when create-monitor is false", pool.ID) @@ -802,7 +821,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleting obsolete monitor %s for pool %s: %v", monitorID, pool.ID, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } // get and delete pool members members, err := getMembersByPoolID(lbaas.lb, pool.ID) @@ -816,7 +838,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } } glog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID) @@ -825,14 +850,20 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleting obsolete pool %s for listener %s: %v", pool.ID, listener.ID, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return nil, 
fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } // delete listener err = listeners.Delete(lbaas.lb, listener.ID).ExtractErr() if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleteting obsolete listener: %v", err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } glog.V(2).Infof("Deleted obsolete listener: %s", listener.ID) } @@ -1168,7 +1199,10 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service if err != nil { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } // Remove any old members for this port @@ -1181,7 +1215,10 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service if err != nil && !isNotFound(err) { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } } @@ -1342,7 +1379,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. 
if err != nil && !isNotFound(err) { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } // delete all members and pools @@ -1353,7 +1393,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. if err != nil && !isNotFound(err) { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } // delete pool @@ -1361,7 +1404,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. if err != nil && !isNotFound(err) { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } // delete all listeners @@ -1370,7 +1416,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. if err != nil && !isNotFound(err) { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + if err != nil { + return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) + } } // delete loadbalancer @@ -1378,7 +1427,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. 
if err != nil && !isNotFound(err) { return err } - waitLoadbalancerDeleted(lbaas.lb, loadbalancer.ID) + err = waitLoadbalancerDeleted(lbaas.lb, loadbalancer.ID) + if err != nil { + return fmt.Errorf("failed to delete loadbalancer: %v", err) + } // Delete the Security Group if lbaas.opts.ManageSecurityGroups { From fddbff25f6a32f0414f9178ccaa8ef4244634ea0 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Fri, 1 Dec 2017 12:25:50 +0100 Subject: [PATCH 185/794] kube-apiserver: fix runtime-config flag docs --- pkg/kubeapiserver/options/api_enablement.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/kubeapiserver/options/api_enablement.go b/pkg/kubeapiserver/options/api_enablement.go index fe5accd4f1a..a5d7f625de0 100644 --- a/pkg/kubeapiserver/options/api_enablement.go +++ b/pkg/kubeapiserver/options/api_enablement.go @@ -38,7 +38,8 @@ func NewAPIEnablementOptions() *APIEnablementOptions { func (s *APIEnablementOptions) AddFlags(fs *pflag.FlagSet) { fs.Var(&s.RuntimeConfig, "runtime-config", ""+ "A set of key=value pairs that describe runtime configuration that may be passed "+ - "to apiserver. apis/ key can be used to turn on/off specific api versions. "+ - "apis// can be used to turn on/off specific resources. api/all and "+ + "to apiserver. / (or for the core group) key can be used to "+ + "turn on/off specific api versions. // (or / "+ + "for the core group) can be used to turn on/off specific resources. 
api/all and "+ "api/legacy are special keys to control all and legacy api versions respectively.") } From f760e00af74bd7776bfe49f6f54343a78f3b70eb Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 1 Dec 2017 15:14:58 +0100 Subject: [PATCH 186/794] Add job controller test verifying if backoff is reseted on success --- pkg/controller/job/job_controller_test.go | 148 ++++++++++++++++------ 1 file changed, 106 insertions(+), 42 deletions(-) diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go index 4a9ddac1f88..21a2b8f8d25 100644 --- a/pkg/controller/job/job_controller_test.go +++ b/pkg/controller/job/job_controller_test.go @@ -102,24 +102,43 @@ func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod con return jm, sharedInformers } +func newPod(name string, job *batch.Job) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: job.Spec.Selector.MatchLabels, + Namespace: job.Namespace, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(job, controllerKind)}, + }, + } +} + // create count pods with the given phase for the given job func newPodList(count int32, status v1.PodPhase, job *batch.Job) []v1.Pod { pods := []v1.Pod{} for i := int32(0); i < count; i++ { - newPod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("pod-%v", rand.String(10)), - Labels: job.Spec.Selector.MatchLabels, - Namespace: job.Namespace, - OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(job, controllerKind)}, - }, - Status: v1.PodStatus{Phase: status}, - } - pods = append(pods, newPod) + newPod := newPod(fmt.Sprintf("pod-%v", rand.String(10)), job) + newPod.Status = v1.PodStatus{Phase: status} + pods = append(pods, *newPod) } return pods } +func setPodsStatuses(podIndexer cache.Indexer, job *batch.Job, pendingPods, activePods, succeededPods, failedPods int32) { + for _, pod := range newPodList(pendingPods, v1.PodPending, job) { + 
podIndexer.Add(&pod) + } + for _, pod := range newPodList(activePods, v1.PodRunning, job) { + podIndexer.Add(&pod) + } + for _, pod := range newPodList(succeededPods, v1.PodSucceeded, job) { + podIndexer.Add(&pod) + } + for _, pod := range newPodList(failedPods, v1.PodFailed, job) { + podIndexer.Add(&pod) + } +} + func TestControllerSyncJob(t *testing.T) { jobConditionComplete := batch.JobComplete jobConditionFailed := batch.JobFailed @@ -273,18 +292,7 @@ func TestControllerSyncJob(t *testing.T) { } sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer() - for _, pod := range newPodList(tc.pendingPods, v1.PodPending, job) { - podIndexer.Add(&pod) - } - for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) { - podIndexer.Add(&pod) - } - for _, pod := range newPodList(tc.succeededPods, v1.PodSucceeded, job) { - podIndexer.Add(&pod) - } - for _, pod := range newPodList(tc.failedPods, v1.PodFailed, job) { - podIndexer.Add(&pod) - } + setPodsStatuses(podIndexer, job, tc.pendingPods, tc.activePods, tc.succeededPods, tc.failedPods) // run forget, err := manager.syncJob(getKey(job, t)) @@ -424,15 +432,7 @@ func TestSyncJobPastDeadline(t *testing.T) { job.Status.StartTime = &start sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer() - for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) { - podIndexer.Add(&pod) - } - for _, pod := range newPodList(tc.succeededPods, v1.PodSucceeded, job) { - podIndexer.Add(&pod) - } - for _, pod := range newPodList(tc.failedPods, v1.PodFailed, job) { - podIndexer.Add(&pod) - } + setPodsStatuses(podIndexer, job, 0, tc.activePods, tc.succeededPods, tc.failedPods) // run forget, err := manager.syncJob(getKey(job, t)) @@ -680,17 +680,6 @@ func TestJobPodLookup(t *testing.T) { } } -func newPod(name string, job *batch.Job) 
*v1.Pod { - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: job.Spec.Selector.MatchLabels, - Namespace: job.Namespace, - OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(job, controllerKind)}, - }, - } -} - func TestGetPodsForJob(t *testing.T) { clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) @@ -1269,3 +1258,78 @@ func bumpResourceVersion(obj metav1.Object) { ver, _ := strconv.ParseInt(obj.GetResourceVersion(), 10, 32) obj.SetResourceVersion(strconv.FormatInt(ver+1, 10)) } + +type pods struct { + pending int32 + active int32 + succeed int32 + failed int32 +} + +func TestJobBackoffReset(t *testing.T) { + testCases := map[string]struct { + // job setup + parallelism int32 + completions int32 + backoffLimit int32 + + // pod setup - each row is additive! 
+ pods []pods + }{ + "parallelism=1": { + 1, 2, 1, + []pods{ + {0, 1, 0, 1}, + {0, 0, 1, 0}, + }, + }, + "parallelism=2 (just failure)": { + 2, 2, 1, + []pods{ + {0, 2, 0, 1}, + {0, 0, 1, 0}, + }, + }, + } + + for name, tc := range testCases { + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) + DefaultJobBackOff = time.Duration(0) // overwrite the default value for testing + manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.podStoreSynced = alwaysReady + manager.jobStoreSynced = alwaysReady + var actual *batch.Job + manager.updateHandler = func(job *batch.Job) error { + actual = job + return nil + } + + // job & pods setup + job := newJob(tc.parallelism, tc.completions, tc.backoffLimit) + key := getKey(job, t) + sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) + podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer() + + setPodsStatuses(podIndexer, job, tc.pods[0].pending, tc.pods[0].active, tc.pods[0].succeed, tc.pods[0].failed) + manager.queue.Add(key) + manager.processNextWorkItem() + retries := manager.queue.NumRequeues(key) + if retries != 1 { + t.Errorf("%s: expected exactly 1 retry, got %d", name, retries) + } + + job = actual + sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Replace([]interface{}{actual}, actual.ResourceVersion) + setPodsStatuses(podIndexer, job, tc.pods[1].pending, tc.pods[1].active, tc.pods[1].succeed, tc.pods[1].failed) + manager.processNextWorkItem() + retries = manager.queue.NumRequeues(key) + if retries != 0 { + t.Errorf("%s: expected exactly 0 retries, got %d", name, retries) + } + if getCondition(actual, batch.JobFailed, "BackoffLimitExceeded") { + t.Errorf("%s: unexpected job 
failure", name) + } + } +} From 0f591aeabc2572d245b101676f1a211ac2ac6433 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Fri, 1 Dec 2017 16:28:32 +0200 Subject: [PATCH 187/794] Fix flake8 error --- .../juju/layers/kubernetes-master/reactive/kubernetes_master.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index a56737e16d7..942171f7f3e 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -114,7 +114,6 @@ def check_for_upgrade_needed(): set_upgrade_needed(forced=True) - def snap_resources_changed(): ''' Check if the snapped resources have changed. The first time this method is From 0a5a1f1cb45f847f43b4ba66097949562fe31207 Mon Sep 17 00:00:00 2001 From: Bruno Miguel Custodio Date: Fri, 1 Dec 2017 17:00:05 +0000 Subject: [PATCH 188/794] fix wording in kube-scheduler warning --- plugin/cmd/kube-scheduler/app/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go index 93982f898e3..0f6f274c15e 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/plugin/cmd/kube-scheduler/app/server.go @@ -156,7 +156,7 @@ func NewOptions() (*Options, error) { func (o *Options) Complete() error { if len(o.ConfigFile) == 0 { - glog.Warning("WARNING: all flags than --config are deprecated. Please begin using a config file ASAP.") + glog.Warning("WARNING: all flags other than --config are deprecated. 
Please begin using a config file ASAP.") o.applyDeprecatedHealthzAddressToConfig() o.applyDeprecatedHealthzPortToConfig() o.applyDeprecatedAlgorithmSourceOptionsToConfig() From 38f4f9b303f1718bac312192c137d9e2cb3c8748 Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Fri, 1 Dec 2017 11:47:49 -0800 Subject: [PATCH 189/794] Cleanup for service API validation --- pkg/apis/core/validation/BUILD | 1 + pkg/apis/core/validation/validation.go | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/apis/core/validation/BUILD b/pkg/apis/core/validation/BUILD index e2d08109daa..19169985a54 100644 --- a/pkg/apis/core/validation/BUILD +++ b/pkg/apis/core/validation/BUILD @@ -24,6 +24,7 @@ go_library( "//pkg/capabilities:go_default_library", "//pkg/features:go_default_library", "//pkg/fieldpath:go_default_library", + "//pkg/master/ports:go_default_library", "//pkg/security/apparmor:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 655d0bf2163..a98bad9e29a 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -51,6 +51,7 @@ import ( "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/fieldpath" + "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/security/apparmor" ) @@ -3390,9 +3391,9 @@ func ValidateService(service *core.Service) field.ErrorList { // This is a workaround for broken cloud environments that // over-open firewalls. Hopefully it can go away when more clouds // understand containers better. 
- if port.Port == 10250 { + if port.Port == ports.KubeletPort { portPath := specPath.Child("ports").Index(ix) - allErrs = append(allErrs, field.Invalid(portPath, port.Port, "may not expose port 10250 externally since it is used by kubelet")) + allErrs = append(allErrs, field.Invalid(portPath, port.Port, fmt.Sprintf("may not expose port %v externally since it is used by kubelet", ports.KubeletPort))) } } if service.Spec.ClusterIP == "None" { @@ -3404,7 +3405,7 @@ func ValidateService(service *core.Service) field.ErrorList { } case core.ServiceTypeExternalName: if service.Spec.ClusterIP != "" { - allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty for ExternalName services")) + allErrs = append(allErrs, field.Forbidden(specPath.Child("clusterIP"), "must be empty for ExternalName services")) } if len(service.Spec.ExternalName) > 0 { allErrs = append(allErrs, ValidateDNS1123Subdomain(service.Spec.ExternalName, specPath.Child("externalName"))...) 
@@ -3483,7 +3484,7 @@ func ValidateService(service *core.Service) field.ErrorList { for i := range service.Spec.Ports { portPath := portsPath.Index(i) if service.Spec.Ports[i].NodePort != 0 { - allErrs = append(allErrs, field.Invalid(portPath.Child("nodePort"), service.Spec.Ports[i].NodePort, "may not be used when `type` is 'ClusterIP'")) + allErrs = append(allErrs, field.Forbidden(portPath.Child("nodePort"), "may not be used when `type` is 'ClusterIP'")) } } } @@ -3533,7 +3534,7 @@ func ValidateService(service *core.Service) field.ErrorList { val = service.Annotations[core.AnnotationLoadBalancerSourceRangesKey] } if service.Spec.Type != core.ServiceTypeLoadBalancer { - allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'")) + allErrs = append(allErrs, field.Forbidden(fieldPath, "may only be used when `type` is 'LoadBalancer'")) } _, err := apiservice.GetLoadBalancerSourceRanges(service) if err != nil { From 88b30392e643428545197a047c334d6f50c075ba Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Fri, 1 Dec 2017 12:26:30 -0800 Subject: [PATCH 190/794] Fix typo in test comment. --- test/integration/scheduler_perf/scheduler_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/scheduler_perf/scheduler_test.go b/test/integration/scheduler_perf/scheduler_test.go index 8da4b2212bf..e6073377d75 100644 --- a/test/integration/scheduler_perf/scheduler_test.go +++ b/test/integration/scheduler_perf/scheduler_test.go @@ -258,7 +258,7 @@ func (inputConfig *schedulerPerfConfig) generatePodAndNodeTopology(config *testC } // writePodAndNodeTopologyToConfig reads a configuration and then applies it to a test configuration. -//TODO: As of now, this function is not doing anything expect for reading input values to priority structs. +//TODO: As of now, this function is not doing anything except for reading input values to priority structs. 
func writePodAndNodeTopologyToConfig(config *testConfig) error { // High Level structure that should be filled for every predicate or priority. inputConfig := &schedulerPerfConfig{ From 25050da75837b2aaee8d87bbdcef8498a21f610f Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Wed, 29 Nov 2017 19:38:03 -0500 Subject: [PATCH 191/794] Add e2e test for volume resizing --- test/e2e/storage/BUILD | 1 + test/e2e/storage/volume_expand.go | 206 ++++++++++++++++++++++++++++++ 2 files changed, 207 insertions(+) create mode 100644 test/e2e/storage/volume_expand.go diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index c65c4413f9b..ddee6ef0107 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -19,6 +19,7 @@ go_library( "persistent_volumes-vsphere.go", "pv_reclaimpolicy.go", "pvc_label_selector.go", + "volume_expand.go", "volume_io.go", "volume_metrics.go", "volume_provisioning.go", diff --git a/test/e2e/storage/volume_expand.go b/test/e2e/storage/volume_expand.go new file mode 100644 index 00000000000..f8ef16d2f14 --- /dev/null +++ b/test/e2e/storage/volume_expand.go @@ -0,0 +1,206 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" +) + +const ( + resizePollInterval = 2 * time.Second + // total time to wait for cloudprovider or file system resize to finish + totalResizeWaitPeriod = 20 * time.Minute +) + +var _ = SIGDescribe("Volume expand [Feature:ExpandPersistentVolumes] [Slow]", func() { + var ( + c clientset.Interface + ns string + err error + pvc *v1.PersistentVolumeClaim + resizableSc *storage.StorageClass + ) + + f := framework.NewDefaultFramework("volume-expand") + BeforeEach(func() { + framework.SkipUnlessProviderIs("aws", "gce") + c = f.ClientSet + ns = f.Namespace.Name + framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + test := storageClassTest{ + name: "default", + claimSize: "2Gi", + } + resizableSc, err = createResizableStorageClass(test, ns, "resizing", c) + Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class") + Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue()) + + pvc = newClaim(test, ns, "default") + pvc.Spec.StorageClassName = &resizableSc.Name + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + Expect(err).NotTo(HaveOccurred(), "Error creating pvc") + }) + + AfterEach(func() { + framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace)) + framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(resizableSc.Name, nil)) + }) + + It("Verify if editing PVC allows resize", func() { + By("Waiting for pvc to be in bound phase") + pvcClaims := []*v1.PersistentVolumeClaim{pvc} + pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", 
err) + Expect(len(pvs)).To(Equal(1)) + + By("Creating a pod with dynamically provisioned volume") + pod, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "") + Expect(err).NotTo(HaveOccurred(), "While creating pods for resizing") + defer func() { + err = framework.DeletePodWithWait(f, c, pod) + Expect(err).NotTo(HaveOccurred(), "while cleaning up pod already deleted in resize test") + }() + + By("Expanding current pvc") + newSize := resource.MustParse("6Gi") + pvc, err = expandPVCSize(pvc, newSize, c) + Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size") + Expect(pvc).NotTo(BeNil()) + + pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] + if pvcSize.Cmp(newSize) != 0 { + framework.Failf("error updating pvc size %q", pvc.Name) + } + + By("Waiting for cloudprovider resize to finish") + err = waitForControllerVolumeResize(pvc, c) + Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish") + + By("Checking for conditions on pvc") + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "While fetching pvc after controller resize") + + inProgressConditions := pvc.Status.Conditions + Expect(len(inProgressConditions)).To(Equal(1), "pvc must have resize condition") + Expect(inProgressConditions[0].Type).To(Equal(v1.PersistentVolumeClaimResizing), "pvc must have resizing condition") + + By("Deleting the previously created pod") + err = framework.DeletePodWithWait(f, c, pod) + Expect(err).NotTo(HaveOccurred(), "while deleting pod for resizing") + + By("Creating a new pod with same volume") + pod2, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "") + Expect(err).NotTo(HaveOccurred(), "while recreating pod for resizing") + defer func() { + err = framework.DeletePodWithWait(f, c, pod2) + Expect(err).NotTo(HaveOccurred(), "while cleaning up pod before exiting resizing test") + }() + + By("Waiting for file system resize to finish") + pvc, err = 
waitForFSResize(pvc, c) + Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish") + + pvcConditions := pvc.Status.Conditions + Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions") + }) +}) + +func createResizableStorageClass(t storageClassTest, ns string, suffix string, c clientset.Interface) (*storage.StorageClass, error) { + stKlass := newStorageClass(t, ns, suffix) + allowExpansion := true + stKlass.AllowVolumeExpansion = &allowExpansion + + var err error + stKlass, err = c.StorageV1().StorageClasses().Create(stKlass) + return stKlass, err +} + +func expandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c clientset.Interface) (*v1.PersistentVolumeClaim, error) { + pvcName := origPVC.Name + updatedPVC := origPVC.DeepCopy() + + waitErr := wait.PollImmediate(resizePollInterval, 30*time.Second, func() (bool, error) { + var err error + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(pvcName, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("error fetching pvc %q for resizing with %v", pvcName, err) + } + + updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Update(updatedPVC) + if err == nil { + return true, nil + } + framework.Logf("Error updating pvc %s with %v", pvcName, err) + return false, nil + }) + return updatedPVC, waitErr +} + +func waitForControllerVolumeResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface) error { + pvName := pvc.Spec.VolumeName + return wait.PollImmediate(resizePollInterval, totalResizeWaitPeriod, func() (bool, error) { + pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] + + pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("error fetching pv %q for resizing %v", pvName, err) + } + + pvSize := pv.Spec.Capacity[v1.ResourceStorage] + + // If pv size is greater or equal to 
requested size that means controller resize is finished. + if pvSize.Cmp(pvcSize) >= 0 { + return true, nil + } + return false, nil + }) +} + +func waitForFSResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) { + var updatedPVC *v1.PersistentVolumeClaim + waitErr := wait.PollImmediate(resizePollInterval, totalResizeWaitPeriod, func() (bool, error) { + var err error + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + + if err != nil { + return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err) + } + + pvcSize := updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] + pvcStatusSize := updatedPVC.Status.Capacity[v1.ResourceStorage] + + //If pvc's status field size is greater than or equal to pvc's size then done + if pvcStatusSize.Cmp(pvcSize) >= 0 { + return true, nil + } + return false, nil + }) + return updatedPVC, waitErr +} From ecdb47a40b654a316d99265b9297b21f016aa65a Mon Sep 17 00:00:00 2001 From: Shaomin Chen Date: Fri, 1 Dec 2017 10:31:48 -0800 Subject: [PATCH 192/794] Fix issue #390 --- pkg/cloudprovider/providers/vsphere/nodemanager.go | 9 +++++++-- pkg/cloudprovider/providers/vsphere/vsphere_util.go | 12 +++++++++--- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/pkg/cloudprovider/providers/vsphere/nodemanager.go b/pkg/cloudprovider/providers/vsphere/nodemanager.go index 493ea61045e..c068a239afd 100644 --- a/pkg/cloudprovider/providers/vsphere/nodemanager.go +++ b/pkg/cloudprovider/providers/vsphere/nodemanager.go @@ -18,13 +18,14 @@ package vsphere import ( "fmt" + "strings" + "sync" + "github.com/golang/glog" "golang.org/x/net/context" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" - "strings" - "sync" ) // Stores info about the kubernetes node @@ -241,6 +242,10 @@ func (nm *NodeManager) removeNode(node *v1.Node) { 
nm.registeredNodesLock.Lock() delete(nm.registeredNodes, node.ObjectMeta.Name) nm.registeredNodesLock.Unlock() + + nm.nodeInfoLock.Lock() + delete(nm.nodeInfoMap, node.ObjectMeta.Name) + nm.nodeInfoLock.Unlock() } // GetNodeInfo returns a NodeInfo which datacenter, vm and vc server ip address. diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/pkg/cloudprovider/providers/vsphere/vsphere_util.go index efedb062139..62687d32ea0 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -32,12 +32,13 @@ import ( "fmt" + "path/filepath" + "github.com/vmware/govmomi/vim25/mo" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers" - "path/filepath" ) const ( @@ -194,13 +195,18 @@ func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, return nil, fmt.Errorf(msg) } var sharedDatastores []*vclib.DatastoreInfo - for index, nodeVmDetail := range nodeVmDetails { + for _, nodeVmDetail := range nodeVmDetails { glog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName) accessibleDatastores, err := getAccessibleDatastores(ctx, &nodeVmDetail, nodeManager) if err != nil { + if err == vclib.ErrNoVMFound { + glog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName) + continue + } return nil, err } - if index == 0 { + + if len(sharedDatastores) == 0 { sharedDatastores = accessibleDatastores } else { sharedDatastores = intersect(sharedDatastores, accessibleDatastores) From aac60b6cbbe330bd6c9ef37bc52ec607fe8f6298 Mon Sep 17 00:00:00 2001 From: wackxu Date: Thu, 30 Nov 2017 16:46:17 +0800 Subject: [PATCH 193/794] delete a node from its cache if it gets node not found error --- plugin/pkg/scheduler/factory/factory.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git 
a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index 764f449ccc4..a52b8d69fc0 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -1109,6 +1109,21 @@ func (factory *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod } else { if _, ok := err.(*core.FitError); ok { glog.V(4).Infof("Unable to schedule %v %v: no fit: %v; waiting", pod.Namespace, pod.Name, err) + } else if errors.IsNotFound(err) { + if errStatus, ok := err.(errors.APIStatus); ok && errStatus.Status().Details.Kind == "node" { + nodeName := errStatus.Status().Details.Name + // when node is not found, We do not remove the node right away. Trying again to get + // the node and if the node is still not found, then remove it from the scheduler cache. + _, err := factory.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil && errors.IsNotFound(err) { + node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + factory.schedulerCache.RemoveNode(&node) + // invalidate cached predicate for the node + if factory.enableEquivalenceClassCache { + factory.equivalencePodCache.InvalidateAllCachedPredicateItemOfNode(nodeName) + } + } + } } else { glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err) } From 623d7c42ac2b0c24a609c48fa83905f4f36360b6 Mon Sep 17 00:00:00 2001 From: dhilipkumars Date: Sat, 2 Dec 2017 12:47:31 +0530 Subject: [PATCH 194/794] Move some tests to use go sub-test --- pkg/controller/deployment/progress_test.go | 70 ++++++------ pkg/controller/deployment/recreate_test.go | 8 +- pkg/controller/deployment/sync_test.go | 122 +++++++++++---------- 3 files changed, 104 insertions(+), 96 deletions(-) diff --git a/pkg/controller/deployment/progress_test.go b/pkg/controller/deployment/progress_test.go index 9677728e7d0..978b21469fb 100644 --- a/pkg/controller/deployment/progress_test.go +++ b/pkg/controller/deployment/progress_test.go @@ -163,13 +163,15 
@@ func TestRequeueStuckDeployment(t *testing.T) { dc.enqueueDeployment = dc.enqueue for _, test := range tests { - if test.nowFn != nil { - nowFn = test.nowFn - } - got := dc.requeueStuckDeployment(test.d, test.status) - if got != test.expected { - t.Errorf("%s: got duration: %v, expected duration: %v", test.name, got, test.expected) - } + t.Run(test.name, func(t *testing.T) { + if test.nowFn != nil { + nowFn = test.nowFn + } + got := dc.requeueStuckDeployment(test.d, test.status) + if got != test.expected { + t.Errorf("%s: got duration: %v, expected duration: %v", test.name, got, test.expected) + } + }) } } @@ -310,32 +312,34 @@ func TestSyncRolloutStatus(t *testing.T) { } for _, test := range tests { - fake := fake.Clientset{} - dc := &DeploymentController{ - client: &fake, - } - - if test.newRS != nil { - test.allRSs = append(test.allRSs, test.newRS) - } - - err := dc.syncRolloutStatus(test.allRSs, test.newRS, test.d) - if err != nil { - t.Error(err) - } - - newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType) - switch { - case newCond == nil: - if test.d.Spec.ProgressDeadlineSeconds != nil { - t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType) + t.Run(test.name, func(t *testing.T) { + fake := fake.Clientset{} + dc := &DeploymentController{ + client: &fake, } - case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason: - t.Errorf("%s: DeploymentProgressing has status %s with reason %s. 
Expected %s with %s.", test.name, newCond.Status, newCond.Reason, test.conditionStatus, test.conditionReason) - case !test.lastUpdate.IsZero() && test.lastUpdate != testTime: - t.Errorf("%s: LastUpdateTime was changed to %s but expected %s;", test.name, test.lastUpdate, testTime) - case !test.lastTransition.IsZero() && test.lastTransition != testTime: - t.Errorf("%s: LastTransitionTime was changed to %s but expected %s;", test.name, test.lastTransition, testTime) - } + + if test.newRS != nil { + test.allRSs = append(test.allRSs, test.newRS) + } + + err := dc.syncRolloutStatus(test.allRSs, test.newRS, test.d) + if err != nil { + t.Error(err) + } + + newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType) + switch { + case newCond == nil: + if test.d.Spec.ProgressDeadlineSeconds != nil { + t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType) + } + case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason: + t.Errorf("%s: DeploymentProgressing has status %s with reason %s. 
Expected %s with %s.", test.name, newCond.Status, newCond.Reason, test.conditionStatus, test.conditionReason) + case !test.lastUpdate.IsZero() && test.lastUpdate != testTime: + t.Errorf("%s: LastUpdateTime was changed to %s but expected %s;", test.name, test.lastUpdate, testTime) + case !test.lastTransition.IsZero() && test.lastTransition != testTime: + t.Errorf("%s: LastTransitionTime was changed to %s but expected %s;", test.name, test.lastTransition, testTime) + } + }) } } diff --git a/pkg/controller/deployment/recreate_test.go b/pkg/controller/deployment/recreate_test.go index 2cf8661780a..d557b5633ab 100644 --- a/pkg/controller/deployment/recreate_test.go +++ b/pkg/controller/deployment/recreate_test.go @@ -115,9 +115,11 @@ func TestOldPodsRunning(t *testing.T) { } for _, test := range tests { - if expected, got := test.expected, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got { - t.Errorf("%s: expected %t, got %t", test.name, expected, got) - } + t.Run(test.name, func(t *testing.T) { + if expected, got := test.expected, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got { + t.Errorf("%s: expected %t, got %t", test.name, expected, got) + } + }) } } diff --git a/pkg/controller/deployment/sync_test.go b/pkg/controller/deployment/sync_test.go index ce74a3eead6..6f5cc96b344 100644 --- a/pkg/controller/deployment/sync_test.go +++ b/pkg/controller/deployment/sync_test.go @@ -267,72 +267,74 @@ func TestScale(t *testing.T) { } for _, test := range tests { - _ = olderTimestamp - t.Log(test.name) - fake := fake.Clientset{} - dc := &DeploymentController{ - client: &fake, - eventRecorder: &record.FakeRecorder{}, - } - - if test.newRS != nil { - desiredReplicas := *(test.oldDeployment.Spec.Replicas) - if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok { - desiredReplicas = desired + t.Run(test.name, func(t *testing.T) { + _ = olderTimestamp + t.Log(test.name) + fake := fake.Clientset{} + dc := 
&DeploymentController{ + client: &fake, + eventRecorder: &record.FakeRecorder{}, } - deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment)) - } - for i := range test.oldRSs { - rs := test.oldRSs[i] - if rs == nil { - continue - } - desiredReplicas := *(test.oldDeployment.Spec.Replicas) - if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok { - desiredReplicas = desired - } - deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment)) - } - if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil { - t.Errorf("%s: unexpected error: %v", test.name, err) - continue - } + if test.newRS != nil { + desiredReplicas := *(test.oldDeployment.Spec.Replicas) + if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok { + desiredReplicas = desired + } + deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment)) + } + for i := range test.oldRSs { + rs := test.oldRSs[i] + if rs == nil { + continue + } + desiredReplicas := *(test.oldDeployment.Spec.Replicas) + if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok { + desiredReplicas = desired + } + deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment)) + } - // Construct the nameToSize map that will hold all the sizes we got our of tests - // Skip updating the map if the replica set wasn't updated since there will be - // no update action for it. - nameToSize := make(map[string]int32) - if test.newRS != nil { - nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas) - } - for i := range test.oldRSs { - rs := test.oldRSs[i] - nameToSize[rs.Name] = *(rs.Spec.Replicas) - } - // Get all the UPDATE actions and update nameToSize with all the updated sizes. 
- for _, action := range fake.Actions() { - rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet) - if !test.wasntUpdated[rs.Name] { + if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil { + t.Errorf("%s: unexpected error: %v", test.name, err) + return + } + + // Construct the nameToSize map that will hold all the sizes we got our of tests + // Skip updating the map if the replica set wasn't updated since there will be + // no update action for it. + nameToSize := make(map[string]int32) + if test.newRS != nil { + nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas) + } + for i := range test.oldRSs { + rs := test.oldRSs[i] nameToSize[rs.Name] = *(rs.Spec.Replicas) } - } - - if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] { - t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name]) - continue - } - if len(test.expectedOld) != len(test.oldRSs) { - t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs)) - continue - } - for n := range test.oldRSs { - rs := test.oldRSs[n] - expected := test.expectedOld[n] - if *(expected.Spec.Replicas) != nameToSize[rs.Name] { - t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name]) + // Get all the UPDATE actions and update nameToSize with all the updated sizes. 
+ for _, action := range fake.Actions() { + rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet) + if !test.wasntUpdated[rs.Name] { + nameToSize[rs.Name] = *(rs.Spec.Replicas) + } } - } + + if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] { + t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name]) + return + } + if len(test.expectedOld) != len(test.oldRSs) { + t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs)) + return + } + for n := range test.oldRSs { + rs := test.oldRSs[n] + expected := test.expectedOld[n] + if *(expected.Spec.Replicas) != nameToSize[rs.Name] { + t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name]) + } + } + }) } } From 4b71941082da8b04d330e916be3fa0b33501ccce Mon Sep 17 00:00:00 2001 From: Ri Xu Date: Sat, 2 Dec 2017 21:12:39 +0800 Subject: [PATCH 195/794] Simple code and typo fixed. 
Signed-off-by: Ri Xu --- pkg/controller/service/service_controller.go | 2 +- pkg/kubectl/explain/formatter.go | 4 ++-- pkg/kubectl/plugins/plugins.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/controller/service/service_controller.go b/pkg/controller/service/service_controller.go index 38c6b99da2c..3496f0681ca 100644 --- a/pkg/controller/service/service_controller.go +++ b/pkg/controller/service/service_controller.go @@ -747,7 +747,7 @@ func (s *ServiceController) syncService(key string) error { var cachedService *cachedService var retryDelay time.Duration defer func() { - glog.V(4).Infof("Finished syncing service %q (%v)", key, time.Now().Sub(startTime)) + glog.V(4).Infof("Finished syncing service %q (%v)", key, time.Since(startTime)) }() namespace, name, err := cache.SplitMetaNamespaceKey(key) diff --git a/pkg/kubectl/explain/formatter.go b/pkg/kubectl/explain/formatter.go index 5543dbf1bb9..ab9c53e9ed1 100644 --- a/pkg/kubectl/explain/formatter.go +++ b/pkg/kubectl/explain/formatter.go @@ -105,10 +105,10 @@ func wrapString(str string, wrap int) []string { l := line{wrap: wrap} for _, word := range words { - if l.Add(word) == false { + if !l.Add(word) { wrapped = append(wrapped, l.String()) l = line{wrap: wrap} - if l.Add(word) == false { + if !l.Add(word) { panic("Couldn't add to empty line.") } } diff --git a/pkg/kubectl/plugins/plugins.go b/pkg/kubectl/plugins/plugins.go index f2cc17847b3..7eca6a2f35f 100644 --- a/pkg/kubectl/plugins/plugins.go +++ b/pkg/kubectl/plugins/plugins.go @@ -65,7 +65,7 @@ func (p Plugin) Validate() error { if len(p.Name) == 0 || len(p.ShortDesc) == 0 || (len(p.Command) == 0 && len(p.Tree) == 0) { return ErrIncompletePlugin } - if strings.Index(p.Name, " ") > -1 { + if strings.Contains(p.Name, " ") { return ErrInvalidPluginName } for _, flag := range p.Flags { @@ -102,7 +102,7 @@ func (f Flag) Validate() error { if len(f.Name) == 0 || len(f.Desc) == 0 { return ErrIncompleteFlag } - if 
strings.Index(f.Name, " ") > -1 { + if strings.Contains(f.Name, " ") { return ErrInvalidFlagName } return f.ValidateShorthand() From 7568462ec31bdefbecfd355a8763cbef469a6cf9 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sat, 2 Dec 2017 21:45:07 -0500 Subject: [PATCH 196/794] Remove hacks added for mesos Since Mesos is no longer in your main repository and since we have things like dynamic kubelet configuration in progress, we should drop these undocumented, untested, private hooks. cmd/kubelet/app/server.go::CreateAPIServerClientConfig CreateAPIServerClientConfig::getRuntime pkg/kubelet/kubelet_pods.go::getPhase Also remove stuff from Dependencies struct that were specific to the Mesos integration (ContainerRuntimeOptions and Options) Also remove stale references in test/e2e and and test owners file --- cmd/kubelet/app/server.go | 13 ++++--------- pkg/api/endpoints/util.go | 3 +-- pkg/kubelet/kubelet.go | 22 +--------------------- pkg/kubelet/kubelet_getters.go | 5 ++--- pkg/kubelet/kubelet_network.go | 8 ++++---- pkg/kubelet/kubelet_pods.go | 10 ++++------ pkg/kubelet/kubelet_pods_test.go | 6 +++--- test/e2e/framework/service_util.go | 16 ---------------- test/test_owners.csv | 3 --- 9 files changed, 19 insertions(+), 67 deletions(-) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 8100b44afae..2f37c6251c3 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -333,7 +333,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { var heartbeatClient v1core.CoreV1Interface var externalKubeClient clientset.Interface - clientConfig, err := CreateAPIServerClientConfig(s) + clientConfig, err := createAPIServerClientConfig(s) var clientCertificateManager certificate.Manager if err == nil { @@ -617,10 +617,9 @@ func createClientConfig(s *options.KubeletServer) (*restclient.Config, error) { } } -// CreateAPIServerClientConfig generates a client.Config from command line flags +// 
createAPIServerClientConfig generates a client.Config from command line flags // via createClientConfig and then injects chaos into the configuration via addChaosToClientConfig. -// This func is exported to support integration with third party kubelet extensions (e.g. kubernetes-mesos). -func CreateAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config, error) { +func createAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config, error) { clientConfig, err := createClientConfig(s) if err != nil { return nil, err @@ -692,15 +691,11 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal. credentialprovider.SetPreferredDockercfgPath(kubeFlags.RootDirectory) glog.V(2).Infof("Using root directory: %v", kubeFlags.RootDirectory) - builder := kubeDeps.Builder - if builder == nil { - builder = CreateAndInitKubelet - } if kubeDeps.OSInterface == nil { kubeDeps.OSInterface = kubecontainer.RealOS{} } - k, err := builder(kubeCfg, + k, err := CreateAndInitKubelet(kubeCfg, kubeDeps, &kubeFlags.ContainerRuntimeOptions, kubeFlags.ContainerRuntime, diff --git a/pkg/api/endpoints/util.go b/pkg/api/endpoints/util.go index 49bdbc47a19..3d7b6e514f6 100644 --- a/pkg/api/endpoints/util.go +++ b/pkg/api/endpoints/util.go @@ -89,8 +89,7 @@ type addressKey struct { // any existing ready state. func mapAddressByPort(addr *api.EndpointAddress, port api.EndpointPort, ready bool, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) *api.EndpointAddress { // use addressKey to distinguish between two endpoints that are identical addresses - // but may have come from different hosts, for attribution. For instance, Mesos - // assigns pods the node IP, but the pods are distinct. + // but may have come from different hosts, for attribution. 
key := addressKey{ip: addr.IP} if addr.TargetRef != nil { key.uid = addr.TargetRef.UID diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index fab8dcefbb6..b8421581be9 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -225,27 +225,7 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration, // at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping // these objects while we figure out a more comprehensive dependency injection story for the Kubelet. type Dependencies struct { - // TODO(mtaufen): KubeletBuilder: - // Mesos currently uses this as a hook to let them make their own call to - // let them wrap the KubeletBootstrap that CreateAndInitKubelet returns with - // their own KubeletBootstrap. It's a useful hook. I need to think about what - // a nice home for it would be. There seems to be a trend, between this and - // the Options fields below, of providing hooks where you can add extra functionality - // to the Kubelet for your solution. Maybe we should centralize these sorts of things? - Builder Builder - - // TODO(mtaufen): ContainerRuntimeOptions and Options: - // Arrays of functions that can do arbitrary things to the Kubelet and the Runtime - // seem like a difficult path to trace when it's time to debug something. - // I'm leaving these fields here for now, but there is likely an easier-to-follow - // way to support their intended use cases. E.g. ContainerRuntimeOptions - // is used by Mesos to set an environment variable in containers which has - // some connection to their container GC. It seems that Mesos intends to use - // Options to add additional node conditions that are updated as part of the - // Kubelet lifecycle (see https://github.com/kubernetes/kubernetes/pull/21521). - // We should think about providing more explicit ways of doing these things. 
- ContainerRuntimeOptions []kubecontainer.Option - Options []Option + Options []Option // Injected Dependencies Auth server.AuthInterface diff --git a/pkg/kubelet/kubelet_getters.go b/pkg/kubelet/kubelet_getters.go index fefd8aac144..1b98f9189fb 100644 --- a/pkg/kubelet/kubelet_getters.go +++ b/pkg/kubelet/kubelet_getters.go @@ -179,9 +179,8 @@ func (kl *Kubelet) GetHostname() string { return kl.hostname } -// GetRuntime returns the current Runtime implementation in use by the kubelet. This func -// is exported to simplify integration with third party kubelet extensions (e.g. kubernetes-mesos). -func (kl *Kubelet) GetRuntime() kubecontainer.Runtime { +// getRuntime returns the current Runtime implementation in use by the kubelet. +func (kl *Kubelet) getRuntime() kubecontainer.Runtime { return kl.containerRuntime } diff --git a/pkg/kubelet/kubelet_network.go b/pkg/kubelet/kubelet_network.go index d7e47de5576..73fddb56e50 100644 --- a/pkg/kubelet/kubelet_network.go +++ b/pkg/kubelet/kubelet_network.go @@ -65,7 +65,7 @@ func (nh *networkHost) GetKubeClient() clientset.Interface { } func (nh *networkHost) GetRuntime() kubecontainer.Runtime { - return nh.kubelet.GetRuntime() + return nh.kubelet.getRuntime() } func (nh *networkHost) SupportsLegacyFeatures() bool { @@ -88,7 +88,7 @@ type criNetworkHost struct { // Any network plugin invoked by a cri must implement NamespaceGetter // to talk directly to the runtime instead. 
func (c *criNetworkHost) GetNetNS(containerID string) (string, error) { - return c.kubelet.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID}) + return c.kubelet.getRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID}) } // NoOpLegacyHost implements the network.LegacyHost interface for the remote @@ -106,7 +106,7 @@ func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface { return nil } -// GetRuntime always returns "nil" for 'NoOpLegacyHost' +// getRuntime always returns "nil" for 'NoOpLegacyHost' func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime { return nil } @@ -188,7 +188,7 @@ func (kl *Kubelet) updatePodCIDR(cidr string) { // kubelet -> generic runtime -> runtime shim -> network plugin // docker/rkt non-cri implementations have a passthrough UpdatePodCIDR - if err := kl.GetRuntime().UpdatePodCIDR(cidr); err != nil { + if err := kl.getRuntime().UpdatePodCIDR(cidr); err != nil { glog.Errorf("Failed to update pod CIDR: %v", err) return } diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 6241f4ba340..7c94a30ef13 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -814,7 +814,7 @@ func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *k if runningPod != nil { p = *runningPod } else if status != nil { - p = kubecontainer.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status) + p = kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), status) } else { return fmt.Errorf("one of the two arguments must be non-nil: runningPod, status") } @@ -1231,10 +1231,8 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, lo return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr) } -// GetPhase returns the phase of a pod given its container info. -// This func is exported to simplify integration with 3rd party kubelet -// integrations like kubernetes-mesos. 
-func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { +// getPhase returns the phase of a pod given its container info. +func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { initialized := 0 pendingInitialization := 0 failedInitialization := 0 @@ -1364,7 +1362,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po // Assume info is ready to process spec := &pod.Spec allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) - s.Phase = GetPhase(spec, allStatus) + s.Phase = getPhase(spec, allStatus) kl.probeManager.UpdatePodStatus(pod.UID, s) s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase)) s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase)) diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index f313f80e056..66cde220d44 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -1845,7 +1845,7 @@ func TestPodPhaseWithRestartAlways(t *testing.T) { }, } for _, test := range tests { - status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) + status := getPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) assert.Equal(t, test.status, status, "[test %s]", test.test) } } @@ -1945,7 +1945,7 @@ func TestPodPhaseWithRestartNever(t *testing.T) { }, } for _, test := range tests { - status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) + status := getPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) assert.Equal(t, test.status, status, "[test %s]", test.test) } } @@ -2058,7 +2058,7 @@ func TestPodPhaseWithRestartOnFailure(t *testing.T) { }, } for _, test := range tests { - status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) + status := getPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses) assert.Equal(t, test.status, 
status, "[test %s]", test.test) } } diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index cfb3ebdf278..7288dea4272 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -1103,22 +1103,6 @@ func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID { for _, port := range ss.Ports { for _, addr := range ss.Addresses { containerPort := port.Port - hostPort := port.Port - - // use endpoint annotations to recover the container port in a Mesos setup - // compare contrib/mesos/pkg/service/endpoints_controller.syncService - key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort) - mesosContainerPortString := endpoints.Annotations[key] - if mesosContainerPortString != "" { - mesosContainerPort, err := strconv.Atoi(mesosContainerPortString) - if err != nil { - continue - } - containerPort = int32(mesosContainerPort) - Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString) - } - - // Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort) if _, ok := m[addr.TargetRef.UID]; !ok { m[addr.TargetRef.UID] = make([]int, 0) } diff --git a/test/test_owners.csv b/test/test_owners.csv index d7c2d109dc0..170579118d5 100644 --- a/test/test_owners.csv +++ b/test/test_owners.csv @@ -248,9 +248,6 @@ Loadbalancing: L7 GCE should create ingress with given static-ip,eparis,1, Loadbalancing: L7 Nginx should conform to Ingress spec,ncdc,1,network "Logging soak should survive logging 1KB every * seconds, for a duration of *, scaling up to * pods per node",justinsb,1,node "MemoryEviction when there is memory pressure should evict pods in the correct order (besteffort first, then burstable, then guaranteed)",ixdy,1,node -Mesos applies slave attributes as labels,justinsb,1,apps -Mesos schedules pods annotated with roles on correct slaves,tallclair,1,apps 
-Mesos starts static pods on every node in the mesos cluster,lavalamp,1,apps MetricsGrabber should grab all metrics from API server.,gmarek,0,instrumentation MetricsGrabber should grab all metrics from a ControllerManager.,gmarek,0,instrumentation MetricsGrabber should grab all metrics from a Kubelet.,gmarek,0,instrumentation From 15ed07fb44acd1d88ef966723cecc77928aa6d10 Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Mon, 4 Dec 2017 16:16:06 +0800 Subject: [PATCH 197/794] rename mustrunas to capabilities The file context has nothing to do with `mustrunas`, so it's quite odd to use `mustrunas` as the file name. I guess it's copied from other places. --- pkg/security/podsecuritypolicy/capabilities/BUILD | 4 ++-- .../capabilities/{mustrunas.go => capabilities.go} | 0 .../capabilities/{mustrunas_test.go => capabilities_test.go} | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename pkg/security/podsecuritypolicy/capabilities/{mustrunas.go => capabilities.go} (100%) rename pkg/security/podsecuritypolicy/capabilities/{mustrunas_test.go => capabilities_test.go} (100%) diff --git a/pkg/security/podsecuritypolicy/capabilities/BUILD b/pkg/security/podsecuritypolicy/capabilities/BUILD index 87681f9e788..2d307a808d2 100644 --- a/pkg/security/podsecuritypolicy/capabilities/BUILD +++ b/pkg/security/podsecuritypolicy/capabilities/BUILD @@ -9,8 +9,8 @@ load( go_library( name = "go_default_library", srcs = [ + "capabilities.go", "doc.go", - "mustrunas.go", "types.go", ], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities", @@ -24,7 +24,7 @@ go_library( go_test( name = "go_default_test", - srcs = ["mustrunas_test.go"], + srcs = ["capabilities_test.go"], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities", library = ":go_default_library", deps = [ diff --git a/pkg/security/podsecuritypolicy/capabilities/mustrunas.go b/pkg/security/podsecuritypolicy/capabilities/capabilities.go similarity index 100% rename from 
pkg/security/podsecuritypolicy/capabilities/mustrunas.go rename to pkg/security/podsecuritypolicy/capabilities/capabilities.go diff --git a/pkg/security/podsecuritypolicy/capabilities/mustrunas_test.go b/pkg/security/podsecuritypolicy/capabilities/capabilities_test.go similarity index 100% rename from pkg/security/podsecuritypolicy/capabilities/mustrunas_test.go rename to pkg/security/podsecuritypolicy/capabilities/capabilities_test.go From 44877d87cbf5c23408d07e567dcf4d6be2387bdb Mon Sep 17 00:00:00 2001 From: Li Yi Date: Mon, 4 Dec 2017 21:16:31 +0800 Subject: [PATCH 198/794] Fix typo Change-Id: Ie8a4e9cf510fe2f7e7445af03476a0e7759a0360 Signed-off-by: Li Yi --- plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go index 1b7b34d96bd..6f613342cad 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -163,7 +163,7 @@ func defaultPredicates() sets.String { // Fit is determined by node disk pressure condition. factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate), - // Fit is determied by node condtions: not ready, network unavailable and out of disk. + // Fit is determined by node conditions: not ready, network unavailable or out of disk. 
factory.RegisterMandatoryFitPredicate("CheckNodeCondition", predicates.CheckNodeConditionPredicate), // Fit is determined based on whether a pod can tolerate all of the node's taints From cd4fb16e161b43384a163e549785356e20501eed Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Mon, 4 Sep 2017 23:39:49 +0200 Subject: [PATCH 199/794] fluentd-elasticsearch add-on: Improve README --- cluster/addons/fluentd-elasticsearch/README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cluster/addons/fluentd-elasticsearch/README.md b/cluster/addons/fluentd-elasticsearch/README.md index 59cc5ddfe99..ed012ac1fef 100644 --- a/cluster/addons/fluentd-elasticsearch/README.md +++ b/cluster/addons/fluentd-elasticsearch/README.md @@ -8,7 +8,7 @@ is a graphical interface for viewing and querying the logs stored in Elasticsearch. **Note:** this addon should **not** be used as-is in production. This is -an example and you should treat is as such. Please see at least the +an example and you should treat it as such. Please see at least the [Security](#security) and the [Storage](#storage) sections for more information. @@ -19,9 +19,9 @@ a Deployment, but allows for maintaining state on storage volumes. ### Security -Elasticsearch has capabilities to enable authorization using +Elasticsearch has capabilities to enable authorization using the [X-Pack plugin][xPack]. See configuration parameter `xpack.security.enabled` -in Elasticsearch and Kibana configurations. It can also be set via +in Elasticsearch and Kibana configurations. It can also be set via the `XPACK_SECURITY_ENABLED` env variable. After enabling the feature, follow [official documentation][setupCreds] to set up credentials in Elasticsearch and Kibana. Don't forget to propagate those credentials also to @@ -31,7 +31,7 @@ and [Secrets][secret] to store credentials in the Kubernetes apiserver. 
### Initialization -The Elasticsearch Statefulset manifest specifies that there shall be an +The Elasticsearch StatefulSet manifest specifies that there shall be an [init container][initContainer] executing before Elasticsearch containers themselves, in order to ensure that the kernel state variable `vm.max_map_count` is at least 262144, since this is a requirement of @@ -61,7 +61,7 @@ Learn more in the [official Kubernetes documentation][k8sElasticsearchDocs]. Since Fluentd talks to the Elasticsearch service inside the cluster, instances on masters won't work, because masters have no kube-proxy. Don't mark masters -with a label mentioned in the previous paragraph or add a taint on them to +with the label mentioned in the previous paragraph or add a taint on them to avoid Fluentd pods scheduling there. [fluentd]: http://www.fluentd.org/ From 995b840bc5f1a9c685fc16756c1c51a7976f39d3 Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Mon, 4 Dec 2017 11:20:15 -0800 Subject: [PATCH 200/794] Set NON_MASQUERADE_CIDR in gce/config-test --- cluster/gce/config-test.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 12a45ddad8d..c8696e857e2 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -352,6 +352,8 @@ OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}" # Network Policy plugin specific settings. NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico +NON_MASQUERADE_CIDR="0.0.0.0/0" + # How should the kubelet configure hairpin mode? 
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none From b7bdd7ba489d5ee7d6c48bb0c030b3fcabb794db Mon Sep 17 00:00:00 2001 From: liz Date: Mon, 4 Dec 2017 14:59:36 -0500 Subject: [PATCH 201/794] Update systemstat9 to allow compilation on OSX The latest version of system statadds stubbed out methods for non-Linux OSes: https://bitbucket.org/bertimus9/systemstat/pull-requests/2 --- Godeps/Godeps.json | 2 +- .../bitbucket.org/bertimus9/systemstat/BUILD | 2 + .../bertimus9/systemstat/systemstat_ex.go | 49 ++++++++++ .../bertimus9/systemstat/systemstat_linux.go | 81 ----------------- .../bertimus9/systemstat/utils.go | 90 +++++++++++++++++++ 5 files changed, 142 insertions(+), 82 deletions(-) create mode 100644 vendor/bitbucket.org/bertimus9/systemstat/systemstat_ex.go create mode 100644 vendor/bitbucket.org/bertimus9/systemstat/utils.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index d89bc78af29..a7937ff5c70 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -11,7 +11,7 @@ "Deps": [ { "ImportPath": "bitbucket.org/bertimus9/systemstat", - "Rev": "1468fd0db20598383c9393cccaa547de6ad99e5e" + "Rev": "6edb7bbcb021f6510db33e604f7e18861293a14a" }, { "ImportPath": "bitbucket.org/ww/goautoneg", diff --git a/vendor/bitbucket.org/bertimus9/systemstat/BUILD b/vendor/bitbucket.org/bertimus9/systemstat/BUILD index 6b8b3cf6af2..70507f160b1 100644 --- a/vendor/bitbucket.org/bertimus9/systemstat/BUILD +++ b/vendor/bitbucket.org/bertimus9/systemstat/BUILD @@ -4,6 +4,8 @@ go_library( name = "go_default_library", srcs = [ "systemstat.go", + "systemstat_ex.go", + "utils.go", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "systemstat_linux.go", diff --git a/vendor/bitbucket.org/bertimus9/systemstat/systemstat_ex.go b/vendor/bitbucket.org/bertimus9/systemstat/systemstat_ex.go new file mode 100644 index 00000000000..4b9f2fd758c --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/systemstat_ex.go @@ -0,0 +1,49 
@@ +// Copyright (c) 2013 Phillip Bond +// Licensed under the MIT License +// see file LICENSE + +// +build !linux + +package systemstat + +import ( + "syscall" + "time" +) + +func getUptime(procfile string) (uptime UptimeSample) { + notImplemented("getUptime") + uptime.Time = time.Now() + return +} + +func getLoadAvgSample(procfile string) (samp LoadAvgSample) { + notImplemented("getLoadAvgSample") + samp.Time = time.Now() + return +} + +func getMemSample(procfile string) (samp MemSample) { + notImplemented("getMemSample") + samp.Time = time.Now() + return +} + +func getProcCPUSample() (s ProcCPUSample) { + var processInfo syscall.Rusage + syscall.Getrusage(syscall.RUSAGE_SELF, &processInfo) + + s.Time = time.Now() + s.ProcMemUsedK = int64(processInfo.Maxrss) + s.User = float64(processInfo.Utime.Usec)/1000000 + float64(processInfo.Utime.Sec) + s.System = float64(processInfo.Stime.Usec)/1000000 + float64(processInfo.Stime.Sec) + s.Total = s.User + s.System + + return +} + +func getCPUSample(procfile string) (samp CPUSample) { + notImplemented("getCPUSample") + samp.Time = time.Now() + return +} diff --git a/vendor/bitbucket.org/bertimus9/systemstat/systemstat_linux.go b/vendor/bitbucket.org/bertimus9/systemstat/systemstat_linux.go index 02a475efe86..b4abdd5b425 100644 --- a/vendor/bitbucket.org/bertimus9/systemstat/systemstat_linux.go +++ b/vendor/bitbucket.org/bertimus9/systemstat/systemstat_linux.go @@ -11,8 +11,6 @@ import ( "bytes" "io" "io/ioutil" - "log" - "runtime" "strconv" "strings" "syscall" @@ -163,82 +161,3 @@ func getCPUSample(procfile string) (samp CPUSample) { } return } - -func getSimpleCPUAverage(first CPUSample, second CPUSample) (avg SimpleCPUAverage) { - //walltimediff := second.Time.Sub(first.Time) - //dT := float64(first.Total - second.Total) - - dI := float64(second.Idle - first.Idle) - dTot := float64(second.Total - first.Total) - avg.IdlePct = dI / dTot * 100 - avg.BusyPct = (dTot - dI) * 100 / dTot - //log.Printf("cpu idle ticks %f, total 
ticks %f, idle pct %f, busy pct %f\n", dI, dTot, avg.IdlePct, avg.BusyPct) - return -} - -func subtractAndConvertTicks(first uint64, second uint64) float64 { - return float64(first - second) -} - -func getCPUAverage(first CPUSample, second CPUSample) (avg CPUAverage) { - dTot := float64(second.Total - first.Total) - invQuotient := 100.00 / dTot - - avg.UserPct = subtractAndConvertTicks(second.User, first.User) * invQuotient - avg.NicePct = subtractAndConvertTicks(second.Nice, first.Nice) * invQuotient - avg.SystemPct = subtractAndConvertTicks(second.System, first.System) * invQuotient - avg.IdlePct = subtractAndConvertTicks(second.Idle, first.Idle) * invQuotient - avg.IowaitPct = subtractAndConvertTicks(second.Iowait, first.Iowait) * invQuotient - avg.IrqPct = subtractAndConvertTicks(second.Irq, first.Irq) * invQuotient - avg.SoftIrqPct = subtractAndConvertTicks(second.SoftIrq, first.SoftIrq) * invQuotient - avg.StealPct = subtractAndConvertTicks(second.Steal, first.Steal) * invQuotient - avg.GuestPct = subtractAndConvertTicks(second.Guest, first.Guest) * invQuotient - avg.Time = second.Time - avg.Seconds = second.Time.Sub(first.Time).Seconds() - return -} - -func getProcCPUAverage(first ProcCPUSample, second ProcCPUSample, procUptime float64) (avg ProcCPUAverage) { - dT := second.Time.Sub(first.Time).Seconds() - - avg.UserPct = 100 * (second.User - first.User) / dT - avg.SystemPct = 100 * (second.System - first.System) / dT - avg.TotalPct = 100 * (second.Total - first.Total) / dT - avg.PossiblePct = 100.0 * float64(runtime.NumCPU()) - avg.CumulativeTotalPct = 100 * second.Total / procUptime - avg.Time = second.Time - avg.Seconds = dT - return -} - -func parseCPUFields(fields []string, stat *CPUSample) { - numFields := len(fields) - stat.Name = fields[0] - for i := 1; i < numFields; i++ { - val, numerr := strconv.ParseUint(fields[i], 10, 64) - if numerr != nil { - log.Println("systemstat.parseCPUFields(): Error parsing (field, value): ", i, fields[i]) - } - 
stat.Total += val - switch i { - case 1: - stat.User = val - case 2: - stat.Nice = val - case 3: - stat.System = val - case 4: - stat.Idle = val - case 5: - stat.Iowait = val - case 6: - stat.Irq = val - case 7: - stat.SoftIrq = val - case 8: - stat.Steal = val - case 9: - stat.Guest = val - } - } -} diff --git a/vendor/bitbucket.org/bertimus9/systemstat/utils.go b/vendor/bitbucket.org/bertimus9/systemstat/utils.go new file mode 100644 index 00000000000..201b97de235 --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/utils.go @@ -0,0 +1,90 @@ +package systemstat + +import ( + "log" + "runtime" + "strconv" +) + +func notImplemented(fn string) { + log.Printf("systemstat/%s is not implemented for this OS: %s\n", fn, runtime.GOOS) +} + +func getSimpleCPUAverage(first CPUSample, second CPUSample) (avg SimpleCPUAverage) { + //walltimediff := second.Time.Sub(first.Time) + //dT := float64(first.Total - second.Total) + + dI := float64(second.Idle - first.Idle) + dTot := float64(second.Total - first.Total) + avg.IdlePct = dI / dTot * 100 + avg.BusyPct = (dTot - dI) * 100 / dTot + //log.Printf("cpu idle ticks %f, total ticks %f, idle pct %f, busy pct %f\n", dI, dTot, avg.IdlePct, avg.BusyPct) + return +} + +func subtractAndConvertTicks(first uint64, second uint64) float64 { + return float64(first - second) +} + +func getCPUAverage(first CPUSample, second CPUSample) (avg CPUAverage) { + dTot := float64(second.Total - first.Total) + invQuotient := 100.00 / dTot + + avg.UserPct = subtractAndConvertTicks(second.User, first.User) * invQuotient + avg.NicePct = subtractAndConvertTicks(second.Nice, first.Nice) * invQuotient + avg.SystemPct = subtractAndConvertTicks(second.System, first.System) * invQuotient + avg.IdlePct = subtractAndConvertTicks(second.Idle, first.Idle) * invQuotient + avg.IowaitPct = subtractAndConvertTicks(second.Iowait, first.Iowait) * invQuotient + avg.IrqPct = subtractAndConvertTicks(second.Irq, first.Irq) * invQuotient + avg.SoftIrqPct = 
subtractAndConvertTicks(second.SoftIrq, first.SoftIrq) * invQuotient + avg.StealPct = subtractAndConvertTicks(second.Steal, first.Steal) * invQuotient + avg.GuestPct = subtractAndConvertTicks(second.Guest, first.Guest) * invQuotient + avg.Time = second.Time + avg.Seconds = second.Time.Sub(first.Time).Seconds() + return +} + +func getProcCPUAverage(first ProcCPUSample, second ProcCPUSample, procUptime float64) (avg ProcCPUAverage) { + dT := second.Time.Sub(first.Time).Seconds() + + avg.UserPct = 100 * (second.User - first.User) / dT + avg.SystemPct = 100 * (second.System - first.System) / dT + avg.TotalPct = 100 * (second.Total - first.Total) / dT + avg.PossiblePct = 100.0 * float64(runtime.NumCPU()) + avg.CumulativeTotalPct = 100 * second.Total / procUptime + avg.Time = second.Time + avg.Seconds = dT + return +} + +func parseCPUFields(fields []string, stat *CPUSample) { + numFields := len(fields) + stat.Name = fields[0] + for i := 1; i < numFields; i++ { + val, numerr := strconv.ParseUint(fields[i], 10, 64) + if numerr != nil { + log.Println("systemstat.parseCPUFields(): Error parsing (field, value): ", i, fields[i]) + } + stat.Total += val + switch i { + case 1: + stat.User = val + case 2: + stat.Nice = val + case 3: + stat.System = val + case 4: + stat.Idle = val + case 5: + stat.Iowait = val + case 6: + stat.Irq = val + case 7: + stat.SoftIrq = val + case 8: + stat.Steal = val + case 9: + stat.Guest = val + } + } +} From 5c8fe23722db05dda25a54d4275737af329227eb Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 4 Dec 2017 13:51:29 -0600 Subject: [PATCH 202/794] Use struct key for TLS cache --- .../src/k8s.io/client-go/transport/cache.go | 33 +++++++++++++++---- 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/staging/src/k8s.io/client-go/transport/cache.go b/staging/src/k8s.io/client-go/transport/cache.go index da22cdee590..7c40848c79f 100644 --- a/staging/src/k8s.io/client-go/transport/cache.go +++ 
b/staging/src/k8s.io/client-go/transport/cache.go @@ -31,12 +31,28 @@ import ( // the config has no custom TLS options, http.DefaultTransport is returned. type tlsTransportCache struct { mu sync.Mutex - transports map[string]*http.Transport + transports map[tlsCacheKey]*http.Transport } const idleConnsPerHost = 25 -var tlsCache = &tlsTransportCache{transports: make(map[string]*http.Transport)} +var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)} + +type tlsCacheKey struct { + insecure bool + caData string + certData string + keyData string + serverName string +} + +func (t tlsCacheKey) String() string { + keyText := "" + if len(t.keyData) > 0 { + keyText = "" + } + return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s", t.insecure, t.caData, t.certData, keyText, t.serverName) +} func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { key, err := tlsConfigKey(config) @@ -82,11 +98,16 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { } // tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor -func tlsConfigKey(c *Config) (string, error) { +func tlsConfigKey(c *Config) (tlsCacheKey, error) { // Make sure ca/key/cert content is loaded if err := loadTLSFiles(c); err != nil { - return "", err + return tlsCacheKey{}, err } - // Only include the things that actually affect the tls.Config - return fmt.Sprintf("%v/%x/%x/%x/%v", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData, c.TLS.ServerName), nil + return tlsCacheKey{ + insecure: c.TLS.Insecure, + caData: string(c.TLS.CAData), + certData: string(c.TLS.CertData), + keyData: string(c.TLS.KeyData), + serverName: c.TLS.ServerName, + }, nil } From 8d2f9fe1cf1efa2935bf46eada3b648926d7d304 Mon Sep 17 00:00:00 2001 From: Minhan Xia Date: Mon, 4 Dec 2017 14:25:22 -0800 Subject: [PATCH 203/794] add yaml-quote for GCE_GLBC_IMAGE --- cluster/common.sh | 1 + 1 file changed, 1 
insertion(+) diff --git a/cluster/common.sh b/cluster/common.sh index ee19ac221d2..785afa9ced9 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -665,6 +665,7 @@ ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTO ADVANCED_AUDIT_POLICY: $(yaml-quote ${ADVANCED_AUDIT_POLICY:-}) ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log}) GCE_API_ENDPOINT: $(yaml-quote ${GCE_API_ENDPOINT:-}) +GCE_GLBC_IMAGE: $(yaml-quote ${GCE_GLBC_IMAGE:-}) PROMETHEUS_TO_SD_ENDPOINT: $(yaml-quote ${PROMETHEUS_TO_SD_ENDPOINT:-}) PROMETHEUS_TO_SD_PREFIX: $(yaml-quote ${PROMETHEUS_TO_SD_PREFIX:-}) ENABLE_PROMETHEUS_TO_SD: $(yaml-quote ${ENABLE_PROMETHEUS_TO_SD:-false}) From f922935f2f98907c53232e4950098c2bc0c2bd8b Mon Sep 17 00:00:00 2001 From: Nick Sardo Date: Fri, 1 Dec 2017 14:51:56 -0800 Subject: [PATCH 204/794] Hit ILB endpoint in ILB e2e test --- test/e2e/network/service.go | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 147837e06b4..6d55465155d 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -1433,9 +1433,37 @@ var _ = SIGDescribe("Services", func() { svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, createTimeout) jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) lbIngress := &svc.Status.LoadBalancer.Ingress[0] + svcPort := int(svc.Spec.Ports[0].Port) // should have an internal IP. Expect(isInternalEndpoint(lbIngress)).To(BeTrue()) + // ILBs are not accessible from the test orchestrator, so it's necessary to use + // a pod to test the service. 
+ By("hitting the internal load balancer from pod") + framework.Logf("creating pod with host network") + hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec") + + framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName) + tcpIngressIP := framework.GetIngressPoint(lbIngress) + if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { + cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort) + stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) + if err != nil { + framework.Logf("error curling; stdout: %v. err: %v", stdout, err) + return false, nil + } + + if !strings.Contains(stdout, "hello") { + framework.Logf("Expected output to contain 'hello', got %q; retrying...", stdout) + return false, nil + } + + framework.Logf("Successful curl; stdout: %v", stdout) + return true, nil + }); pollErr != nil { + framework.Failf("Failed to hit ILB IP, err: %v", pollErr) + } + By("switching to external type LoadBalancer") svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) { disableILB(svc) @@ -1455,6 +1483,11 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) Expect(isInternalEndpoint(lbIngress)).To(BeFalse()) + By("hitting the external load balancer") + framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName) + tcpIngressIP = framework.GetIngressPoint(lbIngress) + jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault) + // GCE cannot test a specific IP because the test may not own it. This cloud specific condition // will be removed when GCP supports similar functionality. 
if framework.ProviderIs("azure") { From 943040d8278884e6d33ad3ca06ccdb236e2dcf8c Mon Sep 17 00:00:00 2001 From: zouyee Date: Tue, 5 Dec 2017 08:47:55 +0800 Subject: [PATCH 205/794] outdent err block --- pkg/volume/util.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/volume/util.go b/pkg/volume/util.go index e2890516980..ab394fe8be2 100644 --- a/pkg/volume/util.go +++ b/pkg/volume/util.go @@ -87,9 +87,8 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po // Recycler will try again and the old pod will be hopefuly deleted // at that time. return fmt.Errorf("old recycler pod found, will retry later") - } else { - return fmt.Errorf("unexpected error creating recycler pod: %+v\n", err) } + return fmt.Errorf("unexpected error creating recycler pod: %+v", err) } err = waitForPod(pod, recyclerClient, podCh) @@ -274,9 +273,8 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers timeout := (pvSize / giSize) * int64(timeoutIncrement) if timeout < int64(minimumTimeout) { return int64(minimumTimeout) - } else { - return timeout } + return timeout } // RoundUpSize calculates how many allocation units are needed to accommodate @@ -304,7 +302,7 @@ func GenerateVolumeName(clusterName, pvName string, maxLength int) string { return prefix + "-" + pvName } -// Check if the path from the mounter is empty. +// GetPath checks if the path from the mounter is empty. func GetPath(mounter Mounter) (string, error) { path := mounter.GetPath() if path == "" { @@ -313,7 +311,7 @@ func GetPath(mounter Mounter) (string, error) { return path, nil } -// ChooseZone implements our heuristics for choosing a zone for volume creation based on the volume name +// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name // Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name. 
// However, if the PVCName ends with `-`, we will hash the prefix, and then add the integer to the hash. // This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones, From d51fbd35ce928f4a71f374d43979accbfa21880c Mon Sep 17 00:00:00 2001 From: Lion-Wei Date: Mon, 4 Dec 2017 17:17:54 +0800 Subject: [PATCH 206/794] warn if kubectl create with extra argument --- pkg/kubectl/cmd/create.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kubectl/cmd/create.go b/pkg/kubectl/cmd/create.go index e3340bcf228..0dec72cb849 100644 --- a/pkg/kubectl/cmd/create.go +++ b/pkg/kubectl/cmd/create.go @@ -278,8 +278,8 @@ func createAndRefresh(info *resource.Info) error { // NameFromCommandArgs is a utility function for commands that assume the first argument is a resource name func NameFromCommandArgs(cmd *cobra.Command, args []string) (string, error) { - if len(args) == 0 { - return "", cmdutil.UsageErrorf(cmd, "NAME is required") + if len(args) != 1 { + return "", cmdutil.UsageErrorf(cmd, "exactly one NAME is required, got %d", len(args)) } return args[0], nil } From e48b6f3d157a040e73a3658605c6cc12fdddee0c Mon Sep 17 00:00:00 2001 From: George Kudrayvtsev Date: Mon, 4 Dec 2017 16:57:31 -0800 Subject: [PATCH 207/794] Separates validation per-runtime. 
--- .../kubeproxyconfig/validation/validation.go | 31 ++++++++++++++----- .../validation/validation_test.go | 2 +- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/pkg/proxy/apis/kubeproxyconfig/validation/validation.go b/pkg/proxy/apis/kubeproxyconfig/validation/validation.go index 4edbe92af55..376d281c9d8 100644 --- a/pkg/proxy/apis/kubeproxyconfig/validation/validation.go +++ b/pkg/proxy/apis/kubeproxyconfig/validation/validation.go @@ -142,28 +142,43 @@ func validateKubeProxyConntrackConfiguration(config kubeproxyconfig.KubeProxyCon } func validateProxyMode(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { + if runtime.GOOS == "windows" { + return validateProxyModeWindows(mode, fldPath) + } + + return validateProxyModeLinux(mode, fldPath) +} + +func validateProxyModeLinux(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} + switch mode { case kubeproxyconfig.ProxyModeUserspace: case kubeproxyconfig.ProxyModeIPTables: case kubeproxyconfig.ProxyModeIPVS: case "": - case kubeproxyconfig.ProxyModeKernelspace: - if runtime.GOOS != "windows" { - errMsg := fmt.Sprintf("%s is only supported on Windows", string(kubeproxyconfig.ProxyModeKernelspace)) - allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)) - } default: modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeIPTables), string(kubeproxyconfig.ProxyModeIPVS)} - if runtime.GOOS == "windows" { - modes = append(modes, string(kubeproxyconfig.ProxyModeKernelspace)) - } errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy [currently iptables])", strings.Join(modes, ",")) allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)) } return allErrs } +func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + switch mode { + 
case kubeproxyconfig.ProxyModeUserspace: + case kubeproxyconfig.ProxyModeKernelspace: + default: + modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeKernelspace)} + errMsg := fmt.Sprintf("must be %s or blank (blank means the most-available proxy [currently userspace])", strings.Join(modes, ",")) + allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)) + } + return allErrs +} + func validateClientConnectionConfiguration(config kubeproxyconfig.ClientConnectionConfiguration, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(config.Burst), fldPath.Child("Burst"))...) diff --git a/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go b/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go index b3498264fed..2736e53af66 100644 --- a/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go +++ b/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go @@ -510,7 +510,7 @@ func TestValidateProxyMode(t *testing.T) { }{ { mode: kubeproxyconfig.ProxyMode("non-existing"), - msg: "or blank (blank means the best-available proxy [currently iptables])", + msg: "or blank (blank means the", }, } From def22b796ca93118fe0ab667e2a0015565441c0c Mon Sep 17 00:00:00 2001 From: Josh Horwitz Date: Mon, 4 Dec 2017 21:48:38 -0500 Subject: [PATCH 208/794] Use hostname for CCM resource lock id --- cmd/cloud-controller-manager/app/controllermanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index c93da56dad5..d4675c9f478 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -165,7 +165,7 @@ func Run(s *options.CloudControllerManagerServer) error { "cloud-controller-manager", 
leaderElectionClient.CoreV1(), resourcelock.ResourceLockConfig{ - Identity: id + "-external-cloud-controller", + Identity: id, EventRecorder: recorder, }) if err != nil { From 7df64d59fb90bb5bb72d3d702f9da6cdc04378f2 Mon Sep 17 00:00:00 2001 From: weekface Date: Mon, 14 Aug 2017 19:15:38 +0800 Subject: [PATCH 209/794] Remove useless error --- pkg/api/resource/helpers.go | 2 +- pkg/printers/internalversion/describe.go | 24 +++++-------------- pkg/printers/internalversion/describe_test.go | 5 +--- 3 files changed, 8 insertions(+), 23 deletions(-) diff --git a/pkg/api/resource/helpers.go b/pkg/api/resource/helpers.go index 78084393eb2..6947799e877 100644 --- a/pkg/api/resource/helpers.go +++ b/pkg/api/resource/helpers.go @@ -27,7 +27,7 @@ import ( // PodRequestsAndLimits returns a dictionary of all defined resources summed up for all // containers of the pod. -func PodRequestsAndLimits(pod *api.Pod) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) { +func PodRequestsAndLimits(pod *api.Pod) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity) { reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{} for _, container := range pod.Spec.Containers { for name, quantity := range container.Resources.Requests { diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index c33b1c636ce..5bdf6306f15 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -2626,9 +2626,7 @@ func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events w.Write(LEVEL_0, "ExternalID:\t%s\n", node.Spec.ExternalID) } if canViewPods && nodeNonTerminatedPodsList != nil { - if err := describeNodeResource(nodeNonTerminatedPodsList, node, w); err != nil { - return err - } + describeNodeResource(nodeNonTerminatedPodsList, node, w) } else { w.Write(LEVEL_0, 
"Pods:\tnot authorized\n") } @@ -2868,7 +2866,7 @@ func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, e }) } -func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node, w PrefixWriter) error { +func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node, w PrefixWriter) { w.Write(LEVEL_0, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items)) w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\n") @@ -2878,10 +2876,7 @@ func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node } for _, pod := range nodeNonTerminatedPodsList.Items { - req, limit, err := resourcehelper.PodRequestsAndLimits(&pod) - if err != nil { - return err - } + req, limit := resourcehelper.PodRequestsAndLimits(&pod) cpuReq, cpuLimit, memoryReq, memoryLimit := req[api.ResourceCPU], limit[api.ResourceCPU], req[api.ResourceMemory], limit[api.ResourceMemory] fractionCpuReq := float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 @@ -2894,10 +2889,7 @@ func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node w.Write(LEVEL_0, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") w.Write(LEVEL_1, "------------\t----------\t---------------\t-------------\n") - reqs, limits, err := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList) - if err != nil { - return err - } + reqs, limits := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList) cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory] fractionCpuReqs := 
float64(cpuReqs.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 fractionCpuLimits := float64(cpuLimits.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 @@ -2906,16 +2898,12 @@ func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node w.Write(LEVEL_1, "%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits), memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) - return nil } -func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) { +func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity) { reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{} for _, pod := range podList.Items { - podReqs, podLimits, err := resourcehelper.PodRequestsAndLimits(&pod) - if err != nil { - return nil, nil, err - } + podReqs, podLimits := resourcehelper.PodRequestsAndLimits(&pod) for podReqName, podReqValue := range podReqs { if value, ok := reqs[podReqName]; !ok { reqs[podReqName] = *podReqValue.Copy() diff --git a/pkg/printers/internalversion/describe_test.go b/pkg/printers/internalversion/describe_test.go index 0896dc0bc35..71dfd3f05f4 100644 --- a/pkg/printers/internalversion/describe_test.go +++ b/pkg/printers/internalversion/describe_test.go @@ -848,10 +848,7 @@ func TestGetPodsTotalRequests(t *testing.T) { } for _, testCase := range testCases { - reqs, _, err := getPodsTotalRequestsAndLimits(testCase.pods) - if err != nil { - t.Errorf("Unexpected error %v", err) - } + reqs, _ := getPodsTotalRequestsAndLimits(testCase.pods) if !apiequality.Semantic.DeepEqual(reqs, testCase.expectedReqs) { t.Errorf("Expected %v, got %v", testCase.expectedReqs, reqs) } From 
1f840944a67e4140086aebf45b67aa53e24ffca3 Mon Sep 17 00:00:00 2001 From: Josh Horwitz Date: Mon, 4 Dec 2017 22:58:11 -0500 Subject: [PATCH 210/794] Ensure PVL controller is next pending initializer before labeling the PV --- pkg/controller/cloud/pvlcontroller.go | 23 +++++++++++----------- pkg/controller/cloud/pvlcontroller_test.go | 7 ++++++- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/pkg/controller/cloud/pvlcontroller.go b/pkg/controller/cloud/pvlcontroller.go index 3ae1fbf6bc9..a567643bfdd 100644 --- a/pkg/controller/cloud/pvlcontroller.go +++ b/pkg/controller/cloud/pvlcontroller.go @@ -182,7 +182,7 @@ func (pvlc *PersistentVolumeLabelController) addLabels(key string) error { func (pvlc *PersistentVolumeLabelController) addLabelsToVolume(vol *v1.PersistentVolume) error { var volumeLabels map[string]string - // Only add labels if in the list of initializers + // Only add labels if the next pending initializer. if needsInitialization(vol.Initializers, initializerName) { if labeler, ok := (pvlc.cloud).(cloudprovider.PVLabeler); ok { labels, err := labeler.GetLabelsForVolume(vol) @@ -265,16 +265,17 @@ func removeInitializer(initializers *metav1.Initializers, name string) *metav1.I return &metav1.Initializers{Pending: updated} } +// needsInitialization checks whether or not the PVL is the next pending initializer. func needsInitialization(initializers *metav1.Initializers, name string) bool { - hasInitializer := false - - if initializers != nil { - for _, pending := range initializers.Pending { - if pending.Name == name { - hasInitializer = true - break - } - } + if initializers == nil { + return false } - return hasInitializer + + if len(initializers.Pending) == 0 { + return false + } + + // There is at least one initializer still pending so check to + // see if the PVL is the next in line. 
+ return initializers.Pending[0].Name == name } diff --git a/pkg/controller/cloud/pvlcontroller_test.go b/pkg/controller/cloud/pvlcontroller_test.go index 48b079122c5..2ab2f11394d 100644 --- a/pkg/controller/cloud/pvlcontroller_test.go +++ b/pkg/controller/cloud/pvlcontroller_test.go @@ -146,11 +146,16 @@ func TestAddLabelsToVolume(t *testing.T) { initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: initializerName}}}, shouldLabel: true, }, - "PV with other initializers": { + "PV with other initializers only": { vol: pv, initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: "OtherInit"}}}, shouldLabel: false, }, + "PV with other initializers first": { + vol: pv, + initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: "OtherInit"}, {Name: initializerName}}}, + shouldLabel: false, + }, } for d, tc := range testCases { From d4244f3dede646c5292c8200755d428ae4fbad83 Mon Sep 17 00:00:00 2001 From: Jiaying Zhang Date: Mon, 4 Dec 2017 14:01:58 -0800 Subject: [PATCH 211/794] Re-uses device plugin resources allocated to init containers. Implements option 2 mentioned in https://github.com/kubernetes/kubernetes/issues/56022#issuecomment-348286184 --- pkg/kubelet/cm/deviceplugin/manager.go | 21 +++++-- pkg/kubelet/cm/deviceplugin/manager_test.go | 64 +++++++++++++++++++++ pkg/kubelet/cm/deviceplugin/pod_devices.go | 30 ++++++++++ 3 files changed, 110 insertions(+), 5 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/manager.go b/pkg/kubelet/cm/deviceplugin/manager.go index 6535479fe43..db28e36d5d6 100644 --- a/pkg/kubelet/cm/deviceplugin/manager.go +++ b/pkg/kubelet/cm/deviceplugin/manager.go @@ -240,16 +240,19 @@ func (m *ManagerImpl) Devices() map[string][]pluginapi.Device { // from the registered device plugins. 
func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { pod := attrs.Pod + devicesToReuse := make(map[string]sets.String) // TODO: Reuse devices between init containers and regular containers. for _, container := range pod.Spec.InitContainers { - if err := m.allocateContainerResources(pod, &container); err != nil { + if err := m.allocateContainerResources(pod, &container, devicesToReuse); err != nil { return err } + m.podDevices.addContainerAllocatedResources(string(pod.UID), container.Name, devicesToReuse) } for _, container := range pod.Spec.Containers { - if err := m.allocateContainerResources(pod, &container); err != nil { + if err := m.allocateContainerResources(pod, &container, devicesToReuse); err != nil { return err } + m.podDevices.removeContainerAllocatedResources(string(pod.UID), container.Name, devicesToReuse) } m.mutex.Lock() @@ -471,7 +474,7 @@ func (m *ManagerImpl) updateAllocatedDevices(activePods []*v1.Pod) { // Returns list of device Ids we need to allocate with Allocate rpc call. // Returns empty list in case we don't need to issue the Allocate rpc call. -func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int) (sets.String, error) { +func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.String) (sets.String, error) { m.mutex.Lock() defer m.mutex.Unlock() needed := required @@ -497,6 +500,14 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi return nil, fmt.Errorf("can't allocate unregistered device %v", resource) } devices = sets.NewString() + // Allocates from reusableDevices list first. + for device := range reusableDevices { + devices.Insert(device) + needed-- + if needed == 0 { + return devices, nil + } + } // Needs to allocate additional devices. 
if m.allocatedDevices[resource] == nil { m.allocatedDevices[resource] = sets.NewString() @@ -523,7 +534,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi // plugin resources for the input container, issues an Allocate rpc request // for each new device resource requirement, processes their AllocateResponses, // and updates the cached containerDevices on success. -func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container) error { +func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.String) error { podUID := string(pod.UID) contName := container.Name allocatedDevicesUpdated := false @@ -544,7 +555,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont m.updateAllocatedDevices(m.activePods()) allocatedDevicesUpdated = true } - allocDevices, err := m.devicesToAllocate(podUID, contName, resource, needed) + allocDevices, err := m.devicesToAllocate(podUID, contName, resource, needed, devicesToReuse[resource]) if err != nil { return err } diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index 9a74ec93b50..7cd9a1aa9f1 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -539,6 +539,70 @@ func TestPodContainerDeviceAllocation(t *testing.T) { as.Nil(err) runContainerOpts3 := testManager.GetDeviceRunContainerOptions(newPod, &newPod.Spec.Containers[0]) as.Equal(1, len(runContainerOpts3.Envs)) + + // Requesting to create a pod that requests resourceName1 in init containers and normal containers + // should succeed with devices allocated to init containers reallocated to normal containers. 
+ podWithPluginResourcesInInitContainers := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: uuid.NewUUID(), + }, + Spec: v1.PodSpec{ + InitContainers: []v1.Container{ + { + Name: string(uuid.NewUUID()), + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName(resourceName1): resourceQuantity2, + }, + }, + }, + { + Name: string(uuid.NewUUID()), + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName(resourceName1): resourceQuantity1, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: string(uuid.NewUUID()), + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName(resourceName1): resourceQuantity2, + v1.ResourceName(resourceName2): resourceQuantity2, + }, + }, + }, + { + Name: string(uuid.NewUUID()), + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName(resourceName1): resourceQuantity2, + v1.ResourceName(resourceName2): resourceQuantity2, + }, + }, + }, + }, + }, + } + podsStub.updateActivePods([]*v1.Pod{podWithPluginResourcesInInitContainers}) + err = testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: podWithPluginResourcesInInitContainers}) + as.Nil(err) + podUID := string(podWithPluginResourcesInInitContainers.UID) + initCont1 := podWithPluginResourcesInInitContainers.Spec.InitContainers[0].Name + initCont2 := podWithPluginResourcesInInitContainers.Spec.InitContainers[1].Name + normalCont1 := podWithPluginResourcesInInitContainers.Spec.Containers[0].Name + normalCont2 := podWithPluginResourcesInInitContainers.Spec.Containers[1].Name + initCont1Devices := testManager.podDevices.containerDevices(podUID, initCont1, resourceName1) + initCont2Devices := testManager.podDevices.containerDevices(podUID, initCont2, resourceName1) + normalCont1Devices := testManager.podDevices.containerDevices(podUID, normalCont1, resourceName1) + normalCont2Devices := testManager.podDevices.containerDevices(podUID, normalCont2, resourceName1) + 
as.True(initCont2Devices.IsSuperset(initCont1Devices)) + as.True(initCont2Devices.IsSuperset(normalCont1Devices)) + as.True(initCont2Devices.IsSuperset(normalCont2Devices)) + as.Equal(0, normalCont1Devices.Intersection(normalCont2Devices).Len()) } func TestSanitizeNodeAllocatable(t *testing.T) { diff --git a/pkg/kubelet/cm/deviceplugin/pod_devices.go b/pkg/kubelet/cm/deviceplugin/pod_devices.go index 495a5729879..311c8d0c60f 100644 --- a/pkg/kubelet/cm/deviceplugin/pod_devices.go +++ b/pkg/kubelet/cm/deviceplugin/pod_devices.go @@ -78,6 +78,36 @@ func (pdev podDevices) containerDevices(podUID, contName, resource string) sets. return devs.deviceIds } +// Populates allocatedResources with the device resources allocated to the specified . +func (pdev podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) { + containers, exists := pdev[podUID] + if !exists { + return + } + resources, exists := containers[contName] + if !exists { + return + } + for resource, devices := range resources { + allocatedResources[resource] = allocatedResources[resource].Union(devices.deviceIds) + } +} + +// Removes the device resources allocated to the specified from allocatedResources. +func (pdev podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) { + containers, exists := pdev[podUID] + if !exists { + return + } + resources, exists := containers[contName] + if !exists { + return + } + for resource, devices := range resources { + allocatedResources[resource] = allocatedResources[resource].Difference(devices.deviceIds) + } +} + // Returns all of devices allocated to the pods being tracked, keyed by resourceName. 
func (pdev podDevices) devices() map[string]sets.String { ret := make(map[string]sets.String) From e31ed07a9cf60fbf9cecfbd5938e09d85d53d086 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 5 Dec 2017 15:15:46 +0800 Subject: [PATCH 212/794] also check pod securityContextt hostNetwork in exec admission controller --- plugin/pkg/admission/exec/admission.go | 46 ++++++++++++++++---------- 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/plugin/pkg/admission/exec/admission.go b/plugin/pkg/admission/exec/admission.go index 456d47e07e7..0188b2ac760 100644 --- a/plugin/pkg/admission/exec/admission.go +++ b/plugin/pkg/admission/exec/admission.go @@ -49,9 +49,10 @@ type DenyExec struct { client internalclientset.Interface // these flags control which items will be checked to deny exec/attach - hostIPC bool - hostPID bool - privileged bool + hostNetwork bool + hostIPC bool + hostPID bool + privileged bool } var _ admission.ValidationInterface = &DenyExec{} @@ -62,22 +63,24 @@ var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&DenyExec{}) // using host based configurations. func NewDenyEscalatingExec() *DenyExec { return &DenyExec{ - Handler: admission.NewHandler(admission.Connect), - hostIPC: true, - hostPID: true, - privileged: true, + Handler: admission.NewHandler(admission.Connect), + hostNetwork: true, + hostIPC: true, + hostPID: true, + privileged: true, } } // NewDenyExecOnPrivileged creates a new admission controller that is only checking the privileged -// option. This is for legacy support of the DenyExecOnPrivileged admission controller. Most -// of the time NewDenyEscalatingExec should be preferred. +// option. This is for legacy support of the DenyExecOnPrivileged admission controller. +// Most of the time NewDenyEscalatingExec should be preferred. 
func NewDenyExecOnPrivileged() *DenyExec { return &DenyExec{ - Handler: admission.NewHandler(admission.Connect), - hostIPC: false, - hostPID: false, - privileged: true, + Handler: admission.NewHandler(admission.Connect), + hostNetwork: false, + hostIPC: false, + hostPID: false, + privileged: true, } } @@ -96,12 +99,19 @@ func (d *DenyExec) Validate(a admission.Attributes) (err error) { return admission.NewForbidden(a, err) } - if d.hostPID && pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostPID { - return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a container using host pid")) - } + if pod.Spec.SecurityContext != nil { + securityContext := pod.Spec.SecurityContext + if d.hostNetwork && securityContext.HostNetwork { + return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a container using host network")) + } - if d.hostIPC && pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostIPC { - return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a container using host ipc")) + if d.hostPID && securityContext.HostPID { + return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a container using host pid")) + } + + if d.hostIPC && securityContext.HostIPC { + return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a container using host ipc")) + } } if d.privileged && isPrivileged(pod) { From 22398f8d3c0d71db5869eace174f5721f8499224 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 5 Dec 2017 19:40:31 +0800 Subject: [PATCH 213/794] remove dead code in lifecycle admission --- .../plugin/namespace/lifecycle/admission.go | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index 158f4164276..81c24f6a5a6 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -69,22 +69,9 @@ type Lifecycle struct { forceLiveLookupCache *utilcache.LRUExpireCache } -type forceLiveLookupEntry struct { - expiry time.Time -} - var _ = initializer.WantsExternalKubeInformerFactory(&Lifecycle{}) var _ = initializer.WantsExternalKubeClientSet(&Lifecycle{}) -func makeNamespaceKey(namespace string) *v1.Namespace { - return &v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Namespace: "", - }, - } -} - func (l *Lifecycle) Admit(a admission.Attributes) error { // prevent deletion of immortal namespaces if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() && l.immortalNamespaces.Has(a.GetName()) { @@ -182,7 +169,7 @@ func (l *Lifecycle) Admit(a admission.Attributes) error { } // TODO: This should probably not be a 403 - return admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated.", a.GetNamespace())) + return admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated", a.GetNamespace())) } return nil From c53120e6b9ad944217a0a92bd4301424c95df089 Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Tue, 31 Oct 2017 15:44:05 -0400 Subject: [PATCH 214/794] ensure PrinterForCommand is consumed through cmdutil.Factory --- pkg/kubectl/cmd/cmd.go | 2 +- pkg/kubectl/cmd/config/BUILD | 2 -- pkg/kubectl/cmd/config/config.go | 4 ++-- pkg/kubectl/cmd/config/config_test.go | 2 +- pkg/kubectl/cmd/config/view.go | 10 ++++------ pkg/kubectl/cmd/config/view_test.go | 3 ++- pkg/kubectl/cmd/util/factory_builder.go | 2 +- pkg/kubectl/cmd/util/printing.go | 4 ++-- 8 files changed, 13 insertions(+), 16 deletions(-) diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index 
782fa37c2b1..22c1189b54a 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -336,7 +336,7 @@ func NewKubectlCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cob } cmds.AddCommand(alpha) - cmds.AddCommand(cmdconfig.NewCmdConfig(clientcmd.NewDefaultPathOptions(), out, err)) + cmds.AddCommand(cmdconfig.NewCmdConfig(f, clientcmd.NewDefaultPathOptions(), out, err)) cmds.AddCommand(NewCmdPlugin(f, in, out, err)) cmds.AddCommand(NewCmdVersion(f, out)) cmds.AddCommand(NewCmdApiVersions(f, out)) diff --git a/pkg/kubectl/cmd/config/BUILD b/pkg/kubectl/cmd/config/BUILD index 3a1fd047769..e5d53853ea2 100644 --- a/pkg/kubectl/cmd/config/BUILD +++ b/pkg/kubectl/cmd/config/BUILD @@ -33,8 +33,6 @@ go_library( "//pkg/kubectl/util/i18n:go_default_library", "//pkg/printers:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", diff --git a/pkg/kubectl/cmd/config/config.go b/pkg/kubectl/cmd/config/config.go index cffd0aee3d7..b63d1cf15f3 100644 --- a/pkg/kubectl/cmd/config/config.go +++ b/pkg/kubectl/cmd/config/config.go @@ -31,7 +31,7 @@ import ( ) // NewCmdConfig creates a command object for the "config" action, and adds all child commands to it. 
-func NewCmdConfig(pathOptions *clientcmd.PathOptions, out, errOut io.Writer) *cobra.Command { +func NewCmdConfig(f cmdutil.Factory, pathOptions *clientcmd.PathOptions, out, errOut io.Writer) *cobra.Command { if len(pathOptions.ExplicitFileFlag) == 0 { pathOptions.ExplicitFileFlag = clientcmd.RecommendedConfigPathFlag } @@ -53,7 +53,7 @@ func NewCmdConfig(pathOptions *clientcmd.PathOptions, out, errOut io.Writer) *co // file paths are common to all sub commands cmd.PersistentFlags().StringVar(&pathOptions.LoadingRules.ExplicitPath, pathOptions.ExplicitFileFlag, pathOptions.LoadingRules.ExplicitPath, "use a particular kubeconfig file") - cmd.AddCommand(NewCmdConfigView(out, errOut, pathOptions)) + cmd.AddCommand(NewCmdConfigView(f, out, errOut, pathOptions)) cmd.AddCommand(NewCmdConfigSetCluster(out, pathOptions)) cmd.AddCommand(NewCmdConfigSetAuthInfo(out, pathOptions)) cmd.AddCommand(NewCmdConfigSetContext(out, pathOptions)) diff --git a/pkg/kubectl/cmd/config/config_test.go b/pkg/kubectl/cmd/config/config_test.go index e331aaed086..2fc8000278d 100644 --- a/pkg/kubectl/cmd/config/config_test.go +++ b/pkg/kubectl/cmd/config/config_test.go @@ -865,7 +865,7 @@ func testConfigCommand(args []string, startingConfig clientcmdapi.Config, t *tes buf := bytes.NewBuffer([]byte{}) - cmd := NewCmdConfig(clientcmd.NewDefaultPathOptions(), buf, buf) + cmd := NewCmdConfig(cmdutil.NewFactory(nil), clientcmd.NewDefaultPathOptions(), buf, buf) cmd.SetArgs(argsToUse) cmd.Execute() diff --git a/pkg/kubectl/cmd/config/view.go b/pkg/kubectl/cmd/config/view.go index eb9975f92b9..d6b9de100dc 100644 --- a/pkg/kubectl/cmd/config/view.go +++ b/pkg/kubectl/cmd/config/view.go @@ -23,8 +23,6 @@ import ( "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/util/flag" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -57,7 +55,7 @@ var ( kubectl config view -o jsonpath='{.users[?(@.name == 
"e2e")].user.password}'`) ) -func NewCmdConfigView(out, errOut io.Writer, ConfigAccess clientcmd.ConfigAccess) *cobra.Command { +func NewCmdConfigView(f cmdutil.Factory, out, errOut io.Writer, ConfigAccess clientcmd.ConfigAccess) *cobra.Command { options := &ViewOptions{ConfigAccess: ConfigAccess} // Default to yaml defaultOutputFormat := "yaml" @@ -82,7 +80,7 @@ func NewCmdConfigView(out, errOut io.Writer, ConfigAccess clientcmd.ConfigAccess } printOpts := cmdutil.ExtractCmdPrintOptions(cmd, false) - printer, err := cmdutil.PrinterForOptions(meta.NewDefaultRESTMapper(nil, nil), latest.Scheme, nil, []runtime.Decoder{latest.Codec}, printOpts) + printer, err := f.PrinterForOptions(printOpts) cmdutil.CheckErr(err) printer = printers.NewVersionedPrinter(printer, latest.Scheme, latest.ExternalVersion) @@ -94,8 +92,8 @@ func NewCmdConfigView(out, errOut io.Writer, ConfigAccess clientcmd.ConfigAccess cmd.Flags().Set("output", defaultOutputFormat) options.Merge.Default(true) - f := cmd.Flags().VarPF(&options.Merge, "merge", "", "Merge the full hierarchy of kubeconfig files") - f.NoOptDefVal = "true" + mergeFlag := cmd.Flags().VarPF(&options.Merge, "merge", "", "Merge the full hierarchy of kubeconfig files") + mergeFlag.NoOptDefVal = "true" cmd.Flags().BoolVar(&options.RawByteData, "raw", false, "Display raw byte data") cmd.Flags().BoolVar(&options.Flatten, "flatten", false, "Flatten the resulting kubeconfig file into self-contained output (useful for creating portable kubeconfig files)") cmd.Flags().BoolVar(&options.Minify, "minify", false, "Remove all information not used by current-context from the output") diff --git a/pkg/kubectl/cmd/config/view_test.go b/pkg/kubectl/cmd/config/view_test.go index 177210d024b..873e5f0797d 100644 --- a/pkg/kubectl/cmd/config/view_test.go +++ b/pkg/kubectl/cmd/config/view_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + cmdutil 
"k8s.io/kubernetes/pkg/kubectl/cmd/util" ) type viewClusterTest struct { @@ -142,7 +143,7 @@ func (test viewClusterTest) run(t *testing.T) { pathOptions.EnvVar = "" buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdConfigView(buf, errBuf, pathOptions) + cmd := NewCmdConfigView(cmdutil.NewFactory(nil), buf, errBuf, pathOptions) cmd.Flags().Parse(test.flags) if err := cmd.Execute(); err != nil { t.Fatalf("unexpected error executing command: %v,kubectl config view flags: %v", err, test.flags) diff --git a/pkg/kubectl/cmd/util/factory_builder.go b/pkg/kubectl/cmd/util/factory_builder.go index 2c433032e4a..4d7e99b5ca0 100644 --- a/pkg/kubectl/cmd/util/factory_builder.go +++ b/pkg/kubectl/cmd/util/factory_builder.go @@ -56,7 +56,7 @@ func (f *ring2Factory) PrinterForOptions(options *printers.PrintOptions) (printe // TODO: used by the custom column implementation and the name implementation, break this dependency decoders := []runtime.Decoder{f.clientAccessFactory.Decoder(true), unstructured.UnstructuredJSONScheme} encoder := f.clientAccessFactory.JSONEncoder() - return PrinterForOptions(mapper, typer, encoder, decoders, options) + return printerForOptions(mapper, typer, encoder, decoders, options) } func (f *ring2Factory) PrinterForMapping(options *printers.PrintOptions, mapping *meta.RESTMapping) (printers.ResourcePrinter, error) { diff --git a/pkg/kubectl/cmd/util/printing.go b/pkg/kubectl/cmd/util/printing.go index d978b9ef96c..02952707ed2 100644 --- a/pkg/kubectl/cmd/util/printing.go +++ b/pkg/kubectl/cmd/util/printing.go @@ -81,10 +81,10 @@ func ValidateOutputArgs(cmd *cobra.Command) error { return nil } -// PrinterForOptions returns the printer for the outputOptions (if given) or +// printerForOptions returns the printer for the outputOptions (if given) or // returns the default printer for the command. Requires that printer flags have // been added to cmd (see AddPrinterFlags). 
-func PrinterForOptions(mapper meta.RESTMapper, typer runtime.ObjectTyper, encoder runtime.Encoder, decoders []runtime.Decoder, options *printers.PrintOptions) (printers.ResourcePrinter, error) { +func printerForOptions(mapper meta.RESTMapper, typer runtime.ObjectTyper, encoder runtime.Encoder, decoders []runtime.Decoder, options *printers.PrintOptions) (printers.ResourcePrinter, error) { printer, err := printers.GetStandardPrinter(mapper, typer, encoder, decoders, *options) if err != nil { return nil, err From 8c9c2ee2d87303d408cf237591d478bbdfbbd3de Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Wed, 1 Nov 2017 14:37:43 -0400 Subject: [PATCH 215/794] update type-check to use printers.PrintHandler --- pkg/kubectl/cmd/util/printing.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/util/printing.go b/pkg/kubectl/cmd/util/printing.go index 02952707ed2..8d06f8d3713 100644 --- a/pkg/kubectl/cmd/util/printing.go +++ b/pkg/kubectl/cmd/util/printing.go @@ -93,7 +93,7 @@ func printerForOptions(mapper meta.RESTMapper, typer runtime.ObjectTyper, encode // we try to convert to HumanReadablePrinter, if return ok, it must be no generic // we execute AddHandlers() here before maybeWrapSortingPrinter so that we don't // need to convert to delegatePrinter again then invoke AddHandlers() - if humanReadablePrinter, ok := printer.(*printers.HumanReadablePrinter); ok { + if humanReadablePrinter, ok := printer.(printers.PrintHandler); ok { printersinternal.AddHandlers(humanReadablePrinter) } From 7f2c1d2b3be2eddb0118c5cbd5bfa146a96efd26 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 5 Dec 2017 13:14:06 -0600 Subject: [PATCH 216/794] Typo --- api/openapi-spec/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/openapi-spec/README.md b/api/openapi-spec/README.md index 59fbd90100d..167f7cec938 100644 --- a/api/openapi-spec/README.md +++ b/api/openapi-spec/README.md @@ -4,7 +4,7 @@ This folder contains an [OpenAPI 
specification][openapi] for Kubernetes API. ## Vendor Extensions -Kuberntes extends OpenAPI using these extensions. Note the version that +Kubernetes extends OpenAPI using these extensions. Note the version that extensions has been added. ### `x-kubernetes-group-version-kind` From 31332fa84a0928085200ba5a2e35118516ee2c48 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 4 Dec 2017 15:06:07 -0500 Subject: [PATCH 217/794] Drop using cloud provider to set host address feature As part of the larger plan to drop --cloud-provider and --cloud-config from kube-apiserver, we need to stop calling Cloud Provider API to find the external ip address when one is not specified on the command line. When ExternalHost is not specified, we check if AdvertiseAddress is specified and use that, if that is missing then we use os.Hostname(). When testing this feature, found a problem that when ExternalHost is specified, the port was not added in the generated URL. So fixed that as well. --- cmd/kube-apiserver/app/server.go | 14 +++++- pkg/kubeapiserver/options/BUILD | 2 - pkg/kubeapiserver/options/cloudprovider.go | 49 ------------------- .../src/k8s.io/apiserver/pkg/server/config.go | 16 +++--- 4 files changed, 22 insertions(+), 59 deletions(-) diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index f578ee677b4..51f4bfd2162 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -630,8 +630,18 @@ func defaultOptions(s *options.ServerRunOptions) error { if err := s.SecureServing.MaybeDefaultWithSelfSignedCerts(s.GenericServerRunOptions.AdvertiseAddress.String(), []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"}, []net.IP{apiServerServiceIP}); err != nil { return fmt.Errorf("error creating self-signed certificates: %v", err) } - if err := s.CloudProvider.DefaultExternalHost(s.GenericServerRunOptions); err != nil { - return fmt.Errorf("error setting the external host value: %v", err) + + if 
len(s.GenericServerRunOptions.ExternalHost) == 0 { + if len(s.GenericServerRunOptions.AdvertiseAddress) > 0 { + s.GenericServerRunOptions.ExternalHost = s.GenericServerRunOptions.AdvertiseAddress.String() + } else { + if hostname, err := os.Hostname(); err == nil { + s.GenericServerRunOptions.ExternalHost = hostname + } else { + return fmt.Errorf("error finding host name: %v", err) + } + } + glog.Infof("external host was not specified, using %v", s.GenericServerRunOptions.ExternalHost) } s.Authentication.ApplyAuthorization(s.Authorization) diff --git a/pkg/kubeapiserver/options/BUILD b/pkg/kubeapiserver/options/BUILD index 4c8d3d517cd..6d26b666571 100644 --- a/pkg/kubeapiserver/options/BUILD +++ b/pkg/kubeapiserver/options/BUILD @@ -21,7 +21,6 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/kubeapiserver/authenticator:go_default_library", "//pkg/kubeapiserver/authorizer:go_default_library", "//pkg/kubeapiserver/authorizer/modes:go_default_library", @@ -29,7 +28,6 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", diff --git a/pkg/kubeapiserver/options/cloudprovider.go b/pkg/kubeapiserver/options/cloudprovider.go index 310acb592ce..9b8119fb072 100644 --- a/pkg/kubeapiserver/options/cloudprovider.go +++ b/pkg/kubeapiserver/options/cloudprovider.go @@ -17,15 +17,7 @@ limitations under the License. 
package options import ( - "fmt" - "os" - - "github.com/golang/glog" "github.com/spf13/pflag" - - "k8s.io/api/core/v1" - genericoptions "k8s.io/apiserver/pkg/server/options" - "k8s.io/kubernetes/pkg/cloudprovider" ) type CloudProviderOptions struct { @@ -49,44 +41,3 @@ func (s *CloudProviderOptions) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") } - -func (s *CloudProviderOptions) DefaultExternalHost(genericoptions *genericoptions.ServerRunOptions) error { - if len(genericoptions.ExternalHost) != 0 { - return nil - } - - if cloudprovider.IsCloudProvider(s.CloudProvider) { - glog.Info("--external-hostname was not specified. Trying to get it from the cloud provider.") - - cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) - if err != nil { - return fmt.Errorf("%q cloud provider could not be initialized: %v", s.CloudProvider, err) - } - instances, supported := cloud.Instances() - if !supported { - return fmt.Errorf("%q cloud provider has no instances", s.CloudProvider) - } - hostname, err := os.Hostname() - if err != nil { - return fmt.Errorf("failed to get hostname: %v", err) - } - nodeName, err := instances.CurrentNodeName(hostname) - if err != nil { - return fmt.Errorf("failed to get NodeName from %q cloud provider: %v", s.CloudProvider, err) - } - addrs, err := instances.NodeAddresses(nodeName) - if err != nil { - return fmt.Errorf("failed to get external host address from %q cloud provider: %v", s.CloudProvider, err) - } else { - for _, addr := range addrs { - if addr.Type == v1.NodeExternalIP { - genericoptions.ExternalHost = addr.Address - glog.Warning("[Deprecated] Getting host address using cloud provider is " + - "now deprecated. 
Please use --external-hostname explicitly") - } - } - } - } - - return nil -} diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index 877071ad3b2..fe912a94d24 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -338,13 +338,17 @@ type CompletedConfig struct { // Complete fills in any fields not set that are required to have valid data and can be derived // from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig { - if len(c.ExternalAddress) == 0 && c.PublicAddress != nil { - hostAndPort := c.PublicAddress.String() - if c.ReadWritePort != 0 { - hostAndPort = net.JoinHostPort(hostAndPort, strconv.Itoa(c.ReadWritePort)) - } - c.ExternalAddress = hostAndPort + host := c.ExternalAddress + if host == "" && c.PublicAddress != nil { + host = c.PublicAddress.String() } + if !strings.Contains(host, ":") { + if c.ReadWritePort != 0 { + host = net.JoinHostPort(host, strconv.Itoa(c.ReadWritePort)) + } + } + c.ExternalAddress = host + if c.OpenAPIConfig != nil && c.OpenAPIConfig.SecurityDefinitions != nil { // Setup OpenAPI security: all APIs will have the same authentication for now. c.OpenAPIConfig.DefaultSecurity = []map[string][]string{} From 4d1319d111221c039378c27a7ec8af5ba1d5e90c Mon Sep 17 00:00:00 2001 From: Di Xu Date: Mon, 27 Nov 2017 11:19:05 +0800 Subject: [PATCH 218/794] use gnu-cp in building etcd image --- cluster/images/etcd/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile index b32a266f764..bf4e6505909 100644 --- a/cluster/images/etcd/Makefile +++ b/cluster/images/etcd/Makefile @@ -53,7 +53,7 @@ endif build: # Copy the content in this dir to the temp dir, # without copying the subdirectories. 
- find ./ -maxdepth 1 -type f | xargs cp -t $(TEMP_DIR) + find ./ -maxdepth 1 -type f | xargs -I {} cp {} $(TEMP_DIR) # Compile attachlease docker run -i -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \ From e830b5390b7ea6f23362c3dbd0f0359a53c5760c Mon Sep 17 00:00:00 2001 From: Jonathan MacMillan Date: Tue, 5 Dec 2017 19:00:16 -0800 Subject: [PATCH 219/794] Fix a comment in hack/lib/version.sh about which tags are used to get the version. --- hack/lib/version.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/lib/version.sh b/hack/lib/version.sh index 0df161e37c6..23111d51d3c 100644 --- a/hack/lib/version.sh +++ b/hack/lib/version.sh @@ -62,7 +62,7 @@ kube::version::get_version_vars() { fi fi - # Use git describe to find the version based on annotated tags. + # Use git describe to find the version based on tags. if [[ -n ${KUBE_GIT_VERSION-} ]] || KUBE_GIT_VERSION=$("${git[@]}" describe --tags --abbrev=14 "${KUBE_GIT_COMMIT}^{commit}" 2>/dev/null); then # This translates the "git describe" to an actual semver.org # compatible semantic version that looks something like this: From 0edcbc5044e8b95c5e913dd0eaf7d3422096b1d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20K=C5=82obuszewski?= Date: Wed, 15 Nov 2017 13:28:55 +0100 Subject: [PATCH 220/794] Update CHANGELOG.md I should've done that in https://github.com/kubernetes/kubernetes/pull/55466. The version was already bumped, this is just to keep track of what changed. --- cluster/addons/addon-manager/CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cluster/addons/addon-manager/CHANGELOG.md b/cluster/addons/addon-manager/CHANGELOG.md index 2d3a6011b19..39901846ecb 100644 --- a/cluster/addons/addon-manager/CHANGELOG.md +++ b/cluster/addons/addon-manager/CHANGELOG.md @@ -1,6 +1,9 @@ ### Version 8.4 (Thu November 30 2017 zou nengren @zouyee) - Update kubectl to v1.8.4. 
+### Version 6.5 (Wed October 15 2017 Daniel Kłobuszewski ) + - Support for HA masters. + ### Version 6.4-beta.2 (Mon June 12 2017 Jeff Grafton ) - Update kubectl to v1.6.4. - Refresh base images. From 6a7aca31297da840cf452676fc86bddbb613cb82 Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Thu, 23 Nov 2017 11:08:28 +0530 Subject: [PATCH 221/794] enable podpreset by default in local up cluster This commit enables PodPreset in Admission control and also for that to work on the apiserver level enables the API group settings.k8s.io/v1alpha1. --- hack/local-up-cluster.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 31399f8102d..0a3560d9610 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -421,7 +421,7 @@ function start_apiserver { # Admission Controllers to invoke prior to persisting objects in cluster # # ResourceQuota must come last, or a creation is recorded, but the pod may be forbidden. 
- ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount${security_admission},DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount${security_admission},DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,PodPreset # This is the default dir and filename where the apiserver will generate a self-signed cert # which should be able to be used as the CA to verify itself @@ -464,6 +464,13 @@ function start_apiserver { RUNTIME_CONFIG+="admissionregistration.k8s.io/v1alpha1" fi + if [[ ${ADMISSION_CONTROL} == *"PodPreset"* ]]; then + if [[ -n "${RUNTIME_CONFIG}" ]]; then + RUNTIME_CONFIG+="," + fi + RUNTIME_CONFIG+="settings.k8s.io/v1alpha1" + fi + runtime_config="" if [[ -n "${RUNTIME_CONFIG}" ]]; then runtime_config="--runtime-config=${RUNTIME_CONFIG}" From 5a3c2d80aad1542b332498c8ac60413de87e588a Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Wed, 6 Dec 2017 15:17:30 +0100 Subject: [PATCH 222/794] Limit number of pods listed as master liveness check. 
--- cluster/gce/util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 34d710ce7fe..1ad4b98f195 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -1532,7 +1532,7 @@ function check-cluster() { -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \ ${secure} \ --max-time 5 --fail \ - "https://${KUBE_MASTER_IP}/api/v1/pods" > "${curl_out}" 2>&1; do + "https://${KUBE_MASTER_IP}/api/v1/pods?limit=100" > "${curl_out}" 2>&1; do local elapsed=$(($(date +%s) - ${start_time})) if [[ ${elapsed} -gt ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} ]]; then echo -e "${color_red}Cluster failed to initialize within ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds.${color_norm}" >&2 From 4207b4fd2c663104b11b6acc2bd9cfededfe872e Mon Sep 17 00:00:00 2001 From: Connor Doyle Date: Wed, 6 Dec 2017 09:02:55 -0600 Subject: [PATCH 223/794] Add ConnorDoyle as approver in /pkg/kubelet/cm. --- pkg/kubelet/cm/OWNERS | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 pkg/kubelet/cm/OWNERS diff --git a/pkg/kubelet/cm/OWNERS b/pkg/kubelet/cm/OWNERS new file mode 100644 index 00000000000..307f9436c00 --- /dev/null +++ b/pkg/kubelet/cm/OWNERS @@ -0,0 +1,10 @@ +approvers: +- Random-Liu +- dchen1107 +- derekwaynecarr +- tallclair +- vishh +- yujuhong +- ConnorDoyle +reviewers: +- sig-node-reviewers From a0874620f1bcdb6e8cf50c5112844c9f376d0954 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Tue, 5 Dec 2017 15:05:56 -0800 Subject: [PATCH 224/794] Improve etcd-version-monitor metrics proxying, add etcd 3.1 gprc metric support --- cluster/images/etcd-version-monitor/BUILD | 2 + cluster/images/etcd-version-monitor/Makefile | 2 +- cluster/images/etcd-version-monitor/README.md | 14 +- .../etcd-version-monitor.go | 245 +++++++++++++----- 4 files changed, 190 insertions(+), 73 deletions(-) diff --git a/cluster/images/etcd-version-monitor/BUILD b/cluster/images/etcd-version-monitor/BUILD index bbc1137eb0a..a97642e25dc 100644 --- 
a/cluster/images/etcd-version-monitor/BUILD +++ b/cluster/images/etcd-version-monitor/BUILD @@ -19,6 +19,8 @@ go_library( deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library", + "//vendor/github.com/prometheus/client_model/go:go_default_library", "//vendor/github.com/prometheus/common/expfmt:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", ], diff --git a/cluster/images/etcd-version-monitor/Makefile b/cluster/images/etcd-version-monitor/Makefile index 09fb29607dc..e061b900151 100644 --- a/cluster/images/etcd-version-monitor/Makefile +++ b/cluster/images/etcd-version-monitor/Makefile @@ -20,7 +20,7 @@ ARCH:=amd64 GOLANG_VERSION?=1.8.3 REGISTRY?=gcr.io/google-containers -TAG?=0.1.0 +TAG?=0.1.1 IMAGE:=$(REGISTRY)/etcd-version-monitor:$(TAG) CURRENT_DIR:=$(pwd) TEMP_DIR:=$(shell mktemp -d) diff --git a/cluster/images/etcd-version-monitor/README.md b/cluster/images/etcd-version-monitor/README.md index bd000219fa7..3cfb675837d 100644 --- a/cluster/images/etcd-version-monitor/README.md +++ b/cluster/images/etcd-version-monitor/README.md @@ -1,11 +1,19 @@ # etcd-version-monitor -This is a tool for exporting metrics related to etcd version, like etcd -server's binary version, cluster version, and counts of different kinds of -gRPC calls (which is a characteristic of v3), etc. These metrics are in +This is a tool for exporting etcd metrics and supplementing them with etcd +server binary version and cluster version. These metrics are in prometheus format and can be scraped by a prometheus server. The metrics are exposed at the http://localhost:9101/metrics endpoint. 
+For etcd 3.1+, the +[go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus) +metrics format, which backward incompatibly replaces the 3.0 legacy grpc metric +format, is exposed in both the 3.1 format and in the 3.0. This preserves +backward compatibility. + +For etcd 3.1+, the `--metrics=extensive` must be set on etcd for grpc request +latency metrics (`etcd_grpc_unary_requests_duration_seconds`) to be exposed. + **RUNNING THE TOOL** To run this tool as a docker container: diff --git a/cluster/images/etcd-version-monitor/etcd-version-monitor.go b/cluster/images/etcd-version-monitor/etcd-version-monitor.go index f455cde3d37..d87f895730a 100644 --- a/cluster/images/etcd-version-monitor/etcd-version-monitor.go +++ b/cluster/images/etcd-version-monitor/etcd-version-monitor.go @@ -25,6 +25,8 @@ import ( "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "github.com/spf13/pflag" ) @@ -52,6 +54,11 @@ const ( // Initialize prometheus metrics to be exported. var ( + // Register all custom metrics with a dedicated registry to keep them separate. + customMetricRegistry = prometheus.NewRegistry() + + // Custom etcd version metric since etcd 3.2- does not export one. + // This will be replaced by https://github.com/coreos/etcd/pull/8960 in etcd 3.3. etcdVersion = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, @@ -59,15 +66,122 @@ var ( Help: "Etcd server's binary version", }, []string{"binary_version"}) - etcdGRPCRequestsTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "grpc_requests_total", - Help: "Counter of received grpc requests, labeled by the grpc method and service names", + + gatherer = &monitorGatherer{ + // Rewrite rules for etcd metrics that are exported by default. 
+ exported: map[string]*exportedMetric{ + // etcd 3.0 metric format for total grpc requests with renamed method and service labels. + "etcd_grpc_requests_total": { + rewriters: []rewriteFunc{ + func(mf *dto.MetricFamily) (*dto.MetricFamily, error) { + mf = deepCopyMetricFamily(mf) + renameLabels(mf, map[string]string{ + "grpc_method": "method", + "grpc_service": "service", + }) + return mf, nil + }, + }, + }, + // etcd 3.1+ metric format for total grpc requests. + "grpc_server_handled_total": { + rewriters: []rewriteFunc{ + // Export the metric exactly as-is. For 3.1+ metrics, we will + // pass all metrics directly through. + identity, + // Write to the etcd 3.0 metric format for backward compatibility. + func(mf *dto.MetricFamily) (*dto.MetricFamily, error) { + mf = deepCopyMetricFamily(mf) + renameMetric(mf, "etcd_grpc_requests_total") + renameLabels(mf, map[string]string{ + "grpc_method": "method", + "grpc_service": "service", + }) + return mf, nil + }, + }, + }, + + // etcd 3.0 metric format for grpc request latencies, + // rewritten to the etcd 3.1+ format. + "etcd_grpc_unary_requests_duration_seconds": { + rewriters: []rewriteFunc{ + func(mf *dto.MetricFamily) (*dto.MetricFamily, error) { + mf = deepCopyMetricFamily(mf) + renameMetric(mf, "grpc_server_handling_seconds") + tpeName := "grpc_type" + tpeVal := "unary" + for _, m := range mf.Metric { + m.Label = append(m.Label, &dto.LabelPair{Name: &tpeName, Value: &tpeVal}) + } + return mf, nil + }, + }, + }, + // etcd 3.1+ metric format for total grpc requests. + "grpc_server_handling_seconds": {}, }, - []string{"method", "service"}) + } ) +// monitorGatherer is a custom metric gatherer for prometheus that exports custom metrics +// defined by this monitor as well as rewritten etcd metrics. +type monitorGatherer struct { + exported map[string]*exportedMetric +} + +// exportedMetric identifies a metric that is exported and defines how it is rewritten before +// it is exported. 
+type exportedMetric struct { + rewriters []rewriteFunc +} + +// rewriteFunc rewrites metrics before they are exported. +type rewriteFunc func(mf *dto.MetricFamily) (*dto.MetricFamily, error) + +func (m *monitorGatherer) Gather() ([]*dto.MetricFamily, error) { + etcdMetrics, err := scrapeMetrics() + if err != nil { + return nil, err + } + exported, err := m.rewriteExportedMetrics(etcdMetrics) + if err != nil { + return nil, err + } + custom, err := customMetricRegistry.Gather() + if err != nil { + return nil, err + } + result := make([]*dto.MetricFamily, 0, len(exported)+len(custom)) + result = append(result, exported...) + result = append(result, custom...) + return result, nil +} + +func (m *monitorGatherer) rewriteExportedMetrics(metrics map[string]*dto.MetricFamily) ([]*dto.MetricFamily, error) { + results := make([]*dto.MetricFamily, 0, len(metrics)) + for n, mf := range metrics { + if e, ok := m.exported[n]; ok { + // Apply rewrite rules for metrics that have them. + if e.rewriters == nil { + results = append(results, mf) + } else { + for _, rewriter := range e.rewriters { + new, err := rewriter(mf) + if err != nil { + return nil, err + } + results = append(results, new) + } + } + } else { + // Proxy all metrics without any rewrite rules directly. + results = append(results, mf) + } + } + return results, nil +} + // Struct for unmarshalling the json response from etcd's /version endpoint. type EtcdVersion struct { BinaryVersion string `json:"etcdserver"` @@ -132,83 +246,78 @@ func getVersionPeriodically(stopCh <-chan struct{}) { } } -// Struct for storing labels for gRPC request types. -type GRPCRequestLabels struct { - Method string - Service string -} - -// Function for fetching etcd grpc request counts and feeding it to the prometheus metric. -func getGRPCRequestCount(lastRecordedCount *map[GRPCRequestLabels]float64) error { - // Create the get request for the etcd metrics endpoint. 
+// scrapeMetrics scrapes the prometheus metrics from the etcd metrics URI. +func scrapeMetrics() (map[string]*dto.MetricFamily, error) { req, err := http.NewRequest("GET", etcdMetricsScrapeURI, nil) if err != nil { - return fmt.Errorf("Failed to create GET request for etcd metrics: %v", err) + return nil, fmt.Errorf("Failed to create GET request for etcd metrics: %v", err) } // Send the get request and receive a response. client := &http.Client{} resp, err := client.Do(req) if err != nil { - return fmt.Errorf("Failed to receive GET response for etcd metrics: %v", err) + return nil, fmt.Errorf("Failed to receive GET response for etcd metrics: %v", err) } defer resp.Body.Close() // Parse the metrics in text format to a MetricFamily struct. var textParser expfmt.TextParser - metricFamilies, err := textParser.TextToMetricFamilies(resp.Body) - if err != nil { - return fmt.Errorf("Failed to parse etcd metrics: %v", err) - } - - // Look through the grpc requests metric family and update our promotheus metric. - for _, metric := range metricFamilies["etcd_grpc_requests_total"].GetMetric() { - var grpcRequestLabels GRPCRequestLabels - for _, label := range metric.GetLabel() { - if label.GetName() == "grpc_method" { - grpcRequestLabels.Method = label.GetValue() - } - if label.GetName() == "grpc_service" { - grpcRequestLabels.Service = label.GetValue() - } - } - if grpcRequestLabels.Method == "" || grpcRequestLabels.Service == "" { - return fmt.Errorf("Could not get value for grpc_method and/or grpc_service label") - } - - // Get last recorded value and new value of the metric and update it suitably. 
- previousMetricValue := 0.0 - if value, ok := (*lastRecordedCount)[grpcRequestLabels]; ok { - previousMetricValue = value - } - newMetricValue := metric.GetCounter().GetValue() - (*lastRecordedCount)[grpcRequestLabels] = newMetricValue - if newMetricValue >= previousMetricValue { - etcdGRPCRequestsTotal.With(prometheus.Labels{ - "method": grpcRequestLabels.Method, - "service": grpcRequestLabels.Service, - }).Add(newMetricValue - previousMetricValue) - } - } - return nil + return textParser.TextToMetricFamilies(resp.Body) } -// Function for periodically fetching etcd GRPC request counts. -func getGRPCRequestCountPeriodically(stopCh <-chan struct{}) { - // This map stores last recorded count for a given grpc request type. - lastRecordedCount := make(map[GRPCRequestLabels]float64) - for { - if err := getGRPCRequestCount(&lastRecordedCount); err != nil { - glog.Errorf("Failed to fetch etcd grpc request counts: %v", err) - } - select { - case <-stopCh: - break - case <-time.After(scrapeTimeout): +func renameMetric(mf *dto.MetricFamily, name string) { + mf.Name = &name +} + +func renameLabels(mf *dto.MetricFamily, nameMapping map[string]string) { + for _, m := range mf.Metric { + for _, lbl := range m.Label { + if alias, ok := nameMapping[*lbl.Name]; ok { + lbl.Name = &alias + } } } } +func identity(mf *dto.MetricFamily) (*dto.MetricFamily, error) { + return mf, nil +} + +func deepCopyMetricFamily(mf *dto.MetricFamily) *dto.MetricFamily { + r := &dto.MetricFamily{} + r.Name = mf.Name + r.Help = mf.Help + r.Type = mf.Type + r.Metric = make([]*dto.Metric, len(mf.Metric)) + for i, m := range mf.Metric { + r.Metric[i] = deepCopyMetric(m) + } + return r +} + +func deepCopyMetric(m *dto.Metric) *dto.Metric { + r := &dto.Metric{} + r.Label = make([]*dto.LabelPair, len(m.Label)) + for i, lp := range m.Label { + r.Label[i] = deepCopyLabelPair(lp) + } + r.Gauge = m.Gauge + r.Counter = m.Counter + r.Summary = m.Summary + r.Untyped = m.Untyped + r.Histogram = m.Histogram + 
r.TimestampMs = m.TimestampMs + return r +} + +func deepCopyLabelPair(lp *dto.LabelPair) *dto.LabelPair { + r := &dto.LabelPair{} + r.Name = lp.Name + r.Value = lp.Value + return r +} + func main() { // Register the commandline flags passed to the tool. registerFlags(pflag.CommandLine) @@ -216,18 +325,16 @@ func main() { pflag.Parse() // Register the metrics we defined above with prometheus. - prometheus.MustRegister(etcdVersion) - prometheus.MustRegister(etcdGRPCRequestsTotal) - prometheus.Unregister(prometheus.NewGoCollector()) + customMetricRegistry.MustRegister(etcdVersion) + customMetricRegistry.Unregister(prometheus.NewGoCollector()) // Spawn threads for periodically scraping etcd version metrics. stopCh := make(chan struct{}) defer close(stopCh) go getVersionPeriodically(stopCh) - go getGRPCRequestCountPeriodically(stopCh) // Serve our metrics on listenAddress/metricsPath. glog.Infof("Listening on: %v", listenAddress) - http.Handle(metricsPath, prometheus.UninstrumentedHandler()) + http.Handle(metricsPath, promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})) glog.Errorf("Stopped listening/serving metrics: %v", http.ListenAndServe(listenAddress, nil)) } From 1390b96913f37af0ffd153fbb686c75c56e4c445 Mon Sep 17 00:00:00 2001 From: Isaac Hollander McCreery Date: Wed, 15 Nov 2017 11:50:25 -0800 Subject: [PATCH 225/794] Add resource limits to prometheus-to-sd to guarantee qos --- .../addons/metadata-proxy/gce/metadata-proxy.yaml | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml index 767322549c0..a710f917cd4 100644 --- a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml +++ b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml @@ -38,19 +38,28 @@ spec: dnsPolicy: Default containers: - name: metadata-proxy - image: gcr.io/google_containers/metadata-proxy:v0.1.5 + image: gcr.io/google_containers/metadata-proxy:v0.1.6 
securityContext: privileged: true + # Request and limit resources to get guaranteed QoS. resources: requests: - memory: "32Mi" + memory: "25Mi" cpu: "30m" limits: - memory: "32Mi" + memory: "25Mi" cpu: "30m" # BEGIN_PROMETHEUS_TO_SD - name: prometheus-to-sd-exporter image: gcr.io/google_containers/prometheus-to-sd:v0.2.2 + # Request and limit resources to get guaranteed QoS. + resources: + requests: + memory: "20Mi" + cpu: "2m" + limits: + memory: "20Mi" + cpu: "2m" command: - /monitor - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons From 06c84b9183e9064dcd3d654476823e99b56c4a9f Mon Sep 17 00:00:00 2001 From: Shashwat shagun Date: Wed, 6 Dec 2017 23:23:19 +0530 Subject: [PATCH 226/794] Update kube-up.sh --- cluster/kube-up.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/kube-up.sh b/cluster/kube-up.sh index 8a51f8a4555..a2813f99e2f 100755 --- a/cluster/kube-up.sh +++ b/cluster/kube-up.sh @@ -34,7 +34,7 @@ source "${KUBE_ROOT}/cluster/kube-util.sh" DEPRECATED_PROVIDERS=( "centos" - "libvert-coreos" + "libvirt-coreos" "local" "openstack-heat" "photon-controller" From 7b6a1d3e7725ad1ab59286cbaf5afa89915a0dbe Mon Sep 17 00:00:00 2001 From: Cole Wagner Date: Wed, 6 Dec 2017 14:21:00 -0800 Subject: [PATCH 227/794] Fix conformance testdata OWNERS file. --- test/conformance/testdata/OWNERS | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/conformance/testdata/OWNERS b/test/conformance/testdata/OWNERS index 309935c356e..8ffe82de9ad 100644 --- a/test/conformance/testdata/OWNERS +++ b/test/conformance/testdata/OWNERS @@ -1,8 +1,6 @@ # To be owned by sig-architecture. -# TODO(mml): Exclude parent owners once -# https://github.com/kubernetes/test-infra/issues/5197 is implemented. 
options: - - no_parent_owners: true + no_parent_owners: true reviewers: - bgrant0607 - smarterclayton From 3e7e4ab39724199b8a1e7843d8ce58c036bc88c2 Mon Sep 17 00:00:00 2001 From: WanLinghao Date: Thu, 7 Dec 2017 10:34:53 +0800 Subject: [PATCH 228/794] old test file will create a leak file in current directory. this patch fix this. modified: pkg/kubelet/cm/deviceplugin/manager_test.go --- pkg/kubelet/cm/deviceplugin/manager_test.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index 9a74ec93b50..d6d6d0b04e9 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -19,6 +19,8 @@ package deviceplugin import ( "flag" "fmt" + "io/ioutil" + "os" "reflect" "sync/atomic" "testing" @@ -260,7 +262,12 @@ func TestCheckpoint(t *testing.T) { resourceName1 := "domain1.com/resource1" resourceName2 := "domain2.com/resource2" + as := assert.New(t) + tmpDir, err := ioutil.TempDir("", "checkpoint") + as.Nil(err) + defer os.RemoveAll(tmpDir) testManager := &ManagerImpl{ + socketdir: tmpDir, allDevices: make(map[string]sets.String), allocatedDevices: make(map[string]sets.String), podDevices: make(podDevices), @@ -298,8 +305,7 @@ func TestCheckpoint(t *testing.T) { expectedAllocatedDevices := testManager.podDevices.devices() expectedAllDevices := testManager.allDevices - err := testManager.writeCheckpoint() - as := assert.New(t) + err = testManager.writeCheckpoint() as.Nil(err) testManager.podDevices = make(podDevices) @@ -385,7 +391,11 @@ func TestPodContainerDeviceAllocation(t *testing.T) { nodeInfo := &schedulercache.NodeInfo{} nodeInfo.SetNode(cachedNode) + tmpDir, err := ioutil.TempDir("", "checkpoint") + as.Nil(err) + defer os.RemoveAll(tmpDir) testManager := &ManagerImpl{ + socketdir: tmpDir, callback: monitorCallback, allDevices: make(map[string]sets.String), allocatedDevices: 
make(map[string]sets.String), @@ -485,7 +495,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) { } podsStub.updateActivePods([]*v1.Pod{pod}) - err := testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod}) + err = testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod}) as.Nil(err) runContainerOpts := testManager.GetDeviceRunContainerOptions(pod, &pod.Spec.Containers[0]) as.NotNil(runContainerOpts) From d66d8f053dabb141ebfa89224d0ccc76354ceb4c Mon Sep 17 00:00:00 2001 From: Di Xu Date: Thu, 7 Dec 2017 13:30:20 +0800 Subject: [PATCH 229/794] refactor getting uninitialized in kubectl get --- pkg/kubectl/cmd/resource/get.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/pkg/kubectl/cmd/resource/get.go b/pkg/kubectl/cmd/resource/get.go index b3cd8eb2580..a7a442ff9ef 100644 --- a/pkg/kubectl/cmd/resource/get.go +++ b/pkg/kubectl/cmd/resource/get.go @@ -65,6 +65,8 @@ type GetOptions struct { ShowKind bool LabelColumns []string Export bool + + IncludeUninitialized bool } var ( @@ -190,9 +192,13 @@ func (options *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args options.ExplicitNamespace = false } + options.IncludeUninitialized = cmdutil.ShouldIncludeUninitialized(cmd, false) + switch { case options.Watch || options.WatchOnly: - + // include uninitialized objects when watching on a single object + // unless explicitly set --include-uninitialized=false + options.IncludeUninitialized = cmdutil.ShouldIncludeUninitialized(cmd, len(args) == 2) default: if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(options.Filenames) { fmt.Fprint(options.ErrOut, "You must specify the type of resource to get. ", cmdutil.ValidResourceTypeList(f)) @@ -240,7 +246,7 @@ func (options *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []str FieldSelectorParam(options.FieldSelector). ExportParam(options.Export). RequestChunksOf(options.ChunkSize). 
- IncludeUninitialized(cmdutil.ShouldIncludeUninitialized(cmd, false)). // TODO: this needs to be better factored + IncludeUninitialized(options.IncludeUninitialized). ResourceTypeOrNameArgs(true, args...). ContinueOnError(). Latest(). @@ -442,11 +448,6 @@ func (options *GetOptions) raw(f cmdutil.Factory) error { // watch starts a client-side watch of one or more resources. // TODO: remove the need for arguments here. func (options *GetOptions) watch(f cmdutil.Factory, cmd *cobra.Command, args []string) error { - // TODO: this could be better factored - // include uninitialized objects when watching on a single object - // unless explicitly set --include-uninitialized=false - includeUninitialized := cmdutil.ShouldIncludeUninitialized(cmd, len(args) == 2) - r := f.NewBuilder(). Unstructured(). NamespaceParam(options.Namespace).DefaultNamespace().AllNamespaces(options.AllNamespaces). @@ -455,7 +456,7 @@ func (options *GetOptions) watch(f cmdutil.Factory, cmd *cobra.Command, args []s FieldSelectorParam(options.FieldSelector). ExportParam(options.Export). RequestChunksOf(options.ChunkSize). - IncludeUninitialized(includeUninitialized). + IncludeUninitialized(options.IncludeUninitialized). ResourceTypeOrNameArgs(true, args...). SingleResourceType(). Latest(). 
From 96a8ff6bd63105e6ac7f8d8daf1be8a6dd4118e3 Mon Sep 17 00:00:00 2001 From: linweibin Date: Wed, 6 Dec 2017 15:15:35 +0800 Subject: [PATCH 230/794] Remove unused code in pkg/api/,pkg/apis/ --- pkg/api/resource/BUILD | 1 - pkg/api/resource/helpers_test.go | 20 ------------------- pkg/api/testing/BUILD | 1 - pkg/api/testing/serialization_test.go | 13 ------------ pkg/apis/core/validation/validation.go | 9 --------- .../extensions/validation/validation_test.go | 7 ------- 6 files changed, 51 deletions(-) diff --git a/pkg/api/resource/BUILD b/pkg/api/resource/BUILD index f31dbd9db26..47850ca889a 100644 --- a/pkg/api/resource/BUILD +++ b/pkg/api/resource/BUILD @@ -37,6 +37,5 @@ go_test( deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], ) diff --git a/pkg/api/resource/helpers_test.go b/pkg/api/resource/helpers_test.go index 7d55c18d80b..b8917a58483 100644 --- a/pkg/api/resource/helpers_test.go +++ b/pkg/api/resource/helpers_test.go @@ -18,10 +18,8 @@ package resource import ( "testing" - "time" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "k8s.io/kubernetes/pkg/apis/core" ) @@ -62,21 +60,3 @@ func TestDefaultResourceHelpers(t *testing.T) { t.Errorf("expected %v, actual %v", resource.BinarySI, resourceList.Memory().Format) } } - -func newPod(now metav1.Time, ready bool, beforeSec int) *api.Pod { - conditionStatus := api.ConditionFalse - if ready { - conditionStatus = api.ConditionTrue - } - return &api.Pod{ - Status: api.PodStatus{ - Conditions: []api.PodCondition{ - { - Type: api.PodReady, - LastTransitionTime: metav1.NewTime(now.Time.Add(-1 * time.Duration(beforeSec) * time.Second)), - Status: conditionStatus, - }, - }, - }, - } -} diff --git a/pkg/api/testing/BUILD b/pkg/api/testing/BUILD index 9c04ee34584..8d52ce4c2b0 100644 --- a/pkg/api/testing/BUILD +++ b/pkg/api/testing/BUILD @@ 
-85,7 +85,6 @@ go_test( "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/json-iterator/go:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/api/testing/serialization_test.go b/pkg/api/testing/serialization_test.go index d47fad73fd7..64f6a7d7ec3 100644 --- a/pkg/api/testing/serialization_test.go +++ b/pkg/api/testing/serialization_test.go @@ -23,10 +23,8 @@ import ( "io/ioutil" "math/rand" "reflect" - "strings" "testing" - "github.com/golang/protobuf/proto" jsoniter "github.com/json-iterator/go" "k8s.io/api/core/v1" @@ -66,17 +64,6 @@ func fuzzInternalObject(t *testing.T, forVersion schema.GroupVersion, item runti return item } -// dataAsString returns the given byte array as a string; handles detecting -// protocol buffers. -func dataAsString(data []byte) string { - dataString := string(data) - if !strings.HasPrefix(dataString, "{") { - dataString = "\n" + hex.Dump(data) - proto.NewBuffer(make([]byte, 0, 1024)).DebugPrint("decoded object", data) - } - return dataString -} - func Convert_v1beta1_ReplicaSet_to_api_ReplicationController(in *v1beta1.ReplicaSet, out *api.ReplicationController, s conversion.Scope) error { intermediate1 := &extensions.ReplicaSet{} if err := k8s_v1beta1.Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, intermediate1, s); err != nil { diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 655d0bf2163..708165255f1 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -212,15 +212,6 @@ func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *f // value that were not valid. Otherwise this returns an empty list or nil. 
type ValidateNameFunc apimachineryvalidation.ValidateNameFunc -// maskTrailingDash replaces the final character of a string with a subdomain safe -// value if is a dash. -func maskTrailingDash(name string) string { - if strings.HasSuffix(name, "-") { - return name[:len(name)-2] + "a" - } - return name -} - // ValidatePodName can be used to check whether the given pod name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. diff --git a/pkg/apis/extensions/validation/validation_test.go b/pkg/apis/extensions/validation/validation_test.go index 4a3818801ff..d3a0d0fce1d 100644 --- a/pkg/apis/extensions/validation/validation_test.go +++ b/pkg/apis/extensions/validation/validation_test.go @@ -1232,11 +1232,6 @@ func TestValidateDeployment(t *testing.T) { } } -func int64p(i int) *int64 { - i64 := int64(i) - return &i64 -} - func TestValidateDeploymentStatus(t *testing.T) { collisionCount := int32(-3) tests := []struct { @@ -1473,8 +1468,6 @@ func TestValidateDeploymentRollback(t *testing.T) { } } -type ingressRules map[string]string - func TestValidateIngress(t *testing.T) { defaultBackend := extensions.IngressBackend{ ServiceName: "default-backend", From 5c7a1a2c5d2d6add3ad0913ef6c6ce07d254a2dc Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 7 Dec 2017 06:24:20 +0000 Subject: [PATCH 231/794] enable flexvolume on Windows --- pkg/kubelet/volume_host.go | 7 ++++++- pkg/volume/flexvolume/plugin.go | 7 ++++++- pkg/volume/util.go | 9 +++++++++ pkg/volume/util_test.go | 31 +++++++++++++++++++++++++++++++ 4 files changed, 52 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/volume_host.go b/pkg/kubelet/volume_host.go index ae7847bc6fb..de71e3c4b0f 100644 --- a/pkg/kubelet/volume_host.go +++ b/pkg/kubelet/volume_host.go @@ -19,6 +19,7 @@ package kubelet import ( "fmt" "net" + "runtime" "github.com/golang/glog" @@ -91,7 +92,11 @@ func (kvh *kubeletVolumeHost) GetVolumeDevicePluginDir(pluginName string) 
string } func (kvh *kubeletVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string { - return kvh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName) + dir := kvh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName) + if runtime.GOOS == "windows" { + dir = volume.GetWindowsPath(dir) + } + return dir } func (kvh *kubeletVolumeHost) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string { diff --git a/pkg/volume/flexvolume/plugin.go b/pkg/volume/flexvolume/plugin.go index ba46e2c52d1..6de4614a936 100644 --- a/pkg/volume/flexvolume/plugin.go +++ b/pkg/volume/flexvolume/plugin.go @@ -19,6 +19,7 @@ package flexvolume import ( "fmt" "path" + "runtime" "strings" "sync" @@ -100,7 +101,11 @@ func (plugin *flexVolumePlugin) Init(host volume.VolumeHost) error { func (plugin *flexVolumePlugin) getExecutable() string { parts := strings.Split(plugin.driverName, "/") execName := parts[len(parts)-1] - return path.Join(plugin.execPath, execName) + execPath := path.Join(plugin.execPath, execName) + if runtime.GOOS == "windows" { + execPath = volume.GetWindowsPath(execPath) + } + return execPath } // Name is part of the volume.VolumePlugin interface. 
diff --git a/pkg/volume/util.go b/pkg/volume/util.go index e2890516980..0baea012073 100644 --- a/pkg/volume/util.go +++ b/pkg/volume/util.go @@ -495,3 +495,12 @@ func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, req } return true } + +// GetWindowsPath get a windows path +func GetWindowsPath(path string) string { + windowsPath := strings.Replace(path, "/", "\\", -1) + if strings.HasPrefix(windowsPath, "\\") { + windowsPath = "c:" + windowsPath + } + return windowsPath +} diff --git a/pkg/volume/util_test.go b/pkg/volume/util_test.go index c902fde2a7a..273722a0c3d 100644 --- a/pkg/volume/util_test.go +++ b/pkg/volume/util_test.go @@ -869,3 +869,34 @@ func TestValidateZone(t *testing.T) { } } } + +func TestGetWindowsPath(t *testing.T) { + tests := []struct { + path string + expectedPath string + }{ + { + path: `/var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4/volumes/kubernetes.io~disk`, + expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`, + }, + { + path: `\var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`, + expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`, + }, + { + path: `/`, + expectedPath: `c:\`, + }, + { + path: ``, + expectedPath: ``, + }, + } + + for _, test := range tests { + result := GetWindowsPath(test.path) + if result != test.expectedPath { + t.Errorf("GetWindowsPath(%v) returned (%v), want (%v)", test.path, result, test.expectedPath) + } + } +} From 6bc18d995804b744eacaae261f5986c98145f590 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Thu, 7 Dec 2017 13:08:04 +0800 Subject: [PATCH 232/794] Check both name and ports for azure health probes --- pkg/cloudprovider/providers/azure/BUILD | 1 + .../providers/azure/azure_loadbalancer.go | 2 +- .../azure/azure_loadbalancer_test.go | 99 +++++++++++++++++++ 3 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 
pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index cd4f0a67db0..a72de9f2a6d 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -59,6 +59,7 @@ go_library( go_test( name = "go_default_test", srcs = [ + "azure_loadbalancer_test.go", "azure_test.go", "azure_util_test.go", ], diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index c7fd51bc997..c1bc5bf972f 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -1193,7 +1193,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want func findProbe(probes []network.Probe, probe network.Probe) bool { for _, existingProbe := range probes { - if strings.EqualFold(*existingProbe.Name, *probe.Name) { + if strings.EqualFold(*existingProbe.Name, *probe.Name) && *existingProbe.Port == *probe.Port { return true } } diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go new file mode 100644 index 00000000000..e09ec585fad --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go @@ -0,0 +1,99 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/go-autorest/autorest/to" + "github.com/stretchr/testify/assert" +) + +func TestFindProbe(t *testing.T) { + tests := []struct { + msg string + existingProbe []network.Probe + curProbe network.Probe + expected bool + }{ + { + msg: "empty existing probes should return false", + expected: false, + }, + { + msg: "probe names match while ports unmatch should return false", + existingProbe: []network.Probe{ + { + Name: to.StringPtr("httpProbe"), + ProbePropertiesFormat: &network.ProbePropertiesFormat{ + Port: to.Int32Ptr(1), + }, + }, + }, + curProbe: network.Probe{ + Name: to.StringPtr("httpProbe"), + ProbePropertiesFormat: &network.ProbePropertiesFormat{ + Port: to.Int32Ptr(2), + }, + }, + expected: false, + }, + { + msg: "probe ports match while names unmatch should return false", + existingProbe: []network.Probe{ + { + Name: to.StringPtr("probe1"), + ProbePropertiesFormat: &network.ProbePropertiesFormat{ + Port: to.Int32Ptr(1), + }, + }, + }, + curProbe: network.Probe{ + Name: to.StringPtr("probe2"), + ProbePropertiesFormat: &network.ProbePropertiesFormat{ + Port: to.Int32Ptr(1), + }, + }, + expected: false, + }, + { + msg: "both probe ports and names match should return true", + existingProbe: []network.Probe{ + { + Name: to.StringPtr("matchName"), + ProbePropertiesFormat: &network.ProbePropertiesFormat{ + Port: to.Int32Ptr(1), + }, + }, + }, + curProbe: network.Probe{ + Name: to.StringPtr("matchName"), + ProbePropertiesFormat: &network.ProbePropertiesFormat{ + Port: to.Int32Ptr(1), + }, + }, + expected: true, + }, + } + + for i, test := range tests { + findResult := findProbe(test.existingProbe, test.curProbe) + assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) + } +} From e6e3b756c6cd85afbd72e27c69df7657ede8ff34 Mon Sep 17 00:00:00 2001 From: wenjgao Date: Thu, 7 Dec 2017 19:34:44 +0800 Subject: [PATCH 
233/794] fix rbd volume plugin ConstructVolume --- pkg/volume/rbd/rbd.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index fcdb4fa5ee2..26903aafd8f 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -81,7 +81,7 @@ func (plugin *rbdPlugin) GetPluginName() string { } func (plugin *rbdPlugin) GetVolumeName(spec *volume.Spec) (string, error) { - mon, err := getVolumeSourceMonitors(spec) + pool, err := getVolumeSourcePool(spec) if err != nil { return "", err } @@ -92,7 +92,7 @@ func (plugin *rbdPlugin) GetVolumeName(spec *volume.Spec) (string, error) { return fmt.Sprintf( "%v:%v", - mon, + pool, img), nil } @@ -346,11 +346,22 @@ func (plugin *rbdPlugin) newUnmounterInternal(volName string, podUID types.UID, } func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { + mounter := plugin.host.GetMounter(plugin.GetPluginName()) + pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) + sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir) + if err != nil { + return nil, err + } + s := dstrings.Split(sourceName, "-image-") + if len(s) != 2 { + return nil, fmt.Errorf("sourceName %s wrong, should be pool+\"-image-\"+imageName", sourceName) + } rbdVolume := &v1.Volume{ Name: volumeName, VolumeSource: v1.VolumeSource{ RBD: &v1.RBDVolumeSource{ - CephMonitors: []string{}, + RBDPool: s[0], + RBDImage: s[1], }, }, } From d2cbc37c05a66e8213714ddd09c8a1f45545eb5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20K=C5=82obuszewski?= Date: Thu, 7 Dec 2017 14:23:05 +0100 Subject: [PATCH 234/794] Bump fluentd-gcp version --- cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index 7f6a47deb4b..dd516db77e0 100644 --- 
a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -1,13 +1,13 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: - name: fluentd-gcp-v2.0.10 + name: fluentd-gcp-v2.0.11 namespace: kube-system labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v2.0.10 + version: v2.0.11 spec: updateStrategy: type: RollingUpdate @@ -16,7 +16,7 @@ spec: labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" - version: v2.0.10 + version: v2.0.11 # This annotation ensures that fluentd does not get evicted if the node # supports critical pod annotation based priority scheme. # Note that this does not guarantee admission on the nodes (#40573). @@ -27,7 +27,7 @@ spec: dnsPolicy: Default containers: - name: fluentd-gcp - image: gcr.io/google-containers/fluentd-gcp:2.0.10 + image: gcr.io/google-containers/fluentd-gcp:2.0.11 env: - name: FLUENTD_ARGS value: --no-supervisor -q From 90e7b5ff4fa5dc481ac4367d743da0d9d3fd3bcb Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Tue, 5 Dec 2017 16:04:12 +0100 Subject: [PATCH 235/794] Decrease the number of completions for flaky test --- test/e2e/apps/job.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index 4f65be150c8..8a4b8c0caa7 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -77,12 +77,15 @@ var _ = SIGDescribe("Job", func() { // Worst case analysis: 15 failures, each taking 1 minute to // run due to some slowness, 1 in 2^15 chance of happening, // causing test flake. Should be very rare. - job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, 999) + // With the introduction of backoff limit and high failure rate this + // is hitting its timeout, the 3 is a reasonable that should make this + // test less flaky, for now. 
+ job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, 3, nil, 999) job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job) Expect(err).NotTo(HaveOccurred()) By("Ensuring job reaches completions") - err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions) + err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions) Expect(err).NotTo(HaveOccurred()) }) From e256c28beb44de41d06fa99efa9fecd0e93b55bd Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Thu, 7 Dec 2017 12:22:13 -0800 Subject: [PATCH 236/794] Convert scheduler_perf tests to use subtest. Combine four separate tests into a table-driven test that uses subtests to logically organize the tests. --- .../scheduler_perf/scheduler_bench_test.go | 36 ++++++++----------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/test/integration/scheduler_perf/scheduler_bench_test.go b/test/integration/scheduler_perf/scheduler_bench_test.go index d848219ee4c..ee2ca2fbf1b 100644 --- a/test/integration/scheduler_perf/scheduler_bench_test.go +++ b/test/integration/scheduler_perf/scheduler_bench_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package benchmark import ( + "fmt" "testing" "time" @@ -27,28 +28,19 @@ import ( "github.com/golang/glog" ) -// BenchmarkScheduling100Nodes0Pods benchmarks the scheduling rate -// when the cluster has 100 nodes and 0 scheduled pods -func BenchmarkScheduling100Nodes0Pods(b *testing.B) { - benchmarkScheduling(100, 0, b) -} - -// BenchmarkScheduling100Nodes1000Pods benchmarks the scheduling rate -// when the cluster has 100 nodes and 1000 scheduled pods -func BenchmarkScheduling100Nodes1000Pods(b *testing.B) { - benchmarkScheduling(100, 1000, b) -} - -// BenchmarkScheduling1000Nodes0Pods benchmarks the scheduling rate -// when the cluster has 1000 nodes and 0 scheduled pods -func BenchmarkScheduling1000Nodes0Pods(b *testing.B) { - benchmarkScheduling(1000, 0, b) -} - -// BenchmarkScheduling1000Nodes1000Pods benchmarks the scheduling rate -// when the cluster has 1000 nodes and 1000 scheduled pods -func BenchmarkScheduling1000Nodes1000Pods(b *testing.B) { - benchmarkScheduling(1000, 1000, b) +// BenchmarkScheduling benchmarks the scheduling rate when the cluster has +// various quantities of nodes and scheduled pods. 
+func BenchmarkScheduling(b *testing.B) { + tests := []struct{ nodes, pods int }{ + {nodes: 100, pods: 0}, + {nodes: 100, pods: 1000}, + {nodes: 1000, pods: 0}, + {nodes: 1000, pods: 1000}, + } + for _, test := range tests { + name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.pods) + b.Run(name, func(b *testing.B) { benchmarkScheduling(test.nodes, test.pods, b) }) + } } // benchmarkScheduling benchmarks scheduling rate with specific number of nodes From 3ef37c038e6815604cbed54ee1f6849942a7fc59 Mon Sep 17 00:00:00 2001 From: Derek Carr Date: Thu, 7 Dec 2017 15:33:22 -0500 Subject: [PATCH 237/794] HugePages feature is beta in 1.10 release --- pkg/features/kube_features.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 80599039bc4..9dbf74f778d 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -153,7 +153,7 @@ const ( CPUManager utilfeature.Feature = "CPUManager" // owner: @derekwaynecarr - // alpha: v1.8 + // beta: v1.10 // // Enable pods to consume pre-allocated huge pages of varying page sizes HugePages utilfeature.Feature = "HugePages" @@ -235,7 +235,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta}, PersistentLocalVolumes: {Default: false, PreRelease: utilfeature.Alpha}, LocalStorageCapacityIsolation: {Default: false, PreRelease: utilfeature.Alpha}, - HugePages: {Default: false, PreRelease: utilfeature.Alpha}, + HugePages: {Default: true, PreRelease: utilfeature.Beta}, DebugContainers: {Default: false, PreRelease: utilfeature.Alpha}, PodPriority: {Default: false, PreRelease: utilfeature.Alpha}, EnableEquivalenceClassCache: {Default: false, PreRelease: utilfeature.Alpha}, From b75e97443d65394afabe5672303e5674b97063a9 Mon Sep 17 00:00:00 2001 From: Nick Platt Date: Thu, 7 Dec 2017 20:53:25 -0600 Subject: [PATCH 238/794] Reword 
double negative; link to readme --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 87840067880..d0bc7178e61 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ $ cd kubernetes $ make quick-release ``` -If you are less impatient, head over to the [developer's documentation]. +For the full story, head over to the [developer's documentation]. ## Support @@ -71,7 +71,7 @@ That said, if you have questions, reach out to us [communication]: https://github.com/kubernetes/community/blob/master/communication.md [community repository]: https://github.com/kubernetes/community [containerized applications]: https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/ -[developer's documentation]: https://github.com/kubernetes/community/tree/master/contributors/devel +[developer's documentation]: https://github.com/kubernetes/community/tree/master/contributors/devel#readme [Docker environment]: https://docs.docker.com/engine [Go environment]: https://golang.org/doc/install [GoDoc]: https://godoc.org/k8s.io/kubernetes @@ -81,6 +81,6 @@ That said, if you have questions, reach out to us [Scalable Microservices with Kubernetes]: https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615 [Submit Queue]: http://submit-queue.k8s.io/#/ci [Submit Queue Widget]: http://submit-queue.k8s.io/health.svg?v=1 -[troubleshooting guide]: https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/ +[troubleshooting guide]: https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/ [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/README.md?pixel)]() From 65efeee64f772e0f38037e91a677138a335a7570 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Fri, 8 Dec 2017 16:03:56 +0800 Subject: [PATCH 239/794] Remove unused ScrubDNS interface from cloudprovider --- pkg/cloudprovider/cloud.go | 2 -- pkg/cloudprovider/providers/aws/aws.go | 5 --- 
pkg/cloudprovider/providers/azure/azure.go | 5 --- .../providers/cloudstack/cloudstack.go | 5 --- pkg/cloudprovider/providers/fake/fake.go | 5 --- pkg/cloudprovider/providers/gce/gce.go | 15 -------- pkg/cloudprovider/providers/gce/gce_test.go | 36 ------------------- .../providers/openstack/openstack.go | 5 --- pkg/cloudprovider/providers/ovirt/ovirt.go | 5 --- pkg/cloudprovider/providers/photon/photon.go | 5 --- .../providers/vsphere/vsphere.go | 5 --- 11 files changed, 93 deletions(-) diff --git a/pkg/cloudprovider/cloud.go b/pkg/cloudprovider/cloud.go index 2e46d47d339..2f5cbbf1a1a 100644 --- a/pkg/cloudprovider/cloud.go +++ b/pkg/cloudprovider/cloud.go @@ -44,8 +44,6 @@ type Interface interface { Routes() (Routes, bool) // ProviderName returns the cloud provider ID. ProviderName() string - // ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods. - ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) // HasClusterID returns true if a ClusterID is required and set HasClusterID() bool } diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index ad55f5383d7..41a4a4b5041 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -1135,11 +1135,6 @@ func (c *Cloud) ProviderName() string { return ProviderName } -// ScrubDNS filters DNS settings for pods. -func (c *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { - return nameservers, searches -} - // LoadBalancer returns an implementation of LoadBalancer for Amazon Web Services. 
func (c *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { return c, true diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index ffc2030a8fa..d60e11423f8 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -489,11 +489,6 @@ func (az *Cloud) Routes() (cloudprovider.Routes, bool) { return az, true } -// ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods. -func (az *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { - return nameservers, searches -} - // HasClusterID returns true if the cluster has a clusterID func (az *Cloud) HasClusterID() bool { return true diff --git a/pkg/cloudprovider/providers/cloudstack/cloudstack.go b/pkg/cloudprovider/providers/cloudstack/cloudstack.go index e4979a75c42..89d2c1f01f2 100644 --- a/pkg/cloudprovider/providers/cloudstack/cloudstack.go +++ b/pkg/cloudprovider/providers/cloudstack/cloudstack.go @@ -181,11 +181,6 @@ func (cs *CSCloud) ProviderName() string { return ProviderName } -// ScrubDNS filters DNS settings for pods. -func (cs *CSCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { - return nameservers, searches -} - // HasClusterID returns true if the cluster has a clusterID func (cs *CSCloud) HasClusterID() bool { return true diff --git a/pkg/cloudprovider/providers/fake/fake.go b/pkg/cloudprovider/providers/fake/fake.go index 5efc09a11bf..d040c51df65 100644 --- a/pkg/cloudprovider/providers/fake/fake.go +++ b/pkg/cloudprovider/providers/fake/fake.go @@ -110,11 +110,6 @@ func (f *FakeCloud) ProviderName() string { return f.Provider } -// ScrubDNS filters DNS settings for pods. 
-func (f *FakeCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { - return nameservers, searches -} - // HasClusterID returns true if the cluster has a clusterID func (f *FakeCloud) HasClusterID() bool { return true diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 8447092f164..95d4f33afb5 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -20,7 +20,6 @@ import ( "fmt" "io" "net/http" - "regexp" "runtime" "strconv" "strings" @@ -667,20 +666,6 @@ func (gce *GCECloud) updateNodeZones(prevNode, newNode *v1.Node) { } } -// Known-useless DNS search path. -var uselessDNSSearchRE = regexp.MustCompile(`^[0-9]+.google.internal.$`) - -// ScrubDNS filters DNS settings for pods. -func (gce *GCECloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { - // GCE has too many search paths by default. Filter the ones we know are useless. - for _, s := range searches { - if !uselessDNSSearchRE.MatchString(s) { - srchOut = append(srchOut, s) - } - } - return nameservers, srchOut -} - // HasClusterID returns true if the cluster has a clusterID func (gce *GCECloud) HasClusterID() bool { return true diff --git a/pkg/cloudprovider/providers/gce/gce_test.go b/pkg/cloudprovider/providers/gce/gce_test.go index e3020acb7db..d4201b54bd4 100644 --- a/pkg/cloudprovider/providers/gce/gce_test.go +++ b/pkg/cloudprovider/providers/gce/gce_test.go @@ -175,42 +175,6 @@ func TestComparingHostURLs(t *testing.T) { } } -func TestScrubDNS(t *testing.T) { - tcs := []struct { - nameserversIn []string - searchesIn []string - nameserversOut []string - searchesOut []string - }{ - { - nameserversIn: []string{"1.2.3.4", "5.6.7.8"}, - nameserversOut: []string{"1.2.3.4", "5.6.7.8"}, - }, - { - searchesIn: []string{"c.prj.internal.", "12345678910.google.internal.", "google.internal."}, - searchesOut: []string{"c.prj.internal.", "google.internal."}, - }, - { - searchesIn: 
[]string{"c.prj.internal.", "12345678910.google.internal.", "zone.c.prj.internal.", "google.internal."}, - searchesOut: []string{"c.prj.internal.", "zone.c.prj.internal.", "google.internal."}, - }, - { - searchesIn: []string{"c.prj.internal.", "12345678910.google.internal.", "zone.c.prj.internal.", "google.internal.", "unexpected"}, - searchesOut: []string{"c.prj.internal.", "zone.c.prj.internal.", "google.internal.", "unexpected"}, - }, - } - gce := &GCECloud{} - for i := range tcs { - n, s := gce.ScrubDNS(tcs[i].nameserversIn, tcs[i].searchesIn) - if !reflect.DeepEqual(n, tcs[i].nameserversOut) { - t.Errorf("Expected %v, got %v", tcs[i].nameserversOut, n) - } - if !reflect.DeepEqual(s, tcs[i].searchesOut) { - t.Errorf("Expected %v, got %v", tcs[i].searchesOut, s) - } - } -} - func TestSplitProviderID(t *testing.T) { providers := []struct { providerID string diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index a2a517a4d3e..00e15d228b2 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ b/pkg/cloudprovider/providers/openstack/openstack.go @@ -486,11 +486,6 @@ func (os *OpenStack) ProviderName() string { return ProviderName } -// ScrubDNS filters DNS settings for pods. -func (os *OpenStack) ScrubDNS(nameServers, searches []string) ([]string, []string) { - return nameServers, searches -} - // HasClusterID returns true if the cluster has a clusterID func (os *OpenStack) HasClusterID() bool { return true diff --git a/pkg/cloudprovider/providers/ovirt/ovirt.go b/pkg/cloudprovider/providers/ovirt/ovirt.go index e688257ad87..eaade007074 100644 --- a/pkg/cloudprovider/providers/ovirt/ovirt.go +++ b/pkg/cloudprovider/providers/ovirt/ovirt.go @@ -128,11 +128,6 @@ func (v *OVirtCloud) ProviderName() string { return ProviderName } -// ScrubDNS filters DNS settings for pods. 
-func (v *OVirtCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { - return nameservers, searches -} - // HasClusterID returns true if the cluster has a clusterID func (v *OVirtCloud) HasClusterID() bool { return true diff --git a/pkg/cloudprovider/providers/photon/photon.go b/pkg/cloudprovider/providers/photon/photon.go index 5071de5c531..d2e8c2d780a 100644 --- a/pkg/cloudprovider/providers/photon/photon.go +++ b/pkg/cloudprovider/providers/photon/photon.go @@ -546,11 +546,6 @@ func (pc *PCCloud) Routes() (cloudprovider.Routes, bool) { return nil, false } -// ScrubDNS filters DNS settings for pods. -func (pc *PCCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { - return nameservers, searches -} - // HasClusterID returns true if the cluster has a clusterID func (pc *PCCloud) HasClusterID() bool { return true diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 77f80e23549..5d4577be32f 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -689,11 +689,6 @@ func (vs *VSphere) Routes() (cloudprovider.Routes, bool) { return nil, false } -// ScrubDNS filters DNS settings for pods. -func (vs *VSphere) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { - return nameservers, searches -} - // AttachDisk attaches given virtual disk volume to the compute running kubelet. 
func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error) { attachDiskInternal := func(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error) { From f3e044ea2b62c3160bae8c546c681e55912b61d3 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 8 Dec 2017 17:58:22 +0800 Subject: [PATCH 240/794] Remove unused federation docs --- docs/admin/federation-controller-manager.md | 3 --- docs/admin/kubefed.md | 3 --- docs/admin/kubefed_init.md | 3 --- docs/admin/kubefed_join.md | 3 --- docs/admin/kubefed_options.md | 3 --- docs/admin/kubefed_unjoin.md | 3 --- docs/admin/kubefed_version.md | 3 --- 7 files changed, 21 deletions(-) delete mode 100644 docs/admin/federation-controller-manager.md delete mode 100644 docs/admin/kubefed.md delete mode 100644 docs/admin/kubefed_init.md delete mode 100644 docs/admin/kubefed_join.md delete mode 100644 docs/admin/kubefed_options.md delete mode 100644 docs/admin/kubefed_unjoin.md delete mode 100644 docs/admin/kubefed_version.md diff --git a/docs/admin/federation-controller-manager.md b/docs/admin/federation-controller-manager.md deleted file mode 100644 index b6fd7a0f989..00000000000 --- a/docs/admin/federation-controller-manager.md +++ /dev/null @@ -1,3 +0,0 @@ -This file is autogenerated, but we've stopped checking such files into the -repository to reduce the need for rebases. Please run hack/generate-docs.sh to -populate this file. diff --git a/docs/admin/kubefed.md b/docs/admin/kubefed.md deleted file mode 100644 index b6fd7a0f989..00000000000 --- a/docs/admin/kubefed.md +++ /dev/null @@ -1,3 +0,0 @@ -This file is autogenerated, but we've stopped checking such files into the -repository to reduce the need for rebases. Please run hack/generate-docs.sh to -populate this file. 
diff --git a/docs/admin/kubefed_init.md b/docs/admin/kubefed_init.md deleted file mode 100644 index b6fd7a0f989..00000000000 --- a/docs/admin/kubefed_init.md +++ /dev/null @@ -1,3 +0,0 @@ -This file is autogenerated, but we've stopped checking such files into the -repository to reduce the need for rebases. Please run hack/generate-docs.sh to -populate this file. diff --git a/docs/admin/kubefed_join.md b/docs/admin/kubefed_join.md deleted file mode 100644 index b6fd7a0f989..00000000000 --- a/docs/admin/kubefed_join.md +++ /dev/null @@ -1,3 +0,0 @@ -This file is autogenerated, but we've stopped checking such files into the -repository to reduce the need for rebases. Please run hack/generate-docs.sh to -populate this file. diff --git a/docs/admin/kubefed_options.md b/docs/admin/kubefed_options.md deleted file mode 100644 index b6fd7a0f989..00000000000 --- a/docs/admin/kubefed_options.md +++ /dev/null @@ -1,3 +0,0 @@ -This file is autogenerated, but we've stopped checking such files into the -repository to reduce the need for rebases. Please run hack/generate-docs.sh to -populate this file. diff --git a/docs/admin/kubefed_unjoin.md b/docs/admin/kubefed_unjoin.md deleted file mode 100644 index b6fd7a0f989..00000000000 --- a/docs/admin/kubefed_unjoin.md +++ /dev/null @@ -1,3 +0,0 @@ -This file is autogenerated, but we've stopped checking such files into the -repository to reduce the need for rebases. Please run hack/generate-docs.sh to -populate this file. diff --git a/docs/admin/kubefed_version.md b/docs/admin/kubefed_version.md deleted file mode 100644 index b6fd7a0f989..00000000000 --- a/docs/admin/kubefed_version.md +++ /dev/null @@ -1,3 +0,0 @@ -This file is autogenerated, but we've stopped checking such files into the -repository to reduce the need for rebases. Please run hack/generate-docs.sh to -populate this file. 
From d917214d6f4d6757be9b4a4448fd1aeb5afea940 Mon Sep 17 00:00:00 2001 From: James Ravn Date: Fri, 8 Dec 2017 14:51:52 +0000 Subject: [PATCH 241/794] Update cadvisor godeps to v0.28.3 --- Godeps/Godeps.json | 354 +++++++++--------- .../google/cadvisor/accelerators/nvidia.go | 14 +- .../cadvisor/container/containerd/factory.go | 2 +- .../google/cadvisor/container/crio/factory.go | 2 +- .../google/cadvisor/container/crio/handler.go | 2 +- .../cadvisor/container/docker/docker.go | 60 ++- .../cadvisor/container/docker/factory.go | 5 +- .../google/cadvisor/container/raw/factory.go | 2 +- .../google/cadvisor/container/rkt/factory.go | 2 +- .../cadvisor/container/systemd/factory.go | 2 +- vendor/github.com/google/cadvisor/fs/fs.go | 20 +- .../google/cadvisor/http/handlers.go | 4 +- .../google/cadvisor/machine/info.go | 2 +- .../google/cadvisor/manager/container.go | 8 +- .../google/cadvisor/manager/manager.go | 58 +-- .../cadvisor/manager/watcher/rkt/rkt.go | 4 +- .../google/cadvisor/metrics/prometheus.go | 14 +- .../google/cadvisor/utils/cpuload/cpuload.go | 2 +- .../cadvisor/utils/oomparser/oomparser.go | 2 +- 19 files changed, 298 insertions(+), 261 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index d89bc78af29..851e8d0588e 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -106,7 +106,7 @@ }, { "ImportPath": "github.com/Microsoft/go-winio", - "Comment": "v0.4.4-7-g7843996", + "Comment": "v0.4.5", "Rev": "78439966b38d69bf38227fbf57ac8a6fee70f69a" }, { @@ -442,77 +442,77 @@ }, { "ImportPath": "github.com/containerd/containerd/api/services/containers/v1", - "Comment": "v1.0.0-beta.2-159-g27d450a", + "Comment": "v1.0.0-beta.2-159-g27d450a0", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/api/services/tasks/v1", - "Comment": "v1.0.0-beta.2-159-g27d450a", + "Comment": "v1.0.0-beta.2-159-g27d450a0", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": 
"github.com/containerd/containerd/api/services/version/v1", - "Comment": "v1.0.0-beta.2-159-g27d450a", + "Comment": "v1.0.0-beta.2-159-g27d450a0", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/api/types", - "Comment": "v1.0.0-beta.2-159-g27d450a", + "Comment": "v1.0.0-beta.2-159-g27d450a0", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/api/types/task", - "Comment": "v1.0.0-beta.2-159-g27d450a", + "Comment": "v1.0.0-beta.2-159-g27d450a0", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/containers", - "Comment": "v1.0.0-beta.2-159-g27d450a", + "Comment": "v1.0.0-beta.2-159-g27d450a0", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/dialer", - "Comment": "v1.0.0-beta.2-159-g27d450a", + "Comment": "v1.0.0-beta.2-159-g27d450a0", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/errdefs", - "Comment": "v1.0.0-beta.2-159-g27d450a", + "Comment": "v1.0.0-beta.2-159-g27d450a0", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/namespaces", - "Comment": "v1.0.0-beta.2-159-g27d450a", + "Comment": "v1.0.0-beta.2-159-g27d450a0", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containernetworking/cni/libcni", - "Comment": "v0.6.0-rc1-6-ga7885cb", + "Comment": "v0.6.0", "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/invoke", - "Comment": "v0.6.0-rc1-6-ga7885cb", + "Comment": "v0.6.0", "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types", - "Comment": "v0.6.0-rc1-6-ga7885cb", + "Comment": "v0.6.0", "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": 
"github.com/containernetworking/cni/pkg/types/020", - "Comment": "v0.6.0-rc1-6-ga7885cb", + "Comment": "v0.6.0", "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types/current", - "Comment": "v0.6.0-rc1-6-ga7885cb", + "Comment": "v0.6.0", "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/version", - "Comment": "v0.6.0-rc1-6-ga7885cb", + "Comment": "v0.6.0", "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { @@ -907,167 +907,167 @@ }, { "ImportPath": "github.com/docker/distribution/digestset", - "Comment": "v2.6.0-rc.1-209-gedc3ab2", + "Comment": "v2.6.0-rc.1-209-gedc3ab29", "Rev": "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" }, { "ImportPath": "github.com/docker/distribution/reference", - "Comment": "v2.6.0-rc.1-209-gedc3ab2", + "Comment": "v2.6.0-rc.1-209-gedc3ab29", "Rev": "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" }, { "ImportPath": "github.com/docker/docker/api", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/blkiodev", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/container", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/events", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": 
"docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/filters", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/image", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/network", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/registry", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/strslice", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/swarm", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/swarm/runtime", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": 
"github.com/docker/docker/api/types/time", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/versions", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/volume", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/client", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/ioutils", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/jsonlog", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/jsonmessage", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/longpath", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": 
"4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/stdcopy", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/symlink", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/system", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/term/windows", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/tlsconfig", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/go-connections/nat", - "Comment": "v0.2.1-30-g3ede32e", + "Comment": "v0.3.0", "Rev": "3ede32e2033de7505e6500d6c868c2b9ed9f169d" }, { "ImportPath": "github.com/docker/go-connections/sockets", - "Comment": "v0.2.1-30-g3ede32e", + "Comment": "v0.3.0", "Rev": "3ede32e2033de7505e6500d6c868c2b9ed9f169d" }, { "ImportPath": "github.com/docker/go-connections/tlsconfig", - "Comment": "v0.2.1-30-g3ede32e", + "Comment": "v0.3.0", "Rev": "3ede32e2033de7505e6500d6c868c2b9ed9f169d" }, { @@ -1077,7 +1077,7 @@ }, { "ImportPath": 
"github.com/docker/libnetwork/ipvs", - "Comment": "v0.8.0-dev.2-910-gba46b92", + "Comment": "v0.8.0-dev.2-910-gba46b928", "Rev": "ba46b928444931e6865d8618dc03622cac79aa6f" }, { @@ -1204,132 +1204,132 @@ }, { "ImportPath": "github.com/gogo/protobuf/gogoproto", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/compare", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/description", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/embedcheck", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/enumstringer", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/equal", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/face", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/gostring", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/marshalto", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { 
"ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/populate", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/size", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/stringer", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/testgen", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/union", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/unmarshal", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/proto", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": 
"github.com/gogo/protobuf/protoc-gen-gogo/plugin", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/sortkeys", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/types", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/vanity", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/vanity/command", - "Comment": "v0.4-3-gc0656ed", + "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { @@ -1382,218 +1382,218 @@ }, { "ImportPath": "github.com/google/cadvisor/accelerators", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/api", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/client/v2", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container", - 
"Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/common", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/containerd", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/crio", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/docker", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/libcontainer", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/raw", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/rkt", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/systemd", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/devicemapper", - "Comment": "v0.28.2", - "Rev": 
"49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/healthz", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/http", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/http/mux", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/machine", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { 
"ImportPath": "github.com/google/cadvisor/manager/watcher", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/raw", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/rkt", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/pages", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/pages/static", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/cloudinfo", - "Comment": "v0.28.2", - 
"Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/docker", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/oomparser", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/sysfs", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/sysinfo", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/validate", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/zfs", - "Comment": "v0.28.2", - "Rev": "49440c7e0af98f96993e4d4b5777991f65091f23" + "Comment": "v0.28.3", + 
"Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/certificate-transparency/go", @@ -2251,77 +2251,77 @@ }, { "ImportPath": "github.com/opencontainers/runc/libcontainer", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/rootless", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { 
"ImportPath": "github.com/opencontainers/runc/libcontainer/keys", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/system", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/user", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/utils", - "Comment": "v1.0.0-rc4-50-g4d6e672", + "Comment": "v1.0.0-rc4-50-g4d6e6720", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { diff --git a/vendor/github.com/google/cadvisor/accelerators/nvidia.go b/vendor/github.com/google/cadvisor/accelerators/nvidia.go index ac528db0e13..054d206b2ac 100644 --- a/vendor/github.com/google/cadvisor/accelerators/nvidia.go +++ b/vendor/github.com/google/cadvisor/accelerators/nvidia.go @@ -47,7 +47,7 @@ const nvidiaVendorId = "0x10de" // Setup initializes NVML if nvidia devices are present on the node. 
func (nm *NvidiaManager) Setup() { if !detectDevices(nvidiaVendorId) { - glog.Info("No NVIDIA devices found.") + glog.V(4).Info("No NVIDIA devices found.") return } @@ -56,7 +56,7 @@ func (nm *NvidiaManager) Setup() { return } go func() { - glog.Info("Starting goroutine to initialize NVML") + glog.V(2).Info("Starting goroutine to initialize NVML") // TODO: use globalHousekeepingInterval for range time.Tick(time.Minute) { nm.initializeNVML() @@ -71,7 +71,7 @@ func (nm *NvidiaManager) Setup() { func detectDevices(vendorId string) bool { devices, err := ioutil.ReadDir(sysFsPCIDevicesPath) if err != nil { - glog.Warningf("error reading %q: %v", sysFsPCIDevicesPath, err) + glog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err) return false } @@ -79,11 +79,11 @@ func detectDevices(vendorId string) bool { vendorPath := filepath.Join(sysFsPCIDevicesPath, device.Name(), "vendor") content, err := ioutil.ReadFile(vendorPath) if err != nil { - glog.Infof("Error while reading %q: %v", vendorPath, err) + glog.V(4).Infof("Error while reading %q: %v", vendorPath, err) continue } if strings.EqualFold(strings.TrimSpace(string(content)), vendorId) { - glog.Infof("Found device with vendorId %q", vendorId) + glog.V(3).Infof("Found device with vendorId %q", vendorId) return true } } @@ -95,7 +95,7 @@ func (nm *NvidiaManager) initializeNVML() { if err := gonvml.Initialize(); err != nil { // This is under a logging level because otherwise we may cause // log spam if the drivers/nvml is not installed on the system. - glog.V(3).Infof("Could not initialize NVML: %v", err) + glog.V(4).Infof("Could not initialize NVML: %v", err) return } numDevices, err := gonvml.DeviceCount() @@ -107,7 +107,7 @@ func (nm *NvidiaManager) initializeNVML() { nm.Unlock() return } - glog.Infof("NVML initialized. Number of nvidia devices: %v", numDevices) + glog.V(1).Infof("NVML initialized. 
Number of nvidia devices: %v", numDevices) nm.nvidiaDevices = make(map[int]gonvml.Device, numDevices) for i := 0; i < int(numDevices); i++ { device, err := gonvml.DeviceHandleByIndex(uint(i)) diff --git a/vendor/github.com/google/cadvisor/container/containerd/factory.go b/vendor/github.com/google/cadvisor/container/containerd/factory.go index a021538b162..dba43ef32e2 100644 --- a/vendor/github.com/google/cadvisor/container/containerd/factory.go +++ b/vendor/github.com/google/cadvisor/container/containerd/factory.go @@ -133,7 +133,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c return fmt.Errorf("failed to get cgroup subsystems: %v", err) } - glog.Infof("Registering containerd factory") + glog.V(1).Infof("Registering containerd factory") f := &containerdFactory{ cgroupSubsystems: cgroupSubsystems, client: client, diff --git a/vendor/github.com/google/cadvisor/container/crio/factory.go b/vendor/github.com/google/cadvisor/container/crio/factory.go index b0151c7f12e..0c77db69ed1 100644 --- a/vendor/github.com/google/cadvisor/container/crio/factory.go +++ b/vendor/github.com/google/cadvisor/container/crio/factory.go @@ -154,7 +154,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c return fmt.Errorf("failed to get cgroup subsystems: %v", err) } - glog.Infof("Registering CRI-O factory") + glog.V(1).Infof("Registering CRI-O factory") f := &crioFactory{ client: client, cgroupSubsystems: cgroupSubsystems, diff --git a/vendor/github.com/google/cadvisor/container/crio/handler.go b/vendor/github.com/google/cadvisor/container/crio/handler.go index 391d383322f..024341da8ab 100644 --- a/vendor/github.com/google/cadvisor/container/crio/handler.go +++ b/vendor/github.com/google/cadvisor/container/crio/handler.go @@ -185,7 +185,7 @@ func newCrioContainerHandler( } // TODO for env vars we wanted to show from container.Config.Env from whitelist //for _, exposedEnv := range metadataEnvs { - //glog.Infof("TODO env 
whitelist: %v", exposedEnv) + //glog.V(4).Infof("TODO env whitelist: %v", exposedEnv) //} return handler, nil diff --git a/vendor/github.com/google/cadvisor/container/docker/docker.go b/vendor/github.com/google/cadvisor/container/docker/docker.go index 3d3dfa5b6b9..b0ed227dda0 100644 --- a/vendor/github.com/google/cadvisor/container/docker/docker.go +++ b/vendor/github.com/google/cadvisor/container/docker/docker.go @@ -23,26 +23,33 @@ import ( dockertypes "github.com/docker/docker/api/types" "golang.org/x/net/context" + "time" + "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/machine" ) +const defaultTimeout = time.Second * 5 + +func defaultContext() context.Context { + ctx, _ := context.WithTimeout(context.Background(), defaultTimeout) + return ctx +} + func Status() (v1.DockerStatus, error) { client, err := Client() if err != nil { return v1.DockerStatus{}, fmt.Errorf("unable to communicate with docker daemon: %v", err) } - dockerInfo, err := client.Info(context.Background()) + dockerInfo, err := client.Info(defaultContext()) if err != nil { return v1.DockerStatus{}, err } - return StatusFromDockerInfo(dockerInfo), nil + return StatusFromDockerInfo(dockerInfo) } -func StatusFromDockerInfo(dockerInfo dockertypes.Info) v1.DockerStatus { +func StatusFromDockerInfo(dockerInfo dockertypes.Info) (v1.DockerStatus, error) { out := v1.DockerStatus{} - out.Version = VersionString() - out.APIVersion = APIVersionString() out.KernelVersion = machine.KernelVersion() out.OS = dockerInfo.OperatingSystem out.Hostname = dockerInfo.Name @@ -54,7 +61,18 @@ func StatusFromDockerInfo(dockerInfo dockertypes.Info) v1.DockerStatus { for _, v := range dockerInfo.DriverStatus { out.DriverStatus[v[0]] = v[1] } - return out + var err error + ver, err := VersionString() + if err != nil { + return out, err + } + out.Version = ver + ver, err = APIVersionString() + if err != nil { + return out, err + } + out.APIVersion = ver + return out, nil } func Images() ([]v1.DockerImage, 
error) { @@ -62,7 +80,7 @@ func Images() ([]v1.DockerImage, error) { if err != nil { return nil, fmt.Errorf("unable to communicate with docker daemon: %v", err) } - images, err := client.ImageList(context.Background(), dockertypes.ImageListOptions{All: false}) + images, err := client.ImageList(defaultContext(), dockertypes.ImageListOptions{All: false}) if err != nil { return nil, err } @@ -95,14 +113,14 @@ func ValidateInfo() (*dockertypes.Info, error) { return nil, fmt.Errorf("unable to communicate with docker daemon: %v", err) } - dockerInfo, err := client.Info(context.Background()) + dockerInfo, err := client.Info(defaultContext()) if err != nil { return nil, fmt.Errorf("failed to detect Docker info: %v", err) } // Fall back to version API if ServerVersion is not set in info. if dockerInfo.ServerVersion == "" { - version, err := client.ServerVersion(context.Background()) + version, err := client.ServerVersion(defaultContext()) if err != nil { return nil, fmt.Errorf("unable to get docker version: %v", err) } @@ -125,35 +143,43 @@ func ValidateInfo() (*dockertypes.Info, error) { } func Version() ([]int, error) { - return parseVersion(VersionString(), version_re, 3) + ver, err := VersionString() + if err != nil { + return nil, err + } + return parseVersion(ver, version_re, 3) } func APIVersion() ([]int, error) { - return parseVersion(APIVersionString(), apiversion_re, 2) + ver, err := APIVersionString() + if err != nil { + return nil, err + } + return parseVersion(ver, apiversion_re, 2) } -func VersionString() string { +func VersionString() (string, error) { docker_version := "Unknown" client, err := Client() if err == nil { - version, err := client.ServerVersion(context.Background()) + version, err := client.ServerVersion(defaultContext()) if err == nil { docker_version = version.Version } } - return docker_version + return docker_version, err } -func APIVersionString() string { +func APIVersionString() (string, error) { docker_api_version := "Unknown" client, err 
:= Client() if err == nil { - version, err := client.ServerVersion(context.Background()) + version, err := client.ServerVersion(defaultContext()) if err == nil { docker_api_version = version.APIVersion } } - return docker_api_version + return docker_api_version, err } func parseVersion(version_string string, regex *regexp.Regexp, length int) ([]int, error) { diff --git a/vendor/github.com/google/cadvisor/container/docker/factory.go b/vendor/github.com/google/cadvisor/container/docker/factory.go index a5ce4d14246..9eb1ff526d0 100644 --- a/vendor/github.com/google/cadvisor/container/docker/factory.go +++ b/vendor/github.com/google/cadvisor/container/docker/factory.go @@ -340,7 +340,8 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c glog.Errorf("devicemapper filesystem stats will not be reported: %v", err) } - status := StatusFromDockerInfo(*dockerInfo) + // Safe to ignore error - driver status should always be populated. + status, _ := StatusFromDockerInfo(*dockerInfo) thinPoolName = status.DriverStatus[dockerutil.DriverStatusPoolName] } @@ -352,7 +353,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c } } - glog.Infof("Registering Docker factory") + glog.V(1).Infof("Registering Docker factory") f := &dockerFactory{ cgroupSubsystems: cgroupSubsystems, client: client, diff --git a/vendor/github.com/google/cadvisor/container/raw/factory.go b/vendor/github.com/google/cadvisor/container/raw/factory.go index 36d236c8dd5..1b8a43a4077 100644 --- a/vendor/github.com/google/cadvisor/container/raw/factory.go +++ b/vendor/github.com/google/cadvisor/container/raw/factory.go @@ -83,7 +83,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno return err } - glog.Infof("Registering Raw factory") + glog.V(1).Infof("Registering Raw factory") factory := &rawFactory{ machineInfoFactory: machineInfoFactory, fsInfo: fsInfo, diff --git 
a/vendor/github.com/google/cadvisor/container/rkt/factory.go b/vendor/github.com/google/cadvisor/container/rkt/factory.go index f29c615ebaa..3f79d753e0d 100644 --- a/vendor/github.com/google/cadvisor/container/rkt/factory.go +++ b/vendor/github.com/google/cadvisor/container/rkt/factory.go @@ -86,7 +86,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno return fmt.Errorf("failed to find supported cgroup mounts for the raw factory") } - glog.Infof("Registering Rkt factory") + glog.V(1).Infof("Registering Rkt factory") factory := &rktFactory{ machineInfoFactory: machineInfoFactory, fsInfo: fsInfo, diff --git a/vendor/github.com/google/cadvisor/container/systemd/factory.go b/vendor/github.com/google/cadvisor/container/systemd/factory.go index 4e71d40bda6..cb3b7c89cd3 100644 --- a/vendor/github.com/google/cadvisor/container/systemd/factory.go +++ b/vendor/github.com/google/cadvisor/container/systemd/factory.go @@ -51,7 +51,7 @@ func (f *systemdFactory) DebugInfo() map[string][]string { // Register registers the systemd container factory. 
func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error { - glog.Infof("Registering systemd factory") + glog.V(1).Infof("Registering systemd factory") factory := &systemdFactory{} container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw}) return nil diff --git a/vendor/github.com/google/cadvisor/fs/fs.go b/vendor/github.com/google/cadvisor/fs/fs.go index b2eb7bd6c35..271b01e3562 100644 --- a/vendor/github.com/google/cadvisor/fs/fs.go +++ b/vendor/github.com/google/cadvisor/fs/fs.go @@ -136,8 +136,8 @@ func NewFsInfo(context Context) (FsInfo, error) { fsInfo.addDockerImagesLabel(context, mounts) fsInfo.addCrioImagesLabel(context, mounts) - glog.Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName) - glog.Infof("Filesystem partitions: %+v", fsInfo.partitions) + glog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName) + glog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions) fsInfo.addSystemRootLabel(mounts) return fsInfo, nil } @@ -162,7 +162,7 @@ func getFsUUIDToDeviceNameMap() (map[string]string, error) { path := filepath.Join(dir, file.Name()) target, err := os.Readlink(path) if err != nil { - glog.Infof("Failed to resolve symlink for %q", path) + glog.Warningf("Failed to resolve symlink for %q", path) continue } device, err := filepath.Abs(filepath.Join(dir, target)) @@ -438,7 +438,7 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) { file, err := os.Open(diskStatsFile) if err != nil { if os.IsNotExist(err) { - glog.Infof("not collecting filesystem statistics because file %q was not available", diskStatsFile) + glog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile) return diskStatsMap, nil } return nil, err @@ -561,12 +561,12 @@ func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) { return 0, fmt.Errorf("failed to exec du - %v", err) } timer := 
time.AfterFunc(timeout, func() { - glog.Infof("killing cmd %v due to timeout(%s)", cmd.Args, timeout.String()) + glog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String()) cmd.Process.Kill() }) stdoutb, souterr := ioutil.ReadAll(stdoutp) if souterr != nil { - glog.Errorf("failed to read from stdout for cmd %v - %v", cmd.Args, souterr) + glog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr) } stderrb, _ := ioutil.ReadAll(stderrp) err = cmd.Wait() @@ -600,7 +600,7 @@ func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) { return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String()) } timer := time.AfterFunc(timeout, func() { - glog.Infof("killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String()) + glog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String()) findCmd.Process.Kill() }) err := findCmd.Wait() @@ -741,7 +741,7 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) { return 0, 0, err } - glog.Infof("btrfs mount %#v", mount) + glog.V(4).Infof("btrfs mount %#v", mount) if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK { err := syscall.Stat(mount.Mountpoint, buf) if err != nil { @@ -749,8 +749,8 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) { return 0, 0, err } - glog.Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev))) - glog.Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev))) + glog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev))) + glog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev))) return int(major(buf.Dev)), int(minor(buf.Dev)), nil } else { diff --git a/vendor/github.com/google/cadvisor/http/handlers.go b/vendor/github.com/google/cadvisor/http/handlers.go index a2b4055dde0..8950072b4b9 100644 --- a/vendor/github.com/google/cadvisor/http/handlers.go +++ 
b/vendor/github.com/google/cadvisor/http/handlers.go @@ -60,7 +60,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut // Setup the authenticator object if httpAuthFile != "" { - glog.Infof("Using auth file %s", httpAuthFile) + glog.V(1).Infof("Using auth file %s", httpAuthFile) secrets := auth.HtpasswdFileProvider(httpAuthFile) authenticator := auth.NewBasicAuthenticator(httpAuthRealm, secrets) mux.HandleFunc(static.StaticResource, authenticator.Wrap(staticHandler)) @@ -70,7 +70,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut authenticated = true } if httpAuthFile == "" && httpDigestFile != "" { - glog.Infof("Using digest file %s", httpDigestFile) + glog.V(1).Infof("Using digest file %s", httpDigestFile) secrets := auth.HtdigestFileProvider(httpDigestFile) authenticator := auth.NewDigestAuthenticator(httpDigestRealm, secrets) mux.HandleFunc(static.StaticResource, authenticator.Wrap(staticHandler)) diff --git a/vendor/github.com/google/cadvisor/machine/info.go b/vendor/github.com/google/cadvisor/machine/info.go index 282f71dee48..be90f17e96c 100644 --- a/vendor/github.com/google/cadvisor/machine/info.go +++ b/vendor/github.com/google/cadvisor/machine/info.go @@ -49,7 +49,7 @@ func getInfoFromFiles(filePaths string) string { return strings.TrimSpace(string(id)) } } - glog.Infof("Couldn't collect info from any of the files in %q", filePaths) + glog.Warningf("Couldn't collect info from any of the files in %q", filePaths) return "" } diff --git a/vendor/github.com/google/cadvisor/manager/container.go b/vendor/github.com/google/cadvisor/manager/container.go index 1c3194bbf12..8193bb5e375 100644 --- a/vendor/github.com/google/cadvisor/manager/container.go +++ b/vendor/github.com/google/cadvisor/manager/container.go @@ -377,8 +377,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h // Create cpu load reader. 
loadReader, err := cpuload.New() if err != nil { - // TODO(rjnagal): Promote to warning once we support cpu load inside namespaces. - glog.Infof("Could not initialize cpu load reader for %q: %s", ref.Name, err) + glog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err) } else { cont.loadReader = loadReader } @@ -467,7 +466,7 @@ func (c *containerData) housekeeping() { stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples) if err != nil { if c.allowErrorLogging() { - glog.Infof("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err) + glog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err) } } else if len(stats) < numSamples { // Ignore, not enough stats yet. @@ -483,6 +482,7 @@ func (c *containerData) housekeeping() { instantUsageInCores := float64(stats[numSamples-1].Cpu.Usage.Total-stats[numSamples-2].Cpu.Usage.Total) / float64(stats[numSamples-1].Timestamp.Sub(stats[numSamples-2].Timestamp).Nanoseconds()) usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds()) usageInHuman := units.HumanSize(float64(usageMemory)) + // Don't set verbosity since this is already protected by the logUsage flag. glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman) } } @@ -504,7 +504,7 @@ func (c *containerData) housekeepingTick(timer <-chan time.Time, longHousekeepin err := c.updateStats() if err != nil { if c.allowErrorLogging() { - glog.Infof("Failed to update stats for container \"%s\": %s", c.info.Name, err) + glog.Warning("Failed to update stats for container \"%s\": %s", c.info.Name, err) } } // Log if housekeeping took too long. 
diff --git a/vendor/github.com/google/cadvisor/manager/manager.go b/vendor/github.com/google/cadvisor/manager/manager.go index 69c59b681c9..08955833bff 100644 --- a/vendor/github.com/google/cadvisor/manager/manager.go +++ b/vendor/github.com/google/cadvisor/manager/manager.go @@ -148,19 +148,19 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn if err != nil { return nil, err } - glog.Infof("cAdvisor running in container: %q", selfContainer) + glog.V(2).Infof("cAdvisor running in container: %q", selfContainer) var ( dockerStatus info.DockerStatus rktPath string ) if tempDockerStatus, err := docker.Status(); err != nil { - glog.Warningf("Unable to connect to Docker: %v", err) + glog.V(5).Infof("Docker not connected: %v", err) } else { dockerStatus = tempDockerStatus } if tmpRktPath, err := rkt.RktPath(); err != nil { - glog.Warningf("unable to connect to Rkt api service: %v", err) + glog.V(5).Infof("Rkt not connected: %v", err) } else { rktPath = tmpRktPath } @@ -171,7 +171,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn } crioInfo, err := crioClient.Info() if err != nil { - glog.Warningf("unable to connect to CRI-O api service: %v", err) + glog.V(5).Infof("CRI-O not connected: %v", err) } context := fs.Context{ @@ -222,13 +222,13 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn return nil, err } newManager.machineInfo = *machineInfo - glog.Infof("Machine: %+v", newManager.machineInfo) + glog.V(1).Infof("Machine: %+v", newManager.machineInfo) versionInfo, err := getVersionInfo() if err != nil { return nil, err } - glog.Infof("Version: %+v", *versionInfo) + glog.V(1).Infof("Version: %+v", *versionInfo) newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy()) return newManager, nil @@ -267,12 +267,12 @@ type manager struct { func (self *manager) Start() error { err := docker.Register(self, self.fsInfo, self.ignoreMetrics) if err != nil { - 
glog.Warningf("Docker container factory registration failed: %v.", err) + glog.V(5).Infof("Registration of the Docker container factory failed: %v.", err) } err = rkt.Register(self, self.fsInfo, self.ignoreMetrics) if err != nil { - glog.Warningf("Registration of the rkt container factory failed: %v", err) + glog.V(5).Infof("Registration of the rkt container factory failed: %v", err) } else { watcher, err := rktwatcher.NewRktContainerWatcher() if err != nil { @@ -283,17 +283,17 @@ func (self *manager) Start() error { err = containerd.Register(self, self.fsInfo, self.ignoreMetrics) if err != nil { - glog.Warningf("Registration of the containerd container factory failed: %v", err) + glog.V(5).Infof("Registration of the containerd container factory failed: %v", err) } err = crio.Register(self, self.fsInfo, self.ignoreMetrics) if err != nil { - glog.Warningf("Registration of the crio container factory failed: %v", err) + glog.V(5).Infof("Registration of the crio container factory failed: %v", err) } err = systemd.Register(self, self.fsInfo, self.ignoreMetrics) if err != nil { - glog.Warningf("Registration of the systemd container factory failed: %v", err) + glog.V(5).Infof("Registration of the systemd container factory failed: %v", err) } err = raw.Register(self, self.fsInfo, self.ignoreMetrics) @@ -326,12 +326,12 @@ func (self *manager) Start() error { if err != nil { return err } - glog.Infof("Starting recovery of all containers") + glog.V(2).Infof("Starting recovery of all containers") err = self.detectSubcontainers("/") if err != nil { return err } - glog.Infof("Recovery completed") + glog.V(2).Infof("Recovery completed") // Watch for new container. 
quitWatcher := make(chan error) @@ -849,29 +849,25 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c if err != nil { return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err) } - glog.V(3).Infof("Got config from %q: %q", v, configFile) + glog.V(4).Infof("Got config from %q: %q", v, configFile) if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") { newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient) if err != nil { - glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) } err = cont.collectorManager.RegisterCollector(newCollector) if err != nil { - glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) } } else { newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient) if err != nil { - glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) } err = cont.collectorManager.RegisterCollector(newCollector) if err != nil { - glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) } } } @@ -946,11 +942,11 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche } devicesCgroupPath, err := handler.GetCgroupPath("devices") if err != nil { - 
glog.Infof("Error getting devices cgroup path: %v", err) + glog.Warningf("Error getting devices cgroup path: %v", err) } else { cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath) if err != nil { - glog.Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err) + glog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err) } } @@ -959,7 +955,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche collectorConfigs := collector.GetCollectorConfigs(labels) err = m.registerCollectors(collectorConfigs, cont) if err != nil { - glog.Infof("failed to register collectors for %q: %v", containerName, err) + glog.Warningf("Failed to register collectors for %q: %v", containerName, err) } // Add the container name and all its aliases. The aliases must be within the namespace of the factory. @@ -1179,7 +1175,7 @@ func (self *manager) watchForNewContainers(quit chan error) error { } func (self *manager) watchForNewOoms() error { - glog.Infof("Started watching for new ooms in manager") + glog.V(2).Infof("Started watching for new ooms in manager") outStream := make(chan *oomparser.OomInstance, 10) oomLog, err := oomparser.New() if err != nil { @@ -1347,8 +1343,14 @@ func getVersionInfo() (*info.VersionInfo, error) { kernel_version := machine.KernelVersion() container_os := machine.ContainerOsVersion() - docker_version := docker.VersionString() - docker_api_version := docker.APIVersionString() + docker_version, err := docker.VersionString() + if err != nil { + return nil, err + } + docker_api_version, err := docker.APIVersionString() + if err != nil { + return nil, err + } return &info.VersionInfo{ KernelVersion: kernel_version, diff --git a/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go b/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go index d54c628886a..4c54d9b94e0 100644 --- 
a/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go +++ b/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go @@ -53,7 +53,7 @@ func (self *rktContainerWatcher) Stop() error { } func (self *rktContainerWatcher) detectRktContainers(events chan watcher.ContainerEvent) { - glog.Infof("starting detectRktContainers thread") + glog.V(1).Infof("Starting detectRktContainers thread") ticker := time.Tick(10 * time.Second) curpods := make(map[string]*rktapi.Pod) @@ -92,7 +92,7 @@ func (self *rktContainerWatcher) syncRunningPods(pods []*rktapi.Pod, events chan for id, pod := range curpods { if _, ok := newpods[id]; !ok { for _, cgroup := range podToCgroup(pod) { - glog.Infof("cgroup to delete = %v", cgroup) + glog.V(2).Infof("cgroup to delete = %v", cgroup) self.sendDestroyEvent(cgroup, events) } } diff --git a/vendor/github.com/google/cadvisor/metrics/prometheus.go b/vendor/github.com/google/cadvisor/metrics/prometheus.go index f401e1278bf..2dd7747b834 100644 --- a/vendor/github.com/google/cadvisor/metrics/prometheus.go +++ b/vendor/github.com/google/cadvisor/metrics/prometheus.go @@ -820,11 +820,19 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) glog.Warningf("Couldn't get containers: %s", err) return } + rawLabels := map[string]struct{}{} for _, container := range containers { - labels, values := []string{}, []string{} - for l, v := range c.containerLabelsFunc(container) { + for l := range c.containerLabelsFunc(container) { + rawLabels[l] = struct{}{} + } + } + for _, container := range containers { + values := make([]string, 0, len(rawLabels)) + labels := make([]string, 0, len(rawLabels)) + containerLabels := c.containerLabelsFunc(container) + for l := range rawLabels { labels = append(labels, sanitizeLabelName(l)) - values = append(values, v) + values = append(values, containerLabels[l]) } // Container spec diff --git a/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go 
b/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go index e536d90be1c..f3d29b8dd05 100644 --- a/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go +++ b/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go @@ -41,6 +41,6 @@ func New() (CpuLoadReader, error) { if err != nil { return nil, fmt.Errorf("failed to create a netlink based cpuload reader: %v", err) } - glog.V(3).Info("Using a netlink-based load reader") + glog.V(4).Info("Using a netlink-based load reader") return reader, nil } diff --git a/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go b/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go index 184cdd73fda..a73243f2e3e 100644 --- a/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go +++ b/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go @@ -143,7 +143,7 @@ func (glogAdapter) Infof(format string, args ...interface{}) { glog.V(4).Infof(format, args) } func (glogAdapter) Warningf(format string, args ...interface{}) { - glog.Infof(format, args) + glog.V(2).Infof(format, args) } func (glogAdapter) Errorf(format string, args ...interface{}) { glog.Warningf(format, args) From 1748b42b21699ab90f1cbbf8631744368406df8b Mon Sep 17 00:00:00 2001 From: Derek Carr Date: Fri, 8 Dec 2017 12:06:41 -0500 Subject: [PATCH 242/794] LimitRange ignores objects previously marked for deletion --- plugin/pkg/admission/limitranger/admission.go | 12 ++++++++++++ plugin/pkg/admission/limitranger/admission_test.go | 9 +++++++++ 2 files changed, 21 insertions(+) diff --git a/plugin/pkg/admission/limitranger/admission.go b/plugin/pkg/admission/limitranger/admission.go index 73336b7f56d..bee1a9f5677 100644 --- a/plugin/pkg/admission/limitranger/admission.go +++ b/plugin/pkg/admission/limitranger/admission.go @@ -113,6 +113,18 @@ func (l *LimitRanger) runLimitFunc(a admission.Attributes, limitFn func(limitRan } } + // ignore all objects marked for deletion + oldObj := a.GetOldObject() + if oldObj != nil { + 
oldAccessor, err := meta.Accessor(oldObj) + if err != nil { + return admission.NewForbidden(a, err) + } + if oldAccessor.GetDeletionTimestamp() != nil { + return nil + } + } + items, err := l.GetLimitRanges(a) if err != nil { return err diff --git a/plugin/pkg/admission/limitranger/admission_test.go b/plugin/pkg/admission/limitranger/admission_test.go index a19930b6856..33213a582bc 100644 --- a/plugin/pkg/admission/limitranger/admission_test.go +++ b/plugin/pkg/admission/limitranger/admission_test.go @@ -733,6 +733,15 @@ func TestLimitRangerAdmitPod(t *testing.T) { if err != nil { t.Errorf("Should have ignored calls to any subresource of pod %v", err) } + + // a pod that is undergoing termination should never be blocked + terminatingPod := validPod("terminatingPod", 1, api.ResourceRequirements{}) + now := metav1.Now() + terminatingPod.DeletionTimestamp = &now + err = handler.Validate(admission.NewAttributesRecord(&terminatingPod, &terminatingPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "terminatingPod", api.Resource("pods").WithVersion("version"), "", admission.Update, nil)) + if err != nil { + t.Errorf("LimitRange should ignore a pod marked for termination") + } } // newMockClientForTest creates a mock client that returns a client configured for the specified list of limit ranges From f344f50fca28e79ac67e25527b1add9bf418f957 Mon Sep 17 00:00:00 2001 From: David Xia Date: Fri, 8 Dec 2017 14:25:46 -0600 Subject: [PATCH 243/794] examples: Make messages more informative by including pod name and namespace --- .../out-of-cluster-client-configuration/main.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go b/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go index 252307b1826..9d79b977608 100644 --- a/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go +++ 
b/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go @@ -62,15 +62,18 @@ func main() { // Examples for error handling: // - Use helper functions like e.g. errors.IsNotFound() // - And/or cast to StatusError and use its properties like e.g. ErrStatus.Message - _, err = clientset.CoreV1().Pods("default").Get("example-xxxxx", metav1.GetOptions{}) + namespace := "default" + pod := "example-xxxxx" + _, err = clientset.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{}) if errors.IsNotFound(err) { - fmt.Printf("Pod not found\n") + fmt.Printf("Pod %s in namespace %s not found\n", pod, namespace) } else if statusError, isStatus := err.(*errors.StatusError); isStatus { - fmt.Printf("Error getting pod %v\n", statusError.ErrStatus.Message) + fmt.Printf("Error getting pod %s in namespace %s: %v\n", + pod, namespace, statusError.ErrStatus.Message) } else if err != nil { panic(err.Error()) } else { - fmt.Printf("Found pod\n") + fmt.Printf("Found pod %s in namespace %s\n", pod, namespace) } time.Sleep(10 * time.Second) From 9a47249180ce15d55f0a58394603d74120be2de9 Mon Sep 17 00:00:00 2001 From: zoues Date: Sun, 10 Dec 2017 11:11:37 +0800 Subject: [PATCH 244/794] typo --- pkg/controller/statefulset/stateful_pod_control.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/statefulset/stateful_pod_control.go b/pkg/controller/statefulset/stateful_pod_control.go index fff08046297..d189f05ce05 100644 --- a/pkg/controller/statefulset/stateful_pod_control.go +++ b/pkg/controller/statefulset/stateful_pod_control.go @@ -172,7 +172,7 @@ func (spc *realStatefulPodControl) recordClaimEvent(verb string, set *apps.State } } -// createPersistentVolumeClaims creates all of the required PersistentVolumeClaims for pod, which mush be a member of +// createPersistentVolumeClaims creates all of the required PersistentVolumeClaims for pod, which must be a member of // set. 
If all of the claims for Pod are successfully created, the returned error is nil. If creation fails, this method // may be called again until no error is returned, indicating the PersistentVolumeClaims for pod are consistent with // set's Spec. From 46d3fa052dc087efa39585d3a9d582ee49a697f5 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Sun, 10 Dec 2017 15:39:31 +0800 Subject: [PATCH 245/794] remove dead code --- .../src/k8s.io/client-go/discovery/discovery_client.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/staging/src/k8s.io/client-go/discovery/discovery_client.go b/staging/src/k8s.io/client-go/discovery/discovery_client.go index 26319f49468..5490fb12ca8 100644 --- a/staging/src/k8s.io/client-go/discovery/discovery_client.go +++ b/staging/src/k8s.io/client-go/discovery/discovery_client.go @@ -395,15 +395,6 @@ func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} } -func stringDoesntExistIn(str string, slice []string) bool { - for _, s := range slice { - if s == str { - return false - } - } - return true -} - // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *DiscoveryClient) RESTClient() restclient.Interface { From 86d02ac36811dfadad089f9a5d2ceb95b1288056 Mon Sep 17 00:00:00 2001 From: Steve Larkin Date: Sun, 10 Dec 2017 13:00:14 +0100 Subject: [PATCH 246/794] Fix YAMLDecoder Read behaviour Make it adhere to the Read contract by returning the number of bytes read. 
--- .../k8s.io/apimachinery/pkg/util/yaml/decoder.go | 2 +- .../apimachinery/pkg/util/yaml/decoder_test.go | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 6ebfaea707d..56de33a7fdf 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -122,7 +122,7 @@ func (d *YAMLDecoder) Read(data []byte) (n int, err error) { if left <= len(data) { copy(data, d.remaining) d.remaining = nil - return len(d.remaining), nil + return left, nil } // caller will need to reread diff --git a/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder_test.go b/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder_test.go index bd4403648f4..1eebd2018f0 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder_test.go @@ -22,12 +22,26 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "math/rand" "reflect" "strings" "testing" ) +func TestYAMLDecoder(t *testing.T) { + d := `--- +stuff: 1 + test-foo: 1 +` + s := NewDocumentDecoder(ioutil.NopCloser(bytes.NewReader([]byte(d)))) + b := make([]byte, len(d)) + n, err := s.Read(b) + if err != nil || n != len(d) { + t.Fatalf("unexpected body: %d / %v", n, err) + } +} + func TestSplitYAMLDocument(t *testing.T) { testCases := []struct { input string From 62f45189e118c51d0674f76023403a226dfc04c7 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sun, 10 Dec 2017 21:04:44 -0500 Subject: [PATCH 247/794] Sort default cidrs for reproducible builds In different distros or environments, we may end up with a different order of the default string printed during help and man page generation, So we should sort so the string we print is the same everytime. 
--- pkg/cloudprovider/providers/gce/gce_loadbalancer.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer.go index 6b8995b2f50..3d8f68e6f84 100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer.go @@ -20,6 +20,7 @@ import ( "flag" "fmt" "net" + "sort" "strings" "github.com/golang/glog" @@ -62,7 +63,9 @@ func init() { // String is the method to format the flag's value, part of the flag.Value interface. func (c *cidrs) String() string { - return strings.Join(c.ipn.StringSlice(), ",") + s := c.ipn.StringSlice() + sort.Strings(s) + return strings.Join(s, ",") } // Set supports a value of CSV or the flag repeated multiple times From 096cc665ff904b41c25e219229ddc972f6e641b2 Mon Sep 17 00:00:00 2001 From: lichuqiang Date: Fri, 8 Dec 2017 17:32:11 +0800 Subject: [PATCH 248/794] fix bug in container lifecycle event generation --- pkg/kubelet/lifecycle/BUILD | 1 + pkg/kubelet/lifecycle/handlers.go | 4 +-- pkg/kubelet/lifecycle/handlers_test.go | 44 ++++++++++++++++++++++++-- 3 files changed, 44 insertions(+), 5 deletions(-) diff --git a/pkg/kubelet/lifecycle/BUILD b/pkg/kubelet/lifecycle/BUILD index cd4997cdd2b..d2463e5de9d 100644 --- a/pkg/kubelet/lifecycle/BUILD +++ b/pkg/kubelet/lifecycle/BUILD @@ -39,6 +39,7 @@ go_test( library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/util/format:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", ], diff --git a/pkg/kubelet/lifecycle/handlers.go b/pkg/kubelet/lifecycle/handlers.go index 450c985e2a4..b941d85537d 100644 --- a/pkg/kubelet/lifecycle/handlers.go +++ b/pkg/kubelet/lifecycle/handlers.go @@ -58,14 +58,14 @@ func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, // 
TODO(tallclair): Pass a proper timeout value. output, err := hr.commandRunner.RunInContainer(containerID, handler.Exec.Command, 0) if err != nil { - msg := fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output)) + msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output)) glog.V(1).Infof(msg) } return msg, err case handler.HTTPGet != nil: msg, err := hr.runHTTPHandler(pod, container, handler) if err != nil { - msg := fmt.Sprintf("Http lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", handler.HTTPGet.Path, container.Name, format.Pod(pod), err, msg) + msg = fmt.Sprintf("Http lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", handler.HTTPGet.Path, container.Name, format.Pod(pod), err, msg) glog.V(1).Infof(msg) } return msg, err diff --git a/pkg/kubelet/lifecycle/handlers_test.go b/pkg/kubelet/lifecycle/handlers_test.go index 009d8609272..900bd393688 100644 --- a/pkg/kubelet/lifecycle/handlers_test.go +++ b/pkg/kubelet/lifecycle/handlers_test.go @@ -28,6 +28,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/util/format" ) func TestResolvePortInt(t *testing.T) { @@ -78,12 +79,14 @@ func TestResolvePortStringUnknown(t *testing.T) { type fakeContainerCommandRunner struct { Cmd []string ID kubecontainer.ContainerID + Err error + Msg string } func (f *fakeContainerCommandRunner) RunInContainer(id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { f.Cmd = cmd f.ID = id - return nil, nil + return []byte(f.Msg), f.Err } func TestRunHandlerExec(t *testing.T) { @@ -185,6 +188,40 @@ func TestRunHandlerNil(t *testing.T) { } } +func 
TestRunHandlerExecFailure(t *testing.T) { + expectedErr := fmt.Errorf("invalid command") + fakeCommandRunner := fakeContainerCommandRunner{Err: expectedErr, Msg: expectedErr.Error()} + handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil) + + containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} + containerName := "containerFoo" + command := []string{"ls", "--a"} + + container := v1.Container{ + Name: containerName, + Lifecycle: &v1.Lifecycle{ + PostStart: &v1.Handler{ + Exec: &v1.ExecAction{ + Command: command, + }, + }, + }, + } + + pod := v1.Pod{} + pod.ObjectMeta.Name = "podFoo" + pod.ObjectMeta.Namespace = "nsFoo" + pod.Spec.Containers = []v1.Container{container} + expectedErrMsg := fmt.Sprintf("Exec lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", command, containerName, format.Pod(&pod), expectedErr, expectedErr.Error()) + msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) + if err == nil { + t.Errorf("expected error: %v", expectedErr) + } + if msg != expectedErrMsg { + t.Errorf("unexpected error message: %q; expected %q", msg, expectedErrMsg) + } +} + func TestRunHandlerHttpFailure(t *testing.T) { expectedErr := fmt.Errorf("fake http error") expectedResp := http.Response{ @@ -210,12 +247,13 @@ func TestRunHandlerHttpFailure(t *testing.T) { pod.ObjectMeta.Name = "podFoo" pod.ObjectMeta.Namespace = "nsFoo" pod.Spec.Containers = []v1.Container{container} + expectedErrMsg := fmt.Sprintf("Http lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", "bar", containerName, format.Pod(&pod), expectedErr, expectedErr.Error()) msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err == nil { t.Errorf("expected error: %v", expectedErr) } - if msg != expectedErr.Error() { - t.Errorf("unexpected error message: %q; expected %q", msg, expectedErr) + if msg != expectedErrMsg { + t.Errorf("unexpected 
error message: %q; expected %q", msg, expectedErrMsg) } if fakeHttp.url != "http://foo:8080/bar" { t.Errorf("unexpected url: %s", fakeHttp.url) From f7611017e99dd2d033936df0eb35df36e02d00f5 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Mon, 11 Dec 2017 10:39:57 +0800 Subject: [PATCH 249/794] check and set promiscuous mode with netlink because vishvananda/netlink already supports it --- pkg/kubelet/network/kubenet/kubenet_linux.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/pkg/kubelet/network/kubenet/kubenet_linux.go b/pkg/kubelet/network/kubenet/kubenet_linux.go index 8551946d495..f41c59d843e 100644 --- a/pkg/kubelet/network/kubenet/kubenet_linux.go +++ b/pkg/kubelet/network/kubenet/kubenet_linux.go @@ -334,20 +334,18 @@ func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kube // Put the container bridge into promiscuous mode to force it to accept hairpin packets. // TODO: Remove this once the kernel bug (#20096) is fixed. - // TODO: check and set promiscuous mode with netlink once vishvananda/netlink supports it if plugin.hairpinMode == kubeletconfig.PromiscuousBridge { - output, err := plugin.execer.Command("ip", "link", "show", "dev", BridgeName).CombinedOutput() - if err != nil || strings.Index(string(output), "PROMISC") < 0 { - _, err := plugin.execer.Command("ip", "link", "set", BridgeName, "promisc", "on").CombinedOutput() - if err != nil { - return fmt.Errorf("Error setting promiscuous mode on %s: %v", BridgeName, err) - } - } - link, err := netlink.LinkByName(BridgeName) if err != nil { return fmt.Errorf("failed to lookup %q: %v", BridgeName, err) } + if link.Attrs().Promisc != 1 { + // promiscuous mode is not on, then turn it on. 
+ err := netlink.SetPromiscOn(link) + if err != nil { + return fmt.Errorf("Error setting promiscuous mode on %s: %v", BridgeName, err) + } + } // configure the ebtables rules to eliminate duplicate packets by best effort plugin.syncEbtablesDedupRules(link.Attrs().HardwareAddr) From 4f400e5d2f35f47f98dcb57635c80e86dbe6484a Mon Sep 17 00:00:00 2001 From: Di Xu Date: Mon, 11 Dec 2017 15:20:55 +0800 Subject: [PATCH 250/794] ignore images in used by running containers when GC --- pkg/kubelet/images/image_gc_manager.go | 25 +++++++++------ pkg/kubelet/images/image_gc_manager_test.go | 35 ++++++++++++--------- 2 files changed, 36 insertions(+), 24 deletions(-) diff --git a/pkg/kubelet/images/image_gc_manager.go b/pkg/kubelet/images/image_gc_manager.go index 344e0156a49..b8503968bef 100644 --- a/pkg/kubelet/images/image_gc_manager.go +++ b/pkg/kubelet/images/image_gc_manager.go @@ -168,7 +168,7 @@ func (im *realImageGCManager) Start() { if im.initialized { ts = time.Now() } - err := im.detectImages(ts) + _, err := im.detectImages(ts) if err != nil { glog.Warningf("[imageGCManager] Failed to monitor images: %v", err) } else { @@ -194,18 +194,19 @@ func (im *realImageGCManager) GetImageList() ([]kubecontainer.Image, error) { return im.imageCache.get(), nil } -func (im *realImageGCManager) detectImages(detectTime time.Time) error { +func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, error) { + imagesInUse := sets.NewString() + images, err := im.runtime.ListImages() if err != nil { - return err + return imagesInUse, err } pods, err := im.runtime.GetPods(true) if err != nil { - return err + return imagesInUse, err } // Make a set of images in use by containers. 
- imagesInUse := sets.NewString() for _, pod := range pods { for _, container := range pod.Containers { glog.V(5).Infof("Pod %s/%s, container %s uses image %s(%s)", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID) @@ -231,7 +232,7 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) error { } // Set last used time to now if the image is being used. - if isImageUsed(image, imagesInUse) { + if isImageUsed(image.ID, imagesInUse) { glog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now) im.imageRecords[image.ID].lastUsed = now } @@ -248,7 +249,7 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) error { } } - return nil + return imagesInUse, nil } func (im *realImageGCManager) GarbageCollect() error { @@ -309,7 +310,7 @@ func (im *realImageGCManager) DeleteUnusedImages() (int64, error) { // Note that error may be nil and the number of bytes free may be less // than bytesToFree. func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) { - err := im.detectImages(freeTime) + imagesInUse, err := im.detectImages(freeTime) if err != nil { return 0, err } @@ -320,6 +321,10 @@ func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) ( // Get all images in eviction order. images := make([]evictionInfo, 0, len(im.imageRecords)) for image, record := range im.imageRecords { + if isImageUsed(image, imagesInUse) { + glog.V(5).Infof("Image ID %s is being used", image) + continue + } images = append(images, evictionInfo{ id: image, imageRecord: *record, @@ -385,9 +390,9 @@ func (ev byLastUsedAndDetected) Less(i, j int) bool { } } -func isImageUsed(image container.Image, imagesInUse sets.String) bool { +func isImageUsed(imageID string, imagesInUse sets.String) bool { // Check the image ID. 
- if _, ok := imagesInUse[image.ID]; ok { + if _, ok := imagesInUse[imageID]; ok { return true } return false diff --git a/pkg/kubelet/images/image_gc_manager_test.go b/pkg/kubelet/images/image_gc_manager_test.go index aac3bad0f47..fe680f45d8e 100644 --- a/pkg/kubelet/images/image_gc_manager_test.go +++ b/pkg/kubelet/images/image_gc_manager_test.go @@ -112,7 +112,7 @@ func TestDetectImagesInitialDetect(t *testing.T) { } startTime := time.Now().Add(-time.Millisecond) - err := manager.detectImages(zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 3) @@ -145,7 +145,7 @@ func TestDetectImagesWithNewImage(t *testing.T) { }}, } - err := manager.detectImages(zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) @@ -159,7 +159,7 @@ func TestDetectImagesWithNewImage(t *testing.T) { detectedTime := zero.Add(time.Second) startTime := time.Now().Add(-time.Millisecond) - err = manager.detectImages(detectedTime) + _, err = manager.detectImages(detectedTime) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 3) noContainer, ok := manager.getImageRecord(imageID(0)) @@ -190,7 +190,7 @@ func TestDetectImagesContainerStopped(t *testing.T) { }}, } - err := manager.detectImages(zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) @@ -199,7 +199,7 @@ func TestDetectImagesContainerStopped(t *testing.T) { // Simulate container being stopped. 
fakeRuntime.AllPodList = []*containertest.FakePod{} - err = manager.detectImages(time.Now()) + _, err = manager.detectImages(time.Now()) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) container1, ok := manager.getImageRecord(imageID(0)) @@ -226,14 +226,14 @@ func TestDetectImagesWithRemovedImages(t *testing.T) { }}, } - err := manager.detectImages(zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) // Simulate both images being removed. fakeRuntime.ImageList = []container.Image{} - err = manager.detectImages(time.Now()) + _, err = manager.detectImages(time.Now()) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 0) } @@ -297,7 +297,8 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { } // Make 1 be more recently used than 0. - require.NoError(t, manager.detectImages(zero)) + _, err := manager.detectImages(zero) + require.NoError(t, err) fakeRuntime.AllPodList = []*containertest.FakePod{ {Pod: &container.Pod{ Containers: []*container.Container{ @@ -305,13 +306,15 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { }, }}, } - require.NoError(t, manager.detectImages(time.Now())) + _, err = manager.detectImages(time.Now()) + require.NoError(t, err) fakeRuntime.AllPodList = []*containertest.FakePod{ {Pod: &container.Pod{ Containers: []*container.Container{}, }}, } - require.NoError(t, manager.detectImages(time.Now())) + _, err = manager.detectImages(time.Now()) + require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) spaceFreed, err := manager.freeSpace(1024, time.Now()) @@ -335,14 +338,17 @@ func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) { } // Make 1 more recently detected but used at the same time as 0. 
- require.NoError(t, manager.detectImages(zero)) + _, err := manager.detectImages(zero) + require.NoError(t, err) fakeRuntime.ImageList = []container.Image{ makeImage(0, 1024), makeImage(1, 2048), } - require.NoError(t, manager.detectImages(time.Now())) + _, err = manager.detectImages(time.Now()) + require.NoError(t, err) fakeRuntime.AllPodList = []*containertest.FakePod{} - require.NoError(t, manager.detectImages(time.Now())) + _, err = manager.detectImages(time.Now()) + require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) spaceFreed, err := manager.freeSpace(1024, time.Now()) @@ -448,7 +454,8 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) t.Log(fakeClock.Now()) - require.NoError(t, manager.detectImages(fakeClock.Now())) + _, err := manager.detectImages(fakeClock.Now()) + require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) // no space freed since one image is in used, and another one is not old enough spaceFreed, err := manager.freeSpace(1024, fakeClock.Now()) From 7927127c7dd29a668580b822ca9479ae2ddf8084 Mon Sep 17 00:00:00 2001 From: dungeonmaster18 Date: Sat, 9 Dec 2017 20:59:25 +0530 Subject: [PATCH 251/794] remove internal version api from apply Signed-off-by: dungeonmaster18 --- pkg/kubectl/apply.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/kubectl/apply.go b/pkg/kubectl/apply.go index fc716369a13..1e8f79caecd 100644 --- a/pkg/kubectl/apply.go +++ b/pkg/kubectl/apply.go @@ -17,9 +17,9 @@ limitations under the License. 
package kubectl import ( + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl/resource" ) @@ -35,7 +35,7 @@ func GetOriginalConfiguration(mapping *meta.RESTMapping, obj runtime.Object) ([] return nil, nil } - original, ok := annots[api.LastAppliedConfigAnnotation] + original, ok := annots[v1.LastAppliedConfigAnnotation] if !ok { return nil, nil } @@ -60,7 +60,7 @@ func SetOriginalConfiguration(info *resource.Info, original []byte) error { annots = map[string]string{} } - annots[api.LastAppliedConfigAnnotation] = string(original) + annots[v1.LastAppliedConfigAnnotation] = string(original) return info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots) } @@ -85,8 +85,8 @@ func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime. annots = map[string]string{} } - original := annots[api.LastAppliedConfigAnnotation] - delete(annots, api.LastAppliedConfigAnnotation) + original := annots[v1.LastAppliedConfigAnnotation] + delete(annots, v1.LastAppliedConfigAnnotation) if err := accessor.SetAnnotations(info.Object, annots); err != nil { return nil, err } @@ -97,7 +97,7 @@ func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime. } if annotate { - annots[api.LastAppliedConfigAnnotation] = string(modified) + annots[v1.LastAppliedConfigAnnotation] = string(modified) if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { return nil, err } @@ -109,7 +109,7 @@ func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime. } // Restore the object to its original condition. 
- annots[api.LastAppliedConfigAnnotation] = original + annots[v1.LastAppliedConfigAnnotation] = original if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { return nil, err } From 208df08ea6648b295bb394ef7a14ad0a442f6672 Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Mon, 11 Dec 2017 16:42:31 +0800 Subject: [PATCH 252/794] remove useless validation from pod's resourcequota admission ResourceQuota is a validating admission plugin. Before it runs, pods has already been validated. It's not necessary to validate it again. --- pkg/quota/evaluator/core/BUILD | 2 -- pkg/quota/evaluator/core/pods.go | 19 -------------- pkg/quota/evaluator/core/pods_test.go | 26 ------------------- .../admission/resourcequota/admission_test.go | 6 ----- 4 files changed, 53 deletions(-) diff --git a/pkg/quota/evaluator/core/BUILD b/pkg/quota/evaluator/core/BUILD index 85d50d0855a..00962bb02ff 100644 --- a/pkg/quota/evaluator/core/BUILD +++ b/pkg/quota/evaluator/core/BUILD @@ -21,7 +21,6 @@ go_library( "//pkg/apis/core/helper:go_default_library", "//pkg/apis/core/helper/qos:go_default_library", "//pkg/apis/core/v1:go_default_library", - "//pkg/apis/core/validation:go_default_library", "//pkg/features:go_default_library", "//pkg/kubeapiserver/admission/util:go_default_library", "//pkg/quota:go_default_library", @@ -34,7 +33,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/initialization:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/features:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", diff --git a/pkg/quota/evaluator/core/pods.go b/pkg/quota/evaluator/core/pods.go index ba935eab5af..be7d3307181 100644 --- 
a/pkg/quota/evaluator/core/pods.go +++ b/pkg/quota/evaluator/core/pods.go @@ -29,12 +29,10 @@ import ( "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper/qos" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/kubeapiserver/admission/util" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota/generic" @@ -118,23 +116,6 @@ func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Obj return fmt.Errorf("Unexpected input object %v", item) } - // Pod level resources are often set during admission control - // As a consequence, we want to verify that resources are valid prior - // to ever charging quota prematurely in case they are not. - // TODO remove this entire section when we have a validation step in admission. - allErrs := field.ErrorList{} - fldPath := field.NewPath("spec").Child("containers") - for i, ctr := range pod.Spec.Containers { - allErrs = append(allErrs, validation.ValidateResourceRequirements(&ctr.Resources, fldPath.Index(i).Child("resources"))...) - } - fldPath = field.NewPath("spec").Child("initContainers") - for i, ctr := range pod.Spec.InitContainers { - allErrs = append(allErrs, validation.ValidateResourceRequirements(&ctr.Resources, fldPath.Index(i).Child("resources"))...) - } - if len(allErrs) > 0 { - return allErrs.ToAggregate() - } - // BACKWARD COMPATIBILITY REQUIREMENT: if we quota cpu or memory, then each container // must make an explicit request for the resource. this was a mistake. it coupled // validation with resource counting, but we did this before QoS was even defined. 
diff --git a/pkg/quota/evaluator/core/pods_test.go b/pkg/quota/evaluator/core/pods_test.go index 2c06bdcb4b2..35febe9374e 100644 --- a/pkg/quota/evaluator/core/pods_test.go +++ b/pkg/quota/evaluator/core/pods_test.go @@ -36,32 +36,6 @@ func TestPodConstraintsFunc(t *testing.T) { required []api.ResourceName err string }{ - "init container resource invalid": { - pod: &api.Pod{ - Spec: api.PodSpec{ - InitContainers: []api.Container{{ - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("2m")}, - Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("1m")}, - }, - }}, - }, - }, - err: `spec.initContainers[0].resources.requests: Invalid value: "2m": must be less than or equal to cpu limit`, - }, - "container resource invalid": { - pod: &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{{ - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("2m")}, - Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("1m")}, - }, - }}, - }, - }, - err: `spec.containers[0].resources.requests: Invalid value: "2m": must be less than or equal to cpu limit`, - }, "init container resource missing": { pod: &api.Pod{ Spec: api.PodSpec{ diff --git a/plugin/pkg/admission/resourcequota/admission_test.go b/plugin/pkg/admission/resourcequota/admission_test.go index 755d0f5d009..09855f2ead9 100644 --- a/plugin/pkg/admission/resourcequota/admission_test.go +++ b/plugin/pkg/admission/resourcequota/admission_test.go @@ -724,12 +724,6 @@ func TestAdmitEnforceQuotaConstraints(t *testing.T) { if err == nil { t.Errorf("Expected an error because the pod does not specify a memory limit") } - // verify the requests and limits are actually valid (in this case, we fail because the limits < requests) - newPod = validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("200m", "2Gi"), getResourceList("100m", "1Gi"))) - err = 
handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil)) - if err == nil { - t.Errorf("Expected an error because the pod does not specify a memory limit") - } } // TestAdmitPodInNamespaceWithoutQuota ensures that if a namespace has no quota, that a pod can get in From 16d4fb7e52c54b7de684a6da59fe98af9c1ae001 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 11 Dec 2017 09:53:27 -0500 Subject: [PATCH 253/794] Display apiGroups before resources in PolicyRule --- pkg/apis/rbac/helpers.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/apis/rbac/helpers.go b/pkg/apis/rbac/helpers.go index 4b1a1d3a0c7..373711500b1 100644 --- a/pkg/apis/rbac/helpers.go +++ b/pkg/apis/rbac/helpers.go @@ -147,6 +147,10 @@ func (r PolicyRule) String() string { func (r PolicyRule) CompactString() string { formatStringParts := []string{} formatArgs := []interface{}{} + if len(r.APIGroups) > 0 { + formatStringParts = append(formatStringParts, "APIGroups:%q") + formatArgs = append(formatArgs, r.APIGroups) + } if len(r.Resources) > 0 { formatStringParts = append(formatStringParts, "Resources:%q") formatArgs = append(formatArgs, r.Resources) @@ -159,10 +163,6 @@ func (r PolicyRule) CompactString() string { formatStringParts = append(formatStringParts, "ResourceNames:%q") formatArgs = append(formatArgs, r.ResourceNames) } - if len(r.APIGroups) > 0 { - formatStringParts = append(formatStringParts, "APIGroups:%q") - formatArgs = append(formatArgs, r.APIGroups) - } if len(r.Verbs) > 0 { formatStringParts = append(formatStringParts, "Verbs:%q") formatArgs = append(formatArgs, r.Verbs) From 8c608041d6fd9c708dba2374a780c6107fafecd2 Mon Sep 17 00:00:00 2001 From: Piotr Szczesniak Date: Mon, 11 Dec 2017 18:06:56 +0100 Subject: [PATCH 254/794] BUmped Heapster to v1.5.0 --- .../google/heapster-controller.yaml | 16 
++++++++-------- .../heapster-controller-combined.yaml | 16 ++++++++-------- .../influxdb/heapster-controller.yaml | 16 ++++++++-------- .../stackdriver/heapster-controller.yaml | 12 ++++++------ .../standalone/heapster-controller.yaml | 12 ++++++------ 5 files changed, 36 insertions(+), 36 deletions(-) diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 31e2e173d18..c69d72155ff 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -23,29 +23,29 @@ metadata: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: heapster-v1.5.0-beta.3 + name: heapster-v1.5.0 namespace: kube-system labels: k8s-app: heapster kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.5.0-beta.3 + version: v1.5.0 spec: replicas: 1 selector: matchLabels: k8s-app: heapster - version: v1.5.0-beta.3 + version: v1.5.0 template: metadata: labels: k8s-app: heapster - version: v1.5.0-beta.3 + version: v1.5.0 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -58,7 +58,7 @@ spec: - /heapster - --source=kubernetes.summary_api:'' - --sink=gcm - - image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: eventer command: - /eventer @@ -89,7 +89,7 @@ spec: - --memory={{ base_metrics_memory }} - --extra-memory={{metrics_memory_per_node}}Mi - --threshold=5 - - --deployment=heapster-v1.5.0-beta.3 + - --deployment=heapster-v1.5.0 - --container=heapster - --poll-period=300000 - --estimator=exponential @@ -118,7 +118,7 @@ spec: - --memory={{base_eventer_memory}} - 
--extra-memory={{eventer_memory_per_node}}Ki - --threshold=5 - - --deployment=heapster-v1.5.0-beta.3 + - --deployment=heapster-v1.5.0 - --container=eventer - --poll-period=300000 - --estimator=exponential diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index 60d19bc18e7..03207169e66 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -23,29 +23,29 @@ metadata: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: heapster-v1.5.0-beta.3 + name: heapster-v1.5.0 namespace: kube-system labels: k8s-app: heapster kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.5.0-beta.3 + version: v1.5.0 spec: replicas: 1 selector: matchLabels: k8s-app: heapster - version: v1.5.0-beta.3 + version: v1.5.0 template: metadata: labels: k8s-app: heapster - version: v1.5.0-beta.3 + version: v1.5.0 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster livenessProbe: @@ -60,7 +60,7 @@ spec: - --source=kubernetes.summary_api:'' - --sink=influxdb:http://monitoring-influxdb:8086 - --sink=gcm:?metrics=autoscaling - - image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: eventer command: - /eventer @@ -91,7 +91,7 @@ spec: - --memory={{ base_metrics_memory }} - --extra-memory={{ metrics_memory_per_node }}Mi - --threshold=5 - - --deployment=heapster-v1.5.0-beta.3 + - --deployment=heapster-v1.5.0 - --container=heapster - --poll-period=300000 - --estimator=exponential @@ -120,7 +120,7 @@ spec: - --memory={{ base_eventer_memory }} - --extra-memory={{ 
eventer_memory_per_node }}Ki - --threshold=5 - - --deployment=heapster-v1.5.0-beta.3 + - --deployment=heapster-v1.5.0 - --container=eventer - --poll-period=300000 - --estimator=exponential diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml index 0821dbcd13f..9f8f7811d4b 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -23,29 +23,29 @@ metadata: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: heapster-v1.5.0-beta.3 + name: heapster-v1.5.0 namespace: kube-system labels: k8s-app: heapster kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.5.0-beta.3 + version: v1.5.0 spec: replicas: 1 selector: matchLabels: k8s-app: heapster - version: v1.5.0-beta.3 + version: v1.5.0 template: metadata: labels: k8s-app: heapster - version: v1.5.0-beta.3 + version: v1.5.0 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -58,7 +58,7 @@ spec: - /heapster - --source=kubernetes.summary_api:'' - --sink=influxdb:http://monitoring-influxdb:8086 - - image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: eventer command: - /eventer @@ -89,7 +89,7 @@ spec: - --memory={{ base_metrics_memory }} - --extra-memory={{ metrics_memory_per_node }}Mi - --threshold=5 - - --deployment=heapster-v1.5.0-beta.3 + - --deployment=heapster-v1.5.0 - --container=heapster - --poll-period=300000 - --estimator=exponential @@ -118,7 +118,7 @@ spec: - --memory={{ base_eventer_memory }} - --extra-memory={{ eventer_memory_per_node }}Ki - --threshold=5 - - --deployment=heapster-v1.5.0-beta.3 + - 
--deployment=heapster-v1.5.0 - --container=eventer - --poll-period=300000 - --estimator=exponential diff --git a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml index a4ce962cc9f..8f75492c626 100644 --- a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml @@ -21,29 +21,29 @@ metadata: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: heapster-v1.5.0-beta.3 + name: heapster-v1.5.0 namespace: kube-system labels: k8s-app: heapster kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.5.0-beta.3 + version: v1.5.0 spec: replicas: 1 selector: matchLabels: k8s-app: heapster - version: v1.5.0-beta.3 + version: v1.5.0 template: metadata: labels: k8s-app: heapster - version: v1.5.0-beta.3 + version: v1.5.0 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -101,7 +101,7 @@ spec: - --memory={{ base_metrics_memory }} - --extra-memory={{metrics_memory_per_node}}Mi - --threshold=5 - - --deployment=heapster-v1.5.0-beta.3 + - --deployment=heapster-v1.5.0 - --container=heapster - --poll-period=300000 - --estimator=exponential diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml index f2c43740f78..daca44b75e2 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -21,29 +21,29 @@ metadata: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: heapster-v1.5.0-beta.3 + name: heapster-v1.5.0 namespace: kube-system labels: k8s-app: 
heapster kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v1.5.0-beta.3 + version: v1.5.0 spec: replicas: 1 selector: matchLabels: k8s-app: heapster - version: v1.5.0-beta.3 + version: v1.5.0 template: metadata: labels: k8s-app: heapster - version: v1.5.0-beta.3 + version: v1.5.0 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -80,7 +80,7 @@ spec: - --memory={{ base_metrics_memory }} - --extra-memory={{ metrics_memory_per_node }}Mi - --threshold=5 - - --deployment=heapster-v1.5.0-beta.3 + - --deployment=heapster-v1.5.0 - --container=heapster - --poll-period=300000 - --estimator=exponential From 57059ea4428265cdea7e4c79646ad476fe79fcd8 Mon Sep 17 00:00:00 2001 From: Mike Wilson Date: Fri, 8 Dec 2017 19:51:37 -0500 Subject: [PATCH 255/794] apilb template writes a log file to /var/log/nginx.*.log and not into the nginx directory where the log rotation is setup. Adding a log rotation file for these logs. 
--- .../reactive/load_balancer.py | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py b/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py index a3577a35302..c5272359572 100644 --- a/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py +++ b/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py @@ -21,6 +21,7 @@ import subprocess from charms import layer from charms.reactive import when, when_any, when_not from charms.reactive import set_state, remove_state +from charms.reactive import hook from charmhelpers.core import hookenv from charmhelpers.core import host from charmhelpers.contrib.charmsupport import nrpe @@ -35,6 +36,25 @@ from subprocess import STDOUT from subprocess import CalledProcessError +apilb_nginx = """/var/log/nginx.*.log { + daily + missingok + rotate 14 + compress + delaycompress + notifempty + create 0640 www-data adm + sharedscripts + prerotate + if [ -d /etc/logrotate.d/httpd-prerotate ]; then \\ + run-parts /etc/logrotate.d/httpd-prerotate; \\ + fi \\ + endscript + postrotate + invoke-rc.d nginx rotate >/dev/null 2>&1 + endscript +}""" + @when('certificates.available') def request_server_certificates(tls): '''Send the data that is required to create a server certificate for @@ -89,6 +109,14 @@ def close_old_port(): hookenv.log('Port %d already closed, skipping.' 
% old_port) +def maybe_write_apilb_logrotate_config(): + filename = '/etc/logrotate.d/apilb_nginx' + if not os.path.exists(filename): + # Set log rotation for apilb log file + with open(filename, 'w+') as fp: + fp.write(apilb_nginx) + + @when('nginx.available', 'apiserver.available', 'certificates.server.cert.available') def install_load_balancer(apiserver, tls): @@ -123,9 +151,16 @@ def install_load_balancer(apiserver, tls): server_certificate=server_cert_path, server_key=server_key_path, ) + + maybe_write_apilb_logrotate_config() hookenv.status_set('active', 'Loadbalancer ready.') +@hook('upgrade-charm') +def upgrade_charm(): + maybe_write_apilb_logrotate_config() + + @when('nginx.available') def set_nginx_version(): ''' Surface the currently deployed version of nginx to Juju ''' From f99aae9ce3eb805854c894f67b7072623e07d4da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Dec 2017 19:37:18 +0200 Subject: [PATCH 256/794] kubeadm: Don't downgrade etcd on cluster downgrade --- cmd/kubeadm/app/cmd/init.go | 1 - cmd/kubeadm/app/cmd/join.go | 1 - cmd/kubeadm/app/cmd/upgrade/apply.go | 2 +- cmd/kubeadm/app/features/features.go | 4 ++-- cmd/kubeadm/app/phases/upgrade/staticpods.go | 12 ++++++------ 5 files changed, 9 insertions(+), 11 deletions(-) diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 7d09adce957..bcf98c0f508 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -228,7 +228,6 @@ func AddInitOtherFlags(flagSet *flag.FlagSet, cfgPath *string, skipPreFlight, sk // NewInit validates given arguments and instantiates Init struct with provided information. 
func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, ignorePreflightErrors sets.String, skipTokenPrint, dryRun bool, criSocket string) (*Init, error) { - fmt.Println("[kubeadm] WARNING: kubeadm is currently in beta") if cfgPath != "" { b, err := ioutil.ReadFile(cfgPath) diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index 23fdea08a0f..2bc32fe4d72 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -199,7 +199,6 @@ type Join struct { // NewJoin instantiates Join struct with given arguments func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, ignorePreflightErrors sets.String, criSocket string) (*Join, error) { - fmt.Println("[kubeadm] WARNING: kubeadm is currently in beta") if cfg.NodeName == "" { cfg.NodeName = nodeutil.GetHostname("") diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index 7313bffb6c6..08629ef2b78 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -203,7 +203,7 @@ func SetImplicitFlags(flags *applyFlags) error { // EnforceVersionPolicies makes sure that the version the user specified is valid to upgrade to // There are both fatal and skippable (with --force) errors func EnforceVersionPolicies(flags *applyFlags, versionGetter upgrade.VersionGetter) error { - fmt.Printf("[upgrade/version] You have chosen to upgrade to version %q\n", flags.newK8sVersionStr) + fmt.Printf("[upgrade/version] You have chosen to change the cluster version to %q\n", flags.newK8sVersionStr) versionSkewErrs := upgrade.EnforceVersionPolicies(versionGetter, flags.newK8sVersionStr, flags.newK8sVersion, flags.parent.allowExperimentalUpgrades, flags.parent.allowRCUpgrades) if versionSkewErrs != nil { diff --git a/cmd/kubeadm/app/features/features.go b/cmd/kubeadm/app/features/features.go index 36f5a495c4e..57627cb367b 100644 --- a/cmd/kubeadm/app/features/features.go +++ b/cmd/kubeadm/app/features/features.go 
@@ -33,7 +33,7 @@ const ( // CoreDNS is alpha in v1.9 CoreDNS = "CoreDNS" - // SelfHosting is beta in v1.9 + // SelfHosting is alpha in v1.8 and v1.9 SelfHosting = "SelfHosting" // StoreCertsInSecrets is alpha in v1.8 and v1.9 @@ -47,7 +47,7 @@ var v190 = version.MustParseSemantic("v1.9.0-alpha.1") // InitFeatureGates are the default feature gates for the init command var InitFeatureGates = FeatureList{ - SelfHosting: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Beta}}, + SelfHosting: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}}, StoreCertsInSecrets: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}}, // We don't want to advertise this feature gate exists in v1.9 to avoid confusion as it is not yet working HighAvailability: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190, HiddenInHelpText: true}, diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index 258f0dba91a..b661ea20593 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -154,7 +154,7 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd) } - fmt.Printf("[upgrade/staticpods] Moved upgraded manifest to %q and backed up old manifest to %q\n", currentManifestPath, backupManifestPath) + fmt.Printf("[upgrade/staticpods] Moved new manifest to %q and backed up old manifest to %q\n", currentManifestPath, backupManifestPath) fmt.Println("[upgrade/staticpods] Waiting for the kubelet to restart the component") // Wait for the mirror Pod hash to change; otherwise we'll run into race conditions here when the kubelet hasn't had time to @@ -178,7 +178,7 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP func 
performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, recoverManifests map[string]string) (bool, error) { // Add etcd static pod spec only if external etcd is not configured if len(cfg.Etcd.Endpoints) != 0 { - return false, fmt.Errorf("external etcd cannot be upgraded with kubeadm") + return false, fmt.Errorf("external etcd detected, won't try to change any etcd state") } // Checking health state of etcd before proceeding with the upgrtade etcdCluster := util.LocalEtcdCluster{} @@ -191,7 +191,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM backupEtcdDir := pathMgr.BackupEtcdDir() runningEtcdDir := cfg.Etcd.DataDir if err := util.CopyDir(runningEtcdDir, backupEtcdDir); err != nil { - return true, fmt.Errorf("fail to back up etcd data with %v", err) + return true, fmt.Errorf("fail to back up etcd data: %v", err) } // Need to check currently used version and version from constants, if differs then upgrade @@ -206,7 +206,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM // Comparing current etcd version with desired to catch the same version or downgrade condition and fail on them. if desiredEtcdVersion.LessThan(currentEtcdVersion) { - return true, fmt.Errorf("the requested etcd version (%s) for Kubernetes v(%s) is lower than the currently running version (%s)", desiredEtcdVersion.String(), cfg.KubernetesVersion, currentEtcdVersion.String()) + return false, fmt.Errorf("the desired etcd version for this Kubernetes version %q is %q, but the current etcd version is %q. 
Won't downgrade etcd, instead just continue", cfg.KubernetesVersion, desiredEtcdVersion.String(), currentEtcdVersion.String()) } // For the case when desired etcd version is the same as current etcd version if strings.Compare(desiredEtcdVersion.String(), currentEtcdVersion.String()) == 0 { @@ -288,7 +288,7 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager if fatal { return err } - fmt.Printf("[etcd] non fatal issue encountered during upgrade: %v\n", err) + fmt.Printf("[upgrade/etcd] non fatal issue encountered during upgrade: %v\n", err) } } @@ -298,7 +298,7 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager } // Write the updated static Pod manifests into the temporary directory - fmt.Printf("[upgrade/staticpods] Writing upgraded Static Pod manifests to %q\n", pathMgr.TempManifestDir()) + fmt.Printf("[upgrade/staticpods] Writing new Static Pod manifests to %q\n", pathMgr.TempManifestDir()) err = controlplane.CreateInitStaticPodManifestFiles(pathMgr.TempManifestDir(), cfg) if err != nil { return fmt.Errorf("error creating init static pod manifest files: %v", err) From 80bc5604894db30f898403972775fd0690754bce Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Mon, 11 Dec 2017 12:52:29 -0500 Subject: [PATCH 257/794] check for empty label before assigning --- pkg/kubectl/resource/builder.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/kubectl/resource/builder.go b/pkg/kubectl/resource/builder.go index 4ff130b7f90..53669138114 100644 --- a/pkg/kubectl/resource/builder.go +++ b/pkg/kubectl/resource/builder.go @@ -341,6 +341,10 @@ func (b *Builder) LabelSelectorParam(s string) *Builder { // LabelSelector accepts a selector directly and will filter the resulting list by that object. // Use LabelSelectorParam instead for user input. 
func (b *Builder) LabelSelector(selector string) *Builder { + if len(selector) == 0 { + return b + } + b.labelSelector = &selector return b } From 9ca41b451ad8cf964a94ad9edde6d9e0a41b5bf2 Mon Sep 17 00:00:00 2001 From: zacharysarah Date: Thu, 7 Dec 2017 17:45:59 -0600 Subject: [PATCH 258/794] Doc updates for 1.9 --- cmd/kube-proxy/app/server.go | 6 +++--- cmd/kubelet/app/options/options.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 6de788607fe..2f9ea2eac3e 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -125,11 +125,11 @@ func AddFlags(options *Options, fs *pflag.FlagSet) { // All flags below here are deprecated and will eventually be removed. - fs.Var(componentconfig.IPVar{Val: &options.config.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)") + fs.Var(componentconfig.IPVar{Val: &options.config.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") fs.StringVar(&options.master, "master", options.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") fs.Int32Var(&options.healthzPort, "healthz-port", options.healthzPort, "The port to bind the health check server. 
Use 0 to disable.") - fs.Var(componentconfig.IPVar{Val: &options.config.HealthzBindAddress}, "healthz-bind-address", "The IP address and port for the health check server to serve on (set to 0.0.0.0 for all interfaces)") - fs.Var(componentconfig.IPVar{Val: &options.config.MetricsBindAddress}, "metrics-bind-address", "The IP address and port for the metrics server to serve on (set to 0.0.0.0 for all interfaces)") + fs.Var(componentconfig.IPVar{Val: &options.config.HealthzBindAddress}, "healthz-bind-address", "The IP address and port for the health check server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") + fs.Var(componentconfig.IPVar{Val: &options.config.MetricsBindAddress}, "metrics-bind-address", "The IP address and port for the metrics server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") fs.Int32Var(options.config.OOMScoreAdj, "oom-score-adj", utilpointer.Int32PtrDerefOr(options.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. 
Values must be within the range [-1000, 1000]") fs.StringVar(&options.config.ResourceContainer, "resource-container", options.config.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).") fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.") diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 4e9e10e9df4..6d8e13abe8c 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -382,7 +382,7 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat fs.StringVar(&c.ManifestURL, "manifest-url", c.ManifestURL, "URL for accessing the container manifest") fs.Var(flag.NewColonSeparatedMultimapStringString(&c.ManifestURLHeader), "manifest-url-header", "Comma-separated list of HTTP headers to use when accessing the manifest URL. Multiple headers with the same name will be added in the same order provided. This flag can be repeatedly invoked. 
For example: `--manifest-url-header 'a:hello,b:again,c:world' --manifest-url-header 'b:beautiful'`") fs.BoolVar(&c.EnableServer, "enable-server", c.EnableServer, "Enable the Kubelet's server") - fs.Var(componentconfig.IPVar{Val: &c.Address}, "address", "The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)") + fs.Var(componentconfig.IPVar{Val: &c.Address}, "address", "The IP address for the Kubelet to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") fs.Int32Var(&c.Port, "port", c.Port, "The port for the Kubelet to serve on.") fs.Int32Var(&c.ReadOnlyPort, "read-only-port", c.ReadOnlyPort, "The read-only port for the Kubelet to serve on with no authentication/authorization (set to 0 to disable)") @@ -427,7 +427,7 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat fs.BoolVar(&c.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled") fs.Int32Var(&c.CAdvisorPort, "cadvisor-port", c.CAdvisorPort, "The port of the localhost cAdvisor endpoint (set to 0 to disable)") fs.Int32Var(&c.HealthzPort, "healthz-port", c.HealthzPort, "The port of the localhost healthz endpoint (set to 0 to disable)") - fs.Var(componentconfig.IPVar{Val: &c.HealthzBindAddress}, "healthz-bind-address", "The IP address for the healthz server to serve on. (set to 0.0.0.0 for all interfaces)") + fs.Var(componentconfig.IPVar{Val: &c.HealthzBindAddress}, "healthz-bind-address", "The IP address for the healthz server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") fs.Int32Var(&c.OOMScoreAdj, "oom-score-adj", c.OOMScoreAdj, "The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]") fs.StringVar(&c.ClusterDomain, "cluster-domain", c.ClusterDomain, "Domain for this cluster. 
If set, kubelet will configure all containers to search this domain in addition to the host's search domains") From c52413b82e656e4f4332374013750ea1f990882e Mon Sep 17 00:00:00 2001 From: prashima Date: Wed, 6 Dec 2017 15:22:29 -0800 Subject: [PATCH 259/794] Fixes issue#392. --- .../providers/vsphere/vclib/virtualmachine.go | 12 +++++++++++- pkg/cloudprovider/providers/vsphere/vsphere.go | 2 ++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go b/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go index 8077b5583e6..db45b8e1935 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go @@ -19,6 +19,7 @@ package vclib import ( "context" "fmt" + "strings" "time" "github.com/golang/glog" @@ -362,12 +363,14 @@ func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath s glog.Errorf("Failed to get the devices for VM: %q. 
err: %+v", vm.InventoryPath, err) return nil, err } + // filter vm devices to retrieve device for the given vmdk file identified by disk path for _, device := range vmDevices { if vmDevices.TypeName(device) == "VirtualDisk" { virtualDevice := device.GetVirtualDevice() if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { - if backing.FileName == diskPath { + if matchVirtualDiskAndVolPath(backing.FileName, diskPath) { + glog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath) return device, nil } } @@ -376,6 +379,13 @@ func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath s return nil, nil } +func matchVirtualDiskAndVolPath(diskPath, volPath string) bool { + fileExt := ".vmdk" + diskPath = strings.TrimSuffix(diskPath, fileExt) + volPath = strings.TrimSuffix(volPath, fileExt) + return diskPath == volPath +} + // deleteController removes latest added SCSI controller from VM. func (vm *VirtualMachine) deleteController(ctx context.Context, controllerDevice types.BaseVirtualDevice, vmDevices object.VirtualDeviceList) error { controllerDeviceList := vmDevices.SelectByType(controllerDevice) diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 77f80e23549..d97823fd4ec 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -835,6 +835,7 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b glog.Errorf("Failed to get VM object for node: %q. 
err: +%v", vSphereInstance, err) return false, err } + volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath) attached, err := vm.IsDiskAttached(ctx, volPath) if err != nil { @@ -842,6 +843,7 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b volPath, vSphereInstance) } + glog.V(4).Infof("DiskIsAttached result: %q and error: %q, for volume: %q", attached, err, volPath) return attached, err } requestTime := time.Now() From ba2bf598fb2de58bca94ba0bde721a70df36fcea Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Mon, 11 Dec 2017 11:45:56 -0800 Subject: [PATCH 260/794] gce: tighten up perms on kube-env --- cluster/gce/configure-vm.sh | 2 ++ cluster/gce/gci/configure.sh | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh index 4bc6dc5a67a..3fa13fb981f 100755 --- a/cluster/gce/configure-vm.sh +++ b/cluster/gce/configure-vm.sh @@ -149,6 +149,7 @@ function curl-metadata() { } function set-kube-env() { + (umask 700; local kube_env_yaml="${INSTALL_DIR}/kube_env.yaml" until curl-metadata kube-env > "${kube_env_yaml}"; do @@ -164,6 +165,7 @@ for k,v in yaml.load(sys.stdin).iteritems(): print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v)))) print("""export {var}""".format(var = k)) ' < """${kube_env_yaml}""")" + ) } function remove-docker-artifacts() { diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index c8a7b117de9..40060f613c4 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -48,6 +48,7 @@ EOF function download-kube-env { # Fetch kube-env from GCE metadata server. 
+ (umask 700; local -r tmp_kube_env="/tmp/kube-env.yaml" curl --fail --retry 5 --retry-delay 3 --silent --show-error \ -H "X-Google-Metadata-Request: True" \ @@ -60,10 +61,12 @@ for k,v in yaml.load(sys.stdin).iteritems(): print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v)))) ''' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env") rm -f "${tmp_kube_env}" + ) } function download-kube-master-certs { # Fetch kube-env from GCE metadata server. + (umask 700; local -r tmp_kube_master_certs="/tmp/kube-master-certs.yaml" curl --fail --retry 5 --retry-delay 3 --silent --show-error \ -H "X-Google-Metadata-Request: True" \ @@ -76,6 +79,7 @@ for k,v in yaml.load(sys.stdin).iteritems(): print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v)))) ''' < "${tmp_kube_master_certs}" > "${KUBE_HOME}/kube-master-certs") rm -f "${tmp_kube_master_certs}" + ) } function validate-hash { From 7b4311e51802483ad70198b6cea3b9601d142105 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 11 Dec 2017 15:37:12 -0500 Subject: [PATCH 261/794] kubelet should use the value of the cri container runtime endpoint from cadvisor --- pkg/kubelet/cadvisor/helpers_linux.go | 2 +- pkg/kubelet/cadvisor/util.go | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/cadvisor/helpers_linux.go b/pkg/kubelet/cadvisor/helpers_linux.go index ea10588c8c7..10b5e05a008 100644 --- a/pkg/kubelet/cadvisor/helpers_linux.go +++ b/pkg/kubelet/cadvisor/helpers_linux.go @@ -43,7 +43,7 @@ func (i *imageFsInfoProvider) ImageFsInfoLabel() (string, error) { // This is a temporary workaround to get stats for cri-o from cadvisor // and should be removed. 
// Related to https://github.com/kubernetes/kubernetes/issues/51798 - if i.runtimeEndpoint == "/var/run/crio.sock" { + if i.runtimeEndpoint == CrioSocket { return cadvisorfs.LabelCrioImages, nil } } diff --git a/pkg/kubelet/cadvisor/util.go b/pkg/kubelet/cadvisor/util.go index 7937917a89b..e4107d5b4a7 100644 --- a/pkg/kubelet/cadvisor/util.go +++ b/pkg/kubelet/cadvisor/util.go @@ -29,6 +29,12 @@ import ( kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) +const ( + // Please keep this in sync with the one in: + // github.com/google/cadvisor/container/crio/client.go + CrioSocket = "/var/run/crio/crio.sock" +) + func CapacityFromMachineInfo(info *cadvisorapi.MachineInfo) v1.ResourceList { c := v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity( @@ -71,5 +77,5 @@ func EphemeralStorageCapacityFromFsInfo(info cadvisorapi2.FsInfo) v1.ResourceLis func UsingLegacyCadvisorStats(runtime, runtimeEndpoint string) bool { return runtime == kubetypes.RktContainerRuntime || (runtime == kubetypes.DockerContainerRuntime && goruntime.GOOS == "linux") || - runtimeEndpoint == "/var/run/crio.sock" + runtimeEndpoint == CrioSocket } From e1cf77858e8a8502a293b432658d06f4fbc020d0 Mon Sep 17 00:00:00 2001 From: Walter Fender Date: Mon, 4 Dec 2017 14:44:54 -0800 Subject: [PATCH 262/794] Add e2e test for when a webhook does not return. Adding code to simulate a webhook not returning. Ensure that we verify that the returned error is a timeout. 
--- test/e2e/apimachinery/webhook.go | 33 +++++++++++++++++++++++++++++++- test/images/webhook/Makefile | 4 ++-- test/images/webhook/main.go | 9 +++++++-- 3 files changed, 41 insertions(+), 5 deletions(-) diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index cd7c12c4dab..e9a714f49ee 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -56,6 +56,7 @@ const ( skipNamespaceLabelValue = "yes" skippedNamespaceName = "exempted-namesapce" disallowedPodName = "disallowed-pod" + hangingPodName = "hanging-pod" disallowedConfigMapName = "disallowed-configmap" allowedConfigMapName = "allowed-configmap" crdName = "e2e-test-webhook-crd" @@ -99,7 +100,7 @@ var _ = SIGDescribe("AdmissionWebhook", func() { // Note that in 1.9 we will have backwards incompatible change to // admission webhooks, so the image will be updated to 1.9 sometime in // the development 1.9 cycle. - deployWebhookAndService(f, "gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v6", context) + deployWebhookAndService(f, "gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v7", context) }) AfterEach(func() { cleanWebhookTest(client, namespaceName) @@ -453,6 +454,17 @@ func testWebhook(f *framework.Framework) { framework.Failf("expect error contains %q, got %q", expectedErrMsg2, err.Error()) } + By("create a pod that causes the webhook to hang") + client = f.ClientSet + // Creating the pod, the request should be rejected + pod = hangingPod(f) + _, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod) + Expect(err).NotTo(BeNil()) + expectedTimeoutErr := "request did not complete within allowed duration" + if !strings.Contains(err.Error(), expectedTimeoutErr) { + framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error()) + } + By("create a configmap that should be denied by the webhook") // Creating the configmap, the request should be rejected configmap := 
nonCompliantConfigMap(f) @@ -631,6 +643,25 @@ func nonCompliantPod(f *framework.Framework) *v1.Pod { } } +func hangingPod(f *framework.Framework) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: hangingPodName, + Labels: map[string]string{ + "webhook-e2e-test": "wait-forever", + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "wait-forever", + Image: framework.GetPauseImageName(f.ClientSet), + }, + }, + }, + } +} + func nonCompliantConfigMap(f *framework.Framework) *v1.ConfigMap { return &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/images/webhook/Makefile b/test/images/webhook/Makefile index 75f04cd3745..a201dd5b233 100644 --- a/test/images/webhook/Makefile +++ b/test/images/webhook/Makefile @@ -14,7 +14,7 @@ build: CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o webhook . - docker build --no-cache -t gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v6 . + docker build --no-cache -t gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v7 . 
rm -rf webhook push: - gcloud docker -- push gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v6 + gcloud docker -- push gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v7 diff --git a/test/images/webhook/main.go b/test/images/webhook/main.go index d487c08caa0..da2e4e9d3fb 100644 --- a/test/images/webhook/main.go +++ b/test/images/webhook/main.go @@ -85,11 +85,16 @@ func admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { reviewResponse.Allowed = true var msg string - for k, v := range pod.Labels { - if k == "webhook-e2e-test" && v == "webhook-disallow" { + if v, ok := pod.Labels["webhook-e2e-test"]; ok { + if v == "webhook-disallow" { reviewResponse.Allowed = false msg = msg + "the pod contains unwanted label; " } + if v == "wait-forever" { + reviewResponse.Allowed = false + msg = msg + "the pod response should not be sent; " + <-make(chan int) // Sleep forever - no one sends to this channel + } } for _, container := range pod.Spec.Containers { if strings.Contains(container.Name, "webhook-disallow") { From 39721a2811045f93cf05822246ab3098d7d8ea59 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Tue, 12 Dec 2017 01:52:30 -0500 Subject: [PATCH 263/794] Add tests for accept content-type fallback --- .../apiserver/pkg/endpoints/apiserver_test.go | 40 ++++++++++++++++--- 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go index eda01358252..4dce02f9abc 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go @@ -2026,11 +2026,29 @@ func TestGetPartialObjectMetadata(t *testing.T) { accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1;g=meta.k8s.io", statusCode: http.StatusNotAcceptable, }, + { + accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1;g=meta.k8s.io, 
application/json", + expectKind: schema.GroupVersionKind{Kind: "Simple", Group: testGroupVersion.Group, Version: testGroupVersion.Version}, + }, + { + accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1alpha1;g=meta.k8s.io, application/json", + expectKind: schema.GroupVersionKind{Kind: "PartialObjectMetadata", Group: "meta.k8s.io", Version: "v1alpha1"}, + }, { list: true, accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1alpha1;g=meta.k8s.io", statusCode: http.StatusNotAcceptable, }, + { + list: true, + accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadata;v=v1;g=meta.k8s.io, application/json", + expectKind: schema.GroupVersionKind{Kind: "SimpleList", Group: testGroupVersion.Group, Version: testGroupVersion.Version}, + }, + { + list: true, + accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadataList;v=v1alpha1;g=meta.k8s.io, application/json", + expectKind: schema.GroupVersionKind{Kind: "PartialObjectMetadataList", Group: "meta.k8s.io", Version: "v1alpha1"}, + }, { accept: runtime.ContentTypeJSON + ";as=PartialObjectMetadataList;v=v1alpha1;g=meta.k8s.io", statusCode: http.StatusNotAcceptable, @@ -2096,12 +2114,22 @@ func TestGetPartialObjectMetadata(t *testing.T) { t.Errorf("%d: invalid status: %#v\n%s", i, resp, bodyOrDie(resp)) continue } - itemOut, body, err := extractBodyObject(resp, metainternalversion.Codecs.LegacyCodec(metav1alpha1.SchemeGroupVersion)) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(test.expected, itemOut) { - t.Errorf("%d: did not match: %s", i, diff.ObjectReflectDiff(test.expected, itemOut)) + body := "" + if test.expected != nil { + itemOut, d, err := extractBodyObject(resp, metainternalversion.Codecs.LegacyCodec(metav1alpha1.SchemeGroupVersion)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(test.expected, itemOut) { + t.Errorf("%d: did not match: %s", i, diff.ObjectReflectDiff(test.expected, itemOut)) + } + body = d + } else { + d, err := ioutil.ReadAll(resp.Body) + if 
err != nil { + t.Fatal(err) + } + body = string(d) } obj := &unstructured.Unstructured{} if err := json.Unmarshal([]byte(body), obj); err != nil { From d380e2972ea72086c61f98b52b9b4bd97fef9ad6 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Tue, 12 Dec 2017 14:51:56 +0530 Subject: [PATCH 264/794] Add VolumeMode in GlusterFS PV spec. This ensures that GlusterFS pvs are file backed storage. Signed-off-by: Humble Chirammal --- pkg/volume/glusterfs/glusterfs.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index c42e3cdf794..08fce929861 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -704,10 +704,12 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { glog.Errorf("create volume error: %v.", err) return nil, fmt.Errorf("create volume error: %v", err) } + mode := v1.PersistentVolumeFilesystem pv := new(v1.PersistentVolume) pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs pv.Spec.PersistentVolumeReclaimPolicy = p.options.PersistentVolumeReclaimPolicy pv.Spec.AccessModes = p.options.PVC.Spec.AccessModes + pv.Spec.VolumeMode = &mode if len(pv.Spec.AccessModes) == 0 { pv.Spec.AccessModes = p.plugin.GetAccessModes() } From e913612003cbe71c9f58dc6c65913f6e2cc4c345 Mon Sep 17 00:00:00 2001 From: Steve Larkin Date: Tue, 12 Dec 2017 19:11:23 +0100 Subject: [PATCH 265/794] Extend YAMLDecoder Read tests --- .../pkg/util/yaml/decoder_test.go | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder_test.go b/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder_test.go index 1eebd2018f0..3c1ad7b2219 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder_test.go @@ -29,16 +29,28 @@ import ( "testing" ) -func TestYAMLDecoder(t *testing.T) { +func 
TestYAMLDecoderReadBytesLength(t *testing.T) { d := `--- stuff: 1 test-foo: 1 ` - s := NewDocumentDecoder(ioutil.NopCloser(bytes.NewReader([]byte(d)))) - b := make([]byte, len(d)) - n, err := s.Read(b) - if err != nil || n != len(d) { - t.Fatalf("unexpected body: %d / %v", n, err) + testCases := []struct { + bufLen int + expectLen int + expectErr error + }{ + {len(d), len(d), nil}, + {len(d) + 10, len(d), nil}, + {len(d) - 10, len(d) - 10, io.ErrShortBuffer}, + } + + for i, testCase := range testCases { + r := NewDocumentDecoder(ioutil.NopCloser(bytes.NewReader([]byte(d)))) + b := make([]byte, testCase.bufLen) + n, err := r.Read(b) + if err != testCase.expectErr || n != testCase.expectLen { + t.Fatalf("%d: unexpected body: %d / %v", i, n, err) + } } } From 3d5288ebf380c65e165674a62235e22976560a6a Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Tue, 12 Dec 2017 13:20:56 -0500 Subject: [PATCH 266/794] Check ns setup error during e2e --- test/e2e/framework/framework.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index f0d9eb513df..e628accaa28 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -378,7 +378,7 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) ( f.namespacesToDelete = append(f.namespacesToDelete, ns) } - if !f.SkipPrivilegedPSPBinding { + if err == nil && !f.SkipPrivilegedPSPBinding { CreatePrivilegedPSPBinding(f, ns.Name) } From 18c5234b3aa60036c0b4b83e8545dba9a4d8fc6d Mon Sep 17 00:00:00 2001 From: Chakravarthy Nelluri Date: Mon, 11 Dec 2017 22:57:45 -0500 Subject: [PATCH 267/794] Mark Flexvolume as GA --- api/openapi-spec/swagger.json | 6 +++--- api/swagger-spec/apps_v1.json | 4 ++-- api/swagger-spec/apps_v1beta1.json | 4 ++-- api/swagger-spec/apps_v1beta2.json | 4 ++-- api/swagger-spec/batch_v1.json | 4 ++-- api/swagger-spec/batch_v1beta1.json | 4 ++-- api/swagger-spec/batch_v2alpha1.json | 4 
++-- api/swagger-spec/extensions_v1beta1.json | 4 ++-- api/swagger-spec/settings.k8s.io_v1alpha1.json | 4 ++-- api/swagger-spec/v1.json | 6 +++--- docs/api-reference/apps/v1/definitions.html | 4 ++-- docs/api-reference/apps/v1beta1/definitions.html | 4 ++-- docs/api-reference/apps/v1beta2/definitions.html | 4 ++-- docs/api-reference/batch/v1/definitions.html | 4 ++-- docs/api-reference/batch/v1beta1/definitions.html | 4 ++-- docs/api-reference/batch/v2alpha1/definitions.html | 4 ++-- docs/api-reference/extensions/v1beta1/definitions.html | 4 ++-- .../settings.k8s.io/v1alpha1/definitions.html | 4 ++-- docs/api-reference/v1/definitions.html | 6 +++--- pkg/apis/core/types.go | 6 +++--- staging/src/k8s.io/api/core/v1/generated.proto | 8 +++----- staging/src/k8s.io/api/core/v1/types.go | 8 +++----- .../src/k8s.io/api/core/v1/types_swagger_doc_generated.go | 6 +++--- 23 files changed, 53 insertions(+), 57 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 89d72410065..7014bba7119 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -75655,7 +75655,7 @@ } }, "io.k8s.api.core.v1.FlexVolumeSource": { - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], @@ -77006,7 +77006,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource" }, "flexVolume": { - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource" }, "flocker": { @@ -78895,7 +78895,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource" }, "flexVolume": { - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource" }, "flocker": { diff --git a/api/swagger-spec/apps_v1.json b/api/swagger-spec/apps_v1.json index 7956e22ea58..8109d5910fb 100644 --- a/api/swagger-spec/apps_v1.json +++ b/api/swagger-spec/apps_v1.json @@ -6799,7 +6799,7 @@ }, "flexVolume": { "$ref": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future." + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." }, "cinder": { "$ref": "v1.CinderVolumeSource", @@ -7197,7 +7197,7 @@ }, "v1.FlexVolumeSource": { "id": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], diff --git a/api/swagger-spec/apps_v1beta1.json b/api/swagger-spec/apps_v1beta1.json index 8a004fb748f..e2533176f49 100644 --- a/api/swagger-spec/apps_v1beta1.json +++ b/api/swagger-spec/apps_v1beta1.json @@ -4433,7 +4433,7 @@ }, "flexVolume": { "$ref": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future." + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." }, "cinder": { "$ref": "v1.CinderVolumeSource", @@ -4831,7 +4831,7 @@ }, "v1.FlexVolumeSource": { "id": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], diff --git a/api/swagger-spec/apps_v1beta2.json b/api/swagger-spec/apps_v1beta2.json index 0aecb380f24..be427887ae3 100644 --- a/api/swagger-spec/apps_v1beta2.json +++ b/api/swagger-spec/apps_v1beta2.json @@ -6799,7 +6799,7 @@ }, "flexVolume": { "$ref": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future." + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." 
}, "cinder": { "$ref": "v1.CinderVolumeSource", @@ -7197,7 +7197,7 @@ }, "v1.FlexVolumeSource": { "id": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], diff --git a/api/swagger-spec/batch_v1.json b/api/swagger-spec/batch_v1.json index 4d154abb9ad..28787d81889 100644 --- a/api/swagger-spec/batch_v1.json +++ b/api/swagger-spec/batch_v1.json @@ -1773,7 +1773,7 @@ }, "flexVolume": { "$ref": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future." + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." }, "cinder": { "$ref": "v1.CinderVolumeSource", @@ -2171,7 +2171,7 @@ }, "v1.FlexVolumeSource": { "id": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], diff --git a/api/swagger-spec/batch_v1beta1.json b/api/swagger-spec/batch_v1beta1.json index c84dd3cde5f..bb9b870bda2 100644 --- a/api/swagger-spec/batch_v1beta1.json +++ b/api/swagger-spec/batch_v1beta1.json @@ -1828,7 +1828,7 @@ }, "flexVolume": { "$ref": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future." 
+ "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." }, "cinder": { "$ref": "v1.CinderVolumeSource", @@ -2226,7 +2226,7 @@ }, "v1.FlexVolumeSource": { "id": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], diff --git a/api/swagger-spec/batch_v2alpha1.json b/api/swagger-spec/batch_v2alpha1.json index fbaaa4ee818..cde661970dd 100644 --- a/api/swagger-spec/batch_v2alpha1.json +++ b/api/swagger-spec/batch_v2alpha1.json @@ -1828,7 +1828,7 @@ }, "flexVolume": { "$ref": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future." + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." }, "cinder": { "$ref": "v1.CinderVolumeSource", @@ -2226,7 +2226,7 @@ }, "v1.FlexVolumeSource": { "id": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], diff --git a/api/swagger-spec/extensions_v1beta1.json b/api/swagger-spec/extensions_v1beta1.json index 56f79527b51..d8b20a3fd98 100644 --- a/api/swagger-spec/extensions_v1beta1.json +++ b/api/swagger-spec/extensions_v1beta1.json @@ -7441,7 +7441,7 @@ }, "flexVolume": { "$ref": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future." + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." }, "cinder": { "$ref": "v1.CinderVolumeSource", @@ -7839,7 +7839,7 @@ }, "v1.FlexVolumeSource": { "id": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], diff --git a/api/swagger-spec/settings.k8s.io_v1alpha1.json b/api/swagger-spec/settings.k8s.io_v1alpha1.json index 6167891529e..dc442a8ebbe 100644 --- a/api/swagger-spec/settings.k8s.io_v1alpha1.json +++ b/api/swagger-spec/settings.k8s.io_v1alpha1.json @@ -1615,7 +1615,7 @@ }, "flexVolume": { "$ref": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future." + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." 
}, "cinder": { "$ref": "v1.CinderVolumeSource", @@ -2013,7 +2013,7 @@ }, "v1.FlexVolumeSource": { "id": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index e988dfc85a5..97be62be175 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -20607,7 +20607,7 @@ }, "flexVolume": { "$ref": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future." + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." }, "azureFile": { "$ref": "v1.AzureFilePersistentVolumeSource", @@ -21022,7 +21022,7 @@ }, "v1.FlexVolumeSource": { "id": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], @@ -21596,7 +21596,7 @@ }, "flexVolume": { "$ref": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future." + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." 
}, "cinder": { "$ref": "v1.CinderVolumeSource", diff --git a/docs/api-reference/apps/v1/definitions.html b/docs/api-reference/apps/v1/definitions.html index a8a675a8e85..af89c45aa2c 100755 --- a/docs/api-reference/apps/v1/definitions.html +++ b/docs/api-reference/apps/v1/definitions.html @@ -2615,7 +2615,7 @@ When an object is created, the system will populate this list with the current s

v1.FlexVolumeSource

-

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

+

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

@@ -3262,7 +3262,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/apps/v1beta1/definitions.html b/docs/api-reference/apps/v1beta1/definitions.html index 3b522ed1981..8bf4a106ec9 100755 --- a/docs/api-reference/apps/v1beta1/definitions.html +++ b/docs/api-reference/apps/v1beta1/definitions.html @@ -2726,7 +2726,7 @@ When an object is created, the system will populate this list with the current s

v1.FlexVolumeSource

-

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

+

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

v1.FlexVolumeSource

@@ -3259,7 +3259,7 @@ The StatefulSet guarantees that a given network identity will always map to the - + diff --git a/docs/api-reference/apps/v1beta2/definitions.html b/docs/api-reference/apps/v1beta2/definitions.html index b6ddabf7827..0eb7eae50ba 100755 --- a/docs/api-reference/apps/v1beta2/definitions.html +++ b/docs/api-reference/apps/v1beta2/definitions.html @@ -3377,7 +3377,7 @@ When an object is created, the system will populate this list with the current s

v1.FlexVolumeSource

-

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

+

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

v1.FlexVolumeSource

@@ -3965,7 +3965,7 @@ The StatefulSet guarantees that a given network identity will always map to the - + diff --git a/docs/api-reference/batch/v1/definitions.html b/docs/api-reference/batch/v1/definitions.html index 9cec3b5687f..291cf172cda 100755 --- a/docs/api-reference/batch/v1/definitions.html +++ b/docs/api-reference/batch/v1/definitions.html @@ -2089,7 +2089,7 @@ When an object is created, the system will populate this list with the current s

v1.FlexVolumeSource

-

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

+

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

v1.FlexVolumeSource

@@ -2629,7 +2629,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/batch/v1beta1/definitions.html b/docs/api-reference/batch/v1beta1/definitions.html index e1466c71b4d..1e84af1e5ec 100755 --- a/docs/api-reference/batch/v1beta1/definitions.html +++ b/docs/api-reference/batch/v1beta1/definitions.html @@ -2061,7 +2061,7 @@ When an object is created, the system will populate this list with the current s

v1.FlexVolumeSource

-

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

+

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

v1.FlexVolumeSource

@@ -2663,7 +2663,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/batch/v2alpha1/definitions.html b/docs/api-reference/batch/v2alpha1/definitions.html index 9ed32f89dfd..12662acce97 100755 --- a/docs/api-reference/batch/v2alpha1/definitions.html +++ b/docs/api-reference/batch/v2alpha1/definitions.html @@ -2020,7 +2020,7 @@ When an object is created, the system will populate this list with the current s

v1.FlexVolumeSource

-

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

+

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

v1.FlexVolumeSource

@@ -2636,7 +2636,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/extensions/v1beta1/definitions.html b/docs/api-reference/extensions/v1beta1/definitions.html index 547557f250c..bd302b0eb89 100755 --- a/docs/api-reference/extensions/v1beta1/definitions.html +++ b/docs/api-reference/extensions/v1beta1/definitions.html @@ -3163,7 +3163,7 @@ When an object is created, the system will populate this list with the current s

v1.FlexVolumeSource

-

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

+

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

v1.FlexVolumeSource

@@ -3843,7 +3843,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/settings.k8s.io/v1alpha1/definitions.html b/docs/api-reference/settings.k8s.io/v1alpha1/definitions.html index 61d69b2884d..aa446f591ad 100755 --- a/docs/api-reference/settings.k8s.io/v1alpha1/definitions.html +++ b/docs/api-reference/settings.k8s.io/v1alpha1/definitions.html @@ -2999,7 +2999,7 @@ When an object is created, the system will populate this list with the current s

v1.FlexVolumeSource

-

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

+

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

v1.FlexVolumeSource

@@ -3414,7 +3414,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index 78489b60724..b169977ecee 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -2244,7 +2244,7 @@ The resulting set of endpoints can be viewed as:

v1.FlexVolumeSource

-

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

+

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

v1.FlexVolumeSource

@@ -4087,7 +4087,7 @@ Examples:
- + @@ -8380,7 +8380,7 @@ Examples:
- + diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index 032e6d84c40..b6b570b06e4 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -269,7 +269,7 @@ type VolumeSource struct { Quobyte *QuobyteVolumeSource // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource @@ -352,7 +352,7 @@ type PersistentVolumeSource struct { // +optional ISCSI *ISCSIPersistentVolumeSource // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource // Cinder represents a cinder volume attached and mounted on kubelets host machine @@ -868,7 +868,7 @@ type FCVolumeSource struct { } // FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +// provisioned/attached using an exec based plugin. type FlexVolumeSource struct { // Driver is the name of the driver to use for this volume. Driver string diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index 096d5240725..de2c1e690e8 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -1170,7 +1170,7 @@ message FCVolumeSource { } // FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +// provisioned/attached using an exec based plugin. message FlexVolumeSource { // Driver is the name of the driver to use for this volume. 
optional string driver = 1; @@ -2439,8 +2439,7 @@ message PersistentVolumeSource { optional FlockerVolumeSource flocker = 11; // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional optional FlexVolumeSource flexVolume = 12; @@ -4477,8 +4476,7 @@ message VolumeSource { optional RBDVolumeSource rbd = 11; // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional optional FlexVolumeSource flexVolume = 12; diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 6ab6911600a..728cbd5a62b 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -302,8 +302,7 @@ type VolumeSource struct { // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // Cinder represents a cinder volume attached and mounted on kubelets host machine @@ -417,8 +416,7 @@ type PersistentVolumeSource struct { // +optional Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. 
// +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. @@ -1084,7 +1082,7 @@ type QuobyteVolumeSource struct { } // FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +// provisioned/attached using an exec based plugin. type FlexVolumeSource struct { // Driver is the name of the driver to use for this volume. Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index 0f141b41050..c50dd0a0520 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -617,7 +617,7 @@ func (FCVolumeSource) SwaggerDoc() map[string]string { } var map_FlexVolumeSource = map[string]string{ - "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "driver": "Driver is the name of the driver to use for this volume.", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. 
If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", @@ -1251,7 +1251,7 @@ var map_PersistentVolumeSource = map[string]string{ "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", @@ -2184,7 +2184,7 @@ var map_VolumeSource = map[string]string{ "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", From 6559e4dedeffa0c1d97b5b8795b24047a4617081 Mon Sep 17 00:00:00 2001 From: Rye Terrell Date: Tue, 12 Dec 2017 14:33:38 -0600 Subject: [PATCH 268/794] make kube-dns addon optional --- .../juju/layers/kubernetes-master/config.yaml | 4 ++++ .../reactive/kubernetes_master.py | 22 +++++++++---------- .../reactive/kubernetes_worker.py | 4 +++- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/cluster/juju/layers/kubernetes-master/config.yaml b/cluster/juju/layers/kubernetes-master/config.yaml index 18b2b691d57..4001cd979ac 100644 --- a/cluster/juju/layers/kubernetes-master/config.yaml +++ b/cluster/juju/layers/kubernetes-master/config.yaml @@ -3,6 +3,10 @@ options: type: boolean default: True description: Deploy the Kubernetes Dashboard and Heapster addons + enable-kube-dns: + type: boolean + default: True + description: Deploy kube-dns addon dns_domain: type: string default: cluster.local diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 1547bc8bc93..5736e938110 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -438,10 +438,10 @@ def etcd_data_change(etcd): @when('cdk-addons.configured') def send_cluster_dns_detail(kube_control): ''' Send cluster DNS info ''' - # Note that 
the DNS server doesn't necessarily exist at this point. We know - # where we're going to put it, though, so let's send the info anyway. - dns_ip = get_dns_ip() - kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip) + enableKubeDNS = hookenv.config('enable-kube-dns') + dnsDomain = hookenv.config('dns_domain') + dns_ip = None if not enableKubeDNS else get_dns_ip() + kube_control.set_dns(53, dnsDomain, dns_ip, enableKubeDNS) @when('kube-control.connected') @@ -563,11 +563,12 @@ def configure_cdk_addons(): ''' Configure CDK addons ''' remove_state('cdk-addons.configured') dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower() + dnsEnabled = str(hookenv.config('enable-kube-dns')).lower() args = [ 'arch=' + arch(), - 'dns-ip=' + get_dns_ip(), 'dns-domain=' + hookenv.config('dns_domain'), - 'enable-dashboard=' + dbEnabled + 'enable-dashboard=' + dbEnabled, + 'enable-kube-dns=' + dnsEnabled ] check_call(['snap', 'set', 'cdk-addons'] + args) if not addons_ready(): @@ -951,11 +952,10 @@ def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, def get_dns_ip(): - '''Get an IP address for the DNS server on the provided cidr.''' - interface = ipaddress.IPv4Interface(service_cidr()) - # Add .10 at the end of the network - ip = interface.network.network_address + 10 - return ip.exploded + cmd = "kubectl get service --namespace kube-system kube-dns --output json" + output = check_output(cmd, shell=True).decode() + svc = json.loads(output) + return svc['spec']['clusterIP'] def get_kubernetes_service_ip(): diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index d43f06768a8..01c215c61c8 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -518,7 +518,6 @@ def configure_kubelet(dns): kubelet_opts['v'] = '0' kubelet_opts['address'] = '0.0.0.0' 
kubelet_opts['port'] = '10250' - kubelet_opts['cluster-dns'] = dns['sdn-ip'] kubelet_opts['cluster-domain'] = dns['domain'] kubelet_opts['anonymous-auth'] = 'false' kubelet_opts['client-ca-file'] = ca_cert_path @@ -527,6 +526,9 @@ def configure_kubelet(dns): kubelet_opts['logtostderr'] = 'true' kubelet_opts['fail-swap-on'] = 'false' + if (dns['enable-kube-dns']): + kubelet_opts['cluster-dns'] = dns['sdn-ip'] + privileged = is_state('kubernetes-worker.privileged') kubelet_opts['allow-privileged'] = 'true' if privileged else 'false' From 7c4c321c9899ebe0c8cb21477f47d9f9743ddf19 Mon Sep 17 00:00:00 2001 From: Yu Liao Date: Tue, 12 Dec 2017 12:55:20 -0800 Subject: [PATCH 269/794] added defaults for --watch-cache-sizes description. --- staging/src/k8s.io/apiserver/pkg/server/options/etcd.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index d522cde9bcb..9bfa3a0a778 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -120,7 +120,8 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ "List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. "+ "The individual override format: resource#size, where size is a number. It takes effect "+ - "when watch-cache is enabled.") + "when watch-cache is enabled. Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices) "+ + "have system defaults set by heuristics, others default to default-watch-cache-size") fs.StringVar(&s.StorageConfig.Type, "storage-backend", s.StorageConfig.Type, "The storage backend for persistence. 
Options: 'etcd3' (default), 'etcd2'.") From a73382566b29c2602289e2d70994d9147a2745e4 Mon Sep 17 00:00:00 2001 From: Rohit Agarwal Date: Tue, 12 Dec 2017 20:53:27 -0800 Subject: [PATCH 270/794] Update nvidia-gpu-device-plugin addon. This includes changes from GoogleCloudPlatform/container-engine-accelerators#38 and GoogleCloudPlatform/container-engine-accelerators#37 --- cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml b/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml index 58c233ef027..5b157548c19 100644 --- a/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml +++ b/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml @@ -36,7 +36,7 @@ spec: hostPath: path: /dev containers: - - image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:5e3837c3ab99e90d4c19053998ad86239591de4264bc177faad75642b64b723d" + - image: "gcr.io/google-containers/nvidia-gpu-device-plugin@sha256:0e79da6998a61257585e0d3fb5848240129f0fa5b4ad972dfed4049448093c33" command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"] name: nvidia-gpu-device-plugin resources: From f1e41813156be94ee835aa9e1c59545904f3329d Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 13 Dec 2017 13:29:21 +0800 Subject: [PATCH 271/794] Enhance proxy mode validation --- .../apis/kubeproxyconfig/validation/BUILD | 1 + .../kubeproxyconfig/validation/validation.go | 40 +++++++++---------- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/pkg/proxy/apis/kubeproxyconfig/validation/BUILD b/pkg/proxy/apis/kubeproxyconfig/validation/BUILD index 2c8abb0dcab..9737c96624a 100644 --- a/pkg/proxy/apis/kubeproxyconfig/validation/BUILD +++ b/pkg/proxy/apis/kubeproxyconfig/validation/BUILD @@ -14,6 +14,7 @@ go_library( "//pkg/apis/core/validation:go_default_library", "//pkg/proxy/apis/kubeproxyconfig:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) diff --git a/pkg/proxy/apis/kubeproxyconfig/validation/validation.go b/pkg/proxy/apis/kubeproxyconfig/validation/validation.go index 376d281c9d8..19be3b386a3 100644 --- a/pkg/proxy/apis/kubeproxyconfig/validation/validation.go +++ b/pkg/proxy/apis/kubeproxyconfig/validation/validation.go @@ -24,6 +24,7 @@ import ( "strings" utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" @@ -150,33 +151,32 @@ func validateProxyMode(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) fiel } func validateProxyModeLinux(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} + validModes := sets.NewString( + string(kubeproxyconfig.ProxyModeUserspace), + string(kubeproxyconfig.ProxyModeIPTables), + string(kubeproxyconfig.ProxyModeIPVS), + ) - switch mode { - case kubeproxyconfig.ProxyModeUserspace: - case kubeproxyconfig.ProxyModeIPTables: - case kubeproxyconfig.ProxyModeIPVS: - case "": - default: - modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeIPTables), string(kubeproxyconfig.ProxyModeIPVS)} - errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy [currently iptables])", strings.Join(modes, ",")) - allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)) + if mode == "" || validModes.Has(string(mode)) { + return nil } - return allErrs + + errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy [currently iptables])", strings.Join(validModes.List(), ",")) + return 
field.ErrorList{field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)} } func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} + validModes := sets.NewString( + string(kubeproxyconfig.ProxyModeUserspace), + string(kubeproxyconfig.ProxyModeKernelspace), + ) - switch mode { - case kubeproxyconfig.ProxyModeUserspace: - case kubeproxyconfig.ProxyModeKernelspace: - default: - modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeKernelspace)} - errMsg := fmt.Sprintf("must be %s or blank (blank means the most-available proxy [currently userspace])", strings.Join(modes, ",")) - allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)) + if mode == "" || validModes.Has(string(mode)) { + return nil } - return allErrs + + errMsg := fmt.Sprintf("must be %s or blank (blank means the most-available proxy [currently userspace])", strings.Join(validModes.List(), ",")) + return field.ErrorList{field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)} } func validateClientConnectionConfiguration(config kubeproxyconfig.ClientConnectionConfiguration, fldPath *field.Path) field.ErrorList { From 0682372bae44be6e54d5a716f2a795c1110fbbca Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Wed, 13 Dec 2017 00:44:24 -0500 Subject: [PATCH 272/794] Define default role for full kubelet API access --- .../authorizer/rbac/bootstrappolicy/policy.go | 11 ++++++ .../testdata/cluster-roles.yaml | 34 +++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index b54d7c6dfdc..4914641ba18 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -340,6 +340,17 @@ func ClusterRoles() []rbac.ClusterRole { eventsRule(), }, }, + { + // 
a role to use for full access to the kubelet API + ObjectMeta: metav1.ObjectMeta{Name: "system:kubelet-api-admin"}, + Rules: []rbac.PolicyRule{ + // Allow read-only access to the Node API objects + rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + // Allow all API calls to the nodes + rbac.NewRule("proxy").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbac.NewRule("*").Groups(legacyGroup).Resources("nodes/proxy", "nodes/metrics", "nodes/spec", "nodes/stats", "nodes/log").RuleOrDie(), + }, + }, { // a role to use for bootstrapping a node's client certificates ObjectMeta: metav1.ObjectMeta{Name: "system:node-bootstrapper"}, diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index 1e2a36c6289..2bd026e76e1 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -842,6 +842,40 @@ items: - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:kubelet-api-admin + rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - proxy + - apiGroups: + - "" + resources: + - nodes/log + - nodes/metrics + - nodes/proxy + - nodes/spec + - nodes/stats + verbs: + - '*' - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: From 806e4f5afa86d3d1e096c00c08f748edc2053c18 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 13 Dec 2017 14:11:55 +0800 Subject: [PATCH 273/794] Add a general VMSet interface for both scale sets and availability sets --- .../providers/azure/azure_vmsets.go | 59 +++++++++++++++++++ 
1 file changed, 59 insertions(+) create mode 100644 pkg/cloudprovider/providers/azure/azure_vmsets.go diff --git a/pkg/cloudprovider/providers/azure/azure_vmsets.go b/pkg/cloudprovider/providers/azure/azure_vmsets.go new file mode 100644 index 00000000000..dd5cc308dcd --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_vmsets.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "github.com/Azure/azure-sdk-for-go/arm/network" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/cloudprovider" +) + +// VMSet defines functions all vmsets (including scale set and availabitlity +// set) should be implemented. +type VMSet interface { + // GetInstanceIDByNodeName gets the cloud provider ID by node name. + // It must return ("", cloudprovider.InstanceNotFound) if the instance does + // not exist or is no longer running. + GetInstanceIDByNodeName(name string) (string, error) + // GetInstanceTypeByNodeName gets the instance type by node name. + GetInstanceTypeByNodeName(name string) (string, error) + // GetIPByNodeName gets machine IP by node name. + GetIPByNodeName(name, vmSetName string) (string, error) + // GetPrimaryInterface gets machine primary network interface by node name and vmSet. + GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) + // GetNodeNameByProviderID gets the node name by provider ID. 
+	GetNodeNameByProviderID(providerID string) (types.NodeName, error)
+
+	// GetZoneByNodeName gets cloudprovider.Zone by node name.
+	GetZoneByNodeName(name string) (cloudprovider.Zone, error)
+
+	// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
+	// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
+	GetPrimaryVMSetName() string
+	// GetVMSetNames selects all possible availability sets or scale sets
+	// (depending vmType configured) for service load balancer. If the service has
+	// no loadbalancer mode annotation returns the primary VMSet. If service annotation
+	// for loadbalancer exists then return the eligible VMSet.
+	GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
+	// EnsureHostsInPool ensures the given Node's primary IP configurations are
+	// participating in the specified LoadBalancer Backend Pool.
+	EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error
+	// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
+ EnsureBackendPoolDeleted(poolID, vmSetName string) error +} From 7944bc3117f701b48c8ac02ac067d52fd5fc7245 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 13 Dec 2017 14:13:15 +0800 Subject: [PATCH 274/794] Add scale set implementation of VMSet interface --- .../providers/azure/azure_backoff.go | 8 +- .../providers/azure/azure_util_vmss.go | 763 +++++++++++++++++- .../providers/azure/azure_wrap.go | 33 +- 3 files changed, 752 insertions(+), 52 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 6f5e41349db..3cf5d8930fa 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -58,13 +58,13 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua return machine, exists, err } -// GetScaleSetsVMWithRetry invokes az.getScaleSetsVM with exponential backoff retry -func (az *Cloud) GetScaleSetsVMWithRetry(name types.NodeName) (compute.VirtualMachineScaleSetVM, bool, error) { +// GetScaleSetsVMWithRetry invokes ss.getScaleSetVM with exponential backoff retry +func (ss *scaleSet) GetScaleSetsVMWithRetry(name types.NodeName, scaleSetName string) (compute.VirtualMachineScaleSetVM, bool, error) { var machine compute.VirtualMachineScaleSetVM var exists bool - err := wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + err := wait.ExponentialBackoff(ss.resourceRequestBackoff, func() (bool, error) { var retryErr error - machine, exists, retryErr = az.getVmssVirtualMachine(name) + machine, exists, retryErr = ss.getScaleSetVM(string(name), scaleSetName) if retryErr != nil { glog.Errorf("GetScaleSetsVMWithRetry backoff: failure, will retry,err=%v", retryErr) return false, nil diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index e16e3173552..45f631af951 100644 --- 
a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -17,51 +17,222 @@ limitations under the License. package azure import ( + "errors" "fmt" + "regexp" + "sort" "strconv" + "strings" "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/go-autorest/autorest/to" "github.com/golang/glog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/cloudprovider" ) -func (az *Cloud) getIPForVmssMachine(nodeName types.NodeName) (string, error) { - az.operationPollRateLimiter.Accept() - machine, exists, err := az.getVmssVirtualMachine(nodeName) +var ( + // ErrorNotVmssInstance indicates an instance is not belongint to any vmss. + ErrorNotVmssInstance = errors.New("not a vmss instance") + + scaleSetNameRE = regexp.MustCompile(`^/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`) +) + +// scaleSet implements VMSet interface for Azure scale set. +type scaleSet struct { + *Cloud + + // availabilitySet is also required for scaleSet because some instances + // (e.g. master nodes) may not belong to any scale sets. + availabilitySet VMSet +} + +// newScaleSet creates a new scaleSet. +func newScaleSet(az *Cloud) VMSet { + return &scaleSet{ + Cloud: az, + availabilitySet: newAvailabilitySet(az), + } +} + +// GetInstanceIDByNodeName gets the cloud provider ID by node name. +// It must return ("", cloudprovider.InstanceNotFound) if the instance does +// not exist or is no longer running. +func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { + instanceID, err := ss.getScaleSetInstanceIDByName(name, ss.PrimaryScaleSetName) + if err != nil { + if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { + // Retry with standard type because master nodes may not belong to any vmss. 
+ // TODO: find a better way to identify the type of VM. + return ss.availabilitySet.GetInstanceIDByNodeName(name) + } + + return "", err + } + + return instanceID, nil +} + +func (ss *scaleSet) getScaleSetInstanceIDByName(name, scaleSetName string) (string, error) { + var machine compute.VirtualMachineScaleSetVM + var exists bool + var err error + + ss.operationPollRateLimiter.Accept() + machine, exists, err = ss.getScaleSetVM(name, scaleSetName) + if err != nil { + if ss.CloudProviderBackoff { + glog.V(2).Infof("InstanceID(%s) backing off", name) + machine, exists, err = ss.GetScaleSetsVMWithRetry(types.NodeName(name), scaleSetName) + if err != nil { + glog.V(2).Infof("InstanceID(%s) abort backoff", name) + return "", err + } + } else { + return "", err + } + } else if !exists { + return "", cloudprovider.InstanceNotFound + } + + return *machine.ID, nil +} + +func (ss *scaleSet) getScaleSetVM(nodeName, scaleSetName string) (vm compute.VirtualMachineScaleSetVM, exists bool, err error) { + instanceID, err := getScaleSetVMInstanceID(nodeName) + if err != nil { + return vm, false, err + } + + return ss.getScaleSetVMByID(instanceID, scaleSetName) +} + +func (ss *scaleSet) getScaleSetVMByID(instanceID, scaleSetName string) (vm compute.VirtualMachineScaleSetVM, exists bool, err error) { + var realErr error + + // scaleSetName is required to query VM info. + if scaleSetName == "" { + scaleSetName = ss.PrimaryScaleSetName + } + + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): start", instanceID) + vm, err = ss.VirtualMachineScaleSetVMsClient.Get(ss.ResourceGroup, scaleSetName, instanceID) + glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): end", instanceID) + + exists, realErr = checkResourceExistsFromError(err) + if realErr != nil { + return vm, false, realErr + } + if !exists { + return vm, false, nil + } + + return vm, exists, err +} + +// GetNodeNameByProviderID gets the node name by provider ID. 
+func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) { + // NodeName is not part of providerID for vmss instances. + parts := strings.Split(providerID, "/") + instanceID := parts[len(parts)-1] + machine, exist, err := ss.getScaleSetVMByID(instanceID, ss.PrimaryScaleSetName) + if !exist { return "", cloudprovider.InstanceNotFound } if err != nil { - glog.Errorf("error: az.getIPForVmssMachine(%s), az.getVmssVirtualMachine(%s), err=%v", nodeName, nodeName, err) return "", err } - nicID, err := getPrimaryInterfaceIDForVmssMachine(machine) + return types.NodeName(*machine.OsProfile.ComputerName), nil +} + +// GetInstanceTypeByNodeName gets the instance type by node name. +func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { + instanceType, err := ss.getScaleSetInstanceTypeByNodeName(name) if err != nil { - glog.Errorf("error: az.getIPForVmssMachine(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err) + if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { + // Retry with standard type because master nodes may not belong to any vmss. + // TODO: find a better way to identify the type of VM. 
+ return ss.availabilitySet.GetInstanceTypeByNodeName(name) + } + return "", err } - nicName, err := getLastSegment(nicID) + return instanceType, nil +} + +func (ss *scaleSet) getScaleSetInstanceTypeByNodeName(name string) (string, error) { + machine, exists, err := ss.getScaleSetVM(name, ss.PrimaryScaleSetName) if err != nil { - glog.Errorf("error: az.getIPForVmssMachine(%s), getLastSegment(%s), err=%v", nodeName, nicID, err) + glog.Errorf("error: ss.getScaleSetInstanceTypeByNodeName(%s), ss.getScaleSetVM(%s) err=%v", name, name, err) return "", err + } else if !exists { + return "", cloudprovider.InstanceNotFound } - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName) - nic, err := az.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(az.ResourceGroup, az.Config.PrimaryScaleSetName, *machine.InstanceID, nicName, "") - glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) + if machine.Sku.Name != nil { + return *machine.Sku.Name, nil + } + + return "", fmt.Errorf("instance type is not defined") +} + +// GetZoneByNodeName gets cloudprovider.Zone by node name. +func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { + instanceID, err := getScaleSetVMInstanceID(name) if err != nil { - glog.Errorf("error: az.getIPForVmssMachine(%s), az.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, az.ResourceGroup, nicName, "", err) + if err == ErrorNotVmssInstance { + // Retry with standard type because master nodes may not belong to any vmss. + // TODO: find a better way to identify the type of VM. 
+ return ss.availabilitySet.GetZoneByNodeName(name) + } + return cloudprovider.Zone{}, err + } + + vm, err := ss.VirtualMachineScaleSetVMsClient.Get(ss.ResourceGroup, ss.Config.PrimaryScaleSetName, instanceID) + if err != nil { + return cloudprovider.Zone{}, err + } + + // PlatformFaultDomain is not included in VirtualMachineScaleSetVM, so we get it from VirtualMachineScaleSetVMInstanceView. + vmView, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(ss.ResourceGroup, ss.Config.PrimaryScaleSetName, instanceID) + if err != nil { + return cloudprovider.Zone{}, err + } + + failureDomain := strconv.Itoa(int(*vmView.PlatformFaultDomain)) + zone := cloudprovider.Zone{ + FailureDomain: failureDomain, + Region: *(vm.Location), + } + return zone, nil +} + +// GetPrimaryVMSetName returns the VM set name depending on the configured vmType. +// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType. +func (ss *scaleSet) GetPrimaryVMSetName() string { + return ss.Config.PrimaryScaleSetName +} + +// GetIPByNodeName gets machine IP by node name. +func (ss *scaleSet) GetIPByNodeName(nodeName, vmSetName string) (string, error) { + nic, err := ss.GetPrimaryInterface(nodeName, vmSetName) + if err != nil { + glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q, %q), err=%v", nodeName, nodeName, vmSetName, err) return "", err } ipConfig, err := getPrimaryIPConfig(nic) if err != nil { - glog.Errorf("error: az.getIPForVmssMachine(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err) + glog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err) return "", err } @@ -70,7 +241,7 @@ func (az *Cloud) getIPForVmssMachine(nodeName types.NodeName) (string, error) { } // This returns the full identifier of the primary NIC for the given VM. 
-func getPrimaryInterfaceIDForVmssMachine(machine compute.VirtualMachineScaleSetVM) (string, error) { +func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) { if len(*machine.NetworkProfile.NetworkInterfaces) == 1 { return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil } @@ -87,7 +258,7 @@ func getPrimaryInterfaceIDForVmssMachine(machine compute.VirtualMachineScaleSetV // machineName is composed of computerNamePrefix and 36-based instanceID. // And instanceID part if in fixed length of 6 characters. // Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/. -func getVmssInstanceID(machineName string) (string, error) { +func getScaleSetVMInstanceID(machineName string) (string, error) { nameLength := len(machineName) if nameLength < 6 { return "", ErrorNotVmssInstance @@ -100,3 +271,563 @@ func getVmssInstanceID(machineName string) (string, error) { return fmt.Sprintf("%d", instanceID), nil } + +// extractScaleSetNameByVMID extracts the scaleset name by scaleSetVirtualMachine's ID. +func extractScaleSetNameByVMID(vmID string) (string, error) { + matches := scaleSetNameRE.FindStringSubmatch(vmID) + if len(matches) != 2 { + return "", ErrorNotVmssInstance + } + + return matches[1], nil +} + +// listScaleSetsWithRetry lists scale sets with exponential backoff retry. 
+func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) { + var err error + var result compute.VirtualMachineScaleSetListResult + allScaleSets := make([]string, 0) + + backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetsClient.List start for %v", ss.ResourceGroup) + result, err = ss.VirtualMachineScaleSetsClient.List(ss.ResourceGroup) + glog.V(10).Infof("VirtualMachineScaleSetsClient.List end for %v", ss.ResourceGroup) + if err != nil { + glog.Errorf("VirtualMachineScaleSetsClient.List for %v failed: %v", ss.ResourceGroup, err) + return false, err + } + + return true, nil + }) + if backoffError != nil { + return nil, backoffError + } + + appendResults := (result.Value != nil && len(*result.Value) > 1) + for appendResults { + for _, scaleSet := range *result.Value { + allScaleSets = append(allScaleSets, *scaleSet.Name) + } + appendResults = false + + if result.NextLink != nil { + backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetsClient.ListNextResults start for %v", ss.ResourceGroup) + result, err = ss.VirtualMachineScaleSetsClient.ListNextResults(result) + glog.V(10).Infof("VirtualMachineScaleSetsClient.ListNextResults end for %v", ss.ResourceGroup) + if err != nil { + glog.Errorf("VirtualMachineScaleSetsClient.ListNextResults for %v failed: %v", ss.ResourceGroup, err) + return false, err + } + + return true, nil + }) + if backoffError != nil { + return nil, backoffError + } + + appendResults = (result.Value != nil && len(*result.Value) > 1) + } + + } + + return allScaleSets, nil +} + +// listScaleSetVMsWithRetry lists VMs belonging to the specified scale set with exponential backoff retry. 
+func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.VirtualMachineScaleSetVM, error) { + var err error + var result compute.VirtualMachineScaleSetVMListResult + allVMs := make([]compute.VirtualMachineScaleSetVM, 0) + + backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetVMsClient.List start for %v", scaleSetName) + result, err = ss.VirtualMachineScaleSetVMsClient.List(ss.ResourceGroup, scaleSetName, "", "", "") + glog.V(10).Infof("VirtualMachineScaleSetVMsClient.List end for %v", scaleSetName) + if err != nil { + glog.Errorf("VirtualMachineScaleSetVMsClient.List for %v failed: %v", scaleSetName, err) + return false, err + } + + return true, nil + }) + if backoffError != nil { + return nil, backoffError + } + + appendResults := (result.Value != nil && len(*result.Value) > 1) + for appendResults { + allVMs = append(allVMs, *result.Value...) + appendResults = false + + if result.NextLink != nil { + backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetVMsClient.ListNextResults start for %v", scaleSetName) + result, err = ss.VirtualMachineScaleSetVMsClient.ListNextResults(result) + glog.V(10).Infof("VirtualMachineScaleSetVMsClient.ListNextResults end for %v", ss.ResourceGroup) + if err != nil { + glog.Errorf("VirtualMachineScaleSetVMsClient.ListNextResults for %v failed: %v", scaleSetName, err) + return false, err + } + + return true, nil + }) + if backoffError != nil { + return nil, backoffError + } + + appendResults = (result.Value != nil && len(*result.Value) > 1) + } + + } + + return allVMs, nil +} + +// getAgentPoolAvailabiliySets lists the virtual machines for for the resource group and then builds +// a list of availability sets that match the nodes available to k8s. 
+func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
+	scaleSetNames, err := ss.listScaleSetsWithRetry()
+	if err != nil {
+		return nil, err
+	}
+
+	vmNameToScaleSetName := make(map[string]string, len(scaleSetNames))
+	for _, scaleSetName := range scaleSetNames {
+		vms, err := ss.listScaleSetVMsWithRetry(scaleSetName)
+		if err != nil {
+			return nil, err
+		}
+
+		for idx := range vms {
+			vm := vms[idx]
+			if vm.OsProfile != nil || vm.OsProfile.ComputerName != nil {
+				vmNameToScaleSetName[*vm.OsProfile.ComputerName] = scaleSetName
+			}
+		}
+	}
+
+	agentPoolScaleSets := &[]string{}
+	availableScaleSetNames := sets.NewString()
+	for nx := range nodes {
+		if isMasterNode(nodes[nx]) {
+			continue
+		}
+
+		nodeName := nodes[nx].Name
+		ssName, ok := vmNameToScaleSetName[nodeName]
+		if !ok {
+			// TODO: support master nodes not managed by VMSS.
+			glog.Errorf("Node %q is not belonging to any known scale sets", nodeName)
+			return nil, fmt.Errorf("node %q is not belonging to any known scale sets", nodeName)
+		}
+
+		if availableScaleSetNames.Has(ssName) {
+			continue
+		}
+
+		*agentPoolScaleSets = append(*agentPoolScaleSets, ssName)
+	}
+
+	return agentPoolScaleSets, nil
+}
+
+// GetVMSetNames selects all possible availability sets or scale sets
+// (depending vmType configured) for service load balancer. If the service has
+// no loadbalancer mode annotation returns the primary VMSet. If service annotation
+// for loadbalancer exists then return the eligible VMSet.
+func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetNames *[]string, err error) {
+	hasMode, isAuto, serviceVMSetNames := getServiceLoadBalancerMode(service)
+	if !hasMode {
+		// no mode specified in service annotation default to PrimaryScaleSetName.
+ scaleSetNames := &[]string{ss.Config.PrimaryScaleSetName} + return scaleSetNames, nil + } + + scaleSetNames, err := ss.getAgentPoolScaleSets(nodes) + if err != nil { + glog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err) + return nil, err + } + if len(*scaleSetNames) == 0 { + glog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes)) + return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes)) + } + + // sort the list to have deterministic selection + sort.Strings(*scaleSetNames) + + if !isAuto { + if serviceVMSetNames == nil || len(serviceVMSetNames) == 0 { + return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value") + } + // validate scale set exists + var found bool + for sasx := range serviceVMSetNames { + for asx := range *scaleSetNames { + if strings.EqualFold((*scaleSetNames)[asx], serviceVMSetNames[sasx]) { + found = true + serviceVMSetNames[sasx] = (*scaleSetNames)[asx] + break + } + } + if !found { + glog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx]) + return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetNames[sasx]) + } + } + vmSetNames = &serviceVMSetNames + } + + return vmSetNames, nil +} + +// GetPrimaryInterface gets machine primary network interface by node name and vmSet. +func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) { + ss.operationPollRateLimiter.Accept() + machine, exists, err := ss.getScaleSetVM(nodeName, vmSetName) + if !exists || err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { + // Retry with standard type because master nodes may not belong to any vmss. + // TODO: find a better way to identify the type of VM. 
+ return ss.availabilitySet.GetPrimaryInterface(nodeName, "") + } + if err != nil { + glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getScaleSetVM(%s), err=%v", nodeName, nodeName, err) + return network.Interface{}, err + } + + nicID, err := ss.getPrimaryInterfaceID(machine) + if err != nil { + glog.Errorf("error: ss.GetPrimaryInterface(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err) + return network.Interface{}, err + } + + nicName, err := getLastSegment(nicID) + if err != nil { + glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, nicID, err) + return network.Interface{}, err + } + + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName) + nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, ss.Config.PrimaryScaleSetName, *machine.InstanceID, nicName, "") + glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) + if err != nil { + glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, nicName, "", err) + return network.Interface{}, err + } + + // Fix interface's location, which is required when updating the interface. + // TODO: is this a bug of azure SDK? + if nic.Location == nil || *nic.Location == "" { + nic.Location = &ss.Config.Location + } + + return nic, nil +} + +// getScaleSet gets a scale set by name. 
+func (ss *scaleSet) getScaleSet(name string) (compute.VirtualMachineScaleSet, bool, error) { + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetsClient.Get(%s): start", name) + result, err := ss.VirtualMachineScaleSetsClient.Get(ss.ResourceGroup, name) + glog.V(10).Infof("VirtualMachineScaleSetsClient.Get(%s): end", name) + + exists, realErr := checkResourceExistsFromError(err) + if realErr != nil { + return result, false, realErr + } + + if !exists { + return result, false, nil + } + + return result, exists, err +} + +// getScaleSetWithRetry gets scale set with exponential backoff retry +func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineScaleSet, bool, error) { + var result compute.VirtualMachineScaleSet + var exists bool + + err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { + var retryErr error + result, exists, retryErr = ss.getScaleSet(name) + if retryErr != nil { + glog.Errorf("backoff: failure, will retry,err=%v", retryErr) + return false, nil + } + glog.V(2).Infof("backoff: success") + return true, nil + }) + + return result, exists, err +} + +// getPrimaryNetworkConfiguration gets primary network interface configuration for scale sets. 
+func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]compute.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) { + networkConfigurations := *networkConfigurationList + if len(networkConfigurations) == 1 { + return &networkConfigurations[0], nil + } + + for idx := range networkConfigurations { + networkConfig := &networkConfigurations[idx] + if networkConfig.Primary != nil && *networkConfig.Primary == true { + return networkConfig, nil + } + } + + return nil, fmt.Errorf("failed to find a primary network configuration for the scale set %q", scaleSetName) +} + +func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*compute.VirtualMachineScaleSetIPConfiguration, error) { + ipConfigurations := *config.IPConfigurations + if len(ipConfigurations) == 1 { + return &ipConfigurations[0], nil + } + + for idx := range ipConfigurations { + ipConfig := &ipConfigurations[idx] + if ipConfig.Primary != nil && *ipConfig.Primary == true { + return ipConfig, nil + } + } + + return nil, fmt.Errorf("failed to find a primary IP configuration for the scale set %q", scaleSetName) +} + +// createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry. 
+func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error { + return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): start", *virtualMachineScaleSet.Name) + respChan, errChan := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet, nil) + resp := <-respChan + err := <-errChan + glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name) + return processRetryResponse(resp.Response, err) + }) +} + +// updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry. +func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error { + return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): start", scaleSetName) + respChan, errChan := ss.VirtualMachineScaleSetsClient.UpdateInstances(ss.ResourceGroup, scaleSetName, vmInstanceIDs, nil) + resp := <-respChan + err := <-errChan + glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName) + return processRetryResponse(resp.Response, err) + }) +} + +// EnsureHostsInPool ensures the given Node's primary IP configurations are +// participating in the specified LoadBalancer Backend Pool. 
+func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error { + virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName) + if err != nil { + glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err) + return err + } + if !exists { + errorMessage := fmt.Errorf("Scale set %q not found", vmSetName) + glog.Errorf("%v", errorMessage) + return errorMessage + } + + // Find primary network interface configuration. + networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations + primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, vmSetName) + if err != nil { + return err + } + + // Find primary IP configuration. + primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, vmSetName) + if err != nil { + return err + } + + // Update primary IP configuration's LoadBalancerBackendAddressPools. 
+ foundPool := false + newBackendPools := []compute.SubResource{} + if primaryIPConfiguration.LoadBalancerBackendAddressPools != nil { + newBackendPools = *primaryIPConfiguration.LoadBalancerBackendAddressPools + } + for _, existingPool := range newBackendPools { + if strings.EqualFold(backendPoolID, *existingPool.ID) { + foundPool = true + break + } + } + if !foundPool { + newBackendPools = append(newBackendPools, + compute.SubResource{ + ID: to.StringPtr(backendPoolID), + }) + primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools + + glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName) + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): start", vmSetName) + respChan, errChan := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, vmSetName, virtualMachineScaleSet, nil) + resp := <-respChan + err := <-errChan + glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) + if ss.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { + glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) + retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%) abort backoff: scale set (%s) - updating", serviceName, vmSetName) + } + } + if err != nil { + return err + } + } + + // Construct instanceIDs from nodes. 
+	instanceIDs := []string{}
+	for _, curNode := range nodes {
+		curScaleSetName, err := extractScaleSetNameByVMID(curNode.Spec.ExternalID)
+		if err != nil {
+			glog.V(2).Infof("Node %q does not belong to any scale set, omitting it", curNode.Name)
+			continue
+		}
+		if curScaleSetName != vmSetName {
+			glog.V(2).Infof("Node %q does not belong to scale set %q, omitting it", curNode.Name, vmSetName)
+			continue
+		}
+
+		instanceID, err := getLastSegment(curNode.Spec.ExternalID)
+		if err != nil {
+			glog.Errorf("Failed to get last segment from %q: %v", curNode.Spec.ExternalID, err)
+			return err
+		}
+
+		instanceIDs = append(instanceIDs, instanceID)
+	}
+
+	// Update instances to latest VMSS model.
+	vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
+		InstanceIds: &instanceIDs,
+	}
+	ss.operationPollRateLimiter.Accept()
+	glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): start", vmSetName)
+	respChan, errChan := ss.VirtualMachineScaleSetsClient.UpdateInstances(ss.ResourceGroup, vmSetName, vmInstanceIDs, nil)
+	resp := <-respChan
+	err = <-errChan
+	glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", vmSetName)
+	if ss.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
+		glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
+		retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
+		if retryErr != nil {
+			err = retryErr
+			glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
+		}
+	}
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools are deleted from the specified vmSet.
+func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
+	virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
+	if err != nil {
+		glog.Errorf("ss.EnsureBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, vmSetName, vmSetName, err)
+		return err
+	}
+	if !exists {
+		glog.V(2).Infof("ss.EnsureBackendPoolDeleted(%s, %s), scale set %s does not exist", poolID, vmSetName, vmSetName)
+		return nil
+	}
+
+	// Find primary network interface configuration.
+	networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
+	primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, vmSetName)
+	if err != nil {
+		return err
+	}
+
+	// Find primary IP configuration.
+	primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, vmSetName)
+	if err != nil {
+		return err
+	}
+
+	// Construct new loadBalancerBackendAddressPools and remove backendAddressPools from primary IP configuration.
+	if primaryIPConfiguration.LoadBalancerBackendAddressPools == nil || len(*primaryIPConfiguration.LoadBalancerBackendAddressPools) == 0 {
+		return nil
+	}
+	existingBackendPools := *primaryIPConfiguration.LoadBalancerBackendAddressPools
+	newBackendPools := []compute.SubResource{}
+	foundPool := false
+	for i := len(existingBackendPools) - 1; i >= 0; i-- {
+		curPool := existingBackendPools[i]
+		if strings.EqualFold(poolID, *curPool.ID) {
+			glog.V(10).Infof("EnsureBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, vmSetName)
+			foundPool = true
+			newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
+		}
+	}
+	if !foundPool {
+		// Pool not found, assume it has been already removed.
+		return nil
+	}
+
+	// Update scale set with backoff.
+ primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools + glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", vmSetName) + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): start", vmSetName) + respChan, errChan := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, vmSetName, virtualMachineScaleSet, nil) + resp := <-respChan + err = <-errChan + glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) + if ss.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { + glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", vmSetName, err) + retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", vmSetName) + } + } + if err != nil { + return err + } + + // Update instances to latest VMSS model. 
+ instanceIDs := []string{"*"} + vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{ + InstanceIds: &instanceIDs, + } + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): start", vmSetName) + updateRespChan, errChan := ss.VirtualMachineScaleSetsClient.UpdateInstances(ss.ResourceGroup, vmSetName, vmInstanceIDs, nil) + updateResp := <-updateRespChan + err = <-errChan + glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", vmSetName) + if ss.CloudProviderBackoff && shouldRetryAPIRequest(updateResp.Response, err) { + glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", vmSetName, err) + retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", vmSetName) + } + } + if err != nil { + return err + } + + return nil +} diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index 52d033b9294..f1aa0def597 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -17,19 +17,14 @@ limitations under the License. package azure import ( - "errors" "net/http" "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/go-autorest/autorest" "github.com/golang/glog" - "k8s.io/apimachinery/pkg/types" -) -var ( - // ErrorNotVmssInstance indicates an instance is not belongint to any vmss. 
- ErrorNotVmssInstance = errors.New("not a vmss instance") + "k8s.io/apimachinery/pkg/types" ) // checkExistsFromError inspects an error and returns a true if err is nil, @@ -80,32 +75,6 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM return vm, exists, err } -func (az *Cloud) getVmssVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachineScaleSetVM, exists bool, err error) { - var realErr error - - vmName := string(nodeName) - instanceID, err := getVmssInstanceID(vmName) - if err != nil { - return vm, false, err - } - - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): start", vmName) - vm, err = az.VirtualMachineScaleSetVMsClient.Get(az.ResourceGroup, az.PrimaryScaleSetName, instanceID) - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): end", vmName) - - exists, realErr = checkResourceExistsFromError(err) - if realErr != nil { - return vm, false, realErr - } - - if !exists { - return vm, false, nil - } - - return vm, exists, err -} - func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) { var realErr error From 906abde7337bbd9b88b29d416b219046e4da358b Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 13 Dec 2017 14:17:37 +0800 Subject: [PATCH 275/794] Add availability sets implementation of VMSet interface --- .../providers/azure/azure_util.go | 502 ++++++++++++------ 1 file changed, 341 insertions(+), 161 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_util.go index 958275ca66b..6181550571b 100644 --- a/pkg/cloudprovider/providers/azure/azure_util.go +++ b/pkg/cloudprovider/providers/azure/azure_util.go @@ -30,8 +30,10 @@ import ( "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/go-autorest/autorest/to" "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + utilerrors 
"k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" ) @@ -54,6 +56,7 @@ const ( nodeLabelRole = "kubernetes.io/role" ) +var errNotInVMSet = errors.New("vm is not in the vmset") var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`) // returns the full identifier of a machine @@ -133,115 +136,22 @@ func (az *Cloud) getpublicIPAddressID(pipName string) string { pipName) } -// getLoadBalancerAvailabilitySetNames selects all possible availability sets for -// service load balancer, if the service has no loadbalancer mode annotaion returns the -// primary availability set if service annotation for loadbalancer availability set -// exists then return the eligible a availability set -func (az *Cloud) getLoadBalancerAvailabilitySetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) { - hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service) - if !hasMode { - // no mode specified in service annotation default to PrimaryAvailabilitySetName - availabilitySetNames = &[]string{az.Config.PrimaryAvailabilitySetName} - return availabilitySetNames, nil - } - availabilitySetNames, err = az.getAgentPoolAvailabiliySets(nodes) - if err != nil { - glog.Errorf("az.getLoadBalancerAvailabilitySetNames - getAgentPoolAvailabiliySets failed err=(%v)", err) - return nil, err - } - if len(*availabilitySetNames) == 0 { - glog.Errorf("az.getLoadBalancerAvailabilitySetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes)) - return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes)) - } - // sort the list to have deterministic selection - sort.Strings(*availabilitySetNames) - if !isAuto { - if serviceAvailabilitySetNames == nil || len(serviceAvailabilitySetNames) == 0 { - return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability 
sets value") - } - // validate availability set exists - var found bool - for sasx := range serviceAvailabilitySetNames { - for asx := range *availabilitySetNames { - if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetNames[sasx]) { - found = true - serviceAvailabilitySetNames[sasx] = (*availabilitySetNames)[asx] - break - } - } - if !found { - glog.Errorf("az.getLoadBalancerAvailabilitySetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx]) - return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx]) - } - } - availabilitySetNames = &serviceAvailabilitySetNames - } - - return availabilitySetNames, nil -} - -// lists the virtual machines for for the resource group and then builds -// a list of availability sets that match the nodes available to k8s -func (az *Cloud) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) { - vms, err := az.VirtualMachineClientListWithRetry() - if err != nil { - glog.Errorf("az.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err) - return nil, err - } - vmNameToAvailabilitySetID := make(map[string]string, len(vms)) - for vmx := range vms { - vm := vms[vmx] - if vm.AvailabilitySet != nil { - vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID - } - } - availabilitySetIDs := sets.NewString() - agentPoolAvailabilitySets = &[]string{} - for nx := range nodes { - nodeName := (*nodes[nx]).Name - if isMasterNode(nodes[nx]) { - continue - } - asID, ok := vmNameToAvailabilitySetID[nodeName] - if !ok { - glog.Errorf("az.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName) - return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName) - } - if availabilitySetIDs.Has(asID) { - // already added in the list - continue - } - asName, err := getLastSegment(asID) - if err != nil { - glog.Errorf("az.getNodeAvailabilitySet - Node (%s)- 
getLastSegment(%s), err=%v", nodeName, asID, err) - return nil, err - } - // AvailabilitySet ID is currently upper cased in a indeterministic way - // We want to keep it lower case, before the ID get fixed - asName = strings.ToLower(asName) - - *agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName) - } - - return agentPoolAvailabilitySets, nil -} - -func (az *Cloud) mapLoadBalancerNameToAvailabilitySet(lbName string, clusterName string) (availabilitySetName string) { - availabilitySetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix) +func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) { + vmSetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix) if strings.EqualFold(clusterName, lbName) { - availabilitySetName = az.Config.PrimaryAvailabilitySetName + vmSetName = az.vmSet.GetPrimaryVMSetName() } - return availabilitySetName + return vmSetName } // For a load balancer, all frontend ip should reference either a subnet or publicIpAddress. // Thus Azure do not allow mixed type (public and internal) load balancer. // So we'd have a separate name for internal load balancer. // This would be the name for Azure LoadBalancer resource. 
-func (az *Cloud) getLoadBalancerName(clusterName string, availabilitySetName string, isInternal bool) string { - lbNamePrefix := availabilitySetName - if strings.EqualFold(availabilitySetName, az.Config.PrimaryAvailabilitySetName) { +func (az *Cloud) getLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string { + lbNamePrefix := vmSetName + if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) { lbNamePrefix = clusterName } if isInternal { @@ -402,67 +312,7 @@ outer: } func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) { - if az.Config.VMType == vmTypeVMSS { - ip, err := az.getIPForVmssMachine(nodeName) - if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { - return az.getIPForStandardMachine(nodeName) - } - - return ip, err - } - - return az.getIPForStandardMachine(nodeName) -} - -func (az *Cloud) getIPForStandardMachine(nodeName types.NodeName) (string, error) { - az.operationPollRateLimiter.Accept() - machine, exists, err := az.getVirtualMachine(nodeName) - if !exists { - return "", cloudprovider.InstanceNotFound - } - if err != nil { - glog.Errorf("error: az.getIPForMachine(%s), az.getVirtualMachine(%s), err=%v", nodeName, nodeName, err) - return "", err - } - - nicID, err := getPrimaryInterfaceID(machine) - if err != nil { - glog.Errorf("error: az.getIPForMachine(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err) - return "", err - } - - nicName, err := getLastSegment(nicID) - if err != nil { - glog.Errorf("error: az.getIPForMachine(%s), getLastSegment(%s), err=%v", nodeName, nicID, err) - return "", err - } - - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName) - nic, err := az.InterfacesClient.Get(az.ResourceGroup, nicName, "") - glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) - if err != nil { - glog.Errorf("error: az.getIPForMachine(%s), az.InterfacesClient.Get(%s, %s, %s), err=%v", nodeName, 
az.ResourceGroup, nicName, "", err) - return "", err - } - - ipConfig, err := getPrimaryIPConfig(nic) - if err != nil { - glog.Errorf("error: az.getIPForMachine(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err) - return "", err - } - - targetIP := *ipConfig.PrivateIPAddress - return targetIP, nil -} - -// splitProviderID converts a providerID to a NodeName. -func splitProviderID(providerID string) (types.NodeName, error) { - matches := providerIDRE.FindStringSubmatch(providerID) - if len(matches) != 2 { - return "", errors.New("error splitting providerID") - } - return types.NodeName(matches[1]), nil + return az.vmSet.GetIPByNodeName(string(nodeName), "") } var polyTable = crc32.MakeTable(crc32.Koopman) @@ -519,3 +369,333 @@ func ExtractDiskData(diskData interface{}) (provisioningState string, diskState } return provisioningState, diskState, nil } + +// availabilitySet implements VMSet interface for Azure availability sets. +type availabilitySet struct { + *Cloud +} + +// newStandardSet creates a new availabilitySet. +func newAvailabilitySet(az *Cloud) VMSet { + return &availabilitySet{ + Cloud: az, + } +} + +// GetInstanceIDByNodeName gets the cloud provider ID by node name. +// It must return ("", cloudprovider.InstanceNotFound) if the instance does +// not exist or is no longer running. 
+func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) { + var machine compute.VirtualMachine + var exists bool + var err error + + as.operationPollRateLimiter.Accept() + machine, exists, err = as.getVirtualMachine(types.NodeName(name)) + if err != nil { + if as.CloudProviderBackoff { + glog.V(2).Infof("InstanceID(%s) backing off", name) + machine, exists, err = as.GetVirtualMachineWithRetry(types.NodeName(name)) + if err != nil { + glog.V(2).Infof("InstanceID(%s) abort backoff", name) + return "", err + } + } else { + return "", err + } + } else if !exists { + return "", cloudprovider.InstanceNotFound + } + return *machine.ID, nil +} + +// GetNodeNameByProviderID gets the node name by provider ID. +func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) { + // NodeName is part of providerID for standard instances. + matches := providerIDRE.FindStringSubmatch(providerID) + if len(matches) != 2 { + return "", errors.New("error splitting providerID") + } + + return types.NodeName(matches[1]), nil +} + +// GetInstanceTypeByNodeName gets the instance type by node name. +func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) { + machine, exists, err := as.getVirtualMachine(types.NodeName(name)) + if err != nil { + glog.Errorf("error: as.GetInstanceTypeByNodeName(%s), as.getVirtualMachine(%s) err=%v", name, name, err) + return "", err + } else if !exists { + return "", cloudprovider.InstanceNotFound + } + + return string(machine.HardwareProfile.VMSize), nil +} + +// GetZoneByNodeName gets zone from instance view. 
+func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { + vm, err := as.VirtualMachinesClient.Get(as.ResourceGroup, name, compute.InstanceView) + if err != nil { + return cloudprovider.Zone{}, err + } + + failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain)) + zone := cloudprovider.Zone{ + FailureDomain: failureDomain, + Region: *(vm.Location), + } + return zone, nil +} + +// GetPrimaryVMSetName returns the VM set name depending on the configured vmType. +// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType. +func (as *availabilitySet) GetPrimaryVMSetName() string { + return as.Config.PrimaryAvailabilitySetName +} + +// GetIPByNodeName gets machine IP by node name. +func (as *availabilitySet) GetIPByNodeName(name, vmSetName string) (string, error) { + nic, err := as.GetPrimaryInterface(name, vmSetName) + if err != nil { + return "", err + } + + ipConfig, err := getPrimaryIPConfig(nic) + if err != nil { + glog.Errorf("error: as.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", name, nic, err) + return "", err + } + + targetIP := *ipConfig.PrivateIPAddress + return targetIP, nil +} + +// getAgentPoolAvailabiliySets lists the virtual machines for for the resource group and then builds +// a list of availability sets that match the nodes available to k8s. 
+func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
+	vms, err := as.VirtualMachineClientListWithRetry()
+	if err != nil {
+		glog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
+		return nil, err
+	}
+	vmNameToAvailabilitySetID := make(map[string]string, len(vms))
+	for vmx := range vms {
+		vm := vms[vmx]
+		if vm.AvailabilitySet != nil {
+			vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID
+		}
+	}
+	availabilitySetIDs := sets.NewString()
+	agentPoolAvailabilitySets = &[]string{}
+	for nx := range nodes {
+		nodeName := (*nodes[nx]).Name
+		if isMasterNode(nodes[nx]) {
+			continue
+		}
+		asID, ok := vmNameToAvailabilitySetID[nodeName]
+		if !ok {
+			glog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
+			return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
+		}
+		if availabilitySetIDs.Has(asID) {
+			// already added in the list
+			continue
+		}
+		asName, err := getLastSegment(asID)
+		if err != nil {
+			glog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
+			return nil, err
+		}
+		// AvailabilitySet ID is currently upper cased in an indeterministic way
+		// We want to keep it lower case, before the ID gets fixed
+		asName = strings.ToLower(asName)
+
+		*agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName)
+	}
+
+	return agentPoolAvailabilitySets, nil
+}
+
+// GetVMSetNames selects all possible availability sets or scale sets
+// (depending on the configured vmType) for the service load balancer. If the service has
+// no loadbalancer mode annotation, it returns the primary VMSet. If a service annotation
+// for the loadbalancer exists, then it returns the eligible VMSet.
+func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) { + hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service) + if !hasMode { + // no mode specified in service annotation default to PrimaryAvailabilitySetName + availabilitySetNames = &[]string{as.Config.PrimaryAvailabilitySetName} + return availabilitySetNames, nil + } + availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes) + if err != nil { + glog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err) + return nil, err + } + if len(*availabilitySetNames) == 0 { + glog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes)) + return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes)) + } + // sort the list to have deterministic selection + sort.Strings(*availabilitySetNames) + if !isAuto { + if serviceAvailabilitySetNames == nil || len(serviceAvailabilitySetNames) == 0 { + return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value") + } + // validate availability set exists + var found bool + for sasx := range serviceAvailabilitySetNames { + for asx := range *availabilitySetNames { + if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetNames[sasx]) { + found = true + serviceAvailabilitySetNames[sasx] = (*availabilitySetNames)[asx] + break + } + } + if !found { + glog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx]) + return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx]) + } + } + availabilitySetNames = &serviceAvailabilitySetNames + } + + return availabilitySetNames, nil +} + +// GetPrimaryInterface gets machine primary network interface by node name and vmSet. 
+func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) { + var machine compute.VirtualMachine + + as.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachinesClient.Get(%q): start", nodeName) + machine, err := as.VirtualMachineClientGetWithRetry(as.ResourceGroup, nodeName, "") + if err != nil { + glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) + return network.Interface{}, err + } + glog.V(10).Infof("VirtualMachinesClient.Get(%q): end", nodeName) + + primaryNicID, err := getPrimaryInterfaceID(machine) + if err != nil { + return network.Interface{}, err + } + nicName, err := getLastSegment(primaryNicID) + if err != nil { + return network.Interface{}, err + } + + // Check availability set + if vmSetName != "" { + expectedAvailabilitySetName := as.getAvailabilitySetID(vmSetName) + if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) { + glog.V(3).Infof( + "GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName) + return network.Interface{}, errNotInVMSet + } + } + + as.operationPollRateLimiter.Accept() + glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName) + nic, err := as.InterfacesClient.Get(as.ResourceGroup, nicName, "") + glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) + if err != nil { + return network.Interface{}, err + } + + return nic, nil +} + +// ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is +// participating in the specified LoadBalancer Backend Pool. 
+func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string) error { + vmName := mapNodeNameToVMName(nodeName) + nic, err := as.GetPrimaryInterface(vmName, vmSetName) + if err != nil { + if err == errNotInVMSet { + glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName) + return nil + } + + glog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) + return err + } + + var primaryIPConfig *network.InterfaceIPConfiguration + primaryIPConfig, err = getPrimaryIPConfig(nic) + if err != nil { + return err + } + + foundPool := false + newBackendPools := []network.BackendAddressPool{} + if primaryIPConfig.LoadBalancerBackendAddressPools != nil { + newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools + } + for _, existingPool := range newBackendPools { + if strings.EqualFold(backendPoolID, *existingPool.ID) { + foundPool = true + break + } + } + if !foundPool { + newBackendPools = append(newBackendPools, + network.BackendAddressPool{ + ID: to.StringPtr(backendPoolID), + }) + + primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools + + nicName := *nic.Name + glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) + as.operationPollRateLimiter.Accept() + glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): start", *nic.Name) + respChan, errChan := as.InterfacesClient.CreateOrUpdate(as.ResourceGroup, *nic.Name, nic, nil) + resp := <-respChan + err := <-errChan + glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name) + if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { + glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err) + retryErr := as.CreateOrUpdateInterfaceWithRetry(nic) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("nicupdate(%s) abort 
backoff: nic(%s) - updating", serviceName, nicName) + } + } + if err != nil { + return err + } + } + return nil +} + +// EnsureHostsInPool ensures the given Node's primary IP configurations are +// participating in the specified LoadBalancer Backend Pool. +func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error { + hostUpdates := make([]func() error, len(nodes)) + for i, node := range nodes { + localNodeName := node.Name + f := func() error { + err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName) + if err != nil { + return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err) + } + return nil + } + hostUpdates[i] = f + } + + errs := utilerrors.AggregateGoroutines(hostUpdates...) + if errs != nil { + return utilerrors.Flatten(errs) + } + + return nil +} + +// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet. +func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string) error { + // Do nothing for availability set. 
+ return nil +} From af5b079ef79f3893e745afd7b14b3d7426350c92 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 13 Dec 2017 14:19:47 +0800 Subject: [PATCH 276/794] Initialize vmSet based on vmType setting and call vmSet interface instead of azureClient --- pkg/cloudprovider/providers/azure/azure.go | 19 ++- .../providers/azure/azure_instances.go | 111 ++---------------- .../providers/azure/azure_zones.go | 21 +--- 3 files changed, 24 insertions(+), 127 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index ffc2030a8fa..b910ceff7db 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -198,6 +198,7 @@ type Cloud struct { operationPollRateLimiter flowcontrol.RateLimiter resourceRequestBackoff wait.Backoff metadata *InstanceMetadata + vmSet VMSet // Clients for vmss. VirtualMachineScaleSetsClient compute.VirtualMachineScaleSetsClient @@ -346,16 +347,16 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.SecurityGroupsClient = securityGroupsClient virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(az.SubscriptionID) - az.VirtualMachineScaleSetVMsClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.VirtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - az.VirtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second + virtualMachineScaleSetVMsClient.BaseURI = az.Environment.ResourceManagerEndpoint + virtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + virtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second configureUserAgent(&virtualMachineScaleSetVMsClient.Client) az.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(az.SubscriptionID) - az.VirtualMachineScaleSetsClient.BaseURI = 
az.Environment.ResourceManagerEndpoint - az.VirtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - az.VirtualMachineScaleSetsClient.PollingDelay = 5 * time.Second + virtualMachineScaleSetsClient.BaseURI = az.Environment.ResourceManagerEndpoint + virtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + virtualMachineScaleSetsClient.PollingDelay = 5 * time.Second configureUserAgent(&virtualMachineScaleSetsClient.Client) az.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient @@ -421,6 +422,12 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount } + if az.Config.VMType == vmTypeVMSS { + az.vmSet = newScaleSet(&az) + } else { + az.vmSet = newAvailabilitySet(&az) + } + if err := initDiskControllers(&az); err != nil { return nil, err } diff --git a/pkg/cloudprovider/providers/azure/azure_instances.go b/pkg/cloudprovider/providers/azure/azure_instances.go index 8378e596a9d..9c5976c3dfc 100644 --- a/pkg/cloudprovider/providers/azure/azure_instances.go +++ b/pkg/cloudprovider/providers/azure/azure_instances.go @@ -22,7 +22,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/cloudprovider" - "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" ) @@ -48,6 +47,7 @@ func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { } return addresses, nil } + ip, err := az.GetIPForMachineWithRetry(name) if err != nil { glog.V(2).Infof("NodeAddresses(%s) abort backoff", name) @@ -64,7 +64,7 @@ func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { // This method will not be called from the node that is requesting this ID. i.e. 
metadata service // and other local methods cannot be used here func (az *Cloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) { - name, err := splitProviderID(providerID) + name, err := az.vmSet.GetNodeNameByProviderID(providerID) if err != nil { return nil, err } @@ -80,7 +80,7 @@ func (az *Cloud) ExternalID(name types.NodeName) (string, error) { // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. func (az *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) { - name, err := splitProviderID(providerID) + name, err := az.vmSet.GetNodeNameByProviderID(providerID) if err != nil { return false, err } @@ -118,70 +118,14 @@ func (az *Cloud) InstanceID(name types.NodeName) (string, error) { } } - if az.Config.VMType == vmTypeVMSS { - id, err := az.getVmssInstanceID(name) - if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { - // Retry with standard type because master nodes may not belong to any vmss. 
- return az.getStandardInstanceID(name) - } - - return id, err - } - - return az.getStandardInstanceID(name) -} - -func (az *Cloud) getVmssInstanceID(name types.NodeName) (string, error) { - var machine compute.VirtualMachineScaleSetVM - var exists bool - var err error - az.operationPollRateLimiter.Accept() - machine, exists, err = az.getVmssVirtualMachine(name) - if err != nil { - if az.CloudProviderBackoff { - glog.V(2).Infof("InstanceID(%s) backing off", name) - machine, exists, err = az.GetScaleSetsVMWithRetry(name) - if err != nil { - glog.V(2).Infof("InstanceID(%s) abort backoff", name) - return "", err - } - } else { - return "", err - } - } else if !exists { - return "", cloudprovider.InstanceNotFound - } - return *machine.ID, nil -} - -func (az *Cloud) getStandardInstanceID(name types.NodeName) (string, error) { - var machine compute.VirtualMachine - var exists bool - var err error - az.operationPollRateLimiter.Accept() - machine, exists, err = az.getVirtualMachine(name) - if err != nil { - if az.CloudProviderBackoff { - glog.V(2).Infof("InstanceID(%s) backing off", name) - machine, exists, err = az.GetVirtualMachineWithRetry(name) - if err != nil { - glog.V(2).Infof("InstanceID(%s) abort backoff", name) - return "", err - } - } else { - return "", err - } - } else if !exists { - return "", cloudprovider.InstanceNotFound - } - return *machine.ID, nil + return az.vmSet.GetInstanceIDByNodeName(string(name)) } // InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID // This method will not be called from the node that is requesting this ID. i.e. 
metadata service // and other local methods cannot be used here func (az *Cloud) InstanceTypeByProviderID(providerID string) (string, error) { - name, err := splitProviderID(providerID) + name, err := az.vmSet.GetNodeNameByProviderID(providerID) if err != nil { return "", err } @@ -207,46 +151,7 @@ func (az *Cloud) InstanceType(name types.NodeName) (string, error) { } } - if az.Config.VMType == vmTypeVMSS { - machineType, err := az.getVmssInstanceType(name) - if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { - // Retry with standard type because master nodes may not belong to any vmss. - return az.getStandardInstanceType(name) - } - - return machineType, err - } - - return az.getStandardInstanceType(name) -} - -// getVmssInstanceType gets instance with type vmss. -func (az *Cloud) getVmssInstanceType(name types.NodeName) (string, error) { - machine, exists, err := az.getVmssVirtualMachine(name) - if err != nil { - glog.Errorf("error: az.InstanceType(%s), az.getVmssVirtualMachine(%s) err=%v", name, name, err) - return "", err - } else if !exists { - return "", cloudprovider.InstanceNotFound - } - - if machine.Sku.Name != nil { - return *machine.Sku.Name, nil - } - - return "", fmt.Errorf("instance type is not set") -} - -// getStandardInstanceType gets instance with standard type. 
-func (az *Cloud) getStandardInstanceType(name types.NodeName) (string, error) { - machine, exists, err := az.getVirtualMachine(name) - if err != nil { - glog.Errorf("error: az.InstanceType(%s), az.getVirtualMachine(%s) err=%v", name, name, err) - return "", err - } else if !exists { - return "", cloudprovider.InstanceNotFound - } - return string(machine.HardwareProfile.VMSize), nil + return az.vmSet.GetInstanceTypeByNodeName(string(name)) } // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances @@ -255,8 +160,8 @@ func (az *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error { return fmt.Errorf("not supported") } -// CurrentNodeName returns the name of the node we are currently running on -// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname +// CurrentNodeName returns the name of the node we are currently running on. +// On Azure this is the hostname, so we just return the hostname. func (az *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) { return types.NodeName(hostname), nil } diff --git a/pkg/cloudprovider/providers/azure/azure_zones.go b/pkg/cloudprovider/providers/azure/azure_zones.go index 192456f43a8..75d0c412515 100644 --- a/pkg/cloudprovider/providers/azure/azure_zones.go +++ b/pkg/cloudprovider/providers/azure/azure_zones.go @@ -21,13 +21,10 @@ import ( "io" "io/ioutil" "net/http" - "strconv" "sync" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/cloudprovider" - - "github.com/Azure/azure-sdk-for-go/arm/compute" ) const instanceInfoURL = "http://169.254.169.254/metadata/v1/InstanceInfo" @@ -63,10 +60,11 @@ func (az *Cloud) GetZone() (cloudprovider.Zone, error) { // This is particularly useful in external cloud providers where the kubelet // does not initialize node data. 
func (az *Cloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) { - nodeName, err := splitProviderID(providerID) + nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID) if err != nil { return cloudprovider.Zone{}, err } + return az.GetZoneByNodeName(nodeName) } @@ -74,20 +72,7 @@ func (az *Cloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, err // This is particularly useful in external cloud providers where the kubelet // does not initialize node data. func (az *Cloud) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) { - - vm, err := az.VirtualMachinesClient.Get(az.ResourceGroup, string(nodeName), compute.InstanceView) - - if err != nil { - return cloudprovider.Zone{}, err - } - - failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain)) - - zone := cloudprovider.Zone{ - FailureDomain: failureDomain, - Region: *(vm.Location), - } - return zone, nil + return az.vmSet.GetZoneByNodeName(string(nodeName)) } func fetchFaultDomain() (*string, error) { From 86111df41debdfcb4765304036068f39124c29be Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 13 Dec 2017 14:20:41 +0800 Subject: [PATCH 277/794] Add load balancer implementation of vmSet --- .../providers/azure/azure_loadbalancer.go | 220 +++++------------- 1 file changed, 62 insertions(+), 158 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index c1bc5bf972f..6657a074d89 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -23,49 +23,48 @@ import ( "strings" "k8s.io/api/core/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" serviceapi "k8s.io/kubernetes/pkg/api/v1/service" - "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/network" 
"github.com/Azure/go-autorest/autorest/to" "github.com/golang/glog" - "k8s.io/apimachinery/pkg/types" ) -// ServiceAnnotationLoadBalancerInternal is the annotation used on the service -const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/azure-load-balancer-internal" +const ( + // ServiceAnnotationLoadBalancerInternal is the annotation used on the service + ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/azure-load-balancer-internal" -// ServiceAnnotationLoadBalancerInternalSubnet is the annotation used on the service -// to specify what subnet it is exposed on -const ServiceAnnotationLoadBalancerInternalSubnet = "service.beta.kubernetes.io/azure-load-balancer-internal-subnet" + // ServiceAnnotationLoadBalancerInternalSubnet is the annotation used on the service + // to specify what subnet it is exposed on + ServiceAnnotationLoadBalancerInternalSubnet = "service.beta.kubernetes.io/azure-load-balancer-internal-subnet" -// ServiceAnnotationLoadBalancerMode is the annotation used on the service to specify the -// Azure load balancer selection based on availability sets -// There are currently three possible load balancer selection modes : -// 1. Default mode - service has no annotation ("service.beta.kubernetes.io/azure-load-balancer-mode") -// In this case the Loadbalancer of the primary Availability set is selected -// 2. "__auto__" mode - service is annotated with __auto__ value, this when loadbalancer from any availability set -// is selected which has the miinimum rules associated with it. -// 3. "as1,as2" mode - this is when the laod balancer from the specified availability sets is selected that has the -// miinimum rules associated with it. 
-const ServiceAnnotationLoadBalancerMode = "service.beta.kubernetes.io/azure-load-balancer-mode" + // ServiceAnnotationLoadBalancerMode is the annotation used on the service to specify the + // Azure load balancer selection based on availability sets + // There are currently three possible load balancer selection modes : + // 1. Default mode - service has no annotation ("service.beta.kubernetes.io/azure-load-balancer-mode") + // In this case the Loadbalancer of the primary Availability set is selected + // 2. "__auto__" mode - service is annotated with __auto__ value, this when loadbalancer from any availability set + // is selected which has the miinimum rules associated with it. + // 3. "as1,as2" mode - this is when the laod balancer from the specified availability sets is selected that has the + // miinimum rules associated with it. + ServiceAnnotationLoadBalancerMode = "service.beta.kubernetes.io/azure-load-balancer-mode" -// ServiceAnnotationLoadBalancerAutoModeValue the annotation used on the service to specify the -// Azure load balancer auto selection from the availability sets -const ServiceAnnotationLoadBalancerAutoModeValue = "__auto__" + // ServiceAnnotationLoadBalancerAutoModeValue the annotation used on the service to specify the + // Azure load balancer auto selection from the availability sets + ServiceAnnotationLoadBalancerAutoModeValue = "__auto__" -// ServiceAnnotationDNSLabelName annotation speficying the DNS label name for the service. -const ServiceAnnotationDNSLabelName = "service.beta.kubernetes.io/azure-dns-label-name" + // ServiceAnnotationDNSLabelName annotation speficying the DNS label name for the service. 
+ ServiceAnnotationDNSLabelName = "service.beta.kubernetes.io/azure-dns-label-name" -// ServiceAnnotationSharedSecurityRule is the annotation used on the service -// to specify that the service should be exposed using an Azure security rule -// that may be shared with other service, trading specificity of rules for an -// increase in the number of services that can be exposed. This relies on the -// Azure "augmented security rules" feature which at the time of writing is in -// preview and available only in certain regions. -const ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule" + // ServiceAnnotationSharedSecurityRule is the annotation used on the service + // to specify that the service should be exposed using an Azure security rule + // that may be shared with other service, trading specificity of rules for an + // increase in the number of services that can be exposed. This relies on the + // Azure "augmented security rules" feature which at the time of writing is in + // preview and available only in certain regions. + ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule" +) // GetLoadBalancer returns whether the specified load balancer exists, and // if so, what its status is. @@ -166,15 +165,16 @@ func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servi return nil } -// getServiceLoadBalancer gets the loadbalancer for the service if it already exists -// If wantLb is TRUE then -it selects a new load balancer +// getServiceLoadBalancer gets the loadbalancer for the service if it already exists. +// If wantLb is TRUE then -it selects a new load balancer. // In case the selected load balancer does not exists it returns network.LoadBalancer struct -// with added metadata (such as name, location) and existsLB set to FALSE -// By default - cluster default LB is returned +// with added metadata (such as name, location) and existsLB set to FALSE. 
+// By default - cluster default LB is returned. func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) { isInternal := requiresInternalLoadBalancer(service) var defaultLB *network.LoadBalancer - defaultLBName := az.getLoadBalancerName(clusterName, az.Config.PrimaryAvailabilitySetName, isInternal) + primaryVMSetName := az.vmSet.GetPrimaryVMSetName() + defaultLBName := az.getLoadBalancerName(clusterName, primaryVMSetName, isInternal) existingLBs, err := az.ListLBWithRetry() if err != nil { @@ -234,18 +234,19 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) glog.V(3).Infof("selectLoadBalancer(%s): isInternal(%s) - start", serviceName, isInternal) - availabilitySetNames, err := az.getLoadBalancerAvailabilitySetNames(service, nodes) + vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes) if err != nil { - glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.getLoadBalancerAvailabilitySetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) + glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) return nil, false, err } - glog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - availabilitysetsnames %v", clusterName, serviceName, isInternal, *availabilitySetNames) + glog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames) + mapExistingLBs := map[string]network.LoadBalancer{} for _, lb := range *existingLBs { mapExistingLBs[*lb.Name] = lb } selectedLBRuleCount := math.MaxInt32 - for _, currASName := range *availabilitySetNames { + for _, currASName := range *vmSetNames 
{ currLBName := az.getLoadBalancerName(clusterName, currASName, isInternal) lb, exists := mapExistingLBs[currLBName] if !exists { @@ -272,13 +273,13 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi } if selectedLB == nil { - err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected availability sets %v", clusterName, serviceName, isInternal, *availabilitySetNames) + err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected VM sets %v", clusterName, serviceName, isInternal, *vmSetNames) glog.Error(err) return nil, false, err } // validate if the selected LB has not exceeded the MaximumLoadBalancerRuleCount if az.Config.MaximumLoadBalancerRuleCount != 0 && selectedLBRuleCount >= az.Config.MaximumLoadBalancerRuleCount { - err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, availabilitysetnames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *availabilitySetNames) + err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, vmSetNames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *vmSetNames) glog.Error(err) return selectedLB, existsLb, err } @@ -741,6 +742,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // because an Azure load balancer cannot have an empty FrontendIPConfigurations collection glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) + // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. + vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) + az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName) + + // Remove the LB. 
az.operationPollRateLimiter.Accept() glog.V(10).Infof("LoadBalancerClient.Delete(%q): start", lbName) err := az.DeleteLBWithRetry(lbName) @@ -761,23 +767,10 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if wantLb && nodes != nil { // Add the machines to the backend pool if they're not already - availabilitySetName := az.mapLoadBalancerNameToAvailabilitySet(lbName, clusterName) - hostUpdates := make([]func() error, len(nodes)) - for i, node := range nodes { - localNodeName := node.Name - f := func() error { - err := az.ensureHostInPool(serviceName, types.NodeName(localNodeName), lbBackendPoolID, availabilitySetName) - if err != nil { - return fmt.Errorf("ensure(%s): lb(%s) - failed to ensure host in pool: %q", serviceName, lbName, err) - } - return nil - } - hostUpdates[i] = f - } - - errs := utilerrors.AggregateGoroutines(hostUpdates...) - if errs != nil { - return nil, utilerrors.Flatten(errs) + vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) + err := az.vmSet.EnsureHostsInPool(serviceName, nodes, lbBackendPoolID, vmSetName) + if err != nil { + return nil, err } } @@ -1246,95 +1239,6 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b return false } -// This ensures the given VM's Primary NIC's Primary IP Configuration is -// participating in the specified LoadBalancer Backend Pool. 
-func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, availabilitySetName string) error { - var machine compute.VirtualMachine - vmName := mapNodeNameToVMName(nodeName) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.Get(%q): start", vmName) - machine, err := az.VirtualMachineClientGetWithRetry(az.ResourceGroup, vmName, "") - if err != nil { - glog.V(2).Infof("ensureHostInPool(%s, %s, %s) abort backoff", serviceName, nodeName, backendPoolID) - return err - } - glog.V(10).Infof("VirtualMachinesClient.Get(%q): end", vmName) - - primaryNicID, err := getPrimaryInterfaceID(machine) - if err != nil { - return err - } - nicName, err := getLastSegment(primaryNicID) - if err != nil { - return err - } - - // Check availability set - if availabilitySetName != "" { - expectedAvailabilitySetName := az.getAvailabilitySetID(availabilitySetName) - if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) { - glog.V(3).Infof( - "nicupdate(%s): skipping nic (%s) since it is not in the availabilitySet(%s)", - serviceName, nicName, availabilitySetName) - return nil - } - } - - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName) - nic, err := az.InterfacesClient.Get(az.ResourceGroup, nicName, "") - glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) - if err != nil { - return err - } - - var primaryIPConfig *network.InterfaceIPConfiguration - primaryIPConfig, err = getPrimaryIPConfig(nic) - if err != nil { - return err - } - - foundPool := false - newBackendPools := []network.BackendAddressPool{} - if primaryIPConfig.LoadBalancerBackendAddressPools != nil { - newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools - } - for _, existingPool := range newBackendPools { - if strings.EqualFold(backendPoolID, *existingPool.ID) { - foundPool = true - break - } - } - if !foundPool { - 
newBackendPools = append(newBackendPools, - network.BackendAddressPool{ - ID: to.StringPtr(backendPoolID), - }) - - primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools - - glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): start", *nic.Name) - respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) - resp := <-respChan - err := <-errChan - glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { - glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err) - retryErr := az.CreateOrUpdateInterfaceWithRetry(nic) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName) - } - } - if err != nil { - return err - } - } - return nil -} - // Check if service requires an internal load balancer. func requiresInternalLoadBalancer(service *v1.Service) bool { if l, ok := service.Annotations[ServiceAnnotationLoadBalancerInternal]; ok { @@ -1354,28 +1258,28 @@ func subnet(service *v1.Service) *string { return nil } -// getServiceLoadBalancerMode parses the mode value -// if the value is __auto__ it returns isAuto = TRUE -// if anything else it returns the unique availability set names after triming spaces -func getServiceLoadBalancerMode(service *v1.Service) (hasMode bool, isAuto bool, availabilitySetNames []string) { +// getServiceLoadBalancerMode parses the mode value. +// if the value is __auto__ it returns isAuto = TRUE. +// if anything else it returns the unique VM set names after triming spaces. 
+func getServiceLoadBalancerMode(service *v1.Service) (hasMode bool, isAuto bool, vmSetNames []string) { mode, hasMode := service.Annotations[ServiceAnnotationLoadBalancerMode] mode = strings.TrimSpace(mode) isAuto = strings.EqualFold(mode, ServiceAnnotationLoadBalancerAutoModeValue) if !isAuto { // Break up list of "AS1,AS2" - availabilitySetParsedList := strings.Split(mode, ",") + vmSetParsedList := strings.Split(mode, ",") - // Trim the availability set names and remove duplicates + // Trim the VM set names and remove duplicates // e.g. {"AS1"," AS2", "AS3", "AS3"} => {"AS1", "AS2", "AS3"} - availabilitySetNameSet := sets.NewString() - for _, v := range availabilitySetParsedList { - availabilitySetNameSet.Insert(strings.TrimSpace(v)) + vmSetNameSet := sets.NewString() + for _, v := range vmSetParsedList { + vmSetNameSet.Insert(strings.TrimSpace(v)) } - availabilitySetNames = availabilitySetNameSet.List() + vmSetNames = vmSetNameSet.List() } - return hasMode, isAuto, availabilitySetNames + return hasMode, isAuto, vmSetNames } func useSharedSecurityRule(service *v1.Service) bool { From 97fab90cd5c64ce491e18827f1e90b66d8c9ab9e Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 13 Dec 2017 14:21:57 +0800 Subject: [PATCH 278/794] Fix unit tests --- pkg/cloudprovider/providers/azure/BUILD | 1 + pkg/cloudprovider/providers/azure/azure_test.go | 6 ++++-- pkg/cloudprovider/providers/azure/azure_util_test.go | 4 ++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index a72de9f2a6d..322024f2c61 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -24,6 +24,7 @@ go_library( "azure_storageaccount.go", "azure_util.go", "azure_util_vmss.go", + "azure_vmsets.go", "azure_wrap.go", "azure_zones.go", ], diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index 
073a82e6b36..f141ae2d1b0 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -870,6 +870,7 @@ func getTestCloud() (az *Cloud) { az.SecurityGroupsClient = newFakeAzureNSGClient() az.VirtualMachinesClient = newFakeAzureVirtualMachinesClient() az.InterfacesClient = newFakeAzureInterfacesClient() + az.vmSet = newAvailabilitySet(az) return az } @@ -1631,7 +1632,8 @@ func TestDecodeInstanceInfo(t *testing.T) { } } -func TestSplitProviderID(t *testing.T) { +func TestGetNodeNameByProviderID(t *testing.T) { + az := getTestCloud() providers := []struct { providerID string name types.NodeName @@ -1666,7 +1668,7 @@ func TestSplitProviderID(t *testing.T) { } for _, test := range providers { - name, err := splitProviderID(test.providerID) + name, err := az.vmSet.GetNodeNameByProviderID(test.providerID) if (err != nil) != test.fail { t.Errorf("Expected to failt=%t, with pattern %v", test.fail, test) } diff --git a/pkg/cloudprovider/providers/azure/azure_util_test.go b/pkg/cloudprovider/providers/azure/azure_util_test.go index 46f351f47b5..cac803c2eb0 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_test.go +++ b/pkg/cloudprovider/providers/azure/azure_util_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestGetVmssInstanceID(t *testing.T) { +func TestGetScaleSetVMInstanceID(t *testing.T) { tests := []struct { msg string machineName string @@ -43,7 +43,7 @@ func TestGetVmssInstanceID(t *testing.T) { } for i, test := range tests { - instanceID, err := getVmssInstanceID(test.machineName) + instanceID, err := getScaleSetVMInstanceID(test.machineName) if test.expectError { assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) } else { From 3ee3cf65cbec2780c6f472bf37bdfd63a2960e6d Mon Sep 17 00:00:00 2001 From: zhengjiajin Date: Wed, 13 Dec 2017 14:46:58 +0800 Subject: [PATCH 279/794] enhance kube-schedule init flag --- plugin/cmd/kube-scheduler/app/server.go | 5 
+++-- plugin/cmd/kube-scheduler/scheduler.go | 10 ---------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go index 93982f898e3..68f8dad3c37 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/plugin/cmd/kube-scheduler/app/server.go @@ -70,6 +70,7 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/pflag" + utilflag "k8s.io/apiserver/pkg/util/flag" "github.com/prometheus/client_golang/prometheus" ) @@ -342,8 +343,8 @@ through the API as necessary.`, glog.Fatalf("unable to apply config defaults: %v", err) } - flags := cmd.Flags() - AddFlags(opts, flags) + AddFlags(opts, pflag.CommandLine) + utilflag.InitFlags() cmd.MarkFlagFilename("config", "yaml", "yml", "json") diff --git a/plugin/cmd/kube-scheduler/scheduler.go b/plugin/cmd/kube-scheduler/scheduler.go index 07ab2ca1168..f9e93db9c8c 100644 --- a/plugin/cmd/kube-scheduler/scheduler.go +++ b/plugin/cmd/kube-scheduler/scheduler.go @@ -17,12 +17,8 @@ limitations under the License. package main import ( - goflag "flag" "os" - "github.com/spf13/pflag" - - utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/apiserver/pkg/util/logs" _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration _ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration @@ -32,12 +28,6 @@ import ( func main() { command := app.NewSchedulerCommand() - // TODO: once we switch everything over to Cobra commands, we can go back to calling - // utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the - // normalize func and add the go flag set by hand. 
- pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) - // utilflag.InitFlags() logs.InitLogs() defer logs.FlushLogs() From d8fa60f24370f4ad5ae70a9f862bcde0f2cc10a2 Mon Sep 17 00:00:00 2001 From: zhengjiajin Date: Wed, 13 Dec 2017 14:56:36 +0800 Subject: [PATCH 280/794] auto generated file --- plugin/cmd/kube-scheduler/BUILD | 2 -- plugin/cmd/kube-scheduler/app/BUILD | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/plugin/cmd/kube-scheduler/BUILD b/plugin/cmd/kube-scheduler/BUILD index 1e51fd6a99d..a195d550b3f 100644 --- a/plugin/cmd/kube-scheduler/BUILD +++ b/plugin/cmd/kube-scheduler/BUILD @@ -28,8 +28,6 @@ go_library( "//pkg/client/metrics/prometheus:go_default_library", "//pkg/version/prometheus:go_default_library", "//plugin/cmd/kube-scheduler/app:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", ], ) diff --git a/plugin/cmd/kube-scheduler/app/BUILD b/plugin/cmd/kube-scheduler/app/BUILD index 9de3152c18c..5584eebff35 100644 --- a/plugin/cmd/kube-scheduler/app/BUILD +++ b/plugin/cmd/kube-scheduler/app/BUILD @@ -38,6 +38,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library", From 07bf6e8088a9a3708ba21156128942cc3a09ce14 Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Wed, 13 Dec 2017 15:11:17 +0530 Subject: [PATCH 281/794] bump(github.com/json-iterator/go): 13f86432b882000a51c6e610c620974462691a97 --- 
Godeps/Godeps.json | 4 +- vendor/github.com/json-iterator/go/.gitignore | 5 +- .../github.com/json-iterator/go/.travis.yml | 1 + vendor/github.com/json-iterator/go/Gopkg.lock | 33 +++++ vendor/github.com/json-iterator/go/Gopkg.toml | 33 +++++ vendor/github.com/json-iterator/go/README.md | 10 +- vendor/github.com/json-iterator/go/build.sh | 12 ++ .../json-iterator/go/feature_adapter.go | 6 + .../json-iterator/go/feature_any.go | 3 + .../json-iterator/go/feature_config.go | 82 +++++++--- .../json-iterator/go/feature_iter.go | 23 ++- .../json-iterator/go/feature_iter_array.go | 6 +- .../json-iterator/go/feature_iter_int.go | 81 ++++++++++ .../json-iterator/go/feature_iter_object.go | 89 ++++++++--- .../json-iterator/go/feature_iter_skip.go | 6 +- .../go/feature_iter_skip_sloppy.go | 2 +- .../go/feature_iter_skip_strict.go | 4 +- .../json-iterator/go/feature_iter_string.go | 10 +- .../json-iterator/go/feature_json_number.go | 18 ++- .../json-iterator/go/feature_pool.go | 2 + .../json-iterator/go/feature_reflect.go | 104 ++++++++----- .../json-iterator/go/feature_reflect_array.go | 2 +- .../go/feature_reflect_extension.go | 50 +++++-- .../go/feature_reflect_native.go | 140 +++++++++++++++--- .../json-iterator/go/feature_reflect_slice.go | 12 +- .../go/feature_reflect_struct_decoder.go | 26 +++- .../json-iterator/go/feature_stream.go | 21 +-- 27 files changed, 628 insertions(+), 157 deletions(-) create mode 100644 vendor/github.com/json-iterator/go/Gopkg.lock create mode 100644 vendor/github.com/json-iterator/go/Gopkg.toml create mode 100755 vendor/github.com/json-iterator/go/build.sh diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 851e8d0588e..46b35c1fbc0 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1900,8 +1900,8 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Comment": "1.0.0", - "Rev": "36b14963da70d11297d313183d7e6388c8510e1e" + "Comment": "1.0.4-7-g13f8643", + "Rev": "13f86432b882000a51c6e610c620974462691a97" }, { "ImportPath": 
"github.com/jteeuwen/go-bindata", diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore index 501fcdc9a69..15556530a85 100644 --- a/vendor/github.com/json-iterator/go/.gitignore +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -1,3 +1,4 @@ -.idea +/vendor +/bug_test.go /coverage.txt -/profile.out +/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml index 945b9c5947c..449e67cd01a 100644 --- a/vendor/github.com/json-iterator/go/.travis.yml +++ b/vendor/github.com/json-iterator/go/.travis.yml @@ -2,6 +2,7 @@ language: go go: - 1.8.x + - 1.x before_install: - go get -t -v ./... diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 00000000000..f34f5b4ad1c --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,33 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/google/gofuzz" + packages = ["."] + revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = ["assert","require"] + revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" + version = "v1.1.4" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "f8b7cf3941d3792cbbd570bb53c093adaf774334d1162c651565c97a58dc9d09" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 00000000000..0ac55ef876a --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,33 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/davecgh/go-spew" + version = "1.1.0" + +[[constraint]] + branch = "master" + name = "github.com/google/gofuzz" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.1.4" diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 23a4b57c8e7..3a0d680983b 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -44,7 +44,9 @@ with ```go import "github.com/json-iterator/go" -jsoniter.Marshal(&data) + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Marshal(&data) ``` Replace @@ -58,7 +60,9 @@ with ```go import "github.com/json-iterator/go" -jsoniter.Unmarshal(input, &data) + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Unmarshal(input, &data) ``` [More documentation](http://jsoniter.com/migrate-from-go-std.html) @@ -76,5 +80,7 @@ Contributors * [thockin](https://github.com/thockin) * [mattn](https://github.com/mattn) * [cch123](https://github.com/cch123) +* [Oleg Shaldybin](https://github.com/olegshaldybin) +* [Jason Toffaletti](https://github.com/toffaletti) Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100755 index 00000000000..b45ef688313 --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + 
+if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go b/vendor/github.com/json-iterator/go/feature_adapter.go index edb477c4fd1..0214b711a65 100644 --- a/vendor/github.com/json-iterator/go/feature_adapter.go +++ b/vendor/github.com/json-iterator/go/feature_adapter.go @@ -110,6 +110,7 @@ type Encoder struct { // Encode encode interface{} as JSON to io.Writer func (adapter *Encoder) Encode(val interface{}) error { adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") adapter.stream.Flush() return adapter.stream.Error } @@ -125,3 +126,8 @@ func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { config.EscapeHTML = escapeHTML adapter.stream.cfg = config.Froze().(*frozenConfig) } + +// Valid reports whether data is a valid JSON encoding. 
+func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/feature_any.go index 6733dce4cc5..87716d1fcf2 100644 --- a/vendor/github.com/json-iterator/go/feature_any.go +++ b/vendor/github.com/json-iterator/go/feature_any.go @@ -1,6 +1,7 @@ package jsoniter import ( + "errors" "fmt" "io" "reflect" @@ -157,6 +158,8 @@ func (iter *Iterator) readAny() Any { return iter.readArrayAny() case '-': return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} default: return iter.readNumberAny(true) } diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/feature_config.go index fc055d504eb..78a2ce1a593 100644 --- a/vendor/github.com/json-iterator/go/feature_config.go +++ b/vendor/github.com/json-iterator/go/feature_config.go @@ -12,23 +12,26 @@ import ( // Config customize how the API should behave. // The API is created from Config by Froze. type Config struct { - IndentionStep int - MarshalFloatWith6Digits bool - EscapeHTML bool - SortMapKeys bool - UseNumber bool - TagKey string + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + TagKey string + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool } type frozenConfig struct { - configBeforeFrozen Config - sortMapKeys bool - indentionStep int - decoderCache unsafe.Pointer - encoderCache unsafe.Pointer - extensions []Extension - streamPool chan *Stream - iteratorPool chan *Iterator + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + decoderCache unsafe.Pointer + encoderCache unsafe.Pointer + extensions []Extension + streamPool chan *Stream + iteratorPool chan *Iterator } // API the public interface of this package. 
@@ -44,6 +47,8 @@ type API interface { Get(data []byte, path ...interface{}) Any NewEncoder(writer io.Writer) *Encoder NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) } // ConfigDefault the default API @@ -53,24 +58,27 @@ var ConfigDefault = Config{ // ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior var ConfigCompatibleWithStandardLibrary = Config{ - EscapeHTML: true, - SortMapKeys: true, + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, }.Froze() // ConfigFastest marshals float with only 6 digits precision var ConfigFastest = Config{ - EscapeHTML: false, - MarshalFloatWith6Digits: true, + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field }.Froze() // Froze forge API from config func (cfg Config) Froze() API { // TODO: cache frozen config frozenConfig := &frozenConfig{ - sortMapKeys: cfg.SortMapKeys, - indentionStep: cfg.IndentionStep, - streamPool: make(chan *Stream, 16), - iteratorPool: make(chan *Iterator, 16), + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + streamPool: make(chan *Stream, 16), + iteratorPool: make(chan *Iterator, 16), } atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{})) atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{})) @@ -83,10 +91,31 @@ func (cfg Config) Froze() API { if cfg.UseNumber { frozenConfig.useNumber() } + if cfg.ValidateJsonRawMessage { + frozenConfig.validateJsonRawMessage() + } frozenConfig.configBeforeFrozen = cfg return frozenConfig } +func (cfg *frozenConfig) validateJsonRawMessage() { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + 
iter.Read() + if iter.Error != nil { + stream.WriteRaw("null") + } else { + cfg.ReturnIterator(iter) + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return false + }} + cfg.addEncoderToCache(reflect.TypeOf((*json.RawMessage)(nil)).Elem(), encoder) + cfg.addEncoderToCache(reflect.TypeOf((*RawMessage)(nil)).Elem(), encoder) +} + func (cfg *frozenConfig) useNumber() { cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { if iter.WhatIsNext() == NumberValue { @@ -104,7 +133,7 @@ func (cfg *frozenConfig) getTagKey() string { return tagKey } -func (cfg *frozenConfig) registerExtension(extension Extension) { +func (cfg *frozenConfig) RegisterExtension(extension Extension) { cfg.extensions = append(cfg.extensions, extension) } @@ -310,3 +339,10 @@ func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { iter := Parse(cfg, reader, 512) return &Decoder{iter} } + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/feature_iter.go index 4357d69bac7..95ae54fbfe4 100644 --- a/vendor/github.com/json-iterator/go/feature_iter.go +++ b/vendor/github.com/json-iterator/go/feature_iter.go @@ -77,6 +77,7 @@ type Iterator struct { captureStartedAt int captured []byte Error error + Attachment interface{} // open for customized decoder } // NewIterator creates an empty Iterator instance @@ -167,7 +168,7 @@ func (iter *Iterator) isObjectEnd() bool { if c == '}' { return true } - iter.ReportError("isObjectEnd", "object ended prematurely") + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) return true } @@ -200,8 +201,22 @@ func (iter *Iterator) ReportError(operation string, msg string) { if peekStart < 0 { peekStart = 0 } - 
iter.Error = fmt.Errorf("%s: %s, parsing %v ...%s... at %s", operation, msg, iter.head, - string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) } // CurrentBuffer gets current buffer as string for debugging purpose @@ -210,7 +225,7 @@ func (iter *Iterator) CurrentBuffer() string { if peekStart < 0 { peekStart = 0 } - return fmt.Sprintf("parsing %v ...|%s|... at %s", iter.head, + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) } diff --git a/vendor/github.com/json-iterator/go/feature_iter_array.go b/vendor/github.com/json-iterator/go/feature_iter_array.go index cbc3ec8d16a..6188cb4577a 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_array.go +++ b/vendor/github.com/json-iterator/go/feature_iter_array.go @@ -19,7 +19,7 @@ func (iter *Iterator) ReadArray() (ret bool) { case ',': return true default: - iter.ReportError("ReadArray", "expect [ or , or ] or n, but found: "+string([]byte{c})) + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) return } } @@ -42,7 +42,7 @@ func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { c = iter.nextToken() } if c != ']' { - iter.ReportError("ReadArrayCB", "expect ] in the end") + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) return false } return true @@ -53,6 +53,6 @@ func (iter 
*Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { iter.skipThreeBytes('u', 'l', 'l') return true // null } - iter.ReportError("ReadArrayCB", "expect [ or n, but found: "+string([]byte{c})) + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) return false } diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/feature_iter_int.go index 886879efdbb..4781c63933c 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_int.go +++ b/vendor/github.com/json-iterator/go/feature_iter_int.go @@ -115,6 +115,7 @@ func (iter *Iterator) ReadUint32() (ret uint32) { func (iter *Iterator) readUint32(c byte) (ret uint32) { ind := intDigits[c] if ind == 0 { + iter.assertInteger() return 0 // single zero } if ind == invalidCharForNumber { @@ -127,12 +128,14 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) { ind2 := intDigits[iter.buf[i]] if ind2 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value } i++ ind3 := intDigits[iter.buf[i]] if ind3 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*10 + uint32(ind2) } //iter.head = i + 1 @@ -141,30 +144,35 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) { ind4 := intDigits[iter.buf[i]] if ind4 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*100 + uint32(ind2)*10 + uint32(ind3) } i++ ind5 := intDigits[iter.buf[i]] if ind5 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) } i++ ind6 := intDigits[iter.buf[i]] if ind6 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) } i++ ind7 := intDigits[iter.buf[i]] if ind7 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + 
uint32(ind6) } i++ ind8 := intDigits[iter.buf[i]] if ind8 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) } i++ @@ -172,6 +180,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) { value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) iter.head = i if ind9 == invalidCharForNumber { + iter.assertInteger() return value } } @@ -180,6 +189,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) { ind = intDigits[iter.buf[i]] if ind == invalidCharForNumber { iter.head = i + iter.assertInteger() return value } if value > uint32SafeToMultiply10 { @@ -194,6 +204,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) { value = (value << 3) + (value << 1) + uint32(ind) } if !iter.loadMore() { + iter.assertInteger() return value } } @@ -226,6 +237,7 @@ func (iter *Iterator) ReadUint64() uint64 { func (iter *Iterator) readUint64(c byte) (ret uint64) { ind := intDigits[c] if ind == 0 { + iter.assertInteger() return 0 // single zero } if ind == invalidCharForNumber { @@ -233,11 +245,73 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) { return } value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber 
{ + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } for { for i := iter.head; i < iter.tail; i++ { ind = intDigits[iter.buf[i]] if ind == invalidCharForNumber { iter.head = i + iter.assertInteger() return value } if value > uint64SafeToMultiple10 { @@ -252,7 +326,14 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) { value = (value << 3) + (value << 1) + uint64(ind) } if !iter.loadMore() { + iter.assertInteger() return value } } } + +func (iter *Iterator) assertInteger() { + if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' 
{ + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/feature_iter_object.go index 3bdb5576eed..dfd91fa60d9 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_object.go +++ b/vendor/github.com/json-iterator/go/feature_iter_object.go @@ -19,15 +19,33 @@ func (iter *Iterator) ReadObject() (ret string) { c = iter.nextToken() if c == '"' { iter.unreadByte() - return string(iter.readObjectFieldAsBytes()) + if iter.cfg.objectFieldMustBeSimpleString { + return string(iter.readObjectFieldAsBytes()) + } else { + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } } if c == '}' { return "" // end of object } - iter.ReportError("ReadObject", `expect " after {`) + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) return case ',': - return string(iter.readObjectFieldAsBytes()) + if iter.cfg.objectFieldMustBeSimpleString { + return string(iter.readObjectFieldAsBytes()) + } else { + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } case '}': return "" // end of object default: @@ -44,17 +62,34 @@ func (iter *Iterator) readFieldHash() int32 { for i := iter.head; i < iter.tail; i++ { // require ascii string and no escape b := iter.buf[i] - if 'A' <= b && b <= 'Z' { - b += 'a' - 'A' + if !iter.cfg.objectFieldMustBeSimpleString && b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return int32(hash) } if b == 
'"' { iter.head = i + 1 c = iter.nextToken() if c != ':' { iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 } return int32(hash) } + if 'A' <= b && b <= 'Z' { + b += 'a' - 'A' + } hash ^= int64(b) hash *= 0x1000193 } @@ -80,18 +115,38 @@ func calcHash(str string) int32 { // ReadObjectCB read object with callback, the key is ascii only and field name not copied func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() + var fieldBytes []byte + var field string if c == '{' { c = iter.nextToken() if c == '"' { iter.unreadByte() - field := iter.readObjectFieldAsBytes() - if !callback(iter, *(*string)(unsafe.Pointer(&field))) { + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } + if !callback(iter, field) { return false } c = iter.nextToken() for c == ',' { - field = iter.readObjectFieldAsBytes() - if !callback(iter, *(*string)(unsafe.Pointer(&field))) { + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } + if !callback(iter, field) { return false } c = iter.nextToken() @@ -105,14 +160,14 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { if c == '}' { return true } - iter.ReportError("ReadObjectCB", `expect " after }`) + iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) return false } if c == 'n' { iter.skipThreeBytes('u', 'l', 'l') return true // null } - iter.ReportError("ReadObjectCB", 
`expect { or n`) + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) return false } @@ -125,7 +180,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { iter.unreadByte() field := iter.ReadString() if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field") + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) return false } if !callback(iter, field) { @@ -135,7 +190,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { for c == ',' { field = iter.ReadString() if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field") + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) return false } if !callback(iter, field) { @@ -152,14 +207,14 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { if c == '}' { return true } - iter.ReportError("ReadMapCB", `expect " after }`) + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) return false } if c == 'n' { iter.skipThreeBytes('u', 'l', 'l') return true // null } - iter.ReportError("ReadMapCB", `expect { or n`) + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) return false } @@ -176,7 +231,7 @@ func (iter *Iterator) readObjectStart() bool { iter.skipThreeBytes('u', 'l', 'l') return false } - iter.ReportError("readObjectStart", "expect { or n") + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) return false } @@ -192,7 +247,7 @@ func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { } } if iter.buf[iter.head] != ':' { - iter.ReportError("readObjectFieldAsBytes", "expect : after object field") + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) return } iter.head++ diff --git 
a/vendor/github.com/json-iterator/go/feature_iter_skip.go b/vendor/github.com/json-iterator/go/feature_iter_skip.go index b008d98c99a..f58beb9137b 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_skip.go +++ b/vendor/github.com/json-iterator/go/feature_iter_skip.go @@ -25,7 +25,7 @@ func (iter *Iterator) ReadBool() (ret bool) { iter.skipFourBytes('a', 'l', 's', 'e') return false } - iter.ReportError("ReadBool", "expect t or f") + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) return } @@ -59,7 +59,9 @@ func (iter *Iterator) stopCapture() []byte { iter.captureStartedAt = -1 iter.captured = nil if len(captured) == 0 { - return remaining + copied := make([]byte, len(remaining)) + copy(copied, remaining) + return copied } captured = append(captured, remaining...) return captured diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go index 047d58a4bc9..8fcdc3b69bd 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go @@ -1,4 +1,4 @@ -//+build jsoniter-sloppy +//+build jsoniter_sloppy package jsoniter diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go index d2676382540..f67bc2e8315 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go @@ -1,4 +1,4 @@ -//+build !jsoniter-sloppy +//+build !jsoniter_sloppy package jsoniter @@ -64,7 +64,7 @@ func (iter *Iterator) trySkipString() bool { } else if c == '\\' { return false } else if c < ' ' { - iter.ReportError("ReadString", + iter.ReportError("trySkipString", fmt.Sprintf(`invalid control character found: %d`, c)) return true // already failed } diff --git a/vendor/github.com/json-iterator/go/feature_iter_string.go 
b/vendor/github.com/json-iterator/go/feature_iter_string.go index b764600460e..adc487ea804 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_string.go +++ b/vendor/github.com/json-iterator/go/feature_iter_string.go @@ -28,7 +28,7 @@ func (iter *Iterator) ReadString() (ret string) { iter.skipThreeBytes('u', 'l', 'l') return "" } - iter.ReportError("ReadString", `expects " or n`) + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) return } @@ -47,7 +47,7 @@ func (iter *Iterator) readStringSlowPath() (ret string) { str = append(str, c) } } - iter.ReportError("ReadString", "unexpected end of input") + iter.ReportError("readStringSlowPath", "unexpected end of input") return } @@ -104,7 +104,7 @@ func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { case 't': str = append(str, '\t') default: - iter.ReportError("ReadString", + iter.ReportError("readEscapedChar", `invalid escape char after \`) return nil } @@ -139,7 +139,7 @@ func (iter *Iterator) ReadStringAsSlice() (ret []byte) { } return copied } - iter.ReportError("ReadString", `expects " or n`) + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) return } @@ -156,7 +156,7 @@ func (iter *Iterator) readU4() (ret rune) { } else if c >= 'A' && c <= 'F' { ret = ret*16 + rune(c-'A'+10) } else { - iter.ReportError("readU4", "expects 0~9 or a~f") + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) return } } diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go index 0439f672528..e187b200a9c 100644 --- a/vendor/github.com/json-iterator/go/feature_json_number.go +++ b/vendor/github.com/json-iterator/go/feature_json_number.go @@ -1,9 +1,25 @@ package jsoniter -import "encoding/json" +import ( + "encoding/json" + "strconv" +) type Number string +// String returns the literal text of the number. 
+func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + func CastJsonNumber(val interface{}) (string, bool) { switch typedVal := val.(type) { case json.Number: diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/feature_pool.go index 73962bc6f6c..52d38e68554 100644 --- a/vendor/github.com/json-iterator/go/feature_pool.go +++ b/vendor/github.com/json-iterator/go/feature_pool.go @@ -28,6 +28,7 @@ func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { func (cfg *frozenConfig) ReturnStream(stream *Stream) { stream.Error = nil + stream.Attachment = nil select { case cfg.streamPool <- stream: return @@ -48,6 +49,7 @@ func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { iter.Error = nil + iter.Attachment = nil select { case cfg.iteratorPool <- iter: return diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go b/vendor/github.com/json-iterator/go/feature_reflect.go index 05d91b49c8b..bed7764ed7d 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect.go +++ b/vendor/github.com/json-iterator/go/feature_reflect.go @@ -72,24 +72,24 @@ func init() { textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() } -type optionalDecoder struct { - valueType reflect.Type - valueDecoder ValDecoder +type OptionalDecoder struct { + ValueType reflect.Type + ValueDecoder ValDecoder } -func (decoder *optionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { if iter.ReadNil() { *((*unsafe.Pointer)(ptr)) = nil } else { if *((*unsafe.Pointer)(ptr)) == nil { //pointer to null, 
we have to allocate memory to hold the value - value := reflect.New(decoder.valueType) + value := reflect.New(decoder.ValueType) newPtr := extractInterface(value.Interface()).word - decoder.valueDecoder.Decode(newPtr, iter) + decoder.ValueDecoder.Decode(newPtr, iter) *((*uintptr)(ptr)) = uintptr(newPtr) } else { //reuse existing instance - decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) } } } @@ -113,11 +113,31 @@ func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { } } -type optionalEncoder struct { +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type optionalMapEncoder struct { valueEncoder ValEncoder } -func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { +func (encoder *optionalMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { if *((*unsafe.Pointer)(ptr)) == nil { stream.WriteNil() } else { @@ -125,15 +145,13 @@ func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { } } -func (encoder *optionalEncoder) EncodeInterface(val interface{}, stream *Stream) { +func (encoder *optionalMapEncoder) EncodeInterface(val interface{}, stream *Stream) { WriteToStream(val, stream, encoder) } -func (encoder *optionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { - if *((*unsafe.Pointer)(ptr)) == nil { - return true - } - return false +func (encoder *optionalMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + p := *((*unsafe.Pointer)(ptr)) + return p == nil || 
encoder.valueEncoder.IsEmpty(p) } type placeholderEncoder struct { @@ -146,7 +164,7 @@ func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { } func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) + encoder.getRealEncoder().EncodeInterface(val, stream) } func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { @@ -154,11 +172,11 @@ func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { } func (encoder *placeholderEncoder) getRealEncoder() ValEncoder { - for i := 0; i < 30; i++ { + for i := 0; i < 500; i++ { realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey) _, isPlaceholder := realDecoder.(*placeholderEncoder) if isPlaceholder { - time.Sleep(time.Second) + time.Sleep(10 * time.Millisecond) } else { return realDecoder } @@ -172,11 +190,11 @@ type placeholderDecoder struct { } func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - for i := 0; i < 30; i++ { + for i := 0; i < 500; i++ { realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey) _, isPlaceholder := realDecoder.(*placeholderDecoder) if isPlaceholder { - time.Sleep(time.Second) + time.Sleep(10 * time.Millisecond) } else { realDecoder.Decode(ptr, iter) return @@ -256,7 +274,7 @@ func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { if decoder != nil { return decoder, nil } - decoder = getTypeDecoderFromExtension(typ) + decoder = getTypeDecoderFromExtension(cfg, typ) if decoder != nil { cfg.addDecoderToCache(cacheKey, decoder) return decoder, nil @@ -267,6 +285,9 @@ func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { for _, extension := range extensions { decoder = extension.DecorateDecoder(typ, decoder) } + for _, extension := range cfg.extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } cfg.addDecoderToCache(cacheKey, decoder) return decoder, err } @@ -289,7 +310,7 @@ 
func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error templateInterface := reflect.New(typ).Elem().Interface() var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} if typ.Kind() == reflect.Ptr { - decoder = &optionalDecoder{typ.Elem(), decoder} + decoder = &OptionalDecoder{typ.Elem(), decoder} } return decoder, nil } @@ -302,7 +323,7 @@ func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error templateInterface := reflect.New(typ).Elem().Interface() var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} if typ.Kind() == reflect.Ptr { - decoder = &optionalDecoder{typ.Elem(), decoder} + decoder = &OptionalDecoder{typ.Elem(), decoder} } return decoder, nil } @@ -423,7 +444,7 @@ func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { if encoder != nil { return encoder, nil } - encoder = getTypeEncoderFromExtension(typ) + encoder = getTypeEncoderFromExtension(cfg, typ) if encoder != nil { cfg.addEncoderToCache(cacheKey, encoder) return encoder, nil @@ -434,6 +455,9 @@ func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { for _, extension := range extensions { encoder = extension.DecorateEncoder(typ, encoder) } + for _, extension := range cfg.extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } cfg.addEncoderToCache(cacheKey, encoder) return encoder, err } @@ -452,7 +476,7 @@ func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error return &jsoniterNumberCodec{}, nil } if typ.Implements(marshalerType) { - checkIsEmpty, err := createCheckIsEmpty(typ) + checkIsEmpty, err := createCheckIsEmpty(cfg, typ) if err != nil { return nil, err } @@ -462,12 +486,24 @@ func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error checkIsEmpty: checkIsEmpty, } if typ.Kind() == reflect.Ptr { - encoder = &optionalEncoder{encoder} + encoder = &OptionalEncoder{encoder} + } + 
return encoder, nil + } + if reflect.PtrTo(typ).Implements(marshalerType) { + checkIsEmpty, err := createCheckIsEmpty(cfg, reflect.PtrTo(typ)) + if err != nil { + return nil, err + } + templateInterface := reflect.New(typ).Interface() + var encoder ValEncoder = &marshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, } return encoder, nil } if typ.Implements(textMarshalerType) { - checkIsEmpty, err := createCheckIsEmpty(typ) + checkIsEmpty, err := createCheckIsEmpty(cfg, typ) if err != nil { return nil, err } @@ -477,7 +513,7 @@ func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error checkIsEmpty: checkIsEmpty, } if typ.Kind() == reflect.Ptr { - encoder = &optionalEncoder{encoder} + encoder = &OptionalEncoder{encoder} } return encoder, nil } @@ -490,7 +526,7 @@ func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error return createEncoderOfSimpleType(cfg, typ) } -func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) { +func createCheckIsEmpty(cfg *frozenConfig, typ reflect.Type) (checkIsEmpty, error) { kind := typ.Kind() switch kind { case reflect.String: @@ -535,9 +571,9 @@ func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) { case reflect.Slice: return &sliceEncoder{}, nil case reflect.Map: - return &mapEncoder{}, nil + return encoderOfMap(cfg, typ) case reflect.Ptr: - return &optionalEncoder{}, nil + return &OptionalEncoder{}, nil default: return nil, fmt.Errorf("unsupported type: %v", typ) } @@ -648,7 +684,7 @@ func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) if err != nil { return nil, err } - return &optionalDecoder{elemType, decoder}, nil + return &OptionalDecoder{elemType, decoder}, nil } func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { @@ -657,9 +693,9 @@ func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) if err != nil { return nil, err } - 
encoder := &optionalEncoder{elemEncoder} + encoder := &OptionalEncoder{elemEncoder} if elemType.Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} + encoder = &OptionalEncoder{encoder} } return encoder, nil } diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go index e23f187b7c8..d661fb6fe56 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_array.go +++ b/vendor/github.com/json-iterator/go/feature_reflect_array.go @@ -21,7 +21,7 @@ func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { return nil, err } if typ.Elem().Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} + encoder = &OptionalEncoder{encoder} } return &arrayEncoder{typ, typ.Elem(), encoder}, nil } diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/feature_reflect_extension.go index 3dd38299d49..c129076bcc0 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_extension.go +++ b/vendor/github.com/json-iterator/go/feature_reflect_extension.go @@ -161,22 +161,31 @@ func RegisterExtension(extension Extension) { extensions = append(extensions, extension) } -func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { - decoder := _getTypeDecoderFromExtension(typ) +func getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(cfg, typ) if decoder != nil { for _, extension := range extensions { decoder = extension.DecorateDecoder(typ, decoder) } + for _, extension := range cfg.extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } } return decoder } -func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { +func _getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecoder { for _, extension := range extensions { decoder := extension.CreateDecoder(typ) if decoder != nil { return decoder } } + for 
_, extension := range cfg.extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } typeName := typ.String() decoder := typeDecoders[typeName] if decoder != nil { @@ -185,29 +194,38 @@ func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { if typ.Kind() == reflect.Ptr { decoder := typeDecoders[typ.Elem().String()] if decoder != nil { - return &optionalDecoder{typ.Elem(), decoder} + return &OptionalDecoder{typ.Elem(), decoder} } } return nil } -func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { - encoder := _getTypeEncoderFromExtension(typ) +func getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(cfg, typ) if encoder != nil { for _, extension := range extensions { encoder = extension.DecorateEncoder(typ, encoder) } + for _, extension := range cfg.extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } } return encoder } -func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { +func _getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncoder { for _, extension := range extensions { encoder := extension.CreateEncoder(typ) if encoder != nil { return encoder } } + for _, extension := range cfg.extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } typeName := typ.String() encoder := typeEncoders[typeName] if encoder != nil { @@ -216,7 +234,7 @@ func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { if typ.Kind() == reflect.Ptr { encoder := typeEncoders[typ.Elem().String()] if encoder != nil { - return &optionalEncoder{encoder} + return &OptionalEncoder{encoder} } } return nil @@ -254,7 +272,7 @@ func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, err for _, binding := range structDescriptor.Fields { binding.levels = append([]int{i}, binding.levels...) 
omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &optionalEncoder{binding.Encoder} + binding.Encoder = &OptionalEncoder{binding.Encoder} binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder} binding.Decoder = &structFieldDecoder{&field, binding.Decoder} @@ -269,7 +287,7 @@ func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, err if decoder == nil { var err error decoder, err = decoderOfType(cfg, field.Type) - if err != nil { + if len(fieldNames) > 0 && err != nil { return nil, err } } @@ -277,12 +295,13 @@ func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, err if encoder == nil { var err error encoder, err = encoderOfType(cfg, field.Type) - if err != nil { + if len(fieldNames) > 0 && err != nil { return nil, err } - // map is stored as pointer in the struct - if field.Type.Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} + // map is stored as pointer in the struct, + // and treat nil or empty map as empty field + if encoder != nil && field.Type.Kind() == reflect.Map { + encoder = &optionalMapEncoder{encoder} } } binding := &Binding{ @@ -323,6 +342,9 @@ func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Bin for _, extension := range extensions { extension.UpdateStructDescriptor(structDescriptor) } + for _, extension := range cfg.extensions { + extension.UpdateStructDescriptor(structDescriptor) + } processTags(structDescriptor, cfg) // merge normal & embedded bindings & sort with original order allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go index b37dab3d8a1..95bd1e87cc5 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_native.go +++ 
b/vendor/github.com/json-iterator/go/feature_reflect_native.go @@ -4,6 +4,7 @@ import ( "encoding" "encoding/base64" "encoding/json" + "reflect" "unsafe" ) @@ -31,7 +32,9 @@ type intCodec struct { } func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*int)(ptr)) = iter.ReadInt() + if !iter.ReadNil() { + *((*int)(ptr)) = iter.ReadInt() + } } func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -50,7 +53,9 @@ type uintptrCodec struct { } func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) + if !iter.ReadNil() { + *((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) + } } func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -69,7 +74,9 @@ type int8Codec struct { } func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*int8)(ptr)) = iter.ReadInt8() + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } } func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -88,7 +95,9 @@ type int16Codec struct { } func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*int16)(ptr)) = iter.ReadInt16() + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } } func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -107,7 +116,9 @@ type int32Codec struct { } func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*int32)(ptr)) = iter.ReadInt32() + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } } func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -126,7 +137,9 @@ type int64Codec struct { } func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*int64)(ptr)) = iter.ReadInt64() + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } } func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -145,7 +158,10 @@ type uintCodec struct { } func (codec *uintCodec) Decode(ptr 
unsafe.Pointer, iter *Iterator) { - *((*uint)(ptr)) = iter.ReadUint() + if !iter.ReadNil() { + *((*uint)(ptr)) = iter.ReadUint() + return + } } func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -164,7 +180,9 @@ type uint8Codec struct { } func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*uint8)(ptr)) = iter.ReadUint8() + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } } func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -183,7 +201,9 @@ type uint16Codec struct { } func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*uint16)(ptr)) = iter.ReadUint16() + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } } func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -202,7 +222,9 @@ type uint32Codec struct { } func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*uint32)(ptr)) = iter.ReadUint32() + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } } func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -221,7 +243,9 @@ type uint64Codec struct { } func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*uint64)(ptr)) = iter.ReadUint64() + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } } func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -240,7 +264,9 @@ type float32Codec struct { } func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*float32)(ptr)) = iter.ReadFloat32() + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } } func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -259,7 +285,9 @@ type float64Codec struct { } func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*float64)(ptr)) = iter.ReadFloat64() + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } } func (codec *float64Codec) Encode(ptr unsafe.Pointer, 
stream *Stream) { @@ -278,7 +306,9 @@ type boolCodec struct { } func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*bool)(ptr)) = iter.ReadBool() + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } } func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -297,7 +327,42 @@ type emptyInterfaceCodec struct { } func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*interface{})(ptr)) = iter.Read() + existing := *((*interface{})(ptr)) + + // Checking for both typed and untyped nil pointers. + if existing != nil && + reflect.TypeOf(existing).Kind() == reflect.Ptr && + !reflect.ValueOf(existing).IsNil() { + + var ptrToExisting interface{} + for { + elem := reflect.ValueOf(existing).Elem() + if elem.Kind() != reflect.Ptr || elem.IsNil() { + break + } + ptrToExisting = existing + existing = elem.Interface() + } + + if iter.ReadNil() { + if ptrToExisting != nil { + nilPtr := reflect.Zero(reflect.TypeOf(ptrToExisting).Elem()) + reflect.ValueOf(ptrToExisting).Elem().Set(nilPtr) + } else { + *((*interface{})(ptr)) = nil + } + } else { + iter.ReadVal(existing) + } + + return + } + + if iter.ReadNil() { + *((*interface{})(ptr)) = nil + } else { + *((*interface{})(ptr)) = iter.Read() + } } func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -309,7 +374,8 @@ func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Strea } func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - return ptr == nil + emptyInterface := (*emptyInterface)(ptr) + return emptyInterface.typ == nil } type nonEmptyInterfaceCodec struct { @@ -326,15 +392,20 @@ func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) e.typ = nonEmptyInterface.itab.typ e.word = nonEmptyInterface.word iter.ReadVal(&i) + if e.word == nil { + nonEmptyInterface.itab = nil + } nonEmptyInterface.word = e.word } func (codec *nonEmptyInterfaceCodec) Encode(ptr 
unsafe.Pointer, stream *Stream) { nonEmptyInterface := (*nonEmptyInterface)(ptr) var i interface{} - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word + if nonEmptyInterface.itab != nil { + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + } stream.WriteVal(i) } @@ -370,7 +441,15 @@ type jsonNumberCodec struct { } func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } } func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -389,7 +468,15 @@ type jsoniterNumberCodec struct { } func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } } func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { @@ -521,7 +608,7 @@ type stringModeNumberDecoder struct { func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { c := iter.nextToken() if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect "`) + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) return } decoder.elemDecoder.Decode(ptr, iter) @@ -530,7 +617,7 @@ func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterato } c = iter.readByte() if c != 
'"' { - iter.ReportError("stringModeNumberDecoder", `expect "`) + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) return } } @@ -595,7 +682,12 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { templateInterface := encoder.templateInterface templateInterface.word = ptr realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler := (*realInterface).(json.Marshaler) + marshaler, ok := (*realInterface).(json.Marshaler) + if !ok { + stream.WriteVal(nil) + return + } + bytes, err := marshaler.MarshalJSON() if err != nil { stream.Error = err diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go index 7377eec7b3b..51a8daecfba 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_slice.go +++ b/vendor/github.com/json-iterator/go/feature_reflect_slice.go @@ -21,7 +21,7 @@ func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { return nil, err } if typ.Elem().Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} + encoder = &OptionalEncoder{encoder} } return &sliceEncoder{typ, typ.Elem(), encoder}, nil } @@ -127,12 +127,10 @@ func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Typ newVal := reflect.MakeSlice(sliceType, newLen, newCap) dst := unsafe.Pointer(newVal.Pointer()) // copy old array into new array - originalBytesCount := uintptr(slice.Len) * elementType.Size() - srcPtr := (*[1 << 30]byte)(slice.Data) - dstPtr := (*[1 << 30]byte)(dst) - for i := uintptr(0); i < originalBytesCount; i++ { - dstPtr[i] = srcPtr[i] - } + originalBytesCount := slice.Len * int(elementType.Size()) + srcSliceHeader := (unsafe.Pointer)(&sliceHeader{slice.Data, originalBytesCount, originalBytesCount}) + dstSliceHeader := (unsafe.Pointer)(&sliceHeader{dst, originalBytesCount, originalBytesCount}) + copy(*(*[]byte)(dstSliceHeader), *(*[]byte)(srcSliceHeader)) 
slice.Data = dst slice.Len = newLen slice.Cap = newCap diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go index b3417fd73a7..e6ced77c22e 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go @@ -427,8 +427,18 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } - fieldBytes := iter.readObjectFieldAsBytes() - field := *(*string)(unsafe.Pointer(&fieldBytes)) + var fieldBytes []byte + var field string + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } fieldDecoder := decoder.fields[strings.ToLower(field)] if fieldDecoder == nil { iter.Skip() @@ -436,8 +446,16 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) fieldDecoder.Decode(ptr, iter) } for iter.nextToken() == ',' { - fieldBytes = iter.readObjectFieldAsBytes() - field = *(*string)(unsafe.Pointer(&fieldBytes)) + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } fieldDecoder = decoder.fields[strings.ToLower(field)] if fieldDecoder == nil { iter.Skip() diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/feature_stream.go index 9c8470a03ae..97355eb5b70 100644 --- a/vendor/github.com/json-iterator/go/feature_stream.go +++ 
b/vendor/github.com/json-iterator/go/feature_stream.go @@ -4,15 +4,16 @@ import ( "io" ) -// Stream is a io.Writer like object, with JSON specific write functions. +// stream is a io.Writer like object, with JSON specific write functions. // Error is not returned as return value, but stored as Error member on this stream instance. type Stream struct { - cfg *frozenConfig - out io.Writer - buf []byte - n int - Error error - indention int + cfg *frozenConfig + out io.Writer + buf []byte + n int + Error error + indention int + Attachment interface{} // open for customized encoder } // NewStream create new stream instance. @@ -191,6 +192,9 @@ func (stream *Stream) ensure(minimal int) { func (stream *Stream) growAtLeast(minimal int) { if stream.out != nil { stream.Flush() + if stream.Available() >= minimal { + return + } } toGrow := len(stream.buf) if toGrow < minimal { @@ -280,8 +284,7 @@ func (stream *Stream) WriteArrayStart() { // WriteEmptyArray write [] func (stream *Stream) WriteEmptyArray() { - stream.writeByte('[') - stream.writeByte(']') + stream.writeTwoBytes('[', ']') } // WriteArrayEnd write ] with possible indention From 7cccb94edb14273209f9bbb4b033e8df1b917aec Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Wed, 13 Dec 2017 17:43:16 +0800 Subject: [PATCH 282/794] Kubectl: Move no-headers flag get out of for loop --- pkg/kubectl/cmd/resource/get.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/resource/get.go b/pkg/kubectl/cmd/resource/get.go index b3cd8eb2580..4013cb42874 100644 --- a/pkg/kubectl/cmd/resource/get.go +++ b/pkg/kubectl/cmd/resource/get.go @@ -308,6 +308,7 @@ func (options *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []str } filteredResourceCount := 0 + noHeaders := cmdutil.GetFlagBool(cmd, "no-headers") for ix := range objs { var mapping *meta.RESTMapping var original runtime.Object @@ -350,7 +351,6 @@ func (options *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []str // 
TODO: this doesn't belong here // add linebreak between resource groups (if there is more than one) // skip linebreak above first resource group - noHeaders := cmdutil.GetFlagBool(cmd, "no-headers") if lastMapping != nil && !noHeaders { fmt.Fprintf(options.ErrOut, "%s\n", "") } From 8ac24a5ed2a5e99f02608690bcd0b7a4f7a96b5f Mon Sep 17 00:00:00 2001 From: bistros Date: Wed, 13 Dec 2017 18:43:30 +0900 Subject: [PATCH 283/794] fixed typo in kubeadm/v1alpha1/defaults.go fixed some type in comment --- cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go index 16c2db8e89c..ea28af88e5f 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go @@ -65,7 +65,7 @@ const ( DefaultProxyBindAddressv4 = "0.0.0.0" // DefaultProxyBindAddressv6 is the default bind address when the advertise address is v6 DefaultProxyBindAddressv6 = "::" - // KubeproxyKubeConfigFileName efines the file name for the kube-proxy's KubeConfig file + // KubeproxyKubeConfigFileName defines the file name for the kube-proxy's KubeConfig file KubeproxyKubeConfigFileName = "/var/lib/kube-proxy/kubeconfig.conf" ) From 5da7b11a31efa1d4c42b8d852db13ad91b943908 Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Wed, 13 Dec 2017 16:05:20 +0530 Subject: [PATCH 284/794] add benchmark for ConfigCompatibleWithStandardLibrary --- pkg/api/testing/serialization_test.go | 31 +++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/pkg/api/testing/serialization_test.go b/pkg/api/testing/serialization_test.go index d47fad73fd7..85f41d674dd 100644 --- a/pkg/api/testing/serialization_test.go +++ b/pkg/api/testing/serialization_test.go @@ -545,8 +545,9 @@ func BenchmarkDecodeIntoJSON(b *testing.B) { b.StopTimer() } -// BenchmarkDecodeJSON provides a baseline for JSON 
decode performance -func BenchmarkDecodeIntoJSONCodecGen(b *testing.B) { +// BenchmarkDecodeIntoJSONCodecGenConfigFast provides a baseline +// for JSON decode performance with jsoniter.ConfigFast +func BenchmarkDecodeIntoJSONCodecGenConfigFast(b *testing.B) { kcodec := testapi.Default.Codec() items := benchmarkItems(b) width := len(items) @@ -568,3 +569,29 @@ func BenchmarkDecodeIntoJSONCodecGen(b *testing.B) { } b.StopTimer() } + +// BenchmarkDecodeIntoJSONCodecGenConfigCompatibleWithStandardLibrary +// provides a baseline for JSON decode performance +// with jsoniter.ConfigCompatibleWithStandardLibrary +func BenchmarkDecodeIntoJSONCodecGenConfigCompatibleWithStandardLibrary(b *testing.B) { + kcodec := testapi.Default.Codec() + items := benchmarkItems(b) + width := len(items) + encoded := make([][]byte, width) + for i := range items { + data, err := runtime.Encode(kcodec, &items[i]) + if err != nil { + b.Fatal(err) + } + encoded[i] = data + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + obj := v1.Pod{} + if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(encoded[i%width], &obj); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} From 9c1763580208e8f703042c45b13075c3459950f0 Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Wed, 13 Dec 2017 16:05:34 +0530 Subject: [PATCH 285/794] replace ConfigFast with ConfigCompatibleWithStandardLibrary --- .../pkg/apis/apiextensions/fuzzer/fuzzer.go | 26 ------------------- .../pkg/apis/meta/v1/group_version_test.go | 2 +- .../pkg/apis/meta/v1/types_test.go | 2 +- .../pkg/runtime/serializer/json/json.go | 8 +++--- 4 files changed, 6 insertions(+), 32 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go index fcde569686c..a7cfb0ae7cb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go +++ 
b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go @@ -60,22 +60,6 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { } if isValue || c.Intn(10) == 0 { c.Fuzz(vobj.Field(i).Addr().Interface()) - - // JSON keys must not contain escape char with our JSON codec (jsoniter) - // TODO: remove this when/if we moved from jsoniter.ConfigFastest to ConfigCompatibleWithStandardLibrary - if field.Type.Kind() == reflect.Map { - keys := append([]reflect.Value(nil), vobj.Field(i).MapKeys()...) - for _, k := range keys { - stripped := toJSONString(k.String()) - if stripped == k.String() { - continue - } - // set new key - vobj.Field(i).SetMapIndex(reflect.ValueOf(stripped), vobj.Field(i).MapIndex(k)) - // remove old - vobj.Field(i).SetMapIndex(k, reflect.Value{}) - } - } } } } @@ -125,13 +109,3 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { }, } } - -func toJSONString(s string) string { - return strings.Map(func(r rune) rune { - // replace chars which are not supported in keys by jsoniter.ConfigFastest - if r == '\\' || r == '"' { - return 'x' - } - return r - }, s) -} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/group_version_test.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/group_version_test.go index 2217aa293d4..1f7f07e81d0 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/group_version_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/group_version_test.go @@ -47,7 +47,7 @@ func TestGroupVersionUnmarshalJSON(t *testing.T) { t.Errorf("JSON codec failed to unmarshal input '%s': expected %+v, got %+v", c.input, c.expect, result.GV) } // test the json-iterator codec - if err := jsoniter.ConfigFastest.Unmarshal(c.input, &result); err != nil { + if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(c.input, &result); err != nil { t.Errorf("json-iterator codec failed to unmarshal input '%v': %v", c.input, err) } if !reflect.DeepEqual(result.GV, 
c.expect) { diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types_test.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types_test.go index 21aa9560e92..116f7505a42 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types_test.go @@ -58,7 +58,7 @@ func TestVerbsUgorjiUnmarshalJSON(t *testing.T) { for i, c := range cases { var result APIResource - if err := jsoniter.ConfigFastest.Unmarshal([]byte(c.input), &result); err != nil { + if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal([]byte(c.input), &result); err != nil { t.Errorf("[%d] Failed to unmarshal input '%v': %v", i, c.input, err) } if !reflect.DeepEqual(result, c.result) { diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 8a217f32e31..d3b37c75e3b 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -154,7 +154,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i types, _, err := s.typer.ObjectKinds(into) switch { case runtime.IsNotRegisteredError(err), isUnstructured: - if err := jsoniter.ConfigFastest.Unmarshal(data, into); err != nil { + if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(data, into); err != nil { return nil, actual, err } return into, actual, nil @@ -188,7 +188,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i return nil, actual, err } - if err := jsoniter.ConfigFastest.Unmarshal(data, obj); err != nil { + if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(data, obj); err != nil { return nil, actual, err } return obj, actual, nil @@ -197,7 +197,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // Encode serializes the provided object to the 
given writer. func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { if s.yaml { - json, err := jsoniter.ConfigFastest.Marshal(obj) + json, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(obj) if err != nil { return err } @@ -210,7 +210,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { } if s.pretty { - data, err := jsoniter.ConfigFastest.MarshalIndent(obj, "", " ") + data, err := jsoniter.ConfigCompatibleWithStandardLibrary.MarshalIndent(obj, "", " ") if err != nil { return err } From 9c3b2a00ac88929c4012154c29dbca68d866fd18 Mon Sep 17 00:00:00 2001 From: linyouchong Date: Wed, 13 Dec 2017 18:02:16 +0800 Subject: [PATCH 286/794] fix incorrect log --- pkg/volume/csi/csi_mounter.go | 8 ++++---- pkg/volume/csi/csi_plugin_test.go | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go index 84d10362bdb..3009fdf47ea 100644 --- a/pkg/volume/csi/csi_mounter.go +++ b/pkg/volume/csi/csi_mounter.go @@ -269,21 +269,21 @@ func (c *csiMountMgr) TearDownAt(dir string) error { // TODO make all assertion calls private within the client itself if err := csi.AssertSupportedVersion(ctx, csiVersion); err != nil { - glog.Errorf(log("mounter.SetUpAt failed to assert version: %v", err)) + glog.Errorf(log("mounter.TearDownAt failed to assert version: %v", err)) return err } if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil { - glog.Errorf(log("mounter.SetUpAt failed: %v", err)) + glog.Errorf(log("mounter.TearDownAt failed: %v", err)) return err } // clean mount point dir if err := removeMountDir(c.plugin, dir); err != nil { - glog.Error(log("mounter.SetUpAt failed to clean mount dir [%s]: %v", dir, err)) + glog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err)) return err } - glog.V(4).Infof(log("mounte.SetUpAt successfully unmounted dir [%s]", dir)) + glog.V(4).Infof(log("mounte.TearDownAt successfully unmounted dir 
[%s]", dir)) return nil } diff --git a/pkg/volume/csi/csi_plugin_test.go b/pkg/volume/csi/csi_plugin_test.go index 26d5f8c14de..a2dd7035956 100644 --- a/pkg/volume/csi/csi_plugin_test.go +++ b/pkg/volume/csi/csi_plugin_test.go @@ -230,11 +230,11 @@ func TestPluginNewUnmounter(t *testing.T) { csiUnmounter := unmounter.(*csiMountMgr) if err != nil { - t.Fatalf("Failed to make a new Mounter: %v", err) + t.Fatalf("Failed to make a new Unmounter: %v", err) } if csiUnmounter == nil { - t.Fatal("failed to create CSI mounter") + t.Fatal("failed to create CSI Unmounter") } if csiUnmounter.podUID != testPodUID { @@ -305,6 +305,6 @@ func TestPluginNewDetacher(t *testing.T) { t.Error("plugin not set for detacher") } if csiDetacher.k8s == nil { - t.Error("Kubernetes client not set for attacher") + t.Error("Kubernetes client not set for detacher") } } From 3d65f4cbd3038901bffcbd652f1f2a720de713d7 Mon Sep 17 00:00:00 2001 From: David Eads Date: Wed, 13 Dec 2017 08:23:25 -0500 Subject: [PATCH 287/794] expose special storage locations for downstream consumption --- pkg/kubeapiserver/default_storage_factory_builder.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/kubeapiserver/default_storage_factory_builder.go b/pkg/kubeapiserver/default_storage_factory_builder.go index ca1a3d35b33..7ebc9468865 100644 --- a/pkg/kubeapiserver/default_storage_factory_builder.go +++ b/pkg/kubeapiserver/default_storage_factory_builder.go @@ -29,8 +29,8 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" ) -// specialDefaultResourcePrefixes are prefixes compiled into Kubernetes. -var specialDefaultResourcePrefixes = map[schema.GroupResource]string{ +// SpecialDefaultResourcePrefixes are prefixes compiled into Kubernetes. 
+var SpecialDefaultResourcePrefixes = map[schema.GroupResource]string{ {Group: "", Resource: "replicationControllers"}: "controllers", {Group: "", Resource: "replicationcontrollers"}: "controllers", {Group: "", Resource: "endpoints"}: "services/endpoints", @@ -53,7 +53,7 @@ func NewStorageFactory(storageConfig storagebackend.Config, defaultMediaType str if err != nil { return nil, err } - return serverstorage.NewDefaultStorageFactory(storageConfig, defaultMediaType, serializer, resourceEncodingConfig, apiResourceConfig, specialDefaultResourcePrefixes), nil + return serverstorage.NewDefaultStorageFactory(storageConfig, defaultMediaType, serializer, resourceEncodingConfig, apiResourceConfig, SpecialDefaultResourcePrefixes), nil } // Merges the given defaultResourceConfig with specifc GroupvVersionResource overrides. From 2f7793df7dad4e66d5b2ea828a55f4e21ba36099 Mon Sep 17 00:00:00 2001 From: David Eads Date: Wed, 13 Dec 2017 08:48:14 -0500 Subject: [PATCH 288/794] allow convert to default on a per object basis --- pkg/kubectl/cmd/convert.go | 59 ++++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 22 deletions(-) diff --git a/pkg/kubectl/cmd/convert.go b/pkg/kubectl/cmd/convert.go index adff80c5302..285b726011d 100644 --- a/pkg/kubectl/cmd/convert.go +++ b/pkg/kubectl/cmd/convert.go @@ -99,19 +99,15 @@ type ConvertOptions struct { out io.Writer printer printers.ResourcePrinter - outputVersion schema.GroupVersion + specifiedOutputVersion schema.GroupVersion } // outputVersion returns the preferred output version for generic content (JSON, YAML, or templates) // defaultVersion is never mutated. 
Nil simply allows clean passing in common usage from client.Config -func outputVersion(cmd *cobra.Command, defaultVersion *schema.GroupVersion) (schema.GroupVersion, error) { +func outputVersion(cmd *cobra.Command) (schema.GroupVersion, error) { outputVersionString := cmdutil.GetFlagString(cmd, "output-version") if len(outputVersionString) == 0 { - if defaultVersion == nil { - return schema.GroupVersion{}, nil - } - - return *defaultVersion, nil + return schema.GroupVersion{}, nil } return schema.ParseGroupVersion(outputVersionString) @@ -119,13 +115,10 @@ func outputVersion(cmd *cobra.Command, defaultVersion *schema.GroupVersion) (sch // Complete collects information required to run Convert command from command line. func (o *ConvertOptions) Complete(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) (err error) { - o.outputVersion, err = outputVersion(cmd, &scheme.Registry.EnabledVersionsForGroup(api.GroupName)[0]) + o.specifiedOutputVersion, err = outputVersion(cmd) if err != nil { return err } - if !scheme.Registry.IsEnabledVersion(o.outputVersion) { - return cmdutil.UsageErrorf(cmd, "'%s' is not a registered version.", o.outputVersion) - } // build the builder o.builder = f.NewBuilder(). 
@@ -184,7 +177,7 @@ func (o *ConvertOptions) RunConvert() error { return fmt.Errorf("no objects passed to convert") } - objects, err := asVersionedObject(infos, !singleItemImplied, o.outputVersion, o.encoder) + objects, err := asVersionedObject(infos, !singleItemImplied, o.specifiedOutputVersion, o.encoder) if err != nil { return err } @@ -194,7 +187,7 @@ func (o *ConvertOptions) RunConvert() error { if err != nil { return err } - filteredObj, err := objectListToVersionedObject(items, o.outputVersion) + filteredObj, err := objectListToVersionedObject(items, o.specifiedOutputVersion) if err != nil { return err } @@ -206,9 +199,14 @@ func (o *ConvertOptions) RunConvert() error { // objectListToVersionedObject receives a list of api objects and a group version // and squashes the list's items into a single versioned runtime.Object. -func objectListToVersionedObject(objects []runtime.Object, version schema.GroupVersion) (runtime.Object, error) { +func objectListToVersionedObject(objects []runtime.Object, specifiedOutputVersion schema.GroupVersion) (runtime.Object, error) { objectList := &api.List{Items: objects} - converted, err := tryConvert(scheme.Scheme, objectList, version, scheme.Registry.GroupOrDie(api.GroupName).GroupVersion) + targetVersions := []schema.GroupVersion{} + if !specifiedOutputVersion.Empty() { + targetVersions = append(targetVersions, specifiedOutputVersion) + } + targetVersions = append(targetVersions, scheme.Registry.GroupOrDie(api.GroupName).GroupVersion) + converted, err := tryConvert(scheme.Scheme, objectList, targetVersions...) if err != nil { return nil, err } @@ -219,8 +217,8 @@ func objectListToVersionedObject(objects []runtime.Object, version schema.GroupV // the objects as children, or if only a single Object is present, as that object. The provided // version will be preferred as the conversion target, but the Object's mapping version will be // used if that version is not present. 
-func asVersionedObject(infos []*resource.Info, forceList bool, version schema.GroupVersion, encoder runtime.Encoder) (runtime.Object, error) { - objects, err := asVersionedObjects(infos, version, encoder) +func asVersionedObject(infos []*resource.Info, forceList bool, specifiedOutputVersion schema.GroupVersion, encoder runtime.Encoder) (runtime.Object, error) { + objects, err := asVersionedObjects(infos, specifiedOutputVersion, encoder) if err != nil { return nil, err } @@ -230,7 +228,13 @@ func asVersionedObject(infos []*resource.Info, forceList bool, version schema.Gr object = objects[0] } else { object = &api.List{Items: objects} - converted, err := tryConvert(scheme.Scheme, object, version, scheme.Registry.GroupOrDie(api.GroupName).GroupVersion) + targetVersions := []schema.GroupVersion{} + if !specifiedOutputVersion.Empty() { + targetVersions = append(targetVersions, specifiedOutputVersion) + } + targetVersions = append(targetVersions, scheme.Registry.GroupOrDie(api.GroupName).GroupVersion) + + converted, err := tryConvert(scheme.Scheme, object, targetVersions...) if err != nil { return nil, err } @@ -238,7 +242,7 @@ func asVersionedObject(infos []*resource.Info, forceList bool, version schema.Gr } actualVersion := object.GetObjectKind().GroupVersionKind() - if actualVersion.Version != version.Version { + if actualVersion.Version != specifiedOutputVersion.Version { defaultVersionInfo := "" if len(actualVersion.Version) > 0 { defaultVersionInfo = fmt.Sprintf("Defaulting to %q", actualVersion.Version) @@ -251,16 +255,17 @@ func asVersionedObject(infos []*resource.Info, forceList bool, version schema.Gr // asVersionedObjects converts a list of infos into versioned objects. The provided // version will be preferred as the conversion target, but the Object's mapping version will be // used if that version is not present. 
-func asVersionedObjects(infos []*resource.Info, version schema.GroupVersion, encoder runtime.Encoder) ([]runtime.Object, error) { +func asVersionedObjects(infos []*resource.Info, specifiedOutputVersion schema.GroupVersion, encoder runtime.Encoder) ([]runtime.Object, error) { objects := []runtime.Object{} for _, info := range infos { if info.Object == nil { continue } + targetVersions := []schema.GroupVersion{} // objects that are not part of api.Scheme must be converted to JSON // TODO: convert to map[string]interface{}, attach to runtime.Unknown? - if !version.Empty() { + if !specifiedOutputVersion.Empty() { if _, _, err := scheme.Scheme.ObjectKinds(info.Object); runtime.IsNotRegisteredError(err) { // TODO: ideally this would encode to version, but we don't expose multiple codecs here. data, err := runtime.Encode(encoder, info.Object) @@ -271,9 +276,19 @@ func asVersionedObjects(infos []*resource.Info, version schema.GroupVersion, enc objects = append(objects, &runtime.Unknown{Raw: data}) continue } + targetVersions = append(targetVersions, specifiedOutputVersion) + } else { + gvks, _, err := scheme.Scheme.ObjectKinds(info.Object) + if err == nil { + for _, gvk := range gvks { + for _, version := range scheme.Registry.EnabledVersionsForGroup(gvk.Group) { + targetVersions = append(targetVersions, version) + } + } + } } - converted, err := tryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.GroupVersionKind.GroupVersion()) + converted, err := tryConvert(info.Mapping.ObjectConvertor, info.Object, targetVersions...) if err != nil { return nil, err } From 07e7bf60ce712336e240941ca0440f1d9e775f88 Mon Sep 17 00:00:00 2001 From: Daniel Nardo Date: Mon, 11 Dec 2017 10:10:43 -0800 Subject: [PATCH 289/794] Update debian setup script to match GCI. 
--- cluster/gce/configure-vm.sh | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh index 4bc6dc5a67a..29df03f7272 100755 --- a/cluster/gce/configure-vm.sh +++ b/cluster/gce/configure-vm.sh @@ -86,9 +86,41 @@ ensure-local-disks() { function config-ip-firewall { echo "Configuring IP firewall rules" + # Do not consider loopback addresses as martian source or destination while + # routing. This enables the use of 127/8 for local routing purposes. + sysctl -w net.ipv4.conf.all.route_localnet=1 + + # We need to add rules to accept all TCP/UDP/ICMP packets. + if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then + echo "Add rules to accept all inbound TCP/UDP/ICMP packets" + iptables -A INPUT -p TCP -j ACCEPT + iptables -A INPUT -p UDP -j ACCEPT + iptables -A INPUT -p ICMP -j ACCEPT + fi + if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then + echo "Add rules to accept all forwarded TCP/UDP/ICMP packets" + iptables -A FORWARD -p TCP -j ACCEPT + iptables -A FORWARD -p UDP -j ACCEPT + iptables -A FORWARD -p ICMP -j ACCEPT + fi + + # Flush iptables nat table + iptables -t nat -F || true + + if [[ "${NON_MASQUERADE_CIDR:-}" == "0.0.0.0/0" ]]; then + echo "Add rules for ip masquerade" + iptables -t nat -N IP-MASQ + iptables -t nat -A POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! 
--dst-type LOCAL -j IP-MASQ + iptables -t nat -A IP-MASQ -d 169.254.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN + iptables -t nat -A IP-MASQ -d 10.0.0.0/8 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN + iptables -t nat -A IP-MASQ -d 172.16.0.0/12 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN + iptables -t nat -A IP-MASQ -d 192.168.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN + iptables -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE + fi + if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then echo "Add rule for metadata concealment" - iptables -w -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988 + iptables -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988 fi } @@ -171,7 +203,6 @@ function remove-docker-artifacts() { apt-get-install bridge-utils # Remove docker artifacts on minion nodes, if present - iptables -t nat -F || true ifconfig docker0 down || true brctl delbr docker0 || true echo "== Finished deleting docker0 ==" From a75aa0f41c51add6fa02c8bfc9362cfe9a5be8bc Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Thu, 14 Dec 2017 01:07:31 +0530 Subject: [PATCH 290/794] update staging godeps --- staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json | 2 +- staging/src/k8s.io/apimachinery/Godeps/Godeps.json | 2 +- staging/src/k8s.io/apiserver/Godeps/Godeps.json | 2 +- staging/src/k8s.io/client-go/Godeps/Godeps.json | 2 +- staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json | 2 +- 
staging/src/k8s.io/metrics/Godeps/Godeps.json | 2 +- staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json | 2 +- staging/src/k8s.io/sample-controller/Godeps/Godeps.json | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index b392217aff2..08607c1ddcf 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -260,7 +260,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "36b14963da70d11297d313183d7e6388c8510e1e" + "Rev": "13f86432b882000a51c6e610c620974462691a97" }, { "ImportPath": "github.com/juju/ratelimit", diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index 5e2eaf6ad15..a264f944b4e 100644 --- a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -124,7 +124,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "36b14963da70d11297d313183d7e6388c8510e1e" + "Rev": "13f86432b882000a51c6e610c620974462691a97" }, { "ImportPath": "github.com/mailru/easyjson/buffer", diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index ac7aef5679c..83c6b74dbaa 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -480,7 +480,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "36b14963da70d11297d313183d7e6388c8510e1e" + "Rev": "13f86432b882000a51c6e610c620974462691a97" }, { "ImportPath": "github.com/juju/ratelimit", diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index aeef01ca2ca..d9c1c4f0410 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ 
-228,7 +228,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "36b14963da70d11297d313183d7e6388c8510e1e" + "Rev": "13f86432b882000a51c6e610c620974462691a97" }, { "ImportPath": "github.com/juju/ratelimit", diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index 28fdfdb17fb..ec78f699e49 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -240,7 +240,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "36b14963da70d11297d313183d7e6388c8510e1e" + "Rev": "13f86432b882000a51c6e610c620974462691a97" }, { "ImportPath": "github.com/juju/ratelimit", diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index 2c07f97666c..e964a638073 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -104,7 +104,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "36b14963da70d11297d313183d7e6388c8510e1e" + "Rev": "13f86432b882000a51c6e610c620974462691a97" }, { "ImportPath": "github.com/juju/ratelimit", diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index 7fcd03d546e..3281bf2fb31 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -232,7 +232,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "36b14963da70d11297d313183d7e6388c8510e1e" + "Rev": "13f86432b882000a51c6e610c620974462691a97" }, { "ImportPath": "github.com/juju/ratelimit", diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index 11ddb94e1a2..5c0249ded73 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -128,7 
+128,7 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "36b14963da70d11297d313183d7e6388c8510e1e" + "Rev": "13f86432b882000a51c6e610c620974462691a97" }, { "ImportPath": "github.com/juju/ratelimit", From 18d24d8303e2078260ec34ca8fb4b1a83bb585d7 Mon Sep 17 00:00:00 2001 From: Yu Liao Date: Wed, 13 Dec 2017 12:39:37 -0800 Subject: [PATCH 291/794] added more description for flag '--watch-cache-sizes' to make the format of the flag clearer. --- staging/src/k8s.io/apiserver/pkg/server/options/etcd.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index 9bfa3a0a778..4d5d1bc22ad 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -119,8 +119,9 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ "List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. "+ - "The individual override format: resource#size, where size is a number. It takes effect "+ - "when watch-cache is enabled. Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices) "+ + "The individual override format: resource[.group]#size, where resource is lowercase plural (no version), "+ + "group is optional, and size is a number. It takes effect when watch-cache is enabled. 
"+ + "Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) "+ "have system defaults set by heuristics, others default to default-watch-cache-size") fs.StringVar(&s.StorageConfig.Type, "storage-backend", s.StorageConfig.Type, From bbcf59bde00a487be84cd3e5bcb30277947b2090 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Tue, 12 Dec 2017 17:30:27 -0800 Subject: [PATCH 292/794] gce/upgrade.sh: Prompt if etcd version is unspecified. We shouldn't upgrade etcd without first warning the user that some etcd version transitions can't be undone. We don't know what version the user currently has, so we require either an explicit version and image, or an interactive acknowledgement of this caveat. This is modeled after the STORAGE_MEDIA_TYPE prompt just above. --- cluster/gce/upgrade.sh | 33 ++++++++++++++++++++++++++++++++ test/e2e/framework/nodes_util.go | 4 ++++ 2 files changed, 37 insertions(+) diff --git a/cluster/gce/upgrade.sh b/cluster/gce/upgrade.sh index 452d8cfdf89..82ea8f9cb4b 100755 --- a/cluster/gce/upgrade.sh +++ b/cluster/gce/upgrade.sh @@ -537,6 +537,39 @@ if [[ -z "${STORAGE_MEDIA_TYPE:-}" ]] && [[ "${STORAGE_BACKEND:-}" != "etcd2" ]] fi fi +# Prompt if etcd image/version is unspecified when doing master upgrade. +# In e2e tests, we use TEST_ALLOW_IMPLICIT_ETCD_UPGRADE=true to skip this +# prompt, simulating the behavior when the user confirms interactively. +# All other automated use of this script should explicitly specify a version. +if [[ "${master_upgrade}" == "true" ]]; then + if [[ -z "${ETCD_IMAGE:-}" && -z "${TEST_ETCD_IMAGE:-}" ]] || [[ -z "${ETCD_VERSION:-}" && -z "${TEST_ETCD_VERSION:-}" ]]; then + echo + echo "***WARNING***" + echo "Upgrading Kubernetes with this script might result in an upgrade to a new etcd version." + echo "Some etcd version upgrades, such as 3.0.x to 3.1.x, DO NOT offer a downgrade path." + echo "To pin the etcd version to your current one (e.g. 
v3.0.17), set the following variables" + echo "before running this script:" + echo + echo "# example: pin to etcd v3.0.17" + echo "export ETCD_IMAGE=3.0.17" + echo "export ETCD_VERSION=3.0.17" + echo + echo "Alternatively, if you choose to allow an etcd upgrade that doesn't support downgrade," + echo "you might still be able to downgrade Kubernetes by pinning to the newer etcd version." + echo "In all cases, it is strongly recommended to have an etcd backup before upgrading." + echo + if [ -t 0 ] && [ -t 1 ]; then + read -p "Continue with default etcd version, which might upgrade etcd? [y/N] " confirm + if [[ "${confirm}" != "y" ]]; then + exit 1 + fi + elif [[ "${TEST_ALLOW_IMPLICIT_ETCD_UPGRADE:-}" != "true" ]]; then + echo "ETCD_IMAGE and ETCD_VERSION must be specified when run non-interactively." >&2 + exit 1 + fi + fi +fi + print-node-version-info "Pre-Upgrade" if [[ "${local_binaries}" == "false" ]]; then diff --git a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go index 2908d9688fa..f24f85fcc3c 100644 --- a/test/e2e/framework/nodes_util.go +++ b/test/e2e/framework/nodes_util.go @@ -78,6 +78,10 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error { "TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion, "STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage, "TEST_ETCD_IMAGE=3.1.10") + } else { + // In e2e tests, we skip the confirmation prompt about + // implicit etcd upgrades to simulate the user entering "y". 
+ env = append(env, "TEST_ALLOW_IMPLICIT_ETCD_UPGRADE=true") } v := "v" + rawV From cb6df34bef6444eee84f1bf69cb0b7230bb34a86 Mon Sep 17 00:00:00 2001 From: Yongkun Anfernee Gui Date: Wed, 29 Nov 2017 18:00:03 -0800 Subject: [PATCH 293/794] Test probe for redirect endpoint Make sure #18233 works --- pkg/probe/http/http_test.go | 42 +++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/pkg/probe/http/http_test.go b/pkg/probe/http/http_test.go index 6e9efb86d29..d06b8937569 100644 --- a/pkg/probe/http/http_test.go +++ b/pkg/probe/http/http_test.go @@ -52,6 +52,16 @@ func TestHTTPProbeChecker(t *testing.T) { w.Write([]byte(output)) } + redirectHandler := func(s int, bad bool) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/" { + http.Redirect(w, r, "/new", s) + } else if bad && r.URL.Path == "/new" { + w.WriteHeader(http.StatusInternalServerError) + } + } + } + prober := New() testCases := []struct { handler func(w http.ResponseWriter, r *http.Request) @@ -122,6 +132,38 @@ func TestHTTPProbeChecker(t *testing.T) { }, health: probe.Failure, }, + { + handler: redirectHandler(http.StatusMovedPermanently, false), // 301 + health: probe.Success, + }, + { + handler: redirectHandler(http.StatusMovedPermanently, true), // 301 + health: probe.Failure, + }, + { + handler: redirectHandler(http.StatusFound, false), // 302 + health: probe.Success, + }, + { + handler: redirectHandler(http.StatusFound, true), // 302 + health: probe.Failure, + }, + { + handler: redirectHandler(http.StatusTemporaryRedirect, false), // 307 + health: probe.Success, + }, + { + handler: redirectHandler(http.StatusTemporaryRedirect, true), // 307 + health: probe.Failure, + }, + { + handler: redirectHandler(http.StatusPermanentRedirect, false), // 308 + health: probe.Success, + }, + { + handler: redirectHandler(http.StatusPermanentRedirect, true), // 308 + health: probe.Failure, + }, } for i, test := 
range testCases { func() { From bea2d8d1aabd5a7ec3274b08cee7911e9abb2ba0 Mon Sep 17 00:00:00 2001 From: Manuel de Brito Fontes Date: Sat, 2 Dec 2017 14:49:39 -0300 Subject: [PATCH 294/794] Fix NLB icmp permission duplication --- .../providers/aws/aws_loadbalancer.go | 88 ++++++++++++++----- 1 file changed, 66 insertions(+), 22 deletions(-) diff --git a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index 6baaca2ac6c..438b5617f19 100644 --- a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -662,7 +662,6 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se if clientTraffic { clientRuleAnnotation := fmt.Sprintf("%s=%s", NLBClientRuleDescription, lbName) - mtuRuleAnnotation := fmt.Sprintf("%s=%s", NLBMtuDiscoveryRuleDescription, lbName) // Client Traffic permission := &ec2.IpPermission{ FromPort: aws.Int64(port), @@ -682,26 +681,6 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se } else { removes = append(removes, permission) } - - // MTU discovery - permission = &ec2.IpPermission{ - IpProtocol: aws.String("icmp"), - FromPort: aws.Int64(3), - ToPort: aws.Int64(4), - } - ranges = []*ec2.IpRange{} - for _, cidr := range clientCidrs { - ranges = append(ranges, &ec2.IpRange{ - CidrIp: aws.String(cidr), - Description: aws.String(mtuRuleAnnotation), - }) - } - permission.IpRanges = ranges - if add { - adds = append(adds, permission) - } else { - removes = append(removes, permission) - } } else { healthRuleAnnotation := fmt.Sprintf("%s=%s", NLBHealthCheckRuleDescription, lbName) @@ -725,8 +704,8 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se removes = append(removes, permission) } } - } + if len(adds) > 0 { changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, adds) if err != nil { @@ -736,6 +715,7 @@ func (c *Cloud) 
updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } + if len(removes) > 0 { changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, removes) if err != nil { @@ -745,6 +725,70 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } + + if clientTraffic { + // MTU discovery + mtuRuleAnnotation := fmt.Sprintf("%s=%s", NLBMtuDiscoveryRuleDescription, lbName) + mtuPermission := &ec2.IpPermission{ + IpProtocol: aws.String("icmp"), + FromPort: aws.Int64(3), + ToPort: aws.Int64(4), + } + ranges := []*ec2.IpRange{} + for _, cidr := range clientCidrs { + ranges = append(ranges, &ec2.IpRange{ + CidrIp: aws.String(cidr), + Description: aws.String(mtuRuleAnnotation), + }) + } + mtuPermission.IpRanges = ranges + + group, err := c.findSecurityGroup(instanceSecurityGroupID) + if err != nil { + glog.Warningf("Error retrieving security group: %q", err) + return err + } + + if group == nil { + glog.Warning("Security group not found: ", instanceSecurityGroupID) + return nil + } + + icmpExists := false + permCount := 0 + for _, perm := range group.IpPermissions { + if *perm.IpProtocol == "icmp" { + icmpExists = true + continue + } + + if perm.FromPort != nil { + permCount++ + } + } + + if !icmpExists && permCount > 0 { + // the icmp permission is missing + changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission}) + if err != nil { + glog.Warningf("Error adding MTU permission to security group: %q", err) + return err + } + if !changed { + glog.Warning("Allowing ingress was not needed; concurrent change? 
groupId=", instanceSecurityGroupID) + } + } else if icmpExists && permCount == 0 { + // there is no additional permissions, remove icmp + changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission}) + if err != nil { + glog.Warningf("Error removing MTU permission to security group: %q", err) + return err + } + if !changed { + glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + } + } + } } return nil } From cca35ae141544e04cdf9e7c3ace201bc58091c5e Mon Sep 17 00:00:00 2001 From: Tim Allclair Date: Wed, 13 Dec 2017 15:45:24 -0800 Subject: [PATCH 295/794] Fix admission metrics tests --- .../pkg/admission/metrics/metrics_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go b/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go index dff2598ddb4..859e3d30eb9 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go @@ -65,7 +65,7 @@ func TestObserveAdmissionController(t *testing.T) { "version": resource.Version, "resource": resource.Resource, "subresource": "subresource", - "type": "validate", + "type": "admit", "rejected": "false", } expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", wantLabels, 1) @@ -133,7 +133,7 @@ func TestWithMetrics(t *testing.T) { "validate-interfaces-dont-validate", "some-ns", admission.Create, - &validatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), true}, + &validatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), false}, true, false, }, { @@ -148,7 +148,7 @@ func TestWithMetrics(t *testing.T) { "some-ns", admission.Create, &mutatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), false}, - true, false, + false, true, }, } { 
Metrics.reset() @@ -165,7 +165,7 @@ func TestWithMetrics(t *testing.T) { continue } - filter := map[string]string{"rejected": "false"} + filter := map[string]string{"type": "admit", "rejected": "false"} if !test.admit { filter["rejected"] = "true" } @@ -175,7 +175,7 @@ func TestWithMetrics(t *testing.T) { expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", filter, 0) } - if err == nil { + if err != nil { // skip validation step if mutation failed continue } @@ -190,8 +190,8 @@ func TestWithMetrics(t *testing.T) { continue } - filter = map[string]string{"rejected": "false"} - if !test.admit { + filter = map[string]string{"type": "validate", "rejected": "false"} + if !test.validate { filter["rejected"] = "true" } if _, validating := test.handler.(admission.ValidationInterface); validating { @@ -239,7 +239,7 @@ type mutatingFakeHandler struct { admit bool } -func (h *mutatingFakeHandler) Amit(a admission.Attributes) (err error) { +func (h *mutatingFakeHandler) Admit(a admission.Attributes) (err error) { if h.admit { return nil } From da610ecbb9ed10b5108eb9a00db392c95f7c005f Mon Sep 17 00:00:00 2001 From: Casey Davenport Date: Tue, 12 Dec 2017 17:39:34 -0800 Subject: [PATCH 296/794] Schedule Calico components even on tainted nodes --- .../calico-policy-controller/calico-node-daemonset.yaml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml b/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml index 5c4b69f7140..2b2d31e1df8 100644 --- a/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml +++ b/cluster/addons/calico-policy-controller/calico-node-daemonset.yaml @@ -149,5 +149,10 @@ spec: hostPath: path: /etc/cni/net.d tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" + # Make sure calico/node gets scheduled on all nodes. 
+ - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + - key: CriticalAddonsOnly + operator: Exists From bd4e89cf5d149fd8cf0fd9e48adfdb2f52555b2e Mon Sep 17 00:00:00 2001 From: Tim Pepper Date: Tue, 28 Nov 2017 14:17:35 -0800 Subject: [PATCH 297/794] kubectl: point info url to user guide overview The kubectl command output suggests a user find more information at the github source repo, eg: $ kubectl --help kubectl controls the Kubernetes cluster manager. Find more information at https://github.com/kubernetes/kubernetes. But there is curated user documentation available at, eg: https://kubernetes.io/docs/reference/kubectl/overview/ which upon referencing yields a much better experience for a user than the top of the source repo. Fixes #56511 Signed-off-by: Tim Pepper --- pkg/kubectl/cmd/cmd.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index 782fa37c2b1..6195c7c8da1 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -221,7 +221,8 @@ func NewKubectlCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cob Long: templates.LongDesc(` kubectl controls the Kubernetes cluster manager. 
- Find more information at https://github.com/kubernetes/kubernetes.`), + Find more information at: + https://kubernetes.io/docs/reference/kubectl/overview/`), Run: runHelp, BashCompletionFunction: bashCompletionFunc, } From 48684133ec7b3fb28c0af9e108e16b41ba179a6a Mon Sep 17 00:00:00 2001 From: Ian Chakeres Date: Sun, 10 Dec 2017 21:55:25 -0800 Subject: [PATCH 298/794] Modified local-volume provisioner e2e tests to use bind mounts --- test/e2e/storage/persistent_volumes-local.go | 35 ++++++++++++++++---- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 5dd4cba67d9..d689ad27d20 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -331,9 +331,11 @@ var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [S setupLocalVolumeProvisioner(config) volumePath = path.Join( hostBase, discoveryDir, fmt.Sprintf("vol-%v", string(uuid.NewUUID()))) + setupLocalVolumeProvisionerMountPoint(config, volumePath) }) AfterEach(func() { + cleanupLocalVolumeProvisionerMountPoint(config, volumePath) cleanupLocalVolumeProvisioner(config, volumePath) cleanupStorageClass(config) }) @@ -344,11 +346,6 @@ var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [S kind := schema.GroupKind{Group: "extensions", Kind: "DaemonSet"} framework.WaitForControlledPodsRunning(config.client, config.ns, daemonSetName, kind) - By("Creating a directory under discovery path") - framework.Logf("creating local volume under path %q", volumePath) - mkdirCmd := fmt.Sprintf("mkdir %v -m 777", volumePath) - err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, config.node0) - Expect(err).NotTo(HaveOccurred()) By("Waiting for a PersitentVolume to be created") oldPV, err := waitForLocalPersistentVolume(config.client, volumePath) Expect(err).NotTo(HaveOccurred()) @@ -868,7 +865,7 @@ func 
setupLocalVolumeProvisioner(config *localTestConfig) { createVolumeConfigMap(config) By("Initializing local volume discovery base path") - mkdirCmd := fmt.Sprintf("mkdir %v -m 777", path.Join(hostBase, discoveryDir)) + mkdirCmd := fmt.Sprintf("mkdir -p %v -m 777", path.Join(hostBase, discoveryDir)) err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, config.node0) Expect(err).NotTo(HaveOccurred()) } @@ -877,7 +874,7 @@ func cleanupLocalVolumeProvisioner(config *localTestConfig, volumePath string) { By("Cleaning up cluster role binding") deleteClusterRoleBinding(config) - By("Removing the test directory") + By("Removing the test discovery directory") removeCmd := fmt.Sprintf("rm -r %s", path.Join(hostBase, discoveryDir)) err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, config.node0) Expect(err).NotTo(HaveOccurred()) @@ -889,6 +886,30 @@ func cleanupLocalVolumeProvisioner(config *localTestConfig, volumePath string) { Expect(err).NotTo(HaveOccurred()) } +func setupLocalVolumeProvisionerMountPoint(config *localTestConfig, volumePath string) { + By(fmt.Sprintf("Creating local directory at path %q", volumePath)) + mkdirCmd := fmt.Sprintf("mkdir %v -m 777", volumePath) + err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, config.node0) + Expect(err).NotTo(HaveOccurred()) + + By(fmt.Sprintf("Mounting local directory at path %q", volumePath)) + mntCmd := fmt.Sprintf("sudo mount --bind %v %v", volumePath, volumePath) + err = framework.IssueSSHCommand(mntCmd, framework.TestContext.Provider, config.node0) + Expect(err).NotTo(HaveOccurred()) +} + +func cleanupLocalVolumeProvisionerMountPoint(config *localTestConfig, volumePath string) { + By(fmt.Sprintf("Unmounting the test mount point from %q", volumePath)) + umountCmd := fmt.Sprintf("sudo umount %v", volumePath) + err := framework.IssueSSHCommand(umountCmd, framework.TestContext.Provider, config.node0) + Expect(err).NotTo(HaveOccurred()) + + By("Removing 
the test mount point") + removeCmd := fmt.Sprintf("rm -r %s", volumePath) + err = framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, config.node0) + +} + func createServiceAccount(config *localTestConfig) { serviceAccount := v1.ServiceAccount{ TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ServiceAccount"}, From 6c6a6b6561eedcb4f7bfd0320cc7a6140ec0ce7d Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Wed, 13 Dec 2017 17:45:47 -0800 Subject: [PATCH 299/794] GCE: bump COS image version to cos-stable-63-10032-71-0 --- cluster/gce/config-default.sh | 2 +- cluster/gce/config-test.sh | 2 +- cluster/kubemark/gce/config-default.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index c503c034e16..bd3fe8ceaf9 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -80,7 +80,7 @@ fi # Also please update corresponding image for node e2e at: # https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml CVM_VERSION=${CVM_VERSION:-container-vm-v20170627} -GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-60-9592-90-0} +GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-63-10032-71-0} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-} MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud} NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}} diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 12a45ddad8d..91f9bcbcc1f 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -74,7 +74,7 @@ fi # Also please update corresponding image for node e2e at: # https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml CVM_VERSION=${CVM_VERSION:-container-vm-v20170627} -GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-60-9592-90-0} +GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-63-10032-71-0} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-} MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud} 
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}} diff --git a/cluster/kubemark/gce/config-default.sh b/cluster/kubemark/gce/config-default.sh index 94b7dfb3464..9d159603a94 100644 --- a/cluster/kubemark/gce/config-default.sh +++ b/cluster/kubemark/gce/config-default.sh @@ -37,7 +37,7 @@ EVENT_PD=${EVENT_PD:-false} MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-gci} NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-gci} -MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-cos-stable-60-9592-90-0} +MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-cos-stable-63-10032-71-0} MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud} # GPUs supported in GCE do not have compatible drivers in Debian 7. From 0acf12a4e82f89fbc0648c5e41078ce46924f667 Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Tue, 14 Nov 2017 18:32:57 -0800 Subject: [PATCH 300/794] Move 'DefaultTerminationGracePeriodSeconds' into a separate const group --- staging/src/k8s.io/api/core/v1/types.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 728cbd5a62b..136c09547bd 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -2433,7 +2433,11 @@ const ( // parameters such as nameservers and search paths should be defined via // DNSConfig. DNSNone DNSPolicy = "None" +) +const ( + // DefaultTerminationGracePeriodSeconds indicates the default duration in + // seconds a pod needs to terminate gracefully. 
DefaultTerminationGracePeriodSeconds = 30 ) From c4e63cb77787bba92a9ee911b10af2f9075c6e34 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Wed, 13 Dec 2017 21:56:18 -0500 Subject: [PATCH 301/794] gce: split legacy kubelet node role binding and bootstrapper role binding --- .../kubelet-binding.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cluster/addons/rbac/legacy-kubelet-user-disable/kubelet-binding.yaml b/cluster/addons/rbac/legacy-kubelet-user-disable/kubelet-binding.yaml index 1d1832763b4..4cd7174eafc 100644 --- a/cluster/addons/rbac/legacy-kubelet-user-disable/kubelet-binding.yaml +++ b/cluster/addons/rbac/legacy-kubelet-user-disable/kubelet-binding.yaml @@ -7,6 +7,20 @@ metadata: labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: EnsureExists +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node +subjects: [] +--- +# This is required so that new clusters still have bootstrap permissions +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubelet-bootstrap + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole From d073c10dbc47ebccc057ccd31e9bb427ed8b5067 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 27 Nov 2017 23:19:38 -0500 Subject: [PATCH 302/794] Refactor flex pv to allow secret namespace --- pkg/api/persistentvolume/util.go | 12 ++- pkg/api/persistentvolume/util_test.go | 18 ++++- pkg/apis/core/types.go | 28 ++++++- pkg/apis/core/validation/validation.go | 23 +++++- pkg/apis/core/validation/validation_test.go | 2 +- pkg/printers/internalversion/describe.go | 12 ++- pkg/volume/flexvolume/attacher-defaults.go | 13 ++- pkg/volume/flexvolume/common_test.go | 2 +- pkg/volume/flexvolume/driver-call.go | 21 ++++- pkg/volume/flexvolume/flexvolume_test.go | 2 +- pkg/volume/flexvolume/plugin.go | 20 ++++- 
pkg/volume/flexvolume/util.go | 80 ++++++++++++++++--- .../authorizer/node/node_authorizer_test.go | 4 +- staging/src/k8s.io/api/core/v1/types.go | 28 ++++++- 14 files changed, 231 insertions(+), 34 deletions(-) diff --git a/pkg/api/persistentvolume/util.go b/pkg/api/persistentvolume/util.go index fe6dc9b2c6f..76b2b1c51c6 100644 --- a/pkg/api/persistentvolume/util.go +++ b/pkg/api/persistentvolume/util.go @@ -62,8 +62,16 @@ func VisitPVSecretNames(pv *api.PersistentVolume, visitor Visitor) bool { } } case source.FlexVolume != nil: - if source.FlexVolume.SecretRef != nil && !visitor(getClaimRefNamespace(pv), source.FlexVolume.SecretRef.Name) { - return false + if source.FlexVolume.SecretRef != nil { + // previously persisted PV objects use claimRef namespace + ns := getClaimRefNamespace(pv) + if len(source.FlexVolume.SecretRef.Namespace) > 0 { + // use the secret namespace if namespace is set + ns = source.FlexVolume.SecretRef.Namespace + } + if !visitor(ns, source.FlexVolume.SecretRef.Name) { + return false + } } case source.RBD != nil: if source.RBD.SecretRef != nil { diff --git a/pkg/api/persistentvolume/util_test.go b/pkg/api/persistentvolume/util_test.go index ef8932d3403..ca2d827eb7d 100644 --- a/pkg/api/persistentvolume/util_test.go +++ b/pkg/api/persistentvolume/util_test.go @@ -61,8 +61,15 @@ func TestPVSecrets(t *testing.T) { {Spec: api.PersistentVolumeSpec{ ClaimRef: &api.ObjectReference{Namespace: "claimrefns", Name: "claimrefname"}, PersistentVolumeSource: api.PersistentVolumeSource{ - FlexVolume: &api.FlexVolumeSource{ - SecretRef: &api.LocalObjectReference{ + FlexVolume: &api.FlexPersistentVolumeSource{ + SecretRef: &api.SecretReference{ + Name: "Spec.PersistentVolumeSource.FlexVolume.SecretRef", + Namespace: "flexns"}}}}}, + {Spec: api.PersistentVolumeSpec{ + ClaimRef: &api.ObjectReference{Namespace: "claimrefns", Name: "claimrefname"}, + PersistentVolumeSource: api.PersistentVolumeSource{ + FlexVolume: &api.FlexPersistentVolumeSource{ + SecretRef: 
&api.SecretReference{ Name: "Spec.PersistentVolumeSource.FlexVolume.SecretRef"}}}}}, {Spec: api.PersistentVolumeSpec{ ClaimRef: &api.ObjectReference{Namespace: "claimrefns", Name: "claimrefname"}, @@ -160,15 +167,22 @@ func TestPVSecrets(t *testing.T) { expectedNamespacedNames := sets.NewString( "claimrefns/Spec.PersistentVolumeSource.AzureFile.SecretName", "Spec.PersistentVolumeSource.AzureFile.SecretNamespace/Spec.PersistentVolumeSource.AzureFile.SecretName", + "claimrefns/Spec.PersistentVolumeSource.CephFS.SecretRef", "cephfs/Spec.PersistentVolumeSource.CephFS.SecretRef", + "claimrefns/Spec.PersistentVolumeSource.FlexVolume.SecretRef", + "flexns/Spec.PersistentVolumeSource.FlexVolume.SecretRef", + "claimrefns/Spec.PersistentVolumeSource.RBD.SecretRef", "rbdns/Spec.PersistentVolumeSource.RBD.SecretRef", + "claimrefns/Spec.PersistentVolumeSource.ScaleIO.SecretRef", "scaleions/Spec.PersistentVolumeSource.ScaleIO.SecretRef", + "claimrefns/Spec.PersistentVolumeSource.ISCSI.SecretRef", "iscsi/Spec.PersistentVolumeSource.ISCSI.SecretRef", + "storageosns/Spec.PersistentVolumeSource.StorageOS.SecretRef", ) if missingNames := expectedNamespacedNames.Difference(extractedNamesWithNamespace); len(missingNames) > 0 { diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index b6b570b06e4..0b9c2870b5f 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -354,7 +354,7 @@ type PersistentVolumeSource struct { // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. // +optional - FlexVolume *FlexVolumeSource + FlexVolume *FlexPersistentVolumeSource // Cinder represents a cinder volume attached and mounted on kubelets host machine // +optional Cinder *CinderVolumeSource @@ -867,6 +867,32 @@ type FCVolumeSource struct { WWIDs []string } +// FlexPersistentVolumeSource represents a generic persistent volume resource that is +// provisioned/attached using an exec based plugin. 
+type FlexPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *SecretReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: Extra driver options if any. + // +optional + Options map[string]string +} + // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. type FlexVolumeSource struct { diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 655d0bf2163..048f810d3fb 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -1257,6 +1257,27 @@ func validateFlexVolumeSource(fv *core.FlexVolumeSource, fldPath *field.Path) fi return allErrs } +func validateFlexPersistentVolumeSource(fv *core.FlexPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(fv.Driver) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) + } + + // Make sure user-specified options don't use kubernetes namespaces + for k := range fv.Options { + namespace := k + if parts := strings.SplitN(k, "/", 2); len(parts) == 2 { + namespace = parts[0] + } + normalized := "." 
+ strings.ToLower(namespace) + if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved")) + } + } + + return allErrs +} + func validateAzureFile(azure *core.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if azure.SecretName == "" { @@ -1588,7 +1609,7 @@ func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList { } if pv.Spec.FlexVolume != nil { numVolumes++ - allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...) + allErrs = append(allErrs, validateFlexPersistentVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...) } if pv.Spec.AzureFile != nil { if numVolumes > 0 { diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 25dd774ab86..9ebe91b57c0 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -469,7 +469,7 @@ func TestValidatePersistentVolumeSourceUpdate(t *testing.T) { validPvSourceNoUpdate := validVolume.DeepCopy() invalidPvSourceUpdateType := validVolume.DeepCopy() invalidPvSourceUpdateType.Spec.PersistentVolumeSource = core.PersistentVolumeSource{ - FlexVolume: &core.FlexVolumeSource{ + FlexVolume: &core.FlexPersistentVolumeSource{ Driver: "kubernetes.io/blue", FSType: "ext4", }, diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index c33b1c636ce..fc20117e2d3 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -1076,6 +1076,16 @@ func printAzureFilePersistentVolumeSource(azureFile *api.AzureFilePersistentVolu azureFile.SecretName, ns, azureFile.ShareName, azureFile.ReadOnly) } +func printFlexPersistentVolumeSource(flex *api.FlexPersistentVolumeSource, w 
PrefixWriter) { + w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+ + " Driver:\t%v\n"+ + " FSType:\t%v\n"+ + " SecretRef:\t%v\n"+ + " ReadOnly:\t%v\n", + " Options:\t%v\n", + flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) +} + func printFlexVolumeSource(flex *api.FlexVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+ " Driver:\t%v\n"+ @@ -1184,7 +1194,7 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) ( case pv.Spec.AzureFile != nil: printAzureFilePersistentVolumeSource(pv.Spec.AzureFile, w) case pv.Spec.FlexVolume != nil: - printFlexVolumeSource(pv.Spec.FlexVolume, w) + printFlexPersistentVolumeSource(pv.Spec.FlexVolume, w) case pv.Spec.Flocker != nil: printFlockerVolumeSource(pv.Spec.Flocker, w) case pv.Spec.CSI != nil: diff --git a/pkg/volume/flexvolume/attacher-defaults.go b/pkg/volume/flexvolume/attacher-defaults.go index bf8dcfe8254..e578443c47b 100644 --- a/pkg/volume/flexvolume/attacher-defaults.go +++ b/pkg/volume/flexvolume/attacher-defaults.go @@ -48,7 +48,16 @@ func (a *attacherDefaults) GetDeviceMountPath(spec *volume.Spec, mountsDir strin // MountDevice is part of the volume.Attacher interface func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error { glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name, ", device ", devicePath, ", deviceMountPath ", deviceMountPath) - volSource, readOnly := getVolumeSource(spec) + + volSourceFSType, err := getFSType(spec) + if err != nil { + return err + } + + readOnly, err := getReadOnly(spec) + if err != nil { + return err + } options := make([]string, 0) @@ -60,5 +69,5 @@ func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, dev 
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: a.plugin.host.GetExec(a.plugin.GetPluginName())} - return diskMounter.FormatAndMount(devicePath, deviceMountPath, volSource.FSType, options) + return diskMounter.FormatAndMount(devicePath, deviceMountPath, volSourceFSType, options) } diff --git a/pkg/volume/flexvolume/common_test.go b/pkg/volume/flexvolume/common_test.go index 079b32adac6..9300c2a7650 100644 --- a/pkg/volume/flexvolume/common_test.go +++ b/pkg/volume/flexvolume/common_test.go @@ -119,7 +119,7 @@ func fakePersistentVolumeSpec() *volume.Spec { }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ - FlexVolume: &v1.FlexVolumeSource{ + FlexVolume: &v1.FlexPersistentVolumeSource{ Driver: "kubernetes.io/fakeAttacher", ReadOnly: false, }, diff --git a/pkg/volume/flexvolume/driver-call.go b/pkg/volume/flexvolume/driver-call.go index e3e4527ddc9..98e5640224e 100644 --- a/pkg/volume/flexvolume/driver-call.go +++ b/pkg/volume/flexvolume/driver-call.go @@ -162,10 +162,25 @@ func (dc *DriverCall) Run() (*DriverStatus, error) { type OptionsForDriver map[string]string func NewOptionsForDriver(spec *volume.Spec, host volume.VolumeHost, extraOptions map[string]string) (OptionsForDriver, error) { - volSource, readOnly := getVolumeSource(spec) + + volSourceFSType, err := getFSType(spec) + if err != nil { + return nil, err + } + + readOnly, err := getReadOnly(spec) + if err != nil { + return nil, err + } + + volSourceOptions, err := getOptions(spec) + if err != nil { + return nil, err + } + options := map[string]string{} - options[optionFSType] = volSource.FSType + options[optionFSType] = volSourceFSType if readOnly { options[optionReadWrite] = "ro" @@ -179,7 +194,7 @@ func NewOptionsForDriver(spec *volume.Spec, host volume.VolumeHost, extraOptions options[key] = value } - for key, value := range volSource.Options { + for key, value := range volSourceOptions { options[key] = value } diff --git 
a/pkg/volume/flexvolume/flexvolume_test.go b/pkg/volume/flexvolume/flexvolume_test.go index 4b0b8b95a10..c90ad49322d 100644 --- a/pkg/volume/flexvolume/flexvolume_test.go +++ b/pkg/volume/flexvolume/flexvolume_test.go @@ -185,7 +185,7 @@ func TestCanSupport(t *testing.T) { if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}) { t.Errorf("Expected true") } - if !plugin.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}}) { + if !plugin.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{FlexVolume: &v1.FlexPersistentVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}}) { t.Errorf("Expected true") } if plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) { diff --git a/pkg/volume/flexvolume/plugin.go b/pkg/volume/flexvolume/plugin.go index ba46e2c52d1..4c201f3a6ab 100644 --- a/pkg/volume/flexvolume/plugin.go +++ b/pkg/volume/flexvolume/plugin.go @@ -132,8 +132,11 @@ func (plugin *flexVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) // CanSupport is part of the volume.VolumePlugin interface. func (plugin *flexVolumePlugin) CanSupport(spec *volume.Spec) bool { - source, _ := getVolumeSource(spec) - return (source != nil) && (source.Driver == plugin.driverName) + sourceDriver, err := getDriver(spec) + if err != nil { + return false + } + return sourceDriver == plugin.driverName } // RequiresRemount is part of the volume.VolumePlugin interface. @@ -156,10 +159,19 @@ func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ vo // newMounterInternal is the internal mounter routine to build the volume. 
func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface, runner exec.Interface) (volume.Mounter, error) { - source, readOnly := getVolumeSource(spec) + sourceDriver, err := getDriver(spec) + if err != nil { + return nil, err + } + + readOnly, err := getReadOnly(spec) + if err != nil { + return nil, err + } + return &flexVolumeMounter{ flexVolume: &flexVolume{ - driverName: source.Driver, + driverName: sourceDriver, execPath: plugin.getExecutable(), mounter: mounter, plugin: plugin, diff --git a/pkg/volume/flexvolume/util.go b/pkg/volume/flexvolume/util.go index bc86e1a60da..b706712101c 100644 --- a/pkg/volume/flexvolume/util.go +++ b/pkg/volume/flexvolume/util.go @@ -22,15 +22,18 @@ import ( "os" "github.com/golang/glog" - api "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) func addSecretsToOptions(options map[string]string, spec *volume.Spec, namespace string, driverName string, host volume.VolumeHost) error { - fv, _ := getVolumeSource(spec) - if fv.SecretRef == nil { + secretName, secretNamespace, err := getSecretNameAndNamespace(spec, namespace) + if err != nil { + return err + } + + if len(secretName) == 0 || len(secretNamespace) == 0 { return nil } @@ -39,9 +42,9 @@ func addSecretsToOptions(options map[string]string, spec *volume.Spec, namespace return fmt.Errorf("Cannot get kube client") } - secrets, err := util.GetSecretForPV(namespace, fv.SecretRef.Name, driverName, host.GetKubeClient()) + secrets, err := util.GetSecretForPV(secretNamespace, secretName, driverName, host.GetKubeClient()) if err != nil { - err = fmt.Errorf("Couldn't get secret %v/%v err: %v", namespace, fv.SecretRef.Name, err) + err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNamespace, secretName, err) return err } for name, data := range secrets { @@ -52,15 +55,68 @@ func addSecretsToOptions(options map[string]string, spec *volume.Spec, namespace 
return nil } -func getVolumeSource(spec *volume.Spec) (volumeSource *api.FlexVolumeSource, readOnly bool) { +var notFlexVolume = fmt.Errorf("not a flex volume") + +func getDriver(spec *volume.Spec) (string, error) { if spec.Volume != nil && spec.Volume.FlexVolume != nil { - volumeSource = spec.Volume.FlexVolume - readOnly = volumeSource.ReadOnly - } else if spec.PersistentVolume != nil { - volumeSource = spec.PersistentVolume.Spec.FlexVolume - readOnly = spec.ReadOnly + return spec.Volume.FlexVolume.Driver, nil } - return + if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil { + return spec.PersistentVolume.Spec.FlexVolume.Driver, nil + } + return "", notFlexVolume +} + +func getFSType(spec *volume.Spec) (string, error) { + if spec.Volume != nil && spec.Volume.FlexVolume != nil { + return spec.Volume.FlexVolume.FSType, nil + } + if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil { + return spec.PersistentVolume.Spec.FlexVolume.FSType, nil + } + return "", notFlexVolume +} + +func getSecretNameAndNamespace(spec *volume.Spec, podNamespace string) (string, string, error) { + if spec.Volume != nil && spec.Volume.FlexVolume != nil { + if spec.Volume.FlexVolume.SecretRef == nil { + return "", "", nil + } + return spec.Volume.FlexVolume.SecretRef.Name, podNamespace, nil + } + if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil { + if spec.PersistentVolume.Spec.FlexVolume.SecretRef == nil { + return "", "", nil + } + secretName := spec.PersistentVolume.Spec.FlexVolume.SecretRef.Name + secretNamespace := spec.PersistentVolume.Spec.FlexVolume.SecretRef.Namespace + if len(secretNamespace) == 0 { + secretNamespace = podNamespace + } + return secretName, secretNamespace, nil + } + return "", "", notFlexVolume +} + +func getReadOnly(spec *volume.Spec) (bool, error) { + if spec.Volume != nil && spec.Volume.FlexVolume != nil { + return spec.Volume.FlexVolume.ReadOnly, nil + } + if 
spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil { + // ReadOnly is specified at the PV level + return spec.ReadOnly, nil + } + return false, notFlexVolume +} + +func getOptions(spec *volume.Spec) (map[string]string, error) { + if spec.Volume != nil && spec.Volume.FlexVolume != nil { + return spec.Volume.FlexVolume.Options, nil + } + if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil { + return spec.PersistentVolume.Spec.FlexVolume.Options, nil + } + return nil, notFlexVolume } func prepareForMount(mounter mount.Interface, deviceMountPath string) (bool, error) { diff --git a/plugin/pkg/auth/authorizer/node/node_authorizer_test.go b/plugin/pkg/auth/authorizer/node/node_authorizer_test.go index b6410dce989..4f5f731bda0 100644 --- a/plugin/pkg/auth/authorizer/node/node_authorizer_test.go +++ b/plugin/pkg/auth/authorizer/node/node_authorizer_test.go @@ -410,7 +410,7 @@ func generate(opts sampleDataOpts) ([]*api.Pod, []*api.PersistentVolume) { for i := 0; i < opts.uniquePVCsPerPod; i++ { pv := &api.PersistentVolume{} pv.Name = fmt.Sprintf("pv%d-%s-%s", i, pod.Name, pod.Namespace) - pv.Spec.FlexVolume = &api.FlexVolumeSource{SecretRef: &api.LocalObjectReference{Name: fmt.Sprintf("secret-%s", pv.Name)}} + pv.Spec.FlexVolume = &api.FlexPersistentVolumeSource{SecretRef: &api.SecretReference{Name: fmt.Sprintf("secret-%s", pv.Name)}} pv.Spec.ClaimRef = &api.ObjectReference{Name: fmt.Sprintf("pvc%d-%s", i, pod.Name), Namespace: pod.Namespace} pvs = append(pvs, pv) @@ -421,7 +421,7 @@ func generate(opts sampleDataOpts) ([]*api.Pod, []*api.PersistentVolume) { for i := 0; i < opts.sharedPVCsPerPod; i++ { pv := &api.PersistentVolume{} pv.Name = fmt.Sprintf("pv%d-shared-%s", i, pod.Namespace) - pv.Spec.FlexVolume = &api.FlexVolumeSource{SecretRef: &api.LocalObjectReference{Name: fmt.Sprintf("secret-%s", pv.Name)}} + pv.Spec.FlexVolume = &api.FlexPersistentVolumeSource{SecretRef: &api.SecretReference{Name: 
fmt.Sprintf("secret-%s", pv.Name)}} pv.Spec.ClaimRef = &api.ObjectReference{Name: fmt.Sprintf("pvc%d-shared", i), Namespace: pod.Namespace} pvs = append(pvs, pv) diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 728cbd5a62b..de4c80af322 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -418,7 +418,7 @@ type PersistentVolumeSource struct { // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. // +optional - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. // +optional AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` @@ -1081,6 +1081,32 @@ type QuobyteVolumeSource struct { Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` } +// FlexPersistentVolumeSource represents a generic persistent volume resource that is +// provisioned/attached using an exec based plugin. +type FlexPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. 
If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // Optional: Extra command options if any. + // +optional + Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` +} + // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. type FlexVolumeSource struct { From 13854c46a7db958411a61cf9188ce70ff7f4c3d1 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Thu, 14 Dec 2017 00:06:23 -0500 Subject: [PATCH 303/794] Raise RBAC DENY log level --- plugin/pkg/auth/authorizer/rbac/rbac.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/pkg/auth/authorizer/rbac/rbac.go b/plugin/pkg/auth/authorizer/rbac/rbac.go index 92094807630..1f507eb0ed2 100644 --- a/plugin/pkg/auth/authorizer/rbac/rbac.go +++ b/plugin/pkg/auth/authorizer/rbac/rbac.go @@ -79,7 +79,7 @@ func (r *RBACAuthorizer) Authorize(requestAttributes authorizer.Attributes) (aut // Build a detailed log of the denial. // Make the whole block conditional so we don't do a lot of string-building we won't use. 
- if glog.V(2) { + if glog.V(5) { var operation string if requestAttributes.IsResourceRequest() { b := &bytes.Buffer{} From 4a16f16af42f4aa565adf832846db0f1c69fa953 Mon Sep 17 00:00:00 2001 From: chrislovecnm Date: Wed, 13 Dec 2017 22:42:07 -0700 Subject: [PATCH 304/794] Adding myself as a reviewer to aws cloud provider --- pkg/cloudprovider/providers/aws/OWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/cloudprovider/providers/aws/OWNERS b/pkg/cloudprovider/providers/aws/OWNERS index 294b5c5eb55..905c5f972a2 100644 --- a/pkg/cloudprovider/providers/aws/OWNERS +++ b/pkg/cloudprovider/providers/aws/OWNERS @@ -6,3 +6,4 @@ reviewers: - jsafrane - justinsb - zmerlynn +- chrislovecnm From 20a465ee4a4cf211334262f417a79aadc24da7b3 Mon Sep 17 00:00:00 2001 From: chrislovecnm Date: Wed, 13 Dec 2017 22:53:49 -0700 Subject: [PATCH 305/794] Adding myself as a reviewer to aws credentialprovider --- pkg/credentialprovider/aws/OWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/credentialprovider/aws/OWNERS b/pkg/credentialprovider/aws/OWNERS index 2fedc3d63ba..5d2b86e103f 100755 --- a/pkg/credentialprovider/aws/OWNERS +++ b/pkg/credentialprovider/aws/OWNERS @@ -4,3 +4,4 @@ reviewers: - therc - lixiaobing10051267 - goltermann +- chrislovecnm From ffbfd81c0636d2046ac7c2729040f548c9c74798 Mon Sep 17 00:00:00 2001 From: zhengjiajin Date: Thu, 14 Dec 2017 17:31:49 +0800 Subject: [PATCH 306/794] remove dependency from cobra, only use option test init flag --- cmd/kubeadm/app/preflight/checks.go | 7 ++-- plugin/cmd/kube-scheduler/app/server.go | 46 ++++++++++++------------- 2 files changed, 28 insertions(+), 25 deletions(-) diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index a79ff7bc866..43e53cf52c8 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -525,9 +525,12 @@ func (eac ExtraArgsCheck) Check() (warnings, errors []error) { warnings = append(warnings, 
argsCheck("kube-controller-manager", eac.ControllerManagerExtraArgs, flags)...) } if len(eac.SchedulerExtraArgs) > 0 { - command := schedulerapp.NewSchedulerCommand() + opts, err := schedulerapp.NewOptions() + if err != nil { + warnings = append(warnings, err) + } flags := pflag.NewFlagSet("", pflag.ContinueOnError) - flags.AddFlagSet(command.Flags()) + opts.AddFlags(flags) warnings = append(warnings, argsCheck("kube-scheduler", eac.SchedulerExtraArgs, flags)...) } return warnings, nil diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go index 68f8dad3c37..455e8180320 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/plugin/cmd/kube-scheduler/app/server.go @@ -104,36 +104,36 @@ type Options struct { } // AddFlags adds flags for a specific SchedulerServer to the specified FlagSet -func AddFlags(options *Options, fs *pflag.FlagSet) { - fs.StringVar(&options.ConfigFile, "config", options.ConfigFile, "The path to the configuration file.") +func (o *Options) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "The path to the configuration file.") // All flags below here are deprecated and will eventually be removed. - fs.Int32Var(&options.healthzPort, "port", ports.SchedulerPort, "The port that the scheduler's http service runs on") - fs.StringVar(&options.healthzAddress, "address", options.healthzAddress, "The IP address to serve on (set to 0.0.0.0 for all interfaces)") - fs.StringVar(&options.algorithmProvider, "algorithm-provider", options.algorithmProvider, "The scheduling algorithm provider to use, one of: "+factory.ListAlgorithmProviders()) - fs.StringVar(&options.policyConfigFile, "policy-config-file", options.policyConfigFile, "File with scheduler policy configuration. 
This file is used if policy ConfigMap is not provided or --use-legacy-policy-config==true") + fs.Int32Var(&o.healthzPort, "port", ports.SchedulerPort, "The port that the scheduler's http service runs on") + fs.StringVar(&o.healthzAddress, "address", o.healthzAddress, "The IP address to serve on (set to 0.0.0.0 for all interfaces)") + fs.StringVar(&o.algorithmProvider, "algorithm-provider", o.algorithmProvider, "The scheduling algorithm provider to use, one of: "+factory.ListAlgorithmProviders()) + fs.StringVar(&o.policyConfigFile, "policy-config-file", o.policyConfigFile, "File with scheduler policy configuration. This file is used if policy ConfigMap is not provided or --use-legacy-policy-config==true") usage := fmt.Sprintf("Name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config==false. The config must be provided as the value of an element in 'Data' map with the key='%v'", componentconfig.SchedulerPolicyConfigMapKey) - fs.StringVar(&options.policyConfigMapName, "policy-configmap", options.policyConfigMapName, usage) - fs.StringVar(&options.policyConfigMapNamespace, "policy-configmap-namespace", options.policyConfigMapNamespace, "The namespace where policy ConfigMap is located. 
The system namespace will be used if this is not provided or is empty.") - fs.BoolVar(&options.useLegacyPolicyConfig, "use-legacy-policy-config", false, "When set to true, scheduler will ignore policy ConfigMap and uses policy config file") - fs.BoolVar(&options.config.EnableProfiling, "profiling", options.config.EnableProfiling, "Enable profiling via web interface host:port/debug/pprof/") - fs.BoolVar(&options.config.EnableContentionProfiling, "contention-profiling", options.config.EnableContentionProfiling, "Enable lock contention profiling, if profiling is enabled") - fs.StringVar(&options.master, "master", options.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") - fs.StringVar(&options.config.ClientConnection.KubeConfigFile, "kubeconfig", options.config.ClientConnection.KubeConfigFile, "Path to kubeconfig file with authorization and master location information.") - fs.StringVar(&options.config.ClientConnection.ContentType, "kube-api-content-type", options.config.ClientConnection.ContentType, "Content type of requests sent to apiserver.") - fs.Float32Var(&options.config.ClientConnection.QPS, "kube-api-qps", options.config.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver") - fs.Int32Var(&options.config.ClientConnection.Burst, "kube-api-burst", options.config.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver") - fs.StringVar(&options.config.SchedulerName, "scheduler-name", options.config.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's \"spec.SchedulerName\".") - fs.StringVar(&options.config.LeaderElection.LockObjectNamespace, "lock-object-namespace", options.config.LeaderElection.LockObjectNamespace, "Define the namespace of the lock object.") - fs.StringVar(&options.config.LeaderElection.LockObjectName, "lock-object-name", options.config.LeaderElection.LockObjectName, "Define the name of the lock 
object.") - fs.Int32Var(&options.config.HardPodAffinitySymmetricWeight, "hard-pod-affinity-symmetric-weight", options.config.HardPodAffinitySymmetricWeight, + fs.StringVar(&o.policyConfigMapName, "policy-configmap", o.policyConfigMapName, usage) + fs.StringVar(&o.policyConfigMapNamespace, "policy-configmap-namespace", o.policyConfigMapNamespace, "The namespace where policy ConfigMap is located. The system namespace will be used if this is not provided or is empty.") + fs.BoolVar(&o.useLegacyPolicyConfig, "use-legacy-policy-config", false, "When set to true, scheduler will ignore policy ConfigMap and uses policy config file") + fs.BoolVar(&o.config.EnableProfiling, "profiling", o.config.EnableProfiling, "Enable profiling via web interface host:port/debug/pprof/") + fs.BoolVar(&o.config.EnableContentionProfiling, "contention-profiling", o.config.EnableContentionProfiling, "Enable lock contention profiling, if profiling is enabled") + fs.StringVar(&o.master, "master", o.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") + fs.StringVar(&o.config.ClientConnection.KubeConfigFile, "kubeconfig", o.config.ClientConnection.KubeConfigFile, "Path to kubeconfig file with authorization and master location information.") + fs.StringVar(&o.config.ClientConnection.ContentType, "kube-api-content-type", o.config.ClientConnection.ContentType, "Content type of requests sent to apiserver.") + fs.Float32Var(&o.config.ClientConnection.QPS, "kube-api-qps", o.config.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver") + fs.Int32Var(&o.config.ClientConnection.Burst, "kube-api-burst", o.config.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver") + fs.StringVar(&o.config.SchedulerName, "scheduler-name", o.config.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's \"spec.SchedulerName\".") + 
fs.StringVar(&o.config.LeaderElection.LockObjectNamespace, "lock-object-namespace", o.config.LeaderElection.LockObjectNamespace, "Define the namespace of the lock object.") + fs.StringVar(&o.config.LeaderElection.LockObjectName, "lock-object-name", o.config.LeaderElection.LockObjectName, "Define the name of the lock object.") + fs.Int32Var(&o.config.HardPodAffinitySymmetricWeight, "hard-pod-affinity-symmetric-weight", o.config.HardPodAffinitySymmetricWeight, "RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding "+ "to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule.") fs.MarkDeprecated("hard-pod-affinity-symmetric-weight", "This option was moved to the policy configuration file") - fs.StringVar(&options.config.FailureDomains, "failure-domains", options.config.FailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.") + fs.StringVar(&o.config.FailureDomains, "failure-domains", o.config.FailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.") fs.MarkDeprecated("failure-domains", "Doesn't have any effect. 
Will be removed in future version.") - leaderelectionconfig.BindFlags(&options.config.LeaderElection.LeaderElectionConfiguration, fs) + leaderelectionconfig.BindFlags(&o.config.LeaderElection.LeaderElectionConfiguration, fs) utilfeature.DefaultFeatureGate.AddFlag(fs) } @@ -343,7 +343,7 @@ through the API as necessary.`, glog.Fatalf("unable to apply config defaults: %v", err) } - AddFlags(opts, pflag.CommandLine) + opts.AddFlags(pflag.CommandLine) utilflag.InitFlags() cmd.MarkFlagFilename("config", "yaml", "yml", "json") From e0cfe94066e279f0e424d147f38c38a1c8cf0502 Mon Sep 17 00:00:00 2001 From: Karol Wychowaniec Date: Thu, 14 Dec 2017 11:03:17 +0100 Subject: [PATCH 307/794] Reduce CPU request of Dasboard addon --- cluster/addons/dashboard/dashboard-controller.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/addons/dashboard/dashboard-controller.yaml b/cluster/addons/dashboard/dashboard-controller.yaml index ac05d3a9a29..59bf7c4daf8 100644 --- a/cluster/addons/dashboard/dashboard-controller.yaml +++ b/cluster/addons/dashboard/dashboard-controller.yaml @@ -35,7 +35,7 @@ spec: cpu: 100m memory: 300Mi requests: - cpu: 100m + cpu: 50m memory: 100Mi ports: - containerPort: 8443 From afac46bd496d6bb5819b0762adabdcd13acd6818 Mon Sep 17 00:00:00 2001 From: Beata Skiba Date: Tue, 5 Dec 2017 16:36:49 +0100 Subject: [PATCH 308/794] Add e2e test for custom metrics with Prometheus and Stackdriver --- test/e2e/autoscaling/BUILD | 1 + .../autoscaling/custom_metrics_autoscaling.go | 112 +++++++++++------- .../monitoring/custom_metrics_deployments.go | 71 ++++++++++- 3 files changed, 136 insertions(+), 48 deletions(-) diff --git a/test/e2e/autoscaling/BUILD b/test/e2e/autoscaling/BUILD index ef75e7d9a68..f80aeee6817 100644 --- a/test/e2e/autoscaling/BUILD +++ b/test/e2e/autoscaling/BUILD @@ -31,6 +31,7 @@ go_library( "//vendor/google.golang.org/api/monitoring/v3:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", 
"//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/test/e2e/autoscaling/custom_metrics_autoscaling.go b/test/e2e/autoscaling/custom_metrics_autoscaling.go index f5409ec5cd5..15ceadcc35d 100644 --- a/test/e2e/autoscaling/custom_metrics_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_autoscaling.go @@ -20,18 +20,19 @@ import ( "context" "time" - "golang.org/x/oauth2/google" - clientset "k8s.io/client-go/kubernetes" - - . "github.com/onsi/ginkgo" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/test/e2e/framework" - gcm "google.golang.org/api/monitoring/v3" as "k8s.io/api/autoscaling/v2beta1" + corev1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/instrumentation/monitoring" + + . 
"github.com/onsi/ginkgo" + "golang.org/x/oauth2/google" ) const ( @@ -46,15 +47,32 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me }) f := framework.NewDefaultFramework("horizontal-pod-autoscaling") - var kubeClient clientset.Interface - It("should autoscale with Custom Metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() { - kubeClient = f.ClientSet - testHPA(f, kubeClient) + It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() { + initialReplicas := 2 + scaledReplicas := 1 + deployment := monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 100) + customMetricTest(f, f.ClientSet, podsHPA(f.Namespace.ObjectMeta.Name), deployment, nil, initialReplicas, scaledReplicas) + }) + + It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() { + initialReplicas := 2 + scaledReplicas := 1 + deployment := monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 100) + pod := monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.ObjectMeta.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100) + customMetricTest(f, f.ClientSet, objectHPA(f.Namespace.ObjectMeta.Name), deployment, pod, initialReplicas, scaledReplicas) + }) + + It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() { + initialReplicas := 2 + scaledReplicas := 1 + deployment := monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 100) + customMetricTest(f, f.ClientSet, podsHPA(f.Namespace.ObjectMeta.Name), deployment, nil, initialReplicas, scaledReplicas) }) }) -func testHPA(f *framework.Framework, kubeClient clientset.Interface) { +func 
customMetricTest(f *framework.Framework, kubeClient clientset.Interface, hpa *as.HorizontalPodAutoscaler, + deployment *extensions.Deployment, pod *corev1.Pod, initialReplicas, scaledReplicas int) { projectId := framework.TestContext.CloudConfig.ProjectID ctx := context.Background() @@ -92,51 +110,55 @@ func testHPA(f *framework.Framework, kubeClient clientset.Interface) { defer monitoring.CleanupAdapter() // Run application that exports the metric - err = createDeploymentsToScale(f, kubeClient) + err = createDeploymentToScale(f, kubeClient, deployment, pod) if err != nil { framework.Failf("Failed to create stackdriver-exporter pod: %v", err) } - defer cleanupDeploymentsToScale(f, kubeClient) + defer cleanupDeploymentsToScale(f, kubeClient, deployment, pod) - // Autoscale the deployments - err = createPodsHPA(f, kubeClient) + // Wait for the deployment to run + waitForReplicas(deployment.ObjectMeta.Name, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, initialReplicas) + + // Autoscale the deployment + _, err = kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(hpa) if err != nil { - framework.Failf("Failed to create 'Pods' HPA: %v", err) - } - err = createObjectHPA(f, kubeClient) - if err != nil { - framework.Failf("Failed to create 'Objects' HPA: %v", err) + framework.Failf("Failed to create HPA: %v", err) } - waitForReplicas(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 1) - waitForReplicas(dummyDeploymentName, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 1) + waitForReplicas(deployment.ObjectMeta.Name, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, scaledReplicas) } -func createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) error { - _, err := cs.ExtensionsV1beta1().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.Name, 2, 100)) - if err != nil { - 
return err +func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) error { + if deployment != nil { + _, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment) + if err != nil { + return err + } } - _, err = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100)) - if err != nil { - return err + if pod != nil { + _, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(pod) + if err != nil { + return err + } } - _, err = cs.ExtensionsV1beta1().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.Name, 2, 100)) - return err + return nil } -func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface) { - _ = cs.ExtensionsV1beta1().Deployments(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterDeployment, &metav1.DeleteOptions{}) - _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterPod, &metav1.DeleteOptions{}) - _ = cs.ExtensionsV1beta1().Deployments(f.Namespace.ObjectMeta.Name).Delete(dummyDeploymentName, &metav1.DeleteOptions{}) +func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) { + if deployment != nil { + _ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{}) + } + if pod != nil { + _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(pod.ObjectMeta.Name, &metav1.DeleteOptions{}) + } } -func createPodsHPA(f *framework.Framework, cs clientset.Interface) error { +func podsHPA(namespace string) *as.HorizontalPodAutoscaler { var minReplicas int32 = 1 - _, err := 
cs.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(&as.HorizontalPodAutoscaler{ + return &as.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "custom-metrics-pods-hpa", - Namespace: f.Namespace.ObjectMeta.Name, + Namespace: namespace, }, Spec: as.HorizontalPodAutoscalerSpec{ Metrics: []as.MetricSpec{ @@ -156,16 +178,15 @@ func createPodsHPA(f *framework.Framework, cs clientset.Interface) error { Name: stackdriverExporterDeployment, }, }, - }) - return err + } } -func createObjectHPA(f *framework.Framework, cs clientset.Interface) error { +func objectHPA(namespace string) *as.HorizontalPodAutoscaler { var minReplicas int32 = 1 - _, err := cs.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(&as.HorizontalPodAutoscaler{ + return &as.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "custom-metrics-objects-hpa", - Namespace: f.Namespace.ObjectMeta.Name, + Namespace: namespace, }, Spec: as.HorizontalPodAutoscalerSpec{ Metrics: []as.MetricSpec{ @@ -189,8 +210,7 @@ func createObjectHPA(f *framework.Framework, cs clientset.Interface) error { Name: dummyDeploymentName, }, }, - }) - return err + } } func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) { diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go index 2adae0d4b6c..05b66946c48 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go @@ -18,6 +18,7 @@ package monitoring import ( "fmt" + gcm "google.golang.org/api/monitoring/v3" corev1 "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" @@ -27,8 +28,8 @@ import ( ) var ( - CustomMetricName = "foo-metric" - UnusedMetricName = "unused-metric" + CustomMetricName = "foo" + UnusedMetricName = "unused" 
CustomMetricValue = int64(448) UnusedMetricValue = int64(446) // HPAPermissions is a ClusterRoleBinding that grants unauthenticated user permissions granted for @@ -116,6 +117,72 @@ func stackdriverExporterPodSpec(metricName string, metricValue int64) corev1.Pod } } +// PrometheusExporterDeployment is a Deployment of simple application with two containers +// one exposing a metric in prometheus format and second a prometheus-to-sd container +// that scrapes the metric and pushes it to stackdriver. +func PrometheusExporterDeployment(name, namespace string, replicas int32, metricValue int64) *extensions.Deployment { + return &extensions.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: extensions.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "name": name, + }, + }, + Spec: prometheusExporterPodSpec(CustomMetricName, metricValue, 8080), + }, + Replicas: &replicas, + }, + } +} + +func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) corev1.PodSpec { + return corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "prometheus-exporter", + Image: "gcr.io/google-containers/prometheus-dummy-exporter:v0.1.0", + ImagePullPolicy: corev1.PullPolicy("Always"), + Command: []string{"/prometheus_dummy_exporter", "--metric-name=" + metricName, + fmt.Sprintf("--metric-value=%v", metricValue), fmt.Sprintf("--port=%d", port)}, + Ports: []corev1.ContainerPort{{ContainerPort: port}}, + }, + { + Name: "prometheus-to-sd", + Image: "gcr.io/google-containers/prometheus-to-sd:v0.2.3", + ImagePullPolicy: corev1.PullPolicy("Always"), + Command: []string{"/monitor", fmt.Sprintf("--source=:http://localhost:%d", port), + "--stackdriver-prefix=custom.googleapis.com", "--pod-id=$(POD_ID)", "--namespace-id=$(POD_NAMESPACE)"}, + Env: []corev1.EnvVar{ + { + Name: 
"POD_ID", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + }, + }, + }, + } +} + // CreateAdapter creates Custom Metrics - Stackdriver adapter. func CreateAdapter() error { stat, err := framework.RunKubectl("create", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-stackdriver/master/custom-metrics-stackdriver-adapter/adapter-beta.yaml") From 6592e44d18285bfd99ee3651a464a4d21e4b30c3 Mon Sep 17 00:00:00 2001 From: Marco Ceppi Date: Thu, 14 Dec 2017 12:58:52 +0000 Subject: [PATCH 309/794] Use an s390x default-http-backend This needs to be refactored to eventually say all non x86 architectures place a -arch() in the image name to support ppc64el, arm, etc. Most all gcr.io/google_containers have -arch() image names. --- .../layers/kubernetes-worker/reactive/kubernetes_worker.py | 6 ++++++ .../kubernetes-worker/templates/default-http-backend.yaml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index 4e233cb8991..fea91cb798a 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -605,6 +605,12 @@ def launch_default_ingress_controller(): context['arch'] = arch() addon_path = '/root/cdk/addons/{}' + context['defaultbackend_image'] = \ + "gcr.io/google_containers/defaultbackend:1.4" + if arch() == 's390x': + context['defaultbackend_image'] = \ + "gcr.io/google_containers/defaultbackend-s390x:1.4" + # Render the default http backend (404) replicationcontroller manifest manifest = addon_path.format('default-http-backend.yaml') render('default-http-backend.yaml', manifest, context) 
diff --git a/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml b/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml index 6c826ac320e..91b800ab753 100644 --- a/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml +++ b/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml @@ -17,7 +17,7 @@ spec: # Any image is permissable as long as: # 1. It serves a 404 page at / # 2. It serves 200 on a /healthz endpoint - image: gcr.io/google_containers/defaultbackend:1.0 + image: {{ defaultbackend_image }} livenessProbe: httpGet: path: /healthz From 8dc4c4089be54b94ada625ddc2a9d4971ccb8a55 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Thu, 14 Dec 2017 09:37:55 -0800 Subject: [PATCH 310/794] pkg/controller/bootstrap: update jose package --- pkg/controller/bootstrap/BUILD | 2 +- pkg/controller/bootstrap/jws.go | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/pkg/controller/bootstrap/BUILD b/pkg/controller/bootstrap/BUILD index 6dfcd204c9d..737dd4eae99 100644 --- a/pkg/controller/bootstrap/BUILD +++ b/pkg/controller/bootstrap/BUILD @@ -46,7 +46,7 @@ go_library( "//pkg/bootstrap/api:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/square/go-jose:go_default_library", + "//vendor/gopkg.in/square/go-jose.v2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/bootstrap/jws.go b/pkg/controller/bootstrap/jws.go index ec73ceb4887..273a002c202 100644 --- a/pkg/controller/bootstrap/jws.go +++ b/pkg/controller/bootstrap/jws.go @@ -20,19 +20,28 @@ import ( "fmt" "strings" - jose "github.com/square/go-jose" + jose "gopkg.in/square/go-jose.v2" ) // computeDetachedSig takes content and token details and 
computes a detached // JWS signature. This is described in Appendix F of RFC 7515. Basically, this // is a regular JWS with the content part of the signature elided. func computeDetachedSig(content, tokenID, tokenSecret string) (string, error) { - jwk := &jose.JsonWebKey{ + jwk := &jose.JSONWebKey{ Key: []byte(tokenSecret), KeyID: tokenID, } - signer, err := jose.NewSigner(jose.HS256, jwk) + opts := &jose.SignerOptions{ + // Since this is a symmetric key, go-jose doesn't automatically include + // the KeyID as part of the protected header. We have to pass it here + // explicitly. + ExtraHeaders: map[jose.HeaderKey]interface{}{ + "kid": tokenID, + }, + } + + signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: jwk}, opts) if err != nil { return "", fmt.Errorf("can't make a HS256 signer from the given token: %v", err) } From 44d0004152d4854e7592063b1cf26712cf0ca907 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Thu, 14 Dec 2017 09:38:09 -0800 Subject: [PATCH 311/794] generated: revendor --- Godeps/Godeps.json | 27 +- Godeps/LICENSES | 1260 ++++++++--------- vendor/BUILD | 2 +- vendor/github.com/square/go-jose/README.md | 212 --- vendor/github.com/square/go-jose/crypter.go | 349 ----- vendor/github.com/square/go-jose/shared.go | 224 --- vendor/github.com/square/go-jose/signing.go | 218 --- vendor/github.com/square/go-jose/utils.go | 74 - .../square/go-jose.v2}/.gitcookies.sh.enc | 0 .../square/go-jose.v2}/.gitignore | 0 .../square/go-jose.v2}/.travis.yml | 10 +- .../square/go-jose.v2}/BUG-BOUNTY.md | 0 .../square/go-jose.v2}/BUILD | 12 +- .../square/go-jose.v2}/CONTRIBUTING.md | 0 .../square/go-jose.v2}/LICENSE | 0 vendor/gopkg.in/square/go-jose.v2/README.md | 119 ++ .../square/go-jose.v2}/asymmetric.go | 103 +- .../square/go-jose.v2}/cipher/BUILD | 2 +- .../square/go-jose.v2}/cipher/cbc_hmac.go | 0 .../square/go-jose.v2}/cipher/concat_kdf.go | 0 .../square/go-jose.v2}/cipher/ecdh_es.go | 2 +- .../square/go-jose.v2}/cipher/key_wrap.go | 0 
vendor/gopkg.in/square/go-jose.v2/crypter.go | 510 +++++++ .../square/go-jose.v2}/doc.go | 9 +- .../square/go-jose.v2}/encoding.go | 21 +- .../square/go-jose.v2}/json/BUILD | 2 +- .../square/go-jose.v2}/json/LICENSE | 0 .../square/go-jose.v2}/json/README.md | 0 .../square/go-jose.v2}/json/decode.go | 0 .../square/go-jose.v2}/json/encode.go | 0 .../square/go-jose.v2}/json/indent.go | 0 .../square/go-jose.v2}/json/scanner.go | 0 .../square/go-jose.v2}/json/stream.go | 0 .../square/go-jose.v2}/json/tags.go | 0 .../square/go-jose.v2}/jwe.go | 95 +- .../square/go-jose.v2}/jwk.go | 154 +- .../square/go-jose.v2}/jws.go | 88 +- vendor/gopkg.in/square/go-jose.v2/shared.go | 417 ++++++ vendor/gopkg.in/square/go-jose.v2/signing.go | 343 +++++ .../square/go-jose.v2}/symmetric.go | 27 +- 40 files changed, 2398 insertions(+), 1882 deletions(-) delete mode 100644 vendor/github.com/square/go-jose/README.md delete mode 100644 vendor/github.com/square/go-jose/crypter.go delete mode 100644 vendor/github.com/square/go-jose/shared.go delete mode 100644 vendor/github.com/square/go-jose/signing.go delete mode 100644 vendor/github.com/square/go-jose/utils.go rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/.gitcookies.sh.enc (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/.gitignore (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/.travis.yml (85%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/BUG-BOUNTY.md (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/BUILD (64%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/CONTRIBUTING.md (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/LICENSE (100%) create mode 100644 vendor/gopkg.in/square/go-jose.v2/README.md rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/asymmetric.go (85%) rename vendor/{github.com/square/go-jose => 
gopkg.in/square/go-jose.v2}/cipher/BUILD (90%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/cipher/cbc_hmac.go (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/cipher/concat_kdf.go (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/cipher/ecdh_es.go (96%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/cipher/key_wrap.go (100%) create mode 100644 vendor/gopkg.in/square/go-jose.v2/crypter.go rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/doc.go (68%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/encoding.go (89%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/json/BUILD (91%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/json/LICENSE (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/json/README.md (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/json/decode.go (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/json/encode.go (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/json/indent.go (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/json/scanner.go (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/json/stream.go (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/json/tags.go (100%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/jwe.go (71%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/jwk.go (70%) rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/jws.go (70%) create mode 100644 vendor/gopkg.in/square/go-jose.v2/shared.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/signing.go rename vendor/{github.com/square/go-jose => gopkg.in/square/go-jose.v2}/symmetric.go (93%) diff --git 
a/Godeps/Godeps.json b/Godeps/Godeps.json index d89bc78af29..4930c2221c6 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -2488,18 +2488,6 @@ "ImportPath": "github.com/spf13/viper", "Rev": "7fb2782df3d83e0036cc89f461ed0422628776f4" }, - { - "ImportPath": "github.com/square/go-jose", - "Rev": "789a4c4bd4c118f7564954f441b29c153ccd6a96" - }, - { - "ImportPath": "github.com/square/go-jose/cipher", - "Rev": "789a4c4bd4c118f7564954f441b29c153ccd6a96" - }, - { - "ImportPath": "github.com/square/go-jose/json", - "Rev": "789a4c4bd4c118f7564954f441b29c153ccd6a96" - }, { "ImportPath": "github.com/storageos/go-api", "Rev": "74f9beb613cacf0cc282facc2e1550a3231e126f" @@ -3028,6 +3016,21 @@ "Comment": "v1.0-16-g20b71e5", "Rev": "20b71e5b60d756d3d2f80def009790325acc2b23" }, + { + "ImportPath": "gopkg.in/square/go-jose.v2", + "Comment": "v2.1.3", + "Rev": "f8f38de21b4dcd69d0413faf231983f5fd6634b1" + }, + { + "ImportPath": "gopkg.in/square/go-jose.v2/cipher", + "Comment": "v2.1.3", + "Rev": "f8f38de21b4dcd69d0413faf231983f5fd6634b1" + }, + { + "ImportPath": "gopkg.in/square/go-jose.v2/json", + "Comment": "v2.1.3", + "Rev": "f8f38de21b4dcd69d0413faf231983f5fd6634b1" + }, { "ImportPath": "gopkg.in/warnings.v0", "Comment": "v0.1.1", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 41b5664763f..2b40e59609c 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -78126,636 +78126,6 @@ SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/square/go-jose licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/square/go-jose/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/github.com/square/go-jose/cipher licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/square/go-jose/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/github.com/square/go-jose/json licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/square/go-jose/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/storageos/go-api licensed under: = @@ -87277,6 +86647,636 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/gopkg.in/square/go-jose.v2 licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/gopkg.in/square/go-jose.v2/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/gopkg.in/square/go-jose.v2/cipher licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/gopkg.in/square/go-jose.v2/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/gopkg.in/square/go-jose.v2/json licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/gopkg.in/square/go-jose.v2/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/gopkg.in/warnings.v0 licensed under: = diff --git a/vendor/BUILD b/vendor/BUILD index 5ce4ade4b4b..5b92af906f8 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -328,7 +328,6 @@ filegroup( "//vendor/github.com/spf13/jwalterweatherman:all-srcs", "//vendor/github.com/spf13/pflag:all-srcs", "//vendor/github.com/spf13/viper:all-srcs", - "//vendor/github.com/square/go-jose:all-srcs", "//vendor/github.com/storageos/go-api:all-srcs", "//vendor/github.com/stretchr/objx:all-srcs", "//vendor/github.com/stretchr/testify/assert:all-srcs", @@ -397,6 +396,7 @@ filegroup( "//vendor/gopkg.in/gcfg.v1:all-srcs", "//vendor/gopkg.in/inf.v0:all-srcs", "//vendor/gopkg.in/natefinch/lumberjack.v2:all-srcs", + "//vendor/gopkg.in/square/go-jose.v2:all-srcs", "//vendor/gopkg.in/warnings.v0:all-srcs", "//vendor/gopkg.in/yaml.v2:all-srcs", "//vendor/k8s.io/gengo/args:all-srcs", diff --git a/vendor/github.com/square/go-jose/README.md b/vendor/github.com/square/go-jose/README.md deleted file mode 100644 index 60293ffa26b..00000000000 --- a/vendor/github.com/square/go-jose/README.md +++ /dev/null @@ -1,212 +0,0 @@ -# Go JOSE - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) -[![release](https://img.shields.io/github/release/square/go-jose.svg?style=flat)](https://github.com/square/go-jose/releases) -[![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) -[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) - 
-Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. For the moment, it mainly focuses on encryption -and signing based on the JSON Web Encryption and JSON Web Signature standards. - -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - -## Overview - -The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) -standard (RFC 7516) and -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) -standard (RFC 7515). Tables of supported algorithms are shown below. -The library supports both the compact and full serialization formats, and has -optional support for multiple recipients. It also comes with a small -command-line utility -([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). -This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. If you do not like this behavior, you can use the -`std_json` build tag to disable it (though we do not recommend doing so). - -### Versions - -We use [gopkg.in](https://gopkg.in) for versioning. 
- -[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version: - - import "gopkg.in/square/go-jose.v1" - -The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain -backwards compatible. We're currently sketching out ideas for a new version, to -clean up the interface a bit. If you have ideas or feature requests [please let -us know](https://github.com/square/go-jose/issues/64)! - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the -[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) -standard where possible. The -[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants) -has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. 
Note that if you are creating a new encrypter or signer with a -JsonWebKey, the key id of the JsonWebKey (if present) will be added to any -resulting messages. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - AES, HMAC | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - -## Examples - -Encryption/decryption example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would -// indicate that the selected algorithm(s) are not currently supported. -publicKey := &privateKey.PublicKey -encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) -if err != nil { - panic(err) -} - -// Encrypt a sample plaintext. Calling the encrypter returns an encrypted -// JWE object, which can then be serialized for output afterwards. An error -// would indicate a problem in an underlying cryptographic primitive. -var plaintext = []byte("Lorem ipsum dolor sit amet") -object, err := encrypter.Encrypt(plaintext) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. 
-serialized := object.FullSerialize() - -// Parse the serialized, encrypted JWE object. An error would indicate that -// the given input did not represent a valid message. -object, err = ParseEncrypted(serialized) -if err != nil { - panic(err) -} - -// Now we can decrypt and get back our original plaintext. An error here -// would indicate the the message failed to decrypt, e.g. because the auth -// tag was broken or the message was tampered with. -decrypted, err := object.Decrypt(privateKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(decrypted)) -// output: Lorem ipsum dolor sit amet -``` - -Signing/verification example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. -signer, err := NewSigner(PS512, privateKey) -if err != nil { - panic(err) -} - -// Sign a sample payload. Calling the signer returns a protected JWS object, -// which can then be serialized for output afterwards. An error would -// indicate a problem in an underlying cryptographic primitive. -var payload = []byte("Lorem ipsum dolor sit amet") -object, err := signer.Sign(payload) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, protected JWS object. An error would indicate that -// the given input did not represent a valid message. -object, err = ParseSigned(serialized) -if err != nil { - panic(err) -} - -// Now we can verify the signature on the payload. 
An error here would -// indicate the the message failed to verify, e.g. because the signature was -// broken or the message was tampered with. -output, err := object.Verify(&privateKey.PublicKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(output)) -// output: Lorem ipsum dolor sit amet -``` - -More examples can be found in the [Godoc -reference](https://godoc.org/github.com/square/go-jose) for this package. The -[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util) -subdirectory also contains a small command-line utility which might -be useful as an example. diff --git a/vendor/github.com/square/go-jose/crypter.go b/vendor/github.com/square/go-jose/crypter.go deleted file mode 100644 index a38632ddc65..00000000000 --- a/vendor/github.com/square/go-jose/crypter.go +++ /dev/null @@ -1,349 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "fmt" - "reflect" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. -type Encrypter interface { - Encrypt(plaintext []byte) (*JsonWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) - SetCompression(alg CompressionAlgorithm) -} - -// MultiEncrypter represents an encrypter which supports multiple recipients. 
-type MultiEncrypter interface { - Encrypt(plaintext []byte) (*JsonWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) - SetCompression(alg CompressionAlgorithm) - AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// SetCompression sets a compression algorithm to be applied before encryption. 
-func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) { - ctx.compressionAlg = compressionAlg -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - compressionAlg: NONE, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := encryptionKey.(type) { - case *JsonWebKey: - keyID = encryptionKey.KeyID - rawKey = encryptionKey.Key - default: - rawKey = encryptionKey - } - - switch alg { - case DIRECT: - // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), - } - recipient, _ := newSymmetricRecipient(alg, rawKey.([]byte)) - if keyID != "" { - recipient.keyID = keyID - } - encrypter.recipients = []recipientKeyInfo{recipient} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), - } - recipient, _ := newECDHRecipient(alg, rawKey.(*ecdsa.PublicKey)) - if keyID != "" { - recipient.keyID = keyID - } - encrypter.recipients = []recipientKeyInfo{recipient} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: encrypter.cipher.keySize(), - } - err := encrypter.AddRecipient(alg, encryptionKey) - return encrypter, err - } -} - -// NewMultiEncrypter creates a 
multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - compressionAlg: NONE, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) { - var recipient recipientKeyInfo - - switch alg { - case DIRECT, ECDH_ES: - return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg) - } - - recipient, err = makeJWERecipient(alg, encryptionKey) - - if err == nil { - ctx.recipients = append(ctx.recipients, recipient) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case *JsonWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - if err == nil && encryptionKey.KeyID != "" { - recipient.keyID = encryptionKey.KeyID - } - return recipient, err - default: - return recipientKeyInfo{}, ErrUnsupportedKeyType - } -} - -// newDecrypter creates an appropriate decrypter based on the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case *JsonWebKey: - return 
newDecrypter(decryptionKey.Key) - default: - return nil, ErrUnsupportedKeyType - } -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) { - obj := &JsonWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{ - Enc: ctx.contentAlg, - } - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - recipient.header.Alg = string(info.keyAlg) - if info.keyID != "" { - recipient.header.Kid = info.keyID - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. - obj.protected.merge(obj.recipients[0].header) - obj.recipients[0].header = nil - } - - if ctx.compressionAlg != NONE { - plaintext, err = compress(ctx.compressionAlg, plaintext) - if err != nil { - return nil, err - } - - obj.protected.Zip = ctx.compressionAlg - } - - authData := obj.computeAuthData() - parts, err := ctx.cipher.encrypt(cek, authData, plaintext) - if err != nil { - return nil, err - } - - obj.iv = parts.iv - obj.ciphertext = parts.ciphertext - obj.tag = parts.tag - - return obj, nil -} - -// Decrypt and validate the object and return the plaintext. 
-func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - - if len(headers.Crit) > 0 { - return nil, fmt.Errorf("square/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return nil, err - } - - cipher := getContentCipher(headers.Enc) - if cipher == nil { - return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - var plaintext []byte - for _, recipient := range obj.recipients { - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - if err == nil { - break - } - } - } - - if plaintext == nil { - return nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if obj.protected.Zip != "" { - plaintext, err = decompress(obj.protected.Zip, plaintext) - } - - return plaintext, err -} diff --git a/vendor/github.com/square/go-jose/shared.go b/vendor/github.com/square/go-jose/shared.go deleted file mode 100644 index 9d895a912cf..00000000000 --- a/vendor/github.com/square/go-jose/shared.go +++ /dev/null @@ -1,224 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/elliptic" - "errors" - "fmt" -) - -// KeyAlgorithm represents a key management algorithm. -type KeyAlgorithm string - -// SignatureAlgorithm represents a signature (or MAC) algorithm. -type SignatureAlgorithm string - -// ContentEncryption represents a content encryption algorithm. -type ContentEncryption string - -// CompressionAlgorithm represents an algorithm used for plaintext compression. -type CompressionAlgorithm string - -var ( - // ErrCryptoFailure represents an error in cryptographic primitive. This - // occurs when, for example, a message had an invalid authentication tag or - // could not be decrypted. - ErrCryptoFailure = errors.New("square/go-jose: error in cryptographic primitive") - - // ErrUnsupportedAlgorithm indicates that a selected algorithm is not - // supported. This occurs when trying to instantiate an encrypter for an - // algorithm that is not yet implemented. - ErrUnsupportedAlgorithm = errors.New("square/go-jose: unknown/unsupported algorithm") - - // ErrUnsupportedKeyType indicates that the given key type/format is not - // supported. This occurs when trying to instantiate an encrypter and passing - // it a key of an unrecognized type or with unsupported parameters, such as - // an RSA private key with more than two primes. - ErrUnsupportedKeyType = errors.New("square/go-jose: unsupported key type/format") - - // ErrNotSupported serialization of object is not supported. This occurs when - // trying to compact-serialize an object which can't be represented in - // compact form. 
- ErrNotSupported = errors.New("square/go-jose: compact serialization not supported for object") - - // ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a - // nonce header parameter was included in an unprotected header object. - ErrUnprotectedNonce = errors.New("square/go-jose: Nonce parameter included in unprotected header") -) - -// Key management algorithms -const ( - RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5 - RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1 - RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256 - A128KW = KeyAlgorithm("A128KW") // AES key wrap (128) - A192KW = KeyAlgorithm("A192KW") // AES key wrap (192) - A256KW = KeyAlgorithm("A256KW") // AES key wrap (256) - DIRECT = KeyAlgorithm("dir") // Direct encryption - ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES - ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128) - ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192) - ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256) - A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128) - A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192) - A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256) - PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128) - PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192) - PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256) -) - -// Signature algorithms -const ( - HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256 - HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384 - HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512 - RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256 - RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384 - RS512 = SignatureAlgorithm("RS512") // 
RSASSA-PKCS-v1.5 using SHA-512 - ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256 - ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384 - ES512 = SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512 - PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256 - PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384 - PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512 -) - -// Content encryption algorithms -const ( - A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128) - A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192) - A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256) - A128GCM = ContentEncryption("A128GCM") // AES-GCM (128) - A192GCM = ContentEncryption("A192GCM") // AES-GCM (192) - A256GCM = ContentEncryption("A256GCM") // AES-GCM (256) -) - -// Compression algorithms -const ( - NONE = CompressionAlgorithm("") // No compression - DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951) -) - -// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing). -type rawHeader struct { - Alg string `json:"alg,omitempty"` - Enc ContentEncryption `json:"enc,omitempty"` - Zip CompressionAlgorithm `json:"zip,omitempty"` - Crit []string `json:"crit,omitempty"` - Apu *byteBuffer `json:"apu,omitempty"` - Apv *byteBuffer `json:"apv,omitempty"` - Epk *JsonWebKey `json:"epk,omitempty"` - Iv *byteBuffer `json:"iv,omitempty"` - Tag *byteBuffer `json:"tag,omitempty"` - Jwk *JsonWebKey `json:"jwk,omitempty"` - Kid string `json:"kid,omitempty"` - Nonce string `json:"nonce,omitempty"` -} - -// JoseHeader represents the read-only JOSE header for JWE/JWS objects. -type JoseHeader struct { - KeyID string - JsonWebKey *JsonWebKey - Algorithm string - Nonce string -} - -// sanitized produces a cleaned-up header object from the raw JSON. 
-func (parsed rawHeader) sanitized() JoseHeader { - return JoseHeader{ - KeyID: parsed.Kid, - JsonWebKey: parsed.Jwk, - Algorithm: parsed.Alg, - Nonce: parsed.Nonce, - } -} - -// Merge headers from src into dst, giving precedence to headers from l. -func (dst *rawHeader) merge(src *rawHeader) { - if src == nil { - return - } - - if dst.Alg == "" { - dst.Alg = src.Alg - } - if dst.Enc == "" { - dst.Enc = src.Enc - } - if dst.Zip == "" { - dst.Zip = src.Zip - } - if dst.Crit == nil { - dst.Crit = src.Crit - } - if dst.Crit == nil { - dst.Crit = src.Crit - } - if dst.Apu == nil { - dst.Apu = src.Apu - } - if dst.Apv == nil { - dst.Apv = src.Apv - } - if dst.Epk == nil { - dst.Epk = src.Epk - } - if dst.Iv == nil { - dst.Iv = src.Iv - } - if dst.Tag == nil { - dst.Tag = src.Tag - } - if dst.Kid == "" { - dst.Kid = src.Kid - } - if dst.Jwk == nil { - dst.Jwk = src.Jwk - } - if dst.Nonce == "" { - dst.Nonce = src.Nonce - } -} - -// Get JOSE name of curve -func curveName(crv elliptic.Curve) (string, error) { - switch crv { - case elliptic.P256(): - return "P-256", nil - case elliptic.P384(): - return "P-384", nil - case elliptic.P521(): - return "P-521", nil - default: - return "", fmt.Errorf("square/go-jose: unsupported/unknown elliptic curve") - } -} - -// Get size of curve in bytes -func curveSize(crv elliptic.Curve) int { - bits := crv.Params().BitSize - - div := bits / 8 - mod := bits % 8 - - if mod == 0 { - return div - } - - return div + 1 -} diff --git a/vendor/github.com/square/go-jose/signing.go b/vendor/github.com/square/go-jose/signing.go deleted file mode 100644 index c6ed2c92b1c..00000000000 --- a/vendor/github.com/square/go-jose/signing.go +++ /dev/null @@ -1,218 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "fmt" -) - -// NonceSource represents a source of random nonces to go into JWS objects -type NonceSource interface { - Nonce() (string, error) -} - -// Signer represents a signer which takes a payload and produces a signed JWS object. -type Signer interface { - Sign(payload []byte) (*JsonWebSignature, error) - SetNonceSource(source NonceSource) - SetEmbedJwk(embed bool) -} - -// MultiSigner represents a signer which supports multiple recipients. -type MultiSigner interface { - Sign(payload []byte) (*JsonWebSignature, error) - SetNonceSource(source NonceSource) - SetEmbedJwk(embed bool) - AddRecipient(alg SignatureAlgorithm, signingKey interface{}) error -} - -type payloadSigner interface { - signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) -} - -type payloadVerifier interface { - verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error -} - -type genericSigner struct { - recipients []recipientSigInfo - nonceSource NonceSource - embedJwk bool -} - -type recipientSigInfo struct { - sigAlg SignatureAlgorithm - keyID string - publicKey *JsonWebKey - signer payloadSigner -} - -// NewSigner creates an appropriate signer based on the key type -func NewSigner(alg SignatureAlgorithm, signingKey interface{}) (Signer, error) { - // NewMultiSigner never fails (currently) - signer := NewMultiSigner() - - err := signer.AddRecipient(alg, signingKey) - if err != nil { - return nil, err - } - - return signer, nil -} - -// NewMultiSigner creates a signer for 
multiple recipients -func NewMultiSigner() MultiSigner { - return &genericSigner{ - recipients: []recipientSigInfo{}, - embedJwk: true, - } -} - -// newVerifier creates a verifier based on the key type -func newVerifier(verificationKey interface{}) (payloadVerifier, error) { - switch verificationKey := verificationKey.(type) { - case *rsa.PublicKey: - return &rsaEncrypterVerifier{ - publicKey: verificationKey, - }, nil - case *ecdsa.PublicKey: - return &ecEncrypterVerifier{ - publicKey: verificationKey, - }, nil - case []byte: - return &symmetricMac{ - key: verificationKey, - }, nil - case *JsonWebKey: - return newVerifier(verificationKey.Key) - default: - return nil, ErrUnsupportedKeyType - } -} - -func (ctx *genericSigner) AddRecipient(alg SignatureAlgorithm, signingKey interface{}) error { - recipient, err := makeJWSRecipient(alg, signingKey) - if err != nil { - return err - } - - ctx.recipients = append(ctx.recipients, recipient) - return nil -} - -func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) { - switch signingKey := signingKey.(type) { - case *rsa.PrivateKey: - return newRSASigner(alg, signingKey) - case *ecdsa.PrivateKey: - return newECDSASigner(alg, signingKey) - case []byte: - return newSymmetricSigner(alg, signingKey) - case *JsonWebKey: - recipient, err := makeJWSRecipient(alg, signingKey.Key) - if err != nil { - return recipientSigInfo{}, err - } - recipient.keyID = signingKey.KeyID - return recipient, nil - default: - return recipientSigInfo{}, ErrUnsupportedKeyType - } -} - -func (ctx *genericSigner) Sign(payload []byte) (*JsonWebSignature, error) { - obj := &JsonWebSignature{} - obj.payload = payload - obj.Signatures = make([]Signature, len(ctx.recipients)) - - for i, recipient := range ctx.recipients { - protected := &rawHeader{ - Alg: string(recipient.sigAlg), - } - - if recipient.publicKey != nil && ctx.embedJwk { - protected.Jwk = recipient.publicKey - } - if recipient.keyID != "" { - 
protected.Kid = recipient.keyID - } - - if ctx.nonceSource != nil { - nonce, err := ctx.nonceSource.Nonce() - if err != nil { - return nil, fmt.Errorf("square/go-jose: Error generating nonce: %v", err) - } - protected.Nonce = nonce - } - - serializedProtected := mustSerializeJSON(protected) - - input := []byte(fmt.Sprintf("%s.%s", - base64URLEncode(serializedProtected), - base64URLEncode(payload))) - - signatureInfo, err := recipient.signer.signPayload(input, recipient.sigAlg) - if err != nil { - return nil, err - } - - signatureInfo.protected = protected - obj.Signatures[i] = signatureInfo - } - - return obj, nil -} - -// SetNonceSource provides or updates a nonce pool to the first recipients. -// After this method is called, the signer will consume one nonce per -// signature, returning an error it is unable to get a nonce. -func (ctx *genericSigner) SetNonceSource(source NonceSource) { - ctx.nonceSource = source -} - -// SetEmbedJwk specifies if the signing key should be embedded in the protected header, -// if any. It defaults to 'true'. -func (ctx *genericSigner) SetEmbedJwk(embed bool) { - ctx.embedJwk = embed -} - -// Verify validates the signature on the object and returns the payload. 
-func (obj JsonWebSignature) Verify(verificationKey interface{}) ([]byte, error) { - verifier, err := newVerifier(verificationKey) - if err != nil { - return nil, err - } - - for _, signature := range obj.Signatures { - headers := signature.mergedHeaders() - if len(headers.Crit) > 0 { - // Unsupported crit header - continue - } - - input := obj.computeAuthData(&signature) - alg := SignatureAlgorithm(headers.Alg) - err := verifier.verifyPayload(input, signature.Signature, alg) - if err == nil { - return obj.payload, nil - } - } - - return nil, ErrCryptoFailure -} diff --git a/vendor/github.com/square/go-jose/utils.go b/vendor/github.com/square/go-jose/utils.go deleted file mode 100644 index 4ca2bc06bae..00000000000 --- a/vendor/github.com/square/go-jose/utils.go +++ /dev/null @@ -1,74 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/x509" - "encoding/pem" - "fmt" -) - -// LoadPublicKey loads a public key from PEM/DER-encoded data. 
-func LoadPublicKey(data []byte) (interface{}, error) { - input := data - - block, _ := pem.Decode(data) - if block != nil { - input = block.Bytes - } - - // Try to load SubjectPublicKeyInfo - pub, err0 := x509.ParsePKIXPublicKey(input) - if err0 == nil { - return pub, nil - } - - cert, err1 := x509.ParseCertificate(input) - if err1 == nil { - return cert.PublicKey, nil - } - - return nil, fmt.Errorf("square/go-jose: parse error, got '%s' and '%s'", err0, err1) -} - -// LoadPrivateKey loads a private key from PEM/DER-encoded data. -func LoadPrivateKey(data []byte) (interface{}, error) { - input := data - - block, _ := pem.Decode(data) - if block != nil { - input = block.Bytes - } - - var priv interface{} - priv, err0 := x509.ParsePKCS1PrivateKey(input) - if err0 == nil { - return priv, nil - } - - priv, err1 := x509.ParsePKCS8PrivateKey(input) - if err1 == nil { - return priv, nil - } - - priv, err2 := x509.ParseECPrivateKey(input) - if err2 == nil { - return priv, nil - } - - return nil, fmt.Errorf("square/go-jose: parse error, got '%s', '%s' and '%s'", err0, err1, err2) -} diff --git a/vendor/github.com/square/go-jose/.gitcookies.sh.enc b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc similarity index 100% rename from vendor/github.com/square/go-jose/.gitcookies.sh.enc rename to vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc diff --git a/vendor/github.com/square/go-jose/.gitignore b/vendor/gopkg.in/square/go-jose.v2/.gitignore similarity index 100% rename from vendor/github.com/square/go-jose/.gitignore rename to vendor/gopkg.in/square/go-jose.v2/.gitignore diff --git a/vendor/github.com/square/go-jose/.travis.yml b/vendor/gopkg.in/square/go-jose.v2/.travis.yml similarity index 85% rename from vendor/github.com/square/go-jose/.travis.yml rename to vendor/gopkg.in/square/go-jose.v2/.travis.yml index b4622c6f2f3..c7f8f75ffb5 100644 --- a/vendor/github.com/square/go-jose/.travis.yml +++ b/vendor/gopkg.in/square/go-jose.v2/.travis.yml @@ -8,13 +8,15 @@ 
matrix: - go: tip go: -- 1.3 -- 1.4 - 1.5 - 1.6 - 1.7 +- 1.8 +- 1.9 - tip +go_import_path: gopkg.in/square/go-jose.v2 + before_script: - export PATH=$HOME/.local/bin:$PATH @@ -26,13 +28,15 @@ before_install: - bash .gitcookies.sh || true - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls +- go get github.com/stretchr/testify/assert - go get golang.org/x/tools/cmd/cover || true - go get code.google.com/p/go.tools/cmd/cover || true -- pip install cram --user `whoami` +- pip install cram --user script: - go test . -v -covermode=count -coverprofile=profile.cov - go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov +- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov - go test ./json -v # no coverage for forked encoding/json package - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t - cd .. diff --git a/vendor/github.com/square/go-jose/BUG-BOUNTY.md b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md similarity index 100% rename from vendor/github.com/square/go-jose/BUG-BOUNTY.md rename to vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md diff --git a/vendor/github.com/square/go-jose/BUILD b/vendor/gopkg.in/square/go-jose.v2/BUILD similarity index 64% rename from vendor/github.com/square/go-jose/BUILD rename to vendor/gopkg.in/square/go-jose.v2/BUILD index a7f8e736010..f5a1e7efa6f 100644 --- a/vendor/github.com/square/go-jose/BUILD +++ b/vendor/gopkg.in/square/go-jose.v2/BUILD @@ -13,13 +13,13 @@ go_library( "shared.go", "signing.go", "symmetric.go", - "utils.go", ], - importpath = "github.com/square/go-jose", + importpath = "gopkg.in/square/go-jose.v2", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/square/go-jose/cipher:go_default_library", - "//vendor/github.com/square/go-jose/json:go_default_library", + "//vendor/golang.org/x/crypto/ed25519:go_default_library", + "//vendor/gopkg.in/square/go-jose.v2/cipher:go_default_library", + 
"//vendor/gopkg.in/square/go-jose.v2/json:go_default_library", ], ) @@ -34,8 +34,8 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//vendor/github.com/square/go-jose/cipher:all-srcs", - "//vendor/github.com/square/go-jose/json:all-srcs", + "//vendor/gopkg.in/square/go-jose.v2/cipher:all-srcs", + "//vendor/gopkg.in/square/go-jose.v2/json:all-srcs", ], tags = ["automanaged"], visibility = ["//visibility:public"], diff --git a/vendor/github.com/square/go-jose/CONTRIBUTING.md b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md similarity index 100% rename from vendor/github.com/square/go-jose/CONTRIBUTING.md rename to vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md diff --git a/vendor/github.com/square/go-jose/LICENSE b/vendor/gopkg.in/square/go-jose.v2/LICENSE similarity index 100% rename from vendor/github.com/square/go-jose/LICENSE rename to vendor/gopkg.in/square/go-jose.v2/LICENSE diff --git a/vendor/gopkg.in/square/go-jose.v2/README.md b/vendor/gopkg.in/square/go-jose.v2/README.md new file mode 100644 index 00000000000..43bf1fbea4b --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/README.md @@ -0,0 +1,119 @@ +# Go JOSE + +[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) +[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) +[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) +[![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) +[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. 
This includes support for JSON Web Encryption, +JSON Web Signature, and JSON Web Token standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. In particular this software may not be +exported or re-exported in any form or on any media to Iran, North Sudan, +Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any +US maintained blocked list. + +## Overview + +The implementation follows the +[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519). +Tables of supported algorithms are shown below. The library supports both +the compact and full serialization formats, and has optional support for +multiple recipients. It also comes with a small command-line utility +([`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util)) +for dealing with JOSE messages in a shell. + +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. + +### Versions + +We use [gopkg.in](https://gopkg.in) for versioning. + +[Version 1](https://gopkg.in/square/go-jose.v1) is the old stable version: + + import "gopkg.in/square/go-jose.v1" + +[Version 2](https://gopkg.in/square/go-jose.v2) is for new development: + + import "gopkg.in/square/go-jose.v2" + +The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain +backwards compatible. 
No new feature development will take place on the `v1` branch, +however bug fixes and security fixes will be backported. + +The interface for [go-jose.v2](https://gopkg.in/square/go-jose.v2) is mostly +stable, but we suggest pinning to a particular revision for now as we still reserve +the right to make changes. New feature development happens on this branch. + +New in [go-jose.v2](https://gopkg.in/square/go-jose.v2) is a +[jwt](https://godoc.org/gopkg.in/square/go-jose.v2/jwt) sub-package +contributed by [@shaxbee](https://github.com/shaxbee). + +### Supported algorithms + +See below for a table of supported algorithms. Algorithm identifiers match +the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +standard where possible. The Godoc reference has a list of constants. + + Key encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSA-PKCS#1v1.5 | RSA1_5 + RSA-OAEP | RSA-OAEP, RSA-OAEP-256 + AES key wrap | A128KW, A192KW, A256KW + AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW + ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW + ECDH-ES (direct) | ECDH-ES1 + Direct encryption | dir1 + +1. Not supported in multi-recipient mode + + Signing / MAC | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 + RSASSA-PSS | PS256, PS384, PS512 + HMAC | HS256, HS384, HS512 + ECDSA | ES256, ES384, ES512 + + Content encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 + AES-GCM | A128GCM, A192GCM, A256GCM + + Compression | Algorithm identifiers(s) + :------------------------- | ------------------------------- + DEFLATE (RFC 1951) | DEF + +### Supported key types + +See below for a table of supported key types. 
These are understood by the +library, and can be passed to corresponding functions such as `NewEncrypter` or +`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which +allows attaching a key id. + + Algorithm(s) | Corresponding types + :------------------------- | ------------------------------- + RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) + ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) + AES, HMAC | []byte + +## Examples + +[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) +[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) + +Examples can be found in the Godoc +reference for this package. The +[`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util) +subdirectory also contains a small command-line utility which might be useful +as an example. 
diff --git a/vendor/github.com/square/go-jose/asymmetric.go b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go similarity index 85% rename from vendor/github.com/square/go-jose/asymmetric.go rename to vendor/gopkg.in/square/go-jose.v2/asymmetric.go index 5b3846d1063..15e9d11a27d 100644 --- a/vendor/github.com/square/go-jose/asymmetric.go +++ b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go @@ -28,7 +28,9 @@ import ( "fmt" "math/big" - "github.com/square/go-jose/cipher" + "golang.org/x/crypto/ed25519" + "gopkg.in/square/go-jose.v2/cipher" + "gopkg.in/square/go-jose.v2/json" ) // A generic RSA-based encrypter/verifier @@ -46,6 +48,10 @@ type ecEncrypterVerifier struct { publicKey *ecdsa.PublicKey } +type edEncrypterVerifier struct { + publicKey ed25519.PublicKey +} + // A key generator for ECDH-ES type ecKeyGenerator struct { size int @@ -58,6 +64,10 @@ type ecDecrypterSigner struct { privateKey *ecdsa.PrivateKey } +type edDecrypterSigner struct { + privateKey ed25519.PrivateKey +} + // newRSARecipient creates recipientKeyInfo based on the given key. 
func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { // Verify that key management algorithm is supported by this encrypter @@ -94,7 +104,7 @@ func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipi return recipientSigInfo{ sigAlg: sigAlg, - publicKey: &JsonWebKey{ + publicKey: &JSONWebKey{ Key: &privateKey.PublicKey, }, signer: &rsaDecrypterSigner{ @@ -103,6 +113,25 @@ func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipi }, nil } +func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { + if sigAlg != EdDSA { + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: &JSONWebKey{ + Key: privateKey.Public(), + }, + signer: &edDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + // newECDHRecipient creates recipientKeyInfo based on the given key. func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { // Verify that key management algorithm is supported by this encrypter @@ -139,7 +168,7 @@ func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (re return recipientSigInfo{ sigAlg: sigAlg, - publicKey: &JsonWebKey{ + publicKey: &JSONWebKey{ Key: &privateKey.PublicKey, }, signer: &ecDecrypterSigner{ @@ -178,7 +207,7 @@ func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, e // Decrypt the given payload and return the content encryption key. func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator) + return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) } // Decrypt the given payload. 
Based on the key encryption algorithm, @@ -366,10 +395,15 @@ func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + headers := rawHeader{ - Epk: &JsonWebKey{ - Key: &priv.PublicKey, - }, + headerEPK: makeRawMessage(b), } return out, headers, nil @@ -377,11 +411,15 @@ func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { // Decrypt the given payload and return the content encryption key. func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - if headers.Epk == nil { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("square/go-jose: invalid epk header") + } + if epk == nil { return nil, errors.New("square/go-jose: missing epk header") } - publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey) + publicKey, ok := epk.Key.(*ecdsa.PublicKey) if publicKey == nil || !ok { return nil, errors.New("square/go-jose: invalid epk header") } @@ -390,19 +428,26 @@ func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientI return nil, errors.New("square/go-jose: invalid public key in epk header") } - apuData := headers.Apu.bytes() - apvData := headers.Apv.bytes() + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("square/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("square/go-jose: invalid apv header") + } deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size) + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) } var keySize int - switch KeyAlgorithm(headers.Alg) { + algorithm := headers.getAlgorithm() + switch 
algorithm { case ECDH_ES: // ECDH-ES uses direct key agreement, no key unwrapping necessary. - return deriveKey(string(headers.Enc), generator.keySize()), nil + return deriveKey(string(headers.getEncryption()), generator.keySize()), nil case ECDH_ES_A128KW: keySize = 16 case ECDH_ES_A192KW: @@ -413,7 +458,7 @@ func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientI return nil, ErrUnsupportedAlgorithm } - key := deriveKey(headers.Alg, keySize) + key := deriveKey(string(algorithm), keySize) block, err := aes.NewCipher(key) if err != nil { return nil, err @@ -421,6 +466,32 @@ func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientI return josecipher.KeyUnwrap(block, recipient.encryptedKey) } +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(randReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("square/go-jose: ed25519 signature failed to verify") + } + return nil +} // Sign the given payload func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { @@ -457,7 +528,7 @@ func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) keyBytes := curveBits / 8 if curveBits%8 > 0 { - keyBytes += 1 + keyBytes++ } // We serialize the outpus (r and s) into big-endian byte arrays and pad diff --git a/vendor/github.com/square/go-jose/cipher/BUILD b/vendor/gopkg.in/square/go-jose.v2/cipher/BUILD similarity index 90% rename from 
vendor/github.com/square/go-jose/cipher/BUILD rename to vendor/gopkg.in/square/go-jose.v2/cipher/BUILD index f64ce98dbcd..fa0ad976750 100644 --- a/vendor/github.com/square/go-jose/cipher/BUILD +++ b/vendor/gopkg.in/square/go-jose.v2/cipher/BUILD @@ -8,7 +8,7 @@ go_library( "ecdh_es.go", "key_wrap.go", ], - importpath = "github.com/square/go-jose/cipher", + importpath = "gopkg.in/square/go-jose.v2/cipher", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/square/go-jose/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go similarity index 100% rename from vendor/github.com/square/go-jose/cipher/cbc_hmac.go rename to vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go diff --git a/vendor/github.com/square/go-jose/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go similarity index 100% rename from vendor/github.com/square/go-jose/cipher/concat_kdf.go rename to vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go diff --git a/vendor/github.com/square/go-jose/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go similarity index 96% rename from vendor/github.com/square/go-jose/cipher/ecdh_es.go rename to vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go index f23d49e1f65..c128e327f31 100644 --- a/vendor/github.com/square/go-jose/cipher/ecdh_es.go +++ b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go @@ -28,7 +28,7 @@ import ( // size may be at most 1<<16 bytes (64 KiB). 
func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { if size > 1<<16 { - panic("ECDH-ES output size too large, must be less than 1<<16") + panic("ECDH-ES output size too large, must be less than or equal to 1<<16") } // algId, partyUInfo, partyVInfo inputs must be prefixed with the length diff --git a/vendor/github.com/square/go-jose/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go similarity index 100% rename from vendor/github.com/square/go-jose/cipher/key_wrap.go rename to vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go diff --git a/vendor/gopkg.in/square/go-jose.v2/crypter.go b/vendor/gopkg.in/square/go-jose.v2/crypter.go new file mode 100644 index 00000000000..0681c81192d --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/crypter.go @@ -0,0 +1,510 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "reflect" + + "gopkg.in/square/go-jose.v2/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. 
+type Encrypter interface { + Encrypt(plaintext []byte) (*JSONWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) + Options() EncrypterOptions +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. +type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator + extraHeaders map[HeaderKey]interface{} +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// EncrypterOptions represents options that can be set on new encrypters. +type EncrypterOptions struct { + Compression CompressionAlgorithm + + // Optional map of additional keys to be inserted into the protected header + // of a JWS object. Some specifications which make use of JWS like to insert + // additional values here. All values must be JSON-serializable. + ExtraHeaders map[HeaderKey]interface{} +} + +// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it +// if necessary. It returns itself and so can be used in a fluent style. 
+func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { + if eo.ExtraHeaders == nil { + eo.ExtraHeaders = map[HeaderKey]interface{}{} + } + eo.ExtraHeaders[k] = v + return eo +} + +// WithContentType adds a content type ("cty") header and returns the updated +// EncrypterOptions. +func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderContentType, contentType) +} + +// WithType adds a type ("typ") header and returns the updated EncrypterOptions. +func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderType, typ) +} + +// Recipient represents an algorithm/key to encrypt messages to. +type Recipient struct { + Algorithm KeyAlgorithm + Key interface{} + KeyID string +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + 
recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if rcpts == nil || len(rcpts) == 0 { + return nil, fmt.Errorf("square/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in 
multi-recipient mode", recipient.Algorithm) + } + + recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + default: + return recipientKeyInfo{}, ErrUnsupportedKeyType + } +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + default: + return nil, ErrUnsupportedKeyType + } +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. 
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
+ obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. Note that this +// function does not support multi-recipient, if you desire multi-recipient +// decryption use DecryptMulti instead. 
+func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one") + } + + critical, err := headers.getCritical() + if err != nil { + return nil, fmt.Errorf("square/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return nil, fmt.Errorf("square/go-jose: unsupported crit header") + } + + decrypter, err := newDecrypter(decryptionKey) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + return plaintext, err +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. 
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + critical, err := globalHeaders.getCritical() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported crit header") + } + + decrypter, err := newDecrypter(decryptionKey) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil || err != nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. 
+ if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/github.com/square/go-jose/doc.go b/vendor/gopkg.in/square/go-jose.v2/doc.go similarity index 68% rename from vendor/github.com/square/go-jose/doc.go rename to vendor/gopkg.in/square/go-jose.v2/doc.go index f96acaa2328..dd1387f3f06 100644 --- a/vendor/github.com/square/go-jose/doc.go +++ b/vendor/gopkg.in/square/go-jose.v2/doc.go @@ -17,10 +17,11 @@ /* Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. For the moment, it mainly focuses on -encryption and signing based on the JSON Web Encryption and JSON Web Signature -standards. The library supports both the compact and full serialization -formats, and has optional support for multiple recipients. +and Encryption set of standards. It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON +Web Token support available in a sub-package. The library supports both the +compact and full serialization formats, and has optional support for multiple +recipients. 
*/ package jose diff --git a/vendor/github.com/square/go-jose/encoding.go b/vendor/gopkg.in/square/go-jose.v2/encoding.go similarity index 89% rename from vendor/github.com/square/go-jose/encoding.go rename to vendor/gopkg.in/square/go-jose.v2/encoding.go index 50ee88fdc42..9f37ef46509 100644 --- a/vendor/github.com/square/go-jose/encoding.go +++ b/vendor/gopkg.in/square/go-jose.v2/encoding.go @@ -21,29 +21,14 @@ import ( "compress/flate" "encoding/base64" "encoding/binary" + "encoding/json" "io" "math/big" "regexp" - "strings" - - "github.com/square/go-jose/json" ) var stripWhitespaceRegex = regexp.MustCompile("\\s") -// Url-safe base64 encode that strips padding -func base64URLEncode(data []byte) string { - var result = base64.URLEncoding.EncodeToString(data) - return strings.TrimRight(result, "=") -} - -// Url-safe base64 decoder that adds padding -func base64URLDecode(data string) ([]byte, error) { - var missing = (4 - len(data)%4) % 4 - data += strings.Repeat("=", missing) - return base64.URLEncoding.DecodeString(data) -} - // Helper function to serialize known-good objects. // Precondition: value is not a nil pointer. 
func mustSerializeJSON(value interface{}) []byte { @@ -162,7 +147,7 @@ func (b *byteBuffer) UnmarshalJSON(data []byte) error { return nil } - decoded, err := base64URLDecode(encoded) + decoded, err := base64.RawURLEncoding.DecodeString(encoded) if err != nil { return err } @@ -173,7 +158,7 @@ func (b *byteBuffer) UnmarshalJSON(data []byte) error { } func (b *byteBuffer) base64() string { - return base64URLEncode(b.data) + return base64.RawURLEncoding.EncodeToString(b.data) } func (b *byteBuffer) bytes() []byte { diff --git a/vendor/github.com/square/go-jose/json/BUILD b/vendor/gopkg.in/square/go-jose.v2/json/BUILD similarity index 91% rename from vendor/github.com/square/go-jose/json/BUILD rename to vendor/gopkg.in/square/go-jose.v2/json/BUILD index dcff08e0f26..a7ce83a17b2 100644 --- a/vendor/github.com/square/go-jose/json/BUILD +++ b/vendor/gopkg.in/square/go-jose.v2/json/BUILD @@ -10,7 +10,7 @@ go_library( "stream.go", "tags.go", ], - importpath = "github.com/square/go-jose/json", + importpath = "gopkg.in/square/go-jose.v2/json", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/square/go-jose/json/LICENSE b/vendor/gopkg.in/square/go-jose.v2/json/LICENSE similarity index 100% rename from vendor/github.com/square/go-jose/json/LICENSE rename to vendor/gopkg.in/square/go-jose.v2/json/LICENSE diff --git a/vendor/github.com/square/go-jose/json/README.md b/vendor/gopkg.in/square/go-jose.v2/json/README.md similarity index 100% rename from vendor/github.com/square/go-jose/json/README.md rename to vendor/gopkg.in/square/go-jose.v2/json/README.md diff --git a/vendor/github.com/square/go-jose/json/decode.go b/vendor/gopkg.in/square/go-jose.v2/json/decode.go similarity index 100% rename from vendor/github.com/square/go-jose/json/decode.go rename to vendor/gopkg.in/square/go-jose.v2/json/decode.go diff --git a/vendor/github.com/square/go-jose/json/encode.go b/vendor/gopkg.in/square/go-jose.v2/json/encode.go similarity index 100% rename from 
vendor/github.com/square/go-jose/json/encode.go rename to vendor/gopkg.in/square/go-jose.v2/json/encode.go diff --git a/vendor/github.com/square/go-jose/json/indent.go b/vendor/gopkg.in/square/go-jose.v2/json/indent.go similarity index 100% rename from vendor/github.com/square/go-jose/json/indent.go rename to vendor/gopkg.in/square/go-jose.v2/json/indent.go diff --git a/vendor/github.com/square/go-jose/json/scanner.go b/vendor/gopkg.in/square/go-jose.v2/json/scanner.go similarity index 100% rename from vendor/github.com/square/go-jose/json/scanner.go rename to vendor/gopkg.in/square/go-jose.v2/json/scanner.go diff --git a/vendor/github.com/square/go-jose/json/stream.go b/vendor/gopkg.in/square/go-jose.v2/json/stream.go similarity index 100% rename from vendor/github.com/square/go-jose/json/stream.go rename to vendor/gopkg.in/square/go-jose.v2/json/stream.go diff --git a/vendor/github.com/square/go-jose/json/tags.go b/vendor/gopkg.in/square/go-jose.v2/json/tags.go similarity index 100% rename from vendor/github.com/square/go-jose/json/tags.go rename to vendor/gopkg.in/square/go-jose.v2/json/tags.go diff --git a/vendor/github.com/square/go-jose/jwe.go b/vendor/gopkg.in/square/go-jose.v2/jwe.go similarity index 71% rename from vendor/github.com/square/go-jose/jwe.go rename to vendor/gopkg.in/square/go-jose.v2/jwe.go index 686397a738e..f2176cfb0b9 100644 --- a/vendor/github.com/square/go-jose/jwe.go +++ b/vendor/gopkg.in/square/go-jose.v2/jwe.go @@ -17,14 +17,14 @@ package jose import ( + "encoding/base64" + "encoding/json" "fmt" "strings" - - "github.com/square/go-jose/json" ) -// rawJsonWebEncryption represents a raw JWE JSON object. Used for parsing/serializing. -type rawJsonWebEncryption struct { +// rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing. 
+type rawJSONWebEncryption struct { Protected *byteBuffer `json:"protected,omitempty"` Unprotected *rawHeader `json:"unprotected,omitempty"` Header *rawHeader `json:"header,omitempty"` @@ -42,13 +42,13 @@ type rawRecipientInfo struct { EncryptedKey string `json:"encrypted_key,omitempty"` } -// JsonWebEncryption represents an encrypted JWE object after parsing. -type JsonWebEncryption struct { - Header JoseHeader +// JSONWebEncryption represents an encrypted JWE object after parsing. +type JSONWebEncryption struct { + Header Header protected, unprotected *rawHeader recipients []recipientInfo aad, iv, ciphertext, tag []byte - original *rawJsonWebEncryption + original *rawJSONWebEncryption } // recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing. @@ -58,7 +58,7 @@ type recipientInfo struct { } // GetAuthData retrieves the (optional) authenticated data attached to the object. -func (obj JsonWebEncryption) GetAuthData() []byte { +func (obj JSONWebEncryption) GetAuthData() []byte { if obj.aad != nil { out := make([]byte, len(obj.aad)) copy(out, obj.aad) @@ -69,7 +69,7 @@ func (obj JsonWebEncryption) GetAuthData() []byte { } // Get the merged header values -func (obj JsonWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader { +func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader { out := rawHeader{} out.merge(obj.protected) out.merge(obj.unprotected) @@ -82,26 +82,26 @@ func (obj JsonWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader { } // Get the additional authenticated data from a JWE object. 
-func (obj JsonWebEncryption) computeAuthData() []byte { +func (obj JSONWebEncryption) computeAuthData() []byte { var protected string if obj.original != nil { protected = obj.original.Protected.base64() } else { - protected = base64URLEncode(mustSerializeJSON((obj.protected))) + protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON((obj.protected))) } output := []byte(protected) if obj.aad != nil { output = append(output, '.') - output = append(output, []byte(base64URLEncode(obj.aad))...) + output = append(output, []byte(base64.RawURLEncoding.EncodeToString(obj.aad))...) } return output } // ParseEncrypted parses an encrypted message in compact or full serialization format. -func ParseEncrypted(input string) (*JsonWebEncryption, error) { +func ParseEncrypted(input string) (*JSONWebEncryption, error) { input = stripWhitespace(input) if strings.HasPrefix(input, "{") { return parseEncryptedFull(input) @@ -111,8 +111,8 @@ func ParseEncrypted(input string) (*JsonWebEncryption, error) { } // parseEncryptedFull parses a message in compact format. -func parseEncryptedFull(input string) (*JsonWebEncryption, error) { - var parsed rawJsonWebEncryption +func parseEncryptedFull(input string) (*JSONWebEncryption, error) { + var parsed rawJSONWebEncryption err := json.Unmarshal([]byte(input), &parsed) if err != nil { return nil, err @@ -122,16 +122,22 @@ func parseEncryptedFull(input string) (*JsonWebEncryption, error) { } // sanitized produces a cleaned-up JWE object from the raw JSON. 
-func (parsed *rawJsonWebEncryption) sanitized() (*JsonWebEncryption, error) { - obj := &JsonWebEncryption{ +func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{ original: parsed, unprotected: parsed.Unprotected, } // Check that there is not a nonce in the unprotected headers - if (parsed.Unprotected != nil && parsed.Unprotected.Nonce != "") || - (parsed.Header != nil && parsed.Header.Nonce != "") { - return nil, ErrUnprotectedNonce + if parsed.Unprotected != nil { + if nonce := parsed.Unprotected.getNonce(); nonce != "" { + return nil, ErrUnprotectedNonce + } + } + if parsed.Header != nil { + if nonce := parsed.Header.getNonce(); nonce != "" { + return nil, ErrUnprotectedNonce + } } if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 { @@ -143,11 +149,16 @@ func (parsed *rawJsonWebEncryption) sanitized() (*JsonWebEncryption, error) { // Note: this must be called _after_ we parse the protected header, // otherwise fields from the protected header will not get picked up. 
- obj.Header = obj.mergedHeaders(nil).sanitized() + var err error + mergedHeaders := obj.mergedHeaders(nil) + obj.Header, err = mergedHeaders.sanitized() + if err != nil { + return nil, fmt.Errorf("square/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders) + } if len(parsed.Recipients) == 0 { obj.recipients = []recipientInfo{ - recipientInfo{ + { header: parsed.Header, encryptedKey: parsed.EncryptedKey.bytes(), }, @@ -155,13 +166,13 @@ func (parsed *rawJsonWebEncryption) sanitized() (*JsonWebEncryption, error) { } else { obj.recipients = make([]recipientInfo, len(parsed.Recipients)) for r := range parsed.Recipients { - encryptedKey, err := base64URLDecode(parsed.Recipients[r].EncryptedKey) + encryptedKey, err := base64.RawURLEncoding.DecodeString(parsed.Recipients[r].EncryptedKey) if err != nil { return nil, err } // Check that there is not a nonce in the unprotected header - if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.Nonce != "" { + if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.getNonce() != "" { return nil, ErrUnprotectedNonce } @@ -172,7 +183,7 @@ func (parsed *rawJsonWebEncryption) sanitized() (*JsonWebEncryption, error) { for _, recipient := range obj.recipients { headers := obj.mergedHeaders(&recipient) - if headers.Alg == "" || headers.Enc == "" { + if headers.getAlgorithm() == "" || headers.getEncryption() == "" { return nil, fmt.Errorf("square/go-jose: message is missing alg/enc headers") } } @@ -186,38 +197,38 @@ func (parsed *rawJsonWebEncryption) sanitized() (*JsonWebEncryption, error) { } // parseEncryptedCompact parses a message in compact format. 
-func parseEncryptedCompact(input string) (*JsonWebEncryption, error) { +func parseEncryptedCompact(input string) (*JSONWebEncryption, error) { parts := strings.Split(input, ".") if len(parts) != 5 { return nil, fmt.Errorf("square/go-jose: compact JWE format must have five parts") } - rawProtected, err := base64URLDecode(parts[0]) + rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) if err != nil { return nil, err } - encryptedKey, err := base64URLDecode(parts[1]) + encryptedKey, err := base64.RawURLEncoding.DecodeString(parts[1]) if err != nil { return nil, err } - iv, err := base64URLDecode(parts[2]) + iv, err := base64.RawURLEncoding.DecodeString(parts[2]) if err != nil { return nil, err } - ciphertext, err := base64URLDecode(parts[3]) + ciphertext, err := base64.RawURLEncoding.DecodeString(parts[3]) if err != nil { return nil, err } - tag, err := base64URLDecode(parts[4]) + tag, err := base64.RawURLEncoding.DecodeString(parts[4]) if err != nil { return nil, err } - raw := &rawJsonWebEncryption{ + raw := &rawJSONWebEncryption{ Protected: newBuffer(rawProtected), EncryptedKey: newBuffer(encryptedKey), Iv: newBuffer(iv), @@ -229,7 +240,7 @@ func parseEncryptedCompact(input string) (*JsonWebEncryption, error) { } // CompactSerialize serializes an object using the compact serialization format. 
-func (obj JsonWebEncryption) CompactSerialize() (string, error) { +func (obj JSONWebEncryption) CompactSerialize() (string, error) { if len(obj.recipients) != 1 || obj.unprotected != nil || obj.protected == nil || obj.recipients[0].header != nil { return "", ErrNotSupported @@ -239,16 +250,16 @@ func (obj JsonWebEncryption) CompactSerialize() (string, error) { return fmt.Sprintf( "%s.%s.%s.%s.%s", - base64URLEncode(serializedProtected), - base64URLEncode(obj.recipients[0].encryptedKey), - base64URLEncode(obj.iv), - base64URLEncode(obj.ciphertext), - base64URLEncode(obj.tag)), nil + base64.RawURLEncoding.EncodeToString(serializedProtected), + base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey), + base64.RawURLEncoding.EncodeToString(obj.iv), + base64.RawURLEncoding.EncodeToString(obj.ciphertext), + base64.RawURLEncoding.EncodeToString(obj.tag)), nil } // FullSerialize serializes an object using the full JSON serialization format. -func (obj JsonWebEncryption) FullSerialize() string { - raw := rawJsonWebEncryption{ +func (obj JSONWebEncryption) FullSerialize() string { + raw := rawJSONWebEncryption{ Unprotected: obj.unprotected, Iv: newBuffer(obj.iv), Ciphertext: newBuffer(obj.ciphertext), @@ -262,7 +273,7 @@ func (obj JsonWebEncryption) FullSerialize() string { for _, recipient := range obj.recipients { info := rawRecipientInfo{ Header: recipient.header, - EncryptedKey: base64URLEncode(recipient.encryptedKey), + EncryptedKey: base64.RawURLEncoding.EncodeToString(recipient.encryptedKey), } raw.Recipients = append(raw.Recipients, info) } diff --git a/vendor/github.com/square/go-jose/jwk.go b/vendor/gopkg.in/square/go-jose.v2/jwk.go similarity index 70% rename from vendor/github.com/square/go-jose/jwk.go rename to vendor/gopkg.in/square/go-jose.v2/jwk.go index 5f5464a3d55..8e8f9e7f797 100644 --- a/vendor/github.com/square/go-jose/jwk.go +++ b/vendor/gopkg.in/square/go-jose.v2/jwk.go @@ -29,11 +29,13 @@ import ( "reflect" "strings" - 
"github.com/square/go-jose/json" + "golang.org/x/crypto/ed25519" + + "gopkg.in/square/go-jose.v2/json" ) -// rawJsonWebKey represents a public or private key in JWK format, used for parsing/serializing. -type rawJsonWebKey struct { +// rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing. +type rawJSONWebKey struct { Use string `json:"use,omitempty"` Kty string `json:"kty,omitempty"` Kid string `json:"kid,omitempty"` @@ -58,8 +60,8 @@ type rawJsonWebKey struct { X5c []string `json:"x5c,omitempty"` } -// JsonWebKey represents a public or private key in JWK format. -type JsonWebKey struct { +// JSONWebKey represents a public or private key in JWK format. +type JSONWebKey struct { Key interface{} Certificates []*x509.Certificate KeyID string @@ -68,15 +70,19 @@ type JsonWebKey struct { } // MarshalJSON serializes the given key to its JSON representation. -func (k JsonWebKey) MarshalJSON() ([]byte, error) { - var raw *rawJsonWebKey +func (k JSONWebKey) MarshalJSON() ([]byte, error) { + var raw *rawJSONWebKey var err error switch key := k.Key.(type) { + case ed25519.PublicKey: + raw = fromEdPublicKey(key) case *ecdsa.PublicKey: raw, err = fromEcPublicKey(key) case *rsa.PublicKey: raw = fromRsaPublicKey(key) + case ed25519.PrivateKey: + raw, err = fromEdPrivateKey(key) case *ecdsa.PrivateKey: raw, err = fromEcPrivateKey(key) case *rsa.PrivateKey: @@ -103,8 +109,8 @@ func (k JsonWebKey) MarshalJSON() ([]byte, error) { } // UnmarshalJSON reads a key from its JSON representation. 
-func (k *JsonWebKey) UnmarshalJSON(data []byte) (err error) { - var raw rawJsonWebKey +func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { + var raw rawJSONWebKey err = json.Unmarshal(data, &raw) if err != nil { return err @@ -126,12 +132,22 @@ func (k *JsonWebKey) UnmarshalJSON(data []byte) (err error) { } case "oct": key, err = raw.symmetricKey() + case "OKP": + if raw.Crv == "Ed25519" && raw.X != nil { + if raw.D != nil { + key, err = raw.edPrivateKey() + } else { + key, err = raw.edPublicKey() + } + } else { + err = fmt.Errorf("square/go-jose: unknown curve '%s'", raw.Crv) + } default: err = fmt.Errorf("square/go-jose: unknown json web key type '%s'", raw.Kty) } if err == nil { - *k = JsonWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use} + *k = JSONWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use} } k.Certificates = make([]*x509.Certificate, len(raw.X5c)) @@ -149,17 +165,17 @@ func (k *JsonWebKey) UnmarshalJSON(data []byte) (err error) { return } -// JsonWebKeySet represents a JWK Set object. -type JsonWebKeySet struct { - Keys []JsonWebKey `json:"keys"` +// JSONWebKeySet represents a JWK Set object. +type JSONWebKeySet struct { + Keys []JSONWebKey `json:"keys"` } // Key convenience method returns keys by key ID. Specification states // that a JWK Set "SHOULD" use distinct key IDs, but allows for some // cases where they are not distinct. Hence method returns a slice -// of JsonWebKeys. +// of JSONWebKeys. 
+func (s *JSONWebKeySet) Key(kid string) []JSONWebKey { + var keys []JSONWebKey + for _, key := range s.Keys { + if key.KeyID == kid { + keys = append(keys, key) + } @@ -171,6 +187,7 @@ func (s *JsonWebKeySet) Key(kid string) []JsonWebKey { const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}` const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}` +const edThumbprintTemplate = `{"crv":"%s","kty":"OKP","x":"%s"}` func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) { coordLength := curveSize(curve) @@ -190,12 +207,20 @@ func rsaThumbprintInput(n *big.Int, e int) (string, error) { newBuffer(n.Bytes()).base64()), nil } +func edThumbprintInput(ed ed25519.PublicKey) (string, error) { + crv := "Ed25519" + return fmt.Sprintf(edThumbprintTemplate, crv, + newFixedSizeBuffer(ed, 32).base64()), nil +} + // Thumbprint computes the JWK Thumbprint of a key using the // indicated hash algorithm. -func (k *JsonWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { +func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { var input string var err error switch key := k.Key.(type) { + case ed25519.PublicKey: + input, err = edThumbprintInput(key) case *ecdsa.PublicKey: input, err = ecThumbprintInput(key.Curve, key.X, key.Y) case *ecdsa.PrivateKey: @@ -204,6 +229,8 @@ func (k *JsonWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { input, err = rsaThumbprintInput(key.N, key.E) case *rsa.PrivateKey: input, err = rsaThumbprintInput(key.N, key.E) + case ed25519.PrivateKey: + input, err = edThumbprintInput(ed25519.PublicKey(key[0:32])) default: return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key)) } @@ -217,8 +244,18 @@ func (k *JsonWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { return h.Sum(nil), nil } -// Valid checks that the key contains the expected parameters -func (k *JsonWebKey) Valid() bool { +// IsPublic returns true if the JWK represents a public key (not symmetric, not private). 
+func (k *JSONWebKey) IsPublic() bool { + switch k.Key.(type) { + case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey: + return true + default: + return false + } +} + +// Valid checks that the key contains the expected parameters. +func (k *JSONWebKey) Valid() bool { if k.Key == nil { return false } @@ -239,13 +276,21 @@ func (k *JsonWebKey) Valid() bool { if key.N == nil || key.E == 0 || key.D == nil || len(key.Primes) < 2 { return false } + case ed25519.PublicKey: + if len(key) != 32 { + return false + } + case ed25519.PrivateKey: + if len(key) != 64 { + return false + } default: return false } return true } -func (key rawJsonWebKey) rsaPublicKey() (*rsa.PublicKey, error) { +func (key rawJSONWebKey) rsaPublicKey() (*rsa.PublicKey, error) { if key.N == nil || key.E == nil { return nil, fmt.Errorf("square/go-jose: invalid RSA key, missing n/e values") } @@ -256,15 +301,23 @@ func (key rawJsonWebKey) rsaPublicKey() (*rsa.PublicKey, error) { }, nil } -func fromRsaPublicKey(pub *rsa.PublicKey) *rawJsonWebKey { - return &rawJsonWebKey{ +func fromEdPublicKey(pub ed25519.PublicKey) *rawJSONWebKey { + return &rawJSONWebKey{ + Kty: "OKP", + Crv: "Ed25519", + X: newBuffer(pub), + } +} + +func fromRsaPublicKey(pub *rsa.PublicKey) *rawJSONWebKey { + return &rawJSONWebKey{ Kty: "RSA", N: newBuffer(pub.N.Bytes()), E: newBufferFromInt(uint64(pub.E)), } } -func (key rawJsonWebKey) ecPublicKey() (*ecdsa.PublicKey, error) { +func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) { var curve elliptic.Curve switch key.Crv { case "P-256": @@ -295,7 +348,7 @@ func (key rawJsonWebKey) ecPublicKey() (*ecdsa.PublicKey, error) { }, nil } -func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJsonWebKey, error) { +func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) { if pub == nil || pub.X == nil || pub.Y == nil { return nil, fmt.Errorf("square/go-jose: invalid EC key (nil, or X/Y missing)") } @@ -314,7 +367,7 @@ func fromEcPublicKey(pub *ecdsa.PublicKey) 
(*rawJsonWebKey, error) { return nil, fmt.Errorf("square/go-jose: invalid EC key (X/Y too large)") } - key := &rawJsonWebKey{ + key := &rawJSONWebKey{ Kty: "EC", Crv: name, X: newFixedSizeBuffer(xBytes, size), @@ -324,7 +377,37 @@ func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJsonWebKey, error) { return key, nil } -func (key rawJsonWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) { +func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) { + var missing []string + switch { + case key.D == nil: + missing = append(missing, "D") + case key.X == nil: + missing = append(missing, "X") + } + + if len(missing) > 0 { + return nil, fmt.Errorf("square/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", ")) + } + + privateKey := make([]byte, ed25519.PrivateKeySize) + copy(privateKey[0:32], key.X.bytes()) + copy(privateKey[32:], key.D.bytes()) + rv := ed25519.PrivateKey(privateKey) + return rv, nil +} + +func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) { + if key.X == nil { + return nil, fmt.Errorf("square/go-jose: invalid Ed key, missing x value") + } + publicKey := make([]byte, ed25519.PublicKeySize) + copy(publicKey[0:32], key.X.bytes()) + rv := ed25519.PublicKey(publicKey) + return rv, nil +} + +func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) { var missing []string switch { case key.N == nil: @@ -369,7 +452,14 @@ func (key rawJsonWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) { return rv, err } -func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJsonWebKey, error) { +func fromEdPrivateKey(ed ed25519.PrivateKey) (*rawJSONWebKey, error) { + raw := fromEdPublicKey(ed25519.PublicKey(ed[0:32])) + + raw.D = newBuffer(ed[32:]) + return raw, nil +} + +func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJSONWebKey, error) { if len(rsa.Primes) != 2 { return nil, ErrUnsupportedKeyType } @@ -383,7 +473,7 @@ func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJsonWebKey, error) { return raw, nil } -func (key 
rawJsonWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) { +func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) { var curve elliptic.Curve switch key.Crv { case "P-256": @@ -417,7 +507,7 @@ func (key rawJsonWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) { }, nil } -func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJsonWebKey, error) { +func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJSONWebKey, error) { raw, err := fromEcPublicKey(&ec.PublicKey) if err != nil { return nil, err @@ -432,14 +522,14 @@ func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJsonWebKey, error) { return raw, nil } -func fromSymmetricKey(key []byte) (*rawJsonWebKey, error) { - return &rawJsonWebKey{ +func fromSymmetricKey(key []byte) (*rawJSONWebKey, error) { + return &rawJSONWebKey{ Kty: "oct", K: newBuffer(key), }, nil } -func (key rawJsonWebKey) symmetricKey() ([]byte, error) { +func (key rawJSONWebKey) symmetricKey() ([]byte, error) { if key.K == nil { return nil, fmt.Errorf("square/go-jose: invalid OCT (symmetric) key, missing k value") } diff --git a/vendor/github.com/square/go-jose/jws.go b/vendor/gopkg.in/square/go-jose.v2/jws.go similarity index 70% rename from vendor/github.com/square/go-jose/jws.go rename to vendor/gopkg.in/square/go-jose.v2/jws.go index 4b60bd29dcd..5e23a91b04b 100644 --- a/vendor/github.com/square/go-jose/jws.go +++ b/vendor/gopkg.in/square/go-jose.v2/jws.go @@ -17,14 +17,16 @@ package jose import ( + "encoding/base64" + "errors" "fmt" "strings" - "github.com/square/go-jose/json" + "gopkg.in/square/go-jose.v2/json" ) -// rawJsonWebSignature represents a raw JWS JSON object. Used for parsing/serializing. -type rawJsonWebSignature struct { +// rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing. 
+type rawJSONWebSignature struct { Payload *byteBuffer `json:"payload,omitempty"` Signatures []rawSignatureInfo `json:"signatures,omitempty"` Protected *byteBuffer `json:"protected,omitempty"` @@ -39,16 +41,19 @@ type rawSignatureInfo struct { Signature *byteBuffer `json:"signature,omitempty"` } -// JsonWebSignature represents a signed JWS object after parsing. -type JsonWebSignature struct { - payload []byte +// JSONWebSignature represents a signed JWS object after parsing. +type JSONWebSignature struct { + payload []byte + // Signatures attached to this object (may be more than one for multi-sig). + // Be careful about accessing these directly, prefer to use Verify() or + // VerifyMulti() to ensure that the data you're getting is verified. Signatures []Signature } // Signature represents a single signature over the JWS payload and protected header. type Signature struct { // Header fields, such as the signature algorithm - Header JoseHeader + Header Header // The actual signature value Signature []byte @@ -59,7 +64,7 @@ type Signature struct { } // ParseSigned parses a signed message in compact or full serialization format. 
-func ParseSigned(input string) (*JsonWebSignature, error) { +func ParseSigned(input string) (*JSONWebSignature, error) { input = stripWhitespace(input) if strings.HasPrefix(input, "{") { return parseSignedFull(input) @@ -77,25 +82,25 @@ func (sig Signature) mergedHeaders() rawHeader { } // Compute data to be signed -func (obj JsonWebSignature) computeAuthData(signature *Signature) []byte { +func (obj JSONWebSignature) computeAuthData(signature *Signature) []byte { var serializedProtected string if signature.original != nil && signature.original.Protected != nil { serializedProtected = signature.original.Protected.base64() } else if signature.protected != nil { - serializedProtected = base64URLEncode(mustSerializeJSON(signature.protected)) + serializedProtected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON(signature.protected)) } else { serializedProtected = "" } return []byte(fmt.Sprintf("%s.%s", serializedProtected, - base64URLEncode(obj.payload))) + base64.RawURLEncoding.EncodeToString(obj.payload))) } // parseSignedFull parses a message in full format. -func parseSignedFull(input string) (*JsonWebSignature, error) { - var parsed rawJsonWebSignature +func parseSignedFull(input string) (*JSONWebSignature, error) { + var parsed rawJSONWebSignature err := json.Unmarshal([]byte(input), &parsed) if err != nil { return nil, err @@ -105,12 +110,12 @@ func parseSignedFull(input string) (*JsonWebSignature, error) { } // sanitized produces a cleaned-up JWS object from the raw JSON. 
-func (parsed *rawJsonWebSignature) sanitized() (*JsonWebSignature, error) { +func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) { if parsed.Payload == nil { return nil, fmt.Errorf("square/go-jose: missing payload in JWS message") } - obj := &JsonWebSignature{ + obj := &JSONWebSignature{ payload: parsed.Payload.bytes(), Signatures: make([]Signature, len(parsed.Signatures)), } @@ -126,7 +131,8 @@ func (parsed *rawJsonWebSignature) sanitized() (*JsonWebSignature, error) { } } - if parsed.Header != nil && parsed.Header.Nonce != "" { + // Check that there is not a nonce in the unprotected header + if parsed.Header != nil && parsed.Header.getNonce() != "" { return nil, ErrUnprotectedNonce } @@ -147,7 +153,18 @@ func (parsed *rawJsonWebSignature) sanitized() (*JsonWebSignature, error) { Signature: parsed.Signature, } - signature.Header = signature.mergedHeaders().sanitized() + var err error + signature.Header, err = signature.mergedHeaders().sanitized() + if err != nil { + return nil, err + } + + // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded. + jwk := signature.Header.JSONWebKey + if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) { + return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key") + } + obj.Signatures = append(obj.Signatures, signature) } @@ -161,46 +178,57 @@ func (parsed *rawJsonWebSignature) sanitized() (*JsonWebSignature, error) { } // Check that there is not a nonce in the unprotected header - if sig.Header != nil && sig.Header.Nonce != "" { + if sig.Header != nil && sig.Header.getNonce() != "" { return nil, ErrUnprotectedNonce } + var err error + obj.Signatures[i].Header, err = obj.Signatures[i].mergedHeaders().sanitized() + if err != nil { + return nil, err + } + obj.Signatures[i].Signature = sig.Signature.bytes() + // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded. 
+ jwk := obj.Signatures[i].Header.JSONWebKey + if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) { + return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key") + } + // Copy value of sig original := sig obj.Signatures[i].header = sig.Header obj.Signatures[i].original = &original - obj.Signatures[i].Header = obj.Signatures[i].mergedHeaders().sanitized() } return obj, nil } // parseSignedCompact parses a message in compact format. -func parseSignedCompact(input string) (*JsonWebSignature, error) { +func parseSignedCompact(input string) (*JSONWebSignature, error) { parts := strings.Split(input, ".") if len(parts) != 3 { return nil, fmt.Errorf("square/go-jose: compact JWS format must have three parts") } - rawProtected, err := base64URLDecode(parts[0]) + rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) if err != nil { return nil, err } - payload, err := base64URLDecode(parts[1]) + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) if err != nil { return nil, err } - signature, err := base64URLDecode(parts[2]) + signature, err := base64.RawURLEncoding.DecodeString(parts[2]) if err != nil { return nil, err } - raw := &rawJsonWebSignature{ + raw := &rawJSONWebSignature{ Payload: newBuffer(payload), Protected: newBuffer(rawProtected), Signature: newBuffer(signature), @@ -209,7 +237,7 @@ func parseSignedCompact(input string) (*JsonWebSignature, error) { } // CompactSerialize serializes an object using the compact serialization format. 
-func (obj JsonWebSignature) CompactSerialize() (string, error) { +func (obj JSONWebSignature) CompactSerialize() (string, error) { if len(obj.Signatures) != 1 || obj.Signatures[0].header != nil || obj.Signatures[0].protected == nil { return "", ErrNotSupported } @@ -218,14 +246,14 @@ func (obj JsonWebSignature) CompactSerialize() (string, error) { return fmt.Sprintf( "%s.%s.%s", - base64URLEncode(serializedProtected), - base64URLEncode(obj.payload), - base64URLEncode(obj.Signatures[0].Signature)), nil + base64.RawURLEncoding.EncodeToString(serializedProtected), + base64.RawURLEncoding.EncodeToString(obj.payload), + base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)), nil } // FullSerialize serializes an object using the full JSON serialization format. -func (obj JsonWebSignature) FullSerialize() string { - raw := rawJsonWebSignature{ +func (obj JSONWebSignature) FullSerialize() string { + raw := rawJSONWebSignature{ Payload: newBuffer(obj.payload), } diff --git a/vendor/gopkg.in/square/go-jose.v2/shared.go b/vendor/gopkg.in/square/go-jose.v2/shared.go new file mode 100644 index 00000000000..4c19dc382b7 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/shared.go @@ -0,0 +1,417 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/elliptic" + "errors" + "fmt" + + "gopkg.in/square/go-jose.v2/json" +) + +// KeyAlgorithm represents a key management algorithm. 
+type KeyAlgorithm string + +// SignatureAlgorithm represents a signature (or MAC) algorithm. +type SignatureAlgorithm string + +// ContentEncryption represents a content encryption algorithm. +type ContentEncryption string + +// CompressionAlgorithm represents an algorithm used for plaintext compression. +type CompressionAlgorithm string + +// ContentType represents type of the contained data. +type ContentType string + +var ( + // ErrCryptoFailure represents an error in cryptographic primitive. This + // occurs when, for example, a message had an invalid authentication tag or + // could not be decrypted. + ErrCryptoFailure = errors.New("square/go-jose: error in cryptographic primitive") + + // ErrUnsupportedAlgorithm indicates that a selected algorithm is not + // supported. This occurs when trying to instantiate an encrypter for an + // algorithm that is not yet implemented. + ErrUnsupportedAlgorithm = errors.New("square/go-jose: unknown/unsupported algorithm") + + // ErrUnsupportedKeyType indicates that the given key type/format is not + // supported. This occurs when trying to instantiate an encrypter and passing + // it a key of an unrecognized type or with unsupported parameters, such as + // an RSA private key with more than two primes. + ErrUnsupportedKeyType = errors.New("square/go-jose: unsupported key type/format") + + // ErrNotSupported serialization of object is not supported. This occurs when + // trying to compact-serialize an object which can't be represented in + // compact form. + ErrNotSupported = errors.New("square/go-jose: compact serialization not supported for object") + + // ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a + // nonce header parameter was included in an unprotected header object. 
+ ErrUnprotectedNonce = errors.New("square/go-jose: Nonce parameter included in unprotected header") +) + +// Key management algorithms +const ( + ED25519 = KeyAlgorithm("ED25519") + RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5 + RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1 + RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256 + A128KW = KeyAlgorithm("A128KW") // AES key wrap (128) + A192KW = KeyAlgorithm("A192KW") // AES key wrap (192) + A256KW = KeyAlgorithm("A256KW") // AES key wrap (256) + DIRECT = KeyAlgorithm("dir") // Direct encryption + ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES + ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128) + ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192) + ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256) + A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128) + A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192) + A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256) + PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128) + PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192) + PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256) +) + +// Signature algorithms +const ( + EdDSA = SignatureAlgorithm("EdDSA") + HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256 + HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384 + HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512 + RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256 + RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384 + RS512 = SignatureAlgorithm("RS512") // RSASSA-PKCS-v1.5 using SHA-512 + ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256 + ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384 + ES512 = 
SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512 + PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256 + PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384 + PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512 +) + +// Content encryption algorithms +const ( + A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128) + A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192) + A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256) + A128GCM = ContentEncryption("A128GCM") // AES-GCM (128) + A192GCM = ContentEncryption("A192GCM") // AES-GCM (192) + A256GCM = ContentEncryption("A256GCM") // AES-GCM (256) +) + +// Compression algorithms +const ( + NONE = CompressionAlgorithm("") // No compression + DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951) +) + +// A key in the protected header of a JWS object. Use of the Header... +// constants is preferred to enhance type safety. +type HeaderKey string + +const ( + HeaderType HeaderKey = "typ" // string + HeaderContentType = "cty" // string + + // These are set by go-jose and shouldn't need to be set by consumers of the + // library. + headerAlgorithm = "alg" // string + headerEncryption = "enc" // ContentEncryption + headerCompression = "zip" // CompressionAlgorithm + headerCritical = "crit" // []string + + headerAPU = "apu" // *byteBuffer + headerAPV = "apv" // *byteBuffer + headerEPK = "epk" // *JSONWebKey + headerIV = "iv" // *byteBuffer + headerTag = "tag" // *byteBuffer + + headerJWK = "jwk" // *JSONWebKey + headerKeyID = "kid" // string + headerNonce = "nonce" // string +) + +// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing). 
+// +// The decoding of the constituent items is deferred because we want to marshal +// some members into particular structs rather than generic maps, but at the +// same time we need to receive any extra fields unhandled by this library to +// pass through to consuming code in case it wants to examine them. +type rawHeader map[HeaderKey]*json.RawMessage + +// Header represents the read-only JOSE header for JWE/JWS objects. +type Header struct { + KeyID string + JSONWebKey *JSONWebKey + Algorithm string + Nonce string + + // Any headers not recognised above get unmarshaled from JSON in a generic + // manner and placed in this map. + ExtraHeaders map[HeaderKey]interface{} +} + +func (parsed rawHeader) set(k HeaderKey, v interface{}) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + parsed[k] = makeRawMessage(b) + return nil +} + +// getString gets a string from the raw JSON, defaulting to "". +func (parsed rawHeader) getString(k HeaderKey) string { + v, ok := parsed[k] + if !ok { + return "" + } + var s string + err := json.Unmarshal(*v, &s) + if err != nil { + return "" + } + return s +} + +// getByteBuffer gets a byte buffer from the raw JSON. Returns (nil, nil) if +// not specified. +func (parsed rawHeader) getByteBuffer(k HeaderKey) (*byteBuffer, error) { + v := parsed[k] + if v == nil { + return nil, nil + } + var bb *byteBuffer + err := json.Unmarshal(*v, &bb) + if err != nil { + return nil, err + } + return bb, nil +} + +// getAlgorithm extracts parsed "alg" from the raw JSON as a KeyAlgorithm. +func (parsed rawHeader) getAlgorithm() KeyAlgorithm { + return KeyAlgorithm(parsed.getString(headerAlgorithm)) +} + +// getSignatureAlgorithm extracts parsed "alg" from the raw JSON as a SignatureAlgorithm. +func (parsed rawHeader) getSignatureAlgorithm() SignatureAlgorithm { + return SignatureAlgorithm(parsed.getString(headerAlgorithm)) +} + +// getEncryption extracts parsed "enc" from the raw JSON. 
+func (parsed rawHeader) getEncryption() ContentEncryption { + return ContentEncryption(parsed.getString(headerEncryption)) +} + +// getCompression extracts parsed "zip" from the raw JSON. +func (parsed rawHeader) getCompression() CompressionAlgorithm { + return CompressionAlgorithm(parsed.getString(headerCompression)) +} + +func (parsed rawHeader) getNonce() string { + return parsed.getString(headerNonce) +} + +// getEPK extracts parsed "epk" from the raw JSON. +func (parsed rawHeader) getEPK() (*JSONWebKey, error) { + v := parsed[headerEPK] + if v == nil { + return nil, nil + } + var epk *JSONWebKey + err := json.Unmarshal(*v, &epk) + if err != nil { + return nil, err + } + return epk, nil +} + +// getAPU extracts parsed "apu" from the raw JSON. +func (parsed rawHeader) getAPU() (*byteBuffer, error) { + return parsed.getByteBuffer(headerAPU) +} + +// getAPV extracts parsed "apv" from the raw JSON. +func (parsed rawHeader) getAPV() (*byteBuffer, error) { + return parsed.getByteBuffer(headerAPV) +} + +// getIV extracts parsed "iv" from the raw JSON. +func (parsed rawHeader) getIV() (*byteBuffer, error) { + return parsed.getByteBuffer(headerIV) +} + +// getTag extracts parsed "tag" from the raw JSON. +func (parsed rawHeader) getTag() (*byteBuffer, error) { + return parsed.getByteBuffer(headerTag) +} + +// getJWK extracts parsed "jwk" from the raw JSON. +func (parsed rawHeader) getJWK() (*JSONWebKey, error) { + v := parsed[headerJWK] + if v == nil { + return nil, nil + } + var jwk *JSONWebKey + err := json.Unmarshal(*v, &jwk) + if err != nil { + return nil, err + } + return jwk, nil +} + +// getCritical extracts parsed "crit" from the raw JSON. If omitted, it +// returns an empty slice. 
+func (parsed rawHeader) getCritical() ([]string, error) { + v := parsed[headerCritical] + if v == nil { + return nil, nil + } + + var q []string + err := json.Unmarshal(*v, &q) + if err != nil { + return nil, err + } + return q, nil +} + +// sanitized produces a cleaned-up header object from the raw JSON. +func (parsed rawHeader) sanitized() (h Header, err error) { + for k, v := range parsed { + if v == nil { + continue + } + switch k { + case headerJWK: + var jwk *JSONWebKey + err = json.Unmarshal(*v, &jwk) + if err != nil { + err = fmt.Errorf("failed to unmarshal JWK: %v: %#v", err, string(*v)) + return + } + h.JSONWebKey = jwk + case headerKeyID: + var s string + err = json.Unmarshal(*v, &s) + if err != nil { + err = fmt.Errorf("failed to unmarshal key ID: %v: %#v", err, string(*v)) + return + } + h.KeyID = s + case headerAlgorithm: + var s string + err = json.Unmarshal(*v, &s) + if err != nil { + err = fmt.Errorf("failed to unmarshal algorithm: %v: %#v", err, string(*v)) + return + } + h.Algorithm = s + case headerNonce: + var s string + err = json.Unmarshal(*v, &s) + if err != nil { + err = fmt.Errorf("failed to unmarshal nonce: %v: %#v", err, string(*v)) + return + } + h.Nonce = s + default: + if h.ExtraHeaders == nil { + h.ExtraHeaders = map[HeaderKey]interface{}{} + } + var v2 interface{} + err = json.Unmarshal(*v, &v2) + if err != nil { + err = fmt.Errorf("failed to unmarshal value: %v: %#v", err, string(*v)) + return + } + h.ExtraHeaders[k] = v2 + } + } + return +} + +func (dst rawHeader) isSet(k HeaderKey) bool { + dvr := dst[k] + if dvr == nil { + return false + } + + var dv interface{} + err := json.Unmarshal(*dvr, &dv) + if err != nil { + return true + } + + if dvStr, ok := dv.(string); ok { + return dvStr != "" + } + + return true +} + +// Merge headers from src into dst, giving precedence to headers from dst. 
+func (dst rawHeader) merge(src *rawHeader) { + if src == nil { + return + } + + for k, v := range *src { + if dst.isSet(k) { + continue + } + + dst[k] = v + } +} + +// Get JOSE name of curve +func curveName(crv elliptic.Curve) (string, error) { + switch crv { + case elliptic.P256(): + return "P-256", nil + case elliptic.P384(): + return "P-384", nil + case elliptic.P521(): + return "P-521", nil + default: + return "", fmt.Errorf("square/go-jose: unsupported/unknown elliptic curve") + } +} + +// Get size of curve in bytes +func curveSize(crv elliptic.Curve) int { + bits := crv.Params().BitSize + + div := bits / 8 + mod := bits % 8 + + if mod == 0 { + return div + } + + return div + 1 +} + +func makeRawMessage(b []byte) *json.RawMessage { + rm := json.RawMessage(b) + return &rm +} diff --git a/vendor/gopkg.in/square/go-jose.v2/signing.go b/vendor/gopkg.in/square/go-jose.v2/signing.go new file mode 100644 index 00000000000..13e956d6680 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/signing.go @@ -0,0 +1,343 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "encoding/base64" + "errors" + "fmt" + + "golang.org/x/crypto/ed25519" + + "gopkg.in/square/go-jose.v2/json" +) + +// NonceSource represents a source of random nonces to go into JWS objects +type NonceSource interface { + Nonce() (string, error) +} + +// Signer represents a signer which takes a payload and produces a signed JWS object. +type Signer interface { + Sign(payload []byte) (*JSONWebSignature, error) + Options() SignerOptions +} + +// SigningKey represents an algorithm/key used to sign a message. +type SigningKey struct { + Algorithm SignatureAlgorithm + Key interface{} +} + +// SignerOptions represents options that can be set when creating signers. +type SignerOptions struct { + NonceSource NonceSource + EmbedJWK bool + + // Optional map of additional keys to be inserted into the protected header + // of a JWS object. Some specifications which make use of JWS like to insert + // additional values here. All values must be JSON-serializable. + ExtraHeaders map[HeaderKey]interface{} +} + +// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it +// if necessary. It returns itself and so can be used in a fluent style. +func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions { + if so.ExtraHeaders == nil { + so.ExtraHeaders = map[HeaderKey]interface{}{} + } + so.ExtraHeaders[k] = v + return so +} + +// WithContentType adds a content type ("cty") header and returns the updated +// SignerOptions. +func (so *SignerOptions) WithContentType(contentType ContentType) *SignerOptions { + return so.WithHeader(HeaderContentType, contentType) +} + +// WithType adds a type ("typ") header and returns the updated SignerOptions. 
+func (so *SignerOptions) WithType(typ ContentType) *SignerOptions { + return so.WithHeader(HeaderType, typ) +} + +type payloadSigner interface { + signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) +} + +type payloadVerifier interface { + verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error +} + +type genericSigner struct { + recipients []recipientSigInfo + nonceSource NonceSource + embedJWK bool + extraHeaders map[HeaderKey]interface{} +} + +type recipientSigInfo struct { + sigAlg SignatureAlgorithm + publicKey *JSONWebKey + signer payloadSigner +} + +// NewSigner creates an appropriate signer based on the key type +func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) { + return NewMultiSigner([]SigningKey{sig}, opts) +} + +// NewMultiSigner creates a signer for multiple recipients +func NewMultiSigner(sigs []SigningKey, opts *SignerOptions) (Signer, error) { + signer := &genericSigner{recipients: []recipientSigInfo{}} + + if opts != nil { + signer.nonceSource = opts.NonceSource + signer.embedJWK = opts.EmbedJWK + signer.extraHeaders = opts.ExtraHeaders + } + + for _, sig := range sigs { + err := signer.addRecipient(sig.Algorithm, sig.Key) + if err != nil { + return nil, err + } + } + + return signer, nil +} + +// newVerifier creates a verifier based on the key type +func newVerifier(verificationKey interface{}) (payloadVerifier, error) { + switch verificationKey := verificationKey.(type) { + case ed25519.PublicKey: + return &edEncrypterVerifier{ + publicKey: verificationKey, + }, nil + case *rsa.PublicKey: + return &rsaEncrypterVerifier{ + publicKey: verificationKey, + }, nil + case *ecdsa.PublicKey: + return &ecEncrypterVerifier{ + publicKey: verificationKey, + }, nil + case []byte: + return &symmetricMac{ + key: verificationKey, + }, nil + case JSONWebKey: + return newVerifier(verificationKey.Key) + case *JSONWebKey: + return newVerifier(verificationKey.Key) + default: + return nil, 
ErrUnsupportedKeyType + } +} + +func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error { + recipient, err := makeJWSRecipient(alg, signingKey) + if err != nil { + return err + } + + ctx.recipients = append(ctx.recipients, recipient) + return nil +} + +func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) { + switch signingKey := signingKey.(type) { + case ed25519.PrivateKey: + return newEd25519Signer(alg, signingKey) + case *rsa.PrivateKey: + return newRSASigner(alg, signingKey) + case *ecdsa.PrivateKey: + return newECDSASigner(alg, signingKey) + case []byte: + return newSymmetricSigner(alg, signingKey) + case JSONWebKey: + return newJWKSigner(alg, signingKey) + case *JSONWebKey: + return newJWKSigner(alg, *signingKey) + default: + return recipientSigInfo{}, ErrUnsupportedKeyType + } +} + +func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) { + recipient, err := makeJWSRecipient(alg, signingKey.Key) + if err != nil { + return recipientSigInfo{}, err + } + if recipient.publicKey != nil { + // recipient.publicKey is a JWK synthesized for embedding when recipientSigInfo + // was created for the inner key (such as a RSA or ECDSA public key). It contains + // the pub key for embedding, but doesn't have extra params like key id. + publicKey := signingKey + publicKey.Key = recipient.publicKey.Key + recipient.publicKey = &publicKey + + // This should be impossible, but let's check anyway. 
+ if !recipient.publicKey.IsPublic() { + return recipientSigInfo{}, errors.New("square/go-jose: public key was unexpectedly not public") + } + } + return recipient, nil +} + +func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) { + obj := &JSONWebSignature{} + obj.payload = payload + obj.Signatures = make([]Signature, len(ctx.recipients)) + + for i, recipient := range ctx.recipients { + protected := map[HeaderKey]interface{}{ + headerAlgorithm: string(recipient.sigAlg), + } + + if recipient.publicKey != nil { + // We want to embed the JWK or set the kid header, but not both. Having a protected + // header that contains an embedded JWK while also simultaneously containing the kid + // header is confusing, and at least in ACME the two are considered to be mutually + // exclusive. The fact that both can exist at the same time is a somewhat unfortunate + // result of the JOSE spec. We've decided that this library will only include one or + // the other to avoid this confusion. + // + // See https://github.com/square/go-jose/issues/157 for more context. 
+ if ctx.embedJWK { + protected[headerJWK] = recipient.publicKey + } else { + protected[headerKeyID] = recipient.publicKey.KeyID + } + } + + if ctx.nonceSource != nil { + nonce, err := ctx.nonceSource.Nonce() + if err != nil { + return nil, fmt.Errorf("square/go-jose: Error generating nonce: %v", err) + } + protected[headerNonce] = nonce + } + + for k, v := range ctx.extraHeaders { + protected[k] = v + } + + serializedProtected := mustSerializeJSON(protected) + + input := []byte(fmt.Sprintf("%s.%s", + base64.RawURLEncoding.EncodeToString(serializedProtected), + base64.RawURLEncoding.EncodeToString(payload))) + + signatureInfo, err := recipient.signer.signPayload(input, recipient.sigAlg) + if err != nil { + return nil, err + } + + signatureInfo.protected = &rawHeader{} + for k, v := range protected { + b, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("square/go-jose: Error marshalling item %#v: %v", k, err) + } + (*signatureInfo.protected)[k] = makeRawMessage(b) + } + obj.Signatures[i] = signatureInfo + } + + return obj, nil +} + +func (ctx *genericSigner) Options() SignerOptions { + return SignerOptions{ + NonceSource: ctx.nonceSource, + EmbedJWK: ctx.embedJWK, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Verify validates the signature on the object and returns the payload. +// This function does not support multi-signature, if you desire multi-sig +// verification use VerifyMulti instead. +// +// Be careful when verifying signatures based on embedded JWKs inside the +// payload header. You cannot assume that the key received in a payload is +// trusted. 
+func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) { + verifier, err := newVerifier(verificationKey) + if err != nil { + return nil, err + } + + if len(obj.Signatures) > 1 { + return nil, errors.New("square/go-jose: too many signatures in payload; expecting only one") + } + + signature := obj.Signatures[0] + headers := signature.mergedHeaders() + critical, err := headers.getCritical() + if err != nil { + return nil, err + } + if len(critical) > 0 { + // Unsupported crit header + return nil, ErrCryptoFailure + } + + input := obj.computeAuthData(&signature) + alg := headers.getSignatureAlgorithm() + err = verifier.verifyPayload(input, signature.Signature, alg) + if err == nil { + return obj.payload, nil + } + + return nil, ErrCryptoFailure +} + +// VerifyMulti validates (one of the multiple) signatures on the object and +// returns the index of the signature that was verified, along with the signature +// object and the payload. We return the signature and index to guarantee that +// callers are getting the verified value. 
+func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) { + verifier, err := newVerifier(verificationKey) + if err != nil { + return -1, Signature{}, nil, err + } + + for i, signature := range obj.Signatures { + headers := signature.mergedHeaders() + critical, err := headers.getCritical() + if err != nil { + continue + } + if len(critical) > 0 { + // Unsupported crit header + continue + } + + input := obj.computeAuthData(&signature) + alg := headers.getSignatureAlgorithm() + err = verifier.verifyPayload(input, signature.Signature, alg) + if err == nil { + return i, signature, obj.payload, nil + } + } + + return -1, Signature{}, nil, ErrCryptoFailure +} diff --git a/vendor/github.com/square/go-jose/symmetric.go b/vendor/gopkg.in/square/go-jose.v2/symmetric.go similarity index 93% rename from vendor/github.com/square/go-jose/symmetric.go rename to vendor/gopkg.in/square/go-jose.v2/symmetric.go index c47fc9757de..5be00f925a9 100644 --- a/vendor/github.com/square/go-jose/symmetric.go +++ b/vendor/gopkg.in/square/go-jose.v2/symmetric.go @@ -25,10 +25,11 @@ import ( "crypto/sha512" "crypto/subtle" "errors" + "fmt" "hash" "io" - "github.com/square/go-jose/cipher" + "gopkg.in/square/go-jose.v2/cipher" ) // Random reader (stubbed out in tests) @@ -229,11 +230,12 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie return recipientInfo{}, err } + header := &rawHeader{} + header.set(headerIV, newBuffer(parts.iv)) + header.set(headerTag, newBuffer(parts.tag)) + return recipientInfo{ - header: &rawHeader{ - Iv: newBuffer(parts.iv), - Tag: newBuffer(parts.tag), - }, + header: header, encryptedKey: parts.ciphertext, }, nil case A128KW, A192KW, A256KW: @@ -258,7 +260,7 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie // Decrypt the content encryption key. 
func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - switch KeyAlgorithm(headers.Alg) { + switch headers.getAlgorithm() { case DIRECT: cek := make([]byte, len(ctx.key)) copy(cek, ctx.key) @@ -266,10 +268,19 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien case A128GCMKW, A192GCMKW, A256GCMKW: aead := newAESGCM(len(ctx.key)) + iv, err := headers.getIV() + if err != nil { + return nil, fmt.Errorf("square/go-jose: invalid IV: %v", err) + } + tag, err := headers.getTag() + if err != nil { + return nil, fmt.Errorf("square/go-jose: invalid tag: %v", err) + } + parts := &aeadParts{ - iv: headers.Iv.bytes(), + iv: iv.bytes(), ciphertext: recipient.encryptedKey, - tag: headers.Tag.bytes(), + tag: tag.bytes(), } cek, err := aead.decrypt(ctx.key, []byte{}, parts) From e3f8f64c1743e261fffa1df6795fa44ea226d3b1 Mon Sep 17 00:00:00 2001 From: David Zhu Date: Tue, 5 Dec 2017 16:21:20 -0800 Subject: [PATCH 312/794] refactored mount, attach, resize operation's so that all failures generate events and event generation is more consistent. refactored operation generator and operation executor to use more general generated functions for operations, completions, and events. 
--- .../volume/persistentvolume/pv_controller.go | 4 +- pkg/volume/util/metrics.go | 6 +- .../nestedpendingoperations.go | 20 +- .../nestedpendingoperations_test.go | 88 ++-- .../operationexecutor/operation_executor.go | 61 +-- .../operation_executor_test.go | 144 +++--- .../operationexecutor/operation_generator.go | 428 +++++++++++------- pkg/volume/util/types/types.go | 8 + 8 files changed, 440 insertions(+), 319 deletions(-) diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index 980d960c750..e13d7a813d6 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -1250,7 +1250,7 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_delete") err = deleter.Delete() - opComplete(err) + opComplete(&err) if err != nil { // Deleter failed return false, err @@ -1373,7 +1373,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision") volume, err = provisioner.Provision() - opComplete(err) + opComplete(&err) if err != nil { strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err) glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) diff --git a/pkg/volume/util/metrics.go b/pkg/volume/util/metrics.go index ab2d76286bb..e3af12df890 100644 --- a/pkg/volume/util/metrics.go +++ b/pkg/volume/util/metrics.go @@ -49,12 +49,12 @@ func registerMetrics() { } // OperationCompleteHook returns a hook to call when an operation is completed -func OperationCompleteHook(plugin, operationName string) func(error) { +func OperationCompleteHook(plugin, operationName string) func(*error) { requestTime := time.Now() 
- opComplete := func(err error) { + opComplete := func(err *error) { timeTaken := time.Since(requestTime).Seconds() // Create metric with operation name and plugin name - if err != nil { + if *err != nil { storageOperationErrorMetric.WithLabelValues(plugin, operationName).Inc() } else { storageOperationMetric.WithLabelValues(plugin, operationName).Observe(timeTaken) diff --git a/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go b/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go index 82462c1f2f6..526ea403cee 100644 --- a/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go +++ b/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go @@ -55,7 +55,7 @@ type NestedPendingOperations interface { // concatenation of volumeName and podName is removed from the list of // executing operations allowing a new operation to be started with the // volumeName without error. - Run(volumeName v1.UniqueVolumeName, podName types.UniquePodName, operationFunc func() error, operationCompleteFunc func(error)) error + Run(volumeName v1.UniqueVolumeName, podName types.UniquePodName, generatedOperations types.GeneratedOperations) error // Wait blocks until all operations are completed. 
This is typically // necessary during tests - the test should wait until all operations finish @@ -94,8 +94,7 @@ type operation struct { func (grm *nestedPendingOperations) Run( volumeName v1.UniqueVolumeName, podName types.UniquePodName, - operationFunc func() error, - operationCompleteFunc func(error)) error { + generatedOperations types.GeneratedOperations) error { grm.lock.Lock() defer grm.lock.Unlock() opExists, previousOpIndex := grm.isOperationExists(volumeName, podName) @@ -128,15 +127,20 @@ func (grm *nestedPendingOperations) Run( }) } - go func() (err error) { + go func() (eventErr, detailedErr error) { // Handle unhandled panics (very unlikely) defer k8sRuntime.HandleCrash() // Handle completion of and error, if any, from operationFunc() - defer grm.operationComplete(volumeName, podName, &err) - defer operationCompleteFunc(err) + defer grm.operationComplete(volumeName, podName, &detailedErr) + if generatedOperations.CompleteFunc != nil { + defer generatedOperations.CompleteFunc(&detailedErr) + } + if generatedOperations.EventRecorderFunc != nil { + defer generatedOperations.EventRecorderFunc(&eventErr) + } // Handle panic, if any, from operationFunc() - defer k8sRuntime.RecoverFromPanic(&err) - return operationFunc() + defer k8sRuntime.RecoverFromPanic(&detailedErr) + return generatedOperations.OperationFunc() }() return nil diff --git a/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go b/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go index 8882303bde5..5865f96c21a 100644 --- a/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go +++ b/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go @@ -47,10 +47,10 @@ func Test_NewGoRoutineMap_Positive_SingleOp(t *testing.T) { // Arrange grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */) volumeName := v1.UniqueVolumeName("volume-name") - operation := func() error { return nil } + operation := func() (error, 
error) { return nil, nil } // Act - err := grm.Run(volumeName, "" /* operationSubName */, operation, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation}) // Assert if err != nil { @@ -63,11 +63,11 @@ func Test_NewGoRoutineMap_Positive_TwoOps(t *testing.T) { grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */) volume1Name := v1.UniqueVolumeName("volume1-name") volume2Name := v1.UniqueVolumeName("volume2-name") - operation := func() error { return nil } + operation := func() (error, error) { return nil, nil } // Act - err1 := grm.Run(volume1Name, "" /* operationSubName */, operation, func(error) {}) - err2 := grm.Run(volume2Name, "" /* operationSubName */, operation, func(error) {}) + err1 := grm.Run(volume1Name, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation}) + err2 := grm.Run(volume2Name, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation}) // Assert if err1 != nil { @@ -85,11 +85,11 @@ func Test_NewGoRoutineMap_Positive_TwoSubOps(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1PodName := types.UniquePodName("operation1-podname") operation2PodName := types.UniquePodName("operation2-podname") - operation := func() error { return nil } + operation := func() (error, error) { return nil, nil } // Act - err1 := grm.Run(volumeName, operation1PodName, operation, func(error) {}) - err2 := grm.Run(volumeName, operation2PodName, operation, func(error) {}) + err1 := grm.Run(volumeName, operation1PodName, types.GeneratedOperations{OperationFunc: operation}) + err2 := grm.Run(volumeName, operation2PodName, types.GeneratedOperations{OperationFunc: operation}) // Assert if err1 != nil { @@ -105,10 +105,10 @@ func Test_NewGoRoutineMap_Positive_SingleOpWithExpBackoff(t *testing.T) { // Arrange grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */) volumeName := 
v1.UniqueVolumeName("volume-name") - operation := func() error { return nil } + operation := func() (error, error) { return nil, nil } // Act - err := grm.Run(volumeName, "" /* operationSubName */, operation, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation}) // Assert if err != nil { @@ -122,7 +122,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletes(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateCallbackFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } @@ -133,7 +133,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletes(t *testing.T) { err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -154,7 +154,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletesWithExpBackoff(t * volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateCallbackFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1}) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } @@ -165,7 +165,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletesWithExpBackoff(t * err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -185,7 +185,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanics(t *testing.T) { grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */) volumeName := v1.UniqueVolumeName("volume-name") operation1 := generatePanicFunc() - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } @@ -195,7 +195,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanics(t *testing.T) { err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. 
Will retry.", err) return false, nil @@ -215,7 +215,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanicsWithExpBackoff(t *tes grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */) volumeName := v1.UniqueVolumeName("volume-name") operation1 := generatePanicFunc() - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } @@ -225,7 +225,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanicsWithExpBackoff(t *tes err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeLong), // Longer duration to accommodate for backoff func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -246,14 +246,14 @@ func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletes(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1}) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err2 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2}) // Assert if err2 == nil { @@ -271,14 +271,14 @@ func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes2(t *testing.T operationPodName := types.UniquePodName("operation-podname") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, operationPodName, operation1, func(error) {}) + err1 := grm.Run(volumeName, operationPodName, types.GeneratedOperations{OperationFunc: operation1}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, operationPodName, operation2, func(error) {}) + err2 := grm.Run(volumeName, operationPodName, types.GeneratedOperations{OperationFunc: operation2}) // Assert if err2 == nil { @@ -296,14 +296,14 @@ func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes(t *testing.T) operationPodName := types.UniquePodName("operation-podname") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, operationPodName, operation1, func(error) {}) + err1 := grm.Run(volumeName, operationPodName, types.GeneratedOperations{OperationFunc: operation1}) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, operationPodName, operation2, func(error) {}) + err2 := grm.Run(volumeName, operationPodName, types.GeneratedOperations{OperationFunc: operation2}) // Assert if err2 == nil { @@ -320,14 +320,14 @@ func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletesWithExpBackoff(t volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err2 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2}) // Assert if err2 == nil { @@ -344,7 +344,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1}) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } @@ -352,7 +352,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) { operation3 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err2 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2}) // Assert if err2 == nil { @@ -367,7 +367,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) { err3 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation3, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation3}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -388,7 +388,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *t volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1}) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } @@ -396,7 +396,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *t operation3 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err2 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2}) // Assert if err2 == nil { @@ -411,7 +411,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *t err3 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation3, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation3}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -471,7 +471,7 @@ func Test_NewGoRoutineMap_Positive_Wait(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1}) if err != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err) } @@ -500,7 +500,7 @@ func Test_NewGoRoutineMap_Positive_WaitWithExpBackoff(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1}) if err != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err) } @@ -522,28 +522,28 @@ func Test_NewGoRoutineMap_Positive_WaitWithExpBackoff(t *testing.T) { } } -func generateCallbackFunc(done chan<- interface{}) func() error { - return func() error { +func generateCallbackFunc(done chan<- interface{}) func() (error, error) { + return func() (error, error) { done <- true - return nil + return nil, nil } } -func generateWaitFunc(done <-chan interface{}) func() error { - return func() error { +func generateWaitFunc(done <-chan interface{}) func() (error, error) { + return func() (error, error) { <-done - return nil + return nil, nil } } -func generatePanicFunc() func() error { - return func() error { +func generatePanicFunc() func() (error, error) { + return func() (error, error) { panic("testing panic") } } -func generateNoopFunc() func() error { - return func() error { return nil } +func generateNoopFunc() func() (error, error) { + return func() (error, error) { return nil, nil } } func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error { diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index 543067be6b7..7df9f43d79a 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -571,30 +571,28 @@ func (oe *operationExecutor) IsOperationPending(volumeName v1.UniqueVolumeName, func (oe *operationExecutor) AttachVolume( volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { - attachFunc, plugin, err := + generatedOperations, err := oe.operationGenerator.GenerateAttachVolumeFunc(volumeToAttach, actualStateOfWorld) if err != nil { return err } - opCompleteFunc := util.OperationCompleteHook(plugin, "volume_attach") return oe.pendingOperations.Run( - volumeToAttach.VolumeName, "" /* podName */, attachFunc, opCompleteFunc) + volumeToAttach.VolumeName, "" /* podName */, 
generatedOperations) } func (oe *operationExecutor) DetachVolume( volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { - detachFunc, plugin, err := + generatedOperations, err := oe.operationGenerator.GenerateDetachVolumeFunc(volumeToDetach, verifySafeToDetach, actualStateOfWorld) if err != nil { return err } - opCompleteFunc := util.OperationCompleteHook(plugin, "volume_detach") return oe.pendingOperations.Run( - volumeToDetach.VolumeName, "" /* podName */, detachFunc, opCompleteFunc) + volumeToDetach.VolumeName, "" /* podName */, generatedOperations) } func (oe *operationExecutor) VerifyVolumesAreAttached( @@ -661,7 +659,7 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( } for pluginName, pluginNodeVolumes := range bulkVerifyPluginsByNode { - bulkVerifyVolumeFunc, err := oe.operationGenerator.GenerateBulkVolumeVerifyFunc( + generatedOperations, err := oe.operationGenerator.GenerateBulkVolumeVerifyFunc( pluginNodeVolumes, pluginName, volumeSpecMapByPlugin[pluginName], @@ -670,10 +668,9 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( glog.Errorf("BulkVerifyVolumes.GenerateBulkVolumeVerifyFunc error bulk verifying volumes for plugin %q with %v", pluginName, err) } - opCompleteFunc := util.OperationCompleteHook(pluginName, "verify_volumes_are_attached") // Ugly hack to ensure - we don't do parallel bulk polling of same volume plugin uniquePluginName := v1.UniqueVolumeName(pluginName) - err = oe.pendingOperations.Run(uniquePluginName, "" /* Pod Name */, bulkVerifyVolumeFunc, opCompleteFunc) + err = oe.pendingOperations.Run(uniquePluginName, "" /* Pod Name */, generatedOperations) if err != nil { glog.Errorf("BulkVerifyVolumes.Run Error bulk volume verification for plugin %q with %v", pluginName, err) } @@ -684,15 +681,14 @@ func (oe *operationExecutor) VerifyVolumesAreAttachedPerNode( attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld 
ActualStateOfWorldAttacherUpdater) error { - volumesAreAttachedFunc, err := + generatedOperations, err := oe.operationGenerator.GenerateVolumesAreAttachedFunc(attachedVolumes, nodeName, actualStateOfWorld) if err != nil { return err } - opCompleteFunc := util.OperationCompleteHook("", "verify_volumes_are_attached_per_node") // Give an empty UniqueVolumeName so that this operation could be executed concurrently. - return oe.pendingOperations.Run("" /* volumeName */, "" /* podName */, volumesAreAttachedFunc, opCompleteFunc) + return oe.pendingOperations.Run("" /* volumeName */, "" /* podName */, generatedOperations) } func (oe *operationExecutor) MountVolume( @@ -700,7 +696,7 @@ func (oe *operationExecutor) MountVolume( volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool) error { - mountFunc, plugin, err := oe.operationGenerator.GenerateMountVolumeFunc( + generatedOperations, err := oe.operationGenerator.GenerateMountVolumeFunc( waitForAttachTimeout, volumeToMount, actualStateOfWorld, isRemount) if err != nil { return err @@ -715,16 +711,15 @@ func (oe *operationExecutor) MountVolume( } // TODO mount_device - opCompleteFunc := util.OperationCompleteHook(plugin, "volume_mount") return oe.pendingOperations.Run( - volumeToMount.VolumeName, podName, mountFunc, opCompleteFunc) + volumeToMount.VolumeName, podName, generatedOperations) } func (oe *operationExecutor) UnmountVolume( volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { - unmountFunc, plugin, err := + generatedOperations, err := oe.operationGenerator.GenerateUnmountVolumeFunc(volumeToUnmount, actualStateOfWorld) if err != nil { return err @@ -734,42 +729,40 @@ func (oe *operationExecutor) UnmountVolume( // same volume in parallel podName := volumetypes.UniquePodName(volumeToUnmount.PodUID) - opCompleteFunc := util.OperationCompleteHook(plugin, "volume_unmount") return oe.pendingOperations.Run( - volumeToUnmount.VolumeName, 
podName, unmountFunc, opCompleteFunc) + volumeToUnmount.VolumeName, podName, generatedOperations) } func (oe *operationExecutor) UnmountDevice( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error { - unmountDeviceFunc, plugin, err := + generatedOperations, err := oe.operationGenerator.GenerateUnmountDeviceFunc(deviceToDetach, actualStateOfWorld, mounter) if err != nil { return err } - opCompleteFunc := util.OperationCompleteHook(plugin, "unmount_device") return oe.pendingOperations.Run( - deviceToDetach.VolumeName, "" /* podName */, unmountDeviceFunc, opCompleteFunc) + deviceToDetach.VolumeName, "" /* podName */, generatedOperations) } func (oe *operationExecutor) ExpandVolume(pvcWithResizeRequest *expandcache.PVCWithResizeRequest, resizeMap expandcache.VolumeResizeMap) error { - expandFunc, pluginName, err := oe.operationGenerator.GenerateExpandVolumeFunc(pvcWithResizeRequest, resizeMap) + generatedOperations, err := oe.operationGenerator.GenerateExpandVolumeFunc(pvcWithResizeRequest, resizeMap) if err != nil { return err } uniqueVolumeKey := v1.UniqueVolumeName(pvcWithResizeRequest.UniquePVCKey()) - opCompleteFunc := util.OperationCompleteHook(pluginName, "expand_volume") - return oe.pendingOperations.Run(uniqueVolumeKey, "", expandFunc, opCompleteFunc) + + return oe.pendingOperations.Run(uniqueVolumeKey, "", generatedOperations) } func (oe *operationExecutor) MapVolume( waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { - mapFunc, plugin, err := oe.operationGenerator.GenerateMapVolumeFunc( + generatedOperations, err := oe.operationGenerator.GenerateMapVolumeFunc( waitForAttachTimeout, volumeToMount, actualStateOfWorld) if err != nil { return err @@ -785,15 +778,14 @@ func (oe *operationExecutor) MapVolume( podName = volumehelper.GetUniquePodName(volumeToMount.Pod) } - opCompleteFunc := util.OperationCompleteHook(plugin, 
"map_volume") return oe.pendingOperations.Run( - volumeToMount.VolumeName, podName, mapFunc, opCompleteFunc) + volumeToMount.VolumeName, podName, generatedOperations) } func (oe *operationExecutor) UnmapVolume( volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { - unmapFunc, plugin, err := + generatedOperations, err := oe.operationGenerator.GenerateUnmapVolumeFunc(volumeToUnmount, actualStateOfWorld) if err != nil { return err @@ -803,16 +795,15 @@ func (oe *operationExecutor) UnmapVolume( // same volume in parallel podName := volumetypes.UniquePodName(volumeToUnmount.PodUID) - opCompleteFunc := util.OperationCompleteHook(plugin, "unmap_volume") return oe.pendingOperations.Run( - volumeToUnmount.VolumeName, podName, unmapFunc, opCompleteFunc) + volumeToUnmount.VolumeName, podName, generatedOperations) } func (oe *operationExecutor) UnmapDevice( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error { - unmapDeviceFunc, plugin, err := + generatedOperations, err := oe.operationGenerator.GenerateUnmapDeviceFunc(deviceToDetach, actualStateOfWorld, mounter) if err != nil { return err @@ -822,24 +813,22 @@ func (oe *operationExecutor) UnmapDevice( // the same volume in parallel podName := nestedpendingoperations.EmptyUniquePodName - opCompleteFunc := util.OperationCompleteHook(plugin, "unmap_device") return oe.pendingOperations.Run( - deviceToDetach.VolumeName, podName, unmapDeviceFunc, opCompleteFunc) + deviceToDetach.VolumeName, podName, generatedOperations) } func (oe *operationExecutor) VerifyControllerAttachedVolume( volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { - verifyControllerAttachedVolumeFunc, plugin, err := + generatedOperations, err := oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(volumeToMount, nodeName, actualStateOfWorld) if err != nil { return err } - opCompleteFunc := 
util.OperationCompleteHook(plugin, "verify_controller_attached_volume") return oe.pendingOperations.Run( - volumeToMount.VolumeName, "" /* podName */, verifyControllerAttachedVolumeFunc, opCompleteFunc) + volumeToMount.VolumeName, "" /* podName */, generatedOperations) } // VolumeStateHandler defines a set of operations for handling mount/unmount/detach/reconstruct volume-related operations diff --git a/pkg/volume/util/operationexecutor/operation_executor_test.go b/pkg/volume/util/operationexecutor/operation_executor_test.go index 4e06b39616b..18e68a3ab0d 100644 --- a/pkg/volume/util/operationexecutor/operation_executor_test.go +++ b/pkg/volume/util/operationexecutor/operation_executor_test.go @@ -350,87 +350,123 @@ func newFakeOperationGenerator(ch chan interface{}, quit chan interface{}) Opera } } -func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (func() error, string, error) { - return func() error { +func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { startOperationAndBlock(fopg.ch, fopg.quit) - return nil - }, "", nil -} -func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { - return func() error { - startOperationAndBlock(fopg.ch, fopg.quit) - return nil - }, "", nil -} -func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { - return func() error { - startOperationAndBlock(fopg.ch, fopg.quit) - return nil - }, "", nil -} -func 
(fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { - return func() error { - startOperationAndBlock(fopg.ch, fopg.quit) - return nil - }, "", nil -} -func (fopg *fakeOperationGenerator) GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { - return func() error { - startOperationAndBlock(fopg.ch, fopg.quit) - return nil + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, }, nil } -func (fopg *fakeOperationGenerator) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, string, error) { - return func() error { +func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { startOperationAndBlock(fopg.ch, fopg.quit) - return nil - }, "", nil + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil } -func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { - return func() error { +func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { startOperationAndBlock(fopg.ch, fopg.quit) - return nil - }, "", nil + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil +} +func (fopg *fakeOperationGenerator) 
GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { + startOperationAndBlock(fopg.ch, fopg.quit) + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil +} +func (fopg *fakeOperationGenerator) GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { + startOperationAndBlock(fopg.ch, fopg.quit) + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil +} +func (fopg *fakeOperationGenerator) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { + startOperationAndBlock(fopg.ch, fopg.quit) + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil +} +func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { + startOperationAndBlock(fopg.ch, fopg.quit) + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil } func (fopg *fakeOperationGenerator) GenerateExpandVolumeFunc(pvcWithResizeRequest *expandcache.PVCWithResizeRequest, - resizeMap expandcache.VolumeResizeMap) (func() error, string, error) { - return func() error { + resizeMap expandcache.VolumeResizeMap) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { startOperationAndBlock(fopg.ch, fopg.quit) - return nil - }, "", nil + return nil, nil + } 
+ return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil } func (fopg *fakeOperationGenerator) GenerateBulkVolumeVerifyFunc( pluginNodeVolumes map[types.NodeName][]*volume.Spec, pluginNane string, volumeSpecMap map[*volume.Spec]v1.UniqueVolumeName, - actualStateOfWorldAttacherUpdater ActualStateOfWorldAttacherUpdater) (func() error, error) { - return func() error { + actualStateOfWorldAttacherUpdater ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { startOperationAndBlock(fopg.ch, fopg.quit) - return nil + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, }, nil } -func (fopg *fakeOperationGenerator) GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (func() error, string, error) { - return func() error { +func (fopg *fakeOperationGenerator) GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { startOperationAndBlock(fopg.ch, fopg.quit) - return nil - }, "", nil + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil } -func (fopg *fakeOperationGenerator) GenerateUnmapVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { - return func() error { +func (fopg *fakeOperationGenerator) GenerateUnmapVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { startOperationAndBlock(fopg.ch, fopg.quit) - return nil - }, "", nil + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil } -func (fopg 
*fakeOperationGenerator) GenerateUnmapDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, string, error) { - return func() error { +func (fopg *fakeOperationGenerator) GenerateUnmapDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { startOperationAndBlock(fopg.ch, fopg.quit) - return nil - }, "", nil + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil } func (fopg *fakeOperationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr { diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 2ff4b668f00..a322ed4ca3d 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -38,6 +38,7 @@ import ( "k8s.io/kubernetes/pkg/util/resizefs" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" + volumetypes "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) @@ -83,34 +84,34 @@ func NewOperationGenerator(kubeClient clientset.Interface, // OperationGenerator interface that extracts out the functions from operation_executor to make it dependency injectable type OperationGenerator interface { // Generates the MountVolume function needed to perform the mount of a volume plugin - GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (func() error, string, error) + GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (volumetypes.GeneratedOperations, error) // Generates 
the UnmountVolume function needed to perform the unmount of a volume plugin - GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) + GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) // Generates the AttachVolume function needed to perform attach of a volume plugin - GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) + GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) // Generates the DetachVolume function needed to perform the detach of a volume plugin - GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) + GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) // Generates the VolumesAreAttached function needed to verify if volume plugins are attached - GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) + GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) // Generates the UnMountDevice function needed to perform the unmount of a device - GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, string, error) + GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter 
mount.Interface) (volumetypes.GeneratedOperations, error) // Generates the function needed to check if the attach_detach controller has attached the volume plugin - GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) + GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) // Generates the MapVolume function needed to perform the map of a volume plugin - GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (func() error, string, error) + GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) // Generates the UnmapVolume function needed to perform the unmap of a volume plugin - GenerateUnmapVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) + GenerateUnmapVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) // Generates the UnmapDevice function needed to perform the unmap of a device - GenerateUnmapDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, string, error) + GenerateUnmapDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (volumetypes.GeneratedOperations, error) // GetVolumePluginMgr returns volume plugin manager GetVolumePluginMgr() *volume.VolumePluginMgr @@ -118,15 +119,15 @@ type OperationGenerator interface { GenerateBulkVolumeVerifyFunc( 
map[types.NodeName][]*volume.Spec, string, - map[*volume.Spec]v1.UniqueVolumeName, ActualStateOfWorldAttacherUpdater) (func() error, error) + map[*volume.Spec]v1.UniqueVolumeName, ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) - GenerateExpandVolumeFunc(*expandcache.PVCWithResizeRequest, expandcache.VolumeResizeMap) (func() error, string, error) + GenerateExpandVolumeFunc(*expandcache.PVCWithResizeRequest, expandcache.VolumeResizeMap) (volumetypes.GeneratedOperations, error) } func (og *operationGenerator) GenerateVolumesAreAttachedFunc( attachedVolumes []AttachedVolume, nodeName types.NodeName, - actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { // volumesPerPlugin maps from a volume plugin to a list of volume specs which belong // to this type of plugin @@ -154,7 +155,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( volumeSpecMap[volumeAttached.VolumeSpec] = volumeAttached.VolumeName } - return func() error { + volumesAreAttachedFunc := func() (error, error) { // For each volume plugin, pass the list of volume specs to VolumesAreAttached to check // whether the volumes are still attached. 
@@ -195,7 +196,13 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( } } } - return nil + return nil, nil + } + + return volumetypes.GeneratedOperations{ + OperationFunc: volumesAreAttachedFunc, + CompleteFunc: util.OperationCompleteHook("", "verify_volumes_are_attached_per_node"), + EventRecorderFunc: nil, // nil because we do not want to generate event on error }, nil } @@ -203,9 +210,9 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( pluginNodeVolumes map[types.NodeName][]*volume.Spec, pluginName string, volumeSpecMap map[*volume.Spec]v1.UniqueVolumeName, - actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { - return func() error { + bulkVolumeVerifyFunc := func() (error, error) { attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginByName(pluginName) if err != nil || attachableVolumePlugin == nil { @@ -213,7 +220,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( "BulkVerifyVolume.FindAttachablePluginBySpec failed for plugin %q with: %v", pluginName, err) - return nil + return nil, nil } volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher() @@ -223,19 +230,19 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( "BulkVerifyVolume.NewAttacher failed for getting plugin %q with: %v", attachableVolumePlugin, newAttacherErr) - return nil + return nil, nil } bulkVolumeVerifier, ok := volumeAttacher.(volume.BulkVolumeVerifier) if !ok { glog.Errorf("BulkVerifyVolume failed to type assert attacher %q", bulkVolumeVerifier) - return nil + return nil, nil } attached, bulkAttachErr := bulkVolumeVerifier.BulkVerifyVolumes(pluginNodeVolumes) if bulkAttachErr != nil { glog.Errorf("BulkVerifyVolume.BulkVerifyVolumes Error checking volumes are attached with %v", bulkAttachErr) - return nil + return nil, nil } for nodeName, volumeSpecs := range pluginNodeVolumes { @@ 
-260,26 +267,43 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( } } - return nil + return nil, nil + } + + return volumetypes.GeneratedOperations{ + OperationFunc: bulkVolumeVerifyFunc, + CompleteFunc: util.OperationCompleteHook(pluginName, "verify_volumes_are_attached"), + EventRecorderFunc: nil, // nil because we do not want to generate event on error }, nil + } func (og *operationGenerator) GenerateAttachVolumeFunc( volumeToAttach VolumeToAttach, - actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { // Get attacher plugin + eventRecorderFunc := func(err *error) { + if *err != nil { + for _, pod := range volumeToAttach.ScheduledPods { + og.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, (*err).Error()) + } + } + } + attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec) if err != nil || attachableVolumePlugin == nil { - return nil, "", volumeToAttach.GenerateErrorDetailed("AttachVolume.FindAttachablePluginBySpec failed", err) + eventRecorderFunc(&err) + return volumetypes.GeneratedOperations{}, volumeToAttach.GenerateErrorDetailed("AttachVolume.FindAttachablePluginBySpec failed", err) } volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher() if newAttacherErr != nil { - return nil, attachableVolumePlugin.GetPluginName(), volumeToAttach.GenerateErrorDetailed("AttachVolume.NewAttacher failed", newAttacherErr) + eventRecorderFunc(&err) + return volumetypes.GeneratedOperations{}, volumeToAttach.GenerateErrorDetailed("AttachVolume.NewAttacher failed", newAttacherErr) } - return func() error { + attachVolumeFunc := func() (error, error) { // Execute attach devicePath, attachErr := volumeAttacher.Attach( volumeToAttach.VolumeSpec, volumeToAttach.NodeName) @@ -298,11 +322,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( } // On 
failure, return error. Caller will log and retry. - eventErr, detailedErr := volumeToAttach.GenerateError("AttachVolume.Attach failed", attachErr) - for _, pod := range volumeToAttach.ScheduledPods { - og.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) - } - return detailedErr + return volumeToAttach.GenerateError("AttachVolume.Attach failed", attachErr) } glog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", "")) @@ -312,11 +332,17 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( v1.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. - return volumeToAttach.GenerateErrorDetailed("AttachVolume.MarkVolumeAsAttached failed", addVolumeNodeErr) + return volumeToAttach.GenerateError("AttachVolume.MarkVolumeAsAttached failed", addVolumeNodeErr) } - return nil - }, attachableVolumePlugin.GetPluginName(), nil + return nil, nil + } + + return volumetypes.GeneratedOperations{ + OperationFunc: attachVolumeFunc, + EventRecorderFunc: eventRecorderFunc, + CompleteFunc: util.OperationCompleteHook(attachableVolumePlugin.GetPluginName(), "volume_attach"), + }, nil } func (og *operationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr { @@ -326,7 +352,7 @@ func (og *operationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr { func (og *operationGenerator) GenerateDetachVolumeFunc( volumeToDetach AttachedVolume, verifySafeToDetach bool, - actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { var volumeName string var attachableVolumePlugin volume.AttachableVolumePlugin var pluginName string @@ -337,13 +363,13 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( attachableVolumePlugin, err = 
og.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec) if err != nil || attachableVolumePlugin == nil { - return nil, "", volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) + return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) } volumeName, err = attachableVolumePlugin.GetVolumeName(volumeToDetach.VolumeSpec) if err != nil { - return nil, attachableVolumePlugin.GetPluginName(), volumeToDetach.GenerateErrorDetailed("DetachVolume.GetVolumeName failed", err) + return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.GetVolumeName failed", err) } } else { // Get attacher plugin and the volumeName by splitting the volume unique name in case @@ -351,11 +377,11 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( // when a pod has been deleted during the controller downtime pluginName, volumeName, err = volumehelper.SplitUniqueName(volumeToDetach.VolumeName) if err != nil { - return nil, pluginName, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err) + return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err) } attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginByName(pluginName) if err != nil { - return nil, pluginName, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) + return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) } } @@ -365,10 +391,10 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( volumeDetacher, err := attachableVolumePlugin.NewDetacher() if err != nil { - return nil, pluginName, volumeToDetach.GenerateErrorDetailed("DetachVolume.NewDetacher failed", err) + return volumetypes.GeneratedOperations{}, 
volumeToDetach.GenerateErrorDetailed("DetachVolume.NewDetacher failed", err) } - return func() error { + getVolumePluginMgrFunc := func() (error, error) { var err error if verifySafeToDetach { err = og.verifyVolumeIsSafeToDetach(volumeToDetach) @@ -380,7 +406,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( // On failure, add volume back to ReportAsAttached list actualStateOfWorld.AddVolumeToReportAsAttached( volumeToDetach.VolumeName, volumeToDetach.NodeName) - return volumeToDetach.GenerateErrorDetailed("DetachVolume.Detach failed", err) + return volumeToDetach.GenerateError("DetachVolume.Detach failed", err) } glog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", "")) @@ -389,25 +415,33 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( actualStateOfWorld.MarkVolumeAsDetached( volumeToDetach.VolumeName, volumeToDetach.NodeName) - return nil - }, pluginName, nil + return nil, nil + } + + return volumetypes.GeneratedOperations{ + OperationFunc: getVolumePluginMgrFunc, + CompleteFunc: util.OperationCompleteHook(pluginName, "volume_detach"), + EventRecorderFunc: nil, // nil because we do not want to generate event on error + }, nil } func (og *operationGenerator) GenerateMountVolumeFunc( waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, - isRemount bool) (func() error, string, error) { + isRemount bool) (volumetypes.GeneratedOperations, error) { // Get mounter plugin volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) if err != nil || volumePlugin == nil { - return nil, "", volumeToMount.GenerateErrorDetailed("MountVolume.FindPluginBySpec failed", err) + return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("MountVolume.FindPluginBySpec failed", err) } affinityErr := checkNodeAffinity(og, volumeToMount, volumePlugin) if affinityErr != nil { - return nil, volumePlugin.GetPluginName(), affinityErr + 
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.NodeAffinity check failed", affinityErr) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) + return volumetypes.GeneratedOperations{}, detailedErr } volumeMounter, newMounterErr := volumePlugin.NewMounter( @@ -417,13 +451,15 @@ func (og *operationGenerator) GenerateMountVolumeFunc( if newMounterErr != nil { eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.NewMounter initialization failed", newMounterErr) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) - return nil, volumePlugin.GetPluginName(), detailedErr + return volumetypes.GeneratedOperations{}, detailedErr } mountCheckError := checkMountOptionSupport(og, volumeToMount, volumePlugin) if mountCheckError != nil { - return nil, volumePlugin.GetPluginName(), mountCheckError + eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.MountOptionSupport check failed", mountCheckError) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.UnsupportedMountOption, eventErr.Error()) + return volumetypes.GeneratedOperations{}, detailedErr } // Get attacher, if possible @@ -440,7 +476,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( fsGroup = volumeToMount.Pod.Spec.SecurityContext.FSGroup } - return func() error { + mountVolumeFunc := func() (error, error) { if volumeAttacher != nil { // Wait for attachable volumes to finish attaching glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) @@ -449,7 +485,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( volumeToMount.VolumeSpec, volumeToMount.DevicePath, volumeToMount.Pod, waitForAttachTimeout) if err != nil { // On failure, return error. Caller will log and retry. 
- return volumeToMount.GenerateErrorDetailed("MountVolume.WaitForAttach failed", err) + return volumeToMount.GenerateError("MountVolume.WaitForAttach failed", err) } glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) @@ -459,14 +495,14 @@ func (og *operationGenerator) GenerateMountVolumeFunc( resizeError := og.resizeFileSystem(volumeToMount, devicePath, volumePlugin.GetPluginName()) if resizeError != nil { - return volumeToMount.GenerateErrorDetailed("MountVolume.Resize failed", resizeError) + return volumeToMount.GenerateError("MountVolume.Resize failed", resizeError) } deviceMountPath, err := volumeAttacher.GetDeviceMountPath(volumeToMount.VolumeSpec) if err != nil { // On failure, return error. Caller will log and retry. - return volumeToMount.GenerateErrorDetailed("MountVolume.GetDeviceMountPath failed", err) + return volumeToMount.GenerateError("MountVolume.GetDeviceMountPath failed", err) } // Mount device to global mount path @@ -476,9 +512,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( deviceMountPath) if err != nil { // On failure, return error. Caller will log and retry. - eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.MountDevice failed", err) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) - return detailedErr + return volumeToMount.GenerateError("MountVolume.MountDevice failed", err) } glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.MountDevice succeeded", fmt.Sprintf("device mount path %q", deviceMountPath))) @@ -488,7 +522,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( volumeToMount.VolumeName) if markDeviceMountedErr != nil { // On failure, return error. Caller will log and retry. 
- return volumeToMount.GenerateErrorDetailed("MountVolume.MarkDeviceAsMounted failed", markDeviceMountedErr) + return volumeToMount.GenerateError("MountVolume.MarkDeviceAsMounted failed", markDeviceMountedErr) } } @@ -497,9 +531,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( err = fmt.Errorf( "Verify that your node machine has the required components before attempting to mount this volume type. %s", canMountErr) - eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.CanMount failed", err) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) - return detailedErr + return volumeToMount.GenerateError("MountVolume.CanMount failed", err) } } @@ -507,9 +539,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( mountErr := volumeMounter.SetUp(fsGroup) if mountErr != nil { // On failure, return error. Caller will log and retry. - eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.SetUp failed", mountErr) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) - return detailedErr + return volumeToMount.GenerateError("MountVolume.SetUp failed", mountErr) } simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.SetUp succeeded", "") @@ -532,11 +562,23 @@ func (og *operationGenerator) GenerateMountVolumeFunc( volumeToMount.VolumeGidValue) if markVolMountedErr != nil { // On failure, return error. Caller will log and retry. 
- return volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeAsMounted failed", markVolMountedErr) + return volumeToMount.GenerateError("MountVolume.MarkVolumeAsMounted failed", markVolMountedErr) } - return nil - }, volumePlugin.GetPluginName(), nil + return nil, nil + } + + eventRecorderFunc := func(err *error) { + if *err != nil { + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, (*err).Error()) + } + } + + return volumetypes.GeneratedOperations{ + OperationFunc: mountVolumeFunc, + EventRecorderFunc: eventRecorderFunc, + CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "volume_mount"), + }, nil } func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devicePath string, pluginName string) error { @@ -608,26 +650,26 @@ func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devi func (og *operationGenerator) GenerateUnmountVolumeFunc( volumeToUnmount MountedVolume, - actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { + actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { // Get mountable plugin volumePlugin, err := og.volumePluginMgr.FindPluginByName(volumeToUnmount.PluginName) if err != nil || volumePlugin == nil { - return nil, "", volumeToUnmount.GenerateErrorDetailed("UnmountVolume.FindPluginByName failed", err) + return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmountVolume.FindPluginByName failed", err) } volumeUnmounter, newUnmounterErr := volumePlugin.NewUnmounter( volumeToUnmount.InnerVolumeSpecName, volumeToUnmount.PodUID) if newUnmounterErr != nil { - return nil, volumePlugin.GetPluginName(), volumeToUnmount.GenerateErrorDetailed("UnmountVolume.NewUnmounter failed", newUnmounterErr) + return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmountVolume.NewUnmounter failed", newUnmounterErr) } - return func() error 
{ + unmountVolumeFunc := func() (error, error) { // Execute unmount unmountErr := volumeUnmounter.TearDown() if unmountErr != nil { // On failure, return error. Caller will log and retry. - return volumeToUnmount.GenerateErrorDetailed("UnmountVolume.TearDown failed", unmountErr) + return volumeToUnmount.GenerateError("UnmountVolume.TearDown failed", unmountErr) } glog.Infof( @@ -648,37 +690,43 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( glog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error()) } - return nil - }, volumePlugin.GetPluginName(), nil + return nil, nil + } + + return volumetypes.GeneratedOperations{ + OperationFunc: unmountVolumeFunc, + CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "volume_unmount"), + EventRecorderFunc: nil, // nil because we do not want to generate event on error + }, nil } func (og *operationGenerator) GenerateUnmountDeviceFunc( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, - mounter mount.Interface) (func() error, string, error) { + mounter mount.Interface) (volumetypes.GeneratedOperations, error) { // Get attacher plugin attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginBySpec(deviceToDetach.VolumeSpec) if err != nil || attachableVolumePlugin == nil { - return nil, "", deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindAttachablePluginBySpec failed", err) + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindAttachablePluginBySpec failed", err) } volumeDetacher, err := attachableVolumePlugin.NewDetacher() if err != nil { - return nil, attachableVolumePlugin.GetPluginName(), deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDetacher failed", err) + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDetacher failed", err) } volumeAttacher, err := 
attachableVolumePlugin.NewAttacher() if err != nil { - return nil, attachableVolumePlugin.GetPluginName(), deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewAttacher failed", err) + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewAttacher failed", err) } - return func() error { + unmountDeviceFunc := func() (error, error) { deviceMountPath, err := volumeAttacher.GetDeviceMountPath(deviceToDetach.VolumeSpec) if err != nil { // On failure, return error. Caller will log and retry. - return deviceToDetach.GenerateErrorDetailed("GetDeviceMountPath failed", err) + return deviceToDetach.GenerateError("GetDeviceMountPath failed", err) } refs, err := attachableVolumePlugin.GetDeviceMountRefs(deviceMountPath) @@ -686,13 +734,13 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( if err == nil { err = fmt.Errorf("The device mount path %q is still mounted by other references %v", deviceMountPath, refs) } - return deviceToDetach.GenerateErrorDetailed("GetDeviceMountRefs check failed", err) + return deviceToDetach.GenerateError("GetDeviceMountRefs check failed", err) } // Execute unmount unmountDeviceErr := volumeDetacher.UnmountDevice(deviceMountPath) if unmountDeviceErr != nil { // On failure, return error. Caller will log and retry. - return deviceToDetach.GenerateErrorDetailed("UnmountDevice failed", unmountDeviceErr) + return deviceToDetach.GenerateError("UnmountDevice failed", unmountDeviceErr) } // Before logging that UnmountDevice succeeded and moving on, // use mounter.PathIsDevice to check if the path is a device, @@ -700,27 +748,33 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( // else on the system. Retry if it returns true. deviceOpened, deviceOpenedErr := isDeviceOpened(deviceToDetach, mounter) if deviceOpenedErr != nil { - return deviceOpenedErr + return nil, deviceOpenedErr } // The device is still in use elsewhere. Caller will log and retry. 
if deviceOpened { - return deviceToDetach.GenerateErrorDetailed( + return deviceToDetach.GenerateError( "UnmountDevice failed", fmt.Errorf("the device is in use when it was no longer expected to be in use")) } - glog.Infof(deviceToDetach.GenerateMsgDetailed("UnmountDevice succeeded", "")) + glog.Infof(deviceToDetach.GenerateMsg("UnmountDevice succeeded", "")) // Update actual state of world markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted( deviceToDetach.VolumeName) if markDeviceUnmountedErr != nil { // On failure, return error. Caller will log and retry. - return deviceToDetach.GenerateErrorDetailed("MarkDeviceAsUnmounted failed", markDeviceUnmountedErr) + return deviceToDetach.GenerateError("MarkDeviceAsUnmounted failed", markDeviceUnmountedErr) } - return nil - }, attachableVolumePlugin.GetPluginName(), nil + return nil, nil + } + + return volumetypes.GeneratedOperations{ + OperationFunc: unmountDeviceFunc, + CompleteFunc: util.OperationCompleteHook(attachableVolumePlugin.GetPluginName(), "unmount_device"), + EventRecorderFunc: nil, // nil because we do not want to generate event on error + }, nil } // GenerateMapVolumeFunc marks volume as mounted based on following steps. @@ -731,25 +785,27 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( // device map path. Once symbolic links are created, take fd lock by // loopback for the device to avoid silent volume replacement. This lock // will be realased once no one uses the device. -// If all steps are completed, the volume is marked as unmounted. +// If all steps are completed, the volume is marked as mounted. 
func (og *operationGenerator) GenerateMapVolumeFunc( waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, - actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { + actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { // Get block volume mapper plugin var blockVolumeMapper volume.BlockVolumeMapper blockVolumePlugin, err := og.volumePluginMgr.FindMapperPluginBySpec(volumeToMount.VolumeSpec) if err != nil { - return nil, "", volumeToMount.GenerateErrorDetailed("MapVolume.FindMapperPluginBySpec failed", err) + return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("MapVolume.FindMapperPluginBySpec failed", err) } if blockVolumePlugin == nil { - return nil, "", volumeToMount.GenerateErrorDetailed("MapVolume.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil) + return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("MapVolume.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. 
Volume plugin is nil.", nil) } affinityErr := checkNodeAffinity(og, volumeToMount, blockVolumePlugin) if affinityErr != nil { - return nil, blockVolumePlugin.GetPluginName(), affinityErr + eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.NodeAffinity check failed", affinityErr) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) + return volumetypes.GeneratedOperations{}, detailedErr } blockVolumeMapper, newMapperErr := blockVolumePlugin.NewBlockVolumeMapper( volumeToMount.VolumeSpec, @@ -758,7 +814,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( if newMapperErr != nil { eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.NewBlockVolumeMapper initialization failed", newMapperErr) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, eventErr.Error()) - return nil, blockVolumePlugin.GetPluginName(), detailedErr + return volumetypes.GeneratedOperations{}, detailedErr } // Get attacher, if possible @@ -769,7 +825,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( volumeAttacher, _ = attachableVolumePlugin.NewAttacher() } - return func() error { + mapVolumeFunc := func() (error, error) { var devicePath string if volumeAttacher != nil { // Wait for attachable volumes to finish attaching @@ -779,7 +835,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( volumeToMount.VolumeSpec, volumeToMount.DevicePath, volumeToMount.Pod, waitForAttachTimeout) if err != nil { // On failure, return error. Caller will log and retry. 
- return volumeToMount.GenerateErrorDetailed("MapVolume.WaitForAttach failed", err) + return volumeToMount.GenerateError("MapVolume.WaitForAttach failed", err) } glog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) @@ -789,23 +845,21 @@ func (og *operationGenerator) GenerateMapVolumeFunc( volumeToMount.VolumeName) if markDeviceMappedErr != nil { // On failure, return error. Caller will log and retry. - return volumeToMount.GenerateErrorDetailed("MapVolume.MarkDeviceAsMounted failed", markDeviceMappedErr) + return volumeToMount.GenerateError("MapVolume.MarkDeviceAsMounted failed", markDeviceMappedErr) } } // A plugin doesn't have attacher also needs to map device to global map path with SetUpDevice() pluginDevicePath, mapErr := blockVolumeMapper.SetUpDevice() if mapErr != nil { // On failure, return error. Caller will log and retry. - eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.SetUp failed", mapErr) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, eventErr.Error()) - return detailedErr + return volumeToMount.GenerateError("MapVolume.SetUp failed", mapErr) } // Update devicePath for none attachable plugin case if len(devicePath) == 0 { if len(pluginDevicePath) != 0 { devicePath = pluginDevicePath } else { - return volumeToMount.GenerateErrorDetailed("MapVolume failed", fmt.Errorf("Device path of the volume is empty")) + return volumeToMount.GenerateError("MapVolume failed", fmt.Errorf("Device path of the volume is empty")) } } // Set up global map path under the given plugin directory using symbolic link @@ -813,14 +867,12 @@ func (og *operationGenerator) GenerateMapVolumeFunc( blockVolumeMapper.GetGlobalMapPath(volumeToMount.VolumeSpec) if err != nil { // On failure, return error. Caller will log and retry. 
- return volumeToMount.GenerateErrorDetailed("MapVolume.GetDeviceMountPath failed", err) + return volumeToMount.GenerateError("MapVolume.GetDeviceMountPath failed", err) } mapErr = og.blkUtil.MapDevice(devicePath, globalMapPath, string(volumeToMount.Pod.UID)) if mapErr != nil { // On failure, return error. Caller will log and retry. - eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.MapDevice failed", mapErr) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, eventErr.Error()) - return detailedErr + return volumeToMount.GenerateError("MapVolume.MapDevice failed", mapErr) } // Device mapping for global map path succeeded simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("globalMapPath %q", globalMapPath)) @@ -833,9 +885,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( mapErr = og.blkUtil.MapDevice(devicePath, volumeMapPath, volName) if mapErr != nil { // On failure, return error. Caller will log and retry. - eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.MapDevice failed", mapErr) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, eventErr.Error()) - return detailedErr + return volumeToMount.GenerateError("MapVolume.MapDevice failed", mapErr) } // Take filedescriptor lock to keep a block device opened. Otherwise, there is a case @@ -844,7 +894,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( // for the block device is required. _, err = og.blkUtil.AttachFileDevice(devicePath) if err != nil { - return volumeToMount.GenerateErrorDetailed("MapVolume.AttachFileDevice failed", err) + return volumeToMount.GenerateError("MapVolume.AttachFileDevice failed", err) } // Device mapping for pod device map path succeeded @@ -864,11 +914,23 @@ func (og *operationGenerator) GenerateMapVolumeFunc( volumeToMount.VolumeGidValue) if markVolMountedErr != nil { // On failure, return error. Caller will log and retry. 
- return volumeToMount.GenerateErrorDetailed("MapVolume.MarkVolumeAsMounted failed", markVolMountedErr) + return volumeToMount.GenerateError("MapVolume.MarkVolumeAsMounted failed", markVolMountedErr) } - return nil - }, blockVolumePlugin.GetPluginName(), nil + return nil, nil + } + + eventRecorderFunc := func(err *error) { + if *err != nil { + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, (*err).Error()) + } + } + + return volumetypes.GeneratedOperations{ + OperationFunc: mapVolumeFunc, + EventRecorderFunc: eventRecorderFunc, + CompleteFunc: util.OperationCompleteHook(blockVolumePlugin.GetPluginName(), "map_volume"), + }, nil } // GenerateUnmapVolumeFunc marks volume as unmonuted based on following steps. @@ -877,32 +939,32 @@ func (og *operationGenerator) GenerateMapVolumeFunc( // If all steps are completed, the volume is marked as unmounted. func (og *operationGenerator) GenerateUnmapVolumeFunc( volumeToUnmount MountedVolume, - actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { + actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { // Get block volume unmapper plugin var blockVolumeUnmapper volume.BlockVolumeUnmapper blockVolumePlugin, err := og.volumePluginMgr.FindMapperPluginByName(volumeToUnmount.PluginName) if err != nil { - return nil, "", volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed", err) + return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed", err) } if blockVolumePlugin == nil { - return nil, "", volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil) + return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed to find BlockVolumeMapper plugin. 
Volume plugin is nil.", nil) } blockVolumeUnmapper, newUnmapperErr := blockVolumePlugin.NewBlockVolumeUnmapper( volumeToUnmount.InnerVolumeSpecName, volumeToUnmount.PodUID) if newUnmapperErr != nil { - return nil, blockVolumePlugin.GetPluginName(), volumeToUnmount.GenerateErrorDetailed("UnmapVolume.NewUnmapper failed", newUnmapperErr) + return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.NewUnmapper failed", newUnmapperErr) } - return func() error { + unmapVolumeFunc := func() (error, error) { // Try to unmap volumeName symlink under pod device map path dir // pods/{podUid}/volumeDevices/{escapeQualifiedPluginName}/{volumeName} podDeviceUnmapPath, volName := blockVolumeUnmapper.GetPodDeviceMapPath() unmapDeviceErr := og.blkUtil.UnmapDevice(podDeviceUnmapPath, volName) if unmapDeviceErr != nil { // On failure, return error. Caller will log and retry. - return volumeToUnmount.GenerateErrorDetailed("UnmapVolume.UnmapDevice on pod device map path failed", unmapDeviceErr) + return volumeToUnmount.GenerateError("UnmapVolume.UnmapDevice on pod device map path failed", unmapDeviceErr) } // Try to unmap podUID symlink under global map path dir // plugins/kubernetes.io/{PluginName}/volumeDevices/{volumePluginDependentPath}/{podUID} @@ -910,12 +972,12 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( blockVolumeUnmapper.GetGlobalMapPath(volumeToUnmount.VolumeSpec) if err != nil { // On failure, return error. Caller will log and retry. - return volumeToUnmount.GenerateErrorDetailed("UnmapVolume.GetGlobalUnmapPath failed", err) + return volumeToUnmount.GenerateError("UnmapVolume.GetGlobalUnmapPath failed", err) } unmapDeviceErr = og.blkUtil.UnmapDevice(globalUnmapPath, string(volumeToUnmount.PodUID)) if unmapDeviceErr != nil { // On failure, return error. Caller will log and retry. 
- return volumeToUnmount.GenerateErrorDetailed("UnmapVolume.UnmapDevice on global map path failed", unmapDeviceErr) + return volumeToUnmount.GenerateError("UnmapVolume.UnmapDevice on global map path failed", unmapDeviceErr) } glog.Infof( @@ -936,8 +998,14 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( glog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error()) } - return nil - }, blockVolumePlugin.GetPluginName(), nil + return nil, nil + } + + return volumetypes.GeneratedOperations{ + OperationFunc: unmapVolumeFunc, + CompleteFunc: util.OperationCompleteHook(blockVolumePlugin.GetPluginName(), "unmap_volume"), + EventRecorderFunc: nil, // nil because we do not want to generate event on error + }, nil } // GenerateUnmapDeviceFunc marks device as unmounted based on following steps. @@ -953,56 +1021,56 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( func (og *operationGenerator) GenerateUnmapDeviceFunc( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, - mounter mount.Interface) (func() error, string, error) { + mounter mount.Interface) (volumetypes.GeneratedOperations, error) { // Get block volume mapper plugin var blockVolumeMapper volume.BlockVolumeMapper blockVolumePlugin, err := og.volumePluginMgr.FindMapperPluginBySpec(deviceToDetach.VolumeSpec) if err != nil { - return nil, "", deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginBySpec failed", err) + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginBySpec failed", err) } if blockVolumePlugin == nil { - return nil, "", deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil) + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. 
Volume plugin is nil.", nil) } blockVolumeMapper, newMapperErr := blockVolumePlugin.NewBlockVolumeMapper( deviceToDetach.VolumeSpec, nil, /* Pod */ volume.VolumeOptions{}) if newMapperErr != nil { - return nil, "", deviceToDetach.GenerateErrorDetailed("UnmapDevice.NewBlockVolumeMapper initialization failed", newMapperErr) + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.NewBlockVolumeMapper initialization failed", newMapperErr) } blockVolumeUnmapper, newUnmapperErr := blockVolumePlugin.NewBlockVolumeUnmapper( string(deviceToDetach.VolumeName), "" /* podUID */) if newUnmapperErr != nil { - return nil, blockVolumePlugin.GetPluginName(), deviceToDetach.GenerateErrorDetailed("UnmapDevice.NewUnmapper failed", newUnmapperErr) + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.NewUnmapper failed", newUnmapperErr) } - return func() error { + unmapDeviceFunc := func() (error, error) { // Search under globalMapPath dir if all symbolic links from pods have been removed already. // If symbolick links are there, pods may still refer the volume. globalMapPath, err := blockVolumeMapper.GetGlobalMapPath(deviceToDetach.VolumeSpec) if err != nil { // On failure, return error. Caller will log and retry. 
- return deviceToDetach.GenerateErrorDetailed("UnmapDevice.GetGlobalMapPath failed", err) + return deviceToDetach.GenerateError("UnmapDevice.GetGlobalMapPath failed", err) } refs, err := og.blkUtil.GetDeviceSymlinkRefs(deviceToDetach.DevicePath, globalMapPath) if err != nil { - return deviceToDetach.GenerateErrorDetailed("UnmapDevice.GetDeviceSymlinkRefs check failed", err) + return deviceToDetach.GenerateError("UnmapDevice.GetDeviceSymlinkRefs check failed", err) } if len(refs) > 0 { err = fmt.Errorf("The device %q is still referenced from other Pods %v", globalMapPath, refs) - return deviceToDetach.GenerateErrorDetailed("UnmapDevice failed", err) + return deviceToDetach.GenerateError("UnmapDevice failed", err) } // Execute tear down device unmapErr := blockVolumeUnmapper.TearDownDevice(globalMapPath, deviceToDetach.DevicePath) if unmapErr != nil { // On failure, return error. Caller will log and retry. - return deviceToDetach.GenerateErrorDetailed("UnmapDevice.TearDownDevice failed", unmapErr) + return deviceToDetach.GenerateError("UnmapDevice.TearDownDevice failed", unmapErr) } // Plugin finished TearDownDevice(). Now globalMapPath dir and plugin's stored data @@ -1010,7 +1078,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( removeMapPathErr := og.blkUtil.RemoveMapPath(globalMapPath) if removeMapPathErr != nil { // On failure, return error. Caller will log and retry. - return deviceToDetach.GenerateErrorDetailed("UnmapDevice failed", removeMapPathErr) + return deviceToDetach.GenerateError("UnmapDevice failed", removeMapPathErr) } // The block volume is not referenced from Pods. Release file descriptor lock. 
@@ -1021,7 +1089,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( } else { err = og.blkUtil.RemoveLoopDevice(loopPath) if err != nil { - return deviceToDetach.GenerateErrorDetailed("UnmapDevice.AttachFileDevice failed", err) + return deviceToDetach.GenerateError("UnmapDevice.AttachFileDevice failed", err) } } @@ -1031,11 +1099,11 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( // else on the system. Retry if it returns true. deviceOpened, deviceOpenedErr := isDeviceOpened(deviceToDetach, mounter) if deviceOpenedErr != nil { - return deviceOpenedErr + return nil, deviceOpenedErr } // The device is still in use elsewhere. Caller will log and retry. if deviceOpened { - return deviceToDetach.GenerateErrorDetailed( + return deviceToDetach.GenerateError( "UnmapDevice failed", fmt.Errorf("the device is in use when it was no longer expected to be in use")) } @@ -1047,24 +1115,30 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( deviceToDetach.VolumeName) if markDeviceUnmountedErr != nil { // On failure, return error. Caller will log and retry. 
- return deviceToDetach.GenerateErrorDetailed("MarkDeviceAsUnmounted failed", markDeviceUnmountedErr) + return deviceToDetach.GenerateError("MarkDeviceAsUnmounted failed", markDeviceUnmountedErr) } - return nil - }, blockVolumePlugin.GetPluginName(), nil + return nil, nil + } + + return volumetypes.GeneratedOperations{ + OperationFunc: unmapDeviceFunc, + CompleteFunc: util.OperationCompleteHook(blockVolumePlugin.GetPluginName(), "unmap_device"), + EventRecorderFunc: nil, // nil because we do not want to generate event on error + }, nil } func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( volumeToMount VolumeToMount, nodeName types.NodeName, - actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) if err != nil || volumePlugin == nil { - return nil, "", volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume.FindPluginBySpec failed", err) + return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume.FindPluginBySpec failed", err) } - return func() error { + verifyControllerAttachedVolumeFunc := func() (error, error) { if !volumeToMount.PluginIsAttachable { // If the volume does not implement the attacher interface, it is // assumed to be attached and the actual state of the world is @@ -1074,10 +1148,10 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( volumeToMount.VolumeName, volumeToMount.VolumeSpec, nodeName, "" /* devicePath */) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. 
- return volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume.MarkVolumeAsAttachedByUniqueVolumeName failed", addVolumeNodeErr) + return volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttachedByUniqueVolumeName failed", addVolumeNodeErr) } - return nil + return nil, nil } if !volumeToMount.ReportedInUse { @@ -1087,19 +1161,19 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( // periodically by kubelet, so it may take as much as 10 seconds // before this clears. // Issue #28141 to enable on demand status updates. - return volumeToMount.GenerateErrorDetailed("Volume has not been added to the list of VolumesInUse in the node's volume status", nil) + return volumeToMount.GenerateError("Volume has not been added to the list of VolumesInUse in the node's volume status", nil) } // Fetch current node object node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) if fetchErr != nil { // On failure, return error. Caller will log and retry. - return volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume failed fetching node from API server", fetchErr) + return volumeToMount.GenerateError("VerifyControllerAttachedVolume failed fetching node from API server", fetchErr) } if node == nil { // On failure, return error. Caller will log and retry. - return volumeToMount.GenerateErrorDetailed( + return volumeToMount.GenerateError( "VerifyControllerAttachedVolume failed", fmt.Errorf("Node object retrieved from API server is nil")) } @@ -1111,15 +1185,22 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( glog.Infof(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath))) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. 
- return volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume.MarkVolumeAsAttached failed", addVolumeNodeErr) + return volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttached failed", addVolumeNodeErr) } - return nil + return nil, nil } } // Volume not attached, return error. Caller will log and retry. - return volumeToMount.GenerateErrorDetailed("Volume not attached according to node status", nil) - }, volumePlugin.GetPluginName(), nil + return volumeToMount.GenerateError("Volume not attached according to node status", nil) + } + + return volumetypes.GeneratedOperations{ + OperationFunc: verifyControllerAttachedVolumeFunc, + CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "verify_controller_attached_volume"), + EventRecorderFunc: nil, // nil because we do not want to generate event on error + }, nil + } func (og *operationGenerator) verifyVolumeIsSafeToDetach( @@ -1158,17 +1239,17 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach( func (og *operationGenerator) GenerateExpandVolumeFunc( pvcWithResizeRequest *expandcache.PVCWithResizeRequest, - resizeMap expandcache.VolumeResizeMap) (func() error, string, error) { + resizeMap expandcache.VolumeResizeMap) (volumetypes.GeneratedOperations, error) { volumeSpec := volume.NewSpecFromPersistentVolume(pvcWithResizeRequest.PersistentVolume, false) volumePlugin, err := og.volumePluginMgr.FindExpandablePluginBySpec(volumeSpec) if err != nil { - return nil, "", fmt.Errorf("Error finding plugin for expanding volume: %q with error %v", pvcWithResizeRequest.QualifiedName(), err) + return volumetypes.GeneratedOperations{}, fmt.Errorf("Error finding plugin for expanding volume: %q with error %v", pvcWithResizeRequest.QualifiedName(), err) } - expandFunc := func() error { + expandVolumeFunc := func() (error, error) { newSize := pvcWithResizeRequest.ExpectedSize pvSize := pvcWithResizeRequest.PersistentVolume.Spec.Capacity[v1.ResourceStorage] if 
pvSize.Cmp(newSize) < 0 { @@ -1178,9 +1259,8 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( pvcWithResizeRequest.CurrentSize) if expandErr != nil { - glog.Errorf("Error expanding volume %q of plugin %s : %v", pvcWithResizeRequest.QualifiedName(), volumePlugin.GetPluginName(), expandErr) - og.recorder.Eventf(pvcWithResizeRequest.PVC, v1.EventTypeWarning, kevents.VolumeResizeFailed, expandErr.Error()) - return expandErr + detailedErr := fmt.Errorf("Error expanding volume %q of plugin %s : %v", pvcWithResizeRequest.QualifiedName(), volumePlugin.GetPluginName(), expandErr) + return detailedErr, detailedErr } glog.Infof("ExpandVolume succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) newSize = updatedSize @@ -1190,9 +1270,8 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( updateErr := resizeMap.UpdatePVSize(pvcWithResizeRequest, newSize) if updateErr != nil { - glog.V(4).Infof("Error updating PV spec capacity for volume %q with : %v", pvcWithResizeRequest.QualifiedName(), updateErr) - og.recorder.Eventf(pvcWithResizeRequest.PVC, v1.EventTypeWarning, kevents.VolumeResizeFailed, updateErr.Error()) - return updateErr + detailedErr := fmt.Errorf("Error updating PV spec capacity for volume %q with : %v", pvcWithResizeRequest.QualifiedName(), updateErr) + return detailedErr, detailedErr } glog.Infof("ExpandVolume.UpdatePV succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) } @@ -1205,24 +1284,32 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( err := resizeMap.MarkAsResized(pvcWithResizeRequest, newSize) if err != nil { - glog.Errorf("Error marking pvc %s as resized : %v", pvcWithResizeRequest.QualifiedName(), err) - og.recorder.Eventf(pvcWithResizeRequest.PVC, v1.EventTypeWarning, kevents.VolumeResizeFailed, err.Error()) - return err + detailedErr := fmt.Errorf("Error marking pvc %s as resized : %v", pvcWithResizeRequest.QualifiedName(), err) + return detailedErr, detailedErr } } - return nil + return nil, nil } - 
return expandFunc, volumePlugin.GetPluginName(), nil + + eventRecorderFunc := func(err *error) { + if *err != nil { + og.recorder.Eventf(pvcWithResizeRequest.PVC, v1.EventTypeWarning, kevents.VolumeResizeFailed, (*err).Error()) + } + } + + return volumetypes.GeneratedOperations{ + OperationFunc: expandVolumeFunc, + EventRecorderFunc: eventRecorderFunc, + CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "expand_volume"), + }, nil } func checkMountOptionSupport(og *operationGenerator, volumeToMount VolumeToMount, plugin volume.VolumePlugin) error { mountOptions := volume.MountOptionFromSpec(volumeToMount.VolumeSpec) if len(mountOptions) > 0 && !plugin.SupportsMountOption() { - eventErr, detailedErr := volumeToMount.GenerateError("Mount options are not supported for this volume type", nil) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.UnsupportedMountOption, eventErr.Error()) - return detailedErr + return fmt.Errorf("Mount options are not supported for this volume type") } return nil } @@ -1238,14 +1325,11 @@ func checkNodeAffinity(og *operationGenerator, volumeToMount VolumeToMount, plug if pv != nil { nodeLabels, err := og.volumePluginMgr.Host.GetNodeLabels() if err != nil { - return volumeToMount.GenerateErrorDetailed("Error getting node labels", err) + return err } - err = util.CheckNodeAffinity(pv, nodeLabels) if err != nil { - eventErr, detailedErr := volumeToMount.GenerateError("Storage node affinity check failed", err) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) - return detailedErr + return err } } return nil diff --git a/pkg/volume/util/types/types.go b/pkg/volume/util/types/types.go index 9375ad6750c..9815545ff60 100644 --- a/pkg/volume/util/types/types.go +++ b/pkg/volume/util/types/types.go @@ -24,3 +24,11 @@ type UniquePodName types.UID // UniquePVCName defines the type to key pvc off type UniquePVCName types.UID + +// GeneratedOperations contains 
the operation that is created as well as +// supporting functions required for the operation executor +type GeneratedOperations struct { + OperationFunc func() (eventErr error, detailedErr error) + EventRecorderFunc func(*error) + CompleteFunc func(*error) +} From 67cf959a1d187c0eeb34b42d0c72749faef98645 Mon Sep 17 00:00:00 2001 From: Kai Chen Date: Thu, 14 Dec 2017 11:11:58 -0800 Subject: [PATCH 313/794] Improve error messages and comments in KubeAdm. --- cmd/kubeadm/app/cmd/config.go | 8 ++++---- cmd/kubeadm/app/cmd/phases/certs.go | 2 +- cmd/kubeadm/app/cmd/phases/controlplane.go | 4 ++-- cmd/kubeadm/app/cmd/phases/kubeconfig_test.go | 12 ++++++------ cmd/kubeadm/app/cmd/phases/util.go | 2 +- cmd/kubeadm/app/discovery/file/file.go | 2 +- cmd/kubeadm/app/features/features_test.go | 2 +- cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go | 10 +++++----- cmd/kubeadm/app/phases/selfhosting/selfhosting.go | 4 ++-- cmd/kubeadm/app/phases/upgrade/prepull.go | 6 +++--- cmd/kubeadm/app/phases/upgrade/staticpods.go | 2 +- cmd/kubeadm/app/preflight/checks.go | 2 +- cmd/kubeadm/app/util/apiclient/dryrunclient.go | 6 +++--- 13 files changed, 31 insertions(+), 31 deletions(-) diff --git a/cmd/kubeadm/app/cmd/config.go b/cmd/kubeadm/app/cmd/config.go index c9f7fa01f17..00684a884a2 100644 --- a/cmd/kubeadm/app/cmd/config.go +++ b/cmd/kubeadm/app/cmd/config.go @@ -65,7 +65,7 @@ func NewCmdConfig(out io.Writer) *cobra.Command { return cmd } -// NewCmdConfigUpload returs cobra.Command for "kubeadm config upload" command +// NewCmdConfigUpload returns cobra.Command for "kubeadm config upload" command func NewCmdConfigUpload(out io.Writer, kubeConfigFile *string) *cobra.Command { cmd := &cobra.Command{ Use: "upload", @@ -78,7 +78,7 @@ func NewCmdConfigUpload(out io.Writer, kubeConfigFile *string) *cobra.Command { return cmd } -// NewCmdConfigView returs cobra.Command for "kubeadm config view" command +// NewCmdConfigView returns cobra.Command for "kubeadm config view" command func 
NewCmdConfigView(out io.Writer, kubeConfigFile *string) *cobra.Command { return &cobra.Command{ Use: "view", @@ -98,7 +98,7 @@ func NewCmdConfigView(out io.Writer, kubeConfigFile *string) *cobra.Command { } } -// NewCmdConfigUploadFromFile verifies given kubernetes config file and returs cobra.Command for +// NewCmdConfigUploadFromFile verifies given kubernetes config file and returns cobra.Command for // "kubeadm config upload from-file" command func NewCmdConfigUploadFromFile(out io.Writer, kubeConfigFile *string) *cobra.Command { var cfgPath string @@ -131,7 +131,7 @@ func NewCmdConfigUploadFromFile(out io.Writer, kubeConfigFile *string) *cobra.Co return cmd } -// NewCmdConfigUploadFromFlags returs cobra.Command for "kubeadm config upload from-flags" command +// NewCmdConfigUploadFromFlags returns cobra.Command for "kubeadm config upload from-flags" command func NewCmdConfigUploadFromFlags(out io.Writer, kubeConfigFile *string) *cobra.Command { cfg := &kubeadmapiext.MasterConfiguration{} legacyscheme.Scheme.Default(cfg) diff --git a/cmd/kubeadm/app/cmd/phases/certs.go b/cmd/kubeadm/app/cmd/phases/certs.go index 6f37c6d20dd..3c87983ad2b 100644 --- a/cmd/kubeadm/app/cmd/phases/certs.go +++ b/cmd/kubeadm/app/cmd/phases/certs.go @@ -206,7 +206,7 @@ func getCertsSubCommands(defaultKubernetesVersion string) []*cobra.Command { // runCmdFunc creates a cobra.Command Run function, by composing the call to the given cmdFunc with necessary additional steps (e.g preparation of input parameters) func runCmdFunc(cmdFunc func(cfg *kubeadmapi.MasterConfiguration) error, cfgPath *string, cfg *kubeadmapiext.MasterConfiguration) func(cmd *cobra.Command, args []string) { - // the following statement build a clousure that wraps a call to a cmdFunc, binding + // the following statement build a closure that wraps a call to a cmdFunc, binding // the function itself with the specific parameters of each sub command. 
// Please note that specific parameter should be passed as value, while other parameters - passed as reference - // are shared between sub commands and gets access to current value e.g. flags value. diff --git a/cmd/kubeadm/app/cmd/phases/controlplane.go b/cmd/kubeadm/app/cmd/phases/controlplane.go index 183c13981a8..726294990a5 100644 --- a/cmd/kubeadm/app/cmd/phases/controlplane.go +++ b/cmd/kubeadm/app/cmd/phases/controlplane.go @@ -82,7 +82,7 @@ func getControlPlaneSubCommands(outDir, defaultKubernetesVersion string) []*cobr // This is used for unit testing only... // If we wouldn't set this to something, the code would dynamically look up the version from the internet - // By setting this explicitely for tests workarounds that + // By setting this explicitly for tests workarounds that if defaultKubernetesVersion != "" { cfg.KubernetesVersion = defaultKubernetesVersion } @@ -164,7 +164,7 @@ func getControlPlaneSubCommands(outDir, defaultKubernetesVersion string) []*cobr // runCmdControlPlane creates a cobra.Command Run function, by composing the call to the given cmdFunc with necessary additional steps (e.g preparation of input parameters) func runCmdControlPlane(cmdFunc func(outDir string, cfg *kubeadmapi.MasterConfiguration) error, outDir, cfgPath *string, featureGatesString *string, cfg *kubeadmapiext.MasterConfiguration) func(cmd *cobra.Command, args []string) { - // the following statement build a clousure that wraps a call to a cmdFunc, binding + // the following statement build a closure that wraps a call to a cmdFunc, binding // the function itself with the specific parameters of each sub command. // Please note that specific parameter should be passed as value, while other parameters - passed as reference - // are shared between sub commands and gets access to current value e.g. flags value. 
diff --git a/cmd/kubeadm/app/cmd/phases/kubeconfig_test.go b/cmd/kubeadm/app/cmd/phases/kubeconfig_test.go index d3a4d93b0c0..089147ada70 100644 --- a/cmd/kubeadm/app/cmd/phases/kubeconfig_test.go +++ b/cmd/kubeadm/app/cmd/phases/kubeconfig_test.go @@ -170,10 +170,10 @@ func TestKubeConfigSubCommandsThatCreateFilesWithFlags(t *testing.T) { outputdir := tmpdir - // Retrives ca cert for assertions + // Retrieves ca cert for assertions caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkidir, kubeadmconstants.CACertAndKeyBaseName) if err != nil { - t.Fatalf("couldn't retrive ca cert: %v", err) + t.Fatalf("couldn't retrieve ca cert: %v", err) } // Get subcommands working in the temporary directory @@ -272,10 +272,10 @@ func TestKubeConfigSubCommandsThatCreateFilesWithConfigFile(t *testing.T) { // Adds a pki folder with a ca certs to the temp folder pkidir := testutil.SetupPkiDirWithCertificateAuthorithy(t, tmpdir) - // Retrives ca cert for assertions + // Retrieves ca cert for assertions caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkidir, kubeadmconstants.CACertAndKeyBaseName) if err != nil { - t.Fatalf("couldn't retrive ca cert: %v", err) + t.Fatalf("couldn't retrieve ca cert: %v", err) } // Adds a master configuration file @@ -327,10 +327,10 @@ func TestKubeConfigSubCommandsThatWritesToOut(t *testing.T) { outputdir := tmpdir - // Retrives ca cert for assertions + // Retrieves ca cert for assertions caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkidir, kubeadmconstants.CACertAndKeyBaseName) if err != nil { - t.Fatalf("couldn't retrive ca cert: %v", err) + t.Fatalf("couldn't retrieve ca cert: %v", err) } commonFlags := []string{ diff --git a/cmd/kubeadm/app/cmd/phases/util.go b/cmd/kubeadm/app/cmd/phases/util.go index 32b7f3fb1d4..92be3a187c3 100644 --- a/cmd/kubeadm/app/cmd/phases/util.go +++ b/cmd/kubeadm/app/cmd/phases/util.go @@ -29,7 +29,7 @@ import ( // runCmdPhase creates a cobra.Command Run function, by composing the call to the given cmdFunc 
with necessary additional steps (e.g preparation of input parameters) func runCmdPhase(cmdFunc func(outDir string, cfg *kubeadmapi.MasterConfiguration) error, outDir, cfgPath *string, cfg *kubeadmapiext.MasterConfiguration) func(cmd *cobra.Command, args []string) { - // the following statement build a clousure that wraps a call to a cmdFunc, binding + // the following statement build a closure that wraps a call to a cmdFunc, binding // the function itself with the specific parameters of each sub command. // Please note that specific parameter should be passed as value, while other parameters - passed as reference - // are shared between sub commands and gets access to current value e.g. flags value. diff --git a/cmd/kubeadm/app/discovery/file/file.go b/cmd/kubeadm/app/discovery/file/file.go index 5dc0188e37c..60d3d48d08b 100644 --- a/cmd/kubeadm/app/discovery/file/file.go +++ b/cmd/kubeadm/app/discovery/file/file.go @@ -75,7 +75,7 @@ func ValidateClusterInfo(clusterinfo *clientcmdapi.Config) (*clientcmdapi.Cluste clusterinfoCM, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { if apierrors.IsForbidden(err) { - // If the request is unauthorized, the cluster admin has not granted access to the cluster info configmap for unauthenicated users + // If the request is unauthorized, the cluster admin has not granted access to the cluster info configmap for unauthenticated users // In that case, trust the cluster admin and do not refresh the cluster-info credentials fmt.Printf("[discovery] Could not access the %s ConfigMap for refreshing the cluster-info information, but the TLS cert is valid so proceeding...\n", bootstrapapi.ConfigMapClusterInfo) return true, nil diff --git a/cmd/kubeadm/app/features/features_test.go b/cmd/kubeadm/app/features/features_test.go index 71c1e2b9c21..9c157d68ecb 100644 --- a/cmd/kubeadm/app/features/features_test.go +++ b/cmd/kubeadm/app/features/features_test.go 
@@ -46,7 +46,7 @@ func TestKnownFeatures(t *testing.T) { if r[1] != f2 { t.Errorf("KnownFeatures returned %s values, expected %s", r[1], f2) } - // check the second value is feature3; prerelease should not shown fo GA features; default should be present + // check the second value is feature3; prerelease should not be shown for GA features; default should be present f3 := "feature3=true|false (default=false)" if r[2] != f3 { t.Errorf("KnownFeatures returned %s values, expected %s", r[2], f3) diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go index 5290b2fb598..947bc75ab2f 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go +++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go @@ -143,7 +143,7 @@ func TestBuildKubeConfigFromSpecWithClientAuth(t *testing.T) { // Creates a CA caCert, caKey := certstestutil.SetupCertificateAuthorithy(t) - // Executes buildKubeConfigFromSpec passing a KubeConfigSpec wiht a ClientAuth + // Executes buildKubeConfigFromSpec passing a KubeConfigSpec with a ClientAuth config := setupdKubeConfigWithClientAuth(t, caCert, caKey, "https://1.2.3.4:1234", "myClientName", "myOrg1", "myOrg2") // Asserts spec data are propagated to the kubeconfig @@ -155,7 +155,7 @@ func TestBuildKubeConfigFromSpecWithTokenAuth(t *testing.T) { // Creates a CA caCert, _ := certstestutil.SetupCertificateAuthorithy(t) - // Executes buildKubeConfigFromSpec passing a KubeConfigSpec wiht a Token + // Executes buildKubeConfigFromSpec passing a KubeConfigSpec with a Token config := setupdKubeConfigWithTokenAuth(t, caCert, "https://1.2.3.4:1234", "myClientName", "123456") // Asserts spec data are propagated to the kubeconfig @@ -219,7 +219,7 @@ func TestCreateKubeConfigFileIfNotExists(t *testing.T) { t.Errorf("createKubeConfigFileIfNotExists failed") } - // Assert creted files is there + // Assert that the created file is there testutil.AssertFileExists(t, tmpdir, "test.conf") } } @@ 
-338,10 +338,10 @@ func TestWriteKubeConfig(t *testing.T) { // Adds a pki folder with a ca cert to the temp folder pkidir := testutil.SetupPkiDirWithCertificateAuthorithy(t, tmpdir) - // Retrives ca cert for assertions + // Retrieves ca cert for assertions caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkidir, kubeadmconstants.CACertAndKeyBaseName) if err != nil { - t.Fatalf("couldn't retrive ca cert: %v", err) + t.Fatalf("couldn't retrieve ca cert: %v", err) } // Creates a Master Configuration pointing to the pkidir folder diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go index 18ae9563f8f..e1035421b56 100644 --- a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go @@ -112,7 +112,7 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea } // Wait for the mirror Pod hash to be removed; otherwise we'll run into race conditions here when the kubelet hasn't had time to - // remove the Static Pod (or the mirror Pod respectively). This implicitely also tests that the API server endpoint is healthy, + // remove the Static Pod (or the mirror Pod respectively). 
This implicitly also tests that the API server endpoint is healthy, // because this blocks until the API server returns a 404 Not Found when getting the Static Pod staticPodName := fmt.Sprintf("%s-%s", componentName, cfg.NodeName) if err := waiter.WaitForPodToDisappear(staticPodName); err != nil { @@ -129,7 +129,7 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea return nil } -// BuildDaemonSet is responsible for mutating the PodSpec and return a DaemonSet which is suitable for the self-hosting purporse +// BuildDaemonSet is responsible for mutating the PodSpec and returns a DaemonSet which is suitable for self-hosting func BuildDaemonSet(name string, podSpec *v1.PodSpec, mutators map[string][]PodSpecMutatorFunc) *apps.DaemonSet { // Mutate the PodSpec so it's suitable for self-hosting diff --git a/cmd/kubeadm/app/phases/upgrade/prepull.go b/cmd/kubeadm/app/phases/upgrade/prepull.go index 5d0b2940234..3dd3c5583c7 100644 --- a/cmd/kubeadm/app/phases/upgrade/prepull.go +++ b/cmd/kubeadm/app/phases/upgrade/prepull.go @@ -41,7 +41,7 @@ type Prepuller interface { DeleteFunc(string) error } -// DaemonSetPrepuller makes sure the control plane images are availble on all masters +// DaemonSetPrepuller makes sure the control plane images are available on all masters type DaemonSetPrepuller struct { client clientset.Interface cfg *kubeadmapi.MasterConfiguration @@ -99,11 +99,11 @@ func PrepullImagesInParallel(kubePrepuller Prepuller, timeout time.Duration) err } } - // Create a channel for streaming data from goroutines that run in parallell to a blocking for loop that cleans up + // Create a channel for streaming data from goroutines that run in parallel to a blocking for loop that cleans up prePulledChan := make(chan string, len(componentsToPrepull)) for _, component := range componentsToPrepull { go func(c string) { - // Wait as long as needed. This WaitFunc call should be blocking until completetion + // Wait as long as needed. 
This WaitFunc call should be blocking until completion kubePrepuller.WaitFunc(c) // When the task is done, go ahead and cleanup by sending the name to the channel prePulledChan <- c diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index b661ea20593..8ea3b2559f5 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -337,7 +337,7 @@ func rollbackOldManifests(oldManifests map[string]string, origErr error, pathMgr errs = append(errs, err) } } - // Let the user know there we're problems, but we tried to reçover + // Let the user know there were problems, but we tried to recover return fmt.Errorf("couldn't upgrade control plane. kubeadm has tried to recover everything into the earlier state. Errors faced: %v", errs) } diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index a79ff7bc866..4924da287e5 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -79,7 +79,7 @@ func (e *Error) Error() string { } // Checker validates the state of the system to ensure kubeadm will be -// successful as often as possilble. +// successful as often as possible. type Checker interface { Check() (warnings, errors []error) Name() string diff --git a/cmd/kubeadm/app/util/apiclient/dryrunclient.go b/cmd/kubeadm/app/util/apiclient/dryrunclient.go index 030187987d4..6c0d796e6fa 100644 --- a/cmd/kubeadm/app/util/apiclient/dryrunclient.go +++ b/cmd/kubeadm/app/util/apiclient/dryrunclient.go @@ -85,10 +85,10 @@ func NewDryRunClient(drg DryRunGetter, w io.Writer) clientset.Interface { // This client doesn't apply changes to the backend. The client gets GET/LIST values from the DryRunGetter implementation. 
// This client logs all I/O to the writer w in YAML format func NewDryRunClientWithOpts(opts DryRunClientOptions) clientset.Interface { - // Build a chain of reactors to act like a normal clientset; but log everything's that happening and don't change any state + // Build a chain of reactors to act like a normal clientset; but log everything that is happening and don't change any state client := fakeclientset.NewSimpleClientset() - // Build the chain of reactors. Order matters; first item here will be invoked first on match, then the second one will be evaluted, etc. + // Build the chain of reactors. Order matters; first item here will be invoked first on match, then the second one will be evaluated, etc. defaultReactorChain := []core.Reactor{ // Log everything that happens. Default the object if it's about to be created/updated so that the logged object is representative. &core.SimpleReactor{ @@ -223,7 +223,7 @@ func logDryRunAction(action core.Action, w io.Writer, marshalFunc MarshalFunc) { patchAction, ok := action.(core.PatchAction) if ok { - // Replace all occurences of \" with a simple " when printing + // Replace all occurrences of \" with a simple " when printing fmt.Fprintf(w, "[dryrun] Attached patch:\n\t%s\n", strings.Replace(string(patchAction.GetPatch()), `\"`, `"`, -1)) } } From b33aaa0df4b33d6b849cb93299d281d03eef71f3 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 14 Dec 2017 16:54:32 -0500 Subject: [PATCH 314/794] Remove mutation from pvc validation --- pkg/apis/core/validation/validation.go | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 655d0bf2163..3d7c1979930 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -1773,27 +1773,26 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc 
*core.PersistentVolumeClaim) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...) + newPvcClone := newPvc.DeepCopy() + oldPvcClone := oldPvc.DeepCopy() + // PVController needs to update PVC.Spec w/ VolumeName. // Claims are immutable in order to enforce quota, range limits, etc. without gaming the system. if len(oldPvc.Spec.VolumeName) == 0 { // volumeName changes are allowed once. - // Reset back to empty string after equality check - oldPvc.Spec.VolumeName = newPvc.Spec.VolumeName - defer func() { oldPvc.Spec.VolumeName = "" }() + oldPvcClone.Spec.VolumeName = newPvcClone.Spec.VolumeName } if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { - newPVCSpecCopy := newPvc.Spec.DeepCopy() - // lets make sure storage values are same. - if newPvc.Status.Phase == core.ClaimBound && newPVCSpecCopy.Resources.Requests != nil { - newPVCSpecCopy.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] + if newPvc.Status.Phase == core.ClaimBound && newPvcClone.Spec.Resources.Requests != nil { + newPvcClone.Spec.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] } oldSize := oldPvc.Spec.Resources.Requests["storage"] newSize := newPvc.Spec.Resources.Requests["storage"] - if !apiequality.Semantic.DeepEqual(*newPVCSpecCopy, oldPvc.Spec) { + if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "is immutable after creation except resources.requests for bound claims")) } if newSize.Cmp(oldSize) < 0 { @@ -1803,7 +1802,7 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeCl } else { // changes to Spec are not allowed, but updates to label/and some annotations are OK. // no-op updates pass validation. 
- if !apiequality.Semantic.DeepEqual(newPvc.Spec, oldPvc.Spec) { + if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable after creation")) } } @@ -1815,8 +1814,6 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeCl if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { allErrs = append(allErrs, ValidateImmutableField(newPvc.Spec.VolumeMode, oldPvc.Spec.VolumeMode, field.NewPath("volumeMode"))...) } - - newPvc.Status = oldPvc.Status return allErrs } From c4b908ad526ccc5b530588b39966cafa3e36f84f Mon Sep 17 00:00:00 2001 From: iloayuil Date: Fri, 15 Dec 2017 09:27:52 +0800 Subject: [PATCH 315/794] typo wrong, not "namespace", but "secretName" namespace, _ := claims[NamespaceClaim].(string) if len(namespace) == 0 { return nil, false, errors.New("namespace claim is missing") } secretName, _ := claims[SecretNameClaim].(string) if len(namespace) == 0 { return nil, false, errors.New("secretName claim is missing") } --- pkg/serviceaccount/jwt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/serviceaccount/jwt.go b/pkg/serviceaccount/jwt.go index a7051be3077..fc380e33a87 100644 --- a/pkg/serviceaccount/jwt.go +++ b/pkg/serviceaccount/jwt.go @@ -194,7 +194,7 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(token string) (user.Info, bool return nil, false, errors.New("namespace claim is missing") } secretName, _ := claims[SecretNameClaim].(string) - if len(namespace) == 0 { + if len(secretName) == 0 { return nil, false, errors.New("secretName claim is missing") } serviceAccountName, _ := claims[ServiceAccountNameClaim].(string) From 6149df089e2667fefb740e408ece883fd76dd40e Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Fri, 1 Dec 2017 11:07:28 +0800 Subject: [PATCH 316/794] add admission into RecommendedOption --- .../pkg/cmd/server/start.go | 2 +- .../test/integration/testserver/start.go | 3 +- 
.../src/k8s.io/apiserver/pkg/server/config.go | 5 +++ .../apiserver/pkg/server/options/admission.go | 4 ++ .../pkg/server/options/recommended.go | 42 ++++++++++++++++++- .../kube-aggregator/pkg/cmd/server/start.go | 2 +- .../plugin/banflunder/admission_test.go | 5 +-- .../wardleinitializer/wardleinitializer.go | 4 +- .../wardleinitializer_test.go | 6 +-- .../sample-apiserver/pkg/cmd/server/start.go | 37 ++++++++-------- 10 files changed, 76 insertions(+), 34 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go index 6b41a2e06de..d2096412bfd 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go @@ -98,7 +98,7 @@ func (o CustomResourceDefinitionsServerOptions) Config() (*apiserver.Config, err } serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - if err := o.RecommendedOptions.ApplyTo(serverConfig); err != nil { + if err := o.RecommendedOptions.ApplyTo(serverConfig, apiserver.Scheme); err != nil { return nil, err } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/start.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/start.go index 99cbe9b3f85..81314842be3 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/start.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/start.go @@ -45,6 +45,7 @@ func DefaultServerConfig() (*extensionsapiserver.Config, error) { options.RecommendedOptions.SecureServing.BindPort = port options.RecommendedOptions.Authentication = nil // disable options.RecommendedOptions.Authorization = nil // disable + options.RecommendedOptions.Admission = nil // disable options.RecommendedOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1") etcdURL, ok := os.LookupEnv("KUBE_INTEGRATION_ETCD_URL") if !ok 
{ @@ -58,7 +59,7 @@ func DefaultServerConfig() (*extensionsapiserver.Config, error) { if err := options.RecommendedOptions.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil { return nil, fmt.Errorf("error creating self-signed certificates: %v", err) } - if err := options.RecommendedOptions.ApplyTo(genericConfig); err != nil { + if err := options.RecommendedOptions.ApplyTo(genericConfig, nil); err != nil { return nil, err } diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index 877071ad3b2..cd98717d2c0 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -188,9 +188,13 @@ type Config struct { PublicAddress net.IP } +type AdmissionInitializersInitFunc func() (admission.PluginInitializer, error) + type RecommendedConfig struct { Config + ExtraAdmissionInitializersInitFunc []AdmissionInitializersInitFunc + // SharedInformerFactory provides shared informers for Kubernetes resources. This value is set by // RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. It uses an in-cluster client config // by default, or the kubeconfig given with kubeconfig command line flag. 
@@ -259,6 +263,7 @@ func NewConfig(codecs serializer.CodecFactory) *Config { func NewRecommendedConfig(codecs serializer.CodecFactory) *RecommendedConfig { return &RecommendedConfig{ Config: *NewConfig(codecs), + ExtraAdmissionInitializersInitFunc: make([]AdmissionInitializersInitFunc, 0), } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go index 6232567f7a4..30716869146 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go @@ -100,6 +100,10 @@ func (a *AdmissionOptions) ApplyTo( scheme *runtime.Scheme, pluginInitializers ...admission.PluginInitializer, ) error { + if a == nil { + return nil + } + pluginNames := a.PluginNames if len(a.PluginNames) == 0 { pluginNames = a.enabledPluginNames() diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go index 21c3dd76159..eff7cde33d3 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go @@ -17,9 +17,12 @@ limitations under the License. 
package options import ( + "fmt" + "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/storage/storagebackend" ) @@ -35,6 +38,7 @@ type RecommendedOptions struct { Audit *AuditOptions Features *FeatureOptions CoreAPI *CoreAPIOptions + Admission *AdmissionOptions } func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptions { @@ -46,6 +50,7 @@ func NewRecommendedOptio Audit: NewAuditOptions(), Features: NewFeatureOptions(), CoreAPI: NewCoreAPIOptions(), + Admission: NewAdmissionOptions(), } } @@ -57,9 +62,13 @@ func (o *RecommendedOptions) AddFlags(fs *pflag.FlagSet) { o.Audit.AddFlags(fs) o.Features.AddFlags(fs) o.CoreAPI.AddFlags(fs) + o.Admission.AddFlags(fs) } -func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { +// ApplyTo adds RecommendedOptions to the server configuration. +// scheme is the scheme of the apiserver types that are sent to the admission chain. +// pluginInitializers can be empty, it is only needed for additional initializers. +func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig, scheme *runtime.Scheme) error { if err := o.Etcd.ApplyTo(&config.Config); err != nil { return err } @@ -81,6 +90,36 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { if err := o.CoreAPI.ApplyTo(config); err != nil { return err } + if o.Admission != nil { + // Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig. + if o.CoreAPI == nil { + return fmt.Errorf("admission depends on CoreAPI, so it must be set") + } + // Admission needs scheme to construct admission initializer.
+ if scheme == nil { + return fmt.Errorf("admission depends on scheme, so it must be set") + } + + pluginInitializers := []admission.PluginInitializer{} + for _, initFunc := range config.ExtraAdmissionInitializersInitFunc { + initializer, err := initFunc() + if err != nil { + return err + } + pluginInitializers = append(pluginInitializers, initializer) + } + + err := o.Admission.ApplyTo( + &config.Config, + config.SharedInformerFactory, + config.ClientConfig, + scheme, + pluginInitializers...) + if err != nil { + return err + } + } + return nil } @@ -93,6 +132,7 @@ func (o *RecommendedOptions) Validate() []error { errors = append(errors, o.Audit.Validate()...) errors = append(errors, o.Features.Validate()...) errors = append(errors, o.CoreAPI.Validate()...) + errors = append(errors, o.Admission.Validate()...) return errors } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go b/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go index 3be7856966b..1b1a652febe 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/cmd/server/start.go @@ -109,7 +109,7 @@ func (o AggregatorOptions) RunAggregator(stopCh <-chan struct{}) error { serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - if err := o.RecommendedOptions.ApplyTo(serverConfig); err != nil { + if err := o.RecommendedOptions.ApplyTo(serverConfig, apiserver.Scheme); err != nil { return err } serverConfig.LongRunningFunc = filters.BasicLongRunningRequestCheck( diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go index 5b08387b0e7..4e21833e046 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder/admission_test.go @@ -113,10 +113,7 @@ func 
TestBanflunderAdmissionPlugin(t *testing.T) { t.Fatalf("scenario %d: failed to create banflunder admission plugin due to = %v", index, err) } - targetInitializer, err := wardleinitializer.New(informersFactory) - if err != nil { - t.Fatalf("scenario %d: failed to crate wardle plugin initializer due to = %v", index, err) - } + targetInitializer := wardleinitializer.New(informersFactory) targetInitializer.Initialize(target) err = admission.ValidateInitialization(target) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go index c53c8a4944a..b41e3dfba09 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer.go @@ -28,10 +28,10 @@ type pluginInitializer struct { var _ admission.PluginInitializer = pluginInitializer{} // New creates an instance of wardle admission plugins initializer. 
-func New(informers informers.SharedInformerFactory) (pluginInitializer, error) { +func New(informers informers.SharedInformerFactory) pluginInitializer { return pluginInitializer{ informers: informers, - }, nil + } } // Initialize checks the initialization interfaces implemented by a plugin diff --git a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go index 221876a617c..c64ed3ab3e1 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/admission/wardleinitializer/wardleinitializer_test.go @@ -31,10 +31,8 @@ import ( func TestWantsInternalWardleInformerFactory(t *testing.T) { cs := &fake.Clientset{} sf := informers.NewSharedInformerFactory(cs, time.Duration(1)*time.Second) - target, err := wardleinitializer.New(sf) - if err != nil { - t.Fatalf("expected to create an instance of initializer but got an error = %s", err.Error()) - } + target := wardleinitializer.New(sf) + wantWardleInformerFactory := &wantInternalWardleInformerFactory{} target.Initialize(wantWardleInformerFactory) if wantWardleInformerFactory.sf != sf { diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index ae1e12dc75e..b375d46e609 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/admission" genericapiserver "k8s.io/apiserver/pkg/server" genericoptions "k8s.io/apiserver/pkg/server/options" "k8s.io/sample-apiserver/pkg/admission/plugin/banflunder" @@ -38,16 +39,15 @@ const defaultEtcdPathPrefix = "/registry/wardle.kubernetes.io" type WardleServerOptions struct { 
RecommendedOptions *genericoptions.RecommendedOptions - Admission *genericoptions.AdmissionOptions - StdOut io.Writer - StdErr io.Writer + SharedInformerFactory informers.SharedInformerFactory + StdOut io.Writer + StdErr io.Writer } func NewWardleServerOptions(out, errOut io.Writer) *WardleServerOptions { o := &WardleServerOptions{ RecommendedOptions: genericoptions.NewRecommendedOptions(defaultEtcdPathPrefix, apiserver.Codecs.LegacyCodec(v1alpha1.SchemeGroupVersion)), - Admission: genericoptions.NewAdmissionOptions(), StdOut: out, StdErr: errOut, @@ -79,7 +79,6 @@ func NewCommandStartWardleServer(out, errOut io.Writer, stopCh <-chan struct{}) flags := cmd.Flags() o.RecommendedOptions.AddFlags(flags) - o.Admission.AddFlags(flags) return cmd } @@ -87,7 +86,6 @@ func NewCommandStartWardleServer(out, errOut io.Writer, stopCh <-chan struct{}) func (o WardleServerOptions) Validate(args []string) error { errors := []error{} errors = append(errors, o.RecommendedOptions.Validate()...) - errors = append(errors, o.Admission.Validate()...) 
return utilerrors.NewAggregate(errors) } @@ -95,9 +93,9 @@ func (o *WardleServerOptions) Complete() error { return nil } -func (o WardleServerOptions) Config() (*apiserver.Config, error) { +func (o *WardleServerOptions) Config() (*apiserver.Config, error) { // register admission plugins - banflunder.Register(o.Admission.Plugins) + banflunder.Register(o.RecommendedOptions.Admission.Plugins) // TODO have a "real" external address if err := o.RecommendedOptions.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil { @@ -105,21 +103,20 @@ func (o WardleServerOptions) Config() (*apiserver.Config, error) { } serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - if err := o.RecommendedOptions.ApplyTo(serverConfig); err != nil { - return nil, err + + admissionInitializerInitFunc := func() (admission.PluginInitializer, error) { + client, err := clientset.NewForConfig(serverConfig.LoopbackClientConfig) + if err != nil { + return nil, err + } + informerFactory := informers.NewSharedInformerFactory(client, serverConfig.LoopbackClientConfig.Timeout) + o.SharedInformerFactory = informerFactory + return wardleinitializer.New(informerFactory), nil } - client, err := clientset.NewForConfig(serverConfig.LoopbackClientConfig) - if err != nil { - return nil, err - } - informerFactory := informers.NewSharedInformerFactory(client, serverConfig.LoopbackClientConfig.Timeout) - admissionInitializer, err := wardleinitializer.New(informerFactory) - if err != nil { - return nil, err - } + serverConfig.ExtraAdmissionInitializersInitFunc = []genericapiserver.AdmissionInitializersInitFunc{admissionInitializerInitFunc} - if err := o.Admission.ApplyTo(&serverConfig.Config, serverConfig.SharedInformerFactory, serverConfig.ClientConfig, apiserver.Scheme, admissionInitializer); err != nil { + if err := o.RecommendedOptions.ApplyTo(serverConfig, apiserver.Scheme); err != nil { return nil, err } From 
5dab6bc40a86bf3633bb1f09a048bcc0206b460f Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Fri, 1 Dec 2017 19:39:50 +0800 Subject: [PATCH 317/794] update bazel --- staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD index c969d4b0519..6e54da37f05 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/BUILD @@ -12,6 +12,7 @@ go_library( deps = [ "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", "//vendor/k8s.io/sample-apiserver/pkg/admission/plugin/banflunder:go_default_library", From 4a999d73e33d151bc0b6c55b32f4a849e91f1dc2 Mon Sep 17 00:00:00 2001 From: xialonglee Date: Fri, 15 Dec 2017 11:15:24 +0800 Subject: [PATCH 318/794] make sure that 'ldflags' are spaces safe --- hack/lib/version.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/lib/version.sh b/hack/lib/version.sh index 0df161e37c6..29f40515e93 100644 --- a/hack/lib/version.sh +++ b/hack/lib/version.sh @@ -133,8 +133,8 @@ kube::version::ldflag() { local val=${2} # If you update these, also update the list pkg/version/def.bzl. 
- echo "-X ${KUBE_GO_PACKAGE}/pkg/version.${key}=${val}" - echo "-X ${KUBE_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.${key}=${val}" + echo "-X '${KUBE_GO_PACKAGE}/pkg/version.${key}=${val}'" + echo "-X '${KUBE_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.${key}=${val}'" } # Prints the value that needs to be passed to the -ldflags parameter of go build From 34c3a254d808e29a71064adddeb219fe44661821 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Thu, 14 Dec 2017 15:43:37 -0500 Subject: [PATCH 319/794] Process cluster-scoped owners correctly --- .../garbagecollector/garbagecollector.go | 2 +- pkg/controller/garbagecollector/operations.go | 12 +- test/integration/garbagecollector/BUILD | 2 + .../cluster_scoped_owner_test.go | 147 ++++++++++++++++++ .../garbage_collector_test.go | 4 +- 5 files changed, 159 insertions(+), 8 deletions(-) create mode 100644 test/integration/garbagecollector/cluster_scoped_owner_test.go diff --git a/pkg/controller/garbagecollector/garbagecollector.go b/pkg/controller/garbagecollector/garbagecollector.go index dad94e6e97e..c416130585b 100644 --- a/pkg/controller/garbagecollector/garbagecollector.go +++ b/pkg/controller/garbagecollector/garbagecollector.go @@ -297,7 +297,7 @@ func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *no if err != nil { return false, nil, err } - resource, err := gc.apiResource(reference.APIVersion, reference.Kind, len(item.identity.Namespace) != 0) + resource, err := gc.apiResource(reference.APIVersion, reference.Kind) if err != nil { return false, nil, err } diff --git a/pkg/controller/garbagecollector/operations.go b/pkg/controller/garbagecollector/operations.go index 36897547b97..16f631f489b 100644 --- a/pkg/controller/garbagecollector/operations.go +++ b/pkg/controller/garbagecollector/operations.go @@ -32,7 +32,7 @@ import ( // apiResource consults the REST mapper to translate an tuple to a unversioned.APIResource struct. 
-func (gc *GarbageCollector) apiResource(apiVersion, kind string, namespaced bool) (*metav1.APIResource, error) { +func (gc *GarbageCollector) apiResource(apiVersion, kind string) (*metav1.APIResource, error) { fqKind := schema.FromAPIVersionAndKind(apiVersion, kind) mapping, err := gc.restMapper.RESTMapping(fqKind.GroupKind(), apiVersion) if err != nil { @@ -41,7 +41,7 @@ func (gc *GarbageCollector) apiResource(apiVersion, kind string, namespaced bool glog.V(5).Infof("map kind %s, version %s to resource %s", kind, apiVersion, mapping.Resource) resource := metav1.APIResource{ Name: mapping.Resource, - Namespaced: namespaced, + Namespaced: mapping.Scope == meta.RESTScopeNamespace, Kind: kind, } return &resource, nil @@ -53,7 +53,7 @@ func (gc *GarbageCollector) deleteObject(item objectReference, policy *metav1.De if err != nil { return err } - resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0) + resource, err := gc.apiResource(item.APIVersion, item.Kind) if err != nil { return err } @@ -69,7 +69,7 @@ func (gc *GarbageCollector) getObject(item objectReference) (*unstructured.Unstr if err != nil { return nil, err } - resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0) + resource, err := gc.apiResource(item.APIVersion, item.Kind) if err != nil { return nil, err } @@ -82,7 +82,7 @@ func (gc *GarbageCollector) updateObject(item objectReference, obj *unstructured if err != nil { return nil, err } - resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0) + resource, err := gc.apiResource(item.APIVersion, item.Kind) if err != nil { return nil, err } @@ -95,7 +95,7 @@ func (gc *GarbageCollector) patchObject(item objectReference, patch []byte) (*un if err != nil { return nil, err } - resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0) + resource, err := gc.apiResource(item.APIVersion, item.Kind) if err != nil { return nil, err } diff --git 
a/test/integration/garbagecollector/BUILD b/test/integration/garbagecollector/BUILD index ff6f60c8ec8..8bd1651d139 100644 --- a/test/integration/garbagecollector/BUILD +++ b/test/integration/garbagecollector/BUILD @@ -9,6 +9,7 @@ go_test( name = "go_default_test", size = "large", srcs = [ + "cluster_scoped_owner_test.go", "garbage_collector_test.go", "main_test.go", ], @@ -25,6 +26,7 @@ go_test( "//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/test/integration/garbagecollector/cluster_scoped_owner_test.go b/test/integration/garbagecollector/cluster_scoped_owner_test.go new file mode 100644 index 00000000000..a1ae4d2025d --- /dev/null +++ b/test/integration/garbagecollector/cluster_scoped_owner_test.go @@ -0,0 +1,147 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package garbagecollector + +import ( + "io" + "net/http" + "strings" + "testing" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" + "k8s.io/kubernetes/test/integration/framework" +) + +type roundTripFunc func(req *http.Request) (*http.Response, error) + +func (w roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return w(req) +} + +type readDelayer struct { + delay time.Duration + io.ReadCloser +} + +func (b *readDelayer) Read(p []byte) (n int, err error) { + defer time.Sleep(b.delay) + return b.ReadCloser.Read(p) +} + +func TestClusterScopedOwners(t *testing.T) { + // Start the test server and wrap the client to delay PV watch responses + server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd()) + server.ClientConfig.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + return roundTripFunc(func(req *http.Request) (*http.Response, error) { + if req.URL.Query().Get("watch") != "true" || !strings.Contains(req.URL.String(), "persistentvolumes") { + return rt.RoundTrip(req) + } + resp, err := rt.RoundTrip(req) + if err != nil { + return resp, err + } + resp.Body = &readDelayer{30 * time.Second, resp.Body} + return resp, err + }) + } + ctx := setupWithServer(t, server, 5) + defer ctx.tearDown() + + _, clientSet := ctx.gc, ctx.clientSet + + ns := createNamespaceOrDie("gc-cluster-scope-deletion", clientSet, t) + defer deleteNamespaceOrDie(ns.Name, clientSet, t) + + t.Log("Create a pair of objects") + pv, err := clientSet.CoreV1().PersistentVolumes().Create(&v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{Name: "pv-valid"}, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: 
"/foo"}}, + Capacity: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")}, + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, + }, + }) + if err != nil { + t.Fatal(err) + } + if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm-valid", + OwnerReferences: []metav1.OwnerReference{{Kind: "PersistentVolume", APIVersion: "v1", Name: pv.Name, UID: pv.UID}}, + }, + }); err != nil { + t.Fatal(err) + } + + t.Log("Create a namespaced object with a missing parent") + if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm-missing", + Labels: map[string]string{"missing": "true"}, + OwnerReferences: []metav1.OwnerReference{{Kind: "PersistentVolume", APIVersion: "v1", Name: "missing-name", UID: types.UID("missing-uid")}}, + }, + }); err != nil { + t.Fatal(err) + } + + t.Log("Create a namespaced object with a missing type parent") + if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm-invalid", + OwnerReferences: []metav1.OwnerReference{{Kind: "UnknownType", APIVersion: "unknown.group/v1", Name: "invalid-name", UID: types.UID("invalid-uid")}}, + }, + }); err != nil { + t.Fatal(err) + } + + // wait for deletable children to go away + if err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) { + _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-missing", metav1.GetOptions{}) + switch { + case errors.IsNotFound(err): + return true, nil + case err != nil: + return false, err + default: + t.Logf("cm with missing parent still exists, retrying") + return false, nil + } + }); err != nil { + t.Fatal(err) + } + t.Logf("deletable children removed") + + // Give time for blocked children to be incorrectly cleaned up + time.Sleep(5 * time.Second) + + // ensure children with unverifiable parents don't get reaped + if _, err := 
clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-invalid", metav1.GetOptions{}); err != nil { + t.Fatalf("child with invalid ownerRef is unexpectedly missing: %v", err) + } + + // ensure children with present parents don't get reaped + if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-valid", metav1.GetOptions{}); err != nil { + t.Fatalf("child with valid ownerRef is unexpectedly missing: %v", err) + } +} diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go index 897c30ff2dc..b8242ad70f6 100644 --- a/test/integration/garbagecollector/garbage_collector_test.go +++ b/test/integration/garbagecollector/garbage_collector_test.go @@ -200,8 +200,10 @@ type testContext struct { // if workerCount > 0, will start the GC, otherwise it's up to the caller to Run() the GC. func setup(t *testing.T, workerCount int) *testContext { - result := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd()) + return setupWithServer(t, kubeapiservertesting.StartTestServerOrDie(t, nil, framework.SharedEtcd()), workerCount) +} +func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, workerCount int) *testContext { clientSet, err := clientset.NewForConfig(result.ClientConfig) if err != nil { t.Fatalf("error creating clientset: %v", err) From 61369863b244a267725f1e75803819d68eea0e0e Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 27 Nov 2017 23:25:42 -0500 Subject: [PATCH 320/794] Generated files --- api/openapi-spec/swagger.json | 33 +- api/swagger-spec/v1.json | 59 +- docs/api-reference/v1/definitions.html | 132 +- pkg/apis/core/v1/zz_generated.conversion.go | 34 +- pkg/apis/core/zz_generated.deepcopy.go | 34 +- .../src/k8s.io/api/core/v1/generated.pb.go | 3276 +++++++++-------- .../src/k8s.io/api/core/v1/generated.proto | 32 +- .../core/v1/types_swagger_doc_generated.go | 13 + .../api/core/v1/zz_generated.deepcopy.go | 34 +- 9 files changed, 2153 
insertions(+), 1494 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 7014bba7119..3812d6d978b 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -75654,6 +75654,37 @@ } } }, + "io.k8s.api.core.v1.FlexPersistentVolumeSource": { + "description": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", + "required": [ + "driver" + ], + "properties": { + "driver": { + "description": "Driver is the name of the driver to use for this volume.", + "type": "string" + }, + "fsType": { + "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "type": "string" + }, + "options": { + "description": "Optional: Extra command options if any.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "readOnly": { + "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "description": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. 
If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + } + } + }, "io.k8s.api.core.v1.FlexVolumeSource": { "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "required": [ @@ -77007,7 +77038,7 @@ }, "flexVolume": { "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource" + "$ref": "#/definitions/io.k8s.api.core.v1.FlexPersistentVolumeSource" }, "flocker": { "description": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 97be62be175..cc2cebe67cf 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -20606,7 +20606,7 @@ "description": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running" }, "flexVolume": { - "$ref": "v1.FlexVolumeSource", + "$ref": "v1.FlexPersistentVolumeSource", "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." 
}, "azureFile": { @@ -21020,9 +21020,9 @@ } } }, - "v1.FlexVolumeSource": { - "id": "v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "v1.FlexPersistentVolumeSource": { + "id": "v1.FlexPersistentVolumeSource", + "description": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", "required": [ "driver" ], @@ -21036,7 +21036,7 @@ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script." }, "secretRef": { - "$ref": "v1.LocalObjectReference", + "$ref": "v1.SecretReference", "description": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts." }, "readOnly": { @@ -21049,16 +21049,6 @@ } } }, - "v1.LocalObjectReference": { - "id": "v1.LocalObjectReference", - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "properties": { - "name": { - "type": "string", - "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" - } - } - }, "v1.AzureFilePersistentVolumeSource": { "id": "v1.AzureFilePersistentVolumeSource", "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", @@ -21803,6 +21793,16 @@ } } }, + "v1.LocalObjectReference": { + "id": "v1.LocalObjectReference", + "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "properties": { + "name": { + "type": "string", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + } + } + }, "v1.PersistentVolumeClaimVolumeSource": { "id": "v1.PersistentVolumeClaimVolumeSource", "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", @@ -21865,6 +21865,35 @@ } } }, + "v1.FlexVolumeSource": { + "id": "v1.FlexVolumeSource", + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "required": [ + "driver" + ], + "properties": { + "driver": { + "type": "string", + "description": "Driver is the name of the driver to use for this volume." + }, + "fsType": { + "type": "string", + "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script." + }, + "secretRef": { + "$ref": "v1.LocalObjectReference", + "description": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. 
If the secret object contains more than one secret, all secrets are passed to the plugin scripts." + }, + "readOnly": { + "type": "boolean", + "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + }, + "options": { + "type": "object", + "description": "Optional: Extra command options if any." + } + } + }, "v1.CephFSVolumeSource": { "id": "v1.CephFSVolumeSource", "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index b169977ecee..cbb2f013aad 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -3531,6 +3531,40 @@ Examples:

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

v1.FlexVolumeSource

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

v1.FlexVolumeSource

+
+
+

v1.ServiceStatus

+
+

ServiceStatus represents the current status of a service.

+
+ +++++++ + + + + + + + + + + + + + + + + + + +
NameDescriptionRequiredSchemaDefault

loadBalancer

LoadBalancer contains the current status of the load-balancer, if one is present.

false

v1.LoadBalancerStatus

+

v1.NFSVolumeSource

@@ -3579,40 +3613,6 @@ Examples:
-
-
-

v1.ServiceStatus

-
-

ServiceStatus represents the current status of a service.

-
- ------- - - - - - - - - - - - - - - - - - - -
NameDescriptionRequiredSchemaDefault

loadBalancer

LoadBalancer contains the current status of the load-balancer, if one is present.

false

v1.LoadBalancerStatus

-

v1.HTTPHeader

@@ -4089,7 +4089,7 @@ Examples:

flexVolume

FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

false

-

v1.FlexVolumeSource

+

v1.FlexPersistentVolumeSource

@@ -6880,6 +6880,68 @@ Examples:
+
+
+

v1.FlexPersistentVolumeSource

+
+

FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.

+
+ +++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameDescriptionRequiredSchemaDefault

driver

Driver is the name of the driver to use for this volume.

true

string

fsType

Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.

false

string

secretRef

Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.

false

v1.SecretReference

readOnly

Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.

false

boolean

false

options

Optional: Extra command options if any.

false

object

+

v1.EndpointsList

diff --git a/pkg/apis/core/v1/zz_generated.conversion.go b/pkg/apis/core/v1/zz_generated.conversion.go index 2910297937f..84a602bc465 100644 --- a/pkg/apis/core/v1/zz_generated.conversion.go +++ b/pkg/apis/core/v1/zz_generated.conversion.go @@ -141,6 +141,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_core_ExecAction_To_v1_ExecAction, Convert_v1_FCVolumeSource_To_core_FCVolumeSource, Convert_core_FCVolumeSource_To_v1_FCVolumeSource, + Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource, + Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource, Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource, Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource, Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource, @@ -1758,6 +1760,34 @@ func Convert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, o return autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) } +func autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource is an autogenerated conversion function. 
+func Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *v1.FlexPersistentVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *v1.FlexPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in, out, s) +} + func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { out.Driver = in.Driver out.FSType = in.FSType @@ -3250,7 +3280,7 @@ func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1 out.CephFS = (*core.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC)) out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.FlexVolume = (*core.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume)) out.AzureFile = (*core.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) out.Quobyte = 
(*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) @@ -3278,7 +3308,7 @@ func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *co out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) out.ISCSI = (*v1.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.FlexVolume = (*v1.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume)) out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) diff --git a/pkg/apis/core/zz_generated.deepcopy.go b/pkg/apis/core/zz_generated.deepcopy.go index 186760a1922..c2b4e7b6a52 100644 --- a/pkg/apis/core/zz_generated.deepcopy.go +++ b/pkg/apis/core/zz_generated.deepcopy.go @@ -1547,6 +1547,38 @@ func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource. +func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource { + if in == nil { + return nil + } + out := new(FlexPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { *out = *in @@ -3152,7 +3184,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { if *in == nil { *out = nil } else { - *out = new(FlexVolumeSource) + *out = new(FlexPersistentVolumeSource) (*in).DeepCopyInto(*out) } } diff --git a/staging/src/k8s.io/api/core/v1/generated.pb.go b/staging/src/k8s.io/api/core/v1/generated.pb.go index c2fe6bc5ceb..78afdc75084 100644 --- a/staging/src/k8s.io/api/core/v1/generated.pb.go +++ b/staging/src/k8s.io/api/core/v1/generated.pb.go @@ -76,6 +76,7 @@ limitations under the License. EventSource ExecAction FCVolumeSource + FlexPersistentVolumeSource FlexVolumeSource FlockerVolumeSource GCEPersistentDiskVolumeSource @@ -458,586 +459,592 @@ func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } +func (*FlexPersistentVolumeSource) ProtoMessage() {} +func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{51} +} + func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func 
(*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{53} + return fileDescriptorGenerated, []int{54} } func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } func (m *Handler) Reset() { *m = Handler{} } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } func (m 
*HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{61} + return fileDescriptorGenerated, []int{62} } func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*LimitRangeItem) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{67} } func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{73} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} 
-func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *NodeProxyOptions) Reset() { *m = 
NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{90} + return fileDescriptorGenerated, []int{91} } func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } func (m 
*NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func 
(*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{100} + return fileDescriptorGenerated, []int{101} } func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{101} + return fileDescriptorGenerated, []int{102} } func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{102} + return fileDescriptorGenerated, []int{103} } func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} + return fileDescriptorGenerated, []int{104} } func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{104} + return fileDescriptorGenerated, []int{105} } func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{106} + return fileDescriptorGenerated, []int{107} } func (m *PersistentVolumeSpec) 
Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{108} + return fileDescriptorGenerated, []int{109} } func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{109} + return fileDescriptorGenerated, []int{110} } func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{114} } func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} -func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} -func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{120} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) 
ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +func (*PreferAvoidPodsEntry) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{134} + return fileDescriptorGenerated, []int{135} } func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{138} + return fileDescriptorGenerated, []int{139} } func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{142} + return fileDescriptorGenerated, []int{143} } func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{143} + return fileDescriptorGenerated, []int{144} } func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{144} + return fileDescriptorGenerated, []int{145} } func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{145} + return fileDescriptorGenerated, []int{146} } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{147} } func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } func (m *ScaleIOPersistentVolumeSource) Reset() { *m = 
ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{153} + return fileDescriptorGenerated, []int{154} } func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +func (*SecretProjection) Descriptor() ([]byte, []int) 
{ return fileDescriptorGenerated, []int{160} } func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} -func 
(*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{173} } func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{173} + return fileDescriptorGenerated, []int{174} } func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } +func (*Volume) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{180} } func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{184} + return fileDescriptorGenerated, []int{185} } func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{185} + return fileDescriptorGenerated, []int{186} } func init() { @@ -1092,6 +1099,7 @@ func init() { proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") proto.RegisterType((*ExecAction)(nil), 
"k8s.io.api.core.v1.ExecAction") proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") + proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") @@ -3529,6 +3537,72 @@ func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n53, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + } + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for _, k := range keysForOptions { + dAtA[i] = 0x2a + i++ + v := m.Options[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3556,11 +3630,11 @@ func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n53, err := m.SecretRef.MarshalTo(dAtA[i:]) + n54, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n54 } dAtA[i] = 0x20 i++ @@ -3744,11 +3818,11 @@ func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n54, err := m.Port.MarshalTo(dAtA[i:]) + n55, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n55 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -3817,32 +3891,32 @@ func (m *Handler) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n55, err := m.Exec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n55 - } - if m.HTTPGet != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n56, err := m.HTTPGet.MarshalTo(dAtA[i:]) + n56, err := m.Exec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n56 } - if m.TCPSocket != nil { - dAtA[i] = 0x1a + if m.HTTPGet != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n57, err := m.TCPSocket.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) + n57, err := m.HTTPGet.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n57 } + if m.TCPSocket != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) + n58, err := m.TCPSocket.MarshalTo(dAtA[i:]) + if err != 
nil { + return 0, err + } + i += n58 + } return i, nil } @@ -3980,11 +4054,11 @@ func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n58, err := m.SecretRef.MarshalTo(dAtA[i:]) + n59, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n59 } dAtA[i] = 0x58 i++ @@ -4072,11 +4146,11 @@ func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n59, err := m.SecretRef.MarshalTo(dAtA[i:]) + n60, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n60 } dAtA[i] = 0x58 i++ @@ -4145,21 +4219,21 @@ func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n60, err := m.PostStart.MarshalTo(dAtA[i:]) + n61, err := m.PostStart.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n61 } if m.PreStop != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n61, err := m.PreStop.MarshalTo(dAtA[i:]) + n62, err := m.PreStop.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n62 } return i, nil } @@ -4182,19 +4256,19 @@ func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n62, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n62 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n63, err := m.Spec.MarshalTo(dAtA[i:]) + n63, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n63 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n64, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n64 return i, nil } @@ -4241,11 
+4315,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n64, err := (&v).MarshalTo(dAtA[i:]) + n65, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n64 + i += n65 } } if len(m.Min) > 0 { @@ -4272,11 +4346,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n65, err := (&v).MarshalTo(dAtA[i:]) + n66, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n65 + i += n66 } } if len(m.Default) > 0 { @@ -4303,11 +4377,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n66, err := (&v).MarshalTo(dAtA[i:]) + n67, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n66 + i += n67 } } if len(m.DefaultRequest) > 0 { @@ -4334,11 +4408,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n67, err := (&v).MarshalTo(dAtA[i:]) + n68, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n67 + i += n68 } } if len(m.MaxLimitRequestRatio) > 0 { @@ -4365,11 +4439,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n68, err := (&v).MarshalTo(dAtA[i:]) + n69, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n68 + i += n69 } } return i, nil @@ -4393,11 +4467,11 @@ func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n69, err := m.ListMeta.MarshalTo(dAtA[i:]) + n70, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n69 + i += n70 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4461,11 
+4535,11 @@ func (m *List) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n70, err := m.ListMeta.MarshalTo(dAtA[i:]) + n71, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n70 + i += n71 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4684,27 +4758,27 @@ func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n71, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n71 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n72, err := m.Spec.MarshalTo(dAtA[i:]) + n72, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n72 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n73, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n73, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n73 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n74, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n74 return i, nil } @@ -4726,11 +4800,11 @@ func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n74, err := m.ListMeta.MarshalTo(dAtA[i:]) + n75, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n74 + i += n75 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4819,27 +4893,27 @@ func (m *Node) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n75, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n75 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n76, err := m.Spec.MarshalTo(dAtA[i:]) + n76, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n76 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n77, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n77, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n77 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n78, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n78 return i, nil } @@ -4888,11 +4962,11 @@ func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n78, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) + n79, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n78 + i += n79 } if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { @@ -4935,19 +5009,19 @@ func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n79, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n79 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n80, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n80, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n80 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n81, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n81 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ 
-4978,11 +5052,11 @@ func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n81, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + n82, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n81 + i += n82 } return i, nil } @@ -5005,11 +5079,11 @@ func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - n82, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) + n83, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n82 + i += n83 return i, nil } @@ -5031,11 +5105,11 @@ func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n83, err := m.ListMeta.MarshalTo(dAtA[i:]) + n84, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n83 + i += n84 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5112,11 +5186,11 @@ func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n84, err := (&v).MarshalTo(dAtA[i:]) + n85, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n84 + i += n85 } } return i, nil @@ -5274,11 +5348,11 @@ func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) - n85, err := m.ConfigSource.MarshalTo(dAtA[i:]) + n86, err := m.ConfigSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n85 + i += n86 } return i, nil } @@ -5322,11 +5396,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n86, err := (&v).MarshalTo(dAtA[i:]) + n87, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n86 + i += 
n87 } } if len(m.Allocatable) > 0 { @@ -5353,11 +5427,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n87, err := (&v).MarshalTo(dAtA[i:]) + n88, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n87 + i += n88 } } dAtA[i] = 0x1a @@ -5391,19 +5465,19 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n88, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n88 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n89, err := m.NodeInfo.MarshalTo(dAtA[i:]) + n89, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n89 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) + n90, err := m.NodeInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n90 if len(m.Images) > 0 { for _, msg := range m.Images { dAtA[i] = 0x42 @@ -5575,20 +5649,20 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n90, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + n91, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n90 + i += n91 if m.DeletionTimestamp != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n91, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + n92, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n91 + i += n92 } if m.DeletionGracePeriodSeconds != nil { dAtA[i] = 0x50 @@ -5676,11 +5750,11 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n92, err := m.Initializers.MarshalTo(dAtA[i:]) + n93, err := 
m.Initializers.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n92 + i += n93 } return i, nil } @@ -5749,27 +5823,27 @@ func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n93, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n93 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n94, err := m.Spec.MarshalTo(dAtA[i:]) + n94, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n94 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n95, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n95, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n95 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n96, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n96 return i, nil } @@ -5791,27 +5865,27 @@ func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n96, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n96 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n97, err := m.Spec.MarshalTo(dAtA[i:]) + n97, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n97 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n98, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n98, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n98 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n99, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } 
+ i += n99 return i, nil } @@ -5841,19 +5915,19 @@ func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n99, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n99 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n100, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n100, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n100 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n101, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n101 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -5883,11 +5957,11 @@ func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n101, err := m.ListMeta.MarshalTo(dAtA[i:]) + n102, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n101 + i += n102 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5936,11 +6010,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n102, err := m.Resources.MarshalTo(dAtA[i:]) + n103, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n102 + i += n103 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) @@ -5949,11 +6023,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n103, err := m.Selector.MarshalTo(dAtA[i:]) + n104, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n103 + i += n104 } if m.StorageClassName != 
nil { dAtA[i] = 0x2a @@ -6028,11 +6102,11 @@ func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n104, err := (&v).MarshalTo(dAtA[i:]) + n105, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n104 + i += n105 } } if len(m.Conditions) > 0 { @@ -6098,11 +6172,11 @@ func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n105, err := m.ListMeta.MarshalTo(dAtA[i:]) + n106, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n105 + i += n106 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -6137,163 +6211,163 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n106, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n106 - } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n107, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + n107, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n107 } - if m.HostPath != nil { - dAtA[i] = 0x1a + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n108, err := m.HostPath.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n108, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n108 } - if m.Glusterfs != nil { - dAtA[i] = 0x22 + if m.HostPath != nil { + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n109, err := m.Glusterfs.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) + n109, 
err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n109 } - if m.NFS != nil { - dAtA[i] = 0x2a + if m.Glusterfs != nil { + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n110, err := m.NFS.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n110, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n110 } - if m.RBD != nil { - dAtA[i] = 0x32 + if m.NFS != nil { + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n111, err := m.RBD.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n111, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n111 } - if m.ISCSI != nil { - dAtA[i] = 0x3a + if m.RBD != nil { + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n112, err := m.ISCSI.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n112, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n112 } - if m.Cinder != nil { - dAtA[i] = 0x42 + if m.ISCSI != nil { + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n113, err := m.Cinder.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n113, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n113 } - if m.CephFS != nil { - dAtA[i] = 0x4a + if m.Cinder != nil { + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n114, err := m.CephFS.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n114, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n114 } - if m.FC != nil { - dAtA[i] = 0x52 + if m.CephFS != nil { + dAtA[i] = 0x4a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n115, err := m.FC.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n115, 
err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n115 } - if m.Flocker != nil { - dAtA[i] = 0x5a + if m.FC != nil { + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n116, err := m.Flocker.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) + n116, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n116 } - if m.FlexVolume != nil { - dAtA[i] = 0x62 + if m.Flocker != nil { + dAtA[i] = 0x5a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n117, err := m.FlexVolume.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n117, err := m.Flocker.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n117 } - if m.AzureFile != nil { - dAtA[i] = 0x6a + if m.FlexVolume != nil { + dAtA[i] = 0x62 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n118, err := m.AzureFile.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n118, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n118 } - if m.VsphereVolume != nil { - dAtA[i] = 0x72 + if m.AzureFile != nil { + dAtA[i] = 0x6a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n119, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) + n119, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n119 } - if m.Quobyte != nil { - dAtA[i] = 0x7a + if m.VsphereVolume != nil { + dAtA[i] = 0x72 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n120, err := m.Quobyte.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) + n120, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n120 } + if m.Quobyte != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) + n121, err := 
m.Quobyte.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n121 + } if m.AzureDisk != nil { dAtA[i] = 0x82 i++ dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n121, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n122, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n121 + i += n122 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0x8a @@ -6301,11 +6375,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n122, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n123, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n122 + i += n123 } if m.PortworxVolume != nil { dAtA[i] = 0x92 @@ -6313,11 +6387,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n123, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n124, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n123 + i += n124 } if m.ScaleIO != nil { dAtA[i] = 0x9a @@ -6325,11 +6399,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n124, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n125, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n124 + i += n125 } if m.Local != nil { dAtA[i] = 0xa2 @@ -6337,11 +6411,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) - n125, err := m.Local.MarshalTo(dAtA[i:]) + n126, err := m.Local.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n125 + i += n126 } if m.StorageOS != nil { dAtA[i] = 0xaa @@ -6349,11 +6423,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) 
{ dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n126, err := m.StorageOS.MarshalTo(dAtA[i:]) + n127, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n126 + i += n127 } if m.CSI != nil { dAtA[i] = 0xb2 @@ -6361,11 +6435,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n127, err := m.CSI.MarshalTo(dAtA[i:]) + n128, err := m.CSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n127 + i += n128 } return i, nil } @@ -6409,21 +6483,21 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n128, err := (&v).MarshalTo(dAtA[i:]) + n129, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n128 + i += n129 } } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n129, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) + n130, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n129 + i += n130 if len(m.AccessModes) > 0 { for _, s := range m.AccessModes { dAtA[i] = 0x1a @@ -6443,11 +6517,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n130, err := m.ClaimRef.MarshalTo(dAtA[i:]) + n131, err := m.ClaimRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n130 + i += n131 } dAtA[i] = 0x2a i++ @@ -6555,27 +6629,27 @@ func (m *Pod) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n131, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n131 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n132, err := m.Spec.MarshalTo(dAtA[i:]) + n132, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n132 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n133, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n133, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n133 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n134, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n134 return i, nil } @@ -6640,11 +6714,11 @@ func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n134, err := m.LabelSelector.MarshalTo(dAtA[i:]) + n135, err := m.LabelSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n134 + i += n135 } if len(m.Namespaces) > 0 { for _, s := range m.Namespaces { @@ -6790,19 +6864,19 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n135, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n135 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n136, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n136, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n136 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n137, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n137 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -6989,11 +7063,11 @@ func (m *PodList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n137, err := m.ListMeta.MarshalTo(dAtA[i:]) + n138, err := 
m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n137 + i += n138 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7053,11 +7127,11 @@ func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n138, err := m.SinceTime.MarshalTo(dAtA[i:]) + n139, err := m.SinceTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n138 + i += n139 } dAtA[i] = 0x30 i++ @@ -7146,11 +7220,11 @@ func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n139, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n140, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n139 + i += n140 } if m.RunAsUser != nil { dAtA[i] = 0x10 @@ -7201,11 +7275,11 @@ func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n140, err := m.PodController.MarshalTo(dAtA[i:]) + n141, err := m.PodController.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n140 + i += n141 } return i, nil } @@ -7329,11 +7403,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n141, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n142, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n141 + i += n142 } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { @@ -7365,11 +7439,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n142, err := m.Affinity.MarshalTo(dAtA[i:]) + n143, err := m.Affinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n142 + i += n143 } dAtA[i] = 0x9a i++ @@ -7450,11 +7524,11 @@ func (m *PodSpec) 
MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) - n143, err := m.DNSConfig.MarshalTo(dAtA[i:]) + n144, err := m.DNSConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n143 + i += n144 } return i, nil } @@ -7510,11 +7584,11 @@ func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n144, err := m.StartTime.MarshalTo(dAtA[i:]) + n145, err := m.StartTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n144 + i += n145 } if len(m.ContainerStatuses) > 0 { for _, msg := range m.ContainerStatuses { @@ -7565,19 +7639,19 @@ func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n145, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n145 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n146, err := m.Status.MarshalTo(dAtA[i:]) + n146, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n146 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n147, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n147 return i, nil } @@ -7599,19 +7673,19 @@ func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n147, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n147 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n148, err := m.Template.MarshalTo(dAtA[i:]) + n148, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n148 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n149, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { 
+ return 0, err + } + i += n149 return i, nil } @@ -7633,11 +7707,11 @@ func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n149, err := m.ListMeta.MarshalTo(dAtA[i:]) + n150, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n149 + i += n150 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7671,19 +7745,19 @@ func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n150, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n150 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n151, err := m.Spec.MarshalTo(dAtA[i:]) + n151, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n151 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n152, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n152 return i, nil } @@ -7763,19 +7837,19 @@ func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n152, err := m.PodSignature.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n152 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n153, err := m.EvictionTime.MarshalTo(dAtA[i:]) + n153, err := m.PodSignature.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n153 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) + n154, err := m.EvictionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n154 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -7808,11 +7882,11 @@ func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i 
= encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n154, err := m.Preference.MarshalTo(dAtA[i:]) + n155, err := m.Preference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n154 + i += n155 return i, nil } @@ -7834,11 +7908,11 @@ func (m *Probe) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n155, err := m.Handler.MarshalTo(dAtA[i:]) + n156, err := m.Handler.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n155 + i += n156 dAtA[i] = 0x10 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) @@ -7988,11 +8062,11 @@ func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n156, err := m.SecretRef.MarshalTo(dAtA[i:]) + n157, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n156 + i += n157 } dAtA[i] = 0x40 i++ @@ -8059,11 +8133,11 @@ func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n157, err := m.SecretRef.MarshalTo(dAtA[i:]) + n158, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n157 + i += n158 } dAtA[i] = 0x40 i++ @@ -8094,11 +8168,11 @@ func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n159, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n158 + i += n159 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) @@ -8130,27 +8204,27 @@ func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n159, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n159 - dAtA[i] = 0x12 
- i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n160, err := m.Spec.MarshalTo(dAtA[i:]) + n160, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n160 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n161, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n161, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n161 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n162, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n162 return i, nil } @@ -8180,11 +8254,11 @@ func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n162, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n163, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n162 + i += n163 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -8214,11 +8288,11 @@ func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n163, err := m.ListMeta.MarshalTo(dAtA[i:]) + n164, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n163 + i += n164 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8280,11 +8354,11 @@ func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n164, err := m.Template.MarshalTo(dAtA[i:]) + n165, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n164 + i += n165 } dAtA[i] = 0x20 i++ @@ -8363,11 +8437,11 @@ func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n165, err := m.Divisor.MarshalTo(dAtA[i:]) + n166, err := m.Divisor.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n165 + i += n166 return i, nil } @@ -8389,27 +8463,27 @@ func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n166, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n166 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n167, err := m.Spec.MarshalTo(dAtA[i:]) + n167, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n167 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n168, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n168, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n168 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n169, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n169 return i, nil } @@ -8431,11 +8505,11 @@ func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n169, err := m.ListMeta.MarshalTo(dAtA[i:]) + n170, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n169 + i += n170 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8490,11 +8564,11 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n170, err := (&v).MarshalTo(dAtA[i:]) + n171, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n170 + i += n171 } } if len(m.Scopes) > 0 { @@ -8554,11 +8628,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 
0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n171, err := (&v).MarshalTo(dAtA[i:]) + n172, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n171 + i += n172 } } if len(m.Used) > 0 { @@ -8585,11 +8659,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n172, err := (&v).MarshalTo(dAtA[i:]) + n173, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n172 + i += n173 } } return i, nil @@ -8634,11 +8708,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n173, err := (&v).MarshalTo(dAtA[i:]) + n174, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n173 + i += n174 } } if len(m.Requests) > 0 { @@ -8665,11 +8739,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n174, err := (&v).MarshalTo(dAtA[i:]) + n175, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n174 + i += n175 } } return i, nil @@ -8736,11 +8810,11 @@ func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n175, err := m.SecretRef.MarshalTo(dAtA[i:]) + n176, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n175 + i += n176 } dAtA[i] = 0x20 i++ @@ -8808,11 +8882,11 @@ func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n176, err := m.SecretRef.MarshalTo(dAtA[i:]) + n177, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n176 + i += n177 } dAtA[i] = 0x20 i++ @@ -8871,11 +8945,11 @@ func (m *Secret) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ 
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n177, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n178, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n177 + i += n178 if len(m.Data) > 0 { keysForData := make([]string, 0, len(m.Data)) for k := range m.Data { @@ -8951,11 +9025,11 @@ func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n178, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n179, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n178 + i += n179 if m.Optional != nil { dAtA[i] = 0x10 i++ @@ -8987,11 +9061,11 @@ func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n179, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n180, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n179 + i += n180 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) @@ -9027,11 +9101,11 @@ func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n180, err := m.ListMeta.MarshalTo(dAtA[i:]) + n181, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n180 + i += n181 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9065,11 +9139,11 @@ func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n181, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n182, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n181 + i += n182 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9189,11 +9263,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) 
(int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n182, err := m.Capabilities.MarshalTo(dAtA[i:]) + n183, err := m.Capabilities.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n182 + i += n183 } if m.Privileged != nil { dAtA[i] = 0x10 @@ -9209,11 +9283,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n183, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n184, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n183 + i += n184 } if m.RunAsUser != nil { dAtA[i] = 0x20 @@ -9271,11 +9345,11 @@ func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n184, err := m.Reference.MarshalTo(dAtA[i:]) + n185, err := m.Reference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n184 + i += n185 return i, nil } @@ -9297,27 +9371,27 @@ func (m *Service) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n185, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n185 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n186, err := m.Spec.MarshalTo(dAtA[i:]) + n186, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n186 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n187, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n187, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n187 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n188, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n188 return i, nil } @@ -9339,11 +9413,11 @@ func 
(m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n188, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n189, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n188 + i += n189 if len(m.Secrets) > 0 { for _, msg := range m.Secrets { dAtA[i] = 0x12 @@ -9399,11 +9473,11 @@ func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n189, err := m.ListMeta.MarshalTo(dAtA[i:]) + n190, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n189 + i += n190 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9437,11 +9511,11 @@ func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n190, err := m.ListMeta.MarshalTo(dAtA[i:]) + n191, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n190 + i += n191 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9486,11 +9560,11 @@ func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n191, err := m.TargetPort.MarshalTo(dAtA[i:]) + n192, err := m.TargetPort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n191 + i += n192 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) @@ -9637,11 +9711,11 @@ func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) - n192, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) + n193, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n192 + i += n193 } return i, nil } @@ -9664,11 +9738,11 @@ func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n193, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n194, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n193 + i += n194 return i, nil } @@ -9691,11 +9765,11 @@ func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) - n194, err := m.ClientIP.MarshalTo(dAtA[i:]) + n195, err := m.ClientIP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n194 + i += n195 } return i, nil } @@ -9739,11 +9813,11 @@ func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n195, err := m.SecretRef.MarshalTo(dAtA[i:]) + n196, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n195 + i += n196 } return i, nil } @@ -9787,11 +9861,11 @@ func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n196, err := m.SecretRef.MarshalTo(dAtA[i:]) + n197, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n196 + i += n197 } return i, nil } @@ -9840,11 +9914,11 @@ func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n197, err := m.Port.MarshalTo(dAtA[i:]) + n198, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n197 + i += n198 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -9883,11 +9957,11 @@ func (m *Taint) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n198, err := m.TimeAdded.MarshalTo(dAtA[i:]) + n199, err := m.TimeAdded.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n198 + i += n199 } return i, nil } @@ -9953,11 
+10027,11 @@ func (m *Volume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n199, err := m.VolumeSource.MarshalTo(dAtA[i:]) + n200, err := m.VolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n199 + i += n200 return i, nil } @@ -10050,32 +10124,32 @@ func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n200, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n200 - } - if m.DownwardAPI != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n201, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n201, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n201 } - if m.ConfigMap != nil { - dAtA[i] = 0x1a + if m.DownwardAPI != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n202, err := m.ConfigMap.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) + n202, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n202 } + if m.ConfigMap != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) + n203, err := m.ConfigMap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n203 + } return i, nil } @@ -10098,163 +10172,163 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n203, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n203 - } - if m.EmptyDir != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n204, err := m.EmptyDir.MarshalTo(dAtA[i:]) + n204, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n204 } - if m.GCEPersistentDisk != nil { - 
dAtA[i] = 0x1a + if m.EmptyDir != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n205, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) + n205, err := m.EmptyDir.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n205 } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x22 + if m.GCEPersistentDisk != nil { + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n206, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) + n206, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n206 } - if m.GitRepo != nil { - dAtA[i] = 0x2a + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n207, err := m.GitRepo.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n207, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n207 } - if m.Secret != nil { - dAtA[i] = 0x32 + if m.GitRepo != nil { + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n208, err := m.Secret.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) + n208, err := m.GitRepo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n208 } - if m.NFS != nil { - dAtA[i] = 0x3a + if m.Secret != nil { + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n209, err := m.NFS.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) + n209, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n209 } - if m.ISCSI != nil { - dAtA[i] = 0x42 + if m.NFS != nil { + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n210, err := 
m.ISCSI.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n210, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n210 } - if m.Glusterfs != nil { - dAtA[i] = 0x4a + if m.ISCSI != nil { + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n211, err := m.Glusterfs.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n211, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n211 } - if m.PersistentVolumeClaim != nil { - dAtA[i] = 0x52 + if m.Glusterfs != nil { + dAtA[i] = 0x4a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n212, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n212, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n212 } - if m.RBD != nil { - dAtA[i] = 0x5a + if m.PersistentVolumeClaim != nil { + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n213, err := m.RBD.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) + n213, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n213 } - if m.FlexVolume != nil { - dAtA[i] = 0x62 + if m.RBD != nil { + dAtA[i] = 0x5a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n214, err := m.FlexVolume.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n214, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n214 } - if m.Cinder != nil { - dAtA[i] = 0x6a + if m.FlexVolume != nil { + dAtA[i] = 0x62 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n215, err := m.Cinder.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n215, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n215 } - if m.CephFS != 
nil { - dAtA[i] = 0x72 + if m.Cinder != nil { + dAtA[i] = 0x6a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n216, err := m.CephFS.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n216, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n216 } - if m.Flocker != nil { - dAtA[i] = 0x7a + if m.CephFS != nil { + dAtA[i] = 0x72 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n217, err := m.Flocker.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n217, err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n217 } + if m.Flocker != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n218, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n218 + } if m.DownwardAPI != nil { dAtA[i] = 0x82 i++ dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n218, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n219, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n218 + i += n219 } if m.FC != nil { dAtA[i] = 0x8a @@ -10262,11 +10336,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n219, err := m.FC.MarshalTo(dAtA[i:]) + n220, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n219 + i += n220 } if m.AzureFile != nil { dAtA[i] = 0x92 @@ -10274,11 +10348,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n220, err := m.AzureFile.MarshalTo(dAtA[i:]) + n221, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n220 + i += n221 } if m.ConfigMap != nil { dAtA[i] = 0x9a @@ -10286,11 +10360,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n221, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n222, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n221 + i += n222 } if m.VsphereVolume != nil { dAtA[i] = 0xa2 @@ -10298,11 +10372,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n222, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n223, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n222 + i += n223 } if m.Quobyte != nil { dAtA[i] = 0xaa @@ -10310,11 +10384,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n223, err := m.Quobyte.MarshalTo(dAtA[i:]) + n224, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n223 + i += n224 } if m.AzureDisk != nil { dAtA[i] = 0xb2 @@ -10322,11 +10396,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n224, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n225, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n224 + i += n225 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0xba @@ -10334,11 +10408,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n225, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n226, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n225 + i += n226 } if m.PortworxVolume != nil { dAtA[i] = 0xc2 @@ -10346,11 +10420,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n226, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n227, err := 
m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n226 + i += n227 } if m.ScaleIO != nil { dAtA[i] = 0xca @@ -10358,11 +10432,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n227, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n228, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n227 + i += n228 } if m.Projected != nil { dAtA[i] = 0xd2 @@ -10370,11 +10444,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n228, err := m.Projected.MarshalTo(dAtA[i:]) + n229, err := m.Projected.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n228 + i += n229 } if m.StorageOS != nil { dAtA[i] = 0xda @@ -10382,11 +10456,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n229, err := m.StorageOS.MarshalTo(dAtA[i:]) + n230, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n229 + i += n230 } return i, nil } @@ -10446,11 +10520,11 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n230, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + n231, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n230 + i += n231 return i, nil } @@ -11332,6 +11406,29 @@ func (m *FCVolumeSource) Size() (n int) { return n } +func (m *FlexPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + 
mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + func (m *FlexVolumeSource) Size() (n int) { var l int _ = l @@ -14505,6 +14602,30 @@ func (this *FCVolumeSource) String() string { }, "") return s } +func (this *FlexPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} func (this *FlexVolumeSource) String() string { if this == nil { return "nil" @@ -15331,7 +15452,7 @@ func (this *PersistentVolumeSource) String() string { `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, 
`AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, @@ -24366,6 +24487,283 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Options == nil { + m.Options = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Options[mapkey] = 
mapvalue + } else { + var mapvalue string + m.Options[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -34215,7 +34613,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.FlexVolume == nil { - m.FlexVolume = &FlexVolumeSource{} + m.FlexVolume = &FlexPersistentVolumeSource{} } if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -49114,777 +49512,779 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 12346 bytes of a gzipped FileDescriptorProto + // 12382 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x90, 0x24, 0x47, 0x56, 0xd8, 0x55, 0xf7, 0x7c, 0xf5, 0x9b, 0xef, 0xdc, 0x5d, 0xa9, 0x77, 0x24, 0x6d, 0xaf, 0x4a, - 0x77, 0xd2, 0xea, 0x6b, 0xe6, 0xb4, 0x92, 0x4e, 0xcb, 0xe9, 0x4e, 0x30, 0x33, 0x3d, 0xb3, 0xdb, - 0xda, 0x9d, 0xd9, 0x56, 0xf6, 0xec, 0xee, 0x9d, 0x10, 0xe7, 0xab, 0xe9, 0xce, 0x99, 0x29, 0x4d, - 0x4d, 0x55, 0xab, 0xaa, 0x7a, 0x76, 0x47, 0x01, 0x11, 0xf6, 0x19, 0xf0, 0x07, 0xfc, 0x20, 0x6c, + 0x77, 0xd2, 0xea, 0x6b, 0xe6, 0xb4, 0x92, 0x4e, 0xe2, 0x74, 0x27, 0x98, 0x99, 0x9e, 0xd9, 0x6d, + 0xed, 0xce, 0x6c, 0x2b, 0x7b, 0x76, 0xf7, 0x4e, 0x88, 0xf3, 0xd5, 0x74, 0xe7, 0xcc, 0x94, 0xa6, + 0xa6, 0xaa, 0x55, 0x55, 0x3d, 0xbb, 0xa3, 0x80, 0x08, 0x5b, 0x06, 0xfc, 0x01, 0x3f, 0x2e, 0x6c, 0xc2, 0xc6, 0x40, 0xe0, 0x08, 0x1b, 0x07, 0x9c, 0xb1, 0x1d, 0xc6, 0x60, 0xc0, 0x80, 0x6d, 0x8c, 0x1d, 0x0e, 0xf8, 0x83, 0xc1, 0xfe, 0x71, 0x44, 0x10, 0x1e, 0xc3, 0x40, 0xd8, 0xc1, 0x0f, 0x3b, 0x6c, 0xf3, 0x8b, 0x31, 0x36, 
0x8e, 0xfc, 0xac, 0xcc, 0xea, 0xaa, 0xee, 0x9e, 0xd5, 0xec, 0x48, - 0x10, 0xf7, 0xaf, 0x3b, 0xdf, 0xcb, 0x97, 0x59, 0xf9, 0xf1, 0xf2, 0xe5, 0xcb, 0xf7, 0x01, 0x6f, - 0xee, 0x5e, 0x8b, 0xe6, 0xdd, 0x60, 0x61, 0xb7, 0xb3, 0x49, 0x42, 0x9f, 0xc4, 0x24, 0x5a, 0xd8, - 0x27, 0x7e, 0x2b, 0x08, 0x17, 0x04, 0xc0, 0x69, 0xbb, 0x0b, 0xcd, 0x20, 0x24, 0x0b, 0xfb, 0xaf, - 0x2c, 0x6c, 0x13, 0x9f, 0x84, 0x4e, 0x4c, 0x5a, 0xf3, 0xed, 0x30, 0x88, 0x03, 0x84, 0x38, 0xce, - 0xbc, 0xd3, 0x76, 0xe7, 0x29, 0xce, 0xfc, 0xfe, 0x2b, 0x73, 0x2f, 0x6f, 0xbb, 0xf1, 0x4e, 0x67, - 0x73, 0xbe, 0x19, 0xec, 0x2d, 0x6c, 0x07, 0xdb, 0xc1, 0x02, 0x43, 0xdd, 0xec, 0x6c, 0xb1, 0x7f, - 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xad, 0x25, 0xcd, 0x90, 0x07, 0x31, 0xf1, 0x23, 0x37, 0xf0, - 0xa3, 0x97, 0x9d, 0xb6, 0x1b, 0x91, 0x70, 0x9f, 0x84, 0x0b, 0xed, 0xdd, 0x6d, 0x0a, 0x8b, 0x4c, - 0x84, 0x85, 0xfd, 0x57, 0x36, 0x49, 0xec, 0x74, 0xf5, 0x68, 0xee, 0xb5, 0x84, 0xdc, 0x9e, 0xd3, - 0xdc, 0x71, 0x7d, 0x12, 0x1e, 0x48, 0x1a, 0x0b, 0x21, 0x89, 0x82, 0x4e, 0xd8, 0x24, 0x27, 0xaa, - 0x15, 0x2d, 0xec, 0x91, 0xd8, 0xc9, 0xf8, 0xfa, 0xb9, 0x85, 0xbc, 0x5a, 0x61, 0xc7, 0x8f, 0xdd, - 0xbd, 0xee, 0x66, 0x3e, 0xd7, 0xaf, 0x42, 0xd4, 0xdc, 0x21, 0x7b, 0x4e, 0x57, 0xbd, 0x57, 0xf3, - 0xea, 0x75, 0x62, 0xd7, 0x5b, 0x70, 0xfd, 0x38, 0x8a, 0xc3, 0x74, 0x25, 0xfb, 0x1b, 0x16, 0x5c, - 0x5e, 0xbc, 0xd7, 0x58, 0xf1, 0x9c, 0x28, 0x76, 0x9b, 0x4b, 0x5e, 0xd0, 0xdc, 0x6d, 0xc4, 0x41, - 0x48, 0xee, 0x06, 0x5e, 0x67, 0x8f, 0x34, 0xd8, 0x40, 0xa0, 0x97, 0x60, 0x6c, 0x9f, 0xfd, 0xaf, - 0x55, 0xcb, 0xd6, 0x65, 0xeb, 0x4a, 0x69, 0x69, 0xe6, 0xd7, 0x0e, 0x2b, 0x9f, 0x3a, 0x3a, 0xac, - 0x8c, 0xdd, 0x15, 0xe5, 0x58, 0x61, 0xa0, 0x67, 0x61, 0x64, 0x2b, 0xda, 0x38, 0x68, 0x93, 0x72, - 0x81, 0xe1, 0x4e, 0x09, 0xdc, 0x91, 0xd5, 0x06, 0x2d, 0xc5, 0x02, 0x8a, 0x16, 0xa0, 0xd4, 0x76, - 0xc2, 0xd8, 0x8d, 0xdd, 0xc0, 0x2f, 0x17, 0x2f, 0x5b, 0x57, 0x86, 0x97, 0x66, 0x05, 0x6a, 0xa9, - 0x2e, 0x01, 0x38, 0xc1, 0xa1, 0xdd, 0x08, 0x89, 0xd3, 0xba, 0xed, 
0x7b, 0x07, 0xe5, 0xa1, 0xcb, - 0xd6, 0x95, 0xb1, 0xa4, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0xff, 0x70, 0x01, 0xc6, 0x16, 0xb7, - 0xb6, 0x5c, 0xdf, 0x8d, 0x0f, 0xd0, 0x5d, 0x98, 0xf0, 0x83, 0x16, 0x91, 0xff, 0xd9, 0x57, 0x8c, - 0x5f, 0xbd, 0x3c, 0xdf, 0xbd, 0x32, 0xe7, 0xd7, 0x35, 0xbc, 0xa5, 0x99, 0xa3, 0xc3, 0xca, 0x84, - 0x5e, 0x82, 0x0d, 0x3a, 0x08, 0xc3, 0x78, 0x3b, 0x68, 0x29, 0xb2, 0x05, 0x46, 0xb6, 0x92, 0x45, - 0xb6, 0x9e, 0xa0, 0x2d, 0x4d, 0x1f, 0x1d, 0x56, 0xc6, 0xb5, 0x02, 0xac, 0x13, 0x41, 0x9b, 0x30, - 0x4d, 0xff, 0xfa, 0xb1, 0xab, 0xe8, 0x16, 0x19, 0xdd, 0x67, 0xf2, 0xe8, 0x6a, 0xa8, 0x4b, 0xe7, - 0x8e, 0x0e, 0x2b, 0xd3, 0xa9, 0x42, 0x9c, 0x26, 0x68, 0x7f, 0x08, 0x53, 0x8b, 0x71, 0xec, 0x34, - 0x77, 0x48, 0x8b, 0xcf, 0x20, 0x7a, 0x0d, 0x86, 0x7c, 0x67, 0x8f, 0x88, 0xf9, 0xbd, 0x2c, 0x06, - 0x76, 0x68, 0xdd, 0xd9, 0x23, 0xc7, 0x87, 0x95, 0x99, 0x3b, 0xbe, 0xfb, 0x41, 0x47, 0xac, 0x0a, - 0x5a, 0x86, 0x19, 0x36, 0xba, 0x0a, 0xd0, 0x22, 0xfb, 0x6e, 0x93, 0xd4, 0x9d, 0x78, 0x47, 0xcc, - 0x37, 0x12, 0x75, 0xa1, 0xaa, 0x20, 0x58, 0xc3, 0xb2, 0x1f, 0x40, 0x69, 0x71, 0x3f, 0x70, 0x5b, - 0xf5, 0xa0, 0x15, 0xa1, 0x5d, 0x98, 0x6e, 0x87, 0x64, 0x8b, 0x84, 0xaa, 0xa8, 0x6c, 0x5d, 0x2e, - 0x5e, 0x19, 0xbf, 0x7a, 0x25, 0xf3, 0x63, 0x4d, 0xd4, 0x15, 0x3f, 0x0e, 0x0f, 0x96, 0x1e, 0x17, - 0xed, 0x4d, 0xa7, 0xa0, 0x38, 0x4d, 0xd9, 0xfe, 0x77, 0x05, 0xb8, 0xb0, 0xf8, 0x61, 0x27, 0x24, - 0x55, 0x37, 0xda, 0x4d, 0xaf, 0xf0, 0x96, 0x1b, 0xed, 0xae, 0x27, 0x23, 0xa0, 0x96, 0x56, 0x55, - 0x94, 0x63, 0x85, 0x81, 0x5e, 0x86, 0x51, 0xfa, 0xfb, 0x0e, 0xae, 0x89, 0x4f, 0x3e, 0x27, 0x90, - 0xc7, 0xab, 0x4e, 0xec, 0x54, 0x39, 0x08, 0x4b, 0x1c, 0xb4, 0x06, 0xe3, 0x4d, 0xb6, 0x21, 0xb7, - 0xd7, 0x82, 0x16, 0x61, 0x93, 0x59, 0x5a, 0x7a, 0x91, 0xa2, 0x2f, 0x27, 0xc5, 0xc7, 0x87, 0x95, - 0x32, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0x7f, 0x0d, 0x31, 0x4a, 0x90, - 0xb1, 0xb7, 0xae, 0x68, 0x5b, 0x65, 0x98, 0x6d, 0x95, 0x89, 0xec, 0x6d, 0x82, 0x5e, 0x81, 0xa1, - 0x5d, 
0xd7, 0x6f, 0x95, 0x47, 0x18, 0xad, 0xa7, 0xe8, 0x9c, 0xdf, 0x74, 0xfd, 0xd6, 0xf1, 0x61, - 0x65, 0xd6, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x23, 0x0b, 0x2a, 0x0c, 0xb6, 0xea, 0x7a, - 0xa4, 0x4e, 0xc2, 0xc8, 0x8d, 0x62, 0xe2, 0xc7, 0xc6, 0x80, 0x5e, 0x05, 0x88, 0x48, 0x33, 0x24, - 0xb1, 0x36, 0xa4, 0x6a, 0x61, 0x34, 0x14, 0x04, 0x6b, 0x58, 0x94, 0x21, 0x44, 0x3b, 0x4e, 0xc8, - 0xd6, 0x97, 0x18, 0x58, 0xc5, 0x10, 0x1a, 0x12, 0x80, 0x13, 0x1c, 0x83, 0x21, 0x14, 0xfb, 0x31, - 0x04, 0xf4, 0x45, 0x98, 0x4e, 0x1a, 0x8b, 0xda, 0x4e, 0x53, 0x0e, 0x20, 0xdb, 0x32, 0x0d, 0x13, - 0x84, 0xd3, 0xb8, 0xf6, 0x3f, 0xb4, 0xc4, 0xe2, 0xa1, 0x5f, 0xfd, 0x09, 0xff, 0x56, 0xfb, 0x17, - 0x2c, 0x18, 0x5d, 0x72, 0xfd, 0x96, 0xeb, 0x6f, 0xa3, 0xaf, 0xc2, 0x18, 0x3d, 0x9b, 0x5a, 0x4e, - 0xec, 0x08, 0xbe, 0xf7, 0x59, 0x6d, 0x6f, 0xa9, 0xa3, 0x62, 0xbe, 0xbd, 0xbb, 0x4d, 0x0b, 0xa2, - 0x79, 0x8a, 0x4d, 0x77, 0xdb, 0xed, 0xcd, 0xf7, 0x49, 0x33, 0x5e, 0x23, 0xb1, 0x93, 0x7c, 0x4e, - 0x52, 0x86, 0x15, 0x55, 0x74, 0x13, 0x46, 0x62, 0x27, 0xdc, 0x26, 0xb1, 0x60, 0x80, 0x99, 0x8c, - 0x8a, 0xd7, 0xc4, 0x74, 0x47, 0x12, 0xbf, 0x49, 0x92, 0x63, 0x61, 0x83, 0x55, 0xc5, 0x82, 0x84, - 0xfd, 0x53, 0x16, 0x5c, 0x5c, 0x6e, 0xd4, 0x72, 0xd6, 0xd5, 0xb3, 0x30, 0xd2, 0x0a, 0xdd, 0x7d, - 0x12, 0x8a, 0x71, 0x56, 0x54, 0xaa, 0xac, 0x14, 0x0b, 0x28, 0xba, 0x06, 0x13, 0xfc, 0x40, 0xba, - 0xe1, 0xf8, 0x2d, 0x4f, 0x0e, 0xf1, 0x79, 0x81, 0x3d, 0x71, 0x57, 0x83, 0x61, 0x03, 0xf3, 0x84, - 0x03, 0xdd, 0x84, 0x89, 0x65, 0xa7, 0xed, 0x6c, 0xba, 0x9e, 0x1b, 0xbb, 0x24, 0x42, 0xcf, 0x41, - 0xd1, 0x69, 0xb5, 0x18, 0x0f, 0x2b, 0x2d, 0x5d, 0x38, 0x3a, 0xac, 0x14, 0x17, 0x5b, 0x74, 0x33, - 0x81, 0xc2, 0x3a, 0xc0, 0x14, 0x03, 0xbd, 0x00, 0x43, 0xad, 0x30, 0x68, 0x97, 0x0b, 0x0c, 0xf3, - 0x31, 0xba, 0xef, 0xaa, 0x61, 0xd0, 0x4e, 0xa1, 0x32, 0x1c, 0xfb, 0x57, 0x0a, 0xf0, 0xe4, 0x32, - 0x69, 0xef, 0xac, 0x36, 0x72, 0x46, 0xe5, 0x0a, 0x8c, 0xed, 0x05, 0xbe, 0x1b, 0x07, 0x61, 0x24, - 0x9a, 0x66, 0xdb, 0x7d, 0x4d, 0x94, 0x61, 
0x05, 0x45, 0x97, 0x61, 0xa8, 0x9d, 0xb0, 0xea, 0x09, - 0xc9, 0xe6, 0x19, 0x93, 0x66, 0x10, 0x8a, 0xd1, 0x89, 0x48, 0x28, 0xd8, 0x94, 0xc2, 0xb8, 0x13, - 0x91, 0x10, 0x33, 0x48, 0xb2, 0xde, 0xe9, 0x4e, 0x10, 0x7b, 0x28, 0xb5, 0xde, 0x29, 0x04, 0x6b, - 0x58, 0xa8, 0x0e, 0x25, 0xfe, 0x0f, 0x93, 0x2d, 0xc6, 0x91, 0x72, 0x56, 0x49, 0x43, 0x22, 0x89, - 0x55, 0x32, 0xc9, 0x36, 0x84, 0x2c, 0xc4, 0x09, 0x11, 0x63, 0x9e, 0x46, 0xfa, 0xce, 0xd3, 0x2f, - 0x15, 0x00, 0xf1, 0x21, 0xfc, 0x33, 0x36, 0x70, 0x77, 0xba, 0x07, 0x2e, 0xf3, 0x68, 0xbc, 0x15, - 0x34, 0x1d, 0x2f, 0xbd, 0xc7, 0x4e, 0x6b, 0xf4, 0x7e, 0xc8, 0x02, 0xb4, 0xec, 0xfa, 0x2d, 0x12, - 0x9e, 0x81, 0x5c, 0x78, 0xb2, 0x0d, 0x78, 0x0b, 0xa6, 0x96, 0x3d, 0x97, 0xf8, 0x71, 0xad, 0xbe, - 0x1c, 0xf8, 0x5b, 0xee, 0x36, 0xfa, 0x3c, 0x4c, 0x51, 0x31, 0x39, 0xe8, 0xc4, 0x0d, 0xd2, 0x0c, - 0x7c, 0x26, 0x51, 0x50, 0xe1, 0x12, 0x1d, 0x1d, 0x56, 0xa6, 0x36, 0x0c, 0x08, 0x4e, 0x61, 0xda, - 0xbf, 0x43, 0x3f, 0x34, 0xd8, 0x6b, 0x07, 0x3e, 0xf1, 0xe3, 0xe5, 0xc0, 0x6f, 0x71, 0xc9, 0xf3, - 0xf3, 0x30, 0x14, 0xd3, 0x8e, 0xf3, 0x8f, 0x7c, 0x56, 0x4e, 0x2d, 0xed, 0xee, 0xf1, 0x61, 0xe5, - 0xb1, 0xee, 0x1a, 0xec, 0x83, 0x58, 0x1d, 0xf4, 0x2d, 0x30, 0x12, 0xc5, 0x4e, 0xdc, 0x89, 0xc4, - 0x67, 0x3f, 0x2d, 0x3f, 0xbb, 0xc1, 0x4a, 0x8f, 0x0f, 0x2b, 0xd3, 0xaa, 0x1a, 0x2f, 0xc2, 0xa2, - 0x02, 0x7a, 0x1e, 0x46, 0xf7, 0x48, 0x14, 0x39, 0xdb, 0x52, 0x68, 0x98, 0x16, 0x75, 0x47, 0xd7, - 0x78, 0x31, 0x96, 0x70, 0xf4, 0x0c, 0x0c, 0x93, 0x30, 0x0c, 0x42, 0xb1, 0xaa, 0x26, 0x05, 0xe2, - 0xf0, 0x0a, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x07, 0x0b, 0xa6, 0x55, 0x5f, 0x79, 0x5b, 0x67, 0x70, - 0x3a, 0xbc, 0x0b, 0xd0, 0x94, 0x1f, 0x18, 0x31, 0x7e, 0x37, 0x7e, 0xf5, 0xd9, 0xac, 0x25, 0xdc, - 0x3d, 0x8c, 0x09, 0x65, 0x55, 0x14, 0x61, 0x8d, 0x9a, 0xfd, 0x2f, 0x2d, 0x38, 0x97, 0xfa, 0xa2, - 0x5b, 0x6e, 0x14, 0xa3, 0xf7, 0xba, 0xbe, 0x6a, 0x7e, 0xb0, 0xaf, 0xa2, 0xb5, 0xd9, 0x37, 0xa9, - 0x35, 0x27, 0x4b, 0xb4, 0x2f, 0xba, 0x01, 0xc3, 0x6e, 0x4c, 0xf6, 0xe4, 0xc7, 
0x3c, 0xd3, 0xf3, - 0x63, 0x78, 0xaf, 0x92, 0x19, 0xa9, 0xd1, 0x9a, 0x98, 0x13, 0xb0, 0xff, 0x97, 0x05, 0x25, 0xbe, - 0x6c, 0xd7, 0x9c, 0xf6, 0x19, 0xcc, 0x45, 0x0d, 0x86, 0x18, 0x75, 0xde, 0xf1, 0xe7, 0xb2, 0x3b, - 0x2e, 0xba, 0x33, 0x4f, 0x45, 0x3f, 0x2e, 0x62, 0x2b, 0x66, 0x46, 0x8b, 0x30, 0x23, 0x31, 0xf7, - 0x06, 0x94, 0x14, 0x02, 0x9a, 0x81, 0xe2, 0x2e, 0xe1, 0xd7, 0xaa, 0x12, 0xa6, 0x3f, 0xd1, 0x79, - 0x18, 0xde, 0x77, 0xbc, 0x8e, 0xd8, 0xec, 0x98, 0xff, 0xf9, 0x7c, 0xe1, 0x9a, 0x65, 0xff, 0x22, - 0xdb, 0x63, 0xa2, 0x91, 0x15, 0x7f, 0x5f, 0x30, 0x93, 0x0f, 0xe1, 0xbc, 0x97, 0xc1, 0xc3, 0xc4, - 0x40, 0x0c, 0xce, 0xf3, 0x9e, 0x14, 0x7d, 0x3d, 0x9f, 0x05, 0xc5, 0x99, 0x6d, 0xd0, 0x63, 0x20, - 0x68, 0xd3, 0x15, 0xe5, 0x78, 0xac, 0xbf, 0x42, 0x5c, 0xbe, 0x2d, 0xca, 0xb0, 0x82, 0x52, 0x06, - 0x71, 0x5e, 0x75, 0xfe, 0x26, 0x39, 0x68, 0x10, 0x8f, 0x34, 0xe3, 0x20, 0xfc, 0x58, 0xbb, 0xff, - 0x14, 0x1f, 0x7d, 0xce, 0x5f, 0xc6, 0x05, 0x81, 0xe2, 0x4d, 0x72, 0xc0, 0xa7, 0x42, 0xff, 0xba, - 0x62, 0xcf, 0xaf, 0xfb, 0x69, 0x0b, 0x26, 0xd5, 0xd7, 0x9d, 0xc1, 0x46, 0x5a, 0x32, 0x37, 0xd2, - 0x53, 0x3d, 0xd7, 0x63, 0xce, 0x16, 0xfa, 0x53, 0xc6, 0x02, 0x04, 0x4e, 0x3d, 0x0c, 0xe8, 0xd0, - 0x50, 0x9e, 0xfd, 0x71, 0x4e, 0xc8, 0x20, 0xdf, 0x75, 0x93, 0x1c, 0x6c, 0x04, 0x54, 0x7c, 0xc8, - 0xfe, 0x2e, 0x63, 0xd6, 0x86, 0x7a, 0xce, 0xda, 0xcf, 0x16, 0xe0, 0x82, 0x1a, 0x01, 0xe3, 0x80, - 0xfe, 0xb3, 0x3e, 0x06, 0xaf, 0xc0, 0x78, 0x8b, 0x6c, 0x39, 0x1d, 0x2f, 0x56, 0x37, 0xe7, 0x61, - 0xae, 0x3d, 0xa9, 0x26, 0xc5, 0x58, 0xc7, 0x39, 0xc1, 0xb0, 0xfd, 0xf8, 0x38, 0xe3, 0xbd, 0xb1, - 0x43, 0x57, 0x30, 0x95, 0xde, 0x34, 0xfd, 0xc7, 0x84, 0xae, 0xff, 0x10, 0xba, 0x8e, 0x67, 0x60, - 0xd8, 0xdd, 0xa3, 0x67, 0x71, 0xc1, 0x3c, 0x62, 0x6b, 0xb4, 0x10, 0x73, 0x18, 0xfa, 0x0c, 0x8c, - 0x36, 0x83, 0xbd, 0x3d, 0xc7, 0x6f, 0x95, 0x8b, 0x4c, 0x9e, 0x1c, 0xa7, 0xc7, 0xf5, 0x32, 0x2f, - 0xc2, 0x12, 0x86, 0x9e, 0x84, 0x21, 0x27, 0xdc, 0x8e, 0xca, 0x43, 0x0c, 0x67, 0x8c, 0xb6, 0xb4, - 0x18, 0x6e, 0x47, 
0x98, 0x95, 0x52, 0x39, 0xf1, 0x7e, 0x10, 0xee, 0xba, 0xfe, 0x76, 0xd5, 0x0d, - 0x99, 0xd0, 0xa7, 0xc9, 0x89, 0xf7, 0x14, 0x04, 0x6b, 0x58, 0x68, 0x15, 0x86, 0xdb, 0x41, 0x18, - 0x47, 0xe5, 0x11, 0x36, 0xdc, 0x4f, 0xe7, 0x6c, 0x25, 0xfe, 0xb5, 0xf5, 0x20, 0x8c, 0x93, 0x0f, - 0xa0, 0xff, 0x22, 0xcc, 0xab, 0xa3, 0x6f, 0x81, 0x22, 0xf1, 0xf7, 0xcb, 0xa3, 0x8c, 0xca, 0x5c, - 0x16, 0x95, 0x15, 0x7f, 0xff, 0xae, 0x13, 0x26, 0x7c, 0x66, 0xc5, 0xdf, 0xc7, 0xb4, 0x0e, 0xfa, - 0x32, 0x94, 0xa4, 0xee, 0x34, 0x2a, 0x8f, 0xe5, 0x2f, 0x31, 0x2c, 0x90, 0x30, 0xf9, 0xa0, 0xe3, - 0x86, 0x64, 0x8f, 0xf8, 0x71, 0x94, 0xdc, 0x7e, 0x25, 0x34, 0xc2, 0x09, 0x35, 0xf4, 0x65, 0x79, - 0x9d, 0x5b, 0x0b, 0x3a, 0x7e, 0x1c, 0x95, 0x4b, 0xac, 0x7b, 0x99, 0x8a, 0xb6, 0xbb, 0x09, 0x5e, - 0xfa, 0xbe, 0xc7, 0x2b, 0x63, 0x83, 0x14, 0xc2, 0x30, 0xe9, 0xb9, 0xfb, 0xc4, 0x27, 0x51, 0x54, - 0x0f, 0x83, 0x4d, 0x52, 0x06, 0xd6, 0xf3, 0x8b, 0xd9, 0xfa, 0xa7, 0x60, 0x93, 0x2c, 0xcd, 0x1e, - 0x1d, 0x56, 0x26, 0x6f, 0xe9, 0x75, 0xb0, 0x49, 0x02, 0xdd, 0x81, 0x29, 0x2a, 0xa0, 0xba, 0x09, - 0xd1, 0xf1, 0x7e, 0x44, 0x99, 0x74, 0x8a, 0x8d, 0x4a, 0x38, 0x45, 0x04, 0xbd, 0x0d, 0x25, 0xcf, - 0xdd, 0x22, 0xcd, 0x83, 0xa6, 0x47, 0xca, 0x13, 0x8c, 0x62, 0xe6, 0xb6, 0xba, 0x25, 0x91, 0xf8, - 0x05, 0x40, 0xfd, 0xc5, 0x49, 0x75, 0x74, 0x17, 0x1e, 0x8b, 0x49, 0xb8, 0xe7, 0xfa, 0x0e, 0xdd, - 0x0e, 0x42, 0x9e, 0x64, 0x5a, 0xbc, 0x49, 0xb6, 0xde, 0x2e, 0x89, 0xa1, 0x7b, 0x6c, 0x23, 0x13, - 0x0b, 0xe7, 0xd4, 0x46, 0xb7, 0x61, 0x9a, 0xed, 0x84, 0x7a, 0xc7, 0xf3, 0xea, 0x81, 0xe7, 0x36, - 0x0f, 0xca, 0x53, 0x8c, 0xe0, 0x67, 0xa4, 0x9a, 0xae, 0x66, 0x82, 0xe9, 0x8d, 0x37, 0xf9, 0x87, - 0xd3, 0xb5, 0xd1, 0x26, 0x53, 0xdb, 0x74, 0x42, 0x37, 0x3e, 0xa0, 0xeb, 0x97, 0x3c, 0x88, 0xcb, - 0xd3, 0x3d, 0xef, 0x8f, 0x3a, 0xaa, 0xd2, 0xed, 0xe8, 0x85, 0x38, 0x4d, 0x90, 0x6e, 0xed, 0x28, - 0x6e, 0xb9, 0x7e, 0x79, 0x86, 0x71, 0x0c, 0xb5, 0x33, 0x1a, 0xb4, 0x10, 0x73, 0x18, 0x53, 0xd9, - 0xd0, 0x1f, 0xb7, 0x29, 0x07, 0x9d, 0x65, 0x88, 0x89, 
0xca, 0x46, 0x02, 0x70, 0x82, 0x43, 0x8f, - 0xe5, 0x38, 0x3e, 0x28, 0x23, 0x86, 0xaa, 0xb6, 0xcb, 0xc6, 0xc6, 0x97, 0x31, 0x2d, 0x47, 0xb7, - 0x60, 0x94, 0xf8, 0xfb, 0xab, 0x61, 0xb0, 0x57, 0x3e, 0x97, 0xbf, 0x67, 0x57, 0x38, 0x0a, 0x67, - 0xe8, 0xc9, 0x05, 0x40, 0x14, 0x63, 0x49, 0x02, 0x3d, 0x80, 0x72, 0xc6, 0x8c, 0xf0, 0x09, 0x38, - 0xcf, 0x26, 0xe0, 0x0b, 0xa2, 0x6e, 0x79, 0x23, 0x07, 0xef, 0xb8, 0x07, 0x0c, 0xe7, 0x52, 0x47, - 0xdf, 0x01, 0x93, 0x7c, 0x43, 0x71, 0x7d, 0x6f, 0x54, 0xbe, 0xc0, 0xbe, 0xe6, 0x72, 0xfe, 0xe6, - 0xe4, 0x88, 0x4b, 0x17, 0x44, 0x87, 0x26, 0xf5, 0xd2, 0x08, 0x9b, 0xd4, 0xec, 0x4d, 0x98, 0x52, - 0x7c, 0x8b, 0x2d, 0x1d, 0x54, 0x81, 0x61, 0xca, 0x90, 0xe5, 0x8d, 0xbd, 0x44, 0x67, 0x8a, 0xe9, - 0xe9, 0x30, 0x2f, 0x67, 0x33, 0xe5, 0x7e, 0x48, 0x96, 0x0e, 0x62, 0xc2, 0x6f, 0x5d, 0x45, 0x6d, - 0xa6, 0x24, 0x00, 0x27, 0x38, 0xf6, 0xff, 0xe3, 0x72, 0x4f, 0xc2, 0x1c, 0x07, 0x38, 0x0e, 0x5e, - 0x82, 0xb1, 0x9d, 0x20, 0x8a, 0x29, 0x36, 0x6b, 0x63, 0x38, 0x91, 0x74, 0x6e, 0x88, 0x72, 0xac, - 0x30, 0xd0, 0x9b, 0x30, 0xd9, 0xd4, 0x1b, 0x10, 0x67, 0x99, 0x1a, 0x02, 0xa3, 0x75, 0x6c, 0xe2, - 0xa2, 0x6b, 0x30, 0xc6, 0x5e, 0x6b, 0x9a, 0x81, 0x27, 0xee, 0x77, 0xf2, 0x40, 0x1e, 0xab, 0x8b, - 0xf2, 0x63, 0xed, 0x37, 0x56, 0xd8, 0xf4, 0xce, 0x4d, 0xbb, 0x50, 0xab, 0x8b, 0x53, 0x44, 0xdd, - 0xb9, 0x6f, 0xb0, 0x52, 0x2c, 0xa0, 0xf6, 0xdf, 0x28, 0x68, 0xa3, 0x4c, 0x6f, 0x2c, 0x04, 0xd5, - 0x61, 0xf4, 0xbe, 0xe3, 0xc6, 0xae, 0xbf, 0x2d, 0xc4, 0x85, 0xe7, 0x7b, 0x1e, 0x29, 0xac, 0xd2, - 0x3d, 0x5e, 0x81, 0x1f, 0x7a, 0xe2, 0x0f, 0x96, 0x64, 0x28, 0xc5, 0xb0, 0xe3, 0xfb, 0x94, 0x62, - 0x61, 0x50, 0x8a, 0x98, 0x57, 0xe0, 0x14, 0xc5, 0x1f, 0x2c, 0xc9, 0xa0, 0xf7, 0x00, 0xe4, 0xb2, - 0x24, 0x2d, 0xf1, 0x4a, 0xf2, 0x52, 0x7f, 0xa2, 0x1b, 0xaa, 0xce, 0xd2, 0x14, 0x3d, 0x52, 0x93, - 0xff, 0x58, 0xa3, 0x67, 0xc7, 0x4c, 0xac, 0xea, 0xee, 0x0c, 0xfa, 0x76, 0xca, 0x09, 0x9c, 0x30, - 0x26, 0xad, 0xc5, 0x58, 0x0c, 0xce, 0x0b, 0x83, 0x49, 0xc5, 0x1b, 0xee, 0x1e, 0xd1, 0xb9, 
0x86, - 0x20, 0x82, 0x13, 0x7a, 0xf6, 0xcf, 0x17, 0xa1, 0x9c, 0xd7, 0x5d, 0xba, 0xe8, 0xc8, 0x03, 0x37, - 0x5e, 0xa6, 0xd2, 0x90, 0x65, 0x2e, 0xba, 0x15, 0x51, 0x8e, 0x15, 0x06, 0x9d, 0xfd, 0xc8, 0xdd, - 0x96, 0x97, 0x9a, 0xe1, 0x64, 0xf6, 0x1b, 0xac, 0x14, 0x0b, 0x28, 0xc5, 0x0b, 0x89, 0x13, 0x89, - 0x67, 0x38, 0x6d, 0x95, 0x60, 0x56, 0x8a, 0x05, 0x54, 0xd7, 0x47, 0x0c, 0xf5, 0xd1, 0x47, 0x18, - 0x43, 0x34, 0x7c, 0xba, 0x43, 0x84, 0xbe, 0x02, 0xb0, 0xe5, 0xfa, 0x6e, 0xb4, 0xc3, 0xa8, 0x8f, - 0x9c, 0x98, 0xba, 0x92, 0xa5, 0x56, 0x15, 0x15, 0xac, 0x51, 0x44, 0xaf, 0xc3, 0xb8, 0xda, 0x80, - 0xb5, 0x6a, 0x79, 0xd4, 0x7c, 0xe3, 0x49, 0xb8, 0x51, 0x15, 0xeb, 0x78, 0xf6, 0xfb, 0xe9, 0xf5, - 0x22, 0x76, 0x80, 0x36, 0xbe, 0xd6, 0xa0, 0xe3, 0x5b, 0xe8, 0x3d, 0xbe, 0xf6, 0xaf, 0x16, 0x61, - 0xda, 0x68, 0xac, 0x13, 0x0d, 0xc0, 0xb3, 0xae, 0xd3, 0x73, 0xce, 0x89, 0x89, 0xd8, 0x7f, 0x76, - 0xff, 0xad, 0xa2, 0x9f, 0x85, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x2b, 0x50, 0xf2, 0x9c, 0x88, 0xe9, - 0x36, 0x88, 0xd8, 0x77, 0x83, 0x10, 0x4b, 0xee, 0x11, 0x4e, 0x14, 0x6b, 0x47, 0x0d, 0xa7, 0x9d, - 0x90, 0xa4, 0x07, 0x32, 0x95, 0x7d, 0xe4, 0x3b, 0xaf, 0xea, 0x04, 0x15, 0x90, 0x0e, 0x30, 0x87, - 0xa1, 0x6b, 0x30, 0x11, 0x12, 0xb6, 0x2a, 0x96, 0xa9, 0x28, 0xc7, 0x96, 0xd9, 0x70, 0x22, 0xf3, - 0x61, 0x0d, 0x86, 0x0d, 0xcc, 0x44, 0x94, 0x1f, 0xe9, 0x21, 0xca, 0x3f, 0x0f, 0xa3, 0xec, 0x87, - 0x5a, 0x01, 0x6a, 0x36, 0x6a, 0xbc, 0x18, 0x4b, 0x78, 0x7a, 0xc1, 0x8c, 0x0d, 0xb8, 0x60, 0x5e, - 0x80, 0xa9, 0xaa, 0x43, 0xf6, 0x02, 0x7f, 0xc5, 0x6f, 0xb5, 0x03, 0xd7, 0x8f, 0x51, 0x19, 0x86, - 0xd8, 0xe9, 0xc0, 0xf7, 0xf6, 0x10, 0xa5, 0x80, 0x87, 0xa8, 0x60, 0x6e, 0xff, 0x56, 0x01, 0x26, - 0xab, 0xc4, 0x23, 0x31, 0xe1, 0x57, 0x99, 0x08, 0xad, 0x02, 0xda, 0x0e, 0x9d, 0x26, 0xa9, 0x93, - 0xd0, 0x0d, 0x5a, 0xba, 0xae, 0xb3, 0xc8, 0xde, 0x13, 0xd0, 0xf5, 0x2e, 0x28, 0xce, 0xa8, 0x81, - 0xde, 0x85, 0xc9, 0x76, 0x48, 0x0c, 0x15, 0x9d, 0x95, 0x27, 0x8d, 0xd4, 0x75, 0x44, 0x2e, 0x08, - 0x1b, 0x45, 0xd8, 0x24, 0x85, 
0xbe, 0x0d, 0x66, 0x82, 0xb0, 0xbd, 0xe3, 0xf8, 0x55, 0xd2, 0x26, - 0x7e, 0x8b, 0x4a, 0xfa, 0x42, 0x05, 0x71, 0xfe, 0xe8, 0xb0, 0x32, 0x73, 0x3b, 0x05, 0xc3, 0x5d, - 0xd8, 0xe8, 0x5d, 0x98, 0x6d, 0x87, 0x41, 0xdb, 0xd9, 0x66, 0x0b, 0x45, 0x08, 0x34, 0x9c, 0xfb, - 0xbc, 0x74, 0x74, 0x58, 0x99, 0xad, 0xa7, 0x81, 0xc7, 0x87, 0x95, 0x73, 0x6c, 0xa0, 0x68, 0x49, - 0x02, 0xc4, 0xdd, 0x64, 0xec, 0x6d, 0xb8, 0x50, 0x0d, 0xee, 0xfb, 0xf7, 0x9d, 0xb0, 0xb5, 0x58, - 0xaf, 0x69, 0xba, 0x83, 0x75, 0x79, 0x77, 0xe5, 0x6f, 0xd1, 0x99, 0xe7, 0x94, 0x56, 0x93, 0xcb, - 0x2f, 0xab, 0xae, 0x47, 0x72, 0x74, 0x14, 0x7f, 0xbb, 0x60, 0xb4, 0x94, 0xe0, 0xab, 0x67, 0x05, - 0x2b, 0xf7, 0x59, 0xe1, 0x1d, 0x18, 0xdb, 0x72, 0x89, 0xd7, 0xc2, 0x64, 0x4b, 0xcc, 0xcc, 0x73, - 0xf9, 0xcf, 0x6b, 0xab, 0x14, 0x53, 0xea, 0xa4, 0xf8, 0xcd, 0x77, 0x55, 0x54, 0xc6, 0x8a, 0x0c, - 0xda, 0x85, 0x19, 0x79, 0xb5, 0x92, 0x50, 0xb1, 0x89, 0x9f, 0xef, 0x75, 0x5f, 0x33, 0x89, 0xb3, - 0x09, 0xc4, 0x29, 0x32, 0xb8, 0x8b, 0x30, 0xbd, 0xea, 0xee, 0xd1, 0xe3, 0x6a, 0x88, 0x2d, 0x69, - 0x76, 0xd5, 0x65, 0xb7, 0x76, 0x56, 0x6a, 0xff, 0xa8, 0x05, 0x8f, 0x77, 0x8d, 0x8c, 0xd0, 0x5e, - 0x9c, 0xf2, 0x2c, 0xa4, 0xb5, 0x09, 0x85, 0xfe, 0xda, 0x04, 0xfb, 0x1f, 0x59, 0x70, 0x7e, 0x65, - 0xaf, 0x1d, 0x1f, 0x54, 0x5d, 0xf3, 0xe9, 0xe3, 0x0d, 0x18, 0xd9, 0x23, 0x2d, 0xb7, 0xb3, 0x27, - 0x66, 0xae, 0x22, 0x59, 0xfa, 0x1a, 0x2b, 0x3d, 0x3e, 0xac, 0x4c, 0x36, 0xe2, 0x20, 0x74, 0xb6, - 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x1d, 0x8c, 0xee, 0x87, 0xe4, 0x96, 0xbb, 0xe7, 0xca, 0xe7, 0xd2, - 0x9e, 0x1a, 0xb5, 0x79, 0x39, 0xa0, 0xf3, 0xef, 0x74, 0x1c, 0x3f, 0x76, 0xe3, 0x03, 0xf1, 0xaa, - 0x23, 0x89, 0xe0, 0x84, 0x9e, 0xfd, 0x0d, 0x0b, 0xa6, 0x25, 0x2f, 0x59, 0x6c, 0xb5, 0x42, 0x12, - 0x45, 0x68, 0x0e, 0x0a, 0x6e, 0x5b, 0xf4, 0x12, 0x44, 0x2f, 0x0b, 0xb5, 0x3a, 0x2e, 0xb8, 0x6d, - 0x54, 0x87, 0x12, 0x7f, 0x75, 0x4d, 0x16, 0xd7, 0x40, 0x6f, 0xb7, 0xac, 0x07, 0x1b, 0xb2, 0x26, - 0x4e, 0x88, 0x48, 0xa9, 0x98, 0x9d, 0x43, 0x45, 0xf3, 0x49, 0xe8, 
0x86, 0x28, 0xc7, 0x0a, 0x03, - 0x5d, 0x81, 0x31, 0x3f, 0x68, 0xf1, 0x47, 0x70, 0xbe, 0xa7, 0xd9, 0x92, 0x5d, 0x17, 0x65, 0x58, - 0x41, 0xed, 0xef, 0xb7, 0x60, 0x42, 0x7e, 0xd9, 0x80, 0x02, 0x3a, 0xdd, 0x5a, 0x89, 0x70, 0x9e, - 0x6c, 0x2d, 0x2a, 0x60, 0x33, 0x88, 0x21, 0x57, 0x17, 0x4f, 0x22, 0x57, 0xdb, 0x3f, 0x52, 0x80, - 0x29, 0xd9, 0x9d, 0x46, 0x67, 0x33, 0x22, 0x31, 0xda, 0x80, 0x92, 0xc3, 0x87, 0x9c, 0xc8, 0x15, - 0xfb, 0x4c, 0xf6, 0x85, 0xce, 0x98, 0x9f, 0x44, 0xd4, 0x59, 0x94, 0xb5, 0x71, 0x42, 0x08, 0x79, - 0x30, 0xeb, 0x07, 0x31, 0x3b, 0xf6, 0x14, 0xbc, 0xd7, 0xb3, 0x43, 0x9a, 0xfa, 0x45, 0x41, 0x7d, - 0x76, 0x3d, 0x4d, 0x05, 0x77, 0x13, 0x46, 0x2b, 0x52, 0x89, 0x54, 0xcc, 0xbf, 0xc2, 0xe9, 0xb3, - 0x90, 0xad, 0x43, 0xb2, 0x7f, 0xd9, 0x82, 0x92, 0x44, 0x3b, 0x8b, 0x17, 0xa6, 0x35, 0x18, 0x8d, - 0xd8, 0x24, 0xc8, 0xa1, 0xb1, 0x7b, 0x75, 0x9c, 0xcf, 0x57, 0x72, 0x9a, 0xf3, 0xff, 0x11, 0x96, - 0x34, 0x98, 0x16, 0x5c, 0x75, 0xff, 0x13, 0xa2, 0x05, 0x57, 0xfd, 0xc9, 0x39, 0x61, 0xfe, 0x1b, - 0xeb, 0xb3, 0xa6, 0x2a, 0xa0, 0x42, 0x67, 0x3b, 0x24, 0x5b, 0xee, 0x83, 0xb4, 0xd0, 0x59, 0x67, - 0xa5, 0x58, 0x40, 0xd1, 0x7b, 0x30, 0xd1, 0x94, 0xca, 0xe3, 0x84, 0x0d, 0x3c, 0xdb, 0x53, 0x15, - 0xaf, 0x5e, 0x6d, 0xb8, 0x81, 0xdc, 0xb2, 0x56, 0x1f, 0x1b, 0xd4, 0xcc, 0x77, 0xff, 0x62, 0xbf, - 0x77, 0xff, 0x84, 0x6e, 0xee, 0xcb, 0xb5, 0xfd, 0x63, 0x16, 0x8c, 0x70, 0x15, 0xe4, 0x60, 0x3a, - 0x5b, 0xed, 0x15, 0x2a, 0x19, 0xbb, 0xbb, 0xb4, 0x50, 0x3c, 0x4a, 0xa1, 0x35, 0x28, 0xb1, 0x1f, - 0x4c, 0x15, 0x53, 0xcc, 0xb7, 0x0c, 0xe4, 0xad, 0xea, 0x1d, 0xbc, 0x2b, 0xab, 0xe1, 0x84, 0x82, - 0xfd, 0x83, 0x45, 0xca, 0xaa, 0x12, 0x54, 0xe3, 0x04, 0xb7, 0x1e, 0xdd, 0x09, 0x5e, 0x78, 0x54, - 0x27, 0xf8, 0x36, 0x4c, 0x37, 0xb5, 0x27, 0xaf, 0x64, 0x26, 0xaf, 0xf4, 0x5c, 0x24, 0xda, 0xeb, - 0x18, 0x57, 0xc3, 0x2d, 0x9b, 0x44, 0x70, 0x9a, 0x2a, 0xfa, 0x76, 0x98, 0xe0, 0xf3, 0x2c, 0x5a, - 0x19, 0x62, 0xad, 0x7c, 0x26, 0x7f, 0xbd, 0xe8, 0x4d, 0xb0, 0x95, 0xd8, 0xd0, 0xaa, 0x63, 0x83, - 0x98, 
0xfd, 0xf3, 0x63, 0x30, 0xbc, 0xb2, 0x4f, 0xfc, 0xf8, 0x0c, 0x18, 0x52, 0x13, 0xa6, 0x5c, - 0x7f, 0x3f, 0xf0, 0xf6, 0x49, 0x8b, 0xc3, 0x4f, 0x72, 0xb8, 0x3e, 0x26, 0x48, 0x4f, 0xd5, 0x0c, - 0x12, 0x38, 0x45, 0xf2, 0x51, 0xdc, 0xda, 0xaf, 0xc3, 0x08, 0x9f, 0x7b, 0x71, 0x65, 0xcf, 0x54, - 0xb0, 0xb3, 0x41, 0x14, 0xbb, 0x20, 0xd1, 0x28, 0x70, 0x8d, 0xbe, 0xa8, 0x8e, 0xde, 0x87, 0xa9, - 0x2d, 0x37, 0x8c, 0x62, 0x7a, 0xdd, 0x8e, 0x62, 0x67, 0xaf, 0xfd, 0x10, 0xb7, 0x74, 0x35, 0x0e, - 0xab, 0x06, 0x25, 0x9c, 0xa2, 0x8c, 0xb6, 0x61, 0x92, 0x5e, 0x1c, 0x93, 0xa6, 0x46, 0x4f, 0xdc, - 0x94, 0x52, 0xc3, 0xdd, 0xd2, 0x09, 0x61, 0x93, 0x2e, 0x65, 0x26, 0x4d, 0x76, 0xd1, 0x1c, 0x63, - 0x12, 0x85, 0x62, 0x26, 0xfc, 0x86, 0xc9, 0x61, 0x94, 0x27, 0x31, 0x53, 0x91, 0x92, 0xc9, 0x93, - 0x34, 0x83, 0x90, 0xaf, 0x42, 0x89, 0xd0, 0x21, 0xa4, 0x84, 0xc5, 0x63, 0xc3, 0xc2, 0x60, 0x7d, - 0x5d, 0x73, 0x9b, 0x61, 0x60, 0xea, 0x47, 0x56, 0x24, 0x25, 0x9c, 0x10, 0x45, 0xcb, 0x30, 0x12, - 0x91, 0xd0, 0x25, 0x91, 0x78, 0x76, 0xe8, 0x31, 0x8d, 0x0c, 0x8d, 0x9b, 0x90, 0xf2, 0xdf, 0x58, - 0x54, 0xa5, 0xcb, 0xcb, 0x61, 0xb7, 0x21, 0xf6, 0xd2, 0xa0, 0x2d, 0xaf, 0x45, 0x56, 0x8a, 0x05, - 0x14, 0xbd, 0x0d, 0xa3, 0x21, 0xf1, 0x98, 0x02, 0x6e, 0x72, 0xf0, 0x45, 0xce, 0xf5, 0x79, 0xbc, - 0x1e, 0x96, 0x04, 0xd0, 0x4d, 0x40, 0x21, 0xa1, 0x32, 0x84, 0xeb, 0x6f, 0x2b, 0x03, 0x0a, 0xf1, - 0x7e, 0xf0, 0x84, 0x68, 0xff, 0x1c, 0x4e, 0x30, 0xfc, 0x38, 0x0c, 0x3c, 0x8f, 0x84, 0x38, 0xa3, - 0x1a, 0xba, 0x0e, 0xb3, 0xaa, 0xb4, 0xe6, 0x47, 0xb1, 0xe3, 0x37, 0x09, 0x7b, 0x3a, 0x28, 0x25, - 0x52, 0x11, 0x4e, 0x23, 0xe0, 0xee, 0x3a, 0xf6, 0xd7, 0xa9, 0x38, 0x43, 0x47, 0xeb, 0x0c, 0x64, - 0x81, 0xb7, 0x4c, 0x59, 0xe0, 0x62, 0xee, 0xcc, 0xe5, 0xc8, 0x01, 0x47, 0x16, 0x8c, 0x6b, 0x33, - 0x9b, 0xac, 0x59, 0xab, 0xc7, 0x9a, 0xed, 0xc0, 0x0c, 0x5d, 0xe9, 0xb7, 0x37, 0x99, 0x37, 0x45, - 0x8b, 0x2d, 0xcc, 0xc2, 0xc3, 0x2d, 0xcc, 0xb2, 0x68, 0x60, 0xe6, 0x56, 0x8a, 0x20, 0xee, 0x6a, - 0x02, 0xbd, 0x21, 0xb5, 0x51, 0x45, 0xc3, 
0x30, 0x8a, 0x6b, 0x9a, 0x8e, 0x0f, 0x2b, 0x33, 0xda, - 0x87, 0xe8, 0xda, 0x27, 0xfb, 0xab, 0xf2, 0x1b, 0x39, 0xb3, 0x59, 0x80, 0x52, 0x53, 0x2d, 0x16, - 0xcb, 0xb4, 0xa5, 0x55, 0xcb, 0x01, 0x27, 0x38, 0x74, 0x8f, 0xd2, 0x2b, 0x48, 0xda, 0x96, 0x8f, - 0x5e, 0x50, 0x30, 0x83, 0xd8, 0xaf, 0x02, 0xac, 0x3c, 0x20, 0x4d, 0xbe, 0xd4, 0xf5, 0x47, 0x5d, - 0x2b, 0xff, 0x51, 0xd7, 0xfe, 0x8f, 0x16, 0x4c, 0xad, 0x2e, 0x1b, 0xd7, 0xc4, 0x79, 0x00, 0x7e, - 0x37, 0xba, 0x77, 0x6f, 0x5d, 0xbe, 0x57, 0x70, 0x95, 0xb3, 0x2a, 0xc5, 0x1a, 0x06, 0xba, 0x08, - 0x45, 0xaf, 0xe3, 0x8b, 0x2b, 0xcb, 0xe8, 0xd1, 0x61, 0xa5, 0x78, 0xab, 0xe3, 0x63, 0x5a, 0xa6, - 0x99, 0xcf, 0x15, 0x07, 0x36, 0x9f, 0xeb, 0xeb, 0x25, 0x81, 0x2a, 0x30, 0x7c, 0xff, 0xbe, 0xdb, - 0x8a, 0xca, 0xc3, 0xc9, 0x5b, 0xca, 0xbd, 0x7b, 0xb5, 0x6a, 0x84, 0x79, 0xb9, 0xfd, 0x97, 0x8a, - 0x30, 0xb3, 0xea, 0x91, 0x07, 0x0f, 0x65, 0x85, 0x3b, 0xa8, 0xc9, 0xdf, 0x9d, 0x6e, 0x29, 0xf1, - 0xb4, 0x8d, 0x1c, 0xfb, 0x0f, 0xc5, 0x7b, 0x30, 0xca, 0x6d, 0x03, 0xf8, 0x60, 0x8c, 0x5f, 0x7d, - 0x25, 0xab, 0x0b, 0xe9, 0xb1, 0x98, 0x17, 0xea, 0x38, 0x6e, 0x28, 0xa5, 0x8e, 0x56, 0x51, 0x8a, - 0x25, 0xc9, 0xb9, 0xcf, 0xc3, 0x84, 0x8e, 0x79, 0x22, 0x8b, 0xa9, 0xbf, 0x6c, 0xc1, 0xb9, 0x55, - 0x2f, 0x68, 0xee, 0xa6, 0xec, 0x2f, 0x5f, 0x87, 0x71, 0xca, 0x34, 0x22, 0xc3, 0xf2, 0xdc, 0xf0, - 0x45, 0x10, 0x20, 0xac, 0xe3, 0x69, 0xd5, 0xee, 0xdc, 0xa9, 0x55, 0xb3, 0x5c, 0x18, 0x04, 0x08, - 0xeb, 0x78, 0xf6, 0x6f, 0x58, 0xf0, 0xd4, 0xf5, 0xe5, 0x95, 0xc4, 0x04, 0xb9, 0xcb, 0x8b, 0x82, - 0x5e, 0x39, 0x5a, 0x5a, 0x57, 0x92, 0x2b, 0x47, 0x95, 0xf5, 0x42, 0x40, 0x3f, 0x29, 0x1e, 0x42, - 0x3f, 0x69, 0xc1, 0xb9, 0xeb, 0x6e, 0x4c, 0xcf, 0x80, 0xb4, 0x3d, 0x3f, 0x3d, 0x04, 0x22, 0x37, - 0x0e, 0xc2, 0x83, 0xb4, 0x3d, 0x3f, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x96, 0xf7, 0xdd, 0x88, 0xf6, - 0xb4, 0x60, 0xea, 0x3d, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x7e, 0x58, 0xcb, 0x0d, 0x99, 0xdc, 0x7a, - 0x20, 0xb6, 0xb3, 0xfa, 0xb0, 0xaa, 0x04, 0xe0, 0x04, 0xc7, 0xfe, 0x51, 0x0b, 
0x2e, 0x5c, 0xf7, - 0x3a, 0x51, 0x4c, 0xc2, 0xad, 0xc8, 0xe8, 0xec, 0xab, 0x50, 0x22, 0xf2, 0x6e, 0x28, 0xfa, 0xaa, - 0xa4, 0x19, 0x75, 0x69, 0xe4, 0xce, 0x04, 0x0a, 0x6f, 0x00, 0x63, 0xe6, 0x93, 0x19, 0xe1, 0xfe, - 0x4c, 0x01, 0x26, 0x6f, 0x6c, 0x6c, 0xd4, 0xaf, 0x93, 0x58, 0xb0, 0xcc, 0xfe, 0x7a, 0x4d, 0xac, - 0xa9, 0x67, 0x7a, 0x49, 0xe0, 0x9d, 0xd8, 0xf5, 0xe6, 0xb9, 0xf7, 0xda, 0x7c, 0xcd, 0x8f, 0x6f, - 0x87, 0x8d, 0x38, 0x74, 0xfd, 0xed, 0x4c, 0x85, 0x8e, 0x64, 0xec, 0xc5, 0x3c, 0xc6, 0x8e, 0x5e, - 0x85, 0x11, 0xe6, 0x3e, 0x27, 0x65, 0xe1, 0x27, 0x94, 0x00, 0xcb, 0x4a, 0x8f, 0x0f, 0x2b, 0xa5, - 0x3b, 0xb8, 0xc6, 0xff, 0x60, 0x81, 0x8a, 0xee, 0xc0, 0xf8, 0x4e, 0x1c, 0xb7, 0x6f, 0x10, 0xa7, - 0x45, 0x42, 0xc9, 0x1d, 0x2e, 0x65, 0x71, 0x07, 0x3a, 0x08, 0x1c, 0x2d, 0xd9, 0x50, 0x49, 0x59, - 0x84, 0x75, 0x3a, 0x76, 0x03, 0x20, 0x81, 0x9d, 0xd2, 0x65, 0xd6, 0xfe, 0x7d, 0x0b, 0x46, 0xb9, - 0x27, 0x43, 0x88, 0xbe, 0x00, 0x43, 0xe4, 0x01, 0x69, 0x0a, 0x31, 0x25, 0xb3, 0xc3, 0xc9, 0x29, - 0xc7, 0x55, 0xb3, 0xf4, 0x3f, 0x66, 0xb5, 0xd0, 0x0d, 0x18, 0xa5, 0xbd, 0xbd, 0xae, 0xdc, 0x3a, - 0x9e, 0xce, 0xfb, 0x62, 0x35, 0xed, 0xfc, 0x60, 0x14, 0x45, 0x58, 0x56, 0x67, 0x6a, 0xc6, 0x66, - 0xbb, 0x41, 0x19, 0x58, 0xdc, 0x4b, 0x09, 0xb0, 0xb1, 0x5c, 0xe7, 0x48, 0x82, 0x1a, 0x57, 0x33, - 0xca, 0x42, 0x9c, 0x10, 0xb1, 0x37, 0xa0, 0x44, 0x27, 0x75, 0xd1, 0x73, 0x9d, 0xde, 0x1a, 0xce, - 0x17, 0xa1, 0x24, 0xb5, 0x8d, 0x91, 0xf0, 0xb5, 0x60, 0x54, 0xa5, 0x32, 0x32, 0xc2, 0x09, 0xdc, - 0xde, 0x82, 0xf3, 0xec, 0xe9, 0xde, 0x89, 0x77, 0x8c, 0x3d, 0xd6, 0x7f, 0x31, 0xbf, 0x24, 0xa4, - 0x7e, 0x3e, 0x33, 0x65, 0xcd, 0x38, 0x7c, 0x42, 0x52, 0x4c, 0x6e, 0x00, 0xf6, 0x1f, 0x0e, 0xc1, - 0x13, 0xb5, 0x46, 0xbe, 0x93, 0xcb, 0x35, 0x98, 0xe0, 0x32, 0x01, 0x5d, 0xda, 0x8e, 0x27, 0xda, - 0x55, 0x0f, 0x5b, 0x1b, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x29, 0x28, 0xba, 0x1f, 0xf8, 0x69, 0x4b, - 0xd0, 0xda, 0x3b, 0xeb, 0x98, 0x96, 0x53, 0x30, 0x15, 0x2f, 0x38, 0x2b, 0x55, 0x60, 0x25, 0x62, - 0xbc, 0x05, 0x53, 
0x6e, 0xd4, 0x8c, 0xdc, 0x9a, 0x4f, 0xf9, 0x4c, 0xe2, 0x20, 0x95, 0xdc, 0x48, - 0x69, 0xa7, 0x15, 0x14, 0xa7, 0xb0, 0x35, 0xbe, 0x3e, 0x3c, 0xb0, 0x88, 0xd2, 0xd7, 0xf9, 0x80, - 0x4a, 0x5f, 0x6d, 0xf6, 0x75, 0x11, 0xb3, 0x4a, 0x13, 0xd2, 0x17, 0xff, 0xe0, 0x08, 0x4b, 0x18, - 0x15, 0xf7, 0x9b, 0x3b, 0x4e, 0x7b, 0xb1, 0x13, 0xef, 0x54, 0xdd, 0xa8, 0x19, 0xec, 0x93, 0xf0, - 0x80, 0xdd, 0xd4, 0xc6, 0x12, 0x71, 0x5f, 0x01, 0x96, 0x6f, 0x2c, 0xd6, 0x29, 0x26, 0xee, 0xae, - 0x63, 0xaa, 0xac, 0xe0, 0x34, 0x5c, 0x55, 0x16, 0x61, 0x5a, 0x36, 0xd3, 0x20, 0x11, 0x3b, 0x23, - 0xc6, 0x59, 0xc7, 0x94, 0xeb, 0xa2, 0x28, 0x56, 0xdd, 0x4a, 0xe3, 0xa3, 0x37, 0x60, 0xd2, 0xf5, - 0xdd, 0xd8, 0x75, 0xe2, 0x20, 0x64, 0x27, 0x2c, 0xbf, 0x94, 0xb1, 0x17, 0xb8, 0x9a, 0x0e, 0xc0, - 0x26, 0x9e, 0xfd, 0x07, 0x43, 0x30, 0xcb, 0xa6, 0xed, 0x9b, 0x2b, 0xec, 0x13, 0xb3, 0xc2, 0xee, - 0x74, 0xaf, 0xb0, 0xd3, 0x10, 0x77, 0x3f, 0xce, 0x65, 0xf6, 0x3e, 0x94, 0x94, 0x31, 0xaf, 0xb4, - 0x47, 0xb7, 0x72, 0xec, 0xd1, 0xfb, 0x4b, 0x1f, 0xf2, 0xcd, 0xb0, 0x98, 0xf9, 0x66, 0xf8, 0x77, - 0x2c, 0x48, 0x6c, 0x1a, 0xd1, 0x0d, 0x28, 0xb5, 0x03, 0x66, 0x37, 0x10, 0x4a, 0x63, 0x9c, 0x27, - 0x32, 0x0f, 0x2a, 0x7e, 0x28, 0xf2, 0xf1, 0xab, 0xcb, 0x1a, 0x38, 0xa9, 0x8c, 0x96, 0x60, 0xb4, - 0x1d, 0x92, 0x46, 0xcc, 0xbc, 0xf2, 0xfa, 0xd2, 0xe1, 0x6b, 0x84, 0xe3, 0x63, 0x59, 0xd1, 0xfe, - 0x59, 0x0b, 0x80, 0x3f, 0xcb, 0x39, 0xfe, 0x36, 0x39, 0x03, 0x55, 0x63, 0x15, 0x86, 0xa2, 0x36, - 0x69, 0xf6, 0xb2, 0xe8, 0x48, 0xfa, 0xd3, 0x68, 0x93, 0x66, 0x32, 0xe0, 0xf4, 0x1f, 0x66, 0xb5, - 0xed, 0xef, 0x01, 0x98, 0x4a, 0xd0, 0x6a, 0x31, 0xd9, 0x43, 0x2f, 0x1b, 0x3e, 0x4f, 0x17, 0x53, - 0x3e, 0x4f, 0x25, 0x86, 0xad, 0x69, 0xb5, 0xde, 0x87, 0xe2, 0x9e, 0xf3, 0x40, 0xa8, 0x2d, 0x5e, - 0xec, 0xdd, 0x0d, 0x4a, 0x7f, 0x7e, 0xcd, 0x79, 0xc0, 0xef, 0x4c, 0x2f, 0xca, 0x05, 0xb2, 0xe6, - 0x3c, 0x38, 0xe6, 0x76, 0x1b, 0x8c, 0x49, 0xdd, 0x72, 0xa3, 0xf8, 0x6b, 0xff, 0x25, 0xf9, 0xcf, - 0x96, 0x1d, 0x6d, 0x84, 0xb5, 0xe5, 0xfa, 0xe2, 0x91, 
0x6a, 0xa0, 0xb6, 0x5c, 0x3f, 0xdd, 0x96, - 0xeb, 0x0f, 0xd0, 0x96, 0xeb, 0xa3, 0x0f, 0x61, 0x54, 0x3c, 0x08, 0x33, 0x63, 0x6d, 0x53, 0x25, - 0x92, 0xd7, 0x9e, 0x78, 0x4f, 0xe6, 0x6d, 0x2e, 0xc8, 0x3b, 0xa1, 0x28, 0xed, 0xdb, 0xae, 0x6c, - 0x10, 0xfd, 0x2d, 0x0b, 0xa6, 0xc4, 0x6f, 0x4c, 0x3e, 0xe8, 0x90, 0x28, 0x16, 0xb2, 0xe7, 0xe7, - 0x06, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2, 0x39, 0xc9, 0x66, 0x4d, 0x60, 0xdf, 0x1e, 0xa5, 0x7a, - 0x81, 0xfe, 0x89, 0x05, 0xe7, 0xf7, 0x9c, 0x07, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xd8, 0x0d, 0x84, - 0xf1, 0xf9, 0x17, 0x06, 0x9b, 0xfe, 0xae, 0xea, 0xbc, 0x93, 0xd2, 0x4e, 0xf5, 0x7c, 0x16, 0x4a, - 0xdf, 0xae, 0x66, 0xf6, 0x6b, 0x6e, 0x0b, 0xc6, 0xe4, 0x7a, 0xcb, 0xb8, 0x79, 0x57, 0x75, 0xc1, - 0xfa, 0xc4, 0xef, 0xf1, 0xda, 0x4d, 0x9d, 0xb5, 0x23, 0xd6, 0xda, 0x23, 0x6d, 0xe7, 0x7d, 0x98, - 0xd0, 0xd7, 0xd8, 0x23, 0x6d, 0xeb, 0x03, 0x38, 0x97, 0xb1, 0x96, 0x1e, 0x69, 0x93, 0xf7, 0xe1, - 0x62, 0xee, 0xfa, 0x78, 0x94, 0x0d, 0xdb, 0x3f, 0x63, 0xe9, 0x7c, 0xf0, 0x0c, 0xf4, 0xbd, 0xcb, - 0xa6, 0xbe, 0xf7, 0x52, 0xef, 0x9d, 0x93, 0xa3, 0xf4, 0x7d, 0x4f, 0xef, 0x34, 0xe5, 0xea, 0xe8, - 0x6d, 0x18, 0xf1, 0x68, 0x89, 0xb4, 0x44, 0xb0, 0xfb, 0xef, 0xc8, 0x44, 0x96, 0x62, 0xe5, 0x11, - 0x16, 0x14, 0xec, 0x5f, 0xb0, 0x60, 0xe8, 0x0c, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x72, 0x2e, 0x69, - 0x11, 0x06, 0x67, 0x1e, 0x3b, 0xf7, 0x57, 0x64, 0xa8, 0x9f, 0x9c, 0x81, 0xf9, 0xbf, 0x05, 0x18, - 0xa7, 0x4d, 0x49, 0x93, 0xb9, 0x37, 0x61, 0xd2, 0x73, 0x36, 0x89, 0x27, 0x1f, 0x0d, 0xd3, 0x0a, - 0x93, 0x5b, 0x3a, 0x10, 0x9b, 0xb8, 0xb4, 0xf2, 0x96, 0xfe, 0x7e, 0x2a, 0xe4, 0x17, 0x55, 0xd9, - 0x78, 0x5c, 0xc5, 0x26, 0x2e, 0xbd, 0xbb, 0xdf, 0x77, 0xe2, 0xe6, 0x8e, 0x50, 0xa6, 0xa8, 0xee, - 0xde, 0xa3, 0x85, 0x98, 0xc3, 0xa8, 0x00, 0x27, 0x57, 0xe7, 0x5d, 0x7a, 0x33, 0x0c, 0x7c, 0x21, - 0x1e, 0x2b, 0x01, 0x0e, 0x9b, 0x60, 0x9c, 0xc6, 0xcf, 0x70, 0x7e, 0x1e, 0x66, 0x06, 0x81, 0x03, - 0x38, 0x3f, 0xa3, 0x3a, 0x9c, 0x77, 0xfd, 0xa6, 0xd7, 0x69, 0x91, 0x3b, 0x3e, 0x97, 0xee, 
0x3c, - 0xf7, 0x43, 0xd2, 0x12, 0x02, 0xb4, 0xb2, 0xdd, 0xac, 0x65, 0xe0, 0xe0, 0xcc, 0x9a, 0xf6, 0x5f, - 0x80, 0x73, 0xb7, 0x02, 0xa7, 0xb5, 0xe4, 0x78, 0x8e, 0xdf, 0x24, 0x61, 0xcd, 0xdf, 0xee, 0x6b, - 0x92, 0xa4, 0x1b, 0x10, 0x15, 0xfa, 0x19, 0x10, 0xd9, 0x3b, 0x80, 0xf4, 0x06, 0x84, 0x21, 0x2c, - 0x86, 0x51, 0x97, 0x37, 0x25, 0x96, 0xff, 0x73, 0xd9, 0xd2, 0x75, 0x57, 0xcf, 0x34, 0x13, 0x4f, - 0x5e, 0x80, 0x25, 0x21, 0xfb, 0x1a, 0x64, 0x3a, 0xbf, 0xf5, 0x57, 0xdb, 0xd8, 0xaf, 0xc3, 0x2c, - 0xab, 0x79, 0x32, 0x95, 0x82, 0xfd, 0xd7, 0x2c, 0x98, 0x5e, 0x4f, 0x85, 0x2b, 0x78, 0x96, 0x3d, - 0xec, 0x65, 0xe8, 0xdd, 0x1b, 0xac, 0x14, 0x0b, 0xe8, 0xa9, 0xeb, 0xf7, 0xfe, 0xd4, 0x82, 0x92, - 0x8a, 0x84, 0x72, 0x06, 0x42, 0xed, 0xb2, 0x21, 0xd4, 0x66, 0xea, 0x9d, 0x54, 0x77, 0xf2, 0x64, - 0x5a, 0x74, 0x53, 0x39, 0xde, 0xf7, 0x50, 0x39, 0x25, 0x64, 0xb8, 0x9b, 0xf6, 0x94, 0xe9, 0x9d, - 0x2f, 0x5d, 0xf1, 0x99, 0x4d, 0x90, 0xc2, 0xfd, 0x84, 0xd8, 0x04, 0xa9, 0xfe, 0xe4, 0x70, 0xbf, - 0xba, 0xd6, 0x65, 0x76, 0x2a, 0x7c, 0x2b, 0xb3, 0x9b, 0x67, 0x7b, 0x53, 0xc5, 0xbb, 0xa8, 0x08, - 0x3b, 0x78, 0x51, 0x7a, 0xcc, 0x18, 0x99, 0xf8, 0xc7, 0xa3, 0xd6, 0x24, 0x55, 0xec, 0x1b, 0x30, - 0x9d, 0x1a, 0x30, 0xf4, 0x3a, 0x0c, 0xb7, 0x77, 0x9c, 0x88, 0xa4, 0xec, 0x20, 0x87, 0xeb, 0xb4, - 0xf0, 0xf8, 0xb0, 0x32, 0xa5, 0x2a, 0xb0, 0x12, 0xcc, 0xb1, 0xed, 0xff, 0x69, 0xc1, 0xd0, 0x7a, - 0xd0, 0x3a, 0x8b, 0xc5, 0xf4, 0x96, 0xb1, 0x98, 0x9e, 0xcc, 0x8b, 0xf9, 0x95, 0xbb, 0x8e, 0x56, - 0x53, 0xeb, 0xe8, 0x52, 0x2e, 0x85, 0xde, 0x4b, 0x68, 0x0f, 0xc6, 0x59, 0x24, 0x31, 0x61, 0x97, - 0xf9, 0xaa, 0x71, 0xbf, 0xaa, 0xa4, 0xee, 0x57, 0xd3, 0x1a, 0xaa, 0x76, 0xcb, 0x7a, 0x1e, 0x46, - 0x85, 0x6d, 0x60, 0xda, 0x43, 0x40, 0xe0, 0x62, 0x09, 0xb7, 0x7f, 0xac, 0x08, 0x46, 0xe4, 0x32, - 0xf4, 0xcb, 0x16, 0xcc, 0x87, 0xdc, 0xe5, 0xb2, 0x55, 0xed, 0x84, 0xae, 0xbf, 0xdd, 0x68, 0xee, - 0x90, 0x56, 0xc7, 0x73, 0xfd, 0xed, 0xda, 0xb6, 0x1f, 0xa8, 0xe2, 0x95, 0x07, 0xa4, 0xd9, 0x61, - 0x6f, 0x2e, 0x7d, 0xc2, 0xa4, 
0x29, 0xdb, 0x9b, 0xab, 0x47, 0x87, 0x95, 0x79, 0x7c, 0x22, 0xda, - 0xf8, 0x84, 0x7d, 0x41, 0xbf, 0x61, 0xc1, 0x02, 0x0f, 0xe8, 0x35, 0x78, 0xff, 0x7b, 0xdc, 0x46, - 0xeb, 0x92, 0x54, 0x42, 0x64, 0x83, 0x84, 0x7b, 0x4b, 0x6f, 0x88, 0x01, 0x5d, 0xa8, 0x9f, 0xac, - 0x2d, 0x7c, 0xd2, 0xce, 0xd9, 0xff, 0xa6, 0x08, 0x93, 0x74, 0x14, 0x93, 0x30, 0x23, 0xaf, 0x1b, - 0x4b, 0xe2, 0xe9, 0xd4, 0x92, 0x98, 0x35, 0x90, 0x4f, 0x27, 0xc2, 0x48, 0x04, 0xb3, 0x9e, 0x13, - 0xc5, 0x37, 0x88, 0x13, 0xc6, 0x9b, 0xc4, 0xe1, 0x36, 0x29, 0xc5, 0x13, 0xdb, 0xcf, 0x28, 0xf5, - 0xd7, 0xad, 0x34, 0x31, 0xdc, 0x4d, 0x1f, 0xed, 0x03, 0x62, 0x86, 0x35, 0xa1, 0xe3, 0x47, 0xfc, - 0x5b, 0x5c, 0xf1, 0x1e, 0x73, 0xb2, 0x56, 0xe7, 0x44, 0xab, 0xe8, 0x56, 0x17, 0x35, 0x9c, 0xd1, - 0x82, 0x66, 0x30, 0x35, 0x3c, 0xa8, 0xc1, 0xd4, 0x48, 0x1f, 0x37, 0x9c, 0x3d, 0x98, 0x11, 0xb3, - 0xb2, 0xe5, 0x6e, 0x8b, 0x43, 0xfa, 0xcb, 0x29, 0x83, 0x4a, 0x6b, 0x70, 0xab, 0x98, 0x3e, 0xd6, - 0x94, 0xf6, 0x77, 0xc2, 0x39, 0xda, 0x9c, 0xe9, 0x34, 0x12, 0x21, 0x02, 0xd3, 0xbb, 0x9d, 0x4d, - 0xe2, 0x91, 0x58, 0x96, 0x89, 0x46, 0x33, 0xc5, 0x7e, 0xb3, 0x76, 0x22, 0x5b, 0xde, 0x34, 0x49, - 0xe0, 0x34, 0x4d, 0xfb, 0x27, 0x2c, 0x60, 0xa6, 0xd9, 0x67, 0x70, 0xfc, 0x7d, 0xd1, 0x3c, 0xfe, - 0xca, 0x79, 0x1c, 0x28, 0xe7, 0xe4, 0x7b, 0x8d, 0x4f, 0x4b, 0x3d, 0x0c, 0x1e, 0x1c, 0x48, 0xd9, - 0xbf, 0xbf, 0xc4, 0xf5, 0x7f, 0x2c, 0xbe, 0x21, 0x95, 0x07, 0x3a, 0xfa, 0x2e, 0x18, 0x6b, 0x3a, - 0x6d, 0xa7, 0xc9, 0x43, 0x46, 0xe6, 0x6a, 0x7f, 0x8c, 0x4a, 0xf3, 0xcb, 0xa2, 0x06, 0xd7, 0x66, - 0x7c, 0x56, 0x7e, 0xa5, 0x2c, 0xee, 0xab, 0xc1, 0x50, 0x4d, 0xce, 0xed, 0xc2, 0xa4, 0x41, 0xec, - 0x91, 0x5e, 0x7d, 0xbf, 0x8b, 0x1f, 0x17, 0xea, 0xc6, 0xb2, 0x07, 0xb3, 0xbe, 0xf6, 0x9f, 0x32, - 0x47, 0x29, 0x4e, 0x7f, 0xba, 0xdf, 0x81, 0xc0, 0x38, 0xa9, 0x66, 0x7a, 0x9e, 0x22, 0x83, 0xbb, - 0x29, 0xdb, 0x7f, 0xcf, 0x82, 0xc7, 0x75, 0x44, 0x2d, 0x38, 0x40, 0x3f, 0x7d, 0x72, 0x15, 0xc6, - 0x82, 0x36, 0x09, 0x9d, 0xe4, 0x4e, 0x76, 0x45, 0x0e, 0xfa, 0x6d, 
0x51, 0x7e, 0x7c, 0x58, 0x39, - 0xaf, 0x53, 0x97, 0xe5, 0x58, 0xd5, 0x44, 0x36, 0x8c, 0xb0, 0xc1, 0x88, 0x44, 0xe0, 0x06, 0x66, - 0x13, 0xc7, 0x9e, 0x56, 0x23, 0x2c, 0x20, 0xf6, 0xf7, 0x58, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0x1f, - 0xc0, 0xcc, 0x1e, 0xbd, 0xbe, 0xad, 0x3c, 0x68, 0x87, 0x5c, 0x8d, 0x2e, 0xc7, 0xe9, 0xc5, 0x7e, - 0xe3, 0xa4, 0x7d, 0x64, 0x62, 0x39, 0xb5, 0x96, 0x22, 0x86, 0xbb, 0xc8, 0xdb, 0x7f, 0x5c, 0xe0, - 0x3b, 0x91, 0x49, 0x75, 0xcf, 0xc3, 0x68, 0x3b, 0x68, 0x2d, 0xd7, 0xaa, 0x58, 0x8c, 0x90, 0x62, - 0x57, 0x75, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x05, 0x20, 0x0f, 0x62, 0x12, 0xfa, 0x8e, 0xa7, 0x0c, - 0x3f, 0x94, 0xf0, 0xb4, 0xa2, 0x20, 0x58, 0xc3, 0xa2, 0x75, 0xda, 0x61, 0xb0, 0xef, 0xb6, 0x98, - 0x6b, 0x5b, 0xd1, 0xac, 0x53, 0x57, 0x10, 0xac, 0x61, 0xd1, 0xab, 0x72, 0xc7, 0x8f, 0xf8, 0x01, - 0xe8, 0x6c, 0x8a, 0x58, 0x67, 0x63, 0xc9, 0x55, 0xf9, 0x8e, 0x0e, 0xc4, 0x26, 0x2e, 0x5a, 0x84, - 0x91, 0xd8, 0x61, 0xe6, 0x0c, 0xc3, 0xf9, 0x36, 0x70, 0x1b, 0x14, 0x43, 0x8f, 0x21, 0x48, 0x2b, - 0x60, 0x51, 0x11, 0xbd, 0x2b, 0x59, 0x30, 0x67, 0xc9, 0xc2, 0xf8, 0x34, 0x77, 0xd9, 0xea, 0xec, - 0x5b, 0xe7, 0xc1, 0xc2, 0xa8, 0xd5, 0xa0, 0x65, 0x7f, 0x77, 0x09, 0x20, 0x91, 0xf6, 0xd0, 0x87, - 0x5d, 0x2c, 0xe2, 0xa5, 0xde, 0xf2, 0xe1, 0xe9, 0xf1, 0x07, 0xf4, 0xbd, 0x16, 0x8c, 0x3b, 0x9e, - 0x17, 0x34, 0x9d, 0x98, 0x8d, 0x72, 0xa1, 0x37, 0x8b, 0x12, 0xed, 0x2f, 0x26, 0x35, 0x78, 0x17, - 0x5e, 0x95, 0x96, 0x0a, 0x1a, 0xa4, 0x6f, 0x2f, 0xf4, 0x86, 0xd1, 0x67, 0xe5, 0x25, 0x80, 0x2f, - 0x8f, 0xb9, 0xf4, 0x25, 0xa0, 0xc4, 0xb8, 0xb1, 0x26, 0xff, 0xa3, 0x3b, 0x46, 0x50, 0xb0, 0xa1, - 0xfc, 0xf8, 0x07, 0x86, 0xd0, 0xd3, 0x2f, 0x1e, 0x18, 0xaa, 0xeb, 0x4e, 0x38, 0xc3, 0xf9, 0x41, - 0x42, 0x34, 0xe9, 0xba, 0x8f, 0x03, 0xce, 0xfb, 0x30, 0xdd, 0x32, 0x8f, 0x5b, 0xb1, 0x9a, 0x9e, - 0xcb, 0xa3, 0x9b, 0x3a, 0x9d, 0x93, 0x03, 0x36, 0x05, 0xc0, 0x69, 0xc2, 0xa8, 0xce, 0xdd, 0xa1, - 0x6a, 0xfe, 0x56, 0x20, 0x8c, 0x98, 0xed, 0xdc, 0xb9, 0x3c, 0x88, 0x62, 0xb2, 0x47, 0x31, 0x93, - 0x73, 
0x74, 0x5d, 0xd4, 0xc5, 0x8a, 0x0a, 0x7a, 0x1b, 0x46, 0x98, 0x8f, 0x6a, 0x54, 0x1e, 0xcb, - 0xd7, 0x03, 0x9a, 0xe1, 0x15, 0x92, 0x4d, 0xc5, 0xfe, 0x46, 0x58, 0x50, 0x40, 0x37, 0x64, 0x0c, - 0x96, 0xa8, 0xe6, 0xdf, 0x89, 0x08, 0x8b, 0xc1, 0x52, 0x5a, 0xfa, 0x74, 0x12, 0x5e, 0x85, 0x97, - 0x67, 0x46, 0x0b, 0x36, 0x6a, 0x52, 0x79, 0x45, 0xfc, 0x97, 0x41, 0x88, 0xcb, 0x90, 0xdf, 0x3d, - 0x33, 0x50, 0x71, 0x32, 0x9c, 0x77, 0x4d, 0x12, 0x38, 0x4d, 0xf3, 0x4c, 0x8f, 0xcf, 0x39, 0x1f, - 0x66, 0xd2, 0x1b, 0xeb, 0x91, 0x1e, 0xd7, 0xbf, 0x3f, 0x04, 0x53, 0xe6, 0x42, 0x40, 0x0b, 0x50, - 0x12, 0x44, 0x54, 0x3c, 0x46, 0xb5, 0xb6, 0xd7, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x1e, 0x25, 0xab, - 0xae, 0xd9, 0x01, 0x26, 0xf1, 0x28, 0x15, 0x04, 0x6b, 0x58, 0x54, 0x88, 0xde, 0x0c, 0x82, 0x58, - 0x1d, 0x05, 0x6a, 0xb5, 0x2c, 0xb1, 0x52, 0x2c, 0xa0, 0xf4, 0x08, 0xd8, 0x25, 0xa1, 0x4f, 0x3c, - 0x53, 0x93, 0xa9, 0x8e, 0x80, 0x9b, 0x3a, 0x10, 0x9b, 0xb8, 0xf4, 0x48, 0x0b, 0x22, 0xb6, 0xfc, - 0x84, 0xa8, 0x9e, 0xd8, 0x55, 0x36, 0xb8, 0x8f, 0xb6, 0x84, 0xa3, 0x2f, 0xc3, 0xe3, 0xca, 0xa5, - 0x1a, 0x73, 0xcd, 0xb0, 0x6c, 0x71, 0xc4, 0xb8, 0x59, 0x3f, 0xbe, 0x9c, 0x8d, 0x86, 0xf3, 0xea, - 0xa3, 0xb7, 0x60, 0x4a, 0x88, 0xc0, 0x92, 0xe2, 0xa8, 0x69, 0xac, 0x70, 0xd3, 0x80, 0xe2, 0x14, - 0x36, 0xaa, 0xc2, 0x0c, 0x2d, 0x61, 0x52, 0xa8, 0xa4, 0xc0, 0x5d, 0xc3, 0xd5, 0x59, 0x7f, 0x33, - 0x05, 0xc7, 0x5d, 0x35, 0xd0, 0x22, 0x4c, 0x73, 0x19, 0x85, 0xde, 0x29, 0xd9, 0x3c, 0x08, 0xdf, - 0x02, 0xb5, 0x11, 0x6e, 0x9b, 0x60, 0x9c, 0xc6, 0x47, 0xd7, 0x60, 0xc2, 0x09, 0x9b, 0x3b, 0x6e, - 0x4c, 0x9a, 0x71, 0x27, 0xe4, 0x4e, 0x07, 0x9a, 0xb5, 0xc7, 0xa2, 0x06, 0xc3, 0x06, 0xa6, 0xfd, - 0x21, 0x9c, 0xcb, 0x70, 0x4b, 0xa2, 0x0b, 0xc7, 0x69, 0xbb, 0xf2, 0x9b, 0x52, 0x16, 0x92, 0x8b, - 0xf5, 0x9a, 0xfc, 0x1a, 0x0d, 0x8b, 0xae, 0x4e, 0xa6, 0x12, 0xd7, 0x22, 0x85, 0xab, 0xd5, 0xb9, - 0x2a, 0x01, 0x38, 0xc1, 0xb1, 0x7f, 0x1d, 0x40, 0x53, 0xe8, 0x0c, 0x60, 0x1f, 0x77, 0x0d, 0x26, - 0x64, 0x78, 0x7b, 0x2d, 0xac, 0xb2, 0xfa, 
0xcc, 0xeb, 0x1a, 0x0c, 0x1b, 0x98, 0xb4, 0x6f, 0xbe, - 0x0a, 0x0a, 0x9d, 0xb2, 0xc7, 0x4c, 0x42, 0x42, 0x27, 0x38, 0xe8, 0x25, 0x18, 0x8b, 0x88, 0xb7, - 0x75, 0xcb, 0xf5, 0x77, 0xc5, 0xc2, 0x56, 0x5c, 0xb8, 0x21, 0xca, 0xb1, 0xc2, 0x40, 0x4b, 0x50, - 0xec, 0xb8, 0x2d, 0xb1, 0x94, 0xe5, 0x81, 0x5f, 0xbc, 0x53, 0xab, 0x1e, 0x1f, 0x56, 0x9e, 0xce, - 0x8b, 0xda, 0x4f, 0xaf, 0xf6, 0xd1, 0x3c, 0xdd, 0x7e, 0xb4, 0x72, 0xd6, 0xdb, 0xc0, 0xc8, 0x09, - 0xdf, 0x06, 0xae, 0x02, 0x88, 0xaf, 0x96, 0x6b, 0xb9, 0x98, 0xcc, 0xda, 0x75, 0x05, 0xc1, 0x1a, - 0x16, 0x8a, 0x60, 0xb6, 0x19, 0x12, 0x47, 0xde, 0xa1, 0xb9, 0x83, 0xcd, 0xd8, 0xc3, 0x2b, 0x08, - 0x96, 0xd3, 0xc4, 0x70, 0x37, 0x7d, 0x14, 0xc0, 0x6c, 0x4b, 0x78, 0xf0, 0x27, 0x8d, 0x96, 0x4e, - 0xee, 0xd5, 0xc3, 0x0c, 0x72, 0xd2, 0x84, 0x70, 0x37, 0x6d, 0xf4, 0x15, 0x98, 0x93, 0x85, 0xdd, - 0x41, 0x13, 0xd8, 0x76, 0x29, 0x2e, 0x5d, 0x3a, 0x3a, 0xac, 0xcc, 0x55, 0x73, 0xb1, 0x70, 0x0f, - 0x0a, 0x08, 0xc3, 0x08, 0x7b, 0x4b, 0x8a, 0xca, 0xe3, 0xec, 0x9c, 0x7b, 0x21, 0x5f, 0x19, 0x40, - 0xd7, 0xfa, 0x3c, 0x7b, 0x87, 0x12, 0x26, 0xe5, 0xc9, 0xb3, 0x1c, 0x2b, 0xc4, 0x82, 0x12, 0xda, - 0x82, 0x71, 0xc7, 0xf7, 0x83, 0xd8, 0xe1, 0x22, 0xd4, 0x44, 0xbe, 0xec, 0xa7, 0x11, 0x5e, 0x4c, - 0x6a, 0x70, 0xea, 0xca, 0x4a, 0x55, 0x83, 0x60, 0x9d, 0x30, 0xba, 0x0f, 0xd3, 0xc1, 0x7d, 0xca, - 0x1c, 0xa5, 0x96, 0x22, 0x2a, 0x4f, 0xb2, 0xb6, 0x5e, 0x1b, 0x50, 0x4f, 0x6b, 0x54, 0xd6, 0xb8, - 0x96, 0x49, 0x14, 0xa7, 0x5b, 0x41, 0xf3, 0x86, 0xb6, 0x7a, 0x2a, 0xf1, 0x9d, 0x48, 0xb4, 0xd5, - 0xba, 0x72, 0x9a, 0x05, 0xe1, 0xe0, 0x26, 0xd2, 0x6c, 0xf7, 0x4f, 0xa7, 0x82, 0x70, 0x24, 0x20, - 0xac, 0xe3, 0xa1, 0x1d, 0x98, 0x48, 0x9e, 0xac, 0xc2, 0x88, 0x85, 0x00, 0x1b, 0xbf, 0x7a, 0x75, - 0xb0, 0x8f, 0xab, 0x69, 0x35, 0xf9, 0xcd, 0x41, 0x2f, 0xc1, 0x06, 0xe5, 0xb9, 0x6f, 0x81, 0x71, - 0x6d, 0x62, 0x4f, 0xe2, 0x01, 0x30, 0xf7, 0x16, 0xcc, 0xa4, 0xa7, 0xee, 0x44, 0x1e, 0x04, 0xff, - 0xbb, 0x00, 0xd3, 0x19, 0x2f, 0x57, 0x2c, 0xf2, 0x7f, 0x8a, 0xa1, 0x26, 0x81, 
0xfe, 0x4d, 0xb6, - 0x58, 0x18, 0x80, 0x2d, 0x4a, 0x1e, 0x5d, 0xcc, 0xe5, 0xd1, 0x82, 0x15, 0x0e, 0x7d, 0x14, 0x56, - 0x68, 0x9e, 0x3e, 0xc3, 0x03, 0x9d, 0x3e, 0xa7, 0xc0, 0x3e, 0x8d, 0x03, 0x6c, 0x74, 0x80, 0x03, - 0xec, 0x07, 0x0b, 0x30, 0x93, 0xb6, 0xf0, 0x3d, 0x83, 0xf7, 0x8e, 0xb7, 0x8d, 0xf7, 0x8e, 0xec, - 0x3c, 0x1a, 0x69, 0xbb, 0xe3, 0xbc, 0xb7, 0x0f, 0x9c, 0x7a, 0xfb, 0x78, 0x61, 0x20, 0x6a, 0xbd, - 0xdf, 0x41, 0xfe, 0x7e, 0x01, 0x2e, 0xa4, 0xab, 0x2c, 0x7b, 0x8e, 0xbb, 0x77, 0x06, 0x63, 0x73, - 0xdb, 0x18, 0x9b, 0x97, 0x07, 0xf9, 0x1a, 0xd6, 0xb5, 0xdc, 0x01, 0xba, 0x97, 0x1a, 0xa0, 0x85, - 0xc1, 0x49, 0xf6, 0x1e, 0xa5, 0x6f, 0x14, 0xe1, 0x52, 0x66, 0xbd, 0xe4, 0xb9, 0x60, 0xd5, 0x78, - 0x2e, 0xb8, 0x9a, 0x7a, 0x2e, 0xb0, 0x7b, 0xd7, 0x3e, 0x9d, 0xf7, 0x03, 0xe1, 0x7b, 0xcb, 0xc2, - 0x53, 0x3e, 0xe4, 0xdb, 0x81, 0xe1, 0x7b, 0xab, 0x08, 0x61, 0x93, 0xee, 0x9f, 0xa7, 0x37, 0x83, - 0x5f, 0xb7, 0xe0, 0x62, 0xe6, 0xdc, 0x9c, 0x81, 0x5e, 0x7d, 0xdd, 0xd4, 0xab, 0x3f, 0x3f, 0xf0, - 0x6a, 0xcd, 0x51, 0xb4, 0xff, 0x41, 0x31, 0xe7, 0x5b, 0x98, 0x66, 0xf2, 0x36, 0x8c, 0x3b, 0xcd, - 0x26, 0x89, 0xa2, 0xb5, 0xa0, 0xa5, 0xc2, 0x35, 0xbe, 0xcc, 0xa4, 0x8d, 0xa4, 0xf8, 0xf8, 0xb0, - 0x32, 0x97, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0xc1, 0x8c, 0x30, 0x5b, 0x38, 0xd5, 0x08, 0xb3, 0x57, - 0x01, 0xf6, 0x95, 0xbe, 0x22, 0xad, 0xe6, 0xd4, 0x34, 0x19, 0x1a, 0x16, 0xfa, 0x0e, 0x76, 0x0b, - 0xe0, 0xc6, 0x40, 0x7c, 0x29, 0xbe, 0x3a, 0xe0, 0x5c, 0xe9, 0x86, 0x45, 0x3c, 0xc8, 0x83, 0x52, - 0x09, 0x2b, 0x92, 0xe8, 0xdb, 0x60, 0x26, 0xe2, 0x31, 0x84, 0x96, 0x3d, 0x27, 0x62, 0x4e, 0x5c, - 0x62, 0x15, 0xb2, 0xc8, 0x0d, 0x8d, 0x14, 0x0c, 0x77, 0x61, 0xa3, 0x55, 0xf9, 0x51, 0x2c, 0xe0, - 0x11, 0x5f, 0x98, 0xcf, 0x26, 0x1f, 0x24, 0xf2, 0x0e, 0x9d, 0x4f, 0x0f, 0x3f, 0x1b, 0x78, 0xad, - 0xa6, 0xfd, 0x83, 0x43, 0xf0, 0x44, 0x0f, 0x26, 0x86, 0x16, 0x4d, 0x23, 0x80, 0x17, 0xd3, 0xfa, - 0xbf, 0xb9, 0xcc, 0xca, 0x86, 0x42, 0x30, 0xb5, 0x56, 0x0a, 0x1f, 0x79, 0xad, 0x7c, 0x9f, 0xa5, - 0x69, 0x66, 0xb9, 
0xa9, 0xf0, 0x17, 0x4f, 0xc8, 0x9c, 0x4f, 0x51, 0x55, 0xbb, 0x95, 0xa1, 0xef, - 0xbc, 0x3a, 0x70, 0x77, 0x06, 0x56, 0x80, 0x9e, 0xed, 0x93, 0xd1, 0xd7, 0x2c, 0x78, 0x3a, 0xb3, - 0xbf, 0x86, 0xd1, 0xd2, 0x02, 0x94, 0x9a, 0xb4, 0x50, 0x73, 0x0c, 0x4d, 0xdc, 0xb3, 0x25, 0x00, - 0x27, 0x38, 0x86, 0x6d, 0x52, 0xa1, 0xaf, 0x6d, 0xd2, 0xbf, 0xb6, 0xa0, 0x6b, 0x01, 0x9f, 0x01, - 0x27, 0xad, 0x99, 0x9c, 0xf4, 0xd3, 0x83, 0xcc, 0x65, 0x0e, 0x13, 0xfd, 0xad, 0x69, 0x78, 0x2c, - 0xc7, 0x13, 0x6c, 0x1f, 0x66, 0xb7, 0x9b, 0xc4, 0x74, 0xb9, 0x15, 0x1f, 0x93, 0xe9, 0x9d, 0xdc, - 0xd3, 0x3f, 0x97, 0x5f, 0x88, 0xbb, 0x50, 0x70, 0x77, 0x13, 0xe8, 0x6b, 0x16, 0x9c, 0x77, 0xee, - 0x47, 0x5d, 0x69, 0x01, 0xc5, 0x9a, 0x79, 0x2d, 0x53, 0x4f, 0xdb, 0x27, 0x8d, 0x20, 0x73, 0x8b, - 0x3b, 0x9f, 0x85, 0x85, 0x33, 0xdb, 0x42, 0x58, 0x44, 0xd8, 0xa5, 0xf2, 0x76, 0x0f, 0xa7, 0xf0, - 0x2c, 0x97, 0x3d, 0xce, 0x53, 0x25, 0x04, 0x2b, 0x3a, 0xe8, 0x2e, 0x94, 0xb6, 0xa5, 0x1f, 0xad, - 0xe0, 0xd9, 0x99, 0x87, 0x60, 0xa6, 0xb3, 0x2d, 0xf7, 0x1d, 0x51, 0x20, 0x9c, 0x90, 0x42, 0x6f, - 0x41, 0xd1, 0xdf, 0x8a, 0x7a, 0x65, 0x36, 0x4a, 0xd9, 0xf2, 0x71, 0xef, 0xfe, 0xf5, 0xd5, 0x06, - 0xa6, 0x15, 0xd1, 0x0d, 0x28, 0x86, 0x9b, 0x2d, 0xf1, 0xb4, 0x90, 0x29, 0x97, 0xe2, 0xa5, 0x6a, - 0xf6, 0x22, 0xe1, 0x94, 0xf0, 0x52, 0x15, 0x53, 0x12, 0xa8, 0x0e, 0xc3, 0xcc, 0x69, 0x4a, 0xbc, - 0x20, 0x64, 0x0a, 0xa4, 0x3d, 0x9c, 0x0f, 0x79, 0x08, 0x00, 0x86, 0x80, 0x39, 0x21, 0xf4, 0x36, - 0x8c, 0x34, 0x59, 0xf2, 0x1f, 0xa1, 0xf8, 0xc9, 0x8e, 0x0d, 0xd5, 0x95, 0x1e, 0x88, 0xbf, 0xa0, - 0xf2, 0x72, 0x2c, 0x28, 0xa0, 0x0d, 0x18, 0x69, 0x92, 0xf6, 0xce, 0x56, 0x24, 0xf4, 0x39, 0x9f, - 0xcd, 0xa4, 0xd5, 0x23, 0xd7, 0x95, 0xa0, 0xca, 0x30, 0xb0, 0xa0, 0x85, 0x3e, 0x0f, 0x85, 0xad, - 0xa6, 0xf0, 0xa4, 0xca, 0x7c, 0x43, 0x30, 0xc3, 0x32, 0x2c, 0x8d, 0x1c, 0x1d, 0x56, 0x0a, 0xab, - 0xcb, 0xb8, 0xb0, 0xd5, 0x44, 0xeb, 0x30, 0xba, 0xc5, 0x7d, 0xeb, 0x45, 0xb4, 0x94, 0xe7, 0xb2, - 0xdd, 0xfe, 0xbb, 0xdc, 0xef, 0xb9, 0x07, 0x90, 0x00, 
0x60, 0x49, 0x04, 0x6d, 0x00, 0x6c, 0xa9, - 0x18, 0x01, 0x22, 0x4a, 0xfb, 0xa7, 0x07, 0x89, 0x24, 0x20, 0x94, 0x1b, 0xaa, 0x14, 0x6b, 0x74, - 0xd0, 0x57, 0xa1, 0xe4, 0xc8, 0xe4, 0x73, 0x22, 0xce, 0xca, 0xab, 0x99, 0x9b, 0xb0, 0x77, 0x5e, - 0x3e, 0xbe, 0x82, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0x5d, 0x98, 0xdc, 0x8f, 0xda, 0x3b, 0x44, 0x6e, - 0x5a, 0x16, 0x76, 0x25, 0xe7, 0x90, 0xba, 0x2b, 0x10, 0xdd, 0x30, 0xee, 0x38, 0x5e, 0x17, 0x9f, - 0x61, 0xee, 0x62, 0x77, 0x75, 0x62, 0xd8, 0xa4, 0x4d, 0x07, 0xfd, 0x83, 0x4e, 0xb0, 0x79, 0x10, - 0x13, 0x11, 0xcc, 0x3d, 0x73, 0xd0, 0xdf, 0xe1, 0x28, 0xdd, 0x83, 0x2e, 0x00, 0x58, 0x12, 0xa1, - 0xdb, 0xda, 0x91, 0x89, 0x1d, 0x85, 0x06, 0xe7, 0xf9, 0xdc, 0xe1, 0xe9, 0xea, 0x6f, 0x32, 0x28, - 0x8c, 0x1f, 0x26, 0xa4, 0x18, 0x1f, 0x6c, 0xef, 0x04, 0x71, 0xe0, 0xa7, 0x78, 0xf0, 0x6c, 0x3e, - 0x1f, 0xac, 0x67, 0xe0, 0x77, 0xf3, 0xc1, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0x6a, 0xc1, 0x54, 0x3b, - 0x08, 0xe3, 0xfb, 0x41, 0x28, 0x57, 0x15, 0xea, 0x71, 0xb5, 0x37, 0x30, 0x45, 0x8b, 0xcc, 0xfa, - 0xdb, 0x84, 0xe0, 0x14, 0x4d, 0xf4, 0x25, 0x18, 0x8d, 0x9a, 0x8e, 0x47, 0x6a, 0xb7, 0xcb, 0xe7, - 0xf2, 0x0f, 0x98, 0x06, 0x47, 0xc9, 0x59, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, 0xd0, 0x2a, - 0x0c, 0xb3, 0x0c, 0x21, 0x2c, 0x0e, 0x7d, 0x4e, 0x3c, 0xaf, 0x2e, 0x0b, 0x69, 0xce, 0x87, 0x58, - 0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x12, 0x6e, 0x10, 0x95, 0x2f, 0xe4, 0xef, 0x01, 0x21, 0x18, - 0xdf, 0x6e, 0xf4, 0xda, 0x03, 0x0a, 0x09, 0x27, 0x44, 0x29, 0x17, 0xa6, 0x9c, 0xf3, 0xb1, 0x7c, - 0x2e, 0x9c, 0xcf, 0x37, 0x19, 0x17, 0xa6, 0x5c, 0x93, 0x92, 0xb0, 0xbf, 0x36, 0xda, 0x2d, 0x95, - 0xb0, 0x3b, 0xd1, 0x77, 0x5b, 0x5d, 0x06, 0x03, 0x9f, 0x1b, 0x54, 0x45, 0x73, 0x8a, 0xf2, 0xe8, - 0xd7, 0x2c, 0x78, 0xac, 0x9d, 0xf9, 0x21, 0xe2, 0x88, 0x1f, 0x4c, 0xd3, 0xc3, 0x3f, 0x5d, 0xe5, - 0x8a, 0xc8, 0x86, 0xe3, 0x9c, 0x96, 0xd2, 0x32, 0x7f, 0xf1, 0x23, 0xcb, 0xfc, 0x6b, 0x30, 0xc6, - 0xc4, 0xc8, 0x24, 0x78, 0xdc, 0x40, 0x66, 0x77, 0x4c, 0x58, 0x58, 0x16, 0x15, 0xb1, 0x22, 
0x81, - 0xbe, 0xdf, 0x82, 0xa7, 0xd2, 0x5d, 0xc7, 0x84, 0x81, 0x45, 0x20, 0x62, 0x7e, 0x1d, 0x5b, 0x15, - 0xdf, 0xff, 0x54, 0xbd, 0x17, 0xf2, 0x71, 0x3f, 0x04, 0xdc, 0xbb, 0x31, 0x54, 0xcd, 0xb8, 0x0f, - 0x8e, 0x98, 0xef, 0x89, 0x03, 0xdc, 0x09, 0x5f, 0x83, 0x89, 0xbd, 0xa0, 0xe3, 0x4b, 0x9f, 0x18, - 0xe1, 0xf1, 0xcc, 0x74, 0xd7, 0x6b, 0x5a, 0x39, 0x36, 0xb0, 0x52, 0x37, 0xc9, 0xb1, 0x87, 0xbd, - 0x49, 0x9e, 0xed, 0xfd, 0xe4, 0xeb, 0x56, 0x86, 0x60, 0xcd, 0x6f, 0xac, 0x5f, 0x30, 0x6f, 0xac, - 0xcf, 0xa6, 0x6f, 0xac, 0x5d, 0x1a, 0x4a, 0xe3, 0xb2, 0x3a, 0x78, 0xa0, 0xf6, 0x41, 0xa3, 0xf4, - 0xd9, 0x1e, 0x5c, 0xee, 0x77, 0x70, 0x30, 0x13, 0xc6, 0x96, 0x7a, 0xdb, 0x4f, 0x4c, 0x18, 0x5b, - 0xb5, 0x2a, 0x66, 0x90, 0x41, 0x23, 0xeb, 0xd8, 0xff, 0xdd, 0x82, 0x62, 0x3d, 0x68, 0x9d, 0x81, - 0xc6, 0xf5, 0x8b, 0x86, 0xc6, 0xf5, 0x89, 0x9c, 0x14, 0xd6, 0xb9, 0xfa, 0xd5, 0x95, 0x94, 0x7e, - 0xf5, 0xa9, 0x3c, 0x02, 0xbd, 0xb5, 0xa9, 0x3f, 0x5e, 0x04, 0x3d, 0xe1, 0x36, 0xfa, 0xb7, 0x0f, - 0x63, 0x0b, 0x5f, 0xec, 0x95, 0x83, 0x5b, 0x50, 0x66, 0x96, 0x8f, 0xd2, 0xcd, 0xf6, 0xcf, 0x98, - 0x49, 0xfc, 0x3d, 0xe2, 0x6e, 0xef, 0xc4, 0xa4, 0x95, 0xfe, 0x9c, 0xb3, 0x33, 0x89, 0xff, 0xaf, - 0x16, 0x4c, 0xa7, 0x5a, 0x47, 0x5e, 0x96, 0xcf, 0xde, 0x43, 0x6a, 0xda, 0x66, 0xfb, 0x3a, 0xf9, - 0xcd, 0x03, 0xa8, 0xe7, 0x2c, 0xa9, 0x85, 0x62, 0x72, 0xb9, 0x7a, 0xef, 0x8a, 0xb0, 0x86, 0x81, - 0x5e, 0x87, 0xf1, 0x38, 0x68, 0x07, 0x5e, 0xb0, 0x7d, 0x70, 0x93, 0xc8, 0x58, 0x4e, 0xea, 0xd1, - 0x71, 0x23, 0x01, 0x61, 0x1d, 0xcf, 0xfe, 0xc9, 0x22, 0xa4, 0x93, 0xb4, 0x7f, 0x73, 0x4d, 0x7e, - 0x32, 0xd7, 0xe4, 0x37, 0x2c, 0x98, 0xa1, 0xad, 0x33, 0xab, 0x32, 0x79, 0x1c, 0xaa, 0x84, 0x51, - 0x56, 0x8f, 0x84, 0x51, 0xcf, 0x52, 0xde, 0xd5, 0x0a, 0x3a, 0xb1, 0xd0, 0x62, 0x69, 0xcc, 0x89, - 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x0c, 0x85, 0x27, 0x9e, 0x8e, 0x47, 0xc2, 0x10, 0x0b, 0xa8, - 0xcc, 0x27, 0x35, 0x94, 0x93, 0x4f, 0x8a, 0x85, 0x41, 0x14, 0x96, 0x4c, 0x42, 0x30, 0xd1, 0xc2, - 0x20, 0x4a, 0x13, 0xa7, 0x04, 
0xc7, 0xfe, 0x99, 0x22, 0x4c, 0xd4, 0x83, 0x56, 0xf2, 0xa0, 0xf4, - 0x9a, 0xf1, 0xa0, 0x74, 0x39, 0xf5, 0xa0, 0x34, 0xa3, 0xe3, 0x7e, 0xf3, 0xf9, 0xe8, 0xe3, 0x7a, - 0x3e, 0xfa, 0x57, 0x16, 0x9b, 0xb5, 0xea, 0x7a, 0x43, 0xe4, 0x3b, 0x7e, 0x05, 0xc6, 0x19, 0x43, - 0x62, 0xae, 0x9f, 0xf2, 0x95, 0x85, 0xa5, 0x35, 0x58, 0x4f, 0x8a, 0xb1, 0x8e, 0x83, 0xae, 0xc0, - 0x58, 0x44, 0x9c, 0xb0, 0xb9, 0xa3, 0x78, 0x9c, 0x78, 0x83, 0xe0, 0x65, 0x58, 0x41, 0xd1, 0x3b, - 0x49, 0x50, 0xc4, 0x62, 0x7e, 0xe6, 0x5e, 0xbd, 0x3f, 0x7c, 0x8b, 0xe4, 0x47, 0x42, 0xb4, 0xef, - 0x01, 0xea, 0xc6, 0x1f, 0xc0, 0xbc, 0xab, 0x62, 0x86, 0x3f, 0x2b, 0x75, 0x85, 0x3e, 0xfb, 0x13, - 0x0b, 0xa6, 0xea, 0x41, 0x8b, 0x6e, 0xdd, 0x3f, 0x4f, 0xfb, 0x54, 0x0f, 0x3f, 0x3a, 0xd2, 0x23, - 0xfc, 0xe8, 0x3f, 0xb0, 0x60, 0xb4, 0x1e, 0xb4, 0xce, 0x40, 0xf7, 0xfd, 0x05, 0x53, 0xf7, 0xfd, - 0x78, 0xce, 0x92, 0xc8, 0x51, 0x77, 0xff, 0x5c, 0x11, 0x26, 0x69, 0x3f, 0x83, 0x6d, 0x39, 0x4b, - 0xc6, 0x88, 0x58, 0x03, 0x8c, 0x08, 0x15, 0x73, 0x03, 0xcf, 0x0b, 0xee, 0xa7, 0x67, 0x6c, 0x95, - 0x95, 0x62, 0x01, 0x45, 0x2f, 0xc1, 0x58, 0x3b, 0x24, 0xfb, 0x6e, 0xd0, 0x89, 0xd2, 0x5e, 0xce, - 0x75, 0x51, 0x8e, 0x15, 0x06, 0xbd, 0x19, 0x45, 0xae, 0xdf, 0x24, 0xd2, 0xee, 0x6b, 0x88, 0xd9, - 0x7d, 0xf1, 0xb8, 0xe2, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x3d, 0x28, 0xb1, 0xff, 0x8c, 0xa3, 0x9c, - 0x3c, 0xd3, 0x95, 0x48, 0xe6, 0x21, 0x08, 0xe0, 0x84, 0x16, 0xba, 0x0a, 0x10, 0x4b, 0x0b, 0xb5, - 0x48, 0x38, 0xe1, 0x2b, 0x59, 0x5b, 0xd9, 0xae, 0x45, 0x58, 0xc3, 0x42, 0x2f, 0x42, 0x29, 0x76, - 0x5c, 0xef, 0x96, 0xeb, 0x93, 0x48, 0x58, 0xf8, 0x89, 0x5c, 0x1d, 0xa2, 0x10, 0x27, 0x70, 0x2a, - 0xeb, 0xb0, 0x10, 0x0f, 0x3c, 0x4f, 0xde, 0x18, 0xc3, 0x66, 0xb2, 0xce, 0x2d, 0x55, 0x8a, 0x35, - 0x0c, 0xfb, 0x1a, 0x5c, 0xa8, 0x07, 0xad, 0x7a, 0x10, 0xc6, 0xab, 0x41, 0x78, 0xdf, 0x09, 0x5b, - 0x72, 0xfe, 0x2a, 0x32, 0x6d, 0x04, 0xe5, 0x3d, 0xc3, 0x7c, 0x67, 0x1a, 0x09, 0x21, 0x5e, 0x65, - 0xd2, 0xce, 0x09, 0xdd, 0xb1, 0xfe, 0x7d, 0x81, 0x31, 0x8a, 0x54, 
0xf2, 0x46, 0xf4, 0x15, 0x98, - 0x8a, 0xc8, 0x2d, 0xd7, 0xef, 0x3c, 0x90, 0x37, 0xd8, 0x1e, 0xbe, 0x6e, 0x8d, 0x15, 0x1d, 0x93, - 0xeb, 0xc1, 0xcc, 0x32, 0x9c, 0xa2, 0x46, 0x87, 0x30, 0xec, 0xf8, 0x8b, 0xd1, 0x9d, 0x88, 0x84, - 0x22, 0x79, 0x20, 0x1b, 0x42, 0x2c, 0x0b, 0x71, 0x02, 0xa7, 0x4b, 0x86, 0xfd, 0x59, 0x0f, 0x7c, - 0x1c, 0x04, 0xb1, 0x5c, 0x64, 0x2c, 0xfd, 0x94, 0x56, 0x8e, 0x0d, 0x2c, 0xb4, 0x0a, 0x28, 0xea, - 0xb4, 0xdb, 0x1e, 0x7b, 0x98, 0x76, 0xbc, 0xeb, 0x61, 0xd0, 0x69, 0xf3, 0x47, 0x41, 0x91, 0xb9, - 0xa9, 0xd1, 0x05, 0xc5, 0x19, 0x35, 0x28, 0x63, 0xd8, 0x8a, 0xd8, 0x6f, 0x11, 0xe5, 0x81, 0x6b, - 0xa4, 0x1b, 0xac, 0x08, 0x4b, 0x98, 0xfd, 0x5d, 0xec, 0xc0, 0x60, 0x39, 0xdf, 0xe2, 0x4e, 0x48, - 0xd0, 0x1e, 0x4c, 0xb6, 0xd9, 0x51, 0x2e, 0xa2, 0x67, 0x8b, 0x01, 0x7c, 0x38, 0x7b, 0x3e, 0x9e, - 0x03, 0x4a, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0x7f, 0x9a, 0x66, 0x7c, 0xa9, 0xc1, 0xaf, 0x73, 0xa3, - 0xc2, 0x4a, 0x5f, 0xc8, 0xae, 0x73, 0xf9, 0x59, 0x22, 0x93, 0x23, 0x44, 0x58, 0xfa, 0x63, 0x59, - 0x17, 0xbd, 0xc3, 0x5e, 0x53, 0x39, 0x33, 0xe8, 0x97, 0x3c, 0x9a, 0x63, 0x19, 0x0f, 0xa7, 0xa2, - 0x22, 0xd6, 0x88, 0xa0, 0x5b, 0x30, 0x29, 0x52, 0x84, 0x09, 0xd5, 0x4e, 0xd1, 0x50, 0x0c, 0x4c, - 0x62, 0x1d, 0x78, 0x9c, 0x2e, 0xc0, 0x66, 0x65, 0xb4, 0x0d, 0x4f, 0x69, 0xf9, 0x32, 0x33, 0x6c, - 0x4a, 0x39, 0x6f, 0x79, 0xfa, 0xe8, 0xb0, 0xf2, 0xd4, 0x46, 0x2f, 0x44, 0xdc, 0x9b, 0x0e, 0xba, - 0x0d, 0x17, 0x9c, 0x66, 0xec, 0xee, 0x93, 0x2a, 0x71, 0x5a, 0x9e, 0xeb, 0x13, 0x33, 0xec, 0xc7, - 0xc5, 0xa3, 0xc3, 0xca, 0x85, 0xc5, 0x2c, 0x04, 0x9c, 0x5d, 0x0f, 0x7d, 0x01, 0x4a, 0x2d, 0x3f, - 0x12, 0x63, 0x30, 0x62, 0xa4, 0x82, 0x2d, 0x55, 0xd7, 0x1b, 0xea, 0xfb, 0x93, 0x3f, 0x38, 0xa9, - 0x80, 0xb6, 0x61, 0x42, 0x77, 0xed, 0x13, 0x69, 0x84, 0x5f, 0xee, 0x71, 0xeb, 0x37, 0xfc, 0xe1, - 0xb8, 0x5e, 0x53, 0x59, 0x6c, 0x1b, 0xae, 0x72, 0x06, 0x61, 0xf4, 0x36, 0x20, 0x2a, 0xcc, 0xb8, - 0x4d, 0xb2, 0xd8, 0x64, 0x41, 0xcc, 0x99, 0x36, 0x6c, 0xcc, 0x70, 0x3f, 0x42, 0x8d, 0x2e, 0x0c, - 0x9c, 
0x51, 0x0b, 0xdd, 0xa0, 0x1c, 0x45, 0x2f, 0x15, 0x06, 0xf6, 0x52, 0x00, 0x2e, 0x57, 0x49, - 0x3b, 0x24, 0x4d, 0x27, 0x26, 0x2d, 0x93, 0x22, 0x4e, 0xd5, 0xa3, 0xe7, 0x8d, 0xca, 0x67, 0x04, - 0xa6, 0x59, 0x78, 0x77, 0x4e, 0x23, 0x7a, 0x77, 0xdc, 0x09, 0xa2, 0x78, 0x9d, 0xc4, 0xf7, 0x83, - 0x70, 0x57, 0xc4, 0xea, 0x4b, 0xc2, 0xc6, 0x26, 0x20, 0xac, 0xe3, 0x51, 0x59, 0x91, 0x3d, 0x67, - 0xd6, 0xaa, 0xec, 0x75, 0x69, 0x2c, 0xd9, 0x27, 0x37, 0x78, 0x31, 0x96, 0x70, 0x89, 0x5a, 0xab, - 0x2f, 0xb3, 0x37, 0xa3, 0x14, 0x6a, 0xad, 0xbe, 0x8c, 0x25, 0x1c, 0x91, 0xee, 0x34, 0xbb, 0x53, - 0xf9, 0xaf, 0x7d, 0xdd, 0x7c, 0x79, 0xc0, 0x4c, 0xbb, 0x3e, 0xcc, 0xa8, 0x04, 0xbf, 0x3c, 0x88, - 0x61, 0x54, 0x9e, 0x66, 0x8b, 0x64, 0xf0, 0x08, 0x88, 0x4a, 0xdb, 0x59, 0x4b, 0x51, 0xc2, 0x5d, - 0xb4, 0x8d, 0x70, 0x32, 0x33, 0x7d, 0xf3, 0x51, 0x2d, 0x40, 0x29, 0xea, 0x6c, 0xb6, 0x82, 0x3d, - 0xc7, 0xf5, 0xd9, 0x13, 0x8f, 0x26, 0x88, 0x34, 0x24, 0x00, 0x27, 0x38, 0x68, 0x15, 0xc6, 0x1c, - 0x71, 0x2d, 0x15, 0x8f, 0x32, 0x99, 0xf1, 0x25, 0xe4, 0xd5, 0x95, 0x8b, 0xd9, 0xf2, 0x1f, 0x56, - 0x75, 0xd1, 0x9b, 0x30, 0x29, 0x5c, 0x20, 0x85, 0xf5, 0xf2, 0x39, 0xd3, 0x5b, 0xa6, 0xa1, 0x03, - 0xb1, 0x89, 0x8b, 0xbe, 0x03, 0xa6, 0x28, 0x95, 0x84, 0xb1, 0x95, 0xcf, 0x0f, 0xc2, 0x11, 0xb5, - 0x3c, 0x23, 0x7a, 0x65, 0x9c, 0x22, 0x86, 0x5a, 0xf0, 0xa4, 0xd3, 0x89, 0x03, 0xa6, 0x0e, 0x36, - 0xd7, 0xff, 0x46, 0xb0, 0x4b, 0x7c, 0xf6, 0x12, 0x33, 0xb6, 0x74, 0xf9, 0xe8, 0xb0, 0xf2, 0xe4, - 0x62, 0x0f, 0x3c, 0xdc, 0x93, 0x0a, 0xba, 0x03, 0xe3, 0x71, 0xe0, 0x09, 0xb7, 0x83, 0xa8, 0xfc, - 0x58, 0x7e, 0x38, 0xac, 0x0d, 0x85, 0xa6, 0x2b, 0x5a, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x1b, 0x7c, - 0x8f, 0xb1, 0x40, 0xc1, 0x24, 0x2a, 0x3f, 0x9e, 0x3f, 0x30, 0x2a, 0x9e, 0xb0, 0xb9, 0x05, 0x45, - 0x4d, 0xac, 0x93, 0x41, 0xd7, 0x61, 0xb6, 0x1d, 0xba, 0x01, 0x5b, 0xd8, 0x4a, 0x15, 0x5f, 0x36, - 0x53, 0x4b, 0xd4, 0xd3, 0x08, 0xb8, 0xbb, 0x0e, 0xbd, 0x88, 0xc9, 0xc2, 0xf2, 0x45, 0x9e, 0xa7, - 0x8c, 0x0b, 0xa7, 0xbc, 0x0c, 0x2b, 0x28, 
0x5a, 0x63, 0x7c, 0x99, 0x5f, 0x99, 0xca, 0x73, 0xf9, - 0x71, 0x39, 0xf4, 0xab, 0x15, 0x17, 0x5c, 0xd4, 0x5f, 0x9c, 0x50, 0x98, 0xfb, 0x56, 0x98, 0xed, - 0x62, 0xbc, 0x27, 0xb2, 0x28, 0xff, 0xa7, 0xc3, 0x50, 0x52, 0x7a, 0x57, 0xb4, 0x60, 0xaa, 0xd3, - 0x2f, 0xa6, 0xd5, 0xe9, 0x63, 0x54, 0xfc, 0xd3, 0x35, 0xe8, 0x1b, 0x86, 0x3d, 0x54, 0x21, 0x3f, - 0xdd, 0x98, 0xae, 0x74, 0xe8, 0xeb, 0xfe, 0xa9, 0x5d, 0xa3, 0x8b, 0x03, 0xeb, 0xe5, 0x87, 0x7a, - 0xde, 0xcc, 0x07, 0xcc, 0xa0, 0x4c, 0x6f, 0x9a, 0xed, 0xa0, 0x55, 0xab, 0xa7, 0x53, 0x8a, 0xd6, - 0x69, 0x21, 0xe6, 0x30, 0x76, 0x57, 0xa0, 0x52, 0x02, 0xbb, 0x2b, 0x8c, 0x3e, 0xe4, 0x5d, 0x41, - 0x12, 0xc0, 0x09, 0x2d, 0xe4, 0xc1, 0x6c, 0xd3, 0xcc, 0x06, 0xab, 0x5c, 0x3e, 0x9f, 0xe9, 0x9b, - 0x97, 0xb5, 0xa3, 0xa5, 0x89, 0x5b, 0x4e, 0x53, 0xc1, 0xdd, 0x84, 0xd1, 0x9b, 0x30, 0xf6, 0x41, - 0x10, 0xb1, 0x55, 0x2c, 0x8e, 0x4a, 0xe9, 0x64, 0x37, 0xf6, 0xce, 0xed, 0x06, 0x2b, 0x3f, 0x3e, - 0xac, 0x8c, 0xd7, 0x83, 0x96, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0x00, 0x2e, 0x18, 0x0c, 0x46, 0x75, - 0x17, 0x06, 0xef, 0xee, 0x53, 0xa2, 0xb9, 0x0b, 0xb5, 0x2c, 0x4a, 0x38, 0xbb, 0x01, 0xfb, 0x17, - 0xb9, 0x76, 0x59, 0xe8, 0xa0, 0x48, 0xd4, 0xf1, 0xce, 0x22, 0x17, 0xd4, 0x8a, 0xa1, 0x1e, 0x7b, - 0xe8, 0x17, 0x8c, 0x5f, 0xb5, 0xd8, 0x0b, 0xc6, 0x06, 0xd9, 0x6b, 0x7b, 0x4e, 0x7c, 0x16, 0x7e, - 0x04, 0xef, 0xc0, 0x58, 0x2c, 0x5a, 0xeb, 0x95, 0xbe, 0x4a, 0xeb, 0x14, 0x7b, 0xc5, 0x51, 0xe7, - 0xab, 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0xcf, 0xf9, 0x0c, 0x48, 0xc8, 0x19, 0xa8, 0x2a, 0xaa, 0xa6, - 0xaa, 0xa2, 0xd2, 0xe7, 0x0b, 0x72, 0x54, 0x16, 0xff, 0xcc, 0xec, 0x37, 0xbb, 0xca, 0x7c, 0xd2, - 0x9f, 0xce, 0xec, 0x1f, 0xb6, 0xe0, 0x7c, 0x96, 0x35, 0x08, 0x95, 0x89, 0xf8, 0x45, 0x4a, 0x3d, - 0x25, 0xaa, 0x11, 0xbc, 0x2b, 0xca, 0xb1, 0xc2, 0x18, 0x38, 0x59, 0xc7, 0xc9, 0x22, 0xca, 0xdd, - 0x06, 0x33, 0x71, 0x30, 0x7a, 0x8b, 0x3b, 0x06, 0x59, 0x2a, 0xb3, 0xef, 0xc9, 0x9c, 0x82, 0xec, - 0x9f, 0x2a, 0xc0, 0x79, 0xfe, 0x16, 0xb0, 0xb8, 0x1f, 0xb8, 0xad, 0x7a, 0xd0, 
0x12, 0x6e, 0x52, - 0xef, 0xc2, 0x44, 0x5b, 0xbb, 0xfd, 0xf6, 0x8a, 0x69, 0xa5, 0xdf, 0x92, 0x93, 0x5b, 0x88, 0x5e, - 0x8a, 0x0d, 0x5a, 0xa8, 0x05, 0x13, 0x64, 0xdf, 0x6d, 0x2a, 0x85, 0x72, 0xe1, 0xc4, 0x2c, 0x5d, - 0xb5, 0xb2, 0xa2, 0xd1, 0xc1, 0x06, 0xd5, 0x47, 0x90, 0xe8, 0xcd, 0xfe, 0x11, 0x0b, 0x1e, 0xcf, - 0x89, 0x80, 0x45, 0x9b, 0xbb, 0xcf, 0x5e, 0x5d, 0x44, 0xce, 0x28, 0xd5, 0x1c, 0x7f, 0x8b, 0xc1, - 0x02, 0x8a, 0xbe, 0x04, 0xc0, 0xdf, 0x52, 0xa8, 0x50, 0x2e, 0x3e, 0x7d, 0xb0, 0xc8, 0x30, 0x5a, - 0xf8, 0x10, 0x59, 0x1f, 0x6b, 0xb4, 0xec, 0x9f, 0x28, 0xc2, 0x30, 0xd3, 0xdd, 0xa3, 0x55, 0x18, - 0xdd, 0xe1, 0xf1, 0xb6, 0x07, 0x09, 0xed, 0x9d, 0xdc, 0x6e, 0x78, 0x01, 0x96, 0x95, 0xd1, 0x1a, - 0x9c, 0x13, 0xae, 0x78, 0x55, 0xe2, 0x39, 0x07, 0xf2, 0x92, 0xcc, 0xf3, 0x2c, 0xa9, 0xcc, 0x62, - 0xb5, 0x6e, 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0xd5, 0x15, 0x65, 0x93, 0x47, 0x2a, 0x57, 0x22, 0x75, - 0x9f, 0x48, 0x9b, 0x6f, 0xc2, 0x64, 0xbb, 0x4b, 0x1d, 0x30, 0x9c, 0x88, 0xfb, 0xa6, 0x0a, 0xc0, - 0xc4, 0x65, 0x66, 0x20, 0x1d, 0x66, 0xf4, 0xb2, 0xb1, 0x13, 0x92, 0x68, 0x27, 0xf0, 0x5a, 0x22, - 0xf5, 0x79, 0x62, 0x06, 0x92, 0x82, 0xe3, 0xae, 0x1a, 0x94, 0xca, 0x96, 0xe3, 0x7a, 0x9d, 0x90, - 0x24, 0x54, 0x46, 0x4c, 0x2a, 0xab, 0x29, 0x38, 0xee, 0xaa, 0x41, 0xd7, 0xd1, 0x05, 0x91, 0x37, - 0x5b, 0x06, 0x68, 0x50, 0xb6, 0x3d, 0xa3, 0xd2, 0x51, 0xa3, 0x47, 0xd0, 0x20, 0x61, 0x5b, 0xa1, - 0x32, 0x6f, 0x6b, 0x59, 0x59, 0x85, 0x8b, 0x86, 0xa4, 0xf2, 0x30, 0xd9, 0x9b, 0x7f, 0xd7, 0x82, - 0x73, 0x19, 0x36, 0x84, 0x9c, 0x55, 0x6d, 0xbb, 0x51, 0xac, 0xd2, 0xfb, 0x68, 0xac, 0x8a, 0x97, - 0x63, 0x85, 0x41, 0xf7, 0x03, 0x67, 0x86, 0x69, 0x06, 0x28, 0x6c, 0x74, 0x04, 0xf4, 0x64, 0x0c, - 0x10, 0x5d, 0x86, 0xa1, 0x4e, 0x44, 0x42, 0x99, 0xf6, 0x58, 0xf2, 0x6f, 0xa6, 0x60, 0x64, 0x10, - 0x2a, 0x51, 0x6e, 0x2b, 0xdd, 0x9e, 0x26, 0x51, 0x72, 0xed, 0x1e, 0x87, 0xd9, 0x3f, 0x50, 0x84, - 0x8b, 0xb9, 0x96, 0xc1, 0xb4, 0x4b, 0x7b, 0x81, 0xef, 0xc6, 0x81, 0x7a, 0x17, 0xe2, 0xd1, 0x6d, - 0x48, 0x7b, 0x67, 
0x4d, 0x94, 0x63, 0x85, 0x81, 0x9e, 0x95, 0x59, 0xf1, 0xd3, 0x09, 0x8c, 0x96, - 0xaa, 0x46, 0x62, 0xfc, 0x41, 0x33, 0x91, 0x3d, 0x03, 0x43, 0xed, 0x20, 0xf0, 0xd2, 0xcc, 0x88, - 0x76, 0x37, 0x08, 0x3c, 0xcc, 0x80, 0xe8, 0x33, 0x62, 0x1c, 0x52, 0x0f, 0x21, 0xd8, 0x69, 0x05, - 0x91, 0x36, 0x18, 0xcf, 0xc3, 0xe8, 0x2e, 0x39, 0x08, 0x5d, 0x7f, 0x3b, 0xfd, 0x40, 0x76, 0x93, - 0x17, 0x63, 0x09, 0x37, 0xf3, 0x77, 0x8c, 0x9e, 0x46, 0xfe, 0x0e, 0x7d, 0x66, 0xc7, 0xfa, 0x1e, - 0x6d, 0xdf, 0x57, 0x84, 0x69, 0xbc, 0x54, 0xfd, 0xe6, 0x44, 0xdc, 0xe9, 0x9e, 0x88, 0xd3, 0xce, - 0xea, 0xd6, 0x7f, 0x36, 0x7e, 0xce, 0x82, 0x69, 0x16, 0xe3, 0x5a, 0x44, 0x67, 0x71, 0x03, 0xff, - 0x0c, 0x44, 0xb7, 0x67, 0x60, 0x38, 0xa4, 0x8d, 0xa6, 0x53, 0x35, 0xb1, 0x9e, 0x60, 0x0e, 0x43, - 0x4f, 0xc2, 0x10, 0xeb, 0x02, 0x9d, 0xbc, 0x09, 0x9e, 0xe5, 0xa2, 0xea, 0xc4, 0x0e, 0x66, 0xa5, - 0xcc, 0x4d, 0x16, 0x93, 0xb6, 0xe7, 0xf2, 0x4e, 0x27, 0x0a, 0xf5, 0x4f, 0x86, 0x9b, 0x6c, 0x66, - 0xd7, 0x3e, 0x9a, 0x9b, 0x6c, 0x36, 0xc9, 0xde, 0xd7, 0xa2, 0xff, 0x51, 0x80, 0x4b, 0x99, 0xf5, - 0x06, 0x76, 0x93, 0xed, 0x5d, 0xfb, 0x74, 0xec, 0x1c, 0xb2, 0xcd, 0x0f, 0x8a, 0x67, 0x68, 0x7e, - 0x30, 0x34, 0xa8, 0xe4, 0x38, 0x3c, 0x80, 0xf7, 0x6a, 0xe6, 0x90, 0x7d, 0x42, 0xbc, 0x57, 0x33, - 0xfb, 0x96, 0x73, 0xad, 0xfb, 0xd3, 0x42, 0xce, 0xb7, 0xb0, 0x0b, 0xde, 0x15, 0xca, 0x67, 0x18, - 0x30, 0x12, 0x92, 0xf0, 0x04, 0xe7, 0x31, 0xbc, 0x0c, 0x2b, 0x28, 0x72, 0x35, 0x3f, 0x50, 0xde, - 0xb5, 0x37, 0x4f, 0xb4, 0x65, 0xe6, 0xcd, 0xf7, 0x0f, 0x3d, 0x94, 0x4c, 0xda, 0x27, 0x74, 0x4d, - 0xbb, 0x94, 0x17, 0x07, 0xbf, 0x94, 0x4f, 0x64, 0x5f, 0xc8, 0xd1, 0x22, 0x4c, 0xef, 0xb9, 0x3e, - 0x65, 0x9b, 0x07, 0xa6, 0x28, 0xaa, 0xc2, 0x22, 0xac, 0x99, 0x60, 0x9c, 0xc6, 0x9f, 0x7b, 0x13, - 0x26, 0x1f, 0x5e, 0x8b, 0xf8, 0x8d, 0x22, 0x3c, 0xd1, 0x63, 0xdb, 0x73, 0x5e, 0x6f, 0xcc, 0x81, - 0xc6, 0xeb, 0xbb, 0xe6, 0xa1, 0x0e, 0xe7, 0xb7, 0x3a, 0x9e, 0x77, 0xc0, 0x2c, 0xfc, 0x48, 0x4b, - 0x62, 0x08, 0x59, 0x51, 0x05, 0xb0, 0x5f, 0xcd, 0xc0, 
0xc1, 0x99, 0x35, 0xd1, 0xdb, 0x80, 0x02, - 0x91, 0xb2, 0x36, 0x09, 0x90, 0xc3, 0x06, 0xbe, 0x98, 0x6c, 0xc6, 0xdb, 0x5d, 0x18, 0x38, 0xa3, - 0x16, 0x15, 0xfa, 0xe9, 0xa9, 0x74, 0xa0, 0xba, 0x95, 0x12, 0xfa, 0xb1, 0x0e, 0xc4, 0x26, 0x2e, - 0xba, 0x0e, 0xb3, 0xce, 0xbe, 0xe3, 0xf2, 0x80, 0x89, 0x92, 0x00, 0x97, 0xfa, 0x95, 0xee, 0x6e, - 0x31, 0x8d, 0x80, 0xbb, 0xeb, 0xa4, 0x1c, 0x51, 0x47, 0xf2, 0x1d, 0x51, 0x7b, 0xf3, 0xc5, 0x7e, - 0xaa, 0x58, 0xfb, 0x3f, 0x5b, 0xf4, 0xf8, 0xca, 0x48, 0x3b, 0x4f, 0xc7, 0x41, 0xa9, 0x14, 0x35, - 0x9f, 0x50, 0x35, 0x0e, 0xcb, 0x3a, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0xa3, 0x82, 0x21, - 0xba, 0x0b, 0xa7, 0x6f, 0x85, 0x81, 0xbe, 0x0c, 0xa3, 0x2d, 0x77, 0xdf, 0x8d, 0x82, 0x50, 0x6c, - 0x96, 0x13, 0x1a, 0x93, 0x27, 0x7c, 0xb0, 0xca, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0xbe, 0x02, 0x4c, - 0xca, 0x16, 0xdf, 0xe9, 0x04, 0xb1, 0x73, 0x06, 0xc7, 0xf2, 0x75, 0xe3, 0x58, 0xfe, 0x4c, 0x2f, - 0xcf, 0x77, 0xd6, 0xa5, 0xdc, 0xe3, 0xf8, 0x76, 0xea, 0x38, 0x7e, 0xae, 0x3f, 0xa9, 0xde, 0xc7, - 0xf0, 0xbf, 0xb0, 0x60, 0xd6, 0xc0, 0x3f, 0x83, 0xd3, 0x60, 0xd5, 0x3c, 0x0d, 0x9e, 0xee, 0xfb, - 0x0d, 0x39, 0xa7, 0xc0, 0xd7, 0x0b, 0xa9, 0xbe, 0x33, 0xee, 0xff, 0x01, 0x0c, 0xed, 0x38, 0x61, - 0xab, 0x57, 0xd8, 0xdf, 0xae, 0x4a, 0xf3, 0x37, 0x9c, 0xb0, 0xc5, 0x79, 0xf8, 0x4b, 0x2a, 0xf7, - 0xa8, 0x13, 0xb6, 0xfa, 0xfa, 0xe5, 0xb0, 0xa6, 0xd0, 0x35, 0x18, 0x89, 0x9a, 0x41, 0x5b, 0xd9, - 0xe4, 0x5d, 0xe6, 0x79, 0x49, 0x69, 0xc9, 0xf1, 0x61, 0x05, 0x99, 0xcd, 0xd1, 0x62, 0x2c, 0xf0, - 0xe7, 0xb6, 0xa1, 0xa4, 0x9a, 0x7e, 0xa4, 0x1e, 0x15, 0xbf, 0x55, 0x84, 0x73, 0x19, 0xeb, 0x02, - 0x45, 0xc6, 0x68, 0xbd, 0x32, 0xe0, 0x72, 0xfa, 0x88, 0xe3, 0x15, 0xb1, 0x1b, 0x4b, 0x4b, 0xcc, - 0xff, 0xc0, 0x8d, 0xde, 0x89, 0x48, 0xba, 0x51, 0x5a, 0xd4, 0xbf, 0x51, 0xda, 0xd8, 0x99, 0x0d, - 0x35, 0x6d, 0x48, 0xf5, 0xf4, 0x91, 0xce, 0xe9, 0x1f, 0x15, 0xe1, 0x7c, 0x56, 0xc0, 0x0c, 0xf4, - 0x9d, 0xa9, 0x24, 0x42, 0xaf, 0x0d, 0x1a, 0x6a, 0x83, 0x67, 0x16, 0x12, 0x11, 0xc6, 0xe6, 
0xcd, - 0xb4, 0x42, 0x7d, 0x87, 0x59, 0xb4, 0xc9, 0x1c, 0xe5, 0x42, 0x9e, 0xfc, 0x49, 0x6e, 0xf1, 0xcf, - 0x0d, 0xdc, 0x01, 0x91, 0x35, 0x2a, 0x4a, 0x39, 0xca, 0xc9, 0xe2, 0xfe, 0x8e, 0x72, 0xb2, 0xe5, - 0x39, 0x17, 0xc6, 0xb5, 0xaf, 0x79, 0xa4, 0x33, 0xbe, 0x4b, 0x4f, 0x14, 0xad, 0xdf, 0x8f, 0x74, - 0xd6, 0x7f, 0xc4, 0x82, 0x94, 0x25, 0x9c, 0x52, 0x49, 0x59, 0xb9, 0x2a, 0xa9, 0xcb, 0x30, 0x14, - 0x06, 0x1e, 0x49, 0xe7, 0x95, 0xc1, 0x81, 0x47, 0x30, 0x83, 0x50, 0x8c, 0x38, 0x51, 0x48, 0x4c, - 0xe8, 0x97, 0x2d, 0x71, 0x8d, 0x7a, 0x06, 0x86, 0x3d, 0xb2, 0x4f, 0xa4, 0x36, 0x42, 0xf1, 0xe4, - 0x5b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x73, 0x43, 0xf0, 0x54, 0x4f, 0x57, 0x53, 0x7a, 0x65, 0xd9, - 0x76, 0x62, 0x72, 0xdf, 0x39, 0x48, 0x47, 0xbd, 0xbe, 0xce, 0x8b, 0xb1, 0x84, 0x33, 0xbb, 0x5d, - 0x1e, 0x38, 0x33, 0xa5, 0xc0, 0x13, 0xf1, 0x32, 0x05, 0xd4, 0x54, 0x1c, 0x15, 0x4f, 0x43, 0x71, - 0x74, 0x15, 0x20, 0x8a, 0xbc, 0x15, 0x9f, 0x4a, 0x60, 0x2d, 0x61, 0x10, 0x9c, 0x04, 0x58, 0x6d, - 0xdc, 0x12, 0x10, 0xac, 0x61, 0xa1, 0x2a, 0xcc, 0xb4, 0xc3, 0x20, 0xe6, 0xfa, 0xd0, 0x2a, 0x37, - 0x45, 0x19, 0x36, 0xbd, 0xfc, 0xea, 0x29, 0x38, 0xee, 0xaa, 0x81, 0x5e, 0x87, 0x71, 0xe1, 0xf9, - 0x57, 0x0f, 0x02, 0x4f, 0xa8, 0x6a, 0x94, 0x61, 0x43, 0x23, 0x01, 0x61, 0x1d, 0x4f, 0xab, 0xc6, - 0x94, 0xac, 0xa3, 0x99, 0xd5, 0xb8, 0xa2, 0x55, 0xc3, 0x4b, 0x05, 0xcf, 0x19, 0x1b, 0x28, 0x78, - 0x4e, 0xa2, 0xbc, 0x2a, 0x0d, 0xfc, 0xae, 0x04, 0x7d, 0xd5, 0x3d, 0x3f, 0x3d, 0x04, 0xe7, 0xc4, - 0xc2, 0x79, 0xd4, 0xcb, 0xe5, 0x4e, 0xf7, 0x72, 0x39, 0x0d, 0xf5, 0xd6, 0x37, 0xd7, 0xcc, 0x59, - 0xaf, 0x99, 0x5f, 0x2c, 0xc2, 0x08, 0x9f, 0x8a, 0x33, 0x90, 0xe1, 0x57, 0x85, 0xd2, 0xaf, 0x47, - 0xd8, 0x18, 0xde, 0x97, 0xf9, 0xaa, 0x13, 0x3b, 0xfc, 0xfc, 0x52, 0x6c, 0x34, 0x51, 0x0f, 0xa2, - 0x79, 0x83, 0xd1, 0xce, 0xa5, 0xb4, 0x5a, 0xc0, 0x69, 0x68, 0x6c, 0xf7, 0x2b, 0x00, 0x11, 0x4b, - 0x9c, 0x4f, 0x69, 0x88, 0x00, 0x44, 0x2f, 0xf4, 0x68, 0xbd, 0xa1, 0x90, 0x79, 0x1f, 0x92, 0x25, - 0xa8, 0x00, 0x58, 0xa3, 0x38, 
0xf7, 0x06, 0x94, 0x14, 0x72, 0x3f, 0x15, 0xc0, 0x84, 0x7e, 0xea, - 0x7d, 0x11, 0xa6, 0x53, 0x6d, 0x9d, 0x48, 0x83, 0xf0, 0xf3, 0x16, 0x4c, 0xf3, 0x2e, 0xaf, 0xf8, - 0xfb, 0x62, 0xb3, 0x7f, 0x08, 0xe7, 0xbd, 0x8c, 0x4d, 0x27, 0x66, 0x74, 0xf0, 0x4d, 0xaa, 0x34, - 0x06, 0x59, 0x50, 0x9c, 0xd9, 0x06, 0xba, 0x02, 0x63, 0xdc, 0xd1, 0xc5, 0xf1, 0x84, 0x73, 0xc2, - 0x04, 0x4f, 0x44, 0xc1, 0xcb, 0xb0, 0x82, 0xda, 0xbf, 0x6d, 0xc1, 0x2c, 0xef, 0xf9, 0x4d, 0x72, - 0xa0, 0x6e, 0xc7, 0x1f, 0x67, 0xdf, 0x45, 0x9e, 0x8d, 0x42, 0x4e, 0x9e, 0x0d, 0xfd, 0xd3, 0x8a, - 0x3d, 0x3f, 0xed, 0xa7, 0x2c, 0x10, 0x2b, 0xf0, 0x0c, 0xee, 0x81, 0xdf, 0x6a, 0xde, 0x03, 0xe7, - 0xf2, 0x17, 0x75, 0xce, 0x05, 0xf0, 0x4f, 0x2c, 0x98, 0xe1, 0x08, 0xc9, 0x43, 0xe4, 0xc7, 0x3a, - 0x0f, 0x83, 0x24, 0x7f, 0x53, 0xd9, 0xb6, 0xb3, 0x3f, 0xca, 0x98, 0xac, 0xa1, 0x9e, 0x93, 0xd5, - 0x92, 0x1b, 0xe8, 0x04, 0x49, 0x0d, 0x4f, 0x1c, 0x1a, 0xd6, 0xfe, 0x43, 0x0b, 0x10, 0x6f, 0xc6, - 0x38, 0x97, 0xe9, 0x69, 0xc7, 0x4a, 0x35, 0x4d, 0x50, 0xc2, 0x6a, 0x14, 0x04, 0x6b, 0x58, 0xa7, - 0x32, 0x3c, 0xa9, 0xd7, 0xe4, 0x62, 0xff, 0xd7, 0xe4, 0x13, 0x8c, 0xe8, 0x5f, 0x1f, 0x82, 0xb4, - 0x25, 0x34, 0xba, 0x0b, 0x13, 0x4d, 0xa7, 0xed, 0x6c, 0xba, 0x9e, 0x1b, 0xbb, 0x24, 0xea, 0x65, - 0x86, 0xb2, 0xac, 0xe1, 0x89, 0x77, 0x42, 0xad, 0x04, 0x1b, 0x74, 0xd0, 0x3c, 0x40, 0x3b, 0x74, - 0xf7, 0x5d, 0x8f, 0x6c, 0xb3, 0xab, 0x30, 0x73, 0x87, 0xe2, 0xb6, 0x15, 0xb2, 0x14, 0x6b, 0x18, - 0x19, 0xee, 0x33, 0xc5, 0x47, 0xe7, 0x3e, 0x33, 0x74, 0x42, 0xf7, 0x99, 0xe1, 0x81, 0xdc, 0x67, - 0x30, 0x3c, 0x26, 0xcf, 0x6e, 0xfa, 0x7f, 0xd5, 0xf5, 0x88, 0x10, 0xd8, 0xb8, 0x93, 0xd4, 0xdc, - 0xd1, 0x61, 0xe5, 0x31, 0x9c, 0x89, 0x81, 0x73, 0x6a, 0xa2, 0x2f, 0x41, 0xd9, 0xf1, 0xbc, 0xe0, - 0xbe, 0x1a, 0xb5, 0x95, 0xa8, 0xe9, 0x78, 0x49, 0xa4, 0xf4, 0xb1, 0xa5, 0x27, 0x8f, 0x0e, 0x2b, - 0xe5, 0xc5, 0x1c, 0x1c, 0x9c, 0x5b, 0xdb, 0xde, 0x85, 0x73, 0x0d, 0x12, 0xca, 0x3c, 0xa9, 0x6a, - 0x8b, 0x6d, 0x40, 0x29, 0x4c, 0x31, 0x95, 0x81, 0x62, 0x95, 0x68, 
0x51, 0x2c, 0x25, 0x13, 0x49, - 0x08, 0xd9, 0x7f, 0x6c, 0xc1, 0xa8, 0xb0, 0xae, 0x3e, 0x03, 0x59, 0x66, 0xd1, 0xd0, 0x47, 0x56, - 0xb2, 0x19, 0x2f, 0xeb, 0x4c, 0xae, 0x26, 0xb2, 0x96, 0xd2, 0x44, 0x3e, 0xdd, 0x8b, 0x48, 0x6f, - 0x1d, 0xe4, 0x0f, 0x15, 0x61, 0xca, 0xb4, 0x2c, 0x3f, 0x83, 0x21, 0x58, 0x87, 0xd1, 0x48, 0xb8, - 0x31, 0x14, 0xf2, 0xed, 0x57, 0xd3, 0x93, 0x98, 0x58, 0xb9, 0x08, 0xc7, 0x05, 0x49, 0x24, 0xd3, - 0x3f, 0xa2, 0xf8, 0x08, 0xfd, 0x23, 0xfa, 0x19, 0xf7, 0x0f, 0x9d, 0x86, 0x71, 0xbf, 0xfd, 0x4b, - 0x8c, 0xf9, 0xeb, 0xe5, 0x67, 0x20, 0x17, 0x5c, 0x37, 0x8f, 0x09, 0xbb, 0xc7, 0xca, 0x12, 0x9d, - 0xca, 0x91, 0x0f, 0xfe, 0xb1, 0x05, 0xe3, 0x02, 0xf1, 0x0c, 0xba, 0xfd, 0x6d, 0x66, 0xb7, 0x9f, - 0xe8, 0xd1, 0xed, 0x9c, 0xfe, 0xfe, 0xdd, 0x82, 0xea, 0x6f, 0x3d, 0x08, 0xe3, 0x81, 0x32, 0x67, - 0x8c, 0xd1, 0xdb, 0x60, 0xd0, 0x0c, 0x3c, 0x71, 0x98, 0x3f, 0x99, 0xf8, 0xc9, 0xf2, 0xf2, 0x63, - 0xed, 0x37, 0x56, 0xd8, 0xcc, 0x8d, 0x33, 0x08, 0x63, 0x71, 0x80, 0x26, 0x6e, 0x9c, 0x41, 0x18, - 0x63, 0x06, 0x41, 0x2d, 0x80, 0xd8, 0x09, 0xb7, 0x49, 0x4c, 0xcb, 0x84, 0xcb, 0x7d, 0xfe, 0x2e, - 0xec, 0xc4, 0xae, 0x37, 0xef, 0xfa, 0x71, 0x14, 0x87, 0xf3, 0x35, 0x3f, 0xbe, 0x1d, 0xf2, 0xbb, - 0x81, 0xe6, 0xf8, 0xaa, 0x68, 0x61, 0x8d, 0xae, 0xf4, 0xbc, 0x62, 0x6d, 0x0c, 0x9b, 0x0f, 0x85, - 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x0d, 0xc6, 0x93, 0xd9, 0x00, 0x9d, 0xcc, 0x27, 0xf5, 0x37, - 0xc6, 0xd4, 0xd0, 0xb2, 0x57, 0x82, 0xaa, 0xee, 0xf9, 0xda, 0x9b, 0x05, 0xd2, 0x86, 0x75, 0xb7, - 0x80, 0xc4, 0x3d, 0x16, 0x7d, 0x7b, 0xd7, 0xfb, 0xf1, 0xcb, 0x7d, 0x78, 0xe9, 0x09, 0x5e, 0x8c, - 0x59, 0xf8, 0x55, 0x16, 0xa6, 0xb2, 0x56, 0x4f, 0xe7, 0x36, 0x59, 0x96, 0x00, 0x9c, 0xe0, 0xa0, - 0x05, 0x71, 0xb3, 0xe4, 0xfa, 0xb9, 0x27, 0x52, 0x37, 0x4b, 0xf9, 0xf9, 0xda, 0xd5, 0xf2, 0x15, - 0x18, 0x57, 0xf9, 0xe2, 0xea, 0x3c, 0xed, 0x96, 0x08, 0x40, 0xb0, 0x92, 0x14, 0x63, 0x1d, 0x07, - 0x6d, 0xc0, 0x74, 0xc4, 0x93, 0xd9, 0x49, 0x67, 0x28, 0xa1, 0x37, 0x78, 0x41, 0xbe, 0x3b, 0x37, - 0x4c, 
0xf0, 0x31, 0x2b, 0xe2, 0x9b, 0x55, 0xba, 0x4f, 0xa5, 0x49, 0xa0, 0xb7, 0x60, 0xca, 0xd3, - 0x93, 0x7a, 0xd7, 0x85, 0x5a, 0x41, 0x99, 0x65, 0x1a, 0x29, 0xbf, 0xeb, 0x38, 0x85, 0x4d, 0x85, - 0x00, 0xbd, 0x44, 0x44, 0x2f, 0x73, 0xfc, 0x6d, 0x12, 0x89, 0x6c, 0x57, 0x4c, 0x08, 0xb8, 0x95, - 0x83, 0x83, 0x73, 0x6b, 0xa3, 0x6b, 0x30, 0x21, 0x3f, 0x5f, 0x73, 0x0e, 0x4c, 0x8c, 0x7f, 0x35, - 0x18, 0x36, 0x30, 0xd1, 0x7d, 0xb8, 0x20, 0xff, 0x6f, 0x84, 0xce, 0xd6, 0x96, 0xdb, 0x14, 0xbe, - 0x99, 0xe3, 0x8c, 0xc4, 0xa2, 0xf4, 0x84, 0x58, 0xc9, 0x42, 0x3a, 0x3e, 0xac, 0x5c, 0x16, 0xa3, - 0x96, 0x09, 0x67, 0x93, 0x98, 0x4d, 0x1f, 0xad, 0xc1, 0xb9, 0x1d, 0xe2, 0x78, 0xf1, 0xce, 0xf2, - 0x0e, 0x69, 0xee, 0xca, 0x4d, 0xc4, 0x5c, 0x0e, 0x35, 0x93, 0xd9, 0x1b, 0xdd, 0x28, 0x38, 0xab, - 0x1e, 0x7a, 0x0f, 0xca, 0xed, 0xce, 0xa6, 0xe7, 0x46, 0x3b, 0xeb, 0x41, 0xcc, 0x9e, 0xba, 0x55, - 0xba, 0x35, 0xe1, 0x9b, 0xa8, 0xdc, 0x2d, 0xeb, 0x39, 0x78, 0x38, 0x97, 0x02, 0xfa, 0x10, 0x2e, - 0xa4, 0x16, 0x83, 0xf0, 0x94, 0x9a, 0xca, 0x8f, 0x05, 0xd9, 0xc8, 0xaa, 0xc0, 0x3d, 0x66, 0x33, - 0x41, 0x38, 0xbb, 0x89, 0x8f, 0x66, 0x00, 0xf1, 0x01, 0xad, 0xac, 0x49, 0x37, 0xe8, 0xab, 0x30, - 0xa1, 0xaf, 0x22, 0x71, 0xc0, 0x3c, 0xdb, 0x2f, 0x81, 0xbd, 0x90, 0x8d, 0xd4, 0x8a, 0xd2, 0x61, - 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0xfe, 0x3e, 0x74, 0x0b, 0xc6, 0x9a, 0x9e, 0x4b, 0xfc, 0xb8, 0x56, - 0xef, 0xe5, 0x53, 0xbf, 0x2c, 0x70, 0xc4, 0x80, 0x89, 0xe0, 0x79, 0xbc, 0x0c, 0x2b, 0x0a, 0xf6, - 0xaf, 0x14, 0xa0, 0xd2, 0x27, 0x12, 0x63, 0x4a, 0x07, 0x68, 0x0d, 0xa4, 0x03, 0x5c, 0x94, 0xc9, - 0xe3, 0xd6, 0x53, 0xf7, 0xcf, 0x54, 0x62, 0xb8, 0xe4, 0x16, 0x9a, 0xc6, 0x1f, 0xd8, 0x6e, 0x52, - 0x57, 0x23, 0x0e, 0xf5, 0xb5, 0xe8, 0x35, 0x9e, 0x0f, 0x86, 0x07, 0x97, 0xe8, 0x73, 0x55, 0xc1, - 0xf6, 0x2f, 0x15, 0xe0, 0x82, 0x1a, 0xc2, 0x3f, 0xbf, 0x03, 0x77, 0xa7, 0x7b, 0xe0, 0x4e, 0x41, - 0x91, 0x6e, 0xdf, 0x86, 0x91, 0xc6, 0x41, 0xd4, 0x8c, 0xbd, 0x01, 0x04, 0xa0, 0x67, 0xcc, 0xd8, - 0x32, 0xea, 0x98, 0x36, 0xe2, 0xcb, 0xfc, 
0x15, 0x0b, 0xa6, 0x37, 0x96, 0xeb, 0x8d, 0xa0, 0xb9, - 0x4b, 0xe2, 0x45, 0xae, 0x26, 0xc2, 0x42, 0xfe, 0xb1, 0x1e, 0x52, 0xae, 0xc9, 0x92, 0x98, 0x2e, - 0xc3, 0xd0, 0x4e, 0x10, 0xc5, 0xe9, 0x57, 0xb6, 0x1b, 0x41, 0x14, 0x63, 0x06, 0xb1, 0x7f, 0xc7, - 0x82, 0x61, 0x96, 0xf2, 0xb4, 0x5f, 0x6a, 0xdc, 0x41, 0xbe, 0x0b, 0xbd, 0x0e, 0x23, 0x64, 0x6b, - 0x8b, 0x34, 0x63, 0x31, 0xab, 0xd2, 0xbb, 0x6e, 0x64, 0x85, 0x95, 0xd2, 0x43, 0x9f, 0x35, 0xc6, - 0xff, 0x62, 0x81, 0x8c, 0xee, 0x41, 0x29, 0x76, 0xf7, 0xc8, 0x62, 0xab, 0x25, 0xde, 0x29, 0x1e, - 0xc2, 0x99, 0x71, 0x43, 0x12, 0xc0, 0x09, 0x2d, 0xfb, 0x07, 0x0a, 0x00, 0x89, 0x43, 0x6f, 0xbf, - 0x4f, 0x5c, 0xea, 0xca, 0xfe, 0xfb, 0x6c, 0x46, 0xf6, 0x5f, 0x94, 0x10, 0xcc, 0xc8, 0xfd, 0xab, - 0x86, 0xa9, 0x38, 0xd0, 0x30, 0x0d, 0x9d, 0x64, 0x98, 0x96, 0x61, 0x36, 0x71, 0x48, 0x36, 0xa3, - 0x33, 0xb0, 0x78, 0xec, 0x1b, 0x69, 0x20, 0xee, 0xc6, 0xb7, 0xbf, 0xd7, 0x02, 0xe1, 0x6e, 0x30, - 0xc0, 0x62, 0x7e, 0x57, 0x26, 0xea, 0x34, 0x02, 0xba, 0x5e, 0xce, 0xf7, 0xbf, 0x10, 0x61, 0x5c, - 0xd5, 0xe1, 0x61, 0x04, 0x6f, 0x35, 0x68, 0xd9, 0x2d, 0x10, 0xd0, 0x2a, 0x61, 0x4a, 0x86, 0xfe, - 0xbd, 0xb9, 0x0a, 0xd0, 0x62, 0xb8, 0x5a, 0xe2, 0x3f, 0xc5, 0xaa, 0xaa, 0x0a, 0x82, 0x35, 0x2c, - 0xfb, 0x6f, 0x16, 0x60, 0x5c, 0x06, 0x10, 0xa5, 0xf7, 0xf8, 0xfe, 0xad, 0x9c, 0x28, 0x67, 0x00, - 0xcb, 0x94, 0x49, 0x09, 0xab, 0xd0, 0xf2, 0x7a, 0xa6, 0x4c, 0x09, 0xc0, 0x09, 0x0e, 0x7a, 0x1e, - 0x46, 0xa3, 0xce, 0x26, 0x43, 0x4f, 0x19, 0xd1, 0x37, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x25, 0x98, - 0xe1, 0xf5, 0xc2, 0xa0, 0xed, 0x6c, 0x73, 0x0d, 0xd2, 0xb0, 0xf2, 0x6a, 0x9b, 0x59, 0x4b, 0xc1, - 0x8e, 0x0f, 0x2b, 0xe7, 0xd3, 0x65, 0x4c, 0xf7, 0xd8, 0x45, 0x85, 0xee, 0x8b, 0x99, 0xb4, 0xc3, - 0x0c, 0xba, 0x01, 0x23, 0x9c, 0xe5, 0x09, 0x16, 0xd4, 0xe3, 0x45, 0x49, 0x73, 0xb3, 0x61, 0x41, - 0xd4, 0x05, 0xd7, 0x14, 0xf5, 0xd1, 0x7b, 0x30, 0xde, 0x0a, 0xee, 0xfb, 0xf7, 0x9d, 0xb0, 0xb5, - 0x58, 0xaf, 0x89, 0x55, 0x93, 0x29, 0x39, 0x55, 0x13, 0x34, 0xdd, 0x75, 0x87, 
0x69, 0x4f, 0x13, - 0x10, 0xd6, 0xc9, 0xa1, 0x0d, 0x16, 0xe3, 0x89, 0xa7, 0xb2, 0xef, 0x65, 0x75, 0xa6, 0xb2, 0xdf, - 0x6b, 0x94, 0x27, 0x45, 0x20, 0x28, 0x91, 0x08, 0x3f, 0x21, 0x64, 0x7f, 0xed, 0x1c, 0x18, 0xab, - 0xd5, 0xc8, 0x19, 0x60, 0x9d, 0x52, 0xce, 0x00, 0x0c, 0x63, 0x64, 0xaf, 0x1d, 0x1f, 0x54, 0xdd, - 0xb0, 0x57, 0xd2, 0x99, 0x15, 0x81, 0xd3, 0x4d, 0x53, 0x42, 0xb0, 0xa2, 0x93, 0x9d, 0xd8, 0xa1, - 0xf8, 0x31, 0x26, 0x76, 0x18, 0x3a, 0xc3, 0xc4, 0x0e, 0xeb, 0x30, 0xba, 0xed, 0xc6, 0x98, 0xb4, - 0x03, 0x71, 0xdc, 0x67, 0xae, 0x84, 0xeb, 0x1c, 0xa5, 0x3b, 0xc0, 0xb8, 0x00, 0x60, 0x49, 0x04, - 0xbd, 0xad, 0xf6, 0xc0, 0x48, 0xbe, 0xb4, 0xdc, 0xfd, 0xf8, 0x90, 0xb9, 0x0b, 0x44, 0x22, 0x87, - 0xd1, 0x87, 0x4d, 0xe4, 0xb0, 0x2a, 0xd3, 0x2f, 0x8c, 0xe5, 0x1b, 0x69, 0xb2, 0xec, 0x0a, 0x7d, - 0x92, 0x2e, 0x18, 0x89, 0x2a, 0x4a, 0xa7, 0x97, 0xa8, 0xe2, 0x7b, 0x2d, 0xb8, 0xd0, 0xce, 0xca, - 0xd9, 0x22, 0xd2, 0x27, 0xbc, 0x3e, 0x70, 0x52, 0x1a, 0xa3, 0x41, 0x76, 0x6d, 0xca, 0x44, 0xc3, - 0xd9, 0xcd, 0xd1, 0x81, 0x0e, 0x37, 0x5b, 0x22, 0xe7, 0xc2, 0x33, 0x39, 0x19, 0x2f, 0x7a, 0xe4, - 0xb9, 0x78, 0x34, 0x79, 0x16, 0x92, 0x5c, 0x17, 0x93, 0x1f, 0x39, 0xd7, 0xc5, 0xdb, 0x2a, 0xd7, - 0x45, 0x8f, 0x48, 0x3a, 0x3c, 0x93, 0x45, 0xdf, 0x0c, 0x17, 0x5a, 0x96, 0x8a, 0xe9, 0xd3, 0xc8, - 0x52, 0xf1, 0x15, 0x93, 0xd9, 0xf3, 0x94, 0x09, 0x2f, 0xf6, 0x61, 0xf6, 0x06, 0xdd, 0xde, 0xec, - 0x9e, 0x67, 0xe4, 0x98, 0x7d, 0xa8, 0x8c, 0x1c, 0x77, 0xf5, 0x5c, 0x17, 0xa8, 0x4f, 0x32, 0x07, - 0x8a, 0x34, 0x60, 0x86, 0x8b, 0xbb, 0xfa, 0x11, 0x74, 0x2e, 0x9f, 0xae, 0x3a, 0x69, 0xba, 0xe9, - 0x66, 0x1d, 0x42, 0xdd, 0x99, 0x33, 0xce, 0x9f, 0x4d, 0xe6, 0x8c, 0x0b, 0xa7, 0x9e, 0x39, 0xe3, - 0xb1, 0x33, 0xc8, 0x9c, 0xf1, 0xf8, 0xc7, 0x9a, 0x39, 0xa3, 0xfc, 0x08, 0x32, 0x67, 0xac, 0x27, - 0x99, 0x33, 0x2e, 0xe6, 0x4f, 0x49, 0x86, 0x55, 0x5a, 0x4e, 0xbe, 0x8c, 0xbb, 0x50, 0x6a, 0x4b, - 0x9f, 0x6a, 0x11, 0xea, 0x27, 0x3b, 0x51, 0x5f, 0x96, 0xe3, 0x35, 0x9f, 0x12, 0x05, 0xc2, 0x09, - 0x29, 0x4a, 0x37, 
0xc9, 0x9f, 0xf1, 0x44, 0x0f, 0xc5, 0x58, 0x96, 0xca, 0x21, 0x3f, 0x6b, 0x86, - 0xfd, 0x57, 0x0b, 0x70, 0xa9, 0xf7, 0xba, 0x4e, 0xf4, 0x15, 0xf5, 0x44, 0xbf, 0x9e, 0xd2, 0x57, - 0xf0, 0x4b, 0x40, 0x82, 0x35, 0x70, 0xe0, 0x89, 0xeb, 0x30, 0xab, 0xcc, 0xd1, 0x3c, 0xb7, 0x79, - 0xa0, 0x25, 0xf0, 0x53, 0xae, 0x31, 0x8d, 0x34, 0x02, 0xee, 0xae, 0x83, 0x16, 0x61, 0xda, 0x28, - 0xac, 0x55, 0x85, 0xb0, 0xaf, 0x14, 0x24, 0x0d, 0x13, 0x8c, 0xd3, 0xf8, 0xf6, 0xd7, 0x2d, 0x78, - 0x3c, 0x27, 0x64, 0xf5, 0xc0, 0x71, 0x15, 0xb6, 0x60, 0xba, 0x6d, 0x56, 0xed, 0x13, 0x7e, 0xc5, - 0x08, 0x8c, 0xad, 0xfa, 0x9a, 0x02, 0xe0, 0x34, 0xd1, 0xa5, 0x2b, 0xbf, 0xf6, 0x7b, 0x97, 0x3e, - 0xf5, 0x9b, 0xbf, 0x77, 0xe9, 0x53, 0xbf, 0xfd, 0x7b, 0x97, 0x3e, 0xf5, 0x17, 0x8f, 0x2e, 0x59, - 0xbf, 0x76, 0x74, 0xc9, 0xfa, 0xcd, 0xa3, 0x4b, 0xd6, 0x6f, 0x1f, 0x5d, 0xb2, 0x7e, 0xf7, 0xe8, - 0x92, 0xf5, 0x03, 0xbf, 0x7f, 0xe9, 0x53, 0xef, 0x16, 0xf6, 0x5f, 0xf9, 0xff, 0x01, 0x00, 0x00, - 0xff, 0xff, 0x99, 0x05, 0xfa, 0x71, 0xfe, 0xdd, 0x00, 0x00, + 0x5c, 0xdc, 0xbf, 0xee, 0x7c, 0x2f, 0x5f, 0x66, 0xe5, 0xc7, 0xcb, 0x97, 0x2f, 0xdf, 0x07, 0xbc, + 0xb1, 0xfb, 0x7a, 0x34, 0xef, 0x06, 0x0b, 0xbb, 0x9d, 0x4d, 0x12, 0xfa, 0x24, 0x26, 0xd1, 0xc2, + 0x3e, 0xf1, 0x5b, 0x41, 0xb8, 0x20, 0x00, 0x4e, 0xdb, 0x5d, 0x68, 0x06, 0x21, 0x59, 0xd8, 0x7f, + 0x69, 0x61, 0x9b, 0xf8, 0x24, 0x74, 0x62, 0xd2, 0x9a, 0x6f, 0x87, 0x41, 0x1c, 0x20, 0xc4, 0x71, + 0xe6, 0x9d, 0xb6, 0x3b, 0x4f, 0x71, 0xe6, 0xf7, 0x5f, 0x9a, 0x7b, 0x71, 0xdb, 0x8d, 0x77, 0x3a, + 0x9b, 0xf3, 0xcd, 0x60, 0x6f, 0x61, 0x3b, 0xd8, 0x0e, 0x16, 0x18, 0xea, 0x66, 0x67, 0x8b, 0xfd, + 0x63, 0x7f, 0xd8, 0x2f, 0x4e, 0x62, 0x6e, 0x2d, 0x69, 0x86, 0xdc, 0x8f, 0x89, 0x1f, 0xb9, 0x81, + 0x1f, 0xbd, 0xe8, 0xb4, 0xdd, 0x88, 0x84, 0xfb, 0x24, 0x5c, 0x68, 0xef, 0x6e, 0x53, 0x58, 0x64, + 0x22, 0x2c, 0xec, 0xbf, 0xb4, 0x49, 0x62, 0xa7, 0xab, 0x47, 0x73, 0xaf, 0x24, 0xe4, 0xf6, 0x9c, + 0xe6, 0x8e, 0xeb, 0x93, 0xf0, 0x40, 0xd2, 0x58, 0x08, 0x49, 0x14, 0x74, 0xc2, 0x26, 0x39, 
0x51, + 0xad, 0x68, 0x61, 0x8f, 0xc4, 0x4e, 0xc6, 0xd7, 0xcf, 0x2d, 0xe4, 0xd5, 0x0a, 0x3b, 0x7e, 0xec, + 0xee, 0x75, 0x37, 0xf3, 0xb9, 0x7e, 0x15, 0xa2, 0xe6, 0x0e, 0xd9, 0x73, 0xba, 0xea, 0xbd, 0x9c, + 0x57, 0xaf, 0x13, 0xbb, 0xde, 0x82, 0xeb, 0xc7, 0x51, 0x1c, 0xa6, 0x2b, 0xd9, 0xdf, 0xb0, 0xe0, + 0xf2, 0xe2, 0xdd, 0xc6, 0x8a, 0xe7, 0x44, 0xb1, 0xdb, 0x5c, 0xf2, 0x82, 0xe6, 0x6e, 0x23, 0x0e, + 0x42, 0x72, 0x27, 0xf0, 0x3a, 0x7b, 0xa4, 0xc1, 0x06, 0x02, 0xbd, 0x00, 0x63, 0xfb, 0xec, 0x7f, + 0xad, 0x5a, 0xb6, 0x2e, 0x5b, 0x57, 0x4a, 0x4b, 0x33, 0xbf, 0x7e, 0x58, 0xf9, 0xd4, 0xd1, 0x61, + 0x65, 0xec, 0x8e, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x0d, 0x23, 0x5b, 0xd1, 0xc6, 0x41, 0x9b, 0x94, + 0x0b, 0x0c, 0x77, 0x4a, 0xe0, 0x8e, 0xac, 0x36, 0x68, 0x29, 0x16, 0x50, 0xb4, 0x00, 0xa5, 0xb6, + 0x13, 0xc6, 0x6e, 0xec, 0x06, 0x7e, 0xb9, 0x78, 0xd9, 0xba, 0x32, 0xbc, 0x34, 0x2b, 0x50, 0x4b, + 0x75, 0x09, 0xc0, 0x09, 0x0e, 0xed, 0x46, 0x48, 0x9c, 0xd6, 0x2d, 0xdf, 0x3b, 0x28, 0x0f, 0x5d, + 0xb6, 0xae, 0x8c, 0x25, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x47, 0x0a, 0x30, 0xb6, 0xb8, + 0xb5, 0xe5, 0xfa, 0x6e, 0x7c, 0x80, 0xee, 0xc0, 0x84, 0x1f, 0xb4, 0x88, 0xfc, 0xcf, 0xbe, 0x62, + 0xfc, 0xea, 0xe5, 0xf9, 0xee, 0x95, 0x39, 0xbf, 0xae, 0xe1, 0x2d, 0xcd, 0x1c, 0x1d, 0x56, 0x26, + 0xf4, 0x12, 0x6c, 0xd0, 0x41, 0x18, 0xc6, 0xdb, 0x41, 0x4b, 0x91, 0x2d, 0x30, 0xb2, 0x95, 0x2c, + 0xb2, 0xf5, 0x04, 0x6d, 0x69, 0xfa, 0xe8, 0xb0, 0x32, 0xae, 0x15, 0x60, 0x9d, 0x08, 0xda, 0x84, + 0x69, 0xfa, 0xd7, 0x8f, 0x5d, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x95, 0x47, 0x57, 0x43, 0x5d, 0x3a, + 0x77, 0x74, 0x58, 0x99, 0x4e, 0x15, 0xe2, 0x34, 0x41, 0xfb, 0x03, 0x98, 0x5a, 0x8c, 0x63, 0xa7, + 0xb9, 0x43, 0x5a, 0x7c, 0x06, 0xd1, 0x2b, 0x30, 0xe4, 0x3b, 0x7b, 0x44, 0xcc, 0xef, 0x65, 0x31, + 0xb0, 0x43, 0xeb, 0xce, 0x1e, 0x39, 0x3e, 0xac, 0xcc, 0xdc, 0xf6, 0xdd, 0xf7, 0x3b, 0x62, 0x55, + 0xd0, 0x32, 0xcc, 0xb0, 0xd1, 0x55, 0x80, 0x16, 0xd9, 0x77, 0x9b, 0xa4, 0xee, 0xc4, 0x3b, 0x62, + 0xbe, 0x91, 0xa8, 0x0b, 0x55, 
0x05, 0xc1, 0x1a, 0x96, 0x7d, 0x1f, 0x4a, 0x8b, 0xfb, 0x81, 0xdb, + 0xaa, 0x07, 0xad, 0x08, 0xed, 0xc2, 0x74, 0x3b, 0x24, 0x5b, 0x24, 0x54, 0x45, 0x65, 0xeb, 0x72, + 0xf1, 0xca, 0xf8, 0xd5, 0x2b, 0x99, 0x1f, 0x6b, 0xa2, 0xae, 0xf8, 0x71, 0x78, 0xb0, 0xf4, 0xa8, + 0x68, 0x6f, 0x3a, 0x05, 0xc5, 0x69, 0xca, 0xf6, 0xbf, 0x2b, 0xc0, 0x85, 0xc5, 0x0f, 0x3a, 0x21, + 0xa9, 0xba, 0xd1, 0x6e, 0x7a, 0x85, 0xb7, 0xdc, 0x68, 0x77, 0x3d, 0x19, 0x01, 0xb5, 0xb4, 0xaa, + 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x22, 0x8c, 0xd2, 0xdf, 0xb7, 0x71, 0x4d, 0x7c, 0xf2, 0x39, 0x81, + 0x3c, 0x5e, 0x75, 0x62, 0xa7, 0xca, 0x41, 0x58, 0xe2, 0xa0, 0x35, 0x18, 0x6f, 0xb2, 0x0d, 0xb9, + 0xbd, 0x16, 0xb4, 0x08, 0x9b, 0xcc, 0xd2, 0xd2, 0xf3, 0x14, 0x7d, 0x39, 0x29, 0x3e, 0x3e, 0xac, + 0x94, 0x79, 0xdf, 0x04, 0x09, 0x0d, 0x86, 0xf5, 0xfa, 0xc8, 0x56, 0xfb, 0x6b, 0x88, 0x51, 0x82, + 0x8c, 0xbd, 0x75, 0x45, 0xdb, 0x2a, 0xc3, 0x6c, 0xab, 0x4c, 0x64, 0x6f, 0x13, 0xf4, 0x12, 0x0c, + 0xed, 0xba, 0x7e, 0xab, 0x3c, 0xc2, 0x68, 0x3d, 0x41, 0xe7, 0xfc, 0x86, 0xeb, 0xb7, 0x8e, 0x0f, + 0x2b, 0xb3, 0x46, 0x77, 0x68, 0x21, 0x66, 0xa8, 0xf6, 0x1f, 0x5b, 0x50, 0x61, 0xb0, 0x55, 0xd7, + 0x23, 0x75, 0x12, 0x46, 0x6e, 0x14, 0x13, 0x3f, 0x36, 0x06, 0xf4, 0x2a, 0x40, 0x44, 0x9a, 0x21, + 0x89, 0xb5, 0x21, 0x55, 0x0b, 0xa3, 0xa1, 0x20, 0x58, 0xc3, 0xa2, 0x0c, 0x21, 0xda, 0x71, 0x42, + 0xb6, 0xbe, 0xc4, 0xc0, 0x2a, 0x86, 0xd0, 0x90, 0x00, 0x9c, 0xe0, 0x18, 0x0c, 0xa1, 0xd8, 0x8f, + 0x21, 0xa0, 0x2f, 0xc2, 0x74, 0xd2, 0x58, 0xd4, 0x76, 0x9a, 0x72, 0x00, 0xd9, 0x96, 0x69, 0x98, + 0x20, 0x9c, 0xc6, 0xb5, 0xff, 0xa1, 0x25, 0x16, 0x0f, 0xfd, 0xea, 0x4f, 0xf8, 0xb7, 0xda, 0xbf, + 0x68, 0xc1, 0xe8, 0x92, 0xeb, 0xb7, 0x5c, 0x7f, 0x1b, 0x7d, 0x15, 0xc6, 0xe8, 0xd9, 0xd4, 0x72, + 0x62, 0x47, 0xf0, 0xbd, 0xcf, 0x6a, 0x7b, 0x4b, 0x1d, 0x15, 0xf3, 0xed, 0xdd, 0x6d, 0x5a, 0x10, + 0xcd, 0x53, 0x6c, 0xba, 0xdb, 0x6e, 0x6d, 0xbe, 0x47, 0x9a, 0xf1, 0x1a, 0x89, 0x9d, 0xe4, 0x73, + 0x92, 0x32, 0xac, 0xa8, 0xa2, 0x1b, 0x30, 0x12, 0x3b, 0xe1, 0x36, 
0x89, 0x05, 0x03, 0xcc, 0x64, + 0x54, 0xbc, 0x26, 0xa6, 0x3b, 0x92, 0xf8, 0x4d, 0x92, 0x1c, 0x0b, 0x1b, 0xac, 0x2a, 0x16, 0x24, + 0xec, 0x9f, 0xb6, 0xe0, 0xe2, 0x72, 0xa3, 0x96, 0xb3, 0xae, 0x9e, 0x86, 0x91, 0x56, 0xe8, 0xee, + 0x93, 0x50, 0x8c, 0xb3, 0xa2, 0x52, 0x65, 0xa5, 0x58, 0x40, 0xd1, 0xeb, 0x30, 0xc1, 0x0f, 0xa4, + 0xeb, 0x8e, 0xdf, 0xf2, 0xe4, 0x10, 0x9f, 0x17, 0xd8, 0x13, 0x77, 0x34, 0x18, 0x36, 0x30, 0x4f, + 0x38, 0xd0, 0x4d, 0x98, 0x58, 0x76, 0xda, 0xce, 0xa6, 0xeb, 0xb9, 0xb1, 0x4b, 0x22, 0xf4, 0x0c, + 0x14, 0x9d, 0x56, 0x8b, 0xf1, 0xb0, 0xd2, 0xd2, 0x85, 0xa3, 0xc3, 0x4a, 0x71, 0xb1, 0x45, 0x37, + 0x13, 0x28, 0xac, 0x03, 0x4c, 0x31, 0xd0, 0x73, 0x30, 0xd4, 0x0a, 0x83, 0x76, 0xb9, 0xc0, 0x30, + 0x1f, 0xa1, 0xfb, 0xae, 0x1a, 0x06, 0xed, 0x14, 0x2a, 0xc3, 0xb1, 0x7f, 0xb5, 0x00, 0x8f, 0x2f, + 0x93, 0xf6, 0xce, 0x6a, 0x23, 0x67, 0x54, 0xae, 0xc0, 0xd8, 0x5e, 0xe0, 0xbb, 0x71, 0x10, 0x46, + 0xa2, 0x69, 0xb6, 0xdd, 0xd7, 0x44, 0x19, 0x56, 0x50, 0x74, 0x19, 0x86, 0xda, 0x09, 0xab, 0x9e, + 0x90, 0x6c, 0x9e, 0x31, 0x69, 0x06, 0xa1, 0x18, 0x9d, 0x88, 0x84, 0x82, 0x4d, 0x29, 0x8c, 0xdb, + 0x11, 0x09, 0x31, 0x83, 0x24, 0xeb, 0x9d, 0xee, 0x04, 0xb1, 0x87, 0x52, 0xeb, 0x9d, 0x42, 0xb0, + 0x86, 0x85, 0xea, 0x50, 0xe2, 0xff, 0x30, 0xd9, 0x62, 0x1c, 0x29, 0x67, 0x95, 0x34, 0x24, 0x92, + 0x58, 0x25, 0x93, 0x6c, 0x43, 0xc8, 0x42, 0x9c, 0x10, 0x31, 0xe6, 0x69, 0xa4, 0xef, 0x3c, 0xfd, + 0x72, 0x01, 0x10, 0x1f, 0xc2, 0x3f, 0x67, 0x03, 0x77, 0xbb, 0x7b, 0xe0, 0x32, 0x8f, 0xc6, 0x9b, + 0x41, 0xd3, 0xf1, 0xd2, 0x7b, 0xec, 0xb4, 0x46, 0xef, 0x87, 0x2d, 0x40, 0xcb, 0xae, 0xdf, 0x22, + 0xe1, 0x19, 0xc8, 0x85, 0x27, 0xdb, 0x80, 0x37, 0x61, 0x6a, 0xd9, 0x73, 0x89, 0x1f, 0xd7, 0xea, + 0xcb, 0x81, 0xbf, 0xe5, 0x6e, 0xa3, 0xcf, 0xc3, 0x14, 0x15, 0x93, 0x83, 0x4e, 0xdc, 0x20, 0xcd, + 0xc0, 0x67, 0x12, 0x05, 0x15, 0x2e, 0xd1, 0xd1, 0x61, 0x65, 0x6a, 0xc3, 0x80, 0xe0, 0x14, 0xa6, + 0xfd, 0xbb, 0xf4, 0x43, 0x83, 0xbd, 0x76, 0xe0, 0x13, 0x3f, 0x5e, 0x0e, 0xfc, 0x16, 0x97, 0x3c, + 0x3f, 
0x0f, 0x43, 0x31, 0xed, 0x38, 0xff, 0xc8, 0xa7, 0xe5, 0xd4, 0xd2, 0xee, 0x1e, 0x1f, 0x56, + 0x1e, 0xe9, 0xae, 0xc1, 0x3e, 0x88, 0xd5, 0x41, 0xdf, 0x06, 0x23, 0x51, 0xec, 0xc4, 0x9d, 0x48, + 0x7c, 0xf6, 0x93, 0xf2, 0xb3, 0x1b, 0xac, 0xf4, 0xf8, 0xb0, 0x32, 0xad, 0xaa, 0xf1, 0x22, 0x2c, + 0x2a, 0xa0, 0x67, 0x61, 0x74, 0x8f, 0x44, 0x91, 0xb3, 0x2d, 0x85, 0x86, 0x69, 0x51, 0x77, 0x74, + 0x8d, 0x17, 0x63, 0x09, 0x47, 0x4f, 0xc1, 0x30, 0x09, 0xc3, 0x20, 0x14, 0xab, 0x6a, 0x52, 0x20, + 0x0e, 0xaf, 0xd0, 0x42, 0xcc, 0x61, 0xf6, 0x7f, 0xb0, 0x60, 0x5a, 0xf5, 0x95, 0xb7, 0x75, 0x06, + 0xa7, 0xc3, 0x3b, 0x00, 0x4d, 0xf9, 0x81, 0x11, 0xe3, 0x77, 0xe3, 0x57, 0x9f, 0xce, 0x5a, 0xc2, + 0xdd, 0xc3, 0x98, 0x50, 0x56, 0x45, 0x11, 0xd6, 0xa8, 0xd9, 0xff, 0xd2, 0x82, 0x73, 0xa9, 0x2f, + 0xba, 0xe9, 0x46, 0x31, 0x7a, 0xb7, 0xeb, 0xab, 0xe6, 0x07, 0xfb, 0x2a, 0x5a, 0x9b, 0x7d, 0x93, + 0x5a, 0x73, 0xb2, 0x44, 0xfb, 0xa2, 0xeb, 0x30, 0xec, 0xc6, 0x64, 0x4f, 0x7e, 0xcc, 0x53, 0x3d, + 0x3f, 0x86, 0xf7, 0x2a, 0x99, 0x91, 0x1a, 0xad, 0x89, 0x39, 0x01, 0xfb, 0x7f, 0x59, 0x50, 0xe2, + 0xcb, 0x76, 0xcd, 0x69, 0x9f, 0xc1, 0x5c, 0xd4, 0x60, 0x88, 0x51, 0xe7, 0x1d, 0x7f, 0x26, 0xbb, + 0xe3, 0xa2, 0x3b, 0xf3, 0x54, 0xf4, 0xe3, 0x22, 0xb6, 0x62, 0x66, 0xb4, 0x08, 0x33, 0x12, 0x73, + 0xaf, 0x41, 0x49, 0x21, 0xa0, 0x19, 0x28, 0xee, 0x12, 0x7e, 0xad, 0x2a, 0x61, 0xfa, 0x13, 0x9d, + 0x87, 0xe1, 0x7d, 0xc7, 0xeb, 0x88, 0xcd, 0x8e, 0xf9, 0x9f, 0xcf, 0x17, 0x5e, 0xb7, 0xec, 0x5f, + 0x62, 0x7b, 0x4c, 0x34, 0xb2, 0xe2, 0xef, 0x0b, 0x66, 0xf2, 0x01, 0x9c, 0xf7, 0x32, 0x78, 0x98, + 0x18, 0x88, 0xc1, 0x79, 0xde, 0xe3, 0xa2, 0xaf, 0xe7, 0xb3, 0xa0, 0x38, 0xb3, 0x0d, 0x7a, 0x0c, + 0x04, 0x6d, 0xba, 0xa2, 0x1c, 0x8f, 0xf5, 0x57, 0x88, 0xcb, 0xb7, 0x44, 0x19, 0x56, 0x50, 0xca, + 0x20, 0xce, 0xab, 0xce, 0xdf, 0x20, 0x07, 0x0d, 0xe2, 0x91, 0x66, 0x1c, 0x84, 0x1f, 0x6b, 0xf7, + 0x9f, 0xe0, 0xa3, 0xcf, 0xf9, 0xcb, 0xb8, 0x20, 0x50, 0xbc, 0x41, 0x0e, 0xf8, 0x54, 0xe8, 0x5f, + 0x57, 0xec, 0xf9, 0x75, 0x3f, 0x63, 0xc1, 
0xa4, 0xfa, 0xba, 0x33, 0xd8, 0x48, 0x4b, 0xe6, 0x46, + 0x7a, 0xa2, 0xe7, 0x7a, 0xcc, 0xd9, 0x42, 0x7f, 0xc6, 0x58, 0x80, 0xc0, 0xa9, 0x87, 0x01, 0x1d, + 0x1a, 0xca, 0xb3, 0x3f, 0xce, 0x09, 0x19, 0xe4, 0xbb, 0x6e, 0x90, 0x83, 0x8d, 0x80, 0x8a, 0x0f, + 0xd9, 0xdf, 0x65, 0xcc, 0xda, 0x50, 0xcf, 0x59, 0xfb, 0xb9, 0x02, 0x5c, 0x50, 0x23, 0x60, 0x1c, + 0xd0, 0x7f, 0xde, 0xc7, 0xe0, 0x25, 0x18, 0x6f, 0x91, 0x2d, 0xa7, 0xe3, 0xc5, 0xea, 0xe6, 0x3c, + 0xcc, 0xb5, 0x27, 0xd5, 0xa4, 0x18, 0xeb, 0x38, 0x27, 0x18, 0xb6, 0x9f, 0x18, 0x67, 0xbc, 0x37, + 0x76, 0xe8, 0x0a, 0xa6, 0xd2, 0x9b, 0xa6, 0xff, 0x98, 0xd0, 0xf5, 0x1f, 0x42, 0xd7, 0xf1, 0x14, + 0x0c, 0xbb, 0x7b, 0xf4, 0x2c, 0x2e, 0x98, 0x47, 0x6c, 0x8d, 0x16, 0x62, 0x0e, 0x43, 0x9f, 0x81, + 0xd1, 0x66, 0xb0, 0xb7, 0xe7, 0xf8, 0xad, 0x72, 0x91, 0xc9, 0x93, 0xe3, 0xf4, 0xb8, 0x5e, 0xe6, + 0x45, 0x58, 0xc2, 0xd0, 0xe3, 0x30, 0xe4, 0x84, 0xdb, 0x51, 0x79, 0x88, 0xe1, 0x8c, 0xd1, 0x96, + 0x16, 0xc3, 0xed, 0x08, 0xb3, 0x52, 0x2a, 0x27, 0xde, 0x0b, 0xc2, 0x5d, 0xd7, 0xdf, 0xae, 0xba, + 0x21, 0x13, 0xfa, 0x34, 0x39, 0xf1, 0xae, 0x82, 0x60, 0x0d, 0x0b, 0xad, 0xc2, 0x70, 0x3b, 0x08, + 0xe3, 0xa8, 0x3c, 0xc2, 0x86, 0xfb, 0xc9, 0x9c, 0xad, 0xc4, 0xbf, 0xb6, 0x1e, 0x84, 0x71, 0xf2, + 0x01, 0xf4, 0x5f, 0x84, 0x79, 0x75, 0xf4, 0x6d, 0x50, 0x24, 0xfe, 0x7e, 0x79, 0x94, 0x51, 0x99, + 0xcb, 0xa2, 0xb2, 0xe2, 0xef, 0xdf, 0x71, 0xc2, 0x84, 0xcf, 0xac, 0xf8, 0xfb, 0x98, 0xd6, 0x41, + 0x5f, 0x86, 0x92, 0xd4, 0x9d, 0x46, 0xe5, 0xb1, 0xfc, 0x25, 0x86, 0x05, 0x12, 0x26, 0xef, 0x77, + 0xdc, 0x90, 0xec, 0x11, 0x3f, 0x8e, 0x92, 0xdb, 0xaf, 0x84, 0x46, 0x38, 0xa1, 0x86, 0xbe, 0x2c, + 0xaf, 0x73, 0x6b, 0x41, 0xc7, 0x8f, 0xa3, 0x72, 0x89, 0x75, 0x2f, 0x53, 0xd1, 0x76, 0x27, 0xc1, + 0x4b, 0xdf, 0xf7, 0x78, 0x65, 0x6c, 0x90, 0x42, 0x18, 0x26, 0x3d, 0x77, 0x9f, 0xf8, 0x24, 0x8a, + 0xea, 0x61, 0xb0, 0x49, 0xca, 0xc0, 0x7a, 0x7e, 0x31, 0x5b, 0xff, 0x14, 0x6c, 0x92, 0xa5, 0xd9, + 0xa3, 0xc3, 0xca, 0xe4, 0x4d, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0xdb, 0x30, 0x45, 
0x05, 0x54, 0x37, + 0x21, 0x3a, 0xde, 0x8f, 0x28, 0x93, 0x4e, 0xb1, 0x51, 0x09, 0xa7, 0x88, 0xa0, 0xb7, 0xa0, 0xe4, + 0xb9, 0x5b, 0xa4, 0x79, 0xd0, 0xf4, 0x48, 0x79, 0x82, 0x51, 0xcc, 0xdc, 0x56, 0x37, 0x25, 0x12, + 0xbf, 0x00, 0xa8, 0xbf, 0x38, 0xa9, 0x8e, 0xee, 0xc0, 0x23, 0x31, 0x09, 0xf7, 0x5c, 0xdf, 0xa1, + 0xdb, 0x41, 0xc8, 0x93, 0x4c, 0x8b, 0x37, 0xc9, 0xd6, 0xdb, 0x25, 0x31, 0x74, 0x8f, 0x6c, 0x64, + 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x16, 0x4c, 0xb3, 0x9d, 0x50, 0xef, 0x78, 0x5e, 0x3d, 0xf0, 0xdc, + 0xe6, 0x41, 0x79, 0x8a, 0x11, 0xfc, 0x8c, 0x54, 0xd3, 0xd5, 0x4c, 0x30, 0xbd, 0xf1, 0x26, 0xff, + 0x70, 0xba, 0x36, 0xda, 0x64, 0x6a, 0x9b, 0x4e, 0xe8, 0xc6, 0x07, 0x74, 0xfd, 0x92, 0xfb, 0x71, + 0x79, 0xba, 0xe7, 0xfd, 0x51, 0x47, 0x55, 0xba, 0x1d, 0xbd, 0x10, 0xa7, 0x09, 0xd2, 0xad, 0x1d, + 0xc5, 0x2d, 0xd7, 0x2f, 0xcf, 0x30, 0x8e, 0xa1, 0x76, 0x46, 0x83, 0x16, 0x62, 0x0e, 0x63, 0x2a, + 0x1b, 0xfa, 0xe3, 0x16, 0xe5, 0xa0, 0xb3, 0x0c, 0x31, 0x51, 0xd9, 0x48, 0x00, 0x4e, 0x70, 0xe8, + 0xb1, 0x1c, 0xc7, 0x07, 0x65, 0xc4, 0x50, 0xd5, 0x76, 0xd9, 0xd8, 0xf8, 0x32, 0xa6, 0xe5, 0xe8, + 0x26, 0x8c, 0x12, 0x7f, 0x7f, 0x35, 0x0c, 0xf6, 0xca, 0xe7, 0xf2, 0xf7, 0xec, 0x0a, 0x47, 0xe1, + 0x0c, 0x3d, 0xb9, 0x00, 0x88, 0x62, 0x2c, 0x49, 0xa0, 0xfb, 0x50, 0xce, 0x98, 0x11, 0x3e, 0x01, + 0xe7, 0xd9, 0x04, 0x7c, 0x41, 0xd4, 0x2d, 0x6f, 0xe4, 0xe0, 0x1d, 0xf7, 0x80, 0xe1, 0x5c, 0xea, + 0xe8, 0xbb, 0x60, 0x92, 0x6f, 0x28, 0xae, 0xef, 0x8d, 0xca, 0x17, 0xd8, 0xd7, 0x5c, 0xce, 0xdf, + 0x9c, 0x1c, 0x71, 0xe9, 0x82, 0xe8, 0xd0, 0xa4, 0x5e, 0x1a, 0x61, 0x93, 0x9a, 0xbd, 0x09, 0x53, + 0x8a, 0x6f, 0xb1, 0xa5, 0x83, 0x2a, 0x30, 0x4c, 0x19, 0xb2, 0xbc, 0xb1, 0x97, 0xe8, 0x4c, 0x31, + 0x3d, 0x1d, 0xe6, 0xe5, 0x6c, 0xa6, 0xdc, 0x0f, 0xc8, 0xd2, 0x41, 0x4c, 0xf8, 0xad, 0xab, 0xa8, + 0xcd, 0x94, 0x04, 0xe0, 0x04, 0xc7, 0xfe, 0x7f, 0x5c, 0xee, 0x49, 0x98, 0xe3, 0x00, 0xc7, 0xc1, + 0x0b, 0x30, 0xb6, 0x13, 0x44, 0x31, 0xc5, 0x66, 0x6d, 0x0c, 0x27, 0x92, 0xce, 0x75, 0x51, 0x8e, + 0x15, 0x06, 0x7a, 
0x03, 0x26, 0x9b, 0x7a, 0x03, 0xe2, 0x2c, 0x53, 0x43, 0x60, 0xb4, 0x8e, 0x4d, + 0x5c, 0xf4, 0x3a, 0x8c, 0xb1, 0xd7, 0x9a, 0x66, 0xe0, 0x89, 0xfb, 0x9d, 0x3c, 0x90, 0xc7, 0xea, + 0xa2, 0xfc, 0x58, 0xfb, 0x8d, 0x15, 0x36, 0xbd, 0x73, 0xd3, 0x2e, 0xd4, 0xea, 0xe2, 0x14, 0x51, + 0x77, 0xee, 0xeb, 0xac, 0x14, 0x0b, 0xa8, 0xfd, 0x37, 0x0a, 0xda, 0x28, 0xd3, 0x1b, 0x0b, 0x41, + 0x75, 0x18, 0xbd, 0xe7, 0xb8, 0xb1, 0xeb, 0x6f, 0x0b, 0x71, 0xe1, 0xd9, 0x9e, 0x47, 0x0a, 0xab, + 0x74, 0x97, 0x57, 0xe0, 0x87, 0x9e, 0xf8, 0x83, 0x25, 0x19, 0x4a, 0x31, 0xec, 0xf8, 0x3e, 0xa5, + 0x58, 0x18, 0x94, 0x22, 0xe6, 0x15, 0x38, 0x45, 0xf1, 0x07, 0x4b, 0x32, 0xe8, 0x5d, 0x00, 0xb9, + 0x2c, 0x49, 0x4b, 0xbc, 0x92, 0xbc, 0xd0, 0x9f, 0xe8, 0x86, 0xaa, 0xb3, 0x34, 0x45, 0x8f, 0xd4, + 0xe4, 0x3f, 0xd6, 0xe8, 0xd9, 0x31, 0x13, 0xab, 0xba, 0x3b, 0x83, 0xbe, 0x93, 0x72, 0x02, 0x27, + 0x8c, 0x49, 0x6b, 0x31, 0x16, 0x83, 0xf3, 0xdc, 0x60, 0x52, 0xf1, 0x86, 0xbb, 0x47, 0x74, 0xae, + 0x21, 0x88, 0xe0, 0x84, 0x9e, 0xfd, 0x0b, 0x45, 0x28, 0xe7, 0x75, 0x97, 0x2e, 0x3a, 0x72, 0xdf, + 0x8d, 0x97, 0xa9, 0x34, 0x64, 0x99, 0x8b, 0x6e, 0x45, 0x94, 0x63, 0x85, 0x41, 0x67, 0x3f, 0x72, + 0xb7, 0xe5, 0xa5, 0x66, 0x38, 0x99, 0xfd, 0x06, 0x2b, 0xc5, 0x02, 0x4a, 0xf1, 0x42, 0xe2, 0x44, + 0xe2, 0x19, 0x4e, 0x5b, 0x25, 0x98, 0x95, 0x62, 0x01, 0xd5, 0xf5, 0x11, 0x43, 0x7d, 0xf4, 0x11, + 0xc6, 0x10, 0x0d, 0x9f, 0xee, 0x10, 0xa1, 0xaf, 0x00, 0x6c, 0xb9, 0xbe, 0x1b, 0xed, 0x30, 0xea, + 0x23, 0x27, 0xa6, 0xae, 0x64, 0xa9, 0x55, 0x45, 0x05, 0x6b, 0x14, 0xd1, 0xab, 0x30, 0xae, 0x36, + 0x60, 0xad, 0x5a, 0x1e, 0x35, 0xdf, 0x78, 0x12, 0x6e, 0x54, 0xc5, 0x3a, 0x9e, 0xfd, 0x5e, 0x7a, + 0xbd, 0x88, 0x1d, 0xa0, 0x8d, 0xaf, 0x35, 0xe8, 0xf8, 0x16, 0x7a, 0x8f, 0xaf, 0xfd, 0x6b, 0x45, + 0x98, 0x36, 0x1a, 0xeb, 0x44, 0x03, 0xf0, 0xac, 0x6b, 0xf4, 0x9c, 0x73, 0x62, 0x22, 0xf6, 0x9f, + 0xdd, 0x7f, 0xab, 0xe8, 0x67, 0x21, 0xdd, 0x01, 0xbc, 0x3e, 0xfa, 0x0a, 0x94, 0x3c, 0x27, 0x62, + 0xba, 0x0d, 0x22, 0xf6, 0xdd, 0x20, 0xc4, 0x92, 0x7b, 
0x84, 0x13, 0xc5, 0xda, 0x51, 0xc3, 0x69, + 0x27, 0x24, 0xe9, 0x81, 0x4c, 0x65, 0x1f, 0xf9, 0xce, 0xab, 0x3a, 0x41, 0x05, 0xa4, 0x03, 0xcc, + 0x61, 0xe8, 0x75, 0x98, 0x08, 0x09, 0x5b, 0x15, 0xcb, 0x54, 0x94, 0x63, 0xcb, 0x6c, 0x38, 0x91, + 0xf9, 0xb0, 0x06, 0xc3, 0x06, 0x66, 0x22, 0xca, 0x8f, 0xf4, 0x10, 0xe5, 0x9f, 0x85, 0x51, 0xf6, + 0x43, 0xad, 0x00, 0x35, 0x1b, 0x35, 0x5e, 0x8c, 0x25, 0x3c, 0xbd, 0x60, 0xc6, 0x06, 0x5c, 0x30, + 0xcf, 0xc1, 0x54, 0xd5, 0x21, 0x7b, 0x81, 0xbf, 0xe2, 0xb7, 0xda, 0x81, 0xeb, 0xc7, 0xa8, 0x0c, + 0x43, 0xec, 0x74, 0xe0, 0x7b, 0x7b, 0x88, 0x52, 0xc0, 0x43, 0x54, 0x30, 0xb7, 0x7f, 0xbb, 0x00, + 0x93, 0x55, 0xe2, 0x91, 0x98, 0xf0, 0xab, 0x4c, 0x84, 0x56, 0x01, 0x6d, 0x87, 0x4e, 0x93, 0xd4, + 0x49, 0xe8, 0x06, 0x2d, 0x5d, 0xd7, 0x59, 0x64, 0xef, 0x09, 0xe8, 0x5a, 0x17, 0x14, 0x67, 0xd4, + 0x40, 0xef, 0xc0, 0x64, 0x3b, 0x24, 0x86, 0x8a, 0xce, 0xca, 0x93, 0x46, 0xea, 0x3a, 0x22, 0x17, + 0x84, 0x8d, 0x22, 0x6c, 0x92, 0x42, 0xdf, 0x01, 0x33, 0x41, 0xd8, 0xde, 0x71, 0xfc, 0x2a, 0x69, + 0x13, 0xbf, 0x45, 0x25, 0x7d, 0xa1, 0x82, 0x38, 0x7f, 0x74, 0x58, 0x99, 0xb9, 0x95, 0x82, 0xe1, + 0x2e, 0x6c, 0xf4, 0x0e, 0xcc, 0xb6, 0xc3, 0xa0, 0xed, 0x6c, 0xb3, 0x85, 0x22, 0x04, 0x1a, 0xce, + 0x7d, 0x5e, 0x38, 0x3a, 0xac, 0xcc, 0xd6, 0xd3, 0xc0, 0xe3, 0xc3, 0xca, 0x39, 0x36, 0x50, 0xb4, + 0x24, 0x01, 0xe2, 0x6e, 0x32, 0xf6, 0x36, 0x5c, 0xa8, 0x06, 0xf7, 0xfc, 0x7b, 0x4e, 0xd8, 0x5a, + 0xac, 0xd7, 0x34, 0xdd, 0xc1, 0xba, 0xbc, 0xbb, 0xf2, 0xb7, 0xe8, 0xcc, 0x73, 0x4a, 0xab, 0xc9, + 0xe5, 0x97, 0x55, 0xd7, 0x23, 0x39, 0x3a, 0x8a, 0xbf, 0x5d, 0x30, 0x5a, 0x4a, 0xf0, 0xd5, 0xb3, + 0x82, 0x95, 0xfb, 0xac, 0xf0, 0x36, 0x8c, 0x6d, 0xb9, 0xc4, 0x6b, 0x61, 0xb2, 0x25, 0x66, 0xe6, + 0x99, 0xfc, 0xe7, 0xb5, 0x55, 0x8a, 0x29, 0x75, 0x52, 0xfc, 0xe6, 0xbb, 0x2a, 0x2a, 0x63, 0x45, + 0x06, 0xed, 0xc2, 0x8c, 0xbc, 0x5a, 0x49, 0xa8, 0xd8, 0xc4, 0xcf, 0xf6, 0xba, 0xaf, 0x99, 0xc4, + 0xd9, 0x04, 0xe2, 0x14, 0x19, 0xdc, 0x45, 0x98, 0x5e, 0x75, 0xf7, 0xe8, 0x71, 0x35, 0xc4, 
0x96, + 0x34, 0xbb, 0xea, 0xb2, 0x5b, 0x3b, 0x2b, 0xb5, 0x7f, 0xcc, 0x82, 0x47, 0xbb, 0x46, 0x46, 0x68, + 0x2f, 0x4e, 0x79, 0x16, 0xd2, 0xda, 0x84, 0x42, 0x7f, 0x6d, 0x82, 0xfd, 0x8f, 0x2c, 0x38, 0xbf, + 0xb2, 0xd7, 0x8e, 0x0f, 0xaa, 0xae, 0xf9, 0xf4, 0xf1, 0x1a, 0x8c, 0xec, 0x91, 0x96, 0xdb, 0xd9, + 0x13, 0x33, 0x57, 0x91, 0x2c, 0x7d, 0x8d, 0x95, 0x1e, 0x1f, 0x56, 0x26, 0x1b, 0x71, 0x10, 0x3a, + 0xdb, 0x84, 0x17, 0x60, 0x81, 0xce, 0x0e, 0x46, 0xf7, 0x03, 0x72, 0xd3, 0xdd, 0x73, 0xe5, 0x73, + 0x69, 0x4f, 0x8d, 0xda, 0xbc, 0x1c, 0xd0, 0xf9, 0xb7, 0x3b, 0x8e, 0x1f, 0xbb, 0xf1, 0x81, 0x78, + 0xd5, 0x91, 0x44, 0x70, 0x42, 0xcf, 0xfe, 0x86, 0x05, 0xd3, 0x92, 0x97, 0x2c, 0xb6, 0x5a, 0x21, + 0x89, 0x22, 0x34, 0x07, 0x05, 0xb7, 0x2d, 0x7a, 0x09, 0xa2, 0x97, 0x85, 0x5a, 0x1d, 0x17, 0xdc, + 0x36, 0xaa, 0x43, 0x89, 0xbf, 0xba, 0x26, 0x8b, 0x6b, 0xa0, 0xb7, 0x5b, 0xd6, 0x83, 0x0d, 0x59, + 0x13, 0x27, 0x44, 0xa4, 0x54, 0xcc, 0xce, 0xa1, 0xa2, 0xf9, 0x24, 0x74, 0x5d, 0x94, 0x63, 0x85, + 0x81, 0xae, 0xc0, 0x98, 0x1f, 0xb4, 0xf8, 0x23, 0x38, 0xdf, 0xd3, 0x6c, 0xc9, 0xae, 0x8b, 0x32, + 0xac, 0xa0, 0xf6, 0x0f, 0x5a, 0x30, 0x21, 0xbf, 0x6c, 0x40, 0x01, 0x9d, 0x6e, 0xad, 0x44, 0x38, + 0x4f, 0xb6, 0x16, 0x15, 0xb0, 0x19, 0xc4, 0x90, 0xab, 0x8b, 0x27, 0x91, 0xab, 0xed, 0x1f, 0x2d, + 0xc0, 0x94, 0xec, 0x4e, 0xa3, 0xb3, 0x19, 0x91, 0x18, 0x6d, 0x40, 0xc9, 0xe1, 0x43, 0x4e, 0xe4, + 0x8a, 0x7d, 0x2a, 0xfb, 0x42, 0x67, 0xcc, 0x4f, 0x22, 0xea, 0x2c, 0xca, 0xda, 0x38, 0x21, 0x84, + 0x3c, 0x98, 0xf5, 0x83, 0x98, 0x1d, 0x7b, 0x0a, 0xde, 0xeb, 0xd9, 0x21, 0x4d, 0xfd, 0xa2, 0xa0, + 0x3e, 0xbb, 0x9e, 0xa6, 0x82, 0xbb, 0x09, 0xa3, 0x15, 0xa9, 0x44, 0x2a, 0xe6, 0x5f, 0xe1, 0xf4, + 0x59, 0xc8, 0xd6, 0x21, 0xd9, 0xbf, 0x62, 0x41, 0x49, 0xa2, 0x9d, 0xc5, 0x0b, 0xd3, 0x1a, 0x8c, + 0x46, 0x6c, 0x12, 0xe4, 0xd0, 0xd8, 0xbd, 0x3a, 0xce, 0xe7, 0x2b, 0x39, 0xcd, 0xf9, 0xff, 0x08, + 0x4b, 0x1a, 0x4c, 0x0b, 0xae, 0xba, 0xff, 0x09, 0xd1, 0x82, 0xab, 0xfe, 0xe4, 0x9c, 0x30, 0xff, + 0x8d, 0xf5, 0x59, 0x53, 0x15, 
0x50, 0xa1, 0xb3, 0x1d, 0x92, 0x2d, 0xf7, 0x7e, 0x5a, 0xe8, 0xac, + 0xb3, 0x52, 0x2c, 0xa0, 0xe8, 0x5d, 0x98, 0x68, 0x4a, 0xe5, 0x71, 0xc2, 0x06, 0x9e, 0xee, 0xa9, + 0x8a, 0x57, 0xaf, 0x36, 0xdc, 0x40, 0x6e, 0x59, 0xab, 0x8f, 0x0d, 0x6a, 0xe6, 0xbb, 0x7f, 0xb1, + 0xdf, 0xbb, 0x7f, 0x42, 0x37, 0xf7, 0xe5, 0xda, 0xfe, 0x71, 0x0b, 0x46, 0xb8, 0x0a, 0x72, 0x30, + 0x9d, 0xad, 0xf6, 0x0a, 0x95, 0x8c, 0xdd, 0x1d, 0x5a, 0x28, 0x1e, 0xa5, 0xd0, 0x1a, 0x94, 0xd8, + 0x0f, 0xa6, 0x8a, 0x29, 0xe6, 0x5b, 0x06, 0xf2, 0x56, 0xf5, 0x0e, 0xde, 0x91, 0xd5, 0x70, 0x42, + 0xc1, 0xfe, 0xa1, 0x22, 0x65, 0x55, 0x09, 0xaa, 0x71, 0x82, 0x5b, 0x0f, 0xef, 0x04, 0x2f, 0x3c, + 0xac, 0x13, 0x7c, 0x1b, 0xa6, 0x9b, 0xda, 0x93, 0x57, 0x32, 0x93, 0x57, 0x7a, 0x2e, 0x12, 0xed, + 0x75, 0x8c, 0xab, 0xe1, 0x96, 0x4d, 0x22, 0x38, 0x4d, 0x15, 0x7d, 0x27, 0x4c, 0xf0, 0x79, 0x16, + 0xad, 0x0c, 0xb1, 0x56, 0x3e, 0x93, 0xbf, 0x5e, 0xf4, 0x26, 0xd8, 0x4a, 0x6c, 0x68, 0xd5, 0xb1, + 0x41, 0xcc, 0xfe, 0x85, 0x31, 0x18, 0x5e, 0xd9, 0x27, 0x7e, 0x7c, 0x06, 0x0c, 0xa9, 0x09, 0x53, + 0xae, 0xbf, 0x1f, 0x78, 0xfb, 0xa4, 0xc5, 0xe1, 0x27, 0x39, 0x5c, 0x1f, 0x11, 0xa4, 0xa7, 0x6a, + 0x06, 0x09, 0x9c, 0x22, 0xf9, 0x30, 0x6e, 0xed, 0xd7, 0x60, 0x84, 0xcf, 0xbd, 0xb8, 0xb2, 0x67, + 0x2a, 0xd8, 0xd9, 0x20, 0x8a, 0x5d, 0x90, 0x68, 0x14, 0xb8, 0x46, 0x5f, 0x54, 0x47, 0xef, 0xc1, + 0xd4, 0x96, 0x1b, 0x46, 0x31, 0xbd, 0x6e, 0x47, 0xb1, 0xb3, 0xd7, 0x7e, 0x80, 0x5b, 0xba, 0x1a, + 0x87, 0x55, 0x83, 0x12, 0x4e, 0x51, 0x46, 0xdb, 0x30, 0x49, 0x2f, 0x8e, 0x49, 0x53, 0xa3, 0x27, + 0x6e, 0x4a, 0xa9, 0xe1, 0x6e, 0xea, 0x84, 0xb0, 0x49, 0x97, 0x32, 0x93, 0x26, 0xbb, 0x68, 0x8e, + 0x31, 0x89, 0x42, 0x31, 0x13, 0x7e, 0xc3, 0xe4, 0x30, 0xca, 0x93, 0x98, 0xa9, 0x48, 0xc9, 0xe4, + 0x49, 0x9a, 0x41, 0xc8, 0x57, 0xa1, 0x44, 0xe8, 0x10, 0x52, 0xc2, 0xe2, 0xb1, 0x61, 0x61, 0xb0, + 0xbe, 0xae, 0xb9, 0xcd, 0x30, 0x30, 0xf5, 0x23, 0x2b, 0x92, 0x12, 0x4e, 0x88, 0xa2, 0x65, 0x18, + 0x89, 0x48, 0xe8, 0x92, 0x48, 0x3c, 0x3b, 0xf4, 0x98, 0x46, 0x86, 
0xc6, 0x4d, 0x48, 0xf9, 0x6f, + 0x2c, 0xaa, 0xd2, 0xe5, 0xe5, 0xb0, 0xdb, 0x10, 0x7b, 0x69, 0xd0, 0x96, 0xd7, 0x22, 0x2b, 0xc5, + 0x02, 0x8a, 0xde, 0x82, 0xd1, 0x90, 0x78, 0x4c, 0x01, 0x37, 0x39, 0xf8, 0x22, 0xe7, 0xfa, 0x3c, + 0x5e, 0x0f, 0x4b, 0x02, 0xe8, 0x06, 0xa0, 0x90, 0x50, 0x19, 0xc2, 0xf5, 0xb7, 0x95, 0x01, 0x85, + 0x78, 0x3f, 0x78, 0x4c, 0xb4, 0x7f, 0x0e, 0x27, 0x18, 0x7e, 0x1c, 0x06, 0x9e, 0x47, 0x42, 0x9c, + 0x51, 0x0d, 0x5d, 0x83, 0x59, 0x55, 0x5a, 0xf3, 0xa3, 0xd8, 0xf1, 0x9b, 0x84, 0x3d, 0x1d, 0x94, + 0x12, 0xa9, 0x08, 0xa7, 0x11, 0x70, 0x77, 0x1d, 0xfb, 0xeb, 0x54, 0x9c, 0xa1, 0xa3, 0x75, 0x06, + 0xb2, 0xc0, 0x9b, 0xa6, 0x2c, 0x70, 0x31, 0x77, 0xe6, 0x72, 0xe4, 0x80, 0x23, 0x0b, 0xc6, 0xb5, + 0x99, 0x4d, 0xd6, 0xac, 0xd5, 0x63, 0xcd, 0x76, 0x60, 0x86, 0xae, 0xf4, 0x5b, 0x9b, 0xcc, 0x9b, + 0xa2, 0xc5, 0x16, 0x66, 0xe1, 0xc1, 0x16, 0x66, 0x59, 0x34, 0x30, 0x73, 0x33, 0x45, 0x10, 0x77, + 0x35, 0x81, 0x5e, 0x93, 0xda, 0xa8, 0xa2, 0x61, 0x18, 0xc5, 0x35, 0x4d, 0xc7, 0x87, 0x95, 0x19, + 0xed, 0x43, 0x74, 0xed, 0x93, 0xfd, 0x55, 0xf9, 0x8d, 0x9c, 0xd9, 0x2c, 0x40, 0xa9, 0xa9, 0x16, + 0x8b, 0x65, 0xda, 0xd2, 0xaa, 0xe5, 0x80, 0x13, 0x1c, 0xba, 0x47, 0xe9, 0x15, 0x24, 0x6d, 0xcb, + 0x47, 0x2f, 0x28, 0x98, 0x41, 0xec, 0x97, 0x01, 0x56, 0xee, 0x93, 0x26, 0x5f, 0xea, 0xfa, 0xa3, + 0xae, 0x95, 0xff, 0xa8, 0x6b, 0xff, 0x47, 0x0b, 0xa6, 0x56, 0x97, 0x8d, 0x6b, 0xe2, 0x3c, 0x00, + 0xbf, 0x1b, 0xdd, 0xbd, 0xbb, 0x2e, 0xdf, 0x2b, 0xb8, 0xca, 0x59, 0x95, 0x62, 0x0d, 0x03, 0x5d, + 0x84, 0xa2, 0xd7, 0xf1, 0xc5, 0x95, 0x65, 0xf4, 0xe8, 0xb0, 0x52, 0xbc, 0xd9, 0xf1, 0x31, 0x2d, + 0xd3, 0xcc, 0xe7, 0x8a, 0x03, 0x9b, 0xcf, 0xf5, 0xf5, 0x92, 0x40, 0x15, 0x18, 0xbe, 0x77, 0xcf, + 0x6d, 0x45, 0xe5, 0xe1, 0xe4, 0x2d, 0xe5, 0xee, 0xdd, 0x5a, 0x35, 0xc2, 0xbc, 0xdc, 0xfe, 0x5a, + 0x11, 0xe6, 0x56, 0x3d, 0x72, 0xff, 0x23, 0xda, 0xe3, 0x0e, 0x6a, 0xfc, 0x77, 0x32, 0x79, 0xf1, + 0xa4, 0x96, 0x8e, 0xfd, 0xc7, 0x63, 0x0b, 0x46, 0xb9, 0x81, 0x00, 0x1f, 0x91, 0xf1, 0xab, 0x6f, + 0x64, 
0xb5, 0x9e, 0x3f, 0x20, 0xf3, 0x42, 0x3b, 0xc7, 0xed, 0xa6, 0xd4, 0x49, 0x2b, 0x4a, 0xb1, + 0x24, 0x3e, 0xf7, 0x79, 0x98, 0xd0, 0x31, 0x4f, 0x64, 0x40, 0xf5, 0x97, 0x8a, 0x30, 0x43, 0x7b, + 0xf0, 0x50, 0x27, 0xe2, 0x76, 0xf7, 0x44, 0x9c, 0xb6, 0xdd, 0x69, 0xff, 0xd9, 0x78, 0x37, 0x3d, + 0x1b, 0x2f, 0xe5, 0xcd, 0xc6, 0x59, 0xcf, 0xc1, 0x5f, 0xb6, 0xe0, 0xdc, 0xaa, 0x17, 0x34, 0x77, + 0x53, 0x26, 0xb1, 0xaf, 0xc2, 0x38, 0xe5, 0xe3, 0x91, 0xe1, 0x0c, 0x60, 0xb8, 0x87, 0x08, 0x10, + 0xd6, 0xf1, 0xb4, 0x6a, 0xb7, 0x6f, 0xd7, 0xaa, 0x59, 0x5e, 0x25, 0x02, 0x84, 0x75, 0x3c, 0xfb, + 0x37, 0x2d, 0x78, 0xe2, 0xda, 0xf2, 0x4a, 0xb2, 0x14, 0xbb, 0x1c, 0x5b, 0xe8, 0x2d, 0xb0, 0xa5, + 0x75, 0x25, 0xb9, 0x05, 0x56, 0x59, 0x2f, 0x04, 0xf4, 0x93, 0xe2, 0xb4, 0xf5, 0x53, 0x16, 0x9c, + 0xbb, 0xe6, 0xc6, 0xf4, 0x58, 0x4e, 0xbb, 0x58, 0xd0, 0x73, 0x39, 0x72, 0xe3, 0x20, 0x3c, 0x48, + 0xbb, 0x58, 0x60, 0x05, 0xc1, 0x1a, 0x16, 0x6f, 0x79, 0xdf, 0x8d, 0x68, 0x4f, 0x0b, 0xa6, 0x2a, + 0x0a, 0x8b, 0x72, 0xac, 0x30, 0xe8, 0x87, 0xb5, 0xdc, 0x90, 0x5d, 0x25, 0x0e, 0x04, 0x87, 0x55, + 0x1f, 0x56, 0x95, 0x00, 0x9c, 0xe0, 0xd8, 0x3f, 0x66, 0xc1, 0x85, 0x6b, 0x5e, 0x27, 0x8a, 0x49, + 0xb8, 0x15, 0x19, 0x9d, 0x7d, 0x19, 0x4a, 0x44, 0x5e, 0xd7, 0x45, 0x5f, 0x95, 0x80, 0xa9, 0xee, + 0xf1, 0xdc, 0xbf, 0x43, 0xe1, 0x0d, 0x60, 0x5f, 0x7e, 0x32, 0xbb, 0xe8, 0x9f, 0x2d, 0xc0, 0xe4, + 0xf5, 0x8d, 0x8d, 0xfa, 0x35, 0x12, 0x8b, 0x53, 0xac, 0xbf, 0xaa, 0x19, 0x6b, 0x1a, 0xb3, 0x5e, + 0x97, 0xa2, 0x4e, 0xec, 0x7a, 0xf3, 0xdc, 0xa1, 0x70, 0xbe, 0xe6, 0xc7, 0xb7, 0xc2, 0x46, 0x1c, + 0xba, 0xfe, 0x76, 0xa6, 0x8e, 0x4d, 0x9e, 0xb5, 0xc5, 0xbc, 0xb3, 0x16, 0xbd, 0x0c, 0x23, 0xcc, + 0xa3, 0x51, 0x5e, 0x4f, 0x1e, 0x53, 0x77, 0x0a, 0x56, 0x7a, 0x7c, 0x58, 0x29, 0xdd, 0xc6, 0x35, + 0xfe, 0x07, 0x0b, 0x54, 0x74, 0x1b, 0xc6, 0x77, 0xe2, 0xb8, 0x7d, 0x9d, 0x38, 0x2d, 0x12, 0x4a, + 0xee, 0x70, 0x29, 0x8b, 0x3b, 0xd0, 0x41, 0xe0, 0x68, 0xc9, 0x86, 0x4a, 0xca, 0x22, 0xac, 0xd3, + 0xb1, 0x1b, 0x00, 0x09, 0xec, 0x94, 0xf4, 
0x0b, 0xf6, 0x1f, 0x58, 0x30, 0xca, 0x9d, 0x4b, 0x42, + 0xf4, 0x05, 0x18, 0x22, 0xf7, 0x49, 0x53, 0x48, 0x8e, 0x99, 0x1d, 0x4e, 0x04, 0x0f, 0xae, 0x2d, + 0xa7, 0xff, 0x31, 0xab, 0x85, 0xae, 0xc3, 0x28, 0xed, 0xed, 0x35, 0xe5, 0x69, 0xf3, 0x64, 0xde, + 0x17, 0xab, 0x69, 0xe7, 0xb2, 0x8a, 0x28, 0xc2, 0xb2, 0x3a, 0xd3, 0xfc, 0x36, 0xdb, 0x0d, 0xca, + 0xc0, 0xe2, 0x5e, 0xe7, 0xec, 0xc6, 0x72, 0x9d, 0x23, 0x09, 0x6a, 0x5c, 0xf3, 0x2b, 0x0b, 0x71, + 0x42, 0xc4, 0xde, 0x80, 0x12, 0x9d, 0xd4, 0x45, 0xcf, 0x75, 0x7a, 0x2b, 0x9d, 0x9f, 0x87, 0x92, + 0x54, 0x00, 0x47, 0xc2, 0xfd, 0x85, 0x51, 0x95, 0xfa, 0xe1, 0x08, 0x27, 0x70, 0x7b, 0x0b, 0xce, + 0x33, 0x6b, 0x0a, 0x27, 0xde, 0x31, 0xf6, 0x58, 0xff, 0xc5, 0xfc, 0x82, 0xb8, 0x88, 0xf1, 0x99, + 0x29, 0x6b, 0xf6, 0xfa, 0x13, 0x92, 0x62, 0x72, 0x29, 0xb3, 0xff, 0x68, 0x08, 0x1e, 0xab, 0x35, + 0xf2, 0xfd, 0x8e, 0x5e, 0x87, 0x09, 0x2e, 0xa6, 0xd1, 0xa5, 0xed, 0x78, 0xa2, 0x5d, 0xf5, 0xd6, + 0xb8, 0xa1, 0xc1, 0xb0, 0x81, 0x89, 0x9e, 0x80, 0xa2, 0xfb, 0xbe, 0x9f, 0x36, 0xce, 0xad, 0xbd, + 0xbd, 0x8e, 0x69, 0x39, 0x05, 0x53, 0x89, 0x8f, 0xb3, 0x52, 0x05, 0x56, 0x52, 0xdf, 0x9b, 0x30, + 0xe5, 0x46, 0xcd, 0xc8, 0xad, 0xf9, 0x94, 0xcf, 0x24, 0x3e, 0x6b, 0x89, 0x92, 0x80, 0x76, 0x5a, + 0x41, 0x71, 0x0a, 0x5b, 0xe3, 0xeb, 0xc3, 0x03, 0x4b, 0x8d, 0x7d, 0xfd, 0x41, 0xa8, 0x40, 0xdc, + 0x66, 0x5f, 0x17, 0x31, 0x43, 0x41, 0x21, 0x10, 0xf3, 0x0f, 0x8e, 0xb0, 0x84, 0xd1, 0x1b, 0x58, + 0x73, 0xc7, 0x69, 0x2f, 0x76, 0xe2, 0x9d, 0xaa, 0x1b, 0x35, 0x83, 0x7d, 0x12, 0x1e, 0xb0, 0xcb, + 0xf3, 0x58, 0x72, 0x03, 0x53, 0x80, 0xe5, 0xeb, 0x8b, 0x75, 0x8a, 0x89, 0xbb, 0xeb, 0x98, 0x52, + 0x21, 0x9c, 0x86, 0x54, 0xb8, 0x08, 0xd3, 0xb2, 0x99, 0x06, 0x89, 0xd8, 0x19, 0x31, 0xce, 0x3a, + 0xa6, 0xbc, 0x49, 0x45, 0xb1, 0xea, 0x56, 0x1a, 0x1f, 0xbd, 0x06, 0x93, 0xae, 0xef, 0xc6, 0xae, + 0x13, 0x07, 0x21, 0x3b, 0x61, 0xf9, 0x3d, 0x99, 0x3d, 0x8a, 0xd6, 0x74, 0x00, 0x36, 0xf1, 0xec, + 0x3f, 0x1c, 0x82, 0x59, 0x36, 0x6d, 0xdf, 0x5a, 0x61, 0x9f, 0x98, 0x15, 0x76, 
0xbb, 0x7b, 0x85, + 0x9d, 0x86, 0xb8, 0xfb, 0x71, 0x2e, 0xb3, 0xf7, 0xa0, 0xa4, 0xec, 0xab, 0xa5, 0x8b, 0x80, 0x95, + 0xe3, 0x22, 0xd0, 0x5f, 0xfa, 0x90, 0xcf, 0xb8, 0xc5, 0xcc, 0x67, 0xdc, 0xbf, 0x63, 0x41, 0x62, + 0x66, 0x8a, 0xae, 0x43, 0xa9, 0x1d, 0x30, 0x53, 0x8e, 0x50, 0xda, 0x47, 0x3d, 0x96, 0x79, 0x50, + 0xf1, 0x43, 0x91, 0x8f, 0x5f, 0x5d, 0xd6, 0xc0, 0x49, 0x65, 0xb4, 0x04, 0xa3, 0xed, 0x90, 0x34, + 0x62, 0xe6, 0x28, 0xd9, 0x97, 0x0e, 0x5f, 0x23, 0x1c, 0x1f, 0xcb, 0x8a, 0xf6, 0xcf, 0x59, 0x00, + 0xfc, 0xa5, 0xd4, 0xf1, 0xb7, 0xc9, 0x19, 0x68, 0x7f, 0xab, 0x30, 0x14, 0xb5, 0x49, 0xb3, 0x97, + 0x91, 0x4d, 0xd2, 0x9f, 0x46, 0x9b, 0x34, 0x93, 0x01, 0xa7, 0xff, 0x30, 0xab, 0x6d, 0x7f, 0x1f, + 0xc0, 0x54, 0x82, 0x56, 0x8b, 0xc9, 0x1e, 0x7a, 0xd1, 0x70, 0x43, 0xbb, 0x98, 0x72, 0x43, 0x2b, + 0x31, 0x6c, 0x4d, 0xd1, 0xf8, 0x1e, 0x14, 0xf7, 0x9c, 0xfb, 0x42, 0x93, 0xf4, 0x7c, 0xef, 0x6e, + 0x50, 0xfa, 0xf3, 0x6b, 0xce, 0x7d, 0x7e, 0x67, 0x7a, 0x5e, 0x2e, 0x90, 0x35, 0xe7, 0xfe, 0x31, + 0x37, 0xa5, 0x61, 0x4c, 0xea, 0xa6, 0x1b, 0xc5, 0x1f, 0xfe, 0x97, 0xe4, 0x3f, 0x5b, 0x76, 0xb4, + 0x11, 0xd6, 0x96, 0xeb, 0x8b, 0x77, 0xc3, 0x81, 0xda, 0x72, 0xfd, 0x74, 0x5b, 0xae, 0x3f, 0x40, + 0x5b, 0xae, 0x8f, 0x3e, 0x80, 0x51, 0xf1, 0x46, 0xcf, 0xec, 0xe7, 0x4d, 0x2d, 0x55, 0x5e, 0x7b, + 0xe2, 0x89, 0x9f, 0xb7, 0xb9, 0x20, 0xef, 0x84, 0xa2, 0xb4, 0x6f, 0xbb, 0xb2, 0x41, 0xf4, 0xb7, + 0x2c, 0x98, 0x12, 0xbf, 0x31, 0x79, 0xbf, 0x43, 0xa2, 0x58, 0xc8, 0x9e, 0x9f, 0x1b, 0xbc, 0x0f, + 0xa2, 0x22, 0xef, 0xca, 0xe7, 0x24, 0x9b, 0x35, 0x81, 0x7d, 0x7b, 0x94, 0xea, 0x05, 0xfa, 0x27, + 0x16, 0x9c, 0xdf, 0x73, 0xee, 0xf3, 0x16, 0x79, 0x19, 0x76, 0x62, 0x37, 0x10, 0xfe, 0x00, 0x5f, + 0x18, 0x6c, 0xfa, 0xbb, 0xaa, 0xf3, 0x4e, 0x4a, 0xd3, 0xe1, 0xf3, 0x59, 0x28, 0x7d, 0xbb, 0x9a, + 0xd9, 0xaf, 0xb9, 0x2d, 0x18, 0x93, 0xeb, 0x2d, 0xe3, 0xe6, 0x5d, 0xd5, 0x05, 0xeb, 0x13, 0x9b, + 0x48, 0x68, 0x37, 0x75, 0xd6, 0x8e, 0x58, 0x6b, 0x0f, 0xb5, 0x9d, 0xf7, 0x60, 0x42, 0x5f, 0x63, + 0x0f, 0xb5, 0xad, 
0xf7, 0xe1, 0x5c, 0xc6, 0x5a, 0x7a, 0xa8, 0x4d, 0xde, 0x83, 0x8b, 0xb9, 0xeb, + 0xe3, 0x61, 0x36, 0x6c, 0xff, 0xac, 0xa5, 0xf3, 0xc1, 0x33, 0x50, 0xc1, 0x2f, 0x9b, 0x2a, 0xf8, + 0x4b, 0xbd, 0x77, 0x4e, 0x8e, 0x1e, 0xfe, 0x5d, 0xbd, 0xd3, 0x94, 0xab, 0xa3, 0xb7, 0x60, 0xc4, + 0xa3, 0x25, 0xd2, 0x38, 0xc4, 0xee, 0xbf, 0x23, 0x13, 0x59, 0x8a, 0x95, 0x47, 0x58, 0x50, 0xb0, + 0x7f, 0xd1, 0x82, 0xa1, 0x33, 0x18, 0x09, 0x6c, 0x8e, 0xc4, 0x8b, 0xb9, 0xa4, 0x45, 0x64, 0xa2, + 0x79, 0xec, 0xdc, 0x5b, 0x91, 0xd1, 0x97, 0x72, 0x06, 0xe6, 0xff, 0x16, 0x60, 0x9c, 0x36, 0x25, + 0xad, 0x18, 0xdf, 0x80, 0x49, 0xcf, 0xd9, 0x24, 0x9e, 0x7c, 0xc7, 0x4d, 0x2b, 0x4c, 0x6e, 0xea, + 0x40, 0x6c, 0xe2, 0xd2, 0xca, 0x5b, 0xfa, 0x93, 0xb6, 0x90, 0x5f, 0x54, 0x65, 0xe3, 0xbd, 0x1b, + 0x9b, 0xb8, 0xf4, 0xee, 0x7e, 0xcf, 0x89, 0x9b, 0x3b, 0x42, 0x99, 0xa2, 0xba, 0x7b, 0x97, 0x16, + 0x62, 0x0e, 0xa3, 0x02, 0x9c, 0x5c, 0x9d, 0x77, 0xe8, 0xcd, 0x30, 0xf0, 0x85, 0x78, 0xac, 0x04, + 0x38, 0x6c, 0x82, 0x71, 0x1a, 0x3f, 0xc3, 0x1f, 0x7d, 0x98, 0xd9, 0x68, 0x0e, 0xe0, 0x8f, 0x8e, + 0xea, 0x70, 0xde, 0xf5, 0x9b, 0x5e, 0xa7, 0x45, 0x6e, 0xfb, 0x5c, 0xba, 0xf3, 0xdc, 0x0f, 0x48, + 0x4b, 0x08, 0xd0, 0xca, 0x9c, 0xb6, 0x96, 0x81, 0x83, 0x33, 0x6b, 0xda, 0x7f, 0x01, 0xce, 0xdd, + 0x0c, 0x9c, 0xd6, 0x92, 0xe3, 0x39, 0x7e, 0x93, 0x84, 0x35, 0x7f, 0xbb, 0xaf, 0x95, 0x98, 0x6e, + 0xd3, 0x55, 0xe8, 0x67, 0xd3, 0x65, 0xef, 0x00, 0xd2, 0x1b, 0x10, 0xb6, 0xc9, 0x18, 0x46, 0x5d, + 0xde, 0x94, 0x58, 0xfe, 0xcf, 0x64, 0x4b, 0xd7, 0x5d, 0x3d, 0xd3, 0xac, 0x6e, 0x79, 0x01, 0x96, + 0x84, 0xec, 0xd7, 0x21, 0xd3, 0x1f, 0xb1, 0xbf, 0xda, 0xc6, 0x7e, 0x15, 0x66, 0x59, 0xcd, 0x93, + 0xa9, 0x14, 0xec, 0xbf, 0x66, 0xc1, 0xf4, 0x7a, 0x2a, 0x82, 0xc4, 0xd3, 0xec, 0xad, 0x35, 0x43, + 0xef, 0xde, 0x60, 0xa5, 0x58, 0x40, 0x4f, 0x5d, 0xbf, 0xf7, 0x67, 0x16, 0x94, 0x54, 0x70, 0x9a, + 0x33, 0x10, 0x6a, 0x97, 0x0d, 0xa1, 0x36, 0x53, 0xef, 0xa4, 0xba, 0x93, 0x27, 0xd3, 0xa2, 0x1b, + 0x2a, 0x16, 0x42, 0x0f, 0x95, 0x53, 0x42, 0x86, 0x7b, 
0xce, 0x4f, 0x99, 0x01, 0x13, 0x64, 0x74, + 0x04, 0x66, 0xa6, 0xa5, 0x70, 0x3f, 0x21, 0x66, 0x5a, 0xaa, 0x3f, 0x39, 0xdc, 0xaf, 0xae, 0x75, + 0x99, 0x9d, 0x0a, 0xdf, 0xce, 0x5c, 0x19, 0xd8, 0xde, 0x54, 0x21, 0x48, 0x2a, 0xc2, 0x35, 0x41, + 0x94, 0x1e, 0x33, 0x46, 0x26, 0xfe, 0xf1, 0x40, 0x42, 0x49, 0x15, 0xfb, 0x3a, 0x4c, 0xa7, 0x06, + 0x0c, 0xbd, 0x0a, 0xc3, 0xed, 0x1d, 0x27, 0x22, 0x29, 0xd3, 0xd4, 0xe1, 0x3a, 0x2d, 0x3c, 0x3e, + 0xac, 0x4c, 0xa9, 0x0a, 0xac, 0x04, 0x73, 0x6c, 0xfb, 0x7f, 0x5a, 0x30, 0xb4, 0x1e, 0xb4, 0xce, + 0x62, 0x31, 0xbd, 0x69, 0x2c, 0xa6, 0xc7, 0xf3, 0xc2, 0xb0, 0xe5, 0xae, 0xa3, 0xd5, 0xd4, 0x3a, + 0xba, 0x94, 0x4b, 0xa1, 0xf7, 0x12, 0xda, 0x83, 0x71, 0x16, 0xdc, 0x4d, 0x98, 0xca, 0xbe, 0x6c, + 0xdc, 0xaf, 0x2a, 0xa9, 0xfb, 0xd5, 0xb4, 0x86, 0xaa, 0xdd, 0xb2, 0x9e, 0x85, 0x51, 0x61, 0xae, + 0x99, 0x76, 0xda, 0x10, 0xb8, 0x58, 0xc2, 0xed, 0x1f, 0x2f, 0x82, 0x11, 0x4c, 0x0e, 0xfd, 0x8a, + 0x05, 0xf3, 0x21, 0xf7, 0x82, 0x6d, 0x55, 0x3b, 0xa1, 0xeb, 0x6f, 0x37, 0x9a, 0x3b, 0xa4, 0xd5, + 0xf1, 0x5c, 0x7f, 0xbb, 0xb6, 0xed, 0x07, 0xaa, 0x78, 0xe5, 0x3e, 0x69, 0x76, 0xd8, 0x9b, 0x4b, + 0x9f, 0xc8, 0x75, 0xca, 0x1c, 0xea, 0xea, 0xd1, 0x61, 0x65, 0x1e, 0x9f, 0x88, 0x36, 0x3e, 0x61, + 0x5f, 0xd0, 0x6f, 0x5a, 0xb0, 0xc0, 0x63, 0xac, 0x0d, 0xde, 0xff, 0x1e, 0xb7, 0xd1, 0xba, 0x24, + 0x95, 0x10, 0xd9, 0x20, 0xe1, 0xde, 0xd2, 0x6b, 0x62, 0x40, 0x17, 0xea, 0x27, 0x6b, 0x0b, 0x9f, + 0xb4, 0x73, 0xf6, 0xbf, 0x29, 0xc2, 0x24, 0x1d, 0xc5, 0x24, 0xf2, 0xcb, 0xab, 0xc6, 0x92, 0x78, + 0x32, 0xb5, 0x24, 0x66, 0x0d, 0xe4, 0xd3, 0x09, 0xfa, 0x12, 0xc1, 0xac, 0xe7, 0x44, 0xf1, 0x75, + 0xe2, 0x84, 0xf1, 0x26, 0x71, 0xb8, 0x99, 0x50, 0xf1, 0xc4, 0x26, 0x4d, 0x4a, 0xfd, 0x75, 0x33, + 0x4d, 0x0c, 0x77, 0xd3, 0x47, 0xfb, 0x80, 0x98, 0xad, 0x53, 0xe8, 0xf8, 0x11, 0xff, 0x16, 0x57, + 0xbc, 0xc7, 0x9c, 0xac, 0xd5, 0x39, 0xd1, 0x2a, 0xba, 0xd9, 0x45, 0x0d, 0x67, 0xb4, 0xa0, 0xd9, + 0xb0, 0x0d, 0x0f, 0x6a, 0xc3, 0x36, 0xd2, 0xc7, 0x33, 0x6a, 0x0f, 0x66, 0xc4, 0xac, 0x6c, 
0xb9, + 0xdb, 0xe2, 0x90, 0xfe, 0x72, 0xca, 0xc6, 0xd5, 0x1a, 0xdc, 0x50, 0xa9, 0x8f, 0x81, 0xab, 0xfd, + 0xdd, 0x70, 0x8e, 0x36, 0x67, 0xfa, 0xf1, 0x44, 0x88, 0xc0, 0xf4, 0x6e, 0x67, 0x93, 0x78, 0x24, + 0x96, 0x65, 0xa2, 0xd1, 0x4c, 0xb1, 0xdf, 0xac, 0x9d, 0xc8, 0x96, 0x37, 0x4c, 0x12, 0x38, 0x4d, + 0xd3, 0xfe, 0x49, 0x0b, 0x98, 0xb5, 0xfc, 0x19, 0x1c, 0x7f, 0x5f, 0x34, 0x8f, 0xbf, 0x72, 0x1e, + 0x07, 0xca, 0x39, 0xf9, 0x5e, 0xe1, 0xd3, 0x52, 0x0f, 0x83, 0xfb, 0x07, 0x52, 0xf6, 0xef, 0x2f, + 0x71, 0xfd, 0x1f, 0x8b, 0x6f, 0x48, 0x15, 0x14, 0x00, 0x7d, 0x0f, 0x8c, 0x35, 0x9d, 0xb6, 0xd3, + 0xe4, 0x51, 0x3c, 0x73, 0xb5, 0x3f, 0x46, 0xa5, 0xf9, 0x65, 0x51, 0x83, 0x6b, 0x33, 0x3e, 0x2b, + 0xbf, 0x52, 0x16, 0xf7, 0xd5, 0x60, 0xa8, 0x26, 0xe7, 0x76, 0x61, 0xd2, 0x20, 0xf6, 0x50, 0xaf, + 0xbe, 0xdf, 0xc3, 0x8f, 0x0b, 0x75, 0x63, 0xd9, 0x83, 0x59, 0x5f, 0xfb, 0x4f, 0x99, 0xa3, 0x14, + 0xa7, 0x3f, 0xdd, 0xef, 0x40, 0x60, 0x9c, 0x54, 0xf3, 0x06, 0x48, 0x91, 0xc1, 0xdd, 0x94, 0xed, + 0xbf, 0x67, 0xc1, 0xa3, 0x3a, 0xa2, 0x16, 0xaf, 0xa1, 0x9f, 0x3e, 0xb9, 0x0a, 0x63, 0x41, 0x9b, + 0x84, 0x4e, 0x72, 0x27, 0xbb, 0x22, 0x07, 0xfd, 0x96, 0x28, 0x3f, 0x3e, 0xac, 0x9c, 0xd7, 0xa9, + 0xcb, 0x72, 0xac, 0x6a, 0x22, 0x1b, 0x46, 0xd8, 0x60, 0x44, 0x22, 0x96, 0x06, 0x33, 0x53, 0x64, + 0x4f, 0xab, 0x11, 0x16, 0x10, 0xfb, 0xfb, 0x2c, 0xbe, 0xb0, 0xf4, 0xae, 0xa3, 0xf7, 0x61, 0x66, + 0x8f, 0x5e, 0xdf, 0x56, 0xee, 0xb7, 0x43, 0xae, 0x46, 0x97, 0xe3, 0xf4, 0x7c, 0xbf, 0x71, 0xd2, + 0x3e, 0x32, 0x31, 0x66, 0x5b, 0x4b, 0x11, 0xc3, 0x5d, 0xe4, 0xed, 0x3f, 0x29, 0xf0, 0x9d, 0xc8, + 0xa4, 0xba, 0x67, 0x61, 0xb4, 0x1d, 0xb4, 0x96, 0x6b, 0x55, 0x2c, 0x46, 0x48, 0xb1, 0xab, 0x3a, + 0x2f, 0xc6, 0x12, 0x8e, 0xae, 0x02, 0x90, 0xfb, 0x31, 0x09, 0x7d, 0xc7, 0x53, 0x86, 0x1f, 0x4a, + 0x78, 0x5a, 0x51, 0x10, 0xac, 0x61, 0xd1, 0x3a, 0xed, 0x30, 0xd8, 0x77, 0x5b, 0xcc, 0xdb, 0xb0, + 0x68, 0xd6, 0xa9, 0x2b, 0x08, 0xd6, 0xb0, 0xe8, 0x55, 0xb9, 0xe3, 0x47, 0xfc, 0x00, 0x74, 0x36, + 0x45, 0xf8, 0xb9, 0xb1, 0xe4, 
0xaa, 0x7c, 0x5b, 0x07, 0x62, 0x13, 0x17, 0x2d, 0xc2, 0x48, 0xec, + 0x30, 0x73, 0x86, 0xe1, 0x7c, 0xb3, 0xc4, 0x0d, 0x8a, 0xa1, 0x87, 0x75, 0xa4, 0x15, 0xb0, 0xa8, + 0x88, 0xde, 0x91, 0x2c, 0x98, 0xb3, 0x64, 0x61, 0x0f, 0x9c, 0xbb, 0x6c, 0x75, 0xf6, 0xad, 0xf3, + 0x60, 0x61, 0x67, 0x6c, 0xd0, 0xb2, 0xbf, 0xb7, 0x04, 0x90, 0x48, 0x7b, 0xe8, 0x83, 0x2e, 0x16, + 0xf1, 0x42, 0x6f, 0xf9, 0xf0, 0xf4, 0xf8, 0x03, 0xfa, 0x7e, 0x0b, 0xc6, 0x1d, 0xcf, 0x0b, 0x9a, + 0x4e, 0xcc, 0x46, 0xb9, 0xd0, 0x9b, 0x45, 0x89, 0xf6, 0x17, 0x93, 0x1a, 0xbc, 0x0b, 0x2f, 0x4b, + 0x4b, 0x05, 0x0d, 0xd2, 0xb7, 0x17, 0x7a, 0xc3, 0xe8, 0xb3, 0xf2, 0x12, 0xc0, 0x97, 0xc7, 0x5c, + 0xfa, 0x12, 0x50, 0x62, 0xdc, 0x58, 0x93, 0xff, 0xd1, 0x6d, 0x23, 0x4e, 0xdb, 0x50, 0x7e, 0x48, + 0x0a, 0x43, 0xe8, 0xe9, 0x17, 0xa2, 0x0d, 0xd5, 0x75, 0xbf, 0xa8, 0xe1, 0xfc, 0xb8, 0x2d, 0x9a, + 0x74, 0xdd, 0xc7, 0x27, 0xea, 0x3d, 0x98, 0x6e, 0x99, 0xc7, 0xad, 0x58, 0x4d, 0xcf, 0xe4, 0xd1, + 0x4d, 0x9d, 0xce, 0xc9, 0x01, 0x9b, 0x02, 0xe0, 0x34, 0x61, 0x54, 0xe7, 0x1e, 0x6a, 0x35, 0x7f, + 0x2b, 0x10, 0x76, 0xe5, 0x76, 0xee, 0x5c, 0x1e, 0x44, 0x31, 0xd9, 0xa3, 0x98, 0xc9, 0x39, 0xba, + 0x2e, 0xea, 0x62, 0x45, 0x05, 0xbd, 0x05, 0x23, 0xcc, 0x6d, 0x38, 0x2a, 0x8f, 0xe5, 0xeb, 0x01, + 0xcd, 0x88, 0x17, 0xc9, 0xa6, 0x62, 0x7f, 0x23, 0x2c, 0x28, 0xa0, 0xeb, 0x32, 0x2c, 0x4e, 0x54, + 0xf3, 0x6f, 0x47, 0x84, 0x85, 0xc5, 0x29, 0x2d, 0x7d, 0x3a, 0x89, 0x78, 0xc3, 0xcb, 0x33, 0x03, + 0x38, 0x1b, 0x35, 0xa9, 0xbc, 0x22, 0xfe, 0xcb, 0xb8, 0xd0, 0x65, 0xc8, 0xef, 0x9e, 0x19, 0x3b, + 0x3a, 0x19, 0xce, 0x3b, 0x26, 0x09, 0x9c, 0xa6, 0x79, 0xa6, 0xc7, 0xe7, 0x9c, 0x0f, 0x33, 0xe9, + 0x8d, 0xf5, 0x50, 0x8f, 0xeb, 0x3f, 0x18, 0x82, 0x29, 0x73, 0x21, 0xa0, 0x05, 0x28, 0x09, 0x22, + 0x2a, 0x44, 0xa6, 0x5a, 0xdb, 0x6b, 0x12, 0x80, 0x13, 0x1c, 0x16, 0x22, 0x94, 0x55, 0xd7, 0xec, + 0x00, 0x93, 0x10, 0xa1, 0x0a, 0x82, 0x35, 0x2c, 0x2a, 0x44, 0x6f, 0x06, 0x41, 0xac, 0x8e, 0x02, + 0xb5, 0x5a, 0x96, 0x58, 0x29, 0x16, 0x50, 0x7a, 0x04, 0xec, 0x92, 
0xd0, 0x27, 0x9e, 0xa9, 0xc9, + 0x54, 0x47, 0xc0, 0x0d, 0x1d, 0x88, 0x4d, 0x5c, 0x7a, 0xa4, 0x05, 0x11, 0x5b, 0x7e, 0x42, 0x54, + 0x4f, 0xec, 0x2a, 0x1b, 0xdc, 0x6d, 0x5e, 0xc2, 0xd1, 0x97, 0xe1, 0x51, 0xe5, 0xe5, 0x8e, 0xb9, + 0x66, 0x58, 0xb6, 0x38, 0x62, 0xdc, 0xac, 0x1f, 0x5d, 0xce, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0x9b, + 0x30, 0x25, 0x44, 0x60, 0x49, 0x71, 0xd4, 0x34, 0x56, 0xb8, 0x61, 0x40, 0x71, 0x0a, 0x1b, 0x55, + 0x61, 0x86, 0x96, 0x30, 0x29, 0x54, 0x52, 0xe0, 0xde, 0xfa, 0xea, 0xac, 0xbf, 0x91, 0x82, 0xe3, + 0xae, 0x1a, 0x68, 0x11, 0xa6, 0xb9, 0x8c, 0x42, 0xef, 0x94, 0x6c, 0x1e, 0x84, 0xbb, 0x87, 0xda, + 0x08, 0xb7, 0x4c, 0x30, 0x4e, 0xe3, 0xa3, 0xd7, 0x61, 0xc2, 0x09, 0x9b, 0x3b, 0x6e, 0x4c, 0x9a, + 0x71, 0x27, 0xe4, 0x7e, 0x20, 0x9a, 0xb5, 0xc7, 0xa2, 0x06, 0xc3, 0x06, 0xa6, 0xfd, 0x01, 0x9c, + 0xcb, 0xf0, 0x14, 0xa3, 0x0b, 0xc7, 0x69, 0xbb, 0xf2, 0x9b, 0x52, 0x16, 0x92, 0x8b, 0xf5, 0x9a, + 0xfc, 0x1a, 0x0d, 0x8b, 0xae, 0x4e, 0xa6, 0x12, 0xd7, 0x82, 0xb7, 0xab, 0xd5, 0xb9, 0x2a, 0x01, + 0x38, 0xc1, 0xb1, 0x7f, 0x03, 0x40, 0x53, 0xe8, 0x0c, 0x60, 0x1f, 0xf7, 0x3a, 0x4c, 0xc8, 0x8c, + 0x03, 0x5a, 0xa4, 0x6b, 0xf5, 0x99, 0xd7, 0x34, 0x18, 0x36, 0x30, 0x69, 0xdf, 0x7c, 0x15, 0xa7, + 0x3b, 0x65, 0x8f, 0x99, 0x44, 0xe9, 0x4e, 0x70, 0xd0, 0x0b, 0x30, 0x16, 0x11, 0x6f, 0xeb, 0xa6, + 0xeb, 0xef, 0x8a, 0x85, 0xad, 0xb8, 0x70, 0x43, 0x94, 0x63, 0x85, 0x81, 0x96, 0xa0, 0xd8, 0x71, + 0x5b, 0x62, 0x29, 0xcb, 0x03, 0xbf, 0x78, 0xbb, 0x56, 0x3d, 0x3e, 0xac, 0x3c, 0x99, 0x97, 0x48, + 0x81, 0x5e, 0xed, 0xa3, 0x79, 0xba, 0xfd, 0x68, 0xe5, 0xac, 0xb7, 0x81, 0x91, 0x13, 0xbe, 0x0d, + 0x5c, 0x05, 0x10, 0x5f, 0x2d, 0xd7, 0x72, 0x31, 0x99, 0xb5, 0x6b, 0x0a, 0x82, 0x35, 0x2c, 0x14, + 0xc1, 0x6c, 0x33, 0x24, 0x8e, 0xbc, 0x43, 0x73, 0x9f, 0xa7, 0xb1, 0x07, 0x57, 0x10, 0x2c, 0xa7, + 0x89, 0xe1, 0x6e, 0xfa, 0x28, 0x80, 0xd9, 0x96, 0x08, 0xaa, 0x90, 0x34, 0x5a, 0x3a, 0xb9, 0xa3, + 0x15, 0x33, 0xc8, 0x49, 0x13, 0xc2, 0xdd, 0xb4, 0xd1, 0x57, 0x60, 0x4e, 0x16, 0x76, 0xc7, 0xb1, + 0x60, 
0xdb, 0xa5, 0xb8, 0x74, 0xe9, 0xe8, 0xb0, 0x32, 0x57, 0xcd, 0xc5, 0xc2, 0x3d, 0x28, 0x20, + 0x0c, 0x23, 0xec, 0x2d, 0x29, 0x2a, 0x8f, 0xb3, 0x73, 0xee, 0xb9, 0x7c, 0x65, 0x00, 0x5d, 0xeb, + 0xf3, 0xec, 0x1d, 0x4a, 0x98, 0x94, 0x27, 0xcf, 0x72, 0xac, 0x10, 0x0b, 0x4a, 0x68, 0x0b, 0xc6, + 0x1d, 0xdf, 0x0f, 0x62, 0x87, 0x8b, 0x50, 0x13, 0xf9, 0xb2, 0x9f, 0x46, 0x78, 0x31, 0xa9, 0xc1, + 0xa9, 0x2b, 0x2b, 0x55, 0x0d, 0x82, 0x75, 0xc2, 0xe8, 0x1e, 0x4c, 0x07, 0xf7, 0x28, 0x73, 0x94, + 0x5a, 0x8a, 0xa8, 0x3c, 0xc9, 0xda, 0x7a, 0x65, 0x40, 0x3d, 0xad, 0x51, 0x59, 0xe3, 0x5a, 0x26, + 0x51, 0x9c, 0x6e, 0x05, 0xcd, 0x1b, 0xda, 0xea, 0xa9, 0xc4, 0x9d, 0x25, 0xd1, 0x56, 0xeb, 0xca, + 0x69, 0x16, 0x17, 0x85, 0x9b, 0x48, 0xb3, 0xdd, 0x3f, 0x9d, 0x8a, 0x8b, 0x92, 0x80, 0xb0, 0x8e, + 0x87, 0x76, 0x60, 0x22, 0x79, 0xb2, 0x0a, 0x23, 0x16, 0x95, 0x6d, 0xfc, 0xea, 0xd5, 0xc1, 0x3e, + 0xae, 0xa6, 0xd5, 0xe4, 0x37, 0x07, 0xbd, 0x04, 0x1b, 0x94, 0xe7, 0xbe, 0x0d, 0xc6, 0xb5, 0x89, + 0x3d, 0x89, 0x07, 0xc0, 0xdc, 0x9b, 0x30, 0x93, 0x9e, 0xba, 0x13, 0x79, 0x10, 0xfc, 0xef, 0x02, + 0x4c, 0x67, 0xbc, 0x5c, 0xb1, 0x64, 0x0c, 0x29, 0x86, 0x9a, 0xe4, 0x5e, 0x30, 0xd9, 0x62, 0x61, + 0x00, 0xb6, 0x28, 0x79, 0x74, 0x31, 0x97, 0x47, 0x0b, 0x56, 0x38, 0xf4, 0x51, 0x58, 0xa1, 0x79, + 0xfa, 0x0c, 0x0f, 0x74, 0xfa, 0x9c, 0x02, 0xfb, 0x34, 0x0e, 0xb0, 0xd1, 0x01, 0x0e, 0xb0, 0x1f, + 0x2a, 0xc0, 0x4c, 0xda, 0xc2, 0xf7, 0x0c, 0xde, 0x3b, 0xde, 0x32, 0xde, 0x3b, 0xb2, 0x53, 0x9b, + 0xa4, 0xed, 0x8e, 0xf3, 0xde, 0x3e, 0x70, 0xea, 0xed, 0xe3, 0xb9, 0x81, 0xa8, 0xf5, 0x7e, 0x07, + 0xf9, 0xfb, 0x05, 0xb8, 0x90, 0xae, 0xb2, 0xec, 0x39, 0xee, 0xde, 0x19, 0x8c, 0xcd, 0x2d, 0x63, + 0x6c, 0x5e, 0x1c, 0xe4, 0x6b, 0x58, 0xd7, 0x72, 0x07, 0xe8, 0x6e, 0x6a, 0x80, 0x16, 0x06, 0x27, + 0xd9, 0x7b, 0x94, 0xbe, 0x51, 0x84, 0x4b, 0x99, 0xf5, 0x92, 0xe7, 0x82, 0x55, 0xe3, 0xb9, 0xe0, + 0x6a, 0xea, 0xb9, 0xc0, 0xee, 0x5d, 0xfb, 0x74, 0xde, 0x0f, 0x84, 0x3b, 0x34, 0x8b, 0x18, 0xfa, + 0x80, 0x6f, 0x07, 0x86, 0x3b, 0xb4, 0x22, 
0x84, 0x4d, 0xba, 0xdf, 0x4c, 0x6f, 0x06, 0xbf, 0x61, + 0xc1, 0xc5, 0xcc, 0xb9, 0x39, 0x03, 0xbd, 0xfa, 0xba, 0xa9, 0x57, 0x7f, 0x76, 0xe0, 0xd5, 0x9a, + 0xa3, 0x68, 0xff, 0xc3, 0x62, 0xce, 0xb7, 0x30, 0xcd, 0xe4, 0x2d, 0x18, 0x77, 0x9a, 0x4d, 0x12, + 0x45, 0x6b, 0x41, 0x4b, 0x45, 0xd0, 0x7c, 0x91, 0x49, 0x1b, 0x49, 0xf1, 0xf1, 0x61, 0x65, 0x2e, + 0x4d, 0x22, 0x01, 0x63, 0x9d, 0x82, 0x19, 0xf4, 0xb7, 0x70, 0xaa, 0x41, 0x7f, 0xaf, 0x02, 0xec, + 0x2b, 0x7d, 0x45, 0x5a, 0xcd, 0xa9, 0x69, 0x32, 0x34, 0x2c, 0xf4, 0x5d, 0xec, 0x16, 0xc0, 0x8d, + 0x81, 0xf8, 0x52, 0x7c, 0x79, 0xc0, 0xb9, 0xd2, 0x0d, 0x8b, 0x78, 0xdc, 0x0d, 0xa5, 0x12, 0x56, + 0x24, 0xd1, 0x77, 0xc0, 0x4c, 0xc4, 0xc3, 0x3a, 0x2d, 0x7b, 0x4e, 0xc4, 0x9c, 0xb8, 0xc4, 0x2a, + 0x64, 0xc1, 0x34, 0x1a, 0x29, 0x18, 0xee, 0xc2, 0x46, 0xab, 0xf2, 0xa3, 0x58, 0x0c, 0x2a, 0xbe, + 0x30, 0x9f, 0x4e, 0x3e, 0x48, 0xa4, 0x82, 0x3a, 0x9f, 0x1e, 0x7e, 0x36, 0xf0, 0x5a, 0x4d, 0xfb, + 0x87, 0x86, 0xe0, 0xb1, 0x1e, 0x4c, 0x0c, 0x2d, 0x9a, 0x46, 0x00, 0xcf, 0xa7, 0xf5, 0x7f, 0x73, + 0x99, 0x95, 0x0d, 0x85, 0x60, 0x6a, 0xad, 0x14, 0x3e, 0xf2, 0x5a, 0xf9, 0x01, 0x4b, 0xd3, 0xcc, + 0x72, 0x53, 0xe1, 0x2f, 0x9e, 0x90, 0x39, 0x9f, 0xa2, 0xaa, 0x76, 0x2b, 0x43, 0xdf, 0x79, 0x75, + 0xe0, 0xee, 0x0c, 0xac, 0x00, 0x3d, 0xdb, 0x27, 0xa3, 0x0f, 0x2d, 0x78, 0x32, 0xb3, 0xbf, 0x86, + 0xd1, 0xd2, 0x02, 0x94, 0x9a, 0xb4, 0x50, 0x73, 0x0c, 0x4d, 0x3c, 0xe6, 0x25, 0x00, 0x27, 0x38, + 0x86, 0x6d, 0x52, 0xa1, 0xaf, 0x6d, 0xd2, 0xbf, 0xb6, 0xa0, 0x6b, 0x01, 0x9f, 0x01, 0x27, 0xad, + 0x99, 0x9c, 0xf4, 0xd3, 0x83, 0xcc, 0x65, 0x0e, 0x13, 0xfd, 0xdd, 0x69, 0x78, 0x24, 0xc7, 0x13, + 0x6c, 0x1f, 0x66, 0xb7, 0x9b, 0xc4, 0x74, 0xb9, 0x15, 0x1f, 0x93, 0xe9, 0x9d, 0xdc, 0xd3, 0x3f, + 0x97, 0x5f, 0x88, 0xbb, 0x50, 0x70, 0x77, 0x13, 0xe8, 0x43, 0x0b, 0xce, 0x3b, 0xf7, 0xa2, 0xae, + 0x4c, 0x8d, 0x62, 0xcd, 0xbc, 0x92, 0xa9, 0xa7, 0xed, 0x93, 0xd9, 0x91, 0xb9, 0xc5, 0x9d, 0xcf, + 0xc2, 0xc2, 0x99, 0x6d, 0x21, 0x2c, 0x82, 0x1e, 0x53, 0x79, 0xbb, 0x87, 0x53, 
0x78, 0x96, 0xcb, + 0x1e, 0xe7, 0xa9, 0x12, 0x82, 0x15, 0x1d, 0x74, 0x07, 0x4a, 0xdb, 0xd2, 0x8f, 0x56, 0xf0, 0xec, + 0xcc, 0x43, 0x30, 0xd3, 0xd9, 0x96, 0xfb, 0x8e, 0x28, 0x10, 0x4e, 0x48, 0xa1, 0x37, 0xa1, 0xe8, + 0x6f, 0x45, 0xbd, 0x92, 0x4d, 0xa5, 0x6c, 0xf9, 0x78, 0xc0, 0x85, 0xf5, 0xd5, 0x06, 0xa6, 0x15, + 0xd1, 0x75, 0x28, 0x86, 0x9b, 0x2d, 0xf1, 0xb4, 0x90, 0x29, 0x97, 0xe2, 0xa5, 0x6a, 0xf6, 0x22, + 0xe1, 0x94, 0xf0, 0x52, 0x15, 0x53, 0x12, 0xa8, 0x0e, 0xc3, 0xcc, 0x69, 0x4a, 0xbc, 0x20, 0x64, + 0x0a, 0xa4, 0x3d, 0x9c, 0x0f, 0x79, 0x54, 0x06, 0x86, 0x80, 0x39, 0x21, 0xf4, 0x16, 0x8c, 0x34, + 0x59, 0x3e, 0x26, 0xa1, 0xf8, 0xc9, 0x0e, 0xd7, 0xd5, 0x95, 0xb1, 0x89, 0xbf, 0xa0, 0xf2, 0x72, + 0x2c, 0x28, 0xa0, 0x0d, 0x18, 0x69, 0x92, 0xf6, 0xce, 0x56, 0x24, 0xf4, 0x39, 0x9f, 0xcd, 0xa4, + 0xd5, 0x23, 0xfd, 0x98, 0xa0, 0xca, 0x30, 0xb0, 0xa0, 0x85, 0x3e, 0x0f, 0x85, 0xad, 0xa6, 0xf0, + 0xa4, 0xca, 0x7c, 0x43, 0x30, 0x23, 0x65, 0x2c, 0x8d, 0x1c, 0x1d, 0x56, 0x0a, 0xab, 0xcb, 0xb8, + 0xb0, 0xd5, 0x44, 0xeb, 0x30, 0xba, 0xc5, 0x7d, 0xeb, 0x45, 0x00, 0x9b, 0x67, 0xb2, 0xdd, 0xfe, + 0xbb, 0xdc, 0xef, 0xb9, 0x07, 0x90, 0x00, 0x60, 0x49, 0x84, 0xc5, 0x0b, 0x56, 0x31, 0x02, 0x44, + 0xe0, 0xfc, 0xf9, 0x93, 0xc5, 0x75, 0x10, 0x6a, 0x0e, 0x45, 0x05, 0x6b, 0x14, 0xd1, 0x57, 0xa1, + 0xe4, 0xc8, 0xcc, 0x80, 0x22, 0x08, 0xce, 0xcb, 0x99, 0xdb, 0xb1, 0x77, 0xd2, 0x44, 0xbe, 0x96, + 0x15, 0x12, 0x4e, 0x88, 0xa2, 0x5d, 0x98, 0xdc, 0x8f, 0xda, 0x3b, 0x44, 0x6e, 0x5f, 0x16, 0x13, + 0x27, 0xe7, 0xb8, 0xba, 0x23, 0x10, 0xdd, 0x30, 0xee, 0x38, 0x5e, 0x17, 0xc7, 0x61, 0x8e, 0x63, + 0x77, 0x74, 0x62, 0xd8, 0xa4, 0x4d, 0x87, 0xff, 0xfd, 0x4e, 0xb0, 0x79, 0x10, 0x13, 0x11, 0x69, + 0x3f, 0x73, 0xf8, 0xdf, 0xe6, 0x28, 0xdd, 0xc3, 0x2f, 0x00, 0x58, 0x12, 0xa1, 0x1b, 0xdc, 0x91, + 0x59, 0x37, 0x85, 0x2e, 0xe7, 0xd9, 0xdc, 0xe1, 0xe9, 0xea, 0x6f, 0x32, 0x28, 0x8c, 0x33, 0x26, + 0xa4, 0x18, 0x47, 0x6c, 0xef, 0x04, 0x71, 0xe0, 0xa7, 0xb8, 0xf1, 0x6c, 0x3e, 0x47, 0xac, 0x67, + 0xe0, 0x77, 0x73, 
0xc4, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0x6a, 0xc1, 0x54, 0x3b, 0x08, 0xe3, 0x7b, + 0x41, 0x28, 0xd7, 0x17, 0xea, 0x71, 0xc9, 0x37, 0x30, 0x45, 0x8b, 0xcc, 0x0e, 0xdc, 0x84, 0xe0, + 0x14, 0x4d, 0xf4, 0x25, 0x18, 0x8d, 0x9a, 0x8e, 0x47, 0x6a, 0xb7, 0xca, 0xe7, 0xf2, 0x8f, 0x9a, + 0x06, 0x47, 0xc9, 0x59, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, 0xd0, 0x2a, 0x0c, 0xb3, 0xf4, + 0x2d, 0x2c, 0x49, 0x40, 0x4e, 0xb0, 0xb5, 0x2e, 0x5b, 0x69, 0xce, 0x91, 0x58, 0x31, 0xe6, 0xd5, + 0xe9, 0x1e, 0x10, 0xb2, 0x6e, 0x10, 0x95, 0x2f, 0xe4, 0xef, 0x01, 0x21, 0x22, 0xdf, 0x6a, 0xf4, + 0xda, 0x03, 0x0a, 0x09, 0x27, 0x44, 0x29, 0x3f, 0xa6, 0x3c, 0xf4, 0x91, 0x7c, 0x7e, 0x9c, 0xcf, + 0x41, 0x19, 0x3f, 0xa6, 0xfc, 0x93, 0x92, 0xb0, 0x3f, 0x1c, 0xed, 0x96, 0x4f, 0xd8, 0xed, 0xe8, + 0x7b, 0xad, 0x2e, 0xd3, 0x81, 0xcf, 0x0d, 0xaa, 0xac, 0x39, 0x45, 0xc9, 0xf4, 0x43, 0x0b, 0x1e, + 0x69, 0x67, 0x7e, 0x88, 0x38, 0xec, 0x07, 0xd3, 0xf9, 0xf0, 0x4f, 0x57, 0x89, 0x3c, 0xb2, 0xe1, + 0x38, 0xa7, 0xa5, 0xb4, 0xf4, 0x5f, 0xfc, 0xc8, 0xd2, 0xff, 0x1a, 0x8c, 0x31, 0x81, 0x32, 0x89, + 0xec, 0x37, 0x90, 0x01, 0x1e, 0x13, 0x1b, 0x96, 0x45, 0x45, 0xac, 0x48, 0xa0, 0x1f, 0xb4, 0xe0, + 0x89, 0x74, 0xd7, 0x31, 0x61, 0x60, 0x11, 0x25, 0x9a, 0x5f, 0xcc, 0x56, 0xc5, 0xf7, 0x3f, 0x51, + 0xef, 0x85, 0x7c, 0xdc, 0x0f, 0x01, 0xf7, 0x6e, 0x0c, 0x55, 0x33, 0x6e, 0x86, 0x23, 0xe6, 0xcb, + 0xe2, 0x00, 0xb7, 0xc3, 0x57, 0x60, 0x62, 0x2f, 0xe8, 0xf8, 0xd2, 0x3b, 0x46, 0xf8, 0x3e, 0x33, + 0x2d, 0xf6, 0x9a, 0x56, 0x8e, 0x0d, 0xac, 0xd4, 0x9d, 0x72, 0xec, 0x41, 0xef, 0x94, 0x67, 0x7b, + 0x53, 0xf9, 0xba, 0x95, 0x21, 0x62, 0xf3, 0xbb, 0xeb, 0x17, 0xcc, 0xbb, 0xeb, 0xd3, 0xe9, 0xbb, + 0x6b, 0x97, 0xae, 0xd2, 0xb8, 0xb6, 0x0e, 0x1e, 0x45, 0x7f, 0xd0, 0x10, 0x8a, 0xb6, 0x07, 0x97, + 0xfb, 0x1d, 0x1c, 0xcc, 0x98, 0xb1, 0xa5, 0x5e, 0xf9, 0x13, 0x63, 0xc6, 0x56, 0xad, 0x8a, 0x19, + 0x64, 0xd0, 0x18, 0x3b, 0xf6, 0x7f, 0xb7, 0xa0, 0x58, 0x0f, 0x5a, 0x67, 0xa0, 0x7b, 0xfd, 0xa2, + 0xa1, 0x7b, 0x7d, 0x2c, 0x27, 0xbf, 0x78, 0xae, 0xa6, 
0x75, 0x25, 0xa5, 0x69, 0x7d, 0x22, 0x8f, + 0x40, 0x6f, 0xbd, 0xea, 0x4f, 0x14, 0x41, 0xcf, 0x86, 0x8e, 0xfe, 0xed, 0x83, 0x58, 0xc5, 0x17, + 0x7b, 0x25, 0x48, 0x17, 0x94, 0x99, 0x0d, 0xa4, 0x74, 0xb8, 0xfd, 0x73, 0x66, 0x1c, 0x7f, 0x97, + 0xb8, 0xdb, 0x3b, 0x31, 0x69, 0xa5, 0x3f, 0xe7, 0xec, 0x8c, 0xe3, 0xff, 0xab, 0x05, 0xd3, 0xa9, + 0xd6, 0x91, 0x97, 0xe5, 0xbd, 0xf7, 0x80, 0x3a, 0xb7, 0xd9, 0xbe, 0xee, 0x7e, 0xf3, 0x00, 0xea, + 0x61, 0x4b, 0xea, 0xa3, 0x98, 0x5c, 0xae, 0x5e, 0xbe, 0x22, 0xac, 0x61, 0xa0, 0x57, 0x61, 0x3c, + 0x0e, 0xda, 0x81, 0x17, 0x6c, 0x1f, 0xdc, 0x20, 0x32, 0xaa, 0x93, 0x7a, 0x7e, 0xdc, 0x48, 0x40, + 0x58, 0xc7, 0xb3, 0x7f, 0xaa, 0x08, 0xe9, 0x0c, 0xfa, 0xdf, 0x5a, 0x93, 0x9f, 0xcc, 0x35, 0xf9, + 0x0d, 0x0b, 0x66, 0x68, 0xeb, 0xcc, 0xbe, 0x4c, 0x1e, 0x87, 0x2a, 0x9b, 0x97, 0xd5, 0x23, 0x9b, + 0xd7, 0xd3, 0x94, 0x77, 0xb5, 0x82, 0x4e, 0x2c, 0xf4, 0x59, 0x1a, 0x73, 0xa2, 0xa5, 0x58, 0x40, + 0x05, 0x1e, 0x09, 0x43, 0xe1, 0x93, 0xa7, 0xe3, 0x91, 0x30, 0xc4, 0x02, 0x2a, 0x93, 0x7d, 0x0d, + 0xe5, 0x24, 0xfb, 0x62, 0x31, 0x2a, 0x85, 0x4d, 0x93, 0x10, 0x4c, 0xb4, 0x18, 0x95, 0xd2, 0xd8, + 0x29, 0xc1, 0xb1, 0x7f, 0xb6, 0x08, 0x13, 0xf5, 0xa0, 0x95, 0x3c, 0x2d, 0xbd, 0x62, 0x3c, 0x2d, + 0x5d, 0x4e, 0x3d, 0x2d, 0xcd, 0xe8, 0xb8, 0xdf, 0x7a, 0x48, 0xfa, 0xb8, 0x1e, 0x92, 0xfe, 0x95, + 0xc5, 0x66, 0xad, 0xba, 0xde, 0x10, 0xc9, 0xa8, 0x5f, 0x82, 0x71, 0xc6, 0x90, 0x98, 0x13, 0xa8, + 0x7c, 0x6f, 0x61, 0x39, 0x27, 0xd6, 0x93, 0x62, 0xac, 0xe3, 0xa0, 0x2b, 0x30, 0x16, 0x11, 0x27, + 0x6c, 0xee, 0x28, 0x1e, 0x27, 0x5e, 0x23, 0x78, 0x19, 0x56, 0x50, 0xf4, 0x76, 0x12, 0x1e, 0xb1, + 0x98, 0x9f, 0x56, 0x59, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0x31, 0x11, 0xed, 0xbb, 0x80, 0xba, 0xf1, + 0x07, 0x30, 0xf4, 0xaa, 0x98, 0x81, 0xd0, 0x4a, 0x5d, 0x41, 0xd0, 0xfe, 0xd4, 0x82, 0xa9, 0x7a, + 0xd0, 0xa2, 0x5b, 0xf7, 0x9b, 0x69, 0x9f, 0xea, 0xb1, 0x61, 0x47, 0x7a, 0xc4, 0x86, 0xfd, 0x07, + 0x16, 0x8c, 0xd6, 0x83, 0xd6, 0x19, 0x68, 0xc1, 0xbf, 0x60, 0x6a, 0xc1, 0x1f, 0xcd, 0x59, 
0x12, + 0x39, 0x8a, 0xef, 0x9f, 0x2f, 0xc2, 0x24, 0xed, 0x67, 0xb0, 0x2d, 0x67, 0xc9, 0x18, 0x11, 0x6b, + 0x80, 0x11, 0xa1, 0x62, 0x6e, 0xe0, 0x79, 0xc1, 0xbd, 0xf4, 0x8c, 0xad, 0xb2, 0x52, 0x2c, 0xa0, + 0xe8, 0x05, 0x18, 0x6b, 0x87, 0x64, 0xdf, 0x0d, 0x3a, 0x51, 0xda, 0xdf, 0xb9, 0x2e, 0xca, 0xb1, + 0xc2, 0xa0, 0x37, 0xa3, 0xc8, 0xf5, 0x9b, 0x44, 0x5a, 0x80, 0x0d, 0x31, 0x0b, 0x30, 0x1e, 0xf4, + 0x5d, 0x2b, 0xc7, 0x06, 0x16, 0xba, 0x0b, 0x25, 0xf6, 0x9f, 0x71, 0x94, 0x93, 0xa7, 0x21, 0x13, + 0x99, 0x56, 0x04, 0x01, 0x9c, 0xd0, 0x42, 0x57, 0x01, 0x62, 0x69, 0xab, 0x16, 0x09, 0x77, 0x7c, + 0x25, 0x6b, 0x2b, 0x2b, 0xb6, 0x08, 0x6b, 0x58, 0xe8, 0x79, 0x28, 0xc5, 0x8e, 0xeb, 0xdd, 0x74, + 0x7d, 0x12, 0x09, 0x5b, 0x3f, 0x91, 0x48, 0x45, 0x14, 0xe2, 0x04, 0x4e, 0x65, 0x1d, 0x16, 0xec, + 0x81, 0x27, 0x31, 0x1c, 0x63, 0xd8, 0x4c, 0xd6, 0xb9, 0xa9, 0x4a, 0xb1, 0x86, 0x61, 0xbf, 0x0e, + 0x17, 0xea, 0x41, 0xab, 0x1e, 0x84, 0xf1, 0x6a, 0x10, 0xde, 0x73, 0xc2, 0x96, 0x9c, 0xbf, 0x8a, + 0xcc, 0xe9, 0x41, 0x79, 0xcf, 0x30, 0xdf, 0x99, 0x46, 0xb6, 0x8e, 0x97, 0x99, 0xb4, 0x73, 0x42, + 0xc7, 0xac, 0x7f, 0x5f, 0x60, 0x8c, 0x22, 0x95, 0x59, 0x13, 0x7d, 0x05, 0xa6, 0x22, 0x72, 0xd3, + 0xf5, 0x3b, 0xf7, 0xe5, 0x0d, 0xb6, 0x87, 0xd7, 0x5b, 0x63, 0x45, 0xc7, 0xe4, 0x7a, 0x30, 0xb3, + 0x0c, 0xa7, 0xa8, 0xd1, 0x21, 0x0c, 0x3b, 0xfe, 0x62, 0x74, 0x3b, 0x22, 0xa1, 0xc8, 0xec, 0xc8, + 0x86, 0x10, 0xcb, 0x42, 0x9c, 0xc0, 0xe9, 0x92, 0x61, 0x7f, 0xd6, 0x03, 0x1f, 0x07, 0x41, 0x2c, + 0x17, 0x19, 0xcb, 0x0d, 0xa6, 0x95, 0x63, 0x03, 0x0b, 0xad, 0x02, 0x8a, 0x3a, 0xed, 0xb6, 0xc7, + 0x9e, 0xa8, 0x1d, 0xef, 0x5a, 0x18, 0x74, 0xda, 0xfc, 0x79, 0x50, 0xa4, 0xd5, 0x6a, 0x74, 0x41, + 0x71, 0x46, 0x0d, 0xca, 0x18, 0xb6, 0x22, 0xf6, 0x5b, 0xc4, 0x7b, 0xe0, 0xba, 0xe9, 0x06, 0x2b, + 0xc2, 0x12, 0x66, 0x7f, 0x0f, 0x3b, 0x30, 0x58, 0x42, 0xbe, 0xb8, 0x13, 0x12, 0xb4, 0x07, 0x93, + 0x6d, 0x76, 0x94, 0x8b, 0xd0, 0xe6, 0x62, 0x00, 0x1f, 0xcc, 0xb2, 0x8f, 0x27, 0xe8, 0xd2, 0xc9, + 0x61, 0x93, 0xba, 0xfd, 0x9f, 
0xa6, 0x19, 0x5f, 0x6a, 0xf0, 0xeb, 0xdc, 0xa8, 0xb0, 0xd7, 0x17, + 0xb2, 0xeb, 0x5c, 0x7e, 0x0a, 0xcf, 0xe4, 0x08, 0x11, 0x36, 0xff, 0x58, 0xd6, 0x45, 0x6f, 0xb3, + 0x77, 0x55, 0xce, 0x0c, 0xfa, 0x65, 0xf6, 0xe6, 0x58, 0xc6, 0x13, 0xaa, 0xa8, 0x88, 0x35, 0x22, + 0xe8, 0x26, 0x4c, 0x8a, 0xfc, 0x6d, 0x42, 0xb5, 0x53, 0x34, 0x14, 0x03, 0x93, 0x58, 0x07, 0x1e, + 0xa7, 0x0b, 0xb0, 0x59, 0x19, 0x6d, 0xc3, 0x13, 0x5a, 0x32, 0xd3, 0x0c, 0xeb, 0x52, 0xce, 0x5b, + 0x9e, 0x3c, 0x3a, 0xac, 0x3c, 0xb1, 0xd1, 0x0b, 0x11, 0xf7, 0xa6, 0x83, 0x6e, 0xc1, 0x05, 0xa7, + 0x19, 0xbb, 0xfb, 0xa4, 0x4a, 0x9c, 0x96, 0xe7, 0xfa, 0xc4, 0x0c, 0x00, 0x72, 0xf1, 0xe8, 0xb0, + 0x72, 0x61, 0x31, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0x5f, 0x80, 0x52, 0xcb, 0x8f, 0xc4, 0x18, 0x8c, + 0x18, 0x79, 0x7a, 0x4b, 0xd5, 0xf5, 0x86, 0xfa, 0xfe, 0xe4, 0x0f, 0x4e, 0x2a, 0xa0, 0x6d, 0x98, + 0xd0, 0x9d, 0xfc, 0x44, 0x8e, 0xe7, 0x17, 0x7b, 0xdc, 0xfa, 0x0d, 0xcf, 0x38, 0xae, 0xd7, 0x54, + 0xb6, 0xdb, 0x86, 0xd3, 0x9c, 0x41, 0x18, 0xbd, 0x05, 0x88, 0x0a, 0x33, 0x6e, 0x93, 0x2c, 0x36, + 0x59, 0x84, 0x79, 0xa6, 0x0d, 0x1b, 0x33, 0x1c, 0x91, 0x50, 0xa3, 0x0b, 0x03, 0x67, 0xd4, 0x42, + 0xd7, 0x29, 0x47, 0xd1, 0x4b, 0x85, 0xa9, 0xbd, 0x14, 0x80, 0xcb, 0x55, 0xd2, 0x0e, 0x49, 0xd3, + 0x89, 0x49, 0xcb, 0xa4, 0x88, 0x53, 0xf5, 0xe8, 0x79, 0xa3, 0x92, 0x4d, 0x81, 0x69, 0x20, 0xde, + 0x9d, 0x70, 0x8a, 0xde, 0x1d, 0x77, 0x82, 0x28, 0x5e, 0x27, 0xf1, 0xbd, 0x20, 0xdc, 0x15, 0x51, + 0xfb, 0x92, 0x00, 0xb2, 0x09, 0x08, 0xeb, 0x78, 0x54, 0x56, 0x64, 0x0f, 0x9b, 0xb5, 0x2a, 0x7b, + 0x67, 0x1a, 0x4b, 0xf6, 0xc9, 0x75, 0x5e, 0x8c, 0x25, 0x5c, 0xa2, 0xd6, 0xea, 0xcb, 0xec, 0xcd, + 0x28, 0x85, 0x5a, 0xab, 0x2f, 0x63, 0x09, 0x47, 0xa4, 0x3b, 0x07, 0xf2, 0x54, 0xfe, 0xbb, 0x5f, + 0x37, 0x5f, 0x1e, 0x30, 0x0d, 0xb2, 0x0f, 0x33, 0x2a, 0xfb, 0x32, 0x0f, 0x67, 0x18, 0x95, 0xa7, + 0xd9, 0x22, 0x19, 0x3c, 0x16, 0xa2, 0xd2, 0x76, 0xd6, 0x52, 0x94, 0x70, 0x17, 0x6d, 0x23, 0xb0, + 0xcc, 0x4c, 0xdf, 0x64, 0x61, 0x0b, 0x50, 0x8a, 0x3a, 0x9b, 0xad, 
0x60, 0xcf, 0x71, 0x7d, 0xf6, + 0xc4, 0xa3, 0x09, 0x22, 0x0d, 0x09, 0xc0, 0x09, 0x0e, 0x5a, 0x85, 0x31, 0x47, 0x5c, 0x4b, 0xc5, + 0xa3, 0x4c, 0x66, 0xa4, 0x09, 0x79, 0x75, 0xe5, 0x62, 0xb6, 0xfc, 0x87, 0x55, 0x5d, 0xf4, 0x06, + 0x4c, 0x0a, 0x67, 0x48, 0x61, 0xc7, 0x7c, 0xce, 0xf4, 0x9b, 0x69, 0xe8, 0x40, 0x6c, 0xe2, 0xa2, + 0xef, 0x82, 0x29, 0x4a, 0x25, 0x61, 0x6c, 0xe5, 0xf3, 0x83, 0x70, 0x44, 0x2d, 0x09, 0x8c, 0x5e, + 0x19, 0xa7, 0x88, 0xa1, 0x16, 0x3c, 0xee, 0x74, 0xe2, 0x80, 0xa9, 0x83, 0xcd, 0xf5, 0xbf, 0x11, + 0xec, 0x12, 0x9f, 0xbd, 0xc4, 0x8c, 0x2d, 0x5d, 0x3e, 0x3a, 0xac, 0x3c, 0xbe, 0xd8, 0x03, 0x0f, + 0xf7, 0xa4, 0x82, 0x6e, 0xc3, 0x78, 0x1c, 0x78, 0xc2, 0x01, 0x21, 0x2a, 0x3f, 0x92, 0x1f, 0x18, + 0x6b, 0x43, 0xa1, 0xe9, 0x8a, 0x16, 0x55, 0x15, 0xeb, 0x74, 0xd0, 0x06, 0xdf, 0x63, 0x2c, 0x64, + 0x30, 0x89, 0xca, 0x8f, 0xe6, 0x0f, 0x8c, 0x8a, 0x2c, 0x6c, 0x6e, 0x41, 0x51, 0x13, 0xeb, 0x64, + 0xd0, 0x35, 0x98, 0x6d, 0x87, 0x6e, 0xc0, 0x16, 0xb6, 0x52, 0xc5, 0x97, 0xcd, 0xbc, 0x1f, 0xf5, + 0x34, 0x02, 0xee, 0xae, 0x43, 0x2f, 0x62, 0xb2, 0xb0, 0x7c, 0x91, 0x27, 0x91, 0xe3, 0xc2, 0x29, + 0x2f, 0xc3, 0x0a, 0x8a, 0xd6, 0x18, 0x5f, 0xe6, 0x57, 0xa6, 0xf2, 0x5c, 0x7e, 0x84, 0x0e, 0xfd, + 0x6a, 0xc5, 0x05, 0x17, 0xf5, 0x17, 0x27, 0x14, 0xe6, 0xbe, 0x1d, 0x66, 0xbb, 0x18, 0xef, 0x89, + 0x6c, 0xcb, 0xff, 0xe9, 0x30, 0x94, 0x94, 0xde, 0x15, 0x2d, 0x98, 0xea, 0xf4, 0x8b, 0x69, 0x75, + 0xfa, 0x18, 0x15, 0xff, 0x74, 0x0d, 0xfa, 0x86, 0x61, 0x19, 0x55, 0xc8, 0xcf, 0x05, 0xa7, 0x2b, + 0x1d, 0xfa, 0x3a, 0x82, 0x6a, 0xd7, 0xe8, 0xe2, 0xc0, 0x7a, 0xf9, 0xa1, 0x9e, 0x37, 0xf3, 0x01, + 0xd3, 0x5b, 0xd3, 0x9b, 0x66, 0x3b, 0x68, 0xd5, 0xea, 0xe9, 0x7c, 0xaf, 0x75, 0x5a, 0x88, 0x39, + 0x8c, 0xdd, 0x15, 0xa8, 0x94, 0xc0, 0xee, 0x0a, 0xa3, 0x0f, 0x78, 0x57, 0x90, 0x04, 0x70, 0x42, + 0x0b, 0x79, 0x30, 0xdb, 0x34, 0x53, 0xf5, 0x2a, 0xe7, 0xcf, 0xa7, 0xfa, 0x26, 0xcd, 0xed, 0x68, + 0x39, 0xfc, 0x96, 0xd3, 0x54, 0x70, 0x37, 0x61, 0xf4, 0x06, 0x8c, 0xbd, 0x1f, 0x44, 0x6c, 0x15, + 0x8b, 
0xa3, 0x52, 0xba, 0xdb, 0x8d, 0xbd, 0x7d, 0xab, 0xc1, 0xca, 0x8f, 0x0f, 0x2b, 0xe3, 0xf5, + 0xa0, 0x25, 0xff, 0x62, 0x55, 0x01, 0xdd, 0x87, 0x0b, 0x06, 0x83, 0x51, 0xdd, 0x85, 0xc1, 0xbb, + 0xfb, 0x84, 0x68, 0xee, 0x42, 0x2d, 0x8b, 0x12, 0xce, 0x6e, 0xc0, 0xfe, 0x25, 0xae, 0x5d, 0x16, + 0x3a, 0x28, 0x12, 0x75, 0xbc, 0xb3, 0x48, 0xd4, 0xb5, 0x62, 0xa8, 0xc7, 0x1e, 0xf8, 0x05, 0xe3, + 0xd7, 0x2c, 0xf6, 0x82, 0xb1, 0x41, 0xf6, 0xda, 0x9e, 0x13, 0x9f, 0x85, 0x47, 0xc1, 0xdb, 0x30, + 0x16, 0x8b, 0xd6, 0x7a, 0xe5, 0x16, 0xd3, 0x3a, 0xc5, 0x5e, 0x71, 0xd4, 0xf9, 0x2a, 0x4b, 0xb1, + 0x22, 0x63, 0xff, 0x73, 0x3e, 0x03, 0x12, 0x72, 0x06, 0xaa, 0x8a, 0xaa, 0xa9, 0xaa, 0xa8, 0xf4, + 0xf9, 0x82, 0x1c, 0x95, 0xc5, 0x3f, 0x33, 0xfb, 0xcd, 0xae, 0x32, 0x9f, 0xf4, 0xa7, 0x33, 0xfb, + 0x47, 0x2c, 0x38, 0x9f, 0x65, 0x0d, 0x42, 0x65, 0x22, 0x7e, 0x91, 0x52, 0x4f, 0x89, 0x6a, 0x04, + 0xef, 0x88, 0x72, 0xac, 0x30, 0x06, 0x4e, 0xdb, 0x71, 0xb2, 0xd8, 0x72, 0xb7, 0xc0, 0xcc, 0xea, + 0x8c, 0xde, 0xe4, 0x2e, 0x42, 0x96, 0x4a, 0xbb, 0x7c, 0x32, 0xf7, 0x20, 0xfb, 0xa7, 0x0b, 0x70, + 0x9e, 0xbf, 0x05, 0x2c, 0xee, 0x07, 0x6e, 0xab, 0x1e, 0xb4, 0x84, 0xc3, 0xd4, 0x3b, 0x30, 0xd1, + 0xd6, 0x6e, 0xbf, 0xbd, 0xa2, 0x5b, 0xe9, 0xb7, 0xe4, 0xe4, 0x16, 0xa2, 0x97, 0x62, 0x83, 0x16, + 0x6a, 0xc1, 0x04, 0xd9, 0x77, 0x9b, 0x4a, 0xa1, 0x5c, 0x38, 0x31, 0x4b, 0x57, 0xad, 0xac, 0x68, + 0x74, 0xb0, 0x41, 0xf5, 0x21, 0x64, 0xe1, 0xb3, 0x7f, 0xd4, 0x82, 0x47, 0x73, 0x62, 0x61, 0xd1, + 0xe6, 0xee, 0xb1, 0x57, 0x17, 0x91, 0xd0, 0x4b, 0x35, 0xc7, 0xdf, 0x62, 0xb0, 0x80, 0xa2, 0x2f, + 0x01, 0xf0, 0xb7, 0x14, 0x2a, 0x94, 0x8b, 0x4f, 0x1f, 0x2c, 0x46, 0x8c, 0x16, 0x48, 0x44, 0xd6, + 0xc7, 0x1a, 0x2d, 0xfb, 0x27, 0x8b, 0x30, 0xcc, 0x74, 0xf7, 0x68, 0x15, 0x46, 0x77, 0x78, 0xe4, + 0xed, 0x41, 0x82, 0x7c, 0x27, 0xb7, 0x1b, 0x5e, 0x80, 0x65, 0x65, 0xb4, 0x06, 0xe7, 0x84, 0x53, + 0x5e, 0x95, 0x78, 0xce, 0x81, 0xbc, 0x24, 0xf3, 0x24, 0x58, 0x2a, 0xed, 0x5b, 0xad, 0x1b, 0x05, + 0x67, 0xd5, 0x43, 0x6f, 0x76, 0xc5, 0xdb, 
0xe4, 0x31, 0xcb, 0x95, 0x48, 0xdd, 0x27, 0xe6, 0xe6, + 0x1b, 0x30, 0xd9, 0xee, 0x52, 0x07, 0x0c, 0x27, 0xe2, 0xbe, 0xa9, 0x02, 0x30, 0x71, 0x99, 0x19, + 0x48, 0x87, 0x19, 0xbd, 0x6c, 0xec, 0x84, 0x24, 0xda, 0x09, 0xbc, 0x96, 0xc8, 0x4b, 0x9f, 0x98, + 0x81, 0xa4, 0xe0, 0xb8, 0xab, 0x06, 0xa5, 0xb2, 0xe5, 0xb8, 0x5e, 0x27, 0x24, 0x09, 0x95, 0x11, + 0x93, 0xca, 0x6a, 0x0a, 0x8e, 0xbb, 0x6a, 0xd0, 0x75, 0x74, 0x41, 0x24, 0x35, 0x97, 0xa1, 0x1a, + 0x94, 0x6d, 0xcf, 0xa8, 0x74, 0xd9, 0xe8, 0x11, 0x3e, 0x48, 0xd8, 0x56, 0xa8, 0xb4, 0xe8, 0x5a, + 0xca, 0x5c, 0xe1, 0xac, 0x21, 0xa9, 0x3c, 0x48, 0x6a, 0xed, 0xdf, 0xb3, 0xe0, 0x5c, 0x86, 0x0d, + 0x21, 0x67, 0x55, 0xdb, 0x6e, 0x14, 0xab, 0x44, 0x3f, 0x1a, 0xab, 0xe2, 0xe5, 0x58, 0x61, 0xd0, + 0xfd, 0xc0, 0x99, 0x61, 0x9a, 0x01, 0x0a, 0x1b, 0x1d, 0x01, 0x3d, 0x19, 0x03, 0x44, 0x97, 0x61, + 0xa8, 0x13, 0x91, 0x50, 0xe6, 0xa4, 0x96, 0xfc, 0x9b, 0x29, 0x18, 0x19, 0x84, 0x4a, 0x94, 0xdb, + 0x4a, 0xb7, 0xa7, 0x49, 0x94, 0x5c, 0xbb, 0xc7, 0x61, 0xf6, 0xd7, 0x8a, 0x70, 0x31, 0xd7, 0x46, + 0x98, 0x76, 0x69, 0x2f, 0xf0, 0xdd, 0x38, 0x50, 0xef, 0x42, 0x3c, 0xce, 0x0d, 0x69, 0xef, 0xac, + 0x89, 0x72, 0xac, 0x30, 0xd0, 0xd3, 0x30, 0xcc, 0xee, 0xcf, 0x5d, 0xa9, 0x8c, 0x96, 0xaa, 0x3c, + 0xfc, 0x02, 0x07, 0x0f, 0x9c, 0x26, 0xee, 0x29, 0x18, 0x6a, 0x07, 0x81, 0x97, 0x66, 0x46, 0xb4, + 0xbb, 0x41, 0xe0, 0x61, 0x06, 0x44, 0x9f, 0x11, 0xe3, 0x90, 0x7a, 0x08, 0xc1, 0x4e, 0x2b, 0x88, + 0xb4, 0xc1, 0x78, 0x16, 0x46, 0x77, 0xc9, 0x41, 0xe8, 0xfa, 0xdb, 0xe9, 0x07, 0xb2, 0x1b, 0xbc, + 0x18, 0x4b, 0xb8, 0x99, 0xc9, 0x63, 0xf4, 0xb4, 0xf3, 0xbb, 0x8d, 0xf5, 0x3d, 0xda, 0x7e, 0xa0, + 0x08, 0xd3, 0x78, 0xa9, 0xfa, 0xad, 0x89, 0xb8, 0xdd, 0x3d, 0x11, 0xa7, 0x9d, 0xdf, 0xad, 0xff, + 0x6c, 0xfc, 0xbc, 0x05, 0xd3, 0x2c, 0xda, 0xb5, 0x88, 0xd3, 0xe2, 0x06, 0xfe, 0x19, 0x88, 0x6e, + 0x4f, 0xc1, 0x70, 0x48, 0x1b, 0x4d, 0x27, 0x6d, 0x62, 0x3d, 0xc1, 0x1c, 0x86, 0x1e, 0x87, 0x21, + 0xd6, 0x05, 0x3a, 0x79, 0x13, 0x3c, 0xdf, 0x45, 0xd5, 0x89, 0x1d, 0xcc, 0x4a, 
0x99, 0xc3, 0x2c, + 0x26, 0x6d, 0xcf, 0xe5, 0x9d, 0x4e, 0x14, 0xea, 0x9f, 0x0c, 0x87, 0xd9, 0xcc, 0xae, 0x7d, 0x34, + 0x87, 0xd9, 0x6c, 0x92, 0xbd, 0xaf, 0x45, 0xff, 0xa3, 0x00, 0x97, 0x32, 0xeb, 0x0d, 0xec, 0x30, + 0xdb, 0xbb, 0xf6, 0xe9, 0xd8, 0x39, 0x64, 0x9b, 0x1f, 0x14, 0xcf, 0xd0, 0xfc, 0x60, 0x68, 0x50, + 0xc9, 0x71, 0x78, 0x00, 0x3f, 0xd6, 0xcc, 0x21, 0xfb, 0x84, 0xf8, 0xb1, 0x66, 0xf6, 0x2d, 0xe7, + 0x5a, 0xf7, 0x67, 0x85, 0x9c, 0x6f, 0x61, 0x17, 0xbc, 0x2b, 0x94, 0xcf, 0x30, 0x60, 0x24, 0x24, + 0xe1, 0x09, 0xce, 0x63, 0x78, 0x19, 0x56, 0x50, 0xe4, 0x6a, 0x1e, 0xa1, 0x85, 0xfc, 0x94, 0x9e, + 0xb9, 0x4d, 0xcd, 0x9b, 0xef, 0x1f, 0x7a, 0x50, 0x99, 0xb4, 0x77, 0xe8, 0x9a, 0x76, 0x29, 0x2f, + 0x0e, 0x7e, 0x29, 0x9f, 0xc8, 0xbe, 0x90, 0xa3, 0x45, 0x98, 0xde, 0x73, 0x7d, 0xca, 0x36, 0x0f, + 0x4c, 0x51, 0x54, 0x05, 0x48, 0x58, 0x33, 0xc1, 0x38, 0x8d, 0x3f, 0xf7, 0x06, 0x4c, 0x3e, 0xb8, + 0x16, 0xf1, 0x1b, 0x45, 0x78, 0xac, 0xc7, 0xb6, 0xe7, 0xbc, 0xde, 0x98, 0x03, 0x8d, 0xd7, 0x77, + 0xcd, 0x43, 0x1d, 0xce, 0x6f, 0x75, 0x3c, 0xef, 0x80, 0x59, 0xf8, 0x91, 0x96, 0xc4, 0x10, 0xb2, + 0xa2, 0x0a, 0x65, 0xbf, 0x9a, 0x81, 0x83, 0x33, 0x6b, 0xa2, 0xb7, 0x00, 0x05, 0x22, 0x9f, 0x70, + 0x12, 0x2a, 0x87, 0x0d, 0x7c, 0x31, 0xd9, 0x8c, 0xb7, 0xba, 0x30, 0x70, 0x46, 0x2d, 0x2a, 0xf4, + 0xd3, 0x53, 0xe9, 0x40, 0x75, 0x2b, 0x25, 0xf4, 0x63, 0x1d, 0x88, 0x4d, 0x5c, 0x74, 0x0d, 0x66, + 0x9d, 0x7d, 0xc7, 0xe5, 0xa1, 0x13, 0x25, 0x01, 0x2e, 0xf5, 0x2b, 0xdd, 0xdd, 0x62, 0x1a, 0x01, + 0x77, 0xd7, 0x49, 0xb9, 0xa4, 0x8e, 0xe4, 0xbb, 0xa4, 0xf6, 0xe6, 0x8b, 0xfd, 0x54, 0xb1, 0xf6, + 0x7f, 0xb6, 0xe8, 0xf1, 0xa5, 0xa5, 0xf9, 0xd7, 0x33, 0x2a, 0x28, 0x95, 0xa2, 0xe6, 0x1d, 0xaa, + 0xc6, 0x61, 0x59, 0x07, 0x62, 0x13, 0x97, 0x2f, 0x88, 0x28, 0x71, 0x54, 0x30, 0x44, 0x77, 0xe1, + 0xfe, 0xad, 0x30, 0xd0, 0x97, 0x61, 0xb4, 0xe5, 0xee, 0xbb, 0x51, 0x10, 0x8a, 0xcd, 0x72, 0x42, + 0x63, 0xf2, 0x84, 0x0f, 0x56, 0x39, 0x19, 0x2c, 0xe9, 0xd9, 0x3f, 0x50, 0x80, 0x49, 0xd9, 0xe2, + 0xdb, 0x9d, 0x20, 
0x76, 0xce, 0xe0, 0x58, 0xbe, 0x66, 0x1c, 0xcb, 0x9f, 0xe9, 0xe5, 0x03, 0xcf, + 0xba, 0x94, 0x7b, 0x1c, 0xdf, 0x4a, 0x1d, 0xc7, 0xcf, 0xf4, 0x27, 0xd5, 0xfb, 0x18, 0xfe, 0x17, + 0x16, 0xcc, 0x1a, 0xf8, 0x67, 0x70, 0x1a, 0xac, 0x9a, 0xa7, 0xc1, 0x93, 0x7d, 0xbf, 0x21, 0xe7, + 0x14, 0xf8, 0x7a, 0x21, 0xd5, 0x77, 0xc6, 0xfd, 0xdf, 0x87, 0xa1, 0x1d, 0x27, 0x6c, 0xf5, 0x0a, + 0x00, 0xdc, 0x55, 0x69, 0xfe, 0xba, 0x13, 0xb6, 0x38, 0x0f, 0x7f, 0x41, 0x65, 0x21, 0x75, 0xc2, + 0x56, 0x5f, 0xbf, 0x1c, 0xd6, 0x14, 0x7a, 0x1d, 0x46, 0xa2, 0x66, 0xd0, 0x56, 0x36, 0x79, 0x97, + 0x79, 0x86, 0x52, 0x5a, 0x72, 0x7c, 0x58, 0x41, 0x66, 0x73, 0xb4, 0x18, 0x0b, 0xfc, 0xb9, 0x6d, + 0x28, 0xa9, 0xa6, 0x1f, 0xaa, 0x47, 0xc5, 0x6f, 0x17, 0xe1, 0x5c, 0xc6, 0xba, 0x40, 0x91, 0x31, + 0x5a, 0x2f, 0x0d, 0xb8, 0x9c, 0x3e, 0xe2, 0x78, 0x45, 0xec, 0xc6, 0xd2, 0x12, 0xf3, 0x3f, 0x70, + 0xa3, 0xb7, 0x23, 0x92, 0x6e, 0x94, 0x16, 0xf5, 0x6f, 0x94, 0x36, 0x76, 0x66, 0x43, 0x4d, 0x1b, + 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0xc7, 0x45, 0x38, 0x9f, 0x15, 0x3a, 0x03, 0x7d, 0x77, 0x2a, + 0x9d, 0xd0, 0x2b, 0x83, 0x06, 0xdd, 0xe0, 0x39, 0x86, 0x44, 0xac, 0xb1, 0x79, 0x33, 0xc1, 0x50, + 0xdf, 0x61, 0x16, 0x6d, 0x32, 0x47, 0xb9, 0x90, 0xa7, 0x81, 0x92, 0x5b, 0xfc, 0x73, 0x03, 0x77, + 0x40, 0xe4, 0x8f, 0x8a, 0x52, 0x8e, 0x72, 0xb2, 0xb8, 0xbf, 0xa3, 0x9c, 0x6c, 0x79, 0xce, 0x85, + 0x71, 0xed, 0x6b, 0x1e, 0xea, 0x8c, 0xef, 0xd2, 0x13, 0x45, 0xeb, 0xf7, 0x43, 0x9d, 0xf5, 0x1f, + 0xb5, 0x20, 0x65, 0x09, 0xa7, 0x54, 0x52, 0x56, 0xae, 0x4a, 0xea, 0x32, 0x0c, 0x85, 0x81, 0x47, + 0xd2, 0x19, 0x66, 0x70, 0xe0, 0x11, 0xcc, 0x20, 0x14, 0x23, 0x4e, 0x14, 0x12, 0x13, 0xfa, 0x65, + 0x4b, 0x5c, 0xa3, 0x9e, 0x82, 0x61, 0x8f, 0xec, 0x13, 0xa9, 0x8d, 0x50, 0x3c, 0xf9, 0x26, 0x2d, + 0xc4, 0x1c, 0x66, 0xff, 0xfc, 0x10, 0x3c, 0xd1, 0xd3, 0xd5, 0x94, 0x5e, 0x59, 0xb6, 0x9d, 0x98, + 0xdc, 0x73, 0x0e, 0xd2, 0xf1, 0xaf, 0xaf, 0xf1, 0x62, 0x2c, 0xe1, 0xcc, 0x6e, 0x97, 0x87, 0xd0, + 0x4c, 0x29, 0xf0, 0x44, 0xe4, 0x4c, 0x01, 0x35, 0x15, 
0x47, 0xc5, 0xd3, 0x50, 0x1c, 0x5d, 0x05, + 0x88, 0x22, 0x6f, 0xc5, 0xa7, 0x12, 0x58, 0x4b, 0x18, 0x04, 0x27, 0xa1, 0x56, 0x1b, 0x37, 0x05, + 0x04, 0x6b, 0x58, 0xa8, 0x0a, 0x33, 0xed, 0x30, 0x88, 0xb9, 0x3e, 0xb4, 0xca, 0x4d, 0x51, 0x86, + 0x4d, 0x2f, 0xbf, 0x7a, 0x0a, 0x8e, 0xbb, 0x6a, 0xa0, 0x57, 0x61, 0x5c, 0x78, 0xfe, 0xd5, 0x83, + 0xc0, 0x13, 0xaa, 0x1a, 0x65, 0xd8, 0xd0, 0x48, 0x40, 0x58, 0xc7, 0xd3, 0xaa, 0x31, 0x25, 0xeb, + 0x68, 0x66, 0x35, 0xae, 0x68, 0xd5, 0xf0, 0x52, 0x61, 0x74, 0xc6, 0x06, 0x0a, 0xa3, 0x93, 0x28, + 0xaf, 0x4a, 0x03, 0xbf, 0x2b, 0x41, 0x5f, 0x75, 0xcf, 0xcf, 0x0c, 0xc1, 0x39, 0xb1, 0x70, 0x1e, + 0xf6, 0x72, 0xb9, 0xdd, 0xbd, 0x5c, 0x4e, 0x43, 0xbd, 0xf5, 0xad, 0x35, 0x73, 0xd6, 0x6b, 0xe6, + 0x97, 0x8a, 0x30, 0xc2, 0xa7, 0xe2, 0x0c, 0x64, 0xf8, 0x55, 0xa1, 0xf4, 0xeb, 0x11, 0x40, 0x86, + 0xf7, 0x65, 0xbe, 0xea, 0xc4, 0x0e, 0x3f, 0xbf, 0x14, 0x1b, 0x4d, 0xd4, 0x83, 0x68, 0xde, 0x60, + 0xb4, 0x73, 0x29, 0xad, 0x16, 0x70, 0x1a, 0x1a, 0xdb, 0xfd, 0x0a, 0x40, 0xc4, 0x52, 0xe8, 0x53, + 0x1a, 0x22, 0x14, 0xd1, 0x73, 0x3d, 0x5a, 0x6f, 0x28, 0x64, 0xde, 0x87, 0x64, 0x09, 0x2a, 0x00, + 0xd6, 0x28, 0xce, 0xbd, 0x06, 0x25, 0x85, 0xdc, 0x4f, 0x05, 0x30, 0xa1, 0x9f, 0x7a, 0x5f, 0x84, + 0xe9, 0x54, 0x5b, 0x27, 0xd2, 0x20, 0xfc, 0x82, 0x05, 0xd3, 0xbc, 0xcb, 0x2b, 0xfe, 0xbe, 0xd8, + 0xec, 0x1f, 0xc0, 0x79, 0x2f, 0x63, 0xd3, 0x89, 0x19, 0x1d, 0x7c, 0x93, 0x2a, 0x8d, 0x41, 0x16, + 0x14, 0x67, 0xb6, 0x81, 0xae, 0xc0, 0x18, 0x77, 0x74, 0x71, 0x3c, 0xe1, 0x9c, 0x30, 0xc1, 0x53, + 0x52, 0xf0, 0x32, 0xac, 0xa0, 0xf6, 0xef, 0x58, 0x30, 0xcb, 0x7b, 0x7e, 0x83, 0x1c, 0xa8, 0xdb, + 0xf1, 0xc7, 0xd9, 0x77, 0x91, 0x71, 0xa3, 0x90, 0x93, 0x71, 0x43, 0xff, 0xb4, 0x62, 0xcf, 0x4f, + 0xfb, 0x69, 0x0b, 0xc4, 0x0a, 0x3c, 0x83, 0x7b, 0xe0, 0xb7, 0x9b, 0xf7, 0xc0, 0xb9, 0xfc, 0x45, + 0x9d, 0x73, 0x01, 0xfc, 0x53, 0x0b, 0x66, 0x38, 0x42, 0xf2, 0x10, 0xf9, 0xb1, 0xce, 0xc3, 0x20, + 0x69, 0xe0, 0x54, 0xde, 0xed, 0xec, 0x8f, 0x32, 0x26, 0x6b, 0xa8, 0xe7, 0x64, 0xb5, 0xe4, 
0x06, + 0x3a, 0x41, 0x7a, 0xc3, 0x13, 0x07, 0x89, 0xb5, 0xff, 0xc8, 0x02, 0xc4, 0x9b, 0x31, 0xce, 0x65, + 0x7a, 0xda, 0xb1, 0x52, 0x4d, 0x13, 0x94, 0xb0, 0x1a, 0x05, 0xc1, 0x1a, 0xd6, 0xa9, 0x0c, 0x4f, + 0xea, 0x35, 0xb9, 0xd8, 0xff, 0x35, 0xf9, 0x04, 0x23, 0xfa, 0xd7, 0x87, 0x20, 0x6d, 0x09, 0x8d, + 0xee, 0xc0, 0x44, 0xd3, 0x69, 0x3b, 0x9b, 0xae, 0xe7, 0xc6, 0x2e, 0x89, 0x7a, 0x99, 0xa1, 0x2c, + 0x6b, 0x78, 0xe2, 0x9d, 0x50, 0x2b, 0xc1, 0x06, 0x1d, 0x34, 0x0f, 0xd0, 0x0e, 0xdd, 0x7d, 0xd7, + 0x23, 0xdb, 0xec, 0x2a, 0xcc, 0xdc, 0xa1, 0xb8, 0x6d, 0x85, 0x2c, 0xc5, 0x1a, 0x46, 0x86, 0xfb, + 0x4c, 0xf1, 0xe1, 0xb9, 0xcf, 0x0c, 0x9d, 0xd0, 0x7d, 0x66, 0x78, 0x20, 0xf7, 0x19, 0x0c, 0x8f, + 0xc8, 0xb3, 0x9b, 0xfe, 0x5f, 0x75, 0x3d, 0x22, 0x04, 0x36, 0xee, 0x24, 0x35, 0x77, 0x74, 0x58, + 0x79, 0x04, 0x67, 0x62, 0xe0, 0x9c, 0x9a, 0xe8, 0x4b, 0x50, 0x76, 0x3c, 0x2f, 0xb8, 0xa7, 0x46, + 0x6d, 0x25, 0x6a, 0x3a, 0x5e, 0x12, 0x33, 0x7d, 0x6c, 0xe9, 0xf1, 0xa3, 0xc3, 0x4a, 0x79, 0x31, + 0x07, 0x07, 0xe7, 0xd6, 0xb6, 0x77, 0xe1, 0x5c, 0x83, 0x84, 0x32, 0x63, 0xaa, 0xda, 0x62, 0x1b, + 0x50, 0x0a, 0x53, 0x4c, 0x65, 0xa0, 0x58, 0x25, 0x5a, 0x3c, 0x4b, 0xc9, 0x44, 0x12, 0x42, 0xf6, + 0x9f, 0x58, 0x30, 0x2a, 0xac, 0xab, 0xcf, 0x40, 0x96, 0x59, 0x34, 0xf4, 0x91, 0x95, 0x6c, 0xc6, + 0xcb, 0x3a, 0x93, 0xab, 0x89, 0xac, 0xa5, 0x34, 0x91, 0x4f, 0xf6, 0x22, 0xd2, 0x5b, 0x07, 0xf9, + 0xc3, 0x45, 0x98, 0x32, 0x2d, 0xcb, 0xcf, 0x60, 0x08, 0xd6, 0x61, 0x34, 0x12, 0x6e, 0x0c, 0x85, + 0x7c, 0xfb, 0xd5, 0xf4, 0x24, 0x26, 0x56, 0x2e, 0xc2, 0x71, 0x41, 0x12, 0xc9, 0xf4, 0x8f, 0x28, + 0x3e, 0x44, 0xff, 0x88, 0x7e, 0xc6, 0xfd, 0x43, 0xa7, 0x61, 0xdc, 0x6f, 0xff, 0x32, 0x63, 0xfe, + 0x7a, 0xf9, 0x19, 0xc8, 0x05, 0xd7, 0xcc, 0x63, 0xc2, 0xee, 0xb1, 0xb2, 0x44, 0xa7, 0x72, 0xe4, + 0x83, 0x7f, 0x6c, 0xc1, 0xb8, 0x40, 0x3c, 0x83, 0x6e, 0x7f, 0x87, 0xd9, 0xed, 0xc7, 0x7a, 0x74, + 0x3b, 0xa7, 0xbf, 0x7f, 0xb7, 0xa0, 0xfa, 0x5b, 0x0f, 0xc2, 0x78, 0xa0, 0x1c, 0x1a, 0x63, 0xf4, + 0x36, 0x18, 0x34, 0x03, 0x4f, 
0x1c, 0xe6, 0x8f, 0x27, 0x7e, 0xb2, 0xbc, 0xfc, 0x58, 0xfb, 0x8d, + 0x15, 0x36, 0x73, 0xe3, 0x0c, 0xc2, 0x58, 0x1c, 0xa0, 0x89, 0x1b, 0x67, 0x10, 0xc6, 0x98, 0x41, + 0x50, 0x0b, 0x20, 0x76, 0xc2, 0x6d, 0x12, 0xd3, 0x32, 0xe1, 0x72, 0x9f, 0xbf, 0x0b, 0x3b, 0xb1, + 0xeb, 0xcd, 0xbb, 0x7e, 0x1c, 0xc5, 0xe1, 0x7c, 0xcd, 0x8f, 0x6f, 0x85, 0xfc, 0x6e, 0xa0, 0x39, + 0xbe, 0x2a, 0x5a, 0x58, 0xa3, 0x2b, 0x3d, 0xaf, 0x58, 0x1b, 0xc3, 0xe6, 0x43, 0xe1, 0xba, 0x28, + 0xc7, 0x0a, 0xc3, 0x7e, 0x8d, 0xf1, 0x64, 0x36, 0x40, 0x27, 0xf3, 0x49, 0xfd, 0xcd, 0x31, 0x35, + 0xb4, 0xec, 0x95, 0xa0, 0xaa, 0x7b, 0xbe, 0xf6, 0x66, 0x81, 0xb4, 0x61, 0xdd, 0x2d, 0x20, 0x71, + 0x8f, 0x45, 0xdf, 0xd9, 0xf5, 0x7e, 0xfc, 0x62, 0x1f, 0x5e, 0x7a, 0x82, 0x17, 0x63, 0x16, 0x88, + 0x95, 0x05, 0xac, 0xac, 0xd5, 0xd3, 0x59, 0x4e, 0x96, 0x25, 0x00, 0x27, 0x38, 0x68, 0x41, 0xdc, + 0x2c, 0xb9, 0x7e, 0xee, 0xb1, 0xd4, 0xcd, 0x52, 0x7e, 0xbe, 0x76, 0xb5, 0x7c, 0x09, 0xc6, 0x55, + 0xe6, 0xb8, 0x3a, 0x4f, 0xc0, 0x25, 0x02, 0x10, 0xac, 0x24, 0xc5, 0x58, 0xc7, 0x41, 0x1b, 0x30, + 0x1d, 0xf1, 0xb4, 0x76, 0xd2, 0x19, 0x4a, 0xe8, 0x0d, 0x9e, 0x93, 0xef, 0xce, 0x0d, 0x13, 0x7c, + 0xcc, 0x8a, 0xf8, 0x66, 0x95, 0xee, 0x53, 0x69, 0x12, 0xe8, 0x4d, 0x98, 0xf2, 0xf4, 0xf4, 0xde, + 0x75, 0xa1, 0x56, 0x50, 0x66, 0x99, 0x46, 0xf2, 0xef, 0x3a, 0x4e, 0x61, 0x53, 0x21, 0x40, 0x2f, + 0x11, 0xd1, 0xcb, 0x1c, 0x7f, 0x9b, 0x44, 0x22, 0xef, 0x15, 0x13, 0x02, 0x6e, 0xe6, 0xe0, 0xe0, + 0xdc, 0xda, 0xe8, 0x75, 0x98, 0x90, 0x9f, 0xaf, 0x39, 0x07, 0x26, 0xc6, 0xbf, 0x1a, 0x0c, 0x1b, + 0x98, 0xe8, 0x1e, 0x5c, 0x90, 0xff, 0x37, 0x42, 0x67, 0x6b, 0xcb, 0x6d, 0x0a, 0xdf, 0xcc, 0x71, + 0x46, 0x62, 0x51, 0x7a, 0x42, 0xac, 0x64, 0x21, 0x1d, 0x1f, 0x56, 0x2e, 0x8b, 0x51, 0xcb, 0x84, + 0xb3, 0x49, 0xcc, 0xa6, 0x8f, 0xd6, 0xe0, 0xdc, 0x0e, 0x71, 0xbc, 0x78, 0x67, 0x79, 0x87, 0x34, + 0x77, 0xe5, 0x26, 0x62, 0x2e, 0x87, 0x9a, 0xc9, 0xec, 0xf5, 0x6e, 0x14, 0x9c, 0x55, 0x0f, 0xbd, + 0x0b, 0xe5, 0x76, 0x67, 0xd3, 0x73, 0xa3, 0x9d, 0xf5, 0x20, 0x66, 
0x4f, 0xdd, 0x2a, 0xf1, 0x9a, + 0xf0, 0x4d, 0x54, 0xee, 0x96, 0xf5, 0x1c, 0x3c, 0x9c, 0x4b, 0x01, 0x7d, 0x00, 0x17, 0x52, 0x8b, + 0x41, 0x78, 0x4a, 0x4d, 0xe5, 0xc7, 0x82, 0x6c, 0x64, 0x55, 0xe0, 0x1e, 0xb3, 0x99, 0x20, 0x9c, + 0xdd, 0xc4, 0x47, 0x33, 0x80, 0x78, 0x9f, 0x56, 0xd6, 0xa4, 0x1b, 0xf4, 0x55, 0x98, 0xd0, 0x57, + 0x91, 0x38, 0x60, 0x9e, 0xee, 0x97, 0xca, 0x5e, 0xc8, 0x46, 0x6a, 0x45, 0xe9, 0x30, 0x6c, 0x50, + 0xb4, 0x09, 0x64, 0x7f, 0x1f, 0xba, 0x09, 0x63, 0x4d, 0xcf, 0x25, 0x7e, 0x5c, 0xab, 0xf7, 0xf2, + 0xa9, 0x5f, 0x16, 0x38, 0x62, 0xc0, 0x44, 0xf0, 0x3c, 0x5e, 0x86, 0x15, 0x05, 0xfb, 0x57, 0x0b, + 0x50, 0xe9, 0x13, 0x89, 0x31, 0xa5, 0x03, 0xb4, 0x06, 0xd2, 0x01, 0x2e, 0xca, 0x34, 0x72, 0xeb, + 0xa9, 0xfb, 0x67, 0x2a, 0x45, 0x5c, 0x72, 0x0b, 0x4d, 0xe3, 0x0f, 0x6c, 0x37, 0xa9, 0xab, 0x11, + 0x87, 0xfa, 0x5a, 0xf4, 0x1a, 0xcf, 0x07, 0xc3, 0x83, 0x4b, 0xf4, 0xb9, 0xaa, 0x60, 0xfb, 0x97, + 0x0b, 0x70, 0x41, 0x0d, 0xe1, 0x37, 0xef, 0xc0, 0xdd, 0xee, 0x1e, 0xb8, 0x53, 0x50, 0xa4, 0xdb, + 0xb7, 0x60, 0xa4, 0x71, 0x10, 0x35, 0x63, 0x6f, 0x00, 0x01, 0xe8, 0x29, 0x33, 0xb6, 0x8c, 0x3a, + 0xa6, 0x8d, 0xf8, 0x32, 0x7f, 0xc5, 0x82, 0xe9, 0x8d, 0xe5, 0x7a, 0x23, 0x68, 0xee, 0x92, 0x78, + 0x91, 0xab, 0x89, 0xb0, 0x90, 0x7f, 0xac, 0x07, 0x94, 0x6b, 0xb2, 0x24, 0xa6, 0xcb, 0x30, 0xb4, + 0x13, 0x44, 0x71, 0xfa, 0x95, 0xed, 0x7a, 0x10, 0xc5, 0x98, 0x41, 0xec, 0xdf, 0xb5, 0x60, 0x98, + 0x25, 0x3f, 0xed, 0x97, 0x24, 0x77, 0x90, 0xef, 0x42, 0xaf, 0xc2, 0x08, 0xd9, 0xda, 0x22, 0xcd, + 0x58, 0xcc, 0xaa, 0xf4, 0xae, 0x1b, 0x59, 0x61, 0xa5, 0xf4, 0xd0, 0x67, 0x8d, 0xf1, 0xbf, 0x58, + 0x20, 0xa3, 0xbb, 0x50, 0x8a, 0xdd, 0x3d, 0xb2, 0xd8, 0x6a, 0x89, 0x77, 0x8a, 0x07, 0x70, 0x66, + 0xdc, 0x90, 0x04, 0x70, 0x42, 0xcb, 0xfe, 0x5a, 0x01, 0x20, 0x71, 0xe8, 0xed, 0xf7, 0x89, 0x4b, + 0x5d, 0x79, 0x80, 0x9f, 0xce, 0xc8, 0x03, 0x8c, 0x12, 0x82, 0x19, 0x59, 0x80, 0xd5, 0x30, 0x15, + 0x07, 0x1a, 0xa6, 0xa1, 0x93, 0x0c, 0xd3, 0x32, 0xcc, 0x26, 0x0e, 0xc9, 0x66, 0x74, 0x06, 0x16, + 0x99, 
0x7d, 0x23, 0x0d, 0xc4, 0xdd, 0xf8, 0xf6, 0xf7, 0x5b, 0x20, 0xdc, 0x0d, 0x06, 0x58, 0xcc, + 0xef, 0xc8, 0x94, 0x9d, 0x46, 0x40, 0xd7, 0xcb, 0xf9, 0xfe, 0x17, 0x22, 0x8c, 0xab, 0x3a, 0x3c, + 0x8c, 0xe0, 0xad, 0x06, 0x2d, 0xbb, 0x05, 0x02, 0x5a, 0x25, 0x4c, 0xc9, 0xd0, 0xbf, 0x37, 0x57, + 0x01, 0x5a, 0x0c, 0x57, 0x4b, 0x01, 0xa8, 0x58, 0x55, 0x55, 0x41, 0xb0, 0x86, 0x65, 0xff, 0xcd, + 0x02, 0x8c, 0xcb, 0x00, 0xa2, 0xf4, 0x1e, 0xdf, 0xbf, 0x95, 0x13, 0x65, 0x0f, 0x60, 0x39, 0x33, + 0x29, 0x61, 0x15, 0x64, 0x5e, 0xcf, 0x99, 0x29, 0x01, 0x38, 0xc1, 0x41, 0xcf, 0xc2, 0x68, 0xd4, + 0xd9, 0x64, 0xe8, 0x29, 0x23, 0xfa, 0x06, 0x2f, 0xc6, 0x12, 0x8e, 0xbe, 0x04, 0x33, 0xbc, 0x5e, + 0x18, 0xb4, 0x9d, 0x6d, 0xae, 0x41, 0x1a, 0x56, 0x5e, 0x6d, 0x33, 0x6b, 0x29, 0xd8, 0xf1, 0x61, + 0xe5, 0x7c, 0xba, 0x8c, 0xe9, 0x1e, 0xbb, 0xa8, 0xd0, 0x7d, 0x31, 0x93, 0x76, 0x98, 0x41, 0xd7, + 0x61, 0x84, 0xb3, 0x3c, 0xc1, 0x82, 0x7a, 0xbc, 0x28, 0x69, 0x6e, 0x36, 0x2c, 0x9c, 0xba, 0xe0, + 0x9a, 0xa2, 0x3e, 0x7a, 0x17, 0xc6, 0x5b, 0xc1, 0x3d, 0xff, 0x9e, 0x13, 0xb6, 0x16, 0xeb, 0x35, + 0xb1, 0x6a, 0x32, 0x25, 0xa7, 0x6a, 0x82, 0xa6, 0xbb, 0xee, 0x30, 0xed, 0x69, 0x02, 0xc2, 0x3a, + 0x39, 0xb4, 0xc1, 0x62, 0x3c, 0xf1, 0xa4, 0xf6, 0xbd, 0xac, 0xce, 0x54, 0x1e, 0x7c, 0x8d, 0xf2, + 0xa4, 0x08, 0x04, 0x25, 0x52, 0xe2, 0x27, 0x84, 0xec, 0x0f, 0xcf, 0x81, 0xb1, 0x5a, 0x8d, 0xec, + 0x01, 0xd6, 0x29, 0x65, 0x0f, 0xc0, 0x30, 0x46, 0xf6, 0xda, 0xf1, 0x41, 0xd5, 0x0d, 0x7b, 0xa5, + 0x9f, 0x59, 0x11, 0x38, 0xdd, 0x34, 0x25, 0x04, 0x2b, 0x3a, 0xd9, 0x29, 0x1e, 0x8a, 0x1f, 0x63, + 0x8a, 0x87, 0xa1, 0x33, 0x4c, 0xf1, 0xb0, 0x0e, 0xa3, 0xdb, 0x6e, 0x8c, 0x49, 0x3b, 0x10, 0xc7, + 0x7d, 0xe6, 0x4a, 0xb8, 0xc6, 0x51, 0xba, 0x03, 0x8c, 0x0b, 0x00, 0x96, 0x44, 0xd0, 0x5b, 0x6a, + 0x0f, 0x8c, 0xe4, 0x4b, 0xcb, 0xdd, 0x8f, 0x0f, 0x99, 0xbb, 0x40, 0xa4, 0x74, 0x18, 0x7d, 0xd0, + 0x94, 0x0e, 0xab, 0x32, 0x11, 0xc3, 0x58, 0xbe, 0x91, 0x26, 0xcb, 0xb3, 0xd0, 0x27, 0xfd, 0x82, + 0x91, 0xb2, 0xa2, 0x74, 0x7a, 0x29, 0x2b, 
0xbe, 0xdf, 0x82, 0x0b, 0xed, 0xac, 0xec, 0x2d, 0x22, + 0x91, 0xc2, 0xab, 0x03, 0xa7, 0xa7, 0x31, 0x1a, 0x64, 0xd7, 0xa6, 0x4c, 0x34, 0x9c, 0xdd, 0x1c, + 0x1d, 0xe8, 0x70, 0xb3, 0x25, 0xb2, 0x2f, 0x3c, 0x95, 0x93, 0xfb, 0xa2, 0x47, 0xc6, 0x8b, 0x8d, + 0x8c, 0x8c, 0x0b, 0x9f, 0xce, 0xcb, 0xb8, 0x30, 0x70, 0x9e, 0x85, 0x24, 0xeb, 0xc5, 0xe4, 0x47, + 0xce, 0x7a, 0xf1, 0x96, 0xca, 0x7a, 0xd1, 0x23, 0x92, 0x0e, 0xcf, 0x69, 0xd1, 0x37, 0xd7, 0x85, + 0x96, 0xaf, 0x62, 0xfa, 0x74, 0xf2, 0x55, 0x18, 0xcc, 0x9e, 0xa7, 0x4c, 0x78, 0xbe, 0x0f, 0xb3, + 0x37, 0xe8, 0xf6, 0x66, 0xf7, 0x3c, 0x37, 0xc7, 0xec, 0x03, 0xe5, 0xe6, 0xb8, 0xa3, 0xe7, 0xba, + 0x40, 0x7d, 0x92, 0x39, 0x50, 0xa4, 0x01, 0x33, 0x5c, 0xdc, 0xd1, 0x8f, 0xa0, 0x73, 0xf9, 0x74, + 0xd5, 0x49, 0xd3, 0x4d, 0x37, 0xeb, 0x10, 0xea, 0xce, 0x9c, 0x71, 0xfe, 0x6c, 0x32, 0x67, 0x5c, + 0x38, 0xf5, 0xcc, 0x19, 0x8f, 0x9c, 0x41, 0xe6, 0x8c, 0x47, 0x3f, 0xd6, 0xcc, 0x19, 0xe5, 0x87, + 0x90, 0x39, 0x63, 0x3d, 0xc9, 0x9c, 0x71, 0x31, 0x7f, 0x4a, 0x32, 0xac, 0xd2, 0x72, 0xf2, 0x65, + 0xdc, 0x81, 0x52, 0x5b, 0xfa, 0x54, 0x8b, 0x50, 0x3f, 0xd9, 0x29, 0xfb, 0xb2, 0x1c, 0xaf, 0xf9, + 0x94, 0x28, 0x10, 0x4e, 0x48, 0x51, 0xba, 0x49, 0xfe, 0x8c, 0xc7, 0x7a, 0x28, 0xc6, 0xb2, 0x54, + 0x0e, 0xf9, 0x59, 0x33, 0xec, 0xbf, 0x5a, 0x80, 0x4b, 0xbd, 0xd7, 0x75, 0xa2, 0xaf, 0xa8, 0x27, + 0xfa, 0xf5, 0x94, 0xbe, 0x82, 0x5f, 0x02, 0x12, 0xac, 0x81, 0x03, 0x4f, 0x5c, 0x83, 0x59, 0x65, + 0x8e, 0xe6, 0xb9, 0xcd, 0x03, 0x2d, 0x95, 0x9f, 0x72, 0x8d, 0x69, 0xa4, 0x11, 0x70, 0x77, 0x1d, + 0xb4, 0x08, 0xd3, 0x46, 0x61, 0xad, 0x2a, 0x84, 0x7d, 0xa5, 0x20, 0x69, 0x98, 0x60, 0x9c, 0xc6, + 0xb7, 0xbf, 0x6e, 0xc1, 0xa3, 0x39, 0x21, 0xab, 0x07, 0x8e, 0xab, 0xb0, 0x05, 0xd3, 0x6d, 0xb3, + 0x6a, 0x9f, 0xf0, 0x2b, 0x46, 0x60, 0x6c, 0xd5, 0xd7, 0x14, 0x00, 0xa7, 0x89, 0x2e, 0x5d, 0xf9, + 0xf5, 0xdf, 0xbf, 0xf4, 0xa9, 0xdf, 0xfa, 0xfd, 0x4b, 0x9f, 0xfa, 0x9d, 0xdf, 0xbf, 0xf4, 0xa9, + 0xbf, 0x78, 0x74, 0xc9, 0xfa, 0xf5, 0xa3, 0x4b, 0xd6, 0x6f, 0x1d, 0x5d, 0xb2, 
0x7e, 0xe7, 0xe8, + 0x92, 0xf5, 0x7b, 0x47, 0x97, 0xac, 0xaf, 0xfd, 0xc1, 0xa5, 0x4f, 0xbd, 0x53, 0xd8, 0x7f, 0xe9, + 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x18, 0x48, 0x7e, 0xc6, 0x9b, 0xdf, 0x00, 0x00, } diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index de2c1e690e8..b9aec0cd477 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -1169,6 +1169,36 @@ message FCVolumeSource { repeated string wwids = 5; } +// FlexPersistentVolumeSource represents a generic persistent volume resource that is +// provisioned/attached using an exec based plugin. +message FlexPersistentVolumeSource { + // Driver is the name of the driver to use for this volume. + optional string driver = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + optional string fsType = 2; + + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + optional SecretReference secretRef = 3; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; + + // Optional: Extra command options if any. + // +optional + map options = 5; +} + // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. message FlexVolumeSource { @@ -2441,7 +2471,7 @@ message PersistentVolumeSource { // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. 
// +optional - optional FlexVolumeSource flexVolume = 12; + optional FlexPersistentVolumeSource flexVolume = 12; // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. // +optional diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index c50dd0a0520..80cacc974e5 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -616,6 +616,19 @@ func (FCVolumeSource) SwaggerDoc() map[string]string { return map_FCVolumeSource } +var map_FlexPersistentVolumeSource = map[string]string{ + "": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", + "driver": "Driver is the name of the driver to use for this volume.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "readOnly": "Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "options": "Optional: Extra command options if any.", +} + +func (FlexPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_FlexPersistentVolumeSource +} + var map_FlexVolumeSource = map[string]string{ "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "driver": "Driver is the name of the driver to use for this volume.", diff --git a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go index 951b2e26479..a366d0ded9a 100644 --- a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -1547,6 +1547,38 @@ func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource. +func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource { + if in == nil { + return nil + } + out := new(FlexPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { *out = *in @@ -3165,7 +3197,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { if *in == nil { *out = nil } else { - *out = new(FlexVolumeSource) + *out = new(FlexPersistentVolumeSource) (*in).DeepCopyInto(*out) } } From 02dabb84eb1bd556d1aade73f013c27e5feb7107 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Fri, 15 Dec 2017 14:59:05 +0800 Subject: [PATCH 321/794] Remove unused well_known_labels in kubeadm. --- cmd/kubeadm/app/apis/kubeadm/BUILD | 1 - .../app/apis/kubeadm/well_known_labels.go | 43 ------------------- 2 files changed, 44 deletions(-) delete mode 100644 cmd/kubeadm/app/apis/kubeadm/well_known_labels.go diff --git a/cmd/kubeadm/app/apis/kubeadm/BUILD b/cmd/kubeadm/app/apis/kubeadm/BUILD index ef58b5ddc6c..9d6eb70ee96 100644 --- a/cmd/kubeadm/app/apis/kubeadm/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/BUILD @@ -11,7 +11,6 @@ go_library( "doc.go", "register.go", "types.go", - "well_known_labels.go", "zz_generated.deepcopy.go", ], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm", diff --git a/cmd/kubeadm/app/apis/kubeadm/well_known_labels.go b/cmd/kubeadm/app/apis/kubeadm/well_known_labels.go deleted file mode 100644 index f2fb530fdc0..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/well_known_labels.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubeadm - -// Role labels are applied to Nodes to mark their purpose. In particular, we -// usually want to distinguish the master, so that we can isolate privileged -// pods and operations. -// -// Originally we relied on not registering the master, on the fact that the -// master was Unschedulable, and on static manifests for master components. -// But we now do register masters in many environments, are generally moving -// away from static manifests (for better manageability), and working towards -// deprecating the unschedulable field (replacing it with taints & tolerations -// instead). -// -// Even with tainting, a label remains the easiest way of making a positive -// selection, so that pods can schedule only to master nodes for example, and -// thus installations will likely define a label for their master nodes. -// -// So that we can recognize master nodes in consequent places though (such as -// kubectl get nodes), we encourage installations to use the well-known labels. -// We define NodeLabelRole, which is the preferred form, but we will also recognize -// other forms that are known to be in widespread use (NodeLabelKubeadmAlphaRole). - -const ( - // NodeLabelKubeadmAlphaRole is a label that kubeadm applies to a Node as a hint that it has a particular purpose. - // Use of NodeLabelRole is preferred. 
- NodeLabelKubeadmAlphaRole = "kubeadm.alpha.kubernetes.io/role" -) From 3ae7bdd211e3df1350446b6def142b4d31c75e52 Mon Sep 17 00:00:00 2001 From: Di Xu Date: Wed, 22 Nov 2017 16:57:06 +0800 Subject: [PATCH 322/794] remove FilterFunc and use SelectionPredicate everywhere --- .../k8s.io/apiserver/pkg/storage/cacher.go | 8 +++- .../apiserver/pkg/storage/etcd/etcd_helper.go | 24 +++++------ .../pkg/storage/etcd/etcd_watcher.go | 29 +++++++------ .../pkg/storage/etcd/etcd_watcher_test.go | 42 ++++++++++++++----- .../apiserver/pkg/storage/etcd3/store.go | 10 ++--- .../apiserver/pkg/storage/etcd3/watcher.go | 13 +++--- .../apiserver/pkg/storage/interfaces.go | 4 -- .../pkg/storage/selection_predicate.go | 2 +- .../src/k8s.io/apiserver/pkg/storage/util.go | 15 ------- 9 files changed, 78 insertions(+), 69 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher.go index e3c787d0842..27ead5a1d78 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher.go @@ -682,12 +682,16 @@ func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported b } func filterFunction(key string, p SelectionPredicate) func(string, runtime.Object) bool { - f := SimpleFilter(p) filterFunc := func(objKey string, obj runtime.Object) bool { if !hasPathPrefix(objKey, key) { return false } - return f(obj) + matches, err := p.Matches(obj) + if err != nil { + glog.Errorf("invalid object for matching. Obj: %v. 
Err: %v", obj, err) + return false + } + return matches } return filterFunc } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go index 322ccefabd1..6f37f99d2dc 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go @@ -240,7 +240,7 @@ func (h *etcdHelper) Watch(ctx context.Context, key string, resourceVersion stri return nil, err } key = path.Join(h.pathPrefix, key) - w := newEtcdWatcher(false, h.quorum, nil, storage.SimpleFilter(pred), h.codec, h.versioner, nil, h.transformer, h) + w := newEtcdWatcher(false, h.quorum, nil, pred, h.codec, h.versioner, nil, h.transformer, h) go w.etcdWatch(ctx, h.etcdKeysAPI, key, watchRV) return w, nil } @@ -255,7 +255,7 @@ func (h *etcdHelper) WatchList(ctx context.Context, key string, resourceVersion return nil, err } key = path.Join(h.pathPrefix, key) - w := newEtcdWatcher(true, h.quorum, exceptKey(key), storage.SimpleFilter(pred), h.codec, h.versioner, nil, h.transformer, h) + w := newEtcdWatcher(true, h.quorum, exceptKey(key), pred, h.codec, h.versioner, nil, h.transformer, h) go w.etcdWatch(ctx, h.etcdKeysAPI, key, watchRV) return w, nil } @@ -359,7 +359,7 @@ func (h *etcdHelper) GetToList(ctx context.Context, key string, resourceVersion nodes := make([]*etcd.Node, 0) nodes = append(nodes, response.Node) - if err := h.decodeNodeList(nodes, storage.SimpleFilter(pred), listPtr); err != nil { + if err := h.decodeNodeList(nodes, pred, listPtr); err != nil { return err } trace.Step("Object decoded") @@ -370,7 +370,7 @@ func (h *etcdHelper) GetToList(ctx context.Context, key string, resourceVersion } // decodeNodeList walks the tree of each node in the list and decodes into the specified object -func (h *etcdHelper) decodeNodeList(nodes []*etcd.Node, filter storage.FilterFunc, slicePtr interface{}) error { +func (h *etcdHelper) decodeNodeList(nodes []*etcd.Node, 
pred storage.SelectionPredicate, slicePtr interface{}) error { trace := utiltrace.New("decodeNodeList " + getTypeName(slicePtr)) defer trace.LogIfLong(400 * time.Millisecond) v, err := conversion.EnforcePtr(slicePtr) @@ -383,13 +383,13 @@ func (h *etcdHelper) decodeNodeList(nodes []*etcd.Node, filter storage.FilterFun // IMPORTANT: do not log each key as a discrete step in the trace log // as it produces an immense amount of log spam when there is a large // amount of content in the list. - if err := h.decodeNodeList(node.Nodes, filter, slicePtr); err != nil { + if err := h.decodeNodeList(node.Nodes, pred, slicePtr); err != nil { return err } continue } - if obj, found := h.getFromCache(node.ModifiedIndex, filter); found { - // obj != nil iff it matches the filter function. + if obj, found := h.getFromCache(node.ModifiedIndex, pred); found { + // obj != nil iff it matches the pred function. if obj != nil { v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) } @@ -407,7 +407,7 @@ func (h *etcdHelper) decodeNodeList(nodes []*etcd.Node, filter storage.FilterFun } // being unable to set the version does not prevent the object from being extracted _ = h.versioner.UpdateObject(obj, node.ModifiedIndex) - if filter(obj) { + if matched, err := pred.Matches(obj); err == nil && matched { v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) } if node.ModifiedIndex != 0 { @@ -439,7 +439,7 @@ func (h *etcdHelper) List(ctx context.Context, key string, resourceVersion strin if err != nil { return err } - if err := h.decodeNodeList(nodes, storage.SimpleFilter(pred), listPtr); err != nil { + if err := h.decodeNodeList(nodes, pred, listPtr); err != nil { return err } trace.Step("Node list decoded") @@ -590,7 +590,7 @@ func (h *etcdHelper) GuaranteedUpdate( // their Node.ModifiedIndex, which is unique across all types. // All implementations must be thread-safe. 
type etcdCache interface { - getFromCache(index uint64, filter storage.FilterFunc) (runtime.Object, bool) + getFromCache(index uint64, pred storage.SelectionPredicate) (runtime.Object, bool) addToCache(index uint64, obj runtime.Object) } @@ -598,14 +598,14 @@ func getTypeName(obj interface{}) string { return reflect.TypeOf(obj).String() } -func (h *etcdHelper) getFromCache(index uint64, filter storage.FilterFunc) (runtime.Object, bool) { +func (h *etcdHelper) getFromCache(index uint64, pred storage.SelectionPredicate) (runtime.Object, bool) { startTime := time.Now() defer func() { metrics.ObserveGetCache(startTime) }() obj, found := h.cache.Get(index) if found { - if !filter(obj.(runtime.Object)) { + if matched, err := pred.Matches(obj.(runtime.Object)); err != nil || !matched { return nil, true } // We should not return the object itself to avoid polluting the cache if someone diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go index 1cd368bd86e..d86cd50ebab 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go @@ -78,7 +78,7 @@ type etcdWatcher struct { list bool // If we're doing a recursive watch, should be true. quorum bool // If we enable quorum, shoule be true include includeFunc - filter storage.FilterFunc + pred storage.SelectionPredicate etcdIncoming chan *etcd.Response etcdError chan error @@ -105,11 +105,9 @@ const watchWaitDuration = 100 * time.Millisecond // newEtcdWatcher returns a new etcdWatcher; if list is true, watch sub-nodes. // The versioner must be able to handle the objects that transform creates. 
-func newEtcdWatcher( - list bool, quorum bool, include includeFunc, filter storage.FilterFunc, +func newEtcdWatcher(list bool, quorum bool, include includeFunc, pred storage.SelectionPredicate, encoding runtime.Codec, versioner storage.Versioner, transform TransformFunc, - valueTransformer ValueTransformer, - cache etcdCache) *etcdWatcher { + valueTransformer ValueTransformer, cache etcdCache) *etcdWatcher { w := &etcdWatcher{ encoding: encoding, versioner: versioner, @@ -119,7 +117,7 @@ func newEtcdWatcher( list: list, quorum: quorum, include: include, - filter: filter, + pred: pred, // Buffer this channel, so that the etcd client is not forced // to context switch with every object it gets, and so that a // long time spent decoding an object won't block the *next* @@ -315,7 +313,7 @@ func (w *etcdWatcher) translate() { // decodeObject extracts an object from the provided etcd node or returns an error. func (w *etcdWatcher) decodeObject(node *etcd.Node) (runtime.Object, error) { - if obj, found := w.cache.getFromCache(node.ModifiedIndex, storage.SimpleFilter(storage.Everything)); found { + if obj, found := w.cache.getFromCache(node.ModifiedIndex, storage.Everything); found { return obj, nil } @@ -365,7 +363,7 @@ func (w *etcdWatcher) sendAdd(res *etcd.Response) { // the resourceVersion to resume will never be able to get past a bad value. return } - if !w.filter(obj) { + if matched, err := w.pred.Matches(obj); err != nil || !matched { return } action := watch.Added @@ -391,7 +389,10 @@ func (w *etcdWatcher) sendModify(res *etcd.Response) { // the resourceVersion to resume will never be able to get past a bad value. 
return } - curObjPasses := w.filter(curObj) + curObjPasses := false + if matched, err := w.pred.Matches(curObj); err == nil && matched { + curObjPasses = true + } oldObjPasses := false var oldObj runtime.Object if res.PrevNode != nil && res.PrevNode.Value != "" { @@ -400,10 +401,12 @@ func (w *etcdWatcher) sendModify(res *etcd.Response) { if err := w.versioner.UpdateObject(oldObj, res.Node.ModifiedIndex); err != nil { utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", res.Node.ModifiedIndex, oldObj, err)) } - oldObjPasses = w.filter(oldObj) + if matched, err := w.pred.Matches(oldObj); err == nil && matched { + oldObjPasses = true + } } } - // Some changes to an object may cause it to start or stop matching a filter. + // Some changes to an object may cause it to start or stop matching a pred. // We need to report those as adds/deletes. So we have to check both the previous // and current value of the object. switch { @@ -423,7 +426,7 @@ func (w *etcdWatcher) sendModify(res *etcd.Response) { Object: oldObj, }) } - // Do nothing if neither new nor old object passed the filter. + // Do nothing if neither new nor old object passed the pred. } func (w *etcdWatcher) sendDelete(res *etcd.Response) { @@ -449,7 +452,7 @@ func (w *etcdWatcher) sendDelete(res *etcd.Response) { // the resourceVersion to resume will never be able to get past a bad value. 
return } - if !w.filter(obj) { + if matched, err := w.pred.Matches(obj); err != nil || !matched { return } w.emit(watch.Event{ diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher_test.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher_test.go index aae3813a042..4c4d038dce5 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher_test.go @@ -23,6 +23,8 @@ import ( apiequality "k8s.io/apimachinery/pkg/api/equality" apitesting "k8s.io/apimachinery/pkg/api/testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" @@ -41,7 +43,7 @@ var versioner = APIObjectVersioner{} // Implements etcdCache interface as empty methods (i.e. does not cache any objects) type fakeEtcdCache struct{} -func (f *fakeEtcdCache) getFromCache(index uint64, filter storage.FilterFunc) (runtime.Object, bool) { +func (f *fakeEtcdCache) getFromCache(index uint64, pred storage.SelectionPredicate) (runtime.Object, bool) { return nil, false } @@ -58,7 +60,7 @@ func TestWatchInterpretations(t *testing.T) { podBar := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}} podBaz := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "baz"}} - // All of these test cases will be run with the firstLetterIsB Filter. + // All of these test cases will be run with the firstLetterIsB SelectionPredicate. table := map[string]struct { actions []string // Run this test item for every action here. prevNodeValue string @@ -128,8 +130,21 @@ func TestWatchInterpretations(t *testing.T) { expectEmit: false, }, } - firstLetterIsB := func(obj runtime.Object) bool { - return obj.(*example.Pod).Name[0] == 'b' + + // Should use fieldSelector here. 
+ // But for the sake of tests (simplifying the codes), use labelSelector to support set-based requirements + selector, err := labels.Parse("metadata.name in (bar, baz)") + if err != nil { + t.Fatal(err) + } + firstLetterIsB := storage.SelectionPredicate{ + Label: selector, + Field: fields.Everything(), + IncludeUninitialized: true, + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, bool, error) { + pod := obj.(*example.Pod) + return labels.Set{"metadata.name": pod.Name}, nil, pod.Initializers != nil, nil + }, } for name, item := range table { for _, action := range item.actions { @@ -173,7 +188,7 @@ func TestWatchInterpretations(t *testing.T) { func TestWatchInterpretation_ResponseNotSet(t *testing.T) { _, codecs := testScheme(t) codec := codecs.LegacyCodec(schema.GroupVersion{Version: "v1"}) - w := newEtcdWatcher(false, false, nil, storage.SimpleFilter(storage.Everything), codec, versioner, nil, prefixTransformer{prefix: "test!"}, &fakeEtcdCache{}) + w := newEtcdWatcher(false, false, nil, storage.Everything, codec, versioner, nil, prefixTransformer{prefix: "test!"}, &fakeEtcdCache{}) w.emit = func(e watch.Event) { t.Errorf("Unexpected emit: %v", e) } @@ -189,7 +204,7 @@ func TestWatchInterpretation_ResponseNoNode(t *testing.T) { codec := codecs.LegacyCodec(schema.GroupVersion{Version: "v1"}) actions := []string{"create", "set", "compareAndSwap", "delete"} for _, action := range actions { - w := newEtcdWatcher(false, false, nil, storage.SimpleFilter(storage.Everything), codec, versioner, nil, prefixTransformer{prefix: "test!"}, &fakeEtcdCache{}) + w := newEtcdWatcher(false, false, nil, storage.Everything, codec, versioner, nil, prefixTransformer{prefix: "test!"}, &fakeEtcdCache{}) w.emit = func(e watch.Event) { t.Errorf("Unexpected emit: %v", e) } @@ -205,7 +220,7 @@ func TestWatchInterpretation_ResponseBadData(t *testing.T) { codec := codecs.LegacyCodec(schema.GroupVersion{Version: "v1"}) actions := []string{"create", "set", "compareAndSwap", "delete"} 
for _, action := range actions { - w := newEtcdWatcher(false, false, nil, storage.SimpleFilter(storage.Everything), codec, versioner, nil, prefixTransformer{prefix: "test!"}, &fakeEtcdCache{}) + w := newEtcdWatcher(false, false, nil, storage.Everything, codec, versioner, nil, prefixTransformer{prefix: "test!"}, &fakeEtcdCache{}) w.emit = func(e watch.Event) { t.Errorf("Unexpected emit: %v", e) } @@ -228,10 +243,17 @@ func TestWatchInterpretation_ResponseBadData(t *testing.T) { func TestSendResultDeleteEventHaveLatestIndex(t *testing.T) { _, codecs := testScheme(t) codec := apitesting.TestCodec(codecs, examplev1.SchemeGroupVersion) - filter := func(obj runtime.Object) bool { - return obj.(*example.Pod).Name != "bar" + selector, _ := fields.ParseSelector("metadata.name!=bar") + pred := storage.SelectionPredicate{ + Label: labels.Everything(), + Field: selector, + IncludeUninitialized: true, + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, bool, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name}, pod.Initializers != nil, nil + }, } - w := newEtcdWatcher(false, false, nil, filter, codec, versioner, nil, prefixTransformer{prefix: "test!"}, &fakeEtcdCache{}) + w := newEtcdWatcher(false, false, nil, pred, codec, versioner, nil, prefixTransformer{prefix: "test!"}, &fakeEtcdCache{}) eventChan := make(chan watch.Event, 1) w.emit = func(e watch.Event) { diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go index 2350a5526b8..ff654adec57 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -403,7 +403,7 @@ func (s *store) GetToList(ctx context.Context, key string, resourceVersion strin if err != nil || v.Kind() != reflect.Slice { panic("need ptr to slice") } - if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), storage.SimpleFilter(pred), s.codec, 
s.versioner); err != nil { + if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner); err != nil { return err } // update version with cluster level revision @@ -492,8 +492,6 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor } keyPrefix := key - filter := storage.SimpleFilter(pred) - // set the appropriate clientv3 options to filter the returned data set var paging bool options := make([]clientv3.OpOption, 0, 4) @@ -587,7 +585,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor continue } - if err := appendListItem(v, data, uint64(kv.ModRevision), filter, s.codec, s.versioner); err != nil { + if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner); err != nil { return err } } @@ -774,14 +772,14 @@ func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objP } // appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice. 
-func appendListItem(v reflect.Value, data []byte, rev uint64, filter storage.FilterFunc, codec runtime.Codec, versioner storage.Versioner) error { +func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner) error { obj, _, err := codec.Decode(data, nil, reflect.New(v.Type().Elem()).Interface().(runtime.Object)) if err != nil { return err } // being unable to set the version does not prevent the object from being extracted versioner.UpdateObject(obj, rev) - if filter(obj) { + if matched, err := pred.Matches(obj); err == nil && matched { v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) } return nil diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index 366e161cfa0..38aae2f1fa6 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -72,7 +72,7 @@ type watchChan struct { key string initialRev int64 recursive bool - internalFilter storage.FilterFunc + internalPred storage.SelectionPredicate ctx context.Context cancel context.CancelFunc incomingEventChan chan *event @@ -111,14 +111,14 @@ func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, re key: key, initialRev: rev, recursive: recursive, - internalFilter: storage.SimpleFilter(pred), + internalPred: pred, incomingEventChan: make(chan *event, incomingBufSize), resultChan: make(chan watch.Event, outgoingBufSize), errChan: make(chan error, 1), } if pred.Empty() { // The filter doesn't filter out any object. 
- wc.internalFilter = nil + wc.internalPred = storage.Everything } wc.ctx, wc.cancel = context.WithCancel(ctx) return wc @@ -250,14 +250,15 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) { } func (wc *watchChan) filter(obj runtime.Object) bool { - if wc.internalFilter == nil { + if wc.internalPred.Empty() { return true } - return wc.internalFilter(obj) + matched, err := wc.internalPred.Matches(obj) + return err == nil && matched } func (wc *watchChan) acceptAll() bool { - return wc.internalFilter == nil + return wc.internalPred.Empty() } // transform transforms an event into a result for user if not filtered. diff --git a/staging/src/k8s.io/apiserver/pkg/storage/interfaces.go b/staging/src/k8s.io/apiserver/pkg/storage/interfaces.go index 0d81f05c3d9..987d84f5780 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/interfaces.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/interfaces.go @@ -70,10 +70,6 @@ type MatchValue struct { // to that function. type TriggerPublisherFunc func(obj runtime.Object) []MatchValue -// FilterFunc takes an API object and returns true if the object satisfies some requirements. -// TODO: We will remove this type and use SelectionPredicate everywhere. -type FilterFunc func(obj runtime.Object) bool - // Everything accepts all objects. 
var Everything = SelectionPredicate{ Label: labels.Everything(), diff --git a/staging/src/k8s.io/apiserver/pkg/storage/selection_predicate.go b/staging/src/k8s.io/apiserver/pkg/storage/selection_predicate.go index 83e423a97ab..9c8c3d5994c 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/selection_predicate.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/selection_predicate.go @@ -96,7 +96,7 @@ func (s *SelectionPredicate) Matches(obj runtime.Object) (bool, error) { } matched := s.Label.Matches(labels) if matched && s.Field != nil { - matched = (matched && s.Field.Matches(fields)) + matched = matched && s.Field.Matches(fields) } return matched, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/util.go b/staging/src/k8s.io/apiserver/pkg/storage/util.go index 3e0b7211b95..ebe54ba2cc7 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/util.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/util.go @@ -22,8 +22,6 @@ import ( "strings" "sync/atomic" - "github.com/golang/glog" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/validation/path" "k8s.io/apimachinery/pkg/runtime" @@ -40,19 +38,6 @@ func SimpleUpdate(fn SimpleUpdateFunc) UpdateFunc { } } -// SimpleFilter converts a selection predicate into a FilterFunc. -// It ignores any error from Matches(). -func SimpleFilter(p SelectionPredicate) FilterFunc { - return func(obj runtime.Object) bool { - matches, err := p.Matches(obj) - if err != nil { - glog.Errorf("invalid object for matching. Obj: %v. 
Err: %v", obj, err) - return false - } - return matches - } -} - func EverythingFunc(runtime.Object) bool { return true } From 9efe856979001b83d544fde99c9a2af3f8bcc170 Mon Sep 17 00:00:00 2001 From: James Munnelly Date: Fri, 15 Dec 2017 12:21:16 +0000 Subject: [PATCH 323/794] Register metav1 types into samplecontroller api scheme --- .../pkg/apis/samplecontroller/v1alpha1/register.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/register.go b/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/register.go index babd488d9e9..df5695eb092 100644 --- a/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/register.go +++ b/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/register.go @@ -17,6 +17,7 @@ limitations under the License. package v1alpha1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -47,5 +48,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &Foo{}, &FooList{}, ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } From 41bd2f5f858fd34e8051f8cd0ed4a29e38104c50 Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Thu, 14 Dec 2017 18:31:35 +0100 Subject: [PATCH 324/794] Fix format string in describers. 
--- pkg/printers/internalversion/describe.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index c33b1c636ce..a8e2ec1e060 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -943,7 +943,7 @@ func printAzureDiskVolumeSource(d *api.AzureDiskVolumeSource, w PrefixWriter) { func printVsphereVolumeSource(vsphere *api.VsphereVirtualDiskVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tvSphereVolume (a Persistent Disk resource in vSphere)\n"+ " VolumePath:\t%v\n"+ - " FSType:\t%v\n", + " FSType:\t%v\n"+ " StoragePolicyName:\t%v\n", vsphere.VolumePath, vsphere.FSType, vsphere.StoragePolicyName) } @@ -1081,7 +1081,7 @@ func printFlexVolumeSource(flex *api.FlexVolumeSource, w PrefixWriter) { " Driver:\t%v\n"+ " FSType:\t%v\n"+ " SecretRef:\t%v\n"+ - " ReadOnly:\t%v\n", + " ReadOnly:\t%v\n"+ " Options:\t%v\n", flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) } @@ -1096,7 +1096,7 @@ func printFlockerVolumeSource(flocker *api.FlockerVolumeSource, w PrefixWriter) func printCSIPersistentVolumeSource(csi *api.CSIPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+ " Driver:\t%v\n"+ - " VolumeHandle:\t%v\n", + " VolumeHandle:\t%v\n"+ " ReadOnly:\t%v\n", csi.Driver, csi.VolumeHandle, csi.ReadOnly) } From 599f74943a666f7235ddca9a8745733b32320048 Mon Sep 17 00:00:00 2001 From: Karol Wychowaniec Date: Fri, 17 Nov 2017 16:51:02 +0100 Subject: [PATCH 325/794] Use pod nanny configured with ComponentConfig in Heapster --- .../google/heapster-controller.yaml | 46 ++++++++++++++++++- .../heapster-controller-combined.yaml | 45 +++++++++++++++++- .../influxdb/heapster-controller.yaml | 45 +++++++++++++++++- .../stackdriver/heapster-controller.yaml | 23 +++++++++- .../standalone/heapster-controller.yaml | 23 +++++++++- 5 files 
changed, 174 insertions(+), 8 deletions(-) diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 31e2e173d18..1bf63f57b58 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -20,6 +20,32 @@ metadata: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- +apiVersion: v1 +kind: ConfigMap +metadata: + name: heapster-config + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: NannyConfiguration +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: eventer-config + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: NannyConfiguration +--- apiVersion: extensions/v1beta1 kind: Deployment metadata: @@ -64,7 +90,7 @@ spec: - /eventer - --source=kubernetes:'' - --sink=gcl - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -73,6 +99,9 @@ spec: requests: cpu: 50m memory: {{ nanny_memory }} + volumeMounts: + - name: heapster-config-volume + mountPath: /etc/config env: - name: MY_POD_NAME valueFrom: @@ -84,6 +113,7 @@ spec: fieldPath: metadata.namespace command: - /pod_nanny + - --config-dir=/etc/config - --cpu={{ base_metrics_cpu }} - --extra-cpu={{ metrics_cpu_per_node }}m - --memory={{ base_metrics_memory }} @@ -93,7 +123,7 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: eventer-nanny resources: limits: @@ -111,8 
+141,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + volumeMounts: + - name: eventer-config-volume + mountPath: /etc/config command: - /pod_nanny + - --config-dir=/etc/config - --cpu=100m - --extra-cpu=0m - --memory={{base_eventer_memory}} @@ -122,6 +156,13 @@ spec: - --container=eventer - --poll-period=300000 - --estimator=exponential + volumes: + - name: heapster-config-volume + configMap: + name: heapster-config + - name: eventer-config-volume + configMap: + name: eventer-config serviceAccountName: heapster tolerations: - key: "CriticalAddonsOnly" diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index 60d19bc18e7..72b5b66ba2e 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -20,6 +20,32 @@ metadata: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- +apiVersion: v1 +kind: ConfigMap +metadata: + name: heapster-config + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: NannyConfiguration +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: eventer-config + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: NannyConfiguration +--- apiVersion: extensions/v1beta1 kind: Deployment metadata: @@ -66,7 +92,7 @@ spec: - /eventer - --source=kubernetes:'' - --sink=gcl - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -75,6 +101,9 @@ spec: requests: cpu: 50m
memory: {{ nanny_memory }} + volumeMounts: + - name: heapster-config-volume + mountPath: /etc/config env: - name: MY_POD_NAME valueFrom: @@ -86,6 +115,7 @@ spec: fieldPath: metadata.namespace command: - /pod_nanny + - --config-dir=/etc/config - --cpu={{ base_metrics_cpu }} - --extra-cpu={{ metrics_cpu_per_node }}m - --memory={{ base_metrics_memory }} @@ -95,7 +125,7 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: eventer-nanny resources: limits: @@ -104,6 +134,9 @@ spec: requests: cpu: 50m memory: {{ nanny_memory }} + volumeMounts: + - name: eventer-config-volume + mountPath: /etc/config env: - name: MY_POD_NAME valueFrom: @@ -115,6 +148,7 @@ spec: fieldPath: metadata.namespace command: - /pod_nanny + - --config-dir=/etc/config - --cpu=100m - --extra-cpu=0m - --memory={{ base_eventer_memory }} @@ -124,6 +158,13 @@ spec: - --container=eventer - --poll-period=300000 - --estimator=exponential + volumes: + - name: heapster-config-volume + configMap: + name: heapster-config + - name: eventer-config-volume + configMap: + name: eventer-config serviceAccountName: heapster tolerations: - key: "CriticalAddonsOnly" diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml index 0821dbcd13f..8e906ceaf50 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -20,6 +20,32 @@ metadata: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- +apiVersion: v1 +kind: ConfigMap +metadata: + name: heapster-config + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: 
NannyConfiguration +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: eventer-config + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: NannyConfiguration +--- apiVersion: extensions/v1beta1 kind: Deployment metadata: @@ -64,7 +90,7 @@ spec: - /eventer - --source=kubernetes:'' - --sink=influxdb:http://monitoring-influxdb:8086 - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -82,8 +108,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + volumeMounts: + - name: heapster-config-volume + mountPath: /etc/config command: - /pod_nanny + - --config-dir=/etc/config - --cpu={{ base_metrics_cpu }} - --extra-cpu={{ metrics_cpu_per_node }}m - --memory={{ base_metrics_memory }} @@ -93,7 +123,7 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: eventer-nanny resources: limits: @@ -111,8 +141,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + volumeMounts: + - name: eventer-config-volume + mountPath: /etc/config command: - /pod_nanny + - --config-dir=/etc/config - --cpu=100m - --extra-cpu=0m - --memory={{ base_eventer_memory }} @@ -122,6 +156,13 @@ spec: - --container=eventer - --poll-period=300000 - --estimator=exponential + volumes: + - name: heapster-config-volume + configMap: + name: heapster-config + - name: eventer-config-volume + configMap: + name: eventer-config serviceAccountName: heapster tolerations: - key: "CriticalAddonsOnly" diff --git a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml index a4ce962cc9f..4622035d8d4 100644 --- 
a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml @@ -18,6 +18,19 @@ metadata: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- +apiVersion: v1 +kind: ConfigMap +metadata: + name: heapster-config + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: NannyConfiguration +--- apiVersion: extensions/v1beta1 kind: Deployment metadata: @@ -76,7 +89,7 @@ spec: fieldRef: fieldPath: metadata.namespace # END_PROMETHEUS_TO_SD - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -85,6 +98,9 @@ spec: requests: cpu: 50m memory: {{ nanny_memory }} + volumeMounts: + - name: heapster-config-volume + mountPath: /etc/config env: - name: MY_POD_NAME valueFrom: @@ -96,6 +112,7 @@ spec: fieldPath: metadata.namespace command: - /pod_nanny + - --config-dir=/etc/config - --cpu={{ base_metrics_cpu }} - --extra-cpu={{ metrics_cpu_per_node }}m - --memory={{ base_metrics_memory }} @@ -105,6 +122,10 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential + volumes: + - name: heapster-config-volume + configMap: + name: heapster-config serviceAccountName: heapster tolerations: - key: "CriticalAddonsOnly" diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml index f2c43740f78..c3686d4caca 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -18,6 +18,19 @@ metadata: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- +apiVersion: v1 +kind: ConfigMap +metadata: + 
name: heapster-config + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: NannyConfiguration +--- apiVersion: extensions/v1beta1 kind: Deployment metadata: @@ -55,7 +68,7 @@ spec: command: - /heapster - --source=kubernetes.summary_api:'' - - image: gcr.io/google_containers/addon-resizer:1.7 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -73,8 +86,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + volumeMounts: + - name: heapster-config-volume + mountPath: /etc/config command: - /pod_nanny + - --config-dir=/etc/config - --cpu={{ base_metrics_cpu }} - --extra-cpu={{ metrics_cpu_per_node }}m - --memory={{ base_metrics_memory }} @@ -84,6 +101,10 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential + volumes: + - name: heapster-config-volume + configMap: + name: heapster-config serviceAccountName: heapster tolerations: - key: "CriticalAddonsOnly" From 7135923896b42bba14eff336689e0b7b55a940f5 Mon Sep 17 00:00:00 2001 From: Karol Wychowaniec Date: Tue, 21 Nov 2017 16:57:39 +0100 Subject: [PATCH 326/794] Use pod nanny configured with ComponentConfig in Metrics Server --- .../metrics-server-deployment.yaml | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml index 5b19bf769a3..e82eb9d74a2 100644 --- a/cluster/addons/metrics-server/metrics-server-deployment.yaml +++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml @@ -7,6 +7,19 @@ metadata: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- +apiVersion: v1 +kind: ConfigMap +metadata: + name: metrics-server-config + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + 
addonmanager.kubernetes.io/mode: EnsureExists +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: NannyConfiguration +--- apiVersion: extensions/v1beta1 kind: Deployment metadata: @@ -43,7 +56,7 @@ spec: name: https protocol: TCP - name: metrics-server-nanny - image: gcr.io/google_containers/addon-resizer:1.7 + image: gcr.io/google_containers/addon-resizer:1.8.1 resources: limits: cpu: 100m @@ -60,8 +73,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + volumeMounts: + - name: metrics-server-config-volume + mountPath: /etc/config command: - /pod_nanny + - --config-dir=/etc/config - --cpu=40m - --extra-cpu=0.5m - --memory=140Mi @@ -71,6 +88,10 @@ spec: - --container=metrics-server - --poll-period=300000 - --estimator=exponential + volumes: + - name: metrics-server-config-volume + configMap: + name: metrics-server-config tolerations: - key: "CriticalAddonsOnly" operator: "Exists" From d5b75ec630b721380d94b757e706ff00ef1a1289 Mon Sep 17 00:00:00 2001 From: AnubhaKushwaha Date: Fri, 15 Dec 2017 22:08:34 +0530 Subject: [PATCH 327/794] Pointed to community/contributors/guide/README.md --- CONTRIBUTING.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 470f556adfd..9974dc68577 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,9 +1,7 @@ # Contributing -Information about contributing to the -[kubernetes code repo](README.md) lives in the -[kubernetes community repo](https://github.com/kubernetes/community) -(it's a big topic). +Welcome to Kubernetes! If you are interested in contributing to the [Kubernetes code repo](README.md) then checkout the [Contributor's Guide](https://git.k8s.io/community/contributors/guide/) +The [Kubernetes community repo](https://github.com/kubernetes/community) contains information on how the community is organized and other information that is pertinent to contributing. 
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/CONTRIBUTING.md?pixel)]() From 1443b1bd1f7518ca375d3bfd729d9a42e3e23972 Mon Sep 17 00:00:00 2001 From: mtanino Date: Wed, 13 Dec 2017 11:31:38 -0500 Subject: [PATCH 328/794] Update detach logic for block volume if devicePath is empty --- pkg/volume/fc/attacher.go | 13 +- pkg/volume/fc/disk_manager.go | 4 +- pkg/volume/fc/fc.go | 47 +++-- pkg/volume/fc/fc_test.go | 9 + pkg/volume/fc/fc_util.go | 202 +++++++++++++++++---- pkg/volume/fc/fc_util_test.go | 4 + pkg/volume/util/device_util.go | 1 + pkg/volume/util/device_util_linux.go | 21 +++ pkg/volume/util/device_util_linux_test.go | 24 ++- pkg/volume/util/device_util_unsupported.go | 6 + 10 files changed, 266 insertions(+), 65 deletions(-) diff --git a/pkg/volume/fc/attacher.go b/pkg/volume/fc/attacher.go index a0714dab36b..ff034c58e69 100644 --- a/pkg/volume/fc/attacher.go +++ b/pkg/volume/fc/attacher.go @@ -200,13 +200,15 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun volumeMode: volumeMode, readOnly: readOnly, mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host), + deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), }, nil } return &fcDiskMounter{ - fcDisk: fcDisk, - fsType: fc.FSType, - readOnly: readOnly, - mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host), + fcDisk: fcDisk, + fsType: fc.FSType, + readOnly: readOnly, + mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host), + deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), }, nil } @@ -215,6 +217,7 @@ func volumeSpecToUnmounter(mounter mount.Interface) *fcDiskUnmounter { fcDisk: &fcDisk{ io: &osIOHandler{}, }, - mounter: mounter, + mounter: mounter, + deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), } } diff --git a/pkg/volume/fc/disk_manager.go b/pkg/volume/fc/disk_manager.go index efd1881808c..13cf923a923 100644 --- 
a/pkg/volume/fc/disk_manager.go +++ b/pkg/volume/fc/disk_manager.go @@ -31,7 +31,9 @@ type diskManager interface { // Attaches the disk to the kubelet's host machine. AttachDisk(b fcDiskMounter) (string, error) // Detaches the disk from the kubelet's host machine. - DetachDisk(disk fcDiskUnmounter, devName string) error + DetachDisk(disk fcDiskUnmounter, devicePath string) error + // Detaches the block disk from the kubelet's host machine. + DetachBlockFCDisk(disk fcDiskUnmapper, mntPath, devicePath string) error } // utility to mount a disk based filesystem diff --git a/pkg/volume/fc/fc.go b/pkg/volume/fc/fc.go index f6c2841bda5..8772ce91558 100644 --- a/pkg/volume/fc/fc.go +++ b/pkg/volume/fc/fc.go @@ -18,6 +18,7 @@ package fc import ( "fmt" + "os" "strconv" "strings" @@ -147,13 +148,15 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, volumeMode: volumeMode, readOnly: readOnly, mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, + deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), }, nil } return &fcDiskMounter{ - fcDisk: fcDisk, - fsType: fc.FSType, - readOnly: readOnly, - mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, + fcDisk: fcDisk, + fsType: fc.FSType, + readOnly: readOnly, + mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, + deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), }, nil } @@ -189,8 +192,9 @@ func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID t manager: manager, io: &osIOHandler{}, plugin: plugin}, - readOnly: readOnly, - mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, + readOnly: readOnly, + mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, + deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), }, nil } @@ -208,7 +212,8 @@ func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, m plugin: plugin, io: &osIOHandler{}, }, - mounter: mounter, + mounter: 
mounter, + deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), }, nil } @@ -225,6 +230,7 @@ func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, ma plugin: plugin, io: &osIOHandler{}, }, + deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), }, nil } @@ -328,6 +334,7 @@ type fcDiskMounter struct { fsType string volumeMode v1.PersistentVolumeMode mounter *mount.SafeFormatAndMount + deviceUtil util.DeviceUtil } var _ volume.Mounter = &fcDiskMounter{} @@ -362,7 +369,8 @@ func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error { type fcDiskUnmounter struct { *fcDisk - mounter mount.Interface + mounter mount.Interface + deviceUtil util.DeviceUtil } var _ volume.Unmounter = &fcDiskUnmounter{} @@ -380,8 +388,9 @@ func (c *fcDiskUnmounter) TearDownAt(dir string) error { // Block Volumes Support type fcDiskMapper struct { *fcDisk - readOnly bool - mounter mount.Interface + readOnly bool + mounter mount.Interface + deviceUtil util.DeviceUtil } var _ volume.BlockVolumeMapper = &fcDiskMapper{} @@ -392,18 +401,22 @@ func (b *fcDiskMapper) SetUpDevice() (string, error) { type fcDiskUnmapper struct { *fcDisk + deviceUtil util.DeviceUtil } var _ volume.BlockVolumeUnmapper = &fcDiskUnmapper{} -func (c *fcDiskUnmapper) TearDownDevice(_, devicePath string) error { - // Remove scsi device from the node. 
- if !strings.HasPrefix(devicePath, "/dev/") { - return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath) +func (c *fcDiskUnmapper) TearDownDevice(mapPath, devicePath string) error { + err := c.manager.DetachBlockFCDisk(*c, mapPath, devicePath) + if err != nil { + return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", mapPath, err) } - arr := strings.Split(devicePath, "/") - dev := arr[len(arr)-1] - removeFromScsiSubsystem(dev, c.io) + glog.V(4).Infof("fc: %q is unmounted, deleting the directory", mapPath) + err = os.RemoveAll(mapPath) + if err != nil { + return fmt.Errorf("fc: failed to delete the directory: %s\nError: %v", mapPath, err) + } + glog.V(4).Infof("fc: successfully detached disk: %s", mapPath) return nil } diff --git a/pkg/volume/fc/fc_test.go b/pkg/volume/fc/fc_test.go index 725f717bd67..42a530bc4a5 100644 --- a/pkg/volume/fc/fc_test.go +++ b/pkg/volume/fc/fc_test.go @@ -120,6 +120,15 @@ func (fake *fakeDiskManager) DetachDisk(c fcDiskUnmounter, mntPath string) error return nil } +func (fake *fakeDiskManager) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error { + err := os.RemoveAll(mapPath) + if err != nil { + return err + } + fake.detachCalled = true + return nil +} + func doTestPlugin(t *testing.T, spec *volume.Spec) { tmpDir, err := utiltesting.MkTmpdir("fc_test") if err != nil { diff --git a/pkg/volume/fc/fc_util.go b/pkg/volume/fc/fc_util.go index 83d930220b5..050989d353f 100644 --- a/pkg/volume/fc/fc_util.go +++ b/pkg/volume/fc/fc_util.go @@ -29,6 +29,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" + volumeutil "k8s.io/kubernetes/pkg/volume/util" ) type ioHandler interface { @@ -40,6 +41,11 @@ type ioHandler interface { type osIOHandler struct{} +const ( + byPath = "/dev/disk/by-path/" + byID = "/dev/disk/by-id/" +) + func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) } 
@@ -53,37 +59,17 @@ func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.File return ioutil.WriteFile(filename, data, perm) } -// given a disk path like /dev/sdx, find the devicemapper parent -// TODO #23192 Convert this code to use the generic code in ../util -// which is used by the iSCSI implementation -func findMultipathDeviceMapper(disk string, io ioHandler) string { - sys_path := "/sys/block/" - if dirs, err := io.ReadDir(sys_path); err == nil { - for _, f := range dirs { - name := f.Name() - if strings.HasPrefix(name, "dm-") { - if _, err1 := io.Lstat(sys_path + name + "/slaves/" + disk); err1 == nil { - return "/dev/" + name - } - } - } - } - return "" -} - // given a wwn and lun, find the device and associated devicemapper parent -func findDisk(wwn, lun string, io ioHandler) (string, string) { +func findDisk(wwn, lun string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) { fc_path := "-fc-0x" + wwn + "-lun-" + lun - dev_path := "/dev/disk/by-path/" + dev_path := byPath if dirs, err := io.ReadDir(dev_path); err == nil { for _, f := range dirs { name := f.Name() if strings.Contains(name, fc_path) { if disk, err1 := io.EvalSymlinks(dev_path + name); err1 == nil { - arr := strings.Split(disk, "/") - l := len(arr) - 1 - dev := arr[l] - dm := findMultipathDeviceMapper(dev, io) + dm := deviceUtil.FindMultipathDeviceForDevice(disk) + glog.Infof("fc: find disk: %v, dm: %v", disk, dm) return disk, dm } } @@ -93,7 +79,7 @@ func findDisk(wwn, lun string, io ioHandler) (string, string) { } // given a wwid, find the device and associated devicemapper parent -func findDiskWWIDs(wwid string, io ioHandler) (string, string) { +func findDiskWWIDs(wwid string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) { // Example wwid format: // 3600508b400105e210000900000490000 // @@ -104,7 +90,7 @@ func findDiskWWIDs(wwid string, io ioHandler) (string, string) { // underscore when wwid is exposed under /dev/by-id. 
fc_path := "scsi-" + wwid - dev_id := "/dev/disk/by-id/" + dev_id := byID if dirs, err := io.ReadDir(dev_id); err == nil { for _, f := range dirs { name := f.Name() @@ -114,10 +100,8 @@ func findDiskWWIDs(wwid string, io ioHandler) (string, string) { glog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", dev_id+name, err) return "", "" } - arr := strings.Split(disk, "/") - l := len(arr) - 1 - dev := arr[l] - dm := findMultipathDeviceMapper(dev, io) + dm := deviceUtil.FindMultipathDeviceForDevice(disk) + glog.Infof("fc: find disk: %v, dm: %v", disk, dm) return disk, dm } } @@ -197,9 +181,9 @@ func searchDisk(b fcDiskMounter) (string, error) { for true { for _, diskId := range diskIds { if len(wwns) != 0 { - disk, dm = findDisk(diskId, lun, io) + disk, dm = findDisk(diskId, lun, io, b.deviceUtil) } else { - disk, dm = findDiskWWIDs(diskId, io) + disk, dm = findDiskWWIDs(diskId, io, b.deviceUtil) } // if multipath device is found, break if dm != "" { @@ -265,13 +249,153 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) { return devicePath, err } -func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devName string) error { - // Remove scsi device from the node. - if !strings.HasPrefix(devName, "/dev/") { - return fmt.Errorf("fc detach disk: invalid device name: %s", devName) +// DetachDisk removes scsi device file such as /dev/sdX from the node. +func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error { + var devices []string + // devicePath might be like /dev/mapper/mpathX. Find destination. 
+ dstPath, err := c.io.EvalSymlinks(devicePath) + if err != nil { + return err + } + // Find slave + if strings.HasPrefix(dstPath, "/dev/dm-") { + devices = c.deviceUtil.FindSlaveDevicesOnMultipath(dstPath) + } else { + // Add single devicepath to devices + devices = append(devices, dstPath) + } + glog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices) + var lastErr error + for _, device := range devices { + err := util.detachFCDisk(c.io, device) + if err != nil { + glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) + lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) + } + } + if lastErr != nil { + glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr) + return lastErr } - arr := strings.Split(devName, "/") - dev := arr[len(arr)-1] - removeFromScsiSubsystem(dev, c.io) return nil } + +// detachFCDisk removes scsi device file such as /dev/sdX from the node. +func (util *FCUtil) detachFCDisk(io ioHandler, devicePath string) error { + // Remove scsi device from the node. + if !strings.HasPrefix(devicePath, "/dev/") { + return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath) + } + arr := strings.Split(devicePath, "/") + dev := arr[len(arr)-1] + removeFromScsiSubsystem(dev, io) + return nil +} + +// DetachBlockFCDisk detaches a volume from kubelet node, removes scsi device file +// such as /dev/sdX from the node, and then removes loopback for the scsi device. +func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error { + // Check if devicePath is valid + if len(devicePath) != 0 { + if pathExists, pathErr := checkPathExists(devicePath); !pathExists || pathErr != nil { + return pathErr + } + } else { + // TODO: FC plugin can't obtain the devicePath from kubelet becuase devicePath + // in volume object isn't updated when volume is attached to kubelet node. + glog.Infof("fc: devicePath is empty. 
Try to retreive FC configuration from global map path: %v", mapPath) + } + + // Check if global map path is valid + // global map path examples: + // wwn+lun: plugins/kubernetes.io/fc/volumeDevices/50060e801049cfd1-lun-0/ + // wwid: plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000/ + if pathExists, pathErr := checkPathExists(mapPath); !pathExists || pathErr != nil { + return pathErr + } + + // Retreive volume plugin dependent path like '50060e801049cfd1-lun-0' from global map path + arr := strings.Split(mapPath, "/") + if len(arr) < 1 { + return fmt.Errorf("Fail to retreive volume plugin information from global map path: %v", mapPath) + } + volumeInfo := arr[len(arr)-1] + + // Search symbolick link which matches volumeInfo under /dev/disk/by-path or /dev/disk/by-id + // then find destination device path from the link + searchPath := byID + if strings.Contains(volumeInfo, "-lun-") { + searchPath = byPath + } + fis, err := ioutil.ReadDir(searchPath) + if err != nil { + return err + } + for _, fi := range fis { + if strings.Contains(fi.Name(), volumeInfo) { + devicePath = path.Join(searchPath, fi.Name()) + glog.V(5).Infof("fc: updated devicePath: %s", devicePath) + break + } + } + if len(devicePath) == 0 { + return fmt.Errorf("fc: failed to find corresponding device from searchPath: %v", searchPath) + } + dstPath, err := c.io.EvalSymlinks(devicePath) + if err != nil { + return err + } + glog.V(4).Infof("fc: find destination device path from symlink: %v", dstPath) + + // Get loopback device which takes fd lock for device beofore detaching a volume from node. 
+ var devices []string + blkUtil := volumeutil.NewBlockVolumePathHandler() + dm := c.deviceUtil.FindMultipathDeviceForDevice(dstPath) + if len(dm) != 0 { + dstPath = dm + } + loop, err := volumeutil.BlockVolumePathHandler.GetLoopDevice(blkUtil, dstPath) + if err != nil { + glog.Warningf("fc: failed to get loopback for device: %v, err: %v", dstPath, err) + } else { + glog.V(4).Infof("fc: found loopback: %v", loop) + } + + // Detach volume from kubelet node + if len(dm) != 0 { + // Find all devices which are managed by multipath + devices = c.deviceUtil.FindSlaveDevicesOnMultipath(dm) + } else { + // Add single device path to devices + devices = append(devices, dstPath) + } + var lastErr error + for _, device := range devices { + err = util.detachFCDisk(c.io, device) + if err != nil { + glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) + lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) + } + } + if lastErr != nil { + glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr) + return lastErr + } + + // The volume was successfully detached from node. We can safely remove the loopback. 
+ err = volumeutil.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) + if err != nil { + return fmt.Errorf("fc: failed to remove loopback :%v, err: %v", loop, err) + } + return nil +} + +func checkPathExists(path string) (bool, error) { + if pathExists, pathErr := volumeutil.PathExists(path); pathErr != nil { + return pathExists, fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmap skipped because path does not exist: %v", path) + return pathExists, nil + } + return true, nil +} diff --git a/pkg/volume/fc/fc_util_test.go b/pkg/volume/fc/fc_util_test.go index 47e1c3b1dbf..50c5308b940 100644 --- a/pkg/volume/fc/fc_util_test.go +++ b/pkg/volume/fc/fc_util_test.go @@ -20,6 +20,8 @@ import ( "os" "testing" "time" + + "k8s.io/kubernetes/pkg/volume/util" ) type fakeFileInfo struct { @@ -91,6 +93,7 @@ func TestSearchDisk(t *testing.T) { lun: "0", io: &fakeIOHandler{}, }, + deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), } devicePath, error := searchDisk(fakeMounter) // if no disk matches input wwn and lun, exit @@ -105,6 +108,7 @@ func TestSearchDiskWWID(t *testing.T) { wwids: []string{"3600508b400105e210000900000490000"}, io: &fakeIOHandler{}, }, + deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), } devicePath, error := searchDisk(fakeMounter) // if no disk matches input wwid, exit diff --git a/pkg/volume/util/device_util.go b/pkg/volume/util/device_util.go index 9098d7b8597..9d504bc2e78 100644 --- a/pkg/volume/util/device_util.go +++ b/pkg/volume/util/device_util.go @@ -19,6 +19,7 @@ package util //DeviceUtil is a util for common device methods type DeviceUtil interface { FindMultipathDeviceForDevice(disk string) string + FindSlaveDevicesOnMultipath(disk string) []string } type deviceHandler struct { diff --git a/pkg/volume/util/device_util_linux.go b/pkg/volume/util/device_util_linux.go index 0d9851140f6..297004bf950 100644 --- a/pkg/volume/util/device_util_linux.go +++ 
b/pkg/volume/util/device_util_linux.go @@ -20,6 +20,7 @@ package util import ( "errors" + "path" "strings" ) @@ -59,3 +60,23 @@ func findDeviceForPath(path string, io IoUtil) (string, error) { } return "", errors.New("Illegal path for device " + devicePath) } + +// FindSlaveDevicesOnMultipath given a dm name like /dev/dm-1, find all devices +// which are managed by the devicemapper dm-1. +func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string { + var devices []string + io := handler.get_io + // Split path /dev/dm-1 into "", "dev", "dm-1" + parts := strings.Split(dm, "/") + if len(parts) != 3 || !strings.HasPrefix(parts[1], "dev") { + return devices + } + disk := parts[2] + slavesPath := path.Join("/sys/block/", disk, "/slaves/") + if files, err := io.ReadDir(slavesPath); err == nil { + for _, f := range files { + devices = append(devices, path.Join("/dev/", f.Name())) + } + } + return devices +} diff --git a/pkg/volume/util/device_util_linux_test.go b/pkg/volume/util/device_util_linux_test.go index 94ac9b5a47b..6ee7891a808 100644 --- a/pkg/volume/util/device_util_linux_test.go +++ b/pkg/volume/util/device_util_linux_test.go @@ -21,6 +21,7 @@ package util import ( "errors" "os" + "reflect" "testing" "time" ) @@ -29,11 +30,14 @@ type mockOsIOHandler struct{} func (handler *mockOsIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) { switch dirname { - case "/sys/block/dm-2/slaves/": - f := &fakeFileInfo{ + case "/sys/block/dm-1/slaves": + f1 := &fakeFileInfo{ name: "sda", } - return []os.FileInfo{f}, nil + f2 := &fakeFileInfo{ + name: "sdb", + } + return []os.FileInfo{f1, f2}, nil case "/sys/block/": f1 := &fakeFileInfo{ name: "sda", @@ -62,8 +66,10 @@ func (handler *mockOsIOHandler) EvalSymlinks(path string) (string, error) { "/returns/a/dev": "/dev/sde", "/returns/non/dev": "/sys/block", "/dev/disk/by-path/127.0.0.1:3260-eui.02004567A425678D-lun-0": "/dev/sda", + "/dev/disk/by-path/127.0.0.3:3260-eui.03004567A425678D-lun-0": "/dev/sdb", 
"/dev/dm-2": "/dev/dm-2", "/dev/dm-3": "/dev/dm-3", + "/dev/sdc": "/dev/sdc", "/dev/sde": "/dev/sde", } return links[path], nil @@ -140,3 +146,15 @@ func TestFindDeviceForPath(t *testing.T) { } } + +func TestFindSlaveDevicesOnMultipath(t *testing.T) { + mockDeviceUtil := NewDeviceHandler(&mockOsIOHandler{}) + devices := mockDeviceUtil.FindSlaveDevicesOnMultipath("/dev/dm-1") + if !reflect.DeepEqual(devices, []string{"/dev/sda", "/dev/sdb"}) { + t.Fatalf("failed to find devices managed by mpio device. /dev/sda, /dev/sdb expected got [%s]", devices) + } + dev := mockDeviceUtil.FindSlaveDevicesOnMultipath("/dev/sdc") + if len(dev) != 0 { + t.Fatalf("mpio device not found '' expected got [%s]", dev) + } +} diff --git a/pkg/volume/util/device_util_unsupported.go b/pkg/volume/util/device_util_unsupported.go index 6afb1f13915..0b41eb3741c 100644 --- a/pkg/volume/util/device_util_unsupported.go +++ b/pkg/volume/util/device_util_unsupported.go @@ -22,3 +22,9 @@ package util func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string { return "" } + +// FindSlaveDevicesOnMultipath unsupported returns "" +func (handler *deviceHandler) FindSlaveDevicesOnMultipath(disk string) []string { + out := []string{} + return out +} From c8fffea765bede01edd219a9d6795b32ac8f80c9 Mon Sep 17 00:00:00 2001 From: mtanino Date: Fri, 15 Dec 2017 11:56:49 -0500 Subject: [PATCH 329/794] Autogenerated code --- pkg/volume/fc/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/volume/fc/BUILD b/pkg/volume/fc/BUILD index b8d53706fb1..f6e513d34a3 100644 --- a/pkg/volume/fc/BUILD +++ b/pkg/volume/fc/BUILD @@ -43,6 +43,7 @@ go_test( "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", From 
bff54a93340dc61471c6211844ee8d14688bac98 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Fri, 15 Dec 2017 19:01:28 +0000 Subject: [PATCH 330/794] Do not use AddCleanup --- test/e2e/apimachinery/aggregator.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index 02317e204c0..e91fd69130d 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -53,12 +53,6 @@ var _ = SIGDescribe("Aggregator", func() { var c clientset.Interface var aggrclient *aggregatorclient.Clientset f := framework.NewDefaultFramework("aggregator") - framework.AddCleanupAction(func() { - // Cleanup actions will be called even when the tests are skipped and leaves namespace unset. - if len(ns) > 0 { - cleanTest(c, aggrclient, ns) - } - }) BeforeEach(func() { c = f.ClientSet @@ -66,6 +60,10 @@ var _ = SIGDescribe("Aggregator", func() { aggrclient = f.AggregatorClient }) + AfterEach(func() { + cleanTest(c, aggrclient, ns) + }) + It("Should be able to support the 1.7 Sample API Server using the current Aggregator", func() { // Make sure the relevant provider supports Agggregator framework.SkipUnlessServerVersionGTE(serverAggregatorVersion, f.ClientSet.Discovery()) From 57e74717292d22af41fe75312503623c62a00a1b Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Fri, 15 Dec 2017 20:05:21 +0000 Subject: [PATCH 331/794] Fix a typo and improve some documentation. 
--- pkg/kubeapiserver/options/serving.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kubeapiserver/options/serving.go b/pkg/kubeapiserver/options/serving.go index d3d2614921a..79e3f67500b 100644 --- a/pkg/kubeapiserver/options/serving.go +++ b/pkg/kubeapiserver/options/serving.go @@ -85,7 +85,7 @@ func (s InsecureServingOptions) Validate(portArg string) []error { errors := []error{} if s.BindPort < 0 || s.BindPort > 65535 { - errors = append(errors, fmt.Errorf("--insecure-port %v must be between 0 and 65535, inclusive. 0 for turning off secure port.", s.BindPort)) + errors = append(errors, fmt.Errorf("--insecure-port %v must be between 0 and 65535, inclusive. 0 for turning off insecure (HTTP) port.", s.BindPort)) } return errors @@ -103,7 +103,7 @@ func (s *InsecureServingOptions) AddFlags(fs *pflag.FlagSet) { "The port on which to serve unsecured, unauthenticated access. It is assumed "+ "that firewall rules are set up such that this port is not reachable from outside of "+ "the cluster and that port 443 on the cluster's public address is proxied to this "+ - "port. This is performed by nginx in the default setup.") + "port. This is performed by nginx in the default setup. Set to zero to disable") } func (s *InsecureServingOptions) AddDeprecatedFlags(fs *pflag.FlagSet) { From 80c75ef19ea37880329a5dfa8fb578d5a8c4263e Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Fri, 15 Dec 2017 14:15:58 -0800 Subject: [PATCH 332/794] Update CHANGELOG-1.9.md for v1.9.0. 
--- CHANGELOG-1.9.md | 821 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 807 insertions(+), 14 deletions(-) diff --git a/CHANGELOG-1.9.md b/CHANGELOG-1.9.md index e84bb2d2f6d..1ee4998990e 100644 --- a/CHANGELOG-1.9.md +++ b/CHANGELOG-1.9.md @@ -1,40 +1,130 @@ -- [v1.9.0-beta.2](#v190-beta2) - - [Downloads for v1.9.0-beta.2](#downloads-for-v190-beta2) +- [v1.9.0](#v190) + - [Downloads for v1.9.0](#downloads-for-v190) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) + - [1.9 Release Notes](#19-release-notes) + - [WARNING: etcd backup strongly recommended](#warning-etcd-backup-strongly-recommended) + - [Introduction to 1.9.0](#introduction-to-190) + - [Major themes](#major-themes) + - [API Machinery](#api-machinery) + - [Apps](#apps) + - [Auth](#auth) + - [AWS](#aws) + - [Azure](#azure) + - [Cluster Lifecycle](#cluster-lifecycle) + - [Instrumentation](#instrumentation) + - [Network](#network) + - [Node](#node) + - [OpenStack](#openstack) + - [Storage](#storage) + - [Windows](#windows) + - [Before Upgrading](#before-upgrading) + - [**API Machinery**](#api-machinery-1) + - [**Auth**](#auth-1) + - [**CLI**](#cli) + - [**Cluster Lifecycle**](#cluster-lifecycle-1) + - [**Multicluster**](#multicluster) + - [**Node**](#node-1) + - [**Network**](#network-1) + - [**Scheduling**](#scheduling) + - [**Storage**](#storage-1) + - [**OpenStack**](#openstack-1) + - [Known Issues](#known-issues) + - [Deprecations](#deprecations) + - [**API Machinery**](#api-machinery-2) + - [**Auth**](#auth-2) + - [**Cluster Lifecycle**](#cluster-lifecycle-2) + - [**Network**](#network-2) + - [**Storage**](#storage-2) + - [**Scheduling**](#scheduling-1) + - [**Node**](#node-2) + - [Notable Changes](#notable-changes) + - [**Workloads API (apps/v1)**](#workloads-api-appsv1) + - [**API Machinery**](#api-machinery-3) + - [**Admission Control**](#admission-control) + - [**API & API server**](#api-&-api-server) + - 
[**Audit**](#audit) + - [**Custom Resources**](#custom-resources) + - [**Other**](#other) + - [**Apps**](#apps-1) + - [**Auth**](#auth-3) + - [**Audit**](#audit-1) + - [**RBAC**](#rbac) + - [**Other**](#other-1) + - [**GCE**](#gce) + - [**Autoscaling**](#autoscaling) + - [**AWS**](#aws-1) + - [**Azure**](#azure-1) + - [**CLI**](#cli-1) + - [**Kubectl**](#kubectl) + - [**Cluster Lifecycle**](#cluster-lifecycle-3) + - [**API Server**](#api-server) + - [**Cloud Provider Integration**](#cloud-provider-integration) + - [**Kubeadm**](#kubeadm) + - [**Juju**](#juju) + - [**Other**](#other-2) + - [**GCP**](#gcp) + - [**Instrumentation**](#instrumentation-1) + - [**Audit**](#audit-2) + - [**Other**](#other-3) + - [**Multicluster**](#multicluster-1) + - [**Federation**](#federation) + - [**Network**](#network-3) + - [**IPv6**](#ipv6) + - [**IPVS**](#ipvs) + - [**Kube-Proxy**](#kube-proxy) + - [**CoreDNS**](#coredns) + - [**Other**](#other-4) + - [**Node**](#node-3) + - [**Pod API**](#pod-api) + - [**Hardware Accelerators**](#hardware-accelerators) + - [**Container Runtime**](#container-runtime) + - [**Kubelet**](#kubelet) + - [**Other**](#other-5) + - [**OpenStack**](#openstack-2) + - [**Scheduling**](#scheduling-2) + - [**Hardware Accelerators**](#hardware-accelerators-1) + - [**Other**](#other-6) + - [**Storage**](#storage-3) + - [External Dependencies](#external-dependencies) +- [v1.9.0-beta.2](#v190-beta2) + - [Downloads for v1.9.0-beta.2](#downloads-for-v190-beta2) + - [Client Binaries](#client-binaries-1) + - [Server Binaries](#server-binaries-1) + - [Node Binaries](#node-binaries-1) - [Changelog since v1.9.0-beta.1](#changelog-since-v190-beta1) - [Other notable changes](#other-notable-changes) - [v1.9.0-beta.1](#v190-beta1) - [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1) - - [Client Binaries](#client-binaries-1) - - [Server Binaries](#server-binaries-1) - - [Node Binaries](#node-binaries-1) + - [Client Binaries](#client-binaries-2) + - [Server 
Binaries](#server-binaries-2) + - [Node Binaries](#node-binaries-2) - [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3) - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-1) - [v1.9.0-alpha.3](#v190-alpha3) - [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3) - - [Client Binaries](#client-binaries-2) - - [Server Binaries](#server-binaries-2) - - [Node Binaries](#node-binaries-2) + - [Client Binaries](#client-binaries-3) + - [Server Binaries](#server-binaries-3) + - [Node Binaries](#node-binaries-3) - [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2) - [Action Required](#action-required-1) - [Other notable changes](#other-notable-changes-2) - [v1.9.0-alpha.2](#v190-alpha2) - [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2) - - [Client Binaries](#client-binaries-3) - - [Server Binaries](#server-binaries-3) - - [Node Binaries](#node-binaries-3) + - [Client Binaries](#client-binaries-4) + - [Server Binaries](#server-binaries-4) + - [Node Binaries](#node-binaries-4) - [Changelog since v1.8.0](#changelog-since-v180) - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-3) - [v1.9.0-alpha.1](#v190-alpha1) - [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1) - - [Client Binaries](#client-binaries-4) - - [Server Binaries](#server-binaries-4) - - [Node Binaries](#node-binaries-4) + - [Client Binaries](#client-binaries-5) + - [Server Binaries](#server-binaries-5) + - [Node Binaries](#node-binaries-5) - [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3) - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-4) @@ -43,6 +133,709 @@ +# v1.9.0 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples) + +## Downloads for v1.9.0 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes.tar.gz) | 
`d8a52a97382a418b69d46a8b3946bd95c404e03a2d50489d16b36517c9dbc7f4` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-src.tar.gz) | `95d35ad7d274e5ed207674983c3e8ec28d8190c17e635ee922e2af8349fb031b` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-client-darwin-386.tar.gz) | `2646aa4badf9281b42b921c1e9e2ed235e1305d331423f252a3380396e0c383f` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-client-darwin-amd64.tar.gz) | `e76e69cf58399c10908afce8bb8d1f12cb8811de7b24e657e5f9fc80e7b9b6fb` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-client-linux-386.tar.gz) | `bcd5ca428eb78fdaadbcf9ff78d9cbcbf70585a2d2582342a4460e55f3bbad13` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-client-linux-amd64.tar.gz) | `ba96c8e71dba68b1b3abcad769392fb4df53e402cb65ef25cd176346ee2c39e8` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-client-linux-arm64.tar.gz) | `80ceae744fbbfc7759c3d95999075f98e5d86d80e53ea83d16fa8e849da4073d` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-client-linux-arm.tar.gz) | `86b271e2518230f3502708cbe8f188a3a68b913c812247b8cc6fbb4c9f35f6c8` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-client-linux-ppc64le.tar.gz) | `8b7506ab64ceb2ff470120432d7a6a93adf14e14e612b3c53b3c238d334b55e2` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-client-linux-s390x.tar.gz) | `c066aa75a99c141410f9b9a78d230aff4a14dee472fe2b17729e902739798831` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-client-windows-386.tar.gz) | `a315535d6a64842a7c2efbf2bb876c0b73db7efd4c848812af07956c2446f526` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-client-windows-amd64.tar.gz) | 
`5d2ba1f008253da1a784c8bb5266d026fb6fdac5d22133b51e86d348dbaff49b` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-server-linux-amd64.tar.gz) | `a8d7be19e3b662681dc50dc0085ca12045979530a27d0200cf986ada3eff4d32` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-server-linux-arm64.tar.gz) | `8ef6ad23c60a50b4255ff41db044b2f5922e2a4b0332303065d9e66688a0b026` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-server-linux-arm.tar.gz) | `7cb99cf65553c9637ee6f55821ea3f778873a9912917ebbd6203e06d5effb055` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-server-linux-ppc64le.tar.gz) | `529b0f45a0fc688aa624aa2b850f28807ce2be3ac1660189f20cd3ae864ac064` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-server-linux-s390x.tar.gz) | `692f0c198da712f15ff93a4634c67f9105e3ec603240b50b51a84480ed63e987` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-node-linux-amd64.tar.gz) | `7ff3f526d1c4ec23516a65ecec3b947fd8f52d8c0605473b1a87159399dfeab1` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-node-linux-arm64.tar.gz) | `fada290471467c341734a3cfff63cd0f867aad95623b67096029d76c459bde06` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-node-linux-arm.tar.gz) | `ded3640bef5f9701f7f622de4ed162cd2e5a968e80a6a56b843ba84a0b146fac` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-node-linux-ppc64le.tar.gz) | `a83ebe3b360d33c2190bffd5bf0e2c68268ca2c85e3b5295c1a71ddb517a4f90` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-node-linux-s390x.tar.gz) | `1210efdf35ec5e0b2e96ff7e456e340684ff12dbea36aa255ac592ca7195e168` 
+[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.9.0/kubernetes-node-windows-amd64.tar.gz) | `9961ad142abc7e769bbe962aeb30a014065fae83291a2d65bc2da91f04fbf185` + +## 1.9 Release Notes + +## WARNING: etcd backup strongly recommended + +Before updating to 1.9, you are strongly recommended to back up your etcd data. Consult the installation procedure you are using (kargo, kops, kube-up, kube-aws, kubeadm etc) for specific advice. + +Some upgrade methods might upgrade etcd from 3.0 to 3.1 automatically when you upgrade from Kubernetes 1.8, unless you specify otherwise. Because [etcd does not support downgrading](https://coreos.com/etcd/docs/latest/upgrades/upgrade_3_1.html), you'll need to either remain on etcd 3.1 or restore from a backup if you want to downgrade back to Kubernetes 1.8. + +## Introduction to 1.9.0 + +Kubernetes version 1.9 includes new features and enhancements, as well as fixes to identified issues. The release notes contain a brief overview of the important changes introduced in this release. The content is organized by Special Interest Group ([SIG](https://github.com/kubernetes/community/blob/master/sig-list.md)). + +For initial installations, see the [Setup topics](https://kubernetes.io/docs/setup/pick-right-solution/) in the Kubernetes documentation. + +To upgrade to this release from a previous version, first take any actions required [Before Upgrading](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.9.md#before-upgrading). + +For more information about this release and for the latest documentation, see the [Kubernetes documentation](https://kubernetes.io/docs/home/). + +## Major themes + +Kubernetes is developed by community members whose work is organized into +[Special Interest Groups](https://github.com/kubernetes/community/blob/master/sig-list.md), which provide the themes that guide their work. For the 1.9 release, these themes included: + +### API Machinery + +Extensibility. 
SIG API Machinery added a new class of admission control webhooks (mutating), and brought the admission control webhooks to beta. + +### Apps + +The core workloads API, which is composed of the DaemonSet, Deployment, ReplicaSet, and StatefulSet kinds, has been promoted to GA stability in the apps/v1 group version. As such, the apps/v1beta2 group version is deprecated, and all new code should use the kinds in the apps/v1 group version. + +### Auth + +SIG Auth focused on extension-related authorization improvements. Permissions can now be added to the built-in RBAC admin/edit/view roles using [cluster role aggregation](https://kubernetes.io/docs/admin/authorization/rbac/#aggregated-clusterroles). [Webhook authorizers](https://kubernetes.io/docs/admin/authorization/webhook/) can now deny requests and short-circuit checking subsequent authorizers. Performance and usability of the beta [PodSecurityPolicy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) feature was also improved. + +### AWS + +In v1.9 SIG AWS has improved stability of EBS support across the board. If a Volume is “stuck” in the attaching state to a node for too long a unschedulable taint will be applied to the node, so a Kubernetes admin can [take manual steps to correct the error](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html). Users are encouraged to ensure they are monitoring for the taint, and should consider automatically terminating instances in this state. + +In addition, support for NVMe disks has been added to Kubernetes, and a service of type LoadBalancer can now be backed with an NLB instead of an ELB (alpha). + +### Azure + +SIG Azure worked on improvements in the cloud provider, including significant work on the Azure Load Balancer implementation. 
+ +### Cluster Lifecycle + +SIG Cluster Lifecycle has been focusing on improving kubeadm in order to bring it to GA in a future release, as well as developing the [Cluster API](https://github.com/kubernetes/kube-deploy/tree/master/cluster-api). For kubeadm, most new features, such as support for CoreDNS, IPv6 and Dynamic Kubelet Configuration, have gone in as alpha features. We expect to graduate these features to beta and beyond in the next release. The initial Cluster API spec and GCE sample implementation were developed from scratch during this cycle, and we look forward to stabilizing them into something production-grade during 2018. + +### Instrumentation + +In v1.9 we focused on improving stability of the components owned by the SIG, including Heapster, Custom Metrics API adapters for Prometheus, and Stackdriver. + +### Network + +In v1.9 SIG Network has implemented alpha support for IPv6, and alpha support for CoreDNS as a drop-in replacement for kube-dns. Additionally, SIG Network has begun the deprecation process for the extensions/v1beta1 NetworkPolicy API in favor of the networking.k8s.io/v1 equivalent. + +### Node + +SIG Node iterated on the ability to support more workloads with better performance and improved reliability. Alpha features were improved around hardware accelerator support, device plugins enablement, and cpu pinning policies to enable us to graduate these features to beta in a future release. In addition, a number of reliability and performance enhancements were made across the node to help operators in production. + +### OpenStack + +In this cycle, SIG OpenStack focused on configuration simplification through smarter defaults and the use of auto-detection wherever feasible (Block Storage API versions, Security Groups) as well as updating API support, including: + +* Block Storage (Cinder) V3 is now supported. +* Load Balancer (Octavia) V2 is now supported, in addition to Neutron LBaaS V2. +* Neutron LBaas V1 support has been removed. 
+ +This work enables Kubernetes to take full advantage of the relevant services as exposed by OpenStack clouds. Refer to the [Cloud Providers](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#openstack) documentation for more information. + +### Storage + +[SIG Storage](https://github.com/kubernetes/community/tree/master/sig-storage) is responsible for storage and volume plugin components. + +For the 1.9 release, SIG Storage made Kubernetes more pluggable and modular by introducing an alpha implementation of the Container Storage Interface (CSI). CSI will make installing new volume plugins as easy as deploying a pod, and enable third-party storage providers to develop their plugins without the need to add code to the core Kubernetes codebase. + +The SIG also focused on adding functionality to the Kubernetes volume subsystem, such as alpha support for exposing volumes as block devices inside containers, extending the alpha volume-resizing support to more volume plugins, and topology-aware volume scheduling. + +### Windows + +We are advancing support for Windows Server and Windows Server Containers to beta along with continued feature and functional advancements on both the Kubernetes and Windows platforms. This opens the door for many Windows-specific applications and workloads to run on Kubernetes, significantly expanding the implementation scenarios and the enterprise reach of Kubernetes. + +## Before Upgrading + +Consider the following changes, limitations, and guidelines before you upgrade: + +### **API Machinery** + +* The admission API, which is used when the API server calls admission control webhooks, is moved from `admission.v1alpha1` to `admission.v1beta1`. You must **delete any existing webhooks before you upgrade** your cluster, and update them to use the latest API. This change is not backward compatible. +* The admission webhook configurations API, part of the admissionregistration API, is now at v1beta1. 
Delete any existing webhook configurations before you upgrade, and update your configuration files to use the latest API. For this and the previous change, see also [the documentation]([https://kubernetes.io/docs/admin/extensible-admission-controllers/#external-admission-webhooks](https://kubernetes.io/docs/admin/extensible-admission-controllers/#external-admission-webhooks)). +* A new `ValidatingAdmissionWebhook` is added (replacing `GenericAdmissionWebhook`) and is available in the generic API server. You must update your API server configuration file to pass the webhook to the `--admission-control` flag. ([#55988](https://github.com/kubernetes/kubernetes/pull/55988),[ @caesarxuchao](https://github.com/caesarxuchao)) ([#54513](https://github.com/kubernetes/kubernetes/pull/54513),[ @deads2k](https://github.com/deads2k)) +* The deprecated options `--portal-net` and `--service-node-ports` for the API server are removed. ([#52547](https://github.com/kubernetes/kubernetes/pull/52547),[ @xiangpengzhao](https://github.com/xiangpengzhao)) + +### **Auth** + +* PodSecurityPolicy: A compatibility issue with the allowPrivilegeEscalation field that caused policies to start denying pods they previously allowed was fixed. If you defined PodSecurityPolicy objects using a 1.8.0 client or server and set allowPrivilegeEscalation to false, these objects must be reapplied after you upgrade. ([#53443](https://github.com/kubernetes/kubernetes/pull/53443),[ @liggitt](https://github.com/liggitt)) +* KMS: Alpha integration with GCP KMS was removed in favor of a future out-of-process extension point. 
Discontinue use of the GCP KMS integration and ensure [data has been decrypted](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#decrypting-all-data) (or reencrypted with a different provider) before upgrading ([#54759](https://github.com/kubernetes/kubernetes/pull/54759),[ @sakshamsharma](https://github.com/sakshamsharma)) + +### **CLI** + +* Swagger 1.2 validation is removed for kubectl. The options `--use-openapi` and `--schema-cache-dir` are also removed because they are no longer needed. ([#53232](https://github.com/kubernetes/kubernetes/pull/53232),[ @apelisse](https://github.com/apelisse)) + +### **Cluster Lifecycle** + +* You must either specify the `--discovery-token-ca-cert-hash` flag to `kubeadm join`, or opt out of the CA pinning feature using `--discovery-token-unsafe-skip-ca-verification`. +* The default `auto-detect` behavior of the kubelet's `--cloud-provider` flag is removed. + * You can manually set `--cloud-provider=auto-detect`, but be aware that this behavior will be removed completely in a future version. + * Best practice for version 1.9 and future versions is to explicitly set a cloud-provider. See [the documentation](https://kubernetes.io/docs/getting-started-guides/scratch/#cloud-providers) +* The kubeadm `--skip-preflight-checks` flag is now deprecated and will be removed in a future release. +* If you are using the cloud provider API to determine the external host address of the apiserver, set `--external-hostname` explicitly instead. The cloud provider detection has been deprecated and will be removed in the future ([#54516](https://github.com/kubernetes/kubernetes/pull/54516),[ @dims](https://github.com/dims)) + +### **Multicluster** + +* Development of Kubernetes Federation has moved to [github.com/kubernetes/federation](github.com/kubernetes/federation). This move out of tree also means that Federation will begin releasing separately from Kubernetes. 
Impact: + * Federation-specific behavior will no longer be included in kubectl + * kubefed will no longer be released as part of Kubernetes + * The Federation servers will no longer be included in the hyperkube binary and image. ([#53816](https://github.com/kubernetes/kubernetes/pull/53816),[ @marun](https://github.com/marun)) + +### **Node** + +* The kubelet `--network-plugin-dir` flag is removed. This flag was deprecated in version 1.7, and is replaced with `--cni-bin-dir`. ([#53564](https://github.com/kubernetes/kubernetes/pull/53564),[ @supereagle](https://github.com/supereagle)) +* kubelet's `--cloud-provider` flag no longer defaults to "auto-detect". If you want cloud-provider support in kubelet, you must set a specific cloud-provider explicitly. ([#53573](https://github.com/kubernetes/kubernetes/pull/53573),[ @dims](https://github.com/dims)) + +### **Network** + +* NetworkPolicy objects are now stored in etcd in v1 format. After you upgrade to version 1.9, make sure that all NetworkPolicy objects are migrated to v1. ([#51955](https://github.com/kubernetes/kubernetes/pull/51955), [@danwinship](https://github.com/danwinship)) +* The API group/version for the kube-proxy configuration has changed from `componentconfig/v1alpha1` to `kubeproxy.config.k8s.io/v1alpha1`. If you are using a config file for kube-proxy instead of the command line flags, you must change its apiVersion to `kubeproxy.config.k8s.io/v1alpha1`. ([#53645](https://github.com/kubernetes/kubernetes/pull/53645), [@xiangpengzhao](https://github.com/xiangpengzhao)) +* The "ServiceNodeExclusion" feature gate must now be enabled for the `alpha.service-controller.kubernetes.io/exclude-balancer` annotation on nodes to be honored. ([#54644](https://github.com/kubernetes/kubernetes/pull/54644),[ @brendandburns](https://github.com/brendandburns)) + +### **Scheduling** + +* Taint key `unreachable` is now in GA. +* Taint key `notReady` is changed to `not-ready`, and is also now in GA. 
+* These changes are automatically updated for taints. Tolerations for these taints must be updated manually. Specifically, you must: + * Change `node.alpha.kubernetes.io/notReady` to `node.kubernetes.io/not-ready` + * Change `node.alpha.kubernetes.io/unreachable` to `node.kubernetes.io/unreachable` +* The `node.kubernetes.io/memory-pressure` taint now respects the configured whitelist. To use it, you must add it to the whitelist.([#55251](https://github.com/kubernetes/kubernetes/pull/55251),[ @deads2k](https://github.com/deads2k)) +* Refactor kube-scheduler configuration ([#52428](https://github.com/kubernetes/kubernetes/pull/52428)) + * The kube-scheduler command now supports a --config flag which is the location of a file containing a serialized scheduler configuration. Most other kube-scheduler flags are now deprecated. ([#52562](https://github.com/kubernetes/kubernetes/pull/52562),[ @ironcladlou](https://github.com/ironcladlou)) +* Opaque integer resources (OIR), which were (deprecated in v1.8.), have been removed. ([#55103](https://github.com/kubernetes/kubernetes/pull/55103),[ @ConnorDoyle](https://github.com/ConnorDoyle)) + +### **Storage** + +* [alpha] The LocalPersistentVolumes alpha feature now also requires the VolumeScheduling alpha feature. This is a breaking change, and the following changes are required: + * The VolumeScheduling feature gate must also be enabled on kube-scheduler and kube-controller-manager components. + * The NoVolumeNodeConflict predicate has been removed. For non-default schedulers, update your scheduler policy. + * The CheckVolumeBinding predicate must be enabled in non-default schedulers. ([#55039](https://github.com/kubernetes/kubernetes/pull/55039),[ @msau42](https://github.com/msau42)) + +### **OpenStack** + +* Remove the LbaasV1 of OpenStack cloud provider, currently only support LbaasV2. 
([#52717](https://github.com/kubernetes/kubernetes/pull/52717),[ @FengyunPan](https://github.com/FengyunPan)) + +## Known Issues + +This section contains a list of known issues reported in Kubernetes 1.9 release. The content is populated from the [v1.9.x known issues and FAQ accumulator](https://github.com/kubernetes/kubernetes/issues/57159](https://github.com/kubernetes/kubernetes/issues/57159). + +* If you are adding Windows Server Virtual Machines as nodes to your Kubernetes environment, there is a compatibility issue with certain virtualization products. Specifically the Windows version of the kubelet.exe calls `GetPhysicallyInstalledSystemMemory` to get the physical memory installed on Windows machines and reports it as part of node metrics to heapster. This API call fails for VMware and VirtualBox virtualization environments. This issue is not present in bare metal Windows deployments, in Hyper-V, or on some of the popular public cloud providers. + +* If you run `kubectl get po` while the API server in unreachable, a misleading error is returned: `the server doesn't have a resource type "po"`. To work around this issue, specify the full resource name in the command instead of the abbreviation: `kubectl get pods`. This issue will be fixed in a future release. + + For more information, see [#57198](https://github.com/kubernetes/kubernetes/issues/57198). + +* Mutating and validating webhook configurations are continuously polled by the API server (once per second). This issue will be fixed in a future release. + + For more information, see [#56357](https://github.com/kubernetes/kubernetes/issues/56357). + +* Audit logging is slow because writes to the log are performed synchronously with requests to the log. This issue will be fixed in a future release. + + For more information, see [#53006](https://github.com/kubernetes/kubernetes/issues/53006). + +* Custom Resource Definitions (CRDs) are not properly deleted under certain conditions. 
This issue will be fixed in a future release. + + For more information, see [#56348](https://github.com/kubernetes/kubernetes/issues/56348). + +* API server times out after performing a rolling update of the etcd cluster. This issue will be fixed in a future release. + + For more information, see [#47131](https://github.com/kubernetes/kubernetes/issues/47131) + +* If a namespaced resource is owned by a cluster scoped resource, and the namespaced dependent is processed before the cluster scoped owner has ever been observed by the garbage collector, the dependent will be erroneously deleted. + + For more information, see [#54940](https://github.com/kubernetes/kubernetes/issues/54940) + +## Deprecations + +This section provides an overview of deprecated API versions, options, flags, and arguments. Deprecated means that we intend to remove the capability from a future release. After removal, the capability will no longer work. The sections are organized by SIGs. + +### **API Machinery** + +* The kube-apiserver `--etcd-quorum-read` flag is deprecated and the ability to switch off quorum read will be removed in a future release. ([#53795](https://github.com/kubernetes/kubernetes/pull/53795),[ @xiangpengzhao](https://github.com/xiangpengzhao)) +* The `/ui` redirect in kube-apiserver is deprecated and will be removed in Kubernetes 1.10. ([#53046](https://github.com/kubernetes/kubernetes/pull/53046), [@maciaszczykm](https://github.com/maciaszczykm)) +* `etcd2` as a backend is deprecated and support will be removed in Kubernetes 1.13 or 1.14. + +### **Auth** + +* Default controller-manager options for `--cluster-signing-cert-file` and `--cluster-signing-key-file` are deprecated and will be removed in a future release. ([#54495](https://github.com/kubernetes/kubernetes/pull/54495),[ @mikedanese](https://github.com/mikedanese)) +* RBAC objects are now stored in etcd in v1 format. 
After upgrading to 1.9, ensure all RBAC objects (Roles, RoleBindings, ClusterRoles, ClusterRoleBindings) are at v1. v1alpha1 support is deprecated and will be removed in a future release. ([#52950](https://github.com/kubernetes/kubernetes/pull/52950),[ @liggitt](https://github.com/liggitt)) + +### **Cluster Lifecycle** + +* kube-apiserver: `--ssh-user` and `--ssh-keyfile` are now deprecated and will be removed in a future release. Users of SSH tunnel functionality in Google Container Engine for the Master -> Cluster communication should plan alternate methods for bridging master and node networks. ([#54433](https://github.com/kubernetes/kubernetes/pull/54433),[ @dims](https://github.com/dims)) +* The kubeadm `--skip-preflight-checks` flag is now deprecated and will be removed in a future release. +* If you are using the cloud provider API to determine the external host address of the apiserver, set `--external-hostname` explicitly instead. The cloud provider detection has been deprecated and will be removed in the future ([#54516](https://github.com/kubernetes/kubernetes/pull/54516),[ @dims](https://github.com/dims)) + +### **Network** + +* The NetworkPolicy extensions/v1beta1 API is now deprecated and will be removed in a future release. This functionality has been migrated to a dedicated v1 API - networking.k8s.io/v1. v1beta1 Network Policies can be upgraded to the v1 API with the [cluster/update-storage-objects.sh script](https://github.com/danwinship/kubernetes/blob/master/cluster/update-storage-objects.sh). Documentation can be found [here](https://kubernetes.io/docs/concepts/services-networking/network-policies/). ([#56425](https://github.com/kubernetes/kubernetes/pull/56425), [@cmluciano](https://github.com/cmluciano)) + +### **Storage** + +* The `volume.beta.kubernetes.io/storage-class` annotation is deprecated. It will be removed in a future release. 
For the StorageClass API object, use v1, and in place of the annotation use `v1.PersistentVolumeClaim.Spec.StorageClassName` and `v1.PersistentVolume.Spec.StorageClassName` instead. ([#53580](https://github.com/kubernetes/kubernetes/pull/53580),[ @xiangpengzhao](https://github.com/xiangpengzhao)) + +### **Scheduling** + +* The kube-scheduler command now supports a `--config` flag, which is the location of a file containing a serialized scheduler configuration. Most other kube-scheduler flags are now deprecated. ([#52562](https://github.com/kubernetes/kubernetes/pull/52562),[ @ironcladlou](https://github.com/ironcladlou)) + +### **Node** + +* The kubelet's `--enable-custom-metrics` flag is now deprecated. ([#54154](https://github.com/kubernetes/kubernetes/pull/54154),[ @mtaufen](https://github.com/mtaufen)) + +## Notable Changes + +### **Workloads API (apps/v1)** + +As announced with the release of version 1.8, the Kubernetes Workloads API is at v1 in version 1.9. This API consists of the DaemonSet, Deployment, ReplicaSet and StatefulSet kinds. + +### **API Machinery** + +#### **Admission Control** + +* Admission webhooks are now in beta, and include the following: + * Mutation support for admission webhooks. ([#54892](https://github.com/kubernetes/kubernetes/pull/54892),[ @caesarxuchao](https://github.com/caesarxuchao)) + * Webhook admission now takes a config file that describes how to authenticate to webhook servers ([#54414](https://github.com/kubernetes/kubernetes/pull/54414),[ @deads2k](https://github.com/deads2k)) + * The dynamic admission webhook now supports a URL in addition to a service reference, to accommodate out-of-cluster webhooks. ([#54889](https://github.com/kubernetes/kubernetes/pull/54889),[ @lavalamp](https://github.com/lavalamp)) + * Added `namespaceSelector` to `externalAdmissionWebhook` configuration to allow applying webhooks only to objects in the namespaces that have matching labels. 
([#54727](https://github.com/kubernetes/kubernetes/pull/54727),[ @caesarxuchao](https://github.com/caesarxuchao)) +* Metrics are added for monitoring admission plugins, including the new dynamic (webhook-based) ones. ([#55183](https://github.com/kubernetes/kubernetes/pull/55183),[ @jpbetz](https://github.com/jpbetz)) +* The PodSecurityPolicy annotation kubernetes.io/psp on pods is set only once on create. ([#55486](https://github.com/kubernetes/kubernetes/pull/55486),[ @sttts](https://github.com/sttts)) + +#### **API & API server** + +* Fixed a bug related to discovery information for scale subresources in the apps API group ([#54683](https://github.com/kubernetes/kubernetes/pull/54683),[ @liggitt](https://github.com/liggitt)) +* Fixed a bug that prevented client-go metrics from being registered in Prometheus. This bug affected multiple components. ([#53434](https://github.com/kubernetes/kubernetes/pull/53434),[ @crassirostris](https://github.com/crassirostris)) + +#### **Audit** + +* Fixed a bug so that `kube-apiserver` now waits for open connections to finish before exiting. This fix provides graceful shutdown and ensures that the audit backend no longer drops events on shutdown. ([#53695](https://github.com/kubernetes/kubernetes/pull/53695),[ @hzxuzhonghu](https://github.com/hzxuzhonghu)) +* Webhooks now always retry sending if a connection reset error is returned. ([#53947](https://github.com/kubernetes/kubernetes/pull/53947),[ @crassirostris](https://github.com/crassirostris)) + +#### **Custom Resources** + +* Validation of resources defined by a Custom Resource Definition (CRD) is now in beta ([#54647](https://github.com/kubernetes/kubernetes/pull/54647),[ @colemickens](https://github.com/colemickens)) +* An example CRD controller has been added, at [github.com/kubernetes/sample-controller](github.com/kubernetes/sample-controller). 
([#52753](https://github.com/kubernetes/kubernetes/pull/52753),[ @munnerz](https://github.com/munnerz)) +* Custom resources served by CustomResourceDefinition objects now support field selectors for `metadata.name` and `metadata.namespace`. Also fixed an issue with watching a single object; earlier versions could watch only a collection, and so a watch on an instance would fail. ([#53345](https://github.com/kubernetes/kubernetes/pull/53345),[ @ncdc](https://github.com/ncdc)) + +#### **Other** + +* `kube-apiserver` now runs with the default value for `service-cluster-ip-range` ([#52870](https://github.com/kubernetes/kubernetes/pull/52870),[ @jennybuckley](https://github.com/jennybuckley)) +* Add `--etcd-compaction-interval` to apiserver for controlling request of compaction to etcd3 from apiserver. ([#51765](https://github.com/kubernetes/kubernetes/pull/51765),[ @mitake](https://github.com/mitake)) +* The httpstream/spdy calls now support CIDR notation for NO_PROXY ([#54413](https://github.com/kubernetes/kubernetes/pull/54413),[ @kad](https://github.com/kad)) +* Code generation for CRD and User API server types is improved with the addition of two new scripts to k8s.io/code-generator: `generate-groups.sh` and `generate-internal-groups.sh`. ([#52186](https://github.com/kubernetes/kubernetes/pull/52186),[ @sttts](https://github.com/sttts)) +* [beta] Flag `--chunk-size={SIZE}` is added to `kubectl get` to customize the number of results returned in large lists of resources. This reduces the perceived latency of managing large clusters because the server returns the first set of results to the client much more quickly. Pass 0 to disable this feature.([#53768](https://github.com/kubernetes/kubernetes/pull/53768),[ @smarterclayton](https://github.com/smarterclayton)) +* [beta] API chunking via the limit and continue request parameters is promoted to beta in this release. Client libraries using the Informer or ListWatch types will automatically opt in to chunking. 
([#52949](https://github.com/kubernetes/kubernetes/pull/52949),[ @smarterclayton](https://github.com/smarterclayton)) +* The `--etcd-quorum-read` flag now defaults to true to ensure correct operation with HA etcd clusters. This flag is deprecated and the flag will be removed in future versions, as well as the ability to turn off this functionality. ([#53717](https://github.com/kubernetes/kubernetes/pull/53717),[ @liggitt](https://github.com/liggitt)) +* Add events.k8s.io api group with v1beta1 API containing redesigned event type. ([#49112](https://github.com/kubernetes/kubernetes/pull/49112),[ @gmarek](https://github.com/gmarek)) +* Fixed a bug where API discovery failures were crashing the kube controller manager via the garbage collector. ([#55259](https://github.com/kubernetes/kubernetes/pull/55259),[ @ironcladlou](https://github.com/ironcladlou)) +* `conversion-gen` is now usable in a context without a vendored k8s.io/kubernetes. The Kubernetes core API is removed from `default extra-peer-dirs`. ([#54394](https://github.com/kubernetes/kubernetes/pull/54394),[ @sttts](https://github.com/sttts)) +* Fixed a bug where the `client-gen` tag for code-generator required a newline between a comment block and a statement. tag shortcomings when newline is omitted ([#53893](https://github.com/kubernetes/kubernetes/pull/53893)) ([#55233](https://github.com/kubernetes/kubernetes/pull/55233),[ @sttts](https://github.com/sttts)) +* The Apiserver proxy now rewrites the URL when a service returns an absolute path with the request's host. 
([#52556](https://github.com/kubernetes/kubernetes/pull/52556),[ @roycaihw](https://github.com/roycaihw)) +* The gRPC library is updated to pick up data race fix ([#53124](https://github.com/kubernetes/kubernetes/pull/53124)) ([#53128](https://github.com/kubernetes/kubernetes/pull/53128),[ @dixudx](https://github.com/dixudx)) +* Fixed server name verification of aggregated API servers and webhook admission endpoints ([#56415](https://github.com/kubernetes/kubernetes/pull/56415),[ @liggitt](https://github.com/liggitt)) + +### **Apps** + +* The `kubernetes.io/created-by` annotation is no longer added to controller-created objects. Use the `metadata.ownerReferences` item with controller set to `true` to determine which controller, if any, owns an object. ([#54445](https://github.com/kubernetes/kubernetes/pull/54445),[ @crimsonfaith91](https://github.com/crimsonfaith91)) +* StatefulSet controller now creates a label for each Pod in a StatefulSet. The label is `statefulset.kubernetes.io/pod-name`, where `pod-name` = the name of the Pod. This allows users to create a Service per Pod to expose a connection to individual Pods. ([#55329](https://github.com/kubernetes/kubernetes/pull/55329),[ @kow3ns](https://github.com/kow3ns)) +* DaemonSet status includes a new field named `conditions`, making it consistent with other workloads controllers. ([#55272](https://github.com/kubernetes/kubernetes/pull/55272),[ @janetkuo](https://github.com/janetkuo)) +* StatefulSet status now supports conditions, making it consistent with other core controllers in v1 ([#55268](https://github.com/kubernetes/kubernetes/pull/55268),[ @foxish](https://github.com/foxish)) +* The default garbage collection policy for Deployment, DaemonSet, StatefulSet, and ReplicaSet has changed from OrphanDependents to DeleteDependents when the deletion is requested through an `apps/v1` endpoint. 
([#55148](https://github.com/kubernetes/kubernetes/pull/55148),[ @dixudx](https://github.com/dixudx)) + * Clients using older endpoints will be unaffected. This change is only at the REST API level and is independent of the default behavior of particular clients (e.g. this does not affect the default for the kubectl `--cascade` flag). + * If you upgrade your client-go libs and use the `AppsV1()` interface, please note that the default garbage collection behavior is changed. + +### **Auth** + +#### **Audit** + +* RequestReceivedTimestamp and StageTimestamp are added to audit events ([#52981](https://github.com/kubernetes/kubernetes/pull/52981),[ @CaoShuFeng](https://github.com/CaoShuFeng)) +* Advanced audit policy now supports a policy wide omitStage ([#54634](https://github.com/kubernetes/kubernetes/pull/54634),[ @CaoShuFeng](https://github.com/CaoShuFeng)) + +#### **RBAC** + +* New permissions have been added to default RBAC roles ([#52654](https://github.com/kubernetes/kubernetes/pull/52654),[ @liggitt](https://github.com/liggitt)): + * The default admin and edit roles now include read/write permissions + * The view role includes read permissions on poddisruptionbudget.policy resources. +* RBAC rules can now match the same subresource on any resource using the form `*/(subresource)`. For example, `*/scale` matches requests to `replicationcontroller/scale`. ([#53722](https://github.com/kubernetes/kubernetes/pull/53722),[ @deads2k](https://github.com/deads2k)) +* The RBAC bootstrapping policy now allows authenticated users to create selfsubjectrulesreviews. ([#56095](https://github.com/kubernetes/kubernetes/pull/56095),[ @ericchiang](https://github.com/ericchiang)) +* RBAC ClusterRoles can now select other roles to aggregate. 
([#54005](https://github.com/kubernetes/kubernetes/pull/54005),[ @deads2k](https://github.com/deads2k)) +* Fixed an issue with RBAC reconciliation that caused duplicated subjects in some bootstrapped RoleBinding objects on each restart of the API server. ([#53239](https://github.com/kubernetes/kubernetes/pull/53239),[ @enj](https://github.com/enj)) + +#### **Other** + +* Pod Security Policy can now manage access to specific FlexVolume drivers ([#53179](https://github.com/kubernetes/kubernetes/pull/53179),[ @wanghaoran1988](https://github.com/wanghaoran1988)) +* Audit policy files without apiVersion and kind are treated as invalid. ([#54267](https://github.com/kubernetes/kubernetes/pull/54267),[ @ericchiang](https://github.com/ericchiang)) +* Fixed a bug where forbidden errors were encountered when accessing ReplicaSet and DaemonSet objects via the apps API group. ([#54309](https://github.com/kubernetes/kubernetes/pull/54309),[ @liggitt](https://github.com/liggitt)) +* Improved PodSecurityPolicy admission latency. ([#55643](https://github.com/kubernetes/kubernetes/pull/55643),[ @tallclair](https://github.com/tallclair)) +* kube-apiserver: `--oidc-username-prefix` and `--oidc-group-prefix` flags are now correctly enabled. ([#56175](https://github.com/kubernetes/kubernetes/pull/56175),[ @ericchiang](https://github.com/ericchiang)) +* If multiple PodSecurityPolicy objects allow a submitted pod, priority is given to policies that do not require default values for any fields in the pod spec. If default values are required, the first policy ordered by name that allows the pod is used. ([#52849](https://github.com/kubernetes/kubernetes/pull/52849),[ @liggitt](https://github.com/liggitt)) +* A new controller automatically cleans up Certificate Signing Requests that are Approved and Issued, or Denied. 
([#51840](https://github.com/kubernetes/kubernetes/pull/51840),[ @jcbsmpsn](https://github.com/jcbsmpsn)) +* PodSecurityPolicies have been added for all in-tree cluster addons ([#55509](https://github.com/kubernetes/kubernetes/pull/55509),[ @tallclair](https://github.com/tallclair)) + +#### **GCE** + +* Added support for PodSecurityPolicy on GCE: `ENABLE_POD_SECURITY_POLICY=true` enables the admission controller, and installs policies for default addons. ([#52367](https://github.com/kubernetes/kubernetes/pull/52367),[ @tallclair](https://github.com/tallclair)) + +### **Autoscaling** + +* HorizontalPodAutoscaler objects now properly function on scalable resources in any API group. Fixed by adding a polymorphic scale client. ([#53743](https://github.com/kubernetes/kubernetes/pull/53743),[ @DirectXMan12](https://github.com/DirectXMan12)) +* Fixed a set of minor issues with Cluster Autoscaler 1.0.1 ([#54298](https://github.com/kubernetes/kubernetes/pull/54298),[ @mwielgus](https://github.com/mwielgus)) +* HPA tolerance is now configurable by setting the `horizontal-pod-autoscaler-tolerance` flag. ([#52275](https://github.com/kubernetes/kubernetes/pull/52275),[ @mattjmcnaughton](https://github.com/mattjmcnaughton)) +* Fixed a bug that allowed the horizontal pod autoscaler to allocate more `desiredReplica` objects than `maxReplica` objects in certain instances. ([#53690](https://github.com/kubernetes/kubernetes/pull/53690),[ @mattjmcnaughton](https://github.com/mattjmcnaughton)) + +### **AWS** + +* Nodes can now use instance types (such as C5) that use NVMe. ([#56607](https://github.com/kubernetes/kubernetes/pull/56607), [@justinsb](https://github.com/justinsb)) +* Nodes are now unreachable if volumes are stuck in the attaching state. Implemented by applying a taint to the node. 
([#55558](https://github.com/kubernetes/kubernetes/pull/55558),[ @gnufied](https://github.com/gnufied)) +* Volumes are now checked for available state before attempting to attach or delete a volume in EBS. ([#55008](https://github.com/kubernetes/kubernetes/pull/55008),[ @gnufied](https://github.com/gnufied)) +* Fixed a bug where error log messages were breaking into two lines. ([#49826](https://github.com/kubernetes/kubernetes/pull/49826),[ @dixudx](https://github.com/dixudx)) +* Fixed a bug so that volumes are now detached from stopped nodes. ([#55893](https://github.com/kubernetes/kubernetes/pull/55893),[ @gnufied](https://github.com/gnufied)) +* You can now override the health check parameters for AWS ELBs by specifying annotations on the corresponding service. The new annotations are: `healthy-threshold`, `unhealthy-threshold`, `timeout`, `interval`. The prefix for all annotations is `service.beta.kubernetes.io/aws-load-balancer-healthcheck-`. ([#56024](https://github.com/kubernetes/kubernetes/pull/56024),[ @dimpavloff](https://github.com/dimpavloff)) +* Fixed a bug so that AWS ECR credentials are now supported in the China region. 
([#50108](https://github.com/kubernetes/kubernetes/pull/50108),[ @zzq889](https://github.com/zzq889)) +* Added Amazon NLB support ([#53400](https://github.com/kubernetes/kubernetes/pull/53400),[ @micahhausler](https://github.com/micahhausler)) +* Additional annotations are now properly set or updated for AWS load balancers ([#55731](https://github.com/kubernetes/kubernetes/pull/55731),[ @georgebuckerfield](https://github.com/georgebuckerfield)) +* AWS SDK is updated to version 1.12.7 ([#53561](https://github.com/kubernetes/kubernetes/pull/53561),[ @justinsb](https://github.com/justinsb)) + +### **Azure** + +* Fixed several issues with properly provisioning Azure disk storage ([#55927](https://github.com/kubernetes/kubernetes/pull/55927),[ @andyzhangx](https://github.com/andyzhangx)) +* A new service annotation `service.beta.kubernetes.io/azure-dns-label-name` now sets the Azure DNS label for a public IP address. ([#47849](https://github.com/kubernetes/kubernetes/pull/47849),[ @tomerf](https://github.com/tomerf)) +* Support for GetMountRefs function added; warning messages no longer displayed. ([#54670](https://github.com/kubernetes/kubernetes/pull/54670), [#52401](https://github.com/kubernetes/kubernetes/pull/52401),[ @andyzhangx](https://github.com/andyzhangx)) +* Fixed an issue where an Azure PersistentVolume object would crash because the value of `volumeSource.ReadOnly` was set to nil. ([#54607](https://github.com/kubernetes/kubernetes/pull/54607),[ @andyzhangx](https://github.com/andyzhangx)) +* Fixed an issue with Azure disk mount failures on CoreOS and some other distros ([#54334](https://github.com/kubernetes/kubernetes/pull/54334),[ @andyzhangx](https://github.com/andyzhangx)) +* GRS, RAGRS storage account types are now supported for Azure disks. ([#55931](https://github.com/kubernetes/kubernetes/pull/55931),[ @andyzhangx](https://github.com/andyzhangx)) +* Azure NSG rules are now restricted so that external access is allowed only to the load balancer IP. 
([#54177](https://github.com/kubernetes/kubernetes/pull/54177),[ @itowlson](https://github.com/itowlson)) +* Azure NSG rules can be consolidated to reduce the likelihood of hitting Azure resource limits (available only in regions where the Augmented Security Groups preview is available). ([#55740](https://github.com/kubernetes/kubernetes/pull/55740), [@itowlson](https://github.com/itowlson)) +* The Azure SDK is upgraded to v11.1.1. ([#54971](https://github.com/kubernetes/kubernetes/pull/54971),[ @itowlson](https://github.com/itowlson)) +* You can now create Windows mount paths ([#51240](https://github.com/kubernetes/kubernetes/pull/51240),[ @andyzhangx](https://github.com/andyzhangx)) +* Fixed a controller manager crash issue on a manually created k8s cluster. ([#53694](https://github.com/kubernetes/kubernetes/pull/53694),[ @andyzhangx](https://github.com/andyzhangx)) +* Azure-based clusters now support unlimited mount points. ([#54668](https://github.com/kubernetes/kubernetes/pull/54668)) ([#53629](https://github.com/kubernetes/kubernetes/pull/53629),[ @andyzhangx](https://github.com/andyzhangx)) +* Load balancer reconciliation now considers NSG rules based not only on Name, but also on Protocol, SourcePortRange, DestinationPortRange, SourceAddressPrefix, DestinationAddressPrefix, Access, and Direction. This change makes it possible to update NSG rules under more conditions. ([#55752](https://github.com/kubernetes/kubernetes/pull/55752),[ @kevinkim9264](https://github.com/kevinkim9264)) +* Custom mountOptions for the azurefile StorageClass object are now respected. Specifically, `dir_mode` and `file_mode` can now be customized. 
([#54674](https://github.com/kubernetes/kubernetes/pull/54674),[ @andyzhangx](https://github.com/andyzhangx)) +* Azure Load Balancer Auto Mode: Services can be annotated to allow auto selection of available load balancers and to provide specific availability sets that host the load balancers (for example, `service.beta.kubernetes.io/azure-load-balancer-mode=auto|as1,as2...`) + +### **CLI** + +#### **Kubectl** + +* `kubectl cp` can now copy a remote file into a local directory. ([#46762](https://github.com/kubernetes/kubernetes/pull/46762),[ @bruceauyeung](https://github.com/bruceauyeung)) +* `kubectl cp` now honors destination names for directories. A complete directory is now copied; in previous versions only the file contents were copied. ([#51215](https://github.com/kubernetes/kubernetes/pull/51215),[ @juanvallejo](https://github.com/juanvallejo)) +* You can now use `kubectl get` with a fieldSelector. ([#50140](https://github.com/kubernetes/kubernetes/pull/50140),[ @dixudx](https://github.com/dixudx)) +* Secret data containing Docker registry auth objects is now generated using the config.json format ([#53916](https://github.com/kubernetes/kubernetes/pull/53916),[ @juanvallejo](https://github.com/juanvallejo)) +* `kubectl apply` now calculates the diff between the current and new configurations based on the OpenAPI spec. If the OpenAPI spec is not available, it falls back to baked-in types. ([#51321](https://github.com/kubernetes/kubernetes/pull/51321),[ @mengqiy](https://github.com/mengqiy)) +* `kubectl explain` now explains `apiservices` and `customresourcedefinition`. (Updated to use OpenAPI instead of Swagger 1.2.) ([#53228](https://github.com/kubernetes/kubernetes/pull/53228),[ @apelisse](https://github.com/apelisse)) +* `kubectl get` now uses OpenAPI schema extensions by default to select columns for custom types. 
([#53483](https://github.com/kubernetes/kubernetes/pull/53483),[ @apelisse](https://github.com/apelisse)) +* kubectl `top node` now sorts by name and `top pod` sorts by namespace. Fixed a bug where results were inconsistently sorted. ([#53560](https://github.com/kubernetes/kubernetes/pull/53560),[ @dixudx](https://github.com/dixudx)) +* Added --dry-run option to kubectl drain. ([#52440](https://github.com/kubernetes/kubernetes/pull/52440),[ @juanvallejo](https://github.com/juanvallejo)) +* Kubectl now outputs `<none>` for columns specified by -o custom-columns but not found in object, rather than "xxx is not found" ([#51750](https://github.com/kubernetes/kubernetes/pull/51750),[ @jianhuiz](https://github.com/jianhuiz)) +* `kubectl create pdb` no longer sets the min-available field by default. ([#53047](https://github.com/kubernetes/kubernetes/pull/53047),[ @yuexiao-wang](https://github.com/yuexiao-wang)) +* The canonical pronunciation of kubectl is "cube control". +* Added --raw to kubectl create to POST using the normal transport. ([#54245](https://github.com/kubernetes/kubernetes/pull/54245),[ @deads2k](https://github.com/deads2k)) +* Added kubectl `create priorityclass` subcommand ([#54858](https://github.com/kubernetes/kubernetes/pull/54858),[ @wackxu](https://github.com/wackxu)) +* Fixed an issue where `kubectl set` commands occasionally encountered conversion errors for ReplicaSet and DaemonSet objects ([#53158](https://github.com/kubernetes/kubernetes/pull/53158),[ @liggitt](https://github.com/liggitt)) + +### **Cluster Lifecycle** + +#### **API Server** + +* [alpha] Added an `--endpoint-reconciler-type` command-line argument to select the endpoint reconciler to use. The default is to use the 'master-count' reconciler which is the default for 1.9 and in use prior to 1.9. The 'lease' reconciler stores endpoints within the storage api for better cleanup of deleted (or removed) API servers. 
The 'none' reconciler is a no-op reconciler, which can be used in self-hosted environments. ([#51698](https://github.com/kubernetes/kubernetes/pull/51698), [@rphillips](https://github.com/rphillips)) + +#### **Cloud Provider Integration** + +* Added `cloud-controller-manager` to `hyperkube`. This is useful as a number of deployment tools run all of the kubernetes components from the `hyperkube `image/binary. It also makes testing easier as a single binary/image can be built and pushed quickly. ([#54197](https://github.com/kubernetes/kubernetes/pull/54197),[ @colemickens](https://github.com/colemickens)) +* Added the concurrent service sync flag to the Cloud Controller Manager to allow changing the number of workers. (`--concurrent-service-syncs`) ([#55561](https://github.com/kubernetes/kubernetes/pull/55561),[ @jhorwit2](https://github.com/jhorwit2)) +* kubelet's --cloud-provider flag no longer defaults to "auto-detect". If you want cloud-provider support in kubelet, you must set a specific cloud-provider explicitly. ([#53573](https://github.com/kubernetes/kubernetes/pull/53573),[ @dims](https://github.com/dims)) + +#### **Kubeadm** + +* kubeadm health checks can now be skipped with `--ignore-preflight-errors`; the `--skip-preflight-checks` flag is now deprecated and will be removed in a future release. ([#56130](https://github.com/kubernetes/kubernetes/pull/56130),[ @anguslees](https://github.com/anguslees)) ([#56072](https://github.com/kubernetes/kubernetes/pull/56072),[ @kad](https://github.com/kad)) +* You now have the option to use CoreDNS instead of KubeDNS. To install CoreDNS instead of kube-dns, set CLUSTER_DNS_CORE_DNS to 'true'. This support is experimental. ([#52501](https://github.com/kubernetes/kubernetes/pull/52501),[ @rajansandeep](https://github.com/rajansandeep)) ([#55728](https://github.com/kubernetes/kubernetes/pull/55728),[ @rajansandeep](https://github.com/rajansandeep)) +* Added --print-join-command flag for kubeadm token create. 
([#56185](https://github.com/kubernetes/kubernetes/pull/56185),[ @mattmoyer](https://github.com/mattmoyer)) +* Added a new --etcd-upgrade keyword to kubeadm upgrade apply. When this keyword is specified, etcd's static pod gets upgraded to the etcd version officially recommended for a target kubernetes release. ([#55010](https://github.com/kubernetes/kubernetes/pull/55010),[ @sbezverk](https://github.com/sbezverk)) +* Kubeadm now supports Kubelet Dynamic Configuration on an alpha level. ([#55803](https://github.com/kubernetes/kubernetes/pull/55803),[ @xiangpengzhao](https://github.com/xiangpengzhao)) +* Added support for adding a Windows node ([#53553](https://github.com/kubernetes/kubernetes/pull/53553),[ @bsteciuk](https://github.com/bsteciuk)) + +#### **Juju** + +* Added support for SAN entries in the master node certificate. ([#54234](https://github.com/kubernetes/kubernetes/pull/54234),[ @hyperbolic2346](https://github.com/hyperbolic2346)) +* Add extra-args configs for scheduler and controller-manager to kubernetes-master charm ([#55185](https://github.com/kubernetes/kubernetes/pull/55185),[ @Cynerva](https://github.com/Cynerva)) +* Add support for RBAC ([#53820](https://github.com/kubernetes/kubernetes/pull/53820),[ @ktsakalozos](https://github.com/ktsakalozos)) +* Fixed iptables FORWARD policy for Docker 1.13 in kubernetes-worker charm ([#54796](https://github.com/kubernetes/kubernetes/pull/54796),[ @Cynerva](https://github.com/Cynerva)) +* Upgrading the kubernetes-master units now results in staged upgrades just like the kubernetes-worker nodes. Use the upgrade action in order to continue the upgrade process on each unit such as juju run-action kubernetes-master/0 upgrade ([#55990](https://github.com/kubernetes/kubernetes/pull/55990),[ @hyperbolic2346](https://github.com/hyperbolic2346)) +* Added extra_sans config option to kubeapi-load-balancer charm. This allows the user to specify extra SAN entries on the certificate generated for the load balancer. 
([#54947](https://github.com/kubernetes/kubernetes/pull/54947),[ @hyperbolic2346](https://github.com/hyperbolic2346)) +* Added extra-args configs to kubernetes-worker charm ([#55334](https://github.com/kubernetes/kubernetes/pull/55334),[ @Cynerva](https://github.com/Cynerva)) + +#### **Other** + +* Base images have been bumped to Debian Stretch (9) ([#52744](https://github.com/kubernetes/kubernetes/pull/52744),[ @rphillips](https://github.com/rphillips)) +* Upgraded to go1.9. ([#51375](https://github.com/kubernetes/kubernetes/pull/51375),[ @cblecker](https://github.com/cblecker)) +* Add-on manager now supports HA masters. ([#55466](https://github.com/kubernetes/kubernetes/pull/55466),[ #55782](https://github.com/kubernetes/kubernetes/pull/55782),[ @x13n](https://github.com/x13n)) +* Hyperkube can now run from a non-standard path. ([#54570](https://github.com/kubernetes/kubernetes/pull/54570)) + +#### **GCP** + +* The service account made available on your nodes is now configurable. ([#52868](https://github.com/kubernetes/kubernetes/pull/52868),[ @ihmccreery](https://github.com/ihmccreery)) +* GCE nodes with NVIDIA GPUs attached now expose nvidia.com/gpu as a resource instead of alpha.kubernetes.io/nvidia-gpu. ([#54826](https://github.com/kubernetes/kubernetes/pull/54826),[ @mindprince](https://github.com/mindprince)) +* Docker's live-restore on COS/ubuntu can now be disabled ([#55260](https://github.com/kubernetes/kubernetes/pull/55260),[ @yujuhong](https://github.com/yujuhong)) +* Metadata concealment is now controlled by the ENABLE_METADATA_CONCEALMENT env var. See cluster/gce/config-default.sh for more info. ([#54150](https://github.com/kubernetes/kubernetes/pull/54150),[ @ihmccreery](https://github.com/ihmccreery)) +* Masquerading rules are now added by default to GCE/GKE ([#55178](https://github.com/kubernetes/kubernetes/pull/55178),[ @dnardo](https://github.com/dnardo)) +* Fixed master startup issues with concurrent iptables invocations. 
([#55945](https://github.com/kubernetes/kubernetes/pull/55945),[ @x13n](https://github.com/x13n)) +* Fixed issue deleting internal load balancers when the firewall resource may not exist. ([#53450](https://github.com/kubernetes/kubernetes/pull/53450),[ @nicksardo](https://github.com/nicksardo)) + +### **Instrumentation** + +#### **Audit** + +* Adjust batching audit webhook default parameters: increase queue size, batch size, and initial backoff. Add throttling to the batching audit webhook. Default rate limit is 10 QPS. ([#53417](https://github.com/kubernetes/kubernetes/pull/53417),[ @crassirostris](https://github.com/crassirostris)) + * These parameters are also now configurable. ([#56638](https://github.com/kubernetes/kubernetes/pull/56638), [@crassirostris](https://github.com/crassirostris)) + +#### **Other** + +* Fix a typo in prometheus-to-sd configuration, that drops some stackdriver metrics. ([#56473](https://github.com/kubernetes/kubernetes/pull/56473),[ @loburm](https://github.com/loburm)) +* [fluentd-elasticsearch addon] Elasticsearch and Kibana are updated to version 5.6.4 ([#55400](https://github.com/kubernetes/kubernetes/pull/55400),[ @mrahbar](https://github.com/mrahbar)) +* fluentd now supports CRI log format. ([#54777](https://github.com/kubernetes/kubernetes/pull/54777),[ @Random-Liu](https://github.com/Random-Liu)) +* Bring all prom-to-sd container to the same image version ([#54583](https://github.com/kubernetes/kubernetes/pull/54583)) + * Reduce log noise produced by prometheus-to-sd, by bumping it to version 0.2.2. 
([#54635](https://github.com/kubernetes/kubernetes/pull/54635),[ @loburm](https://github.com/loburm)) +* [fluentd-elasticsearch addon] Elasticsearch service name can be overridden via env variable ELASTICSEARCH_SERVICE_NAME ([#54215](https://github.com/kubernetes/kubernetes/pull/54215),[ @mrahbar](https://github.com/mrahbar)) + +### **Multicluster** + +#### **Federation** + +* Kubefed init now supports --imagePullSecrets and --imagePullPolicy, making it possible to use private registries. ([#50740](https://github.com/kubernetes/kubernetes/pull/50740),[ @dixudx](https://github.com/dixudx)) +* Updated cluster printer to enable --show-labels ([#53771](https://github.com/kubernetes/kubernetes/pull/53771),[ @dixudx](https://github.com/dixudx)) +* Kubefed init now supports --nodeSelector, enabling you to determine on what node the controller will be installed. ([#50749](https://github.com/kubernetes/kubernetes/pull/50749),[ @dixudx](https://github.com/dixudx)) + +### **Network** + +#### **IPv6** + +* [alpha] IPv6 support has been added. Notable IPv6 support details include: + * Support for IPv6-only Kubernetes cluster deployments. **Note:** This feature does not provide dual-stack support. + * Support for IPv6 Kubernetes control and data planes. + * Support for Kubernetes IPv6 cluster deployments using kubeadm. + * Support for the iptables kube-proxy backend using ip6tables. + * Relies on CNI 0.6.0 binaries for IPv6 pod networking. + * Adds IPv6 support for kube-dns using SRV records. + * Caveats + * Only the CNI bridge and local-ipam plugins have been tested for the alpha release, although other CNI plugins do support IPv6. + * HostPorts are not supported. +* An IPv6 network mask for pod or cluster cidr network must be /66 or longer. For example: 2001:db1::/66, 2001:dead:beef::/76, 2001:cafe::/118 are supported. 
2001:db1::/64 is not supported +* For details, see [the complete list of merged pull requests for IPv6 support](https://github.com/kubernetes/kubernetes/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Amerged+label%3Aarea%2Fipv6). + +#### **IPVS** + +* You can now use the --cleanup-ipvs flag to tell kube-proxy whether to flush all existing ipvs rules in on startup ([#56036](https://github.com/kubernetes/kubernetes/pull/56036),[ @m1093782566](https://github.com/m1093782566)) +* Graduate kube-proxy IPVS mode to beta. ([#56623](https://github.com/kubernetes/kubernetes/pull/56623), [@m1093782566](https://github.com/m1093782566)) + +#### **Kube-Proxy** + +* Added iptables rules to allow Pod traffic even when default iptables policy is to reject. ([#52569](https://github.com/kubernetes/kubernetes/pull/52569),[ @tmjd](https://github.com/tmjd)) +* You can once again use 0 values for conntrack min, max, max per core, tcp close wait timeout, and tcp established timeout; this functionality was broken in 1.8. ([#55261](https://github.com/kubernetes/kubernetes/pull/55261),[ @ncdc](https://github.com/ncdc)) + +#### **CoreDNS** + +* You now have the option to use CoreDNS instead of KubeDNS. To install CoreDNS instead of kube-dns, set CLUSTER_DNS_CORE_DNS to 'true'. This support is experimental. ([#52501](https://github.com/kubernetes/kubernetes/pull/52501),[ @rajansandeep](https://github.com/rajansandeep)) ([#55728](https://github.com/kubernetes/kubernetes/pull/55728),[ @rajansandeep](https://github.com/rajansandeep)) + +#### **Other** + +* Pod addresses will now be removed from the list of endpoints when the pod is in graceful termination. 
([#54828](https://github.com/kubernetes/kubernetes/pull/54828),[ @freehan](https://github.com/freehan)) +* You can now use a new supported service annotation for AWS clusters, `service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy`, which lets you specify which [predefined AWS SSL policy](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html) you would like to use. ([#54507](https://github.com/kubernetes/kubernetes/pull/54507),[ @micahhausler](https://github.com/micahhausler)) +* Termination grace period for the calico/node add-on DaemonSet has been eliminated, reducing downtime during a rolling upgrade or deletion. ([#55015](https://github.com/kubernetes/kubernetes/pull/55015),[ @fasaxc](https://github.com/fasaxc)) +* Fixed bad conversion in host port chain name generating func which led to some unreachable host ports. ([#55153](https://github.com/kubernetes/kubernetes/pull/55153),[ @chenchun](https://github.com/chenchun)) +* Fixed IPVS availability check ([#51874](https://github.com/kubernetes/kubernetes/pull/51874),[ @vfreex](https://github.com/vfreex)) +* The output for kubectl describe networkpolicy * has been enhanced to be more useful. ([#46951](https://github.com/kubernetes/kubernetes/pull/46951),[ @aanm](https://github.com/aanm)) +* Kernel modules are now loaded automatically inside a kube-proxy pod ([#52003](https://github.com/kubernetes/kubernetes/pull/52003),[ @vfreex](https://github.com/vfreex)) +* Improve resilience by annotating kube-dns addon with podAntiAffinity to prefer scheduling on different nodes. ([#52193](https://github.com/kubernetes/kubernetes/pull/52193),[ @StevenACoffman](https://github.com/StevenACoffman)) +* [alpha] Added DNSConfig field to PodSpec. "None" mode for DNSPolicy is now supported. 
([#55848](https://github.com/kubernetes/kubernetes/pull/55848),[ @MrHohn](https://github.com/MrHohn)) +* You can now add "options" to the host's /etc/resolv.conf (or --resolv-conf), and they will be copied into pod's resolv.conf when dnsPolicy is Default. Being able to customize options is important because it is common to leverage options to fine-tune the behavior of DNS client. ([#54773](https://github.com/kubernetes/kubernetes/pull/54773),[ @phsiao](https://github.com/phsiao)) +* Fixed a bug so that the service controller no longer retries if doNotRetry service update fails. ([#54184](https://github.com/kubernetes/kubernetes/pull/54184),[ @MrHohn](https://github.com/MrHohn)) +* Added --no-negcache flag to kube-dns to prevent caching of NXDOMAIN responses. ([#53604](https://github.com/kubernetes/kubernetes/pull/53604),[ @cblecker](https://github.com/cblecker)) + +### **Node** + +#### **Pod API** + +* A single value in metadata.annotations/metadata.labels can now be passed into the containers via the Downward API. ([#55902](https://github.com/kubernetes/kubernetes/pull/55902),[ @yguo0905](https://github.com/yguo0905)) +* Pods will no longer briefly transition to a "Pending" state during the deletion process. ([#54593](https://github.com/kubernetes/kubernetes/pull/54593),[ @dashpole](https://github.com/dashpole)) +* Added pod-level local ephemeral storage metric to the Summary API. Pod-level ephemeral storage reports the total filesystem usage for the containers and emptyDir volumes in the measured Pod. ([#55447](https://github.com/kubernetes/kubernetes/pull/55447),[ @jingxu97](https://github.com/jingxu97)) + +#### **Hardware Accelerators** + +* Kubelet now exposes metrics for NVIDIA GPUs attached to the containers. ([#55188](https://github.com/kubernetes/kubernetes/pull/55188),[ @mindprince](https://github.com/mindprince)) +* The device plugin Alpha API no longer supports returning artifacts per device as part of AllocateResponse. 
([#53031](https://github.com/kubernetes/kubernetes/pull/53031),[ @vishh](https://github.com/vishh)) +* Fix to ignore extended resources that are not registered with kubelet during container resource allocation. ([#53547](https://github.com/kubernetes/kubernetes/pull/53547),[ @jiayingz](https://github.com/jiayingz)) + + +#### **Container Runtime** +* [alpha] [cri-tools](https://github.com/kubernetes-incubator/cri-tools): CLI and validation tools for CRI is now v1.0.0-alpha.0. This release mainly focuses on UX improvements. [[@feiskyer](https://github.com/feiskyer)] + * Make crictl command more user friendly and add more subcommands. + * Integrate with CRI verbose option to provide extra debug information. + * Update CRI to kubernetes v1.9. + * Bug fixes in validation test suites. +* [beta] [cri-containerd](https://github.com/kubernetes-incubator/cri-containerd): CRI implementation for containerd is now v1.0.0-beta.0, [[@Random-Liu](https://github.com/Random-Liu)] + * This release supports Kubernetes 1.9+ and containerd v1.0.0+. + * Pass all Kubernetes 1.9 e2e test, node e2e test and CRI validation tests. + * [Kube-up.sh integration](https://github.com/kubernetes-incubator/cri-containerd/blob/master/docs/kube-up.md). + * [Full crictl integration including CRI verbose option.](https://github.com/kubernetes-incubator/cri-containerd/blob/master/docs/crictl.md) + * Integration with cadvisor to provide better summary api support. +* [stable] [cri-o](https://github.com/kubernetes-incubator/cri-o): CRI implementation for OCI-based runtimes is now v1.9. [[@mrunalp](https://github.com/mrunalp)] + * Pass all the Kubernetes 1.9 end-to-end test suites and now gating PRs as well + * Pass all the CRI validation tests + * Release has been focused on bug fixes, stability and performance with runc and Clear Containers + * Minikube integration +* [stable] [frakti](https://github.com/kubernetes/frakti): CRI implementation for hypervisor-based runtimes is now v1.9. 
[[@resouer](https://github.com/resouer)] + * Added ARM64 release. Upgraded to CNI 0.6.0, added block device as Pod volume mode. Fixed CNI plugin compatibility. + * Passed all CRI validation conformance tests and node end-to-end conformance tests. +* [alpha] [rktlet](https://github.com/kubernetes-incubator/rktlet): CRI implementation for the rkt runtime is now v0.1.0. [[@iaguis](https://github.com/iaguis)] + * This is the first release of rktlet and it implements support for the CRI including fetching images, running pods, CNI networking, logging and exec. +This release passes 129/145 Kubernetes e2e conformance tests. +* Container Runtime Interface API change. [[@yujuhong](https://github.com/yujuhong)] + * A new field is added to CRI container log format to support splitting a long log line into multiple lines. ([#55922](https://github.com/kubernetes/kubernetes/pull/55922), [@Random-Liu](https://github.com/Random-Liu)) + * CRI now supports debugging via a verbose option for status functions. ([#53965](https://github.com/kubernetes/kubernetes/pull/53965), [@Random-Liu](https://github.com/Random-Liu)) + * Kubelet can now provide full summary api support for the CRI container runtime, with the exception of container log stats. ([#55810](https://github.com/kubernetes/kubernetes/pull/55810), [@abhi](https://github.com/abhi)) + * CRI now uses the correct localhost seccomp path when provided with input in the format of localhost//profileRoot/profileName. ([#55450](https://github.com/kubernetes/kubernetes/pull/55450), [@feiskyer](https://github.com/feiskyer)) + + +#### **Kubelet** + +* The EvictionHard, EvictionSoft, EvictionSoftGracePeriod, EvictionMinimumReclaim, SystemReserved, and KubeReserved fields in the KubeletConfiguration object (`kubeletconfig/v1alpha1`) are now of type map[string]string, which facilitates writing JSON and YAML files. 
([#54823](https://github.com/kubernetes/kubernetes/pull/54823),[ @mtaufen](https://github.com/mtaufen)) +* Relative paths in the Kubelet's local config files (`--init-config-dir`) will now be resolved relative to the location of the containing files. ([#55648](https://github.com/kubernetes/kubernetes/pull/55648),[ @mtaufen](https://github.com/mtaufen)) +* It is now possible to set multiple manifest URL headers with the kubelet's `--manifest-url-header` flag. Multiple headers for the same key will be added in the order provided. The ManifestURLHeader field in KubeletConfiguration object (kubeletconfig/v1alpha1) is now a map[string][]string, which facilitates writing JSON and YAML files. ([#54643](https://github.com/kubernetes/kubernetes/pull/54643),[ @mtaufen](https://github.com/mtaufen)) +* The Kubelet's feature gates are now specified as a map when provided via a JSON or YAML KubeletConfiguration, rather than as a string of key-value pairs, making them less awkward for users. ([#53025](https://github.com/kubernetes/kubernetes/pull/53025),[ @mtaufen](https://github.com/mtaufen)) + +##### **Other** + +* Fixed a performance issue ([#51899](https://github.com/kubernetes/kubernetes/pull/51899)) identified in large-scale clusters when deleting thousands of pods simultaneously across hundreds of nodes, by actively removing containers of deleted pods, rather than waiting for periodic garbage collection and batching resulting pod API deletion requests. ([#53233](https://github.com/kubernetes/kubernetes/pull/53233),[ @dashpole](https://github.com/dashpole)) +* Problems deleting local static pods have been resolved. ([#48339](https://github.com/kubernetes/kubernetes/pull/48339),[ @dixudx](https://github.com/dixudx)) +* CRI now only calls UpdateContainerResources when cpuset is set. ([#53122](https://github.com/kubernetes/kubernetes/pull/53122),[ @resouer](https://github.com/resouer)) +* Containerd monitoring is now supported. 
([#56109](https://github.com/kubernetes/kubernetes/pull/56109),[ @dashpole](https://github.com/dashpole)) +* deviceplugin has been extended to more gracefully handle the full device plugin lifecycle, including: ([#55088](https://github.com/kubernetes/kubernetes/pull/55088),[ @jiayingz](https://github.com/jiayingz)) + * Kubelet now uses an explicit cm.GetDevicePluginResourceCapacity() function that makes it possible to more accurately determine what resources are inactive and return a more accurate view of available resources. + * Extends the device plugin checkpoint data to record registered resources so that resource removal for devices can complete even across kubelet restarts. + * Passes sourcesReady from kubelet to the device plugin to avoid removing inactive pods during the grace period of kubelet restart. + * Extends the gpu_device_plugin e2e_node test to verify that scheduled pods can continue to run even after a device plugin deletion and kubelet restart. +* The NodeController no longer supports kubelet 1.2. ([#48996](https://github.com/kubernetes/kubernetes/pull/48996),[ @k82cn](https://github.com/k82cn)) +* Kubelet now provides more specific events via FailedSync when unable to sync a pod. ([#53857](https://github.com/kubernetes/kubernetes/pull/53857),[ @derekwaynecarr](https://github.com/derekwaynecarr)) +* You can now disable AppArmor by setting the AppArmor profile to unconfined. ([#52395](https://github.com/kubernetes/kubernetes/pull/52395),[ @dixudx](https://github.com/dixudx)) +* ImageGCManager now consumes ImageFS stats from StatsProvider rather than cadvisor. ([#53094](https://github.com/kubernetes/kubernetes/pull/53094),[ @yguo0905](https://github.com/yguo0905)) +* Hyperkube now supports the --experimental-dockershim kubelet flag. 
([#54508](https://github.com/kubernetes/kubernetes/pull/54508),[ @ivan4th](https://github.com/ivan4th)) +* Kubelet no longer removes default labels from Node API objects on startup ([#54073](https://github.com/kubernetes/kubernetes/pull/54073),[ @liggitt](https://github.com/liggitt)) +* The overlay2 container disk metrics for Docker and CRI-O now work properly. ([#54827](https://github.com/kubernetes/kubernetes/pull/54827),[ @dashpole](https://github.com/dashpole)) +* Removed docker dependency during kubelet start up. ([#54405](https://github.com/kubernetes/kubernetes/pull/54405),[ @resouer](https://github.com/resouer)) +* Added Windows support to the system verification check. ([#53730](https://github.com/kubernetes/kubernetes/pull/53730),[ @bsteciuk](https://github.com/bsteciuk)) +* Kubelet no longer removes unregistered extended resource capacities from node status; cluster admins will have to manually remove extended resources exposed via device plugins when they remove the plugins themselves. ([#53353](https://github.com/kubernetes/kubernetes/pull/53353),[ @jiayingz](https://github.com/jiayingz)) +* The stats summary network value now takes into account multiple network interfaces, and not just eth0. ([#52144](https://github.com/kubernetes/kubernetes/pull/52144),[ @andyxning](https://github.com/andyxning)) +* Base images have been bumped to Debian Stretch (9). ([#52744](https://github.com/kubernetes/kubernetes/pull/52744),[ @rphillips](https://github.com/rphillips)) + +### **OpenStack** + +* OpenStack Cinder support has been improved: + * Cinder version detection now works properly. ([#53115](https://github.com/kubernetes/kubernetes/pull/53115),[ @FengyunPan](https://github.com/FengyunPan)) + * The OpenStack cloud provider now supports Cinder v3 API. 
([#52910](https://github.com/kubernetes/kubernetes/pull/52910),[ @FengyunPan](https://github.com/FengyunPan)) +* Load balancing is now more flexible: + * The OpenStack LBaaS v2 Provider is now [configurable](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#openstack). ([#54176](https://github.com/kubernetes/kubernetes/pull/54176),[ @gonzolino](https://github.com/gonzolino)) + * OpenStack Octavia v2 is now supported as a load balancer provider in addition to the existing support for the Neutron LBaaS V2 implementation. Neutron LBaaS V1 support has been removed. ([#55393](https://github.com/kubernetes/kubernetes/pull/55393),[ @jamiehannaford](https://github.com/jamiehannaford)) +* OpenStack security group support has been beefed up ([#50836](https://github.com/kubernetes/kubernetes/pull/50836),[ @FengyunPan](https://github.com/FengyunPan)): + * Kubernetes will now automatically determine the security group for the node + * Nodes can now belong to multiple security groups + +### **Scheduling** + +#### **Hardware Accelerators** + +* Add ExtendedResourceToleration admission controller. This facilitates creation of dedicated nodes with extended resources. If operators want to create dedicated nodes with extended resources (such as GPUs, FPGAs, and so on), they are expected to taint the node with extended resource name as the key. This admission controller, if enabled, automatically adds tolerations for such taints to pods requesting extended resources, so users don't have to manually add these tolerations. ([#55839](https://github.com/kubernetes/kubernetes/pull/55839),[ @mindprince](https://github.com/mindprince)) + +#### **Other** + +* Scheduler cache ignores updates to an assumed pod if updates are limited to pod annotations. ([#54008](https://github.com/kubernetes/kubernetes/pull/54008),[ @yguo0905](https://github.com/yguo0905)) +* Issues with namespace deletion have been resolved. 
([#53720](https://github.com/kubernetes/kubernetes/pull/53720),[ @shyamjvs](https://github.com/shyamjvs)) ([#53793](https://github.com/kubernetes/kubernetes/pull/53793),[ @wojtek-t](https://github.com/wojtek-t)) +* Pod preemption has been improved. + * Now takes PodDisruptionBudget into account. ([#56178](https://github.com/kubernetes/kubernetes/pull/56178),[ @bsalamat](https://github.com/bsalamat)) + * Nominated pods are taken into account during scheduling to avoid starvation of higher priority pods. ([#55933](https://github.com/kubernetes/kubernetes/pull/55933),[ @bsalamat](https://github.com/bsalamat)) +* Fixed 'Schedulercache is corrupted' error in kube-scheduler ([#55262](https://github.com/kubernetes/kubernetes/pull/55262),[ @liggitt](https://github.com/liggitt)) +* The kube-scheduler command now supports a --config flag which is the location of a file containing a serialized scheduler configuration. Most other kube-scheduler flags are now deprecated. ([#52562](https://github.com/kubernetes/kubernetes/pull/52562),[ @ironcladlou](https://github.com/ironcladlou)) +* A new scheduling queue helps schedule the highest priority pending pod first. ([#55109](https://github.com/kubernetes/kubernetes/pull/55109),[ @bsalamat](https://github.com/bsalamat)) +* A Pod can now listen to the same port on multiple IP addresses. ([#52421](https://github.com/kubernetes/kubernetes/pull/52421),[ @WIZARD-CXY](https://github.com/WIZARD-CXY)) +* Object count quotas supported on all standard resources using count/. syntax ([#54320](https://github.com/kubernetes/kubernetes/pull/54320),[ @derekwaynecarr](https://github.com/derekwaynecarr)) +* Apply algorithm in scheduler by feature gates. 
([#52723](https://github.com/kubernetes/kubernetes/pull/52723),[ @k82cn](https://github.com/k82cn)) +* A new priority function ResourceLimitsPriorityMap (disabled by default and behind alpha feature gate and not part of the scheduler's default priority functions list) that assigns a lowest possible score of 1 to a node that satisfies one or both of input pod's cpu and memory limits, mainly to break ties between nodes with same scores. ([#55906](https://github.com/kubernetes/kubernetes/pull/55906),[ @aveshagarwal](https://github.com/aveshagarwal)) +* Kubelet evictions now take pod priority into account ([#53542](https://github.com/kubernetes/kubernetes/pull/53542),[ @dashpole](https://github.com/dashpole)) +* PodTolerationRestriction admission plugin: if namespace level tolerations are empty, now they override cluster level tolerations. ([#54812](https://github.com/kubernetes/kubernetes/pull/54812),[ @aveshagarwal](https://github.com/aveshagarwal)) + +### **Storage** + +* [stable] `PersistentVolume` and `PersistentVolumeClaim` objects must now have a capacity greater than zero. +* [stable] Mutation of `PersistentVolumeSource` after creation is no longer allowed. +* [alpha] Deletion of `PersistentVolumeClaim` objects that are in use by a pod is no longer permitted (if alpha feature is enabled). +* [alpha] Container Storage Interface + * New CSIVolumeSource enables Kubernetes to use external CSI drivers to provision, attach, and mount volumes. +* [alpha] Raw block volumes + * Support for surfacing volumes as raw block devices added to Kubernetes storage system. + * Only the Fibre Channel volume plugin exposes this functionality in this release. 
+* [alpha] Volume resizing + * Added file system resizing for the following volume plugins: GCE PD, Ceph RBD, AWS EBS, OpenStack Cinder +* [alpha] Topology Aware Volume Scheduling + * Improved volume scheduling for Local PersistentVolumes, by allowing the scheduler to make PersistentVolume binding decisions while respecting the Pod's scheduling requirements. + * Dynamic provisioning is not supported with this feature yet. +* [alpha] Containerized mount utilities + * Allow mount utilities, used to mount volumes, to run inside a container instead of on the host. +* Bug Fixes + * ScaleIO volume plugin is no longer dependent on the drv_cfg binary, so a Kubernetes cluster can easily run a containerized kubelet. ([#54956](https://github.com/kubernetes/kubernetes/pull/54956),[ @vladimirvivien](https://github.com/vladimirvivien)) + * AWS EBS Volumes are detached from stopped AWS nodes. ([#55893](https://github.com/kubernetes/kubernetes/pull/55893),[ @gnufied](https://github.com/gnufied)) + * AWS EBS volumes are detached if attached to a different node than expected. ([#55491](https://github.com/kubernetes/kubernetes/pull/55491),[ @gnufied](https://github.com/gnufied)) + * PV Recycle now works in environments that use architectures other than x86. ([#53958](https://github.com/kubernetes/kubernetes/pull/53958),[ @dixudx](https://github.com/dixudx)) + * Pod Security Policy can now manage access to specific FlexVolume drivers.([#53179](https://github.com/kubernetes/kubernetes/pull/53179),[ @wanghaoran1988](https://github.com/wanghaoran1988)) + * To prevent unauthorized access to CHAP Secrets, you can now set the secretNamespace storage class parameters for the following volume types: + * ScaleIO; StoragePool and ProtectionDomain attributes no longer default to the value default. 
([#54013](https://github.com/kubernetes/kubernetes/pull/54013),[ @vladimirvivien](https://github.com/vladimirvivien)) + * RBD Persistent Volume Sources ([#54302](https://github.com/kubernetes/kubernetes/pull/54302),[ @sbezverk](https://github.com/sbezverk)) + * iSCSI Persistent Volume Sources ([#51530](https://github.com/kubernetes/kubernetes/pull/51530),[ @rootfs](https://github.com/rootfs)) + * In GCE multizonal clusters, `PersistentVolume` objects will no longer be dynamically provisioned in zones without nodes. ([#52322](https://github.com/kubernetes/kubernetes/pull/52322),[ @davidz627](https://github.com/davidz627)) + * Multi Attach PVC errors and events are now more useful and less noisy. ([#53401](https://github.com/kubernetes/kubernetes/pull/53401),[ @gnufied](https://github.com/gnufied)) + * The compute-rw scope has been removed from GCE nodes ([#53266](https://github.com/kubernetes/kubernetes/pull/53266),[ @mikedanese](https://github.com/mikedanese)) + * Updated vSphere cloud provider to support k8s cluster spread across multiple vCenters ([#55845](https://github.com/kubernetes/kubernetes/pull/55845),[ @rohitjogvmw](https://github.com/rohitjogvmw)) + * vSphere: Fix disk is not getting detached when PV is provisioned on clustered datastore. ([#54438](https://github.com/kubernetes/kubernetes/pull/54438),[ @pshahzeb](https://github.com/pshahzeb)) + * If a non-absolute mountPath is passed to the kubelet, it must now be prefixed with the appropriate root path. 
([#55665](https://github.com/kubernetes/kubernetes/pull/55665),[ @brendandburns](https://github.com/brendandburns)) + +## External Dependencies + +* The supported etcd server version is **3.1.10**, as compared to 3.0.17 in v1.8 ([#49393](https://github.com/kubernetes/kubernetes/pull/49393),[ @hongchaodeng](https://github.com/hongchaodeng)) +* The validated docker versions are the same as for v1.8: **1.11.2 to 1.13.1 and 17.03.x** +* The Go version was upgraded from go1.8.3 to **go1.9.2** ([#51375](https://github.com/kubernetes/kubernetes/pull/51375),[ @cblecker](https://github.com/cblecker)) + * The minimum supported go version bumps to 1.9.1. ([#55301](https://github.com/kubernetes/kubernetes/pull/55301),[ @xiangpengzhao](https://github.com/xiangpengzhao)) + * Kubernetes has been upgraded to go1.9.2 ([#55420](https://github.com/kubernetes/kubernetes/pull/55420),[ @cblecker](https://github.com/cblecker)) +* CNI was upgraded to **v0.6.0** ([#51250](https://github.com/kubernetes/kubernetes/pull/51250),[ @dixudx](https://github.com/dixudx)) +* The dashboard add-on has been updated to [v1.8.0](https://github.com/kubernetes/dashboard/releases/tag/v1.8.0). ([#53046](https://github.com/kubernetes/kubernetes/pull/53046), [@maciaszczykm](https://github.com/maciaszczykm)) +* Heapster has been updated to [v1.5.0](https://github.com/kubernetes/heapster/releases/tag/v1.5.0). ([#57046](https://github.com/kubernetes/kubernetes/pull/57046), [@piosz](https://github.com/piosz)) +* Cluster Autoscaler has been updated to [v1.1.0](https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.1.0). 
([#56969](https://github.com/kubernetes/kubernetes/pull/56969), [@mwielgus](https://github.com/mwielgus)) +* Update kube-dns 1.14.7 ([#54443](https://github.com/kubernetes/kubernetes/pull/54443),[ @bowei](https://github.com/bowei)) +* Update influxdb to v1.3.3 and grafana to v4.4.3 ([#53319](https://github.com/kubernetes/kubernetes/pull/53319),[ @kairen](https://github.com/kairen)) +- [v1.9.0-beta.2](#v190-beta2) +- [v1.9.0-beta.1](#v190-beta1) +- [v1.9.0-alpha.3](#v190-alpha3) +- [v1.9.0-alpha.2](#v190-alpha2) +- [v1.9.0-alpha.1](#v190-alpha1) + + + # v1.9.0-beta.2 [Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples) From fffd152e0df522473ef289d924e2ab683f44834f Mon Sep 17 00:00:00 2001 From: David Zhu Date: Fri, 15 Dec 2017 15:36:59 -0800 Subject: [PATCH 333/794] Fixed kubelet error message to be more descriptive. Added Attach success event for help in debugging. --- pkg/kubelet/events/event.go | 1 + pkg/kubelet/volumemanager/volume_manager.go | 21 +++++++++++++++++-- .../operationexecutor/operation_generator.go | 5 +++++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/events/event.go b/pkg/kubelet/events/event.go index 12740573811..6ec06a2ee99 100644 --- a/pkg/kubelet/events/event.go +++ b/pkg/kubelet/events/event.go @@ -59,6 +59,7 @@ const ( FailedUnmapDevice = "FailedUnmapDevice" WarnAlreadyMountedVolume = "AlreadyMountedVolume" SuccessfulDetachVolume = "SuccessfulDetachVolume" + SuccessfulAttachVolume = "SuccessfulAttachVolume" SuccessfulMountVolume = "SuccessfulMountVolume" SuccessfulUnMountVolume = "SuccessfulUnMountVolume" HostPortConflict = "HostPortConflict" diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go index d3e7711407b..88027ce5fb3 100644 --- a/pkg/kubelet/volumemanager/volume_manager.go +++ b/pkg/kubelet/volumemanager/volume_manager.go @@ -357,21 +357,38 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { // 
Timeout expired unmountedVolumes := vm.getUnmountedVolumes(uniquePodName, expectedVolumes) + // Also get unattached volumes for error message + unattachedVolumes := + vm.getUnattachedVolumes(expectedVolumes) + if len(unmountedVolumes) == 0 { return nil } return fmt.Errorf( - "timeout expired waiting for volumes to attach/mount for pod %q/%q. list of unattached/unmounted volumes=%v", + "timeout expired waiting for volumes to attach or mount for pod %q/%q. list of unmounted volumes=%v. list of unattached volumes=%v", pod.Namespace, pod.Name, - unmountedVolumes) + unmountedVolumes, + unattachedVolumes) } glog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod)) return nil } +// getUnattachedVolumes returns a list of the volumes that are expected to be attached but +// are not currently attached to the node +func (vm *volumeManager) getUnattachedVolumes(expectedVolumes []string) []string { + unattachedVolumes := []string{} + for _, volume := range expectedVolumes { + if !vm.actualStateOfWorld.VolumeExists(v1.UniqueVolumeName(volume)) { + unattachedVolumes = append(unattachedVolumes, volume) + } + } + return unattachedVolumes +} + // verifyVolumesMountedFunc returns a method that returns true when all expected // volumes are mounted. 
func (vm *volumeManager) verifyVolumesMountedFunc(podName types.UniquePodName, expectedVolumes []string) wait.ConditionFunc { diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 2ff4b668f00..78cdb40dd23 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -305,6 +305,11 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( return detailedErr } + // Successful attach event is useful for user debugging + simpleMsg, _ := volumeToAttach.GenerateMsg("AttachVolume.Attach succeeded", "") + for _, pod := range volumeToAttach.ScheduledPods { + og.recorder.Eventf(pod, v1.EventTypeNormal, kevents.SuccessfulAttachVolume, simpleMsg) + } glog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", "")) // Update actual state of world From 76c4146c5dcc59bc0e6cd8d30e8d4db6cbff39a8 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Fri, 1 Dec 2017 16:07:48 +0800 Subject: [PATCH 334/794] remove white space on glogs --- pkg/cloudprovider/providers/azure/azure_blobDiskController.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index 32c009483b4..002f66163a4 100644 --- a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -535,7 +535,7 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts) } - glog.V(2).Infof("azureDisk - Creating storage account %s type %s \n", storageAccountName, string(storageAccountType)) + glog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, 
string(storageAccountType)) cp := storage.AccountCreateParameters{ Sku: &storage.Sku{Name: storageAccountType}, From ed4c51053ed3157b56edfb719bb77264990ba9e2 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Fri, 15 Dec 2017 15:50:57 +0800 Subject: [PATCH 335/794] Update kubeadm's minimum supported kubernetes to 1.9. --- cmd/kubeadm/app/cmd/phases/certs_test.go | 2 +- .../app/cmd/phases/controlplane_test.go | 8 +- cmd/kubeadm/app/constants/constants.go | 4 +- cmd/kubeadm/app/phases/upgrade/policy_test.go | 124 +++++++++--------- 4 files changed, 69 insertions(+), 69 deletions(-) diff --git a/cmd/kubeadm/app/cmd/phases/certs_test.go b/cmd/kubeadm/app/cmd/phases/certs_test.go index 211eea5db8f..415e5d13890 100644 --- a/cmd/kubeadm/app/cmd/phases/certs_test.go +++ b/cmd/kubeadm/app/cmd/phases/certs_test.go @@ -34,7 +34,7 @@ import ( ) // phaseTestK8sVersion is a fake kubernetes version to use when testing -const phaseTestK8sVersion = "v1.8.0" +const phaseTestK8sVersion = "v1.9.0" func TestCertsSubCommandsHasFlags(t *testing.T) { diff --git a/cmd/kubeadm/app/cmd/phases/controlplane_test.go b/cmd/kubeadm/app/cmd/phases/controlplane_test.go index 6db2dc3c5a6..6333263dfb1 100644 --- a/cmd/kubeadm/app/cmd/phases/controlplane_test.go +++ b/cmd/kubeadm/app/cmd/phases/controlplane_test.go @@ -93,7 +93,7 @@ func TestControlPlaneCreateFilesWithFlags(t *testing.T) { { command: "all", additionalFlags: []string{ - "--kubernetes-version=v1.8.0", + "--kubernetes-version=v1.9.0", "--apiserver-advertise-address=1.2.3.4", "--apiserver-bind-port=6443", "--service-cidr=1.2.3.4/16", @@ -108,7 +108,7 @@ func TestControlPlaneCreateFilesWithFlags(t *testing.T) { { command: "apiserver", additionalFlags: []string{ - "--kubernetes-version=v1.8.0", + "--kubernetes-version=v1.9.0", "--apiserver-advertise-address=1.2.3.4", "--apiserver-bind-port=6443", "--service-cidr=1.2.3.4/16", @@ -118,7 +118,7 @@ func TestControlPlaneCreateFilesWithFlags(t *testing.T) { { command: "controller-manager", 
additionalFlags: []string{ - "--kubernetes-version=v1.8.0", + "--kubernetes-version=v1.9.0", "--pod-network-cidr=1.2.3.4/16", }, expectedFiles: []string{"kube-controller-manager.yaml"}, @@ -126,7 +126,7 @@ func TestControlPlaneCreateFilesWithFlags(t *testing.T) { { command: "scheduler", additionalFlags: []string{ - "--kubernetes-version=v1.8.0", + "--kubernetes-version=v1.9.0", }, expectedFiles: []string{"kube-scheduler.yaml"}, }, diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 4cf9076eb9a..84e9bfe317f 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -226,10 +226,10 @@ var ( MasterComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler} // MinimumControlPlaneVersion specifies the minimum control plane version kubeadm can deploy - MinimumControlPlaneVersion = version.MustParseSemantic("v1.8.0") + MinimumControlPlaneVersion = version.MustParseSemantic("v1.9.0") // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports - MinimumKubeletVersion = version.MustParseSemantic("v1.8.0") + MinimumKubeletVersion = version.MustParseSemantic("v1.9.0") // MinimumKubeProxyComponentConfigVersion specifies the minimum version for the kubeProxyComponent MinimumKubeProxyComponentConfigVersion = version.MustParseSemantic("v1.9.0-alpha.3") diff --git a/cmd/kubeadm/app/phases/upgrade/policy_test.go b/cmd/kubeadm/app/phases/upgrade/policy_test.go index 64f6d0e692c..e4b1cdc08e6 100644 --- a/cmd/kubeadm/app/phases/upgrade/policy_test.go +++ b/cmd/kubeadm/app/phases/upgrade/policy_test.go @@ -32,135 +32,135 @@ func TestEnforceVersionPolicies(t *testing.T) { }{ { // everything ok vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.8.5", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.9.5", }, - newK8sVersion: "v1.8.5", + newK8sVersion: "v1.9.5", }, { // everything 
ok vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.2", - kubeadmVersion: "v1.9.1", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.2", + kubeadmVersion: "v1.10.1", }, - newK8sVersion: "v1.9.0", + newK8sVersion: "v1.10.0", }, { // downgrades ok vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.8.3", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.9.3", }, - newK8sVersion: "v1.8.2", + newK8sVersion: "v1.9.2", }, { // upgrades without bumping the version number ok vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.8.3", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.9.3", }, - newK8sVersion: "v1.8.3", + newK8sVersion: "v1.9.3", }, - { // new version must be higher than v1.8.0 + { // new version must be higher than v1.9.0 vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.8.3", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.9.3", }, - newK8sVersion: "v1.7.10", - expectedMandatoryErrs: 1, // version must be higher than v1.8.0 + newK8sVersion: "v1.8.10", + expectedMandatoryErrs: 1, // version must be higher than v1.9.0 }, { // upgrading two minor versions in one go is not supported vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.10.0", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.11.0", }, - newK8sVersion: "v1.10.0", + newK8sVersion: "v1.11.0", expectedMandatoryErrs: 1, // can't upgrade two minor versions expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large }, { // downgrading two minor versions in one go is not supported vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.10.0", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + 
kubeadmVersion: "v1.11.0", }, - newK8sVersion: "v1.8.3", + newK8sVersion: "v1.9.3", expectedMandatoryErrs: 1, // can't downgrade two minor versions }, { // kubeadm version must be higher than the new kube version. However, patch version skews may be forced vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.8.3", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.9.3", }, - newK8sVersion: "v1.8.5", + newK8sVersion: "v1.9.5", expectedSkippableErrs: 1, }, { // kubeadm version must be higher than the new kube version. Trying to upgrade k8s to a higher minor version than kubeadm itself should never be supported vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.8.3", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.9.3", }, - newK8sVersion: "v1.9.0", + newK8sVersion: "v1.10.0", expectedMandatoryErrs: 1, }, { // the maximum skew between the cluster version and the kubelet versions should be one minor version. This may be forced through though. 
vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.7.8", - kubeadmVersion: "v1.9.0", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.8.8", + kubeadmVersion: "v1.10.0", }, - newK8sVersion: "v1.9.0", + newK8sVersion: "v1.10.0", expectedSkippableErrs: 1, }, { // experimental upgrades supported if the flag is set vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.9.0-beta.1", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.10.0-beta.1", }, - newK8sVersion: "v1.9.0-beta.1", + newK8sVersion: "v1.10.0-beta.1", allowExperimental: true, }, { // release candidate upgrades supported if the flag is set vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.9.0-rc.1", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.10.0-rc.1", }, - newK8sVersion: "v1.9.0-rc.1", + newK8sVersion: "v1.10.0-rc.1", allowRCs: true, }, { // release candidate upgrades supported if the flag is set vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.9.0-rc.1", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.10.0-rc.1", }, - newK8sVersion: "v1.9.0-rc.1", + newK8sVersion: "v1.10.0-rc.1", allowExperimental: true, }, { // the user should not be able to upgrade to an experimental version if they haven't opted into that vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.9.0-beta.1", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.10.0-beta.1", }, - newK8sVersion: "v1.9.0-beta.1", + newK8sVersion: "v1.10.0-beta.1", allowRCs: true, expectedSkippableErrs: 1, }, { // the user should not be able to upgrade to an release candidate version if they haven't opted into that vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: 
"v1.9.0-rc.1", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.10.0-rc.1", }, - newK8sVersion: "v1.9.0-rc.1", + newK8sVersion: "v1.10.0-rc.1", expectedSkippableErrs: 1, }, } From 95bece666dab4a1495d1f520f97df96e7af4cb53 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Sat, 16 Dec 2017 10:36:14 +0530 Subject: [PATCH 336/794] Record volumeID in GlusterFS PV spec. Signed-off-by: Humble Chirammal --- pkg/volume/glusterfs/glusterfs.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 0ff53f3563b..f0b5be769f9 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -84,6 +84,7 @@ const ( heketiAnn = "heketi-dynamic-provisioner" glusterTypeAnn = "gluster.org/type" glusterDescAnn = "Gluster-Internal: Dynamically provisioned PV" + heketiVolIDAnn = "gluster.kubernetes.io/heketi-volume-id" ) func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) error { @@ -695,7 +696,7 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { glog.V(2).Infof("Allocated GID [%d] for PVC %s", gid, p.options.PVC.Name) - glusterfs, sizeGiB, err := p.CreateVolume(gid) + glusterfs, sizeGiB, volID, err := p.CreateVolume(gid) if err != nil { if releaseErr := gidTable.Release(gid); releaseErr != nil { glog.Errorf("error when releasing GID in storageclass: %s", scName) @@ -721,6 +722,7 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { glusterTypeAnn: "file", "Description": glusterDescAnn, v1.MountOptionAnnotation: "auto_unmount", + heketiVolIDAnn: volID, } pv.Spec.Capacity = v1.ResourceList{ @@ -729,7 +731,7 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { return pv, nil } -func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, err error) { +func (p *glusterfsVolumeProvisioner) 
CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, volID string, err error) { var clusterIDs []string capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // Glusterfs creates volumes in units of GiB, but heketi documentation incorrectly reports GBs @@ -737,12 +739,12 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum glog.V(2).Infof("create volume of size: %d GiB and configuration %+v", sz, p.provisionerConfig) if p.url == "" { glog.Errorf("REST server endpoint is empty") - return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST URL is empty") + return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty") } cli := gcli.NewClient(p.url, p.user, p.secretValue) if cli == nil { glog.Errorf("failed to create glusterfs rest client") - return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") + return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") } if p.provisionerConfig.clusterID != "" { clusterIDs = dstrings.Split(p.clusterID, ",") @@ -753,13 +755,14 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum volume, err := cli.VolumeCreate(volumeReq) if err != nil { glog.Errorf("error creating volume %v ", err) - return nil, 0, fmt.Errorf("error creating volume %v", err) + return nil, 0, "", fmt.Errorf("error creating volume %v", err) } glog.V(1).Infof("volume with size: %d and name: %s created", volume.Size, volume.Name) + volID = volume.Id dynamicHostIps, err := getClusterNodes(cli, volume.Cluster) if err != nil { glog.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume) - return nil, 0, fmt.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume) + return nil, 0, "", fmt.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume) } // The 'endpointname' is created in 
form of 'glusterfs-dynamic-'. @@ -775,14 +778,14 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum if deleteErr != nil { glog.Errorf("error when deleting the volume :%v , manual deletion required", deleteErr) } - return nil, 0, fmt.Errorf("failed to create endpoint/service %v", err) + return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v", err) } glog.V(3).Infof("dynamic ep %v and svc : %v ", endpoint, service) return &v1.GlusterfsVolumeSource{ EndpointsName: endpoint.Name, Path: volume.Name, ReadOnly: false, - }, sz, nil + }, sz, volID, nil } func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *v1.Endpoints, service *v1.Service, err error) { From f93d8420b0653c1f0d457a16df13a199a2ab2535 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Fri, 1 Dec 2017 17:55:17 +0800 Subject: [PATCH 337/794] bump netlink version because of we need to use new version RouteListFiltered --- Godeps/Godeps.json | 6 +- vendor/github.com/vishvananda/netlink/BUILD | 4 + .../github.com/vishvananda/netlink/Makefile | 5 +- .../github.com/vishvananda/netlink/README.md | 1 + .../vishvananda/netlink/addr_linux.go | 77 +- .../vishvananda/netlink/bridge_linux.go | 18 +- .../vishvananda/netlink/class_linux.go | 25 +- .../vishvananda/netlink/conntrack_linux.go | 14 +- .../github.com/vishvananda/netlink/filter.go | 12 +- .../vishvananda/netlink/filter_linux.go | 85 +- vendor/github.com/vishvananda/netlink/fou.go | 21 + .../vishvananda/netlink/fou_linux.go | 215 +++++ .../vishvananda/netlink/fou_unspecified.go | 15 + .../vishvananda/netlink/genetlink_linux.go | 7 +- .../vishvananda/netlink/gtp_linux.go | 15 +- .../vishvananda/netlink/handle_linux.go | 51 +- .../vishvananda/netlink/handle_unspecified.go | 4 + vendor/github.com/vishvananda/netlink/link.go | 120 ++- .../vishvananda/netlink/link_linux.go | 789 +++++++++++++----- 
.../github.com/vishvananda/netlink/neigh.go | 3 + .../vishvananda/netlink/neigh_linux.go | 61 +- .../netlink/netlink_unspecified.go | 4 + .../github.com/vishvananda/netlink/nl/BUILD | 2 + .../vishvananda/netlink/nl/addr_linux.go | 13 +- .../vishvananda/netlink/nl/link_linux.go | 30 +- .../vishvananda/netlink/nl/nl_linux.go | 242 +++--- .../vishvananda/netlink/nl/route_linux.go | 39 +- .../vishvananda/netlink/nl/seg6_linux.go | 111 +++ .../vishvananda/netlink/nl/syscall.go | 10 + .../vishvananda/netlink/protinfo_linux.go | 9 +- .../vishvananda/netlink/qdisc_linux.go | 95 ++- .../github.com/vishvananda/netlink/route.go | 64 +- .../vishvananda/netlink/route_linux.go | 314 +++++-- vendor/github.com/vishvananda/netlink/rule.go | 2 + .../vishvananda/netlink/rule_linux.go | 53 +- .../vishvananda/netlink/socket_linux.go | 8 +- vendor/github.com/vishvananda/netlink/xfrm.go | 13 +- .../vishvananda/netlink/xfrm_monitor_linux.go | 7 +- .../vishvananda/netlink/xfrm_policy_linux.go | 19 +- .../vishvananda/netlink/xfrm_state.go | 27 +- .../vishvananda/netlink/xfrm_state_linux.go | 33 +- .../vishvananda/netns/netns_linux.go | 2 + 42 files changed, 1955 insertions(+), 690 deletions(-) create mode 100644 vendor/github.com/vishvananda/netlink/fou.go create mode 100644 vendor/github.com/vishvananda/netlink/fou_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/fou_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/seg6_linux.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 250857a1560..b2043dcf2d5 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -2530,15 +2530,15 @@ }, { "ImportPath": "github.com/vishvananda/netlink", - "Rev": "f5a6f697a596c788d474984a38a0ac4ba0719e93" + "Rev": "f67b75edbf5e3bb7dfe70bb788610693a71be3d1" }, { "ImportPath": "github.com/vishvananda/netlink/nl", - "Rev": "f5a6f697a596c788d474984a38a0ac4ba0719e93" + "Rev": "f67b75edbf5e3bb7dfe70bb788610693a71be3d1" }, { "ImportPath": 
"github.com/vishvananda/netns", - "Rev": "86bef332bfc3b59b7624a600bd53009ce91a9829" + "Rev": "be1fbeda19366dea804f00efff2dd73a1642fdcc" }, { "ImportPath": "github.com/vmware/govmomi", diff --git a/vendor/github.com/vishvananda/netlink/BUILD b/vendor/github.com/vishvananda/netlink/BUILD index 5a6ee26d7d6..7ed3253d2f2 100644 --- a/vendor/github.com/vishvananda/netlink/BUILD +++ b/vendor/github.com/vishvananda/netlink/BUILD @@ -7,6 +7,8 @@ go_library( "class.go", "conntrack_unspecified.go", "filter.go", + "fou.go", + "fou_unspecified.go", "genetlink_unspecified.go", "handle_unspecified.go", "link.go", @@ -31,6 +33,7 @@ go_library( "class_linux.go", "conntrack_linux.go", "filter_linux.go", + "fou_linux.go", "genetlink_linux.go", "gtp_linux.go", "handle_linux.go", @@ -55,6 +58,7 @@ go_library( deps = [ "//vendor/github.com/vishvananda/netlink/nl:go_default_library", "//vendor/github.com/vishvananda/netns:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], ) diff --git a/vendor/github.com/vishvananda/netlink/Makefile b/vendor/github.com/vishvananda/netlink/Makefile index 6c8413b13a5..a0e68e7a9aa 100644 --- a/vendor/github.com/vishvananda/netlink/Makefile +++ b/vendor/github.com/vishvananda/netlink/Makefile @@ -3,7 +3,8 @@ DIRS := \ nl DEPS = \ - github.com/vishvananda/netns + github.com/vishvananda/netns \ + golang.org/x/sys/unix uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1))) testdirs = $(call uniq,$(foreach d,$(1),$(dir $(wildcard $(d)/*_test.go)))) @@ -18,7 +19,7 @@ $(call goroot,$(DEPS)): .PHONY: $(call testdirs,$(DIRS)) $(call testdirs,$(DIRS)): - sudo -E go test -test.parallel 4 -timeout 60s -v github.com/vishvananda/netlink/$@ + go test -test.exec sudo -test.parallel 4 -timeout 60s -test.v github.com/vishvananda/netlink/$@ $(call fmt,$(call testdirs,$(DIRS))): ! gofmt -l $(subst fmt-,,$@)/*.go | grep -q . 
diff --git a/vendor/github.com/vishvananda/netlink/README.md b/vendor/github.com/vishvananda/netlink/README.md index 0b61be217e0..a88e2f41840 100644 --- a/vendor/github.com/vishvananda/netlink/README.md +++ b/vendor/github.com/vishvananda/netlink/README.md @@ -89,3 +89,4 @@ There are also a few pieces of low level netlink functionality that still need to be implemented. Routing rules are not in place and some of the more advanced link types. Hopefully there is decent structure and testing in place to make these fairly straightforward to add. + diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go index 92edb90d09f..8597ab7fcbc 100644 --- a/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -2,13 +2,12 @@ package netlink import ( "fmt" - "log" "net" "strings" - "syscall" "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) // IFA_FLAGS is a u32 attribute. @@ -23,7 +22,7 @@ func AddrAdd(link Link, addr *Addr) error { // AddrAdd will add an IP address to a link device. // Equivalent to: `ip addr add $addr dev $link` func (h *Handle) AddrAdd(link Link, addr *Addr) error { - req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -36,7 +35,7 @@ func AddrReplace(link Link, addr *Addr) error { // AddrReplace will replace (or, if not present, add) an IP address on a link device. 
// Equivalent to: `ip addr replace $addr dev $link` func (h *Handle) AddrReplace(link Link, addr *Addr) error { - req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -49,7 +48,7 @@ func AddrDel(link Link, addr *Addr) error { // AddrDel will delete an IP address from a link device. // Equivalent to: `ip addr del $addr dev $link` func (h *Handle) AddrDel(link Link, addr *Addr) error { - req := h.newNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -76,7 +75,7 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error localAddrData = addr.IP.To16() } - localData := nl.NewRtAttr(syscall.IFA_LOCAL, localAddrData) + localData := nl.NewRtAttr(unix.IFA_LOCAL, localAddrData) req.AddData(localData) var peerAddrData []byte if addr.Peer != nil { @@ -89,7 +88,7 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error peerAddrData = localAddrData } - addressData := nl.NewRtAttr(syscall.IFA_ADDRESS, peerAddrData) + addressData := nl.NewRtAttr(unix.IFA_ADDRESS, peerAddrData) req.AddData(addressData) if addr.Flags != 0 { @@ -110,14 +109,14 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error } addr.Broadcast = calcBroadcast } - req.AddData(nl.NewRtAttr(syscall.IFA_BROADCAST, addr.Broadcast)) + req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast)) if addr.Label != "" { - labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label)) + labelData := nl.NewRtAttr(unix.IFA_LABEL, nl.ZeroTerminated(addr.Label)) req.AddData(labelData) } - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -132,11 +131,11 @@ func 
AddrList(link Link, family int) ([]Addr, error) { // Equivalent to: `ip addr show`. // The list can be filtered by link and ip family. func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { - req := h.newNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWADDR) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) if err != nil { return nil, err } @@ -188,21 +187,21 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { var local, dst *net.IPNet for _, attr := range attrs { switch attr.Attr.Type { - case syscall.IFA_ADDRESS: + case unix.IFA_ADDRESS: dst = &net.IPNet{ IP: attr.Value, Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), } addr.Peer = dst - case syscall.IFA_LOCAL: + case unix.IFA_LOCAL: local = &net.IPNet{ IP: attr.Value, Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), } addr.IPNet = local - case syscall.IFA_BROADCAST: + case unix.IFA_BROADCAST: addr.Broadcast = attr.Value - case syscall.IFA_LABEL: + case unix.IFA_LABEL: addr.Label = string(attr.Value[:len(attr.Value)-1]) case IFA_FLAGS: addr.Flags = int(native.Uint32(attr.Value[0:4])) @@ -237,17 +236,35 @@ type AddrUpdate struct { // AddrSubscribe takes a chan down which notifications will be sent // when addresses change. Close the 'done' chan to stop subscription. func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribe(netns.None(), netns.None(), ch, done) + return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil) } // AddrSubscribeAt works like AddrSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). 
func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribe(ns, netns.None(), ch, done) + return addrSubscribeAt(ns, netns.None(), ch, done, nil) } -func addrSubscribe(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error { - s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR) +// AddrSubscribeOptions contains a set of options to use with +// AddrSubscribeWithOptions. +type AddrSubscribeOptions struct { + Namespace *netns.NsHandle + ErrorCallback func(error) +} + +// AddrSubscribeWithOptions work like AddrSubscribe but enable to +// provide additional options to modify the behavior. Currently, the +// namespace can be provided as well as an error callback. +func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, options AddrSubscribeOptions) error { + if options.Namespace == nil { + none := netns.None() + options.Namespace = &none + } + return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback) +} + +func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error)) error { + s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR) if err != nil { return err } @@ -262,25 +279,31 @@ func addrSubscribe(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-cha for { msgs, err := s.Receive() if err != nil { - log.Printf("netlink.AddrSubscribe: Receive() error: %v", err) + if cberr != nil { + cberr(err) + } return } for _, m := range msgs { msgType := m.Header.Type - if msgType != syscall.RTM_NEWADDR && msgType != syscall.RTM_DELADDR { - log.Printf("netlink.AddrSubscribe: bad message type: %d", msgType) - continue + if msgType != unix.RTM_NEWADDR && msgType != unix.RTM_DELADDR { + if cberr != nil { + cberr(fmt.Errorf("bad message type: %d", msgType)) + } + return } addr, _, 
ifindex, err := parseAddr(m.Data) if err != nil { - log.Printf("netlink.AddrSubscribe: could not parse address: %v", err) - continue + if cberr != nil { + cberr(fmt.Errorf("could not parse address: %v", err)) + } + return } ch <- AddrUpdate{LinkAddress: *addr.IPNet, LinkIndex: ifindex, - NewAddr: msgType == syscall.RTM_NEWADDR, + NewAddr: msgType == unix.RTM_NEWADDR, Flags: addr.Flags, Scope: addr.Scope, PreferedLft: addr.PreferedLft, diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go index a65d6a1319a..6eb331ef154 100644 --- a/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -2,9 +2,9 @@ package netlink import ( "fmt" - "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) // BridgeVlanList gets a map of device id to bridge vlan infos. @@ -16,12 +16,12 @@ func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { // BridgeVlanList gets a map of device id to bridge vlan infos. 
// Equivalent to: `bridge vlan show` func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) - msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN)))) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) if err != nil { return nil, err } @@ -63,7 +63,7 @@ func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanAdd adds a new vlan filter entry // Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(syscall.RTM_SETLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, pvid, untagged, self, master) } // BridgeVlanDel adds a new vlan filter entry @@ -75,15 +75,15 @@ func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanDel adds a new vlan filter entry // Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(syscall.RTM_DELLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, pvid, untagged, self, master) } func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(cmd, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(cmd, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + msg := 
nl.NewIfInfomsg(unix.AF_BRIDGE) msg.Index = int32(base.Index) req.AddData(msg) @@ -107,7 +107,7 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged } nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) req.AddData(br) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { return err } diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go index 91cd3883de9..a4997740e29 100644 --- a/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/vendor/github.com/vishvananda/netlink/class_linux.go @@ -5,6 +5,7 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) // NOTE: function is in here because it uses other linux functions @@ -50,7 +51,7 @@ func ClassDel(class Class) error { // ClassDel will delete a class from the system. // Equivalent to: `tc class del $class` func (h *Handle) ClassDel(class Class) error { - return h.classModify(syscall.RTM_DELTCLASS, 0, class) + return h.classModify(unix.RTM_DELTCLASS, 0, class) } // ClassChange will change a class in place @@ -64,7 +65,7 @@ func ClassChange(class Class) error { // Equivalent to: `tc class change $class` // The parent and handle MUST NOT be changed. func (h *Handle) ClassChange(class Class) error { - return h.classModify(syscall.RTM_NEWTCLASS, 0, class) + return h.classModify(unix.RTM_NEWTCLASS, 0, class) } // ClassReplace will replace a class to the system. @@ -82,7 +83,7 @@ func ClassReplace(class Class) error { // If a class already exist with this parent/handle pair, the class is changed. // If a class does not already exist with this parent/handle, a new class is created. 
func (h *Handle) ClassReplace(class Class) error { - return h.classModify(syscall.RTM_NEWTCLASS, syscall.NLM_F_CREATE, class) + return h.classModify(unix.RTM_NEWTCLASS, unix.NLM_F_CREATE, class) } // ClassAdd will add a class to the system. @@ -95,14 +96,14 @@ func ClassAdd(class Class) error { // Equivalent to: `tc class add $class` func (h *Handle) ClassAdd(class Class) error { return h.classModify( - syscall.RTM_NEWTCLASS, - syscall.NLM_F_CREATE|syscall.NLM_F_EXCL, + unix.RTM_NEWTCLASS, + unix.NLM_F_CREATE|unix.NLM_F_EXCL, class, ) } func (h *Handle) classModify(cmd, flags int, class Class) error { - req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) base := class.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -112,12 +113,12 @@ func (h *Handle) classModify(cmd, flags int, class Class) error { } req.AddData(msg) - if cmd != syscall.RTM_DELTCLASS { + if cmd != unix.RTM_DELTCLASS { if err := classPayload(req, class); err != nil { return err } } - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -141,12 +142,12 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { var rtab [256]uint32 var ctab [256]uint32 tcrate := nl.TcRateSpec{Rate: uint32(htb.Rate)} - if CalcRtable(&tcrate, rtab, cellLog, uint32(mtu), linklayer) < 0 { + if CalcRtable(&tcrate, rtab[:], cellLog, uint32(mtu), linklayer) < 0 { return errors.New("HTB: failed to calculate rate table") } opt.Rate = tcrate tcceil := nl.TcRateSpec{Rate: uint32(htb.Ceil)} - if CalcRtable(&tcceil, ctab, ccellLog, uint32(mtu), linklayer) < 0 { + if CalcRtable(&tcceil, ctab[:], ccellLog, uint32(mtu), linklayer) < 0 { return errors.New("HTB: failed to calculate ceil rate table") } opt.Ceil = tcceil @@ -169,7 +170,7 @@ func ClassList(link Link, parent uint32) ([]Class, error) { // Equivalent to: `tc class show`. // Generally returns nothing if link and parent are not specified. 
func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { - req := h.newNetlinkRequest(syscall.RTM_GETTCLASS, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETTCLASS, unix.NLM_F_DUMP) msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, Parent: parent, @@ -181,7 +182,7 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { } req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTCLASS) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS) if err != nil { return nil, err } diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go index ecf04456590..a0fc74a3722 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -6,9 +6,9 @@ import ( "errors" "fmt" "net" - "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) // ConntrackTableType Conntrack table for the netlink operation @@ -85,8 +85,8 @@ func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) // conntrack -F [table] Flush table // The flush operation applies to all the family types func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error { - req := h.newConntrackRequest(table, syscall.AF_INET, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK) - _, err := req.Execute(syscall.NETLINK_NETFILTER, 0) + req := h.newConntrackRequest(table, unix.AF_INET, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) + _, err := req.Execute(unix.NETLINK_NETFILTER, 0) return err } @@ -102,10 +102,10 @@ func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFami for _, dataRaw := range res { flow := parseRawData(dataRaw) if match := filter.MatchConntrackFlow(flow); match { - req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK) + req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) // 
skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already req2.AddRawData(dataRaw[4:]) - req2.Execute(syscall.NETLINK_NETFILTER, 0) + req2.Execute(unix.NETLINK_NETFILTER, 0) matched++ } } @@ -127,8 +127,8 @@ func (h *Handle) newConntrackRequest(table ConntrackTableType, family InetFamily } func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) ([][]byte, error) { - req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, syscall.NLM_F_DUMP) - return req.Execute(syscall.NETLINK_NETFILTER, 0) + req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, unix.NLM_F_DUMP) + return req.Execute(unix.NETLINK_NETFILTER, 0) } // The full conntrack flow structure is very complicated and can be found in the file: diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go index 938b28b0b03..30b541494e2 100644 --- a/vendor/github.com/vishvananda/netlink/filter.go +++ b/vendor/github.com/vishvananda/netlink/filter.go @@ -2,8 +2,6 @@ package netlink import ( "fmt" - - "github.com/vishvananda/netlink/nl" ) type Filter interface { @@ -19,7 +17,7 @@ type FilterAttrs struct { Handle uint32 Parent uint32 Priority uint16 // lower is higher priority - Protocol uint16 // syscall.ETH_P_* + Protocol uint16 // unix.ETH_P_* } func (q FilterAttrs) String() string { @@ -184,14 +182,6 @@ func NewMirredAction(redirIndex int) *MirredAction { } } -// Constants used in TcU32Sel.Flags. -const ( - TC_U32_TERMINAL = nl.TC_U32_TERMINAL - TC_U32_OFFSET = nl.TC_U32_OFFSET - TC_U32_VAROFFSET = nl.TC_U32_VAROFFSET - TC_U32_EAT = nl.TC_U32_EAT -) - // Sel of the U32 filters that contains multiple TcU32Key. This is the copy // and the frontend representation of nl.TcU32Sel. It is serialized into canonical // nl.TcU32Sel with the appropriate endianness. 
diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go index dc0f90af880..7cb7a4fd940 100644 --- a/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -9,6 +9,15 @@ import ( "unsafe" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// Constants used in TcU32Sel.Flags. +const ( + TC_U32_TERMINAL = nl.TC_U32_TERMINAL + TC_U32_OFFSET = nl.TC_U32_OFFSET + TC_U32_VAROFFSET = nl.TC_U32_VAROFFSET + TC_U32_EAT = nl.TC_U32_EAT ) // Fw filter filters on firewall marks @@ -47,7 +56,7 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { if police.Rate.Rate != 0 { police.Rate.Mpu = fattrs.Mpu police.Rate.Overhead = fattrs.Overhead - if CalcRtable(&police.Rate, rtab, rcellLog, fattrs.Mtu, linklayer) < 0 { + if CalcRtable(&police.Rate, rtab[:], rcellLog, fattrs.Mtu, linklayer) < 0 { return nil, errors.New("TBF: failed to calculate rate table") } police.Burst = uint32(Xmittime(uint64(police.Rate.Rate), uint32(buffer))) @@ -56,7 +65,7 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { if police.PeakRate.Rate != 0 { police.PeakRate.Mpu = fattrs.Mpu police.PeakRate.Overhead = fattrs.Overhead - if CalcRtable(&police.PeakRate, ptab, pcellLog, fattrs.Mtu, linklayer) < 0 { + if CalcRtable(&police.PeakRate, ptab[:], pcellLog, fattrs.Mtu, linklayer) < 0 { return nil, errors.New("POLICE: failed to calculate peak rate table") } } @@ -90,7 +99,7 @@ func FilterDel(filter Filter) error { // FilterDel will delete a filter from the system. 
// Equivalent to: `tc filter del $filter` func (h *Handle) FilterDel(filter Filter) error { - req := h.newNetlinkRequest(syscall.RTM_DELTFILTER, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELTFILTER, unix.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -101,7 +110,7 @@ func (h *Handle) FilterDel(filter Filter) error { } req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -115,7 +124,7 @@ func FilterAdd(filter Filter) error { // Equivalent to: `tc filter add $filter` func (h *Handle) FilterAdd(filter Filter) error { native = nl.NativeEndian() - req := h.newNetlinkRequest(syscall.RTM_NEWTFILTER, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -128,9 +137,11 @@ func (h *Handle) FilterAdd(filter Filter) error { req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type()))) options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) - if u32, ok := filter.(*U32); ok { + + switch filter := filter.(type) { + case *U32: // Convert TcU32Sel into nl.TcU32Sel as it is without copy. 
- sel := (*nl.TcU32Sel)(unsafe.Pointer(u32.Sel)) + sel := (*nl.TcU32Sel)(unsafe.Pointer(filter.Sel)) if sel == nil { // match all sel = &nl.TcU32Sel{ @@ -158,63 +169,63 @@ func (h *Handle) FilterAdd(filter Filter) error { } sel.Nkeys = uint8(len(sel.Keys)) nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize()) - if u32.ClassId != 0 { - nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(u32.ClassId)) + if filter.ClassId != 0 { + nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(filter.ClassId)) } actionsAttr := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil) // backwards compatibility - if u32.RedirIndex != 0 { - u32.Actions = append([]Action{NewMirredAction(u32.RedirIndex)}, u32.Actions...) + if filter.RedirIndex != 0 { + filter.Actions = append([]Action{NewMirredAction(filter.RedirIndex)}, filter.Actions...) } - if err := EncodeActions(actionsAttr, u32.Actions); err != nil { + if err := EncodeActions(actionsAttr, filter.Actions); err != nil { return err } - } else if fw, ok := filter.(*Fw); ok { - if fw.Mask != 0 { + case *Fw: + if filter.Mask != 0 { b := make([]byte, 4) - native.PutUint32(b, fw.Mask) + native.PutUint32(b, filter.Mask) nl.NewRtAttrChild(options, nl.TCA_FW_MASK, b) } - if fw.InDev != "" { - nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(fw.InDev)) + if filter.InDev != "" { + nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev)) } - if (fw.Police != nl.TcPolice{}) { + if (filter.Police != nl.TcPolice{}) { police := nl.NewRtAttrChild(options, nl.TCA_FW_POLICE, nil) - nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, fw.Police.Serialize()) - if (fw.Police.Rate != nl.TcRateSpec{}) { - payload := SerializeRtab(fw.Rtab) + nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, filter.Police.Serialize()) + if (filter.Police.Rate != nl.TcRateSpec{}) { + payload := SerializeRtab(filter.Rtab) nl.NewRtAttrChild(police, nl.TCA_POLICE_RATE, payload) } - if (fw.Police.PeakRate != nl.TcRateSpec{}) { - payload := 
SerializeRtab(fw.Ptab) + if (filter.Police.PeakRate != nl.TcRateSpec{}) { + payload := SerializeRtab(filter.Ptab) nl.NewRtAttrChild(police, nl.TCA_POLICE_PEAKRATE, payload) } } - if fw.ClassId != 0 { + if filter.ClassId != 0 { b := make([]byte, 4) - native.PutUint32(b, fw.ClassId) + native.PutUint32(b, filter.ClassId) nl.NewRtAttrChild(options, nl.TCA_FW_CLASSID, b) } - } else if bpf, ok := filter.(*BpfFilter); ok { + case *BpfFilter: var bpfFlags uint32 - if bpf.ClassId != 0 { - nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(bpf.ClassId)) + if filter.ClassId != 0 { + nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(filter.ClassId)) } - if bpf.Fd >= 0 { - nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(bpf.Fd)))) + if filter.Fd >= 0 { + nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(filter.Fd)))) } - if bpf.Name != "" { - nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(bpf.Name)) + if filter.Name != "" { + nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(filter.Name)) } - if bpf.DirectAction { + if filter.DirectAction { bpfFlags |= nl.TCA_BPF_FLAG_ACT_DIRECT } nl.NewRtAttrChild(options, nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags)) } req.AddData(options) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -229,7 +240,7 @@ func FilterList(link Link, parent uint32) ([]Filter, error) { // Equivalent to: `tc filter show`. // Generally returns nothing if link and parent are not specified. 
func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { - req := h.newNetlinkRequest(syscall.RTM_GETTFILTER, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETTFILTER, unix.NLM_F_DUMP) msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, Parent: parent, @@ -241,7 +252,7 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { } req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTFILTER) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER) if err != nil { return nil, err } @@ -552,7 +563,7 @@ func AdjustSize(sz uint, mpu uint, linklayer int) uint { } } -func CalcRtable(rate *nl.TcRateSpec, rtab [256]uint32, cellLog int, mtu uint32, linklayer int) int { +func CalcRtable(rate *nl.TcRateSpec, rtab []uint32, cellLog int, mtu uint32, linklayer int) int { bps := rate.Rate mpu := rate.Mpu var sz uint diff --git a/vendor/github.com/vishvananda/netlink/fou.go b/vendor/github.com/vishvananda/netlink/fou.go new file mode 100644 index 00000000000..71e73c37a0a --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/fou.go @@ -0,0 +1,21 @@ +package netlink + +import ( + "errors" +) + +var ( + // ErrAttrHeaderTruncated is returned when a netlink attribute's header is + // truncated. + ErrAttrHeaderTruncated = errors.New("attribute header truncated") + // ErrAttrBodyTruncated is returned when a netlink attribute's body is + // truncated. 
+ ErrAttrBodyTruncated = errors.New("attribute body truncated") +) + +type Fou struct { + Family int + Port int + Protocol int + EncapType int +} diff --git a/vendor/github.com/vishvananda/netlink/fou_linux.go b/vendor/github.com/vishvananda/netlink/fou_linux.go new file mode 100644 index 00000000000..62d59bd2d09 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/fou_linux.go @@ -0,0 +1,215 @@ +// +build linux + +package netlink + +import ( + "encoding/binary" + "errors" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +const ( + FOU_GENL_NAME = "fou" +) + +const ( + FOU_CMD_UNSPEC uint8 = iota + FOU_CMD_ADD + FOU_CMD_DEL + FOU_CMD_GET + FOU_CMD_MAX = FOU_CMD_GET +) + +const ( + FOU_ATTR_UNSPEC = iota + FOU_ATTR_PORT + FOU_ATTR_AF + FOU_ATTR_IPPROTO + FOU_ATTR_TYPE + FOU_ATTR_REMCSUM_NOPARTIAL + FOU_ATTR_MAX = FOU_ATTR_REMCSUM_NOPARTIAL +) + +const ( + FOU_ENCAP_UNSPEC = iota + FOU_ENCAP_DIRECT + FOU_ENCAP_GUE + FOU_ENCAP_MAX = FOU_ENCAP_GUE +) + +var fouFamilyId int + +func FouFamilyId() (int, error) { + if fouFamilyId != 0 { + return fouFamilyId, nil + } + + fam, err := GenlFamilyGet(FOU_GENL_NAME) + if err != nil { + return -1, err + } + + fouFamilyId = int(fam.ID) + return fouFamilyId, nil +} + +func FouAdd(f Fou) error { + return pkgHandle.FouAdd(f) +} + +func (h *Handle) FouAdd(f Fou) error { + fam_id, err := FouFamilyId() + if err != nil { + return err + } + + // setting ip protocol conflicts with encapsulation type GUE + if f.EncapType == FOU_ENCAP_GUE && f.Protocol != 0 { + return errors.New("GUE encapsulation doesn't specify an IP protocol") + } + + req := h.newNetlinkRequest(fam_id, unix.NLM_F_ACK) + + // int to byte for port + bp := make([]byte, 2) + binary.BigEndian.PutUint16(bp[0:2], uint16(f.Port)) + + attrs := []*nl.RtAttr{ + nl.NewRtAttr(FOU_ATTR_PORT, bp), + nl.NewRtAttr(FOU_ATTR_TYPE, []byte{uint8(f.EncapType)}), + nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(f.Family)}), + nl.NewRtAttr(FOU_ATTR_IPPROTO, 
[]byte{uint8(f.Protocol)}), + } + raw := []byte{FOU_CMD_ADD, 1, 0, 0} + for _, a := range attrs { + raw = append(raw, a.Serialize()...) + } + + req.AddRawData(raw) + + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return err + } + + return nil +} + +func FouDel(f Fou) error { + return pkgHandle.FouDel(f) +} + +func (h *Handle) FouDel(f Fou) error { + fam_id, err := FouFamilyId() + if err != nil { + return err + } + + req := h.newNetlinkRequest(fam_id, unix.NLM_F_ACK) + + // int to byte for port + bp := make([]byte, 2) + binary.BigEndian.PutUint16(bp[0:2], uint16(f.Port)) + + attrs := []*nl.RtAttr{ + nl.NewRtAttr(FOU_ATTR_PORT, bp), + nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(f.Family)}), + } + raw := []byte{FOU_CMD_DEL, 1, 0, 0} + for _, a := range attrs { + raw = append(raw, a.Serialize()...) + } + + req.AddRawData(raw) + + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return err + } + + return nil +} + +func FouList(fam int) ([]Fou, error) { + return pkgHandle.FouList(fam) +} + +func (h *Handle) FouList(fam int) ([]Fou, error) { + fam_id, err := FouFamilyId() + if err != nil { + return nil, err + } + + req := h.newNetlinkRequest(fam_id, unix.NLM_F_DUMP) + + attrs := []*nl.RtAttr{ + nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(fam)}), + } + raw := []byte{FOU_CMD_GET, 1, 0, 0} + for _, a := range attrs { + raw = append(raw, a.Serialize()...) 
+ } + + req.AddRawData(raw) + + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + + fous := make([]Fou, 0, len(msgs)) + for _, m := range msgs { + f, err := deserializeFouMsg(m) + if err != nil { + return fous, err + } + + fous = append(fous, f) + } + + return fous, nil +} + +func deserializeFouMsg(msg []byte) (Fou, error) { + // we'll skip to byte 4 to first attribute + msg = msg[3:] + var shift int + fou := Fou{} + + for { + // attribute header is at least 16 bits + if len(msg) < 4 { + return fou, ErrAttrHeaderTruncated + } + + lgt := int(binary.BigEndian.Uint16(msg[0:2])) + if len(msg) < lgt+4 { + return fou, ErrAttrBodyTruncated + } + attr := binary.BigEndian.Uint16(msg[2:4]) + + shift = lgt + 3 + switch attr { + case FOU_ATTR_AF: + fou.Family = int(msg[5]) + case FOU_ATTR_PORT: + fou.Port = int(binary.BigEndian.Uint16(msg[5:7])) + // port is 2 bytes + shift = lgt + 2 + case FOU_ATTR_IPPROTO: + fou.Protocol = int(msg[5]) + case FOU_ATTR_TYPE: + fou.EncapType = int(msg[5]) + } + + msg = msg[shift:] + + if len(msg) < 4 { + break + } + } + + return fou, nil +} diff --git a/vendor/github.com/vishvananda/netlink/fou_unspecified.go b/vendor/github.com/vishvananda/netlink/fou_unspecified.go new file mode 100644 index 00000000000..3a8365bfe62 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/fou_unspecified.go @@ -0,0 +1,15 @@ +// +build !linux + +package netlink + +func FouAdd(f Fou) error { + return ErrNotImplemented +} + +func FouDel(f Fou) error { + return ErrNotImplemented +} + +func FouList(fam int) ([]Fou, error) { + return nil, ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netlink/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/genetlink_linux.go index a388a87001c..ce7969907d4 100644 --- a/vendor/github.com/vishvananda/netlink/genetlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/genetlink_linux.go @@ -5,6 +5,7 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" + 
"golang.org/x/sys/unix" ) type GenlOp struct { @@ -130,9 +131,9 @@ func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) { Command: nl.GENL_CTRL_CMD_GETFAMILY, Version: nl.GENL_CTRL_VERSION, } - req := h.newNetlinkRequest(nl.GENL_ID_CTRL, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.GENL_ID_CTRL, unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -151,7 +152,7 @@ func (h *Handle) GenlFamilyGet(name string) (*GenlFamily, error) { req := h.newNetlinkRequest(nl.GENL_ID_CTRL, 0) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_CTRL_ATTR_FAMILY_NAME, nl.ZeroTerminated(name))) - msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) if err != nil { return nil, err } diff --git a/vendor/github.com/vishvananda/netlink/gtp_linux.go b/vendor/github.com/vishvananda/netlink/gtp_linux.go index 7331303ecbe..f5e160ba5c0 100644 --- a/vendor/github.com/vishvananda/netlink/gtp_linux.go +++ b/vendor/github.com/vishvananda/netlink/gtp_linux.go @@ -7,6 +7,7 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) type PDP struct { @@ -82,9 +83,9 @@ func (h *Handle) GTPPDPList() ([]*PDP, error) { Command: nl.GENL_GTP_CMD_GETPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -96,7 +97,7 @@ func GTPPDPList() ([]*PDP, error) { } func gtpPDPGet(req *nl.NetlinkRequest) (*PDP, error) { - msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -182,7 +183,7 @@ func (h *Handle) GTPPDPAdd(link Link, pdp *PDP) error { Command: 
nl.GENL_GTP_CMD_NEWPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_EXCL|unix.NLM_F_ACK) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(pdp.Version))) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index)))) @@ -199,7 +200,7 @@ func (h *Handle) GTPPDPAdd(link Link, pdp *PDP) error { default: return fmt.Errorf("unsupported GTP version: %d", pdp.Version) } - _, err = req.Execute(syscall.NETLINK_GENERIC, 0) + _, err = req.Execute(unix.NETLINK_GENERIC, 0) return err } @@ -216,7 +217,7 @@ func (h *Handle) GTPPDPDel(link Link, pdp *PDP) error { Command: nl.GENL_GTP_CMD_DELPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_EXCL|unix.NLM_F_ACK) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(pdp.Version))) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index)))) @@ -229,7 +230,7 @@ func (h *Handle) GTPPDPDel(link Link, pdp *PDP) error { default: return fmt.Errorf("unsupported GTP version: %d", pdp.Version) } - _, err = req.Execute(syscall.NETLINK_GENERIC, 0) + _, err = req.Execute(unix.NETLINK_GENERIC, 0) return err } diff --git a/vendor/github.com/vishvananda/netlink/handle_linux.go b/vendor/github.com/vishvananda/netlink/handle_linux.go index a04ceae6b61..9f6d7fe0fbd 100644 --- a/vendor/github.com/vishvananda/netlink/handle_linux.go +++ b/vendor/github.com/vishvananda/netlink/handle_linux.go @@ -2,11 +2,11 @@ package netlink import ( "fmt" - "syscall" "time" "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) // Empty handle used by the netlink package methods @@ -43,14 +43,29 @@ func (h *Handle) SetSocketTimeout(to time.Duration) error { if to < time.Microsecond { 
return fmt.Errorf("invalid timeout, minimul value is %s", time.Microsecond) } - tv := syscall.NsecToTimeval(to.Nanoseconds()) + tv := unix.NsecToTimeval(to.Nanoseconds()) for _, sh := range h.sockets { - fd := sh.Socket.GetFd() - err := syscall.SetsockoptTimeval(fd, syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, &tv) - if err != nil { + if err := sh.Socket.SetSendTimeout(&tv); err != nil { return err } - err = syscall.SetsockoptTimeval(fd, syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, &tv) + if err := sh.Socket.SetReceiveTimeout(&tv); err != nil { + return err + } + } + return nil +} + +// SetSocketReceiveBufferSize sets the receive buffer size for each +// socket in the netlink handle. The maximum value is capped by +// /proc/sys/net/core/rmem_max. +func (h *Handle) SetSocketReceiveBufferSize(size int, force bool) error { + opt := unix.SO_RCVBUF + if force { + opt = unix.SO_RCVBUFFORCE + } + for _, sh := range h.sockets { + fd := sh.Socket.GetFd() + err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, opt, size) if err != nil { return err } @@ -58,6 +73,24 @@ func (h *Handle) SetSocketTimeout(to time.Duration) error { return nil } +// GetSocketReceiveBufferSize gets the receiver buffer size for each +// socket in the netlink handle. The retrieved value should be the +// double to the one set for SetSocketReceiveBufferSize. +func (h *Handle) GetSocketReceiveBufferSize() ([]int, error) { + results := make([]int, len(h.sockets)) + i := 0 + for _, sh := range h.sockets { + fd := sh.Socket.GetFd() + size, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF) + if err != nil { + return nil, err + } + results[i] = size + i++ + } + return results, nil +} + // NewHandle returns a netlink handle on the network namespace // specified by ns. 
If ns=netns.None(), current network namespace // will be assumed @@ -101,10 +134,10 @@ func (h *Handle) newNetlinkRequest(proto, flags int) *nl.NetlinkRequest { return nl.NewNetlinkRequest(proto, flags) } return &nl.NetlinkRequest{ - NlMsghdr: syscall.NlMsghdr{ - Len: uint32(syscall.SizeofNlMsghdr), + NlMsghdr: unix.NlMsghdr{ + Len: uint32(unix.SizeofNlMsghdr), Type: uint16(proto), - Flags: syscall.NLM_F_REQUEST | uint16(flags), + Flags: unix.NLM_F_REQUEST | uint16(flags), }, Sockets: h.sockets, } diff --git a/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/vendor/github.com/vishvananda/netlink/handle_unspecified.go index 32cf022732d..7da21a6a184 100644 --- a/vendor/github.com/vishvananda/netlink/handle_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/handle_unspecified.go @@ -145,6 +145,10 @@ func (h *Handle) LinkSetFlood(link Link, mode bool) error { return ErrNotImplemented } +func (h *Handle) LinkSetTxQLen(link Link, qlen int) error { + return ErrNotImplemented +} + func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { return ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index 4e77037b5c3..d8ba16a948f 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -3,6 +3,7 @@ package netlink import ( "fmt" "net" + "os" ) // Link represents a link device from netlink. 
Shared link attributes @@ -37,6 +38,9 @@ type LinkAttrs struct { EncapType string Protinfo *Protinfo OperState LinkOperState + NetNsID int + NumTxQueues int + NumRxQueues int } // LinkOperState represents the values of the IFLA_OPERSTATE link @@ -258,6 +262,9 @@ const ( type Macvlan struct { LinkAttrs Mode MacvlanMode + + // MACAddrs is only populated for Macvlan SOURCE links + MACAddrs []net.HardwareAddr } func (macvlan *Macvlan) Attrs() *LinkAttrs { @@ -283,8 +290,10 @@ type TuntapFlag uint16 // Tuntap links created via /dev/tun/tap, but can be destroyed via netlink type Tuntap struct { LinkAttrs - Mode TuntapMode - Flags TuntapFlag + Mode TuntapMode + Flags TuntapFlag + Queues int + Fds []*os.File } func (tuntap *Tuntap) Attrs() *LinkAttrs { @@ -326,26 +335,28 @@ func (generic *GenericLink) Type() string { type Vxlan struct { LinkAttrs - VxlanId int - VtepDevIndex int - SrcAddr net.IP - Group net.IP - TTL int - TOS int - Learning bool - Proxy bool - RSC bool - L2miss bool - L3miss bool - UDPCSum bool - NoAge bool - GBP bool - FlowBased bool - Age int - Limit int - Port int - PortLow int - PortHigh int + VxlanId int + VtepDevIndex int + SrcAddr net.IP + Group net.IP + TTL int + TOS int + Learning bool + Proxy bool + RSC bool + L2miss bool + L3miss bool + UDPCSum bool + UDP6ZeroCSumTx bool + UDP6ZeroCSumRx bool + NoAge bool + GBP bool + FlowBased bool + Age int + Limit int + Port int + PortLow int + PortHigh int } func (vxlan *Vxlan) Attrs() *LinkAttrs { @@ -699,12 +710,17 @@ func (gretap *Gretap) Type() string { type Iptun struct { LinkAttrs - Ttl uint8 - Tos uint8 - PMtuDisc uint8 - Link uint32 - Local net.IP - Remote net.IP + Ttl uint8 + Tos uint8 + PMtuDisc uint8 + Link uint32 + Local net.IP + Remote net.IP + EncapSport uint16 + EncapDport uint16 + EncapType uint16 + EncapFlags uint16 + FlowBased bool } func (iptun *Iptun) Attrs() *LinkAttrs { @@ -715,6 +731,28 @@ func (iptun *Iptun) Type() string { return "ipip" } +type Sittun struct { + LinkAttrs + Link 
uint32 + Local net.IP + Remote net.IP + Ttl uint8 + Tos uint8 + PMtuDisc uint8 + EncapType uint16 + EncapFlags uint16 + EncapSport uint16 + EncapDport uint16 +} + +func (sittun *Sittun) Attrs() *LinkAttrs { + return &sittun.LinkAttrs +} + +func (sittun *Sittun) Type() string { + return "sit" +} + type Vti struct { LinkAttrs IKey uint32 @@ -732,6 +770,32 @@ func (iptun *Vti) Type() string { return "vti" } +type Gretun struct { + LinkAttrs + Link uint32 + IFlags uint16 + OFlags uint16 + IKey uint32 + OKey uint32 + Local net.IP + Remote net.IP + Ttl uint8 + Tos uint8 + PMtuDisc uint8 + EncapType uint16 + EncapFlags uint16 + EncapSport uint16 + EncapDport uint16 +} + +func (gretun *Gretun) Attrs() *LinkAttrs { + return &gretun.LinkAttrs +} + +func (gretun *Gretun) Type() string { + return "gre" +} + type Vrf struct { LinkAttrs Table uint32 diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index 0fe70295ef2..a6ae1041877 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -11,6 +11,7 @@ import ( "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) const ( @@ -20,13 +21,15 @@ const ( ) const ( - TUNTAP_MODE_TUN TuntapMode = syscall.IFF_TUN - TUNTAP_MODE_TAP TuntapMode = syscall.IFF_TAP - TUNTAP_DEFAULTS TuntapFlag = syscall.IFF_TUN_EXCL | syscall.IFF_ONE_QUEUE - TUNTAP_VNET_HDR TuntapFlag = syscall.IFF_VNET_HDR - TUNTAP_TUN_EXCL TuntapFlag = syscall.IFF_TUN_EXCL - TUNTAP_NO_PI TuntapFlag = syscall.IFF_NO_PI - TUNTAP_ONE_QUEUE TuntapFlag = syscall.IFF_ONE_QUEUE + TUNTAP_MODE_TUN TuntapMode = unix.IFF_TUN + TUNTAP_MODE_TAP TuntapMode = unix.IFF_TAP + TUNTAP_DEFAULTS TuntapFlag = unix.IFF_TUN_EXCL | unix.IFF_ONE_QUEUE + TUNTAP_VNET_HDR TuntapFlag = unix.IFF_VNET_HDR + TUNTAP_TUN_EXCL TuntapFlag = unix.IFF_TUN_EXCL + TUNTAP_NO_PI TuntapFlag = unix.IFF_NO_PI + TUNTAP_ONE_QUEUE TuntapFlag = 
unix.IFF_ONE_QUEUE + TUNTAP_MULTI_QUEUE TuntapFlag = 0x0100 + TUNTAP_MULTI_QUEUE_DEFAULTS TuntapFlag = TUNTAP_MULTI_QUEUE | TUNTAP_NO_PI ) var lookupByDump = false @@ -61,15 +64,15 @@ func (h *Handle) ensureIndex(link *LinkAttrs) { func (h *Handle) LinkSetARPOff(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change |= syscall.IFF_NOARP - msg.Flags |= syscall.IFF_NOARP + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change |= unix.IFF_NOARP + msg.Flags |= unix.IFF_NOARP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -80,15 +83,15 @@ func LinkSetARPOff(link Link) error { func (h *Handle) LinkSetARPOn(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change |= syscall.IFF_NOARP - msg.Flags &= ^uint32(syscall.IFF_NOARP) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change |= unix.IFF_NOARP + msg.Flags &= ^uint32(unix.IFF_NOARP) msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -99,15 +102,84 @@ func LinkSetARPOn(link Link) error { func (h *Handle) SetPromiscOn(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change = syscall.IFF_PROMISC - msg.Flags = syscall.IFF_PROMISC + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_PROMISC + msg.Flags = unix.IFF_PROMISC msg.Index 
= int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { + return pkgHandle.MacvlanMACAddrAdd(link, addr) +} + +func (h *Handle) MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { + return h.macvlanMACAddrChange(link, []net.HardwareAddr{addr}, nl.MACVLAN_MACADDR_ADD) +} + +func MacvlanMACAddrDel(link Link, addr net.HardwareAddr) error { + return pkgHandle.MacvlanMACAddrDel(link, addr) +} + +func (h *Handle) MacvlanMACAddrDel(link Link, addr net.HardwareAddr) error { + return h.macvlanMACAddrChange(link, []net.HardwareAddr{addr}, nl.MACVLAN_MACADDR_DEL) +} + +func MacvlanMACAddrFlush(link Link) error { + return pkgHandle.MacvlanMACAddrFlush(link) +} + +func (h *Handle) MacvlanMACAddrFlush(link Link) error { + return h.macvlanMACAddrChange(link, nil, nl.MACVLAN_MACADDR_FLUSH) +} + +func MacvlanMACAddrSet(link Link, addrs []net.HardwareAddr) error { + return pkgHandle.MacvlanMACAddrSet(link, addrs) +} + +func (h *Handle) MacvlanMACAddrSet(link Link, addrs []net.HardwareAddr) error { + return h.macvlanMACAddrChange(link, addrs, nl.MACVLAN_MACADDR_SET) +} + +func (h *Handle) macvlanMACAddrChange(link Link, addrs []net.HardwareAddr, mode uint32) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) + inner := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + + // IFLA_MACVLAN_MACADDR_MODE = mode + b := make([]byte, 4) + native.PutUint32(b, mode) + nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_MODE, b) + + // populate message with MAC addrs, if necessary + switch mode { + case nl.MACVLAN_MACADDR_ADD, 
nl.MACVLAN_MACADDR_DEL: + if len(addrs) == 1 { + nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR, []byte(addrs[0])) + } + case nl.MACVLAN_MACADDR_SET: + mad := nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_DATA, nil) + for _, addr := range addrs { + nl.NewRtAttrChild(mad, nl.IFLA_MACVLAN_MACADDR, []byte(addr)) + } + } + + req.AddData(linkInfo) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -118,7 +190,7 @@ func BridgeSetMcastSnoop(link Link, on bool) error { func (h *Handle) BridgeSetMcastSnoop(link Link, on bool) error { bridge := link.(*Bridge) bridge.MulticastSnooping = &on - return h.linkModify(bridge, syscall.NLM_F_ACK) + return h.linkModify(bridge, unix.NLM_F_ACK) } func SetPromiscOn(link Link) error { @@ -128,15 +200,15 @@ func SetPromiscOn(link Link) error { func (h *Handle) SetPromiscOff(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change = syscall.IFF_PROMISC - msg.Flags = 0 & ^syscall.IFF_PROMISC + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_PROMISC + msg.Flags = 0 & ^unix.IFF_PROMISC msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -155,15 +227,15 @@ func LinkSetUp(link Link) error { func (h *Handle) LinkSetUp(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change = syscall.IFF_UP - msg.Flags = syscall.IFF_UP + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_UP + msg.Flags = unix.IFF_UP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := 
req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -178,15 +250,15 @@ func LinkSetDown(link Link) error { func (h *Handle) LinkSetDown(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) - msg.Change = syscall.IFF_UP - msg.Flags = 0 & ^syscall.IFF_UP + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_UP + msg.Flags = 0 & ^unix.IFF_UP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -201,19 +273,19 @@ func LinkSetMTU(link Link, mtu int) error { func (h *Handle) LinkSetMTU(link Link, mtu int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(mtu)) - data := nl.NewRtAttr(syscall.IFLA_MTU, b) + data := nl.NewRtAttr(unix.IFLA_MTU, b) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -228,16 +300,16 @@ func LinkSetName(link Link, name string) error { func (h *Handle) LinkSetName(link Link, name string) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(syscall.IFLA_IFNAME, []byte(name)) + data := nl.NewRtAttr(unix.IFLA_IFNAME, []byte(name)) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 
0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -252,16 +324,16 @@ func LinkSetAlias(link Link, name string) error { func (h *Handle) LinkSetAlias(link Link, name string) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(syscall.IFLA_IFALIAS, []byte(name)) + data := nl.NewRtAttr(unix.IFLA_IFALIAS, []byte(name)) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -276,16 +348,16 @@ func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { func (h *Handle) LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(hwaddr)) + data := nl.NewRtAttr(unix.IFLA_ADDRESS, []byte(hwaddr)) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -300,9 +372,9 @@ func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -315,7 +387,7 @@ func (h 
*Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAdd nl.NewRtAttrChild(info, nl.IFLA_VF_MAC, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -330,9 +402,9 @@ func LinkSetVfVlan(link Link, vf, vlan int) error { func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -345,7 +417,7 @@ func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { nl.NewRtAttrChild(info, nl.IFLA_VF_VLAN, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -360,9 +432,9 @@ func LinkSetVfTxRate(link Link, vf, rate int) error { func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -375,7 +447,7 @@ func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { nl.NewRtAttrChild(info, nl.IFLA_VF_TX_RATE, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -391,9 +463,9 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { var setting uint32 base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := 
nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -409,7 +481,7 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { nl.NewRtAttrChild(info, nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -425,9 +497,9 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { var setting uint32 base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -443,7 +515,7 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { nl.NewRtAttrChild(info, nl.IFLA_VF_TRUST, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -491,19 +563,19 @@ func LinkSetMasterByIndex(link Link, masterIndex int) error { func (h *Handle) LinkSetMasterByIndex(link Link, masterIndex int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(masterIndex)) - data := nl.NewRtAttr(syscall.IFLA_MASTER, b) + data := nl.NewRtAttr(unix.IFLA_MASTER, b) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -520,19 +592,19 @@ func LinkSetNsPid(link Link, nspid int) error { func (h *Handle) LinkSetNsPid(link Link, nspid int) error { base := link.Attrs() 
h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(nspid)) - data := nl.NewRtAttr(syscall.IFLA_NET_NS_PID, b) + data := nl.NewRtAttr(unix.IFLA_NET_NS_PID, b) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -549,9 +621,9 @@ func LinkSetNsFd(link Link, fd int) error { func (h *Handle) LinkSetNsFd(link Link, fd int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -561,24 +633,30 @@ func (h *Handle) LinkSetNsFd(link Link, fd int) error { data := nl.NewRtAttr(nl.IFLA_NET_NS_FD, b) req.AddData(data) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } // LinkSetXdpFd adds a bpf function to the driver. The fd must be a bpf // program loaded with bpf(type=BPF_PROG_TYPE_XDP) func LinkSetXdpFd(link Link, fd int) error { + return LinkSetXdpFdWithFlags(link, fd, 0) +} + +// LinkSetXdpFdWithFlags adds a bpf function to the driver with the given +// options. 
The fd must be a bpf program loaded with bpf(type=BPF_PROG_TYPE_XDP) +func LinkSetXdpFdWithFlags(link Link, fd, flags int) error { base := link.Attrs() ensureIndex(base) - req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := nl.NewNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - addXdpAttrs(&LinkXdp{Fd: fd}, req) + addXdpAttrs(&LinkXdp{Fd: fd, Flags: uint32(flags)}, req) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -636,6 +714,8 @@ func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) { nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC)) nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss)) nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss)) + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX, boolAttr(vxlan.UDP6ZeroCSumTx)) + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX, boolAttr(vxlan.UDP6ZeroCSumRx)) if vxlan.UDPCSum { nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum)) @@ -760,6 +840,12 @@ func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) { } } +func cleanupFds(fds []*os.File) { + for _, f := range fds { + f.Close() + } +} + // LinkAdd adds a new link device. The type and features of the device // are taken from the parameters in the link object. // Equivalent to: `ip link add $link` @@ -771,7 +857,7 @@ func LinkAdd(link Link) error { // are taken fromt the parameters in the link object. 
// Equivalent to: `ip link add $link` func (h *Handle) LinkAdd(link Link) error { - return h.linkModify(link, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + return h.linkModify(link, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) } func (h *Handle) linkModify(link Link, flags int) error { @@ -785,101 +871,152 @@ func (h *Handle) linkModify(link Link, flags int) error { if tuntap, ok := link.(*Tuntap); ok { // TODO: support user // TODO: support group - // TODO: multi_queue // TODO: support non- persistent - if tuntap.Mode < syscall.IFF_TUN || tuntap.Mode > syscall.IFF_TAP { + if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP { return fmt.Errorf("Tuntap.Mode %v unknown!", tuntap.Mode) } - file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) - if err != nil { - return err - } - defer file.Close() + + queues := tuntap.Queues + + var fds []*os.File var req ifReq - if tuntap.Flags == 0 { - req.Flags = uint16(TUNTAP_DEFAULTS) - } else { - req.Flags = uint16(tuntap.Flags) - } - req.Flags |= uint16(tuntap.Mode) copy(req.Name[:15], base.Name) - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETIFF), uintptr(unsafe.Pointer(&req))) - if errno != 0 { - return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed, errno %v", errno) + + req.Flags = uint16(tuntap.Flags) + + if queues == 0 { //Legacy compatibility + queues = 1 + if tuntap.Flags == 0 { + req.Flags = uint16(TUNTAP_DEFAULTS) + } + } else { + // For best peformance set Flags to TUNTAP_MULTI_QUEUE_DEFAULTS | TUNTAP_VNET_HDR + // when a) KVM has support for this ABI and + // b) the value of the flag is queryable using the TUNGETIFF ioctl + if tuntap.Flags == 0 { + req.Flags = uint16(TUNTAP_MULTI_QUEUE_DEFAULTS) + } } - _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETPERSIST), 1) + + req.Flags |= uint16(tuntap.Mode) + + for i := 0; i < queues; i++ { + localReq := req + file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) + if err != nil 
{ + cleanupFds(fds) + return err + } + + fds = append(fds, file) + _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&localReq))) + if errno != 0 { + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed [%d], errno %v", i, errno) + } + } + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 1) if errno != 0 { + cleanupFds(fds) return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno) } + h.ensureIndex(base) // can't set master during create, so set it afterwards if base.MasterIndex != 0 { // TODO: verify MasterIndex is actually a bridge? - return h.LinkSetMasterByIndex(link, base.MasterIndex) + err := h.LinkSetMasterByIndex(link, base.MasterIndex) + if err != nil { + _, _, _ = unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 0) + cleanupFds(fds) + return err + } } + + if tuntap.Queues == 0 { + cleanupFds(fds) + } else { + tuntap.Fds = fds + } + return nil } - req := h.newNetlinkRequest(syscall.RTM_NEWLINK, flags) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, flags) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) // TODO: make it shorter if base.Flags&net.FlagUp != 0 { - msg.Change = syscall.IFF_UP - msg.Flags = syscall.IFF_UP + msg.Change = unix.IFF_UP + msg.Flags = unix.IFF_UP } if base.Flags&net.FlagBroadcast != 0 { - msg.Change |= syscall.IFF_BROADCAST - msg.Flags |= syscall.IFF_BROADCAST + msg.Change |= unix.IFF_BROADCAST + msg.Flags |= unix.IFF_BROADCAST } if base.Flags&net.FlagLoopback != 0 { - msg.Change |= syscall.IFF_LOOPBACK - msg.Flags |= syscall.IFF_LOOPBACK + msg.Change |= unix.IFF_LOOPBACK + msg.Flags |= unix.IFF_LOOPBACK } if base.Flags&net.FlagPointToPoint != 0 { - msg.Change |= syscall.IFF_POINTOPOINT - msg.Flags |= syscall.IFF_POINTOPOINT + msg.Change |= unix.IFF_POINTOPOINT + msg.Flags |= unix.IFF_POINTOPOINT } if base.Flags&net.FlagMulticast != 0 { - msg.Change |= 
syscall.IFF_MULTICAST - msg.Flags |= syscall.IFF_MULTICAST + msg.Change |= unix.IFF_MULTICAST + msg.Flags |= unix.IFF_MULTICAST } + if base.Index != 0 { + msg.Index = int32(base.Index) + } + req.AddData(msg) if base.ParentIndex != 0 { b := make([]byte, 4) native.PutUint32(b, uint32(base.ParentIndex)) - data := nl.NewRtAttr(syscall.IFLA_LINK, b) + data := nl.NewRtAttr(unix.IFLA_LINK, b) req.AddData(data) } else if link.Type() == "ipvlan" { return fmt.Errorf("Can't create ipvlan link without ParentIndex") } - nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) + nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) req.AddData(nameData) if base.MTU > 0 { - mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + mtu := nl.NewRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) req.AddData(mtu) } if base.TxQLen >= 0 { - qlen := nl.NewRtAttr(syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + qlen := nl.NewRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) req.AddData(qlen) } if base.HardwareAddr != nil { - hwaddr := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(base.HardwareAddr)) + hwaddr := nl.NewRtAttr(unix.IFLA_ADDRESS, []byte(base.HardwareAddr)) req.AddData(hwaddr) } + if base.NumTxQueues > 0 { + txqueues := nl.NewRtAttr(nl.IFLA_NUM_TX_QUEUES, nl.Uint32Attr(uint32(base.NumTxQueues))) + req.AddData(txqueues) + } + + if base.NumRxQueues > 0 { + rxqueues := nl.NewRtAttr(nl.IFLA_NUM_RX_QUEUES, nl.Uint32Attr(uint32(base.NumRxQueues))) + req.AddData(rxqueues) + } + if base.Namespace != nil { var attr *nl.RtAttr switch base.Namespace.(type) { case NsPid: val := nl.Uint32Attr(uint32(base.Namespace.(NsPid))) - attr = nl.NewRtAttr(syscall.IFLA_NET_NS_PID, val) + attr = nl.NewRtAttr(unix.IFLA_NET_NS_PID, val) case NsFd: val := nl.Uint32Attr(uint32(base.Namespace.(NsFd))) attr = nl.NewRtAttr(nl.IFLA_NET_NS_FD, val) @@ -892,60 +1029,65 @@ func (h *Handle) linkModify(link Link, flags int) error { 
addXdpAttrs(base.Xdp, req) } - linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil) + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) - if vlan, ok := link.(*Vlan); ok { + switch link := link.(type) { + case *Vlan: b := make([]byte, 2) - native.PutUint16(b, uint16(vlan.VlanId)) + native.PutUint16(b, uint16(link.VlanId)) data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b) - } else if veth, ok := link.(*Veth); ok { + case *Veth: data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil) - nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC) - nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(veth.PeerName)) + nl.NewIfInfomsgChild(peer, unix.AF_UNSPEC) + nl.NewRtAttrChild(peer, unix.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) if base.TxQLen >= 0 { - nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + nl.NewRtAttrChild(peer, unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) } if base.MTU > 0 { - nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + nl.NewRtAttrChild(peer, unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) } - } else if vxlan, ok := link.(*Vxlan); ok { - addVxlanAttrs(vxlan, linkInfo) - } else if bond, ok := link.(*Bond); ok { - addBondAttrs(bond, linkInfo) - } else if ipv, ok := link.(*IPVlan); ok { + case *Vxlan: + addVxlanAttrs(link, linkInfo) + case *Bond: + addBondAttrs(link, linkInfo) + case *IPVlan: data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(ipv.Mode))) - } else if macv, ok := link.(*Macvlan); ok { - if macv.Mode != MACVLAN_MODE_DEFAULT { + nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) + case *Macvlan: + if link.Mode != MACVLAN_MODE_DEFAULT { data := nl.NewRtAttrChild(linkInfo, 
nl.IFLA_INFO_DATA, nil) - nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode])) + nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) } - } else if macv, ok := link.(*Macvtap); ok { - if macv.Mode != MACVLAN_MODE_DEFAULT { + case *Macvtap: + if link.Mode != MACVLAN_MODE_DEFAULT { data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode])) + nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) } - } else if gretap, ok := link.(*Gretap); ok { - addGretapAttrs(gretap, linkInfo) - } else if iptun, ok := link.(*Iptun); ok { - addIptunAttrs(iptun, linkInfo) - } else if vti, ok := link.(*Vti); ok { - addVtiAttrs(vti, linkInfo) - } else if vrf, ok := link.(*Vrf); ok { - addVrfAttrs(vrf, linkInfo) - } else if bridge, ok := link.(*Bridge); ok { - addBridgeAttrs(bridge, linkInfo) - } else if gtp, ok := link.(*GTP); ok { - addGTPAttrs(gtp, linkInfo) + case *Gretap: + addGretapAttrs(link, linkInfo) + case *Iptun: + addIptunAttrs(link, linkInfo) + case *Sittun: + addSittunAttrs(link, linkInfo) + case *Gretun: + addGretunAttrs(link, linkInfo) + case *Vti: + addVtiAttrs(link, linkInfo) + case *Vrf: + addVrfAttrs(link, linkInfo) + case *Bridge: + addBridgeAttrs(link, linkInfo) + case *GTP: + addGTPAttrs(link, linkInfo) } req.AddData(linkInfo) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { return err } @@ -975,13 +1117,13 @@ func (h *Handle) LinkDel(link Link) error { h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := 
req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -1024,16 +1166,16 @@ func (h *Handle) LinkByName(name string) (Link, error) { return h.linkByNameDump(name) } - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) req.AddData(msg) - nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name)) + nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(name)) req.AddData(nameData) link, err := execGetLink(req) - if err == syscall.EINVAL { + if err == unix.EINVAL { // older kernels don't support looking up via IFLA_IFNAME // so fall back to dumping all links h.lookupByDump = true @@ -1056,16 +1198,16 @@ func (h *Handle) LinkByAlias(alias string) (Link, error) { return h.linkByAliasDump(alias) } - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) req.AddData(msg) - nameData := nl.NewRtAttr(syscall.IFLA_IFALIAS, nl.ZeroTerminated(alias)) + nameData := nl.NewRtAttr(unix.IFLA_IFALIAS, nl.ZeroTerminated(alias)) req.AddData(nameData) link, err := execGetLink(req) - if err == syscall.EINVAL { + if err == unix.EINVAL { // older kernels don't support looking up via IFLA_IFALIAS // so fall back to dumping all links h.lookupByDump = true @@ -1082,9 +1224,9 @@ func LinkByIndex(index int) (Link, error) { // LinkByIndex finds a link by index and returns a pointer to the object. 
func (h *Handle) LinkByIndex(index int) (Link, error) { - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(index) req.AddData(msg) @@ -1092,10 +1234,10 @@ func (h *Handle) LinkByIndex(index int) (Link, error) { } func execGetLink(req *nl.NetlinkRequest) (Link, error) { - msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) + msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { if errno, ok := err.(syscall.Errno); ok { - if errno == syscall.ENODEV { + if errno == unix.ENODEV { return nil, LinkNotFoundError{fmt.Errorf("Link not found")} } } @@ -1116,7 +1258,7 @@ func execGetLink(req *nl.NetlinkRequest) (Link, error) { // linkDeserialize deserializes a raw message received from netlink into // a link object. -func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { +func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { msg := nl.DeserializeIfInfomsg(m) attrs, err := nl.ParseRouteAttr(m[msg.Len():]) @@ -1125,7 +1267,7 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { } base := LinkAttrs{Index: int(msg.Index), RawFlags: msg.Flags, Flags: linkFlags(msg.Flags), EncapType: msg.EncapType()} - if msg.Flags&syscall.IFF_PROMISC != 0 { + if msg.Flags&unix.IFF_PROMISC != 0 { base.Promisc = 1 } var ( @@ -1136,7 +1278,7 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { ) for _, attr := range attrs { switch attr.Attr.Type { - case syscall.IFLA_LINKINFO: + case unix.IFLA_LINKINFO: infos, err := nl.ParseRouteAttr(attr.Value) if err != nil { return nil, err @@ -1170,6 +1312,10 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { link = &Gretap{} case "ipip": link = &Iptun{} + case "sit": + link = &Sittun{} + case "gre": + link = &Gretun{} case "vti": link = &Vti{} case "vrf": @@ -1201,6 +1347,10 @@ func 
LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { parseGretapData(link, data) case "ipip": parseIptunData(link, data) + case "sit": + parseSittunData(link, data) + case "gre": + parseGretunData(link, data) case "vti": parseVtiData(link, data) case "vrf": @@ -1212,7 +1362,7 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { } } } - case syscall.IFLA_ADDRESS: + case unix.IFLA_ADDRESS: var nonzero bool for _, b := range attr.Value { if b != 0 { @@ -1222,19 +1372,19 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { if nonzero { base.HardwareAddr = attr.Value[:] } - case syscall.IFLA_IFNAME: + case unix.IFLA_IFNAME: base.Name = string(attr.Value[:len(attr.Value)-1]) - case syscall.IFLA_MTU: + case unix.IFLA_MTU: base.MTU = int(native.Uint32(attr.Value[0:4])) - case syscall.IFLA_LINK: + case unix.IFLA_LINK: base.ParentIndex = int(native.Uint32(attr.Value[0:4])) - case syscall.IFLA_MASTER: + case unix.IFLA_MASTER: base.MasterIndex = int(native.Uint32(attr.Value[0:4])) - case syscall.IFLA_TXQLEN: + case unix.IFLA_TXQLEN: base.TxQLen = int(native.Uint32(attr.Value[0:4])) - case syscall.IFLA_IFALIAS: + case unix.IFLA_IFALIAS: base.Alias = string(attr.Value[:len(attr.Value)-1]) - case syscall.IFLA_STATS: + case unix.IFLA_STATS: stats32 = attr.Value[:] case IFLA_STATS64: stats64 = attr.Value[:] @@ -1244,17 +1394,19 @@ func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { return nil, err } base.Xdp = xdp - case syscall.IFLA_PROTINFO | syscall.NLA_F_NESTED: - if hdr != nil && hdr.Type == syscall.RTM_NEWLINK && - msg.Family == syscall.AF_BRIDGE { + case unix.IFLA_PROTINFO | unix.NLA_F_NESTED: + if hdr != nil && hdr.Type == unix.RTM_NEWLINK && + msg.Family == unix.AF_BRIDGE { attrs, err := nl.ParseRouteAttr(attr.Value[:]) if err != nil { return nil, err } base.Protinfo = parseProtinfo(attrs) } - case syscall.IFLA_OPERSTATE: + case unix.IFLA_OPERSTATE: base.OperState = LinkOperState(uint8(attr.Value[0])) + 
case nl.IFLA_LINK_NETNSID: + base.NetNsID = int(native.Uint32(attr.Value[0:4])) } } @@ -1284,12 +1436,12 @@ func LinkList() ([]Link, error) { func (h *Handle) LinkList() ([]Link, error) { // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need // to get the message ourselves to parse link type. - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) - msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) if err != nil { return nil, err } @@ -1309,24 +1461,42 @@ func (h *Handle) LinkList() ([]Link, error) { // LinkUpdate is used to pass information back from LinkSubscribe() type LinkUpdate struct { nl.IfInfomsg - Header syscall.NlMsghdr + Header unix.NlMsghdr Link } // LinkSubscribe takes a chan down which notifications will be sent // when links change. Close the 'done' chan to stop subscription. func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error { - return linkSubscribe(netns.None(), netns.None(), ch, done) + return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil) } // LinkSubscribeAt works like LinkSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error { - return linkSubscribe(ns, netns.None(), ch, done) + return linkSubscribeAt(ns, netns.None(), ch, done, nil) } -func linkSubscribe(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error { - s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK) +// LinkSubscribeOptions contains a set of options to use with +// LinkSubscribeWithOptions. 
+type LinkSubscribeOptions struct { + Namespace *netns.NsHandle + ErrorCallback func(error) +} + +// LinkSubscribeWithOptions work like LinkSubscribe but enable to +// provide additional options to modify the behavior. Currently, the +// namespace can be provided as well as an error callback. +func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, options LinkSubscribeOptions) error { + if options.Namespace == nil { + none := netns.None() + options.Namespace = &none + } + return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback) +} + +func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error)) error { + s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_LINK) if err != nil { return err } @@ -1341,15 +1511,22 @@ func linkSubscribe(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-cha for { msgs, err := s.Receive() if err != nil { + if cberr != nil { + cberr(err) + } return } for _, m := range msgs { ifmsg := nl.DeserializeIfInfomsg(m.Data) - link, err := LinkDeserialize(&m.Header, m.Data) + header := unix.NlMsghdr(m.Header) + link, err := LinkDeserialize(&header, m.Data) if err != nil { + if cberr != nil { + cberr(err) + } return } - ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: m.Header, Link: link} + ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: header, Link: link} } } }() @@ -1424,22 +1601,49 @@ func (h *Handle) LinkSetBrProxyArpWiFi(link Link, mode bool) error { func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) - msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) msg.Index = int32(base.Index) req.AddData(msg) - br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil) + br := 
nl.NewRtAttr(unix.IFLA_PROTINFO|unix.NLA_F_NESTED, nil) nl.NewRtAttrChild(br, attr, boolToByte(mode)) req.AddData(br) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { return err } return nil } +// LinkSetTxQLen sets the transaction queue length for the link. +// Equivalent to: `ip link set $link txqlen $qlen` +func LinkSetTxQLen(link Link, qlen int) error { + return pkgHandle.LinkSetTxQLen(link, qlen) +} + +// LinkSetTxQLen sets the transaction queue length for the link. +// Equivalent to: `ip link set $link txqlen $qlen` +func (h *Handle) LinkSetTxQLen(link Link, qlen int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(qlen)) + + data := nl.NewRtAttr(unix.IFLA_TXQLEN, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) { vlan := link.(*Vlan) for _, datum := range data { @@ -1482,6 +1686,10 @@ func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) { vxlan.L3miss = int8(datum.Value[0]) != 0 case nl.IFLA_VXLAN_UDP_CSUM: vxlan.UDPCSum = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX: + vxlan.UDP6ZeroCSumTx = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX: + vxlan.UDP6ZeroCSumRx = int8(datum.Value[0]) != 0 case nl.IFLA_VXLAN_GBP: vxlan.GBP = true case nl.IFLA_VXLAN_FLOWBASED: @@ -1584,7 +1792,8 @@ func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) { func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { macv := link.(*Macvlan) for _, datum := range data { - if datum.Attr.Type == nl.IFLA_MACVLAN_MODE { + switch datum.Attr.Type { + case nl.IFLA_MACVLAN_MODE: switch native.Uint32(datum.Value[0:4]) { case 
nl.MACVLAN_MODE_PRIVATE: macv.Mode = MACVLAN_MODE_PRIVATE @@ -1597,7 +1806,16 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { case nl.MACVLAN_MODE_SOURCE: macv.Mode = MACVLAN_MODE_SOURCE } - return + case nl.IFLA_MACVLAN_MACADDR_COUNT: + macv.MACAddrs = make([]net.HardwareAddr, 0, int(native.Uint32(datum.Value[0:4]))) + case nl.IFLA_MACVLAN_MACADDR_DATA: + macs, err := nl.ParseRouteAttr(datum.Value[:]) + if err != nil { + panic(fmt.Sprintf("failed to ParseRouteAttr for IFLA_MACVLAN_MACADDR_DATA: %v", err)) + } + for _, macDatum := range macs { + macv.MACAddrs = append(macv.MACAddrs, net.HardwareAddr(macDatum.Value[0:6])) + } } } } @@ -1605,19 +1823,19 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { // copied from pkg/net_linux.go func linkFlags(rawFlags uint32) net.Flags { var f net.Flags - if rawFlags&syscall.IFF_UP != 0 { + if rawFlags&unix.IFF_UP != 0 { f |= net.FlagUp } - if rawFlags&syscall.IFF_BROADCAST != 0 { + if rawFlags&unix.IFF_BROADCAST != 0 { f |= net.FlagBroadcast } - if rawFlags&syscall.IFF_LOOPBACK != 0 { + if rawFlags&unix.IFF_LOOPBACK != 0 { f |= net.FlagLoopback } - if rawFlags&syscall.IFF_POINTOPOINT != 0 { + if rawFlags&unix.IFF_POINTOPOINT != 0 { f |= net.FlagPointToPoint } - if rawFlags&syscall.IFF_MULTICAST != 0 { + if rawFlags&unix.IFF_MULTICAST != 0 { f |= net.FlagMulticast } return f @@ -1699,7 +1917,82 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_GRE_ENCAP_FLAGS: gre.EncapFlags = native.Uint16(datum.Value[0:2]) case nl.IFLA_GRE_COLLECT_METADATA: - gre.FlowBased = int8(datum.Value[0]) != 0 + if len(datum.Value) > 0 { + gre.FlowBased = int8(datum.Value[0]) != 0 + } + } + } +} + +func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + + ip := gre.Local.To4() + if ip != nil { + nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip)) + } + ip = gre.Remote.To4() + if ip != nil { + 
nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip)) + } + + if gre.IKey != 0 { + nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gre.IKey)) + gre.IFlags |= uint16(nl.GRE_KEY) + } + + if gre.OKey != 0 { + nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gre.OKey)) + gre.OFlags |= uint16(nl.GRE_KEY) + } + + nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gre.IFlags)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gre.OFlags)) + + if gre.Link != 0 { + nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gre.Link)) + } + + nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gre.EncapType)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gre.EncapFlags)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gre.EncapSport)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gre.EncapDport)) +} + +func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { + gre := link.(*Gretun) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_GRE_OKEY: + gre.IKey = ntohl(datum.Value[0:4]) + case nl.IFLA_GRE_IKEY: + gre.OKey = ntohl(datum.Value[0:4]) + case nl.IFLA_GRE_LOCAL: + gre.Local = net.IP(datum.Value[0:4]) + case nl.IFLA_GRE_REMOTE: + gre.Remote = net.IP(datum.Value[0:4]) + case nl.IFLA_GRE_IFLAGS: + gre.IFlags = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_OFLAGS: + gre.OFlags = ntohs(datum.Value[0:2]) + + case nl.IFLA_GRE_TTL: + gre.Ttl = uint8(datum.Value[0]) + case nl.IFLA_GRE_TOS: + gre.Tos = uint8(datum.Value[0]) + case nl.IFLA_GRE_PMTUDISC: + gre.PMtuDisc = uint8(datum.Value[0]) + case nl.IFLA_GRE_ENCAP_TYPE: + gre.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_FLAGS: + gre.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_SPORT: + gre.EncapSport = 
ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_DPORT: + gre.EncapDport = ntohs(datum.Value[0:2]) } } } @@ -1713,12 +2006,14 @@ func parseLinkStats64(data []byte) *LinkStatistics { } func addXdpAttrs(xdp *LinkXdp, req *nl.NetlinkRequest) { - attrs := nl.NewRtAttr(nl.IFLA_XDP|syscall.NLA_F_NESTED, nil) + attrs := nl.NewRtAttr(nl.IFLA_XDP|unix.NLA_F_NESTED, nil) b := make([]byte, 4) native.PutUint32(b, uint32(xdp.Fd)) nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FD, b) - native.PutUint32(b, xdp.Flags) - nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b) + if xdp.Flags != 0 { + native.PutUint32(b, xdp.Flags) + nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b) + } req.AddData(attrs) } @@ -1744,6 +2039,12 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) { } func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { + if iptun.FlowBased { + // In flow based mode, no other attributes need to be configured + nl.NewRtAttrChild(linkInfo, nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased)) + return + } + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) ip := iptun.Local.To4() @@ -1762,6 +2063,10 @@ func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc)) nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl)) nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(iptun.EncapType)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport)) } func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -1778,6 +2083,72 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { iptun.Tos = uint8(datum.Value[0]) case nl.IFLA_IPTUN_PMTUDISC: iptun.PMtuDisc = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_ENCAP_SPORT: + 
iptun.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_DPORT: + iptun.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_TYPE: + iptun.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_FLAGS: + iptun.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_COLLECT_METADATA: + iptun.FlowBased = int8(datum.Value[0]) != 0 + } + } +} + +func addSittunAttrs(sittun *Sittun, linkInfo *nl.RtAttr) { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + + if sittun.Link != 0 { + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LINK, nl.Uint32Attr(sittun.Link)) + } + + ip := sittun.Local.To4() + if ip != nil { + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LOCAL, []byte(ip)) + } + + ip = sittun.Remote.To4() + if ip != nil { + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_REMOTE, []byte(ip)) + } + + if sittun.Ttl > 0 { + // Would otherwise fail on 3.10 kernel + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl)) + } + + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport)) + nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(sittun.EncapDport)) +} + +func parseSittunData(link Link, data []syscall.NetlinkRouteAttr) { + sittun := link.(*Sittun) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPTUN_LOCAL: + sittun.Local = net.IP(datum.Value[0:4]) + case nl.IFLA_IPTUN_REMOTE: + sittun.Remote = net.IP(datum.Value[0:4]) + case nl.IFLA_IPTUN_TTL: + sittun.Ttl = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_TOS: + sittun.Tos = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_PMTUDISC: + sittun.PMtuDisc = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_ENCAP_TYPE: + 
sittun.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_FLAGS: + sittun.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_SPORT: + sittun.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_DPORT: + sittun.EncapDport = ntohs(datum.Value[0:2]) } } } diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go index 0e5eb90c9eb..3f5cd497a73 100644 --- a/vendor/github.com/vishvananda/netlink/neigh.go +++ b/vendor/github.com/vishvananda/netlink/neigh.go @@ -14,6 +14,9 @@ type Neigh struct { Flags int IP net.IP HardwareAddr net.HardwareAddr + LLIPAddr net.IP //Used in the case of NHRP + Vlan int + VNI int } // String returns $ip/$hwaddr $label diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go index f069db25733..f75c22649f9 100644 --- a/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -2,10 +2,10 @@ package netlink import ( "net" - "syscall" "unsafe" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) const ( @@ -73,7 +73,7 @@ func NeighAdd(neigh *Neigh) error { // NeighAdd will add an IP to MAC mapping to the ARP table // Equivalent to: `ip neigh add ....` func (h *Handle) NeighAdd(neigh *Neigh) error { - return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL) + return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_EXCL) } // NeighSet will add or replace an IP to MAC mapping to the ARP table @@ -85,7 +85,7 @@ func NeighSet(neigh *Neigh) error { // NeighSet will add or replace an IP to MAC mapping to the ARP table // Equivalent to: `ip neigh replace....` func (h *Handle) NeighSet(neigh *Neigh) error { - return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE) + return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_REPLACE) } // NeighAppend will append an entry to FDB @@ -97,7 +97,7 @@ func 
NeighAppend(neigh *Neigh) error { // NeighAppend will append an entry to FDB // Equivalent to: `bridge fdb append...` func (h *Handle) NeighAppend(neigh *Neigh) error { - return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND) + return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_APPEND) } // NeighAppend will append an entry to FDB @@ -109,7 +109,7 @@ func neighAdd(neigh *Neigh, mode int) error { // NeighAppend will append an entry to FDB // Equivalent to: `bridge fdb append...` func (h *Handle) neighAdd(neigh *Neigh, mode int) error { - req := h.newNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWNEIGH, mode|unix.NLM_F_ACK) return neighHandle(neigh, req) } @@ -122,12 +122,13 @@ func NeighDel(neigh *Neigh) error { // NeighDel will delete an IP address from a link device. // Equivalent to: `ip addr del $addr dev $link` func (h *Handle) NeighDel(neigh *Neigh) error { - req := h.newNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELNEIGH, unix.NLM_F_ACK) return neighHandle(neigh, req) } func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { var family int + if neigh.Family > 0 { family = neigh.Family } else { @@ -151,12 +152,25 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { dstData := nl.NewRtAttr(NDA_DST, ipData) req.AddData(dstData) - if neigh.Flags != NTF_PROXY || neigh.HardwareAddr != nil { + if neigh.LLIPAddr != nil { + llIPData := nl.NewRtAttr(NDA_LLADDR, neigh.LLIPAddr.To4()) + req.AddData(llIPData) + } else if neigh.Flags != NTF_PROXY || neigh.HardwareAddr != nil { hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr)) req.AddData(hwData) } - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + if neigh.Vlan != 0 { + vlanData := nl.NewRtAttr(NDA_VLAN, nl.Uint16Attr(uint16(neigh.Vlan))) + req.AddData(vlanData) + } + + if neigh.VNI != 0 { + vniData := nl.NewRtAttr(NDA_VNI, nl.Uint32Attr(uint32(neigh.VNI))) + 
req.AddData(vniData) + } + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -189,7 +203,7 @@ func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { } func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) { - req := h.newNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) msg := Ndmsg{ Family: uint8(family), Index: uint32(linkIndex), @@ -197,7 +211,7 @@ func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) { } req.AddData(&msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWNEIGH) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH) if err != nil { return nil, err } @@ -237,12 +251,37 @@ func NeighDeserialize(m []byte) (*Neigh, error) { return nil, err } + // This should be cached for perfomance + // once per table dump + link, err := LinkByIndex(neigh.LinkIndex) + if err != nil { + return nil, err + } + encapType := link.Attrs().EncapType + for _, attr := range attrs { switch attr.Attr.Type { case NDA_DST: neigh.IP = net.IP(attr.Value) case NDA_LLADDR: - neigh.HardwareAddr = net.HardwareAddr(attr.Value) + // BUG: Is this a bug in the netlink library? 
+ // #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len)) + // #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0)) + attrLen := attr.Attr.Len - unix.SizeofRtAttr + if attrLen == 4 && (encapType == "ipip" || + encapType == "sit" || + encapType == "gre") { + neigh.LLIPAddr = net.IP(attr.Value) + } else if attrLen == 16 && + encapType == "tunnel6" { + neigh.IP = net.IP(attr.Value) + } else { + neigh.HardwareAddr = net.HardwareAddr(attr.Value) + } + case NDA_VLAN: + neigh.Vlan = int(native.Uint16(attr.Value[0:2])) + case NDA_VNI: + neigh.VNI = int(native.Uint32(attr.Value[0:4])) } } diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go index 2d57c16d741..86111b92ce1 100644 --- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -108,6 +108,10 @@ func LinkSetFlood(link Link, mode bool) error { return ErrNotImplemented } +func LinkSetTxQLen(link Link, qlen int) error { + return ErrNotImplemented +} + func LinkAdd(link Link) error { return ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/nl/BUILD b/vendor/github.com/vishvananda/netlink/nl/BUILD index 61f95dd7f4c..11f21534cc4 100644 --- a/vendor/github.com/vishvananda/netlink/nl/BUILD +++ b/vendor/github.com/vishvananda/netlink/nl/BUILD @@ -15,6 +15,7 @@ go_library( "mpls_linux.go", "nl_linux.go", "route_linux.go", + "seg6_linux.go", "tc_linux.go", "xfrm_linux.go", "xfrm_monitor_linux.go", @@ -28,6 +29,7 @@ go_library( deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/github.com/vishvananda/netns:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], }), diff --git a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go index fe362e9fa7c..50db3b4cdd8 100644 --- 
a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go @@ -1,17 +1,18 @@ package nl import ( - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) type IfAddrmsg struct { - syscall.IfAddrmsg + unix.IfAddrmsg } func NewIfAddrmsg(family int) *IfAddrmsg { return &IfAddrmsg{ - IfAddrmsg: syscall.IfAddrmsg{ + IfAddrmsg: unix.IfAddrmsg{ Family: uint8(family), }, } @@ -35,15 +36,15 @@ func NewIfAddrmsg(family int) *IfAddrmsg { // SizeofIfAddrmsg = 0x8 func DeserializeIfAddrmsg(b []byte) *IfAddrmsg { - return (*IfAddrmsg)(unsafe.Pointer(&b[0:syscall.SizeofIfAddrmsg][0])) + return (*IfAddrmsg)(unsafe.Pointer(&b[0:unix.SizeofIfAddrmsg][0])) } func (msg *IfAddrmsg) Serialize() []byte { - return (*(*[syscall.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[unix.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:] } func (msg *IfAddrmsg) Len() int { - return syscall.SizeofIfAddrmsg + return unix.SizeofIfAddrmsg } // struct ifa_cacheinfo { diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go index e4a192f8eaf..ba0b3e19c67 100644 --- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -1,14 +1,15 @@ package nl import ( - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) const ( DEFAULT_CHANGE = 0xFFFFFFFF // doesn't exist in syscall - IFLA_VFINFO_LIST = syscall.IFLA_IFALIAS + 1 + iota + IFLA_VFINFO_LIST = unix.IFLA_IFALIAS + 1 + iota IFLA_STATS64 IFLA_VF_PORTS IFLA_PORT_SELF @@ -118,6 +119,10 @@ const ( IFLA_MACVLAN_UNSPEC = iota IFLA_MACVLAN_MODE IFLA_MACVLAN_FLAGS + IFLA_MACVLAN_MACADDR_MODE + IFLA_MACVLAN_MACADDR + IFLA_MACVLAN_MACADDR_DATA + IFLA_MACVLAN_MACADDR_COUNT IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS ) @@ -129,6 +134,13 @@ const ( MACVLAN_MODE_SOURCE = 16 ) +const ( + MACVLAN_MACADDR_ADD = iota + MACVLAN_MACADDR_DEL + MACVLAN_MACADDR_FLUSH + MACVLAN_MACADDR_SET +) + 
const ( IFLA_BOND_UNSPEC = iota IFLA_BOND_MODE @@ -443,6 +455,13 @@ func (msg *VfTrust) Serialize() []byte { return (*(*[SizeofVfTrust]byte)(unsafe.Pointer(msg)))[:] } +const ( + XDP_FLAGS_UPDATE_IF_NOEXIST = 1 << iota + XDP_FLAGS_SKB_MODE + XDP_FLAGS_DRV_MODE + XDP_FLAGS_MASK = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE +) + const ( IFLA_XDP_UNSPEC = iota IFLA_XDP_FD /* fd of xdp program to attach, or -1 to remove */ @@ -468,7 +487,12 @@ const ( IFLA_IPTUN_6RD_RELAY_PREFIX IFLA_IPTUN_6RD_PREFIXLEN IFLA_IPTUN_6RD_RELAY_PREFIXLEN - IFLA_IPTUN_MAX = IFLA_IPTUN_6RD_RELAY_PREFIXLEN + IFLA_IPTUN_ENCAP_TYPE + IFLA_IPTUN_ENCAP_FLAGS + IFLA_IPTUN_ENCAP_SPORT + IFLA_IPTUN_ENCAP_DPORT + IFLA_IPTUN_COLLECT_METADATA + IFLA_IPTUN_MAX = IFLA_IPTUN_COLLECT_METADATA ) const ( diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index 1329acd8643..bc8e82c2cc4 100644 --- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -13,18 +13,19 @@ import ( "unsafe" "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) const ( // Family type definitions - FAMILY_ALL = syscall.AF_UNSPEC - FAMILY_V4 = syscall.AF_INET - FAMILY_V6 = syscall.AF_INET6 + FAMILY_ALL = unix.AF_UNSPEC + FAMILY_V4 = unix.AF_INET + FAMILY_V6 = unix.AF_INET6 FAMILY_MPLS = AF_MPLS ) // SupportedNlFamilies contains the list of netlink families this netlink package supports -var SupportedNlFamilies = []int{syscall.NETLINK_ROUTE, syscall.NETLINK_XFRM, syscall.NETLINK_NETFILTER} +var SupportedNlFamilies = []int{unix.NETLINK_ROUTE, unix.NETLINK_XFRM, unix.NETLINK_NETFILTER} var nextSeqNr uint32 @@ -77,161 +78,161 @@ type NetlinkRequestData interface { // IfInfomsg is related to links, but it is used for list requests as well type IfInfomsg struct { - syscall.IfInfomsg + unix.IfInfomsg } // Create an IfInfomsg with family specified func NewIfInfomsg(family int) 
*IfInfomsg { return &IfInfomsg{ - IfInfomsg: syscall.IfInfomsg{ + IfInfomsg: unix.IfInfomsg{ Family: uint8(family), }, } } func DeserializeIfInfomsg(b []byte) *IfInfomsg { - return (*IfInfomsg)(unsafe.Pointer(&b[0:syscall.SizeofIfInfomsg][0])) + return (*IfInfomsg)(unsafe.Pointer(&b[0:unix.SizeofIfInfomsg][0])) } func (msg *IfInfomsg) Serialize() []byte { - return (*(*[syscall.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[unix.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] } func (msg *IfInfomsg) Len() int { - return syscall.SizeofIfInfomsg + return unix.SizeofIfInfomsg } func (msg *IfInfomsg) EncapType() string { switch msg.Type { case 0: return "generic" - case syscall.ARPHRD_ETHER: + case unix.ARPHRD_ETHER: return "ether" - case syscall.ARPHRD_EETHER: + case unix.ARPHRD_EETHER: return "eether" - case syscall.ARPHRD_AX25: + case unix.ARPHRD_AX25: return "ax25" - case syscall.ARPHRD_PRONET: + case unix.ARPHRD_PRONET: return "pronet" - case syscall.ARPHRD_CHAOS: + case unix.ARPHRD_CHAOS: return "chaos" - case syscall.ARPHRD_IEEE802: + case unix.ARPHRD_IEEE802: return "ieee802" - case syscall.ARPHRD_ARCNET: + case unix.ARPHRD_ARCNET: return "arcnet" - case syscall.ARPHRD_APPLETLK: + case unix.ARPHRD_APPLETLK: return "atalk" - case syscall.ARPHRD_DLCI: + case unix.ARPHRD_DLCI: return "dlci" - case syscall.ARPHRD_ATM: + case unix.ARPHRD_ATM: return "atm" - case syscall.ARPHRD_METRICOM: + case unix.ARPHRD_METRICOM: return "metricom" - case syscall.ARPHRD_IEEE1394: + case unix.ARPHRD_IEEE1394: return "ieee1394" - case syscall.ARPHRD_INFINIBAND: + case unix.ARPHRD_INFINIBAND: return "infiniband" - case syscall.ARPHRD_SLIP: + case unix.ARPHRD_SLIP: return "slip" - case syscall.ARPHRD_CSLIP: + case unix.ARPHRD_CSLIP: return "cslip" - case syscall.ARPHRD_SLIP6: + case unix.ARPHRD_SLIP6: return "slip6" - case syscall.ARPHRD_CSLIP6: + case unix.ARPHRD_CSLIP6: return "cslip6" - case syscall.ARPHRD_RSRVD: + case unix.ARPHRD_RSRVD: return "rsrvd" - case 
syscall.ARPHRD_ADAPT: + case unix.ARPHRD_ADAPT: return "adapt" - case syscall.ARPHRD_ROSE: + case unix.ARPHRD_ROSE: return "rose" - case syscall.ARPHRD_X25: + case unix.ARPHRD_X25: return "x25" - case syscall.ARPHRD_HWX25: + case unix.ARPHRD_HWX25: return "hwx25" - case syscall.ARPHRD_PPP: + case unix.ARPHRD_PPP: return "ppp" - case syscall.ARPHRD_HDLC: + case unix.ARPHRD_HDLC: return "hdlc" - case syscall.ARPHRD_LAPB: + case unix.ARPHRD_LAPB: return "lapb" - case syscall.ARPHRD_DDCMP: + case unix.ARPHRD_DDCMP: return "ddcmp" - case syscall.ARPHRD_RAWHDLC: + case unix.ARPHRD_RAWHDLC: return "rawhdlc" - case syscall.ARPHRD_TUNNEL: + case unix.ARPHRD_TUNNEL: return "ipip" - case syscall.ARPHRD_TUNNEL6: + case unix.ARPHRD_TUNNEL6: return "tunnel6" - case syscall.ARPHRD_FRAD: + case unix.ARPHRD_FRAD: return "frad" - case syscall.ARPHRD_SKIP: + case unix.ARPHRD_SKIP: return "skip" - case syscall.ARPHRD_LOOPBACK: + case unix.ARPHRD_LOOPBACK: return "loopback" - case syscall.ARPHRD_LOCALTLK: + case unix.ARPHRD_LOCALTLK: return "ltalk" - case syscall.ARPHRD_FDDI: + case unix.ARPHRD_FDDI: return "fddi" - case syscall.ARPHRD_BIF: + case unix.ARPHRD_BIF: return "bif" - case syscall.ARPHRD_SIT: + case unix.ARPHRD_SIT: return "sit" - case syscall.ARPHRD_IPDDP: + case unix.ARPHRD_IPDDP: return "ip/ddp" - case syscall.ARPHRD_IPGRE: + case unix.ARPHRD_IPGRE: return "gre" - case syscall.ARPHRD_PIMREG: + case unix.ARPHRD_PIMREG: return "pimreg" - case syscall.ARPHRD_HIPPI: + case unix.ARPHRD_HIPPI: return "hippi" - case syscall.ARPHRD_ASH: + case unix.ARPHRD_ASH: return "ash" - case syscall.ARPHRD_ECONET: + case unix.ARPHRD_ECONET: return "econet" - case syscall.ARPHRD_IRDA: + case unix.ARPHRD_IRDA: return "irda" - case syscall.ARPHRD_FCPP: + case unix.ARPHRD_FCPP: return "fcpp" - case syscall.ARPHRD_FCAL: + case unix.ARPHRD_FCAL: return "fcal" - case syscall.ARPHRD_FCPL: + case unix.ARPHRD_FCPL: return "fcpl" - case syscall.ARPHRD_FCFABRIC: + case unix.ARPHRD_FCFABRIC: return 
"fcfb0" - case syscall.ARPHRD_FCFABRIC + 1: + case unix.ARPHRD_FCFABRIC + 1: return "fcfb1" - case syscall.ARPHRD_FCFABRIC + 2: + case unix.ARPHRD_FCFABRIC + 2: return "fcfb2" - case syscall.ARPHRD_FCFABRIC + 3: + case unix.ARPHRD_FCFABRIC + 3: return "fcfb3" - case syscall.ARPHRD_FCFABRIC + 4: + case unix.ARPHRD_FCFABRIC + 4: return "fcfb4" - case syscall.ARPHRD_FCFABRIC + 5: + case unix.ARPHRD_FCFABRIC + 5: return "fcfb5" - case syscall.ARPHRD_FCFABRIC + 6: + case unix.ARPHRD_FCFABRIC + 6: return "fcfb6" - case syscall.ARPHRD_FCFABRIC + 7: + case unix.ARPHRD_FCFABRIC + 7: return "fcfb7" - case syscall.ARPHRD_FCFABRIC + 8: + case unix.ARPHRD_FCFABRIC + 8: return "fcfb8" - case syscall.ARPHRD_FCFABRIC + 9: + case unix.ARPHRD_FCFABRIC + 9: return "fcfb9" - case syscall.ARPHRD_FCFABRIC + 10: + case unix.ARPHRD_FCFABRIC + 10: return "fcfb10" - case syscall.ARPHRD_FCFABRIC + 11: + case unix.ARPHRD_FCFABRIC + 11: return "fcfb11" - case syscall.ARPHRD_FCFABRIC + 12: + case unix.ARPHRD_FCFABRIC + 12: return "fcfb12" - case syscall.ARPHRD_IEEE802_TR: + case unix.ARPHRD_IEEE802_TR: return "tr" - case syscall.ARPHRD_IEEE80211: + case unix.ARPHRD_IEEE80211: return "ieee802.11" - case syscall.ARPHRD_IEEE80211_PRISM: + case unix.ARPHRD_IEEE80211_PRISM: return "ieee802.11/prism" - case syscall.ARPHRD_IEEE80211_RADIOTAP: + case unix.ARPHRD_IEEE80211_RADIOTAP: return "ieee802.11/radiotap" - case syscall.ARPHRD_IEEE802154: + case unix.ARPHRD_IEEE802154: return "ieee802.15.4" case 65534: @@ -243,7 +244,7 @@ func (msg *IfInfomsg) EncapType() string { } func rtaAlignOf(attrlen int) int { - return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) + return (attrlen + unix.RTA_ALIGNTO - 1) & ^(unix.RTA_ALIGNTO - 1) } func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { @@ -254,7 +255,7 @@ func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { // Extend RtAttr to handle data and children type RtAttr struct { - syscall.RtAttr + unix.RtAttr Data []byte 
children []NetlinkRequestData } @@ -262,7 +263,7 @@ type RtAttr struct { // Create a new Extended RtAttr object func NewRtAttr(attrType int, data []byte) *RtAttr { return &RtAttr{ - RtAttr: syscall.RtAttr{ + RtAttr: unix.RtAttr{ Type: uint16(attrType), }, children: []NetlinkRequestData{}, @@ -277,16 +278,21 @@ func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { return attr } +// AddChild adds an existing RtAttr as a child. +func (a *RtAttr) AddChild(attr *RtAttr) { + a.children = append(a.children, attr) +} + func (a *RtAttr) Len() int { if len(a.children) == 0 { - return (syscall.SizeofRtAttr + len(a.Data)) + return (unix.SizeofRtAttr + len(a.Data)) } l := 0 for _, child := range a.children { l += rtaAlignOf(child.Len()) } - l += syscall.SizeofRtAttr + l += unix.SizeofRtAttr return rtaAlignOf(l + len(a.Data)) } @@ -319,7 +325,7 @@ func (a *RtAttr) Serialize() []byte { } type NetlinkRequest struct { - syscall.NlMsghdr + unix.NlMsghdr Data []NetlinkRequestData RawData []byte Sockets map[int]*SocketHandle @@ -327,7 +333,7 @@ type NetlinkRequest struct { // Serialize the Netlink Request into a byte array func (req *NetlinkRequest) Serialize() []byte { - length := syscall.SizeofNlMsghdr + length := unix.SizeofNlMsghdr dataBytes := make([][]byte, len(req.Data)) for i, data := range req.Data { dataBytes[i] = data.Serialize() @@ -337,8 +343,8 @@ func (req *NetlinkRequest) Serialize() []byte { req.Len = uint32(length) b := make([]byte, length) - hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:] - next := syscall.SizeofNlMsghdr + hdr := (*(*[unix.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:] + next := unix.SizeofNlMsghdr copy(b[0:next], hdr) for _, data := range dataBytes { for _, dataByte := range data { @@ -421,10 +427,10 @@ done: if m.Header.Pid != pid { return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) } - if m.Header.Type == syscall.NLMSG_DONE { + if m.Header.Type == unix.NLMSG_DONE { break done } - if 
m.Header.Type == syscall.NLMSG_ERROR { + if m.Header.Type == unix.NLMSG_ERROR { native := NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { @@ -436,7 +442,7 @@ done: continue } res = append(res, m.Data) - if m.Header.Flags&syscall.NLM_F_MULTI == 0 { + if m.Header.Flags&unix.NLM_F_MULTI == 0 { break done } } @@ -449,10 +455,10 @@ done: // the message is serialized func NewNetlinkRequest(proto, flags int) *NetlinkRequest { return &NetlinkRequest{ - NlMsghdr: syscall.NlMsghdr{ - Len: uint32(syscall.SizeofNlMsghdr), + NlMsghdr: unix.NlMsghdr{ + Len: uint32(unix.SizeofNlMsghdr), Type: uint16(proto), - Flags: syscall.NLM_F_REQUEST | uint16(flags), + Flags: unix.NLM_F_REQUEST | uint16(flags), Seq: atomic.AddUint32(&nextSeqNr, 1), }, } @@ -460,21 +466,21 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest { type NetlinkSocket struct { fd int32 - lsa syscall.SockaddrNetlink + lsa unix.SockaddrNetlink sync.Mutex } func getNetlinkSocket(protocol int) (*NetlinkSocket, error) { - fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW|syscall.SOCK_CLOEXEC, protocol) + fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW|unix.SOCK_CLOEXEC, protocol) if err != nil { return nil, err } s := &NetlinkSocket{ fd: int32(fd), } - s.lsa.Family = syscall.AF_NETLINK - if err := syscall.Bind(fd, &s.lsa); err != nil { - syscall.Close(fd) + s.lsa.Family = unix.AF_NETLINK + if err := unix.Bind(fd, &s.lsa); err != nil { + unix.Close(fd) return nil, err } @@ -551,21 +557,21 @@ func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) { // Returns the netlink socket on which Receive() method can be called // to retrieve the messages from the kernel. 
func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) { - fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol) + fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW, protocol) if err != nil { return nil, err } s := &NetlinkSocket{ fd: int32(fd), } - s.lsa.Family = syscall.AF_NETLINK + s.lsa.Family = unix.AF_NETLINK for _, g := range groups { s.lsa.Groups |= (1 << (g - 1)) } - if err := syscall.Bind(fd, &s.lsa); err != nil { - syscall.Close(fd) + if err := unix.Bind(fd, &s.lsa); err != nil { + unix.Close(fd) return nil, err } @@ -586,7 +592,7 @@ func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*Ne func (s *NetlinkSocket) Close() { fd := int(atomic.SwapInt32(&s.fd, -1)) - syscall.Close(fd) + unix.Close(fd) } func (s *NetlinkSocket) GetFd() int { @@ -598,7 +604,7 @@ func (s *NetlinkSocket) Send(request *NetlinkRequest) error { if fd < 0 { return fmt.Errorf("Send called on a closed socket") } - if err := syscall.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil { + if err := unix.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil { return err } return nil @@ -609,26 +615,40 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { if fd < 0 { return nil, fmt.Errorf("Receive called on a closed socket") } - rb := make([]byte, syscall.Getpagesize()) - nr, _, err := syscall.Recvfrom(fd, rb, 0) + rb := make([]byte, unix.Getpagesize()) + nr, _, err := unix.Recvfrom(fd, rb, 0) if err != nil { return nil, err } - if nr < syscall.NLMSG_HDRLEN { + if nr < unix.NLMSG_HDRLEN { return nil, fmt.Errorf("Got short response from netlink") } rb = rb[:nr] return syscall.ParseNetlinkMessage(rb) } +// SetSendTimeout allows to set a send timeout on the socket +func (s *NetlinkSocket) SetSendTimeout(timeout *unix.Timeval) error { + // Set a send timeout of SOCKET_SEND_TIMEOUT, this will allow the Send to periodically unblock and avoid that a routine + // remains stuck on a send on a closed fd + return 
unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_SNDTIMEO, timeout) +} + +// SetReceiveTimeout allows to set a receive timeout on the socket +func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error { + // Set a read timeout of SOCKET_READ_TIMEOUT, this will allow the Read to periodically unblock and avoid that a routine + // remains stuck on a recvmsg on a closed fd + return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout) +} + func (s *NetlinkSocket) GetPid() (uint32, error) { fd := int(atomic.LoadInt32(&s.fd)) - lsa, err := syscall.Getsockname(fd) + lsa, err := unix.Getsockname(fd) if err != nil { return 0, err } switch v := lsa.(type) { - case *syscall.SockaddrNetlink: + case *unix.SockaddrNetlink: return v.Pid, nil } return 0, fmt.Errorf("Wrong socket type") @@ -683,24 +703,24 @@ func Uint64Attr(v uint64) []byte { func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) { var attrs []syscall.NetlinkRouteAttr - for len(b) >= syscall.SizeofRtAttr { + for len(b) >= unix.SizeofRtAttr { a, vbuf, alen, err := netlinkRouteAttrAndValue(b) if err != nil { return nil, err } - ra := syscall.NetlinkRouteAttr{Attr: *a, Value: vbuf[:int(a.Len)-syscall.SizeofRtAttr]} + ra := syscall.NetlinkRouteAttr{Attr: syscall.RtAttr(*a), Value: vbuf[:int(a.Len)-unix.SizeofRtAttr]} attrs = append(attrs, ra) b = b[alen:] } return attrs, nil } -func netlinkRouteAttrAndValue(b []byte) (*syscall.RtAttr, []byte, int, error) { - a := (*syscall.RtAttr)(unsafe.Pointer(&b[0])) - if int(a.Len) < syscall.SizeofRtAttr || int(a.Len) > len(b) { - return nil, nil, 0, syscall.EINVAL +func netlinkRouteAttrAndValue(b []byte) (*unix.RtAttr, []byte, int, error) { + a := (*unix.RtAttr)(unsafe.Pointer(&b[0])) + if int(a.Len) < unix.SizeofRtAttr || int(a.Len) > len(b) { + return nil, nil, 0, unix.EINVAL } - return a, b[syscall.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil + return a, b[unix.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil } // SocketHandle 
contains the netlink socket and the associated diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go index 1a064d65d2f..f6906fcaf7e 100644 --- a/vendor/github.com/vishvananda/netlink/nl/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -1,65 +1,66 @@ package nl import ( - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) type RtMsg struct { - syscall.RtMsg + unix.RtMsg } func NewRtMsg() *RtMsg { return &RtMsg{ - RtMsg: syscall.RtMsg{ - Table: syscall.RT_TABLE_MAIN, - Scope: syscall.RT_SCOPE_UNIVERSE, - Protocol: syscall.RTPROT_BOOT, - Type: syscall.RTN_UNICAST, + RtMsg: unix.RtMsg{ + Table: unix.RT_TABLE_MAIN, + Scope: unix.RT_SCOPE_UNIVERSE, + Protocol: unix.RTPROT_BOOT, + Type: unix.RTN_UNICAST, }, } } func NewRtDelMsg() *RtMsg { return &RtMsg{ - RtMsg: syscall.RtMsg{ - Table: syscall.RT_TABLE_MAIN, - Scope: syscall.RT_SCOPE_NOWHERE, + RtMsg: unix.RtMsg{ + Table: unix.RT_TABLE_MAIN, + Scope: unix.RT_SCOPE_NOWHERE, }, } } func (msg *RtMsg) Len() int { - return syscall.SizeofRtMsg + return unix.SizeofRtMsg } func DeserializeRtMsg(b []byte) *RtMsg { - return (*RtMsg)(unsafe.Pointer(&b[0:syscall.SizeofRtMsg][0])) + return (*RtMsg)(unsafe.Pointer(&b[0:unix.SizeofRtMsg][0])) } func (msg *RtMsg) Serialize() []byte { - return (*(*[syscall.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[unix.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] } type RtNexthop struct { - syscall.RtNexthop + unix.RtNexthop Children []NetlinkRequestData } func DeserializeRtNexthop(b []byte) *RtNexthop { - return (*RtNexthop)(unsafe.Pointer(&b[0:syscall.SizeofRtNexthop][0])) + return (*RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0])) } func (msg *RtNexthop) Len() int { if len(msg.Children) == 0 { - return syscall.SizeofRtNexthop + return unix.SizeofRtNexthop } l := 0 for _, child := range msg.Children { l += rtaAlignOf(child.Len()) } - l += syscall.SizeofRtNexthop + l += 
unix.SizeofRtNexthop return rtaAlignOf(l) } @@ -67,8 +68,8 @@ func (msg *RtNexthop) Serialize() []byte { length := msg.Len() msg.RtNexthop.Len = uint16(length) buf := make([]byte, length) - copy(buf, (*(*[syscall.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:]) - next := rtaAlignOf(syscall.SizeofRtNexthop) + copy(buf, (*(*[unix.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:]) + next := rtaAlignOf(unix.SizeofRtNexthop) if len(msg.Children) > 0 { for _, child := range msg.Children { childBuf := child.Serialize() diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go new file mode 100644 index 00000000000..b3425f6b0ec --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go @@ -0,0 +1,111 @@ +package nl + +import ( + "errors" + "fmt" + "net" +) + +type IPv6SrHdr struct { + nextHdr uint8 + hdrLen uint8 + routingType uint8 + segmentsLeft uint8 + firstSegment uint8 + flags uint8 + reserved uint16 + + Segments []net.IP +} + +func (s1 *IPv6SrHdr) Equal(s2 IPv6SrHdr) bool { + if len(s1.Segments) != len(s2.Segments) { + return false + } + for i := range s1.Segments { + if s1.Segments[i].Equal(s2.Segments[i]) != true { + return false + } + } + return s1.nextHdr == s2.nextHdr && + s1.hdrLen == s2.hdrLen && + s1.routingType == s2.routingType && + s1.segmentsLeft == s2.segmentsLeft && + s1.firstSegment == s2.firstSegment && + s1.flags == s2.flags + // reserved doesn't need to be identical. 
+} + +// seg6 encap mode +const ( + SEG6_IPTUN_MODE_INLINE = iota + SEG6_IPTUN_MODE_ENCAP +) + +// number of nested RTATTR +// from include/uapi/linux/seg6_iptunnel.h +const ( + SEG6_IPTUNNEL_UNSPEC = iota + SEG6_IPTUNNEL_SRH + __SEG6_IPTUNNEL_MAX +) +const ( + SEG6_IPTUNNEL_MAX = __SEG6_IPTUNNEL_MAX - 1 +) + +func EncodeSEG6Encap(mode int, segments []net.IP) ([]byte, error) { + nsegs := len(segments) // nsegs: number of segments + if nsegs == 0 { + return nil, errors.New("EncodeSEG6Encap: No Segment in srh") + } + b := make([]byte, 12, 12+len(segments)*16) + native := NativeEndian() + native.PutUint32(b, uint32(mode)) + b[4] = 0 // srh.nextHdr (0 when calling netlink) + b[5] = uint8(16 * nsegs >> 3) // srh.hdrLen (in 8-octets unit) + b[6] = IPV6_SRCRT_TYPE_4 // srh.routingType (assigned by IANA) + b[7] = uint8(nsegs - 1) // srh.segmentsLeft + b[8] = uint8(nsegs - 1) // srh.firstSegment + b[9] = 0 // srh.flags (SR6_FLAG1_HMAC for srh_hmac) + // srh.reserved: Defined as "Tag" in draft-ietf-6man-segment-routing-header-07 + native.PutUint16(b[10:], 0) // srh.reserved + for _, netIP := range segments { + b = append(b, netIP...) 
// srh.Segments + } + return b, nil +} + +func DecodeSEG6Encap(buf []byte) (int, []net.IP, error) { + native := NativeEndian() + mode := int(native.Uint32(buf)) + srh := IPv6SrHdr{ + nextHdr: buf[4], + hdrLen: buf[5], + routingType: buf[6], + segmentsLeft: buf[7], + firstSegment: buf[8], + flags: buf[9], + reserved: native.Uint16(buf[10:12]), + } + buf = buf[12:] + if len(buf)%16 != 0 { + err := fmt.Errorf("DecodeSEG6Encap: error parsing Segment List (buf len: %d)\n", len(buf)) + return mode, nil, err + } + for len(buf) > 0 { + srh.Segments = append(srh.Segments, net.IP(buf[:16])) + buf = buf[16:] + } + return mode, srh.Segments, nil +} + +// Helper functions +func SEG6EncapModeString(mode int) string { + switch mode { + case SEG6_IPTUN_MODE_INLINE: + return "inline" + case SEG6_IPTUN_MODE_ENCAP: + return "encap" + } + return "unknown" +} diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go index 3473e536384..fc631e0e505 100644 --- a/vendor/github.com/vishvananda/netlink/nl/syscall.go +++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -65,4 +65,14 @@ const ( LWTUNNEL_ENCAP_IP LWTUNNEL_ENCAP_ILA LWTUNNEL_ENCAP_IP6 + LWTUNNEL_ENCAP_SEG6 + LWTUNNEL_ENCAP_BPF +) + +// routing header types +const ( + IPV6_SRCRT_STRICT = 0x01 // Deprecated; will be removed + IPV6_SRCRT_TYPE_0 = 0 // Deprecated; will be removed + IPV6_SRCRT_TYPE_2 = 2 // IPv6 type 2 Routing Header + IPV6_SRCRT_TYPE_4 = 4 // Segment Routing with IPv6 ) diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go index 10dd0d53357..43c465f0575 100644 --- a/vendor/github.com/vishvananda/netlink/protinfo_linux.go +++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -5,6 +5,7 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) func LinkGetProtinfo(link Link) (Protinfo, error) { @@ -15,10 +16,10 @@ func (h *Handle) 
LinkGetProtinfo(link Link) (Protinfo, error) { base := link.Attrs() h.ensureIndex(base) var pi Protinfo - req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) - msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) + msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { return pi, err } @@ -33,7 +34,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { return pi, err } for _, attr := range attrs { - if attr.Attr.Type != syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED { + if attr.Attr.Type != unix.IFLA_PROTINFO|unix.NLA_F_NESTED { continue } infos, err := nl.ParseRouteAttr(attr.Value) diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go index 2c0deddb323..91193145ae7 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -8,6 +8,7 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) // NOTE function is here because it uses other linux functions @@ -84,7 +85,7 @@ func QdiscDel(qdisc Qdisc) error { // QdiscDel will delete a qdisc from the system. // Equivalent to: `tc qdisc del $qdisc` func (h *Handle) QdiscDel(qdisc Qdisc) error { - return h.qdiscModify(syscall.RTM_DELQDISC, 0, qdisc) + return h.qdiscModify(unix.RTM_DELQDISC, 0, qdisc) } // QdiscChange will change a qdisc in place @@ -98,7 +99,7 @@ func QdiscChange(qdisc Qdisc) error { // Equivalent to: `tc qdisc change $qdisc` // The parent and handle MUST NOT be changed. func (h *Handle) QdiscChange(qdisc Qdisc) error { - return h.qdiscModify(syscall.RTM_NEWQDISC, 0, qdisc) + return h.qdiscModify(unix.RTM_NEWQDISC, 0, qdisc) } // QdiscReplace will replace a qdisc to the system. 
@@ -113,8 +114,8 @@ func QdiscReplace(qdisc Qdisc) error { // The handle MUST change. func (h *Handle) QdiscReplace(qdisc Qdisc) error { return h.qdiscModify( - syscall.RTM_NEWQDISC, - syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE, + unix.RTM_NEWQDISC, + unix.NLM_F_CREATE|unix.NLM_F_REPLACE, qdisc) } @@ -128,13 +129,13 @@ func QdiscAdd(qdisc Qdisc) error { // Equivalent to: `tc qdisc add $qdisc` func (h *Handle) QdiscAdd(qdisc Qdisc) error { return h.qdiscModify( - syscall.RTM_NEWQDISC, - syscall.NLM_F_CREATE|syscall.NLM_F_EXCL, + unix.RTM_NEWQDISC, + unix.NLM_F_CREATE|unix.NLM_F_EXCL, qdisc) } func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { - req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) base := qdisc.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -145,13 +146,13 @@ func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { req.AddData(msg) // When deleting don't bother building the rest of the netlink payload - if cmd != syscall.RTM_DELQDISC { + if cmd != unix.RTM_DELQDISC { if err := qdiscPayload(req, qdisc); err != nil { return err } } - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -160,71 +161,73 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type()))) options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) - if prio, ok := qdisc.(*Prio); ok { + + switch qdisc := qdisc.(type) { + case *Prio: tcmap := nl.TcPrioMap{ - Bands: int32(prio.Bands), - Priomap: prio.PriorityMap, + Bands: int32(qdisc.Bands), + Priomap: qdisc.PriorityMap, } options = nl.NewRtAttr(nl.TCA_OPTIONS, tcmap.Serialize()) - } else if tbf, ok := qdisc.(*Tbf); ok { + case *Tbf: opt := nl.TcTbfQopt{} - opt.Rate.Rate = uint32(tbf.Rate) - opt.Peakrate.Rate = uint32(tbf.Peakrate) - opt.Limit = tbf.Limit - opt.Buffer = tbf.Buffer + opt.Rate.Rate = uint32(qdisc.Rate) + 
opt.Peakrate.Rate = uint32(qdisc.Peakrate) + opt.Limit = qdisc.Limit + opt.Buffer = qdisc.Buffer nl.NewRtAttrChild(options, nl.TCA_TBF_PARMS, opt.Serialize()) - if tbf.Rate >= uint64(1<<32) { - nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(tbf.Rate)) + if qdisc.Rate >= uint64(1<<32) { + nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(qdisc.Rate)) } - if tbf.Peakrate >= uint64(1<<32) { - nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(tbf.Peakrate)) + if qdisc.Peakrate >= uint64(1<<32) { + nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(qdisc.Peakrate)) } - if tbf.Peakrate > 0 { - nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(tbf.Minburst)) + if qdisc.Peakrate > 0 { + nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(qdisc.Minburst)) } - } else if htb, ok := qdisc.(*Htb); ok { + case *Htb: opt := nl.TcHtbGlob{} - opt.Version = htb.Version - opt.Rate2Quantum = htb.Rate2Quantum - opt.Defcls = htb.Defcls + opt.Version = qdisc.Version + opt.Rate2Quantum = qdisc.Rate2Quantum + opt.Defcls = qdisc.Defcls // TODO: Handle Debug properly. 
For now default to 0 - opt.Debug = htb.Debug - opt.DirectPkts = htb.DirectPkts + opt.Debug = qdisc.Debug + opt.DirectPkts = qdisc.DirectPkts nl.NewRtAttrChild(options, nl.TCA_HTB_INIT, opt.Serialize()) // nl.NewRtAttrChild(options, nl.TCA_HTB_DIRECT_QLEN, opt.Serialize()) - } else if netem, ok := qdisc.(*Netem); ok { + case *Netem: opt := nl.TcNetemQopt{} - opt.Latency = netem.Latency - opt.Limit = netem.Limit - opt.Loss = netem.Loss - opt.Gap = netem.Gap - opt.Duplicate = netem.Duplicate - opt.Jitter = netem.Jitter + opt.Latency = qdisc.Latency + opt.Limit = qdisc.Limit + opt.Loss = qdisc.Loss + opt.Gap = qdisc.Gap + opt.Duplicate = qdisc.Duplicate + opt.Jitter = qdisc.Jitter options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize()) // Correlation corr := nl.TcNetemCorr{} - corr.DelayCorr = netem.DelayCorr - corr.LossCorr = netem.LossCorr - corr.DupCorr = netem.DuplicateCorr + corr.DelayCorr = qdisc.DelayCorr + corr.LossCorr = qdisc.LossCorr + corr.DupCorr = qdisc.DuplicateCorr if corr.DelayCorr > 0 || corr.LossCorr > 0 || corr.DupCorr > 0 { nl.NewRtAttrChild(options, nl.TCA_NETEM_CORR, corr.Serialize()) } // Corruption corruption := nl.TcNetemCorrupt{} - corruption.Probability = netem.CorruptProb - corruption.Correlation = netem.CorruptCorr + corruption.Probability = qdisc.CorruptProb + corruption.Correlation = qdisc.CorruptCorr if corruption.Probability > 0 { nl.NewRtAttrChild(options, nl.TCA_NETEM_CORRUPT, corruption.Serialize()) } // Reorder reorder := nl.TcNetemReorder{} - reorder.Probability = netem.ReorderProb - reorder.Correlation = netem.ReorderCorr + reorder.Probability = qdisc.ReorderProb + reorder.Correlation = qdisc.ReorderCorr if reorder.Probability > 0 { nl.NewRtAttrChild(options, nl.TCA_NETEM_REORDER, reorder.Serialize()) } - } else if _, ok := qdisc.(*Ingress); ok { + case *Ingress: // ingress filters must use the proper handle if qdisc.Attrs().Parent != HANDLE_INGRESS { return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS") @@ 
-246,7 +249,7 @@ func QdiscList(link Link) ([]Qdisc, error) { // Equivalent to: `tc qdisc show`. // The list can be filtered by link. func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { - req := h.newNetlinkRequest(syscall.RTM_GETQDISC, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETQDISC, unix.NLM_F_DUMP) index := int32(0) if link != nil { base := link.Attrs() @@ -259,7 +262,7 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { } req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWQDISC) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC) if err != nil { return nil, err } diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go index 03ac4b23916..2cd58ee3342 100644 --- a/vendor/github.com/vishvananda/netlink/route.go +++ b/vendor/github.com/vishvananda/netlink/route.go @@ -16,6 +16,7 @@ type Destination interface { Decode([]byte) error Encode() ([]byte, error) String() string + Equal(Destination) bool } type Encap interface { @@ -23,6 +24,7 @@ type Encap interface { Decode([]byte) error Encode() ([]byte, error) String() string + Equal(Encap) bool } // Route represents a netlink route. 
@@ -43,6 +45,8 @@ type Route struct { MPLSDst *int NewDst Destination Encap Encap + MTU int + AdvMSS int } func (r Route) String() string { @@ -72,6 +76,25 @@ func (r Route) String() string { return fmt.Sprintf("{%s}", strings.Join(elems, " ")) } +func (r Route) Equal(x Route) bool { + return r.LinkIndex == x.LinkIndex && + r.ILinkIndex == x.ILinkIndex && + r.Scope == x.Scope && + ipNetEqual(r.Dst, x.Dst) && + r.Src.Equal(x.Src) && + r.Gw.Equal(x.Gw) && + nexthopInfoSlice(r.MultiPath).Equal(x.MultiPath) && + r.Protocol == x.Protocol && + r.Priority == x.Priority && + r.Table == x.Table && + r.Type == x.Type && + r.Tos == x.Tos && + r.Flags == x.Flags && + (r.MPLSDst == x.MPLSDst || (r.MPLSDst != nil && x.MPLSDst != nil && *r.MPLSDst == *x.MPLSDst)) && + (r.NewDst == x.NewDst || (r.NewDst != nil && r.NewDst.Equal(x.NewDst))) && + (r.Encap == x.Encap || (r.Encap != nil && r.Encap.Equal(x.Encap))) +} + func (r *Route) SetFlag(flag NextHopFlag) { r.Flags |= int(flag) } @@ -110,7 +133,46 @@ func (n *NexthopInfo) String() string { elems = append(elems, fmt.Sprintf("Encap: %s", n.Encap)) } elems = append(elems, fmt.Sprintf("Weight: %d", n.Hops+1)) - elems = append(elems, fmt.Sprintf("Gw: %d", n.Gw)) + elems = append(elems, fmt.Sprintf("Gw: %s", n.Gw)) elems = append(elems, fmt.Sprintf("Flags: %s", n.ListFlags())) return fmt.Sprintf("{%s}", strings.Join(elems, " ")) } + +func (n NexthopInfo) Equal(x NexthopInfo) bool { + return n.LinkIndex == x.LinkIndex && + n.Hops == x.Hops && + n.Gw.Equal(x.Gw) && + n.Flags == x.Flags && + (n.NewDst == x.NewDst || (n.NewDst != nil && n.NewDst.Equal(x.NewDst))) && + (n.Encap == x.Encap || (n.Encap != nil && n.Encap.Equal(x.Encap))) +} + +type nexthopInfoSlice []*NexthopInfo + +func (n nexthopInfoSlice) Equal(x []*NexthopInfo) bool { + if len(n) != len(x) { + return false + } + for i := range n { + if n[i] == nil || x[i] == nil { + return false + } + if !n[i].Equal(*x[i]) { + return false + } + } + return true +} + +// ipNetEqual returns 
true iff both IPNet are equal +func ipNetEqual(ipn1 *net.IPNet, ipn2 *net.IPNet) bool { + if ipn1 == ipn2 { + return true + } + if ipn1 == nil || ipn2 == nil { + return false + } + m1, _ := ipn1.Mask.Size() + m2, _ := ipn2.Mask.Size() + return m1 == m2 && ipn1.IP.Equal(ipn2.IP) +} diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go index cd739e71469..fd5ac898354 100644 --- a/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -8,16 +8,17 @@ import ( "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) // RtAttr is shared so it is in netlink_linux.go const ( - SCOPE_UNIVERSE Scope = syscall.RT_SCOPE_UNIVERSE - SCOPE_SITE Scope = syscall.RT_SCOPE_SITE - SCOPE_LINK Scope = syscall.RT_SCOPE_LINK - SCOPE_HOST Scope = syscall.RT_SCOPE_HOST - SCOPE_NOWHERE Scope = syscall.RT_SCOPE_NOWHERE + SCOPE_UNIVERSE Scope = unix.RT_SCOPE_UNIVERSE + SCOPE_SITE Scope = unix.RT_SCOPE_SITE + SCOPE_LINK Scope = unix.RT_SCOPE_LINK + SCOPE_HOST Scope = unix.RT_SCOPE_HOST + SCOPE_NOWHERE Scope = unix.RT_SCOPE_NOWHERE ) const ( @@ -34,8 +35,8 @@ const ( ) const ( - FLAG_ONLINK NextHopFlag = syscall.RTNH_F_ONLINK - FLAG_PERVASIVE NextHopFlag = syscall.RTNH_F_PERVASIVE + FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK + FLAG_PERVASIVE NextHopFlag = unix.RTNH_F_PERVASIVE ) var testFlags = []flagString{ @@ -86,6 +87,34 @@ func (d *MPLSDestination) String() string { return strings.Join(s, "/") } +func (d *MPLSDestination) Equal(x Destination) bool { + o, ok := x.(*MPLSDestination) + if !ok { + return false + } + if d == nil && o == nil { + return true + } + if d == nil || o == nil { + return false + } + if d.Labels == nil && o.Labels == nil { + return true + } + if d.Labels == nil || o.Labels == nil { + return false + } + if len(d.Labels) != len(o.Labels) { + return false + } + for i := range d.Labels { + if d.Labels[i] != 
o.Labels[i] { + return false + } + } + return true +} + type MPLSEncap struct { Labels []int } @@ -96,17 +125,17 @@ func (e *MPLSEncap) Type() int { func (e *MPLSEncap) Decode(buf []byte) error { if len(buf) < 4 { - return fmt.Errorf("Lack of bytes") + return fmt.Errorf("lack of bytes") } native := nl.NativeEndian() l := native.Uint16(buf) if len(buf) < int(l) { - return fmt.Errorf("Lack of bytes") + return fmt.Errorf("lack of bytes") } buf = buf[:l] typ := native.Uint16(buf[2:]) if typ != nl.MPLS_IPTUNNEL_DST { - return fmt.Errorf("Unknown MPLS Encap Type: %d", typ) + return fmt.Errorf("unknown MPLS Encap Type: %d", typ) } e.Labels = nl.DecodeMPLSStack(buf[4:]) return nil @@ -129,6 +158,107 @@ func (e *MPLSEncap) String() string { return strings.Join(s, "/") } +func (e *MPLSEncap) Equal(x Encap) bool { + o, ok := x.(*MPLSEncap) + if !ok { + return false + } + if e == nil && o == nil { + return true + } + if e == nil || o == nil { + return false + } + if e.Labels == nil && o.Labels == nil { + return true + } + if e.Labels == nil || o.Labels == nil { + return false + } + if len(e.Labels) != len(o.Labels) { + return false + } + for i := range e.Labels { + if e.Labels[i] != o.Labels[i] { + return false + } + } + return true +} + +// SEG6 definitions +type SEG6Encap struct { + Mode int + Segments []net.IP +} + +func (e *SEG6Encap) Type() int { + return nl.LWTUNNEL_ENCAP_SEG6 +} +func (e *SEG6Encap) Decode(buf []byte) error { + if len(buf) < 4 { + return fmt.Errorf("lack of bytes") + } + native := nl.NativeEndian() + // Get Length(l) & Type(typ) : 2 + 2 bytes + l := native.Uint16(buf) + if len(buf) < int(l) { + return fmt.Errorf("lack of bytes") + } + buf = buf[:l] // make sure buf size upper limit is Length + typ := native.Uint16(buf[2:]) + if typ != nl.SEG6_IPTUNNEL_SRH { + return fmt.Errorf("unknown SEG6 Type: %d", typ) + } + + var err error + e.Mode, e.Segments, err = nl.DecodeSEG6Encap(buf[4:]) + + return err +} +func (e *SEG6Encap) Encode() ([]byte, error) { + s, 
err := nl.EncodeSEG6Encap(e.Mode, e.Segments) + native := nl.NativeEndian() + hdr := make([]byte, 4) + native.PutUint16(hdr, uint16(len(s)+4)) + native.PutUint16(hdr[2:], nl.SEG6_IPTUNNEL_SRH) + return append(hdr, s...), err +} +func (e *SEG6Encap) String() string { + segs := make([]string, 0, len(e.Segments)) + // append segment backwards (from n to 0) since seg#0 is the last segment. + for i := len(e.Segments); i > 0; i-- { + segs = append(segs, fmt.Sprintf("%s", e.Segments[i-1])) + } + str := fmt.Sprintf("mode %s segs %d [ %s ]", nl.SEG6EncapModeString(e.Mode), + len(e.Segments), strings.Join(segs, " ")) + return str +} +func (e *SEG6Encap) Equal(x Encap) bool { + o, ok := x.(*SEG6Encap) + if !ok { + return false + } + if e == o { + return true + } + if e == nil || o == nil { + return false + } + if e.Mode != o.Mode { + return false + } + if len(e.Segments) != len(o.Segments) { + return false + } + for i := range e.Segments { + if !e.Segments[i].Equal(o.Segments[i]) { + return false + } + } + return true +} + // RouteAdd will add a route to the system. // Equivalent to: `ip route add $route` func RouteAdd(route *Route) error { @@ -138,8 +268,8 @@ func RouteAdd(route *Route) error { // RouteAdd will add a route to the system. // Equivalent to: `ip route add $route` func (h *Handle) RouteAdd(route *Route) error { - flags := syscall.NLM_F_CREATE | syscall.NLM_F_EXCL | syscall.NLM_F_ACK - req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags) + flags := unix.NLM_F_CREATE | unix.NLM_F_EXCL | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) return h.routeHandle(route, req, nl.NewRtMsg()) } @@ -152,8 +282,8 @@ func RouteReplace(route *Route) error { // RouteReplace will add a route to the system. 
// Equivalent to: `ip route replace $route` func (h *Handle) RouteReplace(route *Route) error { - flags := syscall.NLM_F_CREATE | syscall.NLM_F_REPLACE | syscall.NLM_F_ACK - req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags) + flags := unix.NLM_F_CREATE | unix.NLM_F_REPLACE | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) return h.routeHandle(route, req, nl.NewRtMsg()) } @@ -166,7 +296,7 @@ func RouteDel(route *Route) error { // RouteDel will delete a route from the system. // Equivalent to: `ip route del $route` func (h *Handle) RouteDel(route *Route) error { - req := h.newNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELROUTE, unix.NLM_F_ACK) return h.routeHandle(route, req, nl.NewRtDelMsg()) } @@ -189,12 +319,12 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } else { dstData = route.Dst.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, dstData)) } else if route.MPLSDst != nil { family = nl.FAMILY_MPLS msg.Dst_len = uint8(20) - msg.Type = syscall.RTN_UNICAST - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst))) + msg.Type = unix.RTN_UNICAST + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst))) } if route.NewDst != nil { @@ -232,7 +362,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg srcData = route.Src.To16() } // The commonly used src ip for routes is actually PREFSRC - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PREFSRC, srcData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PREFSRC, srcData)) } if route.Gw != nil { @@ -247,14 +377,14 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } else { gwData = route.Gw.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_GATEWAY, gwData)) + rtAttrs = 
append(rtAttrs, nl.NewRtAttr(unix.RTA_GATEWAY, gwData)) } if len(route.MultiPath) > 0 { buf := []byte{} for _, nh := range route.MultiPath { rtnh := &nl.RtNexthop{ - RtNexthop: syscall.RtNexthop{ + RtNexthop: unix.RtNexthop{ Hops: uint8(nh.Hops), Ifindex: int32(nh.LinkIndex), Flags: uint8(nh.Flags), @@ -267,9 +397,9 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg return fmt.Errorf("gateway, source, and destination ip are not the same IP family") } if gwFamily == FAMILY_V4 { - children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To4()))) + children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To4()))) } else { - children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To16()))) + children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To16()))) } } if nh.NewDst != nil { @@ -295,15 +425,15 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg rtnh.Children = children buf = append(buf, rtnh.Serialize()...) 
} - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_MULTIPATH, buf)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_MULTIPATH, buf)) } if route.Table > 0 { if route.Table >= 256 { - msg.Table = syscall.RT_TABLE_UNSPEC + msg.Table = unix.RT_TABLE_UNSPEC b := make([]byte, 4) native.PutUint32(b, uint32(route.Table)) - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_TABLE, b)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_TABLE, b)) } else { msg.Table = uint8(route.Table) } @@ -312,7 +442,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if route.Priority > 0 { b := make([]byte, 4) native.PutUint32(b, uint32(route.Priority)) - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PRIORITY, b)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PRIORITY, b)) } if route.Tos > 0 { msg.Tos = uint8(route.Tos) @@ -324,6 +454,25 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg msg.Type = uint8(route.Type) } + var metrics []*nl.RtAttr + // TODO: support other rta_metric values + if route.MTU > 0 { + b := nl.Uint32Attr(uint32(route.MTU)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_MTU, b)) + } + if route.AdvMSS > 0 { + b := nl.Uint32Attr(uint32(route.AdvMSS)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_ADVMSS, b)) + } + + if metrics != nil { + attr := nl.NewRtAttr(unix.RTA_METRICS, nil) + for _, metric := range metrics { + attr.AddChild(metric) + } + rtAttrs = append(rtAttrs, attr) + } + msg.Flags = uint32(route.Flags) msg.Scope = uint8(route.Scope) msg.Family = uint8(family) @@ -338,9 +487,9 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg ) native.PutUint32(b, uint32(route.LinkIndex)) - req.AddData(nl.NewRtAttr(syscall.RTA_OIF, b)) + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -373,11 +522,11 @@ func RouteListFiltered(family int, filter 
*Route, filterMask uint64) ([]Route, e // RouteListFiltered gets a list of routes in the system filtered with specified rules. // All rules must be defined in RouteFilter struct func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { - req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) infmsg := nl.NewIfInfomsg(family) req.AddData(infmsg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) if err != nil { return nil, err } @@ -385,11 +534,11 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) var res []Route for _, m := range msgs { msg := nl.DeserializeRtMsg(m) - if msg.Flags&syscall.RTM_F_CLONED != 0 { + if msg.Flags&unix.RTM_F_CLONED != 0 { // Ignore cloned routes continue } - if msg.Table != syscall.RT_TABLE_MAIN { + if msg.Table != unix.RT_TABLE_MAIN { if filter == nil || filter != nil && filterMask&RT_FILTER_TABLE == 0 { // Ignore non-main tables continue @@ -401,7 +550,7 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) } if filter != nil { switch { - case filterMask&RT_FILTER_TABLE != 0 && filter.Table != syscall.RT_TABLE_UNSPEC && route.Table != filter.Table: + case filterMask&RT_FILTER_TABLE != 0 && filter.Table != unix.RT_TABLE_UNSPEC && route.Table != filter.Table: continue case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol: continue @@ -421,19 +570,8 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) continue case filterMask&RT_FILTER_DST != 0: if filter.MPLSDst == nil || route.MPLSDst == nil || (*filter.MPLSDst) != (*route.MPLSDst) { - if filter.Dst == nil { - if route.Dst != nil { - continue - } - } else { - if route.Dst == nil { - continue - } - aMaskLen, aMaskBits := route.Dst.Mask.Size() - bMaskLen, bMaskBits := 
filter.Dst.Mask.Size() - if !(route.Dst.IP.Equal(filter.Dst.IP) && aMaskLen == bMaskLen && aMaskBits == bMaskBits) { - continue - } + if !ipNetEqual(route.Dst, filter.Dst) { + continue } } } @@ -463,11 +601,11 @@ func deserializeRoute(m []byte) (Route, error) { var encap, encapType syscall.NetlinkRouteAttr for _, attr := range attrs { switch attr.Attr.Type { - case syscall.RTA_GATEWAY: + case unix.RTA_GATEWAY: route.Gw = net.IP(attr.Value) - case syscall.RTA_PREFSRC: + case unix.RTA_PREFSRC: route.Src = net.IP(attr.Value) - case syscall.RTA_DST: + case unix.RTA_DST: if msg.Family == nl.FAMILY_MPLS { stack := nl.DecodeMPLSStack(attr.Value) if len(stack) == 0 || len(stack) > 1 { @@ -480,36 +618,36 @@ func deserializeRoute(m []byte) (Route, error) { Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)), } } - case syscall.RTA_OIF: + case unix.RTA_OIF: route.LinkIndex = int(native.Uint32(attr.Value[0:4])) - case syscall.RTA_IIF: + case unix.RTA_IIF: route.ILinkIndex = int(native.Uint32(attr.Value[0:4])) - case syscall.RTA_PRIORITY: + case unix.RTA_PRIORITY: route.Priority = int(native.Uint32(attr.Value[0:4])) - case syscall.RTA_TABLE: + case unix.RTA_TABLE: route.Table = int(native.Uint32(attr.Value[0:4])) - case syscall.RTA_MULTIPATH: + case unix.RTA_MULTIPATH: parseRtNexthop := func(value []byte) (*NexthopInfo, []byte, error) { - if len(value) < syscall.SizeofRtNexthop { - return nil, nil, fmt.Errorf("Lack of bytes") + if len(value) < unix.SizeofRtNexthop { + return nil, nil, fmt.Errorf("lack of bytes") } nh := nl.DeserializeRtNexthop(value) if len(value) < int(nh.RtNexthop.Len) { - return nil, nil, fmt.Errorf("Lack of bytes") + return nil, nil, fmt.Errorf("lack of bytes") } info := &NexthopInfo{ LinkIndex: int(nh.RtNexthop.Ifindex), Hops: int(nh.RtNexthop.Hops), Flags: int(nh.RtNexthop.Flags), } - attrs, err := nl.ParseRouteAttr(value[syscall.SizeofRtNexthop:int(nh.RtNexthop.Len)]) + attrs, err := 
nl.ParseRouteAttr(value[unix.SizeofRtNexthop:int(nh.RtNexthop.Len)]) if err != nil { return nil, nil, err } var encap, encapType syscall.NetlinkRouteAttr for _, attr := range attrs { switch attr.Attr.Type { - case syscall.RTA_GATEWAY: + case unix.RTA_GATEWAY: info.Gw = net.IP(attr.Value) case nl.RTA_NEWDST: var d Destination @@ -566,6 +704,19 @@ func deserializeRoute(m []byte) (Route, error) { encapType = attr case nl.RTA_ENCAP: encap = attr + case unix.RTA_METRICS: + metrics, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return route, err + } + for _, metric := range metrics { + switch metric.Attr.Type { + case unix.RTAX_MTU: + route.MTU = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_ADVMSS: + route.AdvMSS = int(native.Uint32(metric.Value[0:4])) + } + } } } @@ -578,6 +729,11 @@ func deserializeRoute(m []byte) (Route, error) { if err := e.Decode(encap.Value); err != nil { return route, err } + case nl.LWTUNNEL_ENCAP_SEG6: + e = &SEG6Encap{} + if err := e.Decode(encap.Value); err != nil { + return route, err + } } route.Encap = e } @@ -594,7 +750,7 @@ func RouteGet(destination net.IP) ([]Route, error) { // RouteGet gets a route to a specific destination from the host system. // Equivalent to: 'ip route get'. 
func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { - req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST) + req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_REQUEST) family := nl.GetIPFamily(destination) var destinationData []byte var bitlen uint8 @@ -610,10 +766,10 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { msg.Dst_len = bitlen req.AddData(msg) - rtaDst := nl.NewRtAttr(syscall.RTA_DST, destinationData) + rtaDst := nl.NewRtAttr(unix.RTA_DST, destinationData) req.AddData(rtaDst) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) if err != nil { return nil, err } @@ -633,17 +789,35 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { // RouteSubscribe takes a chan down which notifications will be sent // when routes are added or deleted. Close the 'done' chan to stop subscription. func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error { - return routeSubscribeAt(netns.None(), netns.None(), ch, done) + return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil) } // RouteSubscribeAt works like RouteSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error { - return routeSubscribeAt(ns, netns.None(), ch, done) + return routeSubscribeAt(ns, netns.None(), ch, done, nil) } -func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error { - s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE) +// RouteSubscribeOptions contains a set of options to use with +// RouteSubscribeWithOptions. 
+type RouteSubscribeOptions struct { + Namespace *netns.NsHandle + ErrorCallback func(error) +} + +// RouteSubscribeWithOptions work like RouteSubscribe but enable to +// provide additional options to modify the behavior. Currently, the +// namespace can be provided as well as an error callback. +func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, options RouteSubscribeOptions) error { + if options.Namespace == nil { + none := netns.None() + options.Namespace = &none + } + return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback) +} + +func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error)) error { + s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_ROUTE, unix.RTNLGRP_IPV6_ROUTE) if err != nil { return err } @@ -658,11 +832,17 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < for { msgs, err := s.Receive() if err != nil { + if cberr != nil { + cberr(err) + } return } for _, m := range msgs { route, err := deserializeRoute(m.Data) if err != nil { + if cberr != nil { + cberr(err) + } return } ch <- RouteUpdate{Type: m.Header.Type, Route: route} diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go index f0243defd7d..7fc8ae5df15 100644 --- a/vendor/github.com/vishvananda/netlink/rule.go +++ b/vendor/github.com/vishvananda/netlink/rule.go @@ -8,6 +8,7 @@ import ( // Rule represents a netlink rule. 
type Rule struct { Priority int + Family int Table int Mark int Mask int @@ -20,6 +21,7 @@ type Rule struct { OifName string SuppressIfgroup int SuppressPrefixlen int + Invert bool } func (r Rule) String() string { diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go index f9cdc855f1f..6238ae45864 100644 --- a/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -3,11 +3,13 @@ package netlink import ( "fmt" "net" - "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) +const FibRuleInvert = 0x2 + // RuleAdd adds a rule to the system. // Equivalent to: ip rule add func RuleAdd(rule *Rule) error { @@ -17,7 +19,7 @@ func RuleAdd(rule *Rule) error { // RuleAdd adds a rule to the system. // Equivalent to: ip rule add func (h *Handle) RuleAdd(rule *Rule) error { - req := h.newNetlinkRequest(syscall.RTM_NEWRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWRULE, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) return ruleHandle(rule, req) } @@ -30,15 +32,31 @@ func RuleDel(rule *Rule) error { // RuleDel deletes a rule from the system. 
// Equivalent to: ip rule del func (h *Handle) RuleDel(rule *Rule) error { - req := h.newNetlinkRequest(syscall.RTM_DELRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_DELRULE, unix.NLM_F_ACK) return ruleHandle(rule, req) } func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { msg := nl.NewRtMsg() - msg.Family = syscall.AF_INET - var dstFamily uint8 + msg.Family = unix.AF_INET + msg.Protocol = unix.RTPROT_BOOT + msg.Scope = unix.RT_SCOPE_UNIVERSE + msg.Table = unix.RT_TABLE_UNSPEC + msg.Type = unix.RTN_UNSPEC + if req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 { + msg.Type = unix.RTN_UNICAST + } + if rule.Invert { + msg.Flags |= FibRuleInvert + } + if rule.Family != 0 { + msg.Family = uint8(rule.Family) + } + if rule.Table >= 0 && rule.Table < 256 { + msg.Table = uint8(rule.Table) + } + var dstFamily uint8 var rtAttrs []*nl.RtAttr if rule.Dst != nil && rule.Dst.IP != nil { dstLen, _ := rule.Dst.Mask.Size() @@ -46,12 +64,12 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { msg.Family = uint8(nl.GetIPFamily(rule.Dst.IP)) dstFamily = msg.Family var dstData []byte - if msg.Family == syscall.AF_INET { + if msg.Family == unix.AF_INET { dstData = rule.Dst.IP.To4() } else { dstData = rule.Dst.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, dstData)) } if rule.Src != nil && rule.Src.IP != nil { @@ -62,19 +80,12 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { srcLen, _ := rule.Src.Mask.Size() msg.Src_len = uint8(srcLen) var srcData []byte - if msg.Family == syscall.AF_INET { + if msg.Family == unix.AF_INET { srcData = rule.Src.IP.To4() } else { srcData = rule.Src.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_SRC, srcData)) - } - - if rule.Table >= 0 { - msg.Table = uint8(rule.Table) - if rule.Table >= 256 { - msg.Table = syscall.RT_TABLE_UNSPEC - } + rtAttrs = append(rtAttrs, 
nl.NewRtAttr(unix.RTA_SRC, srcData)) } req.AddData(msg) @@ -139,7 +150,7 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) } - _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -152,11 +163,11 @@ func RuleList(family int) ([]Rule, error) { // RuleList lists rules in the system. // Equivalent to: ip rule list func (h *Handle) RuleList(family int) ([]Rule, error) { - req := h.newNetlinkRequest(syscall.RTM_GETRULE, syscall.NLM_F_DUMP|syscall.NLM_F_REQUEST) + req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWRULE) + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE) if err != nil { return nil, err } @@ -172,9 +183,11 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { rule := NewRule() + rule.Invert = msg.Flags&FibRuleInvert > 0 + for j := range attrs { switch attrs[j].Attr.Type { - case syscall.RTA_TABLE: + case unix.RTA_TABLE: rule.Table = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_SRC: rule.Src = &net.IPNet{ diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go index b42b84f0cfe..99e9fb4d897 100644 --- a/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -4,9 +4,9 @@ import ( "errors" "fmt" "net" - "syscall" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) const ( @@ -123,15 +123,15 @@ func SocketGet(local, remote net.Addr) (*Socket, error) { return nil, ErrNotImplemented } - s, err := nl.Subscribe(syscall.NETLINK_INET_DIAG) + s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) if err != nil { return nil, err } defer s.Close() req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, 0) req.AddData(&socketRequest{ - Family: syscall.AF_INET, - 
Protocol: syscall.IPPROTO_TCP, + Family: unix.AF_INET, + Protocol: unix.IPPROTO_TCP, ID: SocketID{ SourcePort: uint16(localTCP.Port), DestinationPort: uint16(remoteTCP.Port), diff --git a/vendor/github.com/vishvananda/netlink/xfrm.go b/vendor/github.com/vishvananda/netlink/xfrm.go index 9962dcf7006..02b41842e10 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm.go +++ b/vendor/github.com/vishvananda/netlink/xfrm.go @@ -2,19 +2,20 @@ package netlink import ( "fmt" - "syscall" + + "golang.org/x/sys/unix" ) // Proto is an enum representing an ipsec protocol. type Proto uint8 const ( - XFRM_PROTO_ROUTE2 Proto = syscall.IPPROTO_ROUTING - XFRM_PROTO_ESP Proto = syscall.IPPROTO_ESP - XFRM_PROTO_AH Proto = syscall.IPPROTO_AH - XFRM_PROTO_HAO Proto = syscall.IPPROTO_DSTOPTS + XFRM_PROTO_ROUTE2 Proto = unix.IPPROTO_ROUTING + XFRM_PROTO_ESP Proto = unix.IPPROTO_ESP + XFRM_PROTO_AH Proto = unix.IPPROTO_AH + XFRM_PROTO_HAO Proto = unix.IPPROTO_DSTOPTS XFRM_PROTO_COMP Proto = 0x6c // NOTE not defined on darwin - XFRM_PROTO_IPSEC_ANY Proto = syscall.IPPROTO_RAW + XFRM_PROTO_IPSEC_ANY Proto = unix.IPPROTO_RAW ) func (p Proto) String() string { diff --git a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go index 7b98c9cb6d3..efe72ddf29c 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go @@ -2,11 +2,10 @@ package netlink import ( "fmt" - "syscall" - - "github.com/vishvananda/netns" "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" ) type XfrmMsg interface { @@ -39,7 +38,7 @@ func XfrmMonitor(ch chan<- XfrmMsg, done <-chan struct{}, errorChan chan<- error if err != nil { return nil } - s, err := nl.SubscribeAt(netns.None(), netns.None(), syscall.NETLINK_XFRM, groups...) + s, err := nl.SubscribeAt(netns.None(), netns.None(), unix.NETLINK_XFRM, groups...) 
if err != nil { return err } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index c3d4e422272..fde0c2ca5ad 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -1,9 +1,8 @@ package netlink import ( - "syscall" - "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) { @@ -55,7 +54,7 @@ func (h *Handle) XfrmPolicyUpdate(policy *XfrmPolicy) error { } func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { - req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) msg := &nl.XfrmUserpolicyInfo{} selFromPolicy(&msg.Sel, policy) @@ -91,7 +90,7 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { req.AddData(out) } - _, err := req.Execute(syscall.NETLINK_XFRM, 0) + _, err := req.Execute(unix.NETLINK_XFRM, 0) return err } @@ -121,12 +120,12 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) { // Equivalent to: `ip xfrm policy show`. // The list can be filtered by ip family. func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) { - req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, unix.NLM_F_DUMP) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) + msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) if err != nil { return nil, err } @@ -165,13 +164,13 @@ func XfrmPolicyFlush() error { // XfrmPolicyFlush will flush the policies on the system. 
// Equivalent to: `ip xfrm policy flush` func (h *Handle) XfrmPolicyFlush() error { - req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, syscall.NLM_F_ACK) - _, err := req.Execute(syscall.NETLINK_XFRM, 0) + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, unix.NLM_F_ACK) + _, err := req.Execute(unix.NETLINK_XFRM, 0) return err } func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPolicy, error) { - req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, unix.NLM_F_ACK) msg := &nl.XfrmUserpolicyId{} selFromPolicy(&msg.Sel, policy) @@ -189,7 +188,7 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo resType = 0 } - msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType)) + msgs, err := req.Execute(unix.NETLINK_XFRM, uint16(resType)) if err != nil { return nil, err } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go b/vendor/github.com/vishvananda/netlink/xfrm_state.go index 368a9b986d6..d14740dc55b 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state.go @@ -3,6 +3,7 @@ package netlink import ( "fmt" "net" + "time" ) // XfrmStateAlgo represents the algorithm to use for the ipsec encryption. @@ -67,6 +68,19 @@ type XfrmStateLimits struct { TimeUseHard uint64 } +// XfrmStateStats represents the current number of bytes/packets +// processed by this State, the State's installation and first use +// time and the replay window counters. +type XfrmStateStats struct { + ReplayWindow uint32 + Replay uint32 + Failed uint32 + Bytes uint64 + Packets uint64 + AddTime uint64 + UseTime uint64 +} + // XfrmState represents the state of an ipsec policy. It optionally // contains an XfrmStateAlgo for encryption and one for authentication. 
type XfrmState struct { @@ -78,6 +92,7 @@ type XfrmState struct { Reqid int ReplayWindow int Limits XfrmStateLimits + Statistics XfrmStateStats Mark *XfrmMark Auth *XfrmStateAlgo Crypt *XfrmStateAlgo @@ -94,10 +109,16 @@ func (sa XfrmState) Print(stats bool) string { if !stats { return sa.String() } - - return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d", + at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate) + ut := "-" + if sa.Statistics.UseTime > 0 { + ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate) + } + return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+ + "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d", sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard), - sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard) + sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut, + sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed) } func printLimit(lmt uint64) string { diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 6a7bc0deca2..7fc92900c05 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -2,10 +2,10 @@ package netlink import ( "fmt" - "syscall" "unsafe" "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" ) func writeStateAlgo(a *XfrmStateAlgo) []byte { @@ -111,7 +111,7 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { if state.Spi == 0 { return fmt.Errorf("Spi 
must be set when adding xfrm state.") } - req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) msg := xfrmUsersaInfoFromXfrmState(state) @@ -157,13 +157,13 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { req.AddData(out) } - _, err := req.Execute(syscall.NETLINK_XFRM, 0) + _, err := req.Execute(unix.NETLINK_XFRM, 0) return err } func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { req := h.newNetlinkRequest(nl.XFRM_MSG_ALLOCSPI, - syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) msg := &nl.XfrmUserSpiInfo{} msg.XfrmUsersaInfo = *(xfrmUsersaInfoFromXfrmState(state)) @@ -177,7 +177,7 @@ func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { req.AddData(out) } - msgs, err := req.Execute(syscall.NETLINK_XFRM, 0) + msgs, err := req.Execute(unix.NETLINK_XFRM, 0) if err != nil { return nil, err } @@ -216,9 +216,9 @@ func XfrmStateList(family int) ([]XfrmState, error) { // Equivalent to: `ip xfrm state show`. // The list can be filtered by ip family. 
func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) { - req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, unix.NLM_F_DUMP) - msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) + msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) if err != nil { return nil, err } @@ -255,7 +255,7 @@ func (h *Handle) XfrmStateGet(state *XfrmState) (*XfrmState, error) { } func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState, error) { - req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, unix.NLM_F_ACK) msg := &nl.XfrmUsersaId{} msg.Family = uint16(nl.GetIPFamily(state.Dst)) @@ -278,7 +278,7 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState resType = 0 } - msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType)) + msgs, err := req.Execute(unix.NETLINK_XFRM, uint16(resType)) if err != nil { return nil, err } @@ -308,6 +308,7 @@ func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { state.Reqid = int(msg.Reqid) state.ReplayWindow = int(msg.ReplayWindow) lftToLimits(&msg.Lft, &state.Limits) + curToStats(&msg.Curlft, &msg.Stats, &state.Statistics) return &state } @@ -386,11 +387,11 @@ func XfrmStateFlush(proto Proto) error { // proto = 0 means any transformation protocols // Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]` func (h *Handle) XfrmStateFlush(proto Proto) error { - req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, syscall.NLM_F_ACK) + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, unix.NLM_F_ACK) req.AddData(&nl.XfrmUsersaFlush{Proto: uint8(proto)}) - _, err := req.Execute(syscall.NETLINK_XFRM, 0) + _, err := req.Execute(unix.NETLINK_XFRM, 0) if err != nil { return err } @@ -429,6 +430,16 @@ func lftToLimits(lft *nl.XfrmLifetimeCfg, lmts *XfrmStateLimits) { *lmts = *(*XfrmStateLimits)(unsafe.Pointer(lft)) } +func curToStats(cur 
*nl.XfrmLifetimeCur, wstats *nl.XfrmStats, stats *XfrmStateStats) { + stats.Bytes = cur.Bytes + stats.Packets = cur.Packets + stats.AddTime = cur.AddTime + stats.UseTime = cur.UseTime + stats.ReplayWindow = wstats.ReplayWindow + stats.Replay = wstats.Replay + stats.Failed = wstats.IntegrityFailed +} + func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo { msg := &nl.XfrmUsersaInfo{} msg.Family = uint16(nl.GetIPFamily(state.Dst)) diff --git a/vendor/github.com/vishvananda/netns/netns_linux.go b/vendor/github.com/vishvananda/netns/netns_linux.go index a267c710595..34af6e28b30 100644 --- a/vendor/github.com/vishvananda/netns/netns_linux.go +++ b/vendor/github.com/vishvananda/netns/netns_linux.go @@ -188,6 +188,8 @@ func getPidForContainer(id string) (int, error) { filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", "tasks"), // Even more recent docker versions under cgroup/systemd/docker// filepath.Join(cgroupRoot, "..", "systemd", "docker", id, "tasks"), + // Kubernetes with docker and CNI is even more different + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, "tasks"), } var filename string From a9b67a4b52cab6514755a573c2599c06f7a1844a Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Wed, 29 Nov 2017 17:11:35 +0800 Subject: [PATCH 338/794] fix ipvs proxier nodeport --- pkg/proxy/ipvs/netlink.go | 11 +++- pkg/proxy/ipvs/netlink_linux.go | 66 +++++++++++++++++++++++- pkg/proxy/ipvs/netlink_unsupported.go | 7 +++ pkg/proxy/ipvs/proxier.go | 74 +++++++++++++++++---------- pkg/proxy/ipvs/testing/fake.go | 55 ++++++++++++++++++-- 5 files changed, 178 insertions(+), 35 deletions(-) diff --git a/pkg/proxy/ipvs/netlink.go b/pkg/proxy/ipvs/netlink.go index 4f66f706ee5..45551da36ad 100644 --- a/pkg/proxy/ipvs/netlink.go +++ b/pkg/proxy/ipvs/netlink.go @@ -16,14 +16,21 @@ limitations under the License. 
package ipvs +import ( + "k8s.io/apimachinery/pkg/util/sets" +) + // NetLinkHandle for revoke netlink interface type NetLinkHandle interface { - // EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true. + // EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true. EnsureAddressBind(address, devName string) (exist bool, err error) // UnbindAddress unbind address from the interface UnbindAddress(address, devName string) error - // EnsureDummyDevice checks if dummy device is exist and, if not, create one. If the dummy device is already exist, return true. + // EnsureDummyDevice checks if dummy device is exist and, if not, create one. If the dummy device is already exist, return true. EnsureDummyDevice(devName string) (exist bool, err error) // DeleteDummyDevice deletes the given dummy device by name. DeleteDummyDevice(devName string) error + // GetLocalAddresses returns all unique local type IP addresses based on filter device interface. If filter device is not given, + // it will list all unique local type addresses. + GetLocalAddresses(filterDev string) (sets.String, error) } diff --git a/pkg/proxy/ipvs/netlink_linux.go b/pkg/proxy/ipvs/netlink_linux.go index e709afafabb..2553e4c6243 100644 --- a/pkg/proxy/ipvs/netlink_linux.go +++ b/pkg/proxy/ipvs/netlink_linux.go @@ -22,6 +22,14 @@ import ( "fmt" "net" "syscall" + // TODO: replace syscall with golang.org/x/sys/unix? + // The Go doc for syscall says: + // NOTE: This package is locked down. + // Code outside the standard Go repository should be migrated to use the corresponding package in the golang.org/x/sys repository. + // That is also where updates required by new systems or versions should be applied. + // See https://golang.org/s/go1.4-syscall for more information. 
+ + "k8s.io/apimachinery/pkg/util/sets" "github.com/vishvananda/netlink" ) @@ -30,7 +38,7 @@ type netlinkHandle struct { netlink.Handle } -// NewNetLinkHandle will crate a new netlinkHandle +// NewNetLinkHandle will crate a new NetLinkHandle func NewNetLinkHandle() NetLinkHandle { return &netlinkHandle{netlink.Handle{}} } @@ -96,3 +104,59 @@ func (h *netlinkHandle) DeleteDummyDevice(devName string) error { } return h.LinkDel(dummy) } + +// GetLocalAddresses lists all LOCAL type IP addresses from host based on filter device. +// If filter device is not specified, it's equivalent to exec: +// $ ip route show table local type local proto kernel +// 10.0.0.1 dev kube-ipvs0 scope host src 10.0.0.1 +// 10.0.0.10 dev kube-ipvs0 scope host src 10.0.0.10 +// 10.0.0.252 dev kube-ipvs0 scope host src 10.0.0.252 +// 100.106.89.164 dev eth0 scope host src 100.106.89.164 +// 127.0.0.0/8 dev lo scope host src 127.0.0.1 +// 127.0.0.1 dev lo scope host src 127.0.0.1 +// 172.17.0.1 dev docker0 scope host src 172.17.0.1 +// 192.168.122.1 dev virbr0 scope host src 192.168.122.1 +// Then cut the unique src IP fields, +// --> result set: [10.0.0.1, 10.0.0.10, 10.0.0.252, 100.106.89.164, 127.0.0.1, 192.168.122.1] + +// If filter device is specified, it's equivalent to exec: +// $ ip route show table local type local proto kernel dev kube-ipvs0 +// 10.0.0.1 scope host src 10.0.0.1 +// 10.0.0.10 scope host src 10.0.0.10 +// Then cut the unique src IP fields, +// --> result set: [10.0.0.1, 10.0.0.10] +func (h *netlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) { + linkIndex := -1 + if len(filterDev) != 0 { + link, err := h.LinkByName(filterDev) + if err != nil { + return nil, fmt.Errorf("error get filter device %s, err: %v", filterDev, err) + } + linkIndex = link.Attrs().Index + } + + routeFilter := &netlink.Route{ + Table: syscall.RT_TABLE_LOCAL, + Type: syscall.RTN_LOCAL, + Protocol: syscall.RTPROT_KERNEL, + } + filterMask := netlink.RT_FILTER_TABLE | 
netlink.RT_FILTER_TYPE | netlink.RT_FILTER_PROTOCOL + + // find filter device + if linkIndex != -1 { + routeFilter.LinkIndex = linkIndex + filterMask |= netlink.RT_FILTER_OIF + } + + routes, err := h.RouteListFiltered(netlink.FAMILY_ALL, routeFilter, filterMask) + if err != nil { + return nil, fmt.Errorf("error list route table, err: %v", err) + } + res := sets.NewString() + for _, route := range routes { + if route.Src != nil { + res.Insert(route.Src.String()) + } + } + return res, nil +} diff --git a/pkg/proxy/ipvs/netlink_unsupported.go b/pkg/proxy/ipvs/netlink_unsupported.go index 1e22685b279..b70550387ad 100644 --- a/pkg/proxy/ipvs/netlink_unsupported.go +++ b/pkg/proxy/ipvs/netlink_unsupported.go @@ -20,6 +20,8 @@ package ipvs import ( "fmt" + + "k8s.io/apimachinery/pkg/util/sets" ) type emptyHandle struct { @@ -49,3 +51,8 @@ func (h *emptyHandle) EnsureDummyDevice(devName string) (bool, error) { func (h *emptyHandle) DeleteDummyDevice(devName string) error { return fmt.Errorf("netlink is not supported in this platform") } + +// GetLocalAddresses is part of interface. +func (h *emptyHandle) GetLocalAddresses(filterDev string) (sets.String, error) { + return nil, fmt.Errorf("netlink is not supported in this platform") +} diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 7e15fd8ac93..9f0def1e1de 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -35,7 +35,6 @@ import ( clientv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -171,36 +170,57 @@ type IPGetter interface { NodeIPs() ([]net.IP, error) } -type realIPGetter struct{} +// realIPGetter is a real NodeIP handler, it implements IPGetter. 
+type realIPGetter struct { + // nl is a handle for revoking netlink interface + nl NetLinkHandle +} +// NodeIPs returns all LOCAL type IP addresses from host which are taken as the Node IPs of NodePort service. +// Firstly, it will list source IP exists in local route table with `kernel` protocol type. For example, +// $ ip route show table local type local proto kernel +// 10.0.0.1 dev kube-ipvs0 scope host src 10.0.0.1 +// 10.0.0.10 dev kube-ipvs0 scope host src 10.0.0.10 +// 10.0.0.252 dev kube-ipvs0 scope host src 10.0.0.252 +// 100.106.89.164 dev eth0 scope host src 100.106.89.164 +// 127.0.0.0/8 dev lo scope host src 127.0.0.1 +// 127.0.0.1 dev lo scope host src 127.0.0.1 +// 172.17.0.1 dev docker0 scope host src 172.17.0.1 +// 192.168.122.1 dev virbr0 scope host src 192.168.122.1 +// Then cut the unique src IP fields, +// --> result set1: [10.0.0.1, 10.0.0.10, 10.0.0.252, 100.106.89.164, 127.0.0.1, 192.168.122.1] + +// NOTE: For cases where an LB acts as a VIP (e.g. Google cloud), the VIP IP is considered LOCAL, but the protocol +// of the entry is 66, e.g. `10.128.0.6 dev ens4 proto 66 scope host`. Therefore, the rule mentioned above will +// filter these entries out. + +// Secondly, as we bind Cluster IPs to the dummy interface in IPVS proxier, we need to filter the them out so that +// we can eventually get the Node IPs. Fortunately, the dummy interface created by IPVS proxier is known as `kube-ipvs0`, +// so we just need to specify the `dev kube-ipvs0` argument in ip route command, for example, +// $ ip route show table local type local proto kernel dev kube-ipvs0 +// 10.0.0.1 scope host src 10.0.0.1 +// 10.0.0.10 scope host src 10.0.0.10 +// Then cut the unique src IP fields, +// --> result set2: [10.0.0.1, 10.0.0.10] + +// Finally, Node IP set = set1 - set2 func (r *realIPGetter) NodeIPs() (ips []net.IP, err error) { - interfaces, err := net.Interfaces() + // Pass in empty filter device name for list all LOCAL type addresses. 
+ allAddress, err := r.nl.GetLocalAddresses("") if err != nil { - return nil, err + return nil, fmt.Errorf("error listing LOCAL type addresses from host, error: %v", err) } - for i := range interfaces { - name := interfaces[i].Name - // We assume node ip bind to eth{x} - if !strings.HasPrefix(name, "eth") { - continue - } - intf, err := net.InterfaceByName(name) - if err != nil { - utilruntime.HandleError(fmt.Errorf("Failed to get interface by name: %s, error: %v", name, err)) - continue - } - addrs, err := intf.Addrs() - if err != nil { - utilruntime.HandleError(fmt.Errorf("Failed to get addresses from interface: %s, error: %v", name, err)) - continue - } - for _, a := range addrs { - if ipnet, ok := a.(*net.IPNet); ok { - ips = append(ips, ipnet.IP) - } - } + dummyAddress, err := r.nl.GetLocalAddresses(DefaultDummyDevice) + if err != nil { + return nil, fmt.Errorf("error listing LOCAL type addresses from device: %s, error: %v", DefaultDummyDevice, err) } - return + // exclude ip address from dummy interface created by IPVS proxier - they are all Cluster IPs. + nodeAddress := allAddress.Difference(dummyAddress) + // translate ip string to IP + for _, ipStr := range nodeAddress.UnsortedList() { + ips = append(ips, net.ParseIP(ipStr)) + } + return ips, nil } // Proxier implements ProxyProvider @@ -294,7 +314,7 @@ func NewProxier(ipt utiliptables.Interface, healthzServer: healthzServer, ipvs: ipvs, ipvsScheduler: scheduler, - ipGetter: &realIPGetter{}, + ipGetter: &realIPGetter{nl: NewNetLinkHandle()}, iptablesData: bytes.NewBuffer(nil), natChains: bytes.NewBuffer(nil), natRules: bytes.NewBuffer(nil), diff --git a/pkg/proxy/ipvs/testing/fake.go b/pkg/proxy/ipvs/testing/fake.go index 6b3f0ddc1ba..561e8b3c6b0 100644 --- a/pkg/proxy/ipvs/testing/fake.go +++ b/pkg/proxy/ipvs/testing/fake.go @@ -16,21 +16,33 @@ limitations under the License. 
package testing -//FakeNetlinkHandle mock implementation of proxy NetlinkHandle +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// FakeNetlinkHandle mock implementation of proxy NetlinkHandle type FakeNetlinkHandle struct { + // localAddresses is a network interface name to all of its IP addresses map, e.g. + // eth0 -> [1.2.3.4, 10.20.30.40] + localAddresses map[string][]string } -//NewFakeNetlinkHandle will create a new FakeNetlinkHandle +// NewFakeNetlinkHandle will create a new FakeNetlinkHandle func NewFakeNetlinkHandle() *FakeNetlinkHandle { - return &FakeNetlinkHandle{} + fake := &FakeNetlinkHandle{ + localAddresses: make(map[string][]string), + } + return fake } -//EnsureAddressBind is a mock implementation +// EnsureAddressBind is a mock implementation func (h *FakeNetlinkHandle) EnsureAddressBind(address, devName string) (exist bool, err error) { return false, nil } -//UnbindAddress is a mock implementation +// UnbindAddress is a mock implementation func (h *FakeNetlinkHandle) UnbindAddress(address, devName string) error { return nil } @@ -44,3 +56,36 @@ func (h *FakeNetlinkHandle) EnsureDummyDevice(devName string) (bool, error) { func (h *FakeNetlinkHandle) DeleteDummyDevice(devName string) error { return nil } + +// GetLocalAddresses is a mock implementation +func (h *FakeNetlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) { + res := sets.NewString() + if len(filterDev) != 0 { + // list all addresses from a given network interface. + for _, addr := range h.localAddresses[filterDev] { + res.Insert(addr) + } + return res, nil + } + // If filterDev is not given, will list all addresses from all available network interface. + for linkName := range h.localAddresses { + // list all addresses from a given network interface. + for _, addr := range h.localAddresses[linkName] { + res.Insert(addr) + } + } + return res, nil +} + +// SetLocalAddresses set IP addresses to the given interface device. It's not part of interface. 
+func (h *FakeNetlinkHandle) SetLocalAddresses(dev string, ips ...string) error { + if h.localAddresses == nil { + h.localAddresses = make(map[string][]string) + } + if len(dev) == 0 { + return fmt.Errorf("device name can't be empty") + } + h.localAddresses[dev] = make([]string, 0) + h.localAddresses[dev] = append(h.localAddresses[dev], ips...) + return nil +} From 81897022db1a52a35fe20af694b425a3fdc3ff88 Mon Sep 17 00:00:00 2001 From: Andy Xie Date: Sat, 16 Dec 2017 19:19:49 +0800 Subject: [PATCH 339/794] fix magic string for runtime type --- pkg/kubelet/cadvisor/helpers_linux.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/kubelet/cadvisor/helpers_linux.go b/pkg/kubelet/cadvisor/helpers_linux.go index 10b5e05a008..bff89894c5e 100644 --- a/pkg/kubelet/cadvisor/helpers_linux.go +++ b/pkg/kubelet/cadvisor/helpers_linux.go @@ -22,6 +22,7 @@ import ( "fmt" cadvisorfs "github.com/google/cadvisor/fs" + "k8s.io/kubernetes/pkg/kubelet/types" ) // imageFsInfoProvider knows how to translate the configured runtime @@ -35,11 +36,11 @@ type imageFsInfoProvider struct { // For remote runtimes, it handles additional runtimes natively understood by cAdvisor. func (i *imageFsInfoProvider) ImageFsInfoLabel() (string, error) { switch i.runtime { - case "docker": + case types.DockerContainerRuntime: return cadvisorfs.LabelDockerImages, nil - case "rkt": + case types.RktContainerRuntime: return cadvisorfs.LabelRktImages, nil - case "remote": + case types.RemoteContainerRuntime: // This is a temporary workaround to get stats for cri-o from cadvisor // and should be removed. 
// Related to https://github.com/kubernetes/kubernetes/issues/51798 From a71d1680d43ab3904a993cd46944af7fd1cb6303 Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Sat, 16 Dec 2017 08:19:36 -0500 Subject: [PATCH 340/794] Add vikaschoudhary16 as reviewer in pkg/kubelet/cm/deviceplugin --- pkg/kubelet/cm/deviceplugin/OWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/kubelet/cm/deviceplugin/OWNERS b/pkg/kubelet/cm/deviceplugin/OWNERS index ec9b7ddc156..a374cd52454 100644 --- a/pkg/kubelet/cm/deviceplugin/OWNERS +++ b/pkg/kubelet/cm/deviceplugin/OWNERS @@ -4,3 +4,4 @@ approvers: reviewers: - mindprince - RenaudWasTaken +- vikaschoudhary16 From a37d8ec1f92f2ec9afb9520c11f392c23c689b94 Mon Sep 17 00:00:00 2001 From: Mikkel Oscar Lyderik Larsen Date: Wed, 13 Dec 2017 14:24:01 +0100 Subject: [PATCH 341/794] Don't create PSP binding when RBAC is not enabled --- test/e2e/framework/psp_util.go | 54 ++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/test/e2e/framework/psp_util.go b/test/e2e/framework/psp_util.go index bb8d0dadc9b..4e6e4f8a701 100644 --- a/test/e2e/framework/psp_util.go +++ b/test/e2e/framework/psp_util.go @@ -97,7 +97,7 @@ var ( ) func CreatePrivilegedPSPBinding(f *Framework, namespace string) { - if !IsPodSecurityPolicyEnabled(f) || !IsRBACEnabled(f) { + if !IsPodSecurityPolicyEnabled(f) { return } // Create the privileged PSP & role @@ -114,30 +114,34 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) { psp, err = f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp) ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged) - // Create the Role to bind it to the namespace. 
- _, err = f.ClientSet.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged}, - Rules: []rbacv1beta1.PolicyRule{{ - APIGroups: []string{"extensions"}, - Resources: []string{"podsecuritypolicies"}, - ResourceNames: []string{podSecurityPolicyPrivileged}, - Verbs: []string{"use"}, - }}, - }) - ExpectNoError(err, "Failed to create PSP role") + if IsRBACEnabled(f) { + // Create the Role to bind it to the namespace. + _, err = f.ClientSet.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged}, + Rules: []rbacv1beta1.PolicyRule{{ + APIGroups: []string{"extensions"}, + Resources: []string{"podsecuritypolicies"}, + ResourceNames: []string{podSecurityPolicyPrivileged}, + Verbs: []string{"use"}, + }}, + }) + ExpectNoError(err, "Failed to create PSP role") + } }) - By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s", - podSecurityPolicyPrivileged, namespace)) - BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(), - podSecurityPolicyPrivileged, - namespace, - rbacv1beta1.Subject{ - Kind: rbacv1beta1.ServiceAccountKind, - Namespace: namespace, - Name: "default", - }) - ExpectNoError(WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(), - serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged, - schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true)) + if IsRBACEnabled(f) { + By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s", + podSecurityPolicyPrivileged, namespace)) + BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(), + podSecurityPolicyPrivileged, + namespace, + rbacv1beta1.Subject{ + Kind: rbacv1beta1.ServiceAccountKind, + Namespace: namespace, + Name: "default", + }) + ExpectNoError(WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(), + 
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged, + schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true)) + } } From 8c51d235d6c2ecc902312871c53b86f5c0fa737e Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Tue, 14 Nov 2017 05:48:43 -0500 Subject: [PATCH 342/794] Refactor TestPodContainerDeviceAllocation to make it readable and extensible --- pkg/kubelet/cm/deviceplugin/manager_test.go | 383 +++++++++++--------- 1 file changed, 217 insertions(+), 166 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index f25c1e82166..158163a4f60 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -366,37 +366,25 @@ func (m *MockEndpoint) allocate(devs []string) (*pluginapi.AllocateResponse, err return nil, nil } -func TestPodContainerDeviceAllocation(t *testing.T) { - flag.Set("alsologtostderr", fmt.Sprintf("%t", true)) - var logLevel string - flag.StringVar(&logLevel, "logLevel", "4", "test") - flag.Lookup("v").Value.Set(logLevel) - - resourceName1 := "domain1.com/resource1" - resourceQuantity1 := *resource.NewQuantity(int64(2), resource.DecimalSI) - devID1 := "dev1" - devID2 := "dev2" - resourceName2 := "domain2.com/resource2" - resourceQuantity2 := *resource.NewQuantity(int64(1), resource.DecimalSI) - devID3 := "dev3" - devID4 := "dev4" - - as := require.New(t) - monitorCallback := func(resourceName string, added, updated, deleted []pluginapi.Device) {} - podsStub := activePodsStub{ - activePods: []*v1.Pod{}, - } - cachedNode := &v1.Node{ - Status: v1.NodeStatus{ - Allocatable: v1.ResourceList{}, +func makePod(limits v1.ResourceList) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: uuid.NewUUID(), + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: limits, + }, + }, + }, }, } - nodeInfo := 
&schedulercache.NodeInfo{} - nodeInfo.SetNode(cachedNode) +} - tmpDir, err := ioutil.TempDir("", "checkpoint") - as.Nil(err) - defer os.RemoveAll(tmpDir) +func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestResource) *ManagerImpl { + monitorCallback := func(resourceName string, added, updated, deleted []pluginapi.Device) {} testManager := &ManagerImpl{ socketdir: tmpDir, callback: monitorCallback, @@ -404,158 +392,221 @@ func TestPodContainerDeviceAllocation(t *testing.T) { allocatedDevices: make(map[string]sets.String), endpoints: make(map[string]endpoint), podDevices: make(podDevices), - activePods: podsStub.getActivePods, + activePods: activePods, sourcesReady: &sourcesReadyStub{}, } testManager.store, _ = utilstore.NewFileStore("/tmp/", utilfs.DefaultFs{}) + for _, res := range testRes { + testManager.allDevices[res.resourceName] = sets.NewString() + for _, dev := range res.devs { + testManager.allDevices[res.resourceName].Insert(dev) + } + if res.resourceName == "domain1.com/resource1" { + testManager.endpoints[res.resourceName] = &MockEndpoint{ + allocateFunc: func(devs []string) (*pluginapi.AllocateResponse, error) { + resp := new(pluginapi.AllocateResponse) + resp.Envs = make(map[string]string) + for _, dev := range devs { + switch dev { + case "dev1": + resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{ + ContainerPath: "/dev/aaa", + HostPath: "/dev/aaa", + Permissions: "mrw", + }) - testManager.allDevices[resourceName1] = sets.NewString() - testManager.allDevices[resourceName1].Insert(devID1) - testManager.allDevices[resourceName1].Insert(devID2) - testManager.allDevices[resourceName2] = sets.NewString() - testManager.allDevices[resourceName2].Insert(devID3) - testManager.allDevices[resourceName2].Insert(devID4) + resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{ + ContainerPath: "/dev/bbb", + HostPath: "/dev/bbb", + Permissions: "mrw", + }) - testManager.endpoints[resourceName1] = &MockEndpoint{ - allocateFunc: 
func(devs []string) (*pluginapi.AllocateResponse, error) { - resp := new(pluginapi.AllocateResponse) - resp.Envs = make(map[string]string) - for _, dev := range devs { - switch dev { - case "dev1": - resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{ - ContainerPath: "/dev/aaa", - HostPath: "/dev/aaa", - Permissions: "mrw", - }) + resp.Mounts = append(resp.Mounts, &pluginapi.Mount{ + ContainerPath: "/container_dir1/file1", + HostPath: "host_dir1/file1", + ReadOnly: true, + }) - resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{ - ContainerPath: "/dev/bbb", - HostPath: "/dev/bbb", - Permissions: "mrw", - }) + case "dev2": + resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{ + ContainerPath: "/dev/ccc", + HostPath: "/dev/ccc", + Permissions: "mrw", + }) - resp.Mounts = append(resp.Mounts, &pluginapi.Mount{ - ContainerPath: "/container_dir1/file1", - HostPath: "host_dir1/file1", - ReadOnly: true, - }) + resp.Mounts = append(resp.Mounts, &pluginapi.Mount{ + ContainerPath: "/container_dir1/file2", + HostPath: "host_dir1/file2", + ReadOnly: true, + }) - case "dev2": - resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{ - ContainerPath: "/dev/ccc", - HostPath: "/dev/ccc", - Permissions: "mrw", - }) - - resp.Mounts = append(resp.Mounts, &pluginapi.Mount{ - ContainerPath: "/container_dir1/file2", - HostPath: "host_dir1/file2", - ReadOnly: true, - }) - - resp.Envs["key1"] = "val1" - } + resp.Envs["key1"] = "val1" + } + } + return resp, nil + }, } - return resp, nil - }, - } + } + if res.resourceName == "domain2.com/resource2" { + testManager.endpoints[res.resourceName] = &MockEndpoint{ + allocateFunc: func(devs []string) (*pluginapi.AllocateResponse, error) { + resp := new(pluginapi.AllocateResponse) + resp.Envs = make(map[string]string) + for _, dev := range devs { + switch dev { + case "dev3": + resp.Envs["key2"] = "val2" - testManager.endpoints[resourceName2] = &MockEndpoint{ - allocateFunc: func(devs []string) (*pluginapi.AllocateResponse, 
error) { - resp := new(pluginapi.AllocateResponse) - resp.Envs = make(map[string]string) - for _, dev := range devs { - switch dev { - case "dev3": - resp.Envs["key2"] = "val2" - - case "dev4": - resp.Envs["key2"] = "val3" - } + case "dev4": + resp.Envs["key2"] = "val3" + } + } + return resp, nil + }, } - return resp, nil + } + } + return testManager +} + +func getTestNodeInfo(allocatable v1.ResourceList) *schedulercache.NodeInfo { + cachedNode := &v1.Node{ + Status: v1.NodeStatus{ + Allocatable: allocatable, }, } + nodeInfo := &schedulercache.NodeInfo{} + nodeInfo.SetNode(cachedNode) + return nodeInfo +} - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: uuid.NewUUID(), - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: string(uuid.NewUUID()), - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceName(resourceName1): resourceQuantity1, - v1.ResourceName("cpu"): resourceQuantity1, - v1.ResourceName(resourceName2): resourceQuantity2, - }, - }, - }, - }, - }, +type TestResource struct { + resourceName string + resourceQuantity resource.Quantity + devs []string +} + +func TestPodContainerDeviceAllocation(t *testing.T) { + flag.Set("alsologtostderr", fmt.Sprintf("%t", true)) + var logLevel string + flag.StringVar(&logLevel, "logLevel", "4", "test") + flag.Lookup("v").Value.Set(logLevel) + res1 := TestResource{ + resourceName: "domain1.com/resource1", + resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI), + devs: []string{"dev1", "dev2"}, } - - podsStub.updateActivePods([]*v1.Pod{pod}) - err = testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod}) + res2 := TestResource{ + resourceName: "domain2.com/resource2", + resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI), + devs: []string{"dev3", "dev4"}, + } + testResources := make([]TestResource, 2) + testResources = append(testResources, res1) + testResources = append(testResources, res2) + as := require.New(t) + podsStub := 
activePodsStub{ + activePods: []*v1.Pod{}, + } + tmpDir, err := ioutil.TempDir("", "checkpoint") as.Nil(err) - runContainerOpts := testManager.GetDeviceRunContainerOptions(pod, &pod.Spec.Containers[0]) - as.NotNil(runContainerOpts) - as.Equal(len(runContainerOpts.Devices), 3) - as.Equal(len(runContainerOpts.Mounts), 2) - as.Equal(len(runContainerOpts.Envs), 2) + defer os.RemoveAll(tmpDir) + nodeInfo := getTestNodeInfo(v1.ResourceList{}) + testManager := getTestManager(tmpDir, podsStub.getActivePods, testResources) - // Requesting to create a pod without enough resources should fail. - as.Equal(2, testManager.allocatedDevices[resourceName1].Len()) - failPod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: uuid.NewUUID(), + testPods := []*v1.Pod{ + makePod(v1.ResourceList{ + v1.ResourceName(res1.resourceName): res1.resourceQuantity, + v1.ResourceName("cpu"): res1.resourceQuantity, + v1.ResourceName(res2.resourceName): res2.resourceQuantity}), + makePod(v1.ResourceList{ + v1.ResourceName(res1.resourceName): res2.resourceQuantity}), + makePod(v1.ResourceList{ + v1.ResourceName(res2.resourceName): res2.resourceQuantity}), + } + testCases := []struct { + description string + testPod *v1.Pod + expectedContainerOptsLen []int + expectedAllocatedResName1 int + expectedAllocatedResName2 int + expErr error + }{ + { + description: "Successfull allocation of two Res1 resources and one Res2 resource", + testPod: testPods[0], + expectedContainerOptsLen: []int{3, 2, 2}, + expectedAllocatedResName1: 2, + expectedAllocatedResName2: 1, + expErr: nil, }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: string(uuid.NewUUID()), - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceName(resourceName1): resourceQuantity2, - }, - }, - }, - }, + { + description: "Requesting to create a pod without enough resources should fail", + testPod: testPods[1], + expectedContainerOptsLen: nil, + expectedAllocatedResName1: 2, + expectedAllocatedResName2: 1, + 
expErr: fmt.Errorf("requested number of devices unavailable for domain1.com/resource1. Requested: 1, Available: 0"), + }, + { + description: "Successfull allocation of all available Res1 resources and Res2 resources", + testPod: testPods[2], + expectedContainerOptsLen: []int{0, 0, 1}, + expectedAllocatedResName1: 2, + expectedAllocatedResName2: 2, + expErr: nil, }, } - err = testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: failPod}) - as.NotNil(err) - runContainerOpts2 := testManager.GetDeviceRunContainerOptions(failPod, &failPod.Spec.Containers[0]) - as.Nil(runContainerOpts2) - - // Requesting to create a new pod with a single resourceName2 should succeed. - newPod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: uuid.NewUUID(), - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: string(uuid.NewUUID()), - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceName(resourceName2): resourceQuantity2, - }, - }, - }, - }, - }, + activePods := []*v1.Pod{} + for _, testCase := range testCases { + pod := testCase.testPod + activePods = append(activePods, pod) + podsStub.updateActivePods(activePods) + err := testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod}) + if !reflect.DeepEqual(err, testCase.expErr) { + t.Errorf("DevicePluginManager error (%v). 
expected error: %v but got: %v", + testCase.description, testCase.expErr, err) + } + runContainerOpts := testManager.GetDeviceRunContainerOptions(pod, &pod.Spec.Containers[0]) + if testCase.expectedContainerOptsLen == nil { + as.Nil(runContainerOpts) + } else { + as.Equal(len(runContainerOpts.Devices), testCase.expectedContainerOptsLen[0]) + as.Equal(len(runContainerOpts.Mounts), testCase.expectedContainerOptsLen[1]) + as.Equal(len(runContainerOpts.Envs), testCase.expectedContainerOptsLen[2]) + } + as.Equal(testCase.expectedAllocatedResName1, testManager.allocatedDevices[res1.resourceName].Len()) + as.Equal(testCase.expectedAllocatedResName2, testManager.allocatedDevices[res2.resourceName].Len()) } - err = testManager.Allocate(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: newPod}) - as.Nil(err) - runContainerOpts3 := testManager.GetDeviceRunContainerOptions(newPod, &newPod.Spec.Containers[0]) - as.Equal(1, len(runContainerOpts3.Envs)) +} + +func TestInitContainerDeviceAllocation(t *testing.T) { // Requesting to create a pod that requests resourceName1 in init containers and normal containers // should succeed with devices allocated to init containers reallocated to normal containers. 
+ res1 := TestResource{ + resourceName: "domain1.com/resource1", + resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI), + devs: []string{"dev1", "dev2"}, + } + res2 := TestResource{ + resourceName: "domain2.com/resource2", + resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI), + devs: []string{"dev3", "dev4"}, + } + testResources := make([]TestResource, 2) + testResources = append(testResources, res1) + testResources = append(testResources, res2) + as := require.New(t) + podsStub := activePodsStub{ + activePods: []*v1.Pod{}, + } + nodeInfo := getTestNodeInfo(v1.ResourceList{}) + tmpDir, err := ioutil.TempDir("", "checkpoint") + as.Nil(err) + defer os.RemoveAll(tmpDir) + testManager := getTestManager(tmpDir, podsStub.getActivePods, testResources) + podWithPluginResourcesInInitContainers := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), @@ -566,7 +617,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ - v1.ResourceName(resourceName1): resourceQuantity2, + v1.ResourceName(res1.resourceName): res2.resourceQuantity, }, }, }, @@ -574,7 +625,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ - v1.ResourceName(resourceName1): resourceQuantity1, + v1.ResourceName(res1.resourceName): res1.resourceQuantity, }, }, }, @@ -584,8 +635,8 @@ func TestPodContainerDeviceAllocation(t *testing.T) { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ - v1.ResourceName(resourceName1): resourceQuantity2, - v1.ResourceName(resourceName2): resourceQuantity2, + v1.ResourceName(res1.resourceName): res2.resourceQuantity, + v1.ResourceName(res2.resourceName): res2.resourceQuantity, }, }, }, @@ -593,8 +644,8 @@ func TestPodContainerDeviceAllocation(t *testing.T) { Name: string(uuid.NewUUID()), Resources: 
v1.ResourceRequirements{ Limits: v1.ResourceList{ - v1.ResourceName(resourceName1): resourceQuantity2, - v1.ResourceName(resourceName2): resourceQuantity2, + v1.ResourceName(res1.resourceName): res2.resourceQuantity, + v1.ResourceName(res2.resourceName): res2.resourceQuantity, }, }, }, @@ -609,10 +660,10 @@ func TestPodContainerDeviceAllocation(t *testing.T) { initCont2 := podWithPluginResourcesInInitContainers.Spec.InitContainers[1].Name normalCont1 := podWithPluginResourcesInInitContainers.Spec.Containers[0].Name normalCont2 := podWithPluginResourcesInInitContainers.Spec.Containers[1].Name - initCont1Devices := testManager.podDevices.containerDevices(podUID, initCont1, resourceName1) - initCont2Devices := testManager.podDevices.containerDevices(podUID, initCont2, resourceName1) - normalCont1Devices := testManager.podDevices.containerDevices(podUID, normalCont1, resourceName1) - normalCont2Devices := testManager.podDevices.containerDevices(podUID, normalCont2, resourceName1) + initCont1Devices := testManager.podDevices.containerDevices(podUID, initCont1, res1.resourceName) + initCont2Devices := testManager.podDevices.containerDevices(podUID, initCont2, res1.resourceName) + normalCont1Devices := testManager.podDevices.containerDevices(podUID, normalCont1, res1.resourceName) + normalCont2Devices := testManager.podDevices.containerDevices(podUID, normalCont2, res1.resourceName) as.True(initCont2Devices.IsSuperset(initCont1Devices)) as.True(initCont2Devices.IsSuperset(normalCont1Devices)) as.True(initCont2Devices.IsSuperset(normalCont2Devices)) From 386258b07ae38e7e47938f3e8d21f6a16047a56e Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Sun, 17 Dec 2017 17:00:52 +0800 Subject: [PATCH 343/794] [cloudprovider]should reuse code rather than rewrite it --- .../providers/vsphere/vclib/datacenter.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go 
b/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go index d325c72dfe1..8b0a10e9a92 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go @@ -138,14 +138,8 @@ func (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) return nil, errors.New("Failed to parse vmDiskPath") } - finder := getFinder(dc) - ds, err := finder.Datastore(ctx, datastorePathObj.Datastore) - if err != nil { - glog.Errorf("Failed while searching for datastore: %s. err: %+v", datastorePathObj.Datastore, err) - return nil, err - } - datastore := Datastore{ds, dc} - return &datastore, nil + + return dc.GetDatastoreByName(ctx, datastorePathObj.Datastore) } // GetDatastoreByName gets the Datastore object for the given datastore name From 2ab0e3217e9140e03a485b7e23e01acafe6f05f6 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Sun, 17 Dec 2017 17:21:14 +0800 Subject: [PATCH 344/794] remove unuse code in cloudprovider --- pkg/cloudprovider/plugins.go | 12 ------------ pkg/cloudprovider/providers/gce/gce.go | 14 -------------- pkg/cloudprovider/providers/vsphere/vclib/utils.go | 12 ------------ 3 files changed, 38 deletions(-) diff --git a/pkg/cloudprovider/plugins.go b/pkg/cloudprovider/plugins.go index 0fc41f5eaf8..888532717aa 100644 --- a/pkg/cloudprovider/plugins.go +++ b/pkg/cloudprovider/plugins.go @@ -60,18 +60,6 @@ func IsCloudProvider(name string) bool { return found } -// CloudProviders returns the name of all registered cloud providers in a -// string slice -func CloudProviders() []string { - names := []string{} - providersMutex.Lock() - defer providersMutex.Unlock() - for name := range providers { - names = append(names, name) - } - return names -} - // GetCloudProvider creates an instance of the named cloud provider, or nil if // the name is unknown. 
The error return is only used if the named provider // was known but failed to initialize. The config parameter specifies the diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 95d4f33afb5..9a2d92f6ef0 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -695,20 +695,6 @@ func gceSubnetworkURL(apiEndpoint, project, region, subnetwork string) string { return apiEndpoint + strings.Join([]string{"projects", project, "regions", region, "subnetworks", subnetwork}, "/") } -// getProjectIDInURL parses full resource URLS and shorter URLS -// https://www.googleapis.com/compute/v1/projects/myproject/global/networks/mycustom -// projects/myproject/global/networks/mycustom -// All return "myproject" -func getProjectIDInURL(urlStr string) (string, error) { - fields := strings.Split(urlStr, "/") - for i, v := range fields { - if v == "projects" && i < len(fields)-1 { - return fields[i+1], nil - } - } - return "", fmt.Errorf("could not find project field in url: %v", urlStr) -} - // getRegionInURL parses full resource URLS and shorter URLS // https://www.googleapis.com/compute/v1/projects/myproject/regions/us-central1/subnetworks/a // projects/myproject/regions/us-central1/subnetworks/a diff --git a/pkg/cloudprovider/providers/vsphere/vclib/utils.go b/pkg/cloudprovider/providers/vsphere/vclib/utils.go index bac429d6deb..d449e5fe905 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/utils.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/utils.go @@ -146,18 +146,6 @@ func GetPathFromVMDiskPath(vmDiskPath string) string { return datastorePathObj.Path } -// GetDatastoreFromVMDiskPath retrieves the path from VM Disk Path. 
-// Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is vsanDatastore -func GetDatastoreFromVMDiskPath(vmDiskPath string) string { - datastorePathObj := new(object.DatastorePath) - isSuccess := datastorePathObj.FromString(vmDiskPath) - if !isSuccess { - glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) - return "" - } - return datastorePathObj.Datastore -} - //GetDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path. func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) { datastorePathObj := new(object.DatastorePath) From d80dbe7fea5f15ab68f348e26b5e74c25f21b000 Mon Sep 17 00:00:00 2001 From: edisonxiang Date: Thu, 30 Nov 2017 09:28:13 +0800 Subject: [PATCH 345/794] Fix GCE CreateVolume allocates in chunks of GiB incorrectly --- pkg/volume/gce_pd/gce_pd.go | 2 +- pkg/volume/gce_pd/gce_pd_test.go | 2 +- pkg/volume/gce_pd/gce_util.go | 4 ++-- test/e2e/storage/volume_provisioning.go | 26 ++++++++++++++++--------- 4 files changed, 21 insertions(+), 13 deletions(-) diff --git a/pkg/volume/gce_pd/gce_pd.go b/pkg/volume/gce_pd/gce_pd.go index 5576f1fabea..2b94cb29731 100644 --- a/pkg/volume/gce_pd/gce_pd.go +++ b/pkg/volume/gce_pd/gce_pd.go @@ -423,7 +423,7 @@ func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy, AccessModes: c.options.PVC.Spec.AccessModes, Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), + v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dG", sizeGB)), }, PersistentVolumeSource: v1.PersistentVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ diff --git a/pkg/volume/gce_pd/gce_pd_test.go b/pkg/volume/gce_pd/gce_pd_test.go index e9124dee400..79c96b8ac50 100644 --- a/pkg/volume/gce_pd/gce_pd_test.go +++ b/pkg/volume/gce_pd/gce_pd_test.go @@ -183,7 +183,7 @@ func 
TestPlugin(t *testing.T) { } cap := persistentSpec.Spec.Capacity[v1.ResourceStorage] size := cap.Value() - if size != 100*1024*1024*1024 { + if size != 100*volume.GB { t.Errorf("Provision() returned unexpected volume size: %v", size) } diff --git a/pkg/volume/gce_pd/gce_util.go b/pkg/volume/gce_pd/gce_util.go index 3d17a455017..0ec7566ea9f 100644 --- a/pkg/volume/gce_pd/gce_util.go +++ b/pkg/volume/gce_pd/gce_util.go @@ -82,8 +82,8 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() - // GCE works with gigabytes, convert to GiB with rounding up - requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024) + // GCE PDs are allocated in chunks of GBs (not GiBs) + requestGB := volume.RoundUpSize(requestBytes, volume.GB) // Apply Parameters (case-insensitive). We leave validation of // the values to the cloud provider. 
diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 38d1b257930..2c77177de3e 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -256,8 +256,8 @@ var _ = SIGDescribe("Dynamic Provisioning", func() { "type": "pd-ssd", "zone": cloudZone, }, - "1.5Gi", - "2Gi", + "1.5G", + "2G", func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-ssd") }, @@ -269,8 +269,8 @@ var _ = SIGDescribe("Dynamic Provisioning", func() { map[string]string{ "type": "pd-standard", }, - "1.5Gi", - "2Gi", + "1.5G", + "2G", func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, @@ -435,8 +435,8 @@ var _ = SIGDescribe("Dynamic Provisioning", func() { map[string]string{ "type": "pd-standard", }, - "1Gi", - "1Gi", + "1G", + "1G", func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, @@ -469,8 +469,8 @@ var _ = SIGDescribe("Dynamic Provisioning", func() { map[string]string{ "type": "pd-standard", }, - "1Gi", - "1Gi", + "1G", + "1G", func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, @@ -520,7 +520,7 @@ var _ = SIGDescribe("Dynamic Provisioning", func() { name: "unmanaged_zone", provisioner: "kubernetes.io/gce-pd", parameters: map[string]string{"zone": unmanagedZone}, - claimSize: "1Gi", + claimSize: "1G", } sc := newStorageClass(test, ns, suffix) sc, err = c.StorageV1().StorageClasses().Create(sc) @@ -640,6 +640,14 @@ var _ = SIGDescribe("Dynamic Provisioning", func() { claimSize: "2Gi", expectedSize: "2Gi", } + // gce or gke + if getDefaultPluginName() == "kubernetes.io/gce-pd" { + // using GB not GiB as e2e test unit since gce-pd returns GB, + // or expectedSize may be greater than claimSize. 
+ test.claimSize = "2G" + test.expectedSize = "2G" + } + claim := newClaim(test, ns, "default") testDynamicProvisioning(test, c, claim, nil) }) From a043d45152e5d76b30e71ac20a7386047b389e4e Mon Sep 17 00:00:00 2001 From: rohitjogvmw Date: Sun, 17 Dec 2017 08:49:13 -0800 Subject: [PATCH 346/794] COntroller-manager is crashing in customer environment as vSphere Cloud Provider is not using lower case naming while creating clientBuilder. With this fix, ClientBuilder is created using lowercase naming. --- pkg/cloudprovider/providers/vsphere/vsphere.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 94f9b5019c4..5707dd3d06c 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -217,7 +217,7 @@ func (vs *VSphere) Initialize(clientBuilder controller.ControllerClientBuilder) // Only on controller node it is required to register listeners. 
// Register callbacks for node updates - client := clientBuilder.ClientOrDie("vSphere-cloud-provider") + client := clientBuilder.ClientOrDie("vsphere-cloud-provider") factory := informers.NewSharedInformerFactory(client, 5*time.Minute) nodeInformer := factory.Core().V1().Nodes() nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ From 41a4db7f489d5afeedc255a31465e0e1895a0a5c Mon Sep 17 00:00:00 2001 From: Matt Kelly Date: Sun, 17 Dec 2017 13:05:45 -0500 Subject: [PATCH 347/794] kubeadm: Only check for well-known files in preflight --- cmd/kubeadm/app/preflight/checks.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index a79ff7bc866..523a894771a 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -79,7 +79,7 @@ func (e *Error) Error() string { } // Checker validates the state of the system to ensure kubeadm will be -// successful as often as possilble. +// successful as often as possible. 
type Checker interface { Check() (warnings, errors []error) Name() string @@ -848,6 +848,8 @@ func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfi warns, _ := criCtlChecker.Check() useCRI := len(warns) == 0 + manifestsDir := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName) + checks := []Checker{ KubernetesVersionCheck{KubernetesVersion: cfg.KubernetesVersion, KubeadmVersion: kubeadmversion.Get().GitVersion}, SystemVerificationCheck{CRISocket: criSocket}, @@ -860,7 +862,10 @@ func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfi PortOpenCheck{port: 10250}, PortOpenCheck{port: 10251}, PortOpenCheck{port: 10252}, - DirAvailableCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName)}, + FileAvailableCheck{Path: kubeadmconstants.GetStaticPodFilepath(kubeadmconstants.KubeAPIServer, manifestsDir)}, + FileAvailableCheck{Path: kubeadmconstants.GetStaticPodFilepath(kubeadmconstants.KubeControllerManager, manifestsDir)}, + FileAvailableCheck{Path: kubeadmconstants.GetStaticPodFilepath(kubeadmconstants.KubeScheduler, manifestsDir)}, + FileAvailableCheck{Path: kubeadmconstants.GetStaticPodFilepath(kubeadmconstants.Etcd, manifestsDir)}, FileContentCheck{Path: bridgenf, Content: []byte{'1'}}, SwapCheck{}, InPathCheck{executable: "ip", mandatory: true, exec: execer}, From 540baa9796eb06d4f6d56011ca8bc84b933654da Mon Sep 17 00:00:00 2001 From: chshou Date: Mon, 2 Oct 2017 23:16:26 -0700 Subject: [PATCH 348/794] Allow use resource ID to specify public IP address in azure_loadbalancer --- .../providers/azure/azure_util.go | 20 +++++++++++++++++++ .../providers/azure/azure_wrap.go | 7 ++++--- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_util.go index 6181550571b..1804faba588 100644 --- a/pkg/cloudprovider/providers/azure/azure_util.go +++ 
b/pkg/cloudprovider/providers/azure/azure_util.go @@ -699,3 +699,23 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string) er // Do nothing for availability set. return nil } + +// parseResourceGroupNameFromID parses the resource group name from a resource ID +func parseResourceGroupNameFromID(resourceID string) (resourceGroupName string, err error) { + reg, err := regexp.Compile(`(?i)(.*?)/resourceGroups/(?P\S+)/providers/(.*?)`) + + if err != nil { + return "", err + } + + matchNames := reg.SubexpNames() + matches := reg.FindStringSubmatch(resourceID) + + for i := range matchNames { + if matchNames[i] == "rgname" { + return matches[i], nil + } + } + + return "", fmt.Errorf("Invalid resource ID: %s", resourceID) +} diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index f1aa0def597..2990e12d18b 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -156,10 +156,11 @@ func (az *Cloud) listLoadBalancers() (lbListResult network.LoadBalancerListResul func (az *Cloud) getPublicIPAddress(name string) (pip network.PublicIPAddress, exists bool, err error) { var realErr error + var realErr error az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.Get(%s): start", name) - pip, err = az.PublicIPAddressesClient.Get(az.ResourceGroup, name, "") - glog.V(10).Infof("PublicIPAddressesClient.Get(%s): end", name) + glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %s): start", resourceGroup, pipName) + pip, err = az.PublicIPAddressesClient.Get(resourceGroup, pipName, "") + glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %s): end", resourceGroup, pipName) exists, realErr = checkResourceExistsFromError(err) if realErr != nil { From 9054137d05b8796b0f99be9628b79d4106b998f1 Mon Sep 17 00:00:00 2001 From: chshou Date: Mon, 9 Oct 2017 23:58:47 -0700 Subject: [PATCH 349/794] annotate service with resource group 
--- .../providers/azure/azure_backoff.go | 6 +++--- .../providers/azure/azure_loadbalancer.go | 6 +++--- .../providers/azure/azure_util.go | 20 ------------------- 3 files changed, 6 insertions(+), 26 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 3cf5d8930fa..fffb9cd07b1 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -302,11 +302,11 @@ func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) { func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s): start", *pip.Name) - respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) + glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): start", resourceGroup, *pip.Name) + respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(resourceGroup, *pip.Name, pip, nil) resp := <-respChan err := <-errChan - glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s): end", *pip.Name) + glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", resourceGroup, *pip.Name) return processRetryResponse(resp.Response, err) }) } diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 6657a074d89..c48cf858f6b 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -416,9 +416,9 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel stri glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%q): end", *pip.Name) az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.Get(%q): start", *pip.Name) - 
pip, err = az.PublicIPAddressesClient.Get(az.ResourceGroup, *pip.Name, "") - glog.V(10).Infof("PublicIPAddressesClient.Get(%q): end", *pip.Name) + glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %q): start", pipResourceGroup, *pip.Name) + pip, err = az.PublicIPAddressesClient.Get(pipResourceGroup, *pip.Name, "") + glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %q): end", pipResourceGroup, *pip.Name) if err != nil { return nil, err } diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_util.go index 1804faba588..6181550571b 100644 --- a/pkg/cloudprovider/providers/azure/azure_util.go +++ b/pkg/cloudprovider/providers/azure/azure_util.go @@ -699,23 +699,3 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string) er // Do nothing for availability set. return nil } - -// parseResourceGroupNameFromID parses the resource group name from a resource ID -func parseResourceGroupNameFromID(resourceID string) (resourceGroupName string, err error) { - reg, err := regexp.Compile(`(?i)(.*?)/resourceGroups/(?P\S+)/providers/(.*?)`) - - if err != nil { - return "", err - } - - matchNames := reg.SubexpNames() - matches := reg.FindStringSubmatch(resourceID) - - for i := range matchNames { - if matchNames[i] == "rgname" { - return matches[i], nil - } - } - - return "", fmt.Errorf("Invalid resource ID: %s", resourceID) -} From 03730a3e906e94634adb812fe3b1f6e958e15735 Mon Sep 17 00:00:00 2001 From: chshou Date: Sat, 14 Oct 2017 22:58:48 -0700 Subject: [PATCH 350/794] delete pip by matching name and rg --- pkg/cloudprovider/providers/azure/azure_backoff.go | 6 +++--- pkg/cloudprovider/providers/azure/azure_loadbalancer.go | 8 ++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index fffb9cd07b1..be93dbf4d41 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ 
b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -328,11 +328,11 @@ func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { func (az *Cloud) DeletePublicIPWithRetry(pipName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.Delete(%s): start", pipName) - respChan, errChan := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) + glog.V(10).Infof("PublicIPAddressesClient.Delete(%s, %s): start", pipResourceGroup, pipName) + respChan, errChan := az.PublicIPAddressesClient.Delete(pipResourceGroup, pipName, nil) resp := <-respChan err := <-errChan - glog.V(10).Infof("PublicIPAddressesClient.Delete(%s): end", pipName) + glog.V(10).Infof("PublicIPAddressesClient.Delete(%s, %s): end", pipResourceGroup, pipName) return processRetryResponse(resp, err) }) } diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index c48cf858f6b..d48fbbdeefb 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -1239,6 +1239,14 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b return false } +func (az *Cloud) getPublicIPAddressResourceGroup(service *v1.Service) string { + if resourceGroup, ok := service.Annotations[ServiceAnnotationLoadBalancerPublicIPAddressResourceGroup]; ok { + return resourceGroup + } + + return az.ResourceGroup +} + // Check if service requires an internal load balancer. 
func requiresInternalLoadBalancer(service *v1.Service) bool { if l, ok := service.Annotations[ServiceAnnotationLoadBalancerInternal]; ok { From 9f26becae14f867a3c1b097e4b168bef4fbcd739 Mon Sep 17 00:00:00 2001 From: chshou Date: Sat, 21 Oct 2017 10:47:29 -0700 Subject: [PATCH 351/794] get rg inside 'ensure' methods --- pkg/cloudprovider/providers/azure/azure_backoff.go | 6 +++--- pkg/cloudprovider/providers/azure/azure_loadbalancer.go | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index be93dbf4d41..1d903ceab79 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -302,11 +302,11 @@ func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) { func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): start", resourceGroup, *pip.Name) - respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(resourceGroup, *pip.Name, pip, nil) + glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): start", pipResourceGroup, *pip.Name) + respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(pipResourceGroup, *pip.Name, pip, nil) resp := <-respChan err := <-errChan - glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", resourceGroup, *pip.Name) + glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name) return processRetryResponse(resp.Response, err) }) } diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index d48fbbdeefb..6b08beb063d 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ 
b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -394,6 +394,7 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel stri return &pip, nil } + serviceName := getServiceName(service) pip.Name = to.StringPtr(pipName) pip.Location = to.StringPtr(az.Location) pip.PublicIPAddressPropertiesFormat = &network.PublicIPAddressPropertiesFormat{ From 6b8ff76500f760776436219c7595b26f43edc45a Mon Sep 17 00:00:00 2001 From: chshou Date: Thu, 2 Nov 2017 23:06:46 -0700 Subject: [PATCH 352/794] minor fixes --- pkg/cloudprovider/providers/azure/azure_loadbalancer.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 6b08beb063d..bc56ef90aab 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -1241,7 +1241,7 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b } func (az *Cloud) getPublicIPAddressResourceGroup(service *v1.Service) string { - if resourceGroup, ok := service.Annotations[ServiceAnnotationLoadBalancerPublicIPAddressResourceGroup]; ok { + if resourceGroup, found := service.Annotations[ServiceAnnotationLoadBalancerResourceGroup]; found { return resourceGroup } @@ -1250,7 +1250,7 @@ func (az *Cloud) getPublicIPAddressResourceGroup(service *v1.Service) string { // Check if service requires an internal load balancer. 
func requiresInternalLoadBalancer(service *v1.Service) bool { - if l, ok := service.Annotations[ServiceAnnotationLoadBalancerInternal]; ok { + if l, found := service.Annotations[ServiceAnnotationLoadBalancerInternal]; found { return l == "true" } @@ -1259,7 +1259,7 @@ func requiresInternalLoadBalancer(service *v1.Service) bool { func subnet(service *v1.Service) *string { if requiresInternalLoadBalancer(service) { - if l, ok := service.Annotations[ServiceAnnotationLoadBalancerInternalSubnet]; ok { + if l, found := service.Annotations[ServiceAnnotationLoadBalancerInternalSubnet]; found { return &l } } From 9c83e800fbb71ba540331763667f00f3939ad071 Mon Sep 17 00:00:00 2001 From: chshou Date: Sun, 3 Dec 2017 11:24:38 -0800 Subject: [PATCH 353/794] reapplied the changes after merge --- .../providers/azure/azure_backoff.go | 28 ++++++------ .../providers/azure/azure_loadbalancer.go | 43 +++++++++++-------- .../providers/azure/azure_wrap.go | 7 ++- 3 files changed, 45 insertions(+), 33 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 1d903ceab79..33fc9064095 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -243,23 +243,23 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { return allLBs, nil } -// ListPIPWithRetry list the PIP resources in az.ResourceGroup -func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) { +// ListPIPWithRetry list the PIP resources in the given resource group +func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) { allPIPs := []network.PublicIPAddress{} var result network.PublicIPAddressListResult err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.List(%v): start", az.ResourceGroup) - result, 
retryErr = az.PublicIPAddressesClient.List(az.ResourceGroup) - glog.V(10).Infof("PublicIPAddressesClient.List(%v): end", az.ResourceGroup) + glog.V(10).Infof("PublicIPAddressesClient.List(%v): start", pipResourceGroup) + result, retryErr = az.PublicIPAddressesClient.List(pipResourceGroup) + glog.V(10).Infof("PublicIPAddressesClient.List(%v): end", pipResourceGroup) if retryErr != nil { glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v", - az.ResourceGroup, + pipResourceGroup, retryErr) return false, retryErr } - glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", az.ResourceGroup) + glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup) return true, nil }) if err != nil { @@ -271,21 +271,21 @@ func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) { allPIPs = append(allPIPs, *result.Value...) appendResults = false - // follow the next link to get all the vms for resource group + // follow the next link to get all the pip resources for resource group if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): start", az.ResourceGroup) + glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): start", pipResourceGroup) result, retryErr = az.PublicIPAddressesClient.ListNextResults(result) - glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): end", az.ResourceGroup) + glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): end", pipResourceGroup) if retryErr != nil { glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", - az.ResourceGroup, + pipResourceGroup, retryErr) return false, retryErr } - glog.V(2).Infof("PublicIPAddressesClient.ListNextResults(%v) - backoff: success", az.ResourceGroup) + 
glog.V(2).Infof("PublicIPAddressesClient.ListNextResults(%v) - backoff: success", pipResourceGroup) return true, nil }) if err != nil { @@ -299,7 +299,7 @@ func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) { } // CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error { +func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): start", pipResourceGroup, *pip.Name) @@ -325,7 +325,7 @@ func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { } // DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry -func (az *Cloud) DeletePublicIPWithRetry(pipName string) error { +func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("PublicIPAddressesClient.Delete(%s, %s): start", pipResourceGroup, pipName) diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index bc56ef90aab..48079f7f618 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -66,6 +66,10 @@ const ( ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule" ) +// ServiceAnnotationLoadBalancerResourceGroup is the annotation used on the service +// to specify the resource group of load balancer objects that are not in the same resource group as the cluster. 
+const ServiceAnnotationLoadBalancerResourceGroup = "service.beta.kubernetes.io/azure-load-balancer-resource-group" + // GetLoadBalancer returns whether the specified load balancer exists, and // if so, what its status is. func (az *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) { @@ -81,7 +85,7 @@ func (az *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (statu return status, true, nil } -func getPublicIPLabel(service *v1.Service) string { +func getPublicIPDomainNameLabel(service *v1.Service) string { if labelName, found := service.Annotations[ServiceAnnotationDNSLabelName]; found { return labelName } @@ -315,7 +319,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L if err != nil { return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress Name from ID(%s)", serviceName, *lb.Name, *pipID) } - pip, existsPip, err := az.getPublicIPAddress(pipName) + pip, existsPip, err := az.getPublicIPAddress(az.getPublicIPAddressResourceGroup(service), pipName) if err != nil { return nil, err } @@ -337,7 +341,9 @@ func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service) return getPublicIPName(clusterName, service), nil } - pips, err := az.ListPIPWithRetry() + pipResourceGroup := az.getPublicIPAddressResourceGroup(service) + + pips, err := az.ListPIPWithRetry(pipResourceGroup) if err != nil { return "", err } @@ -348,7 +354,7 @@ func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service) return *pip.Name, nil } } - return "", fmt.Errorf("user supplied IP Address %s was not found", loadBalancerIP) + return "", fmt.Errorf("user supplied IP Address %s was not found in resource group %s", loadBalancerIP, pipResourceGroup) } func flipServiceInternalAnnotation(service *v1.Service) *v1.Service { @@ -385,8 +391,9 @@ func (az *Cloud) findServiceIPAddress(clusterName string, service *v1.Service, i return 
lbStatus.Ingress[0].IP, nil } -func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel string) (*network.PublicIPAddress, error) { - pip, existsPip, err := az.getPublicIPAddress(pipName) +func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domainNameLabel string) (*network.PublicIPAddress, error) { + pipResourceGroup := az.getPublicIPAddressResourceGroup(service) + pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName) if err != nil { return nil, err } @@ -408,13 +415,13 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel stri pip.Tags = &map[string]*string{"service": &serviceName} glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name) az.operationPollRateLimiter.Accept() - glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%q): start", *pip.Name) - err = az.CreateOrUpdatePIPWithRetry(pip) + glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name) + err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip) if err != nil { glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name) return nil, err } - glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%q): end", *pip.Name) + glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name) az.operationPollRateLimiter.Accept() glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %q): start", pipResourceGroup, *pip.Name) @@ -546,8 +553,8 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if err != nil { return nil, err } - domainNameLabel := getPublicIPLabel(service) - pip, err := az.ensurePublicIPExists(serviceName, pipName, domainNameLabel) + domainNameLabel := getPublicIPDomainNameLabel(service) + pip, err := az.ensurePublicIPExists(service, pipName, domainNameLabel) if err != nil { return nil, err } @@ -1137,7 +1144,9 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want } } - 
pips, err := az.ListPIPWithRetry() + pipResourceGroup := az.getPublicIPAddressResourceGroup(service) + + pips, err := az.ListPIPWithRetry(pipResourceGroup) if err != nil { return nil, err } @@ -1154,14 +1163,14 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want } else { glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName) az.operationPollRateLimiter.Accept() - glog.V(10).Infof("DeletePublicIPWithRetry(%q): start", pipName) - err = az.DeletePublicIPWithRetry(pipName) + glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName) + err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName) if err != nil { glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - deleting", serviceName, pipName) // We let err to pass through // It may be ignorable } - glog.V(10).Infof("DeletePublicIPWithRetry(%q): end", pipName) // response not read yet... + glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): end", pipResourceGroup, pipName) // response not read yet... 
err = ignoreStatusNotFoundFromError(err) if err != nil { @@ -1176,8 +1185,8 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want if !isInternal && wantLb { // Confirm desired public ip resource exists var pip *network.PublicIPAddress - domainNameLabel := getPublicIPLabel(service) - if pip, err = az.ensurePublicIPExists(serviceName, desiredPipName, domainNameLabel); err != nil { + domainNameLabel := getPublicIPDomainNameLabel(service) + if pip, err = az.ensurePublicIPExists(service, desiredPipName, domainNameLabel); err != nil { return nil, err } return pip, nil diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index 2990e12d18b..53dd992b4e7 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -153,8 +153,11 @@ func (az *Cloud) listLoadBalancers() (lbListResult network.LoadBalancerListResul return lbListResult, exists, err } -func (az *Cloud) getPublicIPAddress(name string) (pip network.PublicIPAddress, exists bool, err error) { - var realErr error +func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pip network.PublicIPAddress, exists bool, err error) { + resourceGroup := az.ResourceGroup + if pipResourceGroup != "" { + resourceGroup = pipResourceGroup + } var realErr error az.operationPollRateLimiter.Accept() From 7a43f736c4c51ff122ea9d21986d7e62f40df2cb Mon Sep 17 00:00:00 2001 From: tianshapjq Date: Sat, 9 Dec 2017 15:14:18 +0800 Subject: [PATCH 354/794] correct the annotations in container_manager.go --- pkg/kubelet/cm/container_manager.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index 626b251d6d7..da7bf4a4642 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -122,11 +122,7 @@ type Status struct { SoftRequirements error } -// 
containerManager for the kubelet is currently an injected dependency. -// We need to parse the --qos-reserve-requests option in -// cmd/kubelet/app/server.go and there isn't really a good place to put -// the code. If/When the kubelet dependency injection gets worked out, -// maybe there will be a better place for it. +// parsePercentage parses the percentage string to numeric value. func parsePercentage(v string) (int64, error) { if !strings.HasSuffix(v, "%") { return 0, fmt.Errorf("percentage expected, got '%s'", v) From 1793e6eb1823b795a6e3d90d8e40ed4800cb9194 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Mon, 18 Dec 2017 09:25:19 +0800 Subject: [PATCH 355/794] add pkg/util/ipvs OWNERS file --- pkg/util/ipvs/OWNERS | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 pkg/util/ipvs/OWNERS diff --git a/pkg/util/ipvs/OWNERS b/pkg/util/ipvs/OWNERS new file mode 100644 index 00000000000..fce2911d6a2 --- /dev/null +++ b/pkg/util/ipvs/OWNERS @@ -0,0 +1,7 @@ +reviewers: + - thockin + - m1093782566 +approvers: + - thockin + - m1093782566 + From 094eaf4249a4fd40557e08f80bf907ef9b722dec Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Mon, 18 Dec 2017 11:01:26 +0800 Subject: [PATCH 356/794] test get node IP --- pkg/proxy/ipvs/proxier_test.go | 77 ++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index b7864a8b88e..f1b27ec525c 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -264,6 +264,83 @@ func TestCanUseIPVSProxier(t *testing.T) { } } +func TestGetNodeIPs(t *testing.T) { + testCases := []struct { + devAddresses map[string][]string + expectIPs []string + }{ + // case 0 + { + devAddresses: map[string][]string{"eth0": {"1.2.3.4"}, "lo": {"127.0.0.1"}}, + expectIPs: []string{"1.2.3.4", "127.0.0.1"}, + }, + // case 1 + { + devAddresses: map[string][]string{"lo": {"127.0.0.1"}}, + expectIPs: []string{"127.0.0.1"}, + }, + // case 2 + { 
+ devAddresses: map[string][]string{}, + expectIPs: []string{}, + }, + // case 3 + { + devAddresses: map[string][]string{"encap0": {"10.20.30.40"}, "lo": {"127.0.0.1"}, "docker0": {"172.17.0.1"}}, + expectIPs: []string{"10.20.30.40", "127.0.0.1", "172.17.0.1"}, + }, + // case 4 + { + devAddresses: map[string][]string{"encaps9": {"10.20.30.40"}, "lo": {"127.0.0.1"}, "encap7": {"10.20.30.31"}}, + expectIPs: []string{"10.20.30.40", "127.0.0.1", "10.20.30.31"}, + }, + // case 5 + { + devAddresses: map[string][]string{"kube-ipvs0": {"1.2.3.4"}, "lo": {"127.0.0.1"}, "encap7": {"10.20.30.31"}}, + expectIPs: []string{"127.0.0.1", "10.20.30.31"}, + }, + // case 6 + { + devAddresses: map[string][]string{"kube-ipvs0": {"1.2.3.4", "2.3.4.5"}, "lo": {"127.0.0.1"}}, + expectIPs: []string{"127.0.0.1"}, + }, + // case 7 + { + devAddresses: map[string][]string{"kube-ipvs0": {"1.2.3.4", "2.3.4.5"}}, + expectIPs: []string{}, + }, + // case 8 + { + devAddresses: map[string][]string{"kube-ipvs0": {"1.2.3.4", "2.3.4.5"}, "eth5": {"3.4.5.6"}, "lo": {"127.0.0.1"}}, + expectIPs: []string{"127.0.0.1", "3.4.5.6"}, + }, + // case 9 + { + devAddresses: map[string][]string{"ipvs0": {"1.2.3.4"}, "lo": {"127.0.0.1"}, "encap7": {"10.20.30.31"}}, + expectIPs: []string{"127.0.0.1", "10.20.30.31", "1.2.3.4"}, + }, + } + + for i := range testCases { + fake := netlinktest.NewFakeNetlinkHandle() + for dev, addresses := range testCases[i].devAddresses { + fake.SetLocalAddresses(dev, addresses...) 
+ } + r := realIPGetter{nl: fake} + ips, err := r.NodeIPs() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + ipStrs := sets.NewString() + for _, ip := range ips { + ipStrs.Insert(ip.String()) + } + if !ipStrs.Equal(sets.NewString(testCases[i].expectIPs...)) { + t.Errorf("case[%d], unexpected mismatch, expected: %v, got: %v", i, testCases[i].expectIPs, ips) + } + } +} + func TestNodePort(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() From 8c4bfd0ca347602c6e3d77f6ea02ee62041cd34a Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Tue, 5 Dec 2017 17:01:07 +0800 Subject: [PATCH 357/794] add fake ut --- pkg/proxy/ipvs/testing/fake_test.go | 49 +++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 pkg/proxy/ipvs/testing/fake_test.go diff --git a/pkg/proxy/ipvs/testing/fake_test.go b/pkg/proxy/ipvs/testing/fake_test.go new file mode 100644 index 00000000000..fabc30a584b --- /dev/null +++ b/pkg/proxy/ipvs/testing/fake_test.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/util/sets" +) + +func TestSetGetLocalAddresses(t *testing.T) { + fake := NewFakeNetlinkHandle() + fake.SetLocalAddresses("eth0", "1.2.3.4") + expected := sets.NewString("1.2.3.4") + addr, _ := fake.GetLocalAddresses("eth0") + if !reflect.DeepEqual(expected, addr) { + t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, addr) + } + list, _ := fake.GetLocalAddresses("") + if !reflect.DeepEqual(expected, list) { + t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, list) + } + fake.SetLocalAddresses("lo", "127.0.0.1") + expected = sets.NewString("127.0.0.1") + addr, _ = fake.GetLocalAddresses("lo") + if !reflect.DeepEqual(expected, addr) { + t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, addr) + } + list, _ = fake.GetLocalAddresses("") + expected = sets.NewString("1.2.3.4", "127.0.0.1") + if !reflect.DeepEqual(expected, list) { + t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, list) + } +} From 22a4edcd722dbd0012670720efb173c533ceee6c Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Tue, 5 Dec 2017 17:21:11 +0800 Subject: [PATCH 358/794] update bazel BUILD --- pkg/proxy/ipvs/BUILD | 1 - pkg/proxy/ipvs/testing/BUILD | 10 ++++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/proxy/ipvs/BUILD b/pkg/proxy/ipvs/BUILD index 30945e44334..5efde3ae9c6 100644 --- a/pkg/proxy/ipvs/BUILD +++ b/pkg/proxy/ipvs/BUILD @@ -67,7 +67,6 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", diff --git a/pkg/proxy/ipvs/testing/BUILD 
b/pkg/proxy/ipvs/testing/BUILD index 0fbd41c253f..90c05cb0c11 100644 --- a/pkg/proxy/ipvs/testing/BUILD +++ b/pkg/proxy/ipvs/testing/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -12,6 +13,7 @@ go_library( srcs = ["fake.go"], importpath = "k8s.io/kubernetes/pkg/proxy/ipvs/testing", tags = ["automanaged"], + deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], ) filegroup( @@ -26,3 +28,11 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["fake_test.go"], + importpath = "k8s.io/kubernetes/pkg/proxy/ipvs/testing", + library = ":go_default_library", + deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], +) From bf1fb46347836e8446530272fa0e184ed3eddaf1 Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Sat, 16 Dec 2017 15:13:34 -0500 Subject: [PATCH 359/794] Look for requested resources in the Requests --- pkg/kubelet/cm/deviceplugin/manager.go | 5 ++++- pkg/kubelet/cm/deviceplugin/manager_test.go | 12 ++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/manager.go b/pkg/kubelet/cm/deviceplugin/manager.go index dea81c1f1b7..5e1137c1149 100644 --- a/pkg/kubelet/cm/deviceplugin/manager.go +++ b/pkg/kubelet/cm/deviceplugin/manager.go @@ -550,7 +550,10 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont podUID := string(pod.UID) contName := container.Name allocatedDevicesUpdated := false - for k, v := range container.Resources.Limits { + // NOTE: Skipping the Resources.Limits is safe here because: + // 1. If container Spec mentions Limits only, implicitly Requests, equal to Limits, will get added to the Spec. + // 2. If container Spec mentions Limits, which are greater than or less than Requests, will fail at validation. 
+ for k, v := range container.Resources.Requests { resource := string(k) needed := int(v.Value()) glog.V(3).Infof("needs %d %s", needed, resource) diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index 158163a4f60..d7a032694c5 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -366,7 +366,7 @@ func (m *MockEndpoint) allocate(devs []string) (*pluginapi.AllocateResponse, err return nil, nil } -func makePod(limits v1.ResourceList) *v1.Pod { +func makePod(requests v1.ResourceList) *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), @@ -375,7 +375,7 @@ func makePod(limits v1.ResourceList) *v1.Pod { Containers: []v1.Container{ { Resources: v1.ResourceRequirements{ - Limits: limits, + Requests: requests, }, }, }, @@ -616,7 +616,7 @@ func TestInitContainerDeviceAllocation(t *testing.T) { { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ + Requests: v1.ResourceList{ v1.ResourceName(res1.resourceName): res2.resourceQuantity, }, }, @@ -624,7 +624,7 @@ func TestInitContainerDeviceAllocation(t *testing.T) { { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ + Requests: v1.ResourceList{ v1.ResourceName(res1.resourceName): res1.resourceQuantity, }, }, @@ -634,7 +634,7 @@ func TestInitContainerDeviceAllocation(t *testing.T) { { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ + Requests: v1.ResourceList{ v1.ResourceName(res1.resourceName): res2.resourceQuantity, v1.ResourceName(res2.resourceName): res2.resourceQuantity, }, @@ -643,7 +643,7 @@ func TestInitContainerDeviceAllocation(t *testing.T) { { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ + Requests: v1.ResourceList{ v1.ResourceName(res1.resourceName): res2.resourceQuantity, v1.ResourceName(res2.resourceName): 
res2.resourceQuantity, }, From 9b549bd5f45eaa3f4e7a8788e41c2352b1605a0e Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Mon, 18 Dec 2017 13:42:58 +0800 Subject: [PATCH 360/794] Restrict url check conditions when creating with --raw --- pkg/kubectl/cmd/create.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/create.go b/pkg/kubectl/cmd/create.go index e3340bcf228..5c00d69acd3 100644 --- a/pkg/kubectl/cmd/create.go +++ b/pkg/kubectl/cmd/create.go @@ -123,7 +123,7 @@ func (o *CreateOptions) ValidateArgs(cmd *cobra.Command, args []string) error { if len(o.FilenameOptions.Filenames) != 1 { return cmdutil.UsageErrorf(cmd, "--raw can only use a single local file or stdin") } - if strings.HasPrefix(o.FilenameOptions.Filenames[0], "http") { + if strings.Index(o.FilenameOptions.Filenames[0], "http://") == 0 || strings.Index(o.FilenameOptions.Filenames[0], "https://") == 0 { return cmdutil.UsageErrorf(cmd, "--raw cannot read from a url") } if o.FilenameOptions.Recursive { From d474b86e0582dee5a6c7f59144060222756cc468 Mon Sep 17 00:00:00 2001 From: Di Xu Date: Fri, 15 Dec 2017 22:23:43 +0800 Subject: [PATCH 361/794] Propagate error up instead panic --- pkg/kubelet/cm/cpumanager/cpu_manager.go | 16 ++++------------ pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 18 ++---------------- 2 files changed, 6 insertions(+), 28 deletions(-) diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go index 6e1fd9cacb1..6c59dca18d4 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -98,13 +98,7 @@ type manager struct { var _ Manager = &manager{} // NewManager creates new cpu manager based on provided policy -func NewManager( - cpuPolicyName string, - reconcilePeriod time.Duration, - machineInfo *cadvisorapi.MachineInfo, - nodeAllocatableReservation v1.ResourceList, - stateFileDirecory string, -) (Manager, error) { +func NewManager(cpuPolicyName string, 
reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, stateFileDirecory string) (Manager, error) { var policy Policy switch policyName(cpuPolicyName) { @@ -120,18 +114,16 @@ func NewManager( glog.Infof("[cpumanager] detected CPU topology: %v", topo) reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU] if !ok { - // The static policy cannot initialize without this information. Panic! - panic("[cpumanager] unable to determine reserved CPU resources for static policy") + // The static policy cannot initialize without this information. + return nil, fmt.Errorf("[cpumanager] unable to determine reserved CPU resources for static policy") } if reservedCPUs.IsZero() { - // Panic! - // // The static policy requires this to be nonzero. Zero CPU reservation // would allow the shared pool to be completely exhausted. At that point // either we would violate our guarantee of exclusivity or need to evict // any pod that has at least one container that requires zero CPUs. // See the comments in policy_static.go for more details. 
- panic("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero") + return nil, fmt.Errorf("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero") } // Take the ceiling of the reservation, since fractional CPUs cannot be diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go index 2a3b1201a41..9381ea470bc 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go @@ -234,7 +234,6 @@ func TestCPUManagerGenerate(t *testing.T) { cpuPolicyName string nodeAllocatableReservation v1.ResourceList isTopologyBroken bool - panicMsg string expectedPolicy string expectedError error skipIfPermissionsError bool @@ -270,14 +269,14 @@ func TestCPUManagerGenerate(t *testing.T) { description: "static policy - broken reservation", cpuPolicyName: "static", nodeAllocatableReservation: v1.ResourceList{}, - panicMsg: "unable to determine reserved CPU resources for static policy", + expectedError: fmt.Errorf("unable to determine reserved CPU resources for static policy"), skipIfPermissionsError: true, }, { description: "static policy - no CPU resources", cpuPolicyName: "static", nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI)}, - panicMsg: "the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero", + expectedError: fmt.Errorf("the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero"), skipIfPermissionsError: true, }, } @@ -319,19 +318,6 @@ func TestCPUManagerGenerate(t *testing.T) { t.Errorf("cannot create state file: %s", err.Error()) } defer os.RemoveAll(sDir) - defer func() { - if err := recover(); err != nil { - if testCase.panicMsg != "" { - if !strings.Contains(err.(string), testCase.panicMsg) { - t.Errorf("Unexpected panic message. 
Have: %q wants %q", err, testCase.panicMsg) - } - } else { - t.Errorf("Unexpected panic: %q", err) - } - } else if testCase.panicMsg != "" { - t.Error("Expected panic hasn't been raised") - } - }() mgr, err := NewManager(testCase.cpuPolicyName, 5*time.Second, machineInfo, testCase.nodeAllocatableReservation, sDir) if testCase.expectedError != nil { From 2e4c4b23adb5e0be21192abb5eab40ae00102eb5 Mon Sep 17 00:00:00 2001 From: pospispa Date: Tue, 5 Dec 2017 20:42:29 +0100 Subject: [PATCH 362/794] PVC Protection Alpha Feature E2E Tests PVC Protection alpha feature was introduced in PRs: - https://github.com/kubernetes/kubernetes/pull/55824 - https://github.com/kubernetes/kubernetes/pull/55873 That's why E2E tests for this feature are added. --- test/e2e/storage/BUILD | 3 + test/e2e/storage/pvc_protection.go | 125 +++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+) create mode 100644 test/e2e/storage/pvc_protection.go diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index ddee6ef0107..13969e42c24 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -19,6 +19,7 @@ go_library( "persistent_volumes-vsphere.go", "pv_reclaimpolicy.go", "pvc_label_selector.go", + "pvc_protection.go", "volume_expand.go", "volume_io.go", "volume_metrics.go", @@ -49,6 +50,8 @@ go_library( "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/metrics:go_default_library", + "//pkg/util/slice:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/metrics:go_default_library", diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go new file mode 100644 index 00000000000..b43ee5d4d91 --- /dev/null +++ b/test/e2e/storage/pvc_protection.go @@ -0,0 +1,125 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/util/slice" + volumeutil "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/test/e2e/framework" +) + +var _ = SIGDescribe("PVC Protection [Feature:PVCProtection]", func() { + var ( + client clientset.Interface + nameSpace string + err error + pvc *v1.PersistentVolumeClaim + pvcCreatedAndNotDeleted bool + ) + + f := framework.NewDefaultFramework("pvc-protection") + BeforeEach(func() { + client = f.ClientSet + nameSpace = f.Namespace.Name + framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) + + By("Creating a PVC") + suffix := "pvc-protection" + defaultSC := getDefaultStorageClassName(client) + testStorageClass := storageClassTest{ + claimSize: "1Gi", + } + pvc = newClaim(testStorageClass, nameSpace, suffix) + pvc.Spec.StorageClassName = &defaultSC + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + Expect(err).NotTo(HaveOccurred(), "Error creating PVC") + pvcCreatedAndNotDeleted = true + + By("Waiting for PVC to become Bound") + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, 
framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err) + + By("Checking that PVC Protection finalizer is set") + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "While getting PVC status") + Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(BeTrue()) + }) + + AfterEach(func() { + if pvcCreatedAndNotDeleted { + framework.DeletePersistentVolumeClaim(client, pvc.Name, nameSpace) + } + }) + + It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() { + By("Deleting the PVC") + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) + Expect(err).NotTo(HaveOccurred(), "Error deleting PVC") + waitForPersistentVolumeClaimBeRemoved(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + pvcCreatedAndNotDeleted = false + }) + + It("Verify that PVC in active use by a pod is not removed immediatelly", func() { + By("Creating a Pod that becomes Running and therefore is actively using the PVC") + pvcClaims := []*v1.PersistentVolumeClaim{pvc} + pod, err := framework.CreatePod(client, nameSpace, nil, pvcClaims, false, "") + Expect(err).NotTo(HaveOccurred(), "While creating pod that uses the PVC or waiting for the Pod to become Running") + + By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) + Expect(err).NotTo(HaveOccurred(), "Error deleting PVC") + + By("Checking that the PVC status is Terminating") + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "While checking PVC status") + 
Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil)) + + By("Deleting the pod that uses the PVC") + err = framework.DeletePodWithWait(f, client, pod) + Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod") + + By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") + waitForPersistentVolumeClaimBeRemoved(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + pvcCreatedAndNotDeleted = false + }) +}) + +// waitForPersistentVolumeClaimBeRemoved waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first. +func waitForPersistentVolumeClaimBeRemoved(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { + framework.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { + _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) + if err != nil { + if apierrs.IsNotFound(err) { + framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns) + return nil + } + framework.Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, Poll, err) + } + } + return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout) +} From 0de49c461fb39eb6336c0827f34983cf8812e065 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Mon, 18 Dec 2017 15:14:12 +0800 Subject: [PATCH 363/794] Remove kube-proxy 1.8 configmap and daemonset manifests in kubeadm. 
--- cmd/kubeadm/app/constants/constants.go | 3 - .../app/phases/addons/proxy/manifests.go | 90 ------------------- cmd/kubeadm/app/phases/addons/proxy/proxy.go | 71 +++++---------- .../app/phases/addons/proxy/proxy_test.go | 22 ----- 4 files changed, 20 insertions(+), 166 deletions(-) diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 4cf9076eb9a..61cc7f70ea9 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -231,9 +231,6 @@ var ( // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports MinimumKubeletVersion = version.MustParseSemantic("v1.8.0") - // MinimumKubeProxyComponentConfigVersion specifies the minimum version for the kubeProxyComponent - MinimumKubeProxyComponentConfigVersion = version.MustParseSemantic("v1.9.0-alpha.3") - // SupportedEtcdVersion lists officially supported etcd versions with corresponding kubernetes releases SupportedEtcdVersion = map[uint8]string{ 8: "3.0.17", diff --git a/cmd/kubeadm/app/phases/addons/proxy/manifests.go b/cmd/kubeadm/app/phases/addons/proxy/manifests.go index 9962eb44d0c..d141e16289f 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/manifests.go +++ b/cmd/kubeadm/app/phases/addons/proxy/manifests.go @@ -17,37 +17,6 @@ limitations under the License. 
package proxy const ( - // KubeProxyConfigMap18 is the proxy ConfigMap manifest for Kubernetes version 1.8 - KubeProxyConfigMap18 = ` -kind: ConfigMap -apiVersion: v1 -metadata: - name: kube-proxy - namespace: kube-system - labels: - app: kube-proxy -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: {{ .MasterEndpoint }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token -` - // KubeProxyConfigMap19 is the proxy ConfigMap manifest for Kubernetes 1.9 and above KubeProxyConfigMap19 = ` kind: ConfigMap @@ -79,65 +48,6 @@ data: tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token config.conf: |- {{ .ProxyConfig}} -` - // KubeProxyDaemonSet18 is the proxy DaemonSet manifest for Kubernetes version 1.8 - KubeProxyDaemonSet18 = ` -apiVersion: apps/v1beta2 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-proxy - name: kube-proxy - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-proxy - updateStrategy: - type: RollingUpdate - template: - metadata: - labels: - k8s-app: kube-proxy - spec: - containers: - - name: kube-proxy - image: {{ if .ImageOverride }}{{ .ImageOverride }}{{ else }}{{ .ImageRepository }}/kube-proxy-{{ .Arch }}:{{ .Version }}{{ end }} - imagePullPolicy: IfNotPresent - command: - - /usr/local/bin/kube-proxy - - --kubeconfig=/var/lib/kube-proxy/kubeconfig.conf - {{ .ClusterCIDR }} - securityContext: - privileged: true - volumeMounts: - - mountPath: /var/lib/kube-proxy - name: kube-proxy - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /lib/modules - name: lib-modules - readOnly: true - hostNetwork: true - serviceAccountName: kube-proxy - tolerations: - - key: {{ .MasterTaintKey 
}} - effect: NoSchedule - - key: {{ .CloudTaintKey }} - value: "true" - effect: NoSchedule - volumes: - - name: kube-proxy - configMap: - name: kube-proxy - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - - name: lib-modules - hostPath: - path: /lib/modules ` // KubeProxyDaemonSet19 is the proxy DaemonSet manifest for Kubernetes 1.9 and above diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy.go b/cmd/kubeadm/app/phases/addons/proxy/proxy.go index a0cade34556..0121da57d02 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy.go @@ -33,7 +33,6 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" kubeproxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme" kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" - "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) @@ -63,58 +62,28 @@ func EnsureProxyAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Inte if err != nil { return fmt.Errorf("error when marshaling: %v", err) } - // Parse the given kubernetes version - k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion) - if err != nil { - return fmt.Errorf("couldn't parse kubernetes version %q: %v", cfg.KubernetesVersion, err) - } var proxyConfigMapBytes, proxyDaemonSetBytes []byte - if k8sVersion.AtLeast(kubeadmconstants.MinimumKubeProxyComponentConfigVersion) { - proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap19, - struct { - MasterEndpoint string - ProxyConfig string - }{ - MasterEndpoint: masterEndpoint, - ProxyConfig: proxyBytes, - }) - if err != nil { - return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err) - } - proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ - ImageRepository: 
cfg.GetControlPlaneImageRepository(), - Arch: runtime.GOARCH, - Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion), - ImageOverride: cfg.UnifiedControlPlaneImage, - MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster, - CloudTaintKey: algorithm.TaintExternalCloudProvider, + proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap19, + struct { + MasterEndpoint string + ProxyConfig string + }{ + MasterEndpoint: masterEndpoint, + ProxyConfig: proxyBytes, }) - if err != nil { - return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err) - } - } else { - proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap18, - struct { - MasterEndpoint string - }{ - MasterEndpoint: masterEndpoint, - }) - if err != nil { - return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err) - } - - proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet18, struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ - ImageRepository: cfg.GetControlPlaneImageRepository(), - Arch: runtime.GOARCH, - Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion), - ImageOverride: cfg.UnifiedControlPlaneImage, - ClusterCIDR: getClusterCIDR(cfg.Networking.PodSubnet), - MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster, - CloudTaintKey: algorithm.TaintExternalCloudProvider, - }) - if err != nil { - return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err) - } + if err != nil { + return fmt.Errorf("error when parsing kube-proxy configmap template: %v", err) + } + proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ + ImageRepository: cfg.GetControlPlaneImageRepository(), + Arch: runtime.GOARCH, + Version: kubeadmutil.KubernetesVersionToImageTag(cfg.KubernetesVersion), + ImageOverride: 
cfg.UnifiedControlPlaneImage, + MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster, + CloudTaintKey: algorithm.TaintExternalCloudProvider, + }) + if err != nil { + return fmt.Errorf("error when parsing kube-proxy daemonset template: %v", err) } if err := createKubeProxyAddon(proxyConfigMapBytes, proxyDaemonSetBytes, client); err != nil { return err diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go b/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go index 7cd2c3d2abd..21893d065da 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go @@ -113,15 +113,6 @@ func TestCompileManifests(t *testing.T) { data interface{} expected bool }{ - { - manifest: KubeProxyConfigMap18, - data: struct { - MasterEndpoint, ProxyConfig string - }{ - MasterEndpoint: "foo", - }, - expected: true, - }, { manifest: KubeProxyConfigMap19, data: struct { @@ -132,19 +123,6 @@ func TestCompileManifests(t *testing.T) { }, expected: true, }, - { - manifest: KubeProxyDaemonSet18, - data: struct{ ImageRepository, Arch, Version, ImageOverride, ClusterCIDR, MasterTaintKey, CloudTaintKey string }{ - ImageRepository: "foo", - Arch: "foo", - Version: "foo", - ImageOverride: "foo", - ClusterCIDR: "foo", - MasterTaintKey: "foo", - CloudTaintKey: "foo", - }, - expected: true, - }, { manifest: KubeProxyDaemonSet19, data: struct{ ImageRepository, Arch, Version, ImageOverride, MasterTaintKey, CloudTaintKey string }{ From 9c9ac106b849f59568e42080b7dbe466098f3eeb Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Mon, 18 Dec 2017 15:17:29 +0800 Subject: [PATCH 364/794] Auto generated BUILD files. 
--- cmd/kubeadm/app/phases/addons/proxy/BUILD | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/kubeadm/app/phases/addons/proxy/BUILD b/cmd/kubeadm/app/phases/addons/proxy/BUILD index 6c0a1f8b0d5..6a59d477207 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/BUILD +++ b/cmd/kubeadm/app/phases/addons/proxy/BUILD @@ -41,7 +41,6 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library", "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", - "//pkg/util/version:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", From 0c9bb5a9640486f668982175fe80caea304cd23f Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Mon, 18 Dec 2017 16:12:04 +0800 Subject: [PATCH 365/794] remove useless function hasHostPortConflicts `hasHostPortConflicts` is not used anywhere. Delete it. --- pkg/kubelet/BUILD | 2 -- pkg/kubelet/kubelet_pods.go | 18 ------------------ pkg/kubelet/kubelet_pods_test.go | 19 ------------------- 3 files changed, 39 deletions(-) diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index ab35f843abd..3dc3fab3661 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -36,7 +36,6 @@ go_library( "//pkg/apis/core/v1:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper/qos:go_default_library", - "//pkg/apis/core/v1/validation:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/features:go_default_library", @@ -125,7 +124,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 7c94a30ef13..40340934920 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -40,7 +40,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" utilvalidation "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/validation/field" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/remotecommand" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -48,7 +47,6 @@ import ( podshelper "k8s.io/kubernetes/pkg/apis/core/pods" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" - "k8s.io/kubernetes/pkg/apis/core/v1/validation" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/fieldpath" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" @@ -1109,22 +1107,6 @@ func (kl *Kubelet) podKiller() { } } -// hasHostPortConflicts detects pods with conflicted host ports. -func hasHostPortConflicts(pods []*v1.Pod) bool { - ports := sets.String{} - for _, pod := range pods { - if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 { - glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs) - return true - } - if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 { - glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs) - return true - } - } - return false -} - // validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state // of the container. 
The previous flag will only return the logs for the last terminated container, otherwise, the current // running container is preferred over a previous termination. If info about the container is not available then a specific diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index 66cde220d44..a993ca2a845 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -2270,25 +2270,6 @@ func TestPortForward(t *testing.T) { } } -// Tests that identify the host port conflicts are detected correctly. -func TestGetHostPortConflicts(t *testing.T) { - pods := []*v1.Pod{ - {Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}}, - {Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 81}}}}}}, - {Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 82}}}}}}, - {Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 83}}}}}}, - } - // Pods should not cause any conflict. - assert.False(t, hasHostPortConflicts(pods), "Should not have port conflicts") - - expected := &v1.Pod{ - Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 81}}}}}, - } - // The new pod should cause conflict and be reported. 
- pods = append(pods, expected) - assert.True(t, hasHostPortConflicts(pods), "Should have port conflicts") -} - func TestHasHostMountPVC(t *testing.T) { tests := map[string]struct { pvError error From adf3c21495950ae4ea026ff92b6c48225fe92556 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Mon, 18 Dec 2017 10:56:34 +0200 Subject: [PATCH 366/794] Fix LB lint errors --- .../reactive/load_balancer.py | 33 ++++++++++--------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py b/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py index c5272359572..ddb3845a10a 100644 --- a/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py +++ b/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py @@ -37,24 +37,25 @@ from subprocess import CalledProcessError apilb_nginx = """/var/log/nginx.*.log { - daily - missingok - rotate 14 - compress - delaycompress - notifempty - create 0640 www-data adm - sharedscripts - prerotate - if [ -d /etc/logrotate.d/httpd-prerotate ]; then \\ - run-parts /etc/logrotate.d/httpd-prerotate; \\ - fi \\ - endscript - postrotate - invoke-rc.d nginx rotate >/dev/null 2>&1 - endscript + daily + missingok + rotate 14 + compress + delaycompress + notifempty + create 0640 www-data adm + sharedscripts + prerotate + if [ -d /etc/logrotate.d/httpd-prerotate ]; then \\ + run-parts /etc/logrotate.d/httpd-prerotate; \\ + fi \\ + endscript + postrotate + invoke-rc.d nginx rotate >/dev/null 2>&1 + endscript }""" + @when('certificates.available') def request_server_certificates(tls): '''Send the data that is required to create a server certificate for From 5bc7e2212ae7ce23830f366fe607e0c99a73f36d Mon Sep 17 00:00:00 2001 From: linyouchong Date: Mon, 18 Dec 2017 17:51:38 +0800 Subject: [PATCH 367/794] fix accessmode mapping error --- pkg/volume/csi/csi_client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/pkg/volume/csi/csi_client.go b/pkg/volume/csi/csi_client.go index ee1776ce4f8..c8b8ad0f28e 100644 --- a/pkg/volume/csi/csi_client.go +++ b/pkg/volume/csi/csi_client.go @@ -217,7 +217,7 @@ func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_A case api.ReadWriteOnce: return csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER case api.ReadOnlyMany: - return csipb.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER + return csipb.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY case api.ReadWriteMany: return csipb.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER } From f41f1887e2d24d705500f536dfbacbe10f62d945 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Wed, 29 Nov 2017 17:54:20 +0800 Subject: [PATCH 368/794] Fix minor err in kubeadm --- cmd/kubeadm/app/features/features_test.go | 12 ++++++------ cmd/kubeadm/app/phases/addons/dns/dns.go | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cmd/kubeadm/app/features/features_test.go b/cmd/kubeadm/app/features/features_test.go index 71c1e2b9c21..6d24ad9c4fa 100644 --- a/cmd/kubeadm/app/features/features_test.go +++ b/cmd/kubeadm/app/features/features_test.go @@ -168,16 +168,16 @@ func TestResolveFeatureGateDependencies(t *testing.T) { expectedFeatures: map[string]bool{}, }, { // others flags - inputFeatures: map[string]bool{"SupportIPVSProxyMode": true}, - expectedFeatures: map[string]bool{"SupportIPVSProxyMode": true}, + inputFeatures: map[string]bool{CoreDNS: true}, + expectedFeatures: map[string]bool{CoreDNS: true}, }, { // just StoreCertsInSecrets flags - inputFeatures: map[string]bool{"StoreCertsInSecrets": true}, - expectedFeatures: map[string]bool{"StoreCertsInSecrets": true, "SelfHosting": true}, + inputFeatures: map[string]bool{StoreCertsInSecrets: true}, + expectedFeatures: map[string]bool{StoreCertsInSecrets: true, SelfHosting: true}, }, { // just HighAvailability flags - inputFeatures: map[string]bool{"HighAvailability": true}, - expectedFeatures: 
map[string]bool{"HighAvailability": true, "StoreCertsInSecrets": true, "SelfHosting": true}, + inputFeatures: map[string]bool{HighAvailability: true}, + expectedFeatures: map[string]bool{HighAvailability: true, StoreCertsInSecrets: true, SelfHosting: true}, }, } diff --git a/cmd/kubeadm/app/phases/addons/dns/dns.go b/cmd/kubeadm/app/phases/addons/dns/dns.go index b1866e584fa..aa1e52c941d 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns.go @@ -200,7 +200,7 @@ func createCoreDNSAddon(deploymentBytes, serviceBytes, configBytes []byte, clien coreDNSServiceAccount := &v1.ServiceAccount{} if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(CoreDNSServiceAccount), coreDNSServiceAccount); err != nil { - return fmt.Errorf("unable to decode CoreDNS configmap %v", err) + return fmt.Errorf("unable to decode CoreDNS serviceaccount %v", err) } // Create the ConfigMap for CoreDNS or update it in case it already exists From e6b9b5e0c3633d1486ff2ac9af280fbdc8117b2f Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Mon, 18 Dec 2017 17:22:26 +0800 Subject: [PATCH 369/794] add not found error for ipset set and entry delete --- pkg/proxy/ipvs/ipset.go | 4 +++- pkg/proxy/ipvs/proxier.go | 5 ++++- pkg/util/ipset/ipset.go | 16 ++++++++++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/pkg/proxy/ipvs/ipset.go b/pkg/proxy/ipvs/ipset.go index 0a01f0bacc6..d61992125ae 100644 --- a/pkg/proxy/ipvs/ipset.go +++ b/pkg/proxy/ipvs/ipset.go @@ -114,7 +114,9 @@ func (set *IPSet) syncIPSetEntries() { // Clean legacy entries for _, entry := range currentIPSetEntries.Difference(set.activeEntries).List() { if err := set.handle.DelEntry(entry, set.Name); err != nil { - glog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err) + if !utilipset.IsNotFoundError(err) { + glog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err) + } } 
else { glog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name) } diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 5f5a09d2447..104a5e9a353 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -824,7 +824,10 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset for _, set := range ipSetsToDestroy { err = ipset.DestroySet(set) if err != nil { - encounteredError = true + if !utilipset.IsNotFoundError(err) { + glog.Errorf("Error removing ipset %s, error: %v", set, err) + encounteredError = true + } } } return encounteredError diff --git a/pkg/util/ipset/ipset.go b/pkg/util/ipset/ipset.go index ba92bff74bf..56993c26305 100644 --- a/pkg/util/ipset/ipset.go +++ b/pkg/util/ipset/ipset.go @@ -322,4 +322,20 @@ func validatePortRange(portRange string) bool { return true } +// IsNotFoundError returns true if the error indicates "not found". It parses +// the error string looking for known values, which is imperfect but works in +// practice. 
+func IsNotFoundError(err error) bool { + es := err.Error() + if strings.Contains(es, "does not exist") { + // set with the same name already exists + return true + } + if strings.Contains(es, "element is missing") { + // entry is missing from the set + return true + } + return false +} + var _ = Interface(&runner{}) From bb159cf26a6bf80c70d513cfdb668f88ba67dd41 Mon Sep 17 00:00:00 2001 From: Karol Wychowaniec Date: Fri, 15 Dec 2017 15:34:17 +0100 Subject: [PATCH 370/794] Reduce CPU and memory requests for Metrics Server Nanny --- cluster/addons/metrics-server/metrics-server-deployment.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml index 5b19bf769a3..662fcfb1474 100644 --- a/cluster/addons/metrics-server/metrics-server-deployment.yaml +++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml @@ -49,8 +49,8 @@ spec: cpu: 100m memory: 300Mi requests: - cpu: 50m - memory: 100Mi + cpu: 5m + memory: 50Mi env: - name: MY_POD_NAME valueFrom: From 30ab605d85f1225c768c071ce24bafc9a564099a Mon Sep 17 00:00:00 2001 From: Phionah Bugosi Date: Mon, 18 Dec 2017 13:59:14 +0300 Subject: [PATCH 371/794] Add test for Cider ExpandVolume --- pkg/cloudprovider/providers/openstack/BUILD | 1 + .../providers/openstack/openstack_test.go | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/pkg/cloudprovider/providers/openstack/BUILD b/pkg/cloudprovider/providers/openstack/BUILD index 117014a5a20..0b90b0d399b 100644 --- a/pkg/cloudprovider/providers/openstack/BUILD +++ b/pkg/cloudprovider/providers/openstack/BUILD @@ -80,6 +80,7 @@ go_test( "//vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", diff --git a/pkg/cloudprovider/providers/openstack/openstack_test.go b/pkg/cloudprovider/providers/openstack/openstack_test.go index 95cbfc0cb3b..1115c3505ca 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_test.go +++ b/pkg/cloudprovider/providers/openstack/openstack_test.go @@ -30,6 +30,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/wait" @@ -551,6 +552,18 @@ func TestVolumes(t *testing.T) { WaitForVolumeStatus(t, os, vol, volumeAvailableStatus) } + expectedVolSize := resource.MustParse("2Gi") + newVolSize, err := os.ExpandVolume(vol, resource.MustParse("1Gi"), expectedVolSize) + if err != nil { + t.Fatalf("Cannot expand a Cinder volume: %v", err) + } + if newVolSize != expectedVolSize { + t.Logf("Expected: %v but got: %v ", expectedVolSize, newVolSize) + } + t.Logf("Volume expanded to (%v) \n", newVolSize) + + WaitForVolumeStatus(t, os, vol, volumeAvailableStatus) + err = os.DeleteVolume(vol) if err != nil { t.Fatalf("Cannot delete Cinder volume %s: %v", vol, err) From d9bb71b1fe14a4b6584f26ac1411e1914067955f Mon Sep 17 00:00:00 2001 From: Maciej Borsz Date: Mon, 18 Dec 2017 13:34:33 +0100 Subject: [PATCH 372/794] Add --retry-connrefused to all curl invocations. By default 'Connection refused' error is not a transient error and is not retried. 
--- cluster/gce/container-linux/configure-helper.sh | 6 +++--- cluster/gce/container-linux/configure.sh | 4 ++-- cluster/gce/container-linux/master.yaml | 2 +- cluster/gce/container-linux/node.yaml | 2 +- cluster/gce/gci/configure-helper.sh | 2 +- cluster/gce/gci/configure.sh | 6 +++--- cluster/gce/gci/master.yaml | 2 +- cluster/gce/gci/node.yaml | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/cluster/gce/container-linux/configure-helper.sh b/cluster/gce/container-linux/configure-helper.sh index 439e12fca8f..d438c71d924 100755 --- a/cluster/gce/container-linux/configure-helper.sh +++ b/cluster/gce/container-linux/configure-helper.sh @@ -975,7 +975,7 @@ function start-kube-apiserver { params+=" --feature-gates=${FEATURE_GATES}" fi if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then - local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") + local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --retry-connrefused --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") if [[ -n "${PROXY_SSH_USER:-}" ]]; then params+=" --advertise-address=${vm_external_ip}" params+=" --ssh-user=${PROXY_SSH_USER}" @@ -1469,7 +1469,7 @@ function setup-rkt { mkdir -p /etc/rkt "${KUBE_HOME}/download/" local rkt_tar="${KUBE_HOME}/download/rkt.tar.gz" local rkt_tmpdir=$(mktemp -d "${KUBE_HOME}/rkt_download.XXXXX") - curl --retry 5 --retry-delay 3 --fail --silent --show-error \ + curl --retry 5 --retry-delay 3 --retry-connrefused --fail --silent --show-error \ --location --create-dirs --output "${rkt_tar}" \ https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz tar --strip-components=1 -xf "${rkt_tar}" -C "${rkt_tmpdir}" --overwrite @@ -1508,7 
+1508,7 @@ function install-docker2aci { local tar_path="${KUBE_HOME}/download/docker2aci.tar.gz" local tmp_path="${KUBE_HOME}/docker2aci" mkdir -p "${KUBE_HOME}/download/" "${tmp_path}" - curl --retry 5 --retry-delay 3 --fail --silent --show-error \ + curl --retry 5 --retry-delay 3 --retry-connrefused --fail --silent --show-error \ --location --create-dirs --output "${tar_path}" \ https://github.com/appc/docker2aci/releases/download/v0.14.0/docker2aci-v0.14.0.tar.gz tar --strip-components=1 -xf "${tar_path}" -C "${tmp_path}" --overwrite diff --git a/cluster/gce/container-linux/configure.sh b/cluster/gce/container-linux/configure.sh index 6ac60cd81d2..035c88dba31 100755 --- a/cluster/gce/container-linux/configure.sh +++ b/cluster/gce/container-linux/configure.sh @@ -21,7 +21,7 @@ set -o pipefail function download-kube-env { # Fetch kube-env from GCE metadata server. local -r tmp_kube_env="/tmp/kube-env.yaml" - curl --fail --retry 5 --retry-delay 3 --silent --show-error \ + curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error \ -H "X-Google-Metadata-Request: True" \ -o "${tmp_kube_env}" \ http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env @@ -55,7 +55,7 @@ function download-or-bust { for url in "${urls[@]}"; do local file="${url##*/}" rm -f "${file}" - if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then + if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 --retry-connrefused "${url}"; then echo "== Failed to download ${url}. Retrying. ==" elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then echo "== Hash validation of ${url} failed. Retrying. 
==" diff --git a/cluster/gce/container-linux/master.yaml b/cluster/gce/container-linux/master.yaml index 4dec695c9d7..be4c3c31ccc 100644 --- a/cluster/gce/container-linux/master.yaml +++ b/cluster/gce/container-linux/master.yaml @@ -17,7 +17,7 @@ coreos: Type=oneshot RemainAfterExit=yes ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin - ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh ExecStart=/opt/kubernetes/bin/configure.sh diff --git a/cluster/gce/container-linux/node.yaml b/cluster/gce/container-linux/node.yaml index b203c4fded3..24aaa7b43c1 100644 --- a/cluster/gce/container-linux/node.yaml +++ b/cluster/gce/container-linux/node.yaml @@ -17,7 +17,7 @@ coreos: Type=oneshot RemainAfterExit=yes ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin - ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh ExecStart=/opt/kubernetes/bin/configure.sh diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 4b97d9ee8fd..9307b382ea7 100644 --- 
a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1655,7 +1655,7 @@ function start-kube-apiserver { params+=" --feature-gates=${FEATURE_GATES}" fi if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then - local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") + local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --retry-connrefused --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") if [[ -n "${PROXY_SSH_USER:-}" ]]; then params+=" --advertise-address=${vm_external_ip}" params+=" --ssh-user=${PROXY_SSH_USER}" diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index 40060f613c4..aa2920fb017 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -50,7 +50,7 @@ function download-kube-env { # Fetch kube-env from GCE metadata server. (umask 700; local -r tmp_kube_env="/tmp/kube-env.yaml" - curl --fail --retry 5 --retry-delay 3 --silent --show-error \ + curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error \ -H "X-Google-Metadata-Request: True" \ -o "${tmp_kube_env}" \ http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env @@ -68,7 +68,7 @@ function download-kube-master-certs { # Fetch kube-env from GCE metadata server. 
(umask 700; local -r tmp_kube_master_certs="/tmp/kube-master-certs.yaml" - curl --fail --retry 5 --retry-delay 3 --silent --show-error \ + curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error \ -H "X-Google-Metadata-Request: True" \ -o "${tmp_kube_master_certs}" \ http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-master-certs @@ -106,7 +106,7 @@ function download-or-bust { for url in "${urls[@]}"; do local file="${url##*/}" rm -f "${file}" - if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then + if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 --retry-connrefused "${url}"; then echo "== Failed to download ${url}. Retrying. ==" elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then echo "== Hash validation of ${url} failed. Retrying. ==" diff --git a/cluster/gce/gci/master.yaml b/cluster/gce/gci/master.yaml index 7854ab4fa93..68fdc5a901a 100644 --- a/cluster/gce/gci/master.yaml +++ b/cluster/gce/gci/master.yaml @@ -15,7 +15,7 @@ write_files: ExecStartPre=/bin/mkdir -p /home/kubernetes/bin ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin - ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh ExecStart=/home/kubernetes/bin/configure.sh diff --git a/cluster/gce/gci/node.yaml b/cluster/gce/gci/node.yaml index 
52971e2a076..c716af56c49 100644 --- a/cluster/gce/gci/node.yaml +++ b/cluster/gce/gci/node.yaml @@ -15,7 +15,7 @@ write_files: ExecStartPre=/bin/mkdir -p /home/kubernetes/bin ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin - ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh ExecStart=/home/kubernetes/bin/configure.sh From a722fdaa1a9b3ed14308888a139babad74836c08 Mon Sep 17 00:00:00 2001 From: Aleksandra Malinowska Date: Mon, 18 Dec 2017 13:13:54 +0100 Subject: [PATCH 373/794] If minimum mig size is 0, resize to 1 before running test --- test/e2e/autoscaling/cluster_size_autoscaling.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 3eb2ccd8ace..ec1dee63a79 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -415,6 +415,15 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } } + if minSize == 0 { + newSizes := make(map[string]int) + for mig, size := range originalSizes { + newSizes[mig] = size + } + newSizes[minMig] = 1 + setMigSizes(newSizes) + } + removeLabels := func(nodesToClean sets.String) { By("Removing labels from nodes") for node := range nodesToClean { From 294a8e6040787dd3eb25ac957b00b2bbaf4fb24d Mon Sep 17 00:00:00 2001 From: David Eads Date: Mon, 18 Dec 2017 09:39:25 
-0500 Subject: [PATCH 374/794] add deads to quota owner --- pkg/quota/OWNERS | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/quota/OWNERS b/pkg/quota/OWNERS index d2eabe9cd4e..7025d6d6eea 100644 --- a/pkg/quota/OWNERS +++ b/pkg/quota/OWNERS @@ -1,8 +1,9 @@ approvers: -- derekwaynecarr -- vishh -reviewers: -- smarterclayton - deads2k - derekwaynecarr - vishh +reviewers: +- deads2k +- derekwaynecarr +- smarterclayton +- vishh From e8a72dbb4ba1369b0524655f27c32531ba144d9e Mon Sep 17 00:00:00 2001 From: Mik Vyatskov Date: Mon, 18 Dec 2017 15:46:46 +0100 Subject: [PATCH 375/794] Fix Stackdriver Logging e2e tests Signed-off-by: Mik Vyatskov --- .../logging/stackdrvier/basic.go | 17 +--- .../logging/stackdrvier/utils.go | 89 +++++++++++-------- 2 files changed, 57 insertions(+), 49 deletions(-) diff --git a/test/e2e/instrumentation/logging/stackdrvier/basic.go b/test/e2e/instrumentation/logging/stackdrvier/basic.go index ae1ba4a6f77..7bc5e682b5f 100644 --- a/test/e2e/instrumentation/logging/stackdrvier/basic.go +++ b/test/e2e/instrumentation/logging/stackdrvier/basic.go @@ -18,7 +18,6 @@ package stackdriver import ( "fmt" - "strings" "time" "k8s.io/apimachinery/pkg/util/wait" @@ -107,19 +106,14 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd err = utils.WaitForLogs(c, ingestionInterval, ingestionTimeout) framework.ExpectNoError(err) }) - }) - }) - ginkgo.It("should ingest logs [Feature:StackdriverLogging]", func() { - withLogProviderForScope(f, podsScope, func(p *sdLogProvider) { ginkgo.By("Checking that too long lines are trimmed", func() { - originalLength := 100001 + maxLength := 100000 cmd := []string{ "/bin/sh", "-c", - fmt.Sprintf("while :; do printf '%%*s' %d | tr ' ' 'A'; echo; sleep 60; done", originalLength), + fmt.Sprintf("while :; do printf '%%*s' %d | tr ' ' 'A'; echo; sleep 60; done", maxLength+1), } - trimPrefix := "[Trimmed]" pod, err := 
utils.StartAndReturnSelf(utils.NewExecLoggingPod("synthlogger-4", cmd), f) framework.ExpectNoError(err, "Failed to start a pod") @@ -133,11 +127,8 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd if log.JSONPayload != nil { return false, fmt.Errorf("got json log entry %v, wanted plain text", log.JSONPayload) } - if len(log.TextPayload) == originalLength { - return false, fmt.Errorf("got non-trimmed entry of length %d", len(log.TextPayload)) - } - if !strings.HasPrefix(log.TextPayload, trimPrefix) { - return false, fmt.Errorf("got message without prefix '%s': %s", trimPrefix, log.TextPayload) + if len(log.TextPayload) > maxLength { + return false, fmt.Errorf("got too long entry of length %d", len(log.TextPayload)) } return true, nil }, utils.JustTimeout, pod.Name()) diff --git a/test/e2e/instrumentation/logging/stackdrvier/utils.go b/test/e2e/instrumentation/logging/stackdrvier/utils.go index 7cd7bd7ebe6..482fc120ece 100644 --- a/test/e2e/instrumentation/logging/stackdrvier/utils.go +++ b/test/e2e/instrumentation/logging/stackdrvier/utils.go @@ -20,6 +20,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "sync" "time" "k8s.io/apimachinery/pkg/util/wait" @@ -45,6 +46,9 @@ const ( // PubSub topic with log entries polling interval sdLoggingPollInterval = 100 * time.Millisecond + + // The parallelism level of polling logs process. 
+ sdLoggingPollParallelism = 10 ) type logProviderScope int @@ -68,6 +72,7 @@ type sdLogProvider struct { logSink *sd.LogSink pollingStopChannel chan struct{} + pollingWG *sync.WaitGroup queueCollection utils.LogsQueueCollection @@ -92,7 +97,8 @@ func newSdLogProvider(f *framework.Framework, scope logProviderScope) (*sdLogPro sdService: sdService, pubsubService: pubsubService, framework: f, - pollingStopChannel: make(chan struct{}, 1), + pollingStopChannel: make(chan struct{}), + pollingWG: &sync.WaitGroup{}, queueCollection: utils.NewLogsQueueCollection(maxQueueSize), } return provider, nil @@ -128,13 +134,14 @@ func (p *sdLogProvider) Init() error { return fmt.Errorf("failed to wait for sink to become operational: %v", err) } - go p.pollLogs() + p.startPollingLogs() return nil } func (p *sdLogProvider) Cleanup() { - p.pollingStopChannel <- struct{}{} + close(p.pollingStopChannel) + p.pollingWG.Wait() if p.logSink != nil { projectID := framework.TestContext.CloudConfig.ProjectID @@ -257,44 +264,54 @@ func (p *sdLogProvider) waitSinkInit() error { }) } -func (p *sdLogProvider) pollLogs() { - wait.PollUntil(sdLoggingPollInterval, func() (bool, error) { - messages, err := pullAndAck(p.pubsubService, p.subscription) +func (p *sdLogProvider) startPollingLogs() { + for i := 0; i < sdLoggingPollParallelism; i++ { + p.pollingWG.Add(1) + go func() { + defer p.pollingWG.Done() + + wait.PollUntil(sdLoggingPollInterval, func() (bool, error) { + p.pollLogsOnce() + return false, nil + }, p.pollingStopChannel) + }() + } +} + +func (p *sdLogProvider) pollLogsOnce() { + messages, err := pullAndAck(p.pubsubService, p.subscription) + if err != nil { + framework.Logf("Failed to pull messages from PubSub due to %v", err) + return + } + + for _, msg := range messages { + logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data) if err != nil { - framework.Logf("Failed to pull messages from PubSub due to %v", err) - return false, nil + framework.Logf("Got a message from 
pubsub that is not base64-encoded: %s", msg.Message.Data) + continue } - for _, msg := range messages { - logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data) - if err != nil { - framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data) - continue - } - - var sdLogEntry sd.LogEntry - if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil { - framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err) - continue - } - - name, ok := p.tryGetName(sdLogEntry) - if !ok { - framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type) - continue - } - - logEntry, err := convertLogEntry(sdLogEntry) - if err != nil { - framework.Logf("Failed to parse Stackdriver LogEntry: %v", err) - continue - } - - p.queueCollection.Push(name, logEntry) + var sdLogEntry sd.LogEntry + if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil { + framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err) + continue } - return false, nil - }, p.pollingStopChannel) + name, ok := p.tryGetName(sdLogEntry) + if !ok { + framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type) + continue + } + + logEntry, err := convertLogEntry(sdLogEntry) + if err != nil { + framework.Logf("Failed to parse Stackdriver LogEntry: %v", err) + continue + } + + p.queueCollection.Push(name, logEntry) + } } func (p *sdLogProvider) tryGetName(sdLogEntry sd.LogEntry) (string, bool) { From 151398e9610309e483255add1e85b8f894422b4d Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Tue, 5 Dec 2017 16:36:58 -0500 Subject: [PATCH 376/794] add --pod-selector opt kubectl drain --- hack/make-rules/test-cmd-util.sh | 59 ++++++++++++++++++++++++++++++++ pkg/kubectl/cmd/drain.go | 16 +++++++++ 2 files changed, 75 insertions(+) diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index 50e873dd752..1f204ed6417 
100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -4300,6 +4300,51 @@ run_cluster_management_tests() { kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:' + # create test pods we can work with + kubectl create -f - "${kube_flags[@]}" << __EOF__ +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "test-pod-1", + "labels": { + "e": "f" + } + }, + "spec": { + "containers": [ + { + "name": "container-1", + "resources": {}, + "image": "test-image" + } + ] + } +} +__EOF__ + + kubectl create -f - "${kube_flags[@]}" << __EOF__ +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "test-pod-2", + "labels": { + "c": "d" + } + }, + "spec": { + "containers": [ + { + "name": "container-1", + "resources": {}, + "image": "test-image" + } + ] + } +} +__EOF__ + ### kubectl cordon update with --dry-run does not mark node unschedulable # Pre-condition: node is schedulable kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '' @@ -4314,6 +4359,20 @@ run_cluster_management_tests() { kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:' kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '' + ### kubectl drain with --pod-selector only evicts pods that match the given selector + # Pre-condition: node is schedulable + kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '' + # Pre-condition: test-pod-1 and test-pod-2 exist + kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,' + kubectl drain "127.0.0.1" --pod-selector 'e in (f)' + # only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist + kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2' + # delete pod no longer in use + kubectl delete pod/test-pod-2 + # Post-condition: node is schedulable + kubectl uncordon "127.0.0.1" + 
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '' + ### kubectl uncordon update with --dry-run is a no-op # Pre-condition: node is already schedulable kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '' diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index f563f10949c..d43f9178543 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" @@ -61,6 +62,7 @@ type DrainOptions struct { backOff clockwork.Clock DeleteLocalData bool Selector string + PodSelector string mapper meta.RESTMapper nodeInfos []*resource.Info Out io.Writer @@ -197,6 +199,8 @@ func NewCmdDrain(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. 
If negative, the default value specified in the pod will be used.") cmd.Flags().DurationVar(&options.Timeout, "timeout", 0, "The length of time to wait before giving up, zero means infinite") cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on") + cmd.Flags().StringVarP(&options.PodSelector, "pod-selector", "", options.PodSelector, "Label selector to filter pods on the node") + cmdutil.AddDryRunFlag(cmd) return cmd } @@ -223,6 +227,12 @@ func (o *DrainOptions) SetupDrain(cmd *cobra.Command, args []string) error { return err } + if len(o.PodSelector) > 0 { + if _, err := labels.Parse(o.PodSelector); err != nil { + return errors.New("--pod-selector= must be a valid label selector") + } + } + o.restClient, err = o.Factory.RESTClient() if err != nil { return err @@ -455,7 +465,13 @@ func (ps podStatuses) Message() string { // getPodsForDeletion receives resource info for a node, and returns all the pods from the given node that we // are planning on deleting. If there are any pods preventing us from deleting, we return that list in an error. 
func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev1.Pod, err error) { + labelSelector, err := labels.Parse(o.PodSelector) + if err != nil { + return pods, err + } + podList, err := o.client.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ + LabelSelector: labelSelector.String(), FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeInfo.Name}).String()}) if err != nil { return pods, err From 23c81bb3d45f6a7c324a72a6054ec5255d299c38 Mon Sep 17 00:00:00 2001 From: Rye Terrell Date: Mon, 18 Dec 2017 09:28:03 -0600 Subject: [PATCH 377/794] wait for kubedns to be ready --- .../kubernetes-master/reactive/kubernetes_master.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 5736e938110..d7ca9beed86 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -440,7 +440,13 @@ def send_cluster_dns_detail(kube_control): ''' Send cluster DNS info ''' enableKubeDNS = hookenv.config('enable-kube-dns') dnsDomain = hookenv.config('dns_domain') - dns_ip = None if not enableKubeDNS else get_dns_ip() + dns_ip = None + if enableKubeDNS: + try: + dns_ip = get_dns_ip() + except CalledProcessError: + hookenv.log("kubedns not ready yet") + return kube_control.set_dns(53, dnsDomain, dns_ip, enableKubeDNS) From eba5b6092afcae27a7c925afea76b85d903e87a9 Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Wed, 18 Oct 2017 23:37:03 +0200 Subject: [PATCH 378/794] Use k8s.gcr.io vanity domain for container images --- CHANGELOG-1.4.md | 8 +- CHANGELOG-1.5.md | 16 ++-- CHANGELOG-1.6.md | 8 +- api/openapi-spec/swagger.json | 2 +- api/swagger-spec/v1.json | 2 +- build/BUILD | 6 +- build/build-image/Dockerfile | 2 +- build/build-image/cross/Makefile | 4 +- build/common.sh | 10 +- 
build/debian-base/Makefile | 4 +- build/debian-hyperkube-base/Makefile | 8 +- build/debian-hyperkube-base/README.md | 12 +-- build/debian-iptables/Makefile | 6 +- build/debian-iptables/README.md | 12 +-- build/lib/release.sh | 13 ++- build/pause/Makefile | 8 +- build/root/WORKSPACE | 8 +- cluster/addons/addon-manager/Makefile | 6 +- cluster/addons/addon-manager/README.md | 12 +-- ...o-node-vertical-autoscaler-deployment.yaml | 2 +- ...ypha-horizontal-autoscaler-deployment.yaml | 2 +- .../typha-vertical-autoscaler-deployment.yaml | 2 +- .../glbc/default-svc-controller.yaml | 2 +- .../google/heapster-controller.yaml | 8 +- .../heapster-controller-combined.yaml | 9 +- .../influxdb/heapster-controller.yaml | 8 +- .../influxdb/influxdb-grafana-controller.yaml | 4 +- .../stackdriver/heapster-controller.yaml | 6 +- .../standalone/heapster-controller.yaml | 4 +- .../dashboard/dashboard-controller.yaml | 2 +- .../dns-horizontal-autoscaler.yaml | 2 +- cluster/addons/dns/kube-dns.yaml.base | 6 +- cluster/addons/dns/kube-dns.yaml.in | 6 +- cluster/addons/dns/kube-dns.yaml.sed | 6 +- .../etcd-empty-dir-cleanup.yaml | 2 +- .../fluentd-elasticsearch/es-image/Makefile | 4 +- .../fluentd-elasticsearch/es-statefulset.yaml | 2 +- .../fluentd-elasticsearch/fluentd-es-ds.yaml | 2 +- .../fluentd-es-image/Makefile | 4 +- .../fluentd-es-image/README.md | 4 +- .../addons/fluentd-gcp/event-exporter.yaml | 4 +- .../addons/fluentd-gcp/fluentd-gcp-ds.yaml | 4 +- .../addons/ip-masq-agent/ip-masq-agent.yaml | 2 +- .../metadata-proxy/gce/metadata-proxy.yaml | 4 +- .../metrics-server-deployment.yaml | 4 +- cluster/addons/node-problem-detector/npd.yaml | 2 +- cluster/addons/python-image/Makefile | 4 +- cluster/addons/registry/README.md | 2 +- cluster/addons/registry/images/Makefile | 4 +- cluster/common.sh | 4 +- cluster/gce/config-test.sh | 2 +- .../gce/container-linux/configure-helper.sh | 4 +- cluster/gce/container-linux/configure.sh | 8 +- cluster/gce/gci/configure-helper.sh | 6 +- 
cluster/gce/gci/configure.sh | 8 +- cluster/gce/gci/mounter/Makefile | 4 +- cluster/gce/util.sh | 6 +- cluster/get-kube-local.sh | 2 +- .../images/etcd-empty-dir-cleanup/Makefile | 4 +- cluster/images/etcd-version-monitor/Makefile | 14 ++- cluster/images/etcd-version-monitor/README.md | 2 +- .../etcd-version-monitor.yaml | 2 +- cluster/images/etcd/Makefile | 8 +- cluster/images/etcd/README.md | 12 +-- cluster/images/hyperkube/BUILD | 2 +- cluster/images/hyperkube/Dockerfile | 2 +- cluster/images/hyperkube/Makefile | 10 +- cluster/images/hyperkube/README.md | 14 +-- cluster/images/kubemark/Makefile | 4 +- .../reactive/kubernetes_worker.py | 6 +- cluster/kubemark/gce/config-default.sh | 2 +- cluster/log-dump/logexporter-daemonset.yaml | 2 +- cluster/restore-from-backup.sh | 4 +- cluster/saltbase/install.sh | 10 +- .../cluster-autoscaler.manifest | 6 +- .../e2e-image-puller.manifest | 96 +++++++++---------- cluster/saltbase/salt/etcd/etcd.manifest | 2 +- .../salt/kube-addons/kube-addon-manager.yaml | 2 +- .../kube-registry-proxy.yaml | 2 +- cluster/saltbase/salt/l7-gcp/glbc.manifest | 2 +- .../salt/rescheduler/rescheduler.manifest | 2 +- .../app/apis/kubeadm/v1alpha1/defaults.go | 2 +- cmd/kubeadm/app/images/images_test.go | 2 +- .../phases/selfhosting/selfhosting_test.go | 18 ++-- .../app/phases/upgrade/staticpods_test.go | 2 +- cmd/kubeadm/app/util/template_test.go | 8 +- cmd/kubelet/app/options/container_runtime.go | 2 +- docs/api-reference/v1/definitions.html | 2 +- examples/cluster-dns/dns-backend-rc.yaml | 2 +- examples/cluster-dns/dns-frontend-pod.yaml | 2 +- examples/cluster-dns/images/backend/Makefile | 4 +- examples/cluster-dns/images/frontend/Makefile | 4 +- examples/explorer/Makefile | 4 +- examples/explorer/pod.yaml | 2 +- examples/guestbook-go/Makefile | 6 +- .../guestbook-go/guestbook-controller.json | 2 +- .../all-in-one/guestbook-all-in-one.yaml | 2 +- .../legacy/redis-master-controller.yaml | 2 +- .../guestbook/redis-master-deployment.yaml | 2 +- 
examples/kubectl-container/Makefile | 4 +- examples/kubectl-container/pod.json | 4 +- .../spark-master-controller.yaml | 2 +- .../spark-worker-controller.yaml | 2 +- examples/spark/spark-master-controller.yaml | 2 +- examples/spark/spark-worker-controller.yaml | 2 +- examples/spark/zeppelin-controller.yaml | 2 +- examples/storage/cassandra/image/Dockerfile | 2 +- examples/storage/cassandra/image/Makefile | 4 +- examples/storage/redis/redis-controller.yaml | 2 +- examples/storage/redis/redis-master.yaml | 2 +- .../redis/redis-sentinel-controller.yaml | 2 +- examples/storage/rethinkdb/admin-pod.yaml | 2 +- examples/storage/rethinkdb/rc.yaml | 2 +- .../volumes/portworx/portworx-volume-pod.yaml | 2 +- .../portworx/portworx-volume-pvcpod.yaml | 2 +- .../portworx/portworx-volume-pvcscpod.yaml | 2 +- examples/volumes/scaleio/pod-sc-pvc.yaml | 2 +- examples/volumes/scaleio/pod.yaml | 2 +- .../volumes/vsphere/simple-statefulset.yaml | 2 +- .../volumes/vsphere/vsphere-volume-pod.yaml | 2 +- .../vsphere/vsphere-volume-pvcpod.yaml | 2 +- .../vsphere/vsphere-volume-pvcscpod.yaml | 2 +- hack/gen-swagger-doc/README.md | 2 +- hack/lib/swagger.sh | 2 +- hack/local-up-cluster.sh | 2 +- hack/make-rules/test-cmd-util.sh | 38 ++++---- .../deployment-multicontainer-resources.yaml | 4 +- hack/testdata/deployment-multicontainer.yaml | 4 +- hack/testdata/deployment-revision1.yaml | 2 +- hack/testdata/deployment-revision2.yaml | 2 +- hack/testdata/filter/pod-apply-selector.yaml | 2 +- hack/testdata/filter/pod-dont-apply.yaml | 2 +- hack/testdata/multi-resource-json-modify.json | 2 +- hack/testdata/multi-resource-json.json | 2 +- hack/testdata/multi-resource-list-modify.json | 2 +- hack/testdata/multi-resource-list.json | 2 +- .../multi-resource-rclist-modify.json | 4 +- hack/testdata/multi-resource-rclist.json | 4 +- hack/testdata/multi-resource-yaml-modify.yaml | 2 +- hack/testdata/multi-resource-yaml.yaml | 2 +- .../null-propagation/deployment-l1.yaml | 2 +- 
.../null-propagation/deployment-l2.yaml | 2 +- hack/testdata/pod-apply.yaml | 2 +- hack/testdata/pod-with-api-env.yaml | 2 +- hack/testdata/pod-with-precision.json | 2 +- hack/testdata/pod.yaml | 2 +- hack/testdata/prune/a.yaml | 2 +- hack/testdata/prune/b.yaml | 2 +- .../deployment/deployment/nginx-broken.yaml | 2 +- .../deployment/deployment/nginx.yaml | 2 +- hack/testdata/recursive/deployment/nginx.yaml | 2 +- .../testdata/rollingupdate-daemonset-rv2.yaml | 4 +- hack/testdata/rollingupdate-daemonset.yaml | 2 +- .../rollingupdate-statefulset-rv2.yaml | 4 +- hack/testdata/rollingupdate-statefulset.yaml | 2 +- hack/testdata/sorted-pods/sorted-pod1.yaml | 2 +- hack/testdata/sorted-pods/sorted-pod2.yaml | 2 +- hack/testdata/sorted-pods/sorted-pod3.yaml | 2 +- pkg/api/testing/deep_copy_test.go | 4 +- .../replication_controller_example.json | 2 +- .../validation/testdata/v1/invalidPod1.json | 2 +- .../validation/testdata/v1/invalidPod3.json | 2 +- .../validation/testdata/v1/invalidPod4.yaml | 2 +- pkg/kubelet/dockershim/docker_sandbox.go | 2 +- pkg/kubelet/kubelet_node_status_test.go | 10 +- pkg/kubelet/kubelet_test.go | 4 +- .../operationexecutor/operation_executor.go | 4 +- .../operation_executor_test.go | 4 +- pkg/volume/util/util_test.go | 6 +- pkg/volume/util_test.go | 8 +- .../src/k8s.io/api/core/v1/generated.proto | 2 +- staging/src/k8s.io/api/core/v1/types.go | 2 +- .../core/v1/types_swagger_doc_generated.go | 2 +- test/e2e/apimachinery/initializers.go | 2 +- test/e2e/apps/disruption.go | 4 +- test/e2e/auth/metadata_concealment.go | 2 +- test/e2e/common/apparmor.go | 2 +- test/e2e/common/util.go | 6 +- test/e2e/framework/pv_util.go | 2 +- test/e2e/framework/service_util.go | 2 +- test/e2e/framework/util.go | 2 +- test/e2e/framework/volume_util.go | 10 +- .../logging/utils/logging_pod.go | 2 +- .../monitoring/custom_metrics_deployments.go | 2 +- test/e2e/network/dns_common.go | 2 +- .../testing-manifests/ingress/http/rc.yaml | 2 +- 
.../testing-manifests/ingress/nginx/rc.yaml | 2 +- .../ingress/static-ip/rc.yaml | 2 +- .../serviceloadbalancer/haproxyrc.yaml | 2 +- .../serviceloadbalancer/netexecrc.yaml | 2 +- .../statefulset/cassandra/tester.yaml | 2 +- .../statefulset/etcd/statefulset.yaml | 2 +- .../statefulset/etcd/tester.yaml | 2 +- .../statefulset/mysql-galera/statefulset.yaml | 6 +- .../statefulset/mysql-upgrade/tester.yaml | 2 +- .../statefulset/nginx/statefulset.yaml | 4 +- .../statefulset/redis/statefulset.yaml | 2 +- .../statefulset/zookeeper/statefulset.yaml | 2 +- test/e2e_node/conformance/build/Makefile | 8 +- test/e2e_node/conformance/run_test.sh | 2 +- test/e2e_node/gke_environment_test.go | 6 +- test/e2e_node/image_id_test.go | 2 +- test/e2e_node/image_list.go | 6 +- test/e2e_node/jenkins/gci-init-gpu.yaml | 2 +- test/e2e_node/memory_eviction_test.go | 2 +- test/e2e_node/node_problem_detector_linux.go | 2 +- test/e2e_node/remote/node_conformance.go | 2 +- test/e2e_node/runtime_conformance_test.go | 4 +- .../admin/high-availability/etcd.yaml | 2 +- .../high-availability/kube-apiserver.yaml | 2 +- .../kube-controller-manager.yaml | 2 +- .../high-availability/kube-scheduler.yaml | 2 +- .../admin/limitrange/invalid-pod.yaml | 2 +- .../doc-yaml/admin/limitrange/valid-pod.yaml | 2 +- .../user-guide/downward-api/dapi-pod.yaml | 2 +- .../user-guide/liveness/exec-liveness.yaml | 2 +- .../user-guide/liveness/http-liveness.yaml | 2 +- .../doc-yaml/user-guide/multi-pod.yaml | 2 +- .../user-guide/secrets/secret-env-pod.yaml | 2 +- .../user-guide/secrets/secret-pod.yaml | 2 +- .../pkg/kubectl/builder/kitten-rc.yaml | 2 +- .../kubectl/cmd/auth/rbac-resource-plus.yaml | 2 +- test/images/image-util.sh | 2 +- test/images/iperf/BASEIMAGE | 8 +- test/images/logs-generator/README.md | 6 +- test/images/pets/peer-finder/BASEIMAGE | 8 +- test/images/pets/redis-installer/BASEIMAGE | 8 +- test/images/pets/redis-installer/README.md | 2 +- .../images/pets/zookeeper-installer/BASEIMAGE | 8 +- 
.../images/pets/zookeeper-installer/README.md | 2 +- test/images/resource-consumer/BASEIMAGE | 8 +- test/images/resource-consumer/README.md | 4 +- test/images/serve-hostname/README.md | 10 +- test/images/volumes-tester/ceph/Makefile | 6 +- test/images/volumes-tester/gluster/Makefile | 6 +- test/images/volumes-tester/iscsi/Makefile | 6 +- test/images/volumes-tester/nfs/Makefile | 6 +- test/images/volumes-tester/rbd/Makefile | 6 +- test/integration/benchmark-controller.json | 2 +- test/integration/framework/util.go | 2 +- .../master/synthetic_master_test.go | 4 +- .../scheduler/volume_binding_test.go | 2 +- .../cluster-autoscaler_template.json | 2 +- .../kubemark/resources/heapster_template.json | 4 +- .../resources/hollow-node_template.yaml | 2 +- .../resources/start-kubemark-master.sh | 2 +- test/utils/image/manifest.go | 2 +- 247 files changed, 541 insertions(+), 525 deletions(-) diff --git a/CHANGELOG-1.4.md b/CHANGELOG-1.4.md index 8c229de490b..5f7adc42124 100644 --- a/CHANGELOG-1.4.md +++ b/CHANGELOG-1.4.md @@ -177,15 +177,15 @@ filename | sha256 hash ### Other notable changes * kube-apiserver now drops unneeded path information if an older version of Windows kubectl sends it. ([#44586](https://github.com/kubernetes/kubernetes/pull/44586), [@mml](https://github.com/mml)) -* Bump gcr.io/google_containers/glbc from 0.8.0 to 0.9.2. Release notes: [0.9.0](https://github.com/kubernetes/ingress/releases/tag/0.9.0), [0.9.1](https://github.com/kubernetes/ingress/releases/tag/0.9.1), [0.9.2](https://github.com/kubernetes/ingress/releases/tag/0.9.2) ([#43098](https://github.com/kubernetes/kubernetes/pull/43098), [@timstclair](https://github.com/timstclair)) +* Bump k8s.gcr.io/glbc from 0.8.0 to 0.9.2. 
Release notes: [0.9.0](https://github.com/kubernetes/ingress/releases/tag/0.9.0), [0.9.1](https://github.com/kubernetes/ingress/releases/tag/0.9.1), [0.9.2](https://github.com/kubernetes/ingress/releases/tag/0.9.2) ([#43098](https://github.com/kubernetes/kubernetes/pull/43098), [@timstclair](https://github.com/timstclair)) * Patch CVE-2016-8859 in alpine based images: ([#42937](https://github.com/kubernetes/kubernetes/pull/42937), [@timstclair](https://github.com/timstclair)) - * - gcr.io/google-containers/etcd-empty-dir-cleanup - * - gcr.io/google-containers/kube-dnsmasq-amd64 + * - k8s.gcr.io/etcd-empty-dir-cleanup + * - k8s.gcr.io/kube-dnsmasq-amd64 * Check if pathExists before performing Unmount ([#39311](https://github.com/kubernetes/kubernetes/pull/39311), [@rkouj](https://github.com/rkouj)) * Unmount operation should not fail if volume is already unmounted ([#38547](https://github.com/kubernetes/kubernetes/pull/38547), [@rkouj](https://github.com/rkouj)) * Updates base image used for `kube-addon-manager` to latest `python:2.7-slim` and embedded `kubectl` to `v1.3.10`. No functionality changes expected. ([#42842](https://github.com/kubernetes/kubernetes/pull/42842), [@ixdy](https://github.com/ixdy)) * list-resources: don't fail if the grep fails to match any resources ([#41933](https://github.com/kubernetes/kubernetes/pull/41933), [@ixdy](https://github.com/ixdy)) -* Update gcr.io/google-containers/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. ([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) +* Update k8s.gcr.io/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. 
([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) * Backporting TPR fix to 1.4 ([#42380](https://github.com/kubernetes/kubernetes/pull/42380), [@foxish](https://github.com/foxish)) * Fix AWS device allocator to only use valid device names ([#41455](https://github.com/kubernetes/kubernetes/pull/41455), [@gnufied](https://github.com/gnufied)) * Reverts to looking up the current VM in vSphere using the machine's UUID, either obtained via sysfs or via the `vm-uuid` parameter in the cloud configuration file. ([#40892](https://github.com/kubernetes/kubernetes/pull/40892), [@robdaemon](https://github.com/robdaemon)) diff --git a/CHANGELOG-1.5.md b/CHANGELOG-1.5.md index dd3b580688e..351ef979904 100644 --- a/CHANGELOG-1.5.md +++ b/CHANGELOG-1.5.md @@ -249,18 +249,18 @@ filename | sha256 hash * kube-up (with gce/gci and gce/coreos providers) now ensures the authentication token file contains correct tokens for the control plane components, even if the file already exists (ensures upgrades and downgrades work successfully) ([#43676](https://github.com/kubernetes/kubernetes/pull/43676), [@liggitt](https://github.com/liggitt)) * Patch CVE-2016-8859 in alpine based images: ([#42936](https://github.com/kubernetes/kubernetes/pull/42936), [@timstclair](https://github.com/timstclair)) - * - gcr.io/google-containers/cluster-proportional-autoscaler-amd64 - * - gcr.io/google-containers/dnsmasq-metrics-amd64 - * - gcr.io/google-containers/etcd-empty-dir-cleanup - * - gcr.io/google-containers/kube-addon-manager - * - gcr.io/google-containers/kube-dnsmasq-amd64 + * - k8s.gcr.io/cluster-proportional-autoscaler-amd64 + * - k8s.gcr.io/dnsmasq-metrics-amd64 + * - k8s.gcr.io/etcd-empty-dir-cleanup + * - k8s.gcr.io/kube-addon-manager + * - k8s.gcr.io/kube-dnsmasq-amd64 * - Disable thin_ls due to excessive iops ([#43113](https://github.com/kubernetes/kubernetes/pull/43113), [@dashpole](https://github.com/dashpole)) * - Ignore .mount cgroups, 
fixing dissappearing stats * - Fix wc goroutine leak * - Update aws-sdk-go dependency to 1.6.10 * PodSecurityPolicy authorization is correctly enforced by the PodSecurityPolicy admission plugin. ([#43489](https://github.com/kubernetes/kubernetes/pull/43489), [@liggitt](https://github.com/liggitt)) -* Bump gcr.io/google_containers/glbc from 0.9.1 to 0.9.2. Release notes: [0.9.2](https://github.com/kubernetes/ingress/releases/tag/0.9.2) ([#43097](https://github.com/kubernetes/kubernetes/pull/43097), [@timstclair](https://github.com/timstclair)) -* Update gcr.io/google-containers/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. ([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) +* Bump k8s.gcr.io/glbc from 0.9.1 to 0.9.2. Release notes: [0.9.2](https://github.com/kubernetes/ingress/releases/tag/0.9.2) ([#43097](https://github.com/kubernetes/kubernetes/pull/43097), [@timstclair](https://github.com/timstclair)) +* Update k8s.gcr.io/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. ([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) * restored normalization of custom `--etcd-prefix` when `--storage-backend` is set to etcd3 ([#42506](https://github.com/kubernetes/kubernetes/pull/42506), [@liggitt](https://github.com/liggitt)) @@ -655,7 +655,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( - [alpha] Introducing the v1alpha1 CRI API to allow pluggable container runtimes; an experimental docker-CRI integration is ready for testing and feedback. 
([docs](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md)) ([kubernetes/features#54](https://github.com/kubernetes/features/issues/54)) - [alpha] Kubelet launches container in a per pod cgroup hierarchy based on quality of service tier ([kubernetes/features#126](https://github.com/kubernetes/features/issues/126)) - [beta] Kubelet integrates with memcg notification API to detect when a hard eviction threshold is crossed ([kubernetes/features#125](https://github.com/kubernetes/features/issues/125)) - - [beta] Introducing the beta version containerized node conformance test gcr.io/google_containers/node-test:0.2 for users to verify node setup. ([docs](http://kubernetes.io/docs/admin/node-conformance/)) ([kubernetes/features#84](https://github.com/kubernetes/features/issues/84)) + - [beta] Introducing the beta version containerized node conformance test k8s.gcr.io/node-test:0.2 for users to verify node setup. ([docs](http://kubernetes.io/docs/admin/node-conformance/)) ([kubernetes/features#84](https://github.com/kubernetes/features/issues/84)) - **Scheduling** - [alpha] Added support for accounting opaque integer resources. 
([docs](http://kubernetes.io/docs/user-guide/compute-resources/#opaque-integer-resources-alpha-feature)) ([kubernetes/features#76](https://github.com/kubernetes/features/issues/76)) - [beta] PodDisruptionBudget has been promoted to beta, can be used to safely drain nodes while respecting application SLO's ([docs](http://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/)) ([kubernetes/features#85](https://github.com/kubernetes/features/issues/85)) diff --git a/CHANGELOG-1.6.md b/CHANGELOG-1.6.md index ae2ce75e411..e7d5a4011f4 100644 --- a/CHANGELOG-1.6.md +++ b/CHANGELOG-1.6.md @@ -1935,7 +1935,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( * Fixes a bug in the OpenStack-Heat kubernetes provider, in the handling of differences between the Identity v2 and Identity v3 APIs ([#40105](https://github.com/kubernetes/kubernetes/pull/40105), [@sc68cal](https://github.com/sc68cal)) ### Container Images -* Update gcr.io/google-containers/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. ([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) +* Update k8s.gcr.io/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. 
([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) * Remove unnecessary metrics (http/process/go) from being exposed by etcd-version-monitor ([#41807](https://github.com/kubernetes/kubernetes/pull/41807), [@shyamjvs](https://github.com/shyamjvs)) * Align the hyperkube image to support running binaries at /usr/local/bin/ like the other server images ([#41017](https://github.com/kubernetes/kubernetes/pull/41017), [@luxas](https://github.com/luxas)) * Bump up GLBC version from 0.9.0-beta to 0.9.1 ([#41037](https://github.com/kubernetes/kubernetes/pull/41037), [@bprashanth](https://github.com/bprashanth)) @@ -1982,7 +1982,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( * Use kube-dns:1.11.0 ([#39925](https://github.com/kubernetes/kubernetes/pull/39925), [@sadlil](https://github.com/sadlil)) ### DNS Autoscaler -* Patch CVE-2016-8859 in gcr.io/google-containers/cluster-proportional-autoscaler-amd64 ([#42933](https://github.com/kubernetes/kubernetes/pull/42933), [@timstclair](https://github.com/timstclair)) +* Patch CVE-2016-8859 in k8s.gcr.io/cluster-proportional-autoscaler-amd64 ([#42933](https://github.com/kubernetes/kubernetes/pull/42933), [@timstclair](https://github.com/timstclair)) ### Cluster Autoscaler * Allow the Horizontal Pod Autoscaler controller to talk to the metrics API and custom metrics API as standard APIs. ([#41824](https://github.com/kubernetes/kubernetes/pull/41824), [@DirectXMan12](https://github.com/DirectXMan12)) @@ -2149,7 +2149,7 @@ filename | sha256 hash * Rescheduler uses taints in v1beta1 and will remove old ones (in version v1alpha1) right after its start. 
([#43106](https://github.com/kubernetes/kubernetes/pull/43106), [@piosz](https://github.com/piosz)) * kubeadm: `kubeadm reset` won't drain and remove the current node anymore ([#42713](https://github.com/kubernetes/kubernetes/pull/42713), [@luxas](https://github.com/luxas)) * hack/godep-restore.sh: use godep v79 which works ([#42965](https://github.com/kubernetes/kubernetes/pull/42965), [@sttts](https://github.com/sttts)) -* Patch CVE-2016-8859 in gcr.io/google-containers/cluster-proportional-autoscaler-amd64 ([#42933](https://github.com/kubernetes/kubernetes/pull/42933), [@timstclair](https://github.com/timstclair)) +* Patch CVE-2016-8859 in k8s.gcr.io/cluster-proportional-autoscaler-amd64 ([#42933](https://github.com/kubernetes/kubernetes/pull/42933), [@timstclair](https://github.com/timstclair)) * Disable devicemapper thin_ls due to excessive iops ([#42899](https://github.com/kubernetes/kubernetes/pull/42899), [@dashpole](https://github.com/dashpole)) @@ -2383,7 +2383,7 @@ filename | sha256 hash * Add configurable limits to CronJob resource to specify how many successful and failed jobs are preserved. ([#40932](https://github.com/kubernetes/kubernetes/pull/40932), [@peay](https://github.com/peay)) * Deprecate outofdisk-transition-frequency and low-diskspace-threshold-mb flags ([#41941](https://github.com/kubernetes/kubernetes/pull/41941), [@dashpole](https://github.com/dashpole)) * Add OWNERS for sample-apiserver in staging ([#42094](https://github.com/kubernetes/kubernetes/pull/42094), [@sttts](https://github.com/sttts)) -* Update gcr.io/google-containers/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. ([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) +* Update k8s.gcr.io/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. 
([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) * Add storage.k8s.io/v1 API ([#40088](https://github.com/kubernetes/kubernetes/pull/40088), [@jsafrane](https://github.com/jsafrane)) * Juju - K8s master charm now properly keeps distributed master files in sync for an HA control plane. ([#41351](https://github.com/kubernetes/kubernetes/pull/41351), [@chuckbutler](https://github.com/chuckbutler)) * Fix zsh completion: unknown file attribute error ([#38104](https://github.com/kubernetes/kubernetes/pull/38104), [@elipapa](https://github.com/elipapa)) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 1e463c37288..513f45afc68 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -75018,7 +75018,7 @@ ], "properties": { "names": { - "description": "Names by which this image is known. e.g. [\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "description": "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", "type": "array", "items": { "type": "string" diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index cc2cebe67cf..206efd37e2f 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -20257,7 +20257,7 @@ "items": { "type": "string" }, - "description": "Names by which this image is known. e.g. [\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]" + "description": "Names by which this image is known. e.g. 
[\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]" }, "sizeBytes": { "type": "integer", diff --git a/build/BUILD b/build/BUILD index 5f531a1f663..cfd10691b9d 100644 --- a/build/BUILD +++ b/build/BUILD @@ -62,7 +62,11 @@ DOCKERIZED_BINARIES = { [docker_bundle( name = binary, - images = {"gcr.io/google_containers/%s:{STABLE_DOCKER_TAG}" % binary: binary + "-internal"}, + # TODO(thockin): remove the google_containers name after release 1.10. + images = { + "k8s.gcr.io/%s:{STABLE_DOCKER_TAG}" % binary: binary + "-internal", + "gcr.io/google_containers/%s:{STABLE_DOCKER_TAG}" % binary: binary + "-internal", + }, stamp = True, ) for binary in DOCKERIZED_BINARIES.keys()] diff --git a/build/build-image/Dockerfile b/build/build-image/Dockerfile index 59be59e4091..f9d0adaf034 100644 --- a/build/build-image/Dockerfile +++ b/build/build-image/Dockerfile @@ -13,7 +13,7 @@ # limitations under the License. # This file creates a standard build environment for building Kubernetes -FROM gcr.io/google_containers/kube-cross:KUBE_BUILD_IMAGE_CROSS_TAG +FROM k8s.gcr.io/kube-cross:KUBE_BUILD_IMAGE_CROSS_TAG # Mark this as a kube-build container RUN touch /kube-build-image diff --git a/build/build-image/cross/Makefile b/build/build-image/cross/Makefile index 83e2c862226..a8e773d09bb 100644 --- a/build/build-image/cross/Makefile +++ b/build/build-image/cross/Makefile @@ -21,7 +21,7 @@ TAG=$(shell cat VERSION) all: push build: - docker build --pull -t gcr.io/google_containers/$(IMAGE):$(TAG) . + docker build --pull -t k8s.gcr.io/$(IMAGE):$(TAG) . 
push: build - gcloud docker --server=gcr.io -- push gcr.io/google_containers/$(IMAGE):$(TAG) + gcloud docker --server=k8s.gcr.io -- push k8s.gcr.io/$(IMAGE):$(TAG) diff --git a/build/common.sh b/build/common.sh index 8f8254ca228..6489f83c783 100755 --- a/build/common.sh +++ b/build/common.sh @@ -96,7 +96,7 @@ kube::build::get_docker_wrapped_binaries() { kube-controller-manager,busybox kube-scheduler,busybox kube-aggregator,busybox - kube-proxy,gcr.io/google-containers/debian-iptables-amd64:${debian_iptables_version} + kube-proxy,k8s.gcr.io/debian-iptables-amd64:${debian_iptables_version} );; "arm") local targets=( @@ -105,7 +105,7 @@ kube::build::get_docker_wrapped_binaries() { kube-controller-manager,arm32v7/busybox kube-scheduler,arm32v7/busybox kube-aggregator,arm32v7/busybox - kube-proxy,gcr.io/google-containers/debian-iptables-arm:${debian_iptables_version} + kube-proxy,k8s.gcr.io/debian-iptables-arm:${debian_iptables_version} );; "arm64") local targets=( @@ -114,7 +114,7 @@ kube::build::get_docker_wrapped_binaries() { kube-controller-manager,arm64v8/busybox kube-scheduler,arm64v8/busybox kube-aggregator,arm64v8/busybox - kube-proxy,gcr.io/google-containers/debian-iptables-arm64:${debian_iptables_version} + kube-proxy,k8s.gcr.io/debian-iptables-arm64:${debian_iptables_version} );; "ppc64le") local targets=( @@ -123,7 +123,7 @@ kube::build::get_docker_wrapped_binaries() { kube-controller-manager,ppc64le/busybox kube-scheduler,ppc64le/busybox kube-aggregator,ppc64le/busybox - kube-proxy,gcr.io/google-containers/debian-iptables-ppc64le:${debian_iptables_version} + kube-proxy,k8s.gcr.io/debian-iptables-ppc64le:${debian_iptables_version} );; "s390x") local targets=( @@ -132,7 +132,7 @@ kube::build::get_docker_wrapped_binaries() { kube-controller-manager,s390x/busybox kube-scheduler,s390x/busybox kube-aggregator,s390x/busybox - kube-proxy,gcr.io/google-containers/debian-iptables-s390x:${debian_iptables_version} + 
kube-proxy,k8s.gcr.io/debian-iptables-s390x:${debian_iptables_version} );; esac diff --git a/build/debian-base/Makefile b/build/debian-base/Makefile index 47eafa5c645..3c49cb05480 100755 --- a/build/debian-base/Makefile +++ b/build/debian-base/Makefile @@ -14,7 +14,7 @@ all: build -REGISTRY ?= gcr.io/google-containers +REGISTRY ?= k8s.gcr.io IMAGE ?= debian-base BUILD_IMAGE ?= debian-build @@ -71,7 +71,7 @@ endif rm -rf $(TEMP_DIR) push: build - gcloud docker -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) clean: docker rmi -f $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) || true diff --git a/build/debian-hyperkube-base/Makefile b/build/debian-hyperkube-base/Makefile index 54ca29f7e9a..46b5709daeb 100644 --- a/build/debian-hyperkube-base/Makefile +++ b/build/debian-hyperkube-base/Makefile @@ -15,15 +15,15 @@ # Build the hyperkube base image. This image is used to build the hyperkube image. # # Usage: -# [ARCH=amd64] [REGISTRY="gcr.io/google-containers"] make (build|push) +# [ARCH=amd64] [REGISTRY="k8s.gcr.io"] make (build|push) -REGISTRY?=gcr.io/google-containers +REGISTRY?=k8s.gcr.io IMAGE?=debian-hyperkube-base TAG=0.8 ARCH?=amd64 CACHEBUST?=1 -BASEIMAGE=gcr.io/google-containers/debian-base-$(ARCH):0.3 +BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.3 CNI_VERSION=v0.6.0 TEMP_DIR:=$(shell mktemp -d) @@ -57,4 +57,4 @@ endif rm -rf $(TEMP_DIR) push: build - gcloud docker -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) diff --git a/build/debian-hyperkube-base/README.md b/build/debian-hyperkube-base/README.md index 2cef311cb6d..c5a1216e6bd 100644 --- a/build/debian-hyperkube-base/README.md +++ b/build/debian-hyperkube-base/README.md @@ -1,6 +1,6 @@ ### debian-hyperkube-base -Serves as the base image for `gcr.io/google-containers/hyperkube-${ARCH}` +Serves as the base image for `k8s.gcr.io/hyperkube-${ARCH}` images. 
This image is compiled for multiple architectures. @@ -12,19 +12,19 @@ If you're editing the Dockerfile or some other thing, please bump the `TAG` in t ```console # Build for linux/amd64 (default) $ make push ARCH=amd64 -# ---> gcr.io/google-containers/debian-hyperkube-base-amd64:TAG +# ---> k8s.gcr.io/debian-hyperkube-base-amd64:TAG $ make push ARCH=arm -# ---> gcr.io/google-containers/debian-hyperkube-base-arm:TAG +# ---> k8s.gcr.io/debian-hyperkube-base-arm:TAG $ make push ARCH=arm64 -# ---> gcr.io/google-containers/debian-hyperkube-base-arm64:TAG +# ---> k8s.gcr.io/debian-hyperkube-base-arm64:TAG $ make push ARCH=ppc64le -# ---> gcr.io/google-containers/debian-hyperkube-base-ppc64le:TAG +# ---> k8s.gcr.io/debian-hyperkube-base-ppc64le:TAG $ make push ARCH=s390x -# ---> gcr.io/google-containers/debian-hyperkube-base-s390x:TAG +# ---> k8s.gcr.io/debian-hyperkube-base-s390x:TAG ``` If you don't want to push the images, run `make build` instead diff --git a/build/debian-iptables/Makefile b/build/debian-iptables/Makefile index 1cc75edf17a..d408b6ae41b 100644 --- a/build/debian-iptables/Makefile +++ b/build/debian-iptables/Makefile @@ -14,7 +14,7 @@ .PHONY: build push -REGISTRY?="gcr.io/google-containers" +REGISTRY?="k8s.gcr.io" IMAGE=debian-iptables TAG=v10 ARCH?=amd64 @@ -34,7 +34,7 @@ ifeq ($(ARCH),s390x) QEMUARCH=s390x endif -BASEIMAGE=gcr.io/google-containers/debian-base-$(ARCH):0.3 +BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.3 build: cp ./* $(TEMP_DIR) @@ -55,6 +55,6 @@ endif docker build --pull -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR) push: build - gcloud docker -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) all: push diff --git a/build/debian-iptables/README.md b/build/debian-iptables/README.md index f5c0b3c7e0f..04ea0c26633 100644 --- a/build/debian-iptables/README.md +++ b/build/debian-iptables/README.md @@ -1,6 +1,6 @@ ### debian-iptables -Serves as the base image for 
`gcr.io/google_containers/kube-proxy-${ARCH}` and multiarch (not `amd64`) `gcr.io/google_containers/flannel-${ARCH}` images. +Serves as the base image for `k8s.gcr.io/kube-proxy-${ARCH}` and multiarch (not `amd64`) `k8s.gcr.io/flannel-${ARCH}` images. This image is compiled for multiple architectures. @@ -11,19 +11,19 @@ If you're editing the Dockerfile or some other thing, please bump the `TAG` in t ```console # Build for linux/amd64 (default) $ make push ARCH=amd64 -# ---> gcr.io/google_containers/debian-iptables-amd64:TAG +# ---> k8s.gcr.io/debian-iptables-amd64:TAG $ make push ARCH=arm -# ---> gcr.io/google_containers/debian-iptables-arm:TAG +# ---> k8s.gcr.io/debian-iptables-arm:TAG $ make push ARCH=arm64 -# ---> gcr.io/google_containers/debian-iptables-arm64:TAG +# ---> k8s.gcr.io/debian-iptables-arm64:TAG $ make push ARCH=ppc64le -# ---> gcr.io/google_containers/debian-iptables-ppc64le:TAG +# ---> k8s.gcr.io/debian-iptables-ppc64le:TAG $ make push ARCH=s390x -# ---> gcr.io/google_containers/debian-iptables-s390x:TAG +# ---> k8s.gcr.io/debian-iptables-s390x:TAG ``` If you don't want to push the images, run `make` or `make build` instead diff --git a/build/lib/release.sh b/build/lib/release.sh index 870451601f6..ce466a98d37 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -285,7 +285,11 @@ function kube::release::create_docker_images_for_server() { local images_dir="${RELEASE_IMAGES}/${arch}" mkdir -p "${images_dir}" - local -r docker_registry="gcr.io/google_containers" + local -r docker_registry="k8s.gcr.io" + # TODO(thockin): Remove all traces of this after 1.10 release. + # The following is the old non-indirected registry name. To ease the + # transition to the new name (above), we are double-tagging saved images. 
+ local -r deprecated_registry="gcr.io/google_containers" # Docker tags cannot contain '+' local docker_tag="${KUBE_GIT_VERSION/+/_}" if [[ -z "${docker_tag}" ]]; then @@ -306,14 +310,17 @@ function kube::release::create_docker_images_for_server() { local docker_file_path="${docker_build_path}/Dockerfile" local binary_file_path="${binary_dir}/${binary_name}" local docker_image_tag="${docker_registry}" + local deprecated_image_tag="${deprecated_registry}" if [[ ${arch} == "amd64" ]]; then # If we are building a amd64 docker image, preserve the original # image name docker_image_tag+="/${binary_name}:${docker_tag}" + deprecated_image_tag+="/${binary_name}:${docker_tag}" else # If we are building a docker image for another architecture, # append the arch in the image tag docker_image_tag+="/${binary_name}-${arch}:${docker_tag}" + deprecated_image_tag+="/${binary_name}-${arch}:${docker_tag}" fi @@ -325,7 +332,8 @@ function kube::release::create_docker_images_for_server() { printf " FROM ${base_image} \n ADD ${binary_name} /usr/local/bin/${binary_name}\n" > ${docker_file_path} "${DOCKER[@]}" build --pull -q -t "${docker_image_tag}" ${docker_build_path} >/dev/null - "${DOCKER[@]}" save "${docker_image_tag}" > "${binary_dir}/${binary_name}.tar" + "${DOCKER[@]}" tag "${docker_image_tag}" ${deprecated_image_tag} >/dev/null + "${DOCKER[@]}" save "${docker_image_tag}" ${deprecated_image_tag} > "${binary_dir}/${binary_name}.tar" echo "${docker_tag}" > ${binary_dir}/${binary_name}.docker_tag rm -rf ${docker_build_path} ln "${binary_dir}/${binary_name}.tar" "${images_dir}/" @@ -344,6 +352,7 @@ function kube::release::create_docker_images_for_server() { # not a release kube::log::status "Deleting docker image ${docker_image_tag}" "${DOCKER[@]}" rmi ${docker_image_tag} &>/dev/null || true + "${DOCKER[@]}" rmi ${deprecated_image_tag} &>/dev/null || true fi ) & done diff --git a/build/pause/Makefile b/build/pause/Makefile index cad23f60857..a5a2ff6a87b 100644 --- 
a/build/pause/Makefile +++ b/build/pause/Makefile @@ -14,7 +14,7 @@ .PHONY: all push push-legacy container clean orphan -REGISTRY ?= gcr.io/google_containers +REGISTRY ?= k8s.gcr.io IMAGE = $(REGISTRY)/pause-$(ARCH) LEGACY_AMD64_IMAGE = $(REGISTRY)/pause @@ -26,7 +26,7 @@ ARCH ?= amd64 ALL_ARCH = amd64 arm arm64 ppc64le s390x CFLAGS = -Os -Wall -Werror -static -KUBE_CROSS_IMAGE ?= gcr.io/google_containers/kube-cross +KUBE_CROSS_IMAGE ?= k8s.gcr.io/kube-cross KUBE_CROSS_VERSION ?= $(shell cat ../build-image/cross/VERSION) BIN = pause @@ -87,13 +87,13 @@ endif push: .push-$(ARCH) .push-$(ARCH): .container-$(ARCH) - gcloud docker -- push $(IMAGE):$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(IMAGE):$(TAG) touch $@ push-legacy: .push-legacy-$(ARCH) .push-legacy-$(ARCH): .container-$(ARCH) ifeq ($(ARCH),amd64) - gcloud docker -- push $(LEGACY_AMD64_IMAGE):$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(LEGACY_AMD64_IMAGE):$(TAG) endif touch $@ diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index 7de0ffaf334..2a000c552df 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -81,16 +81,16 @@ http_file( docker_pull( name = "debian-iptables-amd64", digest = "sha256:a3b936c0fb98a934eecd2cfb91f73658d402b29116084e778ce9ddb68e55383e", - registry = "gcr.io", - repository = "google-containers/debian-iptables-amd64", + registry = "k8s.gcr.io", + repository = "debian-iptables-amd64", tag = "v10", # ignored, but kept here for documentation ) docker_pull( name = "debian-hyperkube-base-amd64", digest = "sha256:fc1b461367730660ac5a40c1eb2d1b23221829acf8a892981c12361383b3742b", - registry = "gcr.io", - repository = "google-containers/debian-hyperkube-base-amd64", + registry = "k8s.gcr.io", + repository = "debian-hyperkube-base-amd64", tag = "0.8", # ignored, but kept here for documentation ) diff --git a/cluster/addons/addon-manager/Makefile b/cluster/addons/addon-manager/Makefile index 854cd4e2557..baa63d2b5b6 100644 --- 
a/cluster/addons/addon-manager/Makefile +++ b/cluster/addons/addon-manager/Makefile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -IMAGE=gcr.io/google-containers/kube-addon-manager +IMAGE=k8s.gcr.io/kube-addon-manager ARCH?=amd64 TEMP_DIR:=$(shell mktemp -d) VERSION=v8.4 @@ -46,12 +46,12 @@ build: docker build --pull -t $(IMAGE)-$(ARCH):$(VERSION) $(TEMP_DIR) push: build - gcloud docker -- push $(IMAGE)-$(ARCH):$(VERSION) + gcloud docker --server=k8s.gcr.io -- push $(IMAGE)-$(ARCH):$(VERSION) ifeq ($(ARCH),amd64) # Backward compatibility. TODO: deprecate this image tag docker rmi $(IMAGE):$(VERSION) 2>/dev/null || true docker tag $(IMAGE)-$(ARCH):$(VERSION) $(IMAGE):$(VERSION) - gcloud docker -- push $(IMAGE):$(VERSION) + gcloud docker --server=k8s.gcr.io -- push $(IMAGE):$(VERSION) endif clean: diff --git a/cluster/addons/addon-manager/README.md b/cluster/addons/addon-manager/README.md index e9ae53458c9..d8b231b6c5a 100644 --- a/cluster/addons/addon-manager/README.md +++ b/cluster/addons/addon-manager/README.md @@ -40,20 +40,20 @@ The `addon-manager` is built for multiple architectures. 
```console # Build for linux/amd64 (default) $ make push ARCH=amd64 -# ---> gcr.io/google-containers/kube-addon-manager-amd64:VERSION -# ---> gcr.io/google-containers/kube-addon-manager:VERSION (image with backwards-compatible naming) +# ---> k8s.gcr.io/kube-addon-manager-amd64:VERSION +# ---> k8s.gcr.io/kube-addon-manager:VERSION (image with backwards-compatible naming) $ make push ARCH=arm -# ---> gcr.io/google-containers/kube-addon-manager-arm:VERSION +# ---> k8s.gcr.io/kube-addon-manager-arm:VERSION $ make push ARCH=arm64 -# ---> gcr.io/google-containers/kube-addon-manager-arm64:VERSION +# ---> k8s.gcr.io/kube-addon-manager-arm64:VERSION $ make push ARCH=ppc64le -# ---> gcr.io/google-containers/kube-addon-manager-ppc64le:VERSION +# ---> k8s.gcr.io/kube-addon-manager-ppc64le:VERSION $ make push ARCH=s390x -# ---> gcr.io/google-containers/kube-addon-manager-s390x:VERSION +# ---> k8s.gcr.io/kube-addon-manager-s390x:VERSION ``` If you don't want to push the images, run `make` or `make build` instead diff --git a/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml index c66c3e07200..50ffcab83d9 100644 --- a/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml @@ -17,7 +17,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/cpvpa-amd64:v0.6.0 + - image: k8s.gcr.io/cpvpa-amd64:v0.6.0 name: autoscaler command: - /cpvpa diff --git a/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml index 4f493b8bcf3..b338df79df0 100644 --- a/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml +++ 
b/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml @@ -17,7 +17,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2 + - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2 name: autoscaler command: - /cluster-proportional-autoscaler diff --git a/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml index c59be9af62a..4f559626855 100644 --- a/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml @@ -17,7 +17,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/cpvpa-amd64:v0.6.0 + - image: k8s.gcr.io/cpvpa-amd64:v0.6.0 name: autoscaler command: - /cpvpa diff --git a/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml b/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml index febec626fc1..a2955031360 100644 --- a/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml +++ b/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml @@ -24,7 +24,7 @@ spec: # Any image is permissible as long as: # 1. It serves a 404 page at / # 2. 
It serves 200 on a /healthz endpoint - image: gcr.io/google_containers/defaultbackend:1.3 + image: k8s.gcr.io/defaultbackend:1.3 livenessProbe: httpGet: path: /healthz diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 81b513281e6..2bf907c6f0c 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -71,7 +71,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.5.0 + - image: k8s.gcr.io/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -84,13 +84,13 @@ spec: - /heapster - --source=kubernetes.summary_api:'' - --sink=gcm - - image: gcr.io/google_containers/heapster-amd64:v1.5.0 + - image: k8s.gcr.io/heapster-amd64:v1.5.0 name: eventer command: - /eventer - --source=kubernetes:'' - --sink=gcl - - image: gcr.io/google_containers/addon-resizer:1.8.1 + - image: k8s.gcr.io/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -123,7 +123,7 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential - - image: gcr.io/google_containers/addon-resizer:1.8.1 + - image: k8s.gcr.io/addon-resizer:1.8.1 name: eventer-nanny resources: limits: diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index 6896fccc232..67f0f3ac2ad 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -71,8 +71,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.5.0 - + - image: k8s.gcr.io/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ 
-86,13 +85,13 @@ spec: - --source=kubernetes.summary_api:'' - --sink=influxdb:http://monitoring-influxdb:8086 - --sink=gcm:?metrics=autoscaling - - image: gcr.io/google_containers/heapster-amd64:v1.5.0 + - image: k8s.gcr.io/heapster-amd64:v1.5.0 name: eventer command: - /eventer - --source=kubernetes:'' - --sink=gcl - - image: gcr.io/google_containers/addon-resizer:1.8.1 + - image: k8s.gcr.io/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -125,7 +124,7 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential - - image: gcr.io/google_containers/addon-resizer:1.8.1 + - image: k8s.gcr.io/addon-resizer:1.8.1 name: eventer-nanny resources: limits: diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml index 2c389a340b8..9f19bf36cd2 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -71,7 +71,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.5.0 + - image: k8s.gcr.io/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -84,13 +84,13 @@ spec: - /heapster - --source=kubernetes.summary_api:'' - --sink=influxdb:http://monitoring-influxdb:8086 - - image: gcr.io/google_containers/heapster-amd64:v1.5.0 + - image: k8s.gcr.io/heapster-amd64:v1.5.0 name: eventer command: - /eventer - --source=kubernetes:'' - --sink=influxdb:http://monitoring-influxdb:8086 - - image: gcr.io/google_containers/addon-resizer:1.8.1 + - image: k8s.gcr.io/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -123,7 +123,7 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential - - image: gcr.io/google_containers/addon-resizer:1.8.1 + - image: k8s.gcr.io/addon-resizer:1.8.1 name: eventer-nanny resources: limits: diff --git 
a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml index d562c748471..7c78ed2c49c 100644 --- a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml @@ -29,7 +29,7 @@ spec: operator: "Exists" containers: - name: influxdb - image: gcr.io/google_containers/heapster-influxdb-amd64:v1.3.3 + image: k8s.gcr.io/heapster-influxdb-amd64:v1.3.3 resources: limits: cpu: 100m @@ -46,7 +46,7 @@ spec: - name: influxdb-persistent-storage mountPath: /data - name: grafana - image: gcr.io/google_containers/heapster-grafana-amd64:v4.4.3 + image: k8s.gcr.io/heapster-grafana-amd64:v4.4.3 env: resources: # keep request = limit to keep this container in guaranteed class diff --git a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml index 85e8383adf8..5b1b38bec81 100644 --- a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml @@ -56,7 +56,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.5.0 + - image: k8s.gcr.io/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -71,7 +71,7 @@ spec: - --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110 # BEGIN_PROMETHEUS_TO_SD - name: prom-to-sd - image: gcr.io/google-containers/prometheus-to-sd:v0.2.2 + image: k8s.gcr.io/prometheus-to-sd:v0.2.2 command: - /monitor - --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count @@ -89,7 +89,7 @@ spec: fieldRef: fieldPath: metadata.namespace # 
END_PROMETHEUS_TO_SD - - image: gcr.io/google_containers/addon-resizer:1.8.1 + - image: k8s.gcr.io/addon-resizer:1.8.1 name: heapster-nanny resources: limits: diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml index f82d1b70f22..7eacad1e6f3 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -56,7 +56,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: gcr.io/google_containers/heapster-amd64:v1.5.0 + - image: k8s.gcr.io/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -68,7 +68,7 @@ spec: command: - /heapster - --source=kubernetes.summary_api:'' - - image: gcr.io/google_containers/addon-resizer:1.8.1 + - image: k8s.gcr.io/addon-resizer:1.8.1 name: heapster-nanny resources: limits: diff --git a/cluster/addons/dashboard/dashboard-controller.yaml b/cluster/addons/dashboard/dashboard-controller.yaml index ac05d3a9a29..c1015f93c51 100644 --- a/cluster/addons/dashboard/dashboard-controller.yaml +++ b/cluster/addons/dashboard/dashboard-controller.yaml @@ -29,7 +29,7 @@ spec: spec: containers: - name: kubernetes-dashboard - image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.0 + image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.0 resources: limits: cpu: 100m diff --git a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml index 9e4e38b8760..cb413e8723c 100644 --- a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml +++ b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml @@ -77,7 +77,7 @@ spec: spec: containers: - name: autoscaler - image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2-r2 + image: 
k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 resources: requests: cpu: "20m" diff --git a/cluster/addons/dns/kube-dns.yaml.base b/cluster/addons/dns/kube-dns.yaml.base index e93884df0ca..c7fdf3afa22 100644 --- a/cluster/addons/dns/kube-dns.yaml.base +++ b/cluster/addons/dns/kube-dns.yaml.base @@ -94,7 +94,7 @@ spec: optional: true containers: - name: kubedns - image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 + image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.7 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -145,7 +145,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.7 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -184,7 +184,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 + image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.7 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/dns/kube-dns.yaml.in b/cluster/addons/dns/kube-dns.yaml.in index 12b09236723..0aa1196e631 100644 --- a/cluster/addons/dns/kube-dns.yaml.in +++ b/cluster/addons/dns/kube-dns.yaml.in @@ -94,7 +94,7 @@ spec: optional: true containers: - name: kubedns - image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 + image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.7 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -145,7 +145,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.7 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -184,7 +184,7 @@ spec: - name: 
kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 + image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.7 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/dns/kube-dns.yaml.sed b/cluster/addons/dns/kube-dns.yaml.sed index 101cf588e2d..cf7794beb8f 100644 --- a/cluster/addons/dns/kube-dns.yaml.sed +++ b/cluster/addons/dns/kube-dns.yaml.sed @@ -94,7 +94,7 @@ spec: optional: true containers: - name: kubedns - image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 + image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.7 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -145,7 +145,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.7 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -184,7 +184,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 + image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.7 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml b/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml index 05943c8c416..66b3caa36ab 100644 --- a/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml +++ b/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml @@ -23,4 +23,4 @@ spec: dnsPolicy: Default containers: - name: etcd-empty-dir-cleanup - image: gcr.io/google-containers/etcd-empty-dir-cleanup:3.0.14.0 + image: k8s.gcr.io/etcd-empty-dir-cleanup:3.0.14.0 diff --git a/cluster/addons/fluentd-elasticsearch/es-image/Makefile b/cluster/addons/fluentd-elasticsearch/es-image/Makefile index 51222ad3800..2ccd991602b 100755 --- 
a/cluster/addons/fluentd-elasticsearch/es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/es-image/Makefile @@ -14,7 +14,7 @@ .PHONY: binary build push -PREFIX = gcr.io/google-containers +PREFIX = k8s.gcr.io IMAGE = elasticsearch TAG = v5.6.4 @@ -22,7 +22,7 @@ build: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . push: - gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/$(IMAGE):$(TAG) binary: CGO_ENABLED=0 GOOS=linux go build -a -ldflags "-w" elasticsearch_logging_discovery.go diff --git a/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml b/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml index b6357f47ef3..7e611010990 100644 --- a/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml +++ b/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml @@ -73,7 +73,7 @@ spec: spec: serviceAccountName: elasticsearch-logging containers: - - image: gcr.io/google-containers/elasticsearch:v5.6.4 + - image: k8s.gcr.io/elasticsearch:v5.6.4 name: elasticsearch-logging resources: # need more cpu upon initialization, therefore burstable class diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml index 74242adce74..3690fe1271b 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml @@ -75,7 +75,7 @@ spec: serviceAccountName: fluentd-es containers: - name: fluentd-es - image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.2 + image: k8s.gcr.io/fluentd-elasticsearch:v2.0.2 env: - name: FLUENTD_ARGS value: --no-supervisor -q diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile index 0b5fa8a487c..c511e57b861 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile @@ -14,7 +14,7 
@@ .PHONY: build push -PREFIX = gcr.io/google-containers +PREFIX = k8s.gcr.io IMAGE = fluentd-elasticsearch TAG = v2.0.2 @@ -22,4 +22,4 @@ build: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . push: - gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/$(IMAGE):$(TAG) diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md index 8b97511a009..9a651d522f9 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md @@ -4,11 +4,11 @@ that collects Docker container log files using [Fluentd][fluentd] and sends them to an instance of [Elasticsearch][elasticsearch]. This image is designed to be used as part of the [Kubernetes][kubernetes] cluster bring up process. The image resides at GCR under the name -[gcr.io/google-containers/fluentd-elasticsearch][image]. +[k8s.gcr.io/fluentd-elasticsearch][image]. 
[fluentd]: http://www.fluentd.org/ [elasticsearch]: https://www.elastic.co/products/elasticsearch [kubernetes]: https://kubernetes.io -[image]: https://gcr.io/google-containers/fluentd-elasticsearch +[image]: https://k8s.gcr.io/fluentd-elasticsearch [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md?pixel)]() diff --git a/cluster/addons/fluentd-gcp/event-exporter.yaml b/cluster/addons/fluentd-gcp/event-exporter.yaml index 246fa8c42bc..6321b3844f3 100644 --- a/cluster/addons/fluentd-gcp/event-exporter.yaml +++ b/cluster/addons/fluentd-gcp/event-exporter.yaml @@ -47,12 +47,12 @@ spec: serviceAccountName: event-exporter-sa containers: - name: event-exporter - image: gcr.io/google-containers/event-exporter:v0.1.7 + image: k8s.gcr.io/event-exporter:v0.1.7 command: - '/event-exporter' # BEGIN_PROMETHEUS_TO_SD - name: prometheus-to-sd-exporter - image: gcr.io/google-containers/prometheus-to-sd:v0.2.2 + image: k8s.gcr.io/prometheus-to-sd:v0.2.2 command: - /monitor - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index dd516db77e0..6996462685d 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -27,7 +27,7 @@ spec: dnsPolicy: Default containers: - name: fluentd-gcp - image: gcr.io/google-containers/fluentd-gcp:2.0.11 + image: k8s.gcr.io/fluentd-gcp:2.0.11 env: - name: FLUENTD_ARGS value: --no-supervisor -q @@ -82,7 +82,7 @@ spec: fi; # BEGIN_PROMETHEUS_TO_SD - name: prometheus-to-sd-exporter - image: gcr.io/google-containers/prometheus-to-sd:v0.2.2 + image: k8s.gcr.io/prometheus-to-sd:v0.2.2 command: - /monitor - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons diff --git a/cluster/addons/ip-masq-agent/ip-masq-agent.yaml b/cluster/addons/ip-masq-agent/ip-masq-agent.yaml index f6bb21c01b9..c27a802cb75 
100644 --- a/cluster/addons/ip-masq-agent/ip-masq-agent.yaml +++ b/cluster/addons/ip-masq-agent/ip-masq-agent.yaml @@ -28,7 +28,7 @@ spec: hostNetwork: true containers: - name: ip-masq-agent - image: gcr.io/google-containers/ip-masq-agent-amd64:v2.0.2 + image: k8s.gcr.io/ip-masq-agent-amd64:v2.0.2 resources: requests: cpu: 10m diff --git a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml index a710f917cd4..f18033b8d69 100644 --- a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml +++ b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml @@ -38,7 +38,7 @@ spec: dnsPolicy: Default containers: - name: metadata-proxy - image: gcr.io/google_containers/metadata-proxy:v0.1.6 + image: k8s.gcr.io/metadata-proxy:v0.1.6 securityContext: privileged: true # Request and limit resources to get guaranteed QoS. @@ -51,7 +51,7 @@ spec: cpu: "30m" # BEGIN_PROMETHEUS_TO_SD - name: prometheus-to-sd-exporter - image: gcr.io/google_containers/prometheus-to-sd:v0.2.2 + image: k8s.gcr.io/prometheus-to-sd:v0.2.2 # Request and limit resources to get guaranteed QoS. 
resources: requests: diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml index e82eb9d74a2..98e59e5ef69 100644 --- a/cluster/addons/metrics-server/metrics-server-deployment.yaml +++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml @@ -47,7 +47,7 @@ spec: serviceAccountName: metrics-server containers: - name: metrics-server - image: gcr.io/google_containers/metrics-server-amd64:v0.2.0 + image: k8s.gcr.io/metrics-server-amd64:v0.2.0 command: - /metrics-server - --source=kubernetes.summary_api:'' @@ -56,7 +56,7 @@ spec: name: https protocol: TCP - name: metrics-server-nanny - image: gcr.io/google_containers/addon-resizer:1.8.1 + image: k8s.gcr.io/addon-resizer:1.8.1 resources: limits: cpu: 100m diff --git a/cluster/addons/node-problem-detector/npd.yaml b/cluster/addons/node-problem-detector/npd.yaml index 87365ad17f7..714125ea58c 100644 --- a/cluster/addons/node-problem-detector/npd.yaml +++ b/cluster/addons/node-problem-detector/npd.yaml @@ -43,7 +43,7 @@ spec: spec: containers: - name: node-problem-detector - image: gcr.io/google_containers/node-problem-detector:v0.4.1 + image: k8s.gcr.io/node-problem-detector:v0.4.1 command: - "/bin/sh" - "-c" diff --git a/cluster/addons/python-image/Makefile b/cluster/addons/python-image/Makefile index 6da4f7d3ce6..0ae26b2a63c 100644 --- a/cluster/addons/python-image/Makefile +++ b/cluster/addons/python-image/Makefile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -IMAGE=gcr.io/google_containers/python +IMAGE=k8s.gcr.io/python VERSION=v1 .PHONY: build push @@ -21,5 +21,5 @@ build: docker build --pull -t "$(IMAGE):$(VERSION)" . 
push: - gcloud docker -- push "$(IMAGE):$(VERSION)" + gcloud docker --server=k8s.gcr.io -- push "$(IMAGE):$(VERSION)" diff --git a/cluster/addons/registry/README.md b/cluster/addons/registry/README.md index 59542355eda..009e11f5863 100644 --- a/cluster/addons/registry/README.md +++ b/cluster/addons/registry/README.md @@ -199,7 +199,7 @@ spec: spec: containers: - name: kube-registry-proxy - image: gcr.io/google_containers/kube-registry-proxy:0.4 + image: k8s.gcr.io/kube-registry-proxy:0.4 resources: limits: cpu: 100m diff --git a/cluster/addons/registry/images/Makefile b/cluster/addons/registry/images/Makefile index c1b64de1c20..338ef1c4945 100644 --- a/cluster/addons/registry/images/Makefile +++ b/cluster/addons/registry/images/Makefile @@ -15,10 +15,10 @@ .PHONY: build push vet test clean TAG = 0.4 -REPO = gcr.io/google_containers/kube-registry-proxy +REPO = k8s.gcr.io/kube-registry-proxy build: docker build --pull -t $(REPO):$(TAG) . push: - gcloud docker -- push $(REPO):$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(REPO):$(TAG) diff --git a/cluster/common.sh b/cluster/common.sh index 95e286e89d0..088de5dd680 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -478,7 +478,7 @@ function stage-images() { local docker_cmd=("docker") - if [[ "${KUBE_DOCKER_REGISTRY}" == "gcr.io/"* ]]; then + if [[ "${KUBE_DOCKER_REGISTRY}" =~ "gcr.io/" ]]; then local docker_push_cmd=("gcloud" "docker") else local docker_push_cmd=("${docker_cmd[@]}") @@ -493,7 +493,7 @@ function stage-images() { ( "${docker_cmd[@]}" load -i "${temp_dir}/kubernetes/server/bin/${binary}.tar" "${docker_cmd[@]}" rmi "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}" 2>/dev/null || true - "${docker_cmd[@]}" tag "gcr.io/google_containers/${binary}:${docker_tag}" "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}" + "${docker_cmd[@]}" tag "k8s.gcr.io/${binary}:${docker_tag}" "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}" "${docker_push_cmd[@]}" push 
"${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}" ) &> "${temp_dir}/${binary}-push.log" & done diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index d8e6aafca88..34f3ed96153 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -151,7 +151,7 @@ ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}" # Useful for scheduling heapster in large clusters with nodes of small size. HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}" -# Set etcd image (e.g. gcr.io/google_containers/etcd) and version (e.g. 3.1.10) if you need +# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.1.10) if you need # non-default version. ETCD_IMAGE="${TEST_ETCD_IMAGE:-}" ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}" diff --git a/cluster/gce/container-linux/configure-helper.sh b/cluster/gce/container-linux/configure-helper.sh index d438c71d924..068e742dcb7 100755 --- a/cluster/gce/container-linux/configure-helper.sh +++ b/cluster/gce/container-linux/configure-helper.sh @@ -671,7 +671,7 @@ function prepare-kube-proxy-manifest-variables { remove-salt-config-comments "${src_file}" local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig" - local kube_docker_registry="gcr.io/google_containers" + local kube_docker_registry="k8s.gcr.io" if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then kube_docker_registry=${KUBE_DOCKER_REGISTRY} fi @@ -853,7 +853,7 @@ function compute-master-manifest-variables { CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}}," CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}," fi - DOCKER_REGISTRY="gcr.io/google_containers" + DOCKER_REGISTRY="k8s.gcr.io" if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}" fi diff --git a/cluster/gce/container-linux/configure.sh b/cluster/gce/container-linux/configure.sh index 035c88dba31..6651cc8ff51 100755 
--- a/cluster/gce/container-linux/configure.sh +++ b/cluster/gce/container-linux/configure.sh @@ -140,12 +140,12 @@ function install-kube-binary-config { echo "Downloading k8s manifests tar" download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}" tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite - local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}" - if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then + local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-k8s.gcr.io}" + if [[ "${kube_addon_registry}" != "k8s.gcr.io" ]]; then find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \ - xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@" + xargs sed -ri "s@(image:\s.*)k8s.gcr.io@\1${kube_addon_registry}@" find "${dst_dir}" -name \*.manifest -or -name \*.json | \ - xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@" + xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@" fi cp "${dst_dir}/kubernetes/gci-trusty/container-linux-configure-helper.sh" "${KUBE_HOME}/bin/configure-helper.sh" chmod -R 755 "${kube_bin}" diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 9307b382ea7..cd8733ae06e 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1274,7 +1274,7 @@ function prepare-kube-proxy-manifest-variables { remove-salt-config-comments "${src_file}" local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig" - local kube_docker_registry="gcr.io/google_containers" + local kube_docker_registry="k8s.gcr.io" if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then kube_docker_registry=${KUBE_DOCKER_REGISTRY} fi @@ -1446,7 +1446,7 @@ function compute-master-manifest-variables { CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}}," CLOUD_CONFIG_MOUNT="{\"name\": 
\"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}," fi - DOCKER_REGISTRY="gcr.io/google_containers" + DOCKER_REGISTRY="k8s.gcr.io" if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}" fi @@ -2325,7 +2325,7 @@ spec: - name: vol containers: - name: pv-recycler - image: gcr.io/google_containers/busybox:1.27 + image: k8s.gcr.io/busybox:1.27 command: - /bin/sh args: diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index aa2920fb017..6fa08398244 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -221,12 +221,12 @@ function install-kube-manifests { echo "Downloading k8s manifests tar" download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}" tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite - local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}" - if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then + local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-k8s.gcr.io}" + if [[ "${kube_addon_registry}" != "k8s.gcr.io" ]]; then find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \ - xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@" + xargs sed -ri "s@(image:\s.*)k8s.gcr.io@\1${kube_addon_registry}@" find "${dst_dir}" -name \*.manifest -or -name \*.json | \ - xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@" + xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@" fi cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_BIN}/configure-helper.sh" cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_BIN}/health-monitor.sh" diff --git a/cluster/gce/gci/mounter/Makefile b/cluster/gce/gci/mounter/Makefile index 72efa3b77c4..bb2d91ea0ca 100644 --- a/cluster/gce/gci/mounter/Makefile +++ b/cluster/gce/gci/mounter/Makefile @@ -13,7 +13,7 @@ # limitations under the License. 
TAG=v2 -REGISTRY=gcr.io/google_containers +REGISTRY=k8s.gcr.io IMAGE=gci-mounter all: container @@ -22,7 +22,7 @@ container: docker build --pull -t ${REGISTRY}/${IMAGE}:${TAG} . push: - gcloud docker -- push ${REGISTRY}/${IMAGE}:${TAG} + gcloud docker --server=k8s.gcr.io -- push ${REGISTRY}/${IMAGE}:${TAG} upload: ./stage-upload.sh ${TAG} ${REGISTRY}/${IMAGE}:${TAG} diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 34d710ce7fe..43d258e962e 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -236,12 +236,12 @@ function set-preferred-region() { fi # If we're using regional GCR, and we're outside the US, go to the - # regional registry. The gcr.io/google_containers registry is + # regional registry. The k8s.gcr.io registry is # appropriate for US (for now). if [[ "${REGIONAL_KUBE_ADDONS}" == "true" ]] && [[ "${preferred}" != "us" ]]; then - KUBE_ADDON_REGISTRY="${preferred}.gcr.io/google_containers" + KUBE_ADDON_REGISTRY="${preferred}.k8s.gcr.io" else - KUBE_ADDON_REGISTRY="gcr.io/google_containers" + KUBE_ADDON_REGISTRY="k8s.gcr.io" fi if [[ "${ENABLE_DOCKER_REGISTRY_CACHE:-}" == "true" ]]; then diff --git a/cluster/get-kube-local.sh b/cluster/get-kube-local.sh index 972ebce6d70..21a8243dd94 100755 --- a/cluster/get-kube-local.sh +++ b/cluster/get-kube-local.sh @@ -97,7 +97,7 @@ function create_cluster { --pid=host \ --privileged=true \ -d \ - gcr.io/google_containers/hyperkube-${arch}:${release} \ + k8s.gcr.io/hyperkube-${arch}:${release} \ /hyperkube kubelet \ --containerized \ --hostname-override="127.0.0.1" \ diff --git a/cluster/images/etcd-empty-dir-cleanup/Makefile b/cluster/images/etcd-empty-dir-cleanup/Makefile index d84a353ce00..950c024ba05 100644 --- a/cluster/images/etcd-empty-dir-cleanup/Makefile +++ b/cluster/images/etcd-empty-dir-cleanup/Makefile @@ -15,7 +15,7 @@ .PHONY: build push ETCD_VERSION = 3.0.14 -IMAGE = gcr.io/google-containers/etcd-empty-dir-cleanup +IMAGE = k8s.gcr.io/etcd-empty-dir-cleanup TAG = 3.0.14.0 clean: @@ 
-29,4 +29,4 @@ build: clean rm -rf etcdctl etcd-v$(ETCD_VERSION)-linux-amd64 etcd-v$(ETCD_VERSION)-linux-amd64.tar.gz push: build - gcloud docker -- push $(IMAGE):$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(IMAGE):$(TAG) diff --git a/cluster/images/etcd-version-monitor/Makefile b/cluster/images/etcd-version-monitor/Makefile index e061b900151..2c9cfa20865 100644 --- a/cluster/images/etcd-version-monitor/Makefile +++ b/cluster/images/etcd-version-monitor/Makefile @@ -15,11 +15,11 @@ # Build the etcd-version-monitor image # # Usage: -# [GOLANG_VERSION=1.8.3] [REGISTRY=gcr.io/google-containers] [TAG=test] make (build|push) +# [GOLANG_VERSION=1.8.3] [REGISTRY=k8s.gcr.io] [TAG=test] make (build|push) # TODO(shyamjvs): Support architectures other than amd64 if needed. ARCH:=amd64 GOLANG_VERSION?=1.8.3 -REGISTRY?=gcr.io/google-containers +REGISTRY?=k8s.gcr.io TAG?=0.1.1 IMAGE:=$(REGISTRY)/etcd-version-monitor:$(TAG) CURRENT_DIR:=$(pwd) @@ -30,13 +30,17 @@ build: cp etcd-version-monitor.go Dockerfile $(TEMP_DIR) # Compile etcd-version-monitor. 
- docker run -it -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \ - /bin/bash -c "CGO_ENABLED=0 go build -o /build/etcd-version-monitor k8s.io/kubernetes/cluster/images/etcd-version-monitor" + docker run -it \ + -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes \ + -v $(TEMP_DIR):/build \ + -e GOARCH=$(ARCH) \ + golang:$(GOLANG_VERSION) \ + /bin/bash -c "CGO_ENABLED=0 go build -o /build/etcd-version-monitor k8s.io/kubernetes/cluster/images/etcd-version-monitor" docker build -t $(IMAGE) $(TEMP_DIR) push: build - gcloud docker -- push $(IMAGE) + gcloud docker --server=k8s.gcr.io -- push $(IMAGE) all: build diff --git a/cluster/images/etcd-version-monitor/README.md b/cluster/images/etcd-version-monitor/README.md index 3cfb675837d..16bd1d88ff2 100644 --- a/cluster/images/etcd-version-monitor/README.md +++ b/cluster/images/etcd-version-monitor/README.md @@ -18,7 +18,7 @@ latency metrics (`etcd_grpc_unary_requests_duration_seconds`) to be exposed. 
To run this tool as a docker container: - make build -- docker run --net=host -i -t gcr.io/google_containers/etcd-version-monitor:test /etcd-version-monitor --logtostderr +- docker run --net=host -i -t k8s.gcr.io/etcd-version-monitor:test /etcd-version-monitor --logtostderr To run this as a pod on the kubernetes cluster: - Place the 'etcd-version-monitor.yaml' in the manifests directory of diff --git a/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml b/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml index 49f1db39819..f632ac2ed6d 100644 --- a/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml +++ b/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml @@ -7,7 +7,7 @@ spec: hostNetwork: true containers: - name: etcd-version-monitor - image: gcr.io/google-containers/etcd-version-monitor:0.1.0 + image: k8s.gcr.io/etcd-version-monitor:0.1.0 command: - /etcd-version-monitor - --logtostderr diff --git a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile index bf4e6505909..577b345850a 100644 --- a/cluster/images/etcd/Makefile +++ b/cluster/images/etcd/Makefile @@ -15,7 +15,7 @@ # Build the etcd image # # Usage: -# [TAGS=2.2.1 2.3.7 3.0.17 3.1.10] [REGISTRY=gcr.io/google_containers] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push) +# [TAGS=2.2.1 2.3.7 3.0.17 3.1.10] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push) # The image contains different etcd versions to simplify # upgrades. Thus be careful when removing any tag from here. 
@@ -29,7 +29,7 @@ TAGS?=2.2.1 2.3.7 3.0.17 3.1.10 REGISTRY_TAG?=3.1.10 ARCH?=amd64 -REGISTRY?=gcr.io/google_containers +REGISTRY?=k8s.gcr.io GOLANG_VERSION?=1.7.6 GOARM=7 TEMP_DIR:=$(shell mktemp -d) @@ -105,12 +105,12 @@ endif docker build --pull -t $(REGISTRY)/etcd-$(ARCH):$(REGISTRY_TAG) $(TEMP_DIR) push: build - gcloud docker -- push $(REGISTRY)/etcd-$(ARCH):$(REGISTRY_TAG) + gcloud docker --server=k8s.gcr.io -- push $(REGISTRY)/etcd-$(ARCH):$(REGISTRY_TAG) ifeq ($(ARCH),amd64) # Backward compatibility. TODO: deprecate this image tag docker tag $(REGISTRY)/etcd-$(ARCH):$(REGISTRY_TAG) $(REGISTRY)/etcd:$(REGISTRY_TAG) - gcloud docker -- push $(REGISTRY)/etcd:$(REGISTRY_TAG) + gcloud docker --server=k8s.gcr.io -- push $(REGISTRY)/etcd:$(REGISTRY_TAG) endif all: build diff --git a/cluster/images/etcd/README.md b/cluster/images/etcd/README.md index e7dfde8bf61..3b7f11ed7e8 100644 --- a/cluster/images/etcd/README.md +++ b/cluster/images/etcd/README.md @@ -10,20 +10,20 @@ For other architectures, `etcd` is cross-compiled from source. 
Arch-specific `bu ```console # Build for linux/amd64 (default) $ make push ARCH=amd64 -# ---> gcr.io/google_containers/etcd-amd64:TAG -# ---> gcr.io/google_containers/etcd:TAG +# ---> k8s.gcr.io/etcd-amd64:TAG +# ---> k8s.gcr.io/etcd:TAG $ make push ARCH=arm -# ---> gcr.io/google_containers/etcd-arm:TAG +# ---> k8s.gcr.io/etcd-arm:TAG $ make push ARCH=arm64 -# ---> gcr.io/google_containers/etcd-arm64:TAG +# ---> k8s.gcr.io/etcd-arm64:TAG $ make push ARCH=ppc64le -# ---> gcr.io/google_containers/etcd-ppc64le:TAG +# ---> k8s.gcr.io/etcd-ppc64le:TAG $ make push ARCH=s390x -# ---> gcr.io/google_containers/etcd-s390x:TAG +# ---> k8s.gcr.io/etcd-s390x:TAG ``` If you don't want to push the images, run `make` or `make build` instead diff --git a/cluster/images/hyperkube/BUILD b/cluster/images/hyperkube/BUILD index 6ca2ff6d61e..9d77f735a2b 100644 --- a/cluster/images/hyperkube/BUILD +++ b/cluster/images/hyperkube/BUILD @@ -27,7 +27,7 @@ docker_build( docker_bundle( name = "hyperkube", - images = {"gcr.io/google-containers/hyperkube-amd64:{STABLE_DOCKER_TAG}": "hyperkube-internal"}, + images = {"k8s.gcr.io/hyperkube-amd64:{STABLE_DOCKER_TAG}": "hyperkube-internal"}, stamp = True, ) diff --git a/cluster/images/hyperkube/Dockerfile b/cluster/images/hyperkube/Dockerfile index 71d2300a04b..ab905603410 100644 --- a/cluster/images/hyperkube/Dockerfile +++ b/cluster/images/hyperkube/Dockerfile @@ -16,7 +16,7 @@ FROM BASEIMAGE # Create symlinks for each hyperkube server # Also create symlinks to /usr/local/bin/ where the server image binaries live, so the hyperkube image may be -# used instead of gcr.io/google_containers/kube-* without any modifications. +# used instead of k8s.gcr.io/kube-* without any modifications. # TODO: replace manual symlink creation with --make-symlink command once # cross-building with qemu supports go binaries. 
See #28702 # RUN /hyperkube --make-symlinks diff --git a/cluster/images/hyperkube/Makefile b/cluster/images/hyperkube/Makefile index d7a8c7d936b..7123fd3a08b 100644 --- a/cluster/images/hyperkube/Makefile +++ b/cluster/images/hyperkube/Makefile @@ -15,13 +15,13 @@ # Build the hyperkube image. # # Usage: -# [ARCH=amd64] [REGISTRY="gcr.io/google-containers"] make (build|push) VERSION={some_released_version_of_kubernetes} +# [ARCH=amd64] [REGISTRY="k8s.gcr.io"] make (build|push) VERSION={some_released_version_of_kubernetes} -REGISTRY?=gcr.io/google-containers +REGISTRY?=k8s.gcr.io ARCH?=amd64 HYPERKUBE_BIN?=_output/dockerized/bin/linux/$(ARCH)/hyperkube -BASEIMAGE=gcr.io/google-containers/debian-hyperkube-base-$(ARCH):0.8 +BASEIMAGE=k8s.gcr.io/debian-hyperkube-base-$(ARCH):0.8 TEMP_DIR:=$(shell mktemp -d -t hyperkubeXXXXXX) all: build @@ -44,11 +44,11 @@ endif rm -rf "${TEMP_DIR}" push: build - gcloud docker -- push ${REGISTRY}/hyperkube-${ARCH}:${VERSION} + gcloud docker --server=k8s.gcr.io -- push ${REGISTRY}/hyperkube-${ARCH}:${VERSION} ifeq ($(ARCH),amd64) docker rmi ${REGISTRY}/hyperkube:${VERSION} 2>/dev/null || true docker tag ${REGISTRY}/hyperkube-${ARCH}:${VERSION} ${REGISTRY}/hyperkube:${VERSION} - gcloud docker -- push ${REGISTRY}/hyperkube:${VERSION} + gcloud docker --server=k8s.gcr.io -- push ${REGISTRY}/hyperkube:${VERSION} endif .PHONY: build push all diff --git a/cluster/images/hyperkube/README.md b/cluster/images/hyperkube/README.md index 60fba63aa4b..deed34b2c43 100644 --- a/cluster/images/hyperkube/README.md +++ b/cluster/images/hyperkube/README.md @@ -10,23 +10,23 @@ $ build/run.sh make cross # Build for linux/amd64 (default) -# export REGISTRY=$HOST/$ORG to switch from gcr.io/google_containers +# export REGISTRY=$HOST/$ORG to switch from k8s.gcr.io $ make push VERSION={target_version} ARCH=amd64 -# ---> gcr.io/google_containers/hyperkube-amd64:VERSION -# ---> gcr.io/google_containers/hyperkube:VERSION (image with backwards-compatible naming) +# 
---> k8s.gcr.io/hyperkube-amd64:VERSION +# ---> k8s.gcr.io/hyperkube:VERSION (image with backwards-compatible naming) $ make push VERSION={target_version} ARCH=arm -# ---> gcr.io/google_containers/hyperkube-arm:VERSION +# ---> k8s.gcr.io/hyperkube-arm:VERSION $ make push VERSION={target_version} ARCH=arm64 -# ---> gcr.io/google_containers/hyperkube-arm64:VERSION +# ---> k8s.gcr.io/hyperkube-arm64:VERSION $ make push VERSION={target_version} ARCH=ppc64le -# ---> gcr.io/google_containers/hyperkube-ppc64le:VERSION +# ---> k8s.gcr.io/hyperkube-ppc64le:VERSION $ make push VERSION={target_version} ARCH=s390x -# ---> gcr.io/google_containers/hyperkube-s390x:VERSION +# ---> k8s.gcr.io/hyperkube-s390x:VERSION ``` If you don't want to push the images, run `make` or `make build` instead diff --git a/cluster/images/kubemark/Makefile b/cluster/images/kubemark/Makefile index 6786aeea245..83b00d5f244 100644 --- a/cluster/images/kubemark/Makefile +++ b/cluster/images/kubemark/Makefile @@ -15,8 +15,8 @@ # build Kubemark image from currently built binaries containing both 'real' master and Hollow Node. # This makefile assumes that the kubemark binary is present in this directory. 
-REGISTRY?=gcr.io -PROJECT?=google_containers +REGISTRY ?= gcr.io +PROJECT ?= all: gcloudpush diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index d89fa62448d..3ff17998c83 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -635,10 +635,10 @@ def launch_default_ingress_controller(): addon_path = '/root/cdk/addons/{}' context['defaultbackend_image'] = \ - "gcr.io/google_containers/defaultbackend:1.4" + "k8s.gcr.io/defaultbackend:1.4" if arch() == 's390x': context['defaultbackend_image'] = \ - "gcr.io/google_containers/defaultbackend-s390x:1.4" + "k8s.gcr.io/defaultbackend-s390x:1.4" # Render the default http backend (404) replicationcontroller manifest manifest = addon_path.format('default-http-backend.yaml') @@ -655,7 +655,7 @@ def launch_default_ingress_controller(): # Render the ingress replication controller manifest context['ingress_image'] = \ - "gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13" + "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.13" if arch() == 's390x': context['ingress_image'] = \ "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13" diff --git a/cluster/kubemark/gce/config-default.sh b/cluster/kubemark/gce/config-default.sh index 9d159603a94..432465dd5cb 100644 --- a/cluster/kubemark/gce/config-default.sh +++ b/cluster/kubemark/gce/config-default.sh @@ -61,7 +61,7 @@ RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100} KUBE_APISERVER_REQUEST_TIMEOUT=300 -# Set etcd image (e.g. gcr.io/google_containers/etcd) and version (e.g. 3.1.10) if you need +# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.1.10) if you need # non-default version. 
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}" ETCD_VERSION="${TEST_ETCD_VERSION:-}" diff --git a/cluster/log-dump/logexporter-daemonset.yaml b/cluster/log-dump/logexporter-daemonset.yaml index 8099da1b9de..27aafa876f4 100644 --- a/cluster/log-dump/logexporter-daemonset.yaml +++ b/cluster/log-dump/logexporter-daemonset.yaml @@ -36,7 +36,7 @@ spec: spec: containers: - name: logexporter-test - image: gcr.io/google-containers/logexporter:v0.1.1 + image: k8s.gcr.io/logexporter:v0.1.1 env: - name: NODE_NAME valueFrom: diff --git a/cluster/restore-from-backup.sh b/cluster/restore-from-backup.sh index 912e877de98..fec9efee3ab 100755 --- a/cluster/restore-from-backup.sh +++ b/cluster/restore-from-backup.sh @@ -160,7 +160,7 @@ if [ "${ETCD_API}" == "etcd2" ]; then echo "Starting etcd ${ETCD_VERSION} to restore data" image=$(docker run -d -v ${BACKUP_DIR}:/var/etcd/data \ --net=host -p ${etcd_port}:${etcd_port} \ - "gcr.io/google_containers/etcd:${ETCD_VERSION}" /bin/sh -c \ + "k8s.gcr.io/etcd:${ETCD_VERSION}" /bin/sh -c \ "/usr/local/bin/etcd --data-dir /var/etcd/data --force-new-cluster") if [ "$?" -ne "0" ]; then echo "Docker container didn't started correctly" @@ -191,7 +191,7 @@ elif [ "${ETCD_API}" == "etcd3" ]; then # Run etcdctl snapshot restore command and wait until it is finished. # setting with --name in the etcd manifest file and then it seems to work. docker run -v ${BACKUP_DIR}:/var/tmp/backup --env ETCDCTL_API=3 \ - "gcr.io/google_containers/etcd:${ETCD_VERSION}" /bin/sh -c \ + "k8s.gcr.io/etcd:${ETCD_VERSION}" /bin/sh -c \ "/usr/local/bin/etcdctl snapshot restore ${BACKUP_DIR}/${snapshot} --name ${NAME} --initial-cluster ${INITIAL_CLUSTER} --initial-advertise-peer-urls ${INITIAL_ADVERTISE_PEER_URLS}; mv /${NAME}.etcd/member /var/tmp/backup/" if [ "$?" 
-ne "0" ]; then echo "Docker container didn't started correctly" diff --git a/cluster/saltbase/install.sh b/cluster/saltbase/install.sh index fdf6c634d15..3d730388527 100755 --- a/cluster/saltbase/install.sh +++ b/cluster/saltbase/install.sh @@ -81,7 +81,7 @@ for docker_file in "${KUBE_DOCKER_WRAPPED_BINARIES[@]}"; do done cat <>"${docker_images_sls_file}" -kube_docker_registry: '$(echo ${KUBE_DOCKER_REGISTRY:-gcr.io/google_containers})' +kube_docker_registry: '$(echo ${KUBE_DOCKER_REGISTRY:-k8s.gcr.io})' EOF # TODO(zmerlynn): Forgive me, this is really gross. But in order to @@ -89,13 +89,13 @@ EOF # have to templatize a couple of the add-ons anyways, manually # templatize the addon registry for regional support. When we get # better templating, we can fix this. -readonly kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}" -if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then +readonly kube_addon_registry="${KUBE_ADDON_REGISTRY:-k8s.gcr.io}" +if [[ "${kube_addon_registry}" != "k8s.gcr.io" ]]; then find /srv/salt-new -name \*.yaml -or -name \*.yaml.in | \ - xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@" + xargs sed -ri "s@(image:\s.*)k8s.gcr.io@\1${kube_addon_registry}@" # All the legacy .manifest files with hardcoded gcr.io are JSON. 
find /srv/salt-new -name \*.manifest -or -name \*.json | \ - xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@" + xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@" fi echo "+++ Swapping in new configs" diff --git a/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest b/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest index 6e6ed2c2508..20961835065 100644 --- a/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest +++ b/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest @@ -25,15 +25,15 @@ "containers": [ { "name": "cluster-autoscaler", - "image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0", + "image": "k8s.gcr.io/cluster-autoscaler:v1.1.0", "livenessProbe": { "httpGet": { "path": "/health-check", "port": 8085 - }, + }, "initialDelaySeconds": 600, "periodSeconds": 60 - }, + }, "command": [ "./run.sh", "--kubernetes=http://127.0.0.1:8080?inClusterConfig=f", diff --git a/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest b/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest index d7d5a430642..835b61a8c6f 100644 --- a/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest +++ b/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest @@ -14,13 +14,13 @@ spec: cpu: 100m limits: cpu: 100m - image: gcr.io/google_containers/busybox:1.24 + image: k8s.gcr.io/busybox:1.24 # TODO: Replace this with a go script that pulls in parallel? # Currently it takes ~5m to pull all e2e images, so this is OK, and # fewer moving parts is always better. # TODO: Replace the hardcoded image list with an autogen list; the list is # currently hard-coded for static verification. 
It was generated via: - # grep -Iiroh "gcr.io/google_.*" "${KUBE_ROOT}/test/e2e" | \ + # grep -Iiroh "gcr.io/.*" "${KUBE_ROOT}/test/e2e" | \ # sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' ' # We always want the subshell to exit 0 so this pod doesn't end up # blocking tests in an Error state. @@ -29,52 +29,52 @@ spec: - -c - > for i in - gcr.io/google_containers/alpine-with-bash:1.0 - gcr.io/google_containers/apparmor-loader:0.1 - gcr.io/google_containers/busybox:1.24 - gcr.io/google_containers/dnsutils:e2e - gcr.io/google_containers/e2e-net-amd64:1.0 - gcr.io/google_containers/echoserver:1.6 - gcr.io/google_containers/eptest:0.1 - gcr.io/google_containers/fakegitserver:0.1 - gcr.io/google_containers/galera-install:0.1 - gcr.io/google_containers/hostexec:1.2 - gcr.io/google_containers/invalid-image:invalid-tag - gcr.io/google_containers/iperf:e2e - gcr.io/google_containers/jessie-dnsutils:e2e - gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5 - gcr.io/google_containers/liveness:e2e - gcr.io/google_containers/logs-generator:v0.1.0 - gcr.io/google_containers/mounttest:0.8 - gcr.io/google_containers/mounttest-user:0.5 - gcr.io/google_containers/mysql-galera:e2e - gcr.io/google_containers/mysql-healthz:1.0 - gcr.io/google_containers/netexec:1.4 - gcr.io/google_containers/netexec:1.5 - gcr.io/google_containers/netexec:1.7 - gcr.io/google_containers/nettest:1.7 - gcr.io/google_containers/nginx:1.7.9 - gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.1 - gcr.io/google_containers/nginx-slim:0.7 - gcr.io/google_containers/nginx-slim:0.8 - gcr.io/google_containers/node-problem-detector:v0.3.0 - gcr.io/google_containers/pause - gcr.io/google_containers/porter:4524579c0eb935c056c8e75563b4e1eda31587e0 - gcr.io/google_containers/portforwardtester:1.2 - gcr.io/google_containers/redis-install-3.2.0:e2e - gcr.io/google_containers/resource_consumer:beta4 - gcr.io/google_containers/resource_consumer/controller:beta4 + 
k8s.gcr.io/alpine-with-bash:1.0 + k8s.gcr.io/apparmor-loader:0.1 + k8s.gcr.io/busybox:1.24 + k8s.gcr.io/dnsutils:e2e + k8s.gcr.io/e2e-net-amd64:1.0 + k8s.gcr.io/echoserver:1.6 + k8s.gcr.io/eptest:0.1 + k8s.gcr.io/fakegitserver:0.1 + k8s.gcr.io/galera-install:0.1 + k8s.gcr.io/hostexec:1.2 + k8s.gcr.io/invalid-image:invalid-tag + k8s.gcr.io/iperf:e2e + k8s.gcr.io/jessie-dnsutils:e2e + k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5 + k8s.gcr.io/liveness:e2e + k8s.gcr.io/logs-generator:v0.1.0 + k8s.gcr.io/mounttest:0.8 + k8s.gcr.io/mounttest-user:0.5 + k8s.gcr.io/mysql-galera:e2e + k8s.gcr.io/mysql-healthz:1.0 + k8s.gcr.io/netexec:1.4 + k8s.gcr.io/netexec:1.5 + k8s.gcr.io/netexec:1.7 + k8s.gcr.io/nettest:1.7 + k8s.gcr.io/nginx:1.7.9 + k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.1 + k8s.gcr.io/nginx-slim:0.7 + k8s.gcr.io/nginx-slim:0.8 + k8s.gcr.io/node-problem-detector:v0.3.0 + k8s.gcr.io/pause + k8s.gcr.io/porter:4524579c0eb935c056c8e75563b4e1eda31587e0 + k8s.gcr.io/portforwardtester:1.2 + k8s.gcr.io/redis-install-3.2.0:e2e + k8s.gcr.io/resource_consumer:beta4 + k8s.gcr.io/resource_consumer/controller:beta4 gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1 - gcr.io/google_containers/servicelb:0.1 - gcr.io/google_containers/test-webserver:e2e - gcr.io/google_containers/update-demo:kitten - gcr.io/google_containers/update-demo:nautilus - gcr.io/google_containers/volume-ceph:0.1 - gcr.io/google_containers/volume-gluster:0.2 - gcr.io/google_containers/volume-iscsi:0.1 - gcr.io/google_containers/volume-nfs:0.8 - gcr.io/google_containers/volume-rbd:0.1 - gcr.io/google_containers/zookeeper-install-3.5.0-alpha:e2e + k8s.gcr.io/servicelb:0.1 + k8s.gcr.io/test-webserver:e2e + k8s.gcr.io/update-demo:kitten + k8s.gcr.io/update-demo:nautilus + k8s.gcr.io/volume-ceph:0.1 + k8s.gcr.io/volume-gluster:0.2 + k8s.gcr.io/volume-iscsi:0.1 + k8s.gcr.io/volume-nfs:0.8 + k8s.gcr.io/volume-rbd:0.1 + k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e 
gcr.io/google_samples/gb-redisslave:nonexistent ; do echo $(date '+%X') pulling $i; docker pull $i 1>/dev/null; done; exit 0; securityContext: @@ -91,7 +91,7 @@ spec: cpu: 100m limits: cpu: 100m - image: gcr.io/google_containers/kube-nethealth-amd64:1.0 + image: k8s.gcr.io/kube-nethealth-amd64:1.0 command: - /bin/sh - -c diff --git a/cluster/saltbase/salt/etcd/etcd.manifest b/cluster/saltbase/salt/etcd/etcd.manifest index 44419aa744f..e5f940c723c 100644 --- a/cluster/saltbase/salt/etcd/etcd.manifest +++ b/cluster/saltbase/salt/etcd/etcd.manifest @@ -39,7 +39,7 @@ "containers":[ { "name": "etcd-container", - "image": "{{ pillar.get('etcd_docker_repository', 'gcr.io/google_containers/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.1.10') }}", + "image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.1.10') }}", "resources": { "requests": { "cpu": {{ cpulimit }} diff --git a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml b/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml index 0b115c1f737..d09d6e5df34 100644 --- a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml +++ b/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml @@ -13,7 +13,7 @@ spec: - name: kube-addon-manager # When updating version also bump it in: # - test/kubemark/resources/manifests/kube-addon-manager.yaml - image: gcr.io/google-containers/kube-addon-manager:v6.5 + image: k8s.gcr.io/kube-addon-manager:v6.5 command: - /bin/bash - -c diff --git a/cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml b/cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml index f00142ad65f..9f05f3ac151 100644 --- a/cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml +++ b/cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml @@ -18,7 +18,7 @@ spec: spec: containers: - name: kube-registry-proxy - image: gcr.io/google_containers/kube-registry-proxy:0.4 + image: 
k8s.gcr.io/kube-registry-proxy:0.4 resources: limits: cpu: 100m diff --git a/cluster/saltbase/salt/l7-gcp/glbc.manifest b/cluster/saltbase/salt/l7-gcp/glbc.manifest index c808e5ee0e6..4cc81331675 100644 --- a/cluster/saltbase/salt/l7-gcp/glbc.manifest +++ b/cluster/saltbase/salt/l7-gcp/glbc.manifest @@ -13,7 +13,7 @@ spec: terminationGracePeriodSeconds: 600 hostNetwork: true containers: - - image: gcr.io/google_containers/glbc:0.9.7 + - image: k8s.gcr.io/glbc:0.9.7 livenessProbe: httpGet: path: /healthz diff --git a/cluster/saltbase/salt/rescheduler/rescheduler.manifest b/cluster/saltbase/salt/rescheduler/rescheduler.manifest index ef9af1f5f7f..3ff18eb6bb4 100644 --- a/cluster/saltbase/salt/rescheduler/rescheduler.manifest +++ b/cluster/saltbase/salt/rescheduler/rescheduler.manifest @@ -13,7 +13,7 @@ metadata: spec: hostNetwork: true containers: - - image: gcr.io/google-containers/rescheduler:v0.3.1 + - image: k8s.gcr.io/rescheduler:v0.3.1 name: rescheduler volumeMounts: - mountPath: /var/log/rescheduler.log diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go index 16c2db8e89c..0c2f1bbb756 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go @@ -47,7 +47,7 @@ const ( // DefaultCertificatesDir defines default certificate directory DefaultCertificatesDir = "/etc/kubernetes/pki" // DefaultImageRepository defines default image registry - DefaultImageRepository = "gcr.io/google_containers" + DefaultImageRepository = "k8s.gcr.io" // DefaultManifestsDir defines default manifests directory DefaultManifestsDir = "/etc/kubernetes/manifests" diff --git a/cmd/kubeadm/app/images/images_test.go b/cmd/kubeadm/app/images/images_test.go index 6c1173e6c25..466f204500d 100644 --- a/cmd/kubeadm/app/images/images_test.go +++ b/cmd/kubeadm/app/images/images_test.go @@ -27,7 +27,7 @@ import ( const ( testversion = 
"v10.1.2-alpha.1.100+0123456789abcdef+SOMETHING" expected = "v10.1.2-alpha.1.100_0123456789abcdef_SOMETHING" - gcrPrefix = "gcr.io/google_containers" + gcrPrefix = "k8s.gcr.io" ) func TestGetCoreImage(t *testing.T) { diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go index 87aa247142d..4de0a7bbee3 100644 --- a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go @@ -66,7 +66,7 @@ spec: - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key - --authorization-mode=Node,RBAC - --etcd-servers=http://127.0.0.1:2379 - image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4 + image: k8s.gcr.io/kube-apiserver-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -148,7 +148,7 @@ spec: - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key - --authorization-mode=Node,RBAC - --etcd-servers=http://127.0.0.1:2379 - image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4 + image: k8s.gcr.io/kube-apiserver-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -220,7 +220,7 @@ spec: - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key - --address=127.0.0.1 - --use-service-account-credentials=true - image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4 + image: k8s.gcr.io/kube-controller-manager-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -295,7 +295,7 @@ spec: - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key - --address=127.0.0.1 - --use-service-account-credentials=true - image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4 + image: k8s.gcr.io/kube-controller-manager-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -368,7 +368,7 @@ spec: - --leader-elect=true - --kubeconfig=/etc/kubernetes/scheduler.conf - --address=127.0.0.1 - image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4 + image: 
k8s.gcr.io/kube-scheduler-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -419,7 +419,7 @@ spec: - --leader-elect=true - --kubeconfig=/etc/kubernetes/scheduler.conf - --address=127.0.0.1 - image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4 + image: k8s.gcr.io/kube-scheduler-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -521,7 +521,7 @@ metadata: name: testpod spec: containers: - - image: gcr.io/google_containers/busybox + - image: k8s.gcr.io/busybox `, expectError: false, }, @@ -537,7 +537,7 @@ spec: "spec": { "containers": [ { - "image": "gcr.io/google_containers/busybox" + "image": "k8s.gcr.io/busybox" } ] } @@ -552,7 +552,7 @@ kind: Pod metadata: name: testpod spec: - - image: gcr.io/google_containers/busybox + - image: k8s.gcr.io/busybox `, expectError: true, }, diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index e51a1295792..5b131fbca1b 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -61,7 +61,7 @@ etcd: image: "" keyFile: "" featureFlags: null -imageRepository: gcr.io/google_containers +imageRepository: k8s.gcr.io kubernetesVersion: %s networking: dnsDomain: cluster.local diff --git a/cmd/kubeadm/app/util/template_test.go b/cmd/kubeadm/app/util/template_test.go index 3a00e05e601..6856539d2f5 100644 --- a/cmd/kubeadm/app/util/template_test.go +++ b/cmd/kubeadm/app/util/template_test.go @@ -22,8 +22,8 @@ import ( const ( validTmpl = "image: {{ .ImageRepository }}/pause-{{ .Arch }}:3.0" - validTmplOut = "image: gcr.io/google_containers/pause-amd64:3.0" - doNothing = "image: gcr.io/google_containers/pause-amd64:3.0" + validTmplOut = "image: k8s.gcr.io/pause-amd64:3.0" + doNothing = "image: k8s.gcr.io/pause-amd64:3.0" invalidTmpl1 = "{{ .baz }/d}" invalidTmpl2 = "{{ !foobar }}" ) @@ -39,7 +39,7 @@ func TestParseTemplate(t *testing.T) { { template: validTmpl, data: struct{ ImageRepository, 
Arch string }{ - ImageRepository: "gcr.io/google_containers", + ImageRepository: "k8s.gcr.io", Arch: "amd64", }, output: validTmplOut, @@ -49,7 +49,7 @@ func TestParseTemplate(t *testing.T) { { template: doNothing, data: struct{ ImageRepository, Arch string }{ - ImageRepository: "gcr.io/google_containers", + ImageRepository: "k8s.gcr.io", Arch: "amd64", }, output: doNothing, diff --git a/cmd/kubelet/app/options/container_runtime.go b/cmd/kubelet/app/options/container_runtime.go index d1174ea044c..c760ceb0dc3 100644 --- a/cmd/kubelet/app/options/container_runtime.go +++ b/cmd/kubelet/app/options/container_runtime.go @@ -27,7 +27,7 @@ import ( const ( // When these values are updated, also update test/e2e/framework/util.go - defaultPodSandboxImageName = "gcr.io/google_containers/pause" + defaultPodSandboxImageName = "k8s.gcr.io/pause" defaultPodSandboxImageVersion = "3.0" // From pkg/kubelet/rkt/rkt.go to avoid circular import defaultRktAPIServiceEndpoint = "localhost:15441" diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index cbb2f013aad..717e5493163 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -5194,7 +5194,7 @@ Examples:

names

-

Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]

+

Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]

true

string array

diff --git a/examples/cluster-dns/dns-backend-rc.yaml b/examples/cluster-dns/dns-backend-rc.yaml index 9649d367b54..4af1b0dcbaa 100644 --- a/examples/cluster-dns/dns-backend-rc.yaml +++ b/examples/cluster-dns/dns-backend-rc.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: dns-backend - image: gcr.io/google_containers/example-dns-backend:v1 + image: k8s.gcr.io/example-dns-backend:v1 ports: - name: backend-port containerPort: 8000 diff --git a/examples/cluster-dns/dns-frontend-pod.yaml b/examples/cluster-dns/dns-frontend-pod.yaml index b424478383a..4a7695f1138 100644 --- a/examples/cluster-dns/dns-frontend-pod.yaml +++ b/examples/cluster-dns/dns-frontend-pod.yaml @@ -7,7 +7,7 @@ metadata: spec: containers: - name: dns-frontend - image: gcr.io/google_containers/example-dns-frontend:v1 + image: k8s.gcr.io/example-dns-frontend:v1 command: - python - client.py diff --git a/examples/cluster-dns/images/backend/Makefile b/examples/cluster-dns/images/backend/Makefile index 67992ec2666..4e2b938ddca 100644 --- a/examples/cluster-dns/images/backend/Makefile +++ b/examples/cluster-dns/images/backend/Makefile @@ -13,7 +13,7 @@ # limitations under the License. TAG = v1 -PREFIX = gcr.io/google_containers +PREFIX = k8s.gcr.io IMAGE = example-dns-backend all: push @@ -22,6 +22,6 @@ image: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . push: image - gcloud docker -- push $(PREFIX)/$(IMAGE) + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/$(IMAGE) clean: diff --git a/examples/cluster-dns/images/frontend/Makefile b/examples/cluster-dns/images/frontend/Makefile index 2f6337545fa..f3fd4d2f0a6 100644 --- a/examples/cluster-dns/images/frontend/Makefile +++ b/examples/cluster-dns/images/frontend/Makefile @@ -13,7 +13,7 @@ # limitations under the License. TAG = v1 -PREFIX = gcr.io/google_containers +PREFIX = k8s.gcr.io IMAGE = example-dns-frontend all: push @@ -22,6 +22,6 @@ image: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . 
push: image - gcloud docker -- push $(PREFIX)/$(IMAGE) + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/$(IMAGE) clean: diff --git a/examples/explorer/Makefile b/examples/explorer/Makefile index 35dd5bd7e7a..4240fdfd9a0 100644 --- a/examples/explorer/Makefile +++ b/examples/explorer/Makefile @@ -21,10 +21,10 @@ explorer: explorer.go CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' ./explorer.go container: explorer - docker build --pull -t gcr.io/google_containers/explorer:$(TAG) . + docker build --pull -t k8s.gcr.io/explorer:$(TAG) . push: container - gcloud docker -- push gcr.io/google_containers/explorer:$(TAG) + gcloud docker --server=k8s.gcr.io -- push k8s.gcr.io/explorer:$(TAG) clean: rm -f explorer diff --git a/examples/explorer/pod.yaml b/examples/explorer/pod.yaml index 2c26c3e1744..0437a249229 100644 --- a/examples/explorer/pod.yaml +++ b/examples/explorer/pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: explorer - image: gcr.io/google_containers/explorer:1.0 + image: k8s.gcr.io/explorer:1.0 args: ["-port=8080"] ports: - containerPort: 8080 diff --git a/examples/guestbook-go/Makefile b/examples/guestbook-go/Makefile index 9c63819ebc0..25326193b4a 100644 --- a/examples/guestbook-go/Makefile +++ b/examples/guestbook-go/Makefile @@ -15,9 +15,9 @@ # Build the guestbook-go example # Usage: -# [VERSION=v3] [REGISTRY="gcr.io/google_containers"] make build +# [VERSION=v3] [REGISTRY="k8s.gcr.io"] make build VERSION?=v3 -REGISTRY?=gcr.io/google_containers +REGISTRY?=k8s.gcr.io release: clean build push clean @@ -29,7 +29,7 @@ build: # push the image to an registry push: - gcloud docker -- push ${REGISTRY}/guestbook:${VERSION} + gcloud docker --server=k8s.gcr.io -- push ${REGISTRY}/guestbook:${VERSION} # remove previous images and containers clean: diff --git a/examples/guestbook-go/guestbook-controller.json b/examples/guestbook-go/guestbook-controller.json index 82c0e9134fd..0e3553c865a 100644 --- 
a/examples/guestbook-go/guestbook-controller.json +++ b/examples/guestbook-go/guestbook-controller.json @@ -22,7 +22,7 @@ "containers":[ { "name":"guestbook", - "image":"gcr.io/google_containers/guestbook:v3", + "image":"k8s.gcr.io/guestbook:v3", "ports":[ { "name":"http-server", diff --git a/examples/guestbook/all-in-one/guestbook-all-in-one.yaml b/examples/guestbook/all-in-one/guestbook-all-in-one.yaml index 7735c798983..a3415d6a99d 100644 --- a/examples/guestbook/all-in-one/guestbook-all-in-one.yaml +++ b/examples/guestbook/all-in-one/guestbook-all-in-one.yaml @@ -30,7 +30,7 @@ spec: spec: containers: - name: master - image: gcr.io/google_containers/redis:e2e # or just image: redis + image: k8s.gcr.io/redis:e2e # or just image: redis resources: requests: cpu: 100m diff --git a/examples/guestbook/legacy/redis-master-controller.yaml b/examples/guestbook/legacy/redis-master-controller.yaml index 0bdf9761752..28208103b43 100644 --- a/examples/guestbook/legacy/redis-master-controller.yaml +++ b/examples/guestbook/legacy/redis-master-controller.yaml @@ -17,7 +17,7 @@ spec: spec: containers: - name: master - image: gcr.io/google_containers/redis:e2e # or just image: redis + image: k8s.gcr.io/redis:e2e # or just image: redis resources: requests: cpu: 100m diff --git a/examples/guestbook/redis-master-deployment.yaml b/examples/guestbook/redis-master-deployment.yaml index 3fbcc0f01cc..d457a09934f 100644 --- a/examples/guestbook/redis-master-deployment.yaml +++ b/examples/guestbook/redis-master-deployment.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: master - image: gcr.io/google_containers/redis:e2e # or just image: redis + image: k8s.gcr.io/redis:e2e # or just image: redis resources: requests: cpu: 100m diff --git a/examples/kubectl-container/Makefile b/examples/kubectl-container/Makefile index ea127f15142..4a2f88e2852 100644 --- a/examples/kubectl-container/Makefile +++ b/examples/kubectl-container/Makefile @@ -37,11 +37,11 @@ tag: .tag container: $(if 
$(TAG),,$(error TAG is not defined. Use 'make tag' to see a suggestion)) - docker build --pull -t gcr.io/google_containers/kubectl:$(TAG) . + docker build --pull -t k8s.gcr.io/kubectl:$(TAG) . push: container $(if $(TAG),,$(error TAG is not defined. Use 'make tag' to see a suggestion)) - gcloud docker -- push gcr.io/google_containers/kubectl:$(TAG) + gcloud docker --server=k8s.gcr.io -- push k8s.gcr.io/kubectl:$(TAG) clean: rm -f kubectl diff --git a/examples/kubectl-container/pod.json b/examples/kubectl-container/pod.json index ed0ec6599c3..540715a6c53 100644 --- a/examples/kubectl-container/pod.json +++ b/examples/kubectl-container/pod.json @@ -8,7 +8,7 @@ "containers": [ { "name": "bb", - "image": "gcr.io/google_containers/busybox", + "image": "k8s.gcr.io/busybox", "command": [ "sh", "-c", "sleep 5; wget -O - ${KUBERNETES_RO_SERVICE_HOST}:${KUBERNETES_RO_SERVICE_PORT}/api/v1/pods/; sleep 10000" ], @@ -36,7 +36,7 @@ }, { "name": "kubectl", - "image": "gcr.io/google_containers/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty", + "image": "k8s.gcr.io/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty", "imagePullPolicy": "Always", "args": [ "proxy", "-p", "8001" diff --git a/examples/spark/spark-gluster/spark-master-controller.yaml b/examples/spark/spark-gluster/spark-master-controller.yaml index d0b365b7135..28cb6ac3fb7 100644 --- a/examples/spark/spark-gluster/spark-master-controller.yaml +++ b/examples/spark/spark-gluster/spark-master-controller.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: spark-master - image: gcr.io/google_containers/spark:1.5.2_v1 + image: k8s.gcr.io/spark:1.5.2_v1 command: ["/start-master"] ports: - containerPort: 7077 diff --git a/examples/spark/spark-gluster/spark-worker-controller.yaml b/examples/spark/spark-gluster/spark-worker-controller.yaml index 69cc3cec95e..0030d8a6674 100644 --- a/examples/spark/spark-gluster/spark-worker-controller.yaml +++ b/examples/spark/spark-gluster/spark-worker-controller.yaml @@ -17,7 +17,7 @@ spec: spec: 
containers: - name: spark-worker - image: gcr.io/google_containers/spark:1.5.2_v1 + image: k8s.gcr.io/spark:1.5.2_v1 command: ["/start-worker"] ports: - containerPort: 8888 diff --git a/examples/spark/spark-master-controller.yaml b/examples/spark/spark-master-controller.yaml index 60fb7ba8a15..ceaef078d24 100644 --- a/examples/spark/spark-master-controller.yaml +++ b/examples/spark/spark-master-controller.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: spark-master - image: gcr.io/google_containers/spark:1.5.2_v1 + image: k8s.gcr.io/spark:1.5.2_v1 command: ["/start-master"] ports: - containerPort: 7077 diff --git a/examples/spark/spark-worker-controller.yaml b/examples/spark/spark-worker-controller.yaml index 9c748b3e048..3e5ed50ce05 100644 --- a/examples/spark/spark-worker-controller.yaml +++ b/examples/spark/spark-worker-controller.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: spark-worker - image: gcr.io/google_containers/spark:1.5.2_v1 + image: k8s.gcr.io/spark:1.5.2_v1 command: ["/start-worker"] ports: - containerPort: 8081 diff --git a/examples/spark/zeppelin-controller.yaml b/examples/spark/zeppelin-controller.yaml index 56bb90d421d..2f578fcfc2c 100644 --- a/examples/spark/zeppelin-controller.yaml +++ b/examples/spark/zeppelin-controller.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: zeppelin - image: gcr.io/google_containers/zeppelin:v0.5.6_v1 + image: k8s.gcr.io/zeppelin:v0.5.6_v1 ports: - containerPort: 8080 resources: diff --git a/examples/storage/cassandra/image/Dockerfile b/examples/storage/cassandra/image/Dockerfile index 45b75951235..5c4658d7f18 100644 --- a/examples/storage/cassandra/image/Dockerfile +++ b/examples/storage/cassandra/image/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM gcr.io/google_containers/ubuntu-slim:0.9 +FROM k8s.gcr.io/ubuntu-slim:0.9 ARG BUILD_DATE ARG VCS_REF diff --git a/examples/storage/cassandra/image/Makefile b/examples/storage/cassandra/image/Makefile index ac8ef75e063..71cd0c6dfbb 100644 --- a/examples/storage/cassandra/image/Makefile +++ b/examples/storage/cassandra/image/Makefile @@ -35,7 +35,7 @@ container-dev: build: container container-dev push: build - gcloud docker -- push ${PROJECT}/cassandra:${VERSION} - gcloud docker -- push ${PROJECT}/cassandra:${VERSION}-dev + gcloud docker --server=k8s.gcr.io -- push ${PROJECT}/cassandra:${VERSION} + gcloud docker --server=k8s.gcr.io -- push ${PROJECT}/cassandra:${VERSION}-dev .PHONY: all build push diff --git a/examples/storage/redis/redis-controller.yaml b/examples/storage/redis/redis-controller.yaml index fcb5e67cd6f..dab2f7f1ab8 100644 --- a/examples/storage/redis/redis-controller.yaml +++ b/examples/storage/redis/redis-controller.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: redis - image: gcr.io/google_containers/redis:v1 + image: k8s.gcr.io/redis:v1 ports: - containerPort: 6379 resources: diff --git a/examples/storage/redis/redis-master.yaml b/examples/storage/redis/redis-master.yaml index 57305a7a352..589de648f5f 100644 --- a/examples/storage/redis/redis-master.yaml +++ b/examples/storage/redis/redis-master.yaml @@ -9,7 +9,7 @@ metadata: spec: containers: - name: master - image: gcr.io/google_containers/redis:v1 + image: k8s.gcr.io/redis:v1 env: - name: MASTER value: "true" diff --git a/examples/storage/redis/redis-sentinel-controller.yaml b/examples/storage/redis/redis-sentinel-controller.yaml index da09e10cbbc..6c4b14347a4 100644 --- a/examples/storage/redis/redis-sentinel-controller.yaml +++ b/examples/storage/redis/redis-sentinel-controller.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: sentinel - image: gcr.io/google_containers/redis:v1 + image: k8s.gcr.io/redis:v1 env: - name: SENTINEL value: "true" diff --git 
a/examples/storage/rethinkdb/admin-pod.yaml b/examples/storage/rethinkdb/admin-pod.yaml index eac07f33a5d..12163909d61 100644 --- a/examples/storage/rethinkdb/admin-pod.yaml +++ b/examples/storage/rethinkdb/admin-pod.yaml @@ -7,7 +7,7 @@ metadata: name: rethinkdb-admin spec: containers: - - image: gcr.io/google_containers/rethinkdb:1.16.0_1 + - image: k8s.gcr.io/rethinkdb:1.16.0_1 name: rethinkdb env: - name: POD_NAMESPACE diff --git a/examples/storage/rethinkdb/rc.yaml b/examples/storage/rethinkdb/rc.yaml index 36b319191cd..23becb6e887 100644 --- a/examples/storage/rethinkdb/rc.yaml +++ b/examples/storage/rethinkdb/rc.yaml @@ -16,7 +16,7 @@ spec: role: replicas spec: containers: - - image: gcr.io/google_containers/rethinkdb:1.16.0_1 + - image: k8s.gcr.io/rethinkdb:1.16.0_1 name: rethinkdb env: - name: POD_NAMESPACE diff --git a/examples/volumes/portworx/portworx-volume-pod.yaml b/examples/volumes/portworx/portworx-volume-pod.yaml index c5f195911a6..f44302f59ee 100644 --- a/examples/volumes/portworx/portworx-volume-pod.yaml +++ b/examples/volumes/portworx/portworx-volume-pod.yaml @@ -4,7 +4,7 @@ metadata: name: test-portworx-volume-pod spec: containers: - - image: gcr.io/google_containers/test-webserver + - image: k8s.gcr.io/test-webserver name: test-container volumeMounts: - mountPath: /test-portworx-volume diff --git a/examples/volumes/portworx/portworx-volume-pvcpod.yaml b/examples/volumes/portworx/portworx-volume-pvcpod.yaml index fb92b320f10..bef2d496567 100644 --- a/examples/volumes/portworx/portworx-volume-pvcpod.yaml +++ b/examples/volumes/portworx/portworx-volume-pvcpod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: gcr.io/google_containers/test-webserver + image: k8s.gcr.io/test-webserver volumeMounts: - name: test-volume mountPath: /test-portworx-volume diff --git a/examples/volumes/portworx/portworx-volume-pvcscpod.yaml b/examples/volumes/portworx/portworx-volume-pvcscpod.yaml index 464bf5d8fdd..8bdd5131927 100644 --- 
a/examples/volumes/portworx/portworx-volume-pvcscpod.yaml +++ b/examples/volumes/portworx/portworx-volume-pvcscpod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: gcr.io/google_containers/test-webserver + image: k8s.gcr.io/test-webserver volumeMounts: - name: test-volume mountPath: /test-portworx-volume diff --git a/examples/volumes/scaleio/pod-sc-pvc.yaml b/examples/volumes/scaleio/pod-sc-pvc.yaml index ceed7b567e8..c94e7bc393e 100644 --- a/examples/volumes/scaleio/pod-sc-pvc.yaml +++ b/examples/volumes/scaleio/pod-sc-pvc.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: pod-sio-small-container - image: gcr.io/google_containers/test-webserver + image: k8s.gcr.io/test-webserver volumeMounts: - mountPath: /test name: test-data diff --git a/examples/volumes/scaleio/pod.yaml b/examples/volumes/scaleio/pod.yaml index 4b53b2b53cc..b13ec668c78 100644 --- a/examples/volumes/scaleio/pod.yaml +++ b/examples/volumes/scaleio/pod.yaml @@ -4,7 +4,7 @@ metadata: name: pod-0 spec: containers: - - image: gcr.io/google_containers/test-webserver + - image: k8s.gcr.io/test-webserver name: pod-0 volumeMounts: - mountPath: /test-pd diff --git a/examples/volumes/vsphere/simple-statefulset.yaml b/examples/volumes/vsphere/simple-statefulset.yaml index 3684a3b64e1..d9de93c2574 100644 --- a/examples/volumes/vsphere/simple-statefulset.yaml +++ b/examples/volumes/vsphere/simple-statefulset.yaml @@ -27,7 +27,7 @@ spec: spec: containers: - name: nginx - image: gcr.io/google_containers/nginx-slim:0.8 + image: k8s.gcr.io/nginx-slim:0.8 ports: - containerPort: 80 name: web diff --git a/examples/volumes/vsphere/vsphere-volume-pod.yaml b/examples/volumes/vsphere/vsphere-volume-pod.yaml index 8660d62e493..0204ad3a59b 100644 --- a/examples/volumes/vsphere/vsphere-volume-pod.yaml +++ b/examples/volumes/vsphere/vsphere-volume-pod.yaml @@ -4,7 +4,7 @@ metadata: name: test-vmdk spec: containers: - - image: gcr.io/google_containers/test-webserver + - image: 
k8s.gcr.io/test-webserver name: test-container volumeMounts: - mountPath: /test-vmdk diff --git a/examples/volumes/vsphere/vsphere-volume-pvcpod.yaml b/examples/volumes/vsphere/vsphere-volume-pvcpod.yaml index 291664adaa9..326c0031f32 100644 --- a/examples/volumes/vsphere/vsphere-volume-pvcpod.yaml +++ b/examples/volumes/vsphere/vsphere-volume-pvcpod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: gcr.io/google_containers/test-webserver + image: k8s.gcr.io/test-webserver volumeMounts: - name: test-volume mountPath: /test-vmdk diff --git a/examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml b/examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml index 036aeb280cb..c569a0b36d9 100644 --- a/examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml +++ b/examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: gcr.io/google_containers/test-webserver + image: k8s.gcr.io/test-webserver volumeMounts: - name: test-volume mountPath: /test-vmdk diff --git a/hack/gen-swagger-doc/README.md b/hack/gen-swagger-doc/README.md index eacf5119d84..5b35be4aff7 100644 --- a/hack/gen-swagger-doc/README.md +++ b/hack/gen-swagger-doc/README.md @@ -3,7 +3,7 @@ This folder contains the sources needed to build the gen-swagger-doc container. To build the container image, ``` -$ sudo docker build -t gcr.io/google_containers/gen-swagger-docs:v1 . +$ sudo docker build -t k8s.gcr.io/gen-swagger-docs:v1 . 
``` To generate the html docs, diff --git a/hack/lib/swagger.sh b/hack/lib/swagger.sh index 25bf16d8181..af872d807e5 100644 --- a/hack/lib/swagger.sh +++ b/hack/lib/swagger.sh @@ -117,7 +117,7 @@ kube::swagger::gen_api_ref_docs() { -v "${swagger_spec_path}":/swagger-source:z \ -v "${register_file}":/register.go:z \ --net=host -e "https_proxy=${KUBERNETES_HTTPS_PROXY:-}" \ - gcr.io/google_containers/gen-swagger-docs:v8 \ + k8s.gcr.io/gen-swagger-docs:v8 \ "${swagger_json_name}" done diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index cac25e5c783..9fadd23db82 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -745,7 +745,7 @@ function start_kubelet { --privileged=true \ -i \ --cidfile=$KUBELET_CIDFILE \ - gcr.io/google_containers/kubelet \ + k8s.gcr.io/kubelet \ /kubelet --v=${LOG_LEVEL} --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --pod-manifest-path="${POD_MANIFEST_PATH}" --hostname-override="${HOSTNAME_OVERRIDE}" --cloud-provider="${CLOUD_PROVIDER}" --cloud-config="${CLOUD_CONFIG}" \ --address="127.0.0.1" --kubeconfig "$CERT_DIR"/kubelet.kubeconfig --port="$KUBELET_PORT" --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" &> $KUBELET_LOG & fi } diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index 50e873dd752..663fdb27a64 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -37,15 +37,15 @@ KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248} CTLRMGR_PORT=${CTLRMGR_PORT:-10252} PROXY_HOST=127.0.0.1 # kubectl only serves on localhost. 
-IMAGE_NGINX="gcr.io/google-containers/nginx:1.7.9" -IMAGE_DEPLOYMENT_R1="gcr.io/google-containers/nginx:test-cmd" # deployment-revision1.yaml +IMAGE_NGINX="k8s.gcr.io/nginx:1.7.9" +IMAGE_DEPLOYMENT_R1="k8s.gcr.io/nginx:test-cmd" # deployment-revision1.yaml IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml -IMAGE_PERL="gcr.io/google-containers/perl" -IMAGE_PAUSE_V2="gcr.io/google-containers/pause:2.0" -IMAGE_DAEMONSET_R2="gcr.io/google-containers/pause:latest" -IMAGE_DAEMONSET_R2_2="gcr.io/google-containers/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml -IMAGE_STATEFULSET_R1="gcr.io/google_containers/nginx-slim:0.7" -IMAGE_STATEFULSET_R2="gcr.io/google_containers/nginx-slim:0.8" +IMAGE_PERL="k8s.gcr.io/perl" +IMAGE_PAUSE_V2="k8s.gcr.io/pause:2.0" +IMAGE_DAEMONSET_R2="k8s.gcr.io/pause:latest" +IMAGE_DAEMONSET_R2_2="k8s.gcr.io/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml +IMAGE_STATEFULSET_R1="k8s.gcr.io/nginx-slim:0.7" +IMAGE_STATEFULSET_R2="k8s.gcr.io/nginx-slim:0.8" # Expose kubectl directly for readability PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH @@ -719,9 +719,9 @@ run_pod_tests() { kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:' ## Patch pod from JSON can change image # Command - kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}' - # Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0 - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:' + kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "k8s.gcr.io/pause-amd64:3.0"}]}}' + # Post-condition: valid-pod POD has image k8s.gcr.io/pause-amd64:3.0 + kube::test::get_object_assert 
pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/pause-amd64:3.0:' ## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected ERROR_FILE="${KUBE_TEMP}/conflict-error" @@ -802,13 +802,13 @@ __EOF__ kubectl delete node node-v1-test "${kube_flags[@]}" ## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor - echo -e "#!/bin/bash\n${SED} -i \"s/nginx/gcr.io\/google_containers\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh + echo -e "#!/bin/bash\n${SED} -i \"s/nginx/k8s.gcr.io\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh chmod +x /tmp/tmp-editor.sh # Pre-condition: valid-pod POD has image nginx kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' [[ "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true | grep Patch:)" ]] - # Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:' + # Post-condition: valid-pod POD has image k8s.gcr.io/serve_hostname + kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/serve_hostname:' # cleaning rm /tmp/tmp-editor.sh @@ -2746,7 +2746,7 @@ run_deployment_tests() { create_and_use_new_namespace kube::log::status "Testing deployments" # Test kubectl create deployment (using default - old generator) - kubectl create deployment test-nginx-extensions --image=gcr.io/google-containers/nginx:test-cmd + kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd # Post-Condition: Deployment "nginx" is created. kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx' # and old generator was used, iow. 
old defaults are applied @@ -2761,7 +2761,7 @@ run_deployment_tests() { kubectl delete deployment test-nginx-extensions "${kube_flags[@]}" # Test kubectl create deployment - kubectl create deployment test-nginx-apps --image=gcr.io/google-containers/nginx:test-cmd --generator=deployment-basic/apps.v1beta1 + kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1 # Post-Condition: Deployment "nginx" is created. kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx' # and new generator was used, iow. new defaults are applied @@ -2806,7 +2806,7 @@ run_deployment_tests() { kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' # Create deployment - kubectl create deployment nginx-deployment --image=gcr.io/google-containers/nginx:test-cmd + kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd # Wait for rs to come up. kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1' # Delete the deployment with cascade set to false. 
@@ -3064,7 +3064,7 @@ run_rs_tests() { # Test set commands # Pre-condition: frontend replica set exists at generation 1 kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1' - kubectl set image rs/frontend "${kube_flags[@]}" *=gcr.io/google-containers/pause:test-cmd + kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2' kubectl set env rs/frontend "${kube_flags[@]}" foo=bar kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3' @@ -3151,7 +3151,7 @@ run_daemonset_tests() { # Template Generation should stay 1 kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1' # Test set commands - kubectl set image daemonsets/bind "${kube_flags[@]}" *=gcr.io/google-containers/pause:test-cmd + kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2' kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3' diff --git a/hack/testdata/deployment-multicontainer-resources.yaml b/hack/testdata/deployment-multicontainer-resources.yaml index 533e2e46f47..b36ace094b3 100644 --- a/hack/testdata/deployment-multicontainer-resources.yaml +++ b/hack/testdata/deployment-multicontainer-resources.yaml @@ -16,9 +16,9 @@ spec: spec: containers: - name: nginx - image: gcr.io/google-containers/nginx:test-cmd + image: k8s.gcr.io/nginx:test-cmd ports: - containerPort: 80 - name: perl - image: gcr.io/google-containers/perl + image: k8s.gcr.io/perl terminationGracePeriodSeconds: 0 diff --git a/hack/testdata/deployment-multicontainer.yaml b/hack/testdata/deployment-multicontainer.yaml index 115888a0290..117483957fd 100644 --- a/hack/testdata/deployment-multicontainer.yaml +++ b/hack/testdata/deployment-multicontainer.yaml @@ -16,8 +16,8 @@ 
spec: spec: containers: - name: nginx - image: gcr.io/google-containers/nginx:test-cmd + image: k8s.gcr.io/nginx:test-cmd ports: - containerPort: 80 - name: perl - image: gcr.io/google-containers/perl + image: k8s.gcr.io/perl diff --git a/hack/testdata/deployment-revision1.yaml b/hack/testdata/deployment-revision1.yaml index cfbec36c454..2bcb8edea73 100644 --- a/hack/testdata/deployment-revision1.yaml +++ b/hack/testdata/deployment-revision1.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: gcr.io/google-containers/nginx:test-cmd + image: k8s.gcr.io/nginx:test-cmd ports: - containerPort: 80 diff --git a/hack/testdata/deployment-revision2.yaml b/hack/testdata/deployment-revision2.yaml index 4b171f604bc..1ed91c1375a 100644 --- a/hack/testdata/deployment-revision2.yaml +++ b/hack/testdata/deployment-revision2.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: gcr.io/google-containers/nginx:1.7.9 + image: k8s.gcr.io/nginx:1.7.9 ports: - containerPort: 80 diff --git a/hack/testdata/filter/pod-apply-selector.yaml b/hack/testdata/filter/pod-apply-selector.yaml index 73b83d6ba37..f296b68b9af 100644 --- a/hack/testdata/filter/pod-apply-selector.yaml +++ b/hack/testdata/filter/pod-apply-selector.yaml @@ -8,4 +8,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 diff --git a/hack/testdata/filter/pod-dont-apply.yaml b/hack/testdata/filter/pod-dont-apply.yaml index 7ea1610de90..9b8f9f6e900 100644 --- a/hack/testdata/filter/pod-dont-apply.yaml +++ b/hack/testdata/filter/pod-dont-apply.yaml @@ -8,4 +8,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 diff --git a/hack/testdata/multi-resource-json-modify.json b/hack/testdata/multi-resource-json-modify.json index 3ff562e6d9a..2f88c0ac8e3 100644 --- a/hack/testdata/multi-resource-json-modify.json +++ 
b/hack/testdata/multi-resource-json-modify.json @@ -43,7 +43,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "gcr.io/google-containers/pause:2.0", + "image": "k8s.gcr.io/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-json.json b/hack/testdata/multi-resource-json.json index 8ba2198e6d2..869fdc7cbfc 100644 --- a/hack/testdata/multi-resource-json.json +++ b/hack/testdata/multi-resource-json.json @@ -41,7 +41,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "gcr.io/google-containers/pause:2.0", + "image": "k8s.gcr.io/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-list-modify.json b/hack/testdata/multi-resource-list-modify.json index e0885c10241..af902efcfc3 100644 --- a/hack/testdata/multi-resource-list-modify.json +++ b/hack/testdata/multi-resource-list-modify.json @@ -47,7 +47,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "gcr.io/google-containers/pause:2.0", + "image": "k8s.gcr.io/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-list.json b/hack/testdata/multi-resource-list.json index 8918223aede..17abca285aa 100644 --- a/hack/testdata/multi-resource-list.json +++ b/hack/testdata/multi-resource-list.json @@ -45,7 +45,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "gcr.io/google-containers/pause:2.0", + "image": "k8s.gcr.io/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-rclist-modify.json b/hack/testdata/multi-resource-rclist-modify.json index 369d73399b9..a28169c8ee5 100644 --- a/hack/testdata/multi-resource-rclist-modify.json +++ b/hack/testdata/multi-resource-rclist-modify.json @@ -26,7 +26,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "gcr.io/google-containers/pause:2.0", + "image": "k8s.gcr.io/pause:2.0", "ports":[{ "containerPort":9949, 
"protocol":"TCP" @@ -60,7 +60,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "gcr.io/google-containers/pause:2.0", + "image": "k8s.gcr.io/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-rclist.json b/hack/testdata/multi-resource-rclist.json index 369d73399b9..a28169c8ee5 100644 --- a/hack/testdata/multi-resource-rclist.json +++ b/hack/testdata/multi-resource-rclist.json @@ -26,7 +26,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "gcr.io/google-containers/pause:2.0", + "image": "k8s.gcr.io/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" @@ -60,7 +60,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "gcr.io/google-containers/pause:2.0", + "image": "k8s.gcr.io/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-yaml-modify.yaml b/hack/testdata/multi-resource-yaml-modify.yaml index 86fe824197b..067b75630ba 100644 --- a/hack/testdata/multi-resource-yaml-modify.yaml +++ b/hack/testdata/multi-resource-yaml-modify.yaml @@ -29,7 +29,7 @@ spec: spec: containers: - name: mock-container - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 ports: - containerPort: 9949 protocol: TCP diff --git a/hack/testdata/multi-resource-yaml.yaml b/hack/testdata/multi-resource-yaml.yaml index bef9e88b2c2..642ebdb47ed 100644 --- a/hack/testdata/multi-resource-yaml.yaml +++ b/hack/testdata/multi-resource-yaml.yaml @@ -27,7 +27,7 @@ spec: spec: containers: - name: mock-container - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 ports: - containerPort: 9949 protocol: TCP diff --git a/hack/testdata/null-propagation/deployment-l1.yaml b/hack/testdata/null-propagation/deployment-l1.yaml index c5123abc5ee..051fba91f8e 100644 --- a/hack/testdata/null-propagation/deployment-l1.yaml +++ b/hack/testdata/null-propagation/deployment-l1.yaml @@ -10,4 +10,4 @@ spec: spec: containers: - 
name: nginx - image: gcr.io/google-containers/nginx:1.7.9 + image: k8s.gcr.io/nginx:1.7.9 diff --git a/hack/testdata/null-propagation/deployment-l2.yaml b/hack/testdata/null-propagation/deployment-l2.yaml index ffcbcc099b1..3b2426a768d 100644 --- a/hack/testdata/null-propagation/deployment-l2.yaml +++ b/hack/testdata/null-propagation/deployment-l2.yaml @@ -14,5 +14,5 @@ spec: spec: containers: - name: nginx - image: gcr.io/google-containers/nginx:1.7.9 + image: k8s.gcr.io/nginx:1.7.9 terminationMessagePolicy: null diff --git a/hack/testdata/pod-apply.yaml b/hack/testdata/pod-apply.yaml index a736a599d86..235d1c8f0f3 100644 --- a/hack/testdata/pod-apply.yaml +++ b/hack/testdata/pod-apply.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 diff --git a/hack/testdata/pod-with-api-env.yaml b/hack/testdata/pod-with-api-env.yaml index aef451ac2fe..3f76d210dfb 100644 --- a/hack/testdata/pod-with-api-env.yaml +++ b/hack/testdata/pod-with-api-env.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: gcr.io/google_containers/busybox + image: k8s.gcr.io/busybox command: [ "/bin/sh", "-c", "env" ] env: - name: TEST_CMD_1 diff --git a/hack/testdata/pod-with-precision.json b/hack/testdata/pod-with-precision.json index 5aac946cd2b..31a43896999 100644 --- a/hack/testdata/pod-with-precision.json +++ b/hack/testdata/pod-with-precision.json @@ -9,7 +9,7 @@ "containers": [ { "name": "kubernetes-pause", - "image": "gcr.io/google_containers/pause-amd64:3.0" + "image": "k8s.gcr.io/pause-amd64:3.0" } ], "restartPolicy": "Never", diff --git a/hack/testdata/pod.yaml b/hack/testdata/pod.yaml index 8ccadcecdbd..92b504ef77a 100644 --- a/hack/testdata/pod.yaml +++ b/hack/testdata/pod.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 diff --git a/hack/testdata/prune/a.yaml 
b/hack/testdata/prune/a.yaml index aa86f28df41..badd10caa8b 100644 --- a/hack/testdata/prune/a.yaml +++ b/hack/testdata/prune/a.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 diff --git a/hack/testdata/prune/b.yaml b/hack/testdata/prune/b.yaml index 6d212ead91f..f92fbc47f0a 100644 --- a/hack/testdata/prune/b.yaml +++ b/hack/testdata/prune/b.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 diff --git a/hack/testdata/recursive/deployment/deployment/nginx-broken.yaml b/hack/testdata/recursive/deployment/deployment/nginx-broken.yaml index 6d98eda02ce..18c0d4ea7a2 100644 --- a/hack/testdata/recursive/deployment/deployment/nginx-broken.yaml +++ b/hack/testdata/recursive/deployment/deployment/nginx-broken.yaml @@ -13,6 +13,6 @@ spec: spec: containers: - name: nginx - image: gcr.io/google-containers/nginx:1.7.9 + image: k8s.gcr.io/nginx:1.7.9 ports: - containerPort: 80 diff --git a/hack/testdata/recursive/deployment/deployment/nginx.yaml b/hack/testdata/recursive/deployment/deployment/nginx.yaml index f956f0272b1..f416d6c25cd 100644 --- a/hack/testdata/recursive/deployment/deployment/nginx.yaml +++ b/hack/testdata/recursive/deployment/deployment/nginx.yaml @@ -13,6 +13,6 @@ spec: spec: containers: - name: nginx - image: gcr.io/google-containers/nginx:1.7.9 + image: k8s.gcr.io/nginx:1.7.9 ports: - containerPort: 80 diff --git a/hack/testdata/recursive/deployment/nginx.yaml b/hack/testdata/recursive/deployment/nginx.yaml index 9842a65da02..7988317f466 100644 --- a/hack/testdata/recursive/deployment/nginx.yaml +++ b/hack/testdata/recursive/deployment/nginx.yaml @@ -13,6 +13,6 @@ spec: spec: containers: - name: nginx - image: gcr.io/google-containers/nginx:1.7.9 + image: k8s.gcr.io/nginx:1.7.9 ports: - containerPort: 80 diff --git a/hack/testdata/rollingupdate-daemonset-rv2.yaml 
b/hack/testdata/rollingupdate-daemonset-rv2.yaml index 3214dcffe91..b807e958dd2 100644 --- a/hack/testdata/rollingupdate-daemonset-rv2.yaml +++ b/hack/testdata/rollingupdate-daemonset-rv2.yaml @@ -24,6 +24,6 @@ spec: namespaces: [] containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:latest + image: k8s.gcr.io/pause:latest - name: app - image: gcr.io/google-containers/nginx:test-cmd + image: k8s.gcr.io/nginx:test-cmd diff --git a/hack/testdata/rollingupdate-daemonset.yaml b/hack/testdata/rollingupdate-daemonset.yaml index c8a9cdca21d..4bc77ed5bfe 100644 --- a/hack/testdata/rollingupdate-daemonset.yaml +++ b/hack/testdata/rollingupdate-daemonset.yaml @@ -24,4 +24,4 @@ spec: namespaces: [] containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 diff --git a/hack/testdata/rollingupdate-statefulset-rv2.yaml b/hack/testdata/rollingupdate-statefulset-rv2.yaml index fec5493ab69..4e4fc4e6091 100644 --- a/hack/testdata/rollingupdate-statefulset-rv2.yaml +++ b/hack/testdata/rollingupdate-statefulset-rv2.yaml @@ -18,7 +18,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: nginx - image: gcr.io/google_containers/nginx-slim:0.8 + image: k8s.gcr.io/nginx-slim:0.8 ports: - containerPort: 80 name: web @@ -27,7 +27,7 @@ spec: - -c - 'while true; do sleep 1; done' - name: pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 ports: - containerPort: 81 name: web-2 diff --git a/hack/testdata/rollingupdate-statefulset.yaml b/hack/testdata/rollingupdate-statefulset.yaml index 2acbf0f322b..ffdc242e17c 100644 --- a/hack/testdata/rollingupdate-statefulset.yaml +++ b/hack/testdata/rollingupdate-statefulset.yaml @@ -18,7 +18,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: nginx - image: gcr.io/google_containers/nginx-slim:0.7 + image: k8s.gcr.io/nginx-slim:0.7 ports: - containerPort: 80 name: web diff --git a/hack/testdata/sorted-pods/sorted-pod1.yaml 
b/hack/testdata/sorted-pods/sorted-pod1.yaml index fba02b9017c..aa767a2ea24 100644 --- a/hack/testdata/sorted-pods/sorted-pod1.yaml +++ b/hack/testdata/sorted-pods/sorted-pod1.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 diff --git a/hack/testdata/sorted-pods/sorted-pod2.yaml b/hack/testdata/sorted-pods/sorted-pod2.yaml index f0ab7e906ea..f05040569b1 100644 --- a/hack/testdata/sorted-pods/sorted-pod2.yaml +++ b/hack/testdata/sorted-pods/sorted-pod2.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 diff --git a/hack/testdata/sorted-pods/sorted-pod3.yaml b/hack/testdata/sorted-pods/sorted-pod3.yaml index 5d166786c77..e02c501fa12 100644 --- a/hack/testdata/sorted-pods/sorted-pod3.yaml +++ b/hack/testdata/sorted-pods/sorted-pod3.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: gcr.io/google-containers/pause:2.0 + image: k8s.gcr.io/pause:2.0 diff --git a/pkg/api/testing/deep_copy_test.go b/pkg/api/testing/deep_copy_test.go index 27c9f42f6aa..a93ccd00aac 100644 --- a/pkg/api/testing/deep_copy_test.go +++ b/pkg/api/testing/deep_copy_test.go @@ -64,7 +64,7 @@ var benchmarkPod api.Pod = api.Pod{ Containers: []api.Container{ { Name: "etcd-container", - Image: "gcr.io/google_containers/etcd:2.0.9", + Image: "k8s.gcr.io/etcd:2.0.9", Command: []string{ "/usr/local/bin/etcd", "--addr", @@ -120,7 +120,7 @@ var benchmarkPod api.Pod = api.Pod{ }, Ready: true, RestartCount: 0, - Image: "gcr.io/google_containers/etcd:2.0.9", + Image: "k8s.gcr.io/etcd:2.0.9", ImageID: "docker://b6b9a86dc06aa1361357ca1b105feba961f6a4145adca6c54e142c0be0fe87b0", ContainerID: "docker://3cbbf818f1addfc252957b4504f56ef2907a313fe6afc47fc75373674255d46d", }, diff --git a/pkg/api/testing/replication_controller_example.json b/pkg/api/testing/replication_controller_example.json 
index 70eef1cff32..8f858c023f2 100644 --- a/pkg/api/testing/replication_controller_example.json +++ b/pkg/api/testing/replication_controller_example.json @@ -47,7 +47,7 @@ "containers": [ { "name": "elasticsearch-logging", - "image": "gcr.io/google_containers/elasticsearch:1.0", + "image": "k8s.gcr.io/elasticsearch:1.0", "ports": [ { "name": "db", diff --git a/pkg/kubectl/validation/testdata/v1/invalidPod1.json b/pkg/kubectl/validation/testdata/v1/invalidPod1.json index d935742d77c..384d1857916 100644 --- a/pkg/kubectl/validation/testdata/v1/invalidPod1.json +++ b/pkg/kubectl/validation/testdata/v1/invalidPod1.json @@ -11,7 +11,7 @@ "containers": [ { "name": "master", - "image": "gcr.io/fake_project/fake_image:fake_tag", + "image": "gcr.io/fake_project/fake_image:fake_tag", "args": "this is a bad command" } ] diff --git a/pkg/kubectl/validation/testdata/v1/invalidPod3.json b/pkg/kubectl/validation/testdata/v1/invalidPod3.json index 4d99181dc07..69e0e853898 100644 --- a/pkg/kubectl/validation/testdata/v1/invalidPod3.json +++ b/pkg/kubectl/validation/testdata/v1/invalidPod3.json @@ -14,7 +14,7 @@ "containers": [ { "name": "apache-php", - "image": "gcr.io/fake_project/fake_image:fake_tag", + "image": "gcr.io/fake_project/fake_image:fake_tag", "ports": [ { "name": "apache", diff --git a/pkg/kubectl/validation/testdata/v1/invalidPod4.yaml b/pkg/kubectl/validation/testdata/v1/invalidPod4.yaml index f02bf7b336b..a6958db5eb6 100644 --- a/pkg/kubectl/validation/testdata/v1/invalidPod4.yaml +++ b/pkg/kubectl/validation/testdata/v1/invalidPod4.yaml @@ -11,4 +11,4 @@ spec: args: - command: - - \ No newline at end of file + - diff --git a/pkg/kubelet/dockershim/docker_sandbox.go b/pkg/kubelet/dockershim/docker_sandbox.go index b595e310096..bc81d5a8e00 100644 --- a/pkg/kubelet/dockershim/docker_sandbox.go +++ b/pkg/kubelet/dockershim/docker_sandbox.go @@ -36,7 +36,7 @@ import ( ) const ( - defaultSandboxImage = "gcr.io/google_containers/pause-amd64:3.0" + defaultSandboxImage = 
"k8s.gcr.io/pause-amd64:3.0" // Various default sandbox resources requests/limits. defaultSandboxCPUshares int64 = 2 diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index 38636f70e1d..79ee6e88e25 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -96,7 +96,7 @@ func generateImageTags() []string { // that kubelet report up to maxNamesPerImageInNodeStatus tags. count := rand.IntnRange(maxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1) for ; count > 0; count-- { - tagList = append(tagList, "gcr.io/google_containers:v"+strconv.Itoa(count)) + tagList = append(tagList, "k8s.gcr.io:v"+strconv.Itoa(count)) } return tagList } @@ -492,11 +492,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) { // images will be sorted from max to min in node status. Images: []v1.ContainerImage{ { - Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, + Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"}, SizeBytes: 456, }, { - Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"}, + Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"}, SizeBytes: 123, }, }, @@ -680,11 +680,11 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { }, Images: []v1.ContainerImage{ { - Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, + Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"}, SizeBytes: 456, }, { - Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"}, + Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"}, SizeBytes: 123, }, }, diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 5019121d43a..823a2bb8d13 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -127,12 +127,12 @@ func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubel imageList := []kubecontainer.Image{ { ID: "abc", - RepoTags: 
[]string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"}, + RepoTags: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"}, Size: 123, }, { ID: "efg", - RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, + RepoTags: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"}, Size: 456, }, } diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index 7df9f43d79a..1d35438c306 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -453,7 +453,7 @@ type MountedVolume struct { // name: test-pd // spec: // containers: - // - image: gcr.io/google_containers/test-webserver + // - image: k8s.gcr.io/test-webserver // name: test-container // volumeMounts: // - mountPath: /test-pd @@ -491,7 +491,7 @@ type MountedVolume struct { // name: test-pd // spec: // containers: - // - image: gcr.io/google_containers/test-webserver + // - image: k8s.gcr.io/test-webserver // name: test-container // volumeMounts: // - mountPath: /test-pd diff --git a/pkg/volume/util/operationexecutor/operation_executor_test.go b/pkg/volume/util/operationexecutor/operation_executor_test.go index 18e68a3ab0d..6aabd54b1fb 100644 --- a/pkg/volume/util/operationexecutor/operation_executor_test.go +++ b/pkg/volume/util/operationexecutor/operation_executor_test.go @@ -493,7 +493,7 @@ func getTestPodWithSecret(podName, secretName string) *v1.Pod { Containers: []v1.Container{ { Name: "secret-volume-test", - Image: "gcr.io/google_containers/mounttest:0.8", + Image: "k8s.gcr.io/mounttest:0.8", Args: []string{ "--file_content=/etc/secret-volume/data-1", "--file_mode=/etc/secret-volume/data-1"}, @@ -532,7 +532,7 @@ func getTestPodWithGCEPD(podName, pdName string) *v1.Pod { Containers: []v1.Container{ { Name: "pd-volume-test", - Image: "gcr.io/google_containers/mounttest:0.8", + Image: "k8s.gcr.io/mounttest:0.8", Args: []string{ 
"--file_content=/etc/pd-volume/data-1", }, diff --git a/pkg/volume/util/util_test.go b/pkg/volume/util/util_test.go index b11be33eeb2..1b56485e137 100644 --- a/pkg/volume/util/util_test.go +++ b/pkg/volume/util/util_test.go @@ -162,7 +162,7 @@ metadata: name: testpod spec: containers: - - image: gcr.io/google_containers/busybox + - image: k8s.gcr.io/busybox `, false, }, @@ -179,7 +179,7 @@ spec: "spec": { "containers": [ { - "image": "gcr.io/google_containers/busybox" + "image": "k8s.gcr.io/busybox" } ] } @@ -195,7 +195,7 @@ kind: Pod metadata: name: testpod spec: - - image: gcr.io/google_containers/busybox + - image: k8s.gcr.io/busybox `, true, }, diff --git a/pkg/volume/util_test.go b/pkg/volume/util_test.go index 273722a0c3d..011793defe0 100644 --- a/pkg/volume/util_test.go +++ b/pkg/volume/util_test.go @@ -94,8 +94,8 @@ func TestRecyclerPod(t *testing.T) { // Pod gets Running and Succeeded newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""), newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"), - newEvent(v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""), - newEvent(v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""), + newEvent(v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""), + newEvent(v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""), newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"), newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"), newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""), @@ -103,8 +103,8 @@ func TestRecyclerPod(t *testing.T) { }, expectedEvents: []mockEvent{ {v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"}, - {v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""}, - {v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""}, + 
{v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""}, + {v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""}, {v1.EventTypeNormal, "Created container with docker id 83d929aeac82"}, {v1.EventTypeNormal, "Started container with docker id 83d929aeac82"}, }, diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index 2cf7941e9dd..ab42a130c38 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -618,7 +618,7 @@ message Container { // Describe a container image message ContainerImage { // Names by which this image is known. - // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] repeated string names = 1; // The size of the image in bytes. diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 8eab4552d9a..a5b561d8809 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -3906,7 +3906,7 @@ type PodSignature struct { // Describe a container image type ContainerImage struct { // Names by which this image is known. - // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` // The size of the image in bytes. 
// +optional diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index 80cacc974e5..d89be951551 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -308,7 +308,7 @@ func (Container) SwaggerDoc() map[string]string { var map_ContainerImage = map[string]string{ "": "Describe a container image", - "names": "Names by which this image is known. e.g. [\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "names": "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", "sizeBytes": "The size of the image in bytes.", } diff --git a/test/e2e/apimachinery/initializers.go b/test/e2e/apimachinery/initializers.go index a38c6c7256b..1253e5b9e49 100644 --- a/test/e2e/apimachinery/initializers.go +++ b/test/e2e/apimachinery/initializers.go @@ -315,7 +315,7 @@ func newReplicaset() *v1beta1.ReplicaSet { Containers: []v1.Container{ { Name: name + "-container", - Image: "gcr.io/google_containers/porter:4524579c0eb935c056c8e75563b4e1eda31587e0", + Image: "k8s.gcr.io/porter:4524579c0eb935c056c8e75563b4e1eda31587e0", }, }, }, diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index de475112a95..85f5d8cb801 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -257,7 +257,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { Containers: []v1.Container{ { Name: "busybox", - Image: "gcr.io/google_containers/echoserver:1.6", + Image: "k8s.gcr.io/echoserver:1.6", }, }, RestartPolicy: v1.RestartPolicyAlways, @@ -301,7 +301,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) { func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) { container := v1.Container{ Name: "busybox", - 
Image: "gcr.io/google_containers/echoserver:1.6", + Image: "k8s.gcr.io/echoserver:1.6", } if exclusive { container.Ports = []v1.ContainerPort{ diff --git a/test/e2e/auth/metadata_concealment.go b/test/e2e/auth/metadata_concealment.go index 4c3a97bc162..df9455c8208 100644 --- a/test/e2e/auth/metadata_concealment.go +++ b/test/e2e/auth/metadata_concealment.go @@ -45,7 +45,7 @@ var _ = SIGDescribe("Metadata Concealment", func() { Containers: []v1.Container{ { Name: "check-metadata-concealment", - Image: "gcr.io/google_containers/check-metadata-concealment:v0.0.2", + Image: "k8s.gcr.io/check-metadata-concealment:v0.0.2", }, }, RestartPolicy: v1.RestartPolicyOnFailure, diff --git a/test/e2e/common/apparmor.go b/test/e2e/common/apparmor.go index 178496eb5b2..dcac6c0da61 100644 --- a/test/e2e/common/apparmor.go +++ b/test/e2e/common/apparmor.go @@ -185,7 +185,7 @@ func createAppArmorProfileLoader(f *framework.Framework) { Spec: api.PodSpec{ Containers: []api.Container{{ Name: "apparmor-loader", - Image: "gcr.io/google_containers/apparmor-loader:0.1", + Image: "k8s.gcr.io/apparmor-loader:0.1", Args: []string{"-poll", "10s", "/profiles"}, SecurityContext: &api.SecurityContext{ Privileged: &True, diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index 1ef5d4866f1..a5e89c43551 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -61,9 +61,9 @@ var CommonImageWhiteList = sets.NewString( imageutils.GetE2EImage(imageutils.ServeHostname), imageutils.GetE2EImage(imageutils.TestWebserver), imageutils.GetE2EImage(imageutils.Hostexec), - "gcr.io/google_containers/volume-nfs:0.8", - "gcr.io/google_containers/volume-gluster:0.2", - "gcr.io/google_containers/e2e-net-amd64:1.0", + "k8s.gcr.io/volume-nfs:0.8", + "k8s.gcr.io/volume-gluster:0.2", + "k8s.gcr.io/e2e-net-amd64:1.0", ) func svcByName(name string, port int) *v1.Service { diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go index f39b1e6d5e4..ae20e07ebe6 100644 --- 
a/test/e2e/framework/pv_util.go +++ b/test/e2e/framework/pv_util.go @@ -856,7 +856,7 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bo Containers: []v1.Container{ { Name: "write-pod", - Image: "gcr.io/google_containers/busybox:1.24", + Image: "k8s.gcr.io/busybox:1.24", Command: []string{"/bin/sh"}, Args: []string{"-c", command}, SecurityContext: &v1.SecurityContext{ diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 59ba3fc2e1d..7ff5a2e0d52 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -802,7 +802,7 @@ func newEchoServerPodSpec(podName string) *v1.Pod { Containers: []v1.Container{ { Name: "echoserver", - Image: "gcr.io/google_containers/echoserver:1.6", + Image: "k8s.gcr.io/echoserver:1.6", Ports: []v1.ContainerPort{{ContainerPort: int32(port)}}, }, }, diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 70ee3d8298c..02f9c43e72a 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -155,7 +155,7 @@ const ( ClaimProvisionTimeout = 5 * time.Minute // When these values are updated, also update cmd/kubelet/app/options/options.go - currentPodInfraContainerImageName = "gcr.io/google_containers/pause" + currentPodInfraContainerImageName = "k8s.gcr.io/pause" currentPodInfraContainerImageVersion = "3.0" // How long a node is allowed to become "Ready" after it is restarted before diff --git a/test/e2e/framework/volume_util.go b/test/e2e/framework/volume_util.go index 871534ff61c..03a26d76516 100644 --- a/test/e2e/framework/volume_util.go +++ b/test/e2e/framework/volume_util.go @@ -56,11 +56,11 @@ import ( // Current supported images for e2e volume testing to be assigned to VolumeTestConfig.serverImage const ( - NfsServerImage string = "gcr.io/google_containers/volume-nfs:0.8" - IscsiServerImage string = "gcr.io/google_containers/volume-iscsi:0.1" - GlusterfsServerImage string = 
"gcr.io/google_containers/volume-gluster:0.2" - CephServerImage string = "gcr.io/google_containers/volume-ceph:0.1" - RbdServerImage string = "gcr.io/google_containers/volume-rbd:0.1" + NfsServerImage string = "k8s.gcr.io/volume-nfs:0.8" + IscsiServerImage string = "k8s.gcr.io/volume-iscsi:0.1" + GlusterfsServerImage string = "k8s.gcr.io/volume-gluster:0.2" + CephServerImage string = "k8s.gcr.io/volume-ceph:0.1" + RbdServerImage string = "k8s.gcr.io/volume-rbd:0.1" ) const ( diff --git a/test/e2e/instrumentation/logging/utils/logging_pod.go b/test/e2e/instrumentation/logging/utils/logging_pod.go index 13a099f7efa..10ef37fcf6e 100644 --- a/test/e2e/instrumentation/logging/utils/logging_pod.go +++ b/test/e2e/instrumentation/logging/utils/logging_pod.go @@ -101,7 +101,7 @@ func (p *loadLoggingPod) Start(f *framework.Framework) error { Containers: []api_v1.Container{ { Name: loggingContainerName, - Image: "gcr.io/google_containers/logs-generator:v0.1.0", + Image: "k8s.gcr.io/logs-generator:v0.1.0", Env: []api_v1.EnvVar{ { Name: "LOGS_GENERATOR_LINES_TOTAL", diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go index 05b66946c48..ebe8d9740a3 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go @@ -98,7 +98,7 @@ func stackdriverExporterPodSpec(metricName string, metricValue int64) corev1.Pod Containers: []corev1.Container{ { Name: "stackdriver-exporter", - Image: "gcr.io/google-containers/sd-dummy-exporter:v0.1.0", + Image: "k8s.gcr.io/sd-dummy-exporter:v0.1.0", ImagePullPolicy: corev1.PullPolicy("Always"), Command: []string{"/sd_dummy_exporter", "--pod-id=$(POD_ID)", "--metric-name=" + metricName, fmt.Sprintf("--metric-value=%v", metricValue)}, Env: []corev1.EnvVar{ diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index ddcb2680308..2edb22c74b6 100644 --- 
a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -242,7 +242,7 @@ func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) { Containers: []v1.Container{ { Name: "dns", - Image: "gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5", + Image: "k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5", Command: []string{ "/usr/sbin/dnsmasq", "-u", "root", diff --git a/test/e2e/testing-manifests/ingress/http/rc.yaml b/test/e2e/testing-manifests/ingress/http/rc.yaml index 9b7d3b624db..a4bcd5e3959 100644 --- a/test/e2e/testing-manifests/ingress/http/rc.yaml +++ b/test/e2e/testing-manifests/ingress/http/rc.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: echoheaders - image: gcr.io/google_containers/echoserver:1.6 + image: k8s.gcr.io/echoserver:1.6 ports: - containerPort: 8080 readinessProbe: diff --git a/test/e2e/testing-manifests/ingress/nginx/rc.yaml b/test/e2e/testing-manifests/ingress/nginx/rc.yaml index 9d21ff55003..b80b0837a17 100644 --- a/test/e2e/testing-manifests/ingress/nginx/rc.yaml +++ b/test/e2e/testing-manifests/ingress/nginx/rc.yaml @@ -17,7 +17,7 @@ spec: spec: terminationGracePeriodSeconds: 0 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.1 + - image: k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.1 livenessProbe: httpGet: path: /healthz diff --git a/test/e2e/testing-manifests/ingress/static-ip/rc.yaml b/test/e2e/testing-manifests/ingress/static-ip/rc.yaml index abf9b036edd..391ad674448 100644 --- a/test/e2e/testing-manifests/ingress/static-ip/rc.yaml +++ b/test/e2e/testing-manifests/ingress/static-ip/rc.yaml @@ -11,6 +11,6 @@ spec: spec: containers: - name: echoheaders-https - image: gcr.io/google_containers/echoserver:1.6 + image: k8s.gcr.io/echoserver:1.6 ports: - containerPort: 8080 diff --git a/test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml b/test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml index 528aa25a125..b1ce4229cb0 100644 --- 
a/test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml +++ b/test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml @@ -17,7 +17,7 @@ spec: version: v1 spec: containers: - - image: gcr.io/google_containers/servicelb:0.1 + - image: k8s.gcr.io/servicelb:0.1 imagePullPolicy: Always livenessProbe: httpGet: diff --git a/test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml b/test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml index c2aa34ca1fa..f09c090d5b3 100644 --- a/test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml +++ b/test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml @@ -12,7 +12,7 @@ spec: spec: containers: - name: netexec - image: gcr.io/google_containers/netexec:1.4 + image: k8s.gcr.io/netexec:1.4 ports: - containerPort: 8080 # This is to force these pods to land on different hosts. diff --git a/test/e2e/testing-manifests/statefulset/cassandra/tester.yaml b/test/e2e/testing-manifests/statefulset/cassandra/tester.yaml index 65699f8e3cf..8139cf5f99a 100644 --- a/test/e2e/testing-manifests/statefulset/cassandra/tester.yaml +++ b/test/e2e/testing-manifests/statefulset/cassandra/tester.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: test-server - image: gcr.io/google-containers/cassandra-e2e-test:0.1 + image: k8s.gcr.io/cassandra-e2e-test:0.1 imagePullPolicy: Always ports: - containerPort: 8080 diff --git a/test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml b/test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml index 4ddeb8be398..d91eda411a4 100644 --- a/test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml +++ b/test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: etcd - image: gcr.io/google_containers/etcd-amd64:2.2.5 + image: k8s.gcr.io/etcd-amd64:2.2.5 imagePullPolicy: Always ports: - containerPort: 2380 diff --git a/test/e2e/testing-manifests/statefulset/etcd/tester.yaml 
b/test/e2e/testing-manifests/statefulset/etcd/tester.yaml index c5ea0b90c14..ee53bc14196 100644 --- a/test/e2e/testing-manifests/statefulset/etcd/tester.yaml +++ b/test/e2e/testing-manifests/statefulset/etcd/tester.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: test-server - image: gcr.io/google-containers/etcd-statefulset-e2e-test:0.0 + image: k8s.gcr.io/etcd-statefulset-e2e-test:0.0 imagePullPolicy: Always ports: - containerPort: 8080 diff --git a/test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml b/test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml index 4d982ba218a..facfb82802e 100644 --- a/test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml +++ b/test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml @@ -12,7 +12,7 @@ spec: spec: initContainers: - name: install - image: gcr.io/google_containers/galera-install:0.1 + image: k8s.gcr.io/galera-install:0.1 imagePullPolicy: Always args: - "--work-dir=/work-dir" @@ -41,7 +41,7 @@ spec: mountPath: "/etc/mysql" containers: - name: mysql - image: gcr.io/google_containers/mysql-galera:e2e + image: k8s.gcr.io/mysql-galera:e2e ports: - containerPort: 3306 name: mysql @@ -55,7 +55,7 @@ spec: - --defaults-file=/etc/mysql/my-galera.cnf - --user=root readinessProbe: - # TODO: If docker exec is buggy just use gcr.io/google_containers/mysql-healthz:1.0 + # TODO: If docker exec is buggy just use k8s.gcr.io/mysql-healthz:1.0 exec: command: - sh diff --git a/test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml b/test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml index c7e7ff8d4f8..3a60b3473a6 100644 --- a/test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml +++ b/test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: test-server - image: gcr.io/google-containers/mysql-e2e-test:0.1 + image: k8s.gcr.io/mysql-e2e-test:0.1 imagePullPolicy: Always ports: - 
containerPort: 8080 diff --git a/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml b/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml index a483fd5dd3f..0436fe25bab 100644 --- a/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml +++ b/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: nginx - image: gcr.io/google_containers/nginx-slim:0.8 + image: k8s.gcr.io/nginx-slim:0.8 ports: - containerPort: 80 name: web @@ -31,4 +31,4 @@ spec: accessModes: [ "ReadWriteOnce" ] resources: requests: - storage: 1Gi \ No newline at end of file + storage: 1Gi diff --git a/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml b/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml index e324ef5613f..7870a7df0d2 100644 --- a/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml +++ b/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml @@ -12,7 +12,7 @@ spec: spec: initContainers: - name: install - image: gcr.io/google_containers/redis-install-3.2.0:e2e + image: k8s.gcr.io/redis-install-3.2.0:e2e imagePullPolicy: Always args: - "--install-into=/opt" diff --git a/test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml b/test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml index 4160bdff42d..d2656bb74d0 100644 --- a/test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml +++ b/test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml @@ -12,7 +12,7 @@ spec: spec: initContainers: - name: install - image: gcr.io/google_containers/zookeeper-install-3.5.0-alpha:e2e + image: k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e imagePullPolicy: Always args: - "--install-into=/opt" diff --git a/test/e2e_node/conformance/build/Makefile b/test/e2e_node/conformance/build/Makefile index abc03366bc8..6ff2b4e5126 100644 --- a/test/e2e_node/conformance/build/Makefile +++ b/test/e2e_node/conformance/build/Makefile @@ -15,7 +15,7 @@ # Build 
the node-test image. # # Usage: -# [ARCH=amd64] [REGISTRY="gcr.io/google_containers"] [BIN_DIR="../../../../_output/bin"] make (build|push) VERSION={some_version_number e.g. 0.1} +# [ARCH=amd64] [REGISTRY="k8s.gcr.io"] [BIN_DIR="../../../../_output/bin"] make (build|push) VERSION={some_version_number e.g. 0.1} # SYSTEM_SPEC_NAME is the name of the system spec used for the node conformance # test. The specs are expected to be in SYSTEM_SPEC_DIR. @@ -23,7 +23,7 @@ SYSTEM_SPEC_NAME?= SYSTEM_SPEC_DIR?=../../system/specs # TODO(random-liu): Add this into release progress. -REGISTRY?=gcr.io/google_containers +REGISTRY?=k8s.gcr.io ARCH?=amd64 # BIN_DIR is the directory to find binaries, overwrite with ../../../../_output/bin # for local development. @@ -76,10 +76,10 @@ endif docker build --pull -t ${IMAGE_NAME}-${ARCH}:${VERSION} ${TEMP_DIR} push: build - gcloud docker -- push ${IMAGE_NAME}-${ARCH}:${VERSION} + gcloud docker --server=k8s.gcr.io -- push ${IMAGE_NAME}-${ARCH}:${VERSION} ifeq ($(ARCH),amd64) docker tag ${IMAGE_NAME}-${ARCH}:${VERSION} ${IMAGE_NAME}:${VERSION} - gcloud docker -- push ${IMAGE_NAME}:${VERSION} + gcloud docker --server=k8s.gcr.io -- push ${IMAGE_NAME}:${VERSION} endif .PHONY: all diff --git a/test/e2e_node/conformance/run_test.sh b/test/e2e_node/conformance/run_test.sh index af9f2febc83..6ddb2478832 100755 --- a/test/e2e_node/conformance/run_test.sh +++ b/test/e2e_node/conformance/run_test.sh @@ -44,7 +44,7 @@ SKIP=${SKIP:-""} TEST_ARGS=${TEST_ARGS:-""} # REGISTRY is the image registry for node test image. -REGISTRY=${REGISTRY:-"gcr.io/google_containers"} +REGISTRY=${REGISTRY:-"k8s.gcr.io"} # ARCH is the architecture of current machine, the script will use this to # select corresponding test container image. 
diff --git a/test/e2e_node/gke_environment_test.go b/test/e2e_node/gke_environment_test.go index bd52964d1fd..0b1c95c348e 100644 --- a/test/e2e_node/gke_environment_test.go +++ b/test/e2e_node/gke_environment_test.go @@ -84,7 +84,7 @@ func checkIPTables() (err error) { // checkPublicGCR checks the access to the public Google Container Registry by // pulling the busybox image. func checkPublicGCR() error { - const image = "gcr.io/google-containers/busybox" + const image = "k8s.gcr.io/busybox" output, err := runCommand("docker", "images", "-q", image) if len(output) != 0 { if _, err := runCommand("docker", "rmi", "-f", image); err != nil { @@ -170,7 +170,7 @@ func checkDockerConfig() error { // checkDockerNetworkClient checks client networking by pinging an external IP // address from a container. func checkDockerNetworkClient() error { - const imageName = "gcr.io/google-containers/busybox" + const imageName = "k8s.gcr.io/busybox" output, err := runCommand("docker", "run", "--rm", imageName, "sh", "-c", "ping -w 5 -q google.com") if err != nil { return err @@ -185,7 +185,7 @@ func checkDockerNetworkClient() error { // within a container and accessing it from outside. 
func checkDockerNetworkServer() error { const ( - imageName = "gcr.io/google-containers/nginx:1.7.9" + imageName = "k8s.gcr.io/nginx:1.7.9" hostAddr = "127.0.0.1" hostPort = "8088" containerPort = "80" diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index a131c398b82..090eafa2863 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -28,7 +28,7 @@ import ( var _ = framework.KubeDescribe("ImageID", func() { - busyBoxImage := "gcr.io/google_containers/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff" + busyBoxImage := "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff" f := framework.NewDefaultFramework("image-id-test") diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index 01360451fcb..fa323d90fd6 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -47,10 +47,10 @@ const ( // before test running so that the image pulling won't fail in actual test. 
var NodeImageWhiteList = sets.NewString( "google/cadvisor:latest", - "gcr.io/google-containers/stress:v1", + "k8s.gcr.io/stress:v1", busyboxImage, - "gcr.io/google_containers/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff", - "gcr.io/google_containers/node-problem-detector:v0.4.1", + "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff", + "k8s.gcr.io/node-problem-detector:v0.4.1", imageutils.GetE2EImage(imageutils.NginxSlim), imageutils.GetE2EImage(imageutils.ServeHostname), imageutils.GetE2EImage(imageutils.Netexec), diff --git a/test/e2e_node/jenkins/gci-init-gpu.yaml b/test/e2e_node/jenkins/gci-init-gpu.yaml index 064b77095c9..3b943de33f2 100644 --- a/test/e2e_node/jenkins/gci-init-gpu.yaml +++ b/test/e2e_node/jenkins/gci-init-gpu.yaml @@ -2,7 +2,7 @@ runcmd: - modprobe configs - - docker run -v /dev:/dev -v /home/kubernetes/bin/nvidia:/rootfs/nvidia -v /etc/os-release:/rootfs/etc/os-release -v /proc/sysrq-trigger:/sysrq -e BASE_DIR=/rootfs/nvidia --privileged gcr.io/google_containers/cos-nvidia-driver-install@sha256:cb55c7971c337fece62f2bfe858662522a01e43ac9984a2dd1dd5c71487d225c + - docker run -v /dev:/dev -v /home/kubernetes/bin/nvidia:/rootfs/nvidia -v /etc/os-release:/rootfs/etc/os-release -v /proc/sysrq-trigger:/sysrq -e BASE_DIR=/rootfs/nvidia --privileged k8s.gcr.io/cos-nvidia-driver-install@sha256:cb55c7971c337fece62f2bfe858662522a01e43ac9984a2dd1dd5c71487d225c - mount /tmp /tmp -o remount,exec,suid - usermod -a -G docker jenkins - mkdir -p /var/lib/kubelet diff --git a/test/e2e_node/memory_eviction_test.go b/test/e2e_node/memory_eviction_test.go index 63489e2d389..41749098183 100644 --- a/test/e2e_node/memory_eviction_test.go +++ b/test/e2e_node/memory_eviction_test.go @@ -272,7 +272,7 @@ func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) * Containers: []v1.Container{ { Name: ctnName, - Image: "gcr.io/google-containers/stress:v1", + Image: 
"k8s.gcr.io/stress:v1", ImagePullPolicy: "Always", Env: env, // 60 min timeout * 60s / tick per 10s = 360 ticks before timeout => ~11.11Mi/tick diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 3c9b6453ec9..9333b66cba7 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { pollInterval = 1 * time.Second pollConsistent = 5 * time.Second pollTimeout = 1 * time.Minute - image = "gcr.io/google_containers/node-problem-detector:v0.4.1" + image = "k8s.gcr.io/node-problem-detector:v0.4.1" ) f := framework.NewDefaultFramework("node-problem-detector") var c clientset.Interface diff --git a/test/e2e_node/remote/node_conformance.go b/test/e2e_node/remote/node_conformance.go index 59e322a77b7..625940bf190 100644 --- a/test/e2e_node/remote/node_conformance.go +++ b/test/e2e_node/remote/node_conformance.go @@ -53,7 +53,7 @@ func commandToString(c *exec.Cmd) string { // Image path constants. 
const ( - conformanceRegistry = "gcr.io/google_containers" + conformanceRegistry = "k8s.gcr.io" conformanceArch = runtime.GOARCH conformanceTarfile = "node_conformance.tar" conformanceTestBinary = "e2e_node.test" diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index a616dd9b1e1..0a8b7ac57d6 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -275,13 +275,13 @@ while true; do sleep 1; done }, { description: "should not be able to pull non-existing image from gcr.io", - image: "gcr.io/google_containers/invalid-image:invalid-tag", + image: "k8s.gcr.io/invalid-image:invalid-tag", phase: v1.PodPending, waiting: true, }, { description: "should be able to pull image from gcr.io", - image: "gcr.io/google_containers/alpine-with-bash:1.0", + image: "k8s.gcr.io/alpine-with-bash:1.0", phase: v1.PodRunning, waiting: false, }, diff --git a/test/fixtures/doc-yaml/admin/high-availability/etcd.yaml b/test/fixtures/doc-yaml/admin/high-availability/etcd.yaml index 3c73984ba91..d9f2b8e64d2 100644 --- a/test/fixtures/doc-yaml/admin/high-availability/etcd.yaml +++ b/test/fixtures/doc-yaml/admin/high-availability/etcd.yaml @@ -5,7 +5,7 @@ metadata: spec: hostNetwork: true containers: - - image: gcr.io/google_containers/etcd:2.0.9 + - image: k8s.gcr.io/etcd:2.0.9 name: etcd-container command: - /usr/local/bin/etcd diff --git a/test/fixtures/doc-yaml/admin/high-availability/kube-apiserver.yaml b/test/fixtures/doc-yaml/admin/high-availability/kube-apiserver.yaml index a41a2c666dc..a415ebc19d9 100644 --- a/test/fixtures/doc-yaml/admin/high-availability/kube-apiserver.yaml +++ b/test/fixtures/doc-yaml/admin/high-availability/kube-apiserver.yaml @@ -6,7 +6,7 @@ spec: hostNetwork: true containers: - name: kube-apiserver - image: gcr.io/google_containers/kube-apiserver:9680e782e08a1a1c94c656190011bd02 + image: k8s.gcr.io/kube-apiserver:9680e782e08a1a1c94c656190011bd02 command: - /bin/sh - -c 
diff --git a/test/fixtures/doc-yaml/admin/high-availability/kube-controller-manager.yaml b/test/fixtures/doc-yaml/admin/high-availability/kube-controller-manager.yaml index a7cfbd5785b..79851929788 100644 --- a/test/fixtures/doc-yaml/admin/high-availability/kube-controller-manager.yaml +++ b/test/fixtures/doc-yaml/admin/high-availability/kube-controller-manager.yaml @@ -10,7 +10,7 @@ spec: - /usr/local/bin/kube-controller-manager --master=127.0.0.1:8080 --cluster-name=e2e-test-bburns --cluster-cidr=10.245.0.0/16 --allocate-node-cidrs=true --cloud-provider=gce --service-account-private-key-file=/srv/kubernetes/server.key --v=2 1>>/var/log/kube-controller-manager.log --leader-elect 2>&1 - image: gcr.io/google_containers/kube-controller-manager:fda24638d51a48baa13c35337fcd4793 + image: k8s.gcr.io/kube-controller-manager:fda24638d51a48baa13c35337fcd4793 livenessProbe: httpGet: path: /healthz diff --git a/test/fixtures/doc-yaml/admin/high-availability/kube-scheduler.yaml b/test/fixtures/doc-yaml/admin/high-availability/kube-scheduler.yaml index eeef9bb6286..1da30918598 100644 --- a/test/fixtures/doc-yaml/admin/high-availability/kube-scheduler.yaml +++ b/test/fixtures/doc-yaml/admin/high-availability/kube-scheduler.yaml @@ -6,7 +6,7 @@ spec: hostNetwork: true containers: - name: kube-scheduler - image: gcr.io/google_containers/kube-scheduler:34d0b8f8b31e27937327961528739bc9 + image: k8s.gcr.io/kube-scheduler:34d0b8f8b31e27937327961528739bc9 command: - /bin/sh - -c diff --git a/test/fixtures/doc-yaml/admin/limitrange/invalid-pod.yaml b/test/fixtures/doc-yaml/admin/limitrange/invalid-pod.yaml index b63f25debab..ecb45dd95fe 100644 --- a/test/fixtures/doc-yaml/admin/limitrange/invalid-pod.yaml +++ b/test/fixtures/doc-yaml/admin/limitrange/invalid-pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: kubernetes-serve-hostname - image: gcr.io/google_containers/serve_hostname + image: k8s.gcr.io/serve_hostname resources: limits: cpu: "3" diff --git 
a/test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml b/test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml index c1ec54183be..d83e91267a4 100644 --- a/test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml +++ b/test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml @@ -7,7 +7,7 @@ metadata: spec: containers: - name: kubernetes-serve-hostname - image: gcr.io/google_containers/serve_hostname + image: k8s.gcr.io/serve_hostname resources: limits: cpu: "1" diff --git a/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml b/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml index a4796fc24c0..64145553558 100644 --- a/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml +++ b/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: gcr.io/google_containers/busybox + image: k8s.gcr.io/busybox command: [ "/bin/sh", "-c", "env" ] env: - name: MY_POD_NAME diff --git a/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml b/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml index 461691df221..204c4ddf59d 100644 --- a/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml +++ b/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml @@ -10,7 +10,7 @@ spec: - /bin/sh - -c - echo ok > /tmp/health; sleep 10; rm -rf /tmp/health; sleep 600 - image: gcr.io/google_containers/busybox + image: k8s.gcr.io/busybox livenessProbe: exec: command: diff --git a/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml b/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml index e7196c65593..1a6ef7bc64c 100644 --- a/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml +++ b/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml @@ -8,7 +8,7 @@ spec: containers: - args: - /server - image: gcr.io/google_containers/liveness + image: k8s.gcr.io/liveness livenessProbe: httpGet: path: /healthz diff --git 
a/test/fixtures/doc-yaml/user-guide/multi-pod.yaml b/test/fixtures/doc-yaml/user-guide/multi-pod.yaml index 1a1d758e994..c795ce6f4d2 100644 --- a/test/fixtures/doc-yaml/user-guide/multi-pod.yaml +++ b/test/fixtures/doc-yaml/user-guide/multi-pod.yaml @@ -42,7 +42,7 @@ metadata: spec: containers: - name: kubernetes-serve-hostname - image: gcr.io/google_containers/serve_hostname + image: k8s.gcr.io/serve_hostname resources: limits: cpu: "1" diff --git a/test/fixtures/doc-yaml/user-guide/secrets/secret-env-pod.yaml b/test/fixtures/doc-yaml/user-guide/secrets/secret-env-pod.yaml index a5d9c0ff758..d93d4095e14 100644 --- a/test/fixtures/doc-yaml/user-guide/secrets/secret-env-pod.yaml +++ b/test/fixtures/doc-yaml/user-guide/secrets/secret-env-pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: gcr.io/google_containers/busybox + image: k8s.gcr.io/busybox command: [ "/bin/sh", "-c", "env" ] env: - name: MY_SECRET_DATA diff --git a/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml b/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml index 8f87a8dea5e..1ff2e8652f1 100644 --- a/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml +++ b/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: gcr.io/google_containers/mounttest:0.8 + image: k8s.gcr.io/mounttest:0.8 command: [ "/mt", "--file_content=/etc/secret-volume/data-1" ] volumeMounts: # name must match the volume name below diff --git a/test/fixtures/pkg/kubectl/builder/kitten-rc.yaml b/test/fixtures/pkg/kubectl/builder/kitten-rc.yaml index 91f1aa06c30..48b15cc190c 100644 --- a/test/fixtures/pkg/kubectl/builder/kitten-rc.yaml +++ b/test/fixtures/pkg/kubectl/builder/kitten-rc.yaml @@ -13,7 +13,7 @@ spec: version: kitten spec: containers: - - image: gcr.io/google_containers/update-demo:kitten + - image: k8s.gcr.io/update-demo:kitten name: update-demo ports: - containerPort: 80 diff --git 
a/test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml b/test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml index c358d8aa282..958e054ec51 100644 --- a/test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml +++ b/test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml @@ -30,7 +30,7 @@ items: spec: containers: - name: kubernetes-serve-hostname - image: gcr.io/google_containers/serve_hostname + image: k8s.gcr.io/serve_hostname resources: limits: cpu: "1" diff --git a/test/images/image-util.sh b/test/images/image-util.sh index d9ff9e31e08..31c54bc1153 100755 --- a/test/images/image-util.sh +++ b/test/images/image-util.sh @@ -97,7 +97,7 @@ push() { fi for arch in ${archs}; do TAG=$(<${IMAGE}/VERSION) - gcloud docker -- push ${REGISTRY}/${IMAGE}-${arch}:${TAG} + gcloud docker --server=k8s.gcr.io -- push ${REGISTRY}/${IMAGE}-${arch}:${TAG} done } diff --git a/test/images/iperf/BASEIMAGE b/test/images/iperf/BASEIMAGE index 1bf3907f760..7f7f90c89a0 100644 --- a/test/images/iperf/BASEIMAGE +++ b/test/images/iperf/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=gcr.io/google_containers/ubuntu-slim:0.12 -arm=gcr.io/google_containers/ubuntu-slim-arm:0.12 -arm64=gcr.io/google_containers/ubuntu-slim-arm64:0.12 -ppc64le=gcr.io/google_containers/ubuntu-slim-ppc64le:0.12 +amd64=k8s.gcr.io/ubuntu-slim:0.12 +arm=k8s.gcr.io/ubuntu-slim-arm:0.12 +arm64=k8s.gcr.io/ubuntu-slim-arm64:0.12 +ppc64le=k8s.gcr.io/ubuntu-slim-ppc64le:0.12 diff --git a/test/images/logs-generator/README.md b/test/images/logs-generator/README.md index 8f89bb4a4b5..889ba2a7bdb 100644 --- a/test/images/logs-generator/README.md +++ b/test/images/logs-generator/README.md @@ -33,7 +33,7 @@ line in a given run of the container. 
Image is located in the public repository of Google Container Registry under the name ``` -gcr.io/google_containers/logs-generator:v0.1.1 +k8s.gcr.io/logs-generator:v0.1.1 ``` ## Examples @@ -42,13 +42,13 @@ gcr.io/google_containers/logs-generator:v0.1.1 docker run -i \ -e "LOGS_GENERATOR_LINES_TOTAL=10" \ -e "LOGS_GENERATOR_DURATION=1s" \ - gcr.io/google_containers/logs-generator:v0.1.1 + k8s.gcr.io/logs-generator:v0.1.1 ``` ``` kubectl run logs-generator \ --generator=run-pod/v1 \ - --image=gcr.io/google_containers/logs-generator:v0.1.1 \ + --image=k8s.gcr.io/logs-generator:v0.1.1 \ --restart=Never \ --env "LOGS_GENERATOR_LINES_TOTAL=1000" \ --env "LOGS_GENERATOR_DURATION=1m" diff --git a/test/images/pets/peer-finder/BASEIMAGE b/test/images/pets/peer-finder/BASEIMAGE index 425fce2600a..0b04ef6e4d5 100644 --- a/test/images/pets/peer-finder/BASEIMAGE +++ b/test/images/pets/peer-finder/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=gcr.io/google-containers/debian-base-amd64:0.3 -arm=gcr.io/google-containers/debian-base-arm:0.3 -arm64=gcr.io/google-containers/debian-base-arm64:0.3 -ppc64le=gcr.io/google-containers/debian-base-ppc64le:0.3 +amd64=k8s.gcr.io/debian-base-amd64:0.3 +arm=k8s.gcr.io/debian-base-arm:0.3 +arm64=k8s.gcr.io/debian-base-arm64:0.3 +ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3 diff --git a/test/images/pets/redis-installer/BASEIMAGE b/test/images/pets/redis-installer/BASEIMAGE index 425fce2600a..0b04ef6e4d5 100644 --- a/test/images/pets/redis-installer/BASEIMAGE +++ b/test/images/pets/redis-installer/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=gcr.io/google-containers/debian-base-amd64:0.3 -arm=gcr.io/google-containers/debian-base-arm:0.3 -arm64=gcr.io/google-containers/debian-base-arm64:0.3 -ppc64le=gcr.io/google-containers/debian-base-ppc64le:0.3 +amd64=k8s.gcr.io/debian-base-amd64:0.3 +arm=k8s.gcr.io/debian-base-arm:0.3 +arm64=k8s.gcr.io/debian-base-arm64:0.3 +ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3 diff --git a/test/images/pets/redis-installer/README.md 
b/test/images/pets/redis-installer/README.md index 04024c13592..e1c3587af34 100644 --- a/test/images/pets/redis-installer/README.md +++ b/test/images/pets/redis-installer/README.md @@ -4,7 +4,7 @@ The image in this directory is the init container for contrib/pets/redis but for You can execute the image locally via: ``` -$ docker run -it gcr.io/google_containers/redis-install-3.2.0:e2e --cmd --install-into=/opt --work-dir=/work-dir +$ docker run -it k8s.gcr.io/redis-install-3.2.0:e2e --cmd --install-into=/opt --work-dir=/work-dir ``` To share the installation with other containers mount the appropriate volumes as `--install-into` and `--work-dir`, where `install-into` is the directory to install redis into, and `work-dir` is the directory to install the user/admin supplied on-{start,change} hook scripts. diff --git a/test/images/pets/zookeeper-installer/BASEIMAGE b/test/images/pets/zookeeper-installer/BASEIMAGE index 425fce2600a..0b04ef6e4d5 100644 --- a/test/images/pets/zookeeper-installer/BASEIMAGE +++ b/test/images/pets/zookeeper-installer/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=gcr.io/google-containers/debian-base-amd64:0.3 -arm=gcr.io/google-containers/debian-base-arm:0.3 -arm64=gcr.io/google-containers/debian-base-arm64:0.3 -ppc64le=gcr.io/google-containers/debian-base-ppc64le:0.3 +amd64=k8s.gcr.io/debian-base-amd64:0.3 +arm=k8s.gcr.io/debian-base-arm:0.3 +arm64=k8s.gcr.io/debian-base-arm64:0.3 +ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3 diff --git a/test/images/pets/zookeeper-installer/README.md b/test/images/pets/zookeeper-installer/README.md index 071bc7aa484..598ca7afe9b 100644 --- a/test/images/pets/zookeeper-installer/README.md +++ b/test/images/pets/zookeeper-installer/README.md @@ -4,7 +4,7 @@ The image in this directory is the init container for contrib/pets/zookeeper but You can execute the image locally via: ``` -$ docker run -it gcr.io/google_containers/zookeeper-install-3.5.0-alpha:e2e --cmd --install-into=/opt --work-dir=/work-dir +$ docker run -it 
k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e --cmd --install-into=/opt --work-dir=/work-dir ``` To share the installation with other containers mount the appropriate volumes as `--install-into` and `--work-dir`, where `install-into` is the directory to install zookeeper into, and `work-dir` is the directory to install the user/admin supplied on-{start,change} hook scripts. diff --git a/test/images/resource-consumer/BASEIMAGE b/test/images/resource-consumer/BASEIMAGE index 425fce2600a..0b04ef6e4d5 100644 --- a/test/images/resource-consumer/BASEIMAGE +++ b/test/images/resource-consumer/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=gcr.io/google-containers/debian-base-amd64:0.3 -arm=gcr.io/google-containers/debian-base-arm:0.3 -arm64=gcr.io/google-containers/debian-base-arm64:0.3 -ppc64le=gcr.io/google-containers/debian-base-ppc64le:0.3 +amd64=k8s.gcr.io/debian-base-amd64:0.3 +arm=k8s.gcr.io/debian-base-arm:0.3 +arm64=k8s.gcr.io/debian-base-arm64:0.3 +ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3 diff --git a/test/images/resource-consumer/README.md b/test/images/resource-consumer/README.md index 19801af8e3d..742bafee369 100644 --- a/test/images/resource-consumer/README.md +++ b/test/images/resource-consumer/README.md @@ -48,7 +48,7 @@ Custom metrics in Prometheus format are exposed on "/metrics" endpoint. 
###CURL example ```console -$ kubectl run resource-consumer --image=gcr.io/google_containers/resource_consumer:beta --expose --service-overrides='{ "spec": { "type": "LoadBalancer" } }' --port 8080 +$ kubectl run resource-consumer --image=k8s.gcr.io/resource_consumer:beta --expose --service-overrides='{ "spec": { "type": "LoadBalancer" } }' --port 8080 $ kubectl get services resource-consumer ``` @@ -62,7 +62,7 @@ $ curl --data "millicores=300&durationSec=600" http://:8080/Consume ## Image -Docker image of Resource Consumer can be found in Google Container Registry as gcr.io/google_containers/resource_consumer:beta +Docker image of Resource Consumer can be found in Google Container Registry as k8s.gcr.io/resource_consumer:beta ## Use cases diff --git a/test/images/serve-hostname/README.md b/test/images/serve-hostname/README.md index 28d37336642..957e039610d 100644 --- a/test/images/serve-hostname/README.md +++ b/test/images/serve-hostname/README.md @@ -15,19 +15,19 @@ $ make all-push # Build for linux/amd64 (default) $ make push ARCH=amd64 -# ---> gcr.io/google_containers/serve_hostname-amd64:TAG +# ---> k8s.gcr.io/serve_hostname-amd64:TAG $ make push ARCH=arm -# ---> gcr.io/google_containers/serve_hostname-arm:TAG +# ---> k8s.gcr.io/serve_hostname-arm:TAG $ make push ARCH=arm64 -# ---> gcr.io/google_containers/serve_hostname-arm64:TAG +# ---> k8s.gcr.io/serve_hostname-arm64:TAG $ make push ARCH=ppc64le -# ---> gcr.io/google_containers/serve_hostname-ppc64le:TAG +# ---> k8s.gcr.io/serve_hostname-ppc64le:TAG $ make push ARCH=s390x -# ---> gcr.io/google_containers/serve_hostname-s390x:TAG +# ---> k8s.gcr.io/serve_hostname-s390x:TAG ``` Of course, if you don't want to push the images, run `make all-container` or `make container ARCH={target_arch}` instead. 
diff --git a/test/images/volumes-tester/ceph/Makefile b/test/images/volumes-tester/ceph/Makefile index 2883aafb646..c746d188194 100644 --- a/test/images/volumes-tester/ceph/Makefile +++ b/test/images/volumes-tester/ceph/Makefile @@ -13,7 +13,7 @@ # limitations under the License. TAG = 0.1 -PREFIX = gcr.io/google_containers +PREFIX = k8s.gcr.io all: push @@ -24,7 +24,7 @@ image: docker tag $(PREFIX)/volume-ceph $(PREFIX)/volume-ceph:$(TAG) # Add the version tag to the latest image push: image - gcloud docker -- push $(PREFIX)/volume-ceph # Push image tagged as latest to repository - gcloud docker -- push $(PREFIX)/volume-ceph:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-ceph # Push image tagged as latest to repository + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-ceph:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) clean: diff --git a/test/images/volumes-tester/gluster/Makefile b/test/images/volumes-tester/gluster/Makefile index 4aa5b11351e..d769a077a6f 100644 --- a/test/images/volumes-tester/gluster/Makefile +++ b/test/images/volumes-tester/gluster/Makefile @@ -13,7 +13,7 @@ # limitations under the License. 
TAG = 0.4 -PREFIX = gcr.io/google_containers +PREFIX = k8s.gcr.io all: push @@ -24,7 +24,7 @@ image: docker tag $(PREFIX)/volume-gluster $(PREFIX)/volume-gluster:$(TAG) # Add the version tag to the latest image push: image - gcloud docker -- push $(PREFIX)/volume-gluster # Push image tagged as latest to repository - gcloud docker -- push $(PREFIX)/volume-gluster:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-gluster # Push image tagged as latest to repository + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-gluster:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) clean: diff --git a/test/images/volumes-tester/iscsi/Makefile b/test/images/volumes-tester/iscsi/Makefile index dd830d4fd2b..c567d9baa70 100644 --- a/test/images/volumes-tester/iscsi/Makefile +++ b/test/images/volumes-tester/iscsi/Makefile @@ -13,7 +13,7 @@ # limitations under the License. TAG = 0.1 -PREFIX = gcr.io/google_containers +PREFIX = k8s.gcr.io all: push @@ -34,8 +34,8 @@ block: push: image # Push image tagged as latest to repository - gcloud docker -- push $(PREFIX)/volume-iscsi + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-iscsi # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) - gcloud docker -- push $(PREFIX)/volume-iscsi:$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-iscsi:$(TAG) clean: diff --git a/test/images/volumes-tester/nfs/Makefile b/test/images/volumes-tester/nfs/Makefile index 1e53b19c8b0..b49d08acbfa 100644 --- a/test/images/volumes-tester/nfs/Makefile +++ b/test/images/volumes-tester/nfs/Makefile @@ -13,7 +13,7 @@ # limitations under the License. 
TAG = 0.8 -PREFIX = gcr.io/google_containers +PREFIX = k8s.gcr.io all: push @@ -24,7 +24,7 @@ image: docker tag $(PREFIX)/volume-nfs $(PREFIX)/volume-nfs:$(TAG) # Add the version tag to the latest image push: image - gcloud docker -- push $(PREFIX)/volume-nfs # Push image tagged as latest to repository - gcloud docker -- push $(PREFIX)/volume-nfs:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-nfs # Push image tagged as latest to repository + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-nfs:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) clean: diff --git a/test/images/volumes-tester/rbd/Makefile b/test/images/volumes-tester/rbd/Makefile index dcf3c69f371..53d4c006223 100644 --- a/test/images/volumes-tester/rbd/Makefile +++ b/test/images/volumes-tester/rbd/Makefile @@ -13,7 +13,7 @@ # limitations under the License. 
TAG = 0.1 -PREFIX = gcr.io/google_containers +PREFIX = k8s.gcr.io all: push @@ -34,8 +34,8 @@ block: push: image # Push image tagged as latest to repository - gcloud docker -- push $(PREFIX)/volume-rbd + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-rbd # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) - gcloud docker -- push $(PREFIX)/volume-rbd:$(TAG) + gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-rbd:$(TAG) clean: diff --git a/test/integration/benchmark-controller.json b/test/integration/benchmark-controller.json index 00444f8900f..6b0c8feb9ac 100644 --- a/test/integration/benchmark-controller.json +++ b/test/integration/benchmark-controller.json @@ -17,7 +17,7 @@ "spec": { "containers": [{ "name": "test-container", - "image": "gcr.io/google_containers/pause-amd64:3.0" + "image": "k8s.gcr.io/pause-amd64:3.0" }] } } diff --git a/test/integration/framework/util.go b/test/integration/framework/util.go index 88f4ac52497..7639d58eb9d 100644 --- a/test/integration/framework/util.go +++ b/test/integration/framework/util.go @@ -40,7 +40,7 @@ import ( const ( // When these values are updated, also update cmd/kubelet/app/options/options.go // A copy of these values exist in e2e/framework/util.go. 
- currentPodInfraContainerImageName = "gcr.io/google_containers/pause" + currentPodInfraContainerImageName = "k8s.gcr.io/pause" currentPodInfraContainerImageVersion = "3.0" ) diff --git a/test/integration/master/synthetic_master_test.go b/test/integration/master/synthetic_master_test.go index d00f8766b21..aa063be5d2e 100644 --- a/test/integration/master/synthetic_master_test.go +++ b/test/integration/master/synthetic_master_test.go @@ -279,7 +279,7 @@ var deploymentExtensions string = ` "spec": { "containers": [{ "name": "nginx", - "image": "gcr.io/google-containers/nginx:1.7.9" + "image": "k8s.gcr.io/nginx:1.7.9" }] } } @@ -306,7 +306,7 @@ var deploymentApps string = ` "spec": { "containers": [{ "name": "nginx", - "image": "gcr.io/google-containers/nginx:1.7.9" + "image": "k8s.gcr.io/nginx:1.7.9" }] } } diff --git a/test/integration/scheduler/volume_binding_test.go b/test/integration/scheduler/volume_binding_test.go index e185ce72b77..dd13d9ef4fc 100644 --- a/test/integration/scheduler/volume_binding_test.go +++ b/test/integration/scheduler/volume_binding_test.go @@ -461,7 +461,7 @@ func makePod(name, ns string, pvcs []string) *v1.Pod { Containers: []v1.Container{ { Name: "write-pod", - Image: "gcr.io/google_containers/busybox:1.24", + Image: "k8s.gcr.io/busybox:1.24", Command: []string{"/bin/sh"}, Args: []string{"-c", "while true; do sleep 1; done"}, }, diff --git a/test/kubemark/resources/cluster-autoscaler_template.json b/test/kubemark/resources/cluster-autoscaler_template.json index f42c060e259..ffe4a61f8b3 100644 --- a/test/kubemark/resources/cluster-autoscaler_template.json +++ b/test/kubemark/resources/cluster-autoscaler_template.json @@ -14,7 +14,7 @@ "containers": [ { "name": "cluster-autoscaler", - "image": "gcr.io/google_containers/cluster-autoscaler:v1.0.0", + "image": "k8s.gcr.io/cluster-autoscaler:v1.0.0", "command": [ "./run.sh", 
"--kubernetes=https://{{master_ip}}:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/cluster_autoscaler.kubeconfig", diff --git a/test/kubemark/resources/heapster_template.json b/test/kubemark/resources/heapster_template.json index 491596b2ff8..5a9bb512ea1 100644 --- a/test/kubemark/resources/heapster_template.json +++ b/test/kubemark/resources/heapster_template.json @@ -33,7 +33,7 @@ "containers": [ { "name": "heapster", - "image": "gcr.io/google_containers/heapster:v1.3.0", + "image": "k8s.gcr.io/heapster:v1.3.0", "resources": { "requests": { "cpu": "{{METRICS_CPU}}m", @@ -55,7 +55,7 @@ }, { "name": "eventer", - "image": "gcr.io/google_containers/heapster:v1.3.0", + "image": "k8s.gcr.io/heapster:v1.3.0", "resources": { "requests": { "memory": "{{EVENTER_MEM}}Ki" diff --git a/test/kubemark/resources/hollow-node_template.yaml b/test/kubemark/resources/hollow-node_template.yaml index 06352442c00..3a16f1f3538 100644 --- a/test/kubemark/resources/hollow-node_template.yaml +++ b/test/kubemark/resources/hollow-node_template.yaml @@ -93,7 +93,7 @@ spec: cpu: {{HOLLOW_PROXY_CPU}}m memory: {{HOLLOW_PROXY_MEM}}Ki - name: hollow-node-problem-detector - image: gcr.io/google_containers/node-problem-detector:v0.4.1 + image: k8s.gcr.io/node-problem-detector:v0.4.1 env: - name: NODE_NAME valueFrom: diff --git a/test/kubemark/resources/start-kubemark-master.sh b/test/kubemark/resources/start-kubemark-master.sh index 4eddb383094..18419667f54 100755 --- a/test/kubemark/resources/start-kubemark-master.sh +++ b/test/kubemark/resources/start-kubemark-master.sh @@ -692,7 +692,7 @@ fi # Setup docker flags and load images of the master components. 
assemble-docker-flags -DOCKER_REGISTRY="gcr.io/google_containers" +DOCKER_REGISTRY="k8s.gcr.io" load-docker-images readonly audit_policy_file="/etc/audit_policy.config" diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index d7c88eff49d..a54f6323c1d 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -23,7 +23,7 @@ import ( const ( e2eRegistry = "gcr.io/kubernetes-e2e-test-images" - gcRegistry = "gcr.io/google-containers" + gcRegistry = "k8s.gcr.io" PrivateRegistry = "gcr.io/k8s-authenticated-test" sampleRegistry = "gcr.io/google-samples" ) From f7be352a67e85f8fb85af58de151adccda2a965f Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Thu, 14 Dec 2017 20:50:14 -0800 Subject: [PATCH 379/794] gcloud docker now auths k8s.gcr.io by default --- build/build-image/cross/Makefile | 2 +- build/debian-base/Makefile | 2 +- build/debian-hyperkube-base/Makefile | 2 +- build/debian-iptables/Makefile | 2 +- build/pause/Makefile | 4 ++-- cluster/addons/addon-manager/Makefile | 4 ++-- cluster/addons/fluentd-elasticsearch/es-image/Makefile | 2 +- .../addons/fluentd-elasticsearch/fluentd-es-image/Makefile | 2 +- cluster/addons/python-image/Makefile | 2 +- cluster/addons/registry/images/Makefile | 2 +- cluster/gce/gci/mounter/Makefile | 2 +- cluster/images/etcd-empty-dir-cleanup/Makefile | 2 +- cluster/images/etcd-version-monitor/Makefile | 2 +- cluster/images/etcd/Makefile | 4 ++-- cluster/images/hyperkube/Makefile | 4 ++-- examples/cluster-dns/images/backend/Makefile | 2 +- examples/cluster-dns/images/frontend/Makefile | 2 +- examples/explorer/Makefile | 2 +- examples/guestbook-go/Makefile | 2 +- examples/kubectl-container/Makefile | 2 +- examples/storage/cassandra/image/Makefile | 4 ++-- test/e2e_node/conformance/build/Makefile | 4 ++-- test/images/image-util.sh | 2 +- test/images/volumes-tester/ceph/Makefile | 4 ++-- test/images/volumes-tester/gluster/Makefile | 4 ++-- test/images/volumes-tester/iscsi/Makefile | 4 ++-- 
test/images/volumes-tester/nfs/Makefile | 4 ++-- test/images/volumes-tester/rbd/Makefile | 4 ++-- 28 files changed, 39 insertions(+), 39 deletions(-) diff --git a/build/build-image/cross/Makefile b/build/build-image/cross/Makefile index a8e773d09bb..a9dbc53a7ee 100644 --- a/build/build-image/cross/Makefile +++ b/build/build-image/cross/Makefile @@ -24,4 +24,4 @@ build: docker build --pull -t k8s.gcr.io/$(IMAGE):$(TAG) . push: build - gcloud docker --server=k8s.gcr.io -- push k8s.gcr.io/$(IMAGE):$(TAG) + gcloud docker -- push k8s.gcr.io/$(IMAGE):$(TAG) diff --git a/build/debian-base/Makefile b/build/debian-base/Makefile index 3c49cb05480..c3db5eab27e 100755 --- a/build/debian-base/Makefile +++ b/build/debian-base/Makefile @@ -71,7 +71,7 @@ endif rm -rf $(TEMP_DIR) push: build - gcloud docker --server=k8s.gcr.io -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) + gcloud docker -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) clean: docker rmi -f $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) || true diff --git a/build/debian-hyperkube-base/Makefile b/build/debian-hyperkube-base/Makefile index 46b5709daeb..0c89c1ded76 100644 --- a/build/debian-hyperkube-base/Makefile +++ b/build/debian-hyperkube-base/Makefile @@ -57,4 +57,4 @@ endif rm -rf $(TEMP_DIR) push: build - gcloud docker --server=k8s.gcr.io -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) + gcloud docker -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) diff --git a/build/debian-iptables/Makefile b/build/debian-iptables/Makefile index d408b6ae41b..b672f04752d 100644 --- a/build/debian-iptables/Makefile +++ b/build/debian-iptables/Makefile @@ -55,6 +55,6 @@ endif docker build --pull -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR) push: build - gcloud docker --server=k8s.gcr.io -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) + gcloud docker -- push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) all: push diff --git a/build/pause/Makefile b/build/pause/Makefile index a5a2ff6a87b..4be3f8ea2e9 100644 --- a/build/pause/Makefile +++ b/build/pause/Makefile @@ 
-87,13 +87,13 @@ endif push: .push-$(ARCH) .push-$(ARCH): .container-$(ARCH) - gcloud docker --server=k8s.gcr.io -- push $(IMAGE):$(TAG) + gcloud docker -- push $(IMAGE):$(TAG) touch $@ push-legacy: .push-legacy-$(ARCH) .push-legacy-$(ARCH): .container-$(ARCH) ifeq ($(ARCH),amd64) - gcloud docker --server=k8s.gcr.io -- push $(LEGACY_AMD64_IMAGE):$(TAG) + gcloud docker -- push $(LEGACY_AMD64_IMAGE):$(TAG) endif touch $@ diff --git a/cluster/addons/addon-manager/Makefile b/cluster/addons/addon-manager/Makefile index baa63d2b5b6..ceed1c9697b 100644 --- a/cluster/addons/addon-manager/Makefile +++ b/cluster/addons/addon-manager/Makefile @@ -46,12 +46,12 @@ build: docker build --pull -t $(IMAGE)-$(ARCH):$(VERSION) $(TEMP_DIR) push: build - gcloud docker --server=k8s.gcr.io -- push $(IMAGE)-$(ARCH):$(VERSION) + gcloud docker -- push $(IMAGE)-$(ARCH):$(VERSION) ifeq ($(ARCH),amd64) # Backward compatibility. TODO: deprecate this image tag docker rmi $(IMAGE):$(VERSION) 2>/dev/null || true docker tag $(IMAGE)-$(ARCH):$(VERSION) $(IMAGE):$(VERSION) - gcloud docker --server=k8s.gcr.io -- push $(IMAGE):$(VERSION) + gcloud docker -- push $(IMAGE):$(VERSION) endif clean: diff --git a/cluster/addons/fluentd-elasticsearch/es-image/Makefile b/cluster/addons/fluentd-elasticsearch/es-image/Makefile index 2ccd991602b..e6d950beae3 100755 --- a/cluster/addons/fluentd-elasticsearch/es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/es-image/Makefile @@ -22,7 +22,7 @@ build: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . 
push: - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/$(IMAGE):$(TAG) + gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG) binary: CGO_ENABLED=0 GOOS=linux go build -a -ldflags "-w" elasticsearch_logging_discovery.go diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile index c511e57b861..8f95ecde77d 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile @@ -22,4 +22,4 @@ build: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . push: - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/$(IMAGE):$(TAG) + gcloud docker -- push $(PREFIX)/$(IMAGE):$(TAG) diff --git a/cluster/addons/python-image/Makefile b/cluster/addons/python-image/Makefile index 0ae26b2a63c..d8f927a5b8d 100644 --- a/cluster/addons/python-image/Makefile +++ b/cluster/addons/python-image/Makefile @@ -21,5 +21,5 @@ build: docker build --pull -t "$(IMAGE):$(VERSION)" . push: - gcloud docker --server=k8s.gcr.io -- push "$(IMAGE):$(VERSION)" + gcloud docker -- push "$(IMAGE):$(VERSION)" diff --git a/cluster/addons/registry/images/Makefile b/cluster/addons/registry/images/Makefile index 338ef1c4945..566bf5a3744 100644 --- a/cluster/addons/registry/images/Makefile +++ b/cluster/addons/registry/images/Makefile @@ -21,4 +21,4 @@ build: docker build --pull -t $(REPO):$(TAG) . push: - gcloud docker --server=k8s.gcr.io -- push $(REPO):$(TAG) + gcloud docker -- push $(REPO):$(TAG) diff --git a/cluster/gce/gci/mounter/Makefile b/cluster/gce/gci/mounter/Makefile index bb2d91ea0ca..2af09207b77 100644 --- a/cluster/gce/gci/mounter/Makefile +++ b/cluster/gce/gci/mounter/Makefile @@ -22,7 +22,7 @@ container: docker build --pull -t ${REGISTRY}/${IMAGE}:${TAG} . 
push: - gcloud docker --server=k8s.gcr.io -- push ${REGISTRY}/${IMAGE}:${TAG} + gcloud docker -- push ${REGISTRY}/${IMAGE}:${TAG} upload: ./stage-upload.sh ${TAG} ${REGISTRY}/${IMAGE}:${TAG} diff --git a/cluster/images/etcd-empty-dir-cleanup/Makefile b/cluster/images/etcd-empty-dir-cleanup/Makefile index 950c024ba05..0142216f2a0 100644 --- a/cluster/images/etcd-empty-dir-cleanup/Makefile +++ b/cluster/images/etcd-empty-dir-cleanup/Makefile @@ -29,4 +29,4 @@ build: clean rm -rf etcdctl etcd-v$(ETCD_VERSION)-linux-amd64 etcd-v$(ETCD_VERSION)-linux-amd64.tar.gz push: build - gcloud docker --server=k8s.gcr.io -- push $(IMAGE):$(TAG) + gcloud docker -- push $(IMAGE):$(TAG) diff --git a/cluster/images/etcd-version-monitor/Makefile b/cluster/images/etcd-version-monitor/Makefile index 2c9cfa20865..aea20eaa2b8 100644 --- a/cluster/images/etcd-version-monitor/Makefile +++ b/cluster/images/etcd-version-monitor/Makefile @@ -40,7 +40,7 @@ build: docker build -t $(IMAGE) $(TEMP_DIR) push: build - gcloud docker --server=k8s.gcr.io -- push $(IMAGE) + gcloud docker -- push $(IMAGE) all: build diff --git a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile index 577b345850a..7f2978a1829 100644 --- a/cluster/images/etcd/Makefile +++ b/cluster/images/etcd/Makefile @@ -105,12 +105,12 @@ endif docker build --pull -t $(REGISTRY)/etcd-$(ARCH):$(REGISTRY_TAG) $(TEMP_DIR) push: build - gcloud docker --server=k8s.gcr.io -- push $(REGISTRY)/etcd-$(ARCH):$(REGISTRY_TAG) + gcloud docker -- push $(REGISTRY)/etcd-$(ARCH):$(REGISTRY_TAG) ifeq ($(ARCH),amd64) # Backward compatibility. 
TODO: deprecate this image tag docker tag $(REGISTRY)/etcd-$(ARCH):$(REGISTRY_TAG) $(REGISTRY)/etcd:$(REGISTRY_TAG) - gcloud docker --server=k8s.gcr.io -- push $(REGISTRY)/etcd:$(REGISTRY_TAG) + gcloud docker -- push $(REGISTRY)/etcd:$(REGISTRY_TAG) endif all: build diff --git a/cluster/images/hyperkube/Makefile b/cluster/images/hyperkube/Makefile index 7123fd3a08b..0dc17bbe45d 100644 --- a/cluster/images/hyperkube/Makefile +++ b/cluster/images/hyperkube/Makefile @@ -44,11 +44,11 @@ endif rm -rf "${TEMP_DIR}" push: build - gcloud docker --server=k8s.gcr.io -- push ${REGISTRY}/hyperkube-${ARCH}:${VERSION} + gcloud docker -- push ${REGISTRY}/hyperkube-${ARCH}:${VERSION} ifeq ($(ARCH),amd64) docker rmi ${REGISTRY}/hyperkube:${VERSION} 2>/dev/null || true docker tag ${REGISTRY}/hyperkube-${ARCH}:${VERSION} ${REGISTRY}/hyperkube:${VERSION} - gcloud docker --server=k8s.gcr.io -- push ${REGISTRY}/hyperkube:${VERSION} + gcloud docker -- push ${REGISTRY}/hyperkube:${VERSION} endif .PHONY: build push all diff --git a/examples/cluster-dns/images/backend/Makefile b/examples/cluster-dns/images/backend/Makefile index 4e2b938ddca..6a9fe53de77 100644 --- a/examples/cluster-dns/images/backend/Makefile +++ b/examples/cluster-dns/images/backend/Makefile @@ -22,6 +22,6 @@ image: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . push: image - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/$(IMAGE) + gcloud docker -- push $(PREFIX)/$(IMAGE) clean: diff --git a/examples/cluster-dns/images/frontend/Makefile b/examples/cluster-dns/images/frontend/Makefile index f3fd4d2f0a6..9b375525007 100644 --- a/examples/cluster-dns/images/frontend/Makefile +++ b/examples/cluster-dns/images/frontend/Makefile @@ -22,6 +22,6 @@ image: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . 
push: image - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/$(IMAGE) + gcloud docker -- push $(PREFIX)/$(IMAGE) clean: diff --git a/examples/explorer/Makefile b/examples/explorer/Makefile index 4240fdfd9a0..59bef7c2f4a 100644 --- a/examples/explorer/Makefile +++ b/examples/explorer/Makefile @@ -24,7 +24,7 @@ container: explorer docker build --pull -t k8s.gcr.io/explorer:$(TAG) . push: container - gcloud docker --server=k8s.gcr.io -- push k8s.gcr.io/explorer:$(TAG) + gcloud docker -- push k8s.gcr.io/explorer:$(TAG) clean: rm -f explorer diff --git a/examples/guestbook-go/Makefile b/examples/guestbook-go/Makefile index 25326193b4a..605916fdff3 100644 --- a/examples/guestbook-go/Makefile +++ b/examples/guestbook-go/Makefile @@ -29,7 +29,7 @@ build: # push the image to an registry push: - gcloud docker --server=k8s.gcr.io -- push ${REGISTRY}/guestbook:${VERSION} + gcloud docker -- push ${REGISTRY}/guestbook:${VERSION} # remove previous images and containers clean: diff --git a/examples/kubectl-container/Makefile b/examples/kubectl-container/Makefile index 4a2f88e2852..47fbfff923d 100644 --- a/examples/kubectl-container/Makefile +++ b/examples/kubectl-container/Makefile @@ -41,7 +41,7 @@ container: push: container $(if $(TAG),,$(error TAG is not defined. 
Use 'make tag' to see a suggestion)) - gcloud docker --server=k8s.gcr.io -- push k8s.gcr.io/kubectl:$(TAG) + gcloud docker -- push k8s.gcr.io/kubectl:$(TAG) clean: rm -f kubectl diff --git a/examples/storage/cassandra/image/Makefile b/examples/storage/cassandra/image/Makefile index 71cd0c6dfbb..ac8ef75e063 100644 --- a/examples/storage/cassandra/image/Makefile +++ b/examples/storage/cassandra/image/Makefile @@ -35,7 +35,7 @@ container-dev: build: container container-dev push: build - gcloud docker --server=k8s.gcr.io -- push ${PROJECT}/cassandra:${VERSION} - gcloud docker --server=k8s.gcr.io -- push ${PROJECT}/cassandra:${VERSION}-dev + gcloud docker -- push ${PROJECT}/cassandra:${VERSION} + gcloud docker -- push ${PROJECT}/cassandra:${VERSION}-dev .PHONY: all build push diff --git a/test/e2e_node/conformance/build/Makefile b/test/e2e_node/conformance/build/Makefile index 6ff2b4e5126..aa52d9eae4e 100644 --- a/test/e2e_node/conformance/build/Makefile +++ b/test/e2e_node/conformance/build/Makefile @@ -76,10 +76,10 @@ endif docker build --pull -t ${IMAGE_NAME}-${ARCH}:${VERSION} ${TEMP_DIR} push: build - gcloud docker --server=k8s.gcr.io -- push ${IMAGE_NAME}-${ARCH}:${VERSION} + gcloud docker -- push ${IMAGE_NAME}-${ARCH}:${VERSION} ifeq ($(ARCH),amd64) docker tag ${IMAGE_NAME}-${ARCH}:${VERSION} ${IMAGE_NAME}:${VERSION} - gcloud docker --server=k8s.gcr.io -- push ${IMAGE_NAME}:${VERSION} + gcloud docker -- push ${IMAGE_NAME}:${VERSION} endif .PHONY: all diff --git a/test/images/image-util.sh b/test/images/image-util.sh index 31c54bc1153..d9ff9e31e08 100755 --- a/test/images/image-util.sh +++ b/test/images/image-util.sh @@ -97,7 +97,7 @@ push() { fi for arch in ${archs}; do TAG=$(<${IMAGE}/VERSION) - gcloud docker --server=k8s.gcr.io -- push ${REGISTRY}/${IMAGE}-${arch}:${TAG} + gcloud docker -- push ${REGISTRY}/${IMAGE}-${arch}:${TAG} done } diff --git a/test/images/volumes-tester/ceph/Makefile b/test/images/volumes-tester/ceph/Makefile index 
c746d188194..b23e5383a3d 100644 --- a/test/images/volumes-tester/ceph/Makefile +++ b/test/images/volumes-tester/ceph/Makefile @@ -24,7 +24,7 @@ image: docker tag $(PREFIX)/volume-ceph $(PREFIX)/volume-ceph:$(TAG) # Add the version tag to the latest image push: image - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-ceph # Push image tagged as latest to repository - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-ceph:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) + gcloud docker -- push $(PREFIX)/volume-ceph # Push image tagged as latest to repository + gcloud docker -- push $(PREFIX)/volume-ceph:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) clean: diff --git a/test/images/volumes-tester/gluster/Makefile b/test/images/volumes-tester/gluster/Makefile index d769a077a6f..d18769d254e 100644 --- a/test/images/volumes-tester/gluster/Makefile +++ b/test/images/volumes-tester/gluster/Makefile @@ -24,7 +24,7 @@ image: docker tag $(PREFIX)/volume-gluster $(PREFIX)/volume-gluster:$(TAG) # Add the version tag to the latest image push: image - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-gluster # Push image tagged as latest to repository - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-gluster:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) + gcloud docker -- push $(PREFIX)/volume-gluster # Push image tagged as latest to repository + gcloud docker -- push $(PREFIX)/volume-gluster:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) clean: diff --git a/test/images/volumes-tester/iscsi/Makefile b/test/images/volumes-tester/iscsi/Makefile index c567d9baa70..00c69ef141a 100644 --- 
a/test/images/volumes-tester/iscsi/Makefile +++ b/test/images/volumes-tester/iscsi/Makefile @@ -34,8 +34,8 @@ block: push: image # Push image tagged as latest to repository - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-iscsi + gcloud docker -- push $(PREFIX)/volume-iscsi # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-iscsi:$(TAG) + gcloud docker -- push $(PREFIX)/volume-iscsi:$(TAG) clean: diff --git a/test/images/volumes-tester/nfs/Makefile b/test/images/volumes-tester/nfs/Makefile index b49d08acbfa..29123f0856d 100644 --- a/test/images/volumes-tester/nfs/Makefile +++ b/test/images/volumes-tester/nfs/Makefile @@ -24,7 +24,7 @@ image: docker tag $(PREFIX)/volume-nfs $(PREFIX)/volume-nfs:$(TAG) # Add the version tag to the latest image push: image - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-nfs # Push image tagged as latest to repository - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-nfs:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) + gcloud docker -- push $(PREFIX)/volume-nfs # Push image tagged as latest to repository + gcloud docker -- push $(PREFIX)/volume-nfs:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) clean: diff --git a/test/images/volumes-tester/rbd/Makefile b/test/images/volumes-tester/rbd/Makefile index 53d4c006223..68629fffaa2 100644 --- a/test/images/volumes-tester/rbd/Makefile +++ b/test/images/volumes-tester/rbd/Makefile @@ -34,8 +34,8 @@ block: push: image # Push image tagged as latest to repository - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-rbd + gcloud docker -- push $(PREFIX)/volume-rbd # Push version tagged image to repository (since this image is already pushed it will simply create 
or update version tag) - gcloud docker --server=k8s.gcr.io -- push $(PREFIX)/volume-rbd:$(TAG) + gcloud docker -- push $(PREFIX)/volume-rbd:$(TAG) clean: From 880a68ade6ad2b47b554a6572901f867bde21da2 Mon Sep 17 00:00:00 2001 From: John McMeeking Date: Mon, 18 Dec 2017 11:39:51 -0600 Subject: [PATCH 380/794] Fix garbage collector when leader-elect=false **What this PR does / why we need it**: In a 1.8.x master with --leader-elect=false, the garbage collector controller does not work. When deleting a deployment with v1meta.DeletePropagationForeground, the deployment had its deletionTimestamp set and a foreground Deletion finalizer was added, but the deployment, rs and pod were not deleted. This is an issue with how the garbage collector graph_builder behaves when the stopCh=nil. This PR creates a dummy stop channel for the garbage collector controller (and other controllers started by the controller-manager) so that they can work more like they do when when the controller-manager is configured with --leader-elect=true. 
**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*: Fixes #57044 **Special notes for your reviewer**: **Release note**: ```release-note Garbage collection doesn't work when the controller-manager uses --leader-elect=false ``` --- cmd/kube-controller-manager/app/controllermanager.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index d08e9658a8d..63044ac44b1 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -162,7 +162,9 @@ func Run(s *options.CMServer) error { } if !s.LeaderElection.LeaderElect { - run(nil) + stopCh := make(chan struct{}) + defer close(stopCh) + run(stopCh) panic("unreachable") } From ecba504974efac6ff2945f470e0f958594db6310 Mon Sep 17 00:00:00 2001 From: Yassine TIJANI Date: Thu, 14 Dec 2017 00:57:23 +0000 Subject: [PATCH 381/794] implementing predicates ordering --- .../algorithm/predicates/predicates.go | 45 ++++++++++++++-- .../pkg/scheduler/core/generic_scheduler.go | 53 ++++++++++--------- .../scheduler/core/generic_scheduler_test.go | 11 +++- plugin/pkg/scheduler/scheduler_test.go | 3 ++ 4 files changed, 83 insertions(+), 29 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index 6a8d78ada1d..8d458401f98 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -49,9 +49,25 @@ import ( ) const ( - MatchInterPodAffinity = "MatchInterPodAffinity" - CheckVolumeBinding = "CheckVolumeBinding" - + MatchInterPodAffinity = "MatchInterPodAffinity" + CheckVolumeBinding = "CheckVolumeBinding" + CheckNodeConditionPred = "CheckNodeCondition" + GeneralPred = "GeneralPredicates" + HostNamePred = "HostName" + 
PodFitsHostPortsPred = "PodFitsHostPorts" + MatchNodeSelectorPred = "MatchNodeSelector" + PodFitsResourcesPred = "PodFitsResources" + NoDiskConflictPred = "NoDiskConflict" + PodToleratesNodeTaintsPred = "PodToleratesNodeTaints" + PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints" + CheckNodeLabelPresencePred = "CheckNodeLabelPresence" + checkServiceAffinityPred = "checkServiceAffinity" + MaxEBSVolumeCountPred = "MaxEBSVolumeCount" + MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount" + MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount" + NoVolumeZoneConflictPred = "NoVolumeZoneConflict" + CheckNodeMemoryPressurePred = "CheckNodeMemoryPressure" + CheckNodeDiskPressure = "CheckNodeDiskPressure" // DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE // GCE instances can have up to 16 PD volumes attached. DefaultMaxGCEPDVolumes = 16 @@ -79,6 +95,21 @@ const ( // For example: // https://github.com/kubernetes/kubernetes/blob/36a218e/plugin/pkg/scheduler/factory/factory.go#L422 +// IMPORTANT: this list contains the ordering of the predicates, if you develop a new predicates +// it is mandatory to add its name on this list. 
+// otherwise it won't be processed, see generic_scheduler#podFitsOnNode() +// the order is based on the restrictiveness & complexity of predicates +// design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md +var ( + predicatesOrdering = []string{CheckNodeConditionPred, + GeneralPred, HostNamePred, PodFitsHostPortsPred, + MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred, + PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred, + checkServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, + MaxAzureDiskVolumeCountPred, CheckVolumeBinding, NoVolumeZoneConflictPred, + CheckNodeMemoryPressurePred, CheckNodeDiskPressure, MatchInterPodAffinity} +) + // NodeInfo: Other types for predicate functions... type NodeInfo interface { GetNodeInfo(nodeID string) (*v1.Node, error) @@ -93,6 +124,14 @@ type CachedPersistentVolumeInfo struct { corelisters.PersistentVolumeLister } +func GetPredicatesOrdering() []string { + return predicatesOrdering +} + +func SetPredicatesOrdering(names []string) { + predicatesOrdering = names +} + func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) { return c.Get(pvID) } diff --git a/plugin/pkg/scheduler/core/generic_scheduler.go b/plugin/pkg/scheduler/core/generic_scheduler.go index 6505cbe58a4..ae2ccdfac3e 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler.go +++ b/plugin/pkg/scheduler/core/generic_scheduler.go @@ -444,34 +444,37 @@ func podFitsOnNode( // TODO(bsalamat): consider using eCache and adding proper eCache invalidations // when pods are nominated or their nominations change. eCacheAvailable = eCacheAvailable && !podsAdded - for predicateKey, predicate := range predicateFuncs { - if eCacheAvailable { - // PredicateWithECache will return its cached predicate results. 
- fit, reasons, invalid = ecache.PredicateWithECache(pod.GetName(), info.Node().GetName(), predicateKey, equivalenceHash) - } - - // TODO(bsalamat): When one predicate fails and fit is false, why do we continue - // checking other predicates? - if !eCacheAvailable || invalid { - // we need to execute predicate functions since equivalence cache does not work - fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse) - if err != nil { - return false, []algorithm.PredicateFailureReason{}, err - } + for _, predicateKey := range predicates.GetPredicatesOrdering() { + //TODO (yastij) : compute average predicate restrictiveness to export it as promethus metric + if predicate, exist := predicateFuncs[predicateKey]; exist { if eCacheAvailable { - // Store data to update eCache after this loop. - if res, exists := predicateResults[predicateKey]; exists { - res.Fit = res.Fit && fit - res.FailReasons = append(res.FailReasons, reasons...) - predicateResults[predicateKey] = res - } else { - predicateResults[predicateKey] = HostPredicate{Fit: fit, FailReasons: reasons} + // PredicateWithECache will return its cached predicate results. + fit, reasons, invalid = ecache.PredicateWithECache(pod.GetName(), info.Node().GetName(), predicateKey, equivalenceHash) + } + + // TODO(bsalamat): When one predicate fails and fit is false, why do we continue + // checking other predicates? + if !eCacheAvailable || invalid { + // we need to execute predicate functions since equivalence cache does not work + fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse) + if err != nil { + return false, []algorithm.PredicateFailureReason{}, err + } + if eCacheAvailable { + // Store data to update eCache after this loop. + if res, exists := predicateResults[predicateKey]; exists { + res.Fit = res.Fit && fit + res.FailReasons = append(res.FailReasons, reasons...) 
+ predicateResults[predicateKey] = res + } else { + predicateResults[predicateKey] = HostPredicate{Fit: fit, FailReasons: reasons} + } } } - } - if !fit { - // eCache is available and valid, and predicates result is unfit, record the fail reasons - failedPredicates = append(failedPredicates, reasons...) + if !fit { + // eCache is available and valid, and predicates result is unfit, record the fail reasons + failedPredicates = append(failedPredicates, reasons...) + } } } } diff --git a/plugin/pkg/scheduler/core/generic_scheduler_test.go b/plugin/pkg/scheduler/core/generic_scheduler_test.go index baa76414f09..1b1b9d15c6f 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler_test.go +++ b/plugin/pkg/scheduler/core/generic_scheduler_test.go @@ -42,6 +42,10 @@ import ( schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" ) +var ( + order = []string{"false", "true", "matches", "nopods", predicates.MatchInterPodAffinity} +) + func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil } @@ -181,6 +185,7 @@ func TestSelectHost(t *testing.T) { } func TestGenericScheduler(t *testing.T) { + predicates.SetPredicatesOrdering(order) tests := []struct { name string predicates map[string]algorithm.FitPredicate @@ -401,6 +406,7 @@ func TestGenericScheduler(t *testing.T) { } func TestFindFitAllError(t *testing.T) { + predicates.SetPredicatesOrdering(order) nodes := []string{"3", "2", "1"} predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate} nodeNameToInfo := map[string]*schedulercache.NodeInfo{ @@ -430,8 +436,9 @@ func TestFindFitAllError(t *testing.T) { } func TestFindFitSomeError(t *testing.T) { + predicates.SetPredicatesOrdering(order) nodes := []string{"3", "2", "1"} - predicates := map[string]algorithm.FitPredicate{"true": truePredicate, 
"match": matchesPredicate} + predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate} pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1"}} nodeNameToInfo := map[string]*schedulercache.NodeInfo{ "3": schedulercache.NewNodeInfo(), @@ -741,6 +748,7 @@ var negPriority, lowPriority, midPriority, highPriority, veryHighPriority = int3 // TestSelectNodesForPreemption tests selectNodesForPreemption. This test assumes // that podsFitsOnNode works correctly and is tested separately. func TestSelectNodesForPreemption(t *testing.T) { + predicates.SetPredicatesOrdering(order) tests := []struct { name string predicates map[string]algorithm.FitPredicate @@ -879,6 +887,7 @@ func TestSelectNodesForPreemption(t *testing.T) { // TestPickOneNodeForPreemption tests pickOneNodeForPreemption. func TestPickOneNodeForPreemption(t *testing.T) { + predicates.SetPredicatesOrdering(order) tests := []struct { name string predicates map[string]algorithm.FitPredicate diff --git a/plugin/pkg/scheduler/scheduler_test.go b/plugin/pkg/scheduler/scheduler_test.go index c3c8ccab9a6..c017cc4219c 100644 --- a/plugin/pkg/scheduler/scheduler_test.go +++ b/plugin/pkg/scheduler/scheduler_test.go @@ -43,6 +43,8 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" ) +var order = []string{"VolumeBindingChecker"} + type fakeBinder struct { b func(binding *v1.Binding) error } @@ -637,6 +639,7 @@ func makePredicateError(failReason string) error { } func TestSchedulerWithVolumeBinding(t *testing.T) { + predicates.SetPredicatesOrdering(order) findErr := fmt.Errorf("find err") assumeErr := fmt.Errorf("assume err") bindErr := fmt.Errorf("bind err") From 2f1108451f468f8dc03318013f9a2fbcf7b59692 Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Tue, 5 Dec 2017 16:58:01 -0500 Subject: [PATCH 382/794] Remove hard-coded pod-controller check This allows pods with third-party, or unknown controllers to be drained successfully. 
--- pkg/kubectl/cmd/drain.go | 68 ++++++++++------------------------------ 1 file changed, 16 insertions(+), 52 deletions(-) diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index d43f9178543..52f6a619cd5 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -338,38 +338,8 @@ func (o *DrainOptions) deleteOrEvictPodsSimple(nodeInfo *resource.Info) error { return err } -func (o *DrainOptions) getController(namespace string, controllerRef *metav1.OwnerReference) (interface{}, error) { - switch controllerRef.Kind { - case "ReplicationController": - return o.client.CoreV1().ReplicationControllers(namespace).Get(controllerRef.Name, metav1.GetOptions{}) - case "DaemonSet": - return o.client.ExtensionsV1beta1().DaemonSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) - case "Job": - return o.client.BatchV1().Jobs(namespace).Get(controllerRef.Name, metav1.GetOptions{}) - case "ReplicaSet": - return o.client.ExtensionsV1beta1().ReplicaSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) - case "StatefulSet": - return o.client.AppsV1beta1().StatefulSets(namespace).Get(controllerRef.Name, metav1.GetOptions{}) - } - return nil, fmt.Errorf("Unknown controller kind %q", controllerRef.Kind) -} - -func (o *DrainOptions) getPodController(pod corev1.Pod) (*metav1.OwnerReference, error) { - controllerRef := metav1.GetControllerOf(&pod) - if controllerRef == nil { - return nil, nil - } - - // We assume the only reason for an error is because the controller is - // gone/missing, not for any other cause. 
- // TODO(mml): something more sophisticated than this - // TODO(juntee): determine if it's safe to remove getController(), - // so that drain can work for controller types that we don't know about - _, err := o.getController(pod.Namespace, controllerRef) - if err != nil { - return nil, err - } - return controllerRef, nil +func (o *DrainOptions) getPodController(pod corev1.Pod) *metav1.OwnerReference { + return metav1.GetControllerOf(&pod) } func (o *DrainOptions) unreplicatedFilter(pod corev1.Pod) (bool, *warning, *fatal) { @@ -378,21 +348,15 @@ func (o *DrainOptions) unreplicatedFilter(pod corev1.Pod) (bool, *warning, *fata return true, nil, nil } - controllerRef, err := o.getPodController(pod) - if err != nil { - // if we're forcing, remove orphaned pods with a warning - if apierrors.IsNotFound(err) && o.Force { - return true, &warning{err.Error()}, nil - } - return false, nil, &fatal{err.Error()} - } + controllerRef := o.getPodController(pod) if controllerRef != nil { return true, nil, nil } - if !o.Force { - return false, nil, &fatal{kUnmanagedFatal} + if o.Force { + return true, &warning{kUnmanagedWarning}, nil } - return true, &warning{kUnmanagedWarning}, nil + + return false, nil, &fatal{kUnmanagedFatal} } func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal) { @@ -403,23 +367,23 @@ func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal) // The exception is for pods that are orphaned (the referencing // management resource - including DaemonSet - is not found). // Such pods will be deleted if --force is used. 
- controllerRef, err := o.getPodController(pod) - if err != nil { - // if we're forcing, remove orphaned pods with a warning + controllerRef := o.getPodController(pod) + if controllerRef == nil || controllerRef.Kind != "DaemonSet" { + return true, nil, nil + } + + if _, err := o.client.ExtensionsV1beta1().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil { + // remove orphaned pods with a warning if --force is used if apierrors.IsNotFound(err) && o.Force { return true, &warning{err.Error()}, nil } return false, nil, &fatal{err.Error()} } - if controllerRef == nil || controllerRef.Kind != "DaemonSet" { - return true, nil, nil - } - if _, err := o.client.ExtensionsV1beta1().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil { - return false, nil, &fatal{err.Error()} - } + if !o.IgnoreDaemonsets { return false, nil, &fatal{kDaemonsetFatal} } + return false, &warning{kDaemonsetWarning}, nil } From 015e20cb1598d41e814710e80b596fd987a11035 Mon Sep 17 00:00:00 2001 From: Rohit Jog Date: Wed, 29 Nov 2017 14:08:28 -0800 Subject: [PATCH 383/794] e2e test layout changes for vsphere (#398) --- hack/.golint_failures | 2 + test/e2e/storage/BUILD | 34 +-- test/e2e/storage/empty_dir_wrapper.go | 4 +- test/e2e/storage/flexvolume.go | 3 +- test/e2e/storage/pd.go | 3 +- .../storage/persistent_volumes-disruptive.go | 169 +----------- test/e2e/storage/persistent_volumes-gce.go | 3 +- test/e2e/storage/persistent_volumes-local.go | 5 +- test/e2e/storage/persistent_volumes.go | 3 +- test/e2e/storage/pvc_protection.go | 3 +- test/e2e/storage/utils/BUILD | 37 +++ test/e2e/storage/{ => utils}/framework.go | 2 +- test/e2e/storage/utils/utils.go | 240 ++++++++++++++++++ test/e2e/storage/volume_expand.go | 3 +- test/e2e/storage/volume_io.go | 11 +- test/e2e/storage/volume_metrics.go | 3 +- test/e2e/storage/volume_provisioning.go | 3 +- test/e2e/storage/volumes.go | 8 +- test/e2e/storage/vsphere/BUILD | 67 +++++ 
.../persistent_volumes-vsphere.go | 9 +- .../storage/{ => vsphere}/pv_reclaimpolicy.go | 7 +- .../{ => vsphere}/pvc_label_selector.go | 7 +- .../storage/{ => vsphere}/vsphere_scale.go | 5 +- .../{ => vsphere}/vsphere_statefulsets.go | 5 +- .../storage/{ => vsphere}/vsphere_stress.go | 5 +- .../storage/{ => vsphere}/vsphere_utils.go | 17 +- .../vsphere_volume_cluster_ds.go | 5 +- .../{ => vsphere}/vsphere_volume_datastore.go | 5 +- .../vsphere_volume_diskformat.go | 5 +- .../{ => vsphere}/vsphere_volume_disksize.go | 5 +- .../{ => vsphere}/vsphere_volume_fstype.go | 5 +- .../vsphere_volume_master_restart.go | 5 +- .../vsphere_volume_node_poweroff.go | 5 +- .../{ => vsphere}/vsphere_volume_ops_storm.go | 5 +- .../{ => vsphere}/vsphere_volume_perf.go | 5 +- .../{ => vsphere}/vsphere_volume_placement.go | 5 +- .../vsphere_volume_vsan_policy.go | 6 +- 37 files changed, 462 insertions(+), 252 deletions(-) create mode 100644 test/e2e/storage/utils/BUILD rename test/e2e/storage/{ => utils}/framework.go (97%) create mode 100644 test/e2e/storage/utils/utils.go create mode 100644 test/e2e/storage/vsphere/BUILD rename test/e2e/storage/{ => vsphere}/persistent_volumes-vsphere.go (96%) rename test/e2e/storage/{ => vsphere}/pv_reclaimpolicy.go (97%) rename test/e2e/storage/{ => vsphere}/pvc_label_selector.go (96%) rename test/e2e/storage/{ => vsphere}/vsphere_scale.go (98%) rename test/e2e/storage/{ => vsphere}/vsphere_statefulsets.go (98%) rename test/e2e/storage/{ => vsphere}/vsphere_stress.go (98%) rename test/e2e/storage/{ => vsphere}/vsphere_utils.go (95%) rename test/e2e/storage/{ => vsphere}/vsphere_volume_cluster_ds.go (96%) rename test/e2e/storage/{ => vsphere}/vsphere_volume_datastore.go (95%) rename test/e2e/storage/{ => vsphere}/vsphere_volume_diskformat.go (98%) rename test/e2e/storage/{ => vsphere}/vsphere_volume_disksize.go (96%) rename test/e2e/storage/{ => vsphere}/vsphere_volume_fstype.go (98%) rename test/e2e/storage/{ => 
vsphere}/vsphere_volume_master_restart.go (97%) rename test/e2e/storage/{ => vsphere}/vsphere_volume_node_poweroff.go (98%) rename test/e2e/storage/{ => vsphere}/vsphere_volume_ops_storm.go (96%) rename test/e2e/storage/{ => vsphere}/vsphere_volume_perf.go (98%) rename test/e2e/storage/{ => vsphere}/vsphere_volume_placement.go (99%) rename test/e2e/storage/{ => vsphere}/vsphere_volume_vsan_policy.go (99%) diff --git a/hack/.golint_failures b/hack/.golint_failures index 6cd1db4fbb1..1f2f6c9061f 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -796,6 +796,8 @@ test/e2e/scalability test/e2e/scheduling test/e2e/servicecatalog test/e2e/storage +test/e2e/storage/utils +test/e2e/storage/vsphere test/e2e/ui test/e2e/upgrades test/e2e/upgrades/apps diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 13969e42c24..f7c76328535 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -10,66 +10,42 @@ go_library( srcs = [ "empty_dir_wrapper.go", "flexvolume.go", - "framework.go", "pd.go", "persistent_volumes.go", "persistent_volumes-disruptive.go", "persistent_volumes-gce.go", "persistent_volumes-local.go", - "persistent_volumes-vsphere.go", - "pv_reclaimpolicy.go", - "pvc_label_selector.go", "pvc_protection.go", "volume_expand.go", "volume_io.go", "volume_metrics.go", "volume_provisioning.go", "volumes.go", - "vsphere_scale.go", - "vsphere_statefulsets.go", - "vsphere_stress.go", - "vsphere_utils.go", - "vsphere_volume_cluster_ds.go", - "vsphere_volume_datastore.go", - "vsphere_volume_diskformat.go", - "vsphere_volume_disksize.go", - "vsphere_volume_fstype.go", - "vsphere_volume_master_restart.go", - "vsphere_volume_node_poweroff.go", - "vsphere_volume_ops_storm.go", - "vsphere_volume_perf.go", - "vsphere_volume_placement.go", - "vsphere_volume_vsan_policy.go", ], importpath = "k8s.io/kubernetes/test/e2e/storage", deps = [ "//pkg/api/testapi:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", 
"//pkg/apis/storage/v1/util:go_default_library", - "//pkg/cloudprovider/providers/vsphere:go_default_library", - "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/util/slice:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/generated:go_default_library", + "//test/e2e/storage/utils:go_default_library", + "//test/e2e/storage/vsphere:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", - "//vendor/github.com/vmware/govmomi/find:go_default_library", - "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", @@ -101,6 +77,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//test/e2e/storage/utils:all-srcs", + "//test/e2e/storage/vsphere:all-srcs", + ], tags = ["automanaged"], ) diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index 653c2a0f970..4f1a45898ed 100644 --- 
a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -30,6 +30,8 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + + "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -51,7 +53,7 @@ const ( wrappedVolumeRaceRCNamePrefix = "wrapped-volume-race-" ) -var _ = SIGDescribe("EmptyDir wrapper volumes", func() { +var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { f := framework.NewDefaultFramework("emptydir-wrapper") It("should not conflict", func() { diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go index 862c692c286..99aafb95c06 100644 --- a/test/e2e/storage/flexvolume.go +++ b/test/e2e/storage/flexvolume.go @@ -28,6 +28,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/generated" + "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -136,7 +137,7 @@ func sshAndLog(cmd, host string) { } } -var _ = SIGDescribe("Flexvolumes [Disruptive] [Feature:FlexVolume]", func() { +var _ = utils.SIGDescribe("Flexvolumes [Disruptive] [Feature:FlexVolume]", func() { f := framework.NewDefaultFramework("flexvolume") // note that namespace deletion is handled by delete-namespace flag diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index ae7041d89e1..6ffecb194ea 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -40,6 +40,7 @@ import ( v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -52,7 +53,7 @@ const ( minNodes = 2 ) -var _ = SIGDescribe("Pod Disks", func() { +var _ = utils.SIGDescribe("Pod Disks", func() { var ( ns string cs clientset.Interface diff --git a/test/e2e/storage/persistent_volumes-disruptive.go b/test/e2e/storage/persistent_volumes-disruptive.go index b43d17e2e19..feb15c6a5a9 100644 --- a/test/e2e/storage/persistent_volumes-disruptive.go +++ 
b/test/e2e/storage/persistent_volumes-disruptive.go @@ -18,18 +18,17 @@ package storage import ( "fmt" - "strings" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) @@ -37,17 +36,12 @@ type disruptiveTest struct { testItStmt string runTest testBody } -type kubeletOpt string const ( - MinNodes = 2 - NodeStateTimeout = 1 * time.Minute - kStart kubeletOpt = "start" - kStop kubeletOpt = "stop" - kRestart kubeletOpt = "restart" + MinNodes = 2 ) -var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() { +var _ = utils.SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() { f := framework.NewDefaultFramework("disruptive-pv") var ( @@ -223,11 +217,11 @@ var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() { disruptiveTestTable := []disruptiveTest{ { testItStmt: "Should test that a file written to the mount before kubelet restart is readable after restart.", - runTest: testKubeletRestartsAndRestoresMount, + runTest: utils.TestKubeletRestartsAndRestoresMount, }, { testItStmt: "Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.", - runTest: testVolumeUnmountsFromDeletedPod, + runTest: utils.TestVolumeUnmountsFromDeletedPod, }, } @@ -243,61 +237,6 @@ var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() { }) }) -// testKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts -func testKubeletRestartsAndRestoresMount(c clientset.Interface, f 
*framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { - By("Writing to the volume.") - file := "/mnt/_SUCCESS" - out, err := podExec(clientPod, fmt.Sprintf("touch %s", file)) - framework.Logf(out) - Expect(err).NotTo(HaveOccurred()) - - By("Restarting kubelet") - kubeletCommand(kRestart, c, clientPod) - - By("Testing that written file is accessible.") - out, err = podExec(clientPod, fmt.Sprintf("cat %s", file)) - framework.Logf(out) - Expect(err).NotTo(HaveOccurred()) - framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file) -} - -// testVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down. -func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { - nodeIP, err := framework.GetHostExternalAddress(c, clientPod) - Expect(err).NotTo(HaveOccurred()) - nodeIP = nodeIP + ":22" - - By("Expecting the volume mount to be found.") - result, err := framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider) - framework.LogSSHResult(result) - Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") - Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) - - By("Stopping the kubelet.") - kubeletCommand(kStop, c, clientPod) - defer func() { - if err != nil { - kubeletCommand(kStart, c, clientPod) - } - }() - By(fmt.Sprintf("Deleting Pod %q", clientPod.Name)) - err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - By("Starting the kubelet and waiting for pod to delete.") - kubeletCommand(kStart, c, clientPod) - err = f.WaitForPodTerminated(clientPod.Name, "") - if !apierrs.IsNotFound(err) && err != nil { - Expect(err).NotTo(HaveOccurred(), "Expected 
pod to terminate.") - } - - By("Expecting the volume mount not to be found.") - result, err = framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider) - framework.LogSSHResult(result) - Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") - Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).") - framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName) -} - // initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed // by the test. func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { @@ -339,101 +278,3 @@ func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, framework.DeletePersistentVolumeClaim(c, pvc.Name, ns) framework.DeletePersistentVolume(c, pv.Name) } - -// kubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits -// for the desired statues.. -// - First issues the command via `systemctl` -// - If `systemctl` returns stderr "command not found, issues the command via `service` -// - If `service` also returns stderr "command not found", the test is aborted. 
-// Allowed kubeletOps are `kStart`, `kStop`, and `kRestart` -func kubeletCommand(kOp kubeletOpt, c clientset.Interface, pod *v1.Pod) { - command := "" - sudoPresent := false - systemctlPresent := false - kubeletPid := "" - - nodeIP, err := framework.GetHostExternalAddress(c, pod) - Expect(err).NotTo(HaveOccurred()) - nodeIP = nodeIP + ":22" - - framework.Logf("Checking if sudo command is present") - sshResult, err := framework.SSH("sudo --version", nodeIP, framework.TestContext.Provider) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) - if !strings.Contains(sshResult.Stderr, "command not found") { - sudoPresent = true - } - - framework.Logf("Checking if systemctl command is present") - sshResult, err = framework.SSH("systemctl --version", nodeIP, framework.TestContext.Provider) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) - if !strings.Contains(sshResult.Stderr, "command not found") { - command = fmt.Sprintf("systemctl %s kubelet", string(kOp)) - systemctlPresent = true - } else { - command = fmt.Sprintf("service kubelet %s", string(kOp)) - } - if sudoPresent { - command = fmt.Sprintf("sudo %s", command) - } - - if kOp == kRestart { - kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent) - } - - framework.Logf("Attempting `%s`", command) - sshResult, err = framework.SSH(command, nodeIP, framework.TestContext.Provider) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) - framework.LogSSHResult(sshResult) - Expect(sshResult.Code).To(BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult) - - if kOp == kStop { - if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { - framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) - } - } - if kOp == kRestart { - // Wait for a minute to check if kubelet Pid is getting changed - isPidChanged := false - for 
start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) { - kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent) - if kubeletPid != kubeletPidAfterRestart { - isPidChanged = true - break - } - } - Expect(isPidChanged).To(BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet") - framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back") - time.Sleep(30 * time.Second) - } - if kOp == kStart || kOp == kRestart { - // For kubelet start and restart operations, Wait until Node becomes Ready - if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { - framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName) - } - } -} - -// return the Main PID of the Kubelet Process -func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string { - command := "" - if systemctlPresent { - command = "systemctl status kubelet | grep 'Main PID'" - } else { - command = "service kubelet status | grep 'Main PID'" - } - if sudoPresent { - command = fmt.Sprintf("sudo %s", command) - } - framework.Logf("Attempting `%s`", command) - sshResult, err := framework.SSH(command, nodeIP, framework.TestContext.Provider) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", nodeIP)) - framework.LogSSHResult(sshResult) - Expect(sshResult.Code).To(BeZero(), "Failed to get kubelet PID") - Expect(sshResult.Stdout).NotTo(BeEmpty(), "Kubelet Main PID should not be Empty") - return sshResult.Stdout -} - -// podExec wraps RunKubectl to execute a bash cmd in target pod -func podExec(pod *v1.Pod, bashExec string) (string, error) { - return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec) -} diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go index 4609fd56074..08a2c4afa4a 100644 --- 
a/test/e2e/storage/persistent_volumes-gce.go +++ b/test/e2e/storage/persistent_volumes-gce.go @@ -26,6 +26,7 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) // verifyGCEDiskAttached performs a sanity check to verify the PD attached to the node @@ -51,7 +52,7 @@ func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework. } // Testing configurations of single a PV/PVC pair attached to a GCE PD -var _ = SIGDescribe("PersistentVolumes GCEPD", func() { +var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() { var ( c clientset.Interface diskName string diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index fb873a9f771..4c09ed26099 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -44,6 +44,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -138,7 +139,7 @@ var ( Level: "s0:c0,c1"} ) -var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() { +var _ = utils.SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() { f := framework.NewDefaultFramework("persistent-local-volumes-test") var ( @@ -835,7 +836,7 @@ func createFileDoesntExistCmd(testFileDir string, testFile string) string { // Execute a read or write command in a pod. 
// Fail on error func podRWCmdExec(pod *v1.Pod, cmd string) string { - out, err := podExec(pod, cmd) + out, err := utils.PodExec(pod, cmd) Expect(err).NotTo(HaveOccurred()) return out } diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 416bb34c6cf..53d28b39bcf 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -29,6 +29,7 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) // Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's @@ -85,7 +86,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, return nil } -var _ = SIGDescribe("PersistentVolumes", func() { +var _ = utils.SIGDescribe("PersistentVolumes", func() { // global vars for the Context()s and It()'s below f := framework.NewDefaultFramework("pv") diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index b43ee5d4d91..5ead6272635 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -30,9 +30,10 @@ import ( "k8s.io/kubernetes/pkg/util/slice" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) -var _ = SIGDescribe("PVC Protection [Feature:PVCProtection]", func() { +var _ = utils.SIGDescribe("PVC Protection [Feature:PVCProtection]", func() { var ( client clientset.Interface nameSpace string diff --git a/test/e2e/storage/utils/BUILD b/test/e2e/storage/utils/BUILD new file mode 100644 index 00000000000..7f7debdce11 --- /dev/null +++ b/test/e2e/storage/utils/BUILD @@ -0,0 +1,37 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "framework.go", + 
"utils.go", + ], + importpath = "k8s.io/kubernetes/test/e2e/storage/utils", + deps = [ + "//test/e2e/framework:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/test/e2e/storage/framework.go b/test/e2e/storage/utils/framework.go similarity index 97% rename from test/e2e/storage/framework.go rename to test/e2e/storage/utils/framework.go index 443c9926bcc..b8ea82b9bd2 100644 --- a/test/e2e/storage/framework.go +++ b/test/e2e/storage/utils/framework.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package utils import "github.com/onsi/ginkgo" diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go new file mode 100644 index 00000000000..5b27dea6d97 --- /dev/null +++ b/test/e2e/storage/utils/utils.go @@ -0,0 +1,240 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package utils
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"k8s.io/api/core/v1"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+type KubeletOpt string
+
+const (
+	NodeStateTimeout = 1 * time.Minute
+	KStart KubeletOpt = "start"
+	KStop KubeletOpt = "stop"
+	KRestart KubeletOpt = "restart"
+)
+
+// PodExec wraps RunKubectl to execute a bash cmd in target pod
+func PodExec(pod *v1.Pod, bashExec string) (string, error) {
+	return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
+}
+
+// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
+// for the desired status.
+// - First issues the command via `systemctl`
+// - If `systemctl` returns stderr "command not found", issues the command via `service`
+// - If `service` also returns stderr "command not found", the test is aborted.
+// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart` +func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { + command := "" + sudoPresent := false + systemctlPresent := false + kubeletPid := "" + + nodeIP, err := framework.GetHostExternalAddress(c, pod) + Expect(err).NotTo(HaveOccurred()) + nodeIP = nodeIP + ":22" + + framework.Logf("Checking if sudo command is present") + sshResult, err := framework.SSH("sudo --version", nodeIP, framework.TestContext.Provider) + Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) + if !strings.Contains(sshResult.Stderr, "command not found") { + sudoPresent = true + } + + framework.Logf("Checking if systemctl command is present") + sshResult, err = framework.SSH("systemctl --version", nodeIP, framework.TestContext.Provider) + Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) + if !strings.Contains(sshResult.Stderr, "command not found") { + command = fmt.Sprintf("systemctl %s kubelet", string(kOp)) + systemctlPresent = true + } else { + command = fmt.Sprintf("service kubelet %s", string(kOp)) + } + if sudoPresent { + command = fmt.Sprintf("sudo %s", command) + } + + if kOp == KRestart { + kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent) + } + + framework.Logf("Attempting `%s`", command) + sshResult, err = framework.SSH(command, nodeIP, framework.TestContext.Provider) + Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) + framework.LogSSHResult(sshResult) + Expect(sshResult.Code).To(BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult) + + if kOp == KStop { + if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { + framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) + } + } + if kOp == KRestart { + // Wait for a minute to check if kubelet Pid is getting changed + isPidChanged := false + for 
start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) { + kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent) + if kubeletPid != kubeletPidAfterRestart { + isPidChanged = true + break + } + } + Expect(isPidChanged).To(BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet") + framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back") + time.Sleep(30 * time.Second) + } + if kOp == KStart || kOp == KRestart { + // For kubelet start and restart operations, Wait until Node becomes Ready + if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { + framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName) + } + } +} + +// getKubeletMainPid return the Main PID of the Kubelet Process +func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string { + command := "" + if systemctlPresent { + command = "systemctl status kubelet | grep 'Main PID'" + } else { + command = "service kubelet status | grep 'Main PID'" + } + if sudoPresent { + command = fmt.Sprintf("sudo %s", command) + } + framework.Logf("Attempting `%s`", command) + sshResult, err := framework.SSH(command, nodeIP, framework.TestContext.Provider) + Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", nodeIP)) + framework.LogSSHResult(sshResult) + Expect(sshResult.Code).To(BeZero(), "Failed to get kubelet PID") + Expect(sshResult.Stdout).NotTo(BeEmpty(), "Kubelet Main PID should not be Empty") + return sshResult.Stdout +} + +// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts +func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { + By("Writing to the volume.") + file := "/mnt/_SUCCESS" + out, err := PodExec(clientPod, fmt.Sprintf("touch %s", 
file)) + framework.Logf(out) + Expect(err).NotTo(HaveOccurred()) + + By("Restarting kubelet") + KubeletCommand(KRestart, c, clientPod) + + By("Testing that written file is accessible.") + out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file)) + framework.Logf(out) + Expect(err).NotTo(HaveOccurred()) + framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file) +} + +// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down. +func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { + nodeIP, err := framework.GetHostExternalAddress(c, clientPod) + Expect(err).NotTo(HaveOccurred()) + nodeIP = nodeIP + ":22" + + By("Expecting the volume mount to be found.") + result, err := framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider) + framework.LogSSHResult(result) + Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") + Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) + + By("Stopping the kubelet.") + KubeletCommand(KStop, c, clientPod) + defer func() { + if err != nil { + KubeletCommand(KStart, c, clientPod) + } + }() + By(fmt.Sprintf("Deleting Pod %q", clientPod.Name)) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + By("Starting the kubelet and waiting for pod to delete.") + KubeletCommand(KStart, c, clientPod) + err = f.WaitForPodTerminated(clientPod.Name, "") + if !apierrs.IsNotFound(err) && err != nil { + Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.") + } + + By("Expecting the volume mount not to be found.") + result, err = framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider) + 
framework.LogSSHResult(result) + Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") + Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).") + framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName) +} + +// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. +func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) { + pod := &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "pvc-volume-tester-", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "volume-tester", + Image: "busybox", + Command: []string{"/bin/sh"}, + Args: []string{"-c", command}, + VolumeMounts: []v1.VolumeMount{ + { + Name: "my-volume", + MountPath: "/mnt/test", + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + Volumes: []v1.Volume{ + { + Name: "my-volume", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: claimName, + ReadOnly: false, + }, + }, + }, + }, + }, + } + pod, err := c.CoreV1().Pods(ns).Create(pod) + framework.ExpectNoError(err, "Failed to create pod: %v", err) + defer func() { + framework.DeletePodOrFail(c, ns, pod.Name) + }() + framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace)) +} diff --git a/test/e2e/storage/volume_expand.go b/test/e2e/storage/volume_expand.go index f8ef16d2f14..6382a7cbc51 100644 --- a/test/e2e/storage/volume_expand.go +++ b/test/e2e/storage/volume_expand.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -38,7 +39,7 @@ const ( totalResizeWaitPeriod = 20 * time.Minute ) -var _ = SIGDescribe("Volume expand [Feature:ExpandPersistentVolumes] [Slow]", func() { +var _ = utils.SIGDescribe("Volume 
expand [Feature:ExpandPersistentVolumes] [Slow]", func() { var ( c clientset.Interface ns string diff --git a/test/e2e/storage/volume_io.go b/test/e2e/storage/volume_io.go index 0be71d44d77..e9205e4dfc3 100644 --- a/test/e2e/storage/volume_io.go +++ b/test/e2e/storage/volume_io.go @@ -40,6 +40,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -126,7 +127,7 @@ func writeToFile(pod *v1.Pod, fpath, dd_input string, fsize int64) error { By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath)) loopCnt := fsize / minFileSize writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, dd_input, minFileSize, fpath) - _, err := podExec(pod, writeCmd) + _, err := utils.PodExec(pod, writeCmd) return err } @@ -134,7 +135,7 @@ func writeToFile(pod *v1.Pod, fpath, dd_input string, fsize int64) error { // Verify that the test file is the expected size and contains the expected content. func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) error { By("verifying file size") - rtnstr, err := podExec(pod, fmt.Sprintf("stat -c %%s %s", fpath)) + rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath)) if err != nil || rtnstr == "" { return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err) } @@ -147,7 +148,7 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) er } By("verifying file hash") - rtnstr, err = podExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath)) + rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath)) if err != nil { return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err) } @@ -168,7 +169,7 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) er // Delete `fpath` to save some disk space on host. 
Delete errors are logged but ignored. func deleteFile(pod *v1.Pod, fpath string) { By(fmt.Sprintf("deleting test file %s...", fpath)) - _, err := podExec(pod, fmt.Sprintf("rm -f %s", fpath)) + _, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath)) if err != nil { // keep going, the test dir will be deleted when the volume is unmounted framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err) @@ -237,7 +238,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framewo // These tests need privileged containers which are disabled by default. // TODO: support all of the plugins tested in storage/volumes.go -var _ = SIGDescribe("Volume plugin streaming [Slow]", func() { +var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() { f := framework.NewDefaultFramework("volume-io") var ( config framework.VolumeTestConfig diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index 85e04ab1c32..303df4855f8 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -30,11 +30,12 @@ import ( kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/metrics" + "k8s.io/kubernetes/test/e2e/storage/utils" ) // This test needs to run in serial because other tests could interfere // with metrics being tested here. 
-var _ = SIGDescribe("[Serial] Volume metrics", func() { +var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { var ( c clientset.Interface ns string diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 38d1b257930..ded7fea3c00 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -45,6 +45,7 @@ import ( storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) type storageClassTest struct { @@ -229,7 +230,7 @@ func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error { return nil } -var _ = SIGDescribe("Dynamic Provisioning", func() { +var _ = utils.SIGDescribe("Dynamic Provisioning", func() { f := framework.NewDefaultFramework("volume-provisioning") // filled in BeforeEach diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index da4ad0bc67c..f5e82ec3470 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -55,6 +55,8 @@ import ( clientset "k8s.io/client-go/kubernetes" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" + vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere" ) func DeleteCinderVolume(name string) error { @@ -79,7 +81,7 @@ func DeleteCinderVolume(name string) error { } // These tests need privileged containers, which are disabled by default. 
-var _ = SIGDescribe("Volumes", func() { +var _ = utils.SIGDescribe("Volumes", func() { f := framework.NewDefaultFramework("volume") // note that namespace deletion is handled by delete-namespace flag @@ -510,10 +512,10 @@ var _ = SIGDescribe("Volumes", func() { if err != nil { return } - vsp, err := getVSphere(c) + vsp, err := vspheretest.GetVSphere(c) Expect(err).NotTo(HaveOccurred()) - volumePath, err = createVSphereVolume(vsp, nil) + volumePath, err = vspheretest.CreateVSphereVolume(vsp, nil) Expect(err).NotTo(HaveOccurred()) defer func() { diff --git a/test/e2e/storage/vsphere/BUILD b/test/e2e/storage/vsphere/BUILD new file mode 100644 index 00000000000..d582d820046 --- /dev/null +++ b/test/e2e/storage/vsphere/BUILD @@ -0,0 +1,67 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "persistent_volumes-vsphere.go", + "pv_reclaimpolicy.go", + "pvc_label_selector.go", + "vsphere_scale.go", + "vsphere_statefulsets.go", + "vsphere_stress.go", + "vsphere_utils.go", + "vsphere_volume_cluster_ds.go", + "vsphere_volume_datastore.go", + "vsphere_volume_diskformat.go", + "vsphere_volume_disksize.go", + "vsphere_volume_fstype.go", + "vsphere_volume_master_restart.go", + "vsphere_volume_node_poweroff.go", + "vsphere_volume_ops_storm.go", + "vsphere_volume_perf.go", + "vsphere_volume_placement.go", + "vsphere_volume_vsan_policy.go", + ], + importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere", + deps = [ + "//pkg/cloudprovider/providers/vsphere:go_default_library", + "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", + "//pkg/volume/util/volumehelper:go_default_library", + "//test/e2e/framework:go_default_library", + "//test/e2e/storage/utils:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + 
"//vendor/github.com/vmware/govmomi/find:go_default_library", + "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/api/storage/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/test/e2e/storage/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go similarity index 96% rename from test/e2e/storage/persistent_volumes-vsphere.go rename to test/e2e/storage/vsphere/persistent_volumes-vsphere.go index 1d49361f068..46556e446a4 100644 --- a/test/e2e/storage/persistent_volumes-vsphere.go +++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package storage +package vsphere import ( "time" @@ -28,10 +28,11 @@ import ( clientset "k8s.io/client-go/kubernetes" vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) // Testing configurations of single a PV/PVC pair attached to a vSphere Disk -var _ = SIGDescribe("PersistentVolumes:vsphere", func() { +var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { var ( c clientset.Interface ns string @@ -182,7 +183,7 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() { 3. Verify that written file is accessible after kubelet restart */ It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() { - testKubeletRestartsAndRestoresMount(c, f, clientPod, pvc, pv) + utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, pvc, pv) }) /* @@ -197,7 +198,7 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() { 5. Verify that volume mount not to be found. */ It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() { - testVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc, pv) + utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc, pv) }) /* diff --git a/test/e2e/storage/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go similarity index 97% rename from test/e2e/storage/pv_reclaimpolicy.go rename to test/e2e/storage/vsphere/pv_reclaimpolicy.go index 8713ce7d7eb..79211c292b8 100644 --- a/test/e2e/storage/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package storage +package vsphere import ( "strconv" @@ -29,9 +29,10 @@ import ( clientset "k8s.io/client-go/kubernetes" vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) -var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { +var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { f := framework.NewDefaultFramework("persistentvolumereclaim") var ( c clientset.Interface @@ -47,7 +48,7 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) }) - SIGDescribe("persistentvolumereclaim:vsphere", func() { + utils.SIGDescribe("persistentvolumereclaim:vsphere", func() { BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") pv = nil diff --git a/test/e2e/storage/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go similarity index 96% rename from test/e2e/storage/pvc_label_selector.go rename to test/e2e/storage/vsphere/pvc_label_selector.go index d389386a6c7..ccfa0a59ec2 100644 --- a/test/e2e/storage/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package vsphere import ( "time" @@ -24,6 +24,7 @@ import ( "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) /* @@ -43,7 +44,7 @@ import ( 9. 
delete pvc_vvol */ -var _ = SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() { +var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() { f := framework.NewDefaultFramework("pvclabelselector") var ( c clientset.Interface @@ -68,7 +69,7 @@ var _ = SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() { }) - SIGDescribe("Selector-Label Volume Binding:vsphere", func() { + utils.SIGDescribe("Selector-Label Volume Binding:vsphere", func() { AfterEach(func() { By("Running clean up actions") if framework.ProviderIs("vsphere") { diff --git a/test/e2e/storage/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go similarity index 98% rename from test/e2e/storage/vsphere_scale.go rename to test/e2e/storage/vsphere/vsphere_scale.go index ded690e2047..2da0933f87a 100644 --- a/test/e2e/storage/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package storage +package vsphere import ( "fmt" @@ -30,6 +30,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) /* @@ -52,7 +53,7 @@ type NodeSelector struct { labelValue string } -var _ = SIGDescribe("vcp at scale [Feature:vsphere] ", func() { +var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { f := framework.NewDefaultFramework("vcp-at-scale") var ( diff --git a/test/e2e/storage/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go similarity index 98% rename from test/e2e/storage/vsphere_statefulsets.go rename to test/e2e/storage/vsphere/vsphere_statefulsets.go index b0a633ad391..3b5b4596922 100644 --- a/test/e2e/storage/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package vsphere import ( "fmt" @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) /* @@ -49,7 +50,7 @@ const ( storageclassname = "nginx-sc" ) -var _ = SIGDescribe("vsphere statefulset", func() { +var _ = utils.SIGDescribe("vsphere statefulset", func() { f := framework.NewDefaultFramework("vsphere-statefulset") var ( namespace string diff --git a/test/e2e/storage/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go similarity index 98% rename from test/e2e/storage/vsphere_stress.go rename to test/e2e/storage/vsphere/vsphere_stress.go index 4be0205e051..2af52b7c94b 100644 --- a/test/e2e/storage/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package storage +package vsphere import ( "fmt" @@ -31,6 +31,7 @@ import ( k8stype "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) /* @@ -43,7 +44,7 @@ import ( 4. Each instance of routine iterates for n times, where n is read from system env - VCP_STRESS_ITERATIONS 5. Each iteration creates 1 PVC, 1 POD using the provisioned PV, Verify disk is attached to the node, Verify pod can access the volume, delete the pod and finally delete the PVC. */ -var _ = SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() { f := framework.NewDefaultFramework("vcp-stress") var ( client clientset.Interface diff --git a/test/e2e/storage/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go similarity index 95% rename from test/e2e/storage/vsphere_utils.go rename to test/e2e/storage/vsphere/vsphere_utils.go index b0c87c0cd71..947a424d8cf 100644 --- a/test/e2e/storage/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package storage +package vsphere import ( "fmt" @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/volume/util/volumehelper" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -250,15 +251,20 @@ func createVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vclib.VolumeOption return volumePath, nil } +// CreateVSphereVolume creates a vmdk volume +func CreateVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vclib.VolumeOptions) (string, error) { + return createVSphereVolume(vsp, volumeOptions) +} + // function to write content to the volume backed by given PVC func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) { - runInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data") + utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data") framework.Logf("Done with writing content to volume") } // function to verify content is matching on the volume backed for given PVC func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) { - runInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data") + utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data") framework.Logf("Successfully verified content of the volume") } @@ -457,3 +463,8 @@ func getVSphere(c clientset.Interface) (*vsphere.VSphere, error) { addNodesToVCP(vsp, c) return vsp, nil } + +// GetVSphere returns vsphere cloud provider +func GetVSphere(c clientset.Interface) (*vsphere.VSphere, error) { + return getVSphere(c) +} diff --git a/test/e2e/storage/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go similarity index 96% rename from test/e2e/storage/vsphere_volume_cluster_ds.go rename to 
test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index b2ebd29fb16..d200eded4e1 100644 --- a/test/e2e/storage/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package vsphere import ( "fmt" @@ -27,6 +27,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) /* @@ -39,7 +40,7 @@ import ( 1. CLUSTER_DATASTORE which should be set to clustered datastore 2. VSPHERE_SPBM_POLICY_DS_CLUSTER which should be set to a tag based spbm policy tagged to a clustered datastore */ -var _ = SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere]", func() { f := framework.NewDefaultFramework("volume-provision") var client clientset.Interface diff --git a/test/e2e/storage/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go similarity index 95% rename from test/e2e/storage/vsphere_volume_datastore.go rename to test/e2e/storage/vsphere/vsphere_volume_datastore.go index df86a96e5ef..70f55ee9f8d 100644 --- a/test/e2e/storage/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package vsphere import ( "fmt" @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -44,7 +45,7 @@ const ( 4. Verify the error returned on PVC failure is the correct. 
*/ -var _ = SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func() { f := framework.NewDefaultFramework("volume-datastore") var ( client clientset.Interface diff --git a/test/e2e/storage/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go similarity index 98% rename from test/e2e/storage/vsphere_volume_diskformat.go rename to test/e2e/storage/vsphere/vsphere_volume_diskformat.go index b805eb4d5a1..f4a9fa9a7a5 100644 --- a/test/e2e/storage/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package vsphere import ( "os" @@ -32,6 +32,7 @@ import ( clientset "k8s.io/client-go/kubernetes" vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) /* @@ -52,7 +53,7 @@ import ( 11. Delete PVC, PV and Storage Class */ -var _ = SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { f := framework.NewDefaultFramework("volume-disk-format") var ( client clientset.Interface diff --git a/test/e2e/storage/vsphere_volume_disksize.go b/test/e2e/storage/vsphere/vsphere_volume_disksize.go similarity index 96% rename from test/e2e/storage/vsphere_volume_disksize.go rename to test/e2e/storage/vsphere/vsphere_volume_disksize.go index f69983accdc..a84692a2fe2 100644 --- a/test/e2e/storage/vsphere_volume_disksize.go +++ b/test/e2e/storage/vsphere/vsphere_volume_disksize.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package storage +package vsphere import ( "fmt" @@ -28,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -44,7 +45,7 @@ const ( 4. Verify the error returned on PVC failure is the correct. */ -var _ = SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { f := framework.NewDefaultFramework("volume-disksize") var ( client clientset.Interface diff --git a/test/e2e/storage/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go similarity index 98% rename from test/e2e/storage/vsphere_volume_fstype.go rename to test/e2e/storage/vsphere/vsphere_volume_fstype.go index 352b6dd3935..6ec152983a8 100644 --- a/test/e2e/storage/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package vsphere import ( "strings" @@ -28,6 +28,7 @@ import ( clientset "k8s.io/client-go/kubernetes" vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -63,7 +64,7 @@ const ( 7. Verify if the MountVolume.MountDevice fails because it is unable to find the file system executable file on the node. 
*/ -var _ = SIGDescribe("Volume FStype [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() { f := framework.NewDefaultFramework("volume-fstype") var ( client clientset.Interface diff --git a/test/e2e/storage/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go similarity index 97% rename from test/e2e/storage/vsphere_volume_master_restart.go rename to test/e2e/storage/vsphere/vsphere_volume_master_restart.go index 5ba86162517..cb12e9d0edd 100644 --- a/test/e2e/storage/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package vsphere import ( "fmt" @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) /* @@ -41,7 +42,7 @@ import ( 6. Delete the pod and wait for the volume to be detached 7. Delete the volume */ -var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]", func() { +var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]", func() { f := framework.NewDefaultFramework("restart-master") const labelKey = "vsphere_e2e_label" diff --git a/test/e2e/storage/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go similarity index 98% rename from test/e2e/storage/vsphere_volume_node_poweroff.go rename to test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index 1a2f37e7560..c0534a73d35 100644 --- a/test/e2e/storage/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package storage +package vsphere import ( "fmt" @@ -36,6 +36,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) /* @@ -43,7 +44,7 @@ import ( 1. Verify the pod got provisioned on a different node with volume attached to it 2. Verify the volume is detached from the powered off node */ -var _ = SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func() { +var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func() { f := framework.NewDefaultFramework("node-poweroff") var ( client clientset.Interface diff --git a/test/e2e/storage/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go similarity index 96% rename from test/e2e/storage/vsphere_volume_ops_storm.go rename to test/e2e/storage/vsphere/vsphere_volume_ops_storm.go index b1b6516d0d9..80a07ceeec0 100644 --- a/test/e2e/storage/vsphere_volume_ops_storm.go +++ b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package vsphere import ( "fmt" @@ -29,6 +29,7 @@ import ( clientset "k8s.io/client-go/kubernetes" vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) /* @@ -47,7 +48,7 @@ import ( 10. Delete storage class. 
*/ -var _ = SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { f := framework.NewDefaultFramework("volume-ops-storm") const DEFAULT_VOLUME_OPS_SCALE = 30 var ( diff --git a/test/e2e/storage/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go similarity index 98% rename from test/e2e/storage/vsphere_volume_perf.go rename to test/e2e/storage/vsphere/vsphere_volume_perf.go index 59ca3951782..73c8da97c0b 100644 --- a/test/e2e/storage/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package vsphere import ( "fmt" @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) /* This test calculates latency numbers for volume lifecycle operations @@ -48,7 +49,7 @@ const ( DeleteOp = "DeleteOp" ) -var _ = SIGDescribe("vcp-performance [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { f := framework.NewDefaultFramework("vcp-performance") var ( diff --git a/test/e2e/storage/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go similarity index 99% rename from test/e2e/storage/vsphere_volume_placement.go rename to test/e2e/storage/vsphere/vsphere_volume_placement.go index 417d0723b15..a0327de9d4a 100644 --- a/test/e2e/storage/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package storage +package vsphere import ( "fmt" @@ -31,9 +31,10 @@ import ( vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) -var _ = SIGDescribe("Volume Placement", func() { +var _ = utils.SIGDescribe("Volume Placement", func() { f := framework.NewDefaultFramework("volume-placement") var ( c clientset.Interface diff --git a/test/e2e/storage/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go similarity index 99% rename from test/e2e/storage/vsphere_volume_vsan_policy.go rename to test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index f3bef3f1c93..52c86a2e538 100644 --- a/test/e2e/storage/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package storage +package vsphere import ( "fmt" @@ -26,6 +26,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "github.com/vmware/govmomi/find" "golang.org/x/net/context" "k8s.io/api/core/v1" @@ -34,6 +35,7 @@ import ( clientset "k8s.io/client-go/kubernetes" vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -90,7 +92,7 @@ const ( */ -var _ = SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]", func() { f := framework.NewDefaultFramework("volume-vsan-policy") var ( client clientset.Interface From 070a7b58237ad12582b3b2eff00877e48324ad66 Mon Sep 17 00:00:00 2001 From: Walter Fender Date: Tue, 12 Sep 2017 15:47:03 -0700 Subject: [PATCH 384/794] Seperate loop and plugin control Seperate loop and plugin control in the kube-controller-manager. Adding an "--external-plugin" flag to specify a plugin to load when cloud-provider is set to "external". Flag has no effect currently when the cloud-provider is not set to external. The expectation is that the cloud provider and external plugin flags would go away once all cloud providers are on stage 2 cloud-controller-manager solutions. Managing the control loops more directly based on start up flags. Addressing issue brought up by @wlan0 Switched to using the main node controller in CCM. Changes to enable full NodeController to start in CCM. Fix related tests. Unifying some common code between KCM and CCM. Fix related tests and comments. 
Folded in feedback from @jhorwit2 and @wlan0 --- cmd/BUILD | 1 + .../app/controllermanager.go | 6 +- .../app/options/BUILD | 3 +- .../app/options/options.go | 51 +---- .../app/options/options_test.go | 101 +++++++--- cmd/controller-manager/app/options/BUILD | 29 +++ cmd/controller-manager/app/options/utils.go | 140 +++++++++++++ cmd/kube-controller-manager/app/BUILD | 1 + .../app/certificates.go | 6 +- .../app/controllermanager.go | 38 +++- cmd/kube-controller-manager/app/options/BUILD | 4 +- .../app/options/options.go | 119 ++--------- .../app/options/options_test.go | 189 +++++++++--------- pkg/apis/componentconfig/types.go | 3 + pkg/controller/node/node_controller.go | 2 +- 15 files changed, 409 insertions(+), 284 deletions(-) create mode 100644 cmd/controller-manager/app/options/BUILD create mode 100644 cmd/controller-manager/app/options/utils.go diff --git a/cmd/BUILD b/cmd/BUILD index 75dcc63d834..e8ed83675ae 100644 --- a/cmd/BUILD +++ b/cmd/BUILD @@ -13,6 +13,7 @@ filegroup( ":package-srcs", "//cmd/clicheck:all-srcs", "//cmd/cloud-controller-manager:all-srcs", + "//cmd/controller-manager/app/options:all-srcs", "//cmd/gendocs:all-srcs", "//cmd/genkubedocs:all-srcs", "//cmd/genman:all-srcs", diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index 2871e9f727c..fc698b651dd 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -147,7 +147,7 @@ func Run(s *options.CloudControllerManagerServer) error { clientBuilder = rootClientBuilder } - if err := StartControllers(s, kubeconfig, clientBuilder, stop, recorder, cloud); err != nil { + if err := StartControllers(s, kubeconfig, rootClientBuilder, clientBuilder, stop, recorder, cloud); err != nil { glog.Fatalf("error running controllers: %v", err) } } @@ -193,7 +193,7 @@ func Run(s *options.CloudControllerManagerServer) error { } // StartControllers starts the cloud 
specific controller loops. -func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restclient.Config, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder, cloud cloudprovider.Interface) error { +func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restclient.Config, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder, cloud cloudprovider.Interface) error { // Function to build the kube client object client := func(serviceAccountName string) clientset.Interface { return clientBuilder.ClientOrDie(serviceAccountName) @@ -204,7 +204,7 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc cloud.Initialize(clientBuilder) } - versionedClient := client("shared-informers") + versionedClient := rootClientBuilder.ClientOrDie("shared-informers") sharedInformers := informers.NewSharedInformerFactory(versionedClient, resyncPeriod(s)()) // Start the CloudNodeController diff --git a/cmd/cloud-controller-manager/app/options/BUILD b/cmd/cloud-controller-manager/app/options/BUILD index 62c8e0d01da..b02da213f59 100644 --- a/cmd/cloud-controller-manager/app/options/BUILD +++ b/cmd/cloud-controller-manager/app/options/BUILD @@ -11,7 +11,7 @@ go_library( srcs = ["options.go"], importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager/app/options", deps = [ - "//pkg/apis/componentconfig:go_default_library", + "//cmd/controller-manager/app/options:go_default_library", "//pkg/client/leaderelectionconfig:go_default_library", "//pkg/features:go_default_library", "//pkg/master/ports:go_default_library", @@ -40,6 +40,7 @@ go_test( importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager/app/options", library = ":go_default_library", deps = [ + "//cmd/controller-manager/app/options:go_default_library", "//pkg/apis/componentconfig:go_default_library", 
"//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/cloud-controller-manager/app/options/options.go b/cmd/cloud-controller-manager/app/options/options.go index 6120232659c..5d606b16c14 100644 --- a/cmd/cloud-controller-manager/app/options/options.go +++ b/cmd/cloud-controller-manager/app/options/options.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/apis/componentconfig" + cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" "k8s.io/kubernetes/pkg/client/leaderelectionconfig" "k8s.io/kubernetes/pkg/master/ports" @@ -33,10 +33,7 @@ import ( // CloudControllerManagerServer is the main context object for the controller manager. type CloudControllerManagerServer struct { - componentconfig.KubeControllerManagerConfiguration - - Master string - Kubeconfig string + cmoptions.ControllerManagerServer // NodeStatusUpdateFrequency is the frequency at which the controller updates nodes' status NodeStatusUpdateFrequency metav1.Duration @@ -45,22 +42,10 @@ type CloudControllerManagerServer struct { // NewCloudControllerManagerServer creates a new ExternalCMServer with a default config. func NewCloudControllerManagerServer() *CloudControllerManagerServer { s := CloudControllerManagerServer{ - // Part of these default values also present in 'cmd/kube-controller-manager/app/options/options.go'. - // Please keep them in sync when doing update. 
- KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{ - Port: ports.CloudControllerManagerPort, - Address: "0.0.0.0", - ConcurrentServiceSyncs: 1, - MinResyncPeriod: metav1.Duration{Duration: 12 * time.Hour}, - NodeMonitorPeriod: metav1.Duration{Duration: 5 * time.Second}, - ClusterName: "kubernetes", - ConfigureCloudRoutes: true, - ContentType: "application/vnd.kubernetes.protobuf", - KubeAPIQPS: 20.0, - KubeAPIBurst: 30, - LeaderElection: leaderelectionconfig.DefaultLeaderElectionConfiguration(), - ControllerStartInterval: metav1.Duration{Duration: 0 * time.Second}, - RouteReconciliationPeriod: metav1.Duration{Duration: 10 * time.Second}, + // The common/default are kept in 'cmd/kube-controller-manager/app/options/util.go'. + // Please make common changes there and put anything cloud specific here. + ControllerManagerServer: cmoptions.ControllerManagerServer{ + KubeControllerManagerConfiguration: cmoptions.GetDefaultControllerOptions(ports.CloudControllerManagerPort), }, NodeStatusUpdateFrequency: metav1.Duration{Duration: 5 * time.Minute}, } @@ -70,34 +55,14 @@ func NewCloudControllerManagerServer() *CloudControllerManagerServer { // AddFlags adds flags for a specific ExternalCMServer to the specified FlagSet func (s *CloudControllerManagerServer) AddFlags(fs *pflag.FlagSet) { - fs.Int32Var(&s.Port, "port", s.Port, "The port that the cloud-controller-manager's http service runs on.") - fs.Var(componentconfig.IPVar{Val: &s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces).") + cmoptions.AddDefaultControllerFlags(&s.ControllerManagerServer, fs) fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider of cloud services. Cannot be empty.") - fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. 
Empty string for no configuration file.") - fs.BoolVar(&s.AllowUntaggedCloud, "allow-untagged-cloud", false, "Allow the cluster to run without the cluster-id on cloud instances. This is a legacy mode of operation and a cluster-id will be required in the future.") - fs.MarkDeprecated("allow-untagged-cloud", "This flag is deprecated and will be removed in a future release. A cluster-id will be required on cloud instances.") - fs.DurationVar(&s.MinResyncPeriod.Duration, "min-resync-period", s.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.") - fs.DurationVar(&s.NodeMonitorPeriod.Duration, "node-monitor-period", s.NodeMonitorPeriod.Duration, - "The period for syncing NodeStatus in NodeController.") fs.DurationVar(&s.NodeStatusUpdateFrequency.Duration, "node-status-update-frequency", s.NodeStatusUpdateFrequency.Duration, "Specifies how often the controller updates nodes' status.") // TODO: remove --service-account-private-key-file 6 months after 1.8 is released (~1.10) fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.") fs.MarkDeprecated("service-account-private-key-file", "This flag is currently no-op and will be deleted.") - fs.BoolVar(&s.UseServiceAccountCredentials, "use-service-account-credentials", s.UseServiceAccountCredentials, "If true, use individual service account credentials for each controller.") - fs.DurationVar(&s.RouteReconciliationPeriod.Duration, "route-reconciliation-period", s.RouteReconciliationPeriod.Duration, "The period for reconciling routes created for Nodes by cloud provider.") - fs.BoolVar(&s.ConfigureCloudRoutes, "configure-cloud-routes", true, "Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.") - fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface 
host:port/debug/pprof/.") - fs.BoolVar(&s.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled.") - fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.") - fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster.") - fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.") - fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig).") - fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") - fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.") - fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver.") - fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver.") - fs.DurationVar(&s.ControllerStartInterval.Duration, "controller-start-interval", s.ControllerStartInterval.Duration, "Interval between starting controller managers.") fs.Int32Var(&s.ConcurrentServiceSyncs, "concurrent-service-syncs", s.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently. 
Larger number = more responsive service management, but more CPU (and network) load") + leaderelectionconfig.BindFlags(&s.LeaderElection, fs) utilfeature.DefaultFeatureGate.AddFlag(fs) diff --git a/cmd/cloud-controller-manager/app/options/options_test.go b/cmd/cloud-controller-manager/app/options/options_test.go index ece3bf09d22..7538169b427 100644 --- a/cmd/cloud-controller-manager/app/options/options_test.go +++ b/cmd/cloud-controller-manager/app/options/options_test.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/diff" + cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" "k8s.io/kubernetes/pkg/apis/componentconfig" ) @@ -65,34 +66,82 @@ func TestAddFlags(t *testing.T) { f.Parse(args) expected := &CloudControllerManagerServer{ - KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{ - CloudProvider: "gce", - CloudConfigFile: "/cloud-config", - Port: 10000, - Address: "192.168.4.10", - ConcurrentServiceSyncs: 1, - MinResyncPeriod: metav1.Duration{Duration: 100 * time.Minute}, - NodeMonitorPeriod: metav1.Duration{Duration: 5 * time.Second}, - ClusterName: "k8s", - ConfigureCloudRoutes: false, - AllocateNodeCIDRs: true, - ContentType: "application/vnd.kubernetes.protobuf", - EnableContentionProfiling: true, - KubeAPIQPS: 50.0, - KubeAPIBurst: 100, - LeaderElection: componentconfig.LeaderElectionConfiguration{ - ResourceLock: "configmap", - LeaderElect: false, - LeaseDuration: metav1.Duration{Duration: 30 * time.Second}, - RenewDeadline: metav1.Duration{Duration: 15 * time.Second}, - RetryPeriod: metav1.Duration{Duration: 5 * time.Second}, + ControllerManagerServer: cmoptions.ControllerManagerServer{ + KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{ + CloudProvider: "gce", + CloudConfigFile: "/cloud-config", + Port: 10000, + Address: "192.168.4.10", + ConcurrentEndpointSyncs: 5, + ConcurrentRSSyncs: 5, + 
ConcurrentResourceQuotaSyncs: 5, + ConcurrentDeploymentSyncs: 5, + ConcurrentDaemonSetSyncs: 2, + ConcurrentJobSyncs: 5, + ConcurrentNamespaceSyncs: 10, + ConcurrentSATokenSyncs: 5, + ConcurrentServiceSyncs: 1, + ConcurrentGCSyncs: 20, + ConcurrentRCSyncs: 5, + MinResyncPeriod: metav1.Duration{Duration: 100 * time.Minute}, + NodeMonitorPeriod: metav1.Duration{Duration: 5 * time.Second}, + ServiceSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, + ResourceQuotaSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, + NamespaceSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, + PVClaimBinderSyncPeriod: metav1.Duration{Duration: 15 * time.Second}, + HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, + HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 3 * time.Minute}, + HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 5 * time.Minute}, + HorizontalPodAutoscalerTolerance: 0.1, + DeploymentControllerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, + PodEvictionTimeout: metav1.Duration{Duration: 5 * time.Minute}, + NodeMonitorGracePeriod: metav1.Duration{Duration: 40 * time.Second}, + NodeStartupGracePeriod: metav1.Duration{Duration: 1 * time.Minute}, + ClusterSigningDuration: metav1.Duration{Duration: 8760 * time.Hour}, + ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 1 * time.Minute}, + TerminatedPodGCThreshold: 12500, + RegisterRetryCount: 10, + ClusterName: "k8s", + ConfigureCloudRoutes: false, + AllocateNodeCIDRs: true, + EnableGarbageCollector: true, + EnableTaintManager: true, + HorizontalPodAutoscalerUseRESTClients: true, + VolumeConfiguration: componentconfig.VolumeConfiguration{ + EnableDynamicProvisioning: true, + EnableHostPathProvisioning: false, + FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/", + PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{ + MaximumRetry: 3, + MinimumTimeoutNFS: 
300, + IncrementTimeoutNFS: 30, + MinimumTimeoutHostPath: 60, + IncrementTimeoutHostPath: 30, + }, + }, + ContentType: "application/vnd.kubernetes.protobuf", + ClusterSigningCertFile: "/etc/kubernetes/ca/ca.pem", + ClusterSigningKeyFile: "/etc/kubernetes/ca/ca.key", + EnableContentionProfiling: true, + KubeAPIQPS: 50.0, + KubeAPIBurst: 100, + LeaderElection: componentconfig.LeaderElectionConfiguration{ + ResourceLock: "configmap", + LeaderElect: false, + LeaseDuration: metav1.Duration{Duration: 30 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 15 * time.Second}, + RetryPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute}, + RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second}, + ClusterCIDR: "1.2.3.4/24", + NodeCIDRMaskSize: 24, + CIDRAllocatorType: "RangeAllocator", + Controllers: []string{"*"}, }, - ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute}, - RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second}, - ClusterCIDR: "1.2.3.4/24", + Kubeconfig: "/kubeconfig", + Master: "192.168.4.20", }, - Kubeconfig: "/kubeconfig", - Master: "192.168.4.20", NodeStatusUpdateFrequency: metav1.Duration{Duration: 10 * time.Minute}, } if !reflect.DeepEqual(expected, s) { diff --git a/cmd/controller-manager/app/options/BUILD b/cmd/controller-manager/app/options/BUILD new file mode 100644 index 00000000000..2ad13dea877 --- /dev/null +++ b/cmd/controller-manager/app/options/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["utils.go"], + importpath = "k8s.io/kubernetes/cmd/controller-manager/app/options", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/componentconfig:go_default_library", + "//pkg/client/leaderelectionconfig:go_default_library", + "//vendor/github.com/cloudflare/cfssl/helpers:go_default_library", + 
"//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cmd/controller-manager/app/options/utils.go b/cmd/controller-manager/app/options/utils.go new file mode 100644 index 00000000000..903158f2eb4 --- /dev/null +++ b/cmd/controller-manager/app/options/utils.go @@ -0,0 +1,140 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "github.com/cloudflare/cfssl/helpers" + "time" + + "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/client/leaderelectionconfig" +) + +// ControllerManagerServer is the common structure for a controller manager. It works with GetDefaultControllerOptions +// and AddDefaultControllerFlags to create the common components of kube-controller-manager and cloud-controller-manager. +type ControllerManagerServer struct { + componentconfig.KubeControllerManagerConfiguration + + Master string + Kubeconfig string +} + +const ( + // These defaults are deprecated and exported so that we can warn if + // they are being used. 
+ + // DefaultClusterSigningCertFile is deprecated. Do not use. + DefaultClusterSigningCertFile = "/etc/kubernetes/ca/ca.pem" + // DefaultClusterSigningKeyFile is deprecated. Do not use. + DefaultClusterSigningKeyFile = "/etc/kubernetes/ca/ca.key" +) + +// GetDefaultControllerOptions returns common/default configuration values for both +// the kube-controller-manager and the cloud-contoller-manager. Any common changes should +// be made here. Any individual changes should be made in that controller. +func GetDefaultControllerOptions(port int32) componentconfig.KubeControllerManagerConfiguration { + return componentconfig.KubeControllerManagerConfiguration{ + Controllers: []string{"*"}, + Port: port, + Address: "0.0.0.0", + ConcurrentEndpointSyncs: 5, + ConcurrentServiceSyncs: 1, + ConcurrentRCSyncs: 5, + ConcurrentRSSyncs: 5, + ConcurrentDaemonSetSyncs: 2, + ConcurrentJobSyncs: 5, + ConcurrentResourceQuotaSyncs: 5, + ConcurrentDeploymentSyncs: 5, + ConcurrentNamespaceSyncs: 10, + ConcurrentSATokenSyncs: 5, + ServiceSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, + RouteReconciliationPeriod: metav1.Duration{Duration: 10 * time.Second}, + ResourceQuotaSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, + NamespaceSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, + PVClaimBinderSyncPeriod: metav1.Duration{Duration: 15 * time.Second}, + HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, + HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 3 * time.Minute}, + HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 5 * time.Minute}, + HorizontalPodAutoscalerTolerance: 0.1, + DeploymentControllerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, + MinResyncPeriod: metav1.Duration{Duration: 12 * time.Hour}, + RegisterRetryCount: 10, + PodEvictionTimeout: metav1.Duration{Duration: 5 * time.Minute}, + NodeMonitorGracePeriod: metav1.Duration{Duration: 40 * time.Second}, + 
NodeStartupGracePeriod: metav1.Duration{Duration: 60 * time.Second}, + NodeMonitorPeriod: metav1.Duration{Duration: 5 * time.Second}, + ClusterName: "kubernetes", + NodeCIDRMaskSize: 24, + ConfigureCloudRoutes: true, + TerminatedPodGCThreshold: 12500, + VolumeConfiguration: componentconfig.VolumeConfiguration{ + EnableHostPathProvisioning: false, + EnableDynamicProvisioning: true, + PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{ + MaximumRetry: 3, + MinimumTimeoutNFS: 300, + IncrementTimeoutNFS: 30, + MinimumTimeoutHostPath: 60, + IncrementTimeoutHostPath: 30, + }, + FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/", + }, + ContentType: "application/vnd.kubernetes.protobuf", + KubeAPIQPS: 20.0, + KubeAPIBurst: 30, + LeaderElection: leaderelectionconfig.DefaultLeaderElectionConfiguration(), + ControllerStartInterval: metav1.Duration{Duration: 0 * time.Second}, + EnableGarbageCollector: true, + ConcurrentGCSyncs: 20, + ClusterSigningCertFile: DefaultClusterSigningCertFile, + ClusterSigningKeyFile: DefaultClusterSigningKeyFile, + ClusterSigningDuration: metav1.Duration{Duration: helpers.OneYear}, + ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 60 * time.Second}, + EnableTaintManager: true, + HorizontalPodAutoscalerUseRESTClients: true, + } +} + +// AddDefaultControllerFlags adds common/default flags for both the kube and cloud Controller Manager Server to the +// specified FlagSet. Any common changes should be made here. Any individual changes should be made in that controller. 
+func AddDefaultControllerFlags(s *ControllerManagerServer, fs *pflag.FlagSet) { + fs.Int32Var(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on.") + fs.Var(componentconfig.IPVar{Val: &s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces).") + fs.BoolVar(&s.UseServiceAccountCredentials, "use-service-account-credentials", s.UseServiceAccountCredentials, "If true, use individual service account credentials for each controller.") + fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") + fs.BoolVar(&s.AllowUntaggedCloud, "allow-untagged-cloud", false, "Allow the cluster to run without the cluster-id on cloud instances. This is a legacy mode of operation and a cluster-id will be required in the future.") + fs.MarkDeprecated("allow-untagged-cloud", "This flag is deprecated and will be removed in a future release. A cluster-id will be required on cloud instances.") + fs.DurationVar(&s.RouteReconciliationPeriod.Duration, "route-reconciliation-period", s.RouteReconciliationPeriod.Duration, "The period for reconciling routes created for Nodes by cloud provider.") + fs.DurationVar(&s.MinResyncPeriod.Duration, "min-resync-period", s.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.") + fs.DurationVar(&s.NodeMonitorPeriod.Duration, "node-monitor-period", s.NodeMonitorPeriod.Duration, + "The period for syncing NodeStatus in NodeController.") + fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/") + fs.BoolVar(&s.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled.") + fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster.") + fs.StringVar(&s.ClusterCIDR, "cluster-cidr", 
s.ClusterCIDR, "CIDR Range for Pods in cluster. Requires --allocate-node-cidrs to be true") + fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.") + fs.StringVar(&s.CIDRAllocatorType, "cidr-allocator-type", "RangeAllocator", "Type of CIDR allocator to use") + fs.BoolVar(&s.ConfigureCloudRoutes, "configure-cloud-routes", true, "Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.") + fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig).") + fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") + fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.") + fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver.") + fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver.") + fs.DurationVar(&s.ControllerStartInterval.Duration, "controller-start-interval", s.ControllerStartInterval.Duration, "Interval between starting controller managers.") +} diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index a533b2e29fc..2d4ede18b41 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -24,6 +24,7 @@ go_library( ], importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app", deps = [ + "//cmd/controller-manager/app/options:go_default_library", "//cmd/kube-controller-manager/app/options:go_default_library", "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps/install:go_default_library", diff --git a/cmd/kube-controller-manager/app/certificates.go b/cmd/kube-controller-manager/app/certificates.go index 7ef29582175..6c1531cae5b 100644 --- 
a/cmd/kube-controller-manager/app/certificates.go +++ b/cmd/kube-controller-manager/app/certificates.go @@ -27,7 +27,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" + cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" "k8s.io/kubernetes/pkg/controller/certificates/approver" "k8s.io/kubernetes/pkg/controller/certificates/cleaner" "k8s.io/kubernetes/pkg/controller/certificates/signer" @@ -55,12 +55,12 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) { _, err := os.Stat(ctx.Options.ClusterSigningCertFile) certFileExists = !os.IsNotExist(err) - certUsesDefault = (ctx.Options.ClusterSigningCertFile == options.DefaultClusterSigningCertFile) + certUsesDefault = (ctx.Options.ClusterSigningCertFile == cmoptions.DefaultClusterSigningCertFile) _, err = os.Stat(ctx.Options.ClusterSigningKeyFile) keyFileExists = !os.IsNotExist(err) - keyUsesDefault = (ctx.Options.ClusterSigningKeyFile == options.DefaultClusterSigningKeyFile) + keyUsesDefault = (ctx.Options.ClusterSigningKeyFile == cmoptions.DefaultClusterSigningKeyFile) switch { case (keyFileExists && keyUsesDefault) || (certFileExists && certUsesDefault): diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index d08e9658a8d..8697214e09c 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -71,6 +71,13 @@ const ( ControllerStartJitter = 1.0 ) +type ControllerLoopMode int + +const ( + IncludeCloudLoops ControllerLoopMode = iota + ExternalLoops +) + // NewControllerManagerCommand creates a *cobra.Command object with default parameters func NewControllerManagerCommand() *cobra.Command { s := options.NewCMServer() @@ -151,7 +158,7 @@ func Run(s *options.CMServer) error { } saTokenControllerInitFunc := serviceAccountTokenControllerStarter{rootClientBuilder: 
rootClientBuilder}.startServiceAccountTokenController - if err := StartControllers(ctx, saTokenControllerInitFunc, NewControllerInitializers()); err != nil { + if err := StartControllers(ctx, saTokenControllerInitFunc, NewControllerInitializers(ctx.LoopMode)); err != nil { glog.Fatalf("error starting controllers: %v", err) } @@ -262,6 +269,11 @@ type ControllerContext struct { // It must be initialized and ready to use. Cloud cloudprovider.Interface + // Control for which control loops to be run + // IncludeCloudLoops is for a kube-controller-manager running all loops + // ExternalLoops is for a kube-controller-manager running with a cloud-controller-manager + LoopMode ControllerLoopMode + // Stop is the stop channel Stop <-chan struct{} @@ -305,7 +317,7 @@ func IsControllerEnabled(name string, disabledByDefaultControllers sets.String, type InitFunc func(ctx ControllerContext) (bool, error) func KnownControllers() []string { - ret := sets.StringKeySet(NewControllerInitializers()) + ret := sets.StringKeySet(NewControllerInitializers(IncludeCloudLoops)) // add "special" controllers that aren't initialized normally. These controllers cannot be initialized // using a normal function. The only known special case is the SA token controller which *must* be started @@ -329,7 +341,7 @@ const ( // NewControllerInitializers is a public map of named controller groups (you can start more than one in an init func) // paired to their InitFunc. This allows for structured downstream composition and subdivision. 
-func NewControllerInitializers() map[string]InitFunc { +func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc { controllers := map[string]InitFunc{} controllers["endpoint"] = startEndpointController controllers["replicationcontroller"] = startReplicationController @@ -352,9 +364,12 @@ func NewControllerInitializers() map[string]InitFunc { controllers["ttl"] = startTTLController controllers["bootstrapsigner"] = startBootstrapSignerController controllers["tokencleaner"] = startTokenCleanerController - controllers["service"] = startServiceController + if loopMode == IncludeCloudLoops { + controllers["service"] = startServiceController + controllers["route"] = startRouteController + // TODO: Move node controller and volume controller into the IncludeCloudLoops only set. + } controllers["node"] = startNodeController - controllers["route"] = startRouteController controllers["persistentvolume-binder"] = startPersistentVolumeBinderController controllers["attachdetach"] = startAttachDetachController controllers["persistentvolume-expander"] = startVolumeExpandController @@ -430,7 +445,17 @@ func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuild return ControllerContext{}, err } - cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) + var cloud cloudprovider.Interface + var loopMode ControllerLoopMode + if cloudprovider.IsExternal(s.CloudProvider) { + loopMode = ExternalLoops + if s.ExternalCloudVolumePlugin != "" { + cloud, err = cloudprovider.InitCloudProvider(s.ExternalCloudVolumePlugin, s.CloudConfigFile) + } + } else { + loopMode = IncludeCloudLoops + cloud, err = cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) + } if err != nil { return ControllerContext{}, fmt.Errorf("cloud provider could not be initialized: %v", err) } @@ -453,6 +478,7 @@ func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuild Options: *s, AvailableResources: 
availableResources, Cloud: cloud, + LoopMode: loopMode, Stop: stop, InformersStarted: make(chan struct{}), } diff --git a/cmd/kube-controller-manager/app/options/BUILD b/cmd/kube-controller-manager/app/options/BUILD index 588b64c6866..41eeba489fa 100644 --- a/cmd/kube-controller-manager/app/options/BUILD +++ b/cmd/kube-controller-manager/app/options/BUILD @@ -11,14 +11,13 @@ go_library( srcs = ["options.go"], importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app/options", deps = [ + "//cmd/controller-manager/app/options:go_default_library", "//pkg/apis/componentconfig:go_default_library", "//pkg/client/leaderelectionconfig:go_default_library", "//pkg/controller/garbagecollector:go_default_library", "//pkg/features:go_default_library", "//pkg/master/ports:go_default_library", - "//vendor/github.com/cloudflare/cfssl/helpers:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", @@ -45,6 +44,7 @@ go_test( library = ":go_default_library", tags = ["automanaged"], deps = [ + "//cmd/controller-manager/app/options:go_default_library", "//pkg/apis/componentconfig:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go index d681bfd1895..94548812aaa 100644 --- a/cmd/kube-controller-manager/app/options/options.go +++ b/cmd/kube-controller-manager/app/options/options.go @@ -21,12 +21,11 @@ package options import ( "fmt" "strings" - "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" utilfeature 
"k8s.io/apiserver/pkg/util/feature" + cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/client/leaderelectionconfig" "k8s.io/kubernetes/pkg/controller/garbagecollector" @@ -35,26 +34,12 @@ import ( // add the kubernetes feature gates _ "k8s.io/kubernetes/pkg/features" - "github.com/cloudflare/cfssl/helpers" "github.com/spf13/pflag" ) -const ( - // These defaults are deprecated and exported so that we can warn if - // they are being used. - - // DefaultClusterSigningCertFile is deprecated. Do not use. - DefaultClusterSigningCertFile = "/etc/kubernetes/ca/ca.pem" - // DefaultClusterSigningKeyFile is deprecated. Do not use. - DefaultClusterSigningKeyFile = "/etc/kubernetes/ca/ca.key" -) - // CMServer is the main context object for the controller manager. type CMServer struct { - componentconfig.KubeControllerManagerConfiguration - - Master string - Kubeconfig string + cmoptions.ControllerManagerServer } // NewCMServer creates a new CMServer with a default config. @@ -65,91 +50,32 @@ func NewCMServer() *CMServer { } s := CMServer{ - // Part of these default values also present in 'cmd/cloud-controller-manager/app/options/options.go'. - // Please keep them in sync when doing update. 
- KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{ - Controllers: []string{"*"}, - Port: ports.ControllerManagerPort, - Address: "0.0.0.0", - ConcurrentEndpointSyncs: 5, - ConcurrentServiceSyncs: 1, - ConcurrentRCSyncs: 5, - ConcurrentRSSyncs: 5, - ConcurrentDaemonSetSyncs: 2, - ConcurrentJobSyncs: 5, - ConcurrentResourceQuotaSyncs: 5, - ConcurrentDeploymentSyncs: 5, - ConcurrentNamespaceSyncs: 10, - ConcurrentSATokenSyncs: 5, - ServiceSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, - RouteReconciliationPeriod: metav1.Duration{Duration: 10 * time.Second}, - ResourceQuotaSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, - NamespaceSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, - PVClaimBinderSyncPeriod: metav1.Duration{Duration: 15 * time.Second}, - HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, - HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 3 * time.Minute}, - HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 5 * time.Minute}, - HorizontalPodAutoscalerTolerance: 0.1, - DeploymentControllerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, - MinResyncPeriod: metav1.Duration{Duration: 12 * time.Hour}, - RegisterRetryCount: 10, - PodEvictionTimeout: metav1.Duration{Duration: 5 * time.Minute}, - NodeMonitorGracePeriod: metav1.Duration{Duration: 40 * time.Second}, - NodeStartupGracePeriod: metav1.Duration{Duration: 60 * time.Second}, - NodeMonitorPeriod: metav1.Duration{Duration: 5 * time.Second}, - ClusterName: "kubernetes", - NodeCIDRMaskSize: 24, - ConfigureCloudRoutes: true, - TerminatedPodGCThreshold: 12500, - VolumeConfiguration: componentconfig.VolumeConfiguration{ - EnableHostPathProvisioning: false, - EnableDynamicProvisioning: true, - PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{ - MaximumRetry: 3, - MinimumTimeoutNFS: 300, - IncrementTimeoutNFS: 30, - 
MinimumTimeoutHostPath: 60, - IncrementTimeoutHostPath: 30, - }, - FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/", - }, - ContentType: "application/vnd.kubernetes.protobuf", - KubeAPIQPS: 20.0, - KubeAPIBurst: 30, - LeaderElection: leaderelectionconfig.DefaultLeaderElectionConfiguration(), - ControllerStartInterval: metav1.Duration{Duration: 0 * time.Second}, - EnableGarbageCollector: true, - ConcurrentGCSyncs: 20, - GCIgnoredResources: gcIgnoredResources, - ClusterSigningCertFile: DefaultClusterSigningCertFile, - ClusterSigningKeyFile: DefaultClusterSigningKeyFile, - ClusterSigningDuration: metav1.Duration{Duration: helpers.OneYear}, - ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 60 * time.Second}, - EnableTaintManager: true, - HorizontalPodAutoscalerUseRESTClients: true, + // The common/default are kept in 'cmd/controller-manager/app/options/utils.go'. + // Please make common changes there but put anything kube-controller specific here. + ControllerManagerServer: cmoptions.ControllerManagerServer{ + KubeControllerManagerConfiguration: cmoptions.GetDefaultControllerOptions(ports.ControllerManagerPort), + }, + } + s.KubeControllerManagerConfiguration.GCIgnoredResources = gcIgnoredResources s.LeaderElection.LeaderElect = true return &s } // AddFlags adds flags for a specific CMServer to the specified FlagSet func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabledByDefaultControllers []string) { + cmoptions.AddDefaultControllerFlags(&s.ControllerManagerServer, fs) + fs.StringSliceVar(&s.Controllers, "controllers", s.Controllers, fmt.Sprintf(""+ "A list of controllers to enable. 
'*' enables all on-by-default controllers, 'foo' enables the controller "+ "named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s\nDisabled-by-default controllers: %s", strings.Join(allControllers, ", "), strings.Join(disabledByDefaultControllers, ", "))) - fs.Int32Var(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on") - fs.Var(componentconfig.IPVar{Val: &s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)") - fs.BoolVar(&s.UseServiceAccountCredentials, "use-service-account-credentials", s.UseServiceAccountCredentials, "If true, use individual service account credentials for each controller.") fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.") - fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") - fs.BoolVar(&s.AllowUntaggedCloud, "allow-untagged-cloud", false, "Allow the cluster to run without the cluster-id on cloud instances. This is a legacy mode of operation and a cluster-id will be required in the future.") - fs.MarkDeprecated("allow-untagged-cloud", "This flag is deprecated and will be removed in a future release. A cluster-id will be required on cloud instances") + fs.StringVar(&s.ExternalCloudVolumePlugin, "external-cloud-volume-plugin", s.ExternalCloudVolumePlugin, "The plugin to use when cloud provider is set to external. Can be empty, should only be set when cloud-provider is external. Currently used to allow node and volume controllers to work for in tree cloud providers.") fs.Int32Var(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. 
Larger number = faster endpoint updating, but more CPU (and network) load") fs.Int32Var(&s.ConcurrentServiceSyncs, "concurrent-service-syncs", s.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load") fs.Int32Var(&s.ConcurrentRCSyncs, "concurrent_rc_syncs", s.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load") fs.Int32Var(&s.ConcurrentRSSyncs, "concurrent-replicaset-syncs", s.ConcurrentRSSyncs, "The number of replica sets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load") + fs.Int32Var(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load") fs.Int32Var(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load") fs.Int32Var(&s.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load") @@ -159,11 +85,9 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabled "This flag is deprecated and will be removed in future releases. 
See node-monitor-period for Node health checking or "+ "route-reconciliation-period for cloud provider's route configuration settings.") fs.MarkDeprecated("node-sync-period", "This flag is currently no-op and will be deleted.") - fs.DurationVar(&s.RouteReconciliationPeriod.Duration, "route-reconciliation-period", s.RouteReconciliationPeriod.Duration, "The period for reconciling routes created for Nodes by cloud provider.") fs.DurationVar(&s.ResourceQuotaSyncPeriod.Duration, "resource-quota-sync-period", s.ResourceQuotaSyncPeriod.Duration, "The period for syncing quota usage status in the system") fs.DurationVar(&s.NamespaceSyncPeriod.Duration, "namespace-sync-period", s.NamespaceSyncPeriod.Duration, "The period for syncing namespace life-cycle updates") fs.DurationVar(&s.PVClaimBinderSyncPeriod.Duration, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod.Duration, "The period for syncing persistent volumes and persistent volume claims") - fs.DurationVar(&s.MinResyncPeriod.Duration, "min-resync-period", s.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod") fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling") fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod") fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to 
ActiveDeadlineSeconds for an NFS scrubber pod") @@ -193,8 +117,6 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabled "where N means number of retries allowed for kubelet to post node status.") fs.DurationVar(&s.NodeStartupGracePeriod.Duration, "node-startup-grace-period", s.NodeStartupGracePeriod.Duration, "Amount of time which we allow starting Node to be unresponsive before marking it unhealthy.") - fs.DurationVar(&s.NodeMonitorPeriod.Duration, "node-monitor-period", s.NodeMonitorPeriod.Duration, - "The period for syncing NodeStatus in NodeController.") fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.") fs.StringVar(&s.ClusterSigningCertFile, "cluster-signing-cert-file", s.ClusterSigningCertFile, "Filename containing a PEM-encoded X509 CA certificate used to issue cluster-scoped certificates") fs.StringVar(&s.ClusterSigningKeyFile, "cluster-signing-key-file", s.ClusterSigningKeyFile, "Filename containing a PEM-encoded RSA or ECDSA private key used to sign cluster-scoped certificates") @@ -202,34 +124,19 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabled var dummy string fs.MarkDeprecated("insecure-experimental-approve-all-kubelet-csrs-for-group", "This flag does nothing.") fs.StringVar(&dummy, "insecure-experimental-approve-all-kubelet-csrs-for-group", "", "This flag does nothing.") - fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/") - fs.BoolVar(&s.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled") - fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster") - fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster. 
Requires --allocate-node-cidrs to be true") fs.StringVar(&s.ServiceCIDR, "service-cluster-ip-range", s.ServiceCIDR, "CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true") fs.Int32Var(&s.NodeCIDRMaskSize, "node-cidr-mask-size", s.NodeCIDRMaskSize, "Mask size for node cidr in cluster.") - fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, - "Should CIDRs for Pods be allocated and set on the cloud provider.") - fs.StringVar(&s.CIDRAllocatorType, "cidr-allocator-type", "RangeAllocator", - "Type of CIDR allocator to use") - fs.BoolVar(&s.ConfigureCloudRoutes, "configure-cloud-routes", true, "Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.") - fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") - fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") fs.StringVar(&s.RootCAFile, "root-ca-file", s.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.") - fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.") - fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver") - fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") - fs.DurationVar(&s.ControllerStartInterval.Duration, "controller-start-interval", s.ControllerStartInterval.Duration, "Interval between starting controller managers.") fs.BoolVar(&s.EnableGarbageCollector, "enable-garbage-collector", s.EnableGarbageCollector, "Enables the generic garbage collector. 
MUST be synced with the corresponding flag of the kube-apiserver.") fs.Int32Var(&s.ConcurrentGCSyncs, "concurrent-gc-syncs", s.ConcurrentGCSyncs, "The number of garbage collector workers that are allowed to sync concurrently.") - fs.Float32Var(&s.NodeEvictionRate, "node-eviction-rate", 0.1, "Number of nodes per second on which pods are deleted in case of node failure when a zone is healthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters.") - fs.Float32Var(&s.SecondaryNodeEvictionRate, "secondary-node-eviction-rate", 0.01, "Number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters. This value is implicitly overridden to 0 if the cluster size is smaller than --large-cluster-size-threshold.") fs.Int32Var(&s.LargeClusterSizeThreshold, "large-cluster-size-threshold", 50, "Number of nodes from which NodeController treats the cluster as large for the eviction logic purposes. --secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller.") fs.Float32Var(&s.UnhealthyZoneThreshold, "unhealthy-zone-threshold", 0.55, "Fraction of Nodes in a zone which needs to be not Ready (minimum 3) for zone to be treated as unhealthy. ") fs.BoolVar(&s.DisableAttachDetachReconcilerSync, "disable-attach-detach-reconcile-sync", false, "Disable volume attach detach reconciler sync. Disabling this may cause volumes to be mismatched with pods. Use wisely.") fs.DurationVar(&s.ReconcilerSyncLoopPeriod.Duration, "attach-detach-reconcile-sync-period", s.ReconcilerSyncLoopPeriod.Duration, "The reconciler sync wait time between volume attach detach. 
This duration must be larger than one second, and increasing this value from the default may allow for volumes to be mismatched with pods.") fs.BoolVar(&s.EnableTaintManager, "enable-taint-manager", s.EnableTaintManager, "WARNING: Beta feature. If set to true enables NoExecute Taints and will evict all not-tolerating Pod running on Nodes tainted with this kind of Taints.") fs.BoolVar(&s.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", s.HorizontalPodAutoscalerUseRESTClients, "WARNING: alpha feature. If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizontal pod autoscaler.") + fs.Float32Var(&s.NodeEvictionRate, "node-eviction-rate", 0.1, "Number of nodes per second on which pods are deleted in case of node failure when a zone is healthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters.") + fs.Float32Var(&s.SecondaryNodeEvictionRate, "secondary-node-eviction-rate", 0.01, "Number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters. 
This value is implicitly overridden to 0 if the cluster size is smaller than --large-cluster-size-threshold.") leaderelectionconfig.BindFlags(&s.LeaderElection, fs) diff --git a/cmd/kube-controller-manager/app/options/options_test.go b/cmd/kube-controller-manager/app/options/options_test.go index 87f92d5a9c0..904535121f7 100644 --- a/cmd/kube-controller-manager/app/options/options_test.go +++ b/cmd/kube-controller-manager/app/options/options_test.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/diff" + cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" "k8s.io/kubernetes/pkg/apis/componentconfig" ) @@ -110,102 +111,104 @@ func TestAddFlags(t *testing.T) { sort.Sort(sortedGCIgnoredResources(s.GCIgnoredResources)) expected := &CMServer{ - KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{ - Port: 10000, - Address: "192.168.4.10", - AllocateNodeCIDRs: true, - CloudConfigFile: "/cloud-config", - CloudProvider: "gce", - ClusterCIDR: "1.2.3.4/24", - ClusterName: "k8s", - ConcurrentDeploymentSyncs: 10, - ConcurrentEndpointSyncs: 10, - ConcurrentGCSyncs: 30, - ConcurrentNamespaceSyncs: 20, - ConcurrentRSSyncs: 10, - ConcurrentResourceQuotaSyncs: 10, - ConcurrentServiceSyncs: 2, - ConcurrentSATokenSyncs: 10, - ConcurrentRCSyncs: 10, - ConfigureCloudRoutes: false, - EnableContentionProfiling: true, - ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute}, - ConcurrentDaemonSetSyncs: 2, - ConcurrentJobSyncs: 5, - DeletingPodsQps: 0.1, - EnableProfiling: false, - CIDRAllocatorType: "CloudAllocator", - NodeCIDRMaskSize: 48, - ServiceSyncPeriod: metav1.Duration{Duration: 2 * time.Minute}, - ResourceQuotaSyncPeriod: metav1.Duration{Duration: 10 * time.Minute}, - NamespaceSyncPeriod: metav1.Duration{Duration: 10 * time.Minute}, - PVClaimBinderSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, - HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 
45 * time.Second}, - DeploymentControllerSyncPeriod: metav1.Duration{Duration: 45 * time.Second}, - MinResyncPeriod: metav1.Duration{Duration: 8 * time.Hour}, - RegisterRetryCount: 10, - RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second}, - PodEvictionTimeout: metav1.Duration{Duration: 2 * time.Minute}, - NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second}, - NodeStartupGracePeriod: metav1.Duration{Duration: 30 * time.Second}, - NodeMonitorPeriod: metav1.Duration{Duration: 10 * time.Second}, - HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 1 * time.Minute}, - HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 2 * time.Minute}, - HorizontalPodAutoscalerTolerance: 0.1, - TerminatedPodGCThreshold: 12000, - VolumeConfiguration: componentconfig.VolumeConfiguration{ - EnableDynamicProvisioning: false, - EnableHostPathProvisioning: true, - FlexVolumePluginDir: "/flex-volume-plugin", - PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{ - MaximumRetry: 3, - MinimumTimeoutNFS: 200, - IncrementTimeoutNFS: 45, - MinimumTimeoutHostPath: 45, - IncrementTimeoutHostPath: 45, + ControllerManagerServer: cmoptions.ControllerManagerServer{ + KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{ + Port: 10000, + Address: "192.168.4.10", + AllocateNodeCIDRs: true, + CloudConfigFile: "/cloud-config", + CloudProvider: "gce", + ClusterCIDR: "1.2.3.4/24", + ClusterName: "k8s", + ConcurrentDeploymentSyncs: 10, + ConcurrentEndpointSyncs: 10, + ConcurrentGCSyncs: 30, + ConcurrentNamespaceSyncs: 20, + ConcurrentRSSyncs: 10, + ConcurrentResourceQuotaSyncs: 10, + ConcurrentServiceSyncs: 2, + ConcurrentSATokenSyncs: 10, + ConcurrentRCSyncs: 10, + ConfigureCloudRoutes: false, + EnableContentionProfiling: true, + ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute}, + ConcurrentDaemonSetSyncs: 2, + ConcurrentJobSyncs: 5, + 
DeletingPodsQps: 0.1, + EnableProfiling: false, + CIDRAllocatorType: "CloudAllocator", + NodeCIDRMaskSize: 48, + ServiceSyncPeriod: metav1.Duration{Duration: 2 * time.Minute}, + ResourceQuotaSyncPeriod: metav1.Duration{Duration: 10 * time.Minute}, + NamespaceSyncPeriod: metav1.Duration{Duration: 10 * time.Minute}, + PVClaimBinderSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, + HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 45 * time.Second}, + DeploymentControllerSyncPeriod: metav1.Duration{Duration: 45 * time.Second}, + MinResyncPeriod: metav1.Duration{Duration: 8 * time.Hour}, + RegisterRetryCount: 10, + RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second}, + PodEvictionTimeout: metav1.Duration{Duration: 2 * time.Minute}, + NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second}, + NodeStartupGracePeriod: metav1.Duration{Duration: 30 * time.Second}, + NodeMonitorPeriod: metav1.Duration{Duration: 10 * time.Second}, + HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 1 * time.Minute}, + HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 2 * time.Minute}, + HorizontalPodAutoscalerTolerance: 0.1, + TerminatedPodGCThreshold: 12000, + VolumeConfiguration: componentconfig.VolumeConfiguration{ + EnableDynamicProvisioning: false, + EnableHostPathProvisioning: true, + FlexVolumePluginDir: "/flex-volume-plugin", + PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{ + MaximumRetry: 3, + MinimumTimeoutNFS: 200, + IncrementTimeoutNFS: 45, + MinimumTimeoutHostPath: 45, + IncrementTimeoutHostPath: 45, + }, }, + ContentType: "application/json", + KubeAPIQPS: 50.0, + KubeAPIBurst: 100, + LeaderElection: componentconfig.LeaderElectionConfiguration{ + ResourceLock: "configmap", + LeaderElect: false, + LeaseDuration: metav1.Duration{Duration: 30 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 15 * time.Second}, + RetryPeriod: 
metav1.Duration{Duration: 5 * time.Second}, + }, + ClusterSigningCertFile: "/cluster-signing-cert", + ClusterSigningKeyFile: "/cluster-signing-key", + ServiceAccountKeyFile: "/service-account-private-key", + ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour}, + EnableGarbageCollector: false, + GCIgnoredResources: []componentconfig.GroupResource{ + {Group: "extensions", Resource: "replicationcontrollers"}, + {Group: "", Resource: "bindings"}, + {Group: "", Resource: "componentstatuses"}, + {Group: "", Resource: "events"}, + {Group: "authentication.k8s.io", Resource: "tokenreviews"}, + {Group: "authorization.k8s.io", Resource: "subjectaccessreviews"}, + {Group: "authorization.k8s.io", Resource: "selfsubjectaccessreviews"}, + {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}, + {Group: "authorization.k8s.io", Resource: "selfsubjectrulesreviews"}, + {Group: "apiregistration.k8s.io", Resource: "apiservices"}, + {Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}, + }, + NodeEvictionRate: 0.2, + SecondaryNodeEvictionRate: 0.05, + LargeClusterSizeThreshold: 100, + UnhealthyZoneThreshold: 0.6, + DisableAttachDetachReconcilerSync: true, + ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 30 * time.Second}, + Controllers: []string{"foo", "bar"}, + EnableTaintManager: false, + HorizontalPodAutoscalerUseRESTClients: true, + UseServiceAccountCredentials: true, }, - ContentType: "application/json", - KubeAPIQPS: 50.0, - KubeAPIBurst: 100, - LeaderElection: componentconfig.LeaderElectionConfiguration{ - ResourceLock: "configmap", - LeaderElect: false, - LeaseDuration: metav1.Duration{Duration: 30 * time.Second}, - RenewDeadline: metav1.Duration{Duration: 15 * time.Second}, - RetryPeriod: metav1.Duration{Duration: 5 * time.Second}, - }, - ClusterSigningCertFile: "/cluster-signing-cert", - ClusterSigningKeyFile: "/cluster-signing-key", - ServiceAccountKeyFile: "/service-account-private-key", - ClusterSigningDuration: 
metav1.Duration{Duration: 10 * time.Hour}, - EnableGarbageCollector: false, - GCIgnoredResources: []componentconfig.GroupResource{ - {Group: "extensions", Resource: "replicationcontrollers"}, - {Group: "", Resource: "bindings"}, - {Group: "", Resource: "componentstatuses"}, - {Group: "", Resource: "events"}, - {Group: "authentication.k8s.io", Resource: "tokenreviews"}, - {Group: "authorization.k8s.io", Resource: "subjectaccessreviews"}, - {Group: "authorization.k8s.io", Resource: "selfsubjectaccessreviews"}, - {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}, - {Group: "authorization.k8s.io", Resource: "selfsubjectrulesreviews"}, - {Group: "apiregistration.k8s.io", Resource: "apiservices"}, - {Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}, - }, - NodeEvictionRate: 0.2, - SecondaryNodeEvictionRate: 0.05, - LargeClusterSizeThreshold: 100, - UnhealthyZoneThreshold: 0.6, - DisableAttachDetachReconcilerSync: true, - ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 30 * time.Second}, - Controllers: []string{"foo", "bar"}, - EnableTaintManager: false, - HorizontalPodAutoscalerUseRESTClients: true, - UseServiceAccountCredentials: true, + Kubeconfig: "/kubeconfig", + Master: "192.168.4.20", }, - Kubeconfig: "/kubeconfig", - Master: "192.168.4.20", } // Sort GCIgnoredResources because it's built from a map, which means the diff --git a/pkg/apis/componentconfig/types.go b/pkg/apis/componentconfig/types.go index 4a57eec48c2..4788176be67 100644 --- a/pkg/apis/componentconfig/types.go +++ b/pkg/apis/componentconfig/types.go @@ -181,6 +181,9 @@ type KubeControllerManagerConfiguration struct { CloudProvider string // cloudConfigFile is the path to the cloud provider configuration file. CloudConfigFile string + // externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external". + // It is currently used by the in repo cloud providers to handle node and volume control in the KCM. 
+ ExternalCloudVolumePlugin string // run with untagged cloud instances AllowUntaggedCloud bool // concurrentEndpointSyncs is the number of endpoint syncing operations diff --git a/pkg/controller/node/node_controller.go b/pkg/controller/node/node_controller.go index cd8b0e219e5..ab490d6a052 100644 --- a/pkg/controller/node/node_controller.go +++ b/pkg/controller/node/node_controller.go @@ -259,7 +259,7 @@ func NewNodeController( } mask := clusterCIDR.Mask if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize { - glog.Fatal("Controller: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.") + glog.Fatalf("Controller: Invalid clusterCIDR, mask size of clusterCIDR(%d) must be less than nodeCIDRMaskSize(%d).", maskSize, nodeCIDRMaskSize) } } From 3fa0a781af9d3f8339aaa085554b1c3f3149b17a Mon Sep 17 00:00:00 2001 From: Isaac Hollander McCreery Date: Mon, 18 Dec 2017 11:33:21 -0800 Subject: [PATCH 385/794] Add comment to gce config files advising to not use empty scopes --- cluster/gce/config-common.sh | 2 ++ cluster/gce/config-default.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/cluster/gce/config-common.sh b/cluster/gce/config-common.sh index 4c1bc3abeae..1515d3e0799 100644 --- a/cluster/gce/config-common.sh +++ b/cluster/gce/config-common.sh @@ -98,4 +98,6 @@ function get-cluster-ip-range { echo "${suggested_range}" } +# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account +# in order to initialize properly. NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}" diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index bd3fe8ceaf9..80c73415dd9 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -121,6 +121,8 @@ MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" # It is the primary range in the subnet and is the range used for node instance IPs. 
NODE_IP_RANGE="$(get-node-ip-range)" +# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account +# in order to initialize properly. NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}" # Extra docker options for nodes. From f65b70979421b9bd12aefe630eb1a856665d0c72 Mon Sep 17 00:00:00 2001 From: Tim Allclair Date: Mon, 18 Dec 2017 11:47:05 -0800 Subject: [PATCH 386/794] Add PodSecurityPolicy OWNERS --- plugin/pkg/admission/security/podsecuritypolicy/OWNERS | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 plugin/pkg/admission/security/podsecuritypolicy/OWNERS diff --git a/plugin/pkg/admission/security/podsecuritypolicy/OWNERS b/plugin/pkg/admission/security/podsecuritypolicy/OWNERS new file mode 100644 index 00000000000..4a0052da6c3 --- /dev/null +++ b/plugin/pkg/admission/security/podsecuritypolicy/OWNERS @@ -0,0 +1,6 @@ +approvers: +- tallclair +- liggitt +reviewers: +- pweil- +- php-coder From 541a9b42b1bc069d8e09cb62546c5fcab5a40094 Mon Sep 17 00:00:00 2001 From: David Eads Date: Mon, 18 Dec 2017 14:50:09 -0500 Subject: [PATCH 387/794] add watch to requirements for quota-able resources --- pkg/controller/resourcequota/resource_quota_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index 7ac8d350ac7..b2ae6d1f6e2 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -469,7 +469,7 @@ func GetQuotableResources(discoveryFunc NamespacedResourcesFunc) (map[schema.Gro if err != nil { return nil, fmt.Errorf("failed to discover resources: %v", err) } - quotableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"create", "list", "delete"}}, possibleResources) + quotableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"create", "list", "watch", 
"delete"}}, possibleResources) quotableGroupVersionResources, err := discovery.GroupVersionResources(quotableResources) if err != nil { return nil, fmt.Errorf("Failed to parse resources: %v", err) From 5256e26f685066c07d12bf97bdb2d6bf1b7d6ad8 Mon Sep 17 00:00:00 2001 From: Tim Pepper Date: Mon, 20 Nov 2017 15:10:40 -0800 Subject: [PATCH 388/794] e2e_node: use mktemp when building nsenter on trusty There's a bit of a hack in place to insure nsenter is present on Ubuntu trusty, which doesn't otherwise include it. This downloads util-linux to a hard coded directory in /tmp which is a bad practice. Even though "this is just a test case" it should properly use mktemp. Signed-off-by: Tim Pepper --- test/e2e_node/environment/setup_host.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/e2e_node/environment/setup_host.sh b/test/e2e_node/environment/setup_host.sh index 7b588435540..5c39895d6a9 100755 --- a/test/e2e_node/environment/setup_host.sh +++ b/test/e2e_node/environment/setup_host.sh @@ -57,8 +57,8 @@ cat /etc/*-release | grep "ID=ubuntu" if [ $? -eq 0 ]; then if ! which nsenter > /dev/null; then echo "Do not find nsenter. Install it." - mkdir -p /tmp/nsenter-install - cd /tmp/nsenter-install + NSENTER_BUILD_DIR=$(mktemp -d /tmp/nsenter-build-XXXXXX) + cd $NSENTER_BUILD_DIR curl https://www.kernel.org/pub/linux/utils/util-linux/v2.24/util-linux-2.24.tar.gz | tar -zxf- sudo apt-get update sudo apt-get --yes install make @@ -67,7 +67,7 @@ if [ $? 
-eq 0 ]; then ./configure --without-ncurses make nsenter sudo cp nsenter /usr/local/bin - rm -rf /tmp/nsenter-install + rm -rf $NSENTER_BUILD_DIR fi fi From 3f4294cdf9c5ff50036558d56092af49d4fe0ed5 Mon Sep 17 00:00:00 2001 From: Tim Pepper Date: Mon, 20 Nov 2017 15:50:48 -0800 Subject: [PATCH 389/794] e2e_node: use newer util-linux The e2e_node test environment setup script is hard coded to pull down a quite old version of util-linux in order to build nsenter on trusty, which sadly is well known to not include an nsenter executable. While "just a test", it's unfortunate to be building from really old util-linux sources when newer are available. Signed-off-by: Tim Pepper --- test/e2e_node/environment/setup_host.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e_node/environment/setup_host.sh b/test/e2e_node/environment/setup_host.sh index 5c39895d6a9..2069a3a4947 100755 --- a/test/e2e_node/environment/setup_host.sh +++ b/test/e2e_node/environment/setup_host.sh @@ -59,11 +59,11 @@ if [ $? -eq 0 ]; then echo "Do not find nsenter. Install it." NSENTER_BUILD_DIR=$(mktemp -d /tmp/nsenter-build-XXXXXX) cd $NSENTER_BUILD_DIR - curl https://www.kernel.org/pub/linux/utils/util-linux/v2.24/util-linux-2.24.tar.gz | tar -zxf- + curl https://www.kernel.org/pub/linux/utils/util-linux/v2.31/util-linux-2.31.tar.gz | tar -zxf- sudo apt-get update sudo apt-get --yes install make sudo apt-get --yes install gcc - cd util-linux-2.24 + cd util-linux-2.31 ./configure --without-ncurses make nsenter sudo cp nsenter /usr/local/bin From 5d270b1583a0bb3bac90468ea1ab5c042cb2afa6 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Mon, 18 Dec 2017 13:23:59 -0800 Subject: [PATCH 390/794] Add CHANGELOG-1.10.md for v1.10.0-alpha.1. 
--- CHANGELOG-1.10.md | 104 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 CHANGELOG-1.10.md diff --git a/CHANGELOG-1.10.md b/CHANGELOG-1.10.md new file mode 100644 index 00000000000..cec21139e65 --- /dev/null +++ b/CHANGELOG-1.10.md @@ -0,0 +1,104 @@ + +- [v1.10.0-alpha.1](#v1100-alpha1) + - [Downloads for v1.10.0-alpha.1](#downloads-for-v1100-alpha1) + - [Client Binaries](#client-binaries) + - [Server Binaries](#server-binaries) + - [Node Binaries](#node-binaries) + - [Changelog since v1.9.0](#changelog-since-v190) + - [Action Required](#action-required) + - [Other notable changes](#other-notable-changes) + + + + + +# v1.10.0-alpha.1 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) + +## Downloads for v1.10.0-alpha.1 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes.tar.gz) | `403b90bfa32f7669b326045a629bd15941c533addcaf0c49d3c3c561da0542f2` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-src.tar.gz) | `266da065e9eddf19d36df5ad325f2f854101a0e712766148e87d998e789b80cf` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-client-darwin-386.tar.gz) | `5aaa8e294ae4060d34828239e37f37b45fa5a69508374be668965102848626be` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | `40a8e3bab11b88a2bb8e748f0b29da806d89b55775508039abe9c38c5f4ab97d` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-client-linux-386.tar.gz) | `e08dde0b561529f0b2bb39c141f4d7b1c943749ef7c1f9779facf5fb5b385d6a` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | `76a05d31acaab932ef45c67e1d6c9273933b8bc06dd5ce9bad3c7345d5267702` 
+[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | `4b833c9e80f3e4ac4958ea0ffb5ae564b31d2a524f6a14e58802937b2b936d73` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | `f1484ab75010a2258ed7717b1284d0c139d17e194ac9e391b8f1c0999eec3c2d` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | `da884f09ec753925b2c1f27ea0a1f6c3da2056855fc88f47929bb3d6c2a09312` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | `c486f760c6707fc92d1659d3cbe33d68c03190760b73ac215957ee52f9c19195` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-client-windows-386.tar.gz) | `514c550b7ff85ac33e6ed333bcc06461651fe4004d8b7c12ca67f5dc1d2198bf` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | `ddad59222f6a8cb4e88c4330c2a967c4126cb22ac5e0d7126f9f65cca0fb9f45` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | `514efd798ce1d7fe4233127f3334a3238faad6c26372a2d457eff02cbe72d756` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | `f71f75fb96221f65891fc3e04fd52ae4e5628da8b7b4fbedece3fab4cb650afa` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | `a9d8c2386813fd690e60623a6ee1968fe8f0a1a8e13bc5cc12b2caf8e8a862e1` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | `21336a5e40aead4e2ec7e744a99d72bf8cb552341f3141abf8f235beb250cd93` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) 
| `257e44d38fef83f08990b6b9b5e985118e867c0c33f0e869f0900397b9d30498` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | `97bf1210f0595ebf496ca7b000c4367f8a459d97ef72459efc6d0e07a072398f` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | `eebcd3c14fb4faeb82ab047a2152db528adc2d9f7b20eef6f5dc58202ebe3124` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | `3d4428416c775a0a6463f623286bd2ecdf9240ce901e1fbae180dfb564c53ea1` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | `5cc96b24fad0ac1779a66f9b136d90e975b07bf619fea905e6c26ac5a4c41168` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | `134c13338edf4efcd511f4161742fbaa6dc232965d3d926c3de435e8a080fcbb` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.10.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | `ae54bf2bbcb99cdcde959140460d0f83c0ecb187d060b594ae9c5349960ab055` + +## Changelog since v1.9.0 + +### Action Required + +* [action required] Remove the kubelet's `--cloud-provider=auto-detect` feature ([#56287](https://github.com/kubernetes/kubernetes/pull/56287), [@stewart-yu](https://github.com/stewart-yu)) + +### Other notable changes + +* Fix Heapster configuration and Metrics Server configuration to enable overriding default resource requirements. ([#56965](https://github.com/kubernetes/kubernetes/pull/56965), [@kawych](https://github.com/kawych)) +* YAMLDecoder Read now returns the number of bytes read ([#57000](https://github.com/kubernetes/kubernetes/pull/57000), [@sel](https://github.com/sel)) +* Retry 'connection refused' errors when setting up clusters on GCE. 
([#57324](https://github.com/kubernetes/kubernetes/pull/57324), [@mborsz](https://github.com/mborsz)) +* Update kubeadm's minimum supported Kubernetes version in v1.10.x to v1.9.0 ([#57233](https://github.com/kubernetes/kubernetes/pull/57233), [@xiangpengzhao](https://github.com/xiangpengzhao)) +* Graduate CPU Manager feature from alpha to beta. ([#55977](https://github.com/kubernetes/kubernetes/pull/55977), [@ConnorDoyle](https://github.com/ConnorDoyle)) +* Drop hacks used for Mesos integration that was already removed from main kubernetes repository ([#56754](https://github.com/kubernetes/kubernetes/pull/56754), [@dims](https://github.com/dims)) +* Compare correct file names for volume detach operation ([#57053](https://github.com/kubernetes/kubernetes/pull/57053), [@prashima](https://github.com/prashima)) +* Improved event generation in volume mount, attach, and extend operations ([#56872](https://github.com/kubernetes/kubernetes/pull/56872), [@davidz627](https://github.com/davidz627)) +* GCE: bump COS image version to cos-stable-63-10032-71-0 ([#57204](https://github.com/kubernetes/kubernetes/pull/57204), [@yujuhong](https://github.com/yujuhong)) +* fluentd-gcp updated to version 2.0.11. ([#56927](https://github.com/kubernetes/kubernetes/pull/56927), [@x13n](https://github.com/x13n)) +* calico-node addon tolerates all NoExecute and NoSchedule taints by default. ([#57122](https://github.com/kubernetes/kubernetes/pull/57122), [@caseydavenport](https://github.com/caseydavenport)) +* Support LoadBalancer for Azure Virtual Machine Scale Sets ([#57131](https://github.com/kubernetes/kubernetes/pull/57131), [@feiskyer](https://github.com/feiskyer)) +* Makes the kube-dns addon optional so that users can deploy their own DNS solution. ([#57113](https://github.com/kubernetes/kubernetes/pull/57113), [@wwwtyro](https://github.com/wwwtyro)) +* Enabled log rotation for load balancer's api logs to prevent running out of disk space. 
([#56979](https://github.com/kubernetes/kubernetes/pull/56979), [@hyperbolic2346](https://github.com/hyperbolic2346)) +* Remove ScrubDNS interface from cloudprovider. ([#56955](https://github.com/kubernetes/kubernetes/pull/56955), [@feiskyer](https://github.com/feiskyer)) +* Fix `etcd-version-monitor` to backward compatibly support etcd 3.1 [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus) metrics format. ([#56871](https://github.com/kubernetes/kubernetes/pull/56871), [@jpbetz](https://github.com/jpbetz)) +* enable flexvolume on Windows node ([#56921](https://github.com/kubernetes/kubernetes/pull/56921), [@andyzhangx](https://github.com/andyzhangx)) +* When using Role-Based Access Control, the "admin", "edit", and "view" roles now have the expected permissions on NetworkPolicy resources. ([#56650](https://github.com/kubernetes/kubernetes/pull/56650), [@danwinship](https://github.com/danwinship)) +* Fix the PersistentVolumeLabel controller from initializing the PV labels when it's not the next pending initializer. ([#56831](https://github.com/kubernetes/kubernetes/pull/56831), [@jhorwit2](https://github.com/jhorwit2)) +* kube-apiserver: The external hostname no longer longer use the cloud provider API to select a default. It can be set explicitly using --external-hostname, if needed. ([#56812](https://github.com/kubernetes/kubernetes/pull/56812), [@dims](https://github.com/dims)) +* Use GiB unit for creating and resizing volumes for Glusterfs ([#56581](https://github.com/kubernetes/kubernetes/pull/56581), [@gnufied](https://github.com/gnufied)) +* PersistentVolume flexVolume sources can now reference secrets in a namespace other than the PersistentVolumeClaim's namespace. ([#56460](https://github.com/kubernetes/kubernetes/pull/56460), [@liggitt](https://github.com/liggitt)) +* Scheduler skips pods that use a PVC that either does not exist or is being deleted. 
([#55957](https://github.com/kubernetes/kubernetes/pull/55957), [@jsafrane](https://github.com/jsafrane)) +* Fixed a garbage collection race condition where objects with ownerRefs pointing to cluster-scoped objects could be deleted incorrectly. ([#57211](https://github.com/kubernetes/kubernetes/pull/57211), [@liggitt](https://github.com/liggitt)) +* Kubectl explain now prints out the Kind and API version of the resource being explained ([#55689](https://github.com/kubernetes/kubernetes/pull/55689), [@luksa](https://github.com/luksa)) +* api-server provides specific events when unable to repair a service cluster ip or node port ([#54304](https://github.com/kubernetes/kubernetes/pull/54304), [@frodenas](https://github.com/frodenas)) +* Added docker-logins config to kubernetes-worker charm ([#56217](https://github.com/kubernetes/kubernetes/pull/56217), [@Cynerva](https://github.com/Cynerva)) +* delete useless params containerized ([#56146](https://github.com/kubernetes/kubernetes/pull/56146), [@jiulongzaitian](https://github.com/jiulongzaitian)) +* add mount options support for azure disk ([#56147](https://github.com/kubernetes/kubernetes/pull/56147), [@andyzhangx](https://github.com/andyzhangx)) +* Use structured generator for kubectl autoscale ([#55913](https://github.com/kubernetes/kubernetes/pull/55913), [@wackxu](https://github.com/wackxu)) +* K8s supports cephfs fuse mount. ([#55866](https://github.com/kubernetes/kubernetes/pull/55866), [@zhangxiaoyu-zidif](https://github.com/zhangxiaoyu-zidif)) +* COS: Keep the docker network checkpoint ([#54805](https://github.com/kubernetes/kubernetes/pull/54805), [@yujuhong](https://github.com/yujuhong)) +* Fixed documentation typo in IPVS README. 
([#56578](https://github.com/kubernetes/kubernetes/pull/56578), [@shift](https://github.com/shift)) + From da05fbe03642c6c5a9964df69faa2a4d30bb3a07 Mon Sep 17 00:00:00 2001 From: p0lyn0mial Date: Mon, 20 Nov 2017 18:44:07 +0100 Subject: [PATCH 391/794] adds generic scaler to kubectl the implementation uses a polymorphic scale client capable of operating against scale subresources which can be found here https://github.com/kubernetes/client-go/tree/master/scale --- pkg/kubectl/BUILD | 8 + pkg/kubectl/scale.go | 93 ++++++++++++ pkg/kubectl/scale_test.go | 297 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 398 insertions(+) diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index ae10e5f14f9..c0180bf03c4 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -56,6 +56,7 @@ go_test( "//pkg/printers:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", @@ -68,18 +69,24 @@ go_test( "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/discovery/fake:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/rest/fake:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", ], @@ -183,6 +190,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", "//vendor/k8s.io/client-go/util/jsonpath:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", diff --git a/pkg/kubectl/scale.go b/pkg/kubectl/scale.go index da6d4fbb796..1d4165f9626 100644 --- a/pkg/kubectl/scale.go +++ b/pkg/kubectl/scale.go @@ -21,6 +21,7 @@ import ( "strconv" "time" + autoscalingapi "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -31,6 +32,8 @@ import ( "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" + + scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" batchclient 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" @@ -516,3 +519,93 @@ func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, prec } return nil } + +// validateGeneric ensures that the preconditions match. Returns nil if they are valid, otherwise an error +// TODO(p0lyn0mial): when the work on GenericScaler is done, rename validateGeneric to validate +func (precondition *ScalePrecondition) validateGeneric(scale *autoscalingapi.Scale) error { + if precondition.Size != -1 && int(scale.Spec.Replicas) != precondition.Size { + return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(scale.Spec.Replicas))} + } + if len(precondition.ResourceVersion) > 0 && scale.ResourceVersion != precondition.ResourceVersion { + return PreconditionError{"resource version", precondition.ResourceVersion, scale.ResourceVersion} + } + return nil +} + +// GenericScaler can update scales for resources in a particular namespace +// TODO(o0lyn0mial): when the work on GenericScaler is done, don't +// export the GenericScaler. Instead use ScalerFor method for getting the Scaler +// also update the UTs +type GenericScaler struct { + scaleNamespacer scaleclient.ScalesGetter + targetGR schema.GroupResource +} + +var _ Scaler = &GenericScaler{} + +// ScaleSimple updates a scale of a given resource. It returns the resourceVersion of the scale if the update was successful. 
+func (s *GenericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error) { + scale, err := s.scaleNamespacer.Scales(namespace).Get(s.targetGR, name) + if err != nil { + return "", ScaleError{ScaleGetFailure, "", err} + } + if preconditions != nil { + if err := preconditions.validateGeneric(scale); err != nil { + return "", err + } + } + + scale.Spec.Replicas = int32(newSize) + updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(s.targetGR, scale) + if err != nil { + if errors.IsConflict(err) { + return "", ScaleError{ScaleUpdateConflictFailure, scale.ResourceVersion, err} + } + return "", ScaleError{ScaleUpdateFailure, scale.ResourceVersion, err} + } + return updatedScale.ResourceVersion, nil +} + +// Scale updates a scale of a given resource to a new size, with optional precondition check (if preconditions is not nil), +// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count. 
+func (s *GenericScaler) Scale(namespace, resourceName string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { + if preconditions == nil { + preconditions = &ScalePrecondition{-1, ""} + } + if retry == nil { + // make it try only once, immediately + retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} + } + cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil) + if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil { + return err + } + if waitForReplicas != nil { + err := wait.PollImmediate( + waitForReplicas.Interval, + waitForReplicas.Timeout, + scaleHasDesiredReplicas(s.scaleNamespacer, s.targetGR, resourceName, namespace, int32(newSize))) + if err == wait.ErrWaitTimeout { + return fmt.Errorf("timed out waiting for %q to be synced", resourceName) + } + return err + } + return nil +} + +// scaleHasDesiredReplicas returns a condition that will be true if and only if the desired replica +// count for a scale (Spec) equals its updated replicas count (Status) +func scaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, desiredReplicas int32) wait.ConditionFunc { + return func() (bool, error) { + actualScale, err := sClient.Scales(namespace).Get(gr, resourceName) + if err != nil { + return false, err + } + // this means the desired scale target has been reset by something else + if actualScale.Spec.Replicas != desiredReplicas { + return true, nil + } + return actualScale.Spec.Replicas == actualScale.Status.Replicas && + desiredReplicas == actualScale.Status.Replicas, nil + } +} diff --git a/pkg/kubectl/scale_test.go b/pkg/kubectl/scale_test.go index ba177982f20..1d9b5119eab 100644 --- a/pkg/kubectl/scale_test.go +++ b/pkg/kubectl/scale_test.go @@ -17,11 +17,28 @@ limitations under the License. 
package kubectl import ( + "bytes" + "encoding/json" "errors" + "fmt" + "io" + "io/ioutil" + "net/http" "testing" + "time" + appsv1beta2 "k8s.io/api/apps/v1beta2" kerrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/discovery" + fakedisco "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/dynamic" + fakerest "k8s.io/client-go/rest/fake" + "k8s.io/client-go/scale" testcore "k8s.io/client-go/testing" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" @@ -1310,3 +1327,283 @@ func TestValidateReplicaSets(t *testing.T) { } } } + +// TestGenericScaleSimple exercises GenericScaler.ScaleSimple method +func TestGenericScaleSimple(t *testing.T) { + // test data + discoveryResources := []*metav1.APIResourceList{ + { + GroupVersion: appsv1beta2.SchemeGroupVersion.String(), + APIResources: []metav1.APIResource{ + {Name: "deployments", Namespaced: true, Kind: "Deployment"}, + {Name: "deployments/scale", Namespaced: true, Kind: "Scale", Group: "apps", Version: "v1beta2"}, + }, + }, + } + appsV1beta2Scale := &appsv1beta2.Scale{ + TypeMeta: metav1.TypeMeta{ + Kind: "Scale", + APIVersion: appsv1beta2.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "abc", + }, + Spec: appsv1beta2.ScaleSpec{Replicas: 10}, + Status: appsv1beta2.ScaleStatus{ + Replicas: 10, + }, + } + pathsResources := map[string]runtime.Object{ + "/apis/apps/v1beta2/namespaces/default/deployments/abc/scale": appsV1beta2Scale, + } + + scaleClient, err := fakeScaleClient(discoveryResources, pathsResources) + if err != nil { + t.Fatal(err) + } + + // test scenarios + scenarios := []struct { + name string + precondition ScalePrecondition + newSize int + targetGR schema.GroupResource + resName string + scaleGetter scale.ScalesGetter + expectError bool 
+ }{ + // scenario 1: scale up the "abc" deployment + { + name: "scale up the \"abc\" deployment", + precondition: ScalePrecondition{10, ""}, + newSize: 20, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + resName: "abc", + scaleGetter: scaleClient, + }, + // scenario 2: scale down the "abc" deployment + { + name: "scale down the \"abs\" deplyment", + precondition: ScalePrecondition{20, ""}, + newSize: 5, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + resName: "abc", + scaleGetter: scaleClient, + }, + // scenario 3: precondition error, expected size is 1, + // note that the previous scenario (2) set the size to 5 + { + name: "precondition error, expected size is 1", + precondition: ScalePrecondition{1, ""}, + newSize: 5, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + resName: "abc", + scaleGetter: scaleClient, + expectError: true, + }, + // scenario 4: precondition is not validated when the precondition size is set to -1 + { + name: "precondition is not validated when the size is set to -1", + precondition: ScalePrecondition{-1, ""}, + newSize: 5, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + resName: "abc", + scaleGetter: scaleClient, + }, + // scenario 5: precondition error, resource version mismatch + { + name: "precondition error, resource version mismatch", + precondition: ScalePrecondition{5, "v1"}, + newSize: 5, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + resName: "abc", + scaleGetter: scaleClient, + expectError: true, + }, + } + + // act + for index, scenario := range scenarios { + t.Run(fmt.Sprintf("running scenario %d: %s", index+1, scenario.name), func(t *testing.T) { + target := GenericScaler{scenario.scaleGetter, scenario.targetGR} + + resVersion, err := target.ScaleSimple("default", scenario.resName, &scenario.precondition, uint(scenario.newSize)) + + if scenario.expectError && err == nil { + t.Fatal("expeced an 
error but was not returned") + } + if !scenario.expectError && err != nil { + t.Fatalf("unexpected error: %v", err) + } + if resVersion != "" { + t.Fatalf("unexpected resource version returned = %s, wanted = %s", resVersion, "") + } + }) + } +} + +// TestGenericScale exercises GenericScaler.Scale method +func TestGenericScale(t *testing.T) { + // test data + discoveryResources := []*metav1.APIResourceList{ + { + GroupVersion: appsv1beta2.SchemeGroupVersion.String(), + APIResources: []metav1.APIResource{ + {Name: "deployments", Namespaced: true, Kind: "Deployment"}, + {Name: "deployments/scale", Namespaced: true, Kind: "Scale", Group: "apps", Version: "v1beta2"}, + }, + }, + } + appsV1beta2Scale := &appsv1beta2.Scale{ + TypeMeta: metav1.TypeMeta{ + Kind: "Scale", + APIVersion: appsv1beta2.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "abc", + }, + Spec: appsv1beta2.ScaleSpec{Replicas: 10}, + Status: appsv1beta2.ScaleStatus{ + Replicas: 10, + }, + } + pathsResources := map[string]runtime.Object{ + "/apis/apps/v1beta2/namespaces/default/deployments/abc/scale": appsV1beta2Scale, + } + + scaleClient, err := fakeScaleClient(discoveryResources, pathsResources) + if err != nil { + t.Fatal(err) + } + + // test scenarios + scenarios := []struct { + name string + precondition ScalePrecondition + newSize int + targetGR schema.GroupResource + resName string + scaleGetter scale.ScalesGetter + waitForReplicas *RetryParams + expectError bool + }{ + // scenario 1: scale up the "abc" deployment + { + name: "scale up the \"abc\" deployment", + precondition: ScalePrecondition{10, ""}, + newSize: 20, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + resName: "abc", + scaleGetter: scaleClient, + }, + // scenario 2: a resource name cannot be empty + { + name: "a resource name cannot be empty", + precondition: ScalePrecondition{10, ""}, + newSize: 20, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + resName: 
"", + scaleGetter: scaleClient, + expectError: true, + }, + // scenario 3: wait for replicas error due to status.Replicas != spec.Replicas + { + name: "wait for replicas error due to status.Replicas != spec.Replicas", + precondition: ScalePrecondition{10, ""}, + newSize: 20, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + resName: "abc", + scaleGetter: scaleClient, + waitForReplicas: &RetryParams{time.Duration(5 * time.Second), time.Duration(5 * time.Second)}, + expectError: true, + }, + } + + // act + for index, scenario := range scenarios { + t.Run(fmt.Sprintf("running scenario %d: %s", index+1, scenario.name), func(t *testing.T) { + target := GenericScaler{scenario.scaleGetter, scenario.targetGR} + + err := target.Scale("default", scenario.resName, uint(scenario.newSize), &scenario.precondition, nil, scenario.waitForReplicas) + + if scenario.expectError && err == nil { + t.Fatal("expeced an error but was not returned") + } + if !scenario.expectError && err != nil { + t.Fatalf("unexpected error: %v", err) + } + }) + } +} + +func fakeScaleClient(discoveryResources []*metav1.APIResourceList, pathsResources map[string]runtime.Object) (scale.ScalesGetter, error) { + fakeDiscoveryClient := &fakedisco.FakeDiscovery{Fake: &testcore.Fake{}} + fakeDiscoveryClient.Resources = discoveryResources + restMapperRes, err := discovery.GetAPIGroupResources(fakeDiscoveryClient) + if err != nil { + return nil, err + } + restMapper := discovery.NewRESTMapper(restMapperRes, apimeta.InterfacesForUnstructured) + codecs := serializer.NewCodecFactory(scale.NewScaleConverter().Scheme()) + fakeReqHandler := func(req *http.Request) (*http.Response, error) { + path := req.URL.Path + scale, isScalePath := pathsResources[path] + if !isScalePath { + return nil, fmt.Errorf("unexpected request for URL %q with method %q", req.URL.String(), req.Method) + } + + switch req.Method { + case "GET": + res, err := json.Marshal(scale) + if err != nil { + return nil, err + } + 
return &http.Response{StatusCode: 200, Header: defaultHeaders(), Body: bytesBody(res)}, nil + case "PUT": + decoder := codecs.UniversalDeserializer() + body, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + newScale, newScaleGVK, err := decoder.Decode(body, nil, nil) + if err != nil { + return nil, fmt.Errorf("unexpected request body: %v", err) + } + if *newScaleGVK != scale.GetObjectKind().GroupVersionKind() { + return nil, fmt.Errorf("unexpected scale API version %s (expected %s)", newScaleGVK.String(), scale.GetObjectKind().GroupVersionKind().String()) + } + res, err := json.Marshal(newScale) + if err != nil { + return nil, err + } + + pathsResources[path] = newScale + return &http.Response{StatusCode: 200, Header: defaultHeaders(), Body: bytesBody(res)}, nil + default: + return nil, fmt.Errorf("unexpected request for URL %q with method %q", req.URL.String(), req.Method) + } + } + + fakeClient := &fakerest.RESTClient{ + Client: fakerest.CreateHTTPClient(fakeReqHandler), + NegotiatedSerializer: serializer.DirectCodecFactory{ + CodecFactory: serializer.NewCodecFactory(scale.NewScaleConverter().Scheme()), + }, + GroupVersion: schema.GroupVersion{}, + VersionedAPIPath: "/not/a/real/path", + } + + resolver := scale.NewDiscoveryScaleKindResolver(fakeDiscoveryClient) + client := scale.New(fakeClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) + return client, nil +} + +func bytesBody(bodyBytes []byte) io.ReadCloser { + return ioutil.NopCloser(bytes.NewReader(bodyBytes)) +} + +func defaultHeaders() http.Header { + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + return header +} From 05afd248f232aa4db132e3bf3f33ed63db1bdacf Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Tue, 12 Dec 2017 16:20:42 -0800 Subject: [PATCH 392/794] Version bump to etcd v3.2.11 --- Godeps/Godeps.json | 341 +- Godeps/LICENSES | 4148 ++++++++++++++++- vendor/BUILD | 10 +- vendor/github.com/boltdb/bolt/Makefile | 18 - 
vendor/github.com/cockroachdb/cmux/.gitignore | 24 + .../github.com/cockroachdb/cmux/.travis.yml | 22 + vendor/github.com/cockroachdb/cmux/BUILD | 31 + vendor/github.com/cockroachdb/cmux/LICENSE | 202 + vendor/github.com/cockroachdb/cmux/README.md | 65 + vendor/github.com/cockroachdb/cmux/buffer.go | 35 + vendor/github.com/cockroachdb/cmux/cmux.go | 210 + .../github.com/cockroachdb/cmux/matchers.go | 150 + .../github.com/cockroachdb/cmux/patricia.go | 173 + .../{boltdb/bolt => coreos/bbolt}/.gitignore | 1 + .../{boltdb/bolt => coreos/bbolt}/BUILD | 2 +- .../{boltdb/bolt => coreos/bbolt}/LICENSE | 0 vendor/github.com/coreos/bbolt/Makefile | 30 + .../{boltdb/bolt => coreos/bbolt}/README.md | 100 +- .../bolt => coreos/bbolt}/appveyor.yml | 0 .../{boltdb/bolt => coreos/bbolt}/bolt_386.go | 3 + .../bolt => coreos/bbolt}/bolt_amd64.go | 3 + vendor/github.com/coreos/bbolt/bolt_arm.go | 28 + .../bolt => coreos/bbolt}/bolt_arm64.go | 3 + .../bolt => coreos/bbolt}/bolt_linux.go | 0 .../github.com/coreos/bbolt/bolt_mips64x.go | 12 + .../bbolt/bolt_mipsx.go} | 7 +- .../bolt => coreos/bbolt}/bolt_openbsd.go | 0 .../{boltdb/bolt => coreos/bbolt}/bolt_ppc.go | 0 .../bolt => coreos/bbolt}/bolt_ppc64.go | 3 + .../bolt => coreos/bbolt}/bolt_ppc64le.go | 3 + .../bolt => coreos/bbolt}/bolt_s390x.go | 3 + .../bolt => coreos/bbolt}/bolt_unix.go | 33 +- .../bbolt}/bolt_unix_solaris.go | 39 +- .../bolt => coreos/bbolt}/bolt_windows.go | 33 +- .../bolt => coreos/bbolt}/boltsync_unix.go | 0 .../{boltdb/bolt => coreos/bbolt}/bucket.go | 49 +- .../{boltdb/bolt => coreos/bbolt}/cursor.go | 0 .../{boltdb/bolt => coreos/bbolt}/db.go | 211 +- .../{boltdb/bolt => coreos/bbolt}/doc.go | 0 .../{boltdb/bolt => coreos/bbolt}/errors.go | 0 .../{boltdb/bolt => coreos/bbolt}/freelist.go | 167 +- .../{boltdb/bolt => coreos/bbolt}/node.go | 2 +- .../{boltdb/bolt => coreos/bbolt}/page.go | 31 +- .../{boltdb/bolt => coreos/bbolt}/tx.go | 83 +- vendor/github.com/coreos/etcd/auth/BUILD | 5 + 
.../coreos/etcd/auth/authpb/auth.pb.go | 2 +- vendor/github.com/coreos/etcd/auth/jwt.go | 137 + .../coreos/etcd/auth/range_perm_cache.go | 180 +- .../coreos/etcd/auth/simple_token.go | 129 +- vendor/github.com/coreos/etcd/auth/store.go | 260 +- vendor/github.com/coreos/etcd/client/BUILD | 3 +- .../github.com/coreos/etcd/client/client.go | 52 +- .../github.com/coreos/etcd/client/discover.go | 19 + vendor/github.com/coreos/etcd/client/srv.go | 65 - vendor/github.com/coreos/etcd/clientv3/BUILD | 16 +- .../github.com/coreos/etcd/clientv3/README.md | 10 +- .../github.com/coreos/etcd/clientv3/auth.go | 25 +- .../coreos/etcd/clientv3/balancer.go | 239 - .../github.com/coreos/etcd/clientv3/client.go | 226 +- .../coreos/etcd/clientv3/cluster.go | 46 +- .../coreos/etcd/clientv3/compact_op.go | 6 +- .../coreos/etcd/clientv3/compare.go | 27 + .../coreos/etcd/clientv3/concurrency/BUILD | 35 + .../coreos/etcd/clientv3/concurrency/doc.go | 17 + .../etcd/clientv3/concurrency/election.go | 246 + .../coreos/etcd/clientv3/concurrency/key.go | 66 + .../coreos/etcd/clientv3/concurrency/mutex.go | 119 + .../etcd/clientv3/concurrency/session.go | 142 + .../coreos/etcd/clientv3/concurrency/stm.go | 388 ++ .../github.com/coreos/etcd/clientv3/config.go | 107 +- vendor/github.com/coreos/etcd/clientv3/doc.go | 2 +- .../coreos/etcd/clientv3/health_balancer.go | 627 +++ vendor/github.com/coreos/etcd/clientv3/kv.go | 54 +- .../github.com/coreos/etcd/clientv3/lease.go | 257 +- .../github.com/coreos/etcd/clientv3/logger.go | 34 +- .../coreos/etcd/clientv3/maintenance.go | 59 +- .../coreos/etcd/clientv3/namespace/BUILD | 34 + .../coreos/etcd/clientv3/namespace/doc.go | 43 + .../coreos/etcd/clientv3/namespace/kv.go | 189 + .../coreos/etcd/clientv3/namespace/lease.go | 58 + .../coreos/etcd/clientv3/namespace/util.go | 42 + .../coreos/etcd/clientv3/namespace/watch.go | 84 + .../coreos/etcd/clientv3/naming/BUILD | 32 + .../coreos/etcd/clientv3/naming/doc.go | 56 + 
.../coreos/etcd/clientv3/naming/grpc.go | 132 + vendor/github.com/coreos/etcd/clientv3/op.go | 133 +- .../coreos/etcd/clientv3/ready_wait.go | 30 + .../github.com/coreos/etcd/clientv3/retry.go | 376 +- vendor/github.com/coreos/etcd/clientv3/txn.go | 30 +- .../github.com/coreos/etcd/clientv3/watch.go | 74 +- .../coreos/etcd/compactor/compactor.go | 18 +- vendor/github.com/coreos/etcd/discovery/BUILD | 5 +- .../github.com/coreos/etcd/discovery/srv.go | 104 - vendor/github.com/coreos/etcd/embed/BUILD | 60 + vendor/github.com/coreos/etcd/embed/config.go | 464 ++ vendor/github.com/coreos/etcd/embed/doc.go | 45 + vendor/github.com/coreos/etcd/embed/etcd.go | 453 ++ vendor/github.com/coreos/etcd/embed/serve.go | 236 + vendor/github.com/coreos/etcd/embed/util.go | 30 + vendor/github.com/coreos/etcd/error/error.go | 5 +- .../github.com/coreos/etcd/etcdserver/BUILD | 2 +- .../coreos/etcd/etcdserver/api/BUILD | 4 + .../coreos/etcd/etcdserver/api/capability.go | 8 +- .../coreos/etcd/etcdserver/api/etcdhttp/BUILD | 40 + .../etcd/etcdserver/api/etcdhttp/base.go | 186 + .../api/{v2http => etcdhttp}/peer.go | 4 +- .../coreos/etcd/etcdserver/api/v2http/BUILD | 6 +- .../etcd/etcdserver/api/v2http/client.go | 146 +- .../coreos/etcd/etcdserver/api/v2http/http.go | 30 +- .../coreos/etcd/etcdserver/api/v3client/BUILD | 32 + .../etcd/etcdserver/api/v3client/doc.go | 45 + .../etcd/etcdserver/api/v3client/v3client.go | 67 + .../etcd/etcdserver/api/v3election/BUILD | 34 + .../etcd/etcdserver/api/v3election/doc.go | 16 + .../etcdserver/api/v3election/election.go | 123 + .../api/v3election/v3electionpb/BUILD | 39 + .../api/v3election/v3electionpb/gw/BUILD | 33 + .../v3electionpb/gw/v3election.pb.gw.go | 313 ++ .../v3election/v3electionpb/v3election.pb.go | 2098 +++++++++ .../v3election/v3electionpb/v3election.proto | 119 + .../coreos/etcd/etcdserver/api/v3lock/BUILD | 34 + .../coreos/etcd/etcdserver/api/v3lock/doc.go | 16 + .../coreos/etcd/etcdserver/api/v3lock/lock.go | 56 + 
.../etcd/etcdserver/api/v3lock/v3lockpb/BUILD | 38 + .../etcdserver/api/v3lock/v3lockpb/gw/BUILD | 33 + .../api/v3lock/v3lockpb/gw/v3lock.pb.gw.go | 167 + .../api/v3lock/v3lockpb/v3lock.pb.go | 978 ++++ .../api/v3lock/v3lockpb/v3lock.proto | 65 + .../coreos/etcd/etcdserver/api/v3rpc/grpc.go | 14 +- .../etcd/etcdserver/api/v3rpc/interceptor.go | 4 +- .../coreos/etcd/etcdserver/api/v3rpc/key.go | 10 +- .../coreos/etcd/etcdserver/api/v3rpc/lease.go | 31 +- .../etcd/etcdserver/api/v3rpc/maintenance.go | 3 +- .../etcd/etcdserver/api/v3rpc/member.go | 36 +- .../etcd/etcdserver/api/v3rpc/rpctypes/BUILD | 1 + .../etcdserver/api/v3rpc/rpctypes/error.go | 47 +- .../coreos/etcd/etcdserver/api/v3rpc/util.go | 6 +- .../coreos/etcd/etcdserver/api/v3rpc/watch.go | 47 + .../coreos/etcd/etcdserver/apply.go | 292 +- .../coreos/etcd/etcdserver/apply_auth.go | 13 +- .../coreos/etcd/etcdserver/backend.go | 81 + .../coreos/etcd/etcdserver/cluster_util.go | 10 - .../coreos/etcd/etcdserver/config.go | 7 + .../coreos/etcd/etcdserver/errors.go | 1 + .../coreos/etcd/etcdserver/etcdserverpb/BUILD | 11 +- .../etcdserver/etcdserverpb/etcdserver.pb.go | 2 +- .../etcd/etcdserver/etcdserverpb/gw/BUILD | 33 + .../etcdserverpb/{ => gw}/rpc.pb.gw.go | 589 ++- .../etcdserverpb/raft_internal.pb.go | 2 +- .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 1517 +++++- .../etcd/etcdserver/etcdserverpb/rpc.proto | 32 +- .../coreos/etcd/etcdserver/membership/BUILD | 1 + .../etcd/etcdserver/membership/cluster.go | 2 +- .../coreos/etcd/etcdserver/membership/doc.go | 16 + .../etcd/etcdserver/membership/store.go | 6 +- .../coreos/etcd/etcdserver/metrics.go | 7 + .../coreos/etcd/etcdserver/quota.go | 17 +- .../github.com/coreos/etcd/etcdserver/raft.go | 131 +- .../coreos/etcd/etcdserver/server.go | 234 +- .../coreos/etcd/etcdserver/snapshot_merge.go | 7 +- .../coreos/etcd/etcdserver/stats/leader.go | 15 +- .../coreos/etcd/etcdserver/stats/server.go | 54 +- .../coreos/etcd/etcdserver/storage.go | 3 - 
.../github.com/coreos/etcd/etcdserver/util.go | 2 +- .../coreos/etcd/etcdserver/v3_server.go | 292 +- .../github.com/coreos/etcd/integration/BUILD | 9 +- .../coreos/etcd/integration/bridge.go | 67 +- .../coreos/etcd/integration/cluster.go | 166 +- .../coreos/etcd/integration/cluster_direct.go | 4 + .../coreos/etcd/integration/cluster_proxy.go | 42 +- .../coreos/etcd/lease/leasehttp/BUILD | 1 - .../coreos/etcd/lease/leasehttp/http.go | 54 +- .../coreos/etcd/lease/leasepb/lease.pb.go | 2 +- vendor/github.com/coreos/etcd/lease/lessor.go | 99 +- vendor/github.com/coreos/etcd/mvcc/BUILD | 4 + .../github.com/coreos/etcd/mvcc/backend/BUILD | 11 +- .../coreos/etcd/mvcc/backend/backend.go | 141 +- .../coreos/etcd/mvcc/backend/batch_tx.go | 174 +- ...oltoption_default.go => config_default.go} | 6 +- .../{boltoption_linux.go => config_linux.go} | 7 +- .../etcd/mvcc/backend/config_windows.go | 26 + .../coreos/etcd/mvcc/backend/metrics.go | 10 + .../coreos/etcd/mvcc/backend/read_tx.go | 92 + .../coreos/etcd/mvcc/backend/tx_buffer.go | 181 + vendor/github.com/coreos/etcd/mvcc/index.go | 21 +- .../github.com/coreos/etcd/mvcc/key_index.go | 1 - vendor/github.com/coreos/etcd/mvcc/kv.go | 82 +- vendor/github.com/coreos/etcd/mvcc/kv_view.go | 53 + vendor/github.com/coreos/etcd/mvcc/kvstore.go | 557 +-- .../coreos/etcd/mvcc/kvstore_txn.go | 253 + vendor/github.com/coreos/etcd/mvcc/metrics.go | 15 +- .../coreos/etcd/mvcc/metrics_txn.go | 67 + .../coreos/etcd/mvcc/mvccpb/kv.pb.go | 2 +- .../coreos/etcd/mvcc/watchable_store.go | 165 +- .../coreos/etcd/mvcc/watchable_store_txn.go | 53 + .../coreos/etcd/mvcc/watcher_group.go | 2 +- .../coreos/etcd/pkg/adt/interval_tree.go | 81 +- vendor/github.com/coreos/etcd/pkg/cors/BUILD | 22 + .../github.com/coreos/etcd/pkg/cors/cors.go | 90 + .../etcd/pkg/debugutil}/BUILD | 11 +- .../coreos/etcd/pkg/debugutil/doc.go | 16 + .../coreos/etcd/pkg/debugutil/pprof.go | 47 + .../coreos/etcd/pkg/fileutil/fileutil.go | 7 +- 
.../coreos/etcd/pkg/fileutil/lock_linux.go | 3 +- .../coreos/etcd/pkg/fileutil/preallocate.go | 15 +- .../coreos/etcd/pkg/httputil/httputil.go | 9 - .../github.com/coreos/etcd/pkg/idutil/id.go | 4 +- .../github.com/coreos/etcd/pkg/netutil/BUILD | 1 - .../coreos/etcd/pkg/netutil/netutil.go | 34 +- .../coreos/etcd/pkg/schedule/schedule.go | 2 - vendor/github.com/coreos/etcd/pkg/srv/BUILD | 23 + vendor/github.com/coreos/etcd/pkg/srv/srv.go | 140 + .../github.com/coreos/etcd/pkg/testutil/BUILD | 1 + .../coreos/etcd/pkg/testutil/assert.go | 62 + .../coreos/etcd/pkg/testutil/leak.go | 23 +- .../coreos/etcd/pkg/transport/BUILD | 6 +- .../coreos/etcd/pkg/transport/listener.go | 66 +- .../coreos/etcd/pkg/transport/listener_tls.go | 217 + .../etcd/pkg/transport/timeout_listener.go | 5 +- .../etcd/pkg/transport/unix_listener.go | 4 +- .../github.com/coreos/etcd/pkg/wait/wait.go | 19 +- .../coreos/etcd/proxy/grpcproxy/BUILD | 14 +- .../coreos/etcd/proxy/grpcproxy/adapter/BUILD | 40 + .../chan_stream.go} | 125 +- .../adapter/cluster_client_adapter.go | 44 + .../etcd/proxy/grpcproxy/adapter/doc.go | 17 + .../adapter/election_client_adapter.go | 79 + .../{ => adapter}/kv_client_adapter.go | 2 +- .../grpcproxy/adapter/lease_client_adapter.go | 77 + .../grpcproxy/adapter/lock_client_adapter.go | 36 + .../adapter/maintenance_client_adapter.go | 79 + .../grpcproxy/adapter/watch_client_adapter.go | 66 + .../coreos/etcd/proxy/grpcproxy/cache/BUILD | 2 +- .../etcd/proxy/grpcproxy/cache/store.go | 42 +- .../coreos/etcd/proxy/grpcproxy/cluster.go | 151 +- .../coreos/etcd/proxy/grpcproxy/election.go | 65 + .../coreos/etcd/proxy/grpcproxy/kv.go | 30 +- .../coreos/etcd/proxy/grpcproxy/leader.go | 114 + .../coreos/etcd/proxy/grpcproxy/lease.go | 344 +- .../coreos/etcd/proxy/grpcproxy/lock.go | 38 + .../coreos/etcd/proxy/grpcproxy/logger.go | 19 + .../etcd/proxy/grpcproxy/maintenance.go | 5 + .../coreos/etcd/proxy/grpcproxy/metrics.go | 7 + .../coreos/etcd/proxy/grpcproxy/register.go | 94 + 
.../coreos/etcd/proxy/grpcproxy/watch.go | 77 +- .../etcd/proxy/grpcproxy/watch_broadcast.go | 33 +- .../coreos/etcd/proxy/grpcproxy/watcher.go | 11 +- vendor/github.com/coreos/etcd/raft/README.md | 91 +- .../coreos/etcd/raft/log_unstable.go | 20 + vendor/github.com/coreos/etcd/raft/node.go | 16 + vendor/github.com/coreos/etcd/raft/raft.go | 4 + .../coreos/etcd/raft/raftpb/raft.pb.go | 72 +- .../coreos/etcd/rafthttp/pipeline.go | 5 +- .../coreos/etcd/rafthttp/snapshot_sender.go | 6 +- .../github.com/coreos/etcd/rafthttp/stream.go | 21 +- .../github.com/coreos/etcd/rafthttp/util.go | 32 +- vendor/github.com/coreos/etcd/snap/db.go | 21 +- .../coreos/etcd/snap/snappb/snap.pb.go | 2 +- vendor/github.com/coreos/etcd/store/node.go | 1 - vendor/github.com/coreos/etcd/store/store.go | 3 + .../coreos/etcd/store/watcher_hub.go | 2 +- .../github.com/coreos/etcd/version/version.go | 2 +- vendor/github.com/coreos/etcd/wal/encoder.go | 2 +- vendor/github.com/coreos/etcd/wal/repair.go | 2 +- vendor/github.com/coreos/etcd/wal/wal.go | 23 +- .../coreos/etcd/wal/walpb/record.pb.go | 2 +- .../protobuf/protoc-gen-go/descriptor/BUILD | 29 + .../protoc-gen-go/descriptor/Makefile | 37 + .../protoc-gen-go/descriptor/descriptor.pb.go | 2215 +++++++++ .../protoc-gen-go/descriptor/descriptor.proto | 849 ++++ vendor/github.com/karlseguin/ccache/Makefile | 5 - vendor/github.com/karlseguin/ccache/bucket.go | 41 - vendor/github.com/karlseguin/ccache/cache.go | 227 - .../karlseguin/ccache/configuration.go | 94 - vendor/github.com/karlseguin/ccache/item.go | 103 - .../karlseguin/ccache/layeredbucket.go | 82 - .../karlseguin/ccache/layeredcache.go | 237 - .../github.com/karlseguin/ccache/license.txt | 19 - vendor/github.com/karlseguin/ccache/readme.md | 172 - .../karlseguin/ccache/secondarycache.go | 72 - .../genproto/googleapis/api/annotations/BUILD | 29 + .../api/annotations/annotations.pb.go | 64 + .../googleapis/api/annotations/http.pb.go | 566 +++ vendor/google.golang.org/grpc/BUILD | 1 + 
.../grpc/health/grpc_health_v1/BUILD | 33 + .../grpc/health/grpc_health_v1/health.pb.go | 176 + .../grpc/health/grpc_health_v1/health.proto | 20 + 287 files changed, 25980 insertions(+), 5220 deletions(-) delete mode 100644 vendor/github.com/boltdb/bolt/Makefile create mode 100644 vendor/github.com/cockroachdb/cmux/.gitignore create mode 100644 vendor/github.com/cockroachdb/cmux/.travis.yml create mode 100644 vendor/github.com/cockroachdb/cmux/BUILD create mode 100644 vendor/github.com/cockroachdb/cmux/LICENSE create mode 100644 vendor/github.com/cockroachdb/cmux/README.md create mode 100644 vendor/github.com/cockroachdb/cmux/buffer.go create mode 100644 vendor/github.com/cockroachdb/cmux/cmux.go create mode 100644 vendor/github.com/cockroachdb/cmux/matchers.go create mode 100644 vendor/github.com/cockroachdb/cmux/patricia.go rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/.gitignore (65%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/BUILD (96%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/LICENSE (100%) create mode 100644 vendor/github.com/coreos/bbolt/Makefile rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/README.md (88%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/appveyor.yml (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_386.go (72%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_amd64.go (73%) create mode 100644 vendor/github.com/coreos/bbolt/bolt_arm.go rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_arm64.go (74%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_linux.go (100%) create mode 100644 vendor/github.com/coreos/bbolt/bolt_mips64x.go rename vendor/github.com/{boltdb/bolt/bolt_arm.go => coreos/bbolt/bolt_mipsx.go} (55%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_openbsd.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_ppc.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_ppc64.go (74%) rename 
vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_ppc64le.go (75%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_s390x.go (74%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_unix.go (80%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_unix_solaris.go (75%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_windows.go (88%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/boltsync_unix.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bucket.go (95%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/cursor.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/db.go (85%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/doc.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/errors.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/freelist.go (56%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/node.go (99%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/page.go (88%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/tx.go (94%) create mode 100644 vendor/github.com/coreos/etcd/auth/jwt.go delete mode 100644 vendor/github.com/coreos/etcd/client/srv.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/balancer.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/election.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/key.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/session.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/health_balancer.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/BUILD create mode 100644 
vendor/github.com/coreos/etcd/clientv3/namespace/doc.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/kv.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/lease.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/util.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/watch.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/naming/BUILD create mode 100644 vendor/github.com/coreos/etcd/clientv3/naming/doc.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/naming/grpc.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/ready_wait.go delete mode 100644 vendor/github.com/coreos/etcd/discovery/srv.go create mode 100644 vendor/github.com/coreos/etcd/embed/BUILD create mode 100644 vendor/github.com/coreos/etcd/embed/config.go create mode 100644 vendor/github.com/coreos/etcd/embed/doc.go create mode 100644 vendor/github.com/coreos/etcd/embed/etcd.go create mode 100644 vendor/github.com/coreos/etcd/embed/serve.go create mode 100644 vendor/github.com/coreos/etcd/embed/util.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go rename vendor/github.com/coreos/etcd/etcdserver/api/{v2http => etcdhttp}/peer.go (97%) create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD create mode 100644 
vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto create mode 100644 vendor/github.com/coreos/etcd/etcdserver/backend.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD rename vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/{ => gw}/rpc.pb.gw.go (69%) create mode 100644 vendor/github.com/coreos/etcd/etcdserver/membership/doc.go rename vendor/github.com/coreos/etcd/mvcc/backend/{boltoption_default.go => config_default.go} (82%) rename vendor/github.com/coreos/etcd/mvcc/backend/{boltoption_linux.go => config_linux.go} (88%) create mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/kv_view.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go create mode 100644 
vendor/github.com/coreos/etcd/mvcc/metrics_txn.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go create mode 100644 vendor/github.com/coreos/etcd/pkg/cors/BUILD create mode 100644 vendor/github.com/coreos/etcd/pkg/cors/cors.go rename vendor/github.com/{karlseguin/ccache => coreos/etcd/pkg/debugutil}/BUILD (66%) create mode 100644 vendor/github.com/coreos/etcd/pkg/debugutil/doc.go create mode 100644 vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go create mode 100644 vendor/github.com/coreos/etcd/pkg/srv/BUILD create mode 100644 vendor/github.com/coreos/etcd/pkg/srv/srv.go create mode 100644 vendor/github.com/coreos/etcd/pkg/testutil/assert.go create mode 100644 vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD rename vendor/github.com/coreos/etcd/proxy/grpcproxy/{watch_client_adapter.go => adapter/chan_stream.go} (65%) create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go rename vendor/github.com/coreos/etcd/proxy/grpcproxy/{ => adapter}/kv_client_adapter.go (98%) create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go create mode 100644 
vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto delete mode 100644 vendor/github.com/karlseguin/ccache/Makefile delete mode 100644 vendor/github.com/karlseguin/ccache/bucket.go delete mode 100644 vendor/github.com/karlseguin/ccache/cache.go delete mode 100644 vendor/github.com/karlseguin/ccache/configuration.go delete mode 100644 vendor/github.com/karlseguin/ccache/item.go delete mode 100644 vendor/github.com/karlseguin/ccache/layeredbucket.go delete mode 100644 vendor/github.com/karlseguin/ccache/layeredcache.go delete mode 100644 vendor/github.com/karlseguin/ccache/license.txt delete mode 100644 vendor/github.com/karlseguin/ccache/readme.md delete mode 100644 vendor/github.com/karlseguin/ccache/secondarycache.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index b072375c745..76a3b65b33b 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -333,11 +333,6 @@ "Comment": "v3.5.0", "Rev": "b38d23b8782a487059e8fc8773e9a5b228a77cb6" }, - { - "ImportPath": "github.com/boltdb/bolt", - "Comment": "v1.3.0", - 
"Rev": "583e8937c61f1af6513608ccc75c97b6abdf4ff9" - }, { "ImportPath": "github.com/chai2010/gettext-go/gettext", "Rev": "c6fed771bfd517099caf0f7a961671fa8ed08723" @@ -423,6 +418,10 @@ "ImportPath": "github.com/clusterhq/flocker-go", "Rev": "2b8b7259d3139c96c4a6871031355808ab3fd3b3" }, + { + "ImportPath": "github.com/cockroachdb/cmux", + "Rev": "112f0506e7743d64a6eb8fedbcff13d9979bbf92" + }, { "ImportPath": "github.com/codedellemc/goscaleio", "Rev": "20e2ce2cf8852dc78bd42b76698dcd8dcd77b7b1" @@ -515,285 +514,375 @@ "Comment": "v0.6.0", "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, + { + "ImportPath": "github.com/coreos/bbolt", + "Comment": "v1.3.1-coreos.6", + "Rev": "48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d" + }, { "ImportPath": "github.com/coreos/etcd/alarm", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/auth", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/client", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/concurrency", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/namespace", + "Comment": "v3.2.11", + 
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/naming", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/compactor", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/discovery", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/embed", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/error", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/etcdhttp", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": 
"github.com/coreos/etcd/etcdserver/api/v3client", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/auth", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": 
"github.com/coreos/etcd/etcdserver/etcdserverpb/gw", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/membership", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/stats", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/integration", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/lease", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/lease/leasehttp", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/lease/leasepb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/mvcc", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/mvcc/backend", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": 
"1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/adt", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/contention", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/cors", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/cpuutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/crc", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/debugutil", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/httputil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/idutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/ioutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { 
"ImportPath": "github.com/coreos/etcd/pkg/logutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/monotime", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/netutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/pbutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/runtime", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/schedule", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/testutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": 
"1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/wait", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/adapter", + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/cache", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/raft", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/raft/raftpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/rafthttp", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/snap", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + 
"Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/snap/snappb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/store", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/version", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/wal", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/wal/walpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.11", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/go-oidc/http", @@ -1352,6 +1441,10 @@ "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, + { + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + }, { "ImportPath": "github.com/golang/protobuf/ptypes", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -1921,11 +2014,6 @@ "ImportPath": "github.com/kardianos/osext", "Rev": "8fef92e41e22a70e700a96b29f066cda30ea24ef" }, - { - "ImportPath": "github.com/karlseguin/ccache", - "Comment": "v2.0.2-5-g3ba9789", - "Rev": "3ba9789cfd2cb7b4fb4657efc994cc1c599a648c" - }, { "ImportPath": "github.com/kr/fs", "Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b" @@ -2912,6 +3000,10 @@ "ImportPath": "google.golang.org/api/pubsub/v1", "Rev": "654f863362977d69086620b5f72f13e911da2410" }, + { + 
"ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" + }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" @@ -2941,6 +3033,11 @@ "Comment": "v1.3.0", "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + }, { "ImportPath": "google.golang.org/grpc/internal", "Comment": "v1.3.0", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 2b40e59609c..9ec4ed0bb82 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -11102,34 +11102,6 @@ THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/boltdb/bolt licensed under: = - -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -= vendor/github.com/boltdb/bolt/LICENSE 13b2a308eefa10d841e3bf2467dbe07a -================================================================================ - - ================================================================================ = vendor/github.com/chai2010/gettext-go/gettext licensed under: = @@ -11884,6 +11856,216 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/github.com/cockroachdb/cmux licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/cockroachdb/cmux/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/codedellemc/goscaleio licensed under: = @@ -15593,6 +15775,34 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/bbolt licensed under: = + +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + += vendor/github.com/coreos/bbolt/LICENSE 13b2a308eefa10d841e3bf2467dbe07a +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/alarm licensed under: = @@ -16643,6 +16853,636 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/clientv3/concurrency licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/clientv3/namespace licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/clientv3/naming licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/compactor licensed under: = @@ -17063,6 +17903,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/embed licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/error licensed under: = @@ -17693,6 +18743,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/etcdserver/api/v2http licensed under: = @@ -18113,6 +19373,1476 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3client licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3election licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3lock licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/etcdserver/api/v3rpc licensed under: = @@ -18953,6 +21683,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/etcdserver/membership licensed under: = @@ -21263,6 +24203,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/pkg/cors licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/pkg/cpuutil licensed under: = @@ -21683,6 +24833,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/pkg/debugutil licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/pkg/fileutil licensed under: = @@ -23993,6 +27353,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/pkg/srv licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/pkg/testutil licensed under: = @@ -25253,6 +28823,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/proxy/grpcproxy/cache licensed under: = @@ -44096,6 +47876,45 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/github.com/golang/protobuf/protoc-gen-go/descriptor licensed under: = + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + += vendor/github.com/golang/protobuf/LICENSE 14db3a56c3796a940ba32948a15f97d0 +================================================================================ + + ================================================================================ = vendor/github.com/golang/protobuf/ptypes licensed under: = @@ -66349,33 +70168,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -================================================================================ -= vendor/github.com/karlseguin/ccache licensed under: = - -Copyright (c) 2013 Karl Seguin. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -= vendor/github.com/karlseguin/ccache/license.txt fb40cd712dfcf5e0a8de4c13c3399db2 -================================================================================ - - ================================================================================ = vendor/github.com/kr/fs licensed under: = @@ -85725,6 +89517,216 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/google.golang.org/genproto/googleapis/api/annotations licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/genproto/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/google.golang.org/genproto/googleapis/rpc/status licensed under: = @@ -86115,6 +90117,42 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/google.golang.org/grpc/health/grpc_health_v1 licensed under: = + +Copyright 2014, Google Inc. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 +================================================================================ + + ================================================================================ = vendor/google.golang.org/grpc/internal licensed under: = diff --git a/vendor/BUILD b/vendor/BUILD index 5b92af906f8..e15dc8349eb 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -45,7 +45,6 @@ filegroup( "//vendor/github.com/aws/aws-sdk-go/service/sts:all-srcs", "//vendor/github.com/beorn7/perks/quantile:all-srcs", "//vendor/github.com/blang/semver:all-srcs", - "//vendor/github.com/boltdb/bolt:all-srcs", "//vendor/github.com/chai2010/gettext-go/gettext:all-srcs", "//vendor/github.com/cloudflare/cfssl/auth:all-srcs", "//vendor/github.com/cloudflare/cfssl/certdb:all-srcs", @@ -59,6 +58,7 @@ filegroup( "//vendor/github.com/cloudflare/cfssl/ocsp/config:all-srcs", "//vendor/github.com/cloudflare/cfssl/signer:all-srcs", "//vendor/github.com/clusterhq/flocker-go:all-srcs", + "//vendor/github.com/cockroachdb/cmux:all-srcs", "//vendor/github.com/codedellemc/goscaleio:all-srcs", "//vendor/github.com/codegangsta/negroni:all-srcs", "//vendor/github.com/container-storage-interface/spec/lib/go/csi:all-srcs", @@ -74,12 +74,14 @@ filegroup( "//vendor/github.com/containernetworking/cni/pkg/invoke:all-srcs", "//vendor/github.com/containernetworking/cni/pkg/types:all-srcs", "//vendor/github.com/containernetworking/cni/pkg/version:all-srcs", + "//vendor/github.com/coreos/bbolt:all-srcs", "//vendor/github.com/coreos/etcd/alarm:all-srcs", "//vendor/github.com/coreos/etcd/auth:all-srcs", "//vendor/github.com/coreos/etcd/client:all-srcs", "//vendor/github.com/coreos/etcd/clientv3:all-srcs", "//vendor/github.com/coreos/etcd/compactor:all-srcs", "//vendor/github.com/coreos/etcd/discovery:all-srcs", + "//vendor/github.com/coreos/etcd/embed:all-srcs", "//vendor/github.com/coreos/etcd/error:all-srcs", 
"//vendor/github.com/coreos/etcd/etcdserver:all-srcs", "//vendor/github.com/coreos/etcd/integration:all-srcs", @@ -87,8 +89,10 @@ filegroup( "//vendor/github.com/coreos/etcd/mvcc:all-srcs", "//vendor/github.com/coreos/etcd/pkg/adt:all-srcs", "//vendor/github.com/coreos/etcd/pkg/contention:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/cors:all-srcs", "//vendor/github.com/coreos/etcd/pkg/cpuutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/crc:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/debugutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/fileutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/httputil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/idutil:all-srcs", @@ -100,6 +104,7 @@ filegroup( "//vendor/github.com/coreos/etcd/pkg/pbutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/runtime:all-srcs", "//vendor/github.com/coreos/etcd/pkg/schedule:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/srv:all-srcs", "//vendor/github.com/coreos/etcd/pkg/testutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/tlsutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/transport:all-srcs", @@ -211,6 +216,7 @@ filegroup( "//vendor/github.com/golang/mock/gomock:all-srcs", "//vendor/github.com/golang/protobuf/jsonpb:all-srcs", "//vendor/github.com/golang/protobuf/proto:all-srcs", + "//vendor/github.com/golang/protobuf/protoc-gen-go/descriptor:all-srcs", "//vendor/github.com/golang/protobuf/ptypes:all-srcs", "//vendor/github.com/google/btree:all-srcs", "//vendor/github.com/google/cadvisor/accelerators:all-srcs", @@ -267,7 +273,6 @@ filegroup( "//vendor/github.com/jteeuwen/go-bindata:all-srcs", "//vendor/github.com/juju/ratelimit:all-srcs", "//vendor/github.com/kardianos/osext:all-srcs", - "//vendor/github.com/karlseguin/ccache:all-srcs", "//vendor/github.com/kr/fs:all-srcs", "//vendor/github.com/kr/pretty:all-srcs", "//vendor/github.com/kr/pty:all-srcs", @@ -391,6 +396,7 @@ filegroup( "//vendor/google.golang.org/api/logging/v2beta1:all-srcs", 
"//vendor/google.golang.org/api/monitoring/v3:all-srcs", "//vendor/google.golang.org/api/pubsub/v1:all-srcs", + "//vendor/google.golang.org/genproto/googleapis/api/annotations:all-srcs", "//vendor/google.golang.org/genproto/googleapis/rpc/status:all-srcs", "//vendor/google.golang.org/grpc:all-srcs", "//vendor/gopkg.in/gcfg.v1:all-srcs", diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile deleted file mode 100644 index e035e63adcd..00000000000 --- a/vendor/github.com/boltdb/bolt/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt - -test: - @go test -v -cover . - @go test -v ./cmd/bolt - -.PHONY: fmt test diff --git a/vendor/github.com/cockroachdb/cmux/.gitignore b/vendor/github.com/cockroachdb/cmux/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/cockroachdb/cmux/.travis.yml b/vendor/github.com/cockroachdb/cmux/.travis.yml new file mode 100644 index 00000000000..e73780f2eb0 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/.travis.yml @@ -0,0 +1,22 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + +gobuild_args: -race + +before_install: + - go get -u github.com/golang/lint/golint + - if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then go get -u github.com/kisielk/errcheck; fi + - 
go get -u golang.org/x/tools/cmd/vet + +before_script: + - '! gofmt -s -l . | read' + - golint ./... + - echo $TRAVIS_GO_VERSION + - if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then errcheck ./...; fi + - go vet . + - go tool vet --shadow . diff --git a/vendor/github.com/cockroachdb/cmux/BUILD b/vendor/github.com/cockroachdb/cmux/BUILD new file mode 100644 index 00000000000..b8a9413ba38 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "buffer.go", + "cmux.go", + "matchers.go", + "patricia.go", + ], + importpath = "github.com/cockroachdb/cmux", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/net/http2:go_default_library", + "//vendor/golang.org/x/net/http2/hpack:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/cockroachdb/cmux/LICENSE b/vendor/github.com/cockroachdb/cmux/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cockroachdb/cmux/README.md b/vendor/github.com/cockroachdb/cmux/README.md new file mode 100644 index 00000000000..b3713da5876 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/README.md @@ -0,0 +1,65 @@ +# cmux: Connection Mux [![Build Status](https://travis-ci.org/cockroachdb/cmux.svg?branch=master)](https://travis-ci.org/cockroachdb/cmux) [![GoDoc](https://godoc.org/github.com/cockroachdb/cmux?status.svg)](https://godoc.org/github.com/cockroachdb/cmux) + +cmux is a generic Go library to multiplex connections based on their payload. +Using cmux, you can serve gRPC, SSH, HTTPS, HTTP, Go RPC, and pretty much any +other protocol on the same TCP listener. + +## How-To +Simply create your main listener, create a cmux for that listener, +and then match connections: +```go +// Create the main listener. +l, err := net.Listen("tcp", ":23456") +if err != nil { + log.Fatal(err) +} + +// Create a cmux. +m := cmux.New(l) + +// Match connections in order: +// First grpc, then HTTP, and otherwise Go RPC/TCP. +grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc")) +httpL := m.Match(cmux.HTTP1Fast()) +trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched. + +// Create your protocol servers. 
+grpcS := grpc.NewServer() +grpchello.RegisterGreeterServer(grpcs, &server{}) + +httpS := &http.Server{ + Handler: &helloHTTP1Handler{}, +} + +trpcS := rpc.NewServer() +s.Register(&ExampleRPCRcvr{}) + +// Use the muxed listeners for your servers. +go grpcS.Serve(grpcL) +go httpS.Serve(httpL) +go trpcS.Accept(trpcL) + +// Start serving! +m.Serve() +``` + +There are [more examples on GoDoc](https://godoc.org/github.com/cockroachdb/cmux#pkg-examples). + +## Performance +Since we are only matching the very first bytes of a connection, the +performance overhead on long-lived connections (i.e., RPCs and pipelined HTTP +streams) is negligible. + +## Limitations +* *TLS*: `net/http` uses a [type assertion](https://github.com/golang/go/issues/14221) +to identify TLS connections; since cmux's lookahead-implementing connection +wraps the underlying TLS connection, this type assertion fails. This means you +can serve HTTPS using cmux but `http.Request.TLS` will not be set in your +handlers. If you are able to wrap TLS around cmux, you can work around this +limitation. See https://github.com/cockroachdb/cockroach/commit/83caba2 for an +example of this approach. + +* *Different Protocols on The Same Connection*: `cmux` matches the connection +when it's accepted. For example, one connection can be either gRPC or REST, but +not both. That is, we assume that a client connection is either used for gRPC +or REST. diff --git a/vendor/github.com/cockroachdb/cmux/buffer.go b/vendor/github.com/cockroachdb/cmux/buffer.go new file mode 100644 index 00000000000..5c178585363 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/buffer.go @@ -0,0 +1,35 @@ +package cmux + +import ( + "bytes" + "io" +) + +// bufferedReader is an optimized implementation of io.Reader that behaves like +// ``` +// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer)) +// ``` +// without allocating. 
+type bufferedReader struct { + source io.Reader + buffer *bytes.Buffer + bufferRead int + bufferSize int +} + +func (s *bufferedReader) Read(p []byte) (int, error) { + // Functionality of bytes.Reader. + bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize]) + s.bufferRead += bn + + p = p[bn:] + + // Funtionality of io.TeeReader. + sn, sErr := s.source.Read(p) + if sn > 0 { + if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil { + return bn + wn, wErr + } + } + return bn + sn, sErr +} diff --git a/vendor/github.com/cockroachdb/cmux/cmux.go b/vendor/github.com/cockroachdb/cmux/cmux.go new file mode 100644 index 00000000000..89cc910b024 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/cmux.go @@ -0,0 +1,210 @@ +package cmux + +import ( + "bytes" + "fmt" + "io" + "net" + "sync" +) + +// Matcher matches a connection based on its content. +type Matcher func(io.Reader) bool + +// ErrorHandler handles an error and returns whether +// the mux should continue serving the listener. +type ErrorHandler func(error) bool + +var _ net.Error = ErrNotMatched{} + +// ErrNotMatched is returned whenever a connection is not matched by any of +// the matchers registered in the multiplexer. +type ErrNotMatched struct { + c net.Conn +} + +func (e ErrNotMatched) Error() string { + return fmt.Sprintf("mux: connection %v not matched by an matcher", + e.c.RemoteAddr()) +} + +// Temporary implements the net.Error interface. +func (e ErrNotMatched) Temporary() bool { return true } + +// Timeout implements the net.Error interface. +func (e ErrNotMatched) Timeout() bool { return false } + +type errListenerClosed string + +func (e errListenerClosed) Error() string { return string(e) } +func (e errListenerClosed) Temporary() bool { return false } +func (e errListenerClosed) Timeout() bool { return false } + +// ErrListenerClosed is returned from muxListener.Accept when the underlying +// listener is closed. 
+var ErrListenerClosed = errListenerClosed("mux: listener closed") + +// New instantiates a new connection multiplexer. +func New(l net.Listener) CMux { + return &cMux{ + root: l, + bufLen: 1024, + errh: func(_ error) bool { return true }, + donec: make(chan struct{}), + } +} + +// CMux is a multiplexer for network connections. +type CMux interface { + // Match returns a net.Listener that sees (i.e., accepts) only + // the connections matched by at least one of the matcher. + // + // The order used to call Match determines the priority of matchers. + Match(...Matcher) net.Listener + // Serve starts multiplexing the listener. Serve blocks and perhaps + // should be invoked concurrently within a go routine. + Serve() error + // HandleError registers an error handler that handles listener errors. + HandleError(ErrorHandler) +} + +type matchersListener struct { + ss []Matcher + l muxListener +} + +type cMux struct { + root net.Listener + bufLen int + errh ErrorHandler + donec chan struct{} + sls []matchersListener +} + +func (m *cMux) Match(matchers ...Matcher) net.Listener { + ml := muxListener{ + Listener: m.root, + connc: make(chan net.Conn, m.bufLen), + } + m.sls = append(m.sls, matchersListener{ss: matchers, l: ml}) + return ml +} + +func (m *cMux) Serve() error { + var wg sync.WaitGroup + + defer func() { + close(m.donec) + wg.Wait() + + for _, sl := range m.sls { + close(sl.l.connc) + // Drain the connections enqueued for the listener. 
+ for c := range sl.l.connc { + _ = c.Close() + } + } + }() + + for { + c, err := m.root.Accept() + if err != nil { + if !m.handleErr(err) { + return err + } + continue + } + + wg.Add(1) + go m.serve(c, m.donec, &wg) + } +} + +func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + + muc := newMuxConn(c) + for _, sl := range m.sls { + for _, s := range sl.ss { + matched := s(muc.getSniffer()) + if matched { + select { + case sl.l.connc <- muc: + case <-donec: + _ = c.Close() + } + return + } + } + } + + _ = c.Close() + err := ErrNotMatched{c: c} + if !m.handleErr(err) { + _ = m.root.Close() + } +} + +func (m *cMux) HandleError(h ErrorHandler) { + m.errh = h +} + +func (m *cMux) handleErr(err error) bool { + if !m.errh(err) { + return false + } + + if ne, ok := err.(net.Error); ok { + return ne.Temporary() + } + + return false +} + +type muxListener struct { + net.Listener + connc chan net.Conn +} + +func (l muxListener) Accept() (net.Conn, error) { + c, ok := <-l.connc + if !ok { + return nil, ErrListenerClosed + } + return c, nil +} + +// MuxConn wraps a net.Conn and provides transparent sniffing of connection data. +type MuxConn struct { + net.Conn + buf bytes.Buffer + sniffer bufferedReader +} + +func newMuxConn(c net.Conn) *MuxConn { + return &MuxConn{ + Conn: c, + } +} + +// From the io.Reader documentation: +// +// When Read encounters an error or end-of-file condition after +// successfully reading n > 0 bytes, it returns the number of +// bytes read. It may return the (non-nil) error from the same call +// or return the error (and n == 0) from a subsequent call. +// An instance of this general case is that a Reader returning +// a non-zero number of bytes at the end of the input stream may +// return either err == EOF or err == nil. The next Read should +// return 0, EOF. 
+func (m *MuxConn) Read(p []byte) (int, error) {
+ if n, err := m.buf.Read(p); err != io.EOF {
+ return n, err
+ }
+ return m.Conn.Read(p)
+}
+
+func (m *MuxConn) getSniffer() io.Reader {
+ m.sniffer = bufferedReader{source: m.Conn, buffer: &m.buf, bufferSize: m.buf.Len()}
+ return &m.sniffer
+}
diff --git a/vendor/github.com/cockroachdb/cmux/matchers.go b/vendor/github.com/cockroachdb/cmux/matchers.go
new file mode 100644
index 00000000000..abc30f6e0ad
--- /dev/null
+++ b/vendor/github.com/cockroachdb/cmux/matchers.go
@@ -0,0 +1,150 @@
+package cmux
+
+import (
+ "bufio"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+)
+
+// Any is a Matcher that matches any connection.
+func Any() Matcher {
+ return func(r io.Reader) bool { return true }
+}
+
+// PrefixMatcher returns a matcher that matches a connection if it
+// starts with any of the strings in strs.
+func PrefixMatcher(strs ...string) Matcher {
+ pt := newPatriciaTreeString(strs...)
+ return pt.matchPrefix
+}
+
+var defaultHTTPMethods = []string{
+ "OPTIONS",
+ "GET",
+ "HEAD",
+ "POST",
+ "PUT",
+ "DELETE",
+ "TRACE",
+ "CONNECT",
+}
+
+// HTTP1Fast only matches the methods in the HTTP request.
+//
+// This matcher is very optimistic: if it returns true, it does not mean that
+// the request is a valid HTTP response. If you want a correct but slower HTTP1
+// matcher, use HTTP1 instead.
+func HTTP1Fast(extMethods ...string) Matcher {
+ return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...)
+}
+
+const maxHTTPRead = 4096
+
+// HTTP1 parses the first line or up to 4096 bytes of the request to see if
+// the connection contains an HTTP request. 
+func HTTP1() Matcher {
+ return func(r io.Reader) bool {
+ br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead})
+ l, part, err := br.ReadLine()
+ if err != nil || part {
+ return false
+ }
+
+ _, _, proto, ok := parseRequestLine(string(l))
+ if !ok {
+ return false
+ }
+
+ v, _, ok := http.ParseHTTPVersion(proto)
+ return ok && v == 1
+ }
+}
+
+// grabbed from net/http.
+func parseRequestLine(line string) (method, uri, proto string, ok bool) {
+ s1 := strings.Index(line, " ")
+ s2 := strings.Index(line[s1+1:], " ")
+ if s1 < 0 || s2 < 0 {
+ return
+ }
+ s2 += s1 + 1
+ return line[:s1], line[s1+1 : s2], line[s2+1:], true
+}
+
+// HTTP2 parses the frame header of the first frame to detect whether the
+// connection is an HTTP2 connection.
+func HTTP2() Matcher {
+ return hasHTTP2Preface
+}
+
+// HTTP1HeaderField returns a matcher matching the header fields of the first
+// request of an HTTP 1 connection.
+func HTTP1HeaderField(name, value string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP1Field(r, name, value)
+ }
+}
+
+// HTTP2HeaderField returns a matcher matching the header fields of the first
+// headers frame. 
+func HTTP2HeaderField(name, value string) Matcher { + return func(r io.Reader) bool { + return matchHTTP2Field(r, name, value) + } +} + +func hasHTTP2Preface(r io.Reader) bool { + var b [len(http2.ClientPreface)]byte + if _, err := io.ReadFull(r, b[:]); err != nil { + return false + } + + return string(b[:]) == http2.ClientPreface +} + +func matchHTTP1Field(r io.Reader, name, value string) (matched bool) { + req, err := http.ReadRequest(bufio.NewReader(r)) + if err != nil { + return false + } + + return req.Header.Get(name) == value +} + +func matchHTTP2Field(r io.Reader, name, value string) (matched bool) { + if !hasHTTP2Preface(r) { + return false + } + + framer := http2.NewFramer(ioutil.Discard, r) + hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) { + if hf.Name == name && hf.Value == value { + matched = true + } + }) + for { + f, err := framer.ReadFrame() + if err != nil { + return false + } + + switch f := f.(type) { + case *http2.HeadersFrame: + if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil { + return false + } + if matched { + return true + } + + if f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0 { + return false + } + } + } +} diff --git a/vendor/github.com/cockroachdb/cmux/patricia.go b/vendor/github.com/cockroachdb/cmux/patricia.go new file mode 100644 index 00000000000..56ec4e7b287 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/patricia.go @@ -0,0 +1,173 @@ +package cmux + +import ( + "bytes" + "io" +) + +// patriciaTree is a simple patricia tree that handles []byte instead of string +// and cannot be changed after instantiation. 
+type patriciaTree struct { + root *ptNode +} + +func newPatriciaTree(b ...[]byte) *patriciaTree { + return &patriciaTree{ + root: newNode(b), + } +} + +func newPatriciaTreeString(strs ...string) *patriciaTree { + b := make([][]byte, len(strs)) + for i, s := range strs { + b[i] = []byte(s) + } + return &patriciaTree{ + root: newNode(b), + } +} + +func (t *patriciaTree) matchPrefix(r io.Reader) bool { + return t.root.match(r, true) +} + +func (t *patriciaTree) match(r io.Reader) bool { + return t.root.match(r, false) +} + +type ptNode struct { + prefix []byte + next map[byte]*ptNode + terminal bool +} + +func newNode(strs [][]byte) *ptNode { + if len(strs) == 0 { + return &ptNode{ + prefix: []byte{}, + terminal: true, + } + } + + if len(strs) == 1 { + return &ptNode{ + prefix: strs[0], + terminal: true, + } + } + + p, strs := splitPrefix(strs) + n := &ptNode{ + prefix: p, + } + + nexts := make(map[byte][][]byte) + for _, s := range strs { + if len(s) == 0 { + n.terminal = true + continue + } + nexts[s[0]] = append(nexts[s[0]], s[1:]) + } + + n.next = make(map[byte]*ptNode) + for first, rests := range nexts { + n.next[first] = newNode(rests) + } + + return n +} + +func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) { + if len(bss) == 0 || len(bss[0]) == 0 { + return prefix, bss + } + + if len(bss) == 1 { + return bss[0], [][]byte{{}} + } + + for i := 0; ; i++ { + var cur byte + eq := true + for j, b := range bss { + if len(b) <= i { + eq = false + break + } + + if j == 0 { + cur = b[i] + continue + } + + if cur != b[i] { + eq = false + break + } + } + + if !eq { + break + } + + prefix = append(prefix, cur) + } + + rest = make([][]byte, 0, len(bss)) + for _, b := range bss { + rest = append(rest, b[len(prefix):]) + } + + return prefix, rest +} + +func readBytes(r io.Reader, n int) (b []byte, err error) { + b = make([]byte, n) + o := 0 + for o < n { + nr, err := r.Read(b[o:]) + if err != nil && err != io.EOF { + return b, err + } + + o += nr + + if err == 
io.EOF { + break + } + } + return b[:o], nil +} + +func (n *ptNode) match(r io.Reader, prefix bool) bool { + if l := len(n.prefix); l > 0 { + b, err := readBytes(r, l) + if err != nil || len(b) != l || !bytes.Equal(b, n.prefix) { + return false + } + } + + if prefix && n.terminal { + return true + } + + b := make([]byte, 1) + for { + nr, err := r.Read(b) + if nr != 0 { + break + } + + if err == io.EOF { + return n.terminal + } + + if err != nil { + return false + } + } + + nextN, ok := n.next[b[0]] + return ok && nextN.match(r, prefix) +} diff --git a/vendor/github.com/boltdb/bolt/.gitignore b/vendor/github.com/coreos/bbolt/.gitignore similarity index 65% rename from vendor/github.com/boltdb/bolt/.gitignore rename to vendor/github.com/coreos/bbolt/.gitignore index c7bd2b7a5b8..c2a8cfa788c 100644 --- a/vendor/github.com/boltdb/bolt/.gitignore +++ b/vendor/github.com/coreos/bbolt/.gitignore @@ -2,3 +2,4 @@ *.test *.swp /bin/ +cmd/bolt/bolt diff --git a/vendor/github.com/boltdb/bolt/BUILD b/vendor/github.com/coreos/bbolt/BUILD similarity index 96% rename from vendor/github.com/boltdb/bolt/BUILD rename to vendor/github.com/coreos/bbolt/BUILD index d29a61e9df0..78399c2f09a 100644 --- a/vendor/github.com/boltdb/bolt/BUILD +++ b/vendor/github.com/coreos/bbolt/BUILD @@ -28,7 +28,7 @@ go_library( ], "//conditions:default": [], }), - importpath = "github.com/boltdb/bolt", + importpath = "github.com/coreos/bbolt", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/coreos/bbolt/LICENSE similarity index 100% rename from vendor/github.com/boltdb/bolt/LICENSE rename to vendor/github.com/coreos/bbolt/LICENSE diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/github.com/coreos/bbolt/Makefile new file mode 100644 index 00000000000..43b94f3bdfe --- /dev/null +++ b/vendor/github.com/coreos/bbolt/Makefile @@ -0,0 +1,30 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X 
main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +fmt: + !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + +# go get honnef.co/go/tools/simple +gosimple: + gosimple ./... + +# go get honnef.co/go/tools/unused +unused: + unused ./... + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/coreos/bbolt + +test: + go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + go test -v ./cmd/bolt + +.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/coreos/bbolt/README.md similarity index 88% rename from vendor/github.com/boltdb/bolt/README.md rename to vendor/github.com/coreos/bbolt/README.md index 8523e337734..015f0efbe84 100644 --- a/vendor/github.com/boltdb/bolt/README.md +++ b/vendor/github.com/coreos/bbolt/README.md @@ -1,6 +1,16 @@ -Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) +bbolt ==== +[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/coreos/bbolt) +[![Coverage](https://codecov.io/gh/coreos/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/coreos/bbolt) +[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/bbolt) + +bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value +store. 
The purpose of this fork is to provide the Go community with an active +maintenance and development target for Bolt; the goal is improved reliability +and stability. bbolt includes bug fixes, performance enhancements, and features +not found in Bolt while preserving backwards compatibility with the Bolt API. + Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] [LMDB project][lmdb]. The goal of the project is to provide a simple, fast, and reliable database for projects that don't require a full database @@ -10,16 +20,18 @@ Since Bolt is meant to be used as such a low-level piece of functionality, simplicity is key. The API will be small and only focus on getting values and setting values. That's it. +[gh_ben]: https://github.com/benbjohnson +[bolt]: https://github.com/boltdb/bolt [hyc_symas]: https://twitter.com/hyc_symas [lmdb]: http://symas.com/mdb/ ## Project Status -Bolt is stable and the API is fixed. Full unit test coverage and randomized -black box testing are used to ensure database consistency and thread safety. -Bolt is currently in high-load production environments serving databases as -large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed -services every day. +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. ## Table of Contents @@ -59,7 +71,7 @@ services every day. To start using Bolt, install Go and run `go get`: ```sh -$ go get github.com/boltdb/bolt/... +$ go get github.com/coreos/bbolt/... 
``` This will retrieve the library and install the `bolt` command line utility into @@ -79,7 +91,7 @@ package main import ( "log" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) func main() { @@ -209,7 +221,7 @@ and then safely close your transaction if an error is returned. This is the recommended way to use Bolt transactions. However, sometimes you may want to manually start and end your transactions. -You can use the `Tx.Begin()` function directly but **please** be sure to close +You can use the `DB.Begin()` function directly but **please** be sure to close the transaction. ```go @@ -395,7 +407,7 @@ db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("MyBucket")).Cursor() prefix := []byte("1234") - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { fmt.Printf("key=%s, value=%s\n", k, v) } @@ -448,6 +460,10 @@ db.View(func(tx *bolt.Tx) error { }) ``` +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. ### Nested buckets @@ -460,6 +476,55 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) func (*Bucket) DeleteBucket(key []byte) error ``` +Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. + +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. 
+ // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) + + // Setup the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. + if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + ### Database backups @@ -469,7 +534,7 @@ this from a read-only transaction, it will perform a hot backup and not block your other database reads and writes. By default, it will use a regular file handle which will utilize the operating -system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) +system's page cache. See the [`Tx`](https://godoc.org/github.com/coreos/bbolt#Tx) documentation for information about optimizing for larger-than-RAM datasets. One common use case is to backup over HTTP so you can use tools like `cURL` to @@ -715,6 +780,9 @@ Here are a few things to note when evaluating and using Bolt: can be reused by a new page or can be unmapped from virtual memory and you'll see an `unexpected fault address` panic when accessing it. +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + * Be careful when using `Bucket.FillPercent`. Setting a high fill percent for buckets that have random inserts will cause your database to have very poor page utilization. 
@@ -755,7 +823,7 @@ Here are a few things to note when evaluating and using Bolt: ## Reading the Source -Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, +Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, transactional key/value database so it can be a good starting point for people interested in how databases work. @@ -848,5 +916,13 @@ Below is a list of public, open source projects that use Bolt: * [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. +* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. +* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. +* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB If you are using Bolt in a project please send a pull request to add it to the list. 
diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/coreos/bbolt/appveyor.yml similarity index 100% rename from vendor/github.com/boltdb/bolt/appveyor.yml rename to vendor/github.com/coreos/bbolt/appveyor.yml diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/coreos/bbolt/bolt_386.go similarity index 72% rename from vendor/github.com/boltdb/bolt/bolt_386.go rename to vendor/github.com/coreos/bbolt/bolt_386.go index e659bfb91f3..820d533c15f 100644 --- a/vendor/github.com/boltdb/bolt/bolt_386.go +++ b/vendor/github.com/coreos/bbolt/bolt_386.go @@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/coreos/bbolt/bolt_amd64.go similarity index 73% rename from vendor/github.com/boltdb/bolt/bolt_amd64.go rename to vendor/github.com/coreos/bbolt/bolt_amd64.go index cca6b7eb707..98fafdb47d8 100644 --- a/vendor/github.com/boltdb/bolt/bolt_amd64.go +++ b/vendor/github.com/coreos/bbolt/bolt_amd64.go @@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_arm.go new file mode 100644 index 00000000000..7e5cb4b9412 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/coreos/bbolt/bolt_arm64.go similarity index 74% rename from vendor/github.com/boltdb/bolt/bolt_arm64.go rename to vendor/github.com/coreos/bbolt/bolt_arm64.go index 6d2309352e0..b26d84f91ba 100644 --- a/vendor/github.com/boltdb/bolt/bolt_arm64.go +++ b/vendor/github.com/coreos/bbolt/bolt_arm64.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/coreos/bbolt/bolt_linux.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_linux.go rename to vendor/github.com/coreos/bbolt/bolt_linux.go diff --git a/vendor/github.com/coreos/bbolt/bolt_mips64x.go b/vendor/github.com/coreos/bbolt/bolt_mips64x.go new file mode 100644 index 00000000000..134b578bd44 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_mips64x.go @@ -0,0 +1,12 @@ +// +build mips64 mips64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x8000000000 // 512GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_mipsx.go similarity index 55% rename from vendor/github.com/boltdb/bolt/bolt_arm.go rename to vendor/github.com/coreos/bbolt/bolt_mipsx.go index e659bfb91f3..d5ecb0597e4 100644 --- a/vendor/github.com/boltdb/bolt/bolt_arm.go +++ b/vendor/github.com/coreos/bbolt/bolt_mipsx.go @@ -1,7 +1,12 @@ +// +build mips mipsle + package bolt // maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB +const maxMapSize = 0x40000000 // 1GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/coreos/bbolt/bolt_openbsd.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_openbsd.go rename to vendor/github.com/coreos/bbolt/bolt_openbsd.go diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/coreos/bbolt/bolt_ppc.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_ppc.go rename to vendor/github.com/coreos/bbolt/bolt_ppc.go diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/coreos/bbolt/bolt_ppc64.go similarity index 74% rename from vendor/github.com/boltdb/bolt/bolt_ppc64.go rename to vendor/github.com/coreos/bbolt/bolt_ppc64.go index 2dc6be02e3e..9331d9771eb 100644 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64.go +++ b/vendor/github.com/coreos/bbolt/bolt_ppc64.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go similarity index 75% rename from vendor/github.com/boltdb/bolt/bolt_ppc64le.go rename to vendor/github.com/coreos/bbolt/bolt_ppc64le.go index 8351e129f6a..8c143bc5d19 100644 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go +++ b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/coreos/bbolt/bolt_s390x.go similarity index 74% rename from vendor/github.com/boltdb/bolt/bolt_s390x.go rename to vendor/github.com/coreos/bbolt/bolt_s390x.go index f4dd26bbba7..d7c39af9253 100644 --- a/vendor/github.com/boltdb/bolt/bolt_s390x.go +++ b/vendor/github.com/coreos/bbolt/bolt_s390x.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/coreos/bbolt/bolt_unix.go similarity index 80% rename from vendor/github.com/boltdb/bolt/bolt_unix.go rename to vendor/github.com/coreos/bbolt/bolt_unix.go index cad62dda1e3..06592a08089 100644 --- a/vendor/github.com/boltdb/bolt/bolt_unix.go +++ b/vendor/github.com/coreos/bbolt/bolt_unix.go @@ -13,29 +13,32 @@ import ( // flock acquires an advisory lock on a file descriptor. 
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + flag := syscall.LOCK_NB + if exclusive { + flag |= syscall.LOCK_EX + } else { + flag |= syscall.LOCK_SH + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + // Attempt to obtain an exclusive lock. + err := syscall.Flock(int(fd), flag) if err == nil { return nil } else if err != syscall.EWOULDBLOCK { return err } + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go similarity index 75% rename from vendor/github.com/boltdb/bolt/bolt_unix_solaris.go rename to vendor/github.com/coreos/bbolt/bolt_unix_solaris.go index 307bf2b3ee9..fd8335ecc96 100644 --- a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go +++ b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go @@ -13,34 +13,33 @@ import ( // flock acquires an advisory lock on a file descriptor. func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. 
- if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Whence = 0 - lock.Pid = 0 - if exclusive { - lock.Type = syscall.F_WRLCK - } else { - lock.Type = syscall.F_RDLCK - } - err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) if err == nil { return nil } else if err != syscall.EAGAIN { return err } + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/coreos/bbolt/bolt_windows.go similarity index 88% rename from vendor/github.com/boltdb/bolt/bolt_windows.go rename to vendor/github.com/coreos/bbolt/bolt_windows.go index d538e6afd77..ca6f9a11c24 100644 --- a/vendor/github.com/boltdb/bolt/bolt_windows.go +++ b/vendor/github.com/coreos/bbolt/bolt_windows.go @@ -59,29 +59,30 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro db.lockfile = f var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := f.Fd() + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } - - err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + // Attempt to obtain an exclusive lock. 
+ err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{})
 if err == nil {
 return nil
 } else if err != errLockViolation {
 return err
 }
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
 // Wait for a bit and try again.
- time.Sleep(50 * time.Millisecond)
+ time.Sleep(flockRetryTimeout)
 }
}
@@ -89,7 +90,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
 func funlock(db *DB) error {
 err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
 db.lockfile.Close()
- os.Remove(db.path+lockExt)
+ os.Remove(db.path + lockExt)
 return err
}
diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/coreos/bbolt/boltsync_unix.go
similarity index 100%
rename from vendor/github.com/boltdb/bolt/boltsync_unix.go
rename to vendor/github.com/coreos/bbolt/boltsync_unix.go
diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/coreos/bbolt/bucket.go
similarity index 95%
rename from vendor/github.com/boltdb/bolt/bucket.go
rename to vendor/github.com/coreos/bbolt/bucket.go
index d2f8c524e42..44db88b8abd 100644
--- a/vendor/github.com/boltdb/bolt/bucket.go
+++ b/vendor/github.com/coreos/bbolt/bucket.go
@@ -14,13 +14,6 @@ const (
 MaxValueSize = (1 << 31) - 2
)
-const (
- maxUint = ^uint(0)
- minUint = 0
- maxInt = int(^uint(0) >> 1)
- minInt = -maxInt - 1
-)
-
 const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
 const (
@@ -130,9 +123,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
 func (b *Bucket) openBucket(value []byte) *Bucket {
 var child = newBucket(b.tx)
+ // If unaligned load/stores are broken on this arch and value is
+ // unaligned simply clone to an aligned byte array. 
+ unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. - if b.tx.writable { + if b.tx.writable && !unaligned { child.bucket = &bucket{} *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) } else { @@ -167,9 +168,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if bytes.Equal(key, k) { if (flags & bucketLeafFlag) != 0 { return nil, ErrBucketExists - } else { - return nil, ErrIncompatibleValue } + return nil, ErrIncompatibleValue } // Create empty, inline bucket. @@ -316,7 +316,12 @@ func (b *Bucket) Delete(key []byte) error { // Move cursor to correct position. c := b.Cursor() - _, _, flags := c.seek(key) + k, _, flags := c.seek(key) + + // Return nil if the key doesn't exist. + if !bytes.Equal(key, k) { + return nil + } // Return an error if there is already existing bucket value. if (flags & bucketLeafFlag) != 0 { @@ -329,6 +334,28 @@ func (b *Bucket) Delete(key []byte) error { return nil } +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + // NextSequence returns an autoincrementing integer for the bucket. 
func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/coreos/bbolt/cursor.go similarity index 100% rename from vendor/github.com/boltdb/bolt/cursor.go rename to vendor/github.com/coreos/bbolt/cursor.go diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/coreos/bbolt/db.go similarity index 85% rename from vendor/github.com/boltdb/bolt/db.go rename to vendor/github.com/coreos/bbolt/db.go index 1223493ca7b..4c8c156b23e 100644 --- a/vendor/github.com/boltdb/bolt/db.go +++ b/vendor/github.com/coreos/bbolt/db.go @@ -7,8 +7,7 @@ import ( "log" "os" "runtime" - "runtime/debug" - "strings" + "sort" "sync" "time" "unsafe" @@ -23,6 +22,8 @@ const version = 2 // Represents a marker value to indicate that a file is a Bolt DB. const magic uint32 = 0xED0CDAED +const pgidNoFreelist pgid = 0xffffffffffffffff + // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes @@ -39,6 +40,9 @@ const ( // default page size for db is set to the OS page size. var defaultPageSize = os.Getpagesize() +// The time elapsed between consecutive file locking attempts. +const flockRetryTimeout = 50 * time.Millisecond + // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. @@ -61,6 +65,11 @@ type DB struct { // THIS IS UNSAFE. PLEASE USE WITH CAUTION. NoSync bool + // When true, skips syncing freelist to disk. This improves the database + // write performance under normal operation, but requires a full database + // re-sync during recovery. + NoFreelistSync bool + // When true, skips the truncate call when growing the database. 
// Setting this to true is only safe on non-ext3/ext4 systems. // Skipping truncation avoids preallocation of hard drive space and @@ -107,9 +116,11 @@ type DB struct { opened bool rwtx *Tx txs []*Tx - freelist *freelist stats Stats + freelist *freelist + freelistLoad sync.Once + pagePool sync.Pool batchMu sync.Mutex @@ -148,14 +159,17 @@ func (db *DB) String() string { // If the file does not exist then it will be created automatically. // Passing in nil options will cause Bolt to open the database with the default options. func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - + db := &DB{ + opened: true, + } // Set default options if no options are provided. if options == nil { options = DefaultOptions } + db.NoSync = options.NoSync db.NoGrowSync = options.NoGrowSync db.MmapFlags = options.MmapFlags + db.NoFreelistSync = options.NoFreelistSync // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize @@ -184,6 +198,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + db.lockfile = nil // make 'unused' happy. TODO: rework locks _ = db.close() return nil, err } @@ -191,6 +206,11 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Default values for test hooks db.ops.writeAt = db.file.WriteAt + if db.pageSize = options.PageSize; db.pageSize == 0 { + // Set the default page size to the OS page size. + db.pageSize = defaultPageSize + } + // Initialize the database if it doesn't exist. if info, err := db.file.Stat(); err != nil { return nil, err @@ -202,20 +222,21 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // Read the first meta page to determine the page size. 
var buf [0x1000]byte - if _, err := db.file.ReadAt(buf[:], 0); err == nil { - m := db.pageInBuffer(buf[:], 0).meta() - if err := m.validate(); err != nil { - // If we can't read the page size, we can assume it's the same - // as the OS -- since that's how the page size was chosen in the - // first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - db.pageSize = os.Getpagesize() - } else { + // If we can't read the page size, but can read a page, assume + // it's the same as the OS or one given -- since that's how the + // page size was chosen in the first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + // + // TODO: scan for next page + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { db.pageSize = int(m.pageSize) } + } else { + return nil, ErrInvalid } } @@ -232,14 +253,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { return nil, err } - // Read in the freelist. - db.freelist = newFreelist() - db.freelist.read(db.page(db.meta().freelist)) + if db.readOnly { + return db, nil + } + + db.loadFreelist() + + // Flush freelist when transitioning from no sync to sync so + // NoFreelistSync unaware boltdb can open the db later. + if !db.NoFreelistSync && !db.hasSyncedFreelist() { + tx, err := db.Begin(true) + if tx != nil { + err = tx.Commit() + } + if err != nil { + _ = db.close() + return nil, err + } + } // Mark the database as opened and return. return db, nil } +// loadFreelist reads the freelist if it is synced, or reconstructs it +// by scanning the DB if it is not synced. It assumes there are no +// concurrent accesses being made to the freelist. 
+func (db *DB) loadFreelist() { + db.freelistLoad.Do(func() { + db.freelist = newFreelist() + if !db.hasSyncedFreelist() { + // Reconstruct free list by scanning the DB. + db.freelist.readIDs(db.freepages()) + } else { + // Read free list from freelist page. + db.freelist.read(db.page(db.meta().freelist)) + } + db.stats.FreePageN = len(db.freelist.ids) + }) +} + +func (db *DB) hasSyncedFreelist() bool { + return db.meta().freelist != pgidNoFreelist +} + // mmap opens the underlying memory-mapped file and initializes the meta references. // minsz is the minimum size that the new mmap can be. func (db *DB) mmap(minsz int) error { @@ -341,9 +398,6 @@ func (db *DB) mmapSize(size int) (int, error) { // init creates a new database file and initializes its meta pages. func (db *DB) init() error { - // Set the page size to the OS page size. - db.pageSize = os.Getpagesize() - // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { @@ -526,21 +580,36 @@ func (db *DB) beginRWTx() (*Tx, error) { t := &Tx{writable: true} t.init(db) db.rwtx = t + db.freePages() + return t, nil +} - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } +// freePages releases any pages associated with closed read-only transactions. +func (db *DB) freePages() { + // Free all pending pages prior to earliest open transaction. + sort.Sort(txsById(db.txs)) + minid := txid(0xFFFFFFFFFFFFFFFF) + if len(db.txs) > 0 { + minid = db.txs[0].meta.txid } if minid > 0 { db.freelist.release(minid - 1) } - - return t, nil + // Release unused txid extents. + for _, t := range db.txs { + db.freelist.releaseRange(minid, t.meta.txid-1) + minid = t.meta.txid + 1 + } + db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + // Any page both allocated and freed in an extent is safe to release. 
} +type txsById []*Tx + +func (t txsById) Len() int { return len(t) } +func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } + // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -552,7 +621,10 @@ func (db *DB) removeTx(tx *Tx) { // Remove the transaction. for i, t := range db.txs { if t == tx { - db.txs = append(db.txs[:i], db.txs[i+1:]...) + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] break } } @@ -630,11 +702,7 @@ func (db *DB) View(fn func(*Tx) error) error { return err } - if err := t.Rollback(); err != nil { - return err - } - - return nil + return t.Rollback() } // Batch calls fn as part of a batch. It behaves similar to Update, @@ -734,9 +802,7 @@ retry: // pass success, or bolt internal errors, to all callers for _, c := range b.calls { - if c.err != nil { - c.err <- err - } + c.err <- err } break retry } @@ -823,7 +889,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(count int) (*page, error) { +func (db *DB) allocate(txid txid, count int) (*page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -835,7 +901,7 @@ func (db *DB) allocate(count int) (*page, error) { p.overflow = uint32(count - 1) // Use pages from the freelist if they are available. 
- if p.id = db.freelist.allocate(count); p.id != 0 { + if p.id = db.freelist.allocate(txid, count); p.id != 0 { return p, nil } @@ -890,6 +956,38 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } +func (db *DB) freepages() []pgid { + tx, err := db.beginTx() + defer func() { + err = tx.Rollback() + if err != nil { + panic("freepages: failed to rollback tx") + } + }() + if err != nil { + panic("freepages: failed to open read only tx") + } + + reachable := make(map[pgid]*page) + nofreed := make(map[pgid]bool) + ech := make(chan error) + go func() { + for e := range ech { + panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) + } + }() + tx.checkBucket(&tx.root, reachable, nofreed, ech) + close(ech) + + var fids []pgid + for i := pgid(2); i < db.meta().pgid; i++ { + if _, ok := reachable[i]; !ok { + fids = append(fids, i) + } + } + return fids +} + // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. @@ -900,6 +998,10 @@ type Options struct { // Sets the DB.NoGrowSync flag before memory mapping the file. NoGrowSync bool + // Do not sync freelist to disk. This improves the database write performance + // under normal operation, but requires a full database re-sync during recovery. + NoFreelistSync bool + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool @@ -916,6 +1018,14 @@ type Options struct { // If initialMmapSize is smaller than the previous database size, // it takes no effect. InitialMmapSize int + + // PageSize overrides the default OS page size. + PageSize int + + // NoSync sets the initial value of DB.NoSync. Normally this can just be + // set directly on the DB itself when returned from Open(), but this option + // is useful in APIs which expose Options but not the underlying DB. 
+ NoSync bool } // DefaultOptions represent the options used if nil options are passed into Open(). @@ -952,15 +1062,11 @@ func (s *Stats) Sub(other *Stats) Stats { diff.PendingPageN = s.PendingPageN diff.FreeAlloc = s.FreeAlloc diff.FreelistInuse = s.FreelistInuse - diff.TxN = other.TxN - s.TxN + diff.TxN = s.TxN - other.TxN diff.TxStats = s.TxStats.Sub(&other.TxStats) return diff } -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - type Info struct { Data uintptr PageSize int @@ -999,7 +1105,8 @@ func (m *meta) copy(dest *meta) { func (m *meta) write(p *page) { if m.root.root >= m.pgid { panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { + } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) } @@ -1026,11 +1133,3 @@ func _assert(condition bool, msg string, v ...interface{}) { panic(fmt.Sprintf("assertion failed: "+msg, v...)) } } - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) 
} - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/coreos/bbolt/doc.go similarity index 100% rename from vendor/github.com/boltdb/bolt/doc.go rename to vendor/github.com/coreos/bbolt/doc.go diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/coreos/bbolt/errors.go similarity index 100% rename from vendor/github.com/boltdb/bolt/errors.go rename to vendor/github.com/coreos/bbolt/errors.go diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/coreos/bbolt/freelist.go similarity index 56% rename from vendor/github.com/boltdb/bolt/freelist.go rename to vendor/github.com/coreos/bbolt/freelist.go index 1b7ba91b2a5..266f1542945 100644 --- a/vendor/github.com/boltdb/bolt/freelist.go +++ b/vendor/github.com/coreos/bbolt/freelist.go @@ -6,25 +6,40 @@ import ( "unsafe" ) +// txPending holds a list of pgids and corresponding allocation txns +// that are pending to be freed. +type txPending struct { + ids []pgid + alloctx []txid // txids allocating the ids + lastReleaseBegin txid // beginning txid of last matching releaseRange +} + // freelist represents a list of all pages that are available for allocation. // It also tracks pages that have been freed but are still in use by open transactions. type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. + ids []pgid // all free and available free page ids. + allocs map[pgid]txid // mapping of txid that allocated a pgid. + pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. } // newFreelist returns an empty, initialized freelist. 
func newFreelist() *freelist { return &freelist{ - pending: make(map[txid][]pgid), + allocs: make(map[pgid]txid), + pending: make(map[txid]*txPending), cache: make(map[pgid]bool), } } // size returns the size of the page after serialization. func (f *freelist) size() int { - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) + n := f.count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) } // count returns count of pages on the freelist @@ -40,27 +55,26 @@ func (f *freelist) free_count() int { // pending_count returns count of pending pages func (f *freelist) pending_count() int { var count int - for _, list := range f.pending { - count += len(list) + for _, txp := range f.pending { + count += len(txp.ids) } return count } -// all returns a list of all free ids and all pending ids in one sorted list. -func (f *freelist) all() []pgid { - m := make(pgids, 0) - - for _, list := range f.pending { - m = append(m, list...) +// copyall copies into dst a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (f *freelist) copyall(dst []pgid) { + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) } - sort.Sort(m) - return pgids(f.ids).merge(m) + mergepgids(dst, f.ids, m) } // allocate returns the starting page id of a contiguous list of pages of a given size. // If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { +func (f *freelist) allocate(txid txid, n int) pgid { if len(f.ids) == 0 { return 0 } @@ -93,7 +107,7 @@ func (f *freelist) allocate(n int) pgid { for i := pgid(0); i < pgid(n); i++ { delete(f.cache, initial+i) } - + f.allocs[initial] = txid return initial } @@ -110,28 +124,73 @@ func (f *freelist) free(txid txid, p *page) { } // Free page and all its overflow pages. 
- var ids = f.pending[txid] + txp := f.pending[txid] + if txp == nil { + txp = &txPending{} + f.pending[txid] = txp + } + allocTxid, ok := f.allocs[p.id] + if ok { + delete(f.allocs, p.id) + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 + } + for id := p.id; id <= p.id+pgid(p.overflow); id++ { // Verify that page is not already free. if f.cache[id] { panic(fmt.Sprintf("page %d already freed", id)) } - // Add to the freelist and cache. - ids = append(ids, id) + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) f.cache[id] = true } - f.pending[txid] = ids } // release moves all page ids for a transaction id (or older) to the freelist. func (f *freelist) release(txid txid) { m := make(pgids, 0) - for tid, ids := range f.pending { + for tid, txp := range f.pending { if tid <= txid { // Move transaction's pending pages to the available freelist. // Don't remove from the cache since the page is still free. - m = append(m, ids...) + m = append(m, txp.ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. +func (f *freelist) releaseRange(begin, end txid) { + if begin > end { + return + } + var m pgids + for tid, txp := range f.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. 
+ if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { delete(f.pending, tid) } } @@ -142,12 +201,29 @@ func (f *freelist) release(txid txid) { // rollback removes the pages from a given pending tx. func (f *freelist) rollback(txid txid) { // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) + txp := f.pending[txid] + if txp == nil { + return } - - // Remove pages from pending list. + var m pgids + for i, pgid := range txp.ids { + delete(f.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + f.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. + m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. delete(f.pending, txid) + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) } // freed returns whether a given page is in the free list. @@ -157,6 +233,9 @@ func (f *freelist) freed(pgid pgid) bool { // read initializes the freelist from a freelist page. func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. 
idx, count := 0, int(p.count) @@ -169,7 +248,7 @@ func (f *freelist) read(p *page) { if count == 0 { f.ids = nil } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] f.ids = make([]pgid, len(ids)) copy(f.ids, ids) @@ -181,27 +260,33 @@ func (f *freelist) read(p *page) { f.reindex() } +// read initializes the freelist from a given list of ids. +func (f *freelist) readIDs(ids []pgid) { + f.ids = ids + f.reindex() +} + // write writes the page ids onto a freelist page. All free and pending ids are // saved to disk since in the event of a program crash, all pending ids will // become free. func (f *freelist) write(p *page) error { // Combine the old free pgids and pgids waiting on an open transaction. - ids := f.all() // Update the header flag. p.flags |= freelistPageFlag // The page.count can only hold up to 64k elements so if we overflow that // number then we handle it by putting the size in the first element. - if len(ids) == 0 { - p.count = uint16(len(ids)) - } else if len(ids) < 0xFFFF { - p.count = uint16(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + lenids := f.count() + if lenids == 0 { + p.count = uint16(lenids) + } else if lenids < 0xFFFF { + p.count = uint16(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) } else { p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) } return nil @@ -213,8 +298,8 @@ func (f *freelist) reload(p *page) { // Build a cache of only pending pages. 
pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { + for _, txp := range f.pending { + for _, pendingID := range txp.ids { pcache[pendingID] = true } } @@ -236,12 +321,12 @@ func (f *freelist) reload(p *page) { // reindex rebuilds the free cache based on available and pending free lists. func (f *freelist) reindex() { - f.cache = make(map[pgid]bool) + f.cache = make(map[pgid]bool, len(f.ids)) for _, id := range f.ids { f.cache[id] = true } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { + for _, txp := range f.pending { + for _, pendingID := range txp.ids { f.cache[pendingID] = true } } diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/coreos/bbolt/node.go similarity index 99% rename from vendor/github.com/boltdb/bolt/node.go rename to vendor/github.com/coreos/bbolt/node.go index 159318b229c..f4ce240eddd 100644 --- a/vendor/github.com/boltdb/bolt/node.go +++ b/vendor/github.com/coreos/bbolt/node.go @@ -365,7 +365,7 @@ func (n *node) spill() error { } // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) if err != nil { return err } diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/coreos/bbolt/page.go similarity index 88% rename from vendor/github.com/boltdb/bolt/page.go rename to vendor/github.com/coreos/bbolt/page.go index 7651a6bf7d9..cde403ae86d 100644 --- a/vendor/github.com/boltdb/bolt/page.go +++ b/vendor/github.com/coreos/bbolt/page.go @@ -145,12 +145,33 @@ func (a pgids) merge(b pgids) pgids { // Return the opposite slice if one is nil. if len(a) == 0 { return b - } else if len(b) == 0 { + } + if len(b) == 0 { return a } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} - // Create a list to hold all elements from both lists. 
- merged := make(pgids, 0, len(a)+len(b)) +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] // Assign lead to the slice with a lower starting value, follow to the higher value. lead, follow := a, b @@ -172,7 +193,5 @@ func (a pgids) merge(b pgids) pgids { } // Append what's left in follow. - merged = append(merged, follow...) - - return merged + _ = append(merged, follow...) } diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/coreos/bbolt/tx.go similarity index 94% rename from vendor/github.com/boltdb/bolt/tx.go rename to vendor/github.com/coreos/bbolt/tx.go index 1cfb4cde855..5c0290733f5 100644 --- a/vendor/github.com/boltdb/bolt/tx.go +++ b/vendor/github.com/coreos/bbolt/tx.go @@ -126,10 +126,7 @@ func (tx *Tx) DeleteBucket(name []byte) error { // the error is returned to the caller. func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil + return fn(k, tx.root.Bucket(k)) }) } @@ -169,28 +166,18 @@ func (tx *Tx) Commit() error { // Free the old root bucket. tx.meta.root.root = tx.root.root - opgid := tx.meta.pgid - - // Free the freelist and allocate new pages for it. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). 
- tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err + // Free the old freelist because commit writes out a fresh freelist. + if tx.meta.freelist != pgidNoFreelist { + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() + if !tx.db.NoFreelistSync { + err := tx.commitFreelist() + if err != nil { return err } + } else { + tx.meta.freelist = pgidNoFreelist } // Write dirty pages to disk. @@ -235,6 +222,31 @@ func (tx *Tx) Commit() error { return nil } +func (tx *Tx) commitFreelist() error { + // Allocate new pages for the new free list. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + opgid := tx.meta.pgid + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + return nil +} + // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. func (tx *Tx) Rollback() error { @@ -305,7 +317,11 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { if err != nil { return 0, err } - defer func() { _ = f.Close() }() + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() // Generate a meta page. 
We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) @@ -333,7 +349,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { return n, fmt.Errorf("seek: %s", err) } @@ -344,7 +360,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, err } - return n, f.Close() + return n, nil } // CopyFile copies the entire database to file at the given path. @@ -379,9 +395,14 @@ func (tx *Tx) Check() <-chan error { } func (tx *Tx) check(ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + // Check if any pages are double freed. freed := make(map[pgid]bool) - for _, id := range tx.db.freelist.all() { + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) } @@ -392,8 +413,10 @@ func (tx *Tx) check(ch chan error) { reachable := make(map[pgid]*page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } } // Recursively check buckets. @@ -451,7 +474,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo // allocate returns a contiguous block of memory starting at a given page. 
func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) + p, err := tx.db.allocate(tx.meta.txid, count) if err != nil { return nil, err } @@ -460,7 +483,7 @@ func (tx *Tx) allocate(count int) (*page, error) { tx.pages[p.id] = p // Update statistics. - tx.stats.PageCount++ + tx.stats.PageCount += count tx.stats.PageAlloc += count * tx.db.pageSize return p, nil diff --git a/vendor/github.com/coreos/etcd/auth/BUILD b/vendor/github.com/coreos/etcd/auth/BUILD index 7452a66ee29..892b00a396f 100644 --- a/vendor/github.com/coreos/etcd/auth/BUILD +++ b/vendor/github.com/coreos/etcd/auth/BUILD @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "doc.go", + "jwt.go", "range_perm_cache.go", "simple_token.go", "store.go", @@ -14,10 +15,14 @@ go_library( "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/adt:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/dgrijalva/jwt-go:go_default_library", "//vendor/golang.org/x/crypto/bcrypt:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/peer:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go index c6e2a12a7fa..009ebda70ca 100644 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go +++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go @@ -803,7 +803,7 @@ func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } var fileDescriptorAuth = []byte{ // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 
0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go new file mode 100644 index 00000000000..214ae48c83a --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/jwt.go @@ -0,0 +1,137 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package auth + +import ( + "crypto/rsa" + "io/ioutil" + + jwt "github.com/dgrijalva/jwt-go" + "golang.org/x/net/context" +) + +type tokenJWT struct { + signMethod string + signKey *rsa.PrivateKey + verifyKey *rsa.PublicKey +} + +func (t *tokenJWT) enable() {} +func (t *tokenJWT) disable() {} +func (t *tokenJWT) invalidateUser(string) {} +func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } + +func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { + // rev isn't used in JWT, it is only used in simple token + var ( + username string + revision uint64 + ) + + parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { + return t.verifyKey, nil + }) + + switch err.(type) { + case nil: + if !parsed.Valid { + plog.Warningf("invalid jwt token: %s", token) + return nil, false + } + + claims := parsed.Claims.(jwt.MapClaims) + + username = claims["username"].(string) + revision = uint64(claims["revision"].(float64)) + default: + plog.Warningf("failed to parse jwt token: %s", err) + return nil, false + } + + return &AuthInfo{Username: username, Revision: revision}, true +} + +func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { + // Future work: let a jwt token include permission information would be useful for + // permission checking in proxy side. 
+ tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), + jwt.MapClaims{ + "username": username, + "revision": revision, + }) + + token, err := tk.SignedString(t.signKey) + if err != nil { + plog.Debugf("failed to sign jwt token: %s", err) + return "", err + } + + plog.Debugf("jwt token: %s", token) + + return token, err +} + +func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) { + for k, v := range opts { + switch k { + case "sign-method": + jwtSignMethod = v + case "pub-key": + jwtPubKeyPath = v + case "priv-key": + jwtPrivKeyPath = v + default: + plog.Errorf("unknown token specific option: %s", k) + return "", "", "", ErrInvalidAuthOpts + } + } + + return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil +} + +func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) { + jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + t := &tokenJWT{} + + t.signMethod = jwtSignMethod + + verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) + if err != nil { + plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) + return nil, err + } + t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) + if err != nil { + plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) + return nil, err + } + + signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) + if err != nil { + plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) + return nil, err + } + t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) + if err != nil { + plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err) + return nil, err + } + + return t, nil +} diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go index 3cd1ad2a411..691b65ba38e 100644 --- a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go +++ 
b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go @@ -15,93 +15,11 @@ package auth import ( - "bytes" - "sort" - "github.com/coreos/etcd/auth/authpb" "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/adt" ) -// isSubset returns true if a is a subset of b. -// If a is a prefix of b, then a is a subset of b. -// Given intervals [a1,a2) and [b1,b2), is -// the a interval a subset of b? -func isSubset(a, b *rangePerm) bool { - switch { - case len(a.end) == 0 && len(b.end) == 0: - // a, b are both keys - return bytes.Equal(a.begin, b.begin) - case len(b.end) == 0: - // b is a key, a is a range - return false - case len(a.end) == 0: - // a is a key, b is a range. need b1 <= a1 and a1 < b2 - return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.begin, b.end) < 0 - default: - // both are ranges. need b1 <= a1 and a2 <= b2 - return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.end, b.end) <= 0 - } -} - -func isRangeEqual(a, b *rangePerm) bool { - return bytes.Equal(a.begin, b.begin) && bytes.Equal(a.end, b.end) -} - -// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms. -// If there are equal ranges, removeSubsetRangePerms only keeps one of them. -// It returns a sorted rangePerm slice. -func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) { - sort.Sort(RangePermSliceByBegin(perms)) - var prev *rangePerm - for i := range perms { - if i == 0 { - prev = perms[i] - newp = append(newp, perms[i]) - continue - } - if isRangeEqual(perms[i], prev) { - continue - } - if isSubset(perms[i], prev) { - continue - } - if isSubset(prev, perms[i]) { - prev = perms[i] - newp[len(newp)-1] = perms[i] - continue - } - prev = perms[i] - newp = append(newp, perms[i]) - } - return newp -} - -// mergeRangePerms merges adjacent rangePerms. 
-func mergeRangePerms(perms []*rangePerm) []*rangePerm { - var merged []*rangePerm - perms = removeSubsetRangePerms(perms) - - i := 0 - for i < len(perms) { - begin, next := i, i - for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) >= 0 { - next++ - } - // don't merge ["a", "b") with ["b", ""), because perms[next+1].end is empty. - if next != begin && len(perms[next].end) > 0 { - merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end}) - } else { - merged = append(merged, perms[begin]) - if next != begin { - merged = append(merged, perms[next]) - } - } - i = next + 1 - } - - return merged -} - func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { user := getUser(tx, userName) if user == nil { @@ -109,7 +27,8 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission return nil } - var readPerms, writePerms []*rangePerm + readPerms := &adt.IntervalTree{} + writePerms := &adt.IntervalTree{} for _, roleName := range user.Roles { role := getRole(tx, roleName) @@ -118,48 +37,66 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission } for _, perm := range role.KeyPermission { - rp := &rangePerm{begin: perm.Key, end: perm.RangeEnd} + var ivl adt.Interval + var rangeEnd []byte + + if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 { + rangeEnd = perm.RangeEnd + } + + if len(perm.RangeEnd) != 0 { + ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd) + } else { + ivl = adt.NewBytesAffinePoint(perm.Key) + } switch perm.PermType { case authpb.READWRITE: - readPerms = append(readPerms, rp) - writePerms = append(writePerms, rp) + readPerms.Insert(ivl, struct{}{}) + writePerms.Insert(ivl, struct{}{}) case authpb.READ: - readPerms = append(readPerms, rp) + readPerms.Insert(ivl, struct{}{}) case authpb.WRITE: - writePerms = append(writePerms, rp) + writePerms.Insert(ivl, struct{}{}) } } } return &unifiedRangePermissions{ - readPerms: 
mergeRangePerms(readPerms), - writePerms: mergeRangePerms(writePerms), + readPerms: readPerms, + writePerms: writePerms, } } -func checkKeyPerm(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - var tocheck []*rangePerm +func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { + if len(rangeEnd) == 1 && rangeEnd[0] == 0 { + rangeEnd = nil + } + ivl := adt.NewBytesAffineInterval(key, rangeEnd) switch permtyp { case authpb.READ: - tocheck = cachedPerms.readPerms + return cachedPerms.readPerms.Contains(ivl) case authpb.WRITE: - tocheck = cachedPerms.writePerms + return cachedPerms.writePerms.Contains(ivl) default: plog.Panicf("unknown auth type: %v", permtyp) } + return false +} - requiredPerm := &rangePerm{begin: key, end: rangeEnd} - - for _, perm := range tocheck { - if isSubset(requiredPerm, perm) { - return true - } +func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { + pt := adt.NewBytesAffinePoint(key) + switch permtyp { + case authpb.READ: + return cachedPerms.readPerms.Intersects(pt) + case authpb.WRITE: + return cachedPerms.writePerms.Intersects(pt) + default: + plog.Panicf("unknown auth type: %v", permtyp) } - return false } @@ -175,7 +112,11 @@ func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key as.rangePermCache[userName] = perms } - return checkKeyPerm(as.rangePermCache[userName], key, rangeEnd, permtyp) + if len(rangeEnd) == 0 { + return checkKeyPoint(as.rangePermCache[userName], key, permtyp) + } + + return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp) } func (as *authStore) clearCachedPerm() { @@ -187,35 +128,6 @@ func (as *authStore) invalidateCachedPerm(userName string) { } type unifiedRangePermissions struct { - // readPerms[i] and readPerms[j] (i != j) don't overlap - readPerms []*rangePerm - // writePerms[i] and writePerms[j] 
(i != j) don't overlap, too - writePerms []*rangePerm -} - -type rangePerm struct { - begin, end []byte -} - -type RangePermSliceByBegin []*rangePerm - -func (slice RangePermSliceByBegin) Len() int { - return len(slice) -} - -func (slice RangePermSliceByBegin) Less(i, j int) bool { - switch bytes.Compare(slice[i].begin, slice[j].begin) { - case 0: // begin(i) == begin(j) - return bytes.Compare(slice[i].end, slice[j].end) == -1 - - case -1: // begin(i) < begin(j) - return true - - default: - return false - } -} - -func (slice RangePermSliceByBegin) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] + readPerms *adt.IntervalTree + writePerms *adt.IntervalTree } diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go index a39f3927685..94d92a115e2 100644 --- a/vendor/github.com/coreos/etcd/auth/simple_token.go +++ b/vendor/github.com/coreos/etcd/auth/simple_token.go @@ -19,10 +19,14 @@ package auth import ( "crypto/rand" + "fmt" "math/big" + "strconv" "strings" "sync" "time" + + "golang.org/x/net/context" ) const ( @@ -90,24 +94,14 @@ func (tm *simpleTokenTTLKeeper) run() { } } -func (as *authStore) enable() { - delf := func(tk string) { - if username, ok := as.simpleTokens[tk]; ok { - plog.Infof("deleting token %s for user %s", tk, username) - delete(as.simpleTokens, tk) - } - } - as.simpleTokenKeeper = &simpleTokenTTLKeeper{ - tokens: make(map[string]time.Time), - donec: make(chan struct{}), - stopc: make(chan struct{}), - deleteTokenFunc: delf, - mu: &as.simpleTokensMu, - } - go as.simpleTokenKeeper.run() +type tokenSimple struct { + indexWaiter func(uint64) <-chan struct{} + simpleTokenKeeper *simpleTokenTTLKeeper + simpleTokensMu sync.Mutex + simpleTokens map[string]string // token -> username } -func (as *authStore) GenSimpleToken() (string, error) { +func (t *tokenSimple) genTokenPrefix() (string, error) { ret := make([]byte, defaultSimpleTokenLength) for i := 0; i < defaultSimpleTokenLength; 
i++ { @@ -122,28 +116,105 @@ func (as *authStore) GenSimpleToken() (string, error) { return string(ret), nil } -func (as *authStore) assignSimpleTokenToUser(username, token string) { - as.simpleTokensMu.Lock() - _, ok := as.simpleTokens[token] +func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { + t.simpleTokensMu.Lock() + _, ok := t.simpleTokens[token] if ok { plog.Panicf("token %s is alredy used", token) } - as.simpleTokens[token] = username - as.simpleTokenKeeper.addSimpleToken(token) - as.simpleTokensMu.Unlock() + t.simpleTokens[token] = username + t.simpleTokenKeeper.addSimpleToken(token) + t.simpleTokensMu.Unlock() } -func (as *authStore) invalidateUser(username string) { - if as.simpleTokenKeeper == nil { +func (t *tokenSimple) invalidateUser(username string) { + if t.simpleTokenKeeper == nil { return } - as.simpleTokensMu.Lock() - for token, name := range as.simpleTokens { + t.simpleTokensMu.Lock() + for token, name := range t.simpleTokens { if strings.Compare(name, username) == 0 { - delete(as.simpleTokens, token) - as.simpleTokenKeeper.deleteSimpleToken(token) + delete(t.simpleTokens, token) + t.simpleTokenKeeper.deleteSimpleToken(token) } } - as.simpleTokensMu.Unlock() + t.simpleTokensMu.Unlock() +} + +func (t *tokenSimple) enable() { + delf := func(tk string) { + if username, ok := t.simpleTokens[tk]; ok { + plog.Infof("deleting token %s for user %s", tk, username) + delete(t.simpleTokens, tk) + } + } + t.simpleTokenKeeper = &simpleTokenTTLKeeper{ + tokens: make(map[string]time.Time), + donec: make(chan struct{}), + stopc: make(chan struct{}), + deleteTokenFunc: delf, + mu: &t.simpleTokensMu, + } + go t.simpleTokenKeeper.run() +} + +func (t *tokenSimple) disable() { + t.simpleTokensMu.Lock() + tk := t.simpleTokenKeeper + t.simpleTokenKeeper = nil + t.simpleTokens = make(map[string]string) // invalidate all tokens + t.simpleTokensMu.Unlock() + if tk != nil { + tk.stop() + } +} + +func (t *tokenSimple) info(ctx context.Context, token 
string, revision uint64) (*AuthInfo, bool) { + if !t.isValidSimpleToken(ctx, token) { + return nil, false + } + t.simpleTokensMu.Lock() + username, ok := t.simpleTokens[token] + if ok && t.simpleTokenKeeper != nil { + t.simpleTokenKeeper.resetSimpleToken(token) + } + t.simpleTokensMu.Unlock() + return &AuthInfo{Username: username, Revision: revision}, ok +} + +func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { + // rev isn't used in simple token, it is only used in JWT + index := ctx.Value("index").(uint64) + simpleToken := ctx.Value("simpleToken").(string) + token := fmt.Sprintf("%s.%d", simpleToken, index) + t.assignSimpleTokenToUser(username, token) + + return token, nil +} + +func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool { + splitted := strings.Split(token, ".") + if len(splitted) != 2 { + return false + } + index, err := strconv.Atoi(splitted[1]) + if err != nil { + return false + } + + select { + case <-t.indexWaiter(uint64(index)): + return true + case <-ctx.Done(): + } + + return false +} + +func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple { + return &tokenSimple{ + simpleTokens: make(map[string]string), + indexWaiter: indexWaiter, + } } diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go index 236bb2c529d..3fac7f5a6fd 100644 --- a/vendor/github.com/coreos/etcd/auth/store.go +++ b/vendor/github.com/coreos/etcd/auth/store.go @@ -18,11 +18,10 @@ import ( "bytes" "encoding/binary" "errors" - "fmt" "sort" - "strconv" "strings" "sync" + "sync/atomic" "github.com/coreos/etcd/auth/authpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -30,7 +29,9 @@ import ( "github.com/coreos/pkg/capnslog" "golang.org/x/crypto/bcrypt" "golang.org/x/net/context" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) var ( @@ -60,6 +61,8 @@ var ( 
ErrAuthNotEnabled = errors.New("auth: authentication is not enabled") ErrAuthOldRevision = errors.New("auth: revision in header is old") ErrInvalidAuthToken = errors.New("auth: invalid auth token") + ErrInvalidAuthOpts = errors.New("auth: invalid auth options") + ErrInvalidAuthMgmt = errors.New("auth: invalid auth management") // BcryptCost is the algorithm cost / strength for hashing auth passwords BcryptCost = bcrypt.DefaultCost @@ -129,10 +132,6 @@ type AuthStore interface { // RoleList gets a list of all roles RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) - // AuthInfoFromToken gets a username from the given Token and current revision number - // (The revision number is used for preventing the TOCTOU problem) - AuthInfoFromToken(token string) (*AuthInfo, bool) - // IsPutPermitted checks put permission of the user IsPutPermitted(authInfo *AuthInfo, key []byte) error @@ -145,8 +144,9 @@ type AuthStore interface { // IsAdminPermitted checks admin permission of the user IsAdminPermitted(authInfo *AuthInfo) error - // GenSimpleToken produces a simple random string - GenSimpleToken() (string, error) + // GenTokenPrefix produces a random string in a case of simple token + // in a case of JWT, it produces an empty string + GenTokenPrefix() (string, error) // Revision gets current revision of authStore Revision() uint64 @@ -159,33 +159,32 @@ type AuthStore interface { // AuthInfoFromCtx gets AuthInfo from gRPC's context AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) + + // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context + AuthInfoFromTLS(ctx context.Context) *AuthInfo +} + +type TokenProvider interface { + info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) + assign(ctx context.Context, username string, revision uint64) (string, error) + enable() + disable() + + invalidateUser(string) + genTokenPrefix() (string, error) } type authStore struct { + // atomic operations; need 64-bit align, or 32-bit 
tests will crash + revision uint64 + be backend.Backend enabled bool enabledMu sync.RWMutex rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions - revision uint64 - - // tokenSimple in v3.2+ - indexWaiter func(uint64) <-chan struct{} - simpleTokenKeeper *simpleTokenTTLKeeper - simpleTokensMu sync.Mutex - simpleTokens map[string]string // token -> username -} - -func newDeleterFunc(as *authStore) func(string) { - return func(t string) { - as.simpleTokensMu.Lock() - defer as.simpleTokensMu.Unlock() - if username, ok := as.simpleTokens[t]; ok { - plog.Infof("deleting token %s for user %s", t, username) - delete(as.simpleTokens, t) - } - } + tokenProvider TokenProvider } func (as *authStore) AuthEnable() error { @@ -215,11 +214,11 @@ func (as *authStore) AuthEnable() error { tx.UnsafePut(authBucketName, enableFlagKey, authEnabled) as.enabled = true - as.enable() + as.tokenProvider.enable() as.rangePermCache = make(map[string]*unifiedRangePermissions) - as.revision = getRevision(tx) + as.setRevision(getRevision(tx)) plog.Noticef("Authentication enabled") @@ -241,15 +240,7 @@ func (as *authStore) AuthDisable() { b.ForceCommit() as.enabled = false - - as.simpleTokensMu.Lock() - tk := as.simpleTokenKeeper - as.simpleTokenKeeper = nil - as.simpleTokens = make(map[string]string) // invalidate all tokens - as.simpleTokensMu.Unlock() - if tk != nil { - tk.stop() - } + as.tokenProvider.disable() plog.Noticef("Authentication disabled") } @@ -260,10 +251,7 @@ func (as *authStore) Close() error { if !as.enabled { return nil } - if as.simpleTokenKeeper != nil { - as.simpleTokenKeeper.stop() - as.simpleTokenKeeper = nil - } + as.tokenProvider.disable() return nil } @@ -272,10 +260,6 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string return nil, ErrAuthNotEnabled } - // TODO(mitake): after adding jwt support, branching based on values of ctx is required - index := ctx.Value("index").(uint64) - simpleToken := 
ctx.Value("simpleToken").(string) - tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -285,14 +269,23 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string return nil, ErrAuthFailed } - token := fmt.Sprintf("%s.%d", simpleToken, index) - as.assignSimpleTokenToUser(username, token) + // Password checking is already performed in the API layer, so we don't need to check for now. + // Staleness of password can be detected with OCC in the API layer, too. - plog.Infof("authorized %s, token is %s", username, token) + token, err := as.tokenProvider.assign(ctx, username, as.Revision()) + if err != nil { + return nil, err + } + + plog.Debugf("authorized %s, token is %s", username, token) return &pb.AuthenticateResponse{Token: token}, nil } func (as *authStore) CheckPassword(username, password string) (uint64, error) { + if !as.isAuthEnabled() { + return 0, ErrAuthNotEnabled + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -322,7 +315,7 @@ func (as *authStore) Recover(be backend.Backend) { } } - as.revision = getRevision(tx) + as.setRevision(getRevision(tx)) tx.Unlock() @@ -366,6 +359,11 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, } func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + if as.enabled && strings.Compare(r.Name, rootUser) == 0 { + plog.Errorf("the user root must not be deleted") + return nil, ErrInvalidAuthMgmt + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -380,7 +378,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete as.commitRevision(tx) as.invalidateCachedPerm(r.Name) - as.invalidateUser(r.Name) + as.tokenProvider.invalidateUser(r.Name) plog.Noticef("deleted a user: %s", r.Name) @@ -416,7 +414,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p as.commitRevision(tx) as.invalidateCachedPerm(r.Name) - as.invalidateUser(r.Name) + 
as.tokenProvider.invalidateUser(r.Name) plog.Noticef("changed a password of a user: %s", r.Name) @@ -491,6 +489,11 @@ func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListRespon } func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 { + plog.Errorf("the role root must not be revoked from the user root") + return nil, ErrInvalidAuthMgmt + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -593,17 +596,10 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) } func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - // TODO(mitake): current scheme of role deletion allows existing users to have the deleted roles - // - // Assume a case like below: - // create a role r1 - // create a user u1 and grant r1 to u1 - // delete r1 - // - // After this sequence, u1 is still granted the role r1. So if admin create a new role with the name r1, - // the new r1 is automatically granted u1. - // In some cases, it would be confusing. So we need to provide an option for deleting the grant relation - // from all users. 
+ if as.enabled && strings.Compare(r.Role, rootRole) == 0 { + plog.Errorf("the role root must not be deleted") + return nil, ErrInvalidAuthMgmt + } tx := as.be.BatchTx() tx.Lock() @@ -616,6 +612,28 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete delRole(tx, r.Role) + users := getAllUsers(tx) + for _, user := range users { + updatedUser := &authpb.User{ + Name: user.Name, + Password: user.Password, + } + + for _, role := range user.Roles { + if strings.Compare(role, r.Role) != 0 { + updatedUser.Roles = append(updatedUser.Roles, role) + } + } + + if len(updatedUser.Roles) == len(user.Roles) { + continue + } + + putUser(tx, updatedUser) + + as.invalidateCachedPerm(string(user.Name)) + } + as.commitRevision(tx) plog.Noticef("deleted role %s", r.Role) @@ -645,15 +663,8 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, return &pb.AuthRoleAddResponse{}, nil } -func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) { - // same as '(t *tokenSimple) info' in v3.2+ - as.simpleTokensMu.Lock() - username, ok := as.simpleTokens[token] - if ok && as.simpleTokenKeeper != nil { - as.simpleTokenKeeper.resetSimpleToken(token) - } - as.simpleTokensMu.Unlock() - return &AuthInfo{Username: username, Revision: as.revision}, ok +func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) { + return as.tokenProvider.info(ctx, token, as.Revision()) } type permSlice []*authpb.Permission @@ -723,7 +734,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE return ErrUserEmpty } - if revision < as.revision { + if revision < as.Revision() { return ErrAuthOldRevision } @@ -886,7 +897,7 @@ func (as *authStore) isAuthEnabled() bool { return as.enabled } -func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore { +func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { tx := be.BatchTx() tx.Lock() @@ 
-904,18 +915,17 @@ func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) as := &authStore{ be: be, - simpleTokens: make(map[string]string), revision: getRevision(tx), - indexWaiter: indexWaiter, enabled: enabled, rangePermCache: make(map[string]*unifiedRangePermissions), + tokenProvider: tp, } if enabled { - as.enable() + as.tokenProvider.enable() } - if as.revision == 0 { + if as.Revision() == 0 { as.commitRevision(tx) } @@ -935,9 +945,9 @@ func hasRootRole(u *authpb.User) bool { } func (as *authStore) commitRevision(tx backend.BatchTx) { - as.revision++ + atomic.AddUint64(&as.revision, 1) revBytes := make([]byte, revBytesLen) - binary.BigEndian.PutUint64(revBytes, as.revision) + binary.BigEndian.PutUint64(revBytes, as.Revision()) tx.UnsafePut(authBucketName, revisionKey, revBytes) } @@ -951,31 +961,38 @@ func getRevision(tx backend.BatchTx) uint64 { return binary.BigEndian.Uint64(vs[0]) } -func (as *authStore) Revision() uint64 { - return as.revision +func (as *authStore) setRevision(rev uint64) { + atomic.StoreUint64(&as.revision, rev) } -func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool { - splitted := strings.Split(token, ".") - if len(splitted) != 2 { - return false - } - index, err := strconv.Atoi(splitted[1]) - if err != nil { - return false +func (as *authStore) Revision() uint64 { + return atomic.LoadUint64(&as.revision) +} + +func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { + peer, ok := peer.FromContext(ctx) + if !ok || peer == nil || peer.AuthInfo == nil { + return nil } - select { - case <-as.indexWaiter(uint64(index)): - return true - case <-ctx.Done(): + tlsInfo := peer.AuthInfo.(credentials.TLSInfo) + for _, chains := range tlsInfo.State.VerifiedChains { + for _, chain := range chains { + cn := chain.Subject.CommonName + plog.Debugf("found common name %s", cn) + + return &AuthInfo{ + Username: cn, + Revision: as.Revision(), + } + } } - return false + return nil } func (as 
*authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromIncomingContext(ctx) if !ok { return nil, nil } @@ -986,14 +1003,57 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { } token := ts[0] - if !as.isValidSimpleToken(token, ctx) { - return nil, ErrInvalidAuthToken - } - - authInfo, uok := as.AuthInfoFromToken(token) + authInfo, uok := as.authInfoFromToken(ctx, token) if !uok { plog.Warningf("invalid auth token: %s", token) return nil, ErrInvalidAuthToken } return authInfo, nil } + +func (as *authStore) GenTokenPrefix() (string, error) { + return as.tokenProvider.genTokenPrefix() +} + +func decomposeOpts(optstr string) (string, map[string]string, error) { + opts := strings.Split(optstr, ",") + tokenType := opts[0] + + typeSpecificOpts := make(map[string]string) + for i := 1; i < len(opts); i++ { + pair := strings.Split(opts[i], "=") + + if len(pair) != 2 { + plog.Errorf("invalid token specific option: %s", optstr) + return "", nil, ErrInvalidAuthOpts + } + + if _, ok := typeSpecificOpts[pair[0]]; ok { + plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) + return "", nil, ErrInvalidAuthOpts + } + + typeSpecificOpts[pair[0]] = pair[1] + } + + return tokenType, typeSpecificOpts, nil + +} + +func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { + tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + switch tokenType { + case "simple": + plog.Warningf("simple token is not cryptographically signed") + return newTokenProviderSimple(indexWaiter), nil + case "jwt": + return newTokenProviderJWT(typeSpecificOpts) + default: + plog.Errorf("unknown token type: %s", tokenType) + return nil, ErrInvalidAuthOpts + } +} diff --git a/vendor/github.com/coreos/etcd/client/BUILD 
b/vendor/github.com/coreos/etcd/client/BUILD index 16c78ceec37..00a5b08d87f 100644 --- a/vendor/github.com/coreos/etcd/client/BUILD +++ b/vendor/github.com/coreos/etcd/client/BUILD @@ -14,14 +14,15 @@ go_library( "keys.generated.go", "keys.go", "members.go", - "srv.go", "util.go", ], importpath = "github.com/coreos/etcd/client", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/coreos/etcd/pkg/pathutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/srv:go_default_library", "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/etcd/version:go_default_library", "//vendor/github.com/ugorji/go/codec:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", ], diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go index f9131b4725c..19ce2ec01da 100644 --- a/vendor/github.com/coreos/etcd/client/client.go +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -15,6 +15,7 @@ package client import ( + "encoding/json" "errors" "fmt" "io/ioutil" @@ -27,6 +28,8 @@ import ( "sync" "time" + "github.com/coreos/etcd/version" + "golang.org/x/net/context" ) @@ -201,6 +204,9 @@ type Client interface { // returned SetEndpoints(eps []string) error + // GetVersion retrieves the current etcd server and cluster version + GetVersion(ctx context.Context) (*version.Versions, error) + httpClient } @@ -366,12 +372,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo if err == context.Canceled || err == context.DeadlineExceeded { return nil, nil, err } - if isOneShot { - return nil, nil, err - } - continue - } - if resp.StatusCode/100 == 5 { + } else if resp.StatusCode/100 == 5 { switch resp.StatusCode { case http.StatusInternalServerError, http.StatusServiceUnavailable: // TODO: make sure this is a no leader response @@ -379,10 +380,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo 
default: cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) } - if isOneShot { - return nil, nil, cerr.Errors[0] + err = cerr.Errors[0] + } + if err != nil { + if !isOneShot { + continue } - continue + c.Lock() + c.pinned = (k + 1) % leps + c.Unlock() + return nil, nil, err } if k != pinned { c.Lock() @@ -477,6 +484,33 @@ func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration } } +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + type roundTripResponse struct { resp *http.Response err error diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go index bfd7aec93f5..442e35fe543 100644 --- a/vendor/github.com/coreos/etcd/client/discover.go +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -14,8 +14,27 @@ package client +import ( + "github.com/coreos/etcd/pkg/srv" +) + // Discoverer is an interface that wraps the Discover method. type Discoverer interface { // Discover looks up the etcd servers for the domain. Discover(domain string) ([]string, error) } + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
+func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/vendor/github.com/coreos/etcd/client/srv.go b/vendor/github.com/coreos/etcd/client/srv.go deleted file mode 100644 index fdfa3435921..00000000000 --- a/vendor/github.com/coreos/etcd/client/srv.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "fmt" - "net" - "net/url" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV -) - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. -func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -// Discover looks up the etcd servers for the domain. 
-func (d *srvDiscover) Discover(domain string) ([]string, error) { - var urls []*url.URL - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - return nil - } - - errHTTPS := updateURLs("etcd-client-ssl", "https") - errHTTP := updateURLs("etcd-client", "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return endpoints, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/BUILD b/vendor/github.com/coreos/etcd/clientv3/BUILD index c4cec492504..c610438b2a5 100644 --- a/vendor/github.com/coreos/etcd/clientv3/BUILD +++ b/vendor/github.com/coreos/etcd/clientv3/BUILD @@ -4,18 +4,19 @@ go_library( name = "go_default_library", srcs = [ "auth.go", - "balancer.go", "client.go", "cluster.go", "compact_op.go", "compare.go", "config.go", "doc.go", + "health_balancer.go", "kv.go", "lease.go", "logger.go", "maintenance.go", "op.go", + "ready_wait.go", "retry.go", "sort.go", "txn.go", @@ -28,15 +29,15 @@ go_library( "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", 
"//vendor/google.golang.org/grpc/credentials:go_default_library", "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/health/grpc_health_v1:go_default_library", + "//vendor/google.golang.org/grpc/keepalive:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", ], ) @@ -49,7 +50,12 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/clientv3/concurrency:all-srcs", + "//vendor/github.com/coreos/etcd/clientv3/namespace:all-srcs", + "//vendor/github.com/coreos/etcd/clientv3/naming:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md index 87c32d1a88a..376bfba7614 100644 --- a/vendor/github.com/coreos/etcd/clientv3/README.md +++ b/vendor/github.com/coreos/etcd/clientv3/README.md @@ -1,6 +1,6 @@ # etcd/clientv3 -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) `etcd/clientv3` is the official Go etcd client for v3. @@ -32,7 +32,7 @@ pass `context.WithTimeout` to APIs: ```go ctx, cancel := context.WithTimeout(context.Background(), timeout) -resp, err := kvc.Put(ctx, "sample_key", "sample_value") +resp, err := cli.Put(ctx, "sample_key", "sample_value") cancel() if err != nil { // handle error! 
@@ -57,7 +57,7 @@ etcd client returns 2 types of errors: Here is the example code to handle client errors: ```go -resp, err := kvc.Put(ctx, "", "") +resp, err := cli.Put(ctx, "", "") if err != nil { switch err { case context.Canceled: @@ -76,6 +76,10 @@ if err != nil { The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go). +## Namespacing + +The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. + ## Examples More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3). diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go index b995bce8e3f..dddbcb4f626 100644 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -20,6 +20,7 @@ import ( "github.com/coreos/etcd/auth/authpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -100,28 +101,20 @@ type Auth interface { } type auth struct { - c *Client - - conn *grpc.ClientConn // conn in-use remote pb.AuthClient } func NewAuth(c *Client) Auth { - conn := c.ActiveConnection() - return &auth{ - conn: c.ActiveConnection(), - remote: pb.NewAuthClient(conn), - c: c, - } + return &auth{remote: RetryAuthClient(c)} } func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}) return (*AuthEnableResponse)(resp), toErr(ctx, err) } func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := 
auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}) return (*AuthDisableResponse)(resp), toErr(ctx, err) } @@ -146,12 +139,12 @@ func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) ( } func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { - resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false)) + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}) return (*AuthUserGetResponse)(resp), toErr(ctx, err) } func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { - resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}) return (*AuthUserListResponse)(resp), toErr(ctx, err) } @@ -176,12 +169,12 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran } func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { - resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false)) + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}) return (*AuthRoleGetResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { - resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}) return (*AuthRoleListResponse)(resp), toErr(ctx, err) } @@ -209,7 +202,7 @@ type authenticator struct { } func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { - resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false)) + resp, err := auth.remote.Authenticate(ctx, 
&pb.AuthenticateRequest{Name: name, Password: password}) return (*AuthenticateResponse)(resp), toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go deleted file mode 100644 index 0fef9c54934..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/balancer.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -// ErrNoAddrAvilable is returned by Get() when the balancer does not have -// any active connection to endpoints at the time. -// This error is returned only when opts.BlockingWait is true. 
-var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available") - -// simpleBalancer does the bare minimum to expose multiple eps -// to the grpc reconnection code path -type simpleBalancer struct { - // addrs are the client's endpoints for grpc - addrs []grpc.Address - // notifyCh notifies grpc of the set of addresses for connecting - notifyCh chan []grpc.Address - - // readyc closes once the first connection is up - readyc chan struct{} - readyOnce sync.Once - - // mu protects upEps, pinAddr, and connectingAddr - mu sync.RWMutex - // upEps holds the current endpoints that have an active connection - upEps map[string]struct{} - // upc closes when upEps transitions from empty to non-zero or the balancer closes. - upc chan struct{} - - // grpc issues TLS cert checks using the string passed into dial so - // that string must be the host. To recover the full scheme://host URL, - // have a map from hosts to the original endpoint. - host2ep map[string]string - - // pinAddr is the currently pinned address; set to the empty string on - // intialization and shutdown. 
- pinAddr string - - closed bool -} - -func newSimpleBalancer(eps []string) *simpleBalancer { - notifyCh := make(chan []grpc.Address, 1) - addrs := make([]grpc.Address, len(eps)) - for i := range eps { - addrs[i].Addr = getHost(eps[i]) - } - notifyCh <- addrs - sb := &simpleBalancer{ - addrs: addrs, - notifyCh: notifyCh, - readyc: make(chan struct{}), - upEps: make(map[string]struct{}), - upc: make(chan struct{}), - host2ep: getHost2ep(eps), - } - return sb -} - -func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } - -func (b *simpleBalancer) ConnectNotify() <-chan struct{} { - b.mu.Lock() - defer b.mu.Unlock() - return b.upc -} - -func (b *simpleBalancer) getEndpoint(host string) string { - b.mu.Lock() - defer b.mu.Unlock() - return b.host2ep[host] -} - -func getHost2ep(eps []string) map[string]string { - hm := make(map[string]string, len(eps)) - for i := range eps { - _, host, _ := parseEndpoint(eps[i]) - hm[host] = eps[i] - } - return hm -} - -func (b *simpleBalancer) updateAddrs(eps []string) { - np := getHost2ep(eps) - - b.mu.Lock() - defer b.mu.Unlock() - - match := len(np) == len(b.host2ep) - for k, v := range np { - if b.host2ep[k] != v { - match = false - break - } - } - if match { - // same endpoints, so no need to update address - return - } - - b.host2ep = np - - addrs := make([]grpc.Address, 0, len(eps)) - for i := range eps { - addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])}) - } - b.addrs = addrs - b.notifyCh <- addrs -} - -func (b *simpleBalancer) Up(addr grpc.Address) func(error) { - b.mu.Lock() - defer b.mu.Unlock() - - // gRPC might call Up after it called Close. We add this check - // to "fix" it up at application layer. Or our simplerBalancer - // might panic since b.upc is closed. 
- if b.closed { - return func(err error) {} - } - - if len(b.upEps) == 0 { - // notify waiting Get()s and pin first connected address - close(b.upc) - b.pinAddr = addr.Addr - } - b.upEps[addr.Addr] = struct{}{} - - // notify client that a connection is up - b.readyOnce.Do(func() { close(b.readyc) }) - - return func(err error) { - b.mu.Lock() - delete(b.upEps, addr.Addr) - if len(b.upEps) == 0 && b.pinAddr != "" { - b.upc = make(chan struct{}) - } else if b.pinAddr == addr.Addr { - // choose new random up endpoint - for k := range b.upEps { - b.pinAddr = k - break - } - } - b.mu.Unlock() - } -} - -func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { - var addr string - - // If opts.BlockingWait is false (for fail-fast RPCs), it should return - // an address it has notified via Notify immediately instead of blocking. - if !opts.BlockingWait { - b.mu.RLock() - closed := b.closed - addr = b.pinAddr - upEps := len(b.upEps) - b.mu.RUnlock() - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - - if upEps == 0 { - return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable - } - return grpc.Address{Addr: addr}, func() {}, nil - } - - for { - b.mu.RLock() - ch := b.upc - b.mu.RUnlock() - select { - case <-ch: - case <-ctx.Done(): - return grpc.Address{Addr: ""}, nil, ctx.Err() - } - b.mu.RLock() - addr = b.pinAddr - upEps := len(b.upEps) - b.mu.RUnlock() - if addr == "" { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if upEps > 0 { - break - } - } - return grpc.Address{Addr: addr}, func() {}, nil -} - -func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } - -func (b *simpleBalancer) Close() error { - b.mu.Lock() - defer b.mu.Unlock() - // In case gRPC calls close twice. TODO: remove the checking - // when we are sure that gRPC wont call close twice. 
- if b.closed { - return nil - } - b.closed = true - close(b.notifyCh) - // terminate all waiting Get()s - b.pinAddr = "" - if len(b.upEps) == 0 { - close(b.upc) - } - return nil -} - -func getHost(ep string) string { - url, uerr := url.Parse(ep) - if uerr != nil || !strings.Contains(ep, "://") { - return ep - } - return url.Host -} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go index 8263890bdff..2dc7e8675c8 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -20,22 +20,25 @@ import ( "fmt" "net" "net/url" + "strconv" "strings" "sync" "time" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) var ( ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") + ErrOldCluster = errors.New("etcdclient: old cluster version") ) // Client provides and manages an etcd v3 client session. @@ -47,19 +50,20 @@ type Client struct { Auth Maintenance - conn *grpc.ClientConn - cfg Config - creds *credentials.TransportCredentials - balancer *simpleBalancer - retryWrapper retryRpcFunc - retryAuthWrapper retryRpcFunc + conn *grpc.ClientConn + dialerrc chan error + + cfg Config + creds *credentials.TransportCredentials + balancer *healthBalancer + mu sync.Mutex ctx context.Context cancel context.CancelFunc - // Username is a username for authentication + // Username is a user name for authentication. Username string - // Password is a password for authentication + // Password is a password for authentication. 
Password string // tokenCred is an instance of WithPerRPCCredentials()'s argument tokenCred *authTokenCredential @@ -74,26 +78,28 @@ func New(cfg Config) (*Client, error) { return newClient(&cfg) } +// NewCtxClient creates a client with a context but no underlying grpc +// connection. This is useful for embedded cases that override the +// service interface implementations and do not need connection management. +func NewCtxClient(ctx context.Context) *Client { + cctx, cancel := context.WithCancel(ctx) + return &Client{ctx: cctx, cancel: cancel} +} + // NewFromURL creates a new etcdv3 client from a URL. func NewFromURL(url string) (*Client, error) { return New(Config{Endpoints: []string{url}}) } -// NewFromConfigFile creates a new etcdv3 client from a configuration file. -func NewFromConfigFile(path string) (*Client, error) { - cfg, err := configFromFile(path) - if err != nil { - return nil, err - } - return New(*cfg) -} - // Close shuts down the client's etcd connections. func (c *Client) Close() error { c.cancel() c.Watcher.Close() c.Lease.Close() - return toErr(c.ctx, c.conn.Close()) + if c.conn != nil { + return toErr(c.ctx, c.conn.Close()) + } + return c.ctx.Err() } // Ctx is a context for "out of band" messages (e.g., for sending @@ -111,8 +117,23 @@ func (c *Client) Endpoints() (eps []string) { // SetEndpoints updates client's endpoints. func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() c.cfg.Endpoints = eps - c.balancer.updateAddrs(eps) + c.mu.Unlock() + c.balancer.updateAddrs(eps...) + + // updating notifyCh can trigger new connections, + // need update addrs if all connections are down + // or addrs does not include pinAddr. + c.balancer.mu.RLock() + update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr) + c.balancer.mu.RUnlock() + if update { + select { + case c.balancer.updateAddrsC <- notifyNext: + case <-c.balancer.stopc: + } + } } // Sync synchronizes client's endpoints with the known endpoints from the etcd membership. 
@@ -139,8 +160,10 @@ func (c *Client) autoSync() { case <-c.ctx.Done(): return case <-time.After(c.cfg.AutoSyncInterval): - ctx, _ := context.WithTimeout(c.ctx, 5*time.Second) - if err := c.Sync(ctx); err != nil && err != c.ctx.Err() { + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { logger.Println("Auto sync endpoints failed:", err) } } @@ -169,7 +192,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { host = endpoint url, uerr := url.Parse(endpoint) if uerr != nil || !strings.Contains(endpoint, "://") { - return + return proto, host, scheme } scheme = url.Scheme @@ -177,12 +200,13 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { host = url.Host switch url.Scheme { case "http", "https": - case "unix": + case "unix", "unixs": proto = "unix" + host = url.Host + url.Path default: proto, host = "", "" } - return + return proto, host, scheme } func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { @@ -191,7 +215,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden case "unix": case "http": creds = nil - case "https": + case "https", "unixs": if creds != nil { break } @@ -201,7 +225,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden default: creds = nil } - return + return creds } // dialSetupOpts gives the dial opts prior to any authentication @@ -209,10 +233,22 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts if c.cfg.DialTimeout > 0 { opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} } + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } opts = append(opts, dopts...) 
f := func(host string, t time.Duration) (net.Conn, error) { - proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host)) + proto, host, _ := parseEndpoint(c.balancer.endpoint(host)) + if host == "" && endpoint != "" { + // dialing an endpoint not in the balancer; use + // endpoint passed into dial + proto, host, _ = parseEndpoint(endpoint) + } if proto == "" { return nil, fmt.Errorf("unknown scheme for %q", host) } @@ -222,7 +258,14 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts default: } dialer := &net.Dialer{Timeout: t} - return dialer.DialContext(c.ctx, proto, host) + conn, err := dialer.DialContext(c.ctx, proto, host) + if err != nil { + select { + case c.dialerrc <- err: + default: + } + } + return conn, err } opts = append(opts, grpc.WithDialer(f)) @@ -288,21 +331,23 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo defer cancel() ctx = cctx } - if err := c.getToken(ctx); err != nil { - if err == ctx.Err() && ctx.Err() != c.ctx.Err() { - err = grpc.ErrClientConnTimeout - } - return nil, err - } - opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) + err := c.getToken(ctx) + if err != nil { + if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { + if err == ctx.Err() && ctx.Err() != c.ctx.Err() { + err = context.DeadlineExceeded + } + return nil, err + } + } else { + opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) + } } - // add metrics options - opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor)) - opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor)) + opts = append(opts, c.cfg.DialOptions...) - conn, err := grpc.Dial(host, opts...) + conn, err := grpc.DialContext(c.ctx, host, opts...) if err != nil { return nil, err } @@ -313,7 +358,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo // when the cluster has a leader. 
func WithRequireLeader(ctx context.Context) context.Context { md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewContext(ctx, md) + return metadata.NewOutgoingContext(ctx, md) } func newClient(cfg *Config) (*Client, error) { @@ -327,20 +372,31 @@ func newClient(cfg *Config) (*Client, error) { } // use a temporary skeleton client to bootstrap first connection - ctx, cancel := context.WithCancel(context.TODO()) + baseCtx := context.TODO() + if cfg.Context != nil { + baseCtx = cfg.Context + } + + ctx, cancel := context.WithCancel(baseCtx) client := &Client{ - conn: nil, - cfg: *cfg, - creds: creds, - ctx: ctx, - cancel: cancel, + conn: nil, + dialerrc: make(chan error, 1), + cfg: *cfg, + creds: creds, + ctx: ctx, + cancel: cancel, } if cfg.Username != "" && cfg.Password != "" { client.Username = cfg.Username client.Password = cfg.Password } - client.balancer = newSimpleBalancer(cfg.Endpoints) + client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) { + return grpcHealthCheck(client, ep) + }) + + // use Endpoints[0] so that for https:// without any tls config given, then + // grpc will assume the certificate server name is the endpoint host. 
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) if err != nil { client.cancel() @@ -348,24 +404,27 @@ func newClient(cfg *Config) (*Client, error) { return nil, err } client.conn = conn - client.retryWrapper = client.newRetryWrapper() - client.retryAuthWrapper = client.newAuthRetryWrapper() // wait for a connection if cfg.DialTimeout > 0 { hasConn := false waitc := time.After(cfg.DialTimeout) select { - case <-client.balancer.readyc: + case <-client.balancer.ready(): hasConn = true case <-ctx.Done(): case <-waitc: } if !hasConn { + err := context.DeadlineExceeded + select { + case err = <-client.dialerrc: + default: + } client.cancel() client.balancer.Close() conn.Close() - return nil, grpc.ErrClientConnTimeout + return nil, err } } @@ -376,10 +435,57 @@ func newClient(cfg *Config) (*Client, error) { client.Auth = NewAuth(client) client.Maintenance = NewMaintenance(client) + if cfg.RejectOldCluster { + if err := client.checkVersion(); err != nil { + client.Close() + return nil, err + } + } + go client.autoSync() return client, nil } +func (c *Client) checkVersion() (err error) { + var wg sync.WaitGroup + errc := make(chan error, len(c.cfg.Endpoints)) + ctx, cancel := context.WithCancel(c.ctx) + if c.cfg.DialTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) + } + wg.Add(len(c.cfg.Endpoints)) + for _, ep := range c.cfg.Endpoints { + // if cluster is current, any endpoint gives a recent version + go func(e string) { + defer wg.Done() + resp, rerr := c.Status(ctx, e) + if rerr != nil { + errc <- rerr + return + } + vs := strings.Split(resp.Version, ".") + maj, min := 0, 0 + if len(vs) >= 2 { + maj, _ = strconv.Atoi(vs[0]) + min, rerr = strconv.Atoi(vs[1]) + } + if maj < 3 || (maj == 3 && min < 2) { + rerr = ErrOldCluster + } + errc <- rerr + }(ep) + } + // wait for success + for i := 0; i < len(c.cfg.Endpoints); i++ { + if err = <-errc; err == nil { + break + } + } + cancel() + wg.Wait() + return err +} + // 
ActiveConnection returns the current in-use connection func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } @@ -392,14 +498,14 @@ func isHaltErr(ctx context.Context, err error) bool { if err == nil { return false } - code := grpc.Code(err) + ev, _ := status.FromError(err) // Unavailable codes mean the system will be right back. // (e.g., can't connect, lost leader) // Treat Internal codes as if something failed, leaving the // system in an inconsistent state, but retrying could make progress. // (e.g., failed in middle of send, corrupted frame) // TODO: are permanent Internal errors possible from grpc? - return code != codes.Unavailable && code != codes.Internal + return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal } func toErr(ctx context.Context, err error) error { @@ -410,7 +516,8 @@ func toErr(ctx context.Context, err error) error { if _, ok := err.(rpctypes.EtcdError); ok { return err } - code := grpc.Code(err) + ev, _ := status.FromError(err) + code := ev.Code() switch code { case codes.DeadlineExceeded: fallthrough @@ -419,9 +526,16 @@ func toErr(ctx context.Context, err error) error { err = ctx.Err() } case codes.Unavailable: - err = ErrNoAvailableEndpoints case codes.FailedPrecondition: err = grpc.ErrClientConnClosing } return err } + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go index b9bff626bd7..2df9f295165 100644 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go @@ -16,8 +16,8 @@ package clientv3 import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" - "google.golang.org/grpc" ) type ( @@ -50,53 +50,43 @@ func NewCluster(c *Client) Cluster { return 
&cluster{remote: RetryClusterClient(c)} } +func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster { + return &cluster{remote: remote} +} + func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { r := &pb.MemberAddRequest{PeerURLs: peerAddrs} resp, err := c.remote.MemberAdd(ctx, r) - if err == nil { - return (*MemberAddResponse)(resp), nil - } - if isHaltErr(ctx, err) { + if err != nil { return nil, toErr(ctx, err) } - return nil, toErr(ctx, err) + return (*MemberAddResponse)(resp), nil } func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { r := &pb.MemberRemoveRequest{ID: id} resp, err := c.remote.MemberRemove(ctx, r) - if err == nil { - return (*MemberRemoveResponse)(resp), nil - } - if isHaltErr(ctx, err) { + if err != nil { return nil, toErr(ctx, err) } - return nil, toErr(ctx, err) + return (*MemberRemoveResponse)(resp), nil } func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { // it is safe to retry on update. - for { - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false)) - if err == nil { - return (*MemberUpdateResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r) + if err == nil { + return (*MemberUpdateResponse)(resp), nil } + return nil, toErr(ctx, err) } func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { // it is safe to retry on list. 
- for { - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false)) - if err == nil { - return (*MemberListResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}) + if err == nil { + return (*MemberListResponse)(resp), nil } + return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go index 32d97eb0cc1..41e80c1da5d 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ b/vendor/github.com/coreos/etcd/clientv3/compact_op.go @@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest { return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} } -// WithCompactPhysical makes compact RPC call wait until -// the compaction is physically applied to the local database -// such that compacted entries are totally removed from the -// backend database. +// WithCompactPhysical makes Compact wait until all compacted entries are +// removed from the etcd server's storage. func WithCompactPhysical() CompactOption { return func(op *CompactOp) { op.physical = true } } diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go index f89ffb52c4a..68a25fd800f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ b/vendor/github.com/coreos/etcd/clientv3/compare.go @@ -82,6 +82,24 @@ func ModRevision(key string) Cmp { return Cmp{Key: []byte(key), Target: pb.Compare_MOD} } +// KeyBytes returns the byte slice holding with the comparison key. +func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } + +// WithKeyBytes sets the byte slice for the comparison key. +func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } + +// ValueBytes returns the byte slice holding the comparison value, if any. 
+func (cmp *Cmp) ValueBytes() []byte { + if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { + return tu.Value + } + return nil +} + +// WithValueBytes sets the byte slice for the comparison's value. +func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } + +// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. func mustInt64(val interface{}) int64 { if v, ok := val.(int64); ok { return v @@ -91,3 +109,12 @@ func mustInt64(val interface{}) int64 { } panic("bad value") } + +// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an +// int64 otherwise. +func mustInt64orLeaseID(val interface{}) int64 { + if v, ok := val.(LeaseID); ok { + return int64(v) + } + return mustInt64(val) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD b/vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD new file mode 100644 index 00000000000..4ee0f5650b2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "election.go", + "key.go", + "mutex.go", + "session.go", + "stm.go", + ], + importpath = "github.com/coreos/etcd/clientv3/concurrency", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go 
b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go new file mode 100644 index 00000000000..dcdbf511d1b --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go @@ -0,0 +1,17 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package concurrency implements concurrency operations on top of +// etcd such as distributed locks, barriers, and elections. +package concurrency diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go new file mode 100644 index 00000000000..c092bde0aeb --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go @@ -0,0 +1,246 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package concurrency + +import ( + "errors" + "fmt" + + v3 "github.com/coreos/etcd/clientv3" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc/mvccpb" + + "golang.org/x/net/context" +) + +var ( + ErrElectionNotLeader = errors.New("election: not leader") + ErrElectionNoLeader = errors.New("election: no leader") +) + +type Election struct { + session *Session + + keyPrefix string + + leaderKey string + leaderRev int64 + leaderSession *Session + hdr *pb.ResponseHeader +} + +// NewElection returns a new election on a given key prefix. +func NewElection(s *Session, pfx string) *Election { + return &Election{session: s, keyPrefix: pfx + "/"} +} + +// ResumeElection initializes an election with a known leader. +func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election { + return &Election{ + session: s, + leaderKey: leaderKey, + leaderRev: leaderRev, + leaderSession: s, + } +} + +// Campaign puts a value as eligible for the election. It blocks until +// it is elected, an error occurs, or the context is cancelled. 
+func (e *Election) Campaign(ctx context.Context, val string) error { + s := e.session + client := e.session.Client() + + k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease()) + txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0)) + txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease()))) + txn = txn.Else(v3.OpGet(k)) + resp, err := txn.Commit() + if err != nil { + return err + } + e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s + if !resp.Succeeded { + kv := resp.Responses[0].GetResponseRange().Kvs[0] + e.leaderRev = kv.CreateRevision + if string(kv.Value) != val { + if err = e.Proclaim(ctx, val); err != nil { + e.Resign(ctx) + return err + } + } + } + + _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) + if err != nil { + // clean up in case of context cancel + select { + case <-ctx.Done(): + e.Resign(client.Ctx()) + default: + e.leaderSession = nil + } + return err + } + e.hdr = resp.Header + + return nil +} + +// Proclaim lets the leader announce a new value without another election. +func (e *Election) Proclaim(ctx context.Context, val string) error { + if e.leaderSession == nil { + return ErrElectionNotLeader + } + client := e.session.Client() + cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) + txn := client.Txn(ctx).If(cmp) + txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease()))) + tresp, terr := txn.Commit() + if terr != nil { + return terr + } + if !tresp.Succeeded { + e.leaderKey = "" + return ErrElectionNotLeader + } + + e.hdr = tresp.Header + return nil +} + +// Resign lets a leader start a new election. 
+func (e *Election) Resign(ctx context.Context) (err error) { + if e.leaderSession == nil { + return nil + } + client := e.session.Client() + cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) + resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit() + if err == nil { + e.hdr = resp.Header + } + e.leaderKey = "" + e.leaderSession = nil + return err +} + +// Leader returns the leader value for the current election. +func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) { + client := e.session.Client() + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) + if err != nil { + return nil, err + } else if len(resp.Kvs) == 0 { + // no leader currently elected + return nil, ErrElectionNoLeader + } + return resp, nil +} + +// Observe returns a channel that reliably observes ordered leader proposals +// as GetResponse values on every current elected leader key. It will not +// necessarily fetch all historical leader updates, but will always post the +// most recent leader value. +// +// The channel closes when the context is canceled or the underlying watcher +// is otherwise disrupted. +func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse { + retc := make(chan v3.GetResponse) + go e.observe(ctx, retc) + return retc +} + +func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { + client := e.session.Client() + + defer close(ch) + for { + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) + if err != nil { + return + } + + var kv *mvccpb.KeyValue + var hdr *pb.ResponseHeader + + if len(resp.Kvs) == 0 { + cctx, cancel := context.WithCancel(ctx) + // wait for first key put on prefix + opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()} + wch := client.Watch(cctx, e.keyPrefix, opts...) 
+ for kv == nil { + wr, ok := <-wch + if !ok || wr.Err() != nil { + cancel() + return + } + // only accept puts; a delete will make observe() spin + for _, ev := range wr.Events { + if ev.Type == mvccpb.PUT { + hdr, kv = &wr.Header, ev.Kv + // may have multiple revs; hdr.rev = the last rev + // set to kv's rev in case batch has multiple Puts + hdr.Revision = kv.ModRevision + break + } + } + } + cancel() + } else { + hdr, kv = resp.Header, resp.Kvs[0] + } + + select { + case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}: + case <-ctx.Done(): + return + } + + cctx, cancel := context.WithCancel(ctx) + wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1)) + keyDeleted := false + for !keyDeleted { + wr, ok := <-wch + if !ok { + cancel() + return + } + for _, ev := range wr.Events { + if ev.Type == mvccpb.DELETE { + keyDeleted = true + break + } + resp.Header = &wr.Header + resp.Kvs = []*mvccpb.KeyValue{ev.Kv} + select { + case ch <- *resp: + case <-cctx.Done(): + cancel() + return + } + } + } + cancel() + } +} + +// Key returns the leader key if elected, empty string otherwise. +func (e *Election) Key() string { return e.leaderKey } + +// Rev returns the leader key's creation revision, if elected. +func (e *Election) Rev() int64 { return e.leaderRev } + +// Header is the response header from the last successful election proposal. +func (e *Election) Header() *pb.ResponseHeader { return e.hdr } diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go new file mode 100644 index 00000000000..9936737756c --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go @@ -0,0 +1,66 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "fmt" + + v3 "github.com/coreos/etcd/clientv3" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc/mvccpb" + + "golang.org/x/net/context" +) + +func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error { + cctx, cancel := context.WithCancel(ctx) + defer cancel() + + var wr v3.WatchResponse + wch := client.Watch(cctx, key, v3.WithRev(rev)) + for wr = range wch { + for _, ev := range wr.Events { + if ev.Type == mvccpb.DELETE { + return nil + } + } + } + if err := wr.Err(); err != nil { + return err + } + if err := ctx.Err(); err != nil { + return err + } + return fmt.Errorf("lost watcher waiting for delete") +} + +// waitDeletes efficiently waits until all keys matching the prefix and no greater +// than the create revision. +func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { + getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) + for { + resp, err := client.Get(ctx, pfx, getOpts...) 
+ if err != nil { + return nil, err + } + if len(resp.Kvs) == 0 { + return resp.Header, nil + } + lastKey := string(resp.Kvs[0].Key) + if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go new file mode 100644 index 00000000000..736a9d3d353 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go @@ -0,0 +1,119 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "fmt" + "sync" + + v3 "github.com/coreos/etcd/clientv3" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" +) + +// Mutex implements the sync Locker interface with etcd +type Mutex struct { + s *Session + + pfx string + myKey string + myRev int64 + hdr *pb.ResponseHeader +} + +func NewMutex(s *Session, pfx string) *Mutex { + return &Mutex{s, pfx + "/", "", -1, nil} +} + +// Lock locks the mutex with a cancelable context. If the context is canceled +// while trying to acquire the lock, the mutex tries to clean its stale lock entry. 
+func (m *Mutex) Lock(ctx context.Context) error { + s := m.s + client := m.s.Client() + + m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) + cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) + // put self in lock waiters via myKey; oldest waiter holds lock + put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) + // reuse key in case this session already holds the lock + get := v3.OpGet(m.myKey) + // fetch current holder to complete uncontended path with only one RPC + getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) + resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() + if err != nil { + return err + } + m.myRev = resp.Header.Revision + if !resp.Succeeded { + m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision + } + // if no key on prefix / the minimum rev is key, already hold the lock + ownerKey := resp.Responses[1].GetResponseRange().Kvs + if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { + m.hdr = resp.Header + return nil + } + + // wait for deletion revisions prior to myKey + hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) + // release lock key if cancelled + select { + case <-ctx.Done(): + m.Unlock(client.Ctx()) + default: + m.hdr = hdr + } + return werr +} + +func (m *Mutex) Unlock(ctx context.Context) error { + client := m.s.Client() + if _, err := client.Delete(ctx, m.myKey); err != nil { + return err + } + m.myKey = "\x00" + m.myRev = -1 + return nil +} + +func (m *Mutex) IsOwner() v3.Cmp { + return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev) +} + +func (m *Mutex) Key() string { return m.myKey } + +// Header is the response header received from etcd on acquiring the lock. 
+func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr } + +type lockerMutex struct{ *Mutex } + +func (lm *lockerMutex) Lock() { + client := lm.s.Client() + if err := lm.Mutex.Lock(client.Ctx()); err != nil { + panic(err) + } +} +func (lm *lockerMutex) Unlock() { + client := lm.s.Client() + if err := lm.Mutex.Unlock(client.Ctx()); err != nil { + panic(err) + } +} + +// NewLocker creates a sync.Locker backed by an etcd mutex. +func NewLocker(s *Session, pfx string) sync.Locker { + return &lockerMutex{NewMutex(s, pfx)} +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go new file mode 100644 index 00000000000..55cb553ea4a --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go @@ -0,0 +1,142 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "time" + + v3 "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +const defaultSessionTTL = 60 + +// Session represents a lease kept alive for the lifetime of a client. +// Fault-tolerant applications may use sessions to reason about liveness. +type Session struct { + client *v3.Client + opts *sessionOptions + id v3.LeaseID + + cancel context.CancelFunc + donec <-chan struct{} +} + +// NewSession gets the leased session for a client. 
+func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { + ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} + for _, opt := range opts { + opt(ops) + } + + id := ops.leaseID + if id == v3.NoLease { + resp, err := client.Grant(ops.ctx, int64(ops.ttl)) + if err != nil { + return nil, err + } + id = v3.LeaseID(resp.ID) + } + + ctx, cancel := context.WithCancel(ops.ctx) + keepAlive, err := client.KeepAlive(ctx, id) + if err != nil || keepAlive == nil { + cancel() + return nil, err + } + + donec := make(chan struct{}) + s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} + + // keep the lease alive until client error or cancelled context + go func() { + defer close(donec) + for range keepAlive { + // eat messages until keep alive channel closes + } + }() + + return s, nil +} + +// Client is the etcd client that is attached to the session. +func (s *Session) Client() *v3.Client { + return s.client +} + +// Lease is the lease ID for keys bound to the session. +func (s *Session) Lease() v3.LeaseID { return s.id } + +// Done returns a channel that closes when the lease is orphaned, expires, or +// is otherwise no longer being refreshed. +func (s *Session) Done() <-chan struct{} { return s.donec } + +// Orphan ends the refresh for the session lease. This is useful +// in case the state of the client connection is indeterminate (revoke +// would fail) or when transferring lease ownership. +func (s *Session) Orphan() { + s.cancel() + <-s.donec +} + +// Close orphans the session and revokes the session lease. +func (s *Session) Close() error { + s.Orphan() + // if revoke takes longer than the ttl, lease is expired anyway + ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second) + _, err := s.client.Revoke(ctx, s.id) + cancel() + return err +} + +type sessionOptions struct { + ttl int + leaseID v3.LeaseID + ctx context.Context +} + +// SessionOption configures Session. 
+type SessionOption func(*sessionOptions) + +// WithTTL configures the session's TTL in seconds. +// If TTL is <= 0, the default 60 seconds TTL will be used. +func WithTTL(ttl int) SessionOption { + return func(so *sessionOptions) { + if ttl > 0 { + so.ttl = ttl + } + } +} + +// WithLease specifies the existing leaseID to be used for the session. +// This is useful in process restart scenario, for example, to reclaim +// leadership from an election prior to restart. +func WithLease(leaseID v3.LeaseID) SessionOption { + return func(so *sessionOptions) { + so.leaseID = leaseID + } +} + +// WithContext assigns a context to the session instead of defaulting to +// using the client context. This is useful for canceling NewSession and +// Close operations immediately without having to close the client. If the +// context is canceled before Close() completes, the session's lease will be +// abandoned and left to expire instead of being revoked. +func WithContext(ctx context.Context) SessionOption { + return func(so *sessionOptions) { + so.ctx = ctx + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go new file mode 100644 index 00000000000..6bfd70ec428 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go @@ -0,0 +1,388 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package concurrency + +import ( + "math" + + v3 "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +// STM is an interface for software transactional memory. +type STM interface { + // Get returns the value for a key and inserts the key in the txn's read set. + // If Get fails, it aborts the transaction with an error, never returning. + Get(key ...string) string + // Put adds a value for a key to the write set. + Put(key, val string, opts ...v3.OpOption) + // Rev returns the revision of a key in the read set. + Rev(key string) int64 + // Del deletes a key. + Del(key string) + + // commit attempts to apply the txn's changes to the server. + commit() *v3.TxnResponse + reset() +} + +// Isolation is an enumeration of transactional isolation levels which +// describes how transactions should interfere and conflict. +type Isolation int + +const ( + // SerializableSnapshot provides serializable isolation and also checks + // for write conflicts. + SerializableSnapshot Isolation = iota + // Serializable reads within the same transaction attempt return data + // from the at the revision of the first read. + Serializable + // RepeatableReads reads within the same transaction attempt always + // return the same data. + RepeatableReads + // ReadCommitted reads keys from any committed revision. + ReadCommitted +) + +// stmError safely passes STM errors through panic to the STM error channel. +type stmError struct{ err error } + +type stmOptions struct { + iso Isolation + ctx context.Context + prefetch []string +} + +type stmOption func(*stmOptions) + +// WithIsolation specifies the transaction isolation level. +func WithIsolation(lvl Isolation) stmOption { + return func(so *stmOptions) { so.iso = lvl } +} + +// WithAbortContext specifies the context for permanently aborting the transaction. 
+func WithAbortContext(ctx context.Context) stmOption { + return func(so *stmOptions) { so.ctx = ctx } +} + +// WithPrefetch is a hint to prefetch a list of keys before trying to apply. +// If an STM transaction will unconditionally fetch a set of keys, prefetching +// those keys will save the round-trip cost from requesting each key one by one +// with Get(). +func WithPrefetch(keys ...string) stmOption { + return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } +} + +// NewSTM initiates a new STM instance, using serializable snapshot isolation by default. +func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { + opts := &stmOptions{ctx: c.Ctx()} + for _, f := range so { + f(opts) + } + if len(opts.prefetch) != 0 { + f := apply + apply = func(s STM) error { + s.Get(opts.prefetch...) + return f(s) + } + } + return runSTM(mkSTM(c, opts), apply) +} + +func mkSTM(c *v3.Client, opts *stmOptions) STM { + switch opts.iso { + case SerializableSnapshot: + s := &stmSerializable{ + stm: stm{client: c, ctx: opts.ctx}, + prefetch: make(map[string]*v3.GetResponse), + } + s.conflicts = func() []v3.Cmp { + return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...) 
+ } + return s + case Serializable: + s := &stmSerializable{ + stm: stm{client: c, ctx: opts.ctx}, + prefetch: make(map[string]*v3.GetResponse), + } + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case RepeatableReads: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case ReadCommitted: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return nil } + return s + default: + panic("unsupported stm") + } +} + +type stmResponse struct { + resp *v3.TxnResponse + err error +} + +func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) { + outc := make(chan stmResponse, 1) + go func() { + defer func() { + if r := recover(); r != nil { + e, ok := r.(stmError) + if !ok { + // client apply panicked + panic(r) + } + outc <- stmResponse{nil, e.err} + } + }() + var out stmResponse + for { + s.reset() + if out.err = apply(s); out.err != nil { + break + } + if out.resp = s.commit(); out.resp != nil { + break + } + } + outc <- out + }() + r := <-outc + return r.resp, r.err +} + +// stm implements repeatable-read software transactional memory over etcd +type stm struct { + client *v3.Client + ctx context.Context + // rset holds read key values and revisions + rset readSet + // wset holds overwritten keys and their values + wset writeSet + // getOpts are the opts used for gets + getOpts []v3.OpOption + // conflicts computes the current conflicts on the txn + conflicts func() []v3.Cmp +} + +type stmPut struct { + val string + op v3.Op +} + +type readSet map[string]*v3.GetResponse + +func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) { + for i, resp := range txnresp.Responses { + rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange()) + } +} + +// first returns the store revision from the first fetch +func (rs readSet) first() int64 { + ret := int64(math.MaxInt64 - 1) + 
for _, resp := range rs { + if rev := resp.Header.Revision; rev < ret { + ret = rev + } + } + return ret +} + +// cmps guards the txn from updates to read set +func (rs readSet) cmps() []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(rs)) + for k, rk := range rs { + cmps = append(cmps, isKeyCurrent(k, rk)) + } + return cmps +} + +type writeSet map[string]stmPut + +func (ws writeSet) get(keys ...string) *stmPut { + for _, key := range keys { + if wv, ok := ws[key]; ok { + return &wv + } + } + return nil +} + +// cmps returns a cmp list testing no writes have happened past rev +func (ws writeSet) cmps(rev int64) []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(ws)) + for key := range ws { + cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) + } + return cmps +} + +// puts is the list of ops for all pending writes +func (ws writeSet) puts() []v3.Op { + puts := make([]v3.Op, 0, len(ws)) + for _, v := range ws { + puts = append(puts, v.op) + } + return puts +} + +func (s *stm) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + return respToValue(s.fetch(keys...)) +} + +func (s *stm) Put(key, val string, opts ...v3.OpOption) { + s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)} +} + +func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} } + +func (s *stm) Rev(key string) int64 { + if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 { + return resp.Kvs[0].ModRevision + } + return 0 +} + +func (s *stm) commit() *v3.TxnResponse { + txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + return nil +} + +func (s *stm) fetch(keys ...string) *v3.GetResponse { + if len(keys) == 0 { + return nil + } + ops := make([]v3.Op, len(keys)) + for i, key := range keys { + if resp, ok := s.rset[key]; ok { + return resp + } + ops[i] = v3.OpGet(key, s.getOpts...) 
+ } + txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit() + if err != nil { + panic(stmError{err}) + } + s.rset.add(keys, txnresp) + return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange()) +} + +func (s *stm) reset() { + s.rset = make(map[string]*v3.GetResponse) + s.wset = make(map[string]stmPut) +} + +type stmSerializable struct { + stm + prefetch map[string]*v3.GetResponse +} + +func (s *stmSerializable) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + firstRead := len(s.rset) == 0 + for _, key := range keys { + if resp, ok := s.prefetch[key]; ok { + delete(s.prefetch, key) + s.rset[key] = resp + } + } + resp := s.stm.fetch(keys...) + if firstRead { + // txn's base revision is defined by the first read + s.getOpts = []v3.OpOption{ + v3.WithRev(resp.Header.Revision), + v3.WithSerializable(), + } + } + return respToValue(resp) +} + +func (s *stmSerializable) Rev(key string) int64 { + s.Get(key) + return s.stm.Rev(key) +} + +func (s *stmSerializable) gets() ([]string, []v3.Op) { + keys := make([]string, 0, len(s.rset)) + ops := make([]v3.Op, 0, len(s.rset)) + for k := range s.rset { + keys = append(keys, k) + ops = append(ops, v3.OpGet(k)) + } + return keys, ops +} + +func (s *stmSerializable) commit() *v3.TxnResponse { + keys, getops := s.gets() + txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...) 
+ // use Else to prefetch keys in case of conflict to save a round trip + txnresp, err := txn.Else(getops...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + // load prefetch with Else data + s.rset.add(keys, txnresp) + s.prefetch = s.rset + s.getOpts = nil + return nil +} + +func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp { + if len(r.Kvs) != 0 { + return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision) + } + return v3.Compare(v3.ModRevision(k), "=", 0) +} + +func respToValue(resp *v3.GetResponse) string { + if resp == nil || len(resp.Kvs) == 0 { + return "" + } + return string(resp.Kvs[0].Value) +} + +// NewSTMRepeatable is deprecated. +func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads)) +} + +// NewSTMSerializable is deprecated. +func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable)) +} + +// NewSTMReadCommitted is deprecated. +func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted)) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go index d1d5f40906a..ccf7445c7ba 100644 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -16,98 +16,47 @@ package clientv3 import ( "crypto/tls" - "crypto/x509" - "io/ioutil" "time" - "github.com/coreos/etcd/pkg/tlsutil" - "github.com/ghodss/yaml" + "golang.org/x/net/context" + "google.golang.org/grpc" ) type Config struct { - // Endpoints is a list of URLs - Endpoints []string + // Endpoints is a list of URLs. 
+ Endpoints []string `json:"endpoints"` // AutoSyncInterval is the interval to update endpoints with its latest members. // 0 disables auto-sync. By default auto-sync is disabled. - AutoSyncInterval time.Duration + AutoSyncInterval time.Duration `json:"auto-sync-interval"` // DialTimeout is the timeout for failing to establish a connection. - DialTimeout time.Duration + DialTimeout time.Duration `json:"dial-timeout"` + + // DialKeepAliveTime is the time in seconds after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time in seconds that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` // TLS holds the client secure credentials, if any. TLS *tls.Config - // Username is a username for authentication - Username string + // Username is a user name for authentication. 
+ Username string `json:"username"` - // Password is a password for authentication - Password string -} - -type yamlConfig struct { - Endpoints []string `json:"endpoints"` - AutoSyncInterval time.Duration `json:"auto-sync-interval"` - DialTimeout time.Duration `json:"dial-timeout"` - InsecureTransport bool `json:"insecure-transport"` - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"` - Certfile string `json:"cert-file"` - Keyfile string `json:"key-file"` - CAfile string `json:"ca-file"` -} - -func configFromFile(fpath string) (*Config, error) { - b, err := ioutil.ReadFile(fpath) - if err != nil { - return nil, err - } - - yc := &yamlConfig{} - - err = yaml.Unmarshal(b, yc) - if err != nil { - return nil, err - } - - cfg := &Config{ - Endpoints: yc.Endpoints, - AutoSyncInterval: yc.AutoSyncInterval, - DialTimeout: yc.DialTimeout, - } - - if yc.InsecureTransport { - cfg.TLS = nil - return cfg, nil - } - - var ( - cert *tls.Certificate - cp *x509.CertPool - ) - - if yc.Certfile != "" && yc.Keyfile != "" { - cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil) - if err != nil { - return nil, err - } - } - - if yc.CAfile != "" { - cp, err = tlsutil.NewCertPool([]string{yc.CAfile}) - if err != nil { - return nil, err - } - } - - tlscfg := &tls.Config{ - MinVersion: tls.VersionTLS10, - InsecureSkipVerify: yc.InsecureSkipTLSVerify, - RootCAs: cp, - } - if cert != nil { - tlscfg.Certificates = []tls.Certificate{*cert} - } - cfg.TLS = tlscfg - - return cfg, nil + // Password is a password for authentication. + Password string `json:"password"` + + // RejectOldCluster when set will refuse to create a client against an outdated cluster. + RejectOldCluster bool `json:"reject-old-cluster"` + + // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). + DialOptions []grpc.DialOption + + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. 
+ Context context.Context } diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go index 470ca4dc476..dacc5bb346f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ b/vendor/github.com/coreos/etcd/clientv3/doc.go @@ -28,7 +28,7 @@ // Make sure to close the client after using it. If the client is not closed, the // connection will have leaky goroutines. // -// To specify client request timeout, pass context.WithTimeout to APIs: +// To specify a client request timeout, wrap the context with context.WithTimeout: // // ctx, cancel := context.WithTimeout(context.Background(), timeout) // resp, err := kvc.Put(ctx, "sample_key", "sample_value") diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go new file mode 100644 index 00000000000..52bea90e66e --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go @@ -0,0 +1,627 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + "errors" + "net/url" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +const ( + minHealthRetryDuration = 3 * time.Second + unknownService = "unknown service grpc.health.v1.Health" +) + +// ErrNoAddrAvilable is returned by Get() when the balancer does not have +// any active connection to endpoints at the time. +// This error is returned only when opts.BlockingWait is true. +var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available") + +type healthCheckFunc func(ep string) (bool, error) + +type notifyMsg int + +const ( + notifyReset notifyMsg = iota + notifyNext +) + +// healthBalancer does the bare minimum to expose multiple eps +// to the grpc reconnection code path +type healthBalancer struct { + // addrs are the client's endpoint addresses for grpc + addrs []grpc.Address + + // eps holds the raw endpoints from the client + eps []string + + // notifyCh notifies grpc of the set of addresses for connecting + notifyCh chan []grpc.Address + + // readyc closes once the first connection is up + readyc chan struct{} + readyOnce sync.Once + + // healthCheck checks an endpoint's health. + healthCheck healthCheckFunc + healthCheckTimeout time.Duration + + unhealthyMu sync.RWMutex + unhealthyHostPorts map[string]time.Time + + // mu protects all fields below. + mu sync.RWMutex + + // upc closes when pinAddr transitions from empty to non-empty or the balancer closes. + upc chan struct{} + + // downc closes when grpc calls down() on pinAddr + downc chan struct{} + + // stopc is closed to signal updateNotifyLoop should stop. + stopc chan struct{} + stopOnce sync.Once + wg sync.WaitGroup + + // donec closes when all goroutines are exited + donec chan struct{} + + // updateAddrsC notifies updateNotifyLoop to update addrs. 
+ updateAddrsC chan notifyMsg + + // grpc issues TLS cert checks using the string passed into dial so + // that string must be the host. To recover the full scheme://host URL, + // have a map from hosts to the original endpoint. + hostPort2ep map[string]string + + // pinAddr is the currently pinned address; set to the empty string on + // initialization and shutdown. + pinAddr string + + closed bool +} + +func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer { + notifyCh := make(chan []grpc.Address) + addrs := eps2addrs(eps) + hb := &healthBalancer{ + addrs: addrs, + eps: eps, + notifyCh: notifyCh, + readyc: make(chan struct{}), + healthCheck: hc, + unhealthyHostPorts: make(map[string]time.Time), + upc: make(chan struct{}), + stopc: make(chan struct{}), + downc: make(chan struct{}), + donec: make(chan struct{}), + updateAddrsC: make(chan notifyMsg), + hostPort2ep: getHostPort2ep(eps), + } + if timeout < minHealthRetryDuration { + timeout = minHealthRetryDuration + } + hb.healthCheckTimeout = timeout + + close(hb.downc) + go hb.updateNotifyLoop() + hb.wg.Add(1) + go func() { + defer hb.wg.Done() + hb.updateUnhealthy() + }() + return hb +} + +func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } + +func (b *healthBalancer) ConnectNotify() <-chan struct{} { + b.mu.Lock() + defer b.mu.Unlock() + return b.upc +} + +func (b *healthBalancer) ready() <-chan struct{} { return b.readyc } + +func (b *healthBalancer) endpoint(hostPort string) string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.hostPort2ep[hostPort] +} + +func (b *healthBalancer) pinned() string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.pinAddr +} + +func (b *healthBalancer) hostPortError(hostPort string, err error) { + if b.endpoint(hostPort) == "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error()) + } + return + } + + b.unhealthyMu.Lock() 
+ b.unhealthyHostPorts[hostPort] = time.Now() + b.unhealthyMu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error()) + } +} + +func (b *healthBalancer) removeUnhealthy(hostPort, msg string) { + if b.endpoint(hostPort) == "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg) + } + return + } + + b.unhealthyMu.Lock() + delete(b.unhealthyHostPorts, hostPort) + b.unhealthyMu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg) + } +} + +func (b *healthBalancer) countUnhealthy() (count int) { + b.unhealthyMu.RLock() + count = len(b.unhealthyHostPorts) + b.unhealthyMu.RUnlock() + return count +} + +func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) { + b.unhealthyMu.RLock() + _, unhealthy = b.unhealthyHostPorts[hostPort] + b.unhealthyMu.RUnlock() + return unhealthy +} + +func (b *healthBalancer) cleanupUnhealthy() { + b.unhealthyMu.Lock() + for k, v := range b.unhealthyHostPorts { + if time.Since(v) > b.healthCheckTimeout { + delete(b.unhealthyHostPorts, k) + if logger.V(4) { + logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout) + } + } + } + b.unhealthyMu.Unlock() +} + +func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) { + unhealthyCnt := b.countUnhealthy() + + b.mu.RLock() + defer b.mu.RUnlock() + + hbAddrs := b.addrs + if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) { + liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep)) + for k := range b.hostPort2ep { + liveHostPorts[k] = struct{}{} + } + return hbAddrs, liveHostPorts + } + + addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt) + liveHostPorts := make(map[string]struct{}, len(addrs)) + for _, addr := range b.addrs { + if !b.isUnhealthy(addr.Addr) { + addrs = append(addrs, addr) + liveHostPorts[addr.Addr] = struct{}{} 
+ } + } + return addrs, liveHostPorts +} + +func (b *healthBalancer) updateUnhealthy() { + for { + select { + case <-time.After(b.healthCheckTimeout): + b.cleanupUnhealthy() + pinned := b.pinned() + if pinned == "" || b.isUnhealthy(pinned) { + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + return + } + } + case <-b.stopc: + return + } + } +} + +func (b *healthBalancer) updateAddrs(eps ...string) { + np := getHostPort2ep(eps) + + b.mu.Lock() + defer b.mu.Unlock() + + match := len(np) == len(b.hostPort2ep) + if match { + for k, v := range np { + if b.hostPort2ep[k] != v { + match = false + break + } + } + } + if match { + // same endpoints, so no need to update address + return + } + + b.hostPort2ep = np + b.addrs, b.eps = eps2addrs(eps), eps + + b.unhealthyMu.Lock() + b.unhealthyHostPorts = make(map[string]time.Time) + b.unhealthyMu.Unlock() +} + +func (b *healthBalancer) next() { + b.mu.RLock() + downc := b.downc + b.mu.RUnlock() + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + } + // wait until disconnect so new RPCs are not issued on old connection + select { + case <-downc: + case <-b.stopc: + } +} + +func (b *healthBalancer) updateNotifyLoop() { + defer close(b.donec) + + for { + b.mu.RLock() + upc, downc, addr := b.upc, b.downc, b.pinAddr + b.mu.RUnlock() + // downc or upc should be closed + select { + case <-downc: + downc = nil + default: + } + select { + case <-upc: + upc = nil + default: + } + switch { + case downc == nil && upc == nil: + // stale + select { + case <-b.stopc: + return + default: + } + case downc == nil: + b.notifyAddrs(notifyReset) + select { + case <-upc: + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + return + } + case upc == nil: + select { + // close connections that are not the pinned address + case b.notifyCh <- []grpc.Address{{Addr: addr}}: + case <-downc: + case <-b.stopc: + return + } + select { + case <-downc: + b.notifyAddrs(notifyReset) + case msg := 
<-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + return + } + } + } +} + +func (b *healthBalancer) notifyAddrs(msg notifyMsg) { + if msg == notifyNext { + select { + case b.notifyCh <- []grpc.Address{}: + case <-b.stopc: + return + } + } + b.mu.RLock() + pinAddr := b.pinAddr + downc := b.downc + b.mu.RUnlock() + addrs, hostPorts := b.liveAddrs() + + var waitDown bool + if pinAddr != "" { + _, ok := hostPorts[pinAddr] + waitDown = !ok + } + + select { + case b.notifyCh <- addrs: + if waitDown { + select { + case <-downc: + case <-b.stopc: + } + } + case <-b.stopc: + } +} + +func (b *healthBalancer) Up(addr grpc.Address) func(error) { + if !b.mayPin(addr) { + return func(err error) {} + } + + b.mu.Lock() + defer b.mu.Unlock() + + // gRPC might call Up after it called Close. We add this check + // to "fix" it up at application layer. Otherwise, will panic + // if b.upc is already closed. + if b.closed { + return func(err error) {} + } + + // gRPC might call Up on a stale address. + // Prevent updating pinAddr with a stale address. + if !hasAddr(b.addrs, addr.Addr) { + return func(err error) {} + } + + if b.pinAddr != "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr) + } + return func(err error) {} + } + + // notify waiting Get()s and pin first connected address + close(b.upc) + b.downc = make(chan struct{}) + b.pinAddr = addr.Addr + if logger.V(4) { + logger.Infof("clientv3/balancer: pin %q", addr.Addr) + } + + // notify client that a connection is up + b.readyOnce.Do(func() { close(b.readyc) }) + + return func(err error) { + // If connected to a black hole endpoint or a killed server, the gRPC ping + // timeout will induce a network I/O error, and retrying until success; + // finding healthy endpoint on retry could take several timeouts and redials. + // To avoid wasting retries, gray-list unhealthy endpoints. 
+ b.hostPortError(addr.Addr, err) + + b.mu.Lock() + b.upc = make(chan struct{}) + close(b.downc) + b.pinAddr = "" + b.mu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) + } + } +} + +func (b *healthBalancer) mayPin(addr grpc.Address) bool { + if b.endpoint(addr.Addr) == "" { // stale host:port + return false + } + + b.unhealthyMu.RLock() + unhealthyCnt := len(b.unhealthyHostPorts) + failedTime, bad := b.unhealthyHostPorts[addr.Addr] + b.unhealthyMu.RUnlock() + + b.mu.RLock() + skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt + b.mu.RUnlock() + if skip || !bad { + return true + } + + // prevent isolated member's endpoint from being infinitely retried, as follows: + // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm + // 2. balancer 'Up' unpins with grpc: failed with network I/O error + // 3. grpc-healthcheck still SERVING, thus retry to pin + // instead, return before grpc-healthcheck if failed within healthcheck timeout + if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout) + } + return false + } + + if ok, _ := b.healthCheck(addr.Addr); ok { + b.removeUnhealthy(addr.Addr, "health check success") + return true + } + + b.hostPortError(addr.Addr, errors.New("health check failed")) + return false +} + +func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { + var ( + addr string + closed bool + ) + + // If opts.BlockingWait is false (for fail-fast RPCs), it should return + // an address it has notified via Notify immediately instead of blocking. 
+ if !opts.BlockingWait { + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr == "" { + return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable + } + return grpc.Address{Addr: addr}, func() {}, nil + } + + for { + b.mu.RLock() + ch := b.upc + b.mu.RUnlock() + select { + case <-ch: + case <-b.donec: + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + case <-ctx.Done(): + return grpc.Address{Addr: ""}, nil, ctx.Err() + } + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed. + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr != "" { + break + } + } + return grpc.Address{Addr: addr}, func() {}, nil +} + +func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } + +func (b *healthBalancer) Close() error { + b.mu.Lock() + // In case gRPC calls close twice. TODO: remove the checking + // when we are sure that gRPC wont call close twice. + if b.closed { + b.mu.Unlock() + <-b.donec + return nil + } + b.closed = true + b.stopOnce.Do(func() { close(b.stopc) }) + b.pinAddr = "" + + // In the case of following scenario: + // 1. upc is not closed; no pinned address + // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks + // 3. client.conn.Close() calls balancer.Close(); closed = true + // 4. 
for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled + // we must close upc so Get() exits from blocking on upc + select { + case <-b.upc: + default: + // terminate all waiting Get()s + close(b.upc) + } + + b.mu.Unlock() + b.wg.Wait() + + // wait for updateNotifyLoop to finish + <-b.donec + close(b.notifyCh) + + return nil +} + +func grpcHealthCheck(client *Client, ep string) (bool, error) { + conn, err := client.dial(ep) + if err != nil { + return false, err + } + defer conn.Close() + cli := healthpb.NewHealthClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) + cancel() + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable { + if s.Message() == unknownService { // etcd < v3.3.0 + return true, nil + } + } + return false, err + } + return resp.Status == healthpb.HealthCheckResponse_SERVING, nil +} + +func hasAddr(addrs []grpc.Address, targetAddr string) bool { + for _, addr := range addrs { + if targetAddr == addr.Addr { + return true + } + } + return false +} + +func getHost(ep string) string { + url, uerr := url.Parse(ep) + if uerr != nil || !strings.Contains(ep, "://") { + return ep + } + return url.Host +} + +func eps2addrs(eps []string) []grpc.Address { + addrs := make([]grpc.Address, len(eps)) + for i := range eps { + addrs[i].Addr = getHost(eps[i]) + } + return addrs +} + +func getHostPort2ep(eps []string) map[string]string { + hm := make(map[string]string, len(eps)) + for i := range eps { + _, host, _ := parseEndpoint(eps[i]) + hm[host] = eps[i] + } + return hm +} diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go index c8350f9268b..949f6dc5b14 100644 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -16,8 +16,8 @@ package clientv3 import ( pb 
"github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" - "google.golang.org/grpc" ) type ( @@ -32,7 +32,7 @@ type KV interface { // Put puts a key-value pair into etcd. // Note that key,value can be plain bytes array and string is // an immutable representation of that bytes array. - // To get a string of bytes, do string([]byte(0x10, 0x20)). + // To get a string of bytes, do string([]byte{0x10, 0x20}). Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) // Get retrieves keys. @@ -51,11 +51,6 @@ type KV interface { // Compact compacts etcd KV history before the given rev. Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) - // Do applies a single Op on KV without a transaction. - // Do is useful when declaring operations to be issued at a later time - // whereas Get/Put/Delete are for better suited for when the operation - // should be immediately issued at time of declaration. - // Do applies a single Op on KV without a transaction. 
// Do is useful when creating arbitrary operations to be issued at a // later time; the user can range over the operations, calling Do to @@ -71,11 +66,26 @@ type OpResponse struct { put *PutResponse get *GetResponse del *DeleteResponse + txn *TxnResponse } func (op OpResponse) Put() *PutResponse { return op.put } func (op OpResponse) Get() *GetResponse { return op.get } func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} type kv struct { remote pb.KVClient @@ -120,35 +130,17 @@ func (kv *kv) Txn(ctx context.Context) Txn { } func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { - for { - resp, err := kv.do(ctx, op) - if err == nil { - return resp, nil - } - - if isHaltErr(ctx, err) { - return resp, toErr(ctx, err) - } - // do not retry on modifications - if op.isWrite() { - return resp, toErr(ctx, err) - } - } -} - -func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { - // TODO: handle other ops case tRange: var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) + resp, err = kv.remote.Range(ctx, op.toRangeRequest()) if err == nil { return OpResponse{get: (*GetResponse)(resp)}, nil } case tPut: var resp *pb.PutResponse - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} resp, err = kv.remote.Put(ctx, r) if err == nil { return OpResponse{put: 
(*PutResponse)(resp)}, nil @@ -160,8 +152,14 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { if err == nil { return OpResponse{del: (*DeleteResponse)(resp)}, nil } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest()) + if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } default: panic("Unknown op") } - return OpResponse{}, err + return OpResponse{}, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go index 10d3dd0b27f..d90531bf2a1 100644 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -20,8 +20,9 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" - "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) type ( @@ -29,7 +30,7 @@ type ( LeaseID int64 ) -// LeaseGrantResponse is used to convert the protobuf grant response. +// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. type LeaseGrantResponse struct { *pb.ResponseHeader ID LeaseID @@ -37,14 +38,14 @@ type LeaseGrantResponse struct { Error string } -// LeaseKeepAliveResponse is used to convert the protobuf keepalive response. +// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. type LeaseKeepAliveResponse struct { *pb.ResponseHeader ID LeaseID TTL int64 } -// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response. +// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. type LeaseTimeToLiveResponse struct { *pb.ResponseHeader ID LeaseID `json:"id"` @@ -59,6 +60,12 @@ type LeaseTimeToLiveResponse struct { Keys [][]byte `json:"keys"` } +// LeaseStatus represents a lease status. 
+type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + const ( // defaultTTL is the assumed lease TTL used for the first keepalive // deadline before the actual TTL is known to the client. @@ -67,6 +74,9 @@ const ( leaseResponseChSize = 16 // NoLease is a lease ID for the absence of a lease. NoLease LeaseID = 0 + + // retryConnWait is how long to wait before retrying request due to an error + retryConnWait = 500 * time.Millisecond ) // ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. @@ -97,7 +107,7 @@ type Lease interface { // KeepAlive keeps the given lease alive forever. KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) - // KeepAliveOnce renews the lease once. In most of the cases, Keepalive + // KeepAliveOnce renews the lease once. In most of the cases, KeepAlive // should be used instead of KeepAliveOnce. KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) @@ -126,6 +136,9 @@ type lessor struct { // firstKeepAliveTimeout is the timeout for the first keepalive request // before the actual TTL is known to the lease client firstKeepAliveTimeout time.Duration + + // firstKeepAliveOnce ensures stream starts after first KeepAlive call. 
+ firstKeepAliveOnce sync.Once } // keepAlive multiplexes a keepalive for a lease over multiple channels @@ -141,85 +154,62 @@ type keepAlive struct { } func NewLease(c *Client) Lease { + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second) +} + +func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease { l := &lessor{ donec: make(chan struct{}), keepAlives: make(map[LeaseID]*keepAlive), - remote: RetryLeaseClient(c), - firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second, + remote: remote, + firstKeepAliveTimeout: keepAliveTimeout, } if l.firstKeepAliveTimeout == time.Second { l.firstKeepAliveTimeout = defaultTTL } - - l.stopCtx, l.stopCancel = context.WithCancel(context.Background()) - go l.recvKeepAliveLoop() - go l.deadlineLoop() + reqLeaderCtx := WithRequireLeader(context.Background()) + l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) return l } func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(cctx, r) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, - } - return gresp, nil - } - if isHaltErr(cctx, err) { - return nil, toErr(cctx, err) + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, } + return gresp, nil } + return nil, toErr(ctx, err) } func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := &pb.LeaseRevokeRequest{ID: 
int64(id)} - resp, err := l.remote.LeaseRevoke(cctx, r) - - if err == nil { - return (*LeaseRevokeResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil } + return nil, toErr(ctx, err) } func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := toLeaseTimeToLiveRequest(id, opts...) - resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false)) - if err == nil { - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, - } - return gresp, nil - } - if isHaltErr(cctx, err) { - return nil, toErr(cctx, err) + r := toLeaseTimeToLiveRequest(id, opts...) 
+ resp, err := l.remote.LeaseTimeToLive(ctx, r) + if err == nil { + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, } + return gresp, nil } + return nil, toErr(ctx, err) } func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { @@ -254,19 +244,19 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl l.mu.Unlock() go l.keepAliveCtxCloser(id, ctx, ka.donec) + l.firstKeepAliveOnce.Do(func() { + go l.recvKeepAliveLoop() + go l.deadlineLoop() + }) return ch, nil } func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - for { - resp, err := l.keepAliveOnce(cctx, id) + resp, err := l.keepAliveOnce(ctx, id) if err == nil { - if resp.TTL == 0 { + if resp.TTL <= 0 { err = rpctypes.ErrLeaseNotFound } return resp, err @@ -279,6 +269,8 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive func (l *lessor) Close() error { l.stopCancel() + // close for synchronous teardown if stream goroutines never launched + l.firstKeepAliveOnce.Do(func() { close(l.donec) }) <-l.donec return nil } @@ -315,11 +307,50 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha } } +// closeRequireLeader scans keepAlives for ctxs that have require leader +// and closes the associated channels. 
+func (l *lessor) closeRequireLeader() { + l.mu.Lock() + defer l.mu.Unlock() + for _, ka := range l.keepAlives { + reqIdxs := 0 + // find all required leader channels, close, mark as nil + for i, ctx := range ka.ctxs { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + continue + } + ks := md[rpctypes.MetadataRequireLeaderKey] + if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { + continue + } + close(ka.chs[i]) + ka.chs[i] = nil + reqIdxs++ + } + if reqIdxs == 0 { + continue + } + // remove all channels that required a leader from keepalive + newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) + newCtxs := make([]context.Context, len(newChs)) + newIdx := 0 + for i := range ka.chs { + if ka.chs[i] == nil { + continue + } + newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] + newIdx++ + } + ka.chs, ka.ctxs = newChs, newCtxs + } +} + func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { cctx, cancel := context.WithCancel(ctx) defer cancel() - stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false)) + stream, err := l.remote.LeaseKeepAlive(cctx) if err != nil { return nil, toErr(ctx, err) } @@ -348,32 +379,50 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { close(l.donec) l.loopErr = gerr for _, ka := range l.keepAlives { - ka.Close() + ka.close() } l.keepAlives = make(map[LeaseID]*keepAlive) l.mu.Unlock() }() - stream, serr := l.resetRecv() - for serr == nil { - resp, err := stream.Recv() + for { + stream, err := l.resetRecv() if err != nil { - if isHaltErr(l.stopCtx, err) { + if canceledByCaller(l.stopCtx, err) { return err } - stream, serr = l.resetRecv() - continue + } else { + for { + resp, err := stream.Recv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + + if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + l.closeRequireLeader() + } + break + } + + l.recvKeepAlive(resp) + } + } + + select { + case <-time.After(retryConnWait): + 
continue + case <-l.stopCtx.Done(): + return l.stopCtx.Err() } - l.recvKeepAlive(resp) } - return serr } -// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests +// resetRecv opens a new lease stream and starts sending keep alive requests. func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false)) - if err = toErr(sctx, err); err != nil { + stream, err := l.remote.LeaseKeepAlive(sctx) + if err != nil { cancel() return nil, err } @@ -381,7 +430,6 @@ func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { l.mu.Lock() defer l.mu.Unlock() if l.stream != nil && l.streamCancel != nil { - l.stream.CloseSend() l.streamCancel() } @@ -411,7 +459,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { if karesp.TTL <= 0 { // lease expired; close all keep alive channels delete(l.keepAlives, karesp.ID) - ka.Close() + ka.close() return } @@ -441,7 +489,7 @@ func (l *lessor) deadlineLoop() { for id, ka := range l.keepAlives { if ka.deadline.Before(now) { // waited too long for response; lease may be expired - ka.Close() + ka.close() delete(l.keepAlives, id) } } @@ -449,19 +497,9 @@ func (l *lessor) deadlineLoop() { } } -// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream +// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. 
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { for { - select { - case <-time.After(500 * time.Millisecond): - case <-stream.Context().Done(): - return - case <-l.donec: - return - case <-l.stopCtx.Done(): - return - } - var tosend []LeaseID now := time.Now() @@ -480,29 +518,22 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { return } } + + select { + case <-time.After(500 * time.Millisecond): + case <-stream.Context().Done(): + return + case <-l.donec: + return + case <-l.stopCtx.Done(): + return + } } } -func (ka *keepAlive) Close() { +func (ka *keepAlive) close() { close(ka.donec) for _, ch := range ka.chs { close(ch) } } - -// cancelWhenStop calls cancel when the given stopc fires. It returns a done chan. done -// should be closed when the work is finished. When done fires, cancelWhenStop will release -// its internal resource. -func cancelWhenStop(cancel context.CancelFunc, stopc <-chan struct{}) chan<- struct{} { - done := make(chan struct{}, 1) - - go func() { - select { - case <-stopc: - case <-done: - } - cancel() - }() - - return done -} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go index 519db45d8e3..012abdbce63 100644 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -16,36 +16,35 @@ package clientv3 import ( "io/ioutil" - "log" "sync" "google.golang.org/grpc/grpclog" ) // Logger is the logger used by client library. -// It implements grpclog.Logger interface. -type Logger grpclog.Logger +// It implements grpclog.LoggerV2 interface. 
+type Logger grpclog.LoggerV2 var ( logger settableLogger ) type settableLogger struct { - l grpclog.Logger + l grpclog.LoggerV2 mu sync.RWMutex } func init() { // disable client side logs by default logger.mu.Lock() - logger.l = log.New(ioutil.Discard, "", 0) + logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard) // logger has to override the grpclog at initialization so that // any changes to the grpclog go through logger with locking // instead of through SetLogger // // now updates only happen through settableLogger.set - grpclog.SetLogger(&logger) + grpclog.SetLoggerV2(&logger) logger.mu.Unlock() } @@ -62,6 +61,7 @@ func GetLogger() Logger { func (s *settableLogger) set(l Logger) { s.mu.Lock() logger.l = l + grpclog.SetLoggerV2(&logger) s.mu.Unlock() } @@ -72,11 +72,25 @@ func (s *settableLogger) get() Logger { return l } -// implement the grpclog.Logger interface +// implement the grpclog.LoggerV2 interface +func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } +func (s *settableLogger) Warningf(format string, args ...interface{}) { + s.get().Warningf(format, args...) +} +func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } +func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } +func (s *settableLogger) Errorf(format string, args ...interface{}) { + s.get().Errorf(format, args...) +} +func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) 
} func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) V(l int) bool { return s.get().V(l) } diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go index 718356250be..ca2f445b8b4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go @@ -18,8 +18,8 @@ import ( "io" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" - "google.golang.org/grpc" ) type ( @@ -36,7 +36,7 @@ type Maintenance interface { // AlarmDisarm disarms a given alarm. AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) - // Defragment defragments storage backend of the etcd member with given endpoint. + // Defragment releases wasted space from internal fragmentation on a given etcd member. // Defragment is only needed when deleting a large number of keys and want to reclaim // the resources. // Defragment is an expensive operation. User should avoid defragmenting multiple members @@ -48,17 +48,36 @@ type Maintenance interface { // Status gets the status of the endpoint. Status(ctx context.Context, endpoint string) (*StatusResponse, error) - // Snapshot provides a reader for a snapshot of a backend. + // Snapshot provides a reader for a point-in-time snapshot of etcd. 
Snapshot(ctx context.Context) (io.ReadCloser, error) } type maintenance struct { - c *Client + dial func(endpoint string) (pb.MaintenanceClient, func(), error) remote pb.MaintenanceClient } func NewMaintenance(c *Client) Maintenance { - return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)} + return &maintenance{ + dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { + conn, err := c.dial(endpoint) + if err != nil { + return nil, nil, err + } + cancel := func() { conn.Close() } + return RetryMaintenanceClient(c, conn), cancel, nil + }, + remote: RetryMaintenanceClient(c, c.conn), + } +} + +func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance { + return &maintenance{ + dial: func(string) (pb.MaintenanceClient, func(), error) { + return remote, func() {}, nil + }, + remote: remote, + } } func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { @@ -67,15 +86,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { MemberID: 0, // all Alarm: pb.AlarmType_NONE, // all } - for { - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) - if err == nil { - return (*AlarmResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + resp, err := m.remote.Alarm(ctx, req) + if err == nil { + return (*AlarmResponse)(resp), nil } + return nil, toErr(ctx, err) } func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { @@ -101,7 +116,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR return &ret, nil } - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) + resp, err := m.remote.Alarm(ctx, req) if err == nil { return (*AlarmResponse)(resp), nil } @@ -109,13 +124,12 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR } func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { - conn, err := 
m.c.Dial(endpoint) + remote, cancel, err := m.dial(endpoint) if err != nil { return nil, toErr(ctx, err) } - defer conn.Close() - remote := pb.NewMaintenanceClient(conn) - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false)) + defer cancel() + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}) if err != nil { return nil, toErr(ctx, err) } @@ -123,13 +137,12 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm } func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { - conn, err := m.c.Dial(endpoint) + remote, cancel, err := m.dial(endpoint) if err != nil { return nil, toErr(ctx, err) } - defer conn.Close() - remote := pb.NewMaintenanceClient(conn) - resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false)) + defer cancel() + resp, err := remote.Status(ctx, &pb.StatusRequest{}) if err != nil { return nil, toErr(ctx, err) } @@ -137,7 +150,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo } func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false)) + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}) if err != nil { return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/BUILD b/vendor/github.com/coreos/etcd/clientv3/namespace/BUILD new file mode 100644 index 00000000000..8c8b12d4cf4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "kv.go", + "lease.go", + "util.go", + "watch.go", + ], + importpath = "github.com/coreos/etcd/clientv3/namespace", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + 
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go b/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go new file mode 100644 index 00000000000..3f883320fcc --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go @@ -0,0 +1,43 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package namespace is a clientv3 wrapper that translates all keys to begin +// with a given prefix. +// +// First, create a client: +// +// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) +// if err != nil { +// // handle error! 
+// } +// +// Next, override the client interfaces: +// +// unprefixedKV := cli.KV +// cli.KV = namespace.NewKV(cli.KV, "my-prefix/") +// cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/") +// cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/") +// +// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/": +// +// cli.Put(context.TODO(), "abc", "123") +// resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc") +// fmt.Printf("%s\n", resp.Kvs[0].Value) +// // Output: 123 +// unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456") +// resp, _ = cli.Get("abc") +// fmt.Printf("%s\n", resp.Kvs[0].Value) +// // Output: 456 +// +package namespace diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go b/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go new file mode 100644 index 00000000000..2b759e0d394 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go @@ -0,0 +1,189 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package namespace + +import ( + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" +) + +type kvPrefix struct { + clientv3.KV + pfx string +} + +// NewKV wraps a KV instance so that all requests +// are prefixed with a given string. 
+func NewKV(kv clientv3.KV, prefix string) clientv3.KV { + return &kvPrefix{kv, prefix} +} + +func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) { + if len(key) == 0 { + return nil, rpctypes.ErrEmptyKey + } + op := kv.prefixOp(clientv3.OpPut(key, val, opts...)) + r, err := kv.KV.Do(ctx, op) + if err != nil { + return nil, err + } + put := r.Put() + kv.unprefixPutResponse(put) + return put, nil +} + +func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { + if len(key) == 0 { + return nil, rpctypes.ErrEmptyKey + } + r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...))) + if err != nil { + return nil, err + } + get := r.Get() + kv.unprefixGetResponse(get) + return get, nil +} + +func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) { + if len(key) == 0 { + return nil, rpctypes.ErrEmptyKey + } + r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...))) + if err != nil { + return nil, err + } + del := r.Del() + kv.unprefixDeleteResponse(del) + return del, nil +} + +func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) { + if len(op.KeyBytes()) == 0 { + return clientv3.OpResponse{}, rpctypes.ErrEmptyKey + } + r, err := kv.KV.Do(ctx, kv.prefixOp(op)) + if err != nil { + return r, err + } + switch { + case r.Get() != nil: + kv.unprefixGetResponse(r.Get()) + case r.Put() != nil: + kv.unprefixPutResponse(r.Put()) + case r.Del() != nil: + kv.unprefixDeleteResponse(r.Del()) + } + return r, nil +} + +type txnPrefix struct { + clientv3.Txn + kv *kvPrefix +} + +func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn { + return &txnPrefix{kv.KV.Txn(ctx), kv} +} + +func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn { + newCmps := make([]clientv3.Cmp, len(cs)) + for i := range cs { + newCmps[i] = cs[i] + pfxKey, _ := 
txn.kv.prefixInterval(cs[i].KeyBytes(), nil) + newCmps[i].WithKeyBytes(pfxKey) + } + txn.Txn = txn.Txn.If(newCmps...) + return txn +} + +func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn { + newOps := make([]clientv3.Op, len(ops)) + for i := range ops { + newOps[i] = txn.kv.prefixOp(ops[i]) + } + txn.Txn = txn.Txn.Then(newOps...) + return txn +} + +func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn { + newOps := make([]clientv3.Op, len(ops)) + for i := range ops { + newOps[i] = txn.kv.prefixOp(ops[i]) + } + txn.Txn = txn.Txn.Else(newOps...) + return txn +} + +func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) { + resp, err := txn.Txn.Commit() + if err != nil { + return nil, err + } + txn.kv.unprefixTxnResponse(resp) + return resp, nil +} + +func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op { + begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes()) + op.WithKeyBytes(begin) + op.WithRangeBytes(end) + return op +} + +func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) { + for i := range resp.Kvs { + resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):] + } +} + +func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) { + if resp.PrevKv != nil { + resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):] + } +} + +func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) { + for i := range resp.PrevKvs { + resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):] + } +} + +func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) { + for _, r := range resp.Responses { + switch tv := r.Response.(type) { + case *pb.ResponseOp_ResponseRange: + if tv.ResponseRange != nil { + kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange)) + } + case *pb.ResponseOp_ResponsePut: + if tv.ResponsePut != nil { + kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut)) + } + case *pb.ResponseOp_ResponseDeleteRange: + if tv.ResponseDeleteRange != nil { + 
kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange)) + } + default: + } + } +} + +func (p *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) { + return prefixInterval(p.pfx, key, end) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go b/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go new file mode 100644 index 00000000000..c3167fa5d87 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go @@ -0,0 +1,58 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package namespace + +import ( + "bytes" + + "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +type leasePrefix struct { + clientv3.Lease + pfx []byte +} + +// NewLease wraps a Lease interface to filter for only keys with a prefix +// and remove that prefix when fetching attached keys through TimeToLive. +func NewLease(l clientv3.Lease, prefix string) clientv3.Lease { + return &leasePrefix{l, []byte(prefix)} +} + +func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) { + resp, err := l.Lease.TimeToLive(ctx, id, opts...) 
+ if err != nil { + return nil, err + } + if len(resp.Keys) > 0 { + var outKeys [][]byte + for i := range resp.Keys { + if len(resp.Keys[i]) < len(l.pfx) { + // too short + continue + } + if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) { + // doesn't match prefix + continue + } + // strip prefix + outKeys = append(outKeys, resp.Keys[i][len(l.pfx):]) + } + resp.Keys = outKeys + } + return resp, nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/util.go b/vendor/github.com/coreos/etcd/clientv3/namespace/util.go new file mode 100644 index 00000000000..ecf04046c32 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/util.go @@ -0,0 +1,42 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package namespace + +func prefixInterval(pfx string, key, end []byte) (pfxKey []byte, pfxEnd []byte) { + pfxKey = make([]byte, len(pfx)+len(key)) + copy(pfxKey[copy(pfxKey, pfx):], key) + + if len(end) == 1 && end[0] == 0 { + // the edge of the keyspace + pfxEnd = make([]byte, len(pfx)) + copy(pfxEnd, pfx) + ok := false + for i := len(pfxEnd) - 1; i >= 0; i-- { + if pfxEnd[i]++; pfxEnd[i] != 0 { + ok = true + break + } + } + if !ok { + // 0xff..ff => 0x00 + pfxEnd = []byte{0} + } + } else if len(end) >= 1 { + pfxEnd = make([]byte, len(pfx)+len(end)) + copy(pfxEnd[copy(pfxEnd, pfx):], end) + } + + return pfxKey, pfxEnd +} diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go b/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go new file mode 100644 index 00000000000..9907211529a --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go @@ -0,0 +1,84 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package namespace + +import ( + "sync" + + "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +type watcherPrefix struct { + clientv3.Watcher + pfx string + + wg sync.WaitGroup + stopc chan struct{} + stopOnce sync.Once +} + +// NewWatcher wraps a Watcher instance so that all Watch requests +// are prefixed with a given string and all Watch responses have +// the prefix removed. 
+func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher { + return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})} +} + +func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { + // since OpOption is opaque, determine range for prefixing through an OpGet + op := clientv3.OpGet(key, opts...) + end := op.RangeBytes() + pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end) + if pfxEnd != nil { + opts = append(opts, clientv3.WithRange(string(pfxEnd))) + } + + wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...) + + // translate watch events from prefixed to unprefixed + pfxWch := make(chan clientv3.WatchResponse) + w.wg.Add(1) + go func() { + defer func() { + close(pfxWch) + w.wg.Done() + }() + for wr := range wch { + for i := range wr.Events { + wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):] + if wr.Events[i].PrevKv != nil { + wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key + } + } + select { + case pfxWch <- wr: + case <-ctx.Done(): + return + case <-w.stopc: + return + } + } + }() + return pfxWch +} + +func (w *watcherPrefix) Close() error { + err := w.Watcher.Close() + w.stopOnce.Do(func() { close(w.stopc) }) + w.wg.Wait() + return err +} diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/BUILD b/vendor/github.com/coreos/etcd/clientv3/naming/BUILD new file mode 100644 index 00000000000..9329d763bab --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/naming/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "grpc.go", + ], + importpath = "github.com/coreos/etcd/clientv3/naming", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + 
"//vendor/google.golang.org/grpc/naming:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/doc.go b/vendor/github.com/coreos/etcd/clientv3/naming/doc.go new file mode 100644 index 00000000000..71608cc738b --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/naming/doc.go @@ -0,0 +1,56 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package naming provides an etcd-backed gRPC resolver for discovering gRPC services. 
+// +// To use, first import the packages: +// +// import ( +// "github.com/coreos/etcd/clientv3" +// etcdnaming "github.com/coreos/etcd/clientv3/naming" +// +// "google.golang.org/grpc" +// "google.golang.org/grpc/naming" +// ) +// +// First, register new endpoint addresses for a service: +// +// func etcdAdd(c *clientv3.Client, service, addr string) error { +// r := &etcdnaming.GRPCResolver{Client: c} +// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}) +// } +// +// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer: +// +// func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) { +// r := &etcdnaming.GRPCResolver{Client: c} +// b := grpc.RoundRobin(r) +// return grpc.Dial(service, grpc.WithBalancer(b)) +// } +// +// Optionally, force delete an endpoint: +// +// func etcdDelete(c *clientv3, service, addr string) error { +// r := &etcdnaming.GRPCResolver{Client: c} +// return r.Update(c.Ctx(), "my-service", naming.Update{Op: naming.Delete, Addr: "1.2.3.4"}) +// } +// +// Or register an expiring endpoint with a lease: +// +// func etcdLeaseAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error { +// r := &etcdnaming.GRPCResolver{Client: c} +// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid)) +// } +// +package naming diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go b/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go new file mode 100644 index 00000000000..7fabc4f109a --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go @@ -0,0 +1,132 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package naming + +import ( + "encoding/json" + "fmt" + + etcd "github.com/coreos/etcd/clientv3" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/status" + + "golang.org/x/net/context" +) + +var ErrWatcherClosed = fmt.Errorf("naming: watch closed") + +// GRPCResolver creates a grpc.Watcher for a target to track its resolution changes. +type GRPCResolver struct { + // Client is an initialized etcd client. + Client *etcd.Client +} + +func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) { + switch nm.Op { + case naming.Add: + var v []byte + if v, err = json.Marshal(nm); err != nil { + return status.Error(codes.InvalidArgument, err.Error()) + } + _, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...) + case naming.Delete: + _, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...) + default: + return status.Error(codes.InvalidArgument, "naming: bad naming op") + } + return err +} + +func (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) { + ctx, cancel := context.WithCancel(context.Background()) + w := &gRPCWatcher{c: gr.Client, target: target + "/", ctx: ctx, cancel: cancel} + return w, nil +} + +type gRPCWatcher struct { + c *etcd.Client + target string + ctx context.Context + cancel context.CancelFunc + wch etcd.WatchChan + err error +} + +// Next gets the next set of updates from the etcd resolver. 
+// Calls to Next should be serialized; concurrent calls are not safe since +// there is no way to reconcile the update ordering. +func (gw *gRPCWatcher) Next() ([]*naming.Update, error) { + if gw.wch == nil { + // first Next() returns all addresses + return gw.firstNext() + } + if gw.err != nil { + return nil, gw.err + } + + // process new events on target/* + wr, ok := <-gw.wch + if !ok { + gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error()) + return nil, gw.err + } + if gw.err = wr.Err(); gw.err != nil { + return nil, gw.err + } + + updates := make([]*naming.Update, 0, len(wr.Events)) + for _, e := range wr.Events { + var jupdate naming.Update + var err error + switch e.Type { + case etcd.EventTypePut: + err = json.Unmarshal(e.Kv.Value, &jupdate) + jupdate.Op = naming.Add + case etcd.EventTypeDelete: + err = json.Unmarshal(e.PrevKv.Value, &jupdate) + jupdate.Op = naming.Delete + } + if err == nil { + updates = append(updates, &jupdate) + } + } + return updates, nil +} + +func (gw *gRPCWatcher) firstNext() ([]*naming.Update, error) { + // Use serialized request so resolution still works if the target etcd + // server is partitioned away from the quorum. + resp, err := gw.c.Get(gw.ctx, gw.target, etcd.WithPrefix(), etcd.WithSerializable()) + if gw.err = err; err != nil { + return nil, err + } + + updates := make([]*naming.Update, 0, len(resp.Kvs)) + for _, kv := range resp.Kvs { + var jupdate naming.Update + if err := json.Unmarshal(kv.Value, &jupdate); err != nil { + continue + } + updates = append(updates, &jupdate) + } + + opts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1), etcd.WithPrefix(), etcd.WithPrevKV()} + gw.wch = gw.c.Watch(gw.ctx, gw.target, opts...) 
+ return updates, nil +} + +func (gw *gRPCWatcher) Close() { gw.cancel() } diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go index 6e260076698..e18d28662c4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ b/vendor/github.com/coreos/etcd/clientv3/op.go @@ -23,6 +23,7 @@ const ( tRange opType = iota + 1 tPut tDeleteRange + tTxn ) var ( @@ -52,6 +53,10 @@ type Op struct { // for watch, put, delete prevKV bool + // for put + ignoreValue bool + ignoreLease bool + // progressNotify is for progress updates. progressNotify bool // createdNotify is for created event @@ -63,8 +68,69 @@ type Op struct { // for put val []byte leaseID LeaseID + + // txn + cmps []Cmp + thenOps []Op + elseOps []Op } +// accessors / mutators + +func (op Op) IsTxn() bool { return op.t == tTxn } +func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } + +// KeyBytes returns the byte slice holding the Op's key. +func (op Op) KeyBytes() []byte { return op.key } + +// WithKeyBytes sets the byte slice for the Op's key. +func (op *Op) WithKeyBytes(key []byte) { op.key = key } + +// RangeBytes returns the byte slice holding with the Op's range end, if any. +func (op Op) RangeBytes() []byte { return op.end } + +// Rev returns the requested revision, if any. +func (op Op) Rev() int64 { return op.rev } + +// IsPut returns true iff the operation is a Put. +func (op Op) IsPut() bool { return op.t == tPut } + +// IsGet returns true iff the operation is a Get. +func (op Op) IsGet() bool { return op.t == tRange } + +// IsDelete returns true iff the operation is a Delete. +func (op Op) IsDelete() bool { return op.t == tDeleteRange } + +// IsSerializable returns true if the serializable field is true. +func (op Op) IsSerializable() bool { return op.serializable == true } + +// IsKeysOnly returns whether keysOnly is set. 
+func (op Op) IsKeysOnly() bool { return op.keysOnly == true } + +// IsCountOnly returns whether countOnly is set. +func (op Op) IsCountOnly() bool { return op.countOnly == true } + +// MinModRev returns the operation's minimum modify revision. +func (op Op) MinModRev() int64 { return op.minModRev } + +// MaxModRev returns the operation's maximum modify revision. +func (op Op) MaxModRev() int64 { return op.maxModRev } + +// MinCreateRev returns the operation's minimum create revision. +func (op Op) MinCreateRev() int64 { return op.minCreateRev } + +// MaxCreateRev returns the operation's maximum create revision. +func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } + +// WithRangeBytes sets the byte slice for the Op's range end. +func (op *Op) WithRangeBytes(end []byte) { op.end = end } + +// ValueBytes returns the byte slice holding the Op's value, if any. +func (op Op) ValueBytes() []byte { return op.val } + +// WithValueBytes sets the byte slice for the Op's value. +func (op *Op) WithValueBytes(v []byte) { op.val = v } + func (op Op) toRangeRequest() *pb.RangeRequest { if op.t != tRange { panic("op.t != tRange") @@ -89,12 +155,28 @@ func (op Op) toRangeRequest() *pb.RangeRequest { return r } +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + func (op Op) toRequestOp() *pb.RequestOp { switch op.t { case tRange: return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} case tPut: - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} + r := 
&pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} case tDeleteRange: r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} @@ -105,6 +187,19 @@ func (op Op) toRequestOp() *pb.RequestOp { } func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } return op.t != tRange } @@ -170,6 +265,10 @@ func OpPut(key, val string, opts ...OpOption) Op { return ret } +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + func opWatch(key string, opts ...OpOption) Op { ret := Op{t: tRange, key: []byte(key)} ret.applyOpts(opts) @@ -207,6 +306,7 @@ func WithLease(leaseID LeaseID) OpOption { } // WithLimit limits the number of results to return from 'Get' request. +// If WithLimit is given a 0 limit, it is treated as no limit. func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } // WithRev specifies the store revision for 'Get' request. @@ -222,9 +322,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption { if target == SortByKey && order == SortAscend { // If order != SortNone, server fetches the entire key-space, // and then applies the sort and limit, if provided. - // Since current mvcc.Range implementation returns results - // sorted by keys in lexicographically ascending order, - // client should ignore SortOrder if the target is SortByKey. + // Since by default the server returns results sorted by keys + // in lexicographically ascending order, the client should ignore + // SortOrder if the target is SortByKey. 
order = SortNone } op.sort = &SortOption{target, order} @@ -257,6 +357,10 @@ func getPrefix(key []byte) []byte { // can return 'foo1', 'foo2', and so on. func WithPrefix() OpOption { return func(op *Op) { + if len(op.key) == 0 { + op.key, op.end = []byte{0}, []byte{0} + return + } op.end = getPrefix(op.key) } } @@ -360,6 +464,24 @@ func WithPrevKV() OpOption { } } +// WithIgnoreValue updates the key using its current value. +// This option can not be combined with non-empty values. +// Returns an error if the key does not exist. +func WithIgnoreValue() OpOption { + return func(op *Op) { + op.ignoreValue = true + } +} + +// WithIgnoreLease updates the key using its current lease. +// This option can not be combined with WithLease. +// Returns an error if the key does not exist. +func WithIgnoreLease() OpOption { + return func(op *Op) { + op.ignoreLease = true + } +} + // LeaseOp represents an Operation that lease can execute. type LeaseOp struct { id LeaseID @@ -377,8 +499,7 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) { } } -// WithAttachedKeys requests lease timetolive API to return -// attached keys of given lease ID. +// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID. func WithAttachedKeys() LeaseOption { return func(op *LeaseOp) { op.attachedKeys = true } } diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go new file mode 100644 index 00000000000..23eea9367ff --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go @@ -0,0 +1,30 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import "golang.org/x/net/context" + +// TODO: remove this when "FailFast=false" is fixed. +// See https://github.com/grpc/grpc-go/issues/1532. +func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { + select { + case <-ready: + return nil + case <-rpcCtx.Done(): + return rpcCtx.Err() + case <-clientCtx.Done(): + return clientCtx.Err() + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go index 78f31a8c4b0..c95b2cad7c4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -17,135 +17,183 @@ package clientv3 import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) type rpcFunc func(ctx context.Context) error -type retryRpcFunc func(context.Context, rpcFunc) error +type retryRPCFunc func(context.Context, rpcFunc) error +type retryStopErrFunc func(error) bool -func (c *Client) newRetryWrapper() retryRpcFunc { +func isRepeatableStopError(err error) bool { + eErr := rpctypes.Error(err) + // always stop retry on etcd errors + if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { + return true + } + // only retry if unavailable + ev, _ := status.FromError(err) + return ev.Code() != codes.Unavailable +} + +func isNonRepeatableStopError(err error) bool { + ev, _ := 
status.FromError(err) + if ev.Code() != codes.Unavailable { + return true + } + desc := rpctypes.ErrorDesc(err) + return desc != "there is no address available" && desc != "there is no connection available" +} + +func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { + if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { + return err + } + pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - - eErr := rpctypes.Error(err) - // always stop retry on etcd errors - if _, ok := eErr.(rpctypes.EtcdError); ok { - return err + if logger.V(4) { + logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) } - // only retry if unavailable - if grpc.Code(err) != codes.Unavailable { - return err + if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { + // mark this before endpoint switch is triggered + c.balancer.hostPortError(pinned, err) + c.balancer.next() + if logger.V(4) { + logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) + } } - select { - case <-c.balancer.ConnectNotify(): - case <-rpcCtx.Done(): - return rpcCtx.Err() - case <-c.ctx.Done(): - return c.ctx.Err() + if isStop(err) { + return err } } } } -func (c *Client) newAuthRetryWrapper() retryRpcFunc { +func (c *Client) newAuthRetryWrapper() retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { + pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - + if logger.V(4) { + logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) + } // always stop retry on etcd errors other than invalid auth token if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { gterr := c.getToken(rpcCtx) if gterr != nil { + if logger.V(4) { + logger.Infof("clientv3/auth-retry: cannot retry due 
to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) + } return err // return the original error for simplicity } continue } - return err } } } -// RetryKVClient implements a KVClient that uses the client's FailFast retry policy. +// RetryKVClient implements a KVClient. func RetryKVClient(c *Client) pb.KVClient { - retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper} - return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + conn := pb.NewKVClient(c.conn) + retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry} + retryAuthWrapper := c.newAuthRetryWrapper() + return &retryKVClient{ + &nonRepeatableKVClient{retryBasic, retryAuthWrapper}, + retryAuthWrapper} } type retryKVClient struct { - *retryWriteKVClient + *nonRepeatableKVClient + repeatableRetry retryRPCFunc } func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...) + err = rkv.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Range(rctx, in, opts...) return err }) return resp, err } -type retryWriteKVClient struct { - pb.KVClient - retryf retryRpcFunc +type nonRepeatableKVClient struct { + kc pb.KVClient + nonRepeatableRetry retryRPCFunc } -func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Put(rctx, in, opts...) 
+func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Put(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.DeleteRange(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Txn(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { + // TODO: repeatableRetry if read-only txn + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Txn(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Compact(rctx, in, opts...) 
+func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Compact(rctx, in, opts...) return err }) return resp, err } type retryLeaseClient struct { - pb.LeaseClient - retryf retryRpcFunc + lc pb.LeaseClient + repeatableRetry retryRPCFunc } -// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy. +// RetryLeaseClient implements a LeaseClient. func RetryLeaseClient(c *Client) pb.LeaseClient { - retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper} - return &retryLeaseClient{retry, c.retryAuthWrapper} + retry := &retryLeaseClient{ + pb.NewLeaseClient(c.conn), + c.newRetryWrapper(isRepeatableStopError), + } + return &retryLeaseClient{retry, c.newAuthRetryWrapper()} +} + +func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...) + return err + }) + return resp, err } func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...) + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseGrant(rctx, in, opts...) 
return err }) return resp, err @@ -153,140 +201,270 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe } func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...) + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...) return err }) return resp, err } +func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...) + return err + }) + return stream, err +} + type retryClusterClient struct { - pb.ClusterClient - retryf retryRpcFunc + *nonRepeatableClusterClient + repeatableRetry retryRPCFunc } -// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy. +// RetryClusterClient implements a ClusterClient. func RetryClusterClient(c *Client) pb.ClusterClient { - return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + cc := pb.NewClusterClient(c.conn) + return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry} } -func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...) 
+func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { + err = rcc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberList(rctx, in, opts...) return err }) return resp, err } -func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...) +type nonRepeatableClusterClient struct { + cc pb.ClusterClient + nonRepeatableRetry retryRPCFunc +} + +func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberAdd(rctx, in, opts...) return err }) return resp, err } -func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...) +func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberRemove(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberUpdate(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +// RetryMaintenanceClient implements a Maintenance. +func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + mc := pb.NewMaintenanceClient(conn) + return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry} +} + +type retryMaintenanceClient struct { + *nonRepeatableMaintenanceClient + repeatableRetry retryRPCFunc +} + +func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Alarm(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Status(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Hash(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rmc.mc.Snapshot(rctx, in, opts...) 
+ return err + }) + return stream, err +} + +type nonRepeatableMaintenanceClient struct { + mc pb.MaintenanceClient + nonRepeatableRetry retryRPCFunc +} + +func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { + err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Defragment(rctx, in, opts...) return err }) return resp, err } type retryAuthClient struct { - pb.AuthClient - retryf retryRpcFunc + *nonRepeatableAuthClient + repeatableRetry retryRPCFunc } -// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy. +// RetryAuthClient implements a AuthClient. func RetryAuthClient(c *Client) pb.AuthClient { - return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + ac := pb.NewAuthClient(c.conn) + return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry} } -func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...) +func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserList(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...) 
+func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGet(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserAdd(rctx, in, opts...) +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGet(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserDelete(rctx, in, opts...) +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleList(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...) 
+type nonRepeatableAuthClient struct { + ac pb.AuthClient + nonRepeatableRetry retryRPCFunc +} + +func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthEnable(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthDisable(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserAdd(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...) 
+func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserDelete(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserChangePassword(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGrantRole(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...) 
+func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserRevokeRole(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleDelete(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.Authenticate(rctx, in, opts...) return err }) return resp, err diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go index a61decd6406..2661c5942e2 100644 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ b/vendor/github.com/coreos/etcd/clientv3/txn.go @@ -18,13 +18,13 @@ import ( "sync" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" - "google.golang.org/grpc" ) // Txn is the interface that wraps mini-transactions. // -// Tx.If( +// Txn(context.TODO()).If( // Compare(Value(k1), ">", v1), // Compare(Version(k1), "=", 2) // ).Then( @@ -49,8 +49,6 @@ type Txn interface { // Commit tries to commit the transaction. Commit() (*TxnResponse, error) - - // TODO: add a Do for shortcut the txn without any condition? } type txn struct { @@ -137,30 +135,14 @@ func (txn *txn) Else(ops ...Op) Txn { func (txn *txn) Commit() (*TxnResponse, error) { txn.mu.Lock() defer txn.mu.Unlock() - for { - resp, err := txn.commit() - if err == nil { - return resp, err - } - if isHaltErr(txn.ctx, err) { - return nil, toErr(txn.ctx, err) - } - if txn.isWrite { - return nil, toErr(txn.ctx, err) - } - } -} -func (txn *txn) commit() (*TxnResponse, error) { r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - var opts []grpc.CallOption - if !txn.isWrite { - opts = []grpc.CallOption{grpc.FailFast(false)} - } - resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...) 
+ var resp *pb.TxnResponse + var err error + resp, err = txn.kv.remote.Txn(txn.ctx, r) if err != nil { - return nil, err + return nil, toErr(txn.ctx, err) } return (*TxnResponse)(resp), nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go index 9b083cc9462..12977aed896 100644 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -22,8 +22,12 @@ import ( v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) const ( @@ -39,10 +43,9 @@ type WatchChan <-chan WatchResponse type Watcher interface { // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. - // If the watch is slow or the required rev is compacted, the watch request - // might be canceled from the server-side and the chan will be closed. - // 'opts' can be: 'WithRev' and/or 'WithPrefix'. + // through the returned channel. If revisions waiting to be sent over the + // watch are compacted, then the watch will be canceled by the server, the + // client will post a compacted error watch response, and the channel will close. Watch(ctx context.Context, key string, opts ...OpOption) WatchChan // Close closes the watcher and cancels all watch requests. @@ -65,6 +68,9 @@ type WatchResponse struct { Created bool closeErr error + + // cancelReason is a reason of canceling watch + cancelReason string } // IsCreate returns true if the event tells that the key is newly created. 
@@ -85,6 +91,9 @@ func (wr *WatchResponse) Err() error { case wr.CompactRevision != 0: return v3rpc.ErrCompacted case wr.Canceled: + if len(wr.cancelReason) != 0 { + return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) + } return v3rpc.ErrFutureRev } return nil @@ -128,7 +137,7 @@ type watchGrpcStream struct { respc chan *pb.WatchResponse // donec closes to broadcast shutdown donec chan struct{} - // errc transmits errors from grpc Recv to the watch stream reconn logic + // errc transmits errors from grpc Recv to the watch stream reconnect logic errc chan error // closingc gets the watcherStream of closing watchers closingc chan *watcherStream @@ -207,16 +216,15 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { owner: w, remote: w.remote, ctx: ctx, - ctxKey: fmt.Sprintf("%v", inctx), + ctxKey: streamKeyFromCtx(inctx), cancel: cancel, substreams: make(map[int64]*watcherStream), - - respc: make(chan *pb.WatchResponse), - reqc: make(chan *watchRequest), - donec: make(chan struct{}), - errc: make(chan error, 1), - closingc: make(chan *watcherStream), - resumec: make(chan struct{}), + respc: make(chan *pb.WatchResponse), + reqc: make(chan *watchRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), } go wgs.run() return wgs @@ -247,7 +255,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch } ok := false - ctxKey := fmt.Sprintf("%v", ctx) + ctxKey := streamKeyFromCtx(ctx) // find or allocate appropriate grpc watch stream w.mu.Lock() @@ -310,14 +318,14 @@ func (w *watcher) Close() (err error) { w.streams = nil w.mu.Unlock() for _, wgs := range streams { - if werr := wgs.Close(); werr != nil { + if werr := wgs.close(); werr != nil { err = werr } } return err } -func (w *watchGrpcStream) Close() (err error) { +func (w *watchGrpcStream) close() (err error) { w.cancel() <-w.donec select { @@ -428,7 
+436,7 @@ func (w *watchGrpcStream) run() { initReq: *wreq, id: -1, outc: outc, - // unbufffered so resumes won't cause repeat events + // unbuffered so resumes won't cause repeat events recvc: make(chan *WatchResponse), } @@ -480,7 +488,7 @@ func (w *watchGrpcStream) run() { req := &pb.WatchRequest{RequestUnion: cr} wc.Send(req) } - // watch client failed to recv; spawn another if possible + // watch client failed on Recv; spawn another if possible case err := <-w.errc: if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { closeErr = err @@ -520,10 +528,6 @@ func (w *watchGrpcStream) nextResume() *watcherStream { // dispatchEvent sends a WatchResponse to the appropriate watcher stream func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - ws, ok := w.substreams[pbresp.WatchId] - if !ok { - return false - } events := make([]*Event, len(pbresp.Events)) for i, ev := range pbresp.Events { events[i] = (*Event)(ev) @@ -534,6 +538,11 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { CompactRevision: pbresp.CompactRevision, Created: pbresp.Created, Canceled: pbresp.Canceled, + cancelReason: pbresp.CancelReason, + } + ws, ok := w.substreams[pbresp.WatchId] + if !ok { + return false } select { case ws.recvc <- wr: @@ -725,7 +734,11 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str ws.closing = true close(ws.outc) ws.outc = nil - go func() { w.closingc <- ws }() + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.closingc <- ws + }() case <-stopc: } }(w.resuming[i]) @@ -737,7 +750,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str return donec } -// joinSubstream waits for all substream goroutines to complete +// joinSubstreams waits for all substream goroutines to complete. 
func (w *watchGrpcStream) joinSubstreams() { for _, ws := range w.substreams { <-ws.donec @@ -749,7 +762,9 @@ func (w *watchGrpcStream) joinSubstreams() { } } -// openWatchClient retries opening a watchclient until retryConnection fails +// openWatchClient retries opening a watch client until success or halt. +// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { for { select { @@ -770,7 +785,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) return ws, nil } -// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest) +// toPB converts an internal watch request structure to its protobuf WatchRequest structure. func (wr *watchRequest) toPB() *pb.WatchRequest { req := &pb.WatchCreateRequest{ StartRevision: wr.rev, @@ -783,3 +798,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest { cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} return &pb.WatchRequest{RequestUnion: cr} } + +func streamKeyFromCtx(ctx context.Context) string { + if md, ok := metadata.FromOutgoingContext(ctx); ok { + return fmt.Sprintf("%+v", md) + } + return "" +} diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/github.com/coreos/etcd/compactor/compactor.go index 322a0987011..270287072d8 100644 --- a/vendor/github.com/coreos/etcd/compactor/compactor.go +++ b/vendor/github.com/coreos/etcd/compactor/compactor.go @@ -30,7 +30,8 @@ var ( ) const ( - checkCompactionInterval = 5 * time.Minute + checkCompactionInterval = 5 * time.Minute + executeCompactionInterval = time.Hour ) type Compactable interface { @@ -41,6 +42,8 @@ type RevGetter interface { Rev() int64 } +// Periodic compacts the log by purging revisions older than +// the configured retention time. Compaction happens hourly. 
type Periodic struct { clock clockwork.Clock periodInHour int @@ -85,11 +88,12 @@ func (t *Periodic) Run() { continue } } - if clock.Now().Sub(last) < time.Duration(t.periodInHour)*time.Hour { + + if clock.Now().Sub(last) < executeCompactionInterval { continue } - rev := t.getRev(t.periodInHour) + rev, remaining := t.getRev(t.periodInHour) if rev < 0 { continue } @@ -97,7 +101,7 @@ func (t *Periodic) Run() { plog.Noticef("Starting auto-compaction at revision %d", rev) _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) if err == nil || err == mvcc.ErrCompacted { - t.revs = make([]int64, 0) + t.revs = remaining last = clock.Now() plog.Noticef("Finished auto-compaction at revision %d", rev) } else { @@ -124,10 +128,10 @@ func (t *Periodic) Resume() { t.paused = false } -func (t *Periodic) getRev(h int) int64 { +func (t *Periodic) getRev(h int) (int64, []int64) { i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval) if i < 0 { - return -1 + return -1, t.revs } - return t.revs[i] + return t.revs[i], t.revs[i+1:] } diff --git a/vendor/github.com/coreos/etcd/discovery/BUILD b/vendor/github.com/coreos/etcd/discovery/BUILD index 4a437e67af1..496402a6761 100644 --- a/vendor/github.com/coreos/etcd/discovery/BUILD +++ b/vendor/github.com/coreos/etcd/discovery/BUILD @@ -2,10 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "discovery.go", - "srv.go", - ], + srcs = ["discovery.go"], importpath = "github.com/coreos/etcd/discovery", visibility = ["//visibility:public"], deps = [ diff --git a/vendor/github.com/coreos/etcd/discovery/srv.go b/vendor/github.com/coreos/etcd/discovery/srv.go deleted file mode 100644 index c3d20ca9243..00000000000 --- a/vendor/github.com/coreos/etcd/discovery/srv.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package discovery - -import ( - "fmt" - "net" - "net/url" - "strings" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV - resolveTCPAddr = net.ResolveTCPAddr -) - -// SRVGetCluster gets the cluster information via DNS discovery. -// TODO(barakmich): Currently ignores priority and weight (as they don't make as much sense for a bootstrap) -// Also doesn't do any lookups for the token (though it could) -// Also sees each entry as a separate instance. -func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (string, string, error) { - tempName := int(0) - tcp2ap := make(map[string]url.URL) - - // First, resolve the apurls - for _, url := range apurls { - tcpAddr, err := resolveTCPAddr("tcp", url.Host) - if err != nil { - plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host) - return "", "", err - } - tcp2ap[tcpAddr.String()] = url - } - - stringParts := []string{} - updateNodeMap := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", dns) - if err != nil { - return err - } - for _, srv := range addrs { - port := fmt.Sprintf("%d", srv.Port) - host := net.JoinHostPort(srv.Target, port) - tcpAddr, err := resolveTCPAddr("tcp", host) - if err != nil { - plog.Warningf("couldn't resolve host %s during SRV discovery", host) - continue - } - n := "" - url, ok := tcp2ap[tcpAddr.String()] - if ok { - n = name - } - if n == "" { - n = fmt.Sprintf("%d", tempName) - tempName++ - } - // SRV records have a trailing dot but 
URL shouldn't. - shortHost := strings.TrimSuffix(srv.Target, ".") - urlHost := net.JoinHostPort(shortHost, port) - stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) - plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost) - if ok && url.Scheme != scheme { - plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) - } - } - return nil - } - - failCount := 0 - err := updateNodeMap("etcd-server-ssl", "https") - srvErr := make([]string, 2) - if err != nil { - srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err) - failCount++ - } - err = updateNodeMap("etcd-server", "http") - if err != nil { - srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err) - failCount++ - } - if failCount == 2 { - plog.Warningf(srvErr[0]) - plog.Warningf(srvErr[1]) - plog.Errorf("SRV discovery failed: too many errors querying DNS SRV records") - return "", "", err - } - return strings.Join(stringParts, ","), defaultToken, nil -} diff --git a/vendor/github.com/coreos/etcd/embed/BUILD b/vendor/github.com/coreos/etcd/embed/BUILD new file mode 100644 index 00000000000..286bdae6892 --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/BUILD @@ -0,0 +1,60 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "config.go", + "doc.go", + "etcd.go", + "serve.go", + "util.go", + ], + importpath = "github.com/coreos/etcd/embed", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/cockroachdb/cmux:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3client:go_default_library", + 
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/cors:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/debugutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/runtime:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/srv:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", + "//vendor/github.com/coreos/etcd/wal:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/golang.org/x/net/trace:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", + "//vendor/google.golang.org/grpc/keepalive:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = 
["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/embed/config.go b/vendor/github.com/coreos/etcd/embed/config.go new file mode 100644 index 00000000000..90efb3937d7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/config.go @@ -0,0 +1,464 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package embed + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/pkg/cors" + "github.com/coreos/etcd/pkg/netutil" + "github.com/coreos/etcd/pkg/srv" + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" + + "github.com/ghodss/yaml" + "google.golang.org/grpc" +) + +const ( + ClusterStateFlagNew = "new" + ClusterStateFlagExisting = "existing" + + DefaultName = "default" + DefaultMaxSnapshots = 5 + DefaultMaxWALs = 5 + DefaultMaxRequestBytes = 1.5 * 1024 * 1024 + DefaultGRPCKeepAliveMinTime = 5 * time.Second + DefaultGRPCKeepAliveInterval = 2 * time.Hour + DefaultGRPCKeepAliveTimeout = 20 * time.Second + + DefaultListenPeerURLs = "http://localhost:2380" + DefaultListenClientURLs = "http://localhost:2379" + + // maxElectionMs specifies the maximum value of election timeout. + // More details are listed in ../Documentation/tuning.md#time-parameters. 
+ maxElectionMs = 50000 +) + +var ( + ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " + + "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"") + ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly") + + DefaultInitialAdvertisePeerURLs = "http://localhost:2380" + DefaultAdvertiseClientURLs = "http://localhost:2379" + + defaultHostname string + defaultHostStatus error +) + +func init() { + defaultHostname, defaultHostStatus = netutil.GetDefaultHost() +} + +// Config holds the arguments for configuring an etcd server. +type Config struct { + // member + + CorsInfo *cors.CORSInfo + LPUrls, LCUrls []url.URL + Dir string `json:"data-dir"` + WalDir string `json:"wal-dir"` + MaxSnapFiles uint `json:"max-snapshots"` + MaxWalFiles uint `json:"max-wals"` + Name string `json:"name"` + SnapCount uint64 `json:"snapshot-count"` + AutoCompactionRetention int `json:"auto-compaction-retention"` + + // TickMs is the number of milliseconds between heartbeat ticks. + // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1). + // make ticks a cluster wide configuration. + TickMs uint `json:"heartbeat-interval"` + ElectionMs uint `json:"election-timeout"` + QuotaBackendBytes int64 `json:"quota-backend-bytes"` + MaxRequestBytes uint `json:"max-request-bytes"` + + // gRPC server options + + // GRPCKeepAliveMinTime is the minimum interval that a client should + // wait before pinging server. When client pings "too fast", server + // sends goaway and closes the connection (errors: too_many_pings, + // http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens. + // Server expects client pings only when there is any active streams + // (PermitWithoutStream is set false). 
+ GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"` + // GRPCKeepAliveInterval is the frequency of server-to-client ping + // to check if a connection is alive. Close a non-responsive connection + // after an additional duration of Timeout. 0 to disable. + GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"` + // GRPCKeepAliveTimeout is the additional duration of wait + // before closing a non-responsive connection. 0 to disable. + GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"` + + // clustering + + APUrls, ACUrls []url.URL + ClusterState string `json:"initial-cluster-state"` + DNSCluster string `json:"discovery-srv"` + Dproxy string `json:"discovery-proxy"` + Durl string `json:"discovery"` + InitialCluster string `json:"initial-cluster"` + InitialClusterToken string `json:"initial-cluster-token"` + StrictReconfigCheck bool `json:"strict-reconfig-check"` + EnableV2 bool `json:"enable-v2"` + + // security + + ClientTLSInfo transport.TLSInfo + ClientAutoTLS bool + PeerTLSInfo transport.TLSInfo + PeerAutoTLS bool + + // debug + + Debug bool `json:"debug"` + LogPkgLevels string `json:"log-package-levels"` + EnablePprof bool `json:"enable-pprof"` + Metrics string `json:"metrics"` + + // ForceNewCluster starts a new cluster even if previously started; unsafe. + ForceNewCluster bool `json:"force-new-cluster"` + + // UserHandlers is for registering users handlers and only used for + // embedding etcd into other applications. + // The map key is the route path for the handler, and + // you must ensure it can't be conflicted with etcd's. + UserHandlers map[string]http.Handler `json:"-"` + // ServiceRegister is for registering users' gRPC services. 
A simple usage example: + // cfg := embed.NewConfig() + // cfg.ServerRegister = func(s *grpc.Server) { + // pb.RegisterFooServer(s, &fooServer{}) + // pb.RegisterBarServer(s, &barServer{}) + // } + // embed.StartEtcd(cfg) + ServiceRegister func(*grpc.Server) `json:"-"` + + // auth + + AuthToken string `json:"auth-token"` +} + +// configYAML holds the config suitable for yaml parsing +type configYAML struct { + Config + configJSON +} + +// configJSON has file options that are translated into Config options +type configJSON struct { + LPUrlsJSON string `json:"listen-peer-urls"` + LCUrlsJSON string `json:"listen-client-urls"` + CorsJSON string `json:"cors"` + APUrlsJSON string `json:"initial-advertise-peer-urls"` + ACUrlsJSON string `json:"advertise-client-urls"` + ClientSecurityJSON securityConfig `json:"client-transport-security"` + PeerSecurityJSON securityConfig `json:"peer-transport-security"` +} + +type securityConfig struct { + CAFile string `json:"ca-file"` + CertFile string `json:"cert-file"` + KeyFile string `json:"key-file"` + CertAuth bool `json:"client-cert-auth"` + TrustedCAFile string `json:"trusted-ca-file"` + AutoTLS bool `json:"auto-tls"` +} + +// NewConfig creates a new Config populated with default values. 
+func NewConfig() *Config { + lpurl, _ := url.Parse(DefaultListenPeerURLs) + apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs) + lcurl, _ := url.Parse(DefaultListenClientURLs) + acurl, _ := url.Parse(DefaultAdvertiseClientURLs) + cfg := &Config{ + CorsInfo: &cors.CORSInfo{}, + MaxSnapFiles: DefaultMaxSnapshots, + MaxWalFiles: DefaultMaxWALs, + Name: DefaultName, + SnapCount: etcdserver.DefaultSnapCount, + MaxRequestBytes: DefaultMaxRequestBytes, + GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, + GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, + GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, + TickMs: 100, + ElectionMs: 1000, + LPUrls: []url.URL{*lpurl}, + LCUrls: []url.URL{*lcurl}, + APUrls: []url.URL{*apurl}, + ACUrls: []url.URL{*acurl}, + ClusterState: ClusterStateFlagNew, + InitialClusterToken: "etcd-cluster", + StrictReconfigCheck: true, + Metrics: "basic", + EnableV2: true, + AuthToken: "simple", + } + cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) + return cfg +} + +func ConfigFromFile(path string) (*Config, error) { + cfg := &configYAML{Config: *NewConfig()} + if err := cfg.configFromFile(path); err != nil { + return nil, err + } + return &cfg.Config, nil +} + +func (cfg *configYAML) configFromFile(path string) error { + b, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + defaultInitialCluster := cfg.InitialCluster + + err = yaml.Unmarshal(b, cfg) + if err != nil { + return err + } + + if cfg.LPUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ",")) + if err != nil { + plog.Fatalf("unexpected error setting up listen-peer-urls: %v", err) + } + cfg.LPUrls = []url.URL(u) + } + + if cfg.LCUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ",")) + if err != nil { + plog.Fatalf("unexpected error setting up listen-client-urls: %v", err) + } + cfg.LCUrls = []url.URL(u) + } + + if cfg.CorsJSON != "" { + if err := cfg.CorsInfo.Set(cfg.CorsJSON); err != nil { + 
plog.Panicf("unexpected error setting up cors: %v", err) + } + } + + if cfg.APUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ",")) + if err != nil { + plog.Fatalf("unexpected error setting up initial-advertise-peer-urls: %v", err) + } + cfg.APUrls = []url.URL(u) + } + + if cfg.ACUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ",")) + if err != nil { + plog.Fatalf("unexpected error setting up advertise-peer-urls: %v", err) + } + cfg.ACUrls = []url.URL(u) + } + + // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName + if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster { + cfg.InitialCluster = "" + } + if cfg.ClusterState == "" { + cfg.ClusterState = ClusterStateFlagNew + } + + copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) { + tls.CAFile = ysc.CAFile + tls.CertFile = ysc.CertFile + tls.KeyFile = ysc.KeyFile + tls.ClientCertAuth = ysc.CertAuth + tls.TrustedCAFile = ysc.TrustedCAFile + } + copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON) + copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON) + cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS + cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS + + return cfg.Validate() +} + +func (cfg *Config) Validate() error { + if err := checkBindURLs(cfg.LPUrls); err != nil { + return err + } + if err := checkBindURLs(cfg.LCUrls); err != nil { + return err + } + + // Check if conflicting flags are passed. 
+ nSet := 0 + for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} { + if v { + nSet++ + } + } + + if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting { + return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState) + } + + if nSet > 1 { + return ErrConflictBootstrapFlags + } + + if 5*cfg.TickMs > cfg.ElectionMs { + return fmt.Errorf("--election-timeout[%vms] should be at least as 5 times as --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs) + } + if cfg.ElectionMs > maxElectionMs { + return fmt.Errorf("--election-timeout[%vms] is too long, and should be set less than %vms", cfg.ElectionMs, maxElectionMs) + } + + // check this last since proxying in etcdmain may make this OK + if cfg.LCUrls != nil && cfg.ACUrls == nil { + return ErrUnsetAdvertiseClientURLsFlag + } + + return nil +} + +// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery. +func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) { + token = cfg.InitialClusterToken + switch { + case cfg.Durl != "": + urlsmap = types.URLsMap{} + // If using discovery, generate a temporary cluster based on + // self's advertised peer URLs + urlsmap[cfg.Name] = cfg.APUrls + token = cfg.Durl + case cfg.DNSCluster != "": + clusterStrs, cerr := srv.GetCluster("etcd-server", cfg.Name, cfg.DNSCluster, cfg.APUrls) + if cerr != nil { + plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr) + return nil, "", cerr + } + for _, s := range clusterStrs { + plog.Noticef("got bootstrap from DNS for etcd-server at %s", s) + } + clusterStr := strings.Join(clusterStrs, ",") + if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.CAFile == "" { + cfg.PeerTLSInfo.ServerName = cfg.DNSCluster + } + urlsmap, err = types.NewURLsMap(clusterStr) + // only etcd member must belong to the discovered cluster. 
+ // proxy does not need to belong to the discovered cluster. + if which == "etcd" { + if _, ok := urlsmap[cfg.Name]; !ok { + return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name) + } + } + default: + // We're statically configured, and cluster has appropriately been set. + urlsmap, err = types.NewURLsMap(cfg.InitialCluster) + } + return urlsmap, token, err +} + +func (cfg Config) InitialClusterFromName(name string) (ret string) { + if len(cfg.APUrls) == 0 { + return "" + } + n := name + if name == "" { + n = DefaultName + } + for i := range cfg.APUrls { + ret = ret + "," + n + "=" + cfg.APUrls[i].String() + } + return ret[1:] +} + +func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew } +func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) } + +func (cfg Config) defaultPeerHost() bool { + return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs +} + +func (cfg Config) defaultClientHost() bool { + return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs +} + +// UpdateDefaultClusterFromName updates cluster advertise URLs with, if available, default host, +// if advertise URLs are default values(localhost:2379,2380) AND if listen URL is 0.0.0.0. +// e.g. advertise peer URL localhost:2380 or listen peer URL 0.0.0.0:2380 +// then the advertise peer host would be updated with machine's default host, +// while keeping the listen URL's port. +// User can work around this by explicitly setting URL with 127.0.0.1. +// It returns the default hostname, if used, and the error, if any, from getting the machine's default host. +// TODO: check whether fields are set instead of whether fields have default value +func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) { + if defaultHostname == "" || defaultHostStatus != nil { + // update 'initial-cluster' when only the name is specified (e.g. 
'etcd --name=abc') + if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster { + cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) + } + return "", defaultHostStatus + } + + used := false + pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port() + if cfg.defaultPeerHost() && pip == "0.0.0.0" { + cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)} + used = true + } + // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc') + if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster { + cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) + } + + cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port() + if cfg.defaultClientHost() && cip == "0.0.0.0" { + cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)} + used = true + } + dhost := defaultHostname + if !used { + dhost = "" + } + return dhost, defaultHostStatus +} + +// checkBindURLs returns an error if any URL uses a domain name. +// TODO: return error in 3.2.0 +func checkBindURLs(urls []url.URL) error { + for _, url := range urls { + if url.Scheme == "unix" || url.Scheme == "unixs" { + continue + } + host, _, err := net.SplitHostPort(url.Host) + if err != nil { + return err + } + if host == "localhost" { + // special case for local address + // TODO: support /etc/hosts ? + continue + } + if net.ParseIP(host) == nil { + return fmt.Errorf("expected IP in URL for binding (%s)", url.String()) + } + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/embed/doc.go b/vendor/github.com/coreos/etcd/embed/doc.go new file mode 100644 index 00000000000..c555aa58eba --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/doc.go @@ -0,0 +1,45 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package embed provides bindings for embedding an etcd server in a program. + +Launch an embedded etcd server using the configuration defaults: + + import ( + "log" + "time" + + "github.com/coreos/etcd/embed" + ) + + func main() { + cfg := embed.NewConfig() + cfg.Dir = "default.etcd" + e, err := embed.StartEtcd(cfg) + if err != nil { + log.Fatal(err) + } + defer e.Close() + select { + case <-e.Server.ReadyNotify(): + log.Printf("Server is ready!") + case <-time.After(60 * time.Second): + e.Server.Stop() // trigger a shutdown + log.Printf("Server took too long to start!") + } + log.Fatal(<-e.Err()) + } +*/ +package embed diff --git a/vendor/github.com/coreos/etcd/embed/etcd.go b/vendor/github.com/coreos/etcd/embed/etcd.go new file mode 100644 index 00000000000..6d92f11ea66 --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/etcd.go @@ -0,0 +1,453 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package embed + +import ( + "context" + "crypto/tls" + "fmt" + "io/ioutil" + defaultLog "log" + "net" + "net/http" + "path/filepath" + "sync" + "time" + + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" + "github.com/coreos/etcd/etcdserver/api/v2http" + "github.com/coreos/etcd/pkg/cors" + "github.com/coreos/etcd/pkg/debugutil" + runtimeutil "github.com/coreos/etcd/pkg/runtime" + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/rafthttp" + "github.com/coreos/pkg/capnslog" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed") + +const ( + // internal fd usage includes disk usage and transport usage. + // To read/write snapshot, snap pkg needs 1. In normal case, wal pkg needs + // at most 2 to read/lock/write WALs. One case that it needs to 2 is to + // read all logs after some snapshot index, which locates at the end of + // the second last and the head of the last. For purging, it needs to read + // directory, so it needs 1. For fd monitor, it needs 1. + // For transport, rafthttp builds two long-polling connections and at most + // four temporary connections with each member. There are at most 9 members + // in a cluster, so it should reserve 96. + // For the safety, we set the total reserved number to 150. + reservedInternalFDNum = 150 +) + +// Etcd contains a running etcd server and its listeners. +type Etcd struct { + Peers []*peerListener + Clients []net.Listener + Server *etcdserver.EtcdServer + + cfg Config + stopc chan struct{} + errc chan error + sctxs map[string]*serveCtx + + closeOnce sync.Once +} + +type peerListener struct { + net.Listener + serve func() error + close func(context.Context) error +} + +// StartEtcd launches the etcd server and HTTP handlers for client/server communication. +// The returned Etcd.Server is not guaranteed to have joined the cluster. 
Wait +// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use. +func StartEtcd(inCfg *Config) (e *Etcd, err error) { + if err = inCfg.Validate(); err != nil { + return nil, err + } + serving := false + e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})} + cfg := &e.cfg + defer func() { + if e == nil || err == nil { + return + } + if !serving { + // errored before starting gRPC server for serveCtx.grpcServerC + for _, sctx := range e.sctxs { + close(sctx.grpcServerC) + } + } + e.Close() + e = nil + }() + + if e.Peers, err = startPeerListeners(cfg); err != nil { + return + } + if e.sctxs, err = startClientListeners(cfg); err != nil { + return + } + for _, sctx := range e.sctxs { + e.Clients = append(e.Clients, sctx.l) + } + + var ( + urlsmap types.URLsMap + token string + ) + + if !isMemberInitialized(cfg) { + urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd") + if err != nil { + return e, fmt.Errorf("error setting up initial cluster: %v", err) + } + } + + srvcfg := &etcdserver.ServerConfig{ + Name: cfg.Name, + ClientURLs: cfg.ACUrls, + PeerURLs: cfg.APUrls, + DataDir: cfg.Dir, + DedicatedWALDir: cfg.WalDir, + SnapCount: cfg.SnapCount, + MaxSnapFiles: cfg.MaxSnapFiles, + MaxWALFiles: cfg.MaxWalFiles, + InitialPeerURLsMap: urlsmap, + InitialClusterToken: token, + DiscoveryURL: cfg.Durl, + DiscoveryProxy: cfg.Dproxy, + NewCluster: cfg.IsNewCluster(), + ForceNewCluster: cfg.ForceNewCluster, + PeerTLSInfo: cfg.PeerTLSInfo, + TickMs: cfg.TickMs, + ElectionTicks: cfg.ElectionTicks(), + AutoCompactionRetention: cfg.AutoCompactionRetention, + QuotaBackendBytes: cfg.QuotaBackendBytes, + MaxRequestBytes: cfg.MaxRequestBytes, + StrictReconfigCheck: cfg.StrictReconfigCheck, + ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth, + AuthToken: cfg.AuthToken, + } + + if e.Server, err = etcdserver.NewServer(srvcfg); err != nil { + return + } + + // configure peer handlers after rafthttp.Transport started + ph := 
etcdhttp.NewPeerHandler(e.Server) + for _, p := range e.Peers { + srv := &http.Server{ + Handler: ph, + ReadTimeout: 5 * time.Minute, + ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error + } + + l := p.Listener + p.serve = func() error { return srv.Serve(l) } + p.close = func(ctx context.Context) error { + // gracefully shutdown http.Server + // close open listeners, idle connections + // until context cancel or time-out + return srv.Shutdown(ctx) + } + } + + // buffer channel so goroutines on closed connections won't wait forever + e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs)) + + e.Server.Start() + if err = e.serve(); err != nil { + return + } + serving = true + return +} + +// Config returns the current configuration. +func (e *Etcd) Config() Config { + return e.cfg +} + +func (e *Etcd) Close() { + e.closeOnce.Do(func() { close(e.stopc) }) + + timeout := 2 * time.Second + if e.Server != nil { + timeout = e.Server.Cfg.ReqTimeout() + } + for _, sctx := range e.sctxs { + for gs := range sctx.grpcServerC { + ch := make(chan struct{}) + go func() { + defer close(ch) + // close listeners to stop accepting new connections, + // will block on any existing transports + gs.GracefulStop() + }() + // wait until all pending RPCs are finished + select { + case <-ch: + case <-time.After(timeout): + // took too long, manually close open transports + // e.g. 
watch streams + gs.Stop() + // concurrent GracefulStop should be interrupted + <-ch + } + } + } + + for _, sctx := range e.sctxs { + sctx.cancel() + } + for i := range e.Clients { + if e.Clients[i] != nil { + e.Clients[i].Close() + } + } + + // close rafthttp transports + if e.Server != nil { + e.Server.Stop() + } + + // close all idle connections in peer handler (wait up to 1-second) + for i := range e.Peers { + if e.Peers[i] != nil && e.Peers[i].close != nil { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + e.Peers[i].close(ctx) + cancel() + } + } +} + +func (e *Etcd) Err() <-chan error { return e.errc } + +func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { + if cfg.PeerAutoTLS && cfg.PeerTLSInfo.Empty() { + phosts := make([]string, len(cfg.LPUrls)) + for i, u := range cfg.LPUrls { + phosts[i] = u.Host + } + cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts) + if err != nil { + plog.Fatalf("could not get certs (%v)", err) + } + } else if cfg.PeerAutoTLS { + plog.Warningf("ignoring peer auto TLS since certs given") + } + + if !cfg.PeerTLSInfo.Empty() { + plog.Infof("peerTLS: %s", cfg.PeerTLSInfo) + } + + peers = make([]*peerListener, len(cfg.LPUrls)) + defer func() { + if err == nil { + return + } + for i := range peers { + if peers[i] != nil && peers[i].close != nil { + plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String()) + peers[i].close(context.Background()) + } + } + }() + + for i, u := range cfg.LPUrls { + if u.Scheme == "http" { + if !cfg.PeerTLSInfo.Empty() { + plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String()) + } + if cfg.PeerTLSInfo.ClientCertAuth { + plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. 
Ignored client cert auth for this url.", u.String()) + } + } + peers[i] = &peerListener{close: func(context.Context) error { return nil }} + peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo) + if err != nil { + return nil, err + } + // once serve, overwrite with 'http.Server.Shutdown' + peers[i].close = func(context.Context) error { + return peers[i].Listener.Close() + } + plog.Info("listening for peers on ", u.String()) + } + return peers, nil +} + +func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { + if cfg.ClientAutoTLS && cfg.ClientTLSInfo.Empty() { + chosts := make([]string, len(cfg.LCUrls)) + for i, u := range cfg.LCUrls { + chosts[i] = u.Host + } + cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts) + if err != nil { + plog.Fatalf("could not get certs (%v)", err) + } + } else if cfg.ClientAutoTLS { + plog.Warningf("ignoring client auto TLS since certs given") + } + + if cfg.EnablePprof { + plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf) + } + + sctxs = make(map[string]*serveCtx) + for _, u := range cfg.LCUrls { + sctx := newServeCtx() + + if u.Scheme == "http" || u.Scheme == "unix" { + if !cfg.ClientTLSInfo.Empty() { + plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String()) + } + if cfg.ClientTLSInfo.ClientCertAuth { + plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. 
Ignored client cert auth for this url.", u.String()) + } + } + if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() { + return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String()) + } + + proto := "tcp" + addr := u.Host + if u.Scheme == "unix" || u.Scheme == "unixs" { + proto = "unix" + addr = u.Host + u.Path + } + + sctx.secure = u.Scheme == "https" || u.Scheme == "unixs" + sctx.insecure = !sctx.secure + if oldctx := sctxs[addr]; oldctx != nil { + oldctx.secure = oldctx.secure || sctx.secure + oldctx.insecure = oldctx.insecure || sctx.insecure + continue + } + + if sctx.l, err = net.Listen(proto, addr); err != nil { + return nil, err + } + // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking + // hosts that disable ipv6. So, use the address given by the user. + sctx.addr = addr + + if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil { + if fdLimit <= reservedInternalFDNum { + plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum) + } + sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum)) + } + + if proto == "tcp" { + if sctx.l, err = transport.NewKeepAliveListener(sctx.l, "tcp", nil); err != nil { + return nil, err + } + } + + plog.Info("listening for client requests on ", u.Host) + defer func() { + if err != nil { + sctx.l.Close() + plog.Info("stopping listening for client requests on ", u.Host) + } + }() + for k := range cfg.UserHandlers { + sctx.userHandlers[k] = cfg.UserHandlers[k] + } + sctx.serviceRegister = cfg.ServiceRegister + if cfg.EnablePprof || cfg.Debug { + sctx.registerPprof() + } + if cfg.Debug { + sctx.registerTrace() + } + sctxs[addr] = sctx + } + return sctxs, nil +} + +func (e *Etcd) serve() (err error) { + var ctlscfg *tls.Config + if !e.cfg.ClientTLSInfo.Empty() { + plog.Infof("ClientTLS: %s", 
e.cfg.ClientTLSInfo) + if ctlscfg, err = e.cfg.ClientTLSInfo.ServerConfig(); err != nil { + return err + } + } + + if e.cfg.CorsInfo.String() != "" { + plog.Infof("cors = %s", e.cfg.CorsInfo) + } + + // Start the peer server in a goroutine + for _, pl := range e.Peers { + go func(l *peerListener) { + e.errHandler(l.serve()) + }(pl) + } + + // Start a client server goroutine for each listen address + var h http.Handler + if e.Config().EnableV2 { + h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout()) + } else { + mux := http.NewServeMux() + etcdhttp.HandleBasic(mux, e.Server) + h = mux + } + h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo}) + + gopts := []grpc.ServerOption{} + if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: e.cfg.GRPCKeepAliveMinTime, + PermitWithoutStream: false, + })) + } + if e.cfg.GRPCKeepAliveInterval > time.Duration(0) && + e.cfg.GRPCKeepAliveTimeout > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: e.cfg.GRPCKeepAliveInterval, + Timeout: e.cfg.GRPCKeepAliveTimeout, + })) + } + for _, sctx := range e.sctxs { + go func(s *serveCtx) { + e.errHandler(s.serve(e.Server, ctlscfg, h, e.errHandler, gopts...)) + }(sctx) + } + return nil +} + +func (e *Etcd) errHandler(err error) { + select { + case <-e.stopc: + return + default: + } + select { + case <-e.stopc: + case e.errc <- err: + } +} diff --git a/vendor/github.com/coreos/etcd/embed/serve.go b/vendor/github.com/coreos/etcd/embed/serve.go new file mode 100644 index 00000000000..3627f88a958 --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/serve.go @@ -0,0 +1,236 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package embed + +import ( + "crypto/tls" + "io/ioutil" + defaultLog "log" + "net" + "net/http" + "strings" + + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3client" + "github.com/coreos/etcd/etcdserver/api/v3election" + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + v3electiongw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw" + "github.com/coreos/etcd/etcdserver/api/v3lock" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + v3lockgw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw" + "github.com/coreos/etcd/etcdserver/api/v3rpc" + etcdservergw "github.com/coreos/etcd/etcdserver/etcdserverpb/gw" + "github.com/coreos/etcd/pkg/debugutil" + + "github.com/cockroachdb/cmux" + gw "github.com/grpc-ecosystem/grpc-gateway/runtime" + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +type serveCtx struct { + l net.Listener + addr string + secure bool + insecure bool + + ctx context.Context + cancel context.CancelFunc + + userHandlers map[string]http.Handler + serviceRegister func(*grpc.Server) + grpcServerC chan *grpc.Server +} + +func newServeCtx() *serveCtx { + ctx, cancel := context.WithCancel(context.Background()) + return &serveCtx{ctx: ctx, cancel: cancel, userHandlers: make(map[string]http.Handler), + grpcServerC: make(chan *grpc.Server, 2), // in case sctx.insecure,sctx.secure true + } +} + +// serve accepts incoming connections on the listener l, +// creating a new service goroutine for 
each. The service goroutines +// read requests and then call handler to reply to them. +func (sctx *serveCtx) serve( + s *etcdserver.EtcdServer, + tlscfg *tls.Config, + handler http.Handler, + errHandler func(error), + gopts ...grpc.ServerOption) error { + logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) + <-s.ReadyNotify() + plog.Info("ready to serve client requests") + + m := cmux.New(sctx.l) + v3c := v3client.New(s) + servElection := v3election.NewElectionServer(v3c) + servLock := v3lock.NewLockServer(v3c) + + if sctx.insecure { + gs := v3rpc.Server(s, nil, gopts...) + sctx.grpcServerC <- gs + v3electionpb.RegisterElectionServer(gs, servElection) + v3lockpb.RegisterLockServer(gs, servLock) + if sctx.serviceRegister != nil { + sctx.serviceRegister(gs) + } + grpcl := m.Match(cmux.HTTP2()) + go func() { errHandler(gs.Serve(grpcl)) }() + + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + gwmux, err := sctx.registerGateway(opts) + if err != nil { + return err + } + + httpmux := sctx.createMux(gwmux, handler) + + srvhttp := &http.Server{ + Handler: httpmux, + ErrorLog: logger, // do not log user error + } + httpl := m.Match(cmux.HTTP1()) + go func() { errHandler(srvhttp.Serve(httpl)) }() + plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String()) + } + + if sctx.secure { + gs := v3rpc.Server(s, tlscfg, gopts...) 
+ sctx.grpcServerC <- gs + v3electionpb.RegisterElectionServer(gs, servElection) + v3lockpb.RegisterLockServer(gs, servLock) + if sctx.serviceRegister != nil { + sctx.serviceRegister(gs) + } + handler = grpcHandlerFunc(gs, handler) + + dtls := tlscfg.Clone() + // trust local server + dtls.InsecureSkipVerify = true + creds := credentials.NewTLS(dtls) + opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)} + gwmux, err := sctx.registerGateway(opts) + if err != nil { + return err + } + + tlsl := tls.NewListener(m.Match(cmux.Any()), tlscfg) + // TODO: add debug flag; enable logging when debug flag is set + httpmux := sctx.createMux(gwmux, handler) + + srv := &http.Server{ + Handler: httpmux, + TLSConfig: tlscfg, + ErrorLog: logger, // do not log user error + } + go func() { errHandler(srv.Serve(tlsl)) }() + + plog.Infof("serving client requests on %s", sctx.l.Addr().String()) + } + + close(sctx.grpcServerC) + return m.Serve() +} + +// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC +// connections or otherHandler otherwise. Copied from cockroachdb. +func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler { + if otherHandler == nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + grpcServer.ServeHTTP(w, r) + }) + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { + grpcServer.ServeHTTP(w, r) + } else { + otherHandler.ServeHTTP(w, r) + } + }) +} + +type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error + +func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) { + ctx := sctx.ctx + conn, err := grpc.DialContext(ctx, sctx.addr, opts...) 
+ if err != nil { + return nil, err + } + gwmux := gw.NewServeMux() + + handlers := []registerHandlerFunc{ + etcdservergw.RegisterKVHandler, + etcdservergw.RegisterWatchHandler, + etcdservergw.RegisterLeaseHandler, + etcdservergw.RegisterClusterHandler, + etcdservergw.RegisterMaintenanceHandler, + etcdservergw.RegisterAuthHandler, + v3lockgw.RegisterLockHandler, + v3electiongw.RegisterElectionHandler, + } + for _, h := range handlers { + if err := h(ctx, gwmux, conn); err != nil { + return nil, err + } + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr) + } + }() + + return gwmux, nil +} + +func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux { + httpmux := http.NewServeMux() + for path, h := range sctx.userHandlers { + httpmux.Handle(path, h) + } + + httpmux.Handle("/v3alpha/", gwmux) + if handler != nil { + httpmux.Handle("/", handler) + } + return httpmux +} + +func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) { + if sctx.userHandlers[s] != nil { + plog.Warningf("path %s already registered by user handler", s) + return + } + sctx.userHandlers[s] = h +} + +func (sctx *serveCtx) registerPprof() { + for p, h := range debugutil.PProfHandlers() { + sctx.registerUserHandler(p, h) + } +} + +func (sctx *serveCtx) registerTrace() { + reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) } + sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf)) + evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) } + sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf)) +} diff --git a/vendor/github.com/coreos/etcd/embed/util.go b/vendor/github.com/coreos/etcd/embed/util.go new file mode 100644 index 00000000000..168e031389d --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/util.go @@ -0,0 +1,30 @@ +// Copyright 2016 The etcd Authors +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package embed + +import ( + "path/filepath" + + "github.com/coreos/etcd/wal" +) + +func isMemberInitialized(cfg *Config) bool { + waldir := cfg.WalDir + if waldir == "" { + waldir = filepath.Join(cfg.Dir, "member", "wal") + } + + return wal.Exist(waldir) +} diff --git a/vendor/github.com/coreos/etcd/error/error.go b/vendor/github.com/coreos/etcd/error/error.go index 8cf83cc716a..b541a628b87 100644 --- a/vendor/github.com/coreos/etcd/error/error.go +++ b/vendor/github.com/coreos/etcd/error/error.go @@ -154,9 +154,10 @@ func (e Error) StatusCode() int { return status } -func (e Error) WriteTo(w http.ResponseWriter) { +func (e Error) WriteTo(w http.ResponseWriter) error { w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) w.Header().Set("Content-Type", "application/json") w.WriteHeader(e.StatusCode()) - fmt.Fprintln(w, e.toJsonString()) + _, err := w.Write([]byte(e.toJsonString() + "\n")) + return err } diff --git a/vendor/github.com/coreos/etcd/etcdserver/BUILD b/vendor/github.com/coreos/etcd/etcdserver/BUILD index e05450a1bee..ebbd59bb139 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/BUILD @@ -6,6 +6,7 @@ go_library( "apply.go", "apply_auth.go", "apply_v2.go", + "backend.go", "cluster_util.go", "config.go", "consistent_index.go", @@ -40,7 +41,6 @@ go_library( "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", 
"//vendor/github.com/coreos/etcd/pkg/contention:go_default_library", "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/idutil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/BUILD index ab95a4f3277..5913cf0d91a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/api/BUILD @@ -29,7 +29,11 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:all-srcs", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3client:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:all-srcs", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go index ab8cee7cf89..5e2de58e9a1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go @@ -33,11 +33,10 @@ var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api") // capabilityMaps is a static map of version to capability map. - // the base capabilities is the set of capability 2.0 supports. 
capabilityMaps = map[string]map[Capability]bool{ - "2.3.0": {AuthCapability: true}, "3.0.0": {AuthCapability: true, V3rpcCapability: true}, "3.1.0": {AuthCapability: true, V3rpcCapability: true}, + "3.2.0": {AuthCapability: true, V3rpcCapability: true}, } enableMapMu sync.RWMutex @@ -48,7 +47,10 @@ var ( ) func init() { - enabledMap = make(map[Capability]bool) + enabledMap = map[Capability]bool{ + AuthCapability: true, + V3rpcCapability: true, + } } // UpdateCapability updates the enabledMap when the cluster version increases. diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD new file mode 100644 index 00000000000..323b6e08dc0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "base.go", + "peer.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api/etcdhttp", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/error:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library", + "//vendor/github.com/coreos/etcd/raft:go_default_library", + "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", + "//vendor/github.com/coreos/etcd/version:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + 
name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go new file mode 100644 index 00000000000..283b32dbf95 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go @@ -0,0 +1,186 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdhttp + +import ( + "encoding/json" + "expvar" + "fmt" + "net/http" + "strings" + "time" + + etcdErr "github.com/coreos/etcd/error" + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" + "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/logutil" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/version" + "github.com/coreos/pkg/capnslog" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp") + mlog = logutil.NewMergeLogger(plog) +) + +const ( + configPath = "/config" + metricsPath = "/metrics" + healthPath = "/health" + varsPath = "/debug/vars" + versionPath = "/version" +) + +// HandleBasic adds handlers to a mux for serving JSON etcd client requests +// that do not access the v2 store. +func HandleBasic(mux *http.ServeMux, server *etcdserver.EtcdServer) { + mux.HandleFunc(varsPath, serveVars) + mux.HandleFunc(configPath+"/local/log", logHandleFunc) + mux.Handle(metricsPath, prometheus.Handler()) + mux.Handle(healthPath, healthHandler(server)) + mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) +} + +func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "GET") { + return + } + if uint64(server.Leader()) == raft.None { + http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) + return + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { + http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"health": "true"}`)) + } +} + +func versionHandler(c api.Cluster, fn 
func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + v := c.Version() + if v != nil { + fn(w, r, v.String()) + } else { + fn(w, r, "not_decided") + } + } +} + +func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { + if !allowMethod(w, r, "GET") { + return + } + vs := version.Versions{ + Server: version.Version, + Cluster: clusterV, + } + + w.Header().Set("Content-Type", "application/json") + b, err := json.Marshal(&vs) + if err != nil { + plog.Panicf("cannot marshal versions to json (%v)", err) + } + w.Write(b) +} + +func logHandleFunc(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "PUT") { + return + } + + in := struct{ Level string }{} + + d := json.NewDecoder(r.Body) + if err := d.Decode(&in); err != nil { + WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) + return + } + + logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) + if err != nil { + WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) + return + } + + plog.Noticef("globalLogLevel set to %q", logl.String()) + capnslog.SetGlobalLogLevel(logl) + w.WriteHeader(http.StatusNoContent) +} + +func serveVars(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "GET") { + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} + +func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool { + if m == r.Method { + return true + } + w.Header().Set("Allow", m) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return false +} + +// WriteError logs and writes the given Error to the ResponseWriter +// If Error is an etcdErr, it is rendered 
to the ResponseWriter +// Otherwise, it is assumed to be a StatusInternalServerError +func WriteError(w http.ResponseWriter, r *http.Request, err error) { + if err == nil { + return + } + switch e := err.(type) { + case *etcdErr.Error: + e.WriteTo(w) + case *httptypes.HTTPError: + if et := e.WriteTo(w); et != nil { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } + default: + switch err { + case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: + mlog.MergeError(err) + default: + mlog.MergeErrorf("got unexpected response error (%v)", err) + } + herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") + if et := herr.WriteTo(w); et != nil { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go similarity index 97% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go rename to vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go index a1abadba8e7..721bae3c600 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package v2http +package etcdhttp import ( "encoding/json" @@ -61,7 +61,7 @@ type peerMembersHandler struct { } func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { + if !allowMethod(w, r, "GET") { return } w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD index ab856d75cac..680ea8c0f36 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD @@ -9,7 +9,6 @@ go_library( "doc.go", "http.go", "metrics.go", - "peer.go", ], importpath = "github.com/coreos/etcd/etcdserver/api/v2http", visibility = ["//visibility:public"], @@ -17,18 +16,15 @@ go_library( "//vendor/github.com/coreos/etcd/error:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/auth:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/stats:go_default_library", - "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library", "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", "//vendor/github.com/coreos/etcd/store:go_default_library", - "//vendor/github.com/coreos/etcd/version:go_default_library", 
"//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/github.com/jonboulle/clockwork:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go index 038f5417e67..aa1e71ec329 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go @@ -17,7 +17,6 @@ package v2http import ( "encoding/json" "errors" - "expvar" "fmt" "io/ioutil" "net/http" @@ -30,38 +29,36 @@ import ( etcdErr "github.com/coreos/etcd/error" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" "github.com/coreos/etcd/etcdserver/auth" "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - "github.com/coreos/pkg/capnslog" "github.com/jonboulle/clockwork" - "github.com/prometheus/client_golang/prometheus" "golang.org/x/net/context" ) const ( - authPrefix = "/v2/auth" - keysPrefix = "/v2/keys" - deprecatedMachinesPrefix = "/v2/machines" - membersPrefix = "/v2/members" - statsPrefix = "/v2/stats" - varsPath = "/debug/vars" - metricsPath = "/metrics" - healthPath = "/health" - versionPath = "/version" - configPath = "/config" + authPrefix = "/v2/auth" + keysPrefix = "/v2/keys" + machinesPrefix = "/v2/machines" + membersPrefix = "/v2/members" + statsPrefix = "/v2/stats" ) // NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests. 
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler { - sec := auth.NewStore(server, timeout) + mux := http.NewServeMux() + etcdhttp.HandleBasic(mux, server) + handleV2(mux, server, timeout) + return requestLogger(mux) +} +func handleV2(mux *http.ServeMux, server *etcdserver.EtcdServer, timeout time.Duration) { + sec := auth.NewStore(server, timeout) kh := &keysHandler{ sec: sec, server: server, @@ -84,34 +81,23 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, } - dmh := &deprecatedMachinesHandler{ - cluster: server.Cluster(), - } + mah := &machinesHandler{cluster: server.Cluster()} sech := &authHandler{ sec: sec, cluster: server.Cluster(), clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, } - - mux := http.NewServeMux() mux.HandleFunc("/", http.NotFound) - mux.Handle(healthPath, healthHandler(server)) - mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) mux.Handle(keysPrefix, kh) mux.Handle(keysPrefix+"/", kh) mux.HandleFunc(statsPrefix+"/store", sh.serveStore) mux.HandleFunc(statsPrefix+"/self", sh.serveSelf) mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader) - mux.HandleFunc(varsPath, serveVars) - mux.HandleFunc(configPath+"/local/log", logHandleFunc) - mux.Handle(metricsPath, prometheus.Handler()) mux.Handle(membersPrefix, mh) mux.Handle(membersPrefix+"/", mh) - mux.Handle(deprecatedMachinesPrefix, dmh) + mux.Handle(machinesPrefix, mah) handleAuth(mux, sech) - - return requestLogger(mux) } type keysHandler struct { @@ -170,11 +156,11 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } -type deprecatedMachinesHandler struct { +type machinesHandler struct { cluster api.Cluster } -func (h *deprecatedMachinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r.Method, 
"GET", "HEAD") { return } @@ -234,7 +220,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } now := h.clock.Now() m := membership.NewMember("", req.PeerURLs, "", &now) - err := h.server.AddMember(ctx, *m) + _, err := h.server.AddMember(ctx, *m) switch { case err == membership.ErrIDExists || err == membership.ErrPeerURLexists: writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) @@ -255,7 +241,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !ok { return } - err := h.server.RemoveMember(ctx, uint64(id)) + _, err := h.server.RemoveMember(ctx, uint64(id)) switch { case err == membership.ErrIDRemoved: writeError(w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id))) @@ -280,7 +266,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ID: id, RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()}, } - err := h.server.UpdateMember(ctx, m) + _, err := h.server.UpdateMember(ctx, m) switch { case err == membership.ErrPeerURLexists: writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) @@ -321,103 +307,13 @@ func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) { } stats := h.stats.LeaderStats() if stats == nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) + etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) return } w.Header().Set("Content-Type", "application/json") w.Write(stats) } -func serveVars(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, 
"\n}\n") -} - -func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if uint64(server.Leader()) == raft.None { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"health": "true"}`)) - } -} - -func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - v := c.Version() - if v != nil { - fn(w, r, v.String()) - } else { - fn(w, r, "not_decided") - } - } -} - -func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { - if !allowMethod(w, r.Method, "GET") { - return - } - vs := version.Versions{ - Server: version.Version, - Cluster: clusterV, - } - - w.Header().Set("Content-Type", "application/json") - b, err := json.Marshal(&vs) - if err != nil { - plog.Panicf("cannot marshal versions to json (%v)", err) - } - w.Write(b) -} - -func logHandleFunc(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "PUT") { - return - } - - in := struct{ Level string }{} - - d := json.NewDecoder(r.Body) - if err := d.Decode(&in); err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) - return - } - - logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) - if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) - return - } - - plog.Noticef("globalLogLevel set to %q", logl.String()) - capnslog.SetGlobalLogLevel(logl) - w.WriteHeader(http.StatusNoContent) -} - // parseKeyRequest converts 
a received http.Request on keysPrefix to // a server Request, performing validation of supplied fields as appropriate. // If any validation fails, an empty Request and non-nil error is returned. diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go index 62c99e19d4a..589c172dbbb 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go @@ -20,12 +20,11 @@ import ( "strings" "time" - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/auth" "github.com/coreos/etcd/pkg/logutil" + "github.com/coreos/pkg/capnslog" ) @@ -39,37 +38,18 @@ var ( mlog = logutil.NewMergeLogger(plog) ) -// writeError logs and writes the given Error to the ResponseWriter -// If Error is an etcdErr, it is rendered to the ResponseWriter -// Otherwise, it is assumed to be a StatusInternalServerError func writeError(w http.ResponseWriter, r *http.Request, err error) { if err == nil { return } - switch e := err.(type) { - case *etcdErr.Error: - e.WriteTo(w) - case *httptypes.HTTPError: - if et := e.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } - case auth.Error: + if e, ok := err.(auth.Error); ok { herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error()) if et := herr.WriteTo(w); et != nil { plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) } - default: - switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: - mlog.MergeError(err) - default: - mlog.MergeErrorf("got unexpected response error (%v)", err) - } - herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") 
- if et := herr.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } + return } + etcdhttp.WriteError(w, r, err) } // allowMethod verifies that the given method is one of the allowed methods, diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD new file mode 100644 index 00000000000..5fc2b9dc11c --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "v3client.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api/v3client", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", + "//vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go new file mode 100644 index 00000000000..310715f5cd7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go @@ -0,0 +1,45 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3client provides clientv3 interfaces from an etcdserver. +// +// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New: +// +// import ( +// "context" +// +// "github.com/coreos/etcd/embed" +// "github.com/coreos/etcd/etcdserver/api/v3client" +// ) +// +// ... +// +// // create an embedded EtcdServer from the default configuration +// cfg := embed.NewConfig() +// cfg.Dir = "default.etcd" +// e, err := embed.StartEtcd(cfg) +// if err != nil { +// // handle error! +// } +// +// // wrap the EtcdServer with v3client +// cli := v3client.New(e.Server) +// +// // use like an ordinary clientv3 +// resp, err := cli.Put(context.TODO(), "some-key", "it works!") +// if err != nil { +// // handle error! +// } +// +package v3client diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go new file mode 100644 index 00000000000..cc4147d2f0c --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go @@ -0,0 +1,67 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v3client + +import ( + "time" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc" + "github.com/coreos/etcd/proxy/grpcproxy/adapter" + + "golang.org/x/net/context" +) + +// New creates a clientv3 client that wraps an in-process EtcdServer. Instead +// of making gRPC calls through sockets, the client makes direct function calls +// to the etcd server through its api/v3rpc function interfaces. +func New(s *etcdserver.EtcdServer) *clientv3.Client { + c := clientv3.NewCtxClient(context.Background()) + + kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s)) + c.KV = clientv3.NewKVFromKVClient(kvc) + + lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s)) + c.Lease = clientv3.NewLeaseFromLeaseClient(lc, time.Second) + + wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s)) + c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc)} + + mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s)) + c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc) + + clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s)) + c.Cluster = clientv3.NewClusterFromClusterClient(clc) + + // TODO: implement clientv3.Auth interface? + + return c +} + +// BlankContext implements Stringer on a context so the ctx string doesn't +// depend on the context's WithValue data, which tends to be unsynchronized +// (e.g., x/net/trace), causing ctx.String() to throw data races. +type blankContext struct{ context.Context } + +func (*blankContext) String() string { return "(blankCtx)" } + +// watchWrapper wraps clientv3 watch calls to blank out the context +// to avoid races on trace data. 
+type watchWrapper struct{ clientv3.Watcher } + +func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { + return ww.Watcher.Watch(&blankContext{ctx}, key, opts...) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD new file mode 100644 index 00000000000..55965b382df --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "election.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api/v3election", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go new file mode 100644 index 00000000000..d6fefd74150 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3election provides a v3 election service from an etcdserver. +package v3election diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go new file mode 100644 index 00000000000..f9061c07926 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go @@ -0,0 +1,123 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3election + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/concurrency" + epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" +) + +type electionServer struct { + c *clientv3.Client +} + +func NewElectionServer(c *clientv3.Client) epb.ElectionServer { + return &electionServer{c} +} + +func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) { + s, err := es.session(ctx, req.Lease) + if err != nil { + return nil, err + } + e := concurrency.NewElection(s, string(req.Name)) + if err = e.Campaign(ctx, string(req.Value)); err != nil { + return nil, err + } + return &epb.CampaignResponse{ + Header: e.Header(), + Leader: &epb.LeaderKey{ + Name: req.Name, + Key: []byte(e.Key()), + Rev: e.Rev(), + Lease: int64(s.Lease()), + }, + }, nil +} + +func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) { + s, err := es.session(ctx, req.Leader.Lease) + if err != nil { + return nil, err + } + e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) + if err := e.Proclaim(ctx, string(req.Value)); err != nil { + return nil, err + } + return &epb.ProclaimResponse{Header: e.Header()}, nil +} + +func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error { + s, err := es.session(stream.Context(), -1) + if err != nil { + return err + } + e := concurrency.NewElection(s, string(req.Name)) + ch := e.Observe(stream.Context()) + for stream.Context().Err() == nil { + select { + case <-stream.Context().Done(): + case resp, ok := <-ch: + if !ok { + return nil + } + lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]} + if err := stream.Send(lresp); err != nil { + return err + } + } + } + return stream.Context().Err() +} + +func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) 
(*epb.LeaderResponse, error) { + s, err := es.session(ctx, -1) + if err != nil { + return nil, err + } + l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx) + if lerr != nil { + return nil, lerr + } + return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil +} + +func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) { + s, err := es.session(ctx, req.Leader.Lease) + if err != nil { + return nil, err + } + e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) + if err := e.Resign(ctx); err != nil { + return nil, err + } + return &epb.ResignResponse{Header: e.Header()}, nil +} + +func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) { + s, err := concurrency.NewSession( + es.c, + concurrency.WithLease(clientv3.LeaseID(lease)), + concurrency.WithContext(ctx), + ) + if err != nil { + return nil, err + } + s.Orphan() + return s, nil +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD new file mode 100644 index 00000000000..e46a6a322e4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["v3election.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["v3election.pb.go"], + importpath = "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + 
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD new file mode 100644 index 00000000000..41b80256081 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["v3election.pb.gw.go"], + importpath = "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = 
["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go new file mode 100644 index 00000000000..ac00cbea983 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go @@ -0,0 +1,313 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: etcdserver/api/v3election/v3electionpb/v3election.proto + +/* +Package v3electionpb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package gw + +import ( + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.CampaignRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var 
protoReq v3electionpb.ProclaimRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.LeaderRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) { + var protoReq v3electionpb.LeaderRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.Observe(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + +func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.ResignRequest + var metadata runtime.ServerMetadata + + 
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterElectionHandlerFromEndpoint is same as RegisterElectionHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterElectionHandler(ctx, mux, conn) +} + +// RegisterElectionHandler registers the http handlers for service Election to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn)) +} + +// RegisterElectionHandler registers the http handlers for service Election to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ElectionClient" to call the correct interceptors. 
+func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error { + + mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "campaign"}, "")) + + pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "proclaim"}, "")) + + pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "leader"}, "")) + + pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "observe"}, "")) + + pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "resign"}, "")) +) + +var ( + forward_Election_Campaign_0 = runtime.ForwardResponseMessage + + forward_Election_Proclaim_0 = runtime.ForwardResponseMessage + + forward_Election_Leader_0 = runtime.ForwardResponseMessage + + forward_Election_Observe_0 = runtime.ForwardResponseStream + + forward_Election_Resign_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go new file mode 100644 index 00000000000..92acb1469e9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go @@ -0,0 +1,2098 @@ +// Code generated by protoc-gen-gogo. +// source: v3election.proto +// DO NOT EDIT! + +/* + Package v3electionpb is a generated protocol buffer package. 
+ + It is generated from these files: + v3election.proto + + It has these top-level messages: + CampaignRequest + CampaignResponse + LeaderKey + LeaderRequest + LeaderResponse + ResignRequest + ResignResponse + ProclaimRequest + ProclaimResponse +*/ +package v3electionpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + + _ "google.golang.org/genproto/googleapis/api/annotations" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type CampaignRequest struct { + // name is the election's identifier for the campaign. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // lease is the ID of the lease attached to leadership of the election. If the + // lease expires or is revoked before resigning leadership, then the + // leadership is transferred to the next campaigner, if any. + Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` + // value is the initial proclaimed value set when the campaigner wins the + // election. 
+ Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *CampaignRequest) Reset() { *m = CampaignRequest{} } +func (m *CampaignRequest) String() string { return proto.CompactTextString(m) } +func (*CampaignRequest) ProtoMessage() {} +func (*CampaignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{0} } + +func (m *CampaignRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *CampaignRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *CampaignRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type CampaignResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // leader describes the resources used for holding leadereship of the election. + Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"` +} + +func (m *CampaignResponse) Reset() { *m = CampaignResponse{} } +func (m *CampaignResponse) String() string { return proto.CompactTextString(m) } +func (*CampaignResponse) ProtoMessage() {} +func (*CampaignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{1} } + +func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *CampaignResponse) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +type LeaderKey struct { + // name is the election identifier that correponds to the leadership key. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // key is an opaque key representing the ownership of the election. If the key + // is deleted, then leadership is lost. + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // rev is the creation revision of the key. 
It can be used to test for ownership + // of an election during transactions by testing the key's creation revision + // matches rev. + Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"` + // lease is the lease ID of the election leader. + Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *LeaderKey) Reset() { *m = LeaderKey{} } +func (m *LeaderKey) String() string { return proto.CompactTextString(m) } +func (*LeaderKey) ProtoMessage() {} +func (*LeaderKey) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{2} } + +func (m *LeaderKey) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LeaderKey) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *LeaderKey) GetRev() int64 { + if m != nil { + return m.Rev + } + return 0 +} + +func (m *LeaderKey) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +type LeaderRequest struct { + // name is the election identifier for the leadership information. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *LeaderRequest) Reset() { *m = LeaderRequest{} } +func (m *LeaderRequest) String() string { return proto.CompactTextString(m) } +func (*LeaderRequest) ProtoMessage() {} +func (*LeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{3} } + +func (m *LeaderRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +type LeaderResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // kv is the key-value pair representing the latest leader update. 
+ Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` +} + +func (m *LeaderResponse) Reset() { *m = LeaderResponse{} } +func (m *LeaderResponse) String() string { return proto.CompactTextString(m) } +func (*LeaderResponse) ProtoMessage() {} +func (*LeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{4} } + +func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaderResponse) GetKv() *mvccpb.KeyValue { + if m != nil { + return m.Kv + } + return nil +} + +type ResignRequest struct { + // leader is the leadership to relinquish by resignation. + Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"` +} + +func (m *ResignRequest) Reset() { *m = ResignRequest{} } +func (m *ResignRequest) String() string { return proto.CompactTextString(m) } +func (*ResignRequest) ProtoMessage() {} +func (*ResignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{5} } + +func (m *ResignRequest) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +type ResignResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *ResignResponse) Reset() { *m = ResignResponse{} } +func (m *ResignResponse) String() string { return proto.CompactTextString(m) } +func (*ResignResponse) ProtoMessage() {} +func (*ResignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{6} } + +func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type ProclaimRequest struct { + // leader is the leadership hold on the election. + Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"` + // value is an update meant to overwrite the leader's current value. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} } +func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) } +func (*ProclaimRequest) ProtoMessage() {} +func (*ProclaimRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{7} } + +func (m *ProclaimRequest) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +func (m *ProclaimRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type ProclaimResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} } +func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) } +func (*ProclaimResponse) ProtoMessage() {} +func (*ProclaimResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{8} } + +func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*CampaignRequest)(nil), "v3electionpb.CampaignRequest") + proto.RegisterType((*CampaignResponse)(nil), "v3electionpb.CampaignResponse") + proto.RegisterType((*LeaderKey)(nil), "v3electionpb.LeaderKey") + proto.RegisterType((*LeaderRequest)(nil), "v3electionpb.LeaderRequest") + proto.RegisterType((*LeaderResponse)(nil), "v3electionpb.LeaderResponse") + proto.RegisterType((*ResignRequest)(nil), "v3electionpb.ResignRequest") + proto.RegisterType((*ResignResponse)(nil), "v3electionpb.ResignResponse") + proto.RegisterType((*ProclaimRequest)(nil), "v3electionpb.ProclaimRequest") + proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Election service + +type ElectionClient interface { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) + // Proclaim updates the leader's posted value with a new value. + Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) + // Leader returns the current election proclamation, if any. + Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. + Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) +} + +type electionClient struct { + cc *grpc.ClientConn +} + +func NewElectionClient(cc *grpc.ClientConn) ElectionClient { + return &electionClient{cc} +} + +func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) { + out := new(CampaignResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) { + out := new(ProclaimResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) { + out := new(LeaderResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Election_serviceDesc.Streams[0], c.cc, "/v3electionpb.Election/Observe", opts...) + if err != nil { + return nil, err + } + x := &electionObserveClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Election_ObserveClient interface { + Recv() (*LeaderResponse, error) + grpc.ClientStream +} + +type electionObserveClient struct { + grpc.ClientStream +} + +func (x *electionObserveClient) Recv() (*LeaderResponse, error) { + m := new(LeaderResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) { + out := new(ResignResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Election service + +type ElectionServer interface { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + Campaign(context.Context, *CampaignRequest) (*CampaignResponse, error) + // Proclaim updates the leader's posted value with a new value. + Proclaim(context.Context, *ProclaimRequest) (*ProclaimResponse, error) + // Leader returns the current election proclamation, if any. + Leader(context.Context, *LeaderRequest) (*LeaderResponse, error) + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + Observe(*LeaderRequest, Election_ObserveServer) error + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. 
+ Resign(context.Context, *ResignRequest) (*ResignResponse, error) +} + +func RegisterElectionServer(s *grpc.Server, srv ElectionServer) { + s.RegisterService(&_Election_serviceDesc, srv) +} + +func _Election_Campaign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CampaignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Campaign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Campaign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Campaign(ctx, req.(*CampaignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Proclaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProclaimRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Proclaim(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Proclaim", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Proclaim(ctx, req.(*ProclaimRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Leader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Leader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Leader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Leader(ctx, 
req.(*LeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Observe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(LeaderRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ElectionServer).Observe(m, &electionObserveServer{stream}) +} + +type Election_ObserveServer interface { + Send(*LeaderResponse) error + grpc.ServerStream +} + +type electionObserveServer struct { + grpc.ServerStream +} + +func (x *electionObserveServer) Send(m *LeaderResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Election_Resign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Resign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Resign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Resign(ctx, req.(*ResignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Election_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v3electionpb.Election", + HandlerType: (*ElectionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Campaign", + Handler: _Election_Campaign_Handler, + }, + { + MethodName: "Proclaim", + Handler: _Election_Proclaim_Handler, + }, + { + MethodName: "Leader", + Handler: _Election_Leader_Handler, + }, + { + MethodName: "Resign", + Handler: _Election_Resign_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Observe", + Handler: _Election_Observe_Handler, + ServerStreams: true, + }, + }, + Metadata: "v3election.proto", +} + +func (m *CampaignRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *CampaignRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Lease != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *CampaignResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CampaignResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Leader != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n2, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *LeaderKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaderKey) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.Rev != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Rev)) + } + if m.Lease != 0 { + dAtA[i] = 0x20 + i++ + 
i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) + } + return i, nil +} + +func (m *LeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaderRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *LeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaderResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n3, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Kv != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Kv.Size())) + n4, err := m.Kv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *ResignRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResignRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Leader != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n5, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *ResignResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *ResignResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n6, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProclaimRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Leader != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n7, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProclaimResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n8, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func encodeFixed64V3Election(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32V3Election(dAtA []byte, offset int, v uint32) int { + 
dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintV3Election(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CampaignRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovV3Election(uint64(m.Lease)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *CampaignResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *LeaderKey) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Rev != 0 { + n += 1 + sovV3Election(uint64(m.Rev)) + } + if m.Lease != 0 { + n += 1 + sovV3Election(uint64(m.Lease)) + } + return n +} + +func (m *LeaderRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *LeaderResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Kv != nil { + l = m.Kv.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *ResignRequest) Size() (n int) { + var l int + _ = l + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *ResignResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func 
(m *ProclaimRequest) Size() (n int) { + var l int + _ = l + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *ProclaimResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func sovV3Election(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozV3Election(x uint64) (n int) { + return sovV3Election(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CampaignRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CampaignRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CampaignRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CampaignResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CampaignResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CampaignResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &LeaderKey{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaderKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaderKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaderKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Rev", wireType) + } + m.Rev = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Rev |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kv == nil { + m.Kv = &mvccpb.KeyValue{} + } + if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResignRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResignRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResignRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader 
= &LeaderKey{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResignResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResignResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResignResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProclaimRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProclaimRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &LeaderKey{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProclaimResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProclaimResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProclaimResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipV3Election(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthV3Election + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipV3Election(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") 
+} + +var ( + ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("v3election.proto", fileDescriptorV3Election) } + +var fileDescriptorV3Election = []byte{ + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x65, 0x9d, 0x10, 0xca, 0x90, 0xb6, 0x96, 0x55, 0x89, 0x34, 0xa4, 0x26, 0xda, 0x02, 0xaa, + 0x72, 0xf0, 0xa2, 0x86, 0x53, 0x4e, 0x08, 0x04, 0xaa, 0x54, 0x24, 0xc0, 0x07, 0x04, 0xc7, 0x8d, + 0x3b, 0x4a, 0xa2, 0x38, 0xde, 0xc5, 0x4e, 0x2d, 0xe5, 0xca, 0x2f, 0x70, 0xe1, 0x33, 0xf8, 0x0c, + 0x8e, 0x48, 0xfc, 0x00, 0x0a, 0x7c, 0x08, 0xda, 0x5d, 0x1b, 0x3b, 0x6e, 0x88, 0x50, 0x73, 0xb1, + 0xc6, 0x33, 0xcf, 0xf3, 0xe6, 0xbd, 0x9d, 0x35, 0xd8, 0x69, 0x1f, 0x43, 0x0c, 0xe6, 0x13, 0x11, + 0x79, 0x32, 0x16, 0x73, 0xe1, 0x34, 0x8b, 0x8c, 0x1c, 0xb6, 0x0f, 0x46, 0x62, 0x24, 0x74, 0x81, + 0xa9, 0xc8, 0x60, 0xda, 0x8f, 0x70, 0x1e, 0x5c, 0x30, 0xf5, 0x48, 0x30, 0x4e, 0x31, 0x2e, 0x85, + 0x72, 0xc8, 0x62, 0x19, 0x64, 0xb8, 0x43, 0x8d, 0x9b, 0xa5, 0x41, 0xa0, 0x1f, 0x72, 0xc8, 0xa6, + 0x69, 0x56, 0xea, 0x8c, 0x84, 0x18, 0x85, 0xc8, 0xb8, 0x9c, 0x30, 0x1e, 0x45, 0x62, 0xce, 0x15, + 0x63, 0x62, 0xaa, 0xf4, 0x2d, 0xec, 0x3f, 0xe7, 0x33, 0xc9, 0x27, 0xa3, 0xc8, 0xc7, 0x8f, 0x97, + 0x98, 0xcc, 0x1d, 0x07, 0xea, 0x11, 0x9f, 0x61, 0x8b, 0x74, 0xc9, 0x49, 0xd3, 0xd7, 0xb1, 0x73, + 0x00, 0x37, 0x43, 0xe4, 0x09, 0xb6, 0xac, 0x2e, 0x39, 0xa9, 0xf9, 0xe6, 0x45, 0x65, 0x53, 0x1e, + 0x5e, 0x62, 0xab, 0xa6, 0xa1, 0xe6, 0x85, 0x2e, 0xc0, 0x2e, 0x5a, 0x26, 0x52, 0x44, 0x09, 0x3a, + 0x4f, 0xa0, 0x31, 0x46, 0x7e, 0x81, 0xb1, 0xee, 0x7a, 0xe7, 0xb4, 0xe3, 0x95, 0x85, 0x78, 0x39, + 0xee, 0x4c, 0x63, 0xfc, 0x0c, 0xeb, 0x30, 0x68, 0x84, 0xe6, 0x2b, 0x4b, 0x7f, 0x75, 0xd7, 0x2b, + 0x5b, 0xe6, 0xbd, 0xd2, 0xb5, 0x73, 0x5c, 0xf8, 0x19, 0x8c, 0x7e, 0x80, 0xdb, 
0x7f, 0x93, 0x6b, + 0x75, 0xd8, 0x50, 0x9b, 0xe2, 0x42, 0xb7, 0x6b, 0xfa, 0x2a, 0x54, 0x99, 0x18, 0x53, 0xad, 0xa0, + 0xe6, 0xab, 0xb0, 0xd0, 0x5a, 0x2f, 0x69, 0xa5, 0xc7, 0xb0, 0x6b, 0x5a, 0x6f, 0xb0, 0x89, 0x8e, + 0x61, 0x2f, 0x07, 0x6d, 0x25, 0xbc, 0x0b, 0xd6, 0x34, 0xcd, 0x44, 0xdb, 0x9e, 0x39, 0x51, 0xef, + 0x1c, 0x17, 0xef, 0x94, 0xc1, 0xbe, 0x35, 0x4d, 0xe9, 0x53, 0xd8, 0xf5, 0x31, 0x29, 0x9d, 0x5a, + 0xe1, 0x15, 0xf9, 0x3f, 0xaf, 0x5e, 0xc2, 0x5e, 0xde, 0x61, 0x9b, 0x59, 0xe9, 0x7b, 0xd8, 0x7f, + 0x13, 0x8b, 0x20, 0xe4, 0x93, 0xd9, 0x75, 0x67, 0x29, 0x16, 0xc9, 0x2a, 0x2f, 0xd2, 0x19, 0xd8, + 0x45, 0xe7, 0x6d, 0x66, 0x3c, 0xfd, 0x5a, 0x87, 0x9d, 0x17, 0xd9, 0x00, 0x8e, 0x84, 0x9d, 0x7c, + 0x3f, 0x9d, 0xa3, 0xd5, 0xc9, 0x2a, 0x57, 0xa1, 0xed, 0xfe, 0xab, 0x6c, 0x58, 0xe8, 0xc3, 0x4f, + 0x3f, 0x7e, 0x7f, 0xb6, 0xee, 0xd3, 0x36, 0x4b, 0xfb, 0x3c, 0x94, 0x63, 0xce, 0x72, 0x34, 0x0b, + 0x32, 0xec, 0x80, 0xf4, 0x14, 0x63, 0x2e, 0xa4, 0xca, 0x58, 0xb1, 0xae, 0xca, 0x58, 0xd5, 0xbf, + 0x89, 0x51, 0x66, 0x58, 0xc5, 0x38, 0x86, 0x86, 0x71, 0xd9, 0xb9, 0xb7, 0xce, 0xfb, 0x9c, 0xad, + 0xb3, 0xbe, 0x98, 0x71, 0x1d, 0x6b, 0xae, 0x23, 0xda, 0xba, 0xca, 0x65, 0xce, 0x4d, 0x31, 0x85, + 0x70, 0xeb, 0xf5, 0x50, 0xfb, 0xbf, 0x0d, 0xd5, 0x03, 0x4d, 0xe5, 0xd2, 0xc3, 0xab, 0x54, 0xc2, + 0x74, 0x1f, 0x90, 0xde, 0x63, 0xa2, 0x74, 0x99, 0xa5, 0xad, 0x92, 0xad, 0x5c, 0x86, 0x2a, 0xd9, + 0xea, 0x9e, 0x6f, 0xd2, 0x15, 0x6b, 0xe4, 0x80, 0xf4, 0x9e, 0xd9, 0xdf, 0x96, 0x2e, 0xf9, 0xbe, + 0x74, 0xc9, 0xcf, 0xa5, 0x4b, 0xbe, 0xfc, 0x72, 0x6f, 0x0c, 0x1b, 0xfa, 0x8f, 0xd9, 0xff, 0x13, + 0x00, 0x00, 0xff, 0xff, 0xfc, 0x4d, 0x5a, 0x40, 0xca, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto new file mode 100644 index 00000000000..ebf6c88f7fa --- /dev/null +++ 
b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto @@ -0,0 +1,119 @@ +syntax = "proto3"; +package v3electionpb; + +import "gogoproto/gogo.proto"; +import "etcd/etcdserver/etcdserverpb/rpc.proto"; +import "etcd/mvcc/mvccpb/kv.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// The election service exposes client-side election facilities as a gRPC interface. +service Election { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + rpc Campaign(CampaignRequest) returns (CampaignResponse) { + option (google.api.http) = { + post: "/v3alpha/election/campaign" + body: "*" + }; + } + // Proclaim updates the leader's posted value with a new value. + rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) { + option (google.api.http) = { + post: "/v3alpha/election/proclaim" + body: "*" + }; + } + // Leader returns the current election proclamation, if any. + rpc Leader(LeaderRequest) returns (LeaderResponse) { + option (google.api.http) = { + post: "/v3alpha/election/leader" + body: "*" + }; + } + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + rpc Observe(LeaderRequest) returns (stream LeaderResponse) { + option (google.api.http) = { + post: "/v3alpha/election/observe" + body: "*" + }; + } + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. + rpc Resign(ResignRequest) returns (ResignResponse) { + option (google.api.http) = { + post: "/v3alpha/election/resign" + body: "*" + }; + } +} + +message CampaignRequest { + // name is the election's identifier for the campaign. 
+ bytes name = 1; + // lease is the ID of the lease attached to leadership of the election. If the + // lease expires or is revoked before resigning leadership, then the + // leadership is transferred to the next campaigner, if any. + int64 lease = 2; + // value is the initial proclaimed value set when the campaigner wins the + // election. + bytes value = 3; +} + +message CampaignResponse { + etcdserverpb.ResponseHeader header = 1; + // leader describes the resources used for holding leadership of the election. + LeaderKey leader = 2; +} + +message LeaderKey { + // name is the election identifier that corresponds to the leadership key. + bytes name = 1; + // key is an opaque key representing the ownership of the election. If the key + // is deleted, then leadership is lost. + bytes key = 2; + // rev is the creation revision of the key. It can be used to test for ownership + // of an election during transactions by testing the key's creation revision + // matches rev. + int64 rev = 3; + // lease is the lease ID of the election leader. + int64 lease = 4; +} + +message LeaderRequest { + // name is the election identifier for the leadership information. + bytes name = 1; +} + +message LeaderResponse { + etcdserverpb.ResponseHeader header = 1; + // kv is the key-value pair representing the latest leader update. + mvccpb.KeyValue kv = 2; +} + +message ResignRequest { + // leader is the leadership to relinquish by resignation. + LeaderKey leader = 1; +} + +message ResignResponse { + etcdserverpb.ResponseHeader header = 1; +} + +message ProclaimRequest { + // leader is the leadership hold on the election. + LeaderKey leader = 1; + // value is an update meant to overwrite the leader's current value. 
+ bytes value = 2; +} + +message ProclaimResponse { + etcdserverpb.ResponseHeader header = 1; +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD new file mode 100644 index 00000000000..a528567d38f --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "lock.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api/v3lock", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go new file mode 100644 index 00000000000..e0a1008abc9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3lock provides a v3 locking service from an etcdserver. +package v3lock diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go new file mode 100644 index 00000000000..66465bf13f6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go @@ -0,0 +1,56 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3lock + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/concurrency" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" +) + +type lockServer struct { + c *clientv3.Client +} + +func NewLockServer(c *clientv3.Client) v3lockpb.LockServer { + return &lockServer{c} +} + +func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { + s, err := concurrency.NewSession( + ls.c, + concurrency.WithLease(clientv3.LeaseID(req.Lease)), + concurrency.WithContext(ctx), + ) + if err != nil { + return nil, err + } + s.Orphan() + m := concurrency.NewMutex(s, string(req.Name)) + if err = m.Lock(ctx); err != nil { + return nil, err + } + return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil +} + +func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { + resp, err := ls.c.Delete(ctx, string(req.Key)) + if err != nil { + return nil, err + } + return &v3lockpb.UnlockResponse{Header: resp.Header}, nil +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD new file mode 100644 index 00000000000..abe9cd04aeb --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["v3lock.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["v3lock.pb.go"], + importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + 
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD new file mode 100644 index 00000000000..9d54f77260e --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["v3lock.pb.gw.go"], + importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go new file mode 100644 index 00000000000..5aef4756dfe --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: etcdserver/api/v3lock/v3lockpb/v3lock.proto + +/* +Package v3lockpb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package gw + +import ( + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3lockpb.LockRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3lockpb.UnlockRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil 
{ + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterLockHandlerFromEndpoint is same as RegisterLockHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterLockHandler(ctx, mux, conn) +} + +// RegisterLockHandler registers the http handlers for service Lock to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn)) +} + +// RegisterLockHandler registers the http handlers for service Lock to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LockClient" to call the correct interceptors. 
+func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error { + + mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3alpha", "lock"}, "")) + + pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lock", "unlock"}, "")) +) + +var ( + forward_Lock_Lock_0 = runtime.ForwardResponseMessage + + forward_Lock_Unlock_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go new file mode 100644 index 00000000000..dcf2bad4019 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go @@ -0,0 +1,978 @@ +// Code generated by protoc-gen-gogo. +// source: v3lock.proto +// DO NOT EDIT! + +/* + Package v3lockpb is a generated protocol buffer package. + + It is generated from these files: + v3lock.proto + + It has these top-level messages: + LockRequest + LockResponse + UnlockRequest + UnlockResponse +*/ +package v3lockpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + _ "google.golang.org/genproto/googleapis/api/annotations" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LockRequest struct { + // name is the identifier for the distributed shared lock to be acquired. 
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // lease is the ID of the lease that will be attached to ownership of the + // lock. If the lease expires or is revoked and currently holds the lock, + // the lock is automatically released. Calls to Lock with the same lease will + // be treated as a single acquistion; locking twice with the same lease is a + // no-op. + Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *LockRequest) Reset() { *m = LockRequest{} } +func (m *LockRequest) String() string { return proto.CompactTextString(m) } +func (*LockRequest) ProtoMessage() {} +func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} } + +func (m *LockRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LockRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +type LockResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // key is a key that will exist on etcd for the duration that the Lock caller + // owns the lock. Users should not modify this key or the lock may exhibit + // undefined behavior. + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *LockResponse) Reset() { *m = LockResponse{} } +func (m *LockResponse) String() string { return proto.CompactTextString(m) } +func (*LockResponse) ProtoMessage() {} +func (*LockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{1} } + +func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LockResponse) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type UnlockRequest struct { + // key is the lock ownership key granted by Lock. 
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *UnlockRequest) Reset() { *m = UnlockRequest{} } +func (m *UnlockRequest) String() string { return proto.CompactTextString(m) } +func (*UnlockRequest) ProtoMessage() {} +func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} } + +func (m *UnlockRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type UnlockResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *UnlockResponse) Reset() { *m = UnlockResponse{} } +func (m *UnlockResponse) String() string { return proto.CompactTextString(m) } +func (*UnlockResponse) ProtoMessage() {} +func (*UnlockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{3} } + +func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest") + proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse") + proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest") + proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Lock service + +type LockClient interface { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership. 
The lock is held until Unlock is called on the key or the + // lease associate with the owner expires. + Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) + // Unlock takes a key returned by Lock and releases the hold on lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. + Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) +} + +type lockClient struct { + cc *grpc.ClientConn +} + +func NewLockClient(cc *grpc.ClientConn) LockClient { + return &lockClient{cc} +} + +func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) { + out := new(LockResponse) + err := grpc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) { + out := new(UnlockResponse) + err := grpc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Lock service + +type LockServer interface { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership. The lock is held until Unlock is called on the key or the + // lease associate with the owner expires. + Lock(context.Context, *LockRequest) (*LockResponse, error) + // Unlock takes a key returned by Lock and releases the hold on lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. 
+ Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error) +} + +func RegisterLockServer(s *grpc.Server, srv LockServer) { + s.RegisterService(&_Lock_serviceDesc, srv) +} + +func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LockServer).Lock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3lockpb.Lock/Lock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LockServer).Lock(ctx, req.(*LockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UnlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LockServer).Unlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3lockpb.Lock/Unlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Lock_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v3lockpb.Lock", + HandlerType: (*LockServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Lock", + Handler: _Lock_Lock_Handler, + }, + { + MethodName: "Unlock", + Handler: _Lock_Unlock_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "v3lock.proto", +} + +func (m *LockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int 
+ _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Lease != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease)) + } + return i, nil +} + +func (m *LockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + return i, nil +} + +func (m *UnlockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + return i, nil +} + +func (m *UnlockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeFixed64V3Lock(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = 
uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32V3Lock(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *LockRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Lock(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovV3Lock(uint64(m.Lease)) + } + return n +} + +func (m *LockResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Lock(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovV3Lock(uint64(l)) + } + return n +} + +func (m *UnlockRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovV3Lock(uint64(l)) + } + return n +} + +func (m *UnlockResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Lock(uint64(l)) + } + return n +} + +func sovV3Lock(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozV3Lock(x uint64) (n int) { + return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UnlockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnlockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UnlockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
+ if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipV3Lock(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthV3Lock + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipV3Lock(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow") +) + 
+func init() { proto.RegisterFile("v3lock.proto", fileDescriptorV3Lock) } + +var fileDescriptorV3Lock = []byte{ + // 336 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9, + 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44, + 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39, + 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, + 0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13, + 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc, + 0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, + 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b, + 0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6, + 0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a, + 0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f, + 0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41, + 0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a, + 0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x17, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42, + 0x21, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31, + 0x4d, 0x49, 0xb6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0xe2, 0x4a, 0x42, 0xfa, 0x65, 0xc6, 0x89, 0x39, + 0x05, 0x19, 0x89, 0xfa, 0x20, 0x55, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x86, 0x8b, 0x0d, 0xe2, + 0x4c, 0x21, 0x71, 0x84, 0x01, 0x28, 0x7e, 0x93, 0x92, 0xc0, 0x94, 0x80, 0x9a, 0x2d, 0x0f, 0x36, + 0x5b, 0x52, 0x49, 0x04, 0xd5, 0xec, 0xd2, 0x3c, 0xa8, 0xe9, 0x4e, 0x02, 
0x27, 0x1e, 0xc9, 0x31, + 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0, + 0x18, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xa0, 0x26, 0x28, 0x47, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto new file mode 100644 index 00000000000..3e92a6ec277 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; +package v3lockpb; + +import "gogoproto/gogo.proto"; +import "etcd/etcdserver/etcdserverpb/rpc.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// The lock service exposes client-side locking facilities as a gRPC interface. +service Lock { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership. The lock is held until Unlock is called on the key or the + // lease associate with the owner expires. + rpc Lock(LockRequest) returns (LockResponse) { + option (google.api.http) = { + post: "/v3alpha/lock/lock" + body: "*" + }; + } + + // Unlock takes a key returned by Lock and releases the hold on lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. + rpc Unlock(UnlockRequest) returns (UnlockResponse) { + option (google.api.http) = { + post: "/v3alpha/lock/unlock" + body: "*" + }; + } +} + +message LockRequest { + // name is the identifier for the distributed shared lock to be acquired. + bytes name = 1; + // lease is the ID of the lease that will be attached to ownership of the + // lock. 
If the lease expires or is revoked and currently holds the lock, + // the lock is automatically released. Calls to Lock with the same lease will + // be treated as a single acquistion; locking twice with the same lease is a + // no-op. + int64 lease = 2; +} + +message LockResponse { + etcdserverpb.ResponseHeader header = 1; + // key is a key that will exist on etcd for the duration that the Lock caller + // owns the lock. Users should not modify this key or the lock may exhibit + // undefined behavior. + bytes key = 2; +} + +message UnlockRequest { + // key is the lock ownership key granted by Lock. + bytes key = 1; +} + +message UnlockResponse { + etcdserverpb.ResponseHeader header = 1; +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go index 88174e3bac2..ed70887b59d 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go @@ -16,6 +16,7 @@ package v3rpc import ( "crypto/tls" + "math" "github.com/coreos/etcd/etcdserver" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -24,11 +25,17 @@ import ( "google.golang.org/grpc/grpclog" ) +const ( + grpcOverheadBytes = 512 * 1024 + maxStreams = math.MaxUint32 + maxSendBytes = math.MaxInt32 +) + func init() { grpclog.SetLogger(plog) } -func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { +func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server { var opts []grpc.ServerOption opts = append(opts, grpc.CustomCodec(&codec{})) if tls != nil { @@ -36,8 +43,11 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { } opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) + opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) + opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes)) 
+ opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) + grpcServer := grpc.NewServer(append(opts, gopts...)...) - grpcServer := grpc.NewServer(opts...) pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go index 29aef2914a5..de9470a8905 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go @@ -45,7 +45,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { return nil, rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromIncomingContext(ctx) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { @@ -66,7 +66,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor return rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromContext(ss.Context()) + md, ok := metadata.FromIncomingContext(ss.Context()) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go index 6ea7bbacde0..d0220e03a26 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go @@ -134,6 +134,12 @@ func checkPutRequest(r *pb.PutRequest) error { if len(r.Key) == 0 { return rpctypes.ErrGRPCEmptyKey } + if r.IgnoreValue && len(r.Value) != 0 { + return rpctypes.ErrGRPCValueProvided + } + if r.IgnoreLease && r.Lease != 0 { + return rpctypes.ErrGRPCLeaseProvided + } 
return nil } @@ -246,8 +252,8 @@ func checkRequestOp(u *pb.RequestOp) error { return checkDeleteRequest(uv.RequestDeleteRange) } default: - // empty op - return nil + // empty op / nil entry + return rpctypes.ErrGRPCKeyNotFound } return nil } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go index be6e20b97fb..7356633f8a6 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go @@ -18,6 +18,7 @@ import ( "io" "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/lease" "golang.org/x/net/context" @@ -53,20 +54,45 @@ func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeReques func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { resp, err := ls.le.LeaseTimeToLive(ctx, rr) - if err != nil { + if err != nil && err != lease.ErrLeaseNotFound { return nil, togRPCError(err) } + if err == lease.ErrLeaseNotFound { + resp = &pb.LeaseTimeToLiveResponse{ + Header: &pb.ResponseHeader{}, + ID: rr.ID, + TTL: -1, + } + } ls.hdr.fill(resp.Header) return resp, nil } -func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { +func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) { + errc := make(chan error, 1) + go func() { + errc <- ls.leaseKeepAlive(stream) + }() + select { + case err = <-errc: + case <-stream.Context().Done(): + // the only server-side cancellation is noleader for now. 
+ err = stream.Context().Err() + if err == context.Canceled { + err = rpctypes.ErrGRPCNoLeader + } + } + return err +} + +func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { for { req, err := stream.Recv() if err == io.EOF { return nil } if err != nil { + plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) return err } @@ -92,6 +118,7 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro resp.TTL = ttl err = stream.Send(resp) if err != nil { + plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) return err } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go index af29ab3b71e..3657d036082 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go @@ -47,6 +47,7 @@ type RaftStatusGetter interface { } type AuthGetter interface { + AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) AuthStore() auth.AuthStore } @@ -152,7 +153,7 @@ type authMaintenanceServer struct { } func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error { - authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx) + authInfo, err := ams.ag.AuthInfoFromCtx(ctx) if err != nil { return err } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go index bcd5dac5183..91a59389b87 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go @@ -48,21 +48,24 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) now := time.Now() m := membership.NewMember("", urls, "", &now) - if err = cs.server.AddMember(ctx, *m); err != nil { - return nil, togRPCError(err) + membs, 
merr := cs.server.AddMember(ctx, *m) + if merr != nil { + return nil, togRPCError(merr) } return &pb.MemberAddResponse{ - Header: cs.header(), - Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, + Header: cs.header(), + Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, + Members: membersToProtoMembers(membs), }, nil } func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - if err := cs.server.RemoveMember(ctx, r.ID); err != nil { + membs, err := cs.server.RemoveMember(ctx, r.ID) + if err != nil { return nil, togRPCError(err) } - return &pb.MemberRemoveResponse{Header: cs.header()}, nil + return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil } func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { @@ -70,15 +73,23 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq ID: types.ID(r.ID), RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs}, } - if err := cs.server.UpdateMember(ctx, m); err != nil { + membs, err := cs.server.UpdateMember(ctx, m) + if err != nil { return nil, togRPCError(err) } - return &pb.MemberUpdateResponse{Header: cs.header()}, nil + return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil } func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - membs := cs.cluster.Members() + membs := membersToProtoMembers(cs.cluster.Members()) + return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil +} +func (cs *ClusterServer) header() *pb.ResponseHeader { + return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} +} + +func membersToProtoMembers(membs []*membership.Member) []*pb.Member { protoMembs := make([]*pb.Member, len(membs)) for i 
:= range membs { protoMembs[i] = &pb.Member{ @@ -88,10 +99,5 @@ func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest ClientURLs: membs[i].ClientURLs, } } - - return &pb.MemberListResponse{Header: cs.header(), Members: protoMembs}, nil -} - -func (cs *ClusterServer) header() *pb.ResponseHeader { - return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} + return protoMembs } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD index 38cd71455d5..e1ada36c303 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD @@ -12,6 +12,7 @@ go_library( deps = [ "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go index 5a3cfc0a0db..bd17179e997 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -17,16 +17,20 @@ package rpctypes import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) var ( // server-side error - ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") - ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") - ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") - ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") - ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, 
"etcdserver: mvcc: required revision is a future revision") - ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") + ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") + ErrGRPCKeyNotFound = grpc.Errorf(codes.InvalidArgument, "etcdserver: key not found") + ErrGRPCValueProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: value is provided") + ErrGRPCLeaseProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: lease is provided") + ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") + ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") + ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") + ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") + ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found") ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists") @@ -53,6 +57,7 @@ var ( ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role") ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled") ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token") + ErrGRPCInvalidAuthMgmt = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management") ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") @@ -63,7 +68,11 @@ var ( ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster") errStringToError = map[string]error{ - 
grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + grpc.ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, + grpc.ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, + grpc.ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, + grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, @@ -95,6 +104,7 @@ var ( grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, + grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, @@ -106,12 +116,15 @@ var ( } // client-side error - ErrEmptyKey = Error(ErrGRPCEmptyKey) - ErrTooManyOps = Error(ErrGRPCTooManyOps) - ErrDuplicateKey = Error(ErrGRPCDuplicateKey) - ErrCompacted = Error(ErrGRPCCompacted) - ErrFutureRev = Error(ErrGRPCFutureRev) - ErrNoSpace = Error(ErrGRPCNoSpace) + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrKeyNotFound = Error(ErrGRPCKeyNotFound) + ErrValueProvided = Error(ErrGRPCValueProvided) + ErrLeaseProvided = Error(ErrGRPCLeaseProvided) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = Error(ErrGRPCNoSpace) ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) ErrLeaseExist = Error(ErrGRPCLeaseExist) @@ -138,6 +151,7 @@ var ( ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) + ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) ErrNoLeader = Error(ErrGRPCNoLeader) ErrNotCapable = Error(ErrGRPCNotCapable) @@ -175,3 +189,10 @@ func Error(err error) error { } 
return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)} } + +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go index 5a057ed040d..8d38d9bd18f 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go @@ -42,8 +42,6 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCCompacted case mvcc.ErrFutureRev: return rpctypes.ErrGRPCFutureRev - case lease.ErrLeaseNotFound: - return rpctypes.ErrGRPCLeaseNotFound case etcdserver.ErrRequestTooLarge: return rpctypes.ErrGRPCRequestTooLarge case etcdserver.ErrNoSpace: @@ -63,6 +61,8 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCTimeoutDueToConnectionLost case etcdserver.ErrUnhealthy: return rpctypes.ErrGRPCUnhealthy + case etcdserver.ErrKeyNotFound: + return rpctypes.ErrGRPCKeyNotFound case lease.ErrLeaseNotFound: return rpctypes.ErrGRPCLeaseNotFound @@ -95,6 +95,8 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCAuthNotEnabled case auth.ErrInvalidAuthToken: return rpctypes.ErrGRPCInvalidAuthToken + case auth.ErrInvalidAuthMgmt: + return rpctypes.ErrGRPCInvalidAuthMgmt default: return grpc.Errorf(codes.Unknown, err.Error()) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go index f0215531dee..e328f6694a3 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/context" + "github.com/coreos/etcd/auth" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -33,6 +34,8 @@ type watchServer struct { 
memberID int64 raftTimer etcdserver.RaftTimer watchable mvcc.WatchableKV + + ag AuthGetter } func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { @@ -41,6 +44,7 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { memberID: int64(s.ID()), raftTimer: s, watchable: s.Watchable(), + ag: s, } } @@ -101,6 +105,8 @@ type serverWatchStream struct { // wg waits for the send loop to complete wg sync.WaitGroup + + ag AuthGetter } func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { @@ -118,6 +124,8 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { progress: make(map[mvcc.WatchID]bool), prevKV: make(map[mvcc.WatchID]bool), closec: make(chan struct{}), + + ag: ws.ag, } sws.wg.Add(1) @@ -133,6 +141,7 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { // deadlock when calling sws.close(). go func() { if rerr := sws.recvLoop(); rerr != nil { + plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) errc <- rerr } }() @@ -150,6 +159,19 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { return err } +func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool { + authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) + if err != nil { + return false + } + if authInfo == nil { + // if auth is enabled, IsRangePermitted() can cause an error + authInfo = &auth.AuthInfo{} + } + + return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil +} + func (sws *serverWatchStream) recvLoop() error { for { req, err := sws.gRPCStream.Recv() @@ -171,10 +193,32 @@ func (sws *serverWatchStream) recvLoop() error { // \x00 is the smallest key creq.Key = []byte{0} } + if len(creq.RangeEnd) == 0 { + // force nil since watchstream.Watch distinguishes + // between nil and []byte{} for single key / >= + creq.RangeEnd = nil + } if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 { // support >= key queries 
creq.RangeEnd = []byte{} } + + if !sws.isWatchPermitted(creq) { + wr := &pb.WatchResponse{ + Header: sws.newResponseHeader(sws.watchStream.Rev()), + WatchId: -1, + Canceled: true, + Created: true, + CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(), + } + + select { + case sws.ctrlStream <- wr: + case <-sws.closec: + } + return nil + } + filters := FiltersFromRequest(creq) wsrev := sws.watchStream.Rev() @@ -294,6 +338,7 @@ func (sws *serverWatchStream) sendLoop() { mvcc.ReportEventReceived(len(evs)) if err := sws.gRPCStream.Send(wr); err != nil { + plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error()) return } @@ -310,6 +355,7 @@ func (sws *serverWatchStream) sendLoop() { } if err := sws.gRPCStream.Send(c); err != nil { + plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error()) return } @@ -325,6 +371,7 @@ func (sws *serverWatchStream) sendLoop() { for _, v := range pending[wid] { mvcc.ReportEventReceived(len(v.Events)) if err := sws.gRPCStream.Send(v); err != nil { + plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error()) return } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go index e4bf35bc47e..0be93c52b6f 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply.go +++ b/vendor/github.com/coreos/etcd/etcdserver/apply.go @@ -16,7 +16,6 @@ package etcdserver import ( "bytes" - "fmt" "sort" "time" @@ -30,11 +29,6 @@ import ( ) const ( - // noTxn is an invalid txn ID. - // To apply with independent Range, Put, Delete, you can pass noTxn - // to apply functions instead of a valid txn ID. 
- noTxn = -1 - warnApplyDuration = 100 * time.Millisecond ) @@ -51,9 +45,9 @@ type applyResult struct { type applierV3 interface { Apply(r *pb.InternalRaftRequest) *applyResult - Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) - Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) - DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) + Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) + Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) + DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) @@ -99,11 +93,11 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls switch { case r.Range != nil: - ar.resp, ar.err = a.s.applyV3.Range(noTxn, r.Range) + ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range) case r.Put != nil: - ar.resp, ar.err = a.s.applyV3.Put(noTxn, r.Put) + ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put) case r.DeleteRange != nil: - ar.resp, ar.err = a.s.applyV3.DeleteRange(noTxn, r.DeleteRange) + ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) case r.Txn != nil: ar.resp, ar.err = a.s.applyV3.Txn(r.Txn) case r.Compaction != nil: @@ -152,106 +146,87 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { return ar } -func (a *applierV3backend) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { - resp := &pb.PutResponse{} +func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) { + resp = &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} - var ( - rev int64 - err error - ) - var rr *mvcc.RangeResult - if p.PrevKv { - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, p.Key, nil, 
mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(p.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } - } - - if txnID != noTxn { - rev, err = a.s.KV().TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease)) - if err != nil { - return nil, err - } - } else { - leaseID := lease.LeaseID(p.Lease) + val, leaseID := p.Value, lease.LeaseID(p.Lease) + if txn == nil { if leaseID != lease.NoLease { if l := a.s.lessor.Lookup(leaseID); l == nil { return nil, lease.ErrLeaseNotFound } } - rev = a.s.KV().Put(p.Key, p.Value, leaseID) + txn = a.s.KV().Write() + defer txn.End() } - resp.Header.Revision = rev - if rr != nil && len(rr.KVs) != 0 { - resp.PrevKv = &rr.KVs[0] + + var rr *mvcc.RangeResult + if p.IgnoreValue || p.IgnoreLease || p.PrevKv { + rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } } + if p.IgnoreValue || p.IgnoreLease { + if rr == nil || len(rr.KVs) == 0 { + // ignore_{lease,value} flag expects previous key-value pair + return nil, ErrKeyNotFound + } + } + if p.IgnoreValue { + val = rr.KVs[0].Value + } + if p.IgnoreLease { + leaseID = lease.LeaseID(rr.KVs[0].Lease) + } + if p.PrevKv { + if rr != nil && len(rr.KVs) != 0 { + resp.PrevKv = &rr.KVs[0] + } + } + + resp.Header.Revision = txn.Put(p.Key, val, leaseID) return resp, nil } -func (a *applierV3backend) DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { +func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { resp := &pb.DeleteRangeResponse{} resp.Header = &pb.ResponseHeader{} - var ( - n int64 - rev int64 - err error - ) + if txn == nil { + txn = a.s.kv.Write() + defer txn.End() + } if isGteRange(dr.RangeEnd) { dr.RangeEnd = []byte{} } - var rr *mvcc.RangeResult if dr.PrevKv { - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - 
return nil, err - } - } else { - rr, err = a.s.KV().Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } - } - - if txnID != noTxn { - n, rev, err = a.s.KV().TxnDeleteRange(txnID, dr.Key, dr.RangeEnd) + rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) if err != nil { return nil, err } - } else { - n, rev = a.s.KV().DeleteRange(dr.Key, dr.RangeEnd) - } - - resp.Deleted = n - if rr != nil { - for i := range rr.KVs { - resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) + if rr != nil { + for i := range rr.KVs { + resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) + } } } - resp.Header.Revision = rev + + resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd) return resp, nil } -func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { resp := &pb.RangeResponse{} resp.Header = &pb.ResponseHeader{} - var ( - rr *mvcc.RangeResult - err error - ) + if txn == nil { + txn = a.s.kv.Read() + defer txn.End() + } if isGteRange(r.RangeEnd) { r.RangeEnd = []byte{} @@ -275,16 +250,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp Count: r.CountOnly, } - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err - } + rr, err := txn.Range(r.Key, r.RangeEnd, ro) + if err != nil { + return nil, err } if r.MaxModRevision != 0 { @@ -350,61 +318,64 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp } func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - ok := true - for _, c := range rt.Compare { - if _, ok = a.applyCompare(c); !ok { - break + isWrite := !isTxnReadonly(rt) + txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read()) + + reqs, ok := 
a.compareToOps(txn, rt) + if isWrite { + if err := a.checkRequestPut(txn, reqs); err != nil { + txn.End() + return nil, err } } - - var reqs []*pb.RequestOp - if ok { - reqs = rt.Success - } else { - reqs = rt.Failure - } - - if err := a.checkRequestLeases(reqs); err != nil { + if err := checkRequestRange(txn, reqs); err != nil { + txn.End() return nil, err } - if err := a.checkRequestRange(reqs); err != nil { - return nil, err - } - - // When executing the operations of txn, we need to hold the txn lock. - // So the reader will not see any intermediate results. - txnID := a.s.KV().TxnBegin() resps := make([]*pb.ResponseOp, len(reqs)) + txnResp := &pb.TxnResponse{ + Responses: resps, + Succeeded: ok, + Header: &pb.ResponseHeader{}, + } + + // When executing mutable txn ops, etcd must hold the txn lock so + // readers do not see any intermediate results. Since writes are + // serialized on the raft loop, the revision in the read view will + // be the revision of the write txn. + if isWrite { + txn.End() + txn = a.s.KV().Write() + } for i := range reqs { - resps[i] = a.applyUnion(txnID, reqs[i]) + resps[i] = a.applyUnion(txn, reqs[i]) } - - err := a.s.KV().TxnEnd(txnID) - if err != nil { - panic(fmt.Sprint("unexpected error when closing txn", txnID)) + rev := txn.Rev() + if len(txn.Changes()) != 0 { + rev++ } + txn.End() - txnResp := &pb.TxnResponse{} - txnResp.Header = &pb.ResponseHeader{} - txnResp.Header.Revision = a.s.KV().Rev() - txnResp.Responses = resps - txnResp.Succeeded = ok + txnResp.Header.Revision = rev return txnResp, nil } -// applyCompare applies the compare request. -// It returns the revision at which the comparison happens. If the comparison -// succeeds, the it returns true. Otherwise it returns false. 
-func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { - rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{}) - rev := rr.Rev - - if err != nil { - if err == mvcc.ErrTxnIDMismatch { - panic("unexpected txn ID mismatch error") +func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) { + for _, c := range rt.Compare { + if !applyCompare(rv, c) { + return rt.Failure, false } - return rev, false + } + return rt.Success, true +} + +// applyCompare applies the compare request. +// If the comparison succeeds, it returns true. Otherwise, returns false. +func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { + rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return false } var ckv mvccpb.KeyValue if len(rr.KVs) != 0 { @@ -416,7 +387,7 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { // We can treat non-existence as the empty set explicitly, such that // even a key with a value of length 0 bytes is still a real key // that was written that way - return rev, false + return false } } @@ -448,30 +419,22 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { switch c.Result { case pb.Compare_EQUAL: - if result != 0 { - return rev, false - } + return result == 0 case pb.Compare_NOT_EQUAL: - if result == 0 { - return rev, false - } + return result != 0 case pb.Compare_GREATER: - if result != 1 { - return rev, false - } + return result > 0 case pb.Compare_LESS: - if result != -1 { - return rev, false - } + return result < 0 } - return rev, true + return true } -func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.ResponseOp { +func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp { switch tv := union.Request.(type) { case *pb.RequestOp_RequestRange: if tv.RequestRange != nil { - resp, err := a.Range(txnID, tv.RequestRange) + resp, err := a.Range(txn, tv.RequestRange) if err != nil { 
plog.Panicf("unexpected error during txn: %v", err) } @@ -479,7 +442,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp } case *pb.RequestOp_RequestPut: if tv.RequestPut != nil { - resp, err := a.Put(txnID, tv.RequestPut) + resp, err := a.Put(txn, tv.RequestPut) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -487,7 +450,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp } case *pb.RequestOp_RequestDeleteRange: if tv.RequestDeleteRange != nil { - resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange) + resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -588,7 +551,7 @@ type applierV3Capped struct { // with Puts so that the number of keys in the store is capped. func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } -func (a *applierV3Capped) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { +func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { return nil, ErrNoSpace } @@ -617,7 +580,7 @@ func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { } func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { - ctx := context.WithValue(context.WithValue(context.Background(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) + ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) if resp != nil { resp.Header = newHeader(a.s) @@ -738,9 +701,9 @@ func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { return "aApplierV3{app, NewBackendQuota(s)} } -func (a *quotaApplierV3) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { +func (a *quotaApplierV3) Put(txn 
mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { ok := a.q.Available(p) - resp, err := a.applierV3.Put(txnID, p) + resp, err := a.applierV3.Put(txn, p) if err == nil && !ok { err = ErrNoSpace } @@ -804,14 +767,27 @@ func (s *kvSortByValue) Less(i, j int) bool { return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 } -func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error { +func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error { for _, requ := range reqs { tv, ok := requ.Request.(*pb.RequestOp_RequestPut) if !ok { continue } preq := tv.RequestPut - if preq == nil || lease.LeaseID(preq.Lease) == lease.NoLease { + if preq == nil { + continue + } + if preq.IgnoreValue || preq.IgnoreLease { + // expects previous key-value, error if not exist + rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return err + } + if rr == nil || len(rr.KVs) == 0 { + return ErrKeyNotFound + } + } + if lease.LeaseID(preq.Lease) == lease.NoLease { continue } if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil { @@ -821,7 +797,7 @@ func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error { return nil } -func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error { +func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error { for _, requ := range reqs { tv, ok := requ.Request.(*pb.RequestOp_RequestRange) if !ok { @@ -832,10 +808,10 @@ func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error { continue } - if greq.Revision > a.s.KV().Rev() { + if greq.Revision > rv.Rev() { return mvcc.ErrFutureRev } - if greq.Revision < a.s.KV().FirstRev() { + if greq.Revision < rv.FirstRev() { return mvcc.ErrCompacted } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go index 4868e855ca1..7da4ae45df5 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go +++ 
b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go @@ -19,6 +19,7 @@ import ( "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc" ) type authApplierV3 struct { @@ -58,7 +59,7 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { return ret } -func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) { +func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) { if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { return nil, err } @@ -68,17 +69,17 @@ func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, er return nil, err } } - return aa.applierV3.Put(txnID, r) + return aa.applierV3.Put(txn, r) } -func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { return nil, err } - return aa.applierV3.Range(txnID, r) + return aa.applierV3.Range(txn, r) } -func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { +func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { return nil, err } @@ -89,7 +90,7 @@ func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb } } - return aa.applierV3.DeleteRange(txnID, r) + return aa.applierV3.DeleteRange(txn, r) } func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go new file mode 100644 index 00000000000..c5e2dabf3e7 --- /dev/null +++ 
b/vendor/github.com/coreos/etcd/etcdserver/backend.go @@ -0,0 +1,81 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "fmt" + "os" + "time" + + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" +) + +func newBackend(cfg *ServerConfig) backend.Backend { + bcfg := backend.DefaultBackendConfig() + bcfg.Path = cfg.backendPath() + if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { + // permit 10% excess over quota for disarm + bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) + } + return backend.New(bcfg) +} + +// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. +func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { + snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) + if err != nil { + return nil, fmt.Errorf("database snapshot file path error: %v", err) + } + if err := os.Rename(snapPath, cfg.backendPath()); err != nil { + return nil, fmt.Errorf("rename snapshot file error: %v", err) + } + return openBackend(cfg), nil +} + +// openBackend returns a backend using the current etcd db. 
+func openBackend(cfg *ServerConfig) backend.Backend { + fn := cfg.backendPath() + beOpened := make(chan backend.Backend) + go func() { + beOpened <- newBackend(cfg) + }() + select { + case be := <-beOpened: + return be + case <-time.After(time.Second): + plog.Warningf("another etcd process is using %q and holds the file lock.", fn) + plog.Warningf("waiting for it to exit before starting...") + } + return <-beOpened +} + +// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes +// before updating the backend db after persisting raft snapshot to disk, +// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this +// case, replace the db with the snapshot db sent by the leader. +func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { + var cIndex consistentIndex + kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex) + defer kv.Close() + if snapshot.Metadata.Index <= kv.ConsistentIndex() { + return oldbe, nil + } + oldbe.Close() + return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go index fa84ffae630..f44862a4638 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go @@ -23,7 +23,6 @@ import ( "time" "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/httputil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" @@ -241,15 +240,6 @@ func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) continue } - // etcd 2.0 does not have version endpoint on peer url. 
- if resp.StatusCode == http.StatusNotFound { - httputil.GracefulClose(resp) - return &version.Versions{ - Server: "2.0.0", - Cluster: "2.0.0", - }, nil - } - var b []byte b, err = ioutil.ReadAll(resp.Body) resp.Body.Close() diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go index 9bcac0f076b..c8ff27f4e14 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/config.go +++ b/vendor/github.com/coreos/etcd/etcdserver/config.go @@ -55,10 +55,15 @@ type ServerConfig struct { AutoCompactionRetention int QuotaBackendBytes int64 + // MaxRequestBytes is the maximum request size to send over raft. + MaxRequestBytes uint + StrictReconfigCheck bool // ClientCertAuthEnabled is true when cert has been signed by the client CA. ClientCertAuthEnabled bool + + AuthToken string } // VerifyBootstrap sanity-checks the initial config for bootstrap case @@ -198,3 +203,5 @@ func (c *ServerConfig) bootstrapTimeout() time.Duration { } return time.Second } + +func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") } diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go index 5edc155624b..ed749dbe8d8 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/errors.go +++ b/vendor/github.com/coreos/etcd/etcdserver/errors.go @@ -33,6 +33,7 @@ var ( ErrNoSpace = errors.New("etcdserver: no space") ErrTooManyRequests = errors.New("etcdserver: too many requests") ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") + ErrKeyNotFound = errors.New("etcdserver: key not found") ) type DiscoveryError struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD index 0c1db78018e..4476d65ab94 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD @@ -16,7 +16,6 @@ go_library( 
"etcdserver.pb.go", "raft_internal.pb.go", "rpc.pb.go", - "rpc.pb.gw.go", ], importpath = "github.com/coreos/etcd/etcdserver/etcdserverpb", visibility = ["//visibility:public"], @@ -24,12 +23,9 @@ go_library( "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", ], ) @@ -42,7 +38,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go index f34bedf3ed3..aabf90061f6 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -1018,7 +1018,7 @@ func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } var fileDescriptorEtcdserver = []byte{ // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, 0xc8, 0xd5, 
0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD new file mode 100644 index 00000000000..0b8e37503da --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["rpc.pb.gw.go"], + importpath = "github.com/coreos/etcd/etcdserver/etcdserverpb/gw", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go similarity index 69% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go rename to vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go index 473ad582ef8..02a23b78c10 100644 --- 
a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go @@ -1,15 +1,15 @@ -// Code generated by protoc-gen-grpc-gateway +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: etcdserver/etcdserverpb/rpc.proto -// DO NOT EDIT! /* Package etcdserverpb is a reverse proxy. It translates gRPC into RESTful JSON APIs. */ -package etcdserverpb +package gw import ( + "github.com/coreos/etcd/etcdserver/etcdserverpb" "io" "net/http" @@ -20,19 +20,21 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader +var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RangeRequest +func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.RangeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -40,12 +42,12 @@ func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client } -func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PutRequest +func request_KV_Put_0(ctx context.Context, marshaler 
runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.PutRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -53,12 +55,12 @@ func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client K } -func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRangeRequest +func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.DeleteRangeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -66,12 +68,12 @@ func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq TxnRequest +func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { + var protoReq etcdserverpb.TxnRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -79,12 +81,12 @@ func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client K } -func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CompactionRequest +func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.CompactionRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -92,7 +94,7 @@ func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, clie } -func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client WatchClient, req *http.Request, pathParams map[string]string) (Watch_WatchClient, runtime.ServerMetadata, error) { +func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata stream, err := client.Watch(ctx) if err != nil { @@ -101,7 
+103,7 @@ func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, cli } dec := marshaler.NewDecoder(req.Body) handleSend := func() error { - var protoReq WatchRequest + var protoReq etcdserverpb.WatchRequest err = dec.Decode(&protoReq) if err == io.EOF { return err @@ -144,12 +146,12 @@ func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, cli return stream, metadata, nil } -func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseGrantRequest +func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseGrantRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -157,12 +159,12 @@ func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseRevokeRequest +func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseRevokeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - 
return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -170,7 +172,7 @@ func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshale } -func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { +func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata stream, err := client.LeaseKeepAlive(ctx) if err != nil { @@ -179,7 +181,7 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh } dec := marshaler.NewDecoder(req.Body) handleSend := func() error { - var protoReq LeaseKeepAliveRequest + var protoReq etcdserverpb.LeaseKeepAliveRequest err = dec.Decode(&protoReq) if err == io.EOF { return err @@ -222,12 +224,12 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh return stream, metadata, nil } -func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseTimeToLiveRequest +func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseTimeToLiveRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return 
nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -235,12 +237,12 @@ func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Mars } -func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberAddRequest +func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -248,12 +250,12 @@ func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshale } -func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberRemoveRequest +func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberRemoveRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, 
grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -261,12 +263,12 @@ func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marsh } -func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberUpdateRequest +func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberUpdateRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -274,12 +276,12 @@ func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marsh } -func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberListRequest +func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, 
"%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -287,12 +289,12 @@ func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshal } -func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AlarmRequest +func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AlarmRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -300,12 +302,12 @@ func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshale } -func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest +func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.StatusRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -313,12 +315,12 @@ func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshal } -func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DefragmentRequest +func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.DefragmentRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -326,12 +328,12 @@ func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Mar } -func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HashRequest +func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.HashRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) 
} msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -339,12 +341,12 @@ func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (Maintenance_SnapshotClient, runtime.ServerMetadata, error) { - var protoReq SnapshotRequest +func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.SnapshotRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.Snapshot(ctx, &protoReq) @@ -360,12 +362,12 @@ func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marsh } -func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthEnableRequest +func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthEnableRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD)) @@ -373,12 +375,12 @@ func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthDisableRequest +func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthDisableRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -386,12 +388,12 @@ func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthenticateRequest +func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthenticateRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -399,12 +401,12 @@ 
func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshale } -func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserAddRequest +func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -412,12 +414,12 @@ func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserGetRequest +func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserGetRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -425,12 +427,12 @@ func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func 
request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserListRequest +func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -438,12 +440,12 @@ func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, c } -func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserDeleteRequest +func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserDeleteRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -451,12 +453,12 @@ func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_UserChangePassword_0(ctx context.Context, marshaler 
runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserChangePasswordRequest +func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserChangePasswordRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -464,12 +466,12 @@ func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Ma } -func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserGrantRoleRequest +func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserGrantRoleRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -477,12 +479,12 @@ func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshal } -func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler 
runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserRevokeRoleRequest +func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserRevokeRoleRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -490,12 +492,12 @@ func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marsha } -func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleAddRequest +func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -503,12 +505,12 @@ func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, 
pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleGetRequest +func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleGetRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -516,12 +518,12 @@ func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleListRequest +func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -529,12 +531,12 @@ func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, c } -func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - 
var protoReq AuthRoleDeleteRequest +func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleDeleteRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -542,12 +544,12 @@ func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleGrantPermissionRequest +func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleGrantPermissionRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -555,12 +557,12 @@ func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.M } -func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var 
protoReq AuthRoleRevokePermissionRequest +func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleRevokePermissionRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -596,7 +598,15 @@ func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, e // RegisterKVHandler registers the http handlers for service KV to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewKVClient(conn) + return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn)) +} + +// RegisterKVHandler registers the http handlers for service KV to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "KVClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "KVClient" to call the correct interceptors. 
+func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error { mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -611,18 +621,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Range_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -639,18 +650,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Put_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -667,18 +679,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_DeleteRange_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -695,18 +708,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Txn_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -723,18 +737,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Compact_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -793,7 +808,15 @@ func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux // RegisterWatchHandler registers the http handlers for service Watch to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewWatchClient(conn) + return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn)) +} + +// RegisterWatchHandler registers the http handlers for service Watch to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "WatchClient" to call the correct interceptors. +func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error { mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -808,18 +831,19 @@ func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Watch_Watch_0(ctx, outboundMarshaler, w, 
req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -862,7 +886,15 @@ func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux // RegisterLeaseHandler registers the http handlers for service Lease to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewLeaseClient(conn) + return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn)) +} + +// RegisterLeaseHandler registers the http handlers for service Lease to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LeaseClient" to call the correct interceptors. 
+func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error { mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -877,18 +909,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseGrant_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -905,18 +938,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseRevoke_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -933,18 +967,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseKeepAlive_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -961,18 +996,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseTimeToLive_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1027,7 +1063,15 @@ func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeM // RegisterClusterHandler registers the http handlers for service Cluster to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewClusterClient(conn) + return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn)) +} + +// RegisterClusterHandler registers the http handlers for service Cluster to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) 
then it will be up to the passed in +// "ClusterClient" to call the correct interceptors. +func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error { mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1042,18 +1086,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1070,18 +1115,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberRemove_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1098,18 +1144,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberUpdate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1126,18 +1173,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1192,7 +1240,15 @@ func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.Se // RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewMaintenanceClient(conn) + return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn)) +} + +// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MaintenanceClient" to call the correct interceptors. 
+func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error { mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1207,18 +1263,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Alarm_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1235,18 +1292,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Status_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1263,18 +1321,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Defragment_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1291,18 +1350,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Hash_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1319,18 +1379,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Snapshot_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1389,7 +1450,15 @@ func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, // RegisterAuthHandler registers the http handlers for service Auth to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewAuthClient(conn) + return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn)) +} + +// RegisterAuthHandler registers the http handlers for service Auth to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "AuthClient" to call the correct interceptors. +func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error { mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1404,18 +1473,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthEnable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1432,18 +1502,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthDisable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1460,18 +1531,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_Authenticate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1488,18 +1560,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1516,18 +1589,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1544,18 +1618,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1572,18 +1647,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1600,18 +1676,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserChangePassword_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1628,18 +1705,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserGrantRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1656,18 +1734,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserRevokeRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1684,18 +1763,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1712,18 +1792,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1740,18 +1821,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1768,18 +1850,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1796,18 +1879,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGrantPermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1824,18 +1908,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleRevokePermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go index 66890c93c44..44a3b6f69eb 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -2038,7 +2038,7 @@ func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftIntern var fileDescriptorRaftInternal = []byte{ // 837 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go index b28f2e50e3c..894c815f824 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -15,6 +15,8 @@ import ( authpb "github.com/coreos/etcd/auth/authpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + context "golang.org/x/net/context" grpc "google.golang.org/grpc" @@ -223,16 +225,45 @@ func (m *ResponseHeader) String() string { return proto.CompactTextSt func (*ResponseHeader) ProtoMessage() {} func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func 
(m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + type RangeRequest struct { // key is the first key for the range. If range_end is not given, the request only looks up key. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // range_end is the upper bound on the requested range [key, range_end). // If range_end is '\0', the range is all keys >= key. - // If the range_end is one bit larger than the given key, - // then the range requests get the all keys with the prefix (the given key). - // If both key and range_end are '\0', then range requests returns all keys. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // limit is a limit on the number of keys returned for the request. + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` // revision is the point-in-time of the key-value store to use for the range. // If revision is less or equal to zero, the range is over the newest key-value store. 
@@ -272,6 +303,97 @@ func (m *RangeRequest) String() string { return proto.CompactTextStri func (*RangeRequest) ProtoMessage() {} func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } +func (m *RangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m != nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + type RangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kvs is the list of key-value pairs matched by the range request. 
@@ -302,6 +424,20 @@ func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { return nil } +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + type PutRequest struct { // key is the key, in bytes, to put into the key-value store. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -313,6 +449,12 @@ type PutRequest struct { // If prev_kv is set, etcd gets the previous key-value pair before changing it. // The previous key-value pair will be returned in the put response. PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. 
+ IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` } func (m *PutRequest) Reset() { *m = PutRequest{} } @@ -320,6 +462,48 @@ func (m *PutRequest) String() string { return proto.CompactTextString func (*PutRequest) ProtoMessage() {} func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + type PutResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // if prev_kv is set in the request, the previous key-value pair will be returned. @@ -350,12 +534,12 @@ type DeleteRangeRequest struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // range_end is the key following the last key to delete for the range [key, range_end). // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all - // the all keys with the prefix (the given key). + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). // If range_end is '\0', the range is all keys greater than or equal to the key argument. 
RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delte response. + // The previous key-value pairs will be returned in the delete response. PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` } @@ -364,6 +548,27 @@ func (m *DeleteRangeRequest) String() string { return proto.CompactTe func (*DeleteRangeRequest) ProtoMessage() {} func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } +func (m *DeleteRangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + type DeleteRangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // deleted is the number of keys deleted by the delete range request. 
@@ -384,6 +589,13 @@ func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { return nil } +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { if m != nil { return m.PrevKvs @@ -754,6 +966,27 @@ func (m *Compare) GetTargetUnion() isCompare_TargetUnion { return nil } +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + func (m *Compare) GetVersion() int64 { if x, ok := m.GetTargetUnion().(*Compare_Version); ok { return x.Version @@ -950,6 +1183,13 @@ func (m *TxnResponse) GetHeader() *ResponseHeader { return nil } +func (m *TxnResponse) GetSucceeded() bool { + if m != nil { + return m.Succeeded + } + return false +} + func (m *TxnResponse) GetResponses() []*ResponseOp { if m != nil { return m.Responses @@ -973,6 +1213,20 @@ func (m *CompactionRequest) String() string { return proto.CompactTex func (*CompactionRequest) ProtoMessage() {} func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + type CompactionResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1015,6 +1269,13 @@ func (m *HashResponse) GetHeader() *ResponseHeader { return nil } +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + type SnapshotRequest struct { } @@ -1045,6 +1306,20 @@ func (m *SnapshotResponse) GetHeader() *ResponseHeader { return nil } +func (m 
*SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + type WatchRequest struct { // request_union is a request to either create a new watcher or cancel an existing watcher. // @@ -1198,6 +1473,48 @@ func (m *WatchCreateRequest) String() string { return proto.CompactTe func (*WatchCreateRequest) ProtoMessage() {} func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } +func (m *WatchCreateRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *WatchCreateRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *WatchCreateRequest) GetStartRevision() int64 { + if m != nil { + return m.StartRevision + } + return 0 +} + +func (m *WatchCreateRequest) GetProgressNotify() bool { + if m != nil { + return m.ProgressNotify + } + return false +} + +func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { + if m != nil { + return m.Filters + } + return nil +} + +func (m *WatchCreateRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + type WatchCancelRequest struct { // watch_id is the watcher id to cancel so that no more events are transmitted. WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` @@ -1208,6 +1525,13 @@ func (m *WatchCancelRequest) String() string { return proto.CompactTe func (*WatchCancelRequest) ProtoMessage() {} func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } +func (m *WatchCancelRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + type WatchResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // watch_id is the ID of the watcher that corresponds to the response. 
@@ -1228,8 +1552,10 @@ type WatchResponse struct { // // The client should treat the watcher as canceled and should not try to create any // watcher with the same start_revision again. - CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` + CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + // cancel_reason indicates the reason for canceling the watcher. + CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` } func (m *WatchResponse) Reset() { *m = WatchResponse{} } @@ -1244,6 +1570,41 @@ func (m *WatchResponse) GetHeader() *ResponseHeader { return nil } +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + func (m *WatchResponse) GetEvents() []*mvccpb.Event { if m != nil { return m.Events @@ -1263,6 +1624,20 @@ func (m *LeaseGrantRequest) String() string { return proto.CompactTex func (*LeaseGrantRequest) ProtoMessage() {} func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + 
return 0 +} + type LeaseGrantResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID for the granted lease. @@ -1284,6 +1659,27 @@ func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type LeaseRevokeRequest struct { // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1294,6 +1690,13 @@ func (m *LeaseRevokeRequest) String() string { return proto.CompactTe func (*LeaseRevokeRequest) ProtoMessage() {} func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseRevokeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1320,6 +1723,13 @@ func (m *LeaseKeepAliveRequest) String() string { return proto.Compac func (*LeaseKeepAliveRequest) ProtoMessage() {} func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseKeepAliveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. 
@@ -1340,6 +1750,20 @@ func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + type LeaseTimeToLiveRequest struct { // ID is the lease ID for the lease. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1352,6 +1776,20 @@ func (m *LeaseTimeToLiveRequest) String() string { return proto.Compa func (*LeaseTimeToLiveRequest) ProtoMessage() {} func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + type LeaseTimeToLiveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. @@ -1376,6 +1814,34 @@ func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + type Member struct { // ID is the member ID for this member. 
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1392,6 +1858,34 @@ func (m *Member) String() string { return proto.CompactTextString(m) func (*Member) ProtoMessage() {} func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + type MemberAddRequest struct { // peerURLs is the list of URLs the added member will use to communicate with the cluster. PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` @@ -1402,10 +1896,19 @@ func (m *MemberAddRequest) String() string { return proto.CompactText func (*MemberAddRequest) ProtoMessage() {} func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + type MemberAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // member is the member information for the added member. Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` + // members is a list of all members after adding the new member. + Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` } func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } @@ -1427,6 +1930,13 @@ func (m *MemberAddResponse) GetMember() *Member { return nil } +func (m *MemberAddResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + type MemberRemoveRequest struct { // ID is the member ID of the member to remove. 
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1437,8 +1947,17 @@ func (m *MemberRemoveRequest) String() string { return proto.CompactT func (*MemberRemoveRequest) ProtoMessage() {} func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + type MemberRemoveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after removing the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } @@ -1453,6 +1972,13 @@ func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { return nil } +func (m *MemberRemoveResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + type MemberUpdateRequest struct { // ID is the member ID of the member to update. ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1465,8 +1991,24 @@ func (m *MemberUpdateRequest) String() string { return proto.CompactT func (*MemberUpdateRequest) ProtoMessage() {} func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + type MemberUpdateResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after updating the member. 
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } @@ -1481,6 +2023,13 @@ func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { return nil } +func (m *MemberUpdateResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + type MemberListRequest struct { } @@ -1555,6 +2104,27 @@ func (m *AlarmRequest) String() string { return proto.CompactTextStri func (*AlarmRequest) ProtoMessage() {} func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + type AlarmMember struct { // memberID is the ID of the member associated with the raised alarm. MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` @@ -1567,6 +2137,20 @@ func (m *AlarmMember) String() string { return proto.CompactTextStrin func (*AlarmMember) ProtoMessage() {} func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + type AlarmResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // alarms is a list of alarms associated with the alarm request. 
@@ -1626,6 +2210,41 @@ func (m *StatusResponse) GetHeader() *ResponseHeader { return nil } +func (m *StatusResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *StatusResponse) GetDbSize() int64 { + if m != nil { + return m.DbSize + } + return 0 +} + +func (m *StatusResponse) GetLeader() uint64 { + if m != nil { + return m.Leader + } + return 0 +} + +func (m *StatusResponse) GetRaftIndex() uint64 { + if m != nil { + return m.RaftIndex + } + return 0 +} + +func (m *StatusResponse) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + type AuthEnableRequest struct { } @@ -1652,6 +2271,20 @@ func (m *AuthenticateRequest) String() string { return proto.CompactT func (*AuthenticateRequest) ProtoMessage() {} func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } +func (m *AuthenticateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthenticateRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserAddRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` @@ -1662,6 +2295,20 @@ func (m *AuthUserAddRequest) String() string { return proto.CompactTe func (*AuthUserAddRequest) ProtoMessage() {} func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } +func (m *AuthUserAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserAddRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserGetRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1671,6 +2318,13 @@ func (m *AuthUserGetRequest) String() string { return proto.CompactTe func (*AuthUserGetRequest) ProtoMessage() {} func 
(*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } +func (m *AuthUserGetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthUserDeleteRequest struct { // name is the name of the user to delete. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1681,6 +2335,13 @@ func (m *AuthUserDeleteRequest) String() string { return proto.Compac func (*AuthUserDeleteRequest) ProtoMessage() {} func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } +func (m *AuthUserDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthUserChangePasswordRequest struct { // name is the name of the user whose password is being changed. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1695,6 +2356,20 @@ func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } +func (m *AuthUserChangePasswordRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserGrantRoleRequest struct { // user is the name of the user which should be granted a given role. 
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` @@ -1707,6 +2382,20 @@ func (m *AuthUserGrantRoleRequest) String() string { return proto.Com func (*AuthUserGrantRoleRequest) ProtoMessage() {} func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } +func (m *AuthUserGrantRoleRequest) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AuthUserGrantRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthUserRevokeRoleRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` @@ -1717,6 +2406,20 @@ func (m *AuthUserRevokeRoleRequest) String() string { return proto.Co func (*AuthUserRevokeRoleRequest) ProtoMessage() {} func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } +func (m *AuthUserRevokeRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserRevokeRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthRoleAddRequest struct { // name is the name of the role to add to the authentication system. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1727,6 +2430,13 @@ func (m *AuthRoleAddRequest) String() string { return proto.CompactTe func (*AuthRoleAddRequest) ProtoMessage() {} func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } +func (m *AuthRoleAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthRoleGetRequest struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` } @@ -1736,6 +2446,13 @@ func (m *AuthRoleGetRequest) String() string { return proto.CompactTe func (*AuthRoleGetRequest) ProtoMessage() {} func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } +func (m *AuthRoleGetRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthUserListRequest struct { } @@ -1761,6 +2478,13 @@ func (m *AuthRoleDeleteRequest) String() string { return proto.Compac func (*AuthRoleDeleteRequest) ProtoMessage() {} func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } +func (m *AuthRoleDeleteRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthRoleGrantPermissionRequest struct { // name is the name of the role which will be granted the permission. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1775,6 +2499,13 @@ func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } +func (m *AuthRoleGrantPermissionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { if m != nil { return m.Perm @@ -1795,6 +2526,27 @@ func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } +func (m *AuthRoleRevokePermissionRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { + if m != nil { + return m.RangeEnd + } + return "" +} + type AuthEnableResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1845,6 +2597,13 @@ func (m *AuthenticateResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthenticateResponse) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + type AuthUserAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1878,6 +2637,13 @@ func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthUserGetResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + type AuthUserDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -2001,6 +2767,13 @@ func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthRoleListResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + type AuthUserListResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` 
Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` @@ -2018,6 +2791,13 @@ func (m *AuthUserListResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthUserListResponse) GetUsers() []string { + if m != nil { + return m.Users + } + return nil +} + type AuthRoleDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -3941,6 +4721,26 @@ func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.IgnoreValue { + dAtA[i] = 0x28 + i++ + if m.IgnoreValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreLease { + dAtA[i] = 0x30 + i++ + if m.IgnoreLease { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -4749,6 +5549,12 @@ func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) } + if len(m.CancelReason) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i += copy(dAtA[i:], m.CancelReason) + } if len(m.Events) > 0 { for _, msg := range m.Events { dAtA[i] = 0x5a @@ -5159,6 +5965,18 @@ func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { } i += n29 } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -5210,6 +6028,18 @@ func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { } i += n30 } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -5276,6 +6106,18 @@ func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { } i += n31 } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = 
encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -6610,6 +7452,12 @@ func (m *PutRequest) Size() (n int) { if m.PrevKv { n += 2 } + if m.IgnoreValue { + n += 2 + } + if m.IgnoreLease { + n += 2 + } return n } @@ -6973,6 +7821,10 @@ func (m *WatchResponse) Size() (n int) { if m.CompactRevision != 0 { n += 1 + sovRpc(uint64(m.CompactRevision)) } + l = len(m.CancelReason) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } if len(m.Events) > 0 { for _, e := range m.Events { l = e.Size() @@ -7143,6 +7995,12 @@ func (m *MemberAddResponse) Size() (n int) { l = m.Member.Size() n += 1 + l + sovRpc(uint64(l)) } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -7162,6 +8020,12 @@ func (m *MemberRemoveResponse) Size() (n int) { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -7187,6 +8051,12 @@ func (m *MemberUpdateResponse) Size() (n int) { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -8413,6 +9283,46 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } } m.PrevKv = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreValue = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) + } + var v int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreLease = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -10345,7 +11255,24 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.ProgressNotify = bool(v != 0) case 5: - if wireType == 2 { + if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -10386,23 +11313,6 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.Filters = append(m.Filters, v) } - } else if wireType == 0 { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) } else { return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) } @@ -10656,6 +11566,35 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) @@ -11876,6 +12815,37 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -12028,6 +12998,37 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ 
-12209,6 +13210,37 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -16041,218 +17073,221 @@ var ( func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } var fileDescriptorRpc = []byte{ - // 3401 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x5b, 0xcb, 0x73, 0x1b, 0xc7, - 0xd1, 0xe7, 0x02, 0x24, 0x40, 0x34, 0x1e, 0x84, 0x86, 0x94, 0x04, 0xae, 0x24, 0x8a, 0x1a, 0xbd, - 0x28, 0xc9, 0x26, 0x6d, 0xda, 0xdf, 0x77, 0xd0, 0xe7, 0x72, 0x7d, 0x14, 0x09, 0x8b, 0x0c, 0x29, - 0x52, 0x5e, 0x52, 0xb2, 0x53, 0xe5, 0x0a, 0x6a, 0x09, 0x8c, 0xc8, 0x2d, 0x02, 0xbb, 0xf0, 0xee, - 0x02, 0x22, 0x9d, 0xa4, 0x2a, 0xe5, 0xd8, 0x95, 0x4a, 0x8e, 0xf1, 0x21, 0xaf, 0x63, 0x2a, 0x87, - 0xfc, 0x01, 0xb9, 0xe5, 0x0f, 0x48, 0xe5, 0x92, 0x54, 0xe5, 0x1f, 0x48, 0x39, 0x39, 0xe4, 0x90, - 0x7b, 0x4e, 0xa9, 0xa4, 0xe6, 0xb5, 0x3b, 0xbb, 0xd8, 0x05, 0xe5, 0x6c, 0x7c, 0x11, 0x77, 0x66, - 0x7a, 0xfa, 0xd7, 0xdd, 0x33, 0xdd, 0xd3, 0xd3, 0x03, 0x41, 0xc9, 0xed, 0xb7, 0x97, 0xfb, 0xae, - 0xe3, 0x3b, 0xa8, 0x42, 0xfc, 0x76, 0xc7, 0x23, 0xee, 0x90, 0xb8, 0xfd, 0x43, 0x7d, 0xee, 0xc8, - 0x39, 0x72, 0xd8, 0xc0, 0x0a, 0xfd, 0xe2, 0x34, 0xfa, 0x3c, 0xa5, 0x59, 0xe9, 0x0d, 
0xdb, 0x6d, - 0xf6, 0x4f, 0xff, 0x70, 0xe5, 0x64, 0x28, 0x86, 0xae, 0xb0, 0x21, 0x73, 0xe0, 0x1f, 0xb3, 0x7f, - 0xfa, 0x87, 0xec, 0x8f, 0x18, 0xbc, 0x7a, 0xe4, 0x38, 0x47, 0x5d, 0xb2, 0x62, 0xf6, 0xad, 0x15, - 0xd3, 0xb6, 0x1d, 0xdf, 0xf4, 0x2d, 0xc7, 0xf6, 0xf8, 0x28, 0xfe, 0x5c, 0x83, 0x9a, 0x41, 0xbc, - 0xbe, 0x63, 0x7b, 0x64, 0x93, 0x98, 0x1d, 0xe2, 0xa2, 0x6b, 0x00, 0xed, 0xee, 0xc0, 0xf3, 0x89, - 0xdb, 0xb2, 0x3a, 0x0d, 0x6d, 0x51, 0x5b, 0x9a, 0x34, 0x4a, 0xa2, 0x67, 0xab, 0x83, 0xae, 0x40, - 0xa9, 0x47, 0x7a, 0x87, 0x7c, 0x34, 0xc7, 0x46, 0xa7, 0x79, 0xc7, 0x56, 0x07, 0xe9, 0x30, 0xed, - 0x92, 0xa1, 0xe5, 0x59, 0x8e, 0xdd, 0xc8, 0x2f, 0x6a, 0x4b, 0x79, 0x23, 0x68, 0xd3, 0x89, 0xae, - 0xf9, 0xc2, 0x6f, 0xf9, 0xc4, 0xed, 0x35, 0x26, 0xf9, 0x44, 0xda, 0x71, 0x40, 0xdc, 0x1e, 0xfe, - 0x6c, 0x0a, 0x2a, 0x86, 0x69, 0x1f, 0x11, 0x83, 0x7c, 0x3c, 0x20, 0x9e, 0x8f, 0xea, 0x90, 0x3f, - 0x21, 0x67, 0x0c, 0xbe, 0x62, 0xd0, 0x4f, 0x3e, 0xdf, 0x3e, 0x22, 0x2d, 0x62, 0x73, 0xe0, 0x0a, - 0x9d, 0x6f, 0x1f, 0x91, 0xa6, 0xdd, 0x41, 0x73, 0x30, 0xd5, 0xb5, 0x7a, 0x96, 0x2f, 0x50, 0x79, - 0x23, 0x22, 0xce, 0x64, 0x4c, 0x9c, 0x75, 0x00, 0xcf, 0x71, 0xfd, 0x96, 0xe3, 0x76, 0x88, 0xdb, - 0x98, 0x5a, 0xd4, 0x96, 0x6a, 0xab, 0xb7, 0x96, 0xd5, 0x85, 0x58, 0x56, 0x05, 0x5a, 0xde, 0x77, - 0x5c, 0x7f, 0x8f, 0xd2, 0x1a, 0x25, 0x4f, 0x7e, 0xa2, 0xf7, 0xa0, 0xcc, 0x98, 0xf8, 0xa6, 0x7b, - 0x44, 0xfc, 0x46, 0x81, 0x71, 0xb9, 0x7d, 0x0e, 0x97, 0x03, 0x46, 0x6c, 0x30, 0x78, 0xfe, 0x8d, - 0x30, 0x54, 0x3c, 0xe2, 0x5a, 0x66, 0xd7, 0xfa, 0xc4, 0x3c, 0xec, 0x92, 0x46, 0x71, 0x51, 0x5b, - 0x9a, 0x36, 0x22, 0x7d, 0x54, 0xff, 0x13, 0x72, 0xe6, 0xb5, 0x1c, 0xbb, 0x7b, 0xd6, 0x98, 0x66, - 0x04, 0xd3, 0xb4, 0x63, 0xcf, 0xee, 0x9e, 0xb1, 0x45, 0x73, 0x06, 0xb6, 0xcf, 0x47, 0x4b, 0x6c, - 0xb4, 0xc4, 0x7a, 0xd8, 0xf0, 0x12, 0xd4, 0x7b, 0x96, 0xdd, 0xea, 0x39, 0x9d, 0x56, 0x60, 0x10, - 0x60, 0x06, 0xa9, 0xf5, 0x2c, 0xfb, 0x89, 0xd3, 0x31, 0xa4, 0x59, 0x28, 0xa5, 0x79, 0x1a, 0xa5, - 0x2c, 0x0b, 0x4a, 0xf3, 
0x54, 0xa5, 0x5c, 0x86, 0x59, 0xca, 0xb3, 0xed, 0x12, 0xd3, 0x27, 0x21, - 0x71, 0x85, 0x11, 0x5f, 0xe8, 0x59, 0xf6, 0x3a, 0x1b, 0x89, 0xd0, 0x9b, 0xa7, 0x23, 0xf4, 0x55, - 0x41, 0x6f, 0x9e, 0x46, 0xe9, 0xf1, 0x32, 0x94, 0x02, 0x9b, 0xa3, 0x69, 0x98, 0xdc, 0xdd, 0xdb, - 0x6d, 0xd6, 0x27, 0x10, 0x40, 0x61, 0x6d, 0x7f, 0xbd, 0xb9, 0xbb, 0x51, 0xd7, 0x50, 0x19, 0x8a, - 0x1b, 0x4d, 0xde, 0xc8, 0xe1, 0x47, 0x00, 0xa1, 0x75, 0x51, 0x11, 0xf2, 0xdb, 0xcd, 0x6f, 0xd6, - 0x27, 0x28, 0xcd, 0xf3, 0xa6, 0xb1, 0xbf, 0xb5, 0xb7, 0x5b, 0xd7, 0xe8, 0xe4, 0x75, 0xa3, 0xb9, - 0x76, 0xd0, 0xac, 0xe7, 0x28, 0xc5, 0x93, 0xbd, 0x8d, 0x7a, 0x1e, 0x95, 0x60, 0xea, 0xf9, 0xda, - 0xce, 0xb3, 0x66, 0x7d, 0x12, 0x7f, 0xa1, 0x41, 0x55, 0xac, 0x17, 0xf7, 0x09, 0xf4, 0x36, 0x14, - 0x8e, 0x99, 0x5f, 0xb0, 0xad, 0x58, 0x5e, 0xbd, 0x1a, 0x5b, 0xdc, 0x88, 0xef, 0x18, 0x82, 0x16, - 0x61, 0xc8, 0x9f, 0x0c, 0xbd, 0x46, 0x6e, 0x31, 0xbf, 0x54, 0x5e, 0xad, 0x2f, 0x73, 0x87, 0x5d, - 0xde, 0x26, 0x67, 0xcf, 0xcd, 0xee, 0x80, 0x18, 0x74, 0x10, 0x21, 0x98, 0xec, 0x39, 0x2e, 0x61, - 0x3b, 0x76, 0xda, 0x60, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0x70, 0x1b, 0xe0, - 0xe9, 0xc0, 0x4f, 0xf7, 0x8c, 0x39, 0x98, 0x1a, 0x52, 0xbe, 0xc2, 0x2b, 0x78, 0x83, 0xb9, 0x04, - 0x31, 0x3d, 0x12, 0xb8, 0x04, 0x6d, 0xa0, 0xcb, 0x50, 0xec, 0xbb, 0x64, 0xd8, 0x3a, 0x19, 0x32, - 0x8c, 0x69, 0xa3, 0x40, 0x9b, 0xdb, 0x43, 0x6c, 0x43, 0x99, 0x81, 0x64, 0xd2, 0xfb, 0x5e, 0xc8, - 0x3d, 0xc7, 0xa6, 0x8d, 0xea, 0x2e, 0xf1, 0x3e, 0x02, 0xb4, 0x41, 0xba, 0xc4, 0x27, 0x59, 0xdc, - 0x5e, 0xd1, 0x26, 0x1f, 0xd1, 0xe6, 0xc7, 0x1a, 0xcc, 0x46, 0xd8, 0x67, 0x52, 0xab, 0x01, 0xc5, - 0x0e, 0x63, 0xc6, 0x25, 0xc8, 0x1b, 0xb2, 0x89, 0x1e, 0xc0, 0xb4, 0x10, 0xc0, 0x6b, 0xe4, 0x53, - 0x56, 0xbb, 0xc8, 0x65, 0xf2, 0xf0, 0xdf, 0x35, 0x28, 0x09, 0x45, 0xf7, 0xfa, 0x68, 0x0d, 0xaa, - 0x2e, 0x6f, 0xb4, 0x98, 0x3e, 0x42, 0x22, 0x3d, 0x3d, 0x7a, 0x6c, 0x4e, 0x18, 0x15, 0x31, 0x85, - 0x75, 0xa3, 0xff, 0x83, 0xb2, 0x64, 0xd1, 0x1f, 0xf8, 0xc2, 
0xe4, 0x8d, 0x28, 0x83, 0x70, 0xe7, - 0x6c, 0x4e, 0x18, 0x20, 0xc8, 0x9f, 0x0e, 0x7c, 0x74, 0x00, 0x73, 0x72, 0x32, 0xd7, 0x46, 0x88, - 0x91, 0x67, 0x5c, 0x16, 0xa3, 0x5c, 0x46, 0x97, 0x6a, 0x73, 0xc2, 0x40, 0x62, 0xbe, 0x32, 0xf8, - 0xa8, 0x04, 0x45, 0xd1, 0x8b, 0xff, 0xa1, 0x01, 0x48, 0x83, 0xee, 0xf5, 0xd1, 0x06, 0xd4, 0x5c, - 0xd1, 0x8a, 0x28, 0x7c, 0x25, 0x51, 0x61, 0xb1, 0x0e, 0x13, 0x46, 0x55, 0x4e, 0xe2, 0x2a, 0xbf, - 0x0b, 0x95, 0x80, 0x4b, 0xa8, 0xf3, 0x7c, 0x82, 0xce, 0x01, 0x87, 0xb2, 0x9c, 0x40, 0xb5, 0xfe, - 0x00, 0x2e, 0x06, 0xf3, 0x13, 0xd4, 0xbe, 0x31, 0x46, 0xed, 0x80, 0xe1, 0xac, 0xe4, 0xa0, 0x2a, - 0x0e, 0xf4, 0xac, 0xe1, 0xdd, 0xf8, 0xd7, 0x79, 0x28, 0xae, 0x3b, 0xbd, 0xbe, 0xe9, 0xd2, 0x35, - 0x2a, 0xb8, 0xc4, 0x1b, 0x74, 0x7d, 0xa6, 0x6e, 0x6d, 0xf5, 0x66, 0x14, 0x41, 0x90, 0xc9, 0xbf, - 0x06, 0x23, 0x35, 0xc4, 0x14, 0x3a, 0x59, 0x1c, 0x2d, 0xb9, 0x57, 0x98, 0x2c, 0x0e, 0x16, 0x31, - 0x45, 0xfa, 0x52, 0x3e, 0xf4, 0x25, 0x1d, 0x8a, 0x43, 0xe2, 0x86, 0xc7, 0xe1, 0xe6, 0x84, 0x21, - 0x3b, 0xd0, 0x3d, 0x98, 0x89, 0x87, 0xe6, 0x29, 0x41, 0x53, 0x6b, 0x47, 0x23, 0xf9, 0x4d, 0xa8, - 0x44, 0xce, 0x87, 0x82, 0xa0, 0x2b, 0xf7, 0x94, 0xe3, 0xe1, 0x92, 0x0c, 0x4a, 0xf4, 0x2c, 0xab, - 0x6c, 0x4e, 0x88, 0xb0, 0x84, 0xff, 0x1f, 0xaa, 0x11, 0x5d, 0x69, 0xf8, 0x6d, 0xbe, 0xff, 0x6c, - 0x6d, 0x87, 0xc7, 0xea, 0xc7, 0x2c, 0x3c, 0x1b, 0x75, 0x8d, 0x86, 0xfc, 0x9d, 0xe6, 0xfe, 0x7e, - 0x3d, 0x87, 0xaa, 0x50, 0xda, 0xdd, 0x3b, 0x68, 0x71, 0xaa, 0x3c, 0x7e, 0x27, 0xe0, 0x20, 0x62, - 0xbd, 0x12, 0xe2, 0x27, 0x94, 0x10, 0xaf, 0xc9, 0x10, 0x9f, 0x0b, 0x43, 0x7c, 0xfe, 0x51, 0x0d, - 0x2a, 0xdc, 0x3e, 0xad, 0x81, 0x4d, 0x8f, 0x99, 0x5f, 0x6a, 0x00, 0x07, 0xa7, 0xb6, 0x0c, 0x40, - 0x2b, 0x50, 0x6c, 0x73, 0xe6, 0x0d, 0x8d, 0xf9, 0xf3, 0xc5, 0x44, 0x93, 0x1b, 0x92, 0x0a, 0xbd, - 0x09, 0x45, 0x6f, 0xd0, 0x6e, 0x13, 0x4f, 0x86, 0xfb, 0xcb, 0xf1, 0x90, 0x22, 0x1c, 0xde, 0x90, - 0x74, 0x74, 0xca, 0x0b, 0xd3, 0xea, 0x0e, 0x58, 0xf0, 0x1f, 0x3f, 0x45, 0xd0, 0xe1, 0x9f, 0x69, - 
0x50, 0x66, 0x52, 0x66, 0x8a, 0x63, 0x57, 0xa1, 0xc4, 0x64, 0x20, 0x1d, 0x11, 0xc9, 0xa6, 0x8d, - 0xb0, 0x03, 0xfd, 0x2f, 0x94, 0xe4, 0x0e, 0x96, 0xc1, 0xac, 0x91, 0xcc, 0x76, 0xaf, 0x6f, 0x84, - 0xa4, 0x78, 0x1b, 0x2e, 0x30, 0xab, 0xb4, 0x69, 0x62, 0x29, 0xed, 0xa8, 0xa6, 0x5e, 0x5a, 0x2c, - 0xf5, 0xd2, 0x61, 0xba, 0x7f, 0x7c, 0xe6, 0x59, 0x6d, 0xb3, 0x2b, 0xa4, 0x08, 0xda, 0xf8, 0x1b, - 0x80, 0x54, 0x66, 0x59, 0xd4, 0xc5, 0x55, 0x28, 0x6f, 0x9a, 0xde, 0xb1, 0x10, 0x09, 0x7f, 0x08, - 0x15, 0xde, 0xcc, 0x64, 0x43, 0x04, 0x93, 0xc7, 0xa6, 0x77, 0xcc, 0x04, 0xaf, 0x1a, 0xec, 0x1b, - 0x5f, 0x80, 0x99, 0x7d, 0xdb, 0xec, 0x7b, 0xc7, 0x8e, 0x8c, 0xb5, 0x34, 0xb1, 0xae, 0x87, 0x7d, - 0x99, 0x10, 0xef, 0xc2, 0x8c, 0x4b, 0x7a, 0xa6, 0x65, 0x5b, 0xf6, 0x51, 0xeb, 0xf0, 0xcc, 0x27, - 0x9e, 0xc8, 0xbb, 0x6b, 0x41, 0xf7, 0x23, 0xda, 0x4b, 0x45, 0x3b, 0xec, 0x3a, 0x87, 0xc2, 0xe3, - 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0x95, 0x0f, 0x4c, 0xbf, 0x2d, 0xad, 0x80, 0xb6, 0xa0, 0x16, 0xf8, - 0x39, 0xeb, 0x11, 0xb2, 0xc4, 0x02, 0x3e, 0x9b, 0x23, 0x33, 0x32, 0x19, 0xf0, 0xab, 0x6d, 0xb5, - 0x83, 0xb1, 0x32, 0xed, 0x36, 0xe9, 0x06, 0xac, 0x72, 0xe9, 0xac, 0x18, 0xa1, 0xca, 0x4a, 0xed, - 0x78, 0x34, 0x13, 0x1e, 0x86, 0xdc, 0x2d, 0x7f, 0x9e, 0x03, 0x34, 0x2a, 0xc3, 0x57, 0xcd, 0x0f, - 0x6e, 0x43, 0xcd, 0xf3, 0x4d, 0xd7, 0x6f, 0xc5, 0x6e, 0x25, 0x55, 0xd6, 0x1b, 0xc4, 0xaa, 0xbb, - 0x30, 0xd3, 0x77, 0x9d, 0x23, 0x97, 0x78, 0x5e, 0xcb, 0x76, 0x7c, 0xeb, 0xc5, 0x99, 0x48, 0x8e, - 0x6a, 0xb2, 0x7b, 0x97, 0xf5, 0xa2, 0x26, 0x14, 0x5f, 0x58, 0x5d, 0x9f, 0xb8, 0x5e, 0x63, 0x6a, - 0x31, 0xbf, 0x54, 0x5b, 0x7d, 0x70, 0x9e, 0xd5, 0x96, 0xdf, 0x63, 0xf4, 0x07, 0x67, 0x7d, 0x62, - 0xc8, 0xb9, 0x6a, 0xda, 0x52, 0x88, 0xa4, 0x2d, 0xb7, 0x01, 0x42, 0x7a, 0x1a, 0xb5, 0x76, 0xf7, - 0x9e, 0x3e, 0x3b, 0xa8, 0x4f, 0xa0, 0x0a, 0x4c, 0xef, 0xee, 0x6d, 0x34, 0x77, 0x9a, 0x34, 0xae, - 0xe1, 0x15, 0x69, 0x1b, 0xd5, 0x86, 0x68, 0x1e, 0xa6, 0x5f, 0xd2, 0x5e, 0x79, 0x6d, 0xcb, 0x1b, - 0x45, 0xd6, 0xde, 0xea, 0xe0, 0xbf, 
0x69, 0x50, 0x15, 0xbb, 0x20, 0xd3, 0x56, 0x54, 0x21, 0x72, - 0x11, 0x08, 0x9a, 0x23, 0xf1, 0xdd, 0xd1, 0x11, 0xa9, 0x98, 0x6c, 0x52, 0x77, 0xe7, 0x8b, 0x4d, - 0x3a, 0xc2, 0xac, 0x41, 0x1b, 0xdd, 0x83, 0x7a, 0x9b, 0xbb, 0x7b, 0xec, 0xd8, 0x31, 0x66, 0x44, - 0x7f, 0xb0, 0x48, 0xb7, 0xa1, 0x40, 0x86, 0xc4, 0xf6, 0xbd, 0x46, 0x99, 0xc5, 0xa6, 0xaa, 0x4c, - 0xb4, 0x9a, 0xb4, 0xd7, 0x10, 0x83, 0xf8, 0x7f, 0xe0, 0xc2, 0x0e, 0xcd, 0x74, 0x1f, 0xbb, 0xa6, - 0xad, 0xe6, 0xcc, 0x07, 0x07, 0x3b, 0xc2, 0x2a, 0xf4, 0x13, 0xd5, 0x20, 0xb7, 0xb5, 0x21, 0x74, - 0xc8, 0x6d, 0x6d, 0xe0, 0x4f, 0x35, 0x40, 0xea, 0xbc, 0x4c, 0x66, 0x8a, 0x31, 0x97, 0xf0, 0xf9, - 0x10, 0x7e, 0x0e, 0xa6, 0x88, 0xeb, 0x3a, 0x2e, 0x33, 0x48, 0xc9, 0xe0, 0x0d, 0x7c, 0x4b, 0xc8, - 0x60, 0x90, 0xa1, 0x73, 0x12, 0xec, 0x79, 0xce, 0x4d, 0x0b, 0x44, 0xdd, 0x86, 0xd9, 0x08, 0x55, - 0xa6, 0x18, 0x79, 0x17, 0x2e, 0x32, 0x66, 0xdb, 0x84, 0xf4, 0xd7, 0xba, 0xd6, 0x30, 0x15, 0xb5, - 0x0f, 0x97, 0xe2, 0x84, 0x5f, 0xaf, 0x8d, 0xf0, 0x3b, 0x02, 0xf1, 0xc0, 0xea, 0x91, 0x03, 0x67, - 0x27, 0x5d, 0x36, 0x1a, 0xf8, 0xe8, 0x4d, 0x58, 0x1c, 0x26, 0xec, 0x1b, 0xff, 0x4a, 0x83, 0xcb, - 0x23, 0xd3, 0xbf, 0xe6, 0x55, 0x5d, 0x00, 0x38, 0xa2, 0xdb, 0x87, 0x74, 0xe8, 0x00, 0xbf, 0xc3, - 0x29, 0x3d, 0x81, 0x9c, 0x34, 0x76, 0x54, 0x84, 0x9c, 0xc7, 0x50, 0x78, 0xc2, 0xca, 0x27, 0x8a, - 0x56, 0x93, 0x52, 0x2b, 0xdb, 0xec, 0xf1, 0x5b, 0x5d, 0xc9, 0x60, 0xdf, 0xec, 0xe8, 0x24, 0xc4, - 0x7d, 0x66, 0xec, 0xf0, 0x23, 0xba, 0x64, 0x04, 0x6d, 0x8a, 0xde, 0xee, 0x5a, 0xc4, 0xf6, 0xd9, - 0xe8, 0x24, 0x1b, 0x55, 0x7a, 0xf0, 0x32, 0xd4, 0x39, 0xd2, 0x5a, 0xa7, 0xa3, 0x1c, 0xd3, 0x01, - 0x3f, 0x2d, 0xca, 0x0f, 0xbf, 0x84, 0x0b, 0x0a, 0x7d, 0x26, 0xd3, 0xbd, 0x06, 0x05, 0x5e, 0x23, - 0x12, 0x27, 0xc4, 0x5c, 0x74, 0x16, 0x87, 0x31, 0x04, 0x0d, 0xbe, 0x0d, 0xb3, 0xa2, 0x87, 0xf4, - 0x9c, 0xa4, 0x55, 0x67, 0xf6, 0xc1, 0x3b, 0x30, 0x17, 0x25, 0xcb, 0xe4, 0x08, 0x6b, 0x12, 0xf4, - 0x59, 0xbf, 0xa3, 0x1c, 0x38, 0xf1, 0x45, 0x51, 0x0d, 0x96, 0x8b, 0x19, 
0x2c, 0x10, 0x48, 0xb2, - 0xc8, 0x24, 0xd0, 0xac, 0x34, 0xff, 0x8e, 0xe5, 0x05, 0x69, 0xc5, 0x27, 0x80, 0xd4, 0xce, 0x4c, - 0x8b, 0xb2, 0x0c, 0x45, 0x6e, 0x70, 0x99, 0xb9, 0x26, 0xaf, 0x8a, 0x24, 0xa2, 0x02, 0x6d, 0x90, - 0x17, 0xae, 0x79, 0xd4, 0x23, 0x41, 0x64, 0xa5, 0xf9, 0x9a, 0xda, 0x99, 0x49, 0xe3, 0x3f, 0x68, - 0x50, 0x59, 0xeb, 0x9a, 0x6e, 0x4f, 0x1a, 0xff, 0x5d, 0x28, 0xf0, 0x44, 0x50, 0xdc, 0x9d, 0xee, - 0x44, 0xd9, 0xa8, 0xb4, 0xbc, 0xb1, 0xc6, 0xd3, 0x46, 0x31, 0x8b, 0x2e, 0x96, 0x28, 0x4d, 0x6e, - 0xc4, 0x4a, 0x95, 0x1b, 0xe8, 0x75, 0x98, 0x32, 0xe9, 0x14, 0xe6, 0xbf, 0xb5, 0x78, 0x0a, 0xce, - 0xb8, 0xb1, 0x43, 0x9b, 0x53, 0xe1, 0xb7, 0xa1, 0xac, 0x20, 0xd0, 0x9b, 0xc5, 0xe3, 0xa6, 0x38, - 0x98, 0xd7, 0xd6, 0x0f, 0xb6, 0x9e, 0xf3, 0x0b, 0x47, 0x0d, 0x60, 0xa3, 0x19, 0xb4, 0x73, 0xf8, - 0x43, 0x31, 0x4b, 0x78, 0xb8, 0x2a, 0x8f, 0x96, 0x26, 0x4f, 0xee, 0x95, 0xe4, 0x39, 0x85, 0xaa, - 0x50, 0x3f, 0xd3, 0x1e, 0x78, 0x13, 0x0a, 0x8c, 0x9f, 0xdc, 0x02, 0xf3, 0x09, 0xb0, 0xd2, 0x3b, - 0x39, 0x21, 0x9e, 0x81, 0xea, 0xbe, 0x6f, 0xfa, 0x03, 0x4f, 0x6e, 0x81, 0xdf, 0x6b, 0x50, 0x93, - 0x3d, 0x59, 0xcb, 0x2c, 0xf2, 0x7a, 0xca, 0x63, 0x5e, 0x70, 0x39, 0xbd, 0x04, 0x85, 0xce, 0xe1, - 0xbe, 0xf5, 0x89, 0x2c, 0x66, 0x89, 0x16, 0xed, 0xef, 0x72, 0x1c, 0x5e, 0x50, 0x16, 0x2d, 0x7a, - 0xd1, 0x71, 0xcd, 0x17, 0xfe, 0x96, 0xdd, 0x21, 0xa7, 0x2c, 0x9f, 0x98, 0x34, 0xc2, 0x0e, 0x76, - 0x37, 0x11, 0x85, 0x67, 0x96, 0x7f, 0xa9, 0x85, 0xe8, 0x59, 0xb8, 0xb0, 0x36, 0xf0, 0x8f, 0x9b, - 0xb6, 0x79, 0xd8, 0x95, 0x41, 0x00, 0xcf, 0x01, 0xa2, 0x9d, 0x1b, 0x96, 0xa7, 0xf6, 0x36, 0x61, - 0x96, 0xf6, 0x12, 0xdb, 0xb7, 0xda, 0x4a, 0xc4, 0x90, 0x61, 0x5b, 0x8b, 0x85, 0x6d, 0xd3, 0xf3, - 0x5e, 0x3a, 0x6e, 0x47, 0xa8, 0x16, 0xb4, 0xf1, 0x06, 0x67, 0xfe, 0xcc, 0x8b, 0x04, 0xe6, 0xaf, - 0xca, 0x65, 0x29, 0xe4, 0xf2, 0x98, 0xf8, 0x63, 0xb8, 0xe0, 0x07, 0x70, 0x51, 0x52, 0x8a, 0xfa, - 0xc5, 0x18, 0xe2, 0x3d, 0xb8, 0x26, 0x89, 0xd7, 0x8f, 0x69, 0x56, 0xfd, 0x54, 0x00, 0xfe, 0xa7, - 0x72, 0x3e, 
0x82, 0x46, 0x20, 0x27, 0xcb, 0xb4, 0x9c, 0xae, 0x2a, 0xc0, 0xc0, 0x13, 0x7b, 0xa6, - 0x64, 0xb0, 0x6f, 0xda, 0xe7, 0x3a, 0xdd, 0xe0, 0x10, 0xa4, 0xdf, 0x78, 0x1d, 0xe6, 0x25, 0x0f, - 0x91, 0x03, 0x45, 0x99, 0x8c, 0x08, 0x94, 0xc4, 0x44, 0x18, 0x8c, 0x4e, 0x1d, 0x6f, 0x76, 0x95, - 0x32, 0x6a, 0x5a, 0xc6, 0x53, 0x53, 0x78, 0x5e, 0xe4, 0x3b, 0x82, 0x0a, 0xa6, 0x06, 0x6d, 0xd1, - 0x4d, 0x19, 0xa8, 0xdd, 0x62, 0x21, 0x68, 0xf7, 0xc8, 0x42, 0x8c, 0xb0, 0xfe, 0x08, 0x16, 0x02, - 0x21, 0xa8, 0xdd, 0x9e, 0x12, 0xb7, 0x67, 0x79, 0x9e, 0x72, 0xe3, 0x4e, 0x52, 0xfc, 0x0e, 0x4c, - 0xf6, 0x89, 0x88, 0x29, 0xe5, 0x55, 0xb4, 0xcc, 0x9f, 0x87, 0x96, 0x95, 0xc9, 0x6c, 0x1c, 0x77, - 0xe0, 0xba, 0xe4, 0xce, 0x2d, 0x9a, 0xc8, 0x3e, 0x2e, 0x94, 0xbc, 0x8d, 0x71, 0xb3, 0x8e, 0xde, - 0xc6, 0xf2, 0x7c, 0xed, 0xe5, 0x6d, 0x8c, 0x9e, 0x15, 0xaa, 0x6f, 0x65, 0x3a, 0x2b, 0xb6, 0xb9, - 0x4d, 0x03, 0x97, 0xcc, 0xc4, 0xec, 0x10, 0xe6, 0xa2, 0x9e, 0x9c, 0x29, 0x8c, 0xcd, 0xc1, 0x94, - 0xef, 0x9c, 0x10, 0x19, 0xc4, 0x78, 0x43, 0x0a, 0x1c, 0xb8, 0x79, 0x26, 0x81, 0xcd, 0x90, 0x19, - 0xdb, 0x92, 0x59, 0xe5, 0xa5, 0xab, 0x29, 0xf3, 0x19, 0xde, 0xc0, 0xbb, 0x70, 0x29, 0x1e, 0x26, - 0x32, 0x89, 0xfc, 0x9c, 0x6f, 0xe0, 0xa4, 0x48, 0x92, 0x89, 0xef, 0xfb, 0x61, 0x30, 0x50, 0x02, - 0x4a, 0x26, 0x96, 0x06, 0xe8, 0x49, 0xf1, 0xe5, 0xbf, 0xb1, 0x5f, 0x83, 0x70, 0x93, 0x89, 0x99, - 0x17, 0x32, 0xcb, 0xbe, 0xfc, 0x61, 0x8c, 0xc8, 0x8f, 0x8d, 0x11, 0xc2, 0x49, 0xc2, 0x28, 0xf6, - 0x35, 0x6c, 0x3a, 0x81, 0x11, 0x06, 0xd0, 0xac, 0x18, 0xf4, 0x0c, 0x09, 0x30, 0x58, 0x43, 0x6e, - 0x6c, 0x35, 0xec, 0x66, 0x5a, 0x8c, 0x0f, 0xc2, 0xd8, 0x39, 0x12, 0x99, 0x33, 0x31, 0xfe, 0x10, - 0x16, 0xd3, 0x83, 0x72, 0x16, 0xce, 0xf7, 0x31, 0x94, 0x82, 0x84, 0x52, 0x79, 0x5a, 0x2d, 0x43, - 0x71, 0x77, 0x6f, 0xff, 0xe9, 0xda, 0x7a, 0xb3, 0xae, 0xad, 0xfe, 0x33, 0x0f, 0xb9, 0xed, 0xe7, - 0xe8, 0x5b, 0x30, 0xc5, 0x1f, 0x5e, 0xc6, 0xbc, 0x4b, 0xe9, 0xe3, 0x9e, 0x70, 0xf0, 0xd5, 0x4f, - 0xff, 0xf4, 0xd7, 0x2f, 0x72, 0x97, 0xf0, 0x85, 
0x95, 0xe1, 0x5b, 0x66, 0xb7, 0x7f, 0x6c, 0xae, - 0x9c, 0x0c, 0x57, 0xd8, 0x99, 0xf0, 0x50, 0xbb, 0x8f, 0x9e, 0x43, 0xfe, 0xe9, 0xc0, 0x47, 0xa9, - 0x8f, 0x56, 0x7a, 0xfa, 0xd3, 0x0e, 0xd6, 0x19, 0xe7, 0x39, 0x3c, 0xa3, 0x72, 0xee, 0x0f, 0x7c, - 0xca, 0x77, 0x08, 0x65, 0xe5, 0x75, 0x06, 0x9d, 0xfb, 0x9c, 0xa5, 0x9f, 0xff, 0xf2, 0x83, 0x31, - 0xc3, 0xbb, 0x8a, 0x2f, 0xab, 0x78, 0xfc, 0x11, 0x49, 0xd5, 0xe7, 0xe0, 0xd4, 0x8e, 0xeb, 0x13, - 0x3e, 0x30, 0xc4, 0xf5, 0x51, 0x8a, 0xfa, 0xc9, 0xfa, 0xf8, 0xa7, 0x36, 0xe5, 0xeb, 0x88, 0x17, - 0xa5, 0xb6, 0x8f, 0xae, 0x27, 0xbc, 0x48, 0xa8, 0xb5, 0x77, 0x7d, 0x31, 0x9d, 0x40, 0x20, 0xdd, - 0x60, 0x48, 0x57, 0xf0, 0x25, 0x15, 0xa9, 0x1d, 0xd0, 0x3d, 0xd4, 0xee, 0xaf, 0x1e, 0xc3, 0x14, - 0xab, 0x18, 0xa2, 0x96, 0xfc, 0xd0, 0x13, 0x6a, 0x9d, 0x29, 0x3b, 0x20, 0x52, 0x6b, 0xc4, 0xf3, - 0x0c, 0x6d, 0x16, 0xd7, 0x02, 0x34, 0x56, 0x34, 0x7c, 0xa8, 0xdd, 0x5f, 0xd2, 0xde, 0xd0, 0x56, - 0xbf, 0x3f, 0x09, 0x53, 0xac, 0x52, 0x83, 0xfa, 0x00, 0x61, 0x0d, 0x2e, 0xae, 0xe7, 0x48, 0x55, - 0x2f, 0xae, 0xe7, 0x68, 0xf9, 0x0e, 0x5f, 0x67, 0xc8, 0xf3, 0x78, 0x2e, 0x40, 0x66, 0xaf, 0xe0, - 0x2b, 0xac, 0x26, 0x43, 0xcd, 0xfa, 0x12, 0xca, 0x4a, 0x2d, 0x0d, 0x25, 0x71, 0x8c, 0x14, 0xe3, - 0xe2, 0xdb, 0x24, 0xa1, 0x10, 0x87, 0x6f, 0x32, 0xd0, 0x6b, 0xb8, 0xa1, 0x1a, 0x97, 0xe3, 0xba, - 0x8c, 0x92, 0x02, 0x7f, 0xa6, 0x41, 0x2d, 0x5a, 0x4f, 0x43, 0x37, 0x13, 0x58, 0xc7, 0xcb, 0x72, - 0xfa, 0xad, 0xf1, 0x44, 0xa9, 0x22, 0x70, 0xfc, 0x13, 0x42, 0xfa, 0x26, 0xa5, 0x14, 0xb6, 0x47, - 0x3f, 0xd0, 0x60, 0x26, 0x56, 0x25, 0x43, 0x49, 0x10, 0x23, 0x35, 0x38, 0xfd, 0xf6, 0x39, 0x54, - 0x42, 0x92, 0xbb, 0x4c, 0x92, 0x1b, 0xf8, 0xea, 0xa8, 0x31, 0x7c, 0xab, 0x47, 0x7c, 0x47, 0x48, - 0xb3, 0xfa, 0xaf, 0x3c, 0x14, 0xd7, 0xf9, 0xaf, 0x8c, 0x90, 0x0f, 0xa5, 0xa0, 0xf2, 0x84, 0x16, - 0x92, 0xaa, 0x12, 0x61, 0xca, 0xae, 0x5f, 0x4f, 0x1d, 0x17, 0x22, 0xdc, 0x61, 0x22, 0x2c, 0xe2, - 0x2b, 0x81, 0x08, 0xe2, 0xd7, 0x4c, 0x2b, 0xfc, 0xf2, 0xbd, 0x62, 0x76, 0x3a, 0x74, 
0x49, 0xbe, - 0xa7, 0x41, 0x45, 0x2d, 0x28, 0xa1, 0x1b, 0x89, 0xf5, 0x10, 0xb5, 0x26, 0xa5, 0xe3, 0x71, 0x24, - 0x02, 0xff, 0x1e, 0xc3, 0xbf, 0x89, 0x17, 0xd2, 0xf0, 0x5d, 0x46, 0x1f, 0x15, 0x81, 0x97, 0x90, - 0x92, 0x45, 0x88, 0x54, 0xa8, 0x92, 0x45, 0x88, 0x56, 0xa0, 0xce, 0x17, 0x61, 0xc0, 0xe8, 0xa9, - 0x08, 0xa7, 0x00, 0x61, 0x85, 0x09, 0x25, 0x1a, 0x57, 0xb9, 0xc4, 0xc4, 0x7d, 0x70, 0xb4, 0x38, - 0x95, 0xb0, 0x03, 0x62, 0xd8, 0x5d, 0xcb, 0xa3, 0xbe, 0xb8, 0xfa, 0xdb, 0x49, 0x28, 0x3f, 0x31, - 0x2d, 0xdb, 0x27, 0xb6, 0x69, 0xb7, 0x09, 0x3a, 0x82, 0x29, 0x76, 0x4a, 0xc5, 0x03, 0x8f, 0x5a, - 0xf6, 0x89, 0x07, 0x9e, 0x48, 0x4d, 0x04, 0xdf, 0x66, 0xd0, 0xd7, 0xb1, 0x1e, 0x40, 0xf7, 0x42, - 0xfe, 0x2b, 0xac, 0x9e, 0x41, 0x55, 0x3e, 0x81, 0x02, 0xaf, 0x5f, 0xa0, 0x18, 0xb7, 0x48, 0x9d, - 0x43, 0xbf, 0x9a, 0x3c, 0x98, 0xba, 0xcb, 0x54, 0x2c, 0x8f, 0x11, 0x53, 0xb0, 0x6f, 0x03, 0x84, - 0x05, 0xb3, 0xb8, 0x7d, 0x47, 0xea, 0x6b, 0xfa, 0x62, 0x3a, 0x81, 0x00, 0xbe, 0xcf, 0x80, 0x6f, - 0xe1, 0xeb, 0x89, 0xc0, 0x9d, 0x60, 0x02, 0x05, 0x6f, 0xc3, 0xe4, 0xa6, 0xe9, 0x1d, 0xa3, 0xd8, - 0x21, 0xa4, 0xbc, 0x92, 0xea, 0x7a, 0xd2, 0x90, 0x80, 0xba, 0xc5, 0xa0, 0x16, 0xf0, 0x7c, 0x22, - 0xd4, 0xb1, 0xe9, 0xd1, 0x98, 0x8e, 0x06, 0x30, 0x2d, 0x5f, 0x3e, 0xd1, 0xb5, 0x98, 0xcd, 0xa2, - 0xaf, 0xa4, 0xfa, 0x42, 0xda, 0xb0, 0x00, 0x5c, 0x62, 0x80, 0x18, 0x5f, 0x4b, 0x36, 0xaa, 0x20, - 0x7f, 0xa8, 0xdd, 0x7f, 0x43, 0x5b, 0xfd, 0x51, 0x1d, 0x26, 0x69, 0xbe, 0x44, 0x4f, 0x91, 0xf0, - 0x9a, 0x19, 0xb7, 0xf0, 0x48, 0x71, 0x27, 0x6e, 0xe1, 0xd1, 0x1b, 0x6a, 0xc2, 0x29, 0xc2, 0x7e, - 0x6b, 0x49, 0x18, 0x15, 0xd5, 0xd8, 0x87, 0xb2, 0x72, 0x19, 0x45, 0x09, 0x1c, 0xa3, 0xa5, 0xa3, - 0xf8, 0x29, 0x92, 0x70, 0x93, 0xc5, 0x8b, 0x0c, 0x54, 0xc7, 0x17, 0xa3, 0xa0, 0x1d, 0x4e, 0x46, - 0x51, 0xbf, 0x03, 0x15, 0xf5, 0xd6, 0x8a, 0x12, 0x98, 0xc6, 0x6a, 0x53, 0xf1, 0x58, 0x91, 0x74, - 0xe9, 0x4d, 0x70, 0x9a, 0xe0, 0x97, 0xa5, 0x92, 0x96, 0xa2, 0x7f, 0x0c, 0x45, 0x71, 0x97, 0x4d, - 0xd2, 0x37, 0x5a, 0xcd, 
0x4a, 0xd2, 0x37, 0x76, 0x11, 0x4e, 0x48, 0x49, 0x18, 0x2c, 0xcd, 0xd9, - 0x65, 0x80, 0x16, 0x90, 0x8f, 0x89, 0x9f, 0x06, 0x19, 0xd6, 0x67, 0xd2, 0x20, 0x95, 0xfb, 0xd2, - 0x58, 0xc8, 0x23, 0xe2, 0x8b, 0xbd, 0x2c, 0x2f, 0x23, 0x28, 0x85, 0xa3, 0x1a, 0x0d, 0xf1, 0x38, - 0x92, 0xd4, 0x2c, 0x32, 0x44, 0x15, 0xa1, 0x10, 0x7d, 0x17, 0x20, 0xbc, 0x78, 0xc7, 0x13, 0x83, - 0xc4, 0xea, 0x5d, 0x3c, 0x31, 0x48, 0xbe, 0xbb, 0x27, 0x78, 0x70, 0x08, 0xce, 0x33, 0x59, 0x0a, - 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x48, 0x86, 0x48, 0x2c, 0x0c, 0xea, 0xaf, 0xbd, - 0x1a, 0x71, 0x6a, 0xf4, 0x0c, 0xe5, 0x6a, 0xb3, 0x29, 0xfd, 0x97, 0x54, 0xb2, 0xcf, 0x35, 0xa8, - 0x46, 0xae, 0xfa, 0xe8, 0x4e, 0xca, 0x3a, 0xc7, 0x8a, 0x8b, 0xfa, 0xdd, 0x73, 0xe9, 0x52, 0x73, - 0x27, 0x65, 0x57, 0xc8, 0xbc, 0xf1, 0x87, 0x1a, 0xd4, 0xa2, 0xf5, 0x01, 0x94, 0x02, 0x30, 0x52, - 0xa1, 0xd4, 0x97, 0xce, 0x27, 0x7c, 0x85, 0xd5, 0x0a, 0x53, 0xc9, 0x8f, 0xa1, 0x28, 0xca, 0x0a, - 0x49, 0x6e, 0x11, 0x2d, 0x70, 0x26, 0xb9, 0x45, 0xac, 0x26, 0x91, 0xe6, 0x16, 0xf4, 0x86, 0xae, - 0x78, 0xa2, 0x28, 0x3e, 0xa4, 0x41, 0x8e, 0xf7, 0xc4, 0x58, 0xe5, 0x62, 0x2c, 0x64, 0xe8, 0x89, - 0xb2, 0xf4, 0x80, 0x52, 0x38, 0x9e, 0xe3, 0x89, 0xf1, 0xca, 0x45, 0x9a, 0x27, 0x32, 0x54, 0xc5, - 0x13, 0xc3, 0x4a, 0x41, 0x92, 0x27, 0x8e, 0x94, 0x6f, 0x93, 0x3c, 0x71, 0xb4, 0xd8, 0x90, 0xb6, - 0xb6, 0x0c, 0x3c, 0xe2, 0x89, 0xb3, 0x09, 0x95, 0x05, 0xf4, 0x5a, 0x8a, 0x4d, 0x13, 0x4b, 0xc3, - 0xfa, 0xeb, 0xaf, 0x48, 0x3d, 0xde, 0x03, 0xf8, 0x6a, 0x48, 0x0f, 0xf8, 0x85, 0x06, 0x73, 0x49, - 0xa5, 0x09, 0x94, 0x02, 0x96, 0x52, 0x57, 0xd6, 0x97, 0x5f, 0x95, 0xfc, 0x15, 0xec, 0x16, 0xf8, - 0xc4, 0xa3, 0xfa, 0xef, 0xbe, 0x5c, 0xd0, 0xfe, 0xf8, 0xe5, 0x82, 0xf6, 0xe7, 0x2f, 0x17, 0xb4, - 0x9f, 0xfe, 0x65, 0x61, 0xe2, 0xb0, 0xc0, 0xfe, 0xc3, 0xc3, 0x5b, 0xff, 0x0e, 0x00, 0x00, 0xff, - 0xff, 0x73, 0x7e, 0xb4, 0xb4, 0x77, 0x31, 0x00, 0x00, + // 3450 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 
0xff, 0xbc, 0x3b, 0x5b, 0x6f, 0x1b, 0xc7, + 0xb9, 0x5a, 0x5e, 0xc5, 0x8f, 0x17, 0xd1, 0x23, 0xd9, 0xa6, 0x68, 0x5b, 0x96, 0xc7, 0x37, 0xd9, + 0x4e, 0xa4, 0x44, 0xc9, 0x39, 0x0f, 0x3e, 0x41, 0x70, 0x64, 0x89, 0xb1, 0x74, 0x24, 0x4b, 0xce, + 0x4a, 0x76, 0x72, 0x80, 0xa0, 0xc4, 0x8a, 0x1c, 0x53, 0x0b, 0x91, 0xbb, 0xcc, 0xee, 0x92, 0x96, + 0xd2, 0x14, 0x28, 0xd2, 0x04, 0x45, 0x0b, 0xf4, 0xa5, 0x79, 0xe8, 0xed, 0xb1, 0x28, 0x8a, 0xfc, + 0x80, 0xbe, 0xf5, 0x07, 0x14, 0x7d, 0x69, 0x81, 0xfe, 0x81, 0x22, 0xed, 0x63, 0xdf, 0xfb, 0x54, + 0xb4, 0x98, 0xdb, 0xee, 0xec, 0x72, 0x97, 0x52, 0xca, 0x26, 0x2f, 0xd6, 0xce, 0x37, 0xdf, 0x7c, + 0xb7, 0x99, 0xef, 0x32, 0xdf, 0xd0, 0x50, 0x70, 0xfa, 0xad, 0xe5, 0xbe, 0x63, 0x7b, 0x36, 0x2a, + 0x11, 0xaf, 0xd5, 0x76, 0x89, 0x33, 0x24, 0x4e, 0xff, 0xb0, 0x3e, 0xd7, 0xb1, 0x3b, 0x36, 0x9b, + 0x58, 0xa1, 0x5f, 0x1c, 0xa7, 0x3e, 0x4f, 0x71, 0x56, 0x7a, 0xc3, 0x56, 0x8b, 0xfd, 0xd3, 0x3f, + 0x5c, 0x39, 0x1e, 0x8a, 0xa9, 0x2b, 0x6c, 0xca, 0x18, 0x78, 0x47, 0xec, 0x9f, 0xfe, 0x21, 0xfb, + 0x23, 0x26, 0xaf, 0x76, 0x6c, 0xbb, 0xd3, 0x25, 0x2b, 0x46, 0xdf, 0x5c, 0x31, 0x2c, 0xcb, 0xf6, + 0x0c, 0xcf, 0xb4, 0x2d, 0x97, 0xcf, 0xe2, 0xcf, 0x34, 0xa8, 0xe8, 0xc4, 0xed, 0xdb, 0x96, 0x4b, + 0x36, 0x89, 0xd1, 0x26, 0x0e, 0xba, 0x06, 0xd0, 0xea, 0x0e, 0x5c, 0x8f, 0x38, 0x4d, 0xb3, 0x5d, + 0xd3, 0x16, 0xb5, 0xa5, 0x8c, 0x5e, 0x10, 0x90, 0xad, 0x36, 0xba, 0x02, 0x85, 0x1e, 0xe9, 0x1d, + 0xf2, 0xd9, 0x14, 0x9b, 0x9d, 0xe6, 0x80, 0xad, 0x36, 0xaa, 0xc3, 0xb4, 0x43, 0x86, 0xa6, 0x6b, + 0xda, 0x56, 0x2d, 0xbd, 0xa8, 0x2d, 0xa5, 0x75, 0x7f, 0x4c, 0x17, 0x3a, 0xc6, 0x0b, 0xaf, 0xe9, + 0x11, 0xa7, 0x57, 0xcb, 0xf0, 0x85, 0x14, 0x70, 0x40, 0x9c, 0x1e, 0xfe, 0x34, 0x0b, 0x25, 0xdd, + 0xb0, 0x3a, 0x44, 0x27, 0x1f, 0x0e, 0x88, 0xeb, 0xa1, 0x2a, 0xa4, 0x8f, 0xc9, 0x29, 0x63, 0x5f, + 0xd2, 0xe9, 0x27, 0x5f, 0x6f, 0x75, 0x48, 0x93, 0x58, 0x9c, 0x71, 0x89, 0xae, 0xb7, 0x3a, 0xa4, + 0x61, 0xb5, 0xd1, 0x1c, 0x64, 0xbb, 0x66, 0xcf, 0xf4, 0x04, 0x57, 0x3e, 0x08, 0x89, 0x93, 
0x89, + 0x88, 0xb3, 0x0e, 0xe0, 0xda, 0x8e, 0xd7, 0xb4, 0x9d, 0x36, 0x71, 0x6a, 0xd9, 0x45, 0x6d, 0xa9, + 0xb2, 0x7a, 0x6b, 0x59, 0xdd, 0x88, 0x65, 0x55, 0xa0, 0xe5, 0x7d, 0xdb, 0xf1, 0xf6, 0x28, 0xae, + 0x5e, 0x70, 0xe5, 0x27, 0x7a, 0x07, 0x8a, 0x8c, 0x88, 0x67, 0x38, 0x1d, 0xe2, 0xd5, 0x72, 0x8c, + 0xca, 0xed, 0x33, 0xa8, 0x1c, 0x30, 0x64, 0x9d, 0xb1, 0xe7, 0xdf, 0x08, 0x43, 0xc9, 0x25, 0x8e, + 0x69, 0x74, 0xcd, 0x8f, 0x8c, 0xc3, 0x2e, 0xa9, 0xe5, 0x17, 0xb5, 0xa5, 0x69, 0x3d, 0x04, 0xa3, + 0xfa, 0x1f, 0x93, 0x53, 0xb7, 0x69, 0x5b, 0xdd, 0xd3, 0xda, 0x34, 0x43, 0x98, 0xa6, 0x80, 0x3d, + 0xab, 0x7b, 0xca, 0x36, 0xcd, 0x1e, 0x58, 0x1e, 0x9f, 0x2d, 0xb0, 0xd9, 0x02, 0x83, 0xb0, 0xe9, + 0x25, 0xa8, 0xf6, 0x4c, 0xab, 0xd9, 0xb3, 0xdb, 0x4d, 0xdf, 0x20, 0xc0, 0x0c, 0x52, 0xe9, 0x99, + 0xd6, 0x13, 0xbb, 0xad, 0x4b, 0xb3, 0x50, 0x4c, 0xe3, 0x24, 0x8c, 0x59, 0x14, 0x98, 0xc6, 0x89, + 0x8a, 0xb9, 0x0c, 0xb3, 0x94, 0x66, 0xcb, 0x21, 0x86, 0x47, 0x02, 0xe4, 0x12, 0x43, 0xbe, 0xd0, + 0x33, 0xad, 0x75, 0x36, 0x13, 0xc2, 0x37, 0x4e, 0x46, 0xf0, 0xcb, 0x02, 0xdf, 0x38, 0x09, 0xe3, + 0xe3, 0x65, 0x28, 0xf8, 0x36, 0x47, 0xd3, 0x90, 0xd9, 0xdd, 0xdb, 0x6d, 0x54, 0xa7, 0x10, 0x40, + 0x6e, 0x6d, 0x7f, 0xbd, 0xb1, 0xbb, 0x51, 0xd5, 0x50, 0x11, 0xf2, 0x1b, 0x0d, 0x3e, 0x48, 0xe1, + 0x47, 0x00, 0x81, 0x75, 0x51, 0x1e, 0xd2, 0xdb, 0x8d, 0xff, 0xaf, 0x4e, 0x51, 0x9c, 0xe7, 0x0d, + 0x7d, 0x7f, 0x6b, 0x6f, 0xb7, 0xaa, 0xd1, 0xc5, 0xeb, 0x7a, 0x63, 0xed, 0xa0, 0x51, 0x4d, 0x51, + 0x8c, 0x27, 0x7b, 0x1b, 0xd5, 0x34, 0x2a, 0x40, 0xf6, 0xf9, 0xda, 0xce, 0xb3, 0x46, 0x35, 0x83, + 0x3f, 0xd7, 0xa0, 0x2c, 0xf6, 0x8b, 0xfb, 0x04, 0x7a, 0x13, 0x72, 0x47, 0xcc, 0x2f, 0xd8, 0x51, + 0x2c, 0xae, 0x5e, 0x8d, 0x6c, 0x6e, 0xc8, 0x77, 0x74, 0x81, 0x8b, 0x30, 0xa4, 0x8f, 0x87, 0x6e, + 0x2d, 0xb5, 0x98, 0x5e, 0x2a, 0xae, 0x56, 0x97, 0xb9, 0xc3, 0x2e, 0x6f, 0x93, 0xd3, 0xe7, 0x46, + 0x77, 0x40, 0x74, 0x3a, 0x89, 0x10, 0x64, 0x7a, 0xb6, 0x43, 0xd8, 0x89, 0x9d, 0xd6, 0xd9, 0x37, + 0x3d, 0xc6, 0x6c, 0xd3, 0xc4, 
0x69, 0xe5, 0x03, 0xfc, 0x85, 0x06, 0xf0, 0x74, 0xe0, 0x25, 0xbb, + 0xc6, 0x1c, 0x64, 0x87, 0x94, 0xb0, 0x70, 0x0b, 0x3e, 0x60, 0x3e, 0x41, 0x0c, 0x97, 0xf8, 0x3e, + 0x41, 0x07, 0xe8, 0x32, 0xe4, 0xfb, 0x0e, 0x19, 0x36, 0x8f, 0x87, 0x8c, 0xc9, 0xb4, 0x9e, 0xa3, + 0xc3, 0xed, 0x21, 0xba, 0x01, 0x25, 0xb3, 0x63, 0xd9, 0x0e, 0x69, 0x72, 0x5a, 0x59, 0x36, 0x5b, + 0xe4, 0x30, 0x26, 0xb7, 0x82, 0xc2, 0x09, 0xe7, 0x54, 0x94, 0x1d, 0x0a, 0xc2, 0x16, 0x14, 0x99, + 0xa8, 0x13, 0x99, 0xef, 0x5e, 0x20, 0x63, 0x8a, 0x2d, 0x1b, 0x35, 0xa1, 0x90, 0x1a, 0x7f, 0x00, + 0x68, 0x83, 0x74, 0x89, 0x47, 0x26, 0x89, 0x1e, 0x8a, 0x4d, 0xd2, 0xaa, 0x4d, 0xf0, 0x8f, 0x35, + 0x98, 0x0d, 0x91, 0x9f, 0x48, 0xad, 0x1a, 0xe4, 0xdb, 0x8c, 0x18, 0x97, 0x20, 0xad, 0xcb, 0x21, + 0x7a, 0x00, 0xd3, 0x42, 0x00, 0xb7, 0x96, 0x4e, 0x38, 0x34, 0x79, 0x2e, 0x93, 0x8b, 0xff, 0xa6, + 0x41, 0x41, 0x28, 0xba, 0xd7, 0x47, 0x6b, 0x50, 0x76, 0xf8, 0xa0, 0xc9, 0xf4, 0x11, 0x12, 0xd5, + 0x93, 0x83, 0xd0, 0xe6, 0x94, 0x5e, 0x12, 0x4b, 0x18, 0x18, 0xfd, 0x0f, 0x14, 0x25, 0x89, 0xfe, + 0xc0, 0x13, 0x26, 0xaf, 0x85, 0x09, 0x04, 0xe7, 0x6f, 0x73, 0x4a, 0x07, 0x81, 0xfe, 0x74, 0xe0, + 0xa1, 0x03, 0x98, 0x93, 0x8b, 0xb9, 0x36, 0x42, 0x8c, 0x34, 0xa3, 0xb2, 0x18, 0xa6, 0x32, 0xba, + 0x55, 0x9b, 0x53, 0x3a, 0x12, 0xeb, 0x95, 0xc9, 0x47, 0x05, 0xc8, 0x0b, 0x28, 0xfe, 0xbb, 0x06, + 0x20, 0x0d, 0xba, 0xd7, 0x47, 0x1b, 0x50, 0x71, 0xc4, 0x28, 0xa4, 0xf0, 0x95, 0x58, 0x85, 0xc5, + 0x3e, 0x4c, 0xe9, 0x65, 0xb9, 0x88, 0xab, 0xfc, 0x36, 0x94, 0x7c, 0x2a, 0x81, 0xce, 0xf3, 0x31, + 0x3a, 0xfb, 0x14, 0x8a, 0x72, 0x01, 0xd5, 0xfa, 0x3d, 0xb8, 0xe8, 0xaf, 0x8f, 0x51, 0xfb, 0xc6, + 0x18, 0xb5, 0x7d, 0x82, 0xb3, 0x92, 0x82, 0xaa, 0x38, 0xd0, 0x94, 0xc5, 0xc1, 0xf8, 0x8b, 0x34, + 0xe4, 0xd7, 0xed, 0x5e, 0xdf, 0x70, 0xe8, 0x1e, 0xe5, 0x1c, 0xe2, 0x0e, 0xba, 0x1e, 0x53, 0xb7, + 0xb2, 0x7a, 0x33, 0xcc, 0x41, 0xa0, 0xc9, 0xbf, 0x3a, 0x43, 0xd5, 0xc5, 0x12, 0xba, 0x58, 0x64, + 0xa8, 0xd4, 0x39, 0x16, 0x8b, 0xfc, 0x24, 0x96, 0x48, 0x5f, 0x4a, 
0x07, 0xbe, 0x54, 0x87, 0xfc, + 0x90, 0x38, 0x41, 0x56, 0xdd, 0x9c, 0xd2, 0x25, 0x00, 0xdd, 0x83, 0x99, 0x68, 0x84, 0xcf, 0x0a, + 0x9c, 0x4a, 0x2b, 0x9c, 0x10, 0x6e, 0x42, 0x29, 0x94, 0x66, 0x72, 0x02, 0xaf, 0xd8, 0x53, 0xb2, + 0xcc, 0x25, 0x19, 0xda, 0x68, 0x4a, 0x2c, 0x6d, 0x4e, 0x89, 0xe0, 0x86, 0xff, 0x17, 0xca, 0x21, + 0x5d, 0x69, 0x14, 0x6f, 0xbc, 0xfb, 0x6c, 0x6d, 0x87, 0x87, 0xfc, 0xc7, 0x2c, 0xca, 0xeb, 0x55, + 0x8d, 0x66, 0x8e, 0x9d, 0xc6, 0xfe, 0x7e, 0x35, 0x85, 0xca, 0x50, 0xd8, 0xdd, 0x3b, 0x68, 0x72, + 0xac, 0x34, 0x7e, 0xcb, 0xa7, 0x20, 0x52, 0x86, 0x92, 0x29, 0xa6, 0x94, 0x4c, 0xa1, 0xc9, 0x4c, + 0x91, 0x0a, 0x32, 0x45, 0xfa, 0x51, 0x05, 0x4a, 0xdc, 0x3e, 0xcd, 0x81, 0x45, 0xb3, 0xd5, 0x2f, + 0x35, 0x80, 0x83, 0x13, 0x4b, 0x06, 0xa0, 0x15, 0xc8, 0xb7, 0x38, 0xf1, 0x9a, 0xc6, 0xfc, 0xf9, + 0x62, 0xac, 0xc9, 0x75, 0x89, 0x85, 0x5e, 0x87, 0xbc, 0x3b, 0x68, 0xb5, 0x88, 0x2b, 0xb3, 0xc6, + 0xe5, 0x68, 0x48, 0x11, 0x0e, 0xaf, 0x4b, 0x3c, 0xba, 0xe4, 0x85, 0x61, 0x76, 0x07, 0x2c, 0x87, + 0x8c, 0x5f, 0x22, 0xf0, 0xf0, 0xcf, 0x34, 0x28, 0x32, 0x29, 0x27, 0x8a, 0x63, 0x57, 0xa1, 0xc0, + 0x64, 0x20, 0x6d, 0x11, 0xc9, 0xa6, 0xf5, 0x00, 0x80, 0xfe, 0x1b, 0x0a, 0xf2, 0x04, 0xcb, 0x60, + 0x56, 0x8b, 0x27, 0xbb, 0xd7, 0xd7, 0x03, 0x54, 0xbc, 0x0d, 0x17, 0x98, 0x55, 0x5a, 0xb4, 0x3e, + 0x95, 0x76, 0x54, 0x2b, 0x38, 0x2d, 0x52, 0xc1, 0xd5, 0x61, 0xba, 0x7f, 0x74, 0xea, 0x9a, 0x2d, + 0xa3, 0x2b, 0xa4, 0xf0, 0xc7, 0xf8, 0xff, 0x00, 0xa9, 0xc4, 0x26, 0x51, 0x17, 0x97, 0xa1, 0xb8, + 0x69, 0xb8, 0x47, 0x42, 0x24, 0xfc, 0x3e, 0x94, 0xf8, 0x70, 0x22, 0x1b, 0x22, 0xc8, 0x1c, 0x19, + 0xee, 0x11, 0x13, 0xbc, 0xac, 0xb3, 0x6f, 0x7c, 0x01, 0x66, 0xf6, 0x2d, 0xa3, 0xef, 0x1e, 0xd9, + 0x32, 0xd6, 0xd2, 0xfa, 0xbc, 0x1a, 0xc0, 0x26, 0xe2, 0x78, 0x17, 0x66, 0x1c, 0xd2, 0x33, 0x4c, + 0xcb, 0xb4, 0x3a, 0xcd, 0xc3, 0x53, 0x8f, 0xb8, 0xa2, 0x7c, 0xaf, 0xf8, 0xe0, 0x47, 0x14, 0x4a, + 0x45, 0x3b, 0xec, 0xda, 0x87, 0xc2, 0xe3, 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0xa5, 0xf7, 0x0c, 0xaf, + 0x25, 
0xad, 0x80, 0xb6, 0xa0, 0xe2, 0xfb, 0x39, 0x83, 0x08, 0x59, 0x22, 0x01, 0x9f, 0xad, 0x91, + 0x85, 0x9d, 0x0c, 0xf8, 0xe5, 0x96, 0x0a, 0x60, 0xa4, 0x0c, 0xab, 0x45, 0xba, 0x3e, 0xa9, 0x54, + 0x32, 0x29, 0x86, 0xa8, 0x92, 0x52, 0x01, 0x8f, 0x66, 0x82, 0x64, 0xc8, 0xdd, 0xf2, 0xe7, 0x29, + 0x40, 0xa3, 0x32, 0x7c, 0xd5, 0xfa, 0xe0, 0x36, 0x54, 0x5c, 0xcf, 0x70, 0xbc, 0x66, 0xe4, 0x72, + 0x53, 0x66, 0x50, 0x3f, 0x56, 0xdd, 0x85, 0x99, 0xbe, 0x63, 0x77, 0x1c, 0xe2, 0xba, 0x4d, 0xcb, + 0xf6, 0xcc, 0x17, 0xa7, 0xa2, 0xc4, 0xaa, 0x48, 0xf0, 0x2e, 0x83, 0xa2, 0x06, 0xe4, 0x5f, 0x98, + 0x5d, 0x8f, 0x38, 0x6e, 0x2d, 0xbb, 0x98, 0x5e, 0xaa, 0xac, 0x3e, 0x38, 0xcb, 0x6a, 0xcb, 0xef, + 0x30, 0xfc, 0x83, 0xd3, 0x3e, 0xd1, 0xe5, 0x5a, 0xb5, 0x6c, 0xc9, 0x85, 0xca, 0x96, 0xdb, 0x00, + 0x01, 0x3e, 0x8d, 0x5a, 0xbb, 0x7b, 0x4f, 0x9f, 0x1d, 0x54, 0xa7, 0x50, 0x09, 0xa6, 0x77, 0xf7, + 0x36, 0x1a, 0x3b, 0x0d, 0x1a, 0xd7, 0xf0, 0x8a, 0xb4, 0x8d, 0x6a, 0x43, 0x34, 0x0f, 0xd3, 0x2f, + 0x29, 0x54, 0xde, 0xfe, 0xd2, 0x7a, 0x9e, 0x8d, 0xb7, 0xda, 0xf8, 0x47, 0x29, 0x28, 0x8b, 0x53, + 0x30, 0xd1, 0x51, 0x54, 0x59, 0xa4, 0x42, 0x2c, 0x68, 0x8d, 0xc4, 0x4f, 0x47, 0x5b, 0x94, 0x62, + 0x72, 0x48, 0xdd, 0x9d, 0x6f, 0x36, 0x69, 0x0b, 0xb3, 0xfa, 0x63, 0x74, 0x0f, 0xaa, 0x2d, 0xee, + 0xee, 0x91, 0xb4, 0xa3, 0xcf, 0x08, 0xb8, 0x92, 0x75, 0xca, 0xfe, 0x69, 0x33, 0x5c, 0x91, 0x76, + 0x0a, 0x7a, 0x49, 0x1e, 0x24, 0x0a, 0x43, 0xb7, 0x21, 0x47, 0x86, 0xc4, 0xf2, 0xdc, 0x5a, 0x91, + 0x05, 0xb0, 0xb2, 0xac, 0xc6, 0x1a, 0x14, 0xaa, 0x8b, 0x49, 0xfc, 0x5f, 0x70, 0x81, 0x55, 0xbd, + 0x8f, 0x1d, 0xc3, 0x52, 0xcb, 0xf3, 0x83, 0x83, 0x1d, 0x61, 0x3a, 0xfa, 0x89, 0x2a, 0x90, 0xda, + 0xda, 0x10, 0x8a, 0xa6, 0xb6, 0x36, 0xf0, 0x27, 0x1a, 0x20, 0x75, 0xdd, 0x44, 0xb6, 0x8c, 0x10, + 0x97, 0xec, 0xd3, 0x01, 0xfb, 0x39, 0xc8, 0x12, 0xc7, 0xb1, 0x1d, 0x66, 0xb5, 0x82, 0xce, 0x07, + 0xf8, 0x96, 0x90, 0x41, 0x27, 0x43, 0xfb, 0xd8, 0x77, 0x0c, 0x4e, 0x4d, 0xf3, 0x45, 0xdd, 0x86, + 0xd9, 0x10, 0xd6, 0x44, 0x81, 0xf4, 0x2e, 
0x5c, 0x64, 0xc4, 0xb6, 0x09, 0xe9, 0xaf, 0x75, 0xcd, + 0x61, 0x22, 0xd7, 0x3e, 0x5c, 0x8a, 0x22, 0x7e, 0xbd, 0x36, 0xc2, 0x6f, 0x09, 0x8e, 0x07, 0x66, + 0x8f, 0x1c, 0xd8, 0x3b, 0xc9, 0xb2, 0xd1, 0xe8, 0x48, 0x6f, 0xdd, 0x22, 0xe3, 0xb0, 0x6f, 0xfc, + 0x2b, 0x0d, 0x2e, 0x8f, 0x2c, 0xff, 0x9a, 0x77, 0x75, 0x01, 0xa0, 0x43, 0x8f, 0x0f, 0x69, 0xd3, + 0x09, 0x7e, 0x5f, 0x54, 0x20, 0xbe, 0x9c, 0x34, 0xc0, 0x94, 0x84, 0x9c, 0x47, 0x90, 0x7b, 0xc2, + 0x5a, 0x35, 0x8a, 0x56, 0x19, 0xa9, 0x95, 0x65, 0xf4, 0xf8, 0x05, 0xb2, 0xa0, 0xb3, 0x6f, 0x96, + 0x5f, 0x09, 0x71, 0x9e, 0xe9, 0x3b, 0x3c, 0x8f, 0x17, 0x74, 0x7f, 0x4c, 0xb9, 0xb7, 0xba, 0x26, + 0xb1, 0x3c, 0x36, 0x9b, 0x61, 0xb3, 0x0a, 0x04, 0x2f, 0x43, 0x95, 0x73, 0x5a, 0x6b, 0xb7, 0x95, + 0x5c, 0xee, 0xd3, 0xd3, 0xc2, 0xf4, 0xf0, 0xaf, 0x35, 0xb8, 0xa0, 0x2c, 0x98, 0xc8, 0x76, 0xaf, + 0x40, 0x8e, 0x37, 0xa4, 0x44, 0x1e, 0x99, 0x0b, 0xaf, 0xe2, 0x6c, 0x74, 0x81, 0x83, 0x96, 0x21, + 0xcf, 0xbf, 0x64, 0xb1, 0x12, 0x8f, 0x2e, 0x91, 0xf0, 0x6d, 0x98, 0x15, 0x20, 0xd2, 0xb3, 0xe3, + 0x8e, 0x09, 0x33, 0x28, 0xfe, 0x18, 0xe6, 0xc2, 0x68, 0x13, 0xa9, 0xa4, 0x08, 0x99, 0x3a, 0x8f, + 0x90, 0x6b, 0x52, 0xc8, 0x67, 0xfd, 0xb6, 0x92, 0xf6, 0xa2, 0xbb, 0xae, 0xee, 0x48, 0x2a, 0xb2, + 0x23, 0xbe, 0x02, 0x92, 0xc4, 0x37, 0xaa, 0xc0, 0xac, 0x3c, 0x0e, 0x3b, 0xa6, 0xeb, 0x17, 0x43, + 0x1f, 0x01, 0x52, 0x81, 0xdf, 0xb4, 0x40, 0x1b, 0xe4, 0x85, 0x63, 0x74, 0x7a, 0xc4, 0x0f, 0xf5, + 0xb4, 0xca, 0x54, 0x81, 0x13, 0x05, 0xc7, 0x3f, 0x68, 0x50, 0x5a, 0xeb, 0x1a, 0x4e, 0x4f, 0x6e, + 0xd6, 0xdb, 0x90, 0xe3, 0xe5, 0xab, 0xb8, 0xf1, 0xdd, 0x09, 0x93, 0x51, 0x71, 0xf9, 0x60, 0x8d, + 0x17, 0xbb, 0x62, 0x15, 0xdd, 0x5c, 0xd1, 0x97, 0xdd, 0x88, 0xf4, 0x69, 0x37, 0xd0, 0xab, 0x90, + 0x35, 0xe8, 0x12, 0x16, 0x50, 0x2a, 0xd1, 0x8b, 0x03, 0xa3, 0xc6, 0x4a, 0x0d, 0x8e, 0x85, 0xdf, + 0x84, 0xa2, 0xc2, 0x81, 0xde, 0x87, 0x1e, 0x37, 0x44, 0x39, 0xb1, 0xb6, 0x7e, 0xb0, 0xf5, 0x9c, + 0x5f, 0x93, 0x2a, 0x00, 0x1b, 0x0d, 0x7f, 0x9c, 0xc2, 0xef, 0x8b, 0x55, 0x22, 
0xe4, 0xa8, 0xf2, + 0x68, 0x49, 0xf2, 0xa4, 0xce, 0x25, 0xcf, 0x09, 0x94, 0x85, 0xfa, 0x13, 0x9d, 0x81, 0xd7, 0x21, + 0xc7, 0xe8, 0xc9, 0x23, 0x30, 0x1f, 0xc3, 0x56, 0x46, 0x0b, 0x8e, 0x88, 0x67, 0xa0, 0xbc, 0xef, + 0x19, 0xde, 0xc0, 0x95, 0x47, 0xe0, 0xf7, 0x1a, 0x54, 0x24, 0x64, 0xd2, 0xe6, 0x90, 0xbc, 0x54, + 0xf3, 0x20, 0xec, 0x5f, 0xa9, 0x2f, 0x41, 0xae, 0x7d, 0xb8, 0x6f, 0x7e, 0x24, 0x1b, 0x79, 0x62, + 0x44, 0xe1, 0x5d, 0xce, 0x87, 0x77, 0xd3, 0xc5, 0x88, 0x5e, 0xcf, 0x1c, 0xe3, 0x85, 0xb7, 0x65, + 0xb5, 0xc9, 0x09, 0xab, 0x82, 0x32, 0x7a, 0x00, 0x60, 0x37, 0x2a, 0xd1, 0x75, 0x67, 0xa5, 0x8f, + 0xda, 0x85, 0x9f, 0x85, 0x0b, 0x6b, 0x03, 0xef, 0xa8, 0x61, 0x19, 0x87, 0x5d, 0x19, 0x34, 0xf0, + 0x1c, 0x20, 0x0a, 0xdc, 0x30, 0x5d, 0x15, 0xda, 0x80, 0x59, 0x0a, 0x25, 0x96, 0x67, 0xb6, 0x94, + 0x08, 0x23, 0xf3, 0x88, 0x16, 0xc9, 0x23, 0x86, 0xeb, 0xbe, 0xb4, 0x9d, 0xb6, 0x50, 0xcd, 0x1f, + 0xe3, 0x0d, 0x4e, 0xfc, 0x99, 0x1b, 0xca, 0x14, 0x5f, 0x95, 0xca, 0x52, 0x40, 0xe5, 0x31, 0xf1, + 0xc6, 0x50, 0xc1, 0x0f, 0xe0, 0xa2, 0xc4, 0x14, 0x5d, 0x97, 0x31, 0xc8, 0x7b, 0x70, 0x4d, 0x22, + 0xaf, 0x1f, 0xd1, 0xbb, 0xc0, 0x53, 0xc1, 0xf0, 0xdf, 0x95, 0xf3, 0x11, 0xd4, 0x7c, 0x39, 0x59, + 0xe9, 0x67, 0x77, 0x55, 0x01, 0x06, 0xae, 0x38, 0x33, 0x05, 0x9d, 0x7d, 0x53, 0x98, 0x63, 0x77, + 0xfd, 0xac, 0x4c, 0xbf, 0xf1, 0x3a, 0xcc, 0x4b, 0x1a, 0xa2, 0x28, 0x0b, 0x13, 0x19, 0x11, 0x28, + 0x8e, 0x88, 0x30, 0x18, 0x5d, 0x3a, 0xde, 0xec, 0x2a, 0x66, 0xd8, 0xb4, 0x8c, 0xa6, 0xa6, 0xd0, + 0xbc, 0xc8, 0x4f, 0x04, 0x15, 0x4c, 0x0d, 0xda, 0x02, 0x4c, 0x09, 0xa8, 0x60, 0xb1, 0x11, 0x14, + 0x3c, 0xb2, 0x11, 0x23, 0xa4, 0x3f, 0x80, 0x05, 0x5f, 0x08, 0x6a, 0xb7, 0xa7, 0xc4, 0xe9, 0x99, + 0xae, 0xab, 0xf4, 0x09, 0xe2, 0x14, 0xbf, 0x03, 0x99, 0x3e, 0x11, 0x31, 0xa5, 0xb8, 0x8a, 0x96, + 0xf9, 0xdb, 0xd8, 0xb2, 0xb2, 0x98, 0xcd, 0xe3, 0x36, 0x5c, 0x97, 0xd4, 0xb9, 0x45, 0x63, 0xc9, + 0x47, 0x85, 0x92, 0x77, 0x48, 0x6e, 0xd6, 0xd1, 0x3b, 0x64, 0x9a, 0xef, 0xbd, 0xbc, 0x43, 0xd2, + 0x5c, 0xa1, 0xfa, 
0xd6, 0x44, 0xb9, 0x62, 0x9b, 0xdb, 0xd4, 0x77, 0xc9, 0x89, 0x88, 0x1d, 0xc2, + 0x5c, 0xd8, 0x93, 0x27, 0x0a, 0x63, 0x73, 0x90, 0xf5, 0xec, 0x63, 0x22, 0x83, 0x18, 0x1f, 0x48, + 0x81, 0x7d, 0x37, 0x9f, 0x48, 0x60, 0x23, 0x20, 0xc6, 0x8e, 0xe4, 0xa4, 0xf2, 0xd2, 0xdd, 0x94, + 0xf5, 0x0f, 0x1f, 0xe0, 0x5d, 0xb8, 0x14, 0x0d, 0x13, 0x13, 0x89, 0xfc, 0x9c, 0x1f, 0xe0, 0xb8, + 0x48, 0x32, 0x11, 0xdd, 0x77, 0x83, 0x60, 0xa0, 0x04, 0x94, 0x89, 0x48, 0xea, 0x50, 0x8f, 0x8b, + 0x2f, 0xff, 0x89, 0xf3, 0xea, 0x87, 0x9b, 0x89, 0x88, 0xb9, 0x01, 0xb1, 0xc9, 0xb7, 0x3f, 0x88, + 0x11, 0xe9, 0xb1, 0x31, 0x42, 0x38, 0x49, 0x10, 0xc5, 0xbe, 0x86, 0x43, 0x27, 0x78, 0x04, 0x01, + 0x74, 0x52, 0x1e, 0x34, 0x87, 0xf8, 0x3c, 0xd8, 0x40, 0x1e, 0x6c, 0x35, 0xec, 0x4e, 0xb4, 0x19, + 0xef, 0x05, 0xb1, 0x73, 0x24, 0x32, 0x4f, 0x44, 0xf8, 0x7d, 0x58, 0x4c, 0x0e, 0xca, 0x93, 0x50, + 0xbe, 0x8f, 0xa1, 0xe0, 0x17, 0x94, 0xca, 0xbb, 0x72, 0x11, 0xf2, 0xbb, 0x7b, 0xfb, 0x4f, 0xd7, + 0xd6, 0x1b, 0x55, 0x6d, 0xf5, 0x1f, 0x69, 0x48, 0x6d, 0x3f, 0x47, 0xdf, 0x82, 0x2c, 0x7f, 0x2e, + 0x1a, 0xf3, 0x9a, 0x56, 0x1f, 0xf7, 0xf0, 0x84, 0xaf, 0x7e, 0xf2, 0xa7, 0xbf, 0x7e, 0x9e, 0xba, + 0x84, 0x2f, 0xac, 0x0c, 0xdf, 0x30, 0xba, 0xfd, 0x23, 0x63, 0xe5, 0x78, 0xb8, 0xc2, 0x72, 0xc2, + 0x43, 0xed, 0x3e, 0x7a, 0x0e, 0xe9, 0xa7, 0x03, 0x0f, 0x25, 0x3e, 0xb5, 0xd5, 0x93, 0x1f, 0xa4, + 0x70, 0x9d, 0x51, 0x9e, 0xc3, 0x33, 0x2a, 0xe5, 0xfe, 0xc0, 0xa3, 0x74, 0x87, 0x50, 0x54, 0xde, + 0x94, 0xd0, 0x99, 0x8f, 0x70, 0xf5, 0xb3, 0xdf, 0xab, 0x30, 0x66, 0xfc, 0xae, 0xe2, 0xcb, 0x2a, + 0x3f, 0xfe, 0xf4, 0xa5, 0xea, 0x73, 0x70, 0x62, 0x45, 0xf5, 0x09, 0x9e, 0x45, 0xa2, 0xfa, 0x28, + 0x4f, 0x11, 0xf1, 0xfa, 0x78, 0x27, 0x16, 0xa5, 0x6b, 0x8b, 0x77, 0xb0, 0x96, 0x87, 0xae, 0xc7, + 0xbc, 0xa3, 0xa8, 0x2f, 0x06, 0xf5, 0xc5, 0x64, 0x04, 0xc1, 0xe9, 0x06, 0xe3, 0x74, 0x05, 0x5f, + 0x52, 0x39, 0xb5, 0x7c, 0xbc, 0x87, 0xda, 0xfd, 0xd5, 0x23, 0xc8, 0xb2, 0x3e, 0x27, 0x6a, 0xca, + 0x8f, 0x7a, 0x4c, 0x87, 0x36, 0xe1, 0x04, 0x84, 0x3a, 
0xa4, 0x78, 0x9e, 0x71, 0x9b, 0xc5, 0x15, + 0x9f, 0x1b, 0x6b, 0x75, 0x3e, 0xd4, 0xee, 0x2f, 0x69, 0xaf, 0x69, 0xab, 0xdf, 0xcb, 0x40, 0x96, + 0xb5, 0x8e, 0x50, 0x1f, 0x20, 0x68, 0x0a, 0x46, 0xf5, 0x1c, 0x69, 0x33, 0x46, 0xf5, 0x1c, 0xed, + 0x27, 0xe2, 0xeb, 0x8c, 0xf3, 0x3c, 0x9e, 0xf3, 0x39, 0xb3, 0x57, 0xfb, 0x15, 0xd6, 0x24, 0xa2, + 0x66, 0x7d, 0x09, 0x45, 0xa5, 0xb9, 0x87, 0xe2, 0x28, 0x86, 0xba, 0x83, 0xd1, 0x63, 0x12, 0xd3, + 0x19, 0xc4, 0x37, 0x19, 0xd3, 0x6b, 0xb8, 0xa6, 0x1a, 0x97, 0xf3, 0x75, 0x18, 0x26, 0x65, 0xfc, + 0xa9, 0x06, 0x95, 0x70, 0x83, 0x0f, 0xdd, 0x8c, 0x21, 0x1d, 0xed, 0x13, 0xd6, 0x6f, 0x8d, 0x47, + 0x4a, 0x14, 0x81, 0xf3, 0x3f, 0x26, 0xa4, 0x6f, 0x50, 0x4c, 0x61, 0x7b, 0xf4, 0x7d, 0x0d, 0x66, + 0x22, 0x6d, 0x3b, 0x14, 0xc7, 0x62, 0xa4, 0x29, 0x58, 0xbf, 0x7d, 0x06, 0x96, 0x90, 0xe4, 0x2e, + 0x93, 0xe4, 0x06, 0xbe, 0x3a, 0x6a, 0x0c, 0xcf, 0xec, 0x11, 0xcf, 0x16, 0xd2, 0xac, 0xfe, 0x33, + 0x0d, 0xf9, 0x75, 0xfe, 0x13, 0x2b, 0xe4, 0x41, 0xc1, 0xef, 0x84, 0xa1, 0x85, 0xb8, 0xae, 0x44, + 0x50, 0xb2, 0xd7, 0xaf, 0x27, 0xce, 0x0b, 0x11, 0xee, 0x30, 0x11, 0x16, 0xf1, 0x15, 0x5f, 0x04, + 0xf1, 0x53, 0xae, 0x15, 0x7e, 0xf9, 0x5e, 0x31, 0xda, 0x6d, 0xba, 0x25, 0xdf, 0xd5, 0xa0, 0xa4, + 0x36, 0xac, 0xd0, 0x8d, 0xd8, 0x7e, 0x88, 0xda, 0xf3, 0xaa, 0xe3, 0x71, 0x28, 0x82, 0xff, 0x3d, + 0xc6, 0xff, 0x26, 0x5e, 0x48, 0xe2, 0xef, 0x30, 0xfc, 0xb0, 0x08, 0xbc, 0xe5, 0x14, 0x2f, 0x42, + 0xa8, 0xa3, 0x15, 0x2f, 0x42, 0xb8, 0x63, 0x75, 0xb6, 0x08, 0x03, 0x86, 0x4f, 0x45, 0x38, 0x01, + 0x08, 0x3a, 0x4c, 0x28, 0xd6, 0xb8, 0xca, 0x25, 0x26, 0xea, 0x83, 0xa3, 0xcd, 0xa9, 0x98, 0x13, + 0x10, 0xe1, 0xdd, 0x35, 0x5d, 0xea, 0x8b, 0xab, 0xbf, 0xcd, 0x40, 0xf1, 0x89, 0x61, 0x5a, 0x1e, + 0xb1, 0x0c, 0xab, 0x45, 0x50, 0x07, 0xb2, 0x2c, 0x4b, 0x45, 0x03, 0x8f, 0xda, 0xf6, 0x89, 0x06, + 0x9e, 0x50, 0x4f, 0x04, 0xdf, 0x66, 0xac, 0xaf, 0xe3, 0xba, 0xcf, 0xba, 0x17, 0xd0, 0x5f, 0x61, + 0xfd, 0x0c, 0xaa, 0xf2, 0x31, 0xe4, 0x78, 0xff, 0x02, 0x45, 0xa8, 0x85, 0xfa, 0x1c, 0xf5, 
0xab, + 0xf1, 0x93, 0x89, 0xa7, 0x4c, 0xe5, 0xe5, 0x32, 0x64, 0xca, 0xec, 0xdb, 0x00, 0x41, 0xc3, 0x2c, + 0x6a, 0xdf, 0x91, 0xfe, 0x5a, 0x7d, 0x31, 0x19, 0x41, 0x30, 0xbe, 0xcf, 0x18, 0xdf, 0xc2, 0xd7, + 0x63, 0x19, 0xb7, 0xfd, 0x05, 0x94, 0x79, 0x0b, 0x32, 0x9b, 0x86, 0x7b, 0x84, 0x22, 0x49, 0x48, + 0x79, 0xdb, 0xad, 0xd7, 0xe3, 0xa6, 0x04, 0xab, 0x5b, 0x8c, 0xd5, 0x02, 0x9e, 0x8f, 0x65, 0x75, + 0x64, 0xb8, 0x34, 0xa6, 0xa3, 0x01, 0x4c, 0xcb, 0xf7, 0x5a, 0x74, 0x2d, 0x62, 0xb3, 0xf0, 0xdb, + 0x6e, 0x7d, 0x21, 0x69, 0x5a, 0x30, 0x5c, 0x62, 0x0c, 0x31, 0xbe, 0x16, 0x6f, 0x54, 0x81, 0xfe, + 0x50, 0xbb, 0xff, 0x9a, 0xb6, 0xfa, 0xc3, 0x2a, 0x64, 0x68, 0xbd, 0x44, 0xb3, 0x48, 0x70, 0xcd, + 0x8c, 0x5a, 0x78, 0xa4, 0xb9, 0x13, 0xb5, 0xf0, 0xe8, 0x0d, 0x35, 0x26, 0x8b, 0xb0, 0x1f, 0x9a, + 0x12, 0x86, 0x45, 0x35, 0xf6, 0xa0, 0xa8, 0x5c, 0x46, 0x51, 0x0c, 0xc5, 0x70, 0xeb, 0x28, 0x9a, + 0x45, 0x62, 0x6e, 0xb2, 0x78, 0x91, 0x31, 0xad, 0xe3, 0x8b, 0x61, 0xa6, 0x6d, 0x8e, 0x46, 0xb9, + 0x7e, 0x0c, 0x25, 0xf5, 0xd6, 0x8a, 0x62, 0x88, 0x46, 0x7a, 0x53, 0xd1, 0x58, 0x11, 0x77, 0xe9, + 0x8d, 0x71, 0x1a, 0xff, 0x67, 0xb5, 0x12, 0x97, 0x72, 0xff, 0x10, 0xf2, 0xe2, 0x2e, 0x1b, 0xa7, + 0x6f, 0xb8, 0x9b, 0x15, 0xa7, 0x6f, 0xe4, 0x22, 0x1c, 0x53, 0x92, 0x30, 0xb6, 0xb4, 0x66, 0x97, + 0x01, 0x5a, 0xb0, 0x7c, 0x4c, 0xbc, 0x24, 0x96, 0x41, 0x7f, 0x26, 0x89, 0xa5, 0x72, 0x5f, 0x1a, + 0xcb, 0xb2, 0x43, 0x3c, 0x71, 0x96, 0xe5, 0x65, 0x04, 0x25, 0x50, 0x54, 0xa3, 0x21, 0x1e, 0x87, + 0x92, 0x58, 0x45, 0x06, 0x5c, 0x45, 0x28, 0x44, 0xdf, 0x01, 0x08, 0x2e, 0xde, 0xd1, 0xc2, 0x20, + 0xb6, 0x7b, 0x17, 0x2d, 0x0c, 0xe2, 0xef, 0xee, 0x31, 0x1e, 0x1c, 0x30, 0xe7, 0x95, 0x2c, 0x65, + 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x88, 0x67, 0x11, 0xdb, 0x18, 0xac, 0xbf, 0x72, + 0x3e, 0xe4, 0xc4, 0xe8, 0x19, 0xc8, 0xd5, 0x62, 0x4b, 0xfa, 0x2f, 0xa9, 0x64, 0x9f, 0x69, 0x50, + 0x0e, 0x5d, 0xf5, 0xd1, 0x9d, 0x84, 0x7d, 0x8e, 0x34, 0x17, 0xeb, 0x77, 0xcf, 0xc4, 0x4b, 0xac, + 0x9d, 0x94, 0x53, 0x21, 0xeb, 
0xc6, 0x1f, 0x68, 0x50, 0x09, 0xf7, 0x07, 0x50, 0x02, 0x83, 0x91, + 0x0e, 0x65, 0x7d, 0xe9, 0x6c, 0xc4, 0x73, 0xec, 0x56, 0x50, 0x4a, 0x7e, 0x08, 0x79, 0xd1, 0x56, + 0x88, 0x73, 0x8b, 0x70, 0x83, 0x33, 0xce, 0x2d, 0x22, 0x3d, 0x89, 0x24, 0xb7, 0xa0, 0x37, 0x74, + 0xc5, 0x13, 0x45, 0xf3, 0x21, 0x89, 0xe5, 0x78, 0x4f, 0x8c, 0x74, 0x2e, 0xc6, 0xb2, 0x0c, 0x3c, + 0x51, 0xb6, 0x1e, 0x50, 0x02, 0xc5, 0x33, 0x3c, 0x31, 0xda, 0xb9, 0x48, 0xf2, 0x44, 0xc6, 0x55, + 0xf1, 0xc4, 0xa0, 0x53, 0x10, 0xe7, 0x89, 0x23, 0xed, 0xdb, 0x38, 0x4f, 0x1c, 0x6d, 0x36, 0x24, + 0xed, 0x2d, 0x63, 0x1e, 0xf2, 0xc4, 0xd9, 0x98, 0xce, 0x02, 0x7a, 0x25, 0xc1, 0xa6, 0xb1, 0xad, + 0xe1, 0xfa, 0xab, 0xe7, 0xc4, 0x1e, 0xef, 0x01, 0x7c, 0x37, 0xa4, 0x07, 0xfc, 0x42, 0x83, 0xb9, + 0xb8, 0xd6, 0x04, 0x4a, 0x60, 0x96, 0xd0, 0x57, 0xae, 0x2f, 0x9f, 0x17, 0xfd, 0x1c, 0x76, 0xf3, + 0x7d, 0xe2, 0x51, 0xf5, 0x77, 0x5f, 0x2e, 0x68, 0x7f, 0xfc, 0x72, 0x41, 0xfb, 0xf3, 0x97, 0x0b, + 0xda, 0x4f, 0xff, 0xb2, 0x30, 0x75, 0x98, 0x63, 0xff, 0xdb, 0xe3, 0x8d, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0x63, 0x1c, 0x78, 0x24, 0x74, 0x32, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto index ddf1ad23329..a6cd00ab7c3 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto @@ -352,11 +352,12 @@ message RangeRequest { bytes key = 1; // range_end is the upper bound on the requested range [key, range_end). // If range_end is '\0', the range is all keys >= key. - // If the range_end is one bit larger than the given key, - // then the range requests get the all keys with the prefix (the given key). - // If both key and range_end are '\0', then range requests returns all keys. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. 
+ // If both key and range_end are '\0', then the range request returns all keys. bytes range_end = 2; - // limit is a limit on the number of keys returned for the request. + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. int64 limit = 3; // revision is the point-in-time of the key-value store to use for the range. // If revision is less or equal to zero, the range is over the newest key-value store. @@ -423,6 +424,14 @@ message PutRequest { // If prev_kv is set, etcd gets the previous key-value pair before changing it. // The previous key-value pair will be returned in the put response. bool prev_kv = 4; + + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + bool ignore_value = 5; + + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + bool ignore_lease = 6; } message PutResponse { @@ -436,13 +445,13 @@ message DeleteRangeRequest { bytes key = 1; // range_end is the key following the last key to delete for the range [key, range_end). // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all - // the all keys with the prefix (the given key). + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). // If range_end is '\0', the range is all keys greater than or equal to the key argument. bytes range_end = 2; // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delte response. + // The previous key-value pairs will be returned in the delete response. bool prev_kv = 3; } @@ -645,6 +654,9 @@ message WatchResponse { // watcher with the same start_revision again. 
int64 compact_revision = 5; + // cancel_reason indicates the reason for canceling the watcher. + string cancel_reason = 6; + repeated mvccpb.Event events = 11; } @@ -725,6 +737,8 @@ message MemberAddResponse { ResponseHeader header = 1; // member is the member information for the added member. Member member = 2; + // members is a list of all members after adding the new member. + repeated Member members = 3; } message MemberRemoveRequest { @@ -734,6 +748,8 @@ message MemberRemoveRequest { message MemberRemoveResponse { ResponseHeader header = 1; + // members is a list of all members after removing the member. + repeated Member members = 2; } message MemberUpdateRequest { @@ -745,6 +761,8 @@ message MemberUpdateRequest { message MemberUpdateResponse{ ResponseHeader header = 1; + // members is a list of all members after updating the member. + repeated Member members = 2; } message MemberListRequest { diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD b/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD index d9478aa74fc..473575baffc 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "cluster.go", + "doc.go", "errors.go", "member.go", "store.go", diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go index 25c45dfce12..2330219f18a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go @@ -178,7 +178,7 @@ func (c *RaftCluster) String() string { fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " ")) var ids []string for id := range c.removed { - ids = append(ids, fmt.Sprintf("%s", id)) + ids = append(ids, id.String()) } fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " ")) return b.String() diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go new file mode 100644 index 00000000000..b07fb2d9285 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package membership describes individual etcd members and clusters of members. +package membership diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go index f2ea0120d74..d3f8f2474a4 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go @@ -36,7 +36,7 @@ const ( var ( membersBucketName = []byte("members") - membersRemovedBuckedName = []byte("members_removed") + membersRemovedBucketName = []byte("members_removed") clusterBucketName = []byte("cluster") StoreMembersPrefix = path.Join(storePrefix, "members") @@ -62,7 +62,7 @@ func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) { tx := be.BatchTx() tx.Lock() tx.UnsafeDelete(membersBucketName, mkey) - tx.UnsafePut(membersRemovedBuckedName, mkey, []byte("removed")) + tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed")) tx.Unlock() } @@ -164,7 +164,7 @@ func mustCreateBackendBuckets(be backend.Backend) { tx.Lock() defer tx.Unlock() 
tx.UnsafeCreateBucket(membersBucketName) - tx.UnsafeCreateBucket(membersRemovedBuckedName) + tx.UnsafeCreateBucket(membersRemovedBucketName) tx.UnsafeCreateBucket(clusterBucketName) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go index 2b549f738f7..90bbd3632a6 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go +++ b/vendor/github.com/coreos/etcd/etcdserver/metrics.go @@ -58,6 +58,12 @@ var ( Name: "proposals_failed_total", Help: "The total number of failed proposals seen.", }) + leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "server", + Name: "lease_expired_total", + Help: "The total number of expired leases.", + }) ) func init() { @@ -67,6 +73,7 @@ func init() { prometheus.MustRegister(proposalsApplied) prometheus.MustRegister(proposalsPending) prometheus.MustRegister(proposalsFailed) + prometheus.MustRegister(leaseExpired) } func monitorFileDescriptor(done <-chan struct{}) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go index 088a4696253..87126f1564c 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/quota.go +++ b/vendor/github.com/coreos/etcd/etcdserver/quota.go @@ -16,7 +16,15 @@ package etcdserver import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/backend" +) + +const ( + // DefaultQuotaBytes is the number of bytes the backend Size may + // consume before exceeding the space quota. + DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB + // MaxQuotaBytes is the maximum number of bytes suggested for a backend + // quota. A larger quota may lead to degraded performance. + MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB ) // Quota represents an arbitrary quota against arbitrary requests. 
Each request @@ -57,11 +65,10 @@ func NewBackendQuota(s *EtcdServer) Quota { } if s.Cfg.QuotaBackendBytes == 0 { // use default size if no quota size given - return &backendQuota{s, backend.DefaultQuotaBytes} + return &backendQuota{s, DefaultQuotaBytes} } - if s.Cfg.QuotaBackendBytes > backend.MaxQuotaBytes { - plog.Warningf("backend quota %v exceeds maximum quota %v; using maximum", s.Cfg.QuotaBackendBytes, backend.MaxQuotaBytes) - return &backendQuota{s, backend.MaxQuotaBytes} + if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { + plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) } return &backendQuota{s, s.Cfg.QuotaBackendBytes} } diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go index d7ec176eb3a..dcb894f82fb 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/raft.go +++ b/vendor/github.com/coreos/etcd/etcdserver/raft.go @@ -83,7 +83,8 @@ type RaftTimer interface { type apply struct { entries []raftpb.Entry snapshot raftpb.Snapshot - raftDone <-chan struct{} // rx {} after raft has persisted messages + // notifyc synchronizes etcd server applies with the raft node + notifyc chan struct{} } type raftNode struct { @@ -94,14 +95,7 @@ type raftNode struct { term uint64 lead uint64 - mu sync.Mutex - // last lead elected time - lt time.Time - - // to check if msg receiver is removed from cluster - isIDRemoved func(id uint64) bool - - raft.Node + raftNodeConfig // a chan to send/receive snapshot msgSnapC chan raftpb.Message @@ -113,28 +107,51 @@ type raftNode struct { readStateC chan raft.ReadState // utility - ticker <-chan time.Time + ticker *time.Ticker // contention detectors for raft heartbeat message - td *contention.TimeoutDetector - heartbeat time.Duration // for logging - raftStorage *raft.MemoryStorage - storage Storage - // transport specifies the transport to send and receive msgs to members. - // Sending messages MUST NOT block. 
It is okay to drop messages, since - // clients should timeout and reissue their messages. - // If transport is nil, server will panic. - transport rafthttp.Transporter + td *contention.TimeoutDetector stopped chan struct{} done chan struct{} } +type raftNodeConfig struct { + // to check if msg receiver is removed from cluster + isIDRemoved func(id uint64) bool + raft.Node + raftStorage *raft.MemoryStorage + storage Storage + heartbeat time.Duration // for logging + // transport specifies the transport to send and receive msgs to members. + // Sending messages MUST NOT block. It is okay to drop messages, since + // clients should timeout and reissue their messages. + // If transport is nil, server will panic. + transport rafthttp.Transporter +} + +func newRaftNode(cfg raftNodeConfig) *raftNode { + r := &raftNode{ + raftNodeConfig: cfg, + // set up contention detectors for raft heartbeat message. + // expect to send a heartbeat within 2 heartbeat intervals. + td: contention.NewTimeoutDetector(2 * cfg.heartbeat), + readStateC: make(chan raft.ReadState, 1), + msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), + applyc: make(chan apply), + stopped: make(chan struct{}), + done: make(chan struct{}), + } + if r.heartbeat == 0 { + r.ticker = &time.Ticker{} + } else { + r.ticker = time.NewTicker(r.heartbeat) + } + return r +} + // start prepares and starts raftNode in a new goroutine. It is no longer safe // to modify the fields after it has been started. 
func (r *raftNode) start(rh *raftReadyHandler) { - r.applyc = make(chan apply) - r.stopped = make(chan struct{}) - r.done = make(chan struct{}) internalTimeout := time.Second go func() { @@ -143,14 +160,12 @@ func (r *raftNode) start(rh *raftReadyHandler) { for { select { - case <-r.ticker: + case <-r.ticker.C: r.Tick() case rd := <-r.Ready(): if rd.SoftState != nil { - if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead { - r.mu.Lock() - r.lt = time.Now() - r.mu.Unlock() + newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead + if newLeader { leaderChanges.Inc() } @@ -162,7 +177,8 @@ func (r *raftNode) start(rh *raftReadyHandler) { atomic.StoreUint64(&r.lead, rd.SoftState.Lead) islead = rd.RaftState == raft.StateLeader - rh.updateLeadership() + rh.updateLeadership(newLeader) + r.td.Reset() } if len(rd.ReadStates) != 0 { @@ -175,11 +191,11 @@ func (r *raftNode) start(rh *raftReadyHandler) { } } - raftDone := make(chan struct{}, 1) + notifyc := make(chan struct{}, 1) ap := apply{ entries: rd.CommittedEntries, snapshot: rd.Snapshot, - raftDone: raftDone, + notifyc: notifyc, } updateCommittedIndex(&ap, rh) @@ -195,7 +211,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { // For more details, check raft thesis 10.2.1 if islead { // gofail: var raftBeforeLeaderSend struct{} - r.sendMessages(rd.Messages) + r.transport.Send(r.processMessages(rd.Messages)) } // gofail: var raftBeforeSave struct{} @@ -212,6 +228,9 @@ func (r *raftNode) start(rh *raftReadyHandler) { if err := r.storage.SaveSnap(rd.Snapshot); err != nil { plog.Fatalf("raft save snapshot error: %v", err) } + // etcdserver now claim the snapshot has been persisted onto the disk + notifyc <- struct{}{} + // gofail: var raftAfterSaveSnap struct{} r.raftStorage.ApplySnapshot(rd.Snapshot) plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) @@ -221,10 +240,44 @@ func (r *raftNode) start(rh 
*raftReadyHandler) { r.raftStorage.Append(rd.Entries) if !islead { + // finish processing incoming messages before we signal raftdone chan + msgs := r.processMessages(rd.Messages) + + // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots + notifyc <- struct{}{} + + // Candidate or follower needs to wait for all pending configuration + // changes to be applied before sending messages. + // Otherwise we might incorrectly count votes (e.g. votes from removed members). + // Also slow machine's follower raft-layer could proceed to become the leader + // on its own single-node cluster, before apply-layer applies the config change. + // We simply wait for ALL pending entries to be applied for now. + // We might improve this later on if it causes unnecessary long blocking issues. + waitApply := false + for _, ent := range rd.CommittedEntries { + if ent.Type == raftpb.EntryConfChange { + waitApply = true + break + } + } + if waitApply { + // blocks until 'applyAll' calls 'applyWait.Trigger' + // to be in sync with scheduled config-change job + // (assume notifyc has cap of 1) + select { + case notifyc <- struct{}{}: + case <-r.stopped: + return + } + } + // gofail: var raftBeforeFollowerSend struct{} - r.sendMessages(rd.Messages) + r.transport.Send(msgs) + } else { + // leader already processed 'MsgSnap' and signaled + notifyc <- struct{}{} } - raftDone <- struct{}{} + r.Advance() case <-r.stopped: return @@ -246,7 +299,7 @@ func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { } } -func (r *raftNode) sendMessages(ms []raftpb.Message) { +func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { sentAppResp := false for i := len(ms) - 1; i >= 0; i-- { if r.isIDRemoved(ms[i].To) { @@ -282,20 +335,13 @@ func (r *raftNode) sendMessages(ms []raftpb.Message) { } } } - - r.transport.Send(ms) + return ms } func (r *raftNode) apply() chan apply { return r.applyc } -func (r *raftNode) leadElectedTime() time.Time { - 
r.mu.Lock() - defer r.mu.Unlock() - return r.lt -} - func (r *raftNode) stop() { r.stopped <- struct{}{} <-r.done @@ -303,6 +349,7 @@ func (r *raftNode) stop() { func (r *raftNode) onStop() { r.Stop() + r.ticker.Stop() r.transport.Stop() if err := r.storage.Close(); err != nil { plog.Panicf("raft close storage error: %v", err) diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go index 98eb2cc7b29..271c5e77313 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/server.go @@ -23,7 +23,6 @@ import ( "net/http" "os" "path" - "path/filepath" "regexp" "sync" "sync/atomic" @@ -41,7 +40,6 @@ import ( "github.com/coreos/etcd/lease" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/contention" "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/idutil" "github.com/coreos/etcd/pkg/pbutil" @@ -62,7 +60,7 @@ import ( ) const ( - DefaultSnapCount = 10000 + DefaultSnapCount = 100000 StoreClusterPrefix = "/0" StoreKeysPrefix = "/1" @@ -77,7 +75,6 @@ const ( // (since it will timeout). monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second - databaseFilename = "db" // max number of in-flight snapshot messages etcdserver allows to have // This number is more than enough for most clusters with 5 machines. maxInFlightMsgSnap = 16 @@ -85,7 +82,8 @@ const ( releaseDelayAfterSnapshot = 30 * time.Second // maxPendingRevokes is the maximum number of outstanding expired lease revocations. - maxPendingRevokes = 16 + maxPendingRevokes = 16 + recommendedMaxRequestBytes = 10 * 1024 * 1024 ) var ( @@ -135,15 +133,15 @@ type Server interface { // AddMember attempts to add a member into the cluster. It will return // ErrIDRemoved if member ID is removed from the cluster, or return // ErrIDExists if member ID exists in the cluster. 
- AddMember(ctx context.Context, memb membership.Member) error + AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) // RemoveMember attempts to remove a member from the cluster. It will // return ErrIDRemoved if member ID is removed from the cluster, or return // ErrIDNotFound if member ID is not in the cluster. - RemoveMember(ctx context.Context, id uint64) error + RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) // UpdateMember attempts to update an existing member in the cluster. It will // return ErrIDNotFound if the member ID does not exist. - UpdateMember(ctx context.Context, updateMemb membership.Member) error + UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) // ClusterVersion is the cluster-wide minimum major.minor version. // Cluster version is set to the min version that an etcd member is @@ -201,7 +199,8 @@ type EtcdServer struct { cluster *membership.RaftCluster - store store.Store + store store.Store + snapshotter *snap.Snapshotter applyV2 ApplierV2 @@ -221,7 +220,7 @@ type EtcdServer struct { stats *stats.ServerStats lstats *stats.LeaderStats - SyncTicker <-chan time.Time + SyncTicker *time.Ticker // compactor is used to auto-compact the KV. compactor *compactor.Periodic @@ -238,6 +237,14 @@ type EtcdServer struct { // wg is used to wait for the go routines that depends on the server state // to exit when stopping the server. wg sync.WaitGroup + + // ctx is used for etcd-initiated requests that may need to be canceled + // on etcd server shutdown. + ctx context.Context + cancel context.CancelFunc + + leadTimeMu sync.RWMutex + leadElectedTime time.Time } // NewServer creates a new EtcdServer from the supplied configuration. 
The @@ -253,6 +260,10 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { cl *membership.RaftCluster ) + if cfg.MaxRequestBytes > recommendedMaxRequestBytes { + plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) + } + if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { return nil, fmt.Errorf("cannot access data directory: %v", terr) } @@ -264,23 +275,9 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { } ss := snap.New(cfg.SnapDir()) - bepath := filepath.Join(cfg.SnapDir(), databaseFilename) + bepath := cfg.backendPath() beExist := fileutil.Exist(bepath) - - var be backend.Backend - beOpened := make(chan struct{}) - go func() { - be = backend.NewDefaultBackend(bepath) - beOpened <- struct{}{} - }() - - select { - case <-beOpened: - case <-time.After(time.Second): - plog.Warningf("another etcd process is running with the same data dir and holding the file lock.") - plog.Warningf("waiting for it to exit before starting...") - <-beOpened - } + be := openBackend(cfg) defer func() { if err != nil { @@ -378,6 +375,9 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { plog.Panicf("recovered store from snapshot error: %v", err) } plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) + if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { + plog.Panicf("recovering backend from snapshot error: %v", err) + } } cfg.Print() if !cfg.ForceNewCluster { @@ -400,39 +400,32 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { return nil, fmt.Errorf("cannot access member directory: %v", terr) } - sstats := &stats.ServerStats{ - Name: cfg.Name, - ID: id.String(), - } - sstats.Initialize() + sstats := stats.NewServerStats(cfg.Name, id.String()) lstats := stats.NewLeaderStats(id.String()) heartbeat := time.Duration(cfg.TickMs) * time.Millisecond srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: 
cfg, - snapCount: cfg.SnapCount, - errorc: make(chan error, 1), - store: st, - r: raftNode{ - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - ticker: time.Tick(heartbeat), - // set up contention detectors for raft heartbeat message. - // expect to send a heartbeat within 2 heartbeat intervals. - td: contention.NewTimeoutDetector(2 * heartbeat), - heartbeat: heartbeat, - raftStorage: s, - storage: NewStorage(w, ss), - msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - readStateC: make(chan raft.ReadState, 1), - }, + readych: make(chan struct{}), + Cfg: cfg, + snapCount: cfg.SnapCount, + errorc: make(chan error, 1), + store: st, + snapshotter: ss, + r: *newRaftNode( + raftNodeConfig{ + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + heartbeat: heartbeat, + raftStorage: s, + storage: NewStorage(w, ss), + }, + ), id: id, attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, cluster: cl, stats: sstats, lstats: lstats, - SyncTicker: time.Tick(500 * time.Millisecond), + SyncTicker: time.NewTicker(500 * time.Millisecond), peerRt: prt, reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), forceVersionC: make(chan struct{}), @@ -458,12 +451,26 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) } } - srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) + newSrv := srv // since srv == nil in defer if srv is returned as nil + defer func() { + // closing backend without first closing kv can cause + // resumed compactions to fail with closed tx errors + if err != nil { + newSrv.kv.Close() + } + }() - srv.authStore = auth.NewAuthStore(srv.be, + srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) + tp, err := auth.NewTokenProvider(cfg.AuthToken, func(index uint64) <-chan struct{} { return srv.applyWait.Wait(index) - }) + }, + ) + 
if err != nil { + plog.Errorf("failed to create token provider: %s", err) + return nil, err + } + srv.authStore = auth.NewAuthStore(srv.be, tp) if h := cfg.AutoCompactionRetention; h != 0 { srv.compactor = compactor.NewPeriodic(h, srv.kv, srv) srv.compactor.Run() @@ -531,6 +538,7 @@ func (s *EtcdServer) start() { s.done = make(chan struct{}) s.stop = make(chan struct{}) s.stopping = make(chan struct{}) + s.ctx, s.cancel = context.WithCancel(context.Background()) s.readwaitc = make(chan struct{}, 1) s.readNotifier = newNotifier() if s.ClusterVersion() != nil { @@ -603,16 +611,19 @@ type etcdProgress struct { // and helps decouple state machine logic from Raft algorithms. // TODO: add a state machine interface to apply the commit entries and do snapshot/recover type raftReadyHandler struct { - updateLeadership func() + updateLeadership func(newLeader bool) updateCommittedIndex func(uint64) } func (s *EtcdServer) run() { - snap, err := s.r.raftStorage.Snapshot() + sn, err := s.r.raftStorage.Snapshot() if err != nil { plog.Panicf("get snapshot from raft storage error: %v", err) } + // asynchronously accept apply packets, dispatch progress in-order + sched := schedule.NewFIFOScheduler() + var ( smu sync.RWMutex syncC <-chan time.Time @@ -629,7 +640,7 @@ func (s *EtcdServer) run() { return } rh := &raftReadyHandler{ - updateLeadership: func() { + updateLeadership: func(newLeader bool) { if !s.isLeader() { if s.lessor != nil { s.lessor.Demote() @@ -639,7 +650,13 @@ func (s *EtcdServer) run() { } setSyncC(nil) } else { - setSyncC(s.SyncTicker) + if newLeader { + t := time.Now() + s.leadTimeMu.Lock() + s.leadElectedTime = t + s.leadTimeMu.Unlock() + } + setSyncC(s.SyncTicker.C) if s.compactor != nil { s.compactor.Resume() } @@ -650,9 +667,6 @@ func (s *EtcdServer) run() { if s.stats != nil { s.stats.BecomeLeader() } - if s.r.td != nil { - s.r.td.Reset() - } }, updateCommittedIndex: func(ci uint64) { cci := s.getCommittedIndex() @@ -663,25 +677,26 @@ func (s *EtcdServer) 
run() { } s.r.start(rh) - // asynchronously accept apply packets, dispatch progress in-order - sched := schedule.NewFIFOScheduler() ep := etcdProgress{ - confState: snap.Metadata.ConfState, - snapi: snap.Metadata.Index, - appliedt: snap.Metadata.Term, - appliedi: snap.Metadata.Index, + confState: sn.Metadata.ConfState, + snapi: sn.Metadata.Index, + appliedt: sn.Metadata.Term, + appliedi: sn.Metadata.Index, } defer func() { s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping close(s.stopping) s.wgMu.Unlock() + s.cancel() sched.Stop() // wait for gouroutines before closing raft so wal stays open s.wg.Wait() + s.SyncTicker.Stop() + // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines // by adding a peer after raft stops the transport s.r.stop() @@ -728,7 +743,8 @@ func (s *EtcdServer) run() { } lid := lease.ID s.goAttach(func() { - s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(lid)}) + s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) + leaseExpired.Inc() <-c }) } @@ -762,7 +778,7 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { // wait for the raft routine to finish the disk writes before triggering a // snapshot. or applied index might be greater than the last index in raft // storage, since the raft routine might be slower than apply routine. 
- <-apply.raftDone + <-apply.notifyc s.triggerSnapshot(ep) select { @@ -787,23 +803,19 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { apply.snapshot.Metadata.Index, ep.appliedi) } - snapfn, err := s.r.storage.DBFilePath(apply.snapshot.Metadata.Index) + // wait for raftNode to persist snapshot onto the disk + <-apply.notifyc + + newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) if err != nil { - plog.Panicf("get database snapshot file path error: %v", err) + plog.Panic(err) } - fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename) - if err := os.Rename(snapfn, fn); err != nil { - plog.Panicf("rename snapshot file error: %v", err) - } - - newbe := backend.NewDefaultBackend(fn) - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. if s.lessor != nil { plog.Info("recovering lessor...") - s.lessor.Recover(newbe, s.kv) + s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() }) plog.Info("finished recovering lessor") } @@ -955,7 +967,7 @@ func (s *EtcdServer) TransferLeadership() error { } tm := s.Cfg.ReqTimeout() - ctx, cancel := context.WithTimeout(context.TODO(), tm) + ctx, cancel := context.WithTimeout(s.ctx, tm) err := s.transferLeadership(ctx, s.Lead(), uint64(transferee)) cancel() return err @@ -1015,7 +1027,7 @@ func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { if s.authStore == nil { - // In the context of ordinal etcd process, s.authStore will never be nil. + // In the context of ordinary etcd process, s.authStore will never be nil. 
// This branch is for handling cases in server_test.go return nil } @@ -1026,7 +1038,7 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err // in the state machine layer // However, both of membership change and role management requires the root privilege. // So careful operation by admins can prevent the problem. - authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) + authInfo, err := s.AuthInfoFromCtx(ctx) if err != nil { return err } @@ -1034,27 +1046,27 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err return s.AuthStore().IsAdminPermitted(authInfo) } -func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) error { +func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { if err := s.checkMembershipOperationPermission(ctx); err != nil { - return err + return nil, err } if s.Cfg.StrictReconfigCheck { // by default StrictReconfigCheck is enabled; reject new members if unhealthy if !s.cluster.IsReadyToAddNewMember() { plog.Warningf("not enough started members, rejecting member add %+v", memb) - return ErrNotEnoughStartedMembers + return nil, ErrNotEnoughStartedMembers } if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) - return ErrUnhealthy + return nil, ErrUnhealthy } } // TODO: move Member to protobuf type b, err := json.Marshal(memb) if err != nil { - return err + return nil, err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeAddNode, @@ -1064,14 +1076,14 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) erro return s.configure(ctx, cc) } -func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error { +func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { if err := s.checkMembershipOperationPermission(ctx); err 
!= nil { - return err + return nil, err } // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss if err := s.mayRemoveMember(types.ID(id)); err != nil { - return err + return nil, err } cc := raftpb.ConfChange{ @@ -1107,14 +1119,14 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { return nil } -func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) error { +func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { b, merr := json.Marshal(memb) if merr != nil { - return merr + return nil, merr } if err := s.checkMembershipOperationPermission(ctx); err != nil { - return err + return nil, err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeUpdateNode, @@ -1137,31 +1149,34 @@ func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } +type confChangeResponse struct { + membs []*membership.Member + err error +} + // configure sends a configuration change through consensus and // then waits for it to be applied to the server. It // will block until the change is performed or there is an error. 
-func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error { +func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { cc.ID = s.reqIDGen.Next() ch := s.w.Register(cc.ID) start := time.Now() if err := s.r.ProposeConfChange(ctx, cc); err != nil { s.w.Trigger(cc.ID, nil) - return err + return nil, err } select { case x := <-ch: - if err, ok := x.(error); ok { - return err + if x == nil { + plog.Panicf("configure trigger value should never be nil") } - if x != nil { - plog.Panicf("return type should always be error") - } - return nil + resp := x.(*confChangeResponse) + return resp.membs, resp.err case <-ctx.Done(): s.w.Trigger(cc.ID, nil) // GC wait - return s.parseProposeCtxErr(ctx.Err(), start) + return nil, s.parseProposeCtxErr(ctx.Err(), start) case <-s.stopping: - return ErrStopped + return nil, ErrStopped } } @@ -1169,7 +1184,6 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error // This makes no guarantee that the request will be proposed or performed. // The request will be canceled after the given timeout. func (s *EtcdServer) sync(timeout time.Duration) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) req := pb.Request{ Method: "SYNC", ID: s.reqIDGen.Next(), @@ -1178,6 +1192,7 @@ func (s *EtcdServer) sync(timeout time.Duration) { data := pbutil.MustMarshal(&req) // There is no promise that node has leader when do SYNC request, // so it uses goroutine to propose. 
+ ctx, cancel := context.WithTimeout(s.ctx, timeout) s.goAttach(func() { s.r.Propose(ctx, data) cancel() @@ -1202,7 +1217,7 @@ func (s *EtcdServer) publish(timeout time.Duration) { } for { - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(s.ctx, timeout) _, err := s.Do(ctx, req) cancel() switch err { @@ -1262,7 +1277,7 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appl removedSelf, err := s.applyConfChange(cc, confState) s.setAppliedIndex(e.Index) shouldStop = shouldStop || removedSelf - s.w.Trigger(cc.ID, err) + s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) default: plog.Panicf("entry type should be either EntryNormal or EntryConfChange") } @@ -1347,8 +1362,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { Action: pb.AlarmRequest_ACTIVATE, Alarm: pb.AlarmType_NOSPACE, } - r := pb.InternalRaftRequest{Alarm: a} - s.processInternalRaftRequest(context.TODO(), r) + s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) s.w.Trigger(id, ar) }) } @@ -1544,7 +1558,7 @@ func (s *EtcdServer) updateClusterVersion(ver string) { Path: membership.StoreClusterVersionKey(), Val: ver, } - ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) + ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) _, err := s.Do(ctx, req) cancel() switch err { @@ -1563,7 +1577,9 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { case context.Canceled: return ErrCanceled case context.DeadlineExceeded: - curLeadElected := s.r.leadElectedTime() + s.leadTimeMu.RLock() + curLeadElected := s.leadElectedTime + s.leadTimeMu.RUnlock() prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) if start.After(prevLeadLost) && start.Before(curLeadElected) { return ErrTimeoutDueToLeaderFail diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go 
b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go index 9cfc852168b..928aa95b6b1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go +++ b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go @@ -60,9 +60,14 @@ func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { n, err := snapshot.WriteTo(pw) if err == nil { plog.Infof("wrote database snapshot out [total bytes: %d]", n) + } else { + plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) } pw.CloseWithError(err) - snapshot.Close() + err = snapshot.Close() + if err != nil { + plog.Panicf("failed to close database snapshot: %v", err) + } }() return pr } diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go index 1bed85474e3..8f6a54ff751 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go @@ -24,25 +24,30 @@ import ( // LeaderStats is used by the leader in an etcd cluster, and encapsulates // statistics about communication with its followers type LeaderStats struct { + leaderStats + sync.Mutex +} + +type leaderStats struct { // Leader is the ID of the leader in the etcd cluster. // TODO(jonboulle): clarify that these are IDs, not names Leader string `json:"leader"` Followers map[string]*FollowerStats `json:"followers"` - - sync.Mutex } // NewLeaderStats generates a new LeaderStats with the given id as leader func NewLeaderStats(id string) *LeaderStats { return &LeaderStats{ - Leader: id, - Followers: make(map[string]*FollowerStats), + leaderStats: leaderStats{ + Leader: id, + Followers: make(map[string]*FollowerStats), + }, } } func (ls *LeaderStats) JSON() []byte { ls.Lock() - stats := *ls + stats := ls.leaderStats ls.Unlock() b, err := json.Marshal(stats) // TODO(jonboulle): appropriate error handling? 
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go index cd450e2d199..0278e885cf9 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go @@ -26,6 +26,26 @@ import ( // ServerStats encapsulates various statistics about an EtcdServer and its // communication with other members of the cluster type ServerStats struct { + serverStats + sync.Mutex +} + +func NewServerStats(name, id string) *ServerStats { + ss := &ServerStats{ + serverStats: serverStats{ + Name: name, + ID: id, + }, + } + now := time.Now() + ss.StartTime = now + ss.LeaderInfo.StartTime = now + ss.sendRateQueue = &statsQueue{back: -1} + ss.recvRateQueue = &statsQueue{back: -1} + return ss +} + +type serverStats struct { Name string `json:"name"` // ID is the raft ID of the node. // TODO(jonboulle): use ID instead of name? @@ -49,17 +69,15 @@ type ServerStats struct { sendRateQueue *statsQueue recvRateQueue *statsQueue - - sync.Mutex } func (ss *ServerStats) JSON() []byte { ss.Lock() - stats := *ss + stats := ss.serverStats ss.Unlock() stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String() - stats.SendingPkgRate, stats.SendingBandwidthRate = stats.SendRates() - stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.RecvRates() + stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate() + stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate() b, err := json.Marshal(stats) // TODO(jonboulle): appropriate error handling? 
if err != nil { @@ -68,32 +86,6 @@ func (ss *ServerStats) JSON() []byte { return b } -// Initialize clears the statistics of ServerStats and resets its start time -func (ss *ServerStats) Initialize() { - if ss == nil { - return - } - now := time.Now() - ss.StartTime = now - ss.LeaderInfo.StartTime = now - ss.sendRateQueue = &statsQueue{ - back: -1, - } - ss.recvRateQueue = &statsQueue{ - back: -1, - } -} - -// RecvRates calculates and returns the rate of received append requests -func (ss *ServerStats) RecvRates() (float64, float64) { - return ss.recvRateQueue.Rate() -} - -// SendRates calculates and returns the rate of sent append requests -func (ss *ServerStats) SendRates() (float64, float64) { - return ss.sendRateQueue.Rate() -} - // RecvAppendReq updates the ServerStats in response to an AppendRequest // from the given leader being received func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go index 693618fbd51..aa8f87569db 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/storage.go +++ b/vendor/github.com/coreos/etcd/etcdserver/storage.go @@ -32,9 +32,6 @@ type Storage interface { Save(st raftpb.HardState, ents []raftpb.Entry) error // SaveSnap function saves snapshot to the underlying stable storage. SaveSnap(snap raftpb.Snapshot) error - // DBFilePath returns the file path of database snapshot saved with given - // id. - DBFilePath(id uint64) (string, error) // Close closes the Storage and performs finalization. 
Close() error } diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go index 66084ae1244..e3896ffc2d3 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/util.go @@ -87,7 +87,7 @@ type notifier struct { func newNotifier() *notifier { return ¬ifier{ - c: make(chan struct{}, 0), + c: make(chan struct{}), } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go index 60653cb6dff..ae449bbf22f 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go @@ -19,6 +19,8 @@ import ( "encoding/binary" "time" + "github.com/gogo/protobuf/proto" + "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" @@ -27,17 +29,10 @@ import ( "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/raft" - "github.com/coreos/go-semver/semver" "golang.org/x/net/context" ) const ( - // the max request size that raft accepts. - // TODO: make this a flag? But we probably do not want to - // accept large request which might block raft stream. User - // specify a large value might end up with shooting in the foot. - maxRequestBytes = 1.5 * 1024 * 1024 - // In the health case, there might be a small gap (10s of entries) between // the applied index and committed index. // However, if the committed entries are very heavy to apply, the gap might grow. 
@@ -45,10 +40,6 @@ const ( maxGapBetweenApplyAndCommitIndex = 5000 ) -var ( - newRangeClusterVersion = *semver.Must(semver.NewVersion("3.1.0")) -) - type RaftKV interface { Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) @@ -91,11 +82,6 @@ type Authenticator interface { } func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - // TODO: remove this checking when we release etcd 3.2 - if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) { - return s.legacyRange(ctx, r) - } - if !r.Serializable { err := s.linearizableReadNotify(ctx) if err != nil { @@ -107,65 +93,30 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe chk := func(ai *auth.AuthInfo) error { return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) } - get := func() { resp, err = s.applyV3Base.Range(noTxn, r) } + get := func() { resp, err = s.applyV3Base.Range(nil, r) } if serr := s.doSerialize(ctx, chk, get); serr != nil { return nil, serr } return resp, err } -// TODO: remove this func when we release etcd 3.2 -func (s *EtcdServer) legacyRange(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if r.Serializable { - var resp *pb.RangeResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) - } - get := func() { resp, err = s.applyV3Base.Range(noTxn, r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - } - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.RangeResponse), nil -} - func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - result, err := 
s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Put: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.PutResponse), nil + return resp.(*pb.PutResponse), nil } func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.DeleteRangeResponse), nil + return resp.(*pb.DeleteRangeResponse), nil } func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - // TODO: remove this checking when we release etcd 3.2 - if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) { - return s.legacyTxn(ctx, r) - } - if isTxnReadonly(r) { if !isTxnSerializable(r) { err := s.linearizableReadNotify(ctx) @@ -184,38 +135,11 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse } return resp, err } - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.TxnResponse), nil -} - -// TODO: remove this func when we release etcd 3.2 -func (s *EtcdServer) legacyTxn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if isTxnSerializable(r) { - var resp *pb.TxnResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return checkTxnAuth(s.authStore, ai, r) - } - get := func() { resp, err = s.applyV3Base.Txn(r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - 
} - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.TxnResponse), nil + return resp.(*pb.TxnResponse), nil } func isTxnSerializable(r *pb.TxnRequest) bool { @@ -280,25 +204,19 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (* // only use positive int64 id's r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) } - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.LeaseGrantResponse), nil + return resp.(*pb.LeaseGrantResponse), nil } func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.LeaseRevokeResponse), nil + return resp.(*pb.LeaseRevokeResponse), nil } func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { @@ -394,54 +312,45 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) } func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AlarmResponse), nil + return resp.(*pb.AlarmResponse), nil } func (s *EtcdServer) AuthEnable(ctx 
context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthEnableResponse), nil + return resp.(*pb.AuthEnableResponse), nil } func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthDisableResponse), nil + return resp.(*pb.AuthDisableResponse), nil } func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - var result *applyResult - - err := s.linearizableReadNotify(ctx) - if err != nil { + if err := s.linearizableReadNotify(ctx); err != nil { return nil, err } + var resp proto.Message for { checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) if err != nil { - plog.Errorf("invalid authentication request to user %s was issued", r.Name) + if err != auth.ErrAuthNotEnabled { + plog.Errorf("invalid authentication request to user %s was issued", r.Name) + } return nil, err } - st, err := s.AuthStore().GenSimpleToken() + st, err := s.AuthStore().GenTokenPrefix() if err != nil { return nil, err } @@ -452,172 +361,147 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest SimpleToken: st, } - result, err = s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) + resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) if err != nil { return nil, err } - if result.err 
!= nil { - return nil, result.err + if checkedRevision == s.AuthStore().Revision() { + break } - - if checkedRevision != s.AuthStore().Revision() { - plog.Infof("revision when password checked is obsolete, retrying") - continue - } - - break + plog.Infof("revision when password checked is obsolete, retrying") } - return result.resp.(*pb.AuthenticateResponse), nil + return resp.(*pb.AuthenticateResponse), nil } func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserAddResponse), nil + return resp.(*pb.AuthUserAddResponse), nil } func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserDeleteResponse), nil + return resp.(*pb.AuthUserDeleteResponse), nil } func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserChangePasswordResponse), nil + return resp.(*pb.AuthUserChangePasswordResponse), nil } func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) 
(*pb.AuthUserGrantRoleResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserGrantRoleResponse), nil + return resp.(*pb.AuthUserGrantRoleResponse), nil } func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserGetResponse), nil + return resp.(*pb.AuthUserGetResponse), nil } func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserListResponse), nil + return resp.(*pb.AuthUserListResponse), nil } func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserRevokeRoleResponse), nil + return resp.(*pb.AuthUserRevokeRoleResponse), nil } func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - result, err := 
s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleAddResponse), nil + return resp.(*pb.AuthRoleAddResponse), nil } func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleGrantPermissionResponse), nil + return resp.(*pb.AuthRoleGrantPermissionResponse), nil } func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleGetResponse), nil + return resp.(*pb.AuthRoleGetResponse), nil } func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleListResponse), nil + return resp.(*pb.AuthRoleListResponse), nil } func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - result, err := 
s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleRevokePermissionResponse), nil + return resp.(*pb.AuthRoleRevokePermissionResponse), nil } func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleDeleteResponse), nil +} + +func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + result, err := s.processInternalRaftRequestOnce(ctx, r) if err != nil { return nil, err } if result.err != nil { return nil, result.err } - return result.resp.(*pb.AuthRoleDeleteResponse), nil + return result.resp, nil +} + +func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + for { + resp, err := s.raftRequestOnce(ctx, r) + if err != auth.ErrAuthOldRevision { + return resp, err + } + } } // doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. 
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { for { - ai, err := s.AuthStore().AuthInfoFromCtx(ctx) + ai, err := s.AuthInfoFromCtx(ctx) if err != nil { return err } @@ -652,7 +536,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In ID: s.reqIDGen.Next(), } - authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) + authInfo, err := s.AuthInfoFromCtx(ctx) if err != nil { return nil, err } @@ -666,7 +550,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In return nil, err } - if len(data) > maxRequestBytes { + if len(data) > int(s.Cfg.MaxRequestBytes) { return nil, ErrRequestTooLarge } @@ -696,19 +580,6 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In } } -func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { - var result *applyResult - var err error - for { - result, err = s.processInternalRaftRequestOnce(ctx, r) - if err != auth.ErrAuthOldRevision { - break - } - } - - return result, err -} - // Watchable returns a watchable interface attached to the etcdserver. 
func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } @@ -802,3 +673,14 @@ func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { return ErrStopped } } + +func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) { + if s.Cfg.ClientCertAuthEnabled { + authInfo := s.AuthStore().AuthInfoFromTLS(ctx) + if authInfo != nil { + return authInfo, nil + } + } + + return s.AuthStore().AuthInfoFromCtx(ctx) +} diff --git a/vendor/github.com/coreos/etcd/integration/BUILD b/vendor/github.com/coreos/etcd/integration/BUILD index c6a3e2ae69b..cd730e4a52a 100644 --- a/vendor/github.com/coreos/etcd/integration/BUILD +++ b/vendor/github.com/coreos/etcd/integration/BUILD @@ -13,9 +13,15 @@ go_library( deps = [ "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/embed:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3client:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library", @@ -25,6 +31,7 @@ go_library( "//vendor/github.com/coreos/pkg/capnslog:go_default_library", 
"//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/keepalive:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/integration/bridge.go b/vendor/github.com/coreos/etcd/integration/bridge.go index b9e67318e52..59cebe1f0e0 100644 --- a/vendor/github.com/coreos/etcd/integration/bridge.go +++ b/vendor/github.com/coreos/etcd/integration/bridge.go @@ -17,6 +17,7 @@ package integration import ( "fmt" "io" + "io/ioutil" "net" "sync" @@ -31,9 +32,10 @@ type bridge struct { l net.Listener conns map[*bridgeConn]struct{} - stopc chan struct{} - pausec chan struct{} - wg sync.WaitGroup + stopc chan struct{} + pausec chan struct{} + blackholec chan struct{} + wg sync.WaitGroup mu sync.Mutex } @@ -41,11 +43,12 @@ type bridge struct { func newBridge(addr string) (*bridge, error) { b := &bridge{ // bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number - inaddr: addr + "0", - outaddr: addr, - conns: make(map[*bridgeConn]struct{}), - stopc: make(chan struct{}), - pausec: make(chan struct{}), + inaddr: addr + "0", + outaddr: addr, + conns: make(map[*bridgeConn]struct{}), + stopc: make(chan struct{}), + pausec: make(chan struct{}), + blackholec: make(chan struct{}), } close(b.pausec) @@ -152,12 +155,12 @@ func (b *bridge) serveConn(bc *bridgeConn) { var wg sync.WaitGroup wg.Add(2) go func() { - io.Copy(bc.out, bc.in) + b.ioCopy(bc, bc.out, bc.in) bc.close() wg.Done() }() go func() { - io.Copy(bc.in, bc.out) + b.ioCopy(bc, bc.in, bc.out) bc.close() wg.Done() }() @@ -179,3 +182,47 @@ func (bc *bridgeConn) close() { bc.in.Close() bc.out.Close() } + +func (b *bridge) Blackhole() { + b.mu.Lock() + close(b.blackholec) + b.mu.Unlock() +} + +func (b *bridge) Unblackhole() { + b.mu.Lock() + for bc := range b.conns { + bc.Close() + } + b.conns = make(map[*bridgeConn]struct{}) + b.blackholec = make(chan struct{}) + b.mu.Unlock() +} + +// ref. 
https://github.com/golang/go/blob/master/src/io/io.go copyBuffer +func (b *bridge) ioCopy(bc *bridgeConn, dst io.Writer, src io.Reader) (err error) { + buf := make([]byte, 32*1024) + for { + select { + case <-b.blackholec: + io.Copy(ioutil.Discard, src) + return nil + default: + } + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if ew != nil { + return ew + } + if nr != nw { + return io.ErrShortWrite + } + } + if er != nil { + err = er + break + } + } + return +} diff --git a/vendor/github.com/coreos/etcd/integration/cluster.go b/vendor/github.com/coreos/etcd/integration/cluster.go index 4989e1f62fa..a8fa542b250 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster.go +++ b/vendor/github.com/coreos/etcd/integration/cluster.go @@ -31,21 +31,28 @@ import ( "testing" "time" - "golang.org/x/net/context" - "google.golang.org/grpc" - "github.com/coreos/etcd/client" "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/embed" "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http" + "github.com/coreos/etcd/etcdserver/api/v3client" + "github.com/coreos/etcd/etcdserver/api/v3election" + epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "github.com/coreos/etcd/etcdserver/api/v3lock" + lockpb "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" "github.com/coreos/etcd/etcdserver/api/v3rpc" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/testutil" "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/rafthttp" + "github.com/coreos/pkg/capnslog" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" ) const ( @@ -72,16 +79,29 @@ var ( ClientCertAuth: true, } + testTLSInfoExpired = transport.TLSInfo{ + KeyFile: "./fixtures-expired/server-key.pem", + CertFile: 
"./fixtures-expired/server.pem", + TrustedCAFile: "./fixtures-expired/etcd-root-ca.pem", + ClientCertAuth: true, + } + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "integration") ) type ClusterConfig struct { - Size int - PeerTLS *transport.TLSInfo - ClientTLS *transport.TLSInfo - DiscoveryURL string - UseGRPC bool - QuotaBackendBytes int64 + Size int + PeerTLS *transport.TLSInfo + ClientTLS *transport.TLSInfo + DiscoveryURL string + UseGRPC bool + QuotaBackendBytes int64 + MaxRequestBytes uint + GRPCKeepAliveMinTime time.Duration + GRPCKeepAliveInterval time.Duration + GRPCKeepAliveTimeout time.Duration + // SkipCreatingClient to skip creating clients for each member. + SkipCreatingClient bool } type cluster struct { @@ -89,11 +109,6 @@ type cluster struct { Members []*member } -func init() { - // manually enable v3 capability since we know the cluster members all support v3. - api.EnableCapability(api.V3rpcCapability) -} - func schemeFromTLSInfo(tls *transport.TLSInfo) string { if tls == nil { return UrlScheme @@ -175,8 +190,12 @@ func (c *cluster) URL(i int) string { // URLs returns a list of all active client URLs in the cluster func (c *cluster) URLs() []string { + return getMembersURLs(c.Members) +} + +func getMembersURLs(members []*member) []string { urls := make([]string, 0) - for _, m := range c.Members { + for _, m := range members { select { case <-m.s.StopNotify(): continue @@ -210,10 +229,14 @@ func (c *cluster) HTTPMembers() []client.Member { func (c *cluster) mustNewMember(t *testing.T) *member { m := mustNewMember(t, memberConfig{ - name: c.name(rand.Int()), - peerTLS: c.cfg.PeerTLS, - clientTLS: c.cfg.ClientTLS, - quotaBackendBytes: c.cfg.QuotaBackendBytes, + name: c.name(rand.Int()), + peerTLS: c.cfg.PeerTLS, + clientTLS: c.cfg.ClientTLS, + quotaBackendBytes: c.cfg.QuotaBackendBytes, + maxRequestBytes: c.cfg.MaxRequestBytes, + grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime, + grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval, + 
grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout, }) m.DiscoveryURL = c.cfg.DiscoveryURL if c.cfg.UseGRPC { @@ -312,9 +335,15 @@ func (c *cluster) removeMember(t *testing.T, id uint64) error { } func (c *cluster) Terminate(t *testing.T) { + var wg sync.WaitGroup + wg.Add(len(c.Members)) for _, m := range c.Members { - m.Terminate(t) + go func(mm *member) { + defer wg.Done() + mm.Terminate(t) + }(m) } + wg.Wait() } func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) { @@ -331,7 +360,6 @@ func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) { time.Sleep(tickDuration) } } - return } func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) } @@ -343,6 +371,18 @@ func (c *cluster) waitLeader(t *testing.T, membs []*member) int { for _, m := range membs { possibleLead[uint64(m.s.ID())] = true } + cc := MustNewHTTPClient(t, getMembersURLs(membs), nil) + kapi := client.NewKeysAPI(cc) + + // ensure leader is up via linearizable get + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration) + _, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true}) + cancel() + if err == nil || strings.Contains(err.Error(), "Key not found") { + break + } + } for lead == 0 || !possibleLead[lead] { lead = 0 @@ -446,9 +486,13 @@ type member struct { s *etcdserver.EtcdServer hss []*httptest.Server - grpcServer *grpc.Server - grpcAddr string - grpcBridge *bridge + grpcServerOpts []grpc.ServerOption + grpcServer *grpc.Server + grpcAddr string + grpcBridge *bridge + + // serverClient is a clientv3 that directly calls the etcdserver. 
+ serverClient *clientv3.Client keepDataDirTerminate bool } @@ -456,10 +500,14 @@ type member struct { func (m *member) GRPCAddr() string { return m.grpcAddr } type memberConfig struct { - name string - peerTLS *transport.TLSInfo - clientTLS *transport.TLSInfo - quotaBackendBytes int64 + name string + peerTLS *transport.TLSInfo + clientTLS *transport.TLSInfo + quotaBackendBytes int64 + maxRequestBytes uint + grpcKeepAliveMinTime time.Duration + grpcKeepAliveInterval time.Duration + grpcKeepAliveTimeout time.Duration } // mustNewMember return an inited member with the given name. If peerTLS is @@ -507,6 +555,26 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member { m.ElectionTicks = electionTicks m.TickMs = uint(tickDuration / time.Millisecond) m.QuotaBackendBytes = mcfg.quotaBackendBytes + m.MaxRequestBytes = mcfg.maxRequestBytes + if m.MaxRequestBytes == 0 { + m.MaxRequestBytes = embed.DefaultMaxRequestBytes + } + m.AuthToken = "simple" // for the purpose of integration testing, simple token is enough + + m.grpcServerOpts = []grpc.ServerOption{} + if mcfg.grpcKeepAliveMinTime > time.Duration(0) { + m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: mcfg.grpcKeepAliveMinTime, + PermitWithoutStream: false, + })) + } + if mcfg.grpcKeepAliveInterval > time.Duration(0) && + mcfg.grpcKeepAliveTimeout > time.Duration(0) { + m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: mcfg.grpcKeepAliveInterval, + Timeout: mcfg.grpcKeepAliveTimeout, + })) + } return m } @@ -523,7 +591,7 @@ func (m *member) listenGRPC() error { l.Close() return err } - m.grpcAddr = m.grpcBridge.URL() + m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + m.grpcBridge.inaddr m.grpcListener = l return nil } @@ -535,6 +603,8 @@ func (m *member) electionTimeout() time.Duration { func (m *member) DropConnections() { m.grpcBridge.Reset() } func (m *member) PauseConnections() { 
m.grpcBridge.Pause() } func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() } +func (m *member) Blackhole() { m.grpcBridge.Blackhole() } +func (m *member) Unblackhole() { m.grpcBridge.Unblackhole() } // NewClientV3 creates a new grpc client connection to the member func NewClientV3(m *member) (*clientv3.Client, error) { @@ -597,10 +667,10 @@ func (m *member) Launch() error { if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.Tick(500 * time.Millisecond) + m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) m.s.Start() - m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)} + m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)} for _, ln := range m.PeerListeners { hs := &httptest.Server{ @@ -644,7 +714,10 @@ func (m *member) Launch() error { return err } } - m.grpcServer = v3rpc.Server(m.s, tlscfg) + m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...) + m.serverClient = v3client.New(m.s) + lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient)) + epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient)) go m.grpcServer.Serve(m.grpcListener) } @@ -688,8 +761,12 @@ func (m *member) Close() { m.grpcBridge.Close() m.grpcBridge = nil } + if m.serverClient != nil { + m.serverClient.Close() + m.serverClient = nil + } if m.grpcServer != nil { - m.grpcServer.Stop() + m.grpcServer.GracefulStop() m.grpcServer = nil } m.s.HardStop() @@ -785,7 +862,7 @@ func (m *member) Metric(metricName string) (string, error) { } // InjectPartition drops connections from m to others, vice versa. 
-func (m *member) InjectPartition(t *testing.T, others []*member) { +func (m *member) InjectPartition(t *testing.T, others ...*member) { for _, other := range others { m.s.CutPeer(other.s.ID()) other.s.CutPeer(m.s.ID()) @@ -793,7 +870,7 @@ func (m *member) InjectPartition(t *testing.T, others []*member) { } // RecoverPartition recovers connections from m to others, vice versa. -func (m *member) RecoverPartition(t *testing.T, others []*member) { +func (m *member) RecoverPartition(t *testing.T, others ...*member) { for _, other := range others { m.s.MendPeer(other.s.ID()) other.s.MendPeer(m.s.ID()) @@ -845,12 +922,15 @@ func NewClusterV3(t *testing.T, cfg *ClusterConfig) *ClusterV3 { cluster: NewClusterByConfig(t, cfg), } clus.Launch(t) - for _, m := range clus.Members { - client, err := NewClientV3(m) - if err != nil { - t.Fatalf("cannot create client: %v", err) + + if !cfg.SkipCreatingClient { + for _, m := range clus.Members { + client, err := NewClientV3(m) + if err != nil { + t.Fatalf("cannot create client: %v", err) + } + clus.clients = append(clus.clients, client) } - clus.clients = append(clus.clients, client) } return clus @@ -897,4 +977,8 @@ type grpcAPI struct { Maintenance pb.MaintenanceClient // Auth is the authentication API for the client's connection. Auth pb.AuthClient + // Lock is the lock API for the client's connection. + Lock lockpb.LockClient + // Election is the election API for the client's connection. 
+ Election epb.ElectionClient } diff --git a/vendor/github.com/coreos/etcd/integration/cluster_direct.go b/vendor/github.com/coreos/etcd/integration/cluster_direct.go index 84b2a796cc0..ff97e6146ed 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster_direct.go +++ b/vendor/github.com/coreos/etcd/integration/cluster_direct.go @@ -18,6 +18,8 @@ package integration import ( "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" ) @@ -29,6 +31,8 @@ func toGRPC(c *clientv3.Client) grpcAPI { pb.NewWatchClient(c.ActiveConnection()), pb.NewMaintenanceClient(c.ActiveConnection()), pb.NewAuthClient(c.ActiveConnection()), + v3lockpb.NewLockClient(c.ActiveConnection()), + v3electionpb.NewElectionClient(c.ActiveConnection()), } } diff --git a/vendor/github.com/coreos/etcd/integration/cluster_proxy.go b/vendor/github.com/coreos/etcd/integration/cluster_proxy.go index 75319218ec6..3916553be86 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster_proxy.go +++ b/vendor/github.com/coreos/etcd/integration/cluster_proxy.go @@ -20,8 +20,10 @@ import ( "sync" "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/namespace" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/proxy/grpcproxy" + "github.com/coreos/etcd/proxy/grpcproxy/adapter" ) var ( @@ -29,10 +31,13 @@ var ( proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy) ) +const proxyNamespace = "proxy-namespace" + type grpcClientProxy struct { grpc grpcAPI wdonec <-chan struct{} kvdonec <-chan struct{} + lpdonec <-chan struct{} } func toGRPC(c *clientv3.Client) grpcAPI { @@ -43,17 +48,30 @@ func toGRPC(c *clientv3.Client) grpcAPI { return v.grpc } - wp, wpch := grpcproxy.NewWatchProxy(c) + // test namespacing proxy + c.KV = namespace.NewKV(c.KV, proxyNamespace) + c.Watcher = 
namespace.NewWatcher(c.Watcher, proxyNamespace) + c.Lease = namespace.NewLease(c.Lease, proxyNamespace) + // test coalescing/caching proxy kvp, kvpch := grpcproxy.NewKvProxy(c) + wp, wpch := grpcproxy.NewWatchProxy(c) + lp, lpch := grpcproxy.NewLeaseProxy(c) + mp := grpcproxy.NewMaintenanceProxy(c) + clp, _ := grpcproxy.NewClusterProxy(c, "", "") // without registering proxy URLs + lockp := grpcproxy.NewLockProxy(c) + electp := grpcproxy.NewElectionProxy(c) + grpc := grpcAPI{ - pb.NewClusterClient(c.ActiveConnection()), - grpcproxy.KvServerToKvClient(kvp), - pb.NewLeaseClient(c.ActiveConnection()), - grpcproxy.WatchServerToWatchClient(wp), - pb.NewMaintenanceClient(c.ActiveConnection()), + adapter.ClusterServerToClusterClient(clp), + adapter.KvServerToKvClient(kvp), + adapter.LeaseServerToLeaseClient(lp), + adapter.WatchServerToWatchClient(wp), + adapter.MaintenanceServerToMaintenanceClient(mp), pb.NewAuthClient(c.ActiveConnection()), + adapter.LockServerToLockClient(lockp), + adapter.ElectionServerToElectionClient(electp), } - proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch} + proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch, lpdonec: lpch} return grpc } @@ -61,13 +79,17 @@ type proxyCloser struct { clientv3.Watcher wdonec <-chan struct{} kvdonec <-chan struct{} + lclose func() + lpdonec <-chan struct{} } func (pc *proxyCloser) Close() error { - // client ctx is canceled before calling close, so kv will close out + // client ctx is canceled before calling close, so kv and lp will close out <-pc.kvdonec err := pc.Watcher.Close() <-pc.wdonec + pc.lclose() + <-pc.lpdonec return err } @@ -79,10 +101,14 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { rpc := toGRPC(c) c.KV = clientv3.NewKVFromKVClient(rpc.KV) pmu.Lock() + lc := c.Lease + c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, cfg.DialTimeout) c.Watcher = &proxyCloser{ Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch), wdonec: 
proxies[c].wdonec, kvdonec: proxies[c].kvdonec, + lclose: func() { lc.Close() }, + lpdonec: proxies[c].lpdonec, } pmu.Unlock() return c, nil diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD b/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD index 7be8ef45cc9..1385cb46bf8 100644 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD +++ b/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD @@ -13,7 +13,6 @@ go_library( "//vendor/github.com/coreos/etcd/lease:go_default_library", "//vendor/github.com/coreos/etcd/lease/leasepb:go_default_library", "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go index 256051efc8d..c3175cbbb0f 100644 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go +++ b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go @@ -16,6 +16,7 @@ package leasehttp import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -26,7 +27,6 @@ import ( "github.com/coreos/etcd/lease" "github.com/coreos/etcd/lease/leasepb" "github.com/coreos/etcd/pkg/httputil" - "golang.org/x/net/context" ) var ( @@ -202,45 +202,27 @@ func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string } req.Header.Set("Content-Type", "application/protobuf") - cancel := httputil.RequestCanceler(req) + req = req.WithContext(ctx) cc := &http.Client{Transport: rt} var b []byte // buffer errc channel so that errc don't block inside the go routinue - errc := make(chan error, 2) - go func() { - resp, err := cc.Do(req) - if err != nil { - errc <- err - return - } - b, err = readResponse(resp) - if err != nil { - errc <- err - return - } - if resp.StatusCode == http.StatusRequestTimeout { - errc <- ErrLeaseHTTPTimeout - return - } - if resp.StatusCode == http.StatusNotFound { - errc <- lease.ErrLeaseNotFound - return - } - if resp.StatusCode != 
http.StatusOK { - errc <- fmt.Errorf("lease: unknown error(%s)", string(b)) - return - } - errc <- nil - }() - select { - case derr := <-errc: - if derr != nil { - return nil, derr - } - case <-ctx.Done(): - cancel() - return nil, ctx.Err() + resp, err := cc.Do(req) + if err != nil { + return nil, err + } + b, err = readResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode == http.StatusRequestTimeout { + return nil, ErrLeaseHTTPTimeout + } + if resp.StatusCode == http.StatusNotFound { + return nil, lease.ErrLeaseNotFound + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("lease: unknown error(%s)", string(b)) } lresp := &leasepb.LeaseInternalResponse{} diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go index fb3a9bab0c3..ec8db732be5 100644 --- a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go +++ b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go @@ -590,7 +590,7 @@ func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) } var fileDescriptorLease = []byte{ // 233 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92, diff --git a/vendor/github.com/coreos/etcd/lease/lessor.go b/vendor/github.com/coreos/etcd/lease/lessor.go index 385bd76d73c..3418cf565ed 100644 --- a/vendor/github.com/coreos/etcd/lease/lessor.go +++ b/vendor/github.com/coreos/etcd/lease/lessor.go @@ -31,40 +31,39 @@ import ( const ( // NoLease is a special LeaseID representing the absence of a 
lease. NoLease = LeaseID(0) + + forever = monotime.Time(math.MaxInt64) ) var ( leaseBucketName = []byte("lease") - forever = monotime.Time(math.MaxInt64) + // maximum number of leases to revoke per second; configurable for tests + leaseRevokeRate = 1000 ErrNotPrimary = errors.New("not a primary lessor") ErrLeaseNotFound = errors.New("lease not found") ErrLeaseExists = errors.New("lease already exists") ) -type LeaseID int64 - -// RangeDeleter defines an interface with Txn and DeleteRange method. -// We define this interface only for lessor to limit the number -// of methods of mvcc.KV to what lessor actually needs. -// -// Having a minimum interface makes testing easy. -type RangeDeleter interface { - // TxnBegin see comments on mvcc.KV - TxnBegin() int64 - // TxnEnd see comments on mvcc.KV - TxnEnd(txnID int64) error - // TxnDeleteRange see comments on mvcc.KV - TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) +// TxnDelete is a TxnWrite that only permits deletes. Defined here +// to avoid circular dependency with mvcc. +type TxnDelete interface { + DeleteRange(key, end []byte) (n, rev int64) + End() } +// RangeDeleter is a TxnDelete constructor. +type RangeDeleter func() TxnDelete + +type LeaseID int64 + // Lessor owns leases. It can grant, revoke, renew and modify leases for lessee. type Lessor interface { - // SetRangeDeleter sets the RangeDeleter to the Lessor. - // Lessor deletes the items in the revoked or expired lease from the - // the set RangeDeleter. - SetRangeDeleter(dr RangeDeleter) + // SetRangeDeleter lets the lessor create TxnDeletes to the store. + // Lessor deletes the items in the revoked or expired lease by creating + // new TxnDeletes. + SetRangeDeleter(rd RangeDeleter) // Grant grants a lease that expires at least after TTL seconds. 
Grant(id LeaseID, ttl int64) (*Lease, error) @@ -248,17 +247,14 @@ func (le *lessor) Revoke(id LeaseID) error { return nil } - tid := le.rd.TxnBegin() + txn := le.rd() // sort keys so deletes are in same order among all members, // otherwise the backened hashes will be different keys := l.Keys() sort.StringSlice(keys).Sort() for _, key := range keys { - _, _, err := le.rd.TxnDeleteRange(tid, []byte(key), nil) - if err != nil { - panic(err) - } + txn.DeleteRange([]byte(key), nil) } le.mu.Lock() @@ -269,11 +265,7 @@ func (le *lessor) Revoke(id LeaseID) error { // deleting the keys if etcdserver fails in between. le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID))) - err := le.rd.TxnEnd(tid) - if err != nil { - panic(err) - } - + txn.End() return nil } @@ -335,8 +327,53 @@ func (le *lessor) Promote(extend time.Duration) { for _, l := range le.leaseMap { l.refresh(extend) } + + if len(le.leaseMap) < leaseRevokeRate { + // no possibility of lease pile-up + return + } + + // adjust expiries in case of overlap + leases := make([]*Lease, 0, len(le.leaseMap)) + for _, l := range le.leaseMap { + leases = append(leases, l) + } + sort.Sort(leasesByExpiry(leases)) + + baseWindow := leases[0].Remaining() + nextWindow := baseWindow + time.Second + expires := 0 + // have fewer expires than the total revoke rate so piled up leases + // don't consume the entire revoke limit + targetExpiresPerSecond := (3 * leaseRevokeRate) / 4 + for _, l := range leases { + remaining := l.Remaining() + if remaining > nextWindow { + baseWindow = remaining + nextWindow = baseWindow + time.Second + expires = 1 + continue + } + expires++ + if expires <= targetExpiresPerSecond { + continue + } + rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond)) + // If leases are extended by n seconds, leases n seconds ahead of the + // base window should be extended by only one second. 
+ rateDelay -= float64(remaining - baseWindow) + delay := time.Duration(rateDelay) + nextWindow = baseWindow + delay + l.refresh(delay + extend) + } } +type leasesByExpiry []*Lease + +func (le leasesByExpiry) Len() int { return len(le) } +func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() } +func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] } + func (le *lessor) Demote() { le.mu.Lock() defer le.mu.Unlock() @@ -433,6 +470,10 @@ func (le *lessor) runLoop() { le.mu.Unlock() if len(ls) != 0 { + // rate limit + if len(ls) > leaseRevokeRate/2 { + ls = ls[:leaseRevokeRate/2] + } select { case <-le.stopC: return diff --git a/vendor/github.com/coreos/etcd/mvcc/BUILD b/vendor/github.com/coreos/etcd/mvcc/BUILD index ab4fab1efde..21b837ec9ac 100644 --- a/vendor/github.com/coreos/etcd/mvcc/BUILD +++ b/vendor/github.com/coreos/etcd/mvcc/BUILD @@ -7,12 +7,16 @@ go_library( "index.go", "key_index.go", "kv.go", + "kv_view.go", "kvstore.go", "kvstore_compaction.go", + "kvstore_txn.go", "metrics.go", + "metrics_txn.go", "revision.go", "util.go", "watchable_store.go", + "watchable_store_txn.go", "watcher.go", "watcher_group.go", ], diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD index 137f4aacc21..73c025f8294 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD +++ b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD @@ -5,19 +5,24 @@ go_library( srcs = [ "backend.go", "batch_tx.go", - "boltoption_default.go", + "config_default.go", "doc.go", "metrics.go", + "read_tx.go", + "tx_buffer.go", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ - "boltoption_linux.go", + "config_linux.go", + ], + "@io_bazel_rules_go//go/platform:windows_amd64": [ + "config_windows.go", ], "//conditions:default": [], }), importpath = "github.com/coreos/etcd/mvcc/backend", visibility = ["//visibility:public"], deps = [ - 
"//vendor/github.com/boltdb/bolt:go_default_library", + "//vendor/github.com/coreos/bbolt:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ], diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go index e5e0028f94b..87edd25f427 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go @@ -25,7 +25,7 @@ import ( "sync/atomic" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" "github.com/coreos/pkg/capnslog" ) @@ -35,25 +35,21 @@ var ( defragLimit = 10000 - // InitialMmapSize is the initial size of the mmapped region. Setting this larger than + // initialMmapSize is the initial size of the mmapped region. Setting this larger than // the potential max db size can prevent writer from blocking reader. // This only works for linux. - InitialMmapSize = int64(10 * 1024 * 1024 * 1024) + initialMmapSize = uint64(10 * 1024 * 1024 * 1024) plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend") -) -const ( - // DefaultQuotaBytes is the number of bytes the backend Size may - // consume before exceeding the space quota. - DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB - // MaxQuotaBytes is the maximum number of bytes suggested for a backend - // quota. A larger quota may lead to degraded performance. - MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB + // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning. + minSnapshotWarningTimeout = time.Duration(30 * time.Second) ) type Backend interface { + ReadTx() ReadTx BatchTx() BatchTx + Snapshot() Snapshot Hash(ignores map[IgnoreKey]struct{}) (uint32, error) // Size returns the current size of the backend. 
@@ -86,36 +82,71 @@ type backend struct { batchInterval time.Duration batchLimit int - batchTx *batchTx + batchTx *batchTxBuffered + + readTx *readTx stopc chan struct{} donec chan struct{} } -func New(path string, d time.Duration, limit int) Backend { - return newBackend(path, d, limit) +type BackendConfig struct { + // Path is the file path to the backend file. + Path string + // BatchInterval is the maximum time before flushing the BatchTx. + BatchInterval time.Duration + // BatchLimit is the maximum puts before flushing the BatchTx. + BatchLimit int + // MmapSize is the number of bytes to mmap for the backend. + MmapSize uint64 +} + +func DefaultBackendConfig() BackendConfig { + return BackendConfig{ + BatchInterval: defaultBatchInterval, + BatchLimit: defaultBatchLimit, + MmapSize: initialMmapSize, + } +} + +func New(bcfg BackendConfig) Backend { + return newBackend(bcfg) } func NewDefaultBackend(path string) Backend { - return newBackend(path, defaultBatchInterval, defaultBatchLimit) + bcfg := DefaultBackendConfig() + bcfg.Path = path + return newBackend(bcfg) } -func newBackend(path string, d time.Duration, limit int) *backend { - db, err := bolt.Open(path, 0600, boltOpenOptions) +func newBackend(bcfg BackendConfig) *backend { + bopts := &bolt.Options{} + if boltOpenOptions != nil { + *bopts = *boltOpenOptions + } + bopts.InitialMmapSize = bcfg.mmapSize() + + db, err := bolt.Open(bcfg.Path, 0600, bopts) if err != nil { - plog.Panicf("cannot open database at %s (%v)", path, err) + plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err) } + // In future, may want to make buffering optional for low-concurrency systems + // or dynamically swap between buffered/non-buffered depending on workload. 
b := &backend{ db: db, - batchInterval: d, - batchLimit: limit, + batchInterval: bcfg.BatchInterval, + batchLimit: bcfg.BatchLimit, + + readTx: &readTx{buf: txReadBuffer{ + txBuffer: txBuffer{make(map[string]*bucketBuffer)}}, + }, stopc: make(chan struct{}), donec: make(chan struct{}), } - b.batchTx = newBatchTx(b) + b.batchTx = newBatchTxBuffered(b) go b.run() return b } @@ -127,6 +158,8 @@ func (b *backend) BatchTx() BatchTx { return b.batchTx } +func (b *backend) ReadTx() ReadTx { return b.readTx } + // ForceCommit forces the current batching tx to commit. func (b *backend) ForceCommit() { b.batchTx.Commit() @@ -141,7 +174,33 @@ func (b *backend) Snapshot() Snapshot { if err != nil { plog.Fatalf("cannot begin tx (%s)", err) } - return &snapshot{tx} + + stopc, donec := make(chan struct{}), make(chan struct{}) + dbBytes := tx.Size() + go func() { + defer close(donec) + // sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection + // assuming a min tcp throughput of 100MB/s. 
+ var sendRateBytes int64 = 100 * 1024 * 1024 + warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second))) + if warningTimeout < minSnapshotWarningTimeout { + warningTimeout = minSnapshotWarningTimeout + } + start := time.Now() + ticker := time.NewTicker(warningTimeout) + defer ticker.Stop() + for { + select { + case <-ticker.C: + plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1024), start) + case <-stopc: + snapshotDurations.Observe(time.Since(start).Seconds()) + return + } + } + }() + + return &snapshot{tx, stopc, donec} } type IgnoreKey struct { @@ -235,7 +294,11 @@ func (b *backend) defrag() error { b.mu.Lock() defer b.mu.Unlock() - b.batchTx.commit(true) + // block concurrent read requests while resetting tx + b.readTx.mu.Lock() + defer b.readTx.mu.Unlock() + + b.batchTx.unsafeCommit(true) b.batchTx.tx = nil tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions) @@ -276,6 +339,10 @@ func (b *backend) defrag() error { plog.Fatalf("cannot begin tx (%s)", err) } + b.readTx.buf.reset() + b.readTx.tx = b.unsafeBegin(false) + atomic.StoreInt64(&b.size, b.readTx.tx.Size()) + return nil } @@ -331,6 +398,22 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error { return tmptx.Commit() } +func (b *backend) begin(write bool) *bolt.Tx { + b.mu.RLock() + tx := b.unsafeBegin(write) + b.mu.RUnlock() + atomic.StoreInt64(&b.size, tx.Size()) + return tx +} + +func (b *backend) unsafeBegin(write bool) *bolt.Tx { + tx, err := b.db.Begin(write) + if err != nil { + plog.Fatalf("cannot begin tx (%s)", err) + } + return tx +} + // NewTmpBackend creates a backend implementation for testing. 
func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) { dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test") @@ -338,7 +421,9 @@ func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, strin plog.Fatal(err) } tmpPath := filepath.Join(dir, "database") - return newBackend(tmpPath, batchInterval, batchLimit), tmpPath + bcfg := DefaultBackendConfig() + bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit + return newBackend(bcfg), tmpPath } func NewDefaultTmpBackend() (*backend, string) { @@ -347,6 +432,12 @@ func NewDefaultTmpBackend() (*backend, string) { type snapshot struct { *bolt.Tx + stopc chan struct{} + donec chan struct{} } -func (s *snapshot) Close() error { return s.Tx.Rollback() } +func (s *snapshot) Close() error { + close(s.stopc) + <-s.donec + return s.Tx.Rollback() +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go index 04fea1e9477..e5fb8474089 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go @@ -16,23 +16,24 @@ package backend import ( "bytes" + "fmt" + "math" "sync" "sync/atomic" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) type BatchTx interface { - Lock() - Unlock() + ReadTx UnsafeCreateBucket(name []byte) UnsafePut(bucketName []byte, key []byte, value []byte) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) - UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) UnsafeDelete(bucketName []byte, key []byte) - UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error + // Commit commits a previous tx and begins a new writable one. Commit() + // CommitAndStop commits the previous tx and does not create a new one. 
CommitAndStop() } @@ -40,13 +41,8 @@ type batchTx struct { sync.Mutex tx *bolt.Tx backend *backend - pending int -} -func newBatchTx(backend *backend) *batchTx { - tx := &batchTx{backend: backend} - tx.Commit() - return tx + pending int } func (t *batchTx) UnsafeCreateBucket(name []byte) { @@ -84,30 +80,37 @@ func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq boo } // UnsafeRange must be called holding the lock on the tx. -func (t *batchTx) UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) { - bucket := t.tx.Bucket(bucketName) +func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + k, v, err := unsafeRange(t.tx, bucketName, key, endKey, limit) + if err != nil { + plog.Fatal(err) + } + return k, v +} + +func unsafeRange(tx *bolt.Tx, bucketName, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte, err error) { + bucket := tx.Bucket(bucketName) if bucket == nil { - plog.Fatalf("bucket %s does not exist", bucketName) + return nil, nil, fmt.Errorf("bucket %s does not exist", bucketName) } - if len(endKey) == 0 { - if v := bucket.Get(key); v == nil { - return keys, vs - } else { - return append(keys, key), append(vs, v) + if v := bucket.Get(key); v != nil { + return append(keys, key), append(vs, v), nil } + return nil, nil, nil + } + if limit <= 0 { + limit = math.MaxInt64 } - c := bucket.Cursor() for ck, cv := c.Seek(key); ck != nil && bytes.Compare(ck, endKey) < 0; ck, cv = c.Next() { vs = append(vs, cv) keys = append(keys, ck) - if limit > 0 && limit == int64(len(keys)) { + if limit == int64(len(keys)) { break } } - - return keys, vs + return keys, vs, nil } // UnsafeDelete must be called holding the lock on the tx. @@ -125,12 +128,14 @@ func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) { // UnsafeForEach must be called holding the lock on the tx. 
func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { - b := t.tx.Bucket(bucketName) - if b == nil { - // bucket does not exist - return nil + return unsafeForEach(t.tx, bucketName, visitor) +} + +func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error { + if b := tx.Bucket(bucket); b != nil { + return b.ForEach(visitor) } - return b.ForEach(visitor) + return nil } // Commit commits a previous tx and begins a new writable one. @@ -140,7 +145,7 @@ func (t *batchTx) Commit() { t.commit(false) } -// CommitAndStop commits the previous tx and do not create a new one. +// CommitAndStop commits the previous tx and does not create a new one. func (t *batchTx) CommitAndStop() { t.Lock() defer t.Unlock() @@ -150,37 +155,28 @@ func (t *batchTx) CommitAndStop() { func (t *batchTx) Unlock() { if t.pending >= t.backend.batchLimit { t.commit(false) - t.pending = 0 } t.Mutex.Unlock() } func (t *batchTx) commit(stop bool) { - var err error // commit the last tx if t.tx != nil { if t.pending == 0 && !stop { t.backend.mu.RLock() defer t.backend.mu.RUnlock() - // batchTx.commit(true) calls *bolt.Tx.Commit, which - // initializes *bolt.Tx.db and *bolt.Tx.meta as nil, - // and subsequent *bolt.Tx.Size() call panics. - // - // This nil pointer reference panic happens when: - // 1. batchTx.commit(false) from newBatchTx - // 2. batchTx.commit(true) from stopping backend - // 3. batchTx.commit(false) from inflight mvcc Hash call - // - // Check if db is nil to prevent this panic - if t.tx.DB() != nil { - atomic.StoreInt64(&t.backend.size, t.tx.Size()) - } + // t.tx.DB()==nil if 'CommitAndStop' calls 'batchTx.commit(true)', + // which initializes *bolt.Tx.db and *bolt.Tx.meta as nil; panics t.tx.Size(). + // Server must make sure 'batchTx.commit(false)' does not follow + // 'batchTx.commit(true)' (e.g. stopping backend, and inflight Hash call). 
+ atomic.StoreInt64(&t.backend.size, t.tx.Size()) return } + start := time.Now() // gofail: var beforeCommit struct{} - err = t.tx.Commit() + err := t.tx.Commit() // gofail: var afterCommit struct{} commitDurations.Observe(time.Since(start).Seconds()) atomic.AddInt64(&t.backend.commits, 1) @@ -190,17 +186,81 @@ func (t *batchTx) commit(stop bool) { plog.Fatalf("cannot commit tx (%s)", err) } } - - if stop { - return + if !stop { + t.tx = t.backend.begin(true) } - - t.backend.mu.RLock() - defer t.backend.mu.RUnlock() - // begin a new tx - t.tx, err = t.backend.db.Begin(true) - if err != nil { - plog.Fatalf("cannot begin tx (%s)", err) - } - atomic.StoreInt64(&t.backend.size, t.tx.Size()) +} + +type batchTxBuffered struct { + batchTx + buf txWriteBuffer +} + +func newBatchTxBuffered(backend *backend) *batchTxBuffered { + tx := &batchTxBuffered{ + batchTx: batchTx{backend: backend}, + buf: txWriteBuffer{ + txBuffer: txBuffer{make(map[string]*bucketBuffer)}, + seq: true, + }, + } + tx.Commit() + return tx +} + +func (t *batchTxBuffered) Unlock() { + if t.pending != 0 { + t.backend.readTx.mu.Lock() + t.buf.writeback(&t.backend.readTx.buf) + t.backend.readTx.mu.Unlock() + if t.pending >= t.backend.batchLimit { + t.commit(false) + } + } + t.batchTx.Unlock() +} + +func (t *batchTxBuffered) Commit() { + t.Lock() + defer t.Unlock() + t.commit(false) +} + +func (t *batchTxBuffered) CommitAndStop() { + t.Lock() + defer t.Unlock() + t.commit(true) +} + +func (t *batchTxBuffered) commit(stop bool) { + // all read txs must be closed to acquire boltdb commit rwlock + t.backend.readTx.mu.Lock() + defer t.backend.readTx.mu.Unlock() + t.unsafeCommit(stop) +} + +func (t *batchTxBuffered) unsafeCommit(stop bool) { + if t.backend.readTx.tx != nil { + if err := t.backend.readTx.tx.Rollback(); err != nil { + plog.Fatalf("cannot rollback tx (%s)", err) + } + t.backend.readTx.buf.reset() + t.backend.readTx.tx = nil + } + + t.batchTx.commit(stop) + + if !stop { + t.backend.readTx.tx = 
t.backend.begin(false) + } +} + +func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) { + t.batchTx.UnsafePut(bucketName, key, value) + t.buf.put(bucketName, key, value) +} + +func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) { + t.batchTx.UnsafeSeqPut(bucketName, key, value) + t.buf.putSeq(bucketName, key, value) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go similarity index 82% rename from vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go rename to vendor/github.com/coreos/etcd/mvcc/backend/config_default.go index 92019c18415..edfed0025c6 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go @@ -12,10 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !linux +// +build !linux,!windows package backend -import "github.com/boltdb/bolt" +import bolt "github.com/coreos/bbolt" var boltOpenOptions *bolt.Options = nil + +func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go similarity index 88% rename from vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go rename to vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go index 4ee9b05a77c..a8f6abeba63 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go @@ -17,7 +17,7 @@ package backend import ( "syscall" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) // syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead @@ -27,6 +27,7 @@ import ( // (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might // 
silently ignore this flag. Please update your kernel to prevent this. var boltOpenOptions = &bolt.Options{ - MmapFlags: syscall.MAP_POPULATE, - InitialMmapSize: int(InitialMmapSize), + MmapFlags: syscall.MAP_POPULATE, } + +func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go new file mode 100644 index 00000000000..71d02700bcd --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package backend + +import bolt "github.com/coreos/bbolt" + +var boltOpenOptions *bolt.Options = nil + +// setting mmap size != 0 on windows will allocate the entire +// mmap size for the file, instead of growing it. So, force 0. 
+ +func (bcfg *BackendConfig) mmapSize() int { return 0 } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go index 34a56a91956..30a38801476 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go @@ -24,8 +24,18 @@ var ( Help: "The latency distributions of commit called by backend.", Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), }) + + snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "disk", + Name: "backend_snapshot_duration_seconds", + Help: "The latency distribution of backend snapshots.", + // 10 ms -> 655 seconds + Buckets: prometheus.ExponentialBuckets(.01, 2, 17), + }) ) func init() { prometheus.MustRegister(commitDurations) + prometheus.MustRegister(snapshotDurations) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go new file mode 100644 index 00000000000..9fc6b790620 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go @@ -0,0 +1,92 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package backend + +import ( + "bytes" + "math" + "sync" + + bolt "github.com/coreos/bbolt" +) + +// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys; +// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket +// is known to never overwrite any key so range is safe. +var safeRangeBucket = []byte("key") + +type ReadTx interface { + Lock() + Unlock() + + UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) + UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error +} + +type readTx struct { + // mu protects accesses to the txReadBuffer + mu sync.RWMutex + buf txReadBuffer + + // txmu protects accesses to the Tx on Range requests + txmu sync.Mutex + tx *bolt.Tx +} + +func (rt *readTx) Lock() { rt.mu.RLock() } +func (rt *readTx) Unlock() { rt.mu.RUnlock() } + +func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if endKey == nil { + // forbid duplicates for single keys + limit = 1 + } + if limit <= 0 { + limit = math.MaxInt64 + } + if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) { + panic("do not use unsafeRange on non-keys bucket") + } + keys, vals := rt.buf.Range(bucketName, key, endKey, limit) + if int64(len(keys)) == limit { + return keys, vals + } + rt.txmu.Lock() + // ignore error since bucket may have been created in this batch + k2, v2, _ := unsafeRange(rt.tx, bucketName, key, endKey, limit-int64(len(keys))) + rt.txmu.Unlock() + return append(k2, keys...), append(v2, vals...) 
+} + +func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { + dups := make(map[string]struct{}) + f1 := func(k, v []byte) error { + dups[string(k)] = struct{}{} + return visitor(k, v) + } + f2 := func(k, v []byte) error { + if _, ok := dups[string(k)]; ok { + return nil + } + return visitor(k, v) + } + if err := rt.buf.ForEach(bucketName, f1); err != nil { + return err + } + rt.txmu.Lock() + err := unsafeForEach(rt.tx, bucketName, f2) + rt.txmu.Unlock() + return err +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go new file mode 100644 index 00000000000..56e885dbfbc --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go @@ -0,0 +1,181 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "bytes" + "sort" +) + +// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer. +type txBuffer struct { + buckets map[string]*bucketBuffer +} + +func (txb *txBuffer) reset() { + for k, v := range txb.buckets { + if v.used == 0 { + // demote + delete(txb.buckets, k) + } + v.used = 0 + } +} + +// txWriteBuffer buffers writes of pending updates that have not yet committed. 
+type txWriteBuffer struct { + txBuffer + seq bool +} + +func (txw *txWriteBuffer) put(bucket, k, v []byte) { + txw.seq = false + txw.putSeq(bucket, k, v) +} + +func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) { + b, ok := txw.buckets[string(bucket)] + if !ok { + b = newBucketBuffer() + txw.buckets[string(bucket)] = b + } + b.add(k, v) +} + +func (txw *txWriteBuffer) writeback(txr *txReadBuffer) { + for k, wb := range txw.buckets { + rb, ok := txr.buckets[k] + if !ok { + delete(txw.buckets, k) + txr.buckets[k] = wb + continue + } + if !txw.seq && wb.used > 1 { + // assume no duplicate keys + sort.Sort(wb) + } + rb.merge(wb) + } + txw.reset() +} + +// txReadBuffer accesses buffered updates. +type txReadBuffer struct{ txBuffer } + +func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if b := txr.buckets[string(bucketName)]; b != nil { + return b.Range(key, endKey, limit) + } + return nil, nil +} + +func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error { + if b := txr.buckets[string(bucketName)]; b != nil { + return b.ForEach(visitor) + } + return nil +} + +type kv struct { + key []byte + val []byte +} + +// bucketBuffer buffers key-value pairs that are pending commit. +type bucketBuffer struct { + buf []kv + // used tracks number of elements in use so buf can be reused without reallocation. 
+ used int +} + +func newBucketBuffer() *bucketBuffer { + return &bucketBuffer{buf: make([]kv, 512), used: 0} +} + +func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) { + f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 } + idx := sort.Search(bb.used, f) + if idx < 0 { + return nil, nil + } + if len(endKey) == 0 { + if bytes.Equal(key, bb.buf[idx].key) { + keys = append(keys, bb.buf[idx].key) + vals = append(vals, bb.buf[idx].val) + } + return keys, vals + } + if bytes.Compare(endKey, bb.buf[idx].key) <= 0 { + return nil, nil + } + for i := idx; i < bb.used && int64(len(keys)) < limit; i++ { + if bytes.Compare(endKey, bb.buf[i].key) <= 0 { + break + } + keys = append(keys, bb.buf[i].key) + vals = append(vals, bb.buf[i].val) + } + return keys, vals +} + +func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error { + for i := 0; i < bb.used; i++ { + if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil { + return err + } + } + return nil +} + +func (bb *bucketBuffer) add(k, v []byte) { + bb.buf[bb.used].key, bb.buf[bb.used].val = k, v + bb.used++ + if bb.used == len(bb.buf) { + buf := make([]kv, (3*len(bb.buf))/2) + copy(buf, bb.buf) + bb.buf = buf + } +} + +// merge merges data from bbsrc into bb. 
+func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) { + for i := 0; i < bbsrc.used; i++ { + bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val) + } + if bb.used == bbsrc.used { + return + } + if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 { + return + } + + sort.Stable(bb) + + // remove duplicates, using only newest update + widx := 0 + for ridx := 1; ridx < bb.used; ridx++ { + if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) { + widx++ + } + bb.buf[widx] = bb.buf[ridx] + } + bb.used = widx + 1 +} + +func (bb *bucketBuffer) Len() int { return bb.used } +func (bb *bucketBuffer) Less(i, j int) bool { + return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0 +} +func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] } diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go index 397098a7ba7..991289cdd5c 100644 --- a/vendor/github.com/coreos/etcd/mvcc/index.go +++ b/vendor/github.com/coreos/etcd/mvcc/index.go @@ -29,7 +29,9 @@ type index interface { RangeSince(key, end []byte, rev int64) []revision Compact(rev int64) map[revision]struct{} Equal(b index) bool + Insert(ki *keyIndex) + KeyIndex(ki *keyIndex) *keyIndex } type treeIndex struct { @@ -60,18 +62,27 @@ func (ti *treeIndex) Put(key []byte, rev revision) { func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { keyi := &keyIndex{key: key} - ti.RLock() defer ti.RUnlock() - item := ti.tree.Get(keyi) - if item == nil { + if keyi = ti.keyIndex(keyi); keyi == nil { return revision{}, revision{}, 0, ErrRevisionNotFound } - - keyi = item.(*keyIndex) return keyi.get(atRev) } +func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { + ti.RLock() + defer ti.RUnlock() + return ti.keyIndex(keyi) +} + +func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { + if item := ti.tree.Get(keyi); item != nil { + return item.(*keyIndex) + } + return nil +} + func (ti *treeIndex) Range(key, 
end []byte, atRev int64) (keys [][]byte, revs []revision) { if end == nil { rev, _, _, err := ti.Get(key, atRev) diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go index 983c64e2f6b..9104f9b2d36 100644 --- a/vendor/github.com/coreos/etcd/mvcc/key_index.go +++ b/vendor/github.com/coreos/etcd/mvcc/key_index.go @@ -222,7 +222,6 @@ func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { } // remove the previous generations. ki.generations = ki.generations[i:] - return } func (ki *keyIndex) isEmpty() bool { diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go index c851c8725e8..6636347aa43 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kv.go +++ b/vendor/github.com/coreos/etcd/mvcc/kv.go @@ -32,15 +32,15 @@ type RangeResult struct { Count int } -type KV interface { - // Rev returns the current revision of the KV. - Rev() int64 - - // FirstRev returns the first revision of the KV. +type ReadView interface { + // FirstRev returns the first KV revision at the time of opening the txn. // After a compaction, the first revision increases to the compaction // revision. FirstRev() int64 + // Rev returns the revision of the KV at the time of opening the txn. + Rev() int64 + // Range gets the keys in the range at rangeRev. // The returned rev is the current revision of the KV when the operation is executed. // If rangeRev <=0, range gets the keys at currentRev. @@ -50,14 +50,17 @@ type KV interface { // Limit limits the number of keys returned. // If the required rev is compacted, ErrCompacted will be returned. Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) +} - // Put puts the given key, value into the store. Put also takes additional argument lease to - // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease - // id. 
- // A put also increases the rev of the store, and generates one event in the event history. - // The returned rev is the current revision of the KV when the operation is executed. - Put(key, value []byte, lease lease.LeaseID) (rev int64) +// TxnRead represents a read-only transaction with operations that will not +// block other read transactions. +type TxnRead interface { + ReadView + // End marks the transaction is complete and ready to commit. + End() +} +type WriteView interface { // DeleteRange deletes the given range from the store. // A deleteRange increases the rev of the store if any key in the range exists. // The number of key deleted will be returned. @@ -67,26 +70,51 @@ type KV interface { // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end). DeleteRange(key, end []byte) (n, rev int64) - // TxnBegin begins a txn. Only Txn prefixed operation can be executed, others will be blocked - // until txn ends. Only one on-going txn is allowed. - // TxnBegin returns an int64 txn ID. - // All txn prefixed operations with same txn ID will be done with the same rev. - TxnBegin() int64 - // TxnEnd ends the on-going txn with txn ID. If the on-going txn ID is not matched, error is returned. - TxnEnd(txnID int64) error - // TxnRange returns the current revision of the KV when the operation is executed. - TxnRange(txnID int64, key, end []byte, ro RangeOptions) (r *RangeResult, err error) - TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) - TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) + // Put puts the given key, value into the store. Put also takes additional argument lease to + // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease + // id. + // A put also increases the rev of the store, and generates one event in the event history. + // The returned rev is the current revision of the KV when the operation is executed. 
+ Put(key, value []byte, lease lease.LeaseID) (rev int64) +} + +// TxnWrite represents a transaction that can modify the store. +type TxnWrite interface { + TxnRead + WriteView + // Changes gets the changes made since opening the write txn. + Changes() []mvccpb.KeyValue +} + +// txnReadWrite coerces a read txn to a write, panicking on any write operation. +type txnReadWrite struct{ TxnRead } + +func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") } +func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + panic("unexpected Put") +} +func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil } + +func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} } + +type KV interface { + ReadView + WriteView + + // Read creates a read transaction. + Read() TxnRead + + // Write creates a write transaction. + Write() TxnWrite + + // Hash retrieves the hash of KV state and revision. + // This method is designed for consistency checking purposes. + Hash() (hash uint32, revision int64, err error) // Compact frees all superseded keys with revisions less than rev. Compact(rev int64) (<-chan struct{}, error) - // Hash retrieves the hash of KV state and revision. - // This method is designed for consistency checking purpose. - Hash() (hash uint32, revision int64, err error) - - // Commit commits txns into the underlying backend. + // Commit commits outstanding txns into the underlying backend. Commit() // Restore restores the KV store from a backend. diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go new file mode 100644 index 00000000000..f40ba8edc22 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kv_view.go @@ -0,0 +1,53 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/coreos/etcd/lease" +) + +type readView struct{ kv KV } + +func (rv *readView) FirstRev() int64 { + tr := rv.kv.Read() + defer tr.End() + return tr.FirstRev() +} + +func (rv *readView) Rev() int64 { + tr := rv.kv.Read() + defer tr.End() + return tr.Rev() +} + +func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + tr := rv.kv.Read() + defer tr.End() + return tr.Range(key, end, ro) +} + +type writeView struct{ kv KV } + +func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) { + tw := wv.kv.Write() + defer tw.End() + return tw.DeleteRange(key, end) +} + +func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + tw := wv.kv.Write() + defer tw.End() + return tw.Put(key, value, lease) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go index 28a18a06597..28a508ccb95 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore.go +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore.go @@ -18,7 +18,6 @@ import ( "encoding/binary" "errors" "math" - "math/rand" "sync" "time" @@ -34,25 +33,29 @@ var ( keyBucketName = []byte("key") metaBucketName = []byte("meta") + consistentIndexKeyName = []byte("consistent_index") + scheduledCompactKeyName = []byte("scheduledCompactRev") + finishedCompactKeyName = []byte("finishedCompactRev") + + ErrCompacted = errors.New("mvcc: required revision has been compacted") + ErrFutureRev = errors.New("mvcc: required revision is a future revision") + ErrCanceled = 
errors.New("mvcc: watcher is canceled") + ErrClosed = errors.New("mvcc: closed") + + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") +) + +const ( // markedRevBytesLen is the byte length of marked revision. // The first `revBytesLen` bytes represents a normal revision. The last // one byte is the mark. markedRevBytesLen = revBytesLen + 1 markBytePosition = markedRevBytesLen - 1 markTombstone byte = 't' - - consistentIndexKeyName = []byte("consistent_index") - scheduledCompactKeyName = []byte("scheduledCompactRev") - finishedCompactKeyName = []byte("finishedCompactRev") - - ErrTxnIDMismatch = errors.New("mvcc: txn id mismatch") - ErrCompacted = errors.New("mvcc: required revision has been compacted") - ErrFutureRev = errors.New("mvcc: required revision is a future revision") - ErrCanceled = errors.New("mvcc: watcher is canceled") - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") ) +var restoreChunkKeys = 10000 // non-const for testing + // ConsistentIndexGetter is an interface that wraps the Get method. // Consistent index is the offset of an entry in a consistent replicated log. type ConsistentIndexGetter interface { @@ -61,7 +64,11 @@ type ConsistentIndexGetter interface { } type store struct { - mu sync.Mutex // guards the following + ReadView + WriteView + + // mu read locks for txns and write locks for non-txn store changes. + mu sync.RWMutex ig ConsistentIndexGetter @@ -70,19 +77,19 @@ type store struct { le lease.Lessor - currentRev revision - // the main revision of the last compaction + // revMuLock protects currentRev and compactMainRev. + // Locked at end of write txn and released after write txn unlock lock. + // Locked before locking read txn and released after locking. + revMu sync.RWMutex + // currentRev is the revision of the last completed transaction. + currentRev int64 + // compactMainRev is the main revision of the last compaction. 
compactMainRev int64 - tx backend.BatchTx - txnID int64 // tracks the current txnID to verify txn operations - txnModify bool - // bytesBuf8 is a byte slice of length 8 // to avoid a repetitive allocation in saveIndex. bytesBuf8 []byte - changes []mvccpb.KeyValue fifoSched schedule.Scheduler stopc chan struct{} @@ -98,17 +105,18 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto le: le, - currentRev: revision{main: 1}, + currentRev: 1, compactMainRev: -1, - bytesBuf8: make([]byte, 8, 8), + bytesBuf8: make([]byte, 8), fifoSched: schedule.NewFIFOScheduler(), stopc: make(chan struct{}), } - + s.ReadView = &readView{s} + s.WriteView = &writeView{s} if s.le != nil { - s.le.SetRangeDeleter(s) + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) } tx := s.b.BatchTx() @@ -126,140 +134,6 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto return s } -func (s *store) Rev() int64 { - s.mu.Lock() - defer s.mu.Unlock() - - return s.currentRev.main -} - -func (s *store) FirstRev() int64 { - s.mu.Lock() - defer s.mu.Unlock() - - return s.compactMainRev -} - -func (s *store) Put(key, value []byte, lease lease.LeaseID) int64 { - id := s.TxnBegin() - s.put(key, value, lease) - s.txnEnd(id) - - putCounter.Inc() - - return int64(s.currentRev.main) -} - -func (s *store) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - id := s.TxnBegin() - kvs, count, rev, err := s.rangeKeys(key, end, ro.Limit, ro.Rev, ro.Count) - s.txnEnd(id) - - rangeCounter.Inc() - - r = &RangeResult{ - KVs: kvs, - Count: count, - Rev: rev, - } - - return r, err -} - -func (s *store) DeleteRange(key, end []byte) (n, rev int64) { - id := s.TxnBegin() - n = s.deleteRange(key, end) - s.txnEnd(id) - - deleteCounter.Inc() - - return n, int64(s.currentRev.main) -} - -func (s *store) TxnBegin() int64 { - s.mu.Lock() - s.currentRev.sub = 0 - s.tx = s.b.BatchTx() - s.tx.Lock() - - s.txnID = rand.Int63() - return s.txnID -} - 
-func (s *store) TxnEnd(txnID int64) error { - err := s.txnEnd(txnID) - if err != nil { - return err - } - - txnCounter.Inc() - return nil -} - -// txnEnd is used for unlocking an internal txn. It does -// not increase the txnCounter. -func (s *store) txnEnd(txnID int64) error { - if txnID != s.txnID { - return ErrTxnIDMismatch - } - - // only update index if the txn modifies the mvcc state. - // read only txn might execute with one write txn concurrently, - // it should not write its index to mvcc. - if s.txnModify { - s.saveIndex() - } - s.txnModify = false - - s.tx.Unlock() - if s.currentRev.sub != 0 { - s.currentRev.main += 1 - } - s.currentRev.sub = 0 - - dbTotalSize.Set(float64(s.b.Size())) - s.mu.Unlock() - return nil -} - -func (s *store) TxnRange(txnID int64, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - if txnID != s.txnID { - return nil, ErrTxnIDMismatch - } - - kvs, count, rev, err := s.rangeKeys(key, end, ro.Limit, ro.Rev, ro.Count) - - r = &RangeResult{ - KVs: kvs, - Count: count, - Rev: rev, - } - return r, err -} - -func (s *store) TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) { - if txnID != s.txnID { - return 0, ErrTxnIDMismatch - } - - s.put(key, value, lease) - return int64(s.currentRev.main + 1), nil -} - -func (s *store) TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) { - if txnID != s.txnID { - return 0, 0, ErrTxnIDMismatch - } - - n = s.deleteRange(key, end) - if n != 0 || s.currentRev.sub != 0 { - rev = int64(s.currentRev.main + 1) - } else { - rev = int64(s.currentRev.main) - } - return n, rev, nil -} - func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { if ctx == nil || ctx.Err() != nil { s.mu.Lock() @@ -275,16 +149,25 @@ func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { close(ch) } +func (s *store) Hash() (hash uint32, revision int64, err error) { + s.b.ForceCommit() + h, err := s.b.Hash(DefaultIgnores) + return h, 
s.currentRev, err +} + func (s *store) Compact(rev int64) (<-chan struct{}, error) { s.mu.Lock() defer s.mu.Unlock() + s.revMu.Lock() + defer s.revMu.Unlock() + if rev <= s.compactMainRev { ch := make(chan struct{}) f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } s.fifoSched.Schedule(f) return ch, ErrCompacted } - if rev > s.currentRev.main { + if rev > s.currentRev { return nil, ErrFutureRev } @@ -333,24 +216,14 @@ func init() { } } -func (s *store) Hash() (uint32, int64, error) { - s.mu.Lock() - defer s.mu.Unlock() - s.b.ForceCommit() - - h, err := s.b.Hash(DefaultIgnores) - rev := s.currentRev.main - return h, rev, err -} - func (s *store) Commit() { s.mu.Lock() defer s.mu.Unlock() - s.tx = s.b.BatchTx() - s.tx.Lock() - s.saveIndex() - s.tx.Unlock() + tx := s.b.BatchTx() + tx.Lock() + s.saveIndex(tx) + tx.Unlock() s.b.ForceCommit() } @@ -363,10 +236,8 @@ func (s *store) Restore(b backend.Backend) error { s.b = b s.kvindex = newTreeIndex() - s.currentRev = revision{main: 1} + s.currentRev = 1 s.compactMainRev = -1 - s.tx = b.BatchTx() - s.txnID = -1 s.fifoSched = schedule.NewFIFOScheduler() s.stopc = make(chan struct{}) @@ -374,75 +245,63 @@ func (s *store) Restore(b backend.Backend) error { } func (s *store) restore() error { + reportDbTotalSizeInBytesMu.Lock() + b := s.b + reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } + reportDbTotalSizeInBytesMu.Unlock() + min, max := newRevBytes(), newRevBytes() revToBytes(revision{main: 1}, min) revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) keyToLease := make(map[string]lease.LeaseID) - // use an unordered map to hold the temp index data to speed up - // the initial key index recovery. - // we will convert this unordered map into the tree index later. 
- unordered := make(map[string]*keyIndex, 100000) - // restore index tx := s.b.BatchTx() tx.Lock() + _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) if len(finishedCompactBytes) != 0 { s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main plog.Printf("restore compact to %d", s.compactMainRev) } - - // TODO: limit N to reduce max memory usage - keys, vals := tx.UnsafeRange(keyBucketName, min, max, 0) - for i, key := range keys { - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vals[i]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - - rev := bytesToRev(key[:revBytesLen]) - - // restore index - switch { - case isTombstone(key): - if ki, ok := unordered[string(kv.Key)]; ok { - ki.tombstone(rev.main, rev.sub) - } - delete(keyToLease, string(kv.Key)) - - default: - ki, ok := unordered[string(kv.Key)] - if ok { - ki.put(rev.main, rev.sub) - } else { - ki = &keyIndex{key: kv.Key} - ki.restore(revision{kv.CreateRevision, 0}, rev, kv.Version) - unordered[string(kv.Key)] = ki - } - - if lid := lease.LeaseID(kv.Lease); lid != lease.NoLease { - keyToLease[string(kv.Key)] = lid - } else { - delete(keyToLease, string(kv.Key)) - } - } - - // update revision - s.currentRev = rev + _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) + scheduledCompact := int64(0) + if len(scheduledCompactBytes) != 0 { + scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main } - // restore the tree index from the unordered index. - for _, v := range unordered { - s.kvindex.Insert(v) + // index keys concurrently as they're loaded in from tx + keysGauge.Set(0) + rkvc, revc := restoreIntoIndex(s.kvindex) + for { + keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys)) + if len(keys) == 0 { + break + } + // rkvc blocks if the total pending keys exceeds the restore + // chunk size to keep keys from consuming too much memory. 
+ restoreChunk(rkvc, keys, vals, keyToLease) + if len(keys) < restoreChunkKeys { + // partial set implies final set + break + } + // next set begins after where this one ended + newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) + newMin.sub++ + revToBytes(newMin, min) } + close(rkvc) + s.currentRev = <-revc // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. // the correct revision should be set to compaction revision in the case, not the largest revision // we have seen. - if s.currentRev.main < s.compactMainRev { - s.currentRev.main = s.compactMainRev + if s.currentRev < s.compactMainRev { + s.currentRev = s.compactMainRev + } + if scheduledCompact <= s.compactMainRev { + scheduledCompact = 0 } for key, lid := range keyToLease { @@ -455,15 +314,6 @@ func (s *store) restore() error { } } - _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) - scheduledCompact := int64(0) - if len(scheduledCompactBytes) != 0 { - scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main - if scheduledCompact <= s.compactMainRev { - scheduledCompact = 0 - } - } - tx.Unlock() if scheduledCompact != 0 { @@ -474,6 +324,75 @@ func (s *store) restore() error { return nil } +type revKeyValue struct { + key []byte + kv mvccpb.KeyValue + kstr string +} + +func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { + rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) + go func() { + currentRev := int64(1) + defer func() { revc <- currentRev }() + // restore the tree index from streaming the unordered index. 
+ kiCache := make(map[string]*keyIndex, restoreChunkKeys) + for rkv := range rkvc { + ki, ok := kiCache[rkv.kstr] + // purge kiCache if many keys but still missing in the cache + if !ok && len(kiCache) >= restoreChunkKeys { + i := 10 + for k := range kiCache { + delete(kiCache, k) + if i--; i == 0 { + break + } + } + } + // cache miss, fetch from tree index if there + if !ok { + ki = &keyIndex{key: rkv.kv.Key} + if idxKey := idx.KeyIndex(ki); idxKey != nil { + kiCache[rkv.kstr], ki = idxKey, idxKey + ok = true + } + } + rev := bytesToRev(rkv.key) + currentRev = rev.main + if ok { + if isTombstone(rkv.key) { + ki.tombstone(rev.main, rev.sub) + continue + } + ki.put(rev.main, rev.sub) + } else if !isTombstone(rkv.key) { + ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) + idx.Insert(ki) + kiCache[rkv.kstr] = ki + } + } + }() + return rkvc, revc +} + +func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { + for i, key := range keys { + rkv := revKeyValue{key: key} + if err := rkv.kv.Unmarshal(vals[i]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + rkv.kstr = string(rkv.kv.Key) + if isTombstone(key) { + delete(keyToLease, rkv.kstr) + } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease { + keyToLease[rkv.kstr] = lid + } else { + delete(keyToLease, rkv.kstr) + } + kvc <- rkv + } +} + func (s *store) Close() error { close(s.stopc) s.fifoSched.Stop() @@ -490,180 +409,10 @@ func (a *store) Equal(b *store) bool { return a.kvindex.Equal(b.kvindex) } -// range is a keyword in Go, add Keys suffix. 
-func (s *store) rangeKeys(key, end []byte, limit, rangeRev int64, countOnly bool) (kvs []mvccpb.KeyValue, count int, curRev int64, err error) { - curRev = int64(s.currentRev.main) - if s.currentRev.sub > 0 { - curRev += 1 - } - - if rangeRev > curRev { - return nil, -1, s.currentRev.main, ErrFutureRev - } - var rev int64 - if rangeRev <= 0 { - rev = curRev - } else { - rev = rangeRev - } - if rev < s.compactMainRev { - return nil, -1, 0, ErrCompacted - } - - _, revpairs := s.kvindex.Range(key, end, int64(rev)) - if len(revpairs) == 0 { - return nil, 0, curRev, nil - } - if countOnly { - return nil, len(revpairs), curRev, nil - } - - for _, revpair := range revpairs { - start, end := revBytesRange(revpair) - - _, vs := s.tx.UnsafeRange(keyBucketName, start, end, 0) - if len(vs) != 1 { - plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) - } - - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vs[0]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - kvs = append(kvs, kv) - if limit > 0 && len(kvs) >= int(limit) { - break - } - } - return kvs, len(revpairs), curRev, nil -} - -func (s *store) put(key, value []byte, leaseID lease.LeaseID) { - s.txnModify = true - - rev := s.currentRev.main + 1 - c := rev - oldLease := lease.NoLease - - // if the key exists before, use its previous created and - // get its previous leaseID - _, created, ver, err := s.kvindex.Get(key, rev) - if err == nil { - c = created.main - oldLease = s.le.GetLease(lease.LeaseItem{Key: string(key)}) - } - - ibytes := newRevBytes() - revToBytes(revision{main: rev, sub: s.currentRev.sub}, ibytes) - - ver = ver + 1 - kv := mvccpb.KeyValue{ - Key: key, - Value: value, - CreateRevision: c, - ModRevision: rev, - Version: ver, - Lease: int64(leaseID), - } - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - s.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - s.kvindex.Put(key, revision{main: rev, sub: s.currentRev.sub}) - 
s.changes = append(s.changes, kv) - s.currentRev.sub += 1 - - if oldLease != lease.NoLease { - if s.le == nil { - panic("no lessor to detach lease") - } - - err = s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - plog.Errorf("unexpected error from lease detach: %v", err) - } - } - - if leaseID != lease.NoLease { - if s.le == nil { - panic("no lessor to attach lease") - } - - err = s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - panic("unexpected error from lease Attach") - } - } -} - -func (s *store) deleteRange(key, end []byte) int64 { - s.txnModify = true - - rrev := s.currentRev.main - if s.currentRev.sub > 0 { - rrev += 1 - } - keys, revs := s.kvindex.Range(key, end, rrev) - - if len(keys) == 0 { - return 0 - } - - for i, key := range keys { - s.delete(key, revs[i]) - } - return int64(len(keys)) -} - -func (s *store) delete(key []byte, rev revision) { - mainrev := s.currentRev.main + 1 - - ibytes := newRevBytes() - revToBytes(revision{main: mainrev, sub: s.currentRev.sub}, ibytes) - ibytes = appendMarkTombstone(ibytes) - - kv := mvccpb.KeyValue{ - Key: key, - } - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - s.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - err = s.kvindex.Tombstone(key, revision{main: mainrev, sub: s.currentRev.sub}) - if err != nil { - plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) - } - s.changes = append(s.changes, kv) - s.currentRev.sub += 1 - - item := lease.LeaseItem{Key: string(key)} - leaseID := s.le.GetLease(item) - - if leaseID != lease.NoLease { - err = s.le.Detach(leaseID, []lease.LeaseItem{item}) - if err != nil { - plog.Errorf("cannot detach %v", err) - } - } -} - -func (s *store) getChanges() []mvccpb.KeyValue { - changes := s.changes - s.changes = make([]mvccpb.KeyValue, 0, 4) - return changes -} - -func (s *store) saveIndex() { +func (s *store) saveIndex(tx backend.BatchTx) { if s.ig == nil 
{ return } - tx := s.tx bs := s.bytesBuf8 binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex()) // put the index into the underlying backend diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go new file mode 100644 index 00000000000..13d4d530d0a --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go @@ -0,0 +1,253 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +type storeTxnRead struct { + s *store + tx backend.ReadTx + + firstRev int64 + rev int64 +} + +func (s *store) Read() TxnRead { + s.mu.RLock() + tx := s.b.ReadTx() + s.revMu.RLock() + tx.Lock() + firstRev, rev := s.compactMainRev, s.currentRev + s.revMu.RUnlock() + return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev}) +} + +func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } +func (tr *storeTxnRead) Rev() int64 { return tr.rev } + +func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + return tr.rangeKeys(key, end, tr.Rev(), ro) +} + +func (tr *storeTxnRead) End() { + tr.tx.Unlock() + tr.s.mu.RUnlock() +} + +type storeTxnWrite struct { + *storeTxnRead + tx backend.BatchTx + // beginRev is the revision where the txn begins; it will write to the next revision. 
+ beginRev int64 + changes []mvccpb.KeyValue +} + +func (s *store) Write() TxnWrite { + s.mu.RLock() + tx := s.b.BatchTx() + tx.Lock() + tw := &storeTxnWrite{ + storeTxnRead: &storeTxnRead{s, tx, 0, 0}, + tx: tx, + beginRev: s.currentRev, + changes: make([]mvccpb.KeyValue, 0, 4), + } + return newMetricsTxnWrite(tw) +} + +func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev } + +func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + rev := tw.beginRev + if len(tw.changes) > 0 { + rev++ + } + return tw.rangeKeys(key, end, rev, ro) +} + +func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { + if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { + return n, int64(tw.beginRev + 1) + } + return 0, int64(tw.beginRev) +} + +func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { + tw.put(key, value, lease) + return int64(tw.beginRev + 1) +} + +func (tw *storeTxnWrite) End() { + // only update index if the txn modifies the mvcc state. + if len(tw.changes) != 0 { + tw.s.saveIndex(tw.tx) + // hold revMu lock to prevent new read txns from opening until writeback. 
+ tw.s.revMu.Lock() + tw.s.currentRev++ + } + tw.tx.Unlock() + if len(tw.changes) != 0 { + tw.s.revMu.Unlock() + } + tw.s.mu.RUnlock() +} + +func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { + rev := ro.Rev + if rev > curRev { + return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev + } + if rev <= 0 { + rev = curRev + } + if rev < tr.s.compactMainRev { + return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted + } + + _, revpairs := tr.s.kvindex.Range(key, end, int64(rev)) + if len(revpairs) == 0 { + return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil + } + if ro.Count { + return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil + } + + var kvs []mvccpb.KeyValue + for _, revpair := range revpairs { + start, end := revBytesRange(revpair) + _, vs := tr.tx.UnsafeRange(keyBucketName, start, end, 0) + if len(vs) != 1 { + plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) + } + + var kv mvccpb.KeyValue + if err := kv.Unmarshal(vs[0]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + kvs = append(kvs, kv) + if ro.Limit > 0 && len(kvs) >= int(ro.Limit) { + break + } + } + return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil +} + +func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { + rev := tw.beginRev + 1 + c := rev + oldLease := lease.NoLease + + // if the key exists before, use its previous created and + // get its previous leaseID + _, created, ver, err := tw.s.kvindex.Get(key, rev) + if err == nil { + c = created.main + oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) + } + + ibytes := newRevBytes() + idxRev := revision{main: rev, sub: int64(len(tw.changes))} + revToBytes(idxRev, ibytes) + + ver = ver + 1 + kv := mvccpb.KeyValue{ + Key: key, + Value: value, + CreateRevision: c, + ModRevision: rev, + Version: ver, + Lease: int64(leaseID), + } + + d, err := kv.Marshal() + if err != nil { 
+ plog.Fatalf("cannot marshal event: %v", err) + } + + tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) + tw.s.kvindex.Put(key, idxRev) + tw.changes = append(tw.changes, kv) + + if oldLease != lease.NoLease { + if tw.s.le == nil { + panic("no lessor to detach lease") + } + err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + plog.Errorf("unexpected error from lease detach: %v", err) + } + } + if leaseID != lease.NoLease { + if tw.s.le == nil { + panic("no lessor to attach lease") + } + err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + panic("unexpected error from lease Attach") + } + } +} + +func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { + rrev := tw.beginRev + if len(tw.changes) > 0 { + rrev += 1 + } + keys, revs := tw.s.kvindex.Range(key, end, rrev) + if len(keys) == 0 { + return 0 + } + for i, key := range keys { + tw.delete(key, revs[i]) + } + return int64(len(keys)) +} + +func (tw *storeTxnWrite) delete(key []byte, rev revision) { + ibytes := newRevBytes() + idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} + revToBytes(idxRev, ibytes) + ibytes = appendMarkTombstone(ibytes) + + kv := mvccpb.KeyValue{Key: key} + + d, err := kv.Marshal() + if err != nil { + plog.Fatalf("cannot marshal event: %v", err) + } + + tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) + err = tw.s.kvindex.Tombstone(key, idxRev) + if err != nil { + plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) + } + tw.changes = append(tw.changes, kv) + + item := lease.LeaseItem{Key: string(key)} + leaseID := tw.s.le.GetLease(item) + + if leaseID != lease.NoLease { + err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) + if err != nil { + plog.Errorf("cannot detach %v", err) + } + } +} + +func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes } diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go index 
aa8af6aa552..a65fe59b996 100644 --- a/vendor/github.com/coreos/etcd/mvcc/metrics.go +++ b/vendor/github.com/coreos/etcd/mvcc/metrics.go @@ -15,6 +15,8 @@ package mvcc import ( + "sync" + "github.com/prometheus/client_golang/prometheus" ) @@ -129,12 +131,21 @@ var ( Buckets: prometheus.ExponentialBuckets(100, 2, 14), }) - dbTotalSize = prometheus.NewGauge(prometheus.GaugeOpts{ + dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", Name: "db_total_size_in_bytes", Help: "Total size of the underlying database in bytes.", - }) + }, + func() float64 { + reportDbTotalSizeInBytesMu.RLock() + defer reportDbTotalSizeInBytesMu.RUnlock() + return reportDbTotalSizeInBytes() + }, + ) + // overridden by mvcc initialization + reportDbTotalSizeInBytesMu sync.RWMutex + reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 } ) func init() { diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go new file mode 100644 index 00000000000..fd2144279ae --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go @@ -0,0 +1,67 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import ( + "github.com/coreos/etcd/lease" +) + +type metricsTxnWrite struct { + TxnWrite + ranges uint + puts uint + deletes uint +} + +func newMetricsTxnRead(tr TxnRead) TxnRead { + return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0} +} + +func newMetricsTxnWrite(tw TxnWrite) TxnWrite { + return &metricsTxnWrite{tw, 0, 0, 0} +} + +func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) { + tw.ranges++ + return tw.TxnWrite.Range(key, end, ro) +} + +func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) { + tw.deletes++ + return tw.TxnWrite.DeleteRange(key, end) +} + +func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + tw.puts++ + return tw.TxnWrite.Put(key, value, lease) +} + +func (tw *metricsTxnWrite) End() { + defer tw.TxnWrite.End() + if sum := tw.ranges + tw.puts + tw.deletes; sum != 1 { + if sum > 1 { + txnCounter.Inc() + } + return + } + switch { + case tw.ranges == 1: + rangeCounter.Inc() + case tw.puts == 1: + putCounter.Inc() + case tw.deletes == 1: + deleteCounter.Inc() + } +} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go index aa053f4e66e..7033f132662 100644 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go @@ -713,7 +713,7 @@ func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } var fileDescriptorKv = []byte{ // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 
0x52, 0x2f, 0x62, 0xfa, diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go index dbb79bcb693..68d9ab71d27 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go @@ -41,10 +41,12 @@ type watchable interface { } type watchableStore struct { - mu sync.Mutex - *store + // mu protects watcher groups and batches. It should never be locked + // before locking store.mu to avoid deadlock. + mu sync.RWMutex + // victims are watcher batches that were blocked on the watch channel victims []watcherBatch victimc chan struct{} @@ -76,9 +78,11 @@ func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGet synced: newWatcherGroup(), stopc: make(chan struct{}), } + s.store.ReadView = &readView{s} + s.store.WriteView = &writeView{s} if s.le != nil { // use this store as the deleter so revokes trigger watch events - s.le.SetRangeDeleter(s) + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) } s.wg.Add(2) go s.syncWatchersLoop() @@ -86,89 +90,6 @@ func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGet return s } -func (s *watchableStore) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - s.mu.Lock() - defer s.mu.Unlock() - - rev = s.store.Put(key, value, lease) - changes := s.store.getChanges() - if len(changes) != 1 { - plog.Panicf("unexpected len(changes) != 1 after put") - } - - ev := mvccpb.Event{ - Type: mvccpb.PUT, - Kv: &changes[0], - } - s.notify(rev, []mvccpb.Event{ev}) - return rev -} - -func (s *watchableStore) DeleteRange(key, end []byte) (n, rev int64) { - s.mu.Lock() - defer s.mu.Unlock() - - n, rev = s.store.DeleteRange(key, end) - changes := s.store.getChanges() - - if len(changes) != int(n) { - plog.Panicf("unexpected len(changes) != n after deleteRange") - } - - if n == 0 { - return n, rev - } - - evs := make([]mvccpb.Event, n) - for i := range changes { - 
evs[i] = mvccpb.Event{ - Type: mvccpb.DELETE, - Kv: &changes[i]} - evs[i].Kv.ModRevision = rev - } - s.notify(rev, evs) - return n, rev -} - -func (s *watchableStore) TxnBegin() int64 { - s.mu.Lock() - return s.store.TxnBegin() -} - -func (s *watchableStore) TxnEnd(txnID int64) error { - err := s.store.TxnEnd(txnID) - if err != nil { - return err - } - - changes := s.getChanges() - if len(changes) == 0 { - s.mu.Unlock() - return nil - } - - rev := s.store.Rev() - evs := make([]mvccpb.Event, len(changes)) - for i, change := range changes { - switch change.CreateRevision { - case 0: - evs[i] = mvccpb.Event{ - Type: mvccpb.DELETE, - Kv: &changes[i]} - evs[i].Kv.ModRevision = rev - default: - evs[i] = mvccpb.Event{ - Type: mvccpb.PUT, - Kv: &changes[i]} - } - } - - s.notify(rev, evs) - s.mu.Unlock() - - return nil -} - func (s *watchableStore) Close() error { close(s.stopc) s.wg.Wait() @@ -186,9 +107,6 @@ func (s *watchableStore) NewWatchStream() WatchStream { } func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { - s.mu.Lock() - defer s.mu.Unlock() - wa := &watcher{ key: key, end: end, @@ -198,21 +116,24 @@ func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch c fcs: fcs, } - s.store.mu.Lock() - synced := startRev > s.store.currentRev.main || startRev == 0 + s.mu.Lock() + s.revMu.RLock() + synced := startRev > s.store.currentRev || startRev == 0 if synced { - wa.minRev = s.store.currentRev.main + 1 + wa.minRev = s.store.currentRev + 1 if startRev > wa.minRev { wa.minRev = startRev } } - s.store.mu.Unlock() if synced { s.synced.add(wa) } else { slowWatcherGauge.Inc() s.unsynced.add(wa) } + s.revMu.RUnlock() + s.mu.Unlock() + watcherGauge.Inc() return wa, func() { s.cancelWatcher(wa) } @@ -258,17 +179,35 @@ func (s *watchableStore) cancelWatcher(wa *watcher) { s.mu.Unlock() } +func (s *watchableStore) Restore(b backend.Backend) error { + s.mu.Lock() + 
defer s.mu.Unlock() + err := s.store.Restore(b) + if err != nil { + return err + } + + for wa := range s.synced.watchers { + s.unsynced.watchers.add(wa) + } + s.synced = newWatcherGroup() + return nil +} + // syncWatchersLoop syncs the watcher in the unsynced map every 100ms. func (s *watchableStore) syncWatchersLoop() { defer s.wg.Done() for { - s.mu.Lock() + s.mu.RLock() st := time.Now() lastUnsyncedWatchers := s.unsynced.size() - s.syncWatchers() - unsyncedWatchers := s.unsynced.size() - s.mu.Unlock() + s.mu.RUnlock() + + unsyncedWatchers := 0 + if lastUnsyncedWatchers > 0 { + unsyncedWatchers = s.syncWatchers() + } syncDuration := time.Since(st) waitDuration := 100 * time.Millisecond @@ -295,9 +234,9 @@ func (s *watchableStore) syncVictimsLoop() { for s.moveVictims() != 0 { // try to update all victim watchers } - s.mu.Lock() + s.mu.RLock() isEmpty := len(s.victims) == 0 - s.mu.Unlock() + s.mu.RUnlock() var tickc <-chan time.Time if !isEmpty { @@ -340,8 +279,8 @@ func (s *watchableStore) moveVictims() (moved int) { // assign completed victim watchers to unsync/sync s.mu.Lock() - s.store.mu.Lock() - curRev := s.store.currentRev.main + s.store.revMu.RLock() + curRev := s.store.currentRev for w, eb := range wb { if newVictim != nil && newVictim[w] != nil { // couldn't send watch response; stays victim @@ -358,7 +297,7 @@ func (s *watchableStore) moveVictims() (moved int) { s.synced.add(w) } } - s.store.mu.Unlock() + s.store.revMu.RUnlock() s.mu.Unlock() } @@ -376,19 +315,23 @@ func (s *watchableStore) moveVictims() (moved int) { // 2. iterate over the set to get the minimum revision and remove compacted watchers // 3. use minimum revision to get all key-value pairs and send those events to watchers // 4. 
remove synced watchers in set from unsynced group and move to synced group -func (s *watchableStore) syncWatchers() { +func (s *watchableStore) syncWatchers() int { + s.mu.Lock() + defer s.mu.Unlock() + if s.unsynced.size() == 0 { - return + return 0 } - s.store.mu.Lock() - defer s.store.mu.Unlock() + s.store.revMu.RLock() + defer s.store.revMu.RUnlock() // in order to find key-value pairs from unsynced watchers, we need to // find min revision index, and these revisions can be used to // query the backend store of key-value pairs - curRev := s.store.currentRev.main + curRev := s.store.currentRev compactionRev := s.store.compactMainRev + wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) minBytes, maxBytes := newRevBytes(), newRevBytes() revToBytes(revision{main: minRev}, minBytes) @@ -396,7 +339,7 @@ func (s *watchableStore) syncWatchers() { // UnsafeRange returns keys and values. And in boltdb, keys are revisions. // values are actual key-value pairs in backend. 
- tx := s.store.b.BatchTx() + tx := s.store.b.ReadTx() tx.Lock() revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) evs := kvsToEvents(wg, revs, vs) @@ -446,6 +389,8 @@ func (s *watchableStore) syncWatchers() { vsz += len(v) } slowWatcherGauge.Set(float64(s.unsynced.size() + vsz)) + + return s.unsynced.size() } // kvsToEvents gets all events for the watchers from all key-value pairs @@ -511,8 +456,8 @@ func (s *watchableStore) addVictim(victim watcherBatch) { func (s *watchableStore) rev() int64 { return s.store.Rev() } func (s *watchableStore) progress(w *watcher) { - s.mu.Lock() - defer s.mu.Unlock() + s.mu.RLock() + defer s.mu.RUnlock() if _, ok := s.synced.watchers[w]; ok { w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go new file mode 100644 index 00000000000..5c5bfda1341 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go @@ -0,0 +1,53 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import ( + "github.com/coreos/etcd/mvcc/mvccpb" +) + +func (tw *watchableStoreTxnWrite) End() { + changes := tw.Changes() + if len(changes) == 0 { + tw.TxnWrite.End() + return + } + + rev := tw.Rev() + 1 + evs := make([]mvccpb.Event, len(changes)) + for i, change := range changes { + evs[i].Kv = &changes[i] + if change.CreateRevision == 0 { + evs[i].Type = mvccpb.DELETE + evs[i].Kv.ModRevision = rev + } else { + evs[i].Type = mvccpb.PUT + } + } + + // end write txn under watchable store lock so the updates are visible + // when asynchronous event posting checks the current store revision + tw.s.mu.Lock() + tw.s.notify(rev, evs) + tw.TxnWrite.End() + tw.s.mu.Unlock() +} + +type watchableStoreTxnWrite struct { + TxnWrite + s *watchableStore +} + +func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} } diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go index 2710c1cc940..6ef1d0ce8bb 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go +++ b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go @@ -183,7 +183,7 @@ func (wg *watcherGroup) add(wa *watcher) { // contains is whether the given key has a watcher in the group. func (wg *watcherGroup) contains(key string) bool { _, ok := wg.keyWatchers[key] - return ok || wg.ranges.Contains(adt.NewStringAffinePoint(key)) + return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) } // size gives the number of unique watchers in the group. 
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go index 6edbe593fb4..9769771ea4f 100644 --- a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go +++ b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go @@ -15,6 +15,7 @@ package adt import ( + "bytes" "math" ) @@ -134,25 +135,29 @@ func (x *intervalNode) updateMax() { type nodeVisitor func(n *intervalNode) bool // visit will call a node visitor on each node that overlaps the given interval -func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) { +func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) bool { if x == nil { - return + return true } v := iv.Compare(&x.iv.Ivl) switch { case v < 0: - x.left.visit(iv, nv) + if !x.left.visit(iv, nv) { + return false + } case v > 0: maxiv := Interval{x.iv.Ivl.Begin, x.max} if maxiv.Compare(iv) == 0 { - x.left.visit(iv, nv) - x.right.visit(iv, nv) + if !x.left.visit(iv, nv) || !x.right.visit(iv, nv) { + return false + } } default: - nv(x) - x.left.visit(iv, nv) - x.right.visit(iv, nv) + if !x.left.visit(iv, nv) || !nv(x) || !x.right.visit(iv, nv) { + return false + } } + return true } type IntervalValue struct { @@ -402,10 +407,11 @@ func (ivt *IntervalTree) MaxHeight() int { return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5) } -// IntervalVisitor is used on tree searchs; return false to stop searching. +// IntervalVisitor is used on tree searches; return false to stop searching. type IntervalVisitor func(n *IntervalValue) bool // Visit calls a visitor function on every tree node intersecting the given interval. +// It will visit each interval [x, y) in ascending order sorted on x. 
func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) { ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) }) } @@ -432,8 +438,8 @@ func (ivt *IntervalTree) Find(ivl Interval) (ret *IntervalValue) { return &n.iv } -// Contains returns true if there is some tree node intersecting the given interval. -func (ivt *IntervalTree) Contains(iv Interval) bool { +// Intersects returns true if there is some tree node intersecting the given interval. +func (ivt *IntervalTree) Intersects(iv Interval) bool { x := ivt.root for x != nil && iv.Compare(&x.iv.Ivl) != 0 { if x.left != nil && x.left.max.Compare(iv.Begin) > 0 { @@ -445,6 +451,30 @@ func (ivt *IntervalTree) Contains(iv Interval) bool { return x != nil } +// Contains returns true if the interval tree's keys cover the entire given interval. +func (ivt *IntervalTree) Contains(ivl Interval) bool { + var maxEnd, minBegin Comparable + + isContiguous := true + ivt.Visit(ivl, func(n *IntervalValue) bool { + if minBegin == nil { + minBegin = n.Ivl.Begin + maxEnd = n.Ivl.End + return true + } + if maxEnd.Compare(n.Ivl.Begin) < 0 { + isContiguous = false + return false + } + if n.Ivl.End.Compare(maxEnd) > 0 { + maxEnd = n.Ivl.End + } + return true + }) + + return isContiguous && minBegin != nil && maxEnd.Compare(ivl.End) >= 0 && minBegin.Compare(ivl.Begin) <= 0 +} + // Stab returns a slice with all elements in the tree intersecting the interval. 
func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) { if ivt.count == 0 { @@ -529,3 +559,32 @@ func (v Int64Comparable) Compare(c Comparable) int { } return 0 } + +// BytesAffineComparable treats empty byte arrays as > all other byte arrays +type BytesAffineComparable []byte + +func (b BytesAffineComparable) Compare(c Comparable) int { + bc := c.(BytesAffineComparable) + + if len(b) == 0 { + if len(bc) == 0 { + return 0 + } + return 1 + } + if len(bc) == 0 { + return -1 + } + + return bytes.Compare(b, bc) +} + +func NewBytesAffineInterval(begin, end []byte) Interval { + return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)} +} +func NewBytesAffinePoint(b []byte) Interval { + be := make([]byte, len(b)+1) + copy(be, b) + be[len(b)] = 0 + return NewBytesAffineInterval(b, be) +} diff --git a/vendor/github.com/coreos/etcd/pkg/cors/BUILD b/vendor/github.com/coreos/etcd/pkg/cors/BUILD new file mode 100644 index 00000000000..e707c8a14d5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/cors/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["cors.go"], + importpath = "github.com/coreos/etcd/pkg/cors", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/cors/cors.go b/vendor/github.com/coreos/etcd/pkg/cors/cors.go new file mode 100644 index 00000000000..0c64f16a390 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/cors/cors.go @@ -0,0 +1,90 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cors handles cross-origin HTTP requests (CORS). +package cors + +import ( + "fmt" + "net/http" + "net/url" + "sort" + "strings" +) + +type CORSInfo map[string]bool + +// Set implements the flag.Value interface to allow users to define a list of CORS origins +func (ci *CORSInfo) Set(s string) error { + m := make(map[string]bool) + for _, v := range strings.Split(s, ",") { + v = strings.TrimSpace(v) + if v == "" { + continue + } + if v != "*" { + if _, err := url.Parse(v); err != nil { + return fmt.Errorf("Invalid CORS origin: %s", err) + } + } + m[v] = true + + } + *ci = CORSInfo(m) + return nil +} + +func (ci *CORSInfo) String() string { + o := make([]string, 0) + for k := range *ci { + o = append(o, k) + } + sort.StringSlice(o).Sort() + return strings.Join(o, ",") +} + +// OriginAllowed determines whether the server will allow a given CORS origin. +func (c CORSInfo) OriginAllowed(origin string) bool { + return c["*"] || c[origin] +} + +type CORSHandler struct { + Handler http.Handler + Info *CORSInfo +} + +// addHeader adds the correct cors headers given an origin +func (h *CORSHandler) addHeader(w http.ResponseWriter, origin string) { + w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE") + w.Header().Add("Access-Control-Allow-Origin", origin) + w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization") +} + +// ServeHTTP adds the correct CORS headers based on the origin and returns immediately +// with a 200 OK if the method is OPTIONS. 
+func (h *CORSHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Write CORS header. + if h.Info.OriginAllowed("*") { + h.addHeader(w, "*") + } else if origin := req.Header.Get("Origin"); h.Info.OriginAllowed(origin) { + h.addHeader(w, origin) + } + + if req.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + return + } + + h.Handler.ServeHTTP(w, req) +} diff --git a/vendor/github.com/karlseguin/ccache/BUILD b/vendor/github.com/coreos/etcd/pkg/debugutil/BUILD similarity index 66% rename from vendor/github.com/karlseguin/ccache/BUILD rename to vendor/github.com/coreos/etcd/pkg/debugutil/BUILD index 398b1fd68ad..d943ed2561a 100644 --- a/vendor/github.com/karlseguin/ccache/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/debugutil/BUILD @@ -3,15 +3,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "bucket.go", - "cache.go", - "configuration.go", - "item.go", - "layeredbucket.go", - "layeredcache.go", - "secondarycache.go", + "doc.go", + "pprof.go", ], - importpath = "github.com/karlseguin/ccache", + importpath = "github.com/coreos/etcd/pkg/debugutil", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go b/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go new file mode 100644 index 00000000000..74499eb2737 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package debugutil includes utility functions for debugging. +package debugutil diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go new file mode 100644 index 00000000000..8d5544a3dca --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go @@ -0,0 +1,47 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package debugutil + +import ( + "net/http" + "net/http/pprof" + "runtime" +) + +const HTTPPrefixPProf = "/debug/pprof" + +// PProfHandlers returns a map of pprof handlers keyed by the HTTP path. 
+func PProfHandlers() map[string]http.Handler { + // set only when there's no existing setting + if runtime.SetMutexProfileFraction(-1) == 0 { + // 1 out of 5 mutex events are reported, on average + runtime.SetMutexProfileFraction(5) + } + + m := make(map[string]http.Handler) + + m[HTTPPrefixPProf+"/"] = http.HandlerFunc(pprof.Index) + m[HTTPPrefixPProf+"/profile"] = http.HandlerFunc(pprof.Profile) + m[HTTPPrefixPProf+"/symbol"] = http.HandlerFunc(pprof.Symbol) + m[HTTPPrefixPProf+"/cmdline"] = http.HandlerFunc(pprof.Cmdline) + m[HTTPPrefixPProf+"/trace "] = http.HandlerFunc(pprof.Trace) + m[HTTPPrefixPProf+"/heap"] = pprof.Handler("heap") + m[HTTPPrefixPProf+"/goroutine"] = pprof.Handler("goroutine") + m[HTTPPrefixPProf+"/threadcreate"] = pprof.Handler("threadcreate") + m[HTTPPrefixPProf+"/block"] = pprof.Handler("block") + m[HTTPPrefixPProf+"/mutex"] = pprof.Handler("mutex") + + return m +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go index aad40b75904..fce5126c695 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go @@ -17,6 +17,7 @@ package fileutil import ( "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -101,11 +102,11 @@ func Exist(name string) bool { // shorten the length of the file. 
func ZeroToEnd(f *os.File) error { // TODO: support FALLOC_FL_ZERO_RANGE - off, err := f.Seek(0, os.SEEK_CUR) + off, err := f.Seek(0, io.SeekCurrent) if err != nil { return err } - lenf, lerr := f.Seek(0, os.SEEK_END) + lenf, lerr := f.Seek(0, io.SeekEnd) if lerr != nil { return lerr } @@ -116,6 +117,6 @@ func ZeroToEnd(f *os.File) error { if err = Preallocate(f, lenf, true); err != nil { return err } - _, err = f.Seek(off, os.SEEK_SET) + _, err = f.Seek(off, io.SeekStart) return err } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go index dec25a1af44..939fea62381 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go @@ -17,6 +17,7 @@ package fileutil import ( + "io" "os" "syscall" ) @@ -36,7 +37,7 @@ const ( var ( wrlck = syscall.Flock_t{ Type: syscall.F_WRLCK, - Whence: int16(os.SEEK_SET), + Whence: int16(io.SeekStart), Start: 0, Len: 0, } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go index bb7f0281239..c747b7cf81f 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go @@ -14,7 +14,10 @@ package fileutil -import "os" +import ( + "io" + "os" +) // Preallocate tries to allocate the space for given // file. This operation is only supported on linux by a @@ -22,6 +25,10 @@ import "os" // If the operation is unsupported, no error will be returned. // Otherwise, the error encountered will be returned. 
func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if sizeInBytes == 0 { + // fallocate will return EINVAL if length is 0; skip + return nil + } if extendFile { return preallocExtend(f, sizeInBytes) } @@ -29,15 +36,15 @@ func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { } func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { - curOff, err := f.Seek(0, os.SEEK_CUR) + curOff, err := f.Seek(0, io.SeekCurrent) if err != nil { return err } - size, err := f.Seek(sizeInBytes, os.SEEK_END) + size, err := f.Seek(sizeInBytes, io.SeekEnd) if err != nil { return err } - if _, err = f.Seek(curOff, os.SEEK_SET); err != nil { + if _, err = f.Seek(curOff, io.SeekStart); err != nil { return err } if sizeInBytes > size { diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go index 859fc9d49e1..09f44e7c71d 100644 --- a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go +++ b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go @@ -13,15 +13,6 @@ import ( "net/http" ) -func RequestCanceler(req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} - // GracefulClose drains http.Response.Body until it hits EOF // and closes it. This prevents TCP/TLS connections from closing, // therefore available for reuse. diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/github.com/coreos/etcd/pkg/idutil/id.go index 931beb2d058..2da21062657 100644 --- a/vendor/github.com/coreos/etcd/pkg/idutil/id.go +++ b/vendor/github.com/coreos/etcd/pkg/idutil/id.go @@ -32,8 +32,8 @@ const ( // a node member ID. // // The initial id is in this format: -// High order byte is memberID, next 5 bytes are from timestamp, -// and low order 2 bytes are 0s. +// High order 2 bytes are from memberID, next 5 bytes are from timestamp, +// and low order one byte is a counter. 
// | prefix | suffix | // | 2 bytes | 5 bytes | 1 byte | // | memberID | timestamp | cnt | diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD index 2455b3c3edd..e918523a418 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD @@ -18,7 +18,6 @@ go_library( deps = [ "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/github.com/coreos/etcd/pkg/cpuutil:go_default_library", diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go index bb5f392b34c..5e38dc98dbf 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go +++ b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go @@ -16,14 +16,13 @@ package netutil import ( + "context" "net" "net/url" "reflect" "sort" "time" - "golang.org/x/net/context" - "github.com/coreos/etcd/pkg/types" "github.com/coreos/pkg/capnslog" ) @@ -32,11 +31,38 @@ var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil") // indirection for testing - resolveTCPAddr = net.ResolveTCPAddr + resolveTCPAddr = resolveTCPAddrDefault ) const retryInterval = time.Second +// taken from go's ResolveTCP code but uses configurable ctx +func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, error) { + host, port, serr := net.SplitHostPort(addr) + if serr != nil { + return nil, serr + } + portnum, perr := net.DefaultResolver.LookupPort(ctx, "tcp", port) + if perr != nil { + return nil, perr + } + + var ips []net.IPAddr + if ip := net.ParseIP(host); ip != nil { + ips = []net.IPAddr{{IP: ip}} + } else { + // Try as a DNS name. 
+ ipss, err := net.DefaultResolver.LookupIPAddr(ctx, host) + if err != nil { + return nil, err + } + ips = ipss + } + // randomize? + ip := ips[0] + return &net.TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone}, nil +} + // resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr. // resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames // are resolved. @@ -75,7 +101,7 @@ func resolveURL(ctx context.Context, u url.URL) (string, error) { if host == "localhost" || net.ParseIP(host) != nil { return "", nil } - tcpAddr, err := resolveTCPAddr("tcp", u.Host) + tcpAddr, err := resolveTCPAddr(ctx, u.Host) if err == nil { plog.Infof("resolving %s to %s", u.Host, tcpAddr.String()) return tcpAddr.String(), nil diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go index 79c59b01288..bf8528b753a 100644 --- a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go +++ b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go @@ -88,8 +88,6 @@ func (f *fifo) Schedule(j Job) { } } f.pendings = append(f.pendings, j) - - return } func (f *fifo) Pending() int { diff --git a/vendor/github.com/coreos/etcd/pkg/srv/BUILD b/vendor/github.com/coreos/etcd/pkg/srv/BUILD new file mode 100644 index 00000000000..3707eb3e968 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["srv.go"], + importpath = "github.com/coreos/etcd/pkg/srv", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/coreos/etcd/pkg/types:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go 
b/vendor/github.com/coreos/etcd/pkg/srv/srv.go new file mode 100644 index 00000000000..fefcbcb4b88 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/srv.go @@ -0,0 +1,140 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package srv looks up DNS SRV records. +package srv + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict + resolveTCPAddr = net.ResolveTCPAddr +) + +// GetCluster gets the cluster information via DNS discovery. +// Also sees each entry as a separate instance. 
+func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + return nil, err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, terr := resolveTCPAddr("tcp", host) + if terr != nil { + err = terr + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but URL shouldn't. + shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + if ok && url.Scheme != scheme { + err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } + } + if len(stringParts) == 0 { + return err + } + return nil + } + + failCount := 0 + err := updateNodeMap(service+"-ssl", "https") + srvErr := make([]string, 2) + if err != nil { + srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) + failCount++ + } + err = updateNodeMap(service, "http") + if err != nil { + srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) + failCount++ + } + if failCount == 2 { + return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) + } + return stringParts, nil +} + +type SRVClients struct { + Endpoints []string + SRVs []*net.SRV +} + +// GetClient looks up the client endpoints for a 
service and domain. +func GetClient(service, domain string) (*SRVClients, error) { + var urls []*url.URL + var srvs []*net.SRV + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + srvs = append(srvs, addrs...) + return nil + } + + errHTTPS := updateURLs(service+"-ssl", "https") + errHTTP := updateURLs(service, "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/BUILD b/vendor/github.com/coreos/etcd/pkg/testutil/BUILD index 054063068f4..d3b887f366d 100644 --- a/vendor/github.com/coreos/etcd/pkg/testutil/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/testutil/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "assert.go", "leak.go", "pauseable_handler.go", "recorder.go", diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/assert.go b/vendor/github.com/coreos/etcd/pkg/testutil/assert.go new file mode 100644 index 00000000000..9cf03457d52 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/testutil/assert.go @@ -0,0 +1,62 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "fmt" + "reflect" + "testing" +) + +func AssertEqual(t *testing.T, e, a interface{}, msg ...string) { + if (e == nil || a == nil) && (isNil(e) && isNil(a)) { + return + } + if reflect.DeepEqual(e, a) { + return + } + s := "" + if len(msg) > 1 { + s = msg[0] + ": " + } + s = fmt.Sprintf("%sexpected %+v, got %+v", s, e, a) + FatalStack(t, s) +} + +func AssertNil(t *testing.T, v interface{}) { + AssertEqual(t, nil, v) +} + +func AssertNotNil(t *testing.T, v interface{}) { + if v == nil { + t.Fatalf("expected non-nil, got %+v", v) + } +} + +func AssertTrue(t *testing.T, v bool, msg ...string) { + AssertEqual(t, true, v, msg...) +} + +func AssertFalse(t *testing.T, v bool, msg ...string) { + AssertEqual(t, false, v, msg...) +} + +func isNil(v interface{}) bool { + if v == nil { + return true + } + rv := reflect.ValueOf(v) + return rv.Kind() != reflect.Struct && rv.IsNil() +} diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/leak.go b/vendor/github.com/coreos/etcd/pkg/testutil/leak.go index 80bc0eebc8a..a29d06d9bd0 100644 --- a/vendor/github.com/coreos/etcd/pkg/testutil/leak.go +++ b/vendor/github.com/coreos/etcd/pkg/testutil/leak.go @@ -62,10 +62,11 @@ func CheckLeakedGoroutine() bool { return true } -func AfterTest(t *testing.T) { +// CheckAfterTest returns an error if AfterTest would fail with an error. 
+func CheckAfterTest(d time.Duration) error { http.DefaultTransport.(*http.Transport).CloseIdleConnections() if testing.Short() { - return + return nil } var bad string badSubstring := map[string]string{ @@ -75,10 +76,12 @@ func AfterTest(t *testing.T) { "net.(*netFD).connect(": "a timing out dial", ").noteClientGone(": "a closenotifier sender", ").readLoop(": "a Transport", + ".grpc": "a gRPC resource", } var stacks string - for i := 0; i < 6; i++ { + begin := time.Now() + for time.Since(begin) < d { bad = "" stacks = strings.Join(interestingGoroutines(), "\n\n") for substr, what := range badSubstring { @@ -87,13 +90,22 @@ func AfterTest(t *testing.T) { } } if bad == "" { - return + return nil } // Bad stuff found, but goroutines might just still be // shutting down, so give it some time. time.Sleep(50 * time.Millisecond) } - t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks) + return fmt.Errorf("appears to have leaked %s:\n%s", bad, stacks) +} + +// AfterTest is meant to run in a defer that executes after a test completes. +// It will detect common goroutine leaks, retrying in case there are goroutines +// not synchronously torn down, and fail the test if any goroutines are stuck. 
+func AfterTest(t *testing.T) { + if err := CheckAfterTest(300 * time.Millisecond); err != nil { + t.Errorf("Test %v", err) + } } func interestingGoroutines() (gs []string) { @@ -106,6 +118,7 @@ func interestingGoroutines() (gs []string) { } stack := strings.TrimSpace(sl[1]) if stack == "" || + strings.Contains(stack, "sync.(*WaitGroup).Done") || strings.Contains(stack, "created by os/signal.init") || strings.Contains(stack, "runtime/panic.go") || strings.Contains(stack, "created by testing.RunTests") || diff --git a/vendor/github.com/coreos/etcd/pkg/transport/BUILD b/vendor/github.com/coreos/etcd/pkg/transport/BUILD index 7074e74a002..3ae75dce137 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/transport/BUILD @@ -7,6 +7,7 @@ go_library( "keepalive_listener.go", "limit_listen.go", "listener.go", + "listener_tls.go", "timeout_conn.go", "timeout_dialer.go", "timeout_listener.go", @@ -17,10 +18,7 @@ go_library( ], importpath = "github.com/coreos/etcd/pkg/transport", visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library", - ], + deps = ["//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library"], ) filegroup( diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go index 4fcdb5ad9a3..3b58b41543f 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go @@ -23,22 +23,21 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "log" "math/big" "net" "os" "path/filepath" + "strings" "time" - "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/tlsutil" ) -func NewListener(addr, scheme string, tlscfg *tls.Config) (l net.Listener, err error) { +func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { if l, 
err = newListener(addr, scheme); err != nil { return nil, err } - return wrapTLS(addr, scheme, tlscfg, l) + return wrapTLS(addr, scheme, tlsinfo, l) } func newListener(addr string, scheme string) (net.Listener, error) { @@ -49,15 +48,11 @@ func newListener(addr string, scheme string) (net.Listener, error) { return net.Listen("tcp", addr) } -func wrapTLS(addr, scheme string, tlscfg *tls.Config, l net.Listener) (net.Listener, error) { +func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { if scheme != "https" && scheme != "unixs" { return l, nil } - if tlscfg == nil { - l.Close() - return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr) - } - return tls.NewListener(l, tlscfg), nil + return newTLSListener(l, tlsinfo) } type TLSInfo struct { @@ -70,6 +65,10 @@ type TLSInfo struct { // ServerName ensures the cert matches the given host in case of discovery / virtual hosting ServerName string + // HandshakeFailure is optionally called when a connection fails to handshake. The + // connection will be closed immediately afterwards. + HandshakeFailure func(*tls.Conn, error) + selfCert bool // parseFunc exists to simplify testing. Typically, parseFunc @@ -86,7 +85,7 @@ func (info TLSInfo) Empty() bool { } func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { - if err = fileutil.TouchDirAll(dirpath); err != nil { + if err = os.MkdirAll(dirpath, 0700); err != nil { return } @@ -173,6 +172,14 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { MinVersion: tls.VersionTLS12, ServerName: info.ServerName, } + // this only reloads certs when there's a client request + // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } return cfg, nil } @@ -235,9 +242,6 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { if err != nil { return nil, err } - // if given a CA, trust any host with a cert signed by the CA - log.Println("warning: ignoring ServerName for user-provided CA for backwards compatibility is deprecated") - cfg.ServerName = "" } if info.selfCert { @@ -246,31 +250,11 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { return cfg, nil } -// ShallowCopyTLSConfig copies *tls.Config. This is only -// work-around for go-vet tests, which complains -// -// assignment copies lock value to p: crypto/tls.Config contains sync.Once contains sync.Mutex -// -// Keep up-to-date with 'go/src/crypto/tls/common.go' -func ShallowCopyTLSConfig(cfg *tls.Config) *tls.Config { - ncfg := tls.Config{ - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - SessionTicketKey: cfg.SessionTicketKey, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } - return &ncfg +// IsClosedConnError returns true if the error is from closing listener, cmux. 
+// copied from golang.org/x/net/http2/http2.go +func IsClosedConnError(err error) bool { + // 'use of closed network connection' (Go <=1.8) + // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) + // 'mux: listener closed' (cmux.ErrListenerClosed) + return err != nil && strings.Contains(err.Error(), "closed") } diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go new file mode 100644 index 00000000000..86511860335 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go @@ -0,0 +1,217 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "strings" + "sync" +) + +// tlsListener overrides a TLS listener so it will reject client +// certificates with insufficient SAN credentials. 
+type tlsListener struct { + net.Listener + connc chan net.Conn + donec chan struct{} + err error + handshakeFailure func(*tls.Conn, error) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + if tlsinfo == nil || tlsinfo.Empty() { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) + } + tlscfg, err := tlsinfo.ServerConfig() + if err != nil { + return nil, err + } + + hf := tlsinfo.HandshakeFailure + if hf == nil { + hf = func(*tls.Conn, error) {} + } + tlsl := &tlsListener{ + Listener: tls.NewListener(l, tlscfg), + connc: make(chan net.Conn), + donec: make(chan struct{}), + handshakeFailure: hf, + } + go tlsl.acceptLoop() + return tlsl, nil +} + +func (l *tlsListener) Accept() (net.Conn, error) { + select { + case conn := <-l.connc: + return conn, nil + case <-l.donec: + return nil, l.err + } +} + +// acceptLoop launches each TLS handshake in a separate goroutine +// to prevent a hanging TLS connection from blocking other connections. 
+func (l *tlsListener) acceptLoop() { + var wg sync.WaitGroup + var pendingMu sync.Mutex + + pending := make(map[net.Conn]struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + pendingMu.Lock() + for c := range pending { + c.Close() + } + pendingMu.Unlock() + wg.Wait() + close(l.donec) + }() + + for { + conn, err := l.Listener.Accept() + if err != nil { + l.err = err + return + } + + pendingMu.Lock() + pending[conn] = struct{}{} + pendingMu.Unlock() + + wg.Add(1) + go func() { + defer func() { + if conn != nil { + conn.Close() + } + wg.Done() + }() + + tlsConn := conn.(*tls.Conn) + herr := tlsConn.Handshake() + pendingMu.Lock() + delete(pending, conn) + pendingMu.Unlock() + if herr != nil { + l.handshakeFailure(tlsConn, herr) + return + } + + st := tlsConn.ConnectionState() + if len(st.PeerCertificates) > 0 { + cert := st.PeerCertificates[0] + addr := tlsConn.RemoteAddr().String() + if cerr := checkCert(ctx, cert, addr); cerr != nil { + l.handshakeFailure(tlsConn, cerr) + return + } + } + select { + case l.connc <- tlsConn: + conn = nil + case <-ctx.Done(): + } + }() + } +} + +func checkCert(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { + h, _, herr := net.SplitHostPort(remoteAddr) + if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { + return nil + } + if herr != nil { + return herr + } + if len(cert.IPAddresses) > 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { + return cerr + } + } + if len(cert.DNSNames) > 0 { + ok, err := isHostInDNS(ctx, h, cert.DNSNames) + if ok { + return nil + } + errStr := "" + if err != nil { + errStr = " (" + err.Error() + ")" + } + return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) + } + return nil +} + +func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { + // reverse lookup + wildcards, names := []string{}, []string{} + for _, 
dns := range dnsNames { + if strings.HasPrefix(dns, "*.") { + wildcards = append(wildcards, dns[1:]) + } else { + names = append(names, dns) + } + } + lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) + for _, name := range lnames { + // strip trailing '.' from PTR record + if name[len(name)-1] == '.' { + name = name[:len(name)-1] + } + for _, wc := range wildcards { + if strings.HasSuffix(name, wc) { + return true, nil + } + } + for _, n := range names { + if n == name { + return true, nil + } + } + } + err = lerr + + // forward lookup + for _, dns := range names { + addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) + if lerr != nil { + err = lerr + continue + } + for _, addr := range addrs { + if addr == host { + return true, nil + } + } + } + return false, err +} + +func (l *tlsListener) Close() error { + err := l.Listener.Close() + <-l.donec + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go index 0f4df5fbe3b..b35e04955bb 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go @@ -15,7 +15,6 @@ package transport import ( - "crypto/tls" "net" "time" ) @@ -23,7 +22,7 @@ import ( // NewTimeoutListener returns a listener that listens on the given address. // If read/write on the accepted connection blocks longer than its time limit, // it will return timeout error. 
-func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { +func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { ln, err := newListener(addr, scheme) if err != nil { return nil, err @@ -33,7 +32,7 @@ func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeou rdtimeoutd: rdtimeoutd, wtimeoutd: wtimeoutd, } - if ln, err = wrapTLS(addr, scheme, tlscfg, ln); err != nil { + if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil { return nil, err } return ln, nil diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go index c126b6f7fa0..123e2036f0f 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go @@ -22,7 +22,7 @@ import ( type unixListener struct{ net.Listener } func NewUnixListener(addr string) (net.Listener, error) { - if err := os.RemoveAll(addr); err != nil { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { return nil, err } l, err := net.Listen("unix", addr) @@ -33,7 +33,7 @@ func NewUnixListener(addr string) (net.Listener, error) { } func (ul *unixListener) Close() error { - if err := os.RemoveAll(ul.Addr().String()); err != nil { + if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { return err } return ul.Listener.Close() diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait.go b/vendor/github.com/coreos/etcd/pkg/wait/wait.go index 0f31eeb9790..34fa237e825 100644 --- a/vendor/github.com/coreos/etcd/pkg/wait/wait.go +++ b/vendor/github.com/coreos/etcd/pkg/wait/wait.go @@ -21,22 +21,29 @@ import ( "sync" ) +// Wait is an interface that provides the ability to wait and trigger events that +// are associated with IDs. 
type Wait interface { + // Register waits returns a chan that waits on the given ID. + // The chan will be triggered when Trigger is called with + // the same ID. Register(id uint64) <-chan interface{} + // Trigger triggers the waiting chans with the given ID. Trigger(id uint64, x interface{}) IsRegistered(id uint64) bool } -type List struct { +type list struct { l sync.Mutex m map[uint64]chan interface{} } -func New() *List { - return &List{m: make(map[uint64]chan interface{})} +// New creates a Wait. +func New() Wait { + return &list{m: make(map[uint64]chan interface{})} } -func (w *List) Register(id uint64) <-chan interface{} { +func (w *list) Register(id uint64) <-chan interface{} { w.l.Lock() defer w.l.Unlock() ch := w.m[id] @@ -49,7 +56,7 @@ func (w *List) Register(id uint64) <-chan interface{} { return ch } -func (w *List) Trigger(id uint64, x interface{}) { +func (w *list) Trigger(id uint64, x interface{}) { w.l.Lock() ch := w.m[id] delete(w.m, id) @@ -60,7 +67,7 @@ func (w *List) Trigger(id uint64, x interface{}) { } } -func (w *List) IsRegistered(id uint64) bool { +func (w *list) IsRegistered(id uint64) bool { w.l.Lock() defer w.l.Unlock() _, ok := w.m[id] diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD b/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD index e0aa0fddb02..881f0a54f10 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD @@ -6,15 +6,18 @@ go_library( "auth.go", "cluster.go", "doc.go", + "election.go", "kv.go", - "kv_client_adapter.go", + "leader.go", "lease.go", + "lock.go", + "logger.go", "maintenance.go", "metrics.go", + "register.go", "watch.go", "watch_broadcast.go", "watch_broadcasts.go", - "watch_client_adapter.go", "watch_ranges.go", "watcher.go", ], @@ -22,17 +25,23 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + 
"//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library", + "//vendor/github.com/coreos/etcd/clientv3/naming:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", "//vendor/github.com/coreos/etcd/proxy/grpcproxy/cache:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/golang.org/x/time/rate:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/naming:go_default_library", ], ) @@ -47,6 +56,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter:all-srcs", "//vendor/github.com/coreos/etcd/proxy/grpcproxy/cache:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD new file mode 100644 index 00000000000..136c7198aa8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "chan_stream.go", + "cluster_client_adapter.go", + "doc.go", + "election_client_adapter.go", + "kv_client_adapter.go", + "lease_client_adapter.go", + 
"lock_client_adapter.go", + "maintenance_client_adapter.go", + "watch_client_adapter.go", + ], + importpath = "github.com/coreos/etcd/proxy/grpcproxy/adapter", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/metadata:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go similarity index 65% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go rename to vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go index 283c2ed07fa..3aa01f2052b 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go @@ -1,4 +1,4 @@ -// Copyright 2016 The etcd Authors +// Copyright 2017 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,79 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package grpcproxy +package adapter import ( - "errors" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "golang.org/x/net/context" + "google.golang.org/grpc" "google.golang.org/grpc/metadata" ) -var errAlreadySentHeader = errors.New("grpcproxy: already send header") - -type ws2wc struct{ wserv pb.WatchServer } - -func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient { - return &ws2wc{wserv} -} - -func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) { - // ch1 is buffered so server can send error on close - ch1, ch2 := make(chan interface{}, 1), make(chan interface{}) - headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1) - - cctx, ccancel := context.WithCancel(ctx) - cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel} - wclient := &ws2wcClientStream{chanClientStream{headerc, trailerc, cli}} - - sctx, scancel := context.WithCancel(ctx) - srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel} - wserver := &ws2wcServerStream{chanServerStream{headerc, trailerc, srv, nil}} - go func() { - if err := s.wserv.Watch(wserver); err != nil { - select { - case srv.sendc <- err: - case <-sctx.Done(): - case <-cctx.Done(): - } - } - scancel() - ccancel() - }() - return wclient, nil -} - -// ws2wcClientStream implements Watch_WatchClient -type ws2wcClientStream struct{ chanClientStream } - -// ws2wcServerStream implements Watch_WatchServer -type ws2wcServerStream struct{ chanServerStream } - -func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error { - return s.SendMsg(wr) -} -func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*pb.WatchResponse), nil -} - -func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error { - return s.SendMsg(wr) -} -func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) { - var v interface{} - if err := s.RecvMsg(&v); 
err != nil { - return nil, err - } - return v.(*pb.WatchRequest), nil -} - // chanServerStream implements grpc.ServerStream with a chanStream type chanServerStream struct { headerc chan<- metadata.MD @@ -151,8 +87,8 @@ func (cs *chanClientStream) Trailer() metadata.MD { } } -func (s *chanClientStream) CloseSend() error { - close(s.chanStream.sendc) +func (cs *chanClientStream) CloseSend() error { + close(cs.chanStream.sendc) return nil } @@ -180,17 +116,50 @@ func (s *chanStream) SendMsg(m interface{}) error { func (s *chanStream) RecvMsg(m interface{}) error { v := m.(*interface{}) - select { - case msg, ok := <-s.recvc: - if !ok { - return grpc.ErrClientConnClosing + for { + select { + case msg, ok := <-s.recvc: + if !ok { + return grpc.ErrClientConnClosing + } + if err, ok := msg.(error); ok { + return err + } + *v = msg + return nil + case <-s.ctx.Done(): } - if err, ok := msg.(error); ok { - return err + if len(s.recvc) == 0 { + // prioritize any pending recv messages over canceled context + break } - *v = msg - return nil - case <-s.ctx.Done(): } return s.ctx.Err() } + +func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream { + // ch1 is buffered so server can send error on close + ch1, ch2 := make(chan interface{}, 1), make(chan interface{}) + headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1) + + cctx, ccancel := context.WithCancel(ctx) + cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel} + cs := chanClientStream{headerc, trailerc, cli} + + sctx, scancel := context.WithCancel(ctx) + srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel} + ss := chanServerStream{headerc, trailerc, srv, nil} + + go func() { + if err := ssHandler(ss); err != nil { + select { + case srv.sendc <- err: + case <-sctx.Done(): + case <-cctx.Done(): + } + } + scancel() + ccancel() + }() + return cs +} diff --git 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go new file mode 100644 index 00000000000..4ddf78e15ec --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go @@ -0,0 +1,44 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type cls2clc struct{ cls pb.ClusterServer } + +func ClusterServerToClusterClient(cls pb.ClusterServer) pb.ClusterClient { + return &cls2clc{cls} +} + +func (s *cls2clc) MemberList(ctx context.Context, r *pb.MemberListRequest, opts ...grpc.CallOption) (*pb.MemberListResponse, error) { + return s.cls.MemberList(ctx, r) +} + +func (s *cls2clc) MemberAdd(ctx context.Context, r *pb.MemberAddRequest, opts ...grpc.CallOption) (*pb.MemberAddResponse, error) { + return s.cls.MemberAdd(ctx, r) +} + +func (s *cls2clc) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest, opts ...grpc.CallOption) (*pb.MemberUpdateResponse, error) { + return s.cls.MemberUpdate(ctx, r) +} + +func (s *cls2clc) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest, opts ...grpc.CallOption) (*pb.MemberRemoveResponse, error) { + return s.cls.MemberRemove(ctx, r) +} diff --git 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go new file mode 100644 index 00000000000..7170be23304 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go @@ -0,0 +1,17 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package adapter provides gRPC adapters between client and server +// gRPC interfaces without needing to go through a gRPC connection. +package adapter diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go new file mode 100644 index 00000000000..383c1b9d8fb --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go @@ -0,0 +1,79 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adapter + +import ( + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type es2ec struct{ es v3electionpb.ElectionServer } + +func ElectionServerToElectionClient(es v3electionpb.ElectionServer) v3electionpb.ElectionClient { + return &es2ec{es} +} + +func (s *es2ec) Campaign(ctx context.Context, r *v3electionpb.CampaignRequest, opts ...grpc.CallOption) (*v3electionpb.CampaignResponse, error) { + return s.es.Campaign(ctx, r) +} + +func (s *es2ec) Proclaim(ctx context.Context, r *v3electionpb.ProclaimRequest, opts ...grpc.CallOption) (*v3electionpb.ProclaimResponse, error) { + return s.es.Proclaim(ctx, r) +} + +func (s *es2ec) Leader(ctx context.Context, r *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (*v3electionpb.LeaderResponse, error) { + return s.es.Leader(ctx, r) +} + +func (s *es2ec) Resign(ctx context.Context, r *v3electionpb.ResignRequest, opts ...grpc.CallOption) (*v3electionpb.ResignResponse, error) { + return s.es.Resign(ctx, r) +} + +func (s *es2ec) Observe(ctx context.Context, in *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (v3electionpb.Election_ObserveClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return s.es.Observe(in, &es2ecServerStream{ss}) + }) + return &es2ecClientStream{cs}, nil +} + +// es2ecClientStream implements Election_ObserveClient +type es2ecClientStream struct{ chanClientStream } + +// es2ecServerStream implements Election_ObserveServer +type es2ecServerStream struct{ chanServerStream } + +func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error { + return s.SendMsg(rr) +} +func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*v3electionpb.LeaderResponse), nil +} + +func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error { + return s.SendMsg(rr) +} +func 
(s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*v3electionpb.LeaderRequest), nil +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go similarity index 98% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go rename to vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go index 7880b18109d..fec401d9dd0 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package grpcproxy +package adapter import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go new file mode 100644 index 00000000000..d471fd9144b --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go @@ -0,0 +1,77 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adapter + +import ( + "golang.org/x/net/context" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "google.golang.org/grpc" +) + +type ls2lc struct { + leaseServer pb.LeaseServer +} + +func LeaseServerToLeaseClient(ls pb.LeaseServer) pb.LeaseClient { + return &ls2lc{ls} +} + +func (c *ls2lc) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (*pb.LeaseGrantResponse, error) { + return c.leaseServer.LeaseGrant(ctx, in) +} + +func (c *ls2lc) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (*pb.LeaseRevokeResponse, error) { + return c.leaseServer.LeaseRevoke(ctx, in) +} + +func (c *ls2lc) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (pb.Lease_LeaseKeepAliveClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return c.leaseServer.LeaseKeepAlive(&ls2lcServerStream{ss}) + }) + return &ls2lcClientStream{cs}, nil +} + +func (c *ls2lc) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*pb.LeaseTimeToLiveResponse, error) { + return c.leaseServer.LeaseTimeToLive(ctx, in) +} + +// ls2lcClientStream implements Lease_LeaseKeepAliveClient +type ls2lcClientStream struct{ chanClientStream } + +// ls2lcServerStream implements Lease_LeaseKeepAliveServer +type ls2lcServerStream struct{ chanServerStream } + +func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error { + return s.SendMsg(rr) +} +func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.LeaseKeepAliveResponse), nil +} + +func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error { + return s.SendMsg(rr) +} +func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.LeaseKeepAliveRequest), nil +} diff --git 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go new file mode 100644 index 00000000000..05e5cb020a1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go @@ -0,0 +1,36 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type ls2lsc struct{ ls v3lockpb.LockServer } + +func LockServerToLockClient(ls v3lockpb.LockServer) v3lockpb.LockClient { + return &ls2lsc{ls} +} + +func (s *ls2lsc) Lock(ctx context.Context, r *v3lockpb.LockRequest, opts ...grpc.CallOption) (*v3lockpb.LockResponse, error) { + return s.ls.Lock(ctx, r) +} + +func (s *ls2lsc) Unlock(ctx context.Context, r *v3lockpb.UnlockRequest, opts ...grpc.CallOption) (*v3lockpb.UnlockResponse, error) { + return s.ls.Unlock(ctx, r) +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go new file mode 100644 index 00000000000..9b21bf2576e --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go @@ -0,0 +1,79 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type mts2mtc struct{ mts pb.MaintenanceServer } + +func MaintenanceServerToMaintenanceClient(mts pb.MaintenanceServer) pb.MaintenanceClient { + return &mts2mtc{mts} +} + +func (s *mts2mtc) Alarm(ctx context.Context, r *pb.AlarmRequest, opts ...grpc.CallOption) (*pb.AlarmResponse, error) { + return s.mts.Alarm(ctx, r) +} + +func (s *mts2mtc) Status(ctx context.Context, r *pb.StatusRequest, opts ...grpc.CallOption) (*pb.StatusResponse, error) { + return s.mts.Status(ctx, r) +} + +func (s *mts2mtc) Defragment(ctx context.Context, dr *pb.DefragmentRequest, opts ...grpc.CallOption) (*pb.DefragmentResponse, error) { + return s.mts.Defragment(ctx, dr) +} + +func (s *mts2mtc) Hash(ctx context.Context, r *pb.HashRequest, opts ...grpc.CallOption) (*pb.HashResponse, error) { + return s.mts.Hash(ctx, r) +} + +func (s *mts2mtc) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (pb.Maintenance_SnapshotClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return s.mts.Snapshot(in, &ss2scServerStream{ss}) + }) + return &ss2scClientStream{cs}, nil +} + +// ss2scClientStream implements Maintenance_SnapshotClient +type ss2scClientStream struct{ chanClientStream } + +// ss2scServerStream implements Maintenance_SnapshotServer +type ss2scServerStream struct{ chanServerStream } + +func 
(s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error { + return s.SendMsg(rr) +} +func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.SnapshotResponse), nil +} + +func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error { + return s.SendMsg(rr) +} +func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.SnapshotRequest), nil +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go new file mode 100644 index 00000000000..af4a13c4152 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go @@ -0,0 +1,66 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adapter + +import ( + "errors" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +var errAlreadySentHeader = errors.New("adapter: already sent header") + +type ws2wc struct{ wserv pb.WatchServer } + +func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient { + return &ws2wc{wserv} +} + +func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return s.wserv.Watch(&ws2wcServerStream{ss}) + }) + return &ws2wcClientStream{cs}, nil +} + +// ws2wcClientStream implements Watch_WatchClient +type ws2wcClientStream struct{ chanClientStream } + +// ws2wcServerStream implements Watch_WatchServer +type ws2wcServerStream struct{ chanServerStream } + +func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error { + return s.SendMsg(wr) +} +func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.WatchResponse), nil +} + +func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error { + return s.SendMsg(wr) +} +func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.WatchRequest), nil +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD index ba6e5289a51..5d0c3e9e39c 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD @@ -9,7 +9,7 @@ go_library( "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/pkg/adt:go_default_library", - 
"//vendor/github.com/karlseguin/ccache:go_default_library", + "//vendor/github.com/golang/groupcache/lru:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go index 155bbf90022..e84a05229e0 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go @@ -12,18 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package cache exports functionality for efficiently caching and mapping +// `RangeRequest`s to corresponding `RangeResponse`s. package cache import ( "errors" "sync" - "time" - - "github.com/karlseguin/ccache" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/adt" + "github.com/golang/groupcache/lru" ) var ( @@ -31,14 +31,12 @@ var ( ErrCompacted = rpctypes.ErrGRPCCompacted ) -const defaultHistoricTTL = time.Hour -const defaultCurrentTTL = time.Minute - type Cache interface { Add(req *pb.RangeRequest, resp *pb.RangeResponse) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) Compact(revision int64) Invalidate(key []byte, endkey []byte) + Size() int Close() } @@ -54,17 +52,17 @@ func keyFunc(req *pb.RangeRequest) string { func NewCache(maxCacheEntries int) Cache { return &cache{ - lru: ccache.New(ccache.Configure().MaxSize(int64(maxCacheEntries))), + lru: lru.New(maxCacheEntries), compactedRev: -1, } } -func (c *cache) Close() { c.lru.Stop() } +func (c *cache) Close() {} // cache implements Cache type cache struct { mu sync.RWMutex - lru *ccache.Cache + lru *lru.Cache // a reverse index for cache invalidation cachedRanges adt.IntervalTree @@ -80,11 +78,7 @@ func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) { defer c.mu.Unlock() if req.Revision > c.compactedRev { - if req.Revision == 0 { - c.lru.Set(key, 
resp, defaultCurrentTTL) - } else { - c.lru.Set(key, resp, defaultHistoricTTL) - } + c.lru.Add(key, resp) } // we do not need to invalidate a request with a revision specified. // so we do not need to add it into the reverse index. @@ -116,16 +110,16 @@ func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) { func (c *cache) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) { key := keyFunc(req) - c.mu.RLock() - defer c.mu.RUnlock() + c.mu.Lock() + defer c.mu.Unlock() - if req.Revision < c.compactedRev { - c.lru.Delete(key) + if req.Revision > 0 && req.Revision < c.compactedRev { + c.lru.Remove(key) return nil, ErrCompacted } - if item := c.lru.Get(key); item != nil { - return item.Value().(*pb.RangeResponse), nil + if resp, ok := c.lru.Get(key); ok { + return resp.(*pb.RangeResponse), nil } return nil, errors.New("not exist") } @@ -149,7 +143,7 @@ func (c *cache) Invalidate(key, endkey []byte) { for _, iv := range ivs { keys := iv.Val.([]string) for _, key := range keys { - c.lru.Delete(key) + c.lru.Remove(key) } } // delete after removing all keys since it is destructive to 'ivs' @@ -166,3 +160,9 @@ func (c *cache) Compact(revision int64) { c.compactedRev = revision } } + +func (c *cache) Size() int { + c.mu.RLock() + defer c.mu.RUnlock() + return c.lru.Len() +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go index 8a2fa16c124..899fb9be65f 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go @@ -15,38 +15,163 @@ package grpcproxy import ( + "fmt" + "os" + "sync" + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/naming" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "golang.org/x/net/context" + "golang.org/x/time/rate" + "google.golang.org/grpc" + gnaming "google.golang.org/grpc/naming" ) +// allow maximum 1 retry per second +const resolveRetryRate = 1 + type 
clusterProxy struct { - client *clientv3.Client + clus clientv3.Cluster + ctx context.Context + gr *naming.GRPCResolver + + // advertise client URL + advaddr string + prefix string + + umu sync.RWMutex + umap map[string]gnaming.Update } -func NewClusterProxy(c *clientv3.Client) pb.ClusterServer { - return &clusterProxy{ - client: c, +// NewClusterProxy takes optional prefix to fetch grpc-proxy member endpoints. +// The returned channel is closed when there is grpc-proxy endpoint registered +// and the client's context is canceled so the 'register' loop returns. +func NewClusterProxy(c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{}) { + cp := &clusterProxy{ + clus: c.Cluster, + ctx: c.Ctx(), + gr: &naming.GRPCResolver{Client: c}, + + advaddr: advaddr, + prefix: prefix, + umap: make(map[string]gnaming.Update), + } + + donec := make(chan struct{}) + if advaddr != "" && prefix != "" { + go func() { + defer close(donec) + cp.resolve(prefix) + }() + return cp, donec + } + + close(donec) + return cp, donec +} + +func (cp *clusterProxy) resolve(prefix string) { + rm := rate.NewLimiter(rate.Limit(resolveRetryRate), resolveRetryRate) + for rm.Wait(cp.ctx) == nil { + wa, err := cp.gr.Resolve(prefix) + if err != nil { + plog.Warningf("failed to resolve %q (%v)", prefix, err) + continue + } + cp.monitor(wa) + } +} + +func (cp *clusterProxy) monitor(wa gnaming.Watcher) { + for cp.ctx.Err() == nil { + ups, err := wa.Next() + if err != nil { + plog.Warningf("clusterProxy watcher error (%v)", err) + if grpc.ErrorDesc(err) == naming.ErrWatcherClosed.Error() { + return + } + } + + cp.umu.Lock() + for i := range ups { + switch ups[i].Op { + case gnaming.Add: + cp.umap[ups[i].Addr] = *ups[i] + case gnaming.Delete: + delete(cp.umap, ups[i].Addr) + } + } + cp.umu.Unlock() } } func (cp *clusterProxy) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { - conn := cp.client.ActiveConnection() - return 
pb.NewClusterClient(conn).MemberAdd(ctx, r) + mresp, err := cp.clus.MemberAdd(ctx, r.PeerURLs) + if err != nil { + return nil, err + } + resp := (pb.MemberAddResponse)(*mresp) + return &resp, err } func (cp *clusterProxy) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - conn := cp.client.ActiveConnection() - return pb.NewClusterClient(conn).MemberRemove(ctx, r) + mresp, err := cp.clus.MemberRemove(ctx, r.ID) + if err != nil { + return nil, err + } + resp := (pb.MemberRemoveResponse)(*mresp) + return &resp, err } func (cp *clusterProxy) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { - conn := cp.client.ActiveConnection() - return pb.NewClusterClient(conn).MemberUpdate(ctx, r) + mresp, err := cp.clus.MemberUpdate(ctx, r.ID, r.PeerURLs) + if err != nil { + return nil, err + } + resp := (pb.MemberUpdateResponse)(*mresp) + return &resp, err } -func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - conn := cp.client.ActiveConnection() - return pb.NewClusterClient(conn).MemberList(ctx, r) +func (cp *clusterProxy) membersFromUpdates() ([]*pb.Member, error) { + cp.umu.RLock() + defer cp.umu.RUnlock() + mbs := make([]*pb.Member, 0, len(cp.umap)) + for addr, upt := range cp.umap { + m, err := decodeMeta(fmt.Sprint(upt.Metadata)) + if err != nil { + return nil, err + } + mbs = append(mbs, &pb.Member{Name: m.Name, ClientURLs: []string{addr}}) + } + return mbs, nil +} + +// MemberList wraps member list API with following rules: +// - If 'advaddr' is not empty and 'prefix' is not empty, return registered member lists via resolver +// - If 'advaddr' is not empty and 'prefix' is not empty and registered grpc-proxy members haven't been fetched, return the 'advaddr' +// - If 'advaddr' is not empty and 'prefix' is empty, return 'advaddr' without forcing it to 'register' +// - If 'advaddr' is empty, forward to member list API 
+func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { + if cp.advaddr != "" { + if cp.prefix != "" { + mbs, err := cp.membersFromUpdates() + if err != nil { + return nil, err + } + if len(mbs) > 0 { + return &pb.MemberListResponse{Members: mbs}, nil + } + } + // prefix is empty or no grpc-proxy members have been registered + hostname, _ := os.Hostname() + return &pb.MemberListResponse{Members: []*pb.Member{{Name: hostname, ClientURLs: []string{cp.advaddr}}}}, nil + } + mresp, err := cp.clus.MemberList(ctx) + if err != nil { + return nil, err + } + resp := (pb.MemberListResponse)(*mresp) + return &resp, err } diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go new file mode 100644 index 00000000000..27115a81d7d --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go @@ -0,0 +1,65 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package grpcproxy + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" +) + +type electionProxy struct { + client *clientv3.Client +} + +func NewElectionProxy(client *clientv3.Client) v3electionpb.ElectionServer { + return &electionProxy{client: client} +} + +func (ep *electionProxy) Campaign(ctx context.Context, req *v3electionpb.CampaignRequest) (*v3electionpb.CampaignResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Campaign(ctx, req) +} + +func (ep *electionProxy) Proclaim(ctx context.Context, req *v3electionpb.ProclaimRequest) (*v3electionpb.ProclaimResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Proclaim(ctx, req) +} + +func (ep *electionProxy) Leader(ctx context.Context, req *v3electionpb.LeaderRequest) (*v3electionpb.LeaderResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Leader(ctx, req) +} + +func (ep *electionProxy) Observe(req *v3electionpb.LeaderRequest, s v3electionpb.Election_ObserveServer) error { + conn := ep.client.ActiveConnection() + ctx, cancel := context.WithCancel(s.Context()) + defer cancel() + sc, err := v3electionpb.NewElectionClient(conn).Observe(ctx, req) + if err != nil { + return err + } + for { + rr, err := sc.Recv() + if err != nil { + return err + } + if err = s.Send(rr); err != nil { + return err + } + } +} + +func (ep *electionProxy) Resign(ctx context.Context, req *v3electionpb.ResignRequest) (*v3electionpb.ResignResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Resign(ctx, req) +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go index 36885135797..0654729a0ae 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go @@ -33,11 +33,7 @@ func 
NewKvProxy(c *clientv3.Client) (pb.KVServer, <-chan struct{}) { cache: cache.NewCache(cache.DefaultMaxEntries), } donec := make(chan struct{}) - go func() { - defer close(donec) - <-c.Ctx().Done() - kv.cache.Close() - }() + close(donec) return kv, donec } @@ -65,12 +61,14 @@ func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRespo req.Serializable = true gresp := (*pb.RangeResponse)(resp.Get()) p.cache.Add(&req, gresp) + cacheKeys.Set(float64(p.cache.Size())) return gresp, nil } func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { p.cache.Invalidate(r.Key, nil) + cacheKeys.Set(float64(p.cache.Size())) resp, err := p.kv.Do(ctx, PutRequestToOp(r)) return (*pb.PutResponse)(resp.Put()), err @@ -78,6 +76,7 @@ func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, e func (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { p.cache.Invalidate(r.Key, r.RangeEnd) + cacheKeys.Set(float64(p.cache.Size())) resp, err := p.kv.Do(ctx, DelRequestToOp(r)) return (*pb.DeleteRangeResponse)(resp.Del()), err @@ -133,6 +132,8 @@ func (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, e p.txnToCache(r.Failure, resp.Responses) } + cacheKeys.Set(float64(p.cache.Size())) + return (*pb.TxnResponse)(resp), nil } @@ -147,6 +148,8 @@ func (p *kvProxy) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.Com p.cache.Compact(r.Revision) } + cacheKeys.Set(float64(p.cache.Size())) + return (*pb.CompactionResponse)(resp), err } @@ -183,7 +186,12 @@ func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op { opts = append(opts, clientv3.WithMinCreateRev(r.MinCreateRevision)) opts = append(opts, clientv3.WithMaxModRev(r.MaxModRevision)) opts = append(opts, clientv3.WithMinModRev(r.MinModRevision)) - + if r.CountOnly { + opts = append(opts, clientv3.WithCountOnly()) + } + if r.KeysOnly { + opts = append(opts, clientv3.WithKeysOnly()) + } if 
r.Serializable { opts = append(opts, clientv3.WithSerializable()) } @@ -194,7 +202,15 @@ func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op { func PutRequestToOp(r *pb.PutRequest) clientv3.Op { opts := []clientv3.OpOption{} opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease))) - + if r.IgnoreValue { + opts = append(opts, clientv3.WithIgnoreValue()) + } + if r.IgnoreLease { + opts = append(opts, clientv3.WithIgnoreLease()) + } + if r.PrevKv { + opts = append(opts, clientv3.WithPrevKV()) + } return clientv3.OpPut(string(r.Key), string(r.Value), opts...) } diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go new file mode 100644 index 00000000000..86afdb7072b --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go @@ -0,0 +1,114 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "math" + "sync" + + "golang.org/x/net/context" + "golang.org/x/time/rate" + "google.golang.org/grpc" + + "github.com/coreos/etcd/clientv3" +) + +const ( + lostLeaderKey = "__lostleader" // watched to detect leader loss + retryPerSecond = 10 +) + +type leader struct { + ctx context.Context + w clientv3.Watcher + // mu protects leaderc updates. 
+ mu sync.RWMutex + leaderc chan struct{} + disconnc chan struct{} + donec chan struct{} +} + +func newLeader(ctx context.Context, w clientv3.Watcher) *leader { + l := &leader{ + ctx: clientv3.WithRequireLeader(ctx), + w: w, + leaderc: make(chan struct{}), + disconnc: make(chan struct{}), + donec: make(chan struct{}), + } + // begin assuming leader is lost + close(l.leaderc) + go l.recvLoop() + return l +} + +func (l *leader) recvLoop() { + defer close(l.donec) + + limiter := rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond) + rev := int64(math.MaxInt64 - 2) + for limiter.Wait(l.ctx) == nil { + wch := l.w.Watch(l.ctx, lostLeaderKey, clientv3.WithRev(rev), clientv3.WithCreatedNotify()) + cresp, ok := <-wch + if !ok { + l.loseLeader() + continue + } + if cresp.Err() != nil { + l.loseLeader() + if grpc.ErrorDesc(cresp.Err()) == grpc.ErrClientConnClosing.Error() { + close(l.disconnc) + return + } + continue + } + l.gotLeader() + <-wch + l.loseLeader() + } +} + +func (l *leader) loseLeader() { + l.mu.RLock() + defer l.mu.RUnlock() + select { + case <-l.leaderc: + default: + close(l.leaderc) + } +} + +// gotLeader will force update the leadership status to having a leader. +func (l *leader) gotLeader() { + l.mu.Lock() + defer l.mu.Unlock() + select { + case <-l.leaderc: + l.leaderc = make(chan struct{}) + default: + } +} + +func (l *leader) disconnectNotify() <-chan struct{} { return l.disconnc } + +func (l *leader) stopNotify() <-chan struct{} { return l.donec } + +// lostNotify returns a channel that is closed if there has been +// a leader loss not yet followed by a leader reacquire. 
+func (l *leader) lostNotify() <-chan struct{} { + l.mu.RLock() + defer l.mu.RUnlock() + return l.leaderc +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go index 4f870220b79..19c2249a7e2 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go @@ -15,73 +15,353 @@ package grpcproxy import ( + "io" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "golang.org/x/net/context" "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" ) type leaseProxy struct { - client *clientv3.Client + // leaseClient handles req from LeaseGrant() that requires a lease ID. + leaseClient pb.LeaseClient + + lessor clientv3.Lease + + ctx context.Context + + leader *leader + + // mu protects adding outstanding leaseProxyStream through wg. + mu sync.RWMutex + + // wg waits until all outstanding leaseProxyStream quit. 
+ wg sync.WaitGroup } -func NewLeaseProxy(c *clientv3.Client) pb.LeaseServer { - return &leaseProxy{ - client: c, +func NewLeaseProxy(c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) { + cctx, cancel := context.WithCancel(c.Ctx()) + lp := &leaseProxy{ + leaseClient: pb.NewLeaseClient(c.ActiveConnection()), + lessor: c.Lease, + ctx: cctx, + leader: newLeader(c.Ctx(), c.Watcher), } + ch := make(chan struct{}) + go func() { + defer close(ch) + <-lp.leader.stopNotify() + lp.mu.Lock() + select { + case <-lp.ctx.Done(): + case <-lp.leader.disconnectNotify(): + cancel() + } + <-lp.ctx.Done() + lp.mu.Unlock() + lp.wg.Wait() + }() + return lp, ch } func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - conn := lp.client.ActiveConnection() - return pb.NewLeaseClient(conn).LeaseGrant(ctx, cr) + rp, err := lp.leaseClient.LeaseGrant(ctx, cr) + if err != nil { + return nil, err + } + lp.leader.gotLeader() + return rp, nil } func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - conn := lp.client.ActiveConnection() - return pb.NewLeaseClient(conn).LeaseRevoke(ctx, rr) + r, err := lp.lessor.Revoke(ctx, clientv3.LeaseID(rr.ID)) + if err != nil { + return nil, err + } + lp.leader.gotLeader() + return (*pb.LeaseRevokeResponse)(r), nil } func (lp *leaseProxy) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - conn := lp.client.ActiveConnection() - return pb.NewLeaseClient(conn).LeaseTimeToLive(ctx, rr) + var ( + r *clientv3.LeaseTimeToLiveResponse + err error + ) + if rr.Keys { + r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID), clientv3.WithAttachedKeys()) + } else { + r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID)) + } + if err != nil { + return nil, err + } + rp := &pb.LeaseTimeToLiveResponse{ + Header: r.ResponseHeader, + ID: int64(r.ID), + TTL: r.TTL, + GrantedTTL: 
r.GrantedTTL, + Keys: r.Keys, + } + return rp, err } func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { - conn := lp.client.ActiveConnection() + lp.mu.Lock() + select { + case <-lp.ctx.Done(): + lp.mu.Unlock() + return lp.ctx.Err() + default: + lp.wg.Add(1) + } + lp.mu.Unlock() + ctx, cancel := context.WithCancel(stream.Context()) - lc, err := pb.NewLeaseClient(conn).LeaseKeepAlive(ctx) - if err != nil { - cancel() - return err + lps := leaseProxyStream{ + stream: stream, + lessor: lp.lessor, + keepAliveLeases: make(map[int64]*atomicCounter), + respc: make(chan *pb.LeaseKeepAliveResponse), + ctx: ctx, + cancel: cancel, } - go func() { - // Cancel the context attached to lc to unblock lc.Recv when - // this routine returns on error. - defer cancel() + errc := make(chan error, 2) - for { - // stream.Recv will be unblock when the loop in the parent routine - // returns on error. - rr, err := stream.Recv() - if err != nil { - return - } - err = lc.Send(rr) - if err != nil { - return + var lostLeaderC <-chan struct{} + if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { + v := md[rpctypes.MetadataRequireLeaderKey] + if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { + lostLeaderC = lp.leader.lostNotify() + // if leader is known to be lost at creation time, avoid + // letting events through at all + select { + case <-lostLeaderC: + lp.wg.Done() + return rpctypes.ErrNoLeader + default: } } + } + stopc := make(chan struct{}, 3) + go func() { + defer func() { stopc <- struct{}{} }() + if err := lps.recvLoop(); err != nil { + errc <- err + } }() - for { - rr, err := lc.Recv() + go func() { + defer func() { stopc <- struct{}{} }() + if err := lps.sendLoop(); err != nil { + errc <- err + } + }() + + // tears down LeaseKeepAlive stream if leader goes down or entire leaseProxy is terminated. 
+ go func() { + defer func() { stopc <- struct{}{} }() + select { + case <-lostLeaderC: + case <-ctx.Done(): + case <-lp.ctx.Done(): + } + }() + + var err error + select { + case <-stopc: + stopc <- struct{}{} + case err = <-errc: + } + cancel() + + // recv/send may only shutdown after function exits; + // this goroutine notifies lease proxy that the stream is through + go func() { + <-stopc + <-stopc + <-stopc + lps.close() + close(errc) + lp.wg.Done() + }() + + select { + case <-lostLeaderC: + return rpctypes.ErrNoLeader + case <-lp.leader.disconnectNotify(): + return grpc.ErrClientConnClosing + default: if err != nil { return err } - err = stream.Send(rr) + return ctx.Err() + } +} + +type leaseProxyStream struct { + stream pb.Lease_LeaseKeepAliveServer + + lessor clientv3.Lease + // wg tracks keepAliveLoop goroutines + wg sync.WaitGroup + // mu protects keepAliveLeases + mu sync.RWMutex + // keepAliveLeases tracks how many outstanding keepalive requests which need responses are on a lease. 
+ keepAliveLeases map[int64]*atomicCounter + // respc receives lease keepalive responses from etcd backend + respc chan *pb.LeaseKeepAliveResponse + + ctx context.Context + cancel context.CancelFunc +} + +func (lps *leaseProxyStream) recvLoop() error { + for { + rr, err := lps.stream.Recv() + if err == io.EOF { + return nil + } if err != nil { return err } + lps.mu.Lock() + neededResps, ok := lps.keepAliveLeases[rr.ID] + if !ok { + neededResps = &atomicCounter{} + lps.keepAliveLeases[rr.ID] = neededResps + lps.wg.Add(1) + go func() { + defer lps.wg.Done() + if err := lps.keepAliveLoop(rr.ID, neededResps); err != nil { + lps.cancel() + } + }() + } + neededResps.add(1) + lps.mu.Unlock() + } +} + +func (lps *leaseProxyStream) keepAliveLoop(leaseID int64, neededResps *atomicCounter) error { + cctx, ccancel := context.WithCancel(lps.ctx) + defer ccancel() + respc, err := lps.lessor.KeepAlive(cctx, clientv3.LeaseID(leaseID)) + if err != nil { + return err + } + // ticker expires when loop hasn't received keepalive within TTL + var ticker <-chan time.Time + for { + select { + case <-ticker: + lps.mu.Lock() + // if there are outstanding keepAlive reqs at the moment of ticker firing, + // don't close keepAliveLoop(), let it continuing to process the KeepAlive reqs. 
+ if neededResps.get() > 0 { + lps.mu.Unlock() + ticker = nil + continue + } + delete(lps.keepAliveLeases, leaseID) + lps.mu.Unlock() + return nil + case rp, ok := <-respc: + if !ok { + lps.mu.Lock() + delete(lps.keepAliveLeases, leaseID) + lps.mu.Unlock() + if neededResps.get() == 0 { + return nil + } + ttlResp, err := lps.lessor.TimeToLive(cctx, clientv3.LeaseID(leaseID)) + if err != nil { + return err + } + r := &pb.LeaseKeepAliveResponse{ + Header: ttlResp.ResponseHeader, + ID: int64(ttlResp.ID), + TTL: ttlResp.TTL, + } + for neededResps.get() > 0 { + select { + case lps.respc <- r: + neededResps.add(-1) + case <-lps.ctx.Done(): + return nil + } + } + return nil + } + if neededResps.get() == 0 { + continue + } + ticker = time.After(time.Duration(rp.TTL) * time.Second) + r := &pb.LeaseKeepAliveResponse{ + Header: rp.ResponseHeader, + ID: int64(rp.ID), + TTL: rp.TTL, + } + lps.replyToClient(r, neededResps) + } } } + +func (lps *leaseProxyStream) replyToClient(r *pb.LeaseKeepAliveResponse, neededResps *atomicCounter) { + timer := time.After(500 * time.Millisecond) + for neededResps.get() > 0 { + select { + case lps.respc <- r: + neededResps.add(-1) + case <-timer: + return + case <-lps.ctx.Done(): + return + } + } +} + +func (lps *leaseProxyStream) sendLoop() error { + for { + select { + case lrp, ok := <-lps.respc: + if !ok { + return nil + } + if err := lps.stream.Send(lrp); err != nil { + return err + } + case <-lps.ctx.Done(): + return lps.ctx.Err() + } + } +} + +func (lps *leaseProxyStream) close() { + lps.cancel() + lps.wg.Wait() + // only close respc channel if all the keepAliveLoop() goroutines have finished + // this ensures those goroutines don't send resp to a closed resp channel + close(lps.respc) +} + +type atomicCounter struct { + counter int64 +} + +func (ac *atomicCounter) add(delta int64) { + atomic.AddInt64(&ac.counter, delta) +} + +func (ac *atomicCounter) get() int64 { + return atomic.LoadInt64(&ac.counter) +} diff --git 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go new file mode 100644 index 00000000000..804aff64a96 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go @@ -0,0 +1,38 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" +) + +type lockProxy struct { + client *clientv3.Client +} + +func NewLockProxy(client *clientv3.Client) v3lockpb.LockServer { + return &lockProxy{client: client} +} + +func (lp *lockProxy) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { + return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Lock(ctx, req) +} + +func (lp *lockProxy) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { + return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Unlock(ctx, req) +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go new file mode 100644 index 00000000000..c2d81804395 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go @@ -0,0 +1,19 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import "github.com/coreos/pkg/capnslog" + +var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "grpcproxy") diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go index 209dc94a712..384d1520360 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go @@ -15,6 +15,8 @@ package grpcproxy import ( + "io" + "golang.org/x/net/context" "github.com/coreos/etcd/clientv3" @@ -49,6 +51,9 @@ func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenan for { rr, err := sc.Recv() if err != nil { + if err == io.EOF { + return nil + } return err } err = stream.Send(rr) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go index f4a1d4c8de4..864fa1609a0 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go @@ -29,6 +29,12 @@ var ( Name: "events_coalescing_total", Help: "Total number of events coalescing", }) + cacheKeys = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "grpc_proxy", + Name: "cache_keys_total", + Help: "Total number of keys/ranges cached", + }) cacheHits = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "etcd", Subsystem: "grpc_proxy", @@ -46,6 +52,7 @@ var ( func init() { prometheus.MustRegister(watchersCoalescing) 
prometheus.MustRegister(eventsCoalescing) + prometheus.MustRegister(cacheKeys) prometheus.MustRegister(cacheHits) prometheus.MustRegister(cachedMisses) } diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go new file mode 100644 index 00000000000..598c71f07ae --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go @@ -0,0 +1,94 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "encoding/json" + "os" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/concurrency" + "github.com/coreos/etcd/clientv3/naming" + + "golang.org/x/time/rate" + gnaming "google.golang.org/grpc/naming" +) + +// allow maximum 1 retry per second +const registerRetryRate = 1 + +// Register registers itself as a grpc-proxy server by writing prefixed-key +// with session of specified TTL (in seconds). The returned channel is closed +// when the client's context is canceled. 
+func Register(c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{} { + rm := rate.NewLimiter(rate.Limit(registerRetryRate), registerRetryRate) + + donec := make(chan struct{}) + go func() { + defer close(donec) + + for rm.Wait(c.Ctx()) == nil { + ss, err := registerSession(c, prefix, addr, ttl) + if err != nil { + plog.Warningf("failed to create a session %v", err) + continue + } + select { + case <-c.Ctx().Done(): + ss.Close() + return + + case <-ss.Done(): + plog.Warning("session expired; possible network partition or server restart") + plog.Warning("creating a new session to rejoin") + continue + } + } + }() + + return donec +} + +func registerSession(c *clientv3.Client, prefix string, addr string, ttl int) (*concurrency.Session, error) { + ss, err := concurrency.NewSession(c, concurrency.WithTTL(ttl)) + if err != nil { + return nil, err + } + + gr := &naming.GRPCResolver{Client: c} + if err = gr.Update(c.Ctx(), prefix, gnaming.Update{Op: gnaming.Add, Addr: addr, Metadata: getMeta()}, clientv3.WithLease(ss.Lease())); err != nil { + return nil, err + } + + plog.Infof("registered %q with %d-second lease", addr, ttl) + return ss, nil +} + +// meta represents metadata of proxy register. 
+type meta struct { + Name string `json:"name"` +} + +func getMeta() string { + hostname, _ := os.Hostname() + bts, _ := json.Marshal(meta{Name: hostname}) + return string(bts) +} + +func decodeMeta(s string) (meta, error) { + m := meta{} + err := json.Unmarshal([]byte(s), &m) + return m, err +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go index 42d196ca2ca..b960c94769a 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go @@ -18,7 +18,7 @@ import ( "sync" "golang.org/x/net/context" - "golang.org/x/time/rate" + "google.golang.org/grpc" "google.golang.org/grpc/metadata" "github.com/coreos/etcd/clientv3" @@ -31,49 +31,35 @@ type watchProxy struct { cw clientv3.Watcher ctx context.Context + leader *leader + ranges *watchRanges - // retryLimiter controls the create watch retry rate on lost leaders. - retryLimiter *rate.Limiter - - // mu protects leaderc updates. - mu sync.RWMutex - leaderc chan struct{} + // mu protects adding outstanding watch servers through wg. + mu sync.Mutex // wg waits until all outstanding watch servers quit. 
wg sync.WaitGroup } -const ( - lostLeaderKey = "__lostleader" // watched to detect leader loss - retryPerSecond = 10 -) - func NewWatchProxy(c *clientv3.Client) (pb.WatchServer, <-chan struct{}) { + cctx, cancel := context.WithCancel(c.Ctx()) wp := &watchProxy{ - cw: c.Watcher, - ctx: clientv3.WithRequireLeader(c.Ctx()), - retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond), - leaderc: make(chan struct{}), + cw: c.Watcher, + ctx: cctx, + leader: newLeader(c.Ctx(), c.Watcher), } wp.ranges = newWatchRanges(wp) ch := make(chan struct{}) go func() { defer close(ch) - // a new streams without opening any watchers won't catch - // a lost leader event, so have a special watch to monitor it - rev := int64((uint64(1) << 63) - 2) - for wp.ctx.Err() == nil { - wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev)) - for range wch { - } - wp.mu.Lock() - close(wp.leaderc) - wp.leaderc = make(chan struct{}) - wp.mu.Unlock() - wp.retryLimiter.Wait(wp.ctx) - } + <-wp.leader.stopNotify() wp.mu.Lock() + select { + case <-wp.ctx.Done(): + case <-wp.leader.disconnectNotify(): + cancel() + } <-wp.ctx.Done() wp.mu.Unlock() wp.wg.Wait() @@ -87,7 +73,12 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { select { case <-wp.ctx.Done(): wp.mu.Unlock() - return + select { + case <-wp.leader.disconnectNotify(): + return grpc.ErrClientConnClosing + default: + return wp.ctx.Err() + } default: wp.wg.Add(1) } @@ -103,11 +94,19 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { cancel: cancel, } - var leaderc <-chan struct{} - if md, ok := metadata.FromContext(stream.Context()); ok { + var lostLeaderC <-chan struct{} + if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { v := md[rpctypes.MetadataRequireLeaderKey] if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { - leaderc = wp.lostLeaderNotify() + lostLeaderC = wp.leader.lostNotify() + // if leader is known to be lost at creation time, avoid + // letting 
events through at all + select { + case <-lostLeaderC: + wp.wg.Done() + return rpctypes.ErrNoLeader + default: + } } } @@ -126,7 +125,7 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { go func() { defer func() { stopc <- struct{}{} }() select { - case <-leaderc: + case <-lostLeaderC: case <-ctx.Done(): case <-wp.ctx.Done(): } @@ -145,19 +144,15 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { }() select { - case <-leaderc: + case <-lostLeaderC: return rpctypes.ErrNoLeader + case <-wp.leader.disconnectNotify(): + return grpc.ErrClientConnClosing default: return wps.ctx.Err() } } -func (wp *watchProxy) lostLeaderNotify() <-chan struct{} { - wp.mu.RLock() - defer wp.mu.RUnlock() - return wp.leaderc -} - // watchProxyStream forwards etcd watch events to a proxied client stream. type watchProxyStream struct { ranges *watchRanges diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go index 5529fb5a2bc..5e750bdb0d4 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go @@ -50,27 +50,20 @@ func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) wb.add(w) go func() { defer close(wb.donec) - // loop because leader loss will close channel - for cctx.Err() == nil { - opts := []clientv3.OpOption{ - clientv3.WithRange(w.wr.end), - clientv3.WithProgressNotify(), - clientv3.WithRev(wb.nextrev), - clientv3.WithPrevKV(), - } - // The create notification should be the first response; - // if the watch is recreated following leader loss, it - // shouldn't post a second create response to the client. - if wb.responses == 0 { - opts = append(opts, clientv3.WithCreatedNotify()) - } - wch := wp.cw.Watch(cctx, w.wr.key, opts...) 
- for wr := range wch { - wb.bcast(wr) - update(wb) - } - wp.retryLimiter.Wait(cctx) + opts := []clientv3.OpOption{ + clientv3.WithRange(w.wr.end), + clientv3.WithProgressNotify(), + clientv3.WithRev(wb.nextrev), + clientv3.WithPrevKV(), + clientv3.WithCreatedNotify(), + } + + wch := wp.cw.Watch(cctx, w.wr.key, opts...) + + for wr := range wch { + wb.bcast(wr) + update(wb) } }() return wb diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go index e860a69ce81..7387caf4dbd 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go @@ -102,16 +102,17 @@ func (w *watcher) send(wr clientv3.WatchResponse) { } // all events are filtered out? - if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 { + if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 && wr.CompactRevision == 0 { return } w.lastHeader = wr.Header w.post(&pb.WatchResponse{ - Header: &wr.Header, - Created: wr.Created, - WatchId: w.id, - Events: events, + Header: &wr.Header, + Created: wr.Created, + CompactRevision: wr.CompactRevision, + WatchId: w.id, + Events: events, }) } diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md index a724b958579..f485b839771 100644 --- a/vendor/github.com/coreos/etcd/raft/README.md +++ b/vendor/github.com/coreos/etcd/raft/README.md @@ -13,9 +13,7 @@ To keep the codebase small as well as provide flexibility, the library only impl In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. 
The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output. -A simple example application, _raftexample_, is also available to help illustrate -how to use this package in practice: -https://github.com/coreos/etcd/tree/master/contrib/raftexample +A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample # Features @@ -51,11 +49,11 @@ This raft implementation also includes a few optional enhancements: - [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store - [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft - [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale. +- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks ## Usage -The primary object in raft is a Node. You either start a Node from scratch -using raft.StartNode or start a Node from some initial state using raft.RestartNode. +The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode. To start a three-node cluster ```go @@ -73,7 +71,7 @@ To start a three-node cluster n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) ``` -You can start a single node cluster, like so: +Start a single node cluster, like so: ```go // Create storage and config as shown above. // Set peer list to itself, so this node can become the leader of this single-node cluster. 
@@ -81,7 +79,7 @@ You can start a single node cluster, like so: n := raft.StartNode(c, peers) ``` -To allow a new node to join this cluster, do not pass in any peers. First, you need add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, you can start the node with empty peer list, like so: +To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so: ```go // Create storage and config as shown above. n := raft.StartNode(c, nil) @@ -110,46 +108,21 @@ To restart a node from previous state: n := raft.RestartNode(c) ``` -Now that you are holding onto a Node you have a few responsibilities: +After creating a Node, the user has a few responsibilities: -First, you must read from the Node.Ready() channel and process the updates -it contains. These steps may be performed in parallel, except as noted in step -2. +First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2. -1. Write HardState, Entries, and Snapshot to persistent storage if they are -not empty. Note that when writing an Entry with Index i, any -previously-persisted entries with Index >= i must be discarded. +1. Write HardState, Entries, and Snapshot to persistent storage if they are not empty. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded. -2. Send all Messages to the nodes named in the To field. It is important that -no messages be sent until the latest HardState has been persisted to disk, -and all Entries written by any previous Ready batch (Messages may be sent while -entries from the same batch are being persisted). 
To reduce the I/O latency, an -optimization can be applied to make leader write to disk in parallel with its -followers (as explained at section 10.2.1 in Raft thesis). If any Message has type -MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be -large). Note: Marshalling messages is not thread-safe; it is important that you -make sure that no new entries are persisted while marshalling. -The easiest way to achieve this is to serialise the messages directly inside -your main raft loop. +2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop. -3. Apply Snapshot (if any) and CommittedEntries to the state machine. -If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() -to apply it to the node. The configuration change may be cancelled at this point -by setting the NodeID field to zero before calling ApplyConfChange -(but ApplyConfChange must be called one way or the other, and the decision to cancel -must be based solely on the state machine and not external information such as -the observed health of the node). +3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. 
The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node). -4. Call Node.Advance() to signal readiness for the next batch of updates. -This may be done at any time after step 1, although all updates must be processed -in the order they were returned by Ready. +4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready. -Second, all persisted log entries must be made available via an -implementation of the Storage interface. The provided MemoryStorage -type can be used for this (if you repopulate its state upon a -restart), or you can supply your own disk-backed implementation. +Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied. -Third, when you receive a message from another node, pass it to Node.Step: +Third, after receiving a message from another node, pass it to Node.Step: ```go func recvRaftRPC(ctx context.Context, m raftpb.Message) { @@ -157,10 +130,7 @@ Third, when you receive a message from another node, pass it to Node.Step: } ``` -Finally, you need to call `Node.Tick()` at regular intervals (probably -via a `time.Ticker`). Raft has two important timeouts: heartbeat and the -election timeout. However, internally to the raft package time is -represented by an abstract "tick". +Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. 
However, internally to the raft package time is represented by an abstract "tick". The total state machine handling loop will look something like this: @@ -190,16 +160,13 @@ The total state machine handling loop will look something like this: } ``` -To propose changes to the state machine from your node take your application -data, serialize it into a byte slice and call: +To propose changes to the state machine from the node to take application data, serialize it into a byte slice and call: ```go n.Propose(ctx, data) ``` -If the proposal is committed, data will appear in committed entries with type -raftpb.EntryNormal. There is no guarantee that a proposed command will be -committed; you may have to re-propose after a timeout. +If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. To add or remove node in a cluster, build ConfChange struct 'cc' and call: @@ -207,8 +174,7 @@ To add or remove node in a cluster, build ConfChange struct 'cc' and call: n.ProposeConfChange(ctx, cc) ``` -After config change is committed, some committed entry with type -raftpb.EntryConfChange will be returned. You must apply it to node through: +After config change is committed, some committed entry with type raftpb.EntryConfChange will be returned. This must be applied to node through: ```go var cc raftpb.ConfChange @@ -223,25 +189,8 @@ may be reused. Node IDs must be non-zero. ## Implementation notes -This implementation is up to date with the final Raft thesis -(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our -implementation of the membership change protocol differs somewhat from -that described in chapter 4. 
The key invariant that membership changes -happen one node at a time is preserved, but in our implementation the -membership change takes effect when its entry is applied, not when it -is added to the log (so the entry is committed under the old -membership instead of the new). This is equivalent in terms of safety, -since the old and new configurations are guaranteed to overlap. +This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap. -To ensure that we do not attempt to commit two membership changes at -once by matching log positions (which would be unsafe since they -should have different quorum requirements), we simply disallow any -proposed membership change while any uncommitted change appears in -the leader's log. +To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log. -This approach introduces a problem when you try to remove a member -from a two-member cluster: If one of the members dies before the -other one receives the commit of the confchange entry, then the member -cannot be removed any more since the cluster cannot make progress. -For this reason it is highly recommended to use three or more nodes in -every cluster. 
+This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster. diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go index 8ae301c3d8d..263af9ce405 100644 --- a/vendor/github.com/coreos/etcd/raft/log_unstable.go +++ b/vendor/github.com/coreos/etcd/raft/log_unstable.go @@ -85,6 +85,26 @@ func (u *unstable) stableTo(i, t uint64) { if gt == t && i >= u.offset { u.entries = u.entries[i+1-u.offset:] u.offset = i + 1 + u.shrinkEntriesArray() + } +} + +// shrinkEntriesArray discards the underlying array used by the entries slice +// if most of it isn't being used. This avoids holding references to a bunch of +// potentially large entries that aren't needed anymore. Simply clearing the +// entries wouldn't be safe because clients might still be using them. +func (u *unstable) shrinkEntriesArray() { + // We replace the array if we're using less than half of the space in + // it. This number is fairly arbitrary, chosen as an attempt to balance + // memory usage vs number of allocations. It could probably be improved + // with some focused tuning. 
+ const lenMultiple = 2 + if len(u.entries) == 0 { + u.entries = nil + } else if len(u.entries)*lenMultiple < cap(u.entries) { + newEntries := make([]pb.Entry, len(u.entries)) + copy(newEntries, u.entries) + u.entries = newEntries } } diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go index c8410fdc77f..5da1c1193b2 100644 --- a/vendor/github.com/coreos/etcd/raft/node.go +++ b/vendor/github.com/coreos/etcd/raft/node.go @@ -83,6 +83,10 @@ type Ready struct { // If it contains a MsgSnap message, the application MUST report back to raft // when the snapshot has been received or has failed by calling ReportSnapshot. Messages []pb.Message + + // MustSync indicates whether the HardState and Entries must be synchronously + // written to disk or if an asynchronous write is permissible. + MustSync bool } func isHardStateEqual(a, b pb.HardState) bool { @@ -517,5 +521,17 @@ func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { if len(r.readStates) != 0 { rd.ReadStates = r.readStates } + rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries)) return rd } + +// MustSync returns true if the hard state and count of Raft entries indicate +// that a synchronous write to persistent storage is required. +func MustSync(st, prevst pb.HardState, entsnum int) bool { + // Persistent state on all servers: + // (Updated on stable storage before responding to RPCs) + // currentTerm + // votedFor + // log entries[] + return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term +} diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go index 7be4407ee2b..29f20398203 100644 --- a/vendor/github.com/coreos/etcd/raft/raft.go +++ b/vendor/github.com/coreos/etcd/raft/raft.go @@ -1159,6 +1159,10 @@ func (r *raft) addNode(id uint64) { } r.setProgress(id, 0, r.raftLog.lastIndex()+1) + // When a node is first added, we should mark it as recently active. 
+ // Otherwise, CheckQuorum may cause us to step down if it is invoked + // before the added node has a chance to communicate with us. + r.prs[id].RecentActive = true } func (r *raft) removeNode(id uint64) { diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go index 86ad3120708..4c6e79d58a0 100644 --- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -1558,25 +1558,67 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for 
field Nodes", wireType) } - m.Nodes = append(m.Nodes, v) default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) @@ -1847,7 +1889,7 @@ func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } var fileDescriptorRaft = []byte{ // 790 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, 0x10, 0x16, 0x29, 0xea, 0x6f, 0x28, 0xcb, 0xab, 0xb5, 0x5a, 0x2c, 0x0c, 0x43, 0x55, 0x85, 0x1e, 0x04, 0x17, 0x76, 0x5b, 0x1d, 0x7a, 0xe8, 0xcd, 0x96, 0x0a, 0x58, 0x40, 0x65, 0xb8, 0xb2, 0xdc, 0x43, 0x83, 0x20, 0x58, 0x8b, 0x2b, 0x4a, 0x89, 0xc9, 0x25, 0x96, 0x2b, 0xc7, 0xbe, 0x04, 0x79, diff --git a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go index ccd9eb78698..d9f07c3479d 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go +++ b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go @@ -16,13 +16,13 @@ package rafthttp import ( "bytes" + "context" "errors" "io/ioutil" "sync" "time" "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/httputil" "github.com/coreos/etcd/pkg/pbutil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft" @@ -118,7 +118,8 @@ func (p *pipeline) post(data []byte) (err error) { req := createPostRequest(u, RaftPrefix, bytes.NewBuffer(data), "application/protobuf", p.tr.URLs, p.tr.ID, p.tr.ClusterID) done := make(chan struct{}, 1) - cancel := httputil.RequestCanceler(req) + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) go func() { select { case <-done: diff --git a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go index 105b330728e..52273c9d195 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go +++ 
b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go @@ -16,6 +16,7 @@ package rafthttp import ( "bytes" + "context" "io" "io/ioutil" "net/http" @@ -104,7 +105,9 @@ func (s *snapshotSender) send(merged snap.Message) { // post posts the given request. // It returns nil when request is sent out and processed successfully. func (s *snapshotSender) post(req *http.Request) (err error) { - cancel := httputil.RequestCanceler(req) + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) + defer cancel() type responseAndError struct { resp *http.Response @@ -130,7 +133,6 @@ func (s *snapshotSender) post(req *http.Request) (err error) { select { case <-s.stopc: - cancel() return errStopped case r := <-result: if r.err != nil { diff --git a/vendor/github.com/coreos/etcd/rafthttp/stream.go b/vendor/github.com/coreos/etcd/rafthttp/stream.go index e69a44ff65a..2a6c620f56d 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/stream.go +++ b/vendor/github.com/coreos/etcd/rafthttp/stream.go @@ -15,10 +15,10 @@ package rafthttp import ( + "context" "fmt" "io" "io/ioutil" - "net" "net/http" "path" "strings" @@ -27,6 +27,7 @@ import ( "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/httputil" + "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/version" @@ -51,6 +52,7 @@ var ( "2.3.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.0.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.1.0": {streamTypeMsgAppV2, streamTypeMessage}, + "3.2.0": {streamTypeMsgAppV2, streamTypeMessage}, } ) @@ -140,7 +142,8 @@ func (cw *streamWriter) run() { flusher http.Flusher batched int ) - tickc := time.Tick(ConnReadTimeout / 3) + tickc := time.NewTicker(ConnReadTimeout / 3) + defer tickc.Stop() unflushed := 0 plog.Infof("started streaming with peer %s (writer)", cw.peerID) @@ -212,7 +215,7 @@ func (cw *streamWriter) run() { plog.Warningf("closed an existing TCP 
streaming connection with peer %s (%s writer)", cw.peerID, t) } plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t) - heartbeatc, msgc = tickc, cw.msgc + heartbeatc, msgc = tickc.C, cw.msgc case <-cw.stopc: if cw.close() { plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) @@ -314,7 +317,7 @@ func (cr *streamReader) run() { // all data is read out case err == io.EOF: // connection is closed by the remote - case isClosedConnectionError(err): + case transport.IsClosedConnError(err): default: cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error()) } @@ -426,14 +429,17 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { setPeerURLsHeader(req, cr.tr.URLs) + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) + cr.mu.Lock() + cr.cancel = cancel select { case <-cr.stopc: cr.mu.Unlock() return nil, fmt.Errorf("stream reader is stopped") default: } - cr.cancel = httputil.RequestCanceler(req) cr.mu.Unlock() resp, err := cr.tr.streamRt.RoundTrip(req) @@ -508,11 +514,6 @@ func (cr *streamReader) resume() { cr.paused = false } -func isClosedConnectionError(err error) bool { - operr, ok := err.(*net.OpError) - return ok && operr.Err.Error() == "use of closed network connection" -} - // checkStreamSupport checks whether the stream type is supported in the // given version. 
func checkStreamSupport(v *semver.Version, t streamType) bool { diff --git a/vendor/github.com/coreos/etcd/rafthttp/util.go b/vendor/github.com/coreos/etcd/rafthttp/util.go index 61855c52a60..12e548c7717 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/util.go +++ b/vendor/github.com/coreos/etcd/rafthttp/util.go @@ -15,8 +15,6 @@ package rafthttp import ( - "crypto/tls" - "encoding/binary" "fmt" "io" "net" @@ -27,7 +25,6 @@ import ( "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" ) @@ -39,8 +36,8 @@ var ( // NewListener returns a listener for raft message transfer between peers. // It uses timeout listener to identify broken streams promptly. -func NewListener(u url.URL, tlscfg *tls.Config) (net.Listener, error) { - return transport.NewTimeoutListener(u.Host, u.Scheme, tlscfg, ConnReadTimeout, ConnWriteTimeout) +func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) { + return transport.NewTimeoutListener(u.Host, u.Scheme, tlsinfo, ConnReadTimeout, ConnWriteTimeout) } // NewRoundTripper returns a roundTripper used to send requests @@ -61,31 +58,6 @@ func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout) } -func writeEntryTo(w io.Writer, ent *raftpb.Entry) error { - size := ent.Size() - if err := binary.Write(w, binary.BigEndian, uint64(size)); err != nil { - return err - } - b, err := ent.Marshal() - if err != nil { - return err - } - _, err = w.Write(b) - return err -} - -func readEntryFrom(r io.Reader, ent *raftpb.Entry) error { - var l uint64 - if err := binary.Read(r, binary.BigEndian, &l); err != nil { - return err - } - buf := make([]byte, int(l)) - if _, err := io.ReadFull(r, buf); err != nil { - return err - } - return ent.Unmarshal(buf) -} - // createPostRequest creates a HTTP 
POST request that sends raft message. func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request { uu := u diff --git a/vendor/github.com/coreos/etcd/snap/db.go b/vendor/github.com/coreos/etcd/snap/db.go index ae3c743f80c..01d897ae861 100644 --- a/vendor/github.com/coreos/etcd/snap/db.go +++ b/vendor/github.com/coreos/etcd/snap/db.go @@ -15,6 +15,7 @@ package snap import ( + "errors" "fmt" "io" "io/ioutil" @@ -24,6 +25,8 @@ import ( "github.com/coreos/etcd/pkg/fileutil" ) +var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist") + // SaveDBFrom saves snapshot of the database from the given reader. It // guarantees the save operation is atomic. func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { @@ -41,7 +44,7 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { os.Remove(f.Name()) return n, err } - fn := filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) + fn := s.dbFilePath(id) if fileutil.Exist(fn) { os.Remove(f.Name()) return n, nil @@ -60,15 +63,15 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { // DBFilePath returns the file path for the snapshot of the database with // given id. If the snapshot does not exist, it returns error. 
func (s *Snapshotter) DBFilePath(id uint64) (string, error) { - fns, err := fileutil.ReadDir(s.dir) - if err != nil { + if _, err := fileutil.ReadDir(s.dir); err != nil { return "", err } - wfn := fmt.Sprintf("%016x.snap.db", id) - for _, fn := range fns { - if fn == wfn { - return filepath.Join(s.dir, fn), nil - } + if fn := s.dbFilePath(id); fileutil.Exist(fn) { + return fn, nil } - return "", fmt.Errorf("snap: snapshot file doesn't exist") + return "", ErrNoDBSnapshot +} + +func (s *Snapshotter) dbFilePath(id uint64) string { + return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) } diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go index 130e2277c84..05a77ff9d06 100644 --- a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go +++ b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go @@ -342,7 +342,7 @@ func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) } var fileDescriptorSnap = []byte{ // 126 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, diff --git a/vendor/github.com/coreos/etcd/store/node.go b/vendor/github.com/coreos/etcd/store/node.go index 731327b08ba..54159553500 100644 --- a/vendor/github.com/coreos/etcd/store/node.go +++ b/vendor/github.com/coreos/etcd/store/node.go @@ -332,7 +332,6 @@ func (n *node) UpdateTTL(expireTime time.Time) { n.ExpireTime = expireTime // push into ttl heap n.store.ttlKeyHeap.push(n) - return } // Compare function compares node index and value with provided ones. 
diff --git a/vendor/github.com/coreos/etcd/store/store.go b/vendor/github.com/coreos/etcd/store/store.go index 6c19ad4c970..edf7f21942b 100644 --- a/vendor/github.com/coreos/etcd/store/store.go +++ b/vendor/github.com/coreos/etcd/store/store.go @@ -682,6 +682,9 @@ func (s *store) DeleteExpiredKeys(cutoff time.Time) { e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex) e.EtcdIndex = s.CurrentIndex e.PrevNode = node.Repr(false, false, s.clock) + if node.IsDir() { + e.Node.Dir = true + } callback := func(path string) { // notify function // notify the watchers with deleted set true diff --git a/vendor/github.com/coreos/etcd/store/watcher_hub.go b/vendor/github.com/coreos/etcd/store/watcher_hub.go index 6dd63f3c541..13c23e391d9 100644 --- a/vendor/github.com/coreos/etcd/store/watcher_hub.go +++ b/vendor/github.com/coreos/etcd/store/watcher_hub.go @@ -116,7 +116,7 @@ func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeInde } func (wh *watcherHub) add(e *Event) { - e = wh.EventHistory.addEvent(e) + wh.EventHistory.addEvent(e) } // notify function accepts an event and notify to the watchers. diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go index 0173d6f11d3..a09a2a33c0d 100644 --- a/vendor/github.com/coreos/etcd/version/version.go +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
MinClusterVersion = "3.0.0" - Version = "3.1.10" + Version = "3.2.11" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/github.com/coreos/etcd/wal/encoder.go b/vendor/github.com/coreos/etcd/wal/encoder.go index efe58928cc8..aac1e197e59 100644 --- a/vendor/github.com/coreos/etcd/wal/encoder.go +++ b/vendor/github.com/coreos/etcd/wal/encoder.go @@ -52,7 +52,7 @@ func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder { // newFileEncoder creates a new encoder with current file offset for the page writer. func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) { - offset, err := f.Seek(0, os.SEEK_CUR) + offset, err := f.Seek(0, io.SeekCurrent) if err != nil { return nil, err } diff --git a/vendor/github.com/coreos/etcd/wal/repair.go b/vendor/github.com/coreos/etcd/wal/repair.go index ffb14161682..091036b57b9 100644 --- a/vendor/github.com/coreos/etcd/wal/repair.go +++ b/vendor/github.com/coreos/etcd/wal/repair.go @@ -62,7 +62,7 @@ func Repair(dirpath string) bool { } defer bf.Close() - if _, err = f.Seek(0, os.SEEK_SET); err != nil { + if _, err = f.Seek(0, io.SeekStart); err != nil { plog.Errorf("could not repair %v, failed to read file", f.Name()) return false } diff --git a/vendor/github.com/coreos/etcd/wal/wal.go b/vendor/github.com/coreos/etcd/wal/wal.go index b65f6448304..2cac25c1c90 100644 --- a/vendor/github.com/coreos/etcd/wal/wal.go +++ b/vendor/github.com/coreos/etcd/wal/wal.go @@ -112,7 +112,7 @@ func Create(dirpath string, metadata []byte) (*WAL, error) { if err != nil { return nil, err } - if _, err = f.Seek(0, os.SEEK_END); err != nil { + if _, err = f.Seek(0, io.SeekEnd); err != nil { return nil, err } if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil { @@ -322,7 +322,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. // not all, will cause CRC errors on WAL open. 
Since the records // were never fully synced to disk in the first place, it's safe // to zero them out to avoid any CRC errors from new writes. - if _, err = w.tail().Seek(w.decoder.lastOffset(), os.SEEK_SET); err != nil { + if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil { return nil, state, nil, err } if err = fileutil.ZeroToEnd(w.tail().File); err != nil { @@ -361,7 +361,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. // Then cut atomically rename temp wal file to a wal file. func (w *WAL) cut() error { // close old wal file; truncate to avoid wasting space if an early cut - off, serr := w.tail().Seek(0, os.SEEK_CUR) + off, serr := w.tail().Seek(0, io.SeekCurrent) if serr != nil { return serr } @@ -401,7 +401,7 @@ func (w *WAL) cut() error { return err } - off, err = w.tail().Seek(0, os.SEEK_CUR) + off, err = w.tail().Seek(0, io.SeekCurrent) if err != nil { return err } @@ -418,7 +418,7 @@ func (w *WAL) cut() error { if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil { return err } - if _, err = newTail.Seek(off, os.SEEK_SET); err != nil { + if _, err = newTail.Seek(off, io.SeekStart); err != nil { return err } @@ -552,7 +552,7 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { return nil } - mustSync := mustSync(st, w.state, len(ents)) + mustSync := raft.MustSync(st, w.state, len(ents)) // TODO(xiangli): no more reference operator for i := range ents { @@ -564,7 +564,7 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { return err } - curOff, err := w.tail().Seek(0, os.SEEK_CUR) + curOff, err := w.tail().Seek(0, io.SeekCurrent) if err != nil { return err } @@ -618,15 +618,6 @@ func (w *WAL) seq() uint64 { return seq } -func mustSync(st, prevst raftpb.HardState, entsnum int) bool { - // Persistent state on all servers: - // (Updated on stable storage before responding to RPCs) - // currentTerm - // votedFor - // 
log entries[] - return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term -} - func closeAll(rcs ...io.ReadCloser) error { for _, f := range rcs { if err := f.Close(); err != nil { diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go index e1a77d5e51a..664fae1305b 100644 --- a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go +++ b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go @@ -506,7 +506,7 @@ func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) } var fileDescriptorRecord = []byte{ // 186 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6, 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD new file mode 100644 index 00000000000..3f80de51569 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["descriptor.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["descriptor.pb.go"], + importpath = "github.com/golang/protobuf/protoc-gen-go/descriptor", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = 
["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile new file mode 100644 index 00000000000..f706871a6fa --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile @@ -0,0 +1,37 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/descriptor.proto +regenerate: + @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION + cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto . + protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 00000000000..c6a91bcab9c --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2215 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. 
+ +It is generated from these files: + google/protobuf/descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + ExtensionRangeOptions + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. 
+ FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 
17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. 
+type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } + +type FieldOptions_CType int32 + +const ( + // Default mode. 
+ FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} } + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. 
+type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). 
+ TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. 
+ JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + 
return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. 
+ // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. 
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. 
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +var extRange_FileOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) 
GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + 
return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. 
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +var extRange_MessageOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. 
This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. 
However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +var extRange_FieldOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + 
+type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +var extRange_EnumOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +var extRange_MethodOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. 
+type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) 
GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{18, 0} +} + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. 
This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. 
+ // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. 
Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. 
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), 
"google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", 
MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2519 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63, + 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec, + 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad, + 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50, + 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb, + 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 0x33, + 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d, + 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90, + 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43, + 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4, + 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61, + 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a, + 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76, + 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68, + 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3, + 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55, + 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 
0x14, 0x68, 0x0d, + 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6, + 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7, + 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa, + 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13, + 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2, + 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35, + 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e, + 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2, + 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec, + 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07, + 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94, + 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2, + 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e, + 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16, + 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91, + 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda, + 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79, + 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1, + 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9, + 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67, + 0x34, 0x61, 0x79, 
0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b, + 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65, + 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba, + 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e, + 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48, + 0x39, 0x25, 0x68, 0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c, + 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1, + 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91, + 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78, + 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c, + 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e, + 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61, + 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73, + 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76, + 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, + 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f, + 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc, + 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f, + 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54, + 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e, + 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 
0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d, + 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7, + 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda, + 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4, + 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f, + 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82, + 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4, + 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13, + 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3, + 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9, + 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78, + 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e, + 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10, + 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, + 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03, + 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, + 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37, + 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f, + 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e, + 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0, + 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 
0xf4, + 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80, + 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f, + 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96, + 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1, + 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa, + 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc, + 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59, + 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96, + 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50, + 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27, + 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58, + 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a, + 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67, + 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e, + 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27, + 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f, + 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0, + 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2, + 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4, + 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4, + 0x8a, 0x33, 0x76, 0xe2, 0xfd, 
0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c, + 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43, + 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76, + 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b, + 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28, + 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e, + 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55, + 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2, + 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd, + 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59, + 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27, + 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64, + 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d, + 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18, + 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1, + 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5, + 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 0x50, 0xf2, 0x28, 0x79, 0x24, + 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8, + 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94, + 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91, + 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 
0xa2, 0x5c, 0xd5, 0x83, 0x72, + 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c, + 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75, + 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4, + 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee, + 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e, + 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f, + 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20, + 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e, + 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8, + 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb, + 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16, + 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc, + 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1, + 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a, + 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd, + 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0, + 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25, + 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12, + 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e, + 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4, + 0x6e, 
0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09, + 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd, + 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43, + 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14, + 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7, + 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59, + 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e, + 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8, + 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a, + 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed, + 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f, + 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 0xe4, + 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f, + 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24, + 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66, + 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57, + 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c, + 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto new file mode 100644 index 00000000000..4d4fb378f50 --- /dev/null +++ 
b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -0,0 +1,849 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. 
+// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. 
+ // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. 
+ // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. 
If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. 
+// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. 
+ // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. 
When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. 
Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. 
Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. 
+ extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. 
+ repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. 
is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. 
For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. 
For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed=true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed=true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. 
+ // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. 
The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/karlseguin/ccache/Makefile b/vendor/github.com/karlseguin/ccache/Makefile deleted file mode 100644 index 5b3f26bafdc..00000000000 --- a/vendor/github.com/karlseguin/ccache/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -t: - go test ./... - -f: - go fmt ./... diff --git a/vendor/github.com/karlseguin/ccache/bucket.go b/vendor/github.com/karlseguin/ccache/bucket.go deleted file mode 100644 index d67535170c5..00000000000 --- a/vendor/github.com/karlseguin/ccache/bucket.go +++ /dev/null @@ -1,41 +0,0 @@ -package ccache - -import ( - "sync" - "time" -) - -type bucket struct { - sync.RWMutex - lookup map[string]*Item -} - -func (b *bucket) get(key string) *Item { - b.RLock() - defer b.RUnlock() - return b.lookup[key] -} - -func (b *bucket) set(key string, value interface{}, duration time.Duration) (*Item, *Item) { - expires := time.Now().Add(duration).UnixNano() - item := newItem(key, value, expires) - b.Lock() - defer b.Unlock() - existing := b.lookup[key] - b.lookup[key] = item - return item, existing -} - -func (b *bucket) delete(key string) *Item { - b.Lock() - defer b.Unlock() - item := b.lookup[key] - delete(b.lookup, key) - return item -} - -func (b *bucket) clear() { - b.Lock() - defer b.Unlock() - b.lookup = make(map[string]*Item) -} diff --git a/vendor/github.com/karlseguin/ccache/cache.go b/vendor/github.com/karlseguin/ccache/cache.go deleted file mode 100644 index a9e94f486f9..00000000000 --- a/vendor/github.com/karlseguin/ccache/cache.go +++ /dev/null @@ -1,227 +0,0 @@ -// An LRU cached aimed at high concurrency -package ccache - -import ( - "container/list" - "hash/fnv" - "sync/atomic" - "time" -) - -type Cache struct { - *Configuration - list *list.List - size int64 - buckets []*bucket - bucketMask uint32 - deletables chan *Item - promotables chan *Item - donec chan struct{} -} - -// Create a new 
cache with the specified configuration -// See ccache.Configure() for creating a configuration -func New(config *Configuration) *Cache { - c := &Cache{ - list: list.New(), - Configuration: config, - bucketMask: uint32(config.buckets) - 1, - buckets: make([]*bucket, config.buckets), - } - for i := 0; i < int(config.buckets); i++ { - c.buckets[i] = &bucket{ - lookup: make(map[string]*Item), - } - } - c.restart() - return c -} - -// Get an item from the cache. Returns nil if the item wasn't found. -// This can return an expired item. Use item.Expired() to see if the item -// is expired and item.TTL() to see how long until the item expires (which -// will be negative for an already expired item). -func (c *Cache) Get(key string) *Item { - item := c.bucket(key).get(key) - if item == nil { - return nil - } - if item.expires > time.Now().UnixNano() { - c.promote(item) - } - return item -} - -// Used when the cache was created with the Track() configuration option. -// Avoid otherwise -func (c *Cache) TrackingGet(key string) TrackedItem { - item := c.Get(key) - if item == nil { - return NilTracked - } - item.track() - return item -} - -// Set the value in the cache for the specified duration -func (c *Cache) Set(key string, value interface{}, duration time.Duration) { - c.set(key, value, duration) -} - -// Replace the value if it exists, does not set if it doesn't. -// Returns true if the item existed an was replaced, false otherwise. -// Replace does not reset item's TTL -func (c *Cache) Replace(key string, value interface{}) bool { - item := c.bucket(key).get(key) - if item == nil { - return false - } - c.Set(key, value, item.TTL()) - return true -} - -// Attempts to get the value from the cache and calles fetch on a miss (missing -// or stale item). If fetch returns an error, no value is cached and the error -// is returned back to the caller. 
-func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { - item := c.Get(key) - if item != nil && !item.Expired() { - return item, nil - } - value, err := fetch() - if err != nil { - return nil, err - } - return c.set(key, value, duration), nil -} - -// Remove the item from the cache, return true if the item was present, false otherwise. -func (c *Cache) Delete(key string) bool { - item := c.bucket(key).delete(key) - if item != nil { - c.deletables <- item - return true - } - return false -} - -//this isn't thread safe. It's meant to be called from non-concurrent tests -func (c *Cache) Clear() { - for _, bucket := range c.buckets { - bucket.clear() - } - c.size = 0 - c.list = list.New() -} - -// Stops the background worker. Operations performed on the cache after Stop -// is called are likely to panic -func (c *Cache) Stop() { - close(c.promotables) - <-c.donec -} - -func (c *Cache) restart() { - c.deletables = make(chan *Item, c.deleteBuffer) - c.promotables = make(chan *Item, c.promoteBuffer) - c.donec = make(chan struct{}) - go c.worker() -} - -func (c *Cache) deleteItem(bucket *bucket, item *Item) { - bucket.delete(item.key) //stop other GETs from getting it - c.deletables <- item -} - -func (c *Cache) set(key string, value interface{}, duration time.Duration) *Item { - item, existing := c.bucket(key).set(key, value, duration) - if existing != nil { - c.deletables <- existing - } - c.promote(item) - return item -} - -func (c *Cache) bucket(key string) *bucket { - h := fnv.New32a() - h.Write([]byte(key)) - return c.buckets[h.Sum32()&c.bucketMask] -} - -func (c *Cache) promote(item *Item) { - c.promotables <- item -} - -func (c *Cache) worker() { - defer close(c.donec) - - for { - select { - case item, ok := <-c.promotables: - if ok == false { - goto drain - } - if c.doPromote(item) && c.size > c.maxSize { - c.gc() - } - case item := <-c.deletables: - c.doDelete(item) - } - } - -drain: - for { - select { - 
case item := <-c.deletables: - c.doDelete(item) - default: - close(c.deletables) - return - } - } -} - -func (c *Cache) doDelete(item *Item) { - if item.element == nil { - item.promotions = -2 - } else { - c.size -= item.size - c.list.Remove(item.element) - } -} - -func (c *Cache) doPromote(item *Item) bool { - //already deleted - if item.promotions == -2 { - return false - } - if item.element != nil { //not a new item - if item.shouldPromote(c.getsPerPromote) { - c.list.MoveToFront(item.element) - item.promotions = 0 - } - return false - } - - c.size += item.size - item.element = c.list.PushFront(item) - return true -} - -func (c *Cache) gc() { - element := c.list.Back() - for i := 0; i < c.itemsToPrune; i++ { - if element == nil { - return - } - prev := element.Prev() - item := element.Value.(*Item) - if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 { - c.bucket(item.key).delete(item.key) - c.size -= item.size - c.list.Remove(element) - item.promotions = -2 - } - element = prev - } -} diff --git a/vendor/github.com/karlseguin/ccache/configuration.go b/vendor/github.com/karlseguin/ccache/configuration.go deleted file mode 100644 index daa8357767e..00000000000 --- a/vendor/github.com/karlseguin/ccache/configuration.go +++ /dev/null @@ -1,94 +0,0 @@ -package ccache - -type Configuration struct { - maxSize int64 - buckets int - itemsToPrune int - deleteBuffer int - promoteBuffer int - getsPerPromote int32 - tracking bool -} - -// Creates a configuration object with sensible defaults -// Use this as the start of the fluent configuration: -// e.g.: ccache.New(ccache.Configure().MaxSize(10000)) -func Configure() *Configuration { - return &Configuration{ - buckets: 16, - itemsToPrune: 500, - deleteBuffer: 1024, - getsPerPromote: 3, - promoteBuffer: 1024, - maxSize: 5000, - tracking: false, - } -} - -// The max size for the cache -// [5000] -func (c *Configuration) MaxSize(max int64) *Configuration { - c.maxSize = max - return c -} - -// Keys are hashed 
into % bucket count to provide greater concurrency (every set -// requires a write lock on the bucket). Must be a power of 2 (1, 2, 4, 8, 16, ...) -// [16] -func (c *Configuration) Buckets(count uint32) *Configuration { - if count == 0 || ((count&(^count+1)) == count) == false { - count = 16 - } - c.buckets = int(count) - return c -} - -// The number of items to prune when memory is low -// [500] -func (c *Configuration) ItemsToPrune(count uint32) *Configuration { - c.itemsToPrune = int(count) - return c -} - -// The size of the queue for items which should be promoted. If the queue fills -// up, promotions are skipped -// [1024] -func (c *Configuration) PromoteBuffer(size uint32) *Configuration { - c.promoteBuffer = int(size) - return c -} - -// The size of the queue for items which should be deleted. If the queue fills -// up, calls to Delete() will block -func (c *Configuration) DeleteBuffer(size uint32) *Configuration { - c.deleteBuffer = int(size) - return c -} - -// Give a large cache with a high read / write ratio, it's usually unecessary -// to promote an item on every Get. GetsPerPromote specifies the number of Gets -// a key must have before being promoted -// [3] -func (c *Configuration) GetsPerPromote(count int32) *Configuration { - c.getsPerPromote = count - return c -} - -// Typically, a cache is agnostic about how cached values are use. This is fine -// for a typical cache usage, where you fetch an item from the cache, do something -// (write it out) and nothing else. - -// However, if callers are going to keep a reference to a cached item for a long -// time, things get messy. Specifically, the cache can evict the item, while -// references still exist. Technically, this isn't an issue. However, if you reload -// the item back into the cache, you end up with 2 objects representing the same -// data. This is a waste of space and could lead to weird behavior (the type an -// identity map is meant to solve). 
- -// By turning tracking on and using the cache's TrackingGet, the cache -// won't evict items which you haven't called Release() on. It's a simple reference -// counter. -func (c *Configuration) Track() *Configuration { - c.tracking = true - return c -} diff --git a/vendor/github.com/karlseguin/ccache/item.go b/vendor/github.com/karlseguin/ccache/item.go deleted file mode 100644 index bb7c04fff9d..00000000000 --- a/vendor/github.com/karlseguin/ccache/item.go +++ /dev/null @@ -1,103 +0,0 @@ -package ccache - -import ( - "container/list" - "sync/atomic" - "time" -) - -type Sized interface { - Size() int64 -} - -type TrackedItem interface { - Value() interface{} - Release() - Expired() bool - TTL() time.Duration - Expires() time.Time - Extend(duration time.Duration) -} - -type nilItem struct{} - -func (n *nilItem) Value() interface{} { return nil } -func (n *nilItem) Release() {} - -func (i *nilItem) Expired() bool { - return true -} - -func (i *nilItem) TTL() time.Duration { - return time.Minute -} - -func (i *nilItem) Expires() time.Time { - return time.Time{} -} - -func (i *nilItem) Extend(duration time.Duration) { -} - -var NilTracked = new(nilItem) - -type Item struct { - key string - group string - promotions int32 - refCount int32 - expires int64 - size int64 - value interface{} - element *list.Element -} - -func newItem(key string, value interface{}, expires int64) *Item { - size := int64(1) - if sized, ok := value.(Sized); ok { - size = sized.Size() - } - return &Item{ - key: key, - value: value, - promotions: 0, - size: size, - expires: expires, - } -} - -func (i *Item) shouldPromote(getsPerPromote int32) bool { - i.promotions += 1 - return i.promotions == getsPerPromote -} - -func (i *Item) Value() interface{} { - return i.value -} - -func (i *Item) track() { - atomic.AddInt32(&i.refCount, 1) -} - -func (i *Item) Release() { - atomic.AddInt32(&i.refCount, -1) -} - -func (i *Item) Expired() bool { - expires := atomic.LoadInt64(&i.expires) - return expires 
< time.Now().UnixNano() -} - -func (i *Item) TTL() time.Duration { - expires := atomic.LoadInt64(&i.expires) - return time.Nanosecond * time.Duration(expires-time.Now().UnixNano()) -} - -func (i *Item) Expires() time.Time { - expires := atomic.LoadInt64(&i.expires) - return time.Unix(0, expires) -} - -func (i *Item) Extend(duration time.Duration) { - atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano()) -} diff --git a/vendor/github.com/karlseguin/ccache/layeredbucket.go b/vendor/github.com/karlseguin/ccache/layeredbucket.go deleted file mode 100644 index 88f3def4219..00000000000 --- a/vendor/github.com/karlseguin/ccache/layeredbucket.go +++ /dev/null @@ -1,82 +0,0 @@ -package ccache - -import ( - "sync" - "time" -) - -type layeredBucket struct { - sync.RWMutex - buckets map[string]*bucket -} - -func (b *layeredBucket) get(primary, secondary string) *Item { - bucket := b.getSecondaryBucket(primary) - if bucket == nil { - return nil - } - return bucket.get(secondary) -} - -func (b *layeredBucket) getSecondaryBucket(primary string) *bucket { - b.RLock() - bucket, exists := b.buckets[primary] - b.RUnlock() - if exists == false { - return nil - } - return bucket -} - -func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration) (*Item, *Item) { - b.Lock() - bkt, exists := b.buckets[primary] - if exists == false { - bkt = &bucket{lookup: make(map[string]*Item)} - b.buckets[primary] = bkt - } - b.Unlock() - item, existing := bkt.set(secondary, value, duration) - item.group = primary - return item, existing -} - -func (b *layeredBucket) delete(primary, secondary string) *Item { - b.RLock() - bucket, exists := b.buckets[primary] - b.RUnlock() - if exists == false { - return nil - } - return bucket.delete(secondary) -} - -func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool { - b.RLock() - bucket, exists := b.buckets[primary] - b.RUnlock() - if exists == false { - return false - } - - bucket.Lock() - 
defer bucket.Unlock() - - if l := len(bucket.lookup); l == 0 { - return false - } - for key, item := range bucket.lookup { - delete(bucket.lookup, key) - deletables <- item - } - return true -} - -func (b *layeredBucket) clear() { - b.Lock() - defer b.Unlock() - for _, bucket := range b.buckets { - bucket.clear() - } - b.buckets = make(map[string]*bucket) -} diff --git a/vendor/github.com/karlseguin/ccache/layeredcache.go b/vendor/github.com/karlseguin/ccache/layeredcache.go deleted file mode 100644 index 20b13f94d30..00000000000 --- a/vendor/github.com/karlseguin/ccache/layeredcache.go +++ /dev/null @@ -1,237 +0,0 @@ -// An LRU cached aimed at high concurrency -package ccache - -import ( - "container/list" - "hash/fnv" - "sync/atomic" - "time" -) - -type LayeredCache struct { - *Configuration - list *list.List - buckets []*layeredBucket - bucketMask uint32 - size int64 - deletables chan *Item - promotables chan *Item - donec chan struct{} -} - -// Create a new layered cache with the specified configuration. -// A layered cache used a two keys to identify a value: a primary key -// and a secondary key. Get, Set and Delete require both a primary and -// secondary key. However, DeleteAll requires only a primary key, deleting -// all values that share the same primary key. 
- -// Layered Cache is useful as an HTTP cache, where an HTTP purge might -// delete multiple variants of the same resource: -// primary key = "user/44" -// secondary key 1 = ".json" -// secondary key 2 = ".xml" - -// See ccache.Configure() for creating a configuration -func Layered(config *Configuration) *LayeredCache { - c := &LayeredCache{ - list: list.New(), - Configuration: config, - bucketMask: uint32(config.buckets) - 1, - buckets: make([]*layeredBucket, config.buckets), - deletables: make(chan *Item, config.deleteBuffer), - } - for i := 0; i < int(config.buckets); i++ { - c.buckets[i] = &layeredBucket{ - buckets: make(map[string]*bucket), - } - } - c.restart() - return c -} - -// Get an item from the cache. Returns nil if the item wasn't found. -// This can return an expired item. Use item.Expired() to see if the item -// is expired and item.TTL() to see how long until the item expires (which -// will be negative for an already expired item). -func (c *LayeredCache) Get(primary, secondary string) *Item { - item := c.bucket(primary).get(primary, secondary) - if item == nil { - return nil - } - if item.expires > time.Now().UnixNano() { - c.promote(item) - } - return item -} - -// Get the secondary cache for a given primary key. This operation will -// never return nil. In the case where the primary key does not exist, a -// new, underlying, empty bucket will be created and returned. -func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache { - primaryBkt := c.bucket(primary) - bkt := primaryBkt.getSecondaryBucket(primary) - primaryBkt.Lock() - if bkt == nil { - bkt = &bucket{lookup: make(map[string]*Item)} - primaryBkt.buckets[primary] = bkt - } - primaryBkt.Unlock() - return &SecondaryCache{ - bucket: bkt, - pCache: c, - } -} - -// Used when the cache was created with the Track() configuration option. 
-// Avoid otherwise -func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem { - item := c.Get(primary, secondary) - if item == nil { - return NilTracked - } - item.track() - return item -} - -// Set the value in the cache for the specified duration -func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) { - c.set(primary, secondary, value, duration) -} - -// Replace the value if it exists, does not set if it doesn't. -// Returns true if the item existed an was replaced, false otherwise. -// Replace does not reset item's TTL nor does it alter its position in the LRU -func (c *LayeredCache) Replace(primary, secondary string, value interface{}) bool { - item := c.bucket(primary).get(primary, secondary) - if item == nil { - return false - } - c.Set(primary, secondary, value, item.TTL()) - return true -} - -// Attempts to get the value from the cache and calles fetch on a miss. -// If fetch returns an error, no value is cached and the error is returned back -// to the caller. -func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { - item := c.Get(primary, secondary) - if item != nil { - return item, nil - } - value, err := fetch() - if err != nil { - return nil, err - } - return c.set(primary, secondary, value, duration), nil -} - -// Remove the item from the cache, return true if the item was present, false otherwise. -func (c *LayeredCache) Delete(primary, secondary string) bool { - item := c.bucket(primary).delete(primary, secondary) - if item != nil { - c.deletables <- item - return true - } - return false -} - -// Deletes all items that share the same primary key -func (c *LayeredCache) DeleteAll(primary string) bool { - return c.bucket(primary).deleteAll(primary, c.deletables) -} - -//this isn't thread safe. 
It's meant to be called from non-concurrent tests -func (c *LayeredCache) Clear() { - for _, bucket := range c.buckets { - bucket.clear() - } - c.size = 0 - c.list = list.New() -} - -func (c *LayeredCache) Stop() { - close(c.promotables) - <-c.donec -} - -func (c *LayeredCache) restart() { - c.promotables = make(chan *Item, c.promoteBuffer) - c.donec = make(chan struct{}) - go c.worker() -} - -func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration) *Item { - item, existing := c.bucket(primary).set(primary, secondary, value, duration) - if existing != nil { - c.deletables <- existing - } - c.promote(item) - return item -} - -func (c *LayeredCache) bucket(key string) *layeredBucket { - h := fnv.New32a() - h.Write([]byte(key)) - return c.buckets[h.Sum32()&c.bucketMask] -} - -func (c *LayeredCache) promote(item *Item) { - c.promotables <- item -} - -func (c *LayeredCache) worker() { - defer close(c.donec) - for { - select { - case item, ok := <-c.promotables: - if ok == false { - return - } - if c.doPromote(item) && c.size > c.maxSize { - c.gc() - } - case item := <-c.deletables: - if item.element == nil { - item.promotions = -2 - } else { - c.size -= item.size - c.list.Remove(item.element) - } - } - } -} - -func (c *LayeredCache) doPromote(item *Item) bool { - // deleted before it ever got promoted - if item.promotions == -2 { - return false - } - if item.element != nil { //not a new item - if item.shouldPromote(c.getsPerPromote) { - c.list.MoveToFront(item.element) - item.promotions = 0 - } - return false - } - c.size += item.size - item.element = c.list.PushFront(item) - return true -} - -func (c *LayeredCache) gc() { - element := c.list.Back() - for i := 0; i < c.itemsToPrune; i++ { - if element == nil { - return - } - prev := element.Prev() - item := element.Value.(*Item) - if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 { - c.bucket(item.group).delete(item.group, item.key) - c.size -= item.size - 
c.list.Remove(element) - item.promotions = -2 - } - element = prev - } -} diff --git a/vendor/github.com/karlseguin/ccache/license.txt b/vendor/github.com/karlseguin/ccache/license.txt deleted file mode 100644 index aebeebfa520..00000000000 --- a/vendor/github.com/karlseguin/ccache/license.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013 Karl Seguin. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/karlseguin/ccache/readme.md b/vendor/github.com/karlseguin/ccache/readme.md deleted file mode 100644 index 8a7efa2e2b4..00000000000 --- a/vendor/github.com/karlseguin/ccache/readme.md +++ /dev/null @@ -1,172 +0,0 @@ -# CCache -CCache is an LRU Cache, written in Go, focused on supporting high concurrency. 
- -Lock contention on the list is reduced by: - -* Introducing a window which limits the frequency that an item can get promoted -* Using a buffered channel to queue promotions for a single worker -* Garbage collecting within the same thread as the worker - -## Setup - -First, download the project: - - go get github.com/karlseguin/ccache - -## Configuration -Next, import and create a `Cache` instance: - - -```go -import ( - "github.com/karlseguin/ccache" -) - -var cache = ccache.New(ccache.Configure()) -``` - -`Configure` exposes a chainable API: - -```go -var cache = ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)) -``` - -The most likely configuration options to tweak are: - -* `MaxSize(int)` - the maximum number size to store in the cache (default: 5000) -* `GetsPerPromote(int)` - the number of times an item is fetched before we promote it. For large caches with long TTLs, it normally isn't necessary to promote an item after every fetch (default: 3) -* `ItemsToPrune(int)` - the number of items to prune when we hit `MaxSize`. Freeing up more than 1 slot at a time improved performance (default: 500) - -Configurations that change the internals of the cache, which aren't as likely to need tweaking: - -* `Buckets` - ccache shards its internal map to provide a greater amount of concurrency. Must be a power of 2 (default: 16). -* `PromoteBuffer(int)` - the size of the buffer to use to queue promotions (default: 1024) -* `DeleteBuffer(int)` the size of the buffer to use to queue deletions (default: 1024) - -## Usage - -Once the cache is setup, you can `Get`, `Set` and `Delete` items from it. 
A `Get` returns an `*Item`: - -### Get -```go -item := cache.Get("user:4") -if item == nil { - //handle -} else { - user := item.Value().(*User) -} -``` -The returned `*Item` exposes a number of methods: - -* `Value() interface{}` - the value cached -* `Expired() bool` - whether the item is expired or not -* `TTL() time.Duration` - the duration before the item expires (will be a negative value for expired items) -* `Expires() time.Time` - the time the item will expire - -By returning expired items, CCache lets you decide if you want to serve stale content or not. For example, you might decide to serve up slightly stale content (< 30 seconds old) while re-fetching newer data in the background. You might also decide to serve up infinitely stale content if you're unable to get new data from your source. - -### Set -`Set` expects the key, value and ttl: - -```go -cache.Set("user:4", user, time.Minute * 10) -``` - -### Fetch -There's also a `Fetch` which mixes a `Get` and a `Set`: - -```go -item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error) { - //code to fetch the data incase of a miss - //should return the data to cache and the error, if any -}) -``` - -### Delete -`Delete` expects the key to delete. It's ok to call `Delete` on a non-existant key: - -```go -cache.Delete("user:4") -``` - -### Extend -The life of an item can be changed via the `Extend` method. This will change the expiry of the item by the specified duration relative to the current time. - -### Replace -The value of an item can be updated to a new value without renewing the item's TTL or it's position in the LRU: - -```go -cache.Replace("user:4", user) -``` - -`Replace` returns true if the item existed (and thus was replaced). In the case where the key was not in the cache, the value *is not* inserted and false is returned. - -### Stop -The cache's background worker can be stopped by calling `Stop`. 
Once `Stop` is called -the cache should not be used (calls are likely to panic). Stop must be called in order to allow the garbage collector to reap the cache. - -## Tracking -CCache supports a special tracking mode which is meant to be used in conjunction with other pieces of your code that maintains a long-lived reference to data. - -When you configure your cache with `Track()`: - -```go -cache = ccache.New(ccache.Configure().Track()) -``` - -The items retrieved via `TrackingGet` will not be eligible for purge until `Release` is called on them: - -```go -item := cache.TrackingGet("user:4") -user := item.Value() //will be nil if "user:4" didn't exist in the cache -item.Release() //can be called even if item.Value() returned nil -``` - -In practive, `Release` wouldn't be called until later, at some other place in your code. - -There's a couple reason to use the tracking mode if other parts of your code also hold references to objects. First, if you're already going to hold a reference to these objects, there's really no reason not to have them in the cache - the memory is used up anyways. - -More important, it helps ensure that you're code returns consistent data. With tracking, "user:4" might be purged, and a subsequent `Fetch` would reload the data. This can result in different versions of "user:4" being returned by different parts of your system. - -## LayeredCache - -CCache's `LayeredCache` stores and retrieves values by both a primary and secondary key. Deletion can happen against either the primary and secondary key, or the primary key only (removing all values that share the same primary key). - -`LayeredCache` is useful for HTTP caching, when you want to purge all variations of a request. 
- -`LayeredCache` takes the same configuration object as the main cache, exposes the same optional tracking capabilities, but exposes a slightly different API: - -```go -cache := ccache.Layered(ccache.Configure()) - -cache.Set("/users/goku", "type:json", "{value_to_cache}", time.Minute * 5) -cache.Set("/users/goku", "type:xml", "", time.Minute * 5) - -json := cache.Get("/users/goku", "type:json") -xml := cache.Get("/users/goku", "type:xml") - -cache.Delete("/users/goku", "type:json") -cache.Delete("/users/goku", "type:xml") -// OR -cache.DeleteAll("/users/goku") -``` - -# SecondaryCache - -In some cases, when using a `LayeredCache`, it may be desirable to always be acting on the secondary portion of the cache entry. This could be the case where the primary key is used as a key elsewhere in your code. The `SecondaryCache` is retrieved with: - -```go -cache := ccache.Layered(ccache.Configure()) -sCache := cache.GetOrCreateSecondaryCache("/users/goku") -sCache.Set("type:json", "{value_to_cache}", time.Minute * 5) -``` - -The semantics for interacting with the `SecondaryCache` are exactly the same as for a regular `Cache`. However, one difference is that `Get` will not return nil, but will return an empty 'cache' for a non-existent primary key. - -## Size -By default, items added to a cache have a size of 1. This means that if you configure `MaxSize(10000)`, you'll be able to store 10000 items in the cache. - -However, if the values you set into the cache have a method `Size() int64`, this size will be used. Note that ccache has an overhead of ~350 bytes per entry, which isn't taken into account. In other words, given a filled up cache, with `MaxSize(4096000)` and items that return a `Size() int64` of 2048, we can expect to find 2000 items (4096000/2048) taking a total space of 4796000 bytes. - -## Want Something Simpler? 
-For a simpler cache, checkout out [rcache](https://github.com/karlseguin/rcache) diff --git a/vendor/github.com/karlseguin/ccache/secondarycache.go b/vendor/github.com/karlseguin/ccache/secondarycache.go deleted file mode 100644 index f901fde0c55..00000000000 --- a/vendor/github.com/karlseguin/ccache/secondarycache.go +++ /dev/null @@ -1,72 +0,0 @@ -package ccache - -import "time" - -type SecondaryCache struct { - bucket *bucket - pCache *LayeredCache -} - -// Get the secondary key. -// The semantics are the same as for LayeredCache.Get -func (s *SecondaryCache) Get(secondary string) *Item { - return s.bucket.get(secondary) -} - -// Set the secondary key to a value. -// The semantics are the same as for LayeredCache.Set -func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.Duration) *Item { - item, existing := s.bucket.set(secondary, value, duration) - if existing != nil { - s.pCache.deletables <- existing - } - s.pCache.promote(item) - return item -} - -// Fetch or set a secondary key. -// The semantics are the same as for LayeredCache.Fetch -func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { - item := s.Get(secondary) - if item != nil { - return item, nil - } - value, err := fetch() - if err != nil { - return nil, err - } - return s.Set(secondary, value, duration), nil -} - -// Delete a secondary key. -// The semantics are the same as for LayeredCache.Delete -func (s *SecondaryCache) Delete(secondary string) bool { - item := s.bucket.delete(secondary) - if item != nil { - s.pCache.deletables <- item - return true - } - return false -} - -// Replace a secondary key. -// The semantics are the same as for LayeredCache.Replace -func (s *SecondaryCache) Replace(secondary string, value interface{}) bool { - item := s.Get(secondary) - if item == nil { - return false - } - s.Set(secondary, value, item.TTL()) - return true -} - -// Track a secondary key. 
-// The semantics are the same as for LayeredCache.TrackingGet -func (c *SecondaryCache) TrackingGet(secondary string) TrackedItem { - item := c.Get(secondary) - if item == nil { - return NilTracked - } - item.track() - return item -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD b/vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD new file mode 100644 index 00000000000..bf4e2ecbebb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "annotations.pb.go", + "http.pb.go", + ], + importpath = "google.golang.org/genproto/googleapis/api/annotations", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/protoc-gen-go/descriptor:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 00000000000..53d57f67a53 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,64 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/annotations.proto + +/* +Package annotations is a generated protocol buffer package. 
+ +It is generated from these files: + google/api/annotations.proto + google/api/http.proto + +It has these top-level messages: + Http + HttpRule + CustomHttpPattern +*/ +package annotations + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +var E_Http = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", +} + +func init() { + proto.RegisterExtension(E_Http) +} + +func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, + 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, + 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, + 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, + 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, + 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, + 0x83, 
0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, + 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, + 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, + 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, + 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new file mode 100644 index 00000000000..f91c604620b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,566 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/http.proto + +package annotations + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Defines the HTTP configuration for a service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. 
+ Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *Http) Reset() { *m = Http{} } +func (m *Http) String() string { return proto.CompactTextString(m) } +func (*Http) ProtoMessage() {} +func (*Http) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Http) GetRules() []*HttpRule { + if m != nil { + return m.Rules + } + return nil +} + +// `HttpRule` defines the mapping of an RPC method to one or more HTTP +// REST APIs. The mapping determines what portions of the request +// message are populated from the path, query parameters, or body of +// the HTTP request. The mapping is typically specified as an +// `google.api.http` annotation, see "google/api/annotations.proto" +// for details. +// +// The mapping consists of a field specifying the path template and +// method kind. The path template can refer to fields in the request +// message, as in the example below which describes a REST GET +// operation on a resource collection of messages: +// +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// SubMessage sub = 2; // `sub.subfield` is url-mapped +// } +// message Message { +// string text = 1; // content of the resource +// } +// +// The same http annotation can alternatively be expressed inside the +// `GRPC API Configuration` YAML file. +// +// http: +// rules: +// - selector: .Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// This definition enables an automatic, bidrectional mapping of HTTP +// JSON to RPC. 
Example: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` +// +// In general, not only fields but also field paths can be referenced +// from a path pattern. Fields mapped to the path pattern cannot be +// repeated and must have a primitive (non-message) type. +// +// Any fields in the request message which are not bound by the path +// pattern automatically become (optional) HTTP query +// parameters. Assume the following definition of the request message: +// +// +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// int64 revision = 2; // becomes a parameter +// SubMessage sub = 3; // `sub.subfield` becomes a parameter +// } +// +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` +// +// Note that fields which are mapped to HTTP parameters must have a +// primitive type or a repeated primitive type. Message types are not +// allowed. In the case of a repeated type, the parameter can be +// repeated in the URL, as in `...?param=A¶m=B`. +// +// For HTTP method kinds which allow a request body, the `body` field +// specifies the mapping. 
Consider a REST update method on the +// message resource collection: +// +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice of +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. 
Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// +// This enables the following two alternative HTTP JSON to RPC +// mappings: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping +// +// The rules for mapping HTTP path, query parameters, and body fields +// to the request message are as follows: +// +// 1. The `body` field specifies either `*` or a field path, or is +// omitted. If omitted, it assumes there is no HTTP body. +// 2. Leaf fields (recursive expansion of nested messages in the +// request) can be classified into three types: +// (a) Matched in the URL template. +// (b) Covered by body (if body is `*`, everything except (a) fields; +// else everything under the body field) +// (c) All other fields. +// 3. URL query parameters found in the HTTP request are mapped to (c) fields. +// 4. Any body sent with an HTTP request can contain only (b) fields. +// +// The syntax of the path template is as follows: +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single path segment. It follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion. +// +// The syntax `**` matches zero or more path segments. 
It follows the semantics +// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved +// Expansion. NOTE: it must be the last segment in the path except the Verb. +// +// The syntax `LITERAL` matches literal text in the URL path. +// +// The syntax `Variable` matches the entire path as specified by its template; +// this nested template must not contain further variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// NOTE: the field paths in variables and in the `body` must not refer to +// repeated fields or map fields. +// +// Use CustomHttpPattern to specify any HTTP method that is not included in the +// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for +// a given URL path rule. The wild-card rule is useful for services that provide +// content to Web (HTML) clients. +type HttpRule struct { + // Selects methods to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + // + // Types that are valid to be assigned to Pattern: + // *HttpRule_Get + // *HttpRule_Put + // *HttpRule_Post + // *HttpRule_Delete + // *HttpRule_Patch + // *HttpRule_Custom + Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` + // The name of the request field whose value is mapped to the HTTP body, or + // `*` for mapping all fields not captured by the path pattern to the HTTP + // body. NOTE: the referred field must not be a repeated field and must be + // present at the top-level of request message type. 
+ Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"` + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"` +} + +func (m *HttpRule) Reset() { *m = HttpRule{} } +func (m *HttpRule) String() string { return proto.CompactTextString(m) } +func (*HttpRule) ProtoMessage() {} +func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + Get string `protobuf:"bytes,2,opt,name=get,oneof"` +} +type HttpRule_Put struct { + Put string `protobuf:"bytes,3,opt,name=put,oneof"` +} +type HttpRule_Post struct { + Post string `protobuf:"bytes,4,opt,name=post,oneof"` +} +type HttpRule_Delete struct { + Delete string `protobuf:"bytes,5,opt,name=delete,oneof"` +} +type HttpRule_Patch struct { + Patch string `protobuf:"bytes,6,opt,name=patch,oneof"` +} +type HttpRule_Custom struct { + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} +func (*HttpRule_Put) isHttpRule_Pattern() {} +func (*HttpRule_Post) isHttpRule_Pattern() {} +func (*HttpRule_Delete) isHttpRule_Pattern() {} +func (*HttpRule_Patch) isHttpRule_Pattern() {} +func (*HttpRule_Custom) isHttpRule_Pattern() {} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + +func (m *HttpRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *HttpRule) GetGet() string { + if x, ok := m.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (m *HttpRule) GetPut() string { + if x, ok := m.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return 
"" +} + +func (m *HttpRule) GetPost() string { + if x, ok := m.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (m *HttpRule) GetDelete() string { + if x, ok := m.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (m *HttpRule) GetPatch() string { + if x, ok := m.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (m *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := m.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (m *HttpRule) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func (m *HttpRule) GetAdditionalBindings() []*HttpRule { + if m != nil { + return m.AdditionalBindings + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } +} + +func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Get) + case *HttpRule_Put: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Put) + case *HttpRule_Post: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Post) + case *HttpRule_Delete: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Delete) + case *HttpRule_Patch: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Patch) + case *HttpRule_Custom: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := 
b.EncodeMessage(x.Custom); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) + } + return nil +} + +func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HttpRule) + switch tag { + case 2: // pattern.get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Get{x} + return true, err + case 3: // pattern.put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Put{x} + return true, err + case 4: // pattern.post + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Post{x} + return true, err + case 5: // pattern.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Delete{x} + return true, err + case 6: // pattern.patch + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Patch{x} + return true, err + case 8: // pattern.custom + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomHttpPattern) + err := b.DecodeMessage(msg) + m.Pattern = &HttpRule_Custom{msg} + return true, err + default: + return false, nil + } +} + +func _HttpRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Get))) + n += len(x.Get) + case *HttpRule_Put: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Put))) + n += len(x.Put) + case *HttpRule_Post: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(len(x.Post))) + n += len(x.Post) + case *HttpRule_Delete: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Delete))) + n += len(x.Delete) + case *HttpRule_Patch: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Patch))) + n += len(x.Patch) + case *HttpRule_Custom: + s := proto.Size(x.Custom) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A custom pattern is used for defining custom HTTP verb. +type CustomHttpPattern struct { + // The name of this custom HTTP verb. + Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` + // The path matched by this custom verb. + Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` +} + +func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } +func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } +func (*CustomHttpPattern) ProtoMessage() {} +func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *CustomHttpPattern) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *CustomHttpPattern) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func init() { + proto.RegisterType((*Http)(nil), "google.api.Http") + proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") + proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") +} + +func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 359 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6a, 0xe3, 0x30, + 0x10, 0xc6, 0xd7, 0x89, 0xe3, 0x24, 0x13, 0x58, 0x58, 0x6d, 0x76, 0x11, 0x85, 0x42, 0xc8, 0x29, + 0xf4, 0x60, 0x43, 0x7a, 
0xe8, 0x21, 0xa7, 0xb8, 0x94, 0xa6, 0xb7, 0xe0, 0x63, 0x2f, 0x45, 0xb1, + 0x85, 0xa2, 0xd6, 0x91, 0x84, 0x3d, 0x3e, 0xf4, 0x75, 0xfa, 0x0e, 0x7d, 0xb7, 0x1e, 0x8b, 0xfe, + 0xa4, 0x09, 0x14, 0x7a, 0x9b, 0xef, 0x37, 0x9f, 0x34, 0xa3, 0x19, 0xc1, 0x3f, 0xa1, 0xb5, 0xa8, + 0x79, 0xc6, 0x8c, 0xcc, 0xf6, 0x88, 0x26, 0x35, 0x8d, 0x46, 0x4d, 0xc0, 0xe3, 0x94, 0x19, 0x39, + 0x5f, 0x42, 0xbc, 0x41, 0x34, 0xe4, 0x0a, 0x06, 0x4d, 0x57, 0xf3, 0x96, 0x46, 0xb3, 0xfe, 0x62, + 0xb2, 0x9c, 0xa6, 0x27, 0x4f, 0x6a, 0x0d, 0x45, 0x57, 0xf3, 0xc2, 0x5b, 0xe6, 0xef, 0x3d, 0x18, + 0x1d, 0x19, 0xb9, 0x80, 0x51, 0xcb, 0x6b, 0x5e, 0xa2, 0x6e, 0x68, 0x34, 0x8b, 0x16, 0xe3, 0xe2, + 0x4b, 0x13, 0x02, 0x7d, 0xc1, 0x91, 0xf6, 0x2c, 0xde, 0xfc, 0x2a, 0xac, 0xb0, 0xcc, 0x74, 0x48, + 0xfb, 0x47, 0x66, 0x3a, 0x24, 0x53, 0x88, 0x8d, 0x6e, 0x91, 0xc6, 0x01, 0x3a, 0x45, 0x28, 0x24, + 0x15, 0xaf, 0x39, 0x72, 0x3a, 0x08, 0x3c, 0x68, 0xf2, 0x1f, 0x06, 0x86, 0x61, 0xb9, 0xa7, 0x49, + 0x48, 0x78, 0x49, 0x6e, 0x20, 0x29, 0xbb, 0x16, 0xf5, 0x81, 0x8e, 0x66, 0xd1, 0x62, 0xb2, 0xbc, + 0x3c, 0x7f, 0xc5, 0xad, 0xcb, 0xd8, 0xbe, 0xb7, 0x0c, 0x91, 0x37, 0xca, 0x5e, 0xe8, 0xed, 0x84, + 0x40, 0xbc, 0xd3, 0xd5, 0x2b, 0x1d, 0xba, 0x07, 0xb8, 0x98, 0xdc, 0xc1, 0x5f, 0x56, 0x55, 0x12, + 0xa5, 0x56, 0xac, 0x7e, 0xda, 0x49, 0x55, 0x49, 0x25, 0x5a, 0x3a, 0xf9, 0x61, 0x3e, 0xe4, 0x74, + 0x20, 0x0f, 0xfe, 0x7c, 0x0c, 0x43, 0xe3, 0xeb, 0xcd, 0x57, 0xf0, 0xe7, 0x5b, 0x13, 0xb6, 0xf4, + 0x8b, 0x54, 0x55, 0x98, 0x9d, 0x8b, 0x2d, 0x33, 0x0c, 0xf7, 0x7e, 0x70, 0x85, 0x8b, 0xf3, 0x67, + 0xf8, 0x5d, 0xea, 0xc3, 0x59, 0xd9, 0x7c, 0xec, 0xae, 0xb1, 0x1b, 0xdd, 0x46, 0x8f, 0xeb, 0x90, + 0x10, 0xba, 0x66, 0x4a, 0xa4, 0xba, 0x11, 0x99, 0xe0, 0xca, 0xed, 0x3b, 0xf3, 0x29, 0x66, 0x64, + 0xeb, 0x7e, 0x02, 0x53, 0x4a, 0x23, 0xb3, 0x6d, 0xb6, 0xab, 0xb3, 0xf8, 0x23, 0x8a, 0xde, 0x7a, + 0xf1, 0xfd, 0x7a, 0xfb, 0xb0, 0x4b, 0xdc, 0xb9, 0xeb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, + 0x15, 0x60, 0x5b, 0x40, 0x02, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/grpc/BUILD b/vendor/google.golang.org/grpc/BUILD index 9b550cb77b9..48803bf4539 100644 --- a/vendor/google.golang.org/grpc/BUILD +++ b/vendor/google.golang.org/grpc/BUILD @@ -57,6 +57,7 @@ filegroup( "//vendor/google.golang.org/grpc/credentials:all-srcs", "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1:all-srcs", "//vendor/google.golang.org/grpc/grpclog:all-srcs", + "//vendor/google.golang.org/grpc/health/grpc_health_v1:all-srcs", "//vendor/google.golang.org/grpc/internal:all-srcs", "//vendor/google.golang.org/grpc/keepalive:all-srcs", "//vendor/google.golang.org/grpc/metadata:all-srcs", diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD b/vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD new file mode 100644 index 00000000000..9a60f52bef1 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["health.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["health.pb.go"], + importpath = "google.golang.org/grpc/health/grpc_health_v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000000..89c4d459f0a --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go 
@@ -0,0 +1,176 @@ +// Code generated by protoc-gen-go. +// source: health.proto +// DO NOT EDIT! + +/* +Package grpc_health_v1 is a generated protocol buffer package. + +It is generated from these files: + health.proto + +It has these top-level messages: + HealthCheckRequest + HealthCheckResponse +*/ +package grpc_health_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return 
proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "health.proto", +} + +func init() { proto.RegisterFile("health.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 204 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc, + 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x83, + 0x0a, 0x95, 0x19, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x39, 0xce, 0x19, 0xa9, 0xc9, 0xd9, 0x41, + 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, + 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0xd2, 0x1c, 0x46, 0x2e, 0x61, 0x14, + 0x0d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x9e, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0xa5, + 0xc5, 0x60, 0x0d, 0x7c, 
0x46, 0x86, 0x7a, 0xa8, 0x16, 0xe9, 0x61, 0xd1, 0xa4, 0x17, 0x0c, 0x32, + 0x34, 0x2f, 0x3d, 0x18, 0xac, 0x31, 0x08, 0x6a, 0x80, 0x92, 0x15, 0x17, 0x2f, 0x8a, 0x84, 0x10, + 0x37, 0x17, 0x7b, 0xa8, 0x9f, 0xb7, 0x9f, 0x7f, 0xb8, 0x9f, 0x00, 0x03, 0x88, 0x13, 0xec, 0x1a, + 0x14, 0xe6, 0xe9, 0xe7, 0x2e, 0xc0, 0x28, 0xc4, 0xcf, 0xc5, 0xed, 0xe7, 0x1f, 0x12, 0x0f, 0x13, + 0x60, 0x32, 0x8a, 0xe2, 0x62, 0x83, 0x58, 0x24, 0x14, 0xc0, 0xc5, 0x0a, 0xb6, 0x4c, 0x48, 0x09, + 0xaf, 0x4b, 0xc0, 0xfe, 0x95, 0x52, 0x26, 0xc2, 0xb5, 0x49, 0x6c, 0xe0, 0x10, 0x34, 0x06, 0x04, + 0x00, 0x00, 0xff, 0xff, 0xac, 0x56, 0x2a, 0xcb, 0x51, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto new file mode 100644 index 00000000000..e2dc0889258 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package grpc.health.v1; + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} + +service Health{ + rpc Check(HealthCheckRequest) returns (HealthCheckResponse); +} From eab0e37a63cde770c5ff000c3a8bfd536ca45e0f Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Mon, 18 Dec 2017 15:35:31 -0800 Subject: [PATCH 393/794] kubeadm upgrade: fix unit test The CA generated for each test case is global and the cases modify the expiry. This can flake depending on what order the tests run. Generate a new CA for each test case. 
--- cmd/kubeadm/app/phases/upgrade/postupgrade_v18_19_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade_v18_19_test.go b/cmd/kubeadm/app/phases/upgrade/postupgrade_v18_19_test.go index d45e744f415..6720202f64b 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade_v18_19_test.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade_v18_19_test.go @@ -136,10 +136,6 @@ func TestShouldBackupAPIServerCertAndKey(t *testing.T) { Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, NodeName: "test-node", } - caCert, caKey, err := certsphase.NewCACertAndKey() - if err != nil { - t.Fatalf("failed creation of ca cert and key: %v", err) - } for desc, test := range map[string]struct { adjustedExpiry time.Duration @@ -160,6 +156,10 @@ func TestShouldBackupAPIServerCertAndKey(t *testing.T) { expected: true, }, } { + caCert, caKey, err := certsphase.NewCACertAndKey() + if err != nil { + t.Fatalf("failed creation of ca cert and key: %v", err) + } caCert.NotBefore = caCert.NotBefore.Add(-test.adjustedExpiry).UTC() apiCert, apiKey, err := certsphase.NewAPIServerCertAndKey(cfg, caCert, caKey) if err != nil { From 0abafb240443ace1898447e33b559fa6cc845f92 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Wed, 13 Dec 2017 12:24:59 -0800 Subject: [PATCH 394/794] Version bump to grpc v1.7.5 --- Godeps/Godeps.json | 80 +- Godeps/LICENSES | 4230 +++++++++++++++-- vendor/google.golang.org/grpc/.please-update | 0 vendor/google.golang.org/grpc/.travis.yml | 19 +- vendor/google.golang.org/grpc/AUTHORS | 1 + vendor/google.golang.org/grpc/BUILD | 15 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 58 +- vendor/google.golang.org/grpc/LICENSE | 224 +- vendor/google.golang.org/grpc/Makefile | 13 +- vendor/google.golang.org/grpc/PATENTS | 22 - vendor/google.golang.org/grpc/README.md | 12 +- vendor/google.golang.org/grpc/backoff.go | 18 + vendor/google.golang.org/grpc/balancer.go | 66 +- 
vendor/google.golang.org/grpc/balancer/BUILD | 28 + .../grpc/balancer/balancer.go | 206 + .../grpc/balancer_conn_wrappers.go | 252 + .../grpc/balancer_v1_wrapper.go | 367 ++ vendor/google.golang.org/grpc/call.go | 133 +- vendor/google.golang.org/grpc/clientconn.go | 988 ++-- vendor/google.golang.org/grpc/codec.go | 40 +- .../grpc/codes/code_string.go | 4 +- vendor/google.golang.org/grpc/codes/codes.go | 37 +- .../google.golang.org/grpc/connectivity/BUILD | 26 + .../grpc/connectivity/connectivity.go | 72 + vendor/google.golang.org/grpc/coverage.sh | 48 - .../grpc/credentials/credentials.go | 47 +- .../grpc/credentials/credentials_util_go17.go | 35 +- .../grpc/credentials/credentials_util_go18.go | 35 +- .../credentials/credentials_util_pre_go17.go | 35 +- vendor/google.golang.org/grpc/doc.go | 20 +- vendor/google.golang.org/grpc/go16.go | 56 - vendor/google.golang.org/grpc/go17.go | 55 - vendor/google.golang.org/grpc/grpclb.go | 241 +- .../grpc/grpclb/grpc_lb_v1/BUILD | 14 +- .../grpc/grpclb/grpc_lb_v1/doc.go | 21 + .../grpc/grpclb/grpc_lb_v1/messages/BUILD | 29 + .../{grpclb.pb.go => messages/messages.pb.go} | 118 +- .../{grpclb.proto => messages/messages.proto} | 50 +- vendor/google.golang.org/grpc/grpclog/BUILD | 6 +- .../google.golang.org/grpc/grpclog/grpclog.go | 123 + .../google.golang.org/grpc/grpclog/logger.go | 104 +- .../grpc/grpclog/loggerv2.go | 195 + .../grpc/health/grpc_health_v1/health.pb.go | 54 +- .../grpc/health/grpc_health_v1/health.proto | 14 + vendor/google.golang.org/grpc/interceptor.go | 41 +- .../grpc/internal/internal.go | 35 +- .../grpc/keepalive/keepalive.go | 39 +- .../grpc/metadata/metadata.go | 87 +- vendor/google.golang.org/grpc/naming/BUILD | 11 +- .../grpc/naming/dns_resolver.go | 290 ++ vendor/google.golang.org/grpc/naming/go17.go | 34 + vendor/google.golang.org/grpc/naming/go18.go | 28 + .../google.golang.org/grpc/naming/naming.go | 35 +- vendor/google.golang.org/grpc/peer/peer.go | 38 +- 
.../google.golang.org/grpc/picker_wrapper.go | 141 + vendor/google.golang.org/grpc/pickfirst.go | 95 + vendor/google.golang.org/grpc/proxy.go | 38 +- vendor/google.golang.org/grpc/resolver/BUILD | 22 + .../grpc/resolver/resolver.go | 143 + .../grpc/resolver_conn_wrapper.go | 139 + vendor/google.golang.org/grpc/rpc_util.go | 355 +- vendor/google.golang.org/grpc/server.go | 560 ++- .../google.golang.org/grpc/stats/handlers.go | 42 +- vendor/google.golang.org/grpc/stats/stats.go | 147 +- vendor/google.golang.org/grpc/status/BUILD | 1 + .../google.golang.org/grpc/status/status.go | 73 +- vendor/google.golang.org/grpc/stream.go | 254 +- vendor/google.golang.org/grpc/tap/tap.go | 55 +- vendor/google.golang.org/grpc/trace.go | 50 +- vendor/google.golang.org/grpc/transport/BUILD | 4 +- .../grpc/transport/bdp_estimator.go | 143 + .../grpc/transport/control.go | 174 +- .../google.golang.org/grpc/transport/go16.go | 46 - .../google.golang.org/grpc/transport/go17.go | 46 - .../grpc/transport/handler_server.go | 107 +- .../grpc/transport/http2_client.go | 948 ++-- .../grpc/transport/http2_server.go | 900 ++-- .../grpc/transport/http_util.go | 290 +- .../google.golang.org/grpc/transport/log.go | 50 + .../grpc/transport/transport.go | 357 +- vendor/google.golang.org/grpc/vet.sh | 78 + 81 files changed, 10244 insertions(+), 3863 deletions(-) create mode 100644 vendor/google.golang.org/grpc/.please-update create mode 100644 vendor/google.golang.org/grpc/AUTHORS delete mode 100644 vendor/google.golang.org/grpc/PATENTS create mode 100644 vendor/google.golang.org/grpc/balancer/BUILD create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go create mode 100644 vendor/google.golang.org/grpc/connectivity/BUILD create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go delete mode 100755 
vendor/google.golang.org/grpc/coverage.sh delete mode 100644 vendor/google.golang.org/grpc/go16.go delete mode 100644 vendor/google.golang.org/grpc/go17.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD rename vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/{grpclb.pb.go => messages/messages.pb.go} (79%) rename vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/{grpclb.proto => messages/messages.proto} (71%) create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/naming/go17.go create mode 100644 vendor/google.golang.org/grpc/naming/go18.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/resolver/BUILD create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/transport/bdp_estimator.go delete mode 100644 vendor/google.golang.org/grpc/transport/go16.go delete mode 100644 vendor/google.golang.org/grpc/transport/go17.go create mode 100644 vendor/google.golang.org/grpc/transport/log.go create mode 100755 vendor/google.golang.org/grpc/vet.sh diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 76a3b65b33b..197f6e897c1 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -3010,78 +3010,98 @@ }, { "ImportPath": "google.golang.org/grpc", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Comment": "v1.7.5", + "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/connectivity", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { 
"ImportPath": "google.golang.org/grpc/naming", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "gopkg.in/gcfg.v1", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 9ec4ed0bb82..6d17659db3a 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -89940,540 +89940,3990 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ = vendor/google.golang.org/grpc licensed under: = -Copyright 2014, Google Inc. -All rights reserved. 
-Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/balancer licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/codes licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/connectivity licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/credentials licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/grpclb/grpc_lb_v1 licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/grpclog licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/health/grpc_health_v1 licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/internal licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/keepalive licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/metadata licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/naming licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/peer licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/resolver licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/stats licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/status licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/tap licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/transport licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ diff --git a/vendor/google.golang.org/grpc/.please-update b/vendor/google.golang.org/grpc/.please-update new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index b3577c7ae20..22bf25004a3 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -1,19 +1,20 @@ language: go go: - - 1.6.3 - - 1.7 - - 1.8 + - 1.7.x + - 1.8.x + - 1.9.x + +matrix: + include: + - go: 1.9.x + env: ARCH=386 go_import_path: google.golang.org/grpc before_install: - - go get github.com/golang/lint/golint - - go get -u golang.org/x/tools/cmd/goimports github.com/axw/gocov/gocov github.com/mattn/goveralls golang.org/x/tools/cmd/cover + - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi script: - - '! gofmt -s -d -l . 2>&1 | read' - - '! goimports -l . | read' - - '! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"' - - '! go tool vet -all . 
2>&1 | grep -vE "constant [0-9]+ not a string in call to Errorf" | grep -vF .pb.go:' # https://github.com/golang/protobuf/issues/214 + - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh || exit 1; fi - make test testrace diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 00000000000..e491a9e7f78 --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/BUILD b/vendor/google.golang.org/grpc/BUILD index 48803bf4539..b266ac7de56 100644 --- a/vendor/google.golang.org/grpc/BUILD +++ b/vendor/google.golang.org/grpc/BUILD @@ -5,15 +5,18 @@ go_library( srcs = [ "backoff.go", "balancer.go", + "balancer_conn_wrappers.go", + "balancer_v1_wrapper.go", "call.go", "clientconn.go", "codec.go", "doc.go", - "go16.go", - "go17.go", "grpclb.go", "interceptor.go", + "picker_wrapper.go", + "pickfirst.go", "proxy.go", + "resolver_conn_wrapper.go", "rpc_util.go", "server.go", "stream.go", @@ -26,15 +29,18 @@ go_library( "//vendor/golang.org/x/net/context:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/golang.org/x/net/trace:go_default_library", + "//vendor/google.golang.org/grpc/balancer:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/connectivity:go_default_library", "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1:go_default_library", + "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages:go_default_library", "//vendor/google.golang.org/grpc/grpclog:go_default_library", "//vendor/google.golang.org/grpc/internal:go_default_library", "//vendor/google.golang.org/grpc/keepalive:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", "//vendor/google.golang.org/grpc/naming:go_default_library", 
"//vendor/google.golang.org/grpc/peer:go_default_library", + "//vendor/google.golang.org/grpc/resolver:go_default_library", "//vendor/google.golang.org/grpc/stats:go_default_library", "//vendor/google.golang.org/grpc/status:go_default_library", "//vendor/google.golang.org/grpc/tap:go_default_library", @@ -53,7 +59,9 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//vendor/google.golang.org/grpc/balancer:all-srcs", "//vendor/google.golang.org/grpc/codes:all-srcs", + "//vendor/google.golang.org/grpc/connectivity:all-srcs", "//vendor/google.golang.org/grpc/credentials:all-srcs", "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1:all-srcs", "//vendor/google.golang.org/grpc/grpclog:all-srcs", @@ -63,6 +71,7 @@ filegroup( "//vendor/google.golang.org/grpc/metadata:all-srcs", "//vendor/google.golang.org/grpc/naming:all-srcs", "//vendor/google.golang.org/grpc/peer:all-srcs", + "//vendor/google.golang.org/grpc/resolver:all-srcs", "//vendor/google.golang.org/grpc/stats:all-srcs", "//vendor/google.golang.org/grpc/status:all-srcs", "//vendor/google.golang.org/grpc/tap:all-srcs", diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 36cd6f7581b..a5c6e06e255 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,46 +1,32 @@ # How to contribute -We definitely welcome patches and contribution to grpc! Here are some guidelines -and information about how to do so. +We definitely welcome your patches and contributions to gRPC! -## Sending patches - -### Getting started - -1. Check out the code: - - $ go get google.golang.org/grpc - $ cd $GOPATH/src/google.golang.org/grpc - -1. Create a fork of the grpc-go repository. -1. Add your fork as a remote: - - $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git - -1. Make changes, commit them. -1. Run the test suite: - - $ make test - -1. Push your changes to your fork: - - $ git push fork ... 
- -1. Open a pull request. +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements In order to protect both you and ourselves, you will need to sign the [Contributor License Agreement](https://cla.developers.google.com/clas). -## Filing Issues -When filing an issue, make sure to answer these five questions: +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. + +- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). 
+ +- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -### Contributing code -Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. +- Exceptions to the rules can be made if there's a compelling reason for doing so. + diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE index f4988b45079..d6456956733 100644 --- a/vendor/google.golang.org/grpc/LICENSE +++ b/vendor/google.golang.org/grpc/LICENSE @@ -1,28 +1,202 @@ -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 03bb01f0b35..39606b564a6 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -20,24 +20,17 @@ proto: echo "error: protoc not installed" >&2; \ exit 1; \ fi - go get -u -v github.com/golang/protobuf/protoc-gen-go - # use $$dir as the root for all proto files in the same directory - for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \ - protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \ - done + go generate google.golang.org/grpc/... test: testdeps - go test -v -cpu 1,4 google.golang.org/grpc/... + go test -cpu 1,4 google.golang.org/grpc/... testrace: testdeps - go test -v -race -cpu 1,4 google.golang.org/grpc/... + go test -race -cpu 1,4 google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... -coverage: testdeps - ./coverage.sh --coveralls - .PHONY: \ all \ deps \ diff --git a/vendor/google.golang.org/grpc/PATENTS b/vendor/google.golang.org/grpc/PATENTS deleted file mode 100644 index 69b47959fab..00000000000 --- a/vendor/google.golang.org/grpc/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the gRPC project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of gRPC, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of gRPC. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of gRPC or any code incorporated within this -implementation of gRPC constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of gRPC -shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index ae0236f92f3..622a5dc3e85 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -2,7 +2,7 @@ [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) -The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. +The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. 
For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. Installation ------------ @@ -10,13 +10,13 @@ Installation To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` -$ go get google.golang.org/grpc +$ go get -u google.golang.org/grpc ``` Prerequisites ------------- -This requires Go 1.6 or later. +This requires Go 1.7 or later. Constraints ----------- @@ -26,9 +26,13 @@ Documentation ------------- See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). +Performance +----------- +See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). + Status ------ -GA +General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). FAQ --- diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index c99024ee302..090fbe87c52 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -1,3 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + package grpc import ( diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go index 9d943fbadae..ab65049ddc1 100644 --- a/vendor/google.golang.org/grpc/balancer.go +++ b/vendor/google.golang.org/grpc/balancer.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -35,6 +20,7 @@ package grpc import ( "fmt" + "net" "sync" "golang.org/x/net/context" @@ -60,6 +46,10 @@ type BalancerConfig struct { // use to dial to a remote load balancer server. The Balancer implementations // can ignore this if it does not need to talk to another party securely. DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) } // BalancerGetOptions configures a Get call. 
@@ -167,7 +157,7 @@ type roundRobin struct { func (rr *roundRobin) watchAddrUpdates() error { updates, err := rr.w.Next() if err != nil { - grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) return err } rr.mu.Lock() @@ -183,7 +173,7 @@ func (rr *roundRobin) watchAddrUpdates() error { for _, v := range rr.addrs { if addr == v.addr { exist = true - grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr) + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) break } } @@ -200,7 +190,7 @@ func (rr *roundRobin) watchAddrUpdates() error { } } default: - grpclog.Println("Unknown update.Op ", update.Op) + grpclog.Errorln("Unknown update.Op ", update.Op) } } // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. @@ -211,6 +201,10 @@ func (rr *roundRobin) watchAddrUpdates() error { if rr.done { return ErrClientConnClosing } + select { + case <-rr.addrCh: + default: + } rr.addrCh <- open return nil } @@ -233,7 +227,7 @@ func (rr *roundRobin) Start(target string, config BalancerConfig) error { return err } rr.w = w - rr.addrCh = make(chan []Address) + rr.addrCh = make(chan []Address, 1) go func() { for { if err := rr.watchAddrUpdates(); err != nil { @@ -385,6 +379,9 @@ func (rr *roundRobin) Notify() <-chan []Address { func (rr *roundRobin) Close() error { rr.mu.Lock() defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } rr.done = true if rr.w != nil { rr.w.Close() @@ -398,3 +395,14 @@ func (rr *roundRobin) Close() error { } return nil } + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). 
+type pickFirst struct { + *roundRobin +} + +func pickFirstBalancerV1(r naming.Resolver) Balancer { + return &pickFirst{&roundRobin{r: r}} +} diff --git a/vendor/google.golang.org/grpc/balancer/BUILD b/vendor/google.golang.org/grpc/balancer/BUILD new file mode 100644 index 00000000000..e422cbb250b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["balancer.go"], + importpath = "google.golang.org/grpc/balancer", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/connectivity:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", + "//vendor/google.golang.org/grpc/resolver:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 00000000000..84e10b630e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,206 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "errors" + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/resolver" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) + // defaultBuilder is the default balancer to use. + defaultBuilder Builder // TODO(bar) install pickfirst as default. +) + +// Register registers the balancer builder to the balancer map. +// b.Name will be used as the name registered with this builder. +func Register(b Builder) { + m[b.Name()] = b +} + +// Get returns the resolver builder registered with the given name. +// If no builder is register with the name, the default pickfirst will +// be used. +func Get(name string) Builder { + if b, ok := m[name]; ok { + return b + } + return defaultBuilder +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. +// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. 
+ // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct{} + +// ClientConn represents a gRPC ClientConn. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + + // UpdateBalancerState is called by balancer to nofity gRPC that some internal + // state in balancer has changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConn. + UpdateBalancerState(s connectivity.State, p Picker) + + // Target returns the dial target for this ClientConn. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. 
+ Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// PickOptions contains addition information for the Pick operation. +type PickOptions struct{} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot everytime its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type Picker interface { + // Pick returns the SubConn to be used to send the RPC. + // The returned SubConn must be one returned by NewSubConn(). + // + // This functions is expected to return: + // - a SubConn that is known to be READY; + // - ErrNoSubConnAvailable if no SubConn is available, but progress is being + // made (for example, some SubConn is in CONNECTING mode); + // - other errors if no active connecting is happening (for example, all SubConn + // are in TRANSIENT_FAILURE mode). + // + // If a SubConn is returned: + // - If it is READY, gRPC will send the RPC on it; + // - If it is not ready, or becomes not ready after it's returned, gRPC will block + // this call until a new picker is updated and will call pick on the new picker. 
+ // + // If the returned error is not nil: + // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() + // - If the error is ErrTransientFailure: + // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() + // is called to pick again; + // - Otherwise, RPC will fail with unavailable error. + // - Else (error is other non-nil error): + // - The RPC will fail with unavailable error. + // + // The returned done() function will be called once the rpc has finished, with the + // final status of that RPC. + // done may be nil if balancer doesn't care about the RPC status. + Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. + // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. + // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. 
The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 00000000000..f5dbc4ba201 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State +} + +// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. +// TODO make a general purpose buffer that uses interface{}. 
+type scStateUpdateBuffer struct {
+	c       chan *scStateUpdate
+	mu      sync.Mutex
+	backlog []*scStateUpdate
+}
+
+func newSCStateUpdateBuffer() *scStateUpdateBuffer {
+	return &scStateUpdateBuffer{
+		c: make(chan *scStateUpdate, 1),
+	}
+}
+
+func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- t:
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, t)
+}
+
+func (b *scStateUpdateBuffer) load() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = nil
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+}
+
+// get returns the channel that receives a scStateUpdate in the buffer.
+//
+// Upon receiving, the caller should call load to send another
+// scStateUpdate onto the channel if there is any.
+func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
+	return b.c
+}
+
+// resolverUpdate contains the new resolved addresses or error if there's
+// any.
+type resolverUpdate struct {
+	addrs []resolver.Address
+	err   error
+}
+
+// ccBalancerWrapper is a wrapper on top of cc for balancers.
+// It implements balancer.ClientConn interface.
+type ccBalancerWrapper struct {
+	cc               *ClientConn
+	balancer         balancer.Balancer
+	stateChangeQueue *scStateUpdateBuffer
+	resolverUpdateCh chan *resolverUpdate
+	done             chan struct{}
+}
+
+func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
+	ccb := &ccBalancerWrapper{
+		cc:               cc,
+		stateChangeQueue: newSCStateUpdateBuffer(),
+		resolverUpdateCh: make(chan *resolverUpdate, 1),
+		done:             make(chan struct{}),
+	}
+	go ccb.watcher()
+	ccb.balancer = b.Build(ccb, bopts)
+	return ccb
+}
+
+// watcher runs the balancer's functions sequentially, so the balancer can be
+// implemented lock-free.
+func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.stateChangeQueue.get(): + ccb.stateChangeQueue.load() + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + case t := <-ccb.resolverUpdateCh: + ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) + case <-ccb.done: + } + + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + } +} + +func (ccb *ccBalancerWrapper) close() { + close(ccb.done) +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. 
+ if sc == nil { + return + } + ccb.stateChangeQueue.put(&scStateUpdate{ + sc: sc, + state: s, + }) +} + +func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { + select { + case <-ccb.resolverUpdateCh: + default: + } + ccb.resolverUpdateCh <- &resolverUpdate{ + addrs: addrs, + err: err, + } +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs) + ac, err := ccb.cc.newAddrConn(addrs) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + grpclog.Infof("ccBalancerWrapper: removing subconn") + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p) + ccb.cc.csMgr.updateState(s) + ccb.cc.blockingpicker.updatePicker(p) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs) + acbw.mu.Lock() + defer acbw.mu.Unlock() + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. 
+ acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect(false) + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect(false) +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 00000000000..9d0616080a1 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,367 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. 
+} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bwb.b.Start(cc.Target(), BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &connectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateBalancerState(connectivity.Idle, bw) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. + pickfirst bool + + cc balancer.ClientConn + + // To aggregate the connectivity state. + csEvltr *connectivityStateEvaluator + state connectivity.State + + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't return, cc doesn't have access to balancer. + startCh chan struct{} +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. +func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst) + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly. 
+ a := resolver.Address{ + Addr: bw.cc.Target(), + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.cc.Target()}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + oldSC.UpdateAddresses(newAddrs) + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. 
+ del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. + } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s) + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.recordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateBalancerState(bw.state, bw) + if s == connectivity.Shutdown { + // Remove state for this sc. 
+ delete(bw.connSt, sc) + } + return +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. + return +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() + return +} + +// The picker is the balancerWrapper itself. +// Pick should never return ErrNoSubConnAvailable. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + failfast := true // Default failfast is true. + if ss, ok := rpcInfoFromContext(ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return nil, nil, err + } + var done func(balancer.DoneInfo) + if p != nil { + done = func(i balancer.DoneInfo) { p() } + } + var sc balancer.SubConn + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, sc = range bw.conns { + break + } + } else { + var ok bool + sc, ok = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + if !ok && failfast { + return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { + // If the returned sc is not ready and RPC is failfast, + // return error, and this RPC will fail. 
+ return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + } + + return sc, done, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + mu sync.Mutex + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + cse.mu.Lock() + defer cse.mu.Unlock() + + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. 
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index af34a71316f..1ef2507c35f 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -40,6 +25,7 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -73,7 +59,10 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran } } for { - if err = recv(p, dopts.codec, stream, dopts.dc, reply, dopts.maxMsgSize, inPayload); err != nil { + if c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil { if err == io.EOF { break } @@ -86,14 +75,11 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) } c.trailerMD = stream.Trailer() - if peer, ok := peer.FromContext(stream.Context()); ok { - c.peer = peer - } return nil } // sendRequest writes out various information of an RPC such as Context and Message. 
-func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { +func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { defer func() { if err != nil { // If err is connection error, t will be closed, no need to close stream here. @@ -114,11 +100,17 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, Client: true, } } - outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload) + hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload) if err != nil { - return Errorf(codes.Internal, "grpc: %v", err) + return err } - err = t.Write(stream, outBuf, opts) + if c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(data) > *c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(data), *c.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() dopts.copts.StatsHandler.HandleRPC(ctx, outPayload) @@ -144,25 +136,33 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { - c := defaultCallInfo - if mc, ok := cc.getMethodConfig(method); ok { - c.failFast = !mc.WaitForReady - if mc.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, mc.Timeout) - defer cancel() - } + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady } + + if mc.Timeout != nil && *mc.Timeout >= 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer cancel() + } + + opts = append(cc.dopts.callOptions, opts...) for _, o := range opts { - if err := o.before(&c); err != nil { + if err := o.before(c); err != nil { return toRPCErr(err) } } defer func() { for _, o := range opts { - o.after(&c) + o.after(c) } }() + + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if EnableTracing { c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) defer c.traceInfo.tr.Finish() @@ -179,27 +179,25 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } }() } - ctx = newContextWithRPCInfo(ctx) + ctx = newContextWithRPCInfo(ctx, c.failFast) sh := cc.dopts.copts.StatsHandler if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) begin := &stats.Begin{ Client: true, BeginTime: time.Now(), 
FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ Client: true, EndTime: time.Now(), Error: e, } sh.HandleRPC(ctx, end) - } - }() + }() + } topts := &transport.Options{ Last: true, Delay: false, @@ -209,9 +207,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli err error t transport.ClientTransport stream *transport.Stream - // Record the put handler from Balancer.Get(...). It is called once the + // Record the done handler from Balancer.Get(...). It is called once the // RPC has completed or failed. - put func() + done func(balancer.DoneInfo) ) // TODO(zhaoq): Need a formal spec of fail-fast. callHdr := &transport.CallHdr{ @@ -221,11 +219,11 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } - - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, + if c.creds != nil { + callHdr.Creds = c.creds } - t, put, err = cc.getTransport(ctx, gopts) + + t, done, err = cc.getTransport(ctx, c.failFast) if err != nil { // TODO(zhaoq): Probably revisit the error handling. if _, ok := status.FromError(err); ok { @@ -245,28 +243,31 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } stream, err = t.NewStream(ctx, callHdr) if err != nil { - if put != nil { + if done != nil { if _, ok := err.(transport.ConnectionError); ok { // If error is connection error, transport was sending data on wire, // and we are not sure if anything has been sent on wire. // If error is not connection error, we are sure nothing has been sent. 
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) } - put() + done(balancer.DoneInfo{Err: err}) } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue } return toRPCErr(err) } - err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, stream, t, args, topts) + if peer, ok := peer.FromContext(stream.Context()); ok { + c.peer = peer + } + err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts) if err != nil { - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put() + done(balancer.DoneInfo{Err: err}) } // Retry a non-failfast RPC when // i) there is a connection error; or @@ -276,14 +277,14 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } return toRPCErr(err) } - err = recvResponse(ctx, cc.dopts, t, &c, stream, reply) + err = recvResponse(ctx, cc.dopts, t, c, stream, reply) if err != nil { - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put() + done(balancer.DoneInfo{Err: err}) } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -294,12 +295,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) } t.CloseStream(stream, nil) - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put() + done(balancer.DoneInfo{Err: err}) } return stream.Status().Err() } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f542d8bd041..71de2e50d2b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ 
b/vendor/google.golang.org/grpc/clientconn.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -38,14 +23,19 @@ import ( "fmt" "math" "net" + "reflect" + "strings" "sync" "time" "golang.org/x/net/context" "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/transport" ) @@ -56,8 +46,7 @@ var ( ErrClientConnClosing = errors.New("grpc: the client connection is closing") // ErrClientConnTimeout indicates that the ClientConn cannot establish the // underlying connections within the specified timeout. - // DEPRECATED: Please use context.DeadlineExceeded instead. This error will be - // removed in Q1 2017. + // DEPRECATED: Please use context.DeadlineExceeded instead. ErrClientConnTimeout = errors.New("grpc: timed out when dialing") // errNoTransportSecurity indicates that there is no transport security @@ -79,6 +68,8 @@ var ( errConnClosing = errors.New("grpc: the connection is closing") // errConnUnavailable indicates that the connection is unavailable. errConnUnavailable = errors.New("grpc: the connection is unavailable") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second ) @@ -86,30 +77,71 @@ var ( // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. 
type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - codec Codec - cp Compressor - dc Decompressor - bs backoffStrategy - balancer Balancer - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - copts transport.ConnectOptions - maxMsgSize int + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + codec Codec + cp Compressor + dc Decompressor + bs backoffStrategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + copts transport.ConnectOptions + callOptions []CallOption + // This is to support v1 balancer. + balancerBuilder balancer.Builder } -const defaultClientMaxMsgSize = math.MaxInt32 +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 +) // DialOption configures how we set up the connection. type DialOption func(*dialOptions) -// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. -func WithMaxMsgSize(s int) DialOption { +// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WithWriteBufferSize(s int) DialOption { return func(o *dialOptions) { - o.maxMsgSize = s + o.copts.WriteBufferSize = s + } +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for each read syscall. +func WithReadBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.ReadBufferSize = s + } +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. 
+func WithInitialWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialWindowSize = s + } +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + } +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) } } @@ -136,10 +168,23 @@ func WithDecompressor(dc Decompressor) DialOption { } } -// WithBalancer returns a DialOption which sets a load balancer. +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// Deprecated: use the new balancer APIs in balancer package instead. func WithBalancer(b Balancer) DialOption { return func(o *dialOptions) { - o.balancer = b + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + } +} + +// WithBalancerBuilder is for testing only. Users using custom balancers should +// register their balancer and use service config to choose the balancer to use. +func WithBalancerBuilder(b balancer.Builder) DialOption { + // TODO(bar) remove this when switching balancer is done. 
+ return func(o *dialOptions) { + o.balancerBuilder = b } } @@ -204,7 +249,7 @@ func WithTransportCredentials(creds credentials.TransportCredentials) DialOption } // WithPerRPCCredentials returns a DialOption which sets -// credentials which will place auth state on each outbound RPC. +// credentials and places auth state on each outbound RPC. func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { return func(o *dialOptions) { o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) @@ -213,6 +258,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn // initially. This is valid if and only if WithBlock() is present. +// Deprecated: use DialContext and context.WithTimeout instead. func WithTimeout(d time.Duration) DialOption { return func(o *dialOptions) { o.timeout = d @@ -241,7 +287,7 @@ func WithStatsHandler(h stats.Handler) DialOption { } } -// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors. +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors. // If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network // address and won't try to reconnect. // The default value of FailOnNonTempDialError is false. @@ -259,7 +305,7 @@ func WithUserAgent(s string) DialOption { } } -// WithKeepaliveParams returns a DialOption that specifies keepalive paramaters for the client transport. +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { return func(o *dialOptions) { o.copts.KeepaliveParams = kp @@ -295,26 +341,44 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { } // DialContext creates a client connection to the given target. 
ctx can be used to -// cancel or expire the pending connecting. Once this function returns, the +// cancel or expire the pending connection. Once this function returns, the // cancellation and expiration of ctx will be noop. Users should call ClientConn.Close // to terminate all the pending operations after this function returns. -// This is the EXPERIMENTAL API. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - conns: make(map[Address]*addrConn), + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + + blockingpicker: newPickerWrapper(), } cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.dopts.maxMsgSize = defaultClientMaxMsgSize + for _, opt := range opts { opt(&cc.dopts) } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil { + return nil, errNoTransportSecurity + } + } else { + if cc.dopts.copts.TransportCredentials != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + cc.mkp = cc.dopts.copts.KeepaliveParams if cc.dopts.copts.Dialer == nil { cc.dopts.copts.Dialer = newProxyDialer( func(ctx context.Context, addr string) (net.Conn, error) { - return dialContext(ctx, "tcp", addr) + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) }, ) } @@ -343,15 +407,16 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() + scSet := false if cc.dopts.scChan != nil { - // Wait for the initial service config. + // Try to get an initial service config. select { case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = sc + scSet = true } - case <-ctx.Done(): - return nil, ctx.Err() + default: } } // Set defaults. 
@@ -369,89 +434,130 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } else { cc.authority = target } - waitC := make(chan error, 1) - go func() { - defer close(waitC) - if cc.dopts.balancer == nil && cc.sc.LB != nil { - cc.dopts.balancer = cc.sc.LB + + if cc.dopts.balancerBuilder != nil { + var credsClone credentials.TransportCredentials + if creds != nil { + credsClone = creds.Clone() } - if cc.dopts.balancer != nil { - var credsClone credentials.TransportCredentials - if creds != nil { - credsClone = creds.Clone() - } - config := BalancerConfig{ - DialCreds: credsClone, - } - if err := cc.dopts.balancer.Start(target, config); err != nil { + buildOpts := balancer.BuildOptions{ + DialCreds: credsClone, + Dialer: cc.dopts.copts.Dialer, + } + // Build should not take long time. So it's ok to not have a goroutine for it. + // TODO(bar) init balancer after first resolver result to support service config balancer. + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, buildOpts) + } else { + waitC := make(chan error, 1) + go func() { + defer close(waitC) + // No balancer, or no resolver within the balancer. Connect directly. + ac, err := cc.newAddrConn([]resolver.Address{{Addr: target}}) + if err != nil { waitC <- err return } - ch := cc.dopts.balancer.Notify() - if ch != nil { - if cc.dopts.block { - doneChan := make(chan struct{}) - go cc.lbWatcher(doneChan) - <-doneChan - } else { - go cc.lbWatcher(nil) - } + if err := ac.connect(cc.dopts.block); err != nil { + waitC <- err return } - } - // No balancer, or no resolver within the balancer. Connect directly. 
- if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil { - waitC <- err - return - } - }() - select { - case <-ctx.Done(): - return nil, ctx.Err() - case err := <-waitC: - if err != nil { - return nil, err + }() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-waitC: + if err != nil { + return nil, err + } + } + } + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + } + case <-ctx.Done(): + return nil, ctx.Err() } } - if cc.dopts.scChan != nil { go cc.scWatcher() } + // Build the resolver. + cc.resolverWrapper, err = newCCResolverWrapper(cc) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + + if cc.balancerWrapper != nil && cc.resolverWrapper == nil { + // TODO(bar) there should always be a resolver (DNS as the default). + // Unblock balancer initialization with a fake resolver update if there's no resolver. + // The balancer wrapper will not read the addresses, so an empty list works. + // TODO(bar) remove this after the real resolver is started. + cc.balancerWrapper.handleResolvedAddrs([]resolver.Address{}, nil) + } + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + return cc, nil } -// ConnectivityState indicates the state of a client connection. -type ConnectivityState int +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} +} -const ( - // Idle indicates the ClientConn is idle. 
- Idle ConnectivityState = iota - // Connecting indicates the ClienConn is connecting. - Connecting - // Ready indicates the ClientConn is ready for work. - Ready - // TransientFailure indicates the ClientConn has seen a failure but expects to recover. - TransientFailure - // Shutdown indicates the ClientConn has started shutting down. - Shutdown -) - -func (s ConnectivityState) String() string { - switch s { - case Idle: - return "IDLE" - case Connecting: - return "CONNECTING" - case Ready: - return "READY" - case TransientFailure: - return "TRANSIENT_FAILURE" - case Shutdown: - return "SHUTDOWN" - default: - panic(fmt.Sprintf("unknown connectivity state: %d", s)) +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return } + if csm.state == state { + return + } + csm.state = state + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan } // ClientConn represents a client connection to an RPC server. @@ -462,58 +568,40 @@ type ClientConn struct { target string authority string dopts dialOptions + csMgr *connectivityStateManager + + balancerWrapper *ccBalancerWrapper + resolverWrapper *ccResolverWrapper + + blockingpicker *pickerWrapper mu sync.RWMutex sc ServiceConfig - conns map[Address]*addrConn - // Keepalive parameter can be udated if a GoAway is received. 
+ conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. mkp keepalive.ClientParameters } -// lbWatcher watches the Notify channel of the balancer in cc and manages -// connections accordingly. If doneChan is not nil, it is closed after the -// first successfull connection is made. -func (cc *ClientConn) lbWatcher(doneChan chan struct{}) { - for addrs := range cc.dopts.balancer.Notify() { - var ( - add []Address // Addresses need to setup connections. - del []*addrConn // Connections need to tear down. - ) - cc.mu.Lock() - for _, a := range addrs { - if _, ok := cc.conns[a]; !ok { - add = append(add, a) - } - } - for k, c := range cc.conns { - var keep bool - for _, a := range addrs { - if k == a { - keep = true - break - } - } - if !keep { - del = append(del, c) - delete(cc.conns, c.addr) - } - } - cc.mu.Unlock() - for _, a := range add { - if doneChan != nil { - err := cc.resetAddrConn(a, true, nil) - if err == nil { - close(doneChan) - doneChan = nil - } - } else { - cc.resetAddrConn(a, false, nil) - } - } - for _, c := range del { - c.tearDown(errConnDrain) - } +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() } func (cc *ClientConn) scWatcher() { @@ -534,69 +622,64 @@ func (cc *ClientConn) scWatcher() { } } -// resetAddrConn creates an addrConn for addr and adds it to cc.conns. 
-// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. -// If tearDownErr is nil, errConnDrain will be used instead. -func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error { +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { ac := &addrConn{ cc: cc, - addr: addr, + addrs: addrs, dopts: cc.dopts, } - cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = cc.mkp - cc.mu.RUnlock() ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - ac.stateCV = sync.NewCond(&ac.mu) - if EnableTracing { - ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) - } - if !ac.dopts.insecure { - if ac.dopts.copts.TransportCredentials == nil { - return errNoTransportSecurity - } - } else { - if ac.dopts.copts.TransportCredentials != nil { - return errCredentialsConflict - } - for _, cd := range ac.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return errTransportCredentialsMissing - } - } - } // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() - return ErrClientConnClosing + return nil, ErrClientConnClosing } - stale := cc.conns[ac.addr] - cc.conns[ac.addr] = ac + cc.conns[ac] = struct{}{} cc.mu.Unlock() - if stale != nil { - // There is an addrConn alive on ac.addr already. This could be due to - // 1) a buggy Balancer notifies duplicated Addresses; - // 2) goaway was received, a new ac will replace the old ac. - // The old ac should be deleted from cc.conns, but the - // underlying transport should drain rather than close. - if tearDownErr == nil { - // tearDownErr is nil if resetAddrConn is called by - // 1) Dial - // 2) lbWatcher - // In both cases, the stale ac should drain, not close. 
- stale.tearDown(errConnDrain) - } else { - stale.tearDown(tearDownErr) - } + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +// connect starts to creating transport and also starts the transport monitor +// goroutine for this ac. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +// This was part of resetAddrConn, keep it here to make the diff look clean. +func (ac *addrConn) connect(block bool) error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + ac.state = connectivity.Connecting + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + ac.mu.Unlock() + if block { - if err := ac.resetTransport(false); err != nil { + if err := ac.resetTransport(); err != nil { if err != errConnClosing { - // Tear down ac and delete it from cc.conns. - cc.mu.Lock() - delete(cc.conns, ac.addr) - cc.mu.Unlock() ac.tearDown(err) } if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { @@ -609,8 +692,8 @@ func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) } else { // Start a goroutine connecting to the server asynchronously. go func() { - if err := ac.resetTransport(false); err != nil { - grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) + if err := ac.resetTransport(); err != nil { + grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) if err != errConnClosing { // Keep this ac in cc.conns, to get the reason it's torn down. 
ac.tearDown(err) @@ -623,66 +706,86 @@ func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) return nil } -// TODO: Avoid the locking here. -func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) { - cc.mu.RLock() - defer cc.mu.RUnlock() - m, ok = cc.sc.Methods[method] - return +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// It checks whether current connected address of ac is in the new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown { + ac.addrs = addrs + return true + } + + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound } -func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { - var ( - ac *addrConn - ok bool - put func() - ) - if cc.dopts.balancer == nil { +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e /service/). If there is a default MethodConfig for +// the serivce, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. 
+ cc.mu.RLock() + defer cc.mu.RUnlock() + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m, _ = cc.sc.Methods[method[:i+1]] + } + return m +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) { + if cc.balancerWrapper == nil { // If balancer is nil, there should be only one addrConn available. cc.mu.RLock() if cc.conns == nil { cc.mu.RUnlock() + // TODO this function returns toRPCErr and non-toRPCErr. Clean up + // the errors in ClientConn. return nil, nil, toRPCErr(ErrClientConnClosing) } - for _, ac = range cc.conns { + var ac *addrConn + for ac = range cc.conns { // Break after the first iteration to get the first addrConn. - ok = true break } cc.mu.RUnlock() - } else { - var ( - addr Address - err error - ) - addr, put, err = cc.dopts.balancer.Get(ctx, opts) + if ac == nil { + return nil, nil, errConnClosing + } + t, err := ac.wait(ctx, false /*hasBalancer*/, failfast) if err != nil { - return nil, nil, toRPCErr(err) + return nil, nil, err } - cc.mu.RLock() - if cc.conns == nil { - cc.mu.RUnlock() - return nil, nil, toRPCErr(ErrClientConnClosing) - } - ac, ok = cc.conns[addr] - cc.mu.RUnlock() + return t, nil, nil } - if !ok { - if put != nil { - updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) - put() - } - return nil, nil, errConnClosing - } - t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) + + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{}) if err != nil { - if put != nil { - updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) - put() - } - return nil, nil, err + return nil, nil, toRPCErr(err) } - return t, put, nil + return t, done, nil } // Close tears down the ClientConn and all underlying connections. 
@@ -696,11 +799,16 @@ func (cc *ClientConn) Close() error { } conns := cc.conns cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) cc.mu.Unlock() - if cc.dopts.balancer != nil { - cc.dopts.balancer.Close() + cc.blockingpicker.close() + if cc.resolverWrapper != nil { + cc.resolverWrapper.close() } - for _, ac := range conns { + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + for ac := range conns { ac.tearDown(ErrClientConnClosing) } return nil @@ -711,15 +819,15 @@ type addrConn struct { ctx context.Context cancel context.CancelFunc - cc *ClientConn - addr Address - dopts dialOptions - events trace.EventLog + cc *ClientConn + curAddr resolver.Address + addrs []resolver.Address + dopts dialOptions + events trace.EventLog + acbw balancer.SubConn - mu sync.Mutex - state ConnectivityState - stateCV *sync.Cond - down func(error) // the handler called when a connection is down. + mu sync.Mutex + state connectivity.State // ready is closed and becomes nil when a new transport is up or failed // due to timeout. ready chan struct{} @@ -759,125 +867,137 @@ func (ac *addrConn) errorf(format string, a ...interface{}) { } } -// getState returns the connectivity state of the Conn -func (ac *addrConn) getState() ConnectivityState { +// resetTransport recreates a transport to the address for ac. The old +// transport will close itself on error or when the clientconn is closed. +// +// TODO(bar) make sure all state transitions are valid. +func (ac *addrConn) resetTransport() error { ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -// waitForStateChange blocks until the state changes to something other than the sourceState. 
-func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - ac.mu.Lock() - defer ac.mu.Unlock() - if sourceState != ac.state { - return ac.state, nil + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing } - done := make(chan struct{}) - var err error - go func() { - select { - case <-ctx.Done(): - ac.mu.Lock() - err = ctx.Err() - ac.stateCV.Broadcast() - ac.mu.Unlock() - case <-done: - } - }() - defer close(done) - for sourceState == ac.state { - ac.stateCV.Wait() - if err != nil { - return ac.state, err - } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } - return ac.state, nil -} - -func (ac *addrConn) resetTransport(closeTransport bool) error { + ac.transport = nil + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() for retries := 0; ; retries++ { + sleepTime := ac.dopts.bs.backoff(retries) + timeout := minConnectTimeout ac.mu.Lock() - ac.printf("connecting") - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. 
+ if timeout < time.Duration(int(sleepTime)/len(ac.addrs)) { + timeout = time.Duration(int(sleepTime) / len(ac.addrs)) + } + connectTime := time.Now() + if ac.state == connectivity.Shutdown { ac.mu.Unlock() return errConnClosing } - if ac.down != nil { - ac.down(downErrorf(false, true, "%v", errNetworkIO)) - ac.down = nil - } - ac.state = Connecting - ac.stateCV.Broadcast() - t := ac.transport - ac.mu.Unlock() - if closeTransport && t != nil { - t.Close() - } - sleepTime := ac.dopts.bs.backoff(retries) - timeout := minConnectTimeout - if timeout < sleepTime { - timeout = sleepTime - } - ctx, cancel := context.WithTimeout(ac.ctx, timeout) - connectTime := time.Now() - sinfo := transport.TargetInfo{ - Addr: ac.addr.Addr, - Metadata: ac.addr.Metadata, - } - newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts) - // Don't call cancel in success path due to a race in Go 1.6: - // https://github.com/golang/go/issues/15078. - if err != nil { - cancel() - - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { - return err + ac.printf("connecting") + if ac.state != connectivity.Connecting { + ac.state = connectivity.Connecting + // TODO(bar) remove condition once we always have a balancer. + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) } - grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr) + } + // copy ac.addrs in case of race + addrsIter := make([]resolver.Address, len(ac.addrs)) + copy(addrsIter, ac.addrs) + copts := ac.dopts.copts + ac.mu.Unlock() + for _, addr := range addrsIter { ac.mu.Lock() - if ac.state == Shutdown { + if ac.state == connectivity.Shutdown { // ac.tearDown(...) has been invoked. 
ac.mu.Unlock() return errConnClosing } - ac.errorf("transient failure: %v", err) - ac.state = TransientFailure - ac.stateCV.Broadcast() + ac.mu.Unlock() + sinfo := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + } + newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout) + if err != nil { + if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { + ac.mu.Lock() + if ac.state != connectivity.Shutdown { + ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + } + ac.mu.Unlock() + return err + } + grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, addr) + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing + } + ac.mu.Unlock() + continue + } + ac.mu.Lock() + ac.printf("ready") + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + newTransport.Close() + return errConnClosing + } + ac.state = connectivity.Ready + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + t := ac.transport + ac.transport = newTransport + if t != nil { + t.Close() + } + ac.curAddr = addr if ac.ready != nil { close(ac.ready) ac.ready = nil } ac.mu.Unlock() - closeTransport = false - select { - case <-time.After(sleepTime - time.Since(connectTime)): - case <-ac.ctx.Done(): - return ac.ctx.Err() - } - continue + return nil } ac.mu.Lock() - ac.printf("ready") - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. 
- ac.mu.Unlock() - newTransport.Close() - return errConnClosing + ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) } - ac.state = Ready - ac.stateCV.Broadcast() - ac.transport = newTransport if ac.ready != nil { close(ac.ready) ac.ready = nil } - if ac.cc.dopts.balancer != nil { - ac.down = ac.cc.dopts.balancer.Up(ac.addr) - } ac.mu.Unlock() - return nil + timer := time.NewTimer(sleepTime - time.Since(connectTime)) + select { + case <-timer.C: + case <-ac.ctx.Done(): + timer.Stop() + return ac.ctx.Err() + } + timer.Stop() } } @@ -888,73 +1008,54 @@ func (ac *addrConn) transportMonitor() { ac.mu.Lock() t := ac.transport ac.mu.Unlock() + // Block until we receive a goaway or an error occurs. + select { + case <-t.GoAway(): + case <-t.Error(): + } + // If a GoAway happened, regardless of error, adjust our keepalive + // parameters as appropriate. select { - // This is needed to detect the teardown when - // the addrConn is idle (i.e., no RPC in flight). - case <-ac.ctx.Done(): - select { - case <-t.Error(): - t.Close() - default: - } - return case <-t.GoAway(): ac.adjustParams(t.GetGoAwayReason()) - // If GoAway happens without any network I/O error, ac is closed without shutting down the - // underlying transport (the transport will be closed when all the pending RPCs finished or - // failed.). - // If GoAway and some network I/O error happen concurrently, ac and its underlying transport - // are closed. - // In both cases, a new ac is created. - select { - case <-t.Error(): - ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) - default: - ac.cc.resetAddrConn(ac.addr, false, errConnDrain) + default: + } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + // Set connectivity state to TransientFailure before calling + // resetTransport. Transition READY->CONNECTING is not valid. 
+ ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + if err := ac.resetTransport(); err != nil { + ac.mu.Lock() + ac.printf("transport exiting: %v", err) + ac.mu.Unlock() + grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) } return - case <-t.Error(): - select { - case <-ac.ctx.Done(): - t.Close() - return - case <-t.GoAway(): - ac.adjustParams(t.GetGoAwayReason()) - ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) - return - default: - } - ac.mu.Lock() - if ac.state == Shutdown { - // ac has been shutdown. - ac.mu.Unlock() - return - } - ac.state = TransientFailure - ac.stateCV.Broadcast() - ac.mu.Unlock() - if err := ac.resetTransport(true); err != nil { - ac.mu.Lock() - ac.printf("transport exiting: %v", err) - ac.mu.Unlock() - grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return - } } } } // wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or -// iv) transport is in TransientFailure and there is a balancer/failfast is true. +// iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true. func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { for { ac.mu.Lock() switch { - case ac.state == Shutdown: + case ac.state == connectivity.Shutdown: if failfast || !hasBalancer { // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. 
err := ac.tearDownErr @@ -963,11 +1064,11 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans } ac.mu.Unlock() return nil, errConnClosing - case ac.state == Ready: + case ac.state == connectivity.Ready: ct := ac.transport ac.mu.Unlock() return ct, nil - case ac.state == TransientFailure: + case ac.state == connectivity.TransientFailure: if failfast || hasBalancer { ac.mu.Unlock() return nil, errConnUnavailable @@ -988,6 +1089,28 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans } } +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect(false) + } + return nil, false +} + // tearDown starts to tear down the addrConn. // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in // some edge cases (e.g., the caller opens and closes many addrConn's in a @@ -995,13 +1118,9 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans // tearDown doesn't remove ac from ac.cc.conns. func (ac *addrConn) tearDown(err error) { ac.cancel() - ac.mu.Lock() + ac.curAddr = resolver.Address{} defer ac.mu.Unlock() - if ac.down != nil { - ac.down(downErrorf(false, false, "%v", err)) - ac.down = nil - } if err == errConnDrain && ac.transport != nil { // GracefulClose(...) may be executed multiple times when // i) receiving multiple GoAway frames from the server; or @@ -1009,12 +1128,16 @@ func (ac *addrConn) tearDown(err error) { // address removal and GoAway. 
ac.transport.GracefulClose() } - if ac.state == Shutdown { + if ac.state == connectivity.Shutdown { return } - ac.state = Shutdown + ac.state = connectivity.Shutdown ac.tearDownErr = err - ac.stateCV.Broadcast() + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } if ac.events != nil { ac.events.Finish() ac.events = nil @@ -1023,8 +1146,11 @@ func (ac *addrConn) tearDown(err error) { close(ac.ready) ac.ready = nil } - if ac.transport != nil && err != errConnDrain { - ac.transport.Close() - } return } + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index bd76ebb7f17..905b048e2ac 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -1,35 +1,20 @@ /* -* - * Copyright 2014, Google Inc. - * All rights reserved. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Copyright 2014 gRPC authors. * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * http://www.apache.org/licenses/LICENSE-2.0 * -*/ + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ package grpc @@ -96,6 +81,7 @@ func (p protoCodec) Marshal(v interface{}) ([]byte, error) { func (p protoCodec) Unmarshal(data []byte, v interface{}) error { cb := protoBufferPool.Get().(*cachedProtoBuffer) cb.SetBuf(data) + v.(proto.Message).Reset() err := cb.Unmarshal(v.(proto.Message)) cb.SetBuf(nil) protoBufferPool.Put(cb) diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go index e6762d08455..259837060ab 100644 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=Code; DO NOT EDIT +// Code generated by "stringer -type=Code"; DO NOT EDIT. 
package codes @@ -9,7 +9,7 @@ const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlre var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} func (i Code) String() string { - if i+1 >= Code(len(_Code_index)) { + if i >= Code(len(_Code_index)-1) { return fmt.Sprintf("Code(%d)", i) } return _Code_name[_Code_index[i]:_Code_index[i+1]] diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 37c5b860bd6..81fe7bf85b3 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -44,7 +29,7 @@ const ( // OK is returned on success. OK Code = 0 - // Canceled indicates the operation was cancelled (typically by the caller). + // Canceled indicates the operation was canceled (typically by the caller). Canceled Code = 1 // Unknown error. 
An example of where this error may be returned is diff --git a/vendor/google.golang.org/grpc/connectivity/BUILD b/vendor/google.golang.org/grpc/connectivity/BUILD new file mode 100644 index 00000000000..d5555d4a28f --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["connectivity.go"], + importpath = "google.golang.org/grpc/connectivity", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 00000000000..568ef5dc68b --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. 
+// All APIs in this package are experimental.
+package connectivity
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/grpclog"
+)
+
+// State indicates the state of connectivity.
+// It can be the state of a ClientConn or SubConn.
+type State int
+
+func (s State) String() string {
+	switch s {
+	case Idle:
+		return "IDLE"
+	case Connecting:
+		return "CONNECTING"
+	case Ready:
+		return "READY"
+	case TransientFailure:
+		return "TRANSIENT_FAILURE"
+	case Shutdown:
+		return "SHUTDOWN"
+	default:
+		grpclog.Errorf("unknown connectivity state: %d", s)
+		return "Invalid-State"
+	}
+}
+
+const (
+	// Idle indicates the ClientConn is idle.
+	Idle State = iota
+	// Connecting indicates the ClientConn is connecting.
+	Connecting
+	// Ready indicates the ClientConn is ready for work.
+	Ready
+	// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+	TransientFailure
+	// Shutdown indicates the ClientConn has started shutting down.
+	Shutdown
+)
+
+// Reporter reports the connectivity states.
+type Reporter interface {
+	// CurrentState returns the current state of the reporter.
+	CurrentState() State
+	// WaitForStateChange blocks until the reporter's state is different from the given state,
+	// and returns true.
+	// It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). 
+ WaitForStateChange(context.Context, State) bool +} diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh deleted file mode 100755 index b85f9181dee..00000000000 --- a/vendor/google.golang.org/grpc/coverage.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - - -set -e - -workdir=.cover -profile="$workdir/cover.out" -mode=set -end2endtest="google.golang.org/grpc/test" - -generate_cover_data() { - rm -rf "$workdir" - mkdir "$workdir" - - for pkg in "$@"; do - if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] - then - f="$workdir/$(echo $pkg | tr / -)" - go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" - go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" - fi - done - - echo "mode: $mode" >"$profile" - grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" -} - -show_cover_report() { - go tool cover -${1}="$profile" -} - -push_to_coveralls() { - goveralls -coverprofile="$profile" -} - -generate_cover_data $(go list ./...) -show_cover_report func -case "$1" in -"") - ;; ---html) - show_cover_report html ;; ---coveralls) - push_to_coveralls ;; -*) - echo >&2 "error: invalid option: $1" ;; -esac -rm -rf "$workdir" diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index d1217344b67..0ce766a4dcf 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -106,10 +91,14 @@ type TransportCredentials interface { // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). // If the returned error is a wrapper error, implementations should make sure that // the error implements Temporary() to have the correct retry behaviors. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about // the connection. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) // Info provides the ProtocolInfo of this TransportCredentials. Info() ProtocolInfo @@ -196,14 +185,14 @@ func NewTLS(c *tls.Config) TransportCredentials { return tc } -// NewClientTLSFromCert constructs a TLS from the input certificate for client. +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } -// NewClientTLSFromFile constructs a TLS from the input certificate file for client. +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. 
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { @@ -218,12 +207,12 @@ func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredent return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil } -// NewServerTLSFromCert constructs a TLS from the input certificate for server. +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) } -// NewServerTLSFromFile constructs a TLS from the input certificate file and key +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key // file for server. func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go index 7597b09e358..60409aac0fb 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -3,34 +3,19 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go index 0ecf342da84..93f0e1d8de2 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go @@ -2,34 +2,19 @@ /* * - * Copyright 2017, Google Inc. 
- * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go index cfd40dfa34a..d6bbcc9fdd9 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -2,34 +2,19 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index b4c0e740e9c..e153b2c390c 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -1,6 +1,24 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + /* Package grpc implements an RPC system called gRPC. -See www.grpc.io for more information about gRPC. +See grpc.io for more information about gRPC. 
*/ package grpc diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go deleted file mode 100644 index b61c57e88de..00000000000 --- a/vendor/google.golang.org/grpc/go16.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build go1.6,!go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -package grpc - -import ( - "fmt" - "net" - "net/http" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) -} - -func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { - req.Cancel = ctx.Done() - if err := req.Write(conn); err != nil { - return fmt.Errorf("failed to write the HTTP request: %v", err) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go deleted file mode 100644 index 844f0e1899b..00000000000 --- a/vendor/google.golang.org/grpc/go17.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "net" - "net/http" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, network, address) -} - -func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { - req = req.WithContext(ctx) - if err := req.Write(conn); err != nil { - return err - } - return nil -} diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go index 524e429df3e..db56ff36217 100644 --- a/vendor/google.golang.org/grpc/grpclb.go +++ b/vendor/google.golang.org/grpc/grpclb.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -43,7 +28,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/codes" - lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1" + lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/naming" @@ -74,41 +59,21 @@ type balanceLoadClientStream struct { ClientStream } -func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { +func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error { return x.ClientStream.SendMsg(m) } -func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { - m := new(lbpb.LoadBalanceResponse) +func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) { + m := new(lbmpb.LoadBalanceResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the server is a backend server. - Backend AddressType = iota - // GRPCLB indicates the server is a grpclb load balancer. - GRPCLB -) - -// AddrMetadataGRPCLB contains the information the name resolution for grpclb should provide. The -// name resolver used by grpclb balancer is required to provide this type of metadata in -// its address updates. -type AddrMetadataGRPCLB struct { - // AddrType is the type of server (grpc load balancer or backend). - AddrType AddressType - // ServerName is the name of the grpc load balancer. Used for authentication. - ServerName string -} - // NewGRPCLBBalancer creates a grpclb load balancer. func NewGRPCLBBalancer(r naming.Resolver) Balancer { - return &balancer{ + return &grpclbBalancer{ r: r, } } @@ -131,27 +96,27 @@ type grpclbAddrInfo struct { dropForLoadBalancing bool } -type balancer struct { - r naming.Resolver - target string - mu sync.Mutex - seq int // a sequence number to make sure addrCh does not get stale addresses. 
- w naming.Watcher - addrCh chan []Address - rbs []remoteBalancerInfo - addrs []*grpclbAddrInfo - next int - waitCh chan struct{} - done bool - expTimer *time.Timer - rand *rand.Rand +type grpclbBalancer struct { + r naming.Resolver + target string + mu sync.Mutex + seq int // a sequence number to make sure addrCh does not get stale addresses. + w naming.Watcher + addrCh chan []Address + rbs []remoteBalancerInfo + addrs []*grpclbAddrInfo + next int + waitCh chan struct{} + done bool + rand *rand.Rand - clientStats lbpb.ClientStats + clientStats lbmpb.ClientStats } -func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { +func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { updates, err := w.Next() if err != nil { + grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err) return err } b.mu.Lock() @@ -173,24 +138,24 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerIn if exist { continue } - md, ok := update.Metadata.(*AddrMetadataGRPCLB) + md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB) if !ok { // TODO: Revisit the handling here and may introduce some fallback mechanism. - grpclog.Printf("The name resolution contains unexpected metadata %v", update.Metadata) + grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata) continue } switch md.AddrType { - case Backend: + case naming.Backend: // TODO: Revisit the handling here and may introduce some fallback mechanism. 
- grpclog.Printf("The name resolution does not give grpclb addresses") + grpclog.Errorf("The name resolution does not give grpclb addresses") continue - case GRPCLB: + case naming.GRPCLB: b.rbs = append(b.rbs, remoteBalancerInfo{ addr: update.Addr, name: md.ServerName, }) default: - grpclog.Printf("Received unknow address type %d", md.AddrType) + grpclog.Errorf("Received unknow address type %d", md.AddrType) continue } case naming.Delete: @@ -202,7 +167,7 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerIn } } default: - grpclog.Println("Unknown update.Op ", update.Op) + grpclog.Errorf("Unknown update.Op %v", update.Op) } } // TODO: Fall back to the basic round-robin load balancing if the resulting address is @@ -215,42 +180,33 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerIn return nil } -func (b *balancer) serverListExpire(seq int) { - b.mu.Lock() - defer b.mu.Unlock() - // TODO: gRPC interanls do not clear the connections when the server list is stale. - // This means RPCs will keep using the existing server list until b receives new - // server list even though the list is expired. Revisit this behavior later. - if b.done || seq < b.seq { - return - } - b.next = 0 - b.addrs = nil - // Ask grpc internals to close all the corresponding connections. 
- b.addrCh <- nil -} - -func convertDuration(d *lbpb.Duration) time.Duration { +func convertDuration(d *lbmpb.Duration) time.Duration { if d == nil { return 0 } return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond } -func (b *balancer) processServerList(l *lbpb.ServerList, seq int) { +func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) { if l == nil { return } servers := l.GetServers() - expiration := convertDuration(l.GetExpirationInterval()) var ( sl []*grpclbAddrInfo addrs []Address ) for _, s := range servers { md := metadata.Pairs("lb-token", s.LoadBalanceToken) + ip := net.IP(s.IpAddress) + ipStr := ip.String() + if ip.To4() == nil { + // Add square brackets to ipv6 addresses, otherwise net.Dial() and + // net.SplitHostPort() will return too many colons error. + ipStr = fmt.Sprintf("[%s]", ipStr) + } addr := Address{ - Addr: fmt.Sprintf("%s:%d", net.IP(s.IpAddress), s.Port), + Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), Metadata: &md, } sl = append(sl, &grpclbAddrInfo{ @@ -270,20 +226,11 @@ func (b *balancer) processServerList(l *lbpb.ServerList, seq int) { b.next = 0 b.addrs = sl b.addrCh <- addrs - if b.expTimer != nil { - b.expTimer.Stop() - b.expTimer = nil - } - if expiration > 0 { - b.expTimer = time.AfterFunc(expiration, func() { - b.serverListExpire(seq) - }) - } } return } -func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { +func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { ticker := time.NewTicker(interval) defer ticker.Stop() for { @@ -294,29 +241,30 @@ func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Dura } b.mu.Lock() stats := b.clientStats - b.clientStats = lbpb.ClientStats{} // Clear the stats. + b.clientStats = lbmpb.ClientStats{} // Clear the stats. 
b.mu.Unlock() t := time.Now() - stats.Timestamp = &lbpb.Timestamp{ + stats.Timestamp = &lbmpb.Timestamp{ Seconds: t.Unix(), Nanos: int32(t.Nanosecond()), } - if err := s.Send(&lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ + if err := s.Send(&lbmpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{ ClientStats: &stats, }, }); err != nil { + grpclog.Errorf("grpclb: failed to send load report: %v", err) return } } } -func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { +func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := lbc.BalanceLoad(ctx) if err != nil { - grpclog.Printf("Failed to perform RPC to the remote balancer %v", err) + grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) return } b.mu.Lock() @@ -325,37 +273,39 @@ func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry b return } b.mu.Unlock() - initReq := &lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ - InitialRequest: &lbpb.InitialLoadBalanceRequest{ + initReq := &lbmpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{ + InitialRequest: &lbmpb.InitialLoadBalanceRequest{ Name: b.target, }, }, } if err := stream.Send(initReq); err != nil { + grpclog.Errorf("grpclb: failed to send init request: %v", err) // TODO: backoff on retry? return true } reply, err := stream.Recv() if err != nil { + grpclog.Errorf("grpclb: failed to recv init response: %v", err) // TODO: backoff on retry? 
return true } initResp := reply.GetInitialResponse() if initResp == nil { - grpclog.Println("Failed to receive the initial response from the remote balancer.") + grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.") return } // TODO: Support delegation. if initResp.LoadBalancerDelegate != "" { // delegation - grpclog.Println("TODO: Delegation is not supported yet.") + grpclog.Errorf("TODO: Delegation is not supported yet.") return } streamDone := make(chan struct{}) defer close(streamDone) b.mu.Lock() - b.clientStats = lbpb.ClientStats{} // Clear client stats. + b.clientStats = lbmpb.ClientStats{} // Clear client stats. b.mu.Unlock() if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { go b.sendLoadReport(stream, d, streamDone) @@ -364,6 +314,7 @@ func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry b for { reply, err := stream.Recv() if err != nil { + grpclog.Errorf("grpclb: failed to recv server list: %v", err) break } b.mu.Lock() @@ -381,7 +332,7 @@ func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry b return true } -func (b *balancer) Start(target string, config BalancerConfig) error { +func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { b.rand = rand.New(rand.NewSource(time.Now().Unix())) // TODO: Fall back to the basic direct connection if there is no name resolver. 
if b.r == nil { @@ -397,6 +348,7 @@ func (b *balancer) Start(target string, config BalancerConfig) error { w, err := b.r.Resolve(target) if err != nil { b.mu.Unlock() + grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err) return err } b.w = w @@ -406,7 +358,7 @@ func (b *balancer) Start(target string, config BalancerConfig) error { go func() { for { if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil { - grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) + grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err) close(balancerAddrsCh) return } @@ -490,22 +442,32 @@ func (b *balancer) Start(target string, config BalancerConfig) error { cc.Close() } // Talk to the remote load balancer to get the server list. - var err error - creds := config.DialCreds - ccError = make(chan struct{}) - if creds == nil { - cc, err = Dial(rb.addr, WithInsecure()) - } else { + var ( + err error + dopts []DialOption + ) + if creds := config.DialCreds; creds != nil { if rb.name != "" { if err := creds.OverrideServerName(rb.name); err != nil { - grpclog.Printf("Failed to override the server name in the credentials: %v", err) + grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err) continue } } - cc, err = Dial(rb.addr, WithTransportCredentials(creds)) + dopts = append(dopts, WithTransportCredentials(creds)) + } else { + dopts = append(dopts, WithInsecure()) } + if dialer := config.Dialer; dialer != nil { + // WithDialer takes a different type of function, so we instead use a special DialOption here. + dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer }) + } + dopts = append(dopts, WithBlock()) + ccError = make(chan struct{}) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + cc, err = DialContext(ctx, rb.addr, dopts...) 
+ cancel() if err != nil { - grpclog.Printf("Failed to setup a connection to the remote balancer %v: %v", rb.addr, err) + grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err) close(ccError) continue } @@ -529,7 +491,7 @@ func (b *balancer) Start(target string, config BalancerConfig) error { return nil } -func (b *balancer) down(addr Address, err error) { +func (b *grpclbBalancer) down(addr Address, err error) { b.mu.Lock() defer b.mu.Unlock() for _, a := range b.addrs { @@ -540,7 +502,7 @@ func (b *balancer) down(addr Address, err error) { } } -func (b *balancer) Up(addr Address) func(error) { +func (b *grpclbBalancer) Up(addr Address) func(error) { b.mu.Lock() defer b.mu.Unlock() if b.done { @@ -568,7 +530,7 @@ func (b *balancer) Up(addr Address) func(error) { } } -func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { +func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { var ch chan struct{} b.mu.Lock() if b.done { @@ -638,17 +600,10 @@ func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Addre } } if !opts.BlockingWait { - if len(b.addrs) == 0 { - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithClientFailedToSend++ - b.mu.Unlock() - err = Errorf(codes.Unavailable, "there is no address available") - return - } - // Returns the next addr on b.addrs for a failfast RPC. - addr = b.addrs[b.next].addr - b.next++ + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithClientFailedToSend++ b.mu.Unlock() + err = Errorf(codes.Unavailable, "there is no address available") return } // Wait on b.waitCh for non-failfast RPCs. 
@@ -725,17 +680,17 @@ func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Addre } } -func (b *balancer) Notify() <-chan []Address { +func (b *grpclbBalancer) Notify() <-chan []Address { return b.addrCh } -func (b *balancer) Close() error { +func (b *grpclbBalancer) Close() error { b.mu.Lock() defer b.mu.Unlock() - b.done = true - if b.expTimer != nil { - b.expTimer.Stop() + if b.done { + return errBalancerClosed } + b.done = true if b.waitCh != nil { close(b.waitCh) } diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD index 79fd880777f..35716e585b4 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD @@ -1,17 +1,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") -filegroup( - name = "go_default_library_protos", - srcs = ["grpclb.proto"], - visibility = ["//visibility:public"], -) - go_library( name = "go_default_library", - srcs = ["grpclb.pb.go"], + srcs = ["doc.go"], importpath = "google.golang.org/grpc/grpclb/grpc_lb_v1", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) filegroup( @@ -23,7 +16,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go new file mode 100644 index 00000000000..aba962840c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go @@ -0,0 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpc_lb_v1 is the parent package of all gRPC loadbalancer +// message and service protobuf definitions. +package grpc_lb_v1 diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD new file mode 100644 index 00000000000..06ab31fa949 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["messages.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["messages.pb.go"], + importpath = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go similarity index 79% rename from vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go rename to vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go index f63941bd803..f4a27125a4f 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go 
+++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: grpclb.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_lb_v1/messages/messages.proto /* -Package grpc_lb_v1 is a generated protocol buffer package. +Package messages is a generated protocol buffer package. It is generated from these files: - grpclb.proto + grpc_lb_v1/messages/messages.proto It has these top-level messages: Duration @@ -19,7 +18,7 @@ It has these top-level messages: ServerList Server */ -package grpc_lb_v1 +package messages import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -473,11 +472,6 @@ type ServerList struct { // across more servers. The client should consume the server list in order // unless instructed otherwise via the client_config. Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"` - // Indicates the amount of time that the client should consider this server - // list as valid. It may be considered stale after waiting this interval of - // time after receiving the list. If the interval is not positive, the - // client can assume the list is valid until the next list is received. - ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"` } func (m *ServerList) Reset() { *m = ServerList{} } @@ -492,13 +486,6 @@ func (m *ServerList) GetServers() []*Server { return nil } -func (m *ServerList) GetExpirationInterval() *Duration { - if m != nil { - return m.ExpirationInterval - } - return nil -} - // Contains server information. When none of the [drop_for_*] fields are true, // use the other fields. When drop_for_rate_limiting is true, ignore all other // fields. 
Use drop_for_load_balancing only when it is true and @@ -576,54 +563,53 @@ func init() { proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") } -func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 733 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39, - 0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34, - 0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a, - 0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9, - 0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1, - 0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92, - 0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51, - 0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0, - 0x74, 0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51, - 0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7, - 0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4, - 0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13, - 0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67, - 0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed, - 0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93, - 0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f, - 0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 
0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2, - 0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4, - 0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd, - 0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2, - 0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd, - 0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71, - 0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a, - 0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c, - 0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c, - 0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0, - 0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84, - 0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37, - 0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5, - 0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f, - 0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07, - 0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71, - 0xcb, 0x01, 0x9e, 0xd3, 0x5b, 0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f, - 0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87, - 0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94, - 0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56, - 0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 
0x85, 0x7a, 0xa9, - 0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a, - 0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e, - 0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87, - 0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28, - 0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70, - 0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94, - 0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5, - 0xc6, 0x7f, 0x13, 0xf7, 0xfd, 0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff, - 0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00, + // 709 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b, + 0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69, + 0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55, + 0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28, + 0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f, + 0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb, + 0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56, + 0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3, + 0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a, + 0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18, + 0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 
0x0f, 0xb8, + 0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a, + 0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc, + 0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d, + 0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f, + 0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42, + 0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b, + 0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf, + 0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60, + 0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3, + 0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29, + 0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9, + 0x99, 0xd2, 0x4c, 0x73, 0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1, + 0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e, + 0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd, + 0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a, + 0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa, + 0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31, + 0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a, + 0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79, + 0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8, + 0x1a, 0xd4, 0x63, 0xd4, 
0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89, + 0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f, + 0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7, + 0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a, + 0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62, + 0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d, + 0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77, + 0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc, + 0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76, + 0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b, + 0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06, + 0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd, + 0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86, + 0xa6, 0x4a, 0x06, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto similarity index 71% rename from vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto rename to vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto index a2502fb284a..2ed04551fad 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto @@ -1,35 +1,21 @@ -// Copyright 2016, Google Inc. -// All rights reserved. +// Copyright 2016 gRPC authors. 
// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. syntax = "proto3"; package grpc.lb.v1; +option go_package = "messages"; message Duration { // Signed seconds of the span of time. Must be from -315,576,000,000 @@ -46,7 +32,6 @@ message Duration { } message Timestamp { - // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. @@ -59,12 +44,6 @@ message Timestamp { int32 nanos = 2; } -service LoadBalancer { - // Bidirectional rpc to get a list of servers. - rpc BalanceLoad(stream LoadBalanceRequest) - returns (stream LoadBalanceResponse); -} - message LoadBalanceRequest { oneof load_balance_request_type { // This message should be sent on the first request to the load balancer. @@ -142,11 +121,8 @@ message ServerList { // unless instructed otherwise via the client_config. repeated Server servers = 1; - // Indicates the amount of time that the client should consider this server - // list as valid. It may be considered stale after waiting this interval of - // time after receiving the list. If the interval is not positive, the - // client can assume the list is valid until the next list is received. - Duration expiration_interval = 3; + // Was google.protobuf.Duration expiration_interval. + reserved 3; } // Contains server information. 
When none of the [drop_for_*] fields are true, diff --git a/vendor/google.golang.org/grpc/grpclog/BUILD b/vendor/google.golang.org/grpc/grpclog/BUILD index 4595e517367..4b225761989 100644 --- a/vendor/google.golang.org/grpc/grpclog/BUILD +++ b/vendor/google.golang.org/grpc/grpclog/BUILD @@ -2,7 +2,11 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["logger.go"], + srcs = [ + "grpclog.go", + "logger.go", + "loggerv2.go", + ], importpath = "google.golang.org/grpc/grpclog", visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 00000000000..1d71e25de50 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport package only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog + +import "os" + +var logger = newLoggerV2() + +// V reports whether verbosity level l is at least the requested verbose level. 
+func V(l int) bool { + return logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calles os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calle os.Exit()) with exit code 1. 
+func Fatalln(args ...interface{}) { + logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// Deprecated: use Info. +func Print(args ...interface{}) { + logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// Deprecated: use Infoln. +func Println(args ...interface{}) { + logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 2cc09be4894..d03b2397bfa 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -1,52 +1,25 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ -/* -Package grpclog defines logging for grpc. -*/ package grpclog -import ( - "log" - "os" -) - -// Use golang's standard logger by default. -// Access is not mutex-protected: do not modify except in init() -// functions. -var logger Logger = log.New(os.Stderr, "", log.LstdFlags) - // Logger mimics golang's standard Logger as an interface. +// Deprecated: use LoggerV2. type Logger interface { Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) @@ -58,36 +31,53 @@ type Logger interface { // SetLogger sets the logger that is used in grpc. Call only from // init() functions. +// Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - logger = l + logger = &loggerWrapper{Logger: l} } -// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. 
-func Fatal(args ...interface{}) { - logger.Fatal(args...) +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger } -// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) } -// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) } -// Print prints to the logger. Arguments are handled in the manner of fmt.Print. -func Print(args ...interface{}) { - logger.Print(args...) +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) } -// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. -func Printf(format string, args ...interface{}) { - logger.Printf(format, args...) +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) } -// Println prints to the logger. Arguments are handled in the manner of fmt.Println. -func Println(args ...interface{}) { - logger.Println(args...) +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. 
+ return true } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 00000000000..d4932577695 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. 
+ Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. 
+// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) 
+} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 89c4d459f0a..fdcbb9e0b7d 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: health.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_health_v1/health.proto /* Package grpc_health_v1 is a generated protocol buffer package. 
It is generated from these files: - health.proto + grpc_health_v1/health.proto It has these top-level messages: HealthCheckRequest @@ -69,6 +68,13 @@ func (m *HealthCheckRequest) String() string { return proto.CompactTe func (*HealthCheckRequest) ProtoMessage() {} func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *HealthCheckRequest) GetService() string { + if m != nil { + return m.Service + } + return "" +} + type HealthCheckResponse struct { Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` } @@ -78,6 +84,13 @@ func (m *HealthCheckResponse) String() string { return proto.CompactT func (*HealthCheckResponse) ProtoMessage() {} func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if m != nil { + return m.Status + } + return HealthCheckResponse_UNKNOWN +} + func init() { proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") @@ -153,24 +166,25 @@ var _Health_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "health.proto", + Metadata: "grpc_health_v1/health.proto", } -func init() { proto.RegisterFile("health.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 204 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc, - 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x83, - 0x0a, 0x95, 0x19, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x39, 0xce, 0x19, 0xa9, 0xc9, 0xd9, 0x41, - 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 
0xa9, 0x45, 0x65, 0x99, 0xc9, - 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0xd2, 0x1c, 0x46, 0x2e, 0x61, 0x14, - 0x0d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x9e, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0xa5, - 0xc5, 0x60, 0x0d, 0x7c, 0x46, 0x86, 0x7a, 0xa8, 0x16, 0xe9, 0x61, 0xd1, 0xa4, 0x17, 0x0c, 0x32, - 0x34, 0x2f, 0x3d, 0x18, 0xac, 0x31, 0x08, 0x6a, 0x80, 0x92, 0x15, 0x17, 0x2f, 0x8a, 0x84, 0x10, - 0x37, 0x17, 0x7b, 0xa8, 0x9f, 0xb7, 0x9f, 0x7f, 0xb8, 0x9f, 0x00, 0x03, 0x88, 0x13, 0xec, 0x1a, - 0x14, 0xe6, 0xe9, 0xe7, 0x2e, 0xc0, 0x28, 0xc4, 0xcf, 0xc5, 0xed, 0xe7, 0x1f, 0x12, 0x0f, 0x13, - 0x60, 0x32, 0x8a, 0xe2, 0x62, 0x83, 0x58, 0x24, 0x14, 0xc0, 0xc5, 0x0a, 0xb6, 0x4c, 0x48, 0x09, - 0xaf, 0x4b, 0xc0, 0xfe, 0x95, 0x52, 0x26, 0xc2, 0xb5, 0x49, 0x6c, 0xe0, 0x10, 0x34, 0x06, 0x04, - 0x00, 0x00, 0xff, 0xff, 0xac, 0x56, 0x2a, 0xcb, 0x51, 0x01, 0x00, 0x00, + // 213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a, + 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, + 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, + 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, + 0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, + 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, + 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, + 0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, + 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, + 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 
0x6c, 0x10, 0x8b, + 0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, + 0xb8, 0x36, 0x89, 0x0d, 0x1c, 0x82, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x53, 0x2b, 0x65, + 0x20, 0x60, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto index e2dc0889258..6072fdc3b80 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + syntax = "proto3"; package grpc.health.v1; diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index a6921614572..06dc825b9fb 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -42,15 +27,15 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. 
invoker is the handler to complete the RPC // and it is the responsibility of the interceptor to call it. -// This is the EXPERIMENTAL API. +// This is an EXPERIMENTAL API. type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) // StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handlder to create a ClientStream and it is the responsibility of the interceptor to call it. -// This is the EXPERIMENTAL API. +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 5489143a85c..07083832c3c 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -1,32 +1,17 @@ /* - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index d492589c96b..f8adc7e6d4f 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -1,33 +1,18 @@ /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -39,8 +24,8 @@ import ( ) // ClientParameters is used to set keepalive parameters on the client-side. -// These configure how the client will actively probe to notice when a connection broken -// and to cause activity so intermediaries are aware the connection is still in use. +// These configure how the client will actively probe to notice when a connection is broken +// and send pings so intermediaries will be aware of the liveness of the connection. // Make sure these parameters are set in coordination with the keepalive policy on the server, // as incompatible settings can result in closing of connection. type ClientParameters struct { diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a4f2de026db..589161d57fa 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -1,38 +1,23 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ // Package metadata define the structure of the metadata supported by gRPC library. -// Please refer to http://www.grpc.io/docs/guides/wire.html for more information about custom-metadata. +// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata. package metadata import ( @@ -51,8 +36,17 @@ func DecodeKeyValue(k, v string) (string, string, error) { // two convenience functions New and Pairs to generate MD. type MD map[string][]string -// New creates a MD from given key-value map. -// Keys are automatically converted to lowercase. +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. func New(m map[string]string) MD { md := MD{} for k, val := range m { @@ -64,7 +58,16 @@ func New(m map[string]string) MD { // Pairs returns an MD formed by the mapping of key, value ... // Pairs panics if len(kv) is odd. -// Keys are automatically converted to lowercase. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. 
func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) @@ -91,9 +94,9 @@ func (md MD) Copy() MD { return Join(md) } -// Join joins any number of MDs into a single MD. +// Join joins any number of mds into a single MD. // The order of values for each key is determined by the order in which -// the MDs containing those values are presented to Join. +// the mds containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -107,11 +110,6 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated. -func NewContext(ctx context.Context, md MD) context.Context { - return NewOutgoingContext(ctx, md) -} - // NewIncomingContext creates a new context with incoming md attached. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) @@ -122,22 +120,17 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, md) } -// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated. -func FromContext(ctx context.Context) (md MD, ok bool) { - return FromIncomingContext(ctx) -} - -// FromIncomingContext returns the incoming MD in ctx if it exists. The -// returned md should be immutable, writing to it may cause races. -// Modification should be made to the copies of the returned md. +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. func FromIncomingContext(ctx context.Context) (md MD, ok bool) { md, ok = ctx.Value(mdIncomingKey{}).(MD) return } -// FromOutgoingContext returns the outgoing MD in ctx if it exists. 
The -// returned md should be immutable, writing to it may cause races. -// Modification should be made to the copies of the returned md. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to the copies of the returned MD. func FromOutgoingContext(ctx context.Context) (md MD, ok bool) { md, ok = ctx.Value(mdOutgoingKey{}).(MD) return diff --git a/vendor/google.golang.org/grpc/naming/BUILD b/vendor/google.golang.org/grpc/naming/BUILD index 2318033a3c4..ea07a9fb642 100644 --- a/vendor/google.golang.org/grpc/naming/BUILD +++ b/vendor/google.golang.org/grpc/naming/BUILD @@ -2,9 +2,18 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["naming.go"], + srcs = [ + "dns_resolver.go", + "go17.go", + "go18.go", + "naming.go", + ], importpath = "google.golang.org/grpc/naming", visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], ) filegroup( diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 00000000000..7e69a2ca0a6 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,290 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "errors" + "fmt" + "net" + "strconv" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 +) + +var ( + errMissingAddr = errors.New("missing address") + errWatcherClose = errors.New("watcher has been closed") +) + +// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and +// create watchers that poll the DNS server using the frequency set by freq. +func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { + return &dnsResolver{freq: freq}, nil +} + +// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create +// watchers that poll the DNS server using the default frequency defined by defaultFreq. +func NewDNSResolver() (Resolver, error) { + return NewDNSResolverWithFreq(defaultFreq) +} + +// dnsResolver handles name resolution for names following the DNS scheme +type dnsResolver struct { + // frequency of polling the DNS server that the watchers created by this resolver will use. + freq time.Duration +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. 
+// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err := net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v", target) +} + +// Resolve creates a watcher that watches the name resolution of the target. 
+func (r *dnsResolver) Resolve(target string) (Watcher, error) { + host, port, err := parseTarget(target) + if err != nil { + return nil, err + } + + if net.ParseIP(host) != nil { + ipWatcher := &ipWatcher{ + updateChan: make(chan *Update, 1), + } + host, _ = formatIP(host) + ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} + return ipWatcher, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + return &dnsWatcher{ + r: r, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + t: time.NewTimer(0), + }, nil +} + +// dnsWatcher watches for the name resolution update for a specific target +type dnsWatcher struct { + r *dnsResolver + host string + port string + // The latest resolved address set + curAddrs map[string]*Update + ctx context.Context + cancel context.CancelFunc + t *time.Timer +} + +// ipWatcher watches for the name resolution update for an IP address. +type ipWatcher struct { + updateChan chan *Update +} + +// Next returns the adrress resolution Update for the target. For IP address, +// the resolution is itself, thus polling name server is unncessary. Therefore, +// Next() will return an Update the first time it is called, and will be blocked +// for all following calls as no Update exisits until watcher is closed. +func (i *ipWatcher) Next() ([]*Update, error) { + u, ok := <-i.updateChan + if !ok { + return nil, errWatcherClose + } + return []*Update{u}, nil +} + +// Close closes the ipWatcher. +func (i *ipWatcher) Close() { + close(i.updateChan) +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. + Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. 
The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. + ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + 
newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If failed to get any balancer address (either no corresponding SRV for the + // target, or caused by failure during resolution/parsing of the balancer target), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update(delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go new file mode 100644 index 00000000000..8bdf21e7998 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go17.go @@ -0,0 +1,34 @@ +// +build go1.7, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package naming + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } +) diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/naming/go18.go new file mode 100644 index 00000000000..b5a0f842748 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go18.go @@ -0,0 +1,28 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go index c2e0871e6f8..1af7e32f86d 100644 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index bfa6205ba9e..317b8b9d09a 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -42,7 +27,8 @@ import ( "google.golang.org/grpc/credentials" ) -// Peer contains the information of the peer for an RPC. +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. type Peer struct { // Addr is the peer address. Addr net.Addr diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 00000000000..9085dbc9c98 --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker +} + +func newPickerWrapper() *pickerWrapper { + bp := &pickerWrapper{blockingCh: make(chan struct{})} + return bp +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (bp *pickerWrapper) updatePicker(p balancer.Picker) { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return + } + bp.picker = p + // bp.blockingCh should never be nil. + close(bp.blockingCh) + bp.blockingCh = make(chan struct{}) + bp.mu.Unlock() +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ( + p balancer.Picker + ch chan struct{} + ) + + for { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if bp.picker == nil { + ch = bp.blockingCh + } + if ch == bp.blockingCh { + // This could happen when either: + // - bp.picker is nil (the previous if condition), or + // - has called pick on the current picker. 
+ bp.mu.Unlock() + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-ch: + } + continue + } + + ch = bp.blockingCh + p = bp.picker + bp.mu.Unlock() + + subConn, put, err := p.Pick(ctx, opts) + + if err != nil { + switch err { + case balancer.ErrNoSubConnAvailable: + continue + case balancer.ErrTransientFailure: + if !failfast { + continue + } + return nil, nil, status.Errorf(codes.Unavailable, "%v", err) + default: + // err is some other error. + return nil, nil, toRPCErr(err) + } + } + + acw, ok := subConn.(*acBalancerWrapper) + if !ok { + grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + return t, put, nil + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (bp *pickerWrapper) close() { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.done { + return + } + bp.done = true + close(bp.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 00000000000..7f993ef5a38 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return "pickfirst" +} + +type pickfirstBalancer struct { + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + return + } + if b.sc == nil { + b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + return + } + b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + } else { + b.sc.UpdateAddresses(addrs) + } +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if b.sc != sc || s == connectivity.Shutdown { + b.sc = nil + return + } + + switch s { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateBalancerState(s, &picker{sc: sc}) + case connectivity.Connecting: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + case connectivity.TransientFailure: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + err error + sc balancer.SubConn +} + +func (p *picker) Pick(ctx context.Context, opts 
balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go index 10188dc3433..3e17efec61b 100644 --- a/vendor/google.golang.org/grpc/proxy.go +++ b/vendor/google.golang.org/grpc/proxy.go @@ -1,33 +1,18 @@ /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -97,7 +82,8 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ Header: map[string][]string{"User-Agent": {grpcUA}}, }) - if err := sendHTTPRequest(ctx, req, conn); err != nil { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) } diff --git a/vendor/google.golang.org/grpc/resolver/BUILD b/vendor/google.golang.org/grpc/resolver/BUILD new file mode 100644 index 00000000000..51f8d6f28fe --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["resolver.go"], + importpath = "google.golang.org/grpc/resolver", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git 
a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 00000000000..49307e8fe9e --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,143 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme string +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. +// b.Scheme will be used as the scheme registered with this builder. +func Register(b Builder) { + m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// If no builder is register with the scheme, the default scheme will +// be used. +// If the default scheme is not modified, "dns" will be the default +// scheme, and the preinstalled dns resolver will be used. +// If the default scheme is modified, and a resolver is registered with +// the scheme, that resolver will be returned. +// If the default scheme is modified, and no resolver is registered with +// the scheme, nil will be returned. 
+func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + if b, ok := m[defaultScheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. +// The default default scheme is "dns". +func SetDefaultScheme(scheme string) { + defaultScheme = scheme +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the address is for a backend server. + Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + GRPCLB +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Type is the type of this address. + Type AddressType + // ServerName is the name of this address. + // It's the name of the grpc load balancer, which will be used for authentication. + ServerName string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BuildOption includes additional information for the builder to create +// the resolver. +type BuildOption struct { +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +type ClientConn interface { + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + NewServiceConfig(serviceConfig string) +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. 
+type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOption includes additional information for ResolveNow. +type ResolveNowOption struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name again. + // It's just a hint, resolver can ignore this if it's not necessary. + ResolveNow(ResolveNowOption) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 00000000000..7d53964d094 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,139 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "strings" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConnection interface. +type ccResolverWrapper struct { + cc *ClientConn + resolver resolver.Resolver + addrCh chan []resolver.Address + scCh chan string + done chan struct{} +} + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns "", s instead. +func split2(s, sep string) (string, string) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", s + } + return spl[0], spl[1] +} + +// parseTarget splits target into a struct containing scheme, authority and +// endpoint. +func parseTarget(target string) (ret resolver.Target) { + ret.Scheme, ret.Endpoint = split2(target, "://") + ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/") + return ret +} + +// newCCResolverWrapper parses cc.target for scheme and gets the resolver +// builder for this scheme. It then builds the resolver and starts the +// monitoring goroutine for it. +// +// This function could return nil, nil, in tests for old behaviors. +// TODO(bar) never return nil, nil when DNS becomes the default resolver. 
+func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { + target := parseTarget(cc.target) + grpclog.Infof("dialing to target with scheme: %q", target.Scheme) + + rb := resolver.Get(target.Scheme) + if rb == nil { + // TODO(bar) return error when DNS becomes the default (implemented and + // registered by DNS package). + grpclog.Infof("could not get resolver for scheme: %q", target.Scheme) + return nil, nil + } + + ccr := &ccResolverWrapper{ + cc: cc, + addrCh: make(chan []resolver.Address, 1), + scCh: make(chan string, 1), + done: make(chan struct{}), + } + + var err error + ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{}) + if err != nil { + return nil, err + } + go ccr.watcher() + return ccr, nil +} + +// watcher processes address updates and service config updates sequencially. +// Otherwise, we need to resolve possible races between address and service +// config (e.g. they specify different balancer types). +func (ccr *ccResolverWrapper) watcher() { + for { + select { + case <-ccr.done: + return + default: + } + + select { + case addrs := <-ccr.addrCh: + grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs) + // TODO(bar switching) this should never be nil. Pickfirst should be default. + if ccr.cc.balancerWrapper != nil { + // TODO(bar switching) create balancer if it's nil? + ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil) + } + case sc := <-ccr.scCh: + grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + case <-ccr.done: + return + } + } +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolver.Close() + close(ccr.done) +} + +// NewAddress is called by the resolver implemenetion to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + select { + case <-ccr.addrCh: + default: + } + ccr.addrCh <- addrs +} + +// NewServiceConfig is called by the resolver implemenetion to send service +// configs to gPRC. 
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + select { + case <-ccr.scCh: + default: + } + ccr.scCh <- sc +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 34e1ad03b97..188a75fff94 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -36,15 +21,18 @@ package grpc import ( "bytes" "compress/gzip" + stdctx "context" "encoding/binary" "io" "io/ioutil" "math" "os" + "sync" "time" "golang.org/x/net/context" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -60,16 +48,25 @@ type Compressor interface { Type() string } -// NewGZIPCompressor creates a Compressor based on GZIP. -func NewGZIPCompressor() Compressor { - return &gzipCompressor{} +type gzipCompressor struct { + pool sync.Pool } -type gzipCompressor struct { +// NewGZIPCompressor creates a Compressor based on GZIP. 
+func NewGZIPCompressor() Compressor { + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(ioutil.Discard) + }, + }, + } } func (c *gzipCompressor) Do(w io.Writer, p []byte) error { - z := gzip.NewWriter(w) + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) if _, err := z.Write(p); err != nil { return err } @@ -89,6 +86,7 @@ type Decompressor interface { } type gzipDecompressor struct { + pool sync.Pool } // NewGZIPDecompressor creates a Decompressor based on GZIP. @@ -97,11 +95,26 @@ func NewGZIPDecompressor() Decompressor { } func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { - z, err := gzip.NewReader(r) - if err != nil { - return nil, err + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } } - defer z.Close() + + defer func() { + z.Close() + d.pool.Put(z) + }() return ioutil.ReadAll(z) } @@ -111,14 +124,19 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - peer *peer.Peer - traceInfo traceInfo // in trace.go + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + peer *peer.Peer + traceInfo traceInfo // in trace.go + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials } -var defaultCallInfo = callInfo{failFast: true} +func defaultCallInfo() *callInfo { + return &callInfo{failFast: true} +} // CallOption configures a Call before it starts or extracts information from // a Call after it completes. @@ -132,6 +150,14 @@ type CallOption interface { after(*callInfo) } +// EmptyCallOption does not alter the Call configuration. 
+// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + type beforeCall func(c *callInfo) error func (o beforeCall) before(c *callInfo) error { return o(c) } @@ -173,7 +199,8 @@ func Peer(peer *peer.Peer) CallOption { // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will retry // the call if it fails due to a transient error. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md. Note: failFast is default to true. +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// Note: failFast is default to true. func FailFast(failFast bool) CallOption { return beforeCall(func(c *callInfo) error { c.failFast = failFast @@ -181,6 +208,31 @@ func FailFast(failFast bool) CallOption { }) } +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. +func MaxCallRecvMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxReceiveMessageSize = &s + return nil + }) +} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. +func MaxCallSendMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxSendMessageSize = &s + return nil + }) +} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return beforeCall(func(c *callInfo) error { + c.creds = creds + return nil + }) +} + // The format of the payload: compressed or not? type payloadFormat uint8 @@ -197,7 +249,7 @@ type parser struct { r io.Reader // The header of a gRPC message. Find more detail - // at http://www.grpc.io/docs/guides/wire.html. 
+ // at https://grpc.io/docs/guides/wire.html. header [5]byte } @@ -214,8 +266,8 @@ type parser struct { // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. -func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := io.ReadFull(p.r, p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { return 0, nil, err } @@ -225,13 +277,13 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro if length == 0 { return pf, nil, nil } - if length > uint32(maxMsgSize) { - return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize) + if length > uint32(maxReceiveMessageSize) { + return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead // of making it for each message: msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.r, msg); err != nil { + if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } @@ -240,19 +292,20 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro return pf, msg, nil } -// encode serializes msg and prepends the message header. If msg is nil, it -// generates the message header of 0 message length. -func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, error) { - var ( - b []byte - length uint +// encode serializes msg and returns a buffer of message header and a buffer of msg. +// If msg is nil, it generates the message header and an empty msg buffer. 
+func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) { + var b []byte + const ( + payloadLen = 1 + sizeLen = 4 ) + if msg != nil { var err error - // TODO(zhaoq): optimize to reduce memory alloc and copying. b, err = c.Marshal(msg) if err != nil { - return nil, err + return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } if outPayload != nil { outPayload.Payload = msg @@ -262,39 +315,28 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl } if cp != nil { if err := cp.Do(cbuf, b); err != nil { - return nil, err + return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } b = cbuf.Bytes() } - length = uint(len(b)) - } - if length > math.MaxUint32 { - return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) } - const ( - payloadLen = 1 - sizeLen = 4 - ) + if uint(len(b)) > math.MaxUint32 { + return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } - var buf = make([]byte, payloadLen+sizeLen+len(b)) - - // Write payload format + bufHeader := make([]byte, payloadLen+sizeLen) if cp == nil { - buf[0] = byte(compressionNone) + bufHeader[0] = byte(compressionNone) } else { - buf[0] = byte(compressionMade) + bufHeader[0] = byte(compressionMade) } // Write length of b into buf - binary.BigEndian.PutUint32(buf[1:], uint32(length)) - // Copy encoded msg to buf - copy(buf[5:], b) - + binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b))) if outPayload != nil { - outPayload.WireLength = len(buf) + outPayload.WireLength = payloadLen + sizeLen + len(b) } - - return buf, nil + return bufHeader, b, nil } func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { @@ -310,8 +352,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er return nil } -func recv(p *parser, c 
Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error { - pf, d, err := p.recvMsg(maxMsgSize) +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error { + pf, d, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return err } @@ -327,10 +369,10 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } - if len(d) > maxMsgSize { + if len(d) > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize) + return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) } if err := c.Unmarshal(d, m); err != nil { return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) @@ -346,14 +388,15 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ } type rpcInfo struct { + failfast bool bytesSent bool bytesReceived bool } type rpcInfoContextKey struct{} -func newContextWithRPCInfo(ctx context.Context) context.Context { - return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{}) +func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) } func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { @@ -363,11 +406,63 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { func updateRPCInfoInContext(ctx context.Context, s rpcInfo) { if ss, ok := rpcInfoFromContext(ctx); ok { - *ss = s + ss.bytesReceived = s.bytesReceived + ss.bytesSent = s.bytesSent } return } +// toRPCErr converts an error into an 
error from the status package. +func toRPCErr(err error) error { + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded, stdctx.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled, stdctx.Canceled: + return status.Error(codes.Canceled, err.Error()) + case ErrClientConnClosing: + return status.Error(codes.FailedPrecondition, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// convertCode converts a standard Go error into its canonical code. Note that +// this is only used to translate the error returned by the server applications. +func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled, stdctx.Canceled: + return codes.Canceled + case context.DeadlineExceeded, stdctx.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + // Code returns the error code for err if it was produced by the rpc system. // Otherwise, it returns codes.Unknown. // @@ -398,57 +493,6 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { return status.Errorf(c, format, a...) } -// toRPCErr converts an error into an error from the status package. 
-func toRPCErr(err error) error { - if _, ok := status.FromError(err); ok { - return err - } - switch e := err.(type) { - case transport.StreamError: - return status.Error(e.Code, e.Desc) - case transport.ConnectionError: - return status.Error(codes.Internal, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - case ErrClientConnClosing: - return status.Error(codes.FailedPrecondition, err.Error()) - } - } - return status.Error(codes.Unknown, err.Error()) -} - -// convertCode converts a standard Go error into its canonical code. Note that -// this is only used to translate the error returned by the server applications. -func convertCode(err error) codes.Code { - switch err { - case nil: - return codes.OK - case io.EOF: - return codes.OutOfRange - case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: - return codes.FailedPrecondition - case os.ErrInvalid: - return codes.InvalidArgument - case context.Canceled: - return codes.Canceled - case context.DeadlineExceeded: - return codes.DeadlineExceeded - } - switch { - case os.IsExist(err): - return codes.AlreadyExists - case os.IsNotExist(err): - return codes.NotFound - case os.IsPermission(err): - return codes.PermissionDenied - } - return codes.Unknown -} - // MethodConfig defines the configuration recommended by the service providers for a // particular method. // This is EXPERIMENTAL and subject to change. @@ -456,24 +500,22 @@ type MethodConfig struct { // WaitForReady indicates whether RPCs sent to this method should wait until // the connection is ready by default (!failfast). The value specified via the // gRPC client API will override the value set here. - WaitForReady bool + WaitForReady *bool // Timeout is the default timeout for RPCs sent to this method. 
The actual // deadline used will be the minimum of the value specified here and the value // set by the application via the gRPC client API. If either one is not set, // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout time.Duration + Timeout *time.Duration // MaxReqSize is the maximum allowed payload size for an individual request in a // stream (client->server) in bytes. The size which is measured is the serialized // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minumum of the value specified here and the value set + // The actual value used is the minimum of the value specified here and the value set // by the application via the gRPC client API. If either one is not set, then the other // will be used. If neither is set, then the built-in default is used. - // TODO: support this. - MaxReqSize uint32 + MaxReqSize *int // MaxRespSize is the maximum allowed payload size for an individual response in a // stream (server->client) in bytes. - // TODO: support this. - MaxRespSize uint32 + MaxRespSize *int } // ServiceConfig is provided by the service provider and contains parameters for how @@ -484,9 +526,38 @@ type ServiceConfig struct { // via grpc.WithBalancer will override this. LB Balancer // Methods contains a map for the methods in this service. + // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. + // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. + // Otherwise, the method has no MethodConfig to use. 
Methods map[string]MethodConfig } +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +// SupportPackageIsVersion3 is referenced from generated protocol buffer files. +// The latest support package version is 4. +// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the +// next support package version update. +const SupportPackageIsVersion3 = true + // SupportPackageIsVersion4 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the grpc package. // @@ -496,6 +567,6 @@ type ServiceConfig struct { const SupportPackageIsVersion4 = true // Version is the current grpc version. -const Version = "1.3.0" +const Version = "1.7.5" const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index b15f71c6c18..787665dfeb3 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -38,6 +23,7 @@ import ( "errors" "fmt" "io" + "math" "net" "net/http" "reflect" @@ -61,6 +47,11 @@ import ( "google.golang.org/grpc/transport" ) +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. @@ -96,6 +87,7 @@ type Server struct { mu sync.Mutex // guards following lis map[net.Listener]bool conns map[io.Closer]bool + serve bool drain bool ctx context.Context cancel context.CancelFunc @@ -107,27 +99,69 @@ type Server struct { } type options struct { - creds credentials.TransportCredentials - codec Codec - cp Compressor - dc Decompressor - maxMsgSize int - unaryInt UnaryServerInterceptor - streamInt StreamServerInterceptor - inTapHandle tap.ServerInHandle - statsHandler stats.Handler - maxConcurrentStreams uint32 - useHandlerImpl bool // use http.Handler-based server - unknownStreamDesc *StreamDesc - keepaliveParams keepalive.ServerParameters - keepalivePolicy keepalive.EnforcementPolicy + creds credentials.TransportCredentials + codec Codec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + useHandlerImpl bool // use http.Handler-based server + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration } -var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit +var defaultServerOptions = options{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + 
maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, +} -// A ServerOption sets options. +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption func(*options) +// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WriteBufferSize(s int) ServerOption { + return func(o *options) { + o.writeBufferSize = s + } +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +func ReadBufferSize(s int) ServerOption { + return func(o *options) { + o.readBufferSize = s + } +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialWindowSize = s + } +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialConnWindowSize = s + } +} + // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { return func(o *options) { @@ -163,11 +197,25 @@ func RPCDecompressor(dc Decompressor) ServerOption { } } -// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages. -// If this is not set, gRPC uses the default 4MB. +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead. 
func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { return func(o *options) { - o.maxMsgSize = m + o.maxReceiveMessageSize = m + } +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default 4MB. +func MaxSendMsgSize(m int) ServerOption { + return func(o *options) { + o.maxSendMessageSize = m } } @@ -192,7 +240,7 @@ func Creds(c credentials.TransportCredentials) ServerOption { func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { return func(o *options) { if o.unaryInt != nil { - panic("The unary server interceptor has been set.") + panic("The unary server interceptor was already set and may not be reset.") } o.unaryInt = i } @@ -203,7 +251,7 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { func StreamInterceptor(i StreamServerInterceptor) ServerOption { return func(o *options) { if o.streamInt != nil { - panic("The stream server interceptor has been set.") + panic("The stream server interceptor was already set and may not be reset.") } o.streamInt = i } @@ -214,7 +262,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { func InTapHandle(h tap.ServerInHandle) ServerOption { return func(o *options) { if o.inTapHandle != nil { - panic("The tap handle has been set.") + panic("The tap handle was already set and may not be reset.") } o.inTapHandle = h } @@ -229,10 +277,10 @@ func StatsHandler(h stats.Handler) ServerOption { // UnknownServiceHandler returns a ServerOption that allows for adding a custom // unknown service handler. 
The provided method is a bidi-streaming RPC service -// handler that will be invoked instead of returning the the "unimplemented" gRPC +// handler that will be invoked instead of returning the "unimplemented" gRPC // error whenever a request is received for an unregistered service or method. // The handling function has full access to the Context of the request and the -// stream, and the invocation passes through interceptors. +// stream, and the invocation bypasses interceptors. func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { return func(o *options) { o.unknownStreamDesc = &StreamDesc{ @@ -245,11 +293,20 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { } } +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +func ConnectionTimeout(d time.Duration) ServerOption { + return func(o *options) { + o.connectionTimeout = d + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { - var opts options - opts.maxMsgSize = defaultMaxMsgSize + opts := defaultServerOptions for _, o := range opt { o(&opts) } @@ -288,8 +345,8 @@ func (s *Server) errorf(format string, a ...interface{}) { } } -// RegisterService register a service and its implementation to the gRPC -// server. Called from the IDL generated code. This must be called before +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before // invoking Serve. 
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { ht := reflect.TypeOf(sd.HandlerType).Elem() @@ -304,6 +361,9 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } if _, ok := s.m[sd.ServiceName]; ok { grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } @@ -334,7 +394,7 @@ type MethodInfo struct { IsServerStream bool } -// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service. +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. @@ -392,6 +452,7 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") + s.serve = true if s.lis == nil { s.mu.Unlock() lis.Close() @@ -427,10 +488,12 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("Accept error: %v; retrying in %v", err, tempDelay) s.mu.Unlock() + timer := time.NewTimer(tempDelay) select { - case <-time.After(tempDelay): + case <-timer.C: case <-s.ctx.Done(): } + timer.Stop() continue } s.mu.Lock() @@ -448,16 +511,18 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn is run in its own goroutine and handles a just-accepted // connection that has not had any I/O performed on it yet. 
func (s *Server) handleRawConn(rawConn net.Conn) { + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) conn, authInfo, err := s.useTransportAuthenticator(rawConn) if err != nil { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - // If serverHandShake returns ErrConnDispatched, keep rawConn open. + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandshake returns ErrConnDispatched, keep rawConn open. if err != credentials.ErrConnDispatched { rawConn.Close() } + rawConn.SetDeadline(time.Time{}) return } @@ -470,25 +535,32 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Unlock() if s.opts.useHandlerImpl { + rawConn.SetDeadline(time.Time{}) s.serveUsingHandler(conn) } else { - s.serveHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + rawConn.SetDeadline(time.Time{}) + s.serveStreams(st) } } -// serveHTTP2Transport sets up a http/2 transport (using the -// gRPC http2 server transport in transport/http2_server.go) and -// serves streams on it. -// This is run in its own goroutine (it does network I/O in -// transport.NewServerTransport). -func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). 
+func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { config := &transport.ServerConfig{ - MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, - InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, - KeepaliveParams: s.opts.keepaliveParams, - KeepalivePolicy: s.opts.keepalivePolicy, + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -496,14 +568,14 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) - return + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return nil } if !s.addConn(st) { st.Close() - return + return nil } - s.serveStreams(st) + return st } func (s *Server) serveStreams(st transport.ServerTransport) { @@ -554,6 +626,30 @@ func (s *Server) serveUsingHandler(conn net.Conn) { }) } +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. 
+// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r) if err != nil { @@ -618,18 +714,15 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str if s.opts.statsHandler != nil { outPayload = &stats.OutPayload{} } - p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) + hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) if err != nil { - // This typically indicates a fatal issue (e.g., memory - // corruption or hardware faults) the application program - // cannot handle. - // - // TODO(zhaoq): There exist other options also such as only closing the - // faulty stream locally and remotely (Other streams can keep going). Find - // the optimal option. - grpclog.Fatalf("grpc: Server failed to encode response %v", err) + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err } - err = t.Write(stream, p, opts) + if len(data) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(data), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() s.opts.statsHandler.HandleRPC(stream.Context(), outPayload) @@ -644,9 +737,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. BeginTime: time.Now(), } sh.HandleRPC(stream.Context(), begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ EndTime: time.Now(), } @@ -654,8 +745,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. end.Error = toRPCErr(err) } sh.HandleRPC(stream.Context(), end) - } - }() + }() + } if trInfo != nil { defer trInfo.tr.Finish() trInfo.firstLine.client = false @@ -672,139 +763,137 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. stream.SetSendCompress(s.opts.cp.Type()) } p := &parser{r: stream} - for { // TODO: delete - pf, req, err := p.recvMsg(s.opts.maxMsgSize) + pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if err == io.ErrUnexpectedEOF { + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) + } + } + return err + } + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + return err + } + if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + + // TODO checkRecvPayload always return RPC error. Add a return here if necessary. + } + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), + } + } + df := func(v interface{}) error { + if inPayload != nil { + inPayload.WireLength = len(req) + } + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + return Errorf(codes.Internal, err.Error()) + } + } + if len(req) > s.opts.maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", len(req), s.opts.maxReceiveMessageSize) + } + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if inPayload != nil { + inPayload.Payload = v + inPayload.Data = req + inPayload.Length = len(req) + sh.HandleRPC(stream.Context(), inPayload) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(convertCode(appErr), appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } - if err == io.ErrUnexpectedEOF { - err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) - } - if err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. - case transport.StreamError: - if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) - } - } - return err - } - - if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - return err - } - if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - - // TODO checkRecvPayload always return RPC error. Add a return here if necessary. - } - var inPayload *stats.InPayload - if sh != nil { - inPayload = &stats.InPayload{ - RecvTime: time.Now(), + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - df := func(v interface{}) error { - if inPayload != nil { - inPayload.WireLength = len(req) - } - if pf == compressionMade { - var err error - req, err = s.opts.dc.Do(bytes.NewReader(req)) - if err != nil { - return Errorf(codes.Internal, err.Error()) - } - } - if len(req) > s.opts.maxMsgSize { - // TODO: Revisit the error code. Currently keep it consistent with - // java implementation. 
- return status.Errorf(codes.Internal, "grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize) - } - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) - } - if inPayload != nil { - inPayload.Payload = v - inPayload.Data = req - inPayload.Length = len(req) - sh.HandleRPC(stream.Context(), inPayload) - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) - if appErr != nil { - appStatus, ok := status.FromError(appErr) - if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(convertCode(appErr), appErr.Error()) - appStatus, _ = status.FromError(appErr) - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer(appStatus.Message()), true) - trInfo.tr.SetError() - } - if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e) - } - return appErr - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) - } - opts := &transport.Options{ - Last: true, - Delay: false, - } - if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err - } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e) - } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) - } - } - return err - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) - } - // TODO: Should we be logging if writing status failed here, like above? - // Should the logging be in WriteStatus? Should we ignore the WriteStatus - // error or allow the stats handler to see it? - return t.WriteStatus(stream, status.New(codes.OK, "")) + return err } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + return t.WriteStatus(stream, status.New(codes.OK, "")) } func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { @@ -814,9 +903,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp BeginTime: time.Now(), } sh.HandleRPC(stream.Context(), begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ EndTime: time.Now(), } @@ -824,24 +911,22 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } sh.HandleRPC(stream.Context(), end) - } - }() + }() + } if s.opts.cp != nil { stream.SetSendCompress(s.opts.cp.Type()) } ss := &serverStream{ - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.opts.codec, - cp: s.opts.cp, - dc: s.opts.dc, - maxMsgSize: s.opts.maxMsgSize, - trInfo: trInfo, - statsHandler: sh, - } - if ss.cp != nil { - ss.cbuf = new(bytes.Buffer) + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + cp: 
s.opts.cp, + dc: s.opts.dc, + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -913,12 +998,12 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.InvalidArgument, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -943,7 +1028,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -973,7 +1058,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1011,8 +1096,9 @@ func (s *Server) Stop() { s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server to accept new -// connections and RPCs and blocks until all the pending RPCs are finished. +// GracefulStop stops the gRPC server gracefully. 
It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. func (s *Server) GracefulStop() { s.mu.Lock() defer s.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go index 26e1a8e2f08..05b384c6931 100644 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -45,19 +30,22 @@ type ConnTagInfo struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // TODO add QOS related fields. } // RPCTagInfo defines the relevant information needed by RPC context tagger. type RPCTagInfo struct { // FullMethodName is the RPC method in the format of /package.service/method. FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool } // Handler defines the interface for the related stats handling (e.g., RPCs, connections). type Handler interface { // TagRPC can attach some information to the given context. - // The returned context is used in the rest lifetime of the RPC. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. TagRPC(context.Context, *RPCTagInfo) context.Context // HandleRPC processes the RPC stats. 
HandleRPC(context.Context, RPCStats) diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index c2c9a9dfa23..e844541e9c0 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -1,36 +1,23 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + // Package stats is for collecting and reporting various network and RPC stats. // This package is for monitoring purpose only. All fields are read-only. // All APIs are experimental. @@ -39,6 +26,8 @@ package stats import ( "net" "time" + + "golang.org/x/net/context" ) // RPCStats contains stats information about RPCs. @@ -49,7 +38,7 @@ type RPCStats interface { } // Begin contains stats when an RPC begins. -// FailFast are only valid if Client is true. +// FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool @@ -59,7 +48,7 @@ type Begin struct { FailFast bool } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} @@ -80,19 +69,19 @@ type InPayload struct { RecvTime time.Time } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. 
func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} // InHeader contains stats when a header is received. -// FullMethod, addresses and Compression are only valid if Client is false. type InHeader struct { // Client is true if this InHeader is from client side. Client bool // WireLength is the wire length of header. WireLength int + // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. @@ -103,7 +92,7 @@ type InHeader struct { Compression string } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} @@ -116,7 +105,7 @@ type InTrailer struct { WireLength int } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} @@ -137,19 +126,17 @@ type OutPayload struct { SentTime time.Time } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} // OutHeader contains stats when a header is sent. -// FullMethod, addresses and Compression are only valid if Client is true. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool - // WireLength is the wire length of header. - WireLength int + // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. 
@@ -160,7 +147,7 @@ type OutHeader struct { Compression string } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} @@ -173,7 +160,7 @@ type OutTrailer struct { WireLength int } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} @@ -184,7 +171,9 @@ type End struct { Client bool // EndTime is the time when the RPC ends. EndTime time.Time - // Error is the error just happened. It implements status.Status if non-nil. + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. Error error } @@ -221,3 +210,85 @@ type ConnEnd struct { func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. 
New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. 
+func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/BUILD b/vendor/google.golang.org/grpc/status/BUILD index a92cd5f4e04..84cb8afc49f 100644 --- a/vendor/google.golang.org/grpc/status/BUILD +++ b/vendor/google.golang.org/grpc/status/BUILD @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes:go_default_library", "//vendor/google.golang.org/genproto/googleapis/rpc/status:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", ], diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 99a4cbe5112..871dc4b31c7 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -1,33 +1,18 @@ /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -43,9 +28,11 @@ package status import ( + "errors" "fmt" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" ) @@ -143,3 +130,39 @@ func FromError(err error) (s *Status, ok bool) { } return nil, false } + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 33f1c787b34..75eab40b109 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -42,8 +27,10 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/transport" @@ -73,11 +60,17 @@ type Stream interface { // side. On server side, it simply returns the error to the caller. // SendMsg is called by generated code. Also Users can call SendMsg // directly when it is really needed in their use cases. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call SendMsg on the same stream in different goroutines. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message or the stream is // done. On client side, it returns io.EOF when the stream is done. On // any other error, it aborts the stream and returns an RPC status. On // server side, it simply returns the error to the caller. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call RecvMsg on the same stream in different goroutines. RecvMsg(m interface{}) error } @@ -93,6 +86,11 @@ type ClientStream interface { // CloseSend closes the send direction of the stream. It closes the stream // when non-nil error is met. CloseSend() error + // Stream.SendMsg() may return a non-nil error when something wrong happens sending + // the request. The returned error indicates the status of this sending, not the final + // status of the RPC. + // Always call Stream.RecvMsg() to get the final status if you care about the status of + // the RPC. 
Stream } @@ -109,29 +107,48 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var ( t transport.ClientTransport s *transport.Stream - put func() + done func(balancer.DoneInfo) cancel context.CancelFunc ) - c := defaultCallInfo - if mc, ok := cc.getMethodConfig(method); ok { - c.failFast = !mc.WaitForReady - if mc.Timeout > 0 { - ctx, cancel = context.WithTimeout(ctx, mc.Timeout) - } + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady } + + if mc.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer func() { + if err != nil { + cancel() + } + }() + } + + opts = append(cc.dopts.callOptions, opts...) for _, o := range opts { - if err := o.before(&c); err != nil { + if err := o.before(c); err != nil { return nil, toRPCErr(err) } } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, - Flush: desc.ServerStreams && desc.ClientStreams, + // If it's not client streaming, we should already have the request to be sent, + // so we don't flush the header. + // If it's client streaming, the user may never send a request or send it any + // time soon, so we ask the transport to flush the header. 
+ Flush: desc.ClientStreams, } if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } + if c.creds != nil { + callHdr.Creds = c.creds + } var trInfo traceInfo if EnableTracing { trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) @@ -151,32 +168,29 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - ctx = newContextWithRPCInfo(ctx) + ctx = newContextWithRPCInfo(ctx, c.failFast) sh := cc.dopts.copts.StatsHandler if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) begin := &stats.Begin{ Client: true, BeginTime: time.Now(), FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - } - defer func() { - if err != nil && sh != nil { - // Only handle end stats if err != nil. - end := &stats.End{ - Client: true, - Error: err, + defer func() { + if err != nil { + // Only handle end stats if err != nil. + end := &stats.End{ + Client: true, + Error: err, + } + sh.HandleRPC(ctx, end) } - sh.HandleRPC(ctx, end) - } - }() - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, + }() } for { - t, put, err = cc.getTransport(ctx, gopts) + t, done, err = cc.getTransport(ctx, c.failFast) if err != nil { // TODO(zhaoq): Probably revisit the error handling. if _, ok := status.FromError(err); ok { @@ -194,15 +208,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth s, err = t.NewStream(ctx, callHdr) if err != nil { - if _, ok := err.(transport.ConnectionError); ok && put != nil { + if _, ok := err.(transport.ConnectionError); ok && done != nil { // If error is connection error, transport was sending data on wire, // and we are not sure if anything has been sent on wire. // If error is not connection error, we are sure nothing has been sent. 
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) } - if put != nil { - put() - put = nil + if done != nil { + done(balancer.DoneInfo{Err: err}) + done = nil } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -211,20 +225,23 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } break } + // Set callInfo.peer object from stream's context. + if peer, ok := peer.FromContext(s.Context()); ok { + c.peer = peer + } cs := &clientStream{ - opts: opts, - c: c, - desc: desc, - codec: cc.dopts.codec, - cp: cc.dopts.cp, - dc: cc.dopts.dc, - maxMsgSize: cc.dopts.maxMsgSize, - cancel: cancel, + opts: opts, + c: c, + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + cancel: cancel, - put: put, - t: t, - s: s, - p: &parser{r: s}, + done: done, + t: t, + s: s, + p: &parser{r: s}, tracing: EnableTracing, trInfo: trInfo, @@ -232,9 +249,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth statsCtx: ctx, statsHandler: cc.dopts.copts.StatsHandler, } - if cc.dopts.cp != nil { - cs.cbuf = new(bytes.Buffer) - } // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination // when there is no pending I/O operations on this stream. go func() { @@ -263,23 +277,21 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // clientStream implements a client side Stream. 
type clientStream struct { - opts []CallOption - c callInfo - t transport.ClientTransport - s *transport.Stream - p *parser - desc *StreamDesc - codec Codec - cp Compressor - cbuf *bytes.Buffer - dc Decompressor - maxMsgSize int - cancel context.CancelFunc + opts []CallOption + c *callInfo + t transport.ClientTransport + s *transport.Stream + p *parser + desc *StreamDesc + codec Codec + cp Compressor + dc Decompressor + cancel context.CancelFunc tracing bool // set to EnableTracing when the clientStream is created. mu sync.Mutex - put func() + done func(balancer.DoneInfo) closed bool finished bool // trInfo.tr is set when the clientStream is created (if EnableTracing is true), @@ -329,7 +341,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return } if err == io.EOF { - // Specialize the process for server streaming. SendMesg is only called + // Specialize the process for server streaming. SendMsg is only called // once when creating the stream object. io.EOF needs to be skipped when // the rpc is early finished (before the stream object is created.). // TODO: It is probably better to move this into the generated code. @@ -349,16 +361,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { Client: true, } } - out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload) - defer func() { - if cs.cbuf != nil { - cs.cbuf.Reset() - } - }() + hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload) if err != nil { - return Errorf(codes.Internal, "grpc: %v", err) + return err } - err = cs.t.Write(cs.s, out, &transport.Options{Last: false}) + if cs.c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(data) > *cs.c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(data), *cs.c.maxSendMessageSize) + } + err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false}) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() cs.statsHandler.HandleRPC(cs.statsCtx, outPayload) @@ -373,7 +386,10 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { Client: true, } } - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, inPayload) + if cs.c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload) defer func() { // err != nil indicates the termination of the stream. if err != nil { @@ -396,7 +412,10 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { } // Special handling for client streaming rpc. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, nil) + if cs.c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil) cs.closeTransportStream(err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) @@ -424,7 +443,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { } func (cs *clientStream) CloseSend() (err error) { - err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) + err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true}) defer func() { if err != nil { cs.finish(err) @@ -464,15 +483,15 @@ func (cs *clientStream) finish(err error) { } }() for _, o := range cs.opts { - o.after(&cs.c) + o.after(cs.c) } - if cs.put != nil { + if cs.done != nil { updateRPCInfoInContext(cs.s.Context(), rpcInfo{ bytesSent: cs.s.BytesSent(), bytesReceived: cs.s.BytesReceived(), }) - cs.put() - cs.put = nil + cs.done(balancer.DoneInfo{Err: err}) + 
cs.done = nil } if cs.statsHandler != nil { end := &stats.End{ @@ -521,15 +540,15 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { - t transport.ServerTransport - s *transport.Stream - p *parser - codec Codec - cp Compressor - dc Decompressor - cbuf *bytes.Buffer - maxMsgSize int - trInfo *traceInfo + t transport.ServerTransport + s *transport.Stream + p *parser + codec Codec + cp Compressor + dc Decompressor + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo statsHandler stats.Handler @@ -573,22 +592,23 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } ss.mu.Unlock() } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } }() var outPayload *stats.OutPayload if ss.statsHandler != nil { outPayload = &stats.OutPayload{} } - out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload) - defer func() { - if ss.cbuf != nil { - ss.cbuf.Reset() - } - }() + hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload) if err != nil { - err = Errorf(codes.Internal, "grpc: %v", err) return err } - if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil { + if len(data) > ss.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(data), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } if outPayload != nil { @@ -612,12 +632,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } ss.mu.Unlock() } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } }() var inPayload *stats.InPayload if ss.statsHandler != nil { inPayload = &stats.InPayload{} } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil { if err == io.EOF { return err } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index 0f366476742..22b8fb50dea 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -47,8 +32,20 @@ type Info struct { // TODO: More to be added. } -// ServerInHandle defines the function which runs when a new stream is created -// on the server side. Note that it is executed in the per-connection I/O goroutine(s) instead -// of per-RPC goroutine. Therefore, users should NOT have any blocking/time-consuming -// work in this handle. Otherwise all the RPCs would slow down. +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. +// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". 
+// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index f6747e1dfa4..c1c96dedcb7 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -1,33 +1,18 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -46,7 +31,7 @@ import ( // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. // This should only be set before any RPCs are sent or received by this program. -var EnableTracing = true +var EnableTracing bool // methodFamily returns the trace family for the given method. // It turns "/pkg.Service/GetFoo" into "pkg.Service". @@ -91,6 +76,15 @@ func (f *firstLine) String() string { return line.String() } +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + // payload represents an RPC request or response payload. 
type payload struct { sent bool // whether this is an outgoing payload @@ -100,9 +94,9 @@ type payload struct { func (p payload) String() string { if p.sent { - return fmt.Sprintf("sent: %v", p.msg) + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) } - return fmt.Sprintf("recv: %v", p.msg) + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) } type fmtStringer struct { diff --git a/vendor/google.golang.org/grpc/transport/BUILD b/vendor/google.golang.org/grpc/transport/BUILD index d6b3e9fd45f..838ad3079a6 100644 --- a/vendor/google.golang.org/grpc/transport/BUILD +++ b/vendor/google.golang.org/grpc/transport/BUILD @@ -3,13 +3,13 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "bdp_estimator.go", "control.go", - "go16.go", - "go17.go", "handler_server.go", "http2_client.go", "http2_server.go", "http_util.go", + "log.go", "transport.go", ], importpath = "google.golang.org/grpc/transport", diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go new file mode 100644 index 00000000000..8dd2ed42792 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/bdp_estimator.go @@ -0,0 +1,143 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows + // will be increased to. + bdpLimit = (1 << 20) * 4 + // alpha is a constant factor used to keep a moving average + // of RTTs. + alpha = 0.9 + // If the current bdp sample is greater than or equal to + // our beta * our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bbp estimate by a factor of gamma. + beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +var ( + // Adding arbitrary data to ping so that its ack can be + // identified. + // Easter-egg: what does the ping message say? + bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} +) + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. 
This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. 
+ if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go index 8d29aee53d4..dd1a8d42e7e 100644 --- a/vendor/google.golang.org/grpc/transport/control.go +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -37,17 +22,18 @@ import ( "fmt" "math" "sync" + "sync/atomic" "time" "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" ) const ( // The default value of flow control window size in HTTP2 spec. defaultWindowSize = 65535 // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - initialConnWindowSize = defaultWindowSize * 16 // for a connection + initialWindowSize = defaultWindowSize // for an RPC infinity = time.Duration(math.MaxInt64) defaultClientKeepaliveTime = infinity defaultClientKeepaliveTimeout = time.Duration(20 * time.Second) @@ -58,11 +44,43 @@ const ( defaultServerKeepaliveTime = time.Duration(2 * time.Hour) defaultServerKeepaliveTimeout = time.Duration(20 * time.Second) defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute) + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultLocalSendQuota sets is default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. 
+ defaultLocalSendQuota = 64 * 1024 ) // The following defines various control items which could flow through // the control buffer of transport. They represent different aspects of // control tasks, e.g., flow control, settings, streaming resetting, etc. + +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool +} + +func (*headerFrame) item() {} + +type continuationFrame struct { + streamID uint32 + endHeaders bool + headerBlockFragment []byte +} + +type dataFrame struct { + streamID uint32 + endStream bool + d []byte + f func() +} + +func (*dataFrame) item() {} + +func (*continuationFrame) item() {} + type windowUpdate struct { streamID uint32 increment uint32 @@ -87,6 +105,8 @@ func (*resetStream) item() {} type goAway struct { code http2.ErrCode debugData []byte + headsUp bool + closeConn bool } func (*goAway) item() {} @@ -108,8 +128,9 @@ func (*ping) item() {} type quotaPool struct { c chan int - mu sync.Mutex - quota int + mu sync.Mutex + version uint32 + quota int } // newQuotaPool creates a quotaPool which has quota q available to consume. @@ -130,6 +151,10 @@ func newQuotaPool(q int) *quotaPool { func (qb *quotaPool) add(v int) { qb.mu.Lock() defer qb.mu.Unlock() + qb.lockedAdd(v) +} + +func (qb *quotaPool) lockedAdd(v int) { select { case n := <-qb.c: qb.quota += n @@ -150,6 +175,35 @@ func (qb *quotaPool) add(v int) { } } +func (qb *quotaPool) addAndUpdate(v int) { + qb.mu.Lock() + defer qb.mu.Unlock() + qb.lockedAdd(v) + // Update the version only after having added to the quota + // so that if acquireWithVesrion sees the new vesrion it is + // guaranteed to have seen the updated quota. + // Also, still keep this inside of the lock, so that when + // compareAndExecute is processing, this function doesn't + // get executed partially (quota gets updated but the version + // doesn't). 
+ atomic.AddUint32(&(qb.version), 1) +} + +func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) { + return qb.c, atomic.LoadUint32(&(qb.version)) +} + +func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool { + qb.mu.Lock() + defer qb.mu.Unlock() + if version == atomic.LoadUint32(&(qb.version)) { + success() + return true + } + failure() + return false +} + // acquire returns the channel on which available quota amounts are sent. func (qb *quotaPool) acquire() <-chan int { return qb.c @@ -157,16 +211,59 @@ func (qb *quotaPool) acquire() <-chan int { // inFlow deals with inbound flow control type inFlow struct { + mu sync.Mutex // The inbound flow control limit for pending data. limit uint32 - - mu sync.Mutex // pendingData is the overall data which have been received but not been // consumed by applications. pendingData uint32 // The amount of data the application has consumed but grpc has not sent // window update for them. Used to reduce window update frequency. pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) uint32 { + f.mu.Lock() + defer f.mu.Unlock() + d := n - f.limit + f.limit = n + return d +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + defer f.mu.Unlock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. 
A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 } // onData is invoked when some data frame is received. It updates pendingData. 
@@ -174,7 +271,7 @@ func (f *inFlow) onData(n uint32) error { f.mu.Lock() defer f.mu.Unlock() f.pendingData += n - if f.pendingData+f.pendingUpdate > f.limit { + if f.pendingData+f.pendingUpdate > f.limit+f.delta { return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) } return nil @@ -189,6 +286,13 @@ func (f *inFlow) onRead(n uint32) uint32 { return 0 } f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { wu := f.pendingUpdate @@ -198,10 +302,10 @@ func (f *inFlow) onRead(n uint32) uint32 { return 0 } -func (f *inFlow) resetPendingData() uint32 { +func (f *inFlow) resetPendingUpdate() uint32 { f.mu.Lock() defer f.mu.Unlock() - n := f.pendingData - f.pendingData = 0 + n := f.pendingUpdate + f.pendingUpdate = 0 return n } diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go deleted file mode 100644 index ee1c46bad57..00000000000 --- a/vendor/google.golang.org/grpc/transport/go16.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build go1.6,!go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go deleted file mode 100644 index 356f13ff197..00000000000 --- a/vendor/google.golang.org/grpc/transport/go17.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go index 24f306babbb..7e0fdb35938 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -1,32 +1,18 @@ /* - * Copyright 2016, Google Inc. - * All rights reserved. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Copyright 2016 gRPC authors. * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -47,6 +33,7 @@ import ( "sync" "time" + "github.com/golang/protobuf/proto" "golang.org/x/net/context" "golang.org/x/net/http2" "google.golang.org/grpc/codes" @@ -102,15 +89,6 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr continue } for _, v := range vv { - if k == "user-agent" { - // user-agent is special. Copying logic of http_util.go. - if i := strings.LastIndex(v, " "); i == -1 { - // There is no application user agent string being set - continue - } else { - v = v[:i] - } - } v, err := decodeMetadataHeader(k, v) if err != nil { return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err) @@ -144,6 +122,10 @@ type serverHandlerTransport struct { // ServeHTTP (HandleStreams) goroutine. The channel is closed // when WriteStatus is called. writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex } func (ht *serverHandlerTransport) Close() error { @@ -179,15 +161,24 @@ func (a strAddr) String() string { return string(a) } // do runs fn in the ServeHTTP goroutine. func (ht *serverHandlerTransport) do(fn func()) error { + // Avoid a panic writing to closed channel. Imperfect but maybe good enough. 
select { - case ht.writes <- fn: - return nil case <-ht.closedCh: return ErrConnClosing + default: + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } } } func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + err := ht.do(func() { ht.writeCommonHeaders(s) @@ -202,7 +193,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } - // TODO: Support Grpc-Status-Details-Bin + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } if md := s.Trailer(); len(md) > 0 { for k, vv := range md { @@ -218,7 +217,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } }) - close(ht.writes) + + if err == nil { // transport has not been closed + ht.Close() + close(ht.writes) + } return err } @@ -241,16 +244,17 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers h.Add("Trailer", "Grpc-Status") h.Add("Trailer", "Grpc-Message") - // TODO: Support Grpc-Status-Details-Bin + h.Add("Trailer", "Grpc-Status-Details-Bin") if s.sendCompress != "" { h.Set("Grpc-Encoding", s.sendCompress) } } -func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { return ht.do(func() { ht.writeCommonHeaders(s) + ht.rw.Write(hdr) ht.rw.Write(data) if !opts.Delay { ht.rw.(http.Flusher).Flush() @@ -309,13 +313,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace req := ht.req s := &Stream{ - id: 0, // irrelevant - 
windowHandler: func(int) {}, // nothing - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), } pr := &peer.Peer{ Addr: ht.RemoteAddr(), @@ -326,7 +330,10 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ctx = metadata.NewIncomingContext(ctx, ht.headerMD) ctx = peer.NewContext(ctx, pr) s.ctx = newContextWithStream(ctx, s) - s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, recv: s.buf}, + windowHandler: func(int) {}, + } // readerDone is closed when the Body.Read-ing goroutine exits. readerDone := make(chan struct{}) @@ -338,11 +345,11 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace for buf := make([]byte, readSize); ; { n, err := req.Body.Read(buf) if n > 0 { - s.buf.put(&recvMsg{data: buf[:n:n]}) + s.buf.put(recvMsg{data: buf[:n:n]}) buf = buf[n:] } if err != nil { - s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } if len(buf) == 0 { diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 380fff665fb..1abb62e6df4 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -48,7 +33,6 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -59,6 +43,7 @@ import ( // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { ctx context.Context + cancel context.CancelFunc target string // server name/addr userAgent string md interface{} @@ -68,17 +53,6 @@ type http2Client struct { authInfo credentials.AuthInfo // auth info about the connection nextID uint32 // the next stream ID to be used - // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by sending a value on writableChan - // and releases it by receiving from writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - // TODO(zhaoq): Maybe have a channel context? - shutdownChan chan struct{} - // errorChan is closed to notify the I/O error to the caller. - errorChan chan struct{} // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. goAway chan struct{} @@ -91,7 +65,7 @@ type http2Client struct { // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer + controlBuf *controlBuffer fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool @@ -101,6 +75,8 @@ type http2Client struct { // The scheme used: https if TLS is on, http otherwise. scheme string + isSecure bool + creds []credentials.PerRPCCredentials // Boolean to keep track of reading activity on transport. 
@@ -110,6 +86,11 @@ type http2Client struct { statsHandler stats.Handler + initialWindowSize int32 + + bdpEst *bdpEstimator + outQuotaVersion uint32 + mu sync.Mutex // guard the following variables state transportState // the state of underlying connection activeStreams map[uint32]*Stream @@ -117,8 +98,6 @@ type http2Client struct { maxStreams int // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 - // goAwayID records the Last-Stream-ID in the GoAway frame from the server. - goAwayID uint32 // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -130,7 +109,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if fn != nil { return fn(ctx, addr) } - return dialContext(ctx, "tcp", addr) + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) } func isTemporary(err error) bool { @@ -164,14 +143,23 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
-func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) { +func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) { scheme := "http" - conn, err := dial(ctx, opts.Dialer, addr.Addr) + ctx, cancel := context.WithCancel(ctx) + connectCtx, connectCancel := context.WithTimeout(ctx, timeout) + defer func() { + connectCancel() + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) if err != nil { if opts.FailOnNonTempDialError { - return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err) + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } // Any further errors will close the underlying connection defer func(conn net.Conn) { @@ -179,16 +167,20 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( conn.Close() } }(conn) - var authInfo credentials.AuthInfo + var ( + isSecure bool + authInfo credentials.AuthInfo + ) if creds := opts.TransportCredentials; creds != nil { scheme = "https" - conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn) + conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Addr, conn) if err != nil { // Credentials handshake errors are typically considered permanent // to avoid retrying on e.g. bad certificates. temp := isTemporary(err) - return nil, connectionErrorf(temp, err, "transport: %v", err) + return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err) } + isSecure = true } kp := opts.KeepaliveParams // Validate keepalive parameters. 
@@ -198,9 +190,24 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( if kp.Timeout == 0 { kp.Timeout = defaultClientKeepaliveTimeout } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } var buf bytes.Buffer + writeBufSize := defaultWriteBufSize + if opts.WriteBufferSize > 0 { + writeBufSize = opts.WriteBufferSize + } + readBufSize := defaultReadBufSize + if opts.ReadBufferSize > 0 { + readBufSize = opts.ReadBufferSize + } t := &http2Client{ ctx: ctx, + cancel: cancel, target: addr.Addr, userAgent: opts.UserAgent, md: addr.Metadata, @@ -209,27 +216,36 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( localAddr: conn.LocalAddr(), authInfo: authInfo, // The client initiated stream id is odd starting from 1. - nextID: 1, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - errorChan: make(chan struct{}), - goAway: make(chan struct{}), - awakenKeepalive: make(chan struct{}, 1), - framer: newFramer(conn), - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - scheme: scheme, - state: reachable, - activeStreams: make(map[uint32]*Stream), - creds: opts.PerRPCCredentials, - maxStreams: defaultMaxStreamsClient, - streamsQuota: newQuotaPool(defaultMaxStreamsClient), - streamSendQuota: defaultWindowSize, - kp: kp, - statsHandler: opts.StatsHandler, + nextID: 1, + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + framer: newFramer(conn, writeBufSize, readBufSize), + controlBuf: newControlBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + scheme: scheme, + state: reachable, + activeStreams: make(map[uint32]*Stream), + isSecure: 
isSecure, + creds: opts.PerRPCCredentials, + maxStreams: defaultMaxStreamsClient, + streamsQuota: newQuotaPool(defaultMaxStreamsClient), + streamSendQuota: defaultWindowSize, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + } + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } } // Make sure awakenKeepalive can't be written upon. // keepalive routine will make it writable, if need be. @@ -252,65 +268,75 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( n, err := t.conn.Write(clientPreface) if err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) } if n != len(clientPreface) { t.Close() return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) } - if initialWindowSize != defaultWindowSize { - err = t.framer.writeSettings(true, http2.Setting{ + if t.initialWindowSize != defaultWindowSize { + err = t.framer.fr.WriteSettings(http2.Setting{ ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize), + Val: uint32(t.initialWindowSize), }) } else { - err = t.framer.writeSettings(true) + err = t.framer.fr.WriteSettings() } if err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) } // Adjust the connection flow control window if needed. 
- if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) } } - go t.controller() + t.framer.writer.Flush() + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() if t.kp.Time != infinity { go t.keepalive() } - t.writableChan <- 0 return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ - id: t.nextID, - done: make(chan struct{}), - goAway: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - fc: &inFlow{limit: initialWindowSize}, - sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), - headerChan: make(chan struct{}), + id: t.nextID, + done: make(chan struct{}), + goAway: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), + localSendQuota: newQuotaPool(defaultLocalSendQuota), + headerChan: make(chan struct{}), } t.nextID += 2 - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) } // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. 
s.ctx = ctx - s.dec = &recvBufferReader{ - ctx: s.ctx, - goAway: s.goAway, - recv: s.buf, + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, } + return s } @@ -324,31 +350,51 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if t.authInfo != nil { pr.AuthInfo = t.authInfo } - userCtx := ctx ctx = peer.NewContext(ctx, pr) - authData := make(map[string]string) - for _, c := range t.creds { + var ( + authData = make(map[string]string) + audience string + ) + // Create an audience string only if needed. + if len(t.creds) > 0 || callHdr.Creds != nil { // Construct URI required to get auth request metadata. - var port string - if pos := strings.LastIndex(t.target, ":"); pos != -1 { - // Omit port if it is the default one. - if t.target[pos+1:] != "443" { - port = ":" + t.target[pos+1:] - } - } + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") pos := strings.LastIndex(callHdr.Method, "/") if pos == -1 { - return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + pos = len(callHdr.Method) } - audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] + audience = "https://" + host + callHdr.Method[:pos] + } + for _, c := range t.creds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err) + return nil, streamErrorf(codes.Internal, "transport: %v", err) } for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) authData[k] = v } } + callAuthData := map[string]string{} + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. 
+ if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, streamErrorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } t.mu.Lock() if t.activeStreams == nil { t.mu.Unlock() @@ -363,7 +409,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, ErrConnClosing } t.mu.Unlock() - sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) + sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire()) if err != nil { return nil, err } @@ -371,79 +417,49 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if sq > 1 { t.streamsQuota.add(sq - 1) } - if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - // Return the quota back now because there is no stream returned to the caller. - if _, ok := err.(StreamError); ok { - t.streamsQuota.add(1) - } - return nil, err - } - t.mu.Lock() - if t.state == draining { - t.mu.Unlock() - t.streamsQuota.add(1) - // Need to make t writable again so that the rpc in flight can still proceed. - t.writableChan <- 0 - return nil, ErrStreamDrain - } - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing - } - s := t.newStream(ctx, callHdr) - s.clientStatsCtx = userCtx - t.activeStreams[s.id] = s - // If the number of active streams change from 0 to 1, then check if keepalive - // has gone dormant. If so, wake it up. - if len(t.activeStreams) == 1 { - select { - case t.awakenKeepalive <- struct{}{}: - t.framer.writePing(false, false, [8]byte{}) - default: - } - } - - t.mu.Unlock() - - // HPACK encodes various headers. 
Note that once WriteField(...) is - // called, the corresponding headers/continuation frame has to be sent - // because hpack.Encoder is stateful. - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) - t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. + hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) if callHdr.SendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: 
callHdr.SendCompress}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. timeout := dl.Sub(time.Now()) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } - for k, v := range authData { - // Capital header names are illegal in HTTP/2. - k = strings.ToLower(k) - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - var ( - hasMD bool - endHeaders bool - ) if md, ok := metadata.FromOutgoingContext(ctx); ok { - hasMD = true for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. 
if isReservedHeader(k) { continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } @@ -453,60 +469,56 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } - first := true - bufLen := t.hBuf.Len() - // Sends the headers in a single batch even when they span multiple frames. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - var flush bool - if endHeaders && (hasMD || callHdr.Flush) { - flush = true - } - if first { - // Sends a HeadersFrame to server to start a new stream. - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: t.hBuf.Next(size), - EndStream: false, - EndHeaders: endHeaders, - } - // Do a force flush for the buffered frames iff it is the last headers frame - // and there is header metadata to be sent. Otherwise, there is flushing until - // the corresponding data frame is written. - err = t.framer.writeHeaders(flush, p) - first = false - } else { - // Sends Continuation frames for the leftover headers. - err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) - } - if err != nil { - t.notifyError(err) - return nil, connectionErrorf(true, err, "transport: %v", err) + t.mu.Lock() + if t.state == draining { + t.mu.Unlock() + t.streamsQuota.add(1) + return nil, ErrStreamDrain + } + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + s := t.newStream(ctx, callHdr) + t.activeStreams[s.id] = s + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. 
If so, wake it up. + if len(t.activeStreams) == 1 { + select { + case t.awakenKeepalive <- struct{}{}: + t.controlBuf.put(&ping{data: [8]byte{}}) + // Fill the awakenKeepalive channel again as this channel must be + // kept non-writable except at the point that the keepalive() + // goroutine is waiting either to be awaken or shutdown. + t.awakenKeepalive <- struct{}{} + default: } } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) + t.mu.Unlock() + + s.mu.Lock() s.bytesSent = true + s.mu.Unlock() if t.statsHandler != nil { outHeader := &stats.OutHeader{ Client: true, - WireLength: bufLen, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: callHdr.SendCompress, } - t.statsHandler.HandleRPC(s.clientStatsCtx, outHeader) + t.statsHandler.HandleRPC(s.ctx, outHeader) } - t.writableChan <- 0 return s, nil } @@ -518,6 +530,10 @@ func (t *http2Client) CloseStream(s *Stream, err error) { t.mu.Unlock() return } + if err != nil { + // notify in-flight streams, before the deletion + s.write(recvMsg{err: err}) + } delete(t.activeStreams, s.id) if t.state == draining && len(t.activeStreams) == 0 { // The transport is draining and s is the last live stream on t. 
@@ -547,11 +563,6 @@ func (t *http2Client) CloseStream(s *Stream, err error) { s.mu.Lock() rstStream = s.rstStream rstError = s.rstError - if q := s.fc.resetPendingData(); q > 0 { - if n := t.fc.onRead(q); n > 0 { - t.controlBuf.put(&windowUpdate{0, n}) - } - } if s.state == streamDone { s.mu.Unlock() return @@ -577,12 +588,9 @@ func (t *http2Client) Close() (err error) { t.mu.Unlock() return } - if t.state == reachable || t.state == draining { - close(t.errorChan) - } t.state = closing t.mu.Unlock() - close(t.shutdownChan) + t.cancel() err = t.conn.Close() t.mu.Lock() streams := t.activeStreams @@ -604,41 +612,18 @@ func (t *http2Client) Close() (err error) { } t.statsHandler.HandleConn(t.ctx, connEnd) } - return + return err } +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. func (t *http2Client) GracefulClose() error { t.mu.Lock() switch t.state { - case unreachable: - // The server may close the connection concurrently. t is not available for - // any streams. Close it now. - t.mu.Unlock() - t.Close() - return nil - case closing: - t.mu.Unlock() - return nil - } - // Notify the streams which were initiated after the server sent GOAWAY. - select { - case <-t.goAway: - n := t.prevGoAwayID - if n == 0 && t.nextID > 1 { - n = t.nextID - 2 - } - m := t.goAwayID + 2 - if m == 2 { - m = 1 - } - for i := m; i <= n; i += 2 { - if s, ok := t.activeStreams[i]; ok { - close(s.goAway) - } - } - default: - } - if t.state == draining { + case closing, draining: t.mu.Unlock() return nil } @@ -653,21 +638,38 @@ func (t *http2Client) GracefulClose() error { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. 
-// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later -// if it improves the performance. -func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { - r := bytes.NewBuffer(data) - for { - var p []byte - if r.Len() > 0 { +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + + if hdr == nil && data == nil && opts.Last { + // stream.CloseSend uses this to send an empty frame with endStream=True + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}}) + return nil + } + // Add data to header frame so that we can equally distribute data across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + for idx, r := range [][]byte{hdr, data} { + for len(r) > 0 { size := http2MaxFrameLen // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire()) + quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() + sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan) if err != nil { return err } // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire()) if err != nil { return err } @@ -677,69 +679,51 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { if tq < size { size = tq } - p = r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.add(sq - ps) + if size > len(r) { + size = len(r) } + p := r[:size] + ps := len(p) if ps < tq { // Overbooked transport quota. Return it back. 
t.sendQuotaPool.add(tq - ps) } - } - var ( - endStream bool - forceFlush bool - ) - if opts.Last && r.Len() == 0 { - endStream = true - } - // Indicate there is a writer who is about to write a data frame. - t.framer.adjustNumWriters(1) - // Got some quota. Try to acquire writing privilege on the transport. - if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil { - if _, ok := err.(StreamError); ok || err == io.EOF { - // Return the connection quota back. - t.sendQuotaPool.add(len(p)) + // Acquire local send quota to be able to write to the controlBuf. + ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire()) + if err != nil { + if _, ok := err.(ConnectionError); !ok { + t.sendQuotaPool.add(ps) + } + return err } - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) + s.localSendQuota.add(ltq - ps) // It's ok if we make it negative. + var endStream bool + // See if this is the last frame to be written. + if opts.Last { + if len(r)-size == 0 { // No more data in r after this iteration. + if idx == 0 { // We're writing data header. + if len(data) == 0 { // There's no data to follow. + endStream = true + } + } else { // We're writing data. 
+ endStream = true + } + } } - return err - } - select { - case <-s.ctx.Done(): - t.sendQuotaPool.add(len(p)) - if t.framer.adjustNumWriters(-1) == 0 { - t.controlBuf.put(&flushIO{}) + success := func() { + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }}) + if ps < sq { + s.sendQuotaPool.lockedAdd(sq - ps) + } + r = r[ps:] + } + failure := func() { + s.sendQuotaPool.lockedAdd(sq) + } + if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { + t.sendQuotaPool.add(ps) + s.localSendQuota.add(ps) } - t.writableChan <- 0 - return ContextErr(s.ctx.Err()) - default: - } - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { - // Do a force flush iff this is last frame for the entire gRPC message - // and the caller is the only writer at this moment. - forceFlush = true - } - // If WriteData fails, all the pending streams will be handled - // by http2Client.Close(). No explicit CloseStream() needs to be - // invoked. - if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { - t.notifyError(err) - return connectionErrorf(true, err, "transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - if r.Len() == 0 { - break } } if !opts.Last { @@ -760,6 +744,24 @@ func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { return s, ok } +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + // Piggyback connection's window update along. 
+ if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. @@ -769,41 +771,76 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { if s.state == streamDone { return } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } if w := s.fc.onRead(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } t.controlBuf.put(&windowUpdate{s.id, w}) } } +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) + t.controlBuf.put(&settings{ + ack: false, + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: uint32(n), + }, + }, + }) +} + func (t *http2Client) handleData(f *http2.DataFrame) { size := f.Header().Length - if err := t.fc.onData(uint32(size)); err != nil { - t.notifyError(connectionErrorf(true, err, "%v", err)) - return + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(uint32(size)) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. 
+ // + // Furthermore, if a bdpPing is being sent out we can piggyback + // connection's window update for the bytes we just received. + if sendBDPPing { + if size != 0 { // Could've been an empty data frame. + t.controlBuf.put(&windowUpdate{0, uint32(size)}) + } + t.controlBuf.put(bdpPing) + } else { + if err := t.fc.onData(uint32(size)); err != nil { + t.Close() + return + } + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if size > 0 { - if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if err := s.fc.onData(uint32(size)); err != nil { @@ -859,10 +896,10 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { - grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) statusCode = codes.Unknown } - s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %d", f.ErrCode)) + s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -881,7 +918,11 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) { } func (t *http2Client) handlePing(f *http2.PingFrame) { - if f.IsAck() { // Do nothing. + if f.IsAck() { + // Maybe it's a BDP ping. 
+ if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } return } pingAck := &ping{ack: true} @@ -890,36 +931,56 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - grpclog.Printf("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") - } t.mu.Lock() - if t.state == reachable || t.state == draining { - if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { - t.mu.Unlock() - t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) - return - } - select { - case <-t.goAway: - id := t.goAwayID - // t.goAway has been closed (i.e.,multiple GoAways). - if id < f.LastStreamID { - t.mu.Unlock() - t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID)) - return - } - t.prevGoAwayID = id - t.goAwayID = f.LastStreamID - t.mu.Unlock() - return - default: - t.setGoAwayReason(f) - } - t.goAwayID = f.LastStreamID - close(t.goAway) + if t.state != reachable && t.state != draining { + t.mu.Unlock() + return } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387). + // The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay + // with the ID of the last stream the server will process. + // Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we + // close all streams created after the second GoAwayId. This way streams that were in-flight while the GoAway from server + // was being sent don't get killed. 
+ select { + case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close() + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + t.state = draining + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + close(stream.goAway) + } + } + t.prevGoAwayID = id + active := len(t.activeStreams) t.mu.Unlock() + if active == 0 { + t.Close() + } } // setGoAwayReason sets the value of t.goAwayReason based @@ -960,20 +1021,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if !ok { return } + s.mu.Lock() s.bytesReceived = true + s.mu.Unlock() var state decodeState - for _, hf := range frame.Fields { - if err := state.processHeaderField(hf); err != nil { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: err}) - // Something wrong. Stops reading even when there is remaining. - return + if err := state.decodeResponseHeader(frame); err != nil { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true } + s.mu.Unlock() + s.write(recvMsg{err: err}) + // Something wrong. Stops reading even when there is remaining. 
+ return } endStream := frame.StreamEnded() @@ -985,13 +1046,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Client: true, WireLength: int(frame.Header().Length), } - t.statsHandler.HandleRPC(s.clientStatsCtx, inHeader) + t.statsHandler.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), } - t.statsHandler.HandleRPC(s.clientStatsCtx, inTrailer) + t.statsHandler.HandleRPC(s.ctx, inTrailer) } } }() @@ -1039,22 +1100,22 @@ func handleMalformedHTTP2(s *Stream, err error) { // TODO(zhaoq): Check the validity of the incoming frame sequence. func (t *http2Client) reader() { // Check the validity of server preface. - frame, err := t.framer.readFrame() + frame, err := t.framer.fr.ReadFrame() if err != nil { - t.notifyError(err) + t.Close() return } atomic.CompareAndSwapUint32(&t.activity, 0, 1) sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.notifyError(err) + t.Close() return } t.handleSettings(sf) // loop to keep reading incoming messages on this transport. for { - frame, err := t.framer.readFrame() + frame, err := t.framer.fr.ReadFrame() atomic.CompareAndSwapUint32(&t.activity, 0, 1) if err != nil { // Abort an active stream if the http2.Framer returns a @@ -1066,12 +1127,12 @@ func (t *http2Client) reader() { t.mu.Unlock() if s != nil { // use error detail to provide better err message - handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) + handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail())) } continue } else { // Transport error. 
- t.notifyError(err) + t.Close() return } } @@ -1091,7 +1152,7 @@ func (t *http2Client) reader() { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: - grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) + errorf("transport: http2Client.reader got unhandled frame type %v.", frame) } } } @@ -1115,7 +1176,7 @@ func (t *http2Client) applySettings(ss []http2.Setting) { t.mu.Lock() for _, stream := range t.activeStreams { // Adjust the sending quota for each stream. - stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota)) + stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) } t.streamSendQuota = s.Val t.mu.Unlock() @@ -1123,49 +1184,78 @@ func (t *http2Client) applySettings(ss []http2.Setting) { } } -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. -func (t *http2Client) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() - select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - // If the server needs to be to intimated about stream closing, - // then we need to make sure the RST_STREAM frame is written to - // the wire before the headers of the next stream waiting on - // streamQuota. We ensure this by adding to the streamsQuota pool - // only after having acquired the writableChan to send RST_STREAM. 
- t.streamsQuota.add(1) - t.framer.writeRSTStream(true, i.streamID, i.code) - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: - return - } - case <-t.shutdownChan: - return +// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) +// is duplicated between the client and the server. +// The transport layer needs to be refactored to take care of this. +func (t *http2Client) itemHandler(i item) error { + var err error + switch i := i.(type) { + case *dataFrame: + err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d) + if err == nil { + i.f() } + case *headerFrame: + t.hBuf.Reset() + for _, f := range i.hf { + t.hEnc.WriteField(f) + } + endHeaders := false + first := true + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: i.streamID, + BlockFragment: t.hBuf.Next(size), + EndStream: i.endStream, + EndHeaders: endHeaders, + }) + } else { + err = t.framer.fr.WriteContinuation( + i.streamID, + endHeaders, + t.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + case *windowUpdate: + err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) + case *settings: + if i.ack { + t.applySettings(i.ss) + err = t.framer.fr.WriteSettingsAck() + } else { + err = t.framer.fr.WriteSettings(i.ss...) + } + case *resetStream: + // If the server needs to be to intimated about stream closing, + // then we need to make sure the RST_STREAM frame is written to + // the wire before the headers of the next stream waiting on + // streamQuota. We ensure this by adding to the streamsQuota pool + // only after having acquired the writableChan to send RST_STREAM. 
+ err = t.framer.fr.WriteRSTStream(i.streamID, i.code) + t.streamsQuota.add(1) + case *flushIO: + err = t.framer.writer.Flush() + case *ping: + if !i.ack { + t.bdpEst.timesnap(i.data) + } + err = t.framer.fr.WritePing(i.ack, i.data) + default: + errorf("transport: http2Client.controller got unexpected item type %v\n", i) } + return err } // keepalive running in a separate goroutune makes sure the connection is alive by sending pings. @@ -1189,7 +1279,7 @@ func (t *http2Client) keepalive() { case <-t.awakenKeepalive: // If the control gets here a ping has been sent // need to reset the timer with keepalive.Timeout. - case <-t.shutdownChan: + case <-t.ctx.Done(): return } } else { @@ -1208,13 +1298,13 @@ func (t *http2Client) keepalive() { } t.Close() return - case <-t.shutdownChan: + case <-t.ctx.Done(): if !timer.Stop() { <-timer.C } return } - case <-t.shutdownChan: + case <-t.ctx.Done(): if !timer.Stop() { <-timer.C } @@ -1224,25 +1314,9 @@ func (t *http2Client) keepalive() { } func (t *http2Client) Error() <-chan struct{} { - return t.errorChan + return t.ctx.Done() } func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } - -func (t *http2Client) notifyError(err error) { - t.mu.Lock() - // make sure t.errorChan is closed only once. - if t.state == draining { - t.mu.Unlock() - t.Close() - return - } - if t.state == reachable { - t.state = unreachable - close(t.errorChan) - grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) - } - t.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index 14cd19c64c6..00df8eed0fd 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -36,6 +21,7 @@ package transport import ( "bytes" "errors" + "fmt" "io" "math" "math/rand" @@ -51,7 +37,6 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -67,35 +52,25 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { ctx context.Context + cancel context.CancelFunc conn net.Conn remoteAddr net.Addr localAddr net.Addr maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle - // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by receiving a value on writableChan - // and releases it by sending on writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - shutdownChan chan struct{} - framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder - + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer + controlBuf *controlBuffer fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool - - stats stats.Handler - + stats stats.Handler // Flag to keep track of reading activity on transport. // 1 is true and 0 is false. 
activity uint32 // Accessed atomically. @@ -111,15 +86,25 @@ type http2Server struct { // Flag to signify that number of ping strikes should be reset to 0. // This is set whenever data or header frames are sent. // 1 means yes. - resetPingStrikes uint32 // Accessed atomically. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator - mu sync.Mutex // guard the following + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} state transportState activeStreams map[uint32]*Stream // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 // idle is the time instant when the connection went idle. - // This is either the begining of the connection or when the number of + // This is either the beginning of the connection or when the number of // RPCs go down to 0. // When the connection is busy, this value is set to 0. idle time.Time @@ -128,32 +113,51 @@ type http2Server struct { // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. 
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { - framer := newFramer(conn) + writeBufSize := defaultWriteBufSize + if config.WriteBufferSize > 0 { + writeBufSize = config.WriteBufferSize + } + readBufSize := defaultReadBufSize + if config.ReadBufferSize > 0 { + readBufSize = config.ReadBufferSize + } + framer := newFramer(conn, writeBufSize, readBufSize) // Send initial settings as connection preface to client. - var settings []http2.Setting + var isettings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. maxStreams := config.MaxStreams if maxStreams == 0 { maxStreams = math.MaxUint32 } else { - settings = append(settings, http2.Setting{ + isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, Val: maxStreams, }) } - if initialWindowSize != defaultWindowSize { - settings = append(settings, http2.Setting{ - ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize)}) + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false } - if err := framer.writeSettings(true, settings...); err != nil { - return nil, connectionErrorf(true, err, "transport: %v", err) + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) } // Adjust the connection flow control window if needed. 
- if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, connectionErrorf(true, err, "transport: %v", err) + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) } } kp := config.KeepaliveParams @@ -179,29 +183,36 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err kep.MinTime = defaultKeepalivePolicyMinTime } var buf bytes.Buffer + ctx, cancel := context.WithCancel(context.Background()) t := &http2Server{ - ctx: context.Background(), - conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, - framer: framer, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - maxStreams: maxStreams, - inTapHandle: config.InTapHandle, - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - state: reachable, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - activeStreams: make(map[uint32]*Stream), - streamSendQuota: defaultWindowSize, - stats: config.StatsHandler, - kp: kp, - idle: time.Now(), - kep: kep, + ctx: ctx, + cancel: cancel, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + controlBuf: newControlBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + state: reachable, + activeStreams: make(map[uint32]*Stream), + streamSendQuota: defaultWindowSize, + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: 
t.updateFlowControl, + } } if t.stats != nil { t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ @@ -211,37 +222,68 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } - go t.controller() + t.framer.writer.Flush() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreUint32(&t.activity, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() go t.keepalive() - t.writableChan <- 0 return t, nil } // operateHeader takes action on the decoded headers. 
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { - buf := newRecvBuffer() - s := &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: &inFlow{limit: initialWindowSize}, - } + streamID := frame.Header().StreamID var state decodeState for _, hf := range frame.Fields { if err := state.processHeaderField(hf); err != nil { if se, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) + t.controlBuf.put(&resetStream{streamID, statusCodeConvTab[se.Code]}) } return } } + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.encoding, + method: state.method, + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - s.recvCompress = state.encoding if state.timeoutSet { s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) } else { @@ -263,13 +305,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if len(state.mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) } - - s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, + if state.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) + } + if state.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) } - s.recvCompress = state.encoding - s.method = state.method if t.inTapHandle != nil { var err error info := &tap.Info{ @@ -277,7 +318,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { - // TODO: Log the real error. 
+ warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) return } @@ -289,24 +330,25 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) + t.controlBuf.put(&resetStream{streamID, http2.ErrCodeRefusedStream}) return } - if s.id%2 != 1 || s.id <= t.maxStreamID { + if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. - grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id) + errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) return true } - t.maxStreamID = s.id + t.maxStreamID = streamID s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) - t.activeStreams[s.id] = s + s.localSendQuota = newQuotaPool(defaultLocalSendQuota) + t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } t.mu.Unlock() - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) if t.stats != nil { @@ -320,6 +362,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } t.stats.HandleRPC(s.ctx, inHeader) } + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } handle(s) return } @@ -328,40 +379,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - // Check the validity of client preface. 
- preface := make([]byte, len(clientPreface)) - if _, err := io.ReadFull(t.conn, preface); err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) - t.Close() - return - } - if !bytes.Equal(preface, clientPreface) { - grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) - t.Close() - return - } - - frame, err := t.framer.readFrame() - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return - } - if err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) - t.Close() - return - } - atomic.StoreUint32(&t.activity, 1) - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) - t.Close() - return - } - t.handleSettings(sf) - for { - frame, err := t.framer.readFrame() + frame, err := t.framer.fr.ReadFrame() atomic.StoreUint32(&t.activity, 1) if err != nil { if se, ok := err.(http2.StreamError); ok { @@ -378,7 +397,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. t.Close() return } - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() return } @@ -401,7 +420,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. 
default: - grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } } } @@ -421,6 +440,23 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { return s, true } +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. @@ -430,42 +466,78 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) { if s.state == streamDone { return } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } if w := s.fc.onRead(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } t.controlBuf.put(&windowUpdate{s.id, w}) } } +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. 
+func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) + t.controlBuf.put(&settings{ + ack: false, + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: uint32(n), + }, + }, + }) + +} + func (t *http2Server) handleData(f *http2.DataFrame) { size := f.Header().Length - if err := t.fc.onData(uint32(size)); err != nil { - grpclog.Printf("transport: http2Server %v", err) - t.Close() - return + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(uint32(size)) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + // Furthermore, if a bdpPing is being sent out we can piggyback + // connection's window update for the bytes we just received. + if sendBDPPing { + if size != 0 { // Could be an empty frame. + t.controlBuf.put(&windowUpdate{0, uint32(size)}) + } + t.controlBuf.put(bdpPing) + } else { + if err := t.fc.onData(uint32(size)); err != nil { + errorf("transport: http2Server %v", err) + t.Close() + return + } + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } } // Select the right stream to dispatch. 
s, ok := t.getStream(f) if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if size > 0 { - if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if err := s.fc.onData(uint32(size)); err != nil { @@ -517,17 +589,38 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { ss = append(ss, s) return nil }) - // The settings will be applied once the ack is sent. t.controlBuf.put(&settings{ack: true, ss: ss}) } +func (t *http2Server) applySettings(ss []http2.Setting) { + for _, s := range ss { + if s.ID == http2.SettingInitialWindowSize { + t.mu.Lock() + for _, stream := range t.activeStreams { + stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() + } + + } +} + const ( maxPingStrikes = 2 defaultPingTimeout = 2 * time.Hour ) func (t *http2Server) handlePing(f *http2.PingFrame) { - if f.IsAck() { // Do nothing. + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } return } pingAck := &ping{ack: true} @@ -550,7 +643,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { t.mu.Unlock() if ns < 1 && !t.kep.PermitWithoutStream { // Keepalive shouldn't be active thus, this new ping should - // have come after atleast defaultPingTimeout. + // have come after at least defaultPingTimeout. 
if t.lastPingAt.Add(defaultPingTimeout).After(now) { t.pingStrikes++ } @@ -563,7 +656,8 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings")}) + errorf("transport: Got to too many pings from the client, closing the connection.") + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) } } @@ -579,47 +673,16 @@ func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { } } -func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { - first := true - endHeaders := false - var err error - defer func() { - if err == nil { - // Reset ping strikes when seding headers since that might cause the - // peer to send ping. - atomic.StoreUint32(&t.resetPingStrikes, 1) - } - }() - // Sends the headers in a single batch. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: b.Next(size), - EndStream: endStream, - EndHeaders: endHeaders, - } - err = t.framer.writeHeaders(endHeaders, p) - first = false - } else { - err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size)) - } - if err != nil { - t.Close() - return connectionErrorf(true, err, "transport: %v", err) - } - } - return nil -} - // WriteHeader sends the header metedata md back to the client. 
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + s.mu.Lock() if s.headerOk || s.state == streamDone { s.mu.Unlock() @@ -635,14 +698,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } md = s.header s.mu.Unlock() - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) if s.sendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } for k, vv := range md { if isReservedHeader(k) { @@ -650,20 +712,20 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } - bufLen := t.hBuf.Len() - if err := t.writeHeaders(s, t.hBuf, false); err != nil { - return err - } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) if t.stats != nil { outHeader := &stats.OutHeader{ - WireLength: bufLen, + //WireLength: // 
TODO(mmukhi): Revisit this later, if needed. } t.stats.HandleRPC(s.Context(), outHeader) } - t.writableChan <- 0 return nil } @@ -672,6 +734,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + select { + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + var headersSent, hasHeader bool s.mu.Lock() if s.state == streamDone { @@ -691,20 +759,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headersSent = true } - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. 
if !headersSent { - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) } - t.hEnc.WriteField( - hpack.HeaderField{ - Name: "grpc-status", - Value: strconv.Itoa(int(st.Code())), - }) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) @@ -713,7 +776,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { panic(err) } - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } // Attach the trailer metadata. 
@@ -723,29 +786,32 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } - bufLen := t.hBuf.Len() - if err := t.writeHeaders(s, t.hBuf, true); err != nil { - t.Close() - return err - } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + }) if t.stats != nil { - outTrailer := &stats.OutTrailer{ - WireLength: bufLen, - } - t.stats.HandleRPC(s.Context(), outTrailer) + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } t.closeStream(s) - t.writableChan <- 0 return nil } // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) { - // TODO(zhaoq): Support multi-writers for a single stream. +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + var writeHeaderFrame bool s.mu.Lock() if s.state == streamDone { @@ -759,107 +825,81 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) { if writeHeaderFrame { t.WriteHeader(s, nil) } - defer func() { - if err == nil { + // Add data to header frame so that we can equally distribute data across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + for _, r := range [][]byte{hdr, data} { + for len(r) > 0 { + size := http2MaxFrameLen + // Wait until the stream has some quota to send the data. 
+ quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() + sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan) + if err != nil { + return err + } + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire()) + if err != nil { + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + if size > len(r) { + size = len(r) + } + p := r[:size] + ps := len(p) + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + // Acquire local send quota to be able to write to the controlBuf. + ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire()) + if err != nil { + if _, ok := err.(ConnectionError); !ok { + t.sendQuotaPool.add(ps) + } + return err + } + s.localSendQuota.add(ltq - ps) // It's ok we make this negative. // Reset ping strikes when sending data since this might cause // the peer to send ping. atomic.StoreUint32(&t.resetPingStrikes, 1) - } - }() - r := bytes.NewBuffer(data) - for { - if r.Len() == 0 { - return nil - } - size := http2MaxFrameLen - // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire()) - if err != nil { - return err - } - // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire()) - if err != nil { - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - p := r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.add(sq - ps) - } - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - t.framer.adjustNumWriters(1) - // Got some quota. Try to acquire writing privilege on the - // transport. 
- if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - if _, ok := err.(StreamError); ok { - // Return the connection quota back. + success := func() { + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() { + s.localSendQuota.add(ps) + }}) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.lockedAdd(sq - ps) + } + r = r[ps:] + } + failure := func() { + s.sendQuotaPool.lockedAdd(sq) + } + if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { t.sendQuotaPool.add(ps) + s.localSendQuota.add(ps) } - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) - } - return err } - select { - case <-s.ctx.Done(): - t.sendQuotaPool.add(ps) - if t.framer.adjustNumWriters(-1) == 0 { - t.controlBuf.put(&flushIO{}) - } - t.writableChan <- 0 - return ContextErr(s.ctx.Err()) - default: - } - var forceFlush bool - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { - forceFlush = true - } - if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { - t.Close() - return connectionErrorf(true, err, "transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - } - -} - -func (t *http2Server) applySettings(ss []http2.Setting) { - for _, s := range ss { - if s.ID == http2.SettingInitialWindowSize { - t.mu.Lock() - defer t.mu.Unlock() - for _, stream := range t.activeStreams { - stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota)) - } - t.streamSendQuota = s.Val - } - } + return nil } // keepalive running in a separate goroutine does the following: // 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. // 2. 
Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. // 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. -// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-resposive connection +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection // after an additional duration of keepalive.Timeout. func (t *http2Server) keepalive() { p := &ping{} @@ -868,7 +908,7 @@ func (t *http2Server) keepalive() { maxAge := time.NewTimer(t.kp.MaxConnectionAge) keepalive := time.NewTimer(t.kp.Time) // NOTE: All exit paths of this function should reset their - // respecitve timers. A failure to do so will cause the + // respective timers. A failure to do so will cause the // following clean-up to deadlock and eventually leak. defer func() { if !maxIdle.Stop() { @@ -892,23 +932,18 @@ func (t *http2Server) keepalive() { continue } val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.state = draining - t.mu.Unlock() - t.Drain() + t.drain(http2.ErrCodeNo, []byte{}) // Reseting the timer so that the clean-up doesn't deadlock. maxIdle.Reset(infinity) return } - t.mu.Unlock() maxIdle.Reset(val) case <-maxAge.C: - t.mu.Lock() - t.state = draining - t.mu.Unlock() - t.Drain() + t.drain(http2.ErrCodeNo, []byte{}) maxAge.Reset(t.kp.MaxConnectionAgeGrace) select { case <-maxAge.C: @@ -916,7 +951,7 @@ func (t *http2Server) keepalive() { t.Close() // Reseting the timer so that the clean-up doesn't deadlock. 
maxAge.Reset(infinity) - case <-t.shutdownChan: + case <-t.ctx.Done(): } return case <-keepalive.C: @@ -934,69 +969,137 @@ func (t *http2Server) keepalive() { pingSent = true t.controlBuf.put(p) keepalive.Reset(t.kp.Timeout) - case <-t.shutdownChan: + case <-t.ctx.Done(): return } } } -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. -func (t *http2Server) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// TODO(mmukhi): A lot of this code(and code in other places in the transport layer) +// is duplicated between the client and the server. +// The transport layer needs to be refactored to take care of this. +func (t *http2Server) itemHandler(i item) error { + switch i := i.(type) { + case *dataFrame: + if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil { + return err + } + i.f() + return nil + case *headerFrame: + t.hBuf.Reset() + for _, f := range i.hf { + t.hEnc.WriteField(f) + } + first := true + endHeaders := false + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + var err error + if first { + first = false + err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: i.streamID, + BlockFragment: t.hBuf.Next(size), + EndStream: i.endStream, + EndHeaders: endHeaders, + }) + } else { + err = t.framer.fr.WriteContinuation( + i.streamID, + endHeaders, + t.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + atomic.StoreUint32(&t.resetPingStrikes, 1) + return nil + case *windowUpdate: + return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) + case *settings: + if i.ack { + t.applySettings(i.ss) + return t.framer.fr.WriteSettingsAck() + } + return t.framer.fr.WriteSettings(i.ss...) 
+ case *resetStream: + return t.framer.fr.WriteRSTStream(i.streamID, i.code) + case *goAway: + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + // The transport is closing. + return fmt.Errorf("transport: Connection closing") + } + sid := t.maxStreamID + if !i.headsUp { + // Stop accepting more streams now. + t.state = draining + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil { + return err + } + if i.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return fmt.Errorf("transport: Connection closing") + } + return nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - t.framer.writeRSTStream(true, i.streamID, i.code) - case *goAway: - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - // The transport is closing. 
- return - } - sid := t.maxStreamID - t.state = draining - t.mu.Unlock() - t.framer.writeGoAway(true, sid, i.code, i.debugData) - if i.code == http2.ErrCodeEnhanceYourCalm { - t.Close() - } - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: + case <-t.drainChan: + case <-timer.C: + case <-t.ctx.Done(): return } - case <-t.shutdownChan: - return + t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData}) + }() + return nil + case *flushIO: + return t.framer.writer.Flush() + case *ping: + if !i.ack { + t.bdpEst.timesnap(i.data) } + return t.framer.fr.WritePing(i.ack, i.data) + default: + err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %t", i) + errorf("%v", err) + return err } } // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() (err error) { +func (t *http2Server) Close() error { t.mu.Lock() if t.state == closing { t.mu.Unlock() @@ -1006,8 +1109,8 @@ func (t *http2Server) Close() (err error) { streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() - close(t.shutdownChan) - err = t.conn.Close() + t.cancel() + err := t.conn.Close() // Cancel all active streams. for _, s := range streams { s.cancel() @@ -1016,7 +1119,7 @@ func (t *http2Server) Close() (err error) { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return + return err } // closeStream clears the footprint of a stream when the stream is not needed @@ -1036,11 +1139,6 @@ func (t *http2Server) closeStream(s *Stream) { // called to interrupt the potential blocking on other goroutines. 
s.cancel() s.mu.Lock() - if q := s.fc.resetPendingData(); q > 0 { - if w := t.fc.onRead(q); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } if s.state == streamDone { s.mu.Unlock() return @@ -1054,7 +1152,17 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.controlBuf.put(&goAway{code: http2.ErrCodeNo}) + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) } var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index 795d5d18a4f..39f878cfd5b 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -40,9 +25,9 @@ import ( "fmt" "io" "net" + "net/http" "strconv" "strings" - "sync/atomic" "time" "github.com/golang/protobuf/proto" @@ -50,7 +35,6 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) @@ -60,7 +44,8 @@ const ( // http://http2.github.io/http2-spec/#SettingValues http2InitHeaderTableSize = 4096 // http2IOBufSize specifies the buffer size for sending frames. 
- http2IOBufSize = 32 * 1024 + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 ) var ( @@ -88,6 +73,24 @@ var ( codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, codes.PermissionDenied: http2.ErrCodeInadequateSecurity, } + httpStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } ) // Records the states during HPACK decoding. Must be reset once the @@ -100,14 +103,17 @@ type decodeState struct { statusGen *status.Status // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not // intended for direct access outside of parsing. - rawStatusCode int32 + rawStatusCode *int rawStatusMsg string + httpStatus *int // Server side only fields. timeoutSet bool timeout time.Duration method string // key-value metadata map from the peer. - mdata map[string][]string + mdata map[string][]string + statsTags []byte + statsTrace []byte } // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -159,7 +165,7 @@ func validContentType(t string) bool { func (d *decodeState) status() *status.Status { if d.statusGen == nil { // No status-details were provided; generate status using code/msg. 
- d.statusGen = status.New(codes.Code(d.rawStatusCode), d.rawStatusMsg) + d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) } return d.statusGen } @@ -193,6 +199,51 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } +func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { + for _, hf := range frame.Fields { + if err := d.processHeaderField(hf); err != nil { + return err + } + } + + // If grpc status exists, no need to check further. + if d.rawStatusCode != nil || d.statusGen != nil { + return nil + } + + // If grpc status doesn't exist and http status doesn't exist, + // then it's a malformed header. + if d.httpStatus == nil { + return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") + } + + if *(d.httpStatus) != http.StatusOK { + code, ok := httpStatusConvTab[*(d.httpStatus)] + if !ok { + code = codes.Unknown + } + return streamErrorf(code, http.StatusText(*(d.httpStatus))) + } + + // gRPC status doesn't exist and http status is OK. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propagated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propagated. 
+ code := int(codes.Unknown) + d.rawStatusCode = &code + return nil + +} + +func (d *decodeState) addMetadata(k, v string) { + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + d.mdata[k] = append(d.mdata[k], v) +} + func (d *decodeState) processHeaderField(f hpack.HeaderField) error { switch f.Name { case "content-type": @@ -206,7 +257,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { if err != nil { return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) } - d.rawStatusCode = int32(code) + d.rawStatusCode = &code case "grpc-message": d.rawStatusMsg = decodeGrpcMessage(f.Value) case "grpc-status-details-bin": @@ -227,18 +278,36 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { } case ":path": d.method = f.Value - default: - if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { - if d.mdata == nil { - d.mdata = make(map[string][]string) - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) - return nil - } - d.mdata[f.Name] = append(d.mdata[f.Name], v) + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) } + d.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + } + d.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + } + d.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): 
%v", f.Name, f.Value, err) + return nil + } + d.addMetadata(f.Name, string(v)) } return nil } @@ -406,10 +475,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn) *framer { +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { f := &framer{ - reader: bufio.NewReaderSize(conn, http2IOBufSize), - writer: bufio.NewWriterSize(conn, http2IOBufSize), + reader: bufio.NewReaderSize(conn, readBufferSize), + writer: bufio.NewWriterSize(conn, writeBufferSize), } f.fr = http2.NewFramer(f.writer, f.reader) // Opt-in to Frame reuse API on framer to reduce garbage. @@ -418,132 +487,3 @@ func newFramer(conn net.Conn) *framer { f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } - -func (f *framer) adjustNumWriters(i int32) int32 { - return atomic.AddInt32(&f.numWriters, i) -} - -// The following writeXXX functions can only be called when the caller gets -// unblocked from writableChan channel (i.e., owns the privilege to write). 
- -func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { - if err := f.fr.WriteData(streamID, endStream, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { - if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { - if err := f.fr.WriteHeaders(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { - if err := f.fr.WritePing(ack, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { - if err := f.fr.WritePriority(streamID, p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { - if err := f.fr.WritePushPromise(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { - if err := f.fr.WriteRSTStream(streamID, code); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettings(forceFlush bool, settings 
...http2.Setting) error { - if err := f.fr.WriteSettings(settings...); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettingsAck(forceFlush bool) error { - if err := f.fr.WriteSettingsAck(); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { - if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) flushWrite() error { - return f.writer.Flush() -} - -func (f *framer) readFrame() (http2.Frame, error) { - return f.fr.ReadFrame() -} - -func (f *framer) errorDetail() error { - return f.fr.ErrorDetail() -} diff --git a/vendor/google.golang.org/grpc/transport/log.go b/vendor/google.golang.org/grpc/transport/log.go new file mode 100644 index 00000000000..ac8e358c5c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/log.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. +// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) 
+ } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} + +func fatalf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Fatalf(format, args...) + } +} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index 87dc27e5bba..ce5cb74d2ee 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -1,48 +1,32 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ -/* -Package transport defines and implements message oriented communication channel -to complete various transactions (e.g., an RPC). -*/ +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). package transport import ( - "bytes" + stdctx "context" "fmt" "io" "net" "sync" + "time" "golang.org/x/net/context" "golang.org/x/net/http2" @@ -65,57 +49,56 @@ type recvMsg struct { err error } -func (*recvMsg) item() {} - -// All items in an out of a recvBuffer should be the same type. -type item interface { - item() -} - -// recvBuffer is an unbounded channel of item. +// recvBuffer is an unbounded channel of recvMsg structs. +// Note recvBuffer differs from controlBuffer only in that recvBuffer +// holds a channel of only recvMsg structs instead of objects implementing "item" interface. 
+// recvBuffer is written to much more often than +// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" type recvBuffer struct { - c chan item + c chan recvMsg mu sync.Mutex - backlog []item + backlog []recvMsg } func newRecvBuffer() *recvBuffer { b := &recvBuffer{ - c: make(chan item, 1), + c: make(chan recvMsg, 1), } return b } -func (b *recvBuffer) put(r item) { +func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) == 0 { select { case b.c <- r: + b.mu.Unlock() return default: } } b.backlog = append(b.backlog, r) + b.mu.Unlock() } func (b *recvBuffer) load() { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} b.backlog = b.backlog[1:] default: } } + b.mu.Unlock() } -// get returns the channel that receives an item in the buffer. +// get returns the channel that receives a recvMsg in the buffer. // -// Upon receipt of an item, the caller should call load to send another -// item onto the channel if there is any. -func (b *recvBuffer) get() <-chan item { +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { return b.c } @@ -125,7 +108,7 @@ type recvBufferReader struct { ctx context.Context goAway chan struct{} recv *recvBuffer - last *bytes.Reader // Stores the remaining data in the previous calls. + last []byte // Stores the remaining data in the previous calls. err error } @@ -136,27 +119,87 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) { if r.err != nil { return 0, r.err } - defer func() { r.err = err }() - if r.last != nil && r.last.Len() > 0 { + n, r.err = r.read(p) + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + if r.last != nil && len(r.last) > 0 { // Read remaining data left in last call. 
- return r.last.Read(p) + copied := copy(p, r.last) + r.last = r.last[copied:] + return copied, nil } select { case <-r.ctx.Done(): return 0, ContextErr(r.ctx.Err()) case <-r.goAway: return 0, ErrStreamDrain - case i := <-r.recv.get(): + case m := <-r.recv.get(): r.recv.load() - m := i.(*recvMsg) if m.err != nil { return 0, m.err } - r.last = bytes.NewReader(m.data) - return r.last.Read(p) + copied := copy(p, m.data) + r.last = m.data[copied:] + return copied, nil } } +// All items in an out of a controlBuffer should be the same type. +type item interface { + item() +} + +// controlBuffer is an unbounded channel of item. +type controlBuffer struct { + c chan item + mu sync.Mutex + backlog []item +} + +func newControlBuffer() *controlBuffer { + b := &controlBuffer{ + c: make(chan item, 1), + } + return b +} + +func (b *controlBuffer) put(r item) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *controlBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives an item in the buffer. +// +// Upon receipt of an item, the caller should call load to send another +// item onto the channel if there is any. +func (b *controlBuffer) get() <-chan item { + return b.c +} + type streamState uint8 const ( @@ -171,11 +214,6 @@ type Stream struct { id uint32 // nil for client side Stream. st ServerTransport - // clientStatsCtx keeps the user context for stats handling. - // It's only valid on client side. Server side stats context is same as s.ctx. - // All client side stats collection should use the clientStatsCtx (instead of the stream context) - // so that all the generated stats for a particular RPC can be associated in the processing phase. 
- clientStatsCtx context.Context // ctx is the associated context of the stream. ctx context.Context // cancel is always nil for client side Stream. @@ -189,16 +227,20 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - dec io.Reader + trReader io.Reader fc *inFlow recvQuota uint32 + + // TODO: Remote this unused variable. // The accumulated inbound quota pending for window update. updateQuota uint32 - // The handler to control the window update procedure for both this - // particular stream and the associated transport. - windowHandler func(int) - sendQuotaPool *quotaPool + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if need be. + requestRead func(int) + + sendQuotaPool *quotaPool + localSendQuota *quotaPool // Close headerChan to indicate the end of reception of header metadata. headerChan chan struct{} // header caches the received header metadata. @@ -251,16 +293,24 @@ func (s *Stream) GoAway() <-chan struct{} { // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no -// header metadata or iii) the stream is cancelled/expired. +// header metadata or iii) the stream is canceled/expired. func (s *Stream) Header() (metadata.MD, error) { + var err error select { case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + err = ContextErr(s.ctx.Err()) case <-s.goAway: - return nil, ErrStreamDrain + err = ErrStreamDrain case <-s.headerChan: return s.header.Copy(), nil } + // Even if the stream is closed, header is returned if available. + select { + case <-s.headerChan: + return s.header.Copy(), nil + default: + } + return nil, err } // Trailer returns the cached trailer metedata. Note that if it is not called @@ -268,8 +318,9 @@ func (s *Stream) Header() (metadata.MD, error) { // side only. 
func (s *Stream) Trailer() metadata.MD { s.mu.RLock() - defer s.mu.RUnlock() - return s.trailer.Copy() + c := s.trailer.Copy() + s.mu.RUnlock() + return c } // ServerTransport returns the underlying ServerTransport for the stream. @@ -297,14 +348,16 @@ func (s *Stream) Status() *status.Status { // Server side only. func (s *Stream) SetHeader(md metadata.MD) error { s.mu.Lock() - defer s.mu.Unlock() if s.headerOk || s.state == streamDone { + s.mu.Unlock() return ErrIllegalHeaderWrite } if md.Len() == 0 { + s.mu.Unlock() return nil } s.header = metadata.Join(s.header, md) + s.mu.Unlock() return nil } @@ -315,25 +368,44 @@ func (s *Stream) SetTrailer(md metadata.MD) error { return nil } s.mu.Lock() - defer s.mu.Unlock() s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() return nil } func (s *Stream) write(m recvMsg) { - s.buf.put(&m) + s.buf.put(m) } -// Read reads all the data available for this Stream from the transport and +// Read reads all p bytes from the wire for this stream. +func (s *Stream) Read(p []byte) (n int, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er + } + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) +} + +// tranportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. -func (s *Stream) Read(p []byte) (n int, err error) { - n, err = s.dec.Read(p) +type transportReader struct { + reader io.Reader + // The handler to control the window update procedure for both this + // particular stream and the associated transport. 
+ windowHandler func(int) + er error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) if err != nil { + t.er = err return } - s.windowHandler(n) + t.windowHandler(n) return } @@ -348,15 +420,17 @@ func (s *Stream) finish(st *status.Status) { // BytesSent indicates whether any bytes have been sent on this stream. func (s *Stream) BytesSent() bool { s.mu.Lock() - defer s.mu.Unlock() - return s.bytesSent + bs := s.bytesSent + s.mu.Unlock() + return bs } // BytesReceived indicates whether any bytes have been received on this stream. func (s *Stream) BytesReceived() bool { s.mu.Lock() - defer s.mu.Unlock() - return s.bytesReceived + br := s.bytesReceived + s.mu.Unlock() + return br } // GoString is implemented by Stream so context.String() won't @@ -385,19 +459,22 @@ type transportState int const ( reachable transportState = iota - unreachable closing draining ) // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { - MaxStreams uint32 - AuthInfo credentials.AuthInfo - InTapHandle tap.ServerInHandle - StatsHandler stats.Handler - KeepaliveParams keepalive.ServerParameters - KeepalivePolicy keepalive.EnforcementPolicy + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int } // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -425,6 +502,14 @@ type ConnectOptions struct { KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. 
+ InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int } // TargetInfo contains the information of the target such as network address and metadata. @@ -435,8 +520,8 @@ type TargetInfo struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) { - return newHTTP2Client(ctx, target, opts) +func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) { + return newHTTP2Client(ctx, target, opts, timeout) } // Options provides additional hints and information for message @@ -448,7 +533,7 @@ type Options struct { // Delay is a hint to the transport implementation for whether // the data could be buffered for a batching write. The - // Transport implementation may ignore the hint. + // transport implementation may ignore the hint. Delay bool } @@ -468,10 +553,15 @@ type CallHdr struct { // outbound message. SendCompress string + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + // Flush indicates whether a new stream command should be sent // to the peer without waiting for the first data. This is - // only a hint. The transport may modify the flush decision + // only a hint. + // If it's true, the transport may modify the flush decision // for performance purposes. + // If it's false, new stream will never be flushed. Flush bool } @@ -489,7 +579,7 @@ type ClientTransport interface { // Write sends the data for the given stream. 
A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data []byte, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -507,7 +597,7 @@ type ClientTransport interface { // once the transport is initiated. Error() <-chan struct{} - // GoAway returns a channel that is closed when ClientTranspor + // GoAway returns a channel that is closed when ClientTransport // receives the draining signal from the server (e.g., GOAWAY frame in // HTTP/2). GoAway() <-chan struct{} @@ -531,7 +621,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data []byte, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -613,45 +703,33 @@ func (e StreamError) Error() string { return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } -// ContextErr converts the error from context package into a StreamError. -func ContextErr(err error) StreamError { - switch err { - case context.DeadlineExceeded: - return streamErrorf(codes.DeadlineExceeded, "%v", err) - case context.Canceled: - return streamErrorf(codes.Canceled, "%v", err) - } - panic(fmt.Sprintf("Unexpected error from context packet: %v", err)) -} - -// wait blocks until it can receive from ctx.Done, closing, or proceed. -// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. -// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise -// it return the StreamError for ctx.Err. -// If it receives from goAway, it returns 0, ErrStreamDrain. -// If it receives from closing, it returns 0, ErrConnClosing. 
-// If it receives from proceed, it returns the received integer, nil. -func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) { +// wait blocks until it can receive from one of the provided contexts or channels +func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) { select { case <-ctx.Done(): return 0, ContextErr(ctx.Err()) case <-done: - // User cancellation has precedence. - select { - case <-ctx.Done(): - return 0, ContextErr(ctx.Err()) - default: - } return 0, io.EOF case <-goAway: return 0, ErrStreamDrain - case <-closing: + case <-tctx.Done(): return 0, ErrConnClosing case i := <-proceed: return i, nil } } +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded, stdctx.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled, stdctx.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} + // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 @@ -661,6 +739,39 @@ const ( // NoReason is the default value when GoAway frame is received. NoReason GoAwayReason = 1 // TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm - // was recieved and that the debug data said "too_many_pings". + // was received and that the debug data said "too_many_pings". TooManyPings GoAwayReason = 2 ) + +// loopyWriter is run in a separate go routine. It is the single code path that will +// write data on wire. 
+func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) { + for { + select { + case i := <-cbuf.get(): + cbuf.load() + if err := handler(i); err != nil { + return + } + case <-ctx.Done(): + return + } + hasData: + for { + select { + case i := <-cbuf.get(): + cbuf.load() + if err := handler(i); err != nil { + return + } + case <-ctx.Done(): + return + default: + if err := handler(&flushIO{}); err != nil { + return + } + break hasData + } + } + } +} diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100755 index 00000000000..d006a426347 --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +die() { + echo "$@" >&2 + exit 1 +} + +# TODO: Remove this check and the mangling below once "context" is imported +# directly. +if git status --porcelain | read; then + die "Uncommitted or untracked files found; commit changes first" +fi + +PATH="$GOPATH/bin:$GOROOT/bin:$PATH" + +# Check proto in manual runs or cron runs. +if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then + check_proto="true" +fi + +if [ "$1" = "-install" ]; then + go get -d \ + google.golang.org/grpc/... + go get -u \ + github.com/golang/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/golang/protobuf/protoc-gen-go \ + golang.org/x/tools/cmd/stringer + if [[ "$check_proto" = "true" ]]; then + if [[ "$TRAVIS" = "true" ]]; then + PROTOBUF_VERSION=3.3.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif ! 
which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) +gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) +goimports -l . 2>&1 | tee /dev/stderr | (! read) +golint ./... 2>&1 | (grep -vE "(_mock|_string|grpc_lb_v1/doc|\.pb)\.go:" || true) | tee /dev/stderr | (! read) + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484). +# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711). +git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' +set +o pipefail +# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. +go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read) +set -o pipefail +git reset --hard HEAD + +if [[ "$check_proto" = "true" ]]; then + PATH="/home/travis/bin:$PATH" make proto && \ + git status --porcelain 2>&1 | (! read) || \ + (git status; git --no-pager diff; exit 1) +fi + +# TODO(menghanl): fix errors in transport_test. +staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./... 
From 132278ec6ea6c61d37780d4e270d7bde4b8547b1 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Wed, 13 Dec 2017 13:34:22 -0800 Subject: [PATCH 395/794] Version bump to grpc-gateway v1.3.0 --- Godeps/Godeps.json | 12 +- .../grpc-ecosystem/grpc-gateway/runtime/BUILD | 3 +- .../grpc-gateway/runtime/context.go | 64 +++++-- .../grpc-gateway/runtime/errors.go | 20 ++- .../grpc-gateway/runtime/handler.go | 35 ++-- .../runtime/internal/stream_chunk.pb.go | 33 +++- .../grpc-gateway/runtime/mux.go | 144 ++++++++++++++- .../grpc-gateway/runtime/pattern.go | 2 +- .../grpc-gateway/runtime/proto_errors.go | 61 +++++++ .../grpc-gateway/runtime/query.go | 165 ++++++++++++++++-- 10 files changed, 475 insertions(+), 64 deletions(-) create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 197f6e897c1..d40b0db27fe 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1871,18 +1871,18 @@ }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Comment": "v1.1.0-25-g84398b9", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Comment": "v1.3.0", + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Comment": "v1.1.0-25-g84398b9", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Comment": "v1.3.0", + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Comment": "v1.1.0-25-g84398b9", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Comment": "v1.3.0", + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/hashicorp/golang-lru", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD index 77e7c662829..4c47e798460 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD +++ 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD @@ -15,6 +15,7 @@ go_library( "mux.go", "pattern.go", "proto2_convert.go", + "proto_errors.go", "query.go", ], importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", @@ -25,10 +26,10 @@ go_library( "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal:go_default_library", "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", "//vendor/google.golang.org/grpc/grpclog:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", ], ) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go index f248c738b23..6e0eb27e285 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go @@ -9,18 +9,23 @@ import ( "time" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) -// MetadataHeaderPrefix is prepended to HTTP headers in order to convert them to -// gRPC metadata for incoming requests processed by grpc-gateway +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is the prefix for grpc-gateway supplied custom metadata fields. 
+const MetadataPrefix = "grpcgateway-" + // MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to // HTTP headers in a response handled by grpc-gateway const MetadataTrailerPrefix = "Grpc-Trailer-" + const metadataGrpcTimeout = "Grpc-Timeout" const xForwardedFor = "X-Forwarded-For" @@ -39,25 +44,25 @@ At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", except that the forwarded destination is not another HTTP service but rather a gRPC service. */ -func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, error) { +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { var pairs []string timeout := DefaultContextTimeout if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { var err error timeout, err = timeoutDecode(tm) if err != nil { - return nil, grpc.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) } } for key, vals := range req.Header { for _, val := range vals { - if key == "Authorization" { + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if strings.ToLower(key) == "authorization" { pairs = append(pairs, "authorization", val) - continue } - if strings.HasPrefix(key, MetadataHeaderPrefix) { - pairs = append(pairs, key[len(MetadataHeaderPrefix):], val) + if h, ok := mux.incomingHeaderMatcher(key); ok { + pairs = append(pairs, h, val) } } } @@ -85,7 +90,11 @@ func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, e if len(pairs) == 0 { return ctx, nil } - return metadata.NewContext(ctx, metadata.Pairs(pairs...)), nil + md := metadata.Pairs(pairs...) + if mux.metadataAnnotator != nil { + md = metadata.Join(md, mux.metadataAnnotator(ctx, req)) + } + return metadata.NewOutgoingContext(ctx, md), nil } // ServerMetadata consists of metadata sent from gRPC server. 
@@ -141,3 +150,38 @@ func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { } return } + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permenant request headers maintained by IANA. +// http://www.iana.org/assignments/message-headers/message-headers.xml +func isPermanentHTTPHeader(hdr string) bool { + switch hdr { + case + "Accept", + "Accept-Charset", + "Accept-Language", + "Accept-Ranges", + "Authorization", + "Cache-Control", + "Content-Type", + "Cookie", + "Date", + "Expect", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Schedule-Tag-Match", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Pragma", + "Referer", + "User-Agent", + "Via", + "Warning": + return true + } + return false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go index 0d3cb3bf3ca..8eebdcf49f4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go @@ -6,9 +6,9 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) // HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. @@ -64,7 +64,7 @@ var ( type errorBody struct { Error string `protobuf:"bytes,1,name=error" json:"error"` - Code int `protobuf:"bytes,2,name=code" json:"code"` + Code int32 `protobuf:"varint,2,name=code" json:"code"` } //Make this also conform to proto.Message for builtin JSONPb Marshaler @@ -78,14 +78,20 @@ func (*errorBody) ProtoMessage() {} // // The response body returned by this function is a JSON object, // which contains a member whose key is "error" and whose value is err.Error(). 
-func DefaultHTTPError(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { +func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { const fallback = `{"error": "failed to marshal error message"}` w.Header().Del("Trailer") w.Header().Set("Content-Type", marshaler.ContentType()) + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + body := &errorBody{ - Error: grpc.ErrorDesc(err), - Code: int(grpc.Code(err)), + Error: s.Message(), + Code: int32(s.Code()), } buf, merr := marshaler.Marshal(body) @@ -103,9 +109,9 @@ func DefaultHTTPError(ctx context.Context, marshaler Marshaler, w http.ResponseW grpclog.Printf("Failed to extract ServerMetadata from context") } - handleForwardResponseServerMetadata(w, md) + handleForwardResponseServerMetadata(w, mux, md) handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(grpc.Code(err)) + st := HTTPStatusFromCode(s.Code()) w.WriteHeader(st) if _, err := w.Write(buf); err != nil { grpclog.Printf("Failed to write response: %v", err) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go index d7040851ae9..ae6a5d551cf 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go @@ -9,12 +9,13 @@ import ( "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime/internal" "golang.org/x/net/context" - "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) // ForwardResponseStream forwards the stream from gRPC server to REST client. 
-func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { +func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { f, ok := w.(http.Flusher) if !ok { grpclog.Printf("Flush not supported in %T", w) @@ -28,7 +29,7 @@ func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.Resp http.Error(w, "unexpected error", http.StatusInternalServerError) return } - handleForwardResponseServerMetadata(w, md) + handleForwardResponseServerMetadata(w, mux, md) w.Header().Set("Transfer-Encoding", "chunked") w.Header().Set("Content-Type", marshaler.ContentType()) @@ -57,7 +58,7 @@ func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.Resp grpclog.Printf("Failed to marshal response chunk: %v", err) return } - if _, err = fmt.Fprintf(w, "%s\n", buf); err != nil { + if _, err = w.Write(buf); err != nil { grpclog.Printf("Failed to send response chunk: %v", err) return } @@ -65,11 +66,12 @@ func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.Resp } } -func handleForwardResponseServerMetadata(w http.ResponseWriter, md ServerMetadata) { +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { for k, vs := range md.HeaderMD { - hKey := fmt.Sprintf("%s%s", MetadataHeaderPrefix, k) - for i := range vs { - w.Header().Add(hKey, vs[i]) + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } } } } @@ -84,31 +86,31 @@ func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { for k, vs := range md.TrailerMD { 
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) - for i := range vs { - w.Header().Add(tKey, vs[i]) + for _, v := range vs { + w.Header().Add(tKey, v) } } } // ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. -func ForwardResponseMessage(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { +func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) if !ok { grpclog.Printf("Failed to extract ServerMetadata from context") } - handleForwardResponseServerMetadata(w, md) + handleForwardResponseServerMetadata(w, mux, md) handleForwardResponseTrailerHeader(w, md) w.Header().Set("Content-Type", marshaler.ContentType()) if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - HTTPError(ctx, marshaler, w, req, err) + HTTPError(ctx, mux, marshaler, w, req, err) return } buf, err := marshaler.Marshal(resp) if err != nil { grpclog.Printf("Marshal error: %v", err) - HTTPError(ctx, marshaler, w, req, err) + HTTPError(ctx, mux, marshaler, w, req, err) return } @@ -146,7 +148,10 @@ func handleForwardResponseStreamError(marshaler Marshaler, w http.ResponseWriter func streamChunk(result proto.Message, err error) map[string]proto.Message { if err != nil { - grpcCode := grpc.Code(err) + grpcCode := codes.Unknown + if s, ok := status.FromError(err); ok { + grpcCode = s.Code() + } httpCode := HTTPStatusFromCode(grpcCode) return map[string]proto.Message{ "error": &internal.StreamError{ diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go index 6f837cfd5d9..44550f393b4 100644 --- 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: runtime/internal/stream_chunk.proto -// DO NOT EDIT! /* Package internal is a generated protocol buffer package. @@ -42,6 +41,34 @@ func (m *StreamError) String() string { return proto.CompactTextStrin func (*StreamError) ProtoMessage() {} func (*StreamError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *StreamError) GetGrpcCode() int32 { + if m != nil { + return m.GrpcCode + } + return 0 +} + +func (m *StreamError) GetHttpCode() int32 { + if m != nil { + return m.HttpCode + } + return 0 +} + +func (m *StreamError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *StreamError) GetHttpStatus() string { + if m != nil { + return m.HttpStatus + } + return "" +} + func init() { proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") } @@ -50,7 +77,7 @@ func init() { proto.RegisterFile("runtime/internal/stream_chunk.proto", fileDesc var fileDescriptor0 = []byte{ // 181 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30, 0x14, 0x85, 0xd3, 0xdf, 0x1f, 0x85, 0xcb, 0x46, 0x1c, 0x9a, 0x38, 0x48, 0x74, 0x61, 0x82, 0xc1, 0x37, 0xd0, 0xf8, 0x02, 0xb0, 0xb9, 0x90, 0x0a, 0x37, 0x40, 0x94, 0x96, 0xdc, 0x5e, 0x62, 0x5c, 0x7d, 0x72, 0xd3, 0x22, 0xe3, 0xf9, 0xbe, 0x73, 0x92, 0x03, 0x07, 0x9a, 0x34, 0xf7, 0x03, 0xe6, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go index 2e6c5621302..205bc430921 100644 --- 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -1,12 +1,16 @@ package runtime import ( + "fmt" "net/http" + "net/textproto" "strings" - "golang.org/x/net/context" - "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) // A HandlerFunc handles a specific pair of path pattern and HTTP method. @@ -19,6 +23,10 @@ type ServeMux struct { handlers map[string][]handler forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + metadataAnnotator func(context.Context, *http.Request) metadata.MD + protoErrorHandler ProtoErrorHandlerFunc } // ServeMuxOption is an option that can be given to a ServeMux on construction. @@ -36,6 +44,64 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http. } } +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. +func DefaultHeaderMatcher(key string) (string, bool) { + key = textproto.CanonicalMIMEHeaderKey(key) + if isPermanentHTTPHeader(key) { + return MetadataPrefix + key, true + } else if strings.HasPrefix(key, MetadataHeaderPrefix) { + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. 
+// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return modified header. +func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingHeaderMatcher = fn + } +} + +// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used by services that need to read from http.Request and modify gRPC context. A common use case +// is reading token from cookie and adding it in gRPC context. +func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.metadataAnnotator = annotator + } +} + +// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used to handle an error as general proto message defined by gRPC. +// The response including body and status is not backward compatible with the default error handler. +// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization. +func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.protoErrorHandler = fn + } +} + // NewServeMux returns a new ServeMux whose internal mapping is empty. 
func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ @@ -47,6 +113,29 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { for _, opt := range opts { opt(serveMux) } + + if serveMux.protoErrorHandler != nil { + HTTPError = serveMux.protoErrorHandler + // OtherErrorHandler is no longer used when protoErrorHandler is set. + // Overwritten by a special error handler to return Unknown. + OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { + ctx := context.Background() + _, outboundMarshaler := MarshalerForRequest(serveMux, r) + sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") + serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) + } + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true + } + } + return serveMux } @@ -57,9 +146,17 @@ func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { // ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + path := r.URL.Path if !strings.HasPrefix(path, "/") { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + } return } @@ -67,7 +164,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { l := len(components) var verb string if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } return } else if idx > 0 { c := components[l-1] @@ -77,7 +180,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && isPathLengthFallback(r) { r.Method = strings.ToUpper(override) if err := r.ParseForm(); err != nil { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } return } } @@ -104,17 +213,36 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { // X-HTTP-Method-Override is optional. 
Always allow fallback to POST. if isPathLengthFallback(r) { if err := r.ParseForm(); err != nil { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } return } h.h(w, r, pathParams) return } - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } return } } - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } } // GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go index 3947dbea023..8a9ec2cdae4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -21,7 +21,7 @@ type op struct { operand int } -// Pattern is a template pattern of http request paths defined in third_party/googleapis/google/api/http.proto. 
+// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. type Pattern struct { // ops is a list of operations ops []op diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go new file mode 100644 index 00000000000..b1b089273b6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go @@ -0,0 +1,61 @@ +package runtime + +import ( + "io" + "net/http" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. +type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler + +// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a Status message marshaled by a Marshaler. +// +// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + w.Header().Del("Trailer") + w.Header().Set("Content-Type", marshaler.ContentType()) + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + buf, merr := marshaler.Marshal(s.Proto()) + if merr != nil { + grpclog.Printf("Failed to marshal error message %q: %v", s.Proto(), merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Printf("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Printf("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Printf("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go index 56a919a52f1..c00e0b914e2 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go @@ -4,7 +4,9 @@ import ( "fmt" "net/url" "reflect" + "strconv" "strings" + "time" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/utilities" @@ -38,31 +40,39 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values [] if m.Kind() != reflect.Ptr { return fmt.Errorf("unexpected type %T: %v", msg, msg) } + var props *proto.Properties m = m.Elem() for i, fieldName := range fieldPath { isLast := i == len(fieldPath)-1 if !isLast && m.Kind() != 
reflect.Struct { return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) } - f := fieldByProtoName(m, fieldName) - if !f.IsValid() { + var f reflect.Value + var err error + f, props, err = fieldByProtoName(m, fieldName) + if err != nil { + return err + } else if !f.IsValid() { grpclog.Printf("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) return nil } switch f.Kind() { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } m = f case reflect.Slice: // TODO(yugui) Support []byte if !isLast { return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) } - return populateRepeatedField(f, values) + return populateRepeatedField(f, values, props) case reflect.Ptr: if f.IsNil() { m = reflect.New(f.Type().Elem()) - f.Set(m) + f.Set(m.Convert(f.Type())) } m = f.Elem() continue @@ -80,39 +90,127 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values [] default: grpclog.Printf("too many field values: %s", strings.Join(fieldPath, ".")) } - return populateField(m, values[0]) + return populateField(m, values[0], props) } // fieldByProtoName looks up a field whose corresponding protobuf field name is "name". // "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) reflect.Value { +func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { props := proto.GetProperties(m.Type()) + + // look up field name in oneof map + if op, ok := props.OneofTypes[name]; ok { + v := reflect.New(op.Type.Elem()) + field := m.Field(op.Field) + if !field.IsNil() { + return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) + } + field.Set(v) + return v.Elem().Field(0), op.Prop, nil + } + for _, p := range props.Prop { if p.OrigName == name { - return m.FieldByName(p.Name) + return m.FieldByName(p.Name), p, nil + } + if p.JSONName == name { + return m.FieldByName(p.Name), p, nil } } - return reflect.Value{} + return reflect.Value{}, nil, nil } -func populateRepeatedField(f reflect.Value, values []string) error { +func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { elemType := f.Type().Elem() + + // is the destination field a slice of an enumeration type? 
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnumRepeated(f, values, enumValMap) + } + conv, ok := convFromType[elemType.Kind()] if !ok { return fmt.Errorf("unsupported field type %s", elemType) } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values))) + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) for i, v := range values { result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) if err := result[1].Interface(); err != nil { return err.(error) } - f.Index(i).Set(result[0]) + f.Index(i).Set(result[0].Convert(f.Index(i).Type())) } return nil } -func populateField(f reflect.Value, value string) error { +func populateField(f reflect.Value, value string, props *proto.Properties) error { + // Handle well known type + type wkt interface { + XXX_WellKnownType() string + } + if wkt, ok := f.Addr().Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "Timestamp": + if value == "null" { + f.Field(0).SetInt(0) + f.Field(1).SetInt(0) + return nil + } + + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + f.Field(0).SetInt(int64(t.Unix())) + f.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "DoubleValue": + fallthrough + case "FloatValue": + float64Val, err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.Field(0).SetFloat(float64Val) + return nil + case "Int64Value": + fallthrough + case "Int32Value": + int64Val, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.Field(0).SetInt(int64Val) + return nil + case "UInt64Value": + fallthrough + case "UInt32Value": + uint64Val, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.Field(0).SetUint(uint64Val) + return nil + case "BoolValue": + if value == "true" { + 
f.Field(0).SetBool(true) + } else if value == "false" { + f.Field(0).SetBool(false) + } else { + return fmt.Errorf("bad BoolValue: %s", value) + } + return nil + case "StringValue": + f.Field(0).SetString(value) + return nil + } + } + + // is the destination field an enumeration type? + if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnum(f, value, enumValMap) + } + conv, ok := convFromType[f.Kind()] if !ok { return fmt.Errorf("unsupported field type %T", f) @@ -121,7 +219,48 @@ func populateField(f reflect.Value, value string) error { if err := result[1].Interface(); err != nil { return err.(error) } - f.Set(result[0]) + f.Set(result[0].Convert(f.Type())) + return nil +} + +func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { + // see if it's an enumeration string + if enumVal, ok := enumValMap[value]; ok { + return reflect.ValueOf(enumVal).Convert(t), nil + } + + // check for an integer that matches an enumeration value + eVal, err := strconv.Atoi(value) + if err != nil { + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) + } + for _, v := range enumValMap { + if v == int32(eVal) { + return reflect.ValueOf(eVal).Convert(t), nil + } + } + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) +} + +func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { + cval, err := convertEnum(value, f.Type(), enumValMap) + if err != nil { + return err + } + f.Set(cval) + return nil +} + +func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { + elemType := f.Type().Elem() + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result, err := convertEnum(v, elemType, enumValMap) + if err != nil { + return err + } + f.Index(i).Set(result) + } return nil } From 9b9057564dca02c42424c4229dd2b6093e4832aa Mon Sep 17 00:00:00 
2001 From: Joe Betz Date: Thu, 14 Dec 2017 23:15:58 -0800 Subject: [PATCH 396/794] Update staging deps for etcd 3.2.11 version bump --- .../Godeps/Godeps.json | 118 ++++---- .../src/k8s.io/apiserver/Godeps/Godeps.json | 260 ++++++++++++------ .../k8s.io/kube-aggregator/Godeps/Godeps.json | 118 ++++---- .../sample-apiserver/Godeps/Godeps.json | 118 ++++---- 4 files changed, 357 insertions(+), 257 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 08607c1ddcf..c5b105e50c1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -32,60 +32,60 @@ }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": 
"1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/version", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/go-semver/semver", + "Rev": "568e959cd89871e61434c1143528d9162da89ef2" }, { "ImportPath": "github.com/coreos/go-systemd/daemon", "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" }, - { - "ImportPath": "github.com/coreos/go-systemd/journal", - "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" - }, - { - "ImportPath": "github.com/coreos/pkg/capnslog", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -167,11 +167,11 @@ "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, { - "ImportPath": "github.com/golang/protobuf/jsonpb", + "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { - "ImportPath": "github.com/golang/protobuf/proto", + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { @@ -186,10 +186,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/duration", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/golang/protobuf/ptypes/struct", - "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" - }, { "ImportPath": 
"github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -222,22 +218,6 @@ "ImportPath": "github.com/gregjones/httpcache/diskcache", "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, - { - "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", - "Rev": "2500245aa6110c562d17020fb31a2c133d737799" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -442,65 +422,85 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" + }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/connectivity", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Rev": 
"d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": 
"gopkg.in/inf.v0", diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 83c6b74dbaa..d2800e3d018 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -27,232 +27,304 @@ "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4" }, { - "ImportPath": "github.com/boltdb/bolt", - "Rev": "583e8937c61f1af6513608ccc75c97b6abdf4ff9" + "ImportPath": "github.com/cockroachdb/cmux", + "Rev": "112f0506e7743d64a6eb8fedbcff13d9979bbf92" + }, + { + "ImportPath": "github.com/coreos/bbolt", + "Rev": "48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d" }, { "ImportPath": "github.com/coreos/etcd/alarm", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/auth", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/concurrency", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/namespace", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/naming", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/compactor", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": 
"github.com/coreos/etcd/discovery", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/embed", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/error", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/etcdhttp", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3client", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", + "Rev": 
"1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/auth", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb/gw", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/membership", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/stats", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/integration", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/lease", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/lease/leasehttp", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/lease/leasepb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": 
"github.com/coreos/etcd/mvcc", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/mvcc/backend", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/adt", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/contention", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/cors", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/cpuutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/crc", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/debugutil", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/httputil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/idutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/ioutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": 
"github.com/coreos/etcd/pkg/logutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/monotime", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/netutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/pbutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/runtime", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/schedule", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/testutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/wait", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": 
"1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/adapter", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/cache", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/raft", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/raft/raftpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/rafthttp", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/snap", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/snap/snappb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/store", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/version", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/wal", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/wal/walpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/go-oidc/http", @@ -306,6 +378,10 @@ 
"ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, + { + "ImportPath": "github.com/dgrijalva/jwt-go", + "Rev": "01aeca54ebda6e0fbfafd0a524d234159c05ec20" + }, { "ImportPath": "github.com/elazarl/go-bindata-assetfs", "Rev": "3dcc96556217539f50599357fb481ac0dc7439b9" @@ -358,6 +434,10 @@ "ImportPath": "github.com/golang/glog", "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, + { + "ImportPath": "github.com/golang/groupcache/lru", + "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433" + }, { "ImportPath": "github.com/golang/protobuf/jsonpb", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -366,6 +446,10 @@ "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, + { + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + }, { "ImportPath": "github.com/golang/protobuf/ptypes", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -448,15 +532,15 @@ }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/hashicorp/golang-lru", @@ -486,10 +570,6 @@ "ImportPath": "github.com/juju/ratelimit", "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" }, - { - "ImportPath": "github.com/karlseguin/ccache", - "Rev": "3ba9789cfd2cb7b4fb4657efc994cc1c599a648c" - }, { "ImportPath": "github.com/mailru/easyjson/buffer", "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" @@ -690,65 +770,85 @@ "ImportPath": 
"golang.org/x/time/rate", "Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631" }, + { + "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" + }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/connectivity", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "gopkg.in/inf.v0", diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index ec78f699e49..fb2564f1aaf 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -28,60 +28,60 @@ }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": 
"0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/version", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/go-semver/semver", + "Rev": "568e959cd89871e61434c1143528d9162da89ef2" }, { "ImportPath": "github.com/coreos/go-systemd/daemon", "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" }, - { - "ImportPath": "github.com/coreos/go-systemd/journal", - "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" - }, - { - "ImportPath": "github.com/coreos/pkg/capnslog", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -147,11 +147,11 @@ "Rev": 
"44145f04b68cf362d9c4df2182967c2275eaefed" }, { - "ImportPath": "github.com/golang/protobuf/jsonpb", + "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { - "ImportPath": "github.com/golang/protobuf/proto", + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { @@ -166,10 +166,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/duration", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/golang/protobuf/ptypes/struct", - "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" - }, { "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -202,22 +198,6 @@ "ImportPath": "github.com/gregjones/httpcache/diskcache", "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, - { - "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", - "Rev": "2500245aa6110c562d17020fb31a2c133d737799" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -418,65 +398,85 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" + }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/connectivity", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "gopkg.in/inf.v0", diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index 3281bf2fb31..6bac29d3633 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -28,60 +28,60 @@ }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" - }, - { - "ImportPath": 
"github.com/coreos/etcd/pkg/fileutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/etcd/version", + "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + }, + { + "ImportPath": "github.com/coreos/go-semver/semver", + "Rev": "568e959cd89871e61434c1143528d9162da89ef2" }, { "ImportPath": "github.com/coreos/go-systemd/daemon", "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" }, - { - "ImportPath": "github.com/coreos/go-systemd/journal", - "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" - }, - { - "ImportPath": "github.com/coreos/pkg/capnslog", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -139,11 +139,11 @@ "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, { - "ImportPath": "github.com/golang/protobuf/jsonpb", + "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { - "ImportPath": "github.com/golang/protobuf/proto", + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { @@ -158,10 +158,6 
@@ "ImportPath": "github.com/golang/protobuf/ptypes/duration", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/golang/protobuf/ptypes/struct", - "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" - }, { "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -194,22 +190,6 @@ "ImportPath": "github.com/gregjones/httpcache/diskcache", "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, - { - "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", - "Rev": "2500245aa6110c562d17020fb31a2c133d737799" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -402,65 +382,85 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" + }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/connectivity", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" 
}, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "gopkg.in/inf.v0", From 94f2ed6849b27a605a25f49da7f1c79e8c822b07 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Wed, 13 Dec 2017 15:12:11 -0800 Subject: [PATCH 397/794] Fix build and test errors from etcd 3.2.11 upgrade --- staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD | 1 + .../src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go | 6 ++++-- test/e2e_node/services/etcd.go | 1 + test/integration/scale/BUILD | 1 + test/integration/scale/scale_test.go | 1 + 5 files changed, 8 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD index ac48442e50d..5d495404c6d 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD @@ -13,6 +13,7 @@ go_library( "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", "//vendor/github.com/coreos/etcd/integration:go_default_library", "//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go index 6ab310b601e..96d21b5812e 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go @@ -35,6 +35,7 @@ import ( etcd "github.com/coreos/etcd/client" "github.com/coreos/etcd/clientv3" 
"github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" @@ -154,6 +155,7 @@ func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer if err != nil { t.Fatal(err) } + m.AuthToken = "simple" } else { cln := newLocalListener(t) m.ClientListeners = []net.Listener{cln} @@ -189,9 +191,9 @@ func (m *EtcdTestServer) launch(t *testing.T) error { if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.Tick(500 * time.Millisecond) + m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) m.s.Start() - m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)} + m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)} for _, ln := range m.PeerListeners { hs := &httptest.Server{ Listener: ln, diff --git a/test/e2e_node/services/etcd.go b/test/e2e_node/services/etcd.go index 9176ff7a0d7..8c40dc7797e 100644 --- a/test/e2e_node/services/etcd.go +++ b/test/e2e_node/services/etcd.go @@ -77,6 +77,7 @@ func NewEtcd(dataDir string) *EtcdServer { MaxWALFiles: maxWALFiles, TickMs: tickMs, ElectionTicks: electionTicks, + AuthToken: "simple", } return &EtcdServer{ diff --git a/test/integration/scale/BUILD b/test/integration/scale/BUILD index 120e556ce4b..5fd7eb7c093 100644 --- a/test/integration/scale/BUILD +++ b/test/integration/scale/BUILD @@ -14,6 +14,7 @@ go_test( deps = [ "//cmd/kube-apiserver/app/testing:go_default_library", "//test/integration/framework:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/test/integration/scale/scale_test.go 
b/test/integration/scale/scale_test.go index a40093571c1..fe92420ec1e 100644 --- a/test/integration/scale/scale_test.go +++ b/test/integration/scale/scale_test.go @@ -22,6 +22,7 @@ import ( "strings" "testing" + _ "github.com/coreos/etcd/etcdserver/api/v3rpc" // Force package logger init. "github.com/coreos/pkg/capnslog" appsv1beta2 "k8s.io/api/apps/v1beta2" From d9dc64227f2b13ea7546b0e83307b4a824ab3722 Mon Sep 17 00:00:00 2001 From: edisonxiang Date: Tue, 19 Dec 2017 09:32:15 +0800 Subject: [PATCH 398/794] using RoundUpToGB function directly --- pkg/volume/gce_pd/gce_util.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/volume/gce_pd/gce_util.go b/pkg/volume/gce_pd/gce_util.go index 0ec7566ea9f..022d759d163 100644 --- a/pkg/volume/gce_pd/gce_util.go +++ b/pkg/volume/gce_pd/gce_util.go @@ -81,9 +81,8 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - requestBytes := capacity.Value() // GCE PDs are allocated in chunks of GBs (not GiBs) - requestGB := volume.RoundUpSize(requestBytes, volume.GB) + requestGB := volume.RoundUpToGB(capacity) // Apply Parameters (case-insensitive). We leave validation of // the values to the cloud provider. From d1eb8a6163c0065c23d8d04084bcd85ad5f15964 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luis=20Pab=C3=B3n?= Date: Mon, 20 Nov 2017 23:43:22 -0500 Subject: [PATCH 399/794] e2e: CSI Volume tests This e2e test tests the CSI volume plugin in kubernetes with a CSI hostPath driver. It is also setup to be able to be tested with more drivers in the future. 
--- test/e2e/storage/BUILD | 3 + test/e2e/storage/csi_hostpath.go | 199 +++++++++++++++++++ test/e2e/storage/csi_volumes.go | 243 ++++++++++++++++++++++++ test/e2e/storage/volume_provisioning.go | 193 ++++++++++--------- 4 files changed, 544 insertions(+), 94 deletions(-) create mode 100644 test/e2e/storage/csi_hostpath.go create mode 100644 test/e2e/storage/csi_volumes.go diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index f7c76328535..12f2f24cd06 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -8,6 +8,8 @@ load( go_library( name = "go_default_library", srcs = [ + "csi_hostpath.go", + "csi_volumes.go", "empty_dir_wrapper.go", "flexvolume.go", "pd.go", @@ -47,6 +49,7 @@ go_library( "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/api/storage/v1beta1:go_default_library", diff --git a/test/e2e/storage/csi_hostpath.go b/test/e2e/storage/csi_hostpath.go new file mode 100644 index 00000000000..ddf38f12322 --- /dev/null +++ b/test/e2e/storage/csi_hostpath.go @@ -0,0 +1,199 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file is used to deploy the CSI hostPath plugin +// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath + +package storage + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" +) + +const ( + csiHostPathPluginImage string = "docker.io/k8scsi/hostpathplugin:0.1" +) + +func csiHostPathPod( + client clientset.Interface, + config framework.VolumeTestConfig, + teardown bool, + f *framework.Framework, + sa *v1.ServiceAccount, +) *v1.Pod { + podClient := client.CoreV1().Pods(config.Namespace) + + priv := true + mountPropagation := v1.MountPropagationBidirectional + hostPathType := v1.HostPathDirectoryOrCreate + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.Prefix + "-pod", + Namespace: config.Namespace, + Labels: map[string]string{ + "app": "hostpath-driver", + }, + }, + Spec: v1.PodSpec{ + ServiceAccountName: sa.GetName(), + NodeName: config.ServerNodeName, + RestartPolicy: v1.RestartPolicyNever, + Containers: []v1.Container{ + { + Name: "external-provisioner", + Image: csiExternalProvisionerImage, + ImagePullPolicy: v1.PullAlways, + Args: []string{ + "--v=5", + "--provisioner=csi-hostpath", + "--csi-address=/csi/csi.sock", + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "socket-dir", + MountPath: "/csi", + }, + }, + }, + { + Name: "driver-registrar", + Image: csiDriverRegistrarImage, + ImagePullPolicy: v1.PullAlways, + Args: []string{ + "--v=5", + "--csi-address=/csi/csi.sock", + }, + Env: []v1.EnvVar{ + { + Name: "KUBE_NODE_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "socket-dir", + MountPath: "/csi", + }, + }, + }, + { + Name: "external-attacher", + Image: csiExternalAttacherImage, + ImagePullPolicy: v1.PullAlways, + Args: []string{ + "--v=5", + 
"--csi-address=$(ADDRESS)", + }, + Env: []v1.EnvVar{ + { + Name: "ADDRESS", + Value: "/csi/csi.sock", + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "socket-dir", + MountPath: "/csi", + }, + }, + }, + { + Name: "hostpath-driver", + Image: csiHostPathPluginImage, + ImagePullPolicy: v1.PullAlways, + SecurityContext: &v1.SecurityContext{ + Privileged: &priv, + }, + Args: []string{ + "--v=5", + "--endpoint=$(CSI_ENDPOINT)", + "--nodeid=$(KUBE_NODE_NAME)", + }, + Env: []v1.EnvVar{ + { + Name: "CSI_ENDPOINT", + Value: "unix://" + "/csi/csi.sock", + }, + { + Name: "KUBE_NODE_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "socket-dir", + MountPath: "/csi", + }, + { + Name: "mountpoint-dir", + MountPath: "/var/lib/kubelet/pods", + MountPropagation: &mountPropagation, + }, + }, + }, + }, + Volumes: []v1.Volume{ + { + Name: "socket-dir", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/var/lib/kubelet/plugins/csi-hostpath", + Type: &hostPathType, + }, + }, + }, + { + Name: "mountpoint-dir", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/var/lib/kubelet/pods", + Type: &hostPathType, + }, + }, + }, + }, + }, + } + + err := framework.DeletePodWithWait(f, client, pod) + framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v", + pod.GetNamespace(), pod.GetName(), err) + + if teardown { + return nil + } + + ret, err := podClient.Create(pod) + if err != nil { + framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err) + } + + // Wait for pod to come up + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret)) + return ret +} diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go new file mode 100644 index 00000000000..3e764fad429 --- /dev/null +++ b/test/e2e/storage/csi_volumes.go @@ -0,0 +1,243 @@ +/* +Copyright 
2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "math/rand" + "time" + + "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" + + . "github.com/onsi/ginkgo" +) + +const ( + csiExternalAttacherImage string = "docker.io/k8scsi/csi-attacher:0.1" + csiExternalProvisionerImage string = "docker.io/k8scsi/csi-provisioner:0.1" + csiDriverRegistrarImage string = "docker.io/k8scsi/driver-registrar" +) + +func externalAttacherServiceAccount( + client clientset.Interface, + config framework.VolumeTestConfig, + teardown bool, +) *v1.ServiceAccount { + serviceAccountName := config.Prefix + "-service-account" + serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace) + sa := &v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceAccountName, + }, + } + + serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{}) + err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) { + _, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{}) + return apierrs.IsNotFound(err), nil + }) + framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) + + if teardown { + return nil + } + + ret, err := 
serviceAccountClient.Create(sa)
+	if err != nil {
+		framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
+	}
+
+	return ret
+}
+
+func externalAttacherClusterRole(
+	client clientset.Interface,
+	config framework.VolumeTestConfig,
+	teardown bool,
+) *rbacv1.ClusterRole {
+	clusterRoleClient := client.RbacV1().ClusterRoles()
+	role := &rbacv1.ClusterRole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: config.Prefix + "-cluster-role",
+		},
+		Rules: []rbacv1.PolicyRule{
+			{
+				APIGroups: []string{""},
+				Resources: []string{"persistentvolumes"},
+				Verbs:     []string{"create", "delete", "get", "list", "watch", "update"},
+			},
+			{
+				APIGroups: []string{""},
+				Resources: []string{"persistentvolumeclaims"},
+				Verbs:     []string{"get", "list", "watch", "update"},
+			},
+			{
+				APIGroups: []string{""},
+				Resources: []string{"nodes"},
+				Verbs:     []string{"get", "list", "watch", "update"},
+			},
+			{
+				APIGroups: []string{"storage.k8s.io"},
+				Resources: []string{"volumeattachments"},
+				Verbs:     []string{"get", "list", "watch", "update"},
+			},
+			{
+				APIGroups: []string{"storage.k8s.io"},
+				Resources: []string{"storageclasses"},
+				Verbs:     []string{"get", "list", "watch"},
+			},
+		},
+	}
+
+	clusterRoleClient.Delete(role.GetName(), &metav1.DeleteOptions{})
+	err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
+		_, err := clusterRoleClient.Get(role.GetName(), metav1.GetOptions{})
+		return apierrs.IsNotFound(err), nil
+	})
+	framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
+
+	if teardown {
+		return nil
+	}
+
+	ret, err := clusterRoleClient.Create(role)
+	if err != nil {
+		framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
+	}
+
+	return ret
+}
+
+func externalAttacherClusterRoleBinding(
+	client clientset.Interface,
+	config framework.VolumeTestConfig,
+	teardown bool,
+	sa *v1.ServiceAccount,
+	clusterRole *rbacv1.ClusterRole,
+) *rbacv1.ClusterRoleBinding {
+	clusterRoleBindingClient 
:= client.RbacV1().ClusterRoleBindings() + binding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.Prefix + "-role-binding", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: sa.GetName(), + Namespace: sa.GetNamespace(), + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: clusterRole.GetName(), + APIGroup: "rbac.authorization.k8s.io", + }, + } + + clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{}) + err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) { + _, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{}) + return apierrs.IsNotFound(err), nil + }) + framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) + + if teardown { + return nil + } + + ret, err := clusterRoleBindingClient.Create(binding) + if err != nil { + framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err) + } + + return ret +} + +var _ = utils.SIGDescribe("CSI Volumes [Feature:CSI]", func() { + f := framework.NewDefaultFramework("csi-mock-plugin") + + var ( + cs clientset.Interface + ns *v1.Namespace + node v1.Node + config framework.VolumeTestConfig + suffix string + ) + + BeforeEach(func() { + cs = f.ClientSet + ns = f.Namespace + nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + node = nodes.Items[rand.Intn(len(nodes.Items))] + config = framework.VolumeTestConfig{ + Namespace: ns.Name, + Prefix: "csi", + ClientNodeName: node.Name, + ServerNodeName: node.Name, + WaitForCompletion: true, + } + suffix = ns.Name + }) + + // Create one of these for each of the drivers to be tested + // CSI hostPath driver test + Describe("Sanity CSI plugin test using hostPath CSI driver", func() { + + var ( + clusterRole *rbacv1.ClusterRole + serviceAccount *v1.ServiceAccount + ) + + BeforeEach(func() { + By("deploying csi hostpath driver") + clusterRole = externalAttacherClusterRole(cs, config, false) + 
serviceAccount = externalAttacherServiceAccount(cs, config, false) + externalAttacherClusterRoleBinding(cs, config, false, serviceAccount, clusterRole) + csiHostPathPod(cs, config, false, f, serviceAccount) + }) + + AfterEach(func() { + By("uninstalling csi hostpath driver") + csiHostPathPod(cs, config, true, f, serviceAccount) + externalAttacherClusterRoleBinding(cs, config, true, serviceAccount, clusterRole) + serviceAccount = externalAttacherServiceAccount(cs, config, true) + clusterRole = externalAttacherClusterRole(cs, config, true) + }) + + It("should provision storage with a hostPath CSI driver", func() { + t := storageClassTest{ + name: "csi-hostpath", + provisioner: "csi-hostpath", + parameters: map[string]string{}, + claimSize: "1Gi", + expectedSize: "1Gi", + nodeName: node.Name, + } + + claim := newClaim(t, ns.GetName(), "") + class := newStorageClass(t, ns.GetName(), "") + testDynamicProvisioning(t, cs, claim, class) + }) + }) +}) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index e87cc902201..41107da5344 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -56,6 +56,7 @@ type storageClassTest struct { claimSize string expectedSize string pvCheck func(volume *v1.PersistentVolume) error + nodeName string } const ( @@ -139,10 +140,10 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla // Get entry, get mount options at 6th word, replace brackets with commas command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option) } - runInPodWithVolume(client, claim.Namespace, claim.Name, command) + runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, command) By("checking the created volume is readable and retains data") - runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data") + runInPodWithVolume(client, 
claim.Namespace, claim.Name, t.nodeName, "grep 'hello world' /mnt/test/data") By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)) @@ -250,140 +251,140 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // that can be used to persist data among pods. tests := []storageClassTest{ { - "SSD PD on GCE/GKE", - []string{"gce", "gke"}, - "kubernetes.io/gce-pd", - map[string]string{ + name: "SSD PD on GCE/GKE", + cloudProviders: []string{"gce", "gke"}, + provisioner: "kubernetes.io/gce-pd", + parameters: map[string]string{ "type": "pd-ssd", "zone": cloudZone, }, - "1.5G", - "2G", - func(volume *v1.PersistentVolume) error { + claimSize: "1.5G", + expectedSize: "2G", + pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-ssd") }, }, { - "HDD PD on GCE/GKE", - []string{"gce", "gke"}, - "kubernetes.io/gce-pd", - map[string]string{ + name: "HDD PD on GCE/GKE", + cloudProviders: []string{"gce", "gke"}, + provisioner: "kubernetes.io/gce-pd", + parameters: map[string]string{ "type": "pd-standard", }, - "1.5G", - "2G", - func(volume *v1.PersistentVolume) error { + claimSize: "1.5G", + expectedSize: "2G", + pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, }, // AWS { - "gp2 EBS on AWS", - []string{"aws"}, - "kubernetes.io/aws-ebs", - map[string]string{ + name: "gp2 EBS on AWS", + cloudProviders: []string{"aws"}, + provisioner: "kubernetes.io/aws-ebs", + parameters: map[string]string{ "type": "gp2", "zone": cloudZone, }, - "1.5Gi", - "2Gi", - func(volume *v1.PersistentVolume) error { + claimSize: "1.5Gi", + expectedSize: "2Gi", + pvCheck: func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "gp2", false) }, }, { - "io1 EBS on AWS", - []string{"aws"}, - "kubernetes.io/aws-ebs", - map[string]string{ + name: "io1 EBS on AWS", + cloudProviders: []string{"aws"}, + 
provisioner: "kubernetes.io/aws-ebs", + parameters: map[string]string{ "type": "io1", "iopsPerGB": "50", }, - "3.5Gi", - "4Gi", // 4 GiB is minimum for io1 - func(volume *v1.PersistentVolume) error { + claimSize: "3.5Gi", + expectedSize: "4Gi", // 4 GiB is minimum for io1 + pvCheck: func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "io1", false) }, }, { - "sc1 EBS on AWS", - []string{"aws"}, - "kubernetes.io/aws-ebs", - map[string]string{ + name: "sc1 EBS on AWS", + cloudProviders: []string{"aws"}, + provisioner: "kubernetes.io/aws-ebs", + parameters: map[string]string{ "type": "sc1", }, - "500Gi", // minimum for sc1 - "500Gi", - func(volume *v1.PersistentVolume) error { + claimSize: "500Gi", // minimum for sc1 + expectedSize: "500Gi", + pvCheck: func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "sc1", false) }, }, { - "st1 EBS on AWS", - []string{"aws"}, - "kubernetes.io/aws-ebs", - map[string]string{ + name: "st1 EBS on AWS", + cloudProviders: []string{"aws"}, + provisioner: "kubernetes.io/aws-ebs", + parameters: map[string]string{ "type": "st1", }, - "500Gi", // minimum for st1 - "500Gi", - func(volume *v1.PersistentVolume) error { + claimSize: "500Gi", // minimum for st1 + expectedSize: "500Gi", + pvCheck: func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "st1", false) }, }, { - "encrypted EBS on AWS", - []string{"aws"}, - "kubernetes.io/aws-ebs", - map[string]string{ + name: "encrypted EBS on AWS", + cloudProviders: []string{"aws"}, + provisioner: "kubernetes.io/aws-ebs", + parameters: map[string]string{ "encrypted": "true", }, - "1Gi", - "1Gi", - func(volume *v1.PersistentVolume) error { + claimSize: "1Gi", + expectedSize: "1Gi", + pvCheck: func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "gp2", true) }, }, // OpenStack generic tests (works on all OpenStack deployments) { - "generic Cinder volume on OpenStack", - []string{"openstack"}, - "kubernetes.io/cinder", - map[string]string{}, 
- "1.5Gi", - "2Gi", - nil, // there is currently nothing to check on OpenStack + name: "generic Cinder volume on OpenStack", + cloudProviders: []string{"openstack"}, + provisioner: "kubernetes.io/cinder", + parameters: map[string]string{}, + claimSize: "1.5Gi", + expectedSize: "2Gi", + pvCheck: nil, // there is currently nothing to check on OpenStack }, { - "Cinder volume with empty volume type and zone on OpenStack", - []string{"openstack"}, - "kubernetes.io/cinder", - map[string]string{ + name: "Cinder volume with empty volume type and zone on OpenStack", + cloudProviders: []string{"openstack"}, + provisioner: "kubernetes.io/cinder", + parameters: map[string]string{ "type": "", "availability": "", }, - "1.5Gi", - "2Gi", - nil, // there is currently nothing to check on OpenStack + claimSize: "1.5Gi", + expectedSize: "2Gi", + pvCheck: nil, // there is currently nothing to check on OpenStack }, // vSphere generic test { - "generic vSphere volume", - []string{"vsphere"}, - "kubernetes.io/vsphere-volume", - map[string]string{}, - "1.5Gi", - "1.5Gi", - nil, + name: "generic vSphere volume", + cloudProviders: []string{"vsphere"}, + provisioner: "kubernetes.io/vsphere-volume", + parameters: map[string]string{}, + claimSize: "1.5Gi", + expectedSize: "1.5Gi", + pvCheck: nil, }, { - "Azure disk volume with empty sku and location", - []string{"azure"}, - "kubernetes.io/azure-disk", - map[string]string{}, - "1Gi", - "1Gi", - nil, + name: "Azure disk volume with empty sku and location", + cloudProviders: []string{"azure"}, + provisioner: "kubernetes.io/azure-disk", + parameters: map[string]string{}, + claimSize: "1Gi", + expectedSize: "1Gi", + pvCheck: nil, }, } @@ -430,15 +431,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.SkipUnlessProviderIs("gce", "gke") test := storageClassTest{ - "HDD PD on GCE/GKE", - []string{"gce", "gke"}, - "kubernetes.io/gce-pd", - map[string]string{ + name: "HDD PD on GCE/GKE", + cloudProviders: []string{"gce", "gke"}, + 
provisioner: "kubernetes.io/gce-pd", + parameters: map[string]string{ "type": "pd-standard", }, - "1G", - "1G", - func(volume *v1.PersistentVolume) error { + claimSize: "1G", + expectedSize: "1G", + pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, } @@ -464,15 +465,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.SkipUnlessProviderIs("gce", "gke") test := storageClassTest{ - "HDD PD on GCE/GKE", - []string{"gce", "gke"}, - "kubernetes.io/gce-pd", - map[string]string{ + name: "HDD PD on GCE/GKE", + cloudProviders: []string{"gce", "gke"}, + provisioner: "kubernetes.io/gce-pd", + parameters: map[string]string{ "type": "pd-standard", }, - "1G", - "1G", - func(volume *v1.PersistentVolume) error { + claimSize: "1G", + expectedSize: "1G", + pvCheck: func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, } @@ -791,7 +792,7 @@ func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim { } // runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. 
-func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) { +func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string) { pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -829,6 +830,10 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) { }, }, } + + if len(nodeName) != 0 { + pod.Spec.NodeName = nodeName + } pod, err := c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err, "Failed to create pod: %v", err) defer func() { From 42b01beaca349200cc5c8dbf20e70faa7ae2c27a Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 19 Dec 2017 11:18:15 +0800 Subject: [PATCH 400/794] Cacher stopLock should be unlocked --- staging/src/k8s.io/apiserver/pkg/storage/cacher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher.go index 27ead5a1d78..0f5b4e2ff28 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher.go @@ -655,7 +655,7 @@ func (c *Cacher) Stop() { } c.stopLock.Lock() if c.stopped { - // avoid that it was locked meanwhile as isStopped only read-locks + c.stopLock.Unlock() return } c.stopped = true From 477def0b4df29d9ba7301f24283f5ec84630110d Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Tue, 19 Dec 2017 11:42:02 +0800 Subject: [PATCH 401/794] Add azure owners --- pkg/cloudprovider/providers/azure/OWNERS | 8 ++++++++ pkg/volume/azure_dd/OWNERS | 17 +++++++++++------ pkg/volume/azure_file/OWNERS | 11 ++++++++--- 3 files changed, 27 insertions(+), 9 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/OWNERS b/pkg/cloudprovider/providers/azure/OWNERS index 1109bcea346..51c658d2287 100644 --- a/pkg/cloudprovider/providers/azure/OWNERS +++ b/pkg/cloudprovider/providers/azure/OWNERS @@ -1,6 +1,14 @@ approvers: +- andyzhangx - brendandburns - colemickens +- feiskyer - jdumars +- khenidak 
reviewers: - andyzhangx +- brendandburns +- colemickens +- feiskyer +- jdumars +- khenidak diff --git a/pkg/volume/azure_dd/OWNERS b/pkg/volume/azure_dd/OWNERS index dbe4f0a2053..a6d469c0755 100755 --- a/pkg/volume/azure_dd/OWNERS +++ b/pkg/volume/azure_dd/OWNERS @@ -1,11 +1,16 @@ approvers: +- andyzhangx - brendandburns +- feiskyer +- khenidak - rootfs reviewers: -- rootfs -- brendandburns -- saad-ali -- jsafrane -- jingxu97 -- msau42 - andyzhangx +- brendandburns +- feiskyer +- jingxu97 +- jsafrane +- msau42 +- khenidak +- rootfs +- saad-ali diff --git a/pkg/volume/azure_file/OWNERS b/pkg/volume/azure_file/OWNERS index dbe4f0a2053..06e4934dfe2 100644 --- a/pkg/volume/azure_file/OWNERS +++ b/pkg/volume/azure_file/OWNERS @@ -1,11 +1,16 @@ approvers: +- andyzhangx - brendandburns +- feiskyer +- khenidak - rootfs reviewers: -- rootfs +- andyzhangx - brendandburns -- saad-ali +- feiskyer - jsafrane - jingxu97 +- khenidak - msau42 -- andyzhangx +- rootfs +- saad-ali From 7d919fbd0c1ac69da1d4233cc790f9299163be74 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Fri, 15 Dec 2017 16:34:14 +0800 Subject: [PATCH 402/794] Use apps/v1 API in kubeadm. 
--- cmd/kubeadm/app/phases/addons/dns/dns.go | 2 +- cmd/kubeadm/app/phases/addons/dns/manifests.go | 4 ++-- cmd/kubeadm/app/phases/addons/proxy/manifests.go | 2 +- cmd/kubeadm/app/phases/addons/proxy/proxy.go | 2 +- cmd/kubeadm/app/phases/selfhosting/selfhosting.go | 2 +- .../app/phases/selfhosting/selfhosting_test.go | 8 ++++---- cmd/kubeadm/app/phases/upgrade/health.go | 4 ++-- cmd/kubeadm/app/phases/upgrade/postupgrade.go | 2 +- cmd/kubeadm/app/phases/upgrade/prepull.go | 2 +- cmd/kubeadm/app/phases/upgrade/selfhosted.go | 6 +++--- cmd/kubeadm/app/util/apiclient/idempotency.go | 14 +++++++------- 11 files changed, 24 insertions(+), 24 deletions(-) diff --git a/cmd/kubeadm/app/phases/addons/dns/dns.go b/cmd/kubeadm/app/phases/addons/dns/dns.go index 072359a8e5b..efeb25e5f60 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns.go @@ -20,7 +20,7 @@ import ( "fmt" "runtime" - apps "k8s.io/api/apps/v1beta2" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" diff --git a/cmd/kubeadm/app/phases/addons/dns/manifests.go b/cmd/kubeadm/app/phases/addons/dns/manifests.go index cbf434706da..65269982ba2 100644 --- a/cmd/kubeadm/app/phases/addons/dns/manifests.go +++ b/cmd/kubeadm/app/phases/addons/dns/manifests.go @@ -19,7 +19,7 @@ package dns const ( // v180AndAboveKubeDNSDeployment is the kube-dns Deployment manifest for the kube-dns manifest for v1.7+ v180AndAboveKubeDNSDeployment = ` -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: kube-dns @@ -216,7 +216,7 @@ spec: // CoreDNSDeployment is the CoreDNS Deployment manifest CoreDNSDeployment = ` -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: coredns diff --git a/cmd/kubeadm/app/phases/addons/proxy/manifests.go b/cmd/kubeadm/app/phases/addons/proxy/manifests.go index d141e16289f..f6e82319bf0 100644 --- 
a/cmd/kubeadm/app/phases/addons/proxy/manifests.go +++ b/cmd/kubeadm/app/phases/addons/proxy/manifests.go @@ -52,7 +52,7 @@ data: // KubeProxyDaemonSet19 is the proxy DaemonSet manifest for Kubernetes 1.9 and above KubeProxyDaemonSet19 = ` -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: DaemonSet metadata: labels: diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy.go b/cmd/kubeadm/app/phases/addons/proxy/proxy.go index 0121da57d02..e8f71d11be1 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy.go @@ -20,7 +20,7 @@ import ( "fmt" "runtime" - apps "k8s.io/api/apps/v1beta2" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go index 18ae9563f8f..8e894d64863 100644 --- a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go @@ -21,7 +21,7 @@ import ( "os" "time" - apps "k8s.io/api/apps/v1beta2" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go index 4de0a7bbee3..7921ce0696f 100644 --- a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go @@ -23,7 +23,7 @@ import ( "os" "testing" - apps "k8s.io/api/apps/v1beta2" + apps "k8s.io/api/apps/v1" "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/util" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -104,7 +104,7 @@ spec: status: {} ` - testAPIServerDaemonSet = `apiVersion: apps/v1beta2 + testAPIServerDaemonSet = `apiVersion: apps/v1 kind: DaemonSet metadata: creationTimestamp: null @@ -265,7 +265,7 @@ spec: status: {} ` - 
testControllerManagerDaemonSet = `apiVersion: apps/v1beta2 + testControllerManagerDaemonSet = `apiVersion: apps/v1 kind: DaemonSet metadata: creationTimestamp: null @@ -395,7 +395,7 @@ spec: status: {} ` - testSchedulerDaemonSet = `apiVersion: apps/v1beta2 + testSchedulerDaemonSet = `apiVersion: apps/v1 kind: DaemonSet metadata: creationTimestamp: null diff --git a/cmd/kubeadm/app/phases/upgrade/health.go b/cmd/kubeadm/app/phases/upgrade/health.go index 2ce9314fac9..50b014dee6e 100644 --- a/cmd/kubeadm/app/phases/upgrade/health.go +++ b/cmd/kubeadm/app/phases/upgrade/health.go @@ -21,7 +21,7 @@ import ( "net/http" "os" - apps "k8s.io/api/apps/v1beta2" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -174,7 +174,7 @@ func getNotReadyDaemonSets(client clientset.Interface) ([]error, error) { notReadyDaemonSets := []error{} for _, component := range constants.MasterComponents { dsName := constants.AddSelfHostedPrefix(component) - ds, err := client.AppsV1beta2().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{}) + ds, err := client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("couldn't get daemonset %q in the %s namespace", dsName, metav1.NamespaceSystem) } diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index b6b91a3d7a6..f18c2e23f79 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -117,7 +117,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC func removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error { if features.Enabled(cfg.FeatureGates, features.CoreDNS) { return apiclient.TryRunCommand(func() error { - coreDNSDeployment, err := 
client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{}) + coreDNSDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{}) if err != nil { return err } diff --git a/cmd/kubeadm/app/phases/upgrade/prepull.go b/cmd/kubeadm/app/phases/upgrade/prepull.go index 5d0b2940234..f4094dbf715 100644 --- a/cmd/kubeadm/app/phases/upgrade/prepull.go +++ b/cmd/kubeadm/app/phases/upgrade/prepull.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - apps "k8s.io/api/apps/v1beta2" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" diff --git a/cmd/kubeadm/app/phases/upgrade/selfhosted.go b/cmd/kubeadm/app/phases/upgrade/selfhosted.go index cef6420a899..385a2a30a47 100644 --- a/cmd/kubeadm/app/phases/upgrade/selfhosted.go +++ b/cmd/kubeadm/app/phases/upgrade/selfhosted.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - apps "k8s.io/api/apps/v1beta2" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -119,7 +119,7 @@ func SelfHostedControlPlane(client clientset.Interface, waiter apiclient.Waiter, // During this upgrade; the temporary/backup component will take over if err := apiclient.TryRunCommand(func() error { - if _, err := client.AppsV1beta2().DaemonSets(newDS.ObjectMeta.Namespace).Update(newDS); err != nil { + if _, err := client.AppsV1().DaemonSets(newDS.ObjectMeta.Namespace).Update(newDS); err != nil { return fmt.Errorf("couldn't update self-hosted component's DaemonSet: %v", err) } return nil @@ -256,7 +256,7 @@ func getCurrentControlPlaneComponentResources(client clientset.Interface) (map[s if err := apiclient.TryRunCommand(func() error { var tryrunerr error // Try to get the current self-hosted component - currentDS, tryrunerr = client.AppsV1beta2().DaemonSets(metav1.NamespaceSystem).Get(dsName, 
metav1.GetOptions{}) + currentDS, tryrunerr = client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{}) return tryrunerr // note that tryrunerr is most likely nil here (in successful cases) }, selfHostingFailureThreshold); err != nil { return nil, err diff --git a/cmd/kubeadm/app/util/apiclient/idempotency.go b/cmd/kubeadm/app/util/apiclient/idempotency.go index 319ad5c1b96..03f60be77cd 100644 --- a/cmd/kubeadm/app/util/apiclient/idempotency.go +++ b/cmd/kubeadm/app/util/apiclient/idempotency.go @@ -19,7 +19,7 @@ package apiclient import ( "fmt" - apps "k8s.io/api/apps/v1beta2" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -72,12 +72,12 @@ func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAcco // CreateOrUpdateDeployment creates a Deployment if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deployment) error { - if _, err := client.AppsV1beta2().Deployments(deploy.ObjectMeta.Namespace).Create(deploy); err != nil { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(deploy); err != nil { if !apierrors.IsAlreadyExists(err) { return fmt.Errorf("unable to create deployment: %v", err) } - if _, err := client.AppsV1beta2().Deployments(deploy.ObjectMeta.Namespace).Update(deploy); err != nil { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(deploy); err != nil { return fmt.Errorf("unable to update deployment: %v", err) } } @@ -86,12 +86,12 @@ func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deploymen // CreateOrUpdateDaemonSet creates a DaemonSet if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) error { - if _, err := client.AppsV1beta2().DaemonSets(ds.ObjectMeta.Namespace).Create(ds); err != nil { + if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(ds); err != nil { if !apierrors.IsAlreadyExists(err) { return fmt.Errorf("unable to create daemonset: %v", err) } - if _, err := client.AppsV1beta2().DaemonSets(ds.ObjectMeta.Namespace).Update(ds); err != nil { + if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(ds); err != nil { return fmt.Errorf("unable to update daemonset: %v", err) } } @@ -104,7 +104,7 @@ func DeleteDaemonSetForeground(client clientset.Interface, namespace, name strin deleteOptions := &metav1.DeleteOptions{ PropagationPolicy: &foregroundDelete, } - return client.AppsV1beta2().DaemonSets(namespace).Delete(name, deleteOptions) + return client.AppsV1().DaemonSets(namespace).Delete(name, deleteOptions) } // DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted @@ -113,7 +113,7 @@ func DeleteDeploymentForeground(client clientset.Interface, namespace, name stri deleteOptions := &metav1.DeleteOptions{ PropagationPolicy: &foregroundDelete, } - return client.AppsV1beta2().Deployments(namespace).Delete(name, deleteOptions) + return client.AppsV1().Deployments(namespace).Delete(name, deleteOptions) } // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. From 88f609fe4d31f5c416c4461ef21013391f41c6f1 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Fri, 15 Dec 2017 16:34:43 +0800 Subject: [PATCH 403/794] Auto generate BUILD files. 
--- cmd/kubeadm/app/phases/addons/dns/BUILD | 2 +- cmd/kubeadm/app/phases/addons/proxy/BUILD | 2 +- cmd/kubeadm/app/phases/selfhosting/BUILD | 4 ++-- cmd/kubeadm/app/phases/upgrade/BUILD | 2 +- cmd/kubeadm/app/util/apiclient/BUILD | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/kubeadm/app/phases/addons/dns/BUILD b/cmd/kubeadm/app/phases/addons/dns/BUILD index bb0f1d4f5df..f1279872ea1 100644 --- a/cmd/kubeadm/app/phases/addons/dns/BUILD +++ b/cmd/kubeadm/app/phases/addons/dns/BUILD @@ -42,7 +42,7 @@ go_library( "//cmd/kubeadm/app/util/apiclient:go_default_library", "//pkg/api/legacyscheme:go_default_library", "//pkg/util/version:go_default_library", - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/cmd/kubeadm/app/phases/addons/proxy/BUILD b/cmd/kubeadm/app/phases/addons/proxy/BUILD index 6a59d477207..457cb360bff 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/BUILD +++ b/cmd/kubeadm/app/phases/addons/proxy/BUILD @@ -42,7 +42,7 @@ go_library( "//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library", "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/kubeadm/app/phases/selfhosting/BUILD b/cmd/kubeadm/app/phases/selfhosting/BUILD index cdeaaf6b5d6..3374322b9a1 100644 --- a/cmd/kubeadm/app/phases/selfhosting/BUILD +++ b/cmd/kubeadm/app/phases/selfhosting/BUILD @@ -19,7 +19,7 @@ go_test( "//cmd/kubeadm/app/constants:go_default_library", 
"//cmd/kubeadm/app/util:go_default_library", "//pkg/volume/util:go_default_library", - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", ], ) @@ -39,7 +39,7 @@ go_library( "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", "//pkg/volume/util:go_default_library", - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", diff --git a/cmd/kubeadm/app/phases/upgrade/BUILD b/cmd/kubeadm/app/phases/upgrade/BUILD index 644794563b8..e8bd736c103 100644 --- a/cmd/kubeadm/app/phases/upgrade/BUILD +++ b/cmd/kubeadm/app/phases/upgrade/BUILD @@ -40,7 +40,7 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/util/version:go_default_library", "//pkg/version:go_default_library", - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/kubeadm/app/util/apiclient/BUILD b/cmd/kubeadm/app/util/apiclient/BUILD index 1b79bbe519b..2ad41a61e82 100644 --- a/cmd/kubeadm/app/util/apiclient/BUILD +++ b/cmd/kubeadm/app/util/apiclient/BUILD @@ -20,7 +20,7 @@ go_library( "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", From 8c8cdfe7b74ffb6a0a73c4129426168c5ef31aa3 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Mon, 18 Dec 2017 11:26:41 +0800 Subject: [PATCH 404/794] Update CoreDNS version and Corefile. --- cluster/addons/dns/coredns.yaml.base | 8 +++++--- cluster/addons/dns/coredns.yaml.in | 8 +++++--- cluster/addons/dns/coredns.yaml.sed | 8 +++++--- cmd/kubeadm/app/phases/addons/dns/manifests.go | 2 +- cmd/kubeadm/app/phases/addons/dns/versions.go | 4 ++-- 5 files changed, 18 insertions(+), 12 deletions(-) diff --git a/cluster/addons/dns/coredns.yaml.base b/cluster/addons/dns/coredns.yaml.base index 4976ac09a6f..533a4f02af1 100644 --- a/cluster/addons/dns/coredns.yaml.base +++ b/cluster/addons/dns/coredns.yaml.base @@ -57,9 +57,11 @@ data: Corefile: | .:53 { errors - log stdout + log health - kubernetes __PILLAR__DNS__DOMAIN__ __PILLAR__CLUSTER_CIDR__ + kubernetes __PILLAR__DNS__DOMAIN__ __PILLAR__CLUSTER_CIDR__ { + pods insecure + } prometheus proxy . /etc/resolv.conf cache 30 @@ -93,7 +95,7 @@ spec: operator: "Exists" containers: - name: coredns - image: coredns/coredns:0.9.10 + image: coredns/coredns:1.0.1 imagePullPolicy: IfNotPresent resources: limits: diff --git a/cluster/addons/dns/coredns.yaml.in b/cluster/addons/dns/coredns.yaml.in index d2eb1b35796..e56084a3cc9 100644 --- a/cluster/addons/dns/coredns.yaml.in +++ b/cluster/addons/dns/coredns.yaml.in @@ -57,9 +57,11 @@ data: Corefile: | .:53 { errors - log stdout + log health - kubernetes {{ pillar['dns_domain'] }} {{ pillar['service_cluster_ip_range'] }} + kubernetes {{ pillar['dns_domain'] }} {{ pillar['service_cluster_ip_range'] }} { + pods insecure + } prometheus proxy . 
/etc/resolv.conf cache 30 @@ -93,7 +95,7 @@ spec: operator: "Exists" containers: - name: coredns - image: coredns/coredns:0.9.10 + image: coredns/coredns:1.0.1 imagePullPolicy: IfNotPresent resources: limits: diff --git a/cluster/addons/dns/coredns.yaml.sed b/cluster/addons/dns/coredns.yaml.sed index 329ad6ca23f..4ec582f2edd 100644 --- a/cluster/addons/dns/coredns.yaml.sed +++ b/cluster/addons/dns/coredns.yaml.sed @@ -57,9 +57,11 @@ data: Corefile: | .:53 { errors - log stdout + log health - kubernetes $DNS_DOMAIN $SERVICE_CLUSTER_IP_RANGE + kubernetes $DNS_DOMAIN $SERVICE_CLUSTER_IP_RANGE { + pods insecure + } prometheus proxy . /etc/resolv.conf cache 30 @@ -93,7 +95,7 @@ spec: operator: "Exists" containers: - name: coredns - image: coredns/coredns:0.9.10 + image: coredns/coredns:1.0.1 imagePullPolicy: IfNotPresent resources: limits: diff --git a/cmd/kubeadm/app/phases/addons/dns/manifests.go b/cmd/kubeadm/app/phases/addons/dns/manifests.go index cbf434706da..5dd65612c27 100644 --- a/cmd/kubeadm/app/phases/addons/dns/manifests.go +++ b/cmd/kubeadm/app/phases/addons/dns/manifests.go @@ -293,7 +293,7 @@ data: Corefile: | .:53 { errors - log stdout + log health kubernetes {{ .DNSDomain }} {{ .ServiceCIDR }} { pods insecure diff --git a/cmd/kubeadm/app/phases/addons/dns/versions.go b/cmd/kubeadm/app/phases/addons/dns/versions.go index b68a0b7eee9..96267a9f7f8 100644 --- a/cmd/kubeadm/app/phases/addons/dns/versions.go +++ b/cmd/kubeadm/app/phases/addons/dns/versions.go @@ -27,14 +27,14 @@ const ( kubeDNSProbeSRV = "SRV" kubeDNSProbeA = "A" - coreDNSVersion = "1.0.0" + coreDNSVersion = "1.0.1" ) // GetDNSVersion returns the right kube-dns version for a specific k8s version func GetDNSVersion(kubeVersion *version.Version, dns string) string { // v1.8.0+ uses kube-dns 1.14.5 // v1.9.0+ uses kube-dns 1.14.7 - // v1.9.0+ uses CoreDNS 1.0.0 + // v1.9.0+ uses CoreDNS 1.0.1 // In the future when the version is bumped at HEAD; add conditional logic to return the right versions 
// Also, the version might be bumped for different k8s releases on the same branch From 5faf31054412520ed199a85405c26fbcac404b5e Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 19 Dec 2017 14:06:19 +0800 Subject: [PATCH 405/794] fix typos --- staging/src/k8s.io/apiserver/pkg/storage/cacher.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher.go index 0f5b4e2ff28..36334a3662b 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher.go @@ -676,7 +676,7 @@ func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported b glog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String()) } // It's possible that the watcher is already not in the structure (e.g. in case of - // simulaneous Stop() and terminateAllWatchers(), but it doesn't break anything. + // simultaneous Stop() and terminateAllWatchers(), but it doesn't break anything. 
c.watchers.deleteWatcher(index, triggerValue, triggerSupported) } } @@ -747,7 +747,7 @@ func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interfac return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, options.ResourceVersion, Everything) } -// cacherWatch implements watch.Interface to return a single error +// errWatcher implements watch.Interface to return a single error type errWatcher struct { result chan watch.Event } @@ -787,7 +787,7 @@ func (c *errWatcher) Stop() { // no-op } -// cacherWatch implements watch.Interface +// cachWatcher implements watch.Interface type cacheWatcher struct { sync.Mutex input chan *watchCacheEvent From f4e8385a3bc8c8031b1802e15be9272531883df1 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Tue, 19 Dec 2017 09:13:28 +0200 Subject: [PATCH 406/794] Minor lint fix --- .../juju/layers/kubernetes-master/reactive/kubernetes_master.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index e5fef80a644..0dad475942e 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -39,7 +39,7 @@ from charms.reactive import hook from charms.reactive import remove_state from charms.reactive import set_state from charms.reactive import is_state -from charms.reactive import when, when_any, when_not, when_all +from charms.reactive import when, when_any, when_not from charms.reactive.helpers import data_changed, any_file_changed from charms.kubernetes.common import get_version from charms.kubernetes.common import retry From a0a69a35830e60ef9f87ddf3f13391c4c3e39077 Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Tue, 19 Dec 2017 15:37:21 +0800 Subject: [PATCH 407/794] Add more validate conditions when run kubectl get with --raw --- pkg/kubectl/cmd/resource/get.go 
| 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/kubectl/cmd/resource/get.go b/pkg/kubectl/cmd/resource/get.go index 4013cb42874..4e830342af4 100644 --- a/pkg/kubectl/cmd/resource/get.go +++ b/pkg/kubectl/cmd/resource/get.go @@ -25,6 +25,8 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" + "net/url" + kapierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -213,6 +215,12 @@ func (options *GetOptions) Validate(cmd *cobra.Command) error { if len(options.Raw) > 0 && (options.Watch || options.WatchOnly || len(options.LabelSelector) > 0 || options.Export) { return fmt.Errorf("--raw may not be specified with other flags that filter the server request or alter the output") } + if len(cmdutil.GetFlagString(cmd, "output")) > 0 { + return cmdutil.UsageErrorf(cmd, "--raw and --output are mutually exclusive") + } + if _, err := url.ParseRequestURI(options.Raw); err != nil { + return cmdutil.UsageErrorf(cmd, "--raw must be a valid URL path: %v", err) + } if cmdutil.GetFlagBool(cmd, "show-labels") { outputOption := cmd.Flags().Lookup("output").Value.String() if outputOption != "" && outputOption != "wide" { From 4665303c925e3a9de2b0b98a8050263c798883e4 Mon Sep 17 00:00:00 2001 From: Lee Verberne Date: Tue, 21 Nov 2017 18:25:39 +0100 Subject: [PATCH 408/794] Add a version string to pause.c The version string is based on the TAG and revision. 
Also: * Bump the TAG to 3.1 * Update the arm compiler binary used in kube-cross --- build/pause/Makefile | 7 ++++--- build/pause/pause.c | 21 +++++++++++++++++++-- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/build/pause/Makefile b/build/pause/Makefile index 4be3f8ea2e9..151396d4c80 100644 --- a/build/pause/Makefile +++ b/build/pause/Makefile @@ -18,14 +18,15 @@ REGISTRY ?= k8s.gcr.io IMAGE = $(REGISTRY)/pause-$(ARCH) LEGACY_AMD64_IMAGE = $(REGISTRY)/pause -TAG = 3.0 +TAG = 3.1 +REV = $(shell git describe --contains --always --match='v*') # Architectures supported: amd64, arm, arm64, ppc64le and s390x ARCH ?= amd64 ALL_ARCH = amd64 arm arm64 ppc64le s390x -CFLAGS = -Os -Wall -Werror -static +CFLAGS = -Os -Wall -Werror -static -DVERSION=v$(TAG)-$(REV) KUBE_CROSS_IMAGE ?= k8s.gcr.io/kube-cross KUBE_CROSS_VERSION ?= $(shell cat ../build-image/cross/VERSION) @@ -37,7 +38,7 @@ ifeq ($(ARCH),amd64) endif ifeq ($(ARCH),arm) - TRIPLE ?= arm-linux-gnueabi + TRIPLE ?= arm-linux-gnueabihf endif ifeq ($(ARCH),arm64) diff --git a/build/pause/pause.c b/build/pause/pause.c index f2e21b41d55..95966f4384d 100644 --- a/build/pause/pause.c +++ b/build/pause/pause.c @@ -17,20 +17,37 @@ limitations under the License. #include #include #include +#include #include #include #include +#define STRINGIFY(x) #x +#define VERSION_STRING(x) STRINGIFY(x) + +#ifndef VERSION +#define VERSION HEAD +#endif + static void sigdown(int signo) { psignal(signo, "Shutting down, got signal"); exit(0); } static void sigreap(int signo) { - while (waitpid(-1, NULL, WNOHANG) > 0); + while (waitpid(-1, NULL, WNOHANG) > 0) + ; } -int main() { +int main(int argc, char **argv) { + int i; + for (i = 1; i < argc; ++i) { + if (!strcasecmp(argv[i], "-v")) { + printf("pause.c %s\n", VERSION_STRING(VERSION)); + return 0; + } + } + if (getpid() != 1) /* Not an error because pause sees use outside of infra containers. 
*/ fprintf(stderr, "Warning: pause should be the first process\n"); From 249ddd762c71196cc15ac894847e764fb5640f13 Mon Sep 17 00:00:00 2001 From: Maciej Borsz Date: Tue, 19 Dec 2017 09:47:25 +0100 Subject: [PATCH 409/794] Revert "Add --retry-connrefused to all curl invocations." --- cluster/gce/container-linux/configure-helper.sh | 6 +++--- cluster/gce/container-linux/configure.sh | 4 ++-- cluster/gce/container-linux/master.yaml | 2 +- cluster/gce/container-linux/node.yaml | 2 +- cluster/gce/gci/configure-helper.sh | 2 +- cluster/gce/gci/configure.sh | 6 +++--- cluster/gce/gci/master.yaml | 2 +- cluster/gce/gci/node.yaml | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/cluster/gce/container-linux/configure-helper.sh b/cluster/gce/container-linux/configure-helper.sh index 068e742dcb7..d0bcdef8218 100755 --- a/cluster/gce/container-linux/configure-helper.sh +++ b/cluster/gce/container-linux/configure-helper.sh @@ -975,7 +975,7 @@ function start-kube-apiserver { params+=" --feature-gates=${FEATURE_GATES}" fi if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then - local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --retry-connrefused --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") + local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") if [[ -n "${PROXY_SSH_USER:-}" ]]; then params+=" --advertise-address=${vm_external_ip}" params+=" --ssh-user=${PROXY_SSH_USER}" @@ -1469,7 +1469,7 @@ function setup-rkt { mkdir -p /etc/rkt "${KUBE_HOME}/download/" local rkt_tar="${KUBE_HOME}/download/rkt.tar.gz" local rkt_tmpdir=$(mktemp -d "${KUBE_HOME}/rkt_download.XXXXX") - curl --retry 5 --retry-delay 3 --retry-connrefused --fail --silent --show-error \ + curl --retry 
5 --retry-delay 3 --fail --silent --show-error \ --location --create-dirs --output "${rkt_tar}" \ https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz tar --strip-components=1 -xf "${rkt_tar}" -C "${rkt_tmpdir}" --overwrite @@ -1508,7 +1508,7 @@ function install-docker2aci { local tar_path="${KUBE_HOME}/download/docker2aci.tar.gz" local tmp_path="${KUBE_HOME}/docker2aci" mkdir -p "${KUBE_HOME}/download/" "${tmp_path}" - curl --retry 5 --retry-delay 3 --retry-connrefused --fail --silent --show-error \ + curl --retry 5 --retry-delay 3 --fail --silent --show-error \ --location --create-dirs --output "${tar_path}" \ https://github.com/appc/docker2aci/releases/download/v0.14.0/docker2aci-v0.14.0.tar.gz tar --strip-components=1 -xf "${tar_path}" -C "${tmp_path}" --overwrite diff --git a/cluster/gce/container-linux/configure.sh b/cluster/gce/container-linux/configure.sh index 6651cc8ff51..7ee40b684b4 100755 --- a/cluster/gce/container-linux/configure.sh +++ b/cluster/gce/container-linux/configure.sh @@ -21,7 +21,7 @@ set -o pipefail function download-kube-env { # Fetch kube-env from GCE metadata server. local -r tmp_kube_env="/tmp/kube-env.yaml" - curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error \ + curl --fail --retry 5 --retry-delay 3 --silent --show-error \ -H "X-Google-Metadata-Request: True" \ -o "${tmp_kube_env}" \ http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env @@ -55,7 +55,7 @@ function download-or-bust { for url in "${urls[@]}"; do local file="${url##*/}" rm -f "${file}" - if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 --retry-connrefused "${url}"; then + if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then echo "== Failed to download ${url}. Retrying. ==" elif [[ -n "${hash}" ]] && ! 
validate-hash "${file}" "${hash}"; then echo "== Hash validation of ${url} failed. Retrying. ==" diff --git a/cluster/gce/container-linux/master.yaml b/cluster/gce/container-linux/master.yaml index be4c3c31ccc..4dec695c9d7 100644 --- a/cluster/gce/container-linux/master.yaml +++ b/cluster/gce/container-linux/master.yaml @@ -17,7 +17,7 @@ coreos: Type=oneshot RemainAfterExit=yes ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin - ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh ExecStart=/opt/kubernetes/bin/configure.sh diff --git a/cluster/gce/container-linux/node.yaml b/cluster/gce/container-linux/node.yaml index 24aaa7b43c1..b203c4fded3 100644 --- a/cluster/gce/container-linux/node.yaml +++ b/cluster/gce/container-linux/node.yaml @@ -17,7 +17,7 @@ coreos: Type=oneshot RemainAfterExit=yes ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin - ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh ExecStart=/opt/kubernetes/bin/configure.sh diff --git 
a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index cd8733ae06e..f82f2a23dca 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1655,7 +1655,7 @@ function start-kube-apiserver { params+=" --feature-gates=${FEATURE_GATES}" fi if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then - local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --retry-connrefused --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") + local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") if [[ -n "${PROXY_SSH_USER:-}" ]]; then params+=" --advertise-address=${vm_external_ip}" params+=" --ssh-user=${PROXY_SSH_USER}" diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index 6fa08398244..ac6a28efd9e 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -50,7 +50,7 @@ function download-kube-env { # Fetch kube-env from GCE metadata server. (umask 700; local -r tmp_kube_env="/tmp/kube-env.yaml" - curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error \ + curl --fail --retry 5 --retry-delay 3 --silent --show-error \ -H "X-Google-Metadata-Request: True" \ -o "${tmp_kube_env}" \ http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env @@ -68,7 +68,7 @@ function download-kube-master-certs { # Fetch kube-env from GCE metadata server. 
(umask 700; local -r tmp_kube_master_certs="/tmp/kube-master-certs.yaml" - curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error \ + curl --fail --retry 5 --retry-delay 3 --silent --show-error \ -H "X-Google-Metadata-Request: True" \ -o "${tmp_kube_master_certs}" \ http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-master-certs @@ -106,7 +106,7 @@ function download-or-bust { for url in "${urls[@]}"; do local file="${url##*/}" rm -f "${file}" - if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 --retry-connrefused "${url}"; then + if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then echo "== Failed to download ${url}. Retrying. ==" elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then echo "== Hash validation of ${url} failed. Retrying. ==" diff --git a/cluster/gce/gci/master.yaml b/cluster/gce/gci/master.yaml index 68fdc5a901a..7854ab4fa93 100644 --- a/cluster/gce/gci/master.yaml +++ b/cluster/gce/gci/master.yaml @@ -15,7 +15,7 @@ write_files: ExecStartPre=/bin/mkdir -p /home/kubernetes/bin ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin - ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh ExecStart=/home/kubernetes/bin/configure.sh diff --git a/cluster/gce/gci/node.yaml b/cluster/gce/gci/node.yaml index 
c716af56c49..52971e2a076 100644 --- a/cluster/gce/gci/node.yaml +++ b/cluster/gce/gci/node.yaml @@ -15,7 +15,7 @@ write_files: ExecStartPre=/bin/mkdir -p /home/kubernetes/bin ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin - ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --retry-connrefused --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh ExecStart=/home/kubernetes/bin/configure.sh From 484460db9a2e9c506ef0c6940390d15394144430 Mon Sep 17 00:00:00 2001 From: Wojciech Tyczynski Date: Tue, 5 Dec 2017 11:52:25 +0100 Subject: [PATCH 410/794] Build and push 3.1.11 etcd image --- cluster/images/etcd/Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile index 7f2978a1829..2681d3a0794 100644 --- a/cluster/images/etcd/Makefile +++ b/cluster/images/etcd/Makefile @@ -15,7 +15,7 @@ # Build the etcd image # # Usage: -# [TAGS=2.2.1 2.3.7 3.0.17 3.1.10] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push) +# [TAGS=2.2.1 2.3.7 3.0.17 3.1.11] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push) # The image contains different etcd versions to simplify # upgrades. Thus be careful when removing any tag from here. @@ -26,8 +26,8 @@ # Except from etcd-$(tag) and etcdctl-$(tag) binaries, we also # need etcd and etcdctl binaries for backward compatibility reasons. # That binary will be set to the last tag from $(TAGS). 
-TAGS?=2.2.1 2.3.7 3.0.17 3.1.10 -REGISTRY_TAG?=3.1.10 +TAGS?=2.2.1 2.3.7 3.0.17 3.1.11 +REGISTRY_TAG?=3.1.11 ARCH?=amd64 REGISTRY?=k8s.gcr.io GOLANG_VERSION?=1.7.6 From 8d99c55bba8486c0ece1c4d70a5d8353e3fe983b Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 19 Dec 2017 09:31:43 +0000 Subject: [PATCH 411/794] add Dong Liu as approver and add OWNERS in credentialprovider --- pkg/cloudprovider/providers/azure/OWNERS | 2 ++ pkg/credentialprovider/azure/OWNERS | 12 ++++++++++++ pkg/volume/azure_dd/OWNERS | 2 ++ pkg/volume/azure_file/OWNERS | 2 ++ 4 files changed, 18 insertions(+) create mode 100644 pkg/credentialprovider/azure/OWNERS diff --git a/pkg/cloudprovider/providers/azure/OWNERS b/pkg/cloudprovider/providers/azure/OWNERS index 51c658d2287..535c3ff2dce 100644 --- a/pkg/cloudprovider/providers/azure/OWNERS +++ b/pkg/cloudprovider/providers/azure/OWNERS @@ -4,6 +4,7 @@ approvers: - colemickens - feiskyer - jdumars +- karataliu - khenidak reviewers: - andyzhangx @@ -11,4 +12,5 @@ reviewers: - colemickens - feiskyer - jdumars +- karataliu - khenidak diff --git a/pkg/credentialprovider/azure/OWNERS b/pkg/credentialprovider/azure/OWNERS new file mode 100644 index 00000000000..6b1eb11dd34 --- /dev/null +++ b/pkg/credentialprovider/azure/OWNERS @@ -0,0 +1,12 @@ +approvers: +- andyzhangx +- brendandburns +- feiskyer +- karataliu +- khenidak +reviewers: +- andyzhangx +- brendandburns +- feiskyer +- karataliu +- khenidak diff --git a/pkg/volume/azure_dd/OWNERS b/pkg/volume/azure_dd/OWNERS index a6d469c0755..635cdd060b4 100755 --- a/pkg/volume/azure_dd/OWNERS +++ b/pkg/volume/azure_dd/OWNERS @@ -2,6 +2,7 @@ approvers: - andyzhangx - brendandburns - feiskyer +- karataliu - khenidak - rootfs reviewers: @@ -11,6 +12,7 @@ reviewers: - jingxu97 - jsafrane - msau42 +- karataliu - khenidak - rootfs - saad-ali diff --git a/pkg/volume/azure_file/OWNERS b/pkg/volume/azure_file/OWNERS index 06e4934dfe2..c71485c993a 100644 --- a/pkg/volume/azure_file/OWNERS +++ 
b/pkg/volume/azure_file/OWNERS @@ -2,6 +2,7 @@ approvers: - andyzhangx - brendandburns - feiskyer +- karataliu - khenidak - rootfs reviewers: @@ -10,6 +11,7 @@ reviewers: - feiskyer - jsafrane - jingxu97 +- karataliu - khenidak - msau42 - rootfs From ce14bdfc7cc57264e8fadd234338582788c3bb47 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Tue, 19 Dec 2017 10:47:27 +0100 Subject: [PATCH 412/794] apimachinery: remove dead code from roundtrip tester --- .../apimachinery/pkg/api/testing/roundtrip/roundtrip.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/api/testing/roundtrip/roundtrip.go b/staging/src/k8s.io/apimachinery/pkg/api/testing/roundtrip/roundtrip.go index 479a6a50a3e..0c032b81555 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/testing/roundtrip/roundtrip.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/testing/roundtrip/roundtrip.go @@ -277,12 +277,6 @@ func roundTrip(t *testing.T, scheme *runtime.Scheme, codec runtime.Codec, object return } - // catch deepcopy errors early - if !apiequality.Semantic.DeepEqual(original, object) { - t.Errorf("%v: DeepCopy did not lead to equal object, diff: %v", name, diff.ObjectReflectDiff(original, object)) - return - } - // encode (serialize) the deep copy using the provided codec data, err := runtime.Encode(codec, object) if err != nil { From 8a7f8bc0462afb34a53542ce1deb4256e644bb0b Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Tue, 19 Dec 2017 17:51:30 +0800 Subject: [PATCH 413/794] Move output and url checks under raw flag condition --- pkg/kubectl/cmd/resource/get.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/pkg/kubectl/cmd/resource/get.go b/pkg/kubectl/cmd/resource/get.go index 4e830342af4..6e928a65445 100644 --- a/pkg/kubectl/cmd/resource/get.go +++ b/pkg/kubectl/cmd/resource/get.go @@ -212,14 +212,16 @@ func (options *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args // Validate checks the set 
of flags provided by the user. func (options *GetOptions) Validate(cmd *cobra.Command) error { - if len(options.Raw) > 0 && (options.Watch || options.WatchOnly || len(options.LabelSelector) > 0 || options.Export) { - return fmt.Errorf("--raw may not be specified with other flags that filter the server request or alter the output") - } - if len(cmdutil.GetFlagString(cmd, "output")) > 0 { - return cmdutil.UsageErrorf(cmd, "--raw and --output are mutually exclusive") - } - if _, err := url.ParseRequestURI(options.Raw); err != nil { - return cmdutil.UsageErrorf(cmd, "--raw must be a valid URL path: %v", err) + if len(options.Raw) > 0 { + if options.Watch || options.WatchOnly || len(options.LabelSelector) > 0 || options.Export { + return fmt.Errorf("--raw may not be specified with other flags that filter the server request or alter the output") + } + if len(cmdutil.GetFlagString(cmd, "output")) > 0 { + return cmdutil.UsageErrorf(cmd, "--raw and --output are mutually exclusive") + } + if _, err := url.ParseRequestURI(options.Raw); err != nil { + return cmdutil.UsageErrorf(cmd, "--raw must be a valid URL path: %v", err) + } } if cmdutil.GetFlagBool(cmd, "show-labels") { outputOption := cmd.Flags().Lookup("output").Value.String() From be41d1e2ce71d91c33ae2f4da28142e1ff0a2a83 Mon Sep 17 00:00:00 2001 From: Chris Glass Date: Tue, 19 Dec 2017 10:41:48 +0100 Subject: [PATCH 414/794] Do not require the vim package to be installed The minimal Ubuntu image used on GKE nodes provides the vim editor as part of system packages, as "vim.tiny". People logging on the nodes have a vim environment available despite the "vim" package not being installed. 
Signed-off-by: Chris Glass --- test/e2e_node/system/specs/gke.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/e2e_node/system/specs/gke.yaml b/test/e2e_node/system/specs/gke.yaml index 0e795d4edfa..a4e4ce97d0e 100644 --- a/test/e2e_node/system/specs/gke.yaml +++ b/test/e2e_node/system/specs/gke.yaml @@ -235,8 +235,6 @@ packageSpecs: versionRange: '>=1.28' - name: util-linux versionRange: '>=2.27.1' -- name: vim - versionRange: '>=7.4.712' - name: wget versionRange: '>=1.18' - name: gce-compute-image-packages From 3e3385821a1c38092a3c2ba2cc993bf117757951 Mon Sep 17 00:00:00 2001 From: Chris Glass Date: Tue, 19 Dec 2017 10:46:46 +0100 Subject: [PATCH 415/794] Do not require the linux headers to be installed. The linux headers take significant disk space and are not necessary to run kubernetes on a GKE node. User logging on to a node can trivially install the kernel headers should they need to by running "apt-get install linux-headers-$(uname -r)". Signed-off-by: Chris Glass --- test/e2e_node/system/specs/gke.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/test/e2e_node/system/specs/gke.yaml b/test/e2e_node/system/specs/gke.yaml index a4e4ce97d0e..0b77359561f 100644 --- a/test/e2e_node/system/specs/gke.yaml +++ b/test/e2e_node/system/specs/gke.yaml @@ -220,7 +220,6 @@ packageSpecs: versionRange: '>=4.2.0' - name: less versionRange: '>=481' -- name: linux-headers-${KERNEL_RELEASE} - name: netcat-openbsd versionRange: '>=1.10' - name: python From 89e70760d77d2117a44fa2d1c8fd09a2615efc34 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Tue, 19 Dec 2017 16:05:32 +0800 Subject: [PATCH 416/794] log error when error occur in CleanupLeftovers() --- pkg/proxy/ipvs/proxier.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 104a5e9a353..9a3725f6f3f 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -806,6 +806,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt 
utiliptables.Interface, ipset encounteredError = false err := ipvs.Flush() if err != nil { + glog.Errorf("Error flushing IPVS rules: %v", err) encounteredError = true } } @@ -813,6 +814,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset nl := NewNetLinkHandle() err := nl.DeleteDummyDevice(DefaultDummyDevice) if err != nil { + glog.Errorf("Error deleting dummy device %s created by IPVS proxier: %v", DefaultDummyDevice, err) encounteredError = true } // Clear iptables created by ipvs Proxier. From 1c41db6178d2f4edc38141bf87e8cbf27d58efba Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Tue, 19 Dec 2017 13:16:12 +0100 Subject: [PATCH 417/794] printFlexPersistentVolumeSource: fix format. --- pkg/printers/internalversion/describe.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index e231df79692..3a6429c1097 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -1081,7 +1081,7 @@ func printFlexPersistentVolumeSource(flex *api.FlexPersistentVolumeSource, w Pre " Driver:\t%v\n"+ " FSType:\t%v\n"+ " SecretRef:\t%v\n"+ - " ReadOnly:\t%v\n", + " ReadOnly:\t%v\n"+ " Options:\t%v\n", flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) } From f2da0781689f700cc2d79d28180b9f23015c006f Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Tue, 19 Dec 2017 14:48:05 +0200 Subject: [PATCH 418/794] Pointing juju charms to 1.9 --- cluster/juju/layers/kubernetes-e2e/config.yaml | 2 +- cluster/juju/layers/kubernetes-master/config.yaml | 2 +- cluster/juju/layers/kubernetes-worker/config.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cluster/juju/layers/kubernetes-e2e/config.yaml b/cluster/juju/layers/kubernetes-e2e/config.yaml index bf1ba66a77c..d765c028713 100644 --- a/cluster/juju/layers/kubernetes-e2e/config.yaml +++ 
b/cluster/juju/layers/kubernetes-e2e/config.yaml @@ -1,6 +1,6 @@ options: channel: type: string - default: "1.8/stable" + default: "1.9/stable" description: | Snap channel to install Kubernetes snaps from diff --git a/cluster/juju/layers/kubernetes-master/config.yaml b/cluster/juju/layers/kubernetes-master/config.yaml index 4001cd979ac..208032002cc 100644 --- a/cluster/juju/layers/kubernetes-master/config.yaml +++ b/cluster/juju/layers/kubernetes-master/config.yaml @@ -33,7 +33,7 @@ options: detected on a worker node. channel: type: string - default: "1.8/stable" + default: "1.9/stable" description: | Snap channel to install Kubernetes master services from client_password: diff --git a/cluster/juju/layers/kubernetes-worker/config.yaml b/cluster/juju/layers/kubernetes-worker/config.yaml index b7ddc9bba72..b16b1814d77 100644 --- a/cluster/juju/layers/kubernetes-worker/config.yaml +++ b/cluster/juju/layers/kubernetes-worker/config.yaml @@ -22,7 +22,7 @@ options: switch to privileged mode if gpu hardware is detected. channel: type: string - default: "1.8/stable" + default: "1.9/stable" description: | Snap channel to install Kubernetes worker services from require-manual-upgrade: From 95dccb4b821b7a0bf3461d73f7e118cd1cf75b1e Mon Sep 17 00:00:00 2001 From: Maciej Borsz Date: Mon, 18 Dec 2017 13:34:33 +0100 Subject: [PATCH 419/794] Add --retry-connrefused to all curl invocations. By default 'Connection refused' error is not a transient error and is not retried. 
--- cluster/gce/container-linux/configure-helper.sh | 12 +++++++++--- cluster/gce/container-linux/configure.sh | 10 ++++++++-- cluster/gce/container-linux/master.yaml | 3 ++- cluster/gce/container-linux/node.yaml | 3 ++- cluster/gce/gci/configure-helper.sh | 8 +++++++- cluster/gce/gci/configure.sh | 12 +++++++++--- cluster/gce/gci/master.yaml | 3 ++- cluster/gce/gci/node.yaml | 3 ++- 8 files changed, 41 insertions(+), 13 deletions(-) diff --git a/cluster/gce/container-linux/configure-helper.sh b/cluster/gce/container-linux/configure-helper.sh index d0bcdef8218..abb1d4a749f 100755 --- a/cluster/gce/container-linux/configure-helper.sh +++ b/cluster/gce/container-linux/configure-helper.sh @@ -25,6 +25,12 @@ set -o errexit set -o nounset set -o pipefail +# Use --retry-connrefused opt only if it's supported by curl. +CURL_RETRY_CONNREFUSED="" +if curl --help | grep -q -- '--retry-connrefused'; then + CURL_RETRY_CONNREFUSED='--retry-connrefused' +fi + function create-dirs { echo "Creating required directories" mkdir -p /var/lib/kubelet @@ -975,7 +981,7 @@ function start-kube-apiserver { params+=" --feature-gates=${FEATURE_GATES}" fi if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then - local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") + local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") if [[ -n "${PROXY_SSH_USER:-}" ]]; then params+=" --advertise-address=${vm_external_ip}" params+=" --ssh-user=${PROXY_SSH_USER}" @@ -1469,7 +1475,7 @@ function setup-rkt { mkdir -p /etc/rkt "${KUBE_HOME}/download/" local rkt_tar="${KUBE_HOME}/download/rkt.tar.gz" local rkt_tmpdir=$(mktemp -d 
"${KUBE_HOME}/rkt_download.XXXXX") - curl --retry 5 --retry-delay 3 --fail --silent --show-error \ + curl --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --fail --silent --show-error \ --location --create-dirs --output "${rkt_tar}" \ https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz tar --strip-components=1 -xf "${rkt_tar}" -C "${rkt_tmpdir}" --overwrite @@ -1508,7 +1514,7 @@ function install-docker2aci { local tar_path="${KUBE_HOME}/download/docker2aci.tar.gz" local tmp_path="${KUBE_HOME}/docker2aci" mkdir -p "${KUBE_HOME}/download/" "${tmp_path}" - curl --retry 5 --retry-delay 3 --fail --silent --show-error \ + curl --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --fail --silent --show-error \ --location --create-dirs --output "${tar_path}" \ https://github.com/appc/docker2aci/releases/download/v0.14.0/docker2aci-v0.14.0.tar.gz tar --strip-components=1 -xf "${tar_path}" -C "${tmp_path}" --overwrite diff --git a/cluster/gce/container-linux/configure.sh b/cluster/gce/container-linux/configure.sh index 7ee40b684b4..962a5c03312 100755 --- a/cluster/gce/container-linux/configure.sh +++ b/cluster/gce/container-linux/configure.sh @@ -18,10 +18,16 @@ set -o errexit set -o nounset set -o pipefail +# Use --retry-connrefused opt only if it's supported by curl. +CURL_RETRY_CONNREFUSED="" +if curl --help | grep -q -- '--retry-connrefused'; then + CURL_RETRY_CONNREFUSED='--retry-connrefused' +fi + function download-kube-env { # Fetch kube-env from GCE metadata server. local -r tmp_kube_env="/tmp/kube-env.yaml" - curl --fail --retry 5 --retry-delay 3 --silent --show-error \ + curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \ -H "X-Google-Metadata-Request: True" \ -o "${tmp_kube_env}" \ http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env @@ -55,7 +61,7 @@ function download-or-bust { for url in "${urls[@]}"; do local file="${url##*/}" rm -f "${file}" - if ! 
curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then + if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 ${CURL_RETRY_CONNREFUSED} "${url}"; then echo "== Failed to download ${url}. Retrying. ==" elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then echo "== Hash validation of ${url} failed. Retrying. ==" diff --git a/cluster/gce/container-linux/master.yaml b/cluster/gce/container-linux/master.yaml index 4dec695c9d7..444d3042739 100644 --- a/cluster/gce/container-linux/master.yaml +++ b/cluster/gce/container-linux/master.yaml @@ -17,7 +17,8 @@ coreos: Type=oneshot RemainAfterExit=yes ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin - ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + # Use --retry-connrefused opt only if it's supported by curl. 
+ ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh' ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh ExecStart=/opt/kubernetes/bin/configure.sh diff --git a/cluster/gce/container-linux/node.yaml b/cluster/gce/container-linux/node.yaml index b203c4fded3..9886679cd78 100644 --- a/cluster/gce/container-linux/node.yaml +++ b/cluster/gce/container-linux/node.yaml @@ -17,7 +17,8 @@ coreos: Type=oneshot RemainAfterExit=yes ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin - ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + # Use --retry-connrefused opt only if it's supported by curl. + ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh' ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh ExecStart=/opt/kubernetes/bin/configure.sh diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index f82f2a23dca..0bf30051c9e 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -28,6 +28,12 @@ set -o pipefail readonly UUID_MNT_PREFIX="/mnt/disks/by-uuid/google-local-ssds" readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds" +# Use --retry-connrefused opt only if it's supported by curl. 
+CURL_RETRY_CONNREFUSED="" +if curl --help | grep -q -- '--retry-connrefused'; then + CURL_RETRY_CONNREFUSED='--retry-connrefused' +fi + function setup-os-params { # Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to # /sbin/crash_reporter which is more restrictive in saving crash dumps. So for @@ -1655,7 +1661,7 @@ function start-kube-apiserver { params+=" --feature-gates=${FEATURE_GATES}" fi if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then - local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") + local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") if [[ -n "${PROXY_SSH_USER:-}" ]]; then params+=" --advertise-address=${vm_external_ip}" params+=" --ssh-user=${PROXY_SSH_USER}" diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index ac6a28efd9e..f8ac61b6136 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -31,6 +31,12 @@ DEFAULT_NPD_SHA1="a57a3fe64cab8a18ec654f5cef0aec59dae62568" DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571" ### +# Use --retry-connrefused opt only if it's supported by curl. +CURL_RETRY_CONNREFUSED="" +if curl --help | grep -q -- '--retry-connrefused'; then + CURL_RETRY_CONNREFUSED='--retry-connrefused' +fi + function set-broken-motd { cat > /etc/motd < Date: Tue, 19 Dec 2017 07:48:16 -0600 Subject: [PATCH 420/794] Use old dns-ip mechanism with older cdk-addons. 
--- .../kubernetes-master/reactive/kubernetes_master.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index d7ca9beed86..97f2120ffca 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -572,6 +572,7 @@ def configure_cdk_addons(): dnsEnabled = str(hookenv.config('enable-kube-dns')).lower() args = [ 'arch=' + arch(), + 'dns-ip=' + get_deprecated_dns_ip(), 'dns-domain=' + hookenv.config('dns_domain'), 'enable-dashboard=' + dbEnabled, 'enable-kube-dns=' + dnsEnabled @@ -964,6 +965,14 @@ def get_dns_ip(): return svc['spec']['clusterIP'] +def get_deprecated_dns_ip(): + '''We previously hardcoded the dns ip. This function returns the old + hardcoded value for use with older versions of cdk_addons.''' + interface = ipaddress.IPv4Interface(service_cidr()) + ip = interface.network.network_address + 10 + return ip.exploded + + def get_kubernetes_service_ip(): '''Get the IP address for the kubernetes service based on the cidr.''' interface = ipaddress.IPv4Interface(service_cidr()) From 02f933b84921b9e09dc602be52724bdf47016111 Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Tue, 19 Dec 2017 19:26:16 +0530 Subject: [PATCH 421/794] apimachinery: fix typos in README --- staging/src/k8s.io/apimachinery/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/README.md b/staging/src/k8s.io/apimachinery/README.md index 98899fb58dd..258250ce2cb 100644 --- a/staging/src/k8s.io/apimachinery/README.md +++ b/staging/src/k8s.io/apimachinery/README.md @@ -6,7 +6,7 @@ Scheme, typing, encoding, decoding, and conversion packages for Kubernetes and K ## Purpose This library is a shared dependency for servers and clients to work with Kubernetes API infrastructure without direct 
-type dependencies. It's first comsumers are `k8s.io/kubernetes`, `k8s.io/client-go`, and `k8s.io/apiserver`. +type dependencies. Its first consumers are `k8s.io/kubernetes`, `k8s.io/client-go`, and `k8s.io/apiserver`. ## Compatibility @@ -25,5 +25,5 @@ Code changes are made in that location, merged into `k8s.io/kubernetes` and late ## Things you should *NOT* do 1. Add API types to this repo. This is for the machinery, not for the types. - 2. Directly modify any files under `pkg` in this repo. Those are driven from `k8s.io/kuberenetes/staging/src/k8s.io/apimachinery`. - 3. Expect compatibility. This repo is direct support of Kubernetes and the API isn't yet stable enough for API guarantees. \ No newline at end of file + 2. Directly modify any files under `pkg` in this repo. Those are driven from `k8s.io/kubernetes/staging/src/k8s.io/apimachinery`. + 3. Expect compatibility. This repo is direct support of Kubernetes and the API isn't yet stable enough for API guarantees. From 4e8526dc6ba27a40370eea47b17aca1054ca54f9 Mon Sep 17 00:00:00 2001 From: Wojciech Tyczynski Date: Tue, 19 Dec 2017 15:25:06 +0100 Subject: [PATCH 422/794] Revert "Version bump to etcd v3.2.11, grpc v1.7.5" --- Godeps/Godeps.json | 429 +- Godeps/LICENSES | 8366 +---------------- .../Godeps/Godeps.json | 118 +- .../src/k8s.io/apiserver/Godeps/Godeps.json | 260 +- .../apiserver/pkg/storage/etcd/testing/BUILD | 1 - .../pkg/storage/etcd/testing/utils.go | 6 +- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 118 +- .../sample-apiserver/Godeps/Godeps.json | 118 +- test/e2e_node/services/etcd.go | 1 - test/integration/scale/BUILD | 1 - test/integration/scale/scale_test.go | 1 - vendor/BUILD | 10 +- .../{coreos/bbolt => boltdb/bolt}/.gitignore | 1 - .../{coreos/bbolt => boltdb/bolt}/BUILD | 2 +- .../{coreos/bbolt => boltdb/bolt}/LICENSE | 0 vendor/github.com/boltdb/bolt/Makefile | 18 + .../{coreos/bbolt => boltdb/bolt}/README.md | 100 +- .../bbolt => boltdb/bolt}/appveyor.yml | 0 .../{coreos/bbolt => 
boltdb/bolt}/bolt_386.go | 3 - .../bbolt => boltdb/bolt}/bolt_amd64.go | 3 - .../bolt_mipsx.go => boltdb/bolt/bolt_arm.go} | 7 +- .../bbolt => boltdb/bolt}/bolt_arm64.go | 3 - .../bbolt => boltdb/bolt}/bolt_linux.go | 0 .../bbolt => boltdb/bolt}/bolt_openbsd.go | 0 .../{coreos/bbolt => boltdb/bolt}/bolt_ppc.go | 0 .../bbolt => boltdb/bolt}/bolt_ppc64.go | 3 - .../bbolt => boltdb/bolt}/bolt_ppc64le.go | 3 - .../bbolt => boltdb/bolt}/bolt_s390x.go | 3 - .../bbolt => boltdb/bolt}/bolt_unix.go | 33 +- .../bolt}/bolt_unix_solaris.go | 39 +- .../bbolt => boltdb/bolt}/bolt_windows.go | 33 +- .../bbolt => boltdb/bolt}/boltsync_unix.go | 0 .../{coreos/bbolt => boltdb/bolt}/bucket.go | 49 +- .../{coreos/bbolt => boltdb/bolt}/cursor.go | 0 .../{coreos/bbolt => boltdb/bolt}/db.go | 211 +- .../{coreos/bbolt => boltdb/bolt}/doc.go | 0 .../{coreos/bbolt => boltdb/bolt}/errors.go | 0 .../{coreos/bbolt => boltdb/bolt}/freelist.go | 167 +- .../{coreos/bbolt => boltdb/bolt}/node.go | 2 +- .../{coreos/bbolt => boltdb/bolt}/page.go | 31 +- .../{coreos/bbolt => boltdb/bolt}/tx.go | 85 +- vendor/github.com/cockroachdb/cmux/.gitignore | 24 - .../github.com/cockroachdb/cmux/.travis.yml | 22 - vendor/github.com/cockroachdb/cmux/BUILD | 31 - vendor/github.com/cockroachdb/cmux/LICENSE | 202 - vendor/github.com/cockroachdb/cmux/README.md | 65 - vendor/github.com/cockroachdb/cmux/buffer.go | 35 - vendor/github.com/cockroachdb/cmux/cmux.go | 210 - .../github.com/cockroachdb/cmux/matchers.go | 150 - .../github.com/cockroachdb/cmux/patricia.go | 173 - vendor/github.com/coreos/bbolt/Makefile | 30 - vendor/github.com/coreos/bbolt/bolt_arm.go | 28 - .../github.com/coreos/bbolt/bolt_mips64x.go | 12 - vendor/github.com/coreos/etcd/auth/BUILD | 5 - .../coreos/etcd/auth/authpb/auth.pb.go | 2 +- vendor/github.com/coreos/etcd/auth/jwt.go | 137 - .../coreos/etcd/auth/range_perm_cache.go | 180 +- .../coreos/etcd/auth/simple_token.go | 129 +- vendor/github.com/coreos/etcd/auth/store.go | 258 +- 
vendor/github.com/coreos/etcd/client/BUILD | 3 +- .../github.com/coreos/etcd/client/client.go | 52 +- .../github.com/coreos/etcd/client/discover.go | 19 - vendor/github.com/coreos/etcd/client/srv.go | 65 + vendor/github.com/coreos/etcd/clientv3/BUILD | 16 +- .../github.com/coreos/etcd/clientv3/README.md | 10 +- .../github.com/coreos/etcd/clientv3/auth.go | 25 +- .../coreos/etcd/clientv3/balancer.go | 239 + .../github.com/coreos/etcd/clientv3/client.go | 224 +- .../coreos/etcd/clientv3/cluster.go | 46 +- .../coreos/etcd/clientv3/compact_op.go | 6 +- .../coreos/etcd/clientv3/compare.go | 27 - .../coreos/etcd/clientv3/concurrency/BUILD | 35 - .../coreos/etcd/clientv3/concurrency/doc.go | 17 - .../etcd/clientv3/concurrency/election.go | 246 - .../coreos/etcd/clientv3/concurrency/key.go | 66 - .../coreos/etcd/clientv3/concurrency/mutex.go | 119 - .../etcd/clientv3/concurrency/session.go | 142 - .../coreos/etcd/clientv3/concurrency/stm.go | 388 - .../github.com/coreos/etcd/clientv3/config.go | 107 +- vendor/github.com/coreos/etcd/clientv3/doc.go | 2 +- .../coreos/etcd/clientv3/health_balancer.go | 627 -- vendor/github.com/coreos/etcd/clientv3/kv.go | 54 +- .../github.com/coreos/etcd/clientv3/lease.go | 255 +- .../github.com/coreos/etcd/clientv3/logger.go | 34 +- .../coreos/etcd/clientv3/maintenance.go | 59 +- .../coreos/etcd/clientv3/namespace/BUILD | 34 - .../coreos/etcd/clientv3/namespace/doc.go | 43 - .../coreos/etcd/clientv3/namespace/kv.go | 189 - .../coreos/etcd/clientv3/namespace/lease.go | 58 - .../coreos/etcd/clientv3/namespace/util.go | 42 - .../coreos/etcd/clientv3/namespace/watch.go | 84 - .../coreos/etcd/clientv3/naming/BUILD | 32 - .../coreos/etcd/clientv3/naming/doc.go | 56 - .../coreos/etcd/clientv3/naming/grpc.go | 132 - vendor/github.com/coreos/etcd/clientv3/op.go | 133 +- .../coreos/etcd/clientv3/ready_wait.go | 30 - .../github.com/coreos/etcd/clientv3/retry.go | 380 +- vendor/github.com/coreos/etcd/clientv3/txn.go | 30 +- 
.../github.com/coreos/etcd/clientv3/watch.go | 74 +- .../coreos/etcd/compactor/compactor.go | 18 +- vendor/github.com/coreos/etcd/discovery/BUILD | 5 +- .../github.com/coreos/etcd/discovery/srv.go | 104 + vendor/github.com/coreos/etcd/embed/BUILD | 60 - vendor/github.com/coreos/etcd/embed/config.go | 464 - vendor/github.com/coreos/etcd/embed/doc.go | 45 - vendor/github.com/coreos/etcd/embed/etcd.go | 453 - vendor/github.com/coreos/etcd/embed/serve.go | 236 - vendor/github.com/coreos/etcd/embed/util.go | 30 - vendor/github.com/coreos/etcd/error/error.go | 5 +- .../github.com/coreos/etcd/etcdserver/BUILD | 2 +- .../coreos/etcd/etcdserver/api/BUILD | 4 - .../coreos/etcd/etcdserver/api/capability.go | 8 +- .../coreos/etcd/etcdserver/api/etcdhttp/BUILD | 40 - .../etcd/etcdserver/api/etcdhttp/base.go | 186 - .../coreos/etcd/etcdserver/api/v2http/BUILD | 6 +- .../etcd/etcdserver/api/v2http/client.go | 146 +- .../coreos/etcd/etcdserver/api/v2http/http.go | 30 +- .../api/{etcdhttp => v2http}/peer.go | 4 +- .../coreos/etcd/etcdserver/api/v3client/BUILD | 32 - .../etcd/etcdserver/api/v3client/doc.go | 45 - .../etcd/etcdserver/api/v3client/v3client.go | 67 - .../etcd/etcdserver/api/v3election/BUILD | 34 - .../etcd/etcdserver/api/v3election/doc.go | 16 - .../etcdserver/api/v3election/election.go | 123 - .../api/v3election/v3electionpb/BUILD | 39 - .../api/v3election/v3electionpb/gw/BUILD | 33 - .../v3electionpb/gw/v3election.pb.gw.go | 313 - .../v3election/v3electionpb/v3election.pb.go | 2098 ----- .../v3election/v3electionpb/v3election.proto | 119 - .../coreos/etcd/etcdserver/api/v3lock/BUILD | 34 - .../coreos/etcd/etcdserver/api/v3lock/doc.go | 16 - .../coreos/etcd/etcdserver/api/v3lock/lock.go | 56 - .../etcd/etcdserver/api/v3lock/v3lockpb/BUILD | 38 - .../etcdserver/api/v3lock/v3lockpb/gw/BUILD | 33 - .../api/v3lock/v3lockpb/gw/v3lock.pb.gw.go | 167 - .../api/v3lock/v3lockpb/v3lock.pb.go | 978 -- .../api/v3lock/v3lockpb/v3lock.proto | 65 - 
.../coreos/etcd/etcdserver/api/v3rpc/grpc.go | 14 +- .../etcd/etcdserver/api/v3rpc/interceptor.go | 4 +- .../coreos/etcd/etcdserver/api/v3rpc/key.go | 10 +- .../coreos/etcd/etcdserver/api/v3rpc/lease.go | 31 +- .../etcd/etcdserver/api/v3rpc/maintenance.go | 3 +- .../etcd/etcdserver/api/v3rpc/member.go | 36 +- .../etcd/etcdserver/api/v3rpc/rpctypes/BUILD | 1 - .../etcdserver/api/v3rpc/rpctypes/error.go | 47 +- .../coreos/etcd/etcdserver/api/v3rpc/util.go | 6 +- .../coreos/etcd/etcdserver/api/v3rpc/watch.go | 47 - .../coreos/etcd/etcdserver/apply.go | 294 +- .../coreos/etcd/etcdserver/apply_auth.go | 13 +- .../coreos/etcd/etcdserver/backend.go | 81 - .../coreos/etcd/etcdserver/cluster_util.go | 10 + .../coreos/etcd/etcdserver/config.go | 7 - .../coreos/etcd/etcdserver/errors.go | 1 - .../coreos/etcd/etcdserver/etcdserverpb/BUILD | 11 +- .../etcdserver/etcdserverpb/etcdserver.pb.go | 2 +- .../etcd/etcdserver/etcdserverpb/gw/BUILD | 33 - .../etcdserverpb/raft_internal.pb.go | 2 +- .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 1517 +-- .../etcdserverpb/{gw => }/rpc.pb.gw.go | 589 +- .../etcd/etcdserver/etcdserverpb/rpc.proto | 32 +- .../coreos/etcd/etcdserver/membership/BUILD | 1 - .../etcd/etcdserver/membership/cluster.go | 2 +- .../coreos/etcd/etcdserver/membership/doc.go | 16 - .../etcd/etcdserver/membership/store.go | 6 +- .../coreos/etcd/etcdserver/metrics.go | 7 - .../coreos/etcd/etcdserver/quota.go | 17 +- .../github.com/coreos/etcd/etcdserver/raft.go | 121 +- .../coreos/etcd/etcdserver/server.go | 234 +- .../coreos/etcd/etcdserver/snapshot_merge.go | 7 +- .../coreos/etcd/etcdserver/stats/leader.go | 15 +- .../coreos/etcd/etcdserver/stats/server.go | 54 +- .../coreos/etcd/etcdserver/storage.go | 3 + .../github.com/coreos/etcd/etcdserver/util.go | 2 +- .../coreos/etcd/etcdserver/v3_server.go | 432 +- .../github.com/coreos/etcd/integration/BUILD | 9 +- .../coreos/etcd/integration/bridge.go | 67 +- .../coreos/etcd/integration/cluster.go | 166 +- 
.../coreos/etcd/integration/cluster_direct.go | 4 - .../coreos/etcd/integration/cluster_proxy.go | 42 +- .../coreos/etcd/lease/leasehttp/BUILD | 1 + .../coreos/etcd/lease/leasehttp/http.go | 54 +- .../coreos/etcd/lease/leasepb/lease.pb.go | 2 +- vendor/github.com/coreos/etcd/lease/lessor.go | 99 +- vendor/github.com/coreos/etcd/mvcc/BUILD | 4 - .../github.com/coreos/etcd/mvcc/backend/BUILD | 11 +- .../coreos/etcd/mvcc/backend/backend.go | 141 +- .../coreos/etcd/mvcc/backend/batch_tx.go | 168 +- ...onfig_default.go => boltoption_default.go} | 6 +- .../{config_linux.go => boltoption_linux.go} | 7 +- .../etcd/mvcc/backend/config_windows.go | 26 - .../coreos/etcd/mvcc/backend/metrics.go | 10 - .../coreos/etcd/mvcc/backend/read_tx.go | 92 - .../coreos/etcd/mvcc/backend/tx_buffer.go | 181 - vendor/github.com/coreos/etcd/mvcc/index.go | 21 +- .../github.com/coreos/etcd/mvcc/key_index.go | 1 + vendor/github.com/coreos/etcd/mvcc/kv.go | 82 +- vendor/github.com/coreos/etcd/mvcc/kv_view.go | 53 - vendor/github.com/coreos/etcd/mvcc/kvstore.go | 557 +- .../coreos/etcd/mvcc/kvstore_txn.go | 253 - vendor/github.com/coreos/etcd/mvcc/metrics.go | 15 +- .../coreos/etcd/mvcc/metrics_txn.go | 67 - .../coreos/etcd/mvcc/mvccpb/kv.pb.go | 2 +- .../coreos/etcd/mvcc/watchable_store.go | 165 +- .../coreos/etcd/mvcc/watchable_store_txn.go | 53 - .../coreos/etcd/mvcc/watcher_group.go | 2 +- .../coreos/etcd/pkg/adt/interval_tree.go | 81 +- vendor/github.com/coreos/etcd/pkg/cors/BUILD | 22 - .../github.com/coreos/etcd/pkg/cors/cors.go | 90 - .../coreos/etcd/pkg/debugutil/doc.go | 16 - .../coreos/etcd/pkg/debugutil/pprof.go | 47 - .../coreos/etcd/pkg/fileutil/fileutil.go | 7 +- .../coreos/etcd/pkg/fileutil/lock_linux.go | 3 +- .../coreos/etcd/pkg/fileutil/preallocate.go | 15 +- .../coreos/etcd/pkg/httputil/httputil.go | 9 + .../github.com/coreos/etcd/pkg/idutil/id.go | 4 +- .../github.com/coreos/etcd/pkg/netutil/BUILD | 1 + .../coreos/etcd/pkg/netutil/netutil.go | 34 +- 
.../coreos/etcd/pkg/schedule/schedule.go | 2 + vendor/github.com/coreos/etcd/pkg/srv/BUILD | 23 - vendor/github.com/coreos/etcd/pkg/srv/srv.go | 140 - .../github.com/coreos/etcd/pkg/testutil/BUILD | 1 - .../coreos/etcd/pkg/testutil/assert.go | 62 - .../coreos/etcd/pkg/testutil/leak.go | 23 +- .../coreos/etcd/pkg/transport/BUILD | 6 +- .../coreos/etcd/pkg/transport/listener.go | 66 +- .../coreos/etcd/pkg/transport/listener_tls.go | 217 - .../etcd/pkg/transport/timeout_listener.go | 5 +- .../etcd/pkg/transport/unix_listener.go | 4 +- .../github.com/coreos/etcd/pkg/wait/wait.go | 19 +- .../coreos/etcd/proxy/grpcproxy/BUILD | 14 +- .../coreos/etcd/proxy/grpcproxy/adapter/BUILD | 40 - .../adapter/cluster_client_adapter.go | 44 - .../etcd/proxy/grpcproxy/adapter/doc.go | 17 - .../adapter/election_client_adapter.go | 79 - .../grpcproxy/adapter/lease_client_adapter.go | 77 - .../grpcproxy/adapter/lock_client_adapter.go | 36 - .../adapter/maintenance_client_adapter.go | 79 - .../grpcproxy/adapter/watch_client_adapter.go | 66 - .../coreos/etcd/proxy/grpcproxy/cache/BUILD | 2 +- .../etcd/proxy/grpcproxy/cache/store.go | 42 +- .../coreos/etcd/proxy/grpcproxy/cluster.go | 149 +- .../coreos/etcd/proxy/grpcproxy/election.go | 65 - .../coreos/etcd/proxy/grpcproxy/kv.go | 30 +- .../{adapter => }/kv_client_adapter.go | 2 +- .../coreos/etcd/proxy/grpcproxy/leader.go | 114 - .../coreos/etcd/proxy/grpcproxy/lease.go | 360 +- .../coreos/etcd/proxy/grpcproxy/lock.go | 38 - .../coreos/etcd/proxy/grpcproxy/logger.go | 19 - .../etcd/proxy/grpcproxy/maintenance.go | 5 - .../coreos/etcd/proxy/grpcproxy/metrics.go | 7 - .../coreos/etcd/proxy/grpcproxy/register.go | 94 - .../coreos/etcd/proxy/grpcproxy/watch.go | 77 +- .../etcd/proxy/grpcproxy/watch_broadcast.go | 33 +- ...chan_stream.go => watch_client_adapter.go} | 125 +- .../coreos/etcd/proxy/grpcproxy/watcher.go | 11 +- vendor/github.com/coreos/etcd/raft/README.md | 91 +- .../coreos/etcd/raft/log_unstable.go | 20 - 
vendor/github.com/coreos/etcd/raft/node.go | 16 - vendor/github.com/coreos/etcd/raft/raft.go | 4 - .../coreos/etcd/raft/raftpb/raft.pb.go | 78 +- .../coreos/etcd/rafthttp/pipeline.go | 5 +- .../coreos/etcd/rafthttp/snapshot_sender.go | 6 +- .../github.com/coreos/etcd/rafthttp/stream.go | 21 +- .../github.com/coreos/etcd/rafthttp/util.go | 32 +- vendor/github.com/coreos/etcd/snap/db.go | 21 +- .../coreos/etcd/snap/snappb/snap.pb.go | 2 +- vendor/github.com/coreos/etcd/store/node.go | 1 + vendor/github.com/coreos/etcd/store/store.go | 3 - .../coreos/etcd/store/watcher_hub.go | 2 +- .../github.com/coreos/etcd/version/version.go | 2 +- vendor/github.com/coreos/etcd/wal/encoder.go | 2 +- vendor/github.com/coreos/etcd/wal/repair.go | 2 +- vendor/github.com/coreos/etcd/wal/wal.go | 23 +- .../coreos/etcd/wal/walpb/record.pb.go | 2 +- .../protobuf/protoc-gen-go/descriptor/BUILD | 29 - .../protoc-gen-go/descriptor/Makefile | 37 - .../protoc-gen-go/descriptor/descriptor.pb.go | 2215 ----- .../protoc-gen-go/descriptor/descriptor.proto | 849 -- .../grpc-ecosystem/grpc-gateway/runtime/BUILD | 3 +- .../grpc-gateway/runtime/context.go | 64 +- .../grpc-gateway/runtime/errors.go | 20 +- .../grpc-gateway/runtime/handler.go | 35 +- .../runtime/internal/stream_chunk.pb.go | 33 +- .../grpc-gateway/runtime/mux.go | 144 +- .../grpc-gateway/runtime/pattern.go | 2 +- .../grpc-gateway/runtime/proto_errors.go | 61 - .../grpc-gateway/runtime/query.go | 165 +- .../pkg/debugutil => karlseguin/ccache}/BUILD | 11 +- vendor/github.com/karlseguin/ccache/Makefile | 5 + vendor/github.com/karlseguin/ccache/bucket.go | 41 + vendor/github.com/karlseguin/ccache/cache.go | 227 + .../karlseguin/ccache/configuration.go | 94 + vendor/github.com/karlseguin/ccache/item.go | 103 + .../karlseguin/ccache/layeredbucket.go | 82 + .../karlseguin/ccache/layeredcache.go | 237 + .../github.com/karlseguin/ccache/license.txt | 19 + vendor/github.com/karlseguin/ccache/readme.md | 172 + 
.../karlseguin/ccache/secondarycache.go | 72 + .../genproto/googleapis/api/annotations/BUILD | 29 - .../api/annotations/annotations.pb.go | 64 - .../googleapis/api/annotations/http.pb.go | 566 -- vendor/google.golang.org/grpc/.please-update | 0 vendor/google.golang.org/grpc/.travis.yml | 19 +- vendor/google.golang.org/grpc/AUTHORS | 1 - vendor/google.golang.org/grpc/BUILD | 16 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 58 +- vendor/google.golang.org/grpc/LICENSE | 224 +- vendor/google.golang.org/grpc/Makefile | 13 +- vendor/google.golang.org/grpc/PATENTS | 22 + vendor/google.golang.org/grpc/README.md | 12 +- vendor/google.golang.org/grpc/backoff.go | 18 - vendor/google.golang.org/grpc/balancer.go | 66 +- vendor/google.golang.org/grpc/balancer/BUILD | 28 - .../grpc/balancer/balancer.go | 206 - .../grpc/balancer_conn_wrappers.go | 252 - .../grpc/balancer_v1_wrapper.go | 367 - vendor/google.golang.org/grpc/call.go | 133 +- vendor/google.golang.org/grpc/clientconn.go | 1000 +- vendor/google.golang.org/grpc/codec.go | 40 +- .../grpc/codes/code_string.go | 4 +- vendor/google.golang.org/grpc/codes/codes.go | 37 +- .../google.golang.org/grpc/connectivity/BUILD | 26 - .../grpc/connectivity/connectivity.go | 72 - vendor/google.golang.org/grpc/coverage.sh | 48 + .../grpc/credentials/credentials.go | 47 +- .../grpc/credentials/credentials_util_go17.go | 35 +- .../grpc/credentials/credentials_util_go18.go | 35 +- .../credentials/credentials_util_pre_go17.go | 35 +- vendor/google.golang.org/grpc/doc.go | 20 +- vendor/google.golang.org/grpc/go16.go | 56 + vendor/google.golang.org/grpc/go17.go | 55 + vendor/google.golang.org/grpc/grpclb.go | 241 +- .../grpc/grpclb/grpc_lb_v1/BUILD | 14 +- .../grpc/grpclb/grpc_lb_v1/doc.go | 21 - .../{messages/messages.pb.go => grpclb.pb.go} | 118 +- .../{messages/messages.proto => grpclb.proto} | 50 +- .../grpc/grpclb/grpc_lb_v1/messages/BUILD | 29 - vendor/google.golang.org/grpc/grpclog/BUILD | 6 +- 
.../google.golang.org/grpc/grpclog/grpclog.go | 123 - .../google.golang.org/grpc/grpclog/logger.go | 104 +- .../grpc/grpclog/loggerv2.go | 195 - .../grpc/health/grpc_health_v1/BUILD | 33 - .../grpc/health/grpc_health_v1/health.pb.go | 190 - .../grpc/health/grpc_health_v1/health.proto | 34 - vendor/google.golang.org/grpc/interceptor.go | 41 +- .../grpc/internal/internal.go | 35 +- .../grpc/keepalive/keepalive.go | 39 +- .../grpc/metadata/metadata.go | 87 +- vendor/google.golang.org/grpc/naming/BUILD | 11 +- .../grpc/naming/dns_resolver.go | 290 - vendor/google.golang.org/grpc/naming/go17.go | 34 - vendor/google.golang.org/grpc/naming/go18.go | 28 - .../google.golang.org/grpc/naming/naming.go | 35 +- vendor/google.golang.org/grpc/peer/peer.go | 38 +- .../google.golang.org/grpc/picker_wrapper.go | 141 - vendor/google.golang.org/grpc/pickfirst.go | 95 - vendor/google.golang.org/grpc/proxy.go | 38 +- vendor/google.golang.org/grpc/resolver/BUILD | 22 - .../grpc/resolver/resolver.go | 143 - .../grpc/resolver_conn_wrapper.go | 139 - vendor/google.golang.org/grpc/rpc_util.go | 355 +- vendor/google.golang.org/grpc/server.go | 560 +- .../google.golang.org/grpc/stats/handlers.go | 42 +- vendor/google.golang.org/grpc/stats/stats.go | 147 +- vendor/google.golang.org/grpc/status/BUILD | 1 - .../google.golang.org/grpc/status/status.go | 73 +- vendor/google.golang.org/grpc/stream.go | 254 +- vendor/google.golang.org/grpc/tap/tap.go | 55 +- vendor/google.golang.org/grpc/trace.go | 50 +- vendor/google.golang.org/grpc/transport/BUILD | 4 +- .../grpc/transport/bdp_estimator.go | 143 - .../grpc/transport/control.go | 174 +- .../google.golang.org/grpc/transport/go16.go | 46 + .../google.golang.org/grpc/transport/go17.go | 46 + .../grpc/transport/handler_server.go | 107 +- .../grpc/transport/http2_client.go | 936 +- .../grpc/transport/http2_server.go | 904 +- .../grpc/transport/http_util.go | 288 +- .../google.golang.org/grpc/transport/log.go | 50 - .../grpc/transport/transport.go | 357 
+- vendor/google.golang.org/grpc/vet.sh | 78 - 381 files changed, 9452 insertions(+), 37110 deletions(-) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/.gitignore (65%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/BUILD (96%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/LICENSE (100%) create mode 100644 vendor/github.com/boltdb/bolt/Makefile rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/README.md (88%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/appveyor.yml (100%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_386.go (72%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_amd64.go (73%) rename vendor/github.com/{coreos/bbolt/bolt_mipsx.go => boltdb/bolt/bolt_arm.go} (55%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_arm64.go (74%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_linux.go (100%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_openbsd.go (100%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_ppc.go (100%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_ppc64.go (74%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_ppc64le.go (75%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_s390x.go (74%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_unix.go (80%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_unix_solaris.go (75%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bolt_windows.go (88%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/boltsync_unix.go (100%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/bucket.go (95%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/cursor.go (100%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/db.go (85%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/doc.go (100%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/errors.go (100%) rename vendor/github.com/{coreos/bbolt => 
boltdb/bolt}/freelist.go (56%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/node.go (99%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/page.go (88%) rename vendor/github.com/{coreos/bbolt => boltdb/bolt}/tx.go (94%) delete mode 100644 vendor/github.com/cockroachdb/cmux/.gitignore delete mode 100644 vendor/github.com/cockroachdb/cmux/.travis.yml delete mode 100644 vendor/github.com/cockroachdb/cmux/BUILD delete mode 100644 vendor/github.com/cockroachdb/cmux/LICENSE delete mode 100644 vendor/github.com/cockroachdb/cmux/README.md delete mode 100644 vendor/github.com/cockroachdb/cmux/buffer.go delete mode 100644 vendor/github.com/cockroachdb/cmux/cmux.go delete mode 100644 vendor/github.com/cockroachdb/cmux/matchers.go delete mode 100644 vendor/github.com/cockroachdb/cmux/patricia.go delete mode 100644 vendor/github.com/coreos/bbolt/Makefile delete mode 100644 vendor/github.com/coreos/bbolt/bolt_arm.go delete mode 100644 vendor/github.com/coreos/bbolt/bolt_mips64x.go delete mode 100644 vendor/github.com/coreos/etcd/auth/jwt.go create mode 100644 vendor/github.com/coreos/etcd/client/srv.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/balancer.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/election.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/key.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/session.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/health_balancer.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/BUILD delete mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/doc.go delete mode 100644 
vendor/github.com/coreos/etcd/clientv3/namespace/kv.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/lease.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/util.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/watch.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/naming/BUILD delete mode 100644 vendor/github.com/coreos/etcd/clientv3/naming/doc.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/naming/grpc.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/ready_wait.go create mode 100644 vendor/github.com/coreos/etcd/discovery/srv.go delete mode 100644 vendor/github.com/coreos/etcd/embed/BUILD delete mode 100644 vendor/github.com/coreos/etcd/embed/config.go delete mode 100644 vendor/github.com/coreos/etcd/embed/doc.go delete mode 100644 vendor/github.com/coreos/etcd/embed/etcd.go delete mode 100644 vendor/github.com/coreos/etcd/embed/serve.go delete mode 100644 vendor/github.com/coreos/etcd/embed/util.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go rename vendor/github.com/coreos/etcd/etcdserver/api/{etcdhttp => v2http}/peer.go (97%) delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD delete mode 100644 
vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/backend.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD rename vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/{gw => }/rpc.pb.gw.go (69%) delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/membership/doc.go rename vendor/github.com/coreos/etcd/mvcc/backend/{config_default.go => boltoption_default.go} (82%) rename vendor/github.com/coreos/etcd/mvcc/backend/{config_linux.go => boltoption_linux.go} (88%) delete mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/kv_view.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/metrics_txn.go delete mode 100644 
vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/cors/BUILD delete mode 100644 vendor/github.com/coreos/etcd/pkg/cors/cors.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/debugutil/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/srv/BUILD delete mode 100644 vendor/github.com/coreos/etcd/pkg/srv/srv.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/testutil/assert.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go rename vendor/github.com/coreos/etcd/proxy/grpcproxy/{adapter => }/kv_client_adapter.go (98%) delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go rename vendor/github.com/coreos/etcd/proxy/grpcproxy/{adapter/chan_stream.go => watch_client_adapter.go} (65%) delete mode 100644 
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go rename vendor/github.com/{coreos/etcd/pkg/debugutil => karlseguin/ccache}/BUILD (66%) create mode 100644 vendor/github.com/karlseguin/ccache/Makefile create mode 100644 vendor/github.com/karlseguin/ccache/bucket.go create mode 100644 vendor/github.com/karlseguin/ccache/cache.go create mode 100644 vendor/github.com/karlseguin/ccache/configuration.go create mode 100644 vendor/github.com/karlseguin/ccache/item.go create mode 100644 vendor/github.com/karlseguin/ccache/layeredbucket.go create mode 100644 vendor/github.com/karlseguin/ccache/layeredcache.go create mode 100644 vendor/github.com/karlseguin/ccache/license.txt create mode 100644 vendor/github.com/karlseguin/ccache/readme.md create mode 100644 vendor/github.com/karlseguin/ccache/secondarycache.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD delete mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go delete mode 100644 vendor/google.golang.org/grpc/.please-update delete mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/PATENTS delete mode 100644 vendor/google.golang.org/grpc/balancer/BUILD delete mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go delete mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go delete mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/connectivity/BUILD 
delete mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100755 vendor/google.golang.org/grpc/coverage.sh create mode 100644 vendor/google.golang.org/grpc/go16.go create mode 100644 vendor/google.golang.org/grpc/go17.go delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go rename vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/{messages/messages.pb.go => grpclb.pb.go} (79%) rename vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/{messages/messages.proto => grpclb.proto} (71%) delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD delete mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto delete mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/naming/go17.go delete mode 100644 vendor/google.golang.org/grpc/naming/go18.go delete mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/pickfirst.go delete mode 100644 vendor/google.golang.org/grpc/resolver/BUILD delete mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go delete mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/transport/go16.go create mode 100644 vendor/google.golang.org/grpc/transport/go17.go delete mode 100644 vendor/google.golang.org/grpc/transport/log.go delete mode 100755 vendor/google.golang.org/grpc/vet.sh diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index b19a302f9fe..270cff5e455 100644 --- a/Godeps/Godeps.json +++ 
b/Godeps/Godeps.json @@ -333,6 +333,11 @@ "Comment": "v3.5.0", "Rev": "b38d23b8782a487059e8fc8773e9a5b228a77cb6" }, + { + "ImportPath": "github.com/boltdb/bolt", + "Comment": "v1.3.0", + "Rev": "583e8937c61f1af6513608ccc75c97b6abdf4ff9" + }, { "ImportPath": "github.com/chai2010/gettext-go/gettext", "Rev": "c6fed771bfd517099caf0f7a961671fa8ed08723" @@ -418,10 +423,6 @@ "ImportPath": "github.com/clusterhq/flocker-go", "Rev": "2b8b7259d3139c96c4a6871031355808ab3fd3b3" }, - { - "ImportPath": "github.com/cockroachdb/cmux", - "Rev": "112f0506e7743d64a6eb8fedbcff13d9979bbf92" - }, { "ImportPath": "github.com/codedellemc/goscaleio", "Rev": "20e2ce2cf8852dc78bd42b76698dcd8dcd77b7b1" @@ -514,375 +515,285 @@ "Comment": "v0.6.0", "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, - { - "ImportPath": "github.com/coreos/bbolt", - "Comment": "v1.3.1-coreos.6", - "Rev": "48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d" - }, { "ImportPath": "github.com/coreos/etcd/alarm", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/auth", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/client", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/clientv3/concurrency", - "Comment": "v3.2.11", - "Rev": 
"1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/clientv3/namespace", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/clientv3/naming", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/compactor", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/discovery", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/embed", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/error", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/etcdhttp", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": 
"github.com/coreos/etcd/etcdserver/api/v2http/httptypes", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3client", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/auth", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": 
"github.com/coreos/etcd/etcdserver/etcdserverpb", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb/gw", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/membership", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/stats", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/integration", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/lease", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/lease/leasehttp", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/lease/leasepb", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/mvcc", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/mvcc/backend", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": 
"0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/adt", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/contention", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/cors", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/cpuutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/crc", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/debugutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/httputil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/idutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { 
"ImportPath": "github.com/coreos/etcd/pkg/ioutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/logutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/monotime", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/netutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/pbutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/runtime", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/schedule", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/srv", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/testutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": 
"0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/wait", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/adapter", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/cache", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/raft", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/raft/raftpb", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/rafthttp", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" 
+ "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/snap", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/snap/snappb", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/store", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/version", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/wal", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/wal/walpb", - "Comment": "v3.2.11", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Comment": "v3.1.10", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/go-oidc/http", @@ -1441,10 +1352,6 @@ "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", - "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" - }, { "ImportPath": "github.com/golang/protobuf/ptypes", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -1871,18 +1778,18 @@ }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Comment": "v1.3.0", - "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" + "Comment": "v1.1.0-25-g84398b9", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" }, { "ImportPath": 
"github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Comment": "v1.3.0", - "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" + "Comment": "v1.1.0-25-g84398b9", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Comment": "v1.3.0", - "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" + "Comment": "v1.1.0-25-g84398b9", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" }, { "ImportPath": "github.com/hashicorp/golang-lru", @@ -2014,6 +1921,11 @@ "ImportPath": "github.com/kardianos/osext", "Rev": "8fef92e41e22a70e700a96b29f066cda30ea24ef" }, + { + "ImportPath": "github.com/karlseguin/ccache", + "Comment": "v2.0.2-5-g3ba9789", + "Rev": "3ba9789cfd2cb7b4fb4657efc994cc1c599a648c" + }, { "ImportPath": "github.com/kr/fs", "Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b" @@ -3000,108 +2912,79 @@ "ImportPath": "google.golang.org/api/pubsub/v1", "Rev": "654f863362977d69086620b5f72f13e911da2410" }, - { - "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", - "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" - }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/balancer", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/codes", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/connectivity", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Comment": "v1.7.5", - "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/internal", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/naming", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/peer", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/resolver", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { 
"ImportPath": "google.golang.org/grpc/stats", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/status", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/tap", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/transport", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "gopkg.in/gcfg.v1", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 6d17659db3a..2b40e59609c 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -11102,6 +11102,34 @@ THE SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/boltdb/bolt licensed under: = + +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + += vendor/github.com/boltdb/bolt/LICENSE 13b2a308eefa10d841e3bf2467dbe07a +================================================================================ + + ================================================================================ = vendor/github.com/chai2010/gettext-go/gettext licensed under: = @@ -11856,216 +11884,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -================================================================================ -= vendor/github.com/cockroachdb/cmux licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/cockroachdb/cmux/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/codedellemc/goscaleio licensed under: = @@ -15775,34 +15593,6 @@ SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/coreos/bbolt licensed under: = - -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -= vendor/github.com/coreos/bbolt/LICENSE 13b2a308eefa10d841e3bf2467dbe07a -================================================================================ - - ================================================================================ = vendor/github.com/coreos/etcd/alarm licensed under: = @@ -16853,636 +16643,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/coreos/etcd/clientv3/concurrency licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/github.com/coreos/etcd/clientv3/namespace licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/github.com/coreos/etcd/clientv3/naming licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/coreos/etcd/compactor licensed under: = @@ -17903,216 +17063,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/coreos/etcd/embed licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/coreos/etcd/error licensed under: = @@ -18743,216 +17693,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/coreos/etcd/etcdserver/api/v2http licensed under: = @@ -19373,1476 +18113,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/v3client licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/v3election licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/v3lock licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/coreos/etcd/etcdserver/api/v3rpc licensed under: = @@ -21683,216 +18953,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/coreos/etcd/etcdserver/membership licensed under: = @@ -24203,216 +21263,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/coreos/etcd/pkg/cors licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/coreos/etcd/pkg/cpuutil licensed under: = @@ -24833,216 +21683,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/coreos/etcd/pkg/debugutil licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/coreos/etcd/pkg/fileutil licensed under: = @@ -27353,216 +23993,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/coreos/etcd/pkg/srv licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/coreos/etcd/pkg/testutil licensed under: = @@ -28823,216 +25253,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/github.com/coreos/etcd/proxy/grpcproxy/cache licensed under: = @@ -47876,45 +44096,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -================================================================================ -= vendor/github.com/golang/protobuf/protoc-gen-go/descriptor licensed under: = - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -= vendor/github.com/golang/protobuf/LICENSE 14db3a56c3796a940ba32948a15f97d0 -================================================================================ - - ================================================================================ = vendor/github.com/golang/protobuf/ptypes licensed under: = @@ -70168,6 +66349,33 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/github.com/karlseguin/ccache licensed under: = + +Copyright (c) 2013 Karl Seguin. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + += vendor/github.com/karlseguin/ccache/license.txt fb40cd712dfcf5e0a8de4c13c3399db2 +================================================================================ + + ================================================================================ = vendor/github.com/kr/fs licensed under: = @@ -89517,216 +85725,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ -================================================================================ -= vendor/google.golang.org/genproto/googleapis/api/annotations licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/genproto/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - ================================================================================ = vendor/google.golang.org/genproto/googleapis/rpc/status licensed under: = @@ -89940,3990 +85938,504 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ = vendor/google.golang.org/grpc licensed under: = - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/google.golang.org/grpc/balancer licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +Copyright 2014, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/codes licensed under: = - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/google.golang.org/grpc/connectivity licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +Copyright 2014, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/credentials licensed under: = +Copyright 2014, Google Inc. +All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/grpclb/grpc_lb_v1 licensed under: = - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +Copyright 2014, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/grpclog licensed under: = - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/google.golang.org/grpc/health/grpc_health_v1 licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +Copyright 2014, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/internal licensed under: = +Copyright 2014, Google Inc. +All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/keepalive licensed under: = +Copyright 2014, Google Inc. +All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/metadata licensed under: = +Copyright 2014, Google Inc. +All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/naming licensed under: = +Copyright 2014, Google Inc. +All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/peer licensed under: = - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 -================================================================================ - - -================================================================================ -= vendor/google.golang.org/grpc/resolver licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +Copyright 2014, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/stats licensed under: = +Copyright 2014, Google Inc. +All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/status licensed under: = +Copyright 2014, Google Inc. +All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/tap licensed under: = +Copyright 2014, Google Inc. +All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/transport licensed under: = +Copyright 2014, Google Inc. +All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 ================================================================================ diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index c5b105e50c1..08607c1ddcf 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -32,60 +32,60 @@ }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": 
"github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/fileutil", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/srv", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/version", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/go-semver/semver", - "Rev": "568e959cd89871e61434c1143528d9162da89ef2" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/go-systemd/daemon", "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" }, + { + "ImportPath": "github.com/coreos/go-systemd/journal", + "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" + }, + { + "ImportPath": "github.com/coreos/pkg/capnslog", + "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" + }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -167,11 +167,11 @@ "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, { - "ImportPath": "github.com/golang/protobuf/proto", + "ImportPath": "github.com/golang/protobuf/jsonpb", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { - "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", + 
"ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { @@ -186,6 +186,10 @@ "ImportPath": "github.com/golang/protobuf/ptypes/duration", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, + { + "ImportPath": "github.com/golang/protobuf/ptypes/struct", + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + }, { "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -218,6 +222,22 @@ "ImportPath": "github.com/gregjones/httpcache/diskcache", "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, + { + "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", + "Rev": "2500245aa6110c562d17020fb31a2c133d737799" + }, + { + "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + }, + { + "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + }, + { + "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -422,85 +442,65 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", - "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" - }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/balancer", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": 
"google.golang.org/grpc/connectivity", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/resolver", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": 
"d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "gopkg.in/inf.v0", diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index d2800e3d018..83c6b74dbaa 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -27,304 +27,232 @@ "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4" }, { - "ImportPath": "github.com/cockroachdb/cmux", - "Rev": "112f0506e7743d64a6eb8fedbcff13d9979bbf92" - }, - { - "ImportPath": "github.com/coreos/bbolt", - "Rev": "48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d" + "ImportPath": "github.com/boltdb/bolt", + "Rev": "583e8937c61f1af6513608ccc75c97b6abdf4ff9" }, { "ImportPath": "github.com/coreos/etcd/alarm", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/auth", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/clientv3/concurrency", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/clientv3/namespace", - "Rev": 
"1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/clientv3/naming", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/compactor", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/discovery", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/embed", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/error", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/etcdhttp", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3client", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": 
"github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/auth", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb/gw", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/membership", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/stats", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/integration", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/lease", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": 
"0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/lease/leasehttp", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/lease/leasepb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/mvcc", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/mvcc/backend", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/adt", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/contention", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/cors", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/cpuutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/crc", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/debugutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/httputil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": 
"0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/idutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/ioutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/logutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/monotime", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/netutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/pbutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/runtime", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/schedule", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/srv", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/testutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": 
"1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/wait", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/adapter", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/cache", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/raft", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/raft/raftpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/rafthttp", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/snap", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/snap/snappb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/store", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/version", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": 
"github.com/coreos/etcd/wal", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/wal/walpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/go-oidc/http", @@ -378,10 +306,6 @@ "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, - { - "ImportPath": "github.com/dgrijalva/jwt-go", - "Rev": "01aeca54ebda6e0fbfafd0a524d234159c05ec20" - }, { "ImportPath": "github.com/elazarl/go-bindata-assetfs", "Rev": "3dcc96556217539f50599357fb481ac0dc7439b9" @@ -434,10 +358,6 @@ "ImportPath": "github.com/golang/glog", "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, - { - "ImportPath": "github.com/golang/groupcache/lru", - "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433" - }, { "ImportPath": "github.com/golang/protobuf/jsonpb", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -446,10 +366,6 @@ "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", - "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" - }, { "ImportPath": "github.com/golang/protobuf/ptypes", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -532,15 +448,15 @@ }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" }, { "ImportPath": "github.com/hashicorp/golang-lru", @@ -570,6 +486,10 @@ 
"ImportPath": "github.com/juju/ratelimit", "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" }, + { + "ImportPath": "github.com/karlseguin/ccache", + "Rev": "3ba9789cfd2cb7b4fb4657efc994cc1c599a648c" + }, { "ImportPath": "github.com/mailru/easyjson/buffer", "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" @@ -770,85 +690,65 @@ "ImportPath": "golang.org/x/time/rate", "Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631" }, - { - "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", - "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" - }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/balancer", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/connectivity", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + 
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/resolver", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "gopkg.in/inf.v0", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD index 5d495404c6d..ac48442e50d 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD @@ -13,7 +13,6 @@ go_library( "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - 
"//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", "//vendor/github.com/coreos/etcd/integration:go_default_library", "//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go index 96d21b5812e..6ab310b601e 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go @@ -35,7 +35,6 @@ import ( etcd "github.com/coreos/etcd/client" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" @@ -155,7 +154,6 @@ func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer if err != nil { t.Fatal(err) } - m.AuthToken = "simple" } else { cln := newLocalListener(t) m.ClientListeners = []net.Listener{cln} @@ -191,9 +189,9 @@ func (m *EtcdTestServer) launch(t *testing.T) error { if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) + m.s.SyncTicker = time.Tick(500 * time.Millisecond) m.s.Start() - m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)} + m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)} for _, ln := range m.PeerListeners { hs := &httptest.Server{ Listener: ln, diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index fb2564f1aaf..ec78f699e49 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ 
-28,60 +28,60 @@ }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/fileutil", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/srv", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/version", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/go-semver/semver", 
- "Rev": "568e959cd89871e61434c1143528d9162da89ef2" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/go-systemd/daemon", "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" }, + { + "ImportPath": "github.com/coreos/go-systemd/journal", + "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" + }, + { + "ImportPath": "github.com/coreos/pkg/capnslog", + "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" + }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -147,11 +147,11 @@ "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, { - "ImportPath": "github.com/golang/protobuf/proto", + "ImportPath": "github.com/golang/protobuf/jsonpb", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { - "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", + "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { @@ -166,6 +166,10 @@ "ImportPath": "github.com/golang/protobuf/ptypes/duration", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, + { + "ImportPath": "github.com/golang/protobuf/ptypes/struct", + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + }, { "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -198,6 +202,22 @@ "ImportPath": "github.com/gregjones/httpcache/diskcache", "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, + { + "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", + "Rev": "2500245aa6110c562d17020fb31a2c133d737799" + }, + { + "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + }, + { + "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + }, + { + "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + }, { "ImportPath": 
"github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -398,85 +418,65 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", - "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" - }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/balancer", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/connectivity", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { 
"ImportPath": "google.golang.org/grpc/metadata", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/resolver", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "gopkg.in/inf.v0", diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index 6bac29d3633..3281bf2fb31 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -28,60 +28,60 @@ }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": 
"0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/fileutil", + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/srv", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/etcd/version", - "Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71" - }, - { - "ImportPath": "github.com/coreos/go-semver/semver", - "Rev": "568e959cd89871e61434c1143528d9162da89ef2" + "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" }, { "ImportPath": "github.com/coreos/go-systemd/daemon", "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" }, + { + "ImportPath": "github.com/coreos/go-systemd/journal", + "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" + }, + { + "ImportPath": "github.com/coreos/pkg/capnslog", + "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" + }, { 
"ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -139,11 +139,11 @@ "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, { - "ImportPath": "github.com/golang/protobuf/proto", + "ImportPath": "github.com/golang/protobuf/jsonpb", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { - "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", + "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { @@ -158,6 +158,10 @@ "ImportPath": "github.com/golang/protobuf/ptypes/duration", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, + { + "ImportPath": "github.com/golang/protobuf/ptypes/struct", + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + }, { "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -190,6 +194,22 @@ "ImportPath": "github.com/gregjones/httpcache/diskcache", "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, + { + "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", + "Rev": "2500245aa6110c562d17020fb31a2c133d737799" + }, + { + "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + }, + { + "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + }, + { + "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", + "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -382,85 +402,65 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", - "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" - }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" 
}, { "ImportPath": "google.golang.org/grpc", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/balancer", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/connectivity", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - }, - { - "ImportPath": 
"google.golang.org/grpc/resolver", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, { "ImportPath": "gopkg.in/inf.v0", diff --git a/test/e2e_node/services/etcd.go b/test/e2e_node/services/etcd.go index 8c40dc7797e..9176ff7a0d7 100644 --- a/test/e2e_node/services/etcd.go +++ b/test/e2e_node/services/etcd.go @@ -77,7 +77,6 @@ func NewEtcd(dataDir string) *EtcdServer { MaxWALFiles: maxWALFiles, TickMs: tickMs, ElectionTicks: electionTicks, - AuthToken: "simple", } return &EtcdServer{ diff --git a/test/integration/scale/BUILD b/test/integration/scale/BUILD index 5fd7eb7c093..120e556ce4b 100644 --- a/test/integration/scale/BUILD +++ b/test/integration/scale/BUILD @@ -14,7 +14,6 @@ go_test( deps = [ "//cmd/kube-apiserver/app/testing:go_default_library", "//test/integration/framework:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/test/integration/scale/scale_test.go b/test/integration/scale/scale_test.go index fe92420ec1e..a40093571c1 100644 --- a/test/integration/scale/scale_test.go +++ b/test/integration/scale/scale_test.go @@ -22,7 +22,6 @@ import ( "strings" "testing" - _ 
"github.com/coreos/etcd/etcdserver/api/v3rpc" // Force package logger init. "github.com/coreos/pkg/capnslog" appsv1beta2 "k8s.io/api/apps/v1beta2" diff --git a/vendor/BUILD b/vendor/BUILD index e15dc8349eb..5b92af906f8 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -45,6 +45,7 @@ filegroup( "//vendor/github.com/aws/aws-sdk-go/service/sts:all-srcs", "//vendor/github.com/beorn7/perks/quantile:all-srcs", "//vendor/github.com/blang/semver:all-srcs", + "//vendor/github.com/boltdb/bolt:all-srcs", "//vendor/github.com/chai2010/gettext-go/gettext:all-srcs", "//vendor/github.com/cloudflare/cfssl/auth:all-srcs", "//vendor/github.com/cloudflare/cfssl/certdb:all-srcs", @@ -58,7 +59,6 @@ filegroup( "//vendor/github.com/cloudflare/cfssl/ocsp/config:all-srcs", "//vendor/github.com/cloudflare/cfssl/signer:all-srcs", "//vendor/github.com/clusterhq/flocker-go:all-srcs", - "//vendor/github.com/cockroachdb/cmux:all-srcs", "//vendor/github.com/codedellemc/goscaleio:all-srcs", "//vendor/github.com/codegangsta/negroni:all-srcs", "//vendor/github.com/container-storage-interface/spec/lib/go/csi:all-srcs", @@ -74,14 +74,12 @@ filegroup( "//vendor/github.com/containernetworking/cni/pkg/invoke:all-srcs", "//vendor/github.com/containernetworking/cni/pkg/types:all-srcs", "//vendor/github.com/containernetworking/cni/pkg/version:all-srcs", - "//vendor/github.com/coreos/bbolt:all-srcs", "//vendor/github.com/coreos/etcd/alarm:all-srcs", "//vendor/github.com/coreos/etcd/auth:all-srcs", "//vendor/github.com/coreos/etcd/client:all-srcs", "//vendor/github.com/coreos/etcd/clientv3:all-srcs", "//vendor/github.com/coreos/etcd/compactor:all-srcs", "//vendor/github.com/coreos/etcd/discovery:all-srcs", - "//vendor/github.com/coreos/etcd/embed:all-srcs", "//vendor/github.com/coreos/etcd/error:all-srcs", "//vendor/github.com/coreos/etcd/etcdserver:all-srcs", "//vendor/github.com/coreos/etcd/integration:all-srcs", @@ -89,10 +87,8 @@ filegroup( "//vendor/github.com/coreos/etcd/mvcc:all-srcs", 
"//vendor/github.com/coreos/etcd/pkg/adt:all-srcs", "//vendor/github.com/coreos/etcd/pkg/contention:all-srcs", - "//vendor/github.com/coreos/etcd/pkg/cors:all-srcs", "//vendor/github.com/coreos/etcd/pkg/cpuutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/crc:all-srcs", - "//vendor/github.com/coreos/etcd/pkg/debugutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/fileutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/httputil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/idutil:all-srcs", @@ -104,7 +100,6 @@ filegroup( "//vendor/github.com/coreos/etcd/pkg/pbutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/runtime:all-srcs", "//vendor/github.com/coreos/etcd/pkg/schedule:all-srcs", - "//vendor/github.com/coreos/etcd/pkg/srv:all-srcs", "//vendor/github.com/coreos/etcd/pkg/testutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/tlsutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/transport:all-srcs", @@ -216,7 +211,6 @@ filegroup( "//vendor/github.com/golang/mock/gomock:all-srcs", "//vendor/github.com/golang/protobuf/jsonpb:all-srcs", "//vendor/github.com/golang/protobuf/proto:all-srcs", - "//vendor/github.com/golang/protobuf/protoc-gen-go/descriptor:all-srcs", "//vendor/github.com/golang/protobuf/ptypes:all-srcs", "//vendor/github.com/google/btree:all-srcs", "//vendor/github.com/google/cadvisor/accelerators:all-srcs", @@ -273,6 +267,7 @@ filegroup( "//vendor/github.com/jteeuwen/go-bindata:all-srcs", "//vendor/github.com/juju/ratelimit:all-srcs", "//vendor/github.com/kardianos/osext:all-srcs", + "//vendor/github.com/karlseguin/ccache:all-srcs", "//vendor/github.com/kr/fs:all-srcs", "//vendor/github.com/kr/pretty:all-srcs", "//vendor/github.com/kr/pty:all-srcs", @@ -396,7 +391,6 @@ filegroup( "//vendor/google.golang.org/api/logging/v2beta1:all-srcs", "//vendor/google.golang.org/api/monitoring/v3:all-srcs", "//vendor/google.golang.org/api/pubsub/v1:all-srcs", - "//vendor/google.golang.org/genproto/googleapis/api/annotations:all-srcs", 
"//vendor/google.golang.org/genproto/googleapis/rpc/status:all-srcs", "//vendor/google.golang.org/grpc:all-srcs", "//vendor/gopkg.in/gcfg.v1:all-srcs", diff --git a/vendor/github.com/coreos/bbolt/.gitignore b/vendor/github.com/boltdb/bolt/.gitignore similarity index 65% rename from vendor/github.com/coreos/bbolt/.gitignore rename to vendor/github.com/boltdb/bolt/.gitignore index c2a8cfa788c..c7bd2b7a5b8 100644 --- a/vendor/github.com/coreos/bbolt/.gitignore +++ b/vendor/github.com/boltdb/bolt/.gitignore @@ -2,4 +2,3 @@ *.test *.swp /bin/ -cmd/bolt/bolt diff --git a/vendor/github.com/coreos/bbolt/BUILD b/vendor/github.com/boltdb/bolt/BUILD similarity index 96% rename from vendor/github.com/coreos/bbolt/BUILD rename to vendor/github.com/boltdb/bolt/BUILD index 78399c2f09a..d29a61e9df0 100644 --- a/vendor/github.com/coreos/bbolt/BUILD +++ b/vendor/github.com/boltdb/bolt/BUILD @@ -28,7 +28,7 @@ go_library( ], "//conditions:default": [], }), - importpath = "github.com/coreos/bbolt", + importpath = "github.com/boltdb/bolt", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/coreos/bbolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE similarity index 100% rename from vendor/github.com/coreos/bbolt/LICENSE rename to vendor/github.com/boltdb/bolt/LICENSE diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile new file mode 100644 index 00000000000..e035e63adcd --- /dev/null +++ b/vendor/github.com/boltdb/bolt/Makefile @@ -0,0 +1,18 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt + +test: + @go test -v -cover . 
+ @go test -v ./cmd/bolt + +.PHONY: fmt test diff --git a/vendor/github.com/coreos/bbolt/README.md b/vendor/github.com/boltdb/bolt/README.md similarity index 88% rename from vendor/github.com/coreos/bbolt/README.md rename to vendor/github.com/boltdb/bolt/README.md index 015f0efbe84..8523e337734 100644 --- a/vendor/github.com/coreos/bbolt/README.md +++ b/vendor/github.com/boltdb/bolt/README.md @@ -1,16 +1,6 @@ -bbolt +Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) ==== -[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/coreos/bbolt) -[![Coverage](https://codecov.io/gh/coreos/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/coreos/bbolt) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/bbolt) - -bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value -store. The purpose of this fork is to provide the Go community with an active -maintenance and development target for Bolt; the goal is improved reliability -and stability. bbolt includes bug fixes, performance enhancements, and features -not found in Bolt while preserving backwards compatibility with the Bolt API. - Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] [LMDB project][lmdb]. The goal of the project is to provide a simple, fast, and reliable database for projects that don't require a full database @@ -20,18 +10,16 @@ Since Bolt is meant to be used as such a low-level piece of functionality, simplicity is key. The API will be small and only focus on getting values and setting values. That's it. 
-[gh_ben]: https://github.com/benbjohnson -[bolt]: https://github.com/boltdb/bolt [hyc_symas]: https://twitter.com/hyc_symas [lmdb]: http://symas.com/mdb/ ## Project Status -Bolt is stable, the API is fixed, and the file format is fixed. Full unit -test coverage and randomized black box testing are used to ensure database -consistency and thread safety. Bolt is currently used in high-load production -environments serving databases as large as 1TB. Many companies such as -Shopify and Heroku use Bolt-backed services every day. +Bolt is stable and the API is fixed. Full unit test coverage and randomized +black box testing are used to ensure database consistency and thread safety. +Bolt is currently in high-load production environments serving databases as +large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed +services every day. ## Table of Contents @@ -71,7 +59,7 @@ Shopify and Heroku use Bolt-backed services every day. To start using Bolt, install Go and run `go get`: ```sh -$ go get github.com/coreos/bbolt/... +$ go get github.com/boltdb/bolt/... ``` This will retrieve the library and install the `bolt` command line utility into @@ -91,7 +79,7 @@ package main import ( "log" - bolt "github.com/coreos/bbolt" + "github.com/boltdb/bolt" ) func main() { @@ -221,7 +209,7 @@ and then safely close your transaction if an error is returned. This is the recommended way to use Bolt transactions. However, sometimes you may want to manually start and end your transactions. -You can use the `DB.Begin()` function directly but **please** be sure to close +You can use the `Tx.Begin()` function directly but **please** be sure to close the transaction. 
```go @@ -407,7 +395,7 @@ db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("MyBucket")).Cursor() prefix := []byte("1234") - for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { + for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { fmt.Printf("key=%s, value=%s\n", k, v) } @@ -460,10 +448,6 @@ db.View(func(tx *bolt.Tx) error { }) ``` -Please note that keys and values in `ForEach()` are only valid while -the transaction is open. If you need to use a key or value outside of -the transaction, you must use `copy()` to copy it to another byte -slice. ### Nested buckets @@ -476,55 +460,6 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) func (*Bucket) DeleteBucket(key []byte) error ``` -Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. - -```go - -// createUser creates a new user in the given account. -func createUser(accountID int, u *User) error { - // Start the transaction. - tx, err := db.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - // Retrieve the root bucket for the account. - // Assume this has already been created when the account was set up. - root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) - - // Setup the users bucket. - bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) - if err != nil { - return err - } - - // Generate an ID for the new user. - userID, err := bkt.NextSequence() - if err != nil { - return err - } - u.ID = userID - - // Marshal and save the encoded user. 
- if buf, err := json.Marshal(u); err != nil { - return err - } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { - return err - } - - // Commit the transaction. - if err := tx.Commit(); err != nil { - return err - } - - return nil -} - -``` - - - ### Database backups @@ -534,7 +469,7 @@ this from a read-only transaction, it will perform a hot backup and not block your other database reads and writes. By default, it will use a regular file handle which will utilize the operating -system's page cache. See the [`Tx`](https://godoc.org/github.com/coreos/bbolt#Tx) +system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) documentation for information about optimizing for larger-than-RAM datasets. One common use case is to backup over HTTP so you can use tools like `cURL` to @@ -780,9 +715,6 @@ Here are a few things to note when evaluating and using Bolt: can be reused by a new page or can be unmapped from virtual memory and you'll see an `unexpected fault address` panic when accessing it. -* Bolt uses an exclusive write lock on the database file so it cannot be - shared by multiple processes. - * Be careful when using `Bucket.FillPercent`. Setting a high fill percent for buckets that have random inserts will cause your database to have very poor page utilization. @@ -823,7 +755,7 @@ Here are a few things to note when evaluating and using Bolt: ## Reading the Source -Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, +Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, transactional key/value database so it can be a good starting point for people interested in how databases work. @@ -916,13 +848,5 @@ Below is a list of public, open source projects that use Bolt: * [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. 
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. -* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. -* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains -* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. -* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. -* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. -* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. -* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies -* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB If you are using Bolt in a project please send a pull request to add it to the list. 
diff --git a/vendor/github.com/coreos/bbolt/appveyor.yml b/vendor/github.com/boltdb/bolt/appveyor.yml similarity index 100% rename from vendor/github.com/coreos/bbolt/appveyor.yml rename to vendor/github.com/boltdb/bolt/appveyor.yml diff --git a/vendor/github.com/coreos/bbolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go similarity index 72% rename from vendor/github.com/coreos/bbolt/bolt_386.go rename to vendor/github.com/boltdb/bolt/bolt_386.go index 820d533c15f..e659bfb91f3 100644 --- a/vendor/github.com/coreos/bbolt/bolt_386.go +++ b/vendor/github.com/boltdb/bolt/bolt_386.go @@ -5,6 +5,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go similarity index 73% rename from vendor/github.com/coreos/bbolt/bolt_amd64.go rename to vendor/github.com/boltdb/bolt/bolt_amd64.go index 98fafdb47d8..cca6b7eb707 100644 --- a/vendor/github.com/coreos/bbolt/bolt_amd64.go +++ b/vendor/github.com/boltdb/bolt/bolt_amd64.go @@ -5,6 +5,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_mipsx.go b/vendor/github.com/boltdb/bolt/bolt_arm.go similarity index 55% rename from vendor/github.com/coreos/bbolt/bolt_mipsx.go rename to vendor/github.com/boltdb/bolt/bolt_arm.go index d5ecb0597e4..e659bfb91f3 100644 --- a/vendor/github.com/coreos/bbolt/bolt_mipsx.go +++ b/vendor/github.com/boltdb/bolt/bolt_arm.go @@ -1,12 +1,7 @@ -// +build mips mipsle - package bolt // maxMapSize represents the largest mmap size supported by Bolt. 
-const maxMapSize = 0x40000000 // 1GB +const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go similarity index 74% rename from vendor/github.com/coreos/bbolt/bolt_arm64.go rename to vendor/github.com/boltdb/bolt/bolt_arm64.go index b26d84f91ba..6d2309352e0 100644 --- a/vendor/github.com/coreos/bbolt/bolt_arm64.go +++ b/vendor/github.com/boltdb/bolt/bolt_arm64.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go similarity index 100% rename from vendor/github.com/coreos/bbolt/bolt_linux.go rename to vendor/github.com/boltdb/bolt/bolt_linux.go diff --git a/vendor/github.com/coreos/bbolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go similarity index 100% rename from vendor/github.com/coreos/bbolt/bolt_openbsd.go rename to vendor/github.com/boltdb/bolt/bolt_openbsd.go diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go similarity index 100% rename from vendor/github.com/coreos/bbolt/bolt_ppc.go rename to vendor/github.com/boltdb/bolt/bolt_ppc.go diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go similarity index 74% rename from vendor/github.com/coreos/bbolt/bolt_ppc64.go rename to vendor/github.com/boltdb/bolt/bolt_ppc64.go index 9331d9771eb..2dc6be02e3e 100644 --- a/vendor/github.com/coreos/bbolt/bolt_ppc64.go +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF 
// 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go similarity index 75% rename from vendor/github.com/coreos/bbolt/bolt_ppc64le.go rename to vendor/github.com/boltdb/bolt/bolt_ppc64le.go index 8c143bc5d19..8351e129f6a 100644 --- a/vendor/github.com/coreos/bbolt/bolt_ppc64le.go +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go similarity index 74% rename from vendor/github.com/coreos/bbolt/bolt_s390x.go rename to vendor/github.com/boltdb/bolt/bolt_s390x.go index d7c39af9253..f4dd26bbba7 100644 --- a/vendor/github.com/coreos/bbolt/bolt_s390x.go +++ b/vendor/github.com/boltdb/bolt/bolt_s390x.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go similarity index 80% rename from vendor/github.com/coreos/bbolt/bolt_unix.go rename to vendor/github.com/boltdb/bolt/bolt_unix.go index 06592a08089..cad62dda1e3 100644 --- a/vendor/github.com/coreos/bbolt/bolt_unix.go +++ b/vendor/github.com/boltdb/bolt/bolt_unix.go @@ -13,32 +13,29 @@ import ( // flock acquires an advisory lock on a file descriptor. 
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - flag := syscall.LOCK_NB - if exclusive { - flag |= syscall.LOCK_EX - } else { - flag |= syscall.LOCK_SH - } for { - // Attempt to obtain an exclusive lock. - err := syscall.Flock(int(fd), flag) + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + flag := syscall.LOCK_SH + if exclusive { + flag = syscall.LOCK_EX + } + + // Otherwise attempt to obtain an exclusive lock. + err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) if err == nil { return nil } else if err != syscall.EWOULDBLOCK { return err } - // If we timed out then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) + time.Sleep(50 * time.Millisecond) } } diff --git a/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go similarity index 75% rename from vendor/github.com/coreos/bbolt/bolt_unix_solaris.go rename to vendor/github.com/boltdb/bolt/bolt_unix_solaris.go index fd8335ecc96..307bf2b3ee9 100644 --- a/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go +++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -13,33 +13,34 @@ import ( // flock acquires an advisory lock on a file descriptor. func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - var lockType int16 - if exclusive { - lockType = syscall.F_WRLCK - } else { - lockType = syscall.F_RDLCK - } for { - // Attempt to obtain an exclusive lock. 
- lock := syscall.Flock_t{Type: lockType} - err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Whence = 0 + lock.Pid = 0 + if exclusive { + lock.Type = syscall.F_WRLCK + } else { + lock.Type = syscall.F_RDLCK + } + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) if err == nil { return nil } else if err != syscall.EAGAIN { return err } - // If we timed out then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) + time.Sleep(50 * time.Millisecond) } } diff --git a/vendor/github.com/coreos/bbolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go similarity index 88% rename from vendor/github.com/coreos/bbolt/bolt_windows.go rename to vendor/github.com/boltdb/bolt/bolt_windows.go index ca6f9a11c24..d538e6afd77 100644 --- a/vendor/github.com/coreos/bbolt/bolt_windows.go +++ b/vendor/github.com/boltdb/bolt/bolt_windows.go @@ -59,30 +59,29 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro db.lockfile = f var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := f.Fd() - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } for { - // Attempt to obtain an exclusive lock. - err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{}) + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. 
+ if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) if err == nil { return nil } else if err != errLockViolation { return err } - // If we timed oumercit then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) + time.Sleep(50 * time.Millisecond) } } @@ -90,7 +89,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro func funlock(db *DB) error { err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) db.lockfile.Close() - os.Remove(db.path + lockExt) + os.Remove(db.path+lockExt) return err } diff --git a/vendor/github.com/coreos/bbolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go similarity index 100% rename from vendor/github.com/coreos/bbolt/boltsync_unix.go rename to vendor/github.com/boltdb/bolt/boltsync_unix.go diff --git a/vendor/github.com/coreos/bbolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go similarity index 95% rename from vendor/github.com/coreos/bbolt/bucket.go rename to vendor/github.com/boltdb/bolt/bucket.go index 44db88b8abd..d2f8c524e42 100644 --- a/vendor/github.com/coreos/bbolt/bucket.go +++ b/vendor/github.com/boltdb/bolt/bucket.go @@ -14,6 +14,13 @@ const ( MaxValueSize = (1 << 31) - 2 ) +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) const ( @@ -123,17 +130,9 @@ func (b *Bucket) Bucket(name []byte) *Bucket { func (b *Bucket) openBucket(value []byte) *Bucket { var child = newBucket(b.tx) - // If unaligned load/stores are broken on this arch and value is - // unaligned simply clone to an 
aligned byte array. - unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 - - if unaligned { - value = cloneBytes(value) - } - // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. - if b.tx.writable && !unaligned { + if b.tx.writable { child.bucket = &bucket{} *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) } else { @@ -168,8 +167,9 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if bytes.Equal(key, k) { if (flags & bucketLeafFlag) != 0 { return nil, ErrBucketExists + } else { + return nil, ErrIncompatibleValue } - return nil, ErrIncompatibleValue } // Create empty, inline bucket. @@ -316,12 +316,7 @@ func (b *Bucket) Delete(key []byte) error { // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) - - // Return nil if the key doesn't exist. - if !bytes.Equal(key, k) { - return nil - } + _, _, flags := c.seek(key) // Return an error if there is already existing bucket value. if (flags & bucketLeafFlag) != 0 { @@ -334,28 +329,6 @@ func (b *Bucket) Delete(key []byte) error { return nil } -// Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } - -// SetSequence updates the sequence number for the bucket. -func (b *Bucket) SetSequence(v uint64) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence = v - return nil -} - // NextSequence returns an autoincrementing integer for the bucket. 
func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { diff --git a/vendor/github.com/coreos/bbolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go similarity index 100% rename from vendor/github.com/coreos/bbolt/cursor.go rename to vendor/github.com/boltdb/bolt/cursor.go diff --git a/vendor/github.com/coreos/bbolt/db.go b/vendor/github.com/boltdb/bolt/db.go similarity index 85% rename from vendor/github.com/coreos/bbolt/db.go rename to vendor/github.com/boltdb/bolt/db.go index 4c8c156b23e..1223493ca7b 100644 --- a/vendor/github.com/coreos/bbolt/db.go +++ b/vendor/github.com/boltdb/bolt/db.go @@ -7,7 +7,8 @@ import ( "log" "os" "runtime" - "sort" + "runtime/debug" + "strings" "sync" "time" "unsafe" @@ -22,8 +23,6 @@ const version = 2 // Represents a marker value to indicate that a file is a Bolt DB. const magic uint32 = 0xED0CDAED -const pgidNoFreelist pgid = 0xffffffffffffffff - // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes @@ -40,9 +39,6 @@ const ( // default page size for db is set to the OS page size. var defaultPageSize = os.Getpagesize() -// The time elapsed between consecutive file locking attempts. -const flockRetryTimeout = 50 * time.Millisecond - // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. @@ -65,11 +61,6 @@ type DB struct { // THIS IS UNSAFE. PLEASE USE WITH CAUTION. NoSync bool - // When true, skips syncing freelist to disk. This improves the database - // write performance under normal operation, but requires a full database - // re-sync during recovery. - NoFreelistSync bool - // When true, skips the truncate call when growing the database. 
// Setting this to true is only safe on non-ext3/ext4 systems. // Skipping truncation avoids preallocation of hard drive space and @@ -116,11 +107,9 @@ type DB struct { opened bool rwtx *Tx txs []*Tx + freelist *freelist stats Stats - freelist *freelist - freelistLoad sync.Once - pagePool sync.Pool batchMu sync.Mutex @@ -159,17 +148,14 @@ func (db *DB) String() string { // If the file does not exist then it will be created automatically. // Passing in nil options will cause Bolt to open the database with the default options. func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - db := &DB{ - opened: true, - } + var db = &DB{opened: true} + // Set default options if no options are provided. if options == nil { options = DefaultOptions } - db.NoSync = options.NoSync db.NoGrowSync = options.NoGrowSync db.MmapFlags = options.MmapFlags - db.NoFreelistSync = options.NoFreelistSync // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize @@ -198,7 +184,6 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { - db.lockfile = nil // make 'unused' happy. TODO: rework locks _ = db.close() return nil, err } @@ -206,11 +191,6 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Default values for test hooks db.ops.writeAt = db.file.WriteAt - if db.pageSize = options.PageSize; db.pageSize == 0 { - // Set the default page size to the OS page size. - db.pageSize = defaultPageSize - } - // Initialize the database if it doesn't exist. if info, err := db.file.Stat(); err != nil { return nil, err @@ -222,21 +202,20 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // Read the first meta page to determine the page size. 
var buf [0x1000]byte - // If we can't read the page size, but can read a page, assume - // it's the same as the OS or one given -- since that's how the - // page size was chosen in the first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - // - // TODO: scan for next page - if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { + if _, err := db.file.ReadAt(buf[:], 0); err == nil { + m := db.pageInBuffer(buf[:], 0).meta() + if err := m.validate(); err != nil { + // If we can't read the page size, we can assume it's the same + // as the OS -- since that's how the page size was chosen in the + // first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + db.pageSize = os.Getpagesize() + } else { db.pageSize = int(m.pageSize) } - } else { - return nil, ErrInvalid } } @@ -253,50 +232,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { return nil, err } - if db.readOnly { - return db, nil - } - - db.loadFreelist() - - // Flush freelist when transitioning from no sync to sync so - // NoFreelistSync unaware boltdb can open the db later. - if !db.NoFreelistSync && !db.hasSyncedFreelist() { - tx, err := db.Begin(true) - if tx != nil { - err = tx.Commit() - } - if err != nil { - _ = db.close() - return nil, err - } - } + // Read in the freelist. + db.freelist = newFreelist() + db.freelist.read(db.page(db.meta().freelist)) // Mark the database as opened and return. return db, nil } -// loadFreelist reads the freelist if it is synced, or reconstructs it -// by scanning the DB if it is not synced. It assumes there are no -// concurrent accesses being made to the freelist. 
-func (db *DB) loadFreelist() { - db.freelistLoad.Do(func() { - db.freelist = newFreelist() - if !db.hasSyncedFreelist() { - // Reconstruct free list by scanning the DB. - db.freelist.readIDs(db.freepages()) - } else { - // Read free list from freelist page. - db.freelist.read(db.page(db.meta().freelist)) - } - db.stats.FreePageN = len(db.freelist.ids) - }) -} - -func (db *DB) hasSyncedFreelist() bool { - return db.meta().freelist != pgidNoFreelist -} - // mmap opens the underlying memory-mapped file and initializes the meta references. // minsz is the minimum size that the new mmap can be. func (db *DB) mmap(minsz int) error { @@ -398,6 +341,9 @@ func (db *DB) mmapSize(size int) (int, error) { // init creates a new database file and initializes its meta pages. func (db *DB) init() error { + // Set the page size to the OS page size. + db.pageSize = os.Getpagesize() + // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { @@ -580,36 +526,21 @@ func (db *DB) beginRWTx() (*Tx, error) { t := &Tx{writable: true} t.init(db) db.rwtx = t - db.freePages() - return t, nil -} -// freePages releases any pages associated with closed read-only transactions. -func (db *DB) freePages() { - // Free all pending pages prior to earliest open transaction. - sort.Sort(txsById(db.txs)) - minid := txid(0xFFFFFFFFFFFFFFFF) - if len(db.txs) > 0 { - minid = db.txs[0].meta.txid + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } } if minid > 0 { db.freelist.release(minid - 1) } - // Release unused txid extents. - for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.txid-1) - minid = t.meta.txid + 1 - } - db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) - // Any page both allocated and freed in an extent is safe to release. 
+ + return t, nil } -type txsById []*Tx - -func (t txsById) Len() int { return len(t) } -func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } - // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -621,10 +552,7 @@ func (db *DB) removeTx(tx *Tx) { // Remove the transaction. for i, t := range db.txs { if t == tx { - last := len(db.txs) - 1 - db.txs[i] = db.txs[last] - db.txs[last] = nil - db.txs = db.txs[:last] + db.txs = append(db.txs[:i], db.txs[i+1:]...) break } } @@ -702,7 +630,11 @@ func (db *DB) View(fn func(*Tx) error) error { return err } - return t.Rollback() + if err := t.Rollback(); err != nil { + return err + } + + return nil } // Batch calls fn as part of a batch. It behaves similar to Update, @@ -802,7 +734,9 @@ retry: // pass success, or bolt internal errors, to all callers for _, c := range b.calls { - c.err <- err + if c.err != nil { + c.err <- err + } } break retry } @@ -889,7 +823,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(txid txid, count int) (*page, error) { +func (db *DB) allocate(count int) (*page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -901,7 +835,7 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { p.overflow = uint32(count - 1) // Use pages from the freelist if they are available. 
- if p.id = db.freelist.allocate(txid, count); p.id != 0 { + if p.id = db.freelist.allocate(count); p.id != 0 { return p, nil } @@ -956,38 +890,6 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } -func (db *DB) freepages() []pgid { - tx, err := db.beginTx() - defer func() { - err = tx.Rollback() - if err != nil { - panic("freepages: failed to rollback tx") - } - }() - if err != nil { - panic("freepages: failed to open read only tx") - } - - reachable := make(map[pgid]*page) - nofreed := make(map[pgid]bool) - ech := make(chan error) - go func() { - for e := range ech { - panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) - } - }() - tx.checkBucket(&tx.root, reachable, nofreed, ech) - close(ech) - - var fids []pgid - for i := pgid(2); i < db.meta().pgid; i++ { - if _, ok := reachable[i]; !ok { - fids = append(fids, i) - } - } - return fids -} - // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. @@ -998,10 +900,6 @@ type Options struct { // Sets the DB.NoGrowSync flag before memory mapping the file. NoGrowSync bool - // Do not sync freelist to disk. This improves the database write performance - // under normal operation, but requires a full database re-sync during recovery. - NoFreelistSync bool - // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool @@ -1018,14 +916,6 @@ type Options struct { // If initialMmapSize is smaller than the previous database size, // it takes no effect. InitialMmapSize int - - // PageSize overrides the default OS page size. - PageSize int - - // NoSync sets the initial value of DB.NoSync. Normally this can just be - // set directly on the DB itself when returned from Open(), but this option - // is useful in APIs which expose Options but not the underlying DB. 
- NoSync bool } // DefaultOptions represent the options used if nil options are passed into Open(). @@ -1062,11 +952,15 @@ func (s *Stats) Sub(other *Stats) Stats { diff.PendingPageN = s.PendingPageN diff.FreeAlloc = s.FreeAlloc diff.FreelistInuse = s.FreelistInuse - diff.TxN = s.TxN - other.TxN + diff.TxN = other.TxN - s.TxN diff.TxStats = s.TxStats.Sub(&other.TxStats) return diff } +func (s *Stats) add(other *Stats) { + s.TxStats.add(&other.TxStats) +} + type Info struct { Data uintptr PageSize int @@ -1105,8 +999,7 @@ func (m *meta) copy(dest *meta) { func (m *meta) write(p *page) { if m.root.root >= m.pgid { panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { - // TODO: reject pgidNoFreeList if !NoFreelistSync + } else if m.freelist >= m.pgid { panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) } @@ -1133,3 +1026,11 @@ func _assert(condition bool, msg string, v ...interface{}) { panic(fmt.Sprintf("assertion failed: "+msg, v...)) } } + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) 
} + +func printstack() { + stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") + fmt.Fprintln(os.Stderr, stack) +} diff --git a/vendor/github.com/coreos/bbolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go similarity index 100% rename from vendor/github.com/coreos/bbolt/doc.go rename to vendor/github.com/boltdb/bolt/doc.go diff --git a/vendor/github.com/coreos/bbolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go similarity index 100% rename from vendor/github.com/coreos/bbolt/errors.go rename to vendor/github.com/boltdb/bolt/errors.go diff --git a/vendor/github.com/coreos/bbolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go similarity index 56% rename from vendor/github.com/coreos/bbolt/freelist.go rename to vendor/github.com/boltdb/bolt/freelist.go index 266f1542945..1b7ba91b2a5 100644 --- a/vendor/github.com/coreos/bbolt/freelist.go +++ b/vendor/github.com/boltdb/bolt/freelist.go @@ -6,40 +6,25 @@ import ( "unsafe" ) -// txPending holds a list of pgids and corresponding allocation txns -// that are pending to be freed. -type txPending struct { - ids []pgid - alloctx []txid // txids allocating the ids - lastReleaseBegin txid // beginning txid of last matching releaseRange -} - // freelist represents a list of all pages that are available for allocation. // It also tracks pages that have been freed but are still in use by open transactions. type freelist struct { - ids []pgid // all free and available free page ids. - allocs map[pgid]txid // mapping of txid that allocated a pgid. - pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. + ids []pgid // all free and available free page ids. + pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. } // newFreelist returns an empty, initialized freelist. 
func newFreelist() *freelist { return &freelist{ - allocs: make(map[pgid]txid), - pending: make(map[txid]*txPending), + pending: make(map[txid][]pgid), cache: make(map[pgid]bool), } } // size returns the size of the page after serialization. func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. - n++ - } - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) } // count returns count of pages on the freelist @@ -55,26 +40,27 @@ func (f *freelist) free_count() int { // pending_count returns count of pending pages func (f *freelist) pending_count() int { var count int - for _, txp := range f.pending { - count += len(txp.ids) + for _, list := range f.pending { + count += len(list) } return count } -// copyall copies into dst a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) +// all returns a list of all free ids and all pending ids in one sorted list. +func (f *freelist) all() []pgid { + m := make(pgids, 0) + + for _, list := range f.pending { + m = append(m, list...) } + sort.Sort(m) - mergepgids(dst, f.ids, m) + return pgids(f.ids).merge(m) } // allocate returns the starting page id of a contiguous list of pages of a given size. // If a contiguous block cannot be found then 0 is returned. 
-func (f *freelist) allocate(txid txid, n int) pgid { +func (f *freelist) allocate(n int) pgid { if len(f.ids) == 0 { return 0 } @@ -107,7 +93,7 @@ func (f *freelist) allocate(txid txid, n int) pgid { for i := pgid(0); i < pgid(n); i++ { delete(f.cache, initial+i) } - f.allocs[initial] = txid + return initial } @@ -124,73 +110,28 @@ func (f *freelist) free(txid txid, p *page) { } // Free page and all its overflow pages. - txp := f.pending[txid] - if txp == nil { - txp = &txPending{} - f.pending[txid] = txp - } - allocTxid, ok := f.allocs[p.id] - if ok { - delete(f.allocs, p.id) - } else if (p.flags & freelistPageFlag) != 0 { - // Freelist is always allocated by prior tx. - allocTxid = txid - 1 - } - + var ids = f.pending[txid] for id := p.id; id <= p.id+pgid(p.overflow); id++ { // Verify that page is not already free. if f.cache[id] { panic(fmt.Sprintf("page %d already freed", id)) } + // Add to the freelist and cache. - txp.ids = append(txp.ids, id) - txp.alloctx = append(txp.alloctx, allocTxid) + ids = append(ids, id) f.cache[id] = true } + f.pending[txid] = ids } // release moves all page ids for a transaction id (or older) to the freelist. func (f *freelist) release(txid txid) { m := make(pgids, 0) - for tid, txp := range f.pending { + for tid, ids := range f.pending { if tid <= txid { // Move transaction's pending pages to the available freelist. // Don't remove from the cache since the page is still free. - m = append(m, txp.ids...) - delete(f.pending, tid) - } - } - sort.Sort(m) - f.ids = pgids(f.ids).merge(m) -} - -// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end txid) { - if begin > end { - return - } - var m pgids - for tid, txp := range f.pending { - if tid < begin || tid > end { - continue - } - // Don't recompute freed pages if ranges haven't updated. 
- if txp.lastReleaseBegin == begin { - continue - } - for i := 0; i < len(txp.ids); i++ { - if atx := txp.alloctx[i]; atx < begin || atx > end { - continue - } - m = append(m, txp.ids[i]) - txp.ids[i] = txp.ids[len(txp.ids)-1] - txp.ids = txp.ids[:len(txp.ids)-1] - txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] - txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] - i-- - } - txp.lastReleaseBegin = begin - if len(txp.ids) == 0 { + m = append(m, ids...) delete(f.pending, tid) } } @@ -201,29 +142,12 @@ func (f *freelist) releaseRange(begin, end txid) { // rollback removes the pages from a given pending tx. func (f *freelist) rollback(txid txid) { // Remove page ids from cache. - txp := f.pending[txid] - if txp == nil { - return + for _, id := range f.pending[txid] { + delete(f.cache, id) } - var m pgids - for i, pgid := range txp.ids { - delete(f.cache, pgid) - tx := txp.alloctx[i] - if tx == 0 { - continue - } - if tx != txid { - // Pending free aborted; restore page back to alloc list. - f.allocs[pgid] = tx - } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) - } - } - // Remove pages from pending list and mark as free if allocated by txid. + + // Remove pages from pending list. delete(f.pending, txid) - sort.Sort(m) - f.ids = pgids(f.ids).merge(m) } // freed returns whether a given page is in the free list. @@ -233,9 +157,6 @@ func (f *freelist) freed(pgid pgid) bool { // read initializes the freelist from a freelist page. func (f *freelist) read(p *page) { - if (p.flags & freelistPageFlag) == 0 { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) - } // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. 
idx, count := 0, int(p.count) @@ -248,7 +169,7 @@ func (f *freelist) read(p *page) { if count == 0 { f.ids = nil } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] f.ids = make([]pgid, len(ids)) copy(f.ids, ids) @@ -260,33 +181,27 @@ func (f *freelist) read(p *page) { f.reindex() } -// read initializes the freelist from a given list of ids. -func (f *freelist) readIDs(ids []pgid) { - f.ids = ids - f.reindex() -} - // write writes the page ids onto a freelist page. All free and pending ids are // saved to disk since in the event of a program crash, all pending ids will // become free. func (f *freelist) write(p *page) error { // Combine the old free pgids and pgids waiting on an open transaction. + ids := f.all() // Update the header flag. p.flags |= freelistPageFlag // The page.count can only hold up to 64k elements so if we overflow that // number then we handle it by putting the size in the first element. - lenids := f.count() - if lenids == 0 { - p.count = uint16(lenids) - } else if lenids < 0xFFFF { - p.count = uint16(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) + if len(ids) == 0 { + p.count = uint16(len(ids)) + } else if len(ids) < 0xFFFF { + p.count = uint16(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) } else { p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) } return nil @@ -298,8 +213,8 @@ func (f *freelist) reload(p *page) { // Build a cache of only pending pages. 
pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { pcache[pendingID] = true } } @@ -321,12 +236,12 @@ func (f *freelist) reload(p *page) { // reindex rebuilds the free cache based on available and pending free lists. func (f *freelist) reindex() { - f.cache = make(map[pgid]bool, len(f.ids)) + f.cache = make(map[pgid]bool) for _, id := range f.ids { f.cache[id] = true } - for _, txp := range f.pending { - for _, pendingID := range txp.ids { + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { f.cache[pendingID] = true } } diff --git a/vendor/github.com/coreos/bbolt/node.go b/vendor/github.com/boltdb/bolt/node.go similarity index 99% rename from vendor/github.com/coreos/bbolt/node.go rename to vendor/github.com/boltdb/bolt/node.go index f4ce240eddd..159318b229c 100644 --- a/vendor/github.com/coreos/bbolt/node.go +++ b/vendor/github.com/boltdb/bolt/node.go @@ -365,7 +365,7 @@ func (n *node) spill() error { } // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) if err != nil { return err } diff --git a/vendor/github.com/coreos/bbolt/page.go b/vendor/github.com/boltdb/bolt/page.go similarity index 88% rename from vendor/github.com/coreos/bbolt/page.go rename to vendor/github.com/boltdb/bolt/page.go index cde403ae86d..7651a6bf7d9 100644 --- a/vendor/github.com/coreos/bbolt/page.go +++ b/vendor/github.com/boltdb/bolt/page.go @@ -145,33 +145,12 @@ func (a pgids) merge(b pgids) pgids { // Return the opposite slice if one is nil. if len(a) == 0 { return b - } - if len(b) == 0 { + } else if len(b) == 0 { return a } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. 
-func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] + // Create a list to hold all elements from both lists. + merged := make(pgids, 0, len(a)+len(b)) // Assign lead to the slice with a lower starting value, follow to the higher value. lead, follow := a, b @@ -193,5 +172,7 @@ func mergepgids(dst, a, b pgids) { } // Append what's left in follow. - _ = append(merged, follow...) + merged = append(merged, follow...) + + return merged } diff --git a/vendor/github.com/coreos/bbolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go similarity index 94% rename from vendor/github.com/coreos/bbolt/tx.go rename to vendor/github.com/boltdb/bolt/tx.go index 5c0290733f5..1cfb4cde855 100644 --- a/vendor/github.com/coreos/bbolt/tx.go +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -126,7 +126,10 @@ func (tx *Tx) DeleteBucket(name []byte) error { // the error is returned to the caller. func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { return tx.root.ForEach(func(k, v []byte) error { - return fn(k, tx.root.Bucket(k)) + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil }) } @@ -166,18 +169,28 @@ func (tx *Tx) Commit() error { // Free the old root bucket. tx.meta.root.root = tx.root.root - // Free the old freelist because commit writes out a fresh freelist. - if tx.meta.freelist != pgidNoFreelist { - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - } + opgid := tx.meta.pgid - if !tx.db.NoFreelistSync { - err := tx.commitFreelist() - if err != nil { + // Free the freelist and allocate new pages for it. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). 
+ tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() return err } - } else { - tx.meta.freelist = pgidNoFreelist } // Write dirty pages to disk. @@ -222,31 +235,6 @@ func (tx *Tx) Commit() error { return nil } -func (tx *Tx) commitFreelist() error { - // Allocate new pages for the new free list. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). - opgid := tx.meta.pgid - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err - } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() - return err - } - } - - return nil -} - // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. func (tx *Tx) Rollback() error { @@ -317,11 +305,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { if err != nil { return 0, err } - defer func() { - if cerr := f.Close(); err == nil { - err = cerr - } - }() + defer func() { _ = f.Close() }() // Generate a meta page. We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) @@ -349,7 +333,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Move past the meta pages in the file. 
- if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { return n, fmt.Errorf("seek: %s", err) } @@ -360,7 +344,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, err } - return n, nil + return n, f.Close() } // CopyFile copies the entire database to file at the given path. @@ -395,14 +379,9 @@ func (tx *Tx) Check() <-chan error { } func (tx *Tx) check(ch chan error) { - // Force loading free list if opened in ReadOnly mode. - tx.db.loadFreelist() - // Check if any pages are double freed. freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) - for _, id := range all { + for _, id := range tx.db.freelist.all() { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) } @@ -413,10 +392,8 @@ func (tx *Tx) check(ch chan error) { reachable := make(map[pgid]*page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) } // Recursively check buckets. @@ -474,7 +451,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo // allocate returns a contiguous block of memory starting at a given page. func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(tx.meta.txid, count) + p, err := tx.db.allocate(count) if err != nil { return nil, err } @@ -483,7 +460,7 @@ func (tx *Tx) allocate(count int) (*page, error) { tx.pages[p.id] = p // Update statistics. 
- tx.stats.PageCount += count + tx.stats.PageCount++ tx.stats.PageAlloc += count * tx.db.pageSize return p, nil diff --git a/vendor/github.com/cockroachdb/cmux/.gitignore b/vendor/github.com/cockroachdb/cmux/.gitignore deleted file mode 100644 index daf913b1b34..00000000000 --- a/vendor/github.com/cockroachdb/cmux/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/cockroachdb/cmux/.travis.yml b/vendor/github.com/cockroachdb/cmux/.travis.yml deleted file mode 100644 index e73780f2eb0..00000000000 --- a/vendor/github.com/cockroachdb/cmux/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - -gobuild_args: -race - -before_install: - - go get -u github.com/golang/lint/golint - - if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then go get -u github.com/kisielk/errcheck; fi - - go get -u golang.org/x/tools/cmd/vet - -before_script: - - '! gofmt -s -l . | read' - - golint ./... - - echo $TRAVIS_GO_VERSION - - if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then errcheck ./...; fi - - go vet . - - go tool vet --shadow . 
diff --git a/vendor/github.com/cockroachdb/cmux/BUILD b/vendor/github.com/cockroachdb/cmux/BUILD deleted file mode 100644 index b8a9413ba38..00000000000 --- a/vendor/github.com/cockroachdb/cmux/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "buffer.go", - "cmux.go", - "matchers.go", - "patricia.go", - ], - importpath = "github.com/cockroachdb/cmux", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/http2:go_default_library", - "//vendor/golang.org/x/net/http2/hpack:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/cockroachdb/cmux/LICENSE b/vendor/github.com/cockroachdb/cmux/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/github.com/cockroachdb/cmux/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/cockroachdb/cmux/README.md b/vendor/github.com/cockroachdb/cmux/README.md deleted file mode 100644 index b3713da5876..00000000000 --- a/vendor/github.com/cockroachdb/cmux/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# cmux: Connection Mux [![Build Status](https://travis-ci.org/cockroachdb/cmux.svg?branch=master)](https://travis-ci.org/cockroachdb/cmux) [![GoDoc](https://godoc.org/github.com/cockroachdb/cmux?status.svg)](https://godoc.org/github.com/cockroachdb/cmux) - -cmux is a generic Go library to multiplex connections based on their payload. -Using cmux, you can serve gRPC, SSH, HTTPS, HTTP, Go RPC, and pretty much any -other protocol on the same TCP listener. - -## How-To -Simply create your main listener, create a cmux for that listener, -and then match connections: -```go -// Create the main listener. -l, err := net.Listen("tcp", ":23456") -if err != nil { - log.Fatal(err) -} - -// Create a cmux. -m := cmux.New(l) - -// Match connections in order: -// First grpc, then HTTP, and otherwise Go RPC/TCP. -grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc")) -httpL := m.Match(cmux.HTTP1Fast()) -trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched. - -// Create your protocol servers. 
-grpcS := grpc.NewServer() -grpchello.RegisterGreeterServer(grpcs, &server{}) - -httpS := &http.Server{ - Handler: &helloHTTP1Handler{}, -} - -trpcS := rpc.NewServer() -s.Register(&ExampleRPCRcvr{}) - -// Use the muxed listeners for your servers. -go grpcS.Serve(grpcL) -go httpS.Serve(httpL) -go trpcS.Accept(trpcL) - -// Start serving! -m.Serve() -``` - -There are [more examples on GoDoc](https://godoc.org/github.com/cockroachdb/cmux#pkg-examples). - -## Performance -Since we are only matching the very first bytes of a connection, the -performance overhead on long-lived connections (i.e., RPCs and pipelined HTTP -streams) is negligible. - -## Limitations -* *TLS*: `net/http` uses a [type assertion](https://github.com/golang/go/issues/14221) -to identify TLS connections; since cmux's lookahead-implementing connection -wraps the underlying TLS connection, this type assertion fails. This means you -can serve HTTPS using cmux but `http.Request.TLS` will not be set in your -handlers. If you are able to wrap TLS around cmux, you can work around this -limitation. See https://github.com/cockroachdb/cockroach/commit/83caba2 for an -example of this approach. - -* *Different Protocols on The Same Connection*: `cmux` matches the connection -when it's accepted. For example, one connection can be either gRPC or REST, but -not both. That is, we assume that a client connection is either used for gRPC -or REST. diff --git a/vendor/github.com/cockroachdb/cmux/buffer.go b/vendor/github.com/cockroachdb/cmux/buffer.go deleted file mode 100644 index 5c178585363..00000000000 --- a/vendor/github.com/cockroachdb/cmux/buffer.go +++ /dev/null @@ -1,35 +0,0 @@ -package cmux - -import ( - "bytes" - "io" -) - -// bufferedReader is an optimized implementation of io.Reader that behaves like -// ``` -// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer)) -// ``` -// without allocating. 
-type bufferedReader struct { - source io.Reader - buffer *bytes.Buffer - bufferRead int - bufferSize int -} - -func (s *bufferedReader) Read(p []byte) (int, error) { - // Functionality of bytes.Reader. - bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize]) - s.bufferRead += bn - - p = p[bn:] - - // Funtionality of io.TeeReader. - sn, sErr := s.source.Read(p) - if sn > 0 { - if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil { - return bn + wn, wErr - } - } - return bn + sn, sErr -} diff --git a/vendor/github.com/cockroachdb/cmux/cmux.go b/vendor/github.com/cockroachdb/cmux/cmux.go deleted file mode 100644 index 89cc910b024..00000000000 --- a/vendor/github.com/cockroachdb/cmux/cmux.go +++ /dev/null @@ -1,210 +0,0 @@ -package cmux - -import ( - "bytes" - "fmt" - "io" - "net" - "sync" -) - -// Matcher matches a connection based on its content. -type Matcher func(io.Reader) bool - -// ErrorHandler handles an error and returns whether -// the mux should continue serving the listener. -type ErrorHandler func(error) bool - -var _ net.Error = ErrNotMatched{} - -// ErrNotMatched is returned whenever a connection is not matched by any of -// the matchers registered in the multiplexer. -type ErrNotMatched struct { - c net.Conn -} - -func (e ErrNotMatched) Error() string { - return fmt.Sprintf("mux: connection %v not matched by an matcher", - e.c.RemoteAddr()) -} - -// Temporary implements the net.Error interface. -func (e ErrNotMatched) Temporary() bool { return true } - -// Timeout implements the net.Error interface. -func (e ErrNotMatched) Timeout() bool { return false } - -type errListenerClosed string - -func (e errListenerClosed) Error() string { return string(e) } -func (e errListenerClosed) Temporary() bool { return false } -func (e errListenerClosed) Timeout() bool { return false } - -// ErrListenerClosed is returned from muxListener.Accept when the underlying -// listener is closed. 
-var ErrListenerClosed = errListenerClosed("mux: listener closed") - -// New instantiates a new connection multiplexer. -func New(l net.Listener) CMux { - return &cMux{ - root: l, - bufLen: 1024, - errh: func(_ error) bool { return true }, - donec: make(chan struct{}), - } -} - -// CMux is a multiplexer for network connections. -type CMux interface { - // Match returns a net.Listener that sees (i.e., accepts) only - // the connections matched by at least one of the matcher. - // - // The order used to call Match determines the priority of matchers. - Match(...Matcher) net.Listener - // Serve starts multiplexing the listener. Serve blocks and perhaps - // should be invoked concurrently within a go routine. - Serve() error - // HandleError registers an error handler that handles listener errors. - HandleError(ErrorHandler) -} - -type matchersListener struct { - ss []Matcher - l muxListener -} - -type cMux struct { - root net.Listener - bufLen int - errh ErrorHandler - donec chan struct{} - sls []matchersListener -} - -func (m *cMux) Match(matchers ...Matcher) net.Listener { - ml := muxListener{ - Listener: m.root, - connc: make(chan net.Conn, m.bufLen), - } - m.sls = append(m.sls, matchersListener{ss: matchers, l: ml}) - return ml -} - -func (m *cMux) Serve() error { - var wg sync.WaitGroup - - defer func() { - close(m.donec) - wg.Wait() - - for _, sl := range m.sls { - close(sl.l.connc) - // Drain the connections enqueued for the listener. 
- for c := range sl.l.connc { - _ = c.Close() - } - } - }() - - for { - c, err := m.root.Accept() - if err != nil { - if !m.handleErr(err) { - return err - } - continue - } - - wg.Add(1) - go m.serve(c, m.donec, &wg) - } -} - -func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) { - defer wg.Done() - - muc := newMuxConn(c) - for _, sl := range m.sls { - for _, s := range sl.ss { - matched := s(muc.getSniffer()) - if matched { - select { - case sl.l.connc <- muc: - case <-donec: - _ = c.Close() - } - return - } - } - } - - _ = c.Close() - err := ErrNotMatched{c: c} - if !m.handleErr(err) { - _ = m.root.Close() - } -} - -func (m *cMux) HandleError(h ErrorHandler) { - m.errh = h -} - -func (m *cMux) handleErr(err error) bool { - if !m.errh(err) { - return false - } - - if ne, ok := err.(net.Error); ok { - return ne.Temporary() - } - - return false -} - -type muxListener struct { - net.Listener - connc chan net.Conn -} - -func (l muxListener) Accept() (net.Conn, error) { - c, ok := <-l.connc - if !ok { - return nil, ErrListenerClosed - } - return c, nil -} - -// MuxConn wraps a net.Conn and provides transparent sniffing of connection data. -type MuxConn struct { - net.Conn - buf bytes.Buffer - sniffer bufferedReader -} - -func newMuxConn(c net.Conn) *MuxConn { - return &MuxConn{ - Conn: c, - } -} - -// From the io.Reader documentation: -// -// When Read encounters an error or end-of-file condition after -// successfully reading n > 0 bytes, it returns the number of -// bytes read. It may return the (non-nil) error from the same call -// or return the error (and n == 0) from a subsequent call. -// An instance of this general case is that a Reader returning -// a non-zero number of bytes at the end of the input stream may -// return either err == EOF or err == nil. The next Read should -// return 0, EOF. 
-func (m *MuxConn) Read(p []byte) (int, error) { - if n, err := m.buf.Read(p); err != io.EOF { - return n, err - } - return m.Conn.Read(p) -} - -func (m *MuxConn) getSniffer() io.Reader { - m.sniffer = bufferedReader{source: m.Conn, buffer: &m.buf, bufferSize: m.buf.Len()} - return &m.sniffer -} diff --git a/vendor/github.com/cockroachdb/cmux/matchers.go b/vendor/github.com/cockroachdb/cmux/matchers.go deleted file mode 100644 index abc30f6e0ad..00000000000 --- a/vendor/github.com/cockroachdb/cmux/matchers.go +++ /dev/null @@ -1,150 +0,0 @@ -package cmux - -import ( - "bufio" - "io" - "io/ioutil" - "net/http" - "strings" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -// Any is a Matcher that matches any connection. -func Any() Matcher { - return func(r io.Reader) bool { return true } -} - -// PrefixMatcher returns a matcher that matches a connection if it -// starts with any of the strings in strs. -func PrefixMatcher(strs ...string) Matcher { - pt := newPatriciaTreeString(strs...) - return pt.matchPrefix -} - -var defaultHTTPMethods = []string{ - "OPTIONS", - "GET", - "HEAD", - "POST", - "PUT", - "DELETE", - "TRACE", - "CONNECT", -} - -// HTTP1Fast only matches the methods in the HTTP request. -// -// This matcher is very optimistic: if it returns true, it does not mean that -// the request is a valid HTTP response. If you want a correct but slower HTTP1 -// matcher, use HTTP1 instead. -func HTTP1Fast(extMethods ...string) Matcher { - return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...) -} - -const maxHTTPRead = 4096 - -// HTTP1 parses the first line or upto 4096 bytes of the request to see if -// the conection contains an HTTP request. 
-func HTTP1() Matcher { - return func(r io.Reader) bool { - br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead}) - l, part, err := br.ReadLine() - if err != nil || part { - return false - } - - _, _, proto, ok := parseRequestLine(string(l)) - if !ok { - return false - } - - v, _, ok := http.ParseHTTPVersion(proto) - return ok && v == 1 - } -} - -// grabbed from net/http. -func parseRequestLine(line string) (method, uri, proto string, ok bool) { - s1 := strings.Index(line, " ") - s2 := strings.Index(line[s1+1:], " ") - if s1 < 0 || s2 < 0 { - return - } - s2 += s1 + 1 - return line[:s1], line[s1+1 : s2], line[s2+1:], true -} - -// HTTP2 parses the frame header of the first frame to detect whether the -// connection is an HTTP2 connection. -func HTTP2() Matcher { - return hasHTTP2Preface -} - -// HTTP1HeaderField returns a matcher matching the header fields of the first -// request of an HTTP 1 connection. -func HTTP1HeaderField(name, value string) Matcher { - return func(r io.Reader) bool { - return matchHTTP1Field(r, name, value) - } -} - -// HTTP2HeaderField resturns a matcher matching the header fields of the first -// headers frame. 
-func HTTP2HeaderField(name, value string) Matcher { - return func(r io.Reader) bool { - return matchHTTP2Field(r, name, value) - } -} - -func hasHTTP2Preface(r io.Reader) bool { - var b [len(http2.ClientPreface)]byte - if _, err := io.ReadFull(r, b[:]); err != nil { - return false - } - - return string(b[:]) == http2.ClientPreface -} - -func matchHTTP1Field(r io.Reader, name, value string) (matched bool) { - req, err := http.ReadRequest(bufio.NewReader(r)) - if err != nil { - return false - } - - return req.Header.Get(name) == value -} - -func matchHTTP2Field(r io.Reader, name, value string) (matched bool) { - if !hasHTTP2Preface(r) { - return false - } - - framer := http2.NewFramer(ioutil.Discard, r) - hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) { - if hf.Name == name && hf.Value == value { - matched = true - } - }) - for { - f, err := framer.ReadFrame() - if err != nil { - return false - } - - switch f := f.(type) { - case *http2.HeadersFrame: - if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil { - return false - } - if matched { - return true - } - - if f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0 { - return false - } - } - } -} diff --git a/vendor/github.com/cockroachdb/cmux/patricia.go b/vendor/github.com/cockroachdb/cmux/patricia.go deleted file mode 100644 index 56ec4e7b287..00000000000 --- a/vendor/github.com/cockroachdb/cmux/patricia.go +++ /dev/null @@ -1,173 +0,0 @@ -package cmux - -import ( - "bytes" - "io" -) - -// patriciaTree is a simple patricia tree that handles []byte instead of string -// and cannot be changed after instantiation. 
-type patriciaTree struct { - root *ptNode -} - -func newPatriciaTree(b ...[]byte) *patriciaTree { - return &patriciaTree{ - root: newNode(b), - } -} - -func newPatriciaTreeString(strs ...string) *patriciaTree { - b := make([][]byte, len(strs)) - for i, s := range strs { - b[i] = []byte(s) - } - return &patriciaTree{ - root: newNode(b), - } -} - -func (t *patriciaTree) matchPrefix(r io.Reader) bool { - return t.root.match(r, true) -} - -func (t *patriciaTree) match(r io.Reader) bool { - return t.root.match(r, false) -} - -type ptNode struct { - prefix []byte - next map[byte]*ptNode - terminal bool -} - -func newNode(strs [][]byte) *ptNode { - if len(strs) == 0 { - return &ptNode{ - prefix: []byte{}, - terminal: true, - } - } - - if len(strs) == 1 { - return &ptNode{ - prefix: strs[0], - terminal: true, - } - } - - p, strs := splitPrefix(strs) - n := &ptNode{ - prefix: p, - } - - nexts := make(map[byte][][]byte) - for _, s := range strs { - if len(s) == 0 { - n.terminal = true - continue - } - nexts[s[0]] = append(nexts[s[0]], s[1:]) - } - - n.next = make(map[byte]*ptNode) - for first, rests := range nexts { - n.next[first] = newNode(rests) - } - - return n -} - -func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) { - if len(bss) == 0 || len(bss[0]) == 0 { - return prefix, bss - } - - if len(bss) == 1 { - return bss[0], [][]byte{{}} - } - - for i := 0; ; i++ { - var cur byte - eq := true - for j, b := range bss { - if len(b) <= i { - eq = false - break - } - - if j == 0 { - cur = b[i] - continue - } - - if cur != b[i] { - eq = false - break - } - } - - if !eq { - break - } - - prefix = append(prefix, cur) - } - - rest = make([][]byte, 0, len(bss)) - for _, b := range bss { - rest = append(rest, b[len(prefix):]) - } - - return prefix, rest -} - -func readBytes(r io.Reader, n int) (b []byte, err error) { - b = make([]byte, n) - o := 0 - for o < n { - nr, err := r.Read(b[o:]) - if err != nil && err != io.EOF { - return b, err - } - - o += nr - - if err == 
io.EOF { - break - } - } - return b[:o], nil -} - -func (n *ptNode) match(r io.Reader, prefix bool) bool { - if l := len(n.prefix); l > 0 { - b, err := readBytes(r, l) - if err != nil || len(b) != l || !bytes.Equal(b, n.prefix) { - return false - } - } - - if prefix && n.terminal { - return true - } - - b := make([]byte, 1) - for { - nr, err := r.Read(b) - if nr != 0 { - break - } - - if err == io.EOF { - return n.terminal - } - - if err != nil { - return false - } - } - - nextN, ok := n.next[b[0]] - return ok && nextN.match(r, prefix) -} diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/github.com/coreos/bbolt/Makefile deleted file mode 100644 index 43b94f3bdfe..00000000000 --- a/vendor/github.com/coreos/bbolt/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -fmt: - !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') - -# go get honnef.co/go/tools/simple -gosimple: - gosimple ./... - -# go get honnef.co/go/tools/unused -unused: - unused ./... - -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/coreos/bbolt - -test: - go test -timeout 20m -v -coverprofile cover.out -covermode atomic - # Note: gets "program not an importable package" in out of path builds - go test -v ./cmd/bolt - -.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_arm.go deleted file mode 100644 index 7e5cb4b9412..00000000000 --- a/vendor/github.com/coreos/bbolt/bolt_arm.go +++ /dev/null @@ -1,28 +0,0 @@ -package bolt - -import "unsafe" - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned bool - -func init() { - // Simple check to see whether this arch handles unaligned load/stores - // correctly. - - // ARM9 and older devices require load/stores to be from/to aligned - // addresses. If not, the lower 2 bits are cleared and that address is - // read in a jumbled up order. - - // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html - - raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} - val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) - - brokenUnaligned = val != 0x11222211 -} diff --git a/vendor/github.com/coreos/bbolt/bolt_mips64x.go b/vendor/github.com/coreos/bbolt/bolt_mips64x.go deleted file mode 100644 index 134b578bd44..00000000000 --- a/vendor/github.com/coreos/bbolt/bolt_mips64x.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build mips64 mips64le - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x8000000000 // 512GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? 
-var brokenUnaligned = false diff --git a/vendor/github.com/coreos/etcd/auth/BUILD b/vendor/github.com/coreos/etcd/auth/BUILD index 892b00a396f..7452a66ee29 100644 --- a/vendor/github.com/coreos/etcd/auth/BUILD +++ b/vendor/github.com/coreos/etcd/auth/BUILD @@ -4,7 +4,6 @@ go_library( name = "go_default_library", srcs = [ "doc.go", - "jwt.go", "range_perm_cache.go", "simple_token.go", "store.go", @@ -15,14 +14,10 @@ go_library( "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/adt:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/dgrijalva/jwt-go:go_default_library", "//vendor/golang.org/x/crypto/bcrypt:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", - "//vendor/google.golang.org/grpc/peer:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go index 009ebda70ca..c6e2a12a7fa 100644 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go +++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go @@ -803,7 +803,7 @@ func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } var fileDescriptorAuth = []byte{ // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, 0x6d, 0x4a, 0x32, 0x91, 
0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go deleted file mode 100644 index 214ae48c83a..00000000000 --- a/vendor/github.com/coreos/etcd/auth/jwt.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "crypto/rsa" - "io/ioutil" - - jwt "github.com/dgrijalva/jwt-go" - "golang.org/x/net/context" -) - -type tokenJWT struct { - signMethod string - signKey *rsa.PrivateKey - verifyKey *rsa.PublicKey -} - -func (t *tokenJWT) enable() {} -func (t *tokenJWT) disable() {} -func (t *tokenJWT) invalidateUser(string) {} -func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } - -func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { - // rev isn't used in JWT, it is only used in simple token - var ( - username string - revision uint64 - ) - - parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { - return t.verifyKey, nil - }) - - switch err.(type) { - case nil: - if !parsed.Valid { - plog.Warningf("invalid jwt token: %s", token) - return nil, false - } - - claims := parsed.Claims.(jwt.MapClaims) - - username = claims["username"].(string) - revision = uint64(claims["revision"].(float64)) - default: - plog.Warningf("failed to parse jwt token: %s", err) - return nil, false - } - - 
return &AuthInfo{Username: username, Revision: revision}, true -} - -func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { - // Future work: let a jwt token include permission information would be useful for - // permission checking in proxy side. - tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), - jwt.MapClaims{ - "username": username, - "revision": revision, - }) - - token, err := tk.SignedString(t.signKey) - if err != nil { - plog.Debugf("failed to sign jwt token: %s", err) - return "", err - } - - plog.Debugf("jwt token: %s", token) - - return token, err -} - -func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) { - for k, v := range opts { - switch k { - case "sign-method": - jwtSignMethod = v - case "pub-key": - jwtPubKeyPath = v - case "priv-key": - jwtPrivKeyPath = v - default: - plog.Errorf("unknown token specific option: %s", k) - return "", "", "", ErrInvalidAuthOpts - } - } - - return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil -} - -func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) { - jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts) - if err != nil { - return nil, ErrInvalidAuthOpts - } - - t := &tokenJWT{} - - t.signMethod = jwtSignMethod - - verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) - if err != nil { - plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) - return nil, err - } - t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) - if err != nil { - plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) - return nil, err - } - - signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) - if err != nil { - plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) - return nil, err - } - t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) - if err != nil { - plog.Errorf("failed to parse private key (%s): %s", 
jwtPrivKeyPath, err) - return nil, err - } - - return t, nil -} diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go index 691b65ba38e..3cd1ad2a411 100644 --- a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go +++ b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go @@ -15,11 +15,93 @@ package auth import ( + "bytes" + "sort" + "github.com/coreos/etcd/auth/authpb" "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/adt" ) +// isSubset returns true if a is a subset of b. +// If a is a prefix of b, then a is a subset of b. +// Given intervals [a1,a2) and [b1,b2), is +// the a interval a subset of b? +func isSubset(a, b *rangePerm) bool { + switch { + case len(a.end) == 0 && len(b.end) == 0: + // a, b are both keys + return bytes.Equal(a.begin, b.begin) + case len(b.end) == 0: + // b is a key, a is a range + return false + case len(a.end) == 0: + // a is a key, b is a range. need b1 <= a1 and a1 < b2 + return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.begin, b.end) < 0 + default: + // both are ranges. need b1 <= a1 and a2 <= b2 + return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.end, b.end) <= 0 + } +} + +func isRangeEqual(a, b *rangePerm) bool { + return bytes.Equal(a.begin, b.begin) && bytes.Equal(a.end, b.end) +} + +// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms. +// If there are equal ranges, removeSubsetRangePerms only keeps one of them. +// It returns a sorted rangePerm slice. 
+func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) { + sort.Sort(RangePermSliceByBegin(perms)) + var prev *rangePerm + for i := range perms { + if i == 0 { + prev = perms[i] + newp = append(newp, perms[i]) + continue + } + if isRangeEqual(perms[i], prev) { + continue + } + if isSubset(perms[i], prev) { + continue + } + if isSubset(prev, perms[i]) { + prev = perms[i] + newp[len(newp)-1] = perms[i] + continue + } + prev = perms[i] + newp = append(newp, perms[i]) + } + return newp +} + +// mergeRangePerms merges adjacent rangePerms. +func mergeRangePerms(perms []*rangePerm) []*rangePerm { + var merged []*rangePerm + perms = removeSubsetRangePerms(perms) + + i := 0 + for i < len(perms) { + begin, next := i, i + for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) >= 0 { + next++ + } + // don't merge ["a", "b") with ["b", ""), because perms[next+1].end is empty. + if next != begin && len(perms[next].end) > 0 { + merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end}) + } else { + merged = append(merged, perms[begin]) + if next != begin { + merged = append(merged, perms[next]) + } + } + i = next + 1 + } + + return merged +} + func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { user := getUser(tx, userName) if user == nil { @@ -27,8 +109,7 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission return nil } - readPerms := &adt.IntervalTree{} - writePerms := &adt.IntervalTree{} + var readPerms, writePerms []*rangePerm for _, roleName := range user.Roles { role := getRole(tx, roleName) @@ -37,66 +118,48 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission } for _, perm := range role.KeyPermission { - var ivl adt.Interval - var rangeEnd []byte - - if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 { - rangeEnd = perm.RangeEnd - } - - if len(perm.RangeEnd) != 0 { - ivl = adt.NewBytesAffineInterval(perm.Key, 
rangeEnd) - } else { - ivl = adt.NewBytesAffinePoint(perm.Key) - } + rp := &rangePerm{begin: perm.Key, end: perm.RangeEnd} switch perm.PermType { case authpb.READWRITE: - readPerms.Insert(ivl, struct{}{}) - writePerms.Insert(ivl, struct{}{}) + readPerms = append(readPerms, rp) + writePerms = append(writePerms, rp) case authpb.READ: - readPerms.Insert(ivl, struct{}{}) + readPerms = append(readPerms, rp) case authpb.WRITE: - writePerms.Insert(ivl, struct{}{}) + writePerms = append(writePerms, rp) } } } return &unifiedRangePermissions{ - readPerms: readPerms, - writePerms: writePerms, + readPerms: mergeRangePerms(readPerms), + writePerms: mergeRangePerms(writePerms), } } -func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - if len(rangeEnd) == 1 && rangeEnd[0] == 0 { - rangeEnd = nil - } +func checkKeyPerm(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { + var tocheck []*rangePerm - ivl := adt.NewBytesAffineInterval(key, rangeEnd) switch permtyp { case authpb.READ: - return cachedPerms.readPerms.Contains(ivl) + tocheck = cachedPerms.readPerms case authpb.WRITE: - return cachedPerms.writePerms.Contains(ivl) + tocheck = cachedPerms.writePerms default: plog.Panicf("unknown auth type: %v", permtyp) } - return false -} -func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { - pt := adt.NewBytesAffinePoint(key) - switch permtyp { - case authpb.READ: - return cachedPerms.readPerms.Intersects(pt) - case authpb.WRITE: - return cachedPerms.writePerms.Intersects(pt) - default: - plog.Panicf("unknown auth type: %v", permtyp) + requiredPerm := &rangePerm{begin: key, end: rangeEnd} + + for _, perm := range tocheck { + if isSubset(requiredPerm, perm) { + return true + } } + return false } @@ -112,11 +175,7 @@ func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key 
as.rangePermCache[userName] = perms } - if len(rangeEnd) == 0 { - return checkKeyPoint(as.rangePermCache[userName], key, permtyp) - } - - return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp) + return checkKeyPerm(as.rangePermCache[userName], key, rangeEnd, permtyp) } func (as *authStore) clearCachedPerm() { @@ -128,6 +187,35 @@ func (as *authStore) invalidateCachedPerm(userName string) { } type unifiedRangePermissions struct { - readPerms *adt.IntervalTree - writePerms *adt.IntervalTree + // readPerms[i] and readPerms[j] (i != j) don't overlap + readPerms []*rangePerm + // writePerms[i] and writePerms[j] (i != j) don't overlap, too + writePerms []*rangePerm +} + +type rangePerm struct { + begin, end []byte +} + +type RangePermSliceByBegin []*rangePerm + +func (slice RangePermSliceByBegin) Len() int { + return len(slice) +} + +func (slice RangePermSliceByBegin) Less(i, j int) bool { + switch bytes.Compare(slice[i].begin, slice[j].begin) { + case 0: // begin(i) == begin(j) + return bytes.Compare(slice[i].end, slice[j].end) == -1 + + case -1: // begin(i) < begin(j) + return true + + default: + return false + } +} + +func (slice RangePermSliceByBegin) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] } diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go index 94d92a115e2..a39f3927685 100644 --- a/vendor/github.com/coreos/etcd/auth/simple_token.go +++ b/vendor/github.com/coreos/etcd/auth/simple_token.go @@ -19,14 +19,10 @@ package auth import ( "crypto/rand" - "fmt" "math/big" - "strconv" "strings" "sync" "time" - - "golang.org/x/net/context" ) const ( @@ -94,14 +90,24 @@ func (tm *simpleTokenTTLKeeper) run() { } } -type tokenSimple struct { - indexWaiter func(uint64) <-chan struct{} - simpleTokenKeeper *simpleTokenTTLKeeper - simpleTokensMu sync.Mutex - simpleTokens map[string]string // token -> username +func (as *authStore) enable() { + delf := func(tk string) { + if 
username, ok := as.simpleTokens[tk]; ok { + plog.Infof("deleting token %s for user %s", tk, username) + delete(as.simpleTokens, tk) + } + } + as.simpleTokenKeeper = &simpleTokenTTLKeeper{ + tokens: make(map[string]time.Time), + donec: make(chan struct{}), + stopc: make(chan struct{}), + deleteTokenFunc: delf, + mu: &as.simpleTokensMu, + } + go as.simpleTokenKeeper.run() } -func (t *tokenSimple) genTokenPrefix() (string, error) { +func (as *authStore) GenSimpleToken() (string, error) { ret := make([]byte, defaultSimpleTokenLength) for i := 0; i < defaultSimpleTokenLength; i++ { @@ -116,105 +122,28 @@ func (t *tokenSimple) genTokenPrefix() (string, error) { return string(ret), nil } -func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { - t.simpleTokensMu.Lock() - _, ok := t.simpleTokens[token] +func (as *authStore) assignSimpleTokenToUser(username, token string) { + as.simpleTokensMu.Lock() + _, ok := as.simpleTokens[token] if ok { plog.Panicf("token %s is alredy used", token) } - t.simpleTokens[token] = username - t.simpleTokenKeeper.addSimpleToken(token) - t.simpleTokensMu.Unlock() + as.simpleTokens[token] = username + as.simpleTokenKeeper.addSimpleToken(token) + as.simpleTokensMu.Unlock() } -func (t *tokenSimple) invalidateUser(username string) { - if t.simpleTokenKeeper == nil { +func (as *authStore) invalidateUser(username string) { + if as.simpleTokenKeeper == nil { return } - t.simpleTokensMu.Lock() - for token, name := range t.simpleTokens { + as.simpleTokensMu.Lock() + for token, name := range as.simpleTokens { if strings.Compare(name, username) == 0 { - delete(t.simpleTokens, token) - t.simpleTokenKeeper.deleteSimpleToken(token) + delete(as.simpleTokens, token) + as.simpleTokenKeeper.deleteSimpleToken(token) } } - t.simpleTokensMu.Unlock() -} - -func (t *tokenSimple) enable() { - delf := func(tk string) { - if username, ok := t.simpleTokens[tk]; ok { - plog.Infof("deleting token %s for user %s", tk, username) - delete(t.simpleTokens, tk) 
- } - } - t.simpleTokenKeeper = &simpleTokenTTLKeeper{ - tokens: make(map[string]time.Time), - donec: make(chan struct{}), - stopc: make(chan struct{}), - deleteTokenFunc: delf, - mu: &t.simpleTokensMu, - } - go t.simpleTokenKeeper.run() -} - -func (t *tokenSimple) disable() { - t.simpleTokensMu.Lock() - tk := t.simpleTokenKeeper - t.simpleTokenKeeper = nil - t.simpleTokens = make(map[string]string) // invalidate all tokens - t.simpleTokensMu.Unlock() - if tk != nil { - tk.stop() - } -} - -func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) { - if !t.isValidSimpleToken(ctx, token) { - return nil, false - } - t.simpleTokensMu.Lock() - username, ok := t.simpleTokens[token] - if ok && t.simpleTokenKeeper != nil { - t.simpleTokenKeeper.resetSimpleToken(token) - } - t.simpleTokensMu.Unlock() - return &AuthInfo{Username: username, Revision: revision}, ok -} - -func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { - // rev isn't used in simple token, it is only used in JWT - index := ctx.Value("index").(uint64) - simpleToken := ctx.Value("simpleToken").(string) - token := fmt.Sprintf("%s.%d", simpleToken, index) - t.assignSimpleTokenToUser(username, token) - - return token, nil -} - -func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool { - splitted := strings.Split(token, ".") - if len(splitted) != 2 { - return false - } - index, err := strconv.Atoi(splitted[1]) - if err != nil { - return false - } - - select { - case <-t.indexWaiter(uint64(index)): - return true - case <-ctx.Done(): - } - - return false -} - -func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple { - return &tokenSimple{ - simpleTokens: make(map[string]string), - indexWaiter: indexWaiter, - } + as.simpleTokensMu.Unlock() } diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go index 3fac7f5a6fd..236bb2c529d 100644 
--- a/vendor/github.com/coreos/etcd/auth/store.go +++ b/vendor/github.com/coreos/etcd/auth/store.go @@ -18,10 +18,11 @@ import ( "bytes" "encoding/binary" "errors" + "fmt" "sort" + "strconv" "strings" "sync" - "sync/atomic" "github.com/coreos/etcd/auth/authpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -29,9 +30,7 @@ import ( "github.com/coreos/pkg/capnslog" "golang.org/x/crypto/bcrypt" "golang.org/x/net/context" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" ) var ( @@ -61,8 +60,6 @@ var ( ErrAuthNotEnabled = errors.New("auth: authentication is not enabled") ErrAuthOldRevision = errors.New("auth: revision in header is old") ErrInvalidAuthToken = errors.New("auth: invalid auth token") - ErrInvalidAuthOpts = errors.New("auth: invalid auth options") - ErrInvalidAuthMgmt = errors.New("auth: invalid auth management") // BcryptCost is the algorithm cost / strength for hashing auth passwords BcryptCost = bcrypt.DefaultCost @@ -132,6 +129,10 @@ type AuthStore interface { // RoleList gets a list of all roles RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) + // AuthInfoFromToken gets a username from the given Token and current revision number + // (The revision number is used for preventing the TOCTOU problem) + AuthInfoFromToken(token string) (*AuthInfo, bool) + // IsPutPermitted checks put permission of the user IsPutPermitted(authInfo *AuthInfo, key []byte) error @@ -144,9 +145,8 @@ type AuthStore interface { // IsAdminPermitted checks admin permission of the user IsAdminPermitted(authInfo *AuthInfo) error - // GenTokenPrefix produces a random string in a case of simple token - // in a case of JWT, it produces an empty string - GenTokenPrefix() (string, error) + // GenSimpleToken produces a simple random string + GenSimpleToken() (string, error) // Revision gets current revision of authStore Revision() uint64 @@ -159,32 +159,33 @@ type AuthStore interface { // AuthInfoFromCtx gets 
AuthInfo from gRPC's context AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) - - // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context - AuthInfoFromTLS(ctx context.Context) *AuthInfo -} - -type TokenProvider interface { - info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) - assign(ctx context.Context, username string, revision uint64) (string, error) - enable() - disable() - - invalidateUser(string) - genTokenPrefix() (string, error) } type authStore struct { - // atomic operations; need 64-bit align, or 32-bit tests will crash - revision uint64 - be backend.Backend enabled bool enabledMu sync.RWMutex rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions - tokenProvider TokenProvider + revision uint64 + + // tokenSimple in v3.2+ + indexWaiter func(uint64) <-chan struct{} + simpleTokenKeeper *simpleTokenTTLKeeper + simpleTokensMu sync.Mutex + simpleTokens map[string]string // token -> username +} + +func newDeleterFunc(as *authStore) func(string) { + return func(t string) { + as.simpleTokensMu.Lock() + defer as.simpleTokensMu.Unlock() + if username, ok := as.simpleTokens[t]; ok { + plog.Infof("deleting token %s for user %s", t, username) + delete(as.simpleTokens, t) + } + } } func (as *authStore) AuthEnable() error { @@ -214,11 +215,11 @@ func (as *authStore) AuthEnable() error { tx.UnsafePut(authBucketName, enableFlagKey, authEnabled) as.enabled = true - as.tokenProvider.enable() + as.enable() as.rangePermCache = make(map[string]*unifiedRangePermissions) - as.setRevision(getRevision(tx)) + as.revision = getRevision(tx) plog.Noticef("Authentication enabled") @@ -240,7 +241,15 @@ func (as *authStore) AuthDisable() { b.ForceCommit() as.enabled = false - as.tokenProvider.disable() + + as.simpleTokensMu.Lock() + tk := as.simpleTokenKeeper + as.simpleTokenKeeper = nil + as.simpleTokens = make(map[string]string) // invalidate all tokens + as.simpleTokensMu.Unlock() + if tk != nil { + tk.stop() 
+ } plog.Noticef("Authentication disabled") } @@ -251,7 +260,10 @@ func (as *authStore) Close() error { if !as.enabled { return nil } - as.tokenProvider.disable() + if as.simpleTokenKeeper != nil { + as.simpleTokenKeeper.stop() + as.simpleTokenKeeper = nil + } return nil } @@ -260,6 +272,10 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string return nil, ErrAuthNotEnabled } + // TODO(mitake): after adding jwt support, branching based on values of ctx is required + index := ctx.Value("index").(uint64) + simpleToken := ctx.Value("simpleToken").(string) + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -269,23 +285,14 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string return nil, ErrAuthFailed } - // Password checking is already performed in the API layer, so we don't need to check for now. - // Staleness of password can be detected with OCC in the API layer, too. + token := fmt.Sprintf("%s.%d", simpleToken, index) + as.assignSimpleTokenToUser(username, token) - token, err := as.tokenProvider.assign(ctx, username, as.Revision()) - if err != nil { - return nil, err - } - - plog.Debugf("authorized %s, token is %s", username, token) + plog.Infof("authorized %s, token is %s", username, token) return &pb.AuthenticateResponse{Token: token}, nil } func (as *authStore) CheckPassword(username, password string) (uint64, error) { - if !as.isAuthEnabled() { - return 0, ErrAuthNotEnabled - } - tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -315,7 +322,7 @@ func (as *authStore) Recover(be backend.Backend) { } } - as.setRevision(getRevision(tx)) + as.revision = getRevision(tx) tx.Unlock() @@ -359,11 +366,6 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, } func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - if as.enabled && strings.Compare(r.Name, rootUser) == 0 { - plog.Errorf("the user root must not be deleted") - return 
nil, ErrInvalidAuthMgmt - } - tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -378,7 +380,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete as.commitRevision(tx) as.invalidateCachedPerm(r.Name) - as.tokenProvider.invalidateUser(r.Name) + as.invalidateUser(r.Name) plog.Noticef("deleted a user: %s", r.Name) @@ -414,7 +416,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p as.commitRevision(tx) as.invalidateCachedPerm(r.Name) - as.tokenProvider.invalidateUser(r.Name) + as.invalidateUser(r.Name) plog.Noticef("changed a password of a user: %s", r.Name) @@ -489,11 +491,6 @@ func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListRespon } func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 { - plog.Errorf("the role root must not be revoked from the user root") - return nil, ErrInvalidAuthMgmt - } - tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -596,10 +593,17 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) } func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - if as.enabled && strings.Compare(r.Role, rootRole) == 0 { - plog.Errorf("the role root must not be deleted") - return nil, ErrInvalidAuthMgmt - } + // TODO(mitake): current scheme of role deletion allows existing users to have the deleted roles + // + // Assume a case like below: + // create a role r1 + // create a user u1 and grant r1 to u1 + // delete r1 + // + // After this sequence, u1 is still granted the role r1. So if admin create a new role with the name r1, + // the new r1 is automatically granted u1. + // In some cases, it would be confusing. So we need to provide an option for deleting the grant relation + // from all users. 
tx := as.be.BatchTx() tx.Lock() @@ -612,28 +616,6 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete delRole(tx, r.Role) - users := getAllUsers(tx) - for _, user := range users { - updatedUser := &authpb.User{ - Name: user.Name, - Password: user.Password, - } - - for _, role := range user.Roles { - if strings.Compare(role, r.Role) != 0 { - updatedUser.Roles = append(updatedUser.Roles, role) - } - } - - if len(updatedUser.Roles) == len(user.Roles) { - continue - } - - putUser(tx, updatedUser) - - as.invalidateCachedPerm(string(user.Name)) - } - as.commitRevision(tx) plog.Noticef("deleted role %s", r.Role) @@ -663,8 +645,15 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, return &pb.AuthRoleAddResponse{}, nil } -func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) { - return as.tokenProvider.info(ctx, token, as.Revision()) +func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) { + // same as '(t *tokenSimple) info' in v3.2+ + as.simpleTokensMu.Lock() + username, ok := as.simpleTokens[token] + if ok && as.simpleTokenKeeper != nil { + as.simpleTokenKeeper.resetSimpleToken(token) + } + as.simpleTokensMu.Unlock() + return &AuthInfo{Username: username, Revision: as.revision}, ok } type permSlice []*authpb.Permission @@ -734,7 +723,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE return ErrUserEmpty } - if revision < as.Revision() { + if revision < as.revision { return ErrAuthOldRevision } @@ -897,7 +886,7 @@ func (as *authStore) isAuthEnabled() bool { return as.enabled } -func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { +func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore { tx := be.BatchTx() tx.Lock() @@ -915,17 +904,18 @@ func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { as := &authStore{ be: be, + simpleTokens: 
make(map[string]string), revision: getRevision(tx), + indexWaiter: indexWaiter, enabled: enabled, rangePermCache: make(map[string]*unifiedRangePermissions), - tokenProvider: tp, } if enabled { - as.tokenProvider.enable() + as.enable() } - if as.Revision() == 0 { + if as.revision == 0 { as.commitRevision(tx) } @@ -945,9 +935,9 @@ func hasRootRole(u *authpb.User) bool { } func (as *authStore) commitRevision(tx backend.BatchTx) { - atomic.AddUint64(&as.revision, 1) + as.revision++ revBytes := make([]byte, revBytesLen) - binary.BigEndian.PutUint64(revBytes, as.Revision()) + binary.BigEndian.PutUint64(revBytes, as.revision) tx.UnsafePut(authBucketName, revisionKey, revBytes) } @@ -961,38 +951,31 @@ func getRevision(tx backend.BatchTx) uint64 { return binary.BigEndian.Uint64(vs[0]) } -func (as *authStore) setRevision(rev uint64) { - atomic.StoreUint64(&as.revision, rev) -} - func (as *authStore) Revision() uint64 { - return atomic.LoadUint64(&as.revision) + return as.revision } -func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { - peer, ok := peer.FromContext(ctx) - if !ok || peer == nil || peer.AuthInfo == nil { - return nil +func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool { + splitted := strings.Split(token, ".") + if len(splitted) != 2 { + return false + } + index, err := strconv.Atoi(splitted[1]) + if err != nil { + return false } - tlsInfo := peer.AuthInfo.(credentials.TLSInfo) - for _, chains := range tlsInfo.State.VerifiedChains { - for _, chain := range chains { - cn := chain.Subject.CommonName - plog.Debugf("found common name %s", cn) - - return &AuthInfo{ - Username: cn, - Revision: as.Revision(), - } - } + select { + case <-as.indexWaiter(uint64(index)): + return true + case <-ctx.Done(): } - return nil + return false } func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { - md, ok := metadata.FromIncomingContext(ctx) + md, ok := metadata.FromContext(ctx) if !ok { return nil, nil } @@ 
-1003,57 +986,14 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { } token := ts[0] - authInfo, uok := as.authInfoFromToken(ctx, token) + if !as.isValidSimpleToken(token, ctx) { + return nil, ErrInvalidAuthToken + } + + authInfo, uok := as.AuthInfoFromToken(token) if !uok { plog.Warningf("invalid auth token: %s", token) return nil, ErrInvalidAuthToken } return authInfo, nil } - -func (as *authStore) GenTokenPrefix() (string, error) { - return as.tokenProvider.genTokenPrefix() -} - -func decomposeOpts(optstr string) (string, map[string]string, error) { - opts := strings.Split(optstr, ",") - tokenType := opts[0] - - typeSpecificOpts := make(map[string]string) - for i := 1; i < len(opts); i++ { - pair := strings.Split(opts[i], "=") - - if len(pair) != 2 { - plog.Errorf("invalid token specific option: %s", optstr) - return "", nil, ErrInvalidAuthOpts - } - - if _, ok := typeSpecificOpts[pair[0]]; ok { - plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) - return "", nil, ErrInvalidAuthOpts - } - - typeSpecificOpts[pair[0]] = pair[1] - } - - return tokenType, typeSpecificOpts, nil - -} - -func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { - tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts) - if err != nil { - return nil, ErrInvalidAuthOpts - } - - switch tokenType { - case "simple": - plog.Warningf("simple token is not cryptographically signed") - return newTokenProviderSimple(indexWaiter), nil - case "jwt": - return newTokenProviderJWT(typeSpecificOpts) - default: - plog.Errorf("unknown token type: %s", tokenType) - return nil, ErrInvalidAuthOpts - } -} diff --git a/vendor/github.com/coreos/etcd/client/BUILD b/vendor/github.com/coreos/etcd/client/BUILD index 00a5b08d87f..16c78ceec37 100644 --- a/vendor/github.com/coreos/etcd/client/BUILD +++ b/vendor/github.com/coreos/etcd/client/BUILD @@ -14,15 +14,14 @@ go_library( 
"keys.generated.go", "keys.go", "members.go", + "srv.go", "util.go", ], importpath = "github.com/coreos/etcd/client", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/coreos/etcd/pkg/pathutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/srv:go_default_library", "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/version:go_default_library", "//vendor/github.com/ugorji/go/codec:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", ], diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go index 19ce2ec01da..f9131b4725c 100644 --- a/vendor/github.com/coreos/etcd/client/client.go +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -15,7 +15,6 @@ package client import ( - "encoding/json" "errors" "fmt" "io/ioutil" @@ -28,8 +27,6 @@ import ( "sync" "time" - "github.com/coreos/etcd/version" - "golang.org/x/net/context" ) @@ -204,9 +201,6 @@ type Client interface { // returned SetEndpoints(eps []string) error - // GetVersion retrieves the current etcd server and cluster version - GetVersion(ctx context.Context) (*version.Versions, error) - httpClient } @@ -372,7 +366,12 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo if err == context.Canceled || err == context.DeadlineExceeded { return nil, nil, err } - } else if resp.StatusCode/100 == 5 { + if isOneShot { + return nil, nil, err + } + continue + } + if resp.StatusCode/100 == 5 { switch resp.StatusCode { case http.StatusInternalServerError, http.StatusServiceUnavailable: // TODO: make sure this is a no leader response @@ -380,16 +379,10 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo default: cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) } - err = cerr.Errors[0] - } - if err != nil { - if 
!isOneShot { - continue + if isOneShot { + return nil, nil, cerr.Errors[0] } - c.Lock() - c.pinned = (k + 1) % leps - c.Unlock() - return nil, nil, err + continue } if k != pinned { c.Lock() @@ -484,33 +477,6 @@ func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration } } -func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { - act := &getAction{Prefix: "/version"} - - resp, body, err := c.Do(ctx, act) - if err != nil { - return nil, err - } - - switch resp.StatusCode { - case http.StatusOK: - if len(body) == 0 { - return nil, ErrEmptyBody - } - var vresp version.Versions - if err := json.Unmarshal(body, &vresp); err != nil { - return nil, ErrInvalidJSON - } - return &vresp, nil - default: - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return nil, ErrInvalidJSON - } - return nil, etcdErr - } -} - type roundTripResponse struct { resp *http.Response err error diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go index 442e35fe543..bfd7aec93f5 100644 --- a/vendor/github.com/coreos/etcd/client/discover.go +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -14,27 +14,8 @@ package client -import ( - "github.com/coreos/etcd/pkg/srv" -) - // Discoverer is an interface that wraps the Discover method. type Discoverer interface { // Discover looks up the etcd servers for the domain. Discover(domain string) ([]string, error) } - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
-func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -func (d *srvDiscover) Discover(domain string) ([]string, error) { - srvs, err := srv.GetClient("etcd-client", domain) - if err != nil { - return nil, err - } - return srvs.Endpoints, nil -} diff --git a/vendor/github.com/coreos/etcd/client/srv.go b/vendor/github.com/coreos/etcd/client/srv.go new file mode 100644 index 00000000000..fdfa3435921 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/srv.go @@ -0,0 +1,65 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "fmt" + "net" + "net/url" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV +) + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. +func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +// Discover looks up the etcd servers for the domain. 
+func (d *srvDiscover) Discover(domain string) ([]string, error) { + var urls []*url.URL + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + return nil + } + + errHTTPS := updateURLs("etcd-client-ssl", "https") + errHTTP := updateURLs("etcd-client", "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return endpoints, nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/BUILD b/vendor/github.com/coreos/etcd/clientv3/BUILD index c610438b2a5..c4cec492504 100644 --- a/vendor/github.com/coreos/etcd/clientv3/BUILD +++ b/vendor/github.com/coreos/etcd/clientv3/BUILD @@ -4,19 +4,18 @@ go_library( name = "go_default_library", srcs = [ "auth.go", + "balancer.go", "client.go", "cluster.go", "compact_op.go", "compare.go", "config.go", "doc.go", - "health_balancer.go", "kv.go", "lease.go", "logger.go", "maintenance.go", "op.go", - "ready_wait.go", "retry.go", "sort.go", "txn.go", @@ -29,15 +28,15 @@ go_library( "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", 
"//vendor/google.golang.org/grpc/credentials:go_default_library", "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/health/grpc_health_v1:go_default_library", - "//vendor/google.golang.org/grpc/keepalive:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", - "//vendor/google.golang.org/grpc/status:go_default_library", ], ) @@ -50,12 +49,7 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/github.com/coreos/etcd/clientv3/concurrency:all-srcs", - "//vendor/github.com/coreos/etcd/clientv3/namespace:all-srcs", - "//vendor/github.com/coreos/etcd/clientv3/naming:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md index 376bfba7614..87c32d1a88a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/README.md +++ b/vendor/github.com/coreos/etcd/clientv3/README.md @@ -1,6 +1,6 @@ # etcd/clientv3 -[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) +[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) `etcd/clientv3` is the official Go etcd client for v3. @@ -32,7 +32,7 @@ pass `context.WithTimeout` to APIs: ```go ctx, cancel := context.WithTimeout(context.Background(), timeout) -resp, err := cli.Put(ctx, "sample_key", "sample_value") +resp, err := kvc.Put(ctx, "sample_key", "sample_value") cancel() if err != nil { // handle error! 
@@ -57,7 +57,7 @@ etcd client returns 2 types of errors: Here is the example code to handle client errors: ```go -resp, err := cli.Put(ctx, "", "") +resp, err := kvc.Put(ctx, "", "") if err != nil { switch err { case context.Canceled: @@ -76,10 +76,6 @@ if err != nil { The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go). -## Namespacing - -The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. - ## Examples More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3). diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go index dddbcb4f626..b995bce8e3f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -20,7 +20,6 @@ import ( "github.com/coreos/etcd/auth/authpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -101,20 +100,28 @@ type Auth interface { } type auth struct { + c *Client + + conn *grpc.ClientConn // conn in-use remote pb.AuthClient } func NewAuth(c *Client) Auth { - return &auth{remote: RetryAuthClient(c)} + conn := c.ActiveConnection() + return &auth{ + conn: c.ActiveConnection(), + remote: pb.NewAuthClient(conn), + c: c, + } } func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}) + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false)) return (*AuthEnableResponse)(resp), toErr(ctx, err) } func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := 
auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}) + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false)) return (*AuthDisableResponse)(resp), toErr(ctx, err) } @@ -139,12 +146,12 @@ func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) ( } func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { - resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}) + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false)) return (*AuthUserGetResponse)(resp), toErr(ctx, err) } func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { - resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}) + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false)) return (*AuthUserListResponse)(resp), toErr(ctx, err) } @@ -169,12 +176,12 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran } func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { - resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}) + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false)) return (*AuthRoleGetResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { - resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}) + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false)) return (*AuthRoleListResponse)(resp), toErr(ctx, err) } @@ -202,7 +209,7 @@ type authenticator struct { } func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { - resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}) + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: 
name, Password: password}, grpc.FailFast(false)) return (*AuthenticateResponse)(resp), toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go new file mode 100644 index 00000000000..0fef9c54934 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/balancer.go @@ -0,0 +1,239 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// ErrNoAddrAvilable is returned by Get() when the balancer does not have +// any active connection to endpoints at the time. +// This error is returned only when opts.BlockingWait is true. 
+var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
+
+// simpleBalancer does the bare minimum to expose multiple eps
+// to the grpc reconnection code path
+type simpleBalancer struct {
+	// addrs are the client's endpoints for grpc
+	addrs []grpc.Address
+	// notifyCh notifies grpc of the set of addresses for connecting
+	notifyCh chan []grpc.Address
+
+	// readyc closes once the first connection is up
+	readyc    chan struct{}
+	readyOnce sync.Once
+
+	// mu protects upEps, pinAddr, and connectingAddr
+	mu sync.RWMutex
+	// upEps holds the current endpoints that have an active connection
+	upEps map[string]struct{}
+	// upc closes when upEps transitions from empty to non-zero or the balancer closes.
+	upc chan struct{}
+
+	// grpc issues TLS cert checks using the string passed into dial so
+	// that string must be the host. To recover the full scheme://host URL,
+	// have a map from hosts to the original endpoint.
+	host2ep map[string]string
+
+	// pinAddr is the currently pinned address; set to the empty string on
+	// initialization and shutdown.
+ pinAddr string + + closed bool +} + +func newSimpleBalancer(eps []string) *simpleBalancer { + notifyCh := make(chan []grpc.Address, 1) + addrs := make([]grpc.Address, len(eps)) + for i := range eps { + addrs[i].Addr = getHost(eps[i]) + } + notifyCh <- addrs + sb := &simpleBalancer{ + addrs: addrs, + notifyCh: notifyCh, + readyc: make(chan struct{}), + upEps: make(map[string]struct{}), + upc: make(chan struct{}), + host2ep: getHost2ep(eps), + } + return sb +} + +func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } + +func (b *simpleBalancer) ConnectNotify() <-chan struct{} { + b.mu.Lock() + defer b.mu.Unlock() + return b.upc +} + +func (b *simpleBalancer) getEndpoint(host string) string { + b.mu.Lock() + defer b.mu.Unlock() + return b.host2ep[host] +} + +func getHost2ep(eps []string) map[string]string { + hm := make(map[string]string, len(eps)) + for i := range eps { + _, host, _ := parseEndpoint(eps[i]) + hm[host] = eps[i] + } + return hm +} + +func (b *simpleBalancer) updateAddrs(eps []string) { + np := getHost2ep(eps) + + b.mu.Lock() + defer b.mu.Unlock() + + match := len(np) == len(b.host2ep) + for k, v := range np { + if b.host2ep[k] != v { + match = false + break + } + } + if match { + // same endpoints, so no need to update address + return + } + + b.host2ep = np + + addrs := make([]grpc.Address, 0, len(eps)) + for i := range eps { + addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])}) + } + b.addrs = addrs + b.notifyCh <- addrs +} + +func (b *simpleBalancer) Up(addr grpc.Address) func(error) { + b.mu.Lock() + defer b.mu.Unlock() + + // gRPC might call Up after it called Close. We add this check + // to "fix" it up at application layer. Or our simplerBalancer + // might panic since b.upc is closed. 
+ if b.closed { + return func(err error) {} + } + + if len(b.upEps) == 0 { + // notify waiting Get()s and pin first connected address + close(b.upc) + b.pinAddr = addr.Addr + } + b.upEps[addr.Addr] = struct{}{} + + // notify client that a connection is up + b.readyOnce.Do(func() { close(b.readyc) }) + + return func(err error) { + b.mu.Lock() + delete(b.upEps, addr.Addr) + if len(b.upEps) == 0 && b.pinAddr != "" { + b.upc = make(chan struct{}) + } else if b.pinAddr == addr.Addr { + // choose new random up endpoint + for k := range b.upEps { + b.pinAddr = k + break + } + } + b.mu.Unlock() + } +} + +func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { + var addr string + + // If opts.BlockingWait is false (for fail-fast RPCs), it should return + // an address it has notified via Notify immediately instead of blocking. + if !opts.BlockingWait { + b.mu.RLock() + closed := b.closed + addr = b.pinAddr + upEps := len(b.upEps) + b.mu.RUnlock() + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + + if upEps == 0 { + return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable + } + return grpc.Address{Addr: addr}, func() {}, nil + } + + for { + b.mu.RLock() + ch := b.upc + b.mu.RUnlock() + select { + case <-ch: + case <-ctx.Done(): + return grpc.Address{Addr: ""}, nil, ctx.Err() + } + b.mu.RLock() + addr = b.pinAddr + upEps := len(b.upEps) + b.mu.RUnlock() + if addr == "" { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if upEps > 0 { + break + } + } + return grpc.Address{Addr: addr}, func() {}, nil +} + +func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } + +func (b *simpleBalancer) Close() error { + b.mu.Lock() + defer b.mu.Unlock() + // In case gRPC calls close twice. TODO: remove the checking + // when we are sure that gRPC won't call close twice. 
+ if b.closed { + return nil + } + b.closed = true + close(b.notifyCh) + // terminate all waiting Get()s + b.pinAddr = "" + if len(b.upEps) == 0 { + close(b.upc) + } + return nil +} + +func getHost(ep string) string { + url, uerr := url.Parse(ep) + if uerr != nil || !strings.Contains(ep, "://") { + return ep + } + return url.Host +} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go index 2dc7e8675c8..8263890bdff 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -20,25 +20,22 @@ import ( "fmt" "net" "net/url" - "strconv" "strings" "sync" "time" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" ) var ( ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") - ErrOldCluster = errors.New("etcdclient: old cluster version") ) // Client provides and manages an etcd v3 client session. @@ -50,20 +47,19 @@ type Client struct { Auth Maintenance - conn *grpc.ClientConn - dialerrc chan error - - cfg Config - creds *credentials.TransportCredentials - balancer *healthBalancer - mu sync.Mutex + conn *grpc.ClientConn + cfg Config + creds *credentials.TransportCredentials + balancer *simpleBalancer + retryWrapper retryRpcFunc + retryAuthWrapper retryRpcFunc ctx context.Context cancel context.CancelFunc - // Username is a user name for authentication. + // Username is a username for authentication Username string - // Password is a password for authentication. 
+ // Password is a password for authentication Password string // tokenCred is an instance of WithPerRPCCredentials()'s argument tokenCred *authTokenCredential @@ -78,28 +74,26 @@ func New(cfg Config) (*Client, error) { return newClient(&cfg) } -// NewCtxClient creates a client with a context but no underlying grpc -// connection. This is useful for embedded cases that override the -// service interface implementations and do not need connection management. -func NewCtxClient(ctx context.Context) *Client { - cctx, cancel := context.WithCancel(ctx) - return &Client{ctx: cctx, cancel: cancel} -} - // NewFromURL creates a new etcdv3 client from a URL. func NewFromURL(url string) (*Client, error) { return New(Config{Endpoints: []string{url}}) } +// NewFromConfigFile creates a new etcdv3 client from a configuration file. +func NewFromConfigFile(path string) (*Client, error) { + cfg, err := configFromFile(path) + if err != nil { + return nil, err + } + return New(*cfg) +} + // Close shuts down the client's etcd connections. func (c *Client) Close() error { c.cancel() c.Watcher.Close() c.Lease.Close() - if c.conn != nil { - return toErr(c.ctx, c.conn.Close()) - } - return c.ctx.Err() + return toErr(c.ctx, c.conn.Close()) } // Ctx is a context for "out of band" messages (e.g., for sending @@ -117,23 +111,8 @@ func (c *Client) Endpoints() (eps []string) { // SetEndpoints updates client's endpoints. func (c *Client) SetEndpoints(eps ...string) { - c.mu.Lock() c.cfg.Endpoints = eps - c.mu.Unlock() - c.balancer.updateAddrs(eps...) - - // updating notifyCh can trigger new connections, - // need update addrs if all connections are down - // or addrs does not include pinAddr. 
- c.balancer.mu.RLock() - update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr) - c.balancer.mu.RUnlock() - if update { - select { - case c.balancer.updateAddrsC <- notifyNext: - case <-c.balancer.stopc: - } - } + c.balancer.updateAddrs(eps) } // Sync synchronizes client's endpoints with the known endpoints from the etcd membership. @@ -160,10 +139,8 @@ func (c *Client) autoSync() { case <-c.ctx.Done(): return case <-time.After(c.cfg.AutoSyncInterval): - ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) - err := c.Sync(ctx) - cancel() - if err != nil && err != c.ctx.Err() { + ctx, _ := context.WithTimeout(c.ctx, 5*time.Second) + if err := c.Sync(ctx); err != nil && err != c.ctx.Err() { logger.Println("Auto sync endpoints failed:", err) } } @@ -192,7 +169,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { host = endpoint url, uerr := url.Parse(endpoint) if uerr != nil || !strings.Contains(endpoint, "://") { - return proto, host, scheme + return } scheme = url.Scheme @@ -200,13 +177,12 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { host = url.Host switch url.Scheme { case "http", "https": - case "unix", "unixs": + case "unix": proto = "unix" - host = url.Host + url.Path default: proto, host = "", "" } - return proto, host, scheme + return } func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { @@ -215,7 +191,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden case "unix": case "http": creds = nil - case "https", "unixs": + case "https": if creds != nil { break } @@ -225,7 +201,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden default: creds = nil } - return creds + return } // dialSetupOpts gives the dial opts prior to any authentication @@ -233,22 +209,10 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts if c.cfg.DialTimeout > 0 { opts = 
[]grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} } - if c.cfg.DialKeepAliveTime > 0 { - params := keepalive.ClientParameters{ - Time: c.cfg.DialKeepAliveTime, - Timeout: c.cfg.DialKeepAliveTimeout, - } - opts = append(opts, grpc.WithKeepaliveParams(params)) - } opts = append(opts, dopts...) f := func(host string, t time.Duration) (net.Conn, error) { - proto, host, _ := parseEndpoint(c.balancer.endpoint(host)) - if host == "" && endpoint != "" { - // dialing an endpoint not in the balancer; use - // endpoint passed into dial - proto, host, _ = parseEndpoint(endpoint) - } + proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host)) if proto == "" { return nil, fmt.Errorf("unknown scheme for %q", host) } @@ -258,14 +222,7 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts default: } dialer := &net.Dialer{Timeout: t} - conn, err := dialer.DialContext(c.ctx, proto, host) - if err != nil { - select { - case c.dialerrc <- err: - default: - } - } - return conn, err + return dialer.DialContext(c.ctx, proto, host) } opts = append(opts, grpc.WithDialer(f)) @@ -331,23 +288,21 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo defer cancel() ctx = cctx } - - err := c.getToken(ctx) - if err != nil { - if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { - if err == ctx.Err() && ctx.Err() != c.ctx.Err() { - err = context.DeadlineExceeded - } - return nil, err + if err := c.getToken(ctx); err != nil { + if err == ctx.Err() && ctx.Err() != c.ctx.Err() { + err = grpc.ErrClientConnTimeout } - } else { - opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) + return nil, err } + + opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) } - opts = append(opts, c.cfg.DialOptions...) 
+ // add metrics options + opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor)) + opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor)) - conn, err := grpc.DialContext(c.ctx, host, opts...) + conn, err := grpc.Dial(host, opts...) if err != nil { return nil, err } @@ -358,7 +313,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo // when the cluster has a leader. func WithRequireLeader(ctx context.Context) context.Context { md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewOutgoingContext(ctx, md) + return metadata.NewContext(ctx, md) } func newClient(cfg *Config) (*Client, error) { @@ -372,31 +327,20 @@ func newClient(cfg *Config) (*Client, error) { } // use a temporary skeleton client to bootstrap first connection - baseCtx := context.TODO() - if cfg.Context != nil { - baseCtx = cfg.Context - } - - ctx, cancel := context.WithCancel(baseCtx) + ctx, cancel := context.WithCancel(context.TODO()) client := &Client{ - conn: nil, - dialerrc: make(chan error, 1), - cfg: *cfg, - creds: creds, - ctx: ctx, - cancel: cancel, + conn: nil, + cfg: *cfg, + creds: creds, + ctx: ctx, + cancel: cancel, } if cfg.Username != "" && cfg.Password != "" { client.Username = cfg.Username client.Password = cfg.Password } - client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) { - return grpcHealthCheck(client, ep) - }) - - // use Endpoints[0] so that for https:// without any tls config given, then - // grpc will assume the certificate server name is the endpoint host. 
+ client.balancer = newSimpleBalancer(cfg.Endpoints) conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) if err != nil { client.cancel() @@ -404,27 +348,24 @@ func newClient(cfg *Config) (*Client, error) { return nil, err } client.conn = conn + client.retryWrapper = client.newRetryWrapper() + client.retryAuthWrapper = client.newAuthRetryWrapper() // wait for a connection if cfg.DialTimeout > 0 { hasConn := false waitc := time.After(cfg.DialTimeout) select { - case <-client.balancer.ready(): + case <-client.balancer.readyc: hasConn = true case <-ctx.Done(): case <-waitc: } if !hasConn { - err := context.DeadlineExceeded - select { - case err = <-client.dialerrc: - default: - } client.cancel() client.balancer.Close() conn.Close() - return nil, err + return nil, grpc.ErrClientConnTimeout } } @@ -435,57 +376,10 @@ func newClient(cfg *Config) (*Client, error) { client.Auth = NewAuth(client) client.Maintenance = NewMaintenance(client) - if cfg.RejectOldCluster { - if err := client.checkVersion(); err != nil { - client.Close() - return nil, err - } - } - go client.autoSync() return client, nil } -func (c *Client) checkVersion() (err error) { - var wg sync.WaitGroup - errc := make(chan error, len(c.cfg.Endpoints)) - ctx, cancel := context.WithCancel(c.ctx) - if c.cfg.DialTimeout > 0 { - ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) - } - wg.Add(len(c.cfg.Endpoints)) - for _, ep := range c.cfg.Endpoints { - // if cluster is current, any endpoint gives a recent version - go func(e string) { - defer wg.Done() - resp, rerr := c.Status(ctx, e) - if rerr != nil { - errc <- rerr - return - } - vs := strings.Split(resp.Version, ".") - maj, min := 0, 0 - if len(vs) >= 2 { - maj, _ = strconv.Atoi(vs[0]) - min, rerr = strconv.Atoi(vs[1]) - } - if maj < 3 || (maj == 3 && min < 2) { - rerr = ErrOldCluster - } - errc <- rerr - }(ep) - } - // wait for success - for i := 0; i < len(c.cfg.Endpoints); i++ { - if err = <-errc; err == nil { - break - } - 
} - cancel() - wg.Wait() - return err -} - // ActiveConnection returns the current in-use connection func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } @@ -498,14 +392,14 @@ func isHaltErr(ctx context.Context, err error) bool { if err == nil { return false } - ev, _ := status.FromError(err) + code := grpc.Code(err) // Unavailable codes mean the system will be right back. // (e.g., can't connect, lost leader) // Treat Internal codes as if something failed, leaving the // system in an inconsistent state, but retrying could make progress. // (e.g., failed in middle of send, corrupted frame) // TODO: are permanent Internal errors possible from grpc? - return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal + return code != codes.Unavailable && code != codes.Internal } func toErr(ctx context.Context, err error) error { @@ -516,8 +410,7 @@ func toErr(ctx context.Context, err error) error { if _, ok := err.(rpctypes.EtcdError); ok { return err } - ev, _ := status.FromError(err) - code := ev.Code() + code := grpc.Code(err) switch code { case codes.DeadlineExceeded: fallthrough @@ -526,16 +419,9 @@ func toErr(ctx context.Context, err error) error { err = ctx.Err() } case codes.Unavailable: + err = ErrNoAvailableEndpoints case codes.FailedPrecondition: err = grpc.ErrClientConnClosing } return err } - -func canceledByCaller(stopCtx context.Context, err error) bool { - if stopCtx.Err() == nil || err == nil { - return false - } - - return err == context.Canceled || err == context.DeadlineExceeded -} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go index 2df9f295165..b9bff626bd7 100644 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go @@ -16,8 +16,8 @@ package clientv3 import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" + "google.golang.org/grpc" ) type ( @@ -50,43 +50,53 @@ func 
NewCluster(c *Client) Cluster { return &cluster{remote: RetryClusterClient(c)} } -func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster { - return &cluster{remote: remote} -} - func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { r := &pb.MemberAddRequest{PeerURLs: peerAddrs} resp, err := c.remote.MemberAdd(ctx, r) - if err != nil { + if err == nil { + return (*MemberAddResponse)(resp), nil + } + if isHaltErr(ctx, err) { return nil, toErr(ctx, err) } - return (*MemberAddResponse)(resp), nil + return nil, toErr(ctx, err) } func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { r := &pb.MemberRemoveRequest{ID: id} resp, err := c.remote.MemberRemove(ctx, r) - if err != nil { + if err == nil { + return (*MemberRemoveResponse)(resp), nil + } + if isHaltErr(ctx, err) { return nil, toErr(ctx, err) } - return (*MemberRemoveResponse)(resp), nil + return nil, toErr(ctx, err) } func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { // it is safe to retry on update. - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r) - if err == nil { - return (*MemberUpdateResponse)(resp), nil + for { + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false)) + if err == nil { + return (*MemberUpdateResponse)(resp), nil + } + if isHaltErr(ctx, err) { + return nil, toErr(ctx, err) + } } - return nil, toErr(ctx, err) } func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { // it is safe to retry on list. 
- resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}) - if err == nil { - return (*MemberListResponse)(resp), nil + for { + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false)) + if err == nil { + return (*MemberListResponse)(resp), nil + } + if isHaltErr(ctx, err) { + return nil, toErr(ctx, err) + } } - return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go index 41e80c1da5d..32d97eb0cc1 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ b/vendor/github.com/coreos/etcd/clientv3/compact_op.go @@ -44,8 +44,10 @@ func (op CompactOp) toRequest() *pb.CompactionRequest { return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} } -// WithCompactPhysical makes Compact wait until all compacted entries are -// removed from the etcd server's storage. +// WithCompactPhysical makes compact RPC call wait until +// the compaction is physically applied to the local database +// such that compacted entries are totally removed from the +// backend database. func WithCompactPhysical() CompactOption { return func(op *CompactOp) { op.physical = true } } diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go index 68a25fd800f..f89ffb52c4a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ b/vendor/github.com/coreos/etcd/clientv3/compare.go @@ -82,24 +82,6 @@ func ModRevision(key string) Cmp { return Cmp{Key: []byte(key), Target: pb.Compare_MOD} } -// KeyBytes returns the byte slice holding with the comparison key. -func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } - -// WithKeyBytes sets the byte slice for the comparison key. -func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } - -// ValueBytes returns the byte slice holding the comparison value, if any. 
-func (cmp *Cmp) ValueBytes() []byte { - if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { - return tu.Value - } - return nil -} - -// WithValueBytes sets the byte slice for the comparison's value. -func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } - -// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. func mustInt64(val interface{}) int64 { if v, ok := val.(int64); ok { return v @@ -109,12 +91,3 @@ func mustInt64(val interface{}) int64 { } panic("bad value") } - -// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an -// int64 otherwise. -func mustInt64orLeaseID(val interface{}) int64 { - if v, ok := val.(LeaseID); ok { - return int64(v) - } - return mustInt64(val) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD b/vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD deleted file mode 100644 index 4ee0f5650b2..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "election.go", - "key.go", - "mutex.go", - "session.go", - "stm.go", - ], - importpath = "github.com/coreos/etcd/clientv3/concurrency", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go 
b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go deleted file mode 100644 index dcdbf511d1b..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package concurrency implements concurrency operations on top of -// etcd such as distributed locks, barriers, and elections. -package concurrency diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go deleted file mode 100644 index c092bde0aeb..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package concurrency - -import ( - "errors" - "fmt" - - v3 "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/mvccpb" - - "golang.org/x/net/context" -) - -var ( - ErrElectionNotLeader = errors.New("election: not leader") - ErrElectionNoLeader = errors.New("election: no leader") -) - -type Election struct { - session *Session - - keyPrefix string - - leaderKey string - leaderRev int64 - leaderSession *Session - hdr *pb.ResponseHeader -} - -// NewElection returns a new election on a given key prefix. -func NewElection(s *Session, pfx string) *Election { - return &Election{session: s, keyPrefix: pfx + "/"} -} - -// ResumeElection initializes an election with a known leader. -func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election { - return &Election{ - session: s, - leaderKey: leaderKey, - leaderRev: leaderRev, - leaderSession: s, - } -} - -// Campaign puts a value as eligible for the election. It blocks until -// it is elected, an error occurs, or the context is cancelled. 
-func (e *Election) Campaign(ctx context.Context, val string) error { - s := e.session - client := e.session.Client() - - k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease()) - txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0)) - txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease()))) - txn = txn.Else(v3.OpGet(k)) - resp, err := txn.Commit() - if err != nil { - return err - } - e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s - if !resp.Succeeded { - kv := resp.Responses[0].GetResponseRange().Kvs[0] - e.leaderRev = kv.CreateRevision - if string(kv.Value) != val { - if err = e.Proclaim(ctx, val); err != nil { - e.Resign(ctx) - return err - } - } - } - - _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) - if err != nil { - // clean up in case of context cancel - select { - case <-ctx.Done(): - e.Resign(client.Ctx()) - default: - e.leaderSession = nil - } - return err - } - e.hdr = resp.Header - - return nil -} - -// Proclaim lets the leader announce a new value without another election. -func (e *Election) Proclaim(ctx context.Context, val string) error { - if e.leaderSession == nil { - return ErrElectionNotLeader - } - client := e.session.Client() - cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) - txn := client.Txn(ctx).If(cmp) - txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease()))) - tresp, terr := txn.Commit() - if terr != nil { - return terr - } - if !tresp.Succeeded { - e.leaderKey = "" - return ErrElectionNotLeader - } - - e.hdr = tresp.Header - return nil -} - -// Resign lets a leader start a new election. 
-func (e *Election) Resign(ctx context.Context) (err error) { - if e.leaderSession == nil { - return nil - } - client := e.session.Client() - cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) - resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit() - if err == nil { - e.hdr = resp.Header - } - e.leaderKey = "" - e.leaderSession = nil - return err -} - -// Leader returns the leader value for the current election. -func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) { - client := e.session.Client() - resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) - if err != nil { - return nil, err - } else if len(resp.Kvs) == 0 { - // no leader currently elected - return nil, ErrElectionNoLeader - } - return resp, nil -} - -// Observe returns a channel that reliably observes ordered leader proposals -// as GetResponse values on every current elected leader key. It will not -// necessarily fetch all historical leader updates, but will always post the -// most recent leader value. -// -// The channel closes when the context is canceled or the underlying watcher -// is otherwise disrupted. -func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse { - retc := make(chan v3.GetResponse) - go e.observe(ctx, retc) - return retc -} - -func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { - client := e.session.Client() - - defer close(ch) - for { - resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) - if err != nil { - return - } - - var kv *mvccpb.KeyValue - var hdr *pb.ResponseHeader - - if len(resp.Kvs) == 0 { - cctx, cancel := context.WithCancel(ctx) - // wait for first key put on prefix - opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()} - wch := client.Watch(cctx, e.keyPrefix, opts...) 
- for kv == nil { - wr, ok := <-wch - if !ok || wr.Err() != nil { - cancel() - return - } - // only accept puts; a delete will make observe() spin - for _, ev := range wr.Events { - if ev.Type == mvccpb.PUT { - hdr, kv = &wr.Header, ev.Kv - // may have multiple revs; hdr.rev = the last rev - // set to kv's rev in case batch has multiple Puts - hdr.Revision = kv.ModRevision - break - } - } - } - cancel() - } else { - hdr, kv = resp.Header, resp.Kvs[0] - } - - select { - case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}: - case <-ctx.Done(): - return - } - - cctx, cancel := context.WithCancel(ctx) - wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1)) - keyDeleted := false - for !keyDeleted { - wr, ok := <-wch - if !ok { - cancel() - return - } - for _, ev := range wr.Events { - if ev.Type == mvccpb.DELETE { - keyDeleted = true - break - } - resp.Header = &wr.Header - resp.Kvs = []*mvccpb.KeyValue{ev.Kv} - select { - case ch <- *resp: - case <-cctx.Done(): - cancel() - return - } - } - } - cancel() - } -} - -// Key returns the leader key if elected, empty string otherwise. -func (e *Election) Key() string { return e.leaderKey } - -// Rev returns the leader key's creation revision, if elected. -func (e *Election) Rev() int64 { return e.leaderRev } - -// Header is the response header from the last successful election proposal. -func (e *Election) Header() *pb.ResponseHeader { return e.hdr } diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go deleted file mode 100644 index 9936737756c..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "fmt" - - v3 "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/mvccpb" - - "golang.org/x/net/context" -) - -func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error { - cctx, cancel := context.WithCancel(ctx) - defer cancel() - - var wr v3.WatchResponse - wch := client.Watch(cctx, key, v3.WithRev(rev)) - for wr = range wch { - for _, ev := range wr.Events { - if ev.Type == mvccpb.DELETE { - return nil - } - } - } - if err := wr.Err(); err != nil { - return err - } - if err := ctx.Err(); err != nil { - return err - } - return fmt.Errorf("lost watcher waiting for delete") -} - -// waitDeletes efficiently waits until all keys matching the prefix and no greater -// than the create revision. -func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { - getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) - for { - resp, err := client.Get(ctx, pfx, getOpts...) 
- if err != nil { - return nil, err - } - if len(resp.Kvs) == 0 { - return resp.Header, nil - } - lastKey := string(resp.Kvs[0].Key) - if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { - return nil, err - } - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go deleted file mode 100644 index 736a9d3d353..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "fmt" - "sync" - - v3 "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "golang.org/x/net/context" -) - -// Mutex implements the sync Locker interface with etcd -type Mutex struct { - s *Session - - pfx string - myKey string - myRev int64 - hdr *pb.ResponseHeader -} - -func NewMutex(s *Session, pfx string) *Mutex { - return &Mutex{s, pfx + "/", "", -1, nil} -} - -// Lock locks the mutex with a cancelable context. If the context is canceled -// while trying to acquire the lock, the mutex tries to clean its stale lock entry. 
-func (m *Mutex) Lock(ctx context.Context) error { - s := m.s - client := m.s.Client() - - m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) - cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) - // put self in lock waiters via myKey; oldest waiter holds lock - put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) - // reuse key in case this session already holds the lock - get := v3.OpGet(m.myKey) - // fetch current holder to complete uncontended path with only one RPC - getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) - resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() - if err != nil { - return err - } - m.myRev = resp.Header.Revision - if !resp.Succeeded { - m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision - } - // if no key on prefix / the minimum rev is key, already hold the lock - ownerKey := resp.Responses[1].GetResponseRange().Kvs - if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { - m.hdr = resp.Header - return nil - } - - // wait for deletion revisions prior to myKey - hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) - // release lock key if cancelled - select { - case <-ctx.Done(): - m.Unlock(client.Ctx()) - default: - m.hdr = hdr - } - return werr -} - -func (m *Mutex) Unlock(ctx context.Context) error { - client := m.s.Client() - if _, err := client.Delete(ctx, m.myKey); err != nil { - return err - } - m.myKey = "\x00" - m.myRev = -1 - return nil -} - -func (m *Mutex) IsOwner() v3.Cmp { - return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev) -} - -func (m *Mutex) Key() string { return m.myKey } - -// Header is the response header received from etcd on acquiring the lock. 
-func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr } - -type lockerMutex struct{ *Mutex } - -func (lm *lockerMutex) Lock() { - client := lm.s.Client() - if err := lm.Mutex.Lock(client.Ctx()); err != nil { - panic(err) - } -} -func (lm *lockerMutex) Unlock() { - client := lm.s.Client() - if err := lm.Mutex.Unlock(client.Ctx()); err != nil { - panic(err) - } -} - -// NewLocker creates a sync.Locker backed by an etcd mutex. -func NewLocker(s *Session, pfx string) sync.Locker { - return &lockerMutex{NewMutex(s, pfx)} -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go deleted file mode 100644 index 55cb553ea4a..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "time" - - v3 "github.com/coreos/etcd/clientv3" - - "golang.org/x/net/context" -) - -const defaultSessionTTL = 60 - -// Session represents a lease kept alive for the lifetime of a client. -// Fault-tolerant applications may use sessions to reason about liveness. -type Session struct { - client *v3.Client - opts *sessionOptions - id v3.LeaseID - - cancel context.CancelFunc - donec <-chan struct{} -} - -// NewSession gets the leased session for a client. 
-func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { - ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} - for _, opt := range opts { - opt(ops) - } - - id := ops.leaseID - if id == v3.NoLease { - resp, err := client.Grant(ops.ctx, int64(ops.ttl)) - if err != nil { - return nil, err - } - id = v3.LeaseID(resp.ID) - } - - ctx, cancel := context.WithCancel(ops.ctx) - keepAlive, err := client.KeepAlive(ctx, id) - if err != nil || keepAlive == nil { - cancel() - return nil, err - } - - donec := make(chan struct{}) - s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} - - // keep the lease alive until client error or cancelled context - go func() { - defer close(donec) - for range keepAlive { - // eat messages until keep alive channel closes - } - }() - - return s, nil -} - -// Client is the etcd client that is attached to the session. -func (s *Session) Client() *v3.Client { - return s.client -} - -// Lease is the lease ID for keys bound to the session. -func (s *Session) Lease() v3.LeaseID { return s.id } - -// Done returns a channel that closes when the lease is orphaned, expires, or -// is otherwise no longer being refreshed. -func (s *Session) Done() <-chan struct{} { return s.donec } - -// Orphan ends the refresh for the session lease. This is useful -// in case the state of the client connection is indeterminate (revoke -// would fail) or when transferring lease ownership. -func (s *Session) Orphan() { - s.cancel() - <-s.donec -} - -// Close orphans the session and revokes the session lease. -func (s *Session) Close() error { - s.Orphan() - // if revoke takes longer than the ttl, lease is expired anyway - ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second) - _, err := s.client.Revoke(ctx, s.id) - cancel() - return err -} - -type sessionOptions struct { - ttl int - leaseID v3.LeaseID - ctx context.Context -} - -// SessionOption configures Session. 
-type SessionOption func(*sessionOptions) - -// WithTTL configures the session's TTL in seconds. -// If TTL is <= 0, the default 60 seconds TTL will be used. -func WithTTL(ttl int) SessionOption { - return func(so *sessionOptions) { - if ttl > 0 { - so.ttl = ttl - } - } -} - -// WithLease specifies the existing leaseID to be used for the session. -// This is useful in process restart scenario, for example, to reclaim -// leadership from an election prior to restart. -func WithLease(leaseID v3.LeaseID) SessionOption { - return func(so *sessionOptions) { - so.leaseID = leaseID - } -} - -// WithContext assigns a context to the session instead of defaulting to -// using the client context. This is useful for canceling NewSession and -// Close operations immediately without having to close the client. If the -// context is canceled before Close() completes, the session's lease will be -// abandoned and left to expire instead of being revoked. -func WithContext(ctx context.Context) SessionOption { - return func(so *sessionOptions) { - so.ctx = ctx - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go deleted file mode 100644 index 6bfd70ec428..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package concurrency - -import ( - "math" - - v3 "github.com/coreos/etcd/clientv3" - - "golang.org/x/net/context" -) - -// STM is an interface for software transactional memory. -type STM interface { - // Get returns the value for a key and inserts the key in the txn's read set. - // If Get fails, it aborts the transaction with an error, never returning. - Get(key ...string) string - // Put adds a value for a key to the write set. - Put(key, val string, opts ...v3.OpOption) - // Rev returns the revision of a key in the read set. - Rev(key string) int64 - // Del deletes a key. - Del(key string) - - // commit attempts to apply the txn's changes to the server. - commit() *v3.TxnResponse - reset() -} - -// Isolation is an enumeration of transactional isolation levels which -// describes how transactions should interfere and conflict. -type Isolation int - -const ( - // SerializableSnapshot provides serializable isolation and also checks - // for write conflicts. - SerializableSnapshot Isolation = iota - // Serializable reads within the same transaction attempt return data - // from the at the revision of the first read. - Serializable - // RepeatableReads reads within the same transaction attempt always - // return the same data. - RepeatableReads - // ReadCommitted reads keys from any committed revision. - ReadCommitted -) - -// stmError safely passes STM errors through panic to the STM error channel. -type stmError struct{ err error } - -type stmOptions struct { - iso Isolation - ctx context.Context - prefetch []string -} - -type stmOption func(*stmOptions) - -// WithIsolation specifies the transaction isolation level. -func WithIsolation(lvl Isolation) stmOption { - return func(so *stmOptions) { so.iso = lvl } -} - -// WithAbortContext specifies the context for permanently aborting the transaction. 
-func WithAbortContext(ctx context.Context) stmOption { - return func(so *stmOptions) { so.ctx = ctx } -} - -// WithPrefetch is a hint to prefetch a list of keys before trying to apply. -// If an STM transaction will unconditionally fetch a set of keys, prefetching -// those keys will save the round-trip cost from requesting each key one by one -// with Get(). -func WithPrefetch(keys ...string) stmOption { - return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } -} - -// NewSTM initiates a new STM instance, using serializable snapshot isolation by default. -func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { - opts := &stmOptions{ctx: c.Ctx()} - for _, f := range so { - f(opts) - } - if len(opts.prefetch) != 0 { - f := apply - apply = func(s STM) error { - s.Get(opts.prefetch...) - return f(s) - } - } - return runSTM(mkSTM(c, opts), apply) -} - -func mkSTM(c *v3.Client, opts *stmOptions) STM { - switch opts.iso { - case SerializableSnapshot: - s := &stmSerializable{ - stm: stm{client: c, ctx: opts.ctx}, - prefetch: make(map[string]*v3.GetResponse), - } - s.conflicts = func() []v3.Cmp { - return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...) 
- } - return s - case Serializable: - s := &stmSerializable{ - stm: stm{client: c, ctx: opts.ctx}, - prefetch: make(map[string]*v3.GetResponse), - } - s.conflicts = func() []v3.Cmp { return s.rset.cmps() } - return s - case RepeatableReads: - s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} - s.conflicts = func() []v3.Cmp { return s.rset.cmps() } - return s - case ReadCommitted: - s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} - s.conflicts = func() []v3.Cmp { return nil } - return s - default: - panic("unsupported stm") - } -} - -type stmResponse struct { - resp *v3.TxnResponse - err error -} - -func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) { - outc := make(chan stmResponse, 1) - go func() { - defer func() { - if r := recover(); r != nil { - e, ok := r.(stmError) - if !ok { - // client apply panicked - panic(r) - } - outc <- stmResponse{nil, e.err} - } - }() - var out stmResponse - for { - s.reset() - if out.err = apply(s); out.err != nil { - break - } - if out.resp = s.commit(); out.resp != nil { - break - } - } - outc <- out - }() - r := <-outc - return r.resp, r.err -} - -// stm implements repeatable-read software transactional memory over etcd -type stm struct { - client *v3.Client - ctx context.Context - // rset holds read key values and revisions - rset readSet - // wset holds overwritten keys and their values - wset writeSet - // getOpts are the opts used for gets - getOpts []v3.OpOption - // conflicts computes the current conflicts on the txn - conflicts func() []v3.Cmp -} - -type stmPut struct { - val string - op v3.Op -} - -type readSet map[string]*v3.GetResponse - -func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) { - for i, resp := range txnresp.Responses { - rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange()) - } -} - -// first returns the store revision from the first fetch -func (rs readSet) first() int64 { - ret := int64(math.MaxInt64 - 1) - 
for _, resp := range rs { - if rev := resp.Header.Revision; rev < ret { - ret = rev - } - } - return ret -} - -// cmps guards the txn from updates to read set -func (rs readSet) cmps() []v3.Cmp { - cmps := make([]v3.Cmp, 0, len(rs)) - for k, rk := range rs { - cmps = append(cmps, isKeyCurrent(k, rk)) - } - return cmps -} - -type writeSet map[string]stmPut - -func (ws writeSet) get(keys ...string) *stmPut { - for _, key := range keys { - if wv, ok := ws[key]; ok { - return &wv - } - } - return nil -} - -// cmps returns a cmp list testing no writes have happened past rev -func (ws writeSet) cmps(rev int64) []v3.Cmp { - cmps := make([]v3.Cmp, 0, len(ws)) - for key := range ws { - cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) - } - return cmps -} - -// puts is the list of ops for all pending writes -func (ws writeSet) puts() []v3.Op { - puts := make([]v3.Op, 0, len(ws)) - for _, v := range ws { - puts = append(puts, v.op) - } - return puts -} - -func (s *stm) Get(keys ...string) string { - if wv := s.wset.get(keys...); wv != nil { - return wv.val - } - return respToValue(s.fetch(keys...)) -} - -func (s *stm) Put(key, val string, opts ...v3.OpOption) { - s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)} -} - -func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} } - -func (s *stm) Rev(key string) int64 { - if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 { - return resp.Kvs[0].ModRevision - } - return 0 -} - -func (s *stm) commit() *v3.TxnResponse { - txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit() - if err != nil { - panic(stmError{err}) - } - if txnresp.Succeeded { - return txnresp - } - return nil -} - -func (s *stm) fetch(keys ...string) *v3.GetResponse { - if len(keys) == 0 { - return nil - } - ops := make([]v3.Op, len(keys)) - for i, key := range keys { - if resp, ok := s.rset[key]; ok { - return resp - } - ops[i] = v3.OpGet(key, s.getOpts...) 
- } - txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit() - if err != nil { - panic(stmError{err}) - } - s.rset.add(keys, txnresp) - return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange()) -} - -func (s *stm) reset() { - s.rset = make(map[string]*v3.GetResponse) - s.wset = make(map[string]stmPut) -} - -type stmSerializable struct { - stm - prefetch map[string]*v3.GetResponse -} - -func (s *stmSerializable) Get(keys ...string) string { - if wv := s.wset.get(keys...); wv != nil { - return wv.val - } - firstRead := len(s.rset) == 0 - for _, key := range keys { - if resp, ok := s.prefetch[key]; ok { - delete(s.prefetch, key) - s.rset[key] = resp - } - } - resp := s.stm.fetch(keys...) - if firstRead { - // txn's base revision is defined by the first read - s.getOpts = []v3.OpOption{ - v3.WithRev(resp.Header.Revision), - v3.WithSerializable(), - } - } - return respToValue(resp) -} - -func (s *stmSerializable) Rev(key string) int64 { - s.Get(key) - return s.stm.Rev(key) -} - -func (s *stmSerializable) gets() ([]string, []v3.Op) { - keys := make([]string, 0, len(s.rset)) - ops := make([]v3.Op, 0, len(s.rset)) - for k := range s.rset { - keys = append(keys, k) - ops = append(ops, v3.OpGet(k)) - } - return keys, ops -} - -func (s *stmSerializable) commit() *v3.TxnResponse { - keys, getops := s.gets() - txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...) 
- // use Else to prefetch keys in case of conflict to save a round trip - txnresp, err := txn.Else(getops...).Commit() - if err != nil { - panic(stmError{err}) - } - if txnresp.Succeeded { - return txnresp - } - // load prefetch with Else data - s.rset.add(keys, txnresp) - s.prefetch = s.rset - s.getOpts = nil - return nil -} - -func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp { - if len(r.Kvs) != 0 { - return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision) - } - return v3.Compare(v3.ModRevision(k), "=", 0) -} - -func respToValue(resp *v3.GetResponse) string { - if resp == nil || len(resp.Kvs) == 0 { - return "" - } - return string(resp.Kvs[0].Value) -} - -// NewSTMRepeatable is deprecated. -func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { - return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads)) -} - -// NewSTMSerializable is deprecated. -func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { - return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable)) -} - -// NewSTMReadCommitted is deprecated. -func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { - return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted)) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go index ccf7445c7ba..d1d5f40906a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -16,47 +16,98 @@ package clientv3 import ( "crypto/tls" + "crypto/x509" + "io/ioutil" "time" - "golang.org/x/net/context" - "google.golang.org/grpc" + "github.com/coreos/etcd/pkg/tlsutil" + "github.com/ghodss/yaml" ) type Config struct { - // Endpoints is a list of URLs. 
- Endpoints []string `json:"endpoints"` + // Endpoints is a list of URLs + Endpoints []string // AutoSyncInterval is the interval to update endpoints with its latest members. // 0 disables auto-sync. By default auto-sync is disabled. - AutoSyncInterval time.Duration `json:"auto-sync-interval"` + AutoSyncInterval time.Duration // DialTimeout is the timeout for failing to establish a connection. - DialTimeout time.Duration `json:"dial-timeout"` - - // DialKeepAliveTime is the time in seconds after which client pings the server to see if - // transport is alive. - DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` - - // DialKeepAliveTimeout is the time in seconds that the client waits for a response for the - // keep-alive probe. If the response is not received in this time, the connection is closed. - DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + DialTimeout time.Duration // TLS holds the client secure credentials, if any. TLS *tls.Config - // Username is a user name for authentication. - Username string `json:"username"` + // Username is a username for authentication + Username string - // Password is a password for authentication. - Password string `json:"password"` - - // RejectOldCluster when set will refuse to create a client against an outdated cluster. - RejectOldCluster bool `json:"reject-old-cluster"` - - // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). - DialOptions []grpc.DialOption - - // Context is the default client context; it can be used to cancel grpc dial out and - // other operations that do not have an explicit context. 
- Context context.Context + // Password is a password for authentication + Password string +} + +type yamlConfig struct { + Endpoints []string `json:"endpoints"` + AutoSyncInterval time.Duration `json:"auto-sync-interval"` + DialTimeout time.Duration `json:"dial-timeout"` + InsecureTransport bool `json:"insecure-transport"` + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"` + Certfile string `json:"cert-file"` + Keyfile string `json:"key-file"` + CAfile string `json:"ca-file"` +} + +func configFromFile(fpath string) (*Config, error) { + b, err := ioutil.ReadFile(fpath) + if err != nil { + return nil, err + } + + yc := &yamlConfig{} + + err = yaml.Unmarshal(b, yc) + if err != nil { + return nil, err + } + + cfg := &Config{ + Endpoints: yc.Endpoints, + AutoSyncInterval: yc.AutoSyncInterval, + DialTimeout: yc.DialTimeout, + } + + if yc.InsecureTransport { + cfg.TLS = nil + return cfg, nil + } + + var ( + cert *tls.Certificate + cp *x509.CertPool + ) + + if yc.Certfile != "" && yc.Keyfile != "" { + cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil) + if err != nil { + return nil, err + } + } + + if yc.CAfile != "" { + cp, err = tlsutil.NewCertPool([]string{yc.CAfile}) + if err != nil { + return nil, err + } + } + + tlscfg := &tls.Config{ + MinVersion: tls.VersionTLS10, + InsecureSkipVerify: yc.InsecureSkipTLSVerify, + RootCAs: cp, + } + if cert != nil { + tlscfg.Certificates = []tls.Certificate{*cert} + } + cfg.TLS = tlscfg + + return cfg, nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go index dacc5bb346f..470ca4dc476 100644 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ b/vendor/github.com/coreos/etcd/clientv3/doc.go @@ -28,7 +28,7 @@ // Make sure to close the client after using it. If the client is not closed, the // connection will have leaky goroutines. 
// -// To specify a client request timeout, wrap the context with context.WithTimeout: +// To specify client request timeout, pass context.WithTimeout to APIs: // // ctx, cancel := context.WithTimeout(context.Background(), timeout) // resp, err := kvc.Put(ctx, "sample_key", "sample_value") diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go deleted file mode 100644 index 52bea90e66e..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go +++ /dev/null @@ -1,627 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "errors" - "net/url" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/status" -) - -const ( - minHealthRetryDuration = 3 * time.Second - unknownService = "unknown service grpc.health.v1.Health" -) - -// ErrNoAddrAvilable is returned by Get() when the balancer does not have -// any active connection to endpoints at the time. -// This error is returned only when opts.BlockingWait is true. 
-var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available") - -type healthCheckFunc func(ep string) (bool, error) - -type notifyMsg int - -const ( - notifyReset notifyMsg = iota - notifyNext -) - -// healthBalancer does the bare minimum to expose multiple eps -// to the grpc reconnection code path -type healthBalancer struct { - // addrs are the client's endpoint addresses for grpc - addrs []grpc.Address - - // eps holds the raw endpoints from the client - eps []string - - // notifyCh notifies grpc of the set of addresses for connecting - notifyCh chan []grpc.Address - - // readyc closes once the first connection is up - readyc chan struct{} - readyOnce sync.Once - - // healthCheck checks an endpoint's health. - healthCheck healthCheckFunc - healthCheckTimeout time.Duration - - unhealthyMu sync.RWMutex - unhealthyHostPorts map[string]time.Time - - // mu protects all fields below. - mu sync.RWMutex - - // upc closes when pinAddr transitions from empty to non-empty or the balancer closes. - upc chan struct{} - - // downc closes when grpc calls down() on pinAddr - downc chan struct{} - - // stopc is closed to signal updateNotifyLoop should stop. - stopc chan struct{} - stopOnce sync.Once - wg sync.WaitGroup - - // donec closes when all goroutines are exited - donec chan struct{} - - // updateAddrsC notifies updateNotifyLoop to update addrs. - updateAddrsC chan notifyMsg - - // grpc issues TLS cert checks using the string passed into dial so - // that string must be the host. To recover the full scheme://host URL, - // have a map from hosts to the original endpoint. - hostPort2ep map[string]string - - // pinAddr is the currently pinned address; set to the empty string on - // initialization and shutdown. 
- pinAddr string - - closed bool -} - -func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer { - notifyCh := make(chan []grpc.Address) - addrs := eps2addrs(eps) - hb := &healthBalancer{ - addrs: addrs, - eps: eps, - notifyCh: notifyCh, - readyc: make(chan struct{}), - healthCheck: hc, - unhealthyHostPorts: make(map[string]time.Time), - upc: make(chan struct{}), - stopc: make(chan struct{}), - downc: make(chan struct{}), - donec: make(chan struct{}), - updateAddrsC: make(chan notifyMsg), - hostPort2ep: getHostPort2ep(eps), - } - if timeout < minHealthRetryDuration { - timeout = minHealthRetryDuration - } - hb.healthCheckTimeout = timeout - - close(hb.downc) - go hb.updateNotifyLoop() - hb.wg.Add(1) - go func() { - defer hb.wg.Done() - hb.updateUnhealthy() - }() - return hb -} - -func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } - -func (b *healthBalancer) ConnectNotify() <-chan struct{} { - b.mu.Lock() - defer b.mu.Unlock() - return b.upc -} - -func (b *healthBalancer) ready() <-chan struct{} { return b.readyc } - -func (b *healthBalancer) endpoint(hostPort string) string { - b.mu.RLock() - defer b.mu.RUnlock() - return b.hostPort2ep[hostPort] -} - -func (b *healthBalancer) pinned() string { - b.mu.RLock() - defer b.mu.RUnlock() - return b.pinAddr -} - -func (b *healthBalancer) hostPortError(hostPort string, err error) { - if b.endpoint(hostPort) == "" { - if logger.V(4) { - logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error()) - } - return - } - - b.unhealthyMu.Lock() - b.unhealthyHostPorts[hostPort] = time.Now() - b.unhealthyMu.Unlock() - if logger.V(4) { - logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error()) - } -} - -func (b *healthBalancer) removeUnhealthy(hostPort, msg string) { - if b.endpoint(hostPort) == "" { - if logger.V(4) { - logger.Infof("clientv3/balancer: %q was not in unhealthy 
(%q)", hostPort, msg) - } - return - } - - b.unhealthyMu.Lock() - delete(b.unhealthyHostPorts, hostPort) - b.unhealthyMu.Unlock() - if logger.V(4) { - logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg) - } -} - -func (b *healthBalancer) countUnhealthy() (count int) { - b.unhealthyMu.RLock() - count = len(b.unhealthyHostPorts) - b.unhealthyMu.RUnlock() - return count -} - -func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) { - b.unhealthyMu.RLock() - _, unhealthy = b.unhealthyHostPorts[hostPort] - b.unhealthyMu.RUnlock() - return unhealthy -} - -func (b *healthBalancer) cleanupUnhealthy() { - b.unhealthyMu.Lock() - for k, v := range b.unhealthyHostPorts { - if time.Since(v) > b.healthCheckTimeout { - delete(b.unhealthyHostPorts, k) - if logger.V(4) { - logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout) - } - } - } - b.unhealthyMu.Unlock() -} - -func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) { - unhealthyCnt := b.countUnhealthy() - - b.mu.RLock() - defer b.mu.RUnlock() - - hbAddrs := b.addrs - if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) { - liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep)) - for k := range b.hostPort2ep { - liveHostPorts[k] = struct{}{} - } - return hbAddrs, liveHostPorts - } - - addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt) - liveHostPorts := make(map[string]struct{}, len(addrs)) - for _, addr := range b.addrs { - if !b.isUnhealthy(addr.Addr) { - addrs = append(addrs, addr) - liveHostPorts[addr.Addr] = struct{}{} - } - } - return addrs, liveHostPorts -} - -func (b *healthBalancer) updateUnhealthy() { - for { - select { - case <-time.After(b.healthCheckTimeout): - b.cleanupUnhealthy() - pinned := b.pinned() - if pinned == "" || b.isUnhealthy(pinned) { - select { - case b.updateAddrsC <- notifyNext: - case <-b.stopc: - return - } - } - case <-b.stopc: - return - } - } -} - 
-func (b *healthBalancer) updateAddrs(eps ...string) { - np := getHostPort2ep(eps) - - b.mu.Lock() - defer b.mu.Unlock() - - match := len(np) == len(b.hostPort2ep) - if match { - for k, v := range np { - if b.hostPort2ep[k] != v { - match = false - break - } - } - } - if match { - // same endpoints, so no need to update address - return - } - - b.hostPort2ep = np - b.addrs, b.eps = eps2addrs(eps), eps - - b.unhealthyMu.Lock() - b.unhealthyHostPorts = make(map[string]time.Time) - b.unhealthyMu.Unlock() -} - -func (b *healthBalancer) next() { - b.mu.RLock() - downc := b.downc - b.mu.RUnlock() - select { - case b.updateAddrsC <- notifyNext: - case <-b.stopc: - } - // wait until disconnect so new RPCs are not issued on old connection - select { - case <-downc: - case <-b.stopc: - } -} - -func (b *healthBalancer) updateNotifyLoop() { - defer close(b.donec) - - for { - b.mu.RLock() - upc, downc, addr := b.upc, b.downc, b.pinAddr - b.mu.RUnlock() - // downc or upc should be closed - select { - case <-downc: - downc = nil - default: - } - select { - case <-upc: - upc = nil - default: - } - switch { - case downc == nil && upc == nil: - // stale - select { - case <-b.stopc: - return - default: - } - case downc == nil: - b.notifyAddrs(notifyReset) - select { - case <-upc: - case msg := <-b.updateAddrsC: - b.notifyAddrs(msg) - case <-b.stopc: - return - } - case upc == nil: - select { - // close connections that are not the pinned address - case b.notifyCh <- []grpc.Address{{Addr: addr}}: - case <-downc: - case <-b.stopc: - return - } - select { - case <-downc: - b.notifyAddrs(notifyReset) - case msg := <-b.updateAddrsC: - b.notifyAddrs(msg) - case <-b.stopc: - return - } - } - } -} - -func (b *healthBalancer) notifyAddrs(msg notifyMsg) { - if msg == notifyNext { - select { - case b.notifyCh <- []grpc.Address{}: - case <-b.stopc: - return - } - } - b.mu.RLock() - pinAddr := b.pinAddr - downc := b.downc - b.mu.RUnlock() - addrs, hostPorts := b.liveAddrs() - - var waitDown bool 
- if pinAddr != "" { - _, ok := hostPorts[pinAddr] - waitDown = !ok - } - - select { - case b.notifyCh <- addrs: - if waitDown { - select { - case <-downc: - case <-b.stopc: - } - } - case <-b.stopc: - } -} - -func (b *healthBalancer) Up(addr grpc.Address) func(error) { - if !b.mayPin(addr) { - return func(err error) {} - } - - b.mu.Lock() - defer b.mu.Unlock() - - // gRPC might call Up after it called Close. We add this check - // to "fix" it up at application layer. Otherwise, will panic - // if b.upc is already closed. - if b.closed { - return func(err error) {} - } - - // gRPC might call Up on a stale address. - // Prevent updating pinAddr with a stale address. - if !hasAddr(b.addrs, addr.Addr) { - return func(err error) {} - } - - if b.pinAddr != "" { - if logger.V(4) { - logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr) - } - return func(err error) {} - } - - // notify waiting Get()s and pin first connected address - close(b.upc) - b.downc = make(chan struct{}) - b.pinAddr = addr.Addr - if logger.V(4) { - logger.Infof("clientv3/balancer: pin %q", addr.Addr) - } - - // notify client that a connection is up - b.readyOnce.Do(func() { close(b.readyc) }) - - return func(err error) { - // If connected to a black hole endpoint or a killed server, the gRPC ping - // timeout will induce a network I/O error, and retrying until success; - // finding healthy endpoint on retry could take several timeouts and redials. - // To avoid wasting retries, gray-list unhealthy endpoints. 
- b.hostPortError(addr.Addr, err) - - b.mu.Lock() - b.upc = make(chan struct{}) - close(b.downc) - b.pinAddr = "" - b.mu.Unlock() - if logger.V(4) { - logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) - } - } -} - -func (b *healthBalancer) mayPin(addr grpc.Address) bool { - if b.endpoint(addr.Addr) == "" { // stale host:port - return false - } - - b.unhealthyMu.RLock() - unhealthyCnt := len(b.unhealthyHostPorts) - failedTime, bad := b.unhealthyHostPorts[addr.Addr] - b.unhealthyMu.RUnlock() - - b.mu.RLock() - skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt - b.mu.RUnlock() - if skip || !bad { - return true - } - - // prevent isolated member's endpoint from being infinitely retried, as follows: - // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm - // 2. balancer 'Up' unpins with grpc: failed with network I/O error - // 3. grpc-healthcheck still SERVING, thus retry to pin - // instead, return before grpc-healthcheck if failed within healthcheck timeout - if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout { - if logger.V(4) { - logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout) - } - return false - } - - if ok, _ := b.healthCheck(addr.Addr); ok { - b.removeUnhealthy(addr.Addr, "health check success") - return true - } - - b.hostPortError(addr.Addr, errors.New("health check failed")) - return false -} - -func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { - var ( - addr string - closed bool - ) - - // If opts.BlockingWait is false (for fail-fast RPCs), it should return - // an address it has notified via Notify immediately instead of blocking. 
- if !opts.BlockingWait { - b.mu.RLock() - closed = b.closed - addr = b.pinAddr - b.mu.RUnlock() - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if addr == "" { - return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable - } - return grpc.Address{Addr: addr}, func() {}, nil - } - - for { - b.mu.RLock() - ch := b.upc - b.mu.RUnlock() - select { - case <-ch: - case <-b.donec: - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - case <-ctx.Done(): - return grpc.Address{Addr: ""}, nil, ctx.Err() - } - b.mu.RLock() - closed = b.closed - addr = b.pinAddr - b.mu.RUnlock() - // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed. - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if addr != "" { - break - } - } - return grpc.Address{Addr: addr}, func() {}, nil -} - -func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } - -func (b *healthBalancer) Close() error { - b.mu.Lock() - // In case gRPC calls close twice. TODO: remove the checking - // when we are sure that gRPC wont call close twice. - if b.closed { - b.mu.Unlock() - <-b.donec - return nil - } - b.closed = true - b.stopOnce.Do(func() { close(b.stopc) }) - b.pinAddr = "" - - // In the case of following scenario: - // 1. upc is not closed; no pinned address - // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks - // 3. client.conn.Close() calls balancer.Close(); closed = true - // 4. 
for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled - // we must close upc so Get() exits from blocking on upc - select { - case <-b.upc: - default: - // terminate all waiting Get()s - close(b.upc) - } - - b.mu.Unlock() - b.wg.Wait() - - // wait for updateNotifyLoop to finish - <-b.donec - close(b.notifyCh) - - return nil -} - -func grpcHealthCheck(client *Client, ep string) (bool, error) { - conn, err := client.dial(ep) - if err != nil { - return false, err - } - defer conn.Close() - cli := healthpb.NewHealthClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) - cancel() - if err != nil { - if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable { - if s.Message() == unknownService { // etcd < v3.3.0 - return true, nil - } - } - return false, err - } - return resp.Status == healthpb.HealthCheckResponse_SERVING, nil -} - -func hasAddr(addrs []grpc.Address, targetAddr string) bool { - for _, addr := range addrs { - if targetAddr == addr.Addr { - return true - } - } - return false -} - -func getHost(ep string) string { - url, uerr := url.Parse(ep) - if uerr != nil || !strings.Contains(ep, "://") { - return ep - } - return url.Host -} - -func eps2addrs(eps []string) []grpc.Address { - addrs := make([]grpc.Address, len(eps)) - for i := range eps { - addrs[i].Addr = getHost(eps[i]) - } - return addrs -} - -func getHostPort2ep(eps []string) map[string]string { - hm := make(map[string]string, len(eps)) - for i := range eps { - _, host, _ := parseEndpoint(eps[i]) - hm[host] = eps[i] - } - return hm -} diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go index 949f6dc5b14..c8350f9268b 100644 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -16,8 +16,8 @@ package clientv3 import ( pb 
"github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" + "google.golang.org/grpc" ) type ( @@ -32,7 +32,7 @@ type KV interface { // Put puts a key-value pair into etcd. // Note that key,value can be plain bytes array and string is // an immutable representation of that bytes array. - // To get a string of bytes, do string([]byte{0x10, 0x20}). + // To get a string of bytes, do string([]byte(0x10, 0x20)). Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) // Get retrieves keys. @@ -51,6 +51,11 @@ type KV interface { // Compact compacts etcd KV history before the given rev. Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) + // Do applies a single Op on KV without a transaction. + // Do is useful when declaring operations to be issued at a later time + // whereas Get/Put/Delete are for better suited for when the operation + // should be immediately issued at time of declaration. + // Do applies a single Op on KV without a transaction. 
// Do is useful when creating arbitrary operations to be issued at a // later time; the user can range over the operations, calling Do to @@ -66,26 +71,11 @@ type OpResponse struct { put *PutResponse get *GetResponse del *DeleteResponse - txn *TxnResponse } func (op OpResponse) Put() *PutResponse { return op.put } func (op OpResponse) Get() *GetResponse { return op.get } func (op OpResponse) Del() *DeleteResponse { return op.del } -func (op OpResponse) Txn() *TxnResponse { return op.txn } - -func (resp *PutResponse) OpResponse() OpResponse { - return OpResponse{put: resp} -} -func (resp *GetResponse) OpResponse() OpResponse { - return OpResponse{get: resp} -} -func (resp *DeleteResponse) OpResponse() OpResponse { - return OpResponse{del: resp} -} -func (resp *TxnResponse) OpResponse() OpResponse { - return OpResponse{txn: resp} -} type kv struct { remote pb.KVClient @@ -130,17 +120,35 @@ func (kv *kv) Txn(ctx context.Context) Txn { } func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { + for { + resp, err := kv.do(ctx, op) + if err == nil { + return resp, nil + } + + if isHaltErr(ctx, err) { + return resp, toErr(ctx, err) + } + // do not retry on modifications + if op.isWrite() { + return resp, toErr(ctx, err) + } + } +} + +func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { + // TODO: handle other ops case tRange: var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest()) + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) if err == nil { return OpResponse{get: (*GetResponse)(resp)}, nil } case tPut: var resp *pb.PutResponse - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} resp, err = kv.remote.Put(ctx, r) if err == nil { return OpResponse{put: 
(*PutResponse)(resp)}, nil @@ -152,14 +160,8 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { if err == nil { return OpResponse{del: (*DeleteResponse)(resp)}, nil } - case tTxn: - var resp *pb.TxnResponse - resp, err = kv.remote.Txn(ctx, op.toTxnRequest()) - if err == nil { - return OpResponse{txn: (*TxnResponse)(resp)}, nil - } default: panic("Unknown op") } - return OpResponse{}, toErr(ctx, err) + return OpResponse{}, err } diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go index d90531bf2a1..10d3dd0b27f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -20,9 +20,8 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - "google.golang.org/grpc/metadata" + "google.golang.org/grpc" ) type ( @@ -30,7 +29,7 @@ type ( LeaseID int64 ) -// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. +// LeaseGrantResponse is used to convert the protobuf grant response. type LeaseGrantResponse struct { *pb.ResponseHeader ID LeaseID @@ -38,14 +37,14 @@ type LeaseGrantResponse struct { Error string } -// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. +// LeaseKeepAliveResponse is used to convert the protobuf keepalive response. type LeaseKeepAliveResponse struct { *pb.ResponseHeader ID LeaseID TTL int64 } -// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. +// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response. type LeaseTimeToLiveResponse struct { *pb.ResponseHeader ID LeaseID `json:"id"` @@ -60,12 +59,6 @@ type LeaseTimeToLiveResponse struct { Keys [][]byte `json:"keys"` } -// LeaseStatus represents a lease status. 
-type LeaseStatus struct { - ID LeaseID `json:"id"` - // TODO: TTL int64 -} - const ( // defaultTTL is the assumed lease TTL used for the first keepalive // deadline before the actual TTL is known to the client. @@ -74,9 +67,6 @@ const ( leaseResponseChSize = 16 // NoLease is a lease ID for the absence of a lease. NoLease LeaseID = 0 - - // retryConnWait is how long to wait before retrying request due to an error - retryConnWait = 500 * time.Millisecond ) // ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. @@ -107,7 +97,7 @@ type Lease interface { // KeepAlive keeps the given lease alive forever. KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) - // KeepAliveOnce renews the lease once. In most of the cases, KeepAlive + // KeepAliveOnce renews the lease once. In most of the cases, Keepalive // should be used instead of KeepAliveOnce. KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) @@ -136,9 +126,6 @@ type lessor struct { // firstKeepAliveTimeout is the timeout for the first keepalive request // before the actual TTL is known to the lease client firstKeepAliveTimeout time.Duration - - // firstKeepAliveOnce ensures stream starts after first KeepAlive call. 
- firstKeepAliveOnce sync.Once } // keepAlive multiplexes a keepalive for a lease over multiple channels @@ -154,62 +141,85 @@ type keepAlive struct { } func NewLease(c *Client) Lease { - return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second) -} - -func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease { l := &lessor{ donec: make(chan struct{}), keepAlives: make(map[LeaseID]*keepAlive), - remote: remote, - firstKeepAliveTimeout: keepAliveTimeout, + remote: RetryLeaseClient(c), + firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second, } if l.firstKeepAliveTimeout == time.Second { l.firstKeepAliveTimeout = defaultTTL } - reqLeaderCtx := WithRequireLeader(context.Background()) - l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) + + l.stopCtx, l.stopCancel = context.WithCancel(context.Background()) + go l.recvKeepAliveLoop() + go l.deadlineLoop() return l } func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(ctx, r) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, + cctx, cancel := context.WithCancel(ctx) + done := cancelWhenStop(cancel, l.stopCtx.Done()) + defer close(done) + + for { + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(cctx, r) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, + } + return gresp, nil + } + if isHaltErr(cctx, err) { + return nil, toErr(cctx, err) } - return gresp, nil } - return nil, toErr(ctx, err) } func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - r := &pb.LeaseRevokeRequest{ID: int64(id)} - resp, err := l.remote.LeaseRevoke(ctx, r) - if err == nil { - return (*LeaseRevokeResponse)(resp), nil + cctx, 
cancel := context.WithCancel(ctx) + done := cancelWhenStop(cancel, l.stopCtx.Done()) + defer close(done) + + for { + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(cctx, r) + + if err == nil { + return (*LeaseRevokeResponse)(resp), nil + } + if isHaltErr(ctx, err) { + return nil, toErr(ctx, err) + } } - return nil, toErr(ctx, err) } func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - r := toLeaseTimeToLiveRequest(id, opts...) - resp, err := l.remote.LeaseTimeToLive(ctx, r) - if err == nil { - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, + cctx, cancel := context.WithCancel(ctx) + done := cancelWhenStop(cancel, l.stopCtx.Done()) + defer close(done) + + for { + r := toLeaseTimeToLiveRequest(id, opts...) + resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false)) + if err == nil { + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, + } + return gresp, nil + } + if isHaltErr(cctx, err) { + return nil, toErr(cctx, err) } - return gresp, nil } - return nil, toErr(ctx, err) } func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { @@ -244,19 +254,19 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl l.mu.Unlock() go l.keepAliveCtxCloser(id, ctx, ka.donec) - l.firstKeepAliveOnce.Do(func() { - go l.recvKeepAliveLoop() - go l.deadlineLoop() - }) return ch, nil } func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { + cctx, cancel := context.WithCancel(ctx) + done := cancelWhenStop(cancel, l.stopCtx.Done()) + defer close(done) + for { - resp, err := l.keepAliveOnce(ctx, id) + resp, err := l.keepAliveOnce(cctx, id) if err 
== nil { - if resp.TTL <= 0 { + if resp.TTL == 0 { err = rpctypes.ErrLeaseNotFound } return resp, err @@ -269,8 +279,6 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive func (l *lessor) Close() error { l.stopCancel() - // close for synchronous teardown if stream goroutines never launched - l.firstKeepAliveOnce.Do(func() { close(l.donec) }) <-l.donec return nil } @@ -307,50 +315,11 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha } } -// closeRequireLeader scans keepAlives for ctxs that have require leader -// and closes the associated channels. -func (l *lessor) closeRequireLeader() { - l.mu.Lock() - defer l.mu.Unlock() - for _, ka := range l.keepAlives { - reqIdxs := 0 - // find all required leader channels, close, mark as nil - for i, ctx := range ka.ctxs { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - continue - } - ks := md[rpctypes.MetadataRequireLeaderKey] - if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { - continue - } - close(ka.chs[i]) - ka.chs[i] = nil - reqIdxs++ - } - if reqIdxs == 0 { - continue - } - // remove all channels that required a leader from keepalive - newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) - newCtxs := make([]context.Context, len(newChs)) - newIdx := 0 - for i := range ka.chs { - if ka.chs[i] == nil { - continue - } - newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] - newIdx++ - } - ka.chs, ka.ctxs = newChs, newCtxs - } -} - func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { cctx, cancel := context.WithCancel(ctx) defer cancel() - stream, err := l.remote.LeaseKeepAlive(cctx) + stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false)) if err != nil { return nil, toErr(ctx, err) } @@ -379,50 +348,32 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { close(l.donec) l.loopErr = gerr for _, ka := range l.keepAlives { - ka.close() + ka.Close() } 
l.keepAlives = make(map[LeaseID]*keepAlive) l.mu.Unlock() }() - for { - stream, err := l.resetRecv() + stream, serr := l.resetRecv() + for serr == nil { + resp, err := stream.Recv() if err != nil { - if canceledByCaller(l.stopCtx, err) { + if isHaltErr(l.stopCtx, err) { return err } - } else { - for { - resp, err := stream.Recv() - if err != nil { - if canceledByCaller(l.stopCtx, err) { - return err - } - - if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { - l.closeRequireLeader() - } - break - } - - l.recvKeepAlive(resp) - } - } - - select { - case <-time.After(retryConnWait): + stream, serr = l.resetRecv() continue - case <-l.stopCtx.Done(): - return l.stopCtx.Err() } + l.recvKeepAlive(resp) } + return serr } -// resetRecv opens a new lease stream and starts sending keep alive requests. +// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx) - if err != nil { + stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false)) + if err = toErr(sctx, err); err != nil { cancel() return nil, err } @@ -430,6 +381,7 @@ func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { l.mu.Lock() defer l.mu.Unlock() if l.stream != nil && l.streamCancel != nil { + l.stream.CloseSend() l.streamCancel() } @@ -459,7 +411,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { if karesp.TTL <= 0 { // lease expired; close all keep alive channels delete(l.keepAlives, karesp.ID) - ka.close() + ka.Close() return } @@ -489,7 +441,7 @@ func (l *lessor) deadlineLoop() { for id, ka := range l.keepAlives { if ka.deadline.Before(now) { // waited too long for response; lease may be expired - ka.close() + ka.Close() delete(l.keepAlives, id) } } @@ -497,9 +449,19 @@ func (l *lessor) deadlineLoop() { } } -// sendKeepAliveLoop sends keep alive requests for the lifetime of the given 
stream. +// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { for { + select { + case <-time.After(500 * time.Millisecond): + case <-stream.Context().Done(): + return + case <-l.donec: + return + case <-l.stopCtx.Done(): + return + } + var tosend []LeaseID now := time.Now() @@ -518,22 +480,29 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { return } } - - select { - case <-time.After(500 * time.Millisecond): - case <-stream.Context().Done(): - return - case <-l.donec: - return - case <-l.stopCtx.Done(): - return - } } } -func (ka *keepAlive) close() { +func (ka *keepAlive) Close() { close(ka.donec) for _, ch := range ka.chs { close(ch) } } + +// cancelWhenStop calls cancel when the given stopc fires. It returns a done chan. done +// should be closed when the work is finished. When done fires, cancelWhenStop will release +// its internal resource. +func cancelWhenStop(cancel context.CancelFunc, stopc <-chan struct{}) chan<- struct{} { + done := make(chan struct{}, 1) + + go func() { + select { + case <-stopc: + case <-done: + } + cancel() + }() + + return done +} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go index 012abdbce63..519db45d8e3 100644 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -16,35 +16,36 @@ package clientv3 import ( "io/ioutil" + "log" "sync" "google.golang.org/grpc/grpclog" ) // Logger is the logger used by client library. -// It implements grpclog.LoggerV2 interface. -type Logger grpclog.LoggerV2 +// It implements grpclog.Logger interface. 
+type Logger grpclog.Logger var ( logger settableLogger ) type settableLogger struct { - l grpclog.LoggerV2 + l grpclog.Logger mu sync.RWMutex } func init() { // disable client side logs by default logger.mu.Lock() - logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard) + logger.l = log.New(ioutil.Discard, "", 0) // logger has to override the grpclog at initialization so that // any changes to the grpclog go through logger with locking // instead of through SetLogger // // now updates only happen through settableLogger.set - grpclog.SetLoggerV2(&logger) + grpclog.SetLogger(&logger) logger.mu.Unlock() } @@ -61,7 +62,6 @@ func GetLogger() Logger { func (s *settableLogger) set(l Logger) { s.mu.Lock() logger.l = l - grpclog.SetLoggerV2(&logger) s.mu.Unlock() } @@ -72,25 +72,11 @@ func (s *settableLogger) get() Logger { return l } -// implement the grpclog.LoggerV2 interface +// implement the grpclog.Logger interface -func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } -func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } -func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } -func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } -func (s *settableLogger) Warningf(format string, args ...interface{}) { - s.get().Warningf(format, args...) -} -func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } -func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } -func (s *settableLogger) Errorf(format string, args ...interface{}) { - s.get().Errorf(format, args...) -} -func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) 
} func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } -func (s *settableLogger) V(l int) bool { return s.get().V(l) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) } diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go index ca2f445b8b4..718356250be 100644 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go @@ -18,8 +18,8 @@ import ( "io" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" + "google.golang.org/grpc" ) type ( @@ -36,7 +36,7 @@ type Maintenance interface { // AlarmDisarm disarms a given alarm. AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) - // Defragment releases wasted space from internal fragmentation on a given etcd member. + // Defragment defragments storage backend of the etcd member with given endpoint. // Defragment is only needed when deleting a large number of keys and want to reclaim // the resources. // Defragment is an expensive operation. User should avoid defragmenting multiple members @@ -48,36 +48,17 @@ type Maintenance interface { // Status gets the status of the endpoint. Status(ctx context.Context, endpoint string) (*StatusResponse, error) - // Snapshot provides a reader for a point-in-time snapshot of etcd. + // Snapshot provides a reader for a snapshot of a backend. 
Snapshot(ctx context.Context) (io.ReadCloser, error) } type maintenance struct { - dial func(endpoint string) (pb.MaintenanceClient, func(), error) + c *Client remote pb.MaintenanceClient } func NewMaintenance(c *Client) Maintenance { - return &maintenance{ - dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { - conn, err := c.dial(endpoint) - if err != nil { - return nil, nil, err - } - cancel := func() { conn.Close() } - return RetryMaintenanceClient(c, conn), cancel, nil - }, - remote: RetryMaintenanceClient(c, c.conn), - } -} - -func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance { - return &maintenance{ - dial: func(string) (pb.MaintenanceClient, func(), error) { - return remote, func() {}, nil - }, - remote: remote, - } + return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)} } func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { @@ -86,11 +67,15 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { MemberID: 0, // all Alarm: pb.AlarmType_NONE, // all } - resp, err := m.remote.Alarm(ctx, req) - if err == nil { - return (*AlarmResponse)(resp), nil + for { + resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) + if err == nil { + return (*AlarmResponse)(resp), nil + } + if isHaltErr(ctx, err) { + return nil, toErr(ctx, err) + } } - return nil, toErr(ctx, err) } func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { @@ -116,7 +101,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR return &ret, nil } - resp, err := m.remote.Alarm(ctx, req) + resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) if err == nil { return (*AlarmResponse)(resp), nil } @@ -124,12 +109,13 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR } func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { - remote, 
cancel, err := m.dial(endpoint) + conn, err := m.c.Dial(endpoint) if err != nil { return nil, toErr(ctx, err) } - defer cancel() - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}) + defer conn.Close() + remote := pb.NewMaintenanceClient(conn) + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false)) if err != nil { return nil, toErr(ctx, err) } @@ -137,12 +123,13 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm } func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { - remote, cancel, err := m.dial(endpoint) + conn, err := m.c.Dial(endpoint) if err != nil { return nil, toErr(ctx, err) } - defer cancel() - resp, err := remote.Status(ctx, &pb.StatusRequest{}) + defer conn.Close() + remote := pb.NewMaintenanceClient(conn) + resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false)) if err != nil { return nil, toErr(ctx, err) } @@ -150,7 +137,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo } func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}) + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false)) if err != nil { return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/BUILD b/vendor/github.com/coreos/etcd/clientv3/namespace/BUILD deleted file mode 100644 index 8c8b12d4cf4..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/namespace/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "kv.go", - "lease.go", - "util.go", - "watch.go", - ], - importpath = "github.com/coreos/etcd/clientv3/namespace", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - 
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go b/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go deleted file mode 100644 index 3f883320fcc..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package namespace is a clientv3 wrapper that translates all keys to begin -// with a given prefix. -// -// First, create a client: -// -// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) -// if err != nil { -// // handle error! 
-// } -// -// Next, override the client interfaces: -// -// unprefixedKV := cli.KV -// cli.KV = namespace.NewKV(cli.KV, "my-prefix/") -// cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/") -// cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/") -// -// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/": -// -// cli.Put(context.TODO(), "abc", "123") -// resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc") -// fmt.Printf("%s\n", resp.Kvs[0].Value) -// // Output: 123 -// unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456") -// resp, _ = cli.Get("abc") -// fmt.Printf("%s\n", resp.Kvs[0].Value) -// // Output: 456 -// -package namespace diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go b/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go deleted file mode 100644 index 2b759e0d394..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package namespace - -import ( - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "golang.org/x/net/context" -) - -type kvPrefix struct { - clientv3.KV - pfx string -} - -// NewKV wraps a KV instance so that all requests -// are prefixed with a given string. 
-func NewKV(kv clientv3.KV, prefix string) clientv3.KV { - return &kvPrefix{kv, prefix} -} - -func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) { - if len(key) == 0 { - return nil, rpctypes.ErrEmptyKey - } - op := kv.prefixOp(clientv3.OpPut(key, val, opts...)) - r, err := kv.KV.Do(ctx, op) - if err != nil { - return nil, err - } - put := r.Put() - kv.unprefixPutResponse(put) - return put, nil -} - -func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { - if len(key) == 0 { - return nil, rpctypes.ErrEmptyKey - } - r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...))) - if err != nil { - return nil, err - } - get := r.Get() - kv.unprefixGetResponse(get) - return get, nil -} - -func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) { - if len(key) == 0 { - return nil, rpctypes.ErrEmptyKey - } - r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...))) - if err != nil { - return nil, err - } - del := r.Del() - kv.unprefixDeleteResponse(del) - return del, nil -} - -func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) { - if len(op.KeyBytes()) == 0 { - return clientv3.OpResponse{}, rpctypes.ErrEmptyKey - } - r, err := kv.KV.Do(ctx, kv.prefixOp(op)) - if err != nil { - return r, err - } - switch { - case r.Get() != nil: - kv.unprefixGetResponse(r.Get()) - case r.Put() != nil: - kv.unprefixPutResponse(r.Put()) - case r.Del() != nil: - kv.unprefixDeleteResponse(r.Del()) - } - return r, nil -} - -type txnPrefix struct { - clientv3.Txn - kv *kvPrefix -} - -func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn { - return &txnPrefix{kv.KV.Txn(ctx), kv} -} - -func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn { - newCmps := make([]clientv3.Cmp, len(cs)) - for i := range cs { - newCmps[i] = cs[i] - pfxKey, _ := 
txn.kv.prefixInterval(cs[i].KeyBytes(), nil) - newCmps[i].WithKeyBytes(pfxKey) - } - txn.Txn = txn.Txn.If(newCmps...) - return txn -} - -func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn { - newOps := make([]clientv3.Op, len(ops)) - for i := range ops { - newOps[i] = txn.kv.prefixOp(ops[i]) - } - txn.Txn = txn.Txn.Then(newOps...) - return txn -} - -func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn { - newOps := make([]clientv3.Op, len(ops)) - for i := range ops { - newOps[i] = txn.kv.prefixOp(ops[i]) - } - txn.Txn = txn.Txn.Else(newOps...) - return txn -} - -func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) { - resp, err := txn.Txn.Commit() - if err != nil { - return nil, err - } - txn.kv.unprefixTxnResponse(resp) - return resp, nil -} - -func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op { - begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes()) - op.WithKeyBytes(begin) - op.WithRangeBytes(end) - return op -} - -func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) { - for i := range resp.Kvs { - resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):] - } -} - -func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) { - if resp.PrevKv != nil { - resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):] - } -} - -func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) { - for i := range resp.PrevKvs { - resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):] - } -} - -func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) { - for _, r := range resp.Responses { - switch tv := r.Response.(type) { - case *pb.ResponseOp_ResponseRange: - if tv.ResponseRange != nil { - kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange)) - } - case *pb.ResponseOp_ResponsePut: - if tv.ResponsePut != nil { - kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut)) - } - case *pb.ResponseOp_ResponseDeleteRange: - if tv.ResponseDeleteRange != nil { - 
kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange)) - } - default: - } - } -} - -func (p *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) { - return prefixInterval(p.pfx, key, end) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go b/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go deleted file mode 100644 index c3167fa5d87..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package namespace - -import ( - "bytes" - - "github.com/coreos/etcd/clientv3" - - "golang.org/x/net/context" -) - -type leasePrefix struct { - clientv3.Lease - pfx []byte -} - -// NewLease wraps a Lease interface to filter for only keys with a prefix -// and remove that prefix when fetching attached keys through TimeToLive. -func NewLease(l clientv3.Lease, prefix string) clientv3.Lease { - return &leasePrefix{l, []byte(prefix)} -} - -func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) { - resp, err := l.Lease.TimeToLive(ctx, id, opts...) 
- if err != nil { - return nil, err - } - if len(resp.Keys) > 0 { - var outKeys [][]byte - for i := range resp.Keys { - if len(resp.Keys[i]) < len(l.pfx) { - // too short - continue - } - if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) { - // doesn't match prefix - continue - } - // strip prefix - outKeys = append(outKeys, resp.Keys[i][len(l.pfx):]) - } - resp.Keys = outKeys - } - return resp, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/util.go b/vendor/github.com/coreos/etcd/clientv3/namespace/util.go deleted file mode 100644 index ecf04046c32..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/namespace/util.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package namespace - -func prefixInterval(pfx string, key, end []byte) (pfxKey []byte, pfxEnd []byte) { - pfxKey = make([]byte, len(pfx)+len(key)) - copy(pfxKey[copy(pfxKey, pfx):], key) - - if len(end) == 1 && end[0] == 0 { - // the edge of the keyspace - pfxEnd = make([]byte, len(pfx)) - copy(pfxEnd, pfx) - ok := false - for i := len(pfxEnd) - 1; i >= 0; i-- { - if pfxEnd[i]++; pfxEnd[i] != 0 { - ok = true - break - } - } - if !ok { - // 0xff..ff => 0x00 - pfxEnd = []byte{0} - } - } else if len(end) >= 1 { - pfxEnd = make([]byte, len(pfx)+len(end)) - copy(pfxEnd[copy(pfxEnd, pfx):], end) - } - - return pfxKey, pfxEnd -} diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go b/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go deleted file mode 100644 index 9907211529a..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package namespace - -import ( - "sync" - - "github.com/coreos/etcd/clientv3" - - "golang.org/x/net/context" -) - -type watcherPrefix struct { - clientv3.Watcher - pfx string - - wg sync.WaitGroup - stopc chan struct{} - stopOnce sync.Once -} - -// NewWatcher wraps a Watcher instance so that all Watch requests -// are prefixed with a given string and all Watch responses have -// the prefix removed. 
-func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher { - return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})} -} - -func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { - // since OpOption is opaque, determine range for prefixing through an OpGet - op := clientv3.OpGet(key, opts...) - end := op.RangeBytes() - pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end) - if pfxEnd != nil { - opts = append(opts, clientv3.WithRange(string(pfxEnd))) - } - - wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...) - - // translate watch events from prefixed to unprefixed - pfxWch := make(chan clientv3.WatchResponse) - w.wg.Add(1) - go func() { - defer func() { - close(pfxWch) - w.wg.Done() - }() - for wr := range wch { - for i := range wr.Events { - wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):] - if wr.Events[i].PrevKv != nil { - wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key - } - } - select { - case pfxWch <- wr: - case <-ctx.Done(): - return - case <-w.stopc: - return - } - } - }() - return pfxWch -} - -func (w *watcherPrefix) Close() error { - err := w.Watcher.Close() - w.stopOnce.Do(func() { close(w.stopc) }) - w.wg.Wait() - return err -} diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/BUILD b/vendor/github.com/coreos/etcd/clientv3/naming/BUILD deleted file mode 100644 index 9329d763bab..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/naming/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "grpc.go", - ], - importpath = "github.com/coreos/etcd/clientv3/naming", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - 
"//vendor/google.golang.org/grpc/naming:go_default_library", - "//vendor/google.golang.org/grpc/status:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/doc.go b/vendor/github.com/coreos/etcd/clientv3/naming/doc.go deleted file mode 100644 index 71608cc738b..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/naming/doc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package naming provides an etcd-backed gRPC resolver for discovering gRPC services. 
-// -// To use, first import the packages: -// -// import ( -// "github.com/coreos/etcd/clientv3" -// etcdnaming "github.com/coreos/etcd/clientv3/naming" -// -// "google.golang.org/grpc" -// "google.golang.org/grpc/naming" -// ) -// -// First, register new endpoint addresses for a service: -// -// func etcdAdd(c *clientv3.Client, service, addr string) error { -// r := &etcdnaming.GRPCResolver{Client: c} -// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}) -// } -// -// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer: -// -// func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) { -// r := &etcdnaming.GRPCResolver{Client: c} -// b := grpc.RoundRobin(r) -// return grpc.Dial(service, grpc.WithBalancer(b)) -// } -// -// Optionally, force delete an endpoint: -// -// func etcdDelete(c *clientv3, service, addr string) error { -// r := &etcdnaming.GRPCResolver{Client: c} -// return r.Update(c.Ctx(), "my-service", naming.Update{Op: naming.Delete, Addr: "1.2.3.4"}) -// } -// -// Or register an expiring endpoint with a lease: -// -// func etcdLeaseAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error { -// r := &etcdnaming.GRPCResolver{Client: c} -// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid)) -// } -// -package naming diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go b/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go deleted file mode 100644 index 7fabc4f109a..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package naming - -import ( - "encoding/json" - "fmt" - - etcd "github.com/coreos/etcd/clientv3" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/naming" - "google.golang.org/grpc/status" - - "golang.org/x/net/context" -) - -var ErrWatcherClosed = fmt.Errorf("naming: watch closed") - -// GRPCResolver creates a grpc.Watcher for a target to track its resolution changes. -type GRPCResolver struct { - // Client is an initialized etcd client. - Client *etcd.Client -} - -func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) { - switch nm.Op { - case naming.Add: - var v []byte - if v, err = json.Marshal(nm); err != nil { - return status.Error(codes.InvalidArgument, err.Error()) - } - _, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...) - case naming.Delete: - _, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...) - default: - return status.Error(codes.InvalidArgument, "naming: bad naming op") - } - return err -} - -func (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) { - ctx, cancel := context.WithCancel(context.Background()) - w := &gRPCWatcher{c: gr.Client, target: target + "/", ctx: ctx, cancel: cancel} - return w, nil -} - -type gRPCWatcher struct { - c *etcd.Client - target string - ctx context.Context - cancel context.CancelFunc - wch etcd.WatchChan - err error -} - -// Next gets the next set of updates from the etcd resolver. 
-// Calls to Next should be serialized; concurrent calls are not safe since -// there is no way to reconcile the update ordering. -func (gw *gRPCWatcher) Next() ([]*naming.Update, error) { - if gw.wch == nil { - // first Next() returns all addresses - return gw.firstNext() - } - if gw.err != nil { - return nil, gw.err - } - - // process new events on target/* - wr, ok := <-gw.wch - if !ok { - gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error()) - return nil, gw.err - } - if gw.err = wr.Err(); gw.err != nil { - return nil, gw.err - } - - updates := make([]*naming.Update, 0, len(wr.Events)) - for _, e := range wr.Events { - var jupdate naming.Update - var err error - switch e.Type { - case etcd.EventTypePut: - err = json.Unmarshal(e.Kv.Value, &jupdate) - jupdate.Op = naming.Add - case etcd.EventTypeDelete: - err = json.Unmarshal(e.PrevKv.Value, &jupdate) - jupdate.Op = naming.Delete - } - if err == nil { - updates = append(updates, &jupdate) - } - } - return updates, nil -} - -func (gw *gRPCWatcher) firstNext() ([]*naming.Update, error) { - // Use serialized request so resolution still works if the target etcd - // server is partitioned away from the quorum. - resp, err := gw.c.Get(gw.ctx, gw.target, etcd.WithPrefix(), etcd.WithSerializable()) - if gw.err = err; err != nil { - return nil, err - } - - updates := make([]*naming.Update, 0, len(resp.Kvs)) - for _, kv := range resp.Kvs { - var jupdate naming.Update - if err := json.Unmarshal(kv.Value, &jupdate); err != nil { - continue - } - updates = append(updates, &jupdate) - } - - opts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1), etcd.WithPrefix(), etcd.WithPrevKV()} - gw.wch = gw.c.Watch(gw.ctx, gw.target, opts...) 
- return updates, nil -} - -func (gw *gRPCWatcher) Close() { gw.cancel() } diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go index e18d28662c4..6e260076698 100644 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ b/vendor/github.com/coreos/etcd/clientv3/op.go @@ -23,7 +23,6 @@ const ( tRange opType = iota + 1 tPut tDeleteRange - tTxn ) var ( @@ -53,10 +52,6 @@ type Op struct { // for watch, put, delete prevKV bool - // for put - ignoreValue bool - ignoreLease bool - // progressNotify is for progress updates. progressNotify bool // createdNotify is for created event @@ -68,69 +63,8 @@ type Op struct { // for put val []byte leaseID LeaseID - - // txn - cmps []Cmp - thenOps []Op - elseOps []Op } -// accessors / mutators - -func (op Op) IsTxn() bool { return op.t == tTxn } -func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } - -// KeyBytes returns the byte slice holding the Op's key. -func (op Op) KeyBytes() []byte { return op.key } - -// WithKeyBytes sets the byte slice for the Op's key. -func (op *Op) WithKeyBytes(key []byte) { op.key = key } - -// RangeBytes returns the byte slice holding with the Op's range end, if any. -func (op Op) RangeBytes() []byte { return op.end } - -// Rev returns the requested revision, if any. -func (op Op) Rev() int64 { return op.rev } - -// IsPut returns true iff the operation is a Put. -func (op Op) IsPut() bool { return op.t == tPut } - -// IsGet returns true iff the operation is a Get. -func (op Op) IsGet() bool { return op.t == tRange } - -// IsDelete returns true iff the operation is a Delete. -func (op Op) IsDelete() bool { return op.t == tDeleteRange } - -// IsSerializable returns true if the serializable field is true. -func (op Op) IsSerializable() bool { return op.serializable == true } - -// IsKeysOnly returns whether keysOnly is set. 
-func (op Op) IsKeysOnly() bool { return op.keysOnly == true } - -// IsCountOnly returns whether countOnly is set. -func (op Op) IsCountOnly() bool { return op.countOnly == true } - -// MinModRev returns the operation's minimum modify revision. -func (op Op) MinModRev() int64 { return op.minModRev } - -// MaxModRev returns the operation's maximum modify revision. -func (op Op) MaxModRev() int64 { return op.maxModRev } - -// MinCreateRev returns the operation's minimum create revision. -func (op Op) MinCreateRev() int64 { return op.minCreateRev } - -// MaxCreateRev returns the operation's maximum create revision. -func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } - -// WithRangeBytes sets the byte slice for the Op's range end. -func (op *Op) WithRangeBytes(end []byte) { op.end = end } - -// ValueBytes returns the byte slice holding the Op's value, if any. -func (op Op) ValueBytes() []byte { return op.val } - -// WithValueBytes sets the byte slice for the Op's value. -func (op *Op) WithValueBytes(v []byte) { op.val = v } - func (op Op) toRangeRequest() *pb.RangeRequest { if op.t != tRange { panic("op.t != tRange") @@ -155,28 +89,12 @@ func (op Op) toRangeRequest() *pb.RangeRequest { return r } -func (op Op) toTxnRequest() *pb.TxnRequest { - thenOps := make([]*pb.RequestOp, len(op.thenOps)) - for i, tOp := range op.thenOps { - thenOps[i] = tOp.toRequestOp() - } - elseOps := make([]*pb.RequestOp, len(op.elseOps)) - for i, eOp := range op.elseOps { - elseOps[i] = eOp.toRequestOp() - } - cmps := make([]*pb.Compare, len(op.cmps)) - for i := range op.cmps { - cmps[i] = (*pb.Compare)(&op.cmps[i]) - } - return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} -} - func (op Op) toRequestOp() *pb.RequestOp { switch op.t { case tRange: return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} case tPut: - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: 
op.ignoreValue, IgnoreLease: op.ignoreLease} + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} case tDeleteRange: r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} @@ -187,19 +105,6 @@ func (op Op) toRequestOp() *pb.RequestOp { } func (op Op) isWrite() bool { - if op.t == tTxn { - for _, tOp := range op.thenOps { - if tOp.isWrite() { - return true - } - } - for _, tOp := range op.elseOps { - if tOp.isWrite() { - return true - } - } - return false - } return op.t != tRange } @@ -265,10 +170,6 @@ func OpPut(key, val string, opts ...OpOption) Op { return ret } -func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { - return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} -} - func opWatch(key string, opts ...OpOption) Op { ret := Op{t: tRange, key: []byte(key)} ret.applyOpts(opts) @@ -306,7 +207,6 @@ func WithLease(leaseID LeaseID) OpOption { } // WithLimit limits the number of results to return from 'Get' request. -// If WithLimit is given a 0 limit, it is treated as no limit. func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } // WithRev specifies the store revision for 'Get' request. @@ -322,9 +222,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption { if target == SortByKey && order == SortAscend { // If order != SortNone, server fetches the entire key-space, // and then applies the sort and limit, if provided. - // Since by default the server returns results sorted by keys - // in lexicographically ascending order, the client should ignore - // SortOrder if the target is SortByKey. + // Since current mvcc.Range implementation returns results + // sorted by keys in lexicographically ascending order, + // client should ignore SortOrder if the target is SortByKey. 
order = SortNone } op.sort = &SortOption{target, order} @@ -357,10 +257,6 @@ func getPrefix(key []byte) []byte { // can return 'foo1', 'foo2', and so on. func WithPrefix() OpOption { return func(op *Op) { - if len(op.key) == 0 { - op.key, op.end = []byte{0}, []byte{0} - return - } op.end = getPrefix(op.key) } } @@ -464,24 +360,6 @@ func WithPrevKV() OpOption { } } -// WithIgnoreValue updates the key using its current value. -// This option can not be combined with non-empty values. -// Returns an error if the key does not exist. -func WithIgnoreValue() OpOption { - return func(op *Op) { - op.ignoreValue = true - } -} - -// WithIgnoreLease updates the key using its current lease. -// This option can not be combined with WithLease. -// Returns an error if the key does not exist. -func WithIgnoreLease() OpOption { - return func(op *Op) { - op.ignoreLease = true - } -} - // LeaseOp represents an Operation that lease can execute. type LeaseOp struct { id LeaseID @@ -499,7 +377,8 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) { } } -// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID. +// WithAttachedKeys requests lease timetolive API to return +// attached keys of given lease ID. func WithAttachedKeys() LeaseOption { return func(op *LeaseOp) { op.attachedKeys = true } } diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go deleted file mode 100644 index 23eea9367ff..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import "golang.org/x/net/context" - -// TODO: remove this when "FailFast=false" is fixed. -// See https://github.com/grpc/grpc-go/issues/1532. -func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { - select { - case <-ready: - return nil - case <-rpcCtx.Done(): - return rpcCtx.Err() - case <-clientCtx.Done(): - return clientCtx.Err() - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go index c95b2cad7c4..78f31a8c4b0 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -17,183 +17,135 @@ package clientv3 import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) type rpcFunc func(ctx context.Context) error -type retryRPCFunc func(context.Context, rpcFunc) error -type retryStopErrFunc func(error) bool +type retryRpcFunc func(context.Context, rpcFunc) error -func isRepeatableStopError(err error) bool { - eErr := rpctypes.Error(err) - // always stop retry on etcd errors - if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { - return true - } - // only retry if unavailable - ev, _ := status.FromError(err) - return ev.Code() != codes.Unavailable -} - -func isNonRepeatableStopError(err error) bool { - ev, _ := status.FromError(err) - if ev.Code() != 
codes.Unavailable { - return true - } - desc := rpctypes.ErrorDesc(err) - return desc != "there is no address available" && desc != "there is no connection available" -} - -func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc { +func (c *Client) newRetryWrapper() retryRpcFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { - if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { - return err - } - pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - if logger.V(4) { - logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) - } - if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { - // mark this before endpoint switch is triggered - c.balancer.hostPortError(pinned, err) - c.balancer.next() - if logger.V(4) { - logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) - } - } - - if isStop(err) { + eErr := rpctypes.Error(err) + // always stop retry on etcd errors + if _, ok := eErr.(rpctypes.EtcdError); ok { return err } + + // only retry if unavailable + if grpc.Code(err) != codes.Unavailable { + return err + } + + select { + case <-c.balancer.ConnectNotify(): + case <-rpcCtx.Done(): + return rpcCtx.Err() + case <-c.ctx.Done(): + return c.ctx.Err() + } } } } -func (c *Client) newAuthRetryWrapper() retryRPCFunc { +func (c *Client) newAuthRetryWrapper() retryRpcFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { - pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - if logger.V(4) { - logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) - } + // always stop retry on etcd errors other than invalid auth token if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { gterr := c.getToken(rpcCtx) if gterr != nil { - if logger.V(4) { - logger.Infof("clientv3/auth-retry: 
cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) - } return err // return the original error for simplicity } continue } + return err } } } -// RetryKVClient implements a KVClient. +// RetryKVClient implements a KVClient that uses the client's FailFast retry policy. func RetryKVClient(c *Client) pb.KVClient { - repeatableRetry := c.newRetryWrapper(isRepeatableStopError) - nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) - conn := pb.NewKVClient(c.conn) - retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry} - retryAuthWrapper := c.newAuthRetryWrapper() - return &retryKVClient{ - &nonRepeatableKVClient{retryBasic, retryAuthWrapper}, - retryAuthWrapper} + retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper} + return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}} } type retryKVClient struct { - *nonRepeatableKVClient - repeatableRetry retryRPCFunc + *retryWriteKVClient } func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - err = rkv.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Range(rctx, in, opts...) + err = rkv.retryf(ctx, func(rctx context.Context) error { + resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...) return err }) return resp, err } -type nonRepeatableKVClient struct { - kc pb.KVClient - nonRepeatableRetry retryRPCFunc +type retryWriteKVClient struct { + pb.KVClient + retryf retryRpcFunc } -func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Put(rctx, in, opts...) 
+func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { + err = rkv.retryf(ctx, func(rctx context.Context) error { + resp, err = rkv.KVClient.Put(rctx, in, opts...) return err }) return resp, err } -func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.DeleteRange(rctx, in, opts...) +func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { + err = rkv.retryf(ctx, func(rctx context.Context) error { + resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...) return err }) return resp, err } -func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - // TODO: repeatableRetry if read-only txn - err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Txn(rctx, in, opts...) +func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { + err = rkv.retryf(ctx, func(rctx context.Context) error { + resp, err = rkv.KVClient.Txn(rctx, in, opts...) return err }) return resp, err } -func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Compact(rctx, in, opts...) +func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { + err = rkv.retryf(ctx, func(rctx context.Context) error { + resp, err = rkv.KVClient.Compact(rctx, in, opts...) 
return err }) return resp, err } type retryLeaseClient struct { - lc pb.LeaseClient - repeatableRetry retryRPCFunc + pb.LeaseClient + retryf retryRpcFunc } -// RetryLeaseClient implements a LeaseClient. +// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy. func RetryLeaseClient(c *Client) pb.LeaseClient { - retry := &retryLeaseClient{ - pb.NewLeaseClient(c.conn), - c.newRetryWrapper(isRepeatableStopError), - } - return &retryLeaseClient{retry, c.newAuthRetryWrapper()} -} - -func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { - err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...) - return err - }) - return resp, err + retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper} + return &retryLeaseClient{retry, c.retryAuthWrapper} } func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseGrant(rctx, in, opts...) + err = rlc.retryf(ctx, func(rctx context.Context) error { + resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...) return err }) return resp, err @@ -201,270 +153,140 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe } func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...) + err = rlc.retryf(ctx, func(rctx context.Context) error { + resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...) 
return err }) return resp, err } -func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { - err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { - stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...) - return err - }) - return stream, err -} - type retryClusterClient struct { - *nonRepeatableClusterClient - repeatableRetry retryRPCFunc + pb.ClusterClient + retryf retryRpcFunc } -// RetryClusterClient implements a ClusterClient. +// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy. func RetryClusterClient(c *Client) pb.ClusterClient { - repeatableRetry := c.newRetryWrapper(isRepeatableStopError) - nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) - cc := pb.NewClusterClient(c.conn) - return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry} + return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper} } -func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { - err = rcc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberList(rctx, in, opts...) +func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { + err = rcc.retryf(ctx, func(rctx context.Context) error { + resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...) return err }) return resp, err } -type nonRepeatableClusterClient struct { - cc pb.ClusterClient - nonRepeatableRetry retryRPCFunc -} - -func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberAdd(rctx, in, opts...) 
+func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { + err = rcc.retryf(ctx, func(rctx context.Context) error { + resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...) return err }) return resp, err } -func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberRemove(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberUpdate(rctx, in, opts...) - return err - }) - return resp, err -} - -// RetryMaintenanceClient implements a Maintenance. -func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { - repeatableRetry := c.newRetryWrapper(isRepeatableStopError) - nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) - mc := pb.NewMaintenanceClient(conn) - return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry} -} - -type retryMaintenanceClient struct { - *nonRepeatableMaintenanceClient - repeatableRetry retryRPCFunc -} - -func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { - err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Alarm(rctx, in, opts...) 
- return err - }) - return resp, err -} - -func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { - err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Status(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { - err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Hash(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { - err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { - stream, err = rmc.mc.Snapshot(rctx, in, opts...) - return err - }) - return stream, err -} - -type nonRepeatableMaintenanceClient struct { - mc pb.MaintenanceClient - nonRepeatableRetry retryRPCFunc -} - -func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { - err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Defragment(rctx, in, opts...) +func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { + err = rcc.retryf(ctx, func(rctx context.Context) error { + resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...) return err }) return resp, err } type retryAuthClient struct { - *nonRepeatableAuthClient - repeatableRetry retryRPCFunc + pb.AuthClient + retryf retryRpcFunc } -// RetryAuthClient implements a AuthClient. +// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy. 
func RetryAuthClient(c *Client) pb.AuthClient { - repeatableRetry := c.newRetryWrapper(isRepeatableStopError) - nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) - ac := pb.NewAuthClient(c.conn) - return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry} + return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper} } -func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { - err = rac.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserList(rctx, in, opts...) +func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { - err = rac.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserGet(rctx, in, opts...) +func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { - err = rac.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleGet(rctx, in, opts...) 
+func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.UserAdd(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { - err = rac.repeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleList(rctx, in, opts...) +func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.UserDelete(rctx, in, opts...) return err }) return resp, err } -type nonRepeatableAuthClient struct { - ac pb.AuthClient - nonRepeatableRetry retryRPCFunc -} - -func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.AuthEnable(rctx, in, opts...) +func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...) return err }) return resp, err } -func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.AuthDisable(rctx, in, opts...) 
+func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...) return err }) return resp, err } -func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserAdd(rctx, in, opts...) +func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...) return err }) return resp, err } -func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserDelete(rctx, in, opts...) +func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...) return err }) return resp, err } -func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserChangePassword(rctx, in, opts...) 
+func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...) return err }) return resp, err } -func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserGrantRole(rctx, in, opts...) +func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...) return err }) return resp, err } -func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserRevokeRole(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleAdd(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleDelete(rctx, in, opts...) 
- return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { - err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { - resp, err = rac.ac.Authenticate(rctx, in, opts...) +func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + err = rac.retryf(ctx, func(rctx context.Context) error { + resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...) return err }) return resp, err diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go index 2661c5942e2..a61decd6406 100644 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ b/vendor/github.com/coreos/etcd/clientv3/txn.go @@ -18,13 +18,13 @@ import ( "sync" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" + "google.golang.org/grpc" ) // Txn is the interface that wraps mini-transactions. 
// -// Txn(context.TODO()).If( +// Tx.If( // Compare(Value(k1), ">", v1), // Compare(Version(k1), "=", 2) // ).Then( @@ -49,6 +49,8 @@ type Txn interface { // Commit tries to commit the transaction. Commit() (*TxnResponse, error) + + // TODO: add a Do for shortcut the txn without any condition? } type txn struct { @@ -135,14 +137,30 @@ func (txn *txn) Else(ops ...Op) Txn { func (txn *txn) Commit() (*TxnResponse, error) { txn.mu.Lock() defer txn.mu.Unlock() + for { + resp, err := txn.commit() + if err == nil { + return resp, err + } + if isHaltErr(txn.ctx, err) { + return nil, toErr(txn.ctx, err) + } + if txn.isWrite { + return nil, toErr(txn.ctx, err) + } + } +} +func (txn *txn) commit() (*TxnResponse, error) { r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - var resp *pb.TxnResponse - var err error - resp, err = txn.kv.remote.Txn(txn.ctx, r) + var opts []grpc.CallOption + if !txn.isWrite { + opts = []grpc.CallOption{grpc.FailFast(false)} + } + resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...) if err != nil { - return nil, toErr(txn.ctx, err) + return nil, err } return (*TxnResponse)(resp), nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go index 12977aed896..9b083cc9462 100644 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -22,12 +22,8 @@ import ( v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" mvccpb "github.com/coreos/etcd/mvcc/mvccpb" - "golang.org/x/net/context" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" ) const ( @@ -43,9 +39,10 @@ type WatchChan <-chan WatchResponse type Watcher interface { // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. 
If revisions waiting to be sent over the - // watch are compacted, then the watch will be canceled by the server, the - // client will post a compacted error watch response, and the channel will close. + // through the returned channel. + // If the watch is slow or the required rev is compacted, the watch request + // might be canceled from the server-side and the chan will be closed. + // 'opts' can be: 'WithRev' and/or 'WithPrefix'. Watch(ctx context.Context, key string, opts ...OpOption) WatchChan // Close closes the watcher and cancels all watch requests. @@ -68,9 +65,6 @@ type WatchResponse struct { Created bool closeErr error - - // cancelReason is a reason of canceling watch - cancelReason string } // IsCreate returns true if the event tells that the key is newly created. @@ -91,9 +85,6 @@ func (wr *WatchResponse) Err() error { case wr.CompactRevision != 0: return v3rpc.ErrCompacted case wr.Canceled: - if len(wr.cancelReason) != 0 { - return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) - } return v3rpc.ErrFutureRev } return nil @@ -137,7 +128,7 @@ type watchGrpcStream struct { respc chan *pb.WatchResponse // donec closes to broadcast shutdown donec chan struct{} - // errc transmits errors from grpc Recv to the watch stream reconnect logic + // errc transmits errors from grpc Recv to the watch stream reconn logic errc chan error // closingc gets the watcherStream of closing watchers closingc chan *watcherStream @@ -216,15 +207,16 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { owner: w, remote: w.remote, ctx: ctx, - ctxKey: streamKeyFromCtx(inctx), + ctxKey: fmt.Sprintf("%v", inctx), cancel: cancel, substreams: make(map[int64]*watcherStream), - respc: make(chan *pb.WatchResponse), - reqc: make(chan *watchRequest), - donec: make(chan struct{}), - errc: make(chan error, 1), - closingc: make(chan *watcherStream), - resumec: make(chan struct{}), + + respc: make(chan *pb.WatchResponse), + reqc: make(chan 
*watchRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), } go wgs.run() return wgs @@ -255,7 +247,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch } ok := false - ctxKey := streamKeyFromCtx(ctx) + ctxKey := fmt.Sprintf("%v", ctx) // find or allocate appropriate grpc watch stream w.mu.Lock() @@ -318,14 +310,14 @@ func (w *watcher) Close() (err error) { w.streams = nil w.mu.Unlock() for _, wgs := range streams { - if werr := wgs.close(); werr != nil { + if werr := wgs.Close(); werr != nil { err = werr } } return err } -func (w *watchGrpcStream) close() (err error) { +func (w *watchGrpcStream) Close() (err error) { w.cancel() <-w.donec select { @@ -436,7 +428,7 @@ func (w *watchGrpcStream) run() { initReq: *wreq, id: -1, outc: outc, - // unbuffered so resumes won't cause repeat events + // unbufffered so resumes won't cause repeat events recvc: make(chan *WatchResponse), } @@ -488,7 +480,7 @@ func (w *watchGrpcStream) run() { req := &pb.WatchRequest{RequestUnion: cr} wc.Send(req) } - // watch client failed on Recv; spawn another if possible + // watch client failed to recv; spawn another if possible case err := <-w.errc: if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { closeErr = err @@ -528,6 +520,10 @@ func (w *watchGrpcStream) nextResume() *watcherStream { // dispatchEvent sends a WatchResponse to the appropriate watcher stream func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { + ws, ok := w.substreams[pbresp.WatchId] + if !ok { + return false + } events := make([]*Event, len(pbresp.Events)) for i, ev := range pbresp.Events { events[i] = (*Event)(ev) @@ -538,11 +534,6 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { CompactRevision: pbresp.CompactRevision, Created: pbresp.Created, Canceled: pbresp.Canceled, - cancelReason: pbresp.CancelReason, - } - ws, ok := 
w.substreams[pbresp.WatchId] - if !ok { - return false } select { case ws.recvc <- wr: @@ -734,11 +725,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str ws.closing = true close(ws.outc) ws.outc = nil - w.wg.Add(1) - go func() { - defer w.wg.Done() - w.closingc <- ws - }() + go func() { w.closingc <- ws }() case <-stopc: } }(w.resuming[i]) @@ -750,7 +737,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str return donec } -// joinSubstreams waits for all substream goroutines to complete. +// joinSubstream waits for all substream goroutines to complete func (w *watchGrpcStream) joinSubstreams() { for _, ws := range w.substreams { <-ws.donec @@ -762,9 +749,7 @@ func (w *watchGrpcStream) joinSubstreams() { } } -// openWatchClient retries opening a watch client until success or halt. -// manually retry in case "ws==nil && err==nil" -// TODO: remove FailFast=false +// openWatchClient retries opening a watchclient until retryConnection fails func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { for { select { @@ -785,7 +770,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) return ws, nil } -// toPB converts an internal watch request structure to its protobuf WatchRequest structure. 
+// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest) func (wr *watchRequest) toPB() *pb.WatchRequest { req := &pb.WatchCreateRequest{ StartRevision: wr.rev, @@ -798,10 +783,3 @@ func (wr *watchRequest) toPB() *pb.WatchRequest { cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} return &pb.WatchRequest{RequestUnion: cr} } - -func streamKeyFromCtx(ctx context.Context) string { - if md, ok := metadata.FromOutgoingContext(ctx); ok { - return fmt.Sprintf("%+v", md) - } - return "" -} diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/github.com/coreos/etcd/compactor/compactor.go index 270287072d8..322a0987011 100644 --- a/vendor/github.com/coreos/etcd/compactor/compactor.go +++ b/vendor/github.com/coreos/etcd/compactor/compactor.go @@ -30,8 +30,7 @@ var ( ) const ( - checkCompactionInterval = 5 * time.Minute - executeCompactionInterval = time.Hour + checkCompactionInterval = 5 * time.Minute ) type Compactable interface { @@ -42,8 +41,6 @@ type RevGetter interface { Rev() int64 } -// Periodic compacts the log by purging revisions older than -// the configured retention time. Compaction happens hourly. 
type Periodic struct { clock clockwork.Clock periodInHour int @@ -88,12 +85,11 @@ func (t *Periodic) Run() { continue } } - - if clock.Now().Sub(last) < executeCompactionInterval { + if clock.Now().Sub(last) < time.Duration(t.periodInHour)*time.Hour { continue } - rev, remaining := t.getRev(t.periodInHour) + rev := t.getRev(t.periodInHour) if rev < 0 { continue } @@ -101,7 +97,7 @@ func (t *Periodic) Run() { plog.Noticef("Starting auto-compaction at revision %d", rev) _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) if err == nil || err == mvcc.ErrCompacted { - t.revs = remaining + t.revs = make([]int64, 0) last = clock.Now() plog.Noticef("Finished auto-compaction at revision %d", rev) } else { @@ -128,10 +124,10 @@ func (t *Periodic) Resume() { t.paused = false } -func (t *Periodic) getRev(h int) (int64, []int64) { +func (t *Periodic) getRev(h int) int64 { i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval) if i < 0 { - return -1, t.revs + return -1 } - return t.revs[i], t.revs[i+1:] + return t.revs[i] } diff --git a/vendor/github.com/coreos/etcd/discovery/BUILD b/vendor/github.com/coreos/etcd/discovery/BUILD index 496402a6761..4a437e67af1 100644 --- a/vendor/github.com/coreos/etcd/discovery/BUILD +++ b/vendor/github.com/coreos/etcd/discovery/BUILD @@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["discovery.go"], + srcs = [ + "discovery.go", + "srv.go", + ], importpath = "github.com/coreos/etcd/discovery", visibility = ["//visibility:public"], deps = [ diff --git a/vendor/github.com/coreos/etcd/discovery/srv.go b/vendor/github.com/coreos/etcd/discovery/srv.go new file mode 100644 index 00000000000..c3d20ca9243 --- /dev/null +++ b/vendor/github.com/coreos/etcd/discovery/srv.go @@ -0,0 +1,104 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV + resolveTCPAddr = net.ResolveTCPAddr +) + +// SRVGetCluster gets the cluster information via DNS discovery. +// TODO(barakmich): Currently ignores priority and weight (as they don't make as much sense for a bootstrap) +// Also doesn't do any lookups for the token (though it could) +// Also sees each entry as a separate instance. +func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (string, string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host) + return "", "", err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, err := resolveTCPAddr("tcp", host) + if err != nil { + plog.Warningf("couldn't resolve host %s during SRV discovery", host) + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but 
URL shouldn't. + shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost) + if ok && url.Scheme != scheme { + plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } + } + return nil + } + + failCount := 0 + err := updateNodeMap("etcd-server-ssl", "https") + srvErr := make([]string, 2) + if err != nil { + srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err) + failCount++ + } + err = updateNodeMap("etcd-server", "http") + if err != nil { + srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err) + failCount++ + } + if failCount == 2 { + plog.Warningf(srvErr[0]) + plog.Warningf(srvErr[1]) + plog.Errorf("SRV discovery failed: too many errors querying DNS SRV records") + return "", "", err + } + return strings.Join(stringParts, ","), defaultToken, nil +} diff --git a/vendor/github.com/coreos/etcd/embed/BUILD b/vendor/github.com/coreos/etcd/embed/BUILD deleted file mode 100644 index 286bdae6892..00000000000 --- a/vendor/github.com/coreos/etcd/embed/BUILD +++ /dev/null @@ -1,60 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "doc.go", - "etcd.go", - "serve.go", - "util.go", - ], - importpath = "github.com/coreos/etcd/embed", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/cockroachdb/cmux:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3client:go_default_library", - 
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/cors:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/debugutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/runtime:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/srv:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", - "//vendor/github.com/coreos/etcd/wal:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/trace:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/keepalive:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = 
["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/embed/config.go b/vendor/github.com/coreos/etcd/embed/config.go deleted file mode 100644 index 90efb3937d7..00000000000 --- a/vendor/github.com/coreos/etcd/embed/config.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package embed - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/pkg/cors" - "github.com/coreos/etcd/pkg/netutil" - "github.com/coreos/etcd/pkg/srv" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - - "github.com/ghodss/yaml" - "google.golang.org/grpc" -) - -const ( - ClusterStateFlagNew = "new" - ClusterStateFlagExisting = "existing" - - DefaultName = "default" - DefaultMaxSnapshots = 5 - DefaultMaxWALs = 5 - DefaultMaxRequestBytes = 1.5 * 1024 * 1024 - DefaultGRPCKeepAliveMinTime = 5 * time.Second - DefaultGRPCKeepAliveInterval = 2 * time.Hour - DefaultGRPCKeepAliveTimeout = 20 * time.Second - - DefaultListenPeerURLs = "http://localhost:2380" - DefaultListenClientURLs = "http://localhost:2379" - - // maxElectionMs specifies the maximum value of election timeout. - // More details are listed in ../Documentation/tuning.md#time-parameters. 
- maxElectionMs = 50000 -) - -var ( - ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " + - "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"") - ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly") - - DefaultInitialAdvertisePeerURLs = "http://localhost:2380" - DefaultAdvertiseClientURLs = "http://localhost:2379" - - defaultHostname string - defaultHostStatus error -) - -func init() { - defaultHostname, defaultHostStatus = netutil.GetDefaultHost() -} - -// Config holds the arguments for configuring an etcd server. -type Config struct { - // member - - CorsInfo *cors.CORSInfo - LPUrls, LCUrls []url.URL - Dir string `json:"data-dir"` - WalDir string `json:"wal-dir"` - MaxSnapFiles uint `json:"max-snapshots"` - MaxWalFiles uint `json:"max-wals"` - Name string `json:"name"` - SnapCount uint64 `json:"snapshot-count"` - AutoCompactionRetention int `json:"auto-compaction-retention"` - - // TickMs is the number of milliseconds between heartbeat ticks. - // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1). - // make ticks a cluster wide configuration. - TickMs uint `json:"heartbeat-interval"` - ElectionMs uint `json:"election-timeout"` - QuotaBackendBytes int64 `json:"quota-backend-bytes"` - MaxRequestBytes uint `json:"max-request-bytes"` - - // gRPC server options - - // GRPCKeepAliveMinTime is the minimum interval that a client should - // wait before pinging server. When client pings "too fast", server - // sends goaway and closes the connection (errors: too_many_pings, - // http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens. - // Server expects client pings only when there is any active streams - // (PermitWithoutStream is set false). 
- GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"` - // GRPCKeepAliveInterval is the frequency of server-to-client ping - // to check if a connection is alive. Close a non-responsive connection - // after an additional duration of Timeout. 0 to disable. - GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"` - // GRPCKeepAliveTimeout is the additional duration of wait - // before closing a non-responsive connection. 0 to disable. - GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"` - - // clustering - - APUrls, ACUrls []url.URL - ClusterState string `json:"initial-cluster-state"` - DNSCluster string `json:"discovery-srv"` - Dproxy string `json:"discovery-proxy"` - Durl string `json:"discovery"` - InitialCluster string `json:"initial-cluster"` - InitialClusterToken string `json:"initial-cluster-token"` - StrictReconfigCheck bool `json:"strict-reconfig-check"` - EnableV2 bool `json:"enable-v2"` - - // security - - ClientTLSInfo transport.TLSInfo - ClientAutoTLS bool - PeerTLSInfo transport.TLSInfo - PeerAutoTLS bool - - // debug - - Debug bool `json:"debug"` - LogPkgLevels string `json:"log-package-levels"` - EnablePprof bool `json:"enable-pprof"` - Metrics string `json:"metrics"` - - // ForceNewCluster starts a new cluster even if previously started; unsafe. - ForceNewCluster bool `json:"force-new-cluster"` - - // UserHandlers is for registering users handlers and only used for - // embedding etcd into other applications. - // The map key is the route path for the handler, and - // you must ensure it can't be conflicted with etcd's. - UserHandlers map[string]http.Handler `json:"-"` - // ServiceRegister is for registering users' gRPC services. 
A simple usage example: - // cfg := embed.NewConfig() - // cfg.ServerRegister = func(s *grpc.Server) { - // pb.RegisterFooServer(s, &fooServer{}) - // pb.RegisterBarServer(s, &barServer{}) - // } - // embed.StartEtcd(cfg) - ServiceRegister func(*grpc.Server) `json:"-"` - - // auth - - AuthToken string `json:"auth-token"` -} - -// configYAML holds the config suitable for yaml parsing -type configYAML struct { - Config - configJSON -} - -// configJSON has file options that are translated into Config options -type configJSON struct { - LPUrlsJSON string `json:"listen-peer-urls"` - LCUrlsJSON string `json:"listen-client-urls"` - CorsJSON string `json:"cors"` - APUrlsJSON string `json:"initial-advertise-peer-urls"` - ACUrlsJSON string `json:"advertise-client-urls"` - ClientSecurityJSON securityConfig `json:"client-transport-security"` - PeerSecurityJSON securityConfig `json:"peer-transport-security"` -} - -type securityConfig struct { - CAFile string `json:"ca-file"` - CertFile string `json:"cert-file"` - KeyFile string `json:"key-file"` - CertAuth bool `json:"client-cert-auth"` - TrustedCAFile string `json:"trusted-ca-file"` - AutoTLS bool `json:"auto-tls"` -} - -// NewConfig creates a new Config populated with default values. 
-func NewConfig() *Config { - lpurl, _ := url.Parse(DefaultListenPeerURLs) - apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs) - lcurl, _ := url.Parse(DefaultListenClientURLs) - acurl, _ := url.Parse(DefaultAdvertiseClientURLs) - cfg := &Config{ - CorsInfo: &cors.CORSInfo{}, - MaxSnapFiles: DefaultMaxSnapshots, - MaxWalFiles: DefaultMaxWALs, - Name: DefaultName, - SnapCount: etcdserver.DefaultSnapCount, - MaxRequestBytes: DefaultMaxRequestBytes, - GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, - GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, - GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, - TickMs: 100, - ElectionMs: 1000, - LPUrls: []url.URL{*lpurl}, - LCUrls: []url.URL{*lcurl}, - APUrls: []url.URL{*apurl}, - ACUrls: []url.URL{*acurl}, - ClusterState: ClusterStateFlagNew, - InitialClusterToken: "etcd-cluster", - StrictReconfigCheck: true, - Metrics: "basic", - EnableV2: true, - AuthToken: "simple", - } - cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) - return cfg -} - -func ConfigFromFile(path string) (*Config, error) { - cfg := &configYAML{Config: *NewConfig()} - if err := cfg.configFromFile(path); err != nil { - return nil, err - } - return &cfg.Config, nil -} - -func (cfg *configYAML) configFromFile(path string) error { - b, err := ioutil.ReadFile(path) - if err != nil { - return err - } - - defaultInitialCluster := cfg.InitialCluster - - err = yaml.Unmarshal(b, cfg) - if err != nil { - return err - } - - if cfg.LPUrlsJSON != "" { - u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ",")) - if err != nil { - plog.Fatalf("unexpected error setting up listen-peer-urls: %v", err) - } - cfg.LPUrls = []url.URL(u) - } - - if cfg.LCUrlsJSON != "" { - u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ",")) - if err != nil { - plog.Fatalf("unexpected error setting up listen-client-urls: %v", err) - } - cfg.LCUrls = []url.URL(u) - } - - if cfg.CorsJSON != "" { - if err := cfg.CorsInfo.Set(cfg.CorsJSON); err != nil { - 
plog.Panicf("unexpected error setting up cors: %v", err) - } - } - - if cfg.APUrlsJSON != "" { - u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ",")) - if err != nil { - plog.Fatalf("unexpected error setting up initial-advertise-peer-urls: %v", err) - } - cfg.APUrls = []url.URL(u) - } - - if cfg.ACUrlsJSON != "" { - u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ",")) - if err != nil { - plog.Fatalf("unexpected error setting up advertise-peer-urls: %v", err) - } - cfg.ACUrls = []url.URL(u) - } - - // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName - if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster { - cfg.InitialCluster = "" - } - if cfg.ClusterState == "" { - cfg.ClusterState = ClusterStateFlagNew - } - - copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) { - tls.CAFile = ysc.CAFile - tls.CertFile = ysc.CertFile - tls.KeyFile = ysc.KeyFile - tls.ClientCertAuth = ysc.CertAuth - tls.TrustedCAFile = ysc.TrustedCAFile - } - copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON) - copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON) - cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS - cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS - - return cfg.Validate() -} - -func (cfg *Config) Validate() error { - if err := checkBindURLs(cfg.LPUrls); err != nil { - return err - } - if err := checkBindURLs(cfg.LCUrls); err != nil { - return err - } - - // Check if conflicting flags are passed. 
- nSet := 0 - for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} { - if v { - nSet++ - } - } - - if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting { - return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState) - } - - if nSet > 1 { - return ErrConflictBootstrapFlags - } - - if 5*cfg.TickMs > cfg.ElectionMs { - return fmt.Errorf("--election-timeout[%vms] should be at least as 5 times as --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs) - } - if cfg.ElectionMs > maxElectionMs { - return fmt.Errorf("--election-timeout[%vms] is too long, and should be set less than %vms", cfg.ElectionMs, maxElectionMs) - } - - // check this last since proxying in etcdmain may make this OK - if cfg.LCUrls != nil && cfg.ACUrls == nil { - return ErrUnsetAdvertiseClientURLsFlag - } - - return nil -} - -// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery. -func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) { - token = cfg.InitialClusterToken - switch { - case cfg.Durl != "": - urlsmap = types.URLsMap{} - // If using discovery, generate a temporary cluster based on - // self's advertised peer URLs - urlsmap[cfg.Name] = cfg.APUrls - token = cfg.Durl - case cfg.DNSCluster != "": - clusterStrs, cerr := srv.GetCluster("etcd-server", cfg.Name, cfg.DNSCluster, cfg.APUrls) - if cerr != nil { - plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr) - return nil, "", cerr - } - for _, s := range clusterStrs { - plog.Noticef("got bootstrap from DNS for etcd-server at %s", s) - } - clusterStr := strings.Join(clusterStrs, ",") - if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.CAFile == "" { - cfg.PeerTLSInfo.ServerName = cfg.DNSCluster - } - urlsmap, err = types.NewURLsMap(clusterStr) - // only etcd member must belong to the discovered cluster. 
- // proxy does not need to belong to the discovered cluster. - if which == "etcd" { - if _, ok := urlsmap[cfg.Name]; !ok { - return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name) - } - } - default: - // We're statically configured, and cluster has appropriately been set. - urlsmap, err = types.NewURLsMap(cfg.InitialCluster) - } - return urlsmap, token, err -} - -func (cfg Config) InitialClusterFromName(name string) (ret string) { - if len(cfg.APUrls) == 0 { - return "" - } - n := name - if name == "" { - n = DefaultName - } - for i := range cfg.APUrls { - ret = ret + "," + n + "=" + cfg.APUrls[i].String() - } - return ret[1:] -} - -func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew } -func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) } - -func (cfg Config) defaultPeerHost() bool { - return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs -} - -func (cfg Config) defaultClientHost() bool { - return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs -} - -// UpdateDefaultClusterFromName updates cluster advertise URLs with, if available, default host, -// if advertise URLs are default values(localhost:2379,2380) AND if listen URL is 0.0.0.0. -// e.g. advertise peer URL localhost:2380 or listen peer URL 0.0.0.0:2380 -// then the advertise peer host would be updated with machine's default host, -// while keeping the listen URL's port. -// User can work around this by explicitly setting URL with 127.0.0.1. -// It returns the default hostname, if used, and the error, if any, from getting the machine's default host. -// TODO: check whether fields are set instead of whether fields have default value -func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) { - if defaultHostname == "" || defaultHostStatus != nil { - // update 'initial-cluster' when only the name is specified (e.g. 
'etcd --name=abc') - if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster { - cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) - } - return "", defaultHostStatus - } - - used := false - pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port() - if cfg.defaultPeerHost() && pip == "0.0.0.0" { - cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)} - used = true - } - // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc') - if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster { - cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) - } - - cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port() - if cfg.defaultClientHost() && cip == "0.0.0.0" { - cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)} - used = true - } - dhost := defaultHostname - if !used { - dhost = "" - } - return dhost, defaultHostStatus -} - -// checkBindURLs returns an error if any URL uses a domain name. -// TODO: return error in 3.2.0 -func checkBindURLs(urls []url.URL) error { - for _, url := range urls { - if url.Scheme == "unix" || url.Scheme == "unixs" { - continue - } - host, _, err := net.SplitHostPort(url.Host) - if err != nil { - return err - } - if host == "localhost" { - // special case for local address - // TODO: support /etc/hosts ? - continue - } - if net.ParseIP(host) == nil { - return fmt.Errorf("expected IP in URL for binding (%s)", url.String()) - } - } - return nil -} diff --git a/vendor/github.com/coreos/etcd/embed/doc.go b/vendor/github.com/coreos/etcd/embed/doc.go deleted file mode 100644 index c555aa58eba..00000000000 --- a/vendor/github.com/coreos/etcd/embed/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package embed provides bindings for embedding an etcd server in a program. - -Launch an embedded etcd server using the configuration defaults: - - import ( - "log" - "time" - - "github.com/coreos/etcd/embed" - ) - - func main() { - cfg := embed.NewConfig() - cfg.Dir = "default.etcd" - e, err := embed.StartEtcd(cfg) - if err != nil { - log.Fatal(err) - } - defer e.Close() - select { - case <-e.Server.ReadyNotify(): - log.Printf("Server is ready!") - case <-time.After(60 * time.Second): - e.Server.Stop() // trigger a shutdown - log.Printf("Server took too long to start!") - } - log.Fatal(<-e.Err()) - } -*/ -package embed diff --git a/vendor/github.com/coreos/etcd/embed/etcd.go b/vendor/github.com/coreos/etcd/embed/etcd.go deleted file mode 100644 index 6d92f11ea66..00000000000 --- a/vendor/github.com/coreos/etcd/embed/etcd.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package embed - -import ( - "context" - "crypto/tls" - "fmt" - "io/ioutil" - defaultLog "log" - "net" - "net/http" - "path/filepath" - "sync" - "time" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/etcdhttp" - "github.com/coreos/etcd/etcdserver/api/v2http" - "github.com/coreos/etcd/pkg/cors" - "github.com/coreos/etcd/pkg/debugutil" - runtimeutil "github.com/coreos/etcd/pkg/runtime" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/rafthttp" - "github.com/coreos/pkg/capnslog" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" -) - -var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed") - -const ( - // internal fd usage includes disk usage and transport usage. - // To read/write snapshot, snap pkg needs 1. In normal case, wal pkg needs - // at most 2 to read/lock/write WALs. One case that it needs to 2 is to - // read all logs after some snapshot index, which locates at the end of - // the second last and the head of the last. For purging, it needs to read - // directory, so it needs 1. For fd monitor, it needs 1. - // For transport, rafthttp builds two long-polling connections and at most - // four temporary connections with each member. There are at most 9 members - // in a cluster, so it should reserve 96. - // For the safety, we set the total reserved number to 150. - reservedInternalFDNum = 150 -) - -// Etcd contains a running etcd server and its listeners. -type Etcd struct { - Peers []*peerListener - Clients []net.Listener - Server *etcdserver.EtcdServer - - cfg Config - stopc chan struct{} - errc chan error - sctxs map[string]*serveCtx - - closeOnce sync.Once -} - -type peerListener struct { - net.Listener - serve func() error - close func(context.Context) error -} - -// StartEtcd launches the etcd server and HTTP handlers for client/server communication. -// The returned Etcd.Server is not guaranteed to have joined the cluster. 
Wait -// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use. -func StartEtcd(inCfg *Config) (e *Etcd, err error) { - if err = inCfg.Validate(); err != nil { - return nil, err - } - serving := false - e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})} - cfg := &e.cfg - defer func() { - if e == nil || err == nil { - return - } - if !serving { - // errored before starting gRPC server for serveCtx.grpcServerC - for _, sctx := range e.sctxs { - close(sctx.grpcServerC) - } - } - e.Close() - e = nil - }() - - if e.Peers, err = startPeerListeners(cfg); err != nil { - return - } - if e.sctxs, err = startClientListeners(cfg); err != nil { - return - } - for _, sctx := range e.sctxs { - e.Clients = append(e.Clients, sctx.l) - } - - var ( - urlsmap types.URLsMap - token string - ) - - if !isMemberInitialized(cfg) { - urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd") - if err != nil { - return e, fmt.Errorf("error setting up initial cluster: %v", err) - } - } - - srvcfg := &etcdserver.ServerConfig{ - Name: cfg.Name, - ClientURLs: cfg.ACUrls, - PeerURLs: cfg.APUrls, - DataDir: cfg.Dir, - DedicatedWALDir: cfg.WalDir, - SnapCount: cfg.SnapCount, - MaxSnapFiles: cfg.MaxSnapFiles, - MaxWALFiles: cfg.MaxWalFiles, - InitialPeerURLsMap: urlsmap, - InitialClusterToken: token, - DiscoveryURL: cfg.Durl, - DiscoveryProxy: cfg.Dproxy, - NewCluster: cfg.IsNewCluster(), - ForceNewCluster: cfg.ForceNewCluster, - PeerTLSInfo: cfg.PeerTLSInfo, - TickMs: cfg.TickMs, - ElectionTicks: cfg.ElectionTicks(), - AutoCompactionRetention: cfg.AutoCompactionRetention, - QuotaBackendBytes: cfg.QuotaBackendBytes, - MaxRequestBytes: cfg.MaxRequestBytes, - StrictReconfigCheck: cfg.StrictReconfigCheck, - ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth, - AuthToken: cfg.AuthToken, - } - - if e.Server, err = etcdserver.NewServer(srvcfg); err != nil { - return - } - - // configure peer handlers after rafthttp.Transport started - ph := 
etcdhttp.NewPeerHandler(e.Server) - for _, p := range e.Peers { - srv := &http.Server{ - Handler: ph, - ReadTimeout: 5 * time.Minute, - ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error - } - - l := p.Listener - p.serve = func() error { return srv.Serve(l) } - p.close = func(ctx context.Context) error { - // gracefully shutdown http.Server - // close open listeners, idle connections - // until context cancel or time-out - return srv.Shutdown(ctx) - } - } - - // buffer channel so goroutines on closed connections won't wait forever - e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs)) - - e.Server.Start() - if err = e.serve(); err != nil { - return - } - serving = true - return -} - -// Config returns the current configuration. -func (e *Etcd) Config() Config { - return e.cfg -} - -func (e *Etcd) Close() { - e.closeOnce.Do(func() { close(e.stopc) }) - - timeout := 2 * time.Second - if e.Server != nil { - timeout = e.Server.Cfg.ReqTimeout() - } - for _, sctx := range e.sctxs { - for gs := range sctx.grpcServerC { - ch := make(chan struct{}) - go func() { - defer close(ch) - // close listeners to stop accepting new connections, - // will block on any existing transports - gs.GracefulStop() - }() - // wait until all pending RPCs are finished - select { - case <-ch: - case <-time.After(timeout): - // took too long, manually close open transports - // e.g. 
watch streams - gs.Stop() - // concurrent GracefulStop should be interrupted - <-ch - } - } - } - - for _, sctx := range e.sctxs { - sctx.cancel() - } - for i := range e.Clients { - if e.Clients[i] != nil { - e.Clients[i].Close() - } - } - - // close rafthttp transports - if e.Server != nil { - e.Server.Stop() - } - - // close all idle connections in peer handler (wait up to 1-second) - for i := range e.Peers { - if e.Peers[i] != nil && e.Peers[i].close != nil { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - e.Peers[i].close(ctx) - cancel() - } - } -} - -func (e *Etcd) Err() <-chan error { return e.errc } - -func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { - if cfg.PeerAutoTLS && cfg.PeerTLSInfo.Empty() { - phosts := make([]string, len(cfg.LPUrls)) - for i, u := range cfg.LPUrls { - phosts[i] = u.Host - } - cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts) - if err != nil { - plog.Fatalf("could not get certs (%v)", err) - } - } else if cfg.PeerAutoTLS { - plog.Warningf("ignoring peer auto TLS since certs given") - } - - if !cfg.PeerTLSInfo.Empty() { - plog.Infof("peerTLS: %s", cfg.PeerTLSInfo) - } - - peers = make([]*peerListener, len(cfg.LPUrls)) - defer func() { - if err == nil { - return - } - for i := range peers { - if peers[i] != nil && peers[i].close != nil { - plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String()) - peers[i].close(context.Background()) - } - } - }() - - for i, u := range cfg.LPUrls { - if u.Scheme == "http" { - if !cfg.PeerTLSInfo.Empty() { - plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String()) - } - if cfg.PeerTLSInfo.ClientCertAuth { - plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. 
Ignored client cert auth for this url.", u.String()) - } - } - peers[i] = &peerListener{close: func(context.Context) error { return nil }} - peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo) - if err != nil { - return nil, err - } - // once serve, overwrite with 'http.Server.Shutdown' - peers[i].close = func(context.Context) error { - return peers[i].Listener.Close() - } - plog.Info("listening for peers on ", u.String()) - } - return peers, nil -} - -func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { - if cfg.ClientAutoTLS && cfg.ClientTLSInfo.Empty() { - chosts := make([]string, len(cfg.LCUrls)) - for i, u := range cfg.LCUrls { - chosts[i] = u.Host - } - cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts) - if err != nil { - plog.Fatalf("could not get certs (%v)", err) - } - } else if cfg.ClientAutoTLS { - plog.Warningf("ignoring client auto TLS since certs given") - } - - if cfg.EnablePprof { - plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf) - } - - sctxs = make(map[string]*serveCtx) - for _, u := range cfg.LCUrls { - sctx := newServeCtx() - - if u.Scheme == "http" || u.Scheme == "unix" { - if !cfg.ClientTLSInfo.Empty() { - plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String()) - } - if cfg.ClientTLSInfo.ClientCertAuth { - plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. 
Ignored client cert auth for this url.", u.String()) - } - } - if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() { - return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String()) - } - - proto := "tcp" - addr := u.Host - if u.Scheme == "unix" || u.Scheme == "unixs" { - proto = "unix" - addr = u.Host + u.Path - } - - sctx.secure = u.Scheme == "https" || u.Scheme == "unixs" - sctx.insecure = !sctx.secure - if oldctx := sctxs[addr]; oldctx != nil { - oldctx.secure = oldctx.secure || sctx.secure - oldctx.insecure = oldctx.insecure || sctx.insecure - continue - } - - if sctx.l, err = net.Listen(proto, addr); err != nil { - return nil, err - } - // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking - // hosts that disable ipv6. So, use the address given by the user. - sctx.addr = addr - - if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil { - if fdLimit <= reservedInternalFDNum { - plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum) - } - sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum)) - } - - if proto == "tcp" { - if sctx.l, err = transport.NewKeepAliveListener(sctx.l, "tcp", nil); err != nil { - return nil, err - } - } - - plog.Info("listening for client requests on ", u.Host) - defer func() { - if err != nil { - sctx.l.Close() - plog.Info("stopping listening for client requests on ", u.Host) - } - }() - for k := range cfg.UserHandlers { - sctx.userHandlers[k] = cfg.UserHandlers[k] - } - sctx.serviceRegister = cfg.ServiceRegister - if cfg.EnablePprof || cfg.Debug { - sctx.registerPprof() - } - if cfg.Debug { - sctx.registerTrace() - } - sctxs[addr] = sctx - } - return sctxs, nil -} - -func (e *Etcd) serve() (err error) { - var ctlscfg *tls.Config - if !e.cfg.ClientTLSInfo.Empty() { - plog.Infof("ClientTLS: %s", 
e.cfg.ClientTLSInfo) - if ctlscfg, err = e.cfg.ClientTLSInfo.ServerConfig(); err != nil { - return err - } - } - - if e.cfg.CorsInfo.String() != "" { - plog.Infof("cors = %s", e.cfg.CorsInfo) - } - - // Start the peer server in a goroutine - for _, pl := range e.Peers { - go func(l *peerListener) { - e.errHandler(l.serve()) - }(pl) - } - - // Start a client server goroutine for each listen address - var h http.Handler - if e.Config().EnableV2 { - h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout()) - } else { - mux := http.NewServeMux() - etcdhttp.HandleBasic(mux, e.Server) - h = mux - } - h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo}) - - gopts := []grpc.ServerOption{} - if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) { - gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: e.cfg.GRPCKeepAliveMinTime, - PermitWithoutStream: false, - })) - } - if e.cfg.GRPCKeepAliveInterval > time.Duration(0) && - e.cfg.GRPCKeepAliveTimeout > time.Duration(0) { - gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: e.cfg.GRPCKeepAliveInterval, - Timeout: e.cfg.GRPCKeepAliveTimeout, - })) - } - for _, sctx := range e.sctxs { - go func(s *serveCtx) { - e.errHandler(s.serve(e.Server, ctlscfg, h, e.errHandler, gopts...)) - }(sctx) - } - return nil -} - -func (e *Etcd) errHandler(err error) { - select { - case <-e.stopc: - return - default: - } - select { - case <-e.stopc: - case e.errc <- err: - } -} diff --git a/vendor/github.com/coreos/etcd/embed/serve.go b/vendor/github.com/coreos/etcd/embed/serve.go deleted file mode 100644 index 3627f88a958..00000000000 --- a/vendor/github.com/coreos/etcd/embed/serve.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package embed - -import ( - "crypto/tls" - "io/ioutil" - defaultLog "log" - "net" - "net/http" - "strings" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3client" - "github.com/coreos/etcd/etcdserver/api/v3election" - "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" - v3electiongw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw" - "github.com/coreos/etcd/etcdserver/api/v3lock" - "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" - v3lockgw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw" - "github.com/coreos/etcd/etcdserver/api/v3rpc" - etcdservergw "github.com/coreos/etcd/etcdserver/etcdserverpb/gw" - "github.com/coreos/etcd/pkg/debugutil" - - "github.com/cockroachdb/cmux" - gw "github.com/grpc-ecosystem/grpc-gateway/runtime" - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -type serveCtx struct { - l net.Listener - addr string - secure bool - insecure bool - - ctx context.Context - cancel context.CancelFunc - - userHandlers map[string]http.Handler - serviceRegister func(*grpc.Server) - grpcServerC chan *grpc.Server -} - -func newServeCtx() *serveCtx { - ctx, cancel := context.WithCancel(context.Background()) - return &serveCtx{ctx: ctx, cancel: cancel, userHandlers: make(map[string]http.Handler), - grpcServerC: make(chan *grpc.Server, 2), // in case sctx.insecure,sctx.secure true - } -} - -// serve accepts incoming connections on the listener l, -// creating a new service goroutine for 
each. The service goroutines -// read requests and then call handler to reply to them. -func (sctx *serveCtx) serve( - s *etcdserver.EtcdServer, - tlscfg *tls.Config, - handler http.Handler, - errHandler func(error), - gopts ...grpc.ServerOption) error { - logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) - <-s.ReadyNotify() - plog.Info("ready to serve client requests") - - m := cmux.New(sctx.l) - v3c := v3client.New(s) - servElection := v3election.NewElectionServer(v3c) - servLock := v3lock.NewLockServer(v3c) - - if sctx.insecure { - gs := v3rpc.Server(s, nil, gopts...) - sctx.grpcServerC <- gs - v3electionpb.RegisterElectionServer(gs, servElection) - v3lockpb.RegisterLockServer(gs, servLock) - if sctx.serviceRegister != nil { - sctx.serviceRegister(gs) - } - grpcl := m.Match(cmux.HTTP2()) - go func() { errHandler(gs.Serve(grpcl)) }() - - opts := []grpc.DialOption{ - grpc.WithInsecure(), - } - gwmux, err := sctx.registerGateway(opts) - if err != nil { - return err - } - - httpmux := sctx.createMux(gwmux, handler) - - srvhttp := &http.Server{ - Handler: httpmux, - ErrorLog: logger, // do not log user error - } - httpl := m.Match(cmux.HTTP1()) - go func() { errHandler(srvhttp.Serve(httpl)) }() - plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String()) - } - - if sctx.secure { - gs := v3rpc.Server(s, tlscfg, gopts...) 
- sctx.grpcServerC <- gs - v3electionpb.RegisterElectionServer(gs, servElection) - v3lockpb.RegisterLockServer(gs, servLock) - if sctx.serviceRegister != nil { - sctx.serviceRegister(gs) - } - handler = grpcHandlerFunc(gs, handler) - - dtls := tlscfg.Clone() - // trust local server - dtls.InsecureSkipVerify = true - creds := credentials.NewTLS(dtls) - opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)} - gwmux, err := sctx.registerGateway(opts) - if err != nil { - return err - } - - tlsl := tls.NewListener(m.Match(cmux.Any()), tlscfg) - // TODO: add debug flag; enable logging when debug flag is set - httpmux := sctx.createMux(gwmux, handler) - - srv := &http.Server{ - Handler: httpmux, - TLSConfig: tlscfg, - ErrorLog: logger, // do not log user error - } - go func() { errHandler(srv.Serve(tlsl)) }() - - plog.Infof("serving client requests on %s", sctx.l.Addr().String()) - } - - close(sctx.grpcServerC) - return m.Serve() -} - -// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC -// connections or otherHandler otherwise. Copied from cockroachdb. -func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler { - if otherHandler == nil { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - grpcServer.ServeHTTP(w, r) - }) - } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { - grpcServer.ServeHTTP(w, r) - } else { - otherHandler.ServeHTTP(w, r) - } - }) -} - -type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error - -func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) { - ctx := sctx.ctx - conn, err := grpc.DialContext(ctx, sctx.addr, opts...) 
- if err != nil { - return nil, err - } - gwmux := gw.NewServeMux() - - handlers := []registerHandlerFunc{ - etcdservergw.RegisterKVHandler, - etcdservergw.RegisterWatchHandler, - etcdservergw.RegisterLeaseHandler, - etcdservergw.RegisterClusterHandler, - etcdservergw.RegisterMaintenanceHandler, - etcdservergw.RegisterAuthHandler, - v3lockgw.RegisterLockHandler, - v3electiongw.RegisterElectionHandler, - } - for _, h := range handlers { - if err := h(ctx, gwmux, conn); err != nil { - return nil, err - } - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr) - } - }() - - return gwmux, nil -} - -func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux { - httpmux := http.NewServeMux() - for path, h := range sctx.userHandlers { - httpmux.Handle(path, h) - } - - httpmux.Handle("/v3alpha/", gwmux) - if handler != nil { - httpmux.Handle("/", handler) - } - return httpmux -} - -func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) { - if sctx.userHandlers[s] != nil { - plog.Warningf("path %s already registered by user handler", s) - return - } - sctx.userHandlers[s] = h -} - -func (sctx *serveCtx) registerPprof() { - for p, h := range debugutil.PProfHandlers() { - sctx.registerUserHandler(p, h) - } -} - -func (sctx *serveCtx) registerTrace() { - reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) } - sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf)) - evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) } - sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf)) -} diff --git a/vendor/github.com/coreos/etcd/embed/util.go b/vendor/github.com/coreos/etcd/embed/util.go deleted file mode 100644 index 168e031389d..00000000000 --- a/vendor/github.com/coreos/etcd/embed/util.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// 
Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package embed - -import ( - "path/filepath" - - "github.com/coreos/etcd/wal" -) - -func isMemberInitialized(cfg *Config) bool { - waldir := cfg.WalDir - if waldir == "" { - waldir = filepath.Join(cfg.Dir, "member", "wal") - } - - return wal.Exist(waldir) -} diff --git a/vendor/github.com/coreos/etcd/error/error.go b/vendor/github.com/coreos/etcd/error/error.go index b541a628b87..8cf83cc716a 100644 --- a/vendor/github.com/coreos/etcd/error/error.go +++ b/vendor/github.com/coreos/etcd/error/error.go @@ -154,10 +154,9 @@ func (e Error) StatusCode() int { return status } -func (e Error) WriteTo(w http.ResponseWriter) error { +func (e Error) WriteTo(w http.ResponseWriter) { w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) w.Header().Set("Content-Type", "application/json") w.WriteHeader(e.StatusCode()) - _, err := w.Write([]byte(e.toJsonString() + "\n")) - return err + fmt.Fprintln(w, e.toJsonString()) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/BUILD b/vendor/github.com/coreos/etcd/etcdserver/BUILD index ebbd59bb139..e05450a1bee 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/BUILD @@ -6,7 +6,6 @@ go_library( "apply.go", "apply_auth.go", "apply_v2.go", - "backend.go", "cluster_util.go", "config.go", "consistent_index.go", @@ -41,6 +40,7 @@ go_library( "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", 
"//vendor/github.com/coreos/etcd/pkg/contention:go_default_library", "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/idutil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/BUILD index 5913cf0d91a..ab95a4f3277 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/api/BUILD @@ -29,11 +29,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:all-srcs", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:all-srcs", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3client:all-srcs", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election:all-srcs", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:all-srcs", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go index 5e2de58e9a1..ab8cee7cf89 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go @@ -33,10 +33,11 @@ var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api") // capabilityMaps is a static map of version to capability map. + // the base capabilities is the set of capability 2.0 supports. 
capabilityMaps = map[string]map[Capability]bool{ + "2.3.0": {AuthCapability: true}, "3.0.0": {AuthCapability: true, V3rpcCapability: true}, "3.1.0": {AuthCapability: true, V3rpcCapability: true}, - "3.2.0": {AuthCapability: true, V3rpcCapability: true}, } enableMapMu sync.RWMutex @@ -47,10 +48,7 @@ var ( ) func init() { - enabledMap = map[Capability]bool{ - AuthCapability: true, - V3rpcCapability: true, - } + enabledMap = make(map[Capability]bool) } // UpdateCapability updates the enabledMap when the cluster version increases. diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD deleted file mode 100644 index 323b6e08dc0..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "base.go", - "peer.go", - ], - importpath = "github.com/coreos/etcd/etcdserver/api/etcdhttp", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/error:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library", - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", - "//vendor/github.com/coreos/etcd/version:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) - 
-filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go deleted file mode 100644 index 283b32dbf95..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdhttp - -import ( - "encoding/json" - "expvar" - "fmt" - "net/http" - "strings" - "time" - - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/logutil" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/version" - "github.com/coreos/pkg/capnslog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp") - mlog = logutil.NewMergeLogger(plog) -) - -const ( - configPath = "/config" - metricsPath = "/metrics" - healthPath = "/health" - varsPath = "/debug/vars" - versionPath = "/version" -) - -// HandleBasic adds handlers to a mux for serving JSON etcd client requests -// that do not access the v2 store. -func HandleBasic(mux *http.ServeMux, server *etcdserver.EtcdServer) { - mux.HandleFunc(varsPath, serveVars) - mux.HandleFunc(configPath+"/local/log", logHandleFunc) - mux.Handle(metricsPath, prometheus.Handler()) - mux.Handle(healthPath, healthHandler(server)) - mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) -} - -func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r, "GET") { - return - } - if uint64(server.Leader()) == raft.None { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"health": "true"}`)) - } -} - -func versionHandler(c api.Cluster, fn 
func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - v := c.Version() - if v != nil { - fn(w, r, v.String()) - } else { - fn(w, r, "not_decided") - } - } -} - -func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { - if !allowMethod(w, r, "GET") { - return - } - vs := version.Versions{ - Server: version.Version, - Cluster: clusterV, - } - - w.Header().Set("Content-Type", "application/json") - b, err := json.Marshal(&vs) - if err != nil { - plog.Panicf("cannot marshal versions to json (%v)", err) - } - w.Write(b) -} - -func logHandleFunc(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r, "PUT") { - return - } - - in := struct{ Level string }{} - - d := json.NewDecoder(r.Body) - if err := d.Decode(&in); err != nil { - WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) - return - } - - logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) - if err != nil { - WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) - return - } - - plog.Noticef("globalLogLevel set to %q", logl.String()) - capnslog.SetGlobalLogLevel(logl) - w.WriteHeader(http.StatusNoContent) -} - -func serveVars(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r, "GET") { - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} - -func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool { - if m == r.Method { - return true - } - w.Header().Set("Allow", m) - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return false -} - -// WriteError logs and writes the given Error to the ResponseWriter -// If Error is an etcdErr, it is rendered 
to the ResponseWriter -// Otherwise, it is assumed to be a StatusInternalServerError -func WriteError(w http.ResponseWriter, r *http.Request, err error) { - if err == nil { - return - } - switch e := err.(type) { - case *etcdErr.Error: - e.WriteTo(w) - case *httptypes.HTTPError: - if et := e.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } - default: - switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: - mlog.MergeError(err) - default: - mlog.MergeErrorf("got unexpected response error (%v)", err) - } - herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") - if et := herr.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD index 680ea8c0f36..ab856d75cac 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD @@ -9,6 +9,7 @@ go_library( "doc.go", "http.go", "metrics.go", + "peer.go", ], importpath = "github.com/coreos/etcd/etcdserver/api/v2http", visibility = ["//visibility:public"], @@ -16,15 +17,18 @@ go_library( "//vendor/github.com/coreos/etcd/error:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/auth:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library", 
"//vendor/github.com/coreos/etcd/etcdserver/stats:go_default_library", + "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library", "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/etcd/raft:go_default_library", + "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", "//vendor/github.com/coreos/etcd/store:go_default_library", + "//vendor/github.com/coreos/etcd/version:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/github.com/jonboulle/clockwork:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go index aa1e71ec329..038f5417e67 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go @@ -17,6 +17,7 @@ package v2http import ( "encoding/json" "errors" + "expvar" "fmt" "io/ioutil" "net/http" @@ -29,36 +30,38 @@ import ( etcdErr "github.com/coreos/etcd/error" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" "github.com/coreos/etcd/etcdserver/auth" "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft" "github.com/coreos/etcd/store" + "github.com/coreos/etcd/version" + "github.com/coreos/pkg/capnslog" "github.com/jonboulle/clockwork" + "github.com/prometheus/client_golang/prometheus" "golang.org/x/net/context" ) const ( - authPrefix = "/v2/auth" - keysPrefix = "/v2/keys" - machinesPrefix = "/v2/machines" - membersPrefix = "/v2/members" - statsPrefix = 
"/v2/stats" + authPrefix = "/v2/auth" + keysPrefix = "/v2/keys" + deprecatedMachinesPrefix = "/v2/machines" + membersPrefix = "/v2/members" + statsPrefix = "/v2/stats" + varsPath = "/debug/vars" + metricsPath = "/metrics" + healthPath = "/health" + versionPath = "/version" + configPath = "/config" ) // NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests. func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler { - mux := http.NewServeMux() - etcdhttp.HandleBasic(mux, server) - handleV2(mux, server, timeout) - return requestLogger(mux) -} - -func handleV2(mux *http.ServeMux, server *etcdserver.EtcdServer, timeout time.Duration) { sec := auth.NewStore(server, timeout) + kh := &keysHandler{ sec: sec, server: server, @@ -81,23 +84,34 @@ func handleV2(mux *http.ServeMux, server *etcdserver.EtcdServer, timeout time.Du clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, } - mah := &machinesHandler{cluster: server.Cluster()} + dmh := &deprecatedMachinesHandler{ + cluster: server.Cluster(), + } sech := &authHandler{ sec: sec, cluster: server.Cluster(), clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, } + + mux := http.NewServeMux() mux.HandleFunc("/", http.NotFound) + mux.Handle(healthPath, healthHandler(server)) + mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) mux.Handle(keysPrefix, kh) mux.Handle(keysPrefix+"/", kh) mux.HandleFunc(statsPrefix+"/store", sh.serveStore) mux.HandleFunc(statsPrefix+"/self", sh.serveSelf) mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader) + mux.HandleFunc(varsPath, serveVars) + mux.HandleFunc(configPath+"/local/log", logHandleFunc) + mux.Handle(metricsPath, prometheus.Handler()) mux.Handle(membersPrefix, mh) mux.Handle(membersPrefix+"/", mh) - mux.Handle(machinesPrefix, mah) + mux.Handle(deprecatedMachinesPrefix, dmh) handleAuth(mux, sech) + + return requestLogger(mux) } type keysHandler struct { @@ -156,11 +170,11 @@ 
func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } -type machinesHandler struct { +type deprecatedMachinesHandler struct { cluster api.Cluster } -func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (h *deprecatedMachinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r.Method, "GET", "HEAD") { return } @@ -220,7 +234,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } now := h.clock.Now() m := membership.NewMember("", req.PeerURLs, "", &now) - _, err := h.server.AddMember(ctx, *m) + err := h.server.AddMember(ctx, *m) switch { case err == membership.ErrIDExists || err == membership.ErrPeerURLexists: writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) @@ -241,7 +255,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !ok { return } - _, err := h.server.RemoveMember(ctx, uint64(id)) + err := h.server.RemoveMember(ctx, uint64(id)) switch { case err == membership.ErrIDRemoved: writeError(w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id))) @@ -266,7 +280,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ID: id, RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()}, } - _, err := h.server.UpdateMember(ctx, m) + err := h.server.UpdateMember(ctx, m) switch { case err == membership.ErrPeerURLexists: writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) @@ -307,13 +321,103 @@ func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) { } stats := h.stats.LeaderStats() if stats == nil { - etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) + writeError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) return } w.Header().Set("Content-Type", "application/json") w.Write(stats) } +func serveVars(w 
http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET") { + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} + +func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET") { + return + } + if uint64(server.Leader()) == raft.None { + http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) + return + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { + http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"health": "true"}`)) + } +} + +func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + v := c.Version() + if v != nil { + fn(w, r, v.String()) + } else { + fn(w, r, "not_decided") + } + } +} + +func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { + if !allowMethod(w, r.Method, "GET") { + return + } + vs := version.Versions{ + Server: version.Version, + Cluster: clusterV, + } + + w.Header().Set("Content-Type", "application/json") + b, err := json.Marshal(&vs) + if err != nil { + plog.Panicf("cannot marshal versions to json (%v)", err) + } + w.Write(b) +} + +func logHandleFunc(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "PUT") { + return + } + + in := struct{ Level string }{} + + d := json.NewDecoder(r.Body) + if err := d.Decode(&in); err != nil { + writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) + 
return + } + + logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) + if err != nil { + writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) + return + } + + plog.Noticef("globalLogLevel set to %q", logl.String()) + capnslog.SetGlobalLogLevel(logl) + w.WriteHeader(http.StatusNoContent) +} + // parseKeyRequest converts a received http.Request on keysPrefix to // a server Request, performing validation of supplied fields as appropriate. // If any validation fails, an empty Request and non-nil error is returned. diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go index 589c172dbbb..62c99e19d4a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go @@ -20,11 +20,12 @@ import ( "strings" "time" - "github.com/coreos/etcd/etcdserver/api/etcdhttp" + etcdErr "github.com/coreos/etcd/error" + "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" + "github.com/coreos/etcd/etcdserver/auth" "github.com/coreos/etcd/pkg/logutil" - "github.com/coreos/pkg/capnslog" ) @@ -38,18 +39,37 @@ var ( mlog = logutil.NewMergeLogger(plog) ) +// writeError logs and writes the given Error to the ResponseWriter +// If Error is an etcdErr, it is rendered to the ResponseWriter +// Otherwise, it is assumed to be a StatusInternalServerError func writeError(w http.ResponseWriter, r *http.Request, err error) { if err == nil { return } - if e, ok := err.(auth.Error); ok { + switch e := err.(type) { + case *etcdErr.Error: + e.WriteTo(w) + case *httptypes.HTTPError: + if et := e.WriteTo(w); et != nil { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } + case auth.Error: herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error()) if et := herr.WriteTo(w); et != nil { plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) 
} - return + default: + switch err { + case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: + mlog.MergeError(err) + default: + mlog.MergeErrorf("got unexpected response error (%v)", err) + } + herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") + if et := herr.WriteTo(w); et != nil { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } } - etcdhttp.WriteError(w, r, err) } // allowMethod verifies that the given method is one of the allowed methods, diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go similarity index 97% rename from vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go rename to vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go index 721bae3c600..a1abadba8e7 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package etcdhttp +package v2http import ( "encoding/json" @@ -61,7 +61,7 @@ type peerMembersHandler struct { } func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r, "GET") { + if !allowMethod(w, r.Method, "GET") { return } w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD deleted file mode 100644 index 5fc2b9dc11c..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "v3client.go", - ], - importpath = "github.com/coreos/etcd/etcdserver/api/v3client", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", - "//vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go deleted file mode 100644 index 310715f5cd7..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v3client provides clientv3 interfaces from an etcdserver. -// -// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New: -// -// import ( -// "context" -// -// "github.com/coreos/etcd/embed" -// "github.com/coreos/etcd/etcdserver/api/v3client" -// ) -// -// ... -// -// // create an embedded EtcdServer from the default configuration -// cfg := embed.NewConfig() -// cfg.Dir = "default.etcd" -// e, err := embed.StartEtcd(cfg) -// if err != nil { -// // handle error! -// } -// -// // wrap the EtcdServer with v3client -// cli := v3client.New(e.Server) -// -// // use like an ordinary clientv3 -// resp, err := cli.Put(context.TODO(), "some-key", "it works!") -// if err != nil { -// // handle error! -// } -// -package v3client diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go deleted file mode 100644 index cc4147d2f0c..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package v3client - -import ( - "time" - - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc" - "github.com/coreos/etcd/proxy/grpcproxy/adapter" - - "golang.org/x/net/context" -) - -// New creates a clientv3 client that wraps an in-process EtcdServer. Instead -// of making gRPC calls through sockets, the client makes direct function calls -// to the etcd server through its api/v3rpc function interfaces. -func New(s *etcdserver.EtcdServer) *clientv3.Client { - c := clientv3.NewCtxClient(context.Background()) - - kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s)) - c.KV = clientv3.NewKVFromKVClient(kvc) - - lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s)) - c.Lease = clientv3.NewLeaseFromLeaseClient(lc, time.Second) - - wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s)) - c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc)} - - mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s)) - c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc) - - clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s)) - c.Cluster = clientv3.NewClusterFromClusterClient(clc) - - // TODO: implement clientv3.Auth interface? - - return c -} - -// BlankContext implements Stringer on a context so the ctx string doesn't -// depend on the context's WithValue data, which tends to be unsynchronized -// (e.g., x/net/trace), causing ctx.String() to throw data races. -type blankContext struct{ context.Context } - -func (*blankContext) String() string { return "(blankCtx)" } - -// watchWrapper wraps clientv3 watch calls to blank out the context -// to avoid races on trace data. 
-type watchWrapper struct{ clientv3.Watcher } - -func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { - return ww.Watcher.Watch(&blankContext{ctx}, key, opts...) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD deleted file mode 100644 index 55965b382df..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "election.go", - ], - importpath = "github.com/coreos/etcd/etcdserver/api/v3election", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go deleted file mode 100644 index d6fefd74150..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v3election provides a v3 election service from an etcdserver. -package v3election diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go deleted file mode 100644 index f9061c07926..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3election - -import ( - "golang.org/x/net/context" - - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/clientv3/concurrency" - epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" -) - -type electionServer struct { - c *clientv3.Client -} - -func NewElectionServer(c *clientv3.Client) epb.ElectionServer { - return &electionServer{c} -} - -func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) { - s, err := es.session(ctx, req.Lease) - if err != nil { - return nil, err - } - e := concurrency.NewElection(s, string(req.Name)) - if err = e.Campaign(ctx, string(req.Value)); err != nil { - return nil, err - } - return &epb.CampaignResponse{ - Header: e.Header(), - Leader: &epb.LeaderKey{ - Name: req.Name, - Key: []byte(e.Key()), - Rev: e.Rev(), - Lease: int64(s.Lease()), - }, - }, nil -} - -func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) { - s, err := es.session(ctx, req.Leader.Lease) - if err != nil { - return nil, err - } - e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) - if err := e.Proclaim(ctx, string(req.Value)); err != nil { - return nil, err - } - return &epb.ProclaimResponse{Header: e.Header()}, nil -} - -func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error { - s, err := es.session(stream.Context(), -1) - if err != nil { - return err - } - e := concurrency.NewElection(s, string(req.Name)) - ch := e.Observe(stream.Context()) - for stream.Context().Err() == nil { - select { - case <-stream.Context().Done(): - case resp, ok := <-ch: - if !ok { - return nil - } - lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]} - if err := stream.Send(lresp); err != nil { - return err - } - } - } - return stream.Context().Err() -} - -func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) 
(*epb.LeaderResponse, error) { - s, err := es.session(ctx, -1) - if err != nil { - return nil, err - } - l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx) - if lerr != nil { - return nil, lerr - } - return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil -} - -func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) { - s, err := es.session(ctx, req.Leader.Lease) - if err != nil { - return nil, err - } - e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) - if err := e.Resign(ctx); err != nil { - return nil, err - } - return &epb.ResignResponse{Header: e.Header()}, nil -} - -func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) { - s, err := concurrency.NewSession( - es.c, - concurrency.WithLease(clientv3.LeaseID(lease)), - concurrency.WithContext(ctx), - ) - if err != nil { - return nil, err - } - s.Orphan() - return s, nil -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD deleted file mode 100644 index e46a6a322e4..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD +++ /dev/null @@ -1,39 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -filegroup( - name = "go_default_library_protos", - srcs = ["v3election.proto"], - visibility = ["//visibility:public"], -) - -go_library( - name = "go_default_library", - srcs = ["v3election.pb.go"], - importpath = "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - 
"//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD deleted file mode 100644 index 41b80256081..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["v3election.pb.gw.go"], - importpath = "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/status:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = 
[":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go deleted file mode 100644 index ac00cbea983..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go +++ /dev/null @@ -1,313 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: etcdserver/api/v3election/v3electionpb/v3election.proto - -/* -Package v3electionpb is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package gw - -import ( - "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq v3electionpb.CampaignRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq v3electionpb.ProclaimRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq v3electionpb.LeaderRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) { - var protoReq v3electionpb.LeaderRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.Observe(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var 
protoReq v3electionpb.ResignRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -// RegisterElectionHandlerFromEndpoint is same as RegisterElectionHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterElectionHandler(ctx, mux, conn) -} - -// RegisterElectionHandler registers the http handlers for service Election to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn)) -} - -// RegisterElectionHandler registers the http handlers for service Election to "mux". -// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "ElectionClient" to call the correct interceptors. 
-func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error { - - mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "campaign"}, "")) - - pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "proclaim"}, "")) - - pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "leader"}, "")) - - pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "observe"}, "")) - - pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "resign"}, "")) -) - -var ( - forward_Election_Campaign_0 = runtime.ForwardResponseMessage - - forward_Election_Proclaim_0 = runtime.ForwardResponseMessage - - forward_Election_Leader_0 = runtime.ForwardResponseMessage - - forward_Election_Observe_0 = runtime.ForwardResponseStream - - forward_Election_Resign_0 = runtime.ForwardResponseMessage -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go deleted file mode 100644 index 92acb1469e9..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go +++ /dev/null @@ -1,2098 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: v3election.proto -// DO NOT EDIT! - -/* - Package v3electionpb is a generated protocol buffer package. 
- - It is generated from these files: - v3election.proto - - It has these top-level messages: - CampaignRequest - CampaignResponse - LeaderKey - LeaderRequest - LeaderResponse - ResignRequest - ResignResponse - ProclaimRequest - ProclaimResponse -*/ -package v3electionpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" - - _ "google.golang.org/genproto/googleapis/api/annotations" - - context "golang.org/x/net/context" - - grpc "google.golang.org/grpc" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type CampaignRequest struct { - // name is the election's identifier for the campaign. - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // lease is the ID of the lease attached to leadership of the election. If the - // lease expires or is revoked before resigning leadership, then the - // leadership is transferred to the next campaigner, if any. - Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` - // value is the initial proclaimed value set when the campaigner wins the - // election. 
- Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *CampaignRequest) Reset() { *m = CampaignRequest{} } -func (m *CampaignRequest) String() string { return proto.CompactTextString(m) } -func (*CampaignRequest) ProtoMessage() {} -func (*CampaignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{0} } - -func (m *CampaignRequest) GetName() []byte { - if m != nil { - return m.Name - } - return nil -} - -func (m *CampaignRequest) GetLease() int64 { - if m != nil { - return m.Lease - } - return 0 -} - -func (m *CampaignRequest) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type CampaignResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // leader describes the resources used for holding leadereship of the election. - Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"` -} - -func (m *CampaignResponse) Reset() { *m = CampaignResponse{} } -func (m *CampaignResponse) String() string { return proto.CompactTextString(m) } -func (*CampaignResponse) ProtoMessage() {} -func (*CampaignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{1} } - -func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *CampaignResponse) GetLeader() *LeaderKey { - if m != nil { - return m.Leader - } - return nil -} - -type LeaderKey struct { - // name is the election identifier that correponds to the leadership key. - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // key is an opaque key representing the ownership of the election. If the key - // is deleted, then leadership is lost. - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - // rev is the creation revision of the key. 
It can be used to test for ownership - // of an election during transactions by testing the key's creation revision - // matches rev. - Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"` - // lease is the lease ID of the election leader. - Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"` -} - -func (m *LeaderKey) Reset() { *m = LeaderKey{} } -func (m *LeaderKey) String() string { return proto.CompactTextString(m) } -func (*LeaderKey) ProtoMessage() {} -func (*LeaderKey) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{2} } - -func (m *LeaderKey) GetName() []byte { - if m != nil { - return m.Name - } - return nil -} - -func (m *LeaderKey) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *LeaderKey) GetRev() int64 { - if m != nil { - return m.Rev - } - return 0 -} - -func (m *LeaderKey) GetLease() int64 { - if m != nil { - return m.Lease - } - return 0 -} - -type LeaderRequest struct { - // name is the election identifier for the leadership information. - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *LeaderRequest) Reset() { *m = LeaderRequest{} } -func (m *LeaderRequest) String() string { return proto.CompactTextString(m) } -func (*LeaderRequest) ProtoMessage() {} -func (*LeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{3} } - -func (m *LeaderRequest) GetName() []byte { - if m != nil { - return m.Name - } - return nil -} - -type LeaderResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // kv is the key-value pair representing the latest leader update. 
- Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` -} - -func (m *LeaderResponse) Reset() { *m = LeaderResponse{} } -func (m *LeaderResponse) String() string { return proto.CompactTextString(m) } -func (*LeaderResponse) ProtoMessage() {} -func (*LeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{4} } - -func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaderResponse) GetKv() *mvccpb.KeyValue { - if m != nil { - return m.Kv - } - return nil -} - -type ResignRequest struct { - // leader is the leadership to relinquish by resignation. - Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"` -} - -func (m *ResignRequest) Reset() { *m = ResignRequest{} } -func (m *ResignRequest) String() string { return proto.CompactTextString(m) } -func (*ResignRequest) ProtoMessage() {} -func (*ResignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{5} } - -func (m *ResignRequest) GetLeader() *LeaderKey { - if m != nil { - return m.Leader - } - return nil -} - -type ResignResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *ResignResponse) Reset() { *m = ResignResponse{} } -func (m *ResignResponse) String() string { return proto.CompactTextString(m) } -func (*ResignResponse) ProtoMessage() {} -func (*ResignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{6} } - -func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type ProclaimRequest struct { - // leader is the leadership hold on the election. - Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"` - // value is an update meant to overwrite the leader's current value. 
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} } -func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) } -func (*ProclaimRequest) ProtoMessage() {} -func (*ProclaimRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{7} } - -func (m *ProclaimRequest) GetLeader() *LeaderKey { - if m != nil { - return m.Leader - } - return nil -} - -func (m *ProclaimRequest) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type ProclaimResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} } -func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) } -func (*ProclaimResponse) ProtoMessage() {} -func (*ProclaimResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{8} } - -func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func init() { - proto.RegisterType((*CampaignRequest)(nil), "v3electionpb.CampaignRequest") - proto.RegisterType((*CampaignResponse)(nil), "v3electionpb.CampaignResponse") - proto.RegisterType((*LeaderKey)(nil), "v3electionpb.LeaderKey") - proto.RegisterType((*LeaderRequest)(nil), "v3electionpb.LeaderRequest") - proto.RegisterType((*LeaderResponse)(nil), "v3electionpb.LeaderResponse") - proto.RegisterType((*ResignRequest)(nil), "v3electionpb.ResignRequest") - proto.RegisterType((*ResignResponse)(nil), "v3electionpb.ResignResponse") - proto.RegisterType((*ProclaimRequest)(nil), "v3electionpb.ProclaimRequest") - proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse") -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Election service - -type ElectionClient interface { - // Campaign waits to acquire leadership in an election, returning a LeaderKey - // representing the leadership if successful. The LeaderKey can then be used - // to issue new values on the election, transactionally guard API requests on - // leadership still being held, and resign from the election. - Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) - // Proclaim updates the leader's posted value with a new value. - Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) - // Leader returns the current election proclamation, if any. - Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) - // Observe streams election proclamations in-order as made by the election's - // elected leaders. - Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) - // Resign releases election leadership so other campaigners may acquire - // leadership on the election. - Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) -} - -type electionClient struct { - cc *grpc.ClientConn -} - -func NewElectionClient(cc *grpc.ClientConn) ElectionClient { - return &electionClient{cc} -} - -func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) { - out := new(CampaignResponse) - err := grpc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) { - out := new(ProclaimResponse) - err := grpc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) { - out := new(LeaderResponse) - err := grpc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Election_serviceDesc.Streams[0], c.cc, "/v3electionpb.Election/Observe", opts...) - if err != nil { - return nil, err - } - x := &electionObserveClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Election_ObserveClient interface { - Recv() (*LeaderResponse, error) - grpc.ClientStream -} - -type electionObserveClient struct { - grpc.ClientStream -} - -func (x *electionObserveClient) Recv() (*LeaderResponse, error) { - m := new(LeaderResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) { - out := new(ResignResponse) - err := grpc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Election service - -type ElectionServer interface { - // Campaign waits to acquire leadership in an election, returning a LeaderKey - // representing the leadership if successful. The LeaderKey can then be used - // to issue new values on the election, transactionally guard API requests on - // leadership still being held, and resign from the election. - Campaign(context.Context, *CampaignRequest) (*CampaignResponse, error) - // Proclaim updates the leader's posted value with a new value. - Proclaim(context.Context, *ProclaimRequest) (*ProclaimResponse, error) - // Leader returns the current election proclamation, if any. - Leader(context.Context, *LeaderRequest) (*LeaderResponse, error) - // Observe streams election proclamations in-order as made by the election's - // elected leaders. - Observe(*LeaderRequest, Election_ObserveServer) error - // Resign releases election leadership so other campaigners may acquire - // leadership on the election. 
- Resign(context.Context, *ResignRequest) (*ResignResponse, error) -} - -func RegisterElectionServer(s *grpc.Server, srv ElectionServer) { - s.RegisterService(&_Election_serviceDesc, srv) -} - -func _Election_Campaign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CampaignRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElectionServer).Campaign(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3electionpb.Election/Campaign", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElectionServer).Campaign(ctx, req.(*CampaignRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Election_Proclaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ProclaimRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElectionServer).Proclaim(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3electionpb.Election/Proclaim", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElectionServer).Proclaim(ctx, req.(*ProclaimRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Election_Leader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElectionServer).Leader(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3electionpb.Election/Leader", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElectionServer).Leader(ctx, 
req.(*LeaderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Election_Observe_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(LeaderRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ElectionServer).Observe(m, &electionObserveServer{stream}) -} - -type Election_ObserveServer interface { - Send(*LeaderResponse) error - grpc.ServerStream -} - -type electionObserveServer struct { - grpc.ServerStream -} - -func (x *electionObserveServer) Send(m *LeaderResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Election_Resign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResignRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElectionServer).Resign(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3electionpb.Election/Resign", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElectionServer).Resign(ctx, req.(*ResignRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Election_serviceDesc = grpc.ServiceDesc{ - ServiceName: "v3electionpb.Election", - HandlerType: (*ElectionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Campaign", - Handler: _Election_Campaign_Handler, - }, - { - MethodName: "Proclaim", - Handler: _Election_Proclaim_Handler, - }, - { - MethodName: "Leader", - Handler: _Election_Leader_Handler, - }, - { - MethodName: "Resign", - Handler: _Election_Resign_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Observe", - Handler: _Election_Observe_Handler, - ServerStreams: true, - }, - }, - Metadata: "v3election.proto", -} - -func (m *CampaignRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - 
} - return dAtA[:n], nil -} - -func (m *CampaignRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if m.Lease != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) - } - if len(m.Value) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - return i, nil -} - -func (m *CampaignResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CampaignResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) - n1, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.Leader != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) - n2, err := m.Leader.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func (m *LeaderKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaderKey) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Key) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if m.Rev != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintV3Election(dAtA, i, uint64(m.Rev)) - } - if m.Lease != 0 { - dAtA[i] = 0x20 - i++ - 
i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) - } - return i, nil -} - -func (m *LeaderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaderRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - return i, nil -} - -func (m *LeaderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaderResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) - n3, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.Kv != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintV3Election(dAtA, i, uint64(m.Kv.Size())) - n4, err := m.Kv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - return i, nil -} - -func (m *ResignRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResignRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Leader != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) - n5, err := m.Leader.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - return i, nil -} - -func (m *ResignResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *ResignResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) - n6, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - return i, nil -} - -func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProclaimRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Leader != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) - n7, err := m.Leader.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if len(m.Value) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - return i, nil -} - -func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProclaimResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) - n8, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil -} - -func encodeFixed64V3Election(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32V3Election(dAtA []byte, offset int, v uint32) int { - 
dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintV3Election(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CampaignRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovV3Election(uint64(m.Lease)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - return n -} - -func (m *CampaignResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - if m.Leader != nil { - l = m.Leader.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - return n -} - -func (m *LeaderKey) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - if m.Rev != 0 { - n += 1 + sovV3Election(uint64(m.Rev)) - } - if m.Lease != 0 { - n += 1 + sovV3Election(uint64(m.Lease)) - } - return n -} - -func (m *LeaderRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - return n -} - -func (m *LeaderResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - if m.Kv != nil { - l = m.Kv.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - return n -} - -func (m *ResignRequest) Size() (n int) { - var l int - _ = l - if m.Leader != nil { - l = m.Leader.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - return n -} - -func (m *ResignResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - return n -} - -func 
(m *ProclaimRequest) Size() (n int) { - var l int - _ = l - if m.Leader != nil { - l = m.Leader.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - return n -} - -func (m *ProclaimResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - return n -} - -func sovV3Election(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozV3Election(x uint64) (n int) { - return sovV3Election(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CampaignRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CampaignRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CampaignRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
- if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CampaignResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CampaignResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CampaignResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Leader == nil { - m.Leader = &LeaderKey{} - } - if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaderKey) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaderKey: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaderKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) - if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Rev", wireType) - } - m.Rev = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Rev |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
- if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kv == nil { - m.Kv = &mvccpb.KeyValue{} - } - if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResignRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResignRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResignRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Leader == nil { - m.Leader 
= &LeaderKey{} - } - if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResignResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResignResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResignResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Election - } 
- if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProclaimRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProclaimRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Leader == nil { - m.Leader = &LeaderKey{} - } - if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProclaimResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProclaimResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProclaimResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - 
if skippy < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipV3Election(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Election - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Election - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Election - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthV3Election - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Election - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipV3Election(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") 
-} - -var ( - ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("v3election.proto", fileDescriptorV3Election) } - -var fileDescriptorV3Election = []byte{ - // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x65, 0x9d, 0x10, 0xca, 0x90, 0xb6, 0x96, 0x55, 0x89, 0x34, 0xa4, 0x26, 0xda, 0x02, 0xaa, - 0x72, 0xf0, 0xa2, 0x86, 0x53, 0x4e, 0x08, 0x04, 0xaa, 0x54, 0x24, 0xc0, 0x07, 0x04, 0xc7, 0x8d, - 0x3b, 0x4a, 0xa2, 0x38, 0xde, 0xc5, 0x4e, 0x2d, 0xe5, 0xca, 0x2f, 0x70, 0xe1, 0x33, 0xf8, 0x0c, - 0x8e, 0x48, 0xfc, 0x00, 0x0a, 0x7c, 0x08, 0xda, 0x5d, 0x1b, 0x3b, 0x6e, 0x88, 0x50, 0x73, 0xb1, - 0xc6, 0x33, 0xcf, 0xf3, 0xe6, 0xbd, 0x9d, 0x35, 0xd8, 0x69, 0x1f, 0x43, 0x0c, 0xe6, 0x13, 0x11, - 0x79, 0x32, 0x16, 0x73, 0xe1, 0x34, 0x8b, 0x8c, 0x1c, 0xb6, 0x0f, 0x46, 0x62, 0x24, 0x74, 0x81, - 0xa9, 0xc8, 0x60, 0xda, 0x8f, 0x70, 0x1e, 0x5c, 0x30, 0xf5, 0x48, 0x30, 0x4e, 0x31, 0x2e, 0x85, - 0x72, 0xc8, 0x62, 0x19, 0x64, 0xb8, 0x43, 0x8d, 0x9b, 0xa5, 0x41, 0xa0, 0x1f, 0x72, 0xc8, 0xa6, - 0x69, 0x56, 0xea, 0x8c, 0x84, 0x18, 0x85, 0xc8, 0xb8, 0x9c, 0x30, 0x1e, 0x45, 0x62, 0xce, 0x15, - 0x63, 0x62, 0xaa, 0xf4, 0x2d, 0xec, 0x3f, 0xe7, 0x33, 0xc9, 0x27, 0xa3, 0xc8, 0xc7, 0x8f, 0x97, - 0x98, 0xcc, 0x1d, 0x07, 0xea, 0x11, 0x9f, 0x61, 0x8b, 0x74, 0xc9, 0x49, 0xd3, 0xd7, 0xb1, 0x73, - 0x00, 0x37, 0x43, 0xe4, 0x09, 0xb6, 0xac, 0x2e, 0x39, 0xa9, 0xf9, 0xe6, 0x45, 0x65, 0x53, 0x1e, - 0x5e, 0x62, 0xab, 0xa6, 0xa1, 0xe6, 0x85, 0x2e, 0xc0, 0x2e, 0x5a, 0x26, 0x52, 0x44, 0x09, 0x3a, - 0x4f, 0xa0, 0x31, 0x46, 0x7e, 0x81, 0xb1, 0xee, 0x7a, 0xe7, 0xb4, 0xe3, 0x95, 0x85, 0x78, 0x39, - 0xee, 0x4c, 0x63, 0xfc, 0x0c, 0xeb, 0x30, 0x68, 0x84, 0xe6, 0x2b, 0x4b, 0x7f, 0x75, 0xd7, 0x2b, - 0x5b, 0xe6, 0xbd, 0xd2, 0xb5, 0x73, 0x5c, 0xf8, 0x19, 0x8c, 0x7e, 0x80, 0xdb, 
0x7f, 0x93, 0x6b, - 0x75, 0xd8, 0x50, 0x9b, 0xe2, 0x42, 0xb7, 0x6b, 0xfa, 0x2a, 0x54, 0x99, 0x18, 0x53, 0xad, 0xa0, - 0xe6, 0xab, 0xb0, 0xd0, 0x5a, 0x2f, 0x69, 0xa5, 0xc7, 0xb0, 0x6b, 0x5a, 0x6f, 0xb0, 0x89, 0x8e, - 0x61, 0x2f, 0x07, 0x6d, 0x25, 0xbc, 0x0b, 0xd6, 0x34, 0xcd, 0x44, 0xdb, 0x9e, 0x39, 0x51, 0xef, - 0x1c, 0x17, 0xef, 0x94, 0xc1, 0xbe, 0x35, 0x4d, 0xe9, 0x53, 0xd8, 0xf5, 0x31, 0x29, 0x9d, 0x5a, - 0xe1, 0x15, 0xf9, 0x3f, 0xaf, 0x5e, 0xc2, 0x5e, 0xde, 0x61, 0x9b, 0x59, 0xe9, 0x7b, 0xd8, 0x7f, - 0x13, 0x8b, 0x20, 0xe4, 0x93, 0xd9, 0x75, 0x67, 0x29, 0x16, 0xc9, 0x2a, 0x2f, 0xd2, 0x19, 0xd8, - 0x45, 0xe7, 0x6d, 0x66, 0x3c, 0xfd, 0x5a, 0x87, 0x9d, 0x17, 0xd9, 0x00, 0x8e, 0x84, 0x9d, 0x7c, - 0x3f, 0x9d, 0xa3, 0xd5, 0xc9, 0x2a, 0x57, 0xa1, 0xed, 0xfe, 0xab, 0x6c, 0x58, 0xe8, 0xc3, 0x4f, - 0x3f, 0x7e, 0x7f, 0xb6, 0xee, 0xd3, 0x36, 0x4b, 0xfb, 0x3c, 0x94, 0x63, 0xce, 0x72, 0x34, 0x0b, - 0x32, 0xec, 0x80, 0xf4, 0x14, 0x63, 0x2e, 0xa4, 0xca, 0x58, 0xb1, 0xae, 0xca, 0x58, 0xd5, 0xbf, - 0x89, 0x51, 0x66, 0x58, 0xc5, 0x38, 0x86, 0x86, 0x71, 0xd9, 0xb9, 0xb7, 0xce, 0xfb, 0x9c, 0xad, - 0xb3, 0xbe, 0x98, 0x71, 0x1d, 0x6b, 0xae, 0x23, 0xda, 0xba, 0xca, 0x65, 0xce, 0x4d, 0x31, 0x85, - 0x70, 0xeb, 0xf5, 0x50, 0xfb, 0xbf, 0x0d, 0xd5, 0x03, 0x4d, 0xe5, 0xd2, 0xc3, 0xab, 0x54, 0xc2, - 0x74, 0x1f, 0x90, 0xde, 0x63, 0xa2, 0x74, 0x99, 0xa5, 0xad, 0x92, 0xad, 0x5c, 0x86, 0x2a, 0xd9, - 0xea, 0x9e, 0x6f, 0xd2, 0x15, 0x6b, 0xe4, 0x80, 0xf4, 0x9e, 0xd9, 0xdf, 0x96, 0x2e, 0xf9, 0xbe, - 0x74, 0xc9, 0xcf, 0xa5, 0x4b, 0xbe, 0xfc, 0x72, 0x6f, 0x0c, 0x1b, 0xfa, 0x8f, 0xd9, 0xff, 0x13, - 0x00, 0x00, 0xff, 0xff, 0xfc, 0x4d, 0x5a, 0x40, 0xca, 0x05, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto deleted file mode 100644 index ebf6c88f7fa..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto 
+++ /dev/null @@ -1,119 +0,0 @@ -syntax = "proto3"; -package v3electionpb; - -import "gogoproto/gogo.proto"; -import "etcd/etcdserver/etcdserverpb/rpc.proto"; -import "etcd/mvcc/mvccpb/kv.proto"; - -// for grpc-gateway -import "google/api/annotations.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// The election service exposes client-side election facilities as a gRPC interface. -service Election { - // Campaign waits to acquire leadership in an election, returning a LeaderKey - // representing the leadership if successful. The LeaderKey can then be used - // to issue new values on the election, transactionally guard API requests on - // leadership still being held, and resign from the election. - rpc Campaign(CampaignRequest) returns (CampaignResponse) { - option (google.api.http) = { - post: "/v3alpha/election/campaign" - body: "*" - }; - } - // Proclaim updates the leader's posted value with a new value. - rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) { - option (google.api.http) = { - post: "/v3alpha/election/proclaim" - body: "*" - }; - } - // Leader returns the current election proclamation, if any. - rpc Leader(LeaderRequest) returns (LeaderResponse) { - option (google.api.http) = { - post: "/v3alpha/election/leader" - body: "*" - }; - } - // Observe streams election proclamations in-order as made by the election's - // elected leaders. - rpc Observe(LeaderRequest) returns (stream LeaderResponse) { - option (google.api.http) = { - post: "/v3alpha/election/observe" - body: "*" - }; - } - // Resign releases election leadership so other campaigners may acquire - // leadership on the election. - rpc Resign(ResignRequest) returns (ResignResponse) { - option (google.api.http) = { - post: "/v3alpha/election/resign" - body: "*" - }; - } -} - -message CampaignRequest { - // name is the election's identifier for the campaign. 
- bytes name = 1; - // lease is the ID of the lease attached to leadership of the election. If the - // lease expires or is revoked before resigning leadership, then the - // leadership is transferred to the next campaigner, if any. - int64 lease = 2; - // value is the initial proclaimed value set when the campaigner wins the - // election. - bytes value = 3; -} - -message CampaignResponse { - etcdserverpb.ResponseHeader header = 1; - // leader describes the resources used for holding leadereship of the election. - LeaderKey leader = 2; -} - -message LeaderKey { - // name is the election identifier that correponds to the leadership key. - bytes name = 1; - // key is an opaque key representing the ownership of the election. If the key - // is deleted, then leadership is lost. - bytes key = 2; - // rev is the creation revision of the key. It can be used to test for ownership - // of an election during transactions by testing the key's creation revision - // matches rev. - int64 rev = 3; - // lease is the lease ID of the election leader. - int64 lease = 4; -} - -message LeaderRequest { - // name is the election identifier for the leadership information. - bytes name = 1; -} - -message LeaderResponse { - etcdserverpb.ResponseHeader header = 1; - // kv is the key-value pair representing the latest leader update. - mvccpb.KeyValue kv = 2; -} - -message ResignRequest { - // leader is the leadership to relinquish by resignation. - LeaderKey leader = 1; -} - -message ResignResponse { - etcdserverpb.ResponseHeader header = 1; -} - -message ProclaimRequest { - // leader is the leadership hold on the election. - LeaderKey leader = 1; - // value is an update meant to overwrite the leader's current value. 
- bytes value = 2; -} - -message ProclaimResponse { - etcdserverpb.ResponseHeader header = 1; -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD deleted file mode 100644 index a528567d38f..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "lock.go", - ], - importpath = "github.com/coreos/etcd/etcdserver/api/v3lock", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go deleted file mode 100644 index e0a1008abc9..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v3lock provides a v3 locking service from an etcdserver. -package v3lock diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go deleted file mode 100644 index 66465bf13f6..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3lock - -import ( - "golang.org/x/net/context" - - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/clientv3/concurrency" - "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" -) - -type lockServer struct { - c *clientv3.Client -} - -func NewLockServer(c *clientv3.Client) v3lockpb.LockServer { - return &lockServer{c} -} - -func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { - s, err := concurrency.NewSession( - ls.c, - concurrency.WithLease(clientv3.LeaseID(req.Lease)), - concurrency.WithContext(ctx), - ) - if err != nil { - return nil, err - } - s.Orphan() - m := concurrency.NewMutex(s, string(req.Name)) - if err = m.Lock(ctx); err != nil { - return nil, err - } - return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil -} - -func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { - resp, err := ls.c.Delete(ctx, string(req.Key)) - if err != nil { - return nil, err - } - return &v3lockpb.UnlockResponse{Header: resp.Header}, nil -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD deleted file mode 100644 index abe9cd04aeb..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -filegroup( - name = "go_default_library_protos", - srcs = ["v3lock.proto"], - visibility = ["//visibility:public"], -) - -go_library( - name = "go_default_library", - srcs = ["v3lock.pb.go"], - importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - 
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD deleted file mode 100644 index 9d54f77260e..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["v3lock.pb.gw.go"], - importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/status:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go deleted file mode 100644 index 5aef4756dfe..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go +++ /dev/null @@ -1,167 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: etcdserver/api/v3lock/v3lockpb/v3lock.proto - -/* -Package v3lockpb is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package gw - -import ( - "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq v3lockpb.LockRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq v3lockpb.UnlockRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != 
nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -// RegisterLockHandlerFromEndpoint is same as RegisterLockHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterLockHandler(ctx, mux, conn) -} - -// RegisterLockHandler registers the http handlers for service Lock to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn)) -} - -// RegisterLockHandler registers the http handlers for service Lock to "mux". -// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "LockClient" to call the correct interceptors. 
-func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error { - - mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3alpha", "lock"}, "")) - - pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lock", "unlock"}, "")) -) - -var ( - forward_Lock_Lock_0 = runtime.ForwardResponseMessage - - forward_Lock_Unlock_0 = runtime.ForwardResponseMessage -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go deleted file mode 100644 index dcf2bad4019..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go +++ /dev/null @@ -1,978 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: v3lock.proto -// DO NOT EDIT! - -/* - Package v3lockpb is a generated protocol buffer package. - - It is generated from these files: - v3lock.proto - - It has these top-level messages: - LockRequest - LockResponse - UnlockRequest - UnlockResponse -*/ -package v3lockpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - _ "google.golang.org/genproto/googleapis/api/annotations" - - context "golang.org/x/net/context" - - grpc "google.golang.org/grpc" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type LockRequest struct { - // name is the identifier for the distributed shared lock to be acquired. 
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // lease is the ID of the lease that will be attached to ownership of the - // lock. If the lease expires or is revoked and currently holds the lock, - // the lock is automatically released. Calls to Lock with the same lease will - // be treated as a single acquistion; locking twice with the same lease is a - // no-op. - Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` -} - -func (m *LockRequest) Reset() { *m = LockRequest{} } -func (m *LockRequest) String() string { return proto.CompactTextString(m) } -func (*LockRequest) ProtoMessage() {} -func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} } - -func (m *LockRequest) GetName() []byte { - if m != nil { - return m.Name - } - return nil -} - -func (m *LockRequest) GetLease() int64 { - if m != nil { - return m.Lease - } - return 0 -} - -type LockResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // key is a key that will exist on etcd for the duration that the Lock caller - // owns the lock. Users should not modify this key or the lock may exhibit - // undefined behavior. - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` -} - -func (m *LockResponse) Reset() { *m = LockResponse{} } -func (m *LockResponse) String() string { return proto.CompactTextString(m) } -func (*LockResponse) ProtoMessage() {} -func (*LockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{1} } - -func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LockResponse) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -type UnlockRequest struct { - // key is the lock ownership key granted by Lock. 
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` -} - -func (m *UnlockRequest) Reset() { *m = UnlockRequest{} } -func (m *UnlockRequest) String() string { return proto.CompactTextString(m) } -func (*UnlockRequest) ProtoMessage() {} -func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} } - -func (m *UnlockRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -type UnlockResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *UnlockResponse) Reset() { *m = UnlockResponse{} } -func (m *UnlockResponse) String() string { return proto.CompactTextString(m) } -func (*UnlockResponse) ProtoMessage() {} -func (*UnlockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{3} } - -func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func init() { - proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest") - proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse") - proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest") - proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Lock service - -type LockClient interface { - // Lock acquires a distributed shared lock on a given named lock. - // On success, it will return a unique key that exists so long as the - // lock is held by the caller. This key can be used in conjunction with - // transactions to safely ensure updates to etcd only occur while holding - // lock ownership. 
The lock is held until Unlock is called on the key or the - // lease associate with the owner expires. - Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) - // Unlock takes a key returned by Lock and releases the hold on lock. The - // next Lock caller waiting for the lock will then be woken up and given - // ownership of the lock. - Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) -} - -type lockClient struct { - cc *grpc.ClientConn -} - -func NewLockClient(cc *grpc.ClientConn) LockClient { - return &lockClient{cc} -} - -func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) { - out := new(LockResponse) - err := grpc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) { - out := new(UnlockResponse) - err := grpc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Lock service - -type LockServer interface { - // Lock acquires a distributed shared lock on a given named lock. - // On success, it will return a unique key that exists so long as the - // lock is held by the caller. This key can be used in conjunction with - // transactions to safely ensure updates to etcd only occur while holding - // lock ownership. The lock is held until Unlock is called on the key or the - // lease associate with the owner expires. - Lock(context.Context, *LockRequest) (*LockResponse, error) - // Unlock takes a key returned by Lock and releases the hold on lock. The - // next Lock caller waiting for the lock will then be woken up and given - // ownership of the lock. 
- Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error) -} - -func RegisterLockServer(s *grpc.Server, srv LockServer) { - s.RegisterService(&_Lock_serviceDesc, srv) -} - -func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LockRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LockServer).Lock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3lockpb.Lock/Lock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LockServer).Lock(ctx, req.(*LockRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UnlockRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LockServer).Unlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3lockpb.Lock/Unlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Lock_serviceDesc = grpc.ServiceDesc{ - ServiceName: "v3lockpb.Lock", - HandlerType: (*LockServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Lock", - Handler: _Lock_Lock_Handler, - }, - { - MethodName: "Unlock", - Handler: _Lock_Unlock_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "v3lock.proto", -} - -func (m *LockRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int 
- _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if m.Lease != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease)) - } - return i, nil -} - -func (m *LockResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size())) - n1, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if len(m.Key) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - return i, nil -} - -func (m *UnlockRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - return i, nil -} - -func (m *UnlockResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size())) - n2, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func encodeFixed64V3Lock(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = 
uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32V3Lock(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *LockRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovV3Lock(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovV3Lock(uint64(m.Lease)) - } - return n -} - -func (m *LockResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Lock(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovV3Lock(uint64(l)) - } - return n -} - -func (m *UnlockRequest) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovV3Lock(uint64(l)) - } - return n -} - -func (m *UnlockResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Lock(uint64(l)) - } - return n -} - -func sovV3Lock(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozV3Lock(x uint64) (n int) { - return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *LockRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b 
< 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LockRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LockRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Lock - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) - if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipV3Lock(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Lock - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LockResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 
0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LockResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LockResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Lock - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Lock - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Lock(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Lock - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UnlockRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UnlockRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UnlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Lock - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Lock(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Lock - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UnlockResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UnlockResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UnlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Lock - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Lock(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthV3Lock - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - 
- if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipV3Lock(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Lock - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Lock - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Lock - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthV3Lock - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Lock - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipV3Lock(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow") -) - 
-func init() { proto.RegisterFile("v3lock.proto", fileDescriptorV3Lock) } - -var fileDescriptorV3Lock = []byte{ - // 336 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9, - 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44, - 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39, - 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, - 0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13, - 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc, - 0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, - 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b, - 0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6, - 0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a, - 0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f, - 0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41, - 0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a, - 0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x17, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42, - 0x21, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31, - 0x4d, 0x49, 0xb6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0xe2, 0x4a, 0x42, 0xfa, 0x65, 0xc6, 0x89, 0x39, - 0x05, 0x19, 0x89, 0xfa, 0x20, 0x55, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x86, 0x8b, 0x0d, 0xe2, - 0x4c, 0x21, 0x71, 0x84, 0x01, 0x28, 0x7e, 0x93, 0x92, 0xc0, 0x94, 0x80, 0x9a, 0x2d, 0x0f, 0x36, - 0x5b, 0x52, 0x49, 0x04, 0xd5, 0xec, 0xd2, 0x3c, 0xa8, 0xe9, 0x4e, 0x02, 
0x27, 0x1e, 0xc9, 0x31, - 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0, - 0x18, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xa0, 0x26, 0x28, 0x47, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto deleted file mode 100644 index 3e92a6ec277..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto +++ /dev/null @@ -1,65 +0,0 @@ -syntax = "proto3"; -package v3lockpb; - -import "gogoproto/gogo.proto"; -import "etcd/etcdserver/etcdserverpb/rpc.proto"; - -// for grpc-gateway -import "google/api/annotations.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// The lock service exposes client-side locking facilities as a gRPC interface. -service Lock { - // Lock acquires a distributed shared lock on a given named lock. - // On success, it will return a unique key that exists so long as the - // lock is held by the caller. This key can be used in conjunction with - // transactions to safely ensure updates to etcd only occur while holding - // lock ownership. The lock is held until Unlock is called on the key or the - // lease associate with the owner expires. - rpc Lock(LockRequest) returns (LockResponse) { - option (google.api.http) = { - post: "/v3alpha/lock/lock" - body: "*" - }; - } - - // Unlock takes a key returned by Lock and releases the hold on lock. The - // next Lock caller waiting for the lock will then be woken up and given - // ownership of the lock. - rpc Unlock(UnlockRequest) returns (UnlockResponse) { - option (google.api.http) = { - post: "/v3alpha/lock/unlock" - body: "*" - }; - } -} - -message LockRequest { - // name is the identifier for the distributed shared lock to be acquired. - bytes name = 1; - // lease is the ID of the lease that will be attached to ownership of the - // lock. 
If the lease expires or is revoked and currently holds the lock, - // the lock is automatically released. Calls to Lock with the same lease will - // be treated as a single acquistion; locking twice with the same lease is a - // no-op. - int64 lease = 2; -} - -message LockResponse { - etcdserverpb.ResponseHeader header = 1; - // key is a key that will exist on etcd for the duration that the Lock caller - // owns the lock. Users should not modify this key or the lock may exhibit - // undefined behavior. - bytes key = 2; -} - -message UnlockRequest { - // key is the lock ownership key granted by Lock. - bytes key = 1; -} - -message UnlockResponse { - etcdserverpb.ResponseHeader header = 1; -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go index ed70887b59d..88174e3bac2 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go @@ -16,7 +16,6 @@ package v3rpc import ( "crypto/tls" - "math" "github.com/coreos/etcd/etcdserver" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -25,17 +24,11 @@ import ( "google.golang.org/grpc/grpclog" ) -const ( - grpcOverheadBytes = 512 * 1024 - maxStreams = math.MaxUint32 - maxSendBytes = math.MaxInt32 -) - func init() { grpclog.SetLogger(plog) } -func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server { +func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { var opts []grpc.ServerOption opts = append(opts, grpc.CustomCodec(&codec{})) if tls != nil { @@ -43,11 +36,8 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOptio } opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) - opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) - opts = append(opts, 
grpc.MaxSendMsgSize(maxSendBytes)) - opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) - grpcServer := grpc.NewServer(append(opts, gopts...)...) + grpcServer := grpc.NewServer(opts...) pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go index de9470a8905..29aef2914a5 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go @@ -45,7 +45,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { return nil, rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromIncomingContext(ctx) + md, ok := metadata.FromContext(ctx) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { @@ -66,7 +66,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor return rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromIncomingContext(ss.Context()) + md, ok := metadata.FromContext(ss.Context()) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go index d0220e03a26..6ea7bbacde0 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go @@ -134,12 +134,6 @@ func checkPutRequest(r *pb.PutRequest) error { if len(r.Key) == 0 { return rpctypes.ErrGRPCEmptyKey } - if r.IgnoreValue && len(r.Value) != 0 { - return rpctypes.ErrGRPCValueProvided - } - if r.IgnoreLease && r.Lease != 0 { - return 
rpctypes.ErrGRPCLeaseProvided - } return nil } @@ -252,8 +246,8 @@ func checkRequestOp(u *pb.RequestOp) error { return checkDeleteRequest(uv.RequestDeleteRange) } default: - // empty op / nil entry - return rpctypes.ErrGRPCKeyNotFound + // empty op + return nil } return nil } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go index 7356633f8a6..be6e20b97fb 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go @@ -18,7 +18,6 @@ import ( "io" "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/lease" "golang.org/x/net/context" @@ -54,45 +53,20 @@ func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeReques func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { resp, err := ls.le.LeaseTimeToLive(ctx, rr) - if err != nil && err != lease.ErrLeaseNotFound { + if err != nil { return nil, togRPCError(err) } - if err == lease.ErrLeaseNotFound { - resp = &pb.LeaseTimeToLiveResponse{ - Header: &pb.ResponseHeader{}, - ID: rr.ID, - TTL: -1, - } - } ls.hdr.fill(resp.Header) return resp, nil } -func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) { - errc := make(chan error, 1) - go func() { - errc <- ls.leaseKeepAlive(stream) - }() - select { - case err = <-errc: - case <-stream.Context().Done(): - // the only server-side cancellation is noleader for now. 
- err = stream.Context().Err() - if err == context.Canceled { - err = rpctypes.ErrGRPCNoLeader - } - } - return err -} - -func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { +func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { for { req, err := stream.Recv() if err == io.EOF { return nil } if err != nil { - plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) return err } @@ -118,7 +92,6 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro resp.TTL = ttl err = stream.Send(resp) if err != nil { - plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) return err } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go index 3657d036082..af29ab3b71e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go @@ -47,7 +47,6 @@ type RaftStatusGetter interface { } type AuthGetter interface { - AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) AuthStore() auth.AuthStore } @@ -153,7 +152,7 @@ type authMaintenanceServer struct { } func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error { - authInfo, err := ams.ag.AuthInfoFromCtx(ctx) + authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx) if err != nil { return err } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go index 91a59389b87..bcd5dac5183 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go @@ -48,24 +48,21 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) now := time.Now() m := membership.NewMember("", urls, "", &now) - membs, 
merr := cs.server.AddMember(ctx, *m) - if merr != nil { - return nil, togRPCError(merr) + if err = cs.server.AddMember(ctx, *m); err != nil { + return nil, togRPCError(err) } return &pb.MemberAddResponse{ - Header: cs.header(), - Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, - Members: membersToProtoMembers(membs), + Header: cs.header(), + Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, }, nil } func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - membs, err := cs.server.RemoveMember(ctx, r.ID) - if err != nil { + if err := cs.server.RemoveMember(ctx, r.ID); err != nil { return nil, togRPCError(err) } - return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil + return &pb.MemberRemoveResponse{Header: cs.header()}, nil } func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { @@ -73,23 +70,15 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq ID: types.ID(r.ID), RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs}, } - membs, err := cs.server.UpdateMember(ctx, m) - if err != nil { + if err := cs.server.UpdateMember(ctx, m); err != nil { return nil, togRPCError(err) } - return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil + return &pb.MemberUpdateResponse{Header: cs.header()}, nil } func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - membs := membersToProtoMembers(cs.cluster.Members()) - return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil -} + membs := cs.cluster.Members() -func (cs *ClusterServer) header() *pb.ResponseHeader { - return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} -} - -func membersToProtoMembers(membs 
[]*membership.Member) []*pb.Member { protoMembs := make([]*pb.Member, len(membs)) for i := range membs { protoMembs[i] = &pb.Member{ @@ -99,5 +88,10 @@ func membersToProtoMembers(membs []*membership.Member) []*pb.Member { ClientURLs: membs[i].ClientURLs, } } - return protoMembs + + return &pb.MemberListResponse{Header: cs.header(), Members: protoMembs}, nil +} + +func (cs *ClusterServer) header() *pb.ResponseHeader { + return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD index e1ada36c303..38cd71455d5 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD @@ -12,7 +12,6 @@ go_library( deps = [ "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/status:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go index bd17179e997..5a3cfc0a0db 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -17,20 +17,16 @@ package rpctypes import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) var ( // server-side error - ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") - ErrGRPCKeyNotFound = grpc.Errorf(codes.InvalidArgument, "etcdserver: key not found") - ErrGRPCValueProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: value is provided") - ErrGRPCLeaseProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: lease is provided") - ErrGRPCTooManyOps = 
grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") - ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") - ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") - ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") - ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") + ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") + ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") + ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") + ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") + ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") + ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found") ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists") @@ -57,7 +53,6 @@ var ( ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role") ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled") ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token") - ErrGRPCInvalidAuthMgmt = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management") ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") @@ -68,11 +63,7 @@ var ( ErrGRPCUnhealthy = 
grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster") errStringToError = map[string]error{ - grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, - grpc.ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, - grpc.ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, - grpc.ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, - + grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, @@ -104,7 +95,6 @@ var ( grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, - grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, @@ -116,15 +106,12 @@ var ( } // client-side error - ErrEmptyKey = Error(ErrGRPCEmptyKey) - ErrKeyNotFound = Error(ErrGRPCKeyNotFound) - ErrValueProvided = Error(ErrGRPCValueProvided) - ErrLeaseProvided = Error(ErrGRPCLeaseProvided) - ErrTooManyOps = Error(ErrGRPCTooManyOps) - ErrDuplicateKey = Error(ErrGRPCDuplicateKey) - ErrCompacted = Error(ErrGRPCCompacted) - ErrFutureRev = Error(ErrGRPCFutureRev) - ErrNoSpace = Error(ErrGRPCNoSpace) + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = Error(ErrGRPCNoSpace) ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) ErrLeaseExist = Error(ErrGRPCLeaseExist) @@ -151,7 +138,6 @@ var ( ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) - ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) ErrNoLeader = 
Error(ErrGRPCNoLeader) ErrNotCapable = Error(ErrGRPCNotCapable) @@ -189,10 +175,3 @@ func Error(err error) error { } return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)} } - -func ErrorDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go index 8d38d9bd18f..5a057ed040d 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go @@ -42,6 +42,8 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCCompacted case mvcc.ErrFutureRev: return rpctypes.ErrGRPCFutureRev + case lease.ErrLeaseNotFound: + return rpctypes.ErrGRPCLeaseNotFound case etcdserver.ErrRequestTooLarge: return rpctypes.ErrGRPCRequestTooLarge case etcdserver.ErrNoSpace: @@ -61,8 +63,6 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCTimeoutDueToConnectionLost case etcdserver.ErrUnhealthy: return rpctypes.ErrGRPCUnhealthy - case etcdserver.ErrKeyNotFound: - return rpctypes.ErrGRPCKeyNotFound case lease.ErrLeaseNotFound: return rpctypes.ErrGRPCLeaseNotFound @@ -95,8 +95,6 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCAuthNotEnabled case auth.ErrInvalidAuthToken: return rpctypes.ErrGRPCInvalidAuthToken - case auth.ErrInvalidAuthMgmt: - return rpctypes.ErrGRPCInvalidAuthMgmt default: return grpc.Errorf(codes.Unknown, err.Error()) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go index e328f6694a3..f0215531dee 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go @@ -21,7 +21,6 @@ import ( "golang.org/x/net/context" - "github.com/coreos/etcd/auth" "github.com/coreos/etcd/etcdserver" 
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -34,8 +33,6 @@ type watchServer struct { memberID int64 raftTimer etcdserver.RaftTimer watchable mvcc.WatchableKV - - ag AuthGetter } func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { @@ -44,7 +41,6 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { memberID: int64(s.ID()), raftTimer: s, watchable: s.Watchable(), - ag: s, } } @@ -105,8 +101,6 @@ type serverWatchStream struct { // wg waits for the send loop to complete wg sync.WaitGroup - - ag AuthGetter } func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { @@ -124,8 +118,6 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { progress: make(map[mvcc.WatchID]bool), prevKV: make(map[mvcc.WatchID]bool), closec: make(chan struct{}), - - ag: ws.ag, } sws.wg.Add(1) @@ -141,7 +133,6 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { // deadlock when calling sws.close(). 
go func() { if rerr := sws.recvLoop(); rerr != nil { - plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) errc <- rerr } }() @@ -159,19 +150,6 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { return err } -func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool { - authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) - if err != nil { - return false - } - if authInfo == nil { - // if auth is enabled, IsRangePermitted() can cause an error - authInfo = &auth.AuthInfo{} - } - - return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil -} - func (sws *serverWatchStream) recvLoop() error { for { req, err := sws.gRPCStream.Recv() @@ -193,32 +171,10 @@ func (sws *serverWatchStream) recvLoop() error { // \x00 is the smallest key creq.Key = []byte{0} } - if len(creq.RangeEnd) == 0 { - // force nil since watchstream.Watch distinguishes - // between nil and []byte{} for single key / >= - creq.RangeEnd = nil - } if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 { // support >= key queries creq.RangeEnd = []byte{} } - - if !sws.isWatchPermitted(creq) { - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(sws.watchStream.Rev()), - WatchId: -1, - Canceled: true, - Created: true, - CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(), - } - - select { - case sws.ctrlStream <- wr: - case <-sws.closec: - } - return nil - } - filters := FiltersFromRequest(creq) wsrev := sws.watchStream.Rev() @@ -338,7 +294,6 @@ func (sws *serverWatchStream) sendLoop() { mvcc.ReportEventReceived(len(evs)) if err := sws.gRPCStream.Send(wr); err != nil { - plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error()) return } @@ -355,7 +310,6 @@ func (sws *serverWatchStream) sendLoop() { } if err := sws.gRPCStream.Send(c); err != nil { - plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error()) return } @@ -371,7 +325,6 @@ 
func (sws *serverWatchStream) sendLoop() { for _, v := range pending[wid] { mvcc.ReportEventReceived(len(v.Events)) if err := sws.gRPCStream.Send(v); err != nil { - plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error()) return } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go index 0be93c52b6f..e4bf35bc47e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply.go +++ b/vendor/github.com/coreos/etcd/etcdserver/apply.go @@ -16,6 +16,7 @@ package etcdserver import ( "bytes" + "fmt" "sort" "time" @@ -29,6 +30,11 @@ import ( ) const ( + // noTxn is an invalid txn ID. + // To apply with independent Range, Put, Delete, you can pass noTxn + // to apply functions instead of a valid txn ID. + noTxn = -1 + warnApplyDuration = 100 * time.Millisecond ) @@ -45,9 +51,9 @@ type applyResult struct { type applierV3 interface { Apply(r *pb.InternalRaftRequest) *applyResult - Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) - Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) - DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) + Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) + Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) + DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) @@ -93,11 +99,11 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls switch { case r.Range != nil: - ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range) + ar.resp, ar.err = a.s.applyV3.Range(noTxn, r.Range) case r.Put != nil: - ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put) + ar.resp, ar.err = a.s.applyV3.Put(noTxn, 
r.Put) case r.DeleteRange != nil: - ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) + ar.resp, ar.err = a.s.applyV3.DeleteRange(noTxn, r.DeleteRange) case r.Txn != nil: ar.resp, ar.err = a.s.applyV3.Txn(r.Txn) case r.Compaction != nil: @@ -146,87 +152,106 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { return ar } -func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) { - resp = &pb.PutResponse{} +func (a *applierV3backend) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { + resp := &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} + var ( + rev int64 + err error + ) - val, leaseID := p.Value, lease.LeaseID(p.Lease) - if txn == nil { + var rr *mvcc.RangeResult + if p.PrevKv { + if txnID != noTxn { + rr, err = a.s.KV().TxnRange(txnID, p.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } + } else { + rr, err = a.s.KV().Range(p.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } + } + } + + if txnID != noTxn { + rev, err = a.s.KV().TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease)) + if err != nil { + return nil, err + } + } else { + leaseID := lease.LeaseID(p.Lease) if leaseID != lease.NoLease { if l := a.s.lessor.Lookup(leaseID); l == nil { return nil, lease.ErrLeaseNotFound } } - txn = a.s.KV().Write() - defer txn.End() + rev = a.s.KV().Put(p.Key, p.Value, leaseID) } - - var rr *mvcc.RangeResult - if p.IgnoreValue || p.IgnoreLease || p.PrevKv { - rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } + resp.Header.Revision = rev + if rr != nil && len(rr.KVs) != 0 { + resp.PrevKv = &rr.KVs[0] } - if p.IgnoreValue || p.IgnoreLease { - if rr == nil || len(rr.KVs) == 0 { - // ignore_{lease,value} flag expects previous key-value pair - return nil, ErrKeyNotFound - } - } - if p.IgnoreValue { - val = rr.KVs[0].Value - } - if p.IgnoreLease { - leaseID = lease.LeaseID(rr.KVs[0].Lease) - } - 
if p.PrevKv { - if rr != nil && len(rr.KVs) != 0 { - resp.PrevKv = &rr.KVs[0] - } - } - - resp.Header.Revision = txn.Put(p.Key, val, leaseID) return resp, nil } -func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { +func (a *applierV3backend) DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { resp := &pb.DeleteRangeResponse{} resp.Header = &pb.ResponseHeader{} - if txn == nil { - txn = a.s.kv.Write() - defer txn.End() - } + var ( + n int64 + rev int64 + err error + ) if isGteRange(dr.RangeEnd) { dr.RangeEnd = []byte{} } + var rr *mvcc.RangeResult if dr.PrevKv { - rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - if rr != nil { - for i := range rr.KVs { - resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) + if txnID != noTxn { + rr, err = a.s.KV().TxnRange(txnID, dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } + } else { + rr, err = a.s.KV().Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) + if err != nil { + return nil, err } } } - resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd) + if txnID != noTxn { + n, rev, err = a.s.KV().TxnDeleteRange(txnID, dr.Key, dr.RangeEnd) + if err != nil { + return nil, err + } + } else { + n, rev = a.s.KV().DeleteRange(dr.Key, dr.RangeEnd) + } + + resp.Deleted = n + if rr != nil { + for i := range rr.KVs { + resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) + } + } + resp.Header.Revision = rev return resp, nil } -func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) { resp := &pb.RangeResponse{} resp.Header = &pb.ResponseHeader{} - if txn == nil { - txn = a.s.kv.Read() - defer txn.End() - } + var ( + rr *mvcc.RangeResult + err error + ) if isGteRange(r.RangeEnd) { r.RangeEnd = []byte{} @@ 
-250,9 +275,16 @@ func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang Count: r.CountOnly, } - rr, err := txn.Range(r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err + if txnID != noTxn { + rr, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, ro) + if err != nil { + return nil, err + } + } else { + rr, err = a.s.KV().Range(r.Key, r.RangeEnd, ro) + if err != nil { + return nil, err + } } if r.MaxModRevision != 0 { @@ -318,64 +350,61 @@ func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang } func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - isWrite := !isTxnReadonly(rt) - txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read()) - - reqs, ok := a.compareToOps(txn, rt) - if isWrite { - if err := a.checkRequestPut(txn, reqs); err != nil { - txn.End() - return nil, err + ok := true + for _, c := range rt.Compare { + if _, ok = a.applyCompare(c); !ok { + break } } - if err := checkRequestRange(txn, reqs); err != nil { - txn.End() + + var reqs []*pb.RequestOp + if ok { + reqs = rt.Success + } else { + reqs = rt.Failure + } + + if err := a.checkRequestLeases(reqs); err != nil { + return nil, err + } + if err := a.checkRequestRange(reqs); err != nil { return nil, err } + // When executing the operations of txn, we need to hold the txn lock. + // So the reader will not see any intermediate results. + txnID := a.s.KV().TxnBegin() + resps := make([]*pb.ResponseOp, len(reqs)) - txnResp := &pb.TxnResponse{ - Responses: resps, - Succeeded: ok, - Header: &pb.ResponseHeader{}, - } - - // When executing mutable txn ops, etcd must hold the txn lock so - // readers do not see any intermediate results. Since writes are - // serialized on the raft loop, the revision in the read view will - // be the revision of the write txn. 
- if isWrite { - txn.End() - txn = a.s.KV().Write() - } for i := range reqs { - resps[i] = a.applyUnion(txn, reqs[i]) + resps[i] = a.applyUnion(txnID, reqs[i]) } - rev := txn.Rev() - if len(txn.Changes()) != 0 { - rev++ - } - txn.End() - txnResp.Header.Revision = rev + err := a.s.KV().TxnEnd(txnID) + if err != nil { + panic(fmt.Sprint("unexpected error when closing txn", txnID)) + } + + txnResp := &pb.TxnResponse{} + txnResp.Header = &pb.ResponseHeader{} + txnResp.Header.Revision = a.s.KV().Rev() + txnResp.Responses = resps + txnResp.Succeeded = ok return txnResp, nil } -func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) { - for _, c := range rt.Compare { - if !applyCompare(rv, c) { - return rt.Failure, false - } - } - return rt.Success, true -} - // applyCompare applies the compare request. -// If the comparison succeeds, it returns true. Otherwise, returns false. -func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { - rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{}) +// It returns the revision at which the comparison happens. If the comparison +// succeeds, the it returns true. Otherwise it returns false. 
+func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { + rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{}) + rev := rr.Rev + if err != nil { - return false + if err == mvcc.ErrTxnIDMismatch { + panic("unexpected txn ID mismatch error") + } + return rev, false } var ckv mvccpb.KeyValue if len(rr.KVs) != 0 { @@ -387,7 +416,7 @@ func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { // We can treat non-existence as the empty set explicitly, such that // even a key with a value of length 0 bytes is still a real key // that was written that way - return false + return rev, false } } @@ -419,22 +448,30 @@ func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { switch c.Result { case pb.Compare_EQUAL: - return result == 0 + if result != 0 { + return rev, false + } case pb.Compare_NOT_EQUAL: - return result != 0 + if result == 0 { + return rev, false + } case pb.Compare_GREATER: - return result > 0 + if result != 1 { + return rev, false + } case pb.Compare_LESS: - return result < 0 + if result != -1 { + return rev, false + } } - return true + return rev, true } -func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp { +func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.ResponseOp { switch tv := union.Request.(type) { case *pb.RequestOp_RequestRange: if tv.RequestRange != nil { - resp, err := a.Range(txn, tv.RequestRange) + resp, err := a.Range(txnID, tv.RequestRange) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -442,7 +479,7 @@ func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *p } case *pb.RequestOp_RequestPut: if tv.RequestPut != nil { - resp, err := a.Put(txn, tv.RequestPut) + resp, err := a.Put(txnID, tv.RequestPut) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -450,7 +487,7 @@ func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *p } case 
*pb.RequestOp_RequestDeleteRange: if tv.RequestDeleteRange != nil { - resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) + resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -551,7 +588,7 @@ type applierV3Capped struct { // with Puts so that the number of keys in the store is capped. func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } -func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { +func (a *applierV3Capped) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { return nil, ErrNoSpace } @@ -580,7 +617,7 @@ func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { } func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { - ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) + ctx := context.WithValue(context.WithValue(context.Background(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) if resp != nil { resp.Header = newHeader(a.s) @@ -701,9 +738,9 @@ func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { return "aApplierV3{app, NewBackendQuota(s)} } -func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { +func (a *quotaApplierV3) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { ok := a.q.Available(p) - resp, err := a.applierV3.Put(txn, p) + resp, err := a.applierV3.Put(txnID, p) if err == nil && !ok { err = ErrNoSpace } @@ -767,27 +804,14 @@ func (s *kvSortByValue) Less(i, j int) bool { return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 } -func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error { +func (a *applierV3backend) checkRequestLeases(reqs 
[]*pb.RequestOp) error { for _, requ := range reqs { tv, ok := requ.Request.(*pb.RequestOp_RequestPut) if !ok { continue } preq := tv.RequestPut - if preq == nil { - continue - } - if preq.IgnoreValue || preq.IgnoreLease { - // expects previous key-value, error if not exist - rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return err - } - if rr == nil || len(rr.KVs) == 0 { - return ErrKeyNotFound - } - } - if lease.LeaseID(preq.Lease) == lease.NoLease { + if preq == nil || lease.LeaseID(preq.Lease) == lease.NoLease { continue } if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil { @@ -797,7 +821,7 @@ func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestO return nil } -func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error { +func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error { for _, requ := range reqs { tv, ok := requ.Request.(*pb.RequestOp_RequestRange) if !ok { @@ -808,10 +832,10 @@ func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error { continue } - if greq.Revision > rv.Rev() { + if greq.Revision > a.s.KV().Rev() { return mvcc.ErrFutureRev } - if greq.Revision < rv.FirstRev() { + if greq.Revision < a.s.KV().FirstRev() { return mvcc.ErrCompacted } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go index 7da4ae45df5..4868e855ca1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go +++ b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go @@ -19,7 +19,6 @@ import ( "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" ) type authApplierV3 struct { @@ -59,7 +58,7 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { return ret } -func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) { +func (aa *authApplierV3) Put(txnID int64, r 
*pb.PutRequest) (*pb.PutResponse, error) { if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { return nil, err } @@ -69,17 +68,17 @@ func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutRespon return nil, err } } - return aa.applierV3.Put(txn, r) + return aa.applierV3.Put(txnID, r) } -func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) { if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { return nil, err } - return aa.applierV3.Range(txn, r) + return aa.applierV3.Range(txnID, r) } -func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { +func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { return nil, err } @@ -90,7 +89,7 @@ func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest } } - return aa.applierV3.DeleteRange(txn, r) + return aa.applierV3.DeleteRange(txnID, r) } func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go deleted file mode 100644 index c5e2dabf3e7..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/backend.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "fmt" - "os" - "time" - - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" -) - -func newBackend(cfg *ServerConfig) backend.Backend { - bcfg := backend.DefaultBackendConfig() - bcfg.Path = cfg.backendPath() - if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { - // permit 10% excess over quota for disarm - bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) - } - return backend.New(bcfg) -} - -// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. -func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { - snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) - if err != nil { - return nil, fmt.Errorf("database snapshot file path error: %v", err) - } - if err := os.Rename(snapPath, cfg.backendPath()); err != nil { - return nil, fmt.Errorf("rename snapshot file error: %v", err) - } - return openBackend(cfg), nil -} - -// openBackend returns a backend using the current etcd db. 
-func openBackend(cfg *ServerConfig) backend.Backend { - fn := cfg.backendPath() - beOpened := make(chan backend.Backend) - go func() { - beOpened <- newBackend(cfg) - }() - select { - case be := <-beOpened: - return be - case <-time.After(time.Second): - plog.Warningf("another etcd process is using %q and holds the file lock.", fn) - plog.Warningf("waiting for it to exit before starting...") - } - return <-beOpened -} - -// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes -// before updating the backend db after persisting raft snapshot to disk, -// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this -// case, replace the db with the snapshot db sent by the leader. -func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { - var cIndex consistentIndex - kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex) - defer kv.Close() - if snapshot.Metadata.Index <= kv.ConsistentIndex() { - return oldbe, nil - } - oldbe.Close() - return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go index f44862a4638..fa84ffae630 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go @@ -23,6 +23,7 @@ import ( "time" "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/httputil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" @@ -240,6 +241,15 @@ func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) continue } + // etcd 2.0 does not have version endpoint on peer url. 
+ if resp.StatusCode == http.StatusNotFound { + httputil.GracefulClose(resp) + return &version.Versions{ + Server: "2.0.0", + Cluster: "2.0.0", + }, nil + } + var b []byte b, err = ioutil.ReadAll(resp.Body) resp.Body.Close() diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go index c8ff27f4e14..9bcac0f076b 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/config.go +++ b/vendor/github.com/coreos/etcd/etcdserver/config.go @@ -55,15 +55,10 @@ type ServerConfig struct { AutoCompactionRetention int QuotaBackendBytes int64 - // MaxRequestBytes is the maximum request size to send over raft. - MaxRequestBytes uint - StrictReconfigCheck bool // ClientCertAuthEnabled is true when cert has been signed by the client CA. ClientCertAuthEnabled bool - - AuthToken string } // VerifyBootstrap sanity-checks the initial config for bootstrap case @@ -203,5 +198,3 @@ func (c *ServerConfig) bootstrapTimeout() time.Duration { } return time.Second } - -func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") } diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go index ed749dbe8d8..5edc155624b 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/errors.go +++ b/vendor/github.com/coreos/etcd/etcdserver/errors.go @@ -33,7 +33,6 @@ var ( ErrNoSpace = errors.New("etcdserver: no space") ErrTooManyRequests = errors.New("etcdserver: too many requests") ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") - ErrKeyNotFound = errors.New("etcdserver: key not found") ) type DiscoveryError struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD index 4476d65ab94..0c1db78018e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD @@ -16,6 +16,7 @@ go_library( 
"etcdserver.pb.go", "raft_internal.pb.go", "rpc.pb.go", + "rpc.pb.gw.go", ], importpath = "github.com/coreos/etcd/etcdserver/etcdserverpb", visibility = ["//visibility:public"], @@ -23,9 +24,12 @@ go_library( "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", ], ) @@ -38,10 +42,7 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go index aabf90061f6..f34bedf3ed3 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -1018,7 +1018,7 @@ func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } var fileDescriptorEtcdserver = []byte{ // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, 0xc8, 0xd5, 
0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD deleted file mode 100644 index 0b8e37503da..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["rpc.pb.gw.go"], - importpath = "github.com/coreos/etcd/etcdserver/etcdserverpb/gw", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/status:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go index 44a3b6f69eb..66890c93c44 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -2038,7 +2038,7 @@ func 
init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftIntern var fileDescriptorRaftInternal = []byte{ // 837 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go index 894c815f824..b28f2e50e3c 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -15,8 +15,6 @@ import ( authpb "github.com/coreos/etcd/auth/authpb" - _ "google.golang.org/genproto/googleapis/api/annotations" - context "golang.org/x/net/context" grpc "google.golang.org/grpc" @@ -225,45 +223,16 @@ func (m *ResponseHeader) String() string { return proto.CompactTextSt func (*ResponseHeader) ProtoMessage() {} func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } -func (m *ResponseHeader) GetClusterId() uint64 { - if m != nil { - return m.ClusterId - } - return 0 -} - -func (m *ResponseHeader) GetMemberId() uint64 { - if m != nil { - return m.MemberId - } - return 0 -} - -func (m *ResponseHeader) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *ResponseHeader) GetRaftTerm() uint64 { - if m != nil { - return m.RaftTerm - } - return 0 -} - type RangeRequest struct { // key is the first key for the range. If range_end is not given, the request only looks up key. 
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // range_end is the upper bound on the requested range [key, range_end). // If range_end is '\0', the range is all keys >= key. - // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), - // then the range request gets all keys prefixed with key. - // If both key and range_end are '\0', then the range request returns all keys. + // If the range_end is one bit larger than the given key, + // then the range requests get the all keys with the prefix (the given key). + // If both key and range_end are '\0', then range requests returns all keys. RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // limit is a limit on the number of keys returned for the request. When limit is set to 0, - // it is treated as no limit. + // limit is a limit on the number of keys returned for the request. Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` // revision is the point-in-time of the key-value store to use for the range. // If revision is less or equal to zero, the range is over the newest key-value store. 
@@ -303,97 +272,6 @@ func (m *RangeRequest) String() string { return proto.CompactTextStri func (*RangeRequest) ProtoMessage() {} func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } -func (m *RangeRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *RangeRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *RangeRequest) GetLimit() int64 { - if m != nil { - return m.Limit - } - return 0 -} - -func (m *RangeRequest) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { - if m != nil { - return m.SortOrder - } - return RangeRequest_NONE -} - -func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { - if m != nil { - return m.SortTarget - } - return RangeRequest_KEY -} - -func (m *RangeRequest) GetSerializable() bool { - if m != nil { - return m.Serializable - } - return false -} - -func (m *RangeRequest) GetKeysOnly() bool { - if m != nil { - return m.KeysOnly - } - return false -} - -func (m *RangeRequest) GetCountOnly() bool { - if m != nil { - return m.CountOnly - } - return false -} - -func (m *RangeRequest) GetMinModRevision() int64 { - if m != nil { - return m.MinModRevision - } - return 0 -} - -func (m *RangeRequest) GetMaxModRevision() int64 { - if m != nil { - return m.MaxModRevision - } - return 0 -} - -func (m *RangeRequest) GetMinCreateRevision() int64 { - if m != nil { - return m.MinCreateRevision - } - return 0 -} - -func (m *RangeRequest) GetMaxCreateRevision() int64 { - if m != nil { - return m.MaxCreateRevision - } - return 0 -} - type RangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kvs is the list of key-value pairs matched by the range request. 
@@ -424,20 +302,6 @@ func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { return nil } -func (m *RangeResponse) GetMore() bool { - if m != nil { - return m.More - } - return false -} - -func (m *RangeResponse) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - type PutRequest struct { // key is the key, in bytes, to put into the key-value store. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -449,12 +313,6 @@ type PutRequest struct { // If prev_kv is set, etcd gets the previous key-value pair before changing it. // The previous key-value pair will be returned in the put response. PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - // If ignore_value is set, etcd updates the key using its current value. - // Returns an error if the key does not exist. - IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` - // If ignore_lease is set, etcd updates the key using its current lease. - // Returns an error if the key does not exist. 
- IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` } func (m *PutRequest) Reset() { *m = PutRequest{} } @@ -462,48 +320,6 @@ func (m *PutRequest) String() string { return proto.CompactTextString func (*PutRequest) ProtoMessage() {} func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } -func (m *PutRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutRequest) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *PutRequest) GetLease() int64 { - if m != nil { - return m.Lease - } - return 0 -} - -func (m *PutRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - -func (m *PutRequest) GetIgnoreValue() bool { - if m != nil { - return m.IgnoreValue - } - return false -} - -func (m *PutRequest) GetIgnoreLease() bool { - if m != nil { - return m.IgnoreLease - } - return false -} - type PutResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // if prev_kv is set in the request, the previous key-value pair will be returned. @@ -534,12 +350,12 @@ type DeleteRangeRequest struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // range_end is the key following the last key to delete for the range [key, range_end). // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all the keys - // with the prefix (the given key). + // If range_end is one bit larger than the given key, then the range is all + // the all keys with the prefix (the given key). // If range_end is '\0', the range is all keys greater than or equal to the key argument. 
RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delete response. + // The previous key-value pairs will be returned in the delte response. PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` } @@ -548,27 +364,6 @@ func (m *DeleteRangeRequest) String() string { return proto.CompactTe func (*DeleteRangeRequest) ProtoMessage() {} func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } -func (m *DeleteRangeRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRangeRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *DeleteRangeRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - type DeleteRangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // deleted is the number of keys deleted by the delete range request. 
@@ -589,13 +384,6 @@ func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { return nil } -func (m *DeleteRangeResponse) GetDeleted() int64 { - if m != nil { - return m.Deleted - } - return 0 -} - func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { if m != nil { return m.PrevKvs @@ -966,27 +754,6 @@ func (m *Compare) GetTargetUnion() isCompare_TargetUnion { return nil } -func (m *Compare) GetResult() Compare_CompareResult { - if m != nil { - return m.Result - } - return Compare_EQUAL -} - -func (m *Compare) GetTarget() Compare_CompareTarget { - if m != nil { - return m.Target - } - return Compare_VERSION -} - -func (m *Compare) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - func (m *Compare) GetVersion() int64 { if x, ok := m.GetTargetUnion().(*Compare_Version); ok { return x.Version @@ -1183,13 +950,6 @@ func (m *TxnResponse) GetHeader() *ResponseHeader { return nil } -func (m *TxnResponse) GetSucceeded() bool { - if m != nil { - return m.Succeeded - } - return false -} - func (m *TxnResponse) GetResponses() []*ResponseOp { if m != nil { return m.Responses @@ -1213,20 +973,6 @@ func (m *CompactionRequest) String() string { return proto.CompactTex func (*CompactionRequest) ProtoMessage() {} func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } -func (m *CompactionRequest) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *CompactionRequest) GetPhysical() bool { - if m != nil { - return m.Physical - } - return false -} - type CompactionResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1269,13 +1015,6 @@ func (m *HashResponse) GetHeader() *ResponseHeader { return nil } -func (m *HashResponse) GetHash() uint32 { - if m != nil { - return m.Hash - } - return 0 -} - type SnapshotRequest struct { } @@ -1306,20 +1045,6 @@ func (m *SnapshotResponse) GetHeader() *ResponseHeader { return nil } -func (m 
*SnapshotResponse) GetRemainingBytes() uint64 { - if m != nil { - return m.RemainingBytes - } - return 0 -} - -func (m *SnapshotResponse) GetBlob() []byte { - if m != nil { - return m.Blob - } - return nil -} - type WatchRequest struct { // request_union is a request to either create a new watcher or cancel an existing watcher. // @@ -1473,48 +1198,6 @@ func (m *WatchCreateRequest) String() string { return proto.CompactTe func (*WatchCreateRequest) ProtoMessage() {} func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } -func (m *WatchCreateRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *WatchCreateRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *WatchCreateRequest) GetStartRevision() int64 { - if m != nil { - return m.StartRevision - } - return 0 -} - -func (m *WatchCreateRequest) GetProgressNotify() bool { - if m != nil { - return m.ProgressNotify - } - return false -} - -func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { - if m != nil { - return m.Filters - } - return nil -} - -func (m *WatchCreateRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - type WatchCancelRequest struct { // watch_id is the watcher id to cancel so that no more events are transmitted. WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` @@ -1525,13 +1208,6 @@ func (m *WatchCancelRequest) String() string { return proto.CompactTe func (*WatchCancelRequest) ProtoMessage() {} func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } -func (m *WatchCancelRequest) GetWatchId() int64 { - if m != nil { - return m.WatchId - } - return 0 -} - type WatchResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // watch_id is the ID of the watcher that corresponds to the response. 
@@ -1552,10 +1228,8 @@ type WatchResponse struct { // // The client should treat the watcher as canceled and should not try to create any // watcher with the same start_revision again. - CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - // cancel_reason indicates the reason for canceling the watcher. - CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` + CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` } func (m *WatchResponse) Reset() { *m = WatchResponse{} } @@ -1570,41 +1244,6 @@ func (m *WatchResponse) GetHeader() *ResponseHeader { return nil } -func (m *WatchResponse) GetWatchId() int64 { - if m != nil { - return m.WatchId - } - return 0 -} - -func (m *WatchResponse) GetCreated() bool { - if m != nil { - return m.Created - } - return false -} - -func (m *WatchResponse) GetCanceled() bool { - if m != nil { - return m.Canceled - } - return false -} - -func (m *WatchResponse) GetCompactRevision() int64 { - if m != nil { - return m.CompactRevision - } - return 0 -} - -func (m *WatchResponse) GetCancelReason() string { - if m != nil { - return m.CancelReason - } - return "" -} - func (m *WatchResponse) GetEvents() []*mvccpb.Event { if m != nil { return m.Events @@ -1624,20 +1263,6 @@ func (m *LeaseGrantRequest) String() string { return proto.CompactTex func (*LeaseGrantRequest) ProtoMessage() {} func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } -func (m *LeaseGrantRequest) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseGrantRequest) GetID() int64 { - if m != nil { - return m.ID - } - 
return 0 -} - type LeaseGrantResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID for the granted lease. @@ -1659,27 +1284,6 @@ func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { return nil } -func (m *LeaseGrantResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseGrantResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseGrantResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - type LeaseRevokeRequest struct { // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1690,13 +1294,6 @@ func (m *LeaseRevokeRequest) String() string { return proto.CompactTe func (*LeaseRevokeRequest) ProtoMessage() {} func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } -func (m *LeaseRevokeRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - type LeaseRevokeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1723,13 +1320,6 @@ func (m *LeaseKeepAliveRequest) String() string { return proto.Compac func (*LeaseKeepAliveRequest) ProtoMessage() {} func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } -func (m *LeaseKeepAliveRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - type LeaseKeepAliveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. 
@@ -1750,20 +1340,6 @@ func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { return nil } -func (m *LeaseKeepAliveResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseKeepAliveResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - type LeaseTimeToLiveRequest struct { // ID is the lease ID for the lease. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1776,20 +1352,6 @@ func (m *LeaseTimeToLiveRequest) String() string { return proto.Compa func (*LeaseTimeToLiveRequest) ProtoMessage() {} func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } -func (m *LeaseTimeToLiveRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseTimeToLiveRequest) GetKeys() bool { - if m != nil { - return m.Keys - } - return false -} - type LeaseTimeToLiveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. @@ -1814,34 +1376,6 @@ func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { return nil } -func (m *LeaseTimeToLiveResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { - if m != nil { - return m.GrantedTTL - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { - if m != nil { - return m.Keys - } - return nil -} - type Member struct { // ID is the member ID for this member. 
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1858,34 +1392,6 @@ func (m *Member) String() string { return proto.CompactTextString(m) func (*Member) ProtoMessage() {} func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } -func (m *Member) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Member) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Member) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - -func (m *Member) GetClientURLs() []string { - if m != nil { - return m.ClientURLs - } - return nil -} - type MemberAddRequest struct { // peerURLs is the list of URLs the added member will use to communicate with the cluster. PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` @@ -1896,19 +1402,10 @@ func (m *MemberAddRequest) String() string { return proto.CompactText func (*MemberAddRequest) ProtoMessage() {} func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } -func (m *MemberAddRequest) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - type MemberAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // member is the member information for the added member. Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` - // members is a list of all members after adding the new member. - Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` } func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } @@ -1930,13 +1427,6 @@ func (m *MemberAddResponse) GetMember() *Member { return nil } -func (m *MemberAddResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - type MemberRemoveRequest struct { // ID is the member ID of the member to remove. 
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1947,17 +1437,8 @@ func (m *MemberRemoveRequest) String() string { return proto.CompactT func (*MemberRemoveRequest) ProtoMessage() {} func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } -func (m *MemberRemoveRequest) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - type MemberRemoveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // members is a list of all members after removing the member. - Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } @@ -1972,13 +1453,6 @@ func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { return nil } -func (m *MemberRemoveResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - type MemberUpdateRequest struct { // ID is the member ID of the member to update. ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1991,24 +1465,8 @@ func (m *MemberUpdateRequest) String() string { return proto.CompactT func (*MemberUpdateRequest) ProtoMessage() {} func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } -func (m *MemberUpdateRequest) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *MemberUpdateRequest) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - type MemberUpdateResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // members is a list of all members after updating the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } @@ -2023,13 +1481,6 @@ func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { return nil } -func (m *MemberUpdateResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - type MemberListRequest struct { } @@ -2104,27 +1555,6 @@ func (m *AlarmRequest) String() string { return proto.CompactTextStri func (*AlarmRequest) ProtoMessage() {} func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } -func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { - if m != nil { - return m.Action - } - return AlarmRequest_GET -} - -func (m *AlarmRequest) GetMemberID() uint64 { - if m != nil { - return m.MemberID - } - return 0 -} - -func (m *AlarmRequest) GetAlarm() AlarmType { - if m != nil { - return m.Alarm - } - return AlarmType_NONE -} - type AlarmMember struct { // memberID is the ID of the member associated with the raised alarm. MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` @@ -2137,20 +1567,6 @@ func (m *AlarmMember) String() string { return proto.CompactTextStrin func (*AlarmMember) ProtoMessage() {} func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } -func (m *AlarmMember) GetMemberID() uint64 { - if m != nil { - return m.MemberID - } - return 0 -} - -func (m *AlarmMember) GetAlarm() AlarmType { - if m != nil { - return m.Alarm - } - return AlarmType_NONE -} - type AlarmResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // alarms is a list of alarms associated with the alarm request. 
@@ -2210,41 +1626,6 @@ func (m *StatusResponse) GetHeader() *ResponseHeader { return nil } -func (m *StatusResponse) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *StatusResponse) GetDbSize() int64 { - if m != nil { - return m.DbSize - } - return 0 -} - -func (m *StatusResponse) GetLeader() uint64 { - if m != nil { - return m.Leader - } - return 0 -} - -func (m *StatusResponse) GetRaftIndex() uint64 { - if m != nil { - return m.RaftIndex - } - return 0 -} - -func (m *StatusResponse) GetRaftTerm() uint64 { - if m != nil { - return m.RaftTerm - } - return 0 -} - type AuthEnableRequest struct { } @@ -2271,20 +1652,6 @@ func (m *AuthenticateRequest) String() string { return proto.CompactT func (*AuthenticateRequest) ProtoMessage() {} func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } -func (m *AuthenticateRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthenticateRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - type AuthUserAddRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` @@ -2295,20 +1662,6 @@ func (m *AuthUserAddRequest) String() string { return proto.CompactTe func (*AuthUserAddRequest) ProtoMessage() {} func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } -func (m *AuthUserAddRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserAddRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - type AuthUserGetRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -2318,13 +1671,6 @@ func (m *AuthUserGetRequest) String() string { return proto.CompactTe func (*AuthUserGetRequest) ProtoMessage() {} func 
(*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } -func (m *AuthUserGetRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - type AuthUserDeleteRequest struct { // name is the name of the user to delete. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -2335,13 +1681,6 @@ func (m *AuthUserDeleteRequest) String() string { return proto.Compac func (*AuthUserDeleteRequest) ProtoMessage() {} func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } -func (m *AuthUserDeleteRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - type AuthUserChangePasswordRequest struct { // name is the name of the user whose password is being changed. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -2356,20 +1695,6 @@ func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } -func (m *AuthUserChangePasswordRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserChangePasswordRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - type AuthUserGrantRoleRequest struct { // user is the name of the user which should be granted a given role. 
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` @@ -2382,20 +1707,6 @@ func (m *AuthUserGrantRoleRequest) String() string { return proto.Com func (*AuthUserGrantRoleRequest) ProtoMessage() {} func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } -func (m *AuthUserGrantRoleRequest) GetUser() string { - if m != nil { - return m.User - } - return "" -} - -func (m *AuthUserGrantRoleRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - type AuthUserRevokeRoleRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` @@ -2406,20 +1717,6 @@ func (m *AuthUserRevokeRoleRequest) String() string { return proto.Co func (*AuthUserRevokeRoleRequest) ProtoMessage() {} func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } -func (m *AuthUserRevokeRoleRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserRevokeRoleRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - type AuthRoleAddRequest struct { // name is the name of the role to add to the authentication system. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -2430,13 +1727,6 @@ func (m *AuthRoleAddRequest) String() string { return proto.CompactTe func (*AuthRoleAddRequest) ProtoMessage() {} func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } -func (m *AuthRoleAddRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - type AuthRoleGetRequest struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` } @@ -2446,13 +1736,6 @@ func (m *AuthRoleGetRequest) String() string { return proto.CompactTe func (*AuthRoleGetRequest) ProtoMessage() {} func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } -func (m *AuthRoleGetRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - type AuthUserListRequest struct { } @@ -2478,13 +1761,6 @@ func (m *AuthRoleDeleteRequest) String() string { return proto.Compac func (*AuthRoleDeleteRequest) ProtoMessage() {} func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } -func (m *AuthRoleDeleteRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - type AuthRoleGrantPermissionRequest struct { // name is the name of the role which will be granted the permission. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -2499,13 +1775,6 @@ func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } -func (m *AuthRoleGrantPermissionRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { if m != nil { return m.Perm @@ -2526,27 +1795,6 @@ func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } -func (m *AuthRoleRevokePermissionRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -func (m *AuthRoleRevokePermissionRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { - if m != nil { - return m.RangeEnd - } - return "" -} - type AuthEnableResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -2597,13 +1845,6 @@ func (m *AuthenticateResponse) GetHeader() *ResponseHeader { return nil } -func (m *AuthenticateResponse) GetToken() string { - if m != nil { - return m.Token - } - return "" -} - type AuthUserAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -2637,13 +1878,6 @@ func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { return nil } -func (m *AuthUserGetResponse) GetRoles() []string { - if m != nil { - return m.Roles - } - return nil -} - type AuthUserDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -2767,13 +2001,6 @@ func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { return nil } -func (m *AuthRoleListResponse) GetRoles() []string { - if m != nil { - return m.Roles - } - return nil -} - type AuthUserListResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` 
Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` @@ -2791,13 +2018,6 @@ func (m *AuthUserListResponse) GetHeader() *ResponseHeader { return nil } -func (m *AuthUserListResponse) GetUsers() []string { - if m != nil { - return m.Users - } - return nil -} - type AuthRoleDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -4721,26 +3941,6 @@ func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { } i++ } - if m.IgnoreValue { - dAtA[i] = 0x28 - i++ - if m.IgnoreValue { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.IgnoreLease { - dAtA[i] = 0x30 - i++ - if m.IgnoreLease { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } return i, nil } @@ -5549,12 +4749,6 @@ func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) } - if len(m.CancelReason) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) - i += copy(dAtA[i:], m.CancelReason) - } if len(m.Events) > 0 { for _, msg := range m.Events { dAtA[i] = 0x5a @@ -5965,18 +5159,6 @@ func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { } i += n29 } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } return i, nil } @@ -6028,18 +5210,6 @@ func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { } i += n30 } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } return i, nil } @@ -6106,18 +5276,6 @@ func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { } i += n31 } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } return i, nil } @@ -7452,12 +6610,6 @@ func (m *PutRequest) Size() (n int) { if m.PrevKv { n += 2 } - if m.IgnoreValue { - n += 2 - } - if m.IgnoreLease { - n += 2 - } return n } @@ -7821,10 +6973,6 @@ func (m *WatchResponse) Size() (n int) { if m.CompactRevision != 0 { n += 1 + sovRpc(uint64(m.CompactRevision)) } - l = len(m.CancelReason) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } if len(m.Events) > 0 { for _, e := range m.Events { l = e.Size() @@ -7995,12 +7143,6 @@ func (m *MemberAddResponse) Size() (n int) { l = m.Member.Size() n += 1 + l + sovRpc(uint64(l)) } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } return n } @@ -8020,12 +7162,6 @@ func (m *MemberRemoveResponse) Size() (n int) { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } return n } @@ -8051,12 +7187,6 @@ func (m *MemberUpdateResponse) Size() (n int) { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } return n } @@ -9283,46 +8413,6 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } } m.PrevKv = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.IgnoreValue = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) - } - var v int - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.IgnoreLease = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -11255,24 +10345,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.ProgressNotify = bool(v != 0) case 5: - if wireType == 0 { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) - } else if wireType == 2 { + if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -11313,6 +10386,23 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.Filters = append(m.Filters, v) } + } else if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) } else { return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) } @@ -11566,35 +10656,6 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { break } } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CancelReason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) @@ -12815,37 +11876,6 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -12998,37 +12028,6 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ 
-13210,37 +12209,6 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -17073,221 +16041,218 @@ var ( func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } var fileDescriptorRpc = []byte{ - // 3450 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x3b, 0x5b, 0x6f, 0x1b, 0xc7, - 0xb9, 0x5a, 0x5e, 0xc5, 0x8f, 0x17, 0xd1, 0x23, 0xd9, 0xa6, 0x68, 0x5b, 0x96, 0xc7, 0x37, 0xd9, - 0x4e, 0xa4, 0x44, 0xc9, 0x39, 0x0f, 0x3e, 0x41, 0x70, 0x64, 0x89, 0xb1, 0x74, 0x24, 0x4b, 0xce, - 0x4a, 0x76, 0x72, 0x80, 0xa0, 0xc4, 0x8a, 0x1c, 0x53, 0x0b, 0x91, 0xbb, 0xcc, 0xee, 0x92, 0x96, - 0xd2, 0x14, 0x28, 0xd2, 0x04, 0x45, 0x0b, 0xf4, 0xa5, 0x79, 0xe8, 0xed, 0xb1, 0x28, 0x8a, 0xfc, - 0x80, 0xbe, 0xf5, 0x07, 0x14, 0x7d, 0x69, 0x81, 0xfe, 0x81, 0x22, 0xed, 0x63, 0xdf, 0xfb, 0x54, - 0xb4, 0x98, 0xdb, 0xee, 0xec, 0x72, 0x97, 0x52, 0xca, 0x26, 0x2f, 0xd6, 0xce, 0x37, 0xdf, 0x7c, - 0xb7, 0x99, 0xef, 0x32, 0xdf, 0xd0, 0x50, 0x70, 0xfa, 0xad, 0xe5, 0xbe, 0x63, 0x7b, 0x36, 0x2a, - 0x11, 0xaf, 0xd5, 0x76, 0x89, 0x33, 0x24, 0x4e, 0xff, 0xb0, 0x3e, 0xd7, 0xb1, 0x3b, 0x36, 0x9b, - 0x58, 0xa1, 0x5f, 0x1c, 0xa7, 0x3e, 0x4f, 0x71, 0x56, 0x7a, 0xc3, 0x56, 0x8b, 0xfd, 
0xd3, 0x3f, - 0x5c, 0x39, 0x1e, 0x8a, 0xa9, 0x2b, 0x6c, 0xca, 0x18, 0x78, 0x47, 0xec, 0x9f, 0xfe, 0x21, 0xfb, - 0x23, 0x26, 0xaf, 0x76, 0x6c, 0xbb, 0xd3, 0x25, 0x2b, 0x46, 0xdf, 0x5c, 0x31, 0x2c, 0xcb, 0xf6, - 0x0c, 0xcf, 0xb4, 0x2d, 0x97, 0xcf, 0xe2, 0xcf, 0x34, 0xa8, 0xe8, 0xc4, 0xed, 0xdb, 0x96, 0x4b, - 0x36, 0x89, 0xd1, 0x26, 0x0e, 0xba, 0x06, 0xd0, 0xea, 0x0e, 0x5c, 0x8f, 0x38, 0x4d, 0xb3, 0x5d, - 0xd3, 0x16, 0xb5, 0xa5, 0x8c, 0x5e, 0x10, 0x90, 0xad, 0x36, 0xba, 0x02, 0x85, 0x1e, 0xe9, 0x1d, - 0xf2, 0xd9, 0x14, 0x9b, 0x9d, 0xe6, 0x80, 0xad, 0x36, 0xaa, 0xc3, 0xb4, 0x43, 0x86, 0xa6, 0x6b, - 0xda, 0x56, 0x2d, 0xbd, 0xa8, 0x2d, 0xa5, 0x75, 0x7f, 0x4c, 0x17, 0x3a, 0xc6, 0x0b, 0xaf, 0xe9, - 0x11, 0xa7, 0x57, 0xcb, 0xf0, 0x85, 0x14, 0x70, 0x40, 0x9c, 0x1e, 0xfe, 0x34, 0x0b, 0x25, 0xdd, - 0xb0, 0x3a, 0x44, 0x27, 0x1f, 0x0e, 0x88, 0xeb, 0xa1, 0x2a, 0xa4, 0x8f, 0xc9, 0x29, 0x63, 0x5f, - 0xd2, 0xe9, 0x27, 0x5f, 0x6f, 0x75, 0x48, 0x93, 0x58, 0x9c, 0x71, 0x89, 0xae, 0xb7, 0x3a, 0xa4, - 0x61, 0xb5, 0xd1, 0x1c, 0x64, 0xbb, 0x66, 0xcf, 0xf4, 0x04, 0x57, 0x3e, 0x08, 0x89, 0x93, 0x89, - 0x88, 0xb3, 0x0e, 0xe0, 0xda, 0x8e, 0xd7, 0xb4, 0x9d, 0x36, 0x71, 0x6a, 0xd9, 0x45, 0x6d, 0xa9, - 0xb2, 0x7a, 0x6b, 0x59, 0xdd, 0x88, 0x65, 0x55, 0xa0, 0xe5, 0x7d, 0xdb, 0xf1, 0xf6, 0x28, 0xae, - 0x5e, 0x70, 0xe5, 0x27, 0x7a, 0x07, 0x8a, 0x8c, 0x88, 0x67, 0x38, 0x1d, 0xe2, 0xd5, 0x72, 0x8c, - 0xca, 0xed, 0x33, 0xa8, 0x1c, 0x30, 0x64, 0x9d, 0xb1, 0xe7, 0xdf, 0x08, 0x43, 0xc9, 0x25, 0x8e, - 0x69, 0x74, 0xcd, 0x8f, 0x8c, 0xc3, 0x2e, 0xa9, 0xe5, 0x17, 0xb5, 0xa5, 0x69, 0x3d, 0x04, 0xa3, - 0xfa, 0x1f, 0x93, 0x53, 0xb7, 0x69, 0x5b, 0xdd, 0xd3, 0xda, 0x34, 0x43, 0x98, 0xa6, 0x80, 0x3d, - 0xab, 0x7b, 0xca, 0x36, 0xcd, 0x1e, 0x58, 0x1e, 0x9f, 0x2d, 0xb0, 0xd9, 0x02, 0x83, 0xb0, 0xe9, - 0x25, 0xa8, 0xf6, 0x4c, 0xab, 0xd9, 0xb3, 0xdb, 0x4d, 0xdf, 0x20, 0xc0, 0x0c, 0x52, 0xe9, 0x99, - 0xd6, 0x13, 0xbb, 0xad, 0x4b, 0xb3, 0x50, 0x4c, 0xe3, 0x24, 0x8c, 0x59, 0x14, 0x98, 0xc6, 0x89, - 0x8a, 0xb9, 0x0c, 0xb3, 
0x94, 0x66, 0xcb, 0x21, 0x86, 0x47, 0x02, 0xe4, 0x12, 0x43, 0xbe, 0xd0, - 0x33, 0xad, 0x75, 0x36, 0x13, 0xc2, 0x37, 0x4e, 0x46, 0xf0, 0xcb, 0x02, 0xdf, 0x38, 0x09, 0xe3, - 0xe3, 0x65, 0x28, 0xf8, 0x36, 0x47, 0xd3, 0x90, 0xd9, 0xdd, 0xdb, 0x6d, 0x54, 0xa7, 0x10, 0x40, - 0x6e, 0x6d, 0x7f, 0xbd, 0xb1, 0xbb, 0x51, 0xd5, 0x50, 0x11, 0xf2, 0x1b, 0x0d, 0x3e, 0x48, 0xe1, - 0x47, 0x00, 0x81, 0x75, 0x51, 0x1e, 0xd2, 0xdb, 0x8d, 0xff, 0xaf, 0x4e, 0x51, 0x9c, 0xe7, 0x0d, - 0x7d, 0x7f, 0x6b, 0x6f, 0xb7, 0xaa, 0xd1, 0xc5, 0xeb, 0x7a, 0x63, 0xed, 0xa0, 0x51, 0x4d, 0x51, - 0x8c, 0x27, 0x7b, 0x1b, 0xd5, 0x34, 0x2a, 0x40, 0xf6, 0xf9, 0xda, 0xce, 0xb3, 0x46, 0x35, 0x83, - 0x3f, 0xd7, 0xa0, 0x2c, 0xf6, 0x8b, 0xfb, 0x04, 0x7a, 0x13, 0x72, 0x47, 0xcc, 0x2f, 0xd8, 0x51, - 0x2c, 0xae, 0x5e, 0x8d, 0x6c, 0x6e, 0xc8, 0x77, 0x74, 0x81, 0x8b, 0x30, 0xa4, 0x8f, 0x87, 0x6e, - 0x2d, 0xb5, 0x98, 0x5e, 0x2a, 0xae, 0x56, 0x97, 0xb9, 0xc3, 0x2e, 0x6f, 0x93, 0xd3, 0xe7, 0x46, - 0x77, 0x40, 0x74, 0x3a, 0x89, 0x10, 0x64, 0x7a, 0xb6, 0x43, 0xd8, 0x89, 0x9d, 0xd6, 0xd9, 0x37, - 0x3d, 0xc6, 0x6c, 0xd3, 0xc4, 0x69, 0xe5, 0x03, 0xfc, 0x85, 0x06, 0xf0, 0x74, 0xe0, 0x25, 0xbb, - 0xc6, 0x1c, 0x64, 0x87, 0x94, 0xb0, 0x70, 0x0b, 0x3e, 0x60, 0x3e, 0x41, 0x0c, 0x97, 0xf8, 0x3e, - 0x41, 0x07, 0xe8, 0x32, 0xe4, 0xfb, 0x0e, 0x19, 0x36, 0x8f, 0x87, 0x8c, 0xc9, 0xb4, 0x9e, 0xa3, - 0xc3, 0xed, 0x21, 0xba, 0x01, 0x25, 0xb3, 0x63, 0xd9, 0x0e, 0x69, 0x72, 0x5a, 0x59, 0x36, 0x5b, - 0xe4, 0x30, 0x26, 0xb7, 0x82, 0xc2, 0x09, 0xe7, 0x54, 0x94, 0x1d, 0x0a, 0xc2, 0x16, 0x14, 0x99, - 0xa8, 0x13, 0x99, 0xef, 0x5e, 0x20, 0x63, 0x8a, 0x2d, 0x1b, 0x35, 0xa1, 0x90, 0x1a, 0x7f, 0x00, - 0x68, 0x83, 0x74, 0x89, 0x47, 0x26, 0x89, 0x1e, 0x8a, 0x4d, 0xd2, 0xaa, 0x4d, 0xf0, 0x8f, 0x35, - 0x98, 0x0d, 0x91, 0x9f, 0x48, 0xad, 0x1a, 0xe4, 0xdb, 0x8c, 0x18, 0x97, 0x20, 0xad, 0xcb, 0x21, - 0x7a, 0x00, 0xd3, 0x42, 0x00, 0xb7, 0x96, 0x4e, 0x38, 0x34, 0x79, 0x2e, 0x93, 0x8b, 0xff, 0xa6, - 0x41, 0x41, 0x28, 0xba, 0xd7, 0x47, 0x6b, 0x50, 0x76, 0xf8, 
0xa0, 0xc9, 0xf4, 0x11, 0x12, 0xd5, - 0x93, 0x83, 0xd0, 0xe6, 0x94, 0x5e, 0x12, 0x4b, 0x18, 0x18, 0xfd, 0x0f, 0x14, 0x25, 0x89, 0xfe, - 0xc0, 0x13, 0x26, 0xaf, 0x85, 0x09, 0x04, 0xe7, 0x6f, 0x73, 0x4a, 0x07, 0x81, 0xfe, 0x74, 0xe0, - 0xa1, 0x03, 0x98, 0x93, 0x8b, 0xb9, 0x36, 0x42, 0x8c, 0x34, 0xa3, 0xb2, 0x18, 0xa6, 0x32, 0xba, - 0x55, 0x9b, 0x53, 0x3a, 0x12, 0xeb, 0x95, 0xc9, 0x47, 0x05, 0xc8, 0x0b, 0x28, 0xfe, 0xbb, 0x06, - 0x20, 0x0d, 0xba, 0xd7, 0x47, 0x1b, 0x50, 0x71, 0xc4, 0x28, 0xa4, 0xf0, 0x95, 0x58, 0x85, 0xc5, - 0x3e, 0x4c, 0xe9, 0x65, 0xb9, 0x88, 0xab, 0xfc, 0x36, 0x94, 0x7c, 0x2a, 0x81, 0xce, 0xf3, 0x31, - 0x3a, 0xfb, 0x14, 0x8a, 0x72, 0x01, 0xd5, 0xfa, 0x3d, 0xb8, 0xe8, 0xaf, 0x8f, 0x51, 0xfb, 0xc6, - 0x18, 0xb5, 0x7d, 0x82, 0xb3, 0x92, 0x82, 0xaa, 0x38, 0xd0, 0x94, 0xc5, 0xc1, 0xf8, 0x8b, 0x34, - 0xe4, 0xd7, 0xed, 0x5e, 0xdf, 0x70, 0xe8, 0x1e, 0xe5, 0x1c, 0xe2, 0x0e, 0xba, 0x1e, 0x53, 0xb7, - 0xb2, 0x7a, 0x33, 0xcc, 0x41, 0xa0, 0xc9, 0xbf, 0x3a, 0x43, 0xd5, 0xc5, 0x12, 0xba, 0x58, 0x64, - 0xa8, 0xd4, 0x39, 0x16, 0x8b, 0xfc, 0x24, 0x96, 0x48, 0x5f, 0x4a, 0x07, 0xbe, 0x54, 0x87, 0xfc, - 0x90, 0x38, 0x41, 0x56, 0xdd, 0x9c, 0xd2, 0x25, 0x00, 0xdd, 0x83, 0x99, 0x68, 0x84, 0xcf, 0x0a, - 0x9c, 0x4a, 0x2b, 0x9c, 0x10, 0x6e, 0x42, 0x29, 0x94, 0x66, 0x72, 0x02, 0xaf, 0xd8, 0x53, 0xb2, - 0xcc, 0x25, 0x19, 0xda, 0x68, 0x4a, 0x2c, 0x6d, 0x4e, 0x89, 0xe0, 0x86, 0xff, 0x17, 0xca, 0x21, - 0x5d, 0x69, 0x14, 0x6f, 0xbc, 0xfb, 0x6c, 0x6d, 0x87, 0x87, 0xfc, 0xc7, 0x2c, 0xca, 0xeb, 0x55, - 0x8d, 0x66, 0x8e, 0x9d, 0xc6, 0xfe, 0x7e, 0x35, 0x85, 0xca, 0x50, 0xd8, 0xdd, 0x3b, 0x68, 0x72, - 0xac, 0x34, 0x7e, 0xcb, 0xa7, 0x20, 0x52, 0x86, 0x92, 0x29, 0xa6, 0x94, 0x4c, 0xa1, 0xc9, 0x4c, - 0x91, 0x0a, 0x32, 0x45, 0xfa, 0x51, 0x05, 0x4a, 0xdc, 0x3e, 0xcd, 0x81, 0x45, 0xb3, 0xd5, 0x2f, - 0x35, 0x80, 0x83, 0x13, 0x4b, 0x06, 0xa0, 0x15, 0xc8, 0xb7, 0x38, 0xf1, 0x9a, 0xc6, 0xfc, 0xf9, - 0x62, 0xac, 0xc9, 0x75, 0x89, 0x85, 0x5e, 0x87, 0xbc, 0x3b, 0x68, 0xb5, 0x88, 0x2b, 0xb3, 0xc6, - 
0xe5, 0x68, 0x48, 0x11, 0x0e, 0xaf, 0x4b, 0x3c, 0xba, 0xe4, 0x85, 0x61, 0x76, 0x07, 0x2c, 0x87, - 0x8c, 0x5f, 0x22, 0xf0, 0xf0, 0xcf, 0x34, 0x28, 0x32, 0x29, 0x27, 0x8a, 0x63, 0x57, 0xa1, 0xc0, - 0x64, 0x20, 0x6d, 0x11, 0xc9, 0xa6, 0xf5, 0x00, 0x80, 0xfe, 0x1b, 0x0a, 0xf2, 0x04, 0xcb, 0x60, - 0x56, 0x8b, 0x27, 0xbb, 0xd7, 0xd7, 0x03, 0x54, 0xbc, 0x0d, 0x17, 0x98, 0x55, 0x5a, 0xb4, 0x3e, - 0x95, 0x76, 0x54, 0x2b, 0x38, 0x2d, 0x52, 0xc1, 0xd5, 0x61, 0xba, 0x7f, 0x74, 0xea, 0x9a, 0x2d, - 0xa3, 0x2b, 0xa4, 0xf0, 0xc7, 0xf8, 0xff, 0x00, 0xa9, 0xc4, 0x26, 0x51, 0x17, 0x97, 0xa1, 0xb8, - 0x69, 0xb8, 0x47, 0x42, 0x24, 0xfc, 0x3e, 0x94, 0xf8, 0x70, 0x22, 0x1b, 0x22, 0xc8, 0x1c, 0x19, - 0xee, 0x11, 0x13, 0xbc, 0xac, 0xb3, 0x6f, 0x7c, 0x01, 0x66, 0xf6, 0x2d, 0xa3, 0xef, 0x1e, 0xd9, - 0x32, 0xd6, 0xd2, 0xfa, 0xbc, 0x1a, 0xc0, 0x26, 0xe2, 0x78, 0x17, 0x66, 0x1c, 0xd2, 0x33, 0x4c, - 0xcb, 0xb4, 0x3a, 0xcd, 0xc3, 0x53, 0x8f, 0xb8, 0xa2, 0x7c, 0xaf, 0xf8, 0xe0, 0x47, 0x14, 0x4a, - 0x45, 0x3b, 0xec, 0xda, 0x87, 0xc2, 0xe3, 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0xa5, 0xf7, 0x0c, 0xaf, - 0x25, 0xad, 0x80, 0xb6, 0xa0, 0xe2, 0xfb, 0x39, 0x83, 0x08, 0x59, 0x22, 0x01, 0x9f, 0xad, 0x91, - 0x85, 0x9d, 0x0c, 0xf8, 0xe5, 0x96, 0x0a, 0x60, 0xa4, 0x0c, 0xab, 0x45, 0xba, 0x3e, 0xa9, 0x54, - 0x32, 0x29, 0x86, 0xa8, 0x92, 0x52, 0x01, 0x8f, 0x66, 0x82, 0x64, 0xc8, 0xdd, 0xf2, 0xe7, 0x29, - 0x40, 0xa3, 0x32, 0x7c, 0xd5, 0xfa, 0xe0, 0x36, 0x54, 0x5c, 0xcf, 0x70, 0xbc, 0x66, 0xe4, 0x72, - 0x53, 0x66, 0x50, 0x3f, 0x56, 0xdd, 0x85, 0x99, 0xbe, 0x63, 0x77, 0x1c, 0xe2, 0xba, 0x4d, 0xcb, - 0xf6, 0xcc, 0x17, 0xa7, 0xa2, 0xc4, 0xaa, 0x48, 0xf0, 0x2e, 0x83, 0xa2, 0x06, 0xe4, 0x5f, 0x98, - 0x5d, 0x8f, 0x38, 0x6e, 0x2d, 0xbb, 0x98, 0x5e, 0xaa, 0xac, 0x3e, 0x38, 0xcb, 0x6a, 0xcb, 0xef, - 0x30, 0xfc, 0x83, 0xd3, 0x3e, 0xd1, 0xe5, 0x5a, 0xb5, 0x6c, 0xc9, 0x85, 0xca, 0x96, 0xdb, 0x00, - 0x01, 0x3e, 0x8d, 0x5a, 0xbb, 0x7b, 0x4f, 0x9f, 0x1d, 0x54, 0xa7, 0x50, 0x09, 0xa6, 0x77, 0xf7, - 0x36, 0x1a, 0x3b, 0x0d, 0x1a, 0xd7, 
0xf0, 0x8a, 0xb4, 0x8d, 0x6a, 0x43, 0x34, 0x0f, 0xd3, 0x2f, - 0x29, 0x54, 0xde, 0xfe, 0xd2, 0x7a, 0x9e, 0x8d, 0xb7, 0xda, 0xf8, 0x47, 0x29, 0x28, 0x8b, 0x53, - 0x30, 0xd1, 0x51, 0x54, 0x59, 0xa4, 0x42, 0x2c, 0x68, 0x8d, 0xc4, 0x4f, 0x47, 0x5b, 0x94, 0x62, - 0x72, 0x48, 0xdd, 0x9d, 0x6f, 0x36, 0x69, 0x0b, 0xb3, 0xfa, 0x63, 0x74, 0x0f, 0xaa, 0x2d, 0xee, - 0xee, 0x91, 0xb4, 0xa3, 0xcf, 0x08, 0xb8, 0x92, 0x75, 0xca, 0xfe, 0x69, 0x33, 0x5c, 0x91, 0x76, - 0x0a, 0x7a, 0x49, 0x1e, 0x24, 0x0a, 0x43, 0xb7, 0x21, 0x47, 0x86, 0xc4, 0xf2, 0xdc, 0x5a, 0x91, - 0x05, 0xb0, 0xb2, 0xac, 0xc6, 0x1a, 0x14, 0xaa, 0x8b, 0x49, 0xfc, 0x5f, 0x70, 0x81, 0x55, 0xbd, - 0x8f, 0x1d, 0xc3, 0x52, 0xcb, 0xf3, 0x83, 0x83, 0x1d, 0x61, 0x3a, 0xfa, 0x89, 0x2a, 0x90, 0xda, - 0xda, 0x10, 0x8a, 0xa6, 0xb6, 0x36, 0xf0, 0x27, 0x1a, 0x20, 0x75, 0xdd, 0x44, 0xb6, 0x8c, 0x10, - 0x97, 0xec, 0xd3, 0x01, 0xfb, 0x39, 0xc8, 0x12, 0xc7, 0xb1, 0x1d, 0x66, 0xb5, 0x82, 0xce, 0x07, - 0xf8, 0x96, 0x90, 0x41, 0x27, 0x43, 0xfb, 0xd8, 0x77, 0x0c, 0x4e, 0x4d, 0xf3, 0x45, 0xdd, 0x86, - 0xd9, 0x10, 0xd6, 0x44, 0x81, 0xf4, 0x2e, 0x5c, 0x64, 0xc4, 0xb6, 0x09, 0xe9, 0xaf, 0x75, 0xcd, - 0x61, 0x22, 0xd7, 0x3e, 0x5c, 0x8a, 0x22, 0x7e, 0xbd, 0x36, 0xc2, 0x6f, 0x09, 0x8e, 0x07, 0x66, - 0x8f, 0x1c, 0xd8, 0x3b, 0xc9, 0xb2, 0xd1, 0xe8, 0x48, 0x6f, 0xdd, 0x22, 0xe3, 0xb0, 0x6f, 0xfc, - 0x2b, 0x0d, 0x2e, 0x8f, 0x2c, 0xff, 0x9a, 0x77, 0x75, 0x01, 0xa0, 0x43, 0x8f, 0x0f, 0x69, 0xd3, - 0x09, 0x7e, 0x5f, 0x54, 0x20, 0xbe, 0x9c, 0x34, 0xc0, 0x94, 0x84, 0x9c, 0x47, 0x90, 0x7b, 0xc2, - 0x5a, 0x35, 0x8a, 0x56, 0x19, 0xa9, 0x95, 0x65, 0xf4, 0xf8, 0x05, 0xb2, 0xa0, 0xb3, 0x6f, 0x96, - 0x5f, 0x09, 0x71, 0x9e, 0xe9, 0x3b, 0x3c, 0x8f, 0x17, 0x74, 0x7f, 0x4c, 0xb9, 0xb7, 0xba, 0x26, - 0xb1, 0x3c, 0x36, 0x9b, 0x61, 0xb3, 0x0a, 0x04, 0x2f, 0x43, 0x95, 0x73, 0x5a, 0x6b, 0xb7, 0x95, - 0x5c, 0xee, 0xd3, 0xd3, 0xc2, 0xf4, 0xf0, 0xaf, 0x35, 0xb8, 0xa0, 0x2c, 0x98, 0xc8, 0x76, 0xaf, - 0x40, 0x8e, 0x37, 0xa4, 0x44, 0x1e, 0x99, 0x0b, 0xaf, 0xe2, 0x6c, 0x74, 
0x81, 0x83, 0x96, 0x21, - 0xcf, 0xbf, 0x64, 0xb1, 0x12, 0x8f, 0x2e, 0x91, 0xf0, 0x6d, 0x98, 0x15, 0x20, 0xd2, 0xb3, 0xe3, - 0x8e, 0x09, 0x33, 0x28, 0xfe, 0x18, 0xe6, 0xc2, 0x68, 0x13, 0xa9, 0xa4, 0x08, 0x99, 0x3a, 0x8f, - 0x90, 0x6b, 0x52, 0xc8, 0x67, 0xfd, 0xb6, 0x92, 0xf6, 0xa2, 0xbb, 0xae, 0xee, 0x48, 0x2a, 0xb2, - 0x23, 0xbe, 0x02, 0x92, 0xc4, 0x37, 0xaa, 0xc0, 0xac, 0x3c, 0x0e, 0x3b, 0xa6, 0xeb, 0x17, 0x43, - 0x1f, 0x01, 0x52, 0x81, 0xdf, 0xb4, 0x40, 0x1b, 0xe4, 0x85, 0x63, 0x74, 0x7a, 0xc4, 0x0f, 0xf5, - 0xb4, 0xca, 0x54, 0x81, 0x13, 0x05, 0xc7, 0x3f, 0x68, 0x50, 0x5a, 0xeb, 0x1a, 0x4e, 0x4f, 0x6e, - 0xd6, 0xdb, 0x90, 0xe3, 0xe5, 0xab, 0xb8, 0xf1, 0xdd, 0x09, 0x93, 0x51, 0x71, 0xf9, 0x60, 0x8d, - 0x17, 0xbb, 0x62, 0x15, 0xdd, 0x5c, 0xd1, 0x97, 0xdd, 0x88, 0xf4, 0x69, 0x37, 0xd0, 0xab, 0x90, - 0x35, 0xe8, 0x12, 0x16, 0x50, 0x2a, 0xd1, 0x8b, 0x03, 0xa3, 0xc6, 0x4a, 0x0d, 0x8e, 0x85, 0xdf, - 0x84, 0xa2, 0xc2, 0x81, 0xde, 0x87, 0x1e, 0x37, 0x44, 0x39, 0xb1, 0xb6, 0x7e, 0xb0, 0xf5, 0x9c, - 0x5f, 0x93, 0x2a, 0x00, 0x1b, 0x0d, 0x7f, 0x9c, 0xc2, 0xef, 0x8b, 0x55, 0x22, 0xe4, 0xa8, 0xf2, - 0x68, 0x49, 0xf2, 0xa4, 0xce, 0x25, 0xcf, 0x09, 0x94, 0x85, 0xfa, 0x13, 0x9d, 0x81, 0xd7, 0x21, - 0xc7, 0xe8, 0xc9, 0x23, 0x30, 0x1f, 0xc3, 0x56, 0x46, 0x0b, 0x8e, 0x88, 0x67, 0xa0, 0xbc, 0xef, - 0x19, 0xde, 0xc0, 0x95, 0x47, 0xe0, 0xf7, 0x1a, 0x54, 0x24, 0x64, 0xd2, 0xe6, 0x90, 0xbc, 0x54, - 0xf3, 0x20, 0xec, 0x5f, 0xa9, 0x2f, 0x41, 0xae, 0x7d, 0xb8, 0x6f, 0x7e, 0x24, 0x1b, 0x79, 0x62, - 0x44, 0xe1, 0x5d, 0xce, 0x87, 0x77, 0xd3, 0xc5, 0x88, 0x5e, 0xcf, 0x1c, 0xe3, 0x85, 0xb7, 0x65, - 0xb5, 0xc9, 0x09, 0xab, 0x82, 0x32, 0x7a, 0x00, 0x60, 0x37, 0x2a, 0xd1, 0x75, 0x67, 0xa5, 0x8f, - 0xda, 0x85, 0x9f, 0x85, 0x0b, 0x6b, 0x03, 0xef, 0xa8, 0x61, 0x19, 0x87, 0x5d, 0x19, 0x34, 0xf0, - 0x1c, 0x20, 0x0a, 0xdc, 0x30, 0x5d, 0x15, 0xda, 0x80, 0x59, 0x0a, 0x25, 0x96, 0x67, 0xb6, 0x94, - 0x08, 0x23, 0xf3, 0x88, 0x16, 0xc9, 0x23, 0x86, 0xeb, 0xbe, 0xb4, 0x9d, 0xb6, 0x50, 0xcd, 0x1f, - 0xe3, 0x0d, 
0x4e, 0xfc, 0x99, 0x1b, 0xca, 0x14, 0x5f, 0x95, 0xca, 0x52, 0x40, 0xe5, 0x31, 0xf1, - 0xc6, 0x50, 0xc1, 0x0f, 0xe0, 0xa2, 0xc4, 0x14, 0x5d, 0x97, 0x31, 0xc8, 0x7b, 0x70, 0x4d, 0x22, - 0xaf, 0x1f, 0xd1, 0xbb, 0xc0, 0x53, 0xc1, 0xf0, 0xdf, 0x95, 0xf3, 0x11, 0xd4, 0x7c, 0x39, 0x59, - 0xe9, 0x67, 0x77, 0x55, 0x01, 0x06, 0xae, 0x38, 0x33, 0x05, 0x9d, 0x7d, 0x53, 0x98, 0x63, 0x77, - 0xfd, 0xac, 0x4c, 0xbf, 0xf1, 0x3a, 0xcc, 0x4b, 0x1a, 0xa2, 0x28, 0x0b, 0x13, 0x19, 0x11, 0x28, - 0x8e, 0x88, 0x30, 0x18, 0x5d, 0x3a, 0xde, 0xec, 0x2a, 0x66, 0xd8, 0xb4, 0x8c, 0xa6, 0xa6, 0xd0, - 0xbc, 0xc8, 0x4f, 0x04, 0x15, 0x4c, 0x0d, 0xda, 0x02, 0x4c, 0x09, 0xa8, 0x60, 0xb1, 0x11, 0x14, - 0x3c, 0xb2, 0x11, 0x23, 0xa4, 0x3f, 0x80, 0x05, 0x5f, 0x08, 0x6a, 0xb7, 0xa7, 0xc4, 0xe9, 0x99, - 0xae, 0xab, 0xf4, 0x09, 0xe2, 0x14, 0xbf, 0x03, 0x99, 0x3e, 0x11, 0x31, 0xa5, 0xb8, 0x8a, 0x96, - 0xf9, 0xdb, 0xd8, 0xb2, 0xb2, 0x98, 0xcd, 0xe3, 0x36, 0x5c, 0x97, 0xd4, 0xb9, 0x45, 0x63, 0xc9, - 0x47, 0x85, 0x92, 0x77, 0x48, 0x6e, 0xd6, 0xd1, 0x3b, 0x64, 0x9a, 0xef, 0xbd, 0xbc, 0x43, 0xd2, - 0x5c, 0xa1, 0xfa, 0xd6, 0x44, 0xb9, 0x62, 0x9b, 0xdb, 0xd4, 0x77, 0xc9, 0x89, 0x88, 0x1d, 0xc2, - 0x5c, 0xd8, 0x93, 0x27, 0x0a, 0x63, 0x73, 0x90, 0xf5, 0xec, 0x63, 0x22, 0x83, 0x18, 0x1f, 0x48, - 0x81, 0x7d, 0x37, 0x9f, 0x48, 0x60, 0x23, 0x20, 0xc6, 0x8e, 0xe4, 0xa4, 0xf2, 0xd2, 0xdd, 0x94, - 0xf5, 0x0f, 0x1f, 0xe0, 0x5d, 0xb8, 0x14, 0x0d, 0x13, 0x13, 0x89, 0xfc, 0x9c, 0x1f, 0xe0, 0xb8, - 0x48, 0x32, 0x11, 0xdd, 0x77, 0x83, 0x60, 0xa0, 0x04, 0x94, 0x89, 0x48, 0xea, 0x50, 0x8f, 0x8b, - 0x2f, 0xff, 0x89, 0xf3, 0xea, 0x87, 0x9b, 0x89, 0x88, 0xb9, 0x01, 0xb1, 0xc9, 0xb7, 0x3f, 0x88, - 0x11, 0xe9, 0xb1, 0x31, 0x42, 0x38, 0x49, 0x10, 0xc5, 0xbe, 0x86, 0x43, 0x27, 0x78, 0x04, 0x01, - 0x74, 0x52, 0x1e, 0x34, 0x87, 0xf8, 0x3c, 0xd8, 0x40, 0x1e, 0x6c, 0x35, 0xec, 0x4e, 0xb4, 0x19, - 0xef, 0x05, 0xb1, 0x73, 0x24, 0x32, 0x4f, 0x44, 0xf8, 0x7d, 0x58, 0x4c, 0x0e, 0xca, 0x93, 0x50, - 0xbe, 0x8f, 0xa1, 0xe0, 0x17, 0x94, 0xca, 0xbb, 
0x72, 0x11, 0xf2, 0xbb, 0x7b, 0xfb, 0x4f, 0xd7, - 0xd6, 0x1b, 0x55, 0x6d, 0xf5, 0x1f, 0x69, 0x48, 0x6d, 0x3f, 0x47, 0xdf, 0x82, 0x2c, 0x7f, 0x2e, - 0x1a, 0xf3, 0x9a, 0x56, 0x1f, 0xf7, 0xf0, 0x84, 0xaf, 0x7e, 0xf2, 0xa7, 0xbf, 0x7e, 0x9e, 0xba, - 0x84, 0x2f, 0xac, 0x0c, 0xdf, 0x30, 0xba, 0xfd, 0x23, 0x63, 0xe5, 0x78, 0xb8, 0xc2, 0x72, 0xc2, - 0x43, 0xed, 0x3e, 0x7a, 0x0e, 0xe9, 0xa7, 0x03, 0x0f, 0x25, 0x3e, 0xb5, 0xd5, 0x93, 0x1f, 0xa4, - 0x70, 0x9d, 0x51, 0x9e, 0xc3, 0x33, 0x2a, 0xe5, 0xfe, 0xc0, 0xa3, 0x74, 0x87, 0x50, 0x54, 0xde, - 0x94, 0xd0, 0x99, 0x8f, 0x70, 0xf5, 0xb3, 0xdf, 0xab, 0x30, 0x66, 0xfc, 0xae, 0xe2, 0xcb, 0x2a, - 0x3f, 0xfe, 0xf4, 0xa5, 0xea, 0x73, 0x70, 0x62, 0x45, 0xf5, 0x09, 0x9e, 0x45, 0xa2, 0xfa, 0x28, - 0x4f, 0x11, 0xf1, 0xfa, 0x78, 0x27, 0x16, 0xa5, 0x6b, 0x8b, 0x77, 0xb0, 0x96, 0x87, 0xae, 0xc7, - 0xbc, 0xa3, 0xa8, 0x2f, 0x06, 0xf5, 0xc5, 0x64, 0x04, 0xc1, 0xe9, 0x06, 0xe3, 0x74, 0x05, 0x5f, - 0x52, 0x39, 0xb5, 0x7c, 0xbc, 0x87, 0xda, 0xfd, 0xd5, 0x23, 0xc8, 0xb2, 0x3e, 0x27, 0x6a, 0xca, - 0x8f, 0x7a, 0x4c, 0x87, 0x36, 0xe1, 0x04, 0x84, 0x3a, 0xa4, 0x78, 0x9e, 0x71, 0x9b, 0xc5, 0x15, - 0x9f, 0x1b, 0x6b, 0x75, 0x3e, 0xd4, 0xee, 0x2f, 0x69, 0xaf, 0x69, 0xab, 0xdf, 0xcb, 0x40, 0x96, - 0xb5, 0x8e, 0x50, 0x1f, 0x20, 0x68, 0x0a, 0x46, 0xf5, 0x1c, 0x69, 0x33, 0x46, 0xf5, 0x1c, 0xed, - 0x27, 0xe2, 0xeb, 0x8c, 0xf3, 0x3c, 0x9e, 0xf3, 0x39, 0xb3, 0x57, 0xfb, 0x15, 0xd6, 0x24, 0xa2, - 0x66, 0x7d, 0x09, 0x45, 0xa5, 0xb9, 0x87, 0xe2, 0x28, 0x86, 0xba, 0x83, 0xd1, 0x63, 0x12, 0xd3, - 0x19, 0xc4, 0x37, 0x19, 0xd3, 0x6b, 0xb8, 0xa6, 0x1a, 0x97, 0xf3, 0x75, 0x18, 0x26, 0x65, 0xfc, - 0xa9, 0x06, 0x95, 0x70, 0x83, 0x0f, 0xdd, 0x8c, 0x21, 0x1d, 0xed, 0x13, 0xd6, 0x6f, 0x8d, 0x47, - 0x4a, 0x14, 0x81, 0xf3, 0x3f, 0x26, 0xa4, 0x6f, 0x50, 0x4c, 0x61, 0x7b, 0xf4, 0x7d, 0x0d, 0x66, - 0x22, 0x6d, 0x3b, 0x14, 0xc7, 0x62, 0xa4, 0x29, 0x58, 0xbf, 0x7d, 0x06, 0x96, 0x90, 0xe4, 0x2e, - 0x93, 0xe4, 0x06, 0xbe, 0x3a, 0x6a, 0x0c, 0xcf, 0xec, 0x11, 0xcf, 0x16, 0xd2, 0xac, 
0xfe, 0x33, - 0x0d, 0xf9, 0x75, 0xfe, 0x13, 0x2b, 0xe4, 0x41, 0xc1, 0xef, 0x84, 0xa1, 0x85, 0xb8, 0xae, 0x44, - 0x50, 0xb2, 0xd7, 0xaf, 0x27, 0xce, 0x0b, 0x11, 0xee, 0x30, 0x11, 0x16, 0xf1, 0x15, 0x5f, 0x04, - 0xf1, 0x53, 0xae, 0x15, 0x7e, 0xf9, 0x5e, 0x31, 0xda, 0x6d, 0xba, 0x25, 0xdf, 0xd5, 0xa0, 0xa4, - 0x36, 0xac, 0xd0, 0x8d, 0xd8, 0x7e, 0x88, 0xda, 0xf3, 0xaa, 0xe3, 0x71, 0x28, 0x82, 0xff, 0x3d, - 0xc6, 0xff, 0x26, 0x5e, 0x48, 0xe2, 0xef, 0x30, 0xfc, 0xb0, 0x08, 0xbc, 0xe5, 0x14, 0x2f, 0x42, - 0xa8, 0xa3, 0x15, 0x2f, 0x42, 0xb8, 0x63, 0x75, 0xb6, 0x08, 0x03, 0x86, 0x4f, 0x45, 0x38, 0x01, - 0x08, 0x3a, 0x4c, 0x28, 0xd6, 0xb8, 0xca, 0x25, 0x26, 0xea, 0x83, 0xa3, 0xcd, 0xa9, 0x98, 0x13, - 0x10, 0xe1, 0xdd, 0x35, 0x5d, 0xea, 0x8b, 0xab, 0xbf, 0xcd, 0x40, 0xf1, 0x89, 0x61, 0x5a, 0x1e, - 0xb1, 0x0c, 0xab, 0x45, 0x50, 0x07, 0xb2, 0x2c, 0x4b, 0x45, 0x03, 0x8f, 0xda, 0xf6, 0x89, 0x06, - 0x9e, 0x50, 0x4f, 0x04, 0xdf, 0x66, 0xac, 0xaf, 0xe3, 0xba, 0xcf, 0xba, 0x17, 0xd0, 0x5f, 0x61, - 0xfd, 0x0c, 0xaa, 0xf2, 0x31, 0xe4, 0x78, 0xff, 0x02, 0x45, 0xa8, 0x85, 0xfa, 0x1c, 0xf5, 0xab, - 0xf1, 0x93, 0x89, 0xa7, 0x4c, 0xe5, 0xe5, 0x32, 0x64, 0xca, 0xec, 0xdb, 0x00, 0x41, 0xc3, 0x2c, - 0x6a, 0xdf, 0x91, 0xfe, 0x5a, 0x7d, 0x31, 0x19, 0x41, 0x30, 0xbe, 0xcf, 0x18, 0xdf, 0xc2, 0xd7, - 0x63, 0x19, 0xb7, 0xfd, 0x05, 0x94, 0x79, 0x0b, 0x32, 0x9b, 0x86, 0x7b, 0x84, 0x22, 0x49, 0x48, - 0x79, 0xdb, 0xad, 0xd7, 0xe3, 0xa6, 0x04, 0xab, 0x5b, 0x8c, 0xd5, 0x02, 0x9e, 0x8f, 0x65, 0x75, - 0x64, 0xb8, 0x34, 0xa6, 0xa3, 0x01, 0x4c, 0xcb, 0xf7, 0x5a, 0x74, 0x2d, 0x62, 0xb3, 0xf0, 0xdb, - 0x6e, 0x7d, 0x21, 0x69, 0x5a, 0x30, 0x5c, 0x62, 0x0c, 0x31, 0xbe, 0x16, 0x6f, 0x54, 0x81, 0xfe, - 0x50, 0xbb, 0xff, 0x9a, 0xb6, 0xfa, 0xc3, 0x2a, 0x64, 0x68, 0xbd, 0x44, 0xb3, 0x48, 0x70, 0xcd, - 0x8c, 0x5a, 0x78, 0xa4, 0xb9, 0x13, 0xb5, 0xf0, 0xe8, 0x0d, 0x35, 0x26, 0x8b, 0xb0, 0x1f, 0x9a, - 0x12, 0x86, 0x45, 0x35, 0xf6, 0xa0, 0xa8, 0x5c, 0x46, 0x51, 0x0c, 0xc5, 0x70, 0xeb, 0x28, 0x9a, - 0x45, 0x62, 0x6e, 0xb2, 
0x78, 0x91, 0x31, 0xad, 0xe3, 0x8b, 0x61, 0xa6, 0x6d, 0x8e, 0x46, 0xb9, - 0x7e, 0x0c, 0x25, 0xf5, 0xd6, 0x8a, 0x62, 0x88, 0x46, 0x7a, 0x53, 0xd1, 0x58, 0x11, 0x77, 0xe9, - 0x8d, 0x71, 0x1a, 0xff, 0x67, 0xb5, 0x12, 0x97, 0x72, 0xff, 0x10, 0xf2, 0xe2, 0x2e, 0x1b, 0xa7, - 0x6f, 0xb8, 0x9b, 0x15, 0xa7, 0x6f, 0xe4, 0x22, 0x1c, 0x53, 0x92, 0x30, 0xb6, 0xb4, 0x66, 0x97, - 0x01, 0x5a, 0xb0, 0x7c, 0x4c, 0xbc, 0x24, 0x96, 0x41, 0x7f, 0x26, 0x89, 0xa5, 0x72, 0x5f, 0x1a, - 0xcb, 0xb2, 0x43, 0x3c, 0x71, 0x96, 0xe5, 0x65, 0x04, 0x25, 0x50, 0x54, 0xa3, 0x21, 0x1e, 0x87, - 0x92, 0x58, 0x45, 0x06, 0x5c, 0x45, 0x28, 0x44, 0xdf, 0x01, 0x08, 0x2e, 0xde, 0xd1, 0xc2, 0x20, - 0xb6, 0x7b, 0x17, 0x2d, 0x0c, 0xe2, 0xef, 0xee, 0x31, 0x1e, 0x1c, 0x30, 0xe7, 0x95, 0x2c, 0x65, - 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x88, 0x67, 0x11, 0xdb, 0x18, 0xac, 0xbf, 0x72, - 0x3e, 0xe4, 0xc4, 0xe8, 0x19, 0xc8, 0xd5, 0x62, 0x4b, 0xfa, 0x2f, 0xa9, 0x64, 0x9f, 0x69, 0x50, - 0x0e, 0x5d, 0xf5, 0xd1, 0x9d, 0x84, 0x7d, 0x8e, 0x34, 0x17, 0xeb, 0x77, 0xcf, 0xc4, 0x4b, 0xac, - 0x9d, 0x94, 0x53, 0x21, 0xeb, 0xc6, 0x1f, 0x68, 0x50, 0x09, 0xf7, 0x07, 0x50, 0x02, 0x83, 0x91, - 0x0e, 0x65, 0x7d, 0xe9, 0x6c, 0xc4, 0x73, 0xec, 0x56, 0x50, 0x4a, 0x7e, 0x08, 0x79, 0xd1, 0x56, - 0x88, 0x73, 0x8b, 0x70, 0x83, 0x33, 0xce, 0x2d, 0x22, 0x3d, 0x89, 0x24, 0xb7, 0xa0, 0x37, 0x74, - 0xc5, 0x13, 0x45, 0xf3, 0x21, 0x89, 0xe5, 0x78, 0x4f, 0x8c, 0x74, 0x2e, 0xc6, 0xb2, 0x0c, 0x3c, - 0x51, 0xb6, 0x1e, 0x50, 0x02, 0xc5, 0x33, 0x3c, 0x31, 0xda, 0xb9, 0x48, 0xf2, 0x44, 0xc6, 0x55, - 0xf1, 0xc4, 0xa0, 0x53, 0x10, 0xe7, 0x89, 0x23, 0xed, 0xdb, 0x38, 0x4f, 0x1c, 0x6d, 0x36, 0x24, - 0xed, 0x2d, 0x63, 0x1e, 0xf2, 0xc4, 0xd9, 0x98, 0xce, 0x02, 0x7a, 0x25, 0xc1, 0xa6, 0xb1, 0xad, - 0xe1, 0xfa, 0xab, 0xe7, 0xc4, 0x1e, 0xef, 0x01, 0x7c, 0x37, 0xa4, 0x07, 0xfc, 0x42, 0x83, 0xb9, - 0xb8, 0xd6, 0x04, 0x4a, 0x60, 0x96, 0xd0, 0x57, 0xae, 0x2f, 0x9f, 0x17, 0xfd, 0x1c, 0x76, 0xf3, - 0x7d, 0xe2, 0x51, 0xf5, 0x77, 0x5f, 0x2e, 0x68, 0x7f, 0xfc, 
0x72, 0x41, 0xfb, 0xf3, 0x97, 0x0b, - 0xda, 0x4f, 0xff, 0xb2, 0x30, 0x75, 0x98, 0x63, 0xff, 0xdb, 0xe3, 0x8d, 0x7f, 0x05, 0x00, 0x00, - 0xff, 0xff, 0x63, 0x1c, 0x78, 0x24, 0x74, 0x32, 0x00, 0x00, + // 3401 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x5b, 0xcb, 0x73, 0x1b, 0xc7, + 0xd1, 0xe7, 0x02, 0x24, 0x40, 0x34, 0x1e, 0x84, 0x86, 0x94, 0x04, 0xae, 0x24, 0x8a, 0x1a, 0xbd, + 0x28, 0xc9, 0x26, 0x6d, 0xda, 0xdf, 0x77, 0xd0, 0xe7, 0x72, 0x7d, 0x14, 0x09, 0x8b, 0x0c, 0x29, + 0x52, 0x5e, 0x52, 0xb2, 0x53, 0xe5, 0x0a, 0x6a, 0x09, 0x8c, 0xc8, 0x2d, 0x02, 0xbb, 0xf0, 0xee, + 0x02, 0x22, 0x9d, 0xa4, 0x2a, 0xe5, 0xd8, 0x95, 0x4a, 0x8e, 0xf1, 0x21, 0xaf, 0x63, 0x2a, 0x87, + 0xfc, 0x01, 0xb9, 0xe5, 0x0f, 0x48, 0xe5, 0x92, 0x54, 0xe5, 0x1f, 0x48, 0x39, 0x39, 0xe4, 0x90, + 0x7b, 0x4e, 0xa9, 0xa4, 0xe6, 0xb5, 0x3b, 0xbb, 0xd8, 0x05, 0xe5, 0x6c, 0x7c, 0x11, 0x77, 0x66, + 0x7a, 0xfa, 0xd7, 0xdd, 0x33, 0xdd, 0xd3, 0xd3, 0x03, 0x41, 0xc9, 0xed, 0xb7, 0x97, 0xfb, 0xae, + 0xe3, 0x3b, 0xa8, 0x42, 0xfc, 0x76, 0xc7, 0x23, 0xee, 0x90, 0xb8, 0xfd, 0x43, 0x7d, 0xee, 0xc8, + 0x39, 0x72, 0xd8, 0xc0, 0x0a, 0xfd, 0xe2, 0x34, 0xfa, 0x3c, 0xa5, 0x59, 0xe9, 0x0d, 0xdb, 0x6d, + 0xf6, 0x4f, 0xff, 0x70, 0xe5, 0x64, 0x28, 0x86, 0xae, 0xb0, 0x21, 0x73, 0xe0, 0x1f, 0xb3, 0x7f, + 0xfa, 0x87, 0xec, 0x8f, 0x18, 0xbc, 0x7a, 0xe4, 0x38, 0x47, 0x5d, 0xb2, 0x62, 0xf6, 0xad, 0x15, + 0xd3, 0xb6, 0x1d, 0xdf, 0xf4, 0x2d, 0xc7, 0xf6, 0xf8, 0x28, 0xfe, 0x5c, 0x83, 0x9a, 0x41, 0xbc, + 0xbe, 0x63, 0x7b, 0x64, 0x93, 0x98, 0x1d, 0xe2, 0xa2, 0x6b, 0x00, 0xed, 0xee, 0xc0, 0xf3, 0x89, + 0xdb, 0xb2, 0x3a, 0x0d, 0x6d, 0x51, 0x5b, 0x9a, 0x34, 0x4a, 0xa2, 0x67, 0xab, 0x83, 0xae, 0x40, + 0xa9, 0x47, 0x7a, 0x87, 0x7c, 0x34, 0xc7, 0x46, 0xa7, 0x79, 0xc7, 0x56, 0x07, 0xe9, 0x30, 0xed, + 0x92, 0xa1, 0xe5, 0x59, 0x8e, 0xdd, 0xc8, 0x2f, 0x6a, 0x4b, 0x79, 0x23, 0x68, 0xd3, 0x89, 0xae, + 0xf9, 0xc2, 0x6f, 0xf9, 0xc4, 0xed, 0x35, 0x26, 0xf9, 0x44, 0xda, 0x71, 0x40, 0xdc, 
0x1e, 0xfe, + 0x6c, 0x0a, 0x2a, 0x86, 0x69, 0x1f, 0x11, 0x83, 0x7c, 0x3c, 0x20, 0x9e, 0x8f, 0xea, 0x90, 0x3f, + 0x21, 0x67, 0x0c, 0xbe, 0x62, 0xd0, 0x4f, 0x3e, 0xdf, 0x3e, 0x22, 0x2d, 0x62, 0x73, 0xe0, 0x0a, + 0x9d, 0x6f, 0x1f, 0x91, 0xa6, 0xdd, 0x41, 0x73, 0x30, 0xd5, 0xb5, 0x7a, 0x96, 0x2f, 0x50, 0x79, + 0x23, 0x22, 0xce, 0x64, 0x4c, 0x9c, 0x75, 0x00, 0xcf, 0x71, 0xfd, 0x96, 0xe3, 0x76, 0x88, 0xdb, + 0x98, 0x5a, 0xd4, 0x96, 0x6a, 0xab, 0xb7, 0x96, 0xd5, 0x85, 0x58, 0x56, 0x05, 0x5a, 0xde, 0x77, + 0x5c, 0x7f, 0x8f, 0xd2, 0x1a, 0x25, 0x4f, 0x7e, 0xa2, 0xf7, 0xa0, 0xcc, 0x98, 0xf8, 0xa6, 0x7b, + 0x44, 0xfc, 0x46, 0x81, 0x71, 0xb9, 0x7d, 0x0e, 0x97, 0x03, 0x46, 0x6c, 0x30, 0x78, 0xfe, 0x8d, + 0x30, 0x54, 0x3c, 0xe2, 0x5a, 0x66, 0xd7, 0xfa, 0xc4, 0x3c, 0xec, 0x92, 0x46, 0x71, 0x51, 0x5b, + 0x9a, 0x36, 0x22, 0x7d, 0x54, 0xff, 0x13, 0x72, 0xe6, 0xb5, 0x1c, 0xbb, 0x7b, 0xd6, 0x98, 0x66, + 0x04, 0xd3, 0xb4, 0x63, 0xcf, 0xee, 0x9e, 0xb1, 0x45, 0x73, 0x06, 0xb6, 0xcf, 0x47, 0x4b, 0x6c, + 0xb4, 0xc4, 0x7a, 0xd8, 0xf0, 0x12, 0xd4, 0x7b, 0x96, 0xdd, 0xea, 0x39, 0x9d, 0x56, 0x60, 0x10, + 0x60, 0x06, 0xa9, 0xf5, 0x2c, 0xfb, 0x89, 0xd3, 0x31, 0xa4, 0x59, 0x28, 0xa5, 0x79, 0x1a, 0xa5, + 0x2c, 0x0b, 0x4a, 0xf3, 0x54, 0xa5, 0x5c, 0x86, 0x59, 0xca, 0xb3, 0xed, 0x12, 0xd3, 0x27, 0x21, + 0x71, 0x85, 0x11, 0x5f, 0xe8, 0x59, 0xf6, 0x3a, 0x1b, 0x89, 0xd0, 0x9b, 0xa7, 0x23, 0xf4, 0x55, + 0x41, 0x6f, 0x9e, 0x46, 0xe9, 0xf1, 0x32, 0x94, 0x02, 0x9b, 0xa3, 0x69, 0x98, 0xdc, 0xdd, 0xdb, + 0x6d, 0xd6, 0x27, 0x10, 0x40, 0x61, 0x6d, 0x7f, 0xbd, 0xb9, 0xbb, 0x51, 0xd7, 0x50, 0x19, 0x8a, + 0x1b, 0x4d, 0xde, 0xc8, 0xe1, 0x47, 0x00, 0xa1, 0x75, 0x51, 0x11, 0xf2, 0xdb, 0xcd, 0x6f, 0xd6, + 0x27, 0x28, 0xcd, 0xf3, 0xa6, 0xb1, 0xbf, 0xb5, 0xb7, 0x5b, 0xd7, 0xe8, 0xe4, 0x75, 0xa3, 0xb9, + 0x76, 0xd0, 0xac, 0xe7, 0x28, 0xc5, 0x93, 0xbd, 0x8d, 0x7a, 0x1e, 0x95, 0x60, 0xea, 0xf9, 0xda, + 0xce, 0xb3, 0x66, 0x7d, 0x12, 0x7f, 0xa1, 0x41, 0x55, 0xac, 0x17, 0xf7, 0x09, 0xf4, 0x36, 0x14, + 0x8e, 0x99, 0x5f, 0xb0, 
0xad, 0x58, 0x5e, 0xbd, 0x1a, 0x5b, 0xdc, 0x88, 0xef, 0x18, 0x82, 0x16, + 0x61, 0xc8, 0x9f, 0x0c, 0xbd, 0x46, 0x6e, 0x31, 0xbf, 0x54, 0x5e, 0xad, 0x2f, 0x73, 0x87, 0x5d, + 0xde, 0x26, 0x67, 0xcf, 0xcd, 0xee, 0x80, 0x18, 0x74, 0x10, 0x21, 0x98, 0xec, 0x39, 0x2e, 0x61, + 0x3b, 0x76, 0xda, 0x60, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0x70, 0x1b, 0xe0, + 0xe9, 0xc0, 0x4f, 0xf7, 0x8c, 0x39, 0x98, 0x1a, 0x52, 0xbe, 0xc2, 0x2b, 0x78, 0x83, 0xb9, 0x04, + 0x31, 0x3d, 0x12, 0xb8, 0x04, 0x6d, 0xa0, 0xcb, 0x50, 0xec, 0xbb, 0x64, 0xd8, 0x3a, 0x19, 0x32, + 0x8c, 0x69, 0xa3, 0x40, 0x9b, 0xdb, 0x43, 0x6c, 0x43, 0x99, 0x81, 0x64, 0xd2, 0xfb, 0x5e, 0xc8, + 0x3d, 0xc7, 0xa6, 0x8d, 0xea, 0x2e, 0xf1, 0x3e, 0x02, 0xb4, 0x41, 0xba, 0xc4, 0x27, 0x59, 0xdc, + 0x5e, 0xd1, 0x26, 0x1f, 0xd1, 0xe6, 0xc7, 0x1a, 0xcc, 0x46, 0xd8, 0x67, 0x52, 0xab, 0x01, 0xc5, + 0x0e, 0x63, 0xc6, 0x25, 0xc8, 0x1b, 0xb2, 0x89, 0x1e, 0xc0, 0xb4, 0x10, 0xc0, 0x6b, 0xe4, 0x53, + 0x56, 0xbb, 0xc8, 0x65, 0xf2, 0xf0, 0xdf, 0x35, 0x28, 0x09, 0x45, 0xf7, 0xfa, 0x68, 0x0d, 0xaa, + 0x2e, 0x6f, 0xb4, 0x98, 0x3e, 0x42, 0x22, 0x3d, 0x3d, 0x7a, 0x6c, 0x4e, 0x18, 0x15, 0x31, 0x85, + 0x75, 0xa3, 0xff, 0x83, 0xb2, 0x64, 0xd1, 0x1f, 0xf8, 0xc2, 0xe4, 0x8d, 0x28, 0x83, 0x70, 0xe7, + 0x6c, 0x4e, 0x18, 0x20, 0xc8, 0x9f, 0x0e, 0x7c, 0x74, 0x00, 0x73, 0x72, 0x32, 0xd7, 0x46, 0x88, + 0x91, 0x67, 0x5c, 0x16, 0xa3, 0x5c, 0x46, 0x97, 0x6a, 0x73, 0xc2, 0x40, 0x62, 0xbe, 0x32, 0xf8, + 0xa8, 0x04, 0x45, 0xd1, 0x8b, 0xff, 0xa1, 0x01, 0x48, 0x83, 0xee, 0xf5, 0xd1, 0x06, 0xd4, 0x5c, + 0xd1, 0x8a, 0x28, 0x7c, 0x25, 0x51, 0x61, 0xb1, 0x0e, 0x13, 0x46, 0x55, 0x4e, 0xe2, 0x2a, 0xbf, + 0x0b, 0x95, 0x80, 0x4b, 0xa8, 0xf3, 0x7c, 0x82, 0xce, 0x01, 0x87, 0xb2, 0x9c, 0x40, 0xb5, 0xfe, + 0x00, 0x2e, 0x06, 0xf3, 0x13, 0xd4, 0xbe, 0x31, 0x46, 0xed, 0x80, 0xe1, 0xac, 0xe4, 0xa0, 0x2a, + 0x0e, 0xf4, 0xac, 0xe1, 0xdd, 0xf8, 0xd7, 0x79, 0x28, 0xae, 0x3b, 0xbd, 0xbe, 0xe9, 0xd2, 0x35, + 0x2a, 0xb8, 0xc4, 0x1b, 0x74, 0x7d, 0xa6, 0x6e, 0x6d, 0xf5, 
0x66, 0x14, 0x41, 0x90, 0xc9, 0xbf, + 0x06, 0x23, 0x35, 0xc4, 0x14, 0x3a, 0x59, 0x1c, 0x2d, 0xb9, 0x57, 0x98, 0x2c, 0x0e, 0x16, 0x31, + 0x45, 0xfa, 0x52, 0x3e, 0xf4, 0x25, 0x1d, 0x8a, 0x43, 0xe2, 0x86, 0xc7, 0xe1, 0xe6, 0x84, 0x21, + 0x3b, 0xd0, 0x3d, 0x98, 0x89, 0x87, 0xe6, 0x29, 0x41, 0x53, 0x6b, 0x47, 0x23, 0xf9, 0x4d, 0xa8, + 0x44, 0xce, 0x87, 0x82, 0xa0, 0x2b, 0xf7, 0x94, 0xe3, 0xe1, 0x92, 0x0c, 0x4a, 0xf4, 0x2c, 0xab, + 0x6c, 0x4e, 0x88, 0xb0, 0x84, 0xff, 0x1f, 0xaa, 0x11, 0x5d, 0x69, 0xf8, 0x6d, 0xbe, 0xff, 0x6c, + 0x6d, 0x87, 0xc7, 0xea, 0xc7, 0x2c, 0x3c, 0x1b, 0x75, 0x8d, 0x86, 0xfc, 0x9d, 0xe6, 0xfe, 0x7e, + 0x3d, 0x87, 0xaa, 0x50, 0xda, 0xdd, 0x3b, 0x68, 0x71, 0xaa, 0x3c, 0x7e, 0x27, 0xe0, 0x20, 0x62, + 0xbd, 0x12, 0xe2, 0x27, 0x94, 0x10, 0xaf, 0xc9, 0x10, 0x9f, 0x0b, 0x43, 0x7c, 0xfe, 0x51, 0x0d, + 0x2a, 0xdc, 0x3e, 0xad, 0x81, 0x4d, 0x8f, 0x99, 0x5f, 0x6a, 0x00, 0x07, 0xa7, 0xb6, 0x0c, 0x40, + 0x2b, 0x50, 0x6c, 0x73, 0xe6, 0x0d, 0x8d, 0xf9, 0xf3, 0xc5, 0x44, 0x93, 0x1b, 0x92, 0x0a, 0xbd, + 0x09, 0x45, 0x6f, 0xd0, 0x6e, 0x13, 0x4f, 0x86, 0xfb, 0xcb, 0xf1, 0x90, 0x22, 0x1c, 0xde, 0x90, + 0x74, 0x74, 0xca, 0x0b, 0xd3, 0xea, 0x0e, 0x58, 0xf0, 0x1f, 0x3f, 0x45, 0xd0, 0xe1, 0x9f, 0x69, + 0x50, 0x66, 0x52, 0x66, 0x8a, 0x63, 0x57, 0xa1, 0xc4, 0x64, 0x20, 0x1d, 0x11, 0xc9, 0xa6, 0x8d, + 0xb0, 0x03, 0xfd, 0x2f, 0x94, 0xe4, 0x0e, 0x96, 0xc1, 0xac, 0x91, 0xcc, 0x76, 0xaf, 0x6f, 0x84, + 0xa4, 0x78, 0x1b, 0x2e, 0x30, 0xab, 0xb4, 0x69, 0x62, 0x29, 0xed, 0xa8, 0xa6, 0x5e, 0x5a, 0x2c, + 0xf5, 0xd2, 0x61, 0xba, 0x7f, 0x7c, 0xe6, 0x59, 0x6d, 0xb3, 0x2b, 0xa4, 0x08, 0xda, 0xf8, 0x1b, + 0x80, 0x54, 0x66, 0x59, 0xd4, 0xc5, 0x55, 0x28, 0x6f, 0x9a, 0xde, 0xb1, 0x10, 0x09, 0x7f, 0x08, + 0x15, 0xde, 0xcc, 0x64, 0x43, 0x04, 0x93, 0xc7, 0xa6, 0x77, 0xcc, 0x04, 0xaf, 0x1a, 0xec, 0x1b, + 0x5f, 0x80, 0x99, 0x7d, 0xdb, 0xec, 0x7b, 0xc7, 0x8e, 0x8c, 0xb5, 0x34, 0xb1, 0xae, 0x87, 0x7d, + 0x99, 0x10, 0xef, 0xc2, 0x8c, 0x4b, 0x7a, 0xa6, 0x65, 0x5b, 0xf6, 0x51, 0xeb, 0xf0, 0xcc, 0x27, + 
0x9e, 0xc8, 0xbb, 0x6b, 0x41, 0xf7, 0x23, 0xda, 0x4b, 0x45, 0x3b, 0xec, 0x3a, 0x87, 0xc2, 0xe3, + 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0x95, 0x0f, 0x4c, 0xbf, 0x2d, 0xad, 0x80, 0xb6, 0xa0, 0x16, 0xf8, + 0x39, 0xeb, 0x11, 0xb2, 0xc4, 0x02, 0x3e, 0x9b, 0x23, 0x33, 0x32, 0x19, 0xf0, 0xab, 0x6d, 0xb5, + 0x83, 0xb1, 0x32, 0xed, 0x36, 0xe9, 0x06, 0xac, 0x72, 0xe9, 0xac, 0x18, 0xa1, 0xca, 0x4a, 0xed, + 0x78, 0x34, 0x13, 0x1e, 0x86, 0xdc, 0x2d, 0x7f, 0x9e, 0x03, 0x34, 0x2a, 0xc3, 0x57, 0xcd, 0x0f, + 0x6e, 0x43, 0xcd, 0xf3, 0x4d, 0xd7, 0x6f, 0xc5, 0x6e, 0x25, 0x55, 0xd6, 0x1b, 0xc4, 0xaa, 0xbb, + 0x30, 0xd3, 0x77, 0x9d, 0x23, 0x97, 0x78, 0x5e, 0xcb, 0x76, 0x7c, 0xeb, 0xc5, 0x99, 0x48, 0x8e, + 0x6a, 0xb2, 0x7b, 0x97, 0xf5, 0xa2, 0x26, 0x14, 0x5f, 0x58, 0x5d, 0x9f, 0xb8, 0x5e, 0x63, 0x6a, + 0x31, 0xbf, 0x54, 0x5b, 0x7d, 0x70, 0x9e, 0xd5, 0x96, 0xdf, 0x63, 0xf4, 0x07, 0x67, 0x7d, 0x62, + 0xc8, 0xb9, 0x6a, 0xda, 0x52, 0x88, 0xa4, 0x2d, 0xb7, 0x01, 0x42, 0x7a, 0x1a, 0xb5, 0x76, 0xf7, + 0x9e, 0x3e, 0x3b, 0xa8, 0x4f, 0xa0, 0x0a, 0x4c, 0xef, 0xee, 0x6d, 0x34, 0x77, 0x9a, 0x34, 0xae, + 0xe1, 0x15, 0x69, 0x1b, 0xd5, 0x86, 0x68, 0x1e, 0xa6, 0x5f, 0xd2, 0x5e, 0x79, 0x6d, 0xcb, 0x1b, + 0x45, 0xd6, 0xde, 0xea, 0xe0, 0xbf, 0x69, 0x50, 0x15, 0xbb, 0x20, 0xd3, 0x56, 0x54, 0x21, 0x72, + 0x11, 0x08, 0x9a, 0x23, 0xf1, 0xdd, 0xd1, 0x11, 0xa9, 0x98, 0x6c, 0x52, 0x77, 0xe7, 0x8b, 0x4d, + 0x3a, 0xc2, 0xac, 0x41, 0x1b, 0xdd, 0x83, 0x7a, 0x9b, 0xbb, 0x7b, 0xec, 0xd8, 0x31, 0x66, 0x44, + 0x7f, 0xb0, 0x48, 0xb7, 0xa1, 0x40, 0x86, 0xc4, 0xf6, 0xbd, 0x46, 0x99, 0xc5, 0xa6, 0xaa, 0x4c, + 0xb4, 0x9a, 0xb4, 0xd7, 0x10, 0x83, 0xf8, 0x7f, 0xe0, 0xc2, 0x0e, 0xcd, 0x74, 0x1f, 0xbb, 0xa6, + 0xad, 0xe6, 0xcc, 0x07, 0x07, 0x3b, 0xc2, 0x2a, 0xf4, 0x13, 0xd5, 0x20, 0xb7, 0xb5, 0x21, 0x74, + 0xc8, 0x6d, 0x6d, 0xe0, 0x4f, 0x35, 0x40, 0xea, 0xbc, 0x4c, 0x66, 0x8a, 0x31, 0x97, 0xf0, 0xf9, + 0x10, 0x7e, 0x0e, 0xa6, 0x88, 0xeb, 0x3a, 0x2e, 0x33, 0x48, 0xc9, 0xe0, 0x0d, 0x7c, 0x4b, 0xc8, + 0x60, 0x90, 0xa1, 0x73, 0x12, 0xec, 
0x79, 0xce, 0x4d, 0x0b, 0x44, 0xdd, 0x86, 0xd9, 0x08, 0x55, + 0xa6, 0x18, 0x79, 0x17, 0x2e, 0x32, 0x66, 0xdb, 0x84, 0xf4, 0xd7, 0xba, 0xd6, 0x30, 0x15, 0xb5, + 0x0f, 0x97, 0xe2, 0x84, 0x5f, 0xaf, 0x8d, 0xf0, 0x3b, 0x02, 0xf1, 0xc0, 0xea, 0x91, 0x03, 0x67, + 0x27, 0x5d, 0x36, 0x1a, 0xf8, 0xe8, 0x4d, 0x58, 0x1c, 0x26, 0xec, 0x1b, 0xff, 0x4a, 0x83, 0xcb, + 0x23, 0xd3, 0xbf, 0xe6, 0x55, 0x5d, 0x00, 0x38, 0xa2, 0xdb, 0x87, 0x74, 0xe8, 0x00, 0xbf, 0xc3, + 0x29, 0x3d, 0x81, 0x9c, 0x34, 0x76, 0x54, 0x84, 0x9c, 0xc7, 0x50, 0x78, 0xc2, 0xca, 0x27, 0x8a, + 0x56, 0x93, 0x52, 0x2b, 0xdb, 0xec, 0xf1, 0x5b, 0x5d, 0xc9, 0x60, 0xdf, 0xec, 0xe8, 0x24, 0xc4, + 0x7d, 0x66, 0xec, 0xf0, 0x23, 0xba, 0x64, 0x04, 0x6d, 0x8a, 0xde, 0xee, 0x5a, 0xc4, 0xf6, 0xd9, + 0xe8, 0x24, 0x1b, 0x55, 0x7a, 0xf0, 0x32, 0xd4, 0x39, 0xd2, 0x5a, 0xa7, 0xa3, 0x1c, 0xd3, 0x01, + 0x3f, 0x2d, 0xca, 0x0f, 0xbf, 0x84, 0x0b, 0x0a, 0x7d, 0x26, 0xd3, 0xbd, 0x06, 0x05, 0x5e, 0x23, + 0x12, 0x27, 0xc4, 0x5c, 0x74, 0x16, 0x87, 0x31, 0x04, 0x0d, 0xbe, 0x0d, 0xb3, 0xa2, 0x87, 0xf4, + 0x9c, 0xa4, 0x55, 0x67, 0xf6, 0xc1, 0x3b, 0x30, 0x17, 0x25, 0xcb, 0xe4, 0x08, 0x6b, 0x12, 0xf4, + 0x59, 0xbf, 0xa3, 0x1c, 0x38, 0xf1, 0x45, 0x51, 0x0d, 0x96, 0x8b, 0x19, 0x2c, 0x10, 0x48, 0xb2, + 0xc8, 0x24, 0xd0, 0xac, 0x34, 0xff, 0x8e, 0xe5, 0x05, 0x69, 0xc5, 0x27, 0x80, 0xd4, 0xce, 0x4c, + 0x8b, 0xb2, 0x0c, 0x45, 0x6e, 0x70, 0x99, 0xb9, 0x26, 0xaf, 0x8a, 0x24, 0xa2, 0x02, 0x6d, 0x90, + 0x17, 0xae, 0x79, 0xd4, 0x23, 0x41, 0x64, 0xa5, 0xf9, 0x9a, 0xda, 0x99, 0x49, 0xe3, 0x3f, 0x68, + 0x50, 0x59, 0xeb, 0x9a, 0x6e, 0x4f, 0x1a, 0xff, 0x5d, 0x28, 0xf0, 0x44, 0x50, 0xdc, 0x9d, 0xee, + 0x44, 0xd9, 0xa8, 0xb4, 0xbc, 0xb1, 0xc6, 0xd3, 0x46, 0x31, 0x8b, 0x2e, 0x96, 0x28, 0x4d, 0x6e, + 0xc4, 0x4a, 0x95, 0x1b, 0xe8, 0x75, 0x98, 0x32, 0xe9, 0x14, 0xe6, 0xbf, 0xb5, 0x78, 0x0a, 0xce, + 0xb8, 0xb1, 0x43, 0x9b, 0x53, 0xe1, 0xb7, 0xa1, 0xac, 0x20, 0xd0, 0x9b, 0xc5, 0xe3, 0xa6, 0x38, + 0x98, 0xd7, 0xd6, 0x0f, 0xb6, 0x9e, 0xf3, 0x0b, 0x47, 0x0d, 0x60, 0xa3, 
0x19, 0xb4, 0x73, 0xf8, + 0x43, 0x31, 0x4b, 0x78, 0xb8, 0x2a, 0x8f, 0x96, 0x26, 0x4f, 0xee, 0x95, 0xe4, 0x39, 0x85, 0xaa, + 0x50, 0x3f, 0xd3, 0x1e, 0x78, 0x13, 0x0a, 0x8c, 0x9f, 0xdc, 0x02, 0xf3, 0x09, 0xb0, 0xd2, 0x3b, + 0x39, 0x21, 0x9e, 0x81, 0xea, 0xbe, 0x6f, 0xfa, 0x03, 0x4f, 0x6e, 0x81, 0xdf, 0x6b, 0x50, 0x93, + 0x3d, 0x59, 0xcb, 0x2c, 0xf2, 0x7a, 0xca, 0x63, 0x5e, 0x70, 0x39, 0xbd, 0x04, 0x85, 0xce, 0xe1, + 0xbe, 0xf5, 0x89, 0x2c, 0x66, 0x89, 0x16, 0xed, 0xef, 0x72, 0x1c, 0x5e, 0x50, 0x16, 0x2d, 0x7a, + 0xd1, 0x71, 0xcd, 0x17, 0xfe, 0x96, 0xdd, 0x21, 0xa7, 0x2c, 0x9f, 0x98, 0x34, 0xc2, 0x0e, 0x76, + 0x37, 0x11, 0x85, 0x67, 0x96, 0x7f, 0xa9, 0x85, 0xe8, 0x59, 0xb8, 0xb0, 0x36, 0xf0, 0x8f, 0x9b, + 0xb6, 0x79, 0xd8, 0x95, 0x41, 0x00, 0xcf, 0x01, 0xa2, 0x9d, 0x1b, 0x96, 0xa7, 0xf6, 0x36, 0x61, + 0x96, 0xf6, 0x12, 0xdb, 0xb7, 0xda, 0x4a, 0xc4, 0x90, 0x61, 0x5b, 0x8b, 0x85, 0x6d, 0xd3, 0xf3, + 0x5e, 0x3a, 0x6e, 0x47, 0xa8, 0x16, 0xb4, 0xf1, 0x06, 0x67, 0xfe, 0xcc, 0x8b, 0x04, 0xe6, 0xaf, + 0xca, 0x65, 0x29, 0xe4, 0xf2, 0x98, 0xf8, 0x63, 0xb8, 0xe0, 0x07, 0x70, 0x51, 0x52, 0x8a, 0xfa, + 0xc5, 0x18, 0xe2, 0x3d, 0xb8, 0x26, 0x89, 0xd7, 0x8f, 0x69, 0x56, 0xfd, 0x54, 0x00, 0xfe, 0xa7, + 0x72, 0x3e, 0x82, 0x46, 0x20, 0x27, 0xcb, 0xb4, 0x9c, 0xae, 0x2a, 0xc0, 0xc0, 0x13, 0x7b, 0xa6, + 0x64, 0xb0, 0x6f, 0xda, 0xe7, 0x3a, 0xdd, 0xe0, 0x10, 0xa4, 0xdf, 0x78, 0x1d, 0xe6, 0x25, 0x0f, + 0x91, 0x03, 0x45, 0x99, 0x8c, 0x08, 0x94, 0xc4, 0x44, 0x18, 0x8c, 0x4e, 0x1d, 0x6f, 0x76, 0x95, + 0x32, 0x6a, 0x5a, 0xc6, 0x53, 0x53, 0x78, 0x5e, 0xe4, 0x3b, 0x82, 0x0a, 0xa6, 0x06, 0x6d, 0xd1, + 0x4d, 0x19, 0xa8, 0xdd, 0x62, 0x21, 0x68, 0xf7, 0xc8, 0x42, 0x8c, 0xb0, 0xfe, 0x08, 0x16, 0x02, + 0x21, 0xa8, 0xdd, 0x9e, 0x12, 0xb7, 0x67, 0x79, 0x9e, 0x72, 0xe3, 0x4e, 0x52, 0xfc, 0x0e, 0x4c, + 0xf6, 0x89, 0x88, 0x29, 0xe5, 0x55, 0xb4, 0xcc, 0x9f, 0x87, 0x96, 0x95, 0xc9, 0x6c, 0x1c, 0x77, + 0xe0, 0xba, 0xe4, 0xce, 0x2d, 0x9a, 0xc8, 0x3e, 0x2e, 0x94, 0xbc, 0x8d, 0x71, 0xb3, 0x8e, 0xde, + 0xc6, 0xf2, 
0x7c, 0xed, 0xe5, 0x6d, 0x8c, 0x9e, 0x15, 0xaa, 0x6f, 0x65, 0x3a, 0x2b, 0xb6, 0xb9, + 0x4d, 0x03, 0x97, 0xcc, 0xc4, 0xec, 0x10, 0xe6, 0xa2, 0x9e, 0x9c, 0x29, 0x8c, 0xcd, 0xc1, 0x94, + 0xef, 0x9c, 0x10, 0x19, 0xc4, 0x78, 0x43, 0x0a, 0x1c, 0xb8, 0x79, 0x26, 0x81, 0xcd, 0x90, 0x19, + 0xdb, 0x92, 0x59, 0xe5, 0xa5, 0xab, 0x29, 0xf3, 0x19, 0xde, 0xc0, 0xbb, 0x70, 0x29, 0x1e, 0x26, + 0x32, 0x89, 0xfc, 0x9c, 0x6f, 0xe0, 0xa4, 0x48, 0x92, 0x89, 0xef, 0xfb, 0x61, 0x30, 0x50, 0x02, + 0x4a, 0x26, 0x96, 0x06, 0xe8, 0x49, 0xf1, 0xe5, 0xbf, 0xb1, 0x5f, 0x83, 0x70, 0x93, 0x89, 0x99, + 0x17, 0x32, 0xcb, 0xbe, 0xfc, 0x61, 0x8c, 0xc8, 0x8f, 0x8d, 0x11, 0xc2, 0x49, 0xc2, 0x28, 0xf6, + 0x35, 0x6c, 0x3a, 0x81, 0x11, 0x06, 0xd0, 0xac, 0x18, 0xf4, 0x0c, 0x09, 0x30, 0x58, 0x43, 0x6e, + 0x6c, 0x35, 0xec, 0x66, 0x5a, 0x8c, 0x0f, 0xc2, 0xd8, 0x39, 0x12, 0x99, 0x33, 0x31, 0xfe, 0x10, + 0x16, 0xd3, 0x83, 0x72, 0x16, 0xce, 0xf7, 0x31, 0x94, 0x82, 0x84, 0x52, 0x79, 0x5a, 0x2d, 0x43, + 0x71, 0x77, 0x6f, 0xff, 0xe9, 0xda, 0x7a, 0xb3, 0xae, 0xad, 0xfe, 0x33, 0x0f, 0xb9, 0xed, 0xe7, + 0xe8, 0x5b, 0x30, 0xc5, 0x1f, 0x5e, 0xc6, 0xbc, 0x4b, 0xe9, 0xe3, 0x9e, 0x70, 0xf0, 0xd5, 0x4f, + 0xff, 0xf4, 0xd7, 0x2f, 0x72, 0x97, 0xf0, 0x85, 0x95, 0xe1, 0x5b, 0x66, 0xb7, 0x7f, 0x6c, 0xae, + 0x9c, 0x0c, 0x57, 0xd8, 0x99, 0xf0, 0x50, 0xbb, 0x8f, 0x9e, 0x43, 0xfe, 0xe9, 0xc0, 0x47, 0xa9, + 0x8f, 0x56, 0x7a, 0xfa, 0xd3, 0x0e, 0xd6, 0x19, 0xe7, 0x39, 0x3c, 0xa3, 0x72, 0xee, 0x0f, 0x7c, + 0xca, 0x77, 0x08, 0x65, 0xe5, 0x75, 0x06, 0x9d, 0xfb, 0x9c, 0xa5, 0x9f, 0xff, 0xf2, 0x83, 0x31, + 0xc3, 0xbb, 0x8a, 0x2f, 0xab, 0x78, 0xfc, 0x11, 0x49, 0xd5, 0xe7, 0xe0, 0xd4, 0x8e, 0xeb, 0x13, + 0x3e, 0x30, 0xc4, 0xf5, 0x51, 0x8a, 0xfa, 0xc9, 0xfa, 0xf8, 0xa7, 0x36, 0xe5, 0xeb, 0x88, 0x17, + 0xa5, 0xb6, 0x8f, 0xae, 0x27, 0xbc, 0x48, 0xa8, 0xb5, 0x77, 0x7d, 0x31, 0x9d, 0x40, 0x20, 0xdd, + 0x60, 0x48, 0x57, 0xf0, 0x25, 0x15, 0xa9, 0x1d, 0xd0, 0x3d, 0xd4, 0xee, 0xaf, 0x1e, 0xc3, 0x14, + 0xab, 0x18, 0xa2, 0x96, 0xfc, 0xd0, 0x13, 0x6a, 
0x9d, 0x29, 0x3b, 0x20, 0x52, 0x6b, 0xc4, 0xf3, + 0x0c, 0x6d, 0x16, 0xd7, 0x02, 0x34, 0x56, 0x34, 0x7c, 0xa8, 0xdd, 0x5f, 0xd2, 0xde, 0xd0, 0x56, + 0xbf, 0x3f, 0x09, 0x53, 0xac, 0x52, 0x83, 0xfa, 0x00, 0x61, 0x0d, 0x2e, 0xae, 0xe7, 0x48, 0x55, + 0x2f, 0xae, 0xe7, 0x68, 0xf9, 0x0e, 0x5f, 0x67, 0xc8, 0xf3, 0x78, 0x2e, 0x40, 0x66, 0xaf, 0xe0, + 0x2b, 0xac, 0x26, 0x43, 0xcd, 0xfa, 0x12, 0xca, 0x4a, 0x2d, 0x0d, 0x25, 0x71, 0x8c, 0x14, 0xe3, + 0xe2, 0xdb, 0x24, 0xa1, 0x10, 0x87, 0x6f, 0x32, 0xd0, 0x6b, 0xb8, 0xa1, 0x1a, 0x97, 0xe3, 0xba, + 0x8c, 0x92, 0x02, 0x7f, 0xa6, 0x41, 0x2d, 0x5a, 0x4f, 0x43, 0x37, 0x13, 0x58, 0xc7, 0xcb, 0x72, + 0xfa, 0xad, 0xf1, 0x44, 0xa9, 0x22, 0x70, 0xfc, 0x13, 0x42, 0xfa, 0x26, 0xa5, 0x14, 0xb6, 0x47, + 0x3f, 0xd0, 0x60, 0x26, 0x56, 0x25, 0x43, 0x49, 0x10, 0x23, 0x35, 0x38, 0xfd, 0xf6, 0x39, 0x54, + 0x42, 0x92, 0xbb, 0x4c, 0x92, 0x1b, 0xf8, 0xea, 0xa8, 0x31, 0x7c, 0xab, 0x47, 0x7c, 0x47, 0x48, + 0xb3, 0xfa, 0xaf, 0x3c, 0x14, 0xd7, 0xf9, 0xaf, 0x8c, 0x90, 0x0f, 0xa5, 0xa0, 0xf2, 0x84, 0x16, + 0x92, 0xaa, 0x12, 0x61, 0xca, 0xae, 0x5f, 0x4f, 0x1d, 0x17, 0x22, 0xdc, 0x61, 0x22, 0x2c, 0xe2, + 0x2b, 0x81, 0x08, 0xe2, 0xd7, 0x4c, 0x2b, 0xfc, 0xf2, 0xbd, 0x62, 0x76, 0x3a, 0x74, 0x49, 0xbe, + 0xa7, 0x41, 0x45, 0x2d, 0x28, 0xa1, 0x1b, 0x89, 0xf5, 0x10, 0xb5, 0x26, 0xa5, 0xe3, 0x71, 0x24, + 0x02, 0xff, 0x1e, 0xc3, 0xbf, 0x89, 0x17, 0xd2, 0xf0, 0x5d, 0x46, 0x1f, 0x15, 0x81, 0x97, 0x90, + 0x92, 0x45, 0x88, 0x54, 0xa8, 0x92, 0x45, 0x88, 0x56, 0xa0, 0xce, 0x17, 0x61, 0xc0, 0xe8, 0xa9, + 0x08, 0xa7, 0x00, 0x61, 0x85, 0x09, 0x25, 0x1a, 0x57, 0xb9, 0xc4, 0xc4, 0x7d, 0x70, 0xb4, 0x38, + 0x95, 0xb0, 0x03, 0x62, 0xd8, 0x5d, 0xcb, 0xa3, 0xbe, 0xb8, 0xfa, 0xdb, 0x49, 0x28, 0x3f, 0x31, + 0x2d, 0xdb, 0x27, 0xb6, 0x69, 0xb7, 0x09, 0x3a, 0x82, 0x29, 0x76, 0x4a, 0xc5, 0x03, 0x8f, 0x5a, + 0xf6, 0x89, 0x07, 0x9e, 0x48, 0x4d, 0x04, 0xdf, 0x66, 0xd0, 0xd7, 0xb1, 0x1e, 0x40, 0xf7, 0x42, + 0xfe, 0x2b, 0xac, 0x9e, 0x41, 0x55, 0x3e, 0x81, 0x02, 0xaf, 0x5f, 0xa0, 0x18, 0xb7, 
0x48, 0x9d, + 0x43, 0xbf, 0x9a, 0x3c, 0x98, 0xba, 0xcb, 0x54, 0x2c, 0x8f, 0x11, 0x53, 0xb0, 0x6f, 0x03, 0x84, + 0x05, 0xb3, 0xb8, 0x7d, 0x47, 0xea, 0x6b, 0xfa, 0x62, 0x3a, 0x81, 0x00, 0xbe, 0xcf, 0x80, 0x6f, + 0xe1, 0xeb, 0x89, 0xc0, 0x9d, 0x60, 0x02, 0x05, 0x6f, 0xc3, 0xe4, 0xa6, 0xe9, 0x1d, 0xa3, 0xd8, + 0x21, 0xa4, 0xbc, 0x92, 0xea, 0x7a, 0xd2, 0x90, 0x80, 0xba, 0xc5, 0xa0, 0x16, 0xf0, 0x7c, 0x22, + 0xd4, 0xb1, 0xe9, 0xd1, 0x98, 0x8e, 0x06, 0x30, 0x2d, 0x5f, 0x3e, 0xd1, 0xb5, 0x98, 0xcd, 0xa2, + 0xaf, 0xa4, 0xfa, 0x42, 0xda, 0xb0, 0x00, 0x5c, 0x62, 0x80, 0x18, 0x5f, 0x4b, 0x36, 0xaa, 0x20, + 0x7f, 0xa8, 0xdd, 0x7f, 0x43, 0x5b, 0xfd, 0x51, 0x1d, 0x26, 0x69, 0xbe, 0x44, 0x4f, 0x91, 0xf0, + 0x9a, 0x19, 0xb7, 0xf0, 0x48, 0x71, 0x27, 0x6e, 0xe1, 0xd1, 0x1b, 0x6a, 0xc2, 0x29, 0xc2, 0x7e, + 0x6b, 0x49, 0x18, 0x15, 0xd5, 0xd8, 0x87, 0xb2, 0x72, 0x19, 0x45, 0x09, 0x1c, 0xa3, 0xa5, 0xa3, + 0xf8, 0x29, 0x92, 0x70, 0x93, 0xc5, 0x8b, 0x0c, 0x54, 0xc7, 0x17, 0xa3, 0xa0, 0x1d, 0x4e, 0x46, + 0x51, 0xbf, 0x03, 0x15, 0xf5, 0xd6, 0x8a, 0x12, 0x98, 0xc6, 0x6a, 0x53, 0xf1, 0x58, 0x91, 0x74, + 0xe9, 0x4d, 0x70, 0x9a, 0xe0, 0x97, 0xa5, 0x92, 0x96, 0xa2, 0x7f, 0x0c, 0x45, 0x71, 0x97, 0x4d, + 0xd2, 0x37, 0x5a, 0xcd, 0x4a, 0xd2, 0x37, 0x76, 0x11, 0x4e, 0x48, 0x49, 0x18, 0x2c, 0xcd, 0xd9, + 0x65, 0x80, 0x16, 0x90, 0x8f, 0x89, 0x9f, 0x06, 0x19, 0xd6, 0x67, 0xd2, 0x20, 0x95, 0xfb, 0xd2, + 0x58, 0xc8, 0x23, 0xe2, 0x8b, 0xbd, 0x2c, 0x2f, 0x23, 0x28, 0x85, 0xa3, 0x1a, 0x0d, 0xf1, 0x38, + 0x92, 0xd4, 0x2c, 0x32, 0x44, 0x15, 0xa1, 0x10, 0x7d, 0x17, 0x20, 0xbc, 0x78, 0xc7, 0x13, 0x83, + 0xc4, 0xea, 0x5d, 0x3c, 0x31, 0x48, 0xbe, 0xbb, 0x27, 0x78, 0x70, 0x08, 0xce, 0x33, 0x59, 0x0a, + 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x48, 0x86, 0x48, 0x2c, 0x0c, 0xea, 0xaf, 0xbd, + 0x1a, 0x71, 0x6a, 0xf4, 0x0c, 0xe5, 0x6a, 0xb3, 0x29, 0xfd, 0x97, 0x54, 0xb2, 0xcf, 0x35, 0xa8, + 0x46, 0xae, 0xfa, 0xe8, 0x4e, 0xca, 0x3a, 0xc7, 0x8a, 0x8b, 0xfa, 0xdd, 0x73, 0xe9, 0x52, 0x73, + 0x27, 0x65, 0x57, 0xc8, 
0xbc, 0xf1, 0x87, 0x1a, 0xd4, 0xa2, 0xf5, 0x01, 0x94, 0x02, 0x30, 0x52, + 0xa1, 0xd4, 0x97, 0xce, 0x27, 0x7c, 0x85, 0xd5, 0x0a, 0x53, 0xc9, 0x8f, 0xa1, 0x28, 0xca, 0x0a, + 0x49, 0x6e, 0x11, 0x2d, 0x70, 0x26, 0xb9, 0x45, 0xac, 0x26, 0x91, 0xe6, 0x16, 0xf4, 0x86, 0xae, + 0x78, 0xa2, 0x28, 0x3e, 0xa4, 0x41, 0x8e, 0xf7, 0xc4, 0x58, 0xe5, 0x62, 0x2c, 0x64, 0xe8, 0x89, + 0xb2, 0xf4, 0x80, 0x52, 0x38, 0x9e, 0xe3, 0x89, 0xf1, 0xca, 0x45, 0x9a, 0x27, 0x32, 0x54, 0xc5, + 0x13, 0xc3, 0x4a, 0x41, 0x92, 0x27, 0x8e, 0x94, 0x6f, 0x93, 0x3c, 0x71, 0xb4, 0xd8, 0x90, 0xb6, + 0xb6, 0x0c, 0x3c, 0xe2, 0x89, 0xb3, 0x09, 0x95, 0x05, 0xf4, 0x5a, 0x8a, 0x4d, 0x13, 0x4b, 0xc3, + 0xfa, 0xeb, 0xaf, 0x48, 0x3d, 0xde, 0x03, 0xf8, 0x6a, 0x48, 0x0f, 0xf8, 0x85, 0x06, 0x73, 0x49, + 0xa5, 0x09, 0x94, 0x02, 0x96, 0x52, 0x57, 0xd6, 0x97, 0x5f, 0x95, 0xfc, 0x15, 0xec, 0x16, 0xf8, + 0xc4, 0xa3, 0xfa, 0xef, 0xbe, 0x5c, 0xd0, 0xfe, 0xf8, 0xe5, 0x82, 0xf6, 0xe7, 0x2f, 0x17, 0xb4, + 0x9f, 0xfe, 0x65, 0x61, 0xe2, 0xb0, 0xc0, 0xfe, 0xc3, 0xc3, 0x5b, 0xff, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x73, 0x7e, 0xb4, 0xb4, 0x77, 0x31, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go similarity index 69% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go rename to vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go index 02a23b78c10..473ad582ef8 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go @@ -1,15 +1,15 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// Code generated by protoc-gen-grpc-gateway // source: etcdserver/etcdserverpb/rpc.proto +// DO NOT EDIT! /* Package etcdserverpb is a reverse proxy. It translates gRPC into RESTful JSON APIs. 
*/ -package gw +package etcdserverpb import ( - "github.com/coreos/etcd/etcdserver/etcdserverpb" "io" "net/http" @@ -20,21 +20,19 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader -var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.RangeRequest +func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RangeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -42,12 +40,12 @@ func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client } -func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.PutRequest +func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq PutRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, 
"%v", err) } msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -55,12 +53,12 @@ func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client e } -func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.DeleteRangeRequest +func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRangeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -68,12 +66,12 @@ func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.TxnRequest +func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq TxnRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ 
-81,12 +79,12 @@ func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client e } -func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.CompactionRequest +func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CompactionRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -94,7 +92,7 @@ func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, clie } -func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) { +func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client WatchClient, req *http.Request, pathParams map[string]string) (Watch_WatchClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata stream, err := client.Watch(ctx) if err != nil { @@ -103,7 +101,7 @@ func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, cli } dec := marshaler.NewDecoder(req.Body) handleSend := func() error { - var protoReq etcdserverpb.WatchRequest + var protoReq WatchRequest err = dec.Decode(&protoReq) if err == io.EOF { return err @@ -146,12 +144,12 @@ func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, cli return stream, metadata, nil } -func 
request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.LeaseGrantRequest +func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq LeaseGrantRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -159,12 +157,12 @@ func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.LeaseRevokeRequest +func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq LeaseRevokeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -172,7 +170,7 @@ func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshale } -func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler 
runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { +func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata stream, err := client.LeaseKeepAlive(ctx) if err != nil { @@ -181,7 +179,7 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh } dec := marshaler.NewDecoder(req.Body) handleSend := func() error { - var protoReq etcdserverpb.LeaseKeepAliveRequest + var protoReq LeaseKeepAliveRequest err = dec.Decode(&protoReq) if err == io.EOF { return err @@ -224,12 +222,12 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh return stream, metadata, nil } -func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.LeaseTimeToLiveRequest +func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq LeaseTimeToLiveRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -237,12 +235,12 @@ func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Mars } -func request_Cluster_MemberAdd_0(ctx context.Context, marshaler 
runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.MemberAddRequest +func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq MemberAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -250,12 +248,12 @@ func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshale } -func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.MemberRemoveRequest +func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq MemberRemoveRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -263,12 +261,12 @@ func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marsh } -func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client 
etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.MemberUpdateRequest +func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq MemberUpdateRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -276,12 +274,12 @@ func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marsh } -func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.MemberListRequest +func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq MemberListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -289,12 +287,12 @@ func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshal } -func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AlarmRequest +func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AlarmRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -302,12 +300,12 @@ func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshale } -func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.StatusRequest +func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq StatusRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -315,12 +313,12 @@ func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshal } -func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) 
(proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.DefragmentRequest +func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DefragmentRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -328,12 +326,12 @@ func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Mar } -func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.HashRequest +func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq HashRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -341,12 +339,12 @@ func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, 
runtime.ServerMetadata, error) { - var protoReq etcdserverpb.SnapshotRequest +func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (Maintenance_SnapshotClient, runtime.ServerMetadata, error) { + var protoReq SnapshotRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.Snapshot(ctx, &protoReq) @@ -362,12 +360,12 @@ func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marsh } -func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthEnableRequest +func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthEnableRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -375,12 +373,12 @@ func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthDisableRequest +func request_Auth_AuthDisable_0(ctx 
context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthDisableRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -388,12 +386,12 @@ func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthenticateRequest +func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthenticateRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -401,12 +399,12 @@ func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshale } -func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthUserAddRequest +func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthUserAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -414,12 +412,12 @@ func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthUserGetRequest +func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthUserGetRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -427,12 +425,12 @@ func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthUserListRequest +func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, 
error) { + var protoReq AuthUserListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -440,12 +438,12 @@ func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, c } -func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthUserDeleteRequest +func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthUserDeleteRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -453,12 +451,12 @@ func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthUserChangePasswordRequest +func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq 
AuthUserChangePasswordRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -466,12 +464,12 @@ func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Ma } -func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthUserGrantRoleRequest +func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthUserGrantRoleRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -479,12 +477,12 @@ func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshal } -func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthUserRevokeRoleRequest +func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq 
AuthUserRevokeRoleRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -492,12 +490,12 @@ func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marsha } -func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthRoleAddRequest +func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthRoleAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -505,12 +503,12 @@ func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthRoleGetRequest +func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthRoleGetRequest var metadata runtime.ServerMetadata if err := 
marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -518,12 +516,12 @@ func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthRoleListRequest +func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthRoleListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -531,12 +529,12 @@ func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, c } -func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthRoleDeleteRequest +func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthRoleDeleteRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return 
nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -544,12 +542,12 @@ func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthRoleGrantPermissionRequest +func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthRoleGrantPermissionRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -557,12 +555,12 @@ func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.M } -func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq etcdserverpb.AuthRoleRevokePermissionRequest +func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AuthRoleRevokePermissionRequest var metadata runtime.ServerMetadata if err := 
marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -598,15 +596,7 @@ func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, e // RegisterKVHandler registers the http handlers for service KV to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn)) -} - -// RegisterKVHandler registers the http handlers for service KV to "mux". -// The handlers forward requests to the grpc endpoint over the given implementation of "KVClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "KVClient" to call the correct interceptors. 
-func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error { + client := NewKVClient(conn) mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -621,19 +611,18 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Range_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -650,19 +639,18 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Put_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -679,19 +667,18 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_DeleteRange_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -708,19 +695,18 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Txn_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -737,19 +723,18 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Compact_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -808,15 +793,7 @@ func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux // RegisterWatchHandler registers the http handlers for service Watch to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn)) -} - -// RegisterWatchHandler registers the http handlers for service Watch to "mux". -// The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "WatchClient" to call the correct interceptors. -func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error { + client := NewWatchClient(conn) mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -831,19 +808,18 @@ func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Watch_Watch_0(ctx, mux, outboundMarshaler, 
w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_Watch_Watch_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -886,15 +862,7 @@ func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux // RegisterLeaseHandler registers the http handlers for service Lease to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn)) -} - -// RegisterLeaseHandler registers the http handlers for service Lease to "mux". -// The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "LeaseClient" to call the correct interceptors. 
-func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error { + client := NewLeaseClient(conn) mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -909,19 +877,18 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseGrant_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -938,19 +905,18 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseRevoke_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -967,19 +933,18 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ forward_Lease_LeaseKeepAlive_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -996,19 +961,18 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseTimeToLive_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1063,15 +1027,7 @@ func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeM // RegisterClusterHandler registers the http handlers for service Cluster to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn)) -} - -// RegisterClusterHandler registers the http handlers for service Cluster to "mux". -// The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) 
then it will be up to the passed in -// "ClusterClient" to call the correct interceptors. -func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error { + client := NewClusterClient(conn) mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1086,19 +1042,18 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1115,19 +1070,18 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberRemove_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1144,19 +1098,18 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberUpdate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1173,19 +1126,18 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1240,15 +1192,7 @@ func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.Se // RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn)) -} - -// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". -// The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "MaintenanceClient" to call the correct interceptors. 
-func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error { + client := NewMaintenanceClient(conn) mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1263,19 +1207,18 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Alarm_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1292,19 +1235,18 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Status_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1321,19 +1263,18 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Defragment_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1350,19 +1291,18 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Hash_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1379,19 +1319,18 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ forward_Maintenance_Snapshot_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1450,15 +1389,7 @@ func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, // RegisterAuthHandler registers the http handlers for service Auth to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn)) -} - -// RegisterAuthHandler registers the http handlers for service Auth to "mux". -// The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "AuthClient" to call the correct interceptors. 
-func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error { + client := NewAuthClient(conn) mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1473,19 +1404,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthEnable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1502,19 +1432,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthDisable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1531,19 +1460,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_Authenticate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1560,19 +1488,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1589,19 +1516,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1618,19 +1544,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1647,19 +1572,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1676,19 +1600,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserChangePassword_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1705,19 +1628,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGrantRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1734,19 +1656,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserRevokeRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1763,19 +1684,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1792,19 +1712,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1821,19 +1740,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1850,19 +1768,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1879,19 +1796,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGrantPermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1908,19 +1824,18 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) } resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) return } - forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleRevokePermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto index a6cd00ab7c3..ddf1ad23329 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto @@ -352,12 +352,11 @@ message RangeRequest { bytes key = 1; // range_end is the upper bound on the requested range [key, range_end). // If range_end is '\0', the range is all keys >= key. - // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), - // then the range request gets all keys prefixed with key. - // If both key and range_end are '\0', then the range request returns all keys. + // If the range_end is one bit larger than the given key, + // then the range requests get the all keys with the prefix (the given key). + // If both key and range_end are '\0', then range requests returns all keys. 
bytes range_end = 2; - // limit is a limit on the number of keys returned for the request. When limit is set to 0, - // it is treated as no limit. + // limit is a limit on the number of keys returned for the request. int64 limit = 3; // revision is the point-in-time of the key-value store to use for the range. // If revision is less or equal to zero, the range is over the newest key-value store. @@ -424,14 +423,6 @@ message PutRequest { // If prev_kv is set, etcd gets the previous key-value pair before changing it. // The previous key-value pair will be returned in the put response. bool prev_kv = 4; - - // If ignore_value is set, etcd updates the key using its current value. - // Returns an error if the key does not exist. - bool ignore_value = 5; - - // If ignore_lease is set, etcd updates the key using its current lease. - // Returns an error if the key does not exist. - bool ignore_lease = 6; } message PutResponse { @@ -445,13 +436,13 @@ message DeleteRangeRequest { bytes key = 1; // range_end is the key following the last key to delete for the range [key, range_end). // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all the keys - // with the prefix (the given key). + // If range_end is one bit larger than the given key, then the range is all + // the all keys with the prefix (the given key). // If range_end is '\0', the range is all keys greater than or equal to the key argument. bytes range_end = 2; // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delete response. + // The previous key-value pairs will be returned in the delte response. bool prev_kv = 3; } @@ -654,9 +645,6 @@ message WatchResponse { // watcher with the same start_revision again. int64 compact_revision = 5; - // cancel_reason indicates the reason for canceling the watcher. 
- string cancel_reason = 6; - repeated mvccpb.Event events = 11; } @@ -737,8 +725,6 @@ message MemberAddResponse { ResponseHeader header = 1; // member is the member information for the added member. Member member = 2; - // members is a list of all members after adding the new member. - repeated Member members = 3; } message MemberRemoveRequest { @@ -748,8 +734,6 @@ message MemberRemoveRequest { message MemberRemoveResponse { ResponseHeader header = 1; - // members is a list of all members after removing the member. - repeated Member members = 2; } message MemberUpdateRequest { @@ -761,8 +745,6 @@ message MemberUpdateRequest { message MemberUpdateResponse{ ResponseHeader header = 1; - // members is a list of all members after updating the member. - repeated Member members = 2; } message MemberListRequest { diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD b/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD index 473575baffc..d9478aa74fc 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD @@ -4,7 +4,6 @@ go_library( name = "go_default_library", srcs = [ "cluster.go", - "doc.go", "errors.go", "member.go", "store.go", diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go index 2330219f18a..25c45dfce12 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go @@ -178,7 +178,7 @@ func (c *RaftCluster) String() string { fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " ")) var ids []string for id := range c.removed { - ids = append(ids, id.String()) + ids = append(ids, fmt.Sprintf("%s", id)) } fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " ")) return b.String() diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go 
b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go deleted file mode 100644 index b07fb2d9285..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package membership describes individual etcd members and clusters of members. -package membership diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go index d3f8f2474a4..f2ea0120d74 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go @@ -36,7 +36,7 @@ const ( var ( membersBucketName = []byte("members") - membersRemovedBucketName = []byte("members_removed") + membersRemovedBuckedName = []byte("members_removed") clusterBucketName = []byte("cluster") StoreMembersPrefix = path.Join(storePrefix, "members") @@ -62,7 +62,7 @@ func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) { tx := be.BatchTx() tx.Lock() tx.UnsafeDelete(membersBucketName, mkey) - tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed")) + tx.UnsafePut(membersRemovedBuckedName, mkey, []byte("removed")) tx.Unlock() } @@ -164,7 +164,7 @@ func mustCreateBackendBuckets(be backend.Backend) { tx.Lock() defer tx.Unlock() tx.UnsafeCreateBucket(membersBucketName) - tx.UnsafeCreateBucket(membersRemovedBucketName) + 
tx.UnsafeCreateBucket(membersRemovedBuckedName) tx.UnsafeCreateBucket(clusterBucketName) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go index 90bbd3632a6..2b549f738f7 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go +++ b/vendor/github.com/coreos/etcd/etcdserver/metrics.go @@ -58,12 +58,6 @@ var ( Name: "proposals_failed_total", Help: "The total number of failed proposals seen.", }) - leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "server", - Name: "lease_expired_total", - Help: "The total number of expired leases.", - }) ) func init() { @@ -73,7 +67,6 @@ func init() { prometheus.MustRegister(proposalsApplied) prometheus.MustRegister(proposalsPending) prometheus.MustRegister(proposalsFailed) - prometheus.MustRegister(leaseExpired) } func monitorFileDescriptor(done <-chan struct{}) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go index 87126f1564c..088a4696253 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/quota.go +++ b/vendor/github.com/coreos/etcd/etcdserver/quota.go @@ -16,15 +16,7 @@ package etcdserver import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -const ( - // DefaultQuotaBytes is the number of bytes the backend Size may - // consume before exceeding the space quota. - DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB - // MaxQuotaBytes is the maximum number of bytes suggested for a backend - // quota. A larger quota may lead to degraded performance. - MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB + "github.com/coreos/etcd/mvcc/backend" ) // Quota represents an arbitrary quota against arbitrary requests. 
Each request @@ -65,10 +57,11 @@ func NewBackendQuota(s *EtcdServer) Quota { } if s.Cfg.QuotaBackendBytes == 0 { // use default size if no quota size given - return &backendQuota{s, DefaultQuotaBytes} + return &backendQuota{s, backend.DefaultQuotaBytes} } - if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { - plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) + if s.Cfg.QuotaBackendBytes > backend.MaxQuotaBytes { + plog.Warningf("backend quota %v exceeds maximum quota %v; using maximum", s.Cfg.QuotaBackendBytes, backend.MaxQuotaBytes) + return &backendQuota{s, backend.MaxQuotaBytes} } return &backendQuota{s, s.Cfg.QuotaBackendBytes} } diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go index dcb894f82fb..d7ec176eb3a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/raft.go +++ b/vendor/github.com/coreos/etcd/etcdserver/raft.go @@ -83,8 +83,7 @@ type RaftTimer interface { type apply struct { entries []raftpb.Entry snapshot raftpb.Snapshot - // notifyc synchronizes etcd server applies with the raft node - notifyc chan struct{} + raftDone <-chan struct{} // rx {} after raft has persisted messages } type raftNode struct { @@ -95,7 +94,14 @@ type raftNode struct { term uint64 lead uint64 - raftNodeConfig + mu sync.Mutex + // last lead elected time + lt time.Time + + // to check if msg receiver is removed from cluster + isIDRemoved func(id uint64) bool + + raft.Node // a chan to send/receive snapshot msgSnapC chan raftpb.Message @@ -107,51 +113,28 @@ type raftNode struct { readStateC chan raft.ReadState // utility - ticker *time.Ticker + ticker <-chan time.Time // contention detectors for raft heartbeat message - td *contention.TimeoutDetector - - stopped chan struct{} - done chan struct{} -} - -type raftNodeConfig struct { - // to check if msg receiver is removed from cluster - isIDRemoved func(id uint64) bool - raft.Node + td 
*contention.TimeoutDetector + heartbeat time.Duration // for logging raftStorage *raft.MemoryStorage storage Storage - heartbeat time.Duration // for logging // transport specifies the transport to send and receive msgs to members. // Sending messages MUST NOT block. It is okay to drop messages, since // clients should timeout and reissue their messages. // If transport is nil, server will panic. transport rafthttp.Transporter -} -func newRaftNode(cfg raftNodeConfig) *raftNode { - r := &raftNode{ - raftNodeConfig: cfg, - // set up contention detectors for raft heartbeat message. - // expect to send a heartbeat within 2 heartbeat intervals. - td: contention.NewTimeoutDetector(2 * cfg.heartbeat), - readStateC: make(chan raft.ReadState, 1), - msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - applyc: make(chan apply), - stopped: make(chan struct{}), - done: make(chan struct{}), - } - if r.heartbeat == 0 { - r.ticker = &time.Ticker{} - } else { - r.ticker = time.NewTicker(r.heartbeat) - } - return r + stopped chan struct{} + done chan struct{} } // start prepares and starts raftNode in a new goroutine. It is no longer safe // to modify the fields after it has been started. 
func (r *raftNode) start(rh *raftReadyHandler) { + r.applyc = make(chan apply) + r.stopped = make(chan struct{}) + r.done = make(chan struct{}) internalTimeout := time.Second go func() { @@ -160,12 +143,14 @@ func (r *raftNode) start(rh *raftReadyHandler) { for { select { - case <-r.ticker.C: + case <-r.ticker: r.Tick() case rd := <-r.Ready(): if rd.SoftState != nil { - newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead - if newLeader { + if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead { + r.mu.Lock() + r.lt = time.Now() + r.mu.Unlock() leaderChanges.Inc() } @@ -177,8 +162,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { atomic.StoreUint64(&r.lead, rd.SoftState.Lead) islead = rd.RaftState == raft.StateLeader - rh.updateLeadership(newLeader) - r.td.Reset() + rh.updateLeadership() } if len(rd.ReadStates) != 0 { @@ -191,11 +175,11 @@ func (r *raftNode) start(rh *raftReadyHandler) { } } - notifyc := make(chan struct{}, 1) + raftDone := make(chan struct{}, 1) ap := apply{ entries: rd.CommittedEntries, snapshot: rd.Snapshot, - notifyc: notifyc, + raftDone: raftDone, } updateCommittedIndex(&ap, rh) @@ -211,7 +195,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { // For more details, check raft thesis 10.2.1 if islead { // gofail: var raftBeforeLeaderSend struct{} - r.transport.Send(r.processMessages(rd.Messages)) + r.sendMessages(rd.Messages) } // gofail: var raftBeforeSave struct{} @@ -228,9 +212,6 @@ func (r *raftNode) start(rh *raftReadyHandler) { if err := r.storage.SaveSnap(rd.Snapshot); err != nil { plog.Fatalf("raft save snapshot error: %v", err) } - // etcdserver now claim the snapshot has been persisted onto the disk - notifyc <- struct{}{} - // gofail: var raftAfterSaveSnap struct{} r.raftStorage.ApplySnapshot(rd.Snapshot) plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) @@ -240,44 +221,10 @@ func (r *raftNode) start(rh 
*raftReadyHandler) { r.raftStorage.Append(rd.Entries) if !islead { - // finish processing incoming messages before we signal raftdone chan - msgs := r.processMessages(rd.Messages) - - // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots - notifyc <- struct{}{} - - // Candidate or follower needs to wait for all pending configuration - // changes to be applied before sending messages. - // Otherwise we might incorrectly count votes (e.g. votes from removed members). - // Also slow machine's follower raft-layer could proceed to become the leader - // on its own single-node cluster, before apply-layer applies the config change. - // We simply wait for ALL pending entries to be applied for now. - // We might improve this later on if it causes unnecessary long blocking issues. - waitApply := false - for _, ent := range rd.CommittedEntries { - if ent.Type == raftpb.EntryConfChange { - waitApply = true - break - } - } - if waitApply { - // blocks until 'applyAll' calls 'applyWait.Trigger' - // to be in sync with scheduled config-change job - // (assume notifyc has cap of 1) - select { - case notifyc <- struct{}{}: - case <-r.stopped: - return - } - } - // gofail: var raftBeforeFollowerSend struct{} - r.transport.Send(msgs) - } else { - // leader already processed 'MsgSnap' and signaled - notifyc <- struct{}{} + r.sendMessages(rd.Messages) } - + raftDone <- struct{}{} r.Advance() case <-r.stopped: return @@ -299,7 +246,7 @@ func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { } } -func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { +func (r *raftNode) sendMessages(ms []raftpb.Message) { sentAppResp := false for i := len(ms) - 1; i >= 0; i-- { if r.isIDRemoved(ms[i].To) { @@ -335,13 +282,20 @@ func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { } } } - return ms + + r.transport.Send(ms) } func (r *raftNode) apply() chan apply { return r.applyc } +func (r *raftNode) leadElectedTime() 
time.Time { + r.mu.Lock() + defer r.mu.Unlock() + return r.lt +} + func (r *raftNode) stop() { r.stopped <- struct{}{} <-r.done @@ -349,7 +303,6 @@ func (r *raftNode) stop() { func (r *raftNode) onStop() { r.Stop() - r.ticker.Stop() r.transport.Stop() if err := r.storage.Close(); err != nil { plog.Panicf("raft close storage error: %v", err) diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go index 271c5e77313..98eb2cc7b29 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/server.go @@ -23,6 +23,7 @@ import ( "net/http" "os" "path" + "path/filepath" "regexp" "sync" "sync/atomic" @@ -40,6 +41,7 @@ import ( "github.com/coreos/etcd/lease" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/contention" "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/idutil" "github.com/coreos/etcd/pkg/pbutil" @@ -60,7 +62,7 @@ import ( ) const ( - DefaultSnapCount = 100000 + DefaultSnapCount = 10000 StoreClusterPrefix = "/0" StoreKeysPrefix = "/1" @@ -75,6 +77,7 @@ const ( // (since it will timeout). monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second + databaseFilename = "db" // max number of in-flight snapshot messages etcdserver allows to have // This number is more than enough for most clusters with 5 machines. maxInFlightMsgSnap = 16 @@ -82,8 +85,7 @@ const ( releaseDelayAfterSnapshot = 30 * time.Second // maxPendingRevokes is the maximum number of outstanding expired lease revocations. - maxPendingRevokes = 16 - recommendedMaxRequestBytes = 10 * 1024 * 1024 + maxPendingRevokes = 16 ) var ( @@ -133,15 +135,15 @@ type Server interface { // AddMember attempts to add a member into the cluster. It will return // ErrIDRemoved if member ID is removed from the cluster, or return // ErrIDExists if member ID exists in the cluster. 
- AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) + AddMember(ctx context.Context, memb membership.Member) error // RemoveMember attempts to remove a member from the cluster. It will // return ErrIDRemoved if member ID is removed from the cluster, or return // ErrIDNotFound if member ID is not in the cluster. - RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) + RemoveMember(ctx context.Context, id uint64) error // UpdateMember attempts to update an existing member in the cluster. It will // return ErrIDNotFound if the member ID does not exist. - UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) + UpdateMember(ctx context.Context, updateMemb membership.Member) error // ClusterVersion is the cluster-wide minimum major.minor version. // Cluster version is set to the min version that an etcd member is @@ -199,8 +201,7 @@ type EtcdServer struct { cluster *membership.RaftCluster - store store.Store - snapshotter *snap.Snapshotter + store store.Store applyV2 ApplierV2 @@ -220,7 +221,7 @@ type EtcdServer struct { stats *stats.ServerStats lstats *stats.LeaderStats - SyncTicker *time.Ticker + SyncTicker <-chan time.Time // compactor is used to auto-compact the KV. compactor *compactor.Periodic @@ -237,14 +238,6 @@ type EtcdServer struct { // wg is used to wait for the go routines that depends on the server state // to exit when stopping the server. wg sync.WaitGroup - - // ctx is used for etcd-initiated requests that may need to be canceled - // on etcd server shutdown. - ctx context.Context - cancel context.CancelFunc - - leadTimeMu sync.RWMutex - leadElectedTime time.Time } // NewServer creates a new EtcdServer from the supplied configuration. 
The @@ -260,10 +253,6 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { cl *membership.RaftCluster ) - if cfg.MaxRequestBytes > recommendedMaxRequestBytes { - plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) - } - if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { return nil, fmt.Errorf("cannot access data directory: %v", terr) } @@ -275,9 +264,23 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { } ss := snap.New(cfg.SnapDir()) - bepath := cfg.backendPath() + bepath := filepath.Join(cfg.SnapDir(), databaseFilename) beExist := fileutil.Exist(bepath) - be := openBackend(cfg) + + var be backend.Backend + beOpened := make(chan struct{}) + go func() { + be = backend.NewDefaultBackend(bepath) + beOpened <- struct{}{} + }() + + select { + case <-beOpened: + case <-time.After(time.Second): + plog.Warningf("another etcd process is running with the same data dir and holding the file lock.") + plog.Warningf("waiting for it to exit before starting...") + <-beOpened + } defer func() { if err != nil { @@ -375,9 +378,6 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { plog.Panicf("recovered store from snapshot error: %v", err) } plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) - if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { - plog.Panicf("recovering backend from snapshot error: %v", err) - } } cfg.Print() if !cfg.ForceNewCluster { @@ -400,32 +400,39 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { return nil, fmt.Errorf("cannot access member directory: %v", terr) } - sstats := stats.NewServerStats(cfg.Name, id.String()) + sstats := &stats.ServerStats{ + Name: cfg.Name, + ID: id.String(), + } + sstats.Initialize() lstats := stats.NewLeaderStats(id.String()) heartbeat := time.Duration(cfg.TickMs) * time.Millisecond srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: 
cfg, - snapCount: cfg.SnapCount, - errorc: make(chan error, 1), - store: st, - snapshotter: ss, - r: *newRaftNode( - raftNodeConfig{ - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - heartbeat: heartbeat, - raftStorage: s, - storage: NewStorage(w, ss), - }, - ), + readych: make(chan struct{}), + Cfg: cfg, + snapCount: cfg.SnapCount, + errorc: make(chan error, 1), + store: st, + r: raftNode{ + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + ticker: time.Tick(heartbeat), + // set up contention detectors for raft heartbeat message. + // expect to send a heartbeat within 2 heartbeat intervals. + td: contention.NewTimeoutDetector(2 * heartbeat), + heartbeat: heartbeat, + raftStorage: s, + storage: NewStorage(w, ss), + msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), + readStateC: make(chan raft.ReadState, 1), + }, id: id, attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, cluster: cl, stats: sstats, lstats: lstats, - SyncTicker: time.NewTicker(500 * time.Millisecond), + SyncTicker: time.Tick(500 * time.Millisecond), peerRt: prt, reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), forceVersionC: make(chan struct{}), @@ -451,26 +458,12 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) } } - newSrv := srv // since srv == nil in defer if srv is returned as nil - defer func() { - // closing backend without first closing kv can cause - // resumed compactions to fail with closed tx errors - if err != nil { - newSrv.kv.Close() - } - }() - srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) - tp, err := auth.NewTokenProvider(cfg.AuthToken, + + srv.authStore = auth.NewAuthStore(srv.be, func(index uint64) <-chan struct{} { return srv.applyWait.Wait(index) - }, - ) - if err != nil { - plog.Errorf("failed to create token provider: %s", 
err) - return nil, err - } - srv.authStore = auth.NewAuthStore(srv.be, tp) + }) if h := cfg.AutoCompactionRetention; h != 0 { srv.compactor = compactor.NewPeriodic(h, srv.kv, srv) srv.compactor.Run() @@ -538,7 +531,6 @@ func (s *EtcdServer) start() { s.done = make(chan struct{}) s.stop = make(chan struct{}) s.stopping = make(chan struct{}) - s.ctx, s.cancel = context.WithCancel(context.Background()) s.readwaitc = make(chan struct{}, 1) s.readNotifier = newNotifier() if s.ClusterVersion() != nil { @@ -611,19 +603,16 @@ type etcdProgress struct { // and helps decouple state machine logic from Raft algorithms. // TODO: add a state machine interface to apply the commit entries and do snapshot/recover type raftReadyHandler struct { - updateLeadership func(newLeader bool) + updateLeadership func() updateCommittedIndex func(uint64) } func (s *EtcdServer) run() { - sn, err := s.r.raftStorage.Snapshot() + snap, err := s.r.raftStorage.Snapshot() if err != nil { plog.Panicf("get snapshot from raft storage error: %v", err) } - // asynchronously accept apply packets, dispatch progress in-order - sched := schedule.NewFIFOScheduler() - var ( smu sync.RWMutex syncC <-chan time.Time @@ -640,7 +629,7 @@ func (s *EtcdServer) run() { return } rh := &raftReadyHandler{ - updateLeadership: func(newLeader bool) { + updateLeadership: func() { if !s.isLeader() { if s.lessor != nil { s.lessor.Demote() @@ -650,13 +639,7 @@ func (s *EtcdServer) run() { } setSyncC(nil) } else { - if newLeader { - t := time.Now() - s.leadTimeMu.Lock() - s.leadElectedTime = t - s.leadTimeMu.Unlock() - } - setSyncC(s.SyncTicker.C) + setSyncC(s.SyncTicker) if s.compactor != nil { s.compactor.Resume() } @@ -667,6 +650,9 @@ func (s *EtcdServer) run() { if s.stats != nil { s.stats.BecomeLeader() } + if s.r.td != nil { + s.r.td.Reset() + } }, updateCommittedIndex: func(ci uint64) { cci := s.getCommittedIndex() @@ -677,26 +663,25 @@ func (s *EtcdServer) run() { } s.r.start(rh) + // asynchronously accept apply packets, 
dispatch progress in-order + sched := schedule.NewFIFOScheduler() ep := etcdProgress{ - confState: sn.Metadata.ConfState, - snapi: sn.Metadata.Index, - appliedt: sn.Metadata.Term, - appliedi: sn.Metadata.Index, + confState: snap.Metadata.ConfState, + snapi: snap.Metadata.Index, + appliedt: snap.Metadata.Term, + appliedi: snap.Metadata.Index, } defer func() { s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping close(s.stopping) s.wgMu.Unlock() - s.cancel() sched.Stop() // wait for gouroutines before closing raft so wal stays open s.wg.Wait() - s.SyncTicker.Stop() - // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines // by adding a peer after raft stops the transport s.r.stop() @@ -743,8 +728,7 @@ func (s *EtcdServer) run() { } lid := lease.ID s.goAttach(func() { - s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) - leaseExpired.Inc() + s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(lid)}) <-c }) } @@ -778,7 +762,7 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { // wait for the raft routine to finish the disk writes before triggering a // snapshot. or applied index might be greater than the last index in raft // storage, since the raft routine might be slower than apply routine. 
- <-apply.notifyc + <-apply.raftDone s.triggerSnapshot(ep) select { @@ -803,19 +787,23 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { apply.snapshot.Metadata.Index, ep.appliedi) } - // wait for raftNode to persist snapshot onto the disk - <-apply.notifyc - - newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) + snapfn, err := s.r.storage.DBFilePath(apply.snapshot.Metadata.Index) if err != nil { - plog.Panic(err) + plog.Panicf("get database snapshot file path error: %v", err) } + fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename) + if err := os.Rename(snapfn, fn); err != nil { + plog.Panicf("rename snapshot file error: %v", err) + } + + newbe := backend.NewDefaultBackend(fn) + // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. if s.lessor != nil { plog.Info("recovering lessor...") - s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() }) + s.lessor.Recover(newbe, s.kv) plog.Info("finished recovering lessor") } @@ -967,7 +955,7 @@ func (s *EtcdServer) TransferLeadership() error { } tm := s.Cfg.ReqTimeout() - ctx, cancel := context.WithTimeout(s.ctx, tm) + ctx, cancel := context.WithTimeout(context.TODO(), tm) err := s.transferLeadership(ctx, s.Lead(), uint64(transferee)) cancel() return err @@ -1027,7 +1015,7 @@ func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { if s.authStore == nil { - // In the context of ordinary etcd process, s.authStore will never be nil. + // In the context of ordinal etcd process, s.authStore will never be nil. 
// This branch is for handling cases in server_test.go return nil } @@ -1038,7 +1026,7 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err // in the state machine layer // However, both of membership change and role management requires the root privilege. // So careful operation by admins can prevent the problem. - authInfo, err := s.AuthInfoFromCtx(ctx) + authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) if err != nil { return err } @@ -1046,27 +1034,27 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err return s.AuthStore().IsAdminPermitted(authInfo) } -func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { +func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) error { if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err + return err } if s.Cfg.StrictReconfigCheck { // by default StrictReconfigCheck is enabled; reject new members if unhealthy if !s.cluster.IsReadyToAddNewMember() { plog.Warningf("not enough started members, rejecting member add %+v", memb) - return nil, ErrNotEnoughStartedMembers + return ErrNotEnoughStartedMembers } if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) - return nil, ErrUnhealthy + return ErrUnhealthy } } // TODO: move Member to protobuf type b, err := json.Marshal(memb) if err != nil { - return nil, err + return err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeAddNode, @@ -1076,14 +1064,14 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]* return s.configure(ctx, cc) } -func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { +func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error { if err := s.checkMembershipOperationPermission(ctx); err 
!= nil { - return nil, err + return err } // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss if err := s.mayRemoveMember(types.ID(id)); err != nil { - return nil, err + return err } cc := raftpb.ConfChange{ @@ -1119,14 +1107,14 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { return nil } -func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { +func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) error { b, merr := json.Marshal(memb) if merr != nil { - return nil, merr + return merr } if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err + return err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeUpdateNode, @@ -1149,34 +1137,31 @@ func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } -type confChangeResponse struct { - membs []*membership.Member - err error -} - // configure sends a configuration change through consensus and // then waits for it to be applied to the server. It // will block until the change is performed or there is an error. 
-func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { +func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error { cc.ID = s.reqIDGen.Next() ch := s.w.Register(cc.ID) start := time.Now() if err := s.r.ProposeConfChange(ctx, cc); err != nil { s.w.Trigger(cc.ID, nil) - return nil, err + return err } select { case x := <-ch: - if x == nil { - plog.Panicf("configure trigger value should never be nil") + if err, ok := x.(error); ok { + return err } - resp := x.(*confChangeResponse) - return resp.membs, resp.err + if x != nil { + plog.Panicf("return type should always be error") + } + return nil case <-ctx.Done(): s.w.Trigger(cc.ID, nil) // GC wait - return nil, s.parseProposeCtxErr(ctx.Err(), start) + return s.parseProposeCtxErr(ctx.Err(), start) case <-s.stopping: - return nil, ErrStopped + return ErrStopped } } @@ -1184,6 +1169,7 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*me // This makes no guarantee that the request will be proposed or performed. // The request will be canceled after the given timeout. func (s *EtcdServer) sync(timeout time.Duration) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) req := pb.Request{ Method: "SYNC", ID: s.reqIDGen.Next(), @@ -1192,7 +1178,6 @@ func (s *EtcdServer) sync(timeout time.Duration) { data := pbutil.MustMarshal(&req) // There is no promise that node has leader when do SYNC request, // so it uses goroutine to propose. 
- ctx, cancel := context.WithTimeout(s.ctx, timeout) s.goAttach(func() { s.r.Propose(ctx, data) cancel() @@ -1217,7 +1202,7 @@ func (s *EtcdServer) publish(timeout time.Duration) { } for { - ctx, cancel := context.WithTimeout(s.ctx, timeout) + ctx, cancel := context.WithTimeout(context.Background(), timeout) _, err := s.Do(ctx, req) cancel() switch err { @@ -1277,7 +1262,7 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appl removedSelf, err := s.applyConfChange(cc, confState) s.setAppliedIndex(e.Index) shouldStop = shouldStop || removedSelf - s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) + s.w.Trigger(cc.ID, err) default: plog.Panicf("entry type should be either EntryNormal or EntryConfChange") } @@ -1362,7 +1347,8 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { Action: pb.AlarmRequest_ACTIVATE, Alarm: pb.AlarmType_NOSPACE, } - s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) + r := pb.InternalRaftRequest{Alarm: a} + s.processInternalRaftRequest(context.TODO(), r) s.w.Trigger(id, ar) }) } @@ -1558,7 +1544,7 @@ func (s *EtcdServer) updateClusterVersion(ver string) { Path: membership.StoreClusterVersionKey(), Val: ver, } - ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) + ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) _, err := s.Do(ctx, req) cancel() switch err { @@ -1577,9 +1563,7 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { case context.Canceled: return ErrCanceled case context.DeadlineExceeded: - s.leadTimeMu.RLock() - curLeadElected := s.leadElectedTime - s.leadTimeMu.RUnlock() + curLeadElected := s.r.leadElectedTime() prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) if start.After(prevLeadLost) && start.Before(curLeadElected) { return ErrTimeoutDueToLeaderFail diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go 
b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go index 928aa95b6b1..9cfc852168b 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go +++ b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go @@ -60,14 +60,9 @@ func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { n, err := snapshot.WriteTo(pw) if err == nil { plog.Infof("wrote database snapshot out [total bytes: %d]", n) - } else { - plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) } pw.CloseWithError(err) - err = snapshot.Close() - if err != nil { - plog.Panicf("failed to close database snapshot: %v", err) - } + snapshot.Close() }() return pr } diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go index 8f6a54ff751..1bed85474e3 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go @@ -24,30 +24,25 @@ import ( // LeaderStats is used by the leader in an etcd cluster, and encapsulates // statistics about communication with its followers type LeaderStats struct { - leaderStats - sync.Mutex -} - -type leaderStats struct { // Leader is the ID of the leader in the etcd cluster. // TODO(jonboulle): clarify that these are IDs, not names Leader string `json:"leader"` Followers map[string]*FollowerStats `json:"followers"` + + sync.Mutex } // NewLeaderStats generates a new LeaderStats with the given id as leader func NewLeaderStats(id string) *LeaderStats { return &LeaderStats{ - leaderStats: leaderStats{ - Leader: id, - Followers: make(map[string]*FollowerStats), - }, + Leader: id, + Followers: make(map[string]*FollowerStats), } } func (ls *LeaderStats) JSON() []byte { ls.Lock() - stats := ls.leaderStats + stats := *ls ls.Unlock() b, err := json.Marshal(stats) // TODO(jonboulle): appropriate error handling? 
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go index 0278e885cf9..cd450e2d199 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go @@ -26,26 +26,6 @@ import ( // ServerStats encapsulates various statistics about an EtcdServer and its // communication with other members of the cluster type ServerStats struct { - serverStats - sync.Mutex -} - -func NewServerStats(name, id string) *ServerStats { - ss := &ServerStats{ - serverStats: serverStats{ - Name: name, - ID: id, - }, - } - now := time.Now() - ss.StartTime = now - ss.LeaderInfo.StartTime = now - ss.sendRateQueue = &statsQueue{back: -1} - ss.recvRateQueue = &statsQueue{back: -1} - return ss -} - -type serverStats struct { Name string `json:"name"` // ID is the raft ID of the node. // TODO(jonboulle): use ID instead of name? @@ -69,15 +49,17 @@ type serverStats struct { sendRateQueue *statsQueue recvRateQueue *statsQueue + + sync.Mutex } func (ss *ServerStats) JSON() []byte { ss.Lock() - stats := ss.serverStats + stats := *ss ss.Unlock() stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String() - stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate() - stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate() + stats.SendingPkgRate, stats.SendingBandwidthRate = stats.SendRates() + stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.RecvRates() b, err := json.Marshal(stats) // TODO(jonboulle): appropriate error handling? 
if err != nil { @@ -86,6 +68,32 @@ func (ss *ServerStats) JSON() []byte { return b } +// Initialize clears the statistics of ServerStats and resets its start time +func (ss *ServerStats) Initialize() { + if ss == nil { + return + } + now := time.Now() + ss.StartTime = now + ss.LeaderInfo.StartTime = now + ss.sendRateQueue = &statsQueue{ + back: -1, + } + ss.recvRateQueue = &statsQueue{ + back: -1, + } +} + +// RecvRates calculates and returns the rate of received append requests +func (ss *ServerStats) RecvRates() (float64, float64) { + return ss.recvRateQueue.Rate() +} + +// SendRates calculates and returns the rate of sent append requests +func (ss *ServerStats) SendRates() (float64, float64) { + return ss.sendRateQueue.Rate() +} + // RecvAppendReq updates the ServerStats in response to an AppendRequest // from the given leader being received func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go index aa8f87569db..693618fbd51 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/storage.go +++ b/vendor/github.com/coreos/etcd/etcdserver/storage.go @@ -32,6 +32,9 @@ type Storage interface { Save(st raftpb.HardState, ents []raftpb.Entry) error // SaveSnap function saves snapshot to the underlying stable storage. SaveSnap(snap raftpb.Snapshot) error + // DBFilePath returns the file path of database snapshot saved with given + // id. + DBFilePath(id uint64) (string, error) // Close closes the Storage and performs finalization. 
Close() error } diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go index e3896ffc2d3..66084ae1244 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/util.go @@ -87,7 +87,7 @@ type notifier struct { func newNotifier() *notifier { return ¬ifier{ - c: make(chan struct{}), + c: make(chan struct{}, 0), } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go index ae449bbf22f..60653cb6dff 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go @@ -19,8 +19,6 @@ import ( "encoding/binary" "time" - "github.com/gogo/protobuf/proto" - "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" @@ -29,10 +27,17 @@ import ( "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/raft" + "github.com/coreos/go-semver/semver" "golang.org/x/net/context" ) const ( + // the max request size that raft accepts. + // TODO: make this a flag? But we probably do not want to + // accept large request which might block raft stream. User + // specify a large value might end up with shooting in the foot. + maxRequestBytes = 1.5 * 1024 * 1024 + // In the health case, there might be a small gap (10s of entries) between // the applied index and committed index. // However, if the committed entries are very heavy to apply, the gap might grow. 
@@ -40,6 +45,10 @@ const ( maxGapBetweenApplyAndCommitIndex = 5000 ) +var ( + newRangeClusterVersion = *semver.Must(semver.NewVersion("3.1.0")) +) + type RaftKV interface { Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) @@ -82,6 +91,11 @@ type Authenticator interface { } func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { + // TODO: remove this checking when we release etcd 3.2 + if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) { + return s.legacyRange(ctx, r) + } + if !r.Serializable { err := s.linearizableReadNotify(ctx) if err != nil { @@ -93,30 +107,65 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe chk := func(ai *auth.AuthInfo) error { return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) } - get := func() { resp, err = s.applyV3Base.Range(nil, r) } + get := func() { resp, err = s.applyV3Base.Range(noTxn, r) } if serr := s.doSerialize(ctx, chk, get); serr != nil { return nil, serr } return resp, err } -func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) +// TODO: remove this func when we release etcd 3.2 +func (s *EtcdServer) legacyRange(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { + if r.Serializable { + var resp *pb.RangeResponse + var err error + chk := func(ai *auth.AuthInfo) error { + return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) + } + get := func() { resp, err = s.applyV3Base.Range(noTxn, r) } + if serr := s.doSerialize(ctx, chk, get); serr != nil { + return nil, serr + } + return resp, err + } + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r}) if err != nil { return nil, err } - return resp.(*pb.PutResponse), nil + if result.err != nil { + return nil, result.err + } 
+ return result.resp.(*pb.RangeResponse), nil +} + +func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Put: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.PutResponse), nil } func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) if err != nil { return nil, err } - return resp.(*pb.DeleteRangeResponse), nil + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.DeleteRangeResponse), nil } func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { + // TODO: remove this checking when we release etcd 3.2 + if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) { + return s.legacyTxn(ctx, r) + } + if isTxnReadonly(r) { if !isTxnSerializable(r) { err := s.linearizableReadNotify(ctx) @@ -135,11 +184,38 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse } return resp, err } - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r}) if err != nil { return nil, err } - return resp.(*pb.TxnResponse), nil + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.TxnResponse), nil +} + +// TODO: remove this func when we release etcd 3.2 +func (s *EtcdServer) legacyTxn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { + if isTxnSerializable(r) { + var resp *pb.TxnResponse + var err error + chk := func(ai *auth.AuthInfo) error { + return checkTxnAuth(s.authStore, ai, r) + } + get := func() { resp, err = s.applyV3Base.Txn(r) } + 
if serr := s.doSerialize(ctx, chk, get); serr != nil { + return nil, serr + } + return resp, err + } + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.TxnResponse), nil } func isTxnSerializable(r *pb.TxnRequest) bool { @@ -204,19 +280,25 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (* // only use positive int64 id's r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) } - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) + result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) if err != nil { return nil, err } - return resp.(*pb.LeaseGrantResponse), nil + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.LeaseGrantResponse), nil } func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) + result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) if err != nil { return nil, err } - return resp.(*pb.LeaseRevokeResponse), nil + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.LeaseRevokeResponse), nil } func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { @@ -312,45 +394,54 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) } func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) + result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) if err != nil { return nil, err } - return resp.(*pb.AlarmResponse), nil + if result.err != nil { + return nil, result.err + } + return 
result.resp.(*pb.AlarmResponse), nil } func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) + result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) if err != nil { return nil, err } - return resp.(*pb.AuthEnableResponse), nil + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthEnableResponse), nil } func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) if err != nil { return nil, err } - return resp.(*pb.AuthDisableResponse), nil + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthDisableResponse), nil } func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - if err := s.linearizableReadNotify(ctx); err != nil { + var result *applyResult + + err := s.linearizableReadNotify(ctx) + if err != nil { return nil, err } - var resp proto.Message for { checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) if err != nil { - if err != auth.ErrAuthNotEnabled { - plog.Errorf("invalid authentication request to user %s was issued", r.Name) - } + plog.Errorf("invalid authentication request to user %s was issued", r.Name) return nil, err } - st, err := s.AuthStore().GenTokenPrefix() + st, err := s.AuthStore().GenSimpleToken() if err != nil { return nil, err } @@ -361,147 +452,172 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest SimpleToken: st, } - resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) + result, err = s.processInternalRaftRequestOnce(ctx, 
pb.InternalRaftRequest{Authenticate: internalReq}) if err != nil { return nil, err } - if checkedRevision == s.AuthStore().Revision() { - break + if result.err != nil { + return nil, result.err } - plog.Infof("revision when password checked is obsolete, retrying") + + if checkedRevision != s.AuthStore().Revision() { + plog.Infof("revision when password checked is obsolete, retrying") + continue + } + + break } - return resp.(*pb.AuthenticateResponse), nil + return result.resp.(*pb.AuthenticateResponse), nil } func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserAddResponse), nil -} - -func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserDeleteResponse), nil -} - -func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserChangePasswordResponse), nil -} - -func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserGrantRoleResponse), nil -} - -func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserGetResponse), nil -} - -func 
(s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserListResponse), nil -} - -func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserRevokeRoleResponse), nil -} - -func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleAddResponse), nil -} - -func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleGrantPermissionResponse), nil -} - -func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleGetResponse), nil -} - -func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleListResponse), nil -} - -func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) - if 
err != nil { - return nil, err - } - return resp.(*pb.AuthRoleRevokePermissionResponse), nil -} - -func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleDeleteResponse), nil -} - -func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { - result, err := s.processInternalRaftRequestOnce(ctx, r) + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) if err != nil { return nil, err } if result.err != nil { return nil, result.err } - return result.resp, nil + return result.resp.(*pb.AuthUserAddResponse), nil } -func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { - for { - resp, err := s.raftRequestOnce(ctx, r) - if err != auth.ErrAuthOldRevision { - return resp, err - } +func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) + if err != nil { + return nil, err } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthUserDeleteResponse), nil +} + +func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthUserChangePasswordResponse), nil +} + +func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + result, err := s.processInternalRaftRequest(ctx, 
pb.InternalRaftRequest{AuthUserGrantRole: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthUserGrantRoleResponse), nil +} + +func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthUserGetResponse), nil +} + +func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthUserListResponse), nil +} + +func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthUserRevokeRoleResponse), nil +} + +func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthRoleAddResponse), nil +} + +func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err 
+ } + return result.resp.(*pb.AuthRoleGrantPermissionResponse), nil +} + +func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthRoleGetResponse), nil +} + +func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthRoleListResponse), nil +} + +func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthRoleRevokePermissionResponse), nil +} + +func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp.(*pb.AuthRoleDeleteResponse), nil } // doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. 
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { for { - ai, err := s.AuthInfoFromCtx(ctx) + ai, err := s.AuthStore().AuthInfoFromCtx(ctx) if err != nil { return err } @@ -536,7 +652,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In ID: s.reqIDGen.Next(), } - authInfo, err := s.AuthInfoFromCtx(ctx) + authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) if err != nil { return nil, err } @@ -550,7 +666,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In return nil, err } - if len(data) > int(s.Cfg.MaxRequestBytes) { + if len(data) > maxRequestBytes { return nil, ErrRequestTooLarge } @@ -580,6 +696,19 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In } } +func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { + var result *applyResult + var err error + for { + result, err = s.processInternalRaftRequestOnce(ctx, r) + if err != auth.ErrAuthOldRevision { + break + } + } + + return result, err +} + // Watchable returns a watchable interface attached to the etcdserver. 
func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } @@ -673,14 +802,3 @@ func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { return ErrStopped } } - -func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) { - if s.Cfg.ClientCertAuthEnabled { - authInfo := s.AuthStore().AuthInfoFromTLS(ctx) - if authInfo != nil { - return authInfo, nil - } - } - - return s.AuthStore().AuthInfoFromCtx(ctx) -} diff --git a/vendor/github.com/coreos/etcd/integration/BUILD b/vendor/github.com/coreos/etcd/integration/BUILD index cd730e4a52a..c6a3e2ae69b 100644 --- a/vendor/github.com/coreos/etcd/integration/BUILD +++ b/vendor/github.com/coreos/etcd/integration/BUILD @@ -13,15 +13,9 @@ go_library( deps = [ "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/embed:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3client:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library", @@ -31,7 +25,6 @@ go_library( "//vendor/github.com/coreos/pkg/capnslog:go_default_library", 
"//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/keepalive:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/integration/bridge.go b/vendor/github.com/coreos/etcd/integration/bridge.go index 59cebe1f0e0..b9e67318e52 100644 --- a/vendor/github.com/coreos/etcd/integration/bridge.go +++ b/vendor/github.com/coreos/etcd/integration/bridge.go @@ -17,7 +17,6 @@ package integration import ( "fmt" "io" - "io/ioutil" "net" "sync" @@ -32,10 +31,9 @@ type bridge struct { l net.Listener conns map[*bridgeConn]struct{} - stopc chan struct{} - pausec chan struct{} - blackholec chan struct{} - wg sync.WaitGroup + stopc chan struct{} + pausec chan struct{} + wg sync.WaitGroup mu sync.Mutex } @@ -43,12 +41,11 @@ type bridge struct { func newBridge(addr string) (*bridge, error) { b := &bridge{ // bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number - inaddr: addr + "0", - outaddr: addr, - conns: make(map[*bridgeConn]struct{}), - stopc: make(chan struct{}), - pausec: make(chan struct{}), - blackholec: make(chan struct{}), + inaddr: addr + "0", + outaddr: addr, + conns: make(map[*bridgeConn]struct{}), + stopc: make(chan struct{}), + pausec: make(chan struct{}), } close(b.pausec) @@ -155,12 +152,12 @@ func (b *bridge) serveConn(bc *bridgeConn) { var wg sync.WaitGroup wg.Add(2) go func() { - b.ioCopy(bc, bc.out, bc.in) + io.Copy(bc.out, bc.in) bc.close() wg.Done() }() go func() { - b.ioCopy(bc, bc.in, bc.out) + io.Copy(bc.in, bc.out) bc.close() wg.Done() }() @@ -182,47 +179,3 @@ func (bc *bridgeConn) close() { bc.in.Close() bc.out.Close() } - -func (b *bridge) Blackhole() { - b.mu.Lock() - close(b.blackholec) - b.mu.Unlock() -} - -func (b *bridge) Unblackhole() { - b.mu.Lock() - for bc := range b.conns { - bc.Close() - } - b.conns = make(map[*bridgeConn]struct{}) - b.blackholec = make(chan struct{}) - b.mu.Unlock() -} - -// ref. 
https://github.com/golang/go/blob/master/src/io/io.go copyBuffer -func (b *bridge) ioCopy(bc *bridgeConn, dst io.Writer, src io.Reader) (err error) { - buf := make([]byte, 32*1024) - for { - select { - case <-b.blackholec: - io.Copy(ioutil.Discard, src) - return nil - default: - } - nr, er := src.Read(buf) - if nr > 0 { - nw, ew := dst.Write(buf[0:nr]) - if ew != nil { - return ew - } - if nr != nw { - return io.ErrShortWrite - } - } - if er != nil { - err = er - break - } - } - return -} diff --git a/vendor/github.com/coreos/etcd/integration/cluster.go b/vendor/github.com/coreos/etcd/integration/cluster.go index a8fa542b250..4989e1f62fa 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster.go +++ b/vendor/github.com/coreos/etcd/integration/cluster.go @@ -31,28 +31,21 @@ import ( "testing" "time" + "golang.org/x/net/context" + "google.golang.org/grpc" + "github.com/coreos/etcd/client" "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/embed" "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/etcdhttp" + "github.com/coreos/etcd/etcdserver/api" "github.com/coreos/etcd/etcdserver/api/v2http" - "github.com/coreos/etcd/etcdserver/api/v3client" - "github.com/coreos/etcd/etcdserver/api/v3election" - epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" - "github.com/coreos/etcd/etcdserver/api/v3lock" - lockpb "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" "github.com/coreos/etcd/etcdserver/api/v3rpc" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/testutil" "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/rafthttp" - "github.com/coreos/pkg/capnslog" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" ) const ( @@ -79,29 +72,16 @@ var ( ClientCertAuth: true, } - testTLSInfoExpired = transport.TLSInfo{ - KeyFile: "./fixtures-expired/server-key.pem", - CertFile: 
"./fixtures-expired/server.pem", - TrustedCAFile: "./fixtures-expired/etcd-root-ca.pem", - ClientCertAuth: true, - } - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "integration") ) type ClusterConfig struct { - Size int - PeerTLS *transport.TLSInfo - ClientTLS *transport.TLSInfo - DiscoveryURL string - UseGRPC bool - QuotaBackendBytes int64 - MaxRequestBytes uint - GRPCKeepAliveMinTime time.Duration - GRPCKeepAliveInterval time.Duration - GRPCKeepAliveTimeout time.Duration - // SkipCreatingClient to skip creating clients for each member. - SkipCreatingClient bool + Size int + PeerTLS *transport.TLSInfo + ClientTLS *transport.TLSInfo + DiscoveryURL string + UseGRPC bool + QuotaBackendBytes int64 } type cluster struct { @@ -109,6 +89,11 @@ type cluster struct { Members []*member } +func init() { + // manually enable v3 capability since we know the cluster members all support v3. + api.EnableCapability(api.V3rpcCapability) +} + func schemeFromTLSInfo(tls *transport.TLSInfo) string { if tls == nil { return UrlScheme @@ -190,12 +175,8 @@ func (c *cluster) URL(i int) string { // URLs returns a list of all active client URLs in the cluster func (c *cluster) URLs() []string { - return getMembersURLs(c.Members) -} - -func getMembersURLs(members []*member) []string { urls := make([]string, 0) - for _, m := range members { + for _, m := range c.Members { select { case <-m.s.StopNotify(): continue @@ -229,14 +210,10 @@ func (c *cluster) HTTPMembers() []client.Member { func (c *cluster) mustNewMember(t *testing.T) *member { m := mustNewMember(t, memberConfig{ - name: c.name(rand.Int()), - peerTLS: c.cfg.PeerTLS, - clientTLS: c.cfg.ClientTLS, - quotaBackendBytes: c.cfg.QuotaBackendBytes, - maxRequestBytes: c.cfg.MaxRequestBytes, - grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime, - grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval, - grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout, + name: c.name(rand.Int()), + peerTLS: c.cfg.PeerTLS, + clientTLS: 
c.cfg.ClientTLS, + quotaBackendBytes: c.cfg.QuotaBackendBytes, }) m.DiscoveryURL = c.cfg.DiscoveryURL if c.cfg.UseGRPC { @@ -335,15 +312,9 @@ func (c *cluster) removeMember(t *testing.T, id uint64) error { } func (c *cluster) Terminate(t *testing.T) { - var wg sync.WaitGroup - wg.Add(len(c.Members)) for _, m := range c.Members { - go func(mm *member) { - defer wg.Done() - mm.Terminate(t) - }(m) + m.Terminate(t) } - wg.Wait() } func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) { @@ -360,6 +331,7 @@ func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) { time.Sleep(tickDuration) } } + return } func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) } @@ -371,18 +343,6 @@ func (c *cluster) waitLeader(t *testing.T, membs []*member) int { for _, m := range membs { possibleLead[uint64(m.s.ID())] = true } - cc := MustNewHTTPClient(t, getMembersURLs(membs), nil) - kapi := client.NewKeysAPI(cc) - - // ensure leader is up via linearizable get - for { - ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration) - _, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true}) - cancel() - if err == nil || strings.Contains(err.Error(), "Key not found") { - break - } - } for lead == 0 || !possibleLead[lead] { lead = 0 @@ -486,13 +446,9 @@ type member struct { s *etcdserver.EtcdServer hss []*httptest.Server - grpcServerOpts []grpc.ServerOption - grpcServer *grpc.Server - grpcAddr string - grpcBridge *bridge - - // serverClient is a clientv3 that directly calls the etcdserver. 
- serverClient *clientv3.Client + grpcServer *grpc.Server + grpcAddr string + grpcBridge *bridge keepDataDirTerminate bool } @@ -500,14 +456,10 @@ type member struct { func (m *member) GRPCAddr() string { return m.grpcAddr } type memberConfig struct { - name string - peerTLS *transport.TLSInfo - clientTLS *transport.TLSInfo - quotaBackendBytes int64 - maxRequestBytes uint - grpcKeepAliveMinTime time.Duration - grpcKeepAliveInterval time.Duration - grpcKeepAliveTimeout time.Duration + name string + peerTLS *transport.TLSInfo + clientTLS *transport.TLSInfo + quotaBackendBytes int64 } // mustNewMember return an inited member with the given name. If peerTLS is @@ -555,26 +507,6 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member { m.ElectionTicks = electionTicks m.TickMs = uint(tickDuration / time.Millisecond) m.QuotaBackendBytes = mcfg.quotaBackendBytes - m.MaxRequestBytes = mcfg.maxRequestBytes - if m.MaxRequestBytes == 0 { - m.MaxRequestBytes = embed.DefaultMaxRequestBytes - } - m.AuthToken = "simple" // for the purpose of integration testing, simple token is enough - - m.grpcServerOpts = []grpc.ServerOption{} - if mcfg.grpcKeepAliveMinTime > time.Duration(0) { - m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: mcfg.grpcKeepAliveMinTime, - PermitWithoutStream: false, - })) - } - if mcfg.grpcKeepAliveInterval > time.Duration(0) && - mcfg.grpcKeepAliveTimeout > time.Duration(0) { - m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: mcfg.grpcKeepAliveInterval, - Timeout: mcfg.grpcKeepAliveTimeout, - })) - } return m } @@ -591,7 +523,7 @@ func (m *member) listenGRPC() error { l.Close() return err } - m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + m.grpcBridge.inaddr + m.grpcAddr = m.grpcBridge.URL() m.grpcListener = l return nil } @@ -603,8 +535,6 @@ func (m *member) electionTimeout() time.Duration { func (m *member) 
DropConnections() { m.grpcBridge.Reset() } func (m *member) PauseConnections() { m.grpcBridge.Pause() } func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() } -func (m *member) Blackhole() { m.grpcBridge.Blackhole() } -func (m *member) Unblackhole() { m.grpcBridge.Unblackhole() } // NewClientV3 creates a new grpc client connection to the member func NewClientV3(m *member) (*clientv3.Client, error) { @@ -667,10 +597,10 @@ func (m *member) Launch() error { if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) + m.s.SyncTicker = time.Tick(500 * time.Millisecond) m.s.Start() - m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)} + m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)} for _, ln := range m.PeerListeners { hs := &httptest.Server{ @@ -714,10 +644,7 @@ func (m *member) Launch() error { return err } } - m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...) - m.serverClient = v3client.New(m.s) - lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient)) - epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient)) + m.grpcServer = v3rpc.Server(m.s, tlscfg) go m.grpcServer.Serve(m.grpcListener) } @@ -761,12 +688,8 @@ func (m *member) Close() { m.grpcBridge.Close() m.grpcBridge = nil } - if m.serverClient != nil { - m.serverClient.Close() - m.serverClient = nil - } if m.grpcServer != nil { - m.grpcServer.GracefulStop() + m.grpcServer.Stop() m.grpcServer = nil } m.s.HardStop() @@ -862,7 +785,7 @@ func (m *member) Metric(metricName string) (string, error) { } // InjectPartition drops connections from m to others, vice versa. 
-func (m *member) InjectPartition(t *testing.T, others ...*member) { +func (m *member) InjectPartition(t *testing.T, others []*member) { for _, other := range others { m.s.CutPeer(other.s.ID()) other.s.CutPeer(m.s.ID()) @@ -870,7 +793,7 @@ func (m *member) InjectPartition(t *testing.T, others ...*member) { } // RecoverPartition recovers connections from m to others, vice versa. -func (m *member) RecoverPartition(t *testing.T, others ...*member) { +func (m *member) RecoverPartition(t *testing.T, others []*member) { for _, other := range others { m.s.MendPeer(other.s.ID()) other.s.MendPeer(m.s.ID()) @@ -922,15 +845,12 @@ func NewClusterV3(t *testing.T, cfg *ClusterConfig) *ClusterV3 { cluster: NewClusterByConfig(t, cfg), } clus.Launch(t) - - if !cfg.SkipCreatingClient { - for _, m := range clus.Members { - client, err := NewClientV3(m) - if err != nil { - t.Fatalf("cannot create client: %v", err) - } - clus.clients = append(clus.clients, client) + for _, m := range clus.Members { + client, err := NewClientV3(m) + if err != nil { + t.Fatalf("cannot create client: %v", err) } + clus.clients = append(clus.clients, client) } return clus @@ -977,8 +897,4 @@ type grpcAPI struct { Maintenance pb.MaintenanceClient // Auth is the authentication API for the client's connection. Auth pb.AuthClient - // Lock is the lock API for the client's connection. - Lock lockpb.LockClient - // Election is the election API for the client's connection. 
- Election epb.ElectionClient } diff --git a/vendor/github.com/coreos/etcd/integration/cluster_direct.go b/vendor/github.com/coreos/etcd/integration/cluster_direct.go index ff97e6146ed..84b2a796cc0 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster_direct.go +++ b/vendor/github.com/coreos/etcd/integration/cluster_direct.go @@ -18,8 +18,6 @@ package integration import ( "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" - "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" ) @@ -31,8 +29,6 @@ func toGRPC(c *clientv3.Client) grpcAPI { pb.NewWatchClient(c.ActiveConnection()), pb.NewMaintenanceClient(c.ActiveConnection()), pb.NewAuthClient(c.ActiveConnection()), - v3lockpb.NewLockClient(c.ActiveConnection()), - v3electionpb.NewElectionClient(c.ActiveConnection()), } } diff --git a/vendor/github.com/coreos/etcd/integration/cluster_proxy.go b/vendor/github.com/coreos/etcd/integration/cluster_proxy.go index 3916553be86..75319218ec6 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster_proxy.go +++ b/vendor/github.com/coreos/etcd/integration/cluster_proxy.go @@ -20,10 +20,8 @@ import ( "sync" "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/clientv3/namespace" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/proxy/grpcproxy" - "github.com/coreos/etcd/proxy/grpcproxy/adapter" ) var ( @@ -31,13 +29,10 @@ var ( proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy) ) -const proxyNamespace = "proxy-namespace" - type grpcClientProxy struct { grpc grpcAPI wdonec <-chan struct{} kvdonec <-chan struct{} - lpdonec <-chan struct{} } func toGRPC(c *clientv3.Client) grpcAPI { @@ -48,30 +43,17 @@ func toGRPC(c *clientv3.Client) grpcAPI { return v.grpc } - // test namespacing proxy - c.KV = namespace.NewKV(c.KV, proxyNamespace) - c.Watcher = namespace.NewWatcher(c.Watcher, proxyNamespace) - 
c.Lease = namespace.NewLease(c.Lease, proxyNamespace) - // test coalescing/caching proxy - kvp, kvpch := grpcproxy.NewKvProxy(c) wp, wpch := grpcproxy.NewWatchProxy(c) - lp, lpch := grpcproxy.NewLeaseProxy(c) - mp := grpcproxy.NewMaintenanceProxy(c) - clp, _ := grpcproxy.NewClusterProxy(c, "", "") // without registering proxy URLs - lockp := grpcproxy.NewLockProxy(c) - electp := grpcproxy.NewElectionProxy(c) - + kvp, kvpch := grpcproxy.NewKvProxy(c) grpc := grpcAPI{ - adapter.ClusterServerToClusterClient(clp), - adapter.KvServerToKvClient(kvp), - adapter.LeaseServerToLeaseClient(lp), - adapter.WatchServerToWatchClient(wp), - adapter.MaintenanceServerToMaintenanceClient(mp), + pb.NewClusterClient(c.ActiveConnection()), + grpcproxy.KvServerToKvClient(kvp), + pb.NewLeaseClient(c.ActiveConnection()), + grpcproxy.WatchServerToWatchClient(wp), + pb.NewMaintenanceClient(c.ActiveConnection()), pb.NewAuthClient(c.ActiveConnection()), - adapter.LockServerToLockClient(lockp), - adapter.ElectionServerToElectionClient(electp), } - proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch, lpdonec: lpch} + proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch} return grpc } @@ -79,17 +61,13 @@ type proxyCloser struct { clientv3.Watcher wdonec <-chan struct{} kvdonec <-chan struct{} - lclose func() - lpdonec <-chan struct{} } func (pc *proxyCloser) Close() error { - // client ctx is canceled before calling close, so kv and lp will close out + // client ctx is canceled before calling close, so kv will close out <-pc.kvdonec err := pc.Watcher.Close() <-pc.wdonec - pc.lclose() - <-pc.lpdonec return err } @@ -101,14 +79,10 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { rpc := toGRPC(c) c.KV = clientv3.NewKVFromKVClient(rpc.KV) pmu.Lock() - lc := c.Lease - c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, cfg.DialTimeout) c.Watcher = &proxyCloser{ Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch), wdonec: proxies[c].wdonec, 
kvdonec: proxies[c].kvdonec, - lclose: func() { lc.Close() }, - lpdonec: proxies[c].lpdonec, } pmu.Unlock() return c, nil diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD b/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD index 1385cb46bf8..7be8ef45cc9 100644 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD +++ b/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD @@ -13,6 +13,7 @@ go_library( "//vendor/github.com/coreos/etcd/lease:go_default_library", "//vendor/github.com/coreos/etcd/lease/leasepb:go_default_library", "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go index c3175cbbb0f..256051efc8d 100644 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go +++ b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go @@ -16,7 +16,6 @@ package leasehttp import ( "bytes" - "context" "errors" "fmt" "io/ioutil" @@ -27,6 +26,7 @@ import ( "github.com/coreos/etcd/lease" "github.com/coreos/etcd/lease/leasepb" "github.com/coreos/etcd/pkg/httputil" + "golang.org/x/net/context" ) var ( @@ -202,27 +202,45 @@ func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string } req.Header.Set("Content-Type", "application/protobuf") - req = req.WithContext(ctx) + cancel := httputil.RequestCanceler(req) cc := &http.Client{Transport: rt} var b []byte // buffer errc channel so that errc don't block inside the go routinue - resp, err := cc.Do(req) - if err != nil { - return nil, err - } - b, err = readResponse(resp) - if err != nil { - return nil, err - } - if resp.StatusCode == http.StatusRequestTimeout { - return nil, ErrLeaseHTTPTimeout - } - if resp.StatusCode == http.StatusNotFound { - return nil, lease.ErrLeaseNotFound - } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("lease: unknown error(%s)", string(b)) + 
errc := make(chan error, 2) + go func() { + resp, err := cc.Do(req) + if err != nil { + errc <- err + return + } + b, err = readResponse(resp) + if err != nil { + errc <- err + return + } + if resp.StatusCode == http.StatusRequestTimeout { + errc <- ErrLeaseHTTPTimeout + return + } + if resp.StatusCode == http.StatusNotFound { + errc <- lease.ErrLeaseNotFound + return + } + if resp.StatusCode != http.StatusOK { + errc <- fmt.Errorf("lease: unknown error(%s)", string(b)) + return + } + errc <- nil + }() + select { + case derr := <-errc: + if derr != nil { + return nil, derr + } + case <-ctx.Done(): + cancel() + return nil, ctx.Err() } lresp := &leasepb.LeaseInternalResponse{} diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go index ec8db732be5..fb3a9bab0c3 100644 --- a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go +++ b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go @@ -590,7 +590,7 @@ func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) } var fileDescriptorLease = []byte{ // 233 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92, diff --git a/vendor/github.com/coreos/etcd/lease/lessor.go b/vendor/github.com/coreos/etcd/lease/lessor.go index 3418cf565ed..385bd76d73c 100644 --- a/vendor/github.com/coreos/etcd/lease/lessor.go +++ b/vendor/github.com/coreos/etcd/lease/lessor.go @@ -31,39 +31,40 @@ import ( const ( // NoLease is a special LeaseID representing the absence of a lease. 
NoLease = LeaseID(0) - - forever = monotime.Time(math.MaxInt64) ) var ( leaseBucketName = []byte("lease") - // maximum number of leases to revoke per second; configurable for tests - leaseRevokeRate = 1000 + forever = monotime.Time(math.MaxInt64) ErrNotPrimary = errors.New("not a primary lessor") ErrLeaseNotFound = errors.New("lease not found") ErrLeaseExists = errors.New("lease already exists") ) -// TxnDelete is a TxnWrite that only permits deletes. Defined here -// to avoid circular dependency with mvcc. -type TxnDelete interface { - DeleteRange(key, end []byte) (n, rev int64) - End() -} - -// RangeDeleter is a TxnDelete constructor. -type RangeDeleter func() TxnDelete - type LeaseID int64 +// RangeDeleter defines an interface with Txn and DeleteRange method. +// We define this interface only for lessor to limit the number +// of methods of mvcc.KV to what lessor actually needs. +// +// Having a minimum interface makes testing easy. +type RangeDeleter interface { + // TxnBegin see comments on mvcc.KV + TxnBegin() int64 + // TxnEnd see comments on mvcc.KV + TxnEnd(txnID int64) error + // TxnDeleteRange see comments on mvcc.KV + TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) +} + // Lessor owns leases. It can grant, revoke, renew and modify leases for lessee. type Lessor interface { - // SetRangeDeleter lets the lessor create TxnDeletes to the store. - // Lessor deletes the items in the revoked or expired lease by creating - // new TxnDeletes. - SetRangeDeleter(rd RangeDeleter) + // SetRangeDeleter sets the RangeDeleter to the Lessor. + // Lessor deletes the items in the revoked or expired lease from the + // the set RangeDeleter. + SetRangeDeleter(dr RangeDeleter) // Grant grants a lease that expires at least after TTL seconds. 
Grant(id LeaseID, ttl int64) (*Lease, error) @@ -247,14 +248,17 @@ func (le *lessor) Revoke(id LeaseID) error { return nil } - txn := le.rd() + tid := le.rd.TxnBegin() // sort keys so deletes are in same order among all members, // otherwise the backened hashes will be different keys := l.Keys() sort.StringSlice(keys).Sort() for _, key := range keys { - txn.DeleteRange([]byte(key), nil) + _, _, err := le.rd.TxnDeleteRange(tid, []byte(key), nil) + if err != nil { + panic(err) + } } le.mu.Lock() @@ -265,7 +269,11 @@ func (le *lessor) Revoke(id LeaseID) error { // deleting the keys if etcdserver fails in between. le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID))) - txn.End() + err := le.rd.TxnEnd(tid) + if err != nil { + panic(err) + } + return nil } @@ -327,53 +335,8 @@ func (le *lessor) Promote(extend time.Duration) { for _, l := range le.leaseMap { l.refresh(extend) } - - if len(le.leaseMap) < leaseRevokeRate { - // no possibility of lease pile-up - return - } - - // adjust expiries in case of overlap - leases := make([]*Lease, 0, len(le.leaseMap)) - for _, l := range le.leaseMap { - leases = append(leases, l) - } - sort.Sort(leasesByExpiry(leases)) - - baseWindow := leases[0].Remaining() - nextWindow := baseWindow + time.Second - expires := 0 - // have fewer expires than the total revoke rate so piled up leases - // don't consume the entire revoke limit - targetExpiresPerSecond := (3 * leaseRevokeRate) / 4 - for _, l := range leases { - remaining := l.Remaining() - if remaining > nextWindow { - baseWindow = remaining - nextWindow = baseWindow + time.Second - expires = 1 - continue - } - expires++ - if expires <= targetExpiresPerSecond { - continue - } - rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond)) - // If leases are extended by n seconds, leases n seconds ahead of the - // base window should be extended by only one second. 
- rateDelay -= float64(remaining - baseWindow) - delay := time.Duration(rateDelay) - nextWindow = baseWindow + delay - l.refresh(delay + extend) - } } -type leasesByExpiry []*Lease - -func (le leasesByExpiry) Len() int { return len(le) } -func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() } -func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] } - func (le *lessor) Demote() { le.mu.Lock() defer le.mu.Unlock() @@ -470,10 +433,6 @@ func (le *lessor) runLoop() { le.mu.Unlock() if len(ls) != 0 { - // rate limit - if len(ls) > leaseRevokeRate/2 { - ls = ls[:leaseRevokeRate/2] - } select { case <-le.stopC: return diff --git a/vendor/github.com/coreos/etcd/mvcc/BUILD b/vendor/github.com/coreos/etcd/mvcc/BUILD index 21b837ec9ac..ab4fab1efde 100644 --- a/vendor/github.com/coreos/etcd/mvcc/BUILD +++ b/vendor/github.com/coreos/etcd/mvcc/BUILD @@ -7,16 +7,12 @@ go_library( "index.go", "key_index.go", "kv.go", - "kv_view.go", "kvstore.go", "kvstore_compaction.go", - "kvstore_txn.go", "metrics.go", - "metrics_txn.go", "revision.go", "util.go", "watchable_store.go", - "watchable_store_txn.go", "watcher.go", "watcher_group.go", ], diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD index 73c025f8294..137f4aacc21 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD +++ b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD @@ -5,24 +5,19 @@ go_library( srcs = [ "backend.go", "batch_tx.go", - "config_default.go", + "boltoption_default.go", "doc.go", "metrics.go", - "read_tx.go", - "tx_buffer.go", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ - "config_linux.go", - ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "config_windows.go", + "boltoption_linux.go", ], "//conditions:default": [], }), importpath = "github.com/coreos/etcd/mvcc/backend", visibility = ["//visibility:public"], deps = [ - 
"//vendor/github.com/coreos/bbolt:go_default_library", + "//vendor/github.com/boltdb/bolt:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ], diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go index 87edd25f427..e5e0028f94b 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go @@ -25,7 +25,7 @@ import ( "sync/atomic" "time" - bolt "github.com/coreos/bbolt" + "github.com/boltdb/bolt" "github.com/coreos/pkg/capnslog" ) @@ -35,21 +35,25 @@ var ( defragLimit = 10000 - // initialMmapSize is the initial size of the mmapped region. Setting this larger than + // InitialMmapSize is the initial size of the mmapped region. Setting this larger than // the potential max db size can prevent writer from blocking reader. // This only works for linux. - initialMmapSize = uint64(10 * 1024 * 1024 * 1024) + InitialMmapSize = int64(10 * 1024 * 1024 * 1024) plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend") +) - // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning. - minSnapshotWarningTimeout = time.Duration(30 * time.Second) +const ( + // DefaultQuotaBytes is the number of bytes the backend Size may + // consume before exceeding the space quota. + DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB + // MaxQuotaBytes is the maximum number of bytes suggested for a backend + // quota. A larger quota may lead to degraded performance. + MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB ) type Backend interface { - ReadTx() ReadTx BatchTx() BatchTx - Snapshot() Snapshot Hash(ignores map[IgnoreKey]struct{}) (uint32, error) // Size returns the current size of the backend. 
@@ -82,71 +86,36 @@ type backend struct { batchInterval time.Duration batchLimit int - batchTx *batchTxBuffered - - readTx *readTx + batchTx *batchTx stopc chan struct{} donec chan struct{} } -type BackendConfig struct { - // Path is the file path to the backend file. - Path string - // BatchInterval is the maximum time before flushing the BatchTx. - BatchInterval time.Duration - // BatchLimit is the maximum puts before flushing the BatchTx. - BatchLimit int - // MmapSize is the number of bytes to mmap for the backend. - MmapSize uint64 -} - -func DefaultBackendConfig() BackendConfig { - return BackendConfig{ - BatchInterval: defaultBatchInterval, - BatchLimit: defaultBatchLimit, - MmapSize: initialMmapSize, - } -} - -func New(bcfg BackendConfig) Backend { - return newBackend(bcfg) +func New(path string, d time.Duration, limit int) Backend { + return newBackend(path, d, limit) } func NewDefaultBackend(path string) Backend { - bcfg := DefaultBackendConfig() - bcfg.Path = path - return newBackend(bcfg) + return newBackend(path, defaultBatchInterval, defaultBatchLimit) } -func newBackend(bcfg BackendConfig) *backend { - bopts := &bolt.Options{} - if boltOpenOptions != nil { - *bopts = *boltOpenOptions - } - bopts.InitialMmapSize = bcfg.mmapSize() - - db, err := bolt.Open(bcfg.Path, 0600, bopts) +func newBackend(path string, d time.Duration, limit int) *backend { + db, err := bolt.Open(path, 0600, boltOpenOptions) if err != nil { - plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err) + plog.Panicf("cannot open database at %s (%v)", path, err) } - // In future, may want to make buffering optional for low-concurrency systems - // or dynamically swap between buffered/non-buffered depending on workload. 
b := &backend{ db: db, - batchInterval: bcfg.BatchInterval, - batchLimit: bcfg.BatchLimit, - - readTx: &readTx{buf: txReadBuffer{ - txBuffer: txBuffer{make(map[string]*bucketBuffer)}}, - }, + batchInterval: d, + batchLimit: limit, stopc: make(chan struct{}), donec: make(chan struct{}), } - b.batchTx = newBatchTxBuffered(b) + b.batchTx = newBatchTx(b) go b.run() return b } @@ -158,8 +127,6 @@ func (b *backend) BatchTx() BatchTx { return b.batchTx } -func (b *backend) ReadTx() ReadTx { return b.readTx } - // ForceCommit forces the current batching tx to commit. func (b *backend) ForceCommit() { b.batchTx.Commit() @@ -174,33 +141,7 @@ func (b *backend) Snapshot() Snapshot { if err != nil { plog.Fatalf("cannot begin tx (%s)", err) } - - stopc, donec := make(chan struct{}), make(chan struct{}) - dbBytes := tx.Size() - go func() { - defer close(donec) - // sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection - // assuming a min tcp throughput of 100MB/s. - var sendRateBytes int64 = 100 * 1024 * 1014 - warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second))) - if warningTimeout < minSnapshotWarningTimeout { - warningTimeout = minSnapshotWarningTimeout - } - start := time.Now() - ticker := time.NewTicker(warningTimeout) - defer ticker.Stop() - for { - select { - case <-ticker.C: - plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1014), start) - case <-stopc: - snapshotDurations.Observe(time.Since(start).Seconds()) - return - } - } - }() - - return &snapshot{tx, stopc, donec} + return &snapshot{tx} } type IgnoreKey struct { @@ -294,11 +235,7 @@ func (b *backend) defrag() error { b.mu.Lock() defer b.mu.Unlock() - // block concurrent read requests while resetting tx - b.readTx.mu.Lock() - defer b.readTx.mu.Unlock() - - b.batchTx.unsafeCommit(true) + b.batchTx.commit(true) 
b.batchTx.tx = nil tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions) @@ -339,10 +276,6 @@ func (b *backend) defrag() error { plog.Fatalf("cannot begin tx (%s)", err) } - b.readTx.buf.reset() - b.readTx.tx = b.unsafeBegin(false) - atomic.StoreInt64(&b.size, b.readTx.tx.Size()) - return nil } @@ -398,22 +331,6 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error { return tmptx.Commit() } -func (b *backend) begin(write bool) *bolt.Tx { - b.mu.RLock() - tx := b.unsafeBegin(write) - b.mu.RUnlock() - atomic.StoreInt64(&b.size, tx.Size()) - return tx -} - -func (b *backend) unsafeBegin(write bool) *bolt.Tx { - tx, err := b.db.Begin(write) - if err != nil { - plog.Fatalf("cannot begin tx (%s)", err) - } - return tx -} - // NewTmpBackend creates a backend implementation for testing. func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) { dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test") @@ -421,9 +338,7 @@ func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, strin plog.Fatal(err) } tmpPath := filepath.Join(dir, "database") - bcfg := DefaultBackendConfig() - bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit - return newBackend(bcfg), tmpPath + return newBackend(tmpPath, batchInterval, batchLimit), tmpPath } func NewDefaultTmpBackend() (*backend, string) { @@ -432,12 +347,6 @@ func NewDefaultTmpBackend() (*backend, string) { type snapshot struct { *bolt.Tx - stopc chan struct{} - donec chan struct{} } -func (s *snapshot) Close() error { - close(s.stopc) - <-s.donec - return s.Tx.Rollback() -} +func (s *snapshot) Close() error { return s.Tx.Rollback() } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go index e5fb8474089..04fea1e9477 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go @@ -16,24 +16,23 @@ package 
backend import ( "bytes" - "fmt" - "math" "sync" "sync/atomic" "time" - bolt "github.com/coreos/bbolt" + "github.com/boltdb/bolt" ) type BatchTx interface { - ReadTx + Lock() + Unlock() UnsafeCreateBucket(name []byte) UnsafePut(bucketName []byte, key []byte, value []byte) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) + UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) UnsafeDelete(bucketName []byte, key []byte) - // Commit commits a previous tx and begins a new writable one. + UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error Commit() - // CommitAndStop commits the previous tx and does not create a new one. CommitAndStop() } @@ -41,10 +40,15 @@ type batchTx struct { sync.Mutex tx *bolt.Tx backend *backend - pending int } +func newBatchTx(backend *backend) *batchTx { + tx := &batchTx{backend: backend} + tx.Commit() + return tx +} + func (t *batchTx) UnsafeCreateBucket(name []byte) { _, err := t.tx.CreateBucket(name) if err != nil && err != bolt.ErrBucketExists { @@ -80,37 +84,30 @@ func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq boo } // UnsafeRange must be called holding the lock on the tx. 
-func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { - k, v, err := unsafeRange(t.tx, bucketName, key, endKey, limit) - if err != nil { - plog.Fatal(err) - } - return k, v -} - -func unsafeRange(tx *bolt.Tx, bucketName, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte, err error) { - bucket := tx.Bucket(bucketName) +func (t *batchTx) UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) { + bucket := t.tx.Bucket(bucketName) if bucket == nil { - return nil, nil, fmt.Errorf("bucket %s does not exist", bucketName) + plog.Fatalf("bucket %s does not exist", bucketName) } + if len(endKey) == 0 { - if v := bucket.Get(key); v != nil { - return append(keys, key), append(vs, v), nil + if v := bucket.Get(key); v == nil { + return keys, vs + } else { + return append(keys, key), append(vs, v) } - return nil, nil, nil - } - if limit <= 0 { - limit = math.MaxInt64 } + c := bucket.Cursor() for ck, cv := c.Seek(key); ck != nil && bytes.Compare(ck, endKey) < 0; ck, cv = c.Next() { vs = append(vs, cv) keys = append(keys, ck) - if limit == int64(len(keys)) { + if limit > 0 && limit == int64(len(keys)) { break } } - return keys, vs, nil + + return keys, vs } // UnsafeDelete must be called holding the lock on the tx. @@ -128,14 +125,12 @@ func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) { // UnsafeForEach must be called holding the lock on the tx. func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { - return unsafeForEach(t.tx, bucketName, visitor) -} - -func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error { - if b := tx.Bucket(bucket); b != nil { - return b.ForEach(visitor) + b := t.tx.Bucket(bucketName) + if b == nil { + // bucket does not exist + return nil } - return nil + return b.ForEach(visitor) } // Commit commits a previous tx and begins a new writable one. 
@@ -145,7 +140,7 @@ func (t *batchTx) Commit() { t.commit(false) } -// CommitAndStop commits the previous tx and does not create a new one. +// CommitAndStop commits the previous tx and do not create a new one. func (t *batchTx) CommitAndStop() { t.Lock() defer t.Unlock() @@ -155,28 +150,37 @@ func (t *batchTx) CommitAndStop() { func (t *batchTx) Unlock() { if t.pending >= t.backend.batchLimit { t.commit(false) + t.pending = 0 } t.Mutex.Unlock() } func (t *batchTx) commit(stop bool) { + var err error // commit the last tx if t.tx != nil { if t.pending == 0 && !stop { t.backend.mu.RLock() defer t.backend.mu.RUnlock() - // t.tx.DB()==nil if 'CommitAndStop' calls 'batchTx.commit(true)', - // which initializes *bolt.Tx.db and *bolt.Tx.meta as nil; panics t.tx.Size(). - // Server must make sure 'batchTx.commit(false)' does not follow - // 'batchTx.commit(true)' (e.g. stopping backend, and inflight Hash call). - atomic.StoreInt64(&t.backend.size, t.tx.Size()) + // batchTx.commit(true) calls *bolt.Tx.Commit, which + // initializes *bolt.Tx.db and *bolt.Tx.meta as nil, + // and subsequent *bolt.Tx.Size() call panics. + // + // This nil pointer reference panic happens when: + // 1. batchTx.commit(false) from newBatchTx + // 2. batchTx.commit(true) from stopping backend + // 3. 
batchTx.commit(false) from inflight mvcc Hash call + // + // Check if db is nil to prevent this panic + if t.tx.DB() != nil { + atomic.StoreInt64(&t.backend.size, t.tx.Size()) + } return } - start := time.Now() // gofail: var beforeCommit struct{} - err := t.tx.Commit() + err = t.tx.Commit() // gofail: var afterCommit struct{} commitDurations.Observe(time.Since(start).Seconds()) atomic.AddInt64(&t.backend.commits, 1) @@ -186,81 +190,17 @@ func (t *batchTx) commit(stop bool) { plog.Fatalf("cannot commit tx (%s)", err) } } - if !stop { - t.tx = t.backend.begin(true) - } -} -type batchTxBuffered struct { - batchTx - buf txWriteBuffer -} - -func newBatchTxBuffered(backend *backend) *batchTxBuffered { - tx := &batchTxBuffered{ - batchTx: batchTx{backend: backend}, - buf: txWriteBuffer{ - txBuffer: txBuffer{make(map[string]*bucketBuffer)}, - seq: true, - }, - } - tx.Commit() - return tx -} - -func (t *batchTxBuffered) Unlock() { - if t.pending != 0 { - t.backend.readTx.mu.Lock() - t.buf.writeback(&t.backend.readTx.buf) - t.backend.readTx.mu.Unlock() - if t.pending >= t.backend.batchLimit { - t.commit(false) - } - } - t.batchTx.Unlock() -} - -func (t *batchTxBuffered) Commit() { - t.Lock() - defer t.Unlock() - t.commit(false) -} - -func (t *batchTxBuffered) CommitAndStop() { - t.Lock() - defer t.Unlock() - t.commit(true) -} - -func (t *batchTxBuffered) commit(stop bool) { - // all read txs must be closed to acquire boltdb commit rwlock - t.backend.readTx.mu.Lock() - defer t.backend.readTx.mu.Unlock() - t.unsafeCommit(stop) -} - -func (t *batchTxBuffered) unsafeCommit(stop bool) { - if t.backend.readTx.tx != nil { - if err := t.backend.readTx.tx.Rollback(); err != nil { - plog.Fatalf("cannot rollback tx (%s)", err) - } - t.backend.readTx.buf.reset() - t.backend.readTx.tx = nil + if stop { + return } - t.batchTx.commit(stop) - - if !stop { - t.backend.readTx.tx = t.backend.begin(false) + t.backend.mu.RLock() + defer t.backend.mu.RUnlock() + // begin a new tx + t.tx, err = 
t.backend.db.Begin(true) + if err != nil { + plog.Fatalf("cannot begin tx (%s)", err) } -} - -func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) { - t.batchTx.UnsafePut(bucketName, key, value) - t.buf.put(bucketName, key, value) -} - -func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) { - t.batchTx.UnsafeSeqPut(bucketName, key, value) - t.buf.putSeq(bucketName, key, value) + atomic.StoreInt64(&t.backend.size, t.tx.Size()) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go similarity index 82% rename from vendor/github.com/coreos/etcd/mvcc/backend/config_default.go rename to vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go index edfed0025c6..92019c18415 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go @@ -12,12 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !linux,!windows +// +build !linux package backend -import bolt "github.com/coreos/bbolt" +import "github.com/boltdb/bolt" var boltOpenOptions *bolt.Options = nil - -func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go similarity index 88% rename from vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go rename to vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go index a8f6abeba63..4ee9b05a77c 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go @@ -17,7 +17,7 @@ package backend import ( "syscall" - bolt "github.com/coreos/bbolt" + "github.com/boltdb/bolt" ) // syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead @@ -27,7 +27,6 @@ import ( // (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might // silently ignore this flag. Please update your kernel to prevent this. var boltOpenOptions = &bolt.Options{ - MmapFlags: syscall.MAP_POPULATE, + MmapFlags: syscall.MAP_POPULATE, + InitialMmapSize: int(InitialMmapSize), } - -func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go deleted file mode 100644 index 71d02700bcd..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build windows - -package backend - -import bolt "github.com/coreos/bbolt" - -var boltOpenOptions *bolt.Options = nil - -// setting mmap size != 0 on windows will allocate the entire -// mmap size for the file, instead of growing it. So, force 0. - -func (bcfg *BackendConfig) mmapSize() int { return 0 } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go index 30a38801476..34a56a91956 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go @@ -24,18 +24,8 @@ var ( Help: "The latency distributions of commit called by backend.", Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), }) - - snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "disk", - Name: "backend_snapshot_duration_seconds", - Help: "The latency distribution of backend snapshots.", - // 10 ms -> 655 seconds - Buckets: prometheus.ExponentialBuckets(.01, 2, 17), - }) ) func init() { prometheus.MustRegister(commitDurations) - prometheus.MustRegister(snapshotDurations) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go deleted file mode 100644 index 9fc6b790620..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend - -import ( - "bytes" - "math" - "sync" - - bolt "github.com/coreos/bbolt" -) - -// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys; -// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket -// is known to never overwrite any key so range is safe. -var safeRangeBucket = []byte("key") - -type ReadTx interface { - Lock() - Unlock() - - UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) - UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error -} - -type readTx struct { - // mu protects accesses to the txReadBuffer - mu sync.RWMutex - buf txReadBuffer - - // txmu protects accesses to the Tx on Range requests - txmu sync.Mutex - tx *bolt.Tx -} - -func (rt *readTx) Lock() { rt.mu.RLock() } -func (rt *readTx) Unlock() { rt.mu.RUnlock() } - -func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { - if endKey == nil { - // forbid duplicates for single keys - limit = 1 - } - if limit <= 0 { - limit = math.MaxInt64 - } - if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) { - panic("do not use unsafeRange on non-keys bucket") - } - keys, vals := rt.buf.Range(bucketName, key, endKey, limit) - if int64(len(keys)) == limit { - return keys, vals - } - rt.txmu.Lock() - // ignore error since bucket may have been created in this batch - k2, v2, _ := unsafeRange(rt.tx, bucketName, key, endKey, limit-int64(len(keys))) - rt.txmu.Unlock() - return append(k2, keys...), append(v2, 
vals...) -} - -func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { - dups := make(map[string]struct{}) - f1 := func(k, v []byte) error { - dups[string(k)] = struct{}{} - return visitor(k, v) - } - f2 := func(k, v []byte) error { - if _, ok := dups[string(k)]; ok { - return nil - } - return visitor(k, v) - } - if err := rt.buf.ForEach(bucketName, f1); err != nil { - return err - } - rt.txmu.Lock() - err := unsafeForEach(rt.tx, bucketName, f2) - rt.txmu.Unlock() - return err -} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go deleted file mode 100644 index 56e885dbfbc..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend - -import ( - "bytes" - "sort" -) - -// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer. -type txBuffer struct { - buckets map[string]*bucketBuffer -} - -func (txb *txBuffer) reset() { - for k, v := range txb.buckets { - if v.used == 0 { - // demote - delete(txb.buckets, k) - } - v.used = 0 - } -} - -// txWriteBuffer buffers writes of pending updates that have not yet committed. 
-type txWriteBuffer struct { - txBuffer - seq bool -} - -func (txw *txWriteBuffer) put(bucket, k, v []byte) { - txw.seq = false - txw.putSeq(bucket, k, v) -} - -func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) { - b, ok := txw.buckets[string(bucket)] - if !ok { - b = newBucketBuffer() - txw.buckets[string(bucket)] = b - } - b.add(k, v) -} - -func (txw *txWriteBuffer) writeback(txr *txReadBuffer) { - for k, wb := range txw.buckets { - rb, ok := txr.buckets[k] - if !ok { - delete(txw.buckets, k) - txr.buckets[k] = wb - continue - } - if !txw.seq && wb.used > 1 { - // assume no duplicate keys - sort.Sort(wb) - } - rb.merge(wb) - } - txw.reset() -} - -// txReadBuffer accesses buffered updates. -type txReadBuffer struct{ txBuffer } - -func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { - if b := txr.buckets[string(bucketName)]; b != nil { - return b.Range(key, endKey, limit) - } - return nil, nil -} - -func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error { - if b := txr.buckets[string(bucketName)]; b != nil { - return b.ForEach(visitor) - } - return nil -} - -type kv struct { - key []byte - val []byte -} - -// bucketBuffer buffers key-value pairs that are pending commit. -type bucketBuffer struct { - buf []kv - // used tracks number of elements in use so buf can be reused without reallocation. 
- used int -} - -func newBucketBuffer() *bucketBuffer { - return &bucketBuffer{buf: make([]kv, 512), used: 0} -} - -func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) { - f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 } - idx := sort.Search(bb.used, f) - if idx < 0 { - return nil, nil - } - if len(endKey) == 0 { - if bytes.Equal(key, bb.buf[idx].key) { - keys = append(keys, bb.buf[idx].key) - vals = append(vals, bb.buf[idx].val) - } - return keys, vals - } - if bytes.Compare(endKey, bb.buf[idx].key) <= 0 { - return nil, nil - } - for i := idx; i < bb.used && int64(len(keys)) < limit; i++ { - if bytes.Compare(endKey, bb.buf[i].key) <= 0 { - break - } - keys = append(keys, bb.buf[i].key) - vals = append(vals, bb.buf[i].val) - } - return keys, vals -} - -func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error { - for i := 0; i < bb.used; i++ { - if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil { - return err - } - } - return nil -} - -func (bb *bucketBuffer) add(k, v []byte) { - bb.buf[bb.used].key, bb.buf[bb.used].val = k, v - bb.used++ - if bb.used == len(bb.buf) { - buf := make([]kv, (3*len(bb.buf))/2) - copy(buf, bb.buf) - bb.buf = buf - } -} - -// merge merges data from bb into bbsrc. 
-func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) { - for i := 0; i < bbsrc.used; i++ { - bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val) - } - if bb.used == bbsrc.used { - return - } - if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 { - return - } - - sort.Stable(bb) - - // remove duplicates, using only newest update - widx := 0 - for ridx := 1; ridx < bb.used; ridx++ { - if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) { - widx++ - } - bb.buf[widx] = bb.buf[ridx] - } - bb.used = widx + 1 -} - -func (bb *bucketBuffer) Len() int { return bb.used } -func (bb *bucketBuffer) Less(i, j int) bool { - return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0 -} -func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] } diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go index 991289cdd5c..397098a7ba7 100644 --- a/vendor/github.com/coreos/etcd/mvcc/index.go +++ b/vendor/github.com/coreos/etcd/mvcc/index.go @@ -29,9 +29,7 @@ type index interface { RangeSince(key, end []byte, rev int64) []revision Compact(rev int64) map[revision]struct{} Equal(b index) bool - Insert(ki *keyIndex) - KeyIndex(ki *keyIndex) *keyIndex } type treeIndex struct { @@ -62,27 +60,18 @@ func (ti *treeIndex) Put(key []byte, rev revision) { func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { keyi := &keyIndex{key: key} + ti.RLock() defer ti.RUnlock() - if keyi = ti.keyIndex(keyi); keyi == nil { + item := ti.tree.Get(keyi) + if item == nil { return revision{}, revision{}, 0, ErrRevisionNotFound } + + keyi = item.(*keyIndex) return keyi.get(atRev) } -func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { - ti.RLock() - defer ti.RUnlock() - return ti.keyIndex(keyi) -} - -func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { - if item := ti.tree.Get(keyi); item != nil { - return item.(*keyIndex) - } - return nil -} - func (ti *treeIndex) Range(key, 
end []byte, atRev int64) (keys [][]byte, revs []revision) { if end == nil { rev, _, _, err := ti.Get(key, atRev) diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go index 9104f9b2d36..983c64e2f6b 100644 --- a/vendor/github.com/coreos/etcd/mvcc/key_index.go +++ b/vendor/github.com/coreos/etcd/mvcc/key_index.go @@ -222,6 +222,7 @@ func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { } // remove the previous generations. ki.generations = ki.generations[i:] + return } func (ki *keyIndex) isEmpty() bool { diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go index 6636347aa43..c851c8725e8 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kv.go +++ b/vendor/github.com/coreos/etcd/mvcc/kv.go @@ -32,15 +32,15 @@ type RangeResult struct { Count int } -type ReadView interface { - // FirstRev returns the first KV revision at the time of opening the txn. +type KV interface { + // Rev returns the current revision of the KV. + Rev() int64 + + // FirstRev returns the first revision of the KV. // After a compaction, the first revision increases to the compaction // revision. FirstRev() int64 - // Rev returns the revision of the KV at the time of opening the txn. - Rev() int64 - // Range gets the keys in the range at rangeRev. // The returned rev is the current revision of the KV when the operation is executed. // If rangeRev <=0, range gets the keys at currentRev. @@ -50,17 +50,14 @@ type ReadView interface { // Limit limits the number of keys returned. // If the required rev is compacted, ErrCompacted will be returned. Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) -} -// TxnRead represents a read-only transaction with operations that will not -// block other read transactions. -type TxnRead interface { - ReadView - // End marks the transaction is complete and ready to commit. - End() -} + // Put puts the given key, value into the store. 
Put also takes additional argument lease to + // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease + // id. + // A put also increases the rev of the store, and generates one event in the event history. + // The returned rev is the current revision of the KV when the operation is executed. + Put(key, value []byte, lease lease.LeaseID) (rev int64) -type WriteView interface { // DeleteRange deletes the given range from the store. // A deleteRange increases the rev of the store if any key in the range exists. // The number of key deleted will be returned. @@ -70,51 +67,26 @@ type WriteView interface { // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end). DeleteRange(key, end []byte) (n, rev int64) - // Put puts the given key, value into the store. Put also takes additional argument lease to - // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease - // id. - // A put also increases the rev of the store, and generates one event in the event history. - // The returned rev is the current revision of the KV when the operation is executed. - Put(key, value []byte, lease lease.LeaseID) (rev int64) -} - -// TxnWrite represents a transaction that can modify the store. -type TxnWrite interface { - TxnRead - WriteView - // Changes gets the changes made since opening the write txn. - Changes() []mvccpb.KeyValue -} - -// txnReadWrite coerces a read txn to a write, panicking on any write operation. 
-type txnReadWrite struct{ TxnRead } - -func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") } -func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - panic("unexpected Put") -} -func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil } - -func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} } - -type KV interface { - ReadView - WriteView - - // Read creates a read transaction. - Read() TxnRead - - // Write creates a write transaction. - Write() TxnWrite - - // Hash retrieves the hash of KV state and revision. - // This method is designed for consistency checking purposes. - Hash() (hash uint32, revision int64, err error) + // TxnBegin begins a txn. Only Txn prefixed operation can be executed, others will be blocked + // until txn ends. Only one on-going txn is allowed. + // TxnBegin returns an int64 txn ID. + // All txn prefixed operations with same txn ID will be done with the same rev. + TxnBegin() int64 + // TxnEnd ends the on-going txn with txn ID. If the on-going txn ID is not matched, error is returned. + TxnEnd(txnID int64) error + // TxnRange returns the current revision of the KV when the operation is executed. + TxnRange(txnID int64, key, end []byte, ro RangeOptions) (r *RangeResult, err error) + TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) + TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) // Compact frees all superseded keys with revisions less than rev. Compact(rev int64) (<-chan struct{}, error) - // Commit commits outstanding txns into the underlying backend. + // Hash retrieves the hash of KV state and revision. + // This method is designed for consistency checking purpose. + Hash() (hash uint32, revision int64, err error) + + // Commit commits txns into the underlying backend. Commit() // Restore restores the KV store from a backend. 
diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go deleted file mode 100644 index f40ba8edc22..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kv_view.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "github.com/coreos/etcd/lease" -) - -type readView struct{ kv KV } - -func (rv *readView) FirstRev() int64 { - tr := rv.kv.Read() - defer tr.End() - return tr.FirstRev() -} - -func (rv *readView) Rev() int64 { - tr := rv.kv.Read() - defer tr.End() - return tr.Rev() -} - -func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - tr := rv.kv.Read() - defer tr.End() - return tr.Range(key, end, ro) -} - -type writeView struct{ kv KV } - -func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) { - tw := wv.kv.Write() - defer tw.End() - return tw.DeleteRange(key, end) -} - -func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - tw := wv.kv.Write() - defer tw.End() - return tw.Put(key, value, lease) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go index 28a508ccb95..28a18a06597 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore.go +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore.go @@ -18,6 +18,7 @@ import ( "encoding/binary" "errors" "math" + "math/rand" "sync" "time" @@ -33,28 
+34,24 @@ var ( keyBucketName = []byte("key") metaBucketName = []byte("meta") - consistentIndexKeyName = []byte("consistent_index") - scheduledCompactKeyName = []byte("scheduledCompactRev") - finishedCompactKeyName = []byte("finishedCompactRev") - - ErrCompacted = errors.New("mvcc: required revision has been compacted") - ErrFutureRev = errors.New("mvcc: required revision is a future revision") - ErrCanceled = errors.New("mvcc: watcher is canceled") - ErrClosed = errors.New("mvcc: closed") - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") -) - -const ( // markedRevBytesLen is the byte length of marked revision. // The first `revBytesLen` bytes represents a normal revision. The last // one byte is the mark. markedRevBytesLen = revBytesLen + 1 markBytePosition = markedRevBytesLen - 1 markTombstone byte = 't' -) -var restoreChunkKeys = 10000 // non-const for testing + consistentIndexKeyName = []byte("consistent_index") + scheduledCompactKeyName = []byte("scheduledCompactRev") + finishedCompactKeyName = []byte("finishedCompactRev") + + ErrTxnIDMismatch = errors.New("mvcc: txn id mismatch") + ErrCompacted = errors.New("mvcc: required revision has been compacted") + ErrFutureRev = errors.New("mvcc: required revision is a future revision") + ErrCanceled = errors.New("mvcc: watcher is canceled") + + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") +) // ConsistentIndexGetter is an interface that wraps the Get method. // Consistent index is the offset of an entry in a consistent replicated log. @@ -64,11 +61,7 @@ type ConsistentIndexGetter interface { } type store struct { - ReadView - WriteView - - // mu read locks for txns and write locks for non-txn store changes. - mu sync.RWMutex + mu sync.Mutex // guards the following ig ConsistentIndexGetter @@ -77,19 +70,19 @@ type store struct { le lease.Lessor - // revMuLock protects currentRev and compactMainRev. - // Locked at end of write txn and released after write txn unlock lock. 
- // Locked before locking read txn and released after locking. - revMu sync.RWMutex - // currentRev is the revision of the last completed transaction. - currentRev int64 - // compactMainRev is the main revision of the last compaction. + currentRev revision + // the main revision of the last compaction compactMainRev int64 + tx backend.BatchTx + txnID int64 // tracks the current txnID to verify txn operations + txnModify bool + // bytesBuf8 is a byte slice of length 8 // to avoid a repetitive allocation in saveIndex. bytesBuf8 []byte + changes []mvccpb.KeyValue fifoSched schedule.Scheduler stopc chan struct{} @@ -105,18 +98,17 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto le: le, - currentRev: 1, + currentRev: revision{main: 1}, compactMainRev: -1, - bytesBuf8: make([]byte, 8), + bytesBuf8: make([]byte, 8, 8), fifoSched: schedule.NewFIFOScheduler(), stopc: make(chan struct{}), } - s.ReadView = &readView{s} - s.WriteView = &writeView{s} + if s.le != nil { - s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) + s.le.SetRangeDeleter(s) } tx := s.b.BatchTx() @@ -134,6 +126,140 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto return s } +func (s *store) Rev() int64 { + s.mu.Lock() + defer s.mu.Unlock() + + return s.currentRev.main +} + +func (s *store) FirstRev() int64 { + s.mu.Lock() + defer s.mu.Unlock() + + return s.compactMainRev +} + +func (s *store) Put(key, value []byte, lease lease.LeaseID) int64 { + id := s.TxnBegin() + s.put(key, value, lease) + s.txnEnd(id) + + putCounter.Inc() + + return int64(s.currentRev.main) +} + +func (s *store) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + id := s.TxnBegin() + kvs, count, rev, err := s.rangeKeys(key, end, ro.Limit, ro.Rev, ro.Count) + s.txnEnd(id) + + rangeCounter.Inc() + + r = &RangeResult{ + KVs: kvs, + Count: count, + Rev: rev, + } + + return r, err +} + +func (s *store) DeleteRange(key, end []byte) (n, 
rev int64) { + id := s.TxnBegin() + n = s.deleteRange(key, end) + s.txnEnd(id) + + deleteCounter.Inc() + + return n, int64(s.currentRev.main) +} + +func (s *store) TxnBegin() int64 { + s.mu.Lock() + s.currentRev.sub = 0 + s.tx = s.b.BatchTx() + s.tx.Lock() + + s.txnID = rand.Int63() + return s.txnID +} + +func (s *store) TxnEnd(txnID int64) error { + err := s.txnEnd(txnID) + if err != nil { + return err + } + + txnCounter.Inc() + return nil +} + +// txnEnd is used for unlocking an internal txn. It does +// not increase the txnCounter. +func (s *store) txnEnd(txnID int64) error { + if txnID != s.txnID { + return ErrTxnIDMismatch + } + + // only update index if the txn modifies the mvcc state. + // read only txn might execute with one write txn concurrently, + // it should not write its index to mvcc. + if s.txnModify { + s.saveIndex() + } + s.txnModify = false + + s.tx.Unlock() + if s.currentRev.sub != 0 { + s.currentRev.main += 1 + } + s.currentRev.sub = 0 + + dbTotalSize.Set(float64(s.b.Size())) + s.mu.Unlock() + return nil +} + +func (s *store) TxnRange(txnID int64, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + if txnID != s.txnID { + return nil, ErrTxnIDMismatch + } + + kvs, count, rev, err := s.rangeKeys(key, end, ro.Limit, ro.Rev, ro.Count) + + r = &RangeResult{ + KVs: kvs, + Count: count, + Rev: rev, + } + return r, err +} + +func (s *store) TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) { + if txnID != s.txnID { + return 0, ErrTxnIDMismatch + } + + s.put(key, value, lease) + return int64(s.currentRev.main + 1), nil +} + +func (s *store) TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) { + if txnID != s.txnID { + return 0, 0, ErrTxnIDMismatch + } + + n = s.deleteRange(key, end) + if n != 0 || s.currentRev.sub != 0 { + rev = int64(s.currentRev.main + 1) + } else { + rev = int64(s.currentRev.main) + } + return n, rev, nil +} + func (s *store) compactBarrier(ctx context.Context, ch 
chan struct{}) { if ctx == nil || ctx.Err() != nil { s.mu.Lock() @@ -149,25 +275,16 @@ func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { close(ch) } -func (s *store) Hash() (hash uint32, revision int64, err error) { - s.b.ForceCommit() - h, err := s.b.Hash(DefaultIgnores) - return h, s.currentRev, err -} - func (s *store) Compact(rev int64) (<-chan struct{}, error) { s.mu.Lock() defer s.mu.Unlock() - s.revMu.Lock() - defer s.revMu.Unlock() - if rev <= s.compactMainRev { ch := make(chan struct{}) f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } s.fifoSched.Schedule(f) return ch, ErrCompacted } - if rev > s.currentRev { + if rev > s.currentRev.main { return nil, ErrFutureRev } @@ -216,14 +333,24 @@ func init() { } } +func (s *store) Hash() (uint32, int64, error) { + s.mu.Lock() + defer s.mu.Unlock() + s.b.ForceCommit() + + h, err := s.b.Hash(DefaultIgnores) + rev := s.currentRev.main + return h, rev, err +} + func (s *store) Commit() { s.mu.Lock() defer s.mu.Unlock() - tx := s.b.BatchTx() - tx.Lock() - s.saveIndex(tx) - tx.Unlock() + s.tx = s.b.BatchTx() + s.tx.Lock() + s.saveIndex() + s.tx.Unlock() s.b.ForceCommit() } @@ -236,8 +363,10 @@ func (s *store) Restore(b backend.Backend) error { s.b = b s.kvindex = newTreeIndex() - s.currentRev = 1 + s.currentRev = revision{main: 1} s.compactMainRev = -1 + s.tx = b.BatchTx() + s.txnID = -1 s.fifoSched = schedule.NewFIFOScheduler() s.stopc = make(chan struct{}) @@ -245,63 +374,75 @@ func (s *store) Restore(b backend.Backend) error { } func (s *store) restore() error { - reportDbTotalSizeInBytesMu.Lock() - b := s.b - reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } - reportDbTotalSizeInBytesMu.Unlock() - min, max := newRevBytes(), newRevBytes() revToBytes(revision{main: 1}, min) revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) keyToLease := make(map[string]lease.LeaseID) + // use an unordered map to hold the temp index data to speed up + // the initial 
key index recovery. + // we will convert this unordered map into the tree index later. + unordered := make(map[string]*keyIndex, 100000) + // restore index tx := s.b.BatchTx() tx.Lock() - _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) if len(finishedCompactBytes) != 0 { s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main plog.Printf("restore compact to %d", s.compactMainRev) } - _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) - scheduledCompact := int64(0) - if len(scheduledCompactBytes) != 0 { - scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main + + // TODO: limit N to reduce max memory usage + keys, vals := tx.UnsafeRange(keyBucketName, min, max, 0) + for i, key := range keys { + var kv mvccpb.KeyValue + if err := kv.Unmarshal(vals[i]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + + rev := bytesToRev(key[:revBytesLen]) + + // restore index + switch { + case isTombstone(key): + if ki, ok := unordered[string(kv.Key)]; ok { + ki.tombstone(rev.main, rev.sub) + } + delete(keyToLease, string(kv.Key)) + + default: + ki, ok := unordered[string(kv.Key)] + if ok { + ki.put(rev.main, rev.sub) + } else { + ki = &keyIndex{key: kv.Key} + ki.restore(revision{kv.CreateRevision, 0}, rev, kv.Version) + unordered[string(kv.Key)] = ki + } + + if lid := lease.LeaseID(kv.Lease); lid != lease.NoLease { + keyToLease[string(kv.Key)] = lid + } else { + delete(keyToLease, string(kv.Key)) + } + } + + // update revision + s.currentRev = rev } - // index keys concurrently as they're loaded in from tx - keysGauge.Set(0) - rkvc, revc := restoreIntoIndex(s.kvindex) - for { - keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys)) - if len(keys) == 0 { - break - } - // rkvc blocks if the total pending keys exceeds the restore - // chunk size to keep keys from consuming too much memory. 
- restoreChunk(rkvc, keys, vals, keyToLease) - if len(keys) < restoreChunkKeys { - // partial set implies final set - break - } - // next set begins after where this one ended - newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) - newMin.sub++ - revToBytes(newMin, min) + // restore the tree index from the unordered index. + for _, v := range unordered { + s.kvindex.Insert(v) } - close(rkvc) - s.currentRev = <-revc // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. // the correct revision should be set to compaction revision in the case, not the largest revision // we have seen. - if s.currentRev < s.compactMainRev { - s.currentRev = s.compactMainRev - } - if scheduledCompact <= s.compactMainRev { - scheduledCompact = 0 + if s.currentRev.main < s.compactMainRev { + s.currentRev.main = s.compactMainRev } for key, lid := range keyToLease { @@ -314,6 +455,15 @@ func (s *store) restore() error { } } + _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) + scheduledCompact := int64(0) + if len(scheduledCompactBytes) != 0 { + scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main + if scheduledCompact <= s.compactMainRev { + scheduledCompact = 0 + } + } + tx.Unlock() if scheduledCompact != 0 { @@ -324,75 +474,6 @@ func (s *store) restore() error { return nil } -type revKeyValue struct { - key []byte - kv mvccpb.KeyValue - kstr string -} - -func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { - rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) - go func() { - currentRev := int64(1) - defer func() { revc <- currentRev }() - // restore the tree index from streaming the unordered index. 
- kiCache := make(map[string]*keyIndex, restoreChunkKeys) - for rkv := range rkvc { - ki, ok := kiCache[rkv.kstr] - // purge kiCache if many keys but still missing in the cache - if !ok && len(kiCache) >= restoreChunkKeys { - i := 10 - for k := range kiCache { - delete(kiCache, k) - if i--; i == 0 { - break - } - } - } - // cache miss, fetch from tree index if there - if !ok { - ki = &keyIndex{key: rkv.kv.Key} - if idxKey := idx.KeyIndex(ki); idxKey != nil { - kiCache[rkv.kstr], ki = idxKey, idxKey - ok = true - } - } - rev := bytesToRev(rkv.key) - currentRev = rev.main - if ok { - if isTombstone(rkv.key) { - ki.tombstone(rev.main, rev.sub) - continue - } - ki.put(rev.main, rev.sub) - } else if !isTombstone(rkv.key) { - ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) - idx.Insert(ki) - kiCache[rkv.kstr] = ki - } - } - }() - return rkvc, revc -} - -func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { - for i, key := range keys { - rkv := revKeyValue{key: key} - if err := rkv.kv.Unmarshal(vals[i]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - rkv.kstr = string(rkv.kv.Key) - if isTombstone(key) { - delete(keyToLease, rkv.kstr) - } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease { - keyToLease[rkv.kstr] = lid - } else { - delete(keyToLease, rkv.kstr) - } - kvc <- rkv - } -} - func (s *store) Close() error { close(s.stopc) s.fifoSched.Stop() @@ -409,10 +490,180 @@ func (a *store) Equal(b *store) bool { return a.kvindex.Equal(b.kvindex) } -func (s *store) saveIndex(tx backend.BatchTx) { +// range is a keyword in Go, add Keys suffix. 
+func (s *store) rangeKeys(key, end []byte, limit, rangeRev int64, countOnly bool) (kvs []mvccpb.KeyValue, count int, curRev int64, err error) { + curRev = int64(s.currentRev.main) + if s.currentRev.sub > 0 { + curRev += 1 + } + + if rangeRev > curRev { + return nil, -1, s.currentRev.main, ErrFutureRev + } + var rev int64 + if rangeRev <= 0 { + rev = curRev + } else { + rev = rangeRev + } + if rev < s.compactMainRev { + return nil, -1, 0, ErrCompacted + } + + _, revpairs := s.kvindex.Range(key, end, int64(rev)) + if len(revpairs) == 0 { + return nil, 0, curRev, nil + } + if countOnly { + return nil, len(revpairs), curRev, nil + } + + for _, revpair := range revpairs { + start, end := revBytesRange(revpair) + + _, vs := s.tx.UnsafeRange(keyBucketName, start, end, 0) + if len(vs) != 1 { + plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) + } + + var kv mvccpb.KeyValue + if err := kv.Unmarshal(vs[0]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + kvs = append(kvs, kv) + if limit > 0 && len(kvs) >= int(limit) { + break + } + } + return kvs, len(revpairs), curRev, nil +} + +func (s *store) put(key, value []byte, leaseID lease.LeaseID) { + s.txnModify = true + + rev := s.currentRev.main + 1 + c := rev + oldLease := lease.NoLease + + // if the key exists before, use its previous created and + // get its previous leaseID + _, created, ver, err := s.kvindex.Get(key, rev) + if err == nil { + c = created.main + oldLease = s.le.GetLease(lease.LeaseItem{Key: string(key)}) + } + + ibytes := newRevBytes() + revToBytes(revision{main: rev, sub: s.currentRev.sub}, ibytes) + + ver = ver + 1 + kv := mvccpb.KeyValue{ + Key: key, + Value: value, + CreateRevision: c, + ModRevision: rev, + Version: ver, + Lease: int64(leaseID), + } + + d, err := kv.Marshal() + if err != nil { + plog.Fatalf("cannot marshal event: %v", err) + } + + s.tx.UnsafeSeqPut(keyBucketName, ibytes, d) + s.kvindex.Put(key, revision{main: rev, sub: s.currentRev.sub}) + 
s.changes = append(s.changes, kv) + s.currentRev.sub += 1 + + if oldLease != lease.NoLease { + if s.le == nil { + panic("no lessor to detach lease") + } + + err = s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + plog.Errorf("unexpected error from lease detach: %v", err) + } + } + + if leaseID != lease.NoLease { + if s.le == nil { + panic("no lessor to attach lease") + } + + err = s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + panic("unexpected error from lease Attach") + } + } +} + +func (s *store) deleteRange(key, end []byte) int64 { + s.txnModify = true + + rrev := s.currentRev.main + if s.currentRev.sub > 0 { + rrev += 1 + } + keys, revs := s.kvindex.Range(key, end, rrev) + + if len(keys) == 0 { + return 0 + } + + for i, key := range keys { + s.delete(key, revs[i]) + } + return int64(len(keys)) +} + +func (s *store) delete(key []byte, rev revision) { + mainrev := s.currentRev.main + 1 + + ibytes := newRevBytes() + revToBytes(revision{main: mainrev, sub: s.currentRev.sub}, ibytes) + ibytes = appendMarkTombstone(ibytes) + + kv := mvccpb.KeyValue{ + Key: key, + } + + d, err := kv.Marshal() + if err != nil { + plog.Fatalf("cannot marshal event: %v", err) + } + + s.tx.UnsafeSeqPut(keyBucketName, ibytes, d) + err = s.kvindex.Tombstone(key, revision{main: mainrev, sub: s.currentRev.sub}) + if err != nil { + plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) + } + s.changes = append(s.changes, kv) + s.currentRev.sub += 1 + + item := lease.LeaseItem{Key: string(key)} + leaseID := s.le.GetLease(item) + + if leaseID != lease.NoLease { + err = s.le.Detach(leaseID, []lease.LeaseItem{item}) + if err != nil { + plog.Errorf("cannot detach %v", err) + } + } +} + +func (s *store) getChanges() []mvccpb.KeyValue { + changes := s.changes + s.changes = make([]mvccpb.KeyValue, 0, 4) + return changes +} + +func (s *store) saveIndex() { if s.ig == nil { return } + tx := s.tx bs := s.bytesBuf8 
binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex()) // put the index into the underlying backend diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go deleted file mode 100644 index 13d4d530d0a..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -type storeTxnRead struct { - s *store - tx backend.ReadTx - - firstRev int64 - rev int64 -} - -func (s *store) Read() TxnRead { - s.mu.RLock() - tx := s.b.ReadTx() - s.revMu.RLock() - tx.Lock() - firstRev, rev := s.compactMainRev, s.currentRev - s.revMu.RUnlock() - return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev}) -} - -func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } -func (tr *storeTxnRead) Rev() int64 { return tr.rev } - -func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - return tr.rangeKeys(key, end, tr.Rev(), ro) -} - -func (tr *storeTxnRead) End() { - tr.tx.Unlock() - tr.s.mu.RUnlock() -} - -type storeTxnWrite struct { - *storeTxnRead - tx backend.BatchTx - // beginRev is the revision where the txn begins; it will write to the next revision. 
- beginRev int64 - changes []mvccpb.KeyValue -} - -func (s *store) Write() TxnWrite { - s.mu.RLock() - tx := s.b.BatchTx() - tx.Lock() - tw := &storeTxnWrite{ - storeTxnRead: &storeTxnRead{s, tx, 0, 0}, - tx: tx, - beginRev: s.currentRev, - changes: make([]mvccpb.KeyValue, 0, 4), - } - return newMetricsTxnWrite(tw) -} - -func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev } - -func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - rev := tw.beginRev - if len(tw.changes) > 0 { - rev++ - } - return tw.rangeKeys(key, end, rev, ro) -} - -func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { - if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { - return n, int64(tw.beginRev + 1) - } - return 0, int64(tw.beginRev) -} - -func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { - tw.put(key, value, lease) - return int64(tw.beginRev + 1) -} - -func (tw *storeTxnWrite) End() { - // only update index if the txn modifies the mvcc state. - if len(tw.changes) != 0 { - tw.s.saveIndex(tw.tx) - // hold revMu lock to prevent new read txns from opening until writeback. 
- tw.s.revMu.Lock() - tw.s.currentRev++ - } - tw.tx.Unlock() - if len(tw.changes) != 0 { - tw.s.revMu.Unlock() - } - tw.s.mu.RUnlock() -} - -func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { - rev := ro.Rev - if rev > curRev { - return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev - } - if rev <= 0 { - rev = curRev - } - if rev < tr.s.compactMainRev { - return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted - } - - _, revpairs := tr.s.kvindex.Range(key, end, int64(rev)) - if len(revpairs) == 0 { - return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil - } - if ro.Count { - return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil - } - - var kvs []mvccpb.KeyValue - for _, revpair := range revpairs { - start, end := revBytesRange(revpair) - _, vs := tr.tx.UnsafeRange(keyBucketName, start, end, 0) - if len(vs) != 1 { - plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) - } - - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vs[0]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - kvs = append(kvs, kv) - if ro.Limit > 0 && len(kvs) >= int(ro.Limit) { - break - } - } - return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil -} - -func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { - rev := tw.beginRev + 1 - c := rev - oldLease := lease.NoLease - - // if the key exists before, use its previous created and - // get its previous leaseID - _, created, ver, err := tw.s.kvindex.Get(key, rev) - if err == nil { - c = created.main - oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) - } - - ibytes := newRevBytes() - idxRev := revision{main: rev, sub: int64(len(tw.changes))} - revToBytes(idxRev, ibytes) - - ver = ver + 1 - kv := mvccpb.KeyValue{ - Key: key, - Value: value, - CreateRevision: c, - ModRevision: rev, - Version: ver, - Lease: int64(leaseID), - } - - d, err := kv.Marshal() - if err != nil { 
- plog.Fatalf("cannot marshal event: %v", err) - } - - tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - tw.s.kvindex.Put(key, idxRev) - tw.changes = append(tw.changes, kv) - - if oldLease != lease.NoLease { - if tw.s.le == nil { - panic("no lessor to detach lease") - } - err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - plog.Errorf("unexpected error from lease detach: %v", err) - } - } - if leaseID != lease.NoLease { - if tw.s.le == nil { - panic("no lessor to attach lease") - } - err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - panic("unexpected error from lease Attach") - } - } -} - -func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { - rrev := tw.beginRev - if len(tw.changes) > 0 { - rrev += 1 - } - keys, revs := tw.s.kvindex.Range(key, end, rrev) - if len(keys) == 0 { - return 0 - } - for i, key := range keys { - tw.delete(key, revs[i]) - } - return int64(len(keys)) -} - -func (tw *storeTxnWrite) delete(key []byte, rev revision) { - ibytes := newRevBytes() - idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} - revToBytes(idxRev, ibytes) - ibytes = appendMarkTombstone(ibytes) - - kv := mvccpb.KeyValue{Key: key} - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - err = tw.s.kvindex.Tombstone(key, idxRev) - if err != nil { - plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) - } - tw.changes = append(tw.changes, kv) - - item := lease.LeaseItem{Key: string(key)} - leaseID := tw.s.le.GetLease(item) - - if leaseID != lease.NoLease { - err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) - if err != nil { - plog.Errorf("cannot detach %v", err) - } - } -} - -func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes } diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go index 
a65fe59b996..aa8af6aa552 100644 --- a/vendor/github.com/coreos/etcd/mvcc/metrics.go +++ b/vendor/github.com/coreos/etcd/mvcc/metrics.go @@ -15,8 +15,6 @@ package mvcc import ( - "sync" - "github.com/prometheus/client_golang/prometheus" ) @@ -131,21 +129,12 @@ var ( Buckets: prometheus.ExponentialBuckets(100, 2, 14), }) - dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + dbTotalSize = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", Name: "db_total_size_in_bytes", Help: "Total size of the underlying database in bytes.", - }, - func() float64 { - reportDbTotalSizeInBytesMu.RLock() - defer reportDbTotalSizeInBytesMu.RUnlock() - return reportDbTotalSizeInBytes() - }, - ) - // overridden by mvcc initialization - reportDbTotalSizeInBytesMu sync.RWMutex - reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 } + }) ) func init() { diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go deleted file mode 100644 index fd2144279ae..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "github.com/coreos/etcd/lease" -) - -type metricsTxnWrite struct { - TxnWrite - ranges uint - puts uint - deletes uint -} - -func newMetricsTxnRead(tr TxnRead) TxnRead { - return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0} -} - -func newMetricsTxnWrite(tw TxnWrite) TxnWrite { - return &metricsTxnWrite{tw, 0, 0, 0} -} - -func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) { - tw.ranges++ - return tw.TxnWrite.Range(key, end, ro) -} - -func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) { - tw.deletes++ - return tw.TxnWrite.DeleteRange(key, end) -} - -func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - tw.puts++ - return tw.TxnWrite.Put(key, value, lease) -} - -func (tw *metricsTxnWrite) End() { - defer tw.TxnWrite.End() - if sum := tw.ranges + tw.puts + tw.deletes; sum != 1 { - if sum > 1 { - txnCounter.Inc() - } - return - } - switch { - case tw.ranges == 1: - rangeCounter.Inc() - case tw.puts == 1: - putCounter.Inc() - case tw.deletes == 1: - deleteCounter.Inc() - } -} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go index 7033f132662..aa053f4e66e 100644 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go @@ -713,7 +713,7 @@ func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } var fileDescriptorKv = []byte{ // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 
0x52, 0x2f, 0x62, 0xfa, diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go index 68d9ab71d27..dbb79bcb693 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go @@ -41,11 +41,9 @@ type watchable interface { } type watchableStore struct { - *store + mu sync.Mutex - // mu protects watcher groups and batches. It should never be locked - // before locking store.mu to avoid deadlock. - mu sync.RWMutex + *store // victims are watcher batches that were blocked on the watch channel victims []watcherBatch @@ -78,11 +76,9 @@ func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGet synced: newWatcherGroup(), stopc: make(chan struct{}), } - s.store.ReadView = &readView{s} - s.store.WriteView = &writeView{s} if s.le != nil { // use this store as the deleter so revokes trigger watch events - s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) + s.le.SetRangeDeleter(s) } s.wg.Add(2) go s.syncWatchersLoop() @@ -90,6 +86,89 @@ func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGet return s } +func (s *watchableStore) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + s.mu.Lock() + defer s.mu.Unlock() + + rev = s.store.Put(key, value, lease) + changes := s.store.getChanges() + if len(changes) != 1 { + plog.Panicf("unexpected len(changes) != 1 after put") + } + + ev := mvccpb.Event{ + Type: mvccpb.PUT, + Kv: &changes[0], + } + s.notify(rev, []mvccpb.Event{ev}) + return rev +} + +func (s *watchableStore) DeleteRange(key, end []byte) (n, rev int64) { + s.mu.Lock() + defer s.mu.Unlock() + + n, rev = s.store.DeleteRange(key, end) + changes := s.store.getChanges() + + if len(changes) != int(n) { + plog.Panicf("unexpected len(changes) != n after deleteRange") + } + + if n == 0 { + return n, rev + } + + evs := make([]mvccpb.Event, n) + for i := range changes { + evs[i] = 
mvccpb.Event{ + Type: mvccpb.DELETE, + Kv: &changes[i]} + evs[i].Kv.ModRevision = rev + } + s.notify(rev, evs) + return n, rev +} + +func (s *watchableStore) TxnBegin() int64 { + s.mu.Lock() + return s.store.TxnBegin() +} + +func (s *watchableStore) TxnEnd(txnID int64) error { + err := s.store.TxnEnd(txnID) + if err != nil { + return err + } + + changes := s.getChanges() + if len(changes) == 0 { + s.mu.Unlock() + return nil + } + + rev := s.store.Rev() + evs := make([]mvccpb.Event, len(changes)) + for i, change := range changes { + switch change.CreateRevision { + case 0: + evs[i] = mvccpb.Event{ + Type: mvccpb.DELETE, + Kv: &changes[i]} + evs[i].Kv.ModRevision = rev + default: + evs[i] = mvccpb.Event{ + Type: mvccpb.PUT, + Kv: &changes[i]} + } + } + + s.notify(rev, evs) + s.mu.Unlock() + + return nil +} + func (s *watchableStore) Close() error { close(s.stopc) s.wg.Wait() @@ -107,6 +186,9 @@ func (s *watchableStore) NewWatchStream() WatchStream { } func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { + s.mu.Lock() + defer s.mu.Unlock() + wa := &watcher{ key: key, end: end, @@ -116,24 +198,21 @@ func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch c fcs: fcs, } - s.mu.Lock() - s.revMu.RLock() - synced := startRev > s.store.currentRev || startRev == 0 + s.store.mu.Lock() + synced := startRev > s.store.currentRev.main || startRev == 0 if synced { - wa.minRev = s.store.currentRev + 1 + wa.minRev = s.store.currentRev.main + 1 if startRev > wa.minRev { wa.minRev = startRev } } + s.store.mu.Unlock() if synced { s.synced.add(wa) } else { slowWatcherGauge.Inc() s.unsynced.add(wa) } - s.revMu.RUnlock() - s.mu.Unlock() - watcherGauge.Inc() return wa, func() { s.cancelWatcher(wa) } @@ -179,35 +258,17 @@ func (s *watchableStore) cancelWatcher(wa *watcher) { s.mu.Unlock() } -func (s *watchableStore) Restore(b backend.Backend) error { - s.mu.Lock() - defer 
s.mu.Unlock() - err := s.store.Restore(b) - if err != nil { - return err - } - - for wa := range s.synced.watchers { - s.unsynced.watchers.add(wa) - } - s.synced = newWatcherGroup() - return nil -} - // syncWatchersLoop syncs the watcher in the unsynced map every 100ms. func (s *watchableStore) syncWatchersLoop() { defer s.wg.Done() for { - s.mu.RLock() + s.mu.Lock() st := time.Now() lastUnsyncedWatchers := s.unsynced.size() - s.mu.RUnlock() - - unsyncedWatchers := 0 - if lastUnsyncedWatchers > 0 { - unsyncedWatchers = s.syncWatchers() - } + s.syncWatchers() + unsyncedWatchers := s.unsynced.size() + s.mu.Unlock() syncDuration := time.Since(st) waitDuration := 100 * time.Millisecond @@ -234,9 +295,9 @@ func (s *watchableStore) syncVictimsLoop() { for s.moveVictims() != 0 { // try to update all victim watchers } - s.mu.RLock() + s.mu.Lock() isEmpty := len(s.victims) == 0 - s.mu.RUnlock() + s.mu.Unlock() var tickc <-chan time.Time if !isEmpty { @@ -279,8 +340,8 @@ func (s *watchableStore) moveVictims() (moved int) { // assign completed victim watchers to unsync/sync s.mu.Lock() - s.store.revMu.RLock() - curRev := s.store.currentRev + s.store.mu.Lock() + curRev := s.store.currentRev.main for w, eb := range wb { if newVictim != nil && newVictim[w] != nil { // couldn't send watch response; stays victim @@ -297,7 +358,7 @@ func (s *watchableStore) moveVictims() (moved int) { s.synced.add(w) } } - s.store.revMu.RUnlock() + s.store.mu.Unlock() s.mu.Unlock() } @@ -315,23 +376,19 @@ func (s *watchableStore) moveVictims() (moved int) { // 2. iterate over the set to get the minimum revision and remove compacted watchers // 3. use minimum revision to get all key-value pairs and send those events to watchers // 4. 
remove synced watchers in set from unsynced group and move to synced group -func (s *watchableStore) syncWatchers() int { - s.mu.Lock() - defer s.mu.Unlock() - +func (s *watchableStore) syncWatchers() { if s.unsynced.size() == 0 { - return 0 + return } - s.store.revMu.RLock() - defer s.store.revMu.RUnlock() + s.store.mu.Lock() + defer s.store.mu.Unlock() // in order to find key-value pairs from unsynced watchers, we need to // find min revision index, and these revisions can be used to // query the backend store of key-value pairs - curRev := s.store.currentRev + curRev := s.store.currentRev.main compactionRev := s.store.compactMainRev - wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) minBytes, maxBytes := newRevBytes(), newRevBytes() revToBytes(revision{main: minRev}, minBytes) @@ -339,7 +396,7 @@ func (s *watchableStore) syncWatchers() int { // UnsafeRange returns keys and values. And in boltdb, keys are revisions. // values are actual key-value pairs in backend. 
- tx := s.store.b.ReadTx() + tx := s.store.b.BatchTx() tx.Lock() revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) evs := kvsToEvents(wg, revs, vs) @@ -389,8 +446,6 @@ func (s *watchableStore) syncWatchers() int { vsz += len(v) } slowWatcherGauge.Set(float64(s.unsynced.size() + vsz)) - - return s.unsynced.size() } // kvsToEvents gets all events for the watchers from all key-value pairs @@ -456,8 +511,8 @@ func (s *watchableStore) addVictim(victim watcherBatch) { func (s *watchableStore) rev() int64 { return s.store.Rev() } func (s *watchableStore) progress(w *watcher) { - s.mu.RLock() - defer s.mu.RUnlock() + s.mu.Lock() + defer s.mu.Unlock() if _, ok := s.synced.watchers[w]; ok { w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go deleted file mode 100644 index 5c5bfda1341..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "github.com/coreos/etcd/mvcc/mvccpb" -) - -func (tw *watchableStoreTxnWrite) End() { - changes := tw.Changes() - if len(changes) == 0 { - tw.TxnWrite.End() - return - } - - rev := tw.Rev() + 1 - evs := make([]mvccpb.Event, len(changes)) - for i, change := range changes { - evs[i].Kv = &changes[i] - if change.CreateRevision == 0 { - evs[i].Type = mvccpb.DELETE - evs[i].Kv.ModRevision = rev - } else { - evs[i].Type = mvccpb.PUT - } - } - - // end write txn under watchable store lock so the updates are visible - // when asynchronous event posting checks the current store revision - tw.s.mu.Lock() - tw.s.notify(rev, evs) - tw.TxnWrite.End() - tw.s.mu.Unlock() -} - -type watchableStoreTxnWrite struct { - TxnWrite - s *watchableStore -} - -func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} } diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go index 6ef1d0ce8bb..2710c1cc940 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go +++ b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go @@ -183,7 +183,7 @@ func (wg *watcherGroup) add(wa *watcher) { // contains is whether the given key has a watcher in the group. func (wg *watcherGroup) contains(key string) bool { _, ok := wg.keyWatchers[key] - return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) + return ok || wg.ranges.Contains(adt.NewStringAffinePoint(key)) } // size gives the number of unique watchers in the group. 
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go index 9769771ea4f..6edbe593fb4 100644 --- a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go +++ b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go @@ -15,7 +15,6 @@ package adt import ( - "bytes" "math" ) @@ -135,29 +134,25 @@ func (x *intervalNode) updateMax() { type nodeVisitor func(n *intervalNode) bool // visit will call a node visitor on each node that overlaps the given interval -func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) bool { +func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) { if x == nil { - return true + return } v := iv.Compare(&x.iv.Ivl) switch { case v < 0: - if !x.left.visit(iv, nv) { - return false - } + x.left.visit(iv, nv) case v > 0: maxiv := Interval{x.iv.Ivl.Begin, x.max} if maxiv.Compare(iv) == 0 { - if !x.left.visit(iv, nv) || !x.right.visit(iv, nv) { - return false - } + x.left.visit(iv, nv) + x.right.visit(iv, nv) } default: - if !x.left.visit(iv, nv) || !nv(x) || !x.right.visit(iv, nv) { - return false - } + nv(x) + x.left.visit(iv, nv) + x.right.visit(iv, nv) } - return true } type IntervalValue struct { @@ -407,11 +402,10 @@ func (ivt *IntervalTree) MaxHeight() int { return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5) } -// IntervalVisitor is used on tree searches; return false to stop searching. +// IntervalVisitor is used on tree searchs; return false to stop searching. type IntervalVisitor func(n *IntervalValue) bool // Visit calls a visitor function on every tree node intersecting the given interval. -// It will visit each interval [x, y) in ascending order sorted on x. 
func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) { ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) }) } @@ -438,8 +432,8 @@ func (ivt *IntervalTree) Find(ivl Interval) (ret *IntervalValue) { return &n.iv } -// Intersects returns true if there is some tree node intersecting the given interval. -func (ivt *IntervalTree) Intersects(iv Interval) bool { +// Contains returns true if there is some tree node intersecting the given interval. +func (ivt *IntervalTree) Contains(iv Interval) bool { x := ivt.root for x != nil && iv.Compare(&x.iv.Ivl) != 0 { if x.left != nil && x.left.max.Compare(iv.Begin) > 0 { @@ -451,30 +445,6 @@ func (ivt *IntervalTree) Intersects(iv Interval) bool { return x != nil } -// Contains returns true if the interval tree's keys cover the entire given interval. -func (ivt *IntervalTree) Contains(ivl Interval) bool { - var maxEnd, minBegin Comparable - - isContiguous := true - ivt.Visit(ivl, func(n *IntervalValue) bool { - if minBegin == nil { - minBegin = n.Ivl.Begin - maxEnd = n.Ivl.End - return true - } - if maxEnd.Compare(n.Ivl.Begin) < 0 { - isContiguous = false - return false - } - if n.Ivl.End.Compare(maxEnd) > 0 { - maxEnd = n.Ivl.End - } - return true - }) - - return isContiguous && minBegin != nil && maxEnd.Compare(ivl.End) >= 0 && minBegin.Compare(ivl.Begin) <= 0 -} - // Stab returns a slice with all elements in the tree intersecting the interval. 
func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) { if ivt.count == 0 { @@ -559,32 +529,3 @@ func (v Int64Comparable) Compare(c Comparable) int { } return 0 } - -// BytesAffineComparable treats empty byte arrays as > all other byte arrays -type BytesAffineComparable []byte - -func (b BytesAffineComparable) Compare(c Comparable) int { - bc := c.(BytesAffineComparable) - - if len(b) == 0 { - if len(bc) == 0 { - return 0 - } - return 1 - } - if len(bc) == 0 { - return -1 - } - - return bytes.Compare(b, bc) -} - -func NewBytesAffineInterval(begin, end []byte) Interval { - return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)} -} -func NewBytesAffinePoint(b []byte) Interval { - be := make([]byte, len(b)+1) - copy(be, b) - be[len(b)] = 0 - return NewBytesAffineInterval(b, be) -} diff --git a/vendor/github.com/coreos/etcd/pkg/cors/BUILD b/vendor/github.com/coreos/etcd/pkg/cors/BUILD deleted file mode 100644 index e707c8a14d5..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/cors/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["cors.go"], - importpath = "github.com/coreos/etcd/pkg/cors", - visibility = ["//visibility:public"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/cors/cors.go b/vendor/github.com/coreos/etcd/pkg/cors/cors.go deleted file mode 100644 index 0c64f16a390..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/cors/cors.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cors handles cross-origin HTTP requests (CORS). -package cors - -import ( - "fmt" - "net/http" - "net/url" - "sort" - "strings" -) - -type CORSInfo map[string]bool - -// Set implements the flag.Value interface to allow users to define a list of CORS origins -func (ci *CORSInfo) Set(s string) error { - m := make(map[string]bool) - for _, v := range strings.Split(s, ",") { - v = strings.TrimSpace(v) - if v == "" { - continue - } - if v != "*" { - if _, err := url.Parse(v); err != nil { - return fmt.Errorf("Invalid CORS origin: %s", err) - } - } - m[v] = true - - } - *ci = CORSInfo(m) - return nil -} - -func (ci *CORSInfo) String() string { - o := make([]string, 0) - for k := range *ci { - o = append(o, k) - } - sort.StringSlice(o).Sort() - return strings.Join(o, ",") -} - -// OriginAllowed determines whether the server will allow a given CORS origin. -func (c CORSInfo) OriginAllowed(origin string) bool { - return c["*"] || c[origin] -} - -type CORSHandler struct { - Handler http.Handler - Info *CORSInfo -} - -// addHeader adds the correct cors headers given an origin -func (h *CORSHandler) addHeader(w http.ResponseWriter, origin string) { - w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE") - w.Header().Add("Access-Control-Allow-Origin", origin) - w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization") -} - -// ServeHTTP adds the correct CORS headers based on the origin and returns immediately -// with a 200 OK if the method is OPTIONS. 
-func (h *CORSHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Write CORS header. - if h.Info.OriginAllowed("*") { - h.addHeader(w, "*") - } else if origin := req.Header.Get("Origin"); h.Info.OriginAllowed(origin) { - h.addHeader(w, origin) - } - - if req.Method == "OPTIONS" { - w.WriteHeader(http.StatusOK) - return - } - - h.Handler.ServeHTTP(w, req) -} diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go b/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go deleted file mode 100644 index 74499eb2737..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package debugutil includes utility functions for debugging. -package debugutil diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go deleted file mode 100644 index 8d5544a3dca..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package debugutil - -import ( - "net/http" - "net/http/pprof" - "runtime" -) - -const HTTPPrefixPProf = "/debug/pprof" - -// PProfHandlers returns a map of pprof handlers keyed by the HTTP path. -func PProfHandlers() map[string]http.Handler { - // set only when there's no existing setting - if runtime.SetMutexProfileFraction(-1) == 0 { - // 1 out of 5 mutex events are reported, on average - runtime.SetMutexProfileFraction(5) - } - - m := make(map[string]http.Handler) - - m[HTTPPrefixPProf+"/"] = http.HandlerFunc(pprof.Index) - m[HTTPPrefixPProf+"/profile"] = http.HandlerFunc(pprof.Profile) - m[HTTPPrefixPProf+"/symbol"] = http.HandlerFunc(pprof.Symbol) - m[HTTPPrefixPProf+"/cmdline"] = http.HandlerFunc(pprof.Cmdline) - m[HTTPPrefixPProf+"/trace "] = http.HandlerFunc(pprof.Trace) - m[HTTPPrefixPProf+"/heap"] = pprof.Handler("heap") - m[HTTPPrefixPProf+"/goroutine"] = pprof.Handler("goroutine") - m[HTTPPrefixPProf+"/threadcreate"] = pprof.Handler("threadcreate") - m[HTTPPrefixPProf+"/block"] = pprof.Handler("block") - m[HTTPPrefixPProf+"/mutex"] = pprof.Handler("mutex") - - return m -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go index fce5126c695..aad40b75904 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go @@ -17,7 +17,6 @@ package fileutil import ( "fmt" - "io" "io/ioutil" "os" "path/filepath" @@ -102,11 +101,11 @@ func Exist(name string) bool { // shorten the length of the file. 
func ZeroToEnd(f *os.File) error { // TODO: support FALLOC_FL_ZERO_RANGE - off, err := f.Seek(0, io.SeekCurrent) + off, err := f.Seek(0, os.SEEK_CUR) if err != nil { return err } - lenf, lerr := f.Seek(0, io.SeekEnd) + lenf, lerr := f.Seek(0, os.SEEK_END) if lerr != nil { return lerr } @@ -117,6 +116,6 @@ func ZeroToEnd(f *os.File) error { if err = Preallocate(f, lenf, true); err != nil { return err } - _, err = f.Seek(off, io.SeekStart) + _, err = f.Seek(off, os.SEEK_SET) return err } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go index 939fea62381..dec25a1af44 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go @@ -17,7 +17,6 @@ package fileutil import ( - "io" "os" "syscall" ) @@ -37,7 +36,7 @@ const ( var ( wrlck = syscall.Flock_t{ Type: syscall.F_WRLCK, - Whence: int16(io.SeekStart), + Whence: int16(os.SEEK_SET), Start: 0, Len: 0, } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go index c747b7cf81f..bb7f0281239 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go @@ -14,10 +14,7 @@ package fileutil -import ( - "io" - "os" -) +import "os" // Preallocate tries to allocate the space for given // file. This operation is only supported on linux by a @@ -25,10 +22,6 @@ import ( // If the operation is unsupported, no error will be returned. // Otherwise, the error encountered will be returned. 
func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { - if sizeInBytes == 0 { - // fallocate will return EINVAL if length is 0; skip - return nil - } if extendFile { return preallocExtend(f, sizeInBytes) } @@ -36,15 +29,15 @@ func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { } func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { - curOff, err := f.Seek(0, io.SeekCurrent) + curOff, err := f.Seek(0, os.SEEK_CUR) if err != nil { return err } - size, err := f.Seek(sizeInBytes, io.SeekEnd) + size, err := f.Seek(sizeInBytes, os.SEEK_END) if err != nil { return err } - if _, err = f.Seek(curOff, io.SeekStart); err != nil { + if _, err = f.Seek(curOff, os.SEEK_SET); err != nil { return err } if sizeInBytes > size { diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go index 09f44e7c71d..859fc9d49e1 100644 --- a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go +++ b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go @@ -13,6 +13,15 @@ import ( "net/http" ) +func RequestCanceler(req *http.Request) func() { + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} + // GracefulClose drains http.Response.Body until it hits EOF // and closes it. This prevents TCP/TLS connections from closing, // therefore available for reuse. diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/github.com/coreos/etcd/pkg/idutil/id.go index 2da21062657..931beb2d058 100644 --- a/vendor/github.com/coreos/etcd/pkg/idutil/id.go +++ b/vendor/github.com/coreos/etcd/pkg/idutil/id.go @@ -32,8 +32,8 @@ const ( // a node member ID. // // The initial id is in this format: -// High order 2 bytes are from memberID, next 5 bytes are from timestamp, -// and low order one byte is a counter. +// High order byte is memberID, next 5 bytes are from timestamp, +// and low order 2 bytes are 0s. 
// | prefix | suffix | // | 2 bytes | 5 bytes | 1 byte | // | memberID | timestamp | cnt | diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD index e918523a418..2455b3c3edd 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD @@ -18,6 +18,7 @@ go_library( deps = [ "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/github.com/coreos/etcd/pkg/cpuutil:go_default_library", diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go index 5e38dc98dbf..bb5f392b34c 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go +++ b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go @@ -16,13 +16,14 @@ package netutil import ( - "context" "net" "net/url" "reflect" "sort" "time" + "golang.org/x/net/context" + "github.com/coreos/etcd/pkg/types" "github.com/coreos/pkg/capnslog" ) @@ -31,38 +32,11 @@ var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil") // indirection for testing - resolveTCPAddr = resolveTCPAddrDefault + resolveTCPAddr = net.ResolveTCPAddr ) const retryInterval = time.Second -// taken from go's ResolveTCP code but uses configurable ctx -func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, error) { - host, port, serr := net.SplitHostPort(addr) - if serr != nil { - return nil, serr - } - portnum, perr := net.DefaultResolver.LookupPort(ctx, "tcp", port) - if perr != nil { - return nil, perr - } - - var ips []net.IPAddr - if ip := net.ParseIP(host); ip != nil { - ips = []net.IPAddr{{IP: ip}} - } else { - // Try as a DNS name. 
- ipss, err := net.DefaultResolver.LookupIPAddr(ctx, host) - if err != nil { - return nil, err - } - ips = ipss - } - // randomize? - ip := ips[0] - return &net.TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone}, nil -} - // resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr. // resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames // are resolved. @@ -101,7 +75,7 @@ func resolveURL(ctx context.Context, u url.URL) (string, error) { if host == "localhost" || net.ParseIP(host) != nil { return "", nil } - tcpAddr, err := resolveTCPAddr(ctx, u.Host) + tcpAddr, err := resolveTCPAddr("tcp", u.Host) if err == nil { plog.Infof("resolving %s to %s", u.Host, tcpAddr.String()) return tcpAddr.String(), nil diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go index bf8528b753a..79c59b01288 100644 --- a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go +++ b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go @@ -88,6 +88,8 @@ func (f *fifo) Schedule(j Job) { } } f.pendings = append(f.pendings, j) + + return } func (f *fifo) Pending() int { diff --git a/vendor/github.com/coreos/etcd/pkg/srv/BUILD b/vendor/github.com/coreos/etcd/pkg/srv/BUILD deleted file mode 100644 index 3707eb3e968..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/srv/BUILD +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["srv.go"], - importpath = "github.com/coreos/etcd/pkg/srv", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/etcd/pkg/types:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git 
a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/github.com/coreos/etcd/pkg/srv/srv.go deleted file mode 100644 index fefcbcb4b88..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/srv/srv.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package srv looks up DNS SRV records. -package srv - -import ( - "fmt" - "net" - "net/url" - "strings" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict - resolveTCPAddr = net.ResolveTCPAddr -) - -// GetCluster gets the cluster information via DNS discovery. -// Also sees each entry as a separate instance. 
-func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { - tempName := int(0) - tcp2ap := make(map[string]url.URL) - - // First, resolve the apurls - for _, url := range apurls { - tcpAddr, err := resolveTCPAddr("tcp", url.Host) - if err != nil { - return nil, err - } - tcp2ap[tcpAddr.String()] = url - } - - stringParts := []string{} - updateNodeMap := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", dns) - if err != nil { - return err - } - for _, srv := range addrs { - port := fmt.Sprintf("%d", srv.Port) - host := net.JoinHostPort(srv.Target, port) - tcpAddr, terr := resolveTCPAddr("tcp", host) - if terr != nil { - err = terr - continue - } - n := "" - url, ok := tcp2ap[tcpAddr.String()] - if ok { - n = name - } - if n == "" { - n = fmt.Sprintf("%d", tempName) - tempName++ - } - // SRV records have a trailing dot but URL shouldn't. - shortHost := strings.TrimSuffix(srv.Target, ".") - urlHost := net.JoinHostPort(shortHost, port) - stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) - if ok && url.Scheme != scheme { - err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) - } - } - if len(stringParts) == 0 { - return err - } - return nil - } - - failCount := 0 - err := updateNodeMap(service+"-ssl", "https") - srvErr := make([]string, 2) - if err != nil { - srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) - failCount++ - } - err = updateNodeMap(service, "http") - if err != nil { - srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) - failCount++ - } - if failCount == 2 { - return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) - } - return stringParts, nil -} - -type SRVClients struct { - Endpoints []string - SRVs []*net.SRV -} - -// GetClient looks up the client endpoints for a 
service and domain. -func GetClient(service, domain string) (*SRVClients, error) { - var urls []*url.URL - var srvs []*net.SRV - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - srvs = append(srvs, addrs...) - return nil - } - - errHTTPS := updateURLs(service+"-ssl", "https") - errHTTP := updateURLs(service, "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/BUILD b/vendor/github.com/coreos/etcd/pkg/testutil/BUILD index d3b887f366d..054063068f4 100644 --- a/vendor/github.com/coreos/etcd/pkg/testutil/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/testutil/BUILD @@ -3,7 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "assert.go", "leak.go", "pauseable_handler.go", "recorder.go", diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/assert.go b/vendor/github.com/coreos/etcd/pkg/testutil/assert.go deleted file mode 100644 index 9cf03457d52..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/testutil/assert.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package testutil - -import ( - "fmt" - "reflect" - "testing" -) - -func AssertEqual(t *testing.T, e, a interface{}, msg ...string) { - if (e == nil || a == nil) && (isNil(e) && isNil(a)) { - return - } - if reflect.DeepEqual(e, a) { - return - } - s := "" - if len(msg) > 1 { - s = msg[0] + ": " - } - s = fmt.Sprintf("%sexpected %+v, got %+v", s, e, a) - FatalStack(t, s) -} - -func AssertNil(t *testing.T, v interface{}) { - AssertEqual(t, nil, v) -} - -func AssertNotNil(t *testing.T, v interface{}) { - if v == nil { - t.Fatalf("expected non-nil, got %+v", v) - } -} - -func AssertTrue(t *testing.T, v bool, msg ...string) { - AssertEqual(t, true, v, msg...) -} - -func AssertFalse(t *testing.T, v bool, msg ...string) { - AssertEqual(t, false, v, msg...) -} - -func isNil(v interface{}) bool { - if v == nil { - return true - } - rv := reflect.ValueOf(v) - return rv.Kind() != reflect.Struct && rv.IsNil() -} diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/leak.go b/vendor/github.com/coreos/etcd/pkg/testutil/leak.go index a29d06d9bd0..80bc0eebc8a 100644 --- a/vendor/github.com/coreos/etcd/pkg/testutil/leak.go +++ b/vendor/github.com/coreos/etcd/pkg/testutil/leak.go @@ -62,11 +62,10 @@ func CheckLeakedGoroutine() bool { return true } -// CheckAfterTest returns an error if AfterTest would fail with an error. 
-func CheckAfterTest(d time.Duration) error { +func AfterTest(t *testing.T) { http.DefaultTransport.(*http.Transport).CloseIdleConnections() if testing.Short() { - return nil + return } var bad string badSubstring := map[string]string{ @@ -76,12 +75,10 @@ func CheckAfterTest(d time.Duration) error { "net.(*netFD).connect(": "a timing out dial", ").noteClientGone(": "a closenotifier sender", ").readLoop(": "a Transport", - ".grpc": "a gRPC resource", } var stacks string - begin := time.Now() - for time.Since(begin) < d { + for i := 0; i < 6; i++ { bad = "" stacks = strings.Join(interestingGoroutines(), "\n\n") for substr, what := range badSubstring { @@ -90,22 +87,13 @@ func CheckAfterTest(d time.Duration) error { } } if bad == "" { - return nil + return } // Bad stuff found, but goroutines might just still be // shutting down, so give it some time. time.Sleep(50 * time.Millisecond) } - return fmt.Errorf("appears to have leaked %s:\n%s", bad, stacks) -} - -// AfterTest is meant to run in a defer that executes after a test completes. -// It will detect common goroutine leaks, retrying in case there are goroutines -// not synchronously torn down, and fail the test if any goroutines are stuck. 
-func AfterTest(t *testing.T) { - if err := CheckAfterTest(300 * time.Millisecond); err != nil { - t.Errorf("Test %v", err) - } + t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks) } func interestingGoroutines() (gs []string) { @@ -118,7 +106,6 @@ func interestingGoroutines() (gs []string) { } stack := strings.TrimSpace(sl[1]) if stack == "" || - strings.Contains(stack, "sync.(*WaitGroup).Done") || strings.Contains(stack, "created by os/signal.init") || strings.Contains(stack, "runtime/panic.go") || strings.Contains(stack, "created by testing.RunTests") || diff --git a/vendor/github.com/coreos/etcd/pkg/transport/BUILD b/vendor/github.com/coreos/etcd/pkg/transport/BUILD index 3ae75dce137..7074e74a002 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/transport/BUILD @@ -7,7 +7,6 @@ go_library( "keepalive_listener.go", "limit_listen.go", "listener.go", - "listener_tls.go", "timeout_conn.go", "timeout_dialer.go", "timeout_listener.go", @@ -18,7 +17,10 @@ go_library( ], importpath = "github.com/coreos/etcd/pkg/transport", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library"], + deps = [ + "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library", + ], ) filegroup( diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go index 3b58b41543f..4fcdb5ad9a3 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go @@ -23,21 +23,22 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" + "log" "math/big" "net" "os" "path/filepath" - "strings" "time" + "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/tlsutil" ) -func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { +func NewListener(addr, scheme 
string, tlscfg *tls.Config) (l net.Listener, err error) { if l, err = newListener(addr, scheme); err != nil { return nil, err } - return wrapTLS(addr, scheme, tlsinfo, l) + return wrapTLS(addr, scheme, tlscfg, l) } func newListener(addr string, scheme string) (net.Listener, error) { @@ -48,11 +49,15 @@ func newListener(addr string, scheme string) (net.Listener, error) { return net.Listen("tcp", addr) } -func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { +func wrapTLS(addr, scheme string, tlscfg *tls.Config, l net.Listener) (net.Listener, error) { if scheme != "https" && scheme != "unixs" { return l, nil } - return newTLSListener(l, tlsinfo) + if tlscfg == nil { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr) + } + return tls.NewListener(l, tlscfg), nil } type TLSInfo struct { @@ -65,10 +70,6 @@ type TLSInfo struct { // ServerName ensures the cert matches the given host in case of discovery / virtual hosting ServerName string - // HandshakeFailure is optionally called when a connection fails to handshake. The - // connection will be closed immediately afterwards. - HandshakeFailure func(*tls.Conn, error) - selfCert bool // parseFunc exists to simplify testing. Typically, parseFunc @@ -85,7 +86,7 @@ func (info TLSInfo) Empty() bool { } func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { - if err = os.MkdirAll(dirpath, 0700); err != nil { + if err = fileutil.TouchDirAll(dirpath); err != nil { return } @@ -172,14 +173,6 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { MinVersion: tls.VersionTLS12, ServerName: info.ServerName, } - // this only reloads certs when there's a client request - // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching - cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - } - cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { - return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - } return cfg, nil } @@ -242,6 +235,9 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { if err != nil { return nil, err } + // if given a CA, trust any host with a cert signed by the CA + log.Println("warning: ignoring ServerName for user-provided CA for backwards compatibility is deprecated") + cfg.ServerName = "" } if info.selfCert { @@ -250,11 +246,31 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { return cfg, nil } -// IsClosedConnError returns true if the error is from closing listener, cmux. -// copied from golang.org/x/net/http2/http2.go -func IsClosedConnError(err error) bool { - // 'use of closed network connection' (Go <=1.8) - // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) - // 'mux: listener closed' (cmux.ErrListenerClosed) - return err != nil && strings.Contains(err.Error(), "closed") +// ShallowCopyTLSConfig copies *tls.Config. 
This is only +// work-around for go-vet tests, which complains +// +// assignment copies lock value to p: crypto/tls.Config contains sync.Once contains sync.Mutex +// +// Keep up-to-date with 'go/src/crypto/tls/common.go' +func ShallowCopyTLSConfig(cfg *tls.Config) *tls.Config { + ncfg := tls.Config{ + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } + return &ncfg } diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go deleted file mode 100644 index 86511860335..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "net" - "strings" - "sync" -) - -// tlsListener overrides a TLS listener so it will reject client -// certificates with insufficient SAN credentials. -type tlsListener struct { - net.Listener - connc chan net.Conn - donec chan struct{} - err error - handshakeFailure func(*tls.Conn, error) -} - -func newTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { - if tlsinfo == nil || tlsinfo.Empty() { - l.Close() - return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) - } - tlscfg, err := tlsinfo.ServerConfig() - if err != nil { - return nil, err - } - - hf := tlsinfo.HandshakeFailure - if hf == nil { - hf = func(*tls.Conn, error) {} - } - tlsl := &tlsListener{ - Listener: tls.NewListener(l, tlscfg), - connc: make(chan net.Conn), - donec: make(chan struct{}), - handshakeFailure: hf, - } - go tlsl.acceptLoop() - return tlsl, nil -} - -func (l *tlsListener) Accept() (net.Conn, error) { - select { - case conn := <-l.connc: - return conn, nil - case <-l.donec: - return nil, l.err - } -} - -// acceptLoop launches each TLS handshake in a separate goroutine -// to prevent a hanging TLS connection from blocking other connections. 
-func (l *tlsListener) acceptLoop() { - var wg sync.WaitGroup - var pendingMu sync.Mutex - - pending := make(map[net.Conn]struct{}) - ctx, cancel := context.WithCancel(context.Background()) - defer func() { - cancel() - pendingMu.Lock() - for c := range pending { - c.Close() - } - pendingMu.Unlock() - wg.Wait() - close(l.donec) - }() - - for { - conn, err := l.Listener.Accept() - if err != nil { - l.err = err - return - } - - pendingMu.Lock() - pending[conn] = struct{}{} - pendingMu.Unlock() - - wg.Add(1) - go func() { - defer func() { - if conn != nil { - conn.Close() - } - wg.Done() - }() - - tlsConn := conn.(*tls.Conn) - herr := tlsConn.Handshake() - pendingMu.Lock() - delete(pending, conn) - pendingMu.Unlock() - if herr != nil { - l.handshakeFailure(tlsConn, herr) - return - } - - st := tlsConn.ConnectionState() - if len(st.PeerCertificates) > 0 { - cert := st.PeerCertificates[0] - addr := tlsConn.RemoteAddr().String() - if cerr := checkCert(ctx, cert, addr); cerr != nil { - l.handshakeFailure(tlsConn, cerr) - return - } - } - select { - case l.connc <- tlsConn: - conn = nil - case <-ctx.Done(): - } - }() - } -} - -func checkCert(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { - h, _, herr := net.SplitHostPort(remoteAddr) - if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { - return nil - } - if herr != nil { - return herr - } - if len(cert.IPAddresses) > 0 { - cerr := cert.VerifyHostname(h) - if cerr == nil { - return nil - } - if len(cert.DNSNames) == 0 { - return cerr - } - } - if len(cert.DNSNames) > 0 { - ok, err := isHostInDNS(ctx, h, cert.DNSNames) - if ok { - return nil - } - errStr := "" - if err != nil { - errStr = " (" + err.Error() + ")" - } - return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) - } - return nil -} - -func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { - // reverse lookup - wildcards, names := []string{}, []string{} - for _, 
dns := range dnsNames { - if strings.HasPrefix(dns, "*.") { - wildcards = append(wildcards, dns[1:]) - } else { - names = append(names, dns) - } - } - lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) - for _, name := range lnames { - // strip trailing '.' from PTR record - if name[len(name)-1] == '.' { - name = name[:len(name)-1] - } - for _, wc := range wildcards { - if strings.HasSuffix(name, wc) { - return true, nil - } - } - for _, n := range names { - if n == name { - return true, nil - } - } - } - err = lerr - - // forward lookup - for _, dns := range names { - addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) - if lerr != nil { - err = lerr - continue - } - for _, addr := range addrs { - if addr == host { - return true, nil - } - } - } - return false, err -} - -func (l *tlsListener) Close() error { - err := l.Listener.Close() - <-l.donec - return err -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go index b35e04955bb..0f4df5fbe3b 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go @@ -15,6 +15,7 @@ package transport import ( + "crypto/tls" "net" "time" ) @@ -22,7 +23,7 @@ import ( // NewTimeoutListener returns a listener that listens on the given address. // If read/write on the accepted connection blocks longer than its time limit, // it will return timeout error. 
-func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { +func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { ln, err := newListener(addr, scheme) if err != nil { return nil, err @@ -32,7 +33,7 @@ func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd rdtimeoutd: rdtimeoutd, wtimeoutd: wtimeoutd, } - if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil { + if ln, err = wrapTLS(addr, scheme, tlscfg, ln); err != nil { return nil, err } return ln, nil diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go index 123e2036f0f..c126b6f7fa0 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go @@ -22,7 +22,7 @@ import ( type unixListener struct{ net.Listener } func NewUnixListener(addr string) (net.Listener, error) { - if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + if err := os.RemoveAll(addr); err != nil { return nil, err } l, err := net.Listen("unix", addr) @@ -33,7 +33,7 @@ func NewUnixListener(addr string) (net.Listener, error) { } func (ul *unixListener) Close() error { - if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { + if err := os.RemoveAll(ul.Addr().String()); err != nil { return err } return ul.Listener.Close() diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait.go b/vendor/github.com/coreos/etcd/pkg/wait/wait.go index 34fa237e825..0f31eeb9790 100644 --- a/vendor/github.com/coreos/etcd/pkg/wait/wait.go +++ b/vendor/github.com/coreos/etcd/pkg/wait/wait.go @@ -21,29 +21,22 @@ import ( "sync" ) -// Wait is an interface that provides the ability to wait and trigger events that -// are associated with IDs. 
type Wait interface { - // Register waits returns a chan that waits on the given ID. - // The chan will be triggered when Trigger is called with - // the same ID. Register(id uint64) <-chan interface{} - // Trigger triggers the waiting chans with the given ID. Trigger(id uint64, x interface{}) IsRegistered(id uint64) bool } -type list struct { +type List struct { l sync.Mutex m map[uint64]chan interface{} } -// New creates a Wait. -func New() Wait { - return &list{m: make(map[uint64]chan interface{})} +func New() *List { + return &List{m: make(map[uint64]chan interface{})} } -func (w *list) Register(id uint64) <-chan interface{} { +func (w *List) Register(id uint64) <-chan interface{} { w.l.Lock() defer w.l.Unlock() ch := w.m[id] @@ -56,7 +49,7 @@ func (w *list) Register(id uint64) <-chan interface{} { return ch } -func (w *list) Trigger(id uint64, x interface{}) { +func (w *List) Trigger(id uint64, x interface{}) { w.l.Lock() ch := w.m[id] delete(w.m, id) @@ -67,7 +60,7 @@ func (w *list) Trigger(id uint64, x interface{}) { } } -func (w *list) IsRegistered(id uint64) bool { +func (w *List) IsRegistered(id uint64) bool { w.l.Lock() defer w.l.Unlock() _, ok := w.m[id] diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD b/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD index 881f0a54f10..e0aa0fddb02 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD @@ -6,18 +6,15 @@ go_library( "auth.go", "cluster.go", "doc.go", - "election.go", "kv.go", - "leader.go", + "kv_client_adapter.go", "lease.go", - "lock.go", - "logger.go", "maintenance.go", "metrics.go", - "register.go", "watch.go", "watch_broadcast.go", "watch_broadcasts.go", + "watch_client_adapter.go", "watch_ranges.go", "watcher.go", ], @@ -25,23 +22,17 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - 
"//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library", - "//vendor/github.com/coreos/etcd/clientv3/naming:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", "//vendor/github.com/coreos/etcd/proxy/grpcproxy/cache:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/golang.org/x/time/rate:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", - "//vendor/google.golang.org/grpc/naming:go_default_library", ], ) @@ -56,7 +47,6 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter:all-srcs", "//vendor/github.com/coreos/etcd/proxy/grpcproxy/cache:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD deleted file mode 100644 index 136c7198aa8..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "chan_stream.go", - "cluster_client_adapter.go", - "doc.go", - "election_client_adapter.go", - "kv_client_adapter.go", - "lease_client_adapter.go", - 
"lock_client_adapter.go", - "maintenance_client_adapter.go", - "watch_client_adapter.go", - ], - importpath = "github.com/coreos/etcd/proxy/grpcproxy/adapter", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/metadata:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go deleted file mode 100644 index 4ddf78e15ec..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package adapter - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -type cls2clc struct{ cls pb.ClusterServer } - -func ClusterServerToClusterClient(cls pb.ClusterServer) pb.ClusterClient { - return &cls2clc{cls} -} - -func (s *cls2clc) MemberList(ctx context.Context, r *pb.MemberListRequest, opts ...grpc.CallOption) (*pb.MemberListResponse, error) { - return s.cls.MemberList(ctx, r) -} - -func (s *cls2clc) MemberAdd(ctx context.Context, r *pb.MemberAddRequest, opts ...grpc.CallOption) (*pb.MemberAddResponse, error) { - return s.cls.MemberAdd(ctx, r) -} - -func (s *cls2clc) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest, opts ...grpc.CallOption) (*pb.MemberUpdateResponse, error) { - return s.cls.MemberUpdate(ctx, r) -} - -func (s *cls2clc) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest, opts ...grpc.CallOption) (*pb.MemberRemoveResponse, error) { - return s.cls.MemberRemove(ctx, r) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go deleted file mode 100644 index 7170be23304..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package adapter provides gRPC adapters between client and server -// gRPC interfaces without needing to go through a gRPC connection. -package adapter diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go deleted file mode 100644 index 383c1b9d8fb..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package adapter - -import ( - "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" - - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -type es2ec struct{ es v3electionpb.ElectionServer } - -func ElectionServerToElectionClient(es v3electionpb.ElectionServer) v3electionpb.ElectionClient { - return &es2ec{es} -} - -func (s *es2ec) Campaign(ctx context.Context, r *v3electionpb.CampaignRequest, opts ...grpc.CallOption) (*v3electionpb.CampaignResponse, error) { - return s.es.Campaign(ctx, r) -} - -func (s *es2ec) Proclaim(ctx context.Context, r *v3electionpb.ProclaimRequest, opts ...grpc.CallOption) (*v3electionpb.ProclaimResponse, error) { - return s.es.Proclaim(ctx, r) -} - -func (s *es2ec) Leader(ctx context.Context, r *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (*v3electionpb.LeaderResponse, error) { - return s.es.Leader(ctx, r) -} - -func (s *es2ec) Resign(ctx context.Context, r *v3electionpb.ResignRequest, opts ...grpc.CallOption) (*v3electionpb.ResignResponse, error) { - return s.es.Resign(ctx, r) -} - -func (s *es2ec) Observe(ctx context.Context, in *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (v3electionpb.Election_ObserveClient, error) { - cs := newPipeStream(ctx, func(ss chanServerStream) error { - return s.es.Observe(in, &es2ecServerStream{ss}) - }) - return &es2ecClientStream{cs}, nil -} - -// es2ecClientStream implements Election_ObserveClient -type es2ecClientStream struct{ chanClientStream } - -// es2ecServerStream implements Election_ObserveServer -type es2ecServerStream struct{ chanServerStream } - -func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error { - return s.SendMsg(rr) -} -func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*v3electionpb.LeaderResponse), nil -} - -func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error { - return s.SendMsg(rr) -} -func 
(s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*v3electionpb.LeaderRequest), nil -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go deleted file mode 100644 index d471fd9144b..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package adapter - -import ( - "golang.org/x/net/context" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "google.golang.org/grpc" -) - -type ls2lc struct { - leaseServer pb.LeaseServer -} - -func LeaseServerToLeaseClient(ls pb.LeaseServer) pb.LeaseClient { - return &ls2lc{ls} -} - -func (c *ls2lc) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (*pb.LeaseGrantResponse, error) { - return c.leaseServer.LeaseGrant(ctx, in) -} - -func (c *ls2lc) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (*pb.LeaseRevokeResponse, error) { - return c.leaseServer.LeaseRevoke(ctx, in) -} - -func (c *ls2lc) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (pb.Lease_LeaseKeepAliveClient, error) { - cs := newPipeStream(ctx, func(ss chanServerStream) error { - return c.leaseServer.LeaseKeepAlive(&ls2lcServerStream{ss}) - }) - return &ls2lcClientStream{cs}, nil -} - -func (c *ls2lc) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*pb.LeaseTimeToLiveResponse, error) { - return c.leaseServer.LeaseTimeToLive(ctx, in) -} - -// ls2lcClientStream implements Lease_LeaseKeepAliveClient -type ls2lcClientStream struct{ chanClientStream } - -// ls2lcServerStream implements Lease_LeaseKeepAliveServer -type ls2lcServerStream struct{ chanServerStream } - -func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error { - return s.SendMsg(rr) -} -func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*pb.LeaseKeepAliveResponse), nil -} - -func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error { - return s.SendMsg(rr) -} -func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*pb.LeaseKeepAliveRequest), nil -} diff --git 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go deleted file mode 100644 index 05e5cb020a1..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adapter - -import ( - "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" - - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -type ls2lsc struct{ ls v3lockpb.LockServer } - -func LockServerToLockClient(ls v3lockpb.LockServer) v3lockpb.LockClient { - return &ls2lsc{ls} -} - -func (s *ls2lsc) Lock(ctx context.Context, r *v3lockpb.LockRequest, opts ...grpc.CallOption) (*v3lockpb.LockResponse, error) { - return s.ls.Lock(ctx, r) -} - -func (s *ls2lsc) Unlock(ctx context.Context, r *v3lockpb.UnlockRequest, opts ...grpc.CallOption) (*v3lockpb.UnlockResponse, error) { - return s.ls.Unlock(ctx, r) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go deleted file mode 100644 index 9b21bf2576e..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, 
Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adapter - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -type mts2mtc struct{ mts pb.MaintenanceServer } - -func MaintenanceServerToMaintenanceClient(mts pb.MaintenanceServer) pb.MaintenanceClient { - return &mts2mtc{mts} -} - -func (s *mts2mtc) Alarm(ctx context.Context, r *pb.AlarmRequest, opts ...grpc.CallOption) (*pb.AlarmResponse, error) { - return s.mts.Alarm(ctx, r) -} - -func (s *mts2mtc) Status(ctx context.Context, r *pb.StatusRequest, opts ...grpc.CallOption) (*pb.StatusResponse, error) { - return s.mts.Status(ctx, r) -} - -func (s *mts2mtc) Defragment(ctx context.Context, dr *pb.DefragmentRequest, opts ...grpc.CallOption) (*pb.DefragmentResponse, error) { - return s.mts.Defragment(ctx, dr) -} - -func (s *mts2mtc) Hash(ctx context.Context, r *pb.HashRequest, opts ...grpc.CallOption) (*pb.HashResponse, error) { - return s.mts.Hash(ctx, r) -} - -func (s *mts2mtc) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (pb.Maintenance_SnapshotClient, error) { - cs := newPipeStream(ctx, func(ss chanServerStream) error { - return s.mts.Snapshot(in, &ss2scServerStream{ss}) - }) - return &ss2scClientStream{cs}, nil -} - -// ss2scClientStream implements Maintenance_SnapshotClient -type ss2scClientStream struct{ chanClientStream } - -// ss2scServerStream implements Maintenance_SnapshotServer -type ss2scServerStream struct{ 
chanServerStream } - -func (s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error { - return s.SendMsg(rr) -} -func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*pb.SnapshotResponse), nil -} - -func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error { - return s.SendMsg(rr) -} -func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*pb.SnapshotRequest), nil -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go deleted file mode 100644 index af4a13c4152..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package adapter - -import ( - "errors" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -var errAlreadySentHeader = errors.New("adapter: already sent header") - -type ws2wc struct{ wserv pb.WatchServer } - -func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient { - return &ws2wc{wserv} -} - -func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) { - cs := newPipeStream(ctx, func(ss chanServerStream) error { - return s.wserv.Watch(&ws2wcServerStream{ss}) - }) - return &ws2wcClientStream{cs}, nil -} - -// ws2wcClientStream implements Watch_WatchClient -type ws2wcClientStream struct{ chanClientStream } - -// ws2wcServerStream implements Watch_WatchServer -type ws2wcServerStream struct{ chanServerStream } - -func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error { - return s.SendMsg(wr) -} -func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*pb.WatchResponse), nil -} - -func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error { - return s.SendMsg(wr) -} -func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*pb.WatchRequest), nil -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD index 5d0c3e9e39c..ba6e5289a51 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD @@ -9,7 +9,7 @@ go_library( "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/pkg/adt:go_default_library", - 
"//vendor/github.com/golang/groupcache/lru:go_default_library", + "//vendor/github.com/karlseguin/ccache:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go index e84a05229e0..155bbf90022 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go @@ -12,18 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package cache exports functionality for efficiently caching and mapping -// `RangeRequest`s to corresponding `RangeResponse`s. package cache import ( "errors" "sync" + "time" + + "github.com/karlseguin/ccache" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/adt" - "github.com/golang/groupcache/lru" ) var ( @@ -31,12 +31,14 @@ var ( ErrCompacted = rpctypes.ErrGRPCCompacted ) +const defaultHistoricTTL = time.Hour +const defaultCurrentTTL = time.Minute + type Cache interface { Add(req *pb.RangeRequest, resp *pb.RangeResponse) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) Compact(revision int64) Invalidate(key []byte, endkey []byte) - Size() int Close() } @@ -52,17 +54,17 @@ func keyFunc(req *pb.RangeRequest) string { func NewCache(maxCacheEntries int) Cache { return &cache{ - lru: lru.New(maxCacheEntries), + lru: ccache.New(ccache.Configure().MaxSize(int64(maxCacheEntries))), compactedRev: -1, } } -func (c *cache) Close() {} +func (c *cache) Close() { c.lru.Stop() } // cache implements Cache type cache struct { mu sync.RWMutex - lru *lru.Cache + lru *ccache.Cache // a reverse index for cache invalidation cachedRanges adt.IntervalTree @@ -78,7 +80,11 @@ func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) { defer c.mu.Unlock() if req.Revision > c.compactedRev { - c.lru.Add(key, resp) + if req.Revision == 0 
{ + c.lru.Set(key, resp, defaultCurrentTTL) + } else { + c.lru.Set(key, resp, defaultHistoricTTL) + } } // we do not need to invalidate a request with a revision specified. // so we do not need to add it into the reverse index. @@ -110,16 +116,16 @@ func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) { func (c *cache) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) { key := keyFunc(req) - c.mu.Lock() - defer c.mu.Unlock() + c.mu.RLock() + defer c.mu.RUnlock() - if req.Revision > 0 && req.Revision < c.compactedRev { - c.lru.Remove(key) + if req.Revision < c.compactedRev { + c.lru.Delete(key) return nil, ErrCompacted } - if resp, ok := c.lru.Get(key); ok { - return resp.(*pb.RangeResponse), nil + if item := c.lru.Get(key); item != nil { + return item.Value().(*pb.RangeResponse), nil } return nil, errors.New("not exist") } @@ -143,7 +149,7 @@ func (c *cache) Invalidate(key, endkey []byte) { for _, iv := range ivs { keys := iv.Val.([]string) for _, key := range keys { - c.lru.Remove(key) + c.lru.Delete(key) } } // delete after removing all keys since it is destructive to 'ivs' @@ -160,9 +166,3 @@ func (c *cache) Compact(revision int64) { c.compactedRev = revision } } - -func (c *cache) Size() int { - c.mu.RLock() - defer c.mu.RUnlock() - return c.lru.Len() -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go index 899fb9be65f..8a2fa16c124 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go @@ -15,163 +15,38 @@ package grpcproxy import ( - "fmt" - "os" - "sync" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/clientv3/naming" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "golang.org/x/net/context" - "golang.org/x/time/rate" - "google.golang.org/grpc" - gnaming "google.golang.org/grpc/naming" ) -// allow maximum 1 retry per second -const resolveRetryRate = 1 - type 
clusterProxy struct { - clus clientv3.Cluster - ctx context.Context - gr *naming.GRPCResolver - - // advertise client URL - advaddr string - prefix string - - umu sync.RWMutex - umap map[string]gnaming.Update + client *clientv3.Client } -// NewClusterProxy takes optional prefix to fetch grpc-proxy member endpoints. -// The returned channel is closed when there is grpc-proxy endpoint registered -// and the client's context is canceled so the 'register' loop returns. -func NewClusterProxy(c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{}) { - cp := &clusterProxy{ - clus: c.Cluster, - ctx: c.Ctx(), - gr: &naming.GRPCResolver{Client: c}, - - advaddr: advaddr, - prefix: prefix, - umap: make(map[string]gnaming.Update), - } - - donec := make(chan struct{}) - if advaddr != "" && prefix != "" { - go func() { - defer close(donec) - cp.resolve(prefix) - }() - return cp, donec - } - - close(donec) - return cp, donec -} - -func (cp *clusterProxy) resolve(prefix string) { - rm := rate.NewLimiter(rate.Limit(resolveRetryRate), resolveRetryRate) - for rm.Wait(cp.ctx) == nil { - wa, err := cp.gr.Resolve(prefix) - if err != nil { - plog.Warningf("failed to resolve %q (%v)", prefix, err) - continue - } - cp.monitor(wa) - } -} - -func (cp *clusterProxy) monitor(wa gnaming.Watcher) { - for cp.ctx.Err() == nil { - ups, err := wa.Next() - if err != nil { - plog.Warningf("clusterProxy watcher error (%v)", err) - if grpc.ErrorDesc(err) == naming.ErrWatcherClosed.Error() { - return - } - } - - cp.umu.Lock() - for i := range ups { - switch ups[i].Op { - case gnaming.Add: - cp.umap[ups[i].Addr] = *ups[i] - case gnaming.Delete: - delete(cp.umap, ups[i].Addr) - } - } - cp.umu.Unlock() +func NewClusterProxy(c *clientv3.Client) pb.ClusterServer { + return &clusterProxy{ + client: c, } } func (cp *clusterProxy) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { - mresp, err := cp.clus.MemberAdd(ctx, r.PeerURLs) - if err != nil 
{ - return nil, err - } - resp := (pb.MemberAddResponse)(*mresp) - return &resp, err + conn := cp.client.ActiveConnection() + return pb.NewClusterClient(conn).MemberAdd(ctx, r) } func (cp *clusterProxy) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - mresp, err := cp.clus.MemberRemove(ctx, r.ID) - if err != nil { - return nil, err - } - resp := (pb.MemberRemoveResponse)(*mresp) - return &resp, err + conn := cp.client.ActiveConnection() + return pb.NewClusterClient(conn).MemberRemove(ctx, r) } func (cp *clusterProxy) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { - mresp, err := cp.clus.MemberUpdate(ctx, r.ID, r.PeerURLs) - if err != nil { - return nil, err - } - resp := (pb.MemberUpdateResponse)(*mresp) - return &resp, err + conn := cp.client.ActiveConnection() + return pb.NewClusterClient(conn).MemberUpdate(ctx, r) } -func (cp *clusterProxy) membersFromUpdates() ([]*pb.Member, error) { - cp.umu.RLock() - defer cp.umu.RUnlock() - mbs := make([]*pb.Member, 0, len(cp.umap)) - for addr, upt := range cp.umap { - m, err := decodeMeta(fmt.Sprint(upt.Metadata)) - if err != nil { - return nil, err - } - mbs = append(mbs, &pb.Member{Name: m.Name, ClientURLs: []string{addr}}) - } - return mbs, nil -} - -// MemberList wraps member list API with following rules: -// - If 'advaddr' is not empty and 'prefix' is not empty, return registered member lists via resolver -// - If 'advaddr' is not empty and 'prefix' is not empty and registered grpc-proxy members haven't been fetched, return the 'advaddr' -// - If 'advaddr' is not empty and 'prefix' is empty, return 'advaddr' without forcing it to 'register' -// - If 'advaddr' is empty, forward to member list API func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - if cp.advaddr != "" { - if cp.prefix != "" { - mbs, err := cp.membersFromUpdates() - if err != nil { - return 
nil, err - } - if len(mbs) > 0 { - return &pb.MemberListResponse{Members: mbs}, nil - } - } - // prefix is empty or no grpc-proxy members haven't been registered - hostname, _ := os.Hostname() - return &pb.MemberListResponse{Members: []*pb.Member{{Name: hostname, ClientURLs: []string{cp.advaddr}}}}, nil - } - mresp, err := cp.clus.MemberList(ctx) - if err != nil { - return nil, err - } - resp := (pb.MemberListResponse)(*mresp) - return &resp, err + conn := cp.client.ActiveConnection() + return pb.NewClusterClient(conn).MemberList(ctx, r) } diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go deleted file mode 100644 index 27115a81d7d..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2017 The etcd Lockors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcproxy - -import ( - "golang.org/x/net/context" - - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" -) - -type electionProxy struct { - client *clientv3.Client -} - -func NewElectionProxy(client *clientv3.Client) v3electionpb.ElectionServer { - return &electionProxy{client: client} -} - -func (ep *electionProxy) Campaign(ctx context.Context, req *v3electionpb.CampaignRequest) (*v3electionpb.CampaignResponse, error) { - return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Campaign(ctx, req) -} - -func (ep *electionProxy) Proclaim(ctx context.Context, req *v3electionpb.ProclaimRequest) (*v3electionpb.ProclaimResponse, error) { - return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Proclaim(ctx, req) -} - -func (ep *electionProxy) Leader(ctx context.Context, req *v3electionpb.LeaderRequest) (*v3electionpb.LeaderResponse, error) { - return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Leader(ctx, req) -} - -func (ep *electionProxy) Observe(req *v3electionpb.LeaderRequest, s v3electionpb.Election_ObserveServer) error { - conn := ep.client.ActiveConnection() - ctx, cancel := context.WithCancel(s.Context()) - defer cancel() - sc, err := v3electionpb.NewElectionClient(conn).Observe(ctx, req) - if err != nil { - return err - } - for { - rr, err := sc.Recv() - if err != nil { - return err - } - if err = s.Send(rr); err != nil { - return err - } - } -} - -func (ep *electionProxy) Resign(ctx context.Context, req *v3electionpb.ResignRequest) (*v3electionpb.ResignResponse, error) { - return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Resign(ctx, req) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go index 0654729a0ae..36885135797 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go @@ -33,7 +33,11 @@ func 
NewKvProxy(c *clientv3.Client) (pb.KVServer, <-chan struct{}) { cache: cache.NewCache(cache.DefaultMaxEntries), } donec := make(chan struct{}) - close(donec) + go func() { + defer close(donec) + <-c.Ctx().Done() + kv.cache.Close() + }() return kv, donec } @@ -61,14 +65,12 @@ func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRespo req.Serializable = true gresp := (*pb.RangeResponse)(resp.Get()) p.cache.Add(&req, gresp) - cacheKeys.Set(float64(p.cache.Size())) return gresp, nil } func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { p.cache.Invalidate(r.Key, nil) - cacheKeys.Set(float64(p.cache.Size())) resp, err := p.kv.Do(ctx, PutRequestToOp(r)) return (*pb.PutResponse)(resp.Put()), err @@ -76,7 +78,6 @@ func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, e func (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { p.cache.Invalidate(r.Key, r.RangeEnd) - cacheKeys.Set(float64(p.cache.Size())) resp, err := p.kv.Do(ctx, DelRequestToOp(r)) return (*pb.DeleteRangeResponse)(resp.Del()), err @@ -132,8 +133,6 @@ func (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, e p.txnToCache(r.Failure, resp.Responses) } - cacheKeys.Set(float64(p.cache.Size())) - return (*pb.TxnResponse)(resp), nil } @@ -148,8 +147,6 @@ func (p *kvProxy) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.Com p.cache.Compact(r.Revision) } - cacheKeys.Set(float64(p.cache.Size())) - return (*pb.CompactionResponse)(resp), err } @@ -186,12 +183,7 @@ func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op { opts = append(opts, clientv3.WithMinCreateRev(r.MinCreateRevision)) opts = append(opts, clientv3.WithMaxModRev(r.MaxModRevision)) opts = append(opts, clientv3.WithMinModRev(r.MinModRevision)) - if r.CountOnly { - opts = append(opts, clientv3.WithCountOnly()) - } - if r.KeysOnly { - opts = append(opts, clientv3.WithKeysOnly()) - } + if 
r.Serializable { opts = append(opts, clientv3.WithSerializable()) } @@ -202,15 +194,7 @@ func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op { func PutRequestToOp(r *pb.PutRequest) clientv3.Op { opts := []clientv3.OpOption{} opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease))) - if r.IgnoreValue { - opts = append(opts, clientv3.WithIgnoreValue()) - } - if r.IgnoreLease { - opts = append(opts, clientv3.WithIgnoreLease()) - } - if r.PrevKv { - opts = append(opts, clientv3.WithPrevKV()) - } + return clientv3.OpPut(string(r.Key), string(r.Value), opts...) } diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go similarity index 98% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go rename to vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go index fec401d9dd0..7880b18109d 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package adapter +package grpcproxy import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go deleted file mode 100644 index 86afdb7072b..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "math" - "sync" - - "golang.org/x/net/context" - "golang.org/x/time/rate" - "google.golang.org/grpc" - - "github.com/coreos/etcd/clientv3" -) - -const ( - lostLeaderKey = "__lostleader" // watched to detect leader loss - retryPerSecond = 10 -) - -type leader struct { - ctx context.Context - w clientv3.Watcher - // mu protects leaderc updates. - mu sync.RWMutex - leaderc chan struct{} - disconnc chan struct{} - donec chan struct{} -} - -func newLeader(ctx context.Context, w clientv3.Watcher) *leader { - l := &leader{ - ctx: clientv3.WithRequireLeader(ctx), - w: w, - leaderc: make(chan struct{}), - disconnc: make(chan struct{}), - donec: make(chan struct{}), - } - // begin assuming leader is lost - close(l.leaderc) - go l.recvLoop() - return l -} - -func (l *leader) recvLoop() { - defer close(l.donec) - - limiter := rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond) - rev := int64(math.MaxInt64 - 2) - for limiter.Wait(l.ctx) == nil { - wch := l.w.Watch(l.ctx, lostLeaderKey, clientv3.WithRev(rev), clientv3.WithCreatedNotify()) - cresp, ok := <-wch - if !ok { - l.loseLeader() - continue - } - if cresp.Err() != nil { - l.loseLeader() - if grpc.ErrorDesc(cresp.Err()) == grpc.ErrClientConnClosing.Error() { - close(l.disconnc) - return - } - continue - } - l.gotLeader() - <-wch - l.loseLeader() - } -} - -func (l *leader) loseLeader() { - l.mu.RLock() - defer l.mu.RUnlock() - select { - case <-l.leaderc: - default: - close(l.leaderc) - } -} - -// gotLeader will force update the leadership status to having a 
leader. -func (l *leader) gotLeader() { - l.mu.Lock() - defer l.mu.Unlock() - select { - case <-l.leaderc: - l.leaderc = make(chan struct{}) - default: - } -} - -func (l *leader) disconnectNotify() <-chan struct{} { return l.disconnc } - -func (l *leader) stopNotify() <-chan struct{} { return l.donec } - -// lostNotify returns a channel that is closed if there has been -// a leader loss not yet followed by a leader reacquire. -func (l *leader) lostNotify() <-chan struct{} { - l.mu.RLock() - defer l.mu.RUnlock() - return l.leaderc -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go index 19c2249a7e2..4f870220b79 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go @@ -15,353 +15,73 @@ package grpcproxy import ( - "io" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - "golang.org/x/net/context" "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" ) type leaseProxy struct { - // leaseClient handles req from LeaseGrant() that requires a lease ID. - leaseClient pb.LeaseClient - - lessor clientv3.Lease - - ctx context.Context - - leader *leader - - // mu protects adding outstanding leaseProxyStream through wg. - mu sync.RWMutex - - // wg waits until all outstanding leaseProxyStream quit. 
- wg sync.WaitGroup + client *clientv3.Client } -func NewLeaseProxy(c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) { - cctx, cancel := context.WithCancel(c.Ctx()) - lp := &leaseProxy{ - leaseClient: pb.NewLeaseClient(c.ActiveConnection()), - lessor: c.Lease, - ctx: cctx, - leader: newLeader(c.Ctx(), c.Watcher), +func NewLeaseProxy(c *clientv3.Client) pb.LeaseServer { + return &leaseProxy{ + client: c, } - ch := make(chan struct{}) - go func() { - defer close(ch) - <-lp.leader.stopNotify() - lp.mu.Lock() - select { - case <-lp.ctx.Done(): - case <-lp.leader.disconnectNotify(): - cancel() - } - <-lp.ctx.Done() - lp.mu.Unlock() - lp.wg.Wait() - }() - return lp, ch } func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - rp, err := lp.leaseClient.LeaseGrant(ctx, cr) - if err != nil { - return nil, err - } - lp.leader.gotLeader() - return rp, nil + conn := lp.client.ActiveConnection() + return pb.NewLeaseClient(conn).LeaseGrant(ctx, cr) } func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - r, err := lp.lessor.Revoke(ctx, clientv3.LeaseID(rr.ID)) - if err != nil { - return nil, err - } - lp.leader.gotLeader() - return (*pb.LeaseRevokeResponse)(r), nil + conn := lp.client.ActiveConnection() + return pb.NewLeaseClient(conn).LeaseRevoke(ctx, rr) } func (lp *leaseProxy) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - var ( - r *clientv3.LeaseTimeToLiveResponse - err error - ) - if rr.Keys { - r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID), clientv3.WithAttachedKeys()) - } else { - r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID)) - } - if err != nil { - return nil, err - } - rp := &pb.LeaseTimeToLiveResponse{ - Header: r.ResponseHeader, - ID: int64(r.ID), - TTL: r.TTL, - GrantedTTL: r.GrantedTTL, - Keys: r.Keys, - } - return rp, err + conn := 
lp.client.ActiveConnection() + return pb.NewLeaseClient(conn).LeaseTimeToLive(ctx, rr) } func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { - lp.mu.Lock() - select { - case <-lp.ctx.Done(): - lp.mu.Unlock() - return lp.ctx.Err() - default: - lp.wg.Add(1) - } - lp.mu.Unlock() - + conn := lp.client.ActiveConnection() ctx, cancel := context.WithCancel(stream.Context()) - lps := leaseProxyStream{ - stream: stream, - lessor: lp.lessor, - keepAliveLeases: make(map[int64]*atomicCounter), - respc: make(chan *pb.LeaseKeepAliveResponse), - ctx: ctx, - cancel: cancel, - } - - errc := make(chan error, 2) - - var lostLeaderC <-chan struct{} - if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { - v := md[rpctypes.MetadataRequireLeaderKey] - if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { - lostLeaderC = lp.leader.lostNotify() - // if leader is known to be lost at creation time, avoid - // letting events through at all - select { - case <-lostLeaderC: - lp.wg.Done() - return rpctypes.ErrNoLeader - default: - } - } - } - stopc := make(chan struct{}, 3) - go func() { - defer func() { stopc <- struct{}{} }() - if err := lps.recvLoop(); err != nil { - errc <- err - } - }() - - go func() { - defer func() { stopc <- struct{}{} }() - if err := lps.sendLoop(); err != nil { - errc <- err - } - }() - - // tears down LeaseKeepAlive stream if leader goes down or entire leaseProxy is terminated. 
- go func() { - defer func() { stopc <- struct{}{} }() - select { - case <-lostLeaderC: - case <-ctx.Done(): - case <-lp.ctx.Done(): - } - }() - - var err error - select { - case <-stopc: - stopc <- struct{}{} - case err = <-errc: - } - cancel() - - // recv/send may only shutdown after function exits; - // this goroutine notifies lease proxy that the stream is through - go func() { - <-stopc - <-stopc - <-stopc - lps.close() - close(errc) - lp.wg.Done() - }() - - select { - case <-lostLeaderC: - return rpctypes.ErrNoLeader - case <-lp.leader.disconnectNotify(): - return grpc.ErrClientConnClosing - default: - if err != nil { - return err - } - return ctx.Err() - } -} - -type leaseProxyStream struct { - stream pb.Lease_LeaseKeepAliveServer - - lessor clientv3.Lease - // wg tracks keepAliveLoop goroutines - wg sync.WaitGroup - // mu protects keepAliveLeases - mu sync.RWMutex - // keepAliveLeases tracks how many outstanding keepalive requests which need responses are on a lease. - keepAliveLeases map[int64]*atomicCounter - // respc receives lease keepalive responses from etcd backend - respc chan *pb.LeaseKeepAliveResponse - - ctx context.Context - cancel context.CancelFunc -} - -func (lps *leaseProxyStream) recvLoop() error { - for { - rr, err := lps.stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - lps.mu.Lock() - neededResps, ok := lps.keepAliveLeases[rr.ID] - if !ok { - neededResps = &atomicCounter{} - lps.keepAliveLeases[rr.ID] = neededResps - lps.wg.Add(1) - go func() { - defer lps.wg.Done() - if err := lps.keepAliveLoop(rr.ID, neededResps); err != nil { - lps.cancel() - } - }() - } - neededResps.add(1) - lps.mu.Unlock() - } -} - -func (lps *leaseProxyStream) keepAliveLoop(leaseID int64, neededResps *atomicCounter) error { - cctx, ccancel := context.WithCancel(lps.ctx) - defer ccancel() - respc, err := lps.lessor.KeepAlive(cctx, clientv3.LeaseID(leaseID)) + lc, err := pb.NewLeaseClient(conn).LeaseKeepAlive(ctx) if err != nil 
{ + cancel() return err } - // ticker expires when loop hasn't received keepalive within TTL - var ticker <-chan time.Time + + go func() { + // Cancel the context attached to lc to unblock lc.Recv when + // this routine returns on error. + defer cancel() + + for { + // stream.Recv will be unblock when the loop in the parent routine + // returns on error. + rr, err := stream.Recv() + if err != nil { + return + } + err = lc.Send(rr) + if err != nil { + return + } + } + }() + for { - select { - case <-ticker: - lps.mu.Lock() - // if there are outstanding keepAlive reqs at the moment of ticker firing, - // don't close keepAliveLoop(), let it continuing to process the KeepAlive reqs. - if neededResps.get() > 0 { - lps.mu.Unlock() - ticker = nil - continue - } - delete(lps.keepAliveLeases, leaseID) - lps.mu.Unlock() - return nil - case rp, ok := <-respc: - if !ok { - lps.mu.Lock() - delete(lps.keepAliveLeases, leaseID) - lps.mu.Unlock() - if neededResps.get() == 0 { - return nil - } - ttlResp, err := lps.lessor.TimeToLive(cctx, clientv3.LeaseID(leaseID)) - if err != nil { - return err - } - r := &pb.LeaseKeepAliveResponse{ - Header: ttlResp.ResponseHeader, - ID: int64(ttlResp.ID), - TTL: ttlResp.TTL, - } - for neededResps.get() > 0 { - select { - case lps.respc <- r: - neededResps.add(-1) - case <-lps.ctx.Done(): - return nil - } - } - return nil - } - if neededResps.get() == 0 { - continue - } - ticker = time.After(time.Duration(rp.TTL) * time.Second) - r := &pb.LeaseKeepAliveResponse{ - Header: rp.ResponseHeader, - ID: int64(rp.ID), - TTL: rp.TTL, - } - lps.replyToClient(r, neededResps) + rr, err := lc.Recv() + if err != nil { + return err + } + err = stream.Send(rr) + if err != nil { + return err } } } - -func (lps *leaseProxyStream) replyToClient(r *pb.LeaseKeepAliveResponse, neededResps *atomicCounter) { - timer := time.After(500 * time.Millisecond) - for neededResps.get() > 0 { - select { - case lps.respc <- r: - neededResps.add(-1) - case <-timer: - return - case 
<-lps.ctx.Done(): - return - } - } -} - -func (lps *leaseProxyStream) sendLoop() error { - for { - select { - case lrp, ok := <-lps.respc: - if !ok { - return nil - } - if err := lps.stream.Send(lrp); err != nil { - return err - } - case <-lps.ctx.Done(): - return lps.ctx.Err() - } - } -} - -func (lps *leaseProxyStream) close() { - lps.cancel() - lps.wg.Wait() - // only close respc channel if all the keepAliveLoop() goroutines have finished - // this ensures those goroutines don't send resp to a closed resp channel - close(lps.respc) -} - -type atomicCounter struct { - counter int64 -} - -func (ac *atomicCounter) add(delta int64) { - atomic.AddInt64(&ac.counter, delta) -} - -func (ac *atomicCounter) get() int64 { - return atomic.LoadInt64(&ac.counter) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go deleted file mode 100644 index 804aff64a96..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2017 The etcd Lockors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcproxy - -import ( - "golang.org/x/net/context" - - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" -) - -type lockProxy struct { - client *clientv3.Client -} - -func NewLockProxy(client *clientv3.Client) v3lockpb.LockServer { - return &lockProxy{client: client} -} - -func (lp *lockProxy) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { - return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Lock(ctx, req) -} - -func (lp *lockProxy) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { - return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Unlock(ctx, req) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go deleted file mode 100644 index c2d81804395..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcproxy - -import "github.com/coreos/pkg/capnslog" - -var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "grpcproxy") diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go index 384d1520360..209dc94a712 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go @@ -15,8 +15,6 @@ package grpcproxy import ( - "io" - "golang.org/x/net/context" "github.com/coreos/etcd/clientv3" @@ -51,9 +49,6 @@ func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenan for { rr, err := sc.Recv() if err != nil { - if err == io.EOF { - return nil - } return err } err = stream.Send(rr) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go index 864fa1609a0..f4a1d4c8de4 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go @@ -29,12 +29,6 @@ var ( Name: "events_coalescing_total", Help: "Total number of events coalescing", }) - cacheKeys = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "grpc_proxy", - Name: "cache_keys_total", - Help: "Total number of keys/ranges cached", - }) cacheHits = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "etcd", Subsystem: "grpc_proxy", @@ -52,7 +46,6 @@ var ( func init() { prometheus.MustRegister(watchersCoalescing) prometheus.MustRegister(eventsCoalescing) - prometheus.MustRegister(cacheKeys) prometheus.MustRegister(cacheHits) prometheus.MustRegister(cachedMisses) } diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go deleted file mode 100644 index 598c71f07ae..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2017 
The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "encoding/json" - "os" - - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/clientv3/concurrency" - "github.com/coreos/etcd/clientv3/naming" - - "golang.org/x/time/rate" - gnaming "google.golang.org/grpc/naming" -) - -// allow maximum 1 retry per second -const registerRetryRate = 1 - -// Register registers itself as a grpc-proxy server by writing prefixed-key -// with session of specified TTL (in seconds). The returned channel is closed -// when the client's context is canceled. 
-func Register(c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{} { - rm := rate.NewLimiter(rate.Limit(registerRetryRate), registerRetryRate) - - donec := make(chan struct{}) - go func() { - defer close(donec) - - for rm.Wait(c.Ctx()) == nil { - ss, err := registerSession(c, prefix, addr, ttl) - if err != nil { - plog.Warningf("failed to create a session %v", err) - continue - } - select { - case <-c.Ctx().Done(): - ss.Close() - return - - case <-ss.Done(): - plog.Warning("session expired; possible network partition or server restart") - plog.Warning("creating a new session to rejoin") - continue - } - } - }() - - return donec -} - -func registerSession(c *clientv3.Client, prefix string, addr string, ttl int) (*concurrency.Session, error) { - ss, err := concurrency.NewSession(c, concurrency.WithTTL(ttl)) - if err != nil { - return nil, err - } - - gr := &naming.GRPCResolver{Client: c} - if err = gr.Update(c.Ctx(), prefix, gnaming.Update{Op: gnaming.Add, Addr: addr, Metadata: getMeta()}, clientv3.WithLease(ss.Lease())); err != nil { - return nil, err - } - - plog.Infof("registered %q with %d-second lease", addr, ttl) - return ss, nil -} - -// meta represents metadata of proxy register. 
-type meta struct { - Name string `json:"name"` -} - -func getMeta() string { - hostname, _ := os.Hostname() - bts, _ := json.Marshal(meta{Name: hostname}) - return string(bts) -} - -func decodeMeta(s string) (meta, error) { - m := meta{} - err := json.Unmarshal([]byte(s), &m) - return m, err -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go index b960c94769a..42d196ca2ca 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go @@ -18,7 +18,7 @@ import ( "sync" "golang.org/x/net/context" - "google.golang.org/grpc" + "golang.org/x/time/rate" "google.golang.org/grpc/metadata" "github.com/coreos/etcd/clientv3" @@ -31,35 +31,49 @@ type watchProxy struct { cw clientv3.Watcher ctx context.Context - leader *leader - ranges *watchRanges - // mu protects adding outstanding watch servers through wg. - mu sync.Mutex + // retryLimiter controls the create watch retry rate on lost leaders. + retryLimiter *rate.Limiter + + // mu protects leaderc updates. + mu sync.RWMutex + leaderc chan struct{} // wg waits until all outstanding watch servers quit. 
wg sync.WaitGroup } +const ( + lostLeaderKey = "__lostleader" // watched to detect leader loss + retryPerSecond = 10 +) + func NewWatchProxy(c *clientv3.Client) (pb.WatchServer, <-chan struct{}) { - cctx, cancel := context.WithCancel(c.Ctx()) wp := &watchProxy{ - cw: c.Watcher, - ctx: cctx, - leader: newLeader(c.Ctx(), c.Watcher), + cw: c.Watcher, + ctx: clientv3.WithRequireLeader(c.Ctx()), + retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond), + leaderc: make(chan struct{}), } wp.ranges = newWatchRanges(wp) ch := make(chan struct{}) go func() { defer close(ch) - <-wp.leader.stopNotify() - wp.mu.Lock() - select { - case <-wp.ctx.Done(): - case <-wp.leader.disconnectNotify(): - cancel() + // a new streams without opening any watchers won't catch + // a lost leader event, so have a special watch to monitor it + rev := int64((uint64(1) << 63) - 2) + for wp.ctx.Err() == nil { + wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev)) + for range wch { + } + wp.mu.Lock() + close(wp.leaderc) + wp.leaderc = make(chan struct{}) + wp.mu.Unlock() + wp.retryLimiter.Wait(wp.ctx) } + wp.mu.Lock() <-wp.ctx.Done() wp.mu.Unlock() wp.wg.Wait() @@ -73,12 +87,7 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { select { case <-wp.ctx.Done(): wp.mu.Unlock() - select { - case <-wp.leader.disconnectNotify(): - return grpc.ErrClientConnClosing - default: - return wp.ctx.Err() - } + return default: wp.wg.Add(1) } @@ -94,19 +103,11 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { cancel: cancel, } - var lostLeaderC <-chan struct{} - if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { + var leaderc <-chan struct{} + if md, ok := metadata.FromContext(stream.Context()); ok { v := md[rpctypes.MetadataRequireLeaderKey] if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { - lostLeaderC = wp.leader.lostNotify() - // if leader is known to be lost at creation time, avoid - // letting events through at all - 
select { - case <-lostLeaderC: - wp.wg.Done() - return rpctypes.ErrNoLeader - default: - } + leaderc = wp.lostLeaderNotify() } } @@ -125,7 +126,7 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { go func() { defer func() { stopc <- struct{}{} }() select { - case <-lostLeaderC: + case <-leaderc: case <-ctx.Done(): case <-wp.ctx.Done(): } @@ -144,15 +145,19 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { }() select { - case <-lostLeaderC: + case <-leaderc: return rpctypes.ErrNoLeader - case <-wp.leader.disconnectNotify(): - return grpc.ErrClientConnClosing default: return wps.ctx.Err() } } +func (wp *watchProxy) lostLeaderNotify() <-chan struct{} { + wp.mu.RLock() + defer wp.mu.RUnlock() + return wp.leaderc +} + // watchProxyStream forwards etcd watch events to a proxied client stream. type watchProxyStream struct { ranges *watchRanges diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go index 5e750bdb0d4..5529fb5a2bc 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go @@ -50,20 +50,27 @@ func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) wb.add(w) go func() { defer close(wb.donec) + // loop because leader loss will close channel + for cctx.Err() == nil { + opts := []clientv3.OpOption{ + clientv3.WithRange(w.wr.end), + clientv3.WithProgressNotify(), + clientv3.WithRev(wb.nextrev), + clientv3.WithPrevKV(), + } + // The create notification should be the first response; + // if the watch is recreated following leader loss, it + // shouldn't post a second create response to the client. + if wb.responses == 0 { + opts = append(opts, clientv3.WithCreatedNotify()) + } + wch := wp.cw.Watch(cctx, w.wr.key, opts...) 
- opts := []clientv3.OpOption{ - clientv3.WithRange(w.wr.end), - clientv3.WithProgressNotify(), - clientv3.WithRev(wb.nextrev), - clientv3.WithPrevKV(), - clientv3.WithCreatedNotify(), - } - - wch := wp.cw.Watch(cctx, w.wr.key, opts...) - - for wr := range wch { - wb.bcast(wr) - update(wb) + for wr := range wch { + wb.bcast(wr) + update(wb) + } + wp.retryLimiter.Wait(cctx) } }() return wb diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go similarity index 65% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go rename to vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go index 3aa01f2052b..283c2ed07fa 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go @@ -1,4 +1,4 @@ -// Copyright 2017 The etcd Authors +// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,15 +12,79 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package adapter +package grpcproxy import ( - "golang.org/x/net/context" + "errors" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/metadata" ) +var errAlreadySentHeader = errors.New("grpcproxy: already send header") + +type ws2wc struct{ wserv pb.WatchServer } + +func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient { + return &ws2wc{wserv} +} + +func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) { + // ch1 is buffered so server can send error on close + ch1, ch2 := make(chan interface{}, 1), make(chan interface{}) + headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1) + + cctx, ccancel := context.WithCancel(ctx) + cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel} + wclient := &ws2wcClientStream{chanClientStream{headerc, trailerc, cli}} + + sctx, scancel := context.WithCancel(ctx) + srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel} + wserver := &ws2wcServerStream{chanServerStream{headerc, trailerc, srv, nil}} + go func() { + if err := s.wserv.Watch(wserver); err != nil { + select { + case srv.sendc <- err: + case <-sctx.Done(): + case <-cctx.Done(): + } + } + scancel() + ccancel() + }() + return wclient, nil +} + +// ws2wcClientStream implements Watch_WatchClient +type ws2wcClientStream struct{ chanClientStream } + +// ws2wcServerStream implements Watch_WatchServer +type ws2wcServerStream struct{ chanServerStream } + +func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error { + return s.SendMsg(wr) +} +func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.WatchResponse), nil +} + +func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error { + return s.SendMsg(wr) +} +func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) { + var v interface{} 
+ if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.WatchRequest), nil +} + // chanServerStream implements grpc.ServerStream with a chanStream type chanServerStream struct { headerc chan<- metadata.MD @@ -87,8 +151,8 @@ func (cs *chanClientStream) Trailer() metadata.MD { } } -func (cs *chanClientStream) CloseSend() error { - close(cs.chanStream.sendc) +func (s *chanClientStream) CloseSend() error { + close(s.chanStream.sendc) return nil } @@ -116,50 +180,17 @@ func (s *chanStream) SendMsg(m interface{}) error { func (s *chanStream) RecvMsg(m interface{}) error { v := m.(*interface{}) - for { - select { - case msg, ok := <-s.recvc: - if !ok { - return grpc.ErrClientConnClosing - } - if err, ok := msg.(error); ok { - return err - } - *v = msg - return nil - case <-s.ctx.Done(): + select { + case msg, ok := <-s.recvc: + if !ok { + return grpc.ErrClientConnClosing } - if len(s.recvc) == 0 { - // prioritize any pending recv messages over canceled context - break + if err, ok := msg.(error); ok { + return err } + *v = msg + return nil + case <-s.ctx.Done(): } return s.ctx.Err() } - -func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream { - // ch1 is buffered so server can send error on close - ch1, ch2 := make(chan interface{}, 1), make(chan interface{}) - headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1) - - cctx, ccancel := context.WithCancel(ctx) - cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel} - cs := chanClientStream{headerc, trailerc, cli} - - sctx, scancel := context.WithCancel(ctx) - srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel} - ss := chanServerStream{headerc, trailerc, srv, nil} - - go func() { - if err := ssHandler(ss); err != nil { - select { - case srv.sendc <- err: - case <-sctx.Done(): - case <-cctx.Done(): - } - } - scancel() - ccancel() - }() - return cs -} diff --git 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go index 7387caf4dbd..e860a69ce81 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go @@ -102,17 +102,16 @@ func (w *watcher) send(wr clientv3.WatchResponse) { } // all events are filtered out? - if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 && wr.CompactRevision == 0 { + if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 { return } w.lastHeader = wr.Header w.post(&pb.WatchResponse{ - Header: &wr.Header, - Created: wr.Created, - CompactRevision: wr.CompactRevision, - WatchId: w.id, - Events: events, + Header: &wr.Header, + Created: wr.Created, + WatchId: w.id, + Events: events, }) } diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md index f485b839771..a724b958579 100644 --- a/vendor/github.com/coreos/etcd/raft/README.md +++ b/vendor/github.com/coreos/etcd/raft/README.md @@ -13,7 +13,9 @@ To keep the codebase small as well as provide flexibility, the library only impl In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output. 
-A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample +A simple example application, _raftexample_, is also available to help illustrate +how to use this package in practice: +https://github.com/coreos/etcd/tree/master/contrib/raftexample # Features @@ -49,11 +51,11 @@ This raft implementation also includes a few optional enhancements: - [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store - [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft - [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale. -- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks ## Usage -The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode. +The primary object in raft is a Node. You either start a Node from scratch +using raft.StartNode or start a Node from some initial state using raft.RestartNode. To start a three-node cluster ```go @@ -71,7 +73,7 @@ To start a three-node cluster n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) ``` -Start a single node cluster, like so: +You can start a single node cluster, like so: ```go // Create storage and config as shown above. // Set peer list to itself, so this node can become the leader of this single-node cluster. @@ -79,7 +81,7 @@ Start a single node cluster, like so: n := raft.StartNode(c, peers) ``` -To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so: +To allow a new node to join this cluster, do not pass in any peers. 
First, you need add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, you can start the node with empty peer list, like so: ```go // Create storage and config as shown above. n := raft.StartNode(c, nil) @@ -108,21 +110,46 @@ To restart a node from previous state: n := raft.RestartNode(c) ``` -After creating a Node, the user has a few responsibilities: +Now that you are holding onto a Node you have a few responsibilities: -First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2. +First, you must read from the Node.Ready() channel and process the updates +it contains. These steps may be performed in parallel, except as noted in step +2. -1. Write HardState, Entries, and Snapshot to persistent storage if they are not empty. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded. +1. Write HardState, Entries, and Snapshot to persistent storage if they are +not empty. Note that when writing an Entry with Index i, any +previously-persisted entries with Index >= i must be discarded. -2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop. +2. 
Send all Messages to the nodes named in the To field. It is important that +no messages be sent until the latest HardState has been persisted to disk, +and all Entries written by any previous Ready batch (Messages may be sent while +entries from the same batch are being persisted). To reduce the I/O latency, an +optimization can be applied to make leader write to disk in parallel with its +followers (as explained at section 10.2.1 in Raft thesis). If any Message has type +MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be +large). Note: Marshalling messages is not thread-safe; it is important that you +make sure that no new entries are persisted while marshalling. +The easiest way to achieve this is to serialise the messages directly inside +your main raft loop. -3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node). +3. Apply Snapshot (if any) and CommittedEntries to the state machine. +If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() +to apply it to the node. The configuration change may be cancelled at this point +by setting the NodeID field to zero before calling ApplyConfChange +(but ApplyConfChange must be called one way or the other, and the decision to cancel +must be based solely on the state machine and not external information such as +the observed health of the node). -4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready. 
+4. Call Node.Advance() to signal readiness for the next batch of updates. +This may be done at any time after step 1, although all updates must be processed +in the order they were returned by Ready. -Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied. +Second, all persisted log entries must be made available via an +implementation of the Storage interface. The provided MemoryStorage +type can be used for this (if you repopulate its state upon a +restart), or you can supply your own disk-backed implementation. -Third, after receiving a message from another node, pass it to Node.Step: +Third, when you receive a message from another node, pass it to Node.Step: ```go func recvRaftRPC(ctx context.Context, m raftpb.Message) { @@ -130,7 +157,10 @@ Third, after receiving a message from another node, pass it to Node.Step: } ``` -Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick". +Finally, you need to call `Node.Tick()` at regular intervals (probably +via a `time.Ticker`). Raft has two important timeouts: heartbeat and the +election timeout. However, internally to the raft package time is +represented by an abstract "tick". 
The total state machine handling loop will look something like this: @@ -160,13 +190,16 @@ The total state machine handling loop will look something like this: } ``` -To propose changes to the state machine from the node to take application data, serialize it into a byte slice and call: +To propose changes to the state machine from your node take your application +data, serialize it into a byte slice and call: ```go n.Propose(ctx, data) ``` -If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. +If the proposal is committed, data will appear in committed entries with type +raftpb.EntryNormal. There is no guarantee that a proposed command will be +committed; you may have to re-propose after a timeout. To add or remove node in a cluster, build ConfChange struct 'cc' and call: @@ -174,7 +207,8 @@ To add or remove node in a cluster, build ConfChange struct 'cc' and call: n.ProposeConfChange(ctx, cc) ``` -After config change is committed, some committed entry with type raftpb.EntryConfChange will be returned. This must be applied to node through: +After config change is committed, some committed entry with type +raftpb.EntryConfChange will be returned. You must apply it to node through: ```go var cc raftpb.ConfChange @@ -189,8 +223,25 @@ may be reused. Node IDs must be non-zero. ## Implementation notes -This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). 
This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap. +This implementation is up to date with the final Raft thesis +(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our +implementation of the membership change protocol differs somewhat from +that described in chapter 4. The key invariant that membership changes +happen one node at a time is preserved, but in our implementation the +membership change takes effect when its entry is applied, not when it +is added to the log (so the entry is committed under the old +membership instead of the new). This is equivalent in terms of safety, +since the old and new configurations are guaranteed to overlap. -To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log. +To ensure that we do not attempt to commit two membership changes at +once by matching log positions (which would be unsafe since they +should have different quorum requirements), we simply disallow any +proposed membership change while any uncommitted change appears in +the leader's log. -This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster. +This approach introduces a problem when you try to remove a member +from a two-member cluster: If one of the members dies before the +other one receives the commit of the confchange entry, then the member +cannot be removed any more since the cluster cannot make progress. 
+For this reason it is highly recommended to use three or more nodes in +every cluster. diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go index 263af9ce405..8ae301c3d8d 100644 --- a/vendor/github.com/coreos/etcd/raft/log_unstable.go +++ b/vendor/github.com/coreos/etcd/raft/log_unstable.go @@ -85,26 +85,6 @@ func (u *unstable) stableTo(i, t uint64) { if gt == t && i >= u.offset { u.entries = u.entries[i+1-u.offset:] u.offset = i + 1 - u.shrinkEntriesArray() - } -} - -// shrinkEntriesArray discards the underlying array used by the entries slice -// if most of it isn't being used. This avoids holding references to a bunch of -// potentially large entries that aren't needed anymore. Simply clearing the -// entries wouldn't be safe because clients might still be using them. -func (u *unstable) shrinkEntriesArray() { - // We replace the array if we're using less than half of the space in - // it. This number is fairly arbitrary, chosen as an attempt to balance - // memory usage vs number of allocations. It could probably be improved - // with some focused tuning. - const lenMultiple = 2 - if len(u.entries) == 0 { - u.entries = nil - } else if len(u.entries)*lenMultiple < cap(u.entries) { - newEntries := make([]pb.Entry, len(u.entries)) - copy(newEntries, u.entries) - u.entries = newEntries } } diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go index 5da1c1193b2..c8410fdc77f 100644 --- a/vendor/github.com/coreos/etcd/raft/node.go +++ b/vendor/github.com/coreos/etcd/raft/node.go @@ -83,10 +83,6 @@ type Ready struct { // If it contains a MsgSnap message, the application MUST report back to raft // when the snapshot has been received or has failed by calling ReportSnapshot. Messages []pb.Message - - // MustSync indicates whether the HardState and Entries must be synchronously - // written to disk or if an asynchronous write is permissible. 
- MustSync bool } func isHardStateEqual(a, b pb.HardState) bool { @@ -521,17 +517,5 @@ func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { if len(r.readStates) != 0 { rd.ReadStates = r.readStates } - rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries)) return rd } - -// MustSync returns true if the hard state and count of Raft entries indicate -// that a synchronous write to persistent storage is required. -func MustSync(st, prevst pb.HardState, entsnum int) bool { - // Persistent state on all servers: - // (Updated on stable storage before responding to RPCs) - // currentTerm - // votedFor - // log entries[] - return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term -} diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go index 29f20398203..7be4407ee2b 100644 --- a/vendor/github.com/coreos/etcd/raft/raft.go +++ b/vendor/github.com/coreos/etcd/raft/raft.go @@ -1159,10 +1159,6 @@ func (r *raft) addNode(id uint64) { } r.setProgress(id, 0, r.raftLog.lastIndex()+1) - // When a node is first added, we should mark it as recently active. - // Otherwise, CheckQuorum may cause us to step down if it is invoked - // before the added node has a chance to communicate with us. 
- r.prs[id].RecentActive = true } func (r *raft) removeNode(id uint64) { diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go index 4c6e79d58a0..86ad3120708 100644 --- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -1558,67 +1558,25 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Nodes = append(m.Nodes, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + packedLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Nodes = append(m.Nodes, v) - } - } else { + if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) @@ -1889,7 +1847,7 @@ func init() { 
proto.RegisterFile("raft.proto", fileDescriptorRaft) } var fileDescriptorRaft = []byte{ // 790 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, 0x10, 0x16, 0x29, 0xea, 0x6f, 0x28, 0xcb, 0xab, 0xb5, 0x5a, 0x2c, 0x0c, 0x43, 0x55, 0x85, 0x1e, 0x04, 0x17, 0x76, 0x5b, 0x1d, 0x7a, 0xe8, 0xcd, 0x96, 0x0a, 0x58, 0x40, 0x65, 0xb8, 0xb2, 0xdc, 0x43, 0x83, 0x20, 0x58, 0x8b, 0x2b, 0x4a, 0x89, 0xc9, 0x25, 0x96, 0x2b, 0xc7, 0xbe, 0x04, 0x79, diff --git a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go index d9f07c3479d..ccd9eb78698 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go +++ b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go @@ -16,13 +16,13 @@ package rafthttp import ( "bytes" - "context" "errors" "io/ioutil" "sync" "time" "github.com/coreos/etcd/etcdserver/stats" + "github.com/coreos/etcd/pkg/httputil" "github.com/coreos/etcd/pkg/pbutil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft" @@ -118,8 +118,7 @@ func (p *pipeline) post(data []byte) (err error) { req := createPostRequest(u, RaftPrefix, bytes.NewBuffer(data), "application/protobuf", p.tr.URLs, p.tr.ID, p.tr.ClusterID) done := make(chan struct{}, 1) - ctx, cancel := context.WithCancel(context.Background()) - req = req.WithContext(ctx) + cancel := httputil.RequestCanceler(req) go func() { select { case <-done: diff --git a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go index 52273c9d195..105b330728e 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go +++ b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go @@ -16,7 +16,6 @@ package rafthttp import ( "bytes" - "context" "io" "io/ioutil" "net/http" @@ -105,9 +104,7 @@ func (s *snapshotSender) 
send(merged snap.Message) { // post posts the given request. // It returns nil when request is sent out and processed successfully. func (s *snapshotSender) post(req *http.Request) (err error) { - ctx, cancel := context.WithCancel(context.Background()) - req = req.WithContext(ctx) - defer cancel() + cancel := httputil.RequestCanceler(req) type responseAndError struct { resp *http.Response @@ -133,6 +130,7 @@ func (s *snapshotSender) post(req *http.Request) (err error) { select { case <-s.stopc: + cancel() return errStopped case r := <-result: if r.err != nil { diff --git a/vendor/github.com/coreos/etcd/rafthttp/stream.go b/vendor/github.com/coreos/etcd/rafthttp/stream.go index 2a6c620f56d..e69a44ff65a 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/stream.go +++ b/vendor/github.com/coreos/etcd/rafthttp/stream.go @@ -15,10 +15,10 @@ package rafthttp import ( - "context" "fmt" "io" "io/ioutil" + "net" "net/http" "path" "strings" @@ -27,7 +27,6 @@ import ( "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/httputil" - "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/version" @@ -52,7 +51,6 @@ var ( "2.3.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.0.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.1.0": {streamTypeMsgAppV2, streamTypeMessage}, - "3.2.0": {streamTypeMsgAppV2, streamTypeMessage}, } ) @@ -142,8 +140,7 @@ func (cw *streamWriter) run() { flusher http.Flusher batched int ) - tickc := time.NewTicker(ConnReadTimeout / 3) - defer tickc.Stop() + tickc := time.Tick(ConnReadTimeout / 3) unflushed := 0 plog.Infof("started streaming with peer %s (writer)", cw.peerID) @@ -215,7 +212,7 @@ func (cw *streamWriter) run() { plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t) } plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t) - heartbeatc, msgc = tickc.C, cw.msgc + 
heartbeatc, msgc = tickc, cw.msgc case <-cw.stopc: if cw.close() { plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) @@ -317,7 +314,7 @@ func (cr *streamReader) run() { // all data is read out case err == io.EOF: // connection is closed by the remote - case transport.IsClosedConnError(err): + case isClosedConnectionError(err): default: cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error()) } @@ -429,17 +426,14 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { setPeerURLsHeader(req, cr.tr.URLs) - ctx, cancel := context.WithCancel(context.Background()) - req = req.WithContext(ctx) - cr.mu.Lock() - cr.cancel = cancel select { case <-cr.stopc: cr.mu.Unlock() return nil, fmt.Errorf("stream reader is stopped") default: } + cr.cancel = httputil.RequestCanceler(req) cr.mu.Unlock() resp, err := cr.tr.streamRt.RoundTrip(req) @@ -514,6 +508,11 @@ func (cr *streamReader) resume() { cr.paused = false } +func isClosedConnectionError(err error) bool { + operr, ok := err.(*net.OpError) + return ok && operr.Err.Error() == "use of closed network connection" +} + // checkStreamSupport checks whether the stream type is supported in the // given version. func checkStreamSupport(v *semver.Version, t streamType) bool { diff --git a/vendor/github.com/coreos/etcd/rafthttp/util.go b/vendor/github.com/coreos/etcd/rafthttp/util.go index 12e548c7717..61855c52a60 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/util.go +++ b/vendor/github.com/coreos/etcd/rafthttp/util.go @@ -15,6 +15,8 @@ package rafthttp import ( + "crypto/tls" + "encoding/binary" "fmt" "io" "net" @@ -25,6 +27,7 @@ import ( "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" ) @@ -36,8 +39,8 @@ var ( // NewListener returns a listener for raft message transfer between peers. 
// It uses timeout listener to identify broken streams promptly. -func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) { - return transport.NewTimeoutListener(u.Host, u.Scheme, tlsinfo, ConnReadTimeout, ConnWriteTimeout) +func NewListener(u url.URL, tlscfg *tls.Config) (net.Listener, error) { + return transport.NewTimeoutListener(u.Host, u.Scheme, tlscfg, ConnReadTimeout, ConnWriteTimeout) } // NewRoundTripper returns a roundTripper used to send requests @@ -58,6 +61,31 @@ func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout) } +func writeEntryTo(w io.Writer, ent *raftpb.Entry) error { + size := ent.Size() + if err := binary.Write(w, binary.BigEndian, uint64(size)); err != nil { + return err + } + b, err := ent.Marshal() + if err != nil { + return err + } + _, err = w.Write(b) + return err +} + +func readEntryFrom(r io.Reader, ent *raftpb.Entry) error { + var l uint64 + if err := binary.Read(r, binary.BigEndian, &l); err != nil { + return err + } + buf := make([]byte, int(l)) + if _, err := io.ReadFull(r, buf); err != nil { + return err + } + return ent.Unmarshal(buf) +} + // createPostRequest creates a HTTP POST request that sends raft message. func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request { uu := u diff --git a/vendor/github.com/coreos/etcd/snap/db.go b/vendor/github.com/coreos/etcd/snap/db.go index 01d897ae861..ae3c743f80c 100644 --- a/vendor/github.com/coreos/etcd/snap/db.go +++ b/vendor/github.com/coreos/etcd/snap/db.go @@ -15,7 +15,6 @@ package snap import ( - "errors" "fmt" "io" "io/ioutil" @@ -25,8 +24,6 @@ import ( "github.com/coreos/etcd/pkg/fileutil" ) -var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist") - // SaveDBFrom saves snapshot of the database from the given reader. 
It // guarantees the save operation is atomic. func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { @@ -44,7 +41,7 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { os.Remove(f.Name()) return n, err } - fn := s.dbFilePath(id) + fn := filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) if fileutil.Exist(fn) { os.Remove(f.Name()) return n, nil @@ -63,15 +60,15 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { // DBFilePath returns the file path for the snapshot of the database with // given id. If the snapshot does not exist, it returns error. func (s *Snapshotter) DBFilePath(id uint64) (string, error) { - if _, err := fileutil.ReadDir(s.dir); err != nil { + fns, err := fileutil.ReadDir(s.dir) + if err != nil { return "", err } - if fn := s.dbFilePath(id); fileutil.Exist(fn) { - return fn, nil + wfn := fmt.Sprintf("%016x.snap.db", id) + for _, fn := range fns { + if fn == wfn { + return filepath.Join(s.dir, fn), nil + } } - return "", ErrNoDBSnapshot -} - -func (s *Snapshotter) dbFilePath(id uint64) string { - return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) + return "", fmt.Errorf("snap: snapshot file doesn't exist") } diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go index 05a77ff9d06..130e2277c84 100644 --- a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go +++ b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go @@ -342,7 +342,7 @@ func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) } var fileDescriptorSnap = []byte{ // 126 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 
0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, diff --git a/vendor/github.com/coreos/etcd/store/node.go b/vendor/github.com/coreos/etcd/store/node.go index 54159553500..731327b08ba 100644 --- a/vendor/github.com/coreos/etcd/store/node.go +++ b/vendor/github.com/coreos/etcd/store/node.go @@ -332,6 +332,7 @@ func (n *node) UpdateTTL(expireTime time.Time) { n.ExpireTime = expireTime // push into ttl heap n.store.ttlKeyHeap.push(n) + return } // Compare function compares node index and value with provided ones. diff --git a/vendor/github.com/coreos/etcd/store/store.go b/vendor/github.com/coreos/etcd/store/store.go index edf7f21942b..6c19ad4c970 100644 --- a/vendor/github.com/coreos/etcd/store/store.go +++ b/vendor/github.com/coreos/etcd/store/store.go @@ -682,9 +682,6 @@ func (s *store) DeleteExpiredKeys(cutoff time.Time) { e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex) e.EtcdIndex = s.CurrentIndex e.PrevNode = node.Repr(false, false, s.clock) - if node.IsDir() { - e.Node.Dir = true - } callback := func(path string) { // notify function // notify the watchers with deleted set true diff --git a/vendor/github.com/coreos/etcd/store/watcher_hub.go b/vendor/github.com/coreos/etcd/store/watcher_hub.go index 13c23e391d9..6dd63f3c541 100644 --- a/vendor/github.com/coreos/etcd/store/watcher_hub.go +++ b/vendor/github.com/coreos/etcd/store/watcher_hub.go @@ -116,7 +116,7 @@ func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeInde } func (wh *watcherHub) add(e *Event) { - wh.EventHistory.addEvent(e) + e = wh.EventHistory.addEvent(e) } // notify function accepts an event and notify to the watchers. 
diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go index a09a2a33c0d..0173d6f11d3 100644 --- a/vendor/github.com/coreos/etcd/version/version.go +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.2.11" + Version = "3.1.10" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/github.com/coreos/etcd/wal/encoder.go b/vendor/github.com/coreos/etcd/wal/encoder.go index aac1e197e59..efe58928cc8 100644 --- a/vendor/github.com/coreos/etcd/wal/encoder.go +++ b/vendor/github.com/coreos/etcd/wal/encoder.go @@ -52,7 +52,7 @@ func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder { // newFileEncoder creates a new encoder with current file offset for the page writer. func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) { - offset, err := f.Seek(0, io.SeekCurrent) + offset, err := f.Seek(0, os.SEEK_CUR) if err != nil { return nil, err } diff --git a/vendor/github.com/coreos/etcd/wal/repair.go b/vendor/github.com/coreos/etcd/wal/repair.go index 091036b57b9..ffb14161682 100644 --- a/vendor/github.com/coreos/etcd/wal/repair.go +++ b/vendor/github.com/coreos/etcd/wal/repair.go @@ -62,7 +62,7 @@ func Repair(dirpath string) bool { } defer bf.Close() - if _, err = f.Seek(0, io.SeekStart); err != nil { + if _, err = f.Seek(0, os.SEEK_SET); err != nil { plog.Errorf("could not repair %v, failed to read file", f.Name()) return false } diff --git a/vendor/github.com/coreos/etcd/wal/wal.go b/vendor/github.com/coreos/etcd/wal/wal.go index 2cac25c1c90..b65f6448304 100644 --- a/vendor/github.com/coreos/etcd/wal/wal.go +++ b/vendor/github.com/coreos/etcd/wal/wal.go @@ -112,7 +112,7 @@ func Create(dirpath string, metadata []byte) (*WAL, error) { if err != nil { return nil, err } - if _, err = f.Seek(0, io.SeekEnd); 
err != nil { + if _, err = f.Seek(0, os.SEEK_END); err != nil { return nil, err } if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil { @@ -322,7 +322,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. // not all, will cause CRC errors on WAL open. Since the records // were never fully synced to disk in the first place, it's safe // to zero them out to avoid any CRC errors from new writes. - if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil { + if _, err = w.tail().Seek(w.decoder.lastOffset(), os.SEEK_SET); err != nil { return nil, state, nil, err } if err = fileutil.ZeroToEnd(w.tail().File); err != nil { @@ -361,7 +361,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. // Then cut atomically rename temp wal file to a wal file. func (w *WAL) cut() error { // close old wal file; truncate to avoid wasting space if an early cut - off, serr := w.tail().Seek(0, io.SeekCurrent) + off, serr := w.tail().Seek(0, os.SEEK_CUR) if serr != nil { return serr } @@ -401,7 +401,7 @@ func (w *WAL) cut() error { return err } - off, err = w.tail().Seek(0, io.SeekCurrent) + off, err = w.tail().Seek(0, os.SEEK_CUR) if err != nil { return err } @@ -418,7 +418,7 @@ func (w *WAL) cut() error { if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil { return err } - if _, err = newTail.Seek(off, io.SeekStart); err != nil { + if _, err = newTail.Seek(off, os.SEEK_SET); err != nil { return err } @@ -552,7 +552,7 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { return nil } - mustSync := raft.MustSync(st, w.state, len(ents)) + mustSync := mustSync(st, w.state, len(ents)) // TODO(xiangli): no more reference operator for i := range ents { @@ -564,7 +564,7 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { return err } - curOff, err := w.tail().Seek(0, io.SeekCurrent) + curOff, err := 
w.tail().Seek(0, os.SEEK_CUR) if err != nil { return err } @@ -618,6 +618,15 @@ func (w *WAL) seq() uint64 { return seq } +func mustSync(st, prevst raftpb.HardState, entsnum int) bool { + // Persistent state on all servers: + // (Updated on stable storage before responding to RPCs) + // currentTerm + // votedFor + // log entries[] + return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term +} + func closeAll(rcs ...io.ReadCloser) error { for _, f := range rcs { if err := f.Close(); err != nil { diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go index 664fae1305b..e1a77d5e51a 100644 --- a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go +++ b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go @@ -506,7 +506,7 @@ func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) } var fileDescriptorRecord = []byte{ // 186 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6, 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD deleted file mode 100644 index 3f80de51569..00000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -filegroup( - name = "go_default_library_protos", - srcs = ["descriptor.proto"], - visibility = ["//visibility:public"], -) - -go_library( - name = "go_default_library", - srcs = 
["descriptor.pb.go"], - importpath = "github.com/golang/protobuf/protoc-gen-go/descriptor", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile deleted file mode 100644 index f706871a6fa..00000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ -# at src/google/protobuf/descriptor.proto -regenerate: - @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION - cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto . - protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go deleted file mode 100644 index c6a91bcab9c..00000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ /dev/null @@ -1,2215 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/descriptor.proto - -/* -Package descriptor is a generated protocol buffer package. 
- -It is generated from these files: - google/protobuf/descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - ExtensionRangeOptions - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ -package descriptor - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. 
- FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - // New in version 2. - FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 
17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. 
-type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. - FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } - -type FieldOptions_CType int32 - -const ( - // Default mode. 
- FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. 
- FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} } - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. 
-type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 -) - -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. 
-type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. - MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". 
- Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. 
-type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - 
XXX_unrecognized []byte `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 0} -} - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. -type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 1} -} - -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} - -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). 
- TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. 
- JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *FieldDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. 
-type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a value within an enum. 
-type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *EnumValueDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumValueDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a service. 
-type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *ServiceDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if m != nil { - return m.Method - } - return nil -} - -func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -const Default_MethodDescriptorProto_ClientStreaming bool = false -const Default_MethodDescriptorProto_ServerStreaming bool = false - -func (m *MethodDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MethodDescriptorProto) GetInputType() string { - if m != nil && m.InputType != nil { - return *m.InputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOutputType() string { - if m != nil && m.OutputType != nil { - return *m.OutputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOptions() *MethodOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *MethodDescriptorProto) GetClientStreaming() bool { - if m != nil && m.ClientStreaming != nil { - return *m.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (m *MethodDescriptorProto) GetServerStreaming() bool { - if m != nil && m.ServerStreaming != nil { - 
return *m.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. 
- // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. 
- CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. 
- SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -var extRange_FileOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaStringCheckUtf8 bool = false -const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false -const Default_FileOptions_PhpGenericServices bool = false -const Default_FileOptions_Deprecated bool = false -const Default_FileOptions_CcEnableArenas bool = false - -func (m *FileOptions) 
GetJavaPackage() string { - if m != nil && m.JavaPackage != nil { - return *m.JavaPackage - } - return "" -} - -func (m *FileOptions) GetJavaOuterClassname() string { - if m != nil && m.JavaOuterClassname != nil { - return *m.JavaOuterClassname - } - return "" -} - -func (m *FileOptions) GetJavaMultipleFiles() bool { - if m != nil && m.JavaMultipleFiles != nil { - return *m.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if m != nil && m.JavaGenerateEqualsAndHash != nil { - return *m.JavaGenerateEqualsAndHash - } - return false -} - -func (m *FileOptions) GetJavaStringCheckUtf8() bool { - if m != nil && m.JavaStringCheckUtf8 != nil { - return *m.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if m != nil && m.OptimizeFor != nil { - return *m.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (m *FileOptions) GetGoPackage() string { - if m != nil && m.GoPackage != nil { - return *m.GoPackage - } - return "" -} - -func (m *FileOptions) GetCcGenericServices() bool { - if m != nil && m.CcGenericServices != nil { - return *m.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (m *FileOptions) GetJavaGenericServices() bool { - if m != nil && m.JavaGenericServices != nil { - return *m.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (m *FileOptions) GetPyGenericServices() bool { - if m != nil && m.PyGenericServices != nil { - return *m.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (m *FileOptions) GetPhpGenericServices() bool { - if m != nil && m.PhpGenericServices != nil { - return *m.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (m *FileOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - 
return Default_FileOptions_Deprecated -} - -func (m *FileOptions) GetCcEnableArenas() bool { - if m != nil && m.CcEnableArenas != nil { - return *m.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (m *FileOptions) GetObjcClassPrefix() string { - if m != nil && m.ObjcClassPrefix != nil { - return *m.ObjcClassPrefix - } - return "" -} - -func (m *FileOptions) GetCsharpNamespace() string { - if m != nil && m.CsharpNamespace != nil { - return *m.CsharpNamespace - } - return "" -} - -func (m *FileOptions) GetSwiftPrefix() string { - if m != nil && m.SwiftPrefix != nil { - return *m.SwiftPrefix - } - return "" -} - -func (m *FileOptions) GetPhpClassPrefix() string { - if m != nil && m.PhpClassPrefix != nil { - return *m.PhpClassPrefix - } - return "" -} - -func (m *FileOptions) GetPhpNamespace() string { - if m != nil && m.PhpNamespace != nil { - return *m.PhpNamespace - } - return "" -} - -func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. 
- MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -var extRange_MessageOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false -const Default_MessageOptions_Deprecated bool = false - -func (m *MessageOptions) GetMessageSetWireFormat() bool { - if m != nil && m.MessageSetWireFormat != nil { - return *m.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if m != nil && m.NoStandardDescriptorAccessor != nil { - return *m.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (m *MessageOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (m *MessageOptions) GetMapEntry() bool { - if m != nil && m.MapEntry != nil { - return *m.MapEntry - } - return false -} - -func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. 
This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. 
However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -var extRange_FieldOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (m *FieldOptions) GetCtype() FieldOptions_CType { - if m != nil && m.Ctype != nil { - return *m.Ctype - } - return Default_FieldOptions_Ctype -} - -func (m *FieldOptions) GetPacked() bool { - if m != nil && m.Packed != nil { - return *m.Packed - } - return false -} - -func (m *FieldOptions) GetJstype() FieldOptions_JSType { - if m != nil && m.Jstype != nil { - return *m.Jstype - } - return Default_FieldOptions_Jstype -} - -func (m *FieldOptions) GetLazy() bool { - if m != nil && m.Lazy != nil { - return *m.Lazy - } - return Default_FieldOptions_Lazy -} - -func (m *FieldOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (m *FieldOptions) GetWeak() bool { - if m != nil && m.Weak != nil { - return *m.Weak - } - return Default_FieldOptions_Weak -} - -func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - 
-type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -var extRange_OneofOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -var extRange_EnumOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} - -const Default_EnumOptions_Deprecated bool = false - -func (m *EnumOptions) GetAllowAlias() bool { - if m != nil && m.AllowAlias != nil { - return *m.AllowAlias - } - return false -} - -func (m *EnumOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} - -const Default_EnumValueOptions_Deprecated bool = false - -func (m *EnumValueOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} - -const Default_ServiceOptions_Deprecated bool = false - -func (m *ServiceOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -var extRange_MethodOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} - -const Default_MethodOptions_Deprecated bool = false -const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN - -func (m *MethodOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if m != nil && m.IdempotencyLevel != nil { - return *m.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. 
-type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if m != nil { - return m.Name - } - return nil -} - -func (m *UninterpretedOption) GetIdentifierValue() string { - if m != nil && m.IdentifierValue != nil { - return *m.IdentifierValue - } - return "" -} - -func (m *UninterpretedOption) GetPositiveIntValue() uint64 { - if m != nil && m.PositiveIntValue != nil { - return *m.PositiveIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetNegativeIntValue() int64 { - if m != nil && m.NegativeIntValue != nil { - return *m.NegativeIntValue - } - return 0 -} - -func (m *UninterpretedOption) 
GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *UninterpretedOption) GetStringValue() []byte { - if m != nil { - return m.StringValue - } - return nil -} - -func (m *UninterpretedOption) GetAggregateValue() string { - if m != nil && m.AggregateValue != nil { - return *m.AggregateValue - } - return "" -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". -type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } -func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{18, 0} -} - -func (m *UninterpretedOption_NamePart) GetNamePart() string { - if m != nil && m.NamePart != nil { - return *m.NamePart - } - return "" -} - -func (m *UninterpretedOption_NamePart) GetIsExtension() bool { - if m != nil && m.IsExtension != nil { - return *m.IsExtension - } - return false -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. 
This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. 
- // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if m != nil { - return m.Location - } - return nil -} - -type SourceCodeInfo_Location struct { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. 
Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. 
- LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } - -func (m *SourceCodeInfo_Location) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *SourceCodeInfo_Location) GetSpan() []int32 { - if m != nil { - return m.Span - } - return nil -} - -func (m *SourceCodeInfo_Location) GetLeadingComments() string { - if m != nil && m.LeadingComments != nil { - return *m.LeadingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetTrailingComments() string { - if m != nil && m.TrailingComments != nil { - return *m.TrailingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if m != nil { - return m.LeadingDetachedComments - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. 
- Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if m != nil { - return m.Annotation - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } -func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{20, 0} -} - -func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if m != nil && m.SourceFile != nil { - return *m.SourceFile - } - return "" -} - -func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if m != nil && m.Begin != nil { - return *m.Begin - } - return 0 -} - -func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func init() { - proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") - proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") - proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") - proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") - proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") - proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") - proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") - proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") - proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") - proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") - proto.RegisterType((*ServiceDescriptorProto)(nil), 
"google.protobuf.ServiceDescriptorProto") - proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") - proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") - proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") - proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") - proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") - proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") - proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") - proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") - proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") - proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") - proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") - proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") - proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") - proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", 
MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) -} - -func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 2519 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, - 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63, - 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec, - 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad, - 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50, - 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb, - 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 0x33, - 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d, - 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90, - 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43, - 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4, - 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61, - 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a, - 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76, - 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68, - 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3, - 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55, - 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 
0x14, 0x68, 0x0d, - 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6, - 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7, - 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa, - 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13, - 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2, - 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35, - 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e, - 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2, - 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec, - 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07, - 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94, - 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2, - 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e, - 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16, - 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91, - 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda, - 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79, - 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1, - 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9, - 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67, - 0x34, 0x61, 0x79, 
0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b, - 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65, - 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba, - 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e, - 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48, - 0x39, 0x25, 0x68, 0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c, - 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1, - 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91, - 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78, - 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c, - 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e, - 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61, - 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73, - 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76, - 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, - 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f, - 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc, - 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f, - 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54, - 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e, - 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 
0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d, - 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7, - 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda, - 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4, - 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f, - 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82, - 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4, - 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13, - 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3, - 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9, - 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78, - 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e, - 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10, - 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, - 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03, - 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, - 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37, - 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f, - 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e, - 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0, - 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 
0xf4, - 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80, - 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f, - 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96, - 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1, - 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa, - 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc, - 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59, - 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96, - 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50, - 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27, - 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58, - 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a, - 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67, - 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e, - 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27, - 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f, - 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0, - 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2, - 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4, - 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4, - 0x8a, 0x33, 0x76, 0xe2, 0xfd, 
0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c, - 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43, - 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76, - 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b, - 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28, - 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e, - 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55, - 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2, - 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd, - 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59, - 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27, - 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64, - 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d, - 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18, - 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1, - 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5, - 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 0x50, 0xf2, 0x28, 0x79, 0x24, - 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8, - 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94, - 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91, - 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 
0xa2, 0x5c, 0xd5, 0x83, 0x72, - 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c, - 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75, - 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4, - 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee, - 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e, - 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f, - 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20, - 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e, - 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8, - 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb, - 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16, - 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc, - 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1, - 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a, - 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd, - 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0, - 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25, - 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12, - 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e, - 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4, - 0x6e, 
0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09, - 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd, - 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43, - 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14, - 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7, - 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59, - 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e, - 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8, - 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a, - 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed, - 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f, - 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 0xe4, - 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f, - 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24, - 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66, - 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57, - 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c, - 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto deleted file mode 100644 index 4d4fb378f50..00000000000 --- 
a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto +++ /dev/null @@ -1,849 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. 
-// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). - - -syntax = "proto2"; - -package google.protobuf; -option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DescriptorProtos"; -option csharp_namespace = "Google.Protobuf.Reflection"; -option objc_class_prefix = "GPB"; - -// descriptor.proto must be optimized for speed because reflection-based -// algorithms don't work during bootstrapping. -option optimize_for = SPEED; - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -message FileDescriptorSet { - repeated FileDescriptorProto file = 1; -} - -// Describes a complete .proto file. -message FileDescriptorProto { - optional string name = 1; // file name, relative to root of source tree - optional string package = 2; // e.g. "foo", "foo.bar", etc. - - // Names of files imported by this file. - repeated string dependency = 3; - // Indexes of the public imported files in the dependency list above. - repeated int32 public_dependency = 10; - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - repeated int32 weak_dependency = 11; - - // All top-level definitions in this file. - repeated DescriptorProto message_type = 4; - repeated EnumDescriptorProto enum_type = 5; - repeated ServiceDescriptorProto service = 6; - repeated FieldDescriptorProto extension = 7; - - optional FileOptions options = 8; - - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. 
- optional SourceCodeInfo source_code_info = 9; - - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - optional string syntax = 12; -} - -// Describes a message type. -message DescriptorProto { - optional string name = 1; - - repeated FieldDescriptorProto field = 2; - repeated FieldDescriptorProto extension = 6; - - repeated DescriptorProto nested_type = 3; - repeated EnumDescriptorProto enum_type = 4; - - message ExtensionRange { - optional int32 start = 1; - optional int32 end = 2; - - optional ExtensionRangeOptions options = 3; - } - repeated ExtensionRange extension_range = 5; - - repeated OneofDescriptorProto oneof_decl = 8; - - optional MessageOptions options = 7; - - // Range of reserved tag numbers. Reserved tag numbers may not be used by - // fields or extension ranges in the same message. Reserved ranges may - // not overlap. - message ReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - } - repeated ReservedRange reserved_range = 9; - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - repeated string reserved_name = 10; -} - -message ExtensionRangeOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -// Describes a field within a message. -message FieldDescriptorProto { - enum Type { - // 0 is reserved for errors. - // Order is weird for historical reasons. - TYPE_DOUBLE = 1; - TYPE_FLOAT = 2; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - TYPE_INT64 = 3; - TYPE_UINT64 = 4; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. 
- TYPE_INT32 = 5; - TYPE_FIXED64 = 6; - TYPE_FIXED32 = 7; - TYPE_BOOL = 8; - TYPE_STRING = 9; - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - TYPE_GROUP = 10; - TYPE_MESSAGE = 11; // Length-delimited aggregate. - - // New in version 2. - TYPE_BYTES = 12; - TYPE_UINT32 = 13; - TYPE_ENUM = 14; - TYPE_SFIXED32 = 15; - TYPE_SFIXED64 = 16; - TYPE_SINT32 = 17; // Uses ZigZag encoding. - TYPE_SINT64 = 18; // Uses ZigZag encoding. - }; - - enum Label { - // 0 is reserved for errors - LABEL_OPTIONAL = 1; - LABEL_REQUIRED = 2; - LABEL_REPEATED = 3; - }; - - optional string name = 1; - optional int32 number = 3; - optional Label label = 4; - - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - optional Type type = 5; - - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - optional string type_name = 6; - - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - optional string extendee = 2; - - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - optional string default_value = 7; - - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. 
- optional int32 oneof_index = 9; - - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - optional string json_name = 10; - - optional FieldOptions options = 8; -} - -// Describes a oneof. -message OneofDescriptorProto { - optional string name = 1; - optional OneofOptions options = 2; -} - -// Describes an enum type. -message EnumDescriptorProto { - optional string name = 1; - - repeated EnumValueDescriptorProto value = 2; - - optional EnumOptions options = 3; -} - -// Describes a value within an enum. -message EnumValueDescriptorProto { - optional string name = 1; - optional int32 number = 2; - - optional EnumValueOptions options = 3; -} - -// Describes a service. -message ServiceDescriptorProto { - optional string name = 1; - repeated MethodDescriptorProto method = 2; - - optional ServiceOptions options = 3; -} - -// Describes a method of a service. -message MethodDescriptorProto { - optional string name = 1; - - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. - optional string input_type = 2; - optional string output_type = 3; - - optional MethodOptions options = 4; - - // Identifies if client streams multiple client messages - optional bool client_streaming = 5 [default=false]; - // Identifies if server streams multiple server messages - optional bool server_streaming = 6 [default=false]; -} - - -// =================================================================== -// Options - -// Each of the definitions above may have "options" attached. These are -// just annotations which may cause code to be generated slightly differently -// or may contain hints for code that manipulates protocol messages. 
-// -// Clients may define custom options as extensions of the *Options messages. -// These extensions may not yet be known at parsing time, so the parser cannot -// store the values in them. Instead it stores them in a field in the *Options -// message called uninterpreted_option. This field must have the same name -// across all *Options messages. We then use this field to populate the -// extensions when we build a descriptor, at which point all protos have been -// parsed and so all extensions are known. -// -// Extension numbers for custom options may be chosen as follows: -// * For options which will only be used within a single application or -// organization, or for experimental options, use field numbers 50000 -// through 99999. It is up to you to ensure that you do not use the -// same number for multiple options. -// * For options which will be published and used publicly by multiple -// independent entities, e-mail protobuf-global-extension-registry@google.com -// to reserve extension numbers. Simply provide your project name (e.g. -// Objective-C plugin) and your project website (if available) -- there's no -// need to explain how you intend to use them. Usually you only need one -// extension number. You can declare multiple options with only one extension -// number by putting them in a sub-message. See the Custom Options section of -// the docs for examples: -// https://developers.google.com/protocol-buffers/docs/proto#options -// If this turns out to be popular, a web service will be set up -// to automatically assign option numbers. - - -message FileOptions { - - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. 
- optional string java_package = 1; - - - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - optional string java_outer_classname = 8; - - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - optional bool java_multiple_files = 10 [default=false]; - - // This option does nothing. - optional bool java_generate_equals_and_hash = 20 [deprecated=true]; - - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - optional bool java_string_check_utf8 = 27 [default=false]; - - - // Generated classes can be optimized for speed or code size. - enum OptimizeMode { - SPEED = 1; // Generate complete code for parsing, serialization, - // etc. - CODE_SIZE = 2; // Use ReflectionOps to implement these methods. - LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. - } - optional OptimizeMode optimize_for = 9 [default=SPEED]; - - // Sets the Go package where structs generated from this .proto will be - // placed. 
If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - optional string go_package = 11; - - - - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - optional bool cc_generic_services = 16 [default=false]; - optional bool java_generic_services = 17 [default=false]; - optional bool py_generic_services = 18 [default=false]; - optional bool php_generic_services = 42 [default=false]; - - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - optional bool deprecated = 23 [default=false]; - - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - optional bool cc_enable_arenas = 31 [default=false]; - - - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - optional string objc_class_prefix = 36; - - // Namespace for generated classes; defaults to the package. 
- optional string csharp_namespace = 37; - - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - optional string swift_prefix = 39; - - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - optional string php_class_prefix = 40; - - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - optional string php_namespace = 41; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; - - reserved 38; -} - -message MessageOptions { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - optional bool message_set_wire_format = 1 [default=false]; - - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. 
This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - optional bool no_standard_descriptor_accessor = 2 [default=false]; - - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - optional bool deprecated = 3 [default=false]; - - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - optional bool map_entry = 7; - - reserved 8; // javalite_serializable - reserved 9; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message FieldOptions { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! 
- optional CType ctype = 1 [default = STRING]; - enum CType { - // Default mode. - STRING = 0; - - CORD = 1; - - STRING_PIECE = 2; - } - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - optional bool packed = 2; - - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - optional JSType jstype = 6 [default = JS_NORMAL]; - enum JSType { - // Use the default type. - JS_NORMAL = 0; - - // Use JavaScript strings. - JS_STRING = 1; - - // Use JavaScript numbers. - JS_NUMBER = 2; - } - - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. 
However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - optional bool lazy = 5 [default=false]; - - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - optional bool deprecated = 3 [default=false]; - - // For Google-internal migration only. Do not use. - optional bool weak = 10 [default=false]; - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. 
- extensions 1000 to max; - - reserved 4; // removed jtype -} - -message OneofOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumOptions { - - // Set this option to true to allow mapping different tag names to the same - // value. - optional bool allow_alias = 2; - - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - optional bool deprecated = 3 [default=false]; - - reserved 5; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumValueOptions { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - optional bool deprecated = 1 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message ServiceOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this service deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - optional bool deprecated = 33 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message MethodOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - optional bool deprecated = 33 [default=false]; - - // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - // or neither? HTTP based RPC implementation may choose GET verb for safe - // methods, and PUT verb for idempotent methods instead of the default POST. - enum IdempotencyLevel { - IDEMPOTENCY_UNKNOWN = 0; - NO_SIDE_EFFECTS = 1; // implies idempotent - IDEMPOTENT = 2; // idempotent, but may have side effects - } - optional IdempotencyLevel idempotency_level = - 34 [default=IDEMPOTENCY_UNKNOWN]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. 
Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -message UninterpretedOption { - // The name of the uninterpreted option. Each string represents a segment in - // a dot-separated name. is_extension is true iff a segment represents an - // extension (denoted with parentheses in options specs in .proto files). - // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents - // "foo.(bar.baz).qux". - message NamePart { - required string name_part = 1; - required bool is_extension = 2; - } - repeated NamePart name = 2; - - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - optional string identifier_value = 3; - optional uint64 positive_int_value = 4; - optional int64 negative_int_value = 5; - optional double double_value = 6; - optional bytes string_value = 7; - optional string aggregate_value = 8; -} - -// =================================================================== -// Optional source code info - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -message SourceCodeInfo { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). 
- // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - repeated Location location = 1; - message Location { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. 
For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - repeated int32 path = 1 [packed=true]; - - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - repeated int32 span = 2 [packed=true]; - - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. 
- // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - optional string leading_comments = 3; - optional string trailing_comments = 4; - repeated string leading_detached_comments = 6; - } -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -message GeneratedCodeInfo { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - repeated Annotation annotation = 1; - message Annotation { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - repeated int32 path = 1 [packed=true]; - - // Identifies the filesystem path to the original source .proto. - optional string source_file = 2; - - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - optional int32 begin = 3; - - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. 
The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - optional int32 end = 4; - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD index 4c47e798460..77e7c662829 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD @@ -15,7 +15,6 @@ go_library( "mux.go", "pattern.go", "proto2_convert.go", - "proto_errors.go", "query.go", ], importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", @@ -26,10 +25,10 @@ go_library( "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal:go_default_library", "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", "//vendor/google.golang.org/grpc/grpclog:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", - "//vendor/google.golang.org/grpc/status:go_default_library", ], ) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go index 6e0eb27e285..f248c738b23 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go @@ -9,23 +9,18 @@ import ( "time" "golang.org/x/net/context" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" ) -// MetadataHeaderPrefix is the http prefix that represents custom metadata -// parameters to or from a gRPC call. 
+// MetadataHeaderPrefix is prepended to HTTP headers in order to convert them to +// gRPC metadata for incoming requests processed by grpc-gateway const MetadataHeaderPrefix = "Grpc-Metadata-" - -// MetadataPrefix is the prefix for grpc-gateway supplied custom metadata fields. -const MetadataPrefix = "grpcgateway-" - // MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to // HTTP headers in a response handled by grpc-gateway const MetadataTrailerPrefix = "Grpc-Trailer-" - const metadataGrpcTimeout = "Grpc-Timeout" const xForwardedFor = "X-Forwarded-For" @@ -44,25 +39,25 @@ At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", except that the forwarded destination is not another HTTP service but rather a gRPC service. */ -func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { +func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, error) { var pairs []string timeout := DefaultContextTimeout if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { var err error timeout, err = timeoutDecode(tm) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + return nil, grpc.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) } } for key, vals := range req.Header { for _, val := range vals { - // For backwards-compatibility, pass through 'authorization' header with no prefix. - if strings.ToLower(key) == "authorization" { + if key == "Authorization" { pairs = append(pairs, "authorization", val) + continue } - if h, ok := mux.incomingHeaderMatcher(key); ok { - pairs = append(pairs, h, val) + if strings.HasPrefix(key, MetadataHeaderPrefix) { + pairs = append(pairs, key[len(MetadataHeaderPrefix):], val) } } } @@ -90,11 +85,7 @@ func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (con if len(pairs) == 0 { return ctx, nil } - md := metadata.Pairs(pairs...) 
- if mux.metadataAnnotator != nil { - md = metadata.Join(md, mux.metadataAnnotator(ctx, req)) - } - return metadata.NewOutgoingContext(ctx, md), nil + return metadata.NewContext(ctx, metadata.Pairs(pairs...)), nil } // ServerMetadata consists of metadata sent from gRPC server. @@ -150,38 +141,3 @@ func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { } return } - -// isPermanentHTTPHeader checks whether hdr belongs to the list of -// permenant request headers maintained by IANA. -// http://www.iana.org/assignments/message-headers/message-headers.xml -func isPermanentHTTPHeader(hdr string) bool { - switch hdr { - case - "Accept", - "Accept-Charset", - "Accept-Language", - "Accept-Ranges", - "Authorization", - "Cache-Control", - "Content-Type", - "Cookie", - "Date", - "Expect", - "From", - "Host", - "If-Match", - "If-Modified-Since", - "If-None-Match", - "If-Schedule-Tag-Match", - "If-Unmodified-Since", - "Max-Forwards", - "Origin", - "Pragma", - "Referer", - "User-Agent", - "Via", - "Warning": - return true - } - return false -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go index 8eebdcf49f4..0d3cb3bf3ca 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go @@ -6,9 +6,9 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/context" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" ) // HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. 
@@ -64,7 +64,7 @@ var ( type errorBody struct { Error string `protobuf:"bytes,1,name=error" json:"error"` - Code int32 `protobuf:"varint,2,name=code" json:"code"` + Code int `protobuf:"bytes,2,name=code" json:"code"` } //Make this also conform to proto.Message for builtin JSONPb Marshaler @@ -78,20 +78,14 @@ func (*errorBody) ProtoMessage() {} // // The response body returned by this function is a JSON object, // which contains a member whose key is "error" and whose value is err.Error(). -func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { +func DefaultHTTPError(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { const fallback = `{"error": "failed to marshal error message"}` w.Header().Del("Trailer") w.Header().Set("Content-Type", marshaler.ContentType()) - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - body := &errorBody{ - Error: s.Message(), - Code: int32(s.Code()), + Error: grpc.ErrorDesc(err), + Code: int(grpc.Code(err)), } buf, merr := marshaler.Marshal(body) @@ -109,9 +103,9 @@ func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w grpclog.Printf("Failed to extract ServerMetadata from context") } - handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseServerMetadata(w, md) handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) + st := HTTPStatusFromCode(grpc.Code(err)) w.WriteHeader(st) if _, err := w.Write(buf); err != nil { grpclog.Printf("Failed to write response: %v", err) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go index ae6a5d551cf..d7040851ae9 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go @@ -9,13 +9,12 @@ import ( 
"github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime/internal" "golang.org/x/net/context" - "google.golang.org/grpc/codes" + "google.golang.org/grpc" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" ) // ForwardResponseStream forwards the stream from gRPC server to REST client. -func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { +func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { f, ok := w.(http.Flusher) if !ok { grpclog.Printf("Flush not supported in %T", w) @@ -29,7 +28,7 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal http.Error(w, "unexpected error", http.StatusInternalServerError) return } - handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseServerMetadata(w, md) w.Header().Set("Transfer-Encoding", "chunked") w.Header().Set("Content-Type", marshaler.ContentType()) @@ -58,7 +57,7 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal grpclog.Printf("Failed to marshal response chunk: %v", err) return } - if _, err = w.Write(buf); err != nil { + if _, err = fmt.Fprintf(w, "%s\n", buf); err != nil { grpclog.Printf("Failed to send response chunk: %v", err) return } @@ -66,12 +65,11 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } } -func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { +func handleForwardResponseServerMetadata(w http.ResponseWriter, md ServerMetadata) { for k, vs := range md.HeaderMD { - if h, ok := mux.outgoingHeaderMatcher(k); ok { - for _, v := range vs { - w.Header().Add(h, v) - } 
+ hKey := fmt.Sprintf("%s%s", MetadataHeaderPrefix, k) + for i := range vs { + w.Header().Add(hKey, vs[i]) } } } @@ -86,31 +84,31 @@ func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { for k, vs := range md.TrailerMD { tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) - for _, v := range vs { - w.Header().Add(tKey, v) + for i := range vs { + w.Header().Add(tKey, vs[i]) } } } // ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. -func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { +func ForwardResponseMessage(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) if !ok { grpclog.Printf("Failed to extract ServerMetadata from context") } - handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseServerMetadata(w, md) handleForwardResponseTrailerHeader(w, md) w.Header().Set("Content-Type", marshaler.ContentType()) if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - HTTPError(ctx, mux, marshaler, w, req, err) + HTTPError(ctx, marshaler, w, req, err) return } buf, err := marshaler.Marshal(resp) if err != nil { grpclog.Printf("Marshal error: %v", err) - HTTPError(ctx, mux, marshaler, w, req, err) + HTTPError(ctx, marshaler, w, req, err) return } @@ -148,10 +146,7 @@ func handleForwardResponseStreamError(marshaler Marshaler, w http.ResponseWriter func streamChunk(result proto.Message, err error) map[string]proto.Message { if err != nil { - grpcCode := codes.Unknown - if s, ok := status.FromError(err); ok { - grpcCode = s.Code() - } + grpcCode := grpc.Code(err) 
httpCode := HTTPStatusFromCode(grpcCode) return map[string]proto.Message{ "error": &internal.StreamError{ diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go index 44550f393b4..6f837cfd5d9 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go @@ -1,5 +1,6 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. +// Code generated by protoc-gen-go. // source: runtime/internal/stream_chunk.proto +// DO NOT EDIT! /* Package internal is a generated protocol buffer package. @@ -41,34 +42,6 @@ func (m *StreamError) String() string { return proto.CompactTextStrin func (*StreamError) ProtoMessage() {} func (*StreamError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (m *StreamError) GetGrpcCode() int32 { - if m != nil { - return m.GrpcCode - } - return 0 -} - -func (m *StreamError) GetHttpCode() int32 { - if m != nil { - return m.HttpCode - } - return 0 -} - -func (m *StreamError) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *StreamError) GetHttpStatus() string { - if m != nil { - return m.HttpStatus - } - return "" -} - func init() { proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") } @@ -77,7 +50,7 @@ func init() { proto.RegisterFile("runtime/internal/stream_chunk.proto", fileDesc var fileDescriptor0 = []byte{ // 181 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30, 0x14, 0x85, 0xd3, 0xdf, 0x1f, 0x85, 0xcb, 0x46, 0x1c, 0x9a, 0x38, 0x48, 0x74, 0x61, 0x82, 0xc1, 0x37, 0xd0, 0xf8, 0x02, 0xb0, 0xb9, 0x90, 0x0a, 0x37, 0x40, 0x94, 0x96, 0xdc, 0x5e, 0x62, 
0x5c, 0x7d, 0x72, 0xd3, 0x22, 0xe3, 0xf9, 0xbe, 0x73, 0x92, 0x03, 0x07, 0x9a, 0x34, 0xf7, 0x03, 0xe6, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go index 205bc430921..2e6c5621302 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -1,16 +1,12 @@ package runtime import ( - "fmt" "net/http" - "net/textproto" "strings" - "github.com/golang/protobuf/proto" "golang.org/x/net/context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" + + "github.com/golang/protobuf/proto" ) // A HandlerFunc handles a specific pair of path pattern and HTTP method. @@ -23,10 +19,6 @@ type ServeMux struct { handlers map[string][]handler forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error marshalers marshalerRegistry - incomingHeaderMatcher HeaderMatcherFunc - outgoingHeaderMatcher HeaderMatcherFunc - metadataAnnotator func(context.Context, *http.Request) metadata.MD - protoErrorHandler ProtoErrorHandlerFunc } // ServeMuxOption is an option that can be given to a ServeMux on construction. @@ -44,64 +36,6 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http. } } -// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. -type HeaderMatcherFunc func(string) (string, bool) - -// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header -// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with -// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. 
-func DefaultHeaderMatcher(key string) (string, bool) { - key = textproto.CanonicalMIMEHeaderKey(key) - if isPermanentHTTPHeader(key) { - return MetadataPrefix + key, true - } else if strings.HasPrefix(key, MetadataHeaderPrefix) { - return key[len(MetadataHeaderPrefix):], true - } - return "", false -} - -// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. -// -// This matcher will be called with each header in http.Request. If matcher returns true, that header will be -// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. -func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { - return func(mux *ServeMux) { - mux.incomingHeaderMatcher = fn - } -} - -// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. -// -// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be -// passed to http response returned from gateway. To transform the header before passing to response, -// matcher should return modified header. -func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { - return func(mux *ServeMux) { - mux.outgoingHeaderMatcher = fn - } -} - -// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. -// -// This can be used by services that need to read from http.Request and modify gRPC context. A common use case -// is reading token from cookie and adding it in gRPC context. -func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.metadataAnnotator = annotator - } -} - -// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context. -// -// This can be used to handle an error as general proto message defined by gRPC. 
-// The response including body and status is not backward compatible with the default error handler. -// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization. -func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.protoErrorHandler = fn - } -} - // NewServeMux returns a new ServeMux whose internal mapping is empty. func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ @@ -113,29 +47,6 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { for _, opt := range opts { opt(serveMux) } - - if serveMux.protoErrorHandler != nil { - HTTPError = serveMux.protoErrorHandler - // OtherErrorHandler is no longer used when protoErrorHandler is set. - // Overwritten by a special error handler to return Unknown. - OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { - ctx := context.Background() - _, outboundMarshaler := MarshalerForRequest(serveMux, r) - sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") - serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) - } - } - - if serveMux.incomingHeaderMatcher == nil { - serveMux.incomingHeaderMatcher = DefaultHeaderMatcher - } - - if serveMux.outgoingHeaderMatcher == nil { - serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { - return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true - } - } - return serveMux } @@ -146,17 +57,9 @@ func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { // ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - path := r.URL.Path if !strings.HasPrefix(path, "/") { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) - } + OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) return } @@ -164,13 +67,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { l := len(components) var verb string if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) return } else if idx > 0 { c := components[l-1] @@ -180,13 +77,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && isPathLengthFallback(r) { r.Method = strings.ToUpper(override) if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) return } } @@ -213,36 +104,17 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { // X-HTTP-Method-Override is optional. 
Always allow fallback to POST. if isPathLengthFallback(r) { if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) return } h.h(w, r, pathParams) return } - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) - } + OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) return } } - - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) } // GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go index 8a9ec2cdae4..3947dbea023 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -21,7 +21,7 @@ type op struct { operand int } -// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. 
+// Pattern is a template pattern of http request paths defined in third_party/googleapis/google/api/http.proto. type Pattern struct { // ops is a list of operations ops []op diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go deleted file mode 100644 index b1b089273b6..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go +++ /dev/null @@ -1,61 +0,0 @@ -package runtime - -import ( - "io" - "net/http" - - "golang.org/x/net/context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. -type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) - -var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler - -// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a Status message marshaled by a Marshaler. -// -// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
-func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - // return Internal when Marshal failed - const fallback = `{"code": 13, "message": "failed to marshal error message"}` - - w.Header().Del("Trailer") - w.Header().Set("Content-Type", marshaler.ContentType()) - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - buf, merr := marshaler.Marshal(s.Proto()) - if merr != nil { - grpclog.Printf("Failed to marshal error message %q: %v", s.Proto(), merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Printf("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Printf("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Printf("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go index c00e0b914e2..56a919a52f1 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go @@ -4,9 +4,7 @@ import ( "fmt" "net/url" "reflect" - "strconv" "strings" - "time" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/utilities" @@ -40,39 +38,31 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values [] if m.Kind() != reflect.Ptr { return fmt.Errorf("unexpected type %T: %v", msg, msg) } - var props *proto.Properties m = m.Elem() for i, fieldName := range fieldPath { isLast := i == len(fieldPath)-1 if !isLast && m.Kind() != 
reflect.Struct { return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) } - var f reflect.Value - var err error - f, props, err = fieldByProtoName(m, fieldName) - if err != nil { - return err - } else if !f.IsValid() { + f := fieldByProtoName(m, fieldName) + if !f.IsValid() { grpclog.Printf("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) return nil } switch f.Kind() { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } m = f case reflect.Slice: // TODO(yugui) Support []byte if !isLast { return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) } - return populateRepeatedField(f, values, props) + return populateRepeatedField(f, values) case reflect.Ptr: if f.IsNil() { m = reflect.New(f.Type().Elem()) - f.Set(m.Convert(f.Type())) + f.Set(m) } m = f.Elem() continue @@ -90,127 +80,39 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values [] default: grpclog.Printf("too many field values: %s", strings.Join(fieldPath, ".")) } - return populateField(m, values[0], props) + return populateField(m, values[0]) } // fieldByProtoName looks up a field whose corresponding protobuf field name is "name". // "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { +func fieldByProtoName(m reflect.Value, name string) reflect.Value { props := proto.GetProperties(m.Type()) - - // look up field name in oneof map - if op, ok := props.OneofTypes[name]; ok { - v := reflect.New(op.Type.Elem()) - field := m.Field(op.Field) - if !field.IsNil() { - return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) - } - field.Set(v) - return v.Elem().Field(0), op.Prop, nil - } - for _, p := range props.Prop { if p.OrigName == name { - return m.FieldByName(p.Name), p, nil - } - if p.JSONName == name { - return m.FieldByName(p.Name), p, nil + return m.FieldByName(p.Name) } } - return reflect.Value{}, nil, nil + return reflect.Value{} } -func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { +func populateRepeatedField(f reflect.Value, values []string) error { elemType := f.Type().Elem() - - // is the destination field a slice of an enumeration type? 
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnumRepeated(f, values, enumValMap) - } - conv, ok := convFromType[elemType.Kind()] if !ok { return fmt.Errorf("unsupported field type %s", elemType) } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values))) for i, v := range values { result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) if err := result[1].Interface(); err != nil { return err.(error) } - f.Index(i).Set(result[0].Convert(f.Index(i).Type())) + f.Index(i).Set(result[0]) } return nil } -func populateField(f reflect.Value, value string, props *proto.Properties) error { - // Handle well known type - type wkt interface { - XXX_WellKnownType() string - } - if wkt, ok := f.Addr().Interface().(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "Timestamp": - if value == "null" { - f.Field(0).SetInt(0) - f.Field(1).SetInt(0) - return nil - } - - t, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - f.Field(0).SetInt(int64(t.Unix())) - f.Field(1).SetInt(int64(t.Nanosecond())) - return nil - case "DoubleValue": - fallthrough - case "FloatValue": - float64Val, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetFloat(float64Val) - return nil - case "Int64Value": - fallthrough - case "Int32Value": - int64Val, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetInt(int64Val) - return nil - case "UInt64Value": - fallthrough - case "UInt32Value": - uint64Val, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetUint(uint64Val) - return nil - case "BoolValue": - if value == "true" { - f.Field(0).SetBool(true) - } else if value == "false" { - 
f.Field(0).SetBool(false) - } else { - return fmt.Errorf("bad BoolValue: %s", value) - } - return nil - case "StringValue": - f.Field(0).SetString(value) - return nil - } - } - - // is the destination field an enumeration type? - if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnum(f, value, enumValMap) - } - +func populateField(f reflect.Value, value string) error { conv, ok := convFromType[f.Kind()] if !ok { return fmt.Errorf("unsupported field type %T", f) @@ -219,48 +121,7 @@ func populateField(f reflect.Value, value string, props *proto.Properties) error if err := result[1].Interface(); err != nil { return err.(error) } - f.Set(result[0].Convert(f.Type())) - return nil -} - -func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { - // see if it's an enumeration string - if enumVal, ok := enumValMap[value]; ok { - return reflect.ValueOf(enumVal).Convert(t), nil - } - - // check for an integer that matches an enumeration value - eVal, err := strconv.Atoi(value) - if err != nil { - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) - } - for _, v := range enumValMap { - if v == int32(eVal) { - return reflect.ValueOf(eVal).Convert(t), nil - } - } - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) -} - -func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { - cval, err := convertEnum(value, f.Type(), enumValMap) - if err != nil { - return err - } - f.Set(cval) - return nil -} - -func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { - elemType := f.Type().Elem() - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result, err := convertEnum(v, elemType, enumValMap) - if err != nil { - return err - } - f.Index(i).Set(result) - } + f.Set(result[0]) return nil } diff --git 
a/vendor/github.com/coreos/etcd/pkg/debugutil/BUILD b/vendor/github.com/karlseguin/ccache/BUILD similarity index 66% rename from vendor/github.com/coreos/etcd/pkg/debugutil/BUILD rename to vendor/github.com/karlseguin/ccache/BUILD index d943ed2561a..398b1fd68ad 100644 --- a/vendor/github.com/coreos/etcd/pkg/debugutil/BUILD +++ b/vendor/github.com/karlseguin/ccache/BUILD @@ -3,10 +3,15 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "doc.go", - "pprof.go", + "bucket.go", + "cache.go", + "configuration.go", + "item.go", + "layeredbucket.go", + "layeredcache.go", + "secondarycache.go", ], - importpath = "github.com/coreos/etcd/pkg/debugutil", + importpath = "github.com/karlseguin/ccache", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/karlseguin/ccache/Makefile b/vendor/github.com/karlseguin/ccache/Makefile new file mode 100644 index 00000000000..5b3f26bafdc --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/Makefile @@ -0,0 +1,5 @@ +t: + go test ./... + +f: + go fmt ./... 
diff --git a/vendor/github.com/karlseguin/ccache/bucket.go b/vendor/github.com/karlseguin/ccache/bucket.go new file mode 100644 index 00000000000..d67535170c5 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/bucket.go @@ -0,0 +1,41 @@ +package ccache + +import ( + "sync" + "time" +) + +type bucket struct { + sync.RWMutex + lookup map[string]*Item +} + +func (b *bucket) get(key string) *Item { + b.RLock() + defer b.RUnlock() + return b.lookup[key] +} + +func (b *bucket) set(key string, value interface{}, duration time.Duration) (*Item, *Item) { + expires := time.Now().Add(duration).UnixNano() + item := newItem(key, value, expires) + b.Lock() + defer b.Unlock() + existing := b.lookup[key] + b.lookup[key] = item + return item, existing +} + +func (b *bucket) delete(key string) *Item { + b.Lock() + defer b.Unlock() + item := b.lookup[key] + delete(b.lookup, key) + return item +} + +func (b *bucket) clear() { + b.Lock() + defer b.Unlock() + b.lookup = make(map[string]*Item) +} diff --git a/vendor/github.com/karlseguin/ccache/cache.go b/vendor/github.com/karlseguin/ccache/cache.go new file mode 100644 index 00000000000..a9e94f486f9 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/cache.go @@ -0,0 +1,227 @@ +// An LRU cached aimed at high concurrency +package ccache + +import ( + "container/list" + "hash/fnv" + "sync/atomic" + "time" +) + +type Cache struct { + *Configuration + list *list.List + size int64 + buckets []*bucket + bucketMask uint32 + deletables chan *Item + promotables chan *Item + donec chan struct{} +} + +// Create a new cache with the specified configuration +// See ccache.Configure() for creating a configuration +func New(config *Configuration) *Cache { + c := &Cache{ + list: list.New(), + Configuration: config, + bucketMask: uint32(config.buckets) - 1, + buckets: make([]*bucket, config.buckets), + } + for i := 0; i < int(config.buckets); i++ { + c.buckets[i] = &bucket{ + lookup: make(map[string]*Item), + } + } + c.restart() + return c +} + 
+// Get an item from the cache. Returns nil if the item wasn't found. +// This can return an expired item. Use item.Expired() to see if the item +// is expired and item.TTL() to see how long until the item expires (which +// will be negative for an already expired item). +func (c *Cache) Get(key string) *Item { + item := c.bucket(key).get(key) + if item == nil { + return nil + } + if item.expires > time.Now().UnixNano() { + c.promote(item) + } + return item +} + +// Used when the cache was created with the Track() configuration option. +// Avoid otherwise +func (c *Cache) TrackingGet(key string) TrackedItem { + item := c.Get(key) + if item == nil { + return NilTracked + } + item.track() + return item +} + +// Set the value in the cache for the specified duration +func (c *Cache) Set(key string, value interface{}, duration time.Duration) { + c.set(key, value, duration) +} + +// Replace the value if it exists, does not set if it doesn't. +// Returns true if the item existed an was replaced, false otherwise. +// Replace does not reset item's TTL +func (c *Cache) Replace(key string, value interface{}) bool { + item := c.bucket(key).get(key) + if item == nil { + return false + } + c.Set(key, value, item.TTL()) + return true +} + +// Attempts to get the value from the cache and calles fetch on a miss (missing +// or stale item). If fetch returns an error, no value is cached and the error +// is returned back to the caller. +func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { + item := c.Get(key) + if item != nil && !item.Expired() { + return item, nil + } + value, err := fetch() + if err != nil { + return nil, err + } + return c.set(key, value, duration), nil +} + +// Remove the item from the cache, return true if the item was present, false otherwise. 
+func (c *Cache) Delete(key string) bool { + item := c.bucket(key).delete(key) + if item != nil { + c.deletables <- item + return true + } + return false +} + +//this isn't thread safe. It's meant to be called from non-concurrent tests +func (c *Cache) Clear() { + for _, bucket := range c.buckets { + bucket.clear() + } + c.size = 0 + c.list = list.New() +} + +// Stops the background worker. Operations performed on the cache after Stop +// is called are likely to panic +func (c *Cache) Stop() { + close(c.promotables) + <-c.donec +} + +func (c *Cache) restart() { + c.deletables = make(chan *Item, c.deleteBuffer) + c.promotables = make(chan *Item, c.promoteBuffer) + c.donec = make(chan struct{}) + go c.worker() +} + +func (c *Cache) deleteItem(bucket *bucket, item *Item) { + bucket.delete(item.key) //stop other GETs from getting it + c.deletables <- item +} + +func (c *Cache) set(key string, value interface{}, duration time.Duration) *Item { + item, existing := c.bucket(key).set(key, value, duration) + if existing != nil { + c.deletables <- existing + } + c.promote(item) + return item +} + +func (c *Cache) bucket(key string) *bucket { + h := fnv.New32a() + h.Write([]byte(key)) + return c.buckets[h.Sum32()&c.bucketMask] +} + +func (c *Cache) promote(item *Item) { + c.promotables <- item +} + +func (c *Cache) worker() { + defer close(c.donec) + + for { + select { + case item, ok := <-c.promotables: + if ok == false { + goto drain + } + if c.doPromote(item) && c.size > c.maxSize { + c.gc() + } + case item := <-c.deletables: + c.doDelete(item) + } + } + +drain: + for { + select { + case item := <-c.deletables: + c.doDelete(item) + default: + close(c.deletables) + return + } + } +} + +func (c *Cache) doDelete(item *Item) { + if item.element == nil { + item.promotions = -2 + } else { + c.size -= item.size + c.list.Remove(item.element) + } +} + +func (c *Cache) doPromote(item *Item) bool { + //already deleted + if item.promotions == -2 { + return false + } + if item.element 
!= nil { //not a new item + if item.shouldPromote(c.getsPerPromote) { + c.list.MoveToFront(item.element) + item.promotions = 0 + } + return false + } + + c.size += item.size + item.element = c.list.PushFront(item) + return true +} + +func (c *Cache) gc() { + element := c.list.Back() + for i := 0; i < c.itemsToPrune; i++ { + if element == nil { + return + } + prev := element.Prev() + item := element.Value.(*Item) + if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 { + c.bucket(item.key).delete(item.key) + c.size -= item.size + c.list.Remove(element) + item.promotions = -2 + } + element = prev + } +} diff --git a/vendor/github.com/karlseguin/ccache/configuration.go b/vendor/github.com/karlseguin/ccache/configuration.go new file mode 100644 index 00000000000..daa8357767e --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/configuration.go @@ -0,0 +1,94 @@ +package ccache + +type Configuration struct { + maxSize int64 + buckets int + itemsToPrune int + deleteBuffer int + promoteBuffer int + getsPerPromote int32 + tracking bool +} + +// Creates a configuration object with sensible defaults +// Use this as the start of the fluent configuration: +// e.g.: ccache.New(ccache.Configure().MaxSize(10000)) +func Configure() *Configuration { + return &Configuration{ + buckets: 16, + itemsToPrune: 500, + deleteBuffer: 1024, + getsPerPromote: 3, + promoteBuffer: 1024, + maxSize: 5000, + tracking: false, + } +} + +// The max size for the cache +// [5000] +func (c *Configuration) MaxSize(max int64) *Configuration { + c.maxSize = max + return c +} + +// Keys are hashed into % bucket count to provide greater concurrency (every set +// requires a write lock on the bucket). Must be a power of 2 (1, 2, 4, 8, 16, ...) 
+// [16] +func (c *Configuration) Buckets(count uint32) *Configuration { + if count == 0 || ((count&(^count+1)) == count) == false { + count = 16 + } + c.buckets = int(count) + return c +} + +// The number of items to prune when memory is low +// [500] +func (c *Configuration) ItemsToPrune(count uint32) *Configuration { + c.itemsToPrune = int(count) + return c +} + +// The size of the queue for items which should be promoted. If the queue fills +// up, promotions are skipped +// [1024] +func (c *Configuration) PromoteBuffer(size uint32) *Configuration { + c.promoteBuffer = int(size) + return c +} + +// The size of the queue for items which should be deleted. If the queue fills +// up, calls to Delete() will block +func (c *Configuration) DeleteBuffer(size uint32) *Configuration { + c.deleteBuffer = int(size) + return c +} + +// Give a large cache with a high read / write ratio, it's usually unecessary +// to promote an item on every Get. GetsPerPromote specifies the number of Gets +// a key must have before being promoted +// [3] +func (c *Configuration) GetsPerPromote(count int32) *Configuration { + c.getsPerPromote = count + return c +} + +// Typically, a cache is agnostic about how cached values are use. This is fine +// for a typical cache usage, where you fetch an item from the cache, do something +// (write it out) and nothing else. + +// However, if callers are going to keep a reference to a cached item for a long +// time, things get messy. Specifically, the cache can evict the item, while +// references still exist. Technically, this isn't an issue. However, if you reload +// the item back into the cache, you end up with 2 objects representing the same +// data. This is a waste of space and could lead to weird behavior (the type an +// identity map is meant to solve). + +// By turning tracking on and using the cache's TrackingGet, the cache +// won't evict items which you haven't called Release() on. It's a simple reference +// counter. 
+func (c *Configuration) Track() *Configuration { + c.tracking = true + return c +} diff --git a/vendor/github.com/karlseguin/ccache/item.go b/vendor/github.com/karlseguin/ccache/item.go new file mode 100644 index 00000000000..bb7c04fff9d --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/item.go @@ -0,0 +1,103 @@ +package ccache + +import ( + "container/list" + "sync/atomic" + "time" +) + +type Sized interface { + Size() int64 +} + +type TrackedItem interface { + Value() interface{} + Release() + Expired() bool + TTL() time.Duration + Expires() time.Time + Extend(duration time.Duration) +} + +type nilItem struct{} + +func (n *nilItem) Value() interface{} { return nil } +func (n *nilItem) Release() {} + +func (i *nilItem) Expired() bool { + return true +} + +func (i *nilItem) TTL() time.Duration { + return time.Minute +} + +func (i *nilItem) Expires() time.Time { + return time.Time{} +} + +func (i *nilItem) Extend(duration time.Duration) { +} + +var NilTracked = new(nilItem) + +type Item struct { + key string + group string + promotions int32 + refCount int32 + expires int64 + size int64 + value interface{} + element *list.Element +} + +func newItem(key string, value interface{}, expires int64) *Item { + size := int64(1) + if sized, ok := value.(Sized); ok { + size = sized.Size() + } + return &Item{ + key: key, + value: value, + promotions: 0, + size: size, + expires: expires, + } +} + +func (i *Item) shouldPromote(getsPerPromote int32) bool { + i.promotions += 1 + return i.promotions == getsPerPromote +} + +func (i *Item) Value() interface{} { + return i.value +} + +func (i *Item) track() { + atomic.AddInt32(&i.refCount, 1) +} + +func (i *Item) Release() { + atomic.AddInt32(&i.refCount, -1) +} + +func (i *Item) Expired() bool { + expires := atomic.LoadInt64(&i.expires) + return expires < time.Now().UnixNano() +} + +func (i *Item) TTL() time.Duration { + expires := atomic.LoadInt64(&i.expires) + return time.Nanosecond * 
time.Duration(expires-time.Now().UnixNano()) +} + +func (i *Item) Expires() time.Time { + expires := atomic.LoadInt64(&i.expires) + return time.Unix(0, expires) +} + +func (i *Item) Extend(duration time.Duration) { + atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano()) +} diff --git a/vendor/github.com/karlseguin/ccache/layeredbucket.go b/vendor/github.com/karlseguin/ccache/layeredbucket.go new file mode 100644 index 00000000000..88f3def4219 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/layeredbucket.go @@ -0,0 +1,82 @@ +package ccache + +import ( + "sync" + "time" +) + +type layeredBucket struct { + sync.RWMutex + buckets map[string]*bucket +} + +func (b *layeredBucket) get(primary, secondary string) *Item { + bucket := b.getSecondaryBucket(primary) + if bucket == nil { + return nil + } + return bucket.get(secondary) +} + +func (b *layeredBucket) getSecondaryBucket(primary string) *bucket { + b.RLock() + bucket, exists := b.buckets[primary] + b.RUnlock() + if exists == false { + return nil + } + return bucket +} + +func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration) (*Item, *Item) { + b.Lock() + bkt, exists := b.buckets[primary] + if exists == false { + bkt = &bucket{lookup: make(map[string]*Item)} + b.buckets[primary] = bkt + } + b.Unlock() + item, existing := bkt.set(secondary, value, duration) + item.group = primary + return item, existing +} + +func (b *layeredBucket) delete(primary, secondary string) *Item { + b.RLock() + bucket, exists := b.buckets[primary] + b.RUnlock() + if exists == false { + return nil + } + return bucket.delete(secondary) +} + +func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool { + b.RLock() + bucket, exists := b.buckets[primary] + b.RUnlock() + if exists == false { + return false + } + + bucket.Lock() + defer bucket.Unlock() + + if l := len(bucket.lookup); l == 0 { + return false + } + for key, item := range bucket.lookup { + 
delete(bucket.lookup, key) + deletables <- item + } + return true +} + +func (b *layeredBucket) clear() { + b.Lock() + defer b.Unlock() + for _, bucket := range b.buckets { + bucket.clear() + } + b.buckets = make(map[string]*bucket) +} diff --git a/vendor/github.com/karlseguin/ccache/layeredcache.go b/vendor/github.com/karlseguin/ccache/layeredcache.go new file mode 100644 index 00000000000..20b13f94d30 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/layeredcache.go @@ -0,0 +1,237 @@ +// An LRU cached aimed at high concurrency +package ccache + +import ( + "container/list" + "hash/fnv" + "sync/atomic" + "time" +) + +type LayeredCache struct { + *Configuration + list *list.List + buckets []*layeredBucket + bucketMask uint32 + size int64 + deletables chan *Item + promotables chan *Item + donec chan struct{} +} + +// Create a new layered cache with the specified configuration. +// A layered cache used a two keys to identify a value: a primary key +// and a secondary key. Get, Set and Delete require both a primary and +// secondary key. However, DeleteAll requires only a primary key, deleting +// all values that share the same primary key. + +// Layered Cache is useful as an HTTP cache, where an HTTP purge might +// delete multiple variants of the same resource: +// primary key = "user/44" +// secondary key 1 = ".json" +// secondary key 2 = ".xml" + +// See ccache.Configure() for creating a configuration +func Layered(config *Configuration) *LayeredCache { + c := &LayeredCache{ + list: list.New(), + Configuration: config, + bucketMask: uint32(config.buckets) - 1, + buckets: make([]*layeredBucket, config.buckets), + deletables: make(chan *Item, config.deleteBuffer), + } + for i := 0; i < int(config.buckets); i++ { + c.buckets[i] = &layeredBucket{ + buckets: make(map[string]*bucket), + } + } + c.restart() + return c +} + +// Get an item from the cache. Returns nil if the item wasn't found. +// This can return an expired item. 
Use item.Expired() to see if the item +// is expired and item.TTL() to see how long until the item expires (which +// will be negative for an already expired item). +func (c *LayeredCache) Get(primary, secondary string) *Item { + item := c.bucket(primary).get(primary, secondary) + if item == nil { + return nil + } + if item.expires > time.Now().UnixNano() { + c.promote(item) + } + return item +} + +// Get the secondary cache for a given primary key. This operation will +// never return nil. In the case where the primary key does not exist, a +// new, underlying, empty bucket will be created and returned. +func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache { + primaryBkt := c.bucket(primary) + bkt := primaryBkt.getSecondaryBucket(primary) + primaryBkt.Lock() + if bkt == nil { + bkt = &bucket{lookup: make(map[string]*Item)} + primaryBkt.buckets[primary] = bkt + } + primaryBkt.Unlock() + return &SecondaryCache{ + bucket: bkt, + pCache: c, + } +} + +// Used when the cache was created with the Track() configuration option. +// Avoid otherwise +func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem { + item := c.Get(primary, secondary) + if item == nil { + return NilTracked + } + item.track() + return item +} + +// Set the value in the cache for the specified duration +func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) { + c.set(primary, secondary, value, duration) +} + +// Replace the value if it exists, does not set if it doesn't. +// Returns true if the item existed an was replaced, false otherwise. 
+// Replace does not reset item's TTL nor does it alter its position in the LRU +func (c *LayeredCache) Replace(primary, secondary string, value interface{}) bool { + item := c.bucket(primary).get(primary, secondary) + if item == nil { + return false + } + c.Set(primary, secondary, value, item.TTL()) + return true +} + +// Attempts to get the value from the cache and calles fetch on a miss. +// If fetch returns an error, no value is cached and the error is returned back +// to the caller. +func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { + item := c.Get(primary, secondary) + if item != nil { + return item, nil + } + value, err := fetch() + if err != nil { + return nil, err + } + return c.set(primary, secondary, value, duration), nil +} + +// Remove the item from the cache, return true if the item was present, false otherwise. +func (c *LayeredCache) Delete(primary, secondary string) bool { + item := c.bucket(primary).delete(primary, secondary) + if item != nil { + c.deletables <- item + return true + } + return false +} + +// Deletes all items that share the same primary key +func (c *LayeredCache) DeleteAll(primary string) bool { + return c.bucket(primary).deleteAll(primary, c.deletables) +} + +//this isn't thread safe. 
It's meant to be called from non-concurrent tests +func (c *LayeredCache) Clear() { + for _, bucket := range c.buckets { + bucket.clear() + } + c.size = 0 + c.list = list.New() +} + +func (c *LayeredCache) Stop() { + close(c.promotables) + <-c.donec +} + +func (c *LayeredCache) restart() { + c.promotables = make(chan *Item, c.promoteBuffer) + c.donec = make(chan struct{}) + go c.worker() +} + +func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration) *Item { + item, existing := c.bucket(primary).set(primary, secondary, value, duration) + if existing != nil { + c.deletables <- existing + } + c.promote(item) + return item +} + +func (c *LayeredCache) bucket(key string) *layeredBucket { + h := fnv.New32a() + h.Write([]byte(key)) + return c.buckets[h.Sum32()&c.bucketMask] +} + +func (c *LayeredCache) promote(item *Item) { + c.promotables <- item +} + +func (c *LayeredCache) worker() { + defer close(c.donec) + for { + select { + case item, ok := <-c.promotables: + if ok == false { + return + } + if c.doPromote(item) && c.size > c.maxSize { + c.gc() + } + case item := <-c.deletables: + if item.element == nil { + item.promotions = -2 + } else { + c.size -= item.size + c.list.Remove(item.element) + } + } + } +} + +func (c *LayeredCache) doPromote(item *Item) bool { + // deleted before it ever got promoted + if item.promotions == -2 { + return false + } + if item.element != nil { //not a new item + if item.shouldPromote(c.getsPerPromote) { + c.list.MoveToFront(item.element) + item.promotions = 0 + } + return false + } + c.size += item.size + item.element = c.list.PushFront(item) + return true +} + +func (c *LayeredCache) gc() { + element := c.list.Back() + for i := 0; i < c.itemsToPrune; i++ { + if element == nil { + return + } + prev := element.Prev() + item := element.Value.(*Item) + if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 { + c.bucket(item.group).delete(item.group, item.key) + c.size -= item.size + 
c.list.Remove(element) + item.promotions = -2 + } + element = prev + } +} diff --git a/vendor/github.com/karlseguin/ccache/license.txt b/vendor/github.com/karlseguin/ccache/license.txt new file mode 100644 index 00000000000..aebeebfa520 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/license.txt @@ -0,0 +1,19 @@ +Copyright (c) 2013 Karl Seguin. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/karlseguin/ccache/readme.md b/vendor/github.com/karlseguin/ccache/readme.md new file mode 100644 index 00000000000..8a7efa2e2b4 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/readme.md @@ -0,0 +1,172 @@ +# CCache +CCache is an LRU Cache, written in Go, focused on supporting high concurrency. 
+ +Lock contention on the list is reduced by: + +* Introducing a window which limits the frequency that an item can get promoted +* Using a buffered channel to queue promotions for a single worker +* Garbage collecting within the same thread as the worker + +## Setup + +First, download the project: + + go get github.com/karlseguin/ccache + +## Configuration +Next, import and create a `Cache` instance: + + +```go +import ( + "github.com/karlseguin/ccache" +) + +var cache = ccache.New(ccache.Configure()) +``` + +`Configure` exposes a chainable API: + +```go +var cache = ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)) +``` + +The most likely configuration options to tweak are: + +* `MaxSize(int)` - the maximum number size to store in the cache (default: 5000) +* `GetsPerPromote(int)` - the number of times an item is fetched before we promote it. For large caches with long TTLs, it normally isn't necessary to promote an item after every fetch (default: 3) +* `ItemsToPrune(int)` - the number of items to prune when we hit `MaxSize`. Freeing up more than 1 slot at a time improved performance (default: 500) + +Configurations that change the internals of the cache, which aren't as likely to need tweaking: + +* `Buckets` - ccache shards its internal map to provide a greater amount of concurrency. Must be a power of 2 (default: 16). +* `PromoteBuffer(int)` - the size of the buffer to use to queue promotions (default: 1024) +* `DeleteBuffer(int)` the size of the buffer to use to queue deletions (default: 1024) + +## Usage + +Once the cache is setup, you can `Get`, `Set` and `Delete` items from it. 
A `Get` returns an `*Item`: + +### Get +```go +item := cache.Get("user:4") +if item == nil { + //handle +} else { + user := item.Value().(*User) +} +``` +The returned `*Item` exposes a number of methods: + +* `Value() interface{}` - the value cached +* `Expired() bool` - whether the item is expired or not +* `TTL() time.Duration` - the duration before the item expires (will be a negative value for expired items) +* `Expires() time.Time` - the time the item will expire + +By returning expired items, CCache lets you decide if you want to serve stale content or not. For example, you might decide to serve up slightly stale content (< 30 seconds old) while re-fetching newer data in the background. You might also decide to serve up infinitely stale content if you're unable to get new data from your source. + +### Set +`Set` expects the key, value and ttl: + +```go +cache.Set("user:4", user, time.Minute * 10) +``` + +### Fetch +There's also a `Fetch` which mixes a `Get` and a `Set`: + +```go +item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error) { + //code to fetch the data incase of a miss + //should return the data to cache and the error, if any +}) +``` + +### Delete +`Delete` expects the key to delete. It's ok to call `Delete` on a non-existant key: + +```go +cache.Delete("user:4") +``` + +### Extend +The life of an item can be changed via the `Extend` method. This will change the expiry of the item by the specified duration relative to the current time. + +### Replace +The value of an item can be updated to a new value without renewing the item's TTL or it's position in the LRU: + +```go +cache.Replace("user:4", user) +``` + +`Replace` returns true if the item existed (and thus was replaced). In the case where the key was not in the cache, the value *is not* inserted and false is returned. + +### Stop +The cache's background worker can be stopped by calling `Stop`. 
Once `Stop` is called +the cache should not be used (calls are likely to panic). Stop must be called in order to allow the garbage collector to reap the cache. + +## Tracking +CCache supports a special tracking mode which is meant to be used in conjunction with other pieces of your code that maintains a long-lived reference to data. + +When you configure your cache with `Track()`: + +```go +cache = ccache.New(ccache.Configure().Track()) +``` + +The items retrieved via `TrackingGet` will not be eligible for purge until `Release` is called on them: + +```go +item := cache.TrackingGet("user:4") +user := item.Value() //will be nil if "user:4" didn't exist in the cache +item.Release() //can be called even if item.Value() returned nil +``` + +In practive, `Release` wouldn't be called until later, at some other place in your code. + +There's a couple reason to use the tracking mode if other parts of your code also hold references to objects. First, if you're already going to hold a reference to these objects, there's really no reason not to have them in the cache - the memory is used up anyways. + +More important, it helps ensure that you're code returns consistent data. With tracking, "user:4" might be purged, and a subsequent `Fetch` would reload the data. This can result in different versions of "user:4" being returned by different parts of your system. + +## LayeredCache + +CCache's `LayeredCache` stores and retrieves values by both a primary and secondary key. Deletion can happen against either the primary and secondary key, or the primary key only (removing all values that share the same primary key). + +`LayeredCache` is useful for HTTP caching, when you want to purge all variations of a request. 
+ +`LayeredCache` takes the same configuration object as the main cache, exposes the same optional tracking capabilities, but exposes a slightly different API: + +```go +cache := ccache.Layered(ccache.Configure()) + +cache.Set("/users/goku", "type:json", "{value_to_cache}", time.Minute * 5) +cache.Set("/users/goku", "type:xml", "", time.Minute * 5) + +json := cache.Get("/users/goku", "type:json") +xml := cache.Get("/users/goku", "type:xml") + +cache.Delete("/users/goku", "type:json") +cache.Delete("/users/goku", "type:xml") +// OR +cache.DeleteAll("/users/goku") +``` + +# SecondaryCache + +In some cases, when using a `LayeredCache`, it may be desirable to always be acting on the secondary portion of the cache entry. This could be the case where the primary key is used as a key elsewhere in your code. The `SecondaryCache` is retrieved with: + +```go +cache := ccache.Layered(ccache.Configure()) +sCache := cache.GetOrCreateSecondaryCache("/users/goku") +sCache.Set("type:json", "{value_to_cache}", time.Minute * 5) +``` + +The semantics for interacting with the `SecondaryCache` are exactly the same as for a regular `Cache`. However, one difference is that `Get` will not return nil, but will return an empty 'cache' for a non-existent primary key. + +## Size +By default, items added to a cache have a size of 1. This means that if you configure `MaxSize(10000)`, you'll be able to store 10000 items in the cache. + +However, if the values you set into the cache have a method `Size() int64`, this size will be used. Note that ccache has an overhead of ~350 bytes per entry, which isn't taken into account. In other words, given a filled up cache, with `MaxSize(4096000)` and items that return a `Size() int64` of 2048, we can expect to find 2000 items (4096000/2048) taking a total space of 4796000 bytes. + +## Want Something Simpler? 
+For a simpler cache, checkout out [rcache](https://github.com/karlseguin/rcache) diff --git a/vendor/github.com/karlseguin/ccache/secondarycache.go b/vendor/github.com/karlseguin/ccache/secondarycache.go new file mode 100644 index 00000000000..f901fde0c55 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/secondarycache.go @@ -0,0 +1,72 @@ +package ccache + +import "time" + +type SecondaryCache struct { + bucket *bucket + pCache *LayeredCache +} + +// Get the secondary key. +// The semantics are the same as for LayeredCache.Get +func (s *SecondaryCache) Get(secondary string) *Item { + return s.bucket.get(secondary) +} + +// Set the secondary key to a value. +// The semantics are the same as for LayeredCache.Set +func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.Duration) *Item { + item, existing := s.bucket.set(secondary, value, duration) + if existing != nil { + s.pCache.deletables <- existing + } + s.pCache.promote(item) + return item +} + +// Fetch or set a secondary key. +// The semantics are the same as for LayeredCache.Fetch +func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { + item := s.Get(secondary) + if item != nil { + return item, nil + } + value, err := fetch() + if err != nil { + return nil, err + } + return s.Set(secondary, value, duration), nil +} + +// Delete a secondary key. +// The semantics are the same as for LayeredCache.Delete +func (s *SecondaryCache) Delete(secondary string) bool { + item := s.bucket.delete(secondary) + if item != nil { + s.pCache.deletables <- item + return true + } + return false +} + +// Replace a secondary key. +// The semantics are the same as for LayeredCache.Replace +func (s *SecondaryCache) Replace(secondary string, value interface{}) bool { + item := s.Get(secondary) + if item == nil { + return false + } + s.Set(secondary, value, item.TTL()) + return true +} + +// Track a secondary key. 
+// The semantics are the same as for LayeredCache.TrackingGet +func (c *SecondaryCache) TrackingGet(secondary string) TrackedItem { + item := c.Get(secondary) + if item == nil { + return NilTracked + } + item.track() + return item +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD b/vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD deleted file mode 100644 index bf4e2ecbebb..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotations.pb.go", - "http.pb.go", - ], - importpath = "google.golang.org/genproto/googleapis/api/annotations", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/golang/protobuf/protoc-gen-go/descriptor:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go deleted file mode 100644 index 53d57f67a53..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/annotations.proto - -/* -Package annotations is a generated protocol buffer package. 
- -It is generated from these files: - google/api/annotations.proto - google/api/http.proto - -It has these top-level messages: - Http - HttpRule - CustomHttpPattern -*/ -package annotations - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -var E_Http = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MethodOptions)(nil), - ExtensionType: (*HttpRule)(nil), - Field: 72295728, - Name: "google.api.http", - Tag: "bytes,72295728,opt,name=http", - Filename: "google/api/annotations.proto", -} - -func init() { - proto.RegisterExtension(E_Http) -} - -func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 208 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, - 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, - 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, - 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, - 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, - 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, - 0x83, 
0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, - 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, - 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, - 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, - 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, - 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go deleted file mode 100644 index f91c604620b..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ /dev/null @@ -1,566 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/http.proto - -package annotations - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Defines the HTTP configuration for a service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -type Http struct { - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. 
- Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` -} - -func (m *Http) Reset() { *m = Http{} } -func (m *Http) String() string { return proto.CompactTextString(m) } -func (*Http) ProtoMessage() {} -func (*Http) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -func (m *Http) GetRules() []*HttpRule { - if m != nil { - return m.Rules - } - return nil -} - -// `HttpRule` defines the mapping of an RPC method to one or more HTTP -// REST APIs. The mapping determines what portions of the request -// message are populated from the path, query parameters, or body of -// the HTTP request. The mapping is typically specified as an -// `google.api.http` annotation, see "google/api/annotations.proto" -// for details. -// -// The mapping consists of a field specifying the path template and -// method kind. The path template can refer to fields in the request -// message, as in the example below which describes a REST GET -// operation on a resource collection of messages: -// -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // mapped to the URL -// SubMessage sub = 2; // `sub.subfield` is url-mapped -// } -// message Message { -// string text = 1; // content of the resource -// } -// -// The same http annotation can alternatively be expressed inside the -// `GRPC API Configuration` YAML file. -// -// http: -// rules: -// - selector: .Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// This definition enables an automatic, bidrectional mapping of HTTP -// JSON to RPC. 
Example: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` -// -// In general, not only fields but also field paths can be referenced -// from a path pattern. Fields mapped to the path pattern cannot be -// repeated and must have a primitive (non-message) type. -// -// Any fields in the request message which are not bound by the path -// pattern automatically become (optional) HTTP query -// parameters. Assume the following definition of the request message: -// -// -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // mapped to the URL -// int64 revision = 2; // becomes a parameter -// SubMessage sub = 3; // `sub.subfield` becomes a parameter -// } -// -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` -// -// Note that fields which are mapped to HTTP parameters must have a -// primitive type or a repeated primitive type. Message types are not -// allowed. In the case of a repeated type, the parameter can be -// repeated in the URL, as in `...?param=A¶m=B`. -// -// For HTTP method kinds which allow a request body, the `body` field -// specifies the mapping. 
Consider a REST update method on the -// message resource collection: -// -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// put: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// HTTP | RPC -// -----|----- -// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// put: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | RPC -// -----|----- -// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice of -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. 
Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// -// This enables the following two alternative HTTP JSON to RPC -// mappings: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` -// -// # Rules for HTTP mapping -// -// The rules for mapping HTTP path, query parameters, and body fields -// to the request message are as follows: -// -// 1. The `body` field specifies either `*` or a field path, or is -// omitted. If omitted, it assumes there is no HTTP body. -// 2. Leaf fields (recursive expansion of nested messages in the -// request) can be classified into three types: -// (a) Matched in the URL template. -// (b) Covered by body (if body is `*`, everything except (a) fields; -// else everything under the body field) -// (c) All other fields. -// 3. URL query parameters found in the HTTP request are mapped to (c) fields. -// 4. Any body sent with an HTTP request can contain only (b) fields. -// -// The syntax of the path template is as follows: -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single path segment. It follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion. -// -// The syntax `**` matches zero or more path segments. 
It follows the semantics -// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved -// Expansion. NOTE: it must be the last segment in the path except the Verb. -// -// The syntax `LITERAL` matches literal text in the URL path. -// -// The syntax `Variable` matches the entire path as specified by its template; -// this nested template must not contain further variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// NOTE: the field paths in variables and in the `body` must not refer to -// repeated fields or map fields. -// -// Use CustomHttpPattern to specify any HTTP method that is not included in the -// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for -// a given URL path rule. The wild-card rule is useful for services that provide -// content to Web (HTML) clients. -type HttpRule struct { - // Selects methods to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. - // - // Types that are valid to be assigned to Pattern: - // *HttpRule_Get - // *HttpRule_Put - // *HttpRule_Post - // *HttpRule_Delete - // *HttpRule_Patch - // *HttpRule_Custom - Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` - // The name of the request field whose value is mapped to the HTTP body, or - // `*` for mapping all fields not captured by the path pattern to the HTTP - // body. NOTE: the referred field must not be a repeated field and must be - // present at the top-level of request message type. 
- Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"` - // Additional HTTP bindings for the selector. Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). - AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"` -} - -func (m *HttpRule) Reset() { *m = HttpRule{} } -func (m *HttpRule) String() string { return proto.CompactTextString(m) } -func (*HttpRule) ProtoMessage() {} -func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } - -type isHttpRule_Pattern interface { - isHttpRule_Pattern() -} - -type HttpRule_Get struct { - Get string `protobuf:"bytes,2,opt,name=get,oneof"` -} -type HttpRule_Put struct { - Put string `protobuf:"bytes,3,opt,name=put,oneof"` -} -type HttpRule_Post struct { - Post string `protobuf:"bytes,4,opt,name=post,oneof"` -} -type HttpRule_Delete struct { - Delete string `protobuf:"bytes,5,opt,name=delete,oneof"` -} -type HttpRule_Patch struct { - Patch string `protobuf:"bytes,6,opt,name=patch,oneof"` -} -type HttpRule_Custom struct { - Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"` -} - -func (*HttpRule_Get) isHttpRule_Pattern() {} -func (*HttpRule_Put) isHttpRule_Pattern() {} -func (*HttpRule_Post) isHttpRule_Pattern() {} -func (*HttpRule_Delete) isHttpRule_Pattern() {} -func (*HttpRule_Patch) isHttpRule_Pattern() {} -func (*HttpRule_Custom) isHttpRule_Pattern() {} - -func (m *HttpRule) GetPattern() isHttpRule_Pattern { - if m != nil { - return m.Pattern - } - return nil -} - -func (m *HttpRule) GetSelector() string { - if m != nil { - return m.Selector - } - return "" -} - -func (m *HttpRule) GetGet() string { - if x, ok := m.GetPattern().(*HttpRule_Get); ok { - return x.Get - } - return "" -} - -func (m *HttpRule) GetPut() string { - if x, ok := m.GetPattern().(*HttpRule_Put); ok { - return x.Put - } - return 
"" -} - -func (m *HttpRule) GetPost() string { - if x, ok := m.GetPattern().(*HttpRule_Post); ok { - return x.Post - } - return "" -} - -func (m *HttpRule) GetDelete() string { - if x, ok := m.GetPattern().(*HttpRule_Delete); ok { - return x.Delete - } - return "" -} - -func (m *HttpRule) GetPatch() string { - if x, ok := m.GetPattern().(*HttpRule_Patch); ok { - return x.Patch - } - return "" -} - -func (m *HttpRule) GetCustom() *CustomHttpPattern { - if x, ok := m.GetPattern().(*HttpRule_Custom); ok { - return x.Custom - } - return nil -} - -func (m *HttpRule) GetBody() string { - if m != nil { - return m.Body - } - return "" -} - -func (m *HttpRule) GetAdditionalBindings() []*HttpRule { - if m != nil { - return m.AdditionalBindings - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ - (*HttpRule_Get)(nil), - (*HttpRule_Put)(nil), - (*HttpRule_Post)(nil), - (*HttpRule_Delete)(nil), - (*HttpRule_Patch)(nil), - (*HttpRule_Custom)(nil), - } -} - -func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*HttpRule) - // pattern - switch x := m.Pattern.(type) { - case *HttpRule_Get: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Get) - case *HttpRule_Put: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Put) - case *HttpRule_Post: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Post) - case *HttpRule_Delete: - b.EncodeVarint(5<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Delete) - case *HttpRule_Patch: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Patch) - case *HttpRule_Custom: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := 
b.EncodeMessage(x.Custom); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) - } - return nil -} - -func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*HttpRule) - switch tag { - case 2: // pattern.get - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Get{x} - return true, err - case 3: // pattern.put - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Put{x} - return true, err - case 4: // pattern.post - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Post{x} - return true, err - case 5: // pattern.delete - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Delete{x} - return true, err - case 6: // pattern.patch - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Patch{x} - return true, err - case 8: // pattern.custom - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(CustomHttpPattern) - err := b.DecodeMessage(msg) - m.Pattern = &HttpRule_Custom{msg} - return true, err - default: - return false, nil - } -} - -func _HttpRule_OneofSizer(msg proto.Message) (n int) { - m := msg.(*HttpRule) - // pattern - switch x := m.Pattern.(type) { - case *HttpRule_Get: - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Get))) - n += len(x.Get) - case *HttpRule_Put: - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Put))) - n += len(x.Put) - case *HttpRule_Post: - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += 
proto.SizeVarint(uint64(len(x.Post))) - n += len(x.Post) - case *HttpRule_Delete: - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Delete))) - n += len(x.Delete) - case *HttpRule_Patch: - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Patch))) - n += len(x.Patch) - case *HttpRule_Custom: - s := proto.Size(x.Custom) - n += proto.SizeVarint(8<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// A custom pattern is used for defining custom HTTP verb. -type CustomHttpPattern struct { - // The name of this custom HTTP verb. - Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` - // The path matched by this custom verb. - Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` -} - -func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } -func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } -func (*CustomHttpPattern) ProtoMessage() {} -func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } - -func (m *CustomHttpPattern) GetKind() string { - if m != nil { - return m.Kind - } - return "" -} - -func (m *CustomHttpPattern) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func init() { - proto.RegisterType((*Http)(nil), "google.api.Http") - proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") - proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") -} - -func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 359 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6a, 0xe3, 0x30, - 0x10, 0xc6, 0xd7, 0x89, 0xe3, 0x24, 0x13, 0x58, 0x58, 0x6d, 0x76, 0x11, 0x85, 0x42, 0xc8, 0x29, - 0xf4, 0x60, 0x43, 0x7a, 
0xe8, 0x21, 0xa7, 0xb8, 0x94, 0xa6, 0xb7, 0xe0, 0x63, 0x2f, 0x45, 0xb1, - 0x85, 0xa2, 0xd6, 0x91, 0x84, 0x3d, 0x3e, 0xf4, 0x75, 0xfa, 0x0e, 0x7d, 0xb7, 0x1e, 0x8b, 0xfe, - 0xa4, 0x09, 0x14, 0x7a, 0x9b, 0xef, 0x37, 0x9f, 0x34, 0xa3, 0x19, 0xc1, 0x3f, 0xa1, 0xb5, 0xa8, - 0x79, 0xc6, 0x8c, 0xcc, 0xf6, 0x88, 0x26, 0x35, 0x8d, 0x46, 0x4d, 0xc0, 0xe3, 0x94, 0x19, 0x39, - 0x5f, 0x42, 0xbc, 0x41, 0x34, 0xe4, 0x0a, 0x06, 0x4d, 0x57, 0xf3, 0x96, 0x46, 0xb3, 0xfe, 0x62, - 0xb2, 0x9c, 0xa6, 0x27, 0x4f, 0x6a, 0x0d, 0x45, 0x57, 0xf3, 0xc2, 0x5b, 0xe6, 0xef, 0x3d, 0x18, - 0x1d, 0x19, 0xb9, 0x80, 0x51, 0xcb, 0x6b, 0x5e, 0xa2, 0x6e, 0x68, 0x34, 0x8b, 0x16, 0xe3, 0xe2, - 0x4b, 0x13, 0x02, 0x7d, 0xc1, 0x91, 0xf6, 0x2c, 0xde, 0xfc, 0x2a, 0xac, 0xb0, 0xcc, 0x74, 0x48, - 0xfb, 0x47, 0x66, 0x3a, 0x24, 0x53, 0x88, 0x8d, 0x6e, 0x91, 0xc6, 0x01, 0x3a, 0x45, 0x28, 0x24, - 0x15, 0xaf, 0x39, 0x72, 0x3a, 0x08, 0x3c, 0x68, 0xf2, 0x1f, 0x06, 0x86, 0x61, 0xb9, 0xa7, 0x49, - 0x48, 0x78, 0x49, 0x6e, 0x20, 0x29, 0xbb, 0x16, 0xf5, 0x81, 0x8e, 0x66, 0xd1, 0x62, 0xb2, 0xbc, - 0x3c, 0x7f, 0xc5, 0xad, 0xcb, 0xd8, 0xbe, 0xb7, 0x0c, 0x91, 0x37, 0xca, 0x5e, 0xe8, 0xed, 0x84, - 0x40, 0xbc, 0xd3, 0xd5, 0x2b, 0x1d, 0xba, 0x07, 0xb8, 0x98, 0xdc, 0xc1, 0x5f, 0x56, 0x55, 0x12, - 0xa5, 0x56, 0xac, 0x7e, 0xda, 0x49, 0x55, 0x49, 0x25, 0x5a, 0x3a, 0xf9, 0x61, 0x3e, 0xe4, 0x74, - 0x20, 0x0f, 0xfe, 0x7c, 0x0c, 0x43, 0xe3, 0xeb, 0xcd, 0x57, 0xf0, 0xe7, 0x5b, 0x13, 0xb6, 0xf4, - 0x8b, 0x54, 0x55, 0x98, 0x9d, 0x8b, 0x2d, 0x33, 0x0c, 0xf7, 0x7e, 0x70, 0x85, 0x8b, 0xf3, 0x67, - 0xf8, 0x5d, 0xea, 0xc3, 0x59, 0xd9, 0x7c, 0xec, 0xae, 0xb1, 0x1b, 0xdd, 0x46, 0x8f, 0xeb, 0x90, - 0x10, 0xba, 0x66, 0x4a, 0xa4, 0xba, 0x11, 0x99, 0xe0, 0xca, 0xed, 0x3b, 0xf3, 0x29, 0x66, 0x64, - 0xeb, 0x7e, 0x02, 0x53, 0x4a, 0x23, 0xb3, 0x6d, 0xb6, 0xab, 0xb3, 0xf8, 0x23, 0x8a, 0xde, 0x7a, - 0xf1, 0xfd, 0x7a, 0xfb, 0xb0, 0x4b, 0xdc, 0xb9, 0xeb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, - 0x15, 0x60, 0x5b, 0x40, 0x02, 0x00, 0x00, -} diff --git 
a/vendor/google.golang.org/grpc/.please-update b/vendor/google.golang.org/grpc/.please-update deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index 22bf25004a3..b3577c7ae20 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -1,20 +1,19 @@ language: go go: - - 1.7.x - - 1.8.x - - 1.9.x - -matrix: - include: - - go: 1.9.x - env: ARCH=386 + - 1.6.3 + - 1.7 + - 1.8 go_import_path: google.golang.org/grpc before_install: - - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi + - go get github.com/golang/lint/golint + - go get -u golang.org/x/tools/cmd/goimports github.com/axw/gocov/gocov github.com/mattn/goveralls golang.org/x/tools/cmd/cover script: - - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh || exit 1; fi + - '! gofmt -s -d -l . 2>&1 | read' + - '! goimports -l . | read' + - '! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"' + - '! go tool vet -all . 2>&1 | grep -vE "constant [0-9]+ not a string in call to Errorf" | grep -vF .pb.go:' # https://github.com/golang/protobuf/issues/214 - make test testrace diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS deleted file mode 100644 index e491a9e7f78..00000000000 --- a/vendor/google.golang.org/grpc/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. 
diff --git a/vendor/google.golang.org/grpc/BUILD b/vendor/google.golang.org/grpc/BUILD index b266ac7de56..9b550cb77b9 100644 --- a/vendor/google.golang.org/grpc/BUILD +++ b/vendor/google.golang.org/grpc/BUILD @@ -5,18 +5,15 @@ go_library( srcs = [ "backoff.go", "balancer.go", - "balancer_conn_wrappers.go", - "balancer_v1_wrapper.go", "call.go", "clientconn.go", "codec.go", "doc.go", + "go16.go", + "go17.go", "grpclb.go", "interceptor.go", - "picker_wrapper.go", - "pickfirst.go", "proxy.go", - "resolver_conn_wrapper.go", "rpc_util.go", "server.go", "stream.go", @@ -29,18 +26,15 @@ go_library( "//vendor/golang.org/x/net/context:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/golang.org/x/net/trace:go_default_library", - "//vendor/google.golang.org/grpc/balancer:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/connectivity:go_default_library", "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages:go_default_library", + "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1:go_default_library", "//vendor/google.golang.org/grpc/grpclog:go_default_library", "//vendor/google.golang.org/grpc/internal:go_default_library", "//vendor/google.golang.org/grpc/keepalive:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", "//vendor/google.golang.org/grpc/naming:go_default_library", "//vendor/google.golang.org/grpc/peer:go_default_library", - "//vendor/google.golang.org/grpc/resolver:go_default_library", "//vendor/google.golang.org/grpc/stats:go_default_library", "//vendor/google.golang.org/grpc/status:go_default_library", "//vendor/google.golang.org/grpc/tap:go_default_library", @@ -59,19 +53,15 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//vendor/google.golang.org/grpc/balancer:all-srcs", "//vendor/google.golang.org/grpc/codes:all-srcs", - 
"//vendor/google.golang.org/grpc/connectivity:all-srcs", "//vendor/google.golang.org/grpc/credentials:all-srcs", "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1:all-srcs", "//vendor/google.golang.org/grpc/grpclog:all-srcs", - "//vendor/google.golang.org/grpc/health/grpc_health_v1:all-srcs", "//vendor/google.golang.org/grpc/internal:all-srcs", "//vendor/google.golang.org/grpc/keepalive:all-srcs", "//vendor/google.golang.org/grpc/metadata:all-srcs", "//vendor/google.golang.org/grpc/naming:all-srcs", "//vendor/google.golang.org/grpc/peer:all-srcs", - "//vendor/google.golang.org/grpc/resolver:all-srcs", "//vendor/google.golang.org/grpc/stats:all-srcs", "//vendor/google.golang.org/grpc/status:all-srcs", "//vendor/google.golang.org/grpc/tap:all-srcs", diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index a5c6e06e255..36cd6f7581b 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,32 +1,46 @@ # How to contribute -We definitely welcome your patches and contributions to gRPC! +We definitely welcome patches and contribution to grpc! Here are some guidelines +and information about how to do so. -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +## Sending patches + +### Getting started + +1. Check out the code: + + $ go get google.golang.org/grpc + $ cd $GOPATH/src/google.golang.org/grpc + +1. Create a fork of the grpc-go repository. +1. Add your fork as a remote: + + $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git + +1. Make changes, commit them. +1. Run the test suite: + + $ make test + +1. Push your changes to your fork: + + $ git push fork ... + +1. Open a pull request. ## Legal requirements In order to protect both you and ourselves, you will need to sign the [Contributor License Agreement](https://cla.developers.google.com/clas). 
-## Guidelines for Pull Requests -How to get your contributions merged smoothly and quickly. - -- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. - -- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). - -- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. - -- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. - -- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. - -- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). - -- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). - -- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. 
+## Filing Issues +When filing an issue, make sure to answer these five questions: -- Exceptions to the rules can be made if there's a compelling reason for doing so. - +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +### Contributing code +Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE index d6456956733..f4988b45079 100644 --- a/vendor/google.golang.org/grpc/LICENSE +++ b/vendor/google.golang.org/grpc/LICENSE @@ -1,202 +1,28 @@ +Copyright 2014, Google Inc. +All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 39606b564a6..03bb01f0b35 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -20,17 +20,24 @@ proto: echo "error: protoc not installed" >&2; \ exit 1; \ fi - go generate google.golang.org/grpc/... + go get -u -v github.com/golang/protobuf/protoc-gen-go + # use $$dir as the root for all proto files in the same directory + for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \ + protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \ + done test: testdeps - go test -cpu 1,4 google.golang.org/grpc/... + go test -v -cpu 1,4 google.golang.org/grpc/... testrace: testdeps - go test -race -cpu 1,4 google.golang.org/grpc/... + go test -v -race -cpu 1,4 google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... +coverage: testdeps + ./coverage.sh --coveralls + .PHONY: \ all \ deps \ diff --git a/vendor/google.golang.org/grpc/PATENTS b/vendor/google.golang.org/grpc/PATENTS new file mode 100644 index 00000000000..69b47959fab --- /dev/null +++ b/vendor/google.golang.org/grpc/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the gRPC project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of gRPC, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of gRPC. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of gRPC or any code incorporated within this +implementation of gRPC constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of gRPC +shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 622a5dc3e85..ae0236f92f3 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -2,7 +2,7 @@ [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) -The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. +The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. Installation ------------ @@ -10,13 +10,13 @@ Installation To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` -$ go get -u google.golang.org/grpc +$ go get google.golang.org/grpc ``` Prerequisites ------------- -This requires Go 1.7 or later. +This requires Go 1.6 or later. 
Constraints ----------- @@ -26,13 +26,9 @@ Documentation ------------- See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). -Performance ------------ -See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). - Status ------ -General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). +GA FAQ --- diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 090fbe87c52..c99024ee302 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -1,21 +1,3 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - package grpc import ( diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go index ab65049ddc1..9d943fbadae 100644 --- a/vendor/google.golang.org/grpc/balancer.go +++ b/vendor/google.golang.org/grpc/balancer.go @@ -1,18 +1,33 @@ /* * - * Copyright 2016 gRPC authors. + * Copyright 2016, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ @@ -20,7 +35,6 @@ package grpc import ( "fmt" - "net" "sync" "golang.org/x/net/context" @@ -46,10 +60,6 @@ type BalancerConfig struct { // use to dial to a remote load balancer server. The Balancer implementations // can ignore this if it does not need to talk to another party securely. DialCreds credentials.TransportCredentials - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. - Dialer func(context.Context, string) (net.Conn, error) } // BalancerGetOptions configures a Get call. @@ -157,7 +167,7 @@ type roundRobin struct { func (rr *roundRobin) watchAddrUpdates() error { updates, err := rr.w.Next() if err != nil { - grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) + grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) return err } rr.mu.Lock() @@ -173,7 +183,7 @@ func (rr *roundRobin) watchAddrUpdates() error { for _, v := range rr.addrs { if addr == v.addr { exist = true - grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) + grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr) break } } @@ -190,7 +200,7 @@ func (rr *roundRobin) watchAddrUpdates() error { } } default: - grpclog.Errorln("Unknown update.Op ", update.Op) + grpclog.Println("Unknown update.Op ", update.Op) } } // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. 
@@ -201,10 +211,6 @@ func (rr *roundRobin) watchAddrUpdates() error { if rr.done { return ErrClientConnClosing } - select { - case <-rr.addrCh: - default: - } rr.addrCh <- open return nil } @@ -227,7 +233,7 @@ func (rr *roundRobin) Start(target string, config BalancerConfig) error { return err } rr.w = w - rr.addrCh = make(chan []Address, 1) + rr.addrCh = make(chan []Address) go func() { for { if err := rr.watchAddrUpdates(); err != nil { @@ -379,9 +385,6 @@ func (rr *roundRobin) Notify() <-chan []Address { func (rr *roundRobin) Close() error { rr.mu.Lock() defer rr.mu.Unlock() - if rr.done { - return errBalancerClosed - } rr.done = true if rr.w != nil { rr.w.Close() @@ -395,14 +398,3 @@ func (rr *roundRobin) Close() error { } return nil } - -// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. -// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() -// returns the only address Up by resetTransport(). 
-type pickFirst struct { - *roundRobin -} - -func pickFirstBalancerV1(r naming.Resolver) Balancer { - return &pickFirst{&roundRobin{r: r}} -} diff --git a/vendor/google.golang.org/grpc/balancer/BUILD b/vendor/google.golang.org/grpc/balancer/BUILD deleted file mode 100644 index e422cbb250b..00000000000 --- a/vendor/google.golang.org/grpc/balancer/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["balancer.go"], - importpath = "google.golang.org/grpc/balancer", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc/connectivity:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/resolver:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go deleted file mode 100644 index 84e10b630e7..00000000000 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ /dev/null @@ -1,206 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package balancer defines APIs for load balancing in gRPC. -// All APIs in this package are experimental. -package balancer - -import ( - "errors" - "net" - - "golang.org/x/net/context" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/resolver" -) - -var ( - // m is a map from name to balancer builder. - m = make(map[string]Builder) - // defaultBuilder is the default balancer to use. - defaultBuilder Builder // TODO(bar) install pickfirst as default. -) - -// Register registers the balancer builder to the balancer map. -// b.Name will be used as the name registered with this builder. -func Register(b Builder) { - m[b.Name()] = b -} - -// Get returns the resolver builder registered with the given name. -// If no builder is register with the name, the default pickfirst will -// be used. -func Get(name string) Builder { - if b, ok := m[name]; ok { - return b - } - return defaultBuilder -} - -// SubConn represents a gRPC sub connection. -// Each sub connection contains a list of addresses. gRPC will -// try to connect to them (in sequence), and stop trying the -// remainder once one connection is successful. -// -// The reconnect backoff will be applied on the list, not a single address. -// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. -// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. 
- // If it's not in the list, the connection will gracefully closed, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() -} - -// NewSubConnOptions contains options to create new SubConn. -type NewSubConnOptions struct{} - -// ClientConn represents a gRPC ClientConn. -type ClientConn interface { - // NewSubConn is called by balancer to create a new SubConn. - // It doesn't block and wait for the connections to be established. - // Behaviors of the SubConn can be controlled by options. - NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) - // RemoveSubConn removes the SubConn from ClientConn. - // The SubConn will be shutdown. - RemoveSubConn(SubConn) - - // UpdateBalancerState is called by balancer to nofity gRPC that some internal - // state in balancer has changed. - // - // gRPC will update the connectivity state of the ClientConn, and will call pick - // on the new picker to pick new SubConn. - UpdateBalancerState(s connectivity.State, p Picker) - - // Target returns the dial target for this ClientConn. - Target() string -} - -// BuildOptions contains additional information for Build. -type BuildOptions struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. - DialCreds credentials.TransportCredentials - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. - Dialer func(context.Context, string) (net.Conn, error) -} - -// Builder creates a balancer. -type Builder interface { - // Build creates a new balancer with the ClientConn. 
- Build(cc ClientConn, opts BuildOptions) Balancer - // Name returns the name of balancers built by this builder. - // It will be used to pick balancers (for example in service config). - Name() string -} - -// PickOptions contains addition information for the Pick operation. -type PickOptions struct{} - -// DoneInfo contains additional information for done. -type DoneInfo struct { - // Err is the rpc error the RPC finished with. It could be nil. - Err error -} - -var ( - // ErrNoSubConnAvailable indicates no SubConn is available for pick(). - // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). - ErrNoSubConnAvailable = errors.New("no SubConn is available") - // ErrTransientFailure indicates all SubConns are in TransientFailure. - // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. - ErrTransientFailure = errors.New("all SubConns are in TransientFailure") -) - -// Picker is used by gRPC to pick a SubConn to send an RPC. -// Balancer is expected to generate a new picker from its snapshot everytime its -// internal state has changed. -// -// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). -type Picker interface { - // Pick returns the SubConn to be used to send the RPC. - // The returned SubConn must be one returned by NewSubConn(). - // - // This functions is expected to return: - // - a SubConn that is known to be READY; - // - ErrNoSubConnAvailable if no SubConn is available, but progress is being - // made (for example, some SubConn is in CONNECTING mode); - // - other errors if no active connecting is happening (for example, all SubConn - // are in TRANSIENT_FAILURE mode). - // - // If a SubConn is returned: - // - If it is READY, gRPC will send the RPC on it; - // - If it is not ready, or becomes not ready after it's returned, gRPC will block - // this call until a new picker is updated and will call pick on the new picker. 
- // - // If the returned error is not nil: - // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() - // - If the error is ErrTransientFailure: - // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() - // is called to pick again; - // - Otherwise, RPC will fail with unavailable error. - // - Else (error is other non-nil error): - // - The RPC will fail with unavailable error. - // - // The returned done() function will be called once the rpc has finished, with the - // final status of that RPC. - // done may be nil if balancer doesn't care about the RPC status. - Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) -} - -// Balancer takes input from gRPC, manages SubConns, and collects and aggregates -// the connectivity states. -// -// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. -// -// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed -// to be called synchronously from the same goroutine. -// There's no guarantee on picker.Pick, it may be called anytime. -type Balancer interface { - // HandleSubConnStateChange is called by gRPC when the connectivity state - // of sc has changed. - // Balancer is expected to aggregate all the state of SubConn and report - // that back to gRPC. - // Balancer should also generate and update Pickers when its internal state has - // been changed by the new state. - HandleSubConnStateChange(sc SubConn, state connectivity.State) - // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to - // balancers. - // Balancer can create new SubConn or remove SubConn with the addresses. - // An empty address slice and a non-nil error will be passed if the resolver returns - // non-nil error to gRPC. - HandleResolvedAddrs([]resolver.Address, error) - // Close closes the balancer. 
The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. - Close() -} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go deleted file mode 100644 index f5dbc4ba201..00000000000 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ /dev/null @@ -1,252 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State -} - -// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. -// TODO make a general purpose buffer that uses interface{}. 
-type scStateUpdateBuffer struct { - c chan *scStateUpdate - mu sync.Mutex - backlog []*scStateUpdate -} - -func newSCStateUpdateBuffer() *scStateUpdateBuffer { - return &scStateUpdateBuffer{ - c: make(chan *scStateUpdate, 1), - } -} - -func (b *scStateUpdateBuffer) put(t *scStateUpdate) { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) == 0 { - select { - case b.c <- t: - return - default: - } - } - b.backlog = append(b.backlog, t) -} - -func (b *scStateUpdateBuffer) load() { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog[0] = nil - b.backlog = b.backlog[1:] - default: - } - } -} - -// get returns the channel that receives a recvMsg in the buffer. -// -// Upon receiving, the caller should call load to send another -// scStateChangeTuple onto the channel if there is any. -func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { - return b.c -} - -// resolverUpdate contains the new resolved addresses or error if there's -// any. -type resolverUpdate struct { - addrs []resolver.Address - err error -} - -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. -type ccBalancerWrapper struct { - cc *ClientConn - balancer balancer.Balancer - stateChangeQueue *scStateUpdateBuffer - resolverUpdateCh chan *resolverUpdate - done chan struct{} -} - -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { - ccb := &ccBalancerWrapper{ - cc: cc, - stateChangeQueue: newSCStateUpdateBuffer(), - resolverUpdateCh: make(chan *resolverUpdate, 1), - done: make(chan struct{}), - } - go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - return ccb -} - -// watcher balancer functions sequencially, so the balancer can be implemeneted -// lock-free. 
-func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case t := <-ccb.stateChangeQueue.get(): - ccb.stateChangeQueue.load() - ccb.balancer.HandleSubConnStateChange(t.sc, t.state) - case t := <-ccb.resolverUpdateCh: - ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) - case <-ccb.done: - } - - select { - case <-ccb.done: - ccb.balancer.Close() - return - default: - } - } -} - -func (ccb *ccBalancerWrapper) close() { - close(ccb.done) -} - -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. 
- if sc == nil { - return - } - ccb.stateChangeQueue.put(&scStateUpdate{ - sc: sc, - state: s, - }) -} - -func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { - select { - case <-ccb.resolverUpdateCh: - default: - } - ccb.resolverUpdateCh <- &resolverUpdate{ - addrs: addrs, - err: err, - } -} - -func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs) - ac, err := ccb.cc.newAddrConn(addrs) - if err != nil { - return nil, err - } - acbw := &acBalancerWrapper{ac: ac} - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() - return acbw, nil -} - -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - grpclog.Infof("ccBalancerWrapper: removing subconn") - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) -} - -func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { - grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p) - ccb.cc.csMgr.updateState(s) - ccb.cc.blockingpicker.updatePicker(p) -} - -func (ccb *ccBalancerWrapper) Target() string { - return ccb.cc.target -} - -// acBalancerWrapper is a wrapper on top of ac for balancers. -// It implements balancer.SubConn interface. -type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn -} - -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs) - acbw.mu.Lock() - defer acbw.mu.Unlock() - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. 
- acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.tearDown(errConnDrain) - - if acState == connectivity.Shutdown { - return - } - - ac, err := cc.newAddrConn(addrs) - if err != nil { - grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() - if acState != connectivity.Idle { - ac.connect(false) - } - } -} - -func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() - acbw.ac.connect(false) -} - -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go deleted file mode 100644 index 9d0616080a1..00000000000 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ /dev/null @@ -1,367 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -type balancerWrapperBuilder struct { - b Balancer // The v1 balancer. 
-} - -func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - bwb.b.Start(cc.Target(), BalancerConfig{ - DialCreds: opts.DialCreds, - Dialer: opts.Dialer, - }) - _, pickfirst := bwb.b.(*pickFirst) - bw := &balancerWrapper{ - balancer: bwb.b, - pickfirst: pickfirst, - cc: cc, - startCh: make(chan struct{}), - conns: make(map[resolver.Address]balancer.SubConn), - connSt: make(map[balancer.SubConn]*scState), - csEvltr: &connectivityStateEvaluator{}, - state: connectivity.Idle, - } - cc.UpdateBalancerState(connectivity.Idle, bw) - go bw.lbWatcher() - return bw -} - -func (bwb *balancerWrapperBuilder) Name() string { - return "wrapper" -} - -type scState struct { - addr Address // The v1 address type. - s connectivity.State - down func(error) -} - -type balancerWrapper struct { - balancer Balancer // The v1 balancer. - pickfirst bool - - cc balancer.ClientConn - - // To aggregate the connectivity state. - csEvltr *connectivityStateEvaluator - state connectivity.State - - mu sync.Mutex - conns map[resolver.Address]balancer.SubConn - connSt map[balancer.SubConn]*scState - // This channel is closed when handling the first resolver result. - // lbWatcher blocks until this is closed, to avoid race between - // - NewSubConn is created, cc wants to notify balancer of state changes; - // - Build hasn't return, cc doesn't have access to balancer. - startCh chan struct{} -} - -// lbWatcher watches the Notify channel of the balancer and manages -// connections accordingly. -func (bw *balancerWrapper) lbWatcher() { - <-bw.startCh - grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst) - notifyCh := bw.balancer.Notify() - if notifyCh == nil { - // There's no resolver in the balancer. Connect directly. 
- a := resolver.Address{ - Addr: bw.cc.Target(), - Type: resolver.Backend, - } - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: Address{Addr: bw.cc.Target()}, - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - return - } - - for addrs := range notifyCh { - grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) - if bw.pickfirst { - var ( - oldA resolver.Address - oldSC balancer.SubConn - ) - bw.mu.Lock() - for oldA, oldSC = range bw.conns { - break - } - bw.mu.Unlock() - if len(addrs) <= 0 { - if oldSC != nil { - // Teardown old sc. - bw.mu.Lock() - delete(bw.conns, oldA) - delete(bw.connSt, oldSC) - bw.mu.Unlock() - bw.cc.RemoveSubConn(oldSC) - } - continue - } - - var newAddrs []resolver.Address - for _, a := range addrs { - newAddr := resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - } - newAddrs = append(newAddrs, newAddr) - } - if oldSC == nil { - // Create new sc. - sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) - } else { - bw.mu.Lock() - // For pickfirst, there should be only one SubConn, so the - // address doesn't matter. All states updating (up and down) - // and picking should all happen on that only SubConn. - bw.conns[resolver.Address{}] = sc - bw.connSt[sc] = &scState{ - addr: addrs[0], // Use the first address. - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } else { - oldSC.UpdateAddresses(newAddrs) - bw.mu.Lock() - bw.connSt[oldSC].addr = addrs[0] - bw.mu.Unlock() - } - } else { - var ( - add []resolver.Address // Addresses need to setup connections. 
- del []balancer.SubConn // Connections need to tear down. - ) - resAddrs := make(map[resolver.Address]Address) - for _, a := range addrs { - resAddrs[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - }] = a - } - bw.mu.Lock() - for a := range resAddrs { - if _, ok := bw.conns[a]; !ok { - add = append(add, a) - } - } - for a, c := range bw.conns { - if _, ok := resAddrs[a]; !ok { - del = append(del, c) - delete(bw.conns, a) - // Keep the state of this sc in bw.connSt until its state becomes Shutdown. - } - } - bw.mu.Unlock() - for _, a := range add { - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: resAddrs[a], - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } - for _, c := range del { - bw.cc.RemoveSubConn(c) - } - } - } -} - -func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s) - bw.mu.Lock() - defer bw.mu.Unlock() - scSt, ok := bw.connSt[sc] - if !ok { - return - } - if s == connectivity.Idle { - sc.Connect() - } - oldS := scSt.s - scSt.s = s - if oldS != connectivity.Ready && s == connectivity.Ready { - scSt.down = bw.balancer.Up(scSt.addr) - } else if oldS == connectivity.Ready && s != connectivity.Ready { - if scSt.down != nil { - scSt.down(errConnClosing) - } - } - sa := bw.csEvltr.recordTransition(oldS, s) - if bw.state != sa { - bw.state = sa - } - bw.cc.UpdateBalancerState(bw.state, bw) - if s == connectivity.Shutdown { - // Remove state for this sc. 
- delete(bw.connSt, sc) - } - return -} - -func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - // There should be a resolver inside the balancer. - // All updates here, if any, are ignored. - return -} - -func (bw *balancerWrapper) Close() { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - bw.balancer.Close() - return -} - -// The picker is the balancerWrapper itself. -// Pick should never return ErrNoSubConnAvailable. -// It either blocks or returns error, consistent with v1 balancer Get(). -func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - failfast := true // Default failfast is true. - if ss, ok := rpcInfoFromContext(ctx); ok { - failfast = ss.failfast - } - a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) - if err != nil { - return nil, nil, err - } - var done func(balancer.DoneInfo) - if p != nil { - done = func(i balancer.DoneInfo) { p() } - } - var sc balancer.SubConn - bw.mu.Lock() - defer bw.mu.Unlock() - if bw.pickfirst { - // Get the first sc in conns. - for _, sc = range bw.conns { - break - } - } else { - var ok bool - sc, ok = bw.conns[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, - ServerName: "", - Metadata: a.Metadata, - }] - if !ok && failfast { - return nil, nil, Errorf(codes.Unavailable, "there is no connection available") - } - if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { - // If the returned sc is not ready and RPC is failfast, - // return error, and this RPC will fail. 
- return nil, nil, Errorf(codes.Unavailable, "there is no connection available") - } - } - - return sc, done, nil -} - -// connectivityStateEvaluator gets updated by addrConns when their -// states transition, based on which it evaluates the state of -// ClientConn. -type connectivityStateEvaluator struct { - mu sync.Mutex - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transientFailure. -} - -// recordTransition records state change happening in every subConn and based on -// that it evaluates what aggregated state should be. -// It can only transition between Ready, Connecting and TransientFailure. Other states, -// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection -// before any subConn is created ClientConn is in idle state. In the end when ClientConn -// closes it is in Shutdown state. -// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. -func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { - cse.mu.Lock() - defer cse.mu.Unlock() - - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - } - } - - // Evaluate. 
- if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - return connectivity.TransientFailure -} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 1ef2507c35f..af34a71316f 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -25,7 +40,6 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -59,10 +73,7 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran } } for { - if c.maxReceiveMessageSize == nil { - return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") - } - if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil { + if err = recv(p, dopts.codec, stream, dopts.dc, reply, dopts.maxMsgSize, inPayload); err != nil { if err == io.EOF { break } @@ -75,11 +86,14 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) } c.trailerMD = stream.Trailer() + if peer, ok := peer.FromContext(stream.Context()); ok { + c.peer = peer + } return nil } // sendRequest writes out various information of an RPC such as Context and Message. 
-func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { +func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { defer func() { if err != nil { // If err is connection error, t will be closed, no need to close stream here. @@ -100,17 +114,11 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, Client: true, } } - hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload) + outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload) if err != nil { - return err + return Errorf(codes.Internal, "grpc: %v", err) } - if c.maxSendMessageSize == nil { - return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") - } - if len(data) > *c.maxSendMessageSize { - return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(data), *c.maxSendMessageSize) - } - err = t.Write(stream, hdr, data, opts) + err = t.Write(stream, outBuf, opts) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() dopts.copts.StatsHandler.HandleRPC(ctx, outPayload) @@ -136,33 +144,25 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { - c := defaultCallInfo() - mc := cc.GetMethodConfig(method) - if mc.WaitForReady != nil { - c.failFast = !*mc.WaitForReady + c := defaultCallInfo + if mc, ok := cc.getMethodConfig(method); ok { + c.failFast = !mc.WaitForReady + if mc.Timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, mc.Timeout) + defer cancel() + } } - - if mc.Timeout != nil && *mc.Timeout >= 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) - defer cancel() - } - - opts = append(cc.dopts.callOptions, opts...) 
for _, o := range opts { - if err := o.before(c); err != nil { + if err := o.before(&c); err != nil { return toRPCErr(err) } } defer func() { for _, o := range opts { - o.after(c) + o.after(&c) } }() - - c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) - c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) - if EnableTracing { c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) defer c.traceInfo.tr.Finish() @@ -179,25 +179,27 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } }() } - ctx = newContextWithRPCInfo(ctx, c.failFast) + ctx = newContextWithRPCInfo(ctx) sh := cc.dopts.copts.StatsHandler if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) begin := &stats.Begin{ Client: true, BeginTime: time.Now(), FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - defer func() { + } + defer func() { + if sh != nil { end := &stats.End{ Client: true, EndTime: time.Now(), Error: e, } sh.HandleRPC(ctx, end) - }() - } + } + }() topts := &transport.Options{ Last: true, Delay: false, @@ -207,9 +209,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli err error t transport.ClientTransport stream *transport.Stream - // Record the done handler from Balancer.Get(...). It is called once the + // Record the put handler from Balancer.Get(...). It is called once the // RPC has completed or failed. - done func(balancer.DoneInfo) + put func() ) // TODO(zhaoq): Need a formal spec of fail-fast. 
callHdr := &transport.CallHdr{ @@ -219,11 +221,11 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } - if c.creds != nil { - callHdr.Creds = c.creds - } - t, done, err = cc.getTransport(ctx, c.failFast) + gopts := BalancerGetOptions{ + BlockingWait: !c.failFast, + } + t, put, err = cc.getTransport(ctx, gopts) if err != nil { // TODO(zhaoq): Probably revisit the error handling. if _, ok := status.FromError(err); ok { @@ -243,31 +245,28 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } stream, err = t.NewStream(ctx, callHdr) if err != nil { - if done != nil { + if put != nil { if _, ok := err.(transport.ConnectionError); ok { // If error is connection error, transport was sending data on wire, // and we are not sure if anything has been sent on wire. // If error is not connection error, we are sure nothing has been sent. updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) } - done(balancer.DoneInfo{Err: err}) + put() } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue } return toRPCErr(err) } - if peer, ok := peer.FromContext(stream.Context()); ok { - c.peer = peer - } - err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts) + err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, stream, t, args, topts) if err != nil { - if done != nil { + if put != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - done(balancer.DoneInfo{Err: err}) + put() } // Retry a non-failfast RPC when // i) there is a connection error; or @@ -277,14 +276,14 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } return toRPCErr(err) } - err = recvResponse(ctx, cc.dopts, t, c, stream, reply) + err = recvResponse(ctx, cc.dopts, t, &c, stream, reply) if 
err != nil { - if done != nil { + if put != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - done(balancer.DoneInfo{Err: err}) + put() } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -295,12 +294,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) } t.CloseStream(stream, nil) - if done != nil { + if put != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - done(balancer.DoneInfo{Err: err}) + put() } return stream.Status().Err() } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 71de2e50d2b..f542d8bd041 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -23,19 +38,14 @@ import ( "fmt" "math" "net" - "reflect" - "strings" "sync" "time" "golang.org/x/net/context" "golang.org/x/net/trace" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/transport" ) @@ -46,7 +56,8 @@ var ( ErrClientConnClosing = errors.New("grpc: the client connection is closing") // ErrClientConnTimeout indicates that the ClientConn cannot establish the // underlying connections within the specified timeout. 
- // DEPRECATED: Please use context.DeadlineExceeded instead. + // DEPRECATED: Please use context.DeadlineExceeded instead. This error will be + // removed in Q1 2017. ErrClientConnTimeout = errors.New("grpc: timed out when dialing") // errNoTransportSecurity indicates that there is no transport security @@ -68,8 +79,6 @@ var ( errConnClosing = errors.New("grpc: the connection is closing") // errConnUnavailable indicates that the connection is unavailable. errConnUnavailable = errors.New("grpc: the connection is unavailable") - // errBalancerClosed indicates that the balancer is closed. - errBalancerClosed = errors.New("grpc: balancer is closed") // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second ) @@ -77,71 +86,30 @@ var ( // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - codec Codec - cp Compressor - dc Decompressor - bs backoffStrategy - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - copts transport.ConnectOptions - callOptions []CallOption - // This is to support v1 balancer. - balancerBuilder balancer.Builder + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + codec Codec + cp Compressor + dc Decompressor + bs backoffStrategy + balancer Balancer + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + copts transport.ConnectOptions + maxMsgSize int } -const ( - defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 - defaultClientMaxSendMessageSize = math.MaxInt32 -) +const defaultClientMaxMsgSize = math.MaxInt32 // DialOption configures how we set up the connection. type DialOption func(*dialOptions) -// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched -// before doing a write on the wire. 
-func WithWriteBufferSize(s int) DialOption { - return func(o *dialOptions) { - o.copts.WriteBufferSize = s - } -} - -// WithReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for each read syscall. -func WithReadBufferSize(s int) DialOption { - return func(o *dialOptions) { - o.copts.ReadBufferSize = s - } -} - -// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream. -// The lower bound for window size is 64K and any value smaller than that will be ignored. -func WithInitialWindowSize(s int32) DialOption { - return func(o *dialOptions) { - o.copts.InitialWindowSize = s - } -} - -// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection. -// The lower bound for window size is 64K and any value smaller than that will be ignored. -func WithInitialConnWindowSize(s int32) DialOption { - return func(o *dialOptions) { - o.copts.InitialConnWindowSize = s - } -} - -// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. func WithMaxMsgSize(s int) DialOption { - return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) -} - -// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection. -func WithDefaultCallOptions(cos ...CallOption) DialOption { return func(o *dialOptions) { - o.callOptions = append(o.callOptions, cos...) + o.maxMsgSize = s } } @@ -168,23 +136,10 @@ func WithDecompressor(dc Decompressor) DialOption { } } -// WithBalancer returns a DialOption which sets a load balancer with the v1 API. -// Name resolver will be ignored if this DialOption is specified. -// Deprecated: use the new balancer APIs in balancer package instead. 
+// WithBalancer returns a DialOption which sets a load balancer. func WithBalancer(b Balancer) DialOption { return func(o *dialOptions) { - o.balancerBuilder = &balancerWrapperBuilder{ - b: b, - } - } -} - -// WithBalancerBuilder is for testing only. Users using custom balancers should -// register their balancer and use service config to choose the balancer to use. -func WithBalancerBuilder(b balancer.Builder) DialOption { - // TODO(bar) remove this when switching balancer is done. - return func(o *dialOptions) { - o.balancerBuilder = b + o.balancer = b } } @@ -249,7 +204,7 @@ func WithTransportCredentials(creds credentials.TransportCredentials) DialOption } // WithPerRPCCredentials returns a DialOption which sets -// credentials and places auth state on each outbound RPC. +// credentials which will place auth state on each outbound RPC. func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { return func(o *dialOptions) { o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) @@ -258,7 +213,6 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn // initially. This is valid if and only if WithBlock() is present. -// Deprecated: use DialContext and context.WithTimeout instead. func WithTimeout(d time.Duration) DialOption { return func(o *dialOptions) { o.timeout = d @@ -287,7 +241,7 @@ func WithStatsHandler(h stats.Handler) DialOption { } } -// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors. +// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors. // If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network // address and won't try to reconnect. // The default value of FailOnNonTempDialError is false. 
@@ -305,7 +259,7 @@ func WithUserAgent(s string) DialOption { } } -// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport. +// WithKeepaliveParams returns a DialOption that specifies keepalive paramaters for the client transport. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { return func(o *dialOptions) { o.copts.KeepaliveParams = kp @@ -341,44 +295,26 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { } // DialContext creates a client connection to the given target. ctx can be used to -// cancel or expire the pending connection. Once this function returns, the +// cancel or expire the pending connecting. Once this function returns, the // cancellation and expiration of ctx will be noop. Users should call ClientConn.Close // to terminate all the pending operations after this function returns. +// This is the EXPERIMENTAL API. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - - blockingpicker: newPickerWrapper(), + conns: make(map[Address]*addrConn), } cc.ctx, cc.cancel = context.WithCancel(context.Background()) - + cc.dopts.maxMsgSize = defaultClientMaxMsgSize for _, opt := range opts { opt(&cc.dopts) } - - if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil { - return nil, errNoTransportSecurity - } - } else { - if cc.dopts.copts.TransportCredentials != nil { - return nil, errCredentialsConflict - } - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } - } - cc.mkp = cc.dopts.copts.KeepaliveParams if cc.dopts.copts.Dialer == nil { cc.dopts.copts.Dialer = newProxyDialer( func(ctx context.Context, addr string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + return 
dialContext(ctx, "tcp", addr) }, ) } @@ -407,16 +343,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - scSet := false if cc.dopts.scChan != nil { - // Try to get an initial service config. + // Wait for the initial service config. select { case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = sc - scSet = true } - default: + case <-ctx.Done(): + return nil, ctx.Err() } } // Set defaults. @@ -434,130 +369,89 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } else { cc.authority = target } + waitC := make(chan error, 1) + go func() { + defer close(waitC) + if cc.dopts.balancer == nil && cc.sc.LB != nil { + cc.dopts.balancer = cc.sc.LB + } + if cc.dopts.balancer != nil { + var credsClone credentials.TransportCredentials + if creds != nil { + credsClone = creds.Clone() + } + config := BalancerConfig{ + DialCreds: credsClone, + } + if err := cc.dopts.balancer.Start(target, config); err != nil { + waitC <- err + return + } + ch := cc.dopts.balancer.Notify() + if ch != nil { + if cc.dopts.block { + doneChan := make(chan struct{}) + go cc.lbWatcher(doneChan) + <-doneChan + } else { + go cc.lbWatcher(nil) + } + return + } + } + // No balancer, or no resolver within the balancer. Connect directly. + if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil { + waitC <- err + return + } + }() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-waitC: + if err != nil { + return nil, err + } + } - if cc.dopts.balancerBuilder != nil { - var credsClone credentials.TransportCredentials - if creds != nil { - credsClone = creds.Clone() - } - buildOpts := balancer.BuildOptions{ - DialCreds: credsClone, - Dialer: cc.dopts.copts.Dialer, - } - // Build should not take long time. So it's ok to not have a goroutine for it. - // TODO(bar) init balancer after first resolver result to support service config balancer. 
- cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, buildOpts) - } else { - waitC := make(chan error, 1) - go func() { - defer close(waitC) - // No balancer, or no resolver within the balancer. Connect directly. - ac, err := cc.newAddrConn([]resolver.Address{{Addr: target}}) - if err != nil { - waitC <- err - return - } - if err := ac.connect(cc.dopts.block); err != nil { - waitC <- err - return - } - }() - select { - case <-ctx.Done(): - return nil, ctx.Err() - case err := <-waitC: - if err != nil { - return nil, err - } - } - } - if cc.dopts.scChan != nil && !scSet { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = sc - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } if cc.dopts.scChan != nil { go cc.scWatcher() } - // Build the resolver. - cc.resolverWrapper, err = newCCResolverWrapper(cc) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) - } - - if cc.balancerWrapper != nil && cc.resolverWrapper == nil { - // TODO(bar) there should always be a resolver (DNS as the default). - // Unblock balancer initialization with a fake resolver update if there's no resolver. - // The balancer wrapper will not read the addresses, so an empty list works. - // TODO(bar) remove this after the real resolver is started. - cc.balancerWrapper.handleResolvedAddrs([]resolver.Address{}, nil) - } - - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - s := cc.GetState() - if s == connectivity.Ready { - break - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - return nil, ctx.Err() - } - } - } - return cc, nil } -// connectivityStateManager keeps the connectivity.State of ClientConn. -// This struct will eventually be exported so the balancers can access it. 
-type connectivityStateManager struct { - mu sync.Mutex - state connectivity.State - notifyChan chan struct{} -} +// ConnectivityState indicates the state of a client connection. +type ConnectivityState int -// updateState updates the connectivity.State of ClientConn. -// If there's a change it notifies goroutines waiting on state change to -// happen. -func (csm *connectivityStateManager) updateState(state connectivity.State) { - csm.mu.Lock() - defer csm.mu.Unlock() - if csm.state == connectivity.Shutdown { - return - } - if csm.state == state { - return - } - csm.state = state - if csm.notifyChan != nil { - // There are other goroutines waiting on this channel. - close(csm.notifyChan) - csm.notifyChan = nil - } -} +const ( + // Idle indicates the ClientConn is idle. + Idle ConnectivityState = iota + // Connecting indicates the ClienConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) -func (csm *connectivityStateManager) getState() connectivity.State { - csm.mu.Lock() - defer csm.mu.Unlock() - return csm.state -} - -func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { - csm.mu.Lock() - defer csm.mu.Unlock() - if csm.notifyChan == nil { - csm.notifyChan = make(chan struct{}) +func (s ConnectivityState) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + panic(fmt.Sprintf("unknown connectivity state: %d", s)) } - return csm.notifyChan } // ClientConn represents a client connection to an RPC server. 
@@ -568,40 +462,58 @@ type ClientConn struct { target string authority string dopts dialOptions - csMgr *connectivityStateManager - - balancerWrapper *ccBalancerWrapper - resolverWrapper *ccResolverWrapper - - blockingpicker *pickerWrapper mu sync.RWMutex sc ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. + conns map[Address]*addrConn + // Keepalive parameter can be udated if a GoAway is received. mkp keepalive.ClientParameters } -// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or -// ctx expires. A true value is returned in former case and false in latter. -// This is an EXPERIMENTAL API. -func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { - ch := cc.csMgr.getNotifyChan() - if cc.csMgr.getState() != sourceState { - return true +// lbWatcher watches the Notify channel of the balancer in cc and manages +// connections accordingly. If doneChan is not nil, it is closed after the +// first successfull connection is made. +func (cc *ClientConn) lbWatcher(doneChan chan struct{}) { + for addrs := range cc.dopts.balancer.Notify() { + var ( + add []Address // Addresses need to setup connections. + del []*addrConn // Connections need to tear down. 
+ ) + cc.mu.Lock() + for _, a := range addrs { + if _, ok := cc.conns[a]; !ok { + add = append(add, a) + } + } + for k, c := range cc.conns { + var keep bool + for _, a := range addrs { + if k == a { + keep = true + break + } + } + if !keep { + del = append(del, c) + delete(cc.conns, c.addr) + } + } + cc.mu.Unlock() + for _, a := range add { + if doneChan != nil { + err := cc.resetAddrConn(a, true, nil) + if err == nil { + close(doneChan) + doneChan = nil + } + } else { + cc.resetAddrConn(a, false, nil) + } + } + for _, c := range del { + c.tearDown(errConnDrain) + } } - select { - case <-ctx.Done(): - return false - case <-ch: - return true - } -} - -// GetState returns the connectivity.State of ClientConn. -// This is an EXPERIMENTAL API. -func (cc *ClientConn) GetState() connectivity.State { - return cc.csMgr.getState() } func (cc *ClientConn) scWatcher() { @@ -622,64 +534,69 @@ func (cc *ClientConn) scWatcher() { } } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { +// resetAddrConn creates an addrConn for addr and adds it to cc.conns. +// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. +// If tearDownErr is nil, errConnDrain will be used instead. 
+func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error { ac := &addrConn{ cc: cc, - addrs: addrs, + addr: addr, dopts: cc.dopts, } + cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = cc.mkp + cc.mu.RUnlock() ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + ac.stateCV = sync.NewCond(&ac.mu) + if EnableTracing { + ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) + } + if !ac.dopts.insecure { + if ac.dopts.copts.TransportCredentials == nil { + return errNoTransportSecurity + } + } else { + if ac.dopts.copts.TransportCredentials != nil { + return errCredentialsConflict + } + for _, cd := range ac.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing + } + } + } // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() - return nil, ErrClientConnClosing + return ErrClientConnClosing } - cc.conns[ac] = struct{}{} + stale := cc.conns[ac.addr] + cc.conns[ac.addr] = ac cc.mu.Unlock() - return ac, nil -} - -// removeAddrConn removes the addrConn in the subConn from clientConn. -// It also tears down the ac with the given error. -func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return + if stale != nil { + // There is an addrConn alive on ac.addr already. This could be due to + // 1) a buggy Balancer notifies duplicated Addresses; + // 2) goaway was received, a new ac will replace the old ac. + // The old ac should be deleted from cc.conns, but the + // underlying transport should drain rather than close. + if tearDownErr == nil { + // tearDownErr is nil if resetAddrConn is called by + // 1) Dial + // 2) lbWatcher + // In both cases, the stale ac should drain, not close. 
+ stale.tearDown(errConnDrain) + } else { + stale.tearDown(tearDownErr) + } } - delete(cc.conns, ac) - cc.mu.Unlock() - ac.tearDown(err) -} - -// connect starts to creating transport and also starts the transport monitor -// goroutine for this ac. -// It does nothing if the ac is not IDLE. -// TODO(bar) Move this to the addrConn section. -// This was part of resetAddrConn, keep it here to make the diff look clean. -func (ac *addrConn) connect(block bool) error { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return errConnClosing - } - if ac.state != connectivity.Idle { - ac.mu.Unlock() - return nil - } - ac.state = connectivity.Connecting - if ac.cc.balancerWrapper != nil { - ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) - } else { - ac.cc.csMgr.updateState(ac.state) - } - ac.mu.Unlock() - if block { - if err := ac.resetTransport(); err != nil { + if err := ac.resetTransport(false); err != nil { if err != errConnClosing { + // Tear down ac and delete it from cc.conns. + cc.mu.Lock() + delete(cc.conns, ac.addr) + cc.mu.Unlock() ac.tearDown(err) } if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { @@ -692,8 +609,8 @@ func (ac *addrConn) connect(block bool) error { } else { // Start a goroutine connecting to the server asynchronously. go func() { - if err := ac.resetTransport(); err != nil { - grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) + if err := ac.resetTransport(false); err != nil { + grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) if err != errConnClosing { // Keep this ac in cc.conns, to get the reason it's torn down. ac.tearDown(err) @@ -706,86 +623,66 @@ func (ac *addrConn) connect(block bool) error { return nil } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// It checks whether current connected address of ac is in the new addrs list. -// - If true, it updates ac.addrs and returns true. 
The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { - ac.mu.Lock() - defer ac.mu.Unlock() - grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) - if ac.state == connectivity.Shutdown { - ac.addrs = addrs - return true - } - - var curAddrFound bool - for _, a := range addrs { - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } - } - grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs - } - - return curAddrFound -} - -// GetMethodConfig gets the method config of the input method. -// If there's an exact match for input method (i.e. /service/method), we return -// the corresponding MethodConfig. -// If there isn't an exact match for the input method, we look for the default config -// under the service (i.e /service/). If there is a default MethodConfig for -// the serivce, we return it. -// Otherwise, we return an empty MethodConfig. -func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { - // TODO: Avoid the locking here. +// TODO: Avoid the locking here. +func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) { cc.mu.RLock() defer cc.mu.RUnlock() - m, ok := cc.sc.Methods[method] - if !ok { - i := strings.LastIndex(method, "/") - m, _ = cc.sc.Methods[method[:i+1]] - } - return m + m, ok = cc.sc.Methods[method] + return } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) { - if cc.balancerWrapper == nil { +func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { + var ( + ac *addrConn + ok bool + put func() + ) + if cc.dopts.balancer == nil { // If balancer is nil, there should be only one addrConn available. 
cc.mu.RLock() if cc.conns == nil { cc.mu.RUnlock() - // TODO this function returns toRPCErr and non-toRPCErr. Clean up - // the errors in ClientConn. return nil, nil, toRPCErr(ErrClientConnClosing) } - var ac *addrConn - for ac = range cc.conns { + for _, ac = range cc.conns { // Break after the first iteration to get the first addrConn. + ok = true break } cc.mu.RUnlock() - if ac == nil { - return nil, nil, errConnClosing - } - t, err := ac.wait(ctx, false /*hasBalancer*/, failfast) + } else { + var ( + addr Address + err error + ) + addr, put, err = cc.dopts.balancer.Get(ctx, opts) if err != nil { - return nil, nil, err + return nil, nil, toRPCErr(err) } - return t, nil, nil + cc.mu.RLock() + if cc.conns == nil { + cc.mu.RUnlock() + return nil, nil, toRPCErr(ErrClientConnClosing) + } + ac, ok = cc.conns[addr] + cc.mu.RUnlock() } - - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{}) + if !ok { + if put != nil { + updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) + put() + } + return nil, nil, errConnClosing + } + t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) if err != nil { - return nil, nil, toRPCErr(err) + if put != nil { + updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) + put() + } + return nil, nil, err } - return t, done, nil + return t, put, nil } // Close tears down the ClientConn and all underlying connections. 
@@ -799,16 +696,11 @@ func (cc *ClientConn) Close() error { } conns := cc.conns cc.conns = nil - cc.csMgr.updateState(connectivity.Shutdown) cc.mu.Unlock() - cc.blockingpicker.close() - if cc.resolverWrapper != nil { - cc.resolverWrapper.close() + if cc.dopts.balancer != nil { + cc.dopts.balancer.Close() } - if cc.balancerWrapper != nil { - cc.balancerWrapper.close() - } - for ac := range conns { + for _, ac := range conns { ac.tearDown(ErrClientConnClosing) } return nil @@ -819,15 +711,15 @@ type addrConn struct { ctx context.Context cancel context.CancelFunc - cc *ClientConn - curAddr resolver.Address - addrs []resolver.Address - dopts dialOptions - events trace.EventLog - acbw balancer.SubConn + cc *ClientConn + addr Address + dopts dialOptions + events trace.EventLog - mu sync.Mutex - state connectivity.State + mu sync.Mutex + state ConnectivityState + stateCV *sync.Cond + down func(error) // the handler called when a connection is down. // ready is closed and becomes nil when a new transport is up or failed // due to timeout. ready chan struct{} @@ -867,137 +759,125 @@ func (ac *addrConn) errorf(format string, a ...interface{}) { } } -// resetTransport recreates a transport to the address for ac. The old -// transport will close itself on error or when the clientconn is closed. -// -// TODO(bar) make sure all state transitions are valid. -func (ac *addrConn) resetTransport() error { +// getState returns the connectivity state of the Conn +func (ac *addrConn) getState() ConnectivityState { ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return errConnClosing + defer ac.mu.Unlock() + return ac.state +} + +// waitForStateChange blocks until the state changes to something other than the sourceState. 
+func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if sourceState != ac.state { + return ac.state, nil } - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } - ac.transport = nil - ac.curAddr = resolver.Address{} - ac.mu.Unlock() - ac.cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = ac.cc.mkp - ac.cc.mu.RUnlock() - for retries := 0; ; retries++ { - sleepTime := ac.dopts.bs.backoff(retries) - timeout := minConnectTimeout - ac.mu.Lock() - if timeout < time.Duration(int(sleepTime)/len(ac.addrs)) { - timeout = time.Duration(int(sleepTime) / len(ac.addrs)) + done := make(chan struct{}) + var err error + go func() { + select { + case <-ctx.Done(): + ac.mu.Lock() + err = ctx.Err() + ac.stateCV.Broadcast() + ac.mu.Unlock() + case <-done: } - connectTime := time.Now() - if ac.state == connectivity.Shutdown { + }() + defer close(done) + for sourceState == ac.state { + ac.stateCV.Wait() + if err != nil { + return ac.state, err + } + } + return ac.state, nil +} + +func (ac *addrConn) resetTransport(closeTransport bool) error { + for retries := 0; ; retries++ { + ac.mu.Lock() + ac.printf("connecting") + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. ac.mu.Unlock() return errConnClosing } - ac.printf("connecting") - if ac.state != connectivity.Connecting { - ac.state = connectivity.Connecting - // TODO(bar) remove condition once we always have a balancer. 
- if ac.cc.balancerWrapper != nil { - ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) - } else { - ac.cc.csMgr.updateState(ac.state) - } + if ac.down != nil { + ac.down(downErrorf(false, true, "%v", errNetworkIO)) + ac.down = nil } - // copy ac.addrs in case of race - addrsIter := make([]resolver.Address, len(ac.addrs)) - copy(addrsIter, ac.addrs) - copts := ac.dopts.copts + ac.state = Connecting + ac.stateCV.Broadcast() + t := ac.transport ac.mu.Unlock() - for _, addr := range addrsIter { + if closeTransport && t != nil { + t.Close() + } + sleepTime := ac.dopts.bs.backoff(retries) + timeout := minConnectTimeout + if timeout < sleepTime { + timeout = sleepTime + } + ctx, cancel := context.WithTimeout(ac.ctx, timeout) + connectTime := time.Now() + sinfo := transport.TargetInfo{ + Addr: ac.addr.Addr, + Metadata: ac.addr.Metadata, + } + newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts) + // Don't call cancel in success path due to a race in Go 1.6: + // https://github.com/golang/go/issues/15078. + if err != nil { + cancel() + + if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { + return err + } + grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr) ac.mu.Lock() - if ac.state == connectivity.Shutdown { + if ac.state == Shutdown { // ac.tearDown(...) has been invoked. 
ac.mu.Unlock() return errConnClosing } - ac.mu.Unlock() - sinfo := transport.TargetInfo{ - Addr: addr.Addr, - Metadata: addr.Metadata, - } - newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout) - if err != nil { - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { - ac.mu.Lock() - if ac.state != connectivity.Shutdown { - ac.state = connectivity.TransientFailure - if ac.cc.balancerWrapper != nil { - ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) - } else { - ac.cc.csMgr.updateState(ac.state) - } - } - ac.mu.Unlock() - return err - } - grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, addr) - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - // ac.tearDown(...) has been invoked. - ac.mu.Unlock() - return errConnClosing - } - ac.mu.Unlock() - continue - } - ac.mu.Lock() - ac.printf("ready") - if ac.state == connectivity.Shutdown { - // ac.tearDown(...) has been invoked. - ac.mu.Unlock() - newTransport.Close() - return errConnClosing - } - ac.state = connectivity.Ready - if ac.cc.balancerWrapper != nil { - ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) - } else { - ac.cc.csMgr.updateState(ac.state) - } - t := ac.transport - ac.transport = newTransport - if t != nil { - t.Close() - } - ac.curAddr = addr + ac.errorf("transient failure: %v", err) + ac.state = TransientFailure + ac.stateCV.Broadcast() if ac.ready != nil { close(ac.ready) ac.ready = nil } ac.mu.Unlock() - return nil + closeTransport = false + select { + case <-time.After(sleepTime - time.Since(connectTime)): + case <-ac.ctx.Done(): + return ac.ctx.Err() + } + continue } ac.mu.Lock() - ac.state = connectivity.TransientFailure - if ac.cc.balancerWrapper != nil { - ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) - } else { - ac.cc.csMgr.updateState(ac.state) + ac.printf("ready") + if ac.state == Shutdown { + // ac.tearDown(...) 
has been invoked. + ac.mu.Unlock() + newTransport.Close() + return errConnClosing } + ac.state = Ready + ac.stateCV.Broadcast() + ac.transport = newTransport if ac.ready != nil { close(ac.ready) ac.ready = nil } - ac.mu.Unlock() - timer := time.NewTimer(sleepTime - time.Since(connectTime)) - select { - case <-timer.C: - case <-ac.ctx.Done(): - timer.Stop() - return ac.ctx.Err() + if ac.cc.dopts.balancer != nil { + ac.down = ac.cc.dopts.balancer.Up(ac.addr) } - timer.Stop() + ac.mu.Unlock() + return nil } } @@ -1008,54 +888,73 @@ func (ac *addrConn) transportMonitor() { ac.mu.Lock() t := ac.transport ac.mu.Unlock() - // Block until we receive a goaway or an error occurs. select { - case <-t.GoAway(): - case <-t.Error(): - } - // If a GoAway happened, regardless of error, adjust our keepalive - // parameters as appropriate. - select { - case <-t.GoAway(): - ac.adjustParams(t.GetGoAwayReason()) - default: - } - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - // Set connectivity state to TransientFailure before calling - // resetTransport. Transition READY->CONNECTING is not valid. - ac.state = connectivity.TransientFailure - if ac.cc.balancerWrapper != nil { - ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) - } else { - ac.cc.csMgr.updateState(ac.state) - } - ac.curAddr = resolver.Address{} - ac.mu.Unlock() - if err := ac.resetTransport(); err != nil { - ac.mu.Lock() - ac.printf("transport exiting: %v", err) - ac.mu.Unlock() - grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) + // This is needed to detect the teardown when + // the addrConn is idle (i.e., no RPC in flight). 
+ case <-ac.ctx.Done(): + select { + case <-t.Error(): + t.Close() + default: } return + case <-t.GoAway(): + ac.adjustParams(t.GetGoAwayReason()) + // If GoAway happens without any network I/O error, ac is closed without shutting down the + // underlying transport (the transport will be closed when all the pending RPCs finished or + // failed.). + // If GoAway and some network I/O error happen concurrently, ac and its underlying transport + // are closed. + // In both cases, a new ac is created. + select { + case <-t.Error(): + ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) + default: + ac.cc.resetAddrConn(ac.addr, false, errConnDrain) + } + return + case <-t.Error(): + select { + case <-ac.ctx.Done(): + t.Close() + return + case <-t.GoAway(): + ac.adjustParams(t.GetGoAwayReason()) + ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) + return + default: + } + ac.mu.Lock() + if ac.state == Shutdown { + // ac has been shutdown. + ac.mu.Unlock() + return + } + ac.state = TransientFailure + ac.stateCV.Broadcast() + ac.mu.Unlock() + if err := ac.resetTransport(true); err != nil { + ac.mu.Lock() + ac.printf("transport exiting: %v", err) + ac.mu.Unlock() + grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) + } + return + } } } } // wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or -// iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true. +// iv) transport is in TransientFailure and there is a balancer/failfast is true. func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { for { ac.mu.Lock() switch { - case ac.state == connectivity.Shutdown: + case ac.state == Shutdown: if failfast || !hasBalancer { // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. 
err := ac.tearDownErr @@ -1064,11 +963,11 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans } ac.mu.Unlock() return nil, errConnClosing - case ac.state == connectivity.Ready: + case ac.state == Ready: ct := ac.transport ac.mu.Unlock() return ct, nil - case ac.state == connectivity.TransientFailure: + case ac.state == TransientFailure: if failfast || hasBalancer { ac.mu.Unlock() return nil, errConnUnavailable @@ -1089,28 +988,6 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans } } -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { - ac.mu.Lock() - if ac.state == connectivity.Ready { - t := ac.transport - ac.mu.Unlock() - return t, true - } - var idle bool - if ac.state == connectivity.Idle { - idle = true - } - ac.mu.Unlock() - // Trigger idle ac to connect. - if idle { - ac.connect(false) - } - return nil, false -} - // tearDown starts to tear down the addrConn. // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in // some edge cases (e.g., the caller opens and closes many addrConn's in a @@ -1118,9 +995,13 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { // tearDown doesn't remove ac from ac.cc.conns. func (ac *addrConn) tearDown(err error) { ac.cancel() + ac.mu.Lock() - ac.curAddr = resolver.Address{} defer ac.mu.Unlock() + if ac.down != nil { + ac.down(downErrorf(false, false, "%v", err)) + ac.down = nil + } if err == errConnDrain && ac.transport != nil { // GracefulClose(...) may be executed multiple times when // i) receiving multiple GoAway frames from the server; or @@ -1128,16 +1009,12 @@ func (ac *addrConn) tearDown(err error) { // address removal and GoAway. 
ac.transport.GracefulClose() } - if ac.state == connectivity.Shutdown { + if ac.state == Shutdown { return } - ac.state = connectivity.Shutdown + ac.state = Shutdown ac.tearDownErr = err - if ac.cc.balancerWrapper != nil { - ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) - } else { - ac.cc.csMgr.updateState(ac.state) - } + ac.stateCV.Broadcast() if ac.events != nil { ac.events.Finish() ac.events = nil @@ -1146,11 +1023,8 @@ func (ac *addrConn) tearDown(err error) { close(ac.ready) ac.ready = nil } + if ac.transport != nil && err != errConnDrain { + ac.transport.Close() + } return } - -func (ac *addrConn) getState() connectivity.State { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 905b048e2ac..bd76ebb7f17 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -1,20 +1,35 @@ /* +* + * Copyright 2014, Google Inc. + * All rights reserved. * - * Copyright 2014 gRPC authors. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
* - * http://www.apache.org/licenses/LICENSE-2.0 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ +*/ package grpc @@ -81,7 +96,6 @@ func (p protoCodec) Marshal(v interface{}) ([]byte, error) { func (p protoCodec) Unmarshal(data []byte, v interface{}) error { cb := protoBufferPool.Get().(*cachedProtoBuffer) cb.SetBuf(data) - v.(proto.Message).Reset() err := cb.Unmarshal(v.(proto.Message)) cb.SetBuf(nil) protoBufferPool.Put(cb) diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go index 259837060ab..e6762d08455 100644 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -1,4 +1,4 @@ -// Code generated by "stringer -type=Code"; DO NOT EDIT. 
+// generated by stringer -type=Code; DO NOT EDIT package codes @@ -9,7 +9,7 @@ const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlre var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} func (i Code) String() string { - if i >= Code(len(_Code_index)-1) { + if i+1 >= Code(len(_Code_index)) { return fmt.Sprintf("Code(%d)", i) } return _Code_name[_Code_index[i]:_Code_index[i+1]] diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 81fe7bf85b3..37c5b860bd6 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -29,7 +44,7 @@ const ( // OK is returned on success. OK Code = 0 - // Canceled indicates the operation was canceled (typically by the caller). + // Canceled indicates the operation was cancelled (typically by the caller). Canceled Code = 1 // Unknown error. 
An example of where this error may be returned is diff --git a/vendor/google.golang.org/grpc/connectivity/BUILD b/vendor/google.golang.org/grpc/connectivity/BUILD deleted file mode 100644 index d5555d4a28f..00000000000 --- a/vendor/google.golang.org/grpc/connectivity/BUILD +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["connectivity.go"], - importpath = "google.golang.org/grpc/connectivity", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go deleted file mode 100644 index 568ef5dc68b..00000000000 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package connectivity defines connectivity semantics. -// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. 
-// All APIs in this package are experimental. -package connectivity - -import ( - "golang.org/x/net/context" - "google.golang.org/grpc/grpclog" -) - -// State indicates the state of connectivity. -// It can be the state of a ClientConn or SubConn. -type State int - -func (s State) String() string { - switch s { - case Idle: - return "IDLE" - case Connecting: - return "CONNECTING" - case Ready: - return "READY" - case TransientFailure: - return "TRANSIENT_FAILURE" - case Shutdown: - return "SHUTDOWN" - default: - grpclog.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" - } -} - -const ( - // Idle indicates the ClientConn is idle. - Idle State = iota - // Connecting indicates the ClienConn is connecting. - Connecting - // Ready indicates the ClientConn is ready for work. - Ready - // TransientFailure indicates the ClientConn has seen a failure but expects to recover. - TransientFailure - // Shutdown indicates the ClientConn has started shutting down. - Shutdown -) - -// Reporter reports the connectivity states. -type Reporter interface { - // CurrentState returns the current state of the reporter. - CurrentState() State - // WaitForStateChange blocks until the reporter's state is different from the given state, - // and returns true. - // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). 
- WaitForStateChange(context.Context, State) bool -} diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh new file mode 100755 index 00000000000..b85f9181dee --- /dev/null +++ b/vendor/google.golang.org/grpc/coverage.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + + +set -e + +workdir=.cover +profile="$workdir/cover.out" +mode=set +end2endtest="google.golang.org/grpc/test" + +generate_cover_data() { + rm -rf "$workdir" + mkdir "$workdir" + + for pkg in "$@"; do + if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] + then + f="$workdir/$(echo $pkg | tr / -)" + go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" + go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" + fi + done + + echo "mode: $mode" >"$profile" + grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" +} + +show_cover_report() { + go tool cover -${1}="$profile" +} + +push_to_coveralls() { + goveralls -coverprofile="$profile" +} + +generate_cover_data $(go list ./...) +show_cover_report func +case "$1" in +"") + ;; +--html) + show_cover_report html ;; +--coveralls) + push_to_coveralls ;; +*) + echo >&2 "error: invalid option: $1" ;; +esac +rm -rf "$workdir" diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 0ce766a4dcf..d1217344b67 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ @@ -91,14 +106,10 @@ type TransportCredentials interface { // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). // If the returned error is a wrapper error, implementations should make sure that // the error implements Temporary() to have the correct retry behaviors. - // - // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about // the connection. - // - // If the returned net.Conn is closed, it MUST close the net.Conn provided. ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) // Info provides the ProtocolInfo of this TransportCredentials. Info() ProtocolInfo @@ -185,14 +196,14 @@ func NewTLS(c *tls.Config) TransportCredentials { return tc } -// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// NewClientTLSFromCert constructs a TLS from the input certificate for client. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } -// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// NewClientTLSFromFile constructs a TLS from the input certificate file for client. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. 
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { @@ -207,12 +218,12 @@ func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredent return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil } -// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +// NewServerTLSFromCert constructs a TLS from the input certificate for server. func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) } -// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// NewServerTLSFromFile constructs a TLS from the input certificate file and key // file for server. func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go index 60409aac0fb..7597b09e358 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -3,19 +3,34 @@ /* * - * Copyright 2016 gRPC authors. + * Copyright 2016, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go index 93f0e1d8de2..0ecf342da84 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go @@ -2,19 +2,34 @@ /* * - * Copyright 2017 gRPC authors. + * Copyright 2017, Google Inc. + * All rights reserved. 
* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go index d6bbcc9fdd9..cfd40dfa34a 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -2,19 +2,34 @@ /* * - * Copyright 2016 gRPC authors. + * Copyright 2016, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
* - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index e153b2c390c..b4c0e740e9c 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -1,24 +1,6 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - /* Package grpc implements an RPC system called gRPC. -See grpc.io for more information about gRPC. +See www.grpc.io for more information about gRPC. */ package grpc diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go new file mode 100644 index 00000000000..b61c57e88de --- /dev/null +++ b/vendor/google.golang.org/grpc/go16.go @@ -0,0 +1,56 @@ +// +build go1.6,!go1.7 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package grpc + +import ( + "fmt" + "net" + "net/http" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req.Cancel = ctx.Done() + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go new file mode 100644 index 00000000000..844f0e1899b --- /dev/null +++ b/vendor/google.golang.org/grpc/go17.go @@ -0,0 +1,55 @@ +// +build go1.7 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "net" + "net/http" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return err + } + return nil +} diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go index db56ff36217..524e429df3e 100644 --- a/vendor/google.golang.org/grpc/grpclb.go +++ b/vendor/google.golang.org/grpc/grpclb.go @@ -1,18 +1,33 @@ /* * - * Copyright 2016 gRPC authors. + * Copyright 2016, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ @@ -28,7 +43,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/codes" - lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/naming" @@ -59,21 +74,41 @@ type balanceLoadClientStream struct { ClientStream } -func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error { +func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { return x.ClientStream.SendMsg(m) } -func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) { - m := new(lbmpb.LoadBalanceResponse) +func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { + m := new(lbpb.LoadBalanceResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. + Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolution for grpclb should provide. The +// name resolver used by grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. + ServerName string +} + // NewGRPCLBBalancer creates a grpclb load balancer. func NewGRPCLBBalancer(r naming.Resolver) Balancer { - return &grpclbBalancer{ + return &balancer{ r: r, } } @@ -96,27 +131,27 @@ type grpclbAddrInfo struct { dropForLoadBalancing bool } -type grpclbBalancer struct { - r naming.Resolver - target string - mu sync.Mutex - seq int // a sequence number to make sure addrCh does not get stale addresses. 
- w naming.Watcher - addrCh chan []Address - rbs []remoteBalancerInfo - addrs []*grpclbAddrInfo - next int - waitCh chan struct{} - done bool - rand *rand.Rand +type balancer struct { + r naming.Resolver + target string + mu sync.Mutex + seq int // a sequence number to make sure addrCh does not get stale addresses. + w naming.Watcher + addrCh chan []Address + rbs []remoteBalancerInfo + addrs []*grpclbAddrInfo + next int + waitCh chan struct{} + done bool + expTimer *time.Timer + rand *rand.Rand - clientStats lbmpb.ClientStats + clientStats lbpb.ClientStats } -func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { +func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { updates, err := w.Next() if err != nil { - grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err) return err } b.mu.Lock() @@ -138,24 +173,24 @@ func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBala if exist { continue } - md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB) + md, ok := update.Metadata.(*AddrMetadataGRPCLB) if !ok { // TODO: Revisit the handling here and may introduce some fallback mechanism. - grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata) + grpclog.Printf("The name resolution contains unexpected metadata %v", update.Metadata) continue } switch md.AddrType { - case naming.Backend: + case Backend: // TODO: Revisit the handling here and may introduce some fallback mechanism. 
- grpclog.Errorf("The name resolution does not give grpclb addresses") + grpclog.Printf("The name resolution does not give grpclb addresses") continue - case naming.GRPCLB: + case GRPCLB: b.rbs = append(b.rbs, remoteBalancerInfo{ addr: update.Addr, name: md.ServerName, }) default: - grpclog.Errorf("Received unknow address type %d", md.AddrType) + grpclog.Printf("Received unknow address type %d", md.AddrType) continue } case naming.Delete: @@ -167,7 +202,7 @@ func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBala } } default: - grpclog.Errorf("Unknown update.Op %v", update.Op) + grpclog.Println("Unknown update.Op ", update.Op) } } // TODO: Fall back to the basic round-robin load balancing if the resulting address is @@ -180,33 +215,42 @@ func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBala return nil } -func convertDuration(d *lbmpb.Duration) time.Duration { +func (b *balancer) serverListExpire(seq int) { + b.mu.Lock() + defer b.mu.Unlock() + // TODO: gRPC interanls do not clear the connections when the server list is stale. + // This means RPCs will keep using the existing server list until b receives new + // server list even though the list is expired. Revisit this behavior later. + if b.done || seq < b.seq { + return + } + b.next = 0 + b.addrs = nil + // Ask grpc internals to close all the corresponding connections. 
+ b.addrCh <- nil +} + +func convertDuration(d *lbpb.Duration) time.Duration { if d == nil { return 0 } return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond } -func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) { +func (b *balancer) processServerList(l *lbpb.ServerList, seq int) { if l == nil { return } servers := l.GetServers() + expiration := convertDuration(l.GetExpirationInterval()) var ( sl []*grpclbAddrInfo addrs []Address ) for _, s := range servers { md := metadata.Pairs("lb-token", s.LoadBalanceToken) - ip := net.IP(s.IpAddress) - ipStr := ip.String() - if ip.To4() == nil { - // Add square brackets to ipv6 addresses, otherwise net.Dial() and - // net.SplitHostPort() will return too many colons error. - ipStr = fmt.Sprintf("[%s]", ipStr) - } addr := Address{ - Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), + Addr: fmt.Sprintf("%s:%d", net.IP(s.IpAddress), s.Port), Metadata: &md, } sl = append(sl, &grpclbAddrInfo{ @@ -226,11 +270,20 @@ func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) { b.next = 0 b.addrs = sl b.addrCh <- addrs + if b.expTimer != nil { + b.expTimer.Stop() + b.expTimer = nil + } + if expiration > 0 { + b.expTimer = time.AfterFunc(expiration, func() { + b.serverListExpire(seq) + }) + } } return } -func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { +func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { ticker := time.NewTicker(interval) defer ticker.Stop() for { @@ -241,30 +294,29 @@ func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval tim } b.mu.Lock() stats := b.clientStats - b.clientStats = lbmpb.ClientStats{} // Clear the stats. + b.clientStats = lbpb.ClientStats{} // Clear the stats. 
b.mu.Unlock() t := time.Now() - stats.Timestamp = &lbmpb.Timestamp{ + stats.Timestamp = &lbpb.Timestamp{ Seconds: t.Unix(), Nanos: int32(t.Nanosecond()), } - if err := s.Send(&lbmpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{ + if err := s.Send(&lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ ClientStats: &stats, }, }); err != nil { - grpclog.Errorf("grpclb: failed to send load report: %v", err) return } } } -func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { +func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := lbc.BalanceLoad(ctx) if err != nil { - grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) + grpclog.Printf("Failed to perform RPC to the remote balancer %v", err) return } b.mu.Lock() @@ -273,39 +325,37 @@ func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (r return } b.mu.Unlock() - initReq := &lbmpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{ - InitialRequest: &lbmpb.InitialLoadBalanceRequest{ + initReq := &lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ + InitialRequest: &lbpb.InitialLoadBalanceRequest{ Name: b.target, }, }, } if err := stream.Send(initReq); err != nil { - grpclog.Errorf("grpclb: failed to send init request: %v", err) // TODO: backoff on retry? return true } reply, err := stream.Recv() if err != nil { - grpclog.Errorf("grpclb: failed to recv init response: %v", err) // TODO: backoff on retry? 
return true } initResp := reply.GetInitialResponse() if initResp == nil { - grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.") + grpclog.Println("Failed to receive the initial response from the remote balancer.") return } // TODO: Support delegation. if initResp.LoadBalancerDelegate != "" { // delegation - grpclog.Errorf("TODO: Delegation is not supported yet.") + grpclog.Println("TODO: Delegation is not supported yet.") return } streamDone := make(chan struct{}) defer close(streamDone) b.mu.Lock() - b.clientStats = lbmpb.ClientStats{} // Clear client stats. + b.clientStats = lbpb.ClientStats{} // Clear client stats. b.mu.Unlock() if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { go b.sendLoadReport(stream, d, streamDone) @@ -314,7 +364,6 @@ func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (r for { reply, err := stream.Recv() if err != nil { - grpclog.Errorf("grpclb: failed to recv server list: %v", err) break } b.mu.Lock() @@ -332,7 +381,7 @@ func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (r return true } -func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { +func (b *balancer) Start(target string, config BalancerConfig) error { b.rand = rand.New(rand.NewSource(time.Now().Unix())) // TODO: Fall back to the basic direct connection if there is no name resolver. 
if b.r == nil { @@ -348,7 +397,6 @@ func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { w, err := b.r.Resolve(target) if err != nil { b.mu.Unlock() - grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err) return err } b.w = w @@ -358,7 +406,7 @@ func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { go func() { for { if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil { - grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err) + grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) close(balancerAddrsCh) return } @@ -442,32 +490,22 @@ func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { cc.Close() } // Talk to the remote load balancer to get the server list. - var ( - err error - dopts []DialOption - ) - if creds := config.DialCreds; creds != nil { + var err error + creds := config.DialCreds + ccError = make(chan struct{}) + if creds == nil { + cc, err = Dial(rb.addr, WithInsecure()) + } else { if rb.name != "" { if err := creds.OverrideServerName(rb.name); err != nil { - grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err) + grpclog.Printf("Failed to override the server name in the credentials: %v", err) continue } } - dopts = append(dopts, WithTransportCredentials(creds)) - } else { - dopts = append(dopts, WithInsecure()) + cc, err = Dial(rb.addr, WithTransportCredentials(creds)) } - if dialer := config.Dialer; dialer != nil { - // WithDialer takes a different type of function, so we instead use a special DialOption here. - dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer }) - } - dopts = append(dopts, WithBlock()) - ccError = make(chan struct{}) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - cc, err = DialContext(ctx, rb.addr, dopts...) 
- cancel() if err != nil { - grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err) + grpclog.Printf("Failed to setup a connection to the remote balancer %v: %v", rb.addr, err) close(ccError) continue } @@ -491,7 +529,7 @@ func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { return nil } -func (b *grpclbBalancer) down(addr Address, err error) { +func (b *balancer) down(addr Address, err error) { b.mu.Lock() defer b.mu.Unlock() for _, a := range b.addrs { @@ -502,7 +540,7 @@ func (b *grpclbBalancer) down(addr Address, err error) { } } -func (b *grpclbBalancer) Up(addr Address) func(error) { +func (b *balancer) Up(addr Address) func(error) { b.mu.Lock() defer b.mu.Unlock() if b.done { @@ -530,7 +568,7 @@ func (b *grpclbBalancer) Up(addr Address) func(error) { } } -func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { +func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { var ch chan struct{} b.mu.Lock() if b.done { @@ -600,10 +638,17 @@ func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr } } if !opts.BlockingWait { - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithClientFailedToSend++ + if len(b.addrs) == 0 { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithClientFailedToSend++ + b.mu.Unlock() + err = Errorf(codes.Unavailable, "there is no address available") + return + } + // Returns the next addr on b.addrs for a failfast RPC. + addr = b.addrs[b.next].addr + b.next++ b.mu.Unlock() - err = Errorf(codes.Unavailable, "there is no address available") return } // Wait on b.waitCh for non-failfast RPCs. 
@@ -680,17 +725,17 @@ func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr } } -func (b *grpclbBalancer) Notify() <-chan []Address { +func (b *balancer) Notify() <-chan []Address { return b.addrCh } -func (b *grpclbBalancer) Close() error { +func (b *balancer) Close() error { b.mu.Lock() defer b.mu.Unlock() - if b.done { - return errBalancerClosed - } b.done = true + if b.expTimer != nil { + b.expTimer.Stop() + } if b.waitCh != nil { close(b.waitCh) } diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD index 35716e585b4..79fd880777f 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD @@ -1,10 +1,17 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") +filegroup( + name = "go_default_library_protos", + srcs = ["grpclb.proto"], + visibility = ["//visibility:public"], +) + go_library( name = "go_default_library", - srcs = ["doc.go"], + srcs = ["grpclb.pb.go"], importpath = "google.golang.org/grpc/grpclb/grpc_lb_v1", visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) filegroup( @@ -16,10 +23,7 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go deleted file mode 100644 index aba962840c8..00000000000 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpc_lb_v1 is the parent package of all gRPC loadbalancer -// message and service protobuf definitions. -package grpc_lb_v1 diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go similarity index 79% rename from vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go rename to vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go index f4a27125a4f..f63941bd803 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go @@ -1,11 +1,12 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_lb_v1/messages/messages.proto +// Code generated by protoc-gen-go. +// source: grpclb.proto +// DO NOT EDIT! /* -Package messages is a generated protocol buffer package. +Package grpc_lb_v1 is a generated protocol buffer package. It is generated from these files: - grpc_lb_v1/messages/messages.proto + grpclb.proto It has these top-level messages: Duration @@ -18,7 +19,7 @@ It has these top-level messages: ServerList Server */ -package messages +package grpc_lb_v1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -472,6 +473,11 @@ type ServerList struct { // across more servers. The client should consume the server list in order // unless instructed otherwise via the client_config. 
Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"` + // Indicates the amount of time that the client should consider this server + // list as valid. It may be considered stale after waiting this interval of + // time after receiving the list. If the interval is not positive, the + // client can assume the list is valid until the next list is received. + ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"` } func (m *ServerList) Reset() { *m = ServerList{} } @@ -486,6 +492,13 @@ func (m *ServerList) GetServers() []*Server { return nil } +func (m *ServerList) GetExpirationInterval() *Duration { + if m != nil { + return m.ExpirationInterval + } + return nil +} + // Contains server information. When none of the [drop_for_*] fields are true, // use the other fields. When drop_for_rate_limiting is true, ignore all other // fields. Use drop_for_load_balancing only when it is true and @@ -563,53 +576,54 @@ func init() { proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") } -func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 709 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b, - 0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69, - 0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55, - 0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28, - 0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f, - 0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb, - 0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 
0xab, 0x08, 0x44, 0x98, 0x56, - 0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3, - 0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a, - 0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18, - 0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 0x0f, 0xb8, - 0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a, - 0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc, - 0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d, - 0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f, - 0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42, - 0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b, - 0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf, - 0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60, - 0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3, - 0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29, - 0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9, - 0x99, 0xd2, 0x4c, 0x73, 0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1, - 0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e, - 0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd, - 0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a, - 0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa, - 0x2e, 
0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31, - 0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a, - 0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79, - 0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8, - 0x1a, 0xd4, 0x63, 0xd4, 0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89, - 0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f, - 0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7, - 0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a, - 0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62, - 0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d, - 0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77, - 0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc, - 0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76, - 0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b, - 0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06, - 0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd, - 0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86, - 0xa6, 0x4a, 0x06, 0x00, 0x00, + // 733 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39, + 0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34, + 0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 
0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a, + 0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9, + 0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1, + 0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92, + 0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51, + 0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0, + 0x74, 0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51, + 0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7, + 0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4, + 0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13, + 0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67, + 0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed, + 0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93, + 0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f, + 0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2, + 0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4, + 0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd, + 0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2, + 0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd, + 0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71, + 0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a, + 
0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c, + 0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c, + 0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0, + 0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84, + 0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37, + 0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5, + 0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f, + 0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07, + 0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71, + 0xcb, 0x01, 0x9e, 0xd3, 0x5b, 0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f, + 0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87, + 0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94, + 0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56, + 0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 0x85, 0x7a, 0xa9, + 0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a, + 0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e, + 0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87, + 0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28, + 0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70, + 0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94, + 0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 
0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5, + 0xc6, 0x7f, 0x13, 0xf7, 0xfd, 0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff, + 0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto similarity index 71% rename from vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto rename to vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto index 2ed04551fad..a2502fb284a 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto @@ -1,21 +1,35 @@ -// Copyright 2016 gRPC authors. +// Copyright 2016, Google Inc. +// All rights reserved. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: // -// http://www.apache.org/licenses/LICENSE-2.0 +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package grpc.lb.v1; -option go_package = "messages"; message Duration { // Signed seconds of the span of time. Must be from -315,576,000,000 @@ -32,6 +46,7 @@ message Duration { } message Timestamp { + // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. @@ -44,6 +59,12 @@ message Timestamp { int32 nanos = 2; } +service LoadBalancer { + // Bidirectional rpc to get a list of servers. + rpc BalanceLoad(stream LoadBalanceRequest) + returns (stream LoadBalanceResponse); +} + message LoadBalanceRequest { oneof load_balance_request_type { // This message should be sent on the first request to the load balancer. @@ -121,8 +142,11 @@ message ServerList { // unless instructed otherwise via the client_config. 
repeated Server servers = 1; - // Was google.protobuf.Duration expiration_interval. - reserved 3; + // Indicates the amount of time that the client should consider this server + // list as valid. It may be considered stale after waiting this interval of + // time after receiving the list. If the interval is not positive, the + // client can assume the list is valid until the next list is received. + Duration expiration_interval = 3; } // Contains server information. When none of the [drop_for_*] fields are true, diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD deleted file mode 100644 index 06ab31fa949..00000000000 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -filegroup( - name = "go_default_library_protos", - srcs = ["messages.proto"], - visibility = ["//visibility:public"], -) - -go_library( - name = "go_default_library", - srcs = ["messages.pb.go"], - importpath = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/grpc/grpclog/BUILD b/vendor/google.golang.org/grpc/grpclog/BUILD index 4b225761989..4595e517367 100644 --- a/vendor/google.golang.org/grpc/grpclog/BUILD +++ b/vendor/google.golang.org/grpc/grpclog/BUILD @@ -2,11 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "grpclog.go", - "logger.go", - "loggerv2.go", - ], + srcs = ["logger.go"], importpath = 
"google.golang.org/grpc/grpclog", visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go deleted file mode 100644 index 1d71e25de50..00000000000 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpclog defines logging for grpc. -// -// All logs in transport package only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog - -import "os" - -var logger = newLoggerV2() - -// V reports whether verbosity level l is at least the requested verbose level. -func V(l int) bool { - return logger.V(l) -} - -// Info logs to the INFO log. -func Info(args ...interface{}) { - logger.Info(args...) -} - -// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { - logger.Infof(format, args...) -} - -// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { - logger.Infoln(args...) -} - -// Warning logs to the WARNING log. 
-func Warning(args ...interface{}) { - logger.Warning(args...) -} - -// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { - logger.Warningf(format, args...) -} - -// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { - logger.Warningln(args...) -} - -// Error logs to the ERROR log. -func Error(args ...interface{}) { - logger.Error(args...) -} - -// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) -} - -// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { - logger.Errorln(args...) -} - -// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. -// It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { - logger.Fatal(args...) - // Make sure fatal logs will exit. - os.Exit(1) -} - -// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. -// It calles os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) - // Make sure fatal logs will exit. - os.Exit(1) -} - -// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) - // Make sure fatal logs will exit. - os.Exit(1) -} - -// Print prints to the logger. Arguments are handled in the manner of fmt.Print. -// Deprecated: use Info. -func Print(args ...interface{}) { - logger.Info(args...) -} - -// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. -// Deprecated: use Infof. -func Printf(format string, args ...interface{}) { - logger.Infof(format, args...) -} - -// Println prints to the logger. 
Arguments are handled in the manner of fmt.Println. -// Deprecated: use Infoln. -func Println(args ...interface{}) { - logger.Infoln(args...) -} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index d03b2397bfa..2cc09be4894 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -1,25 +1,52 @@ /* * - * Copyright 2015 gRPC authors. + * Copyright 2015, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ +/* +Package grpclog defines logging for grpc. +*/ package grpclog +import ( + "log" + "os" +) + +// Use golang's standard logger by default. +// Access is not mutex-protected: do not modify except in init() +// functions. +var logger Logger = log.New(os.Stderr, "", log.LstdFlags) + // Logger mimics golang's standard Logger as an interface. -// Deprecated: use LoggerV2. type Logger interface { Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) @@ -31,53 +58,36 @@ type Logger interface { // SetLogger sets the logger that is used in grpc. Call only from // init() functions. -// Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - logger = &loggerWrapper{Logger: l} + logger = l } -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger +// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. +func Fatal(args ...interface{}) { + logger.Fatal(args...) } -func (g *loggerWrapper) Info(args ...interface{}) { - g.Logger.Print(args...) +// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. 
+func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) } -func (g *loggerWrapper) Infoln(args ...interface{}) { - g.Logger.Println(args...) +// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. +func Fatalln(args ...interface{}) { + logger.Fatalln(args...) } -func (g *loggerWrapper) Infof(format string, args ...interface{}) { - g.Logger.Printf(format, args...) +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +func Print(args ...interface{}) { + logger.Print(args...) } -func (g *loggerWrapper) Warning(args ...interface{}) { - g.Logger.Print(args...) +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +func Printf(format string, args ...interface{}) { + logger.Printf(format, args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...interface{}) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...interface{}) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +func Println(args ...interface{}) { + logger.Println(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go deleted file mode 100644 index d4932577695..00000000000 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ /dev/null @@ -1,195 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclog - -import ( - "io" - "io/ioutil" - "log" - "os" - "strconv" -) - -// LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). 
- // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} - -// SetLoggerV2 sets logger that is used in grpc to a V2 logger. -// Not mutex-protected, should be called before any gRPC functions. -func SetLoggerV2(l LoggerV2) { - logger = l -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int -} - -// NewLoggerV2 creates a loggerV2 with the provided writers. -// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). -// Error logs will be written to errorW, warningW and infoW. -// Warning logs will be written to warningW and infoW. -// Info logs will be written to infoW. -func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) -} - -// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and -// verbosity level. 
-func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} -} - -// newLoggerV2 creates a loggerV2 to be used as default logger. -// All logs are written to stderr. -func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard - - logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") - switch logLevel { - case "", "ERROR", "error": // If env is unset, set level to ERROR. - errorW = os.Stderr - case "WARNING", "warning": - warningW = os.Stderr - case "INFO", "info": - infoW = os.Stderr - } - - var v int - vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") - if vl, err := strconv.Atoi(vLevel); err == nil { - v = vl - } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) -} - -func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) -} - -func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) -} - -func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) -} - -func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) -} - -func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) -} - -func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) -} - -func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) -} - -func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) 
-} - -func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) -} - -func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). -} - -func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). -} - -func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). -} - -func (g *loggerT) V(l int) bool { - return l <= g.v -} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD b/vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD deleted file mode 100644 index 9a60f52bef1..00000000000 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -filegroup( - name = "go_default_library_protos", - srcs = ["health.proto"], - visibility = ["//visibility:public"], -) - -go_library( - name = "go_default_library", - srcs = ["health.pb.go"], - importpath = "google.golang.org/grpc/health/grpc_health_v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go deleted file mode 100644 index fdcbb9e0b7d..00000000000 --- 
a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ /dev/null @@ -1,190 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_health_v1/health.proto - -/* -Package grpc_health_v1 is a generated protocol buffer package. - -It is generated from these files: - grpc_health_v1/health.proto - -It has these top-level messages: - HealthCheckRequest - HealthCheckResponse -*/ -package grpc_health_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type HealthCheckResponse_ServingStatus int32 - -const ( - HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 - HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 - HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 -) - -var HealthCheckResponse_ServingStatus_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SERVING", - 2: "NOT_SERVING", -} -var HealthCheckResponse_ServingStatus_value = map[string]int32{ - "UNKNOWN": 0, - "SERVING": 1, - "NOT_SERVING": 2, -} - -func (x HealthCheckResponse_ServingStatus) String() string { - return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) -} -func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{1, 0} -} - -type HealthCheckRequest struct { - Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` -} - -func (m *HealthCheckRequest) Reset() { 
*m = HealthCheckRequest{} } -func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } -func (*HealthCheckRequest) ProtoMessage() {} -func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *HealthCheckRequest) GetService() string { - if m != nil { - return m.Service - } - return "" -} - -type HealthCheckResponse struct { - Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` -} - -func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } -func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } -func (*HealthCheckResponse) ProtoMessage() {} -func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { - if m != nil { - return m.Status - } - return HealthCheckResponse_UNKNOWN -} - -func init() { - proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") - proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") - proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 - -// Client API for Health service - -type HealthClient interface { - Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) -} - -type healthClient struct { - cc *grpc.ClientConn -} - -func NewHealthClient(cc *grpc.ClientConn) HealthClient { - return &healthClient{cc} -} - -func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { - out := new(HealthCheckResponse) - err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Health service - -type HealthServer interface { - Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) -} - -func RegisterHealthServer(s *grpc.Server, srv HealthServer) { - s.RegisterService(&_Health_serviceDesc, srv) -} - -func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(HealthServer).Check(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.health.v1.Health/Check", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Health_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.health.v1.Health", - HandlerType: (*HealthServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Check", - Handler: _Health_Check_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "grpc_health_v1/health.proto", -} - -func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 213 bytes of a 
gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, - 0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a, - 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, - 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, - 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, - 0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, - 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, - 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, - 0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, - 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, - 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, - 0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, - 0xb8, 0x36, 0x89, 0x0d, 0x1c, 0x82, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x53, 0x2b, 0x65, - 0x20, 0x60, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto deleted file mode 100644 index 6072fdc3b80..00000000000 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package grpc.health.v1; - -message HealthCheckRequest { - string service = 1; -} - -message HealthCheckResponse { - enum ServingStatus { - UNKNOWN = 0; - SERVING = 1; - NOT_SERVING = 2; - } - ServingStatus status = 1; -} - -service Health{ - rpc Check(HealthCheckRequest) returns (HealthCheckResponse); -} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 06dc825b9fb..a6921614572 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -1,18 +1,33 @@ /* * - * Copyright 2016 gRPC authors. + * Copyright 2016, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -27,15 +42,15 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC // and it is the responsibility of the interceptor to call it. -// This is an EXPERIMENTAL API. +// This is the EXPERIMENTAL API. type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. 
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) // StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. -// This is an EXPERIMENTAL API. +// operations. streamer is the handlder to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is the EXPERIMENTAL API. type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 07083832c3c..5489143a85c 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -1,17 +1,32 @@ /* - * Copyright 2016 gRPC authors. + * Copyright 2016, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index f8adc7e6d4f..d492589c96b 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -1,18 +1,33 @@ /* * - * Copyright 2017 gRPC authors. + * Copyright 2017, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ @@ -24,8 +39,8 @@ import ( ) // ClientParameters is used to set keepalive parameters on the client-side. -// These configure how the client will actively probe to notice when a connection is broken -// and send pings so intermediaries will be aware of the liveness of the connection. +// These configure how the client will actively probe to notice when a connection broken +// and to cause activity so intermediaries are aware the connection is still in use. // Make sure these parameters are set in coordination with the keepalive policy on the server, // as incompatible settings can result in closing of connection. type ClientParameters struct { diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 589161d57fa..a4f2de026db 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -1,23 +1,38 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
* - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ // Package metadata define the structure of the metadata supported by gRPC library. -// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata. +// Please refer to http://www.grpc.io/docs/guides/wire.html for more information about custom-metadata. package metadata import ( @@ -36,17 +51,8 @@ func DecodeKeyValue(k, v string) (string, string, error) { // two convenience functions New and Pairs to generate MD. type MD map[string][]string -// New creates an MD from a given key-value map. -// -// Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. -// Uppercase letters are automatically converted to lowercase. 
-// -// Keys beginning with "grpc-" are reserved for grpc-internal use only and may -// result in errors if set in metadata. +// New creates a MD from given key-value map. +// Keys are automatically converted to lowercase. func New(m map[string]string) MD { md := MD{} for k, val := range m { @@ -58,16 +64,7 @@ func New(m map[string]string) MD { // Pairs returns an MD formed by the mapping of key, value ... // Pairs panics if len(kv) is odd. -// -// Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. -// Uppercase letters are automatically converted to lowercase. -// -// Keys beginning with "grpc-" are reserved for grpc-internal use only and may -// result in errors if set in metadata. +// Keys are automatically converted to lowercase. func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) @@ -94,9 +91,9 @@ func (md MD) Copy() MD { return Join(md) } -// Join joins any number of mds into a single MD. +// Join joins any number of MDs into a single MD. // The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. +// the MDs containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -110,6 +107,11 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} +// NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated. +func NewContext(ctx context.Context, md MD) context.Context { + return NewOutgoingContext(ctx, md) +} + // NewIncomingContext creates a new context with incoming md attached. 
func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) @@ -120,17 +122,22 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, md) } -// FromIncomingContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. +// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated. +func FromContext(ctx context.Context) (md MD, ok bool) { + return FromIncomingContext(ctx) +} + +// FromIncomingContext returns the incoming MD in ctx if it exists. The +// returned md should be immutable, writing to it may cause races. +// Modification should be made to the copies of the returned md. func FromIncomingContext(ctx context.Context) (md MD, ok bool) { md, ok = ctx.Value(mdIncomingKey{}).(MD) return } -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to the copies of the returned MD. +// FromOutgoingContext returns the outgoing MD in ctx if it exists. The +// returned md should be immutable, writing to it may cause races. +// Modification should be made to the copies of the returned md. 
func FromOutgoingContext(ctx context.Context) (md MD, ok bool) { md, ok = ctx.Value(mdOutgoingKey{}).(MD) return diff --git a/vendor/google.golang.org/grpc/naming/BUILD b/vendor/google.golang.org/grpc/naming/BUILD index ea07a9fb642..2318033a3c4 100644 --- a/vendor/google.golang.org/grpc/naming/BUILD +++ b/vendor/google.golang.org/grpc/naming/BUILD @@ -2,18 +2,9 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "dns_resolver.go", - "go17.go", - "go18.go", - "naming.go", - ], + srcs = ["naming.go"], importpath = "google.golang.org/grpc/naming", visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - ], ) filegroup( diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go deleted file mode 100644 index 7e69a2ca0a6..00000000000 --- a/vendor/google.golang.org/grpc/naming/dns_resolver.go +++ /dev/null @@ -1,290 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package naming - -import ( - "errors" - "fmt" - "net" - "strconv" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc/grpclog" -) - -const ( - defaultPort = "443" - defaultFreq = time.Minute * 30 -) - -var ( - errMissingAddr = errors.New("missing address") - errWatcherClose = errors.New("watcher has been closed") -) - -// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and -// create watchers that poll the DNS server using the frequency set by freq. -func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { - return &dnsResolver{freq: freq}, nil -} - -// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create -// watchers that poll the DNS server using the default frequency defined by defaultFreq. -func NewDNSResolver() (Resolver, error) { - return NewDNSResolverWithFreq(defaultFreq) -} - -// dnsResolver handles name resolution for names following the DNS scheme -type dnsResolver struct { - // frequency of polling the DNS server that the watchers created by this resolver will use. - freq time.Duration -} - -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false - } - if ip.To4() != nil { - return addr, true - } - return "[" + addr + "]", true -} - -// parseTarget takes the user input target string, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets -// are strippd when setting the host. 
-// examples: -// target: "www.google.com" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" -// target: ":80" returns host: "localhost", port: "80" -// target: ":" returns host: "localhost", port: "443" -func parseTarget(target string) (host, port string, err error) { - if target == "" { - return "", "", errMissingAddr - } - - if ip := net.ParseIP(target); ip != nil { - // target is an IPv4 or IPv6(without brackets) address - return target, defaultPort, nil - } - if host, port, err := net.SplitHostPort(target); err == nil { - // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port - if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. - host = "localhost" - } - if port == "" { - // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. - port = defaultPort - } - return host, port, nil - } - if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { - // target doesn't have port - return host, port, nil - } - return "", "", fmt.Errorf("invalid target address %v", target) -} - -// Resolve creates a watcher that watches the name resolution of the target. 
-func (r *dnsResolver) Resolve(target string) (Watcher, error) { - host, port, err := parseTarget(target) - if err != nil { - return nil, err - } - - if net.ParseIP(host) != nil { - ipWatcher := &ipWatcher{ - updateChan: make(chan *Update, 1), - } - host, _ = formatIP(host) - ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} - return ipWatcher, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - return &dnsWatcher{ - r: r, - host: host, - port: port, - ctx: ctx, - cancel: cancel, - t: time.NewTimer(0), - }, nil -} - -// dnsWatcher watches for the name resolution update for a specific target -type dnsWatcher struct { - r *dnsResolver - host string - port string - // The latest resolved address set - curAddrs map[string]*Update - ctx context.Context - cancel context.CancelFunc - t *time.Timer -} - -// ipWatcher watches for the name resolution update for an IP address. -type ipWatcher struct { - updateChan chan *Update -} - -// Next returns the adrress resolution Update for the target. For IP address, -// the resolution is itself, thus polling name server is unncessary. Therefore, -// Next() will return an Update the first time it is called, and will be blocked -// for all following calls as no Update exisits until watcher is closed. -func (i *ipWatcher) Next() ([]*Update, error) { - u, ok := <-i.updateChan - if !ok { - return nil, errWatcherClose - } - return []*Update{u}, nil -} - -// Close closes the ipWatcher. -func (i *ipWatcher) Close() { - close(i.updateChan) -} - -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the server is a backend server. - Backend AddressType = iota - // GRPCLB indicates the server is a grpclb load balancer. - GRPCLB -) - -// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. 
The -// name resolver used by the grpclb balancer is required to provide this type of metadata in -// its address updates. -type AddrMetadataGRPCLB struct { - // AddrType is the type of server (grpc load balancer or backend). - AddrType AddressType - // ServerName is the name of the grpc load balancer. Used for authentication. - ServerName string -} - -// compileUpdate compares the old resolved addresses and newly resolved addresses, -// and generates an update list -func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { - var res []*Update - for a, u := range w.curAddrs { - if _, ok := newAddrs[a]; !ok { - u.Op = Delete - res = append(res, u) - } - } - for a, u := range newAddrs { - if _, ok := w.curAddrs[a]; !ok { - res = append(res, u) - } - } - return res -} - -func (w *dnsWatcher) lookupSRV() map[string]*Update { - newAddrs := make(map[string]*Update) - _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) - if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil - } - for _, s := range srvs { - lbAddrs, err := lookupHost(w.ctx, s.Target) - if err != nil { - grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) - continue - } - for _, a := range lbAddrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs[addr] = &Update{Addr: addr, - Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} - } - } - return newAddrs -} - -func (w *dnsWatcher) lookupHost() map[string]*Update { - newAddrs := make(map[string]*Update) - addrs, err := lookupHost(w.ctx, w.host) - if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil - } - for _, a := range addrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + w.port - 
newAddrs[addr] = &Update{Addr: addr} - } - return newAddrs -} - -func (w *dnsWatcher) lookup() []*Update { - newAddrs := w.lookupSRV() - if newAddrs == nil { - // If failed to get any balancer address (either no corresponding SRV for the - // target, or caused by failure during resolution/parsing of the balancer target), - // return any A record info available. - newAddrs = w.lookupHost() - } - result := w.compileUpdate(newAddrs) - w.curAddrs = newAddrs - return result -} - -// Next returns the resolved address update(delta) for the target. If there's no -// change, it will sleep for 30 mins and try to resolve again after that. -func (w *dnsWatcher) Next() ([]*Update, error) { - for { - select { - case <-w.ctx.Done(): - return nil, errWatcherClose - case <-w.t.C: - } - result := w.lookup() - // Next lookup should happen after an interval defined by w.r.freq. - w.t.Reset(w.r.freq) - if len(result) > 0 { - return result, nil - } - } -} - -func (w *dnsWatcher) Close() { - w.cancel() -} diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go deleted file mode 100644 index 8bdf21e7998..00000000000 --- a/vendor/google.golang.org/grpc/naming/go17.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build go1.7, !go1.8 - -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package naming - -import ( - "net" - - "golang.org/x/net/context" -) - -var ( - lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } - lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { - return net.LookupSRV(service, proto, name) - } -) diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/naming/go18.go deleted file mode 100644 index b5a0f842748..00000000000 --- a/vendor/google.golang.org/grpc/naming/go18.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build go1.8 - -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package naming - -import "net" - -var ( - lookupHost = net.DefaultResolver.LookupHost - lookupSRV = net.DefaultResolver.LookupSRV -) diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go index 1af7e32f86d..c2e0871e6f8 100644 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index 317b8b9d09a..bfa6205ba9e 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -27,8 +42,7 @@ import ( "google.golang.org/grpc/credentials" ) -// Peer contains the information of the peer for an RPC, such as the address -// and authentication information. +// Peer contains the information of the peer for an RPC. type Peer struct { // Addr is the peer address. Addr net.Addr diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go deleted file mode 100644 index 9085dbc9c98..00000000000 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" - "google.golang.org/grpc/transport" -) - -// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick -// actions and unblock when there's a picker update. -type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker -} - -func newPickerWrapper() *pickerWrapper { - bp := &pickerWrapper{blockingCh: make(chan struct{})} - return bp -} - -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. -func (bp *pickerWrapper) updatePicker(p balancer.Picker) { - bp.mu.Lock() - if bp.done { - bp.mu.Unlock() - return - } - bp.picker = p - // bp.blockingCh should never be nil. - close(bp.blockingCh) - bp.blockingCh = make(chan struct{}) - bp.mu.Unlock() -} - -// pick returns the transport that will be used for the RPC. -// It may block in the following cases: -// - there's no picker -// - the current picker returns ErrNoSubConnAvailable -// - the current picker returns other errors and failfast is false. -// - the subConn returned by the current picker is not READY -// When one of these situations happens, pick blocks until the picker gets updated. -func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { - var ( - p balancer.Picker - ch chan struct{} - ) - - for { - bp.mu.Lock() - if bp.done { - bp.mu.Unlock() - return nil, nil, ErrClientConnClosing - } - - if bp.picker == nil { - ch = bp.blockingCh - } - if ch == bp.blockingCh { - // This could happen when either: - // - bp.picker is nil (the previous if condition), or - // - has called pick on the current picker. 
- bp.mu.Unlock() - select { - case <-ctx.Done(): - return nil, nil, ctx.Err() - case <-ch: - } - continue - } - - ch = bp.blockingCh - p = bp.picker - bp.mu.Unlock() - - subConn, put, err := p.Pick(ctx, opts) - - if err != nil { - switch err { - case balancer.ErrNoSubConnAvailable: - continue - case balancer.ErrTransientFailure: - if !failfast { - continue - } - return nil, nil, status.Errorf(codes.Unavailable, "%v", err) - default: - // err is some other error. - return nil, nil, toRPCErr(err) - } - } - - acw, ok := subConn.(*acBalancerWrapper) - if !ok { - grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") - continue - } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { - return t, put, nil - } - grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") - // If ok == false, ac.state is not READY. - // A valid picker always returns READY subConn. This means the state of ac - // just changed, and picker will be updated shortly. - // continue back to the beginning of the for loop to repick. - } -} - -func (bp *pickerWrapper) close() { - bp.mu.Lock() - defer bp.mu.Unlock() - if bp.done { - return - } - bp.done = true - close(bp.blockingCh) -} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go deleted file mode 100644 index 7f993ef5a38..00000000000 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "golang.org/x/net/context" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -func newPickfirstBuilder() balancer.Builder { - return &pickfirstBuilder{} -} - -type pickfirstBuilder struct{} - -func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} -} - -func (*pickfirstBuilder) Name() string { - return "pickfirst" -} - -type pickfirstBalancer struct { - cc balancer.ClientConn - sc balancer.SubConn -} - -func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - if err != nil { - grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) - return - } - if b.sc == nil { - b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - return - } - b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) - } else { - b.sc.UpdateAddresses(addrs) - } -} - -func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) - if b.sc != sc || s == connectivity.Shutdown { - b.sc = nil - return - } - - switch s { - case connectivity.Ready, connectivity.Idle: - b.cc.UpdateBalancerState(s, &picker{sc: sc}) - case connectivity.Connecting: - b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) - case connectivity.TransientFailure: - b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) - } -} - -func (b *pickfirstBalancer) Close() { -} - -type picker struct { - err error - sc balancer.SubConn -} - -func (p *picker) Pick(ctx context.Context, opts 
balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - if p.err != nil { - return nil, nil, p.err - } - return p.sc, nil, nil -} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go index 3e17efec61b..10188dc3433 100644 --- a/vendor/google.golang.org/grpc/proxy.go +++ b/vendor/google.golang.org/grpc/proxy.go @@ -1,18 +1,33 @@ /* * - * Copyright 2017 gRPC authors. + * Copyright 2017, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -82,8 +97,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ Header: map[string][]string{"User-Agent": {grpcUA}}, }) - req = req.WithContext(ctx) - if err := req.Write(conn); err != nil { + if err := sendHTTPRequest(ctx, req, conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) } diff --git a/vendor/google.golang.org/grpc/resolver/BUILD b/vendor/google.golang.org/grpc/resolver/BUILD deleted file mode 100644 index 51f8d6f28fe..00000000000 --- a/vendor/google.golang.org/grpc/resolver/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["resolver.go"], - importpath = "google.golang.org/grpc/resolver", - visibility = ["//visibility:public"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go 
deleted file mode 100644 index 49307e8fe9e..00000000000 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ /dev/null @@ -1,143 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package resolver defines APIs for name resolution in gRPC. -// All APIs in this package are experimental. -package resolver - -var ( - // m is a map from scheme to resolver builder. - m = make(map[string]Builder) - // defaultScheme is the default scheme to use. - defaultScheme string -) - -// TODO(bar) install dns resolver in init(){}. - -// Register registers the resolver builder to the resolver map. -// b.Scheme will be used as the scheme registered with this builder. -func Register(b Builder) { - m[b.Scheme()] = b -} - -// Get returns the resolver builder registered with the given scheme. -// If no builder is register with the scheme, the default scheme will -// be used. -// If the default scheme is not modified, "dns" will be the default -// scheme, and the preinstalled dns resolver will be used. -// If the default scheme is modified, and a resolver is registered with -// the scheme, that resolver will be returned. -// If the default scheme is modified, and no resolver is registered with -// the scheme, nil will be returned. 
-func Get(scheme string) Builder { - if b, ok := m[scheme]; ok { - return b - } - if b, ok := m[defaultScheme]; ok { - return b - } - return nil -} - -// SetDefaultScheme sets the default scheme that will be used. -// The default default scheme is "dns". -func SetDefaultScheme(scheme string) { - defaultScheme = scheme -} - -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - GRPCLB -) - -// Address represents a server the client connects to. -// This is the EXPERIMENTAL API and may be changed or extended in the future. -type Address struct { - // Addr is the server address on which a connection will be established. - Addr string - // Type is the type of this address. - Type AddressType - // ServerName is the name of this address. - // It's the name of the grpc load balancer, which will be used for authentication. - ServerName string - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - Metadata interface{} -} - -// BuildOption includes additional information for the builder to create -// the resolver. -type BuildOption struct { -} - -// ClientConn contains the callbacks for resolver to notify any updates -// to the gRPC ClientConn. -type ClientConn interface { - // NewAddress is called by resolver to notify ClientConn a new list - // of resolved addresses. - // The address list should be the complete list of resolved addresses. - NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - NewServiceConfig(serviceConfig string) -} - -// Target represents a target for gRPC, as specified in: -// https://github.com/grpc/grpc/blob/master/doc/naming.md. 
-type Target struct { - Scheme string - Authority string - Endpoint string -} - -// Builder creates a resolver that will be used to watch name resolution updates. -type Builder interface { - // Build creates a new resolver for the given target. - // - // gRPC dial calls Build synchronously, and fails if the returned error is - // not nil. - Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) - // Scheme returns the scheme supported by this resolver. - // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. - Scheme() string -} - -// ResolveNowOption includes additional information for ResolveNow. -type ResolveNowOption struct{} - -// Resolver watches for the updates on the specified target. -// Updates include address updates and service config updates. -type Resolver interface { - // ResolveNow will be called by gRPC to try to resolve the target name again. - // It's just a hint, resolver can ignore this if it's not necessary. - ResolveNow(ResolveNowOption) - // Close closes the resolver. - Close() -} - -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) -} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index 7d53964d094..00000000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,139 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "strings" - - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConnection interface. -type ccResolverWrapper struct { - cc *ClientConn - resolver resolver.Resolver - addrCh chan []resolver.Address - scCh chan string - done chan struct{} -} - -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns "", s instead. -func split2(s, sep string) (string, string) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", s - } - return spl[0], spl[1] -} - -// parseTarget splits target into a struct containing scheme, authority and -// endpoint. -func parseTarget(target string) (ret resolver.Target) { - ret.Scheme, ret.Endpoint = split2(target, "://") - ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/") - return ret -} - -// newCCResolverWrapper parses cc.target for scheme and gets the resolver -// builder for this scheme. It then builds the resolver and starts the -// monitoring goroutine for it. -// -// This function could return nil, nil, in tests for old behaviors. -// TODO(bar) never return nil, nil when DNS becomes the default resolver. 
-func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { - target := parseTarget(cc.target) - grpclog.Infof("dialing to target with scheme: %q", target.Scheme) - - rb := resolver.Get(target.Scheme) - if rb == nil { - // TODO(bar) return error when DNS becomes the default (implemented and - // registered by DNS package). - grpclog.Infof("could not get resolver for scheme: %q", target.Scheme) - return nil, nil - } - - ccr := &ccResolverWrapper{ - cc: cc, - addrCh: make(chan []resolver.Address, 1), - scCh: make(chan string, 1), - done: make(chan struct{}), - } - - var err error - ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{}) - if err != nil { - return nil, err - } - go ccr.watcher() - return ccr, nil -} - -// watcher processes address updates and service config updates sequencially. -// Otherwise, we need to resolve possible races between address and service -// config (e.g. they specify different balancer types). -func (ccr *ccResolverWrapper) watcher() { - for { - select { - case <-ccr.done: - return - default: - } - - select { - case addrs := <-ccr.addrCh: - grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs) - // TODO(bar switching) this should never be nil. Pickfirst should be default. - if ccr.cc.balancerWrapper != nil { - // TODO(bar switching) create balancer if it's nil? - ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil) - } - case sc := <-ccr.scCh: - grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) - case <-ccr.done: - return - } - } -} - -func (ccr *ccResolverWrapper) close() { - ccr.resolver.Close() - close(ccr.done) -} - -// NewAddress is called by the resolver implemenetion to send addresses to gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - select { - case <-ccr.addrCh: - default: - } - ccr.addrCh <- addrs -} - -// NewServiceConfig is called by the resolver implemenetion to send service -// configs to gPRC. 
-func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - select { - case <-ccr.scCh: - default: - } - ccr.scCh <- sc -} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 188a75fff94..34e1ad03b97 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -21,18 +36,15 @@ package grpc import ( "bytes" "compress/gzip" - stdctx "context" "encoding/binary" "io" "io/ioutil" "math" "os" - "sync" "time" "golang.org/x/net/context" "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -48,25 +60,16 @@ type Compressor interface { Type() string } -type gzipCompressor struct { - pool sync.Pool -} - // NewGZIPCompressor creates a Compressor based on GZIP. func NewGZIPCompressor() Compressor { - return &gzipCompressor{ - pool: sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(ioutil.Discard) - }, - }, - } + return &gzipCompressor{} +} + +type gzipCompressor struct { } func (c *gzipCompressor) Do(w io.Writer, p []byte) error { - z := c.pool.Get().(*gzip.Writer) - defer c.pool.Put(z) - z.Reset(w) + z := gzip.NewWriter(w) if _, err := z.Write(p); err != nil { return err } @@ -86,7 +89,6 @@ type Decompressor interface { } type gzipDecompressor struct { - pool sync.Pool } // NewGZIPDecompressor creates a Decompressor based on GZIP. 
@@ -95,26 +97,11 @@ func NewGZIPDecompressor() Decompressor { } func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { - var z *gzip.Reader - switch maybeZ := d.pool.Get().(type) { - case nil: - newZ, err := gzip.NewReader(r) - if err != nil { - return nil, err - } - z = newZ - case *gzip.Reader: - z = maybeZ - if err := z.Reset(r); err != nil { - d.pool.Put(z) - return nil, err - } + z, err := gzip.NewReader(r) + if err != nil { + return nil, err } - - defer func() { - z.Close() - d.pool.Put(z) - }() + defer z.Close() return ioutil.ReadAll(z) } @@ -124,19 +111,14 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - peer *peer.Peer - traceInfo traceInfo // in trace.go - maxReceiveMessageSize *int - maxSendMessageSize *int - creds credentials.PerRPCCredentials + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + peer *peer.Peer + traceInfo traceInfo // in trace.go } -func defaultCallInfo() *callInfo { - return &callInfo{failFast: true} -} +var defaultCallInfo = callInfo{failFast: true} // CallOption configures a Call before it starts or extracts information from // a Call after it completes. @@ -150,14 +132,6 @@ type CallOption interface { after(*callInfo) } -// EmptyCallOption does not alter the Call configuration. -// It can be embedded in another structure to carry satellite data for use -// by interceptors. -type EmptyCallOption struct{} - -func (EmptyCallOption) before(*callInfo) error { return nil } -func (EmptyCallOption) after(*callInfo) {} - type beforeCall func(c *callInfo) error func (o beforeCall) before(c *callInfo) error { return o(c) } @@ -199,8 +173,7 @@ func Peer(peer *peer.Peer) CallOption { // immediately. 
Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will retry // the call if it fails due to a transient error. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. -// Note: failFast is default to true. +// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md. Note: failFast is default to true. func FailFast(failFast bool) CallOption { return beforeCall(func(c *callInfo) error { c.failFast = failFast @@ -208,31 +181,6 @@ func FailFast(failFast bool) CallOption { }) } -// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. -func MaxCallRecvMsgSize(s int) CallOption { - return beforeCall(func(o *callInfo) error { - o.maxReceiveMessageSize = &s - return nil - }) -} - -// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. -func MaxCallSendMsgSize(s int) CallOption { - return beforeCall(func(o *callInfo) error { - o.maxSendMessageSize = &s - return nil - }) -} - -// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials -// for a call. -func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { - return beforeCall(func(c *callInfo) error { - c.creds = creds - return nil - }) -} - // The format of the payload: compressed or not? type payloadFormat uint8 @@ -249,7 +197,7 @@ type parser struct { r io.Reader // The header of a gRPC message. Find more detail - // at https://grpc.io/docs/guides/wire.html. + // at http://www.grpc.io/docs/guides/wire.html. header [5]byte } @@ -266,8 +214,8 @@ type parser struct { // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. 
-func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := io.ReadFull(p.r, p.header[:]); err != nil { return 0, nil, err } @@ -277,13 +225,13 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if length == 0 { return pf, nil, nil } - if length > uint32(maxReceiveMessageSize) { - return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + if length > uint32(maxMsgSize) { + return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize) } // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead // of making it for each message: msg = make([]byte, int(length)) - if _, err := p.r.Read(msg); err != nil { + if _, err := io.ReadFull(p.r, msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } @@ -292,20 +240,19 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt return pf, msg, nil } -// encode serializes msg and returns a buffer of message header and a buffer of msg. -// If msg is nil, it generates the message header and an empty msg buffer. -func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) { - var b []byte - const ( - payloadLen = 1 - sizeLen = 4 +// encode serializes msg and prepends the message header. If msg is nil, it +// generates the message header of 0 message length. +func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, error) { + var ( + b []byte + length uint ) - if msg != nil { var err error + // TODO(zhaoq): optimize to reduce memory alloc and copying. 
b, err = c.Marshal(msg) if err != nil { - return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + return nil, err } if outPayload != nil { outPayload.Payload = msg @@ -315,28 +262,39 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl } if cp != nil { if err := cp.Do(cbuf, b); err != nil { - return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + return nil, err } b = cbuf.Bytes() } + length = uint(len(b)) + } + if length > math.MaxUint32 { + return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) } - if uint(len(b)) > math.MaxUint32 { - return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) - } + const ( + payloadLen = 1 + sizeLen = 4 + ) - bufHeader := make([]byte, payloadLen+sizeLen) + var buf = make([]byte, payloadLen+sizeLen+len(b)) + + // Write payload format if cp == nil { - bufHeader[0] = byte(compressionNone) + buf[0] = byte(compressionNone) } else { - bufHeader[0] = byte(compressionMade) + buf[0] = byte(compressionMade) } // Write length of b into buf - binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b))) + binary.BigEndian.PutUint32(buf[1:], uint32(length)) + // Copy encoded msg to buf + copy(buf[5:], b) + if outPayload != nil { - outPayload.WireLength = payloadLen + sizeLen + len(b) + outPayload.WireLength = len(buf) } - return bufHeader, b, nil + + return buf, nil } func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { @@ -352,8 +310,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er return nil } -func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error { - pf, d, err := p.recvMsg(maxReceiveMessageSize) +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload 
*stats.InPayload) error { + pf, d, err := p.recvMsg(maxMsgSize) if err != nil { return err } @@ -369,10 +327,10 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } - if len(d) > maxReceiveMessageSize { + if len(d) > maxMsgSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize) } if err := c.Unmarshal(d, m); err != nil { return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) @@ -388,15 +346,14 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ } type rpcInfo struct { - failfast bool bytesSent bool bytesReceived bool } type rpcInfoContextKey struct{} -func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { - return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) +func newContextWithRPCInfo(ctx context.Context) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{}) } func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { @@ -406,63 +363,11 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { func updateRPCInfoInContext(ctx context.Context, s rpcInfo) { if ss, ok := rpcInfoFromContext(ctx); ok { - ss.bytesReceived = s.bytesReceived - ss.bytesSent = s.bytesSent + *ss = s } return } -// toRPCErr converts an error into an error from the status package. 
-func toRPCErr(err error) error { - if _, ok := status.FromError(err); ok { - return err - } - switch e := err.(type) { - case transport.StreamError: - return status.Error(e.Code, e.Desc) - case transport.ConnectionError: - return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded, stdctx.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled, stdctx.Canceled: - return status.Error(codes.Canceled, err.Error()) - case ErrClientConnClosing: - return status.Error(codes.FailedPrecondition, err.Error()) - } - } - return status.Error(codes.Unknown, err.Error()) -} - -// convertCode converts a standard Go error into its canonical code. Note that -// this is only used to translate the error returned by the server applications. -func convertCode(err error) codes.Code { - switch err { - case nil: - return codes.OK - case io.EOF: - return codes.OutOfRange - case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: - return codes.FailedPrecondition - case os.ErrInvalid: - return codes.InvalidArgument - case context.Canceled, stdctx.Canceled: - return codes.Canceled - case context.DeadlineExceeded, stdctx.DeadlineExceeded: - return codes.DeadlineExceeded - } - switch { - case os.IsExist(err): - return codes.AlreadyExists - case os.IsNotExist(err): - return codes.NotFound - case os.IsPermission(err): - return codes.PermissionDenied - } - return codes.Unknown -} - // Code returns the error code for err if it was produced by the rpc system. // Otherwise, it returns codes.Unknown. // @@ -493,6 +398,57 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { return status.Errorf(c, format, a...) } +// toRPCErr converts an error into an error from the status package. 
+func toRPCErr(err error) error { + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Internal, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case ErrClientConnClosing: + return status.Error(codes.FailedPrecondition, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// convertCode converts a standard Go error into its canonical code. Note that +// this is only used to translate the error returned by the server applications. +func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled: + return codes.Canceled + case context.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + // MethodConfig defines the configuration recommended by the service providers for a // particular method. // This is EXPERIMENTAL and subject to change. @@ -500,22 +456,24 @@ type MethodConfig struct { // WaitForReady indicates whether RPCs sent to this method should wait until // the connection is ready by default (!failfast). The value specified via the // gRPC client API will override the value set here. - WaitForReady *bool + WaitForReady bool // Timeout is the default timeout for RPCs sent to this method. 
The actual // deadline used will be the minimum of the value specified here and the value // set by the application via the gRPC client API. If either one is not set, // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout *time.Duration + Timeout time.Duration // MaxReqSize is the maximum allowed payload size for an individual request in a // stream (client->server) in bytes. The size which is measured is the serialized // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minimum of the value specified here and the value set + // The actual value used is the minumum of the value specified here and the value set // by the application via the gRPC client API. If either one is not set, then the other // will be used. If neither is set, then the built-in default is used. - MaxReqSize *int + // TODO: support this. + MaxReqSize uint32 // MaxRespSize is the maximum allowed payload size for an individual response in a // stream (server->client) in bytes. - MaxRespSize *int + // TODO: support this. + MaxRespSize uint32 } // ServiceConfig is provided by the service provider and contains parameters for how @@ -526,38 +484,9 @@ type ServiceConfig struct { // via grpc.WithBalancer will override this. LB Balancer // Methods contains a map for the methods in this service. - // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. - // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. - // Otherwise, the method has no MethodConfig to use. 
Methods map[string]MethodConfig } -func min(a, b *int) *int { - if *a < *b { - return a - } - return b -} - -func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { - if mcMax == nil && doptMax == nil { - return &defaultVal - } - if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) - } - if mcMax != nil { - return mcMax - } - return doptMax -} - -// SupportPackageIsVersion3 is referenced from generated protocol buffer files. -// The latest support package version is 4. -// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the -// next support package version update. -const SupportPackageIsVersion3 = true - // SupportPackageIsVersion4 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the grpc package. // @@ -567,6 +496,6 @@ const SupportPackageIsVersion3 = true const SupportPackageIsVersion4 = true // Version is the current grpc version. -const Version = "1.7.5" +const Version = "1.3.0" const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 787665dfeb3..b15f71c6c18 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ @@ -23,7 +38,6 @@ import ( "errors" "fmt" "io" - "math" "net" "net/http" "reflect" @@ -47,11 +61,6 @@ import ( "google.golang.org/grpc/transport" ) -const ( - defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 - defaultServerMaxSendMessageSize = math.MaxInt32 -) - type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. @@ -87,7 +96,6 @@ type Server struct { mu sync.Mutex // guards following lis map[net.Listener]bool conns map[io.Closer]bool - serve bool drain bool ctx context.Context cancel context.CancelFunc @@ -99,69 +107,27 @@ type Server struct { } type options struct { - creds credentials.TransportCredentials - codec Codec - cp Compressor - dc Decompressor - unaryInt UnaryServerInterceptor - streamInt StreamServerInterceptor - inTapHandle tap.ServerInHandle - statsHandler stats.Handler - maxConcurrentStreams uint32 - maxReceiveMessageSize int - maxSendMessageSize int - useHandlerImpl bool // use http.Handler-based server - unknownStreamDesc *StreamDesc - keepaliveParams keepalive.ServerParameters - keepalivePolicy keepalive.EnforcementPolicy - initialWindowSize int32 - initialConnWindowSize int32 - writeBufferSize int - readBufferSize int - connectionTimeout time.Duration + creds credentials.TransportCredentials + codec Codec + cp Compressor + dc Decompressor + maxMsgSize int + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + useHandlerImpl bool // use http.Handler-based server + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy } -var defaultServerOptions = options{ - maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, - maxSendMessageSize: defaultServerMaxSendMessageSize, - connectionTimeout: 120 * time.Second, 
-} +var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit -// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +// A ServerOption sets options. type ServerOption func(*options) -// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched -// before doing a write on the wire. -func WriteBufferSize(s int) ServerOption { - return func(o *options) { - o.writeBufferSize = s - } -} - -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -func ReadBufferSize(s int) ServerOption { - return func(o *options) { - o.readBufferSize = s - } -} - -// InitialWindowSize returns a ServerOption that sets window size for stream. -// The lower bound for window size is 64K and any value smaller than that will be ignored. -func InitialWindowSize(s int32) ServerOption { - return func(o *options) { - o.initialWindowSize = s - } -} - -// InitialConnWindowSize returns a ServerOption that sets window size for a connection. -// The lower bound for window size is 64K and any value smaller than that will be ignored. -func InitialConnWindowSize(s int32) ServerOption { - return func(o *options) { - o.initialConnWindowSize = s - } -} - // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { return func(o *options) { @@ -197,25 +163,11 @@ func RPCDecompressor(dc Decompressor) ServerOption { } } -// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. -// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead. +// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages. +// If this is not set, gRPC uses the default 4MB. 
func MaxMsgSize(m int) ServerOption { - return MaxRecvMsgSize(m) -} - -// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. -// If this is not set, gRPC uses the default 4MB. -func MaxRecvMsgSize(m int) ServerOption { return func(o *options) { - o.maxReceiveMessageSize = m - } -} - -// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. -// If this is not set, gRPC uses the default 4MB. -func MaxSendMsgSize(m int) ServerOption { - return func(o *options) { - o.maxSendMessageSize = m + o.maxMsgSize = m } } @@ -240,7 +192,7 @@ func Creds(c credentials.TransportCredentials) ServerOption { func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { return func(o *options) { if o.unaryInt != nil { - panic("The unary server interceptor was already set and may not be reset.") + panic("The unary server interceptor has been set.") } o.unaryInt = i } @@ -251,7 +203,7 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { func StreamInterceptor(i StreamServerInterceptor) ServerOption { return func(o *options) { if o.streamInt != nil { - panic("The stream server interceptor was already set and may not be reset.") + panic("The stream server interceptor has been set.") } o.streamInt = i } @@ -262,7 +214,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { func InTapHandle(h tap.ServerInHandle) ServerOption { return func(o *options) { if o.inTapHandle != nil { - panic("The tap handle was already set and may not be reset.") + panic("The tap handle has been set.") } o.inTapHandle = h } @@ -277,10 +229,10 @@ func StatsHandler(h stats.Handler) ServerOption { // UnknownServiceHandler returns a ServerOption that allows for adding a custom // unknown service handler. 
The provided method is a bidi-streaming RPC service -// handler that will be invoked instead of returning the "unimplemented" gRPC +// handler that will be invoked instead of returning the the "unimplemented" gRPC // error whenever a request is received for an unregistered service or method. // The handling function has full access to the Context of the request and the -// stream, and the invocation bypasses interceptors. +// stream, and the invocation passes through interceptors. func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { return func(o *options) { o.unknownStreamDesc = &StreamDesc{ @@ -293,20 +245,11 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { } } -// ConnectionTimeout returns a ServerOption that sets the timeout for -// connection establishment (up to and including HTTP/2 handshaking) for all -// new connections. If this is not set, the default is 120 seconds. A zero or -// negative value will result in an immediate timeout. -func ConnectionTimeout(d time.Duration) ServerOption { - return func(o *options) { - o.connectionTimeout = d - } -} - // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { - opts := defaultServerOptions + var opts options + opts.maxMsgSize = defaultMaxMsgSize for _, o := range opt { o(&opts) } @@ -345,8 +288,8 @@ func (s *Server) errorf(format string, a ...interface{}) { } } -// RegisterService registers a service and its implementation to the gRPC -// server. It is called from the IDL generated code. This must be called before +// RegisterService register a service and its implementation to the gRPC +// server. Called from the IDL generated code. This must be called before // invoking Serve. 
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { ht := reflect.TypeOf(sd.HandlerType).Elem() @@ -361,9 +304,6 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) - if s.serve { - grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) - } if _, ok := s.m[sd.ServiceName]; ok { grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } @@ -394,7 +334,7 @@ type MethodInfo struct { IsServerStream bool } -// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service. type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. @@ -452,7 +392,6 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") - s.serve = true if s.lis == nil { s.mu.Unlock() lis.Close() @@ -488,12 +427,10 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("Accept error: %v; retrying in %v", err, tempDelay) s.mu.Unlock() - timer := time.NewTimer(tempDelay) select { - case <-timer.C: + case <-time.After(tempDelay): case <-s.ctx.Done(): } - timer.Stop() continue } s.mu.Lock() @@ -511,18 +448,16 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn is run in its own goroutine and handles a just-accepted // connection that has not had any I/O performed on it yet. 
func (s *Server) handleRawConn(rawConn net.Conn) { - rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) conn, authInfo, err := s.useTransportAuthenticator(rawConn) if err != nil { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - // If serverHandshake returns ErrConnDispatched, keep rawConn open. + grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandShake returns ErrConnDispatched, keep rawConn open. if err != credentials.ErrConnDispatched { rawConn.Close() } - rawConn.SetDeadline(time.Time{}) return } @@ -535,32 +470,25 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Unlock() if s.opts.useHandlerImpl { - rawConn.SetDeadline(time.Time{}) s.serveUsingHandler(conn) } else { - st := s.newHTTP2Transport(conn, authInfo) - if st == nil { - return - } - rawConn.SetDeadline(time.Time{}) - s.serveStreams(st) + s.serveHTTP2Transport(conn, authInfo) } } -// newHTTP2Transport sets up a http/2 transport (using the -// gRPC http2 server transport in transport/http2_server.go). -func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { +// serveHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go) and +// serves streams on it. +// This is run in its own goroutine (it does network I/O in +// transport.NewServerTransport). 
+func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { config := &transport.ServerConfig{ - MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, - InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, - KeepaliveParams: s.opts.keepaliveParams, - KeepalivePolicy: s.opts.keepalivePolicy, - InitialWindowSize: s.opts.initialWindowSize, - InitialConnWindowSize: s.opts.initialConnWindowSize, - WriteBufferSize: s.opts.writeBufferSize, - ReadBufferSize: s.opts.readBufferSize, + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -568,14 +496,14 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) - return nil + grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) + return } if !s.addConn(st) { st.Close() - return nil + return } - return st + s.serveStreams(st) } func (s *Server) serveStreams(st transport.ServerTransport) { @@ -626,30 +554,6 @@ func (s *Server) serveUsingHandler(conn net.Conn) { }) } -// ServeHTTP implements the Go standard library's http.Handler -// interface by responding to the gRPC request r, by looking up -// the requested gRPC method in the gRPC server s. -// -// The provided HTTP request must have arrived on an HTTP/2 -// connection. When using the Go standard library's server, -// practically this means that the Request must also have arrived -// over TLS. 
-// -// To share one port (such as 443 for https) between gRPC and an -// existing http.Handler, use a root http.Handler such as: -// -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } -// -// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally -// separate from grpc-go's HTTP/2 server. Performance and features may vary -// between the two paths. ServeHTTP does not support some gRPC features -// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL -// and subject to change. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r) if err != nil { @@ -714,15 +618,18 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str if s.opts.statsHandler != nil { outPayload = &stats.OutPayload{} } - hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) + p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) if err != nil { - grpclog.Errorln("grpc: server failed to encode response: ", err) - return err + // This typically indicates a fatal issue (e.g., memory + // corruption or hardware faults) the application program + // cannot handle. + // + // TODO(zhaoq): There exist other options also such as only closing the + // faulty stream locally and remotely (Other streams can keep going). Find + // the optimal option. + grpclog.Fatalf("grpc: Server failed to encode response %v", err) } - if len(data) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(data), s.opts.maxSendMessageSize) - } - err = t.Write(stream, hdr, data, opts) + err = t.Write(stream, p, opts) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() s.opts.statsHandler.HandleRPC(stream.Context(), outPayload) @@ -737,7 +644,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. BeginTime: time.Now(), } sh.HandleRPC(stream.Context(), begin) - defer func() { + } + defer func() { + if sh != nil { end := &stats.End{ EndTime: time.Now(), } @@ -745,8 +654,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. end.Error = toRPCErr(err) } sh.HandleRPC(stream.Context(), end) - }() - } + } + }() if trInfo != nil { defer trInfo.tr.Finish() trInfo.firstLine.client = false @@ -763,137 +672,139 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. stream.SetSendCompress(s.opts.cp.Type()) } p := &parser{r: stream} - pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err - } - if err == io.ErrUnexpectedEOF { - err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) - } - if err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) - } - } - return err - } - - if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - return err - } - if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - - // TODO checkRecvPayload always return RPC error. Add a return here if necessary. - } - var inPayload *stats.InPayload - if sh != nil { - inPayload = &stats.InPayload{ - RecvTime: time.Now(), - } - } - df := func(v interface{}) error { - if inPayload != nil { - inPayload.WireLength = len(req) - } - if pf == compressionMade { - var err error - req, err = s.opts.dc.Do(bytes.NewReader(req)) - if err != nil { - return Errorf(codes.Internal, err.Error()) - } - } - if len(req) > s.opts.maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with - // java implementation. - return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", len(req), s.opts.maxReceiveMessageSize) - } - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) - } - if inPayload != nil { - inPayload.Payload = v - inPayload.Data = req - inPayload.Length = len(req) - sh.HandleRPC(stream.Context(), inPayload) - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) - if appErr != nil { - appStatus, ok := status.FromError(appErr) - if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(convertCode(appErr), appErr.Error()) - appStatus, _ = status.FromError(appErr) - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer(appStatus.Message()), true) - trInfo.tr.SetError() - } - if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) - } - return appErr - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) - } - opts := &transport.Options{ - Last: true, - Delay: false, - } - if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + for { // TODO: delete + pf, req, err := p.recvMsg(s.opts.maxMsgSize) if err == io.EOF { // The entire stream is done (for unary RPC only). return err } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) - } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + if err == io.ErrUnexpectedEOF { + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) + } + } + return err + } + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + return err + } + if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + + // TODO checkRecvPayload always return RPC error. Add a return here if necessary. + } + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), } } - return err + df := func(v interface{}) error { + if inPayload != nil { + inPayload.WireLength = len(req) + } + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + return Errorf(codes.Internal, err.Error()) + } + } + if len(req) > s.opts.maxMsgSize { + // TODO: Revisit the error code. 
Currently keep it consistent with + // java implementation. + return status.Errorf(codes.Internal, "grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize) + } + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if inPayload != nil { + inPayload.Payload = v + inPayload.Data = req + inPayload.Length = len(req) + sh.HandleRPC(stream.Context(), inPayload) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(convertCode(appErr), appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + return t.WriteStatus(stream, status.New(codes.OK, "")) } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) - } - // TODO: Should we be logging if writing status failed here, like above? - // Should the logging be in WriteStatus? Should we ignore the WriteStatus - // error or allow the stats handler to see it? - return t.WriteStatus(stream, status.New(codes.OK, "")) } func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { @@ -903,7 +814,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp BeginTime: time.Now(), } sh.HandleRPC(stream.Context(), begin) - defer func() { + } + defer func() { + if sh != nil { end := &stats.End{ EndTime: time.Now(), } @@ -911,22 +824,24 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } sh.HandleRPC(stream.Context(), end) - }() - } + } + }() if s.opts.cp != nil { stream.SetSendCompress(s.opts.cp.Type()) } ss := &serverStream{ - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.opts.codec, - cp: s.opts.cp, - dc: s.opts.dc, - maxReceiveMessageSize: s.opts.maxReceiveMessageSize, - maxSendMessageSize: s.opts.maxSendMessageSize, - trInfo: trInfo, - statsHandler: sh, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + 
cp: s.opts.cp, + dc: s.opts.dc, + maxMsgSize: s.opts.maxMsgSize, + trInfo: trInfo, + statsHandler: sh, + } + if ss.cp != nil { + ss.cbuf = new(bytes.Buffer) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -998,12 +913,12 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.InvalidArgument, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1028,7 +943,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1058,7 +973,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1096,9 +1011,8 @@ func (s *Server) Stop() { s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. +// GracefulStop stops the gRPC server gracefully. 
It stops the server to accept new +// connections and RPCs and blocks until all the pending RPCs are finished. func (s *Server) GracefulStop() { s.mu.Lock() defer s.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go index 05b384c6931..26e1a8e2f08 100644 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -1,18 +1,33 @@ /* * - * Copyright 2016 gRPC authors. + * Copyright 2016, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -30,22 +45,19 @@ type ConnTagInfo struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr + // TODO add QOS related fields. } // RPCTagInfo defines the relevant information needed by RPC context tagger. type RPCTagInfo struct { // FullMethodName is the RPC method in the format of /package.service/method. FullMethodName string - // FailFast indicates if this RPC is failfast. - // This field is only valid on client side, it's always false on server side. - FailFast bool } // Handler defines the interface for the related stats handling (e.g., RPCs, connections). type Handler interface { // TagRPC can attach some information to the given context. - // The context used for the rest lifetime of the RPC will be derived from - // the returned context. + // The returned context is used in the rest lifetime of the RPC. TagRPC(context.Context, *RPCTagInfo) context.Context // HandleRPC processes the RPC stats. 
HandleRPC(context.Context, RPCStats) diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index e844541e9c0..c2c9a9dfa23 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -1,23 +1,36 @@ /* * - * Copyright 2016 gRPC authors. + * Copyright 2016, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ -//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto - // Package stats is for collecting and reporting various network and RPC stats. // This package is for monitoring purpose only. All fields are read-only. // All APIs are experimental. @@ -26,8 +39,6 @@ package stats import ( "net" "time" - - "golang.org/x/net/context" ) // RPCStats contains stats information about RPCs. @@ -38,7 +49,7 @@ type RPCStats interface { } // Begin contains stats when an RPC begins. -// FailFast is only valid if this Begin is from client side. +// FailFast are only valid if Client is true. type Begin struct { // Client is true if this Begin is from client side. Client bool @@ -48,7 +59,7 @@ type Begin struct { FailFast bool } -// IsClient indicates if the stats information is from client side. +// IsClient indicates if this is from client side. func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} @@ -69,19 +80,19 @@ type InPayload struct { RecvTime time.Time } -// IsClient indicates if the stats information is from client side. +// IsClient indicates if this is from client side. func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} // InHeader contains stats when a header is received. +// FullMethod, addresses and Compression are only valid if Client is false. type InHeader struct { // Client is true if this InHeader is from client side. 
Client bool // WireLength is the wire length of header. WireLength int - // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. @@ -92,7 +103,7 @@ type InHeader struct { Compression string } -// IsClient indicates if the stats information is from client side. +// IsClient indicates if this is from client side. func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} @@ -105,7 +116,7 @@ type InTrailer struct { WireLength int } -// IsClient indicates if the stats information is from client side. +// IsClient indicates if this is from client side. func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} @@ -126,17 +137,19 @@ type OutPayload struct { SentTime time.Time } -// IsClient indicates if this stats information is from client side. +// IsClient indicates if this is from client side. func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} // OutHeader contains stats when a header is sent. +// FullMethod, addresses and Compression are only valid if Client is true. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool + // WireLength is the wire length of header. + WireLength int - // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. @@ -147,7 +160,7 @@ type OutHeader struct { Compression string } -// IsClient indicates if this stats information is from client side. +// IsClient indicates if this is from client side. 
func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} @@ -160,7 +173,7 @@ type OutTrailer struct { WireLength int } -// IsClient indicates if this stats information is from client side. +// IsClient indicates if this is from client side. func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} @@ -171,9 +184,7 @@ type End struct { Client bool // EndTime is the time when the RPC ends. EndTime time.Time - // Error is the error the RPC ended with. It is an error generated from - // status.Status and can be converted back to status.Status using - // status.FromError if non-nil. + // Error is the error just happened. It implements status.Status if non-nil. Error error } @@ -210,85 +221,3 @@ type ConnEnd struct { func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} - -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - -// SetTags attaches stats tagging data to the context, which will be sent in -// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to -// SetTags will overwrite the values from earlier calls. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) -} - -// Tags returns the tags from the context for the inbound RPC. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. 
-func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. -func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. -func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b -} - -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - -// SetTrace attaches stats tagging data to the context, which will be sent in -// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to -// SetTrace will overwrite the values from earlier calls. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) -} - -// Trace returns the trace from the context for the inbound RPC. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. 
-func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. -func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b -} diff --git a/vendor/google.golang.org/grpc/status/BUILD b/vendor/google.golang.org/grpc/status/BUILD index 84cb8afc49f..a92cd5f4e04 100644 --- a/vendor/google.golang.org/grpc/status/BUILD +++ b/vendor/google.golang.org/grpc/status/BUILD @@ -7,7 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/golang/protobuf/ptypes:go_default_library", "//vendor/google.golang.org/genproto/googleapis/rpc/status:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", ], diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 871dc4b31c7..99a4cbe5112 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -1,18 +1,33 @@ /* * - * Copyright 2017 gRPC authors. + * Copyright 2017, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -28,11 +43,9 @@ package status import ( - "errors" "fmt" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" ) @@ -130,39 +143,3 @@ func FromError(err error) (s *Status, ok bool) { } return nil, false } - -// WithDetails returns a new status with the provided details messages appended to the status. 
-// If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - // s.Code() != OK implies that s.Proto() != nil. - p := s.Proto() - for _, detail := range details { - any, err := ptypes.MarshalAny(detail) - if err != nil { - return nil, err - } - p.Details = append(p.Details, any) - } - return &Status{s: p}, nil -} - -// Details returns a slice of details messages attached to the status. -// If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { - if s == nil || s.s == nil { - return nil - } - details := make([]interface{}, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { - details = append(details, err) - continue - } - details = append(details, detail.Message) - } - return details -} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 75eab40b109..33f1c787b34 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -27,10 +42,8 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/transport" @@ -60,17 +73,11 @@ type Stream interface { // side. On server side, it simply returns the error to the caller. 
// SendMsg is called by generated code. Also Users can call SendMsg // directly when it is really needed in their use cases. - // It's safe to have a goroutine calling SendMsg and another goroutine calling - // recvMsg on the same stream at the same time. - // But it is not safe to call SendMsg on the same stream in different goroutines. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message or the stream is // done. On client side, it returns io.EOF when the stream is done. On // any other error, it aborts the stream and returns an RPC status. On // server side, it simply returns the error to the caller. - // It's safe to have a goroutine calling SendMsg and another goroutine calling - // recvMsg on the same stream at the same time. - // But it is not safe to call RecvMsg on the same stream in different goroutines. RecvMsg(m interface{}) error } @@ -86,11 +93,6 @@ type ClientStream interface { // CloseSend closes the send direction of the stream. It closes the stream // when non-nil error is met. CloseSend() error - // Stream.SendMsg() may return a non-nil error when something wrong happens sending - // the request. The returned error indicates the status of this sending, not the final - // status of the RPC. - // Always call Stream.RecvMsg() to get the final status if you care about the status of - // the RPC. 
Stream } @@ -107,48 +109,29 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var ( t transport.ClientTransport s *transport.Stream - done func(balancer.DoneInfo) + put func() cancel context.CancelFunc ) - c := defaultCallInfo() - mc := cc.GetMethodConfig(method) - if mc.WaitForReady != nil { - c.failFast = !*mc.WaitForReady + c := defaultCallInfo + if mc, ok := cc.getMethodConfig(method); ok { + c.failFast = !mc.WaitForReady + if mc.Timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, mc.Timeout) + } } - - if mc.Timeout != nil { - ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) - defer func() { - if err != nil { - cancel() - } - }() - } - - opts = append(cc.dopts.callOptions, opts...) for _, o := range opts { - if err := o.before(c); err != nil { + if err := o.before(&c); err != nil { return nil, toRPCErr(err) } } - c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) - c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) - callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, - // If it's not client streaming, we should already have the request to be sent, - // so we don't flush the header. - // If it's client streaming, the user may never send a request or send it any - // time soon, so we ask the transport to flush the header. 
- Flush: desc.ClientStreams, + Flush: desc.ServerStreams && desc.ClientStreams, } if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } - if c.creds != nil { - callHdr.Creds = c.creds - } var trInfo traceInfo if EnableTracing { trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) @@ -168,29 +151,32 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - ctx = newContextWithRPCInfo(ctx, c.failFast) + ctx = newContextWithRPCInfo(ctx) sh := cc.dopts.copts.StatsHandler if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) begin := &stats.Begin{ Client: true, BeginTime: time.Now(), FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - defer func() { - if err != nil { - // Only handle end stats if err != nil. - end := &stats.End{ - Client: true, - Error: err, - } - sh.HandleRPC(ctx, end) + } + defer func() { + if err != nil && sh != nil { + // Only handle end stats if err != nil. + end := &stats.End{ + Client: true, + Error: err, } - }() + sh.HandleRPC(ctx, end) + } + }() + gopts := BalancerGetOptions{ + BlockingWait: !c.failFast, } for { - t, done, err = cc.getTransport(ctx, c.failFast) + t, put, err = cc.getTransport(ctx, gopts) if err != nil { // TODO(zhaoq): Probably revisit the error handling. if _, ok := status.FromError(err); ok { @@ -208,15 +194,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth s, err = t.NewStream(ctx, callHdr) if err != nil { - if _, ok := err.(transport.ConnectionError); ok && done != nil { + if _, ok := err.(transport.ConnectionError); ok && put != nil { // If error is connection error, transport was sending data on wire, // and we are not sure if anything has been sent on wire. // If error is not connection error, we are sure nothing has been sent. 
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) } - if done != nil { - done(balancer.DoneInfo{Err: err}) - done = nil + if put != nil { + put() + put = nil } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -225,23 +211,20 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } break } - // Set callInfo.peer object from stream's context. - if peer, ok := peer.FromContext(s.Context()); ok { - c.peer = peer - } cs := &clientStream{ - opts: opts, - c: c, - desc: desc, - codec: cc.dopts.codec, - cp: cc.dopts.cp, - dc: cc.dopts.dc, - cancel: cancel, + opts: opts, + c: c, + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + maxMsgSize: cc.dopts.maxMsgSize, + cancel: cancel, - done: done, - t: t, - s: s, - p: &parser{r: s}, + put: put, + t: t, + s: s, + p: &parser{r: s}, tracing: EnableTracing, trInfo: trInfo, @@ -249,6 +232,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth statsCtx: ctx, statsHandler: cc.dopts.copts.StatsHandler, } + if cc.dopts.cp != nil { + cs.cbuf = new(bytes.Buffer) + } // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination // when there is no pending I/O operations on this stream. go func() { @@ -277,21 +263,23 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // clientStream implements a client side Stream. 
type clientStream struct { - opts []CallOption - c *callInfo - t transport.ClientTransport - s *transport.Stream - p *parser - desc *StreamDesc - codec Codec - cp Compressor - dc Decompressor - cancel context.CancelFunc + opts []CallOption + c callInfo + t transport.ClientTransport + s *transport.Stream + p *parser + desc *StreamDesc + codec Codec + cp Compressor + cbuf *bytes.Buffer + dc Decompressor + maxMsgSize int + cancel context.CancelFunc tracing bool // set to EnableTracing when the clientStream is created. mu sync.Mutex - done func(balancer.DoneInfo) + put func() closed bool finished bool // trInfo.tr is set when the clientStream is created (if EnableTracing is true), @@ -341,7 +329,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return } if err == io.EOF { - // Specialize the process for server streaming. SendMsg is only called + // Specialize the process for server streaming. SendMesg is only called // once when creating the stream object. io.EOF needs to be skipped when // the rpc is early finished (before the stream object is created.). // TODO: It is probably better to move this into the generated code. @@ -361,17 +349,16 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { Client: true, } } - hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload) + out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload) + defer func() { + if cs.cbuf != nil { + cs.cbuf.Reset() + } + }() if err != nil { - return err + return Errorf(codes.Internal, "grpc: %v", err) } - if cs.c.maxSendMessageSize == nil { - return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") - } - if len(data) > *cs.c.maxSendMessageSize { - return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(data), *cs.c.maxSendMessageSize) - } - err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false}) + err = cs.t.Write(cs.s, out, &transport.Options{Last: false}) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() cs.statsHandler.HandleRPC(cs.statsCtx, outPayload) @@ -386,10 +373,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { Client: true, } } - if cs.c.maxReceiveMessageSize == nil { - return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") - } - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, inPayload) defer func() { // err != nil indicates the termination of the stream. if err != nil { @@ -412,10 +396,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { } // Special handling for client streaming rpc. // This recv expects EOF or errors, so we don't collect inPayload. - if cs.c.maxReceiveMessageSize == nil { - return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") - } - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, nil) cs.closeTransportStream(err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) @@ -443,7 +424,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { } func (cs *clientStream) CloseSend() (err error) { - err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true}) + err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) defer func() { if err != nil { cs.finish(err) @@ -483,15 +464,15 @@ func (cs *clientStream) finish(err error) { } }() for _, o := range cs.opts { - o.after(cs.c) + o.after(&cs.c) } - if cs.done != nil { + if cs.put != nil { updateRPCInfoInContext(cs.s.Context(), rpcInfo{ bytesSent: cs.s.BytesSent(), bytesReceived: cs.s.BytesReceived(), }) - 
cs.done(balancer.DoneInfo{Err: err}) - cs.done = nil + cs.put() + cs.put = nil } if cs.statsHandler != nil { end := &stats.End{ @@ -540,15 +521,15 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { - t transport.ServerTransport - s *transport.Stream - p *parser - codec Codec - cp Compressor - dc Decompressor - maxReceiveMessageSize int - maxSendMessageSize int - trInfo *traceInfo + t transport.ServerTransport + s *transport.Stream + p *parser + codec Codec + cp Compressor + dc Decompressor + cbuf *bytes.Buffer + maxMsgSize int + trInfo *traceInfo statsHandler stats.Handler @@ -592,23 +573,22 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } ss.mu.Unlock() } - if err != nil && err != io.EOF { - st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) - } }() var outPayload *stats.OutPayload if ss.statsHandler != nil { outPayload = &stats.OutPayload{} } - hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload) + out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload) + defer func() { + if ss.cbuf != nil { + ss.cbuf.Reset() + } + }() if err != nil { + err = Errorf(codes.Internal, "grpc: %v", err) return err } - if len(data) > ss.maxSendMessageSize { - return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(data), ss.maxSendMessageSize) - } - if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil { + if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } if outPayload != nil { @@ -632,16 +612,12 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } ss.mu.Unlock() } - if err != nil && err != io.EOF { - st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) - } }() var inPayload *stats.InPayload if ss.statsHandler != nil { inPayload = &stats.InPayload{} } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil { if err == io.EOF { return err } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index 22b8fb50dea..0f366476742 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -1,18 +1,33 @@ /* * - * Copyright 2016 gRPC authors. + * Copyright 2016, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -32,20 +47,8 @@ type Info struct { // TODO: More to be added. } -// ServerInHandle defines the function which runs before a new stream is created -// on the server side. If it returns a non-nil error, the stream will not be -// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. -// The client will receive an RPC error "code = Unavailable, desc = stream -// terminated by RST_STREAM with error code: REFUSED_STREAM". -// -// It's intended to be used in situations where you don't want to waste the -// resources to accept the new stream (e.g. rate-limiting). And the content of -// the error will be ignored and won't be sent back to the client. 
For other -// general usages, please use interceptors. -// -// Note that it is executed in the per-connection I/O goroutine(s) instead of -// per-RPC goroutine. Therefore, users should NOT have any -// blocking/time-consuming work in this handle. Otherwise all the RPCs would -// slow down. Also, for the same reason, this handle won't be called -// concurrently by gRPC. +// ServerInHandle defines the function which runs when a new stream is created +// on the server side. Note that it is executed in the per-connection I/O goroutine(s) instead +// of per-RPC goroutine. Therefore, users should NOT have any blocking/time-consuming +// work in this handle. Otherwise all the RPCs would slow down. type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index c1c96dedcb7..f6747e1dfa4 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -1,18 +1,33 @@ /* * - * Copyright 2015 gRPC authors. + * Copyright 2015, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -31,7 +46,7 @@ import ( // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. // This should only be set before any RPCs are sent or received by this program. -var EnableTracing bool +var EnableTracing = true // methodFamily returns the trace family for the given method. // It turns "/pkg.Service/GetFoo" into "pkg.Service". @@ -76,15 +91,6 @@ func (f *firstLine) String() string { return line.String() } -const truncateSize = 100 - -func truncate(x string, l int) string { - if l > len(x) { - return x - } - return x[:l] -} - // payload represents an RPC request or response payload. 
type payload struct { sent bool // whether this is an outgoing payload @@ -94,9 +100,9 @@ type payload struct { func (p payload) String() string { if p.sent { - return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + return fmt.Sprintf("sent: %v", p.msg) } - return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) + return fmt.Sprintf("recv: %v", p.msg) } type fmtStringer struct { diff --git a/vendor/google.golang.org/grpc/transport/BUILD b/vendor/google.golang.org/grpc/transport/BUILD index 838ad3079a6..d6b3e9fd45f 100644 --- a/vendor/google.golang.org/grpc/transport/BUILD +++ b/vendor/google.golang.org/grpc/transport/BUILD @@ -3,13 +3,13 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "bdp_estimator.go", "control.go", + "go16.go", + "go17.go", "handler_server.go", "http2_client.go", "http2_server.go", "http_util.go", - "log.go", "transport.go", ], importpath = "google.golang.org/grpc/transport", diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go deleted file mode 100644 index 8dd2ed42792..00000000000 --- a/vendor/google.golang.org/grpc/transport/bdp_estimator.go +++ /dev/null @@ -1,143 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package transport - -import ( - "sync" - "time" -) - -const ( - // bdpLimit is the maximum value the flow control windows - // will be increased to. - bdpLimit = (1 << 20) * 4 - // alpha is a constant factor used to keep a moving average - // of RTTs. - alpha = 0.9 - // If the current bdp sample is greater than or equal to - // our beta * our estimated bdp and the current bandwidth - // sample is the maximum bandwidth observed so far, we - // increase our bbp estimate by a factor of gamma. - beta = 0.66 - // To put our bdp to be smaller than or equal to twice the real BDP, - // we should multiply our current sample with 4/3, however to round things out - // we use 2 as the multiplication factor. - gamma = 2 -) - -var ( - // Adding arbitrary data to ping so that its ack can be - // identified. - // Easter-egg: what does the ping message say? - bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} -) - -type bdpEstimator struct { - // sentAt is the time when the ping was sent. - sentAt time.Time - - mu sync.Mutex - // bdp is the current bdp estimate. - bdp uint32 - // sample is the number of bytes received in one measurement cycle. - sample uint32 - // bwMax is the maximum bandwidth noted so far (bytes/sec). - bwMax float64 - // bool to keep track of the beginning of a new measurement cycle. - isSent bool - // Callback to update the window sizes. - updateFlowControl func(n uint32) - // sampleCount is the number of samples taken so far. - sampleCount uint64 - // round trip time (seconds) - rtt float64 -} - -// timesnap registers the time bdp ping was sent out so that -// network rtt can be calculated when its ack is received. -// It is called (by controller) when the bdpPing is -// being written on the wire. -func (b *bdpEstimator) timesnap(d [8]byte) { - if bdpPing.data != d { - return - } - b.sentAt = time.Now() -} - -// add adds bytes to the current sample for calculating bdp. -// It returns true only if a ping must be sent. 
This can be used -// by the caller (handleData) to make decision about batching -// a window update with it. -func (b *bdpEstimator) add(n uint32) bool { - b.mu.Lock() - defer b.mu.Unlock() - if b.bdp == bdpLimit { - return false - } - if !b.isSent { - b.isSent = true - b.sample = n - b.sentAt = time.Time{} - b.sampleCount++ - return true - } - b.sample += n - return false -} - -// calculate is called when an ack for a bdp ping is received. -// Here we calculate the current bdp and bandwidth sample and -// decide if the flow control windows should go up. -func (b *bdpEstimator) calculate(d [8]byte) { - // Check if the ping acked for was the bdp ping. - if bdpPing.data != d { - return - } - b.mu.Lock() - rttSample := time.Since(b.sentAt).Seconds() - if b.sampleCount < 10 { - // Bootstrap rtt with an average of first 10 rtt samples. - b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) - } else { - // Heed to the recent past more. - b.rtt += (rttSample - b.rtt) * float64(alpha) - } - b.isSent = false - // The number of bytes accumulated so far in the sample is smaller - // than or equal to 1.5 times the real BDP on a saturated connection. - bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) - if bwCurrent > b.bwMax { - b.bwMax = bwCurrent - } - // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is - // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we - // should update our perception of the network BDP. 
- if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { - sampleFloat := float64(b.sample) - b.bdp = uint32(gamma * sampleFloat) - if b.bdp > bdpLimit { - b.bdp = bdpLimit - } - bdp := b.bdp - b.mu.Unlock() - b.updateFlowControl(bdp) - return - } - b.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go index dd1a8d42e7e..8d29aee53d4 100644 --- a/vendor/google.golang.org/grpc/transport/control.go +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -22,18 +37,17 @@ import ( "fmt" "math" "sync" - "sync/atomic" "time" "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" ) const ( // The default value of flow control window size in HTTP2 spec. defaultWindowSize = 65535 // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC + initialWindowSize = defaultWindowSize // for an RPC + initialConnWindowSize = defaultWindowSize * 16 // for a connection infinity = time.Duration(math.MaxInt64) defaultClientKeepaliveTime = infinity defaultClientKeepaliveTimeout = time.Duration(20 * time.Second) @@ -44,43 +58,11 @@ const ( defaultServerKeepaliveTime = time.Duration(2 * time.Hour) defaultServerKeepaliveTimeout = time.Duration(20 * time.Second) defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute) - // max window limit set by HTTP2 Specs. - maxWindowSize = math.MaxInt32 - // defaultLocalSendQuota sets is default value for number of data - // bytes that each stream can schedule before some of it being - // flushed out. - defaultLocalSendQuota = 64 * 1024 ) // The following defines various control items which could flow through // the control buffer of transport. 
They represent different aspects of // control tasks, e.g., flow control, settings, streaming resetting, etc. - -type headerFrame struct { - streamID uint32 - hf []hpack.HeaderField - endStream bool -} - -func (*headerFrame) item() {} - -type continuationFrame struct { - streamID uint32 - endHeaders bool - headerBlockFragment []byte -} - -type dataFrame struct { - streamID uint32 - endStream bool - d []byte - f func() -} - -func (*dataFrame) item() {} - -func (*continuationFrame) item() {} - type windowUpdate struct { streamID uint32 increment uint32 @@ -105,8 +87,6 @@ func (*resetStream) item() {} type goAway struct { code http2.ErrCode debugData []byte - headsUp bool - closeConn bool } func (*goAway) item() {} @@ -128,9 +108,8 @@ func (*ping) item() {} type quotaPool struct { c chan int - mu sync.Mutex - version uint32 - quota int + mu sync.Mutex + quota int } // newQuotaPool creates a quotaPool which has quota q available to consume. @@ -151,10 +130,6 @@ func newQuotaPool(q int) *quotaPool { func (qb *quotaPool) add(v int) { qb.mu.Lock() defer qb.mu.Unlock() - qb.lockedAdd(v) -} - -func (qb *quotaPool) lockedAdd(v int) { select { case n := <-qb.c: qb.quota += n @@ -175,35 +150,6 @@ func (qb *quotaPool) lockedAdd(v int) { } } -func (qb *quotaPool) addAndUpdate(v int) { - qb.mu.Lock() - defer qb.mu.Unlock() - qb.lockedAdd(v) - // Update the version only after having added to the quota - // so that if acquireWithVesrion sees the new vesrion it is - // guaranteed to have seen the updated quota. - // Also, still keep this inside of the lock, so that when - // compareAndExecute is processing, this function doesn't - // get executed partially (quota gets updated but the version - // doesn't). 
- atomic.AddUint32(&(qb.version), 1) -} - -func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) { - return qb.c, atomic.LoadUint32(&(qb.version)) -} - -func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool { - qb.mu.Lock() - defer qb.mu.Unlock() - if version == atomic.LoadUint32(&(qb.version)) { - success() - return true - } - failure() - return false -} - // acquire returns the channel on which available quota amounts are sent. func (qb *quotaPool) acquire() <-chan int { return qb.c @@ -211,59 +157,16 @@ func (qb *quotaPool) acquire() <-chan int { // inFlow deals with inbound flow control type inFlow struct { - mu sync.Mutex // The inbound flow control limit for pending data. limit uint32 + + mu sync.Mutex // pendingData is the overall data which have been received but not been // consumed by applications. pendingData uint32 // The amount of data the application has consumed but grpc has not sent // window update for them. Used to reduce window update frequency. pendingUpdate uint32 - // delta is the extra window update given by receiver when an application - // is reading data bigger in size than the inFlow limit. - delta uint32 -} - -// newLimit updates the inflow window to a new value n. -// It assumes that n is always greater than the old limit. -func (f *inFlow) newLimit(n uint32) uint32 { - f.mu.Lock() - defer f.mu.Unlock() - d := n - f.limit - f.limit = n - return d -} - -func (f *inFlow) maybeAdjust(n uint32) uint32 { - if n > uint32(math.MaxInt32) { - n = uint32(math.MaxInt32) - } - f.mu.Lock() - defer f.mu.Unlock() - // estSenderQuota is the receiver's view of the maximum number of bytes the sender - // can send without a window update. - estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) - // estUntransmittedData is the maximum number of bytes the sends might not have put - // on the wire yet. 
A value of 0 or less means that we have already received all or - // more bytes than the application is requesting to read. - estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. - // This implies that unless we send a window update, the sender won't be able to send all the bytes - // for this message. Therefore we must send an update over the limit since there's an active read - // request from the application. - if estUntransmittedData > estSenderQuota { - // Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec. - if f.limit+n > maxWindowSize { - f.delta = maxWindowSize - f.limit - } else { - // Send a window update for the whole message and not just the difference between - // estUntransmittedData and estSenderQuota. This will be helpful in case the message - // is padded; We will fallback on the current available window(at least a 1/4th of the limit). - f.delta = n - } - return f.delta - } - return 0 } // onData is invoked when some data frame is received. It updates pendingData. 
@@ -271,7 +174,7 @@ func (f *inFlow) onData(n uint32) error { f.mu.Lock() defer f.mu.Unlock() f.pendingData += n - if f.pendingData+f.pendingUpdate > f.limit+f.delta { + if f.pendingData+f.pendingUpdate > f.limit { return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) } return nil @@ -286,13 +189,6 @@ func (f *inFlow) onRead(n uint32) uint32 { return 0 } f.pendingData -= n - if n > f.delta { - n -= f.delta - f.delta = 0 - } else { - f.delta -= n - n = 0 - } f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { wu := f.pendingUpdate @@ -302,10 +198,10 @@ func (f *inFlow) onRead(n uint32) uint32 { return 0 } -func (f *inFlow) resetPendingUpdate() uint32 { +func (f *inFlow) resetPendingData() uint32 { f.mu.Lock() defer f.mu.Unlock() - n := f.pendingUpdate - f.pendingUpdate = 0 + n := f.pendingData + f.pendingData = 0 return n } diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go new file mode 100644 index 00000000000..ee1c46bad57 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go16.go @@ -0,0 +1,46 @@ +// +build go1.6,!go1.7 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "net" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go new file mode 100644 index 00000000000..356f13ff197 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go17.go @@ -0,0 +1,46 @@ +// +build go1.7 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "net" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go index 7e0fdb35938..24f306babbb 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -1,18 +1,32 @@ /* + * Copyright 2016, Google Inc. + * All rights reserved. * - * Copyright 2016 gRPC authors. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ @@ -33,7 +47,6 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/context" "golang.org/x/net/http2" "google.golang.org/grpc/codes" @@ -89,6 +102,15 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr continue } for _, v := range vv { + if k == "user-agent" { + // user-agent is special. Copying logic of http_util.go. + if i := strings.LastIndex(v, " "); i == -1 { + // There is no application user agent string being set + continue + } else { + v = v[:i] + } + } v, err := decodeMetadataHeader(k, v) if err != nil { return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err) @@ -122,10 +144,6 @@ type serverHandlerTransport struct { // ServeHTTP (HandleStreams) goroutine. The channel is closed // when WriteStatus is called. writes chan func() - - // block concurrent WriteStatus calls - // e.g. grpc/(*serverStream).SendMsg/RecvMsg - writeStatusMu sync.Mutex } func (ht *serverHandlerTransport) Close() error { @@ -161,24 +179,15 @@ func (a strAddr) String() string { return string(a) } // do runs fn in the ServeHTTP goroutine. func (ht *serverHandlerTransport) do(fn func()) error { - // Avoid a panic writing to closed channel. Imperfect but maybe good enough. select { + case ht.writes <- fn: + return nil case <-ht.closedCh: return ErrConnClosing - default: - select { - case ht.writes <- fn: - return nil - case <-ht.closedCh: - return ErrConnClosing - } } } func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { - ht.writeStatusMu.Lock() - defer ht.writeStatusMu.Unlock() - err := ht.do(func() { ht.writeCommonHeaders(s) @@ -193,15 +202,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } - if p := st.Proto(); p != nil && len(p.Details) > 0 { - stBytes, err := proto.Marshal(p) - if err != nil { - // TODO: return error instead, when callers are able to handle it. 
- panic(err) - } - - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) - } + // TODO: Support Grpc-Status-Details-Bin if md := s.Trailer(); len(md) > 0 { for k, vv := range md { @@ -217,11 +218,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } }) - - if err == nil { // transport has not been closed - ht.Close() - close(ht.writes) - } + close(ht.writes) return err } @@ -244,17 +241,16 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers h.Add("Trailer", "Grpc-Status") h.Add("Trailer", "Grpc-Message") - h.Add("Trailer", "Grpc-Status-Details-Bin") + // TODO: Support Grpc-Status-Details-Bin if s.sendCompress != "" { h.Set("Grpc-Encoding", s.sendCompress) } } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { return ht.do(func() { ht.writeCommonHeaders(s) - ht.rw.Write(hdr) ht.rw.Write(data) if !opts.Delay { ht.rw.(http.Flusher).Flush() @@ -313,13 +309,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace req := ht.req s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), + id: 0, // irrelevant + windowHandler: func(int) {}, // nothing + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), } pr := &peer.Peer{ Addr: ht.RemoteAddr(), @@ -330,10 +326,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ctx = metadata.NewIncomingContext(ctx, ht.headerMD) ctx = peer.NewContext(ctx, pr) s.ctx = newContextWithStream(ctx, s) - s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, recv: s.buf}, - windowHandler: func(int) {}, 
- } + s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} // readerDone is closed when the Body.Read-ing goroutine exits. readerDone := make(chan struct{}) @@ -345,11 +338,11 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace for buf := make([]byte, readSize); ; { n, err := req.Body.Read(buf) if n > 0 { - s.buf.put(recvMsg{data: buf[:n:n]}) + s.buf.put(&recvMsg{data: buf[:n:n]}) buf = buf[n:] } if err != nil { - s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) return } if len(buf) == 0 { diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 1abb62e6df4..380fff665fb 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
* - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -33,6 +48,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -43,7 +59,6 @@ import ( // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { ctx context.Context - cancel context.CancelFunc target string // server name/addr userAgent string md interface{} @@ -53,6 +68,17 @@ type http2Client struct { authInfo credentials.AuthInfo // auth info about the connection nextID uint32 // the next stream ID to be used + // writableChan synchronizes write access to the transport. + // A writer acquires the write lock by sending a value on writableChan + // and releases it by receiving from writableChan. 
+ writableChan chan int + // shutdownChan is closed when Close is called. + // Blocking operations should select on shutdownChan to avoid + // blocking forever after Close. + // TODO(zhaoq): Maybe have a channel context? + shutdownChan chan struct{} + // errorChan is closed to notify the I/O error to the caller. + errorChan chan struct{} // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. goAway chan struct{} @@ -65,7 +91,7 @@ type http2Client struct { // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. - controlBuf *controlBuffer + controlBuf *recvBuffer fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool @@ -75,8 +101,6 @@ type http2Client struct { // The scheme used: https if TLS is on, http otherwise. scheme string - isSecure bool - creds []credentials.PerRPCCredentials // Boolean to keep track of reading activity on transport. @@ -86,11 +110,6 @@ type http2Client struct { statsHandler stats.Handler - initialWindowSize int32 - - bdpEst *bdpEstimator - outQuotaVersion uint32 - mu sync.Mutex // guard the following variables state transportState // the state of underlying connection activeStreams map[uint32]*Stream @@ -98,6 +117,8 @@ type http2Client struct { maxStreams int // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 + // goAwayID records the Last-Stream-ID in the GoAway frame from the server. + goAwayID uint32 // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. 
prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -109,7 +130,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if fn != nil { return fn(ctx, addr) } - return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + return dialContext(ctx, "tcp", addr) } func isTemporary(err error) bool { @@ -143,23 +164,14 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) { +func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) { scheme := "http" - ctx, cancel := context.WithCancel(ctx) - connectCtx, connectCancel := context.WithTimeout(ctx, timeout) - defer func() { - connectCancel() - if err != nil { - cancel() - } - }() - - conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + conn, err := dial(ctx, opts.Dialer, addr.Addr) if err != nil { if opts.FailOnNonTempDialError { - return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: %v", err) } // Any further errors will close the underlying connection defer func(conn net.Conn) { @@ -167,20 +179,16 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t conn.Close() } }(conn) - var ( - isSecure bool - authInfo credentials.AuthInfo - ) + var authInfo credentials.AuthInfo if creds := opts.TransportCredentials; creds != nil { scheme = "https" - conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Addr, conn) + conn, authInfo, err = 
creds.ClientHandshake(ctx, addr.Addr, conn) if err != nil { // Credentials handshake errors are typically considered permanent // to avoid retrying on e.g. bad certificates. temp := isTemporary(err) - return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err) + return nil, connectionErrorf(temp, err, "transport: %v", err) } - isSecure = true } kp := opts.KeepaliveParams // Validate keepalive parameters. @@ -190,24 +198,9 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t if kp.Timeout == 0 { kp.Timeout = defaultClientKeepaliveTimeout } - dynamicWindow := true - icwz := int32(initialWindowSize) - if opts.InitialConnWindowSize >= defaultWindowSize { - icwz = opts.InitialConnWindowSize - dynamicWindow = false - } var buf bytes.Buffer - writeBufSize := defaultWriteBufSize - if opts.WriteBufferSize > 0 { - writeBufSize = opts.WriteBufferSize - } - readBufSize := defaultReadBufSize - if opts.ReadBufferSize > 0 { - readBufSize = opts.ReadBufferSize - } t := &http2Client{ ctx: ctx, - cancel: cancel, target: addr.Addr, userAgent: opts.UserAgent, md: addr.Metadata, @@ -216,36 +209,27 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t localAddr: conn.LocalAddr(), authInfo: authInfo, // The client initiated stream id is odd starting from 1. 
- nextID: 1, - goAway: make(chan struct{}), - awakenKeepalive: make(chan struct{}, 1), - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - framer: newFramer(conn, writeBufSize, readBufSize), - controlBuf: newControlBuffer(), - fc: &inFlow{limit: uint32(icwz)}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - scheme: scheme, - state: reachable, - activeStreams: make(map[uint32]*Stream), - isSecure: isSecure, - creds: opts.PerRPCCredentials, - maxStreams: defaultMaxStreamsClient, - streamsQuota: newQuotaPool(defaultMaxStreamsClient), - streamSendQuota: defaultWindowSize, - kp: kp, - statsHandler: opts.StatsHandler, - initialWindowSize: initialWindowSize, - } - if opts.InitialWindowSize >= defaultWindowSize { - t.initialWindowSize = opts.InitialWindowSize - dynamicWindow = false - } - if dynamicWindow { - t.bdpEst = &bdpEstimator{ - bdp: initialWindowSize, - updateFlowControl: t.updateFlowControl, - } + nextID: 1, + writableChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + errorChan: make(chan struct{}), + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + framer: newFramer(conn), + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + controlBuf: newRecvBuffer(), + fc: &inFlow{limit: initialConnWindowSize}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + scheme: scheme, + state: reachable, + activeStreams: make(map[uint32]*Stream), + creds: opts.PerRPCCredentials, + maxStreams: defaultMaxStreamsClient, + streamsQuota: newQuotaPool(defaultMaxStreamsClient), + streamSendQuota: defaultWindowSize, + kp: kp, + statsHandler: opts.StatsHandler, } // Make sure awakenKeepalive can't be written upon. // keepalive routine will make it writable, if need be. 
@@ -268,75 +252,65 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t n, err := t.conn.Write(clientPreface) if err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + return nil, connectionErrorf(true, err, "transport: %v", err) } if n != len(clientPreface) { t.Close() return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) } - if t.initialWindowSize != defaultWindowSize { - err = t.framer.fr.WriteSettings(http2.Setting{ + if initialWindowSize != defaultWindowSize { + err = t.framer.writeSettings(true, http2.Setting{ ID: http2.SettingInitialWindowSize, - Val: uint32(t.initialWindowSize), + Val: uint32(initialWindowSize), }) } else { - err = t.framer.fr.WriteSettings() + err = t.framer.writeSettings(true) } if err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + return nil, connectionErrorf(true, err, "transport: %v", err) } // Adjust the connection flow control window if needed. - if delta := uint32(icwz - defaultWindowSize); delta > 0 { - if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + return nil, connectionErrorf(true, err, "transport: %v", err) } } - t.framer.writer.Flush() - go func() { - loopyWriter(t.ctx, t.controlBuf, t.itemHandler) - t.Close() - }() + go t.controller() if t.kp.Time != infinity { go t.keepalive() } + t.writableChan <- 0 return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
s := &Stream{ - id: t.nextID, - done: make(chan struct{}), - goAway: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), - localSendQuota: newQuotaPool(defaultLocalSendQuota), - headerChan: make(chan struct{}), + id: t.nextID, + done: make(chan struct{}), + goAway: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + fc: &inFlow{limit: initialWindowSize}, + sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), + headerChan: make(chan struct{}), } t.nextID += 2 - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) + s.windowHandler = func(n int) { + t.updateWindow(s, uint32(n)) } // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. s.ctx = ctx - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - goAway: s.goAway, - recv: s.buf, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) - }, + s.dec = &recvBufferReader{ + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, } - return s } @@ -350,51 +324,31 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if t.authInfo != nil { pr.AuthInfo = t.authInfo } + userCtx := ctx ctx = peer.NewContext(ctx, pr) - var ( - authData = make(map[string]string) - audience string - ) - // Create an audience string only if needed. - if len(t.creds) > 0 || callHdr.Creds != nil { + authData := make(map[string]string) + for _, c := range t.creds { // Construct URI required to get auth request metadata. - // Omit port if it is the default one. 
- host := strings.TrimSuffix(callHdr.Host, ":443") + var port string + if pos := strings.LastIndex(t.target, ":"); pos != -1 { + // Omit port if it is the default one. + if t.target[pos+1:] != "443" { + port = ":" + t.target[pos+1:] + } + } pos := strings.LastIndex(callHdr.Method, "/") if pos == -1 { - pos = len(callHdr.Method) + return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) } - audience = "https://" + host + callHdr.Method[:pos] - } - for _, c := range t.creds { + audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - return nil, streamErrorf(codes.Internal, "transport: %v", err) + return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err) } for k, v := range data { - // Capital header names are illegal in HTTP/2. - k = strings.ToLower(k) authData[k] = v } } - callAuthData := map[string]string{} - // Check if credentials.PerRPCCredentials were provided via call options. - // Note: if these credentials are provided both via dial options and call - // options, then both sets of credentials will be applied. 
- if callCreds := callHdr.Creds; callCreds != nil { - if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") - } - data, err := callCreds.GetRequestMetadata(ctx, audience) - if err != nil { - return nil, streamErrorf(codes.Internal, "transport: %v", err) - } - for k, v := range data { - // Capital header names are illegal in HTTP/2 - k = strings.ToLower(k) - callAuthData[k] = v - } - } t.mu.Lock() if t.activeStreams == nil { t.mu.Unlock() @@ -409,7 +363,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, ErrConnClosing } t.mu.Unlock() - sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire()) + sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) if err != nil { return nil, err } @@ -417,49 +371,79 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if sq > 1 { t.streamsQuota.add(sq - 1) } - // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields - // first and create a slice of that exact size. - // Make the slice of certain predictable size to reduce allocations made by append. 
- hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te - hfLen += len(authData) + len(callAuthData) - headerFields := make([]hpack.HeaderField, 0, hfLen) - headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) - headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) - headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) - headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) - headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { + // Return the quota back now because there is no stream returned to the caller. + if _, ok := err.(StreamError); ok { + t.streamsQuota.add(1) + } + return nil, err + } + t.mu.Lock() + if t.state == draining { + t.mu.Unlock() + t.streamsQuota.add(1) + // Need to make t writable again so that the rpc in flight can still proceed. + t.writableChan <- 0 + return nil, ErrStreamDrain + } + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + s := t.newStream(ctx, callHdr) + s.clientStatsCtx = userCtx + t.activeStreams[s.id] = s + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. If so, wake it up. + if len(t.activeStreams) == 1 { + select { + case t.awakenKeepalive <- struct{}{}: + t.framer.writePing(false, false, [8]byte{}) + default: + } + } + + t.mu.Unlock() + + // HPACK encodes various headers. Note that once WriteField(...) is + // called, the corresponding headers/continuation frame has to be sent + // because hpack.Encoder is stateful. 
+ t.hBuf.Reset() + t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}) + t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) if callHdr.SendCompress != "" { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. - // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. timeout := dl.Sub(time.Now()) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } + for k, v := range authData { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - for k, v := range callAuthData { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - if b := stats.OutgoingTags(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) - } - if b := stats.OutgoingTrace(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + // Capital header names are illegal in HTTP/2. 
+ k = strings.ToLower(k) + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) } + var ( + hasMD bool + endHeaders bool + ) if md, ok := metadata.FromOutgoingContext(ctx); ok { + hasMD = true for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. if isReservedHeader(k) { continue } for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } @@ -469,56 +453,60 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea continue } for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } - t.mu.Lock() - if t.state == draining { - t.mu.Unlock() - t.streamsQuota.add(1) - return nil, ErrStreamDrain - } - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing - } - s := t.newStream(ctx, callHdr) - t.activeStreams[s.id] = s - // If the number of active streams change from 0 to 1, then check if keepalive - // has gone dormant. If so, wake it up. - if len(t.activeStreams) == 1 { - select { - case t.awakenKeepalive <- struct{}{}: - t.controlBuf.put(&ping{data: [8]byte{}}) - // Fill the awakenKeepalive channel again as this channel must be - // kept non-writable except at the point that the keepalive() - // goroutine is waiting either to be awaken or shutdown. - t.awakenKeepalive <- struct{}{} - default: + first := true + bufLen := t.hBuf.Len() + // Sends the headers in a single batch even when they span multiple frames. 
+ for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + var flush bool + if endHeaders && (hasMD || callHdr.Flush) { + flush = true + } + if first { + // Sends a HeadersFrame to server to start a new stream. + p := http2.HeadersFrameParam{ + StreamID: s.id, + BlockFragment: t.hBuf.Next(size), + EndStream: false, + EndHeaders: endHeaders, + } + // Do a force flush for the buffered frames iff it is the last headers frame + // and there is header metadata to be sent. Otherwise, there is flushing until + // the corresponding data frame is written. + err = t.framer.writeHeaders(flush, p) + first = false + } else { + // Sends Continuation frames for the leftover headers. + err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) + } + if err != nil { + t.notifyError(err) + return nil, connectionErrorf(true, err, "transport: %v", err) } } - t.controlBuf.put(&headerFrame{ - streamID: s.id, - hf: headerFields, - endStream: false, - }) - t.mu.Unlock() - - s.mu.Lock() s.bytesSent = true - s.mu.Unlock() if t.statsHandler != nil { outHeader := &stats.OutHeader{ Client: true, + WireLength: bufLen, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: callHdr.SendCompress, } - t.statsHandler.HandleRPC(s.ctx, outHeader) + t.statsHandler.HandleRPC(s.clientStatsCtx, outHeader) } + t.writableChan <- 0 return s, nil } @@ -530,10 +518,6 @@ func (t *http2Client) CloseStream(s *Stream, err error) { t.mu.Unlock() return } - if err != nil { - // notify in-flight streams, before the deletion - s.write(recvMsg{err: err}) - } delete(t.activeStreams, s.id) if t.state == draining && len(t.activeStreams) == 0 { // The transport is draining and s is the last live stream on t. 
@@ -563,6 +547,11 @@ func (t *http2Client) CloseStream(s *Stream, err error) { s.mu.Lock() rstStream = s.rstStream rstError = s.rstError + if q := s.fc.resetPendingData(); q > 0 { + if n := t.fc.onRead(q); n > 0 { + t.controlBuf.put(&windowUpdate{0, n}) + } + } if s.state == streamDone { s.mu.Unlock() return @@ -588,9 +577,12 @@ func (t *http2Client) Close() (err error) { t.mu.Unlock() return } + if t.state == reachable || t.state == draining { + close(t.errorChan) + } t.state = closing t.mu.Unlock() - t.cancel() + close(t.shutdownChan) err = t.conn.Close() t.mu.Lock() streams := t.activeStreams @@ -612,18 +604,41 @@ func (t *http2Client) Close() (err error) { } t.statsHandler.HandleConn(t.ctx, connEnd) } - return err + return } -// GracefulClose sets the state to draining, which prevents new streams from -// being created and causes the transport to be closed when the last active -// stream is closed. If there are no active streams, the transport is closed -// immediately. This does nothing if the transport is already draining or -// closing. func (t *http2Client) GracefulClose() error { t.mu.Lock() switch t.state { - case closing, draining: + case unreachable: + // The server may close the connection concurrently. t is not available for + // any streams. Close it now. + t.mu.Unlock() + t.Close() + return nil + case closing: + t.mu.Unlock() + return nil + } + // Notify the streams which were initiated after the server sent GOAWAY. + select { + case <-t.goAway: + n := t.prevGoAwayID + if n == 0 && t.nextID > 1 { + n = t.nextID - 2 + } + m := t.goAwayID + 2 + if m == 2 { + m = 1 + } + for i := m; i <= n; i += 2 { + if s, ok := t.activeStreams[i]; ok { + close(s.goAway) + } + } + default: + } + if t.state == draining { t.mu.Unlock() return nil } @@ -638,38 +653,21 @@ func (t *http2Client) GracefulClose() error { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. 
-func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - select { - case <-s.ctx.Done(): - return ContextErr(s.ctx.Err()) - case <-t.ctx.Done(): - return ErrConnClosing - default: - } - - if hdr == nil && data == nil && opts.Last { - // stream.CloseSend uses this to send an empty frame with endStream=True - t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}}) - return nil - } - // Add data to header frame so that we can equally distribute data across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] - for idx, r := range [][]byte{hdr, data} { - for len(r) > 0 { +// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later +// if it improves the performance. +func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { + r := bytes.NewBuffer(data) + for { + var p []byte + if r.Len() > 0 { size := http2MaxFrameLen // Wait until the stream has some quota to send the data. - quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() - sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan) + sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire()) if err != nil { return err } // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) if err != nil { return err } @@ -679,51 +677,69 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e if tq < size { size = tq } - if size > len(r) { - size = len(r) - } - p := r[:size] + p = r.Next(size) ps := len(p) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.add(sq - ps) + } if ps < tq { // Overbooked transport quota. Return it back. 
t.sendQuotaPool.add(tq - ps) } - // Acquire local send quota to be able to write to the controlBuf. - ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire()) - if err != nil { - if _, ok := err.(ConnectionError); !ok { - t.sendQuotaPool.add(ps) - } - return err + } + var ( + endStream bool + forceFlush bool + ) + if opts.Last && r.Len() == 0 { + endStream = true + } + // Indicate there is a writer who is about to write a data frame. + t.framer.adjustNumWriters(1) + // Got some quota. Try to acquire writing privilege on the transport. + if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil { + if _, ok := err.(StreamError); ok || err == io.EOF { + // Return the connection quota back. + t.sendQuotaPool.add(len(p)) } - s.localSendQuota.add(ltq - ps) // It's ok if we make it negative. - var endStream bool - // See if this is the last frame to be written. - if opts.Last { - if len(r)-size == 0 { // No more data in r after this iteration. - if idx == 0 { // We're writing data header. - if len(data) == 0 { // There's no data to follow. - endStream = true - } - } else { // We're writing data. - endStream = true - } - } + if t.framer.adjustNumWriters(-1) == 0 { + // This writer is the last one in this batch and has the + // responsibility to flush the buffered frames. It queues + // a flush request to controlBuf instead of flushing directly + // in order to avoid the race with other writing or flushing. 
+ t.controlBuf.put(&flushIO{}) } - success := func() { - t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }}) - if ps < sq { - s.sendQuotaPool.lockedAdd(sq - ps) - } - r = r[ps:] - } - failure := func() { - s.sendQuotaPool.lockedAdd(sq) - } - if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { - t.sendQuotaPool.add(ps) - s.localSendQuota.add(ps) + return err + } + select { + case <-s.ctx.Done(): + t.sendQuotaPool.add(len(p)) + if t.framer.adjustNumWriters(-1) == 0 { + t.controlBuf.put(&flushIO{}) } + t.writableChan <- 0 + return ContextErr(s.ctx.Err()) + default: + } + if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { + // Do a force flush iff this is last frame for the entire gRPC message + // and the caller is the only writer at this moment. + forceFlush = true + } + // If WriteData fails, all the pending streams will be handled + // by http2Client.Close(). No explicit CloseStream() needs to be + // invoked. + if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { + t.notifyError(err) + return connectionErrorf(true, err, "transport: %v", err) + } + if t.framer.adjustNumWriters(-1) == 0 { + t.framer.flushWrite() + } + t.writableChan <- 0 + if r.Len() == 0 { + break } } if !opts.Last { @@ -744,24 +760,6 @@ func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { return s, ok } -// adjustWindow sends out extra window update over the initial window size -// of stream if the application is requesting data larger in size than -// the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { - s.mu.Lock() - defer s.mu.Unlock() - if s.state == streamDone { - return - } - if w := s.fc.maybeAdjust(n); w > 0 { - // Piggyback connection's window update along. 
- if cw := t.fc.resetPendingUpdate(); cw > 0 { - t.controlBuf.put(&windowUpdate{0, cw}) - } - t.controlBuf.put(&windowUpdate{s.id, w}) - } -} - // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. @@ -771,76 +769,41 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { if s.state == streamDone { return } + if w := t.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } if w := s.fc.onRead(n); w > 0 { - if cw := t.fc.resetPendingUpdate(); cw > 0 { - t.controlBuf.put(&windowUpdate{0, cw}) - } t.controlBuf.put(&windowUpdate{s.id, w}) } } -// updateFlowControl updates the incoming flow control windows -// for the transport and the stream based on the current bdp -// estimation. -func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.initialWindowSize = int32(n) - t.mu.Unlock() - t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) - t.controlBuf.put(&settings{ - ack: false, - ss: []http2.Setting{ - { - ID: http2.SettingInitialWindowSize, - Val: uint32(n), - }, - }, - }) -} - func (t *http2Client) handleData(f *http2.DataFrame) { size := f.Header().Length - var sendBDPPing bool - if t.bdpEst != nil { - sendBDPPing = t.bdpEst.add(uint32(size)) - } - // Decouple connection's flow control from application's read. - // An update on connection's flow control should not depend on - // whether user application has read the data or not. Such a - // restriction is already imposed on the stream's flow control, - // and therefore the sender will be blocked anyways. - // Decoupling the connection flow control will prevent other - // active(fast) streams from starving in presence of slow or - // inactive streams. 
- // - // Furthermore, if a bdpPing is being sent out we can piggyback - // connection's window update for the bytes we just received. - if sendBDPPing { - if size != 0 { // Could've been an empty data frame. - t.controlBuf.put(&windowUpdate{0, uint32(size)}) - } - t.controlBuf.put(bdpPing) - } else { - if err := t.fc.onData(uint32(size)); err != nil { - t.Close() - return - } - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } + if err := t.fc.onData(uint32(size)); err != nil { + t.notifyError(connectionErrorf(true, err, "%v", err)) + return } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } if size > 0 { + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() + // The stream has been closed. Release the corresponding quota. 
+ if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } if err := s.fc.onData(uint32(size)); err != nil { @@ -896,10 +859,10 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { - warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) statusCode = codes.Unknown } - s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)) + s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %d", f.ErrCode)) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -918,11 +881,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) { } func (t *http2Client) handlePing(f *http2.PingFrame) { - if f.IsAck() { - // Maybe it's a BDP ping. - if t.bdpEst != nil { - t.bdpEst.calculate(f.Data) - } + if f.IsAck() { // Do nothing. return } pingAck := &ping{ack: true} @@ -931,56 +890,36 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - t.mu.Lock() - if t.state != reachable && t.state != draining { - t.mu.Unlock() - return - } if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + grpclog.Printf("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") } - id := f.LastStreamID - if id > 0 && id%2 != 1 { - t.mu.Unlock() - t.Close() - return - } - // A client can receive multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387). - // The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay - // with the ID of the last stream the server will process. 
- // Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we - // close all streams created after the second GoAwayId. This way streams that were in-flight while the GoAway from server - // was being sent don't get killed. - select { - case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). - // If there are multiple GoAways the first one should always have an ID greater than the following ones. - if id > t.prevGoAwayID { + t.mu.Lock() + if t.state == reachable || t.state == draining { + if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { t.mu.Unlock() - t.Close() + t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) return } - default: - t.setGoAwayReason(f) - close(t.goAway) - t.state = draining - } - // All streams with IDs greater than the GoAwayId - // and smaller than the previous GoAway ID should be killed. - upperLimit := t.prevGoAwayID - if upperLimit == 0 { // This is the first GoAway Frame. - upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. - } - for streamID, stream := range t.activeStreams { - if streamID > id && streamID <= upperLimit { - close(stream.goAway) + select { + case <-t.goAway: + id := t.goAwayID + // t.goAway has been closed (i.e.,multiple GoAways). 
+ if id < f.LastStreamID { + t.mu.Unlock() + t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID)) + return + } + t.prevGoAwayID = id + t.goAwayID = f.LastStreamID + t.mu.Unlock() + return + default: + t.setGoAwayReason(f) } + t.goAwayID = f.LastStreamID + close(t.goAway) } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close() - } } // setGoAwayReason sets the value of t.goAwayReason based @@ -1021,20 +960,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if !ok { return } - s.mu.Lock() s.bytesReceived = true - s.mu.Unlock() var state decodeState - if err := state.decodeResponseHeader(frame); err != nil { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true + for _, hf := range frame.Fields { + if err := state.processHeaderField(hf); err != nil { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: err}) + // Something wrong. Stops reading even when there is remaining. + return } - s.mu.Unlock() - s.write(recvMsg{err: err}) - // Something wrong. Stops reading even when there is remaining. - return } endStream := frame.StreamEnded() @@ -1046,13 +985,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Client: true, WireLength: int(frame.Header().Length), } - t.statsHandler.HandleRPC(s.ctx, inHeader) + t.statsHandler.HandleRPC(s.clientStatsCtx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + t.statsHandler.HandleRPC(s.clientStatsCtx, inTrailer) } } }() @@ -1100,22 +1039,22 @@ func handleMalformedHTTP2(s *Stream, err error) { // TODO(zhaoq): Check the validity of the incoming frame sequence. func (t *http2Client) reader() { // Check the validity of server preface. 
- frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() if err != nil { - t.Close() + t.notifyError(err) return } atomic.CompareAndSwapUint32(&t.activity, 0, 1) sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.Close() + t.notifyError(err) return } t.handleSettings(sf) // loop to keep reading incoming messages on this transport. for { - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() atomic.CompareAndSwapUint32(&t.activity, 0, 1) if err != nil { // Abort an active stream if the http2.Framer returns a @@ -1127,12 +1066,12 @@ func (t *http2Client) reader() { t.mu.Unlock() if s != nil { // use error detail to provide better err message - handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail())) + handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) } continue } else { // Transport error. - t.Close() + t.notifyError(err) return } } @@ -1152,7 +1091,7 @@ func (t *http2Client) reader() { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: - errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) } } } @@ -1176,7 +1115,7 @@ func (t *http2Client) applySettings(ss []http2.Setting) { t.mu.Lock() for _, stream := range t.activeStreams { // Adjust the sending quota for each stream. - stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) + stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota)) } t.streamSendQuota = s.Val t.mu.Unlock() @@ -1184,78 +1123,49 @@ func (t *http2Client) applySettings(ss []http2.Setting) { } } -// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) -// is duplicated between the client and the server. -// The transport layer needs to be refactored to take care of this. 
-func (t *http2Client) itemHandler(i item) error { - var err error - switch i := i.(type) { - case *dataFrame: - err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d) - if err == nil { - i.f() - } - case *headerFrame: - t.hBuf.Reset() - for _, f := range i.hf { - t.hEnc.WriteField(f) - } - endHeaders := false - first := true - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - first = false - err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ - StreamID: i.streamID, - BlockFragment: t.hBuf.Next(size), - EndStream: i.endStream, - EndHeaders: endHeaders, - }) - } else { - err = t.framer.fr.WriteContinuation( - i.streamID, - endHeaders, - t.hBuf.Next(size), - ) - } - if err != nil { - return err +// controller running in a separate goroutine takes charge of sending control +// frames (e.g., window update, reset stream, setting, etc.) to the server. +func (t *http2Client) controller() { + for { + select { + case i := <-t.controlBuf.get(): + t.controlBuf.load() + select { + case <-t.writableChan: + switch i := i.(type) { + case *windowUpdate: + t.framer.writeWindowUpdate(true, i.streamID, i.increment) + case *settings: + if i.ack { + t.framer.writeSettingsAck(true) + t.applySettings(i.ss) + } else { + t.framer.writeSettings(true, i.ss...) + } + case *resetStream: + // If the server needs to be to intimated about stream closing, + // then we need to make sure the RST_STREAM frame is written to + // the wire before the headers of the next stream waiting on + // streamQuota. We ensure this by adding to the streamsQuota pool + // only after having acquired the writableChan to send RST_STREAM. 
+ t.streamsQuota.add(1) + t.framer.writeRSTStream(true, i.streamID, i.code) + case *flushIO: + t.framer.flushWrite() + case *ping: + t.framer.writePing(true, i.ack, i.data) + default: + grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) + } + t.writableChan <- 0 + continue + case <-t.shutdownChan: + return } + case <-t.shutdownChan: + return } - case *windowUpdate: - err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) - case *settings: - if i.ack { - t.applySettings(i.ss) - err = t.framer.fr.WriteSettingsAck() - } else { - err = t.framer.fr.WriteSettings(i.ss...) - } - case *resetStream: - // If the server needs to be to intimated about stream closing, - // then we need to make sure the RST_STREAM frame is written to - // the wire before the headers of the next stream waiting on - // streamQuota. We ensure this by adding to the streamsQuota pool - // only after having acquired the writableChan to send RST_STREAM. - err = t.framer.fr.WriteRSTStream(i.streamID, i.code) - t.streamsQuota.add(1) - case *flushIO: - err = t.framer.writer.Flush() - case *ping: - if !i.ack { - t.bdpEst.timesnap(i.data) - } - err = t.framer.fr.WritePing(i.ack, i.data) - default: - errorf("transport: http2Client.controller got unexpected item type %v\n", i) } - return err } // keepalive running in a separate goroutune makes sure the connection is alive by sending pings. @@ -1279,7 +1189,7 @@ func (t *http2Client) keepalive() { case <-t.awakenKeepalive: // If the control gets here a ping has been sent // need to reset the timer with keepalive.Timeout. 
- case <-t.ctx.Done(): + case <-t.shutdownChan: return } } else { @@ -1298,13 +1208,13 @@ func (t *http2Client) keepalive() { } t.Close() return - case <-t.ctx.Done(): + case <-t.shutdownChan: if !timer.Stop() { <-timer.C } return } - case <-t.ctx.Done(): + case <-t.shutdownChan: if !timer.Stop() { <-timer.C } @@ -1314,9 +1224,25 @@ func (t *http2Client) keepalive() { } func (t *http2Client) Error() <-chan struct{} { - return t.ctx.Done() + return t.errorChan } func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } + +func (t *http2Client) notifyError(err error) { + t.mu.Lock() + // make sure t.errorChan is closed only once. + if t.state == draining { + t.mu.Unlock() + t.Close() + return + } + if t.state == reachable { + t.state = unreachable + close(t.errorChan) + grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) + } + t.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index 00df8eed0fd..14cd19c64c6 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ @@ -21,7 +36,6 @@ package transport import ( "bytes" "errors" - "fmt" "io" "math" "math/rand" @@ -37,6 +51,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -52,25 +67,35 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { ctx context.Context - cancel context.CancelFunc conn net.Conn remoteAddr net.Addr localAddr net.Addr maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle - framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder + // writableChan synchronizes write access to the transport. + // A writer acquires the write lock by receiving a value on writableChan + // and releases it by sending on writableChan. + writableChan chan int + // shutdownChan is closed when Close is called. + // Blocking operations should select on shutdownChan to avoid + // blocking forever after Close. + shutdownChan chan struct{} + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. - controlBuf *controlBuffer + controlBuf *recvBuffer fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool - stats stats.Handler + + stats stats.Handler + // Flag to keep track of reading activity on transport. // 1 is true and 0 is false. activity uint32 // Accessed atomically. 
@@ -86,25 +111,15 @@ type http2Server struct { // Flag to signify that number of ping strikes should be reset to 0. // This is set whenever data or header frames are sent. // 1 means yes. - resetPingStrikes uint32 // Accessed atomically. - initialWindowSize int32 - bdpEst *bdpEstimator + resetPingStrikes uint32 // Accessed atomically. - mu sync.Mutex // guard the following - - // drainChan is initialized when drain(...) is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + mu sync.Mutex // guard the following state transportState activeStreams map[uint32]*Stream // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 // idle is the time instant when the connection went idle. - // This is either the beginning of the connection or when the number of + // This is either the begining of the connection or when the number of // RPCs go down to 0. // When the connection is busy, this value is set to 0. idle time.Time @@ -113,51 +128,32 @@ type http2Server struct { // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { - writeBufSize := defaultWriteBufSize - if config.WriteBufferSize > 0 { - writeBufSize = config.WriteBufferSize - } - readBufSize := defaultReadBufSize - if config.ReadBufferSize > 0 { - readBufSize = config.ReadBufferSize - } - framer := newFramer(conn, writeBufSize, readBufSize) + framer := newFramer(conn) // Send initial settings as connection preface to client. 
- var isettings []http2.Setting + var settings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. maxStreams := config.MaxStreams if maxStreams == 0 { maxStreams = math.MaxUint32 } else { - isettings = append(isettings, http2.Setting{ + settings = append(settings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, Val: maxStreams, }) } - dynamicWindow := true - iwz := int32(initialWindowSize) - if config.InitialWindowSize >= defaultWindowSize { - iwz = config.InitialWindowSize - dynamicWindow = false - } - icwz := int32(initialWindowSize) - if config.InitialConnWindowSize >= defaultWindowSize { - icwz = config.InitialConnWindowSize - dynamicWindow = false - } - if iwz != defaultWindowSize { - isettings = append(isettings, http2.Setting{ + if initialWindowSize != defaultWindowSize { + settings = append(settings, http2.Setting{ ID: http2.SettingInitialWindowSize, - Val: uint32(iwz)}) + Val: uint32(initialWindowSize)}) } - if err := framer.fr.WriteSettings(isettings...); err != nil { - return nil, connectionErrorf(false, err, "transport: %v", err) + if err := framer.writeSettings(true, settings...); err != nil { + return nil, connectionErrorf(true, err, "transport: %v", err) } // Adjust the connection flow control window if needed. 
- if delta := uint32(icwz - defaultWindowSize); delta > 0 { - if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { - return nil, connectionErrorf(false, err, "transport: %v", err) + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := framer.writeWindowUpdate(true, 0, delta); err != nil { + return nil, connectionErrorf(true, err, "transport: %v", err) } } kp := config.KeepaliveParams @@ -183,36 +179,29 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err kep.MinTime = defaultKeepalivePolicyMinTime } var buf bytes.Buffer - ctx, cancel := context.WithCancel(context.Background()) t := &http2Server{ - ctx: ctx, - cancel: cancel, - conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, - framer: framer, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - maxStreams: maxStreams, - inTapHandle: config.InTapHandle, - controlBuf: newControlBuffer(), - fc: &inFlow{limit: uint32(icwz)}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - state: reachable, - activeStreams: make(map[uint32]*Stream), - streamSendQuota: defaultWindowSize, - stats: config.StatsHandler, - kp: kp, - idle: time.Now(), - kep: kep, - initialWindowSize: iwz, - } - if dynamicWindow { - t.bdpEst = &bdpEstimator{ - bdp: initialWindowSize, - updateFlowControl: t.updateFlowControl, - } + ctx: context.Background(), + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + controlBuf: newRecvBuffer(), + fc: &inFlow{limit: initialConnWindowSize}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + state: reachable, + writableChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + activeStreams: make(map[uint32]*Stream), + streamSendQuota: defaultWindowSize, + stats: config.StatsHandler, + kp: kp, + idle: 
time.Now(), + kep: kep, } if t.stats != nil { t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ @@ -222,68 +211,37 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } - t.framer.writer.Flush() - - // Check the validity of client preface. - preface := make([]byte, len(clientPreface)) - if _, err := io.ReadFull(t.conn, preface); err != nil { - return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) - } - if !bytes.Equal(preface, clientPreface) { - return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) - } - - frame, err := t.framer.fr.ReadFrame() - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return - } - if err != nil { - return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) - } - atomic.StoreUint32(&t.activity, 1) - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) - } - t.handleSettings(sf) - - go func() { - loopyWriter(t.ctx, t.controlBuf, t.itemHandler) - t.Close() - }() + go t.controller() go t.keepalive() + t.writableChan <- 0 return t, nil } // operateHeader takes action on the decoded headers. 
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { - streamID := frame.Header().StreamID + buf := newRecvBuffer() + s := &Stream{ + id: frame.Header().StreamID, + st: t, + buf: buf, + fc: &inFlow{limit: initialWindowSize}, + } var state decodeState for _, hf := range frame.Fields { if err := state.processHeaderField(hf); err != nil { if se, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{streamID, statusCodeConvTab[se.Code]}) + t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) } return } } - buf := newRecvBuffer() - s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.encoding, - method: state.method, - } - if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } + s.recvCompress = state.encoding if state.timeoutSet { s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) } else { @@ -305,12 +263,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if len(state.mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) } - if state.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) - } - if state.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) + + s.dec = &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, } + s.recvCompress = state.encoding + s.method = state.method if t.inTapHandle != nil { var err error info := &tap.Info{ @@ -318,7 +277,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { - warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + // TODO: Log the real error. 
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) return } @@ -330,25 +289,24 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() - t.controlBuf.put(&resetStream{streamID, http2.ErrCodeRefusedStream}) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) return } - if streamID%2 != 1 || streamID <= t.maxStreamID { + if s.id%2 != 1 || s.id <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. - errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id) return true } - t.maxStreamID = streamID + t.maxStreamID = s.id s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) - s.localSendQuota = newQuotaPool(defaultLocalSendQuota) - t.activeStreams[streamID] = s + t.activeStreams[s.id] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } t.mu.Unlock() - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) + s.windowHandler = func(n int) { + t.updateWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) if t.stats != nil { @@ -362,15 +320,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } t.stats.HandleRPC(s.ctx, inHeader) } - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) - }, - } handle(s) return } @@ -379,8 +328,40 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + // Check the validity of client preface. 
+ preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + t.Close() + return + } + if !bytes.Equal(preface, clientPreface) { + grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + t.Close() + return + } + + frame, err := t.framer.readFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + if err != nil { + grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + atomic.StoreUint32(&t.activity, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + t.Close() + return + } + t.handleSettings(sf) + for { - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() atomic.StoreUint32(&t.activity, 1) if err != nil { if se, ok := err.(http2.StreamError); ok { @@ -397,7 +378,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. t.Close() return } - warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() return } @@ -420,7 +401,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. 
default: - errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } } } @@ -440,23 +421,6 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { return s, true } -// adjustWindow sends out extra window update over the initial window size -// of stream if the application is requesting data larger in size than -// the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { - s.mu.Lock() - defer s.mu.Unlock() - if s.state == streamDone { - return - } - if w := s.fc.maybeAdjust(n); w > 0 { - if cw := t.fc.resetPendingUpdate(); cw > 0 { - t.controlBuf.put(&windowUpdate{0, cw}) - } - t.controlBuf.put(&windowUpdate{s.id, w}) - } -} - // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. @@ -466,78 +430,42 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) { if s.state == streamDone { return } + if w := t.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } if w := s.fc.onRead(n); w > 0 { - if cw := t.fc.resetPendingUpdate(); cw > 0 { - t.controlBuf.put(&windowUpdate{0, cw}) - } t.controlBuf.put(&windowUpdate{s.id, w}) } } -// updateFlowControl updates the incoming flow control windows -// for the transport and the stream based on the current bdp -// estimation. 
-func (t *http2Server) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.initialWindowSize = int32(n) - t.mu.Unlock() - t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) - t.controlBuf.put(&settings{ - ack: false, - ss: []http2.Setting{ - { - ID: http2.SettingInitialWindowSize, - Val: uint32(n), - }, - }, - }) - -} - func (t *http2Server) handleData(f *http2.DataFrame) { size := f.Header().Length - var sendBDPPing bool - if t.bdpEst != nil { - sendBDPPing = t.bdpEst.add(uint32(size)) - } - // Decouple connection's flow control from application's read. - // An update on connection's flow control should not depend on - // whether user application has read the data or not. Such a - // restriction is already imposed on the stream's flow control, - // and therefore the sender will be blocked anyways. - // Decoupling the connection flow control will prevent other - // active(fast) streams from starving in presence of slow or - // inactive streams. - // - // Furthermore, if a bdpPing is being sent out we can piggyback - // connection's window update for the bytes we just received. - if sendBDPPing { - if size != 0 { // Could be an empty frame. - t.controlBuf.put(&windowUpdate{0, uint32(size)}) - } - t.controlBuf.put(bdpPing) - } else { - if err := t.fc.onData(uint32(size)); err != nil { - errorf("transport: http2Server %v", err) - t.Close() - return - } - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } + if err := t.fc.onData(uint32(size)); err != nil { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return } // Select the right stream to dispatch. 
s, ok := t.getStream(f) if !ok { + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } if size > 0 { + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() + // The stream has been closed. Release the corresponding quota. + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } if err := s.fc.onData(uint32(size)); err != nil { @@ -589,38 +517,17 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { ss = append(ss, s) return nil }) + // The settings will be applied once the ack is sent. t.controlBuf.put(&settings{ack: true, ss: ss}) } -func (t *http2Server) applySettings(ss []http2.Setting) { - for _, s := range ss { - if s.ID == http2.SettingInitialWindowSize { - t.mu.Lock() - for _, stream := range t.activeStreams { - stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) - } - t.streamSendQuota = s.Val - t.mu.Unlock() - } - - } -} - const ( maxPingStrikes = 2 defaultPingTimeout = 2 * time.Hour ) func (t *http2Server) handlePing(f *http2.PingFrame) { - if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) - return - } - // Maybe it's a BDP ping. - if t.bdpEst != nil { - t.bdpEst.calculate(f.Data) - } + if f.IsAck() { // Do nothing. return } pingAck := &ping{ack: true} @@ -643,7 +550,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { t.mu.Unlock() if ns < 1 && !t.kep.PermitWithoutStream { // Keepalive shouldn't be active thus, this new ping should - // have come after at least defaultPingTimeout. + // have come after atleast defaultPingTimeout. 
if t.lastPingAt.Add(defaultPingTimeout).After(now) { t.pingStrikes++ } @@ -656,8 +563,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - errorf("transport: Got to too many pings from the client, closing the connection.") - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings")}) } } @@ -673,16 +579,47 @@ func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { } } +func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { + first := true + endHeaders := false + var err error + defer func() { + if err == nil { + // Reset ping strikes when seding headers since that might cause the + // peer to send ping. + atomic.StoreUint32(&t.resetPingStrikes, 1) + } + }() + // Sends the headers in a single batch. + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + p := http2.HeadersFrameParam{ + StreamID: s.id, + BlockFragment: b.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + } + err = t.framer.writeHeaders(endHeaders, p) + first = false + } else { + err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size)) + } + if err != nil { + t.Close() + return connectionErrorf(true, err, "transport: %v", err) + } + } + return nil +} + // WriteHeader sends the header metedata md back to the client. 
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - select { - case <-s.ctx.Done(): - return ContextErr(s.ctx.Err()) - case <-t.ctx.Done(): - return ErrConnClosing - default: - } - s.mu.Lock() if s.headerOk || s.state == streamDone { s.mu.Unlock() @@ -698,13 +635,14 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } md = s.header s.mu.Unlock() - // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields - // first and create a slice of that exact size. - headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. - headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { + return err + } + t.hBuf.Reset() + t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) if s.sendCompress != "" { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } for k, vv := range md { if isReservedHeader(k) { @@ -712,20 +650,20 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { continue } for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } - t.controlBuf.put(&headerFrame{ - streamID: s.id, - hf: headerFields, - endStream: false, - }) + bufLen := t.hBuf.Len() + if err := t.writeHeaders(s, t.hBuf, false); err != nil { + return err + } if t.stats != nil { outHeader := &stats.OutHeader{ - //WireLength: // TODO(mmukhi): Revisit this 
later, if needed. + WireLength: bufLen, } t.stats.HandleRPC(s.Context(), outHeader) } + t.writableChan <- 0 return nil } @@ -734,12 +672,6 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { - select { - case <-t.ctx.Done(): - return ErrConnClosing - default: - } - var headersSent, hasHeader bool s.mu.Lock() if s.state == streamDone { @@ -759,15 +691,20 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headersSent = true } - // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields - // first and create a slice of that exact size. - headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. - if !headersSent { - headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { + return err } - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + t.hBuf.Reset() + if !headersSent { + t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + } + t.hEnc.WriteField( + hpack.HeaderField{ + Name: "grpc-status", + Value: strconv.Itoa(int(st.Code())), + }) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) @@ -776,7 +713,7 @@ func (t *http2Server) WriteStatus(s 
*Stream, st *status.Status) error { panic(err) } - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } // Attach the trailer metadata. @@ -786,32 +723,29 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { continue } for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } - t.controlBuf.put(&headerFrame{ - streamID: s.id, - hf: headerFields, - endStream: true, - }) + bufLen := t.hBuf.Len() + if err := t.writeHeaders(s, t.hBuf, true); err != nil { + t.Close() + return err + } if t.stats != nil { - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + outTrailer := &stats.OutTrailer{ + WireLength: bufLen, + } + t.stats.HandleRPC(s.Context(), outTrailer) } t.closeStream(s) + t.writableChan <- 0 return nil } // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) { - select { - case <-s.ctx.Done(): - return ContextErr(s.ctx.Err()) - case <-t.ctx.Done(): - return ErrConnClosing - default: - } - +func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) { + // TODO(zhaoq): Support multi-writers for a single stream. var writeHeaderFrame bool s.mu.Lock() if s.state == streamDone { @@ -825,81 +759,107 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) ( if writeHeaderFrame { t.WriteHeader(s, nil) } - // Add data to header frame so that we can equally distribute data across frames. 
- emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] - for _, r := range [][]byte{hdr, data} { - for len(r) > 0 { - size := http2MaxFrameLen - // Wait until the stream has some quota to send the data. - quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() - sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan) - if err != nil { - return err - } - // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire()) - if err != nil { - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - if size > len(r) { - size = len(r) - } - p := r[:size] - ps := len(p) - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - // Acquire local send quota to be able to write to the controlBuf. - ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire()) - if err != nil { - if _, ok := err.(ConnectionError); !ok { - t.sendQuotaPool.add(ps) - } - return err - } - s.localSendQuota.add(ltq - ps) // It's ok we make this negative. + defer func() { + if err == nil { // Reset ping strikes when sending data since this might cause // the peer to send ping. atomic.StoreUint32(&t.resetPingStrikes, 1) - success := func() { - t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() { - s.localSendQuota.add(ps) - }}) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.lockedAdd(sq - ps) - } - r = r[ps:] - } - failure := func() { - s.sendQuotaPool.lockedAdd(sq) - } - if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { - t.sendQuotaPool.add(ps) - s.localSendQuota.add(ps) - } } + }() + r := bytes.NewBuffer(data) + for { + if r.Len() == 0 { + return nil + } + size := http2MaxFrameLen + // Wait until the stream has some quota to send the data. 
+ sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire()) + if err != nil { + return err + } + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire()) + if err != nil { + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + p := r.Next(size) + ps := len(p) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.add(sq - ps) + } + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + t.framer.adjustNumWriters(1) + // Got some quota. Try to acquire writing privilege on the + // transport. + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { + if _, ok := err.(StreamError); ok { + // Return the connection quota back. + t.sendQuotaPool.add(ps) + } + if t.framer.adjustNumWriters(-1) == 0 { + // This writer is the last one in this batch and has the + // responsibility to flush the buffered frames. It queues + // a flush request to controlBuf instead of flushing directly + // in order to avoid the race with other writing or flushing. 
+ t.controlBuf.put(&flushIO{}) + } + return err + } + select { + case <-s.ctx.Done(): + t.sendQuotaPool.add(ps) + if t.framer.adjustNumWriters(-1) == 0 { + t.controlBuf.put(&flushIO{}) + } + t.writableChan <- 0 + return ContextErr(s.ctx.Err()) + default: + } + var forceFlush bool + if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { + forceFlush = true + } + if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { + t.Close() + return connectionErrorf(true, err, "transport: %v", err) + } + if t.framer.adjustNumWriters(-1) == 0 { + t.framer.flushWrite() + } + t.writableChan <- 0 + } + +} + +func (t *http2Server) applySettings(ss []http2.Setting) { + for _, s := range ss { + if s.ID == http2.SettingInitialWindowSize { + t.mu.Lock() + defer t.mu.Unlock() + for _, stream := range t.activeStreams { + stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + } + } - return nil } // keepalive running in a separate goroutine does the following: // 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. // 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. // 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. -// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-resposive connection // after an additional duration of keepalive.Timeout. func (t *http2Server) keepalive() { p := &ping{} @@ -908,7 +868,7 @@ func (t *http2Server) keepalive() { maxAge := time.NewTimer(t.kp.MaxConnectionAge) keepalive := time.NewTimer(t.kp.Time) // NOTE: All exit paths of this function should reset their - // respective timers. A failure to do so will cause the + // respecitve timers. 
A failure to do so will cause the // following clean-up to deadlock and eventually leak. defer func() { if !maxIdle.Stop() { @@ -932,18 +892,23 @@ func (t *http2Server) keepalive() { continue } val := t.kp.MaxConnectionIdle - time.Since(idle) - t.mu.Unlock() if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) + t.state = draining + t.mu.Unlock() + t.Drain() // Reseting the timer so that the clean-up doesn't deadlock. maxIdle.Reset(infinity) return } + t.mu.Unlock() maxIdle.Reset(val) case <-maxAge.C: - t.drain(http2.ErrCodeNo, []byte{}) + t.mu.Lock() + t.state = draining + t.mu.Unlock() + t.Drain() maxAge.Reset(t.kp.MaxConnectionAgeGrace) select { case <-maxAge.C: @@ -951,7 +916,7 @@ func (t *http2Server) keepalive() { t.Close() // Reseting the timer so that the clean-up doesn't deadlock. maxAge.Reset(infinity) - case <-t.ctx.Done(): + case <-t.shutdownChan: } return case <-keepalive.C: @@ -969,137 +934,69 @@ func (t *http2Server) keepalive() { pingSent = true t.controlBuf.put(p) keepalive.Reset(t.kp.Timeout) - case <-t.ctx.Done(): + case <-t.shutdownChan: return } } } -var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} - -// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) -// is duplicated between the client and the server. -// The transport layer needs to be refactored to take care of this. 
-func (t *http2Server) itemHandler(i item) error { - switch i := i.(type) { - case *dataFrame: - if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil { - return err - } - i.f() - return nil - case *headerFrame: - t.hBuf.Reset() - for _, f := range i.hf { - t.hEnc.WriteField(f) - } - first := true - endHeaders := false - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - var err error - if first { - first = false - err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ - StreamID: i.streamID, - BlockFragment: t.hBuf.Next(size), - EndStream: i.endStream, - EndHeaders: endHeaders, - }) - } else { - err = t.framer.fr.WriteContinuation( - i.streamID, - endHeaders, - t.hBuf.Next(size), - ) - } - if err != nil { - return err - } - } - atomic.StoreUint32(&t.resetPingStrikes, 1) - return nil - case *windowUpdate: - return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) - case *settings: - if i.ack { - t.applySettings(i.ss) - return t.framer.fr.WriteSettingsAck() - } - return t.framer.fr.WriteSettings(i.ss...) - case *resetStream: - return t.framer.fr.WriteRSTStream(i.streamID, i.code) - case *goAway: - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - // The transport is closing. - return fmt.Errorf("transport: Connection closing") - } - sid := t.maxStreamID - if !i.headsUp { - // Stop accepting more streams now. - t.state = draining - t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil { - return err - } - if i.closeConn { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. - t.framer.writer.Flush() - return fmt.Errorf("transport: Connection closing") - } - return nil - } - t.mu.Unlock() - // For a graceful close, send out a GoAway with stream ID of MaxUInt32, - // Follow that with a ping and wait for the ack to come back or a timer - // to expire. 
During this time accept new streams since they might have - // originated before the GoAway reaches the client. - // After getting the ack or timer expiration send out another GoAway this - // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { - return err - } - if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { - return err - } - go func() { - timer := time.NewTimer(time.Minute) - defer timer.Stop() +// controller running in a separate goroutine takes charge of sending control +// frames (e.g., window update, reset stream, setting, etc.) to the server. +func (t *http2Server) controller() { + for { + select { + case i := <-t.controlBuf.get(): + t.controlBuf.load() select { - case <-t.drainChan: - case <-timer.C: - case <-t.ctx.Done(): + case <-t.writableChan: + switch i := i.(type) { + case *windowUpdate: + t.framer.writeWindowUpdate(true, i.streamID, i.increment) + case *settings: + if i.ack { + t.framer.writeSettingsAck(true) + t.applySettings(i.ss) + } else { + t.framer.writeSettings(true, i.ss...) + } + case *resetStream: + t.framer.writeRSTStream(true, i.streamID, i.code) + case *goAway: + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + // The transport is closing. 
+ return + } + sid := t.maxStreamID + t.state = draining + t.mu.Unlock() + t.framer.writeGoAway(true, sid, i.code, i.debugData) + if i.code == http2.ErrCodeEnhanceYourCalm { + t.Close() + } + case *flushIO: + t.framer.flushWrite() + case *ping: + t.framer.writePing(true, i.ack, i.data) + default: + grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) + } + t.writableChan <- 0 + continue + case <-t.shutdownChan: return } - t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData}) - }() - return nil - case *flushIO: - return t.framer.writer.Flush() - case *ping: - if !i.ack { - t.bdpEst.timesnap(i.data) + case <-t.shutdownChan: + return } - return t.framer.fr.WritePing(i.ack, i.data) - default: - err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %t", i) - errorf("%v", err) - return err } } // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() error { +func (t *http2Server) Close() (err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() @@ -1109,8 +1006,8 @@ func (t *http2Server) Close() error { streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() - t.cancel() - err := t.conn.Close() + close(t.shutdownChan) + err = t.conn.Close() // Cancel all active streams. for _, s := range streams { s.cancel() @@ -1119,7 +1016,7 @@ func (t *http2Server) Close() error { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return err + return } // closeStream clears the footprint of a stream when the stream is not needed @@ -1139,6 +1036,11 @@ func (t *http2Server) closeStream(s *Stream) { // called to interrupt the potential blocking on other goroutines. 
s.cancel() s.mu.Lock() + if q := s.fc.resetPendingData(); q > 0 { + if w := t.fc.onRead(q); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } if s.state == streamDone { s.mu.Unlock() return @@ -1152,17 +1054,7 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { - t.mu.Lock() - defer t.mu.Unlock() - if t.drainChan != nil { - return - } - t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo}) } var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index 39f878cfd5b..795d5d18a4f 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -1,18 +1,33 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ @@ -25,9 +40,9 @@ import ( "fmt" "io" "net" - "net/http" "strconv" "strings" + "sync/atomic" "time" "github.com/golang/protobuf/proto" @@ -35,6 +50,7 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) @@ -44,8 +60,7 @@ const ( // http://http2.github.io/http2-spec/#SettingValues http2InitHeaderTableSize = 4096 // http2IOBufSize specifies the buffer size for sending frames. 
- defaultWriteBufSize = 32 * 1024 - defaultReadBufSize = 32 * 1024 + http2IOBufSize = 32 * 1024 ) var ( @@ -73,24 +88,6 @@ var ( codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, codes.PermissionDenied: http2.ErrCodeInadequateSecurity, } - httpStatusConvTab = map[int]codes.Code{ - // 400 Bad Request - INTERNAL. - http.StatusBadRequest: codes.Internal, - // 401 Unauthorized - UNAUTHENTICATED. - http.StatusUnauthorized: codes.Unauthenticated, - // 403 Forbidden - PERMISSION_DENIED. - http.StatusForbidden: codes.PermissionDenied, - // 404 Not Found - UNIMPLEMENTED. - http.StatusNotFound: codes.Unimplemented, - // 429 Too Many Requests - UNAVAILABLE. - http.StatusTooManyRequests: codes.Unavailable, - // 502 Bad Gateway - UNAVAILABLE. - http.StatusBadGateway: codes.Unavailable, - // 503 Service Unavailable - UNAVAILABLE. - http.StatusServiceUnavailable: codes.Unavailable, - // 504 Gateway timeout - UNAVAILABLE. - http.StatusGatewayTimeout: codes.Unavailable, - } ) // Records the states during HPACK decoding. Must be reset once the @@ -103,17 +100,14 @@ type decodeState struct { statusGen *status.Status // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not // intended for direct access outside of parsing. - rawStatusCode *int + rawStatusCode int32 rawStatusMsg string - httpStatus *int // Server side only fields. timeoutSet bool timeout time.Duration method string // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte + mdata map[string][]string } // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -165,7 +159,7 @@ func validContentType(t string) bool { func (d *decodeState) status() *status.Status { if d.statusGen == nil { // No status-details were provided; generate status using code/msg. 
- d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) + d.statusGen = status.New(codes.Code(d.rawStatusCode), d.rawStatusMsg) } return d.statusGen } @@ -199,51 +193,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { - for _, hf := range frame.Fields { - if err := d.processHeaderField(hf); err != nil { - return err - } - } - - // If grpc status exists, no need to check further. - if d.rawStatusCode != nil || d.statusGen != nil { - return nil - } - - // If grpc status doesn't exist and http status doesn't exist, - // then it's a malformed header. - if d.httpStatus == nil { - return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") - } - - if *(d.httpStatus) != http.StatusOK { - code, ok := httpStatusConvTab[*(d.httpStatus)] - if !ok { - code = codes.Unknown - } - return streamErrorf(code, http.StatusText(*(d.httpStatus))) - } - - // gRPC status doesn't exist and http status is OK. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propogated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propogated. 
- code := int(codes.Unknown) - d.rawStatusCode = &code - return nil - -} - -func (d *decodeState) addMetadata(k, v string) { - if d.mdata == nil { - d.mdata = make(map[string][]string) - } - d.mdata[k] = append(d.mdata[k], v) -} - func (d *decodeState) processHeaderField(f hpack.HeaderField) error { switch f.Name { case "content-type": @@ -257,7 +206,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { if err != nil { return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) } - d.rawStatusCode = &code + d.rawStatusCode = int32(code) case "grpc-message": d.rawStatusMsg = decodeGrpcMessage(f.Value) case "grpc-status-details-bin": @@ -278,36 +227,18 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { } case ":path": d.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) - } - d.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - } - d.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - } - d.statsTrace = v - d.addMetadata(f.Name, string(v)) default: - if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) { - break + if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) + return nil + } + d.mdata[f.Name] = append(d.mdata[f.Name], v) } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - errorf("Failed to decode metadata header (%q, %q): %v", f.Name, 
f.Value, err) - return nil - } - d.addMetadata(f.Name, string(v)) } return nil } @@ -475,10 +406,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { +func newFramer(conn net.Conn) *framer { f := &framer{ - reader: bufio.NewReaderSize(conn, readBufferSize), - writer: bufio.NewWriterSize(conn, writeBufferSize), + reader: bufio.NewReaderSize(conn, http2IOBufSize), + writer: bufio.NewWriterSize(conn, http2IOBufSize), } f.fr = http2.NewFramer(f.writer, f.reader) // Opt-in to Frame reuse API on framer to reduce garbage. @@ -487,3 +418,132 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } + +func (f *framer) adjustNumWriters(i int32) int32 { + return atomic.AddInt32(&f.numWriters, i) +} + +// The following writeXXX functions can only be called when the caller gets +// unblocked from writableChan channel (i.e., owns the privilege to write). 
+ +func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { + if err := f.fr.WriteData(streamID, endStream, data); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { + if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { + if err := f.fr.WriteHeaders(p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { + if err := f.fr.WritePing(ack, data); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { + if err := f.fr.WritePriority(streamID, p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { + if err := f.fr.WritePushPromise(p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { + if err := f.fr.WriteRSTStream(streamID, code); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeSettings(forceFlush bool, settings 
...http2.Setting) error { + if err := f.fr.WriteSettings(settings...); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeSettingsAck(forceFlush bool) error { + if err := f.fr.WriteSettingsAck(); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { + if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) flushWrite() error { + return f.writer.Flush() +} + +func (f *framer) readFrame() (http2.Frame, error) { + return f.fr.ReadFrame() +} + +func (f *framer) errorDetail() error { + return f.fr.ErrorDetail() +} diff --git a/vendor/google.golang.org/grpc/transport/log.go b/vendor/google.golang.org/grpc/transport/log.go deleted file mode 100644 index ac8e358c5c8..00000000000 --- a/vendor/google.golang.org/grpc/transport/log.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file contains wrappers for grpclog functions. -// The transport package only logs to verbose level 2 by default. 
- -package transport - -import "google.golang.org/grpc/grpclog" - -const logLevel = 2 - -func infof(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Infof(format, args...) - } -} - -func warningf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Warningf(format, args...) - } -} - -func errorf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Errorf(format, args...) - } -} - -func fatalf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Fatalf(format, args...) - } -} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index ce5cb74d2ee..87dc27e5bba 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -1,32 +1,48 @@ /* * - * Copyright 2014 gRPC authors. + * Copyright 2014, Google Inc. + * All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: * - * http://www.apache.org/licenses/LICENSE-2.0 + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
* - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ -// Package transport defines and implements message oriented communication -// channel to complete various transactions (e.g., an RPC). +/* +Package transport defines and implements message oriented communication channel +to complete various transactions (e.g., an RPC). +*/ package transport import ( - stdctx "context" + "bytes" "fmt" "io" "net" "sync" - "time" "golang.org/x/net/context" "golang.org/x/net/http2" @@ -49,56 +65,57 @@ type recvMsg struct { err error } -// recvBuffer is an unbounded channel of recvMsg structs. -// Note recvBuffer differs from controlBuffer only in that recvBuffer -// holds a channel of only recvMsg structs instead of objects implementing "item" interface. 
-// recvBuffer is written to much more often than -// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" +func (*recvMsg) item() {} + +// All items in an out of a recvBuffer should be the same type. +type item interface { + item() +} + +// recvBuffer is an unbounded channel of item. type recvBuffer struct { - c chan recvMsg + c chan item mu sync.Mutex - backlog []recvMsg + backlog []item } func newRecvBuffer() *recvBuffer { b := &recvBuffer{ - c: make(chan recvMsg, 1), + c: make(chan item, 1), } return b } -func (b *recvBuffer) put(r recvMsg) { +func (b *recvBuffer) put(r item) { b.mu.Lock() + defer b.mu.Unlock() if len(b.backlog) == 0 { select { case b.c <- r: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, r) - b.mu.Unlock() } func (b *recvBuffer) load() { b.mu.Lock() + defer b.mu.Unlock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: - b.backlog[0] = recvMsg{} b.backlog = b.backlog[1:] default: } } - b.mu.Unlock() } -// get returns the channel that receives a recvMsg in the buffer. +// get returns the channel that receives an item in the buffer. // -// Upon receipt of a recvMsg, the caller should call load to send another -// recvMsg onto the channel if there is any. -func (b *recvBuffer) get() <-chan recvMsg { +// Upon receipt of an item, the caller should call load to send another +// item onto the channel if there is any. +func (b *recvBuffer) get() <-chan item { return b.c } @@ -108,7 +125,7 @@ type recvBufferReader struct { ctx context.Context goAway chan struct{} recv *recvBuffer - last []byte // Stores the remaining data in the previous calls. + last *bytes.Reader // Stores the remaining data in the previous calls. 
err error } @@ -119,87 +136,27 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) { if r.err != nil { return 0, r.err } - n, r.err = r.read(p) - return n, r.err -} - -func (r *recvBufferReader) read(p []byte) (n int, err error) { - if r.last != nil && len(r.last) > 0 { + defer func() { r.err = err }() + if r.last != nil && r.last.Len() > 0 { // Read remaining data left in last call. - copied := copy(p, r.last) - r.last = r.last[copied:] - return copied, nil + return r.last.Read(p) } select { case <-r.ctx.Done(): return 0, ContextErr(r.ctx.Err()) case <-r.goAway: return 0, ErrStreamDrain - case m := <-r.recv.get(): + case i := <-r.recv.get(): r.recv.load() + m := i.(*recvMsg) if m.err != nil { return 0, m.err } - copied := copy(p, m.data) - r.last = m.data[copied:] - return copied, nil + r.last = bytes.NewReader(m.data) + return r.last.Read(p) } } -// All items in an out of a controlBuffer should be the same type. -type item interface { - item() -} - -// controlBuffer is an unbounded channel of item. -type controlBuffer struct { - c chan item - mu sync.Mutex - backlog []item -} - -func newControlBuffer() *controlBuffer { - b := &controlBuffer{ - c: make(chan item, 1), - } - return b -} - -func (b *controlBuffer) put(r item) { - b.mu.Lock() - if len(b.backlog) == 0 { - select { - case b.c <- r: - b.mu.Unlock() - return - default: - } - } - b.backlog = append(b.backlog, r) - b.mu.Unlock() -} - -func (b *controlBuffer) load() { - b.mu.Lock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog[0] = nil - b.backlog = b.backlog[1:] - default: - } - } - b.mu.Unlock() -} - -// get returns the channel that receives an item in the buffer. -// -// Upon receipt of an item, the caller should call load to send another -// item onto the channel if there is any. -func (b *controlBuffer) get() <-chan item { - return b.c -} - type streamState uint8 const ( @@ -214,6 +171,11 @@ type Stream struct { id uint32 // nil for client side Stream. 
st ServerTransport + // clientStatsCtx keeps the user context for stats handling. + // It's only valid on client side. Server side stats context is same as s.ctx. + // All client side stats collection should use the clientStatsCtx (instead of the stream context) + // so that all the generated stats for a particular RPC can be associated in the processing phase. + clientStatsCtx context.Context // ctx is the associated context of the stream. ctx context.Context // cancel is always nil for client side Stream. @@ -227,20 +189,16 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + dec io.Reader fc *inFlow recvQuota uint32 - - // TODO: Remote this unused variable. // The accumulated inbound quota pending for window update. updateQuota uint32 + // The handler to control the window update procedure for both this + // particular stream and the associated transport. + windowHandler func(int) - // Callback to state application's intentions to read data. This - // is used to adjust flow control, if need be. - requestRead func(int) - - sendQuotaPool *quotaPool - localSendQuota *quotaPool + sendQuotaPool *quotaPool // Close headerChan to indicate the end of reception of header metadata. headerChan chan struct{} // header caches the received header metadata. @@ -293,24 +251,16 @@ func (s *Stream) GoAway() <-chan struct{} { // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no -// header metadata or iii) the stream is canceled/expired. +// header metadata or iii) the stream is cancelled/expired. func (s *Stream) Header() (metadata.MD, error) { - var err error select { case <-s.ctx.Done(): - err = ContextErr(s.ctx.Err()) + return nil, ContextErr(s.ctx.Err()) case <-s.goAway: - err = ErrStreamDrain + return nil, ErrStreamDrain case <-s.headerChan: return s.header.Copy(), nil } - // Even if the stream is closed, header is returned if available. 
- select { - case <-s.headerChan: - return s.header.Copy(), nil - default: - } - return nil, err } // Trailer returns the cached trailer metedata. Note that if it is not called @@ -318,9 +268,8 @@ func (s *Stream) Header() (metadata.MD, error) { // side only. func (s *Stream) Trailer() metadata.MD { s.mu.RLock() - c := s.trailer.Copy() - s.mu.RUnlock() - return c + defer s.mu.RUnlock() + return s.trailer.Copy() } // ServerTransport returns the underlying ServerTransport for the stream. @@ -348,16 +297,14 @@ func (s *Stream) Status() *status.Status { // Server side only. func (s *Stream) SetHeader(md metadata.MD) error { s.mu.Lock() + defer s.mu.Unlock() if s.headerOk || s.state == streamDone { - s.mu.Unlock() return ErrIllegalHeaderWrite } if md.Len() == 0 { - s.mu.Unlock() return nil } s.header = metadata.Join(s.header, md) - s.mu.Unlock() return nil } @@ -368,44 +315,25 @@ func (s *Stream) SetTrailer(md metadata.MD) error { return nil } s.mu.Lock() + defer s.mu.Unlock() s.trailer = metadata.Join(s.trailer, md) - s.mu.Unlock() return nil } func (s *Stream) write(m recvMsg) { - s.buf.put(m) + s.buf.put(&m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { - // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er - } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) -} - -// tranportReader reads all the data available for this Stream from the transport and +// Read reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. -type transportReader struct { - reader io.Reader - // The handler to control the window update procedure for both this - // particular stream and the associated transport. 
- windowHandler func(int) - er error -} - -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (s *Stream) Read(p []byte) (n int, err error) { + n, err = s.dec.Read(p) if err != nil { - t.er = err return } - t.windowHandler(n) + s.windowHandler(n) return } @@ -420,17 +348,15 @@ func (s *Stream) finish(st *status.Status) { // BytesSent indicates whether any bytes have been sent on this stream. func (s *Stream) BytesSent() bool { s.mu.Lock() - bs := s.bytesSent - s.mu.Unlock() - return bs + defer s.mu.Unlock() + return s.bytesSent } // BytesReceived indicates whether any bytes have been received on this stream. func (s *Stream) BytesReceived() bool { s.mu.Lock() - br := s.bytesReceived - s.mu.Unlock() - return br + defer s.mu.Unlock() + return s.bytesReceived } // GoString is implemented by Stream so context.String() won't @@ -459,22 +385,19 @@ type transportState int const ( reachable transportState = iota + unreachable closing draining ) // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { - MaxStreams uint32 - AuthInfo credentials.AuthInfo - InTapHandle tap.ServerInHandle - StatsHandler stats.Handler - KeepaliveParams keepalive.ServerParameters - KeepalivePolicy keepalive.EnforcementPolicy - InitialWindowSize int32 - InitialConnWindowSize int32 - WriteBufferSize int - ReadBufferSize int + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy } // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -502,14 +425,6 @@ type ConnectOptions struct { KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler - // InitialWindowSize sets the initial window size for a stream. 
- InitialWindowSize int32 - // InitialConnWindowSize sets the initial window size for a connection. - InitialConnWindowSize int32 - // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. - WriteBufferSize int - // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. - ReadBufferSize int } // TargetInfo contains the information of the target such as network address and metadata. @@ -520,8 +435,8 @@ type TargetInfo struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) { - return newHTTP2Client(ctx, target, opts, timeout) +func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) { + return newHTTP2Client(ctx, target, opts) } // Options provides additional hints and information for message @@ -533,7 +448,7 @@ type Options struct { // Delay is a hint to the transport implementation for whether // the data could be buffered for a batching write. The - // transport implementation may ignore the hint. + // Transport implementation may ignore the hint. Delay bool } @@ -553,15 +468,10 @@ type CallHdr struct { // outbound message. SendCompress string - // Creds specifies credentials.PerRPCCredentials for a call. - Creds credentials.PerRPCCredentials - // Flush indicates whether a new stream command should be sent // to the peer without waiting for the first data. This is - // only a hint. - // If it's true, the transport may modify the flush decision + // only a hint. The transport may modify the flush decision // for performance purposes. - // If it's false, new stream will never be flushed. 
Flush bool } @@ -579,7 +489,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, data []byte, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -597,7 +507,7 @@ type ClientTransport interface { // once the transport is initiated. Error() <-chan struct{} - // GoAway returns a channel that is closed when ClientTransport + // GoAway returns a channel that is closed when ClientTranspor // receives the draining signal from the server (e.g., GOAWAY frame in // HTTP/2). GoAway() <-chan struct{} @@ -621,7 +531,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, data []byte, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -703,33 +613,45 @@ func (e StreamError) Error() string { return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } -// wait blocks until it can receive from one of the provided contexts or channels -func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) { +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + panic(fmt.Sprintf("Unexpected error from context packet: %v", err)) +} + +// wait blocks until it can receive from ctx.Done, closing, or proceed. 
+// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. +// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise +// it return the StreamError for ctx.Err. +// If it receives from goAway, it returns 0, ErrStreamDrain. +// If it receives from closing, it returns 0, ErrConnClosing. +// If it receives from proceed, it returns the received integer, nil. +func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) { select { case <-ctx.Done(): return 0, ContextErr(ctx.Err()) case <-done: + // User cancellation has precedence. + select { + case <-ctx.Done(): + return 0, ContextErr(ctx.Err()) + default: + } return 0, io.EOF case <-goAway: return 0, ErrStreamDrain - case <-tctx.Done(): + case <-closing: return 0, ErrConnClosing case i := <-proceed: return i, nil } } -// ContextErr converts the error from context package into a StreamError. -func ContextErr(err error) StreamError { - switch err { - case context.DeadlineExceeded, stdctx.DeadlineExceeded: - return streamErrorf(codes.DeadlineExceeded, "%v", err) - case context.Canceled, stdctx.Canceled: - return streamErrorf(codes.Canceled, "%v", err) - } - return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) -} - // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 @@ -739,39 +661,6 @@ const ( // NoReason is the default value when GoAway frame is received. NoReason GoAwayReason = 1 // TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm - // was received and that the debug data said "too_many_pings". + // was recieved and that the debug data said "too_many_pings". TooManyPings GoAwayReason = 2 ) - -// loopyWriter is run in a separate go routine. It is the single code path that will -// write data on wire. 
-func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) { - for { - select { - case i := <-cbuf.get(): - cbuf.load() - if err := handler(i); err != nil { - return - } - case <-ctx.Done(): - return - } - hasData: - for { - select { - case i := <-cbuf.get(): - cbuf.load() - if err := handler(i); err != nil { - return - } - case <-ctx.Done(): - return - default: - if err := handler(&flushIO{}); err != nil { - return - } - break hasData - } - } - } -} diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh deleted file mode 100755 index d006a426347..00000000000 --- a/vendor/google.golang.org/grpc/vet.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -set -ex # Exit on error; debugging enabled. -set -o pipefail # Fail a pipe if any sub-command fails. - -die() { - echo "$@" >&2 - exit 1 -} - -# TODO: Remove this check and the mangling below once "context" is imported -# directly. -if git status --porcelain | read; then - die "Uncommitted or untracked files found; commit changes first" -fi - -PATH="$GOPATH/bin:$GOROOT/bin:$PATH" - -# Check proto in manual runs or cron runs. -if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then - check_proto="true" -fi - -if [ "$1" = "-install" ]; then - go get -d \ - google.golang.org/grpc/... - go get -u \ - github.com/golang/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/golang/protobuf/protoc-gen-go \ - golang.org/x/tools/cmd/stringer - if [[ "$check_proto" = "true" ]]; then - if [[ "$TRAVIS" = "true" ]]; then - PROTOBUF_VERSION=3.3.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif ! 
which protoc > /dev/null; then - die "Please install protoc into your path" - fi - fi - exit 0 -elif [[ "$#" -ne 0 ]]; then - die "Unknown argument(s): $*" -fi - -git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) -gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) -goimports -l . 2>&1 | tee /dev/stderr | (! read) -golint ./... 2>&1 | (grep -vE "(_mock|_string|grpc_lb_v1/doc|\.pb)\.go:" || true) | tee /dev/stderr | (! read) - -# Undo any edits made by this script. -cleanup() { - git reset --hard HEAD -} -trap cleanup EXIT - -# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484). -# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711). -git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' -set +o pipefail -# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. -go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read) -set -o pipefail -git reset --hard HEAD - -if [[ "$check_proto" = "true" ]]; then - PATH="/home/travis/bin:$PATH" make proto && \ - git status --porcelain 2>&1 | (! read) || \ - (git status; git --no-pager diff; exit 1) -fi - -# TODO(menghanl): fix errors in transport_test. -staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./... 
From b6e1fecde7f43f8a0284b56c291fd7fa08391ea1 Mon Sep 17 00:00:00 2001 From: zouyee Date: Tue, 19 Dec 2017 23:24:35 +0800 Subject: [PATCH 423/794] check function return err --- pkg/volume/cephfs/cephfs.go | 6 +++++- pkg/volume/glusterfs/glusterfs.go | 5 +++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go index bbe681d71b5..103766e7e22 100644 --- a/pkg/volume/cephfs/cephfs.go +++ b/pkg/volume/cephfs/cephfs.go @@ -232,7 +232,10 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { if !notMnt { return nil } - os.MkdirAll(dir, 0750) + + if err := os.MkdirAll(dir, 0750); err != nil { + return err + } // check whether it belongs to fuse, if not, default to use kernel mount. if cephfsVolume.checkFuseMount() { @@ -253,6 +256,7 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { } } glog.V(4).Infof("CephFS kernel mount.") + err = cephfsVolume.execMount(dir) if err != nil { // cleanup upon failure. diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index d90ea04cb73..6e3c4545fb3 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -259,8 +259,9 @@ func (b *glusterfsMounter) SetUpAt(dir string, fsGroup *int64) error { if !notMnt { return nil } - - os.MkdirAll(dir, 0750) + if err := os.MkdirAll(dir, 0750); err != nil { + return err + } err = b.setUpAtInternal(dir) if err == nil { return nil From b814c5677ffabb3dd9e512d842a6f72950ee1b48 Mon Sep 17 00:00:00 2001 From: liz Date: Fri, 1 Dec 2017 14:01:47 -0500 Subject: [PATCH 424/794] Collect all the assorted image URLs from e2e tests in one place utils/image/manifest has an additional `arch` parameter, which determines whether an image ends in `-$ARCH` (like `-amd64`). All locations that previously had gcr.io urls referenced in costants or inline have been updated to refere test/utils/image. 
--- test/e2e/apimachinery/aggregator.go | 3 +- test/e2e/apimachinery/initializers.go | 2 +- test/e2e/apimachinery/webhook.go | 3 +- test/e2e/apps/disruption.go | 5 +- test/e2e/auth/metadata_concealment.go | 3 +- test/e2e/common/apparmor.go | 3 +- test/e2e/common/util.go | 6 +- test/e2e/framework/pv_util.go | 3 +- test/e2e/framework/service_util.go | 2 +- test/e2e/framework/util.go | 9 +- test/e2e/framework/volume_util.go | 18 +--- test/e2e/instrumentation/logging/utils/BUILD | 1 + .../logging/utils/logging_pod.go | 4 +- test/e2e/instrumentation/monitoring/BUILD | 1 + .../monitoring/custom_metrics_deployments.go | 3 +- test/e2e/network/dns_common.go | 2 +- test/e2e/storage/volumes.go | 3 +- test/utils/image/manifest.go | 95 ++++++++++++------- 18 files changed, 95 insertions(+), 71 deletions(-) diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index e91fd69130d..ee72e3158f8 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -41,6 +41,7 @@ import ( rbacapi "k8s.io/kubernetes/pkg/apis/rbac" utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" + imageutils "k8s.io/kubernetes/test/utils/image" samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" . 
"github.com/onsi/ginkgo" @@ -70,7 +71,7 @@ var _ = SIGDescribe("Aggregator", func() { framework.SkipUnlessProviderIs("gce", "gke") // Testing a 1.7 version of the sample-apiserver - TestSampleAPIServer(f, "gcr.io/kubernetes-e2e-test-images/k8s-aggregator-sample-apiserver-amd64:1.7v2") + TestSampleAPIServer(f, imageutils.GetE2EImage(imageutils.APIServer)) }) }) diff --git a/test/e2e/apimachinery/initializers.go b/test/e2e/apimachinery/initializers.go index 1253e5b9e49..f0381721f1a 100644 --- a/test/e2e/apimachinery/initializers.go +++ b/test/e2e/apimachinery/initializers.go @@ -315,7 +315,7 @@ func newReplicaset() *v1beta1.ReplicaSet { Containers: []v1.Container{ { Name: name + "-container", - Image: "k8s.gcr.io/porter:4524579c0eb935c056c8e75563b4e1eda31587e0", + Image: imageutils.GetE2EImage(imageutils.Porter), }, }, }, diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index e9a714f49ee..755055c9696 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -39,6 +39,7 @@ import ( clientset "k8s.io/client-go/kubernetes" utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" + imageutils "k8s.io/kubernetes/test/utils/image" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -100,7 +101,7 @@ var _ = SIGDescribe("AdmissionWebhook", func() { // Note that in 1.9 we will have backwards incompatible change to // admission webhooks, so the image will be updated to 1.9 sometime in // the development 1.9 cycle. 
- deployWebhookAndService(f, "gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v7", context) + deployWebhookAndService(f, imageutils.GetE2EImage(imageutils.AdmissionWebhook), context) }) AfterEach(func() { cleanWebhookTest(client, namespaceName) diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index 85f5d8cb801..8ae257565fa 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + imageutils "k8s.io/kubernetes/test/utils/image" ) // schedulingTimeout is longer specifically because sometimes we need to wait @@ -257,7 +258,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { Containers: []v1.Container{ { Name: "busybox", - Image: "k8s.gcr.io/echoserver:1.6", + Image: imageutils.GetE2EImage(imageutils.EchoServer), }, }, RestartPolicy: v1.RestartPolicyAlways, @@ -301,7 +302,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) { func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) { container := v1.Container{ Name: "busybox", - Image: "k8s.gcr.io/echoserver:1.6", + Image: imageutils.GetE2EImage(imageutils.EchoServer), } if exclusive { container.Ports = []v1.ContainerPort{ diff --git a/test/e2e/auth/metadata_concealment.go b/test/e2e/auth/metadata_concealment.go index df9455c8208..01d46bf8dd7 100644 --- a/test/e2e/auth/metadata_concealment.go +++ b/test/e2e/auth/metadata_concealment.go @@ -24,6 +24,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + imageutil "k8s.io/kubernetes/test/utils/image" ) var _ = SIGDescribe("Metadata Concealment", func() { @@ -45,7 +46,7 @@ var _ = SIGDescribe("Metadata Concealment", func() { Containers: []v1.Container{ { Name: "check-metadata-concealment", - Image: "k8s.gcr.io/check-metadata-concealment:v0.0.2", + Image: imageutil.GetE2EImage(imageutil.CheckMetadataConcealment), }, }, RestartPolicy: v1.RestartPolicyOnFailure, diff --git a/test/e2e/common/apparmor.go b/test/e2e/common/apparmor.go index dcac6c0da61..5cb4728a858 100644 --- a/test/e2e/common/apparmor.go +++ b/test/e2e/common/apparmor.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/security/apparmor" "k8s.io/kubernetes/test/e2e/framework" + imageutils "k8s.io/kubernetes/test/utils/image" . "github.com/onsi/gomega" ) @@ -185,7 +186,7 @@ func createAppArmorProfileLoader(f *framework.Framework) { Spec: api.PodSpec{ Containers: []api.Container{{ Name: "apparmor-loader", - Image: "k8s.gcr.io/apparmor-loader:0.1", + Image: imageutils.GetE2EImage(imageutils.AppArmorLoader), Args: []string{"-poll", "10s", "/profiles"}, SecurityContext: &api.SecurityContext{ Privileged: &True, diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index a5e89c43551..2edfd8de7e5 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -61,9 +61,9 @@ var CommonImageWhiteList = sets.NewString( imageutils.GetE2EImage(imageutils.ServeHostname), imageutils.GetE2EImage(imageutils.TestWebserver), imageutils.GetE2EImage(imageutils.Hostexec), - "k8s.gcr.io/volume-nfs:0.8", - "k8s.gcr.io/volume-gluster:0.2", - "k8s.gcr.io/e2e-net-amd64:1.0", + imageutils.GetE2EImage(imageutils.VolumeNFSServer), + imageutils.GetE2EImage(imageutils.VolumeGlusterServer), + imageutils.GetE2EImage(imageutils.E2ENet), ) func svcByName(name string, port int) *v1.Service { diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go index ae20e07ebe6..c81e024b603 100644 --- 
a/test/e2e/framework/pv_util.go +++ b/test/e2e/framework/pv_util.go @@ -40,6 +40,7 @@ import ( awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/volume/util/volumehelper" + imageutils "k8s.io/kubernetes/test/utils/image" ) const ( @@ -856,7 +857,7 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bo Containers: []v1.Container{ { Name: "write-pod", - Image: "k8s.gcr.io/busybox:1.24", + Image: imageutils.GetE2EImage(imageutils.BusyBox), Command: []string{"/bin/sh"}, Args: []string{"-c", command}, SecurityContext: &v1.SecurityContext{ diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 7ff5a2e0d52..ae9edb92d93 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -802,7 +802,7 @@ func newEchoServerPodSpec(podName string) *v1.Pod { Containers: []v1.Container{ { Name: "echoserver", - Image: "k8s.gcr.io/echoserver:1.6", + Image: imageutils.GetE2EImage(imageutils.EchoServer), Ports: []v1.ContainerPort{{ContainerPort: int32(port)}}, }, }, diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 02f9c43e72a..e2f298d50d8 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -33,7 +33,6 @@ import ( "path" "path/filepath" "regexp" - goruntime "runtime" "sort" "strconv" "strings" @@ -154,10 +153,6 @@ const ( // How long claims have to become dynamically provisioned ClaimProvisionTimeout = 5 * time.Minute - // When these values are updated, also update cmd/kubelet/app/options/options.go - currentPodInfraContainerImageName = "k8s.gcr.io/pause" - currentPodInfraContainerImageVersion = "3.0" - // How long a node is allowed to become "Ready" after it is restarted before // the test is considered failed. 
RestartNodeReadyAgainTimeout = 5 * time.Minute @@ -230,13 +225,13 @@ func GetServerArchitecture(c clientset.Interface) string { // GetPauseImageName fetches the pause image name for the same architecture as the apiserver. func GetPauseImageName(c clientset.Interface) string { - return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion + return imageutils.GetE2EImageWithArch(imageutils.Pause, GetServerArchitecture(c)) } // GetPauseImageNameForHostArch fetches the pause image name for the same architecture the test is running on. // TODO: move this function to the test/utils func GetPauseImageNameForHostArch() string { - return currentPodInfraContainerImageName + "-" + goruntime.GOARCH + ":" + currentPodInfraContainerImageVersion + return imageutils.GetE2EImage(imageutils.Pause) } func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) { diff --git a/test/e2e/framework/volume_util.go b/test/e2e/framework/volume_util.go index 03a26d76516..4c396c06676 100644 --- a/test/e2e/framework/volume_util.go +++ b/test/e2e/framework/volume_util.go @@ -48,21 +48,13 @@ import ( apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + imageutils "k8s.io/kubernetes/test/utils/image" "github.com/golang/glog" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -// Current supported images for e2e volume testing to be assigned to VolumeTestConfig.serverImage -const ( - NfsServerImage string = "k8s.gcr.io/volume-nfs:0.8" - IscsiServerImage string = "k8s.gcr.io/volume-iscsi:0.1" - GlusterfsServerImage string = "k8s.gcr.io/volume-gluster:0.2" - CephServerImage string = "k8s.gcr.io/volume-ceph:0.1" - RbdServerImage string = "k8s.gcr.io/volume-rbd:0.1" -) - const ( Kb int64 = 1000 Mb int64 = 1000 * Kb @@ -116,7 +108,7 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf config = VolumeTestConfig{ Namespace: namespace, Prefix: "nfs", - ServerImage: NfsServerImage, + ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer), ServerPorts: []int{2049}, } if len(args) > 0 { @@ -131,7 +123,7 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config Volume config = VolumeTestConfig{ Namespace: namespace, Prefix: "gluster", - ServerImage: GlusterfsServerImage, + ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer), ServerPorts: []int{24007, 24008, 49152}, } pod, ip = CreateStorageServer(cs, config) @@ -173,7 +165,7 @@ func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTest config = VolumeTestConfig{ Namespace: namespace, Prefix: "iscsi", - ServerImage: IscsiServerImage, + ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer), ServerPorts: []int{3260}, ServerVolumes: map[string]string{ // iSCSI container needs to insert modules from the host @@ -189,7 +181,7 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo config = VolumeTestConfig{ Namespace: namespace, Prefix: "rbd", - ServerImage: RbdServerImage, + ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer), ServerPorts: []int{6789}, ServerVolumes: map[string]string{ "/lib/modules": "/lib/modules", diff --git a/test/e2e/instrumentation/logging/utils/BUILD b/test/e2e/instrumentation/logging/utils/BUILD index 
c965ff43b53..35cd377400d 100644 --- a/test/e2e/instrumentation/logging/utils/BUILD +++ b/test/e2e/instrumentation/logging/utils/BUILD @@ -19,6 +19,7 @@ go_library( deps = [ "//pkg/apis/core:go_default_library", "//test/e2e/framework:go_default_library", + "//test/utils/image:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/test/e2e/instrumentation/logging/utils/logging_pod.go b/test/e2e/instrumentation/logging/utils/logging_pod.go index 10ef37fcf6e..8c4f1b4c2ed 100644 --- a/test/e2e/instrumentation/logging/utils/logging_pod.go +++ b/test/e2e/instrumentation/logging/utils/logging_pod.go @@ -21,10 +21,12 @@ import ( "time" "fmt" + api_v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + imageutils "k8s.io/kubernetes/test/utils/image" ) const ( @@ -101,7 +103,7 @@ func (p *loadLoggingPod) Start(f *framework.Framework) error { Containers: []api_v1.Container{ { Name: loggingContainerName, - Image: "k8s.gcr.io/logs-generator:v0.1.0", + Image: imageutils.GetE2EImage(imageutils.LogsGenerator), Env: []api_v1.EnvVar{ { Name: "LOGS_GENERATOR_LINES_TOTAL", diff --git a/test/e2e/instrumentation/monitoring/BUILD b/test/e2e/instrumentation/monitoring/BUILD index 4a0523411c5..65f123e0e51 100644 --- a/test/e2e/instrumentation/monitoring/BUILD +++ b/test/e2e/instrumentation/monitoring/BUILD @@ -21,6 +21,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/instrumentation/common:go_default_library", + "//test/utils/image:go_default_library", "//vendor/github.com/influxdata/influxdb/client/v2:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", diff --git 
a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go index ebe8d9740a3..b18a08dd5cd 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go @@ -25,6 +25,7 @@ import ( rbac "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + imageutils "k8s.io/kubernetes/test/utils/image" ) var ( @@ -98,7 +99,7 @@ func stackdriverExporterPodSpec(metricName string, metricValue int64) corev1.Pod Containers: []corev1.Container{ { Name: "stackdriver-exporter", - Image: "k8s.gcr.io/sd-dummy-exporter:v0.1.0", + Image: imageutils.GetE2EImage(imageutils.SDDummyExporter), ImagePullPolicy: corev1.PullPolicy("Always"), Command: []string{"/sd_dummy_exporter", "--pod-id=$(POD_ID)", "--metric-name=" + metricName, fmt.Sprintf("--metric-value=%v", metricValue)}, Env: []corev1.EnvVar{ diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index 2edb22c74b6..66ecefa9da2 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -242,7 +242,7 @@ func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) { Containers: []v1.Container{ { Name: "dns", - Image: "k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5", + Image: imageutils.GetE2EImage(imageutils.DNSMasq), Command: []string{ "/usr/sbin/dnsmasq", "-u", "root", diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index f5e82ec3470..90021d69dfe 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -57,6 +57,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/utils" vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere" + imageutils "k8s.io/kubernetes/test/utils/image" ) func DeleteCinderVolume(name string) error { @@ -260,7 +261,7 @@ var _ = utils.SIGDescribe("Volumes", func() { config := 
framework.VolumeTestConfig{ Namespace: namespace.Name, Prefix: "cephfs", - ServerImage: framework.CephServerImage, + ServerImage: imageutils.GetE2EImage(imageutils.VolumeCephServer), ServerPorts: []int{6789}, } diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index a54f6323c1d..bde65ac80d4 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -32,6 +32,7 @@ type ImageConfig struct { registry string name string version string + hasArch bool } func (i *ImageConfig) SetRegistry(registry string) { @@ -47,42 +48,66 @@ func (i *ImageConfig) SetVersion(version string) { } var ( - ClusterTester = ImageConfig{e2eRegistry, "clusterapi-tester", "1.0"} - CudaVectorAdd = ImageConfig{e2eRegistry, "cuda-vector-add", "1.0"} - Dnsutils = ImageConfig{e2eRegistry, "dnsutils", "1.0"} - EntrypointTester = ImageConfig{e2eRegistry, "entrypoint-tester", "1.0"} - Fakegitserver = ImageConfig{e2eRegistry, "fakegitserver", "1.0"} - GBFrontend = ImageConfig{sampleRegistry, "gb-frontend", "v5"} - GBRedisSlave = ImageConfig{sampleRegistry, "gb-redisslave", "v2"} - Goproxy = ImageConfig{e2eRegistry, "goproxy", "1.0"} - Hostexec = ImageConfig{e2eRegistry, "hostexec", "1.0"} - Iperf = ImageConfig{e2eRegistry, "iperf", "1.0"} - JessieDnsutils = ImageConfig{e2eRegistry, "jessie-dnsutils", "1.0"} - Kitten = ImageConfig{e2eRegistry, "kitten", "1.0"} - Liveness = ImageConfig{e2eRegistry, "liveness", "1.0"} - LogsGenerator = ImageConfig{e2eRegistry, "logs-generator", "1.0"} - Mounttest = ImageConfig{e2eRegistry, "mounttest", "1.0"} - MounttestUser = ImageConfig{e2eRegistry, "mounttest-user", "1.0"} - Nautilus = ImageConfig{e2eRegistry, "nautilus", "1.0"} - Net = ImageConfig{e2eRegistry, "net", "1.0"} - Netexec = ImageConfig{e2eRegistry, "netexec", "1.0"} - Nettest = ImageConfig{e2eRegistry, "nettest", "1.0"} - NginxSlim = ImageConfig{gcRegistry, "nginx-slim", "0.20"} - NginxSlimNew = ImageConfig{gcRegistry, "nginx-slim", "0.21"} - Nonewprivs = 
ImageConfig{e2eRegistry, "nonewprivs", "1.0"} - NoSnatTest = ImageConfig{e2eRegistry, "no-snat-test", "1.0"} - NoSnatTestProxy = ImageConfig{e2eRegistry, "no-snat-test-proxy", "1.0"} - NWayHTTP = ImageConfig{e2eRegistry, "n-way-http", "1.0"} - Pause = ImageConfig{gcRegistry, "pause", "3.0"} - Porter = ImageConfig{e2eRegistry, "porter", "1.0"} - PortForwardTester = ImageConfig{e2eRegistry, "port-forward-tester", "1.0"} - Redis = ImageConfig{e2eRegistry, "redis", "1.0"} - ResourceConsumer = ImageConfig{e2eRegistry, "resource-consumer", "1.3"} - ResourceController = ImageConfig{e2eRegistry, "resource-consumer/controller", "1.0"} - ServeHostname = ImageConfig{e2eRegistry, "serve-hostname", "1.0"} - TestWebserver = ImageConfig{e2eRegistry, "test-webserver", "1.0"} + AdmissionWebhook = ImageConfig{e2eRegistry, "k8s-sample-admission-webhook", "1.8v7", true} + APIServer = ImageConfig{e2eRegistry, "k8s-aggregator-sample-apiserver", "1.7v2", true} + AppArmorLoader = ImageConfig{gcRegistry, "apparmor-loader", "0.1", false} + BusyBox = ImageConfig{gcRegistry, "busybox", "1.24", false} + CheckMetadataConcealment = ImageConfig{gcRegistry, "check-metadata-concealment", "v0.0.2", false} + ClusterTester = ImageConfig{e2eRegistry, "clusterapi-tester", "1.0", true} + CudaVectorAdd = ImageConfig{e2eRegistry, "cuda-vector-add", "1.0", true} + Dnsutils = ImageConfig{e2eRegistry, "dnsutils", "1.0", true} + DNSMasq = ImageConfig{gcRegistry, "k8s-dns-dnsmasq", "1.14.5", true} + EchoServer = ImageConfig{gcRegistry, "echoserver", "1.6", false} + EntrypointTester = ImageConfig{e2eRegistry, "entrypoint-tester", "1.0", true} + E2ENet = ImageConfig{gcRegistry, "e2e-net", "1.0", true} + Fakegitserver = ImageConfig{e2eRegistry, "fakegitserver", "1.0", true} + GBFrontend = ImageConfig{sampleRegistry, "gb-frontend", "v5", true} + GBRedisSlave = ImageConfig{sampleRegistry, "gb-redisslave", "v2", true} + Goproxy = ImageConfig{e2eRegistry, "goproxy", "1.0", true} + Hostexec = ImageConfig{e2eRegistry, 
"hostexec", "1.0", true} + Iperf = ImageConfig{e2eRegistry, "iperf", "1.0", true} + JessieDnsutils = ImageConfig{e2eRegistry, "jessie-dnsutils", "1.0", true} + Kitten = ImageConfig{e2eRegistry, "kitten", "1.0", true} + Liveness = ImageConfig{e2eRegistry, "liveness", "1.0", true} + LogsGenerator = ImageConfig{gcRegistry, "logs-generator", "v0.1.0", false} + Mounttest = ImageConfig{e2eRegistry, "mounttest", "1.0", true} + MounttestUser = ImageConfig{e2eRegistry, "mounttest-user", "1.0", true} + Nautilus = ImageConfig{e2eRegistry, "nautilus", "1.0", true} + Net = ImageConfig{e2eRegistry, "net", "1.0", true} + Netexec = ImageConfig{e2eRegistry, "netexec", "1.0", true} + Nettest = ImageConfig{e2eRegistry, "nettest", "1.0", true} + NginxSlim = ImageConfig{gcRegistry, "nginx-slim", "0.20", true} + NginxSlimNew = ImageConfig{gcRegistry, "nginx-slim", "0.21", true} + Nonewprivs = ImageConfig{e2eRegistry, "nonewprivs", "1.0", true} + NoSnatTest = ImageConfig{e2eRegistry, "no-snat-test", "1.0", true} + NoSnatTestProxy = ImageConfig{e2eRegistry, "no-snat-test-proxy", "1.0", true} + NWayHTTP = ImageConfig{e2eRegistry, "n-way-http", "1.0", true} + // When these values are updated, also update cmd/kubelet/app/options/options.go + Pause = ImageConfig{gcRegistry, "pause", "3.0", false} + Porter = ImageConfig{e2eRegistry, "porter", "1.0", true} + PortForwardTester = ImageConfig{e2eRegistry, "port-forward-tester", "1.0", true} + Redis = ImageConfig{e2eRegistry, "redis", "1.0", true} + ResourceConsumer = ImageConfig{e2eRegistry, "resource-consumer", "1.3", true} + ResourceController = ImageConfig{e2eRegistry, "resource-consumer/controller", "1.0", true} + SDDummyExporter = ImageConfig{gcRegistry, "sd-dummy-exporter", "v0.1.0", false} + ServeHostname = ImageConfig{e2eRegistry, "serve-hostname", "1.0", true} + TestWebserver = ImageConfig{e2eRegistry, "test-webserver", "1.0", true} + VolumeNFSServer = ImageConfig{gcRegistry, "volume-nfs", "0.8", false} + VolumeISCSIServer = 
ImageConfig{gcRegistry, "volume-iscsi", "0.1", false} + VolumeGlusterServer = ImageConfig{gcRegistry, "volume-gluster", "0.2", false} + VolumeCephServer = ImageConfig{gcRegistry, "volume-ceph", "0.1", false} + VolumeRBDServer = ImageConfig{gcRegistry, "volume-rbd", "0.1", false} ) func GetE2EImage(image ImageConfig) string { - return fmt.Sprintf("%s/%s-%s:%s", image.registry, image.name, runtime.GOARCH, image.version) + return GetE2EImageWithArch(image, runtime.GOARCH) +} + +func GetE2EImageWithArch(image ImageConfig, arch string) string { + if image.hasArch { + return fmt.Sprintf("%s/%s-%s:%s", image.registry, image.name, arch, image.version) + } else { + return fmt.Sprintf("%s/%s:%s", image.registry, image.name, image.version) + + } } From b8af5964af988a3a5740296c5c3c77159e01f996 Mon Sep 17 00:00:00 2001 From: Fabiano Franz Date: Tue, 19 Dec 2017 16:37:12 -0200 Subject: [PATCH 425/794] fabiano no longer a thing --- OWNERS_ALIASES | 3 --- hack/OWNERS | 1 - test/OWNERS | 2 -- 3 files changed, 6 deletions(-) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 7c546c3a562..8f898647ea1 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -17,7 +17,6 @@ aliases: - adohe - brendandburns - deads2k - - fabianofranz - janetkuo - liggitt - pwittrock @@ -30,7 +29,6 @@ aliases: - dshulyak - eparis - ericchiang - - fabianofranz - ghodss - mengqiy - rootfs @@ -127,7 +125,6 @@ aliases: - slack - colemickens - foxish - - fabianofranz - pwittrock - AdoHe - lukemarsden diff --git a/hack/OWNERS b/hack/OWNERS index c99757021e8..5ff25ea901f 100644 --- a/hack/OWNERS +++ b/hack/OWNERS @@ -14,7 +14,6 @@ approvers: - cblecker - deads2k - eparis - - fabianofranz - fejta - ixdy - jbeda diff --git a/test/OWNERS b/test/OWNERS index 908b4f42895..0c2941ff4da 100644 --- a/test/OWNERS +++ b/test/OWNERS @@ -4,7 +4,6 @@ reviewers: - enisoc - enj # for test/integration/etcd/etcd_storage_path_test.go - erictune - - fabianofranz # for test/e2e/kubectl.go - foxish # for test/e2e/network-partition.go - 
gmarek - janetkuo @@ -32,7 +31,6 @@ approvers: - enj # for test/integration/etcd/etcd_storage_path_test.go - eparis - erictune - - fabianofranz # for test/e2e/kubectl.go - foxish # for test/e2e/network-partition.go - gmarek - janetkuo From f52628db600e9ceac4727595ed57c5b96cf8fb92 Mon Sep 17 00:00:00 2001 From: Rohit Agarwal Date: Tue, 19 Dec 2017 00:43:39 -0800 Subject: [PATCH 426/794] Deprecate the alpha Accelerators feature gate. Encourage people to use DevicePlugins instead. --- pkg/features/kube_features.go | 2 ++ pkg/kubelet/kubelet.go | 1 + 2 files changed, 3 insertions(+) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index d500a7fd3b5..f12e0c342c4 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -64,6 +64,8 @@ const ( // owner: @vishh // alpha: v1.6 // + // This is deprecated and will be removed in v1.11. Use DevicePlugins instead. + // // Enables support for GPUs as a schedulable resource. // Only Nvidia GPUs are supported as of v1.6. // Works only with Docker Container Runtime. diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 632ef952f3d..eb335301f31 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -875,6 +875,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewNoNewPrivsAdmitHandler(klet.containerRuntime)) if utilfeature.DefaultFeatureGate.Enabled(features.Accelerators) { if containerRuntime == kubetypes.DockerContainerRuntime { + glog.Warningln("Accelerators feature is deprecated and will be removed in v1.11. Please use device plugins instead. 
They can be enabled using the DevicePlugins feature gate.") if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, kubeDeps.DockerClientConfig); err != nil { return nil, err } From 216ec16592c29db5383cc06ddd7e3ffb8729a29e Mon Sep 17 00:00:00 2001 From: Yang Guo Date: Sun, 3 Dec 2017 13:39:05 -0800 Subject: [PATCH 427/794] Support passing kube-scheduler policy config --- cluster/gce/gci/configure-helper.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 0bf30051c9e..b0cc87dad6e 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -992,6 +992,14 @@ current-context: kube-scheduler EOF } +function create-kubescheduler-policy-config { + echo "Creating kube-scheduler policy config file" + mkdir -p /etc/srv/kubernetes/kube-scheduler + cat <<EOF >/etc/srv/kubernetes/kube-scheduler/policy-config +${SCHEDULER_POLICY_CONFIG} +EOF +} + function create-node-problem-detector-kubeconfig { echo "Creating node-problem-detector kubeconfig file" mkdir -p /var/lib/node-problem-detector @@ -1880,6 +1888,11 @@ function start-kube-scheduler { if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}" fi + if [[ -n "${SCHEDULER_POLICY_CONFIG:-}" ]]; then + create-kubescheduler-policy-config + params+=" --use-legacy-policy-config" + params+=" --policy-config-file=/etc/srv/kubernetes/kube-scheduler/policy-config" + fi local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag") # Remove salt comments and replace variables with values. 
From b4a47bc2284f0dc034d8bb1f6c6d5044e104bc76 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Mon, 18 Dec 2017 17:20:06 +0800 Subject: [PATCH 428/794] implement fakeIPSet in ipset util --- pkg/util/ipset/testing/fake.go | 100 ++++++++++++++---- pkg/util/ipset/testing/fake_test.go | 152 ++++++++++++++++++++++++++++ 2 files changed, 232 insertions(+), 20 deletions(-) create mode 100644 pkg/util/ipset/testing/fake_test.go diff --git a/pkg/util/ipset/testing/fake.go b/pkg/util/ipset/testing/fake.go index aedf3d21b3b..2a58bdd399d 100644 --- a/pkg/util/ipset/testing/fake.go +++ b/pkg/util/ipset/testing/fake.go @@ -17,67 +17,127 @@ limitations under the License. package testing import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/util/ipset" ) // FakeIPSet is a no-op implementation of ipset Interface type FakeIPSet struct { - Lines []byte + // version of ipset util + Version string + // The key of Sets map is the ip set name + Sets map[string]*ipset.IPSet + // The key of Entries map is the ip set name where the entries exists + Entries map[string]sets.String } -// NewFake create a new fake ipset interface. -func NewFake() *FakeIPSet { - return &FakeIPSet{} +// NewFake create a new fake ipset interface - it initialize the FakeIPSet. +func NewFake(version string) *FakeIPSet { + return &FakeIPSet{ + Version: version, + Sets: make(map[string]*ipset.IPSet), + Entries: make(map[string]sets.String), + } } // GetVersion is part of interface. -func (*FakeIPSet) GetVersion() (string, error) { - return "0.0", nil +func (f *FakeIPSet) GetVersion() (string, error) { + return f.Version, nil } -// FlushSet is part of interface. -func (*FakeIPSet) FlushSet(set string) error { +// FlushSet is part of interface. It deletes all entries from a named set but keeps the set itself. 
+func (f *FakeIPSet) FlushSet(set string) error { + if f.Entries == nil { + return fmt.Errorf("entries map can't be nil") + } + + // delete all entry elements + for true { + if _, has := f.Entries[set].PopAny(); has { + continue + } + break + } return nil } -// DestroySet is part of interface. -func (*FakeIPSet) DestroySet(set string) error { +// DestroySet is part of interface. It deletes both the entries and the set itself. +func (f *FakeIPSet) DestroySet(set string) error { + delete(f.Sets, set) + delete(f.Entries, set) return nil } // DestroyAllSets is part of interface. -func (*FakeIPSet) DestroyAllSets() error { +func (f *FakeIPSet) DestroyAllSets() error { + f.Sets = nil + f.Entries = nil return nil } // CreateSet is part of interface. -func (*FakeIPSet) CreateSet(set *ipset.IPSet, ignoreExistErr bool) error { +func (f *FakeIPSet) CreateSet(set *ipset.IPSet, ignoreExistErr bool) error { + if f.Sets[set.Name] != nil { + if !ignoreExistErr { + // already exists + return fmt.Errorf("Set cannot be created: set with the same name already exists") + } + return nil + } + f.Sets[set.Name] = set + // initialize entry map + f.Entries[set.Name] = sets.NewString() return nil } // AddEntry is part of interface. -func (*FakeIPSet) AddEntry(entry string, set string, ignoreExistErr bool) error { +func (f *FakeIPSet) AddEntry(entry string, set string, ignoreExistErr bool) error { + if f.Entries[set].Has(entry) { + if !ignoreExistErr { + // already exists + return fmt.Errorf("Element cannot be added to the set: it's already added") + } + return nil + } + f.Entries[set].Insert(entry) return nil } // DelEntry is part of interface. -func (*FakeIPSet) DelEntry(entry string, set string) error { +func (f *FakeIPSet) DelEntry(entry string, set string) error { + if f.Entries == nil { + return fmt.Errorf("entries map can't be nil") + } + f.Entries[set].Delete(entry) return nil } // TestEntry is part of interface. 
-func (*FakeIPSet) TestEntry(entry string, set string) (bool, error) { - return true, nil +func (f *FakeIPSet) TestEntry(entry string, set string) (bool, error) { + if f.Entries == nil { + return false, fmt.Errorf("entries map can't be nil") + } + found := f.Entries[set].Has(entry) + return found, nil } // ListEntries is part of interface. -func (*FakeIPSet) ListEntries(set string) ([]string, error) { - return nil, nil +func (f *FakeIPSet) ListEntries(set string) ([]string, error) { + if f.Entries == nil { + return nil, fmt.Errorf("entries map can't be nil") + } + return f.Entries[set].UnsortedList(), nil } // ListSets is part of interface. -func (*FakeIPSet) ListSets() ([]string, error) { - return nil, nil +func (f *FakeIPSet) ListSets() ([]string, error) { + res := []string{} + for set := range f.Sets { + res = append(res, set) + } + return res, nil } var _ = ipset.Interface(&FakeIPSet{}) diff --git a/pkg/util/ipset/testing/fake_test.go b/pkg/util/ipset/testing/fake_test.go new file mode 100644 index 00000000000..2128395cf8e --- /dev/null +++ b/pkg/util/ipset/testing/fake_test.go @@ -0,0 +1,152 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "testing" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/ipset" +) + +const testVersion = "v6.19" + +func TestSetEntry(t *testing.T) { + fake := NewFake(testVersion) + version, err := fake.GetVersion() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if version != testVersion { + t.Errorf("Unexpected version mismatch, expected: %s, got: %s", testVersion, version) + } + // create a set + set := &ipset.IPSet{ + Name: "foo", + SetType: ipset.HashIPPort, + HashFamily: ipset.ProtocolFamilyIPV4, + } + if err := fake.CreateSet(set, true); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // add two entries + fake.AddEntry("192.168.1.1,tcp:8080", set.Name, true) + fake.AddEntry("192.168.1.2,tcp:8081", set.Name, true) + entries, err := fake.ListEntries(set.Name) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(entries) != 2 { + t.Errorf("Expected 2 entries, got %d", len(entries)) + } + expectedEntries := sets.NewString("192.168.1.1,tcp:8080", "192.168.1.2,tcp:8081") + if !expectedEntries.Equal(sets.NewString(entries...)) { + t.Errorf("Unexpected entries mismatch, expected: %v, got: %v", expectedEntries, entries) + } + + // test entries + found, err := fake.TestEntry("192.168.1.1,tcp:8080", set.Name) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !found { + t.Errorf("Unexpected entry 192.168.1.1,tcp:8080 not found") + } + + found, err = fake.TestEntry("192.168.1.2,tcp:8081", set.Name) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !found { + t.Errorf("Unexpected entry 192.168.1.2,tcp:8081 not found") + } + + // delete entry from a given set + if err := fake.DelEntry("192.168.1.1,tcp:8080", set.Name); err != nil { + t.Errorf("Unexpected error: %v", err) + } + entries, err = fake.ListEntries(set.Name) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(entries) != 1 { + t.Errorf("Expected 1 entries, got %d", 
len(entries)) + } + expectedEntries = sets.NewString("192.168.1.2,tcp:8081") + if !expectedEntries.Equal(sets.NewString(entries...)) { + t.Errorf("Unexpected entries mismatch, expected: %v, got: %v", expectedEntries, entries) + } + + // Flush set + if err := fake.FlushSet(set.Name); err != nil { + t.Errorf("Unexpected error: %v", err) + } + entries, err = fake.ListEntries(set.Name) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(entries) != 0 { + t.Errorf("Expected 0 entries, got %d, entries: %v", len(entries), entries) + } + + // create another set + set2 := &ipset.IPSet{ + Name: "bar", + SetType: ipset.HashIPPortIP, + HashFamily: ipset.ProtocolFamilyIPV6, + } + if err := fake.CreateSet(set2, true); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + setList, err := fake.ListSets() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(setList) != 2 { + t.Errorf("Expected 2 sets, got %d", len(setList)) + } + expectedSets := sets.NewString("foo", "bar") + if !expectedSets.Equal(sets.NewString(setList...)) { + t.Errorf("Unexpected sets mismatch, expected: %v, got: %v", expectedSets, setList) + } + + // Destroy a given set + if err := fake.DestroySet(set.Name); err != nil { + t.Errorf("Unexpected error: %v", err) + } + if fake.Sets[set.Name] != nil { + t.Errorf("Unexpected set: %v", fake.Sets[set.Name]) + } + if fake.Entries[set.Name] != nil { + t.Errorf("Unexpected entries: %v", fake.Entries[set.Name]) + } + + // Destroy all sets + if err := fake.DestroyAllSets(); err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(fake.Sets) != 0 { + t.Errorf("Expected 0 sets, got %d, sets: %v", len(fake.Sets), fake.Sets) + } + if len(fake.Entries) != 0 { + t.Errorf("Expected 0 entries, got %d, entries: %v", len(fake.Entries), fake.Entries) + } +} + +// TODO: Test ignoreExistErr=false From cddc479904aa39484e4395fe56ea300348136c79 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Wed, 20 Dec 2017 10:08:09 +0800 Subject: 
[PATCH 429/794] replace syscall with sys/unix pkg --- pkg/proxy/ipvs/netlink_linux.go | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/pkg/proxy/ipvs/netlink_linux.go b/pkg/proxy/ipvs/netlink_linux.go index 2553e4c6243..44b08646a22 100644 --- a/pkg/proxy/ipvs/netlink_linux.go +++ b/pkg/proxy/ipvs/netlink_linux.go @@ -21,17 +21,11 @@ package ipvs import ( "fmt" "net" - "syscall" - // TODO: replace syscall with golang.org/x/sys/unix? - // The Go doc for syscall says: - // NOTE: This package is locked down. - // Code outside the standard Go repository should be migrated to use the corresponding package in the golang.org/x/sys repository. - // That is also where updates required by new systems or versions should be applied. - // See https://golang.org/s/go1.4-syscall for more information. "k8s.io/apimachinery/pkg/util/sets" "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" ) type netlinkHandle struct { @@ -55,7 +49,7 @@ func (h *netlinkHandle) EnsureAddressBind(address, devName string) (exist bool, } if err := h.AddrAdd(dev, &netlink.Addr{IPNet: netlink.NewIPNet(addr)}); err != nil { // "EEXIST" will be returned if the address is already bound to device - if err == syscall.Errno(syscall.EEXIST) { + if err == unix.EEXIST { return true, nil } return false, fmt.Errorf("error bind address: %s to interface: %s, err: %v", address, devName, err) @@ -136,9 +130,9 @@ func (h *netlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) } routeFilter := &netlink.Route{ - Table: syscall.RT_TABLE_LOCAL, - Type: syscall.RTN_LOCAL, - Protocol: syscall.RTPROT_KERNEL, + Table: unix.RT_TABLE_LOCAL, + Type: unix.RTN_LOCAL, + Protocol: unix.RTPROT_KERNEL, } filterMask := netlink.RT_FILTER_TABLE | netlink.RT_FILTER_TYPE | netlink.RT_FILTER_PROTOCOL From c26912cd1e487500830e53a8643dc3c770182e60 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Wed, 20 Dec 2017 10:57:58 +0800 Subject: [PATCH 430/794] update bazel BUILD --- 
pkg/proxy/ipvs/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/proxy/ipvs/BUILD b/pkg/proxy/ipvs/BUILD index 5efde3ae9c6..520ffee5178 100644 --- a/pkg/proxy/ipvs/BUILD +++ b/pkg/proxy/ipvs/BUILD @@ -75,6 +75,7 @@ go_library( ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/github.com/vishvananda/netlink:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], }), From 7e1d74ead2f74fdfc1e7fddbe45ede45ebe5b248 Mon Sep 17 00:00:00 2001 From: Yang Guo Date: Tue, 19 Dec 2017 20:50:09 -0800 Subject: [PATCH 431/794] node_e2e: do not return error if Docker's check-config.sh fails --- test/e2e_node/gke_environment_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/e2e_node/gke_environment_test.go b/test/e2e_node/gke_environment_test.go index 0b1c95c348e..dd88976b39c 100644 --- a/test/e2e_node/gke_environment_test.go +++ b/test/e2e_node/gke_environment_test.go @@ -139,10 +139,10 @@ func checkDockerConfig() error { if _, err := os.Stat(bin); os.IsNotExist(err) { continue } - output, err := runCommand(bin) - if err != nil { - return err - } + // We don't check the return code because it's OK if the script returns + // a non-zero exit code just because the configs in the whitelist are + // missing. + output, _ := runCommand(bin) for _, line := range strings.Split(output, "\n") { if !strings.Contains(line, "missing") { continue From ee5ea41f4d199b9cef022edc77185eab995c509d Mon Sep 17 00:00:00 2001 From: silenceshell Date: Wed, 20 Dec 2017 13:35:43 +0800 Subject: [PATCH 432/794] fix a typo `timemoutseconds` should be `timeoutSeconds`. 
--- staging/src/k8s.io/client-go/tools/cache/reflector.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector.go b/staging/src/k8s.io/client-go/tools/cache/reflector.go index a97b5f98abb..d52bf043338 100644 --- a/staging/src/k8s.io/client-go/tools/cache/reflector.go +++ b/staging/src/k8s.io/client-go/tools/cache/reflector.go @@ -302,12 +302,12 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { default: } - timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options = metav1.ListOptions{ ResourceVersion: resourceVersion, // We want to avoid situations of hanging watchers. Stop any wachers that do not // receive any events within the timeout window. - TimeoutSeconds: &timemoutseconds, + TimeoutSeconds: &timeoutSeconds, } r.metrics.numberOfWatches.Inc() From ec532389013faa4a4752936fcb9cf89a86650201 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Mon, 18 Dec 2017 20:31:17 -0800 Subject: [PATCH 433/794] Add '/version/*' to the system:discovery role, since that's what the open api spec says. 
--- plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go | 2 +- .../authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index b8e4db11103..243f2109dcf 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -169,7 +169,7 @@ func ClusterRoles() []rbac.ClusterRole { ObjectMeta: metav1.ObjectMeta{Name: "system:discovery"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get").URLs( - "/healthz", "/version", + "/healthz", "/version", "/version/*", // remove once swagger 1.2 support is removed "/swaggerapi", "/swaggerapi/*", // do not expand this pattern for openapi discovery docs diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index 908b6ee1b7e..a47756c6c81 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -640,6 +640,7 @@ items: - /swaggerapi - /swaggerapi/* - /version + - /version/* verbs: - get - apiVersion: rbac.authorization.k8s.io/v1 From 6a5990caa0328f49dced9573cc138ba3041d50ff Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Wed, 20 Dec 2017 15:53:43 +0800 Subject: [PATCH 434/794] add tests in ipvs/ipset_test.go --- pkg/proxy/ipvs/ipset_test.go | 133 +++++++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) diff --git a/pkg/proxy/ipvs/ipset_test.go b/pkg/proxy/ipvs/ipset_test.go index f1e1975dd29..d6a0dc7a926 100644 --- a/pkg/proxy/ipvs/ipset_test.go +++ b/pkg/proxy/ipvs/ipset_test.go @@ -18,6 +18,9 @@ package ipvs import ( "testing" + + utilipset "k8s.io/kubernetes/pkg/util/ipset" + fakeipset "k8s.io/kubernetes/pkg/util/ipset/testing" ) func 
TestCheckIPSetVersion(t *testing.T) { @@ -47,3 +50,133 @@ func TestCheckIPSetVersion(t *testing.T) { } } } + +const testIPSetVersion = "v6.19" + +func TestSyncIPSetEntries(t *testing.T) { + testCases := []struct { + setName string + setType utilipset.Type + ipv6 bool + activeEntries []string + currentEntries []string + expectedEntries []string + }{ + { // case 0 + setName: "foo", + setType: utilipset.HashIPPort, + ipv6: false, + activeEntries: []string{"172.17.0.4,tcp:80"}, + currentEntries: nil, + expectedEntries: []string{"172.17.0.4,tcp:80"}, + }, + { // case 1 + setName: "abz", + setType: utilipset.HashIPPort, + ipv6: true, + activeEntries: []string{"FE80::0202:B3FF:FE1E:8329,tcp:80"}, + currentEntries: []string{"FE80::0202:B3FF:FE1E:8329,tcp:80"}, + expectedEntries: []string{"FE80::0202:B3FF:FE1E:8329,tcp:80"}, + }, + { // case 2 + setName: "bca", + setType: utilipset.HashIPPort, + ipv6: false, + activeEntries: []string{"172.17.0.4,tcp:80", "172.17.0.5,tcp:80"}, + currentEntries: []string{"172.17.0.5,udp:53"}, + expectedEntries: []string{"172.17.0.4,tcp:80", "172.17.0.5,tcp:80"}, + }, + { // case 3 + setName: "bar", + setType: utilipset.HashIPPortIP, + ipv6: false, + activeEntries: []string{"172.17.0.4,tcp:80:172.17.0.4"}, + currentEntries: []string{"172.17.0.4,tcp:80:172.17.0.4"}, + expectedEntries: []string{"172.17.0.4,tcp:80:172.17.0.4"}, + }, + { // case 4 + setName: "baz", + setType: utilipset.HashIPPortIP, + ipv6: true, + activeEntries: []string{"FE80:0000:0000:0000:0202:B3FF:FE1E:8329,tcp:8080:FE80:0000:0000:0000:0202:B3FF:FE1E:8329"}, + currentEntries: []string{"1111:0000:0000:0000:0202:B3FF:FE1E:8329,tcp:8081:1111:0000:0000:0000:0202:B3FF:FE1E:8329:8081"}, + expectedEntries: []string{"FE80:0000:0000:0000:0202:B3FF:FE1E:8329,tcp:8080:FE80:0000:0000:0000:0202:B3FF:FE1E:8329"}, + }, + { // case 5 + setName: "NOPE", + setType: utilipset.HashIPPortIP, + ipv6: false, + activeEntries: []string{"172.17.0.4,tcp:80,172.17.0.9", "172.17.0.5,tcp:80,172.17.0.10"}, 
+ currentEntries: nil, + expectedEntries: []string{"172.17.0.4,tcp:80,172.17.0.9", "172.17.0.5,tcp:80,172.17.0.10"}, + }, + { // case 6 + setName: "ABC-DEF", + setType: utilipset.HashIPPortNet, + ipv6: false, + activeEntries: []string{"172.17.0.4,tcp:80,172.17.0.0/16", "172.17.0.5,tcp:80,172.17.0.0/16"}, + currentEntries: nil, + expectedEntries: []string{"172.17.0.4,tcp:80,172.17.0.0/16", "172.17.0.5,tcp:80,172.17.0.0/16"}, + }, + { // case 7 + setName: "zar", + setType: utilipset.HashIPPortNet, + ipv6: true, + activeEntries: []string{"FE80::8329,tcp:8800,2001:db8::/32"}, + currentEntries: []string{"FE80::8329,tcp:8800,2001:db8::/32"}, + expectedEntries: []string{"FE80::8329,tcp:8800,2001:db8::/32"}, + }, + { // case 8 + setName: "bbb", + setType: utilipset.HashIPPortNet, + ipv6: true, + activeEntries: nil, + currentEntries: []string{"FE80::8329,udp:8801,2001:db8::/32"}, + expectedEntries: nil, + }, + { // case 9 + setName: "AAA", + setType: utilipset.BitmapPort, + activeEntries: nil, + currentEntries: []string{"80"}, + expectedEntries: nil, + }, + { // case 10 + setName: "c-c-c", + setType: utilipset.BitmapPort, + activeEntries: []string{"8080", "9090"}, + currentEntries: []string{"80"}, + expectedEntries: []string{"8080", "9090"}, + }, + { // case 11 + setName: "NODE-PORT", + setType: utilipset.BitmapPort, + activeEntries: []string{"8080"}, + currentEntries: []string{"80", "9090", "8081", "8082"}, + expectedEntries: []string{"8080"}, + }, + } + + for i := range testCases { + set := NewIPSet(fakeipset.NewFake(testIPSetVersion), testCases[i].setName, testCases[i].setType, testCases[i].ipv6) + + if err := set.handle.CreateSet(&set.IPSet, true); err != nil { + t.Errorf("Unexpected error: %v", err) + } + for _, entry := range testCases[i].expectedEntries { + set.handle.AddEntry(entry, testCases[i].setName, true) + } + + set.activeEntries.Insert(testCases[i].activeEntries...) 
+ set.syncIPSetEntries() + for _, entry := range testCases[i].expectedEntries { + found, err := set.handle.TestEntry(entry, testCases[i].setName) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !found { + t.Errorf("Unexpected entry 172.17.0.4,tcp:80 not found in set foo") + } + } + } +} From 15f427ac2c67ab75d573b7ee4aa4596733588cf0 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Wed, 20 Dec 2017 15:54:07 +0800 Subject: [PATCH 435/794] fix ipvs/proxier_test.go compile error --- pkg/proxy/ipvs/proxier_test.go | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index f1b27ec525c..b962685440e 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -344,7 +344,7 @@ func TestGetNodeIPs(t *testing.T) { func TestNodePort(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) nodeIPv4 := net.ParseIP("100.101.102.103") nodeIPv6 := net.ParseIP("2001:db8::1:1") nodeIPs := sets.NewString(nodeIPv4.String(), nodeIPv6.String()) @@ -422,7 +422,7 @@ func TestNodePort(t *testing.T) { func TestNodePortNoEndpoint(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) nodeIP := net.ParseIP("100.101.102.103") fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}) svcIP := "10.20.30.41" @@ -476,7 +476,7 @@ func TestNodePortNoEndpoint(t *testing.T) { func TestClusterIPNoEndpoint(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) svcIP := "10.20.30.41" svcPort := 80 @@ -520,7 +520,7 @@ func TestClusterIPNoEndpoint(t *testing.T) { func TestClusterIP(t *testing.T) { ipt := iptablestest.NewFake() ipvs := 
ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) svcIPv4 := "10.20.30.41" @@ -627,7 +627,7 @@ func TestClusterIP(t *testing.T) { func TestExternalIPsNoEndpoint(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) svcIP := "10.20.30.41" svcPort := 80 @@ -682,7 +682,7 @@ func TestExternalIPsNoEndpoint(t *testing.T) { func TestExternalIPs(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) svcIP := "10.20.30.41" svcPort := 80 @@ -752,7 +752,7 @@ func TestExternalIPs(t *testing.T) { func TestLoadBalancer(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) svcIP := "10.20.30.41" svcPort := 80 @@ -804,7 +804,7 @@ func strPtr(s string) *string { func TestOnlyLocalNodePorts(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) nodeIP := net.ParseIP("100.101.102.103") fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}) svcIP := "10.20.30.41" @@ -882,11 +882,10 @@ func TestOnlyLocalNodePorts(t *testing.T) { } } -// NO help func TestOnlyLocalLoadBalancing(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) svcIP := "10.20.30.41" svcPort := 80 @@ -951,7 +950,7 @@ func addTestPort(array []api.ServicePort, name string, protocol api.Protocol, po func TestBuildServiceMapAddRemove(t *testing.T) { ipt := iptablestest.NewFake() ipvs 
:= ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) services := []*api.Service{ @@ -1057,7 +1056,7 @@ func TestBuildServiceMapAddRemove(t *testing.T) { func TestBuildServiceMapServiceHeadless(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) makeServiceMap(fp, @@ -1091,7 +1090,7 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) { func TestBuildServiceMapServiceTypeExternalName(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) makeServiceMap(fp, @@ -1119,7 +1118,7 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) { func TestBuildServiceMapServiceUpdate(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) servicev1 := makeTestService("somewhere", "some-service", func(svc *api.Service) { @@ -1202,7 +1201,7 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) { func TestSessionAffinity(t *testing.T) { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) nodeIP := net.ParseIP("100.101.102.103") fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}) svcIP := "10.20.30.41" @@ -2066,7 +2065,7 @@ func Test_updateEndpointsMap(t *testing.T) { for tci, tc := range testCases { ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil) fp.hostname = nodeName From 0f201037bdc651b6ee60208a704e5f0f50550283 Mon Sep 17 00:00:00 2001 From: 
m1093782566 Date: Wed, 20 Dec 2017 16:02:14 +0800 Subject: [PATCH 436/794] update bazel BUILD --- pkg/util/ipset/testing/BUILD | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/pkg/util/ipset/testing/BUILD b/pkg/util/ipset/testing/BUILD index 593b04157ca..001a6887b23 100644 --- a/pkg/util/ipset/testing/BUILD +++ b/pkg/util/ipset/testing/BUILD @@ -1,11 +1,14 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["fake.go"], importpath = "k8s.io/kubernetes/pkg/util/ipset/testing", visibility = ["//visibility:public"], - deps = ["//pkg/util/ipset:go_default_library"], + deps = [ + "//pkg/util/ipset:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], ) filegroup( @@ -21,3 +24,14 @@ filegroup( tags = ["automanaged"], visibility = ["//visibility:public"], ) + +go_test( + name = "go_default_test", + srcs = ["fake_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/ipset/testing", + library = ":go_default_library", + deps = [ + "//pkg/util/ipset:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) From 7d31b44bbe3111ab0f823a12d05ecb8b0925bc9a Mon Sep 17 00:00:00 2001 From: tianshapjq Date: Wed, 20 Dec 2017 17:10:31 +0800 Subject: [PATCH 437/794] cleanup useless functions and variables --- pkg/client/conditions/BUILD | 1 - pkg/client/conditions/conditions.go | 85 ----------------------------- 2 files changed, 86 deletions(-) diff --git a/pkg/client/conditions/BUILD b/pkg/client/conditions/BUILD index 5c381c4e119..a475e8f1cbd 100644 --- a/pkg/client/conditions/BUILD +++ b/pkg/client/conditions/BUILD @@ -10,7 +10,6 @@ go_library( srcs = ["conditions.go"], importpath = "k8s.io/kubernetes/pkg/client/conditions", deps = [ - "//pkg/api/v1/pod:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/pkg/client/conditions/conditions.go b/pkg/client/conditions/conditions.go index be4c0bfbf36..16255119545 100644 --- a/pkg/client/conditions/conditions.go +++ b/pkg/client/conditions/conditions.go @@ -23,17 +23,12 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) // ErrPodCompleted is returned by PodRunning or PodContainerRunning to indicate that // the pod has already reached completed state. var ErrPodCompleted = fmt.Errorf("pod ran to completion") -// ErrContainerTerminated is returned by PodContainerRunning in the intermediate -// state where the pod indicates it's still running, but its container is already terminated -var ErrContainerTerminated = fmt.Errorf("container terminated") - // PodRunning returns true if the pod is running, false if the pod has not yet reached running state, // returns ErrPodCompleted if the pod has run to completion, or an error in any other case. func PodRunning(event watch.Event) (bool, error) { @@ -70,86 +65,6 @@ func PodCompleted(event watch.Event) (bool, error) { return false, nil } -// PodRunningAndReady returns true if the pod is running and ready, false if the pod has not -// yet reached those states, returns ErrPodCompleted if the pod has run to completion, or -// an error in any other case. 
-func PodRunningAndReady(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *v1.Pod: - switch t.Status.Phase { - case v1.PodFailed, v1.PodSucceeded: - return false, ErrPodCompleted - case v1.PodRunning: - return podutil.IsPodReady(t), nil - } - } - return false, nil -} - -// PodNotPending returns true if the pod has left the pending state, false if it has not, -// or an error in any other case (such as if the pod was deleted). -func PodNotPending(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *v1.Pod: - switch t.Status.Phase { - case v1.PodPending: - return false, nil - default: - return true, nil - } - } - return false, nil -} - -// PodContainerRunning returns false until the named container has ContainerStatus running (at least once), -// and will return an error if the pod is deleted, runs to completion, or the container pod is not available. 
-func PodContainerRunning(containerName string) watch.ConditionFunc { - return func(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *v1.Pod: - switch t.Status.Phase { - case v1.PodRunning, v1.PodPending: - case v1.PodFailed, v1.PodSucceeded: - return false, ErrPodCompleted - default: - return false, nil - } - for _, s := range t.Status.ContainerStatuses { - if s.Name != containerName { - continue - } - if s.State.Terminated != nil { - return false, ErrContainerTerminated - } - return s.State.Running != nil, nil - } - for _, s := range t.Status.InitContainerStatuses { - if s.Name != containerName { - continue - } - if s.State.Terminated != nil { - return false, ErrContainerTerminated - } - return s.State.Running != nil, nil - } - return false, nil - } - return false, nil - } -} - // ServiceAccountHasSecrets returns true if the service account has at least one secret, // false if it does not, or an error. func ServiceAccountHasSecrets(event watch.Event) (bool, error) { From fe6bfd1dc0f07bce8d8bc0960f6c0c0e8d8b56b5 Mon Sep 17 00:00:00 2001 From: "Da K. Ma" Date: Wed, 20 Dec 2017 18:13:48 +0800 Subject: [PATCH 438/794] Renamed func name according to TODO. 
--- pkg/kubelet/kubeletconfig/controller.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/kubelet/kubeletconfig/controller.go b/pkg/kubelet/kubeletconfig/controller.go index 6ff4cd6b284..c12a595e991 100644 --- a/pkg/kubelet/kubeletconfig/controller.go +++ b/pkg/kubelet/kubeletconfig/controller.go @@ -143,8 +143,7 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) { // assert: now we know that a dynamicConfigDir was provided, and we can rely on that existing // make sure the filesystem is set up properly - // TODO(mtaufen): rename this to initializeDynamicConfigDir - if err := cc.initialize(); err != nil { + if err := cc.initializeDynamicConfigDir(); err != nil { return nil, err } @@ -244,10 +243,10 @@ func (cc *Controller) StartSync(client clientset.Interface, nodeName string) { } } -// initialize makes sure that the storage layers for various controller components are set up correctly -func (cc *Controller) initialize() error { +// initializeDynamicConfigDir makes sure that the storage layers for various controller components are set up correctly +func (cc *Controller) initializeDynamicConfigDir() error { utillog.Infof("ensuring filesystem is set up correctly") - // initialize local checkpoint storage location + // initializeDynamicConfigDir local checkpoint storage location return cc.checkpointStore.Initialize() } From ceb6dea1e05c9c5fd8b86bc1300a0ccf1bba2073 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Mon, 11 Dec 2017 14:46:03 +0100 Subject: [PATCH 439/794] test: e2e: support NFS test on overlayfs TL;DR; NFS is not supported over overlayfs. In CRI-O we default to overlayfs as the storage driver, therefore running the NFS test always fails to export the directory. This patch adds a volume mount from the host so /exports isn't on overlayfs anymore. 
Signed-off-by: Antonio Murdaca --- test/e2e/framework/volume_util.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/test/e2e/framework/volume_util.go b/test/e2e/framework/volume_util.go index 4c396c06676..4a0e8e7ac8f 100644 --- a/test/e2e/framework/volume_util.go +++ b/test/e2e/framework/volume_util.go @@ -83,6 +83,7 @@ type VolumeTestConfig struct { ServerArgs []string // Volumes needed to be mounted to the server container from the host // map -> + // if is empty, mount a tmpfs emptydir ServerVolumes map[string]string // Wait for the pod to terminate successfully // False indicates that the pod is long running @@ -106,10 +107,11 @@ type VolumeTest struct { // NFS-specific wrapper for CreateStorageServer. func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) { config = VolumeTestConfig{ - Namespace: namespace, - Prefix: "nfs", - ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer), - ServerPorts: []int{2049}, + Namespace: namespace, + Prefix: "nfs", + ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer), + ServerPorts: []int{2049}, + ServerVolumes: map[string]string{"": "/exports"}, } if len(args) > 0 { config.ServerArgs = args @@ -230,8 +232,12 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1. 
for src, dst := range config.ServerVolumes { mountName := fmt.Sprintf("path%d", i) volumes[i].Name = mountName - volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{ - Path: src, + if src == "" { + volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory} + } else { + volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{ + Path: src, + } } mounts[i].Name = mountName From e62952d02ba115a31b215dcb29752310bec1fa5d Mon Sep 17 00:00:00 2001 From: Yassine TIJANI Date: Mon, 18 Dec 2017 17:38:39 +0000 Subject: [PATCH 440/794] using consts to refer to predicate names --- .../algorithm/predicates/predicates.go | 22 +++++----- .../algorithmprovider/defaults/defaults.go | 40 +++++++++---------- .../defaults/defaults_test.go | 2 +- .../pkg/scheduler/core/generic_scheduler.go | 2 +- .../scheduler/core/generic_scheduler_test.go | 4 +- plugin/pkg/scheduler/factory/factory.go | 6 +-- plugin/pkg/scheduler/scheduler.go | 2 +- plugin/pkg/scheduler/scheduler_test.go | 5 +-- 8 files changed, 41 insertions(+), 42 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index 8d458401f98..7ae07750b29 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -49,8 +49,8 @@ import ( ) const ( - MatchInterPodAffinity = "MatchInterPodAffinity" - CheckVolumeBinding = "CheckVolumeBinding" + MatchInterPodAffinityPred = "MatchInterPodAffinity" + CheckVolumeBindingPred = "CheckVolumeBinding" CheckNodeConditionPred = "CheckNodeCondition" GeneralPred = "GeneralPredicates" HostNamePred = "HostName" @@ -67,7 +67,7 @@ const ( MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount" NoVolumeZoneConflictPred = "NoVolumeZoneConflict" CheckNodeMemoryPressurePred = "CheckNodeMemoryPressure" - CheckNodeDiskPressure = "CheckNodeDiskPressure" + CheckNodeDiskPressurePred = "CheckNodeDiskPressure" // 
DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE // GCE instances can have up to 16 PD volumes attached. DefaultMaxGCEPDVolumes = 16 @@ -95,19 +95,19 @@ const ( // For example: // https://github.com/kubernetes/kubernetes/blob/36a218e/plugin/pkg/scheduler/factory/factory.go#L422 -// IMPORTANT: this list contains the ordering of the predicates, if you develop a new predicates -// it is mandatory to add its name on this list. -// otherwise it won't be processed, see generic_scheduler#podFitsOnNode() -// the order is based on the restrictiveness & complexity of predicates -// design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md +// IMPORTANT NOTE: this list contains the ordering of the predicates, if you develop a new predicate +// it is mandatory to add its name to this list. +// Otherwise it won't be processed, see generic_scheduler#podFitsOnNode(). +// The order is based on the restrictiveness & complexity of predicates. +// Design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md var ( predicatesOrdering = []string{CheckNodeConditionPred, GeneralPred, HostNamePred, PodFitsHostPortsPred, MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred, PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred, checkServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, - MaxAzureDiskVolumeCountPred, CheckVolumeBinding, NoVolumeZoneConflictPred, - CheckNodeMemoryPressurePred, CheckNodeDiskPressure, MatchInterPodAffinity} + MaxAzureDiskVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred, + CheckNodeMemoryPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred} ) // NodeInfo: Other types for predicate functions... 
@@ -124,7 +124,7 @@ type CachedPersistentVolumeInfo struct { corelisters.PersistentVolumeLister } -func GetPredicatesOrdering() []string { +func PredicatesOrdering() []string { return predicatesOrdering } diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go index 254f5f2d01c..6ba618f3950 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -65,17 +65,17 @@ func init() { // Fit is defined based on the absence of port conflicts. // This predicate is actually a default predicate, because it is invoked from // predicates.GeneralPredicates() - factory.RegisterFitPredicate("PodFitsHostPorts", predicates.PodFitsHostPorts) + factory.RegisterFitPredicate(predicates.PodFitsHostPortsPred, predicates.PodFitsHostPorts) // Fit is determined by resource availability. // This predicate is actually a default predicate, because it is invoked from // predicates.GeneralPredicates() - factory.RegisterFitPredicate("PodFitsResources", predicates.PodFitsResources) + factory.RegisterFitPredicate(predicates.PodFitsResourcesPred, predicates.PodFitsResources) // Fit is determined by the presence of the Host parameter and a string match // This predicate is actually a default predicate, because it is invoked from // predicates.GeneralPredicates() - factory.RegisterFitPredicate("HostName", predicates.PodFitsHost) + factory.RegisterFitPredicate(predicates.HostNamePred, predicates.PodFitsHost) // Fit is determined by node selector query. - factory.RegisterFitPredicate("MatchNodeSelector", predicates.PodMatchNodeSelector) + factory.RegisterFitPredicate(predicates.MatchNodeSelectorPred, predicates.PodMatchNodeSelector) // Use equivalence class to speed up heavy predicates phase. 
factory.RegisterGetEquivalencePodFunction( @@ -117,62 +117,62 @@ func defaultPredicates() sets.String { return sets.NewString( // Fit is determined by volume zone requirements. factory.RegisterFitPredicateFactory( - "NoVolumeZoneConflict", + predicates.NoVolumeZoneConflictPred, func(args factory.PluginFactoryArgs) algorithm.FitPredicate { return predicates.NewVolumeZonePredicate(args.PVInfo, args.PVCInfo, args.StorageClassInfo) }, ), // Fit is determined by whether or not there would be too many AWS EBS volumes attached to the node factory.RegisterFitPredicateFactory( - "MaxEBSVolumeCount", + predicates.MaxEBSVolumeCountPred, func(args factory.PluginFactoryArgs) algorithm.FitPredicate { return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, args.PVInfo, args.PVCInfo) }, ), // Fit is determined by whether or not there would be too many GCE PD volumes attached to the node factory.RegisterFitPredicateFactory( - "MaxGCEPDVolumeCount", + predicates.MaxGCEPDVolumeCountPred, func(args factory.PluginFactoryArgs) algorithm.FitPredicate { return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, args.PVInfo, args.PVCInfo) }, ), // Fit is determined by whether or not there would be too many Azure Disk volumes attached to the node factory.RegisterFitPredicateFactory( - "MaxAzureDiskVolumeCount", + predicates.MaxAzureDiskVolumeCountPred, func(args factory.PluginFactoryArgs) algorithm.FitPredicate { return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, args.PVInfo, args.PVCInfo) }, ), // Fit is determined by inter-pod affinity. factory.RegisterFitPredicateFactory( - predicates.MatchInterPodAffinity, + predicates.MatchInterPodAffinityPred, func(args factory.PluginFactoryArgs) algorithm.FitPredicate { return predicates.NewPodAffinityPredicate(args.NodeInfo, args.PodLister) }, ), // Fit is determined by non-conflicting disk volumes. 
- factory.RegisterFitPredicate("NoDiskConflict", predicates.NoDiskConflict), + factory.RegisterFitPredicate(predicates.NoDiskConflictPred, predicates.NoDiskConflict), // GeneralPredicates are the predicates that are enforced by all Kubernetes components // (e.g. kubelet and all schedulers) - factory.RegisterFitPredicate("GeneralPredicates", predicates.GeneralPredicates), + factory.RegisterFitPredicate(predicates.GeneralPred, predicates.GeneralPredicates), // Fit is determined by node memory pressure condition. - factory.RegisterFitPredicate("CheckNodeMemoryPressure", predicates.CheckNodeMemoryPressurePredicate), + factory.RegisterFitPredicate(predicates.CheckNodeMemoryPressurePred, predicates.CheckNodeMemoryPressurePredicate), // Fit is determined by node disk pressure condition. - factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate), + factory.RegisterFitPredicate(predicates.CheckNodeDiskPressurePred, predicates.CheckNodeDiskPressurePredicate), // Fit is determined by node conditions: not ready, network unavailable or out of disk. - factory.RegisterMandatoryFitPredicate("CheckNodeCondition", predicates.CheckNodeConditionPredicate), + factory.RegisterMandatoryFitPredicate(predicates.CheckNodeConditionPred, predicates.CheckNodeConditionPredicate), // Fit is determined based on whether a pod can tolerate all of the node's taints - factory.RegisterFitPredicate("PodToleratesNodeTaints", predicates.PodToleratesNodeTaints), + factory.RegisterFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints), // Fit is determined by volume topology requirements. 
factory.RegisterFitPredicateFactory( - predicates.CheckVolumeBinding, + predicates.CheckVolumeBindingPred, func(args factory.PluginFactoryArgs) algorithm.FitPredicate { return predicates.NewVolumeBindingPredicate(args.VolumeBinder) }, @@ -185,18 +185,18 @@ func ApplyFeatureGates() { if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) { // Remove "CheckNodeCondition" predicate - factory.RemoveFitPredicate("CheckNodeCondition") + factory.RemoveFitPredicate(predicates.CheckNodeConditionPred) // Remove Key "CheckNodeCondition" From All Algorithm Provider // The key will be removed from all providers which in algorithmProviderMap[] // if you just want remove specific provider, call func RemovePredicateKeyFromAlgoProvider() - factory.RemovePredicateKeyFromAlgorithmProviderMap("CheckNodeCondition") + factory.RemovePredicateKeyFromAlgorithmProviderMap(predicates.CheckNodeConditionPred) // Fit is determined based on whether a pod can tolerate all of the node's taints - factory.RegisterMandatoryFitPredicate("PodToleratesNodeTaints", predicates.PodToleratesNodeTaints) + factory.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints) // Insert Key "PodToleratesNodeTaints" To All Algorithm Provider // The key will insert to all providers which in algorithmProviderMap[] // if you just want insert to specific provider, call func InsertPredicateKeyToAlgoProvider() - factory.InsertPredicateKeyToAlgorithmProviderMap("PodToleratesNodeTaints") + factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred) glog.Warningf("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory") } diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults_test.go b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults_test.go index a6aa09aadfd..16fc1b75e76 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults_test.go +++ 
b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults_test.go @@ -78,7 +78,7 @@ func TestDefaultPredicates(t *testing.T) { "CheckNodeDiskPressure", "CheckNodeCondition", "PodToleratesNodeTaints", - predicates.CheckVolumeBinding, + predicates.CheckVolumeBindingPred, ) if expected := defaultPredicates(); !result.Equal(expected) { diff --git a/plugin/pkg/scheduler/core/generic_scheduler.go b/plugin/pkg/scheduler/core/generic_scheduler.go index ae2ccdfac3e..e9e7e95a6c2 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler.go +++ b/plugin/pkg/scheduler/core/generic_scheduler.go @@ -444,7 +444,7 @@ func podFitsOnNode( // TODO(bsalamat): consider using eCache and adding proper eCache invalidations // when pods are nominated or their nominations change. eCacheAvailable = eCacheAvailable && !podsAdded - for _, predicateKey := range predicates.GetPredicatesOrdering() { + for _, predicateKey := range predicates.PredicatesOrdering() { //TODO (yastij) : compute average predicate restrictiveness to export it as promethus metric if predicate, exist := predicateFuncs[predicateKey]; exist { if eCacheAvailable { diff --git a/plugin/pkg/scheduler/core/generic_scheduler_test.go b/plugin/pkg/scheduler/core/generic_scheduler_test.go index 1b1b9d15c6f..4c09ba66956 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler_test.go +++ b/plugin/pkg/scheduler/core/generic_scheduler_test.go @@ -43,7 +43,7 @@ import ( ) var ( - order = []string{"false", "true", "matches", "nopods", predicates.MatchInterPodAffinity} + order = []string{"false", "true", "matches", "nopods", predicates.MatchInterPodAffinityPred} ) func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { @@ -872,7 +872,7 @@ func TestSelectNodesForPreemption(t *testing.T) { nodes = append(nodes, node) } if test.addAffinityPredicate { - test.predicates[predicates.MatchInterPodAffinity] = 
algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods)) + test.predicates[predicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods)) } nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes) nodeToPods, err := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil) diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index 5b9ccc80adc..45494fc73bb 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -409,7 +409,7 @@ func (c *configFactory) invalidatePredicatesForPv(pv *v1.PersistentVolume) { if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { // Add/delete impacts the available PVs to choose from - invalidPredicates.Insert(predicates.CheckVolumeBinding) + invalidPredicates.Insert(predicates.CheckVolumeBindingPred) } c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates) @@ -480,7 +480,7 @@ func (c *configFactory) invalidatePredicatesForPvc(pvc *v1.PersistentVolumeClaim if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { // Add/delete impacts the available PVs to choose from - invalidPredicates.Insert(predicates.CheckVolumeBinding) + invalidPredicates.Insert(predicates.CheckVolumeBindingPred) } c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates) } @@ -491,7 +491,7 @@ func (c *configFactory) invalidatePredicatesForPvcUpdate(old, new *v1.Persistent if old.Spec.VolumeName != new.Spec.VolumeName { if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { // PVC volume binding has changed - invalidPredicates.Insert(predicates.CheckVolumeBinding) + invalidPredicates.Insert(predicates.CheckVolumeBindingPred) } // The bound volume type may change 
invalidPredicates.Insert(maxPDVolumeCountPredicateKeys...) diff --git a/plugin/pkg/scheduler/scheduler.go b/plugin/pkg/scheduler/scheduler.go index b69aacc8eaf..b58123eca27 100644 --- a/plugin/pkg/scheduler/scheduler.go +++ b/plugin/pkg/scheduler/scheduler.go @@ -278,7 +278,7 @@ func (sched *Scheduler) assumeAndBindVolumes(assumed *v1.Pod, host string) error err = fmt.Errorf("Volume binding started, waiting for completion") if bindingRequired { if sched.config.Ecache != nil { - invalidPredicates := sets.NewString(predicates.CheckVolumeBinding) + invalidPredicates := sets.NewString(predicates.CheckVolumeBindingPred) sched.config.Ecache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates) } diff --git a/plugin/pkg/scheduler/scheduler_test.go b/plugin/pkg/scheduler/scheduler_test.go index c017cc4219c..cc943f6c7af 100644 --- a/plugin/pkg/scheduler/scheduler_test.go +++ b/plugin/pkg/scheduler/scheduler_test.go @@ -43,8 +43,6 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" ) -var order = []string{"VolumeBindingChecker"} - type fakeBinder struct { b func(binding *v1.Binding) error } @@ -621,7 +619,7 @@ func setupTestSchedulerWithVolumeBinding(fakeVolumeBinder *volumebinder.VolumeBi scache.AddNode(&testNode) predicateMap := map[string]algorithm.FitPredicate{ - "VolumeBindingChecker": predicates.NewVolumeBindingPredicate(fakeVolumeBinder), + predicates.CheckVolumeBindingPred: predicates.NewVolumeBindingPredicate(fakeVolumeBinder), } recorder := broadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"}) @@ -639,6 +637,7 @@ func makePredicateError(failReason string) error { } func TestSchedulerWithVolumeBinding(t *testing.T) { + order := []string{predicates.CheckVolumeBindingPred, predicates.GeneralPred} predicates.SetPredicatesOrdering(order) findErr := fmt.Errorf("find err") assumeErr := fmt.Errorf("assume err") From b2a6eb99c1b252b2e95cfc4141a2a4a648eb3422 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Wed, 20 
Dec 2017 16:30:07 +0200 Subject: [PATCH 441/794] Evicted pods should respawn --- .../layers/kubernetes-master/reactive/kubernetes_master.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index ff764bf8080..aac738bdc2c 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -1241,7 +1241,8 @@ def all_kube_system_pods_running(): result = json.loads(output) for pod in result['items']: status = pod['status']['phase'] - if status != 'Running': + # Evicted nodes should re-spawn + if status != 'Running' and pod['status']['reason'] != 'Evicted': return False return True From d3ca7a0eb8198aa1e4e1eb21596175d941a5c4a2 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Wed, 20 Dec 2017 16:56:18 +0200 Subject: [PATCH 442/794] reason key should exist --- .../layers/kubernetes-master/reactive/kubernetes_master.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index aac738bdc2c..44dcb7bc86e 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -1242,7 +1242,9 @@ def all_kube_system_pods_running(): for pod in result['items']: status = pod['status']['phase'] # Evicted nodes should re-spawn - if status != 'Running' and pod['status']['reason'] != 'Evicted': + if status != 'Running' and \ + ('reason' in pod['status'].keys() + and pod['status']['reason'] != 'Evicted'): return False return True From c9688155f6b69c97f8962aeed9f505d8eaf91ff5 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Wed, 20 Dec 2017 17:14:00 +0200 Subject: [PATCH 443/794] Much 
better --- .../layers/kubernetes-master/reactive/kubernetes_master.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 44dcb7bc86e..196f39e80e4 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -1243,8 +1243,7 @@ def all_kube_system_pods_running(): status = pod['status']['phase'] # Evicted nodes should re-spawn if status != 'Running' and \ - ('reason' in pod['status'].keys() - and pod['status']['reason'] != 'Evicted'): + pod['status'].get('reason', '') != 'Evicted': return False return True From bd8eaa7f18d6fba71f188c0484afc1e2d8700cd1 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Wed, 20 Dec 2017 17:25:54 +0200 Subject: [PATCH 444/794] Even better --- .../juju/layers/kubernetes-master/reactive/kubernetes_master.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 196f39e80e4..d27caf59f57 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -1243,7 +1243,7 @@ def all_kube_system_pods_running(): status = pod['status']['phase'] # Evicted nodes should re-spawn if status != 'Running' and \ - pod['status'].get('reason', '') != 'Evicted': + pod['status'].get('reason', '') != 'Evicted': return False return True From b06a5a60279355de1804cc360f28978a92c01ec8 Mon Sep 17 00:00:00 2001 From: Beata Skiba Date: Wed, 6 Dec 2017 10:14:58 +0100 Subject: [PATCH 445/794] Add custom metrics e2e test with two metrics. Tests a scenario where a pod is scaled based on two custom metrics. 
--- .../autoscaling/custom_metrics_autoscaling.go | 81 ++++++++++++++----- test/e2e/instrumentation/monitoring/BUILD | 1 - .../monitoring/custom_metrics_deployments.go | 76 +++++++++++------ 3 files changed, 112 insertions(+), 46 deletions(-) diff --git a/test/e2e/autoscaling/custom_metrics_autoscaling.go b/test/e2e/autoscaling/custom_metrics_autoscaling.go index 15ceadcc35d..5d002bf5055 100644 --- a/test/e2e/autoscaling/custom_metrics_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_autoscaling.go @@ -51,23 +51,58 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() { initialReplicas := 2 scaledReplicas := 1 - deployment := monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 100) - customMetricTest(f, f.ClientSet, podsHPA(f.Namespace.ObjectMeta.Name), deployment, nil, initialReplicas, scaledReplicas) + // metric should cause scale down + metricValue := int64(100) + metricTarget := 2 * metricValue + deployment := monitoring.SimpleStackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue) + customMetricTest(f, f.ClientSet, simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, nil, initialReplicas, scaledReplicas) }) It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() { initialReplicas := 2 scaledReplicas := 1 - deployment := monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 100) - pod := monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.ObjectMeta.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100) - customMetricTest(f, f.ClientSet, objectHPA(f.Namespace.ObjectMeta.Name), deployment, pod, 
initialReplicas, scaledReplicas) + // metric should cause scale down + metricValue := int64(100) + metricTarget := 2 * metricValue + deployment := monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue) + pod := monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue) + customMetricTest(f, f.ClientSet, objectHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, pod, initialReplicas, scaledReplicas) }) It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() { initialReplicas := 2 scaledReplicas := 1 - deployment := monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 100) - customMetricTest(f, f.ClientSet, podsHPA(f.Namespace.ObjectMeta.Name), deployment, nil, initialReplicas, scaledReplicas) + // metric should cause scale down + metricValue := int64(100) + metricTarget := 2 * metricValue + deployment := monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue) + customMetricTest(f, f.ClientSet, simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, nil, initialReplicas, scaledReplicas) + }) + + It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() { + initialReplicas := 1 + scaledReplicas := 3 + // metric 1 would cause a scale down, if not for metric 2 + metric1Value := int64(100) + metric1Target := 2 * metric1Value + // metric2 should cause a scale up + metric2Value := int64(200) + metric2Target := int64(0.5 * float64(metric2Value)) + containers := []monitoring.CustomMetricContainerSpec{ + { + Name: "stackdriver-exporter-metric1", + MetricName: "metric1", + MetricValue: metric1Value, + }, + { + Name: 
"stackdriver-exporter-metric2", + MetricName: "metric2", + MetricValue: metric2Value, + }, + } + metricTargets := map[string]int64{"metric1": metric1Target, "metric2": metric2Target} + deployment := monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers) + customMetricTest(f, f.ClientSet, podsHPA(f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, metricTargets), deployment, nil, initialReplicas, scaledReplicas) }) }) @@ -153,35 +188,41 @@ func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, d } } -func podsHPA(namespace string) *as.HorizontalPodAutoscaler { +func simplePodsHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler { + return podsHPA(namespace, stackdriverExporterDeployment, map[string]int64{monitoring.CustomMetricName: metricTarget}) +} + +func podsHPA(namespace string, deploymentName string, metricTargets map[string]int64) *as.HorizontalPodAutoscaler { var minReplicas int32 = 1 + metrics := []as.MetricSpec{} + for metric, target := range metricTargets { + metrics = append(metrics, as.MetricSpec{ + Type: as.PodsMetricSourceType, + Pods: &as.PodsMetricSource{ + MetricName: metric, + TargetAverageValue: *resource.NewQuantity(target, resource.DecimalSI), + }, + }) + } return &as.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "custom-metrics-pods-hpa", Namespace: namespace, }, Spec: as.HorizontalPodAutoscalerSpec{ - Metrics: []as.MetricSpec{ - { - Type: as.PodsMetricSourceType, - Pods: &as.PodsMetricSource{ - MetricName: monitoring.CustomMetricName, - TargetAverageValue: *resource.NewQuantity(200, resource.DecimalSI), - }, - }, - }, + Metrics: metrics, MaxReplicas: 3, MinReplicas: &minReplicas, ScaleTargetRef: as.CrossVersionObjectReference{ APIVersion: "extensions/v1beta1", Kind: "Deployment", - Name: stackdriverExporterDeployment, + Name: deploymentName, }, }, } } -func objectHPA(namespace string) 
*as.HorizontalPodAutoscaler { +func objectHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler { var minReplicas int32 = 1 return &as.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ @@ -198,7 +239,7 @@ func objectHPA(namespace string) *as.HorizontalPodAutoscaler { Kind: "Pod", Name: stackdriverExporterPod, }, - TargetValue: *resource.NewQuantity(200, resource.DecimalSI), + TargetValue: *resource.NewQuantity(metricTarget, resource.DecimalSI), }, }, }, diff --git a/test/e2e/instrumentation/monitoring/BUILD b/test/e2e/instrumentation/monitoring/BUILD index 65f123e0e51..4a0523411c5 100644 --- a/test/e2e/instrumentation/monitoring/BUILD +++ b/test/e2e/instrumentation/monitoring/BUILD @@ -21,7 +21,6 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/instrumentation/common:go_default_library", - "//test/utils/image:go_default_library", "//vendor/github.com/influxdata/influxdb/client/v2:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go index b18a08dd5cd..b6e861575ee 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go @@ -25,14 +25,14 @@ import ( rbac "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" - imageutils "k8s.io/kubernetes/test/utils/image" ) var ( - CustomMetricName = "foo" - UnusedMetricName = "unused" - CustomMetricValue = int64(448) - UnusedMetricValue = int64(446) + CustomMetricName = "foo" + UnusedMetricName = "unused" + CustomMetricValue = int64(448) + UnusedMetricValue = int64(446) + StackdriverExporter = "stackdriver-exporter" // HPAPermissions is a ClusterRoleBinding that grants 
unauthenticated user permissions granted for // HPA for testing purposes, i.e. it should grant permission to read custom metrics. HPAPermissions = &rbac.ClusterRoleBinding{ @@ -54,9 +54,37 @@ var ( } ) -// StackdriverExporterDeployment is a Deployment of simple application that exports a metric of +// CustomMetricContainerSpec allows to specify a config for StackdriverExporterDeployment +// with multiple containers exporting different metrics. +type CustomMetricContainerSpec struct { + Name string + MetricName string + MetricValue int64 +} + +// SimpleStackdriverExporterDeployment is a Deployment of simple application that exports a metric of // fixed value to Stackdriver in a loop. -func StackdriverExporterDeployment(name, namespace string, replicas int32, metricValue int64) *extensions.Deployment { +func SimpleStackdriverExporterDeployment(name, namespace string, replicas int32, metricValue int64) *extensions.Deployment { + return StackdriverExporterDeployment(name, namespace, replicas, + []CustomMetricContainerSpec{ + { + Name: StackdriverExporter, + MetricName: CustomMetricName, + MetricValue: metricValue, + }, + }) +} + +// StackdriverExporterDeployment is a Deployment of an application that can expose +// an arbitrary amount of metrics of fixed value to Stackdriver in a loop. Each metric +// is exposed by a different container in one pod. +// The metric names and values are configured via the containers parameter. 
+func StackdriverExporterDeployment(name, namespace string, replicas int32, containers []CustomMetricContainerSpec) *extensions.Deployment { + podSpec := corev1.PodSpec{Containers: []corev1.Container{}} + for _, containerSpec := range containers { + podSpec.Containers = append(podSpec.Containers, stackdriverExporterContainerSpec(containerSpec.Name, containerSpec.MetricName, containerSpec.MetricValue)) + } + return &extensions.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -72,7 +100,7 @@ func StackdriverExporterDeployment(name, namespace string, replicas int32, metri "name": name, }, }, - Spec: stackdriverExporterPodSpec(CustomMetricName, metricValue), + Spec: podSpec, }, Replicas: &replicas, }, @@ -90,31 +118,29 @@ func StackdriverExporterPod(podName, namespace, podLabel, metricName string, met "name": podLabel, }, }, - Spec: stackdriverExporterPodSpec(metricName, metricValue), + Spec: corev1.PodSpec{ + Containers: []corev1.Container{stackdriverExporterContainerSpec(StackdriverExporter, metricName, metricValue)}, + }, } } -func stackdriverExporterPodSpec(metricName string, metricValue int64) corev1.PodSpec { - return corev1.PodSpec{ - Containers: []corev1.Container{ +func stackdriverExporterContainerSpec(name string, metricName string, metricValue int64) corev1.Container { + return corev1.Container{ + Name: name, + Image: "gcr.io/google-containers/sd-dummy-exporter:v0.1.0", + ImagePullPolicy: corev1.PullPolicy("Always"), + Command: []string{"/sd_dummy_exporter", "--pod-id=$(POD_ID)", "--metric-name=" + metricName, fmt.Sprintf("--metric-value=%v", metricValue)}, + Env: []corev1.EnvVar{ { - Name: "stackdriver-exporter", - Image: imageutils.GetE2EImage(imageutils.SDDummyExporter), - ImagePullPolicy: corev1.PullPolicy("Always"), - Command: []string{"/sd_dummy_exporter", "--pod-id=$(POD_ID)", "--metric-name=" + metricName, fmt.Sprintf("--metric-value=%v", metricValue)}, - Env: []corev1.EnvVar{ - { - Name: "POD_ID", - ValueFrom: &corev1.EnvVarSource{ - 
FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.uid", - }, - }, + Name: "POD_ID", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", }, }, - Ports: []corev1.ContainerPort{{ContainerPort: 80}}, }, }, + Ports: []corev1.ContainerPort{{ContainerPort: 80}}, } } From a91e2dc4d2f7b8cb4b24b281bed704e633d2b9e5 Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Wed, 20 Dec 2017 18:02:52 +0100 Subject: [PATCH 446/794] security_context_test.go(TestVerifyRunAsNonRoot): add more test cases. --- .../kuberuntime/security_context_test.go | 41 ++++++++++++++++--- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/pkg/kubelet/kuberuntime/security_context_test.go b/pkg/kubelet/kuberuntime/security_context_test.go index 70375ec645f..d0aa922d5f5 100644 --- a/pkg/kubelet/kuberuntime/security_context_test.go +++ b/pkg/kubelet/kuberuntime/security_context_test.go @@ -45,16 +45,20 @@ func TestVerifyRunAsNonRoot(t *testing.T) { } rootUser := int64(0) + anyUser := int64(1000) runAsNonRootTrue := true runAsNonRootFalse := false for _, test := range []struct { - desc string - sc *v1.SecurityContext - fail bool + desc string + sc *v1.SecurityContext + uid *int64 + username string + fail bool }{ { desc: "Pass if SecurityContext is not set", sc: nil, + uid: &rootUser, fail: false, }, { @@ -62,6 +66,7 @@ func TestVerifyRunAsNonRoot(t *testing.T) { sc: &v1.SecurityContext{ RunAsUser: &rootUser, }, + uid: &rootUser, fail: false, }, { @@ -69,6 +74,7 @@ func TestVerifyRunAsNonRoot(t *testing.T) { sc: &v1.SecurityContext{ RunAsNonRoot: &runAsNonRootFalse, }, + uid: &rootUser, fail: false, }, { @@ -77,6 +83,7 @@ func TestVerifyRunAsNonRoot(t *testing.T) { RunAsNonRoot: &runAsNonRootFalse, RunAsUser: &rootUser, }, + uid: &rootUser, fail: false, }, { @@ -85,6 +92,7 @@ func TestVerifyRunAsNonRoot(t *testing.T) { RunAsNonRoot: &runAsNonRootTrue, RunAsUser: &rootUser, }, + uid: &rootUser, fail: true, }, { @@ -92,12 +100,35 @@ func 
TestVerifyRunAsNonRoot(t *testing.T) { sc: &v1.SecurityContext{ RunAsNonRoot: &runAsNonRootTrue, }, + uid: &rootUser, fail: true, }, + { + desc: "Fail if image's username is set and RunAsNonRoot is true", + sc: &v1.SecurityContext{ + RunAsNonRoot: &runAsNonRootTrue, + }, + username: "test", + fail: true, + }, + { + desc: "Pass if image's user is non-root and RunAsNonRoot is true", + sc: &v1.SecurityContext{ + RunAsNonRoot: &runAsNonRootTrue, + }, + uid: &anyUser, + fail: false, + }, + { + desc: "Pass if container's user and image's user aren't set and RunAsNonRoot is true", + sc: &v1.SecurityContext{ + RunAsNonRoot: &runAsNonRootTrue, + }, + fail: false, + }, } { pod.Spec.Containers[0].SecurityContext = test.sc - uid := int64(0) - err := verifyRunAsNonRoot(pod, &pod.Spec.Containers[0], &uid, "") + err := verifyRunAsNonRoot(pod, &pod.Spec.Containers[0], test.uid, test.username) if test.fail { assert.Error(t, err, test.desc) } else { From 6738da1d2879b8c28f636a670c62d5c824f597b2 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 20 Dec 2017 12:05:36 -0500 Subject: [PATCH 447/794] Fix problem accessing private docker registries In 027c8b9ef27ff85fb9eb4a18f04e3809d4065c7a, we added code to move from .dockercfg to config.json file. 
But we forgot to use the right secret type and the key to store the base64'ed creds --- hack/make-rules/test-cmd-util.sh | 4 ++-- pkg/kubectl/secret_for_docker_registry.go | 14 +++++++------- pkg/kubectl/secret_for_docker_registry_test.go | 18 +++++++++--------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index 4690d31439b..9e05ffa867d 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -2227,8 +2227,8 @@ run_secrets_test() { kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='test-user@test.com' --namespace=test-secrets # Post-condition: secret exists and has expected values kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret' - kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockercfg' - [[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockercfg:')" ]] + kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockerconfigjson' + [[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockerconfigjson:')" ]] # Clean-up kubectl delete secret test-secret --namespace=test-secrets diff --git a/pkg/kubectl/secret_for_docker_registry.go b/pkg/kubectl/secret_for_docker_registry.go index fc1e15b6b91..cad90cfe1a2 100644 --- a/pkg/kubectl/secret_for_docker_registry.go +++ b/pkg/kubectl/secret_for_docker_registry.go @@ -85,15 +85,15 @@ func (s SecretForDockerRegistryGeneratorV1) StructuredGenerate() (runtime.Object if err := s.validate(); err != nil { return nil, err } - dockercfgContent, err := handleDockercfgContent(s.Username, s.Password, s.Email, s.Server) + dockercfgJsonContent, err := 
handleDockerCfgJsonContent(s.Username, s.Password, s.Email, s.Server) if err != nil { return nil, err } secret := &v1.Secret{} secret.Name = s.Name - secret.Type = v1.SecretTypeDockercfg + secret.Type = v1.SecretTypeDockerConfigJson secret.Data = map[string][]byte{} - secret.Data[v1.DockerConfigKey] = dockercfgContent + secret.Data[v1.DockerConfigJsonKey] = dockercfgJsonContent if s.AppendHash { h, err := hash.SecretHash(secret) if err != nil { @@ -133,17 +133,17 @@ func (s SecretForDockerRegistryGeneratorV1) validate() error { return nil } -// handleDockercfgContent serializes a dockercfg json file -func handleDockercfgContent(username, password, email, server string) ([]byte, error) { +// handleDockerCfgJsonContent serializes a ~/.docker/config.json file +func handleDockerCfgJsonContent(username, password, email, server string) ([]byte, error) { dockercfgAuth := credentialprovider.DockerConfigEntry{ Username: username, Password: password, Email: email, } - dockerCfg := credentialprovider.DockerConfigJson{ + dockerCfgJson := credentialprovider.DockerConfigJson{ Auths: map[string]credentialprovider.DockerConfigEntry{server: dockercfgAuth}, } - return json.Marshal(dockerCfg) + return json.Marshal(dockerCfgJson) } diff --git a/pkg/kubectl/secret_for_docker_registry_test.go b/pkg/kubectl/secret_for_docker_registry_test.go index 66d1ec12feb..844acc3bccc 100644 --- a/pkg/kubectl/secret_for_docker_registry_test.go +++ b/pkg/kubectl/secret_for_docker_registry_test.go @@ -26,11 +26,11 @@ import ( func TestSecretForDockerRegistryGenerate(t *testing.T) { username, password, email, server := "test-user", "test-password", "test-user@example.org", "https://index.docker.io/v1/" - secretData, err := handleDockercfgContent(username, password, email, server) + secretData, err := handleDockerCfgJsonContent(username, password, email, server) if err != nil { t.Errorf("unexpected error: %v", err) } - secretDataNoEmail, err := handleDockercfgContent(username, password, "", server) + 
secretDataNoEmail, err := handleDockerCfgJsonContent(username, password, "", server) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -53,9 +53,9 @@ func TestSecretForDockerRegistryGenerate(t *testing.T) { Name: "foo", }, Data: map[string][]byte{ - v1.DockerConfigKey: secretData, + v1.DockerConfigJsonKey: secretData, }, - Type: v1.SecretTypeDockercfg, + Type: v1.SecretTypeDockerConfigJson, }, expectErr: false, }, @@ -70,12 +70,12 @@ func TestSecretForDockerRegistryGenerate(t *testing.T) { }, expected: &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "foo-94759gc65b", + Name: "foo-548cm7fgdh", }, Data: map[string][]byte{ - v1.DockerConfigKey: secretData, + v1.DockerConfigJsonKey: secretData, }, - Type: v1.SecretTypeDockercfg, + Type: v1.SecretTypeDockerConfigJson, }, expectErr: false, }, @@ -91,9 +91,9 @@ func TestSecretForDockerRegistryGenerate(t *testing.T) { Name: "foo", }, Data: map[string][]byte{ - v1.DockerConfigKey: secretDataNoEmail, + v1.DockerConfigJsonKey: secretDataNoEmail, }, - Type: v1.SecretTypeDockercfg, + Type: v1.SecretTypeDockerConfigJson, }, expectErr: false, }, From 5fe3d9dd13d76230ce584ec1f265c6b7b3e76717 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Wed, 20 Dec 2017 13:33:36 -0500 Subject: [PATCH 448/794] Update code-of-conduct.md Refer to kubernetes/community as authoritative source for code of conduct --- code-of-conduct.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/code-of-conduct.md b/code-of-conduct.md index 622d4c1d1c2..0d15c00cf32 100644 --- a/code-of-conduct.md +++ b/code-of-conduct.md @@ -1,5 +1,3 @@ -## Kubernetes Community Code of Conduct +# Kubernetes Community Code of Conduct -Kubernetes follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
- -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/code-of-conduct.md?pixel)]() +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) From 519d0cde7b4f06b527d5636c8fdb550dec96ad59 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Wed, 20 Dec 2017 10:44:07 -0800 Subject: [PATCH 449/794] Update policy.go --- plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 243f2109dcf..138c4f2a440 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -169,7 +169,7 @@ func ClusterRoles() []rbac.ClusterRole { ObjectMeta: metav1.ObjectMeta{Name: "system:discovery"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get").URLs( - "/healthz", "/version", "/version/*", + "/healthz", "/version", "/version/", // remove once swagger 1.2 support is removed "/swaggerapi", "/swaggerapi/*", // do not expand this pattern for openapi discovery docs From 20bb9edff45d36df3fe88adea80bc1bf26ea90e9 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Wed, 20 Dec 2017 10:44:55 -0800 Subject: [PATCH 450/794] Update cluster-roles.yaml --- .../authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index a47756c6c81..477a6e903ba 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -640,7 +640,7 @@ items: - /swaggerapi - /swaggerapi/* - /version - - /version/* + - /version/ verbs: - get - apiVersion: 
rbac.authorization.k8s.io/v1 From e7ffe1bb985cfe1d29d15beb7620d684c8f821d6 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Wed, 22 Nov 2017 14:29:55 -0800 Subject: [PATCH 451/794] Move multizone volume tests to separate file and update multizone volume tests to only allow gce and gke --- test/e2e/multicluster/BUILD | 1 + test/e2e/multicluster/ubernetes_lite.go | 223 --------------- .../multicluster/ubernetes_lite_volumes.go | 266 ++++++++++++++++++ 3 files changed, 267 insertions(+), 223 deletions(-) create mode 100644 test/e2e/multicluster/ubernetes_lite_volumes.go diff --git a/test/e2e/multicluster/BUILD b/test/e2e/multicluster/BUILD index 51cebcabd99..2d4b21e4e02 100644 --- a/test/e2e/multicluster/BUILD +++ b/test/e2e/multicluster/BUILD @@ -5,6 +5,7 @@ go_library( srcs = [ "framework.go", "ubernetes_lite.go", + "ubernetes_lite_volumes.go", ], importpath = "k8s.io/kubernetes/test/e2e/multicluster", visibility = ["//visibility:public"], diff --git a/test/e2e/multicluster/ubernetes_lite.go b/test/e2e/multicluster/ubernetes_lite.go index a83ad4d26a8..efc4d83bc56 100644 --- a/test/e2e/multicluster/ubernetes_lite.go +++ b/test/e2e/multicluster/ubernetes_lite.go @@ -19,13 +19,10 @@ package multicluster import ( "fmt" "math" - "strconv" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" @@ -60,132 +57,8 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() { It("should spread the pods of a replication controller across zones", func() { SpreadRCOrFail(f, int32((2*zoneCount)+1), image) }) - - It("should schedule pods in the same zones as statically provisioned PVs [sig-storage]", func() { - PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image) - }) - - It("should only be allowed to provision PDs in zones where nodes exist", func() { - OnlyAllowNodeZones(f, zoneCount, image) - }) }) -// OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes -func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { - gceCloud, err := framework.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) - - // Get all the zones that the nodes are in - expectedZones, err := gceCloud.GetAllZonesFromCloudProvider() - Expect(err).NotTo(HaveOccurred()) - framework.Logf("Expected zones: %v\n", expectedZones) - - // Get all the zones in this current region - region := gceCloud.Region() - allZonesInRegion, err := gceCloud.ListZonesInRegion(region) - Expect(err).NotTo(HaveOccurred()) - - var extraZone string - for _, zone := range allZonesInRegion { - if !expectedZones.Has(zone.Name) { - extraZone = zone.Name - break - } - } - Expect(extraZone).NotTo(Equal(""), fmt.Sprintf("No extra zones available in region %s", region)) - - By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone)) - project := framework.TestContext.CloudConfig.ProjectID - zone := extraZone - myuuid := string(uuid.NewUUID()) - name := "compute-" + myuuid - imageURL := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140606" - - rb := &compute.Instance{ - MachineType: 
"zones/" + zone + "/machineTypes/f1-micro", - Disks: []*compute.AttachedDisk{ - { - AutoDelete: true, - Boot: true, - Type: "PERSISTENT", - InitializeParams: &compute.AttachedDiskInitializeParams{ - DiskName: "my-root-pd-" + myuuid, - SourceImage: imageURL, - }, - }, - }, - NetworkInterfaces: []*compute.NetworkInterface{ - { - AccessConfigs: []*compute.AccessConfig{ - { - Type: "ONE_TO_ONE_NAT", - Name: "External NAT", - }, - }, - Network: "/global/networks/default", - }, - }, - Name: name, - } - - err = gceCloud.InsertInstance(project, zone, rb) - Expect(err).NotTo(HaveOccurred()) - - defer func() { - // Teardown of the compute instance - framework.Logf("Deleting compute resource: %v", name) - resp, err := gceCloud.DeleteInstance(project, zone, name) - Expect(err).NotTo(HaveOccurred()) - framework.Logf("Compute deletion response: %v\n", resp) - }() - - By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes") - // Create some (zoneCount+1) PVCs with names of form "pvc-x" where x is 1...zoneCount+1 - // This will exploit ChooseZoneForVolume in pkg/volume/util.go to provision them in all the zones it "sees" - var pvcList []*v1.PersistentVolumeClaim - c := f.ClientSet - ns := f.Namespace.Name - - for index := 1; index <= zoneCount+1; index++ { - pvc := newNamedDefaultClaim(ns, index) - pvc, err = framework.CreatePVC(c, ns, pvc) - Expect(err).NotTo(HaveOccurred()) - pvcList = append(pvcList, pvc) - - // Defer the cleanup - defer func() { - framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) - err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) - if err != nil { - framework.Failf("Error deleting claim %q. 
Error: %v", pvc.Name, err) - } - }() - } - - // Wait for all claims bound - for _, claim := range pvcList { - err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) - Expect(err).NotTo(HaveOccurred()) - } - - pvZones := sets.NewString() - By("Checking that PDs have been provisioned in only the expected zones") - for _, claim := range pvcList { - // Get a new copy of the claim to have all fields populated - claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - // Get the related PV - pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - pvZone, ok := pv.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] - Expect(ok).To(BeTrue(), "PV has no LabelZone to be found") - pvZones.Insert(pvZone) - } - Expect(pvZones.Equal(expectedZones)).To(BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones)) -} - // Check that the pods comprising a service get spread evenly across available zones func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) { // First create the service @@ -368,99 +241,3 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { Expect(err).NotTo(HaveOccurred()) Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true)) } - -type StaticPVTestConfig struct { - pvSource *v1.PersistentVolumeSource - pv *v1.PersistentVolume - pvc *v1.PersistentVolumeClaim - pod *v1.Pod -} - -// Check that the pods using statically created PVs get scheduled to the same zone that the PV is in. 
-func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) { - // TODO: add GKE after enabling admission plugin in GKE - // TODO: add AWS - framework.SkipUnlessProviderIs("gce") - - var err error - c := f.ClientSet - ns := f.Namespace.Name - - zones, err := getZoneNames(c) - Expect(err).NotTo(HaveOccurred()) - - By("Creating static PVs across zones") - configs := make([]*StaticPVTestConfig, podCount) - for i := range configs { - configs[i] = &StaticPVTestConfig{} - } - - defer func() { - By("Cleaning up pods and PVs") - for _, config := range configs { - framework.DeletePodOrFail(c, ns, config.pod.Name) - } - for _, config := range configs { - framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns) - framework.PVPVCCleanup(c, ns, config.pv, config.pvc) - err = framework.DeletePVSource(config.pvSource) - Expect(err).NotTo(HaveOccurred()) - } - }() - - for i, config := range configs { - zone := zones[i%len(zones)] - config.pvSource, err = framework.CreatePVSource(zone) - Expect(err).NotTo(HaveOccurred()) - - pvConfig := framework.PersistentVolumeConfig{ - NamePrefix: "multizone-pv", - PVSource: *config.pvSource, - Prebind: nil, - } - className := "" - pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &className} - - config.pv, config.pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) - Expect(err).NotTo(HaveOccurred()) - } - - By("Waiting for all PVCs to be bound") - for _, config := range configs { - framework.WaitOnPVandPVC(c, ns, config.pv, config.pvc) - } - - By("Creating pods for each static PV") - for _, config := range configs { - podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "") - config.pod, err = c.CoreV1().Pods(ns).Create(podConfig) - Expect(err).NotTo(HaveOccurred()) - } - - By("Waiting for all pods to be running") - for _, config := range configs { - err = framework.WaitForPodRunningInNamespace(c, config.pod) - 
Expect(err).NotTo(HaveOccurred()) - } -} - -func newNamedDefaultClaim(ns string, index int) *v1.PersistentVolumeClaim { - claim := v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-" + strconv.Itoa(index), - Namespace: ns, - }, - Spec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{ - v1.ReadWriteOnce, - }, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"), - }, - }, - }, - } - - return &claim -} diff --git a/test/e2e/multicluster/ubernetes_lite_volumes.go b/test/e2e/multicluster/ubernetes_lite_volumes.go new file mode 100644 index 00000000000..64012568d5d --- /dev/null +++ b/test/e2e/multicluster/ubernetes_lite_volumes.go @@ -0,0 +1,266 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multicluster + +import ( + "fmt" + "strconv" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + compute "google.golang.org/api/compute/v1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/uuid" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + "k8s.io/kubernetes/test/e2e/framework" +) + +var _ = Describe("[sig-storage] Multi-AZ Clusters", func() { + f := framework.NewDefaultFramework("multi-az") + var zoneCount int + var err error + image := framework.ServeHostnameImage + BeforeEach(func() { + framework.SkipUnlessProviderIs("gce", "gke") + if zoneCount <= 0 { + zoneCount, err = getZoneCount(f.ClientSet) + Expect(err).NotTo(HaveOccurred()) + } + By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount)) + msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount) + framework.SkipUnlessAtLeast(zoneCount, 2, msg) + // TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread + }) + It("should schedule pods in the same zones as statically provisioned PVs", func() { + PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image) + }) + + It("should only be allowed to provision PDs in zones where nodes exist", func() { + OnlyAllowNodeZones(f, zoneCount, image) + }) +}) + +// OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes +func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { + gceCloud, err := framework.GetGCECloud() + Expect(err).NotTo(HaveOccurred()) + + // Get all the zones that the nodes are in + expectedZones, err := gceCloud.GetAllZonesFromCloudProvider() + Expect(err).NotTo(HaveOccurred()) + framework.Logf("Expected zones: %v\n", expectedZones) + + // Get all the zones in this current region + region := gceCloud.Region() + allZonesInRegion, err := gceCloud.ListZonesInRegion(region) + Expect(err).NotTo(HaveOccurred()) + + var extraZone string + for _, zone := range 
allZonesInRegion { + if !expectedZones.Has(zone.Name) { + extraZone = zone.Name + break + } + } + Expect(extraZone).NotTo(Equal(""), fmt.Sprintf("No extra zones available in region %s", region)) + + By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone)) + project := framework.TestContext.CloudConfig.ProjectID + zone := extraZone + myuuid := string(uuid.NewUUID()) + name := "compute-" + myuuid + imageURL := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140606" + + rb := &compute.Instance{ + MachineType: "zones/" + zone + "/machineTypes/f1-micro", + Disks: []*compute.AttachedDisk{ + { + AutoDelete: true, + Boot: true, + Type: "PERSISTENT", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskName: "my-root-pd-" + myuuid, + SourceImage: imageURL, + }, + }, + }, + NetworkInterfaces: []*compute.NetworkInterface{ + { + AccessConfigs: []*compute.AccessConfig{ + { + Type: "ONE_TO_ONE_NAT", + Name: "External NAT", + }, + }, + Network: "/global/networks/default", + }, + }, + Name: name, + } + + err = gceCloud.InsertInstance(project, zone, rb) + Expect(err).NotTo(HaveOccurred()) + + defer func() { + // Teardown of the compute instance + framework.Logf("Deleting compute resource: %v", name) + resp, err := gceCloud.DeleteInstance(project, zone, name) + Expect(err).NotTo(HaveOccurred()) + framework.Logf("Compute deletion response: %v\n", resp) + }() + + By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes") + // Create some (zoneCount+1) PVCs with names of form "pvc-x" where x is 1...zoneCount+1 + // This will exploit ChooseZoneForVolume in pkg/volume/util.go to provision them in all the zones it "sees" + var pvcList []*v1.PersistentVolumeClaim + c := f.ClientSet + ns := f.Namespace.Name + + for index := 1; index <= zoneCount+1; index++ { + pvc := newNamedDefaultClaim(ns, index) + pvc, err = framework.CreatePVC(c, ns, pvc) + Expect(err).NotTo(HaveOccurred()) 
+ pvcList = append(pvcList, pvc) + + // Defer the cleanup + defer func() { + framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) + err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) + if err != nil { + framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err) + } + }() + } + + // Wait for all claims bound + for _, claim := range pvcList { + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred()) + } + + pvZones := sets.NewString() + By("Checking that PDs have been provisioned in only the expected zones") + for _, claim := range pvcList { + // Get a new copy of the claim to have all fields populated + claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // Get the related PV + pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + pvZone, ok := pv.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] + Expect(ok).To(BeTrue(), "PV has no LabelZone to be found") + pvZones.Insert(pvZone) + } + Expect(pvZones.Equal(expectedZones)).To(BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones)) +} + +type staticPVTestConfig struct { + pvSource *v1.PersistentVolumeSource + pv *v1.PersistentVolume + pvc *v1.PersistentVolumeClaim + pod *v1.Pod +} + +// Check that the pods using statically created PVs get scheduled to the same zone that the PV is in. 
+func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) { + var err error + c := f.ClientSet + ns := f.Namespace.Name + + zones, err := getZoneNames(c) + Expect(err).NotTo(HaveOccurred()) + + By("Creating static PVs across zones") + configs := make([]*staticPVTestConfig, podCount) + for i := range configs { + configs[i] = &staticPVTestConfig{} + } + + defer func() { + By("Cleaning up pods and PVs") + for _, config := range configs { + framework.DeletePodOrFail(c, ns, config.pod.Name) + } + for _, config := range configs { + framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns) + framework.PVPVCCleanup(c, ns, config.pv, config.pvc) + err = framework.DeletePVSource(config.pvSource) + Expect(err).NotTo(HaveOccurred()) + } + }() + + for i, config := range configs { + zone := zones[i%len(zones)] + config.pvSource, err = framework.CreatePVSource(zone) + Expect(err).NotTo(HaveOccurred()) + + pvConfig := framework.PersistentVolumeConfig{ + NamePrefix: "multizone-pv", + PVSource: *config.pvSource, + Prebind: nil, + } + className := "" + pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &className} + + config.pv, config.pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) + Expect(err).NotTo(HaveOccurred()) + } + + By("Waiting for all PVCs to be bound") + for _, config := range configs { + framework.WaitOnPVandPVC(c, ns, config.pv, config.pvc) + } + + By("Creating pods for each static PV") + for _, config := range configs { + podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "") + config.pod, err = c.CoreV1().Pods(ns).Create(podConfig) + Expect(err).NotTo(HaveOccurred()) + } + + By("Waiting for all pods to be running") + for _, config := range configs { + err = framework.WaitForPodRunningInNamespace(c, config.pod) + Expect(err).NotTo(HaveOccurred()) + } +} + +func newNamedDefaultClaim(ns string, index int) *v1.PersistentVolumeClaim { + claim := 
v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-" + strconv.Itoa(index), + Namespace: ns, + }, + Spec: v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"), + }, + }, + }, + } + + return &claim +} From 918a6a7de7a77294a57c11c8fe40ce54cd18ea56 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Wed, 20 Dec 2017 15:21:56 -0500 Subject: [PATCH 452/794] Add code-of-conduct.md to staging repos --- staging/src/k8s.io/api/code-of-conduct.md | 3 +++ staging/src/k8s.io/apiextensions-apiserver/code-of-conduct.md | 3 +++ staging/src/k8s.io/apimachinery/code-of-conduct.md | 3 +++ staging/src/k8s.io/apiserver/code-of-conduct.md | 3 +++ staging/src/k8s.io/client-go/code-of-conduct.md | 3 +++ staging/src/k8s.io/code-generator/code-of-conduct.md | 3 +++ staging/src/k8s.io/kube-aggregator/code-of-conduct.md | 3 +++ staging/src/k8s.io/metrics/code-of-conduct.md | 3 +++ staging/src/k8s.io/sample-apiserver/code-of-conduct.md | 3 +++ staging/src/k8s.io/sample-controller/code-of-conduct.md | 3 +++ 10 files changed, 30 insertions(+) create mode 100644 staging/src/k8s.io/api/code-of-conduct.md create mode 100644 staging/src/k8s.io/apiextensions-apiserver/code-of-conduct.md create mode 100644 staging/src/k8s.io/apimachinery/code-of-conduct.md create mode 100644 staging/src/k8s.io/apiserver/code-of-conduct.md create mode 100644 staging/src/k8s.io/client-go/code-of-conduct.md create mode 100644 staging/src/k8s.io/code-generator/code-of-conduct.md create mode 100644 staging/src/k8s.io/kube-aggregator/code-of-conduct.md create mode 100644 staging/src/k8s.io/metrics/code-of-conduct.md create mode 100644 staging/src/k8s.io/sample-apiserver/code-of-conduct.md create mode 100644 staging/src/k8s.io/sample-controller/code-of-conduct.md diff --git a/staging/src/k8s.io/api/code-of-conduct.md 
b/staging/src/k8s.io/api/code-of-conduct.md new file mode 100644 index 00000000000..0d15c00cf32 --- /dev/null +++ b/staging/src/k8s.io/api/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/staging/src/k8s.io/apiextensions-apiserver/code-of-conduct.md b/staging/src/k8s.io/apiextensions-apiserver/code-of-conduct.md new file mode 100644 index 00000000000..0d15c00cf32 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/staging/src/k8s.io/apimachinery/code-of-conduct.md b/staging/src/k8s.io/apimachinery/code-of-conduct.md new file mode 100644 index 00000000000..0d15c00cf32 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/staging/src/k8s.io/apiserver/code-of-conduct.md b/staging/src/k8s.io/apiserver/code-of-conduct.md new file mode 100644 index 00000000000..0d15c00cf32 --- /dev/null +++ b/staging/src/k8s.io/apiserver/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/staging/src/k8s.io/client-go/code-of-conduct.md b/staging/src/k8s.io/client-go/code-of-conduct.md new file mode 100644 index 00000000000..0d15c00cf32 --- /dev/null +++ b/staging/src/k8s.io/client-go/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git 
a/staging/src/k8s.io/code-generator/code-of-conduct.md b/staging/src/k8s.io/code-generator/code-of-conduct.md new file mode 100644 index 00000000000..0d15c00cf32 --- /dev/null +++ b/staging/src/k8s.io/code-generator/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/staging/src/k8s.io/kube-aggregator/code-of-conduct.md b/staging/src/k8s.io/kube-aggregator/code-of-conduct.md new file mode 100644 index 00000000000..0d15c00cf32 --- /dev/null +++ b/staging/src/k8s.io/kube-aggregator/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/staging/src/k8s.io/metrics/code-of-conduct.md b/staging/src/k8s.io/metrics/code-of-conduct.md new file mode 100644 index 00000000000..0d15c00cf32 --- /dev/null +++ b/staging/src/k8s.io/metrics/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/staging/src/k8s.io/sample-apiserver/code-of-conduct.md b/staging/src/k8s.io/sample-apiserver/code-of-conduct.md new file mode 100644 index 00000000000..0d15c00cf32 --- /dev/null +++ b/staging/src/k8s.io/sample-apiserver/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/staging/src/k8s.io/sample-controller/code-of-conduct.md b/staging/src/k8s.io/sample-controller/code-of-conduct.md new file mode 100644 index 00000000000..0d15c00cf32 --- /dev/null +++ b/staging/src/k8s.io/sample-controller/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of 
Conduct](https://git.k8s.io/community/code-of-conduct.md) From 3a456bf9cbf15a80a47d2619c8a494388d848098 Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Mon, 18 Dec 2017 11:19:15 -0800 Subject: [PATCH 453/794] Set a minimum b.N for scheduler_perf benchmarks. Under the default behavior of Go benchmark tests, all our scheduler_perf benchmark tests run with b.N=1, which is lower than we would like. This adds a lower bound to b.N so that the results are more meaningful. The alternative to this change is to always run these tests with the -benchtime flag set to a duration which will force b.N to increase. That would cause any test setup to be executed repeatedly as b.N ramps up, and -timout would probably also need to be set higher. --- .../scheduler_perf/scheduler_bench_test.go | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/test/integration/scheduler_perf/scheduler_bench_test.go b/test/integration/scheduler_perf/scheduler_bench_test.go index ee2ca2fbf1b..608e4888b51 100644 --- a/test/integration/scheduler_perf/scheduler_bench_test.go +++ b/test/integration/scheduler_perf/scheduler_bench_test.go @@ -31,22 +31,25 @@ import ( // BenchmarkScheduling benchmarks the scheduling rate when the cluster has // various quantities of nodes and scheduled pods. 
func BenchmarkScheduling(b *testing.B) { - tests := []struct{ nodes, pods int }{ - {nodes: 100, pods: 0}, - {nodes: 100, pods: 1000}, - {nodes: 1000, pods: 0}, - {nodes: 1000, pods: 1000}, + tests := []struct{ nodes, pods, minOps int }{ + {nodes: 100, pods: 0, minOps: 100}, + {nodes: 100, pods: 1000, minOps: 100}, + {nodes: 1000, pods: 0, minOps: 100}, + {nodes: 1000, pods: 1000, minOps: 100}, } for _, test := range tests { name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.pods) - b.Run(name, func(b *testing.B) { benchmarkScheduling(test.nodes, test.pods, b) }) + b.Run(name, func(b *testing.B) { benchmarkScheduling(test.nodes, test.pods, test.minOps, b) }) } } // benchmarkScheduling benchmarks scheduling rate with specific number of nodes -// and specific number of pods already scheduled. Since an operation takes relatively -// long time, b.N should be small: 10 - 100. -func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) { +// and specific number of pods already scheduled. +// Since an operation typically takes more than 1 second, we put a minimum bound on b.N of minOps. +func benchmarkScheduling(numNodes, numScheduledPods, minOps int, b *testing.B) { + if b.N < minOps { + b.N = minOps + } schedulerConfigFactory, finalFunc := mustSetupScheduler() defer finalFunc() c := schedulerConfigFactory.GetClient() From 03bdef404503b579458962026b3c88ef8b181cf4 Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Wed, 6 Dec 2017 18:19:23 -0800 Subject: [PATCH 454/794] Add scheduler benchmark tests for affinity rules. This adds new benchmark tests that measure scheduler latency of pods that use affinity rules. Specifically, this tests affinity rules with topologyKey="kubernetes.io/hostname". 
--- .../scheduler_perf/scheduler_bench_test.go | 89 +++++++++++++++---- 1 file changed, 74 insertions(+), 15 deletions(-) diff --git a/test/integration/scheduler_perf/scheduler_bench_test.go b/test/integration/scheduler_perf/scheduler_bench_test.go index 608e4888b51..fb28da4ec60 100644 --- a/test/integration/scheduler_perf/scheduler_bench_test.go +++ b/test/integration/scheduler_perf/scheduler_bench_test.go @@ -21,7 +21,10 @@ import ( "testing" "time" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/integration/framework" testutils "k8s.io/kubernetes/test/utils" @@ -31,24 +34,80 @@ import ( // BenchmarkScheduling benchmarks the scheduling rate when the cluster has // various quantities of nodes and scheduled pods. func BenchmarkScheduling(b *testing.B) { - tests := []struct{ nodes, pods, minOps int }{ - {nodes: 100, pods: 0, minOps: 100}, - {nodes: 100, pods: 1000, minOps: 100}, - {nodes: 1000, pods: 0, minOps: 100}, - {nodes: 1000, pods: 1000, minOps: 100}, + tests := []struct{ nodes, existingPods, minPods int }{ + {nodes: 100, existingPods: 0, minPods: 100}, + {nodes: 100, existingPods: 1000, minPods: 100}, + {nodes: 1000, existingPods: 0, minPods: 100}, + {nodes: 1000, existingPods: 1000, minPods: 100}, } + setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc1") + testStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc2") for _, test := range tests { - name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.pods) - b.Run(name, func(b *testing.B) { benchmarkScheduling(test.nodes, test.pods, test.minOps, b) }) + name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods) + b.Run(name, func(b *testing.B) { + benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b) + }) } } +// BenchmarkSchedulingAntiAffinity benchmarks the scheduling rate of pods with +// 
PodAntiAffinity rules when the cluster has various quantities of nodes and +// scheduled pods. +func BenchmarkSchedulingAntiAffinity(b *testing.B) { + tests := []struct{ nodes, existingPods, minPods int }{ + {nodes: 500, existingPods: 250, minPods: 250}, + {nodes: 500, existingPods: 5000, minPods: 250}, + } + // The setup strategy creates pods with no affinity rules. + setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup") + // The test strategy creates pods with anti-affinity for each other. + testBasePod := makeBasePodWithAntiAffinity( + map[string]string{"name": "test", "color": "green"}, + map[string]string{"color": "green"}) + testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod) + for _, test := range tests { + name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods) + b.Run(name, func(b *testing.B) { + benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b) + }) + } + +} + +// makeBasePodWithAntiAffinity creates a Pod object to be used as a template. +// The Pod has a PodAntiAffinity requirement against pods with the given labels. +func makeBasePodWithAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod { + basePod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "affinity-pod-", + Labels: podLabels, + }, + Spec: testutils.MakePodSpec(), + } + basePod.Spec.Affinity = &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: affinityLabels, + }, + TopologyKey: apis.LabelHostname, + }, + }, + }, + } + return basePod +} + // benchmarkScheduling benchmarks scheduling rate with specific number of nodes // and specific number of pods already scheduled. -// Since an operation typically takes more than 1 second, we put a minimum bound on b.N of minOps. 
-func benchmarkScheduling(numNodes, numScheduledPods, minOps int, b *testing.B) { - if b.N < minOps { - b.N = minOps +// This will schedule numExistingPods pods before the benchmark starts, and at +// least minPods pods during the benchmark. +func benchmarkScheduling(numNodes, numExistingPods, minPods int, + setupPodStrategy, testPodStrategy testutils.TestPodCreateStrategy, + b *testing.B) { + if b.N < minPods { + b.N = minPods } schedulerConfigFactory, finalFunc := mustSetupScheduler() defer finalFunc() @@ -65,7 +124,7 @@ func benchmarkScheduling(numNodes, numScheduledPods, minOps int, b *testing.B) { defer nodePreparer.CleanupNodes() config := testutils.NewTestPodCreatorConfig() - config.AddStrategy("sched-test", numScheduledPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1")) + config.AddStrategy("sched-test", numExistingPods, setupPodStrategy) podCreator := testutils.NewTestPodCreator(c, config) podCreator.CreatePods() @@ -74,7 +133,7 @@ func benchmarkScheduling(numNodes, numScheduledPods, minOps int, b *testing.B) { if err != nil { glog.Fatalf("%v", err) } - if len(scheduled) >= numScheduledPods { + if len(scheduled) >= numExistingPods { break } time.Sleep(1 * time.Second) @@ -82,7 +141,7 @@ func benchmarkScheduling(numNodes, numScheduledPods, minOps int, b *testing.B) { // start benchmark b.ResetTimer() config = testutils.NewTestPodCreatorConfig() - config.AddStrategy("sched-test", b.N, testutils.NewSimpleWithControllerCreatePodStrategy("rc2")) + config.AddStrategy("sched-test", b.N, testPodStrategy) podCreator = testutils.NewTestPodCreator(c, config) podCreator.CreatePods() for { @@ -92,7 +151,7 @@ func benchmarkScheduling(numNodes, numScheduledPods, minOps int, b *testing.B) { if err != nil { glog.Fatalf("%v", err) } - if len(scheduled) >= numScheduledPods+b.N { + if len(scheduled) >= numExistingPods+b.N { break } // Note: This might introduce slight deviation in accuracy of benchmark results. 
From d5d7d6d6845cdfcb6b0a631a3e503332e799d65a Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Tue, 12 Dec 2017 10:05:03 -0800 Subject: [PATCH 455/794] Send an event just before the Kubelet restarts to use a new config --- cmd/kubelet/app/server.go | 2 +- pkg/kubelet/kubeletconfig/BUILD | 3 + .../kubeletconfig/checkpoint/download.go | 9 ++ .../kubeletconfig/checkpoint/download_test.go | 15 ++ pkg/kubelet/kubeletconfig/configsync.go | 86 ++++++++-- pkg/kubelet/kubeletconfig/controller.go | 6 +- test/e2e_node/BUILD | 1 + test/e2e_node/dynamic_kubelet_config_test.go | 151 ++++++++++++++---- 8 files changed, 224 insertions(+), 49 deletions(-) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 8100b44afae..1d6901b5447 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -401,7 +401,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { // Alpha Dynamic Configuration Implementation; // if the kubelet config controller is available, inject the latest to start the config and status sync loops if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce { - kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, string(nodeName)) + kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)) } if kubeDeps.Auth == nil { diff --git a/pkg/kubelet/kubeletconfig/BUILD b/pkg/kubelet/kubeletconfig/BUILD index 71bbe9fb145..ba381a6fed7 100644 --- a/pkg/kubelet/kubeletconfig/BUILD +++ b/pkg/kubelet/kubeletconfig/BUILD @@ -25,13 +25,16 @@ go_library( "//pkg/kubelet/kubeletconfig/util/log:go_default_library", "//pkg/kubelet/kubeletconfig/util/panic:go_default_library", "//pkg/util/filesystem:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/pkg/kubelet/kubeletconfig/checkpoint/download.go b/pkg/kubelet/kubeletconfig/checkpoint/download.go index 297d974a1aa..9778f318b7c 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/download.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/download.go @@ -34,6 +34,8 @@ import ( type RemoteConfigSource interface { // UID returns the UID of the remote config source object UID() string + // APIPath returns the API path to the remote resource, e.g. 
its SelfLink + APIPath() string // Download downloads the remote config source object returns a Checkpoint backed by the object, // or a sanitized failure reason and error if the download fails Download(client clientset.Interface) (Checkpoint, string, error) @@ -110,6 +112,13 @@ func (r *remoteConfigMap) UID() string { return string(r.source.ConfigMapRef.UID) } +const configMapAPIPathFmt = "/api/v1/namespaces/%s/configmaps/%s" + +func (r *remoteConfigMap) APIPath() string { + ref := r.source.ConfigMapRef + return fmt.Sprintf(configMapAPIPathFmt, ref.Namespace, ref.Name) +} + func (r *remoteConfigMap) Download(client clientset.Interface) (Checkpoint, string, error) { var reason string uid := string(r.source.ConfigMapRef.UID) diff --git a/pkg/kubelet/kubeletconfig/checkpoint/download_test.go b/pkg/kubelet/kubeletconfig/checkpoint/download_test.go index a902b6384e4..ccca6c3161c 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/download_test.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/download_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package checkpoint import ( + "fmt" "testing" "github.com/davecgh/go-spew/spew" @@ -92,6 +93,20 @@ func TestRemoteConfigMapUID(t *testing.T) { } } +func TestRemoteConfigMapAPIPath(t *testing.T) { + name := "name" + namespace := "namespace" + cpt := &remoteConfigMap{ + &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: name, Namespace: namespace, UID: ""}}, + } + expect := fmt.Sprintf(configMapAPIPathFmt, cpt.source.ConfigMapRef.Namespace, cpt.source.ConfigMapRef.Name) + // APIPath() method should return the correct path to the referenced resource + path := cpt.APIPath() + if expect != path { + t.Errorf("expect APIPath() to return %q, but got %q", expect, path) + } +} + func TestRemoteConfigMapDownload(t *testing.T) { _, kubeletCodecs, err := kubeletscheme.NewSchemeAndCodecs() if err != nil { diff --git a/pkg/kubelet/kubeletconfig/configsync.go b/pkg/kubelet/kubeletconfig/configsync.go index ac7e40656f9..66f9eb9160e 100644 --- a/pkg/kubelet/kubeletconfig/configsync.go +++ b/pkg/kubelet/kubeletconfig/configsync.go @@ -19,15 +19,30 @@ package kubeletconfig import ( "fmt" "os" + "time" + + "github.com/golang/glog" apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status" utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" ) +const ( + // KubeletConfigChangedEventReason identifies an event as a change of Kubelet configuration + KubeletConfigChangedEventReason = "KubeletConfigChanged" + // EventMessageFmt is the message format for Kubelet config change events + EventMessageFmt = "Kubelet will restart to use: %s" + // LocalConfigMessage is the text to apply to EventMessageFmt when the Kubelet has been configured to use its local config (init or defaults) + 
LocalConfigMessage = "local config" +) + // pokeConfiSourceWorker tells the worker thread that syncs config sources that work needs to be done func (cc *Controller) pokeConfigSourceWorker() { select { @@ -37,7 +52,7 @@ func (cc *Controller) pokeConfigSourceWorker() { } // syncConfigSource checks if work needs to be done to use a new configuration, and does that work if necessary -func (cc *Controller) syncConfigSource(client clientset.Interface, nodeName string) { +func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v1core.EventsGetter, nodeName string) { select { case <-cc.pendingConfigSource: default: @@ -62,13 +77,22 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, nodeName stri } // check the Node and download any new config - if updated, reason, err := cc.doSyncConfigSource(client, node.Spec.ConfigSource); err != nil { + if updated, cur, reason, err := cc.doSyncConfigSource(client, node.Spec.ConfigSource); err != nil { cc.configOK.SetFailSyncCondition(reason) syncerr = fmt.Errorf("%s, error: %v", reason, err) return } else if updated { - // TODO(mtaufen): Consider adding a "currently restarting kubelet" ConfigOK message for this case - utillog.Infof("config updated, Kubelet will restart to begin using new config") + path := LocalConfigMessage + if cur != nil { + path = cur.APIPath() + } + // we directly log and send the event, instead of using the event recorder, + // because the event recorder won't flush its queue before we exit (we'd lose the event) + event := eventf(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, EventMessageFmt, path) + glog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) + if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(event); err != nil { + utillog.Errorf("failed to send event, error: %v", err) + } os.Exit(0) } @@ -81,31 +105,31 @@ func (cc *Controller) syncConfigSource(client 
clientset.Interface, nodeName stri // doSyncConfigSource checkpoints and sets the store's current config to the new config or resets config, // depending on the `source`, and returns whether the current config in the checkpoint store was updated as a result -func (cc *Controller) doSyncConfigSource(client clientset.Interface, source *apiv1.NodeConfigSource) (bool, string, error) { +func (cc *Controller) doSyncConfigSource(client clientset.Interface, source *apiv1.NodeConfigSource) (bool, checkpoint.RemoteConfigSource, string, error) { if source == nil { utillog.Infof("Node.Spec.ConfigSource is empty, will reset current and last-known-good to defaults") updated, reason, err := cc.resetConfig() if err != nil { - return false, reason, err + return false, nil, reason, err } - return updated, "", nil + return updated, nil, "", nil } // if the NodeConfigSource is non-nil, download the config utillog.Infof("Node.Spec.ConfigSource is non-empty, will checkpoint source and update config if necessary") remote, reason, err := checkpoint.NewRemoteConfigSource(source) if err != nil { - return false, reason, err + return false, nil, reason, err } reason, err = cc.checkpointConfigSource(client, remote) if err != nil { - return false, reason, err + return false, nil, reason, err } updated, reason, err := cc.setCurrentConfig(remote) if err != nil { - return false, reason, err + return false, nil, reason, err } - return updated, "", nil + return updated, remote, "", nil } // checkpointConfigSource downloads and checkpoints the object referred to by `source` if the checkpoint does not already exist, @@ -181,3 +205,43 @@ func latestNode(store cache.Store, nodeName string) (*apiv1.Node, error) { } return node, nil } + +// eventf constructs and returns an event containing a formatted message +// similar to k8s.io/client-go/tools/record/event.go +func eventf(nodeName, eventType, reason, messageFmt string, args ...interface{}) *apiv1.Event { + return makeEvent(nodeName, eventType, reason, 
fmt.Sprintf(messageFmt, args...)) +} + +// makeEvent constructs an event +// similar to makeEvent in k8s.io/client-go/tools/record/event.go +func makeEvent(nodeName, eventtype, reason, message string) *apiv1.Event { + const componentKubelet = "kubelet" + // NOTE(mtaufen): This is consistent with pkg/kubelet/kubelet.go. Even though setting the node + // name as the UID looks strange, it appears to be conventional for events sent by the Kubelet. + ref := apiv1.ObjectReference{ + Kind: "Node", + Name: nodeName, + UID: types.UID(nodeName), + Namespace: "", + } + + t := metav1.Time{Time: time.Now()} + namespace := ref.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + return &apiv1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: namespace, + }, + InvolvedObject: ref, + Reason: reason, + Message: message, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventtype, + Source: apiv1.EventSource{Component: componentKubelet, Host: string(nodeName)}, + } +} diff --git a/pkg/kubelet/kubeletconfig/controller.go b/pkg/kubelet/kubeletconfig/controller.go index 6ff4cd6b284..0b493ba053c 100644 --- a/pkg/kubelet/kubeletconfig/controller.go +++ b/pkg/kubelet/kubeletconfig/controller.go @@ -24,10 +24,10 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation" - "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status" @@ -201,7 +201,7 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) { // StartSync launches the controller's sync loops if `client` is non-nil and `nodeName` is non-empty. 
// It will always start the Node condition reporting loop, and will also start the dynamic conifg sync loops // if dynamic config is enabled on the controller. If `nodeName` is empty but `client` is non-nil, an error is logged. -func (cc *Controller) StartSync(client clientset.Interface, nodeName string) { +func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.EventsGetter, nodeName string) { if client == nil { utillog.Infof("nil client, will not start sync loops") return @@ -236,7 +236,7 @@ func (cc *Controller) StartSync(client clientset.Interface, nodeName string) { go utilpanic.HandlePanic(func() { utillog.Infof("starting config source sync loop") wait.JitterUntil(func() { - cc.syncConfigSource(client, nodeName) + cc.syncConfigSource(client, eventClient, nodeName) }, 10*time.Second, 0.2, true, wait.NeverStop) })() } else { diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index dfd6e95789a..df312031edf 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -128,6 +128,7 @@ go_test( "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", "//pkg/kubelet/images:go_default_library", + "//pkg/kubelet/kubeletconfig:go_default_library", "//pkg/kubelet/kubeletconfig/status:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/types:go_default_library", diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index dd83b3d1bc1..a0e9da80857 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -26,6 +26,7 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" + controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status" "k8s.io/kubernetes/test/e2e/framework" @@ -39,6 +40,11 @@ type configState struct { configSource 
*apiv1.NodeConfigSource expectConfigOK *apiv1.NodeCondition expectConfig *kubeletconfig.KubeletConfiguration + // whether the state would cause a config change event as a result of the update to Node.Spec.ConfigSource, + // assuming that the current source would have also caused a config change event. + // for example, some malformed references may result in a download failure, in which case the Kubelet + // does not restart to change config, while an invalid payload will be detected upon restart + event bool } // This test is marked [Disruptive] because the Kubelet restarts several times during this test. @@ -82,7 +88,8 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: fmt.Sprintf(status.CurRemoteMessageFmt, originalConfigMap.UID), Reason: status.CurRemoteOKReason}, - expectConfig: originalKC}) + expectConfig: originalKC, + }, false) }) Context("When setting new NodeConfigSources that cause transitions between ConfigOK conditions", func() { @@ -121,7 +128,9 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: status.CurDefaultMessage, Reason: status.CurDefaultOKReason}, - expectConfig: nil}, + expectConfig: nil, + event: true, + }, // Node.Spec.ConfigSource has all nil subfields {desc: "Node.Spec.ConfigSource has all nil subfields", @@ -129,7 +138,9 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, Message: "", Reason: fmt.Sprintf(status.FailSyncReasonFmt, status.FailSyncReasonAllNilSubfields)}, - expectConfig: nil}, + expectConfig: nil, + event: false, + }, // Node.Spec.ConfigSource.ConfigMapRef is partial {desc: "Node.Spec.ConfigSource.ConfigMapRef is partial", @@ -140,17 
+151,21 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, Message: "", Reason: fmt.Sprintf(status.FailSyncReasonFmt, status.FailSyncReasonPartialObjectReference)}, - expectConfig: nil}, + expectConfig: nil, + event: false, + }, // Node.Spec.ConfigSource's UID does not align with namespace/name - {desc: "Node.Spec.ConfigSource's UID does not align with namespace/name", + {desc: "Node.Spec.ConfigSource.ConfigMapRef.UID does not align with Namespace/Name", configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{UID: "foo", Namespace: correctConfigMap.Namespace, Name: correctConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, Message: "", Reason: fmt.Sprintf(status.FailSyncReasonFmt, fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, "foo", correctConfigMap.UID))}, - expectConfig: nil}, + expectConfig: nil, + event: false, + }, // correct {desc: "correct", @@ -161,7 +176,9 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: fmt.Sprintf(status.CurRemoteMessageFmt, correctConfigMap.UID), Reason: status.CurRemoteOKReason}, - expectConfig: correctKC}, + expectConfig: correctKC, + event: true, + }, // fail-parse {desc: "fail-parse", @@ -172,7 +189,9 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, Message: status.LkgDefaultMessage, Reason: fmt.Sprintf(status.CurFailParseReasonFmt, failParseConfigMap.UID)}, - expectConfig: nil}, + expectConfig: nil, + event: true, + }, // fail-validate {desc: "fail-validate", @@ -183,7 +202,9 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube 
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, Message: status.LkgDefaultMessage, Reason: fmt.Sprintf(status.CurFailValidateReasonFmt, failValidateConfigMap.UID)}, - expectConfig: nil}, + expectConfig: nil, + event: true, + }, } L := len(states) @@ -194,8 +215,8 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube }) }) - Context("When a remote config becomes the new last-known-good before the Kubelet is updated to use a new, bad config", func() { - It("it should report a status and configz indicating that it rolled back to the new last-known-good", func() { + Context("When a remote config becomes the new last-known-good, and then the Kubelet is updated to use a new, bad config", func() { + It("the Kubelet should report a status and configz indicating that it rolled back to the new last-known-good", func() { var err error // we base the "lkg" configmap off of the current configuration, but set the trial // duration very low so that it quickly becomes the last-known-good @@ -225,7 +246,9 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: fmt.Sprintf(status.CurRemoteMessageFmt, lkgConfigMap.UID), Reason: status.CurRemoteOKReason}, - expectConfig: lkgKC}, + expectConfig: lkgKC, + event: true, + }, // bad config {desc: "bad config", @@ -236,7 +259,9 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, Message: fmt.Sprintf(status.LkgRemoteMessageFmt, lkgConfigMap.UID), Reason: fmt.Sprintf(status.CurFailParseReasonFmt, badConfigMap.UID)}, - expectConfig: lkgKC}, + expectConfig: lkgKC, + event: true, + }, } testBothDirections(f, &states[0], states[1:]) @@ -271,7 +296,9 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration 
[Feature:DynamicKube expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm1.UID), Reason: status.CurRemoteOKReason}, - expectConfig: kc1}, + expectConfig: kc1, + event: true, + }, {desc: "cm2", configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{ UID: cm2.UID, @@ -280,7 +307,9 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm2.UID), Reason: status.CurRemoteOKReason}, - expectConfig: kc2}, + expectConfig: kc2, + event: true, + }, } for i := 0; i < 50; i++ { // change the config 101 times (changes 3 times in the first iteration, 2 times in each subsequent iteration) @@ -296,61 +325,68 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube func testBothDirections(f *framework.Framework, first *configState, states []configState) { // set to first and check that everything got set up properly By(fmt.Sprintf("setting configSource to state %q", first.desc)) - setAndTestKubeletConfigState(f, first) + // we don't always expect an event here, because setting "first" might not represent + // a change from the current configuration + setAndTestKubeletConfigState(f, first, false) // for each state, set to that state, check condition and configz, then reset to first and check again for i := range states { By(fmt.Sprintf("from %q to %q", first.desc, states[i].desc)) - setAndTestKubeletConfigState(f, &states[i]) + // from first -> states[i], states[i].event fully describes whether we should get a config change event + setAndTestKubeletConfigState(f, &states[i], states[i].event) By(fmt.Sprintf("back to %q from %q", first.desc, states[i].desc)) - setAndTestKubeletConfigState(f, first) + // whether first -> states[i] should have produced a config change event partially 
determines whether states[i] -> first should produce an event + setAndTestKubeletConfigState(f, first, first.event && states[i].event) } } // setAndTestKubeletConfigState tests that after setting the config source, the ConfigOK condition // and (if appropriate) configuration exposed via conifgz are as expected. // The configuration will be converted to the internal type prior to comparison. -func setAndTestKubeletConfigState(f *framework.Framework, state *configState) { +func setAndTestKubeletConfigState(f *framework.Framework, state *configState, expectEvent bool) { // set the desired state, retry a few times in case we are competing with other editors Eventually(func() error { if err := setNodeConfigSource(f, state.configSource); err != nil { - return err + return fmt.Errorf("case %s: error setting Node.Spec.ConfigSource: %v", state.desc, err) } return nil }, time.Minute, time.Second).Should(BeNil()) // check that config source actually got set to what we expect - checkNodeConfigSource(f, state.configSource) + checkNodeConfigSource(f, state.desc, state.configSource) // check condition - checkConfigOKCondition(f, state.expectConfigOK) + checkConfigOKCondition(f, state.desc, state.expectConfigOK) // check expectConfig if state.expectConfig != nil { - checkConfig(f, state.expectConfig) + checkConfig(f, state.desc, state.expectConfig) + } + // check that an event was sent for the config change + if expectEvent { + checkEvent(f, state.desc, state.configSource) + } } // make sure the node's config source matches what we expect, after setting it -func checkNodeConfigSource(f *framework.Framework, expect *apiv1.NodeConfigSource) { +func checkNodeConfigSource(f *framework.Framework, desc string, expect *apiv1.NodeConfigSource) { const ( timeout = time.Minute interval = time.Second ) - Eventually(func() error { node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { - return err + return fmt.Errorf("checkNodeConfigSource: case %s: 
%v", desc, err) } actual := node.Spec.ConfigSource if !reflect.DeepEqual(expect, actual) { - return fmt.Errorf(spew.Sprintf("expected %#v but got %#v", expect, actual)) + return fmt.Errorf(spew.Sprintf("checkNodeConfigSource: case %s: expected %#v but got %#v", desc, expect, actual)) } return nil }, timeout, interval).Should(BeNil()) } // make sure the ConfigOK node condition eventually matches what we expect -func checkConfigOKCondition(f *framework.Framework, expect *apiv1.NodeCondition) { +func checkConfigOKCondition(f *framework.Framework, desc string, expect *apiv1.NodeCondition) { const ( timeout = time.Minute interval = time.Second @@ -359,14 +395,14 @@ func checkConfigOKCondition(f *framework.Framework, expect *apiv1.NodeCondition) Eventually(func() error { node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { - return err + return fmt.Errorf("checkConfigOKCondition: case %s: %v", desc, err) } actual := getConfigOKCondition(node.Status.Conditions) if actual == nil { - return fmt.Errorf("ConfigOK condition not found on node %q", framework.TestContext.NodeName) + return fmt.Errorf("checkConfigOKCondition: case %s: ConfigOK condition not found on node %q", desc, framework.TestContext.NodeName) } if err := expectConfigOK(expect, actual); err != nil { - return err + return fmt.Errorf("checkConfigOKCondition: case %s: %v", desc, err) } return nil }, timeout, interval).Should(BeNil()) @@ -388,7 +424,7 @@ func expectConfigOK(expect, actual *apiv1.NodeCondition) error { } // make sure config exposed on configz matches what we expect -func checkConfig(f *framework.Framework, expect *kubeletconfig.KubeletConfiguration) { +func checkConfig(f *framework.Framework, desc string, expect *kubeletconfig.KubeletConfiguration) { const ( timeout = time.Minute interval = time.Second @@ -396,11 +432,58 @@ func checkConfig(f *framework.Framework, expect *kubeletconfig.KubeletConfigurat Eventually(func() error { actual, 
err := getCurrentKubeletConfig() if err != nil { - return err + return fmt.Errorf("checkConfig: case %s: %v", desc, err) } if !reflect.DeepEqual(expect, actual) { - return fmt.Errorf(spew.Sprintf("expected %#v but got %#v", expect, actual)) + return fmt.Errorf(spew.Sprintf("checkConfig: case %s: expected %#v but got %#v", desc, expect, actual)) } return nil }, timeout, interval).Should(BeNil()) } + +// checkEvent makes sure an event was sent marking the Kubelet's restart to use new config, +// and that it mentions the config we expect. +func checkEvent(f *framework.Framework, desc string, expect *apiv1.NodeConfigSource) { + const ( + timeout = time.Minute + interval = time.Second + ) + Eventually(func() error { + events, err := f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("checkEvent: case %s: %v", desc, err) + } + // find config changed event with most recent timestamp + var recent *apiv1.Event + for i := range events.Items { + if events.Items[i].Reason == controller.KubeletConfigChangedEventReason { + if recent == nil { + recent = &events.Items[i] + continue + } + // for these events, first and last timestamp are always the same + if events.Items[i].FirstTimestamp.Time.After(recent.FirstTimestamp.Time) { + recent = &events.Items[i] + } + } + } + + // we expect at least one config change event + if recent == nil { + return fmt.Errorf("checkEvent: case %s: no events found with reason %s", desc, controller.KubeletConfigChangedEventReason) + } + + // ensure the message is what we expect (including the resource path) + expectMessage := fmt.Sprintf(controller.EventMessageFmt, controller.LocalConfigMessage) + if expect != nil { + if expect.ConfigMapRef != nil { + expectMessage = fmt.Sprintf(controller.EventMessageFmt, fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", expect.ConfigMapRef.Namespace, expect.ConfigMapRef.Name)) + } + } + if expectMessage != recent.Message { + return fmt.Errorf("checkEvent: case %s: 
expected event message %q but got %q", desc, expectMessage, recent.Message) + } + + return nil + }, timeout, interval).Should(BeNil()) +} From 69a2deea9c4b7aafb6644c86d4fe22f2990a108d Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 20 Dec 2017 15:37:22 -0600 Subject: [PATCH 456/794] iscsi: set node.startup to manual If the default iSCSI node.startup is set to automatic, if there is a node failure, any pods on that node will get rescheduled to another node. If the failed node is later brought back up it will then try to log back in to any iSCSI sessions it had prior to the failure, which may no longer exist or may be now in-use by the other nodes. It appears most platforms keep the open-iscsi default of node.startup-automatic. But in case this system-wide setting has been changed, and just to be explicit, this sets node.startup values for kubernetes controlled volumes to manual. Closes issue #21305 --- pkg/volume/iscsi/iscsi_util.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go index a72dace7735..6c6d03c0034 100644 --- a/pkg/volume/iscsi/iscsi_util.go +++ b/pkg/volume/iscsi/iscsi_util.go @@ -279,6 +279,12 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", string(out), err) continue } + // in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot + out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "node.startup", "-v", "manual") + if err != nil { + // don't fail if we can't set startup mode, but log warning so there is a clue + glog.Warningf("Warning: Failed to set iSCSI login mode to manual. 
Error: %v", err) + } if exist := waitForPathToExist(&devicePath, 10, iscsiTransport); !exist { glog.Errorf("Could not attach disk: Timeout after 10s") // update last error From 3fd7e3a587bfb5e0d77676454fc498f5388ab04c Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Wed, 20 Dec 2017 18:50:03 +0000 Subject: [PATCH 457/794] Cleanup api service before namespace deletion. --- test/e2e/apimachinery/aggregator.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index ee72e3158f8..7d52b759548 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -53,18 +53,26 @@ var _ = SIGDescribe("Aggregator", func() { var ns string var c clientset.Interface var aggrclient *aggregatorclient.Clientset + + // BeforeEachs run in LIFO order, AfterEachs run in FIFO order. + // We want cleanTest to happen before the namespace cleanup AfterEach + // inserted by NewDefaultFramework, so we put this AfterEach in front + // of NewDefaultFramework. + AfterEach(func() { + cleanTest(c, aggrclient, ns) + }) + f := framework.NewDefaultFramework("aggregator") + // We want namespace initialization BeforeEach inserted by + // NewDefaultFramework to happen before this, so we put this BeforeEach + // after NewDefaultFramework. 
BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name aggrclient = f.AggregatorClient }) - AfterEach(func() { - cleanTest(c, aggrclient, ns) - }) - It("Should be able to support the 1.7 Sample API Server using the current Aggregator", func() { // Make sure the relevant provider supports Agggregator framework.SkipUnlessServerVersionGTE(serverAggregatorVersion, f.ClientSet.Discovery()) From 46fb8b00a3dcdc07747dbe6b5f4bdf8e2593c06d Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Wed, 20 Dec 2017 14:41:52 -0800 Subject: [PATCH 458/794] Auto generated BUILD file --- test/integration/scheduler_perf/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/scheduler_perf/BUILD b/test/integration/scheduler_perf/BUILD index f18bdb8cdf7..435f10919aa 100644 --- a/test/integration/scheduler_perf/BUILD +++ b/test/integration/scheduler_perf/BUILD @@ -42,6 +42,7 @@ go_test( library = ":go_default_library", tags = ["integration"], deps = [ + "//pkg/kubelet/apis:go_default_library", "//plugin/pkg/scheduler:go_default_library", "//test/integration/framework:go_default_library", "//test/utils:go_default_library", From 9247ff7e21c4b08e64d364daf2aa069093e1db79 Mon Sep 17 00:00:00 2001 From: Matthew Wong Date: Wed, 20 Dec 2017 17:52:06 -0500 Subject: [PATCH 459/794] Update nfsprovisioner image to v1.0.9 to fix annotation race with pv controller --- test/e2e/storage/volume_provisioning.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index e87cc902201..be7efd9cd45 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -914,7 +914,7 @@ func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod { Containers: []v1.Container{ { Name: "nfs-provisioner", - Image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.6", + Image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9", SecurityContext: 
&v1.SecurityContext{ Capabilities: &v1.Capabilities{ Add: []v1.Capability{"DAC_READ_SEARCH"}, From da6cc5d7a514febded10d4d7483779d02cffcc65 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Thu, 21 Dec 2017 00:25:46 +0000 Subject: [PATCH 460/794] Print/return the text from a number of errors that were silent before. --- pkg/volume/azure_dd/attacher.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 11377b0b0ee..839c96048ce 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -60,14 +60,14 @@ var getLunMutex = keymutex.NewKeyMutex() func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { volumeSource, err := getVolumeSource(spec) if err != nil { - glog.Warningf("failed to get azure disk spec") + glog.Warningf("failed to get azure disk spec (%v)", err) return "", err } instanceid, err := a.cloud.InstanceID(nodeName) if err != nil { - glog.Warningf("failed to get azure instance id") - return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName) + glog.Warningf("failed to get azure instance id (%v)", err) + return "", fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) } if ind := strings.LastIndex(instanceid, "/"); ind >= 0 { instanceid = instanceid[(ind + 1):] @@ -96,8 +96,8 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) ( lun, err = diskController.GetNextDiskLun(nodeName) if err != nil { - glog.Warningf("no LUN available for instance %q", nodeName) - return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid) + glog.Warningf("no LUN available for instance %q (%v)", nodeName, err) + return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q (%v)", volumeSource.DiskName, instanceid, err) } glog.V(4).Infof("Trying to attach volume %q 
lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName) isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) @@ -156,7 +156,7 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, var err error lun, err := strconv.Atoi(devicePath) if err != nil { - return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s", devicePath) + return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s (%v)", devicePath, err) } volumeSource, err := getVolumeSource(spec) @@ -260,7 +260,7 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro instanceid, err := d.cloud.InstanceID(nodeName) if err != nil { - glog.Warningf("no instance id for node %q, skip detaching", nodeName) + glog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err) return nil } if ind := strings.LastIndex(instanceid, "/"); ind >= 0 { From c0eee2950f018bcce1ff5dc5eb46d23f74767ce7 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Wed, 20 Dec 2017 23:28:07 +0000 Subject: [PATCH 461/794] Fix a bug in validating node existence. 
--- pkg/cloudprovider/providers/azure/BUILD | 2 + .../providers/azure/azure_wrap.go | 5 +- .../providers/azure/azure_wrap_test.go | 53 +++++++++++++++++++ 3 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 pkg/cloudprovider/providers/azure/azure_wrap_test.go diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index 322024f2c61..eea1107d602 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -63,6 +63,7 @@ go_test( "azure_loadbalancer_test.go", "azure_test.go", "azure_util_test.go", + "azure_wrap_test.go", ], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", library = ":go_default_library", @@ -71,6 +72,7 @@ go_test( "//pkg/kubelet/apis:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index f1aa0def597..ec045fee007 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -35,7 +35,10 @@ func checkResourceExistsFromError(err error) (bool, error) { return true, nil } v, ok := err.(autorest.DetailedError) - if ok && v.StatusCode == http.StatusNotFound { + if !ok { + return false, err + } + if v.StatusCode == http.StatusNotFound { return false, nil } return false, v diff --git a/pkg/cloudprovider/providers/azure/azure_wrap_test.go b/pkg/cloudprovider/providers/azure/azure_wrap_test.go new file mode 100644 index 00000000000..380194ba9c3 --- /dev/null +++ 
b/pkg/cloudprovider/providers/azure/azure_wrap_test.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/Azure/go-autorest/autorest" +) + +func TestExtractNotFound(t *testing.T) { + notFound := autorest.DetailedError{StatusCode: http.StatusNotFound} + otherHTTP := autorest.DetailedError{StatusCode: http.StatusForbidden} + otherErr := fmt.Errorf("other error") + + tests := []struct { + err error + expectedErr error + exists bool + }{ + {nil, nil, true}, + {otherErr, otherErr, false}, + {notFound, nil, false}, + {otherHTTP, otherHTTP, false}, + } + + for _, test := range tests { + exists, err := checkResourceExistsFromError(test.err) + if test.exists != exists { + t.Errorf("expected: %v, saw: %v", test.exists, exists) + } + if !reflect.DeepEqual(test.expectedErr, err) { + t.Errorf("expected err: %v, saw: %v", test.expectedErr, err) + } + } +} From 3432741b8093d14910b70cb8bd7fe92b33290636 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Thu, 21 Dec 2017 09:13:34 +0800 Subject: [PATCH 462/794] [kubelet]fix unstandardized function name, rename new() to newSourceFile() --- pkg/kubelet/config/file.go | 4 ++-- pkg/kubelet/config/file_linux_test.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/kubelet/config/file.go b/pkg/kubelet/config/file.go index a706abdb36e..aeabec55987 100644 --- a/pkg/kubelet/config/file.go +++ 
b/pkg/kubelet/config/file.go @@ -48,12 +48,12 @@ func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, u // "golang.org/x/exp/inotify" requires a path without trailing "/" path = strings.TrimRight(path, string(os.PathSeparator)) - config := new(path, nodeName, period, updates) + config := newSourceFile(path, nodeName, period, updates) glog.V(1).Infof("Watching path %q", path) go wait.Forever(config.run, period) } -func new(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) *sourceFile { +func newSourceFile(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) *sourceFile { send := func(objs []interface{}) { var pods []*v1.Pod for _, o := range objs { diff --git a/pkg/kubelet/config/file_linux_test.go b/pkg/kubelet/config/file_linux_test.go index 9ca23f115a9..64ca4943a55 100644 --- a/pkg/kubelet/config/file_linux_test.go +++ b/pkg/kubelet/config/file_linux_test.go @@ -46,7 +46,7 @@ import ( func TestExtractFromNonExistentFile(t *testing.T) { ch := make(chan interface{}, 1) - c := new("/some/fake/file", "localhost", time.Millisecond, ch) + c := newSourceFile("/some/fake/file", "localhost", time.Millisecond, ch) err := c.watch() if err == nil { t.Errorf("Expected error") @@ -137,7 +137,7 @@ func TestExtractFromBadDataFile(t *testing.T) { } ch := make(chan interface{}, 1) - c := new(fileName, "localhost", time.Millisecond, ch) + c := newSourceFile(fileName, "localhost", time.Millisecond, ch) err = c.resetStoreFromPath() if err == nil { t.Fatalf("expected error, got nil") @@ -153,7 +153,7 @@ func TestExtractFromEmptyDir(t *testing.T) { defer os.RemoveAll(dirName) ch := make(chan interface{}, 1) - c := new(dirName, "localhost", time.Millisecond, ch) + c := newSourceFile(dirName, "localhost", time.Millisecond, ch) err = c.resetStoreFromPath() if err != nil { t.Fatalf("unexpected error: %v", err) From c65225ee19f213cdf4ecf9e4c70ffec53c77fecb Mon Sep 17 00:00:00 2001 From: 
Yongkun Anfernee Gui Date: Thu, 9 Nov 2017 15:53:52 -0800 Subject: [PATCH 463/794] Merge 3 resource allocation priority functions --- .../pkg/scheduler/algorithm/priorities/BUILD | 1 + .../balanced_resource_allocation.go | 95 ++++--------------- .../algorithm/priorities/image_locality.go | 7 ++ .../algorithm/priorities/least_requested.go | 70 ++++---------- .../algorithm/priorities/most_requested.go | 67 +++---------- .../priorities/resource_allocation.go | 82 ++++++++++++++++ 6 files changed, 143 insertions(+), 179 deletions(-) create mode 100644 plugin/pkg/scheduler/algorithm/priorities/resource_allocation.go diff --git a/plugin/pkg/scheduler/algorithm/priorities/BUILD b/plugin/pkg/scheduler/algorithm/priorities/BUILD index a7a10dc50da..b6e8d618cd2 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/BUILD +++ b/plugin/pkg/scheduler/algorithm/priorities/BUILD @@ -19,6 +19,7 @@ go_library( "node_label.go", "node_prefer_avoid_pods.go", "reduce.go", + "resource_allocation.go", "resource_limits.go", "selector_spreading.go", "taint_toleration.go", diff --git a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go index 791e28ee1a0..c57bcbfc68c 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go +++ b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go @@ -17,76 +17,40 @@ limitations under the License. package priorities import ( - "fmt" "math" - "k8s.io/api/core/v1" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - - "github.com/golang/glog" ) -// This is a reasonable size range of all container images. 90%ile of images on dockerhub drops into this range. 
-const ( - mb int64 = 1024 * 1024 - minImgSize int64 = 23 * mb - maxImgSize int64 = 1000 * mb +var ( + balanceResourcePriority = &ResourceAllocationPriority{"BalanceResourceAllocation", balancedResourceScorer} + + // BalancedResourceAllocationMap favors nodes with balanced resource usage rate. + // BalancedResourceAllocationMap should **NOT** be used alone, and **MUST** be used together + // with LeastRequestedPriority. It calculates the difference between the cpu and memory fraction + // of capacity, and prioritizes the host based on how close the two metrics are to each other. + // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by: + // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced + // Resource Utilization" + BalancedResourceAllocationMap = balanceResourcePriority.PriorityMap ) -// Also used in most/least_requested nad metadata. -// TODO: despaghettify it -func getNonZeroRequests(pod *v1.Pod) *schedulercache.Resource { - result := &schedulercache.Resource{} - for i := range pod.Spec.Containers { - container := &pod.Spec.Containers[i] - cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests) - result.MilliCPU += cpu - result.Memory += memory - } - return result -} +func balancedResourceScorer(requested, allocable *schedulercache.Resource) int64 { + cpuFraction := fractionOfCapacity(requested.MilliCPU, allocable.MilliCPU) + memoryFraction := fractionOfCapacity(requested.Memory, allocable.Memory) -func calculateBalancedResourceAllocation(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { - node := nodeInfo.Node() - if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") - } - - allocatableResources := nodeInfo.AllocatableResource() - totalResources := *podRequests - totalResources.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU - totalResources.Memory += 
nodeInfo.NonZeroRequest().Memory - - cpuFraction := fractionOfCapacity(totalResources.MilliCPU, allocatableResources.MilliCPU) - memoryFraction := fractionOfCapacity(totalResources.Memory, allocatableResources.Memory) - score := int(0) if cpuFraction >= 1 || memoryFraction >= 1 { // if requested >= capacity, the corresponding host should never be preferred. - score = 0 - } else { - // Upper and lower boundary of difference between cpuFraction and memoryFraction are -1 and 1 - // respectively. Multilying the absolute value of the difference by 10 scales the value to - // 0-10 with 0 representing well balanced allocation and 10 poorly balanced. Subtracting it from - // 10 leads to the score which also scales from 0 to 10 while 10 representing well balanced. - diff := math.Abs(cpuFraction - memoryFraction) - score = int((1 - diff) * float64(schedulerapi.MaxPriority)) - } - if glog.V(10) { - glog.Infof( - "%v -> %v: Balanced Resource Allocation, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d", - pod.Name, node.Name, - allocatableResources.MilliCPU, allocatableResources.Memory, - totalResources.MilliCPU, totalResources.Memory, - score, - ) + return 0 } - return schedulerapi.HostPriority{ - Host: node.Name, - Score: score, - }, nil + // Upper and lower boundary of difference between cpuFraction and memoryFraction are -1 and 1 + // respectively. Multilying the absolute value of the difference by 10 scales the value to + // 0-10 with 0 representing well balanced allocation and 10 poorly balanced. Subtracting it from + // 10 leads to the score which also scales from 0 to 10 while 10 representing well balanced. 
+ diff := math.Abs(cpuFraction - memoryFraction) + return int64((1 - diff) * float64(schedulerapi.MaxPriority)) } func fractionOfCapacity(requested, capacity int64) float64 { @@ -95,20 +59,3 @@ func fractionOfCapacity(requested, capacity int64) float64 { } return float64(requested) / float64(capacity) } - -// BalancedResourceAllocationMap favors nodes with balanced resource usage rate. -// BalancedResourceAllocationMap should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority. -// It calculates the difference between the cpu and memory fraction of capacity, and prioritizes the host based on how -// close the two metrics are to each other. -// Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by: -// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization" -func BalancedResourceAllocationMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { - var nonZeroRequest *schedulercache.Resource - if priorityMeta, ok := meta.(*priorityMetadata); ok { - nonZeroRequest = priorityMeta.nonZeroRequest - } else { - // We couldn't parse metadatat - fallback to computing it. - nonZeroRequest = getNonZeroRequests(pod) - } - return calculateBalancedResourceAllocation(pod, nonZeroRequest, nodeInfo) -} diff --git a/plugin/pkg/scheduler/algorithm/priorities/image_locality.go b/plugin/pkg/scheduler/algorithm/priorities/image_locality.go index 1629dee4fdf..86b3bb6c54f 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/image_locality.go +++ b/plugin/pkg/scheduler/algorithm/priorities/image_locality.go @@ -24,6 +24,13 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) +// This is a reasonable size range of all container images. 90%ile of images on dockerhub drops into this range. 
+const ( + mb int64 = 1024 * 1024 + minImgSize int64 = 23 * mb + maxImgSize int64 = 1000 * mb +) + // ImageLocalityPriorityMap is a priority function that favors nodes that already have requested pod container's images. // It will detect whether the requested images are present on a node, and then calculate a score ranging from 0 to 10 // based on the total size of those images. diff --git a/plugin/pkg/scheduler/algorithm/priorities/least_requested.go b/plugin/pkg/scheduler/algorithm/priorities/least_requested.go index 73d5db676e9..39d3208f6fa 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/least_requested.go +++ b/plugin/pkg/scheduler/algorithm/priorities/least_requested.go @@ -17,73 +17,37 @@ limitations under the License. package priorities import ( - "fmt" - - "k8s.io/api/core/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - - "github.com/golang/glog" ) -// LeastRequestedPriority is a priority function that favors nodes with fewer requested resources. -// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes -// based on the minimum of the average of the fraction of requested to capacity. -// Details: cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity) / 2 -func LeastRequestedPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { - var nonZeroRequest *schedulercache.Resource - if priorityMeta, ok := meta.(*priorityMetadata); ok { - nonZeroRequest = priorityMeta.nonZeroRequest - } else { - // We couldn't parse metadata - fallback to computing it. 
- nonZeroRequest = getNonZeroRequests(pod) - } - return calculateUnusedPriority(pod, nonZeroRequest, nodeInfo) +var ( + leastResourcePriority = &ResourceAllocationPriority{"LeastResourceAllocation", leastResourceScorer} + + // LeastRequestedPriority is a priority function that favors nodes with fewer requested resources. + // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and + // prioritizes based on the minimum of the average of the fraction of requested to capacity. + // + // Details: + // cpu((capacity-sum(requested))*10/capacity) + memory((capacity-sum(requested))*10/capacity)/2 + LeastRequestedPriorityMap = leastResourcePriority.PriorityMap +) + +func leastResourceScorer(requested, allocable *schedulercache.Resource) int64 { + return (leastRequestedScore(requested.MilliCPU, allocable.MilliCPU) + + leastRequestedScore(requested.Memory, allocable.Memory)) / 2 } // The unused capacity is calculated on a scale of 0-10 // 0 being the lowest priority and 10 being the highest. // The more unused resources the higher the score is. -func calculateUnusedScore(requested int64, capacity int64, node string) int64 { +func leastRequestedScore(requested, capacity int64) int64 { if capacity == 0 { return 0 } if requested > capacity { - glog.V(10).Infof("Combined requested resources %d from existing pods exceeds capacity %d on node %s", - requested, capacity, node) return 0 } + return ((capacity - requested) * int64(schedulerapi.MaxPriority)) / capacity } - -// Calculates host priority based on the amount of unused resources. -// 'node' has information about the resources on the node. -// 'pods' is a list of pods currently scheduled on the node. 
-func calculateUnusedPriority(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { - node := nodeInfo.Node() - if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") - } - - allocatableResources := nodeInfo.AllocatableResource() - totalResources := *podRequests - totalResources.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU - totalResources.Memory += nodeInfo.NonZeroRequest().Memory - - cpuScore := calculateUnusedScore(totalResources.MilliCPU, allocatableResources.MilliCPU, node.Name) - memoryScore := calculateUnusedScore(totalResources.Memory, allocatableResources.Memory, node.Name) - if glog.V(10) { - glog.Infof( - "%v -> %v: Least Requested Priority, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d CPU %d memory", - pod.Name, node.Name, - allocatableResources.MilliCPU, allocatableResources.Memory, - totalResources.MilliCPU, totalResources.Memory, - cpuScore, memoryScore, - ) - } - - return schedulerapi.HostPriority{ - Host: node.Name, - Score: int((cpuScore + memoryScore) / 2), - }, nil -} diff --git a/plugin/pkg/scheduler/algorithm/priorities/most_requested.go b/plugin/pkg/scheduler/algorithm/priorities/most_requested.go index 9d1697db93e..9cba1a32ee5 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/most_requested.go +++ b/plugin/pkg/scheduler/algorithm/priorities/most_requested.go @@ -17,28 +17,23 @@ limitations under the License. package priorities import ( - "fmt" - - "k8s.io/api/core/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - - "github.com/golang/glog" ) -// MostRequestedPriority is a priority function that favors nodes with most requested resources. 
-// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes -// based on the maximum of the average of the fraction of requested to capacity. -// Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) / capacity)) / 2 -func MostRequestedPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { - var nonZeroRequest *schedulercache.Resource - if priorityMeta, ok := meta.(*priorityMetadata); ok { - nonZeroRequest = priorityMeta.nonZeroRequest - } else { - // We couldn't parse metadatat - fallback to computing it. - nonZeroRequest = getNonZeroRequests(pod) - } - return calculateUsedPriority(pod, nonZeroRequest, nodeInfo) +var ( + mostResourcePriority = &ResourceAllocationPriority{"MostResourceAllocation", mostResourceScorer} + + // MostRequestedPriority is a priority function that favors nodes with most requested resources. + // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes + // based on the maximum of the average of the fraction of requested to capacity. + // Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) / capacity)) / 2 + MostRequestedPriorityMap = mostResourcePriority.PriorityMap +) + +func mostResourceScorer(requested, allocable *schedulercache.Resource) int64 { + return (mostRequestedScore(requested.MilliCPU, allocable.MilliCPU) + + mostRequestedScore(requested.Memory, allocable.Memory)) / 2 } // The used capacity is calculated on a scale of 0-10 @@ -48,45 +43,13 @@ func MostRequestedPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler // (10 - calculateUnusedScore). The main difference is in rounding. It was added to // keep the final formula clean and not to modify the widely used (by users // in their default scheduling policies) calculateUSedScore. 
-func calculateUsedScore(requested int64, capacity int64, node string) int64 { +func mostRequestedScore(requested, capacity int64) int64 { if capacity == 0 { return 0 } if requested > capacity { - glog.V(10).Infof("Combined requested resources %d from existing pods exceeds capacity %d on node %s", - requested, capacity, node) return 0 } + return (requested * schedulerapi.MaxPriority) / capacity } - -// Calculate the resource used on a node. 'node' has information about the resources on the node. -// 'pods' is a list of pods currently scheduled on the node. -func calculateUsedPriority(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { - node := nodeInfo.Node() - if node == nil { - return schedulerapi.HostPriority{}, fmt.Errorf("node not found") - } - - allocatableResources := nodeInfo.AllocatableResource() - totalResources := *podRequests - totalResources.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU - totalResources.Memory += nodeInfo.NonZeroRequest().Memory - - cpuScore := calculateUsedScore(totalResources.MilliCPU, allocatableResources.MilliCPU, node.Name) - memoryScore := calculateUsedScore(totalResources.Memory, allocatableResources.Memory, node.Name) - if glog.V(10) { - glog.Infof( - "%v -> %v: Most Requested Priority, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d CPU %d memory", - pod.Name, node.Name, - allocatableResources.MilliCPU, allocatableResources.Memory, - totalResources.MilliCPU, totalResources.Memory, - cpuScore, memoryScore, - ) - } - - return schedulerapi.HostPriority{ - Host: node.Name, - Score: int((cpuScore + memoryScore) / 2), - }, nil -} diff --git a/plugin/pkg/scheduler/algorithm/priorities/resource_allocation.go b/plugin/pkg/scheduler/algorithm/priorities/resource_allocation.go new file mode 100644 index 00000000000..9723eff142e --- /dev/null +++ b/plugin/pkg/scheduler/algorithm/priorities/resource_allocation.go @@ -0,0 
+1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package priorities + +import ( + "fmt" + + "github.com/golang/glog" + "k8s.io/api/core/v1" + priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" + schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" +) + +type ResourceAllocationPriority struct { + Name string + scorer func(requested, allocable *schedulercache.Resource) int64 +} + +func (r *ResourceAllocationPriority) PriorityMap( + pod *v1.Pod, + meta interface{}, + nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { + node := nodeInfo.Node() + if node == nil { + return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + } + allocatable := nodeInfo.AllocatableResource() + + var requested schedulercache.Resource + if priorityMeta, ok := meta.(*priorityMetadata); ok { + requested = *priorityMeta.nonZeroRequest + } else { + // We couldn't parse metadatat - fallback to computing it. 
+ requested = *getNonZeroRequests(pod) + } + + requested.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU + requested.Memory += nodeInfo.NonZeroRequest().Memory + + score := r.scorer(&requested, &allocatable) + + if glog.V(10) { + glog.Infof( + "%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d", + pod.Name, node.Name, r.Name, + allocatable.MilliCPU, allocatable.Memory, + requested.MilliCPU+allocatable.MilliCPU, requested.Memory+allocatable.Memory, + score, + ) + } + + return schedulerapi.HostPriority{ + Host: node.Name, + Score: int(score), + }, nil +} + +func getNonZeroRequests(pod *v1.Pod) *schedulercache.Resource { + result := &schedulercache.Resource{} + for i := range pod.Spec.Containers { + container := &pod.Spec.Containers[i] + cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests) + result.MilliCPU += cpu + result.Memory += memory + } + return result +} From c893ce8c95941c438fc25c4d5fd829417b81db8d Mon Sep 17 00:00:00 2001 From: Nick Sardo Date: Mon, 18 Dec 2017 12:04:04 -0800 Subject: [PATCH 464/794] Get automatically created subnetwork if none is specified --- pkg/cloudprovider/providers/gce/BUILD | 1 + pkg/cloudprovider/providers/gce/gce.go | 65 +++++++++++--- pkg/cloudprovider/providers/gce/gce_util.go | 63 +++++++++++++ .../providers/gce/gce_util_test.go | 90 +++++++++++++++++++ 4 files changed, 205 insertions(+), 14 deletions(-) create mode 100644 pkg/cloudprovider/providers/gce/gce_util_test.go diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index 9636a85c9e3..69509a2243a 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -95,6 +95,7 @@ go_test( "gce_healthchecks_test.go", "gce_loadbalancer_external_test.go", "gce_test.go", + "gce_util_test.go", "metrics_test.go", ], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", diff --git a/pkg/cloudprovider/providers/gce/gce.go 
b/pkg/cloudprovider/providers/gce/gce.go index 95d4f33afb5..46e5b614daa 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -17,6 +17,7 @@ limitations under the License. package gce import ( + "context" "fmt" "io" "net/http" @@ -435,24 +436,26 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { } else if config.SubnetworkName != "" { subnetURL = gceSubnetworkURL(config.ApiEndpoint, netProjID, config.Region, config.SubnetworkName) } else { - // Attempt to determine the subnetwork in case it's an automatic network. - // Legacy networks will not have a subnetwork, so subnetworkURL should remain empty. + // Determine the type of network and attempt to discover the correct subnet for AUTO mode. + // Gracefully fail because kubelet calls CreateGCECloud without any config, and minions + // lack the proper credentials for API calls. if networkName := lastComponent(networkURL); networkName != "" { - if n, err := getNetwork(service, netProjID, networkName); err != nil { - // Gracefully fail because kubelet calls CreateGCECloud without any config, and API calls will fail coming from minions. 
- glog.Warningf("Could not retrieve network %q in attempt to determine if legacy network or see list of subnets, err %v", networkURL, err) + var n *compute.Network + if n, err = getNetwork(service, netProjID, networkName); err != nil { + glog.Warningf("Could not retrieve network %q; err: %v", networkName, err) } else { - // Legacy networks have a non-empty IPv4Range - if len(n.IPv4Range) > 0 { - glog.Infof("Determined network %q is type legacy", networkURL) + switch typeOfNetwork(n) { + case netTypeLegacy: + glog.Infof("Network %q is type legacy - no subnetwork", networkName) isLegacyNetwork = true - } else { - // Try to find the subnet in the list of subnets - subnetURL = findSubnetForRegion(n.Subnetworks, config.Region) - if len(subnetURL) > 0 { - glog.Infof("Using subnet %q within network %q & region %q because none was specified.", subnetURL, n.Name, config.Region) + case netTypeCustom: + glog.Warningf("Network %q is type custom - cannot auto select a subnetwork", networkName) + case netTypeAuto: + subnetURL, err = determineSubnetURL(service, netProjID, networkName, config.Region) + if err != nil { + glog.Warningf("Could not determine subnetwork for network %q and region %v; err: %v", networkName, config.Region, err) } else { - glog.Warningf("Could not find any subnet in region %q within list %v.", config.Region, n.Subnetworks) + glog.Infof("Auto selecting subnetwork %q", subnetURL) } } } @@ -499,6 +502,30 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { return gce, nil } +// determineSubnetURL queries for all subnetworks in a region for a given network and returns +// the URL of the subnetwork which exists in the auto-subnet range. 
+func determineSubnetURL(service *compute.Service, networkProjectID, networkName, region string) (string, error) { + subnets, err := listSubnetworksOfNetwork(service, networkProjectID, networkName, region) + if err != nil { + return "", err + } + + autoSubnets, err := subnetsInCIDR(subnets, autoSubnetIPRange) + if err != nil { + return "", err + } + + if len(autoSubnets) == 0 { + return "", fmt.Errorf("no subnet exists in auto CIDR") + } + + if len(autoSubnets) > 1 { + return "", fmt.Errorf("multiple subnetworks in the same region exist in auto CIDR") + } + + return autoSubnets[0].SelfLink, nil +} + func tryConvertToProjectNames(configProject, configNetworkProject string, service *compute.Service) (projID, netProjID string) { projID = configProject if isProjectNumber(projID) { @@ -740,6 +767,16 @@ func getNetwork(svc *compute.Service, networkProjectID, networkID string) (*comp return svc.Networks.Get(networkProjectID, networkID).Do() } +// listSubnetworksOfNetwork returns a list of subnetworks for a particular region of a network. +func listSubnetworksOfNetwork(svc *compute.Service, networkProjectID, networkID, region string) ([]*compute.Subnetwork, error) { + var subnets []*compute.Subnetwork + err := svc.Subnetworks.List(networkProjectID, region).Filter(fmt.Sprintf("network eq .*/%v$", networkID)).Pages(context.Background(), func(res *compute.SubnetworkList) error { + subnets = append(subnets, res.Items...) 
+ return nil + }) + return subnets, err +} + // getProjectID returns the project's string ID given a project number or string func getProjectID(svc *compute.Service, projectNumberOrID string) (string, error) { proj, err := svc.Projects.Get(projectNumberOrID).Do() diff --git a/pkg/cloudprovider/providers/gce/gce_util.go b/pkg/cloudprovider/providers/gce/gce_util.go index 484a67e0b37..fb70d1dd053 100644 --- a/pkg/cloudprovider/providers/gce/gce_util.go +++ b/pkg/cloudprovider/providers/gce/gce_util.go @@ -19,6 +19,7 @@ package gce import ( "errors" "fmt" + "net" "net/http" "regexp" "strings" @@ -40,6 +41,13 @@ type gceInstance struct { Type string } +var ( + autoSubnetIPRange = &net.IPNet{ + IP: net.ParseIP("10.128.0.0"), + Mask: net.CIDRMask(9, 32), + } +) + var providerIdRE = regexp.MustCompile(`^` + ProviderName + `://([^/]+)/([^/]+)/([^/]+)$`) func getProjectAndZone() (string, string, error) { @@ -211,3 +219,58 @@ func handleAlphaNetworkTierGetError(err error) (string, error) { // Can't get the network tier, just return an error. return "", err } + +// containsCIDR returns true if outer contains inner. +func containsCIDR(outer, inner *net.IPNet) bool { + return outer.Contains(firstIPInRange(inner)) && outer.Contains(lastIPInRange(inner)) +} + +// firstIPInRange returns the first IP in a given IP range. +func firstIPInRange(ipNet *net.IPNet) net.IP { + return ipNet.IP.Mask(ipNet.Mask) +} + +// lastIPInRange returns the last IP in a given IP range. +func lastIPInRange(cidr *net.IPNet) net.IP { + ip := append([]byte{}, cidr.IP...) + for i, b := range cidr.Mask { + ip[i] |= ^b + } + return ip +} + +// subnetsInCIDR takes a list of subnets for a single region and +// returns subnets which exists in the specified CIDR range. 
+func subnetsInCIDR(subnets []*compute.Subnetwork, cidr *net.IPNet) ([]*compute.Subnetwork, error) { + var res []*compute.Subnetwork + for _, subnet := range subnets { + _, subnetRange, err := net.ParseCIDR(subnet.IpCidrRange) + if err != nil { + return nil, fmt.Errorf("unable to parse CIDR %q for subnet %q: %v", subnet.IpCidrRange, subnet.Name, err) + } + if containsCIDR(cidr, subnetRange) { + res = append(res, subnet) + } + } + return res, nil +} + +type netType string + +const ( + netTypeLegacy netType = "LEGACY" + netTypeAuto netType = "AUTO" + netTypeCustom netType = "CUSTOM" +) + +func typeOfNetwork(network *compute.Network) netType { + if network.IPv4Range != "" { + return netTypeLegacy + } + + if network.AutoCreateSubnetworks { + return netTypeAuto + } + + return netTypeCustom +} diff --git a/pkg/cloudprovider/providers/gce/gce_util_test.go b/pkg/cloudprovider/providers/gce/gce_util_test.go new file mode 100644 index 00000000000..f0bd4379b00 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/gce_util_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gce + +import ( + "net" + "reflect" + "testing" + + compute "google.golang.org/api/compute/v1" +) + +func TestLastIPInRange(t *testing.T) { + for _, tc := range []struct { + cidr string + want string + }{ + {"10.1.2.3/32", "10.1.2.3"}, + {"10.1.2.0/31", "10.1.2.1"}, + {"10.1.0.0/30", "10.1.0.3"}, + {"10.0.0.0/29", "10.0.0.7"}, + {"::0/128", "::"}, + {"::0/127", "::1"}, + {"::0/126", "::3"}, + {"::0/120", "::ff"}, + } { + _, c, err := net.ParseCIDR(tc.cidr) + if err != nil { + t.Errorf("net.ParseCIDR(%v) = _, %v, %v; want nil", tc.cidr, c, err) + continue + } + + if lastIP := lastIPInRange(c); lastIP.String() != tc.want { + t.Errorf("LastIPInRange(%v) = %v; want %v", tc.cidr, lastIP, tc.want) + } + } +} + +func TestSubnetsInCIDR(t *testing.T) { + subnets := []*compute.Subnetwork{ + { + Name: "A", + IpCidrRange: "10.0.0.0/20", + }, + { + Name: "B", + IpCidrRange: "10.0.16.0/20", + }, + { + Name: "C", + IpCidrRange: "10.132.0.0/20", + }, + { + Name: "D", + IpCidrRange: "10.0.32.0/20", + }, + { + Name: "E", + IpCidrRange: "10.134.0.0/20", + }, + } + expectedNames := []string{"C", "E"} + + gotSubs, err := subnetsInCIDR(subnets, autoSubnetIPRange) + if err != nil { + t.Errorf("autoSubnetInList() = _, %v", err) + } + + var gotNames []string + for _, v := range gotSubs { + gotNames = append(gotNames, v.Name) + } + if !reflect.DeepEqual(gotNames, expectedNames) { + t.Errorf("autoSubnetInList() = %v, expected: %v", gotNames, expectedNames) + } +} From 9f97d61de4e9a188f8ef2c58d6e1786c6793225e Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Wed, 20 Dec 2017 23:48:15 -0600 Subject: [PATCH 465/794] Fixed space/tab indentation Some lines had spaces for indentation instead of tabs. 
--- pkg/volume/iscsi/iscsi_util.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go index 6c6d03c0034..12e8430d85e 100644 --- a/pkg/volume/iscsi/iscsi_util.go +++ b/pkg/volume/iscsi/iscsi_util.go @@ -279,8 +279,8 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", string(out), err) continue } - // in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot - out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "node.startup", "-v", "manual") + // in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot + out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "node.startup", "-v", "manual") if err != nil { // don't fail if we can't set startup mode, but log warning so there is a clue glog.Warningf("Warning: Failed to set iSCSI login mode to manual. Error: %v", err) From a7c7da76d5f7d497391d5fa373c88086ee21432b Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Thu, 21 Dec 2017 01:07:07 -0500 Subject: [PATCH 466/794] Ensure dependents are added to virtual node before attemptToDelete --- pkg/controller/garbagecollector/graph_builder.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/pkg/controller/garbagecollector/graph_builder.go b/pkg/controller/garbagecollector/graph_builder.go index 03a96503a06..355d3dea5fa 100644 --- a/pkg/controller/garbagecollector/graph_builder.go +++ b/pkg/controller/garbagecollector/graph_builder.go @@ -382,10 +382,7 @@ func (gb *GraphBuilder) addDependentToOwners(n *node, owners []metav1.OwnerRefer ownerNode, ok := gb.uidToNode.Read(owner.UID) if !ok { // Create a "virtual" node in the graph for the owner if it doesn't - // exist in the graph yet. 
Then enqueue the virtual node into the - // attemptToDelete. The garbage processor will enqueue a virtual delete - // event to delete it from the graph if API server confirms this - // owner doesn't exist. + // exist in the graph yet. ownerNode = &node{ identity: objectReference{ OwnerReference: owner, @@ -395,9 +392,15 @@ func (gb *GraphBuilder) addDependentToOwners(n *node, owners []metav1.OwnerRefer } glog.V(5).Infof("add virtual node.identity: %s\n\n", ownerNode.identity) gb.uidToNode.Write(ownerNode) - gb.attemptToDelete.Add(ownerNode) } ownerNode.addDependent(n) + if !ok { + // Enqueue the virtual node into attemptToDelete. + // The garbage processor will enqueue a virtual delete + // event to delete it from the graph if API server confirms this + // owner doesn't exist. + gb.attemptToDelete.Add(ownerNode) + } } } From 9d34fee57cc0b839956e6de5722280ba2fa36a1d Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Tue, 12 Dec 2017 16:41:19 +0100 Subject: [PATCH 467/794] hack/local-up-cluster.sh: improve messages when script was running with ENABLE_DAEMON=true --- hack/local-up-cluster.sh | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 9fadd23db82..95371b3d704 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -833,8 +833,12 @@ function create_storage_class { function print_success { if [[ "${START_MODE}" != "kubeletonly" ]]; then + if [[ "${ENABLE_DAEMON}" = false ]]; then + echo "Local Kubernetes cluster is running. Press Ctrl-C to shut it down." + else + echo "Local Kubernetes cluster is running." + fi cat < Date: Thu, 21 Dec 2017 20:11:36 +0800 Subject: [PATCH 468/794] RBD Plugin: Pass monitors addresses in a comma-separated list instead of trying one by one. In production, monitors may crash (or have a network problem), if we try monitors one by one, rbd command will hang a long time (e.g. 
`rbd map -m ` on linux 4.4 timed out in 6 minutes) when trying an unconnectable monitor. This is unacceptable. Actually, we can simply pass a comma-separated list of monitor addresses to `rbd` command utility. Kernel rbd/libceph modules will pick monitor randomly and try one by one, `rbd` command utility succeeds soon if there is a good one in monitors list. --- pkg/volume/rbd/rbd_util.go | 378 ++++++++++++++++--------------------- 1 file changed, 163 insertions(+), 215 deletions(-) diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index 15b4b52ee66..94c5f4efa9f 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -25,7 +25,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "math/rand" "os" "os/exec" "path" @@ -133,6 +132,17 @@ func rbdErrors(runErr, resultErr error) error { return resultErr } +// 'rbd' utility simply pass '-m ' parameter to kernel rbd/libceph +// modules, which takes a comma-seprated list of one or more monitor addresses +// (e.g. ip1[:port1][,ip2[:port2]...]) in its first version in linux (see +// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/ceph_common.c#L239) +// Also, libceph choose monitor randomly, so we can simply pass all addresses +// without randomization (see +// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/mon_client.c#L132). +func (util *RBDUtil) kernelRBDMonitorsOpt(mons []string) string { + return strings.Join(mons, ",") +} + // rbdLock acquires a lock on image if lock is true, otherwise releases if a // lock is found on image. func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error { @@ -156,88 +166,79 @@ func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error { // construct lock id using host name and a magic prefix lock_id := kubeLockMagic + node.GetHostname("") - l := len(b.Mon) - // avoid mount storm, pick a host randomly - start := rand.Int() % l - // iterate all hosts until mount succeeds. 
- for i := start; i < start+l; i++ { - mon := b.Mon[i%l] - // cmd "rbd lock list" serves two purposes: - // for fencing, check if lock already held for this host - // this edge case happens if host crashes in the middle of acquiring lock and mounting rbd - // for defencing, get the locker name, something like "client.1234" - args := []string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon} - args = append(args, secret_opt...) - cmd, err = b.exec.Run("rbd", args...) - output = string(cmd) - glog.Infof("lock list output %q", output) - if err != nil { - continue + mon := util.kernelRBDMonitorsOpt(b.Mon) + + // cmd "rbd lock list" serves two purposes: + // for fencing, check if lock already held for this host + // this edge case happens if host crashes in the middle of acquiring lock and mounting rbd + // for defencing, get the locker name, something like "client.1234" + args := []string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon} + args = append(args, secret_opt...) + cmd, err = b.exec.Run("rbd", args...) 
+ output = string(cmd) + glog.Infof("lock list output %q", output) + if err != nil { + return err + } + + if lock { + // check if lock is already held for this host by matching lock_id and rbd lock id + if strings.Contains(output, lock_id) { + // this host already holds the lock, exit + glog.V(1).Infof("rbd: lock already held for %s", lock_id) + return nil + } + // clean up orphaned lock if no watcher on the image + used, rbdOutput, statusErr := util.rbdStatus(&b) + if statusErr != nil { + return fmt.Errorf("rbdStatus failed error %v, rbd output: %v", statusErr, rbdOutput) + } + if used { + // this image is already used by a node other than this node + return fmt.Errorf("rbd image: %s/%s is already used by a node other than this node, rbd output: %v", b.Image, b.Pool, output) } - if lock { - // check if lock is already held for this host by matching lock_id and rbd lock id - if strings.Contains(output, lock_id) { - // this host already holds the lock, exit - glog.V(1).Infof("rbd: lock already held for %s", lock_id) - return nil - } - // clean up orphaned lock if no watcher on the image - used, rbdOutput, statusErr := util.rbdStatus(&b) - if statusErr != nil { - return fmt.Errorf("rbdStatus failed error %v, rbd output: %v", statusErr, rbdOutput) - } - if used { - // this image is already used by a node other than this node - return fmt.Errorf("rbd image: %s/%s is already used by a node other than this node, rbd output: %v", b.Image, b.Pool, output) - } - - // best effort clean up orphaned locked if not used - locks := clientKubeLockMagicRe.FindAllStringSubmatch(output, -1) - for _, v := range locks { - if len(v) > 0 { - lockInfo := strings.Split(v[0], " ") - if len(lockInfo) > 2 { - args := []string{"lock", "remove", b.Image, lockInfo[1], lockInfo[0], "--pool", b.Pool, "--id", b.Id, "-m", mon} - args = append(args, secret_opt...) - cmd, err = b.exec.Run("rbd", args...) 
- glog.Infof("remove orphaned locker %s from client %s: err %v, rbd output: %s", lockInfo[1], lockInfo[0], err, string(cmd)) - } + // best effort clean up orphaned locked if not used + locks := clientKubeLockMagicRe.FindAllStringSubmatch(output, -1) + for _, v := range locks { + if len(v) > 0 { + lockInfo := strings.Split(v[0], " ") + if len(lockInfo) > 2 { + args := []string{"lock", "remove", b.Image, lockInfo[1], lockInfo[0], "--pool", b.Pool, "--id", b.Id, "-m", mon} + args = append(args, secret_opt...) + cmd, err = b.exec.Run("rbd", args...) + glog.Infof("remove orphaned locker %s from client %s: err %v, rbd output: %s", lockInfo[1], lockInfo[0], err, string(cmd)) } } + } - // hold a lock: rbd lock add - args := []string{"lock", "add", b.Image, lock_id, "--pool", b.Pool, "--id", b.Id, "-m", mon} + // hold a lock: rbd lock add + args := []string{"lock", "add", b.Image, lock_id, "--pool", b.Pool, "--id", b.Id, "-m", mon} + args = append(args, secret_opt...) + cmd, err = b.exec.Run("rbd", args...) + if err == nil { + glog.V(4).Infof("rbd: successfully add lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) + } + } else { + // defencing, find locker name + ind := strings.LastIndex(output, lock_id) - 1 + for i := ind; i >= 0; i-- { + if output[i] == '\n' { + locker = output[(i + 1):ind] + break + } + } + // remove a lock if found: rbd lock remove + if len(locker) > 0 { + args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon} args = append(args, secret_opt...) cmd, err = b.exec.Run("rbd", args...) 
if err == nil { - glog.V(4).Infof("rbd: successfully add lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) + glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) } - } else { - // defencing, find locker name - ind := strings.LastIndex(output, lock_id) - 1 - for i := ind; i >= 0; i-- { - if output[i] == '\n' { - locker = output[(i + 1):ind] - break - } - } - // remove a lock if found: rbd lock remove - if len(locker) > 0 { - args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon} - args = append(args, secret_opt...) - cmd, err = b.exec.Run("rbd", args...) - if err == nil { - glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) - } - } - } - - if err == nil { - // break if operation succeeds - break } } + return err } @@ -274,27 +275,17 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) { return "", fmt.Errorf("rbd image %s/%s is still being used. rbd output: %s", b.Pool, b.Image, rbdOutput) } - // rbd map - l := len(b.Mon) - // avoid mount storm, pick a host randomly - start := rand.Int() % l - // iterate all hosts until mount succeeds. 
- for i := start; i < start+l; i++ { - mon := b.Mon[i%l] - glog.V(1).Infof("rbd: map mon %s", mon) - if b.Secret != "" { - output, err = b.exec.Run("rbd", - "map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "--key="+b.Secret) - } else { - output, err = b.exec.Run("rbd", - "map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "-k", b.Keyring) - } - if err == nil { - break - } - glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output)) + mon := util.kernelRBDMonitorsOpt(b.Mon) + glog.V(1).Infof("rbd: map mon %s", mon) + if b.Secret != "" { + output, err = b.exec.Run("rbd", + "map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "--key="+b.Secret) + } else { + output, err = b.exec.Run("rbd", + "map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "-k", b.Keyring) } if err != nil { + glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output)) return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output)) } devicePath, found = waitForPath(b.Pool, b.Image, 10) @@ -378,34 +369,23 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo // convert to MB that rbd defaults on sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024)) volSz := fmt.Sprintf("%d", sz) - // rbd create - l := len(p.rbdMounter.Mon) - // pick a mon randomly - start := rand.Int() % l - // iterate all monitors until create succeeds. 
- for i := start; i < start+l; i++ { - mon := p.Mon[i%l] - if p.rbdMounter.imageFormat == rbdImageFormat2 { - glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) - } else { - glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) - } - args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat} - if p.rbdMounter.imageFormat == rbdImageFormat2 { - // if no image features is provided, it results in empty string - // which disable all RBD image format 2 features as we expected - features := strings.Join(p.rbdMounter.imageFeatures, ",") - args = append(args, "--image-feature", features) - } - output, err = p.exec.Run("rbd", args...) 
- if err == nil { - break - } else { - glog.Warningf("failed to create rbd image, output %v", string(output)) - } + mon := util.kernelRBDMonitorsOpt(p.Mon) + if p.rbdMounter.imageFormat == rbdImageFormat2 { + glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) + } else { + glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) } + args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat} + if p.rbdMounter.imageFormat == rbdImageFormat2 { + // if no image features is provided, it results in empty string + // which disable all RBD image format 2 features as we expected + features := strings.Join(p.rbdMounter.imageFeatures, ",") + args = append(args, "--image-feature", features) + } + output, err = p.exec.Run("rbd", args...) if err != nil { + glog.Warningf("failed to create rbd image, output %v", string(output)) return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output)) } @@ -427,21 +407,15 @@ func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error { return fmt.Errorf("rbd image %s/%s is still being used, rbd output: %v", p.rbdMounter.Pool, p.rbdMounter.Image, rbdOutput) } // rbd rm - l := len(p.rbdMounter.Mon) - // pick a mon randomly - start := rand.Int() % l - // iterate all monitors until rm succeeds. 
- for i := start; i < start+l; i++ { - mon := p.rbdMounter.Mon[i%l] - glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) - output, err = p.exec.Run("rbd", - "rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret) - if err == nil { - return nil - } else { - glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output)) - } + mon := util.kernelRBDMonitorsOpt(p.rbdMounter.Mon) + glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) + output, err = p.exec.Run("rbd", + "rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret) + if err == nil { + return nil } + + glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output)) return fmt.Errorf("error %v, rbd output: %v", err, string(output)) } @@ -465,21 +439,15 @@ func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resourc } // rbd resize - l := len(rbdExpander.rbdMounter.Mon) - // pick a mon randomly - start := rand.Int() % l - // iterate all monitors until resize succeeds. 
- for i := start; i < start+l; i++ { - mon := rbdExpander.rbdMounter.Mon[i%l] - glog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret) - output, err = rbdExpander.exec.Run("rbd", - "resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret) - if err == nil { - return newSizeQuant, nil - } else { - glog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output)) - } + mon := util.kernelRBDMonitorsOpt(rbdExpander.rbdMounter.Mon) + glog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret) + output, err = rbdExpander.exec.Run("rbd", + "resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret) + if err == nil { + return newSizeQuant, nil } + + glog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output)) return oldSize, err } @@ -497,44 +465,34 @@ func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) { secret = b.Secret } - l := len(b.Mon) - start := rand.Int() % l - // iterate all hosts until rbd command succeeds. - for i := start; i < start+l; i++ { - mon := b.Mon[i%l] - // cmd "rbd info" get the image info with the following output: - // - // # image exists (exit=0) - // rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 - // size 1024 MB in 256 objects - // order 22 (4096 kB objects) - // block_name_prefix: rbd_data.1253ac238e1f29 - // format: 2 - // ... 
- // - // rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 --format json - // {"name":"volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.1253ac238e1f29","format":2,"features":["layering","exclusive-lock","object-map","fast-diff","deep-flatten"],"flags":[]} - // - // - // # image does not exist (exit=2) - // rbd: error opening image 1234: (2) No such file or directory - // - glog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) - cmd, err = b.exec.Run("rbd", - "info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) - output = string(cmd) + mon := util.kernelRBDMonitorsOpt(b.Mon) + // cmd "rbd info" get the image info with the following output: + // + // # image exists (exit=0) + // rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 + // size 1024 MB in 256 objects + // order 22 (4096 kB objects) + // block_name_prefix: rbd_data.1253ac238e1f29 + // format: 2 + // ... 
+ // + // rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 --format json + // {"name":"volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.1253ac238e1f29","format":2,"features":["layering","exclusive-lock","object-map","fast-diff","deep-flatten"],"flags":[]} + // + // + // # image does not exist (exit=2) + // rbd: error opening image 1234: (2) No such file or directory + // + glog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) + cmd, err = b.exec.Run("rbd", + "info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) + output = string(cmd) - // break if command succeeds - if err == nil { - break - } - - if err, ok := err.(*exec.Error); ok { - if err.Err == exec.ErrNotFound { - glog.Errorf("rbd cmd not found") - // fail fast if command not found - return 0, err - } + if err, ok := err.(*exec.Error); ok { + if err.Err == exec.ErrNotFound { + glog.Errorf("rbd cmd not found") + // fail fast if command not found + return 0, err } } @@ -576,41 +534,31 @@ func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) { secret = b.Secret } - l := len(b.Mon) - start := rand.Int() % l - // iterate all hosts until rbd command succeeds. 
- for i := start; i < start+l; i++ { - mon := b.Mon[i%l] - // cmd "rbd status" list the rbd client watch with the following output: - // - // # there is a watcher (exit=0) - // Watchers: - // watcher=10.16.153.105:0/710245699 client.14163 cookie=1 - // - // # there is no watcher (exit=0) - // Watchers: none - // - // Otherwise, exit is non-zero, for example: - // - // # image does not exist (exit=2) - // rbd: error opening image kubernetes-dynamic-pvc-: (2) No such file or directory - // - glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) - cmd, err = b.exec.Run("rbd", - "status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) - output = string(cmd) + mon := util.kernelRBDMonitorsOpt(b.Mon) + // cmd "rbd status" list the rbd client watch with the following output: + // + // # there is a watcher (exit=0) + // Watchers: + // watcher=10.16.153.105:0/710245699 client.14163 cookie=1 + // + // # there is no watcher (exit=0) + // Watchers: none + // + // Otherwise, exit is non-zero, for example: + // + // # image does not exist (exit=2) + // rbd: error opening image kubernetes-dynamic-pvc-: (2) No such file or directory + // + glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) + cmd, err = b.exec.Run("rbd", + "status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) + output = string(cmd) - // break if command succeeds - if err == nil { - break - } - - if err, ok := err.(*exec.Error); ok { - if err.Err == exec.ErrNotFound { - glog.Errorf("rbd cmd not found") - // fail fast if command not found - return false, output, err - } + if err, ok := err.(*exec.Error); ok { + if err.Err == exec.ErrNotFound { + glog.Errorf("rbd cmd not found") + // fail fast if command not found + return false, output, err } } From 5e966af1d0ea618b5a50560230587d9813bbf55a Mon Sep 17 00:00:00 2001 From: David Eads Date: Thu, 21 Dec 2017 08:09:55 -0500 
Subject: [PATCH 469/794] add eventratelimit config to scheme --- plugin/pkg/admission/eventratelimit/admission.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugin/pkg/admission/eventratelimit/admission.go b/plugin/pkg/admission/eventratelimit/admission.go index 7e025319690..8cd64ebe587 100644 --- a/plugin/pkg/admission/eventratelimit/admission.go +++ b/plugin/pkg/admission/eventratelimit/admission.go @@ -23,6 +23,7 @@ import ( "k8s.io/client-go/util/flowcontrol" api "k8s.io/kubernetes/pkg/apis/core" eventratelimitapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit" + eventratelimitapiv1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1" "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation" ) @@ -43,6 +44,10 @@ func Register(plugins *admission.Plugins) { } return newEventRateLimit(configuration, realClock{}) }) + + // add our config types + eventratelimitapi.AddToScheme(plugins.ConfigScheme) + eventratelimitapiv1alpha1.AddToScheme(plugins.ConfigScheme) } // Plugin implements an admission controller that can enforce event rate limits From 80cee97f0c7370ff9612a93adb45bad7e8ab582a Mon Sep 17 00:00:00 2001 From: Tomer Froumin Date: Thu, 21 Dec 2017 11:14:26 +0200 Subject: [PATCH 470/794] Fixed typos and made documentation more consistent --- .../providers/azure/azure_loadbalancer.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 6657a074d89..0ff061a495b 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -45,16 +45,17 @@ const ( // 1. Default mode - service has no annotation ("service.beta.kubernetes.io/azure-load-balancer-mode") // In this case the Loadbalancer of the primary Availability set is selected // 2. 
"__auto__" mode - service is annotated with __auto__ value, this when loadbalancer from any availability set - // is selected which has the miinimum rules associated with it. - // 3. "as1,as2" mode - this is when the laod balancer from the specified availability sets is selected that has the - // miinimum rules associated with it. + // is selected which has the minimum rules associated with it. + // 3. "as1,as2" mode - this is when the load balancer from the specified availability sets is selected that has the + // minimum rules associated with it. ServiceAnnotationLoadBalancerMode = "service.beta.kubernetes.io/azure-load-balancer-mode" - // ServiceAnnotationLoadBalancerAutoModeValue the annotation used on the service to specify the + // ServiceAnnotationLoadBalancerAutoModeValue is the annotation used on the service to specify the // Azure load balancer auto selection from the availability sets ServiceAnnotationLoadBalancerAutoModeValue = "__auto__" - // ServiceAnnotationDNSLabelName annotation speficying the DNS label name for the service. + // ServiceAnnotationDNSLabelName is the annotation used on the service + // to specify the DNS label name for the service. 
ServiceAnnotationDNSLabelName = "service.beta.kubernetes.io/azure-dns-label-name" // ServiceAnnotationSharedSecurityRule is the annotation used on the service From 4acc23b4090c471172522aefce77028ab487e1b9 Mon Sep 17 00:00:00 2001 From: linyouchong Date: Thu, 21 Dec 2017 23:36:20 +0800 Subject: [PATCH 471/794] fix incorrect comment --- .../volume/attachdetach/cache/desired_state_of_world.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go b/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go index 57ee9253ec1..3fe8e825e1d 100644 --- a/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go +++ b/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go @@ -152,7 +152,7 @@ type nodeManaged struct { // volumesToAttach is a map containing the set of volumes that should be // attached to this node. The key in the map is the name of the volume and - // the value is a pod object containing more information about the volume. + // the value is a volumeToAttach object containing more information about the volume. volumesToAttach map[v1.UniqueVolumeName]volumeToAttach // keepTerminatedPodVolumes determines if for terminated pods(on this node) - volumes @@ -160,7 +160,7 @@ type nodeManaged struct { keepTerminatedPodVolumes bool } -// The volume object represents a volume that should be attached to a node. +// The volumeToAttach object represents a volume that should be attached to a node. type volumeToAttach struct { // multiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume. // It is used to to prevent reporting the error from being reported more than once for a given volume. From 7b3638ea773dafe1c1eb2723d21a8a5c553bd747 Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Tue, 19 Dec 2017 16:32:34 -0800 Subject: [PATCH 472/794] Avoid string concatenation when comparing pods. 
Pod comparison in (*NodeInfo).Filter was using GetPodFullName before comparing pod names. This is a concatenation of pod name and pod namespace, and it is significantly faster to compare name & namespace instead. --- plugin/pkg/scheduler/schedulercache/node_info.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugin/pkg/scheduler/schedulercache/node_info.go b/plugin/pkg/scheduler/schedulercache/node_info.go index 13f71d525a0..99fb77430c1 100644 --- a/plugin/pkg/scheduler/schedulercache/node_info.go +++ b/plugin/pkg/scheduler/schedulercache/node_info.go @@ -510,12 +510,11 @@ func getPodKey(pod *v1.Pod) (string, error) { // matches NodeInfo.node and the pod is not found in the pods list. Otherwise, // returns true. func (n *NodeInfo) Filter(pod *v1.Pod) bool { - pFullName := util.GetPodFullName(pod) if pod.Spec.NodeName != n.node.Name { return true } for _, p := range n.pods { - if util.GetPodFullName(p) == pFullName { + if p.Name == pod.Name && p.Namespace == pod.Namespace { return true } } From 2d66fe69335524f6d62715773e4b8737f3fe3df1 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Wed, 20 Dec 2017 23:27:27 -0800 Subject: [PATCH 473/794] Update CHANGELOG-1.8.md for v1.8.6. 
--- CHANGELOG-1.8.md | 150 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 111 insertions(+), 39 deletions(-) diff --git a/CHANGELOG-1.8.md b/CHANGELOG-1.8.md index 6384a956b1f..56d268d1178 100644 --- a/CHANGELOG-1.8.md +++ b/CHANGELOG-1.8.md @@ -1,45 +1,52 @@ -- [v1.8.5](#v185) - - [Downloads for v1.8.5](#downloads-for-v185) +- [v1.8.6](#v186) + - [Downloads for v1.8.6](#downloads-for-v186) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.8.4](#changelog-since-v184) + - [Changelog since v1.8.5](#changelog-since-v185) - [Other notable changes](#other-notable-changes) -- [v1.8.4](#v184) - - [Downloads for v1.8.4](#downloads-for-v184) +- [v1.8.5](#v185) + - [Downloads for v1.8.5](#downloads-for-v185) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.8.3](#changelog-since-v183) + - [Changelog since v1.8.4](#changelog-since-v184) - [Other notable changes](#other-notable-changes-1) -- [v1.8.3](#v183) - - [Downloads for v1.8.3](#downloads-for-v183) +- [v1.8.4](#v184) + - [Downloads for v1.8.4](#downloads-for-v184) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.8.2](#changelog-since-v182) + - [Changelog since v1.8.3](#changelog-since-v183) - [Other notable changes](#other-notable-changes-2) -- [v1.8.2](#v182) - - [Downloads for v1.8.2](#downloads-for-v182) +- [v1.8.3](#v183) + - [Downloads for v1.8.3](#downloads-for-v183) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.8.1](#changelog-since-v181) + - [Changelog since v1.8.2](#changelog-since-v182) - [Other notable changes](#other-notable-changes-3) -- [v1.8.1](#v181) - - [Downloads for v1.8.1](#downloads-for-v181) +- [v1.8.2](#v182) + - [Downloads for 
v1.8.2](#downloads-for-v182) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.8.0](#changelog-since-v180) - - [Action Required](#action-required) + - [Changelog since v1.8.1](#changelog-since-v181) - [Other notable changes](#other-notable-changes-4) -- [v1.8.0](#v180) - - [Downloads for v1.8.0](#downloads-for-v180) +- [v1.8.1](#v181) + - [Downloads for v1.8.1](#downloads-for-v181) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) + - [Changelog since v1.8.0](#changelog-since-v180) + - [Action Required](#action-required) + - [Other notable changes](#other-notable-changes-5) +- [v1.8.0](#v180) + - [Downloads for v1.8.0](#downloads-for-v180) + - [Client Binaries](#client-binaries-6) + - [Server Binaries](#server-binaries-6) + - [Node Binaries](#node-binaries-6) - [Introduction to v1.8.0](#introduction-to-v180) - [Major Themes](#major-themes) - [SIG API Machinery](#sig-api-machinery) @@ -100,49 +107,114 @@ - [External Dependencies](#external-dependencies) - [v1.8.0-rc.1](#v180-rc1) - [Downloads for v1.8.0-rc.1](#downloads-for-v180-rc1) - - [Client Binaries](#client-binaries-6) - - [Server Binaries](#server-binaries-6) - - [Node Binaries](#node-binaries-6) - - [Changelog since v1.8.0-beta.1](#changelog-since-v180-beta1) - - [Action Required](#action-required-1) - - [Other notable changes](#other-notable-changes-5) -- [v1.8.0-beta.1](#v180-beta1) - - [Downloads for v1.8.0-beta.1](#downloads-for-v180-beta1) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - - [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3) - - [Action Required](#action-required-2) + - [Changelog since v1.8.0-beta.1](#changelog-since-v180-beta1) + - [Action Required](#action-required-1) - [Other notable changes](#other-notable-changes-6) -- [v1.8.0-alpha.3](#v180-alpha3) 
- - [Downloads for v1.8.0-alpha.3](#downloads-for-v180-alpha3) +- [v1.8.0-beta.1](#v180-beta1) + - [Downloads for v1.8.0-beta.1](#downloads-for-v180-beta1) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - - [Changelog since v1.8.0-alpha.2](#changelog-since-v180-alpha2) - - [Action Required](#action-required-3) + - [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3) + - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-7) -- [v1.8.0-alpha.2](#v180-alpha2) - - [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2) +- [v1.8.0-alpha.3](#v180-alpha3) + - [Downloads for v1.8.0-alpha.3](#downloads-for-v180-alpha3) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - - [Changelog since v1.7.0](#changelog-since-v170) - - [Action Required](#action-required-4) + - [Changelog since v1.8.0-alpha.2](#changelog-since-v180-alpha2) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-8) -- [v1.8.0-alpha.1](#v180-alpha1) - - [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1) +- [v1.8.0-alpha.2](#v180-alpha2) + - [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) + - [Changelog since v1.7.0](#changelog-since-v170) + - [Action Required](#action-required-4) + - [Other notable changes](#other-notable-changes-9) +- [v1.8.0-alpha.1](#v180-alpha1) + - [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1) + - [Client Binaries](#client-binaries-11) + - [Server Binaries](#server-binaries-11) + - [Node Binaries](#node-binaries-11) - [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4) - [Action Required](#action-required-5) - - [Other notable changes](#other-notable-changes-9) + - [Other notable 
changes](#other-notable-changes-10) +# v1.8.6 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples) + +## Downloads for v1.8.6 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes.tar.gz) | `8289c42b5d6da1dbf910585fca3a9d909195e540cc81bace61ec1d06b2366c1b` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-src.tar.gz) | `8a9d5d890c44137527fe3976d71d4f7cb18db21ba34262ce587cd979a88bb2fe` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-client-darwin-386.tar.gz) | `0e282477bfed6b534f2fbbd125e6e3e065bf72d15ac3532acef405e6717d8fb7` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-client-darwin-amd64.tar.gz) | `767c7bfbc6c1d01120e11726b9e33e184d32294e07c69a299b229329c5b98eba` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-client-linux-386.tar.gz) | `088b40c343fecb83b514bf9af0ad1c359c98ae7aa3b62d2a078c1363f50901c9` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-client-linux-amd64.tar.gz) | `47541706e4d27da55d32372344d7a4038ed389ba0be1e6fe15c651c574aac97a` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-client-linux-arm64.tar.gz) | `4be0b7a01c28c1f85d4f01f86def03dd3d49ef88cb43bf7be641d9d16b6aabc2` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-client-linux-arm.tar.gz) | `2d70384262cbdfb0958542bc5a71d926c49557fc8cc3000a2592571a945ad119` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-client-linux-ppc64le.tar.gz) | `c3be3a125ac77aa809da3495ad38456059a89cccfdfad0babaf95896fb958adc` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-client-linux-s390x.tar.gz) | `2b9831c2dd65c9669b335e3623e6a7001173b9ddf203f52f37b350659d9f1102` 
+[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-client-windows-386.tar.gz) | `9d14a96372cdcecbbb28717aff305fcd68beb540066a27f1b5e84e208a25405f` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-client-windows-amd64.tar.gz) | `0fbe358ff305188fe00793284e22c9c5b2ec0e0213882f0bfe0e4bf9685075f0` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-server-linux-amd64.tar.gz) | `9c8ff48343e5314638965407358d1e91d510c72a1c7dd7cde0c3be12790fdb98` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-server-linux-arm64.tar.gz) | `dd35c1b7572ab383eb2ff60f3b039053afa124836db6d044ab14afdafbe5ca74` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-server-linux-arm.tar.gz) | `5f4637d309eb47f4f97db8d2978b0b37b271339feb5952b216a9d09ad7e67c32` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-server-linux-ppc64le.tar.gz) | `6d3ea43edd53253e9e3b9ceb49e61b6d2c093e55be35f7b1a8f798cde842a562` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-server-linux-s390x.tar.gz) | `dfe89b91399977cee291d57b446625f01cf76ebecce696e2e889863bd3c8d3b1` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-node-linux-amd64.tar.gz) | `f8f3e7bb07db540f4b88fa5818c46efb918e795e5e89e389b9048f2f7f37674d` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-node-linux-arm64.tar.gz) | `1754b8a20d9176317fea3b77b5c48ad5565b922820adcbca4017bf210168dc6e` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-node-linux-arm.tar.gz) | `0a8255effff1d5b3ad7c84c3d6f6b8cfb5beb71606bfedaef0bb45f170b806d6` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-node-linux-ppc64le.tar.gz) | 
`fef465c9f66eda35479e152619b6c91e2432e92736646a898c5917098a10a1b4` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-node-linux-s390x.tar.gz) | `ff024e59d52afdee003f11c65f7de428915f7e28f9b8be4b3ebf117422ae5d67` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.8.6/kubernetes-node-windows-amd64.tar.gz) | `19a673b714c02322c544ec3a972e011410b69a7aed016ecf7ba09eccb175a1de` + +## Changelog since v1.8.5 + +### Other notable changes + +* change default azure file/dir mode to 0755 ([#56551](https://github.com/kubernetes/kubernetes/pull/56551), [@andyzhangx](https://github.com/andyzhangx)) +* Retry 'connection refused' errors when setting up clusters on GCE. ([#57394](https://github.com/kubernetes/kubernetes/pull/57394), [@mborsz](https://github.com/mborsz)) +* enable flexvolume on Windows node ([#56921](https://github.com/kubernetes/kubernetes/pull/56921), [@andyzhangx](https://github.com/andyzhangx)) +* Add prometheus metrics for the PodSecurityPolicy admission controller ([#57346](https://github.com/kubernetes/kubernetes/pull/57346), [@tallclair](https://github.com/tallclair)) +* fix CreateVolume func: use search mode instead ([#54687](https://github.com/kubernetes/kubernetes/pull/54687), [@andyzhangx](https://github.com/andyzhangx)) +* remove time waiting after create storage account (save 25s) ([#56679](https://github.com/kubernetes/kubernetes/pull/56679), [@andyzhangx](https://github.com/andyzhangx)) +* Add pvc as part of equivalence hash ([#56577](https://github.com/kubernetes/kubernetes/pull/56577), [@resouer](https://github.com/resouer)) +* fix azure disk storage account init issue ([#55927](https://github.com/kubernetes/kubernetes/pull/55927), [@andyzhangx](https://github.com/andyzhangx)) +* falls back to parse Docker runtime version as generic if not semver ([#54040](https://github.com/kubernetes/kubernetes/pull/54040), [@dixudx](https://github.com/dixudx)) +* BUG FIX: Check both name and ports for azure health probes 
([#56918](https://github.com/kubernetes/kubernetes/pull/56918), [@feiskyer](https://github.com/feiskyer)) + + + # v1.8.5 [Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples) From f220a1705dc27b7549e34d93edac2e708a5acdd5 Mon Sep 17 00:00:00 2001 From: Nick Sardo Date: Thu, 21 Dec 2017 10:17:54 -0800 Subject: [PATCH 474/794] Update vendor of google.golang.org/api repo --- Godeps/Godeps.json | 22 +- vendor/google.golang.org/api/CONTRIBUTORS | 1 + .../v2beta2/cloudmonitoring-api.json | 2 +- .../api/compute/v0.alpha/compute-api.json | 3454 +++- .../api/compute/v0.alpha/compute-gen.go | 9773 +++++++++--- .../api/compute/v0.beta/compute-api.json | 3371 +++- .../api/compute/v0.beta/compute-gen.go | 9886 ++++++++++-- .../api/compute/v1/compute-api.json | 7131 ++++++++- .../api/compute/v1/compute-gen.go | 12954 ++++++++++++++-- .../api/container/v1/container-api.json | 3321 ++-- .../api/container/v1/container-gen.go | 609 +- .../google.golang.org/api/gensupport/send.go | 10 + .../api/logging/v2beta1/logging-api.json | 2405 +-- .../api/logging/v2beta1/logging-gen.go | 197 +- .../api/monitoring/v3/monitoring-api.json | 3336 ++-- .../api/monitoring/v3/monitoring-gen.go | 1753 ++- .../api/pubsub/v1/pubsub-api.json | 1911 ++- .../api/pubsub/v1/pubsub-gen.go | 1546 +- 18 files changed, 51601 insertions(+), 10081 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index fe43770a94f..45c34a3aab6 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -2874,47 +2874,47 @@ }, { "ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/api/compute/v0.alpha", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/api/compute/v0.beta", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": 
"c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/api/compute/v1", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/api/container/v1", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/api/gensupport", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/api/googleapi", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/api/logging/v2beta1", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/api/monitoring/v3", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/api/pubsub/v1", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS index bf7d94f712f..2d4679e398a 100644 --- a/vendor/google.golang.org/api/CONTRIBUTORS +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -44,6 +44,7 @@ Ivan Krasin Jason Hall Johan Euphrosine Kostik Shtoyk +Matthew Whisenhunt Michael McGreevy Nick Craig-Wood Ross Light diff --git a/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json b/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json index 8910233454b..16d7b4fb806 100644 --- 
a/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json +++ b/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json @@ -21,7 +21,7 @@ "basePath": "/cloudmonitoring/v2beta2/projects/", "rootUrl": "https://www.googleapis.com/", "servicePath": "cloudmonitoring/v2beta2/projects/", - "batchPath": "batch", + "batchPath": "batch/cloudmonitoring/v2beta2", "parameters": { "alt": { "type": "string", diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json index 5c3f16e3ba4..5b39417753b 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/CwN9bOpaV-xhJ_YHqke_sIbhsB0\"", + "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/8LXjCsGzTRkw9pxlAZsYlHn9Eew\"", "discoveryVersion": "v1", "id": "compute:alpha", "name": "compute", "version": "alpha", - "revision": "20170905", + "revision": "20171207", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -20,7 +20,7 @@ "basePath": "/compute/alpha/projects/", "rootUrl": "https://www.googleapis.com/", "servicePath": "compute/alpha/projects/", - "batchPath": "batch", + "batchPath": "batch/compute/alpha", "parameters": { "alt": { "type": "string", @@ -110,7 +110,7 @@ "AcceleratorType": { "id": "AcceleratorType", "type": "object", - "description": "An Accelerator Type resource.", + "description": "An Accelerator Type resource. 
(== resource_for beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -193,9 +193,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -206,7 +210,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -226,6 +232,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -293,9 +305,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -306,7 +322,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -326,6 +344,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -375,9 +399,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -388,7 +416,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -408,6 +438,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -500,15 +536,15 @@ "Address": { "id": "Address", "type": "object", - "description": "A reserved address resource.", + "description": "A reserved address resource. (== resource_for beta.addresses ==) (== resource_for v1.addresses ==) (== resource_for beta.globalAddresses ==) (== resource_for v1.globalAddresses ==)", "properties": { "address": { "type": "string", - "description": "The static external IP address represented by this resource." + "description": "The static IP address represented by this resource." }, "addressType": { "type": "string", - "description": "The type of address to reserve. If unspecified, defaults to EXTERNAL.", + "description": "The type of address to reserve, either INTERNAL or EXTERNAL. If unspecified, defaults to EXTERNAL.", "enum": [ "DNS_FORWARDING", "EXTERNAL", @@ -600,7 +636,7 @@ }, "status": { "type": "string", - "description": "[Output Only] The status of the address, which can be either IN_USE or RESERVED. An address that is RESERVED is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", + "description": "[Output Only] The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. An address that is RESERVING is currently in the process of being reserved. A RESERVED address is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", "enum": [ "IN_USE", "RESERVED" @@ -612,7 +648,7 @@ }, "subnetwork": { "type": "string", - "description": "For external addresses, this field should not be used.\n\nThe URL of the subnetwork in which to reserve the address. 
If an IP address is specified, it must be within the subnetwork's IP range." + "description": "The URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range. This field can only be used with INTERNAL type with GCE_ENDPOINT/DNS_RESOLVER purposes." }, "users": { "type": "array", @@ -662,9 +698,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -675,7 +715,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -695,6 +737,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -762,9 +810,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -775,7 +827,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -795,6 +849,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -844,9 +904,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + 
"MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -857,7 +921,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -877,6 +943,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -946,6 +1018,13 @@ "description": "The size of the disk in base-2 GB. This supersedes disk_size_gb in InitializeParams.", "format": "int64" }, + "guestOsFeatures": { + "type": "array", + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", + "items": { + "$ref": "GuestOsFeature" + } + }, "index": { "type": "integer", "description": "[Output Only] A zero-based index to this disk, where 0 is reserved for the boot disk. If you have many disks attached to an instance, each disk would have a unique index number.", @@ -991,9 +1070,21 @@ "" ] }, + "savedState": { + "type": "string", + "description": "For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED iff the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api.", + "enum": [ + "DISK_SAVED_STATE_UNSPECIFIED", + "PRESERVED" + ], + "enumDescriptions": [ + "", + "" + ] + }, "source": { "type": "string", - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nIf desired, you can also attach existing non-root persistent disks using this property. 
This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk." + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk." }, "type": { "type": "string", @@ -1041,7 +1132,7 @@ }, "sourceImage": { "type": "string", - "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family \n\nIf the source image is deleted later, this field will not be set." + "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. 
For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a custom image that you created, specify the image name in the following format:\n\nglobal/images/my-custom-image \n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-image-family \n\nIf the source image is deleted later, this field will not be set." }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", @@ -1132,7 +1223,7 @@ "Autoscaler": { "id": "Autoscaler", "type": "object", - "description": "Represents an Autoscaler resource. Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. For more information, read Autoscaling Groups of Instances.", + "description": "Represents an Autoscaler resource. Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. For more information, read Autoscaling Groups of Instances. (== resource_for beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== resource_for beta.regionAutoscalers ==) (== resource_for v1.regionAutoscalers ==)", "properties": { "autoscalingPolicy": { "$ref": "AutoscalingPolicy", @@ -1166,6 +1257,11 @@ ] } }, + "recommendedSize": { + "type": "integer", + "description": "[Output Only] Target recommended MIG size computed by autoscaler. Autoscaler calculates recommended MIG size even when autoscaling policy mode is different from ON. 
This field is empty when autoscaler is not connected to the existing managed instance group or autoscaler did not generate its first prediction.", + "format": "int32" + }, "region": { "type": "string", "description": "[Output Only] URL of the region where the instance group resides (for autoscalers living in regional scope)." @@ -1246,9 +1342,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1259,7 +1359,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1279,6 +1381,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1346,9 +1454,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1359,7 +1471,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1379,6 +1493,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1476,9 +1596,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + 
"MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1489,7 +1613,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1509,6 +1635,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1613,20 +1745,20 @@ "properties": { "filter": { "type": "string", - "description": "A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or global data.\n\nFor the filter to be valid for autoscaling purposes, the following rules apply: \n- You can only use the AND operator for joining selectors. \n- You can only use direct equality comparison operator (=) without any functions for each selector. \n- You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. \n- The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels.\nIf the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is / called a global metric for the purpose of autoscaling.\n\nIf not specified, the type defaults to gce_instance. 
\n\nYou should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value." + "description": "A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data.\n\nFor the filter to be valid for autoscaling purposes, the following rules apply: \n- You can only use the AND operator for joining selectors. \n- You can only use direct equality comparison operator (=) without any functions for each selector. \n- You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. \n- The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels.\nIf the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling.\n\nIf not specified, the type defaults to gce_instance. \n\nYou should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value." 
}, "metric": { "type": "string", - "description": "The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values and should be a utilization metric, which means that the number of virtual machines handling requests should increase or decrease proportionally to the metric.\n\nThe metric must have a value type of INT64 or DOUBLE." + "description": "The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values.\n\nThe metric must have a value type of INT64 or DOUBLE." }, "singleInstanceAssignment": { "type": "number", - "description": "If scaling is based on a global metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing.\n\nA good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances.\n\nA bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.", + "description": "If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. 
Autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing.\n\nA good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances.\n\nA bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.", "format": "double" }, "utilizationTarget": { "type": "number", - "description": "The target value of the metric that autoscaler should maintain. This must be a positive value.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", + "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. 
The autoscaler will work to keep this value constant for each of the instances.", "format": "double" }, "utilizationTargetType": { @@ -1870,9 +2002,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1883,7 +2019,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1903,6 +2041,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1934,13 +2078,17 @@ "BackendService": { "id": "BackendService", "type": "object", - "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity.", + "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity. (== resource_for v1.backendService ==) (== resource_for beta.backendService ==)", "properties": { "affinityCookieTtlSec": { "type": "integer", "description": "Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value for TTL is one day.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", "format": "int32" }, + "appEngineBackend": { + "$ref": "BackendServiceAppEngineBackend", + "description": "Directs request to an App Engine app. cloudFunctionBackend and backends[] must be empty if this is set." 
+ }, "backends": { "type": "array", "description": "The list of backends that serve this BackendService.", @@ -1952,6 +2100,10 @@ "$ref": "BackendServiceCdnPolicy", "description": "Cloud CDN configuration for this BackendService." }, + "cloudFunctionBackend": { + "$ref": "BackendServiceCloudFunctionBackend", + "description": "Directs request to a cloud function. appEngineBackend and backends[] must be empty if this is set." + }, "connectionDraining": { "$ref": "ConnectionDraining" }, @@ -2127,9 +2279,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2140,7 +2296,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2160,6 +2318,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2188,6 +2352,25 @@ } } }, + "BackendServiceAppEngineBackend": { + "id": "BackendServiceAppEngineBackend", + "type": "object", + "description": "Configuration of a App Engine backend.", + "properties": { + "appEngineService": { + "type": "string", + "description": "Optional. App Engine app service name." + }, + "targetProject": { + "type": "string", + "description": "Required. Project ID of the project hosting the app. This is the project ID of this project. Reference to another project is not allowed." + }, + "version": { + "type": "string", + "description": "Optional. Version of App Engine app service. When empty, App Engine will do its normal traffic split." 
+ } + } + }, "BackendServiceCdnPolicy": { "id": "BackendServiceCdnPolicy", "type": "object", @@ -2211,6 +2394,21 @@ } } }, + "BackendServiceCloudFunctionBackend": { + "id": "BackendServiceCloudFunctionBackend", + "type": "object", + "description": "Configuration of a Cloud Function backend.", + "properties": { + "functionName": { + "type": "string", + "description": "Required. A cloud function name. Special value ?*? represents all cloud functions in the project." + }, + "targetProject": { + "type": "string", + "description": "Required. Project ID of the project hosting the cloud function." + } + } + }, "BackendServiceFailoverPolicy": { "id": "BackendServiceFailoverPolicy", "type": "object", @@ -2306,9 +2504,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2319,7 +2521,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2339,6 +2543,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2397,9 +2607,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2410,7 +2624,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", 
"UNREACHABLE" ], "enumDescriptions": [ @@ -2430,6 +2646,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2529,7 +2751,7 @@ "Commitment": { "id": "Commitment", "type": "object", - "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts.", + "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts. 
(== resource_for beta.commitments ==) (== resource_for v1.commitments ==)", "properties": { "creationTimestamp": { "type": "string", @@ -2652,9 +2874,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2665,7 +2891,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2685,6 +2913,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2752,9 +2986,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2765,7 +3003,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2785,6 +3025,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2834,9 +3080,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2847,7 +3097,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2867,6 +3119,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3024,6 +3282,26 @@ } } }, + "DailyMaintenanceWindow": { + "id": "DailyMaintenanceWindow", + "type": "object", + "description": "Time window specified for daily maintenance operations.", + "properties": { + "daysInCycle": { + "type": "integer", + "description": "Allows to define schedule that runs every nth day of the month.", + "format": "int32" + }, + "duration": { + "type": "string", + "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario." + }, + "startTime": { + "type": "string", + "description": "Time within the maintenance window to start the maintenance operations. It must be in format \"HH:MM?, where HH : [00-23] and MM : [00-59] GMT." + } + } + }, "DeprecationStatus": { "id": "DeprecationStatus", "type": "object", @@ -3064,7 +3342,7 @@ "Disk": { "id": "Disk", "type": "object", - "description": "A Disk resource.", + "description": "A Disk resource. (== resource_for beta.disks ==) (== resource_for v1.disks ==)", "properties": { "creationTimestamp": { "type": "string", @@ -3078,6 +3356,13 @@ "$ref": "CustomerEncryptionKey", "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot or an image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." 
}, + "guestOsFeatures": { + "type": "array", + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", + "items": { + "$ref": "GuestOsFeature" + } + }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -3164,7 +3449,7 @@ }, "sourceImage": { "type": "string", - "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" + "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. 
For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a custom image that you created, specify the image name in the following format:\n\nglobal/images/my-custom-image \n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-image-family" }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", @@ -3270,9 +3555,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3283,7 +3572,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3303,6 +3594,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3331,6 +3628,47 @@ } } }, + "DiskInstantiationConfig": { + "id": "DiskInstantiationConfig", + "type": "object", + "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", + "properties": { + "autoDelete": { + "type": "boolean", + "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance)." + }, + "deviceName": { + "type": "string", + "description": "Specifies the device name of the disk to which the configurations apply to." 
+ }, + "instantiateFrom": { + "type": "string", + "description": "Specifies whether to include the disk and what image to use.", + "enum": [ + "ATTACH_READ_ONLY", + "BLANK", + "DEFAULT", + "DO_NOT_INCLUDE", + "IMAGE_URL", + "SOURCE_IMAGE", + "SOURCE_IMAGE_FAMILY" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ] + }, + "sourceImage": { + "type": "string", + "description": "The custom source image to be used to restore this disk when instantiating this instance template." + } + } + }, "DiskList": { "id": "DiskList", "type": "object", @@ -3370,9 +3708,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3383,7 +3725,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3403,6 +3747,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3448,7 +3798,7 @@ "DiskType": { "id": "DiskType", "type": "object", - "description": "A DiskType resource.", + "description": "A DiskType resource. 
(== resource_for beta.diskTypes ==) (== resource_for v1.diskTypes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -3535,9 +3885,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3548,7 +3902,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3568,6 +3924,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3635,9 +3997,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3648,7 +4014,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3668,6 +4036,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3717,9 +4091,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3730,7 +4108,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3750,6 +4130,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3810,9 +4196,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3823,7 +4213,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3843,6 +4235,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3994,6 +4392,10 @@ "" ] }, + "disabled": { + "type": "boolean", + "description": "Denotes whether the firewall rule is disabled, i.e not applied to the network it is associated with. When set to true, the firewall rule is not enforced and the network behaves as if it did not exist. If this is unspecified, the firewall rule will be enabled." + }, "enableLogging": { "type": "boolean", "description": "This field denotes whether to enable logging for a particular firewall rule. If logging is enabled, logs will be exported to the configured export destination for all firewall logs in the network. Logs may be exported to BigQuery or Pub/Sub." @@ -4062,7 +4464,7 @@ }, "targetTags": { "type": "array", - "description": "A list of instance tags indicating sets of instances located in the network that may make network connections as specified in allowed[]. 
If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "description": "A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those tags. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", "items": { "type": "string" } @@ -4108,9 +4510,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4121,7 +4527,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4141,6 +4549,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4176,17 +4590,17 @@ "properties": { "calculated": { "type": "integer", - "description": "[Output Only] Absolute value calculated based on mode: mode = fixed -\u003e calculated = fixed = percent -\u003e calculated = ceiling(percent/100 * base_value)", + "description": "[Output Only] Absolute value of VM instances calculated based on the specific mode.\n\n \n- If the value is fixed, then the caculated value is equal to the fixed value. \n- If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. 
If there is a remainder, the number is rounded up.", "format": "int32" }, "fixed": { "type": "integer", - "description": "fixed must be non-negative.", + "description": "Specifies a fixed number of VM instances. This must be a positive integer.", "format": "int32" }, "percent": { "type": "integer", - "description": "percent must belong to [0, 100].", + "description": "Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%.", "format": "int32" } } @@ -4194,11 +4608,11 @@ "ForwardingRule": { "id": "ForwardingRule", "type": "object", - "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple.", + "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple. (== resource_for beta.forwardingRules ==) (== resource_for v1.forwardingRules ==) (== resource_for beta.globalForwardingRules ==) (== resource_for v1.globalForwardingRules ==) (== resource_for beta.regionForwardingRules ==) (== resource_for v1.regionForwardingRules ==)", "properties": { "IPAddress": { "type": "string", - "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP. For regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. 
If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule." + "description": "The IP address that this forwarding rule is serving on behalf of.\n\nAddresses are restricted based on the forwarding rule's load balancing scheme (EXTERNAL or INTERNAL) and scope (global or regional).\n\nWhen the load balancing scheme is EXTERNAL, for global forwarding rules, the address must be a global IP, and for regional forwarding rules, the address must live in the same region as the forwarding rule. If this field is empty, an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnet configured for the forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule.\n\nAn address can be specified either by a literal IP address or a URL reference to an existing Address resource. 
The following examples are all valid: \n- 100.1.2.3 \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address \n- projects/project/regions/region/addresses/address \n- regions/region/addresses/address \n- global/addresses/address \n- address" }, "IPProtocol": { "type": "string", @@ -4387,9 +4801,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4400,7 +4818,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4420,6 +4840,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4487,9 +4913,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4500,7 +4930,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4520,6 +4952,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4578,9 +5016,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + 
"MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4591,7 +5033,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4611,6 +5055,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4688,14 +5138,18 @@ "properties": { "type": { "type": "string", - "description": "The type of supported feature. Currently only VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the server might also populate this property with the value WINDOWS to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", + "description": "The ID of a supported feature. Read Enabling guest operating system features to see a list of available options.", "enum": [ "FEATURE_TYPE_UNSPECIFIED", "MULTI_IP_SUBNET", + "SECURE_BOOT", + "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS" ], "enumDescriptions": [ + "", + "", "", "", "", @@ -4996,9 +5450,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5009,7 +5467,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5029,6 +5489,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5252,9 +5718,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + 
"EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5265,7 +5735,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5285,6 +5757,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5352,9 +5830,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5365,7 +5847,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5385,6 +5869,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5535,9 +6025,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5548,7 +6042,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5568,6 +6064,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5635,9 +6137,13 @@ "enum": 
[ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5648,7 +6154,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5668,6 +6176,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5717,9 +6231,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5730,7 +6248,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5750,6 +6270,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5799,9 +6325,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5812,7 +6342,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": 
[ @@ -5832,6 +6364,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5860,6 +6398,26 @@ } } }, + "HourlyMaintenanceWindow": { + "id": "HourlyMaintenanceWindow", + "type": "object", + "description": "Time window specified for hourly maintenance operations.", + "properties": { + "duration": { + "type": "string", + "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario." + }, + "hoursInCycle": { + "type": "integer", + "description": "Allows to define schedule that runs every nth hour.", + "format": "int32" + }, + "startTime": { + "type": "string", + "description": "Time within the maintenance window to start the maintenance operations. It must be in format \"HH:MM?, where HH : [00-23] and MM : [00-59] GMT." + } + } + }, "HttpHealthCheck": { "id": "HttpHealthCheck", "type": "object", @@ -5966,9 +6524,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5979,7 +6541,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5999,6 +6563,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6133,9 +6703,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6146,7 +6720,9 @@ 
"REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6166,6 +6742,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6197,7 +6779,7 @@ "Image": { "id": "Image", "type": "object", - "description": "An Image resource.", + "description": "An Image resource. (== resource_for beta.images ==) (== resource_for v1.images ==)", "properties": { "archiveSizeBytes": { "type": "string", @@ -6227,7 +6809,7 @@ }, "guestOsFeatures": { "type": "array", - "description": "A list of features to enable on the guest OS. Applicable for bootable images only. Currently, only one feature can be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows each virtual CPU to have its own queue. For Windows images, you can only enable VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher. Linux images with kernel versions 3.17 and higher will support VIRTIO_SCSI_MULTIQUEUE.\n\nFor new Windows images, the server might also populate this field with the value WINDOWS, to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", "items": { "$ref": "GuestOsFeature" } @@ -6341,6 +6923,18 @@ "type": "string", "description": "[Output Only] The ID value of the image used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given image name." }, + "sourceSnapshot": { + "type": "string", + "description": "URL of the source snapshot used to create this image. This can be a full or valid partial URL. 
You must provide exactly one of: \n- this property, or \n- the sourceImage property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image." + }, + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." + }, + "sourceSnapshotId": { + "type": "string", + "description": "[Output Only] The ID value of the snapshot used to create this image. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given snapshot name." + }, "sourceType": { "type": "string", "description": "The type of the image used to create this disk. The default and only value is RAW", @@ -6407,9 +7001,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6420,7 +7018,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6440,6 +7040,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6471,7 +7077,7 @@ "Instance": { "id": "Instance", "type": "object", - "description": "An Instance resource.", + "description": "An Instance resource. 
(== resource_for beta.instances ==) (== resource_for v1.instances ==)", "properties": { "canIpForward": { "type": "boolean", @@ -6546,6 +7152,13 @@ ] } }, + "maintenancePolicies": { + "type": "array", + "description": "Maintenance policies applied to this instance.", + "items": { + "type": "string" + } + }, "metadata": { "$ref": "Metadata", "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." @@ -6586,6 +7199,9 @@ "$ref": "ServiceAccount" } }, + "shieldedVmConfig": { + "$ref": "ShieldedVmConfig" + }, "startRestricted": { "type": "boolean", "description": "[Output Only] Whether a VM has been restricted for start because Compute Engine has detected suspicious activity." @@ -6667,9 +7283,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6680,7 +7300,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6700,6 +7322,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6731,6 +7359,7 @@ "InstanceGroup": { "id": "InstanceGroup", "type": "object", + "description": "InstanceGroups (== resource_for beta.instanceGroups ==) (== resource_for v1.instanceGroups ==) (== resource_for beta.regionInstanceGroups ==) (== resource_for v1.regionInstanceGroups ==)", "properties": { "creationTimestamp": { "type": "string", @@ -6838,9 +7467,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", 
"FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6851,7 +7484,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6871,6 +7506,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6938,9 +7579,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6951,7 +7596,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6971,6 +7618,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7002,7 +7655,7 @@ "InstanceGroupManager": { "id": "InstanceGroupManager", "type": "object", - "description": "An Instance Group Manager resource.", + "description": "An Instance Group Manager resource. (== resource_for beta.instanceGroupManagers ==) (== resource_for v1.instanceGroupManagers ==) (== resource_for beta.regionInstanceGroupManagers ==) (== resource_for v1.regionInstanceGroupManagers ==)", "properties": { "activities": { "$ref": "InstanceGroupManagerActivities" @@ -7110,7 +7763,7 @@ "description": "[Output Only] The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. 
By default, the service account {projectNumber}@cloudservices.gserviceaccount.com is used." }, "statefulPolicy": { - "$ref": "InstanceGroupManagerStatefulPolicy", + "$ref": "StatefulPolicy", "description": "Stateful configuration for this Instanced Group Manager" }, "targetPools": { @@ -7137,7 +7790,7 @@ }, "versions": { "type": "array", - "description": "Versions supported by this IGM. User should set this field if they need fine-grained control over how many instances in each version are run by this IGM. Versions are keyed by instanceTemplate. Every instanceTemplate can appear at most once. This field overrides instanceTemplate field. If both instanceTemplate and versions are set, the user receives a warning. \"instanceTemplate: X\" is semantically equivalent to \"versions [ { instanceTemplate: X } ]\". Exactly one version must have targetSize field left unset. Size of such a version will be calculated automatically.", + "description": "Specifies the instance templates used by this managed instance group to create instances.\n\nEach version is defined by an instanceTemplate. Every template can appear at most once per instance group. This field overrides the top-level instanceTemplate field. Read more about the relationships between these fields. Exactly one version must leave the targetSize field unset. That version will be applied to all remaining instances. 
For more information, read about canary updates.", "items": { "$ref": "InstanceGroupManagerVersion" } @@ -7321,9 +7974,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7334,7 +7991,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7354,6 +8013,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7398,6 +8063,18 @@ "maxUnavailable": { "$ref": "FixedOrPercent", "description": "Maximum number of instances that can be unavailable when autohealing. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's liveness health check result was observed to be HEALTHY at least once. By default, a percent value of 100% is used." 
+ }, + "mode": { + "type": "string", + "description": "Defines operating mode for this policy.", + "enum": [ + "OFF", + "ON" + ], + "enumDescriptions": [ + "", + "" + ] } } }, @@ -7440,9 +8117,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7453,7 +8134,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7473,6 +8156,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7527,40 +8216,17 @@ } } }, - "InstanceGroupManagerStatefulPolicy": { - "id": "InstanceGroupManagerStatefulPolicy", - "type": "object", - "properties": { - "preservedDisks": { - "type": "array", - "description": "Disks created on the instances that will be preserved on instance delete, resize down, etc.", - "items": { - "$ref": "InstanceGroupManagerStatefulPolicyDiskPolicy" - } - } - } - }, - "InstanceGroupManagerStatefulPolicyDiskPolicy": { - "id": "InstanceGroupManagerStatefulPolicyDiskPolicy", - "type": "object", - "properties": { - "deviceName": { - "type": "string", - "description": "Device name of the disk to be preserved" - } - } - }, "InstanceGroupManagerUpdatePolicy": { "id": "InstanceGroupManagerUpdatePolicy", "type": "object", "properties": { "maxSurge": { "$ref": "FixedOrPercent", - "description": "Maximum number of instances that can be created above the InstanceGroupManager.targetSize during the update process. By default, a fixed value of 1 is used. Using maxSurge \u003e 0 will cause instance names to change during the update process. 
At least one of { maxSurge, maxUnavailable } must be greater than 0." + "description": "The maximum number of instances that can be created above the specified targetSize during the update process. By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge." }, "maxUnavailable": { "$ref": "FixedOrPercent", - "description": "Maximum number of instances that can be unavailable during the update process. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's liveness health check result was observed to be HEALTHY at least once. By default, a fixed value of 1 is used. At least one of { maxSurge, maxUnavailable } must be greater than 0." + "description": "The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied:\n\n \n- The instance's status is RUNNING. \n- If there is a health check on the instance group, the instance's liveness health check result must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable." }, "minReadySec": { "type": "integer", @@ -7569,7 +8235,7 @@ }, "minimalAction": { "type": "string", - "description": "Minimal action to be taken on an instance. 
The order of action types is: RESTART \u003c REPLACE.", + "description": "Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a code\u003eRESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.", "enum": [ "NONE", "REFRESH", @@ -7613,7 +8279,7 @@ }, "targetSize": { "$ref": "FixedOrPercent", - "description": "Intended number of instances that are created from instanceTemplate. The final number of instances created from instanceTemplate will be equal to: * if expressed as fixed number: min(targetSize.fixed, instanceGroupManager.targetSize), * if expressed as percent: ceiling(targetSize.percent * InstanceGroupManager.targetSize). If unset, this version will handle all the remaining instances." + "description": "Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: \n- If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. \n- if expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize) If there is a remainder, the number is rounded up. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information." 
} } }, @@ -7745,9 +8411,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7758,7 +8428,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7778,6 +8450,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7855,9 +8533,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7868,7 +8550,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7888,6 +8572,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8021,9 +8711,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8034,7 +8728,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + 
"UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8054,6 +8750,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8134,9 +8836,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8147,7 +8853,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8167,6 +8875,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8252,9 +8966,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8265,7 +8983,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8285,6 +9005,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8352,9 +9078,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8365,7 +9095,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8385,6 +9117,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8515,7 +9253,7 @@ "InstanceTemplate": { "id": "InstanceTemplate", "type": "object", - "description": "An Instance Template resource.", + "description": "An Instance Template resource. (== resource_for beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==)", "properties": { "creationTimestamp": { "type": "string", @@ -8556,6 +9294,10 @@ "sourceInstance": { "type": "string", "description": "The source instance used to create the template. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance" + }, + "sourceInstanceParams": { + "$ref": "SourceInstanceParams", + "description": "The source instance params to use to create this instance template." 
} } }, @@ -8598,9 +9340,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8611,7 +9357,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8631,6 +9379,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8700,6 +9454,49 @@ } } }, + "InstancesAddMaintenancePoliciesRequest": { + "id": "InstancesAddMaintenancePoliciesRequest", + "type": "object", + "properties": { + "maintenancePolicies": { + "type": "array", + "description": "Maintenance policies to be added to this instance.", + "items": { + "type": "string" + } + } + } + }, + "InstancesRemoveMaintenancePoliciesRequest": { + "id": "InstancesRemoveMaintenancePoliciesRequest", + "type": "object", + "properties": { + "maintenancePolicies": { + "type": "array", + "description": "Maintenance policies to be removed from this instance.", + "items": { + "type": "string" + } + } + } + }, + "InstancesResumeRequest": { + "id": "InstancesResumeRequest", + "type": "object", + "properties": { + "disks": { + "type": "array", + "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key.\n\nIn order to resume the instance, the disk url and its corresponding key must be provided.\n\nIf the disk is not protected with a customer-supplied encryption key it should not be specified.", + "items": { + "$ref": "CustomerEncryptionKeyProtectedDisk" + } + }, + "instanceEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Decrypts data associated with an instance 
that is protected with a customer-supplied encryption key.\n\nIf the instance you are starting is protected with a customer-supplied encryption key, the correct key must be provided otherwise the instance resume will not succeed." + } + } + }, "InstancesScopedList": { "id": "InstancesScopedList", "type": "object", @@ -8721,9 +9518,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8734,7 +9535,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8754,6 +9557,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8869,11 +9678,11 @@ "Interconnect": { "id": "Interconnect", "type": "object", - "description": "Protocol definitions for Mixer API to support Interconnect. Next available tag: 23", + "description": "Represents an Interconnects resource. The Interconnects resource is a dedicated connection between Google's network and your on-premises network. For more information, see the Dedicated overview page. (== resource_for v1.interconnects ==) (== resource_for beta.interconnects ==)", "properties": { "adminEnabled": { "type": "boolean", - "description": "Administrative status of the interconnect. When this is set to ?true?, the Interconnect is functional and may carry traffic (assuming there are functional InterconnectAttachments and other requirements are satisfied). When set to ?false?, no packets will be carried over this Interconnect and no BGP routes will be exchanged over it. By default, it is set to ?true?." 
+ "description": "Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true." }, "circuitInfos": { "type": "array", @@ -8882,10 +9691,6 @@ "$ref": "InterconnectCircuitInfo" } }, - "connectionAuthorization": { - "type": "string", - "description": "[Output Only] URL to retrieve the Letter Of Authority and Customer Facility Assignment (LOA-CFA) documentation relating to this Interconnect. This documentation authorizes the facility provider to connect to the specified crossconnect ports." - }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -8927,11 +9732,14 @@ }, "interconnectType": { "type": "string", + "description": "Type of interconnect. Note that \"IT_PRIVATE\" has been deprecated in favor of \"DEDICATED\"", "enum": [ - "IT_PARTNER", - "IT_PRIVATE" + "DEDICATED", + "IT_PRIVATE", + "PARTNER" ], "enumDescriptions": [ + "", "", "" ] @@ -8941,8 +9749,21 @@ "description": "[Output Only] Type of the resource. Always compute#interconnect for interconnects.", "default": "compute#interconnect" }, + "labelFingerprint": { + "type": "string", + "description": "A fingerprint for the labels being applied to this Interconnect, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make a get() request to retrieve an Interconnect.", + "format": "byte" + }, + "labels": { + "type": "object", + "description": "Labels to apply to this Interconnect resource. These can be later modified by the setLabels method. 
Each label key/value must comply with RFC1035. Label values may be empty.", + "additionalProperties": { + "type": "string" + } + }, "linkType": { "type": "string", + "description": "Type of link requested. This field indicates speed of each of the links in the bundle, not the entire bundle. Only 10G per link is allowed for a dedicated interconnect. Options: Ethernet_10G_LR", "enum": [ "LINK_TYPE_ETHERNET_10G_LR" ], @@ -8997,14 +9818,77 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." + }, + "state": { + "type": "string", + "description": "[Output Only] The current state of whether or not this Interconnect is functional.", + "enum": [ + "ACTIVE", + "UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "" + ] } } }, "InterconnectAttachment": { "id": "InterconnectAttachment", "type": "object", - "description": "Protocol definitions for Mixer API to support InterconnectAttachment. Next available tag: 18", + "description": "Represents an InterconnectAttachment (VLAN attachment) resource. For more information, see Creating VLAN Attachments. (== resource_for beta.interconnectAttachments ==) (== resource_for v1.interconnectAttachments ==)", "properties": { + "adminEnabled": { + "type": "boolean", + "description": "Determines whether this Attachment will carry packets. Not present for PARTNER_PROVIDER." 
+ }, + "availabilityZone": { + "type": "string", + "enum": [ + "ZONE_1", + "ZONE_2", + "ZONE_ANY" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, + "bandwidth": { + "type": "string", + "enum": [ + "BPS_100M", + "BPS_10G", + "BPS_1G", + "BPS_200M", + "BPS_2G", + "BPS_300M", + "BPS_400M", + "BPS_500M", + "BPS_50M", + "BPS_5G" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "candidateSubnets": { + "type": "array", + "description": "Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress and customerRouterIpAddress for this attachment. All prefixes must be within link-local address space (169.254.0.0/16) and must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 from the supplied candidate prefix(es). The request will fail if all possible /29s are in use on Google?s edge. If not supplied, Google will randomly select an unused /29 from all of link-local space.", + "items": { + "type": "string" + } + }, "cloudRouterIpAddress": { "type": "string", "description": "[Output Only] IPv4 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment." @@ -9019,7 +9903,7 @@ }, "description": { "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." + "description": "An optional description of this resource." }, "googleReferenceId": { "type": "string", @@ -9039,6 +9923,18 @@ "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", "default": "compute#interconnectAttachment" }, + "labelFingerprint": { + "type": "string", + "description": "A fingerprint for the labels being applied to this InterconnectAttachment, which is essentially a hash of the labels set used for optimistic locking. 
The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make a get() request to retrieve an InterconnectAttachment.", + "format": "byte" + }, + "labels": { + "type": "object", + "description": "Labels to apply to this InterconnectAttachment resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "additionalProperties": { + "type": "string" + } + }, "name": { "type": "string", "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", @@ -9056,9 +9952,21 @@ "" ] }, + "pairingKey": { + "type": "string", + "description": "[Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. Opaque string identifying an PARTNER attachment. Of the form ?cloud-region/XXXXXX?." + }, + "partnerAsn": { + "type": "string", + "description": "[Output only for PARTNER. Input for PARTNER_PROVIDER. Not present for DEDICATED] BGP ASN of the Partner. A layer 3 Partner should supply this if they configured BGP on behalf of the customer.", + "format": "int64" + }, + "partnerMetadata": { + "$ref": "InterconnectAttachmentPartnerMetadata" + }, "privateInterconnectInfo": { "$ref": "InterconnectAttachmentPrivateInfo", - "description": "[Output Only] Information specific to a Private InterconnectAttachment. Only populated if the interconnect that this is attached is of type IT_PRIVATE." 
+ "description": "[Output Only] Information specific to an InterconnectAttachment. This property is populated if the interconnect that this is attached to is of type DEDICATED." }, "region": { "type": "string", @@ -9071,6 +9979,46 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." + }, + "state": { + "type": "string", + "description": "[Output Only] The current state of whether or not this interconnect attachment is functional.", + "enum": [ + "ACTIVE", + "DEFUNCT", + "PARTNER_REQUEST_RECEIVED", + "PENDING_CUSTOMER", + "PENDING_PARTNER", + "STATE_UNSPECIFIED", + "UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ] + }, + "type": { + "type": "string", + "enum": [ + "DEDICATED", + "PARTNER", + "PARTNER_PROVIDER" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, + "vlanTag8021q": { + "type": "integer", + "description": "Available only for DEDICATED and PARTNER_PROVIDER. Desired VLAN tag for this attachment, in the range 2-4094. 
This field refers to 802.1q VLAN tag, also known as IEEE 802.1Q Only specified at creation time.", + "format": "int32" } } }, @@ -9113,9 +10061,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9126,7 +10078,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9146,6 +10100,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9213,9 +10173,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9226,7 +10190,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9246,6 +10212,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9274,10 +10246,29 @@ } } }, + "InterconnectAttachmentPartnerMetadata": { + "id": "InterconnectAttachmentPartnerMetadata", + "type": "object", + "description": "Informational metadata about Partner attachments from Partners to display to customers. These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments. 
Only mutable for PARTNER_PROVIDER type, output-only for PARTNER, not available for DEDICATED.", + "properties": { + "interconnectName": { + "type": "string", + "description": "Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner?s portal. For instance ?Chicago 1?. This value may be validated to match approved Partner values." + }, + "partnerName": { + "type": "string", + "description": "Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values." + }, + "portalUrl": { + "type": "string", + "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep-link to the specific resource on the Partner portal. This value may be validated to match approved Partner values." + } + } + }, "InterconnectAttachmentPrivateInfo": { "id": "InterconnectAttachmentPrivateInfo", "type": "object", - "description": "Private information for an interconnect attachment when this belongs to an interconnect of type IT_PRIVATE.", + "description": "Information for an interconnect attachment when this belongs to an interconnect of type DEDICATED.", "properties": { "tag8021q": { "type": "integer", @@ -9307,9 +10298,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9320,7 +10315,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9340,6 +10337,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9375,7 +10378,7 @@ "properties": { "customerDemarcId": { 
"type": "string", - "description": "Customer-side demarc ID for this circuit. This will only be set if it was provided by the Customer to Google during circuit turn-up." + "description": "Customer-side demarc ID for this circuit." }, "googleCircuitId": { "type": "string", @@ -9426,9 +10429,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9439,7 +10446,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9459,6 +10468,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9490,7 +10505,7 @@ "InterconnectLocation": { "id": "InterconnectLocation", "type": "object", - "description": "Protocol definitions for Mixer API to support InterconnectLocation.", + "description": "Represents an InterconnectLocations resource. The InterconnectLocations resource describes the locations where you can connect to Google's networks. For more information, see Colocation Facilities.", "properties": { "address": { "type": "string", @@ -9498,23 +10513,33 @@ }, "availabilityZone": { "type": "string", - "description": "Availability zone for this location. Within a city, maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\"." + "description": "[Output Only] Availability zone for this location. Within a metropolitan area (metro), maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\"." 
}, "city": { "type": "string", - "description": "City designator used by the Interconnect UI to locate this InterconnectLocation within the Continent. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\"." + "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\"." }, "continent": { "type": "string", - "description": "Continent for this location. Used by the location picker in the Interconnect UI.", + "description": "[Output Only] Continent for this location.", "enum": [ + "AFRICA", + "ASIA_PAC", "C_AFRICA", "C_ASIA_PAC", "C_EUROPE", "C_NORTH_AMERICA", - "C_SOUTH_AMERICA" + "C_SOUTH_AMERICA", + "EUROPE", + "NORTH_AMERICA", + "SOUTH_AMERICA" ], "enumDescriptions": [ + "", + "", + "", + "", + "", "", "", "", @@ -9608,9 +10633,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9621,7 +10650,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9641,6 +10672,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9683,10 +10720,14 @@ "type": "string", "description": "Identifies the network presence of this location.", "enum": [ + "GLOBAL", + "LOCAL_REGION", "LP_GLOBAL", "LP_LOCAL_REGION" ], "enumDescriptions": [ + "", + "", "", "" ] @@ -9694,10 +10735,6 @@ "region": { "type": "string", "description": "URL for the region of this location." - }, - "regionKey": { - "type": "string", - "description": "Scope key for the region of this location." 
} } }, @@ -9715,19 +10752,25 @@ }, "description": { "type": "string", - "description": "Short user-visible description of the purpose of the outage." + "description": "A description about the purpose of the outage." }, "endTime": { "type": "string", + "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", "format": "int64" }, "issueType": { "type": "string", + "description": "Form this outage is expected to take. Note that the \"IT_\" versions of this enum have been deprecated in favor of the unprefixed values.", "enum": [ "IT_OUTAGE", - "IT_PARTIAL_OUTAGE" + "IT_PARTIAL_OUTAGE", + "OUTAGE", + "PARTIAL_OUTAGE" ], "enumDescriptions": [ + "", + "", "", "" ] @@ -9738,25 +10781,33 @@ }, "source": { "type": "string", + "description": "The party that generated this notification. Note that \"NSRC_GOOGLE\" has been deprecated in favor of \"GOOGLE\"", "enum": [ + "GOOGLE", "NSRC_GOOGLE" ], "enumDescriptions": [ + "", "" ] }, "startTime": { "type": "string", - "description": "Scheduled start and end times for the outage (milliseconds since Unix epoch).", + "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", "format": "int64" }, "state": { "type": "string", + "description": "State of this notification. 
Note that the \"NS_\" versions of this enum have been deprecated in favor of the unprefixed values.", "enum": [ + "ACTIVE", + "CANCELLED", "NS_ACTIVE", "NS_CANCELED" ], "enumDescriptions": [ + "", + "", "", "" ] @@ -9823,9 +10874,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9836,7 +10891,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9856,6 +10913,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -10069,9 +11132,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -10082,7 +11149,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -10102,6 +11171,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -10211,7 +11286,7 @@ "MachineType": { "id": "MachineType", "type": "object", - "description": "A Machine Type resource.", + "description": "A Machine Type resource. 
(== resource_for v1.machineTypes ==) (== resource_for beta.machineTypes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -10313,9 +11388,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -10326,7 +11405,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -10346,6 +11427,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -10413,9 +11500,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -10426,7 +11517,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -10446,6 +11539,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -10495,9 +11594,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -10508,7 +11611,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -10528,6 +11633,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -10556,6 +11667,382 @@ } } }, + "MaintenancePoliciesList": { + "id": "MaintenancePoliciesList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of MaintenancePolicy resources.", + "items": { + "$ref": "MaintenancePolicy" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource.Always compute#maintenancePoliciesList for listsof maintenancePolicies", + "default": "compute#maintenancePoliciesList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } + } + } + }, + "MaintenancePoliciesScopedList": { + "id": "MaintenancePoliciesScopedList", + "type": "object", + "properties": { + "maintenancePolicies": { + "type": "array", + "description": "List of maintenancePolicies contained in this scope.", + "items": { + "$ref": "MaintenancePolicy" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of maintenancePolicies when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "MaintenancePolicy": { + "id": "MaintenancePolicy", + "type": "object", + "description": "A maintenance policy for an instance. This specifies what kind of maintenance operations our infrastructure may perform on this instance and when.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#maintenance_policies for maintenance policies.", + "default": "compute#maintenancePolicy" + }, + "name": { + "type": "string", + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + }, + "region": { + "type": "string" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined fully-qualified URL for this resource." + }, + "vmMaintenancePolicy": { + "$ref": "VmMaintenancePolicy", + "description": "Maintenance policy applicable to VMs for infrastructure maintenance." + } + } + }, + "MaintenancePolicyAggregatedList": { + "id": "MaintenancePolicyAggregatedList", + "type": "object", + "description": "Contains a list of maintenancePolicies.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A list of MaintenancePolicy resources.", + "additionalProperties": { + "$ref": "MaintenancePoliciesScopedList", + "description": "Name of the scope containing this set of maintenancePolicies." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#maintenancePolicyAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." 
+ }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "MaintenanceWindow": { + "id": "MaintenanceWindow", + "type": "object", + "description": "A maintenance window for VMs and disks. When set, we restrict our maintenance operations to this window.", + "properties": { + "dailyMaintenanceWindow": { + "$ref": "DailyMaintenanceWindow" + }, + "hourlyMaintenanceWindow": { + "$ref": "HourlyMaintenanceWindow" + } + } + }, "ManagedInstance": { "id": "ManagedInstance", "type": "object", @@ -10701,7 +12188,7 @@ "description": "[Output Only] Indicates where does the override come from.", "enum": [ "AUTO_GENERATED", - "USER_GENERATED" + "USER_PROVIDED" ], "enumDescriptions": [ "", @@ -10816,7 +12303,7 @@ "Network": { "id": "Network", "type": "object", - "description": "Represents a Network resource. Read Networks and Firewalls for more information.", + "description": "Represents a Network resource. Read Networks and Firewalls for more information. 
(== resource_for v1.networks ==) (== resource_for beta.networks ==)", "properties": { "IPv4Range": { "type": "string", @@ -11029,9 +12516,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11042,7 +12533,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11062,6 +12555,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11152,9 +12651,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11165,7 +12668,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11185,6 +12690,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11291,9 +12802,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11304,7 +12819,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11324,6 +12841,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11373,9 +12896,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11386,7 +12913,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11406,6 +12935,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11537,9 +13072,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11550,7 +13089,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11570,6 +13111,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11662,7 +13209,12 @@ }, "name": { "type": "string", - "description": "Name of the peering, which should conform to RFC1035." 
+ "description": "Name of the peering, which should conform to RFC1035.", + "annotations": { + "required": [ + "compute.networks.addPeering" + ] + } }, "peerNetwork": { "type": "string", @@ -11683,7 +13235,7 @@ "Operation": { "id": "Operation", "type": "object", - "description": "An Operation resource, used to manage asynchronous API requests.", + "description": "An Operation resource, used to manage asynchronous API requests. (== resource_for v1.globalOperations ==) (== resource_for beta.globalOperations ==) (== resource_for v1.regionOperations ==) (== resource_for beta.regionOperations ==) (== resource_for v1.zoneOperations ==) (== resource_for beta.zoneOperations ==)", "properties": { "clientOperationId": { "type": "string", @@ -11819,9 +13371,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11832,7 +13388,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11852,6 +13410,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11924,9 +13488,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11937,7 +13505,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", 
"SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11957,6 +13527,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12024,9 +13600,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12037,7 +13617,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12057,6 +13639,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12106,9 +13694,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12119,7 +13711,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12139,6 +13733,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12269,7 +13869,7 @@ "Project": { "id": "Project", "type": "object", - "description": "A Project resource. Projects can only be created in the Google Cloud Platform Console. Unless marked otherwise, values can only be modified in the console.", + "description": "A Project resource. Projects can only be created in the Google Cloud Platform Console. 
Unless marked otherwise, values can only be modified in the console. (== resource_for v1.projects ==) (== resource_for beta.projects ==)", "properties": { "commonInstanceMetadata": { "$ref": "Metadata", @@ -12464,13 +14064,17 @@ "INSTANCE_GROUP_MANAGERS", "INSTANCE_TEMPLATES", "INTERCONNECTS", + "INTERNAL_ADDRESSES", "IN_USE_ADDRESSES", "LOCAL_SSD_TOTAL_GB", + "MAINTENANCE_POLICIES", "NETWORKS", "NVIDIA_K80_GPUS", "NVIDIA_P100_GPUS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", + "PREEMPTIBLE_NVIDIA_K80_GPUS", + "PREEMPTIBLE_NVIDIA_P100_GPUS", "REGIONAL_AUTOSCALERS", "REGIONAL_INSTANCE_GROUP_MANAGERS", "ROUTERS", @@ -12536,6 +14140,10 @@ "", "", "", + "", + "", + "", + "", "" ] }, @@ -12573,7 +14181,7 @@ "Region": { "id": "Region", "type": "object", - "description": "Region resource.", + "description": "Region resource. (== resource_for beta.regions ==) (== resource_for v1.regions ==)", "properties": { "creationTimestamp": { "type": "string", @@ -12672,9 +14280,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12685,7 +14297,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12705,6 +14319,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12771,9 +14391,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", 
"NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12784,7 +14408,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12804,6 +14430,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12882,9 +14514,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12895,7 +14531,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12915,6 +14553,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12996,9 +14640,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13009,7 +14657,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13029,6 +14679,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13168,9 +14824,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + 
"EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13181,7 +14841,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13201,6 +14863,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13337,9 +15005,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13350,7 +15022,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13370,6 +15044,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13478,9 +15158,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13491,7 +15175,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13511,6 +15197,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13598,7 +15290,7 @@ "Route": { "id": 
"Route", "type": "object", - "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", + "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped. 
(== resource_for beta.routes ==) (== resource_for v1.routes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -13708,9 +15400,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13721,7 +15417,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13741,6 +15439,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13809,9 +15513,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13822,7 +15530,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13842,6 +15552,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13947,6 +15663,21 @@ } } }, + "RouterAdvertisedIpRange": { + "id": "RouterAdvertisedIpRange", + "type": "object", + "description": "Description-tagged IP ranges for the router to advertise.", + "properties": { + "description": { + "type": "string", + "description": "User-specified description for the IP range." + }, + "range": { + "type": "string", + "description": "The IP range to advertise. The value must be a CIDR-formatted string." 
+ } + } + }, "RouterAdvertisedPrefix": { "id": "RouterAdvertisedPrefix", "type": "object", @@ -14002,9 +15733,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -14015,7 +15750,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -14035,6 +15772,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -14092,6 +15835,13 @@ ] } }, + "advertisedIpRanges": { + "type": "array", + "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", + "items": { + "$ref": "RouterAdvertisedIpRange" + } + }, "advertisedPrefixs": { "type": "array", "description": "User-specified list of individual prefixes to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These prefixes will be advertised in addition to any specified groups. Leave this field blank to advertise no custom prefixes.", @@ -14135,6 +15885,13 @@ ] } }, + "advertisedIpRanges": { + "type": "array", + "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These IP ranges will be advertised in addition to any specified groups. 
Leave this field blank to advertise no custom IP ranges.", + "items": { + "$ref": "RouterAdvertisedIpRange" + } + }, "advertisedPrefixs": { "type": "array", "description": "User-specified list of individual prefixes to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These prefixes will be advertised in addition to any specified groups. Leave this field blank to advertise no custom prefixes.", @@ -14233,9 +15990,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -14246,7 +16007,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -14266,6 +16029,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -14571,9 +16340,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -14584,7 +16357,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -14604,6 +16379,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -14659,7 +16440,7 @@ }, "conditions": { "type": "array", - "description": 
"Additional restrictions that must be met", + "description": "Additional restrictions that must be met. All conditions must pass for the rule to match.", "items": { "$ref": "Condition" } @@ -14854,9 +16635,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -14867,7 +16652,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -14887,6 +16674,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -14966,6 +16759,10 @@ "$ref": "SecurityPolicyRuleMatcherConfig", "description": "The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified." }, + "expr": { + "$ref": "Expr", + "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." + }, "srcIpRanges": { "type": "array", "description": "CIDR IP address range. Only IPv4 is supported.", @@ -14973,13 +16770,6 @@ "type": "string" } }, - "srcRegionCodes": { - "type": "array", - "description": "Match by country or region code.", - "items": { - "type": "string" - } - }, "versionedExpr": { "type": "string", "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. 
Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", @@ -15055,6 +16845,21 @@ } } }, + "ShieldedVmConfig": { + "id": "ShieldedVmConfig", + "type": "object", + "description": "A set of Shielded VM options.", + "properties": { + "enableSecureBoot": { + "type": "boolean", + "description": "Defines whether the instance should have secure boot enabled." + }, + "enableVtpm": { + "type": "boolean", + "description": "Defines whether the instance should have the TPM enabled." + } + } + }, "SignedUrlKey": { "id": "SignedUrlKey", "type": "object", @@ -15074,7 +16879,7 @@ "Snapshot": { "id": "Snapshot", "type": "object", - "description": "A persistent disk snapshot resource.", + "description": "A persistent disk snapshot resource. (== resource_for beta.snapshots ==) (== resource_for v1.snapshots ==)", "properties": { "creationTimestamp": { "type": "string", @@ -15171,7 +16976,7 @@ }, "storageBytes": { "type": "string", - "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", + "description": "[Output Only] A size of the storage used by the snapshot. 
As snapshots share storage, this number is expected to change with snapshot creation/deletion.", "format": "int64" }, "storageBytesStatus": { @@ -15185,6 +16990,13 @@ "", "" ] + }, + "storageLocations": { + "type": "array", + "description": "GCS bucket storage location of the snapshot (regional or multi-regional).", + "items": { + "type": "string" + } } } }, @@ -15227,9 +17039,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15240,7 +17056,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15260,6 +17078,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -15288,10 +17112,24 @@ } } }, + "SourceInstanceParams": { + "id": "SourceInstanceParams", + "type": "object", + "description": "A specification of the parameters to use when creating the instance template from a source instance.", + "properties": { + "diskConfigs": { + "type": "array", + "description": "Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes.", + "items": { + "$ref": "DiskInstantiationConfig" + } + } + } + }, "SslCertificate": { "id": "SslCertificate", "type": "object", - "description": "An SslCertificate resource. 
This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", + "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user. (== resource_for beta.sslCertificates ==) (== resource_for v1.sslCertificates ==)", "properties": { "certificate": { "type": "string", @@ -15369,9 +17207,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15382,7 +17224,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15402,6 +17246,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -15468,9 +17318,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15481,7 +17335,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15501,6 +17357,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -15636,9 +17498,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", 
"DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15649,7 +17515,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15669,6 +17537,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -15708,10 +17582,43 @@ } } }, + "StatefulPolicy": { + "id": "StatefulPolicy", + "type": "object", + "properties": { + "preservedResources": { + "$ref": "StatefulPolicyPreservedResources" + } + } + }, + "StatefulPolicyPreservedDisk": { + "id": "StatefulPolicyPreservedDisk", + "type": "object", + "properties": { + "deviceName": { + "type": "string", + "description": "Device name of the disk to be preserved" + } + } + }, + "StatefulPolicyPreservedResources": { + "id": "StatefulPolicyPreservedResources", + "type": "object", + "description": "Configuration of all preserved resources.", + "properties": { + "disks": { + "type": "array", + "description": "Disks created on the instances that will be preserved on instance delete, resize down, etc.", + "items": { + "$ref": "StatefulPolicyPreservedDisk" + } + } + } + }, "Subnetwork": { "id": "Subnetwork", "type": "object", - "description": "A Subnetwork resource.", + "description": "A Subnetwork resource. 
(== resource_for beta.subnetworks ==) (== resource_for v1.subnetworks ==)", "properties": { "allowSubnetCidrRoutesOverlap": { "type": "boolean", @@ -15817,9 +17724,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15830,7 +17741,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15850,6 +17763,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -15917,9 +17836,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15930,7 +17853,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15950,6 +17875,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -16024,9 +17955,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -16037,7 +17972,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -16057,6 +17994,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -16165,7 +18108,7 @@ "TargetHttpProxy": { "id": "TargetHttpProxy", "type": "object", - "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.", + "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== resource_for v1.targetHttpProxies ==)", "properties": { "creationTimestamp": { "type": "string", @@ -16239,9 +18182,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -16252,7 +18199,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -16272,6 +18221,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -16336,7 +18291,7 @@ "TargetHttpsProxy": { "id": "TargetHttpsProxy", "type": "object", - "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy.", + "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy. 
(== resource_for beta.targetHttpsProxies ==) (== resource_for v1.targetHttpsProxies ==)", "properties": { "clientSslPolicy": { "type": "string", @@ -16439,9 +18394,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -16452,7 +18411,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -16472,6 +18433,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -16503,7 +18470,7 @@ "TargetInstance": { "id": "TargetInstance", "type": "object", - "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols.", + "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols. 
(== resource_for beta.targetInstances ==) (== resource_for v1.targetInstances ==)", "properties": { "creationTimestamp": { "type": "string", @@ -16591,9 +18558,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -16604,7 +18575,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -16624,6 +18597,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -16691,9 +18670,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -16704,7 +18687,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -16724,6 +18709,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -16773,9 +18764,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -16786,7 +18781,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -16806,6 +18803,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -16837,7 +18840,7 @@ "TargetPool": { "id": "TargetPool", "type": "object", - "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool.", + "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool. (== resource_for beta.targetPools ==) (== resource_for v1.targetPools ==)", "properties": { "backupPool": { "type": "string", @@ -16952,9 +18955,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -16965,7 +18972,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -16985,6 +18994,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -17069,9 +19084,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -17082,7 +19101,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", 
"RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -17102,6 +19123,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -17203,9 +19230,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -17216,7 +19247,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -17236,6 +19269,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -17317,7 +19356,7 @@ "TargetSslProxy": { "id": "TargetSslProxy", "type": "object", - "description": "A TargetSslProxy resource. This resource defines an SSL proxy.", + "description": "A TargetSslProxy resource. This resource defines an SSL proxy. 
(== resource_for beta.targetSslProxies ==) (== resource_for v1.targetSslProxies ==)", "properties": { "clientSslPolicy": { "type": "string", @@ -17418,9 +19457,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -17431,7 +19474,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -17451,6 +19496,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -17510,7 +19561,7 @@ "TargetTcpProxy": { "id": "TargetTcpProxy", "type": "object", - "description": "A TargetTcpProxy resource. This resource defines a TCP proxy.", + "description": "A TargetTcpProxy resource. This resource defines a TCP proxy. 
(== resource_for beta.targetTcpProxies ==) (== resource_for v1.targetTcpProxies ==)", "properties": { "creationTimestamp": { "type": "string", @@ -17596,9 +19647,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -17609,7 +19664,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -17629,6 +19686,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -17660,7 +19723,7 @@ "TargetVpnGateway": { "id": "TargetVpnGateway", "type": "object", - "description": "Represents a Target VPN gateway resource.", + "description": "Represents a Target VPN gateway resource. 
(== resource_for beta.targetVpnGateways ==) (== resource_for v1.targetVpnGateways ==)", "properties": { "creationTimestamp": { "type": "string", @@ -17790,9 +19853,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -17803,7 +19870,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -17823,6 +19892,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -17890,9 +19965,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -17903,7 +19982,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -17923,6 +20004,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -17972,9 +20059,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -17985,7 +20076,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -18005,6 +20098,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -18203,9 +20302,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -18216,7 +20319,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -18236,6 +20341,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -18347,6 +20458,14 @@ "type": "object", "description": "Subnetwork which the current user has compute.subnetworks.use permission on.", "properties": { + "ipCidrRange": { + "type": "string", + "description": "The range of internal addresses that are owned by this subnetwork." + }, + "network": { + "type": "string", + "description": "Network URL." + }, "subnetwork": { "type": "string", "description": "Subnetwork URL." 
@@ -18391,9 +20510,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -18404,7 +20527,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -18424,6 +20549,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -18467,9 +20598,21 @@ } } }, + "VmMaintenancePolicy": { + "id": "VmMaintenancePolicy", + "type": "object", + "description": "A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when.", + "properties": { + "maintenanceWindow": { + "$ref": "MaintenanceWindow", + "description": "Maintenance windows that are applied to VMs covered by this policy." + } + } + }, "VpnTunnel": { "id": "VpnTunnel", "type": "object", + "description": "VPN tunnel resource. 
(== resource_for beta.vpnTunnels ==) (== resource_for v1.vpnTunnels ==)", "properties": { "creationTimestamp": { "type": "string", @@ -18640,9 +20783,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -18653,7 +20800,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -18673,6 +20822,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -18740,9 +20895,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -18753,7 +20912,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -18773,6 +20934,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -18822,9 +20989,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -18835,7 +21006,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -18855,6 +21028,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -18921,9 +21100,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -18934,7 +21117,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -18954,6 +21139,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -19008,7 +21199,7 @@ "Zone": { "id": "Zone", "type": "object", - "description": "A Zone resource.", + "description": "A Zone resource. 
(== resource_for beta.zones ==) (== resource_for v1.zones ==)", "properties": { "availableCpuPlatforms": { "type": "array", @@ -19104,9 +21295,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -19117,7 +21312,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -19137,6 +21334,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -21498,7 +23701,7 @@ "id": "compute.disks.resize", "path": "{project}/zones/{zone}/disks/{disk}/resize", "httpMethod": "POST", - "description": "Resizes the specified persistent disk.", + "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", "parameters": { "disk": { "type": "string", @@ -24682,7 +26885,7 @@ "id": "compute.images.list", "path": "{project}/global/images", "httpMethod": "GET", - "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", "parameters": { "filter": { "type": "string", @@ -26582,6 +28785,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "addMaintenancePolicies": { + "id": "compute.instances.addMaintenancePolicies", + "path": "{project}/zones/{zone}/instances/{instance}/addMaintenancePolicies", + "httpMethod": "POST", + "description": "Adds existing maintenance policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.", + "parameters": { + "instance": { + "type": "string", + "description": "The instance name for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "InstancesAddMaintenancePoliciesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "aggregatedList": { "id": "compute.instances.aggregatedList", "path": "{project}/aggregated/instances", @@ -27200,6 +29452,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "removeMaintenancePolicies": { + "id": "compute.instances.removeMaintenancePolicies", + "path": "{project}/zones/{zone}/instances/{instance}/removeMaintenancePolicies", + "httpMethod": "POST", + "description": "Removes maintenance policies from an instance.", + "parameters": { + "instance": { + "type": "string", + "description": "The instance name for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "InstancesRemoveMaintenancePoliciesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "reset": { "id": "compute.instances.reset", "path": "{project}/zones/{zone}/instances/{instance}/reset", @@ -27246,6 +29547,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "resume": { + "id": "compute.instances.resume", + "path": "{project}/zones/{zone}/instances/{instance}/resume", + "httpMethod": "POST", + "description": "Resumes an instance that was suspended using the instances().suspend method.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to resume.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "InstancesResumeRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setDeletionProtection": { "id": "compute.instances.setDeletionProtection", "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", @@ -27935,7 +30285,7 @@ "id": "compute.instances.stop", "path": "{project}/zones/{zone}/instances/{instance}/stop", "httpMethod": "POST", - "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. 
Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", "parameters": { "discardLocalSsd": { "type": "boolean", @@ -28189,6 +30539,55 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "updateShieldedVmConfig": { + "id": "compute.instances.updateShieldedVmConfig", + "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig", + "httpMethod": "PATCH", + "description": "Updates the Shielded VM config for an instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "ShieldedVmConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -28471,6 +30870,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "id": "compute.interconnectAttachments.patch", + "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "PATCH", + "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "parameters": { + "interconnectAttachment": { + "type": "string", + "description": "Name of the interconnect attachment to patch.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region", + "interconnectAttachment" + ], + "request": { + "$ref": "InterconnectAttachment" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setIamPolicy": { "id": "compute.interconnectAttachments.setIamPolicy", "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setIamPolicy", @@ -28515,6 +30963,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "setLabels": { + "id": "compute.interconnectAttachments.setLabels", + "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on an InterconnectAttachment. 
To learn more about labels, read the Labeling Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.interconnectAttachments.testIamPermissions", "path": "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", @@ -28953,6 +31450,42 @@ "https://www.googleapis.com/auth/compute" ] }, + "setLabels": { + "id": "compute.interconnects.setLabels", + "path": "{project}/global/interconnects/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on an Interconnect. 
To learn more about labels, read the Labeling Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.interconnects.testIamPermissions", "path": "{project}/global/interconnects/{resource}/testIamPermissions", @@ -29027,6 +31560,113 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "getIamPolicy": { + "id": "compute.licenseCodes.getIamPolicy", + "path": "{project}/global/licenseCodes/{resource}/getIamPolicy", + "httpMethod": "GET", + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setIamPolicy": { + "id": "compute.licenseCodes.setIamPolicy", + "path": "{project}/global/licenseCodes/{resource}/setIamPolicy", + "httpMethod": "POST", + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.licenseCodes.testIamPermissions", + "path": "{project}/global/licenseCodes/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] } } }, @@ -29450,6 +32090,376 @@ } } }, + "maintenancePolicies": { + "methods": { + "aggregatedList": { + "id": "compute.maintenancePolicies.aggregatedList", + 
"path": "{project}/aggregated/maintenancePolicies", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of maintenance policies.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "MaintenancePolicyAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.maintenancePolicies.delete", + "path": "{project}/regions/{region}/maintenancePolicies/{maintenancePolicy}", + "httpMethod": "DELETE", + "description": "Deletes the specified maintenance policy.", + "parameters": { + "maintenancePolicy": { + "type": "string", + "description": "Name of the maintenance policy to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this 
request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region", + "maintenancePolicy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.maintenancePolicies.get", + "path": "{project}/regions/{region}/maintenancePolicies/{maintenancePolicy}", + "httpMethod": "GET", + "description": "Retrieves all information of the specified maintenance policy.", + "parameters": { + "maintenancePolicy": { + "type": "string", + "description": "Name of the maintenance policy to retrieve.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "maintenancePolicy" + ], + "response": { + "$ref": "MaintenancePolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getIamPolicy": { + "id": "compute.maintenancePolicies.getIamPolicy", + "path": "{project}/regions/{region}/maintenancePolicies/{resource}/getIamPolicy", + "httpMethod": "GET", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.maintenancePolicies.insert", + "path": 
"{project}/regions/{region}/maintenancePolicies", + "httpMethod": "POST", + "description": "Creates a new maintenance policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "MaintenancePolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.maintenancePolicies.list", + "path": "{project}/regions/{region}/maintenancePolicies", + "httpMethod": "GET", + "description": "List all the maintenance policies that have been configured for the specified project in specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "MaintenancePoliciesList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setIamPolicy": { + "id": "compute.maintenancePolicies.setIamPolicy", + "path": "{project}/regions/{region}/maintenancePolicies/{resource}/setIamPolicy", + "httpMethod": "POST", + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.maintenancePolicies.testIamPermissions", + "path": "{project}/regions/{region}/maintenancePolicies/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + 
"request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "networkEndpointGroups": { "methods": { "aggregatedList": { @@ -30173,7 +33183,7 @@ "id": "compute.networks.patch", "path": "{project}/global/networks/{network}", "httpMethod": "PATCH", - "description": "Patches the specified network with the data included in the request.", + "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", "parameters": { "network": { "type": "string", @@ -34296,6 +37306,42 @@ }, "securityPolicies": { "methods": { + "addRule": { + "id": "compute.securityPolicies.addRule", + "path": "{project}/global/securityPolicies/{securityPolicy}/addRule", + "httpMethod": "POST", + "description": "Inserts a rule into a security policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "securityPolicy": { + "type": "string", + "description": "Name of the security policy to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "request": { + "$ref": "SecurityPolicyRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "id": "compute.securityPolicies.delete", "path": "{project}/global/securityPolicies/{securityPolicy}", @@ -34368,6 +37414,46 @@ "https://www.googleapis.com/auth/compute.readonly" 
] }, + "getRule": { + "id": "compute.securityPolicies.getRule", + "path": "{project}/global/securityPolicies/{securityPolicy}/getRule", + "httpMethod": "GET", + "description": "Gets a rule at the specified priority.", + "parameters": { + "priority": { + "type": "integer", + "description": "The priority of the rule to get from the security policy.", + "format": "int32", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "securityPolicy": { + "type": "string", + "description": "Name of the security policy to which the queried rule belongs.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "response": { + "$ref": "SecurityPolicyRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "id": "compute.securityPolicies.insert", "path": "{project}/global/securityPolicies", @@ -34491,6 +37577,87 @@ "https://www.googleapis.com/auth/compute" ] }, + "patchRule": { + "id": "compute.securityPolicies.patchRule", + "path": "{project}/global/securityPolicies/{securityPolicy}/patchRule", + "httpMethod": "POST", + "description": "Patches a rule at the specified priority.", + "parameters": { + "priority": { + "type": "integer", + "description": "The priority of the rule to patch.", + "format": "int32", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + 
"securityPolicy": { + "type": "string", + "description": "Name of the security policy to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "request": { + "$ref": "SecurityPolicyRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "removeRule": { + "id": "compute.securityPolicies.removeRule", + "path": "{project}/global/securityPolicies/{securityPolicy}/removeRule", + "httpMethod": "POST", + "description": "Deletes a rule at the specified priority.", + "parameters": { + "priority": { + "type": "integer", + "description": "The priority of the rule to remove from the security policy.", + "format": "int32", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "securityPolicy": { + "type": "string", + "description": "Name of the security policy to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.securityPolicies.testIamPermissions", "path": "{project}/global/securityPolicies/{resource}/testIamPermissions", @@ -38170,6 +41337,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "setLabels": { + "id": "compute.targetVpnGateways.setLabels", + "path": "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the 
labels on a TargetVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.targetVpnGateways.testIamPermissions", "path": "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions", diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go index 8badd5a9f9f..80301b1273e 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go @@ -100,6 +100,7 @@ func New(client *http.Client) (*Service, error) { s.LicenseCodes = NewLicenseCodesService(s) s.Licenses = NewLicensesService(s) s.MachineTypes = NewMachineTypesService(s) + s.MaintenancePolicies = NewMaintenancePoliciesService(s) s.NetworkEndpointGroups = NewNetworkEndpointGroupsService(s) s.Networks = NewNetworksService(s) s.Projects = NewProjectsService(s) @@ -196,6 +197,8 @@ type Service struct { MachineTypes *MachineTypesService + MaintenancePolicies *MaintenancePoliciesService + NetworkEndpointGroups *NetworkEndpointGroupsService Networks *NetworksService @@ -525,6 +528,15 @@ type MachineTypesService struct { s *Service } +func NewMaintenancePoliciesService(s *Service) *MaintenancePoliciesService { + rs := &MaintenancePoliciesService{s: s} + return rs +} + +type 
MaintenancePoliciesService struct { + s *Service +} + func NewNetworkEndpointGroupsService(s *Service) *NetworkEndpointGroupsService { rs := &NetworkEndpointGroupsService{s: s} return rs @@ -825,12 +837,13 @@ type AcceleratorConfig struct { } func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorConfig - raw := noMethod(*s) + type NoMethod AcceleratorConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AcceleratorType: An Accelerator Type resource. +// AcceleratorType: An Accelerator Type resource. (== resource_for +// beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==) type AcceleratorType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -890,8 +903,8 @@ type AcceleratorType struct { } func (s *AcceleratorType) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorType - raw := noMethod(*s) + type NoMethod AcceleratorType + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -944,8 +957,8 @@ type AcceleratorTypeAggregatedList struct { } func (s *AcceleratorTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeAggregatedList - raw := noMethod(*s) + type NoMethod AcceleratorTypeAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -959,9 +972,13 @@ type AcceleratorTypeAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -972,7 +989,9 @@ type AcceleratorTypeAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1003,8 +1022,8 @@ type AcceleratorTypeAggregatedListWarning struct { } func (s *AcceleratorTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeAggregatedListWarning - raw := noMethod(*s) + type NoMethod AcceleratorTypeAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1040,8 +1059,8 @@ type AcceleratorTypeAggregatedListWarningData struct { } func (s *AcceleratorTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeAggregatedListWarningData - raw := noMethod(*s) + type NoMethod AcceleratorTypeAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1094,8 +1113,8 @@ type AcceleratorTypeList struct { } func (s *AcceleratorTypeList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeList - raw := noMethod(*s) + type NoMethod AcceleratorTypeList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1109,9 +1128,13 @@ type AcceleratorTypeListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1122,7 +1145,9 @@ type AcceleratorTypeListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // 
"UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1153,8 +1178,8 @@ type AcceleratorTypeListWarning struct { } func (s *AcceleratorTypeListWarning) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeListWarning - raw := noMethod(*s) + type NoMethod AcceleratorTypeListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1190,8 +1215,8 @@ type AcceleratorTypeListWarningData struct { } func (s *AcceleratorTypeListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeListWarningData - raw := noMethod(*s) + type NoMethod AcceleratorTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1223,8 +1248,8 @@ type AcceleratorTypesScopedList struct { } func (s *AcceleratorTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedList - raw := noMethod(*s) + type NoMethod AcceleratorTypesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1238,9 +1263,13 @@ type AcceleratorTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1251,7 +1280,9 @@ type AcceleratorTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1282,8 +1313,8 @@ type AcceleratorTypesScopedListWarning struct { } func (s *AcceleratorTypesScopedListWarning) 
MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedListWarning - raw := noMethod(*s) + type NoMethod AcceleratorTypesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1319,8 +1350,8 @@ type AcceleratorTypesScopedListWarningData struct { } func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedListWarningData - raw := noMethod(*s) + type NoMethod AcceleratorTypesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1402,18 +1433,20 @@ type AccessConfig struct { } func (s *AccessConfig) MarshalJSON() ([]byte, error) { - type noMethod AccessConfig - raw := noMethod(*s) + type NoMethod AccessConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Address: A reserved address resource. +// Address: A reserved address resource. (== resource_for beta.addresses +// ==) (== resource_for v1.addresses ==) (== resource_for +// beta.globalAddresses ==) (== resource_for v1.globalAddresses ==) type Address struct { - // Address: The static external IP address represented by this resource. + // Address: The static IP address represented by this resource. Address string `json:"address,omitempty"` - // AddressType: The type of address to reserve. If unspecified, defaults - // to EXTERNAL. + // AddressType: The type of address to reserve, either INTERNAL or + // EXTERNAL. If unspecified, defaults to EXTERNAL. // // Possible values: // "DNS_FORWARDING" @@ -1492,21 +1525,21 @@ type Address struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] The status of the address, which can be either - // IN_USE or RESERVED. An address that is RESERVED is currently reserved - // and available to use. 
An IN_USE address is currently being used by - // another resource and is not available. + // Status: [Output Only] The status of the address, which can be one of + // RESERVING, RESERVED, or IN_USE. An address that is RESERVING is + // currently in the process of being reserved. A RESERVED address is + // currently reserved and available to use. An IN_USE address is + // currently being used by another resource and is not available. // // Possible values: // "IN_USE" // "RESERVED" Status string `json:"status,omitempty"` - // Subnetwork: For external addresses, this field should not be - // used. - // - // The URL of the subnetwork in which to reserve the address. If an IP - // address is specified, it must be within the subnetwork's IP range. + // Subnetwork: The URL of the subnetwork in which to reserve the + // address. If an IP address is specified, it must be within the + // subnetwork's IP range. This field can only be used with INTERNAL type + // with GCE_ENDPOINT/DNS_RESOLVER purposes. 
Subnetwork string `json:"subnetwork,omitempty"` // Users: [Output Only] The URLs of the resources that are using this @@ -1535,8 +1568,8 @@ type Address struct { } func (s *Address) MarshalJSON() ([]byte, error) { - type noMethod Address - raw := noMethod(*s) + type NoMethod Address + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1588,8 +1621,8 @@ type AddressAggregatedList struct { } func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AddressAggregatedList - raw := noMethod(*s) + type NoMethod AddressAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1603,9 +1636,13 @@ type AddressAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1616,7 +1653,9 @@ type AddressAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1647,8 +1686,8 @@ type AddressAggregatedListWarning struct { } func (s *AddressAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AddressAggregatedListWarning - raw := noMethod(*s) + type NoMethod AddressAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1684,8 +1723,8 @@ type AddressAggregatedListWarningData struct { } func (s *AddressAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AddressAggregatedListWarningData - 
raw := noMethod(*s) + type NoMethod AddressAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1738,8 +1777,8 @@ type AddressList struct { } func (s *AddressList) MarshalJSON() ([]byte, error) { - type noMethod AddressList - raw := noMethod(*s) + type NoMethod AddressList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1752,9 +1791,13 @@ type AddressListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1765,7 +1808,9 @@ type AddressListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1796,8 +1841,8 @@ type AddressListWarning struct { } func (s *AddressListWarning) MarshalJSON() ([]byte, error) { - type noMethod AddressListWarning - raw := noMethod(*s) + type NoMethod AddressListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1833,8 +1878,8 @@ type AddressListWarningData struct { } func (s *AddressListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AddressListWarningData - raw := noMethod(*s) + type NoMethod AddressListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1864,8 +1909,8 @@ type AddressesScopedList struct { } func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedList - raw := noMethod(*s) + type NoMethod 
AddressesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1879,9 +1924,13 @@ type AddressesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1892,7 +1941,9 @@ type AddressesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1923,8 +1974,8 @@ type AddressesScopedListWarning struct { } func (s *AddressesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedListWarning - raw := noMethod(*s) + type NoMethod AddressesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1960,8 +2011,8 @@ type AddressesScopedListWarningData struct { } func (s *AddressesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedListWarningData - raw := noMethod(*s) + type NoMethod AddressesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1999,8 +2050,8 @@ type AliasIpRange struct { } func (s *AliasIpRange) MarshalJSON() ([]byte, error) { - type noMethod AliasIpRange - raw := noMethod(*s) + type NoMethod AliasIpRange + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2054,6 +2105,11 @@ type AttachedDisk struct { // disk_size_gb in InitializeParams. 
DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // GuestOsFeatures: A list of features to enable on the guest operating + // system. Applicable only for bootable images. Read Enabling guest + // operating system features to see a list of available options. + GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` + // Index: [Output Only] A zero-based index to this disk, where 0 is // reserved for the boot disk. If you have many disks attached to an // instance, each disk would have a unique index number. @@ -2096,9 +2152,20 @@ type AttachedDisk struct { // "READ_WRITE" Mode string `json:"mode,omitempty"` + // SavedState: For LocalSSD disks on VM Instances in STOPPED or + // SUSPENDED state, this field is set to PRESERVED iff the LocalSSD data + // has been saved to a persistent location by customer request. (see the + // discard_local_ssd option on Stop/Suspend). Read-only in the api. + // + // Possible values: + // "DISK_SAVED_STATE_UNSPECIFIED" + // "PRESERVED" + SavedState string `json:"savedState,omitempty"` + // Source: Specifies a valid partial or full URL to an existing // Persistent Disk resource. When creating a new instance, one of - // initializeParams.sourceImage or disks.source is required. + // initializeParams.sourceImage or disks.source is required except for + // local SSD. // // If desired, you can also attach existing non-root persistent disks // using this property. This field is only applicable for persistent @@ -2134,8 +2201,8 @@ type AttachedDisk struct { } func (s *AttachedDisk) MarshalJSON() ([]byte, error) { - type noMethod AttachedDisk - raw := noMethod(*s) + type NoMethod AttachedDisk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2181,7 +2248,7 @@ type AttachedDiskInitializeParams struct { // SourceImage: The source image to create this disk. When creating a // new instance, one of initializeParams.sourceImage or disks.source is - // required. 
+ // required except for local SSD. // // To create a disk with one of the public operating system images, // specify the image by its family name. For example, specify @@ -2195,17 +2262,17 @@ type AttachedDiskInitializeParams struct { // // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD // - // To create a disk with a private image that you created, specify the + // To create a disk with a custom image that you created, specify the // image name in the following format: // - // global/images/my-private-image + // global/images/my-custom-image // - // You can also specify a private image by its image family, which + // You can also specify a custom image by its image family, which // returns the latest version of the image in that family. Replace the // image name with // family/family-name: // - // global/images/family/my-private-family + // global/images/family/my-image-family // // If the source image is deleted later, this field will not be set. SourceImage string `json:"sourceImage,omitempty"` @@ -2237,8 +2304,8 @@ type AttachedDiskInitializeParams struct { } func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { - type noMethod AttachedDiskInitializeParams - raw := noMethod(*s) + type NoMethod AttachedDiskInitializeParams + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2296,8 +2363,8 @@ type AuditConfig struct { } func (s *AuditConfig) MarshalJSON() ([]byte, error) { - type noMethod AuditConfig - raw := noMethod(*s) + type NoMethod AuditConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2344,8 +2411,8 @@ type AuditLogConfig struct { } func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { - type noMethod AuditLogConfig - raw := noMethod(*s) + type NoMethod AuditLogConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2381,15 +2448,18 @@ type AuthorizationLoggingOptions struct { } 
func (s *AuthorizationLoggingOptions) MarshalJSON() ([]byte, error) { - type noMethod AuthorizationLoggingOptions - raw := noMethod(*s) + type NoMethod AuthorizationLoggingOptions + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Autoscaler: Represents an Autoscaler resource. Autoscalers allow you // to automatically scale virtual machine instances in managed instance // groups according to an autoscaling policy that you define. For more -// information, read Autoscaling Groups of Instances. +// information, read Autoscaling Groups of Instances. (== resource_for +// beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== +// resource_for beta.regionAutoscalers ==) (== resource_for +// v1.regionAutoscalers ==) type Autoscaler struct { // AutoscalingPolicy: The configuration parameters for the autoscaling // algorithm. You can define one or more of the policies for an @@ -2425,6 +2495,13 @@ type Autoscaler struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // RecommendedSize: [Output Only] Target recommended MIG size computed + // by autoscaler. Autoscaler calculates recommended MIG size even when + // autoscaling policy mode is different from ON. This field is empty + // when autoscaler is not connected to the existing managed instance + // group or autoscaler did not generate its first prediction. + RecommendedSize int64 `json:"recommendedSize,omitempty"` + // Region: [Output Only] URL of the region where the instance group // resides (for autoscalers living in regional scope). 
Region string `json:"region,omitempty"` @@ -2477,8 +2554,8 @@ type Autoscaler struct { } func (s *Autoscaler) MarshalJSON() ([]byte, error) { - type noMethod Autoscaler - raw := noMethod(*s) + type NoMethod Autoscaler + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2530,8 +2607,8 @@ type AutoscalerAggregatedList struct { } func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerAggregatedList - raw := noMethod(*s) + type NoMethod AutoscalerAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2545,9 +2622,13 @@ type AutoscalerAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -2558,7 +2639,9 @@ type AutoscalerAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -2589,8 +2672,8 @@ type AutoscalerAggregatedListWarning struct { } func (s *AutoscalerAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerAggregatedListWarning - raw := noMethod(*s) + type NoMethod AutoscalerAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2626,8 +2709,8 @@ type AutoscalerAggregatedListWarningData struct { } func (s *AutoscalerAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerAggregatedListWarningData - raw := noMethod(*s) + type 
NoMethod AutoscalerAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2680,8 +2763,8 @@ type AutoscalerList struct { } func (s *AutoscalerList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerList - raw := noMethod(*s) + type NoMethod AutoscalerList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2694,9 +2777,13 @@ type AutoscalerListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -2707,7 +2794,9 @@ type AutoscalerListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -2738,8 +2827,8 @@ type AutoscalerListWarning struct { } func (s *AutoscalerListWarning) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerListWarning - raw := noMethod(*s) + type NoMethod AutoscalerListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2775,8 +2864,8 @@ type AutoscalerListWarningData struct { } func (s *AutoscalerListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerListWarningData - raw := noMethod(*s) + type NoMethod AutoscalerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2822,8 +2911,8 @@ type AutoscalerStatusDetails struct { } func (s *AutoscalerStatusDetails) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerStatusDetails - raw 
:= noMethod(*s) + type NoMethod AutoscalerStatusDetails + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2854,8 +2943,8 @@ type AutoscalersScopedList struct { } func (s *AutoscalersScopedList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedList - raw := noMethod(*s) + type NoMethod AutoscalersScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2869,9 +2958,13 @@ type AutoscalersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -2882,7 +2975,9 @@ type AutoscalersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -2913,8 +3008,8 @@ type AutoscalersScopedListWarning struct { } func (s *AutoscalersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedListWarning - raw := noMethod(*s) + type NoMethod AutoscalersScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2950,8 +3045,8 @@ type AutoscalersScopedListWarningData struct { } func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedListWarningData - raw := noMethod(*s) + type NoMethod AutoscalersScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3027,8 +3122,8 @@ type AutoscalingPolicy struct { } func (s 
*AutoscalingPolicy) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicy - raw := noMethod(*s) + type NoMethod AutoscalingPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3068,18 +3163,18 @@ type AutoscalingPolicyCpuUtilization struct { } func (s *AutoscalingPolicyCpuUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyCpuUtilization - raw := noMethod(*s) + type NoMethod AutoscalingPolicyCpuUtilization + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyCpuUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyCpuUtilization + type NoMethod AutoscalingPolicyCpuUtilization var s1 struct { UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -3093,7 +3188,7 @@ type AutoscalingPolicyCustomMetricUtilization struct { // Filter: A filter string, compatible with a Stackdriver Monitoring // filter string for TimeSeries.list API call. This filter is used to // select a specific TimeSeries for the purpose of autoscaling and to - // determine whether the metric is exporting per-instance or global + // determine whether the metric is exporting per-instance or per-group // data. // // For the filter to be valid for autoscaling purposes, the following @@ -3111,8 +3206,8 @@ type AutoscalingPolicyCustomMetricUtilization struct { // If the resource type is any other value, the autoscaler expects this // metric to contain values that apply to the entire autoscaled instance // group and resource label filtering can be performed to point - // autoscaler at the correct TimeSeries to scale upon. This is / called - // a global metric for the purpose of autoscaling. + // autoscaler at the correct TimeSeries to scale upon. 
This is called a + // per-group metric for the purpose of autoscaling. // // If not specified, the type defaults to gce_instance. // @@ -3124,15 +3219,12 @@ type AutoscalingPolicyCustomMetricUtilization struct { Filter string `json:"filter,omitempty"` // Metric: The identifier (type) of the Stackdriver Monitoring metric. - // The metric cannot have negative values and should be a utilization - // metric, which means that the number of virtual machines handling - // requests should increase or decrease proportionally to the - // metric. + // The metric cannot have negative values. // // The metric must have a value type of INT64 or DOUBLE. Metric string `json:"metric,omitempty"` - // SingleInstanceAssignment: If scaling is based on a global metric + // SingleInstanceAssignment: If scaling is based on a per-group metric // value that represents the total amount of work to be done or resource // usage, set this value to an amount assigned for a single instance of // the scaled group. Autoscaler will keep the number of instances @@ -3151,7 +3243,9 @@ type AutoscalingPolicyCustomMetricUtilization struct { SingleInstanceAssignment float64 `json:"singleInstanceAssignment,omitempty"` // UtilizationTarget: The target value of the metric that autoscaler - // should maintain. This must be a positive value. + // should maintain. This must be a positive value. A utilization metric + // scales number of virtual machines handling requests to increase or + // decrease proportionally to the metric. // // For example, a good metric to use as a utilization_target is // compute.googleapis.com/instance/network/received_bytes_count. 
The @@ -3188,19 +3282,19 @@ type AutoscalingPolicyCustomMetricUtilization struct { } func (s *AutoscalingPolicyCustomMetricUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyCustomMetricUtilization - raw := noMethod(*s) + type NoMethod AutoscalingPolicyCustomMetricUtilization + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyCustomMetricUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyCustomMetricUtilization + type NoMethod AutoscalingPolicyCustomMetricUtilization var s1 struct { SingleInstanceAssignment gensupport.JSONFloat64 `json:"singleInstanceAssignment"` UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -3237,18 +3331,18 @@ type AutoscalingPolicyLoadBalancingUtilization struct { } func (s *AutoscalingPolicyLoadBalancingUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyLoadBalancingUtilization - raw := noMethod(*s) + type NoMethod AutoscalingPolicyLoadBalancingUtilization + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyLoadBalancingUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyLoadBalancingUtilization + type NoMethod AutoscalingPolicyLoadBalancingUtilization var s1 struct { UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -3295,19 +3389,19 @@ type AutoscalingPolicyQueueBasedScaling struct { } func (s *AutoscalingPolicyQueueBasedScaling) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyQueueBasedScaling - raw := noMethod(*s) + type NoMethod 
AutoscalingPolicyQueueBasedScaling + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyQueueBasedScaling) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyQueueBasedScaling + type NoMethod AutoscalingPolicyQueueBasedScaling var s1 struct { AcceptableBacklogPerInstance gensupport.JSONFloat64 `json:"acceptableBacklogPerInstance"` SingleWorkerThroughputPerSec gensupport.JSONFloat64 `json:"singleWorkerThroughputPerSec"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -3349,8 +3443,8 @@ type AutoscalingPolicyQueueBasedScalingCloudPubSub struct { } func (s *AutoscalingPolicyQueueBasedScalingCloudPubSub) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyQueueBasedScalingCloudPubSub - raw := noMethod(*s) + type NoMethod AutoscalingPolicyQueueBasedScalingCloudPubSub + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3478,21 +3572,21 @@ type Backend struct { } func (s *Backend) MarshalJSON() ([]byte, error) { - type noMethod Backend - raw := noMethod(*s) + type NoMethod Backend + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Backend) UnmarshalJSON(data []byte) error { - type noMethod Backend + type NoMethod Backend var s1 struct { CapacityScaler gensupport.JSONFloat64 `json:"capacityScaler"` MaxRatePerEndpoint gensupport.JSONFloat64 `json:"maxRatePerEndpoint"` MaxRatePerInstance gensupport.JSONFloat64 `json:"maxRatePerInstance"` MaxUtilization gensupport.JSONFloat64 `json:"maxUtilization"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -3564,8 +3658,8 @@ type BackendBucket struct { } func (s *BackendBucket) MarshalJSON() ([]byte, error) { - type noMethod BackendBucket - 
raw := noMethod(*s) + type NoMethod BackendBucket + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3606,8 +3700,8 @@ type BackendBucketCdnPolicy struct { } func (s *BackendBucketCdnPolicy) MarshalJSON() ([]byte, error) { - type noMethod BackendBucketCdnPolicy - raw := noMethod(*s) + type NoMethod BackendBucketCdnPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3659,8 +3753,8 @@ type BackendBucketList struct { } func (s *BackendBucketList) MarshalJSON() ([]byte, error) { - type noMethod BackendBucketList - raw := noMethod(*s) + type NoMethod BackendBucketList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3674,9 +3768,13 @@ type BackendBucketListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -3687,7 +3785,9 @@ type BackendBucketListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -3718,8 +3818,8 @@ type BackendBucketListWarning struct { } func (s *BackendBucketListWarning) MarshalJSON() ([]byte, error) { - type noMethod BackendBucketListWarning - raw := noMethod(*s) + type NoMethod BackendBucketListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3755,13 +3855,15 @@ type BackendBucketListWarningData struct { } func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { - type 
noMethod BackendBucketListWarningData - raw := noMethod(*s) + type NoMethod BackendBucketListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BackendService: A BackendService resource. This resource defines a -// group of backend virtual machines and their serving capacity. +// group of backend virtual machines and their serving capacity. (== +// resource_for v1.backendService ==) (== resource_for +// beta.backendService ==) type BackendService struct { // AffinityCookieTtlSec: Lifetime of cookies in seconds if // session_affinity is GENERATED_COOKIE. If set to 0, the cookie is @@ -3771,12 +3873,20 @@ type BackendService struct { // When the load balancing scheme is INTERNAL, this field is not used. AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` + // AppEngineBackend: Directs request to an App Engine app. + // cloudFunctionBackend and backends[] must be empty if this is set. + AppEngineBackend *BackendServiceAppEngineBackend `json:"appEngineBackend,omitempty"` + // Backends: The list of backends that serve this BackendService. Backends []*Backend `json:"backends,omitempty"` // CdnPolicy: Cloud CDN configuration for this BackendService. CdnPolicy *BackendServiceCdnPolicy `json:"cdnPolicy,omitempty"` + // CloudFunctionBackend: Directs request to a cloud function. + // appEngineBackend and backends[] must be empty if this is set. 
+ CloudFunctionBackend *BackendServiceCloudFunctionBackend `json:"cloudFunctionBackend,omitempty"` + ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -3935,8 +4045,8 @@ type BackendService struct { } func (s *BackendService) MarshalJSON() ([]byte, error) { - type noMethod BackendService - raw := noMethod(*s) + type NoMethod BackendService + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3989,8 +4099,8 @@ type BackendServiceAggregatedList struct { } func (s *BackendServiceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceAggregatedList - raw := noMethod(*s) + type NoMethod BackendServiceAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4004,9 +4114,13 @@ type BackendServiceAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4017,7 +4131,9 @@ type BackendServiceAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4048,8 +4164,8 @@ type BackendServiceAggregatedListWarning struct { } func (s *BackendServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceAggregatedListWarning - raw := noMethod(*s) + type NoMethod BackendServiceAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -4085,8 +4201,47 @@ type BackendServiceAggregatedListWarningData struct { } func (s *BackendServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceAggregatedListWarningData - raw := noMethod(*s) + type NoMethod BackendServiceAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceAppEngineBackend: Configuration of a App Engine +// backend. +type BackendServiceAppEngineBackend struct { + // AppEngineService: Optional. App Engine app service name. + AppEngineService string `json:"appEngineService,omitempty"` + + // TargetProject: Required. Project ID of the project hosting the app. + // This is the project ID of this project. Reference to another project + // is not allowed. + TargetProject string `json:"targetProject,omitempty"` + + // Version: Optional. Version of App Engine app service. When empty, App + // Engine will do its normal traffic split. + Version string `json:"version,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AppEngineService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AppEngineService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *BackendServiceAppEngineBackend) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceAppEngineBackend + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4129,8 +4284,42 @@ type BackendServiceCdnPolicy struct { } func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceCdnPolicy - raw := noMethod(*s) + type NoMethod BackendServiceCdnPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceCloudFunctionBackend: Configuration of a Cloud Function +// backend. +type BackendServiceCloudFunctionBackend struct { + // FunctionName: Required. A cloud function name. Special value ?*? + // represents all cloud functions in the project. + FunctionName string `json:"functionName,omitempty"` + + // TargetProject: Required. Project ID of the project hosting the cloud + // function. + TargetProject string `json:"targetProject,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FunctionName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FunctionName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *BackendServiceCloudFunctionBackend) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceCloudFunctionBackend + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4190,18 +4379,18 @@ type BackendServiceFailoverPolicy struct { } func (s *BackendServiceFailoverPolicy) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceFailoverPolicy - raw := noMethod(*s) + type NoMethod BackendServiceFailoverPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *BackendServiceFailoverPolicy) UnmarshalJSON(data []byte) error { - type noMethod BackendServiceFailoverPolicy + type NoMethod BackendServiceFailoverPolicy var s1 struct { FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -4238,8 +4427,8 @@ type BackendServiceGroupHealth struct { } func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceGroupHealth - raw := noMethod(*s) + type NoMethod BackendServiceGroupHealth + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4273,8 +4462,8 @@ type BackendServiceIAP struct { } func (s *BackendServiceIAP) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceIAP - raw := noMethod(*s) + type NoMethod BackendServiceIAP + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4327,8 +4516,8 @@ type BackendServiceList struct { } func (s *BackendServiceList) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceList - raw := noMethod(*s) + type NoMethod BackendServiceList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4342,9 +4531,13 @@ type BackendServiceListWarning struct { // Possible 
values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4355,7 +4548,9 @@ type BackendServiceListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4386,8 +4581,8 @@ type BackendServiceListWarning struct { } func (s *BackendServiceListWarning) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceListWarning - raw := noMethod(*s) + type NoMethod BackendServiceListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4423,8 +4618,8 @@ type BackendServiceListWarningData struct { } func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceListWarningData - raw := noMethod(*s) + type NoMethod BackendServiceListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4450,8 +4645,8 @@ type BackendServiceReference struct { } func (s *BackendServiceReference) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceReference - raw := noMethod(*s) + type NoMethod BackendServiceReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4482,8 +4677,8 @@ type BackendServicesScopedList struct { } func (s *BackendServicesScopedList) MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedList - raw := noMethod(*s) + type NoMethod BackendServicesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -4497,9 +4692,13 @@ type BackendServicesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4510,7 +4709,9 @@ type BackendServicesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4541,8 +4742,8 @@ type BackendServicesScopedListWarning struct { } func (s *BackendServicesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedListWarning - raw := noMethod(*s) + type NoMethod BackendServicesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4578,8 +4779,8 @@ type BackendServicesScopedListWarningData struct { } func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedListWarningData - raw := noMethod(*s) + type NoMethod BackendServicesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4642,8 +4843,8 @@ type Binding struct { } func (s *Binding) MarshalJSON() ([]byte, error) { - type noMethod Binding - raw := noMethod(*s) + type NoMethod Binding + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4672,8 +4873,8 @@ type CacheInvalidationRule struct { } func (s *CacheInvalidationRule) MarshalJSON() ([]byte, error) { - type noMethod CacheInvalidationRule - raw := noMethod(*s) + type 
NoMethod CacheInvalidationRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4725,8 +4926,8 @@ type CacheKeyPolicy struct { } func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { - type noMethod CacheKeyPolicy - raw := noMethod(*s) + type NoMethod CacheKeyPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4739,7 +4940,8 @@ func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { // Committed use discounts are subject to Google Cloud Platform's // Service Specific Terms. By purchasing a committed use discount, you // agree to these terms. Committed use discounts will not renew, so you -// must purchase a new commitment to continue receiving discounts. +// must purchase a new commitment to continue receiving discounts. (== +// resource_for beta.commitments ==) (== resource_for v1.commitments ==) type Commitment struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -4833,8 +5035,8 @@ type Commitment struct { } func (s *Commitment) MarshalJSON() ([]byte, error) { - type noMethod Commitment - raw := noMethod(*s) + type NoMethod Commitment + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4886,8 +5088,8 @@ type CommitmentAggregatedList struct { } func (s *CommitmentAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentAggregatedList - raw := noMethod(*s) + type NoMethod CommitmentAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4901,9 +5103,13 @@ type CommitmentAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4914,7 +5120,9 @@ type CommitmentAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4945,8 +5153,8 @@ type CommitmentAggregatedListWarning struct { } func (s *CommitmentAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod CommitmentAggregatedListWarning - raw := noMethod(*s) + type NoMethod CommitmentAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4982,8 +5190,8 @@ type CommitmentAggregatedListWarningData struct { } func (s *CommitmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod CommitmentAggregatedListWarningData - raw := noMethod(*s) + type NoMethod 
CommitmentAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5036,8 +5244,8 @@ type CommitmentList struct { } func (s *CommitmentList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentList - raw := noMethod(*s) + type NoMethod CommitmentList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5050,9 +5258,13 @@ type CommitmentListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -5063,7 +5275,9 @@ type CommitmentListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -5094,8 +5308,8 @@ type CommitmentListWarning struct { } func (s *CommitmentListWarning) MarshalJSON() ([]byte, error) { - type noMethod CommitmentListWarning - raw := noMethod(*s) + type NoMethod CommitmentListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5131,8 +5345,8 @@ type CommitmentListWarningData struct { } func (s *CommitmentListWarningData) MarshalJSON() ([]byte, error) { - type noMethod CommitmentListWarningData - raw := noMethod(*s) + type NoMethod CommitmentListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5163,8 +5377,8 @@ type CommitmentsScopedList struct { } func (s *CommitmentsScopedList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedList - raw := noMethod(*s) 
+ type NoMethod CommitmentsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5178,9 +5392,13 @@ type CommitmentsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -5191,7 +5409,9 @@ type CommitmentsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -5222,8 +5442,8 @@ type CommitmentsScopedListWarning struct { } func (s *CommitmentsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedListWarning - raw := noMethod(*s) + type NoMethod CommitmentsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5259,8 +5479,8 @@ type CommitmentsScopedListWarningData struct { } func (s *CommitmentsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedListWarningData - raw := noMethod(*s) + type NoMethod CommitmentsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5327,8 +5547,8 @@ type Condition struct { } func (s *Condition) MarshalJSON() ([]byte, error) { - type noMethod Condition - raw := noMethod(*s) + type NoMethod Condition + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5358,8 +5578,8 @@ type ConnectionDraining struct { } func (s *ConnectionDraining) MarshalJSON() ([]byte, error) { - type 
noMethod ConnectionDraining - raw := noMethod(*s) + type NoMethod ConnectionDraining + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5410,8 +5630,8 @@ type CustomerEncryptionKey struct { } func (s *CustomerEncryptionKey) MarshalJSON() ([]byte, error) { - type noMethod CustomerEncryptionKey - raw := noMethod(*s) + type NoMethod CustomerEncryptionKey + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5444,8 +5664,47 @@ type CustomerEncryptionKeyProtectedDisk struct { } func (s *CustomerEncryptionKeyProtectedDisk) MarshalJSON() ([]byte, error) { - type noMethod CustomerEncryptionKeyProtectedDisk - raw := noMethod(*s) + type NoMethod CustomerEncryptionKeyProtectedDisk + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DailyMaintenanceWindow: Time window specified for daily maintenance +// operations. +type DailyMaintenanceWindow struct { + // DaysInCycle: Allows to define schedule that runs every nth day of the + // month. + DaysInCycle int64 `json:"daysInCycle,omitempty"` + + // Duration: [Output only] Duration of the time window, automatically + // chosen to be smallest possible in the given scenario. + Duration string `json:"duration,omitempty"` + + // StartTime: Time within the maintenance window to start the + // maintenance operations. It must be in format "HH:MM?, where HH : + // [00-23] and MM : [00-59] GMT. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DaysInCycle") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DaysInCycle") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DailyMaintenanceWindow) MarshalJSON() ([]byte, error) { + type NoMethod DailyMaintenanceWindow + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5505,12 +5764,13 @@ type DeprecationStatus struct { } func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { - type noMethod DeprecationStatus - raw := noMethod(*s) + type NoMethod DeprecationStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Disk: A Disk resource. +// Disk: A Disk resource. (== resource_for beta.disks ==) (== +// resource_for v1.disks ==) type Disk struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -5536,6 +5796,11 @@ type Disk struct { // you do not need to provide a key to use the disk later. DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + // GuestOsFeatures: A list of features to enable on the guest operating + // system. Applicable only for bootable images. Read Enabling guest + // operating system features to see a list of available options. + GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. 
Id uint64 `json:"id,omitempty,string"` @@ -5632,17 +5897,17 @@ type Disk struct { // // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD // - // To create a disk with a private image that you created, specify the + // To create a disk with a custom image that you created, specify the // image name in the following format: // - // global/images/my-private-image + // global/images/my-custom-image // - // You can also specify a private image by its image family, which + // You can also specify a custom image by its image family, which // returns the latest version of the image in that family. Replace the // image name with // family/family-name: // - // global/images/family/my-private-family + // global/images/family/my-image-family SourceImage string `json:"sourceImage,omitempty"` // SourceImageEncryptionKey: The customer-supplied encryption key of the @@ -5730,8 +5995,8 @@ type Disk struct { } func (s *Disk) MarshalJSON() ([]byte, error) { - type noMethod Disk - raw := noMethod(*s) + type NoMethod Disk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5783,8 +6048,8 @@ type DiskAggregatedList struct { } func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod DiskAggregatedList - raw := noMethod(*s) + type NoMethod DiskAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5798,9 +6063,13 @@ type DiskAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -5811,7 +6080,9 @@ type DiskAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -5842,8 +6113,8 @@ type DiskAggregatedListWarning struct { } func (s *DiskAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskAggregatedListWarning - raw := noMethod(*s) + type NoMethod DiskAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5879,8 +6150,61 @@ type DiskAggregatedListWarningData struct { } func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskAggregatedListWarningData - raw := noMethod(*s) + type NoMethod DiskAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DiskInstantiationConfig: A specification of the desired way to +// instantiate a disk in the instance template when its created from a +// source instance. +type DiskInstantiationConfig struct { + // AutoDelete: Specifies whether the disk will be auto-deleted when the + // instance is deleted (but not when the disk is detached from the + // instance). + AutoDelete bool `json:"autoDelete,omitempty"` + + // DeviceName: Specifies the device name of the disk to which the + // configurations apply to. + DeviceName string `json:"deviceName,omitempty"` + + // InstantiateFrom: Specifies whether to include the disk and what image + // to use. + // + // Possible values: + // "ATTACH_READ_ONLY" + // "BLANK" + // "DEFAULT" + // "DO_NOT_INCLUDE" + // "IMAGE_URL" + // "SOURCE_IMAGE" + // "SOURCE_IMAGE_FAMILY" + InstantiateFrom string `json:"instantiateFrom,omitempty"` + + // SourceImage: The custom source image to be used to restore this disk + // when instantiating this instance template. 
+ SourceImage string `json:"sourceImage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutoDelete") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskInstantiationConfig) MarshalJSON() ([]byte, error) { + type NoMethod DiskInstantiationConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5933,8 +6257,8 @@ type DiskList struct { } func (s *DiskList) MarshalJSON() ([]byte, error) { - type noMethod DiskList - raw := noMethod(*s) + type NoMethod DiskList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5947,9 +6271,13 @@ type DiskListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -5960,7 +6288,9 @@ type DiskListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" 
// "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -5991,8 +6321,8 @@ type DiskListWarning struct { } func (s *DiskListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskListWarning - raw := noMethod(*s) + type NoMethod DiskListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6028,8 +6358,8 @@ type DiskListWarningData struct { } func (s *DiskListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskListWarningData - raw := noMethod(*s) + type NoMethod DiskListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6071,12 +6401,13 @@ type DiskMoveRequest struct { } func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { - type noMethod DiskMoveRequest - raw := noMethod(*s) + type NoMethod DiskMoveRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DiskType: A DiskType resource. +// DiskType: A DiskType resource. (== resource_for beta.diskTypes ==) +// (== resource_for v1.diskTypes ==) type DiskType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -6137,8 +6468,8 @@ type DiskType struct { } func (s *DiskType) MarshalJSON() ([]byte, error) { - type noMethod DiskType - raw := noMethod(*s) + type NoMethod DiskType + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6190,8 +6521,8 @@ type DiskTypeAggregatedList struct { } func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeAggregatedList - raw := noMethod(*s) + type NoMethod DiskTypeAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6205,9 +6536,13 @@ type DiskTypeAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -6218,7 +6553,9 @@ type DiskTypeAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -6249,8 +6586,8 @@ type DiskTypeAggregatedListWarning struct { } func (s *DiskTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeAggregatedListWarning - raw := noMethod(*s) + type NoMethod DiskTypeAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6286,8 +6623,8 @@ type DiskTypeAggregatedListWarningData struct { } func (s *DiskTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeAggregatedListWarningData - raw := noMethod(*s) + type NoMethod DiskTypeAggregatedListWarningData + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6340,8 +6677,8 @@ type DiskTypeList struct { } func (s *DiskTypeList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeList - raw := noMethod(*s) + type NoMethod DiskTypeList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6354,9 +6691,13 @@ type DiskTypeListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -6367,7 +6708,9 @@ type DiskTypeListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -6398,8 +6741,8 @@ type DiskTypeListWarning struct { } func (s *DiskTypeListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeListWarning - raw := noMethod(*s) + type NoMethod DiskTypeListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6435,8 +6778,8 @@ type DiskTypeListWarningData struct { } func (s *DiskTypeListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeListWarningData - raw := noMethod(*s) + type NoMethod DiskTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6466,8 +6809,8 @@ type DiskTypesScopedList struct { } func (s *DiskTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedList - raw := noMethod(*s) + type NoMethod DiskTypesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -6481,9 +6824,13 @@ type DiskTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -6494,7 +6841,9 @@ type DiskTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -6525,8 +6874,8 @@ type DiskTypesScopedListWarning struct { } func (s *DiskTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedListWarning - raw := noMethod(*s) + type NoMethod DiskTypesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6562,8 +6911,8 @@ type DiskTypesScopedListWarningData struct { } func (s *DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedListWarningData - raw := noMethod(*s) + type NoMethod DiskTypesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6590,8 +6939,8 @@ type DisksResizeRequest struct { } func (s *DisksResizeRequest) MarshalJSON() ([]byte, error) { - type noMethod DisksResizeRequest - raw := noMethod(*s) + type NoMethod DisksResizeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6621,8 +6970,8 @@ type DisksScopedList struct { } func (s *DisksScopedList) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedList - raw := noMethod(*s) + type NoMethod DisksScopedList + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6636,9 +6985,13 @@ type DisksScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -6649,7 +7002,9 @@ type DisksScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -6680,8 +7035,8 @@ type DisksScopedListWarning struct { } func (s *DisksScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedListWarning - raw := noMethod(*s) + type NoMethod DisksScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6717,8 +7072,8 @@ type DisksScopedListWarningData struct { } func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedListWarningData - raw := noMethod(*s) + type NoMethod DisksScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6743,8 +7098,8 @@ type DistributionPolicy struct { } func (s *DistributionPolicy) MarshalJSON() ([]byte, error) { - type noMethod DistributionPolicy - raw := noMethod(*s) + type NoMethod DistributionPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6772,8 +7127,8 @@ type DistributionPolicyZoneConfiguration struct { } func (s *DistributionPolicyZoneConfiguration) MarshalJSON() ([]byte, error) { - type noMethod DistributionPolicyZoneConfiguration 
- raw := noMethod(*s) + type NoMethod DistributionPolicyZoneConfiguration + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6822,8 +7177,8 @@ type Expr struct { } func (s *Expr) MarshalJSON() ([]byte, error) { - type noMethod Expr - raw := noMethod(*s) + type NoMethod Expr + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6863,6 +7218,12 @@ type Firewall struct { // "INGRESS" Direction string `json:"direction,omitempty"` + // Disabled: Denotes whether the firewall rule is disabled, i.e not + // applied to the network it is associated with. When set to true, the + // firewall rule is not enforced and the network behaves as if it did + // not exist. If this is unspecified, the firewall rule will be enabled. + Disabled bool `json:"disabled,omitempty"` + // EnableLogging: This field denotes whether to enable logging for a // particular firewall rule. If logging is enabled, logs will be // exported to the configured export destination for all firewall logs @@ -6956,10 +7317,11 @@ type Firewall struct { // applies to all instances on the specified network. TargetServiceAccounts []string `json:"targetServiceAccounts,omitempty"` - // TargetTags: A list of instance tags indicating sets of instances - // located in the network that may make network connections as specified - // in allowed[]. If no targetTags are specified, the firewall rule - // applies to all instances on the specified network. + // TargetTags: A list of tags that controls which instances the firewall + // rule applies to. If targetTags are specified, then the firewall rule + // applies only to instances in the VPC network that have one of those + // tags. If no targetTags are specified, the firewall rule applies to + // all instances on the specified network. 
TargetTags []string `json:"targetTags,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -6984,8 +7346,8 @@ type Firewall struct { } func (s *Firewall) MarshalJSON() ([]byte, error) { - type noMethod Firewall - raw := noMethod(*s) + type NoMethod Firewall + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7022,8 +7384,8 @@ type FirewallAllowed struct { } func (s *FirewallAllowed) MarshalJSON() ([]byte, error) { - type noMethod FirewallAllowed - raw := noMethod(*s) + type NoMethod FirewallAllowed + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7060,8 +7422,8 @@ type FirewallDenied struct { } func (s *FirewallDenied) MarshalJSON() ([]byte, error) { - type noMethod FirewallDenied - raw := noMethod(*s) + type NoMethod FirewallDenied + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7114,8 +7476,8 @@ type FirewallList struct { } func (s *FirewallList) MarshalJSON() ([]byte, error) { - type noMethod FirewallList - raw := noMethod(*s) + type NoMethod FirewallList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7128,9 +7490,13 @@ type FirewallListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7141,7 +7507,9 @@ type FirewallListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ 
-7172,8 +7540,8 @@ type FirewallListWarning struct { } func (s *FirewallListWarning) MarshalJSON() ([]byte, error) { - type noMethod FirewallListWarning - raw := noMethod(*s) + type NoMethod FirewallListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7209,23 +7577,32 @@ type FirewallListWarningData struct { } func (s *FirewallListWarningData) MarshalJSON() ([]byte, error) { - type noMethod FirewallListWarningData - raw := noMethod(*s) + type NoMethod FirewallListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // FixedOrPercent: Encapsulates numeric value that can be either // absolute or relative. type FixedOrPercent struct { - // Calculated: [Output Only] Absolute value calculated based on mode: - // mode = fixed -> calculated = fixed = percent -> calculated = - // ceiling(percent/100 * base_value) + // Calculated: [Output Only] Absolute value of VM instances calculated + // based on the specific mode. + // + // + // - If the value is fixed, then the caculated value is equal to the + // fixed value. + // - If the value is a percent, then the calculated value is percent/100 + // * targetSize. For example, the calculated value of a 80% of a managed + // instance group with 150 instances would be (80/100 * 150) = 120 VM + // instances. If there is a remainder, the number is rounded up. Calculated int64 `json:"calculated,omitempty"` - // Fixed: fixed must be non-negative. + // Fixed: Specifies a fixed number of VM instances. This must be a + // positive integer. Fixed int64 `json:"fixed,omitempty"` - // Percent: percent must belong to [0, 100]. + // Percent: Specifies a percentage of instances between 0 to 100%, + // inclusive. For example, specify 80 for 80%. Percent int64 `json:"percent,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Calculated") to @@ -7246,31 +7623,51 @@ type FixedOrPercent struct { } func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { - type noMethod FixedOrPercent - raw := noMethod(*s) + type NoMethod FixedOrPercent + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ForwardingRule: A ForwardingRule resource. A ForwardingRule resource // specifies which pool of target virtual machines to forward a packet -// to if it matches the given [IPAddress, IPProtocol, ports] tuple. +// to if it matches the given [IPAddress, IPProtocol, ports] tuple. (== +// resource_for beta.forwardingRules ==) (== resource_for +// v1.forwardingRules ==) (== resource_for beta.globalForwardingRules +// ==) (== resource_for v1.globalForwardingRules ==) (== resource_for +// beta.regionForwardingRules ==) (== resource_for +// v1.regionForwardingRules ==) type ForwardingRule struct { // IPAddress: The IP address that this forwarding rule is serving on // behalf of. // - // For global forwarding rules, the address must be a global IP. For - // regional forwarding rules, the address must live in the same region - // as the forwarding rule. By default, this field is empty and an - // ephemeral IPv4 address from the same scope (global or regional) will - // be assigned. A regional forwarding rule supports IPv4 only. A global - // forwarding rule supports either IPv4 or IPv6. + // Addresses are restricted based on the forwarding rule's load + // balancing scheme (EXTERNAL or INTERNAL) and scope (global or + // regional). + // + // When the load balancing scheme is EXTERNAL, for global forwarding + // rules, the address must be a global IP, and for regional forwarding + // rules, the address must live in the same region as the forwarding + // rule. If this field is empty, an ephemeral IPv4 address from the same + // scope (global or regional) will be assigned. A regional forwarding + // rule supports IPv4 only. 
A global forwarding rule supports either + // IPv4 or IPv6. // // When the load balancing scheme is INTERNAL, this can only be an RFC - // 1918 IP address belonging to the network/subnetwork configured for - // the forwarding rule. A reserved address cannot be used. If the field - // is empty, the IP address will be automatically allocated from the - // internal IP range of the subnetwork or network configured for this - // forwarding rule. + // 1918 IP address belonging to the network/subnet configured for the + // forwarding rule. By default, if this field is empty, an ephemeral + // internal IP address will be automatically allocated from the IP range + // of the subnet or network configured for this forwarding rule. + // + // An address can be specified either by a literal IP address or a URL + // reference to an existing Address resource. The following examples are + // all valid: + // - 100.1.2.3 + // - + // https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address + // - projects/project/regions/region/addresses/address + // - regions/region/addresses/address + // - global/addresses/address + // - address IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. 
Valid options @@ -7495,8 +7892,8 @@ type ForwardingRule struct { } func (s *ForwardingRule) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRule - raw := noMethod(*s) + type NoMethod ForwardingRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7548,8 +7945,8 @@ type ForwardingRuleAggregatedList struct { } func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleAggregatedList - raw := noMethod(*s) + type NoMethod ForwardingRuleAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7563,9 +7960,13 @@ type ForwardingRuleAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7576,7 +7977,9 @@ type ForwardingRuleAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7607,8 +8010,8 @@ type ForwardingRuleAggregatedListWarning struct { } func (s *ForwardingRuleAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleAggregatedListWarning - raw := noMethod(*s) + type NoMethod ForwardingRuleAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7644,8 +8047,8 @@ type ForwardingRuleAggregatedListWarningData struct { } func (s *ForwardingRuleAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod 
ForwardingRuleAggregatedListWarningData - raw := noMethod(*s) + type NoMethod ForwardingRuleAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7697,8 +8100,8 @@ type ForwardingRuleList struct { } func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleList - raw := noMethod(*s) + type NoMethod ForwardingRuleList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7712,9 +8115,13 @@ type ForwardingRuleListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7725,7 +8132,9 @@ type ForwardingRuleListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7756,8 +8165,8 @@ type ForwardingRuleListWarning struct { } func (s *ForwardingRuleListWarning) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleListWarning - raw := noMethod(*s) + type NoMethod ForwardingRuleListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7793,8 +8202,8 @@ type ForwardingRuleListWarningData struct { } func (s *ForwardingRuleListWarningData) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleListWarningData - raw := noMethod(*s) + type NoMethod ForwardingRuleListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7820,8 +8229,8 @@ type 
ForwardingRuleReference struct { } func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleReference - raw := noMethod(*s) + type NoMethod ForwardingRuleReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7852,8 +8261,8 @@ type ForwardingRulesScopedList struct { } func (s *ForwardingRulesScopedList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedList - raw := noMethod(*s) + type NoMethod ForwardingRulesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7867,9 +8276,13 @@ type ForwardingRulesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7880,7 +8293,9 @@ type ForwardingRulesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7911,8 +8326,8 @@ type ForwardingRulesScopedListWarning struct { } func (s *ForwardingRulesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedListWarning - raw := noMethod(*s) + type NoMethod ForwardingRulesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7948,8 +8363,8 @@ type ForwardingRulesScopedListWarningData struct { } func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedListWarningData - raw := noMethod(*s) + type 
NoMethod ForwardingRulesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7991,8 +8406,8 @@ type GlobalSetLabelsRequest struct { } func (s *GlobalSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod GlobalSetLabelsRequest - raw := noMethod(*s) + type NoMethod GlobalSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8033,22 +8448,21 @@ type GuestAttributes struct { } func (s *GuestAttributes) MarshalJSON() ([]byte, error) { - type noMethod GuestAttributes - raw := noMethod(*s) + type NoMethod GuestAttributes + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GuestOsFeature: Guest OS features. type GuestOsFeature struct { - // Type: The type of supported feature. Currently only - // VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the - // server might also populate this property with the value WINDOWS to - // indicate that this is a Windows image. This value is purely - // informational and does not enable or disable any features. + // Type: The ID of a supported feature. Read Enabling guest operating + // system features to see a list of available options. 
// // Possible values: // "FEATURE_TYPE_UNSPECIFIED" // "MULTI_IP_SUBNET" + // "SECURE_BOOT" + // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" // "WINDOWS" Type string `json:"type,omitempty"` @@ -8071,8 +8485,8 @@ type GuestOsFeature struct { } func (s *GuestOsFeature) MarshalJSON() ([]byte, error) { - type noMethod GuestOsFeature - raw := noMethod(*s) + type NoMethod GuestOsFeature + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8153,8 +8567,8 @@ type HTTP2HealthCheck struct { } func (s *HTTP2HealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HTTP2HealthCheck - raw := noMethod(*s) + type NoMethod HTTP2HealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8235,8 +8649,8 @@ type HTTPHealthCheck struct { } func (s *HTTPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HTTPHealthCheck - raw := noMethod(*s) + type NoMethod HTTPHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8317,8 +8731,8 @@ type HTTPSHealthCheck struct { } func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HTTPSHealthCheck - raw := noMethod(*s) + type NoMethod HTTPSHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8421,8 +8835,8 @@ type HealthCheck struct { } func (s *HealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HealthCheck - raw := noMethod(*s) + type NoMethod HealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8474,8 +8888,8 @@ type HealthCheckList struct { } func (s *HealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckList - raw := noMethod(*s) + type NoMethod HealthCheckList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8488,9 +8902,13 @@ type HealthCheckListWarning struct { // Possible 
values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -8501,7 +8919,9 @@ type HealthCheckListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -8532,8 +8952,8 @@ type HealthCheckListWarning struct { } func (s *HealthCheckListWarning) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckListWarning - raw := noMethod(*s) + type NoMethod HealthCheckListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8569,8 +8989,8 @@ type HealthCheckListWarningData struct { } func (s *HealthCheckListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckListWarningData - raw := noMethod(*s) + type NoMethod HealthCheckListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8601,8 +9021,8 @@ type HealthCheckReference struct { } func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckReference - raw := noMethod(*s) + type NoMethod HealthCheckReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8641,8 +9061,8 @@ type HealthStatus struct { } func (s *HealthStatus) MarshalJSON() ([]byte, error) { - type noMethod HealthStatus - raw := noMethod(*s) + type NoMethod HealthStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8688,8 +9108,8 @@ type HealthStatusForNetworkEndpoint struct { } 
func (s *HealthStatusForNetworkEndpoint) MarshalJSON() ([]byte, error) { - type noMethod HealthStatusForNetworkEndpoint - raw := noMethod(*s) + type NoMethod HealthStatusForNetworkEndpoint + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8792,8 +9212,8 @@ type Host struct { } func (s *Host) MarshalJSON() ([]byte, error) { - type noMethod Host - raw := noMethod(*s) + type NoMethod Host + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8845,8 +9265,8 @@ type HostAggregatedList struct { } func (s *HostAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod HostAggregatedList - raw := noMethod(*s) + type NoMethod HostAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8860,9 +9280,13 @@ type HostAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -8873,7 +9297,9 @@ type HostAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -8904,8 +9330,8 @@ type HostAggregatedListWarning struct { } func (s *HostAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod HostAggregatedListWarning - raw := noMethod(*s) + type NoMethod HostAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8941,8 +9367,8 @@ type HostAggregatedListWarningData struct { } func (s 
*HostAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HostAggregatedListWarningData - raw := noMethod(*s) + type NoMethod HostAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8995,8 +9421,8 @@ type HostList struct { } func (s *HostList) MarshalJSON() ([]byte, error) { - type noMethod HostList - raw := noMethod(*s) + type NoMethod HostList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9009,9 +9435,13 @@ type HostListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9022,7 +9452,9 @@ type HostListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9053,8 +9485,8 @@ type HostListWarning struct { } func (s *HostListWarning) MarshalJSON() ([]byte, error) { - type noMethod HostListWarning - raw := noMethod(*s) + type NoMethod HostListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9090,8 +9522,8 @@ type HostListWarningData struct { } func (s *HostListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HostListWarningData - raw := noMethod(*s) + type NoMethod HostListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9130,8 +9562,8 @@ type HostRule struct { } func (s *HostRule) MarshalJSON() ([]byte, error) { - type noMethod HostRule - 
raw := noMethod(*s) + type NoMethod HostRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9204,8 +9636,8 @@ type HostType struct { } func (s *HostType) MarshalJSON() ([]byte, error) { - type noMethod HostType - raw := noMethod(*s) + type NoMethod HostType + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9257,8 +9689,8 @@ type HostTypeAggregatedList struct { } func (s *HostTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod HostTypeAggregatedList - raw := noMethod(*s) + type NoMethod HostTypeAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9272,9 +9704,13 @@ type HostTypeAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9285,7 +9721,9 @@ type HostTypeAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9316,8 +9754,8 @@ type HostTypeAggregatedListWarning struct { } func (s *HostTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod HostTypeAggregatedListWarning - raw := noMethod(*s) + type NoMethod HostTypeAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9353,8 +9791,8 @@ type HostTypeAggregatedListWarningData struct { } func (s *HostTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod 
HostTypeAggregatedListWarningData - raw := noMethod(*s) + type NoMethod HostTypeAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9407,8 +9845,8 @@ type HostTypeList struct { } func (s *HostTypeList) MarshalJSON() ([]byte, error) { - type noMethod HostTypeList - raw := noMethod(*s) + type NoMethod HostTypeList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9421,9 +9859,13 @@ type HostTypeListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9434,7 +9876,9 @@ type HostTypeListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9465,8 +9909,8 @@ type HostTypeListWarning struct { } func (s *HostTypeListWarning) MarshalJSON() ([]byte, error) { - type noMethod HostTypeListWarning - raw := noMethod(*s) + type NoMethod HostTypeListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9502,8 +9946,8 @@ type HostTypeListWarningData struct { } func (s *HostTypeListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HostTypeListWarningData - raw := noMethod(*s) + type NoMethod HostTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9533,8 +9977,8 @@ type HostTypesScopedList struct { } func (s *HostTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod 
HostTypesScopedList - raw := noMethod(*s) + type NoMethod HostTypesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9548,9 +9992,13 @@ type HostTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9561,7 +10009,9 @@ type HostTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9592,8 +10042,8 @@ type HostTypesScopedListWarning struct { } func (s *HostTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod HostTypesScopedListWarning - raw := noMethod(*s) + type NoMethod HostTypesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9629,8 +10079,8 @@ type HostTypesScopedListWarningData struct { } func (s *HostTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HostTypesScopedListWarningData - raw := noMethod(*s) + type NoMethod HostTypesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9660,8 +10110,8 @@ type HostsScopedList struct { } func (s *HostsScopedList) MarshalJSON() ([]byte, error) { - type noMethod HostsScopedList - raw := noMethod(*s) + type NoMethod HostsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9675,9 +10125,13 @@ type HostsScopedListWarning struct { // Possible values: // 
"CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9688,7 +10142,9 @@ type HostsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9719,8 +10175,8 @@ type HostsScopedListWarning struct { } func (s *HostsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod HostsScopedListWarning - raw := noMethod(*s) + type NoMethod HostsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9756,8 +10212,46 @@ type HostsScopedListWarningData struct { } func (s *HostsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HostsScopedListWarningData - raw := noMethod(*s) + type NoMethod HostsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HourlyMaintenanceWindow: Time window specified for hourly maintenance +// operations. +type HourlyMaintenanceWindow struct { + // Duration: [Output only] Duration of the time window, automatically + // chosen to be smallest possible in the given scenario. + Duration string `json:"duration,omitempty"` + + // HoursInCycle: Allows to define schedule that runs every nth hour. + HoursInCycle int64 `json:"hoursInCycle,omitempty"` + + // StartTime: Time within the maintenance window to start the + // maintenance operations. It must be in format "HH:MM?, where HH : + // [00-23] and MM : [00-59] GMT. 
+ StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Duration") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Duration") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HourlyMaintenanceWindow) MarshalJSON() ([]byte, error) { + type NoMethod HourlyMaintenanceWindow + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9847,8 +10341,8 @@ type HttpHealthCheck struct { } func (s *HttpHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheck - raw := noMethod(*s) + type NoMethod HttpHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9900,8 +10394,8 @@ type HttpHealthCheckList struct { } func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheckList - raw := noMethod(*s) + type NoMethod HttpHealthCheckList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9915,9 +10409,13 @@ type HttpHealthCheckListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // 
"EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9928,7 +10426,9 @@ type HttpHealthCheckListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9959,8 +10459,8 @@ type HttpHealthCheckListWarning struct { } func (s *HttpHealthCheckListWarning) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheckListWarning - raw := noMethod(*s) + type NoMethod HttpHealthCheckListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9996,8 +10496,8 @@ type HttpHealthCheckListWarningData struct { } func (s *HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheckListWarningData - raw := noMethod(*s) + type NoMethod HttpHealthCheckListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10086,8 +10586,8 @@ type HttpsHealthCheck struct { } func (s *HttpsHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheck - raw := noMethod(*s) + type NoMethod HttpsHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10139,8 +10639,8 @@ type HttpsHealthCheckList struct { } func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheckList - raw := noMethod(*s) + type NoMethod HttpsHealthCheckList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10154,9 +10654,13 @@ type HttpsHealthCheckListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // 
"DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -10167,7 +10671,9 @@ type HttpsHealthCheckListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -10198,8 +10704,8 @@ type HttpsHealthCheckListWarning struct { } func (s *HttpsHealthCheckListWarning) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheckListWarning - raw := noMethod(*s) + type NoMethod HttpsHealthCheckListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10235,12 +10741,13 @@ type HttpsHealthCheckListWarningData struct { } func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheckListWarningData - raw := noMethod(*s) + type NoMethod HttpsHealthCheckListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Image: An Image resource. +// Image: An Image resource. (== resource_for beta.images ==) (== +// resource_for v1.images ==) type Image struct { // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google // Cloud Storage (in bytes). @@ -10268,18 +10775,9 @@ type Image struct { // RFC1035. Family string `json:"family,omitempty"` - // GuestOsFeatures: A list of features to enable on the guest OS. - // Applicable for bootable images only. Currently, only one feature can - // be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows each virtual CPU to - // have its own queue. 
For Windows images, you can only enable - // VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or - // higher. Linux images with kernel versions 3.17 and higher will - // support VIRTIO_SCSI_MULTIQUEUE. - // - // For new Windows images, the server might also populate this field - // with the value WINDOWS, to indicate that this is a Windows image. - // This value is purely informational and does not enable or disable any - // features. + // GuestOsFeatures: A list of features to enable on the guest operating + // system. Applicable only for bootable images. Read Enabling guest + // operating system features to see a list of available options. GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -10380,6 +10878,26 @@ type Image struct { // taken from the current or a previous instance of a given image name. SourceImageId string `json:"sourceImageId,omitempty"` + // SourceSnapshot: URL of the source snapshot used to create this image. + // This can be a full or valid partial URL. You must provide exactly one + // of: + // - this property, or + // - the sourceImage property, or + // - the rawDisk.source property, or + // - the sourceDisk property in order to create an image. + SourceSnapshot string `json:"sourceSnapshot,omitempty"` + + // SourceSnapshotEncryptionKey: The customer-supplied encryption key of + // the source snapshot. Required if the source snapshot is protected by + // a customer-supplied encryption key. + SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` + + // SourceSnapshotId: [Output Only] The ID value of the snapshot used to + // create this image. This value may be used to determine whether the + // snapshot was taken from the current or a previous instance of a given + // snapshot name. 
+ SourceSnapshotId string `json:"sourceSnapshotId,omitempty"` + // SourceType: The type of the image used to create this disk. The // default and only value is RAW // @@ -10421,8 +10939,8 @@ type Image struct { } func (s *Image) MarshalJSON() ([]byte, error) { - type noMethod Image - raw := noMethod(*s) + type NoMethod Image + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10464,8 +10982,8 @@ type ImageRawDisk struct { } func (s *ImageRawDisk) MarshalJSON() ([]byte, error) { - type noMethod ImageRawDisk - raw := noMethod(*s) + type NoMethod ImageRawDisk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10517,8 +11035,8 @@ type ImageList struct { } func (s *ImageList) MarshalJSON() ([]byte, error) { - type noMethod ImageList - raw := noMethod(*s) + type NoMethod ImageList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10531,9 +11049,13 @@ type ImageListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -10544,7 +11066,9 @@ type ImageListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -10575,8 +11099,8 @@ type ImageListWarning struct { } func (s *ImageListWarning) MarshalJSON() ([]byte, error) { - type noMethod ImageListWarning - raw := noMethod(*s) + type NoMethod ImageListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, 
s.NullFields) } @@ -10612,12 +11136,13 @@ type ImageListWarningData struct { } func (s *ImageListWarningData) MarshalJSON() ([]byte, error) { - type noMethod ImageListWarningData - raw := noMethod(*s) + type NoMethod ImageListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Instance: An Instance resource. +// Instance: An Instance resource. (== resource_for beta.instances ==) +// (== resource_for v1.instances ==) type Instance struct { // CanIpForward: Allows this instance to send and receive packets with // non-matching destination or source IPs. This is required if you plan @@ -10727,6 +11252,9 @@ type Instance struct { // machine types. MachineType string `json:"machineType,omitempty"` + // MaintenancePolicies: Maintenance policies applied to this instance. + MaintenancePolicies []string `json:"maintenancePolicies,omitempty"` + // Metadata: The metadata key/value pairs assigned to this instance. // This includes custom metadata and predefined keys. Metadata *Metadata `json:"metadata,omitempty"` @@ -10768,6 +11296,8 @@ type Instance struct { // instance. See Service Accounts for more information. ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + ShieldedVmConfig *ShieldedVmConfig `json:"shieldedVmConfig,omitempty"` + // StartRestricted: [Output Only] Whether a VM has been restricted for // start because Compute Engine has detected suspicious activity. 
StartRestricted bool `json:"startRestricted,omitempty"` @@ -10823,8 +11353,8 @@ type Instance struct { } func (s *Instance) MarshalJSON() ([]byte, error) { - type noMethod Instance - raw := noMethod(*s) + type NoMethod Instance + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10877,8 +11407,8 @@ type InstanceAggregatedList struct { } func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceAggregatedList - raw := noMethod(*s) + type NoMethod InstanceAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10892,9 +11422,13 @@ type InstanceAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -10905,7 +11439,9 @@ type InstanceAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -10936,8 +11472,8 @@ type InstanceAggregatedListWarning struct { } func (s *InstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceAggregatedListWarning - raw := noMethod(*s) + type NoMethod InstanceAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10973,11 +11509,15 @@ type InstanceAggregatedListWarningData struct { } func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceAggregatedListWarningData - raw := noMethod(*s) + type 
NoMethod InstanceAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroup: InstanceGroups (== resource_for beta.instanceGroups +// ==) (== resource_for v1.instanceGroups ==) (== resource_for +// beta.regionInstanceGroups ==) (== resource_for +// v1.regionInstanceGroups ==) type InstanceGroup struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance group in RFC3339 text format. @@ -11062,8 +11602,8 @@ type InstanceGroup struct { } func (s *InstanceGroup) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroup - raw := noMethod(*s) + type NoMethod InstanceGroup + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11116,8 +11656,8 @@ type InstanceGroupAggregatedList struct { } func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupAggregatedList - raw := noMethod(*s) + type NoMethod InstanceGroupAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11131,9 +11671,13 @@ type InstanceGroupAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -11144,7 +11688,9 @@ type InstanceGroupAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -11175,8 +11721,8 @@ type InstanceGroupAggregatedListWarning struct { } func (s 
*InstanceGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupAggregatedListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11212,8 +11758,8 @@ type InstanceGroupAggregatedListWarningData struct { } func (s *InstanceGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupAggregatedListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11266,8 +11812,8 @@ type InstanceGroupList struct { } func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupList - raw := noMethod(*s) + type NoMethod InstanceGroupList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11281,9 +11827,13 @@ type InstanceGroupListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -11294,7 +11844,9 @@ type InstanceGroupListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -11325,8 +11877,8 @@ type InstanceGroupListWarning struct { } func (s *InstanceGroupListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupListWarning + raw := NoMethod(*s) 
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11362,12 +11914,16 @@ type InstanceGroupListWarningData struct { } func (s *InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceGroupManager: An Instance Group Manager resource. +// InstanceGroupManager: An Instance Group Manager resource. (== +// resource_for beta.instanceGroupManagers ==) (== resource_for +// v1.instanceGroupManagers ==) (== resource_for +// beta.regionInstanceGroupManagers ==) (== resource_for +// v1.regionInstanceGroupManagers ==) type InstanceGroupManager struct { Activities *InstanceGroupManagerActivities `json:"activities,omitempty"` @@ -11457,7 +12013,7 @@ type InstanceGroupManager struct { // StatefulPolicy: Stateful configuration for this Instanced Group // Manager - StatefulPolicy *InstanceGroupManagerStatefulPolicy `json:"statefulPolicy,omitempty"` + StatefulPolicy *StatefulPolicy `json:"statefulPolicy,omitempty"` // TargetPools: The URLs for all TargetPool resources to which instances // in the instanceGroup field are added. The target pools automatically @@ -11472,15 +12028,15 @@ type InstanceGroupManager struct { // UpdatePolicy: The update policy for this managed instance group. UpdatePolicy *InstanceGroupManagerUpdatePolicy `json:"updatePolicy,omitempty"` - // Versions: Versions supported by this IGM. User should set this field - // if they need fine-grained control over how many instances in each - // version are run by this IGM. Versions are keyed by instanceTemplate. - // Every instanceTemplate can appear at most once. This field overrides - // instanceTemplate field. If both instanceTemplate and versions are - // set, the user receives a warning. 
"instanceTemplate: X" is - // semantically equivalent to "versions [ { instanceTemplate: X } ]". - // Exactly one version must have targetSize field left unset. Size of - // such a version will be calculated automatically. + // Versions: Specifies the instance templates used by this managed + // instance group to create instances. + // + // Each version is defined by an instanceTemplate. Every template can + // appear at most once per instance group. This field overrides the + // top-level instanceTemplate field. Read more about the relationships + // between these fields. Exactly one version must leave the targetSize + // field unset. That version will be applied to all remaining instances. + // For more information, read about canary updates. Versions []*InstanceGroupManagerVersion `json:"versions,omitempty"` // Zone: [Output Only] The URL of the zone where the managed instance @@ -11509,8 +12065,8 @@ type InstanceGroupManager struct { } func (s *InstanceGroupManager) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManager - raw := noMethod(*s) + type NoMethod InstanceGroupManager + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11589,8 +12145,8 @@ type InstanceGroupManagerActionsSummary struct { } func (s *InstanceGroupManagerActionsSummary) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerActionsSummary - raw := noMethod(*s) + type NoMethod InstanceGroupManagerActionsSummary + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11648,8 +12204,8 @@ type InstanceGroupManagerActivities struct { } func (s *InstanceGroupManagerActivities) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerActivities - raw := noMethod(*s) + type NoMethod InstanceGroupManagerActivities + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11702,8 +12258,8 @@ type InstanceGroupManagerAggregatedList struct { } 
func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAggregatedList - raw := noMethod(*s) + type NoMethod InstanceGroupManagerAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11717,9 +12273,13 @@ type InstanceGroupManagerAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -11730,7 +12290,9 @@ type InstanceGroupManagerAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -11761,8 +12323,8 @@ type InstanceGroupManagerAggregatedListWarning struct { } func (s *InstanceGroupManagerAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAggregatedListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupManagerAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11798,8 +12360,8 @@ type InstanceGroupManagerAggregatedListWarningData struct { } func (s *InstanceGroupManagerAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAggregatedListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupManagerAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11823,6 +12385,13 @@ type InstanceGroupManagerAutoHealingPolicy struct { // at least once. 
By default, a percent value of 100% is used. MaxUnavailable *FixedOrPercent `json:"maxUnavailable,omitempty"` + // Mode: Defines operating mode for this policy. + // + // Possible values: + // "OFF" + // "ON" + Mode string `json:"mode,omitempty"` + // ForceSendFields is a list of field names (e.g. "HealthCheck") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -11841,8 +12410,8 @@ type InstanceGroupManagerAutoHealingPolicy struct { } func (s *InstanceGroupManagerAutoHealingPolicy) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAutoHealingPolicy - raw := noMethod(*s) + type NoMethod InstanceGroupManagerAutoHealingPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11897,8 +12466,8 @@ type InstanceGroupManagerList struct { } func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerList - raw := noMethod(*s) + type NoMethod InstanceGroupManagerList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11912,9 +12481,13 @@ type InstanceGroupManagerListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -11925,7 +12498,9 @@ type InstanceGroupManagerListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -11956,8 +12531,8 @@ type 
InstanceGroupManagerListWarning struct { } func (s *InstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupManagerListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11993,8 +12568,8 @@ type InstanceGroupManagerListWarningData struct { } func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupManagerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12033,81 +12608,40 @@ type InstanceGroupManagerPendingActionsSummary struct { } func (s *InstanceGroupManagerPendingActionsSummary) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerPendingActionsSummary - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagerStatefulPolicy struct { - // PreservedDisks: Disks created on the instances that will be preserved - // on instance delete, resize down, etc. - PreservedDisks []*InstanceGroupManagerStatefulPolicyDiskPolicy `json:"preservedDisks,omitempty"` - - // ForceSendFields is a list of field names (e.g. "PreservedDisks") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "PreservedDisks") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. 
However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagerStatefulPolicy) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerStatefulPolicy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagerStatefulPolicyDiskPolicy struct { - // DeviceName: Device name of the disk to be preserved - DeviceName string `json:"deviceName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DeviceName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DeviceName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagerStatefulPolicyDiskPolicy) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerStatefulPolicyDiskPolicy - raw := noMethod(*s) + type NoMethod InstanceGroupManagerPendingActionsSummary + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagerUpdatePolicy struct { - // MaxSurge: Maximum number of instances that can be created above the - // InstanceGroupManager.targetSize during the update process. By - // default, a fixed value of 1 is used. Using maxSurge > 0 will cause - // instance names to change during the update process. At least one of { - // maxSurge, maxUnavailable } must be greater than 0. + // MaxSurge: The maximum number of instances that can be created above + // the specified targetSize during the update process. By default, a + // fixed value of 1 is used. This value can be either a fixed number or + // a percentage if the instance group has 10 or more instances. If you + // set a percentage, the number of instances will be rounded up if + // necessary. + // + // At least one of either maxSurge or maxUnavailable must be greater + // than 0. Learn more about maxSurge. MaxSurge *FixedOrPercent `json:"maxSurge,omitempty"` - // MaxUnavailable: Maximum number of instances that can be unavailable - // during the update process. The instance is considered available if - // all of the following conditions are satisfied: 1. Instance's status - // is RUNNING. 2. Instance's liveness health check result was observed - // to be HEALTHY at least once. By default, a fixed value of 1 is used. - // At least one of { maxSurge, maxUnavailable } must be greater than 0. + // MaxUnavailable: The maximum number of instances that can be + // unavailable during the update process. An instance is considered + // available if all of the following conditions are satisfied: + // + // + // - The instance's status is RUNNING. 
+ // - If there is a health check on the instance group, the instance's + // liveness health check result must be HEALTHY at least once. If there + // is no health check on the group, then the instance only needs to have + // a status of RUNNING to be considered available. By default, a fixed + // value of 1 is used. This value can be either a fixed number or a + // percentage if the instance group has 10 or more instances. If you set + // a percentage, the number of instances will be rounded up if + // necessary. + // + // At least one of either maxSurge or maxUnavailable must be greater + // than 0. Learn more about maxUnavailable. MaxUnavailable *FixedOrPercent `json:"maxUnavailable,omitempty"` // MinReadySec: Minimum number of seconds to wait for after a newly @@ -12115,8 +12649,13 @@ type InstanceGroupManagerUpdatePolicy struct { // 3600]. MinReadySec int64 `json:"minReadySec,omitempty"` - // MinimalAction: Minimal action to be taken on an instance. The order - // of action types is: RESTART < REPLACE. + // MinimalAction: Minimal action to be taken on an instance. You can + // specify either RESTART to restart existing instances or REPLACE to + // delete and create new instances from the target template. If you + // specify a code>RESTART, the Updater will attempt to perform that + // action only. However, if the Updater determines that the minimal + // action you specify is not enough to perform the update, it might + // perform a more disruptive action. // // Possible values: // "NONE" @@ -12148,8 +12687,8 @@ type InstanceGroupManagerUpdatePolicy struct { } func (s *InstanceGroupManagerUpdatePolicy) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerUpdatePolicy - raw := noMethod(*s) + type NoMethod InstanceGroupManagerUpdatePolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12165,13 +12704,16 @@ type InstanceGroupManagerVersion struct { // favor of 'name'. 
Tag string `json:"tag,omitempty"` - // TargetSize: Intended number of instances that are created from - // instanceTemplate. The final number of instances created from - // instanceTemplate will be equal to: * if expressed as fixed number: - // min(targetSize.fixed, instanceGroupManager.targetSize), * if - // expressed as percent: ceiling(targetSize.percent * - // InstanceGroupManager.targetSize). If unset, this version will handle - // all the remaining instances. + // TargetSize: Specifies the intended number of instances to be created + // from the instanceTemplate. The final number of instances created from + // the template will be equal to: + // - If expressed as a fixed number, the minimum of either + // targetSize.fixed or instanceGroupManager.targetSize is used. + // - if expressed as a percent, the targetSize would be + // (targetSize.percent/100 * InstanceGroupManager.targetSize) If there + // is a remainder, the number is rounded up. If unset, this version + // will update any remaining instances not updated by another version. + // Read Starting a canary update for more information. TargetSize *FixedOrPercent `json:"targetSize,omitempty"` // ForceSendFields is a list of field names (e.g. 
"InstanceTemplate") to @@ -12193,8 +12735,8 @@ type InstanceGroupManagerVersion struct { } func (s *InstanceGroupManagerVersion) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerVersion - raw := noMethod(*s) + type NoMethod InstanceGroupManagerVersion + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12222,8 +12764,8 @@ type InstanceGroupManagersAbandonInstancesRequest struct { } func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersAbandonInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersAbandonInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12274,8 +12816,8 @@ type InstanceGroupManagersApplyUpdatesRequest struct { } func (s *InstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersApplyUpdatesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersApplyUpdatesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12303,8 +12845,8 @@ type InstanceGroupManagersDeleteInstancesRequest struct { } func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersDeleteInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersDeleteInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12333,8 +12875,8 @@ type InstanceGroupManagersDeletePerInstanceConfigsReq struct { } func (s *InstanceGroupManagersDeletePerInstanceConfigsReq) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersDeletePerInstanceConfigsReq - raw := noMethod(*s) + type NoMethod InstanceGroupManagersDeletePerInstanceConfigsReq + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-12374,8 +12916,8 @@ type InstanceGroupManagersListManagedInstancesResponse struct { } func (s *InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersListManagedInstancesResponse - raw := noMethod(*s) + type NoMethod InstanceGroupManagersListManagedInstancesResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12416,8 +12958,8 @@ type InstanceGroupManagersListPerInstanceConfigsResp struct { } func (s *InstanceGroupManagersListPerInstanceConfigsResp) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersListPerInstanceConfigsResp - raw := noMethod(*s) + type NoMethod InstanceGroupManagersListPerInstanceConfigsResp + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12431,9 +12973,13 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12444,7 +12990,9 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -12475,8 +13023,8 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { } func (s *InstanceGroupManagersListPerInstanceConfigsRespWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersListPerInstanceConfigsRespWarning - raw := noMethod(*s) + type NoMethod 
InstanceGroupManagersListPerInstanceConfigsRespWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12512,8 +13060,8 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarningData struct { } func (s *InstanceGroupManagersListPerInstanceConfigsRespWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersListPerInstanceConfigsRespWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupManagersListPerInstanceConfigsRespWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12541,8 +13089,8 @@ type InstanceGroupManagersRecreateInstancesRequest struct { } func (s *InstanceGroupManagersRecreateInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersRecreateInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersRecreateInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12595,8 +13143,8 @@ type InstanceGroupManagersResizeAdvancedRequest struct { } func (s *InstanceGroupManagersResizeAdvancedRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersResizeAdvancedRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersResizeAdvancedRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12629,8 +13177,8 @@ type InstanceGroupManagersScopedList struct { } func (s *InstanceGroupManagersScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedList - raw := noMethod(*s) + type NoMethod InstanceGroupManagersScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12645,9 +13193,13 @@ type InstanceGroupManagersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // 
"EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12658,7 +13210,9 @@ type InstanceGroupManagersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -12689,8 +13243,8 @@ type InstanceGroupManagersScopedListWarning struct { } func (s *InstanceGroupManagersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupManagersScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12726,8 +13280,8 @@ type InstanceGroupManagersScopedListWarningData struct { } func (s *InstanceGroupManagersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupManagersScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12753,8 +13307,8 @@ type InstanceGroupManagersSetAutoHealingRequest struct { } func (s *InstanceGroupManagersSetAutoHealingRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersSetAutoHealingRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersSetAutoHealingRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12783,8 +13337,8 @@ type InstanceGroupManagersSetInstanceTemplateRequest struct { } func (s *InstanceGroupManagersSetInstanceTemplateRequest) MarshalJSON() ([]byte, error) { - type noMethod 
InstanceGroupManagersSetInstanceTemplateRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersSetInstanceTemplateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12822,8 +13376,8 @@ type InstanceGroupManagersSetTargetPoolsRequest struct { } func (s *InstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersSetTargetPoolsRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersSetTargetPoolsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12853,8 +13407,8 @@ type InstanceGroupManagersUpdatePerInstanceConfigsReq struct { } func (s *InstanceGroupManagersUpdatePerInstanceConfigsReq) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersUpdatePerInstanceConfigsReq - raw := noMethod(*s) + type NoMethod InstanceGroupManagersUpdatePerInstanceConfigsReq + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12880,8 +13434,8 @@ type InstanceGroupsAddInstancesRequest struct { } func (s *InstanceGroupsAddInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsAddInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsAddInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12934,8 +13488,8 @@ type InstanceGroupsListInstances struct { } func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstances - raw := noMethod(*s) + type NoMethod InstanceGroupsListInstances + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12949,9 +13503,13 @@ type InstanceGroupsListInstancesWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // 
"EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12962,7 +13520,9 @@ type InstanceGroupsListInstancesWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -12993,8 +13553,8 @@ type InstanceGroupsListInstancesWarning struct { } func (s *InstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstancesWarning - raw := noMethod(*s) + type NoMethod InstanceGroupsListInstancesWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13030,8 +13590,8 @@ type InstanceGroupsListInstancesWarningData struct { } func (s *InstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstancesWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupsListInstancesWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13064,8 +13624,8 @@ type InstanceGroupsListInstancesRequest struct { } func (s *InstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsListInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13091,8 +13651,8 @@ type InstanceGroupsRemoveInstancesRequest struct { } func (s *InstanceGroupsRemoveInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsRemoveInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsRemoveInstancesRequest + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13124,8 +13684,8 @@ type InstanceGroupsScopedList struct { } func (s *InstanceGroupsScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedList - raw := noMethod(*s) + type NoMethod InstanceGroupsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13140,9 +13700,13 @@ type InstanceGroupsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -13153,7 +13717,9 @@ type InstanceGroupsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13184,8 +13750,8 @@ type InstanceGroupsScopedListWarning struct { } func (s *InstanceGroupsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13221,8 +13787,8 @@ type InstanceGroupsScopedListWarningData struct { } func (s *InstanceGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13256,8 +13822,8 @@ type InstanceGroupsSetNamedPortsRequest struct { } func (s 
*InstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsSetNamedPortsRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsSetNamedPortsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13310,8 +13876,8 @@ type InstanceList struct { } func (s *InstanceList) MarshalJSON() ([]byte, error) { - type noMethod InstanceList - raw := noMethod(*s) + type NoMethod InstanceList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13324,9 +13890,13 @@ type InstanceListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -13337,7 +13907,9 @@ type InstanceListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13368,8 +13940,8 @@ type InstanceListWarning struct { } func (s *InstanceListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceListWarning - raw := noMethod(*s) + type NoMethod InstanceListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13405,8 +13977,8 @@ type InstanceListWarningData struct { } func (s *InstanceListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceListWarningData - raw := noMethod(*s) + type NoMethod InstanceListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13459,8 +14031,8 @@ type 
InstanceListReferrers struct { } func (s *InstanceListReferrers) MarshalJSON() ([]byte, error) { - type noMethod InstanceListReferrers - raw := noMethod(*s) + type NoMethod InstanceListReferrers + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13474,9 +14046,13 @@ type InstanceListReferrersWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -13487,7 +14063,9 @@ type InstanceListReferrersWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13518,8 +14096,8 @@ type InstanceListReferrersWarning struct { } func (s *InstanceListReferrersWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceListReferrersWarning - raw := noMethod(*s) + type NoMethod InstanceListReferrersWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13555,8 +14133,8 @@ type InstanceListReferrersWarningData struct { } func (s *InstanceListReferrersWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceListReferrersWarningData - raw := noMethod(*s) + type NoMethod InstanceListReferrersWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13598,8 +14176,8 @@ type InstanceMoveRequest struct { } func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceMoveRequest - raw := noMethod(*s) + type NoMethod InstanceMoveRequest + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13685,8 +14263,8 @@ type InstanceProperties struct { } func (s *InstanceProperties) MarshalJSON() ([]byte, error) { - type noMethod InstanceProperties - raw := noMethod(*s) + type NoMethod InstanceProperties + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13712,12 +14290,13 @@ type InstanceReference struct { } func (s *InstanceReference) MarshalJSON() ([]byte, error) { - type noMethod InstanceReference - raw := noMethod(*s) + type NoMethod InstanceReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceTemplate: An Instance Template resource. +// InstanceTemplate: An Instance Template resource. (== resource_for +// beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==) type InstanceTemplate struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance template in RFC3339 text format. @@ -13759,6 +14338,10 @@ type InstanceTemplate struct { // - projects/project/zones/zone/instances/instance SourceInstance string `json:"sourceInstance,omitempty"` + // SourceInstanceParams: The source instance params to use to create + // this instance template. + SourceInstanceParams *SourceInstanceParams `json:"sourceInstanceParams,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -13782,8 +14365,8 @@ type InstanceTemplate struct { } func (s *InstanceTemplate) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplate - raw := noMethod(*s) + type NoMethod InstanceTemplate + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13836,8 +14419,8 @@ type InstanceTemplateList struct { } func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplateList - raw := noMethod(*s) + type NoMethod InstanceTemplateList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13851,9 +14434,13 @@ type InstanceTemplateListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -13864,7 +14451,9 @@ type InstanceTemplateListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13895,8 +14484,8 @@ type InstanceTemplateListWarning struct { } func (s *InstanceTemplateListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplateListWarning - raw := noMethod(*s) + type NoMethod InstanceTemplateListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13932,8 +14521,8 @@ type InstanceTemplateListWarningData struct { } func (s *InstanceTemplateListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplateListWarningData - raw := noMethod(*s) + type NoMethod 
InstanceTemplateListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13976,8 +14565,108 @@ type InstanceWithNamedPorts struct { } func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { - type noMethod InstanceWithNamedPorts - raw := noMethod(*s) + type NoMethod InstanceWithNamedPorts + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstancesAddMaintenancePoliciesRequest struct { + // MaintenancePolicies: Maintenance policies to be added to this + // instance. + MaintenancePolicies []string `json:"maintenancePolicies,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaintenancePolicies") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaintenancePolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstancesAddMaintenancePoliciesRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesAddMaintenancePoliciesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstancesRemoveMaintenancePoliciesRequest struct { + // MaintenancePolicies: Maintenance policies to be removed from this + // instance. 
+ MaintenancePolicies []string `json:"maintenancePolicies,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaintenancePolicies") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaintenancePolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstancesRemoveMaintenancePoliciesRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesRemoveMaintenancePoliciesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstancesResumeRequest struct { + // Disks: Array of disks associated with this instance that are + // protected with a customer-supplied encryption key. + // + // In order to resume the instance, the disk url and its corresponding + // key must be provided. + // + // If the disk is not protected with a customer-supplied encryption key + // it should not be specified. + Disks []*CustomerEncryptionKeyProtectedDisk `json:"disks,omitempty"` + + // InstanceEncryptionKey: Decrypts data associated with an instance that + // is protected with a customer-supplied encryption key. 
+ // + // If the instance you are starting is protected with a + // customer-supplied encryption key, the correct key must be provided + // otherwise the instance resume will not succeed. + InstanceEncryptionKey *CustomerEncryptionKey `json:"instanceEncryptionKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Disks") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstancesResumeRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesResumeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14007,8 +14696,8 @@ type InstancesScopedList struct { } func (s *InstancesScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedList - raw := noMethod(*s) + type NoMethod InstancesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14022,9 +14711,13 @@ type InstancesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -14035,7 +14728,9 @@ type InstancesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -14066,8 +14761,8 @@ type InstancesScopedListWarning struct { } func (s *InstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedListWarning - raw := noMethod(*s) + type NoMethod InstancesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14103,8 +14798,8 @@ type InstancesScopedListWarningData struct { } func (s *InstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedListWarningData - raw := noMethod(*s) + type NoMethod InstancesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) 
} @@ -14135,8 +14830,8 @@ type InstancesSetLabelsRequest struct { } func (s *InstancesSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetLabelsRequest - raw := noMethod(*s) + type NoMethod InstancesSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14164,8 +14859,8 @@ type InstancesSetMachineResourcesRequest struct { } func (s *InstancesSetMachineResourcesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetMachineResourcesRequest - raw := noMethod(*s) + type NoMethod InstancesSetMachineResourcesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14193,8 +14888,8 @@ type InstancesSetMachineTypeRequest struct { } func (s *InstancesSetMachineTypeRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetMachineTypeRequest - raw := noMethod(*s) + type NoMethod InstancesSetMachineTypeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14222,8 +14917,8 @@ type InstancesSetMinCpuPlatformRequest struct { } func (s *InstancesSetMinCpuPlatformRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetMinCpuPlatformRequest - raw := noMethod(*s) + type NoMethod InstancesSetMinCpuPlatformRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14253,8 +14948,8 @@ type InstancesSetServiceAccountRequest struct { } func (s *InstancesSetServiceAccountRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetServiceAccountRequest - raw := noMethod(*s) + type NoMethod InstancesSetServiceAccountRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14295,32 +14990,28 @@ type InstancesStartWithEncryptionKeyRequest struct { } func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { - type noMethod 
InstancesStartWithEncryptionKeyRequest - raw := noMethod(*s) + type NoMethod InstancesStartWithEncryptionKeyRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Interconnect: Protocol definitions for Mixer API to support -// Interconnect. Next available tag: 23 +// Interconnect: Represents an Interconnects resource. The Interconnects +// resource is a dedicated connection between Google's network and your +// on-premises network. For more information, see the Dedicated +// overview page. (== resource_for v1.interconnects ==) (== resource_for +// beta.interconnects ==) type Interconnect struct { // AdminEnabled: Administrative status of the interconnect. When this is - // set to ?true?, the Interconnect is functional and may carry traffic - // (assuming there are functional InterconnectAttachments and other - // requirements are satisfied). When set to ?false?, no packets will be - // carried over this Interconnect and no BGP routes will be exchanged - // over it. By default, it is set to ?true?. + // set to true, the Interconnect is functional and can carry traffic. + // When set to false, no packets can be carried over the interconnect + // and no BGP routes are exchanged over it. By default, the status is + // set to true. AdminEnabled bool `json:"adminEnabled,omitempty"` // CircuitInfos: [Output Only] List of CircuitInfo objects, that // describe the individual circuits in this LAG. CircuitInfos []*InterconnectCircuitInfo `json:"circuitInfos,omitempty"` - // ConnectionAuthorization: [Output Only] URL to retrieve the Letter Of - // Authority and Customer Facility Assignment (LOA-CFA) documentation - // relating to this Interconnect. This documentation authorizes the - // facility provider to connect to the specified crossconnect ports. - ConnectionAuthorization string `json:"connectionAuthorization,omitempty"` - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -14354,15 +15045,39 @@ type Interconnect struct { // InterconnectAttachments configured to use this Interconnect. InterconnectAttachments []string `json:"interconnectAttachments,omitempty"` + // InterconnectType: Type of interconnect. Note that "IT_PRIVATE" has + // been deprecated in favor of "DEDICATED" + // // Possible values: - // "IT_PARTNER" + // "DEDICATED" // "IT_PRIVATE" + // "PARTNER" InterconnectType string `json:"interconnectType,omitempty"` // Kind: [Output Only] Type of the resource. Always compute#interconnect // for interconnects. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // Interconnect, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels. + // + // To see the latest fingerprint, make a get() request to retrieve an + // Interconnect. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this Interconnect resource. These can be + // later modified by the setLabels method. Each label key/value must + // comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // LinkType: Type of link requested. This field indicates speed of each + // of the links in the bundle, not the entire bundle. Only 10G per link + // is allowed for a dedicated interconnect. Options: Ethernet_10G_LR + // // Possible values: // "LINK_TYPE_ETHERNET_10G_LR" LinkType string `json:"linkType,omitempty"` @@ -14412,6 +15127,14 @@ type Interconnect struct { // SelfLink: [Output Only] Server-defined URL for the resource. 
SelfLink string `json:"selfLink,omitempty"` + // State: [Output Only] The current state of whether or not this + // Interconnect is functional. + // + // Possible values: + // "ACTIVE" + // "UNPROVISIONED" + State string `json:"state,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14434,14 +15157,49 @@ type Interconnect struct { } func (s *Interconnect) MarshalJSON() ([]byte, error) { - type noMethod Interconnect - raw := noMethod(*s) + type NoMethod Interconnect + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachment: Protocol definitions for Mixer API to support -// InterconnectAttachment. Next available tag: 18 +// InterconnectAttachment: Represents an InterconnectAttachment (VLAN +// attachment) resource. For more information, see Creating VLAN +// Attachments. (== resource_for beta.interconnectAttachments ==) (== +// resource_for v1.interconnectAttachments ==) type InterconnectAttachment struct { + // AdminEnabled: Determines whether this Attachment will carry packets. + // Not present for PARTNER_PROVIDER. + AdminEnabled bool `json:"adminEnabled,omitempty"` + + // Possible values: + // "ZONE_1" + // "ZONE_2" + // "ZONE_ANY" + AvailabilityZone string `json:"availabilityZone,omitempty"` + + // Possible values: + // "BPS_100M" + // "BPS_10G" + // "BPS_1G" + // "BPS_200M" + // "BPS_2G" + // "BPS_300M" + // "BPS_400M" + // "BPS_500M" + // "BPS_50M" + // "BPS_5G" + Bandwidth string `json:"bandwidth,omitempty"` + + // CandidateSubnets: Up to 16 candidate prefixes that can be used to + // restrict the allocation of cloudRouterIpAddress and + // customerRouterIpAddress for this attachment. All prefixes must be + // within link-local address space (169.254.0.0/16) and must be /29 or + // shorter (/28, /27, etc). Google will attempt to select an unused /29 + // from the supplied candidate prefix(es). 
The request will fail if all + // possible /29s are in use on Google?s edge. If not supplied, Google + // will randomly select an unused /29 from all of link-local space. + CandidateSubnets []string `json:"candidateSubnets,omitempty"` + // CloudRouterIpAddress: [Output Only] IPv4 address + prefix length to // be configured on Cloud Router Interface for this interconnect // attachment. @@ -14456,8 +15214,7 @@ type InterconnectAttachment struct { // interconnect attachment. CustomerRouterIpAddress string `json:"customerRouterIpAddress,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. + // Description: An optional description of this resource. Description string `json:"description,omitempty"` // GoogleReferenceId: [Output Only] Google reference ID, to be used when @@ -14477,6 +15234,22 @@ type InterconnectAttachment struct { // compute#interconnectAttachment for interconnect attachments. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // InterconnectAttachment, which is essentially a hash of the labels set + // used for optimistic locking. The fingerprint is initially generated + // by Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels. + // + // To see the latest fingerprint, make a get() request to retrieve an + // InterconnectAttachment. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this InterconnectAttachment resource. + // These can be later modified by the setLabels method. Each label + // key/value must comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. 
The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -14494,9 +15267,21 @@ type InterconnectAttachment struct { // "OS_UNPROVISIONED" OperationalStatus string `json:"operationalStatus,omitempty"` - // PrivateInterconnectInfo: [Output Only] Information specific to a - // Private InterconnectAttachment. Only populated if the interconnect - // that this is attached is of type IT_PRIVATE. + // PairingKey: [Output only for type PARTNER. Input only for + // PARTNER_PROVIDER. Not present for DEDICATED]. Opaque string + // identifying an PARTNER attachment. Of the form ?cloud-region/XXXXXX?. + PairingKey string `json:"pairingKey,omitempty"` + + // PartnerAsn: [Output only for PARTNER. Input for PARTNER_PROVIDER. Not + // present for DEDICATED] BGP ASN of the Partner. A layer 3 Partner + // should supply this if they configured BGP on behalf of the customer. + PartnerAsn int64 `json:"partnerAsn,omitempty,string"` + + PartnerMetadata *InterconnectAttachmentPartnerMetadata `json:"partnerMetadata,omitempty"` + + // PrivateInterconnectInfo: [Output Only] Information specific to an + // InterconnectAttachment. This property is populated if the + // interconnect that this is attached to is of type DEDICATED. PrivateInterconnectInfo *InterconnectAttachmentPrivateInfo `json:"privateInterconnectInfo,omitempty"` // Region: [Output Only] URL of the region where the regional @@ -14512,32 +15297,55 @@ type InterconnectAttachment struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // State: [Output Only] The current state of whether or not this + // interconnect attachment is functional. 
+ // + // Possible values: + // "ACTIVE" + // "DEFUNCT" + // "PARTNER_REQUEST_RECEIVED" + // "PENDING_CUSTOMER" + // "PENDING_PARTNER" + // "STATE_UNSPECIFIED" + // "UNPROVISIONED" + State string `json:"state,omitempty"` + + // Possible values: + // "DEDICATED" + // "PARTNER" + // "PARTNER_PROVIDER" + Type string `json:"type,omitempty"` + + // VlanTag8021q: Available only for DEDICATED and PARTNER_PROVIDER. + // Desired VLAN tag for this attachment, in the range 2-4094. This field + // refers to 802.1q VLAN tag, also known as IEEE 802.1Q Only specified + // at creation time. + VlanTag8021q int64 `json:"vlanTag8021q,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. - // "CloudRouterIpAddress") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "AdminEnabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CloudRouterIpAddress") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. 
It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "AdminEnabled") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *InterconnectAttachment) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachment - raw := noMethod(*s) + type NoMethod InterconnectAttachment + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14590,8 +15398,8 @@ type InterconnectAttachmentAggregatedList struct { } func (s *InterconnectAttachmentAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentAggregatedList - raw := noMethod(*s) + type NoMethod InterconnectAttachmentAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14605,9 +15413,13 @@ type InterconnectAttachmentAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -14618,7 +15430,9 @@ type InterconnectAttachmentAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code 
string `json:"code,omitempty"` @@ -14649,8 +15463,8 @@ type InterconnectAttachmentAggregatedListWarning struct { } func (s *InterconnectAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentAggregatedListWarning - raw := noMethod(*s) + type NoMethod InterconnectAttachmentAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14686,8 +15500,8 @@ type InterconnectAttachmentAggregatedListWarningData struct { } func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentAggregatedListWarningData - raw := noMethod(*s) + type NoMethod InterconnectAttachmentAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14742,8 +15556,8 @@ type InterconnectAttachmentList struct { } func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentList - raw := noMethod(*s) + type NoMethod InterconnectAttachmentList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14757,9 +15571,13 @@ type InterconnectAttachmentListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -14770,7 +15588,9 @@ type InterconnectAttachmentListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -14801,8 +15621,8 @@ 
type InterconnectAttachmentListWarning struct { } func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentListWarning - raw := noMethod(*s) + type NoMethod InterconnectAttachmentListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14838,14 +15658,60 @@ type InterconnectAttachmentListWarningData struct { } func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentListWarningData - raw := noMethod(*s) + type NoMethod InterconnectAttachmentListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachmentPrivateInfo: Private information for an -// interconnect attachment when this belongs to an interconnect of type -// IT_PRIVATE. +// InterconnectAttachmentPartnerMetadata: Informational metadata about +// Partner attachments from Partners to display to customers. These +// fields are propagated from PARTNER_PROVIDER attachments to their +// corresponding PARTNER attachments. Only mutable for PARTNER_PROVIDER +// type, output-only for PARTNER, not available for DEDICATED. +type InterconnectAttachmentPartnerMetadata struct { + // InterconnectName: Plain text name of the Interconnect this attachment + // is connected to, as displayed in the Partner?s portal. For instance + // ?Chicago 1?. This value may be validated to match approved Partner + // values. + InterconnectName string `json:"interconnectName,omitempty"` + + // PartnerName: Plain text name of the Partner providing this + // attachment. This value may be validated to match approved Partner + // values. + PartnerName string `json:"partnerName,omitempty"` + + // PortalUrl: URL of the Partner?s portal for this Attachment. Partners + // may customise this to be a deep-link to the specific resource on the + // Partner portal. 
This value may be validated to match approved Partner + // values. + PortalUrl string `json:"portalUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InterconnectName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InterconnectName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentPartnerMetadata) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentPartnerMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentPrivateInfo: Information for an interconnect +// attachment when this belongs to an interconnect of type DEDICATED. 
type InterconnectAttachmentPrivateInfo struct { // Tag8021q: [Output Only] 802.1q encapsulation tag to be used for // traffic between Google and the customer, going to and from this @@ -14870,8 +15736,8 @@ type InterconnectAttachmentPrivateInfo struct { } func (s *InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentPrivateInfo - raw := noMethod(*s) + type NoMethod InterconnectAttachmentPrivateInfo + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14904,8 +15770,8 @@ type InterconnectAttachmentsScopedList struct { } func (s *InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentsScopedList - raw := noMethod(*s) + type NoMethod InterconnectAttachmentsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14919,9 +15785,13 @@ type InterconnectAttachmentsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -14932,7 +15802,9 @@ type InterconnectAttachmentsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -14963,8 +15835,8 @@ type InterconnectAttachmentsScopedListWarning struct { } func (s *InterconnectAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentsScopedListWarning - raw := noMethod(*s) + type NoMethod 
InterconnectAttachmentsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15000,8 +15872,8 @@ type InterconnectAttachmentsScopedListWarningData struct { } func (s *InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentsScopedListWarningData - raw := noMethod(*s) + type NoMethod InterconnectAttachmentsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15009,9 +15881,7 @@ func (s *InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, er // the Customer and Google. CircuitInfo objects are created by Google, // so all fields are output only. Next id: 4 type InterconnectCircuitInfo struct { - // CustomerDemarcId: Customer-side demarc ID for this circuit. This will - // only be set if it was provided by the Customer to Google during - // circuit turn-up. + // CustomerDemarcId: Customer-side demarc ID for this circuit. CustomerDemarcId string `json:"customerDemarcId,omitempty"` // GoogleCircuitId: Google-assigned unique ID for this circuit. 
Assigned @@ -15041,8 +15911,8 @@ type InterconnectCircuitInfo struct { } func (s *InterconnectCircuitInfo) MarshalJSON() ([]byte, error) { - type noMethod InterconnectCircuitInfo - raw := noMethod(*s) + type NoMethod InterconnectCircuitInfo + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15096,8 +15966,8 @@ type InterconnectList struct { } func (s *InterconnectList) MarshalJSON() ([]byte, error) { - type noMethod InterconnectList - raw := noMethod(*s) + type NoMethod InterconnectList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15110,9 +15980,13 @@ type InterconnectListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -15123,7 +15997,9 @@ type InterconnectListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -15154,8 +16030,8 @@ type InterconnectListWarning struct { } func (s *InterconnectListWarning) MarshalJSON() ([]byte, error) { - type noMethod InterconnectListWarning - raw := noMethod(*s) + type NoMethod InterconnectListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15191,37 +16067,44 @@ type InterconnectListWarningData struct { } func (s *InterconnectListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InterconnectListWarningData - raw := noMethod(*s) + type NoMethod InterconnectListWarningData + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectLocation: Protocol definitions for Mixer API to support -// InterconnectLocation. +// InterconnectLocation: Represents an InterconnectLocations resource. +// The InterconnectLocations resource describes the locations where you +// can connect to Google's networks. For more information, see +// Colocation Facilities. type InterconnectLocation struct { // Address: [Output Only] The postal address of the Point of Presence, // each line in the address is separated by a newline character. Address string `json:"address,omitempty"` - // AvailabilityZone: Availability zone for this location. Within a city, - // maintenance will not be simultaneously scheduled in more than one - // availability zone. Example: "zone1" or "zone2". + // AvailabilityZone: [Output Only] Availability zone for this location. + // Within a metropolitan area (metro), maintenance will not be + // simultaneously scheduled in more than one availability zone. Example: + // "zone1" or "zone2". AvailabilityZone string `json:"availabilityZone,omitempty"` - // City: City designator used by the Interconnect UI to locate this - // InterconnectLocation within the Continent. For example: "Chicago, - // IL", "Amsterdam, Netherlands". + // City: [Output Only] Metropolitan area designator that indicates which + // city an interconnect is located. For example: "Chicago, IL", + // "Amsterdam, Netherlands". City string `json:"city,omitempty"` - // Continent: Continent for this location. Used by the location picker - // in the Interconnect UI. + // Continent: [Output Only] Continent for this location. 
// // Possible values: + // "AFRICA" + // "ASIA_PAC" // "C_AFRICA" // "C_ASIA_PAC" // "C_EUROPE" // "C_NORTH_AMERICA" // "C_SOUTH_AMERICA" + // "EUROPE" + // "NORTH_AMERICA" + // "SOUTH_AMERICA" Continent string `json:"continent,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -15284,8 +16167,8 @@ type InterconnectLocation struct { } func (s *InterconnectLocation) MarshalJSON() ([]byte, error) { - type noMethod InterconnectLocation - raw := noMethod(*s) + type NoMethod InterconnectLocation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15339,8 +16222,8 @@ type InterconnectLocationList struct { } func (s *InterconnectLocationList) MarshalJSON() ([]byte, error) { - type noMethod InterconnectLocationList - raw := noMethod(*s) + type NoMethod InterconnectLocationList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15354,9 +16237,13 @@ type InterconnectLocationListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -15367,7 +16254,9 @@ type InterconnectLocationListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -15398,8 +16287,8 @@ type InterconnectLocationListWarning struct { } func (s *InterconnectLocationListWarning) MarshalJSON() ([]byte, error) { - type noMethod InterconnectLocationListWarning - raw := noMethod(*s) + type NoMethod InterconnectLocationListWarning + 
raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15435,8 +16324,8 @@ type InterconnectLocationListWarningData struct { } func (s *InterconnectLocationListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InterconnectLocationListWarningData - raw := noMethod(*s) + type NoMethod InterconnectLocationListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15451,6 +16340,8 @@ type InterconnectLocationRegionInfo struct { // LocationPresence: Identifies the network presence of this location. // // Possible values: + // "GLOBAL" + // "LOCAL_REGION" // "LP_GLOBAL" // "LP_LOCAL_REGION" LocationPresence string `json:"locationPresence,omitempty"` @@ -15458,9 +16349,6 @@ type InterconnectLocationRegionInfo struct { // Region: URL for the region of this location. Region string `json:"region,omitempty"` - // RegionKey: Scope key for the region of this location. - RegionKey string `json:"regionKey,omitempty"` - // ForceSendFields is a list of field names (e.g. "ExpectedRttMs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -15479,8 +16367,8 @@ type InterconnectLocationRegionInfo struct { } func (s *InterconnectLocationRegionInfo) MarshalJSON() ([]byte, error) { - type noMethod InterconnectLocationRegionInfo - raw := noMethod(*s) + type NoMethod InterconnectLocationRegionInfo + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15491,29 +16379,45 @@ type InterconnectOutageNotification struct { // Google-side circuit IDs that will be affected. AffectedCircuits []string `json:"affectedCircuits,omitempty"` - // Description: Short user-visible description of the purpose of the - // outage. + // Description: A description about the purpose of the outage. 
Description string `json:"description,omitempty"` + // EndTime: Scheduled end time for the outage (milliseconds since Unix + // epoch). EndTime int64 `json:"endTime,omitempty,string"` + // IssueType: Form this outage is expected to take. Note that the "IT_" + // versions of this enum have been deprecated in favor of the unprefixed + // values. + // // Possible values: // "IT_OUTAGE" // "IT_PARTIAL_OUTAGE" + // "OUTAGE" + // "PARTIAL_OUTAGE" IssueType string `json:"issueType,omitempty"` // Name: Unique identifier for this outage notification. Name string `json:"name,omitempty"` + // Source: The party that generated this notification. Note that + // "NSRC_GOOGLE" has been deprecated in favor of "GOOGLE" + // // Possible values: + // "GOOGLE" // "NSRC_GOOGLE" Source string `json:"source,omitempty"` - // StartTime: Scheduled start and end times for the outage (milliseconds - // since Unix epoch). + // StartTime: Scheduled start time for the outage (milliseconds since + // Unix epoch). StartTime int64 `json:"startTime,omitempty,string"` + // State: State of this notification. Note that the "NS_" versions of + // this enum have been deprecated in favor of the unprefixed values. 
+ // // Possible values: + // "ACTIVE" + // "CANCELLED" // "NS_ACTIVE" // "NS_CANCELED" State string `json:"state,omitempty"` @@ -15537,8 +16441,8 @@ type InterconnectOutageNotification struct { } func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { - type noMethod InterconnectOutageNotification - raw := noMethod(*s) + type NoMethod InterconnectOutageNotification + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15570,8 +16474,8 @@ type InternalIpOwner struct { } func (s *InternalIpOwner) MarshalJSON() ([]byte, error) { - type noMethod InternalIpOwner - raw := noMethod(*s) + type NoMethod InternalIpOwner + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15624,8 +16528,8 @@ type IpOwnerList struct { } func (s *IpOwnerList) MarshalJSON() ([]byte, error) { - type noMethod IpOwnerList - raw := noMethod(*s) + type NoMethod IpOwnerList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15638,9 +16542,13 @@ type IpOwnerListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -15651,7 +16559,9 @@ type IpOwnerListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -15682,8 +16592,8 @@ type IpOwnerListWarning struct { } func (s *IpOwnerListWarning) MarshalJSON() ([]byte, error) { - type noMethod IpOwnerListWarning - raw := noMethod(*s) + type NoMethod 
IpOwnerListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15719,8 +16629,8 @@ type IpOwnerListWarningData struct { } func (s *IpOwnerListWarningData) MarshalJSON() ([]byte, error) { - type noMethod IpOwnerListWarningData - raw := noMethod(*s) + type NoMethod IpOwnerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15786,8 +16696,8 @@ type License struct { } func (s *License) MarshalJSON() ([]byte, error) { - type noMethod License - raw := noMethod(*s) + type NoMethod License + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15856,8 +16766,8 @@ type LicenseCode struct { } func (s *LicenseCode) MarshalJSON() ([]byte, error) { - type noMethod LicenseCode - raw := noMethod(*s) + type NoMethod LicenseCode + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15887,8 +16797,8 @@ type LicenseCodeLicenseAlias struct { } func (s *LicenseCodeLicenseAlias) MarshalJSON() ([]byte, error) { - type noMethod LicenseCodeLicenseAlias - raw := noMethod(*s) + type NoMethod LicenseCodeLicenseAlias + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15920,8 +16830,8 @@ type LicenseResourceRequirements struct { } func (s *LicenseResourceRequirements) MarshalJSON() ([]byte, error) { - type noMethod LicenseResourceRequirements - raw := noMethod(*s) + type NoMethod LicenseResourceRequirements + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15969,8 +16879,8 @@ type LicensesListResponse struct { } func (s *LicensesListResponse) MarshalJSON() ([]byte, error) { - type noMethod LicensesListResponse - raw := noMethod(*s) + type NoMethod LicensesListResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15984,9 +16894,13 @@ type 
LicensesListResponseWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -15997,7 +16911,9 @@ type LicensesListResponseWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -16028,8 +16944,8 @@ type LicensesListResponseWarning struct { } func (s *LicensesListResponseWarning) MarshalJSON() ([]byte, error) { - type noMethod LicensesListResponseWarning - raw := noMethod(*s) + type NoMethod LicensesListResponseWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16065,8 +16981,8 @@ type LicensesListResponseWarningData struct { } func (s *LicensesListResponseWarningData) MarshalJSON() ([]byte, error) { - type noMethod LicensesListResponseWarningData - raw := noMethod(*s) + type NoMethod LicensesListResponseWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16099,8 +17015,8 @@ type LogConfig struct { } func (s *LogConfig) MarshalJSON() ([]byte, error) { - type noMethod LogConfig - raw := noMethod(*s) + type NoMethod LogConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16138,8 +17054,8 @@ type LogConfigCloudAuditOptions struct { } func (s *LogConfigCloudAuditOptions) MarshalJSON() ([]byte, error) { - type noMethod LogConfigCloudAuditOptions - raw := noMethod(*s) + type NoMethod LogConfigCloudAuditOptions + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16191,8 +17107,8 @@ type LogConfigCounterOptions struct { } func (s *LogConfigCounterOptions) MarshalJSON() ([]byte, error) { - type noMethod LogConfigCounterOptions - raw := noMethod(*s) + type NoMethod LogConfigCounterOptions + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16225,12 +17141,13 @@ type LogConfigDataAccessOptions struct { } func (s *LogConfigDataAccessOptions) MarshalJSON() ([]byte, error) { - type noMethod LogConfigDataAccessOptions - raw := noMethod(*s) + type NoMethod LogConfigDataAccessOptions + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MachineType: A Machine Type resource. +// MachineType: A Machine Type resource. (== resource_for +// v1.machineTypes ==) (== resource_for beta.machineTypes ==) type MachineType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -16305,8 +17222,8 @@ type MachineType struct { } func (s *MachineType) MarshalJSON() ([]byte, error) { - type noMethod MachineType - raw := noMethod(*s) + type NoMethod MachineType + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16359,8 +17276,8 @@ type MachineTypeAggregatedList struct { } func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeAggregatedList - raw := noMethod(*s) + type NoMethod MachineTypeAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16374,9 +17291,13 @@ type MachineTypeAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // 
"NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -16387,7 +17308,9 @@ type MachineTypeAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -16418,8 +17341,8 @@ type MachineTypeAggregatedListWarning struct { } func (s *MachineTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeAggregatedListWarning - raw := noMethod(*s) + type NoMethod MachineTypeAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16455,8 +17378,8 @@ type MachineTypeAggregatedListWarningData struct { } func (s *MachineTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeAggregatedListWarningData - raw := noMethod(*s) + type NoMethod MachineTypeAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16509,8 +17432,8 @@ type MachineTypeList struct { } func (s *MachineTypeList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeList - raw := noMethod(*s) + type NoMethod MachineTypeList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16523,9 +17446,13 @@ type MachineTypeListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -16536,7 +17463,9 @@ type MachineTypeListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" 
// "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -16567,8 +17496,8 @@ type MachineTypeListWarning struct { } func (s *MachineTypeListWarning) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeListWarning - raw := noMethod(*s) + type NoMethod MachineTypeListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16604,8 +17533,8 @@ type MachineTypeListWarningData struct { } func (s *MachineTypeListWarningData) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeListWarningData - raw := noMethod(*s) + type NoMethod MachineTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16636,8 +17565,8 @@ type MachineTypesScopedList struct { } func (s *MachineTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedList - raw := noMethod(*s) + type NoMethod MachineTypesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16651,9 +17580,13 @@ type MachineTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -16664,7 +17597,9 @@ type MachineTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -16695,8 +17630,8 @@ type MachineTypesScopedListWarning struct 
{ } func (s *MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedListWarning - raw := noMethod(*s) + type NoMethod MachineTypesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16732,8 +17667,552 @@ type MachineTypesScopedListWarningData struct { } func (s *MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedListWarningData - raw := noMethod(*s) + type NoMethod MachineTypesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MaintenancePoliciesList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of MaintenancePolicy resources. + Items []*MaintenancePolicy `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource.Always + // compute#maintenancePoliciesList for listsof maintenancePolicies + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *MaintenancePoliciesListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MaintenancePoliciesList) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePoliciesList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MaintenancePoliciesListWarning: [Output Only] Informational warning +// message. +type MaintenancePoliciesListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MaintenancePoliciesListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MaintenancePoliciesListWarning) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePoliciesListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MaintenancePoliciesListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *MaintenancePoliciesListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePoliciesListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MaintenancePoliciesScopedList struct { + // MaintenancePolicies: List of maintenancePolicies contained in this + // scope. + MaintenancePolicies []*MaintenancePolicy `json:"maintenancePolicies,omitempty"` + + // Warning: Informational warning which replaces the list of + // maintenancePolicies when the list is empty. + Warning *MaintenancePoliciesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaintenancePolicies") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaintenancePolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *MaintenancePoliciesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePoliciesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MaintenancePoliciesScopedListWarning: Informational warning which +// replaces the list of maintenancePolicies when the list is empty. 
+type MaintenancePoliciesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MaintenancePoliciesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MaintenancePoliciesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePoliciesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MaintenancePoliciesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MaintenancePoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePoliciesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MaintenancePolicy: A maintenance policy for an instance. This +// specifies what kind of maintenance operations our infrastructure may +// perform on this instance and when. +type MaintenancePolicy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#maintenance_policies for maintenance policies. + Kind string `json:"kind,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The resource name must be 1-63 characters + // long, and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a + // lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. 
+ SelfLink string `json:"selfLink,omitempty"` + + // VmMaintenancePolicy: Maintenance policy applicable to VMs for + // infrastructure maintenance. + VmMaintenancePolicy *VmMaintenancePolicy `json:"vmMaintenancePolicy,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *MaintenancePolicy) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MaintenancePolicyAggregatedList: Contains a list of +// maintenancePolicies. +type MaintenancePolicyAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of MaintenancePolicy resources. + Items map[string]MaintenancePoliciesScopedList `json:"items,omitempty"` + + // Kind: Type of resource. 
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *MaintenancePolicyAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *MaintenancePolicyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePolicyAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MaintenancePolicyAggregatedListWarning: [Output Only] Informational +// warning message. +type MaintenancePolicyAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MaintenancePolicyAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MaintenancePolicyAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePolicyAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MaintenancePolicyAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MaintenancePolicyAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePolicyAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MaintenanceWindow: A maintenance window for VMs and disks. When set, +// we restrict our maintenance operations to this window. +type MaintenanceWindow struct { + DailyMaintenanceWindow *DailyMaintenanceWindow `json:"dailyMaintenanceWindow,omitempty"` + + HourlyMaintenanceWindow *HourlyMaintenanceWindow `json:"hourlyMaintenanceWindow,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DailyMaintenanceWindow") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DailyMaintenanceWindow") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. 
However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *MaintenanceWindow) MarshalJSON() ([]byte, error) { + type NoMethod MaintenanceWindow + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16840,8 +18319,8 @@ type ManagedInstance struct { } func (s *ManagedInstance) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstance - raw := noMethod(*s) + type NoMethod ManagedInstance + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16868,8 +18347,8 @@ type ManagedInstanceLastAttempt struct { } func (s *ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttempt - raw := noMethod(*s) + type NoMethod ManagedInstanceLastAttempt + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16898,8 +18377,8 @@ type ManagedInstanceLastAttemptErrors struct { } func (s *ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttemptErrors - raw := noMethod(*s) + type NoMethod ManagedInstanceLastAttemptErrors + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16932,8 +18411,8 @@ type ManagedInstanceLastAttemptErrorsErrors struct { } func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttemptErrorsErrors - raw := noMethod(*s) + type NoMethod ManagedInstanceLastAttemptErrorsErrors + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16947,7 +18426,7 @@ type ManagedInstanceOverride struct { // // Possible values: // "AUTO_GENERATED" - // "USER_GENERATED" + // "USER_PROVIDED" Origin string 
`json:"origin,omitempty"` // ForceSendFields is a list of field names (e.g. "Disks") to @@ -16968,8 +18447,8 @@ type ManagedInstanceOverride struct { } func (s *ManagedInstanceOverride) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceOverride - raw := noMethod(*s) + type NoMethod ManagedInstanceOverride + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17007,8 +18486,8 @@ type ManagedInstanceOverrideDiskOverride struct { } func (s *ManagedInstanceOverrideDiskOverride) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceOverrideDiskOverride - raw := noMethod(*s) + type NoMethod ManagedInstanceOverrideDiskOverride + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17040,8 +18519,8 @@ type ManagedInstanceVersion struct { } func (s *ManagedInstanceVersion) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceVersion - raw := noMethod(*s) + type NoMethod ManagedInstanceVersion + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17081,8 +18560,8 @@ type Metadata struct { } func (s *Metadata) MarshalJSON() ([]byte, error) { - type noMethod Metadata - raw := noMethod(*s) + type NoMethod Metadata + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17118,8 +18597,8 @@ type MetadataItems struct { } func (s *MetadataItems) MarshalJSON() ([]byte, error) { - type noMethod MetadataItems - raw := noMethod(*s) + type NoMethod MetadataItems + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17150,13 +18629,14 @@ type NamedPort struct { } func (s *NamedPort) MarshalJSON() ([]byte, error) { - type noMethod NamedPort - raw := noMethod(*s) + type NoMethod NamedPort + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Network: Represents a Network resource. 
Read Networks and Firewalls -// for more information. +// for more information. (== resource_for v1.networks ==) (== +// resource_for beta.networks ==) type Network struct { // IPv4Range: The range of internal addresses that are legal on this // network. This range is a CIDR specification, for example: @@ -17255,8 +18735,8 @@ type Network struct { } func (s *Network) MarshalJSON() ([]byte, error) { - type noMethod Network - raw := noMethod(*s) + type NoMethod Network + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17300,8 +18780,8 @@ type NetworkEndpoint struct { } func (s *NetworkEndpoint) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpoint - raw := noMethod(*s) + type NoMethod NetworkEndpoint + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17381,8 +18861,8 @@ type NetworkEndpointGroup struct { } func (s *NetworkEndpointGroup) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroup - raw := noMethod(*s) + type NoMethod NetworkEndpointGroup + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17435,8 +18915,8 @@ type NetworkEndpointGroupAggregatedList struct { } func (s *NetworkEndpointGroupAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupAggregatedList - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17450,9 +18930,13 @@ type NetworkEndpointGroupAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // 
"NEXT_HOP_INSTANCE_NOT_FOUND" @@ -17463,7 +18947,9 @@ type NetworkEndpointGroupAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -17494,8 +18980,8 @@ type NetworkEndpointGroupAggregatedListWarning struct { } func (s *NetworkEndpointGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupAggregatedListWarning - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17531,8 +19017,8 @@ type NetworkEndpointGroupAggregatedListWarningData struct { } func (s *NetworkEndpointGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupAggregatedListWarningData - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17574,8 +19060,8 @@ type NetworkEndpointGroupLbNetworkEndpointGroup struct { } func (s *NetworkEndpointGroupLbNetworkEndpointGroup) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupLbNetworkEndpointGroup - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupLbNetworkEndpointGroup + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17627,8 +19113,8 @@ type NetworkEndpointGroupList struct { } func (s *NetworkEndpointGroupList) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupList - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17642,9 +19128,13 @@ type NetworkEndpointGroupListWarning struct { // Possible values: // 
"CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -17655,7 +19145,9 @@ type NetworkEndpointGroupListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -17686,8 +19178,8 @@ type NetworkEndpointGroupListWarning struct { } func (s *NetworkEndpointGroupListWarning) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupListWarning - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17723,8 +19215,8 @@ type NetworkEndpointGroupListWarningData struct { } func (s *NetworkEndpointGroupListWarningData) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupListWarningData - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17751,8 +19243,8 @@ type NetworkEndpointGroupsAttachEndpointsRequest struct { } func (s *NetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupsAttachEndpointsRequest - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupsAttachEndpointsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17779,8 +19271,8 @@ type NetworkEndpointGroupsDetachEndpointsRequest struct { } func (s *NetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { - type 
noMethod NetworkEndpointGroupsDetachEndpointsRequest - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupsDetachEndpointsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17813,8 +19305,8 @@ type NetworkEndpointGroupsListEndpointsRequest struct { } func (s *NetworkEndpointGroupsListEndpointsRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupsListEndpointsRequest - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupsListEndpointsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17864,8 +19356,8 @@ type NetworkEndpointGroupsListNetworkEndpoints struct { } func (s *NetworkEndpointGroupsListNetworkEndpoints) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupsListNetworkEndpoints - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupsListNetworkEndpoints + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17879,9 +19371,13 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -17892,7 +19388,9 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -17923,8 +19421,8 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { } func (s *NetworkEndpointGroupsListNetworkEndpointsWarning) MarshalJSON() 
([]byte, error) { - type noMethod NetworkEndpointGroupsListNetworkEndpointsWarning - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupsListNetworkEndpointsWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17960,8 +19458,8 @@ type NetworkEndpointGroupsListNetworkEndpointsWarningData struct { } func (s *NetworkEndpointGroupsListNetworkEndpointsWarningData) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupsListNetworkEndpointsWarningData - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupsListNetworkEndpointsWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17994,8 +19492,8 @@ type NetworkEndpointGroupsScopedList struct { } func (s *NetworkEndpointGroupsScopedList) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupsScopedList - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18010,9 +19508,13 @@ type NetworkEndpointGroupsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -18023,7 +19525,9 @@ type NetworkEndpointGroupsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -18054,8 +19558,8 @@ type NetworkEndpointGroupsScopedListWarning struct { } func (s *NetworkEndpointGroupsScopedListWarning) MarshalJSON() 
([]byte, error) { - type noMethod NetworkEndpointGroupsScopedListWarning - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18091,8 +19595,8 @@ type NetworkEndpointGroupsScopedListWarningData struct { } func (s *NetworkEndpointGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointGroupsScopedListWarningData - raw := noMethod(*s) + type NoMethod NetworkEndpointGroupsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18121,8 +19625,8 @@ type NetworkEndpointWithHealthStatus struct { } func (s *NetworkEndpointWithHealthStatus) MarshalJSON() ([]byte, error) { - type noMethod NetworkEndpointWithHealthStatus - raw := noMethod(*s) + type NoMethod NetworkEndpointWithHealthStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18207,8 +19711,8 @@ type NetworkInterface struct { } func (s *NetworkInterface) MarshalJSON() ([]byte, error) { - type noMethod NetworkInterface - raw := noMethod(*s) + type NoMethod NetworkInterface + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18261,8 +19765,8 @@ type NetworkList struct { } func (s *NetworkList) MarshalJSON() ([]byte, error) { - type noMethod NetworkList - raw := noMethod(*s) + type NoMethod NetworkList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18275,9 +19779,13 @@ type NetworkListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // 
"NEXT_HOP_INSTANCE_NOT_FOUND" @@ -18288,7 +19796,9 @@ type NetworkListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -18319,8 +19829,8 @@ type NetworkListWarning struct { } func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { - type noMethod NetworkListWarning - raw := noMethod(*s) + type NoMethod NetworkListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18356,8 +19866,8 @@ type NetworkListWarningData struct { } func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { - type noMethod NetworkListWarningData - raw := noMethod(*s) + type NoMethod NetworkListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18418,8 +19928,8 @@ type NetworkPeering struct { } func (s *NetworkPeering) MarshalJSON() ([]byte, error) { - type noMethod NetworkPeering - raw := noMethod(*s) + type NoMethod NetworkPeering + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18457,8 +19967,8 @@ type NetworkRoutingConfig struct { } func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { - type noMethod NetworkRoutingConfig - raw := noMethod(*s) + type NoMethod NetworkRoutingConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18495,8 +20005,8 @@ type NetworksAddPeeringRequest struct { } func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksAddPeeringRequest - raw := noMethod(*s) + type NoMethod NetworksAddPeeringRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18522,13 +20032,16 @@ type NetworksRemovePeeringRequest struct { } func (s 
*NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksRemovePeeringRequest - raw := noMethod(*s) + type NoMethod NetworksRemovePeeringRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Operation: An Operation resource, used to manage asynchronous API -// requests. +// requests. (== resource_for v1.globalOperations ==) (== resource_for +// beta.globalOperations ==) (== resource_for v1.regionOperations ==) +// (== resource_for beta.regionOperations ==) (== resource_for +// v1.zoneOperations ==) (== resource_for beta.zoneOperations ==) type Operation struct { // ClientOperationId: [Output Only] Reserved for future use. ClientOperationId string `json:"clientOperationId,omitempty"` @@ -18651,8 +20164,8 @@ type Operation struct { } func (s *Operation) MarshalJSON() ([]byte, error) { - type noMethod Operation - raw := noMethod(*s) + type NoMethod Operation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18681,8 +20194,8 @@ type OperationError struct { } func (s *OperationError) MarshalJSON() ([]byte, error) { - type noMethod OperationError - raw := noMethod(*s) + type NoMethod OperationError + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18715,8 +20228,8 @@ type OperationErrorErrors struct { } func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { - type noMethod OperationErrorErrors - raw := noMethod(*s) + type NoMethod OperationErrorErrors + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18728,9 +20241,13 @@ type OperationWarnings struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // 
"NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -18741,7 +20258,9 @@ type OperationWarnings struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -18772,8 +20291,8 @@ type OperationWarnings struct { } func (s *OperationWarnings) MarshalJSON() ([]byte, error) { - type noMethod OperationWarnings - raw := noMethod(*s) + type NoMethod OperationWarnings + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18809,8 +20328,8 @@ type OperationWarningsData struct { } func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { - type noMethod OperationWarningsData - raw := noMethod(*s) + type NoMethod OperationWarningsData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18862,8 +20381,8 @@ type OperationAggregatedList struct { } func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod OperationAggregatedList - raw := noMethod(*s) + type NoMethod OperationAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18877,9 +20396,13 @@ type OperationAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -18890,7 +20413,9 @@ type OperationAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // 
"SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -18921,8 +20446,8 @@ type OperationAggregatedListWarning struct { } func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod OperationAggregatedListWarning - raw := noMethod(*s) + type NoMethod OperationAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18958,8 +20483,8 @@ type OperationAggregatedListWarningData struct { } func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod OperationAggregatedListWarningData - raw := noMethod(*s) + type NoMethod OperationAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19012,8 +20537,8 @@ type OperationList struct { } func (s *OperationList) MarshalJSON() ([]byte, error) { - type noMethod OperationList - raw := noMethod(*s) + type NoMethod OperationList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19026,9 +20551,13 @@ type OperationListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -19039,7 +20568,9 @@ type OperationListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -19070,8 +20601,8 @@ type OperationListWarning struct { } func (s 
*OperationListWarning) MarshalJSON() ([]byte, error) { - type noMethod OperationListWarning - raw := noMethod(*s) + type NoMethod OperationListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19107,8 +20638,8 @@ type OperationListWarningData struct { } func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { - type noMethod OperationListWarningData - raw := noMethod(*s) + type NoMethod OperationListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19138,8 +20669,8 @@ type OperationsScopedList struct { } func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedList - raw := noMethod(*s) + type NoMethod OperationsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19153,9 +20684,13 @@ type OperationsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -19166,7 +20701,9 @@ type OperationsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -19197,8 +20734,8 @@ type OperationsScopedListWarning struct { } func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarning - raw := noMethod(*s) + type NoMethod OperationsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-19234,8 +20771,8 @@ type OperationsScopedListWarningData struct { } func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarningData - raw := noMethod(*s) + type NoMethod OperationsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19283,8 +20820,8 @@ type PathMatcher struct { } func (s *PathMatcher) MarshalJSON() ([]byte, error) { - type noMethod PathMatcher - raw := noMethod(*s) + type NoMethod PathMatcher + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19319,8 +20856,8 @@ type PathRule struct { } func (s *PathRule) MarshalJSON() ([]byte, error) { - type noMethod PathRule - raw := noMethod(*s) + type NoMethod PathRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19349,8 +20886,8 @@ type PerInstanceConfig struct { } func (s *PerInstanceConfig) MarshalJSON() ([]byte, error) { - type noMethod PerInstanceConfig - raw := noMethod(*s) + type NoMethod PerInstanceConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19435,14 +20972,15 @@ type Policy struct { } func (s *Policy) MarshalJSON() ([]byte, error) { - type noMethod Policy - raw := noMethod(*s) + type NoMethod Policy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Project: A Project resource. Projects can only be created in the // Google Cloud Platform Console. Unless marked otherwise, values can -// only be modified in the console. +// only be modified in the console. (== resource_for v1.projects ==) (== +// resource_for beta.projects ==) type Project struct { // CommonInstanceMetadata: Metadata key/value pairs available to all // instances contained in this project. 
See Custom metadata for more @@ -19529,8 +21067,8 @@ type Project struct { } func (s *Project) MarshalJSON() ([]byte, error) { - type noMethod Project - raw := noMethod(*s) + type NoMethod Project + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19556,8 +21094,8 @@ type ProjectsDisableXpnResourceRequest struct { } func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsDisableXpnResourceRequest - raw := noMethod(*s) + type NoMethod ProjectsDisableXpnResourceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19583,8 +21121,8 @@ type ProjectsEnableXpnResourceRequest struct { } func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsEnableXpnResourceRequest - raw := noMethod(*s) + type NoMethod ProjectsEnableXpnResourceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19628,8 +21166,8 @@ type ProjectsGetXpnResources struct { } func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { - type noMethod ProjectsGetXpnResources - raw := noMethod(*s) + type NoMethod ProjectsGetXpnResources + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19657,8 +21195,8 @@ type ProjectsListXpnHostsRequest struct { } func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsListXpnHostsRequest - raw := noMethod(*s) + type NoMethod ProjectsListXpnHostsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19689,8 +21227,8 @@ type ProjectsSetDefaultNetworkTierRequest struct { } func (s *ProjectsSetDefaultNetworkTierRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsSetDefaultNetworkTierRequest - raw := noMethod(*s) + type NoMethod ProjectsSetDefaultNetworkTierRequest + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19716,8 +21254,8 @@ type ProjectsSetDefaultServiceAccountRequest struct { } func (s *ProjectsSetDefaultServiceAccountRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsSetDefaultServiceAccountRequest - raw := noMethod(*s) + type NoMethod ProjectsSetDefaultServiceAccountRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19746,13 +21284,17 @@ type Quota struct { // "INSTANCE_GROUP_MANAGERS" // "INSTANCE_TEMPLATES" // "INTERCONNECTS" + // "INTERNAL_ADDRESSES" // "IN_USE_ADDRESSES" // "LOCAL_SSD_TOTAL_GB" + // "MAINTENANCE_POLICIES" // "NETWORKS" // "NVIDIA_K80_GPUS" // "NVIDIA_P100_GPUS" // "PREEMPTIBLE_CPUS" // "PREEMPTIBLE_LOCAL_SSD_GB" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" // "REGIONAL_AUTOSCALERS" // "REGIONAL_INSTANCE_GROUP_MANAGERS" // "ROUTERS" @@ -19796,19 +21338,19 @@ type Quota struct { } func (s *Quota) MarshalJSON() ([]byte, error) { - type noMethod Quota - raw := noMethod(*s) + type NoMethod Quota + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Quota) UnmarshalJSON(data []byte) error { - type noMethod Quota + type NoMethod Quota var s1 struct { Limit gensupport.JSONFloat64 `json:"limit"` Usage gensupport.JSONFloat64 `json:"usage"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -19852,12 +21394,13 @@ type Reference struct { } func (s *Reference) MarshalJSON() ([]byte, error) { - type noMethod Reference - raw := noMethod(*s) + type NoMethod Reference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Region: Region resource. +// Region: Region resource. 
(== resource_for beta.regions ==) (== +// resource_for v1.regions ==) type Region struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -19921,8 +21464,8 @@ type Region struct { } func (s *Region) MarshalJSON() ([]byte, error) { - type noMethod Region - raw := noMethod(*s) + type NoMethod Region + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19974,8 +21517,8 @@ type RegionAutoscalerList struct { } func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { - type noMethod RegionAutoscalerList - raw := noMethod(*s) + type NoMethod RegionAutoscalerList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19989,9 +21532,13 @@ type RegionAutoscalerListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -20002,7 +21549,9 @@ type RegionAutoscalerListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -20033,8 +21582,8 @@ type RegionAutoscalerListWarning struct { } func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionAutoscalerListWarning - raw := noMethod(*s) + type NoMethod RegionAutoscalerListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20070,8 +21619,8 @@ type RegionAutoscalerListWarningData struct { } func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { 
- type noMethod RegionAutoscalerListWarningData - raw := noMethod(*s) + type NoMethod RegionAutoscalerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20123,8 +21672,8 @@ type RegionDiskTypeList struct { } func (s *RegionDiskTypeList) MarshalJSON() ([]byte, error) { - type noMethod RegionDiskTypeList - raw := noMethod(*s) + type NoMethod RegionDiskTypeList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20138,9 +21687,13 @@ type RegionDiskTypeListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -20151,7 +21704,9 @@ type RegionDiskTypeListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -20182,8 +21737,8 @@ type RegionDiskTypeListWarning struct { } func (s *RegionDiskTypeListWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionDiskTypeListWarning - raw := noMethod(*s) + type NoMethod RegionDiskTypeListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20219,8 +21774,8 @@ type RegionDiskTypeListWarningData struct { } func (s *RegionDiskTypeListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RegionDiskTypeListWarningData - raw := noMethod(*s) + type NoMethod RegionDiskTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20247,8 +21802,8 @@ type 
RegionDisksResizeRequest struct { } func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionDisksResizeRequest - raw := noMethod(*s) + type NoMethod RegionDisksResizeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20300,8 +21855,8 @@ type RegionInstanceGroupList struct { } func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupList - raw := noMethod(*s) + type NoMethod RegionInstanceGroupList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20315,9 +21870,13 @@ type RegionInstanceGroupListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -20328,7 +21887,9 @@ type RegionInstanceGroupListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -20359,8 +21920,8 @@ type RegionInstanceGroupListWarning struct { } func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupListWarning - raw := noMethod(*s) + type NoMethod RegionInstanceGroupListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20396,8 +21957,8 @@ type RegionInstanceGroupListWarningData struct { } func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupListWarningData - raw := noMethod(*s) + type NoMethod 
RegionInstanceGroupListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20426,8 +21987,8 @@ type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { } func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerDeleteInstanceConfigReq - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagerDeleteInstanceConfigReq + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20482,8 +22043,8 @@ type RegionInstanceGroupManagerList struct { } func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerList - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagerList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20497,9 +22058,13 @@ type RegionInstanceGroupManagerListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -20510,7 +22075,9 @@ type RegionInstanceGroupManagerListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -20541,8 +22108,8 @@ type RegionInstanceGroupManagerListWarning struct { } func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerListWarning - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagerListWarning + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20578,8 +22145,8 @@ type RegionInstanceGroupManagerListWarningData struct { } func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerListWarningData - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20609,8 +22176,8 @@ type RegionInstanceGroupManagerUpdateInstanceConfigReq struct { } func (s *RegionInstanceGroupManagerUpdateInstanceConfigReq) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerUpdateInstanceConfigReq - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagerUpdateInstanceConfigReq + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20638,8 +22205,8 @@ type RegionInstanceGroupManagersAbandonInstancesRequest struct { } func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersAbandonInstancesRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersAbandonInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20688,8 +22255,8 @@ type RegionInstanceGroupManagersApplyUpdatesRequest struct { } func (s *RegionInstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersApplyUpdatesRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersApplyUpdatesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20717,8 +22284,8 @@ type RegionInstanceGroupManagersDeleteInstancesRequest struct { } func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod 
RegionInstanceGroupManagersDeleteInstancesRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersDeleteInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20759,8 +22326,8 @@ type RegionInstanceGroupManagersListInstanceConfigsResp struct { } func (s *RegionInstanceGroupManagersListInstanceConfigsResp) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersListInstanceConfigsResp - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersListInstanceConfigsResp + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20774,9 +22341,13 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -20787,7 +22358,9 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -20818,8 +22391,8 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { } func (s *RegionInstanceGroupManagersListInstanceConfigsRespWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersListInstanceConfigsRespWarning - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersListInstanceConfigsRespWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20855,8 +22428,8 @@ type 
RegionInstanceGroupManagersListInstanceConfigsRespWarningData struct { } func (s *RegionInstanceGroupManagersListInstanceConfigsRespWarningData) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersListInstanceConfigsRespWarningData - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersListInstanceConfigsRespWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20895,8 +22468,8 @@ type RegionInstanceGroupManagersListInstancesResponse struct { } func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersListInstancesResponse - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersListInstancesResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20924,8 +22497,8 @@ type RegionInstanceGroupManagersRecreateRequest struct { } func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersRecreateRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersRecreateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20951,8 +22524,8 @@ type RegionInstanceGroupManagersSetAutoHealingRequest struct { } func (s *RegionInstanceGroupManagersSetAutoHealingRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetAutoHealingRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersSetAutoHealingRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20985,8 +22558,8 @@ type RegionInstanceGroupManagersSetTargetPoolsRequest struct { } func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTargetPoolsRequest - raw := noMethod(*s) + type NoMethod 
RegionInstanceGroupManagersSetTargetPoolsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21014,8 +22587,8 @@ type RegionInstanceGroupManagersSetTemplateRequest struct { } func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTemplateRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersSetTemplateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21066,8 +22639,8 @@ type RegionInstanceGroupsListInstances struct { } func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstances - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsListInstances + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21081,9 +22654,13 @@ type RegionInstanceGroupsListInstancesWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -21094,7 +22671,9 @@ type RegionInstanceGroupsListInstancesWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -21125,8 +22704,8 @@ type RegionInstanceGroupsListInstancesWarning struct { } func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstancesWarning - raw := noMethod(*s) + type NoMethod 
RegionInstanceGroupsListInstancesWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21162,8 +22741,8 @@ type RegionInstanceGroupsListInstancesWarningData struct { } func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstancesWarningData - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsListInstancesWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21200,8 +22779,8 @@ type RegionInstanceGroupsListInstancesRequest struct { } func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstancesRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsListInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21235,8 +22814,8 @@ type RegionInstanceGroupsSetNamedPortsRequest struct { } func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsSetNamedPortsRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsSetNamedPortsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21289,8 +22868,8 @@ type RegionList struct { } func (s *RegionList) MarshalJSON() ([]byte, error) { - type noMethod RegionList - raw := noMethod(*s) + type NoMethod RegionList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21303,9 +22882,13 @@ type RegionListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // 
"NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -21316,7 +22899,9 @@ type RegionListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -21347,8 +22932,8 @@ type RegionListWarning struct { } func (s *RegionListWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionListWarning - raw := noMethod(*s) + type NoMethod RegionListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21384,8 +22969,8 @@ type RegionListWarningData struct { } func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RegionListWarningData - raw := noMethod(*s) + type NoMethod RegionListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21420,8 +23005,8 @@ type RegionSetLabelsRequest struct { } func (s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionSetLabelsRequest - raw := noMethod(*s) + type NoMethod RegionSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21462,8 +23047,8 @@ type ResourceCommitment struct { } func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { - type noMethod ResourceCommitment - raw := noMethod(*s) + type NoMethod ResourceCommitment + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21490,8 +23075,8 @@ type ResourceGroupReference struct { } func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { - type noMethod ResourceGroupReference - raw := noMethod(*s) + type NoMethod ResourceGroupReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21512,7 +23097,8 @@ func (s *ResourceGroupReference) 
MarshalJSON() ([]byte, error) { // Compute Engine-operated gateway. // // Packets that do not match any route in the sending instance's routing -// table are dropped. +// table are dropped. (== resource_for beta.routes ==) (== resource_for +// v1.routes ==) type Route struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -21617,8 +23203,8 @@ type Route struct { } func (s *Route) MarshalJSON() ([]byte, error) { - type noMethod Route - raw := noMethod(*s) + type NoMethod Route + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21630,9 +23216,13 @@ type RouteWarnings struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -21643,7 +23233,9 @@ type RouteWarnings struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -21674,8 +23266,8 @@ type RouteWarnings struct { } func (s *RouteWarnings) MarshalJSON() ([]byte, error) { - type noMethod RouteWarnings - raw := noMethod(*s) + type NoMethod RouteWarnings + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21711,8 +23303,8 @@ type RouteWarningsData struct { } func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { - type noMethod RouteWarningsData - raw := noMethod(*s) + type NoMethod RouteWarningsData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21764,8 +23356,8 @@ type RouteList struct { } func (s 
*RouteList) MarshalJSON() ([]byte, error) { - type noMethod RouteList - raw := noMethod(*s) + type NoMethod RouteList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21778,9 +23370,13 @@ type RouteListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -21791,7 +23387,9 @@ type RouteListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -21822,8 +23420,8 @@ type RouteListWarning struct { } func (s *RouteListWarning) MarshalJSON() ([]byte, error) { - type noMethod RouteListWarning - raw := noMethod(*s) + type NoMethod RouteListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21859,8 +23457,8 @@ type RouteListWarningData struct { } func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RouteListWarningData - raw := noMethod(*s) + type NoMethod RouteListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21939,8 +23537,41 @@ type Router struct { } func (s *Router) MarshalJSON() ([]byte, error) { - type noMethod Router - raw := noMethod(*s) + type NoMethod Router + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RouterAdvertisedIpRange: Description-tagged IP ranges for the router +// to advertise. 
+type RouterAdvertisedIpRange struct { + // Description: User-specified description for the IP range. + Description string `json:"description,omitempty"` + + // Range: The IP range to advertise. The value must be a CIDR-formatted + // string. + Range string `json:"range,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RouterAdvertisedIpRange) MarshalJSON() ([]byte, error) { + type NoMethod RouterAdvertisedIpRange + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21972,8 +23603,8 @@ type RouterAdvertisedPrefix struct { } func (s *RouterAdvertisedPrefix) MarshalJSON() ([]byte, error) { - type noMethod RouterAdvertisedPrefix - raw := noMethod(*s) + type NoMethod RouterAdvertisedPrefix + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22025,8 +23656,8 @@ type RouterAggregatedList struct { } func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod RouterAggregatedList - raw := noMethod(*s) + type NoMethod RouterAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22040,9 +23671,13 @@ type RouterAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -22053,7 +23688,9 @@ type RouterAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -22084,8 +23721,8 @@ type RouterAggregatedListWarning struct { } func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod RouterAggregatedListWarning - raw := noMethod(*s) + type NoMethod RouterAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22121,8 
+23758,8 @@ type RouterAggregatedListWarningData struct { } func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RouterAggregatedListWarningData - raw := noMethod(*s) + type NoMethod RouterAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22145,6 +23782,14 @@ type RouterBgp struct { // "ALL_SUBNETS" AdvertisedGroups []string `json:"advertisedGroups,omitempty"` + // AdvertisedIpRanges: User-specified list of individual IP ranges to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and is advertised to all peers of the + // router. These IP ranges will be advertised in addition to any + // specified groups. Leave this field blank to advertise no custom IP + // ranges. + AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` + // AdvertisedPrefixs: User-specified list of individual prefixes to // advertise in custom mode. This field can only be populated if // advertise_mode is CUSTOM and is advertised to all peers of the @@ -22177,8 +23822,8 @@ type RouterBgp struct { } func (s *RouterBgp) MarshalJSON() ([]byte, error) { - type noMethod RouterBgp - raw := noMethod(*s) + type NoMethod RouterBgp + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22202,6 +23847,14 @@ type RouterBgpPeer struct { // "ALL_SUBNETS" AdvertisedGroups []string `json:"advertisedGroups,omitempty"` + // AdvertisedIpRanges: User-specified list of individual IP ranges to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and overrides the list defined for the + // router (in Bgp message). These IP ranges will be advertised in + // addition to any specified groups. Leave this field blank to advertise + // no custom IP ranges. 
+ AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` + // AdvertisedPrefixs: User-specified list of individual prefixes to // advertise in custom mode. This field can only be populated if // advertise_mode is CUSTOM and overrides the list defined for the @@ -22252,8 +23905,8 @@ type RouterBgpPeer struct { } func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { - type noMethod RouterBgpPeer - raw := noMethod(*s) + type NoMethod RouterBgpPeer + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22299,8 +23952,8 @@ type RouterInterface struct { } func (s *RouterInterface) MarshalJSON() ([]byte, error) { - type noMethod RouterInterface - raw := noMethod(*s) + type NoMethod RouterInterface + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22353,8 +24006,8 @@ type RouterList struct { } func (s *RouterList) MarshalJSON() ([]byte, error) { - type noMethod RouterList - raw := noMethod(*s) + type NoMethod RouterList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22367,9 +24020,13 @@ type RouterListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -22380,7 +24037,9 @@ type RouterListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -22411,8 +24070,8 @@ type RouterListWarning struct { } func (s *RouterListWarning) MarshalJSON() ([]byte, 
error) { - type noMethod RouterListWarning - raw := noMethod(*s) + type NoMethod RouterListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22448,8 +24107,8 @@ type RouterListWarningData struct { } func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RouterListWarningData - raw := noMethod(*s) + type NoMethod RouterListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22520,8 +24179,8 @@ type RouterNat struct { } func (s *RouterNat) MarshalJSON() ([]byte, error) { - type noMethod RouterNat - raw := noMethod(*s) + type NoMethod RouterNat + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22567,8 +24226,8 @@ type RouterNatSubnetworkToNat struct { } func (s *RouterNatSubnetworkToNat) MarshalJSON() ([]byte, error) { - type noMethod RouterNatSubnetworkToNat - raw := noMethod(*s) + type NoMethod RouterNatSubnetworkToNat + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22604,8 +24263,8 @@ type RouterStatus struct { } func (s *RouterStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatus - raw := noMethod(*s) + type NoMethod RouterStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22665,8 +24324,8 @@ type RouterStatusBgpPeerStatus struct { } func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusBgpPeerStatus - raw := noMethod(*s) + type NoMethod RouterStatusBgpPeerStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22716,8 +24375,8 @@ type RouterStatusNatStatus struct { } func (s *RouterStatusNatStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusNatStatus - raw := noMethod(*s) + type NoMethod RouterStatusNatStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -22749,8 +24408,8 @@ type RouterStatusResponse struct { } func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusResponse - raw := noMethod(*s) + type NoMethod RouterStatusResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22780,8 +24439,8 @@ type RoutersPreviewResponse struct { } func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { - type noMethod RoutersPreviewResponse - raw := noMethod(*s) + type NoMethod RoutersPreviewResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22811,8 +24470,8 @@ type RoutersScopedList struct { } func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedList - raw := noMethod(*s) + type NoMethod RoutersScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22826,9 +24485,13 @@ type RoutersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -22839,7 +24502,9 @@ type RoutersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -22870,8 +24535,8 @@ type RoutersScopedListWarning struct { } func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarning - raw := noMethod(*s) + type NoMethod RoutersScopedListWarning + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22907,8 +24572,8 @@ type RoutersScopedListWarningData struct { } func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarningData - raw := noMethod(*s) + type NoMethod RoutersScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22925,7 +24590,8 @@ type Rule struct { // "NO_ACTION" Action string `json:"action,omitempty"` - // Conditions: Additional restrictions that must be met + // Conditions: Additional restrictions that must be met. All conditions + // must pass for the rule to match. Conditions []*Condition `json:"conditions,omitempty"` // Description: Human-readable description of the rule. @@ -22967,8 +24633,8 @@ type Rule struct { } func (s *Rule) MarshalJSON() ([]byte, error) { - type noMethod Rule - raw := noMethod(*s) + type NoMethod Rule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23046,8 +24712,8 @@ type SSLHealthCheck struct { } func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod SSLHealthCheck - raw := noMethod(*s) + type NoMethod SSLHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23098,8 +24764,8 @@ type Scheduling struct { } func (s *Scheduling) MarshalJSON() ([]byte, error) { - type noMethod Scheduling - raw := noMethod(*s) + type NoMethod Scheduling + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23174,8 +24840,8 @@ type SecurityPolicy struct { } func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicy - raw := noMethod(*s) + type NoMethod SecurityPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23224,8 +24890,8 @@ type SecurityPolicyList struct { } func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) 
{ - type noMethod SecurityPolicyList - raw := noMethod(*s) + type NoMethod SecurityPolicyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23239,9 +24905,13 @@ type SecurityPolicyListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -23252,7 +24922,9 @@ type SecurityPolicyListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -23283,8 +24955,8 @@ type SecurityPolicyListWarning struct { } func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyListWarning - raw := noMethod(*s) + type NoMethod SecurityPolicyListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23320,8 +24992,8 @@ type SecurityPolicyListWarningData struct { } func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyListWarningData - raw := noMethod(*s) + type NoMethod SecurityPolicyListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23347,8 +25019,8 @@ type SecurityPolicyReference struct { } func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyReference - raw := noMethod(*s) + type NoMethod SecurityPolicyReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23381,6 +25053,10 @@ type 
SecurityPolicyRule struct { // are evaluated in the increasing order of priority. Priority int64 `json:"priority,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -23399,8 +25075,8 @@ type SecurityPolicyRule struct { } func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyRule - raw := noMethod(*s) + type NoMethod SecurityPolicyRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23412,12 +25088,14 @@ type SecurityPolicyRuleMatcher struct { // specified and cannot be specified if versioned_expr is not specified. Config *SecurityPolicyRuleMatcherConfig `json:"config,omitempty"` + // Expr: User defined CEVAL expression. A CEVAL expression is used to + // specify match criteria such as origin.ip, source.region_code and + // contents in the request header. + Expr *Expr `json:"expr,omitempty"` + // SrcIpRanges: CIDR IP address range. Only IPv4 is supported. SrcIpRanges []string `json:"srcIpRanges,omitempty"` - // SrcRegionCodes: Match by country or region code. - SrcRegionCodes []string `json:"srcRegionCodes,omitempty"` - // VersionedExpr: Preconfigured versioned expression. If this field is // specified, config must also be specified. 
Available preconfigured // expressions along with their requirements are: SRC_IPS_V1 - must @@ -23446,8 +25124,8 @@ type SecurityPolicyRuleMatcher struct { } func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyRuleMatcher - raw := noMethod(*s) + type NoMethod SecurityPolicyRuleMatcher + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23473,8 +25151,8 @@ type SecurityPolicyRuleMatcherConfig struct { } func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyRuleMatcherConfig - raw := noMethod(*s) + type NoMethod SecurityPolicyRuleMatcherConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23524,8 +25202,8 @@ type SerialPortOutput struct { } func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { - type noMethod SerialPortOutput - raw := noMethod(*s) + type NoMethod SerialPortOutput + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23556,8 +25234,41 @@ type ServiceAccount struct { } func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type noMethod ServiceAccount - raw := noMethod(*s) + type NoMethod ServiceAccount + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ShieldedVmConfig: A set of Shielded VM options. +type ShieldedVmConfig struct { + // EnableSecureBoot: Defines whether the instance should have secure + // boot enabled. + EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` + + // EnableVtpm: Defines whether the instance should have the TPM enabled. + EnableVtpm bool `json:"enableVtpm,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EnableSecureBoot") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnableSecureBoot") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ShieldedVmConfig) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedVmConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23594,12 +25305,13 @@ type SignedUrlKey struct { } func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { - type noMethod SignedUrlKey - raw := noMethod(*s) + type NoMethod SignedUrlKey + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Snapshot: A persistent disk snapshot resource. +// Snapshot: A persistent disk snapshot resource. (== resource_for +// beta.snapshots ==) (== resource_for v1.snapshots ==) type Snapshot struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -23698,7 +25410,7 @@ type Snapshot struct { // "UPLOADING" Status string `json:"status,omitempty"` - // StorageBytes: [Output Only] A size of the the storage used by the + // StorageBytes: [Output Only] A size of the storage used by the // snapshot. As snapshots share storage, this number is expected to // change with snapshot creation/deletion. 
StorageBytes int64 `json:"storageBytes,omitempty,string"` @@ -23714,6 +25426,10 @@ type Snapshot struct { // "UP_TO_DATE" StorageBytesStatus string `json:"storageBytesStatus,omitempty"` + // StorageLocations: GCS bucket storage location of the snapshot + // (regional or multi-regional). + StorageLocations []string `json:"storageLocations,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -23737,8 +25453,8 @@ type Snapshot struct { } func (s *Snapshot) MarshalJSON() ([]byte, error) { - type noMethod Snapshot - raw := noMethod(*s) + type NoMethod Snapshot + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23790,8 +25506,8 @@ type SnapshotList struct { } func (s *SnapshotList) MarshalJSON() ([]byte, error) { - type noMethod SnapshotList - raw := noMethod(*s) + type NoMethod SnapshotList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23804,9 +25520,13 @@ type SnapshotListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -23817,7 +25537,9 @@ type SnapshotListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -23848,8 +25570,8 @@ type SnapshotListWarning struct { } func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { - type noMethod SnapshotListWarning - raw := noMethod(*s) + type NoMethod SnapshotListWarning + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23885,14 +25607,48 @@ type SnapshotListWarningData struct { } func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SnapshotListWarningData - raw := noMethod(*s) + type NoMethod SnapshotListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SourceInstanceParams: A specification of the parameters to use when +// creating the instance template from a source instance. +type SourceInstanceParams struct { + // DiskConfigs: Attached disks configuration. If not provided, defaults + // are applied: For boot disk and any other R/W disks, new custom images + // will be created from each disk. For read-only disks, they will be + // attached in read-only mode. Local SSD disks will be created as blank + // volumes. + DiskConfigs []*DiskInstantiationConfig `json:"diskConfigs,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskConfigs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DiskConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { + type NoMethod SourceInstanceParams + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SslCertificate: An SslCertificate resource. This resource provides a // mechanism to upload an SSL key and certificate to the load balancer -// to serve secure connections from the user. +// to serve secure connections from the user. (== resource_for +// beta.sslCertificates ==) (== resource_for v1.sslCertificates ==) type SslCertificate struct { // Certificate: A local certificate file. The certificate must be in PEM // format. The certificate chain must be no greater than 5 certs long. @@ -23953,8 +25709,8 @@ type SslCertificate struct { } func (s *SslCertificate) MarshalJSON() ([]byte, error) { - type noMethod SslCertificate - raw := noMethod(*s) + type NoMethod SslCertificate + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24006,8 +25762,8 @@ type SslCertificateList struct { } func (s *SslCertificateList) MarshalJSON() ([]byte, error) { - type noMethod SslCertificateList - raw := noMethod(*s) + type NoMethod SslCertificateList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24021,9 +25777,13 @@ type SslCertificateListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -24034,7 +25794,9 @@ type SslCertificateListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // 
"SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -24065,8 +25827,8 @@ type SslCertificateListWarning struct { } func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { - type noMethod SslCertificateListWarning - raw := noMethod(*s) + type NoMethod SslCertificateListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24102,8 +25864,8 @@ type SslCertificateListWarningData struct { } func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SslCertificateListWarningData - raw := noMethod(*s) + type NoMethod SslCertificateListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24155,8 +25917,8 @@ type SslPoliciesList struct { } func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { - type noMethod SslPoliciesList - raw := noMethod(*s) + type NoMethod SslPoliciesList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24169,9 +25931,13 @@ type SslPoliciesListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -24182,7 +25948,9 @@ type SslPoliciesListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -24213,8 +25981,8 @@ type SslPoliciesListWarning struct { } func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { - type noMethod 
SslPoliciesListWarning - raw := noMethod(*s) + type NoMethod SslPoliciesListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24250,8 +26018,8 @@ type SslPoliciesListWarningData struct { } func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SslPoliciesListWarningData - raw := noMethod(*s) + type NoMethod SslPoliciesListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24280,8 +26048,8 @@ type SslPoliciesListAvailableFeaturesResponse struct { } func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { - type noMethod SslPoliciesListAvailableFeaturesResponse - raw := noMethod(*s) + type NoMethod SslPoliciesListAvailableFeaturesResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24387,8 +26155,8 @@ type SslPolicy struct { } func (s *SslPolicy) MarshalJSON() ([]byte, error) { - type noMethod SslPolicy - raw := noMethod(*s) + type NoMethod SslPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24400,9 +26168,13 @@ type SslPolicyWarnings struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -24413,7 +26185,9 @@ type SslPolicyWarnings struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -24444,8 +26218,8 @@ type SslPolicyWarnings struct { } func (s 
*SslPolicyWarnings) MarshalJSON() ([]byte, error) { - type noMethod SslPolicyWarnings - raw := noMethod(*s) + type NoMethod SslPolicyWarnings + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24481,8 +26255,8 @@ type SslPolicyWarningsData struct { } func (s *SslPolicyWarningsData) MarshalJSON() ([]byte, error) { - type noMethod SslPolicyWarningsData - raw := noMethod(*s) + type NoMethod SslPolicyWarningsData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24510,12 +26284,97 @@ type SslPolicyReference struct { } func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { - type noMethod SslPolicyReference - raw := noMethod(*s) + type NoMethod SslPolicyReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Subnetwork: A Subnetwork resource. +type StatefulPolicy struct { + PreservedResources *StatefulPolicyPreservedResources `json:"preservedResources,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PreservedResources") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PreservedResources") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *StatefulPolicy) MarshalJSON() ([]byte, error) { + type NoMethod StatefulPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type StatefulPolicyPreservedDisk struct { + // DeviceName: Device name of the disk to be preserved + DeviceName string `json:"deviceName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DeviceName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DeviceName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StatefulPolicyPreservedDisk) MarshalJSON() ([]byte, error) { + type NoMethod StatefulPolicyPreservedDisk + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// StatefulPolicyPreservedResources: Configuration of all preserved +// resources. +type StatefulPolicyPreservedResources struct { + // Disks: Disks created on the instances that will be preserved on + // instance delete, resize down, etc. + Disks []*StatefulPolicyPreservedDisk `json:"disks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Disks") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StatefulPolicyPreservedResources) MarshalJSON() ([]byte, error) { + type NoMethod StatefulPolicyPreservedResources + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Subnetwork: A Subnetwork resource. (== resource_for beta.subnetworks +// ==) (== resource_for v1.subnetworks ==) type Subnetwork struct { // AllowSubnetCidrRoutesOverlap: Whether this subnetwork can conflict // with static routes. 
Setting this to true allows this subnetwork's @@ -24627,8 +26486,8 @@ type Subnetwork struct { } func (s *Subnetwork) MarshalJSON() ([]byte, error) { - type noMethod Subnetwork - raw := noMethod(*s) + type NoMethod Subnetwork + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24680,8 +26539,8 @@ type SubnetworkAggregatedList struct { } func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkAggregatedList - raw := noMethod(*s) + type NoMethod SubnetworkAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24695,9 +26554,13 @@ type SubnetworkAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -24708,7 +26571,9 @@ type SubnetworkAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -24739,8 +26604,8 @@ type SubnetworkAggregatedListWarning struct { } func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkAggregatedListWarning - raw := noMethod(*s) + type NoMethod SubnetworkAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24776,8 +26641,8 @@ type SubnetworkAggregatedListWarningData struct { } func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkAggregatedListWarningData - raw := 
noMethod(*s) + type NoMethod SubnetworkAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24830,8 +26695,8 @@ type SubnetworkList struct { } func (s *SubnetworkList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkList - raw := noMethod(*s) + type NoMethod SubnetworkList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24844,9 +26709,13 @@ type SubnetworkListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -24857,7 +26726,9 @@ type SubnetworkListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -24888,8 +26759,8 @@ type SubnetworkListWarning struct { } func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkListWarning - raw := noMethod(*s) + type NoMethod SubnetworkListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24925,8 +26796,8 @@ type SubnetworkListWarningData struct { } func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkListWarningData - raw := noMethod(*s) + type NoMethod SubnetworkListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24964,8 +26835,8 @@ type SubnetworkSecondaryRange struct { } func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type 
noMethod SubnetworkSecondaryRange - raw := noMethod(*s) + type NoMethod SubnetworkSecondaryRange + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24995,8 +26866,8 @@ type SubnetworksExpandIpCidrRangeRequest struct { } func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksExpandIpCidrRangeRequest - raw := noMethod(*s) + type NoMethod SubnetworksExpandIpCidrRangeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25026,8 +26897,8 @@ type SubnetworksScopedList struct { } func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedList - raw := noMethod(*s) + type NoMethod SubnetworksScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25041,9 +26912,13 @@ type SubnetworksScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -25054,7 +26929,9 @@ type SubnetworksScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -25085,8 +26962,8 @@ type SubnetworksScopedListWarning struct { } func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarning - raw := noMethod(*s) + type NoMethod SubnetworksScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-25122,8 +26999,8 @@ type SubnetworksScopedListWarningData struct { } func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarningData - raw := noMethod(*s) + type NoMethod SubnetworksScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25150,8 +27027,8 @@ type SubnetworksSetPrivateIpGoogleAccessRequest struct { } func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksSetPrivateIpGoogleAccessRequest - raw := noMethod(*s) + type NoMethod SubnetworksSetPrivateIpGoogleAccessRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25229,8 +27106,8 @@ type TCPHealthCheck struct { } func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod TCPHealthCheck - raw := noMethod(*s) + type NoMethod TCPHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25268,13 +27145,14 @@ type Tags struct { } func (s *Tags) MarshalJSON() ([]byte, error) { - type noMethod Tags - raw := noMethod(*s) + type NoMethod Tags + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetHttpProxy: A TargetHttpProxy resource. This resource defines an -// HTTP proxy. +// HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== +// resource_for v1.targetHttpProxies ==) type TargetHttpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -25331,8 +27209,8 @@ type TargetHttpProxy struct { } func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxy - raw := noMethod(*s) + type NoMethod TargetHttpProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25385,8 +27263,8 @@ type TargetHttpProxyList struct { } func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxyList - raw := noMethod(*s) + type NoMethod TargetHttpProxyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25400,9 +27278,13 @@ type TargetHttpProxyListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -25413,7 +27295,9 @@ type TargetHttpProxyListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -25444,8 +27328,8 @@ type TargetHttpProxyListWarning struct { } func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxyListWarning - raw := noMethod(*s) + type NoMethod TargetHttpProxyListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25481,8 +27365,8 @@ type TargetHttpProxyListWarningData struct { } func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxyListWarningData - raw := noMethod(*s) + type NoMethod TargetHttpProxyListWarningData + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25513,8 +27397,8 @@ type TargetHttpsProxiesSetQuicOverrideRequest struct { } func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxiesSetQuicOverrideRequest - raw := noMethod(*s) + type NoMethod TargetHttpsProxiesSetQuicOverrideRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25543,13 +27427,14 @@ type TargetHttpsProxiesSetSslCertificatesRequest struct { } func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxiesSetSslCertificatesRequest - raw := noMethod(*s) + type NoMethod TargetHttpsProxiesSetSslCertificatesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines -// an HTTPS proxy. +// an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== +// resource_for v1.targetHttpsProxies ==) type TargetHttpsProxy struct { // ClientSslPolicy: URL to ClientSslPolicy resource which controls the // set of allowed SSL versions and ciphers. 
@@ -25640,8 +27525,8 @@ type TargetHttpsProxy struct { } func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxy - raw := noMethod(*s) + type NoMethod TargetHttpsProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25694,8 +27579,8 @@ type TargetHttpsProxyList struct { } func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxyList - raw := noMethod(*s) + type NoMethod TargetHttpsProxyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25709,9 +27594,13 @@ type TargetHttpsProxyListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -25722,7 +27611,9 @@ type TargetHttpsProxyListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -25753,8 +27644,8 @@ type TargetHttpsProxyListWarning struct { } func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxyListWarning - raw := noMethod(*s) + type NoMethod TargetHttpsProxyListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25790,13 +27681,15 @@ type TargetHttpsProxyListWarningData struct { } func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxyListWarningData - raw := noMethod(*s) + type NoMethod TargetHttpsProxyListWarningData + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetInstance: A TargetInstance resource. This resource defines an -// endpoint instance that terminates traffic of certain protocols. +// endpoint instance that terminates traffic of certain protocols. (== +// resource_for beta.targetInstances ==) (== resource_for +// v1.targetInstances ==) type TargetInstance struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -25870,8 +27763,8 @@ type TargetInstance struct { } func (s *TargetInstance) MarshalJSON() ([]byte, error) { - type noMethod TargetInstance - raw := noMethod(*s) + type NoMethod TargetInstance + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25922,8 +27815,8 @@ type TargetInstanceAggregatedList struct { } func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceAggregatedList - raw := noMethod(*s) + type NoMethod TargetInstanceAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -25937,9 +27830,13 @@ type TargetInstanceAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -25950,7 +27847,9 @@ type TargetInstanceAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -25981,8 +27880,8 @@ type TargetInstanceAggregatedListWarning struct { } func (s 
*TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceAggregatedListWarning - raw := noMethod(*s) + type NoMethod TargetInstanceAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26018,8 +27917,8 @@ type TargetInstanceAggregatedListWarningData struct { } func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceAggregatedListWarningData - raw := noMethod(*s) + type NoMethod TargetInstanceAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26071,8 +27970,8 @@ type TargetInstanceList struct { } func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceList - raw := noMethod(*s) + type NoMethod TargetInstanceList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26086,9 +27985,13 @@ type TargetInstanceListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -26099,7 +28002,9 @@ type TargetInstanceListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -26130,8 +28035,8 @@ type TargetInstanceListWarning struct { } func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceListWarning - raw := noMethod(*s) + type NoMethod TargetInstanceListWarning + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26167,8 +28072,8 @@ type TargetInstanceListWarningData struct { } func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceListWarningData - raw := noMethod(*s) + type NoMethod TargetInstanceListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26199,8 +28104,8 @@ type TargetInstancesScopedList struct { } func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedList - raw := noMethod(*s) + type NoMethod TargetInstancesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26214,9 +28119,13 @@ type TargetInstancesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -26227,7 +28136,9 @@ type TargetInstancesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -26258,8 +28169,8 @@ type TargetInstancesScopedListWarning struct { } func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarning - raw := noMethod(*s) + type NoMethod TargetInstancesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26295,14 +28206,15 @@ type TargetInstancesScopedListWarningData struct { } func (s 
*TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarningData - raw := noMethod(*s) + type NoMethod TargetInstancesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetPool: A TargetPool resource. This resource defines a pool of // instances, an associated HttpHealthCheck resource, and the fallback -// target pool. +// target pool. (== resource_for beta.targetPools ==) (== resource_for +// v1.targetPools ==) type TargetPool struct { // BackupPool: This field is applicable only when the containing target // pool is serving a forwarding rule as the primary pool, and its @@ -26424,18 +28336,18 @@ type TargetPool struct { } func (s *TargetPool) MarshalJSON() ([]byte, error) { - type noMethod TargetPool - raw := noMethod(*s) + type NoMethod TargetPool + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *TargetPool) UnmarshalJSON(data []byte) error { - type noMethod TargetPool + type NoMethod TargetPool var s1 struct { FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -26492,8 +28404,8 @@ type TargetPoolAggregatedList struct { } func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolAggregatedList - raw := noMethod(*s) + type NoMethod TargetPoolAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26507,9 +28419,13 @@ type TargetPoolAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // 
"MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -26520,7 +28436,9 @@ type TargetPoolAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -26551,8 +28469,8 @@ type TargetPoolAggregatedListWarning struct { } func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolAggregatedListWarning - raw := noMethod(*s) + type NoMethod TargetPoolAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26588,8 +28506,8 @@ type TargetPoolAggregatedListWarningData struct { } func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolAggregatedListWarningData - raw := noMethod(*s) + type NoMethod TargetPoolAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26623,8 +28541,8 @@ type TargetPoolInstanceHealth struct { } func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolInstanceHealth - raw := noMethod(*s) + type NoMethod TargetPoolInstanceHealth + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26677,8 +28595,8 @@ type TargetPoolList struct { } func (s *TargetPoolList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolList - raw := noMethod(*s) + type NoMethod TargetPoolList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26691,9 +28609,13 @@ type TargetPoolListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // 
"EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -26704,7 +28626,9 @@ type TargetPoolListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -26735,8 +28659,8 @@ type TargetPoolListWarning struct { } func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolListWarning - raw := noMethod(*s) + type NoMethod TargetPoolListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26772,8 +28696,8 @@ type TargetPoolListWarningData struct { } func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolListWarningData - raw := noMethod(*s) + type NoMethod TargetPoolListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26799,8 +28723,8 @@ type TargetPoolsAddHealthCheckRequest struct { } func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddHealthCheckRequest - raw := noMethod(*s) + type NoMethod TargetPoolsAddHealthCheckRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26832,8 +28756,8 @@ type TargetPoolsAddInstanceRequest struct { } func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddInstanceRequest - raw := noMethod(*s) + type NoMethod TargetPoolsAddInstanceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26864,8 +28788,8 @@ type TargetPoolsRemoveHealthCheckRequest struct { } func (s 
*TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveHealthCheckRequest - raw := noMethod(*s) + type NoMethod TargetPoolsRemoveHealthCheckRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26891,8 +28815,8 @@ type TargetPoolsRemoveInstanceRequest struct { } func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveInstanceRequest - raw := noMethod(*s) + type NoMethod TargetPoolsRemoveInstanceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26922,8 +28846,8 @@ type TargetPoolsScopedList struct { } func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedList - raw := noMethod(*s) + type NoMethod TargetPoolsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -26937,9 +28861,13 @@ type TargetPoolsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -26950,7 +28878,9 @@ type TargetPoolsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -26981,8 +28911,8 @@ type TargetPoolsScopedListWarning struct { } func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarning - raw := noMethod(*s) + type NoMethod TargetPoolsScopedListWarning + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27018,8 +28948,8 @@ type TargetPoolsScopedListWarningData struct { } func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarningData - raw := noMethod(*s) + type NoMethod TargetPoolsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27044,8 +28974,8 @@ type TargetReference struct { } func (s *TargetReference) MarshalJSON() ([]byte, error) { - type noMethod TargetReference - raw := noMethod(*s) + type NoMethod TargetReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27072,8 +29002,8 @@ type TargetSslProxiesSetBackendServiceRequest struct { } func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetBackendServiceRequest - raw := noMethod(*s) + type NoMethod TargetSslProxiesSetBackendServiceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27104,8 +29034,8 @@ type TargetSslProxiesSetProxyHeaderRequest struct { } func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetProxyHeaderRequest - raw := noMethod(*s) + type NoMethod TargetSslProxiesSetProxyHeaderRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27134,13 +29064,14 @@ type TargetSslProxiesSetSslCertificatesRequest struct { } func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetSslCertificatesRequest - raw := noMethod(*s) + type NoMethod TargetSslProxiesSetSslCertificatesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetSslProxy: A TargetSslProxy resource. This resource defines an -// SSL proxy. 
+// SSL proxy. (== resource_for beta.targetSslProxies ==) (== +// resource_for v1.targetSslProxies ==) type TargetSslProxy struct { // ClientSslPolicy: URL to ClientSslPolicy resource which controls the // set of allowed SSL versions and ciphers. @@ -27219,8 +29150,8 @@ type TargetSslProxy struct { } func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxy - raw := noMethod(*s) + type NoMethod TargetSslProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27272,8 +29203,8 @@ type TargetSslProxyList struct { } func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxyList - raw := noMethod(*s) + type NoMethod TargetSslProxyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27287,9 +29218,13 @@ type TargetSslProxyListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -27300,7 +29235,9 @@ type TargetSslProxyListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -27331,8 +29268,8 @@ type TargetSslProxyListWarning struct { } func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxyListWarning - raw := noMethod(*s) + type NoMethod TargetSslProxyListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27368,8 +29305,8 @@ type TargetSslProxyListWarningData 
struct { } func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxyListWarningData - raw := noMethod(*s) + type NoMethod TargetSslProxyListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27396,8 +29333,8 @@ type TargetTcpProxiesSetBackendServiceRequest struct { } func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetBackendServiceRequest - raw := noMethod(*s) + type NoMethod TargetTcpProxiesSetBackendServiceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27428,13 +29365,14 @@ type TargetTcpProxiesSetProxyHeaderRequest struct { } func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetProxyHeaderRequest - raw := noMethod(*s) + type NoMethod TargetTcpProxiesSetProxyHeaderRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetTcpProxy: A TargetTcpProxy resource. This resource defines a -// TCP proxy. +// TCP proxy. (== resource_for beta.targetTcpProxies ==) (== +// resource_for v1.targetTcpProxies ==) type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -27499,8 +29437,8 @@ type TargetTcpProxy struct { } func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxy - raw := noMethod(*s) + type NoMethod TargetTcpProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27552,8 +29490,8 @@ type TargetTcpProxyList struct { } func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxyList - raw := noMethod(*s) + type NoMethod TargetTcpProxyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27567,9 +29505,13 @@ type TargetTcpProxyListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -27580,7 +29522,9 @@ type TargetTcpProxyListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -27611,8 +29555,8 @@ type TargetTcpProxyListWarning struct { } func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxyListWarning - raw := noMethod(*s) + type NoMethod TargetTcpProxyListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27648,12 +29592,14 @@ type TargetTcpProxyListWarningData struct { } func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxyListWarningData - raw := noMethod(*s) + type NoMethod TargetTcpProxyListWarningData + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGateway: Represents a Target VPN gateway resource. +// TargetVpnGateway: Represents a Target VPN gateway resource. (== +// resource_for beta.targetVpnGateways ==) (== resource_for +// v1.targetVpnGateways ==) type TargetVpnGateway struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -27749,8 +29695,8 @@ type TargetVpnGateway struct { } func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGateway - raw := noMethod(*s) + type NoMethod TargetVpnGateway + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27802,8 +29748,8 @@ type TargetVpnGatewayAggregatedList struct { } func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayAggregatedList - raw := noMethod(*s) + type NoMethod TargetVpnGatewayAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27817,9 +29763,13 @@ type TargetVpnGatewayAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -27830,7 +29780,9 @@ type TargetVpnGatewayAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -27861,8 +29813,8 @@ type TargetVpnGatewayAggregatedListWarning struct { } func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { - 
type noMethod TargetVpnGatewayAggregatedListWarning - raw := noMethod(*s) + type NoMethod TargetVpnGatewayAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27898,8 +29850,8 @@ type TargetVpnGatewayAggregatedListWarningData struct { } func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayAggregatedListWarningData - raw := noMethod(*s) + type NoMethod TargetVpnGatewayAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27952,8 +29904,8 @@ type TargetVpnGatewayList struct { } func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayList - raw := noMethod(*s) + type NoMethod TargetVpnGatewayList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -27967,9 +29919,13 @@ type TargetVpnGatewayListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -27980,7 +29936,9 @@ type TargetVpnGatewayListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -28011,8 +29969,8 @@ type TargetVpnGatewayListWarning struct { } func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayListWarning - raw := noMethod(*s) + type NoMethod TargetVpnGatewayListWarning + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28048,8 +30006,8 @@ type TargetVpnGatewayListWarningData struct { } func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayListWarningData - raw := noMethod(*s) + type NoMethod TargetVpnGatewayListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28081,8 +30039,8 @@ type TargetVpnGatewaysScopedList struct { } func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedList - raw := noMethod(*s) + type NoMethod TargetVpnGatewaysScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28096,9 +30054,13 @@ type TargetVpnGatewaysScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -28109,7 +30071,9 @@ type TargetVpnGatewaysScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -28140,8 +30104,8 @@ type TargetVpnGatewaysScopedListWarning struct { } func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarning - raw := noMethod(*s) + type NoMethod TargetVpnGatewaysScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28177,8 +30141,8 @@ type TargetVpnGatewaysScopedListWarningData struct { } func 
(s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarningData - raw := noMethod(*s) + type NoMethod TargetVpnGatewaysScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28209,8 +30173,8 @@ type TestFailure struct { } func (s *TestFailure) MarshalJSON() ([]byte, error) { - type noMethod TestFailure - raw := noMethod(*s) + type NoMethod TestFailure + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28238,8 +30202,8 @@ type TestPermissionsRequest struct { } func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { - type noMethod TestPermissionsRequest - raw := noMethod(*s) + type NoMethod TestPermissionsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28270,8 +30234,8 @@ type TestPermissionsResponse struct { } func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { - type noMethod TestPermissionsResponse - raw := noMethod(*s) + type NoMethod TestPermissionsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28311,8 +30275,8 @@ type UDPHealthCheck struct { } func (s *UDPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod UDPHealthCheck - raw := noMethod(*s) + type NoMethod UDPHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28391,8 +30355,8 @@ type UrlMap struct { } func (s *UrlMap) MarshalJSON() ([]byte, error) { - type noMethod UrlMap - raw := noMethod(*s) + type NoMethod UrlMap + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28444,8 +30408,8 @@ type UrlMapList struct { } func (s *UrlMapList) MarshalJSON() ([]byte, error) { - type noMethod UrlMapList - raw := noMethod(*s) + type NoMethod UrlMapList + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28458,9 +30422,13 @@ type UrlMapListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -28471,7 +30439,9 @@ type UrlMapListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -28502,8 +30472,8 @@ type UrlMapListWarning struct { } func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { - type noMethod UrlMapListWarning - raw := noMethod(*s) + type NoMethod UrlMapListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28539,8 +30509,8 @@ type UrlMapListWarningData struct { } func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { - type noMethod UrlMapListWarningData - raw := noMethod(*s) + type NoMethod UrlMapListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28565,8 +30535,8 @@ type UrlMapReference struct { } func (s *UrlMapReference) MarshalJSON() ([]byte, error) { - type noMethod UrlMapReference - raw := noMethod(*s) + type NoMethod UrlMapReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28603,8 +30573,8 @@ type UrlMapTest struct { } func (s *UrlMapTest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapTest - raw := noMethod(*s) + type NoMethod UrlMapTest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-28642,8 +30612,8 @@ type UrlMapValidationResult struct { } func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { - type noMethod UrlMapValidationResult - raw := noMethod(*s) + type NoMethod UrlMapValidationResult + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28669,8 +30639,8 @@ type UrlMapsValidateRequest struct { } func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateRequest - raw := noMethod(*s) + type NoMethod UrlMapsValidateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28699,18 +30669,25 @@ type UrlMapsValidateResponse struct { } func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateResponse - raw := noMethod(*s) + type NoMethod UrlMapsValidateResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // UsableSubnetwork: Subnetwork which the current user has // compute.subnetworks.use permission on. type UsableSubnetwork struct { + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Network: Network URL. + Network string `json:"network,omitempty"` + // Subnetwork: Subnetwork URL. Subnetwork string `json:"subnetwork,omitempty"` - // ForceSendFields is a list of field names (e.g. "Subnetwork") to + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28718,18 +30695,18 @@ type UsableSubnetwork struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Subnetwork") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { - type noMethod UsableSubnetwork - raw := noMethod(*s) + type NoMethod UsableSubnetwork + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28782,8 +30759,8 @@ type UsableSubnetworksAggregatedList struct { } func (s *UsableSubnetworksAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod UsableSubnetworksAggregatedList - raw := noMethod(*s) + type NoMethod UsableSubnetworksAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28797,9 +30774,13 @@ type UsableSubnetworksAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -28810,7 +30791,9 @@ type UsableSubnetworksAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // 
"SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -28841,8 +30824,8 @@ type UsableSubnetworksAggregatedListWarning struct { } func (s *UsableSubnetworksAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod UsableSubnetworksAggregatedListWarning - raw := noMethod(*s) + type NoMethod UsableSubnetworksAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28878,8 +30861,8 @@ type UsableSubnetworksAggregatedListWarningData struct { } func (s *UsableSubnetworksAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod UsableSubnetworksAggregatedListWarningData - raw := noMethod(*s) + type NoMethod UsableSubnetworksAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -28921,11 +30904,45 @@ type UsageExportLocation struct { } func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { - type noMethod UsageExportLocation - raw := noMethod(*s) + type NoMethod UsageExportLocation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// VmMaintenancePolicy: A Vm Maintenance Policy specifies what kind of +// infrastructure maintenance we are allowed to perform on this VM and +// when. +type VmMaintenancePolicy struct { + // MaintenanceWindow: Maintenance windows that are applied to VMs + // covered by this policy. + MaintenanceWindow *MaintenanceWindow `json:"maintenanceWindow,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaintenanceWindow") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaintenanceWindow") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VmMaintenancePolicy) MarshalJSON() ([]byte, error) { + type NoMethod VmMaintenancePolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnel: VPN tunnel resource. (== resource_for beta.vpnTunnels ==) +// (== resource_for v1.vpnTunnels ==) type VpnTunnel struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -29052,8 +31069,8 @@ type VpnTunnel struct { } func (s *VpnTunnel) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnel - raw := noMethod(*s) + type NoMethod VpnTunnel + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29105,8 +31122,8 @@ type VpnTunnelAggregatedList struct { } func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelAggregatedList - raw := noMethod(*s) + type NoMethod VpnTunnelAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29120,9 +31137,13 @@ type VpnTunnelAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -29133,7 +31154,9 
@@ type VpnTunnelAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -29164,8 +31187,8 @@ type VpnTunnelAggregatedListWarning struct { } func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelAggregatedListWarning - raw := noMethod(*s) + type NoMethod VpnTunnelAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29201,8 +31224,8 @@ type VpnTunnelAggregatedListWarningData struct { } func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelAggregatedListWarningData - raw := noMethod(*s) + type NoMethod VpnTunnelAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29255,8 +31278,8 @@ type VpnTunnelList struct { } func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelList - raw := noMethod(*s) + type NoMethod VpnTunnelList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29269,9 +31292,13 @@ type VpnTunnelListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -29282,7 +31309,9 @@ type VpnTunnelListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // 
"UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -29313,8 +31342,8 @@ type VpnTunnelListWarning struct { } func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelListWarning - raw := noMethod(*s) + type NoMethod VpnTunnelListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29350,8 +31379,8 @@ type VpnTunnelListWarningData struct { } func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelListWarningData - raw := noMethod(*s) + type NoMethod VpnTunnelListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29381,8 +31410,8 @@ type VpnTunnelsScopedList struct { } func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedList - raw := noMethod(*s) + type NoMethod VpnTunnelsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29396,9 +31425,13 @@ type VpnTunnelsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -29409,7 +31442,9 @@ type VpnTunnelsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -29440,8 +31475,8 @@ type VpnTunnelsScopedListWarning struct { } func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarning - raw := 
noMethod(*s) + type NoMethod VpnTunnelsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29477,8 +31512,8 @@ type VpnTunnelsScopedListWarningData struct { } func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarningData - raw := noMethod(*s) + type NoMethod VpnTunnelsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29530,8 +31565,8 @@ type XpnHostList struct { } func (s *XpnHostList) MarshalJSON() ([]byte, error) { - type noMethod XpnHostList - raw := noMethod(*s) + type NoMethod XpnHostList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29544,9 +31579,13 @@ type XpnHostListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -29557,7 +31596,9 @@ type XpnHostListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -29588,8 +31629,8 @@ type XpnHostListWarning struct { } func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { - type noMethod XpnHostListWarning - raw := noMethod(*s) + type NoMethod XpnHostListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29625,8 +31666,8 @@ type XpnHostListWarningData struct { } func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { - type noMethod 
XpnHostListWarningData - raw := noMethod(*s) + type NoMethod XpnHostListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29662,12 +31703,13 @@ type XpnResourceId struct { } func (s *XpnResourceId) MarshalJSON() ([]byte, error) { - type noMethod XpnResourceId - raw := noMethod(*s) + type NoMethod XpnResourceId + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Zone: A Zone resource. +// Zone: A Zone resource. (== resource_for beta.zones ==) (== +// resource_for v1.zones ==) type Zone struct { // AvailableCpuPlatforms: [Output Only] Available cpu/platform // selections for the zone. @@ -29733,8 +31775,8 @@ type Zone struct { } func (s *Zone) MarshalJSON() ([]byte, error) { - type noMethod Zone - raw := noMethod(*s) + type NoMethod Zone + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29786,8 +31828,8 @@ type ZoneList struct { } func (s *ZoneList) MarshalJSON() ([]byte, error) { - type noMethod ZoneList - raw := noMethod(*s) + type NoMethod ZoneList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29800,9 +31842,13 @@ type ZoneListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -29813,7 +31859,9 @@ type ZoneListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -29844,8 +31892,8 @@ type ZoneListWarning 
struct { } func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { - type noMethod ZoneListWarning - raw := noMethod(*s) + type NoMethod ZoneListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29881,8 +31929,8 @@ type ZoneListWarningData struct { } func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { - type noMethod ZoneListWarningData - raw := noMethod(*s) + type NoMethod ZoneListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -29917,8 +31965,8 @@ type ZoneSetLabelsRequest struct { } func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod ZoneSetLabelsRequest - raw := noMethod(*s) + type NoMethod ZoneSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -30096,7 +32144,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30287,7 +32335,7 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30514,7 +32562,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30774,7 +32822,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { 
return nil, err } return ret, nil @@ -30970,7 +33018,7 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31137,7 +33185,7 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31310,7 +33358,7 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31537,7 +33585,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31748,7 +33796,7 @@ func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31911,7 +33959,7 @@ func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32137,7 +34185,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32332,7 +34380,7 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32499,7 +34547,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32671,7 +34719,7 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32897,7 +34945,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33113,7 +35161,7 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33274,7 +35322,7 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33456,7 +35504,7 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33632,7 +35680,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33794,7 +35842,7 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33956,7 +36004,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34118,7 +36166,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34270,7 +36318,7 @@ func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34431,7 +36479,7 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34646,7 +36694,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) 
(*BackendBucke }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34847,7 +36895,7 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34999,7 +37047,7 @@ func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35146,7 +37194,7 @@ func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35313,7 +37361,7 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35484,7 +37532,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35706,7 +37754,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35899,7 +37947,7 @@ func (c 
*BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36061,7 +38109,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36224,7 +38272,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36370,7 +38418,7 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36536,7 +38584,7 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36752,7 +38800,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36957,7 +39005,7 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return 
ret, nil @@ -37128,7 +39176,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37279,7 +39327,7 @@ func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37449,7 +39497,7 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37601,7 +39649,7 @@ func (c *ClientSslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37820,7 +39868,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38012,7 +40060,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38240,7 +40288,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); 
err != nil { return nil, err } return ret, nil @@ -38500,7 +40548,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38709,7 +40757,7 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38891,7 +40939,7 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39058,7 +41106,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39221,7 +41269,7 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39404,7 +41452,7 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39636,7 +41684,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39734,7 +41782,8 @@ type DisksResizeCall struct { header_ http.Header } -// Resize: Resizes the specified persistent disk. +// Resize: Resizes the specified persistent disk. You can only increase +// the size of the disk. func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -39846,12 +41895,12 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Resizes the specified persistent disk.", + // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", // "httpMethod": "POST", // "id": "compute.disks.resize", // "parameterOrder": [ @@ -40009,7 +42058,7 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40186,7 +42235,7 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40349,7 +42398,7 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40517,7 +42566,7 @@ func (c 
*FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40673,7 +42722,7 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40835,7 +42884,7 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41051,7 +43100,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41253,7 +43302,7 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41405,7 +43454,7 @@ func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41575,7 +43624,7 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return 
ret, nil @@ -41798,7 +43847,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41994,7 +44043,7 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42161,7 +44210,7 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42334,7 +44383,7 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42561,7 +44610,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42774,7 +44823,7 @@ func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42956,7 +45005,7 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); 
err != nil { return nil, err } return ret, nil @@ -43139,7 +45188,7 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43302,7 +45351,7 @@ func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43470,7 +45519,7 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43627,7 +45676,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43789,7 +45838,7 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44004,7 +46053,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44185,7 +46234,7 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44332,7 +46381,7 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44492,7 +46541,7 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44649,7 +46698,7 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44811,7 +46860,7 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45027,7 +47076,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45229,7 +47278,7 @@ func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45381,7 +47430,7 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45548,7 +47597,7 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45700,7 +47749,7 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45919,7 +47968,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46217,7 +48266,7 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46434,7 +48483,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46626,7 +48675,7 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46782,7 +48831,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) 
(*HealthCheck, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46943,7 +48992,7 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47158,7 +49207,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47359,7 +49408,7 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47511,7 +49560,7 @@ func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47678,7 +49727,7 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47900,7 +49949,7 @@ func (c *HostTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostTyp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48091,7 +50140,7 @@ func (c 
*HostTypesGetCall) Do(opts ...googleapi.CallOption) (*HostType, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48318,7 +50367,7 @@ func (c *HostTypesListCall) Do(opts ...googleapi.CallOption) (*HostTypeList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48577,7 +50626,7 @@ func (c *HostsAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostAggrega }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48772,7 +50821,7 @@ func (c *HostsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48939,7 +50988,7 @@ func (c *HostsGetCall) Do(opts ...googleapi.CallOption) (*Host, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49102,7 +51151,7 @@ func (c *HostsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49274,7 +51323,7 @@ func (c *HostsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ 
-49499,7 +51548,7 @@ func (c *HostsListCall) Do(opts ...googleapi.CallOption) (*HostList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49691,7 +51740,7 @@ func (c *HostsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49849,7 +51898,7 @@ func (c *HostsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50017,7 +52066,7 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50174,7 +52223,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50336,7 +52385,7 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50552,7 +52601,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { 
return nil, err } return ret, nil @@ -50754,7 +52803,7 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50906,7 +52955,7 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51074,7 +53123,7 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51237,7 +53286,7 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51393,7 +53442,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51554,7 +53603,7 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51769,7 +53818,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51970,7 +54019,7 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52122,7 +54171,7 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52289,7 +54338,7 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52453,7 +54502,7 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52624,7 +54673,7 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52784,7 +54833,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52936,7 +54985,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53088,7 +55137,7 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53257,7 +55306,7 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53316,8 +55365,8 @@ type ImagesListCall struct { header_ http.Header } -// List: Retrieves the list of private images available to the specified -// project. Private images are images you create that belong to your +// List: Retrieves the list of custom images available to the specified +// project. Custom images are images you create that belong to your // project. This method does not get any images that belong to other // projects, including publicly-available images, like Debian 8. If you // want to get a list of publicly-available images, use this method to @@ -53486,12 +55535,12 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", // "id": "compute.images.list", // "parameterOrder": [ @@ -53667,7 +55716,7 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53814,7 +55863,7 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53961,7 +56010,7 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54146,7 +56195,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54376,7 +56425,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54561,7 +56610,7 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54731,7 +56780,7 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54922,7 +56971,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55083,7 +57132,7 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55247,7 +57296,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55425,7 +57474,7 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55650,7 +57699,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) 
(*Insta }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55864,7 +57913,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56120,7 +58169,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56342,7 +58391,7 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56535,7 +58584,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56718,7 +58767,7 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56916,7 +58965,7 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57095,7 +59144,7 @@ func (c 
*InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57276,7 +59325,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57461,7 +59510,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57622,7 +59671,7 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57803,7 +59852,7 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57985,7 +60034,7 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58166,7 +60215,7 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return 
ret, nil @@ -58395,7 +60444,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58593,7 +60642,7 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58758,7 +60807,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58928,7 +60977,7 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59153,7 +61202,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59410,7 +61459,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59634,7 +61683,7 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); 
err != nil { return nil, err } return ret, nil @@ -59813,7 +61862,7 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59974,7 +62023,7 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60146,7 +62195,7 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60303,7 +62352,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60468,7 +62517,7 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60684,7 +62733,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60865,7 +62914,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61037,7 +63086,7 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61100,6 +63149,189 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } +// method id "compute.instances.addMaintenancePolicies": + +type InstancesAddMaintenancePoliciesCall struct { + s *Service + project string + zone string + instance string + instancesaddmaintenancepoliciesrequest *InstancesAddMaintenancePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddMaintenancePolicies: Adds existing maintenance policies to an +// instance. You can only add one policy right now which will be applied +// to this instance for scheduling live migrations. +func (r *InstancesService) AddMaintenancePolicies(project string, zone string, instance string, instancesaddmaintenancepoliciesrequest *InstancesAddMaintenancePoliciesRequest) *InstancesAddMaintenancePoliciesCall { + c := &InstancesAddMaintenancePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancesaddmaintenancepoliciesrequest = instancesaddmaintenancepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesAddMaintenancePoliciesCall) RequestId(requestId string) *InstancesAddMaintenancePoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAddMaintenancePoliciesCall) Fields(s ...googleapi.Field) *InstancesAddMaintenancePoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesAddMaintenancePoliciesCall) Context(ctx context.Context) *InstancesAddMaintenancePoliciesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstancesAddMaintenancePoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesAddMaintenancePoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesaddmaintenancepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addMaintenancePolicies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.addMaintenancePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAddMaintenancePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds existing maintenance policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.", + // "httpMethod": "POST", + // "id": "compute.instances.addMaintenancePolicies", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/addMaintenancePolicies", + // "request": { + // "$ref": "InstancesAddMaintenancePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.aggregatedList": type InstancesAggregatedListCall struct { @@ -61275,7 +63507,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61489,7 +63721,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61670,7 +63902,7 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61845,7 +64077,7 @@ 
func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62032,7 +64264,7 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62207,7 +64439,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62376,7 +64608,7 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62544,7 +64776,7 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62726,7 +64958,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62930,7 +65162,7 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } 
return ret, nil @@ -63162,7 +65394,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63428,7 +65660,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63521,6 +65753,188 @@ func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*Instance } } +// method id "compute.instances.removeMaintenancePolicies": + +type InstancesRemoveMaintenancePoliciesCall struct { + s *Service + project string + zone string + instance string + instancesremovemaintenancepoliciesrequest *InstancesRemoveMaintenancePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveMaintenancePolicies: Removes maintenance policies from an +// instance. +func (r *InstancesService) RemoveMaintenancePolicies(project string, zone string, instance string, instancesremovemaintenancepoliciesrequest *InstancesRemoveMaintenancePoliciesRequest) *InstancesRemoveMaintenancePoliciesCall { + c := &InstancesRemoveMaintenancePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancesremovemaintenancepoliciesrequest = instancesremovemaintenancepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesRemoveMaintenancePoliciesCall) RequestId(requestId string) *InstancesRemoveMaintenancePoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesRemoveMaintenancePoliciesCall) Fields(s ...googleapi.Field) *InstancesRemoveMaintenancePoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesRemoveMaintenancePoliciesCall) Context(ctx context.Context) *InstancesRemoveMaintenancePoliciesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstancesRemoveMaintenancePoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesRemoveMaintenancePoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesremovemaintenancepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/removeMaintenancePolicies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.removeMaintenancePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesRemoveMaintenancePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes maintenance policies from an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.removeMaintenancePolicies", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/removeMaintenancePolicies", + // "request": { + // "$ref": "InstancesRemoveMaintenancePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.reset": type InstancesResetCall struct { @@ -63641,7 +66055,7 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63694,6 +66108,188 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } +// method id "compute.instances.resume": + +type InstancesResumeCall struct { + s *Service + project string + zone string + instance string + instancesresumerequest *InstancesResumeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Resume: Resumes an instance that was suspended using the +// instances().suspend method. 
+func (r *InstancesService) Resume(project string, zone string, instance string, instancesresumerequest *InstancesResumeRequest) *InstancesResumeCall { + c := &InstancesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancesresumerequest = instancesresumerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesResumeCall) RequestId(requestId string) *InstancesResumeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesResumeCall) Fields(s ...googleapi.Field) *InstancesResumeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesResumeCall) Context(ctx context.Context) *InstancesResumeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstancesResumeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesresumerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/resume") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.resume" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Resumes an instance that was suspended using the instances().suspend method.", + // "httpMethod": "POST", + // "id": "compute.instances.resume", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to resume.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/resume", + // "request": { + // "$ref": "InstancesResumeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.setDeletionProtection": type InstancesSetDeletionProtectionCall struct { @@ -63819,7 +66415,7 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64000,7 +66596,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64175,7 +66771,7 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64352,7 +66948,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64534,7 +67130,7 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64716,7 +67312,7 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64899,7 +67495,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65083,7 +67679,7 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65265,7 +67861,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65448,7 +68044,7 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65631,7 +68227,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65787,7 +68383,7 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65956,7 +68552,7 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66136,7 +68732,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66206,11 +68802,10 @@ type InstancesStopCall struct { // Stop: Stops a running instance, shutting it down cleanly, and allows // you to restart the instance at a later time. Stopped instances do not -// incur per-minute, virtual machine usage charges while they are -// stopped, but any resources that the virtual machine is using, such as -// persistent disks and static IP addresses, will continue to be charged -// until they are deleted. For more information, see Stopping an -// instance. +// incur VM usage charges while they are stopped. However, resources +// that the VM is using, such as persistent disks and static IP +// addresses, will continue to be charged until they are deleted. For +// more information, see Stopping an instance. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -66325,12 +68920,12 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", // "httpMethod": "POST", // "id": "compute.instances.stop", // "parameterOrder": [ @@ -66514,7 +69109,7 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66679,7 +69274,7 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66860,7 +69455,7 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67050,7 +69645,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67113,6 +69708,189 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } +// method id "compute.instances.updateShieldedVmConfig": + +type InstancesUpdateShieldedVmConfigCall struct { + s *Service + project string + zone string + instance string + shieldedvmconfig *ShieldedVmConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// UpdateShieldedVmConfig: Updates the Shielded VM config for an +// instance. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. 
+func (r *InstancesService) UpdateShieldedVmConfig(project string, zone string, instance string, shieldedvmconfig *ShieldedVmConfig) *InstancesUpdateShieldedVmConfigCall { + c := &InstancesUpdateShieldedVmConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.shieldedvmconfig = shieldedvmconfig + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateShieldedVmConfigCall) RequestId(requestId string) *InstancesUpdateShieldedVmConfigCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesUpdateShieldedVmConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedVmConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstancesUpdateShieldedVmConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedVmConfigCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesUpdateShieldedVmConfigCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesUpdateShieldedVmConfigCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.updateShieldedVmConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the Shielded VM config for an instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateShieldedVmConfig", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig", + // "request": { + // "$ref": "ShieldedVmConfig" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.interconnectAttachments.aggregatedList": type InterconnectAttachmentsAggregatedListCall struct { @@ -67289,7 +70067,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67484,7 +70262,7 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67650,7 +70428,7 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67813,7 
+70591,7 @@ func (c *InterconnectAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67985,7 +70763,7 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68211,7 +70989,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68296,6 +71074,189 @@ func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*Int } } +// method id "compute.interconnectAttachments.patch": + +type InterconnectAttachmentsPatchCall struct { + s *Service + project string + region string + interconnectAttachment string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified interconnect attachment with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. 
+func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { + c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.interconnectAttachment = interconnectAttachment + c.interconnectattachment = interconnectattachment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.interconnectAttachments.patch", + // "parameterOrder": [ + // "project", + // "region", + // "interconnectAttachment" + // ], + // "parameters": { + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "request": { + // "$ref": "InterconnectAttachment" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.interconnectAttachments.setIamPolicy": type InterconnectAttachmentsSetIamPolicyCall struct { @@ -68403,7 +71364,7 @@ func (c *InterconnectAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68454,6 +71415,188 @@ func (c *InterconnectAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOptio } +// method id "compute.interconnectAttachments.setLabels": + +type InterconnectAttachmentsSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an InterconnectAttachment. To learn +// more about labels, read the Labeling Resources documentation. 
+func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { + c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.interconnectAttachments.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.interconnectAttachments.testIamPermissions": type InterconnectAttachmentsTestIamPermissionsCall struct { @@ -68561,7 +71704,7 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68725,7 +71868,7 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68941,7 +72084,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69122,7 +72265,7 @@ func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69281,7 +72424,7 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69437,7 +72580,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69589,7 +72732,7 @@ func (c *InterconnectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69750,7 +72893,7 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69965,7 +73108,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70166,7 +73309,7 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70318,7 +73461,7 @@ func (c *InterconnectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) 
(*Polic }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70361,6 +73504,153 @@ func (c *InterconnectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } +// method id "compute.interconnects.setLabels": + +type InterconnectsSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an Interconnect. To learn more about +// labels, read the Labeling Resources documentation. +func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { + c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InterconnectsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.interconnects.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.interconnects.testIamPermissions": type InterconnectsTestIamPermissionsCall struct { @@ -70465,7 +73755,7 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if 
err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70620,7 +73910,7 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70661,6 +73951,453 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er } +// method id "compute.licenseCodes.getIamPolicy": + +type LicenseCodesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *LicenseCodesService) GetIamPolicy(project string, resource string) *LicenseCodesGetIamPolicyCall { + c := &LicenseCodesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LicenseCodesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicenseCodesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *LicenseCodesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicenseCodesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LicenseCodesGetIamPolicyCall) Context(ctx context.Context) *LicenseCodesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LicenseCodesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LicenseCodesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.licenseCodes.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicenseCodesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.licenseCodes.getIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/licenseCodes/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.licenseCodes.setIamPolicy": + +type LicenseCodesSetIamPolicyCall struct { + s *Service + project string + resource string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// 
SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *LicenseCodesService) SetIamPolicy(project string, resource string, policy *Policy) *LicenseCodesSetIamPolicyCall { + c := &LicenseCodesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.policy = policy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LicenseCodesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicenseCodesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LicenseCodesSetIamPolicyCall) Context(ctx context.Context) *LicenseCodesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LicenseCodesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LicenseCodesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/setIamPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.licenseCodes.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicenseCodesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.licenseCodes.setIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/licenseCodes/{resource}/setIamPolicy", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.licenseCodes.testIamPermissions": + +type LicenseCodesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { + c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.licenseCodes.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.licenseCodes.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/licenseCodes/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // 
"https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.licenses.delete": type LicensesDeleteCall struct { @@ -70776,7 +74513,7 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70932,7 +74669,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -71084,7 +74821,7 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -71244,7 +74981,7 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -71466,7 +75203,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -71647,7 +75384,7 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -71794,7 +75531,7 @@ func (c 
*LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -72013,7 +75750,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -72205,7 +75942,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -72433,7 +76170,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -72518,6 +76255,1503 @@ func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeLis } } +// method id "compute.maintenancePolicies.aggregatedList": + +type MaintenancePoliciesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of maintenance policies. +func (r *MaintenancePoliciesService) AggregatedList(project string) *MaintenancePoliciesAggregatedListCall { + c := &MaintenancePoliciesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. 
Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *MaintenancePoliciesAggregatedListCall) Filter(filter string) *MaintenancePoliciesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *MaintenancePoliciesAggregatedListCall) MaxResults(maxResults int64) *MaintenancePoliciesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MaintenancePoliciesAggregatedListCall) OrderBy(orderBy string) *MaintenancePoliciesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MaintenancePoliciesAggregatedListCall) PageToken(pageToken string) *MaintenancePoliciesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MaintenancePoliciesAggregatedListCall) Fields(s ...googleapi.Field) *MaintenancePoliciesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MaintenancePoliciesAggregatedListCall) IfNoneMatch(entityTag string) *MaintenancePoliciesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MaintenancePoliciesAggregatedListCall) Context(ctx context.Context) *MaintenancePoliciesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MaintenancePoliciesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MaintenancePoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/maintenancePolicies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.maintenancePolicies.aggregatedList" call. +// Exactly one of *MaintenancePolicyAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *MaintenancePolicyAggregatedList.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. 
+// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MaintenancePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MaintenancePolicyAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MaintenancePolicyAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of maintenance policies.", + // "httpMethod": "GET", + // "id": "compute.maintenancePolicies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/maintenancePolicies", + // "response": { + // "$ref": "MaintenancePolicyAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MaintenancePoliciesAggregatedListCall) Pages(ctx context.Context, f func(*MaintenancePolicyAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.maintenancePolicies.delete": + +type MaintenancePoliciesDeleteCall struct { + s *Service + project string + region string + maintenancePolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified maintenance policy. 
+func (r *MaintenancePoliciesService) Delete(project string, region string, maintenancePolicy string) *MaintenancePoliciesDeleteCall { + c := &MaintenancePoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.maintenancePolicy = maintenancePolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *MaintenancePoliciesDeleteCall) RequestId(requestId string) *MaintenancePoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MaintenancePoliciesDeleteCall) Fields(s ...googleapi.Field) *MaintenancePoliciesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MaintenancePoliciesDeleteCall) Context(ctx context.Context) *MaintenancePoliciesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *MaintenancePoliciesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MaintenancePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/maintenancePolicies/{maintenancePolicy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "maintenancePolicy": c.maintenancePolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.maintenancePolicies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *MaintenancePoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified maintenance policy.", + // "httpMethod": "DELETE", + // "id": "compute.maintenancePolicies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "maintenancePolicy" + // ], + // "parameters": { + // "maintenancePolicy": { + // "description": "Name of the maintenance policy to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/maintenancePolicies/{maintenancePolicy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.maintenancePolicies.get": + +type MaintenancePoliciesGetCall struct { + s *Service + project string + region string + maintenancePolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves all information of the specified maintenance policy. +func (r *MaintenancePoliciesService) Get(project string, region string, maintenancePolicy string) *MaintenancePoliciesGetCall { + c := &MaintenancePoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.maintenancePolicy = maintenancePolicy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MaintenancePoliciesGetCall) Fields(s ...googleapi.Field) *MaintenancePoliciesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MaintenancePoliciesGetCall) IfNoneMatch(entityTag string) *MaintenancePoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MaintenancePoliciesGetCall) Context(ctx context.Context) *MaintenancePoliciesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MaintenancePoliciesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MaintenancePoliciesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/maintenancePolicies/{maintenancePolicy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "maintenancePolicy": c.maintenancePolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.maintenancePolicies.get" call. +// Exactly one of *MaintenancePolicy or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *MaintenancePolicy.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MaintenancePoliciesGetCall) Do(opts ...googleapi.CallOption) (*MaintenancePolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MaintenancePolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves all information of the specified maintenance policy.", + // "httpMethod": "GET", + // "id": "compute.maintenancePolicies.get", + // "parameterOrder": [ + // "project", + // "region", + // "maintenancePolicy" + // ], + // "parameters": { + // "maintenancePolicy": { + // "description": "Name of the maintenance policy to retrieve.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/maintenancePolicies/{maintenancePolicy}", + 
// "response": { + // "$ref": "MaintenancePolicy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.maintenancePolicies.getIamPolicy": + +type MaintenancePoliciesGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *MaintenancePoliciesService) GetIamPolicy(project string, region string, resource string) *MaintenancePoliciesGetIamPolicyCall { + c := &MaintenancePoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MaintenancePoliciesGetIamPolicyCall) Fields(s ...googleapi.Field) *MaintenancePoliciesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *MaintenancePoliciesGetIamPolicyCall) IfNoneMatch(entityTag string) *MaintenancePoliciesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *MaintenancePoliciesGetIamPolicyCall) Context(ctx context.Context) *MaintenancePoliciesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MaintenancePoliciesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MaintenancePoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/maintenancePolicies/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.maintenancePolicies.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *MaintenancePoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.maintenancePolicies.getIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/maintenancePolicies/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// 
method id "compute.maintenancePolicies.insert": + +type MaintenancePoliciesInsertCall struct { + s *Service + project string + region string + maintenancepolicy *MaintenancePolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new maintenance policy. +func (r *MaintenancePoliciesService) Insert(project string, region string, maintenancepolicy *MaintenancePolicy) *MaintenancePoliciesInsertCall { + c := &MaintenancePoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.maintenancepolicy = maintenancepolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *MaintenancePoliciesInsertCall) RequestId(requestId string) *MaintenancePoliciesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MaintenancePoliciesInsertCall) Fields(s ...googleapi.Field) *MaintenancePoliciesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MaintenancePoliciesInsertCall) Context(ctx context.Context) *MaintenancePoliciesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MaintenancePoliciesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MaintenancePoliciesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.maintenancepolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/maintenancePolicies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.maintenancePolicies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *MaintenancePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new maintenance policy.", + // "httpMethod": "POST", + // "id": "compute.maintenancePolicies.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/maintenancePolicies", + // "request": { + // "$ref": "MaintenancePolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.maintenancePolicies.list": + +type MaintenancePoliciesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: List all the maintenance policies that have been configured for +// the specified project in specified region. +func (r *MaintenancePoliciesService) List(project string, region string) *MaintenancePoliciesListCall { + c := &MaintenancePoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. 
+// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *MaintenancePoliciesListCall) Filter(filter string) *MaintenancePoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *MaintenancePoliciesListCall) MaxResults(maxResults int64) *MaintenancePoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. 
+// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MaintenancePoliciesListCall) OrderBy(orderBy string) *MaintenancePoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MaintenancePoliciesListCall) PageToken(pageToken string) *MaintenancePoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MaintenancePoliciesListCall) Fields(s ...googleapi.Field) *MaintenancePoliciesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *MaintenancePoliciesListCall) IfNoneMatch(entityTag string) *MaintenancePoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MaintenancePoliciesListCall) Context(ctx context.Context) *MaintenancePoliciesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *MaintenancePoliciesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MaintenancePoliciesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/maintenancePolicies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.maintenancePolicies.list" call. +// Exactly one of *MaintenancePoliciesList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *MaintenancePoliciesList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MaintenancePoliciesListCall) Do(opts ...googleapi.CallOption) (*MaintenancePoliciesList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MaintenancePoliciesList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all the maintenance policies that have been configured for the specified project in specified region.", + // "httpMethod": "GET", + // "id": "compute.maintenancePolicies.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/maintenancePolicies", + // "response": { + // "$ref": "MaintenancePoliciesList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MaintenancePoliciesListCall) Pages(ctx context.Context, f func(*MaintenancePoliciesList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.maintenancePolicies.setIamPolicy": + +type MaintenancePoliciesSetIamPolicyCall struct { + s *Service + project string + region string + resource string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. 
+func (r *MaintenancePoliciesService) SetIamPolicy(project string, region string, resource string, policy *Policy) *MaintenancePoliciesSetIamPolicyCall { + c := &MaintenancePoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.policy = policy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MaintenancePoliciesSetIamPolicyCall) Fields(s ...googleapi.Field) *MaintenancePoliciesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MaintenancePoliciesSetIamPolicyCall) Context(ctx context.Context) *MaintenancePoliciesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MaintenancePoliciesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MaintenancePoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/maintenancePolicies/{resource}/setIamPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.maintenancePolicies.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *MaintenancePoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.maintenancePolicies.setIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/maintenancePolicies/{resource}/setIamPolicy", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.maintenancePolicies.testIamPermissions": + +type MaintenancePoliciesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *MaintenancePoliciesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *MaintenancePoliciesTestIamPermissionsCall { + c := &MaintenancePoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MaintenancePoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *MaintenancePoliciesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MaintenancePoliciesTestIamPermissionsCall) Context(ctx context.Context) *MaintenancePoliciesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MaintenancePoliciesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MaintenancePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/maintenancePolicies/{resource}/testIamPermissions") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.maintenancePolicies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MaintenancePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.maintenancePolicies.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/maintenancePolicies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.networkEndpointGroups.aggregatedList": type NetworkEndpointGroupsAggregatedListCall struct { @@ -72694,7 +77928,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -72897,7 +78131,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -73072,7 +78306,7 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err 
} return ret, nil @@ -73249,7 +78483,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -73417,7 +78651,7 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -73587,7 +78821,7 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -73812,7 +79046,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74072,7 +79306,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74273,7 +79507,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74447,7 +79681,7 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74611,7 +79845,7 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74768,7 +80002,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74930,7 +80164,7 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -75146,7 +80380,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -75435,7 +80669,7 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -75560,7 +80794,8 @@ type NetworksPatchCall struct { } // Patch: Patches the specified network with the data included in the -// request. +// request. Only the following fields can be modified: +// routingConfig.routingMode. 
func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75670,12 +80905,12 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Patches the specified network with the data included in the request.", + // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", // "httpMethod": "PATCH", // "id": "compute.networks.patch", // "parameterOrder": [ @@ -75840,7 +81075,7 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76004,7 +81239,7 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76153,7 +81388,7 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76309,7 +81544,7 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ 
-76466,7 +81701,7 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76618,7 +81853,7 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76776,7 +82011,7 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76924,7 +82159,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -77065,7 +82300,7 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -77229,7 +82464,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -77426,7 +82661,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { 
return nil, err } return ret, nil @@ -77620,7 +82855,7 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -77780,7 +83015,7 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -77941,7 +83176,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78103,7 +83338,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78264,7 +83499,7 @@ func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78427,7 +83662,7 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78588,7 +83823,7 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78754,7 +83989,7 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78926,7 +84161,7 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79152,7 +84387,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79368,7 +84603,7 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79529,7 +84764,7 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79711,7 +84946,7 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79883,7 +85118,7 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80049,7 +85284,7 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80205,7 +85440,7 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80382,7 +85617,7 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80608,7 +85843,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80823,7 +86058,7 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80986,7 +86221,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -81166,7 +86401,7 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) 
(*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -81396,7 +86631,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -81587,7 +86822,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -81759,7 +86994,7 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -81985,7 +87220,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -82177,7 +87412,7 @@ func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -82343,7 +87578,7 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -82570,7 +87805,7 @@ func (c *RegionDiskTypesListCall) 
Do(opts ...googleapi.CallOption) (*RegionDiskT }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -82786,7 +88021,7 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -82967,7 +88202,7 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83132,7 +88367,7 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83311,7 +88546,7 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83542,7 +88777,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83752,7 +88987,7 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83933,7 +89168,7 
@@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84096,7 +89331,7 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84289,7 +89524,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84450,7 +89685,7 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84618,7 +89853,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84809,7 +90044,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84970,7 +90205,7 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } 
return ret, nil @@ -85133,7 +90368,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -85310,7 +90545,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -85535,7 +90770,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -85747,7 +90982,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86003,7 +91238,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86225,7 +91460,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86418,7 +91653,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. 
}, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86602,7 +91837,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86788,7 +92023,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86969,7 +92204,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -87150,7 +92385,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -87311,7 +92546,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -87492,7 +92727,7 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -87655,7 +92890,7 @@ func (c 
*RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -87817,7 +93052,7 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88042,7 +93277,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88303,7 +93538,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88523,7 +93758,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88684,7 +93919,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88970,7 +94205,7 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, 
nil @@ -89198,7 +94433,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -89395,7 +94630,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -89612,7 +94847,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -89863,7 +95098,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -90058,7 +95293,7 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -90225,7 +95460,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -90388,7 +95623,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { 
return nil, err } return ret, nil @@ -90560,7 +95795,7 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -90786,7 +96021,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -90998,7 +96233,7 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -91162,7 +96397,7 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -91321,7 +96556,7 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -91499,7 +96734,7 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -91671,7 +96906,7 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -91828,7 +97063,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -91990,7 +97225,7 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -92206,7 +97441,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -92387,7 +97622,7 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -92431,6 +97666,152 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } +// method id "compute.securityPolicies.addRule": + +type SecurityPoliciesAddRuleCall struct { + s *Service + project string + securityPolicy string + securitypolicyrule *SecurityPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddRule: Inserts a rule into a security policy. 
+func (r *SecurityPoliciesService) AddRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesAddRuleCall { + c := &SecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.securityPolicy = securityPolicy + c.securitypolicyrule = securitypolicyrule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesAddRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesAddRuleCall) Context(ctx context.Context) *SecurityPoliciesAddRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SecurityPoliciesAddRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/addRule") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.addRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts a rule into a security policy.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.addRule", + // "parameterOrder": [ + // "project", + // "securityPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + 
// "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}/addRule", + // "request": { + // "$ref": "SecurityPolicyRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.securityPolicies.delete": type SecurityPoliciesDeleteCall struct { @@ -92546,7 +97927,7 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -92702,7 +98083,7 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -92743,6 +98124,170 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol } +// method id "compute.securityPolicies.getRule": + +type SecurityPoliciesGetRuleCall struct { + s *Service + project string + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetRule: Gets a rule at the specified priority. 
+func (r *SecurityPoliciesService) GetRule(project string, securityPolicy string) *SecurityPoliciesGetRuleCall { + c := &SecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.securityPolicy = securityPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to get from the security policy. +func (c *SecurityPoliciesGetRuleCall) Priority(priority int64) *SecurityPoliciesGetRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesGetRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesGetRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SecurityPoliciesGetRuleCall) IfNoneMatch(entityTag string) *SecurityPoliciesGetRuleCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesGetRuleCall) Context(ctx context.Context) *SecurityPoliciesGetRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SecurityPoliciesGetRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/getRule") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.getRule" call. +// Exactly one of *SecurityPolicyRule or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyRule.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyRule, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SecurityPolicyRule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a rule at the specified priority.", + // "httpMethod": "GET", + // "id": "compute.securityPolicies.getRule", + // "parameterOrder": [ + // "project", + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to get from the security policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to which the queried rule belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}/getRule", + // "response": { + // "$ref": "SecurityPolicyRule" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.securityPolicies.insert": type 
SecurityPoliciesInsertCall struct { @@ -92863,7 +98408,7 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -93078,7 +98623,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -93278,7 +98823,7 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -93326,6 +98871,314 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation } +// method id "compute.securityPolicies.patchRule": + +type SecurityPoliciesPatchRuleCall struct { + s *Service + project string + securityPolicy string + securitypolicyrule *SecurityPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// PatchRule: Patches a rule at the specified priority. +func (r *SecurityPoliciesService) PatchRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesPatchRuleCall { + c := &SecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.securityPolicy = securityPolicy + c.securitypolicyrule = securitypolicyrule + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to patch. 
+func (c *SecurityPoliciesPatchRuleCall) Priority(priority int64) *SecurityPoliciesPatchRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesPatchRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesPatchRuleCall) Context(ctx context.Context) *SecurityPoliciesPatchRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/patchRule") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.patchRule" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a rule at the specified priority.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.patchRule", + // "parameterOrder": [ + // "project", + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to patch.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // 
"required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}/patchRule", + // "request": { + // "$ref": "SecurityPolicyRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.securityPolicies.removeRule": + +type SecurityPoliciesRemoveRuleCall struct { + s *Service + project string + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveRule: Deletes a rule at the specified priority. +func (r *SecurityPoliciesService) RemoveRule(project string, securityPolicy string) *SecurityPoliciesRemoveRuleCall { + c := &SecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.securityPolicy = securityPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to remove from the security policy. +func (c *SecurityPoliciesRemoveRuleCall) Priority(priority int64) *SecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesRemoveRuleCall) Context(ctx context.Context) *SecurityPoliciesRemoveRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/removeRule") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.removeRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a rule at the specified priority.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.removeRule", + // "parameterOrder": [ + // "project", + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to remove from the security policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}/removeRule", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.securityPolicies.testIamPermissions": type SecurityPoliciesTestIamPermissionsCall struct { @@ -93430,7 +99283,7 @@ func (c 
*SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -93596,7 +99449,7 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -93753,7 +99606,7 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -93905,7 +99758,7 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -94122,7 +99975,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -94303,7 +100156,7 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -94450,7 +100303,7 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return 
ret, nil @@ -94597,7 +100450,7 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -94756,7 +100609,7 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -94912,7 +100765,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -95073,7 +100926,7 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -95288,7 +101141,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -95469,7 +101322,7 @@ func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -95630,7 +101483,7 @@ func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, 
res); err != nil { return nil, err } return ret, nil @@ -95785,7 +101638,7 @@ func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -95945,7 +101798,7 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -96160,7 +102013,7 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -96414,7 +102267,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -96593,7 +102446,7 @@ func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -96744,7 +102597,7 @@ func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -96962,7 +102815,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + 
if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -97157,7 +103010,7 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -97336,7 +103189,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -97506,7 +103359,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -97669,7 +103522,7 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -97841,7 +103694,7 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -98067,7 +103920,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -98326,7 +104179,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -98532,7 +104385,7 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -98695,7 +104548,7 @@ func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -98873,7 +104726,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -99036,7 +104889,7 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -99204,7 +105057,7 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -99361,7 +105214,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -99523,7 +105376,7 @@ func (c *TargetHttpProxiesInsertCall) Do(opts 
...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -99739,7 +105592,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -99939,7 +105792,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -100091,7 +105944,7 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -100250,7 +106103,7 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -100406,7 +106259,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -100567,7 +106420,7 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -100782,7 +106635,7 
@@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -100981,7 +106834,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -101150,7 +107003,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -101324,7 +107177,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -101493,7 +107346,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -101645,7 +107498,7 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -101864,7 +107717,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { 
return nil, err } return ret, nil @@ -102060,7 +107913,7 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -102228,7 +108081,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -102401,7 +108254,7 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -102628,7 +108481,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -102820,7 +108673,7 @@ func (c *TargetInstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -102998,7 +108851,7 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -103180,7 +109033,7 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -103411,7 +109264,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -103607,7 +109460,7 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -103775,7 +109628,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -103932,7 +109785,7 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -104108,7 +109961,7 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -104335,7 +110188,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -104546,7 +110399,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -104728,7 +110581,7 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -104917,7 +110770,7 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -105086,7 +110939,7 @@ func (c *TargetPoolsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -105253,7 +111106,7 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -105409,7 +111262,7 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -105570,7 +111423,7 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -105785,7 +111638,7 @@ func (c *TargetSslProxiesListCall) Do(opts 
...googleapi.CallOption) (*TargetSslP }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -105984,7 +111837,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -106154,7 +112007,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -106324,7 +112177,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -106497,7 +112350,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -106648,7 +112501,7 @@ func (c *TargetSslProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -106807,7 +112660,7 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -106963,7 
+112816,7 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -107124,7 +112977,7 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -107339,7 +113192,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -107538,7 +113391,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -107708,7 +113561,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -107860,7 +113713,7 @@ func (c *TargetTcpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -108078,7 +113931,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != 
nil { return nil, err } return ret, nil @@ -108273,7 +114126,7 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -108440,7 +114293,7 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -108612,7 +114465,7 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -108838,7 +114691,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -108923,6 +114776,188 @@ func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpn } } +// method id "compute.targetVpnGateways.setLabels": + +type TargetVpnGatewaysSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a TargetVpnGateway. To learn more about +// labels, read the Labeling Resources documentation. 
+func (r *TargetVpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *TargetVpnGatewaysSetLabelsCall { + c := &TargetVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetVpnGatewaysSetLabelsCall) RequestId(requestId string) *TargetVpnGatewaysSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *TargetVpnGatewaysSetLabelsCall) Context(ctx context.Context) *TargetVpnGatewaysSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetVpnGatewaysSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetVpnGateways.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a TargetVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.targetVpnGateways.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetVpnGateways.testIamPermissions": type TargetVpnGatewaysTestIamPermissionsCall struct { @@ -109030,7 +115065,7 @@ func (c *TargetVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -109198,7 +115233,7 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -109355,7 +115390,7 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -109517,7 +115552,7 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil 
{ + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -109680,7 +115715,7 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -109904,7 +115939,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -110106,7 +116141,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -110258,7 +116293,7 @@ func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -110426,7 +116461,7 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -110580,7 +116615,7 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -110797,7 +116832,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -110992,7 +117027,7 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -111159,7 +117194,7 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -111331,7 +117366,7 @@ func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -111557,7 +117592,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -111768,7 +117803,7 @@ func (c *VpnTunnelsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -111931,7 +117966,7 @@ func (c *VpnTunnelsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -112217,7 +118252,7 @@ func (c *ZoneOperationsGetCall) Do(opts 
...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -112445,7 +118480,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -112642,7 +118677,7 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -112859,7 +118894,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json index a7e3cc0a0d2..5779593b74f 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/QIBUrC-C0KuVnsoUFunufep6MGE\"", + "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/HIjKZ2H1Lvje9CpaMVbiW03yMTM\"", "discoveryVersion": "v1", "id": "compute:beta", "name": "compute", "version": "beta", - "revision": "20170905", + "revision": "20171207", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -20,7 +20,7 @@ "basePath": "/compute/beta/projects/", "rootUrl": "https://www.googleapis.com/", 
"servicePath": "compute/beta/projects/", - "batchPath": "batch", + "batchPath": "batch/compute/beta", "parameters": { "alt": { "type": "string", @@ -110,7 +110,7 @@ "AcceleratorType": { "id": "AcceleratorType", "type": "object", - "description": "An Accelerator Type resource.", + "description": "An Accelerator Type resource. (== resource_for beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -193,9 +193,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -206,7 +210,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -226,6 +232,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -293,9 +305,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -306,7 +322,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -326,6 +344,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -375,9 +399,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", 
"DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -388,7 +416,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -408,6 +438,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -454,6 +490,14 @@ "type": "string", "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance." }, + "publicPtrDomainName": { + "type": "string", + "description": "The DNS domain name for the public PTR record. This field can only be set when the set_public_ptr field is enabled." + }, + "setPublicPtr": { + "type": "boolean", + "description": "Specifies whether a public DNS ?PTR? record should be created to map the external IP address of the instance to a DNS domain name." + }, "type": { "type": "string", "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", @@ -470,15 +514,15 @@ "Address": { "id": "Address", "type": "object", - "description": "A reserved address resource.", + "description": "A reserved address resource. (== resource_for beta.addresses ==) (== resource_for v1.addresses ==) (== resource_for beta.globalAddresses ==) (== resource_for v1.globalAddresses ==)", "properties": { "address": { "type": "string", - "description": "The static external IP address represented by this resource." + "description": "The static IP address represented by this resource." 
}, "addressType": { "type": "string", - "description": "The type of address to reserve. If unspecified, defaults to EXTERNAL.", + "description": "The type of address to reserve, either INTERNAL or EXTERNAL. If unspecified, defaults to EXTERNAL.", "enum": [ "EXTERNAL", "INTERNAL", @@ -554,7 +598,7 @@ }, "status": { "type": "string", - "description": "[Output Only] The status of the address, which can be either IN_USE or RESERVED. An address that is RESERVED is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", + "description": "[Output Only] The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. An address that is RESERVING is currently in the process of being reserved. A RESERVED address is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", "enum": [ "IN_USE", "RESERVED" @@ -566,7 +610,7 @@ }, "subnetwork": { "type": "string", - "description": "For external addresses, this field should not be used.\n\nThe URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range." + "description": "The URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range. This field can only be used with INTERNAL type with GCE_ENDPOINT/DNS_RESOLVER purposes." 
}, "users": { "type": "array", @@ -616,9 +660,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -629,7 +677,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -649,6 +699,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -716,9 +772,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -729,7 +789,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -749,6 +811,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -798,9 +866,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -811,7 +883,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + 
"UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -831,6 +905,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -942,7 +1022,7 @@ }, "source": { "type": "string", - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk." + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk." }, "type": { "type": "string", @@ -990,7 +1070,7 @@ }, "sourceImage": { "type": "string", - "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. 
Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family \n\nIf the source image is deleted later, this field will not be set." + "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a custom image that you created, specify the image name in the following format:\n\nglobal/images/my-custom-image \n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-image-family \n\nIf the source image is deleted later, this field will not be set." }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", @@ -1081,7 +1161,7 @@ "Autoscaler": { "id": "Autoscaler", "type": "object", - "description": "Represents an Autoscaler resource. Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. For more information, read Autoscaling Groups of Instances.", + "description": "Represents an Autoscaler resource. Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. For more information, read Autoscaling Groups of Instances. 
(== resource_for beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== resource_for beta.regionAutoscalers ==) (== resource_for v1.regionAutoscalers ==)", "properties": { "autoscalingPolicy": { "$ref": "AutoscalingPolicy", @@ -1195,9 +1275,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1208,7 +1292,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1228,6 +1314,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1295,9 +1387,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1308,7 +1404,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1328,6 +1426,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1425,9 +1529,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", 
"NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1438,7 +1546,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1458,6 +1568,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1542,20 +1658,20 @@ "properties": { "filter": { "type": "string", - "description": "A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or global data.\n\nFor the filter to be valid for autoscaling purposes, the following rules apply: \n- You can only use the AND operator for joining selectors. \n- You can only use direct equality comparison operator (=) without any functions for each selector. \n- You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. \n- The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels.\nIf the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is / called a global metric for the purpose of autoscaling.\n\nIf not specified, the type defaults to gce_instance. \n\nYou should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). 
If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value." + "description": "A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data.\n\nFor the filter to be valid for autoscaling purposes, the following rules apply: \n- You can only use the AND operator for joining selectors. \n- You can only use direct equality comparison operator (=) without any functions for each selector. \n- You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. \n- The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels.\nIf the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling.\n\nIf not specified, the type defaults to gce_instance. \n\nYou should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value." }, "metric": { "type": "string", - "description": "The identifier (type) of the Stackdriver Monitoring metric. 
The metric cannot have negative values and should be a utilization metric, which means that the number of virtual machines handling requests should increase or decrease proportionally to the metric.\n\nThe metric must have a value type of INT64 or DOUBLE." + "description": "The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values.\n\nThe metric must have a value type of INT64 or DOUBLE." }, "singleInstanceAssignment": { "type": "number", - "description": "If scaling is based on a global metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing.\n\nA good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances.\n\nA bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.", + "description": "If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. 
Autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing.\n\nA good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances.\n\nA bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.", "format": "double" }, "utilizationTarget": { "type": "number", - "description": "The target value of the metric that autoscaler should maintain. This must be a positive value.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", + "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", "format": "double" }, "utilizationTargetType": { @@ -1654,6 +1770,10 @@ "type": "string", "description": "Cloud Storage bucket name." }, + "cdnPolicy": { + "$ref": "BackendBucketCdnPolicy", + "description": "Cloud CDN Coniguration for this BackendBucket." + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." 
@@ -1687,6 +1807,25 @@ } } }, + "BackendBucketCdnPolicy": { + "id": "BackendBucketCdnPolicy", + "type": "object", + "description": "Message containing Cloud CDN configuration for a backend bucket.", + "properties": { + "signedUrlCacheMaxAgeSec": { + "type": "string", + "description": "Number of seconds up to which the response to a signed URL request will be cached in the CDN. After this time period, the Signed URL will be revalidated before being served. Defaults to 1hr (3600s). If this field is set, Cloud CDN will internally act as though all responses from this bucket had a ?Cache-Control: public, max-age=[TTL]? header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.", + "format": "int64" + }, + "signedUrlKeyNames": { + "type": "array", + "description": "[Output Only] Names of the keys currently configured for Cloud CDN Signed URL on this backend bucket.", + "items": { + "type": "string" + } + } + } + }, "BackendBucketList": { "id": "BackendBucketList", "type": "object", @@ -1726,9 +1865,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1739,7 +1882,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1759,6 +1904,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1790,7 +1941,7 @@ "BackendService": { "id": "BackendService", "type": "object", - "description": "A BackendService resource. 
This resource defines a group of backend virtual machines and their serving capacity.", + "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity. (== resource_for v1.backendService ==) (== resource_for beta.backendService ==)", "properties": { "affinityCookieTtlSec": { "type": "integer", @@ -1971,9 +2122,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1984,7 +2139,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2004,6 +2161,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2040,6 +2203,18 @@ "cacheKeyPolicy": { "$ref": "CacheKeyPolicy", "description": "The CacheKeyPolicy for this CdnPolicy." + }, + "signedUrlCacheMaxAgeSec": { + "type": "string", + "description": "Number of seconds up to which the response to a signed URL request will be cached in the CDN. After this time period, the Signed URL will be revalidated before being served. Defaults to 1hr (3600s). If this field is set, Cloud CDN will internally act as though all responses from this backend had a ?Cache-Control: public, max-age=[TTL]? header, regardless of any existing Cache-Control header. 
The actual headers served in responses will not be altered.", + "format": "int64" + }, + "signedUrlKeyNames": { + "type": "array", + "description": "[Output Only] Names of the keys currently configured for Cloud CDN Signed URL on this backend service.", + "items": { + "type": "string" + } } } }, @@ -2119,9 +2294,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2132,7 +2311,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2152,6 +2333,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2201,9 +2388,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2214,7 +2405,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2234,6 +2427,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2333,7 +2532,7 @@ "Commitment": { "id": "Commitment", "type": "object", - "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. 
You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts.", + "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts. 
(== resource_for beta.commitments ==) (== resource_for v1.commitments ==)", "properties": { "creationTimestamp": { "type": "string", @@ -2456,9 +2655,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2469,7 +2672,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2489,6 +2694,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2556,9 +2767,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2569,7 +2784,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2589,6 +2806,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2638,9 +2861,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2651,7 +2878,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2671,6 +2900,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2864,7 +3099,7 @@ "Disk": { "id": "Disk", "type": "object", - "description": "A Disk resource.", + "description": "A Disk resource. (== resource_for beta.disks ==) (== resource_for v1.disks ==)", "properties": { "creationTimestamp": { "type": "string", @@ -2908,6 +3143,14 @@ "type": "string", "description": "[Output Only] Last detach timestamp in RFC3339 text format." }, + "licenseCodes": { + "type": "array", + "description": "Integer license codes indicating which licenses are attached to this disk.", + "items": { + "type": "string", + "format": "int64" + } + }, "licenses": { "type": "array", "description": "Any applicable publicly visible licenses.", @@ -2940,7 +3183,7 @@ }, "sourceImage": { "type": "string", - "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" + "description": "The source image used to create this disk. 
If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a custom image that you created, specify the image name in the following format:\n\nglobal/images/my-custom-image \n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-image-family" }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", @@ -3046,9 +3289,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3059,7 +3306,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3079,6 +3328,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3107,6 +3362,47 @@ } } }, + "DiskInstantiationConfig": { + "id": "DiskInstantiationConfig", + "type": "object", + "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", + "properties": { + "autoDelete": { + "type": "boolean", + "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is 
detached from the instance)." + }, + "deviceName": { + "type": "string", + "description": "Specifies the device name of the disk to which the configurations apply to." + }, + "instantiateFrom": { + "type": "string", + "description": "Specifies whether to include the disk and what image to use.", + "enum": [ + "ATTACH_READ_ONLY", + "BLANK", + "DEFAULT", + "DO_NOT_INCLUDE", + "IMAGE_URL", + "SOURCE_IMAGE", + "SOURCE_IMAGE_FAMILY" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ] + }, + "sourceImage": { + "type": "string", + "description": "The custom source image to be used to restore this disk when instantiating this instance template." + } + } + }, "DiskList": { "id": "DiskList", "type": "object", @@ -3146,9 +3442,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3159,7 +3459,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3179,6 +3481,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3224,7 +3532,7 @@ "DiskType": { "id": "DiskType", "type": "object", - "description": "A DiskType resource.", + "description": "A DiskType resource. 
(== resource_for beta.diskTypes ==) (== resource_for v1.diskTypes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -3311,9 +3619,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3324,7 +3636,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3344,6 +3658,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3411,9 +3731,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3424,7 +3748,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3444,6 +3770,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3493,9 +3825,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3506,7 +3842,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3526,6 +3864,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3586,9 +3930,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3599,7 +3947,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3619,6 +3969,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3834,7 +4190,7 @@ }, "targetTags": { "type": "array", - "description": "A list of instance tags indicating sets of instances located in the network that may make network connections as specified in allowed[]. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "description": "A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those tags. 
If no targetTags are specified, the firewall rule applies to all instances on the specified network.", "items": { "type": "string" } @@ -3880,9 +4236,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -3893,7 +4253,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -3913,6 +4275,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -3948,17 +4316,17 @@ "properties": { "calculated": { "type": "integer", - "description": "[Output Only] Absolute value calculated based on mode: mode = fixed -\u003e calculated = fixed = percent -\u003e calculated = ceiling(percent/100 * base_value)", + "description": "[Output Only] Absolute value of VM instances calculated based on the specific mode.\n\n \n- If the value is fixed, then the caculated value is equal to the fixed value. \n- If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. If there is a remainder, the number is rounded up.", "format": "int32" }, "fixed": { "type": "integer", - "description": "fixed must be non-negative.", + "description": "Specifies a fixed number of VM instances. This must be a positive integer.", "format": "int32" }, "percent": { "type": "integer", - "description": "percent must belong to [0, 100].", + "description": "Specifies a percentage of instances between 0 to 100%, inclusive. 
For example, specify 80 for 80%.", "format": "int32" } } @@ -3966,11 +4334,11 @@ "ForwardingRule": { "id": "ForwardingRule", "type": "object", - "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple.", + "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple. (== resource_for beta.forwardingRules ==) (== resource_for v1.forwardingRules ==) (== resource_for beta.globalForwardingRules ==) (== resource_for v1.globalForwardingRules ==) (== resource_for beta.regionForwardingRules ==) (== resource_for v1.regionForwardingRules ==)", "properties": { "IPAddress": { "type": "string", - "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP. For regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule." 
+ "description": "The IP address that this forwarding rule is serving on behalf of.\n\nAddresses are restricted based on the forwarding rule's load balancing scheme (EXTERNAL or INTERNAL) and scope (global or regional).\n\nWhen the load balancing scheme is EXTERNAL, for global forwarding rules, the address must be a global IP, and for regional forwarding rules, the address must live in the same region as the forwarding rule. If this field is empty, an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnet configured for the forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule.\n\nAn address can be specified either by a literal IP address or a URL reference to an existing Address resource. 
The following examples are all valid: \n- 100.1.2.3 \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address \n- projects/project/regions/region/addresses/address \n- regions/region/addresses/address \n- global/addresses/address \n- address" }, "IPProtocol": { "type": "string", @@ -4140,9 +4508,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4153,7 +4525,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4173,6 +4547,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4240,9 +4620,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4253,7 +4637,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4273,6 +4659,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4322,9 +4714,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + 
"MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4335,7 +4731,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4355,6 +4753,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4408,14 +4812,18 @@ "properties": { "type": { "type": "string", - "description": "The type of supported feature. Currently only VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the server might also populate this property with the value WINDOWS to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", + "description": "The ID of a supported feature. Read Enabling guest operating system features to see a list of available options.", "enum": [ "FEATURE_TYPE_UNSPECIFIED", "MULTI_IP_SUBNET", + "SECURE_BOOT", + "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS" ], "enumDescriptions": [ + "", + "", "", "", "", @@ -4456,6 +4864,10 @@ "requestPath": { "type": "string", "description": "The request path of the HTTP health check request. The default value is /." + }, + "response": { + "type": "string", + "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII." } } }, @@ -4491,6 +4903,10 @@ "requestPath": { "type": "string", "description": "The request path of the HTTPS health check request. The default value is /." + }, + "response": { + "type": "string", + "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII." 
} } }, @@ -4622,9 +5038,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4635,7 +5055,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4655,6 +5077,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4852,9 +5280,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4865,7 +5297,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4885,6 +5319,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5019,9 +5459,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5032,7 +5476,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + 
"UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5052,6 +5498,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5083,7 +5535,7 @@ "Image": { "id": "Image", "type": "object", - "description": "An Image resource.", + "description": "An Image resource. (== resource_for beta.images ==) (== resource_for v1.images ==)", "properties": { "archiveSizeBytes": { "type": "string", @@ -5113,7 +5565,7 @@ }, "guestOsFeatures": { "type": "array", - "description": "A list of features to enable on the guest OS. Applicable for bootable images only. Currently, only one feature can be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows each virtual CPU to have its own queue. For Windows images, you can only enable VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher. Linux images with kernel versions 3.17 and higher will support VIRTIO_SCSI_MULTIQUEUE.\n\nFor new Windows images, the server might also populate this field with the value WINDOWS, to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. 
Read Enabling guest operating system features to see a list of available options.", "items": { "$ref": "GuestOsFeature" } @@ -5144,6 +5596,14 @@ "type": "string" } }, + "licenseCodes": { + "type": "array", + "description": "Integer license codes indicating which licenses are attached to this image.", + "items": { + "type": "string", + "format": "int64" + } + }, "licenses": { "type": "array", "description": "Any applicable license URI.", @@ -5285,9 +5745,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5298,7 +5762,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5318,6 +5784,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5349,7 +5821,7 @@ "Instance": { "id": "Instance", "type": "object", - "description": "An Instance resource.", + "description": "An Instance resource. (== resource_for beta.instances ==) (== resource_for v1.instances ==)", "properties": { "canIpForward": { "type": "boolean", @@ -5363,6 +5835,10 @@ "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, + "deletionProtection": { + "type": "boolean", + "description": "Whether the resource should be protected against deletion." + }, "description": { "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." 
@@ -5533,9 +6009,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5546,7 +6026,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5566,6 +6048,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5597,6 +6085,7 @@ "InstanceGroup": { "id": "InstanceGroup", "type": "object", + "description": "InstanceGroups (== resource_for beta.instanceGroups ==) (== resource_for v1.instanceGroups ==) (== resource_for beta.regionInstanceGroups ==) (== resource_for v1.regionInstanceGroups ==)", "properties": { "creationTimestamp": { "type": "string", @@ -5704,9 +6193,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5717,7 +6210,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5737,6 +6232,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5804,9 +6305,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", 
"INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5817,7 +6322,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5837,6 +6344,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5868,7 +6381,7 @@ "InstanceGroupManager": { "id": "InstanceGroupManager", "type": "object", - "description": "An Instance Group Manager resource.", + "description": "An Instance Group Manager resource. (== resource_for beta.instanceGroupManagers ==) (== resource_for v1.instanceGroupManagers ==) (== resource_for beta.regionInstanceGroupManagers ==) (== resource_for v1.regionInstanceGroupManagers ==)", "properties": { "autoHealingPolicies": { "type": "array", @@ -5996,7 +6509,7 @@ }, "versions": { "type": "array", - "description": "Versions supported by this IGM. User should set this field if they need fine-grained control over how many instances in each version are run by this IGM. Versions are keyed by instanceTemplate. Every instanceTemplate can appear at most once. This field overrides instanceTemplate field. If both instanceTemplate and versions are set, the user receives a warning. \"instanceTemplate: X\" is semantically equivalent to \"versions [ { instanceTemplate: X } ]\". Exactly one version must have targetSize field left unset. Size of such a version will be calculated automatically.", + "description": "Specifies the instance templates used by this managed instance group to create instances.\n\nEach version is defined by an instanceTemplate. Every template can appear at most once per instance group. This field overrides the top-level instanceTemplate field. Read more about the relationships between these fields. Exactly one version must leave the targetSize field unset. 
That version will be applied to all remaining instances. For more information, read about canary updates.", "items": { "$ref": "InstanceGroupManagerVersion" } @@ -6097,9 +6610,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6110,7 +6627,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6130,6 +6649,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6212,9 +6737,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6225,7 +6754,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6245,6 +6776,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6305,11 +6842,11 @@ "properties": { "maxSurge": { "$ref": "FixedOrPercent", - "description": "Maximum number of instances that can be created above the InstanceGroupManager.targetSize during the update process. By default, a fixed value of 1 is used. Using maxSurge \u003e 0 will cause instance names to change during the update process. At least one of { maxSurge, maxUnavailable } must be greater than 0." 
+ "description": "The maximum number of instances that can be created above the specified targetSize during the update process. By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge." }, "maxUnavailable": { "$ref": "FixedOrPercent", - "description": "Maximum number of instances that can be unavailable during the update process. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's liveness health check result was observed to be HEALTHY at least once. By default, a fixed value of 1 is used. At least one of { maxSurge, maxUnavailable } must be greater than 0." + "description": "The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied:\n\n \n- The instance's status is RUNNING. \n- If there is a health check on the instance group, the instance's liveness health check result must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable." }, "minReadySec": { "type": "integer", @@ -6318,7 +6855,7 @@ }, "minimalAction": { "type": "string", - "description": "Minimal action to be taken on an instance. 
The order of action types is: RESTART \u003c REPLACE.", + "description": "Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a code\u003eRESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.", "enum": [ "REPLACE", "RESTART" @@ -6354,7 +6891,7 @@ }, "targetSize": { "$ref": "FixedOrPercent", - "description": "Intended number of instances that are created from instanceTemplate. The final number of instances created from instanceTemplate will be equal to: * if expressed as fixed number: min(targetSize.fixed, instanceGroupManager.targetSize), * if expressed as percent: ceiling(targetSize.percent * InstanceGroupManager.targetSize). If unset, this version will handle all the remaining instances." + "description": "Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: \n- If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. \n- if expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize) If there is a remainder, the number is rounded up. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information." 
} } }, @@ -6450,9 +6987,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6463,7 +7004,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6483,6 +7026,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6602,9 +7151,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6615,7 +7168,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6635,6 +7190,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6715,9 +7276,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6728,7 +7293,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + 
"UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6748,6 +7315,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6833,9 +7406,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6846,7 +7423,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6866,6 +7445,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6933,9 +7518,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6946,7 +7535,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6966,6 +7557,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7096,7 +7693,7 @@ "InstanceTemplate": { "id": "InstanceTemplate", "type": "object", - "description": "An Instance Template resource.", + "description": "An Instance Template resource. 
(== resource_for beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==)", "properties": { "creationTimestamp": { "type": "string", @@ -7133,6 +7730,14 @@ "selfLink": { "type": "string", "description": "[Output Only] The URL for this instance template. The server defines this URL." + }, + "sourceInstance": { + "type": "string", + "description": "The source instance used to create the template. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance" + }, + "sourceInstanceParams": { + "$ref": "SourceInstanceParams", + "description": "The source instance params to use to create this instance template." } } }, @@ -7175,9 +7780,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7188,7 +7797,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7208,6 +7819,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7298,9 +7915,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7311,7 +7932,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", 
"RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7331,6 +7954,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7442,11 +8071,11 @@ "Interconnect": { "id": "Interconnect", "type": "object", - "description": "Protocol definitions for Mixer API to support Interconnect. Next available tag: 23", + "description": "Represents an Interconnects resource. The Interconnects resource is a dedicated connection between Google's network and your on-premises network. For more information, see the Dedicated overview page. (== resource_for v1.interconnects ==) (== resource_for beta.interconnects ==)", "properties": { "adminEnabled": { "type": "boolean", - "description": "Administrative status of the interconnect. When this is set to ?true?, the Interconnect is functional and may carry traffic (assuming there are functional InterconnectAttachments and other requirements are satisfied). When set to ?false?, no packets will be carried over this Interconnect and no BGP routes will be exchanged over it. By default, it is set to ?true?." + "description": "Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true." }, "circuitInfos": { "type": "array", @@ -7455,10 +8084,6 @@ "$ref": "InterconnectCircuitInfo" } }, - "connectionAuthorization": { - "type": "string", - "description": "[Output Only] URL to retrieve the Letter Of Authority and Customer Facility Assignment (LOA-CFA) documentation relating to this Interconnect. This documentation authorizes the facility provider to connect to the specified crossconnect ports." - }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." 
@@ -7500,10 +8125,13 @@ }, "interconnectType": { "type": "string", + "description": "Type of interconnect. Note that \"IT_PRIVATE\" has been deprecated in favor of \"DEDICATED\"", "enum": [ + "DEDICATED", "IT_PRIVATE" ], "enumDescriptions": [ + "", "" ] }, @@ -7514,6 +8142,7 @@ }, "linkType": { "type": "string", + "description": "Type of link requested. This field indicates speed of each of the links in the bundle, not the entire bundle. Only 10G per link is allowed for a dedicated interconnect. Options: Ethernet_10G_LR", "enum": [ "LINK_TYPE_ETHERNET_10G_LR" ], @@ -7574,7 +8203,7 @@ "InterconnectAttachment": { "id": "InterconnectAttachment", "type": "object", - "description": "Protocol definitions for Mixer API to support InterconnectAttachment. Next available tag: 18", + "description": "Represents an InterconnectAttachment (VLAN attachment) resource. For more information, see Creating VLAN Attachments. (== resource_for beta.interconnectAttachments ==) (== resource_for v1.interconnectAttachments ==)", "properties": { "cloudRouterIpAddress": { "type": "string", @@ -7590,7 +8219,7 @@ }, "description": { "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." + "description": "An optional description of this resource." }, "googleReferenceId": { "type": "string", @@ -7629,7 +8258,7 @@ }, "privateInterconnectInfo": { "$ref": "InterconnectAttachmentPrivateInfo", - "description": "[Output Only] Information specific to a Private InterconnectAttachment. Only populated if the interconnect that this is attached is of type IT_PRIVATE." + "description": "[Output Only] Information specific to an InterconnectAttachment. This property is populated if the interconnect that this is attached to is of type DEDICATED." 
}, "region": { "type": "string", @@ -7684,9 +8313,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7697,7 +8330,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7717,6 +8352,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7784,9 +8425,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7797,7 +8442,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7817,6 +8464,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7848,7 +8501,7 @@ "InterconnectAttachmentPrivateInfo": { "id": "InterconnectAttachmentPrivateInfo", "type": "object", - "description": "Private information for an interconnect attachment when this belongs to an interconnect of type IT_PRIVATE.", + "description": "Information for an interconnect attachment when this belongs to an interconnect of type DEDICATED.", "properties": { "tag8021q": { "type": "integer", @@ -7878,9 +8531,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", 
+ "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7891,7 +8548,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7911,6 +8570,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7946,7 +8611,7 @@ "properties": { "customerDemarcId": { "type": "string", - "description": "Customer-side demarc ID for this circuit. This will only be set if it was provided by the Customer to Google during circuit turn-up." + "description": "Customer-side demarc ID for this circuit." }, "googleCircuitId": { "type": "string", @@ -7997,9 +8662,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8010,7 +8679,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8030,6 +8701,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8061,7 +8738,7 @@ "InterconnectLocation": { "id": "InterconnectLocation", "type": "object", - "description": "Protocol definitions for Mixer API to support InterconnectLocation.", + "description": "Represents an InterconnectLocations resource. The InterconnectLocations resource describes the locations where you can connect to Google's networks. 
For more information, see Colocation Facilities.", "properties": { "address": { "type": "string", @@ -8069,23 +8746,33 @@ }, "availabilityZone": { "type": "string", - "description": "Availability zone for this location. Within a city, maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\"." + "description": "[Output Only] Availability zone for this location. Within a metropolitan area (metro), maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\"." }, "city": { "type": "string", - "description": "City designator used by the Interconnect UI to locate this InterconnectLocation within the Continent. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\"." + "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\"." }, "continent": { "type": "string", - "description": "Continent for this location. 
Used by the location picker in the Interconnect UI.", + "description": "[Output Only] Continent for this location.", "enum": [ + "AFRICA", + "ASIA_PAC", "C_AFRICA", "C_ASIA_PAC", "C_EUROPE", "C_NORTH_AMERICA", - "C_SOUTH_AMERICA" + "C_SOUTH_AMERICA", + "EUROPE", + "NORTH_AMERICA", + "SOUTH_AMERICA" ], "enumDescriptions": [ + "", + "", + "", + "", + "", "", "", "", @@ -8179,9 +8866,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8192,7 +8883,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8212,6 +8905,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8254,10 +8953,14 @@ "type": "string", "description": "Identifies the network presence of this location.", "enum": [ + "GLOBAL", + "LOCAL_REGION", "LP_GLOBAL", "LP_LOCAL_REGION" ], "enumDescriptions": [ + "", + "", "", "" ] @@ -8265,10 +8968,6 @@ "region": { "type": "string", "description": "URL for the region of this location." - }, - "regionKey": { - "type": "string", - "description": "Scope key for the region of this location." } } }, @@ -8286,19 +8985,25 @@ }, "description": { "type": "string", - "description": "Short user-visible description of the purpose of the outage." + "description": "A description about the purpose of the outage." }, "endTime": { "type": "string", + "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", "format": "int64" }, "issueType": { "type": "string", + "description": "Form this outage is expected to take. 
Note that the \"IT_\" versions of this enum have been deprecated in favor of the unprefixed values.", "enum": [ "IT_OUTAGE", - "IT_PARTIAL_OUTAGE" + "IT_PARTIAL_OUTAGE", + "OUTAGE", + "PARTIAL_OUTAGE" ], "enumDescriptions": [ + "", + "", "", "" ] @@ -8309,25 +9014,33 @@ }, "source": { "type": "string", + "description": "The party that generated this notification. Note that \"NSRC_GOOGLE\" has been deprecated in favor of \"GOOGLE\"", "enum": [ + "GOOGLE", "NSRC_GOOGLE" ], "enumDescriptions": [ + "", "" ] }, "startTime": { "type": "string", - "description": "Scheduled start and end times for the outage (milliseconds since Unix epoch).", + "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", "format": "int64" }, "state": { "type": "string", + "description": "State of this notification. Note that the \"NS_\" versions of this enum have been deprecated in favor of the unprefixed values.", "enum": [ + "ACTIVE", + "CANCELLED", "NS_ACTIVE", "NS_CANCELED" ], "enumDescriptions": [ + "", + "", "", "" ] @@ -8343,11 +9056,29 @@ "type": "boolean", "description": "[Output Only] Deprecated. This field no longer reflects whether a license charges a usage fee." }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, "kind": { "type": "string", "description": "[Output Only] Type of resource. 
Always compute#license for licenses.", "default": "compute#license" }, + "licenseCode": { + "type": "string", + "description": "[Output Only] The unique code used to attach this license to images, snapshots, and disks.", + "format": "uint64" + }, "name": { "type": "string", "description": "[Output Only] Name of the resource. The name is 1-63 characters long and complies with RFC1035.", @@ -8358,9 +9089,219 @@ ] } }, + "resourceRequirements": { + "$ref": "LicenseResourceRequirements" + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." + }, + "transferable": { + "type": "boolean", + "description": "If false, licenses will not be copied from the source resource when creating an image from a disk, disk from snapshot, or snapshot from disk." + } + } + }, + "LicenseCode": { + "id": "LicenseCode", + "type": "object", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "[Output Only] Description of this License Code." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#licenseCode for licenses.", + "default": "compute#licenseCode" + }, + "licenseAlias": { + "type": "array", + "description": "[Output Only] URL and description aliases of Licenses with the same License Code.", + "items": { + "$ref": "LicenseCodeLicenseAlias" + } + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource. 
The name is 1-20 characters long and must be a valid 64 bit integer.", + "pattern": "[0-9]{0,20}?", + "annotations": { + "required": [ + "compute.licenses.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "state": { + "type": "string", + "description": "[Output Only] Current state of this License Code.", + "enum": [ + "DISABLED", + "ENABLED", + "RESTRICTED", + "STATE_UNSPECIFIED", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "transferable": { + "type": "boolean", + "description": "[Output Only] If true, the license will remain attached when creating images or snapshots from disks. Otherwise, the license is not transferred." + } + } + }, + "LicenseCodeLicenseAlias": { + "id": "LicenseCodeLicenseAlias", + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "[Output Only] Description of this License Code." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] URL of license corresponding to this License Code." + } + } + }, + "LicenseResourceRequirements": { + "id": "LicenseResourceRequirements", + "type": "object", + "properties": { + "minGuestCpuCount": { + "type": "integer", + "description": "Minimum number of guest cpus required to use the Instance. Enforced at Instance creation and Instance start.", + "format": "int32" + }, + "minMemoryMb": { + "type": "integer", + "description": "Minimum memory required to use the Instance. Enforced at Instance creation and Instance start.", + "format": "int32" + } + } + }, + "LicensesListResponse": { + "id": "LicensesListResponse", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." 
+ }, + "items": { + "type": "array", + "description": "A list of License resources.", + "items": { + "$ref": "License" + } + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8445,7 +9386,7 @@ "MachineType": { "id": "MachineType", "type": "object", - "description": "A Machine Type resource.", + "description": "A Machine Type resource. 
(== resource_for v1.machineTypes ==) (== resource_for beta.machineTypes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -8547,9 +9488,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8560,7 +9505,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8580,6 +9527,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8647,9 +9600,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8660,7 +9617,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8680,6 +9639,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8729,9 +9694,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8742,7 +9711,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8762,6 +9733,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8976,7 +9953,7 @@ "Network": { "id": "Network", "type": "object", - "description": "Represents a Network resource. Read Networks and Firewalls for more information.", + "description": "Represents a Network resource. Read Networks and Firewalls for more information. (== resource_for v1.networks ==) (== resource_for beta.networks ==)", "properties": { "IPv4Range": { "type": "string", @@ -9063,6 +10040,11 @@ "$ref": "AliasIpRange" } }, + "fingerprint": { + "type": "string", + "description": "Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface.", + "format": "byte" + }, "kind": { "type": "string", "description": "[Output Only] Type of the resource. 
Always compute#networkInterface for network interfaces.", @@ -9125,9 +10107,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9138,7 +10124,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9158,6 +10146,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9250,7 +10244,12 @@ }, "name": { "type": "string", - "description": "Name of the peering, which should conform to RFC1035." + "description": "Name of the peering, which should conform to RFC1035.", + "annotations": { + "required": [ + "compute.networks.addPeering" + ] + } }, "peerNetwork": { "type": "string", @@ -9271,7 +10270,7 @@ "Operation": { "id": "Operation", "type": "object", - "description": "An Operation resource, used to manage asynchronous API requests.", + "description": "An Operation resource, used to manage asynchronous API requests. 
(== resource_for v1.globalOperations ==) (== resource_for beta.globalOperations ==) (== resource_for v1.regionOperations ==) (== resource_for beta.regionOperations ==) (== resource_for v1.zoneOperations ==) (== resource_for beta.zoneOperations ==)", "properties": { "clientOperationId": { "type": "string", @@ -9407,9 +10406,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9420,7 +10423,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9440,6 +10445,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9512,9 +10523,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9525,7 +10540,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9545,6 +10562,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9612,9 +10635,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + 
"MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9625,7 +10652,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9645,6 +10674,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9694,9 +10729,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9707,7 +10746,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9727,6 +10768,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9844,7 +10891,7 @@ "Project": { "id": "Project", "type": "object", - "description": "A Project resource. Projects can only be created in the Google Cloud Platform Console. Unless marked otherwise, values can only be modified in the console.", + "description": "A Project resource. Projects can only be created in the Google Cloud Platform Console. Unless marked otherwise, values can only be modified in the console. 
(== resource_for v1.projects ==) (== resource_for beta.projects ==)", "properties": { "commonInstanceMetadata": { "$ref": "Metadata", @@ -9994,6 +11041,7 @@ "INSTANCE_GROUP_MANAGERS", "INSTANCE_TEMPLATES", "INTERCONNECTS", + "INTERNAL_ADDRESSES", "IN_USE_ADDRESSES", "LOCAL_SSD_TOTAL_GB", "NETWORKS", @@ -10001,6 +11049,8 @@ "NVIDIA_P100_GPUS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", + "PREEMPTIBLE_NVIDIA_K80_GPUS", + "PREEMPTIBLE_NVIDIA_P100_GPUS", "REGIONAL_AUTOSCALERS", "REGIONAL_INSTANCE_GROUP_MANAGERS", "ROUTERS", @@ -10065,6 +11115,9 @@ "", "", "", + "", + "", + "", "" ] }, @@ -10102,7 +11155,7 @@ "Region": { "id": "Region", "type": "object", - "description": "Region resource.", + "description": "Region resource. (== resource_for beta.regions ==) (== resource_for v1.regions ==)", "properties": { "creationTimestamp": { "type": "string", @@ -10201,9 +11254,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -10214,7 +11271,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -10234,6 +11293,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -10301,9 +11366,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -10314,7 +11383,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -10334,6 +11405,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -10401,9 +11478,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -10414,7 +11495,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -10434,6 +11517,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -10596,9 +11685,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -10609,7 +11702,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -10629,6 +11724,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -10737,9 +11838,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", 
"NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -10750,7 +11855,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -10770,6 +11877,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -10855,7 +11968,7 @@ "Route": { "id": "Route", "type": "object", - "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", + "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. 
If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped. (== resource_for beta.routes ==) (== resource_for v1.routes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -10965,9 +12078,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -10978,7 +12095,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -10998,6 +12117,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11066,9 +12191,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11079,7 +12208,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11099,6 +12230,12 @@ "", "", "", + "", + 
"", + "", + "", + "", + "", "" ] }, @@ -11197,6 +12334,21 @@ } } }, + "RouterAdvertisedIpRange": { + "id": "RouterAdvertisedIpRange", + "type": "object", + "description": "Description-tagged IP ranges for the router to advertise.", + "properties": { + "description": { + "type": "string", + "description": "User-specified description for the IP range." + }, + "range": { + "type": "string", + "description": "The IP range to advertise. The value must be a CIDR-formatted string." + } + } + }, "RouterAggregatedList": { "id": "RouterAggregatedList", "type": "object", @@ -11237,9 +12389,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11250,7 +12406,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11270,6 +12428,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11302,6 +12466,38 @@ "id": "RouterBgp", "type": "object", "properties": { + "advertiseMode": { + "type": "string", + "description": "User-specified flag to indicate which mode to use for advertisement.", + "enum": [ + "CUSTOM", + "DEFAULT" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "advertisedGroups": { + "type": "array", + "description": "User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These groups will be advertised in addition to any specified prefixes. 
Leave this field blank to advertise no custom groups.", + "items": { + "type": "string", + "enum": [ + "ALL_SUBNETS" + ], + "enumDescriptions": [ + "" + ] + } + }, + "advertisedIpRanges": { + "type": "array", + "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", + "items": { + "$ref": "RouterAdvertisedIpRange" + } + }, "asn": { "type": "integer", "description": "Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN.", @@ -11313,6 +12509,38 @@ "id": "RouterBgpPeer", "type": "object", "properties": { + "advertiseMode": { + "type": "string", + "description": "User-specified flag to indicate which mode to use for advertisement.", + "enum": [ + "CUSTOM", + "DEFAULT" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "advertisedGroups": { + "type": "array", + "description": "User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", + "items": { + "type": "string", + "enum": [ + "ALL_SUBNETS" + ], + "enumDescriptions": [ + "" + ] + } + }, + "advertisedIpRanges": { + "type": "array", + "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These IP ranges will be advertised in addition to any specified groups. 
Leave this field blank to advertise no custom IP ranges.", + "items": { + "$ref": "RouterAdvertisedIpRange" + } + }, "advertisedRoutePriority": { "type": "integer", "description": "The priority of routes advertised to this BGP peer. In the case where there is more than one matching route of maximum length, the routes with lowest priority value win.", @@ -11404,9 +12632,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11417,7 +12649,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11437,6 +12671,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11600,9 +12840,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11613,7 +12857,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11633,6 +12879,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11688,7 +12940,7 @@ }, "conditions": { "type": "array", - "description": "Additional restrictions that must be met", + "description": "Additional restrictions that must be met. 
All conditions must pass for the rule to match.", "items": { "$ref": "Condition" } @@ -11869,9 +13121,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -11882,7 +13138,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -11902,6 +13160,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -11976,6 +13240,35 @@ "id": "SecurityPolicyRuleMatcher", "type": "object", "description": "Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified.", + "properties": { + "config": { + "$ref": "SecurityPolicyRuleMatcherConfig", + "description": "The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified." + }, + "srcIpRanges": { + "type": "array", + "description": "CIDR IP address range. Only IPv4 is supported.", + "items": { + "type": "string" + } + }, + "versionedExpr": { + "type": "string", + "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. 
Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", + "enum": [ + "SRC_IPS_V1", + "VERSIONED_EXPR_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "SecurityPolicyRuleMatcherConfig": { + "id": "SecurityPolicyRuleMatcherConfig", + "type": "object", "properties": { "srcIpRanges": { "type": "array", @@ -12034,10 +13327,26 @@ } } }, + "SignedUrlKey": { + "id": "SignedUrlKey", + "type": "object", + "description": "Represents a customer-supplied Signing Key used by Cloud CDN Signed URLs", + "properties": { + "keyName": { + "type": "string", + "description": "Name of the key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "keyValue": { + "type": "string", + "description": "128-bit key value used for signing the URL. The key value must be a valid RFC 4648 Section 5 base64url encoded string." + } + } + }, "Snapshot": { "id": "Snapshot", "type": "object", - "description": "A persistent disk snapshot resource.", + "description": "A persistent disk snapshot resource. (== resource_for beta.snapshots ==) (== resource_for v1.snapshots ==)", "properties": { "creationTimestamp": { "type": "string", @@ -12074,6 +13383,14 @@ "type": "string" } }, + "licenseCodes": { + "type": "array", + "description": "Integer license codes indicating which licenses are attached to this snapshot.", + "items": { + "type": "string", + "format": "int64" + } + }, "licenses": { "type": "array", "description": "[Output Only] A list of public visible licenses that apply to this snapshot. 
This can be because the original image had licenses attached (such as a Windows image).", @@ -12126,7 +13443,7 @@ }, "storageBytes": { "type": "string", - "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", + "description": "[Output Only] A size of the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", "format": "int64" }, "storageBytesStatus": { @@ -12182,9 +13499,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12195,7 +13516,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12215,6 +13538,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12243,10 +13572,24 @@ } } }, + "SourceInstanceParams": { + "id": "SourceInstanceParams", + "type": "object", + "description": "A specification of the parameters to use when creating the instance template from a source instance.", + "properties": { + "diskConfigs": { + "type": "array", + "description": "Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. 
Local SSD disks will be created as blank volumes.", + "items": { + "$ref": "DiskInstantiationConfig" + } + } + } + }, "SslCertificate": { "id": "SslCertificate", "type": "object", - "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", + "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user. (== resource_for beta.sslCertificates ==) (== resource_for v1.sslCertificates ==)", "properties": { "certificate": { "type": "string", @@ -12324,9 +13667,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12337,7 +13684,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12357,6 +13706,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12385,11 +13740,317 @@ } } }, + "SslPoliciesList": { + "id": "SslPoliciesList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of SslPolicy resources.", + "items": { + "$ref": "SslPolicy" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. 
Always compute#sslPoliciesList for lists of sslPolicies.", + "default": "compute#sslPoliciesList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "SslPoliciesListAvailableFeaturesResponse": { + "id": "SslPoliciesListAvailableFeaturesResponse", + "type": "object", + "properties": { + "features": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "SslPolicy": { + "id": "SslPolicy", + "type": "object", + "description": "A SSL policy specifies the server-side support for SSL features. This can be attached to a TargetHttpsProxy or a TargetSslProxy. This affects connections between clients and the HTTPS or SSL proxy load balancer. They do not affect the connection between the load balancers and the backends.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "customFeatures": { + "type": "array", + "description": "List of features enabled when the selected profile is CUSTOM. The\n- method returns the set of features that can be specified in this list. 
This field must be empty if the profile is not CUSTOM.", + "items": { + "type": "string" + } + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "enabledFeatures": { + "type": "array", + "description": "[Output Only] The list of features enabled in the SSL policy.", + "items": { + "type": "string" + } + }, + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a SslPolicy. An up-to-date fingerprint must be provided in order to update the SslPolicy.", + "format": "byte" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output only] Type of the resource. Always compute#sslPolicyfor SSL policies.", + "default": "compute#sslPolicy" + }, + "minTlsVersion": { + "type": "string", + "description": "The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. This can be one of TLS_1_0, TLS_1_1, TLS_1_2, TLS_1_3.", + "enum": [ + "TLS_1_0", + "TLS_1_1", + "TLS_1_2", + "TLS_1_3" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "name": { + "type": "string", + "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" 
+ }, + "profile": { + "type": "string", + "description": "Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, the set of SSL features to enable must be specified in the customFeatures field.", + "enum": [ + "COMPATIBLE", + "CUSTOM", + "MODERN", + "RESTRICTED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "warnings": { + "type": "array", + "description": "[Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + } + }, + "SslPolicyReference": { + "id": "SslPolicyReference", + "type": "object", + "properties": { + "sslPolicy": { + "type": "string", + "description": "URL of the SSL policy resource. Set this to empty string to clear any existing SSL policy associated with the target proxy resource." + } + } + }, "Subnetwork": { "id": "Subnetwork", "type": "object", - "description": "A Subnetwork resource.", + "description": "A Subnetwork resource. (== resource_for beta.subnetworks ==) (== resource_for v1.subnetworks ==)", "properties": { + "allowSubnetCidrRoutesOverlap": { + "type": "boolean", + "description": "Whether this subnetwork can conflict with static routes. Setting this to true allows this subnetwork's primary and secondary ranges to conflict with routes that have already been configured on the corresponding network. 
Static routes will take precedence over the subnetwork route if the route prefix length is at least as large as the subnetwork prefix length.\n\nAlso, packets destined to IPs within subnetwork may contain private/sensitive data and are prevented from leaving the virtual network. Setting this field to true will disable this feature.\n\nThe default value is false and applies to all existing subnetworks and automatically created subnetworks.\n\nThis field cannot be set to true at resource creation time." + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -12398,6 +14059,11 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource. This field can be set only at resource creation time." }, + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a Subnetwork. An up-to-date fingerprint must be provided in order to update the Subnetwork.", + "format": "byte" + }, "gatewayAddress": { "type": "string", "description": "[Output Only] The gateway address for default routes to reach destination addresses outside this subnetwork. This field can be set only at resource creation time." 
@@ -12485,9 +14151,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12498,7 +14168,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12518,6 +14190,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12585,9 +14263,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12598,7 +14280,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12618,6 +14302,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12692,9 +14382,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12705,7 +14399,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + 
"UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12725,6 +14421,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12819,7 +14521,7 @@ "TargetHttpProxy": { "id": "TargetHttpProxy", "type": "object", - "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.", + "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== resource_for v1.targetHttpProxies ==)", "properties": { "creationTimestamp": { "type": "string", @@ -12893,9 +14595,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -12906,7 +14612,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -12926,6 +14634,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -12954,6 +14668,26 @@ } } }, + "TargetHttpsProxiesSetQuicOverrideRequest": { + "id": "TargetHttpsProxiesSetQuicOverrideRequest", + "type": "object", + "properties": { + "quicOverride": { + "type": "string", + "description": "QUIC policy for the TargetHttpsProxy resource.", + "enum": [ + "DISABLE", + "ENABLE", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, "TargetHttpsProxiesSetSslCertificatesRequest": { "id": "TargetHttpsProxiesSetSslCertificatesRequest", "type": "object", @@ -12970,7 +14704,7 @@ "TargetHttpsProxy": { "id": "TargetHttpsProxy", "type": "object", - "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy.", + "description": "A TargetHttpsProxy resource. 
This resource defines an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== resource_for v1.targetHttpsProxies ==)", "properties": { "creationTimestamp": { "type": "string", @@ -12995,6 +14729,20 @@ "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, + "quicOverride": { + "type": "string", + "description": "Specifies the QUIC override policy for this TargetHttpsProxy resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, Enables QUIC when set to ENABLE, and disables QUIC when set to DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Not specifying this field is equivalent to specifying NONE.", + "enum": [ + "DISABLE", + "ENABLE", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." @@ -13006,6 +14754,10 @@ "type": "string" } }, + "sslPolicy": { + "type": "string", + "description": "URL of SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured." + }, "urlMap": { "type": "string", "description": "A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. 
For example, the following are all valid URLs for specifying a URL map: \n- https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map \n- projects/project/global/urlMaps/url-map \n- global/urlMaps/url-map" @@ -13051,9 +14803,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13064,7 +14820,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13084,6 +14842,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13115,7 +14879,7 @@ "TargetInstance": { "id": "TargetInstance", "type": "object", - "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols.", + "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols. 
(== resource_for beta.targetInstances ==) (== resource_for v1.targetInstances ==)", "properties": { "creationTimestamp": { "type": "string", @@ -13203,9 +14967,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13216,7 +14984,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13236,6 +15006,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13303,9 +15079,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13316,7 +15096,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13336,6 +15118,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13385,9 +15173,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13398,7 +15190,9 @@ "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13418,6 +15212,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13449,7 +15249,7 @@ "TargetPool": { "id": "TargetPool", "type": "object", - "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool.", + "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool. (== resource_for beta.targetPools ==) (== resource_for v1.targetPools ==)", "properties": { "backupPool": { "type": "string", @@ -13564,9 +15364,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13577,7 +15381,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13597,6 +15403,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13681,9 +15493,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13694,7 +15510,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", 
"RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13714,6 +15532,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13815,9 +15639,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -13828,7 +15656,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -13848,6 +15678,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -13929,7 +15765,7 @@ "TargetSslProxy": { "id": "TargetSslProxy", "type": "object", - "description": "A TargetSslProxy resource. This resource defines an SSL proxy.", + "description": "A TargetSslProxy resource. This resource defines an SSL proxy. (== resource_for beta.targetSslProxies ==) (== resource_for v1.targetSslProxies ==)", "properties": { "creationTimestamp": { "type": "string", @@ -13980,6 +15816,10 @@ "items": { "type": "string" } + }, + "sslPolicy": { + "type": "string", + "description": "URL of SslPolicy resource that will be associated with the TargetSslProxy resource. If not set, the TargetSslProxy resource will not have any SSL policy configured." 
} } }, @@ -14022,9 +15862,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -14035,7 +15879,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -14055,6 +15901,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -14114,7 +15966,7 @@ "TargetTcpProxy": { "id": "TargetTcpProxy", "type": "object", - "description": "A TargetTcpProxy resource. This resource defines a TCP proxy.", + "description": "A TargetTcpProxy resource. This resource defines a TCP proxy. (== resource_for beta.targetTcpProxies ==) (== resource_for v1.targetTcpProxies ==)", "properties": { "creationTimestamp": { "type": "string", @@ -14200,9 +16052,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -14213,7 +16069,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -14233,6 +16091,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -14264,7 +16128,7 @@ "TargetVpnGateway": { "id": "TargetVpnGateway", "type": "object", - "description": "Represents a Target VPN gateway resource.", + "description": "Represents 
a Target VPN gateway resource. (== resource_for beta.targetVpnGateways ==) (== resource_for v1.targetVpnGateways ==)", "properties": { "creationTimestamp": { "type": "string", @@ -14291,6 +16155,18 @@ "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", "default": "compute#targetVpnGateway" }, + "labelFingerprint": { + "type": "string", + "description": "A fingerprint for the labels being applied to this TargetVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make a get() request to retrieve an TargetVpnGateway.", + "format": "byte" + }, + "labels": { + "type": "object", + "description": "Labels to apply to this TargetVpnGateway resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "additionalProperties": { + "type": "string" + } + }, "name": { "type": "string", "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", @@ -14382,9 +16258,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -14395,7 +16275,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -14415,6 +16297,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -14482,9 +16370,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -14495,7 +16387,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -14515,6 +16409,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -14564,9 +16464,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -14577,7 
+16481,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -14597,6 +16503,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -14795,9 +16707,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -14808,7 +16724,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -14828,6 +16746,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -14952,6 +16876,7 @@ "VpnTunnel": { "id": "VpnTunnel", "type": "object", + "description": "VPN tunnel resource. (== resource_for beta.vpnTunnels ==) (== resource_for v1.vpnTunnels ==)", "properties": { "creationTimestamp": { "type": "string", @@ -14980,6 +16905,18 @@ "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", "default": "compute#vpnTunnel" }, + "labelFingerprint": { + "type": "string", + "description": "A fingerprint for the labels being applied to this VpnTunnel, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make a get() request to retrieve a VpnTunnel.", + "format": "byte" + }, + "labels": { + "type": "object", + "description": "Labels to apply to this VpnTunnel. These can be later modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "additionalProperties": { + "type": "string" + } + }, "localTrafficSelector": { "type": "array", "description": "Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", @@ -15110,9 +17047,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15123,7 +17064,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15143,6 +17086,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -15210,9 +17159,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15223,7 +17176,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", 
"SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15243,6 +17198,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -15292,9 +17253,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15305,7 +17270,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15325,6 +17292,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -15391,9 +17364,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15404,7 +17381,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15424,6 +17403,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -15478,7 +17463,7 @@ "Zone": { "id": "Zone", "type": "object", - "description": "A Zone resource.", + "description": "A Zone resource. 
(== resource_for beta.zones ==) (== resource_for v1.zones ==)", "properties": { "availableCpuPlatforms": { "type": "array", @@ -15574,9 +17559,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -15587,7 +17576,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -15607,6 +17598,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -16520,6 +18517,46 @@ }, "backendBuckets": { "methods": { + "addSignedUrlKey": { + "id": "compute.backendBuckets.addSignedUrlKey", + "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + "httpMethod": "POST", + "description": "Adds the given Signed URL Key to the backend bucket.", + "parameters": { + "backendBucket": { + "type": "string", + "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "backendBucket" + ], + "request": { + "$ref": "SignedUrlKey" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "id": "compute.backendBuckets.delete", "path": "{project}/global/backendBuckets/{backendBucket}", @@ -16558,6 +18595,50 @@ "https://www.googleapis.com/auth/compute" ] }, + "deleteSignedUrlKey": { + "id": "compute.backendBuckets.deleteSignedUrlKey", + "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + "httpMethod": "POST", + "description": "Deletes the given Signed URL Key from the backend bucket.", + "parameters": { + "backendBucket": { + "type": "string", + "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + "required": true, + "location": "path" + }, + "keyName": { + "type": "string", + "description": "The name of the Signed URL Key to delete.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "backendBucket", + "keyName" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "get": { "id": "compute.backendBuckets.get", "path": "{project}/global/backendBuckets/{backendBucket}", @@ -16760,6 +18841,46 @@ }, "backendServices": { "methods": { + "addSignedUrlKey": { + "id": "compute.backendServices.addSignedUrlKey", + "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", + "httpMethod": "POST", + "description": "Adds the given Signed URL Key to the specified backend service.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "request": { + "$ref": "SignedUrlKey" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "aggregatedList": { "id": "compute.backendServices.aggregatedList", "path": "{project}/aggregated/backendServices", @@ -16847,6 +18968,50 @@ "https://www.googleapis.com/auth/compute" ] }, + "deleteSignedUrlKey": { + "id": "compute.backendServices.deleteSignedUrlKey", + "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", + "httpMethod": "POST", + "description": "Deletes the given Signed URL Key from the specified backend service.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", + "required": true, + "location": "path" + }, + "keyName": { + "type": "string", + "description": "The name of the Signed URL Key to delete.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "backendService", + "keyName" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "get": { "id": "compute.backendServices.get", "path": "{project}/global/backendServices/{backendService}", @@ -17610,7 +19775,7 @@ "id": "compute.disks.resize", "path": "{project}/zones/{zone}/disks/{disk}/resize", "httpMethod": "POST", - "description": "Resizes the specified persistent disk.", + "description": "Resizes the specified persistent disk. 
You can only increase the size of the disk.", "parameters": { "disk": { "type": "string", @@ -20104,7 +22269,7 @@ "id": "compute.images.list", "path": "{project}/global/images", "httpMethod": "GET", - "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", "parameters": { "filter": { "type": "string", @@ -22148,6 +24313,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, + "sourceInstanceTemplate": { + "type": "string", + "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/global/instanceTemplates/instanceTemplate \n- projects/project/global/global/instanceTemplates/instanceTemplate \n- global/instancesTemplates/instanceTemplate", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -22339,6 +24509,58 @@ "https://www.googleapis.com/auth/compute" ] }, + "setDeletionProtection": { + "id": "compute.instances.setDeletionProtection", + "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + "httpMethod": "POST", + "description": "Sets deletion protection on the instance.", + "parameters": { + "deletionProtection": { + "type": "boolean", + "description": "Whether the resource should be protected against deletion.", + "default": "true", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setDiskAutoDelete": { "id": "compute.instances.setDiskAutoDelete", "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", @@ -22891,7 +25113,7 @@ "id": "compute.instances.stop", "path": "{project}/zones/{zone}/instances/{instance}/stop", "httpMethod": "POST", - "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. 
However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", "parameters": { "instance": { "type": "string", @@ -22977,6 +25199,118 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "updateAccessConfig": { + "id": "compute.instances.updateAccessConfig", + "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + "httpMethod": "POST", + "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "parameters": { + "instance": { + "type": "string", + "description": "The instance name for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "networkInterface": { + "type": "string", + "description": "The name of the network interface where the access config is attached.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "networkInterface" + ], + "request": { + "$ref": "AccessConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "updateNetworkInterface": { + "id": "compute.instances.updateNetworkInterface", + "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", + "httpMethod": "PATCH", + "description": "Updates an instance's network interface. This method follows PATCH semantics.", + "parameters": { + "instance": { + "type": "string", + "description": "The instance name for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "networkInterface": { + "type": "string", + "description": "The name of the network interface to update.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "networkInterface" + ], + "request": { + "$ref": "NetworkInterface" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -23587,8 +25921,84 @@ } } }, + "licenseCodes": { + "methods": { + "get": { + "id": "compute.licenseCodes.get", + "path": "{project}/global/licenseCodes/{licenseCode}", + "httpMethod": "GET", + "description": "Return a specified license code. 
License codes are mirrored across all projects that have permissions to read the License Code.", + "parameters": { + "licenseCode": { + "type": "string", + "description": "Number corresponding to the License code resource to return.", + "required": true, + "pattern": "[0-9]{0,61}?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "licenseCode" + ], + "response": { + "$ref": "LicenseCode" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "licenses": { "methods": { + "delete": { + "id": "compute.licenses.delete", + "path": "{project}/global/licenses/{license}", + "httpMethod": "DELETE", + "description": "Deletes the specified license.", + "parameters": { + "license": { + "type": "string", + "description": "Name of the license resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "license" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "get": { "id": "compute.licenses.get", "path": "{project}/global/licenses/{license}", @@ -23622,6 +26032,91 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "insert": { + "id": "compute.licenses.insert", + "path": "{project}/global/licenses", + "httpMethod": "POST", + "description": "Create a License resource in the specified project.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "License" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "id": "compute.licenses.list", + "path": "{project}/global/licenses", + "httpMethod": "GET", + "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 8. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "LicensesListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] } } }, @@ -23978,7 +26473,7 @@ "id": "compute.networks.patch", "path": "{project}/global/networks/{network}", "httpMethod": "PATCH", - "description": "Patches the specified network with the data included in the request.", + "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", "parameters": { "network": { "type": "string", @@ -27308,6 +29803,42 @@ }, "securityPolicies": { "methods": { + "addRule": { + "id": "compute.securityPolicies.addRule", + "path": "{project}/global/securityPolicies/{securityPolicy}/addRule", + "httpMethod": "POST", + "description": "Inserts a rule into a security policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "securityPolicy": { + "type": "string", + "description": "Name of the security policy to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "request": { + "$ref": "SecurityPolicyRule" + }, + "response": { + "$ref": 
"Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "id": "compute.securityPolicies.delete", "path": "{project}/global/securityPolicies/{securityPolicy}", @@ -27380,6 +29911,46 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getRule": { + "id": "compute.securityPolicies.getRule", + "path": "{project}/global/securityPolicies/{securityPolicy}/getRule", + "httpMethod": "GET", + "description": "Gets a rule at the specified priority.", + "parameters": { + "priority": { + "type": "integer", + "description": "The priority of the rule to get from the security policy.", + "format": "int32", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "securityPolicy": { + "type": "string", + "description": "Name of the security policy to which the queried rule belongs.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "response": { + "$ref": "SecurityPolicyRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "id": "compute.securityPolicies.insert", "path": "{project}/global/securityPolicies", @@ -27503,6 +30074,87 @@ "https://www.googleapis.com/auth/compute" ] }, + "patchRule": { + "id": "compute.securityPolicies.patchRule", + "path": "{project}/global/securityPolicies/{securityPolicy}/patchRule", + "httpMethod": "POST", + "description": "Patches a rule at the specified priority.", + "parameters": { + "priority": { + "type": "integer", + "description": "The priority of the rule to 
patch.", + "format": "int32", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "securityPolicy": { + "type": "string", + "description": "Name of the security policy to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "request": { + "$ref": "SecurityPolicyRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "removeRule": { + "id": "compute.securityPolicies.removeRule", + "path": "{project}/global/securityPolicies/{securityPolicy}/removeRule", + "httpMethod": "POST", + "description": "Deletes a rule at the specified priority.", + "parameters": { + "priority": { + "type": "integer", + "description": "The priority of the rule to remove from the security policy.", + "format": "int32", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "securityPolicy": { + "type": "string", + "description": "Name of the security policy to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.securityPolicies.testIamPermissions", "path": 
"{project}/global/securityPolicies/{resource}/testIamPermissions", @@ -27935,6 +30587,288 @@ } } }, + "sslPolicies": { + "methods": { + "delete": { + "id": "compute.sslPolicies.delete", + "path": "{project}/global/sslPolicies/{sslPolicy}", + "httpMethod": "DELETE", + "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "sslPolicy": { + "type": "string", + "description": "Name of the SSL policy to delete. 
The name must be 1-63 characters long, and comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "sslPolicy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.sslPolicies.get", + "path": "{project}/global/sslPolicies/{sslPolicy}", + "httpMethod": "GET", + "description": "List all of the ordered rules present in a single specified policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "sslPolicy": { + "type": "string", + "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "sslPolicy" + ], + "response": { + "$ref": "SslPolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.sslPolicies.insert", + "path": "{project}/global/sslPolicies", + "httpMethod": "POST", + "description": "Returns the specified SSL policy resource. Get a list of available SSL policies by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "SslPolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.sslPolicies.list", + "path": "{project}/global/sslPolicies", + "httpMethod": "GET", + "description": "List all the SSL policies that have been configured for the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SslPoliciesList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listAvailableFeatures": { + "id": "compute.sslPolicies.listAvailableFeatures", + "path": "{project}/global/sslPolicies/listAvailableFeatures", + "httpMethod": "GET", + "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SslPoliciesListAvailableFeaturesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.sslPolicies.patch", + "path": "{project}/global/sslPolicies/{sslPolicy}", + "httpMethod": "PATCH", + "description": "Patches the specified SSL policy with the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "sslPolicy": { + "type": "string", + "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "sslPolicy" + ], + "request": { + "$ref": "SslPolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.sslPolicies.testIamPermissions", + "path": "{project}/global/sslPolicies/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "subnetworks": { "methods": { "aggregatedList": { @@ -28263,6 +31197,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "id": "compute.subnetworks.patch", + "path": 
"{project}/regions/{region}/subnetworks/{subnetwork}", + "httpMethod": "PATCH", + "description": "Patches the specified subnetwork with the data included in the request. Only the following fields within the subnetwork resource can be specified in the request: secondary_ip_range and allow_subnet_cidr_routes_overlap. It is also mandatory to specify the current fingeprint of the subnetwork resource being patched.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "subnetwork": { + "type": "string", + "description": "Name of the Subnetwork resource to patch.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "subnetwork" + ], + "request": { + "$ref": "Subnetwork" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setIamPolicy": { "id": "compute.subnetworks.setIamPolicy", "path": "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", @@ -28795,6 +31778,46 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "setQuicOverride": { + "id": "compute.targetHttpsProxies.setQuicOverride", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", + "httpMethod": "POST", + "description": "Sets the QUIC override policy for TargetHttpsProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. The name should conform to RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "request": { + "$ref": "TargetHttpsProxiesSetQuicOverrideRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setSslCertificates": { "id": "compute.targetHttpsProxies.setSslCertificates", "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", @@ -28836,6 +31859,46 @@ "https://www.googleapis.com/auth/compute" ] }, + "setSslPolicy": { + "id": "compute.targetHttpsProxies.setSslPolicy", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", + "httpMethod": "POST", + "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "request": { + "$ref": "SslPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setUrlMap": { "id": "compute.targetHttpsProxies.setUrlMap", "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", @@ -30059,6 +33122,46 @@ "https://www.googleapis.com/auth/compute" ] }, + "setSslPolicy": { + "id": "compute.targetSslProxies.setSslPolicy", + "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", + "httpMethod": "POST", + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. 
They do not affect the connection between the load balancer and the backends.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "targetSslProxy": { + "type": "string", + "description": "Name of the TargetSslProxy resource whose SSL policy is to be set. 
The name must be 1-63 characters long, and comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "request": { + "$ref": "SslPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.targetSslProxies.testIamPermissions", "path": "{project}/global/targetSslProxies/{resource}/testIamPermissions", @@ -30575,6 +33678,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "setLabels": { + "id": "compute.targetVpnGateways.setLabels", + "path": "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on a TargetVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.targetVpnGateways.testIamPermissions", "path": "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions", @@ -31213,6 +34365,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "setLabels": { + "id": "compute.vpnTunnels.setLabels", + "path": "{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on a VpnTunnel. To learn more about labels, read the Labeling Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.vpnTunnels.testIamPermissions", "path": "{project}/regions/{region}/vpnTunnels/{resource}/testIamPermissions", diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go index f9e1af4b0bb..ee8cc7254f0 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go @@ -94,6 +94,7 @@ func New(client *http.Client) (*Service, error) { s.InterconnectAttachments = NewInterconnectAttachmentsService(s) s.InterconnectLocations = NewInterconnectLocationsService(s) s.Interconnects = NewInterconnectsService(s) + s.LicenseCodes = NewLicenseCodesService(s) s.Licenses = NewLicensesService(s) s.MachineTypes = NewMachineTypesService(s) s.Networks = NewNetworksService(s) @@ -110,6 
+111,7 @@ func New(client *http.Client) (*Service, error) { s.SecurityPolicies = NewSecurityPoliciesService(s) s.Snapshots = NewSnapshotsService(s) s.SslCertificates = NewSslCertificatesService(s) + s.SslPolicies = NewSslPoliciesService(s) s.Subnetworks = NewSubnetworksService(s) s.TargetHttpProxies = NewTargetHttpProxiesService(s) s.TargetHttpsProxies = NewTargetHttpsProxiesService(s) @@ -176,6 +178,8 @@ type Service struct { Interconnects *InterconnectsService + LicenseCodes *LicenseCodesService + Licenses *LicensesService MachineTypes *MachineTypesService @@ -208,6 +212,8 @@ type Service struct { SslCertificates *SslCertificatesService + SslPolicies *SslPoliciesService + Subnetworks *SubnetworksService TargetHttpProxies *TargetHttpProxiesService @@ -447,6 +453,15 @@ type InterconnectsService struct { s *Service } +func NewLicenseCodesService(s *Service) *LicenseCodesService { + rs := &LicenseCodesService{s: s} + return rs +} + +type LicenseCodesService struct { + s *Service +} + func NewLicensesService(s *Service) *LicensesService { rs := &LicensesService{s: s} return rs @@ -591,6 +606,15 @@ type SslCertificatesService struct { s *Service } +func NewSslPoliciesService(s *Service) *SslPoliciesService { + rs := &SslPoliciesService{s: s} + return rs +} + +type SslPoliciesService struct { + s *Service +} + func NewSubnetworksService(s *Service) *SubnetworksService { rs := &SubnetworksService{s: s} return rs @@ -729,12 +753,13 @@ type AcceleratorConfig struct { } func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorConfig - raw := noMethod(*s) + type NoMethod AcceleratorConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AcceleratorType: An Accelerator Type resource. +// AcceleratorType: An Accelerator Type resource. 
(== resource_for +// beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==) type AcceleratorType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -794,8 +819,8 @@ type AcceleratorType struct { } func (s *AcceleratorType) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorType - raw := noMethod(*s) + type NoMethod AcceleratorType + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -848,8 +873,8 @@ type AcceleratorTypeAggregatedList struct { } func (s *AcceleratorTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeAggregatedList - raw := noMethod(*s) + type NoMethod AcceleratorTypeAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -863,9 +888,13 @@ type AcceleratorTypeAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -876,7 +905,9 @@ type AcceleratorTypeAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -907,8 +938,8 @@ type AcceleratorTypeAggregatedListWarning struct { } func (s *AcceleratorTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeAggregatedListWarning - raw := noMethod(*s) + type NoMethod AcceleratorTypeAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -944,8 
+975,8 @@ type AcceleratorTypeAggregatedListWarningData struct { } func (s *AcceleratorTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeAggregatedListWarningData - raw := noMethod(*s) + type NoMethod AcceleratorTypeAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -998,8 +1029,8 @@ type AcceleratorTypeList struct { } func (s *AcceleratorTypeList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeList - raw := noMethod(*s) + type NoMethod AcceleratorTypeList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1013,9 +1044,13 @@ type AcceleratorTypeListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1026,7 +1061,9 @@ type AcceleratorTypeListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1057,8 +1094,8 @@ type AcceleratorTypeListWarning struct { } func (s *AcceleratorTypeListWarning) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeListWarning - raw := noMethod(*s) + type NoMethod AcceleratorTypeListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1094,8 +1131,8 @@ type AcceleratorTypeListWarningData struct { } func (s *AcceleratorTypeListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeListWarningData - raw := noMethod(*s) + 
type NoMethod AcceleratorTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1127,8 +1164,8 @@ type AcceleratorTypesScopedList struct { } func (s *AcceleratorTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedList - raw := noMethod(*s) + type NoMethod AcceleratorTypesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1142,9 +1179,13 @@ type AcceleratorTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1155,7 +1196,9 @@ type AcceleratorTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1186,8 +1229,8 @@ type AcceleratorTypesScopedListWarning struct { } func (s *AcceleratorTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedListWarning - raw := noMethod(*s) + type NoMethod AcceleratorTypesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1223,8 +1266,8 @@ type AcceleratorTypesScopedListWarningData struct { } func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedListWarningData - raw := noMethod(*s) + type NoMethod AcceleratorTypesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-1247,6 +1290,15 @@ type AccessConfig struct { // live in the same region as the zone of the instance. NatIP string `json:"natIP,omitempty"` + // PublicPtrDomainName: The DNS domain name for the public PTR record. + // This field can only be set when the set_public_ptr field is enabled. + PublicPtrDomainName string `json:"publicPtrDomainName,omitempty"` + + // SetPublicPtr: Specifies whether a public DNS ?PTR? record should be + // created to map the external IP address of the instance to a DNS + // domain name. + SetPublicPtr bool `json:"setPublicPtr,omitempty"` + // Type: The type of configuration. The default and only option is // ONE_TO_ONE_NAT. // @@ -1272,18 +1324,20 @@ type AccessConfig struct { } func (s *AccessConfig) MarshalJSON() ([]byte, error) { - type noMethod AccessConfig - raw := noMethod(*s) + type NoMethod AccessConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Address: A reserved address resource. +// Address: A reserved address resource. (== resource_for beta.addresses +// ==) (== resource_for v1.addresses ==) (== resource_for +// beta.globalAddresses ==) (== resource_for v1.globalAddresses ==) type Address struct { - // Address: The static external IP address represented by this resource. + // Address: The static IP address represented by this resource. Address string `json:"address,omitempty"` - // AddressType: The type of address to reserve. If unspecified, defaults - // to EXTERNAL. + // AddressType: The type of address to reserve, either INTERNAL or + // EXTERNAL. If unspecified, defaults to EXTERNAL. // // Possible values: // "EXTERNAL" @@ -1349,21 +1403,21 @@ type Address struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] The status of the address, which can be either - // IN_USE or RESERVED. An address that is RESERVED is currently reserved - // and available to use. 
An IN_USE address is currently being used by - // another resource and is not available. + // Status: [Output Only] The status of the address, which can be one of + // RESERVING, RESERVED, or IN_USE. An address that is RESERVING is + // currently in the process of being reserved. A RESERVED address is + // currently reserved and available to use. An IN_USE address is + // currently being used by another resource and is not available. // // Possible values: // "IN_USE" // "RESERVED" Status string `json:"status,omitempty"` - // Subnetwork: For external addresses, this field should not be - // used. - // - // The URL of the subnetwork in which to reserve the address. If an IP - // address is specified, it must be within the subnetwork's IP range. + // Subnetwork: The URL of the subnetwork in which to reserve the + // address. If an IP address is specified, it must be within the + // subnetwork's IP range. This field can only be used with INTERNAL type + // with GCE_ENDPOINT/DNS_RESOLVER purposes. 
Subnetwork string `json:"subnetwork,omitempty"` // Users: [Output Only] The URLs of the resources that are using this @@ -1392,8 +1446,8 @@ type Address struct { } func (s *Address) MarshalJSON() ([]byte, error) { - type noMethod Address - raw := noMethod(*s) + type NoMethod Address + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1445,8 +1499,8 @@ type AddressAggregatedList struct { } func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AddressAggregatedList - raw := noMethod(*s) + type NoMethod AddressAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1460,9 +1514,13 @@ type AddressAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1473,7 +1531,9 @@ type AddressAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1504,8 +1564,8 @@ type AddressAggregatedListWarning struct { } func (s *AddressAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AddressAggregatedListWarning - raw := noMethod(*s) + type NoMethod AddressAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1541,8 +1601,8 @@ type AddressAggregatedListWarningData struct { } func (s *AddressAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AddressAggregatedListWarningData - 
raw := noMethod(*s) + type NoMethod AddressAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1595,8 +1655,8 @@ type AddressList struct { } func (s *AddressList) MarshalJSON() ([]byte, error) { - type noMethod AddressList - raw := noMethod(*s) + type NoMethod AddressList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1609,9 +1669,13 @@ type AddressListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1622,7 +1686,9 @@ type AddressListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1653,8 +1719,8 @@ type AddressListWarning struct { } func (s *AddressListWarning) MarshalJSON() ([]byte, error) { - type noMethod AddressListWarning - raw := noMethod(*s) + type NoMethod AddressListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1690,8 +1756,8 @@ type AddressListWarningData struct { } func (s *AddressListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AddressListWarningData - raw := noMethod(*s) + type NoMethod AddressListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1721,8 +1787,8 @@ type AddressesScopedList struct { } func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedList - raw := noMethod(*s) + type NoMethod 
AddressesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1736,9 +1802,13 @@ type AddressesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1749,7 +1819,9 @@ type AddressesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1780,8 +1852,8 @@ type AddressesScopedListWarning struct { } func (s *AddressesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedListWarning - raw := noMethod(*s) + type NoMethod AddressesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1817,8 +1889,8 @@ type AddressesScopedListWarningData struct { } func (s *AddressesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedListWarningData - raw := noMethod(*s) + type NoMethod AddressesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1856,8 +1928,8 @@ type AliasIpRange struct { } func (s *AliasIpRange) MarshalJSON() ([]byte, error) { - type noMethod AliasIpRange - raw := noMethod(*s) + type NoMethod AliasIpRange + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1951,7 +2023,8 @@ type AttachedDisk struct { // Source: Specifies a valid partial or full URL to an existing // Persistent Disk resource. 
When creating a new instance, one of - // initializeParams.sourceImage or disks.source is required. + // initializeParams.sourceImage or disks.source is required except for + // local SSD. // // If desired, you can also attach existing non-root persistent disks // using this property. This field is only applicable for persistent @@ -1987,8 +2060,8 @@ type AttachedDisk struct { } func (s *AttachedDisk) MarshalJSON() ([]byte, error) { - type noMethod AttachedDisk - raw := noMethod(*s) + type NoMethod AttachedDisk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2034,7 +2107,7 @@ type AttachedDiskInitializeParams struct { // SourceImage: The source image to create this disk. When creating a // new instance, one of initializeParams.sourceImage or disks.source is - // required. + // required except for local SSD. // // To create a disk with one of the public operating system images, // specify the image by its family name. For example, specify @@ -2048,17 +2121,17 @@ type AttachedDiskInitializeParams struct { // // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD // - // To create a disk with a private image that you created, specify the + // To create a disk with a custom image that you created, specify the // image name in the following format: // - // global/images/my-private-image + // global/images/my-custom-image // - // You can also specify a private image by its image family, which + // You can also specify a custom image by its image family, which // returns the latest version of the image in that family. Replace the // image name with // family/family-name: // - // global/images/family/my-private-family + // global/images/family/my-image-family // // If the source image is deleted later, this field will not be set. 
SourceImage string `json:"sourceImage,omitempty"` @@ -2090,8 +2163,8 @@ type AttachedDiskInitializeParams struct { } func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { - type noMethod AttachedDiskInitializeParams - raw := noMethod(*s) + type NoMethod AttachedDiskInitializeParams + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2149,8 +2222,8 @@ type AuditConfig struct { } func (s *AuditConfig) MarshalJSON() ([]byte, error) { - type noMethod AuditConfig - raw := noMethod(*s) + type NoMethod AuditConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2197,8 +2270,8 @@ type AuditLogConfig struct { } func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { - type noMethod AuditLogConfig - raw := noMethod(*s) + type NoMethod AuditLogConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2234,15 +2307,18 @@ type AuthorizationLoggingOptions struct { } func (s *AuthorizationLoggingOptions) MarshalJSON() ([]byte, error) { - type noMethod AuthorizationLoggingOptions - raw := noMethod(*s) + type NoMethod AuthorizationLoggingOptions + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Autoscaler: Represents an Autoscaler resource. Autoscalers allow you // to automatically scale virtual machine instances in managed instance // groups according to an autoscaling policy that you define. For more -// information, read Autoscaling Groups of Instances. +// information, read Autoscaling Groups of Instances. (== resource_for +// beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== +// resource_for beta.regionAutoscalers ==) (== resource_for +// v1.regionAutoscalers ==) type Autoscaler struct { // AutoscalingPolicy: The configuration parameters for the autoscaling // algorithm. 
You can define one or more of the policies for an @@ -2330,8 +2406,8 @@ type Autoscaler struct { } func (s *Autoscaler) MarshalJSON() ([]byte, error) { - type noMethod Autoscaler - raw := noMethod(*s) + type NoMethod Autoscaler + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2383,8 +2459,8 @@ type AutoscalerAggregatedList struct { } func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerAggregatedList - raw := noMethod(*s) + type NoMethod AutoscalerAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2398,9 +2474,13 @@ type AutoscalerAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -2411,7 +2491,9 @@ type AutoscalerAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -2442,8 +2524,8 @@ type AutoscalerAggregatedListWarning struct { } func (s *AutoscalerAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerAggregatedListWarning - raw := noMethod(*s) + type NoMethod AutoscalerAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2479,8 +2561,8 @@ type AutoscalerAggregatedListWarningData struct { } func (s *AutoscalerAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerAggregatedListWarningData - raw := 
noMethod(*s) + type NoMethod AutoscalerAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2533,8 +2615,8 @@ type AutoscalerList struct { } func (s *AutoscalerList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerList - raw := noMethod(*s) + type NoMethod AutoscalerList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2547,9 +2629,13 @@ type AutoscalerListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -2560,7 +2646,9 @@ type AutoscalerListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -2591,8 +2679,8 @@ type AutoscalerListWarning struct { } func (s *AutoscalerListWarning) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerListWarning - raw := noMethod(*s) + type NoMethod AutoscalerListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2628,8 +2716,8 @@ type AutoscalerListWarningData struct { } func (s *AutoscalerListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerListWarningData - raw := noMethod(*s) + type NoMethod AutoscalerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2675,8 +2763,8 @@ type AutoscalerStatusDetails struct { } func (s *AutoscalerStatusDetails) MarshalJSON() ([]byte, error) { - type noMethod 
AutoscalerStatusDetails - raw := noMethod(*s) + type NoMethod AutoscalerStatusDetails + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2707,8 +2795,8 @@ type AutoscalersScopedList struct { } func (s *AutoscalersScopedList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedList - raw := noMethod(*s) + type NoMethod AutoscalersScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2722,9 +2810,13 @@ type AutoscalersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -2735,7 +2827,9 @@ type AutoscalersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -2766,8 +2860,8 @@ type AutoscalersScopedListWarning struct { } func (s *AutoscalersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedListWarning - raw := noMethod(*s) + type NoMethod AutoscalersScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2803,8 +2897,8 @@ type AutoscalersScopedListWarningData struct { } func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedListWarningData - raw := noMethod(*s) + type NoMethod AutoscalersScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2867,8 +2961,8 @@ type 
AutoscalingPolicy struct { } func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicy - raw := noMethod(*s) + type NoMethod AutoscalingPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2908,18 +3002,18 @@ type AutoscalingPolicyCpuUtilization struct { } func (s *AutoscalingPolicyCpuUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyCpuUtilization - raw := noMethod(*s) + type NoMethod AutoscalingPolicyCpuUtilization + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyCpuUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyCpuUtilization + type NoMethod AutoscalingPolicyCpuUtilization var s1 struct { UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -2933,7 +3027,7 @@ type AutoscalingPolicyCustomMetricUtilization struct { // Filter: A filter string, compatible with a Stackdriver Monitoring // filter string for TimeSeries.list API call. This filter is used to // select a specific TimeSeries for the purpose of autoscaling and to - // determine whether the metric is exporting per-instance or global + // determine whether the metric is exporting per-instance or per-group // data. // // For the filter to be valid for autoscaling purposes, the following @@ -2951,8 +3045,8 @@ type AutoscalingPolicyCustomMetricUtilization struct { // If the resource type is any other value, the autoscaler expects this // metric to contain values that apply to the entire autoscaled instance // group and resource label filtering can be performed to point - // autoscaler at the correct TimeSeries to scale upon. This is / called - // a global metric for the purpose of autoscaling. 
+ // autoscaler at the correct TimeSeries to scale upon. This is called a + // per-group metric for the purpose of autoscaling. // // If not specified, the type defaults to gce_instance. // @@ -2964,15 +3058,12 @@ type AutoscalingPolicyCustomMetricUtilization struct { Filter string `json:"filter,omitempty"` // Metric: The identifier (type) of the Stackdriver Monitoring metric. - // The metric cannot have negative values and should be a utilization - // metric, which means that the number of virtual machines handling - // requests should increase or decrease proportionally to the - // metric. + // The metric cannot have negative values. // // The metric must have a value type of INT64 or DOUBLE. Metric string `json:"metric,omitempty"` - // SingleInstanceAssignment: If scaling is based on a global metric + // SingleInstanceAssignment: If scaling is based on a per-group metric // value that represents the total amount of work to be done or resource // usage, set this value to an amount assigned for a single instance of // the scaled group. Autoscaler will keep the number of instances @@ -2991,7 +3082,9 @@ type AutoscalingPolicyCustomMetricUtilization struct { SingleInstanceAssignment float64 `json:"singleInstanceAssignment,omitempty"` // UtilizationTarget: The target value of the metric that autoscaler - // should maintain. This must be a positive value. + // should maintain. This must be a positive value. A utilization metric + // scales number of virtual machines handling requests to increase or + // decrease proportionally to the metric. // // For example, a good metric to use as a utilization_target is // compute.googleapis.com/instance/network/received_bytes_count. 
The @@ -3028,19 +3121,19 @@ type AutoscalingPolicyCustomMetricUtilization struct { } func (s *AutoscalingPolicyCustomMetricUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyCustomMetricUtilization - raw := noMethod(*s) + type NoMethod AutoscalingPolicyCustomMetricUtilization + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyCustomMetricUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyCustomMetricUtilization + type NoMethod AutoscalingPolicyCustomMetricUtilization var s1 struct { SingleInstanceAssignment gensupport.JSONFloat64 `json:"singleInstanceAssignment"` UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -3077,18 +3170,18 @@ type AutoscalingPolicyLoadBalancingUtilization struct { } func (s *AutoscalingPolicyLoadBalancingUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyLoadBalancingUtilization - raw := noMethod(*s) + type NoMethod AutoscalingPolicyLoadBalancingUtilization + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyLoadBalancingUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyLoadBalancingUtilization + type NoMethod AutoscalingPolicyLoadBalancingUtilization var s1 struct { UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -3198,20 +3291,20 @@ type Backend struct { } func (s *Backend) MarshalJSON() ([]byte, error) { - type noMethod Backend - raw := noMethod(*s) + type NoMethod Backend + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, 
s.NullFields) } func (s *Backend) UnmarshalJSON(data []byte) error { - type noMethod Backend + type NoMethod Backend var s1 struct { CapacityScaler gensupport.JSONFloat64 `json:"capacityScaler"` MaxRatePerInstance gensupport.JSONFloat64 `json:"maxRatePerInstance"` MaxUtilization gensupport.JSONFloat64 `json:"maxUtilization"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -3227,6 +3320,9 @@ type BackendBucket struct { // BucketName: Cloud Storage bucket name. BucketName string `json:"bucketName,omitempty"` + // CdnPolicy: Cloud CDN Coniguration for this BackendBucket. + CdnPolicy *BackendBucketCdnPolicy `json:"cdnPolicy,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -3279,8 +3375,50 @@ type BackendBucket struct { } func (s *BackendBucket) MarshalJSON() ([]byte, error) { - type noMethod BackendBucket - raw := noMethod(*s) + type NoMethod BackendBucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendBucketCdnPolicy: Message containing Cloud CDN configuration +// for a backend bucket. +type BackendBucketCdnPolicy struct { + // SignedUrlCacheMaxAgeSec: Number of seconds up to which the response + // to a signed URL request will be cached in the CDN. After this time + // period, the Signed URL will be revalidated before being served. + // Defaults to 1hr (3600s). If this field is set, Cloud CDN will + // internally act as though all responses from this bucket had a + // ?Cache-Control: public, max-age=[TTL]? header, regardless of any + // existing Cache-Control header. The actual headers served in responses + // will not be altered. 
+ SignedUrlCacheMaxAgeSec int64 `json:"signedUrlCacheMaxAgeSec,omitempty,string"` + + // SignedUrlKeyNames: [Output Only] Names of the keys currently + // configured for Cloud CDN Signed URL on this backend bucket. + SignedUrlKeyNames []string `json:"signedUrlKeyNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "SignedUrlCacheMaxAgeSec") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SignedUrlCacheMaxAgeSec") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *BackendBucketCdnPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendBucketCdnPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3332,8 +3470,8 @@ type BackendBucketList struct { } func (s *BackendBucketList) MarshalJSON() ([]byte, error) { - type noMethod BackendBucketList - raw := noMethod(*s) + type NoMethod BackendBucketList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3347,9 +3485,13 @@ type BackendBucketListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -3360,7 +3502,9 @@ type BackendBucketListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -3391,8 +3535,8 @@ type BackendBucketListWarning struct { } func (s *BackendBucketListWarning) MarshalJSON() ([]byte, error) { - type noMethod BackendBucketListWarning - raw := noMethod(*s) + type NoMethod BackendBucketListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3428,13 +3572,15 @@ type BackendBucketListWarningData struct { } func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { - type noMethod BackendBucketListWarningData - raw := noMethod(*s) + type NoMethod BackendBucketListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BackendService: A 
BackendService resource. This resource defines a -// group of backend virtual machines and their serving capacity. +// group of backend virtual machines and their serving capacity. (== +// resource_for v1.backendService ==) (== resource_for +// beta.backendService ==) type BackendService struct { // AffinityCookieTtlSec: Lifetime of cookies in seconds if // session_affinity is GENERATED_COOKIE. If set to 0, the cookie is @@ -3601,8 +3747,8 @@ type BackendService struct { } func (s *BackendService) MarshalJSON() ([]byte, error) { - type noMethod BackendService - raw := noMethod(*s) + type NoMethod BackendService + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3655,8 +3801,8 @@ type BackendServiceAggregatedList struct { } func (s *BackendServiceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceAggregatedList - raw := noMethod(*s) + type NoMethod BackendServiceAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3670,9 +3816,13 @@ type BackendServiceAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -3683,7 +3833,9 @@ type BackendServiceAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -3714,8 +3866,8 @@ type BackendServiceAggregatedListWarning struct { } func (s *BackendServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { - type 
noMethod BackendServiceAggregatedListWarning - raw := noMethod(*s) + type NoMethod BackendServiceAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3751,8 +3903,8 @@ type BackendServiceAggregatedListWarningData struct { } func (s *BackendServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceAggregatedListWarningData - raw := noMethod(*s) + type NoMethod BackendServiceAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3762,6 +3914,20 @@ type BackendServiceCdnPolicy struct { // CacheKeyPolicy: The CacheKeyPolicy for this CdnPolicy. CacheKeyPolicy *CacheKeyPolicy `json:"cacheKeyPolicy,omitempty"` + // SignedUrlCacheMaxAgeSec: Number of seconds up to which the response + // to a signed URL request will be cached in the CDN. After this time + // period, the Signed URL will be revalidated before being served. + // Defaults to 1hr (3600s). If this field is set, Cloud CDN will + // internally act as though all responses from this backend had a + // ?Cache-Control: public, max-age=[TTL]? header, regardless of any + // existing Cache-Control header. The actual headers served in responses + // will not be altered. + SignedUrlCacheMaxAgeSec int64 `json:"signedUrlCacheMaxAgeSec,omitempty,string"` + + // SignedUrlKeyNames: [Output Only] Names of the keys currently + // configured for Cloud CDN Signed URL on this backend service. + SignedUrlKeyNames []string `json:"signedUrlKeyNames,omitempty"` + // ForceSendFields is a list of field names (e.g. "CacheKeyPolicy") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, @@ -3781,8 +3947,8 @@ type BackendServiceCdnPolicy struct { } func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceCdnPolicy - raw := noMethod(*s) + type NoMethod BackendServiceCdnPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3815,8 +3981,8 @@ type BackendServiceGroupHealth struct { } func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceGroupHealth - raw := noMethod(*s) + type NoMethod BackendServiceGroupHealth + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3850,8 +4016,8 @@ type BackendServiceIAP struct { } func (s *BackendServiceIAP) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceIAP - raw := noMethod(*s) + type NoMethod BackendServiceIAP + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3904,8 +4070,8 @@ type BackendServiceList struct { } func (s *BackendServiceList) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceList - raw := noMethod(*s) + type NoMethod BackendServiceList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3919,9 +4085,13 @@ type BackendServiceListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -3932,7 +4102,9 @@ type BackendServiceListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // 
"UNREACHABLE" Code string `json:"code,omitempty"` @@ -3963,8 +4135,8 @@ type BackendServiceListWarning struct { } func (s *BackendServiceListWarning) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceListWarning - raw := noMethod(*s) + type NoMethod BackendServiceListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4000,8 +4172,8 @@ type BackendServiceListWarningData struct { } func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceListWarningData - raw := noMethod(*s) + type NoMethod BackendServiceListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4032,8 +4204,8 @@ type BackendServicesScopedList struct { } func (s *BackendServicesScopedList) MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedList - raw := noMethod(*s) + type NoMethod BackendServicesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4047,9 +4219,13 @@ type BackendServicesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4060,7 +4236,9 @@ type BackendServicesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4091,8 +4269,8 @@ type BackendServicesScopedListWarning struct { } func (s *BackendServicesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod 
BackendServicesScopedListWarning - raw := noMethod(*s) + type NoMethod BackendServicesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4128,8 +4306,8 @@ type BackendServicesScopedListWarningData struct { } func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedListWarningData - raw := noMethod(*s) + type NoMethod BackendServicesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4192,8 +4370,8 @@ type Binding struct { } func (s *Binding) MarshalJSON() ([]byte, error) { - type noMethod Binding - raw := noMethod(*s) + type NoMethod Binding + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4222,8 +4400,8 @@ type CacheInvalidationRule struct { } func (s *CacheInvalidationRule) MarshalJSON() ([]byte, error) { - type noMethod CacheInvalidationRule - raw := noMethod(*s) + type NoMethod CacheInvalidationRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4275,8 +4453,8 @@ type CacheKeyPolicy struct { } func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { - type noMethod CacheKeyPolicy - raw := noMethod(*s) + type NoMethod CacheKeyPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4289,7 +4467,8 @@ func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { // Committed use discounts are subject to Google Cloud Platform's // Service Specific Terms. By purchasing a committed use discount, you // agree to these terms. Committed use discounts will not renew, so you -// must purchase a new commitment to continue receiving discounts. +// must purchase a new commitment to continue receiving discounts. 
(== +// resource_for beta.commitments ==) (== resource_for v1.commitments ==) type Commitment struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -4383,8 +4562,8 @@ type Commitment struct { } func (s *Commitment) MarshalJSON() ([]byte, error) { - type noMethod Commitment - raw := noMethod(*s) + type NoMethod Commitment + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4436,8 +4615,8 @@ type CommitmentAggregatedList struct { } func (s *CommitmentAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentAggregatedList - raw := noMethod(*s) + type NoMethod CommitmentAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4451,9 +4630,13 @@ type CommitmentAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4464,7 +4647,9 @@ type CommitmentAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4495,8 +4680,8 @@ type CommitmentAggregatedListWarning struct { } func (s *CommitmentAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod CommitmentAggregatedListWarning - raw := noMethod(*s) + type NoMethod CommitmentAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4532,8 +4717,8 @@ type CommitmentAggregatedListWarningData struct { } func (s 
*CommitmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod CommitmentAggregatedListWarningData - raw := noMethod(*s) + type NoMethod CommitmentAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4586,8 +4771,8 @@ type CommitmentList struct { } func (s *CommitmentList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentList - raw := noMethod(*s) + type NoMethod CommitmentList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4600,9 +4785,13 @@ type CommitmentListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4613,7 +4802,9 @@ type CommitmentListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4644,8 +4835,8 @@ type CommitmentListWarning struct { } func (s *CommitmentListWarning) MarshalJSON() ([]byte, error) { - type noMethod CommitmentListWarning - raw := noMethod(*s) + type NoMethod CommitmentListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4681,8 +4872,8 @@ type CommitmentListWarningData struct { } func (s *CommitmentListWarningData) MarshalJSON() ([]byte, error) { - type noMethod CommitmentListWarningData - raw := noMethod(*s) + type NoMethod CommitmentListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4713,8 +4904,8 @@ 
type CommitmentsScopedList struct { } func (s *CommitmentsScopedList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedList - raw := noMethod(*s) + type NoMethod CommitmentsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4728,9 +4919,13 @@ type CommitmentsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4741,7 +4936,9 @@ type CommitmentsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4772,8 +4969,8 @@ type CommitmentsScopedListWarning struct { } func (s *CommitmentsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedListWarning - raw := noMethod(*s) + type NoMethod CommitmentsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4809,8 +5006,8 @@ type CommitmentsScopedListWarningData struct { } func (s *CommitmentsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedListWarningData - raw := noMethod(*s) + type NoMethod CommitmentsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4877,8 +5074,8 @@ type Condition struct { } func (s *Condition) MarshalJSON() ([]byte, error) { - type noMethod Condition - raw := noMethod(*s) + type NoMethod Condition + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -4908,8 +5105,8 @@ type ConnectionDraining struct { } func (s *ConnectionDraining) MarshalJSON() ([]byte, error) { - type noMethod ConnectionDraining - raw := noMethod(*s) + type NoMethod ConnectionDraining + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4956,8 +5153,8 @@ type CustomerEncryptionKey struct { } func (s *CustomerEncryptionKey) MarshalJSON() ([]byte, error) { - type noMethod CustomerEncryptionKey - raw := noMethod(*s) + type NoMethod CustomerEncryptionKey + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4990,8 +5187,8 @@ type CustomerEncryptionKeyProtectedDisk struct { } func (s *CustomerEncryptionKeyProtectedDisk) MarshalJSON() ([]byte, error) { - type noMethod CustomerEncryptionKeyProtectedDisk - raw := noMethod(*s) + type NoMethod CustomerEncryptionKeyProtectedDisk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5051,12 +5248,13 @@ type DeprecationStatus struct { } func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { - type noMethod DeprecationStatus - raw := noMethod(*s) + type NoMethod DeprecationStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Disk: A Disk resource. +// Disk: A Disk resource. (== resource_for beta.disks ==) (== +// resource_for v1.disks ==) type Disk struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -5113,6 +5311,10 @@ type Disk struct { // text format. LastDetachTimestamp string `json:"lastDetachTimestamp,omitempty"` + // LicenseCodes: Integer license codes indicating which licenses are + // attached to this disk. + LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` + // Licenses: Any applicable publicly visible licenses. 
Licenses []string `json:"licenses,omitempty"` @@ -5158,17 +5360,17 @@ type Disk struct { // // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD // - // To create a disk with a private image that you created, specify the + // To create a disk with a custom image that you created, specify the // image name in the following format: // - // global/images/my-private-image + // global/images/my-custom-image // - // You can also specify a private image by its image family, which + // You can also specify a custom image by its image family, which // returns the latest version of the image in that family. Replace the // image name with // family/family-name: // - // global/images/family/my-private-family + // global/images/family/my-image-family SourceImage string `json:"sourceImage,omitempty"` // SourceImageEncryptionKey: The customer-supplied encryption key of the @@ -5256,8 +5458,8 @@ type Disk struct { } func (s *Disk) MarshalJSON() ([]byte, error) { - type noMethod Disk - raw := noMethod(*s) + type NoMethod Disk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5309,8 +5511,8 @@ type DiskAggregatedList struct { } func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod DiskAggregatedList - raw := noMethod(*s) + type NoMethod DiskAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5324,9 +5526,13 @@ type DiskAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -5337,7 +5543,9 @@ type DiskAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -5368,8 +5576,8 @@ type DiskAggregatedListWarning struct { } func (s *DiskAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskAggregatedListWarning - raw := noMethod(*s) + type NoMethod DiskAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5405,8 +5613,61 @@ type DiskAggregatedListWarningData struct { } func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskAggregatedListWarningData - raw := noMethod(*s) + type NoMethod DiskAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DiskInstantiationConfig: A specification of the desired way to +// instantiate a disk in the instance template when its created from a +// source instance. +type DiskInstantiationConfig struct { + // AutoDelete: Specifies whether the disk will be auto-deleted when the + // instance is deleted (but not when the disk is detached from the + // instance). + AutoDelete bool `json:"autoDelete,omitempty"` + + // DeviceName: Specifies the device name of the disk to which the + // configurations apply to. + DeviceName string `json:"deviceName,omitempty"` + + // InstantiateFrom: Specifies whether to include the disk and what image + // to use. + // + // Possible values: + // "ATTACH_READ_ONLY" + // "BLANK" + // "DEFAULT" + // "DO_NOT_INCLUDE" + // "IMAGE_URL" + // "SOURCE_IMAGE" + // "SOURCE_IMAGE_FAMILY" + InstantiateFrom string `json:"instantiateFrom,omitempty"` + + // SourceImage: The custom source image to be used to restore this disk + // when instantiating this instance template. 
+ SourceImage string `json:"sourceImage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutoDelete") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskInstantiationConfig) MarshalJSON() ([]byte, error) { + type NoMethod DiskInstantiationConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5459,8 +5720,8 @@ type DiskList struct { } func (s *DiskList) MarshalJSON() ([]byte, error) { - type noMethod DiskList - raw := noMethod(*s) + type NoMethod DiskList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5473,9 +5734,13 @@ type DiskListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -5486,7 +5751,9 @@ type DiskListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" 
// "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -5517,8 +5784,8 @@ type DiskListWarning struct { } func (s *DiskListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskListWarning - raw := noMethod(*s) + type NoMethod DiskListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5554,8 +5821,8 @@ type DiskListWarningData struct { } func (s *DiskListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskListWarningData - raw := noMethod(*s) + type NoMethod DiskListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5597,12 +5864,13 @@ type DiskMoveRequest struct { } func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { - type noMethod DiskMoveRequest - raw := noMethod(*s) + type NoMethod DiskMoveRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DiskType: A DiskType resource. +// DiskType: A DiskType resource. (== resource_for beta.diskTypes ==) +// (== resource_for v1.diskTypes ==) type DiskType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -5663,8 +5931,8 @@ type DiskType struct { } func (s *DiskType) MarshalJSON() ([]byte, error) { - type noMethod DiskType - raw := noMethod(*s) + type NoMethod DiskType + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5716,8 +5984,8 @@ type DiskTypeAggregatedList struct { } func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeAggregatedList - raw := noMethod(*s) + type NoMethod DiskTypeAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5731,9 +5999,13 @@ type DiskTypeAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -5744,7 +6016,9 @@ type DiskTypeAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -5775,8 +6049,8 @@ type DiskTypeAggregatedListWarning struct { } func (s *DiskTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeAggregatedListWarning - raw := noMethod(*s) + type NoMethod DiskTypeAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5812,8 +6086,8 @@ type DiskTypeAggregatedListWarningData struct { } func (s *DiskTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeAggregatedListWarningData - raw := noMethod(*s) + type NoMethod DiskTypeAggregatedListWarningData + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5866,8 +6140,8 @@ type DiskTypeList struct { } func (s *DiskTypeList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeList - raw := noMethod(*s) + type NoMethod DiskTypeList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5880,9 +6154,13 @@ type DiskTypeListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -5893,7 +6171,9 @@ type DiskTypeListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -5924,8 +6204,8 @@ type DiskTypeListWarning struct { } func (s *DiskTypeListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeListWarning - raw := noMethod(*s) + type NoMethod DiskTypeListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5961,8 +6241,8 @@ type DiskTypeListWarningData struct { } func (s *DiskTypeListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeListWarningData - raw := noMethod(*s) + type NoMethod DiskTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5992,8 +6272,8 @@ type DiskTypesScopedList struct { } func (s *DiskTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedList - raw := noMethod(*s) + type NoMethod DiskTypesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -6007,9 +6287,13 @@ type DiskTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -6020,7 +6304,9 @@ type DiskTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -6051,8 +6337,8 @@ type DiskTypesScopedListWarning struct { } func (s *DiskTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedListWarning - raw := noMethod(*s) + type NoMethod DiskTypesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6088,8 +6374,8 @@ type DiskTypesScopedListWarningData struct { } func (s *DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedListWarningData - raw := noMethod(*s) + type NoMethod DiskTypesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6116,8 +6402,8 @@ type DisksResizeRequest struct { } func (s *DisksResizeRequest) MarshalJSON() ([]byte, error) { - type noMethod DisksResizeRequest - raw := noMethod(*s) + type NoMethod DisksResizeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6147,8 +6433,8 @@ type DisksScopedList struct { } func (s *DisksScopedList) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedList - raw := noMethod(*s) + type NoMethod DisksScopedList + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6162,9 +6448,13 @@ type DisksScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -6175,7 +6465,9 @@ type DisksScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -6206,8 +6498,8 @@ type DisksScopedListWarning struct { } func (s *DisksScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedListWarning - raw := noMethod(*s) + type NoMethod DisksScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6243,8 +6535,8 @@ type DisksScopedListWarningData struct { } func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedListWarningData - raw := noMethod(*s) + type NoMethod DisksScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6269,8 +6561,8 @@ type DistributionPolicy struct { } func (s *DistributionPolicy) MarshalJSON() ([]byte, error) { - type noMethod DistributionPolicy - raw := noMethod(*s) + type NoMethod DistributionPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6298,8 +6590,8 @@ type DistributionPolicyZoneConfiguration struct { } func (s *DistributionPolicyZoneConfiguration) MarshalJSON() ([]byte, error) { - type noMethod DistributionPolicyZoneConfiguration 
- raw := noMethod(*s) + type NoMethod DistributionPolicyZoneConfiguration + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6348,8 +6640,8 @@ type Expr struct { } func (s *Expr) MarshalJSON() ([]byte, error) { - type noMethod Expr - raw := noMethod(*s) + type NoMethod Expr + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6476,10 +6768,11 @@ type Firewall struct { // applies to all instances on the specified network. TargetServiceAccounts []string `json:"targetServiceAccounts,omitempty"` - // TargetTags: A list of instance tags indicating sets of instances - // located in the network that may make network connections as specified - // in allowed[]. If no targetTags are specified, the firewall rule - // applies to all instances on the specified network. + // TargetTags: A list of tags that controls which instances the firewall + // rule applies to. If targetTags are specified, then the firewall rule + // applies only to instances in the VPC network that have one of those + // tags. If no targetTags are specified, the firewall rule applies to + // all instances on the specified network. 
TargetTags []string `json:"targetTags,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -6504,8 +6797,8 @@ type Firewall struct { } func (s *Firewall) MarshalJSON() ([]byte, error) { - type noMethod Firewall - raw := noMethod(*s) + type NoMethod Firewall + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6542,8 +6835,8 @@ type FirewallAllowed struct { } func (s *FirewallAllowed) MarshalJSON() ([]byte, error) { - type noMethod FirewallAllowed - raw := noMethod(*s) + type NoMethod FirewallAllowed + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6580,8 +6873,8 @@ type FirewallDenied struct { } func (s *FirewallDenied) MarshalJSON() ([]byte, error) { - type noMethod FirewallDenied - raw := noMethod(*s) + type NoMethod FirewallDenied + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6634,8 +6927,8 @@ type FirewallList struct { } func (s *FirewallList) MarshalJSON() ([]byte, error) { - type noMethod FirewallList - raw := noMethod(*s) + type NoMethod FirewallList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6648,9 +6941,13 @@ type FirewallListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -6661,7 +6958,9 @@ type FirewallListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ 
-6692,8 +6991,8 @@ type FirewallListWarning struct { } func (s *FirewallListWarning) MarshalJSON() ([]byte, error) { - type noMethod FirewallListWarning - raw := noMethod(*s) + type NoMethod FirewallListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6729,23 +7028,32 @@ type FirewallListWarningData struct { } func (s *FirewallListWarningData) MarshalJSON() ([]byte, error) { - type noMethod FirewallListWarningData - raw := noMethod(*s) + type NoMethod FirewallListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // FixedOrPercent: Encapsulates numeric value that can be either // absolute or relative. type FixedOrPercent struct { - // Calculated: [Output Only] Absolute value calculated based on mode: - // mode = fixed -> calculated = fixed = percent -> calculated = - // ceiling(percent/100 * base_value) + // Calculated: [Output Only] Absolute value of VM instances calculated + // based on the specific mode. + // + // + // - If the value is fixed, then the caculated value is equal to the + // fixed value. + // - If the value is a percent, then the calculated value is percent/100 + // * targetSize. For example, the calculated value of a 80% of a managed + // instance group with 150 instances would be (80/100 * 150) = 120 VM + // instances. If there is a remainder, the number is rounded up. Calculated int64 `json:"calculated,omitempty"` - // Fixed: fixed must be non-negative. + // Fixed: Specifies a fixed number of VM instances. This must be a + // positive integer. Fixed int64 `json:"fixed,omitempty"` - // Percent: percent must belong to [0, 100]. + // Percent: Specifies a percentage of instances between 0 to 100%, + // inclusive. For example, specify 80 for 80%. Percent int64 `json:"percent,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Calculated") to @@ -6766,31 +7074,51 @@ type FixedOrPercent struct { } func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { - type noMethod FixedOrPercent - raw := noMethod(*s) + type NoMethod FixedOrPercent + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ForwardingRule: A ForwardingRule resource. A ForwardingRule resource // specifies which pool of target virtual machines to forward a packet -// to if it matches the given [IPAddress, IPProtocol, ports] tuple. +// to if it matches the given [IPAddress, IPProtocol, ports] tuple. (== +// resource_for beta.forwardingRules ==) (== resource_for +// v1.forwardingRules ==) (== resource_for beta.globalForwardingRules +// ==) (== resource_for v1.globalForwardingRules ==) (== resource_for +// beta.regionForwardingRules ==) (== resource_for +// v1.regionForwardingRules ==) type ForwardingRule struct { // IPAddress: The IP address that this forwarding rule is serving on // behalf of. // - // For global forwarding rules, the address must be a global IP. For - // regional forwarding rules, the address must live in the same region - // as the forwarding rule. By default, this field is empty and an - // ephemeral IPv4 address from the same scope (global or regional) will - // be assigned. A regional forwarding rule supports IPv4 only. A global - // forwarding rule supports either IPv4 or IPv6. + // Addresses are restricted based on the forwarding rule's load + // balancing scheme (EXTERNAL or INTERNAL) and scope (global or + // regional). + // + // When the load balancing scheme is EXTERNAL, for global forwarding + // rules, the address must be a global IP, and for regional forwarding + // rules, the address must live in the same region as the forwarding + // rule. If this field is empty, an ephemeral IPv4 address from the same + // scope (global or regional) will be assigned. A regional forwarding + // rule supports IPv4 only. 
A global forwarding rule supports either + // IPv4 or IPv6. // // When the load balancing scheme is INTERNAL, this can only be an RFC - // 1918 IP address belonging to the network/subnetwork configured for - // the forwarding rule. A reserved address cannot be used. If the field - // is empty, the IP address will be automatically allocated from the - // internal IP range of the subnetwork or network configured for this - // forwarding rule. + // 1918 IP address belonging to the network/subnet configured for the + // forwarding rule. By default, if this field is empty, an ephemeral + // internal IP address will be automatically allocated from the IP range + // of the subnet or network configured for this forwarding rule. + // + // An address can be specified either by a literal IP address or a URL + // reference to an existing Address resource. The following examples are + // all valid: + // - 100.1.2.3 + // - + // https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address + // - projects/project/regions/region/addresses/address + // - regions/region/addresses/address + // - global/addresses/address + // - address IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. 
Valid options @@ -6988,8 +7316,8 @@ type ForwardingRule struct { } func (s *ForwardingRule) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRule - raw := noMethod(*s) + type NoMethod ForwardingRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7041,8 +7369,8 @@ type ForwardingRuleAggregatedList struct { } func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleAggregatedList - raw := noMethod(*s) + type NoMethod ForwardingRuleAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7056,9 +7384,13 @@ type ForwardingRuleAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7069,7 +7401,9 @@ type ForwardingRuleAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7100,8 +7434,8 @@ type ForwardingRuleAggregatedListWarning struct { } func (s *ForwardingRuleAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleAggregatedListWarning - raw := noMethod(*s) + type NoMethod ForwardingRuleAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7137,8 +7471,8 @@ type ForwardingRuleAggregatedListWarningData struct { } func (s *ForwardingRuleAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod 
ForwardingRuleAggregatedListWarningData - raw := noMethod(*s) + type NoMethod ForwardingRuleAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7190,8 +7524,8 @@ type ForwardingRuleList struct { } func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleList - raw := noMethod(*s) + type NoMethod ForwardingRuleList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7205,9 +7539,13 @@ type ForwardingRuleListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7218,7 +7556,9 @@ type ForwardingRuleListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7249,8 +7589,8 @@ type ForwardingRuleListWarning struct { } func (s *ForwardingRuleListWarning) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleListWarning - raw := noMethod(*s) + type NoMethod ForwardingRuleListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7286,8 +7626,8 @@ type ForwardingRuleListWarningData struct { } func (s *ForwardingRuleListWarningData) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleListWarningData - raw := noMethod(*s) + type NoMethod ForwardingRuleListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7318,8 +7658,8 @@ type 
ForwardingRulesScopedList struct { } func (s *ForwardingRulesScopedList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedList - raw := noMethod(*s) + type NoMethod ForwardingRulesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7333,9 +7673,13 @@ type ForwardingRulesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7346,7 +7690,9 @@ type ForwardingRulesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7377,8 +7723,8 @@ type ForwardingRulesScopedListWarning struct { } func (s *ForwardingRulesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedListWarning - raw := noMethod(*s) + type NoMethod ForwardingRulesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7414,8 +7760,8 @@ type ForwardingRulesScopedListWarningData struct { } func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedListWarningData - raw := noMethod(*s) + type NoMethod ForwardingRulesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7457,22 +7803,21 @@ type GlobalSetLabelsRequest struct { } func (s *GlobalSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod GlobalSetLabelsRequest - raw := 
noMethod(*s) + type NoMethod GlobalSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GuestOsFeature: Guest OS features. type GuestOsFeature struct { - // Type: The type of supported feature. Currently only - // VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the - // server might also populate this property with the value WINDOWS to - // indicate that this is a Windows image. This value is purely - // informational and does not enable or disable any features. + // Type: The ID of a supported feature. Read Enabling guest operating + // system features to see a list of available options. // // Possible values: // "FEATURE_TYPE_UNSPECIFIED" // "MULTI_IP_SUBNET" + // "SECURE_BOOT" + // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" // "WINDOWS" Type string `json:"type,omitempty"` @@ -7495,8 +7840,8 @@ type GuestOsFeature struct { } func (s *GuestOsFeature) MarshalJSON() ([]byte, error) { - type noMethod GuestOsFeature - raw := noMethod(*s) + type NoMethod GuestOsFeature + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7527,6 +7872,11 @@ type HTTPHealthCheck struct { // default value is /. RequestPath string `json:"requestPath,omitempty"` + // Response: The string to match anywhere in the first 1024 bytes of the + // response body. If left empty (the default value), the status code + // determines health. The response data can only be ASCII. + Response string `json:"response,omitempty"` + // ForceSendFields is a list of field names (e.g. "Host") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, @@ -7545,8 +7895,8 @@ type HTTPHealthCheck struct { } func (s *HTTPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HTTPHealthCheck - raw := noMethod(*s) + type NoMethod HTTPHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7577,6 +7927,11 @@ type HTTPSHealthCheck struct { // default value is /. RequestPath string `json:"requestPath,omitempty"` + // Response: The string to match anywhere in the first 1024 bytes of the + // response body. If left empty (the default value), the status code + // determines health. The response data can only be ASCII. + Response string `json:"response,omitempty"` + // ForceSendFields is a list of field names (e.g. "Host") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -7595,8 +7950,8 @@ type HTTPSHealthCheck struct { } func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HTTPSHealthCheck - raw := noMethod(*s) + type NoMethod HTTPSHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7696,8 +8051,8 @@ type HealthCheck struct { } func (s *HealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HealthCheck - raw := noMethod(*s) + type NoMethod HealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7749,8 +8104,8 @@ type HealthCheckList struct { } func (s *HealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckList - raw := noMethod(*s) + type NoMethod HealthCheckList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7763,9 +8118,13 @@ type HealthCheckListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // 
"EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7776,7 +8135,9 @@ type HealthCheckListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7807,8 +8168,8 @@ type HealthCheckListWarning struct { } func (s *HealthCheckListWarning) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckListWarning - raw := noMethod(*s) + type NoMethod HealthCheckListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7844,8 +8205,8 @@ type HealthCheckListWarningData struct { } func (s *HealthCheckListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckListWarningData - raw := noMethod(*s) + type NoMethod HealthCheckListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7876,8 +8237,8 @@ type HealthCheckReference struct { } func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckReference - raw := noMethod(*s) + type NoMethod HealthCheckReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7916,8 +8277,8 @@ type HealthStatus struct { } func (s *HealthStatus) MarshalJSON() ([]byte, error) { - type noMethod HealthStatus - raw := noMethod(*s) + type NoMethod HealthStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7956,8 +8317,8 @@ type HostRule struct { } func (s *HostRule) MarshalJSON() ([]byte, error) { - type noMethod HostRule - raw := noMethod(*s) + type NoMethod HostRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -8047,8 +8408,8 @@ type HttpHealthCheck struct { } func (s *HttpHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheck - raw := noMethod(*s) + type NoMethod HttpHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8100,8 +8461,8 @@ type HttpHealthCheckList struct { } func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheckList - raw := noMethod(*s) + type NoMethod HttpHealthCheckList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8115,9 +8476,13 @@ type HttpHealthCheckListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -8128,7 +8493,9 @@ type HttpHealthCheckListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -8159,8 +8526,8 @@ type HttpHealthCheckListWarning struct { } func (s *HttpHealthCheckListWarning) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheckListWarning - raw := noMethod(*s) + type NoMethod HttpHealthCheckListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8196,8 +8563,8 @@ type HttpHealthCheckListWarningData struct { } func (s *HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheckListWarningData - raw := noMethod(*s) + type NoMethod HttpHealthCheckListWarningData + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8286,8 +8653,8 @@ type HttpsHealthCheck struct { } func (s *HttpsHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheck - raw := noMethod(*s) + type NoMethod HttpsHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8339,8 +8706,8 @@ type HttpsHealthCheckList struct { } func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheckList - raw := noMethod(*s) + type NoMethod HttpsHealthCheckList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8354,9 +8721,13 @@ type HttpsHealthCheckListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -8367,7 +8738,9 @@ type HttpsHealthCheckListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -8398,8 +8771,8 @@ type HttpsHealthCheckListWarning struct { } func (s *HttpsHealthCheckListWarning) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheckListWarning - raw := noMethod(*s) + type NoMethod HttpsHealthCheckListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8435,12 +8808,13 @@ type HttpsHealthCheckListWarningData struct { } func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheckListWarningData - raw := 
noMethod(*s) + type NoMethod HttpsHealthCheckListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Image: An Image resource. +// Image: An Image resource. (== resource_for beta.images ==) (== +// resource_for v1.images ==) type Image struct { // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google // Cloud Storage (in bytes). @@ -8468,18 +8842,9 @@ type Image struct { // RFC1035. Family string `json:"family,omitempty"` - // GuestOsFeatures: A list of features to enable on the guest OS. - // Applicable for bootable images only. Currently, only one feature can - // be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows each virtual CPU to - // have its own queue. For Windows images, you can only enable - // VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or - // higher. Linux images with kernel versions 3.17 and higher will - // support VIRTIO_SCSI_MULTIQUEUE. - // - // For new Windows images, the server might also populate this field - // with the value WINDOWS, to indicate that this is a Windows image. - // This value is purely informational and does not enable or disable any - // features. + // GuestOsFeatures: A list of features to enable on the guest operating + // system. Applicable only for bootable images. Read Enabling guest + // operating system features to see a list of available options. GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -8520,6 +8885,10 @@ type Image struct { // the setLabels method. Labels map[string]string `json:"labels,omitempty"` + // LicenseCodes: Integer license codes indicating which licenses are + // attached to this image. + LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` + // Licenses: Any applicable license URI. 
Licenses []string `json:"licenses,omitempty"` @@ -8617,8 +8986,8 @@ type Image struct { } func (s *Image) MarshalJSON() ([]byte, error) { - type noMethod Image - raw := noMethod(*s) + type NoMethod Image + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8660,8 +9029,8 @@ type ImageRawDisk struct { } func (s *ImageRawDisk) MarshalJSON() ([]byte, error) { - type noMethod ImageRawDisk - raw := noMethod(*s) + type NoMethod ImageRawDisk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8713,8 +9082,8 @@ type ImageList struct { } func (s *ImageList) MarshalJSON() ([]byte, error) { - type noMethod ImageList - raw := noMethod(*s) + type NoMethod ImageList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8727,9 +9096,13 @@ type ImageListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -8740,7 +9113,9 @@ type ImageListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -8771,8 +9146,8 @@ type ImageListWarning struct { } func (s *ImageListWarning) MarshalJSON() ([]byte, error) { - type noMethod ImageListWarning - raw := noMethod(*s) + type NoMethod ImageListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8808,12 +9183,13 @@ type ImageListWarningData struct { } func (s *ImageListWarningData) MarshalJSON() ([]byte, 
error) { - type noMethod ImageListWarningData - raw := noMethod(*s) + type NoMethod ImageListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Instance: An Instance resource. +// Instance: An Instance resource. (== resource_for beta.instances ==) +// (== resource_for v1.instances ==) type Instance struct { // CanIpForward: Allows this instance to send and receive packets with // non-matching destination or source IPs. This is required if you plan @@ -8828,6 +9204,10 @@ type Instance struct { // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // DeletionProtection: Whether the resource should be protected against + // deletion. + DeletionProtection bool `json:"deletionProtection,omitempty"` + // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` @@ -8982,8 +9362,8 @@ type Instance struct { } func (s *Instance) MarshalJSON() ([]byte, error) { - type noMethod Instance - raw := noMethod(*s) + type NoMethod Instance + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9036,8 +9416,8 @@ type InstanceAggregatedList struct { } func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceAggregatedList - raw := noMethod(*s) + type NoMethod InstanceAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9051,9 +9431,13 @@ type InstanceAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9064,7 +9448,9 @@ 
type InstanceAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9095,8 +9481,8 @@ type InstanceAggregatedListWarning struct { } func (s *InstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceAggregatedListWarning - raw := noMethod(*s) + type NoMethod InstanceAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9132,11 +9518,15 @@ type InstanceAggregatedListWarningData struct { } func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceAggregatedListWarningData - raw := noMethod(*s) + type NoMethod InstanceAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroup: InstanceGroups (== resource_for beta.instanceGroups +// ==) (== resource_for v1.instanceGroups ==) (== resource_for +// beta.regionInstanceGroups ==) (== resource_for +// v1.regionInstanceGroups ==) type InstanceGroup struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance group in RFC3339 text format. 
@@ -9221,8 +9611,8 @@ type InstanceGroup struct { } func (s *InstanceGroup) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroup - raw := noMethod(*s) + type NoMethod InstanceGroup + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9275,8 +9665,8 @@ type InstanceGroupAggregatedList struct { } func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupAggregatedList - raw := noMethod(*s) + type NoMethod InstanceGroupAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9290,9 +9680,13 @@ type InstanceGroupAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9303,7 +9697,9 @@ type InstanceGroupAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9334,8 +9730,8 @@ type InstanceGroupAggregatedListWarning struct { } func (s *InstanceGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupAggregatedListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9371,8 +9767,8 @@ type InstanceGroupAggregatedListWarningData struct { } func (s *InstanceGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupAggregatedListWarningData - raw := 
noMethod(*s) + type NoMethod InstanceGroupAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9425,8 +9821,8 @@ type InstanceGroupList struct { } func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupList - raw := noMethod(*s) + type NoMethod InstanceGroupList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9440,9 +9836,13 @@ type InstanceGroupListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9453,7 +9853,9 @@ type InstanceGroupListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9484,8 +9886,8 @@ type InstanceGroupListWarning struct { } func (s *InstanceGroupListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9521,12 +9923,16 @@ type InstanceGroupListWarningData struct { } func (s *InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceGroupManager: An Instance Group Manager resource. 
+// InstanceGroupManager: An Instance Group Manager resource. (== +// resource_for beta.instanceGroupManagers ==) (== resource_for +// v1.instanceGroupManagers ==) (== resource_for +// beta.regionInstanceGroupManagers ==) (== resource_for +// v1.regionInstanceGroupManagers ==) type InstanceGroupManager struct { // AutoHealingPolicies: The autohealing policy for this managed instance // group. You can specify only one value. @@ -9625,15 +10031,15 @@ type InstanceGroupManager struct { // UpdatePolicy: The update policy for this managed instance group. UpdatePolicy *InstanceGroupManagerUpdatePolicy `json:"updatePolicy,omitempty"` - // Versions: Versions supported by this IGM. User should set this field - // if they need fine-grained control over how many instances in each - // version are run by this IGM. Versions are keyed by instanceTemplate. - // Every instanceTemplate can appear at most once. This field overrides - // instanceTemplate field. If both instanceTemplate and versions are - // set, the user receives a warning. "instanceTemplate: X" is - // semantically equivalent to "versions [ { instanceTemplate: X } ]". - // Exactly one version must have targetSize field left unset. Size of - // such a version will be calculated automatically. + // Versions: Specifies the instance templates used by this managed + // instance group to create instances. + // + // Each version is defined by an instanceTemplate. Every template can + // appear at most once per instance group. This field overrides the + // top-level instanceTemplate field. Read more about the relationships + // between these fields. Exactly one version must leave the targetSize + // field unset. That version will be applied to all remaining instances. + // For more information, read about canary updates. 
Versions []*InstanceGroupManagerVersion `json:"versions,omitempty"` // Zone: [Output Only] The URL of the zone where the managed instance @@ -9663,8 +10069,8 @@ type InstanceGroupManager struct { } func (s *InstanceGroupManager) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManager - raw := noMethod(*s) + type NoMethod InstanceGroupManager + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9743,8 +10149,8 @@ type InstanceGroupManagerActionsSummary struct { } func (s *InstanceGroupManagerActionsSummary) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerActionsSummary - raw := noMethod(*s) + type NoMethod InstanceGroupManagerActionsSummary + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9797,8 +10203,8 @@ type InstanceGroupManagerAggregatedList struct { } func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAggregatedList - raw := noMethod(*s) + type NoMethod InstanceGroupManagerAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9812,9 +10218,13 @@ type InstanceGroupManagerAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9825,7 +10235,9 @@ type InstanceGroupManagerAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9856,8 
+10268,8 @@ type InstanceGroupManagerAggregatedListWarning struct { } func (s *InstanceGroupManagerAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAggregatedListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupManagerAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9893,8 +10305,8 @@ type InstanceGroupManagerAggregatedListWarningData struct { } func (s *InstanceGroupManagerAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAggregatedListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupManagerAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9929,8 +10341,8 @@ type InstanceGroupManagerAutoHealingPolicy struct { } func (s *InstanceGroupManagerAutoHealingPolicy) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAutoHealingPolicy - raw := noMethod(*s) + type NoMethod InstanceGroupManagerAutoHealingPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9985,8 +10397,8 @@ type InstanceGroupManagerList struct { } func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerList - raw := noMethod(*s) + type NoMethod InstanceGroupManagerList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10000,9 +10412,13 @@ type InstanceGroupManagerListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -10013,7 +10429,9 @@ type 
InstanceGroupManagerListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -10044,8 +10462,8 @@ type InstanceGroupManagerListWarning struct { } func (s *InstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupManagerListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10081,8 +10499,8 @@ type InstanceGroupManagerListWarningData struct { } func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupManagerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10121,25 +10539,40 @@ type InstanceGroupManagerPendingActionsSummary struct { } func (s *InstanceGroupManagerPendingActionsSummary) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerPendingActionsSummary - raw := noMethod(*s) + type NoMethod InstanceGroupManagerPendingActionsSummary + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type InstanceGroupManagerUpdatePolicy struct { - // MaxSurge: Maximum number of instances that can be created above the - // InstanceGroupManager.targetSize during the update process. By - // default, a fixed value of 1 is used. Using maxSurge > 0 will cause - // instance names to change during the update process. At least one of { - // maxSurge, maxUnavailable } must be greater than 0. + // MaxSurge: The maximum number of instances that can be created above + // the specified targetSize during the update process. By default, a + // fixed value of 1 is used. 
This value can be either a fixed number or + // a percentage if the instance group has 10 or more instances. If you + // set a percentage, the number of instances will be rounded up if + // necessary. + // + // At least one of either maxSurge or maxUnavailable must be greater + // than 0. Learn more about maxSurge. MaxSurge *FixedOrPercent `json:"maxSurge,omitempty"` - // MaxUnavailable: Maximum number of instances that can be unavailable - // during the update process. The instance is considered available if - // all of the following conditions are satisfied: 1. Instance's status - // is RUNNING. 2. Instance's liveness health check result was observed - // to be HEALTHY at least once. By default, a fixed value of 1 is used. - // At least one of { maxSurge, maxUnavailable } must be greater than 0. + // MaxUnavailable: The maximum number of instances that can be + // unavailable during the update process. An instance is considered + // available if all of the following conditions are satisfied: + // + // + // - The instance's status is RUNNING. + // - If there is a health check on the instance group, the instance's + // liveness health check result must be HEALTHY at least once. If there + // is no health check on the group, then the instance only needs to have + // a status of RUNNING to be considered available. By default, a fixed + // value of 1 is used. This value can be either a fixed number or a + // percentage if the instance group has 10 or more instances. If you set + // a percentage, the number of instances will be rounded up if + // necessary. + // + // At least one of either maxSurge or maxUnavailable must be greater + // than 0. Learn more about maxUnavailable. MaxUnavailable *FixedOrPercent `json:"maxUnavailable,omitempty"` // MinReadySec: Minimum number of seconds to wait for after a newly @@ -10147,8 +10580,13 @@ type InstanceGroupManagerUpdatePolicy struct { // 3600]. 
MinReadySec int64 `json:"minReadySec,omitempty"` - // MinimalAction: Minimal action to be taken on an instance. The order - // of action types is: RESTART < REPLACE. + // MinimalAction: Minimal action to be taken on an instance. You can + // specify either RESTART to restart existing instances or REPLACE to + // delete and create new instances from the target template. If you + // specify a code>RESTART, the Updater will attempt to perform that + // action only. However, if the Updater determines that the minimal + // action you specify is not enough to perform the update, it might + // perform a more disruptive action. // // Possible values: // "REPLACE" @@ -10178,8 +10616,8 @@ type InstanceGroupManagerUpdatePolicy struct { } func (s *InstanceGroupManagerUpdatePolicy) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerUpdatePolicy - raw := noMethod(*s) + type NoMethod InstanceGroupManagerUpdatePolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10190,13 +10628,16 @@ type InstanceGroupManagerVersion struct { // this managed instance group. Name string `json:"name,omitempty"` - // TargetSize: Intended number of instances that are created from - // instanceTemplate. The final number of instances created from - // instanceTemplate will be equal to: * if expressed as fixed number: - // min(targetSize.fixed, instanceGroupManager.targetSize), * if - // expressed as percent: ceiling(targetSize.percent * - // InstanceGroupManager.targetSize). If unset, this version will handle - // all the remaining instances. + // TargetSize: Specifies the intended number of instances to be created + // from the instanceTemplate. The final number of instances created from + // the template will be equal to: + // - If expressed as a fixed number, the minimum of either + // targetSize.fixed or instanceGroupManager.targetSize is used. 
+ // - if expressed as a percent, the targetSize would be + // (targetSize.percent/100 * InstanceGroupManager.targetSize) If there + // is a remainder, the number is rounded up. If unset, this version + // will update any remaining instances not updated by another version. + // Read Starting a canary update for more information. TargetSize *FixedOrPercent `json:"targetSize,omitempty"` // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to @@ -10218,8 +10659,8 @@ type InstanceGroupManagerVersion struct { } func (s *InstanceGroupManagerVersion) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerVersion - raw := noMethod(*s) + type NoMethod InstanceGroupManagerVersion + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10247,8 +10688,8 @@ type InstanceGroupManagersAbandonInstancesRequest struct { } func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersAbandonInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersAbandonInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10276,8 +10717,8 @@ type InstanceGroupManagersDeleteInstancesRequest struct { } func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersDeleteInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersDeleteInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10317,8 +10758,8 @@ type InstanceGroupManagersListManagedInstancesResponse struct { } func (s *InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersListManagedInstancesResponse - raw := noMethod(*s) + type NoMethod InstanceGroupManagersListManagedInstancesResponse + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10346,8 +10787,8 @@ type InstanceGroupManagersRecreateInstancesRequest struct { } func (s *InstanceGroupManagersRecreateInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersRecreateInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersRecreateInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10400,8 +10841,8 @@ type InstanceGroupManagersResizeAdvancedRequest struct { } func (s *InstanceGroupManagersResizeAdvancedRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersResizeAdvancedRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersResizeAdvancedRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10434,8 +10875,8 @@ type InstanceGroupManagersScopedList struct { } func (s *InstanceGroupManagersScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedList - raw := noMethod(*s) + type NoMethod InstanceGroupManagersScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10450,9 +10891,13 @@ type InstanceGroupManagersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -10463,7 +10908,9 @@ type InstanceGroupManagersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code 
string `json:"code,omitempty"` @@ -10494,8 +10941,8 @@ type InstanceGroupManagersScopedListWarning struct { } func (s *InstanceGroupManagersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupManagersScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10531,8 +10978,8 @@ type InstanceGroupManagersScopedListWarningData struct { } func (s *InstanceGroupManagersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupManagersScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10558,8 +11005,8 @@ type InstanceGroupManagersSetAutoHealingRequest struct { } func (s *InstanceGroupManagersSetAutoHealingRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersSetAutoHealingRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersSetAutoHealingRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10588,8 +11035,8 @@ type InstanceGroupManagersSetInstanceTemplateRequest struct { } func (s *InstanceGroupManagersSetInstanceTemplateRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersSetInstanceTemplateRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersSetInstanceTemplateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10627,8 +11074,8 @@ type InstanceGroupManagersSetTargetPoolsRequest struct { } func (s *InstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersSetTargetPoolsRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersSetTargetPoolsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -10654,8 +11101,8 @@ type InstanceGroupsAddInstancesRequest struct { } func (s *InstanceGroupsAddInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsAddInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsAddInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10708,8 +11155,8 @@ type InstanceGroupsListInstances struct { } func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstances - raw := noMethod(*s) + type NoMethod InstanceGroupsListInstances + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10723,9 +11170,13 @@ type InstanceGroupsListInstancesWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -10736,7 +11187,9 @@ type InstanceGroupsListInstancesWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -10767,8 +11220,8 @@ type InstanceGroupsListInstancesWarning struct { } func (s *InstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstancesWarning - raw := noMethod(*s) + type NoMethod InstanceGroupsListInstancesWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10804,8 +11257,8 @@ type InstanceGroupsListInstancesWarningData struct { } func (s 
*InstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstancesWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupsListInstancesWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10838,8 +11291,8 @@ type InstanceGroupsListInstancesRequest struct { } func (s *InstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsListInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10865,8 +11318,8 @@ type InstanceGroupsRemoveInstancesRequest struct { } func (s *InstanceGroupsRemoveInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsRemoveInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsRemoveInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10898,8 +11351,8 @@ type InstanceGroupsScopedList struct { } func (s *InstanceGroupsScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedList - raw := noMethod(*s) + type NoMethod InstanceGroupsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10914,9 +11367,13 @@ type InstanceGroupsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -10927,7 +11384,9 @@ type InstanceGroupsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + 
// "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -10958,8 +11417,8 @@ type InstanceGroupsScopedListWarning struct { } func (s *InstanceGroupsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10995,8 +11454,8 @@ type InstanceGroupsScopedListWarningData struct { } func (s *InstanceGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11030,8 +11489,8 @@ type InstanceGroupsSetNamedPortsRequest struct { } func (s *InstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsSetNamedPortsRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsSetNamedPortsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11084,8 +11543,8 @@ type InstanceList struct { } func (s *InstanceList) MarshalJSON() ([]byte, error) { - type noMethod InstanceList - raw := noMethod(*s) + type NoMethod InstanceList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11098,9 +11557,13 @@ type InstanceListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -11111,7 +11574,9 @@ 
type InstanceListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -11142,8 +11607,8 @@ type InstanceListWarning struct { } func (s *InstanceListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceListWarning - raw := noMethod(*s) + type NoMethod InstanceListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11179,8 +11644,8 @@ type InstanceListWarningData struct { } func (s *InstanceListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceListWarningData - raw := noMethod(*s) + type NoMethod InstanceListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11233,8 +11698,8 @@ type InstanceListReferrers struct { } func (s *InstanceListReferrers) MarshalJSON() ([]byte, error) { - type noMethod InstanceListReferrers - raw := noMethod(*s) + type NoMethod InstanceListReferrers + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11248,9 +11713,13 @@ type InstanceListReferrersWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -11261,7 +11730,9 @@ type InstanceListReferrersWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string 
`json:"code,omitempty"` @@ -11292,8 +11763,8 @@ type InstanceListReferrersWarning struct { } func (s *InstanceListReferrersWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceListReferrersWarning - raw := noMethod(*s) + type NoMethod InstanceListReferrersWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11329,8 +11800,8 @@ type InstanceListReferrersWarningData struct { } func (s *InstanceListReferrersWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceListReferrersWarningData - raw := noMethod(*s) + type NoMethod InstanceListReferrersWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11372,8 +11843,8 @@ type InstanceMoveRequest struct { } func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceMoveRequest - raw := noMethod(*s) + type NoMethod InstanceMoveRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11459,8 +11930,8 @@ type InstanceProperties struct { } func (s *InstanceProperties) MarshalJSON() ([]byte, error) { - type noMethod InstanceProperties - raw := noMethod(*s) + type NoMethod InstanceProperties + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11486,12 +11957,13 @@ type InstanceReference struct { } func (s *InstanceReference) MarshalJSON() ([]byte, error) { - type noMethod InstanceReference - raw := noMethod(*s) + type NoMethod InstanceReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceTemplate: An Instance Template resource. +// InstanceTemplate: An Instance Template resource. (== resource_for +// beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==) type InstanceTemplate struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance template in RFC3339 text format. 
@@ -11525,6 +11997,18 @@ type InstanceTemplate struct { // server defines this URL. SelfLink string `json:"selfLink,omitempty"` + // SourceInstance: The source instance used to create the template. You + // can provide this as a partial or full URL to the resource. For + // example, the following are valid values: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance + // - projects/project/zones/zone/instances/instance + SourceInstance string `json:"sourceInstance,omitempty"` + + // SourceInstanceParams: The source instance params to use to create + // this instance template. + SourceInstanceParams *SourceInstanceParams `json:"sourceInstanceParams,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11548,8 +12032,8 @@ type InstanceTemplate struct { } func (s *InstanceTemplate) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplate - raw := noMethod(*s) + type NoMethod InstanceTemplate + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11602,8 +12086,8 @@ type InstanceTemplateList struct { } func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplateList - raw := noMethod(*s) + type NoMethod InstanceTemplateList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11617,9 +12101,13 @@ type InstanceTemplateListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -11630,7 +12118,9 @@ type InstanceTemplateListWarning struct { // "REQUIRED_TOS_AGREEMENT" // 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -11661,8 +12151,8 @@ type InstanceTemplateListWarning struct { } func (s *InstanceTemplateListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplateListWarning - raw := noMethod(*s) + type NoMethod InstanceTemplateListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11698,8 +12188,8 @@ type InstanceTemplateListWarningData struct { } func (s *InstanceTemplateListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplateListWarningData - raw := noMethod(*s) + type NoMethod InstanceTemplateListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11742,8 +12232,8 @@ type InstanceWithNamedPorts struct { } func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { - type noMethod InstanceWithNamedPorts - raw := noMethod(*s) + type NoMethod InstanceWithNamedPorts + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11773,8 +12263,8 @@ type InstancesScopedList struct { } func (s *InstancesScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedList - raw := noMethod(*s) + type NoMethod InstancesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11788,9 +12278,13 @@ type InstancesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" 
@@ -11801,7 +12295,9 @@ type InstancesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -11832,8 +12328,8 @@ type InstancesScopedListWarning struct { } func (s *InstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedListWarning - raw := noMethod(*s) + type NoMethod InstancesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11869,8 +12365,8 @@ type InstancesScopedListWarningData struct { } func (s *InstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedListWarningData - raw := noMethod(*s) + type NoMethod InstancesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11901,8 +12397,8 @@ type InstancesSetLabelsRequest struct { } func (s *InstancesSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetLabelsRequest - raw := noMethod(*s) + type NoMethod InstancesSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11930,8 +12426,8 @@ type InstancesSetMachineResourcesRequest struct { } func (s *InstancesSetMachineResourcesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetMachineResourcesRequest - raw := noMethod(*s) + type NoMethod InstancesSetMachineResourcesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11959,8 +12455,8 @@ type InstancesSetMachineTypeRequest struct { } func (s *InstancesSetMachineTypeRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetMachineTypeRequest - raw := noMethod(*s) + type NoMethod InstancesSetMachineTypeRequest + raw := NoMethod(*s) 
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11988,8 +12484,8 @@ type InstancesSetMinCpuPlatformRequest struct { } func (s *InstancesSetMinCpuPlatformRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetMinCpuPlatformRequest - raw := noMethod(*s) + type NoMethod InstancesSetMinCpuPlatformRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12019,8 +12515,8 @@ type InstancesSetServiceAccountRequest struct { } func (s *InstancesSetServiceAccountRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetServiceAccountRequest - raw := noMethod(*s) + type NoMethod InstancesSetServiceAccountRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12053,32 +12549,28 @@ type InstancesStartWithEncryptionKeyRequest struct { } func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesStartWithEncryptionKeyRequest - raw := noMethod(*s) + type NoMethod InstancesStartWithEncryptionKeyRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Interconnect: Protocol definitions for Mixer API to support -// Interconnect. Next available tag: 23 +// Interconnect: Represents an Interconnects resource. The Interconnects +// resource is a dedicated connection between Google's network and your +// on-premises network. For more information, see the Dedicated +// overview page. (== resource_for v1.interconnects ==) (== resource_for +// beta.interconnects ==) type Interconnect struct { // AdminEnabled: Administrative status of the interconnect. When this is - // set to ?true?, the Interconnect is functional and may carry traffic - // (assuming there are functional InterconnectAttachments and other - // requirements are satisfied). 
When set to ?false?, no packets will be - // carried over this Interconnect and no BGP routes will be exchanged - // over it. By default, it is set to ?true?. + // set to true, the Interconnect is functional and can carry traffic. + // When set to false, no packets can be carried over the interconnect + // and no BGP routes are exchanged over it. By default, the status is + // set to true. AdminEnabled bool `json:"adminEnabled,omitempty"` // CircuitInfos: [Output Only] List of CircuitInfo objects, that // describe the individual circuits in this LAG. CircuitInfos []*InterconnectCircuitInfo `json:"circuitInfos,omitempty"` - // ConnectionAuthorization: [Output Only] URL to retrieve the Letter Of - // Authority and Customer Facility Assignment (LOA-CFA) documentation - // relating to this Interconnect. This documentation authorizes the - // facility provider to connect to the specified crossconnect ports. - ConnectionAuthorization string `json:"connectionAuthorization,omitempty"` - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -12112,7 +12604,11 @@ type Interconnect struct { // InterconnectAttachments configured to use this Interconnect. InterconnectAttachments []string `json:"interconnectAttachments,omitempty"` + // InterconnectType: Type of interconnect. Note that "IT_PRIVATE" has + // been deprecated in favor of "DEDICATED" + // // Possible values: + // "DEDICATED" // "IT_PRIVATE" InterconnectType string `json:"interconnectType,omitempty"` @@ -12120,6 +12616,10 @@ type Interconnect struct { // for interconnects. Kind string `json:"kind,omitempty"` + // LinkType: Type of link requested. This field indicates speed of each + // of the links in the bundle, not the entire bundle. Only 10G per link + // is allowed for a dedicated interconnect. 
Options: Ethernet_10G_LR + // // Possible values: // "LINK_TYPE_ETHERNET_10G_LR" LinkType string `json:"linkType,omitempty"` @@ -12191,13 +12691,15 @@ type Interconnect struct { } func (s *Interconnect) MarshalJSON() ([]byte, error) { - type noMethod Interconnect - raw := noMethod(*s) + type NoMethod Interconnect + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachment: Protocol definitions for Mixer API to support -// InterconnectAttachment. Next available tag: 18 +// InterconnectAttachment: Represents an InterconnectAttachment (VLAN +// attachment) resource. For more information, see Creating VLAN +// Attachments. (== resource_for beta.interconnectAttachments ==) (== +// resource_for v1.interconnectAttachments ==) type InterconnectAttachment struct { // CloudRouterIpAddress: [Output Only] IPv4 address + prefix length to // be configured on Cloud Router Interface for this interconnect @@ -12213,8 +12715,7 @@ type InterconnectAttachment struct { // interconnect attachment. CustomerRouterIpAddress string `json:"customerRouterIpAddress,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. + // Description: An optional description of this resource. Description string `json:"description,omitempty"` // GoogleReferenceId: [Output Only] Google reference ID, to be used when @@ -12251,9 +12752,9 @@ type InterconnectAttachment struct { // "OS_UNPROVISIONED" OperationalStatus string `json:"operationalStatus,omitempty"` - // PrivateInterconnectInfo: [Output Only] Information specific to a - // Private InterconnectAttachment. Only populated if the interconnect - // that this is attached is of type IT_PRIVATE. + // PrivateInterconnectInfo: [Output Only] Information specific to an + // InterconnectAttachment. This property is populated if the + // interconnect that this is attached to is of type DEDICATED. 
PrivateInterconnectInfo *InterconnectAttachmentPrivateInfo `json:"privateInterconnectInfo,omitempty"` // Region: [Output Only] URL of the region where the regional @@ -12293,8 +12794,8 @@ type InterconnectAttachment struct { } func (s *InterconnectAttachment) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachment - raw := noMethod(*s) + type NoMethod InterconnectAttachment + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12347,8 +12848,8 @@ type InterconnectAttachmentAggregatedList struct { } func (s *InterconnectAttachmentAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentAggregatedList - raw := noMethod(*s) + type NoMethod InterconnectAttachmentAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12362,9 +12863,13 @@ type InterconnectAttachmentAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12375,7 +12880,9 @@ type InterconnectAttachmentAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -12406,8 +12913,8 @@ type InterconnectAttachmentAggregatedListWarning struct { } func (s *InterconnectAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentAggregatedListWarning - raw := noMethod(*s) + type NoMethod InterconnectAttachmentAggregatedListWarning + raw := NoMethod(*s) 
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12443,8 +12950,8 @@ type InterconnectAttachmentAggregatedListWarningData struct { } func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentAggregatedListWarningData - raw := noMethod(*s) + type NoMethod InterconnectAttachmentAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12499,8 +13006,8 @@ type InterconnectAttachmentList struct { } func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentList - raw := noMethod(*s) + type NoMethod InterconnectAttachmentList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12514,9 +13021,13 @@ type InterconnectAttachmentListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12527,7 +13038,9 @@ type InterconnectAttachmentListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -12558,8 +13071,8 @@ type InterconnectAttachmentListWarning struct { } func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentListWarning - raw := noMethod(*s) + type NoMethod InterconnectAttachmentListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12595,14 +13108,13 
@@ type InterconnectAttachmentListWarningData struct { } func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentListWarningData - raw := noMethod(*s) + type NoMethod InterconnectAttachmentListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachmentPrivateInfo: Private information for an -// interconnect attachment when this belongs to an interconnect of type -// IT_PRIVATE. +// InterconnectAttachmentPrivateInfo: Information for an interconnect +// attachment when this belongs to an interconnect of type DEDICATED. type InterconnectAttachmentPrivateInfo struct { // Tag8021q: [Output Only] 802.1q encapsulation tag to be used for // traffic between Google and the customer, going to and from this @@ -12627,8 +13139,8 @@ type InterconnectAttachmentPrivateInfo struct { } func (s *InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentPrivateInfo - raw := noMethod(*s) + type NoMethod InterconnectAttachmentPrivateInfo + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12661,8 +13173,8 @@ type InterconnectAttachmentsScopedList struct { } func (s *InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentsScopedList - raw := noMethod(*s) + type NoMethod InterconnectAttachmentsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12676,9 +13188,13 @@ type InterconnectAttachmentsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" 
// "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12689,7 +13205,9 @@ type InterconnectAttachmentsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -12720,8 +13238,8 @@ type InterconnectAttachmentsScopedListWarning struct { } func (s *InterconnectAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentsScopedListWarning - raw := noMethod(*s) + type NoMethod InterconnectAttachmentsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12757,8 +13275,8 @@ type InterconnectAttachmentsScopedListWarningData struct { } func (s *InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InterconnectAttachmentsScopedListWarningData - raw := noMethod(*s) + type NoMethod InterconnectAttachmentsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12766,9 +13284,7 @@ func (s *InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, er // the Customer and Google. CircuitInfo objects are created by Google, // so all fields are output only. Next id: 4 type InterconnectCircuitInfo struct { - // CustomerDemarcId: Customer-side demarc ID for this circuit. This will - // only be set if it was provided by the Customer to Google during - // circuit turn-up. + // CustomerDemarcId: Customer-side demarc ID for this circuit. CustomerDemarcId string `json:"customerDemarcId,omitempty"` // GoogleCircuitId: Google-assigned unique ID for this circuit. 
Assigned @@ -12798,8 +13314,8 @@ type InterconnectCircuitInfo struct { } func (s *InterconnectCircuitInfo) MarshalJSON() ([]byte, error) { - type noMethod InterconnectCircuitInfo - raw := noMethod(*s) + type NoMethod InterconnectCircuitInfo + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12853,8 +13369,8 @@ type InterconnectList struct { } func (s *InterconnectList) MarshalJSON() ([]byte, error) { - type noMethod InterconnectList - raw := noMethod(*s) + type NoMethod InterconnectList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12867,9 +13383,13 @@ type InterconnectListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12880,7 +13400,9 @@ type InterconnectListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -12911,8 +13433,8 @@ type InterconnectListWarning struct { } func (s *InterconnectListWarning) MarshalJSON() ([]byte, error) { - type noMethod InterconnectListWarning - raw := noMethod(*s) + type NoMethod InterconnectListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12948,37 +13470,44 @@ type InterconnectListWarningData struct { } func (s *InterconnectListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InterconnectListWarningData - raw := noMethod(*s) + type NoMethod InterconnectListWarningData + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectLocation: Protocol definitions for Mixer API to support -// InterconnectLocation. +// InterconnectLocation: Represents an InterconnectLocations resource. +// The InterconnectLocations resource describes the locations where you +// can connect to Google's networks. For more information, see +// Colocation Facilities. type InterconnectLocation struct { // Address: [Output Only] The postal address of the Point of Presence, // each line in the address is separated by a newline character. Address string `json:"address,omitempty"` - // AvailabilityZone: Availability zone for this location. Within a city, - // maintenance will not be simultaneously scheduled in more than one - // availability zone. Example: "zone1" or "zone2". + // AvailabilityZone: [Output Only] Availability zone for this location. + // Within a metropolitan area (metro), maintenance will not be + // simultaneously scheduled in more than one availability zone. Example: + // "zone1" or "zone2". AvailabilityZone string `json:"availabilityZone,omitempty"` - // City: City designator used by the Interconnect UI to locate this - // InterconnectLocation within the Continent. For example: "Chicago, - // IL", "Amsterdam, Netherlands". + // City: [Output Only] Metropolitan area designator that indicates which + // city an interconnect is located. For example: "Chicago, IL", + // "Amsterdam, Netherlands". City string `json:"city,omitempty"` - // Continent: Continent for this location. Used by the location picker - // in the Interconnect UI. + // Continent: [Output Only] Continent for this location. 
// // Possible values: + // "AFRICA" + // "ASIA_PAC" // "C_AFRICA" // "C_ASIA_PAC" // "C_EUROPE" // "C_NORTH_AMERICA" // "C_SOUTH_AMERICA" + // "EUROPE" + // "NORTH_AMERICA" + // "SOUTH_AMERICA" Continent string `json:"continent,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -13041,8 +13570,8 @@ type InterconnectLocation struct { } func (s *InterconnectLocation) MarshalJSON() ([]byte, error) { - type noMethod InterconnectLocation - raw := noMethod(*s) + type NoMethod InterconnectLocation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13096,8 +13625,8 @@ type InterconnectLocationList struct { } func (s *InterconnectLocationList) MarshalJSON() ([]byte, error) { - type noMethod InterconnectLocationList - raw := noMethod(*s) + type NoMethod InterconnectLocationList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13111,9 +13640,13 @@ type InterconnectLocationListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -13124,7 +13657,9 @@ type InterconnectLocationListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13155,8 +13690,8 @@ type InterconnectLocationListWarning struct { } func (s *InterconnectLocationListWarning) MarshalJSON() ([]byte, error) { - type noMethod InterconnectLocationListWarning - raw := noMethod(*s) + type NoMethod InterconnectLocationListWarning + 
raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13192,8 +13727,8 @@ type InterconnectLocationListWarningData struct { } func (s *InterconnectLocationListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InterconnectLocationListWarningData - raw := noMethod(*s) + type NoMethod InterconnectLocationListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13208,6 +13743,8 @@ type InterconnectLocationRegionInfo struct { // LocationPresence: Identifies the network presence of this location. // // Possible values: + // "GLOBAL" + // "LOCAL_REGION" // "LP_GLOBAL" // "LP_LOCAL_REGION" LocationPresence string `json:"locationPresence,omitempty"` @@ -13215,9 +13752,6 @@ type InterconnectLocationRegionInfo struct { // Region: URL for the region of this location. Region string `json:"region,omitempty"` - // RegionKey: Scope key for the region of this location. - RegionKey string `json:"regionKey,omitempty"` - // ForceSendFields is a list of field names (e.g. "ExpectedRttMs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -13236,8 +13770,8 @@ type InterconnectLocationRegionInfo struct { } func (s *InterconnectLocationRegionInfo) MarshalJSON() ([]byte, error) { - type noMethod InterconnectLocationRegionInfo - raw := noMethod(*s) + type NoMethod InterconnectLocationRegionInfo + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13248,29 +13782,45 @@ type InterconnectOutageNotification struct { // Google-side circuit IDs that will be affected. AffectedCircuits []string `json:"affectedCircuits,omitempty"` - // Description: Short user-visible description of the purpose of the - // outage. + // Description: A description about the purpose of the outage. 
Description string `json:"description,omitempty"` + // EndTime: Scheduled end time for the outage (milliseconds since Unix + // epoch). EndTime int64 `json:"endTime,omitempty,string"` + // IssueType: Form this outage is expected to take. Note that the "IT_" + // versions of this enum have been deprecated in favor of the unprefixed + // values. + // // Possible values: // "IT_OUTAGE" // "IT_PARTIAL_OUTAGE" + // "OUTAGE" + // "PARTIAL_OUTAGE" IssueType string `json:"issueType,omitempty"` // Name: Unique identifier for this outage notification. Name string `json:"name,omitempty"` + // Source: The party that generated this notification. Note that + // "NSRC_GOOGLE" has been deprecated in favor of "GOOGLE" + // // Possible values: + // "GOOGLE" // "NSRC_GOOGLE" Source string `json:"source,omitempty"` - // StartTime: Scheduled start and end times for the outage (milliseconds - // since Unix epoch). + // StartTime: Scheduled start time for the outage (milliseconds since + // Unix epoch). StartTime int64 `json:"startTime,omitempty,string"` + // State: State of this notification. Note that the "NS_" versions of + // this enum have been deprecated in favor of the unprefixed values. + // // Possible values: + // "ACTIVE" + // "CANCELLED" // "NS_ACTIVE" // "NS_CANCELED" State string `json:"state,omitempty"` @@ -13294,8 +13844,8 @@ type InterconnectOutageNotification struct { } func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { - type noMethod InterconnectOutageNotification - raw := noMethod(*s) + type NoMethod InterconnectOutageNotification + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13305,17 +13855,40 @@ type License struct { // reflects whether a license charges a usage fee. ChargesUseFee bool `json:"chargesUseFee,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional textual description of the resource; + // provided by the client when the resource is created. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + // Kind: [Output Only] Type of resource. Always compute#license for // licenses. Kind string `json:"kind,omitempty"` + // LicenseCode: [Output Only] The unique code used to attach this + // license to images, snapshots, and disks. + LicenseCode uint64 `json:"licenseCode,omitempty,string"` + // Name: [Output Only] Name of the resource. The name is 1-63 characters // long and complies with RFC1035. Name string `json:"name,omitempty"` + ResourceRequirements *LicenseResourceRequirements `json:"resourceRequirements,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // Transferable: If false, licenses will not be copied from the source + // resource when creating an image from a disk, disk from snapshot, or + // snapshot from disk. + Transferable bool `json:"transferable,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13338,8 +13911,293 @@ type License struct { } func (s *License) MarshalJSON() ([]byte, error) { - type noMethod License - raw := noMethod(*s) + type NoMethod License + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicenseCode struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: [Output Only] Description of this License Code. 
+ Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#licenseCode for + // licenses. + Kind string `json:"kind,omitempty"` + + // LicenseAlias: [Output Only] URL and description aliases of Licenses + // with the same License Code. + LicenseAlias []*LicenseCodeLicenseAlias `json:"licenseAlias,omitempty"` + + // Name: [Output Only] Name of the resource. The name is 1-20 characters + // long and must be a valid 64 bit integer. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // State: [Output Only] Current state of this License Code. + // + // Possible values: + // "DISABLED" + // "ENABLED" + // "RESTRICTED" + // "STATE_UNSPECIFIED" + // "TERMINATED" + State string `json:"state,omitempty"` + + // Transferable: [Output Only] If true, the license will remain attached + // when creating images or snapshots from disks. Otherwise, the license + // is not transferred. + Transferable bool `json:"transferable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. 
By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LicenseCode) MarshalJSON() ([]byte, error) { + type NoMethod LicenseCode + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicenseCodeLicenseAlias struct { + // Description: [Output Only] Description of this License Code. + Description string `json:"description,omitempty"` + + // SelfLink: [Output Only] URL of license corresponding to this License + // Code. + SelfLink string `json:"selfLink,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *LicenseCodeLicenseAlias) MarshalJSON() ([]byte, error) { + type NoMethod LicenseCodeLicenseAlias + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicenseResourceRequirements struct { + // MinGuestCpuCount: Minimum number of guest cpus required to use the + // Instance. Enforced at Instance creation and Instance start. + MinGuestCpuCount int64 `json:"minGuestCpuCount,omitempty"` + + // MinMemoryMb: Minimum memory required to use the Instance. Enforced at + // Instance creation and Instance start. + MinMemoryMb int64 `json:"minMemoryMb,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MinGuestCpuCount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MinGuestCpuCount") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LicenseResourceRequirements) MarshalJSON() ([]byte, error) { + type NoMethod LicenseResourceRequirements + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicensesListResponse struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. 
+ Id string `json:"id,omitempty"` + + // Items: A list of License resources. + Items []*License `json:"items,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *LicensesListResponseWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *LicensesListResponse) MarshalJSON() ([]byte, error) { + type NoMethod LicensesListResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LicensesListResponseWarning: [Output Only] Informational warning +// message. +type LicensesListResponseWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*LicensesListResponseWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LicensesListResponseWarning) MarshalJSON() ([]byte, error) { + type NoMethod LicensesListResponseWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicensesListResponseWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LicensesListResponseWarningData) MarshalJSON() ([]byte, error) { + type NoMethod LicensesListResponseWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13372,8 +14230,8 @@ type LogConfig struct { } func (s *LogConfig) MarshalJSON() ([]byte, error) { - type noMethod LogConfig - raw := noMethod(*s) + type NoMethod LogConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13411,8 +14269,8 @@ type LogConfigCloudAuditOptions struct { } func (s *LogConfigCloudAuditOptions) MarshalJSON() ([]byte, error) { - type noMethod LogConfigCloudAuditOptions - raw := noMethod(*s) + type NoMethod LogConfigCloudAuditOptions + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13464,8 +14322,8 @@ type LogConfigCounterOptions struct { } func (s *LogConfigCounterOptions) MarshalJSON() ([]byte, error) { - type noMethod LogConfigCounterOptions - raw := noMethod(*s) + type NoMethod LogConfigCounterOptions + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13498,12 +14356,13 @@ type LogConfigDataAccessOptions struct { } func (s *LogConfigDataAccessOptions) MarshalJSON() ([]byte, error) { - type noMethod LogConfigDataAccessOptions - raw := noMethod(*s) + type NoMethod LogConfigDataAccessOptions + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MachineType: A Machine Type resource. +// MachineType: A Machine Type resource. (== resource_for +// v1.machineTypes ==) (== resource_for beta.machineTypes ==) type MachineType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -13578,8 +14437,8 @@ type MachineType struct { } func (s *MachineType) MarshalJSON() ([]byte, error) { - type noMethod MachineType - raw := noMethod(*s) + type NoMethod MachineType + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13632,8 +14491,8 @@ type MachineTypeAggregatedList struct { } func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeAggregatedList - raw := noMethod(*s) + type NoMethod MachineTypeAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13647,9 +14506,13 @@ type MachineTypeAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -13660,7 +14523,9 @@ type MachineTypeAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13691,8 +14556,8 @@ type MachineTypeAggregatedListWarning struct { } func (s *MachineTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeAggregatedListWarning - raw := noMethod(*s) + type NoMethod MachineTypeAggregatedListWarning + raw 
:= NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13728,8 +14593,8 @@ type MachineTypeAggregatedListWarningData struct { } func (s *MachineTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeAggregatedListWarningData - raw := noMethod(*s) + type NoMethod MachineTypeAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13782,8 +14647,8 @@ type MachineTypeList struct { } func (s *MachineTypeList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeList - raw := noMethod(*s) + type NoMethod MachineTypeList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13796,9 +14661,13 @@ type MachineTypeListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -13809,7 +14678,9 @@ type MachineTypeListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13840,8 +14711,8 @@ type MachineTypeListWarning struct { } func (s *MachineTypeListWarning) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeListWarning - raw := noMethod(*s) + type NoMethod MachineTypeListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13877,8 +14748,8 @@ type MachineTypeListWarningData struct { } func (s *MachineTypeListWarningData) MarshalJSON() ([]byte, error) { - type noMethod 
MachineTypeListWarningData - raw := noMethod(*s) + type NoMethod MachineTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13909,8 +14780,8 @@ type MachineTypesScopedList struct { } func (s *MachineTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedList - raw := noMethod(*s) + type NoMethod MachineTypesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13924,9 +14795,13 @@ type MachineTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -13937,7 +14812,9 @@ type MachineTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13968,8 +14845,8 @@ type MachineTypesScopedListWarning struct { } func (s *MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedListWarning - raw := noMethod(*s) + type NoMethod MachineTypesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14005,8 +14882,8 @@ type MachineTypesScopedListWarningData struct { } func (s *MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedListWarningData - raw := noMethod(*s) + type NoMethod MachineTypesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-14095,8 +14972,8 @@ type ManagedInstance struct { } func (s *ManagedInstance) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstance - raw := noMethod(*s) + type NoMethod ManagedInstance + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14123,8 +15000,8 @@ type ManagedInstanceLastAttempt struct { } func (s *ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttempt - raw := noMethod(*s) + type NoMethod ManagedInstanceLastAttempt + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14153,8 +15030,8 @@ type ManagedInstanceLastAttemptErrors struct { } func (s *ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttemptErrors - raw := noMethod(*s) + type NoMethod ManagedInstanceLastAttemptErrors + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14187,8 +15064,8 @@ type ManagedInstanceLastAttemptErrorsErrors struct { } func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttemptErrorsErrors - raw := noMethod(*s) + type NoMethod ManagedInstanceLastAttemptErrorsErrors + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14220,8 +15097,8 @@ type ManagedInstanceVersion struct { } func (s *ManagedInstanceVersion) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceVersion - raw := noMethod(*s) + type NoMethod ManagedInstanceVersion + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14261,8 +15138,8 @@ type Metadata struct { } func (s *Metadata) MarshalJSON() ([]byte, error) { - type noMethod Metadata - raw := noMethod(*s) + type NoMethod Metadata + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14298,8 +15175,8 @@ type MetadataItems 
struct { } func (s *MetadataItems) MarshalJSON() ([]byte, error) { - type noMethod MetadataItems - raw := noMethod(*s) + type NoMethod MetadataItems + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14330,13 +15207,14 @@ type NamedPort struct { } func (s *NamedPort) MarshalJSON() ([]byte, error) { - type noMethod NamedPort - raw := noMethod(*s) + type NoMethod NamedPort + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Network: Represents a Network resource. Read Networks and Firewalls -// for more information. +// for more information. (== resource_for v1.networks ==) (== +// resource_for beta.networks ==) type Network struct { // IPv4Range: The range of internal addresses that are legal on this // network. This range is a CIDR specification, for example: @@ -14419,8 +15297,8 @@ type Network struct { } func (s *Network) MarshalJSON() ([]byte, error) { - type noMethod Network - raw := noMethod(*s) + type NoMethod Network + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14438,6 +15316,12 @@ type NetworkInterface struct { // subnet-mode networks. AliasIpRanges []*AliasIpRange `json:"aliasIpRanges,omitempty"` + // Fingerprint: Fingerprint hash of contents stored in this network + // interface. This field will be ignored when inserting an Instance or + // adding a NetworkInterface. An up-to-date fingerprint must be provided + // in order to update the NetworkInterface. + Fingerprint string `json:"fingerprint,omitempty"` + // Kind: [Output Only] Type of the resource. Always // compute#networkInterface for network interfaces. 
Kind string `json:"kind,omitempty"` @@ -14499,8 +15383,8 @@ type NetworkInterface struct { } func (s *NetworkInterface) MarshalJSON() ([]byte, error) { - type noMethod NetworkInterface - raw := noMethod(*s) + type NoMethod NetworkInterface + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14553,8 +15437,8 @@ type NetworkList struct { } func (s *NetworkList) MarshalJSON() ([]byte, error) { - type noMethod NetworkList - raw := noMethod(*s) + type NoMethod NetworkList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14567,9 +15451,13 @@ type NetworkListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -14580,7 +15468,9 @@ type NetworkListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -14611,8 +15501,8 @@ type NetworkListWarning struct { } func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { - type noMethod NetworkListWarning - raw := noMethod(*s) + type NoMethod NetworkListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14648,8 +15538,8 @@ type NetworkListWarningData struct { } func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { - type noMethod NetworkListWarningData - raw := noMethod(*s) + type NoMethod NetworkListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14710,8 
+15600,8 @@ type NetworkPeering struct { } func (s *NetworkPeering) MarshalJSON() ([]byte, error) { - type noMethod NetworkPeering - raw := noMethod(*s) + type NoMethod NetworkPeering + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14749,8 +15639,8 @@ type NetworkRoutingConfig struct { } func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { - type noMethod NetworkRoutingConfig - raw := noMethod(*s) + type NoMethod NetworkRoutingConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14787,8 +15677,8 @@ type NetworksAddPeeringRequest struct { } func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksAddPeeringRequest - raw := noMethod(*s) + type NoMethod NetworksAddPeeringRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14814,13 +15704,16 @@ type NetworksRemovePeeringRequest struct { } func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksRemovePeeringRequest - raw := noMethod(*s) + type NoMethod NetworksRemovePeeringRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Operation: An Operation resource, used to manage asynchronous API -// requests. +// requests. (== resource_for v1.globalOperations ==) (== resource_for +// beta.globalOperations ==) (== resource_for v1.regionOperations ==) +// (== resource_for beta.regionOperations ==) (== resource_for +// v1.zoneOperations ==) (== resource_for beta.zoneOperations ==) type Operation struct { // ClientOperationId: [Output Only] Reserved for future use. 
ClientOperationId string `json:"clientOperationId,omitempty"` @@ -14943,8 +15836,8 @@ type Operation struct { } func (s *Operation) MarshalJSON() ([]byte, error) { - type noMethod Operation - raw := noMethod(*s) + type NoMethod Operation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14973,8 +15866,8 @@ type OperationError struct { } func (s *OperationError) MarshalJSON() ([]byte, error) { - type noMethod OperationError - raw := noMethod(*s) + type NoMethod OperationError + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15007,8 +15900,8 @@ type OperationErrorErrors struct { } func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { - type noMethod OperationErrorErrors - raw := noMethod(*s) + type NoMethod OperationErrorErrors + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15020,9 +15913,13 @@ type OperationWarnings struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -15033,7 +15930,9 @@ type OperationWarnings struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -15064,8 +15963,8 @@ type OperationWarnings struct { } func (s *OperationWarnings) MarshalJSON() ([]byte, error) { - type noMethod OperationWarnings - raw := noMethod(*s) + type NoMethod OperationWarnings + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15101,8 
+16000,8 @@ type OperationWarningsData struct { } func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { - type noMethod OperationWarningsData - raw := noMethod(*s) + type NoMethod OperationWarningsData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15154,8 +16053,8 @@ type OperationAggregatedList struct { } func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod OperationAggregatedList - raw := noMethod(*s) + type NoMethod OperationAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15169,9 +16068,13 @@ type OperationAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -15182,7 +16085,9 @@ type OperationAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -15213,8 +16118,8 @@ type OperationAggregatedListWarning struct { } func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod OperationAggregatedListWarning - raw := noMethod(*s) + type NoMethod OperationAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15250,8 +16155,8 @@ type OperationAggregatedListWarningData struct { } func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod OperationAggregatedListWarningData - raw := noMethod(*s) + type NoMethod 
OperationAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15304,8 +16209,8 @@ type OperationList struct { } func (s *OperationList) MarshalJSON() ([]byte, error) { - type noMethod OperationList - raw := noMethod(*s) + type NoMethod OperationList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15318,9 +16223,13 @@ type OperationListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -15331,7 +16240,9 @@ type OperationListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -15362,8 +16273,8 @@ type OperationListWarning struct { } func (s *OperationListWarning) MarshalJSON() ([]byte, error) { - type noMethod OperationListWarning - raw := noMethod(*s) + type NoMethod OperationListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15399,8 +16310,8 @@ type OperationListWarningData struct { } func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { - type noMethod OperationListWarningData - raw := noMethod(*s) + type NoMethod OperationListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15430,8 +16341,8 @@ type OperationsScopedList struct { } func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedList - raw := noMethod(*s) + type 
NoMethod OperationsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15445,9 +16356,13 @@ type OperationsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -15458,7 +16373,9 @@ type OperationsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -15489,8 +16406,8 @@ type OperationsScopedListWarning struct { } func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarning - raw := noMethod(*s) + type NoMethod OperationsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15526,8 +16443,8 @@ type OperationsScopedListWarningData struct { } func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarningData - raw := noMethod(*s) + type NoMethod OperationsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15575,8 +16492,8 @@ type PathMatcher struct { } func (s *PathMatcher) MarshalJSON() ([]byte, error) { - type noMethod PathMatcher - raw := noMethod(*s) + type NoMethod PathMatcher + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15611,8 +16528,8 @@ type PathRule struct { } func (s *PathRule) MarshalJSON() ([]byte, error) { - type noMethod PathRule - raw 
:= noMethod(*s) + type NoMethod PathRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15697,14 +16614,15 @@ type Policy struct { } func (s *Policy) MarshalJSON() ([]byte, error) { - type noMethod Policy - raw := noMethod(*s) + type NoMethod Policy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Project: A Project resource. Projects can only be created in the // Google Cloud Platform Console. Unless marked otherwise, values can -// only be modified in the console. +// only be modified in the console. (== resource_for v1.projects ==) (== +// resource_for beta.projects ==) type Project struct { // CommonInstanceMetadata: Metadata key/value pairs available to all // instances contained in this project. See Custom metadata for more @@ -15780,8 +16698,8 @@ type Project struct { } func (s *Project) MarshalJSON() ([]byte, error) { - type noMethod Project - raw := noMethod(*s) + type NoMethod Project + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15807,8 +16725,8 @@ type ProjectsDisableXpnResourceRequest struct { } func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsDisableXpnResourceRequest - raw := noMethod(*s) + type NoMethod ProjectsDisableXpnResourceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15834,8 +16752,8 @@ type ProjectsEnableXpnResourceRequest struct { } func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsEnableXpnResourceRequest - raw := noMethod(*s) + type NoMethod ProjectsEnableXpnResourceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15879,8 +16797,8 @@ type ProjectsGetXpnResources struct { } func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { - type noMethod ProjectsGetXpnResources - raw := 
noMethod(*s) + type NoMethod ProjectsGetXpnResources + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15908,8 +16826,8 @@ type ProjectsListXpnHostsRequest struct { } func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsListXpnHostsRequest - raw := noMethod(*s) + type NoMethod ProjectsListXpnHostsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15937,6 +16855,7 @@ type Quota struct { // "INSTANCE_GROUP_MANAGERS" // "INSTANCE_TEMPLATES" // "INTERCONNECTS" + // "INTERNAL_ADDRESSES" // "IN_USE_ADDRESSES" // "LOCAL_SSD_TOTAL_GB" // "NETWORKS" @@ -15944,6 +16863,8 @@ type Quota struct { // "NVIDIA_P100_GPUS" // "PREEMPTIBLE_CPUS" // "PREEMPTIBLE_LOCAL_SSD_GB" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" // "REGIONAL_AUTOSCALERS" // "REGIONAL_INSTANCE_GROUP_MANAGERS" // "ROUTERS" @@ -15987,19 +16908,19 @@ type Quota struct { } func (s *Quota) MarshalJSON() ([]byte, error) { - type noMethod Quota - raw := noMethod(*s) + type NoMethod Quota + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Quota) UnmarshalJSON(data []byte) error { - type noMethod Quota + type NoMethod Quota var s1 struct { Limit gensupport.JSONFloat64 `json:"limit"` Usage gensupport.JSONFloat64 `json:"usage"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -16043,12 +16964,13 @@ type Reference struct { } func (s *Reference) MarshalJSON() ([]byte, error) { - type noMethod Reference - raw := noMethod(*s) + type NoMethod Reference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Region: Region resource. +// Region: Region resource. 
(== resource_for beta.regions ==) (== +// resource_for v1.regions ==) type Region struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -16112,8 +17034,8 @@ type Region struct { } func (s *Region) MarshalJSON() ([]byte, error) { - type noMethod Region - raw := noMethod(*s) + type NoMethod Region + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16165,8 +17087,8 @@ type RegionAutoscalerList struct { } func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { - type noMethod RegionAutoscalerList - raw := noMethod(*s) + type NoMethod RegionAutoscalerList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16180,9 +17102,13 @@ type RegionAutoscalerListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -16193,7 +17119,9 @@ type RegionAutoscalerListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -16224,8 +17152,8 @@ type RegionAutoscalerListWarning struct { } func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionAutoscalerListWarning - raw := noMethod(*s) + type NoMethod RegionAutoscalerListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16261,8 +17189,8 @@ type RegionAutoscalerListWarningData struct { } func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { 
- type noMethod RegionAutoscalerListWarningData - raw := noMethod(*s) + type NoMethod RegionAutoscalerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16314,8 +17242,8 @@ type RegionInstanceGroupList struct { } func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupList - raw := noMethod(*s) + type NoMethod RegionInstanceGroupList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16329,9 +17257,13 @@ type RegionInstanceGroupListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -16342,7 +17274,9 @@ type RegionInstanceGroupListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -16373,8 +17307,8 @@ type RegionInstanceGroupListWarning struct { } func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupListWarning - raw := noMethod(*s) + type NoMethod RegionInstanceGroupListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16410,8 +17344,8 @@ type RegionInstanceGroupListWarningData struct { } func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupListWarningData - raw := noMethod(*s) + type NoMethod RegionInstanceGroupListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -16466,8 +17400,8 @@ type RegionInstanceGroupManagerList struct { } func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerList - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagerList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16481,9 +17415,13 @@ type RegionInstanceGroupManagerListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -16494,7 +17432,9 @@ type RegionInstanceGroupManagerListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -16525,8 +17465,8 @@ type RegionInstanceGroupManagerListWarning struct { } func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerListWarning - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagerListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16562,8 +17502,8 @@ type RegionInstanceGroupManagerListWarningData struct { } func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerListWarningData - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16591,8 +17531,8 @@ type 
RegionInstanceGroupManagersAbandonInstancesRequest struct { } func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersAbandonInstancesRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersAbandonInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16620,8 +17560,8 @@ type RegionInstanceGroupManagersDeleteInstancesRequest struct { } func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersDeleteInstancesRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersDeleteInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16660,8 +17600,8 @@ type RegionInstanceGroupManagersListInstancesResponse struct { } func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersListInstancesResponse - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersListInstancesResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16689,8 +17629,8 @@ type RegionInstanceGroupManagersRecreateRequest struct { } func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersRecreateRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersRecreateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16716,8 +17656,8 @@ type RegionInstanceGroupManagersSetAutoHealingRequest struct { } func (s *RegionInstanceGroupManagersSetAutoHealingRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetAutoHealingRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersSetAutoHealingRequest + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16750,8 +17690,8 @@ type RegionInstanceGroupManagersSetTargetPoolsRequest struct { } func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTargetPoolsRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersSetTargetPoolsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16779,8 +17719,8 @@ type RegionInstanceGroupManagersSetTemplateRequest struct { } func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTemplateRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersSetTemplateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16831,8 +17771,8 @@ type RegionInstanceGroupsListInstances struct { } func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstances - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsListInstances + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16846,9 +17786,13 @@ type RegionInstanceGroupsListInstancesWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -16859,7 +17803,9 @@ type RegionInstanceGroupsListInstancesWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" 
+ // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -16890,8 +17836,8 @@ type RegionInstanceGroupsListInstancesWarning struct { } func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstancesWarning - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsListInstancesWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16927,8 +17873,8 @@ type RegionInstanceGroupsListInstancesWarningData struct { } func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstancesWarningData - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsListInstancesWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -16965,8 +17911,8 @@ type RegionInstanceGroupsListInstancesRequest struct { } func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstancesRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsListInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17000,8 +17946,8 @@ type RegionInstanceGroupsSetNamedPortsRequest struct { } func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsSetNamedPortsRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsSetNamedPortsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17054,8 +18000,8 @@ type RegionList struct { } func (s *RegionList) MarshalJSON() ([]byte, error) { - type noMethod RegionList - raw := noMethod(*s) + type NoMethod RegionList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17068,9 +18014,13 @@ type RegionListWarning struct { // 
Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -17081,7 +18031,9 @@ type RegionListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -17112,8 +18064,8 @@ type RegionListWarning struct { } func (s *RegionListWarning) MarshalJSON() ([]byte, error) { - type noMethod RegionListWarning - raw := noMethod(*s) + type NoMethod RegionListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17149,8 +18101,8 @@ type RegionListWarningData struct { } func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RegionListWarningData - raw := noMethod(*s) + type NoMethod RegionListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17185,8 +18137,8 @@ type RegionSetLabelsRequest struct { } func (s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionSetLabelsRequest - raw := noMethod(*s) + type NoMethod RegionSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17226,8 +18178,8 @@ type ResourceCommitment struct { } func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { - type noMethod ResourceCommitment - raw := noMethod(*s) + type NoMethod ResourceCommitment + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17254,8 +18206,8 @@ type ResourceGroupReference struct { } 
func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { - type noMethod ResourceGroupReference - raw := noMethod(*s) + type NoMethod ResourceGroupReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17276,7 +18228,8 @@ func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { // Compute Engine-operated gateway. // // Packets that do not match any route in the sending instance's routing -// table are dropped. +// table are dropped. (== resource_for beta.routes ==) (== resource_for +// v1.routes ==) type Route struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -17381,8 +18334,8 @@ type Route struct { } func (s *Route) MarshalJSON() ([]byte, error) { - type noMethod Route - raw := noMethod(*s) + type NoMethod Route + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17394,9 +18347,13 @@ type RouteWarnings struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -17407,7 +18364,9 @@ type RouteWarnings struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -17438,8 +18397,8 @@ type RouteWarnings struct { } func (s *RouteWarnings) MarshalJSON() ([]byte, error) { - type noMethod RouteWarnings - raw := noMethod(*s) + type NoMethod RouteWarnings + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17475,8 +18434,8 @@ type 
RouteWarningsData struct { } func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { - type noMethod RouteWarningsData - raw := noMethod(*s) + type NoMethod RouteWarningsData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17528,8 +18487,8 @@ type RouteList struct { } func (s *RouteList) MarshalJSON() ([]byte, error) { - type noMethod RouteList - raw := noMethod(*s) + type NoMethod RouteList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17542,9 +18501,13 @@ type RouteListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -17555,7 +18518,9 @@ type RouteListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -17586,8 +18551,8 @@ type RouteListWarning struct { } func (s *RouteListWarning) MarshalJSON() ([]byte, error) { - type noMethod RouteListWarning - raw := noMethod(*s) + type NoMethod RouteListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17623,8 +18588,8 @@ type RouteListWarningData struct { } func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RouteListWarningData - raw := noMethod(*s) + type NoMethod RouteListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17699,8 +18664,41 @@ type Router struct { } func (s *Router) MarshalJSON() ([]byte, error) { - 
type noMethod Router - raw := noMethod(*s) + type NoMethod Router + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RouterAdvertisedIpRange: Description-tagged IP ranges for the router +// to advertise. +type RouterAdvertisedIpRange struct { + // Description: User-specified description for the IP range. + Description string `json:"description,omitempty"` + + // Range: The IP range to advertise. The value must be a CIDR-formatted + // string. + Range string `json:"range,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RouterAdvertisedIpRange) MarshalJSON() ([]byte, error) { + type NoMethod RouterAdvertisedIpRange + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17752,8 +18750,8 @@ type RouterAggregatedList struct { } func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod RouterAggregatedList - raw := noMethod(*s) + type NoMethod RouterAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17767,9 +18765,13 @@ type RouterAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -17780,7 +18782,9 @@ type RouterAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -17811,8 +18815,8 @@ type RouterAggregatedListWarning struct { } func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod RouterAggregatedListWarning - raw := noMethod(*s) + type NoMethod RouterAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17848,19 +18852,45 @@ type RouterAggregatedListWarningData struct { } func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RouterAggregatedListWarningData - raw := noMethod(*s) + type NoMethod RouterAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } type RouterBgp struct { + // AdvertiseMode: User-specified flag to indicate which mode to use for + // advertisement. + // + // Possible values: + // "CUSTOM" + // "DEFAULT" + AdvertiseMode string `json:"advertiseMode,omitempty"` + + // AdvertisedGroups: User-specified list of prefix groups to advertise + // in custom mode. This field can only be populated if advertise_mode is + // CUSTOM and is advertised to all peers of the router. These groups + // will be advertised in addition to any specified prefixes. Leave this + // field blank to advertise no custom groups. + // + // Possible values: + // "ALL_SUBNETS" + AdvertisedGroups []string `json:"advertisedGroups,omitempty"` + + // AdvertisedIpRanges: User-specified list of individual IP ranges to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and is advertised to all peers of the + // router. These IP ranges will be advertised in addition to any + // specified groups. Leave this field blank to advertise no custom IP + // ranges. + AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` + // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 // private ASN, either 16-bit or 32-bit. The value will be fixed for // this router resource. All VPN tunnels that link to this router will // have the same local ASN. Asn int64 `json:"asn,omitempty"` - // ForceSendFields is a list of field names (e.g. "Asn") to + // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17868,22 +18898,49 @@ type RouterBgp struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Asn") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AdvertiseMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *RouterBgp) MarshalJSON() ([]byte, error) { - type noMethod RouterBgp - raw := noMethod(*s) + type NoMethod RouterBgp + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type RouterBgpPeer struct { + // AdvertiseMode: User-specified flag to indicate which mode to use for + // advertisement. + // + // Possible values: + // "CUSTOM" + // "DEFAULT" + AdvertiseMode string `json:"advertiseMode,omitempty"` + + // AdvertisedGroups: User-specified list of prefix groups to advertise + // in custom mode. This field can only be populated if advertise_mode is + // CUSTOM and overrides the list defined for the router (in Bgp + // message). These groups will be advertised in addition to any + // specified prefixes. Leave this field blank to advertise no custom + // groups. + // + // Possible values: + // "ALL_SUBNETS" + AdvertisedGroups []string `json:"advertisedGroups,omitempty"` + + // AdvertisedIpRanges: User-specified list of individual IP ranges to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and overrides the list defined for the + // router (in Bgp message). These IP ranges will be advertised in + // addition to any specified groups. 
Leave this field blank to advertise + // no custom IP ranges. + AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` + // AdvertisedRoutePriority: The priority of routes advertised to this // BGP peer. In the case where there is more than one matching route of // maximum length, the routes with lowest priority value win. @@ -17908,28 +18965,26 @@ type RouterBgpPeer struct { // Only IPv4 is supported. PeerIpAddress string `json:"peerIpAddress,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "AdvertisedRoutePriority") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertisedRoutePriority") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "AdvertiseMode") to include + // in API requests with the JSON null value. 
By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { - type noMethod RouterBgpPeer - raw := noMethod(*s) + type NoMethod RouterBgpPeer + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -17975,8 +19030,8 @@ type RouterInterface struct { } func (s *RouterInterface) MarshalJSON() ([]byte, error) { - type noMethod RouterInterface - raw := noMethod(*s) + type NoMethod RouterInterface + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18029,8 +19084,8 @@ type RouterList struct { } func (s *RouterList) MarshalJSON() ([]byte, error) { - type noMethod RouterList - raw := noMethod(*s) + type NoMethod RouterList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18043,9 +19098,13 @@ type RouterListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -18056,7 +19115,9 @@ type RouterListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -18087,8 +19148,8 @@ type RouterListWarning struct { } func (s *RouterListWarning) MarshalJSON() ([]byte, 
error) { - type noMethod RouterListWarning - raw := noMethod(*s) + type NoMethod RouterListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18124,8 +19185,8 @@ type RouterListWarningData struct { } func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RouterListWarningData - raw := noMethod(*s) + type NoMethod RouterListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18159,8 +19220,8 @@ type RouterStatus struct { } func (s *RouterStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatus - raw := noMethod(*s) + type NoMethod RouterStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18220,8 +19281,8 @@ type RouterStatusBgpPeerStatus struct { } func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusBgpPeerStatus - raw := noMethod(*s) + type NoMethod RouterStatusBgpPeerStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18253,8 +19314,8 @@ type RouterStatusResponse struct { } func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusResponse - raw := noMethod(*s) + type NoMethod RouterStatusResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18284,8 +19345,8 @@ type RoutersPreviewResponse struct { } func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { - type noMethod RoutersPreviewResponse - raw := noMethod(*s) + type NoMethod RoutersPreviewResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18315,8 +19376,8 @@ type RoutersScopedList struct { } func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedList - raw := noMethod(*s) + type NoMethod RoutersScopedList + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18330,9 +19391,13 @@ type RoutersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -18343,7 +19408,9 @@ type RoutersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -18374,8 +19441,8 @@ type RoutersScopedListWarning struct { } func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarning - raw := noMethod(*s) + type NoMethod RoutersScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18411,8 +19478,8 @@ type RoutersScopedListWarningData struct { } func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarningData - raw := noMethod(*s) + type NoMethod RoutersScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18429,7 +19496,8 @@ type Rule struct { // "NO_ACTION" Action string `json:"action,omitempty"` - // Conditions: Additional restrictions that must be met + // Conditions: Additional restrictions that must be met. All conditions + // must pass for the rule to match. Conditions []*Condition `json:"conditions,omitempty"` // Description: Human-readable description of the rule. 
@@ -18471,8 +19539,8 @@ type Rule struct { } func (s *Rule) MarshalJSON() ([]byte, error) { - type noMethod Rule - raw := noMethod(*s) + type NoMethod Rule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18523,8 +19591,8 @@ type SSLHealthCheck struct { } func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod SSLHealthCheck - raw := noMethod(*s) + type NoMethod SSLHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18575,8 +19643,8 @@ type Scheduling struct { } func (s *Scheduling) MarshalJSON() ([]byte, error) { - type noMethod Scheduling - raw := noMethod(*s) + type NoMethod Scheduling + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18651,8 +19719,8 @@ type SecurityPolicy struct { } func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicy - raw := noMethod(*s) + type NoMethod SecurityPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18701,8 +19769,8 @@ type SecurityPolicyList struct { } func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyList - raw := noMethod(*s) + type NoMethod SecurityPolicyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18716,9 +19784,13 @@ type SecurityPolicyListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -18729,7 +19801,9 @@ type SecurityPolicyListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // 
"RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -18760,8 +19834,8 @@ type SecurityPolicyListWarning struct { } func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyListWarning - raw := noMethod(*s) + type NoMethod SecurityPolicyListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18797,8 +19871,8 @@ type SecurityPolicyListWarningData struct { } func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyListWarningData - raw := noMethod(*s) + type NoMethod SecurityPolicyListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18824,8 +19898,8 @@ type SecurityPolicyReference struct { } func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyReference - raw := noMethod(*s) + type NoMethod SecurityPolicyReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18858,6 +19932,10 @@ type SecurityPolicyRule struct { // are evaluated in the increasing order of priority. Priority int64 `json:"priority,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, @@ -18876,14 +19954,56 @@ type SecurityPolicyRule struct { } func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyRule - raw := noMethod(*s) + type NoMethod SecurityPolicyRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SecurityPolicyRuleMatcher: Represents a match condition that incoming // traffic is evaluated against. Exactly one field must be specified. type SecurityPolicyRuleMatcher struct { + // Config: The configuration options available when specifying + // versioned_expr. This field must be specified if versioned_expr is + // specified and cannot be specified if versioned_expr is not specified. + Config *SecurityPolicyRuleMatcherConfig `json:"config,omitempty"` + + // SrcIpRanges: CIDR IP address range. Only IPv4 is supported. + SrcIpRanges []string `json:"srcIpRanges,omitempty"` + + // VersionedExpr: Preconfigured versioned expression. If this field is + // specified, config must also be specified. Available preconfigured + // expressions along with their requirements are: SRC_IPS_V1 - must + // specify the corresponding src_ip_range field in config. + // + // Possible values: + // "SRC_IPS_V1" + // "VERSIONED_EXPR_UNSPECIFIED" + VersionedExpr string `json:"versionedExpr,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Config") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Config") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcher + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyRuleMatcherConfig struct { // SrcIpRanges: CIDR IP address range. Only IPv4 is supported. SrcIpRanges []string `json:"srcIpRanges,omitempty"` @@ -18904,9 +20024,9 @@ type SecurityPolicyRuleMatcher struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyRuleMatcher - raw := noMethod(*s) +func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcherConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18956,8 +20076,8 @@ type SerialPortOutput struct { } func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { - type noMethod SerialPortOutput - raw := noMethod(*s) + type NoMethod SerialPortOutput + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -18988,12 +20108,51 @@ type ServiceAccount struct { } func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type noMethod ServiceAccount - raw := noMethod(*s) + type NoMethod ServiceAccount + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Snapshot: A persistent disk snapshot resource. +// SignedUrlKey: Represents a customer-supplied Signing Key used by +// Cloud CDN Signed URLs +type SignedUrlKey struct { + // KeyName: Name of the key. The name must be 1-63 characters long, and + // comply with RFC1035. 
Specifically, the name must be 1-63 characters + // long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. + KeyName string `json:"keyName,omitempty"` + + // KeyValue: 128-bit key value used for signing the URL. The key value + // must be a valid RFC 4648 Section 5 base64url encoded string. + KeyValue string `json:"keyValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "KeyName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KeyName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { + type NoMethod SignedUrlKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Snapshot: A persistent disk snapshot resource. (== resource_for +// beta.snapshots ==) (== resource_for v1.snapshots ==) type Snapshot struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -19029,6 +20188,10 @@ type Snapshot struct { // by the setLabels method. Label values may be empty. 
Labels map[string]string `json:"labels,omitempty"` + // LicenseCodes: Integer license codes indicating which licenses are + // attached to this snapshot. + LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` + // Licenses: [Output Only] A list of public visible licenses that apply // to this snapshot. This can be because the original image had licenses // attached (such as a Windows image). @@ -19088,7 +20251,7 @@ type Snapshot struct { // "UPLOADING" Status string `json:"status,omitempty"` - // StorageBytes: [Output Only] A size of the the storage used by the + // StorageBytes: [Output Only] A size of the storage used by the // snapshot. As snapshots share storage, this number is expected to // change with snapshot creation/deletion. StorageBytes int64 `json:"storageBytes,omitempty,string"` @@ -19127,8 +20290,8 @@ type Snapshot struct { } func (s *Snapshot) MarshalJSON() ([]byte, error) { - type noMethod Snapshot - raw := noMethod(*s) + type NoMethod Snapshot + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19180,8 +20343,8 @@ type SnapshotList struct { } func (s *SnapshotList) MarshalJSON() ([]byte, error) { - type noMethod SnapshotList - raw := noMethod(*s) + type NoMethod SnapshotList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19194,9 +20357,13 @@ type SnapshotListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -19207,7 +20374,9 @@ type SnapshotListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // 
"SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -19238,8 +20407,8 @@ type SnapshotListWarning struct { } func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { - type noMethod SnapshotListWarning - raw := noMethod(*s) + type NoMethod SnapshotListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19275,14 +20444,48 @@ type SnapshotListWarningData struct { } func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SnapshotListWarningData - raw := noMethod(*s) + type NoMethod SnapshotListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SourceInstanceParams: A specification of the parameters to use when +// creating the instance template from a source instance. +type SourceInstanceParams struct { + // DiskConfigs: Attached disks configuration. If not provided, defaults + // are applied: For boot disk and any other R/W disks, new custom images + // will be created from each disk. For read-only disks, they will be + // attached in read-only mode. Local SSD disks will be created as blank + // volumes. + DiskConfigs []*DiskInstantiationConfig `json:"diskConfigs,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskConfigs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DiskConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { + type NoMethod SourceInstanceParams + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SslCertificate: An SslCertificate resource. This resource provides a // mechanism to upload an SSL key and certificate to the load balancer -// to serve secure connections from the user. +// to serve secure connections from the user. (== resource_for +// beta.sslCertificates ==) (== resource_for v1.sslCertificates ==) type SslCertificate struct { // Certificate: A local certificate file. The certificate must be in PEM // format. The certificate chain must be no greater than 5 certs long. @@ -19343,8 +20546,8 @@ type SslCertificate struct { } func (s *SslCertificate) MarshalJSON() ([]byte, error) { - type noMethod SslCertificate - raw := noMethod(*s) + type NoMethod SslCertificate + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19396,8 +20599,8 @@ type SslCertificateList struct { } func (s *SslCertificateList) MarshalJSON() ([]byte, error) { - type noMethod SslCertificateList - raw := noMethod(*s) + type NoMethod SslCertificateList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19411,9 +20614,13 @@ type SslCertificateListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // 
"NEXT_HOP_INSTANCE_NOT_FOUND" @@ -19424,7 +20631,9 @@ type SslCertificateListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -19455,8 +20664,8 @@ type SslCertificateListWarning struct { } func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { - type noMethod SslCertificateListWarning - raw := noMethod(*s) + type NoMethod SslCertificateListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19492,13 +20701,451 @@ type SslCertificateListWarningData struct { } func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SslCertificateListWarningData - raw := noMethod(*s) + type NoMethod SslCertificateListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Subnetwork: A Subnetwork resource. +type SslPoliciesList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of SslPolicy resources. + Items []*SslPolicy `json:"items,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#sslPoliciesList for lists of sslPolicies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. 
+ SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SslPoliciesListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslPoliciesListWarning: [Output Only] Informational warning message. +type SslPoliciesListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SslPoliciesListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPoliciesListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPoliciesListAvailableFeaturesResponse struct { + Features []string `json:"features,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Features") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Features") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListAvailableFeaturesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslPolicy: A SSL policy specifies the server-side support for SSL +// features. This can be attached to a TargetHttpsProxy or a +// TargetSslProxy. This affects connections between clients and the +// HTTPS or SSL proxy load balancer. They do not affect the connection +// between the load balancers and the backends. 
+type SslPolicy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CustomFeatures: List of features enabled when the selected profile is + // CUSTOM. The + // - method returns the set of features that can be specified in this + // list. This field must be empty if the profile is not CUSTOM. + CustomFeatures []string `json:"customFeatures,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: [Output Only] The list of features enabled in the + // SSL policy. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a SslPolicy. An up-to-date + // fingerprint must be provided in order to update the SslPolicy. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor + // SSL policies. + Kind string `json:"kind,omitempty"` + + // MinTlsVersion: The minimum version of SSL protocol that can be used + // by the clients to establish a connection with the load balancer. This + // can be one of TLS_1_0, TLS_1_1, TLS_1_2, TLS_1_3. + // + // Possible values: + // "TLS_1_0" + // "TLS_1_1" + // "TLS_1_2" + // "TLS_1_3" + MinTlsVersion string `json:"minTlsVersion,omitempty"` + + // Name: Name of the resource. The name must be 1-63 characters long, + // and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a + // lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // Profile: Profile specifies the set of SSL features that can be used + // by the load balancer when negotiating SSL with clients. This can be + // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, + // the set of SSL features to enable must be specified in the + // customFeatures field. + // + // Possible values: + // "COMPATIBLE" + // "CUSTOM" + // "MODERN" + // "RESTRICTED" + Profile string `json:"profile,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this SSL policy, this field will be populated with warning + // messages. + Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SslPolicy) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPolicyWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SslPolicyWarningsData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPolicyWarnings) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyWarnings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPolicyWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPolicyWarningsData) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyWarningsData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPolicyReference struct { + // SslPolicy: URL of the SSL policy resource. Set this to empty string + // to clear any existing SSL policy associated with the target proxy + // resource. + SslPolicy string `json:"sslPolicy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslPolicy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslPolicy") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Subnetwork: A Subnetwork resource. (== resource_for beta.subnetworks +// ==) (== resource_for v1.subnetworks ==) type Subnetwork struct { + // AllowSubnetCidrRoutesOverlap: Whether this subnetwork can conflict + // with static routes. Setting this to true allows this subnetwork's + // primary and secondary ranges to conflict with routes that have + // already been configured on the corresponding network. Static routes + // will take precedence over the subnetwork route if the route prefix + // length is at least as large as the subnetwork prefix length. + // + // Also, packets destined to IPs within subnetwork may contain + // private/sensitive data and are prevented from leaving the virtual + // network. Setting this field to true will disable this feature. + // + // The default value is false and applies to all existing subnetworks + // and automatically created subnetworks. + // + // This field cannot be set to true at resource creation time. + AllowSubnetCidrRoutesOverlap bool `json:"allowSubnetCidrRoutesOverlap,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -19508,6 +21155,12 @@ type Subnetwork struct { // resource creation time. Description string `json:"description,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a Subnetwork. An up-to-date + // fingerprint must be provided in order to update the Subnetwork. 
+ Fingerprint string `json:"fingerprint,omitempty"` + // GatewayAddress: [Output Only] The gateway address for default routes // to reach destination addresses outside this subnetwork. This field // can be set only at resource creation time. @@ -19566,27 +21219,28 @@ type Subnetwork struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AllowSubnetCidrRoutesOverlap") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. + // "AllowSubnetCidrRoutesOverlap") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. 
It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. NullFields []string `json:"-"` } func (s *Subnetwork) MarshalJSON() ([]byte, error) { - type noMethod Subnetwork - raw := noMethod(*s) + type NoMethod Subnetwork + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19638,8 +21292,8 @@ type SubnetworkAggregatedList struct { } func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkAggregatedList - raw := noMethod(*s) + type NoMethod SubnetworkAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19653,9 +21307,13 @@ type SubnetworkAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -19666,7 +21324,9 @@ type SubnetworkAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -19697,8 +21357,8 @@ type SubnetworkAggregatedListWarning struct { } func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkAggregatedListWarning - raw := noMethod(*s) + type NoMethod SubnetworkAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19734,8 +21394,8 @@ type SubnetworkAggregatedListWarningData struct { } func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, 
error) { - type noMethod SubnetworkAggregatedListWarningData - raw := noMethod(*s) + type NoMethod SubnetworkAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19788,8 +21448,8 @@ type SubnetworkList struct { } func (s *SubnetworkList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkList - raw := noMethod(*s) + type NoMethod SubnetworkList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19802,9 +21462,13 @@ type SubnetworkListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -19815,7 +21479,9 @@ type SubnetworkListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -19846,8 +21512,8 @@ type SubnetworkListWarning struct { } func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkListWarning - raw := noMethod(*s) + type NoMethod SubnetworkListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19883,8 +21549,8 @@ type SubnetworkListWarningData struct { } func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkListWarningData - raw := noMethod(*s) + type NoMethod SubnetworkListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19922,8 +21588,8 @@ type SubnetworkSecondaryRange struct { } func (s 
*SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkSecondaryRange - raw := noMethod(*s) + type NoMethod SubnetworkSecondaryRange + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19953,8 +21619,8 @@ type SubnetworksExpandIpCidrRangeRequest struct { } func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksExpandIpCidrRangeRequest - raw := noMethod(*s) + type NoMethod SubnetworksExpandIpCidrRangeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19984,8 +21650,8 @@ type SubnetworksScopedList struct { } func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedList - raw := noMethod(*s) + type NoMethod SubnetworksScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -19999,9 +21665,13 @@ type SubnetworksScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -20012,7 +21682,9 @@ type SubnetworksScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -20043,8 +21715,8 @@ type SubnetworksScopedListWarning struct { } func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarning - raw := noMethod(*s) + type NoMethod SubnetworksScopedListWarning + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20080,8 +21752,8 @@ type SubnetworksScopedListWarningData struct { } func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarningData - raw := noMethod(*s) + type NoMethod SubnetworksScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20108,8 +21780,8 @@ type SubnetworksSetPrivateIpGoogleAccessRequest struct { } func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksSetPrivateIpGoogleAccessRequest - raw := noMethod(*s) + type NoMethod SubnetworksSetPrivateIpGoogleAccessRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20160,8 +21832,8 @@ type TCPHealthCheck struct { } func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod TCPHealthCheck - raw := noMethod(*s) + type NoMethod TCPHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20199,13 +21871,14 @@ type Tags struct { } func (s *Tags) MarshalJSON() ([]byte, error) { - type noMethod Tags - raw := noMethod(*s) + type NoMethod Tags + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetHttpProxy: A TargetHttpProxy resource. This resource defines an -// HTTP proxy. +// HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== +// resource_for v1.targetHttpProxies ==) type TargetHttpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -20262,8 +21935,8 @@ type TargetHttpProxy struct { } func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxy - raw := noMethod(*s) + type NoMethod TargetHttpProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20316,8 +21989,8 @@ type TargetHttpProxyList struct { } func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxyList - raw := noMethod(*s) + type NoMethod TargetHttpProxyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20331,9 +22004,13 @@ type TargetHttpProxyListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -20344,7 +22021,9 @@ type TargetHttpProxyListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -20375,8 +22054,8 @@ type TargetHttpProxyListWarning struct { } func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxyListWarning - raw := noMethod(*s) + type NoMethod TargetHttpProxyListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20412,8 +22091,40 @@ type TargetHttpProxyListWarningData struct { } func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxyListWarningData - raw := noMethod(*s) + type NoMethod TargetHttpProxyListWarningData + raw := NoMethod(*s) + return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpsProxiesSetQuicOverrideRequest struct { + // QuicOverride: QUIC policy for the TargetHttpsProxy resource. + // + // Possible values: + // "DISABLE" + // "ENABLE" + // "NONE" + QuicOverride string `json:"quicOverride,omitempty"` + + // ForceSendFields is a list of field names (e.g. "QuicOverride") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QuicOverride") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesSetQuicOverrideRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20442,13 +22153,14 @@ type TargetHttpsProxiesSetSslCertificatesRequest struct { } func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxiesSetSslCertificatesRequest - raw := noMethod(*s) + type NoMethod TargetHttpsProxiesSetSslCertificatesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines -// an HTTPS proxy. +// an HTTPS proxy. 
(== resource_for beta.targetHttpsProxies ==) (== +// resource_for v1.targetHttpsProxies ==) type TargetHttpsProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -20475,6 +22187,21 @@ type TargetHttpsProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // QuicOverride: Specifies the QUIC override policy for this + // TargetHttpsProxy resource. This determines whether the load balancer + // will attempt to negotiate QUIC with clients or not. Can specify one + // of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, + // Enables QUIC when set to ENABLE, and disables QUIC when set to + // DISABLE. If NONE is specified, uses the QUIC policy with no user + // overrides, which is equivalent to DISABLE. Not specifying this field + // is equivalent to specifying NONE. + // + // Possible values: + // "DISABLE" + // "ENABLE" + // "NONE" + QuicOverride string `json:"quicOverride,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -20483,6 +22210,11 @@ type TargetHttpsProxy struct { // Currently, exactly one SSL certificate must be specified. SslCertificates []string `json:"sslCertificates,omitempty"` + // SslPolicy: URL of SslPolicy resource that will be associated with the + // TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource + // will not have any SSL policy configured. + SslPolicy string `json:"sslPolicy,omitempty"` + // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource // that defines the mapping from URL to the BackendService. 
For example, // the following are all valid URLs for specifying a URL map: @@ -20515,8 +22247,8 @@ type TargetHttpsProxy struct { } func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxy - raw := noMethod(*s) + type NoMethod TargetHttpsProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20569,8 +22301,8 @@ type TargetHttpsProxyList struct { } func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxyList - raw := noMethod(*s) + type NoMethod TargetHttpsProxyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20584,9 +22316,13 @@ type TargetHttpsProxyListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -20597,7 +22333,9 @@ type TargetHttpsProxyListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -20628,8 +22366,8 @@ type TargetHttpsProxyListWarning struct { } func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxyListWarning - raw := noMethod(*s) + type NoMethod TargetHttpsProxyListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20665,13 +22403,15 @@ type TargetHttpsProxyListWarningData struct { } func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxyListWarningData - raw 
:= noMethod(*s) + type NoMethod TargetHttpsProxyListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetInstance: A TargetInstance resource. This resource defines an -// endpoint instance that terminates traffic of certain protocols. +// endpoint instance that terminates traffic of certain protocols. (== +// resource_for beta.targetInstances ==) (== resource_for +// v1.targetInstances ==) type TargetInstance struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -20745,8 +22485,8 @@ type TargetInstance struct { } func (s *TargetInstance) MarshalJSON() ([]byte, error) { - type noMethod TargetInstance - raw := noMethod(*s) + type NoMethod TargetInstance + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20797,8 +22537,8 @@ type TargetInstanceAggregatedList struct { } func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceAggregatedList - raw := noMethod(*s) + type NoMethod TargetInstanceAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20812,9 +22552,13 @@ type TargetInstanceAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -20825,7 +22569,9 @@ type TargetInstanceAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -20856,8 
+22602,8 @@ type TargetInstanceAggregatedListWarning struct { } func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceAggregatedListWarning - raw := noMethod(*s) + type NoMethod TargetInstanceAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20893,8 +22639,8 @@ type TargetInstanceAggregatedListWarningData struct { } func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceAggregatedListWarningData - raw := noMethod(*s) + type NoMethod TargetInstanceAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20946,8 +22692,8 @@ type TargetInstanceList struct { } func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceList - raw := noMethod(*s) + type NoMethod TargetInstanceList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -20961,9 +22707,13 @@ type TargetInstanceListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -20974,7 +22724,9 @@ type TargetInstanceListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -21005,8 +22757,8 @@ type TargetInstanceListWarning struct { } func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceListWarning - 
raw := noMethod(*s) + type NoMethod TargetInstanceListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21042,8 +22794,8 @@ type TargetInstanceListWarningData struct { } func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceListWarningData - raw := noMethod(*s) + type NoMethod TargetInstanceListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21074,8 +22826,8 @@ type TargetInstancesScopedList struct { } func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedList - raw := noMethod(*s) + type NoMethod TargetInstancesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21089,9 +22841,13 @@ type TargetInstancesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -21102,7 +22858,9 @@ type TargetInstancesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -21133,8 +22891,8 @@ type TargetInstancesScopedListWarning struct { } func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarning - raw := noMethod(*s) + type NoMethod TargetInstancesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21170,14 +22928,15 
@@ type TargetInstancesScopedListWarningData struct { } func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarningData - raw := noMethod(*s) + type NoMethod TargetInstancesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetPool: A TargetPool resource. This resource defines a pool of // instances, an associated HttpHealthCheck resource, and the fallback -// target pool. +// target pool. (== resource_for beta.targetPools ==) (== resource_for +// v1.targetPools ==) type TargetPool struct { // BackupPool: This field is applicable only when the containing target // pool is serving a forwarding rule as the primary pool, and its @@ -21299,18 +23058,18 @@ type TargetPool struct { } func (s *TargetPool) MarshalJSON() ([]byte, error) { - type noMethod TargetPool - raw := noMethod(*s) + type NoMethod TargetPool + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *TargetPool) UnmarshalJSON(data []byte) error { - type noMethod TargetPool + type NoMethod TargetPool var s1 struct { FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -21367,8 +23126,8 @@ type TargetPoolAggregatedList struct { } func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolAggregatedList - raw := noMethod(*s) + type NoMethod TargetPoolAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21382,9 +23141,13 @@ type TargetPoolAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // 
"FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -21395,7 +23158,9 @@ type TargetPoolAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -21426,8 +23191,8 @@ type TargetPoolAggregatedListWarning struct { } func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolAggregatedListWarning - raw := noMethod(*s) + type NoMethod TargetPoolAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21463,8 +23228,8 @@ type TargetPoolAggregatedListWarningData struct { } func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolAggregatedListWarningData - raw := noMethod(*s) + type NoMethod TargetPoolAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21498,8 +23263,8 @@ type TargetPoolInstanceHealth struct { } func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolInstanceHealth - raw := noMethod(*s) + type NoMethod TargetPoolInstanceHealth + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21552,8 +23317,8 @@ type TargetPoolList struct { } func (s *TargetPoolList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolList - raw := noMethod(*s) + type NoMethod TargetPoolList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21566,9 +23331,13 @@ type TargetPoolListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // 
"DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -21579,7 +23348,9 @@ type TargetPoolListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -21610,8 +23381,8 @@ type TargetPoolListWarning struct { } func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolListWarning - raw := noMethod(*s) + type NoMethod TargetPoolListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21647,8 +23418,8 @@ type TargetPoolListWarningData struct { } func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolListWarningData - raw := noMethod(*s) + type NoMethod TargetPoolListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21674,8 +23445,8 @@ type TargetPoolsAddHealthCheckRequest struct { } func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddHealthCheckRequest - raw := noMethod(*s) + type NoMethod TargetPoolsAddHealthCheckRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21707,8 +23478,8 @@ type TargetPoolsAddInstanceRequest struct { } func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddInstanceRequest - raw := noMethod(*s) + type NoMethod TargetPoolsAddInstanceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21739,8 
+23510,8 @@ type TargetPoolsRemoveHealthCheckRequest struct { } func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveHealthCheckRequest - raw := noMethod(*s) + type NoMethod TargetPoolsRemoveHealthCheckRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21766,8 +23537,8 @@ type TargetPoolsRemoveInstanceRequest struct { } func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveInstanceRequest - raw := noMethod(*s) + type NoMethod TargetPoolsRemoveInstanceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21797,8 +23568,8 @@ type TargetPoolsScopedList struct { } func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedList - raw := noMethod(*s) + type NoMethod TargetPoolsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21812,9 +23583,13 @@ type TargetPoolsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -21825,7 +23600,9 @@ type TargetPoolsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -21856,8 +23633,8 @@ type TargetPoolsScopedListWarning struct { } func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarning - 
raw := noMethod(*s) + type NoMethod TargetPoolsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21893,8 +23670,8 @@ type TargetPoolsScopedListWarningData struct { } func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarningData - raw := noMethod(*s) + type NoMethod TargetPoolsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21919,8 +23696,8 @@ type TargetReference struct { } func (s *TargetReference) MarshalJSON() ([]byte, error) { - type noMethod TargetReference - raw := noMethod(*s) + type NoMethod TargetReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21947,8 +23724,8 @@ type TargetSslProxiesSetBackendServiceRequest struct { } func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetBackendServiceRequest - raw := noMethod(*s) + type NoMethod TargetSslProxiesSetBackendServiceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -21979,8 +23756,8 @@ type TargetSslProxiesSetProxyHeaderRequest struct { } func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetProxyHeaderRequest - raw := noMethod(*s) + type NoMethod TargetSslProxiesSetProxyHeaderRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22009,13 +23786,14 @@ type TargetSslProxiesSetSslCertificatesRequest struct { } func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetSslCertificatesRequest - raw := noMethod(*s) + type NoMethod TargetSslProxiesSetSslCertificatesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetSslProxy: A 
TargetSslProxy resource. This resource defines an -// SSL proxy. +// SSL proxy. (== resource_for beta.targetSslProxies ==) (== +// resource_for v1.targetSslProxies ==) type TargetSslProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -22062,6 +23840,11 @@ type TargetSslProxy struct { // certificate must be specified. SslCertificates []string `json:"sslCertificates,omitempty"` + // SslPolicy: URL of SslPolicy resource that will be associated with the + // TargetSslProxy resource. If not set, the TargetSslProxy resource will + // not have any SSL policy configured. + SslPolicy string `json:"sslPolicy,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -22085,8 +23868,8 @@ type TargetSslProxy struct { } func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxy - raw := noMethod(*s) + type NoMethod TargetSslProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22138,8 +23921,8 @@ type TargetSslProxyList struct { } func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxyList - raw := noMethod(*s) + type NoMethod TargetSslProxyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22153,9 +23936,13 @@ type TargetSslProxyListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -22166,7 +23953,9 @@ type TargetSslProxyListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // 
"SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -22197,8 +23986,8 @@ type TargetSslProxyListWarning struct { } func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxyListWarning - raw := noMethod(*s) + type NoMethod TargetSslProxyListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22234,8 +24023,8 @@ type TargetSslProxyListWarningData struct { } func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxyListWarningData - raw := noMethod(*s) + type NoMethod TargetSslProxyListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22262,8 +24051,8 @@ type TargetTcpProxiesSetBackendServiceRequest struct { } func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetBackendServiceRequest - raw := noMethod(*s) + type NoMethod TargetTcpProxiesSetBackendServiceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22294,13 +24083,14 @@ type TargetTcpProxiesSetProxyHeaderRequest struct { } func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetProxyHeaderRequest - raw := noMethod(*s) + type NoMethod TargetTcpProxiesSetProxyHeaderRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetTcpProxy: A TargetTcpProxy resource. This resource defines a -// TCP proxy. +// TCP proxy. (== resource_for beta.targetTcpProxies ==) (== +// resource_for v1.targetTcpProxies ==) type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -22365,8 +24155,8 @@ type TargetTcpProxy struct { } func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxy - raw := noMethod(*s) + type NoMethod TargetTcpProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22418,8 +24208,8 @@ type TargetTcpProxyList struct { } func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxyList - raw := noMethod(*s) + type NoMethod TargetTcpProxyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22433,9 +24223,13 @@ type TargetTcpProxyListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -22446,7 +24240,9 @@ type TargetTcpProxyListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -22477,8 +24273,8 @@ type TargetTcpProxyListWarning struct { } func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxyListWarning - raw := noMethod(*s) + type NoMethod TargetTcpProxyListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22514,12 +24310,14 @@ type TargetTcpProxyListWarningData struct { } func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxyListWarningData - raw := noMethod(*s) + type NoMethod TargetTcpProxyListWarningData + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGateway: Represents a Target VPN gateway resource. +// TargetVpnGateway: Represents a Target VPN gateway resource. (== +// resource_for beta.targetVpnGateways ==) (== resource_for +// v1.targetVpnGateways ==) type TargetVpnGateway struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -22542,6 +24340,22 @@ type TargetVpnGateway struct { // for target VPN gateways. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // TargetVpnGateway, which is essentially a hash of the labels set used + // for optimistic locking. The fingerprint is initially generated by + // Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels. + // + // To see the latest fingerprint, make a get() request to retrieve an + // TargetVpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this TargetVpnGateway resource. These can + // be later modified by the setLabels method. Each label key/value must + // comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. 
Specifically, the name must be 1-63 characters long and @@ -22599,8 +24413,8 @@ type TargetVpnGateway struct { } func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGateway - raw := noMethod(*s) + type NoMethod TargetVpnGateway + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22652,8 +24466,8 @@ type TargetVpnGatewayAggregatedList struct { } func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayAggregatedList - raw := noMethod(*s) + type NoMethod TargetVpnGatewayAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22667,9 +24481,13 @@ type TargetVpnGatewayAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -22680,7 +24498,9 @@ type TargetVpnGatewayAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -22711,8 +24531,8 @@ type TargetVpnGatewayAggregatedListWarning struct { } func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayAggregatedListWarning - raw := noMethod(*s) + type NoMethod TargetVpnGatewayAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22748,8 +24568,8 @@ type TargetVpnGatewayAggregatedListWarningData struct { } func (s 
*TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayAggregatedListWarningData - raw := noMethod(*s) + type NoMethod TargetVpnGatewayAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22802,8 +24622,8 @@ type TargetVpnGatewayList struct { } func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayList - raw := noMethod(*s) + type NoMethod TargetVpnGatewayList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22817,9 +24637,13 @@ type TargetVpnGatewayListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -22830,7 +24654,9 @@ type TargetVpnGatewayListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -22861,8 +24687,8 @@ type TargetVpnGatewayListWarning struct { } func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayListWarning - raw := noMethod(*s) + type NoMethod TargetVpnGatewayListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22898,8 +24724,8 @@ type TargetVpnGatewayListWarningData struct { } func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayListWarningData - raw := noMethod(*s) + type NoMethod TargetVpnGatewayListWarningData + 
raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22931,8 +24757,8 @@ type TargetVpnGatewaysScopedList struct { } func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedList - raw := noMethod(*s) + type NoMethod TargetVpnGatewaysScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -22946,9 +24772,13 @@ type TargetVpnGatewaysScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -22959,7 +24789,9 @@ type TargetVpnGatewaysScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -22990,8 +24822,8 @@ type TargetVpnGatewaysScopedListWarning struct { } func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarning - raw := noMethod(*s) + type NoMethod TargetVpnGatewaysScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23027,8 +24859,8 @@ type TargetVpnGatewaysScopedListWarningData struct { } func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarningData - raw := noMethod(*s) + type NoMethod TargetVpnGatewaysScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23059,8 +24891,8 @@ type 
TestFailure struct { } func (s *TestFailure) MarshalJSON() ([]byte, error) { - type noMethod TestFailure - raw := noMethod(*s) + type NoMethod TestFailure + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23088,8 +24920,8 @@ type TestPermissionsRequest struct { } func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { - type noMethod TestPermissionsRequest - raw := noMethod(*s) + type NoMethod TestPermissionsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23120,8 +24952,8 @@ type TestPermissionsResponse struct { } func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { - type noMethod TestPermissionsResponse - raw := noMethod(*s) + type NoMethod TestPermissionsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23161,8 +24993,8 @@ type UDPHealthCheck struct { } func (s *UDPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod UDPHealthCheck - raw := noMethod(*s) + type NoMethod UDPHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23241,8 +25073,8 @@ type UrlMap struct { } func (s *UrlMap) MarshalJSON() ([]byte, error) { - type noMethod UrlMap - raw := noMethod(*s) + type NoMethod UrlMap + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23294,8 +25126,8 @@ type UrlMapList struct { } func (s *UrlMapList) MarshalJSON() ([]byte, error) { - type noMethod UrlMapList - raw := noMethod(*s) + type NoMethod UrlMapList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23308,9 +25140,13 @@ type UrlMapListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // 
"INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -23321,7 +25157,9 @@ type UrlMapListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -23352,8 +25190,8 @@ type UrlMapListWarning struct { } func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { - type noMethod UrlMapListWarning - raw := noMethod(*s) + type NoMethod UrlMapListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23389,8 +25227,8 @@ type UrlMapListWarningData struct { } func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { - type noMethod UrlMapListWarningData - raw := noMethod(*s) + type NoMethod UrlMapListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23415,8 +25253,8 @@ type UrlMapReference struct { } func (s *UrlMapReference) MarshalJSON() ([]byte, error) { - type noMethod UrlMapReference - raw := noMethod(*s) + type NoMethod UrlMapReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23453,8 +25291,8 @@ type UrlMapTest struct { } func (s *UrlMapTest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapTest - raw := noMethod(*s) + type NoMethod UrlMapTest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23492,8 +25330,8 @@ type UrlMapValidationResult struct { } func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { - type noMethod UrlMapValidationResult - raw := noMethod(*s) + type NoMethod UrlMapValidationResult + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23519,8 +25357,8 @@ 
type UrlMapsValidateRequest struct { } func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateRequest - raw := noMethod(*s) + type NoMethod UrlMapsValidateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23549,8 +25387,8 @@ type UrlMapsValidateResponse struct { } func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateResponse - raw := noMethod(*s) + type NoMethod UrlMapsValidateResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23592,11 +25430,13 @@ type UsageExportLocation struct { } func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { - type noMethod UsageExportLocation - raw := noMethod(*s) + type NoMethod UsageExportLocation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// VpnTunnel: VPN tunnel resource. (== resource_for beta.vpnTunnels ==) +// (== resource_for v1.vpnTunnels ==) type VpnTunnel struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -23623,6 +25463,22 @@ type VpnTunnel struct { // VPN tunnels. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // VpnTunnel, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels. + // + // To see the latest fingerprint, make a get() request to retrieve a + // VpnTunnel. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this VpnTunnel. These can be later + // modified by the setLabels method. Each label key/value pair must + // comply with RFC1035. Label values may be empty. 
+ Labels map[string]string `json:"labels,omitempty"` + // LocalTrafficSelector: Local traffic selector to use when establishing // the VPN tunnel with peer VPN gateway. The value should be a CIDR // formatted string, for example: 192.168.0.0/16. The ranges should be @@ -23707,8 +25563,8 @@ type VpnTunnel struct { } func (s *VpnTunnel) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnel - raw := noMethod(*s) + type NoMethod VpnTunnel + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23760,8 +25616,8 @@ type VpnTunnelAggregatedList struct { } func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelAggregatedList - raw := noMethod(*s) + type NoMethod VpnTunnelAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23775,9 +25631,13 @@ type VpnTunnelAggregatedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -23788,7 +25648,9 @@ type VpnTunnelAggregatedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -23819,8 +25681,8 @@ type VpnTunnelAggregatedListWarning struct { } func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelAggregatedListWarning - raw := noMethod(*s) + type NoMethod VpnTunnelAggregatedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-23856,8 +25718,8 @@ type VpnTunnelAggregatedListWarningData struct { } func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelAggregatedListWarningData - raw := noMethod(*s) + type NoMethod VpnTunnelAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23910,8 +25772,8 @@ type VpnTunnelList struct { } func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelList - raw := noMethod(*s) + type NoMethod VpnTunnelList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23924,9 +25786,13 @@ type VpnTunnelListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -23937,7 +25803,9 @@ type VpnTunnelListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -23968,8 +25836,8 @@ type VpnTunnelListWarning struct { } func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelListWarning - raw := noMethod(*s) + type NoMethod VpnTunnelListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24005,8 +25873,8 @@ type VpnTunnelListWarningData struct { } func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelListWarningData - raw := noMethod(*s) + type NoMethod VpnTunnelListWarningData + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24036,8 +25904,8 @@ type VpnTunnelsScopedList struct { } func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedList - raw := noMethod(*s) + type NoMethod VpnTunnelsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24051,9 +25919,13 @@ type VpnTunnelsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -24064,7 +25936,9 @@ type VpnTunnelsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -24095,8 +25969,8 @@ type VpnTunnelsScopedListWarning struct { } func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarning - raw := noMethod(*s) + type NoMethod VpnTunnelsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24132,8 +26006,8 @@ type VpnTunnelsScopedListWarningData struct { } func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarningData - raw := noMethod(*s) + type NoMethod VpnTunnelsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24185,8 +26059,8 @@ type XpnHostList struct { } func (s *XpnHostList) MarshalJSON() ([]byte, error) { - type noMethod XpnHostList - raw := noMethod(*s) 
+ type NoMethod XpnHostList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24199,9 +26073,13 @@ type XpnHostListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -24212,7 +26090,9 @@ type XpnHostListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -24243,8 +26123,8 @@ type XpnHostListWarning struct { } func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { - type noMethod XpnHostListWarning - raw := noMethod(*s) + type NoMethod XpnHostListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24280,8 +26160,8 @@ type XpnHostListWarningData struct { } func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { - type noMethod XpnHostListWarningData - raw := noMethod(*s) + type NoMethod XpnHostListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24317,12 +26197,13 @@ type XpnResourceId struct { } func (s *XpnResourceId) MarshalJSON() ([]byte, error) { - type noMethod XpnResourceId - raw := noMethod(*s) + type NoMethod XpnResourceId + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Zone: A Zone resource. +// Zone: A Zone resource. 
(== resource_for beta.zones ==) (== +// resource_for v1.zones ==) type Zone struct { // AvailableCpuPlatforms: [Output Only] Available cpu/platform // selections for the zone. @@ -24388,8 +26269,8 @@ type Zone struct { } func (s *Zone) MarshalJSON() ([]byte, error) { - type noMethod Zone - raw := noMethod(*s) + type NoMethod Zone + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24441,8 +26322,8 @@ type ZoneList struct { } func (s *ZoneList) MarshalJSON() ([]byte, error) { - type noMethod ZoneList - raw := noMethod(*s) + type NoMethod ZoneList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24455,9 +26336,13 @@ type ZoneListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -24468,7 +26353,9 @@ type ZoneListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -24499,8 +26386,8 @@ type ZoneListWarning struct { } func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { - type noMethod ZoneListWarning - raw := noMethod(*s) + type NoMethod ZoneListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24536,8 +26423,8 @@ type ZoneListWarningData struct { } func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { - type noMethod ZoneListWarningData - raw := noMethod(*s) + type NoMethod ZoneListWarningData + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24572,8 +26459,8 @@ type ZoneSetLabelsRequest struct { } func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod ZoneSetLabelsRequest - raw := noMethod(*s) + type NoMethod ZoneSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -24751,7 +26638,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -24942,7 +26829,7 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25169,7 +27056,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25429,7 +27316,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25625,7 +27512,7 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25792,7 +27679,7 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25965,7 +27852,7 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -26192,7 +28079,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -26403,7 +28290,7 @@ func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -26566,7 +28453,7 @@ func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -26792,7 +28679,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -26987,7 +28874,7 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -27154,7 +29041,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, 
erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -27326,7 +29213,7 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -27552,7 +29439,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -27768,7 +29655,7 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -27929,7 +29816,7 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -28111,7 +29998,7 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -28165,6 +30052,175 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } +// method id "compute.backendBuckets.addSignedUrlKey": + +type BackendBucketsAddSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ 
context.Context + header_ http.Header +} + +// AddSignedUrlKey: Adds the given Signed URL Key to the backend bucket. +func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { + c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.signedurlkey = signedurlkey + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds the given Signed URL Key to the backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.addSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.backendBuckets.delete": type BackendBucketsDeleteCall struct { @@ -28280,7 +30336,7 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -28325,6 +30381,174 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } +// method id "compute.backendBuckets.deleteSignedUrlKey": + +type BackendBucketsDeleteSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteSignedUrlKey: Deletes the given Signed URL Key from the backend +// bucket. +func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { + c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.urlParams_.Set("keyName", keyName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the given Signed URL Key from the backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket", + // "keyName" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.backendBuckets.get": type BackendBucketsGetCall struct { @@ -28436,7 +30660,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -28597,7 +30821,7 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -28812,7 +31036,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29013,7 +31237,7 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29184,7 +31408,7 @@ func (c 
*BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29232,6 +31456,176 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } +// method id "compute.backendServices.addSignedUrlKey": + +type BackendServicesAddSignedUrlKeyCall struct { + s *Service + project string + backendService string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddSignedUrlKey: Adds the given Signed URL Key to the specified +// backend service. +func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { + c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.signedurlkey = signedurlkey + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.addSignedUrlKey" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds the given Signed URL Key to the specified backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.addSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.backendServices.aggregatedList": type BackendServicesAggregatedListCall struct { @@ -29407,7 +31801,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29600,7 +31994,7 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) 
(*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29645,6 +32039,174 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } +// method id "compute.backendServices.deleteSignedUrlKey": + +type BackendServicesDeleteSignedUrlKeyCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteSignedUrlKey: Deletes the given Signed URL Key from the +// specified backend service. +func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { + c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.urlParams_.Set("keyName", keyName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.deleteSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the given Signed URL Key from the specified backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.deleteSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendService", + // "keyName" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.backendServices.get": type BackendServicesGetCall struct { @@ -29757,7 +32319,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29903,7 +32465,7 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30069,7 +32631,7 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30285,7 +32847,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30490,7 +33052,7 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30661,7 +33223,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30812,7 +33374,7 @@ func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30982,7 +33544,7 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31205,7 +33767,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31397,7 +33959,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31625,7 +34187,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31885,7 +34447,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32094,7 +34656,7 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32276,7 +34838,7 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32443,7 +35005,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32626,7 +35188,7 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32858,7 +35420,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32956,7 +35518,8 @@ type DisksResizeCall struct { header_ http.Header } -// Resize: Resizes the specified persistent disk. +// Resize: Resizes the specified persistent disk. You can only increase +// the size of the disk. 
func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -33068,12 +35631,12 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Resizes the specified persistent disk.", + // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", // "httpMethod": "POST", // "id": "compute.disks.resize", // "parameterOrder": [ @@ -33250,7 +35813,7 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33413,7 +35976,7 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33581,7 +36144,7 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33737,7 +36300,7 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33899,7 +36462,7 @@ func (c *FirewallsInsertCall) Do(opts 
...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34115,7 +36678,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34317,7 +36880,7 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34469,7 +37032,7 @@ func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34639,7 +37202,7 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34862,7 +37425,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35058,7 +37621,7 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35225,7 +37788,7 @@ 
func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35398,7 +37961,7 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35625,7 +38188,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35836,7 +38399,7 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36019,7 +38582,7 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36182,7 +38745,7 @@ func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36350,7 +38913,7 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } 
return ret, nil @@ -36507,7 +39070,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36669,7 +39232,7 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36884,7 +39447,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37065,7 +39628,7 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37212,7 +39775,7 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37372,7 +39935,7 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37529,7 +40092,7 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, 
res); err != nil { return nil, err } return ret, nil @@ -37691,7 +40254,7 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37907,7 +40470,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38088,7 +40651,7 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38255,7 +40818,7 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38407,7 +40970,7 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38626,7 +41189,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38924,7 +41487,7 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err 
:= gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39141,7 +41704,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39333,7 +41896,7 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39489,7 +42052,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39650,7 +42213,7 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39865,7 +42428,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40066,7 +42629,7 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40218,7 +42781,7 @@ func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40385,7 +42948,7 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40549,7 +43112,7 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40706,7 +43269,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40868,7 +43431,7 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41084,7 +43647,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41286,7 +43849,7 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41438,7 +44001,7 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts 
...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41606,7 +44169,7 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41769,7 +44332,7 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41925,7 +44488,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42086,7 +44649,7 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42301,7 +44864,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42502,7 +45065,7 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42654,7 +45217,7 @@ func (c 
*HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42821,7 +45384,7 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42985,7 +45548,7 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43156,7 +45719,7 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43316,7 +45879,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43468,7 +46031,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43637,7 +46200,7 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ 
-43696,8 +46259,8 @@ type ImagesListCall struct { header_ http.Header } -// List: Retrieves the list of private images available to the specified -// project. Private images are images you create that belong to your +// List: Retrieves the list of custom images available to the specified +// project. Custom images are images you create that belong to your // project. This method does not get any images that belong to other // projects, including publicly-available images, like Debian 8. If you // want to get a list of publicly-available images, use this method to @@ -43866,12 +46429,12 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", // "id": "compute.images.list", // "parameterOrder": [ @@ -44047,7 +46610,7 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44194,7 +46757,7 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44379,7 +46942,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44609,7 +47172,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44807,7 +47370,7 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44998,7 +47561,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ 
-45167,7 +47730,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45345,7 +47908,7 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45570,7 +48133,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45784,7 +48347,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46002,7 +48565,7 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46195,7 +48758,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46378,7 +48941,7 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { 
return nil, err } return ret, nil @@ -46576,7 +49139,7 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46755,7 +49318,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46936,7 +49499,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47121,7 +49684,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47282,7 +49845,7 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47463,7 +50026,7 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47644,7 +50207,7 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47873,7 +50436,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48071,7 +50634,7 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48236,7 +50799,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48406,7 +50969,7 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48631,7 +51194,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48888,7 +51451,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49112,7 +51675,7 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49291,7 +51854,7 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49452,7 +52015,7 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49624,7 +52187,7 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49781,7 +52344,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49946,7 +52509,7 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50162,7 +52725,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50343,7 +52906,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts 
...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50515,7 +53078,7 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50753,7 +53316,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50959,7 +53522,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51135,7 +53698,7 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51310,7 +53873,7 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51497,7 +54060,7 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51672,7 +54235,7 @@ func (c 
*InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51854,7 +54417,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51960,6 +54523,22 @@ func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { return c } +// SourceInstanceTemplate sets the optional parameter +// "sourceInstanceTemplate": Specifies instance template to create the +// instance. +// +// This field is optional. It can be a full or partial URL. For example, +// the following are all valid URLs to an instance template: +// - +// https://www.googleapis.com/compute/v1/projects/project/global/global/instanceTemplates/instanceTemplate +// - projects/project/global/global/instanceTemplates/instanceTemplate +// +// - global/instancesTemplates/instanceTemplate +func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { + c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -52042,7 +54621,7 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52067,6 +54646,11 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "location": "query", // "type": "string" // }, + // "sourceInstanceTemplate": { + // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/global/instanceTemplates/instanceTemplate \n- projects/project/global/global/instanceTemplates/instanceTemplate \n- global/instancesTemplates/instanceTemplate", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -52269,7 +54853,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52535,7 +55119,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52748,7 +55332,7 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52801,6 +55385,190 @@ func (c *InstancesResetCall) Do(opts 
...googleapi.CallOption) (*Operation, error } +// method id "compute.instances.setDeletionProtection": + +type InstancesSetDeletionProtectionCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetDeletionProtection: Sets deletion protection on the instance. +func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { + c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + return c +} + +// DeletionProtection sets the optional parameter "deletionProtection": +// Whether the resource should be protected against deletion. +func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesSetDeletionProtectionCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.setDeletionProtection" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets deletion protection on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDeletionProtection", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "deletionProtection": { + // "default": "true", + // "description": "Whether the resource should be protected against deletion.", + // "location": "query", + // "type": "boolean" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.setDiskAutoDelete": type InstancesSetDiskAutoDeleteCall struct { @@ -52923,7 +55691,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53117,7 +55885,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53299,7 +56067,7 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53481,7 +56249,7 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53664,7 +56432,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53848,7 +56616,7 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54030,7 +56798,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54213,7 +56981,7 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54396,7 +57164,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54573,7 +57341,7 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54753,7 +57521,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54823,11 +57591,10 @@ type InstancesStopCall struct { // Stop: Stops a running instance, shutting it down cleanly, and allows // you to restart the instance at a later time. Stopped instances do not -// incur per-minute, virtual machine usage charges while they are -// stopped, but any resources that the virtual machine is using, such as -// persistent disks and static IP addresses, will continue to be charged -// until they are deleted. For more information, see Stopping an -// instance. +// incur VM usage charges while they are stopped. However, resources +// that the VM is using, such as persistent disks and static IP +// addresses, will continue to be charged until they are deleted. For +// more information, see Stopping an instance. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -54934,12 +57701,12 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", // "httpMethod": "POST", // "id": "compute.instances.stop", // "parameterOrder": [ @@ -55094,7 +57861,7 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55146,6 +57913,388 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } +// method id "compute.instances.updateAccessConfig": + +type InstancesUpdateAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// UpdateAccessConfig: Updates the specified access config from an +// instance's network interface with the data included in the request. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. +func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { + c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstancesUpdateAccessConfigCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.updateAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "POST", + // "id": "compute.instances.updateAccessConfig", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "networkInterface" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface where the access config is attached.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + // "request": { + // "$ref": "AccessConfig" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.updateNetworkInterface": + +type InstancesUpdateNetworkInterfaceCall struct { + s *Service + project string + zone string + instance string + networkinterface *NetworkInterface + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// UpdateNetworkInterface: Updates an instance's network interface. This +// method follows PATCH semantics. 
+func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { + c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.networkinterface = networkinterface + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.updateNetworkInterface" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an instance's network interface. This method follows PATCH semantics.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateNetworkInterface", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "networkInterface" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to update.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", + // "request": { + // "$ref": "NetworkInterface" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.interconnectAttachments.aggregatedList": type InterconnectAttachmentsAggregatedListCall struct { @@ -55322,7 +58471,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55517,7 +58666,7 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55683,7 +58832,7 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55855,7 
+59004,7 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56081,7 +59230,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56273,7 +59422,7 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56437,7 +59586,7 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56653,7 +59802,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56845,7 +59994,7 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57001,7 +60150,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return 
nil, err } return ret, nil @@ -57162,7 +60311,7 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57377,7 +60526,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57578,7 +60727,7 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57730,7 +60879,7 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57774,6 +60923,318 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } +// method id "compute.licenseCodes.get": + +type LicenseCodesGetCall struct { + s *Service + project string + licenseCode string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Return a specified license code. License codes are mirrored +// across all projects that have permissions to read the License Code. +func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { + c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.licenseCode = licenseCode + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LicenseCodesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "licenseCode": c.licenseCode, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.licenseCodes.get" call. +// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LicenseCode.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LicenseCode{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Return a specified license code. 
License codes are mirrored across all projects that have permissions to read the License Code.", + // "httpMethod": "GET", + // "id": "compute.licenseCodes.get", + // "parameterOrder": [ + // "project", + // "licenseCode" + // ], + // "parameters": { + // "licenseCode": { + // "description": "Number corresponding to the License code resource to return.", + // "location": "path", + // "pattern": "[0-9]{0,61}?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/licenseCodes/{licenseCode}", + // "response": { + // "$ref": "LicenseCode" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.licenses.delete": + +type LicensesDeleteCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified license. +func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { + c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.license = license + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LicensesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "license": c.license, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.licenses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified license.", + // "httpMethod": "DELETE", + // "id": "compute.licenses.delete", + // "parameterOrder": [ + // "project", + // "license" + // ], + // "parameters": { + // "license": { + // "description": "Name of the license resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/licenses/{license}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.licenses.get": type LicensesGetCall struct { @@ -57885,7 +61346,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57926,6 +61387,424 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } +// method id "compute.licenses.insert": + +type LicensesInsertCall struct { + s *Service + project string + license *License + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Create a License resource in the specified project. 
+func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { + c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.license = license + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *LicensesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.licenses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Create a License resource in the specified project.", + // "httpMethod": "POST", + // "id": "compute.licenses.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/licenses", + // "request": { + // "$ref": "License" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "compute.licenses.list": + +type LicensesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of licenses available in the specified +// project. This method does not get any licenses that belong to other +// projects, including licenses attached to publicly-available images, +// like Debian 8. If you want to get a list of publicly-available +// licenses, use this method to make a request to the respective image +// project, such as debian-cloud or windows-cloud. +func (r *LicensesService) List(project string) *LicensesListCall { + c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. 
The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *LicensesListCall) Filter(filter string) *LicensesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *LicensesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.licenses.list" call. +// Exactly one of *LicensesListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *LicensesListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LicensesListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 8. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "httpMethod": "GET", + // "id": "compute.licenses.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/licenses", + // "response": { + // "$ref": "LicensesListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.machineTypes.aggregatedList": type MachineTypesAggregatedListCall struct { @@ -58101,7 +61980,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58293,7 +62172,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return 
nil, err } return ret, nil @@ -58521,7 +62400,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58728,7 +62607,7 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58892,7 +62771,7 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59049,7 +62928,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59211,7 +63090,7 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59427,7 +63306,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59517,7 +63396,8 @@ type NetworksPatchCall struct { } // Patch: Patches the specified network with the data included in the -// request. +// request. Only the following fields can be modified: +// routingConfig.routingMode. 
func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -59627,12 +63507,12 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Patches the specified network with the data included in the request.", + // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", // "httpMethod": "PATCH", // "id": "compute.networks.patch", // "parameterOrder": [ @@ -59797,7 +63677,7 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59961,7 +63841,7 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60110,7 +63990,7 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60266,7 +64146,7 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ 
-60423,7 +64303,7 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60575,7 +64455,7 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60733,7 +64613,7 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60881,7 +64761,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61022,7 +64902,7 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61186,7 +65066,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61383,7 +65263,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { 
return nil, err } return ret, nil @@ -61577,7 +65457,7 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61737,7 +65617,7 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61898,7 +65778,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62061,7 +65941,7 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62222,7 +66102,7 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62388,7 +66268,7 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62560,7 +66440,7 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62786,7 +66666,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63002,7 +66882,7 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63163,7 +67043,7 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63345,7 +67225,7 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63517,7 +67397,7 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63683,7 +67563,7 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63839,7 +67719,7 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64016,7 +67896,7 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64242,7 +68122,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64457,7 +68337,7 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64620,7 +68500,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64800,7 +68680,7 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65030,7 +68910,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65221,7 +69101,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) 
(*Commitment }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65393,7 +69273,7 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65619,7 +69499,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65845,7 +69725,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66018,7 +69898,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66209,7 +70089,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66377,7 +70257,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66554,7 +70434,7 @@ func (c 
*RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66779,7 +70659,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66991,7 +70871,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67209,7 +71089,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67402,7 +71282,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. 
}, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67586,7 +71466,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67772,7 +71652,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67953,7 +71833,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68134,7 +72014,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68295,7 +72175,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68476,7 +72356,7 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68643,7 +72523,7 @@ func (c *RegionInstanceGroupsGetCall) 
Do(opts ...googleapi.CallOption) (*Instanc }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68868,7 +72748,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69129,7 +73009,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69349,7 +73229,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69510,7 +73390,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69796,7 +73676,7 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70024,7 +73904,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70221,7 +74101,7 @@ 
func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70438,7 +74318,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70689,7 +74569,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -70884,7 +74764,7 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -71051,7 +74931,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -71214,7 +75094,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -71386,7 +75266,7 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, 
nil @@ -71612,7 +75492,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -71824,7 +75704,7 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -71988,7 +75868,7 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -72147,7 +76027,7 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -72325,7 +76205,7 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -72497,7 +76377,7 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -72654,7 +76534,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { 
return nil, err } return ret, nil @@ -72816,7 +76696,7 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -73032,7 +76912,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -73213,7 +77093,7 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -73257,6 +77137,152 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } +// method id "compute.securityPolicies.addRule": + +type SecurityPoliciesAddRuleCall struct { + s *Service + project string + securityPolicy string + securitypolicyrule *SecurityPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddRule: Inserts a rule into a security policy. +func (r *SecurityPoliciesService) AddRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesAddRuleCall { + c := &SecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.securityPolicy = securityPolicy + c.securitypolicyrule = securitypolicyrule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *SecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesAddRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesAddRuleCall) Context(ctx context.Context) *SecurityPoliciesAddRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SecurityPoliciesAddRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/addRule") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.addRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts a rule into a security policy.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.addRule", + // "parameterOrder": [ + // "project", + // "securityPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}/addRule", + // "request": { + // "$ref": "SecurityPolicyRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.securityPolicies.delete": type SecurityPoliciesDeleteCall struct { @@ -73372,7 +77398,7 @@ func (c *SecurityPoliciesDeleteCall) 
Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -73528,7 +77554,7 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -73569,6 +77595,170 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol } +// method id "compute.securityPolicies.getRule": + +type SecurityPoliciesGetRuleCall struct { + s *Service + project string + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetRule: Gets a rule at the specified priority. +func (r *SecurityPoliciesService) GetRule(project string, securityPolicy string) *SecurityPoliciesGetRuleCall { + c := &SecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.securityPolicy = securityPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to get from the security policy. +func (c *SecurityPoliciesGetRuleCall) Priority(priority int64) *SecurityPoliciesGetRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesGetRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesGetRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SecurityPoliciesGetRuleCall) IfNoneMatch(entityTag string) *SecurityPoliciesGetRuleCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesGetRuleCall) Context(ctx context.Context) *SecurityPoliciesGetRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SecurityPoliciesGetRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/getRule") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.getRule" call. +// Exactly one of *SecurityPolicyRule or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyRule.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyRule, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SecurityPolicyRule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a rule at the specified priority.", + // "httpMethod": "GET", + // "id": "compute.securityPolicies.getRule", + // "parameterOrder": [ + // "project", + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to get from the security policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to which the queried rule belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}/getRule", + // "response": { + // "$ref": "SecurityPolicyRule" + // }, 
+ // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.securityPolicies.insert": type SecurityPoliciesInsertCall struct { @@ -73689,7 +77879,7 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -73904,7 +78094,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74104,7 +78294,7 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74152,6 +78342,314 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation } +// method id "compute.securityPolicies.patchRule": + +type SecurityPoliciesPatchRuleCall struct { + s *Service + project string + securityPolicy string + securitypolicyrule *SecurityPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// PatchRule: Patches a rule at the specified priority. 
+func (r *SecurityPoliciesService) PatchRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesPatchRuleCall { + c := &SecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.securityPolicy = securityPolicy + c.securitypolicyrule = securitypolicyrule + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to patch. +func (c *SecurityPoliciesPatchRuleCall) Priority(priority int64) *SecurityPoliciesPatchRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesPatchRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesPatchRuleCall) Context(ctx context.Context) *SecurityPoliciesPatchRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/patchRule") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.patchRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a rule at the specified priority.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.patchRule", + // "parameterOrder": [ + // "project", + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to patch.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}/patchRule", + // "request": { + // "$ref": "SecurityPolicyRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.securityPolicies.removeRule": + +type SecurityPoliciesRemoveRuleCall struct { + s *Service + project 
string + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveRule: Deletes a rule at the specified priority. +func (r *SecurityPoliciesService) RemoveRule(project string, securityPolicy string) *SecurityPoliciesRemoveRuleCall { + c := &SecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.securityPolicy = securityPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to remove from the security policy. +func (c *SecurityPoliciesRemoveRuleCall) Priority(priority int64) *SecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesRemoveRuleCall) Context(ctx context.Context) *SecurityPoliciesRemoveRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/removeRule") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.removeRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a rule at the specified priority.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.removeRule", + // "parameterOrder": [ + // "project", + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to remove from the security policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}/removeRule", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.securityPolicies.testIamPermissions": type SecurityPoliciesTestIamPermissionsCall struct { @@ -74256,7 +78754,7 @@ func (c 
*SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74422,7 +78920,7 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74579,7 +79077,7 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74796,7 +79294,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -74977,7 +79475,7 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -75124,7 +79622,7 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -75283,7 +79781,7 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return 
ret, nil @@ -75439,7 +79937,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -75600,7 +80098,7 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -75815,7 +80313,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -75996,7 +80494,7 @@ func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76040,6 +80538,1281 @@ func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } +// method id "compute.sslPolicies.delete": + +type SslPoliciesDeleteCall struct { + s *Service + project string + sslPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified SSL policy. The SSL policy resource can +// be deleted only if it is not in use by any TargetHttpsProxy or +// TargetSslProxy resources. 
+func (r *SslPoliciesService) Delete(project string, sslPolicy string) *SslPoliciesDeleteCall { + c := &SslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslPolicy = sslPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SslPoliciesDeleteCall) RequestId(requestId string) *SslPoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesDeleteCall) Fields(s ...googleapi.Field) *SslPoliciesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesDeleteCall) Context(ctx context.Context) *SslPoliciesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SslPoliciesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "sslPolicy": c.sslPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + // "httpMethod": "DELETE", + // "id": "compute.sslPolicies.delete", + // "parameterOrder": [ + // "project", + // "sslPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslPolicies/{sslPolicy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.sslPolicies.get": + +type SslPoliciesGetCall struct { + s *Service + project string + sslPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: List all of the ordered rules present in a single specified +// policy. +func (r *SslPoliciesService) Get(project string, sslPolicy string) *SslPoliciesGetCall { + c := &SslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslPolicy = sslPolicy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesGetCall) Fields(s ...googleapi.Field) *SslPoliciesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *SslPoliciesGetCall) IfNoneMatch(entityTag string) *SslPoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesGetCall) Context(ctx context.Context) *SslPoliciesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslPoliciesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "sslPolicy": c.sslPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.get" call. +// Exactly one of *SslPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslPolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all of the ordered rules present in a single specified policy.", + // "httpMethod": "GET", + // "id": "compute.sslPolicies.get", + // "parameterOrder": [ + // "project", + // "sslPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslPolicies/{sslPolicy}", + // "response": { + // "$ref": "SslPolicy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.sslPolicies.insert": + +type SslPoliciesInsertCall struct { + s *Service + project string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Returns the specified SSL policy resource. 
Get a list of +// available SSL policies by making a list() request. +func (r *SslPoliciesService) Insert(project string, sslpolicy *SslPolicy) *SslPoliciesInsertCall { + c := &SslPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslpolicy = sslpolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SslPoliciesInsertCall) RequestId(requestId string) *SslPoliciesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesInsertCall) Fields(s ...googleapi.Field) *SslPoliciesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesInsertCall) Context(ctx context.Context) *SslPoliciesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SslPoliciesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified SSL policy resource. Get a list of available SSL policies by making a list() request.", + // "httpMethod": "POST", + // "id": "compute.sslPolicies.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/sslPolicies", + // "request": { + // "$ref": "SslPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.sslPolicies.list": + +type SslPoliciesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: List all the SSL policies that have been configured for the +// specified project. +func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { + c := &SslPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. 
For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *SslPoliciesListCall) Filter(filter string) *SslPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SslPoliciesListCall) MaxResults(maxResults int64) *SslPoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *SslPoliciesListCall) OrderBy(orderBy string) *SslPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SslPoliciesListCall) PageToken(pageToken string) *SslPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesListCall) Fields(s ...googleapi.Field) *SslPoliciesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *SslPoliciesListCall) IfNoneMatch(entityTag string) *SslPoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesListCall) Context(ctx context.Context) *SslPoliciesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SslPoliciesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.list" call. +// Exactly one of *SslPoliciesList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslPoliciesList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslPoliciesList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all the SSL policies that have been configured for the specified project.", + // "httpMethod": "GET", + // "id": "compute.sslPolicies.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslPolicies", + // "response": { + // "$ref": "SslPoliciesList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SslPoliciesListCall) Pages(ctx context.Context, f func(*SslPoliciesList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.sslPolicies.listAvailableFeatures": + +type SslPoliciesListAvailableFeaturesCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListAvailableFeatures: Lists all features that can be specified in +// the SSL policy when using custom profile. 
+func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesListAvailableFeaturesCall { + c := &SslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *SslPoliciesListAvailableFeaturesCall) Fields(s ...googleapi.Field) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *SslPoliciesListAvailableFeaturesCall) IfNoneMatch(entityTag string) *SslPoliciesListAvailableFeaturesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesListAvailableFeaturesCall) Context(ctx context.Context) *SslPoliciesListAvailableFeaturesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/listAvailableFeatures") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.listAvailableFeatures" call. +// Exactly one of *SslPoliciesListAvailableFeaturesResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *SslPoliciesListAvailableFeaturesResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) (*SslPoliciesListAvailableFeaturesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslPoliciesListAvailableFeaturesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + // "httpMethod": "GET", + // "id": "compute.sslPolicies.listAvailableFeatures", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslPolicies/listAvailableFeatures", + // "response": { + // "$ref": "SslPoliciesListAvailableFeaturesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.sslPolicies.patch": + +type SslPoliciesPatchCall struct { + s *Service + project string + sslPolicy string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified SSL policy with the data included in the +// request. 
+func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy *SslPolicy) *SslPoliciesPatchCall { + c := &SslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslPolicy = sslPolicy + c.sslpolicy = sslpolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SslPoliciesPatchCall) RequestId(requestId string) *SslPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesPatchCall) Fields(s ...googleapi.Field) *SslPoliciesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesPatchCall) Context(ctx context.Context) *SslPoliciesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SslPoliciesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "sslPolicy": c.sslPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified SSL policy with the data included in the request.", + // "httpMethod": "PATCH", + // "id": "compute.sslPolicies.patch", + // "parameterOrder": [ + // "project", + // "sslPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslPolicies/{sslPolicy}", + // "request": { + // "$ref": "SslPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.sslPolicies.testIamPermissions": + +type SslPoliciesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *SslPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SslPoliciesTestIamPermissionsCall { + c := &SslPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SslPoliciesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesTestIamPermissionsCall) Context(ctx context.Context) *SslPoliciesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslPoliciesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.sslPolicies.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslPolicies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.subnetworks.aggregatedList": type SubnetworksAggregatedListCall struct { @@ -76214,7 +81987,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts 
...googleapi.CallOption) (*Subne }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76409,7 +82182,7 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76588,7 +82361,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76758,7 +82531,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -76921,7 +82694,7 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -77093,7 +82866,7 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -77319,7 +83092,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -77404,6 +83177,191 @@ func (c 
*SubnetworksListCall) Pages(ctx context.Context, f func(*SubnetworkList) } } +// method id "compute.subnetworks.patch": + +type SubnetworksPatchCall struct { + s *Service + project string + region string + subnetwork string + subnetwork2 *Subnetwork + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified subnetwork with the data included in the +// request. Only the following fields within the subnetwork resource can +// be specified in the request: secondary_ip_range and +// allow_subnet_cidr_routes_overlap. It is also mandatory to specify the +// current fingeprint of the subnetwork resource being patched. +func (r *SubnetworksService) Patch(project string, region string, subnetwork string, subnetwork2 *Subnetwork) *SubnetworksPatchCall { + c := &SubnetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.subnetwork = subnetwork + c.subnetwork2 = subnetwork2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SubnetworksPatchCall) RequestId(requestId string) *SubnetworksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SubnetworksPatchCall) Fields(s ...googleapi.Field) *SubnetworksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SubnetworksPatchCall) Context(ctx context.Context) *SubnetworksPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SubnetworksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetwork2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "subnetwork": c.subnetwork, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.subnetworks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified subnetwork with the data included in the request. Only the following fields within the subnetwork resource can be specified in the request: secondary_ip_range and allow_subnet_cidr_routes_overlap. It is also mandatory to specify the current fingeprint of the subnetwork resource being patched.", + // "httpMethod": "PATCH", + // "id": "compute.subnetworks.patch", + // "parameterOrder": [ + // "project", + // "region", + // "subnetwork" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "subnetwork": { + // "description": "Name of the Subnetwork resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + // "request": { + // "$ref": "Subnetwork" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.subnetworks.setIamPolicy": type SubnetworksSetIamPolicyCall struct { @@ -77511,7 +83469,7 @@ func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -77689,7 +83647,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -77852,7 +83810,7 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T 
}, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78020,7 +83978,7 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78177,7 +84135,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78339,7 +84297,7 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78555,7 +84513,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78755,7 +84713,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -78907,7 +84865,7 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79066,7 +85024,7 @@ func (c *TargetHttpsProxiesDeleteCall) 
Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79222,7 +85180,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79383,7 +85341,7 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79598,7 +85556,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79675,6 +85633,175 @@ func (c *TargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHt } } +// method id "compute.targetHttpsProxies.setQuicOverride": + +type TargetHttpsProxiesSetQuicOverrideCall struct { + s *Service + project string + targetHttpsProxy string + targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetQuicOverride: Sets the QUIC override policy for TargetHttpsProxy. 
+func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsProxy string, targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest) *TargetHttpsProxiesSetQuicOverrideCall { + c := &TargetHttpsProxiesSetQuicOverrideCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + c.targethttpsproxiessetquicoverriderequest = targethttpsproxiessetquicoverriderequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpsProxiesSetQuicOverrideCall) RequestId(requestId string) *TargetHttpsProxiesSetQuicOverrideCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesSetQuicOverrideCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetQuicOverrideCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *TargetHttpsProxiesSetQuicOverrideCall) Context(ctx context.Context) *TargetHttpsProxiesSetQuicOverrideCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpsProxiesSetQuicOverrideCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetquicoverriderequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpsProxies.setQuicOverride" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the QUIC override policy for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setQuicOverride", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", + // "request": { + // "$ref": "TargetHttpsProxiesSetQuicOverrideRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetHttpsProxies.setSslCertificates": type TargetHttpsProxiesSetSslCertificatesCall struct { @@ -79797,7 +85924,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -79845,6 +85972,179 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti } +// method id "compute.targetHttpsProxies.setSslPolicy": + +type TargetHttpsProxiesSetSslPolicyCall struct { + s *Service + project string + targetHttpsProxy string + sslpolicyreference *SslPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSslPolicy: Sets the SSL policy for TargetHttpsProxy. The SSL +// policy specifies the server-side support for SSL features. This +// affects connections between clients and the HTTPS proxy load +// balancer. They do not affect the connection between the load balancer +// and the backends. 
+func (r *TargetHttpsProxiesService) SetSslPolicy(project string, targetHttpsProxy string, sslpolicyreference *SslPolicyReference) *TargetHttpsProxiesSetSslPolicyCall { + c := &TargetHttpsProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + c.sslpolicyreference = sslpolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpsProxiesSetSslPolicyCall) RequestId(requestId string) *TargetHttpsProxiesSetSslPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesSetSslPolicyCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *TargetHttpsProxiesSetSslPolicyCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpsProxies.setSslPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setSslPolicy", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", + // "request": { + // "$ref": "SslPolicyReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetHttpsProxies.setUrlMap": type TargetHttpsProxiesSetUrlMapCall struct { @@ -79967,7 +86267,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80119,7 +86419,7 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80338,7 +86638,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80534,7 +86834,7 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80702,7 +87002,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -80875,7 +87175,7 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -81102,7 +87402,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -81294,7 +87594,7 @@ func (c *TargetInstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -81472,7 +87772,7 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -81654,7 +87954,7 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -81885,7 +88185,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) 
(*Targe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -82081,7 +88381,7 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -82249,7 +88549,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -82406,7 +88706,7 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -82582,7 +88882,7 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -82809,7 +89109,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83020,7 +89320,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83202,7 +89502,7 @@ func (c 
*TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83391,7 +89691,7 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83560,7 +89860,7 @@ func (c *TargetPoolsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83727,7 +90027,7 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -83883,7 +90183,7 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84044,7 +90344,7 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84259,7 +90559,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return 
ret, nil @@ -84458,7 +90758,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84628,7 +90928,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84798,7 +91098,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -84846,6 +91146,178 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption } +// method id "compute.targetSslProxies.setSslPolicy": + +type TargetSslProxiesSetSslPolicyCall struct { + s *Service + project string + targetSslProxy string + sslpolicyreference *SslPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSslPolicy: Sets the SSL policy for TargetSslProxy. The SSL policy +// specifies the server-side support for SSL features. This affects +// connections between clients and the SSL proxy load balancer. They do +// not affect the connection between the load balancer and the backends. 
+func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy string, sslpolicyreference *SslPolicyReference) *TargetSslProxiesSetSslPolicyCall { + c := &TargetSslProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetSslProxy = targetSslProxy + c.sslpolicyreference = sslpolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetSslProxiesSetSslPolicyCall) RequestId(requestId string) *TargetSslProxiesSetSslPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetSslProxiesSetSslPolicyCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetSslPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *TargetSslProxiesSetSslPolicyCall) Context(ctx context.Context) *TargetSslProxiesSetSslPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetSslProxy": c.targetSslProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetSslProxies.setSslPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "httpMethod": "POST", + // "id": "compute.targetSslProxies.setSslPolicy", + // "parameterOrder": [ + // "project", + // "targetSslProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", + // "request": { + // "$ref": "SslPolicyReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetSslProxies.testIamPermissions": type TargetSslProxiesTestIamPermissionsCall struct { @@ -84950,7 +91422,7 @@ func (c *TargetSslProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -85109,7 +91581,7 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -85265,7 +91737,7 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -85426,7 +91898,7 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -85641,7 +92113,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -85840,7 +92312,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86010,7 +92482,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86232,7 +92704,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86427,7 +92899,7 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86594,7 +93066,7 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86766,7 +93238,7 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) 
(*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -86992,7 +93464,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -87077,6 +93549,188 @@ func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpn } } +// method id "compute.targetVpnGateways.setLabels": + +type TargetVpnGatewaysSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a TargetVpnGateway. To learn more about +// labels, read the Labeling Resources documentation. +func (r *TargetVpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *TargetVpnGatewaysSetLabelsCall { + c := &TargetVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetVpnGatewaysSetLabelsCall) RequestId(requestId string) *TargetVpnGatewaysSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetVpnGatewaysSetLabelsCall) Context(ctx context.Context) *TargetVpnGatewaysSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetVpnGatewaysSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetVpnGateways.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a TargetVpnGateway. 
To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.targetVpnGateways.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetVpnGateways.testIamPermissions": type TargetVpnGatewaysTestIamPermissionsCall struct { @@ -87184,7 +93838,7 @@ func (c *TargetVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -87352,7 +94006,7 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -87509,7 +94163,7 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -87671,7 +94325,7 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if 
err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -87834,7 +94488,7 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88058,7 +94712,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88260,7 +94914,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88412,7 +95066,7 @@ func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88580,7 +95234,7 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88734,7 +95388,7 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -88951,7 +95605,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -89146,7 +95800,7 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -89313,7 +95967,7 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -89485,7 +96139,7 @@ func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -89711,7 +96365,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -89796,6 +96450,188 @@ func (c *VpnTunnelsListCall) Pages(ctx context.Context, f func(*VpnTunnelList) e } } +// method id "compute.vpnTunnels.setLabels": + +type VpnTunnelsSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a VpnTunnel. To learn more about +// labels, read the Labeling Resources documentation. 
+func (r *VpnTunnelsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *VpnTunnelsSetLabelsCall { + c := &VpnTunnelsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *VpnTunnelsSetLabelsCall) RequestId(requestId string) *VpnTunnelsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsSetLabelsCall) Fields(s ...googleapi.Field) *VpnTunnelsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnTunnelsSetLabelsCall) Context(ctx context.Context) *VpnTunnelsSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *VpnTunnelsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnTunnelsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnTunnels.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnTunnelsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a VpnTunnel. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.vpnTunnels.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.vpnTunnels.testIamPermissions": type VpnTunnelsTestIamPermissionsCall struct { @@ -89903,7 +96739,7 @@ func (c *VpnTunnelsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -90189,7 +97025,7 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -90417,7 +97253,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -90614,7 +97450,7 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -90831,7 +97667,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 95ca7af201a..96edf85352d 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/bvUbOBPnfuX4gDZ5aBr7PZU4ZJM\"", + "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/BzHLXiml7t9ltKStcz-Ic74UmRU\"", "discoveryVersion": "v1", "id": "compute:v1", "name": "compute", "version": "v1", - "revision": "20170905", + "revision": "20171122", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -20,7 +20,7 @@ "basePath": "/compute/v1/projects/", "rootUrl": "https://www.googleapis.com/", "servicePath": "compute/v1/projects/", - "batchPath": "batch", + "batchPath": "batch/compute/v1", "parameters": { "alt": { "type": "string", @@ -110,7 +110,7 @@ "AcceleratorType": { "id": "AcceleratorType", "type": "object", - "description": "An Accelerator Type resource.", + "description": "An Accelerator Type resource. (== resource_for beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -182,6 +182,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." 
+ }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -213,6 +294,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -237,9 +399,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -250,7 +416,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -270,6 +438,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -332,11 +506,25 @@ "Address": { "id": "Address", "type": "object", - "description": "A reserved address resource.", + "description": "A reserved address resource. (== resource_for beta.addresses ==) (== resource_for v1.addresses ==) (== resource_for beta.globalAddresses ==) (== resource_for v1.globalAddresses ==)", "properties": { "address": { "type": "string", - "description": "The static external IP address represented by this resource." + "description": "The static IP address represented by this resource." 
+ }, + "addressType": { + "type": "string", + "description": "The type of address to reserve, either INTERNAL or EXTERNAL. If unspecified, defaults to EXTERNAL.", + "enum": [ + "EXTERNAL", + "INTERNAL", + "UNSPECIFIED_TYPE" + ], + "enumDescriptions": [ + "", + "", + "" + ] }, "creationTimestamp": { "type": "string", @@ -390,7 +578,7 @@ }, "status": { "type": "string", - "description": "[Output Only] The status of the address, which can be either IN_USE or RESERVED. An address that is RESERVED is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", + "description": "[Output Only] The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. An address that is RESERVING is currently in the process of being reserved. A RESERVED address is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", "enum": [ "IN_USE", "RESERVED" @@ -400,6 +588,10 @@ "" ] }, + "subnetwork": { + "type": "string", + "description": "The URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range. This field can only be used with INTERNAL type with GCE_ENDPOINT/DNS_RESOLVER purposes." + }, "users": { "type": "array", "description": "[Output Only] The URLs of the resources that are using this address.", @@ -437,6 +629,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -468,6 +741,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -492,9 +846,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -505,7 +863,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -525,6 +885,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -636,7 +1002,7 @@ }, "source": { "type": "string", - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk." + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. 
This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk." }, "type": { "type": "string", @@ -672,7 +1038,7 @@ }, "sourceImage": { "type": "string", - "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family \n\nIf the source image is deleted later, this field will not be set." + "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a custom image that you created, specify the image name in the following format:\n\nglobal/images/my-custom-image \n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. 
Replace the image name with family/family-name:\n\nglobal/images/family/my-image-family \n\nIf the source image is deleted later, this field will not be set." }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", @@ -683,7 +1049,7 @@ "Autoscaler": { "id": "Autoscaler", "type": "object", - "description": "Represents an Autoscaler resource. Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. For more information, read Autoscaling Groups of Instances.", + "description": "Represents an Autoscaler resource. Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. For more information, read Autoscaling Groups of Instances. (== resource_for beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== resource_for beta.regionAutoscalers ==) (== resource_for v1.regionAutoscalers ==)", "properties": { "autoscalingPolicy": { "$ref": "AutoscalingPolicy", @@ -786,6 +1152,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -817,6 +1264,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -889,9 +1417,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -902,7 +1434,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -922,6 +1456,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1006,11 +1546,11 @@ "properties": { "metric": { "type": "string", - "description": "The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values and should be a utilization metric, which means that the number of virtual machines handling requests should increase or decrease proportionally to the metric.\n\nThe metric must have a value type of INT64 or DOUBLE." + "description": "The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values.\n\nThe metric must have a value type of INT64 or DOUBLE." }, "utilizationTarget": { "type": "number", - "description": "The target value of the metric that autoscaler should maintain. 
This must be a positive value.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", + "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", "format": "double" }, "utilizationTargetType": { @@ -1170,13 +1710,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, "BackendService": { "id": "BackendService", "type": "object", - "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity.", + "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity. (== resource_for v1.backendService ==) (== resource_for beta.backendService ==)", "properties": { "affinityCookieTtlSec": { "type": "integer", @@ -1342,6 +1963,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1421,6 +2123,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -1445,9 +2228,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1458,7 +2245,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1478,6 +2267,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1555,7 +2350,7 @@ "Commitment": { "id": "Commitment", "type": "object", - "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts.", + "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts. 
(== resource_for beta.commitments ==) (== resource_for v1.commitments ==)", "properties": { "creationTimestamp": { "type": "string", @@ -1667,6 +2462,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1698,6 +2574,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1722,9 +2679,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1735,7 +2696,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1755,6 +2718,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1864,7 +2833,7 @@ "Disk": { "id": "Disk", "type": "object", - "description": "A Disk resource.", + "description": "A Disk resource. 
(== resource_for beta.disks ==) (== resource_for v1.disks ==)", "properties": { "creationTimestamp": { "type": "string", @@ -1940,7 +2909,7 @@ }, "sourceImage": { "type": "string", - "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" + "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a custom image that you created, specify the image name in the following format:\n\nglobal/images/my-custom-image \n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. 
Replace the image name with family/family-name:\n\nglobal/images/family/my-image-family" }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", @@ -2023,6 +2992,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2054,6 +3104,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2074,7 +3205,7 @@ "DiskType": { "id": "DiskType", "type": "object", - "description": "A DiskType resource.", + "description": "A DiskType resource. (== resource_for beta.diskTypes ==) (== resource_for v1.diskTypes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -2150,6 +3281,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -2181,6 +3393,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2205,9 +3498,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2218,7 +3515,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2238,6 +3537,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2298,9 +3603,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2311,7 +3620,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2331,6 +3642,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2472,6 +3789,13 @@ "type": "string" } }, + "sourceServiceAccounts": { + "type": 
"array", + "description": "If source service accounts are specified, the firewall will apply only to traffic originating from an instance with a service account in this list. Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both properties for the firewall to apply. sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", + "items": { + "type": "string" + } + }, "sourceTags": { "type": "array", "description": "If source tags are specified, the firewall rule applies only to traffic with source IPs that match the primary network interfaces of VM instances that have the tag and are in the same VPC network. Source tags cannot be used to control traffic to an instance's external IP address, it only applies to traffic between instances in the same virtual network. Because tags are associated with instances, not IP addresses. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", @@ -2479,9 +3803,16 @@ "type": "string" } }, + "targetServiceAccounts": { + "type": "array", + "description": "A list of service accounts indicating sets of instances located in the network that may make network connections as specified in allowed[]. targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. 
If neither targetServiceAccounts nor targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + } + }, "targetTags": { "type": "array", - "description": "A list of instance tags indicating sets of instances located in the network that may make network connections as specified in allowed[]. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "description": "A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those tags. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", "items": { "type": "string" } @@ -2516,17 +3847,98 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, "ForwardingRule": { "id": "ForwardingRule", "type": "object", - "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple.", + "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple. (== resource_for beta.forwardingRules ==) (== resource_for v1.forwardingRules ==) (== resource_for beta.globalForwardingRules ==) (== resource_for v1.globalForwardingRules ==) (== resource_for beta.regionForwardingRules ==) (== resource_for v1.regionForwardingRules ==)", "properties": { "IPAddress": { "type": "string", - "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP. For regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule." 
+ "description": "The IP address that this forwarding rule is serving on behalf of.\n\nAddresses are restricted based on the forwarding rule's load balancing scheme (EXTERNAL or INTERNAL) and scope (global or regional).\n\nWhen the load balancing scheme is EXTERNAL, for global forwarding rules, the address must be a global IP, and for regional forwarding rules, the address must live in the same region as the forwarding rule. If this field is empty, an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnet configured for the forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule.\n\nAn address can be specified either by a literal IP address or a URL reference to an existing Address resource. The following examples are all valid: \n- 100.1.2.3 \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address \n- projects/project/regions/region/addresses/address \n- regions/region/addresses/address \n- global/addresses/address \n- address" }, "IPProtocol": { "type": "string", @@ -2664,6 +4076,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -2695,6 +4188,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2719,9 +4293,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2732,7 +4310,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2752,6 +4332,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2805,7 +4391,7 @@ "properties": { "type": { "type": "string", - "description": "The type of supported feature. Currently only VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the server might also populate this property with the value WINDOWS to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", + "description": "The ID of a supported feature. Read Enabling guest operating system features to see a list of available options.", "enum": [ "FEATURE_TYPE_UNSPECIFIED", "VIRTIO_SCSI_MULTIQUEUE", @@ -3001,6 +4587,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." 
+ }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3162,6 +4829,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3260,13 +5008,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, "Image": { "id": "Image", "type": "object", - "description": "An Image resource.", + "description": "An Image resource. (== resource_for beta.images ==) (== resource_for v1.images ==)", "properties": { "archiveSizeBytes": { "type": "string", @@ -3296,7 +5125,7 @@ }, "guestOsFeatures": { "type": "array", - "description": "A list of features to enable on the guest OS. Applicable for bootable images only. Currently, only one feature can be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows each virtual CPU to have its own queue. For Windows images, you can only enable VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher. Linux images with kernel versions 3.17 and higher will support VIRTIO_SCSI_MULTIQUEUE.\n\nFor new Windows images, the server might also populate this field with the value WINDOWS, to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", "items": { "$ref": "GuestOsFeature" } @@ -3390,6 +5219,18 @@ "type": "string", "description": "The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name." }, + "sourceImage": { + "type": "string", + "description": "URL of the source image used to create this image. This can be a full or valid partial URL. You must provide exactly one of: \n- this property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image." + }, + "sourceImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." 
+ }, + "sourceImageId": { + "type": "string", + "description": "[Output Only] The ID value of the image used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given image name." + }, "sourceType": { "type": "string", "description": "The type of the image used to create this disk. The default and only value is RAW", @@ -3445,13 +5286,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, "Instance": { "id": "Instance", "type": "object", - "description": "An Instance resource.", + "description": "An Instance resource. (== resource_for beta.instances ==) (== resource_for v1.instances ==)", "properties": { "canIpForward": { "type": "boolean", @@ -3465,6 +5387,10 @@ "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, + "deletionProtection": { + "type": "boolean", + "description": "Whether the resource should be protected against deletion." + }, "description": { "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." @@ -3518,6 +5444,10 @@ "$ref": "Metadata", "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." }, + "minCpuPlatform": { + "type": "string", + "description": "Specifies a minimum CPU platform for the VM instance. 
Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\"." + }, "name": { "type": "string", "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", @@ -3620,12 +5550,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + 
"description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, "InstanceGroup": { "id": "InstanceGroup", "type": "object", + "description": "InstanceGroups (== resource_for beta.instanceGroups ==) (== resource_for v1.instanceGroups ==) (== resource_for beta.regionInstanceGroups ==) (== resource_for v1.regionInstanceGroups ==)", "properties": { "creationTimestamp": { "type": "string", @@ -3722,6 +5734,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -3753,13 +5846,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, "InstanceGroupManager": { "id": "InstanceGroupManager", "type": "object", - "description": "An Instance Group Manager resource.", + "description": "An Instance Group Manager resource. (== resource_for beta.instanceGroupManagers ==) (== resource_for v1.instanceGroupManagers ==) (== resource_for beta.regionInstanceGroupManagers ==) (== resource_for v1.regionInstanceGroupManagers ==)", "properties": { "baseInstanceName": { "type": "string", @@ -3930,6 +6104,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -3961,6 +6216,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4037,9 +6373,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4050,7 +6390,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4070,6 +6412,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4166,6 +6514,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -4221,9 +6650,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4234,7 +6667,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4254,6 +6689,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4328,6 +6769,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -4392,6 +6914,10 @@ "$ref": "Metadata", "description": "The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." }, + "minCpuPlatform": { + "type": "string", + "description": "Minimum cpu/platform to be used by this instance. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform." + }, "networkInterfaces": { "type": "array", "description": "An array of network access configurations for this interface.", @@ -4429,7 +6955,7 @@ "InstanceTemplate": { "id": "InstanceTemplate", "type": "object", - "description": "An Instance Template resource.", + "description": "An Instance Template resource. (== resource_for beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==)", "properties": { "creationTimestamp": { "type": "string", @@ -4497,6 +7023,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -4562,9 +7169,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4575,7 +7186,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4595,6 +7208,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4663,6 +7282,16 @@ } } }, + "InstancesSetMinCpuPlatformRequest": { + "id": "InstancesSetMinCpuPlatformRequest", + "type": "object", + "properties": { + "minCpuPlatform": { + "type": "string", + "description": "Minimum cpu/platform this instance should be started at." + } + } + }, "InstancesSetServiceAccountRequest": { "id": "InstancesSetServiceAccountRequest", "type": "object", @@ -4693,6 +7322,985 @@ } } }, + "Interconnect": { + "id": "Interconnect", + "type": "object", + "description": "Represents an Interconnects resource. The Interconnects resource is a dedicated connection between Google's network and your on-premises network. For more information, see the Dedicated overview page. (== resource_for v1.interconnects ==) (== resource_for beta.interconnects ==)", + "properties": { + "adminEnabled": { + "type": "boolean", + "description": "Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true." 
+ }, + "circuitInfos": { + "type": "array", + "description": "[Output Only] List of CircuitInfo objects, that describe the individual circuits in this LAG.", + "items": { + "$ref": "InterconnectCircuitInfo" + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "customerName": { + "type": "string", + "description": "Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "expectedOutages": { + "type": "array", + "description": "[Output Only] List of outages expected for this Interconnect.", + "items": { + "$ref": "InterconnectOutageNotification" + } + }, + "googleIpAddress": { + "type": "string", + "description": "[Output Only] IP address configured on the Google side of the Interconnect link. This can be used only for ping tests." + }, + "googleReferenceId": { + "type": "string", + "description": "[Output Only] Google reference ID; to be used when raising support tickets with Google or otherwise to debug backend connectivity issues." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "interconnectAttachments": { + "type": "array", + "description": "[Output Only] A list of the URLs of all InterconnectAttachments configured to use this Interconnect.", + "items": { + "type": "string" + } + }, + "interconnectType": { + "type": "string", + "description": "Type of interconnect. Note that \"IT_PRIVATE\" has been deprecated in favor of \"DEDICATED\"", + "enum": [ + "DEDICATED", + "IT_PRIVATE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. 
Always compute#interconnect for interconnects.", + "default": "compute#interconnect" + }, + "linkType": { + "type": "string", + "description": "Type of link requested. This field indicates speed of each of the links in the bundle, not the entire bundle. Only 10G per link is allowed for a dedicated interconnect. Options: Ethernet_10G_LR", + "enum": [ + "LINK_TYPE_ETHERNET_10G_LR" + ], + "enumDescriptions": [ + "" + ] + }, + "location": { + "type": "string", + "description": "URL of the InterconnectLocation object that represents where this connection is to be provisioned." + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.interconnects.insert" + ] + } + }, + "nocContactEmail": { + "type": "string", + "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Stackdriver logs alerting and Cloud Notifications." + }, + "operationalStatus": { + "type": "string", + "description": "[Output Only] The current status of whether or not this Interconnect is functional.", + "enum": [ + "OS_ACTIVE", + "OS_UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "peerIpAddress": { + "type": "string", + "description": "[Output Only] IP address configured on the customer side of the Interconnect link. 
The customer should configure this IP address during turnup when prompted by Google NOC. This can be used only for ping tests." + }, + "provisionedLinkCount": { + "type": "integer", + "description": "[Output Only] Number of links actually provisioned in this interconnect.", + "format": "int32" + }, + "requestedLinkCount": { + "type": "integer", + "description": "Target number of physical links in the link bundle, as requested by the customer.", + "format": "int32" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "InterconnectAttachment": { + "id": "InterconnectAttachment", + "type": "object", + "description": "Represents an InterconnectAttachment (VLAN attachment) resource. For more information, see Creating VLAN Attachments. (== resource_for beta.interconnectAttachments ==) (== resource_for v1.interconnectAttachments ==)", + "properties": { + "cloudRouterIpAddress": { + "type": "string", + "description": "[Output Only] IPv4 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "customerRouterIpAddress": { + "type": "string", + "description": "[Output Only] IPv4 address + prefix length to be configured on the customer router subinterface for this interconnect attachment." + }, + "description": { + "type": "string", + "description": "An optional description of this resource." + }, + "googleReferenceId": { + "type": "string", + "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "format": "uint64" + }, + "interconnect": { + "type": "string", + "description": "URL of the underlying Interconnect object that this attachment's traffic will traverse through." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", + "default": "compute#interconnectAttachment" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "operationalStatus": { + "type": "string", + "description": "[Output Only] The current status of whether or not this interconnect attachment is functional.", + "enum": [ + "OS_ACTIVE", + "OS_UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "privateInterconnectInfo": { + "$ref": "InterconnectAttachmentPrivateInfo", + "description": "[Output Only] Information specific to an InterconnectAttachment. This property is populated if the interconnect that this is attached to is of type DEDICATED." + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the regional interconnect attachment resides." + }, + "router": { + "type": "string", + "description": "URL of the cloud router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network & region within which the Cloud Router is configured." 
+ }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "InterconnectAttachmentAggregatedList": { + "id": "InterconnectAttachmentAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A list of InterconnectAttachmentsScopedList resources.", + "additionalProperties": { + "$ref": "InterconnectAttachmentsScopedList", + "description": "Name of the scope containing this set of interconnect attachments." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentAggregatedList for aggregated lists of interconnect attachments.", + "default": "compute#interconnectAttachmentAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } + } + } + }, + "InterconnectAttachmentList": { + "id": "InterconnectAttachmentList", + "type": "object", + "description": "Response to the list request, and contains a list of interconnect attachments.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of InterconnectAttachment resources.", + "items": { + "$ref": "InterconnectAttachment" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentList for lists of interconnect attachments.", + "default": "compute#interconnectAttachmentList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } + } + } + }, + "InterconnectAttachmentPrivateInfo": { + "id": "InterconnectAttachmentPrivateInfo", + "type": "object", + "description": "Information for an interconnect attachment when this belongs to an interconnect of type DEDICATED.", + "properties": { + "tag8021q": { + "type": "integer", + "description": "[Output Only] 802.1q encapsulation tag to be used for traffic between Google and the customer, going to and from this network and region.", + "format": "uint32" + } + } + }, + "InterconnectAttachmentsScopedList": { + "id": "InterconnectAttachmentsScopedList", + "type": "object", + "properties": { + "interconnectAttachments": { + "type": "array", + "description": "List of interconnect attachments contained in this scope.", + "items": { + "$ref": "InterconnectAttachment" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } + } + } + }, + "InterconnectCircuitInfo": { + "id": "InterconnectCircuitInfo", + "type": "object", + "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only. Next id: 4", + "properties": { + "customerDemarcId": { + "type": "string", + "description": "Customer-side demarc ID for this circuit." + }, + "googleCircuitId": { + "type": "string", + "description": "Google-assigned unique ID for this circuit. Assigned at circuit turn-up." + }, + "googleDemarcId": { + "type": "string", + "description": "Google-side demarc ID for this circuit. Assigned at circuit turn-up and provided by Google to the customer in the LOA." + } + } + }, + "InterconnectList": { + "id": "InterconnectList", + "type": "object", + "description": "Response to the list request, and contains a list of interconnects.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Interconnect resources.", + "items": { + "$ref": "Interconnect" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#interconnectList for lists of interconnects.", + "default": "compute#interconnectList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." 
+ }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InterconnectLocation": { + "id": "InterconnectLocation", + "type": "object", + "description": "Represents an InterconnectLocations resource. The InterconnectLocations resource describes the locations where you can connect to Google's networks. For more information, see Colocation Facilities.", + "properties": { + "address": { + "type": "string", + "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character." + }, + "availabilityZone": { + "type": "string", + "description": "[Output Only] Availability zone for this location. Within a metropolitan area (metro), maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\"." + }, + "city": { + "type": "string", + "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\"." + }, + "continent": { + "type": "string", + "description": "[Output Only] Continent for this location.", + "enum": [ + "AFRICA", + "ASIA_PAC", + "C_AFRICA", + "C_ASIA_PAC", + "C_EUROPE", + "C_NORTH_AMERICA", + "C_SOUTH_AMERICA", + "EUROPE", + "NORTH_AMERICA", + "SOUTH_AMERICA" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "[Output Only] An optional description of the resource." 
+ }, + "facilityProvider": { + "type": "string", + "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX)." + }, + "facilityProviderFacilityId": { + "type": "string", + "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1)." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#interconnectLocation for interconnect locations.", + "default": "compute#interconnectLocation" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource." + }, + "peeringdbFacilityId": { + "type": "string", + "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb)." + }, + "regionInfos": { + "type": "array", + "description": "[Output Only] A list of InterconnectLocation.RegionInfo objects, that describe parameters pertaining to the relation between this InterconnectLocation and various Google Cloud regions.", + "items": { + "$ref": "InterconnectLocationRegionInfo" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "InterconnectLocationList": { + "id": "InterconnectLocationList", + "type": "object", + "description": "Response to the list request, and contains a list of interconnect locations.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of InterconnectLocation resources.", + "items": { + "$ref": "InterconnectLocation" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. 
Always compute#interconnectLocationList for lists of interconnect locations.", + "default": "compute#interconnectLocationList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InterconnectLocationRegionInfo": { + "id": "InterconnectLocationRegionInfo", + "type": "object", + "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", + "properties": { + "expectedRttMs": { + "type": "string", + "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", + "format": "int64" + }, + "locationPresence": { + "type": "string", + "description": "Identifies the network presence of this location.", + "enum": [ + "GLOBAL", + "LOCAL_REGION", + "LP_GLOBAL", + "LP_LOCAL_REGION" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "region": { + "type": "string", + "description": "URL for the region of this location." + } + } + }, + "InterconnectOutageNotification": { + "id": "InterconnectOutageNotification", + "type": "object", + "description": "Description of a planned outage on this Interconnect. 
Next id: 9", + "properties": { + "affectedCircuits": { + "type": "array", + "description": "Iff issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", + "items": { + "type": "string" + } + }, + "description": { + "type": "string", + "description": "A description about the purpose of the outage." + }, + "endTime": { + "type": "string", + "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", + "format": "int64" + }, + "issueType": { + "type": "string", + "description": "Form this outage is expected to take. Note that the \"IT_\" versions of this enum have been deprecated in favor of the unprefixed values.", + "enum": [ + "IT_OUTAGE", + "IT_PARTIAL_OUTAGE", + "OUTAGE", + "PARTIAL_OUTAGE" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "name": { + "type": "string", + "description": "Unique identifier for this outage notification." + }, + "source": { + "type": "string", + "description": "The party that generated this notification. Note that \"NSRC_GOOGLE\" has been deprecated in favor of \"GOOGLE\"", + "enum": [ + "GOOGLE", + "NSRC_GOOGLE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "startTime": { + "type": "string", + "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", + "format": "int64" + }, + "state": { + "type": "string", + "description": "State of this notification. Note that the \"NS_\" versions of this enum have been deprecated in favor of the unprefixed values.", + "enum": [ + "ACTIVE", + "CANCELLED", + "NS_ACTIVE", + "NS_CANCELED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + } + } + }, "License": { "id": "License", "type": "object", @@ -4726,7 +8334,7 @@ "MachineType": { "id": "MachineType", "type": "object", - "description": "A Machine Type resource.", + "description": "A Machine Type resource. 
(== resource_for v1.machineTypes ==) (== resource_for beta.machineTypes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -4836,6 +8444,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4867,6 +8556,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4891,9 +8661,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4904,7 +8678,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4924,6 +8700,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5118,7 +8900,7 @@ "Network": { "id": "Network", "type": "object", - "description": "Represents a Network resource. Read Networks and Firewalls for more information.", + "description": "Represents a Network resource. Read Networks and Firewalls for more information. 
(== resource_for v1.networks ==) (== resource_for beta.networks ==)", "properties": { "IPv4Range": { "type": "string", @@ -5169,6 +8951,10 @@ "$ref": "NetworkPeering" } }, + "routingConfig": { + "$ref": "NetworkRoutingConfig", + "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." @@ -5252,6 +9038,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5290,6 +9157,25 @@ } } }, + "NetworkRoutingConfig": { + "id": "NetworkRoutingConfig", + "type": "object", + "description": "A routing configuration attached to a network resource. The message includes the list of routers associated with the network, and a flag indicating the type of routing behavior to enforce network-wide.", + "properties": { + "routingMode": { + "type": "string", + "description": "The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise routes with subnetworks of this network in the same region as the router. If set to GLOBAL, this network's cloud routers will advertise routes with all subnetworks of this network, across regions.", + "enum": [ + "GLOBAL", + "REGIONAL" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, "NetworksAddPeeringRequest": { "id": "NetworksAddPeeringRequest", "type": "object", @@ -5300,7 +9186,12 @@ }, "name": { "type": "string", - "description": "Name of the peering, which should conform to RFC1035." 
+ "description": "Name of the peering, which should conform to RFC1035.", + "annotations": { + "required": [ + "compute.networks.addPeering" + ] + } }, "peerNetwork": { "type": "string", @@ -5321,7 +9212,7 @@ "Operation": { "id": "Operation", "type": "object", - "description": "An Operation resource, used to manage asynchronous API requests.", + "description": "An Operation resource, used to manage asynchronous API requests. (== resource_for v1.globalOperations ==) (== resource_for beta.globalOperations ==) (== resource_for v1.regionOperations ==) (== resource_for beta.regionOperations ==) (== resource_for v1.zoneOperations ==) (== resource_for beta.zoneOperations ==)", "properties": { "clientOperationId": { "type": "string", @@ -5457,9 +9348,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5470,7 +9365,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5490,6 +9387,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5551,6 +9454,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -5582,6 +9566,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5606,9 +9671,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5619,7 +9688,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5639,6 +9710,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5714,7 +9791,7 @@ "Project": { "id": "Project", "type": "object", - "description": "A Project resource. Projects can only be created in the Google Cloud Platform Console. Unless marked otherwise, values can only be modified in the console.", + "description": "A Project resource. Projects can only be created in the Google Cloud Platform Console. Unless marked otherwise, values can only be modified in the console. 
(== resource_for v1.projects ==) (== resource_for beta.projects ==)", "properties": { "commonInstanceMetadata": { "$ref": "Metadata", @@ -5863,6 +9940,8 @@ "INSTANCE_GROUPS", "INSTANCE_GROUP_MANAGERS", "INSTANCE_TEMPLATES", + "INTERCONNECTS", + "INTERNAL_ADDRESSES", "IN_USE_ADDRESSES", "LOCAL_SSD_TOTAL_GB", "NETWORKS", @@ -5870,6 +9949,8 @@ "NVIDIA_P100_GPUS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", + "PREEMPTIBLE_NVIDIA_K80_GPUS", + "PREEMPTIBLE_NVIDIA_P100_GPUS", "REGIONAL_AUTOSCALERS", "REGIONAL_INSTANCE_GROUP_MANAGERS", "ROUTERS", @@ -5933,6 +10014,10 @@ "", "", "", + "", + "", + "", + "", "" ] }, @@ -5946,7 +10031,7 @@ "Region": { "id": "Region", "type": "object", - "description": "Region resource.", + "description": "Region resource. (== resource_for beta.regions ==) (== resource_for v1.regions ==)", "properties": { "creationTimestamp": { "type": "string", @@ -6034,6 +10119,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -6065,6 +10231,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6096,6 +10343,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6206,6 +10534,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -6278,6 +10687,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6320,7 +10810,7 @@ "Route": { "id": "Route", "type": "object", - "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", + "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. 
Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped. (== resource_for beta.routes ==) (== resource_for v1.routes ==)", "properties": { "creationTimestamp": { "type": "string", @@ -6430,9 +10920,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6443,7 +10937,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6463,6 +10959,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6520,6 +11022,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -6622,6 +11205,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6677,6 +11341,10 @@ "type": "string", "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface." }, + "linkedInterconnectAttachment": { + "type": "string", + "description": "URI of the linked interconnect attachment. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment." + }, "linkedVpnTunnel": { "type": "string", "description": "URI of the linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment." @@ -6716,6 +11384,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -6854,9 +11603,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6867,7 +11620,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6887,6 +11642,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7028,7 +11789,7 @@ "Snapshot": { "id": "Snapshot", "type": "object", - "description": "A persistent disk snapshot resource.", + "description": "A persistent disk snapshot resource. (== resource_for beta.snapshots ==) (== resource_for v1.snapshots ==)", "properties": { "creationTimestamp": { "type": "string", @@ -7117,7 +11878,7 @@ }, "storageBytes": { "type": "string", - "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", + "description": "[Output Only] A size of the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", "format": "int64" }, "storageBytesStatus": { @@ -7162,13 +11923,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, "SslCertificate": { "id": "SslCertificate", "type": "object", - "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", + "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user. (== resource_for beta.sslCertificates ==) (== resource_for v1.sslCertificates ==)", "properties": { "certificate": { "type": "string", @@ -7235,13 +12077,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value 
format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, "Subnetwork": { "id": "Subnetwork", "type": "object", - "description": "A Subnetwork resource.", + "description": "A Subnetwork resource. (== resource_for beta.subnetworks ==) (== resource_for v1.subnetworks ==)", "properties": { "creationTimestamp": { "type": "string", @@ -7327,6 +12250,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -7358,6 +12362,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7407,9 +12492,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7420,7 +12509,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7440,6 +12531,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7534,7 +12631,7 @@ "TargetHttpProxy": { "id": "TargetHttpProxy", "type": "object", - "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.", + "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== resource_for v1.targetHttpProxies ==)", "properties": { "creationTimestamp": { "type": "string", @@ -7597,6 +12694,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -7616,7 +12794,7 @@ "TargetHttpsProxy": { "id": "TargetHttpsProxy", "type": "object", - "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy.", + "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== resource_for v1.targetHttpsProxies ==)", "properties": { "creationTimestamp": { "type": "string", @@ -7686,13 +12864,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, "TargetInstance": { "id": "TargetInstance", "type": "object", - "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols.", + "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols. (== resource_for beta.targetInstances ==) (== resource_for v1.targetInstances ==)", "properties": { "creationTimestamp": { "type": "string", @@ -7769,6 +13028,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -7800,6 +13140,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7824,9 +13245,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7837,7 +13262,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7857,6 +13284,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7888,7 +13321,7 @@ "TargetPool": { "id": "TargetPool", "type": "object", - "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool.", + "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool. (== resource_for beta.targetPools ==) (== resource_for v1.targetPools ==)", "properties": { "backupPool": { "type": "string", @@ -7992,6 +13425,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." 
+ }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8040,6 +13554,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8116,9 +13711,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8129,7 +13728,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8149,6 +13750,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8230,7 +13837,7 @@ "TargetSslProxy": { "id": "TargetSslProxy", "type": "object", - "description": "A TargetSslProxy resource. This resource defines an SSL proxy.", + "description": "A TargetSslProxy resource. This resource defines an SSL proxy. (== resource_for beta.targetSslProxies ==) (== resource_for v1.targetSslProxies ==)", "properties": { "creationTimestamp": { "type": "string", @@ -8312,6 +13919,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." 
+ }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8346,7 +14034,7 @@ "TargetTcpProxy": { "id": "TargetTcpProxy", "type": "object", - "description": "A TargetTcpProxy resource. This resource defines a TCP proxy.", + "description": "A TargetTcpProxy resource. This resource defines a TCP proxy. (== resource_for beta.targetTcpProxies ==) (== resource_for v1.targetTcpProxies ==)", "properties": { "creationTimestamp": { "type": "string", @@ -8421,13 +14109,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, "TargetVpnGateway": { "id": "TargetVpnGateway", "type": "object", - "description": "Represents a Target VPN gateway resource.", + "description": "Represents a Target VPN gateway resource. (== resource_for beta.targetVpnGateways ==) (== resource_for v1.targetVpnGateways ==)", "properties": { "creationTimestamp": { "type": "string", @@ -8534,6 +14303,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8565,6 +14415,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -8589,9 +14520,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8602,7 +14537,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8622,6 +14559,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8760,6 +14703,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -8859,6 +14883,7 @@ "VpnTunnel": { "id": "VpnTunnel", "type": "object", + "description": "VPN tunnel resource. (== resource_for beta.vpnTunnels ==) (== resource_for v1.vpnTunnels ==)", "properties": { "creationTimestamp": { "type": "string", @@ -9006,6 +15031,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -9037,6 +15143,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -9061,9 +15248,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9074,7 +15265,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9094,6 +15287,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9149,6 +15348,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -9178,8 +15458,15 @@ "Zone": { "id": "Zone", "type": "object", - "description": "A Zone resource.", + "description": "A Zone resource. (== resource_for beta.zones ==) (== resource_for v1.zones ==)", "properties": { + "availableCpuPlatforms": { + "type": "array", + "description": "[Output Only] Available cpu/platform selections for the zone.", + "items": { + "type": "string" + } + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -9256,6 +15543,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -11018,7 +17386,7 @@ "id": "compute.disks.resize", "path": "{project}/zones/{zone}/disks/{disk}/resize", "httpMethod": "POST", - "description": "Resizes the specified persistent disk.", + "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", "parameters": { "disk": { "type": "string", @@ -13079,7 +19447,7 @@ "id": "compute.images.list", "path": "{project}/global/images", "httpMethod": "GET", - "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. 
This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", "parameters": { "filter": { "type": "string", @@ -14897,6 +21265,58 @@ "https://www.googleapis.com/auth/compute" ] }, + "setDeletionProtection": { + "id": "compute.instances.setDeletionProtection", + "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + "httpMethod": "POST", + "description": "Sets deletion protection on the instance.", + "parameters": { + "deletionProtection": { + "type": "boolean", + "description": "Whether the resource should be protected against deletion.", + "default": "true", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setDiskAutoDelete": { "id": "compute.instances.setDiskAutoDelete", "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", @@ -15154,6 +21574,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "setMinCpuPlatform": { + "id": "compute.instances.setMinCpuPlatform", + "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + "httpMethod": "POST", + "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. 
For more information, read Specifying a Minimum CPU Platform.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "InstancesSetMinCpuPlatformRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setScheduling": { "id": "compute.instances.setScheduling", "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", @@ -15400,7 +21869,7 @@ "id": "compute.instances.stop", "path": "{project}/zones/{zone}/instances/{instance}/stop", "httpMethod": "POST", - "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", "parameters": { "instance": { "type": "string", @@ -15444,6 +21913,531 @@ } } }, + "interconnectAttachments": { + "methods": { + "aggregatedList": { + "id": "compute.interconnectAttachments.aggregatedList", + "path": "{project}/aggregated/interconnectAttachments", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of interconnect attachments.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InterconnectAttachmentAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.interconnectAttachments.delete", + "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "DELETE", + "description": "Deletes the specified interconnect attachment.", + "parameters": { + "interconnectAttachment": { + "type": "string", + "description": "Name of the interconnect attachment to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region", + "interconnectAttachment" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.interconnectAttachments.get", + "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "GET", + "description": "Returns the specified interconnect attachment.", + "parameters": { + "interconnectAttachment": { + "type": "string", + "description": "Name of the interconnect attachment to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "interconnectAttachment" + ], + "response": { + "$ref": "InterconnectAttachment" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.interconnectAttachments.insert", + "path": 
"{project}/regions/{region}/interconnectAttachments", + "httpMethod": "POST", + "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "InterconnectAttachment" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.interconnectAttachments.list", + "path": "{project}/regions/{region}/interconnectAttachments", + "httpMethod": "GET", + "description": "Retrieves the list of interconnect attachments contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "InterconnectAttachmentList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "interconnectLocations": { + "methods": { + "get": { + "id": "compute.interconnectLocations.get", + "path": "{project}/global/interconnectLocations/{interconnectLocation}", + "httpMethod": "GET", + "description": "Returns the details for the specified interconnect location. 
Get a list of available interconnect locations by making a list() request.", + "parameters": { + "interconnectLocation": { + "type": "string", + "description": "Name of the interconnect location to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "interconnectLocation" + ], + "response": { + "$ref": "InterconnectLocation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.interconnectLocations.list", + "path": "{project}/global/interconnectLocations", + "httpMethod": "GET", + "description": "Retrieves the list of interconnect locations available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InterconnectLocationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "interconnects": { + "methods": { + "delete": { + "id": "compute.interconnects.delete", + "path": "{project}/global/interconnects/{interconnect}", + "httpMethod": "DELETE", + "description": "Deletes the specified interconnect.", + "parameters": { + "interconnect": { + "type": "string", + "description": "Name of the interconnect to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "interconnect" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.interconnects.get", + "path": "{project}/global/interconnects/{interconnect}", + "httpMethod": "GET", + "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + "parameters": { + "interconnect": { + "type": "string", + "description": "Name of the interconnect to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "interconnect" + ], + "response": { + "$ref": "Interconnect" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.interconnects.insert", + "path": "{project}/global/interconnects", + "httpMethod": "POST", + "description": "Creates a Interconnect in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { 
+ "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Interconnect" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.interconnects.list", + "path": "{project}/global/interconnects", + "httpMethod": "GET", + "description": "Retrieves the list of interconnect available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InterconnectList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.interconnects.patch", + "path": "{project}/global/interconnects/{interconnect}", + "httpMethod": "PATCH", + "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "parameters": { + "interconnect": { + "type": "string", + "description": "Name of the interconnect to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "interconnect" + ], + "request": { + "$ref": "Interconnect" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "licenses": { "methods": { "get": { @@ -15831,6 +22825,47 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "id": "compute.networks.patch", + "path": "{project}/global/networks/{network}", + "httpMethod": "PATCH", + "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "request": { + "$ref": "Network" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "removePeering": { "id": "compute.networks.removePeering", "path": "{project}/global/networks/{network}/removePeering", diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index f6125c61c51..643966ea140 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -91,6 +91,9 @@ func New(client *http.Client) (*Service, error) { s.InstanceGroups = NewInstanceGroupsService(s) s.InstanceTemplates = NewInstanceTemplatesService(s) s.Instances = NewInstancesService(s) + s.InterconnectAttachments = NewInterconnectAttachmentsService(s) + s.InterconnectLocations = NewInterconnectLocationsService(s) + s.Interconnects = NewInterconnectsService(s) s.Licenses = NewLicensesService(s) s.MachineTypes = NewMachineTypesService(s) s.Networks = NewNetworksService(s) @@ -166,6 +169,12 @@ type Service struct { Instances *InstancesService + InterconnectAttachments *InterconnectAttachmentsService + + InterconnectLocations *InterconnectLocationsService + + Interconnects *InterconnectsService + Licenses *LicensesService MachineTypes *MachineTypesService @@ -408,6 +417,33 @@ type InstancesService struct { s *Service } +func NewInterconnectAttachmentsService(s *Service) *InterconnectAttachmentsService { + rs := &InterconnectAttachmentsService{s: s} + return rs +} + +type InterconnectAttachmentsService struct { + s *Service +} + +func NewInterconnectLocationsService(s 
*Service) *InterconnectLocationsService { + rs := &InterconnectLocationsService{s: s} + return rs +} + +type InterconnectLocationsService struct { + s *Service +} + +func NewInterconnectsService(s *Service) *InterconnectsService { + rs := &InterconnectsService{s: s} + return rs +} + +type InterconnectsService struct { + s *Service +} + func NewLicensesService(s *Service) *LicensesService { rs := &LicensesService{s: s} return rs @@ -681,12 +717,13 @@ type AcceleratorConfig struct { } func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorConfig - raw := noMethod(*s) + type NoMethod AcceleratorConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AcceleratorType: An Accelerator Type resource. +// AcceleratorType: An Accelerator Type resource. (== resource_for +// beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==) type AcceleratorType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -746,8 +783,8 @@ type AcceleratorType struct { } func (s *AcceleratorType) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorType - raw := noMethod(*s) + type NoMethod AcceleratorType + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -775,6 +812,9 @@ type AcceleratorTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AcceleratorTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -797,8 +837,110 @@ type AcceleratorTypeAggregatedList struct { } func (s *AcceleratorTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeAggregatedList - raw := noMethod(*s) + type NoMethod AcceleratorTypeAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AcceleratorTypeAggregatedListWarning: [Output Only] Informational +// warning message. +type AcceleratorTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AcceleratorTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod AcceleratorTypeAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod AcceleratorTypeAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -826,6 +968,9 @@ type AcceleratorTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AcceleratorTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -848,8 +993,110 @@ type AcceleratorTypeList struct { } func (s *AcceleratorTypeList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeList - raw := noMethod(*s) + type NoMethod AcceleratorTypeList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AcceleratorTypeListWarning: [Output Only] Informational warning +// message. +type AcceleratorTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AcceleratorTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeListWarning) MarshalJSON() ([]byte, error) { + type NoMethod AcceleratorTypeListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod AcceleratorTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -881,8 +1128,8 @@ type AcceleratorTypesScopedList struct { } func (s *AcceleratorTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedList - raw := noMethod(*s) + type NoMethod AcceleratorTypesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -896,9 +1143,13 @@ type AcceleratorTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -909,7 +1160,9 @@ type AcceleratorTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -940,8 +1193,8 @@ type AcceleratorTypesScopedListWarning struct { } func (s *AcceleratorTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedListWarning - raw := noMethod(*s) + type NoMethod AcceleratorTypesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -977,8 +1230,8 @@ type AcceleratorTypesScopedListWarningData struct { } func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedListWarningData - raw := noMethod(*s) + type NoMethod 
AcceleratorTypesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1026,16 +1279,27 @@ type AccessConfig struct { } func (s *AccessConfig) MarshalJSON() ([]byte, error) { - type noMethod AccessConfig - raw := noMethod(*s) + type NoMethod AccessConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Address: A reserved address resource. +// Address: A reserved address resource. (== resource_for beta.addresses +// ==) (== resource_for v1.addresses ==) (== resource_for +// beta.globalAddresses ==) (== resource_for v1.globalAddresses ==) type Address struct { - // Address: The static external IP address represented by this resource. + // Address: The static IP address represented by this resource. Address string `json:"address,omitempty"` + // AddressType: The type of address to reserve, either INTERNAL or + // EXTERNAL. If unspecified, defaults to EXTERNAL. + // + // Possible values: + // "EXTERNAL" + // "INTERNAL" + // "UNSPECIFIED_TYPE" + AddressType string `json:"addressType,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -1078,16 +1342,23 @@ type Address struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] The status of the address, which can be either - // IN_USE or RESERVED. An address that is RESERVED is currently reserved - // and available to use. An IN_USE address is currently being used by - // another resource and is not available. + // Status: [Output Only] The status of the address, which can be one of + // RESERVING, RESERVED, or IN_USE. An address that is RESERVING is + // currently in the process of being reserved. A RESERVED address is + // currently reserved and available to use. 
An IN_USE address is + // currently being used by another resource and is not available. // // Possible values: // "IN_USE" // "RESERVED" Status string `json:"status,omitempty"` + // Subnetwork: The URL of the subnetwork in which to reserve the + // address. If an IP address is specified, it must be within the + // subnetwork's IP range. This field can only be used with INTERNAL type + // with GCE_ENDPOINT/DNS_RESOLVER purposes. + Subnetwork string `json:"subnetwork,omitempty"` + // Users: [Output Only] The URLs of the resources that are using this // address. Users []string `json:"users,omitempty"` @@ -1114,8 +1385,8 @@ type Address struct { } func (s *Address) MarshalJSON() ([]byte, error) { - type noMethod Address - raw := noMethod(*s) + type NoMethod Address + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1142,6 +1413,9 @@ type AddressAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AddressAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1164,8 +1438,110 @@ type AddressAggregatedList struct { } func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AddressAggregatedList - raw := noMethod(*s) + type NoMethod AddressAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AddressAggregatedListWarning: [Output Only] Informational warning +// message. +type AddressAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AddressAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod AddressAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AddressAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AddressAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod AddressAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1193,6 +1569,9 @@ type AddressList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AddressListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1215,8 +1594,109 @@ type AddressList struct { } func (s *AddressList) MarshalJSON() ([]byte, error) { - type noMethod AddressList - raw := noMethod(*s) + type NoMethod AddressList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AddressListWarning: [Output Only] Informational warning message. +type AddressListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AddressListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressListWarning) MarshalJSON() ([]byte, error) { + type NoMethod AddressListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AddressListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AddressListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod AddressListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1246,8 +1726,8 @@ type AddressesScopedList struct { } func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedList - raw := noMethod(*s) + type NoMethod AddressesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1261,9 +1741,13 @@ type AddressesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1274,7 +1758,9 @@ type AddressesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1305,8 +1791,8 @@ type AddressesScopedListWarning struct { } func (s *AddressesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedListWarning - raw := noMethod(*s) + type NoMethod AddressesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1342,8 +1828,8 @@ type AddressesScopedListWarningData struct { } func (s *AddressesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedListWarningData - raw := noMethod(*s) + type NoMethod AddressesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-1381,8 +1867,8 @@ type AliasIpRange struct { } func (s *AliasIpRange) MarshalJSON() ([]byte, error) { - type noMethod AliasIpRange - raw := noMethod(*s) + type NoMethod AliasIpRange + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1476,7 +1962,8 @@ type AttachedDisk struct { // Source: Specifies a valid partial or full URL to an existing // Persistent Disk resource. When creating a new instance, one of - // initializeParams.sourceImage or disks.source is required. + // initializeParams.sourceImage or disks.source is required except for + // local SSD. // // If desired, you can also attach existing non-root persistent disks // using this property. This field is only applicable for persistent @@ -1512,8 +1999,8 @@ type AttachedDisk struct { } func (s *AttachedDisk) MarshalJSON() ([]byte, error) { - type noMethod AttachedDisk - raw := noMethod(*s) + type NoMethod AttachedDisk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1552,7 +2039,7 @@ type AttachedDiskInitializeParams struct { // SourceImage: The source image to create this disk. When creating a // new instance, one of initializeParams.sourceImage or disks.source is - // required. + // required except for local SSD. // // To create a disk with one of the public operating system images, // specify the image by its family name. 
For example, specify @@ -1566,17 +2053,17 @@ type AttachedDiskInitializeParams struct { // // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD // - // To create a disk with a private image that you created, specify the + // To create a disk with a custom image that you created, specify the // image name in the following format: // - // global/images/my-private-image + // global/images/my-custom-image // - // You can also specify a private image by its image family, which + // You can also specify a custom image by its image family, which // returns the latest version of the image in that family. Replace the // image name with // family/family-name: // - // global/images/family/my-private-family + // global/images/family/my-image-family // // If the source image is deleted later, this field will not be set. SourceImage string `json:"sourceImage,omitempty"` @@ -1608,15 +2095,18 @@ type AttachedDiskInitializeParams struct { } func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { - type noMethod AttachedDiskInitializeParams - raw := noMethod(*s) + type NoMethod AttachedDiskInitializeParams + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Autoscaler: Represents an Autoscaler resource. Autoscalers allow you // to automatically scale virtual machine instances in managed instance // groups according to an autoscaling policy that you define. For more -// information, read Autoscaling Groups of Instances. +// information, read Autoscaling Groups of Instances. (== resource_for +// beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== +// resource_for beta.regionAutoscalers ==) (== resource_for +// v1.regionAutoscalers ==) type Autoscaler struct { // AutoscalingPolicy: The configuration parameters for the autoscaling // algorithm. 
You can define one or more of the policies for an @@ -1704,8 +2194,8 @@ type Autoscaler struct { } func (s *Autoscaler) MarshalJSON() ([]byte, error) { - type noMethod Autoscaler - raw := noMethod(*s) + type NoMethod Autoscaler + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1732,6 +2222,9 @@ type AutoscalerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AutoscalerAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1754,8 +2247,110 @@ type AutoscalerAggregatedList struct { } func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerAggregatedList - raw := noMethod(*s) + type NoMethod AutoscalerAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AutoscalerAggregatedListWarning: [Output Only] Informational warning +// message. +type AutoscalerAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AutoscalerAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod AutoscalerAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AutoscalerAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AutoscalerAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod AutoscalerAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1783,6 +2378,9 @@ type AutoscalerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AutoscalerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1805,8 +2403,109 @@ type AutoscalerList struct { } func (s *AutoscalerList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerList - raw := noMethod(*s) + type NoMethod AutoscalerList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AutoscalerListWarning: [Output Only] Informational warning message. +type AutoscalerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AutoscalerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerListWarning) MarshalJSON() ([]byte, error) { + type NoMethod AutoscalerListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AutoscalerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AutoscalerListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod AutoscalerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1852,8 +2551,8 @@ type AutoscalerStatusDetails struct { } func (s *AutoscalerStatusDetails) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerStatusDetails - raw := noMethod(*s) + type NoMethod AutoscalerStatusDetails + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1884,8 +2583,8 @@ type AutoscalersScopedList struct { } func (s *AutoscalersScopedList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedList - raw := noMethod(*s) + type NoMethod AutoscalersScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1899,9 +2598,13 @@ type AutoscalersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1912,7 +2615,9 @@ type AutoscalersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1943,8 +2648,8 @@ type AutoscalersScopedListWarning struct { } func (s *AutoscalersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedListWarning - raw := noMethod(*s) + type NoMethod AutoscalersScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-1980,8 +2685,8 @@ type AutoscalersScopedListWarningData struct { } func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedListWarningData - raw := noMethod(*s) + type NoMethod AutoscalersScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2044,8 +2749,8 @@ type AutoscalingPolicy struct { } func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicy - raw := noMethod(*s) + type NoMethod AutoscalingPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2085,18 +2790,18 @@ type AutoscalingPolicyCpuUtilization struct { } func (s *AutoscalingPolicyCpuUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyCpuUtilization - raw := noMethod(*s) + type NoMethod AutoscalingPolicyCpuUtilization + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyCpuUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyCpuUtilization + type NoMethod AutoscalingPolicyCpuUtilization var s1 struct { UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -2108,16 +2813,15 @@ func (s *AutoscalingPolicyCpuUtilization) UnmarshalJSON(data []byte) error { // policy. type AutoscalingPolicyCustomMetricUtilization struct { // Metric: The identifier (type) of the Stackdriver Monitoring metric. - // The metric cannot have negative values and should be a utilization - // metric, which means that the number of virtual machines handling - // requests should increase or decrease proportionally to the - // metric. + // The metric cannot have negative values. // // The metric must have a value type of INT64 or DOUBLE. 
Metric string `json:"metric,omitempty"` // UtilizationTarget: The target value of the metric that autoscaler - // should maintain. This must be a positive value. + // should maintain. This must be a positive value. A utilization metric + // scales number of virtual machines handling requests to increase or + // decrease proportionally to the metric. // // For example, a good metric to use as a utilization_target is // compute.googleapis.com/instance/network/received_bytes_count. The @@ -2154,18 +2858,18 @@ type AutoscalingPolicyCustomMetricUtilization struct { } func (s *AutoscalingPolicyCustomMetricUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyCustomMetricUtilization - raw := noMethod(*s) + type NoMethod AutoscalingPolicyCustomMetricUtilization + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyCustomMetricUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyCustomMetricUtilization + type NoMethod AutoscalingPolicyCustomMetricUtilization var s1 struct { UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -2201,18 +2905,18 @@ type AutoscalingPolicyLoadBalancingUtilization struct { } func (s *AutoscalingPolicyLoadBalancingUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyLoadBalancingUtilization - raw := noMethod(*s) + type NoMethod AutoscalingPolicyLoadBalancingUtilization + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *AutoscalingPolicyLoadBalancingUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyLoadBalancingUtilization + type NoMethod AutoscalingPolicyLoadBalancingUtilization var s1 struct { UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - 
*noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -2322,20 +3026,20 @@ type Backend struct { } func (s *Backend) MarshalJSON() ([]byte, error) { - type noMethod Backend - raw := noMethod(*s) + type NoMethod Backend + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Backend) UnmarshalJSON(data []byte) error { - type noMethod Backend + type NoMethod Backend var s1 struct { CapacityScaler gensupport.JSONFloat64 `json:"capacityScaler"` MaxRatePerInstance gensupport.JSONFloat64 `json:"maxRatePerInstance"` MaxUtilization gensupport.JSONFloat64 `json:"maxUtilization"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -2403,8 +3107,8 @@ type BackendBucket struct { } func (s *BackendBucket) MarshalJSON() ([]byte, error) { - type noMethod BackendBucket - raw := noMethod(*s) + type NoMethod BackendBucket + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2431,6 +3135,9 @@ type BackendBucketList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendBucketListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2453,13 +3160,117 @@ type BackendBucketList struct { } func (s *BackendBucketList) MarshalJSON() ([]byte, error) { - type noMethod BackendBucketList - raw := noMethod(*s) + type NoMethod BackendBucketList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendBucketListWarning: [Output Only] Informational warning +// message. 
+type BackendBucketListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendBucketListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketListWarning) MarshalJSON() ([]byte, error) { + type NoMethod BackendBucketListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendBucketListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod BackendBucketListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // BackendService: A BackendService resource. This resource defines a -// group of backend virtual machines and their serving capacity. +// group of backend virtual machines and their serving capacity. (== +// resource_for v1.backendService ==) (== resource_for +// beta.backendService ==) type BackendService struct { // AffinityCookieTtlSec: Lifetime of cookies in seconds if // session_affinity is GENERATED_COOKIE. If set to 0, the cookie is @@ -2622,8 +3433,8 @@ type BackendService struct { } func (s *BackendService) MarshalJSON() ([]byte, error) { - type noMethod BackendService - raw := noMethod(*s) + type NoMethod BackendService + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2651,6 +3462,9 @@ type BackendServiceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendServiceAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2673,8 +3487,110 @@ type BackendServiceAggregatedList struct { } func (s *BackendServiceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceAggregatedList - raw := noMethod(*s) + type NoMethod BackendServiceAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceAggregatedListWarning: [Output Only] Informational +// warning message. 
+type BackendServiceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendServiceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2703,8 +3619,8 @@ type BackendServiceCdnPolicy struct { } func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceCdnPolicy - raw := noMethod(*s) + type NoMethod BackendServiceCdnPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2737,8 +3653,8 @@ type BackendServiceGroupHealth struct { } func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceGroupHealth - raw := noMethod(*s) + type NoMethod BackendServiceGroupHealth + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2772,8 +3688,8 @@ type BackendServiceIAP struct { } func (s *BackendServiceIAP) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceIAP - raw := noMethod(*s) + type NoMethod BackendServiceIAP + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2801,6 +3717,9 @@ type BackendServiceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendServiceListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -2823,8 +3742,110 @@ type BackendServiceList struct { } func (s *BackendServiceList) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceList - raw := noMethod(*s) + type NoMethod BackendServiceList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceListWarning: [Output Only] Informational warning +// message. +type BackendServiceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendServiceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListWarning) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2855,8 +3876,8 @@ type BackendServicesScopedList struct { } func (s *BackendServicesScopedList) MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedList - raw := noMethod(*s) + type NoMethod BackendServicesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2870,9 +3891,13 @@ type BackendServicesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -2883,7 +3908,9 @@ type BackendServicesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -2914,8 +3941,8 @@ type BackendServicesScopedListWarning struct { } func (s *BackendServicesScopedListWarning) 
MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedListWarning - raw := noMethod(*s) + type NoMethod BackendServicesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2951,8 +3978,8 @@ type BackendServicesScopedListWarningData struct { } func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedListWarningData - raw := noMethod(*s) + type NoMethod BackendServicesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2981,8 +4008,8 @@ type CacheInvalidationRule struct { } func (s *CacheInvalidationRule) MarshalJSON() ([]byte, error) { - type noMethod CacheInvalidationRule - raw := noMethod(*s) + type NoMethod CacheInvalidationRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3034,8 +4061,8 @@ type CacheKeyPolicy struct { } func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { - type noMethod CacheKeyPolicy - raw := noMethod(*s) + type NoMethod CacheKeyPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3048,7 +4075,8 @@ func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { // Committed use discounts are subject to Google Cloud Platform's // Service Specific Terms. By purchasing a committed use discount, you // agree to these terms. Committed use discounts will not renew, so you -// must purchase a new commitment to continue receiving discounts. +// must purchase a new commitment to continue receiving discounts. (== +// resource_for beta.commitments ==) (== resource_for v1.commitments ==) type Commitment struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -3142,8 +4170,8 @@ type Commitment struct { } func (s *Commitment) MarshalJSON() ([]byte, error) { - type noMethod Commitment - raw := noMethod(*s) + type NoMethod Commitment + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3170,6 +4198,9 @@ type CommitmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *CommitmentAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3192,8 +4223,110 @@ type CommitmentAggregatedList struct { } func (s *CommitmentAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentAggregatedList - raw := noMethod(*s) + type NoMethod CommitmentAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CommitmentAggregatedListWarning: [Output Only] Informational warning +// message. +type CommitmentAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*CommitmentAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod CommitmentAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CommitmentAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *CommitmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod CommitmentAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3221,6 +4354,9 @@ type CommitmentList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *CommitmentListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3243,8 +4379,109 @@ type CommitmentList struct { } func (s *CommitmentList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentList - raw := noMethod(*s) + type NoMethod CommitmentList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CommitmentListWarning: [Output Only] Informational warning message. +type CommitmentListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*CommitmentListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentListWarning) MarshalJSON() ([]byte, error) { + type NoMethod CommitmentListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CommitmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *CommitmentListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod CommitmentListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3275,8 +4512,8 @@ type CommitmentsScopedList struct { } func (s *CommitmentsScopedList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedList - raw := noMethod(*s) + type NoMethod CommitmentsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3290,9 +4527,13 @@ type CommitmentsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -3303,7 +4544,9 @@ type CommitmentsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -3334,8 +4577,8 @@ type CommitmentsScopedListWarning struct { } func (s *CommitmentsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedListWarning - raw := noMethod(*s) + type NoMethod CommitmentsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3371,8 +4614,8 @@ type CommitmentsScopedListWarningData struct { } func (s *CommitmentsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedListWarningData - raw := noMethod(*s) + type NoMethod CommitmentsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -3402,8 +4645,8 @@ type ConnectionDraining struct { } func (s *ConnectionDraining) MarshalJSON() ([]byte, error) { - type noMethod ConnectionDraining - raw := noMethod(*s) + type NoMethod ConnectionDraining + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3435,8 +4678,8 @@ type CustomerEncryptionKey struct { } func (s *CustomerEncryptionKey) MarshalJSON() ([]byte, error) { - type noMethod CustomerEncryptionKey - raw := noMethod(*s) + type NoMethod CustomerEncryptionKey + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3469,8 +4712,8 @@ type CustomerEncryptionKeyProtectedDisk struct { } func (s *CustomerEncryptionKeyProtectedDisk) MarshalJSON() ([]byte, error) { - type noMethod CustomerEncryptionKeyProtectedDisk - raw := noMethod(*s) + type NoMethod CustomerEncryptionKeyProtectedDisk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3530,12 +4773,13 @@ type DeprecationStatus struct { } func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { - type noMethod DeprecationStatus - raw := noMethod(*s) + type NoMethod DeprecationStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Disk: A Disk resource. +// Disk: A Disk resource. (== resource_for beta.disks ==) (== +// resource_for v1.disks ==) type Disk struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -3637,17 +4881,17 @@ type Disk struct { // // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD // - // To create a disk with a private image that you created, specify the + // To create a disk with a custom image that you created, specify the // image name in the following format: // - // global/images/my-private-image + // global/images/my-custom-image // - // You can also specify a private image by its image family, which + // You can also specify a custom image by its image family, which // returns the latest version of the image in that family. Replace the // image name with // family/family-name: // - // global/images/family/my-private-family + // global/images/family/my-image-family SourceImage string `json:"sourceImage,omitempty"` // SourceImageEncryptionKey: The customer-supplied encryption key of the @@ -3728,8 +4972,8 @@ type Disk struct { } func (s *Disk) MarshalJSON() ([]byte, error) { - type noMethod Disk - raw := noMethod(*s) + type NoMethod Disk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3756,6 +5000,9 @@ type DiskAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3778,8 +5025,110 @@ type DiskAggregatedList struct { } func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod DiskAggregatedList - raw := noMethod(*s) + type NoMethod DiskAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DiskAggregatedListWarning: [Output Only] Informational warning +// message. +type DiskAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod DiskAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod DiskAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3807,6 +5156,9 @@ type DiskList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3829,8 +5181,109 @@ type DiskList struct { } func (s *DiskList) MarshalJSON() ([]byte, error) { - type noMethod DiskList - raw := noMethod(*s) + type NoMethod DiskList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DiskListWarning: [Output Only] Informational warning message. +type DiskListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskListWarning) MarshalJSON() ([]byte, error) { + type NoMethod DiskListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *DiskListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod DiskListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3872,12 +5325,13 @@ type DiskMoveRequest struct { } func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { - type noMethod DiskMoveRequest - raw := noMethod(*s) + type NoMethod DiskMoveRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DiskType: A DiskType resource. +// DiskType: A DiskType resource. (== resource_for beta.diskTypes ==) +// (== resource_for v1.diskTypes ==) type DiskType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -3938,8 +5392,8 @@ type DiskType struct { } func (s *DiskType) MarshalJSON() ([]byte, error) { - type noMethod DiskType - raw := noMethod(*s) + type NoMethod DiskType + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3966,6 +5420,9 @@ type DiskTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3988,8 +5445,110 @@ type DiskTypeAggregatedList struct { } func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeAggregatedList - raw := noMethod(*s) + type NoMethod DiskTypeAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DiskTypeAggregatedListWarning: [Output Only] Informational warning +// message. +type DiskTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod DiskTypeAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod DiskTypeAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4017,6 +5576,9 @@ type DiskTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4039,8 +5601,109 @@ type DiskTypeList struct { } func (s *DiskTypeList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeList - raw := noMethod(*s) + type NoMethod DiskTypeList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DiskTypeListWarning: [Output Only] Informational warning message. +type DiskTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeListWarning) MarshalJSON() ([]byte, error) { + type NoMethod DiskTypeListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *DiskTypeListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod DiskTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4070,8 +5733,8 @@ type DiskTypesScopedList struct { } func (s *DiskTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedList - raw := noMethod(*s) + type NoMethod DiskTypesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4085,9 +5748,13 @@ type DiskTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4098,7 +5765,9 @@ type DiskTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4129,8 +5798,8 @@ type DiskTypesScopedListWarning struct { } func (s *DiskTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedListWarning - raw := noMethod(*s) + type NoMethod DiskTypesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4166,8 +5835,8 @@ type DiskTypesScopedListWarningData struct { } func (s *DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedListWarningData - raw := noMethod(*s) + type NoMethod DiskTypesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-4194,8 +5863,8 @@ type DisksResizeRequest struct { } func (s *DisksResizeRequest) MarshalJSON() ([]byte, error) { - type noMethod DisksResizeRequest - raw := noMethod(*s) + type NoMethod DisksResizeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4225,8 +5894,8 @@ type DisksScopedList struct { } func (s *DisksScopedList) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedList - raw := noMethod(*s) + type NoMethod DisksScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4240,9 +5909,13 @@ type DisksScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4253,7 +5926,9 @@ type DisksScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4284,8 +5959,8 @@ type DisksScopedListWarning struct { } func (s *DisksScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedListWarning - raw := noMethod(*s) + type NoMethod DisksScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4321,8 +5996,8 @@ type DisksScopedListWarningData struct { } func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedListWarningData - raw := noMethod(*s) + type NoMethod DisksScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } 
@@ -4413,6 +6088,20 @@ type Firewall struct { // the firewall to apply. Only IPv4 is supported. SourceRanges []string `json:"sourceRanges,omitempty"` + // SourceServiceAccounts: If source service accounts are specified, the + // firewall will apply only to traffic originating from an instance with + // a service account in this list. Source service accounts cannot be + // used to control traffic to an instance's external IP address because + // service accounts are associated with an instance, not an IP address. + // sourceRanges can be set at the same time as sourceServiceAccounts. If + // both are set, the firewall will apply to traffic that has source IP + // address within sourceRanges OR the source IP belongs to an instance + // with service account listed in sourceServiceAccount. The connection + // does not need to match both properties for the firewall to apply. + // sourceServiceAccounts cannot be used at the same time as sourceTags + // or targetTags. + SourceServiceAccounts []string `json:"sourceServiceAccounts,omitempty"` + // SourceTags: If source tags are specified, the firewall rule applies // only to traffic with source IPs that match the primary network // interfaces of VM instances that have the tag and are in the same VPC @@ -4427,10 +6116,19 @@ type Firewall struct { // the firewall to apply. SourceTags []string `json:"sourceTags,omitempty"` - // TargetTags: A list of instance tags indicating sets of instances - // located in the network that may make network connections as specified - // in allowed[]. If no targetTags are specified, the firewall rule + // TargetServiceAccounts: A list of service accounts indicating sets of + // instances located in the network that may make network connections as + // specified in allowed[]. targetServiceAccounts cannot be used at the + // same time as targetTags or sourceTags. 
If neither + // targetServiceAccounts nor targetTags are specified, the firewall rule // applies to all instances on the specified network. + TargetServiceAccounts []string `json:"targetServiceAccounts,omitempty"` + + // TargetTags: A list of tags that controls which instances the firewall + // rule applies to. If targetTags are specified, then the firewall rule + // applies only to instances in the VPC network that have one of those + // tags. If no targetTags are specified, the firewall rule applies to + // all instances on the specified network. TargetTags []string `json:"targetTags,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4455,8 +6153,8 @@ type Firewall struct { } func (s *Firewall) MarshalJSON() ([]byte, error) { - type noMethod Firewall - raw := noMethod(*s) + type NoMethod Firewall + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4493,8 +6191,8 @@ type FirewallAllowed struct { } func (s *FirewallAllowed) MarshalJSON() ([]byte, error) { - type noMethod FirewallAllowed - raw := noMethod(*s) + type NoMethod FirewallAllowed + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4531,8 +6229,8 @@ type FirewallDenied struct { } func (s *FirewallDenied) MarshalJSON() ([]byte, error) { - type noMethod FirewallDenied - raw := noMethod(*s) + type NoMethod FirewallDenied + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4560,6 +6258,9 @@ type FirewallList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *FirewallListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -4582,31 +6283,152 @@ type FirewallList struct { } func (s *FirewallList) MarshalJSON() ([]byte, error) { - type noMethod FirewallList - raw := noMethod(*s) + type NoMethod FirewallList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// FirewallListWarning: [Output Only] Informational warning message. +type FirewallListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*FirewallListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallListWarning) MarshalJSON() ([]byte, error) { + type NoMethod FirewallListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type FirewallListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod FirewallListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ForwardingRule: A ForwardingRule resource. A ForwardingRule resource // specifies which pool of target virtual machines to forward a packet -// to if it matches the given [IPAddress, IPProtocol, ports] tuple. +// to if it matches the given [IPAddress, IPProtocol, ports] tuple. (== +// resource_for beta.forwardingRules ==) (== resource_for +// v1.forwardingRules ==) (== resource_for beta.globalForwardingRules +// ==) (== resource_for v1.globalForwardingRules ==) (== resource_for +// beta.regionForwardingRules ==) (== resource_for +// v1.regionForwardingRules ==) type ForwardingRule struct { // IPAddress: The IP address that this forwarding rule is serving on // behalf of. // - // For global forwarding rules, the address must be a global IP. For - // regional forwarding rules, the address must live in the same region - // as the forwarding rule. By default, this field is empty and an - // ephemeral IPv4 address from the same scope (global or regional) will - // be assigned. A regional forwarding rule supports IPv4 only. A global - // forwarding rule supports either IPv4 or IPv6. 
+ // Addresses are restricted based on the forwarding rule's load + // balancing scheme (EXTERNAL or INTERNAL) and scope (global or + // regional). + // + // When the load balancing scheme is EXTERNAL, for global forwarding + // rules, the address must be a global IP, and for regional forwarding + // rules, the address must live in the same region as the forwarding + // rule. If this field is empty, an ephemeral IPv4 address from the same + // scope (global or regional) will be assigned. A regional forwarding + // rule supports IPv4 only. A global forwarding rule supports either + // IPv4 or IPv6. // // When the load balancing scheme is INTERNAL, this can only be an RFC - // 1918 IP address belonging to the network/subnetwork configured for - // the forwarding rule. A reserved address cannot be used. If the field - // is empty, the IP address will be automatically allocated from the - // internal IP range of the subnetwork or network configured for this - // forwarding rule. + // 1918 IP address belonging to the network/subnet configured for the + // forwarding rule. By default, if this field is empty, an ephemeral + // internal IP address will be automatically allocated from the IP range + // of the subnet or network configured for this forwarding rule. + // + // An address can be specified either by a literal IP address or a URL + // reference to an existing Address resource. The following examples are + // all valid: + // - 100.1.2.3 + // - + // https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address + // - projects/project/regions/region/addresses/address + // - regions/region/addresses/address + // - global/addresses/address + // - address IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. 
Valid options @@ -4768,8 +6590,8 @@ type ForwardingRule struct { } func (s *ForwardingRule) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRule - raw := noMethod(*s) + type NoMethod ForwardingRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4796,6 +6618,9 @@ type ForwardingRuleAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ForwardingRuleAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4818,8 +6643,110 @@ type ForwardingRuleAggregatedList struct { } func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleAggregatedList - raw := noMethod(*s) + type NoMethod ForwardingRuleAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ForwardingRuleAggregatedListWarning: [Output Only] Informational +// warning message. +type ForwardingRuleAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ForwardingRuleAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ForwardingRuleAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ForwardingRuleAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ForwardingRuleAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ForwardingRuleAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4846,6 +6773,9 @@ type ForwardingRuleList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ForwardingRuleListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4868,8 +6798,110 @@ type ForwardingRuleList struct { } func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleList - raw := noMethod(*s) + type NoMethod ForwardingRuleList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ForwardingRuleListWarning: [Output Only] Informational warning +// message. +type ForwardingRuleListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ForwardingRuleListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ForwardingRuleListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ForwardingRuleListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ForwardingRuleListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ForwardingRuleListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4900,8 +6932,8 @@ type ForwardingRulesScopedList struct { } func (s *ForwardingRulesScopedList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedList - raw := noMethod(*s) + type NoMethod ForwardingRulesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4915,9 +6947,13 @@ type ForwardingRulesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4928,7 +6964,9 @@ type ForwardingRulesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4959,8 +6997,8 @@ type ForwardingRulesScopedListWarning struct { } func (s *ForwardingRulesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedListWarning - raw := noMethod(*s) + type NoMethod ForwardingRulesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4996,8 +7034,8 @@ type ForwardingRulesScopedListWarningData struct { } func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedListWarningData - raw := noMethod(*s) + type NoMethod ForwardingRulesScopedListWarningData + 
raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5039,18 +7077,15 @@ type GlobalSetLabelsRequest struct { } func (s *GlobalSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod GlobalSetLabelsRequest - raw := noMethod(*s) + type NoMethod GlobalSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GuestOsFeature: Guest OS features. type GuestOsFeature struct { - // Type: The type of supported feature. Currently only - // VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the - // server might also populate this property with the value WINDOWS to - // indicate that this is a Windows image. This value is purely - // informational and does not enable or disable any features. + // Type: The ID of a supported feature. Read Enabling guest operating + // system features to see a list of available options. // // Possible values: // "FEATURE_TYPE_UNSPECIFIED" @@ -5076,8 +7111,8 @@ type GuestOsFeature struct { } func (s *GuestOsFeature) MarshalJSON() ([]byte, error) { - type noMethod GuestOsFeature - raw := noMethod(*s) + type NoMethod GuestOsFeature + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5126,8 +7161,8 @@ type HTTPHealthCheck struct { } func (s *HTTPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HTTPHealthCheck - raw := noMethod(*s) + type NoMethod HTTPHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5176,8 +7211,8 @@ type HTTPSHealthCheck struct { } func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HTTPSHealthCheck - raw := noMethod(*s) + type NoMethod HTTPSHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5274,8 +7309,8 @@ type HealthCheck struct { } func (s *HealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HealthCheck - raw := 
noMethod(*s) + type NoMethod HealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5302,6 +7337,9 @@ type HealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5324,8 +7362,109 @@ type HealthCheckList struct { } func (s *HealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckList - raw := noMethod(*s) + type NoMethod HealthCheckList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HealthCheckListWarning: [Output Only] Informational warning message. +type HealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheckListWarning) MarshalJSON() ([]byte, error) { + type NoMethod HealthCheckListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *HealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod HealthCheckListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5356,8 +7495,8 @@ type HealthCheckReference struct { } func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckReference - raw := noMethod(*s) + type NoMethod HealthCheckReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5396,8 +7535,8 @@ type HealthStatus struct { } func (s *HealthStatus) MarshalJSON() ([]byte, error) { - type noMethod HealthStatus - raw := noMethod(*s) + type NoMethod HealthStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5436,8 +7575,8 @@ type HostRule struct { } func (s *HostRule) MarshalJSON() ([]byte, error) { - type noMethod HostRule - raw := noMethod(*s) + type NoMethod HostRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5527,8 +7666,8 @@ type HttpHealthCheck struct { } func (s *HttpHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheck - raw := noMethod(*s) + type NoMethod HttpHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5555,6 +7694,9 @@ type HttpHealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HttpHealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -5577,8 +7719,110 @@ type HttpHealthCheckList struct { } func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheckList - raw := noMethod(*s) + type NoMethod HttpHealthCheckList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpHealthCheckListWarning: [Output Only] Informational warning +// message. +type HttpHealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HttpHealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHealthCheckListWarning) MarshalJSON() ([]byte, error) { + type NoMethod HttpHealthCheckListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpHealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod HttpHealthCheckListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5667,8 +7911,8 @@ type HttpsHealthCheck struct { } func (s *HttpsHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheck - raw := noMethod(*s) + type NoMethod HttpsHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5695,6 +7939,9 @@ type HttpsHealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HttpsHealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5717,12 +7964,115 @@ type HttpsHealthCheckList struct { } func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheckList - raw := noMethod(*s) + type NoMethod HttpsHealthCheckList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Image: An Image resource. +// HttpsHealthCheckListWarning: [Output Only] Informational warning +// message. 
+type HttpsHealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HttpsHealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpsHealthCheckListWarning) MarshalJSON() ([]byte, error) { + type NoMethod HttpsHealthCheckListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpsHealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod HttpsHealthCheckListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Image: An Image resource. (== resource_for beta.images ==) (== +// resource_for v1.images ==) type Image struct { // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google // Cloud Storage (in bytes). @@ -5750,18 +8100,9 @@ type Image struct { // RFC1035. Family string `json:"family,omitempty"` - // GuestOsFeatures: A list of features to enable on the guest OS. - // Applicable for bootable images only. Currently, only one feature can - // be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows each virtual CPU to - // have its own queue. For Windows images, you can only enable - // VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or - // higher. Linux images with kernel versions 3.17 and higher will - // support VIRTIO_SCSI_MULTIQUEUE. - // - // For new Windows images, the server might also populate this field - // with the value WINDOWS, to indicate that this is a Windows image. - // This value is purely informational and does not enable or disable any - // features. + // GuestOsFeatures: A list of features to enable on the guest operating + // system. Applicable only for bootable images. Read Enabling guest + // operating system features to see a list of available options. GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -5840,6 +8181,24 @@ type Image struct { // the current or a previous instance of a given disk name. SourceDiskId string `json:"sourceDiskId,omitempty"` + // SourceImage: URL of the source image used to create this image. 
This + // can be a full or valid partial URL. You must provide exactly one of: + // + // - this property, or + // - the rawDisk.source property, or + // - the sourceDisk property in order to create an image. + SourceImage string `json:"sourceImage,omitempty"` + + // SourceImageEncryptionKey: The customer-supplied encryption key of the + // source image. Required if the source image is protected by a + // customer-supplied encryption key. + SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` + + // SourceImageId: [Output Only] The ID value of the image used to create + // this image. This value may be used to determine whether the image was + // taken from the current or a previous instance of a given image name. + SourceImageId string `json:"sourceImageId,omitempty"` + // SourceType: The type of the image used to create this disk. The // default and only value is RAW // @@ -5881,8 +8240,8 @@ type Image struct { } func (s *Image) MarshalJSON() ([]byte, error) { - type noMethod Image - raw := noMethod(*s) + type NoMethod Image + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5924,8 +8283,8 @@ type ImageRawDisk struct { } func (s *ImageRawDisk) MarshalJSON() ([]byte, error) { - type noMethod ImageRawDisk - raw := noMethod(*s) + type NoMethod ImageRawDisk + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5952,6 +8311,9 @@ type ImageList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ImageListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -5974,12 +8336,114 @@ type ImageList struct { } func (s *ImageList) MarshalJSON() ([]byte, error) { - type noMethod ImageList - raw := noMethod(*s) + type NoMethod ImageList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Instance: An Instance resource. +// ImageListWarning: [Output Only] Informational warning message. +type ImageListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ImageListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ImageListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ImageListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ImageListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ImageListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ImageListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Instance: An Instance resource. (== resource_for beta.instances ==) +// (== resource_for v1.instances ==) type Instance struct { // CanIpForward: Allows this instance to send and receive packets with // non-matching destination or source IPs. This is required if you plan @@ -5994,6 +8458,10 @@ type Instance struct { // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // DeletionProtection: Whether the resource should be protected against + // deletion. + DeletionProtection bool `json:"deletionProtection,omitempty"` + // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` @@ -6056,6 +8524,12 @@ type Instance struct { // This includes custom metadata and predefined keys. Metadata *Metadata `json:"metadata,omitempty"` + // MinCpuPlatform: Specifies a minimum CPU platform for the VM instance. + // Applicable values are the friendly names of CPU platforms, such as + // minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy + // Bridge". 
+ MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + // Name: The name of the resource, provided by the client when initially // creating the resource. The resource name must be 1-63 characters // long, and comply with RFC1035. Specifically, the name must be 1-63 @@ -6142,8 +8616,8 @@ type Instance struct { } func (s *Instance) MarshalJSON() ([]byte, error) { - type noMethod Instance - raw := noMethod(*s) + type NoMethod Instance + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6171,6 +8645,9 @@ type InstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6193,11 +8670,117 @@ type InstanceAggregatedList struct { } func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceAggregatedList - raw := noMethod(*s) + type NoMethod InstanceAggregatedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceAggregatedListWarning: [Output Only] Informational warning +// message. +type InstanceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceGroup: InstanceGroups (== resource_for beta.instanceGroups +// ==) (== resource_for v1.instanceGroups ==) (== resource_for +// beta.regionInstanceGroups ==) (== resource_for +// v1.regionInstanceGroups ==) type InstanceGroup struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance group in RFC3339 text format. @@ -6282,8 +8865,8 @@ type InstanceGroup struct { } func (s *InstanceGroup) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroup - raw := noMethod(*s) + type NoMethod InstanceGroup + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6311,6 +8894,9 @@ type InstanceGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6333,8 +8919,110 @@ type InstanceGroupAggregatedList struct { } func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupAggregatedList - raw := noMethod(*s) + type NoMethod InstanceGroupAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceGroupAggregatedListWarning: [Output Only] Informational +// warning message. +type InstanceGroupAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6362,6 +9050,9 @@ type InstanceGroupList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6384,12 +9075,118 @@ type InstanceGroupList struct { } func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupList - raw := noMethod(*s) + type NoMethod InstanceGroupList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceGroupManager: An Instance Group Manager resource. +// InstanceGroupListWarning: [Output Only] Informational warning +// message. +type InstanceGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceGroupManager: An Instance Group Manager resource. (== +// resource_for beta.instanceGroupManagers ==) (== resource_for +// v1.instanceGroupManagers ==) (== resource_for +// beta.regionInstanceGroupManagers ==) (== resource_for +// v1.regionInstanceGroupManagers ==) type InstanceGroupManager struct { // BaseInstanceName: The base instance name to use for instances in this // group. The value must be 1-58 characters long. Instances are named by @@ -6484,8 +9281,8 @@ type InstanceGroupManager struct { } func (s *InstanceGroupManager) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManager - raw := noMethod(*s) + type NoMethod InstanceGroupManager + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6558,8 +9355,8 @@ type InstanceGroupManagerActionsSummary struct { } func (s *InstanceGroupManagerActionsSummary) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerActionsSummary - raw := noMethod(*s) + type NoMethod InstanceGroupManagerActionsSummary + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6587,6 +9384,9 @@ type InstanceGroupManagerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupManagerAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -6609,8 +9409,110 @@ type InstanceGroupManagerAggregatedList struct { } func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAggregatedList - raw := noMethod(*s) + type NoMethod InstanceGroupManagerAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceGroupManagerAggregatedListWarning: [Output Only] +// Informational warning message. +type InstanceGroupManagerAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupManagerAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagerAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6640,6 +9542,9 @@ type InstanceGroupManagerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupManagerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6662,8 +9567,110 @@ type InstanceGroupManagerList struct { } func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerList - raw := noMethod(*s) + type NoMethod InstanceGroupManagerList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceGroupManagerListWarning: [Output Only] Informational warning +// message. +type InstanceGroupManagerListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupManagerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6691,8 +9698,8 @@ type InstanceGroupManagersAbandonInstancesRequest struct { } func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersAbandonInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersAbandonInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6720,8 +9727,8 @@ type InstanceGroupManagersDeleteInstancesRequest struct { } func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersDeleteInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersDeleteInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6753,8 +9760,8 @@ type InstanceGroupManagersListManagedInstancesResponse struct { } func (s *InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersListManagedInstancesResponse - raw := noMethod(*s) + type NoMethod InstanceGroupManagersListManagedInstancesResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6782,8 +9789,8 @@ type InstanceGroupManagersRecreateInstancesRequest struct { } func (s *InstanceGroupManagersRecreateInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersRecreateInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersRecreateInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -6816,8 +9823,8 @@ type InstanceGroupManagersScopedList struct { } func (s *InstanceGroupManagersScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedList - raw := noMethod(*s) + type NoMethod InstanceGroupManagersScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6832,9 +9839,13 @@ type InstanceGroupManagersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -6845,7 +9856,9 @@ type InstanceGroupManagersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -6876,8 +9889,8 @@ type InstanceGroupManagersScopedListWarning struct { } func (s *InstanceGroupManagersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupManagersScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6913,8 +9926,8 @@ type InstanceGroupManagersScopedListWarningData struct { } func (s *InstanceGroupManagersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupManagersScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6943,8 +9956,8 @@ type 
InstanceGroupManagersSetInstanceTemplateRequest struct { } func (s *InstanceGroupManagersSetInstanceTemplateRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersSetInstanceTemplateRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersSetInstanceTemplateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -6982,8 +9995,8 @@ type InstanceGroupManagersSetTargetPoolsRequest struct { } func (s *InstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersSetTargetPoolsRequest - raw := noMethod(*s) + type NoMethod InstanceGroupManagersSetTargetPoolsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7009,8 +10022,8 @@ type InstanceGroupsAddInstancesRequest struct { } func (s *InstanceGroupsAddInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsAddInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsAddInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7038,6 +10051,9 @@ type InstanceGroupsListInstances struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupsListInstancesWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -7060,8 +10076,110 @@ type InstanceGroupsListInstances struct { } func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstances - raw := noMethod(*s) + type NoMethod InstanceGroupsListInstances + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceGroupsListInstancesWarning: [Output Only] Informational +// warning message. +type InstanceGroupsListInstancesWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupsListInstancesWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupsListInstancesWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupsListInstancesWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupsListInstancesWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7094,8 +10212,8 @@ type InstanceGroupsListInstancesRequest struct { } func (s *InstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsListInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7121,8 +10239,8 @@ type InstanceGroupsRemoveInstancesRequest struct { } func (s *InstanceGroupsRemoveInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsRemoveInstancesRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsRemoveInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7154,8 +10272,8 @@ type InstanceGroupsScopedList struct { } func (s *InstanceGroupsScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedList - raw := noMethod(*s) + type NoMethod InstanceGroupsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -7170,9 +10288,13 @@ type InstanceGroupsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7183,7 +10305,9 @@ type InstanceGroupsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7214,8 +10338,8 @@ type InstanceGroupsScopedListWarning struct { } func (s *InstanceGroupsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedListWarning - raw := noMethod(*s) + type NoMethod InstanceGroupsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7251,8 +10375,8 @@ type InstanceGroupsScopedListWarningData struct { } func (s *InstanceGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedListWarningData - raw := noMethod(*s) + type NoMethod InstanceGroupsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7286,8 +10410,8 @@ type InstanceGroupsSetNamedPortsRequest struct { } func (s *InstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsSetNamedPortsRequest - raw := noMethod(*s) + type NoMethod InstanceGroupsSetNamedPortsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7315,6 +10439,9 @@ type InstanceList struct { // SelfLink: [Output Only] Server-defined 
URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -7337,8 +10464,109 @@ type InstanceList struct { } func (s *InstanceList) MarshalJSON() ([]byte, error) { - type noMethod InstanceList - raw := noMethod(*s) + type NoMethod InstanceList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceListWarning: [Output Only] Informational warning message. +type InstanceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. 
+ Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7380,8 +10608,8 @@ type InstanceMoveRequest struct { } func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceMoveRequest - raw := noMethod(*s) + type NoMethod InstanceMoveRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7421,6 +10649,14 @@ type InstanceProperties struct { // more information. Metadata *Metadata `json:"metadata,omitempty"` + // MinCpuPlatform: Minimum cpu/platform to be used by this instance. The + // instance may be scheduled on the specified or newer cpu/platform. + // Applicable values are the friendly names of CPU platforms, such as + // minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy + // Bridge". For more information, read Specifying a Minimum CPU + // Platform. + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + // NetworkInterfaces: An array of network access configurations for this // interface. 
NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` @@ -7459,8 +10695,8 @@ type InstanceProperties struct { } func (s *InstanceProperties) MarshalJSON() ([]byte, error) { - type noMethod InstanceProperties - raw := noMethod(*s) + type NoMethod InstanceProperties + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7486,12 +10722,13 @@ type InstanceReference struct { } func (s *InstanceReference) MarshalJSON() ([]byte, error) { - type noMethod InstanceReference - raw := noMethod(*s) + type NoMethod InstanceReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceTemplate: An Instance Template resource. +// InstanceTemplate: An Instance Template resource. (== resource_for +// beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==) type InstanceTemplate struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance template in RFC3339 text format. @@ -7548,8 +10785,8 @@ type InstanceTemplate struct { } func (s *InstanceTemplate) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplate - raw := noMethod(*s) + type NoMethod InstanceTemplate + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7577,6 +10814,9 @@ type InstanceTemplateList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceTemplateListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -7599,8 +10839,110 @@ type InstanceTemplateList struct { } func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplateList - raw := noMethod(*s) + type NoMethod InstanceTemplateList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceTemplateListWarning: [Output Only] Informational warning +// message. +type InstanceTemplateListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceTemplateListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplateListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceTemplateListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplateListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7643,8 +10985,8 @@ type InstanceWithNamedPorts struct { } func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { - type noMethod InstanceWithNamedPorts - raw := noMethod(*s) + type NoMethod InstanceWithNamedPorts + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7674,8 +11016,8 @@ type InstancesScopedList struct { } func (s *InstancesScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedList - raw := noMethod(*s) + type NoMethod InstancesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7689,9 +11031,13 @@ type InstancesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7702,7 +11048,9 @@ type InstancesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // 
"RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7733,8 +11081,8 @@ type InstancesScopedListWarning struct { } func (s *InstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedListWarning - raw := noMethod(*s) + type NoMethod InstancesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7770,8 +11118,8 @@ type InstancesScopedListWarningData struct { } func (s *InstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedListWarningData - raw := noMethod(*s) + type NoMethod InstancesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7802,8 +11150,8 @@ type InstancesSetLabelsRequest struct { } func (s *InstancesSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetLabelsRequest - raw := noMethod(*s) + type NoMethod InstancesSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7831,8 +11179,8 @@ type InstancesSetMachineResourcesRequest struct { } func (s *InstancesSetMachineResourcesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetMachineResourcesRequest - raw := noMethod(*s) + type NoMethod InstancesSetMachineResourcesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7860,8 +11208,37 @@ type InstancesSetMachineTypeRequest struct { } func (s *InstancesSetMachineTypeRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetMachineTypeRequest - raw := noMethod(*s) + type NoMethod InstancesSetMachineTypeRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstancesSetMinCpuPlatformRequest struct { + // MinCpuPlatform: Minimum 
cpu/platform this instance should be started + // at. + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MinCpuPlatform") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MinCpuPlatform") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstancesSetMinCpuPlatformRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesSetMinCpuPlatformRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7891,8 +11268,8 @@ type InstancesSetServiceAccountRequest struct { } func (s *InstancesSetServiceAccountRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetServiceAccountRequest - raw := noMethod(*s) + type NoMethod InstancesSetServiceAccountRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7925,8 +11302,1303 @@ type InstancesStartWithEncryptionKeyRequest struct { } func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesStartWithEncryptionKeyRequest - raw := noMethod(*s) + type NoMethod InstancesStartWithEncryptionKeyRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Interconnect: Represents an Interconnects resource. The Interconnects +// resource is a dedicated connection between Google's network and your +// on-premises network. For more information, see the Dedicated +// overview page. (== resource_for v1.interconnects ==) (== resource_for +// beta.interconnects ==) +type Interconnect struct { + // AdminEnabled: Administrative status of the interconnect. When this is + // set to true, the Interconnect is functional and can carry traffic. + // When set to false, no packets can be carried over the interconnect + // and no BGP routes are exchanged over it. By default, the status is + // set to true. + AdminEnabled bool `json:"adminEnabled,omitempty"` + + // CircuitInfos: [Output Only] List of CircuitInfo objects, that + // describe the individual circuits in this LAG. + CircuitInfos []*InterconnectCircuitInfo `json:"circuitInfos,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CustomerName: Customer name, to put in the Letter of Authorization as + // the party authorized to request a crossconnect. + CustomerName string `json:"customerName,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // ExpectedOutages: [Output Only] List of outages expected for this + // Interconnect. + ExpectedOutages []*InterconnectOutageNotification `json:"expectedOutages,omitempty"` + + // GoogleIpAddress: [Output Only] IP address configured on the Google + // side of the Interconnect link. This can be used only for ping tests. + GoogleIpAddress string `json:"googleIpAddress,omitempty"` + + // GoogleReferenceId: [Output Only] Google reference ID; to be used when + // raising support tickets with Google or otherwise to debug backend + // connectivity issues. + GoogleReferenceId string `json:"googleReferenceId,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // InterconnectAttachments: [Output Only] A list of the URLs of all + // InterconnectAttachments configured to use this Interconnect. + InterconnectAttachments []string `json:"interconnectAttachments,omitempty"` + + // InterconnectType: Type of interconnect. Note that "IT_PRIVATE" has + // been deprecated in favor of "DEDICATED" + // + // Possible values: + // "DEDICATED" + // "IT_PRIVATE" + InterconnectType string `json:"interconnectType,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#interconnect + // for interconnects. + Kind string `json:"kind,omitempty"` + + // LinkType: Type of link requested. This field indicates speed of each + // of the links in the bundle, not the entire bundle. Only 10G per link + // is allowed for a dedicated interconnect. 
Options: Ethernet_10G_LR + // + // Possible values: + // "LINK_TYPE_ETHERNET_10G_LR" + LinkType string `json:"linkType,omitempty"` + + // Location: URL of the InterconnectLocation object that represents + // where this connection is to be provisioned. + Location string `json:"location,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // NocContactEmail: Email address to contact the customer NOC for + // operations and maintenance notifications regarding this Interconnect. + // If specified, this will be used for notifications in addition to all + // other forms described, such as Stackdriver logs alerting and Cloud + // Notifications. + NocContactEmail string `json:"nocContactEmail,omitempty"` + + // OperationalStatus: [Output Only] The current status of whether or not + // this Interconnect is functional. + // + // Possible values: + // "OS_ACTIVE" + // "OS_UNPROVISIONED" + OperationalStatus string `json:"operationalStatus,omitempty"` + + // PeerIpAddress: [Output Only] IP address configured on the customer + // side of the Interconnect link. The customer should configure this IP + // address during turnup when prompted by Google NOC. This can be used + // only for ping tests. + PeerIpAddress string `json:"peerIpAddress,omitempty"` + + // ProvisionedLinkCount: [Output Only] Number of links actually + // provisioned in this interconnect. 
+ ProvisionedLinkCount int64 `json:"provisionedLinkCount,omitempty"` + + // RequestedLinkCount: Target number of physical links in the link + // bundle, as requested by the customer. + RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AdminEnabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdminEnabled") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Interconnect) MarshalJSON() ([]byte, error) { + type NoMethod Interconnect + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachment: Represents an InterconnectAttachment (VLAN +// attachment) resource. For more information, see Creating VLAN +// Attachments. 
(== resource_for beta.interconnectAttachments ==) (== +// resource_for v1.interconnectAttachments ==) +type InterconnectAttachment struct { + // CloudRouterIpAddress: [Output Only] IPv4 address + prefix length to + // be configured on Cloud Router Interface for this interconnect + // attachment. + CloudRouterIpAddress string `json:"cloudRouterIpAddress,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CustomerRouterIpAddress: [Output Only] IPv4 address + prefix length + // to be configured on the customer router subinterface for this + // interconnect attachment. + CustomerRouterIpAddress string `json:"customerRouterIpAddress,omitempty"` + + // Description: An optional description of this resource. + Description string `json:"description,omitempty"` + + // GoogleReferenceId: [Output Only] Google reference ID, to be used when + // raising support tickets with Google or otherwise to debug backend + // connectivity issues. + GoogleReferenceId string `json:"googleReferenceId,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Interconnect: URL of the underlying Interconnect object that this + // attachment's traffic will traverse through. + Interconnect string `json:"interconnect,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#interconnectAttachment for interconnect attachments. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // OperationalStatus: [Output Only] The current status of whether or not + // this interconnect attachment is functional. + // + // Possible values: + // "OS_ACTIVE" + // "OS_UNPROVISIONED" + OperationalStatus string `json:"operationalStatus,omitempty"` + + // PrivateInterconnectInfo: [Output Only] Information specific to an + // InterconnectAttachment. This property is populated if the + // interconnect that this is attached to is of type DEDICATED. + PrivateInterconnectInfo *InterconnectAttachmentPrivateInfo `json:"privateInterconnectInfo,omitempty"` + + // Region: [Output Only] URL of the region where the regional + // interconnect attachment resides. + Region string `json:"region,omitempty"` + + // Router: URL of the cloud router to be used for dynamic routing. This + // router must be in the same region as this InterconnectAttachment. The + // InterconnectAttachment will automatically connect the Interconnect to + // the network & region within which the Cloud Router is configured. + Router string `json:"router,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "CloudRouterIpAddress") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CloudRouterIpAddress") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachment) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachment + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of InterconnectAttachmentsScopedList resources. + Items map[string]InterconnectAttachmentsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#interconnectAttachmentAggregatedList for aggregated lists of + // interconnect attachments. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. 
+ Warning *InterconnectAttachmentAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentAggregatedListWarning: [Output Only] +// Informational warning message. +type InterconnectAttachmentAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectAttachmentAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentList: Response to the list request, and +// contains a list of interconnect attachments. +type InterconnectAttachmentList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of InterconnectAttachment resources. + Items []*InterconnectAttachment `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#interconnectAttachmentList for lists of interconnect + // attachments. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InterconnectAttachmentListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentListWarning: [Output Only] Informational +// warning message. +type InterconnectAttachmentListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectAttachmentListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentPrivateInfo: Information for an interconnect +// attachment when this belongs to an interconnect of type DEDICATED. +type InterconnectAttachmentPrivateInfo struct { + // Tag8021q: [Output Only] 802.1q encapsulation tag to be used for + // traffic between Google and the customer, going to and from this + // network and region. + Tag8021q int64 `json:"tag8021q,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Tag8021q") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Tag8021q") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentPrivateInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentsScopedList struct { + // InterconnectAttachments: List of interconnect attachments contained + // in this scope. 
+ InterconnectAttachments []*InterconnectAttachment `json:"interconnectAttachments,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *InterconnectAttachmentsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InterconnectAttachments") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InterconnectAttachments") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentsScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type InterconnectAttachmentsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectAttachmentsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentsScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectCircuitInfo: Describes a single physical circuit between +// the Customer and Google. CircuitInfo objects are created by Google, +// so all fields are output only. Next id: 4 +type InterconnectCircuitInfo struct { + // CustomerDemarcId: Customer-side demarc ID for this circuit. + CustomerDemarcId string `json:"customerDemarcId,omitempty"` + + // GoogleCircuitId: Google-assigned unique ID for this circuit. Assigned + // at circuit turn-up. + GoogleCircuitId string `json:"googleCircuitId,omitempty"` + + // GoogleDemarcId: Google-side demarc ID for this circuit. Assigned at + // circuit turn-up and provided by Google to the customer in the LOA. + GoogleDemarcId string `json:"googleDemarcId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomerDemarcId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomerDemarcId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectCircuitInfo) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectCircuitInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectList: Response to the list request, and contains a list +// of interconnects. +type InterconnectList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Interconnect resources. + Items []*Interconnect `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#interconnectList + // for lists of interconnects. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InterconnectListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectListWarning: [Output Only] Informational warning message. +type InterconnectListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectLocation: Represents an InterconnectLocations resource. +// The InterconnectLocations resource describes the locations where you +// can connect to Google's networks. For more information, see +// Colocation Facilities. +type InterconnectLocation struct { + // Address: [Output Only] The postal address of the Point of Presence, + // each line in the address is separated by a newline character. + Address string `json:"address,omitempty"` + + // AvailabilityZone: [Output Only] Availability zone for this location. + // Within a metropolitan area (metro), maintenance will not be + // simultaneously scheduled in more than one availability zone. Example: + // "zone1" or "zone2". 
+ AvailabilityZone string `json:"availabilityZone,omitempty"` + + // City: [Output Only] Metropolitan area designator that indicates which + // city an interconnect is located. For example: "Chicago, IL", + // "Amsterdam, Netherlands". + City string `json:"city,omitempty"` + + // Continent: [Output Only] Continent for this location. + // + // Possible values: + // "AFRICA" + // "ASIA_PAC" + // "C_AFRICA" + // "C_ASIA_PAC" + // "C_EUROPE" + // "C_NORTH_AMERICA" + // "C_SOUTH_AMERICA" + // "EUROPE" + // "NORTH_AMERICA" + // "SOUTH_AMERICA" + Continent string `json:"continent,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: [Output Only] An optional description of the resource. + Description string `json:"description,omitempty"` + + // FacilityProvider: [Output Only] The name of the provider for this + // facility (e.g., EQUINIX). + FacilityProvider string `json:"facilityProvider,omitempty"` + + // FacilityProviderFacilityId: [Output Only] A provider-assigned + // Identifier for this facility (e.g., Ashburn-DC1). + FacilityProviderFacilityId string `json:"facilityProviderFacilityId,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#interconnectLocation for interconnect locations. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // PeeringdbFacilityId: [Output Only] The peeringdb identifier for this + // facility (corresponding with a netfac type in peeringdb). 
+ PeeringdbFacilityId string `json:"peeringdbFacilityId,omitempty"` + + // RegionInfos: [Output Only] A list of InterconnectLocation.RegionInfo + // objects, that describe parameters pertaining to the relation between + // this InterconnectLocation and various Google Cloud regions. + RegionInfos []*InterconnectLocationRegionInfo `json:"regionInfos,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectLocation) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectLocationList: Response to the list request, and contains +// a list of interconnect locations. +type InterconnectLocationList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. 
+ Id string `json:"id,omitempty"` + + // Items: A list of InterconnectLocation resources. + Items []*InterconnectLocation `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#interconnectLocationList for lists of interconnect locations. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InterconnectLocationListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectLocationList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectLocationList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectLocationListWarning: [Output Only] Informational warning +// message. +type InterconnectLocationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectLocationListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectLocationListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectLocationListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectLocationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectLocationListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectLocationListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectLocationRegionInfo: Information about any potential +// InterconnectAttachments between an Interconnect at a specific +// InterconnectLocation, and a specific Cloud Region. +type InterconnectLocationRegionInfo struct { + // ExpectedRttMs: Expected round-trip time in milliseconds, from this + // InterconnectLocation to a VM in this region. + ExpectedRttMs int64 `json:"expectedRttMs,omitempty,string"` + + // LocationPresence: Identifies the network presence of this location. + // + // Possible values: + // "GLOBAL" + // "LOCAL_REGION" + // "LP_GLOBAL" + // "LP_LOCAL_REGION" + LocationPresence string `json:"locationPresence,omitempty"` + + // Region: URL for the region of this location. + Region string `json:"region,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExpectedRttMs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExpectedRttMs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectLocationRegionInfo) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectLocationRegionInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectOutageNotification: Description of a planned outage on +// this Interconnect. Next id: 9 +type InterconnectOutageNotification struct { + // AffectedCircuits: Iff issue_type is IT_PARTIAL_OUTAGE, a list of the + // Google-side circuit IDs that will be affected. + AffectedCircuits []string `json:"affectedCircuits,omitempty"` + + // Description: A description about the purpose of the outage. + Description string `json:"description,omitempty"` + + // EndTime: Scheduled end time for the outage (milliseconds since Unix + // epoch). + EndTime int64 `json:"endTime,omitempty,string"` + + // IssueType: Form this outage is expected to take. Note that the "IT_" + // versions of this enum have been deprecated in favor of the unprefixed + // values. + // + // Possible values: + // "IT_OUTAGE" + // "IT_PARTIAL_OUTAGE" + // "OUTAGE" + // "PARTIAL_OUTAGE" + IssueType string `json:"issueType,omitempty"` + + // Name: Unique identifier for this outage notification. 
+ Name string `json:"name,omitempty"` + + // Source: The party that generated this notification. Note that + // "NSRC_GOOGLE" has been deprecated in favor of "GOOGLE" + // + // Possible values: + // "GOOGLE" + // "NSRC_GOOGLE" + Source string `json:"source,omitempty"` + + // StartTime: Scheduled start time for the outage (milliseconds since + // Unix epoch). + StartTime int64 `json:"startTime,omitempty,string"` + + // State: State of this notification. Note that the "NS_" versions of + // this enum have been deprecated in favor of the unprefixed values. + // + // Possible values: + // "ACTIVE" + // "CANCELLED" + // "NS_ACTIVE" + // "NS_CANCELED" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AffectedCircuits") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AffectedCircuits") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectOutageNotification + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -7969,12 +12641,13 @@ type License struct { } func (s *License) MarshalJSON() ([]byte, error) { - type noMethod License - raw := noMethod(*s) + type NoMethod License + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MachineType: A Machine Type resource. +// MachineType: A Machine Type resource. (== resource_for +// v1.machineTypes ==) (== resource_for beta.machineTypes ==) type MachineType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -8057,8 +12730,8 @@ type MachineType struct { } func (s *MachineType) MarshalJSON() ([]byte, error) { - type noMethod MachineType - raw := noMethod(*s) + type NoMethod MachineType + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8084,8 +12757,8 @@ type MachineTypeScratchDisks struct { } func (s *MachineTypeScratchDisks) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeScratchDisks - raw := noMethod(*s) + type NoMethod MachineTypeScratchDisks + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8113,6 +12786,9 @@ type MachineTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *MachineTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -8135,8 +12811,110 @@ type MachineTypeAggregatedList struct { } func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeAggregatedList - raw := noMethod(*s) + type NoMethod MachineTypeAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MachineTypeAggregatedListWarning: [Output Only] Informational warning +// message. +type MachineTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MachineTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod MachineTypeAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MachineTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod MachineTypeAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8164,6 +12942,9 @@ type MachineTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *MachineTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8186,8 +12967,109 @@ type MachineTypeList struct { } func (s *MachineTypeList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeList - raw := noMethod(*s) + type NoMethod MachineTypeList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MachineTypeListWarning: [Output Only] Informational warning message. +type MachineTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MachineTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeListWarning) MarshalJSON() ([]byte, error) { + type NoMethod MachineTypeListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MachineTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *MachineTypeListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod MachineTypeListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8218,8 +13100,8 @@ type MachineTypesScopedList struct { } func (s *MachineTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedList - raw := noMethod(*s) + type NoMethod MachineTypesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8233,9 +13115,13 @@ type MachineTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -8246,7 +13132,9 @@ type MachineTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -8277,8 +13165,8 @@ type MachineTypesScopedListWarning struct { } func (s *MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedListWarning - raw := noMethod(*s) + type NoMethod MachineTypesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8314,8 +13202,8 @@ type MachineTypesScopedListWarningData struct { } func (s *MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedListWarningData - raw := noMethod(*s) + type NoMethod MachineTypesScopedListWarningData + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8400,8 +13288,8 @@ type ManagedInstance struct { } func (s *ManagedInstance) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstance - raw := noMethod(*s) + type NoMethod ManagedInstance + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8428,8 +13316,8 @@ type ManagedInstanceLastAttempt struct { } func (s *ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttempt - raw := noMethod(*s) + type NoMethod ManagedInstanceLastAttempt + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8458,8 +13346,8 @@ type ManagedInstanceLastAttemptErrors struct { } func (s *ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttemptErrors - raw := noMethod(*s) + type NoMethod ManagedInstanceLastAttemptErrors + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8492,8 +13380,8 @@ type ManagedInstanceLastAttemptErrorsErrors struct { } func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttemptErrorsErrors - raw := noMethod(*s) + type NoMethod ManagedInstanceLastAttemptErrorsErrors + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8533,8 +13421,8 @@ type Metadata struct { } func (s *Metadata) MarshalJSON() ([]byte, error) { - type noMethod Metadata - raw := noMethod(*s) + type NoMethod Metadata + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8570,8 +13458,8 @@ type MetadataItems struct { } func (s *MetadataItems) MarshalJSON() ([]byte, error) { - type noMethod MetadataItems - raw := noMethod(*s) + type NoMethod MetadataItems + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8602,13 +13490,14 
@@ type NamedPort struct { } func (s *NamedPort) MarshalJSON() ([]byte, error) { - type noMethod NamedPort - raw := noMethod(*s) + type NoMethod NamedPort + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Network: Represents a Network resource. Read Networks and Firewalls -// for more information. +// for more information. (== resource_for v1.networks ==) (== +// resource_for beta.networks ==) type Network struct { // IPv4Range: The range of internal addresses that are legal on this // network. This range is a CIDR specification, for example: @@ -8657,6 +13546,11 @@ type Network struct { // Peerings: [Output Only] List of network peerings for the resource. Peerings []*NetworkPeering `json:"peerings,omitempty"` + // RoutingConfig: The network-level routing configuration for this + // network. Used by Cloud Router to determine what type of network-wide + // routing behavior to enforce. + RoutingConfig *NetworkRoutingConfig `json:"routingConfig,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -8686,8 +13580,8 @@ type Network struct { } func (s *Network) MarshalJSON() ([]byte, error) { - type noMethod Network - raw := noMethod(*s) + type NoMethod Network + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8766,8 +13660,8 @@ type NetworkInterface struct { } func (s *NetworkInterface) MarshalJSON() ([]byte, error) { - type noMethod NetworkInterface - raw := noMethod(*s) + type NoMethod NetworkInterface + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8795,6 +13689,9 @@ type NetworkList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. 
+ Warning *NetworkListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8817,8 +13714,109 @@ type NetworkList struct { } func (s *NetworkList) MarshalJSON() ([]byte, error) { - type noMethod NetworkList - raw := noMethod(*s) + type NoMethod NetworkList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkListWarning: [Output Only] Informational warning message. +type NetworkListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*NetworkListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8879,8 +13877,47 @@ type NetworkPeering struct { } func (s *NetworkPeering) MarshalJSON() ([]byte, error) { - type noMethod NetworkPeering - raw := noMethod(*s) + type NoMethod NetworkPeering + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkRoutingConfig: A routing configuration attached to a network +// resource. The message includes the list of routers associated with +// the network, and a flag indicating the type of routing behavior to +// enforce network-wide. +type NetworkRoutingConfig struct { + // RoutingMode: The network-wide routing mode to use. If set to + // REGIONAL, this network's cloud routers will only advertise routes + // with subnetworks of this network in the same region as the router. If + // set to GLOBAL, this network's cloud routers will advertise routes + // with all subnetworks of this network, across regions. + // + // Possible values: + // "GLOBAL" + // "REGIONAL" + RoutingMode string `json:"routingMode,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"RoutingMode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RoutingMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { + type NoMethod NetworkRoutingConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8917,8 +13954,8 @@ type NetworksAddPeeringRequest struct { } func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksAddPeeringRequest - raw := noMethod(*s) + type NoMethod NetworksAddPeeringRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -8944,13 +13981,16 @@ type NetworksRemovePeeringRequest struct { } func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksRemovePeeringRequest - raw := noMethod(*s) + type NoMethod NetworksRemovePeeringRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Operation: An Operation resource, used to manage asynchronous API -// requests. +// requests. 
(== resource_for v1.globalOperations ==) (== resource_for +// beta.globalOperations ==) (== resource_for v1.regionOperations ==) +// (== resource_for beta.regionOperations ==) (== resource_for +// v1.zoneOperations ==) (== resource_for beta.zoneOperations ==) type Operation struct { // ClientOperationId: [Output Only] Reserved for future use. ClientOperationId string `json:"clientOperationId,omitempty"` @@ -9073,8 +14113,8 @@ type Operation struct { } func (s *Operation) MarshalJSON() ([]byte, error) { - type noMethod Operation - raw := noMethod(*s) + type NoMethod Operation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9103,8 +14143,8 @@ type OperationError struct { } func (s *OperationError) MarshalJSON() ([]byte, error) { - type noMethod OperationError - raw := noMethod(*s) + type NoMethod OperationError + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9137,8 +14177,8 @@ type OperationErrorErrors struct { } func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { - type noMethod OperationErrorErrors - raw := noMethod(*s) + type NoMethod OperationErrorErrors + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9150,9 +14190,13 @@ type OperationWarnings struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9163,7 +14207,9 @@ type OperationWarnings struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string 
`json:"code,omitempty"` @@ -9194,8 +14240,8 @@ type OperationWarnings struct { } func (s *OperationWarnings) MarshalJSON() ([]byte, error) { - type noMethod OperationWarnings - raw := noMethod(*s) + type NoMethod OperationWarnings + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9231,8 +14277,8 @@ type OperationWarningsData struct { } func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { - type noMethod OperationWarningsData - raw := noMethod(*s) + type NoMethod OperationWarningsData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9259,6 +14305,9 @@ type OperationAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *OperationAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9281,8 +14330,110 @@ type OperationAggregatedList struct { } func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod OperationAggregatedList - raw := noMethod(*s) + type NoMethod OperationAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationAggregatedListWarning: [Output Only] Informational warning +// message. +type OperationAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod OperationAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type OperationAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod OperationAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9310,6 +14461,9 @@ type OperationList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *OperationListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9332,8 +14486,109 @@ type OperationList struct { } func (s *OperationList) MarshalJSON() ([]byte, error) { - type noMethod OperationList - raw := noMethod(*s) + type NoMethod OperationList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationListWarning: [Output Only] Informational warning message. +type OperationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationListWarning) MarshalJSON() ([]byte, error) { + type NoMethod OperationListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type OperationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod OperationListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9363,8 +14618,8 @@ type OperationsScopedList struct { } func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedList - raw := noMethod(*s) + type NoMethod OperationsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9378,9 +14633,13 @@ type OperationsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9391,7 +14650,9 @@ type OperationsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9422,8 +14683,8 @@ type OperationsScopedListWarning struct { } func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarning - raw := noMethod(*s) + type NoMethod OperationsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9459,8 +14720,8 @@ type OperationsScopedListWarningData struct { } func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarningData - raw := noMethod(*s) + type NoMethod OperationsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, 
s.NullFields) } @@ -9508,8 +14769,8 @@ type PathMatcher struct { } func (s *PathMatcher) MarshalJSON() ([]byte, error) { - type noMethod PathMatcher - raw := noMethod(*s) + type NoMethod PathMatcher + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9544,14 +14805,15 @@ type PathRule struct { } func (s *PathRule) MarshalJSON() ([]byte, error) { - type noMethod PathRule - raw := noMethod(*s) + type NoMethod PathRule + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Project: A Project resource. Projects can only be created in the // Google Cloud Platform Console. Unless marked otherwise, values can -// only be modified in the console. +// only be modified in the console. (== resource_for v1.projects ==) (== +// resource_for beta.projects ==) type Project struct { // CommonInstanceMetadata: Metadata key/value pairs available to all // instances contained in this project. See Custom metadata for more @@ -9627,8 +14889,8 @@ type Project struct { } func (s *Project) MarshalJSON() ([]byte, error) { - type noMethod Project - raw := noMethod(*s) + type NoMethod Project + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9654,8 +14916,8 @@ type ProjectsDisableXpnResourceRequest struct { } func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsDisableXpnResourceRequest - raw := noMethod(*s) + type NoMethod ProjectsDisableXpnResourceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9681,8 +14943,8 @@ type ProjectsEnableXpnResourceRequest struct { } func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsEnableXpnResourceRequest - raw := noMethod(*s) + type NoMethod ProjectsEnableXpnResourceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9726,8 
+14988,8 @@ type ProjectsGetXpnResources struct { } func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { - type noMethod ProjectsGetXpnResources - raw := noMethod(*s) + type NoMethod ProjectsGetXpnResources + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9755,8 +15017,8 @@ type ProjectsListXpnHostsRequest struct { } func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsListXpnHostsRequest - raw := noMethod(*s) + type NoMethod ProjectsListXpnHostsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9783,6 +15045,8 @@ type Quota struct { // "INSTANCE_GROUPS" // "INSTANCE_GROUP_MANAGERS" // "INSTANCE_TEMPLATES" + // "INTERCONNECTS" + // "INTERNAL_ADDRESSES" // "IN_USE_ADDRESSES" // "LOCAL_SSD_TOTAL_GB" // "NETWORKS" @@ -9790,6 +15054,8 @@ type Quota struct { // "NVIDIA_P100_GPUS" // "PREEMPTIBLE_CPUS" // "PREEMPTIBLE_LOCAL_SSD_GB" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" // "REGIONAL_AUTOSCALERS" // "REGIONAL_INSTANCE_GROUP_MANAGERS" // "ROUTERS" @@ -9833,19 +15099,19 @@ type Quota struct { } func (s *Quota) MarshalJSON() ([]byte, error) { - type noMethod Quota - raw := noMethod(*s) + type NoMethod Quota + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Quota) UnmarshalJSON(data []byte) error { - type noMethod Quota + type NoMethod Quota var s1 struct { Limit gensupport.JSONFloat64 `json:"limit"` Usage gensupport.JSONFloat64 `json:"usage"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -9854,7 +15120,8 @@ func (s *Quota) UnmarshalJSON(data []byte) error { return nil } -// Region: Region resource. +// Region: Region resource. 
(== resource_for beta.regions ==) (== +// resource_for v1.regions ==) type Region struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -9918,8 +15185,8 @@ type Region struct { } func (s *Region) MarshalJSON() ([]byte, error) { - type noMethod Region - raw := noMethod(*s) + type NoMethod Region + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9946,6 +15213,9 @@ type RegionAutoscalerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionAutoscalerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9968,8 +15238,110 @@ type RegionAutoscalerList struct { } func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { - type noMethod RegionAutoscalerList - raw := noMethod(*s) + type NoMethod RegionAutoscalerList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionAutoscalerListWarning: [Output Only] Informational warning +// message. +type RegionAutoscalerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionAutoscalerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionAutoscalerListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionAutoscalerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionAutoscalerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -9996,6 +15368,9 @@ type RegionInstanceGroupList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10018,8 +15393,110 @@ type RegionInstanceGroupList struct { } func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupList - raw := noMethod(*s) + type NoMethod RegionInstanceGroupList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionInstanceGroupListWarning: [Output Only] Informational warning +// message. +type RegionInstanceGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10049,6 +15526,9 @@ type RegionInstanceGroupManagerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10071,8 +15551,110 @@ type RegionInstanceGroupManagerList struct { } func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerList - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagerList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionInstanceGroupManagerListWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupManagerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupManagerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10100,8 +15682,8 @@ type RegionInstanceGroupManagersAbandonInstancesRequest struct { } func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersAbandonInstancesRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersAbandonInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10129,8 +15711,8 @@ type RegionInstanceGroupManagersDeleteInstancesRequest struct { } func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersDeleteInstancesRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersDeleteInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10161,8 +15743,8 @@ type RegionInstanceGroupManagersListInstancesResponse struct { } func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersListInstancesResponse - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersListInstancesResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10190,8 +15772,8 @@ type RegionInstanceGroupManagersRecreateRequest struct { } func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersRecreateRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersRecreateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10224,8 +15806,8 @@ type 
RegionInstanceGroupManagersSetTargetPoolsRequest struct { } func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTargetPoolsRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersSetTargetPoolsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10253,8 +15835,8 @@ type RegionInstanceGroupManagersSetTemplateRequest struct { } func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTemplateRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupManagersSetTemplateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10280,6 +15862,9 @@ type RegionInstanceGroupsListInstances struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupsListInstancesWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10302,8 +15887,110 @@ type RegionInstanceGroupsListInstances struct { } func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstances - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsListInstances + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionInstanceGroupsListInstancesWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupsListInstancesWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupsListInstancesWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsListInstancesWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupsListInstancesWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsListInstancesWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10340,8 +16027,8 @@ type RegionInstanceGroupsListInstancesRequest struct { } func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstancesRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsListInstancesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10375,8 +16062,8 @@ type RegionInstanceGroupsSetNamedPortsRequest struct { } func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsSetNamedPortsRequest - raw := noMethod(*s) + type NoMethod RegionInstanceGroupsSetNamedPortsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10404,6 +16091,9 @@ type RegionList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10426,8 +16116,109 @@ type RegionList struct { } func (s *RegionList) MarshalJSON() ([]byte, error) { - type noMethod RegionList - raw := noMethod(*s) + type NoMethod RegionList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionListWarning: [Output Only] Informational warning message. +type RegionListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10467,8 +16258,8 @@ type ResourceCommitment struct { } func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { - type noMethod ResourceCommitment - raw := noMethod(*s) + type NoMethod ResourceCommitment + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10495,8 +16286,8 @@ type ResourceGroupReference struct { } func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { - type noMethod ResourceGroupReference - raw := noMethod(*s) + type NoMethod ResourceGroupReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10517,7 +16308,8 @@ func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { // Compute Engine-operated gateway. // // Packets that do not match any route in the sending instance's routing -// table are dropped. +// table are dropped. (== resource_for beta.routes ==) (== resource_for +// v1.routes ==) type Route struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -10622,8 +16414,8 @@ type Route struct { } func (s *Route) MarshalJSON() ([]byte, error) { - type noMethod Route - raw := noMethod(*s) + type NoMethod Route + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10635,9 +16427,13 @@ type RouteWarnings struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -10648,7 +16444,9 @@ type RouteWarnings struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -10679,8 +16477,8 @@ type RouteWarnings struct { } func (s *RouteWarnings) MarshalJSON() ([]byte, error) { - type noMethod RouteWarnings - raw := noMethod(*s) + type NoMethod RouteWarnings + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10716,8 +16514,8 @@ type RouteWarningsData struct { } func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { - type noMethod RouteWarningsData - raw := noMethod(*s) + type NoMethod RouteWarningsData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10744,6 +16542,9 @@ type RouteList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RouteListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -10766,8 +16567,109 @@ type RouteList struct { } func (s *RouteList) MarshalJSON() ([]byte, error) { - type noMethod RouteList - raw := noMethod(*s) + type NoMethod RouteList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RouteListWarning: [Output Only] Informational warning message. +type RouteListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouteListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouteListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouteListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RouteListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouteListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10842,8 +16744,8 @@ type Router struct { } func (s *Router) MarshalJSON() ([]byte, error) { - type noMethod Router - raw := noMethod(*s) + type NoMethod Router + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10870,6 +16772,9 @@ type RouterAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RouterAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10892,8 +16797,110 @@ type RouterAggregatedList struct { } func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod RouterAggregatedList - raw := noMethod(*s) + type NoMethod RouterAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RouterAggregatedListWarning: [Output Only] Informational warning +// message. +type RouterAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouterAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RouterAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10922,8 +16929,8 @@ type RouterBgp struct { } func (s *RouterBgp) MarshalJSON() ([]byte, error) { - type noMethod RouterBgp - raw := noMethod(*s) + type NoMethod RouterBgp + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10972,8 +16979,8 @@ type RouterBgpPeer struct { } func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { - type noMethod RouterBgpPeer - raw := noMethod(*s) + type NoMethod RouterBgpPeer + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -10985,6 +16992,12 @@ type RouterInterface struct { // interface. IpRange string `json:"ipRange,omitempty"` + // LinkedInterconnectAttachment: URI of the linked interconnect + // attachment. It must be in the same region as the router. Each + // interface can have at most one linked resource and it could either be + // a VPN Tunnel or an interconnect attachment. + LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` + // LinkedVpnTunnel: URI of the linked VPN tunnel. It must be in the same // region as the router. Each interface can have at most one linked // resource and it could either be a VPN Tunnel or an interconnect @@ -11013,8 +17026,8 @@ type RouterInterface struct { } func (s *RouterInterface) MarshalJSON() ([]byte, error) { - type noMethod RouterInterface - raw := noMethod(*s) + type NoMethod RouterInterface + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11042,6 +17055,9 @@ type RouterList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. 
+ Warning *RouterListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11064,8 +17080,109 @@ type RouterList struct { } func (s *RouterList) MarshalJSON() ([]byte, error) { - type noMethod RouterList - raw := noMethod(*s) + type NoMethod RouterList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RouterListWarning: [Output Only] Informational warning message. +type RouterListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouterListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouterListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RouterListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouterListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11099,8 +17216,8 @@ type RouterStatus struct { } func (s *RouterStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatus - raw := noMethod(*s) + type NoMethod RouterStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11160,8 +17277,8 @@ type RouterStatusBgpPeerStatus struct { } func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusBgpPeerStatus - raw := noMethod(*s) + type NoMethod RouterStatusBgpPeerStatus + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11193,8 +17310,8 @@ type RouterStatusResponse struct { } func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusResponse - raw := noMethod(*s) + type NoMethod RouterStatusResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11224,8 +17341,8 @@ type RoutersPreviewResponse struct { } func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { - type noMethod 
RoutersPreviewResponse - raw := noMethod(*s) + type NoMethod RoutersPreviewResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11255,8 +17372,8 @@ type RoutersScopedList struct { } func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedList - raw := noMethod(*s) + type NoMethod RoutersScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11270,9 +17387,13 @@ type RoutersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -11283,7 +17404,9 @@ type RoutersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -11314,8 +17437,8 @@ type RoutersScopedListWarning struct { } func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarning - raw := noMethod(*s) + type NoMethod RoutersScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11351,8 +17474,8 @@ type RoutersScopedListWarningData struct { } func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarningData - raw := noMethod(*s) + type NoMethod RoutersScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11403,8 +17526,8 @@ type SSLHealthCheck struct { } func (s *SSLHealthCheck) 
MarshalJSON() ([]byte, error) { - type noMethod SSLHealthCheck - raw := noMethod(*s) + type NoMethod SSLHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11455,8 +17578,8 @@ type Scheduling struct { } func (s *Scheduling) MarshalJSON() ([]byte, error) { - type noMethod Scheduling - raw := noMethod(*s) + type NoMethod Scheduling + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11506,8 +17629,8 @@ type SerialPortOutput struct { } func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { - type noMethod SerialPortOutput - raw := noMethod(*s) + type NoMethod SerialPortOutput + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11538,12 +17661,13 @@ type ServiceAccount struct { } func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type noMethod ServiceAccount - raw := noMethod(*s) + type NoMethod ServiceAccount + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Snapshot: A persistent disk snapshot resource. +// Snapshot: A persistent disk snapshot resource. (== resource_for +// beta.snapshots ==) (== resource_for v1.snapshots ==) type Snapshot struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -11638,7 +17762,7 @@ type Snapshot struct { // "UPLOADING" Status string `json:"status,omitempty"` - // StorageBytes: [Output Only] A size of the the storage used by the + // StorageBytes: [Output Only] A size of the storage used by the // snapshot. As snapshots share storage, this number is expected to // change with snapshot creation/deletion. 
StorageBytes int64 `json:"storageBytes,omitempty,string"` @@ -11677,8 +17801,8 @@ type Snapshot struct { } func (s *Snapshot) MarshalJSON() ([]byte, error) { - type noMethod Snapshot - raw := noMethod(*s) + type NoMethod Snapshot + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11705,6 +17829,9 @@ type SnapshotList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SnapshotListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11727,14 +17854,116 @@ type SnapshotList struct { } func (s *SnapshotList) MarshalJSON() ([]byte, error) { - type noMethod SnapshotList - raw := noMethod(*s) + type NoMethod SnapshotList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SnapshotListWarning: [Output Only] Informational warning message. +type SnapshotListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SnapshotListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SnapshotListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SslCertificate: An SslCertificate resource. This resource provides a // mechanism to upload an SSL key and certificate to the load balancer -// to serve secure connections from the user. +// to serve secure connections from the user. (== resource_for +// beta.sslCertificates ==) (== resource_for v1.sslCertificates ==) type SslCertificate struct { // Certificate: A local certificate file. The certificate must be in PEM // format. The certificate chain must be no greater than 5 certs long. @@ -11795,8 +18024,8 @@ type SslCertificate struct { } func (s *SslCertificate) MarshalJSON() ([]byte, error) { - type noMethod SslCertificate - raw := noMethod(*s) + type NoMethod SslCertificate + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11823,6 +18052,9 @@ type SslCertificateList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SslCertificateListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11845,12 +18077,115 @@ type SslCertificateList struct { } func (s *SslCertificateList) MarshalJSON() ([]byte, error) { - type noMethod SslCertificateList - raw := noMethod(*s) + type NoMethod SslCertificateList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Subnetwork: A Subnetwork resource. +// SslCertificateListWarning: [Output Only] Informational warning +// message. +type SslCertificateListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SslCertificateListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslCertificateListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Subnetwork: A Subnetwork resource. (== resource_for beta.subnetworks +// ==) (== resource_for v1.subnetworks ==) type Subnetwork struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -11938,8 +18273,8 @@ type Subnetwork struct { } func (s *Subnetwork) MarshalJSON() ([]byte, error) { - type noMethod Subnetwork - raw := noMethod(*s) + type NoMethod Subnetwork + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -11966,6 +18301,9 @@ type SubnetworkAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11988,8 +18326,110 @@ type SubnetworkAggregatedList struct { } func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkAggregatedList - raw := noMethod(*s) + type NoMethod SubnetworkAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkAggregatedListWarning: [Output Only] Informational warning +// message. +type SubnetworkAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SubnetworkAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworkAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12017,6 +18457,9 @@ type SubnetworkList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SubnetworkListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -12039,8 +18482,109 @@ type SubnetworkList struct { } func (s *SubnetworkList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkList - raw := noMethod(*s) + type NoMethod SubnetworkList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkListWarning: [Output Only] Informational warning message. +type SubnetworkListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SubnetworkListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworkListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12078,8 +18622,8 @@ type SubnetworkSecondaryRange struct { } func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkSecondaryRange - raw := noMethod(*s) + type NoMethod SubnetworkSecondaryRange + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12109,8 +18653,8 @@ type SubnetworksExpandIpCidrRangeRequest struct { } func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksExpandIpCidrRangeRequest - raw := noMethod(*s) + type NoMethod SubnetworksExpandIpCidrRangeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12140,8 +18684,8 @@ type SubnetworksScopedList struct { } func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedList - raw := noMethod(*s) + type NoMethod SubnetworksScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12155,9 +18699,13 @@ type SubnetworksScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12168,7 +18716,9 @@ type SubnetworksScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" 
Code string `json:"code,omitempty"` @@ -12199,8 +18749,8 @@ type SubnetworksScopedListWarning struct { } func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarning - raw := noMethod(*s) + type NoMethod SubnetworksScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12236,8 +18786,8 @@ type SubnetworksScopedListWarningData struct { } func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarningData - raw := noMethod(*s) + type NoMethod SubnetworksScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12264,8 +18814,8 @@ type SubnetworksSetPrivateIpGoogleAccessRequest struct { } func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksSetPrivateIpGoogleAccessRequest - raw := noMethod(*s) + type NoMethod SubnetworksSetPrivateIpGoogleAccessRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12316,8 +18866,8 @@ type TCPHealthCheck struct { } func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod TCPHealthCheck - raw := noMethod(*s) + type NoMethod TCPHealthCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12355,13 +18905,14 @@ type Tags struct { } func (s *Tags) MarshalJSON() ([]byte, error) { - type noMethod Tags - raw := noMethod(*s) + type NoMethod Tags + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetHttpProxy: A TargetHttpProxy resource. This resource defines an -// HTTP proxy. +// HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== +// resource_for v1.targetHttpProxies ==) type TargetHttpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -12418,8 +18969,8 @@ type TargetHttpProxy struct { } func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxy - raw := noMethod(*s) + type NoMethod TargetHttpProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12447,6 +18998,9 @@ type TargetHttpProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpProxyListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -12469,8 +19023,110 @@ type TargetHttpProxyList struct { } func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxyList - raw := noMethod(*s) + type NoMethod TargetHttpProxyList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12499,13 +19155,14 @@ type TargetHttpsProxiesSetSslCertificatesRequest struct { } func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxiesSetSslCertificatesRequest - raw := noMethod(*s) + type NoMethod TargetHttpsProxiesSetSslCertificatesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines -// an HTTPS proxy. +// an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== +// resource_for v1.targetHttpsProxies ==) type TargetHttpsProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -12572,8 +19229,8 @@ type TargetHttpsProxy struct { } func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxy - raw := noMethod(*s) + type NoMethod TargetHttpsProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12601,6 +19258,9 @@ type TargetHttpsProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpsProxyListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -12623,13 +19283,117 @@ type TargetHttpsProxyList struct { } func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxyList - raw := noMethod(*s) + type NoMethod TargetHttpsProxyList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpsProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpsProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpsProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpsProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetInstance: A TargetInstance resource. This resource defines an -// endpoint instance that terminates traffic of certain protocols. +// endpoint instance that terminates traffic of certain protocols. (== +// resource_for beta.targetInstances ==) (== resource_for +// v1.targetInstances ==) type TargetInstance struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -12703,8 +19467,8 @@ type TargetInstance struct { } func (s *TargetInstance) MarshalJSON() ([]byte, error) { - type noMethod TargetInstance - raw := noMethod(*s) + type NoMethod TargetInstance + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12730,6 +19494,9 @@ type TargetInstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -12752,8 +19519,110 @@ type TargetInstanceAggregatedList struct { } func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceAggregatedList - raw := noMethod(*s) + type NoMethod TargetInstanceAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstanceAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetInstanceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstanceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstanceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12780,6 +19649,9 @@ type TargetInstanceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetInstanceListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -12802,8 +19674,110 @@ type TargetInstanceList struct { } func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceList - raw := noMethod(*s) + type NoMethod TargetInstanceList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstanceListWarning: [Output Only] Informational warning +// message. +type TargetInstanceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstanceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstanceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12834,8 +19808,8 @@ type TargetInstancesScopedList struct { } func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedList - raw := noMethod(*s) + type NoMethod TargetInstancesScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12849,9 +19823,13 @@ type TargetInstancesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12862,7 +19840,9 @@ type TargetInstancesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -12893,8 +19873,8 @@ type TargetInstancesScopedListWarning struct { } func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarning - raw := noMethod(*s) + type NoMethod TargetInstancesScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -12930,14 +19910,15 @@ type TargetInstancesScopedListWarningData struct { } func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarningData - raw := noMethod(*s) + type NoMethod 
TargetInstancesScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetPool: A TargetPool resource. This resource defines a pool of // instances, an associated HttpHealthCheck resource, and the fallback -// target pool. +// target pool. (== resource_for beta.targetPools ==) (== resource_for +// v1.targetPools ==) type TargetPool struct { // BackupPool: This field is applicable only when the containing target // pool is serving a forwarding rule as the primary pool, and its @@ -13059,18 +20040,18 @@ type TargetPool struct { } func (s *TargetPool) MarshalJSON() ([]byte, error) { - type noMethod TargetPool - raw := noMethod(*s) + type NoMethod TargetPool + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *TargetPool) UnmarshalJSON(data []byte) error { - type noMethod TargetPool + type NoMethod TargetPool var s1 struct { FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -13102,6 +20083,9 @@ type TargetPoolAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13124,8 +20108,110 @@ type TargetPoolAggregatedList struct { } func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolAggregatedList - raw := noMethod(*s) + type NoMethod TargetPoolAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolAggregatedListWarning: [Output Only] Informational warning +// message. 
+type TargetPoolAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13159,8 +20245,8 @@ type TargetPoolInstanceHealth struct { } func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolInstanceHealth - raw := noMethod(*s) + type NoMethod TargetPoolInstanceHealth + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13188,6 +20274,9 @@ type TargetPoolList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetPoolListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13210,8 +20299,109 @@ type TargetPoolList struct { } func (s *TargetPoolList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolList - raw := noMethod(*s) + type NoMethod TargetPoolList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolListWarning: [Output Only] Informational warning message. +type TargetPoolListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13237,8 +20427,8 @@ type TargetPoolsAddHealthCheckRequest struct { } func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddHealthCheckRequest - raw := noMethod(*s) + type NoMethod TargetPoolsAddHealthCheckRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13270,8 +20460,8 @@ type TargetPoolsAddInstanceRequest struct { } func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddInstanceRequest - raw := noMethod(*s) + type NoMethod TargetPoolsAddInstanceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13302,8 +20492,8 @@ type TargetPoolsRemoveHealthCheckRequest struct { } func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveHealthCheckRequest - raw := noMethod(*s) + type NoMethod TargetPoolsRemoveHealthCheckRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13329,8 +20519,8 @@ type TargetPoolsRemoveInstanceRequest struct { } func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveInstanceRequest - raw := noMethod(*s) + type NoMethod TargetPoolsRemoveInstanceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13360,8 +20550,8 @@ type TargetPoolsScopedList struct { } func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedList - raw := noMethod(*s) + type NoMethod TargetPoolsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ 
-13375,9 +20565,13 @@ type TargetPoolsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -13388,7 +20582,9 @@ type TargetPoolsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13419,8 +20615,8 @@ type TargetPoolsScopedListWarning struct { } func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarning - raw := noMethod(*s) + type NoMethod TargetPoolsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13456,8 +20652,8 @@ type TargetPoolsScopedListWarningData struct { } func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarningData - raw := noMethod(*s) + type NoMethod TargetPoolsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13482,8 +20678,8 @@ type TargetReference struct { } func (s *TargetReference) MarshalJSON() ([]byte, error) { - type noMethod TargetReference - raw := noMethod(*s) + type NoMethod TargetReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13510,8 +20706,8 @@ type TargetSslProxiesSetBackendServiceRequest struct { } func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetBackendServiceRequest - raw := 
noMethod(*s) + type NoMethod TargetSslProxiesSetBackendServiceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13542,8 +20738,8 @@ type TargetSslProxiesSetProxyHeaderRequest struct { } func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetProxyHeaderRequest - raw := noMethod(*s) + type NoMethod TargetSslProxiesSetProxyHeaderRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13572,13 +20768,14 @@ type TargetSslProxiesSetSslCertificatesRequest struct { } func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetSslCertificatesRequest - raw := noMethod(*s) + type NoMethod TargetSslProxiesSetSslCertificatesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetSslProxy: A TargetSslProxy resource. This resource defines an -// SSL proxy. +// SSL proxy. (== resource_for beta.targetSslProxies ==) (== +// resource_for v1.targetSslProxies ==) type TargetSslProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -13648,8 +20845,8 @@ type TargetSslProxy struct { } func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxy - raw := noMethod(*s) + type NoMethod TargetSslProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13676,6 +20873,9 @@ type TargetSslProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetSslProxyListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -13698,8 +20898,110 @@ type TargetSslProxyList struct { } func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxyList - raw := noMethod(*s) + type NoMethod TargetSslProxyList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxyListWarning: [Output Only] Informational warning +// message. +type TargetSslProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetSslProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13726,8 +21028,8 @@ type TargetTcpProxiesSetBackendServiceRequest struct { } func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetBackendServiceRequest - raw := noMethod(*s) + type NoMethod TargetTcpProxiesSetBackendServiceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13758,13 +21060,14 @@ type TargetTcpProxiesSetProxyHeaderRequest struct { } func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetProxyHeaderRequest - raw := noMethod(*s) + type NoMethod TargetTcpProxiesSetProxyHeaderRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TargetTcpProxy: A TargetTcpProxy resource. This resource defines a -// TCP proxy. +// TCP proxy. (== resource_for beta.targetTcpProxies ==) (== +// resource_for v1.targetTcpProxies ==) type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -13829,8 +21132,8 @@ type TargetTcpProxy struct { } func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxy - raw := noMethod(*s) + type NoMethod TargetTcpProxy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13857,6 +21160,9 @@ type TargetTcpProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13879,12 +21185,116 @@ type TargetTcpProxyList struct { } func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxyList - raw := noMethod(*s) + type NoMethod TargetTcpProxyList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGateway: Represents a Target VPN gateway resource. +// TargetTcpProxyListWarning: [Output Only] Informational warning +// message. +type TargetTcpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGateway: Represents a Target VPN gateway resource. (== +// resource_for beta.targetVpnGateways ==) (== resource_for +// v1.targetVpnGateways ==) type TargetVpnGateway struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -13964,8 +21374,8 @@ type TargetVpnGateway struct { } func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGateway - raw := noMethod(*s) + type NoMethod TargetVpnGateway + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -13992,6 +21402,9 @@ type TargetVpnGatewayAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14014,8 +21427,110 @@ type TargetVpnGatewayAggregatedList struct { } func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayAggregatedList - raw := noMethod(*s) + type NoMethod TargetVpnGatewayAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetVpnGatewayAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewayAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14043,6 +21558,9 @@ type TargetVpnGatewayList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14065,8 +21583,110 @@ type TargetVpnGatewayList struct { } func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayList - raw := noMethod(*s) + type NoMethod TargetVpnGatewayList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayListWarning: [Output Only] Informational warning +// message. +type TargetVpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14098,8 +21718,8 @@ type TargetVpnGatewaysScopedList struct { } func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedList - raw := noMethod(*s) + type NoMethod TargetVpnGatewaysScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14113,9 +21733,13 @@ type TargetVpnGatewaysScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -14126,7 +21750,9 @@ type TargetVpnGatewaysScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -14157,8 +21783,8 @@ type TargetVpnGatewaysScopedListWarning struct { } func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarning - raw := noMethod(*s) + type NoMethod TargetVpnGatewaysScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14194,8 +21820,8 @@ type TargetVpnGatewaysScopedListWarningData struct { } func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarningData - raw := noMethod(*s) + type 
NoMethod TargetVpnGatewaysScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14226,8 +21852,8 @@ type TestFailure struct { } func (s *TestFailure) MarshalJSON() ([]byte, error) { - type noMethod TestFailure - raw := noMethod(*s) + type NoMethod TestFailure + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14306,8 +21932,8 @@ type UrlMap struct { } func (s *UrlMap) MarshalJSON() ([]byte, error) { - type noMethod UrlMap - raw := noMethod(*s) + type NoMethod UrlMap + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14334,6 +21960,9 @@ type UrlMapList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *UrlMapListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14356,8 +21985,109 @@ type UrlMapList struct { } func (s *UrlMapList) MarshalJSON() ([]byte, error) { - type noMethod UrlMapList - raw := noMethod(*s) + type NoMethod UrlMapList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapListWarning: [Output Only] Informational warning message. +type UrlMapListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UrlMapListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14382,8 +22112,8 @@ type UrlMapReference struct { } func (s *UrlMapReference) MarshalJSON() ([]byte, error) { - type noMethod UrlMapReference - raw := noMethod(*s) + type NoMethod UrlMapReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14420,8 +22150,8 @@ type UrlMapTest struct { } func (s *UrlMapTest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapTest - raw := noMethod(*s) + type NoMethod UrlMapTest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14459,8 +22189,8 @@ type UrlMapValidationResult struct { } func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { - type noMethod UrlMapValidationResult - raw := noMethod(*s) + type NoMethod UrlMapValidationResult + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14486,8 +22216,8 @@ type UrlMapsValidateRequest struct { } func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateRequest - raw := noMethod(*s) + type NoMethod UrlMapsValidateRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14516,8 +22246,8 @@ type UrlMapsValidateResponse struct { } func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateResponse - raw := noMethod(*s) + type NoMethod UrlMapsValidateResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14559,11 +22289,13 @@ type UsageExportLocation struct { } func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { - type noMethod UsageExportLocation - raw := noMethod(*s) + type NoMethod UsageExportLocation + raw := 
NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// VpnTunnel: VPN tunnel resource. (== resource_for beta.vpnTunnels ==) +// (== resource_for v1.vpnTunnels ==) type VpnTunnel struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -14674,8 +22406,8 @@ type VpnTunnel struct { } func (s *VpnTunnel) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnel - raw := noMethod(*s) + type NoMethod VpnTunnel + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14702,6 +22434,9 @@ type VpnTunnelAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14724,8 +22459,110 @@ type VpnTunnelAggregatedList struct { } func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelAggregatedList - raw := noMethod(*s) + type NoMethod VpnTunnelAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelAggregatedListWarning: [Output Only] Informational warning +// message. +type VpnTunnelAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14753,6 +22590,9 @@ type VpnTunnelList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14775,8 +22615,109 @@ type VpnTunnelList struct { } func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelList - raw := noMethod(*s) + type NoMethod VpnTunnelList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelListWarning: [Output Only] Informational warning message. +type VpnTunnelListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14806,8 +22747,8 @@ type VpnTunnelsScopedList struct { } func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedList - raw := noMethod(*s) + type NoMethod VpnTunnelsScopedList + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14821,9 +22762,13 @@ type VpnTunnelsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -14834,7 +22779,9 @@ type VpnTunnelsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -14865,8 +22812,8 @@ type VpnTunnelsScopedListWarning struct { } func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarning - raw := noMethod(*s) + type NoMethod VpnTunnelsScopedListWarning + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14902,8 +22849,8 @@ type VpnTunnelsScopedListWarningData struct { } func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarningData - raw := noMethod(*s) + type NoMethod VpnTunnelsScopedListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) } @@ -14930,6 +22877,9 @@ type XpnHostList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *XpnHostListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14952,8 +22902,109 @@ type XpnHostList struct { } func (s *XpnHostList) MarshalJSON() ([]byte, error) { - type noMethod XpnHostList - raw := noMethod(*s) + type NoMethod XpnHostList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnHostListWarning: [Output Only] Informational warning message. +type XpnHostListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*XpnHostListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -14989,13 +23040,18 @@ type XpnResourceId struct { } func (s *XpnResourceId) MarshalJSON() ([]byte, error) { - type noMethod XpnResourceId - raw := noMethod(*s) + type NoMethod XpnResourceId + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Zone: A Zone resource. +// Zone: A Zone resource. (== resource_for beta.zones ==) (== +// resource_for v1.zones ==) type Zone struct { + // AvailableCpuPlatforms: [Output Only] Available cpu/platform + // selections for the zone. + AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -15036,15 +23092,16 @@ type Zone struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AvailableCpuPlatforms") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -15055,8 +23112,8 @@ type Zone struct { } func (s *Zone) MarshalJSON() ([]byte, error) { - type noMethod Zone - raw := noMethod(*s) + type NoMethod Zone + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15083,6 +23140,9 @@ type ZoneList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ZoneListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -15105,8 +23165,109 @@ type ZoneList struct { } func (s *ZoneList) MarshalJSON() ([]byte, error) { - type noMethod ZoneList - raw := noMethod(*s) + type NoMethod ZoneList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneListWarning: [Output Only] Informational warning message. +type ZoneListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ZoneListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ZoneListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ZoneListWarningData + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15141,8 +23302,8 @@ type ZoneSetLabelsRequest struct { } func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod ZoneSetLabelsRequest - raw := noMethod(*s) + type NoMethod ZoneSetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -15320,7 +23481,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -15511,7 +23672,7 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -15738,7 +23899,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -15998,7 +24159,7 @@ func (c 
*AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -16194,7 +24355,7 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -16361,7 +24522,7 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -16534,7 +24695,7 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -16761,7 +24922,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -17020,7 +25181,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -17215,7 +25376,7 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, 
nil @@ -17382,7 +25543,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -17554,7 +25715,7 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -17780,7 +25941,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -17996,7 +26157,7 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -18180,7 +26341,7 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -18349,7 +26510,7 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -18505,7 +26666,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err 
!= nil { return nil, err } return ret, nil @@ -18666,7 +26827,7 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -18881,7 +27042,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -19082,7 +27243,7 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -19253,7 +27414,7 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -19476,7 +27637,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -19669,7 +27830,7 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -19826,7 +27987,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -19972,7 +28133,7 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -20138,7 +28299,7 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -20354,7 +28515,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -20559,7 +28720,7 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -20733,7 +28894,7 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -20956,7 +29117,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -21148,7 +29309,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -21376,7 +29537,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -21636,7 +29797,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -21845,7 +30006,7 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -22027,7 +30188,7 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -22194,7 +30355,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -22377,7 +30538,7 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -22609,7 +30770,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { }, } 
target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -22707,7 +30868,8 @@ type DisksResizeCall struct { header_ http.Header } -// Resize: Resizes the specified persistent disk. +// Resize: Resizes the specified persistent disk. You can only increase +// the size of the disk. func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -22819,12 +30981,12 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Resizes the specified persistent disk.", + // "description": "Resizes the specified persistent disk. 
You can only increase the size of the disk.", // "httpMethod": "POST", // "id": "compute.disks.resize", // "parameterOrder": [ @@ -23001,7 +31163,7 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -23173,7 +31335,7 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -23329,7 +31491,7 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -23491,7 +31653,7 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -23707,7 +31869,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -23909,7 +32071,7 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -24083,7 +32245,7 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - 
if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -24306,7 +32468,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -24502,7 +32664,7 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -24669,7 +32831,7 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -24842,7 +33004,7 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25069,7 +33231,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25281,7 +33443,7 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25453,7 +33615,7 @@ func (c *GlobalAddressesDeleteCall) Do(opts 
...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25610,7 +33772,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25772,7 +33934,7 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -25987,7 +34149,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -26180,7 +34342,7 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -26337,7 +34499,7 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -26499,7 +34661,7 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -26715,7 +34877,7 @@ func 
(c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -26916,7 +35078,7 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -27139,7 +35301,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -27437,7 +35599,7 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -27654,7 +35816,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -27846,7 +36008,7 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -28002,7 +36164,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return 
ret, nil @@ -28163,7 +36325,7 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -28378,7 +36540,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -28579,7 +36741,7 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -28750,7 +36912,7 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -28914,7 +37076,7 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29071,7 +37233,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29233,7 +37395,7 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); 
err != nil { return nil, err } return ret, nil @@ -29449,7 +37611,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29651,7 +37813,7 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29823,7 +37985,7 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -29986,7 +38148,7 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30142,7 +38304,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30303,7 +38465,7 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30518,7 +38680,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30719,7 +38881,7 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -30890,7 +39052,7 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31054,7 +39216,7 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31225,7 +39387,7 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31385,7 +39547,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31537,7 +39699,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31706,7 +39868,7 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -31765,8 +39927,8 @@ type ImagesListCall struct { header_ http.Header } -// List: Retrieves the list of private images available to the specified -// project. Private images are images you create that belong to your +// List: Retrieves the list of custom images available to the specified +// project. Custom images are images you create that belong to your // project. This method does not get any images that belong to other // projects, including publicly-available images, like Debian 8. If you // want to get a list of publicly-available images, use this method to @@ -31935,12 +40097,12 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", // "id": "compute.images.list", // "parameterOrder": [ @@ -32116,7 +40278,7 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32300,7 +40462,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32530,7 +40692,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32728,7 +40890,7 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -32919,7 +41081,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33088,7 +41250,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ 
-33266,7 +41428,7 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33491,7 +41653,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33705,7 +41867,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -33910,7 +42072,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34093,7 +42255,7 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34279,7 +42441,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34464,7 +42626,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { 
return nil, err } return ret, nil @@ -34645,7 +42807,7 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -34874,7 +43036,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35072,7 +43234,7 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35237,7 +43399,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35407,7 +43569,7 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35632,7 +43794,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -35889,7 +44051,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36113,7 +44275,7 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36292,7 +44454,7 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36466,7 +44628,7 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36623,7 +44785,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -36788,7 +44950,7 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37004,7 +45166,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37209,7 +45371,7 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37447,7 +45609,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37653,7 +45815,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -37829,7 +45991,7 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38004,7 +46166,7 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38191,7 +46353,7 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38366,7 +46528,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38548,7 +46710,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) 
(*Se }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38736,7 +46898,7 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -38963,7 +47125,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39168,7 +47330,7 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39221,6 +47383,190 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } +// method id "compute.instances.setDeletionProtection": + +type InstancesSetDeletionProtectionCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetDeletionProtection: Sets deletion protection on the instance. +func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { + c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + return c +} + +// DeletionProtection sets the optional parameter "deletionProtection": +// Whether the resource should be protected against deletion. 
+func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstancesSetDeletionProtectionCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.setDeletionProtection" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets deletion protection on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDeletionProtection", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "deletionProtection": { + // "default": "true", + // "description": "Whether the resource should be protected against deletion.", + // "location": "query", + // "type": "boolean" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.setDiskAutoDelete": type InstancesSetDiskAutoDeleteCall struct { @@ -39343,7 +47689,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39537,7 +47883,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39719,7 +48065,7 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -39901,7 +48247,7 @@ func (c *InstancesSetMachineTypeCall) 
Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40084,7 +48430,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40140,6 +48486,190 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } +// method id "compute.instances.setMinCpuPlatform": + +type InstancesSetMinCpuPlatformCall struct { + s *Service + project string + zone string + instance string + instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetMinCpuPlatform: Changes the minimum CPU platform that this +// instance should use. This method can only be called on a stopped +// instance. For more information, read Specifying a Minimum CPU +// Platform. +func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { + c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.setMinCpuPlatform" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", + // "httpMethod": "POST", + // "id": "compute.instances.setMinCpuPlatform", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + // "request": { + // "$ref": "InstancesSetMinCpuPlatformRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.setScheduling": type InstancesSetSchedulingCall struct { @@ -40266,7 +48796,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40449,7 +48979,7 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40632,7 +49162,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40809,7 +49339,7 @@ func (c 
*InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -40989,7 +49519,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41059,11 +49589,10 @@ type InstancesStopCall struct { // Stop: Stops a running instance, shutting it down cleanly, and allows // you to restart the instance at a later time. Stopped instances do not -// incur per-minute, virtual machine usage charges while they are -// stopped, but any resources that the virtual machine is using, such as -// persistent disks and static IP addresses, will continue to be charged -// until they are deleted. For more information, see Stopping an -// instance. +// incur VM usage charges while they are stopped. However, resources +// that the VM is using, such as persistent disks and static IP +// addresses, will continue to be charged until they are deleted. For +// more information, see Stopping an instance. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -41170,12 +49699,12 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. 
Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", // "httpMethod": "POST", // "id": "compute.instances.stop", // "parameterOrder": [ @@ -41223,6 +49752,2327 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } +// method id "compute.interconnectAttachments.aggregatedList": + +type InterconnectAttachmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of interconnect +// attachments. +func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { + c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. 
The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
+// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.aggregatedList" call. +// Exactly one of *InterconnectAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectAttachmentAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of interconnect attachments.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/interconnectAttachments", + // "response": { + // "$ref": "InterconnectAttachmentAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.interconnectAttachments.delete": + +type InterconnectAttachmentsDeleteCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified interconnect attachment. 
+func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { + c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.interconnectAttachment = interconnectAttachment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified interconnect attachment.", + // "httpMethod": "DELETE", + // "id": "compute.interconnectAttachments.delete", + // "parameterOrder": [ + // "project", + // "region", + // "interconnectAttachment" + // ], + // "parameters": { + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.interconnectAttachments.get": + +type InterconnectAttachmentsGetCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { + c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.interconnectAttachment = interconnectAttachment + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.get" call. +// Exactly one of *InterconnectAttachment or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectAttachment.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectAttachment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified interconnect attachment.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.get", + // "parameterOrder": [ + // "project", + // "region", + // "interconnectAttachment" + // ], + // "parameters": { + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": 
"{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "response": { + // "$ref": "InterconnectAttachment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnectAttachments.insert": + +type InterconnectAttachmentsInsertCall struct { + s *Service + project string + region string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an InterconnectAttachment in the specified project +// using the data included in the request. +func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { + c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.interconnectattachment = interconnectattachment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.insert" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.interconnectAttachments.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to 
identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/interconnectAttachments", + // "request": { + // "$ref": "InterconnectAttachment" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.interconnectAttachments.list": + +type InterconnectAttachmentsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of interconnect attachments contained within +// the specified region. +func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { + c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). 
The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical
+// order based on the resource name.
+//
+// You can also sort results in descending order based on the creation
+// timestamp using orderBy="creationTimestamp desc". This sorts results
+// based on the creationTimestamp field in reverse chronological order
+// (newest result first). Use this to sort resources like operations so
+// that the newest operation is returned first.
+//
+// Currently, only sorting by name or creationTimestamp desc is
+// supported.
+func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall {
+ c.urlParams_.Set("orderBy", orderBy)
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.list" call. +// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectAttachmentList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectAttachmentList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of interconnect attachments contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/interconnectAttachments", + // "response": { + // "$ref": "InterconnectAttachmentList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.interconnectLocations.get": + +type InterconnectLocationsGetCall struct { + s *Service + project string + interconnectLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the details for the specified interconnect location. 
Get
+// a list of available interconnect locations by making a list()
+// request.
+func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall {
+ c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.interconnectLocation = interconnectLocation
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request. 
+func (c *InterconnectLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnectLocation": c.interconnectLocation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectLocations.get" call. +// Exactly one of *InterconnectLocation or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectLocation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectLocation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the details for the specified interconnect location. Get a list of available interconnect locations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.get", + // "parameterOrder": [ + // "project", + // "interconnectLocation" + // ], + // "parameters": { + // "interconnectLocation": { + // "description": "Name of the interconnect location to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnectLocations/{interconnectLocation}", + // "response": { + // "$ref": "InterconnectLocation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnectLocations.list": + +type InterconnectLocationsListCall struct { + s *Service + project string + urlParams_ 
gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of interconnect locations available to the +// specified project. +func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { + c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
+func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *InterconnectLocationsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.interconnectLocations.list" call. 
+// Exactly one of *InterconnectLocationList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectLocationList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectLocationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of interconnect locations available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnectLocations", + // "response": { + // "$ref": "InterconnectLocationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.interconnects.delete": + +type InterconnectsDeleteCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified interconnect. 
+func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { + c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InterconnectsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified interconnect.", + // "httpMethod": "DELETE", + // "id": "compute.interconnects.delete", + // "parameterOrder": [ + // "project", + // "interconnect" + // ], + // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects/{interconnect}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.interconnects.get": + +type InterconnectsGetCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified interconnect. Get a list of available +// interconnects by making a list() request. +func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { + c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.get" call. +// Exactly one of *Interconnect or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Interconnect.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Interconnect{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnects.get", + // "parameterOrder": [ + // "project", + // "interconnect" + // ], + // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects/{interconnect}", + // "response": { + // "$ref": "Interconnect" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnects.insert": + +type InterconnectsInsertCall struct { + s *Service + project string + interconnect *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a 
Interconnect in the specified project using the +// data included in the request. +func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { + c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InterconnectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a Interconnect in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.interconnects.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects", + // "request": { + // "$ref": "Interconnect" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.interconnects.list": + +type InterconnectsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of interconnect available to the specified +// project. +func (r *InterconnectsService) List(project string) *InterconnectsListCall { + c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. 
For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InterconnectsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.list" call. +// Exactly one of *InterconnectList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of interconnect available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.interconnects.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects", + // "response": { + // "$ref": "InterconnectList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.interconnects.patch": + +type InterconnectsPatchCall struct { + s *Service + project string + interconnect string + interconnect2 *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified interconnect with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. 
+func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { + c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + c.interconnect2 = interconnect2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InterconnectsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.interconnects.patch", + // "parameterOrder": [ + // "project", + // "interconnect" + // ], + // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects/{interconnect}", + // "request": { + // "$ref": "Interconnect" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.licenses.get": type LicensesGetCall struct { @@ -41334,7 +52184,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41550,7 +52400,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41742,7 +52592,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -41970,7 +52820,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42177,7 
+53027,7 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42341,7 +53191,7 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42498,7 +53348,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42660,7 +53510,7 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42876,7 +53726,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -42953,6 +53803,178 @@ func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error } } +// method id "compute.networks.patch": + +type NetworksPatchCall struct { + s *Service + project string + network string + network2 *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified network with the data included in the +// request. Only the following fields can be modified: +// routingConfig.routingMode. 
+func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { + c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + c.network2 = network2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NetworksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "network": c.network, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", + // "httpMethod": "PATCH", + // "id": "compute.networks.patch", + // "parameterOrder": [ + // "project", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Name of the network to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/networks/{network}", + // "request": { + // "$ref": "Network" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.networks.removePeering": type NetworksRemovePeeringCall struct { @@ -43075,7 +54097,7 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43239,7 +54261,7 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43396,7 +54418,7 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43553,7 +54575,7 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43705,7 +54727,7 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != 
nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -43863,7 +54885,7 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44011,7 +55033,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44152,7 +55174,7 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44316,7 +55338,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44513,7 +55535,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44707,7 +55729,7 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -44867,7 +55889,7 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45028,7 +56050,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45191,7 +56213,7 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45352,7 +56374,7 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45518,7 +56540,7 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45690,7 +56712,7 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -45916,7 +56938,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46132,7 +57154,7 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) 
(*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46316,7 +57338,7 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46488,7 +57510,7 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46654,7 +57676,7 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46810,7 +57832,7 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -46987,7 +58009,7 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47213,7 +58235,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47428,7 +58450,7 @@ func (c 
*RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47612,7 +58634,7 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -47842,7 +58864,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48033,7 +59055,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48205,7 +59227,7 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48431,7 +59453,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -48657,7 +59679,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return 
ret, nil @@ -48830,7 +59852,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49021,7 +60043,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49189,7 +60211,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49366,7 +60388,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49591,7 +60613,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -49803,7 +60825,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50008,7 +61030,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. 
}, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50192,7 +61214,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50379,7 +61401,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50560,7 +61582,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50727,7 +61749,7 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -50952,7 +61974,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51213,7 +62235,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51433,7 +62455,7 @@ func (c 
*RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51721,7 +62743,7 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -51949,7 +62971,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52146,7 +63168,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52363,7 +63385,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52614,7 +63636,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -52809,7 +63831,7 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil 
@@ -52976,7 +63998,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53139,7 +64161,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53311,7 +64333,7 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53537,7 +64559,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53749,7 +64771,7 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -53913,7 +64935,7 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54091,7 +65113,7 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { 
return nil, err } return ret, nil @@ -54263,7 +65285,7 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54420,7 +65442,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54582,7 +65604,7 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54798,7 +65820,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -54997,7 +66019,7 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55154,7 +66176,7 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55371,7 +66393,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55552,7 +66574,7 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55710,7 +66732,7 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -55866,7 +66888,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56027,7 +67049,7 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56242,7 +67264,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56493,7 +67515,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56688,7 +67710,7 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -56867,7 +67889,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57037,7 +68059,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57209,7 +68231,7 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57435,7 +68457,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57647,7 +68669,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57819,7 +68841,7 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -57976,7 +68998,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) 
(*TargetHttp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58138,7 +69160,7 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58354,7 +69376,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58554,7 +69576,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58717,7 +69739,7 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -58873,7 +69895,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59034,7 +70056,7 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59249,7 +70271,7 @@ func (c 
*TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59448,7 +70470,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59618,7 +70640,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -59841,7 +70863,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60037,7 +71059,7 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60205,7 +71227,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60378,7 +71400,7 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return 
ret, nil @@ -60605,7 +71627,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60816,7 +71838,7 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -60998,7 +72020,7 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61229,7 +72251,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61425,7 +72447,7 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61593,7 +72615,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -61750,7 +72772,7 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); 
err != nil { return nil, err } return ret, nil @@ -61926,7 +72948,7 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62153,7 +73175,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62364,7 +73386,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62546,7 +73568,7 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62735,7 +73757,7 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -62912,7 +73934,7 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63068,7 +74090,7 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63229,7 +74251,7 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63444,7 +74466,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63643,7 +74665,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63813,7 +74835,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -63983,7 +75005,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64146,7 +75168,7 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64302,7 +75324,7 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64463,7 +75485,7 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64678,7 +75700,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -64877,7 +75899,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65047,7 +76069,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65269,7 +76291,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65464,7 +76486,7 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65631,7 +76653,7 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) 
(*TargetVpnG }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -65803,7 +76825,7 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66029,7 +77051,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66230,7 +77252,7 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66387,7 +77409,7 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66549,7 +77571,7 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66712,7 +77734,7 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -66936,7 +77958,7 @@ func (c *UrlMapsListCall) Do(opts 
...googleapi.CallOption) (*UrlMapList, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67138,7 +78160,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67310,7 +78332,7 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67464,7 +78486,7 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67681,7 +78703,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -67876,7 +78898,7 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68043,7 +79065,7 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68215,7 +79237,7 
@@ func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68441,7 +79463,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68760,7 +79782,7 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -68988,7 +80010,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69185,7 +80207,7 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -69402,7 +80424,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil diff --git a/vendor/google.golang.org/api/container/v1/container-api.json b/vendor/google.golang.org/api/container/v1/container-api.json index 06dd8d0cc97..d2ab8a2314a 100644 --- a/vendor/google.golang.org/api/container/v1/container-api.json +++ 
b/vendor/google.golang.org/api/container/v1/container-api.json @@ -1,435 +1,63 @@ { + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "rootUrl": "https://container.googleapis.com/", + "ownerDomain": "google.com", + "name": "container", + "batchPath": "batch", + "fullyEncodeReservedExpansion": true, + "title": "Google Container Engine API", + "ownerName": "Google", "resources": { "projects": { "resources": { "zones": { "methods": { "getServerconfig": { - "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/serverconfig", + "path": "v1/projects/{projectId}/zones/{zone}/serverconfig", + "id": "container.projects.zones.getServerconfig", + "description": "Returns configuration info about the Kubernetes Engine service.", + "response": { + "$ref": "ServerConfig" + }, "parameterOrder": [ "projectId", "zone" ], - "response": { - "$ref": "ServerConfig" - }, - "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true, - "location": "path" - }, - "zone": { - "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for.", - "type": "string", - "required": true, - "location": "path" - } - }, - "scopes": [ - 
"https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/serverconfig", - "path": "v1/projects/{projectId}/zones/{zone}/serverconfig", - "id": "container.projects.zones.getServerconfig", - "description": "Returns configuration info about the Container Engine service." + "httpMethod": "GET" } }, "resources": { "clusters": { "methods": { - "logging": { - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/logging", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/logging", - "id": "container.projects.zones.clusters.logging", - "request": { - "$ref": "SetLoggingServiceRequest" - }, - "description": "Sets the logging service of a specific cluster.", - "httpMethod": "POST", - "parameterOrder": [ - "projectId", - "zone", - "clusterId" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true, - "location": "path" - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, - "clusterId": { - "description": "The name of the cluster to upgrade.", - "type": "string", - "required": true, - "location": "path" - } - } - }, - "list": { - "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", - "httpMethod": "GET", - "parameterOrder": [ - "projectId", - "zone" - ], - "response": { - "$ref": "ListClustersResponse" - }, - "parameters": { - "projectId": { - "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": 
true - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides, or \"-\" for all zones.", - "type": "string", - "required": true, - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", - "path": "v1/projects/{projectId}/zones/{zone}/clusters", - "id": "container.projects.zones.clusters.list" - }, - "create": { - "request": { - "$ref": "CreateClusterRequest" - }, - "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe cluster creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range is being used by the cluster.", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "projectId", - "zone" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true, - "location": "path" - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - } - }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", - "id": "container.projects.zones.clusters.create", - "path": "v1/projects/{projectId}/zones/{zone}/clusters" - }, - "resourceLabels": { - "httpMethod": "POST", - "parameterOrder": 
[ - "projectId", - "zone", - "clusterId" - ], - "response": { - "$ref": "Operation" - }, - "parameters": { - "projectId": { - "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", - "type": "string", - "required": true - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, - "clusterId": { - "description": "The name of the cluster.", - "type": "string", - "required": true, - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/resourceLabels", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/resourceLabels", - "id": "container.projects.zones.clusters.resourceLabels", - "description": "Sets labels on a cluster.", - "request": { - "$ref": "SetLabelsRequest" - } - }, - "completeIpRotation": { - "request": { - "$ref": "CompleteIPRotationRequest" - }, - "description": "Completes master IP rotation.", - "httpMethod": "POST", - "parameterOrder": [ - "projectId", - "zone", - "clusterId" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "projectId": { - "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", - "type": "string", - "required": true - }, - "zone": { - "location": "path", - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true - }, - "clusterId": { - "description": "The name of the cluster.", - "type": "string", - "required": true, - "location": "path" - } - 
}, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:completeIpRotation", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:completeIpRotation", - "id": "container.projects.zones.clusters.completeIpRotation" - }, - "get": { - "description": "Gets the details of a specific cluster.", - "response": { - "$ref": "Cluster" - }, - "parameterOrder": [ - "projectId", - "zone", - "clusterId" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true, - "location": "path" - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, - "clusterId": { - "location": "path", - "description": "The name of the cluster to retrieve.", - "type": "string", - "required": true - } - }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", - "id": "container.projects.zones.clusters.get", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}" - }, - "legacyAbac": { - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/legacyAbac", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/legacyAbac", - "id": "container.projects.zones.clusters.legacyAbac", - "request": { - "$ref": "SetLegacyAbacRequest" - }, - "description": "Enables or disables the ABAC authorization mechanism on a cluster.", - "httpMethod": "POST", - "parameterOrder": [ - "projectId", - "zone", - "clusterId" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "projectId": { - "location": "path", - "description": "The Google Developers Console 
[project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true - }, - "zone": { - "location": "path", - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true - }, - "clusterId": { - "description": "The name of the cluster to update.", - "type": "string", - "required": true, - "location": "path" - } - } - }, - "setNetworkPolicy": { - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy", - "id": "container.projects.zones.clusters.setNetworkPolicy", - "request": { - "$ref": "SetNetworkPolicyRequest" - }, - "description": "Enables/Disables Network Policy for a cluster.", - "httpMethod": "POST", - "parameterOrder": [ - "projectId", - "zone", - "clusterId" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", - "type": "string", - "required": true, - "location": "path" - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, - "clusterId": { - "description": "The name of the cluster.", - "type": "string", - "required": true, - "location": "path" - } - } - }, - "startIpRotation": { - "description": "Start master IP rotation.", - "request": { - "$ref": "StartIPRotationRequest" - }, - "httpMethod": "POST", - "parameterOrder": [ - "projectId", - "zone", - "clusterId" - ], - "response": { - "$ref": "Operation" - }, - "parameters": { - "projectId": { - "description": "The Google Developers 
Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", - "type": "string", - "required": true, - "location": "path" - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, - "clusterId": { - "location": "path", - "description": "The name of the cluster.", - "type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation", - "id": "container.projects.zones.clusters.startIpRotation" - }, - "addons": { - "description": "Sets the addons of a specific cluster.", - "request": { - "$ref": "SetAddonsConfigRequest" - }, - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "projectId", - "zone", - "clusterId" - ], - "httpMethod": "POST", - "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true, - "location": "path" - }, - "zone": { - "location": "path", - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true - }, - "clusterId": { - "description": "The name of the cluster to upgrade.", - "type": "string", - "required": true, - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/addons", - "id": "container.projects.zones.clusters.addons", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/addons" - }, "delete": { + "path": 
"v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "id": "container.projects.zones.clusters.delete", "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster\n(e.g. load balancer resources) will not be deleted if they weren't present\nat the initial create time.", "response": { "$ref": "Operation" @@ -440,37 +68,58 @@ "clusterId" ], "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { "projectId": { + "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", "required": true, - "location": "path" + "type": "string" }, "zone": { "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true + "required": true, + "type": "string" }, "clusterId": { + "location": "path", "description": "The name of the cluster to delete.", - "type": "string", "required": true, - "location": "path" + "type": "string" } }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}" + }, + "locations": { "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", - "id": "container.projects.zones.clusters.delete", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}" - }, - "locations": { + "parameters": { + "clusterId": { + "description": "The name of the cluster to upgrade.", + "required": true, + "type": "string", + "location": "path" + }, + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or 
project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" + } + }, "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", - "id": "container.projects.zones.clusters.locations", "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", + "id": "container.projects.zones.clusters.locations", "description": "Sets the locations of a specific cluster.", "request": { "$ref": "SetLocationsRequest" @@ -483,36 +132,9 @@ "zone", "clusterId" ], - "httpMethod": "POST", - "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true, - "location": "path" - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, - "clusterId": { - "location": "path", - "description": "The name of the cluster to upgrade.", - "type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "httpMethod": "POST" }, "update": { - "request": { - "$ref": "UpdateClusterRequest" - }, - "description": "Updates the settings of a specific cluster.", "response": { "$ref": "Operation" }, @@ -527,29 +149,59 @@ ], "parameters": { "projectId": { - "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, "type": "string", - "required": true + "location": "path" }, "zone": { - "location": "path", "description": "The name of the Google Compute 
Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, "type": "string", - "required": true + "location": "path" }, "clusterId": { "location": "path", "description": "The name of the cluster to upgrade.", - "type": "string", - "required": true + "required": true, + "type": "string" } }, "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", "id": "container.projects.zones.clusters.update", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}" + "description": "Updates the settings of a specific cluster.", + "request": { + "$ref": "UpdateClusterRequest" + } }, "monitoring": { + "parameters": { + "clusterId": { + "location": "path", + "description": "The name of the cluster to upgrade.", + "required": true, + "type": "string" + }, + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/monitoring", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/monitoring", + "id": "container.projects.zones.clusters.monitoring", "request": { "$ref": "SetMonitoringServiceRequest" }, @@ -562,80 +214,56 @@ "zone", "clusterId" ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - 
"required": true, - "location": "path" - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, - "clusterId": { - "location": "path", - "description": "The name of the cluster to upgrade.", - "type": "string", - "required": true - } - }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/monitoring", - "id": "container.projects.zones.clusters.monitoring", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/monitoring" + "httpMethod": "POST" }, "master": { - "request": { - "$ref": "UpdateMasterRequest" + "response": { + "$ref": "Operation" }, - "description": "Updates the master of a specific cluster.", - "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", "clusterId" ], - "response": { - "$ref": "Operation" - }, + "httpMethod": "POST", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { "projectId": { "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", "required": true, + "type": "string", "location": "path" }, "zone": { "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true + "required": true, + "type": "string" }, "clusterId": { "description": "The name of the cluster to upgrade.", - "type": "string", "required": true, + "type": "string", "location": "path" } }, "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/master", "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/master", - "id": "container.projects.zones.clusters.master" + "id": "container.projects.zones.clusters.master", + "description": "Updates the master of a specific cluster.", + "request": { + "$ref": 
"UpdateMasterRequest" + } }, "setMasterAuth": { - "description": "Used to set master auth materials. Currently supports :-\nChanging the admin password of a specific cluster.\nThis can be either via password generation or explicitly set the password.", + "id": "container.projects.zones.clusters.setMasterAuth", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", "request": { "$ref": "SetMasterAuthRequest" }, + "description": "Used to set master auth materials. Currently supports :-\nChanging the admin password of a specific cluster.\nThis can be either via password generation or explicitly set the password.", "httpMethod": "POST", "parameterOrder": [ "projectId", @@ -646,87 +274,466 @@ "$ref": "Operation" }, "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", + "clusterId": { + "location": "path", + "description": "The name of the cluster to upgrade.", "required": true, - "location": "path" + "type": "string" + }, + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth" + }, + "logging": { + "description": "Sets the logging service of a specific cluster.", + "request": { + "$ref": "SetLoggingServiceRequest" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "httpMethod": "POST", + "scopes": [ + 
"https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "clusterId": { + "location": "path", + "description": "The name of the cluster to upgrade.", + "required": true, + "type": "string" + }, + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" }, "zone": { "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/logging", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/logging", + "id": "container.projects.zones.clusters.logging" + }, + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "projectId", + "zone" + ], + "response": { + "$ref": "ListClustersResponse" + }, + "parameters": { + "projectId": { + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string", "location": "path" }, - "clusterId": { - "description": "The name of the cluster to upgrade.", - "type": "string", + "zone": { + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides, or \"-\" for all zones.", "required": true, + "type": "string", "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", - "id": "container.projects.zones.clusters.setMasterAuth" + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", + "id": "container.projects.zones.clusters.list", + "path": 
"v1/projects/{projectId}/zones/{zone}/clusters", + "description": "Lists all clusters owned by a project in either the specified zone or all\nzones." + }, + "resourceLabels": { + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "type": "string" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" + }, + "clusterId": { + "description": "The name of the cluster.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/resourceLabels", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/resourceLabels", + "id": "container.projects.zones.clusters.resourceLabels", + "description": "Sets labels on a cluster.", + "request": { + "$ref": "SetLabelsRequest" + } + }, + "create": { + "path": "v1/projects/{projectId}/zones/{zone}/clusters", + "id": "container.projects.zones.clusters.create", + "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. 
After cluster creation,\nthe cluster creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range is being used by the cluster.", + "request": { + "$ref": "CreateClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "zone" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters" + }, + "completeIpRotation": { + "parameters": { + "clusterId": { + "location": "path", + "description": "The name of the cluster.", + "required": true, + "type": "string" + }, + "projectId": { + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "type": "string", + "location": "path" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:completeIpRotation", + "id": "container.projects.zones.clusters.completeIpRotation", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:completeIpRotation", + "request": { + "$ref": 
"CompleteIPRotationRequest" + }, + "description": "Completes master IP rotation.", + "httpMethod": "POST", + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "response": { + "$ref": "Operation" + } + }, + "setNetworkPolicy": { + "description": "Enables/Disables Network Policy for a cluster.", + "request": { + "$ref": "SetNetworkPolicyRequest" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "clusterId": { + "location": "path", + "description": "The name of the cluster.", + "required": true, + "type": "string" + }, + "projectId": { + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "type": "string", + "location": "path" + }, + "zone": { + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy", + "id": "container.projects.zones.clusters.setNetworkPolicy" + }, + "legacyAbac": { + "description": "Enables or disables the ABAC authorization mechanism on a cluster.", + "request": { + "$ref": "SetLegacyAbacRequest" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + 
"zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" + }, + "clusterId": { + "description": "The name of the cluster to update.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/legacyAbac", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/legacyAbac", + "id": "container.projects.zones.clusters.legacyAbac" + }, + "get": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "clusterId": { + "description": "The name of the cluster to retrieve.", + "required": true, + "type": "string", + "location": "path" + }, + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "id": "container.projects.zones.clusters.get", + "description": "Gets the details of a specific cluster.", + "response": { + "$ref": "Cluster" + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "httpMethod": "GET" + }, + "startIpRotation": { + "description": "Start master IP rotation.", + "request": { + "$ref": "StartIPRotationRequest" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "clusterId": { + 
"location": "path", + "description": "The name of the cluster.", + "required": true, + "type": "string" + }, + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation", + "id": "container.projects.zones.clusters.startIpRotation" + }, + "addons": { + "parameters": { + "clusterId": { + "description": "The name of the cluster to upgrade.", + "required": true, + "type": "string", + "location": "path" + }, + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/addons", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/addons", + "id": "container.projects.zones.clusters.addons", + "request": { + "$ref": "SetAddonsConfigRequest" + }, + "description": "Sets the addons of a specific cluster.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "httpMethod": "POST" + }, + "setMaintenancePolicy": { + "response": { + "$ref": "Operation" + }, + 
"parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "httpMethod": "POST", + "parameters": { + "clusterId": { + "location": "path", + "description": "The name of the cluster to update.", + "required": true, + "type": "string" + }, + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMaintenancePolicy", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMaintenancePolicy", + "id": "container.projects.zones.clusters.setMaintenancePolicy", + "request": { + "$ref": "SetMaintenancePolicyRequest" + }, + "description": "Sets the maintenance policy for a cluster." 
} }, "resources": { "nodePools": { "methods": { - "autoscaling": { - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling", - "id": "container.projects.zones.clusters.nodePools.autoscaling", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling", - "description": "Sets the autoscaling settings of a specific node pool.", - "request": { - "$ref": "SetNodePoolAutoscalingRequest" - }, - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "projectId", - "zone", - "clusterId", - "nodePoolId" - ], - "httpMethod": "POST", - "parameters": { - "projectId": { - "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true - }, - "zone": { - "location": "path", - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true - }, - "clusterId": { - "description": "The name of the cluster to upgrade.", - "type": "string", - "required": true, - "location": "path" - }, - "nodePoolId": { - "description": "The name of the node pool to upgrade.", - "type": "string", - "required": true, - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, "get": { - "description": "Retrieves the node pool requested.", - "httpMethod": "GET", "response": { "$ref": "NodePool" }, @@ -736,86 +743,88 @@ "clusterId", "nodePoolId" ], - "parameters": { - "nodePoolId": { - "location": "path", - "description": "The name of the node pool.", - "type": "string", - "required": true - }, - "projectId": { - "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", - "type": "string", - "required": true - }, - "zone": { - 
"location": "path", - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true - }, - "clusterId": { - "description": "The name of the cluster.", - "type": "string", - "required": true, - "location": "path" - } - }, + "httpMethod": "GET", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], + "parameters": { + "projectId": { + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "type": "string", + "location": "path" + }, + "zone": { + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string", + "location": "path" + }, + "clusterId": { + "description": "The name of the cluster.", + "required": true, + "type": "string", + "location": "path" + }, + "nodePoolId": { + "description": "The name of the node pool.", + "required": true, + "type": "string", + "location": "path" + } + }, "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", - "id": "container.projects.zones.clusters.nodePools.get" + "id": "container.projects.zones.clusters.nodePools.get", + "description": "Retrieves the node pool requested." 
}, "update": { - "description": "Updates the version and/or image type of a specific node pool.", + "id": "container.projects.zones.clusters.nodePools.update", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update", "request": { "$ref": "UpdateNodePoolRequest" }, - "response": { - "$ref": "Operation" - }, + "description": "Updates the version and/or image type of a specific node pool.", + "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", "clusterId", "nodePoolId" ], - "httpMethod": "POST", + "response": { + "$ref": "Operation" + }, "parameters": { "projectId": { "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true + "required": true, + "type": "string" }, "zone": { + "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", "required": true, - "location": "path" + "type": "string" }, "clusterId": { - "location": "path", "description": "The name of the cluster to upgrade.", + "required": true, "type": "string", - "required": true + "location": "path" }, "nodePoolId": { "description": "The name of the node pool to upgrade.", - "type": "string", "required": true, + "type": "string", "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update", - "id": "container.projects.zones.clusters.nodePools.update", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update" + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update" }, "delete": { "description": "Deletes a node pool from a cluster.", @@ -833,225 +842,218 @@ "https://www.googleapis.com/auth/cloud-platform" 
], "parameters": { - "projectId": { - "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", - "type": "string", - "required": true - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, "clusterId": { "location": "path", "description": "The name of the cluster.", - "type": "string", - "required": true + "required": true, + "type": "string" }, "nodePoolId": { "location": "path", "description": "The name of the node pool to delete.", + "required": true, + "type": "string" + }, + "projectId": { + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, "type": "string", - "required": true + "location": "path" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" } }, "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", - "id": "container.projects.zones.clusters.nodePools.delete", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}" + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", + "id": "container.projects.zones.clusters.nodePools.delete" }, - "setSize": { - "request": { - "$ref": "SetNodePoolSizeRequest" + "setManagement": { + "response": { + "$ref": "Operation" }, - "description": "Sets the size of a specific node pool.", - "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", "clusterId", "nodePoolId" ], - "response": { - "$ref": "Operation" + "httpMethod": "POST", + "parameters": { + "projectId": { + "location": 
"path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" + }, + "clusterId": { + "location": "path", + "description": "The name of the cluster to update.", + "required": true, + "type": "string" + }, + "nodePoolId": { + "description": "The name of the node pool to update.", + "required": true, + "type": "string", + "location": "path" + } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "parameters": { - "nodePoolId": { - "location": "path", - "description": "The name of the node pool to update.", - "type": "string", - "required": true - }, - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true, - "location": "path" - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, - "clusterId": { - "description": "The name of the cluster to update.", - "type": "string", - "required": true, - "location": "path" - } - }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setSize", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setSize", - "id": "container.projects.zones.clusters.nodePools.setSize" - }, - "setManagement": { "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement", "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement", "id": 
"container.projects.zones.clusters.nodePools.setManagement", "request": { "$ref": "SetNodePoolManagementRequest" }, - "description": "Sets the NodeManagement options for a node pool.", - "httpMethod": "POST", - "parameterOrder": [ - "projectId", - "zone", - "clusterId", - "nodePoolId" - ], - "response": { - "$ref": "Operation" - }, + "description": "Sets the NodeManagement options for a node pool." + }, + "setSize": { "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true, - "location": "path" - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, "clusterId": { + "location": "path", "description": "The name of the cluster to update.", - "type": "string", "required": true, - "location": "path" + "type": "string" }, "nodePoolId": { "location": "path", "description": "The name of the node pool to update.", - "type": "string", - "required": true - } - } - }, - "list": { - "httpMethod": "GET", - "parameterOrder": [ - "projectId", - "zone", - "clusterId" - ], - "response": { - "$ref": "ListNodePoolsResponse" - }, - "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", - "type": "string", "required": true, - "location": "path" + "type": "string" + }, + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the 
cluster\nresides.", - "type": "string", - "required": true, - "location": "path" - }, - "clusterId": { "location": "path", - "description": "The name of the cluster.", - "type": "string", - "required": true + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", - "id": "container.projects.zones.clusters.nodePools.list", - "description": "Lists the node pools for a cluster." - }, - "rollback": { - "description": "Roll back the previously Aborted or Failed NodePool upgrade.\nThis will be an no-op if the last upgrade successfully completed.", + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setSize", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setSize", + "id": "container.projects.zones.clusters.nodePools.setSize", + "description": "Sets the size of a specific node pool.", "request": { - "$ref": "RollbackNodePoolUpgradeRequest" + "$ref": "SetNodePoolSizeRequest" + }, + "response": { + "$ref": "Operation" }, - "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", "clusterId", "nodePoolId" ], + "httpMethod": "POST" + }, + "list": { + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", + "id": "container.projects.zones.clusters.nodePools.list", + "description": "Lists the node pools for a cluster.", "response": { - "$ref": "Operation" + "$ref": "ListNodePoolsResponse" }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { + "clusterId": { + "location": "path", + 
"description": "The name of the cluster.", + "required": true, + "type": "string" + }, + "projectId": { + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "type": "string", + "location": "path" + }, + "zone": { + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools" + }, + "rollback": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "clusterId": { + "description": "The name of the cluster to rollback.", + "required": true, + "type": "string", + "location": "path" + }, "nodePoolId": { "description": "The name of the node pool to rollback.", - "type": "string", "required": true, + "type": "string", "location": "path" }, "projectId": { + "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", "required": true, - "location": "path" + "type": "string" }, "zone": { "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": true - }, - "clusterId": { - "location": "path", - "description": "The name of the cluster to rollback.", - "type": "string", - "required": true + "required": true, + "type": "string" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", - "id": "container.projects.zones.clusters.nodePools.rollback" + "id": 
"container.projects.zones.clusters.nodePools.rollback", + "description": "Roll back the previously Aborted or Failed NodePool upgrade.\nThis will be an no-op if the last upgrade successfully completed.", + "request": { + "$ref": "RollbackNodePoolUpgradeRequest" + }, + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId", + "nodePoolId" + ], + "httpMethod": "POST" }, "create": { - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", - "id": "container.projects.zones.clusters.nodePools.create", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", - "request": { - "$ref": "CreateNodePoolRequest" - }, - "description": "Creates a node pool for a cluster.", "response": { "$ref": "Operation" }, @@ -1068,22 +1070,77 @@ "projectId": { "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", - "type": "string", - "required": true + "required": true, + "type": "string" }, "zone": { + "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", "required": true, - "location": "path" + "type": "string" }, "clusterId": { + "location": "path", "description": "The name of the cluster.", - "type": "string", "required": true, - "location": "path" + "type": "string" } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", + "id": "container.projects.zones.clusters.nodePools.create", + "description": "Creates a node pool for a cluster.", + "request": { + "$ref": "CreateNodePoolRequest" } + }, + "autoscaling": { + "description": "Sets the autoscaling settings of a specific node pool.", + "request": { + "$ref": "SetNodePoolAutoscalingRequest" + }, + 
"response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId", + "nodePoolId" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "clusterId": { + "location": "path", + "description": "The name of the cluster to upgrade.", + "required": true, + "type": "string" + }, + "nodePoolId": { + "description": "The name of the node pool to upgrade.", + "required": true, + "type": "string", + "location": "path" + }, + "projectId": { + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string", + "location": "path" + }, + "zone": { + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling", + "id": "container.projects.zones.clusters.nodePools.autoscaling" } } } @@ -1091,76 +1148,33 @@ }, "operations": { "methods": { - "get": { - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "projectId", - "zone", - "operationId" - ], - "httpMethod": "GET", + "cancel": { "parameters": { "operationId": { "description": "The server-assigned `name` of the operation.", - "type": "string", "required": true, + "type": "string", "location": "path" }, "projectId": { - "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true - }, - "zone": { - "location": "path", - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - 
"type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}", - "id": "container.projects.zones.operations.get", - "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}", - "description": "Gets the specified operation." - }, - "list": { - "httpMethod": "GET", - "parameterOrder": [ - "projectId", - "zone" - ], - "response": { - "$ref": "ListOperationsResponse" - }, - "parameters": { - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", "required": true, + "type": "string", "location": "path" }, "zone": { - "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for, or `-` for all zones.", - "type": "string", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation resides.", "required": true, + "type": "string", "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/operations", - "path": "v1/projects/{projectId}/zones/{zone}/operations", - "id": "container.projects.zones.operations.list", - "description": "Lists all operations in a project in a specific zone or all zones." 
- }, - "cancel": { + "flatPath": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel", + "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel", + "id": "container.projects.zones.operations.cancel", "request": { "$ref": "CancelOperationRequest" }, @@ -1173,7 +1187,40 @@ "zone", "operationId" ], - "httpMethod": "POST", + "httpMethod": "POST" + }, + "list": { + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "projectId": { + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for, or `-` for all zones.", + "required": true, + "type": "string", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/operations", + "path": "v1/projects/{projectId}/zones/{zone}/operations", + "id": "container.projects.zones.operations.list", + "description": "Lists all operations in a project in a specific zone or all zones.", + "response": { + "$ref": "ListOperationsResponse" + }, + "parameterOrder": [ + "projectId", + "zone" + ], + "httpMethod": "GET" + }, + "get": { "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], @@ -1181,25 +1228,35 @@ "operationId": { "location": "path", "description": "The server-assigned `name` of the operation.", - "type": "string", - "required": true + "required": true, + "type": "string" }, "projectId": { - "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "required": true, "type": "string", - "required": true + "location": "path" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation resides.", - 
"type": "string", + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "required": true, - "location": "path" + "type": "string" } }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel", - "id": "container.projects.zones.operations.cancel", - "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel" + "flatPath": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}", + "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}", + "id": "container.projects.zones.operations.get", + "description": "Gets the specified operation.", + "response": { + "$ref": "Operation" + }, + "parameterOrder": [ + "projectId", + "zone", + "operationId" + ], + "httpMethod": "GET" } } } @@ -1209,31 +1266,25 @@ } }, "parameters": { - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", "location": "query" }, "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "oauth_token": { "location": "query", - "description": "OAuth 2.0 token for the current user.", + "description": "OAuth bearer token.", "type": "string" }, "upload_protocol": { + "location": "query", "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" + "type": "string" }, "prettyPrint": { "description": "Returns response with indentations and line breaks.", - "default": "true", "type": "boolean", + "default": "true", "location": "query" }, "fields": { @@ -1242,9 +1293,9 @@ "location": "query" }, "uploadType": { - "location": "query", "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", - "type": "string" + "type": "string", + "location": "query" }, "$.xgafv": { "enumDescriptions": [ @@ -1260,11 +1311,13 @@ "type": "string" }, "callback": { + "location": "query", "description": "JSONP", - "type": "string", - "location": "query" + "type": "string" }, "alt": { + "description": "Data format for response.", + "default": "json", "enum": [ "json", "media", @@ -1276,43 +1329,144 @@ "Media download with context-dependent Content-Type", "Responses with Content-Type of application/x-protobuf" ], - "location": "query", - "description": "Data format for response.", - "default": "json" - }, - "key": { - "location": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string" + "location": "query" }, "access_token": { - "location": "query", "description": "OAuth access token.", - "type": "string" + "type": "string", + "location": "query" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" }, "quotaUser": { + "location": "query", "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", + "type": "string" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", "location": "query" } }, + "version": "v1", + "baseUrl": "https://container.googleapis.com/", + "kind": "discovery#restDescription", + "description": "The Google Kubernetes Engine API is used for building and managing container based applications, powered by the open source Kubernetes technology.", + "servicePath": "", + "basePath": "", + "revision": "20171127", + "documentationLink": "https://cloud.google.com/container-engine/", + "id": "container:v1", + "discoveryVersion": "v1", "schemas": { - "CancelOperationRequest": { - "description": "CancelOperationRequest cancels a single operation.", - "type": "object", - "properties": {}, - "id": "CancelOperationRequest" - }, - "KubernetesDashboard": { - "description": "Configuration for the Kubernetes Dashboard.", + "MasterAuthorizedNetworksConfig": { + "id": "MasterAuthorizedNetworksConfig", + "description": "Master authorized networks is a Beta feature.\nConfiguration options for the master authorized networks feature. 
Enabled\nmaster authorized networks will disallow all external traffic to access\nKubernetes master through HTTPS except traffic from the given CIDR blocks,\nGoogle Compute Engine Public IPs and Google Prod IPs.", "type": "object", "properties": { - "disabled": { - "description": "Whether the Kubernetes Dashboard is enabled for this cluster.", + "enabled": { + "description": "Whether or not master authorized networks is enabled.", + "type": "boolean" + }, + "cidrBlocks": { + "description": "cidr_blocks define up to 10 external networks that could access\nKubernetes master through HTTPS.", + "type": "array", + "items": { + "$ref": "CidrBlock" + } + } + } + }, + "SetNodePoolManagementRequest": { + "id": "SetNodePoolManagementRequest", + "description": "SetNodePoolManagementRequest sets the node management properties of a node\npool.", + "type": "object", + "properties": { + "management": { + "$ref": "NodeManagement", + "description": "NodeManagement configuration for the node pool." + } + } + }, + "SetNodePoolAutoscalingRequest": { + "description": "SetNodePoolAutoscalingRequest sets the autoscaler settings of a node pool.", + "type": "object", + "properties": { + "autoscaling": { + "$ref": "NodePoolAutoscaling", + "description": "Autoscaling configuration for the node pool." + } + }, + "id": "SetNodePoolAutoscalingRequest" + }, + "CreateClusterRequest": { + "id": "CreateClusterRequest", + "description": "CreateClusterRequest creates a cluster.", + "type": "object", + "properties": { + "cluster": { + "$ref": "Cluster", + "description": "A [cluster\nresource](/container-engine/reference/rest/v1/projects.zones.clusters)" + } + } + }, + "LegacyAbac": { + "description": "Configuration for the legacy Attribute Based Access Control authorization\nmode.", + "type": "object", + "properties": { + "enabled": { + "description": "Whether the ABAC authorizer is enabled for this cluster. 
When enabled,\nidentities in the system, including service accounts, nodes, and\ncontrollers, will have statically granted permissions beyond those\nprovided by the RBAC configuration or IAM.", "type": "boolean" } }, - "id": "KubernetesDashboard" + "id": "LegacyAbac" + }, + "AcceleratorConfig": { + "id": "AcceleratorConfig", + "description": "AcceleratorConfig represents a Hardware Accelerator request.", + "type": "object", + "properties": { + "acceleratorCount": { + "description": "The number of the accelerator cards exposed to an instance.", + "format": "int64", + "type": "string" + }, + "acceleratorType": { + "description": "The accelerator type resource name. List of supported accelerators\n[here](/compute/docs/gpus/#Introduction)", + "type": "string" + } + } + }, + "UpdateNodePoolRequest": { + "description": "UpdateNodePoolRequests update a node pool's image and/or version.", + "type": "object", + "properties": { + "nodeVersion": { + "description": "The Kubernetes version to change the nodes to (typically an\nupgrade). Use `-` to upgrade to the latest version supported by\nthe server.", + "type": "string" + }, + "imageType": { + "description": "The desired image type for the node pool.", + "type": "string" + } + }, + "id": "UpdateNodePoolRequest" + }, + "SetAddonsConfigRequest": { + "description": "SetAddonsConfigRequest sets the addons associated with the cluster.", + "type": "object", + "properties": { + "addonsConfig": { + "$ref": "AddonsConfig", + "description": "The desired configurations for the various addons available to run in the\ncluster." 
+ } + }, + "id": "SetAddonsConfigRequest" }, "SetLegacyAbacRequest": { "description": "SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for\na cluster.", @@ -1325,29 +1479,540 @@ }, "id": "SetLegacyAbacRequest" }, + "AddonsConfig": { + "id": "AddonsConfig", + "description": "Configuration for the addons that can be automatically spun up in the\ncluster, enabling additional functionality.", + "type": "object", + "properties": { + "networkPolicyConfig": { + "description": "Configuration for NetworkPolicy. This only tracks whether the addon\nis enabled or not on the Master, it does not track whether network policy\nis enabled for the nodes.", + "$ref": "NetworkPolicyConfig" + }, + "horizontalPodAutoscaling": { + "description": "Configuration for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods.", + "$ref": "HorizontalPodAutoscaling" + }, + "httpLoadBalancing": { + "$ref": "HttpLoadBalancing", + "description": "Configuration for the HTTP (L7) load balancing controller addon, which\nmakes it easy to set up HTTP load balancers for services in a cluster." + }, + "kubernetesDashboard": { + "$ref": "KubernetesDashboard", + "description": "Configuration for the Kubernetes Dashboard." + } + } + }, + "SetLocationsRequest": { + "description": "SetLocationsRequest sets the locations of the cluster.", + "type": "object", + "properties": { + "locations": { + "description": "The desired list of Google Compute Engine\n[locations](/compute/docs/zones#available) in which the cluster's nodes\nshould be located. 
Changing the locations a cluster is in will result\nin nodes being either created or removed from the cluster, depending on\nwhether locations are being added or removed.\n\nThis list must always include the cluster's primary zone.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "SetLocationsRequest" + }, + "SetNodePoolSizeRequest": { + "id": "SetNodePoolSizeRequest", + "description": "SetNodePoolSizeRequest sets the size of a node\npool.", + "type": "object", + "properties": { + "nodeCount": { + "description": "The desired node count for the pool.", + "format": "int32", + "type": "integer" + } + } + }, + "NetworkPolicyConfig": { + "description": "Configuration for NetworkPolicy. This only tracks whether the addon\nis enabled or not on the Master, it does not track whether network policy\nis enabled for the nodes.", + "type": "object", + "properties": { + "disabled": { + "description": "Whether NetworkPolicy is enabled for this cluster.", + "type": "boolean" + } + }, + "id": "NetworkPolicyConfig" + }, + "UpdateClusterRequest": { + "id": "UpdateClusterRequest", + "description": "UpdateClusterRequest updates the settings of a cluster.", + "type": "object", + "properties": { + "update": { + "$ref": "ClusterUpdate", + "description": "A description of the update." + } + } + }, + "Cluster": { + "description": "A Google Kubernetes Engine cluster.", + "type": "object", + "properties": { + "network": { + "description": "The name of the Google Compute Engine\n[network](/compute/docs/networks-and-firewalls#networks) to which the\ncluster is connected. 
If left unspecified, the `default` network\nwill be used.", + "type": "string" + }, + "labelFingerprint": { + "description": "The fingerprint of the set of labels for this cluster.", + "type": "string" + }, + "zone": { + "description": "[Output only] The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "type": "string" + }, + "loggingService": { + "description": "The logging service the cluster should use to write logs.\nCurrently available options:\n\n* `logging.googleapis.com` - the Google Cloud Logging service.\n* `none` - no logs will be exported from the cluster.\n* if left as an empty string,`logging.googleapis.com` will be used.", + "type": "string" + }, + "nodeIpv4CidrSize": { + "description": "[Output only] The size of the address space on each node for hosting\ncontainers. This is provisioned from within the `container_ipv4_cidr`\nrange.", + "format": "int32", + "type": "integer" + }, + "expireTime": { + "description": "[Output only] The time the cluster will be automatically\ndeleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "type": "string" + }, + "masterAuthorizedNetworksConfig": { + "$ref": "MasterAuthorizedNetworksConfig", + "description": "Master authorized networks is a Beta feature.\nThe configuration options for master authorized networks feature." 
+ }, + "statusMessage": { + "description": "[Output only] Additional information about the current status of this\ncluster, if available.", + "type": "string" + }, + "masterAuth": { + "description": "The authentication information for accessing the master endpoint.", + "$ref": "MasterAuth" + }, + "currentMasterVersion": { + "description": "[Output only] The current software version of the master endpoint.", + "type": "string" + }, + "nodeConfig": { + "description": "Parameters used in creating the cluster's nodes.\nSee `nodeConfig` for the description of its properties.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"initial_node_count\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same time.\nFor responses, this field will be populated with the node configuration of\nthe first node pool.\n\nIf unspecified, the defaults are used.", + "$ref": "NodeConfig" + }, + "addonsConfig": { + "$ref": "AddonsConfig", + "description": "Configurations for the various addons available to run in the cluster." + }, + "status": { + "description": "[Output only] The current status of this cluster.", + "type": "string", + "enumDescriptions": [ + "Not set.", + "The PROVISIONING state indicates the cluster is being created.", + "The RUNNING state indicates the cluster has been created and is fully\nusable.", + "The RECONCILING state indicates that some work is actively being done on\nthe cluster, such as upgrading the master or node software. Details can\nbe found in the `statusMessage` field.", + "The STOPPING state indicates the cluster is being deleted.", + "The ERROR state indicates the cluster may be unusable. Details\ncan be found in the `statusMessage` field." 
+ ], + "enum": [ + "STATUS_UNSPECIFIED", + "PROVISIONING", + "RUNNING", + "RECONCILING", + "STOPPING", + "ERROR" + ] + }, + "currentNodeVersion": { + "description": "[Output only] The current version of the node software components.\nIf they are currently at multiple versions because they're in the process\nof being upgraded, this reflects the minimum version of all nodes.", + "type": "string" + }, + "subnetwork": { + "description": "The name of the Google Compute Engine\n[subnetwork](/compute/docs/subnetworks) to which the\ncluster is connected.", + "type": "string" + }, + "name": { + "description": "The name of this cluster. The name must be unique within this project\nand zone, and can be up to 40 characters with the following restrictions:\n\n* Lowercase letters, numbers, and hyphens only.\n* Must start with a letter.\n* Must end with a number or a letter.", + "type": "string" + }, + "resourceLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "The resource labels for the cluster to use to annotate any related\nGoogle Compute Engine resources.", + "type": "object" + }, + "maintenancePolicy": { + "description": "Configure the maintenance policy for this cluster.", + "$ref": "MaintenancePolicy" + }, + "initialClusterVersion": { + "description": "The initial Kubernetes version for this cluster. Valid versions are those\nfound in validMasterVersions returned by getServerConfig. The version can\nbe upgraded over time; such upgrades are reflected in\ncurrentMasterVersion and currentNodeVersion.", + "type": "string" + }, + "ipAllocationPolicy": { + "description": "Configuration for cluster IP allocation.", + "$ref": "IPAllocationPolicy" + }, + "legacyAbac": { + "$ref": "LegacyAbac", + "description": "Configuration for the legacy ABAC authorization mode." 
+ }, + "endpoint": { + "description": "[Output only] The IP address of this cluster's master endpoint.\nThe endpoint can be accessed from the internet at\n`https://username:password@endpoint/`.\n\nSee the `masterAuth` property of this resource for username and\npassword information.", + "type": "string" + }, + "createTime": { + "description": "[Output only] The time the cluster was created, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "type": "string" + }, + "clusterIpv4Cidr": { + "description": "The IP address range of the container pods in this cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`). Leave blank to have\none automatically chosen or specify a `/14` block in `10.0.0.0/8`.", + "type": "string" + }, + "initialNodeCount": { + "description": "The number of nodes to create in this cluster. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. You must also have available\nfirewall and routes quota.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"node_config\") will be used to create a \"NodePool\" object with an\nauto-generated name. 
Do not use this and a node_pool at the same time.", + "format": "int32", + "type": "integer" + }, + "selfLink": { + "description": "[Output only] Server-defined URL for the resource.", + "type": "string" + }, + "locations": { + "description": "The list of Google Compute Engine\n[locations](/compute/docs/zones#available) in which the cluster's nodes\nshould be located.", + "type": "array", + "items": { + "type": "string" + } + }, + "nodePools": { + "description": "The node pools associated with this cluster.\nThis field should not be set if \"node_config\" or \"initial_node_count\" are\nspecified.", + "type": "array", + "items": { + "$ref": "NodePool" + } + }, + "instanceGroupUrls": { + "description": "Deprecated. Use node_pools.instance_group_urls.", + "type": "array", + "items": { + "type": "string" + } + }, + "networkPolicy": { + "$ref": "NetworkPolicy", + "description": "Configuration options for the NetworkPolicy feature." + }, + "servicesIpv4Cidr": { + "description": "[Output only] The IP address range of the Kubernetes services in\nthis cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `1.2.3.4/29`). Service addresses are\ntypically put in the last `/16` from the container CIDR.", + "type": "string" + }, + "enableKubernetesAlpha": { + "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha\nAPI groups (e.g. 
v1alpha1) and features that may not be production ready in\nthe kubernetes version of the master and nodes.\nThe cluster has no SLA for uptime and master/node upgrades are disabled.\nAlpha enabled clusters are automatically deleted thirty days after\ncreation.", + "type": "boolean" + }, + "description": { + "description": "An optional description of this cluster.", + "type": "string" + }, + "currentNodeCount": { + "description": "[Output only] The number of nodes currently in the cluster.", + "format": "int32", + "type": "integer" + }, + "monitoringService": { + "description": "The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* `monitoring.googleapis.com` - the Google Cloud Monitoring service.\n* `none` - no metrics will be exported from the cluster.\n* if left as an empty string, `monitoring.googleapis.com` will be used.", + "type": "string" + } + }, + "id": "Cluster" + }, + "CreateNodePoolRequest": { + "id": "CreateNodePoolRequest", + "description": "CreateNodePoolRequest creates a node pool for a cluster.", + "type": "object", + "properties": { + "nodePool": { + "$ref": "NodePool", + "description": "The node pool to create." + } + } + }, + "MasterAuth": { + "description": "The authentication information for accessing the master endpoint.\nAuthentication can be done using HTTP basic auth or using client\ncertificates.", + "type": "object", + "properties": { + "password": { + "description": "The password to use for HTTP basic authentication to the master endpoint.\nBecause the master endpoint is open to the Internet, you should create a\nstrong password. If a password is provided for cluster creation, username\nmust be non-empty.", + "type": "string" + }, + "clientCertificateConfig": { + "$ref": "ClientCertificateConfig", + "description": "Configuration for client certificate authentication on the cluster. If no\nconfiguration is specified, a client certificate is issued." 
+ }, + "clientKey": { + "description": "[Output only] Base64-encoded private key used by clients to authenticate\nto the cluster endpoint.", + "type": "string" + }, + "clusterCaCertificate": { + "description": "[Output only] Base64-encoded public certificate that is the root of\ntrust for the cluster.", + "type": "string" + }, + "clientCertificate": { + "description": "[Output only] Base64-encoded public certificate used by clients to\nauthenticate to the cluster endpoint.", + "type": "string" + }, + "username": { + "description": "The username to use for HTTP basic authentication to the master endpoint.\nFor clusters v1.6.0 and later, you can disable basic authentication by\nproviding an empty username.", + "type": "string" + } + }, + "id": "MasterAuth" + }, + "DailyMaintenanceWindow": { + "description": "Time window specified for daily maintenance operations.", + "type": "object", + "properties": { + "duration": { + "description": "[Output only] Duration of the time window, automatically chosen to be\nsmallest possible in the given scenario.\nDuration will be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat \"PTnHnMnS\".", + "type": "string" + }, + "startTime": { + "description": "Time within the maintenance window to start the maintenance operations.\nTime format should be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat \"HH:MM\", where HH : [00-23] and MM : [00-59] GMT.", + "type": "string" + } + }, + "id": "DailyMaintenanceWindow" + }, + "ClientCertificateConfig": { + "id": "ClientCertificateConfig", + "description": "Configuration for client certificates on the cluster.", + "type": "object", + "properties": { + "issueClientCertificate": { + "description": "Issue a client certificate.", + "type": "boolean" + } + } + }, + "MaintenancePolicy": { + "id": "MaintenancePolicy", + "description": "MaintenancePolicy defines the maintenance policy to be used for the cluster.", + "type": "object", + "properties": { + "window": { + "description": 
"Specifies the maintenance window in which maintenance may be performed.", + "$ref": "MaintenanceWindow" + } + } + }, + "SetLoggingServiceRequest": { + "description": "SetLoggingServiceRequest sets the logging service of a cluster.", + "type": "object", + "properties": { + "loggingService": { + "description": "The logging service the cluster should use to write metrics.\nCurrently available options:\n\n* \"logging.googleapis.com\" - the Google Cloud Logging service\n* \"none\" - no metrics will be exported from the cluster", + "type": "string" + } + }, + "id": "SetLoggingServiceRequest" + }, + "SetMaintenancePolicyRequest": { + "description": "SetMaintenancePolicyRequest sets the maintenance policy for a cluster.", + "type": "object", + "properties": { + "maintenancePolicy": { + "description": "The maintenance policy to be set for the cluster. An empty field\nclears the existing maintenance policy.", + "$ref": "MaintenancePolicy" + } + }, + "id": "SetMaintenancePolicyRequest" + }, + "Empty": { + "id": "Empty", + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. 
For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {} + }, + "ListNodePoolsResponse": { + "id": "ListNodePoolsResponse", + "description": "ListNodePoolsResponse is the result of ListNodePoolsRequest.", + "type": "object", + "properties": { + "nodePools": { + "description": "A list of node pools for a cluster.", + "type": "array", + "items": { + "$ref": "NodePool" + } + } + } + }, + "CompleteIPRotationRequest": { + "id": "CompleteIPRotationRequest", + "description": "CompleteIPRotationRequest moves the cluster master back into single-IP mode.", + "type": "object", + "properties": {} + }, + "StartIPRotationRequest": { + "id": "StartIPRotationRequest", + "description": "StartIPRotationRequest creates a new IP for the cluster and then performs\na node upgrade on each node pool to point to the new IP.", + "type": "object", + "properties": {} + }, + "NodePool": { + "id": "NodePool", + "description": "NodePool contains the name and configuration for a cluster's node pool.\nNode pools are a set of nodes (i.e. VM's), with a common configuration and\nspecification, under the control of the cluster master. They may have a set\nof Kubernetes labels applied to them, which may be used to reference them\nduring pod scheduling. 
They may also be resized up or down, to accommodate\nthe workload.", + "type": "object", + "properties": { + "version": { + "description": "The version of the Kubernetes of this node.", + "type": "string" + }, + "instanceGroupUrls": { + "description": "[Output only] The resource URLs of the [managed instance\ngroups](/compute/docs/instance-groups/creating-groups-of-managed-instances)\nassociated with this node pool.", + "type": "array", + "items": { + "type": "string" + } + }, + "status": { + "description": "[Output only] The status of the nodes in this pool instance.", + "type": "string", + "enumDescriptions": [ + "Not set.", + "The PROVISIONING state indicates the node pool is being created.", + "The RUNNING state indicates the node pool has been created\nand is fully usable.", + "The RUNNING_WITH_ERROR state indicates the node pool has been created\nand is partially usable. Some error state has occurred and some\nfunctionality may be impaired. Customer may need to reissue a request\nor trigger a new update.", + "The RECONCILING state indicates that some work is actively being done on\nthe node pool, such as upgrading node software. Details can\nbe found in the `statusMessage` field.", + "The STOPPING state indicates the node pool is being deleted.", + "The ERROR state indicates the node pool may be unusable. Details\ncan be found in the `statusMessage` field." + ], + "enum": [ + "STATUS_UNSPECIFIED", + "PROVISIONING", + "RUNNING", + "RUNNING_WITH_ERROR", + "RECONCILING", + "STOPPING", + "ERROR" + ] + }, + "config": { + "description": "The node configuration of the pool.", + "$ref": "NodeConfig" + }, + "statusMessage": { + "description": "[Output only] Additional information about the current status of this\nnode pool instance, if available.", + "type": "string" + }, + "name": { + "description": "The name of the node pool.", + "type": "string" + }, + "autoscaling": { + "$ref": "NodePoolAutoscaling", + "description": "Autoscaler configuration for this NodePool. 
Autoscaler is enabled\nonly if a valid configuration is present." + }, + "initialNodeCount": { + "description": "The initial node count for the pool. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. You must also have available\nfirewall and routes quota.", + "format": "int32", + "type": "integer" + }, + "management": { + "$ref": "NodeManagement", + "description": "NodeManagement configuration for this NodePool." + }, + "selfLink": { + "description": "[Output only] Server-defined URL for the resource.", + "type": "string" + } + } + }, + "SetLabelsRequest": { + "description": "SetLabelsRequest sets the Google Cloud Platform labels on a Google Container\nEngine cluster, which will in turn set them for Google Compute Engine\nresources used by that cluster", + "type": "object", + "properties": { + "resourceLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "The labels to set for that cluster.", + "type": "object" + }, + "labelFingerprint": { + "description": "The fingerprint of the previous set of labels for this resource,\nused to detect conflicts. The fingerprint is initially generated by\nKubernetes Engine and changes after every request to modify or update\nlabels. You must always provide an up-to-date fingerprint hash when\nupdating or changing labels. Make a \u003ccode\u003eget()\u003c/code\u003e request to the\nresource to get the latest fingerprint.", + "type": "string" + } + }, + "id": "SetLabelsRequest" + }, + "NodeManagement": { + "id": "NodeManagement", + "description": "NodeManagement defines the set of node management services turned on for the\nnode pool.", + "type": "object", + "properties": { + "autoUpgrade": { + "description": "A flag that specifies whether node auto-upgrade is enabled for the node\npool. 
If enabled, node auto-upgrade helps keep the nodes in your node pool\nup to date with the latest release version of Kubernetes.", + "type": "boolean" + }, + "autoRepair": { + "description": "A flag that specifies whether the node auto-repair is enabled for the node\npool. If enabled, the nodes in this node pool will be monitored and, if\nthey fail health checks too many times, an automatic repair action will be\ntriggered.", + "type": "boolean" + }, + "upgradeOptions": { + "$ref": "AutoUpgradeOptions", + "description": "Specifies the Auto Upgrade knobs for the node pool." + } + } + }, + "CancelOperationRequest": { + "id": "CancelOperationRequest", + "description": "CancelOperationRequest cancels a single operation.", + "type": "object", + "properties": {} + }, + "KubernetesDashboard": { + "id": "KubernetesDashboard", + "description": "Configuration for the Kubernetes Dashboard.", + "type": "object", + "properties": { + "disabled": { + "description": "Whether the Kubernetes Dashboard is enabled for this cluster.", + "type": "boolean" + } + } + }, "Operation": { "description": "This operation resource represents operations that may have happened or are\nhappening on the cluster. 
All fields are output only.", "type": "object", "properties": { + "statusMessage": { + "description": "If an error has occurred, a textual description of the error.", + "type": "string" + }, + "name": { + "description": "The server-assigned ID for the operation.", + "type": "string" + }, "selfLink": { "description": "Server-defined URL for the resource.", "type": "string" }, - "endTime": { - "description": "[Output only] The time the operation completed, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", - "type": "string" - }, - "targetLink": { - "description": "Server-defined URL for the target of the operation.", - "type": "string" - }, "detail": { "description": "Detailed operation progress, if available.", "type": "string" }, + "targetLink": { + "description": "Server-defined URL for the target of the operation.", + "type": "string" + }, + "endTime": { + "description": "[Output only] The time the operation completed, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "type": "string" + }, "operationType": { - "description": "The operation type.", - "type": "string", "enumDescriptions": [ "Not set.", "Cluster create.", @@ -1364,7 +2029,8 @@ "Set labels.", "Set/generate master auth materials", "Set node pool size.", - "Updates network policy for a cluster." + "Updates network policy for a cluster.", + "Set the maintenance policy." 
], "enum": [ "TYPE_UNSPECIFIED", @@ -1382,8 +2048,11 @@ "SET_LABELS", "SET_MASTER_AUTH", "SET_NODE_POOL_SIZE", - "SET_NETWORK_POLICY" - ] + "SET_NETWORK_POLICY", + "SET_MAINTENANCE_POLICY" + ], + "description": "The operation type.", + "type": "string" }, "startTime": { "description": "[Output only] The time the operation started, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", @@ -1410,81 +2079,29 @@ ], "description": "The current status of the operation.", "type": "string" - }, - "statusMessage": { - "description": "If an error has occurred, a textual description of the error.", - "type": "string" - }, - "name": { - "description": "The server-assigned ID for the operation.", - "type": "string" } }, "id": "Operation" }, - "AddonsConfig": { - "description": "Configuration for the addons that can be automatically spun up in the\ncluster, enabling additional functionality.", + "MaintenanceWindow": { + "id": "MaintenanceWindow", + "description": "MaintenanceWindow defines the maintenance window to be used for the cluster.", "type": "object", "properties": { - "kubernetesDashboard": { - "description": "Configuration for the Kubernetes Dashboard.", - "$ref": "KubernetesDashboard" - }, - "horizontalPodAutoscaling": { - "description": "Configuration for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods.", - "$ref": "HorizontalPodAutoscaling" - }, - "httpLoadBalancing": { - "$ref": "HttpLoadBalancing", - "description": "Configuration for the HTTP (L7) load balancing controller addon, which\nmakes it easy to set up HTTP load balancers for services in a cluster." 
+ "dailyMaintenanceWindow": { + "description": "DailyMaintenanceWindow specifies a daily maintenance operation window.", + "$ref": "DailyMaintenanceWindow" } - }, - "id": "AddonsConfig" - }, - "SetLocationsRequest": { - "description": "SetLocationsRequest sets the locations of the cluster.", - "type": "object", - "properties": { - "locations": { - "description": "The desired list of Google Compute Engine\n[locations](/compute/docs/zones#available) in which the cluster's nodes\nshould be located. Changing the locations a cluster is in will result\nin nodes being either created or removed from the cluster, depending on\nwhether locations are being added or removed.\n\nThis list must always include the cluster's primary zone.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "id": "SetLocationsRequest" + } }, "RollbackNodePoolUpgradeRequest": { + "id": "RollbackNodePoolUpgradeRequest", "description": "RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed\nNodePool upgrade. 
This will be an no-op if the last upgrade successfully\ncompleted.", "type": "object", - "properties": {}, - "id": "RollbackNodePoolUpgradeRequest" - }, - "SetNodePoolSizeRequest": { - "description": "SetNodePoolSizeRequest sets the size a node\npool.", - "type": "object", - "properties": { - "nodeCount": { - "format": "int32", - "description": "The desired node count for the pool.", - "type": "integer" - } - }, - "id": "SetNodePoolSizeRequest" - }, - "UpdateClusterRequest": { - "description": "UpdateClusterRequest updates the settings of a cluster.", - "type": "object", - "properties": { - "update": { - "description": "A description of the update.", - "$ref": "ClusterUpdate" - } - }, - "id": "UpdateClusterRequest" + "properties": {} }, "NetworkPolicy": { + "id": "NetworkPolicy", "description": "Configuration options for the NetworkPolicy feature.\nhttps://kubernetes.io/docs/concepts/services-networking/networkpolicies/", "type": "object", "properties": { @@ -1493,6 +2110,8 @@ "type": "boolean" }, "provider": { + "description": "The selected network policy provider.", + "type": "string", "enumDescriptions": [ "Not set", "Tigera (Calico Felix)." @@ -1500,229 +2119,41 @@ "enum": [ "PROVIDER_UNSPECIFIED", "CALICO" - ], - "description": "The selected network policy provider.", - "type": "string" + ] } - }, - "id": "NetworkPolicy" + } }, "UpdateMasterRequest": { + "id": "UpdateMasterRequest", "description": "UpdateMasterRequest updates the master of the cluster.", "type": "object", "properties": { "masterVersion": { - "description": "The Kubernetes version to change the master to. The only valid value is the\nlatest supported version. Use \"-\" to have the server automatically select\nthe latest version.", + "description": "The Kubernetes version to change the master to. 
Use \"-\" to have the server\nautomatically select the default version.", "type": "string" } - }, - "id": "UpdateMasterRequest" - }, - "Cluster": { - "description": "A Google Container Engine cluster.", - "type": "object", - "properties": { - "createTime": { - "description": "[Output only] The time the cluster was created, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", - "type": "string" - }, - "clusterIpv4Cidr": { - "description": "The IP address range of the container pods in this cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`). Leave blank to have\none automatically chosen or specify a `/14` block in `10.0.0.0/8`.", - "type": "string" - }, - "initialNodeCount": { - "format": "int32", - "description": "The number of nodes to create in this cluster. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. You must also have available\nfirewall and routes quota.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"node_config\") will be used to create a \"NodePool\" object with an\nauto-generated name. 
Do not use this and a node_pool at the same time.", - "type": "integer" - }, - "selfLink": { - "description": "[Output only] Server-defined URL for the resource.", - "type": "string" - }, - "nodePools": { - "description": "The node pools associated with this cluster.\nThis field should not be set if \"node_config\" or \"initial_node_count\" are\nspecified.", - "items": { - "$ref": "NodePool" - }, - "type": "array" - }, - "locations": { - "description": "The list of Google Compute Engine\n[locations](/compute/docs/zones#available) in which the cluster's nodes\nshould be located.", - "items": { - "type": "string" - }, - "type": "array" - }, - "instanceGroupUrls": { - "description": "[Output only] The resource URLs of [instance\ngroups](/compute/docs/instance-groups/) associated with this\ncluster.", - "items": { - "type": "string" - }, - "type": "array" - }, - "networkPolicy": { - "description": "Configuration options for the NetworkPolicy feature.", - "$ref": "NetworkPolicy" - }, - "servicesIpv4Cidr": { - "description": "[Output only] The IP address range of the Kubernetes services in\nthis cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `1.2.3.4/29`). Service addresses are\ntypically put in the last `/16` from the container CIDR.", - "type": "string" - }, - "enableKubernetesAlpha": { - "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha\nAPI groups (e.g. 
v1alpha1) and features that may not be production ready in\nthe kubernetes version of the master and nodes.\nThe cluster has no SLA for uptime and master/node upgrades are disabled.\nAlpha enabled clusters are automatically deleted thirty days after\ncreation.", - "type": "boolean" - }, - "description": { - "description": "An optional description of this cluster.", - "type": "string" - }, - "currentNodeCount": { - "format": "int32", - "description": "[Output only] The number of nodes currently in the cluster.", - "type": "integer" - }, - "monitoringService": { - "description": "The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* `monitoring.googleapis.com` - the Google Cloud Monitoring service.\n* `none` - no metrics will be exported from the cluster.\n* if left as an empty string, `monitoring.googleapis.com` will be used.", - "type": "string" - }, - "network": { - "description": "The name of the Google Compute Engine\n[network](/compute/docs/networks-and-firewalls#networks) to which the\ncluster is connected. If left unspecified, the `default` network\nwill be used.", - "type": "string" - }, - "labelFingerprint": { - "description": "The fingerprint of the set of labels for this cluster.", - "type": "string" - }, - "zone": { - "description": "[Output only] The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string" - }, - "expireTime": { - "description": "[Output only] The time the cluster will be automatically\ndeleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", - "type": "string" - }, - "nodeIpv4CidrSize": { - "format": "int32", - "description": "[Output only] The size of the address space on each node for hosting\ncontainers. 
This is provisioned from within the `container_ipv4_cidr`\nrange.", - "type": "integer" - }, - "loggingService": { - "description": "The logging service the cluster should use to write logs.\nCurrently available options:\n\n* `logging.googleapis.com` - the Google Cloud Logging service.\n* `none` - no logs will be exported from the cluster.\n* if left as an empty string,`logging.googleapis.com` will be used.", - "type": "string" - }, - "masterAuthorizedNetworksConfig": { - "description": "Master authorized networks is a Beta feature.\nThe configuration options for master authorized networks feature.", - "$ref": "MasterAuthorizedNetworksConfig" - }, - "statusMessage": { - "description": "[Output only] Additional information about the current status of this\ncluster, if available.", - "type": "string" - }, - "masterAuth": { - "description": "The authentication information for accessing the master endpoint.", - "$ref": "MasterAuth" - }, - "currentMasterVersion": { - "description": "[Output only] The current software version of the master endpoint.", - "type": "string" - }, - "nodeConfig": { - "$ref": "NodeConfig", - "description": "Parameters used in creating the cluster's nodes.\nSee `nodeConfig` for the description of its properties.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"initial_node_count\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same time.\nFor responses, this field will be populated with the node configuration of\nthe first node pool.\n\nIf unspecified, the defaults are used." 
- }, - "addonsConfig": { - "description": "Configurations for the various addons available to run in the cluster.", - "$ref": "AddonsConfig" - }, - "status": { - "enumDescriptions": [ - "Not set.", - "The PROVISIONING state indicates the cluster is being created.", - "The RUNNING state indicates the cluster has been created and is fully\nusable.", - "The RECONCILING state indicates that some work is actively being done on\nthe cluster, such as upgrading the master or node software. Details can\nbe found in the `statusMessage` field.", - "The STOPPING state indicates the cluster is being deleted.", - "The ERROR state indicates the cluster may be unusable. Details\ncan be found in the `statusMessage` field." - ], - "enum": [ - "STATUS_UNSPECIFIED", - "PROVISIONING", - "RUNNING", - "RECONCILING", - "STOPPING", - "ERROR" - ], - "description": "[Output only] The current status of this cluster.", - "type": "string" - }, - "subnetwork": { - "description": "The name of the Google Compute Engine\n[subnetwork](/compute/docs/subnetworks) to which the\ncluster is connected.", - "type": "string" - }, - "currentNodeVersion": { - "description": "[Output only] The current version of the node software components.\nIf they are currently at multiple versions because they're in the process\nof being upgraded, this reflects the minimum version of all nodes.", - "type": "string" - }, - "name": { - "description": "The name of this cluster. 
The name must be unique within this project\nand zone, and can be up to 40 characters with the following restrictions:\n\n* Lowercase letters, numbers, and hyphens only.\n* Must start with a letter.\n* Must end with a number or a letter.", - "type": "string" - }, - "resourceLabels": { - "description": "The resource labels for the cluster to use to annotate any related\nGoogle Compute Engine resources.", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "initialClusterVersion": { - "description": "The initial Kubernetes version for this cluster. Valid versions are those\nfound in validMasterVersions returned by getServerConfig. The version can\nbe upgraded over time; such upgrades are reflected in\ncurrentMasterVersion and currentNodeVersion.", - "type": "string" - }, - "ipAllocationPolicy": { - "description": "Configuration for cluster IP allocation.", - "$ref": "IPAllocationPolicy" - }, - "legacyAbac": { - "$ref": "LegacyAbac", - "description": "Configuration for the legacy ABAC authorization mode." 
- }, - "endpoint": { - "description": "[Output only] The IP address of this cluster's master endpoint.\nThe endpoint can be accessed from the internet at\n`https://username:password@endpoint/`.\n\nSee the `masterAuth` property of this resource for username and\npassword information.", - "type": "string" - } - }, - "id": "Cluster" - }, - "CreateNodePoolRequest": { - "description": "CreateNodePoolRequest creates a node pool for a cluster.", - "type": "object", - "properties": { - "nodePool": { - "description": "The node pool to create.", - "$ref": "NodePool" - } - }, - "id": "CreateNodePoolRequest" + } }, "ListOperationsResponse": { + "id": "ListOperationsResponse", "description": "ListOperationsResponse is the result of ListOperationsRequest.", "type": "object", "properties": { "operations": { "description": "A list of operations in the project in the specified zone.", + "type": "array", "items": { "$ref": "Operation" - }, - "type": "array" + } }, "missingZones": { "description": "If any zones are listed here, the list of operations returned\nmay be missing the operations from those zones.", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } } - }, - "id": "ListOperationsResponse" + } }, "SetMonitoringServiceRequest": { "description": "SetMonitoringServiceRequest sets the monitoring service of a cluster.", @@ -1739,151 +2170,124 @@ "description": "CidrBlock contains an optional name and one CIDR block.", "type": "object", "properties": { - "displayName": { - "description": "display_name is an optional field for users to identify CIDR blocks.", - "type": "string" - }, "cidrBlock": { "description": "cidr_block must be specified in CIDR notation.", "type": "string" + }, + "displayName": { + "description": "display_name is an optional field for users to identify CIDR blocks.", + "type": "string" } }, "id": "CidrBlock" }, "ServerConfig": { - "description": "Container Engine service configuration.", + "id": "ServerConfig", + "description": 
"Kubernetes Engine service configuration.", "type": "object", "properties": { - "validMasterVersions": { - "description": "List of valid master versions.", + "validNodeVersions": { + "description": "List of valid node upgrade target versions.", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } }, - "defaultImageType": { - "description": "Default image type.", - "type": "string" + "validImageTypes": { + "description": "List of valid image types.", + "type": "array", + "items": { + "type": "string" + } + }, + "validMasterVersions": { + "description": "List of valid master versions.", + "type": "array", + "items": { + "type": "string" + } }, "defaultClusterVersion": { "description": "Version of Kubernetes the service deploys by default.", "type": "string" }, - "validImageTypes": { - "description": "List of valid image types.", - "items": { - "type": "string" - }, - "type": "array" - }, - "validNodeVersions": { - "description": "List of valid node upgrade target versions.", - "items": { - "type": "string" - }, - "type": "array" + "defaultImageType": { + "description": "Default image type.", + "type": "string" } - }, - "id": "ServerConfig" + } }, "NodeConfig": { + "id": "NodeConfig", "description": "Parameters that describe the nodes in a cluster.", "type": "object", "properties": { - "metadata": { - "additionalProperties": { - "type": "string" - }, - "description": "The metadata key/value pairs assigned to instances in the cluster.\n\nKeys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes\nin length. These are reflected as part of a URL in the metadata server.\nAdditionally, to avoid ambiguity, keys must not conflict with any other\nmetadata keys for the project or be one of the four reserved keys:\n\"instance-template\", \"kube-env\", \"startup-script\", and \"user-data\"\n\nValues are free-form strings, and only have meaning as interpreted by\nthe image running in the instance. 
The only restriction placed on them is\nthat each value's size must be less than or equal to 32 KB.\n\nThe total size of all keys and values must be less than 512 KB.", - "type": "object" - }, - "diskSizeGb": { - "format": "int32", - "description": "Size of the disk attached to each node, specified in GB.\nThe smallest allowed disk size is 10GB.\n\nIf unspecified, the default disk size is 100GB.", - "type": "integer" - }, "tags": { "description": "The list of instance tags applied to all nodes. Tags are used to identify\nvalid sources or targets for network firewalls and are specified by\nthe client during cluster or node pool creation. Each tag within the list\nmust comply with RFC1035.", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } }, "serviceAccount": { "description": "The Google Cloud Platform Service Account to be used by the node VMs. If\nno Service Account is specified, the \"default\" service account is used.", "type": "string" }, - "accelerators": { - "description": "A list of hardware accelerators to be attached to each node.\nSee https://cloud.google.com/compute/docs/gpus for more information about\nsupport for GPUs.", - "items": { - "$ref": "AcceleratorConfig" - }, - "type": "array" - }, - "machineType": { - "description": "The name of a Google Compute Engine [machine\ntype](/compute/docs/machine-types) (e.g.\n`n1-standard-1`).\n\nIf unspecified, the default machine type is\n`n1-standard-1`.", - "type": "string" - }, "imageType": { "description": "The image type to use for this node. 
Note that for a given image type,\nthe latest version of it will be used.", "type": "string" }, "oauthScopes": { "description": "The set of Google API scopes to be made available on all of the\nnode VMs under the \"default\" service account.\n\nThe following scopes are recommended, but not required, and by default are\nnot included:\n\n* `https://www.googleapis.com/auth/compute` is required for mounting\npersistent storage on your nodes.\n* `https://www.googleapis.com/auth/devstorage.read_only` is required for\ncommunicating with **gcr.io**\n(the [Google Container Registry](/container-registry/)).\n\nIf unspecified, no scopes are added, unless Cloud Logging or Cloud\nMonitoring are enabled, in which case their required scopes will be added.", + "type": "array", "items": { "type": "string" + } + }, + "labels": { + "additionalProperties": { + "type": "string" }, - "type": "array" + "description": "The map of Kubernetes labels (key/value pairs) to be applied to each node.\nThese will added in addition to any default label(s) that\nKubernetes may apply to the node.\nIn case of conflict in label keys, the applied set may differ depending on\nthe Kubernetes version -- it's best to assume the behavior is undefined\nand conflicts should be avoided.\nFor more information, including usage and the valid values, see:\nhttps://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "type": "object" + }, + "diskSizeGb": { + "description": "Size of the disk attached to each node, specified in GB.\nThe smallest allowed disk size is 10GB.\n\nIf unspecified, the default disk size is 100GB.", + "format": "int32", + "type": "integer" + }, + "accelerators": { + "description": "A list of hardware accelerators to be attached to each node.\nSee https://cloud.google.com/compute/docs/gpus for more information about\nsupport for GPUs.", + "type": "array", + "items": { + "$ref": "AcceleratorConfig" + } + }, + "machineType": { + "description": "The name of a Google Compute Engine 
[machine\ntype](/compute/docs/machine-types) (e.g.\n`n1-standard-1`).\n\nIf unspecified, the default machine type is\n`n1-standard-1`.", + "type": "string" + }, + "minCpuPlatform": { + "description": "Minimum CPU platform to be used by this instance. The instance may be\nscheduled on the specified or newer CPU platform. Applicable values are the\nfriendly names of CPU platforms, such as\n\u003ccode\u003eminCpuPlatform: "Intel Haswell"\u003c/code\u003e or\n\u003ccode\u003eminCpuPlatform: "Intel Sandy Bridge"\u003c/code\u003e. For more\ninformation, read [how to specify min CPU platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)", + "type": "string" }, "preemptible": { "description": "Whether the nodes are created as preemptible VM instances. See:\nhttps://cloud.google.com/compute/docs/instances/preemptible for more\ninformation about preemptible VM instances.", "type": "boolean" }, - "labels": { - "description": "The map of Kubernetes labels (key/value pairs) to be applied to each node.\nThese will added in addition to any default label(s) that\nKubernetes may apply to the node.\nIn case of conflict in label keys, the applied set may differ depending on\nthe Kubernetes version -- it's best to assume the behavior is undefined\nand conflicts should be avoided.\nFor more information, including usage and the valid values, see:\nhttps://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", - "type": "object", + "localSsdCount": { + "description": "The number of local SSD disks to be attached to the node.\n\nThe limit for this value is dependant upon the maximum number of\ndisks available on a machine per zone. 
See:\nhttps://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits\nfor more information.", + "format": "int32", + "type": "integer" + }, + "metadata": { "additionalProperties": { "type": "string" - } - }, - "localSsdCount": { - "format": "int32", - "description": "The number of local SSD disks to be attached to the node.\n\nThe limit for this value is dependant upon the maximum number of\ndisks available on a machine per zone. See:\nhttps://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits\nfor more information.", - "type": "integer" + }, + "description": "The metadata key/value pairs assigned to instances in the cluster.\n\nKeys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes\nin length. These are reflected as part of a URL in the metadata server.\nAdditionally, to avoid ambiguity, keys must not conflict with any other\nmetadata keys for the project or be one of the four reserved keys:\n\"instance-template\", \"kube-env\", \"startup-script\", and \"user-data\"\n\nValues are free-form strings, and only have meaning as interpreted by\nthe image running in the instance. The only restriction placed on them is\nthat each value's size must be less than or equal to 32 KB.\n\nThe total size of all keys and values must be less than 512 KB.", + "type": "object" } - }, - "id": "NodeConfig" - }, - "MasterAuth": { - "description": "The authentication information for accessing the master endpoint.\nAuthentication can be done using HTTP basic auth or using client\ncertificates.", - "type": "object", - "properties": { - "clientCertificateConfig": { - "$ref": "ClientCertificateConfig", - "description": "Configuration for client certificate authentication on the cluster. If no\nconfiguration is specified, a client certificate is issued." 
- }, - "password": { - "description": "The password to use for HTTP basic authentication to the master endpoint.\nBecause the master endpoint is open to the Internet, you should create a\nstrong password. If a password is provided for cluster creation, username\nmust be non-empty.", - "type": "string" - }, - "clientKey": { - "description": "[Output only] Base64-encoded private key used by clients to authenticate\nto the cluster endpoint.", - "type": "string" - }, - "clusterCaCertificate": { - "description": "[Output only] Base64-encoded public certificate that is the root of\ntrust for the cluster.", - "type": "string" - }, - "clientCertificate": { - "description": "[Output only] Base64-encoded public certificate used by clients to\nauthenticate to the cluster endpoint.", - "type": "string" - }, - "username": { - "description": "The username to use for HTTP basic authentication to the master endpoint.\nFor clusters v1.6.0 and later, you can disable basic authentication by\nproviding an empty username.", - "type": "string" - } - }, - "id": "MasterAuth" + } }, "AutoUpgradeOptions": { "description": "AutoUpgradeOptions defines the set of options for the user to control how\nthe Auto Upgrades will proceed.", @@ -1901,27 +2305,28 @@ "id": "AutoUpgradeOptions" }, "ListClustersResponse": { + "id": "ListClustersResponse", "description": "ListClustersResponse is the result of ListClustersRequest.", "type": "object", "properties": { - "clusters": { - "description": "A list of clusters in the project in the specified zone, or\nacross all ones.", - "items": { - "$ref": "Cluster" - }, - "type": "array" - }, "missingZones": { "description": "If any zones are listed here, the list of clusters returned\nmay be missing those zones.", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } + }, + "clusters": { + "description": "A list of clusters in the project in the specified zone, or\nacross all ones.", + "type": "array", + "items": { + "$ref": "Cluster" + } } - 
}, - "id": "ListClustersResponse" + } }, "HttpLoadBalancing": { + "id": "HttpLoadBalancing", "description": "Configuration options for the HTTP (L7) load balancing controller addon,\nwhich makes it easy to set up HTTP load balancers for services in a cluster.", "type": "object", "properties": { @@ -1929,10 +2334,10 @@ "description": "Whether the HTTP Load Balancing controller is enabled in the cluster.\nWhen enabled, it runs a small pod in the cluster that manages the load\nbalancers.", "type": "boolean" } - }, - "id": "HttpLoadBalancing" + } }, "SetNetworkPolicyRequest": { + "id": "SetNetworkPolicyRequest", "description": "SetNetworkPolicyRequest enables/disables network policy for a cluster.", "type": "object", "properties": { @@ -1940,35 +2345,10 @@ "$ref": "NetworkPolicy", "description": "Configuration options for the NetworkPolicy feature." } - }, - "id": "SetNetworkPolicyRequest" - }, - "SetMasterAuthRequest": { - "description": "SetMasterAuthRequest updates the admin password of a cluster.", - "type": "object", - "properties": { - "update": { - "description": "A description of the update.", - "$ref": "MasterAuth" - }, - "action": { - "description": "The exact form of action to be taken on the master auth", - "type": "string", - "enumDescriptions": [ - "Operation is unknown and will error out", - "Set the password to a user generated value.", - "Generate a new password and set it to that." - ], - "enum": [ - "UNKNOWN", - "SET_PASSWORD", - "GENERATE_PASSWORD" - ] - } - }, - "id": "SetMasterAuthRequest" + } }, "NodePoolAutoscaling": { + "id": "NodePoolAutoscaling", "description": "NodePoolAutoscaling contains information required by cluster autoscaler to\nadjust the size of the node pool to the current cluster usage.", "type": "object", "properties": { @@ -1977,33 +2357,77 @@ "type": "boolean" }, "maxNodeCount": { - "format": "int32", "description": "Maximum number of nodes in the NodePool. Must be \u003e= min_node_count. 
There\nhas to enough quota to scale up the cluster.", + "format": "int32", "type": "integer" }, "minNodeCount": { - "format": "int32", "description": "Minimum number of nodes in the NodePool. Must be \u003e= 1 and \u003c=\nmax_node_count.", + "format": "int32", "type": "integer" } - }, - "id": "NodePoolAutoscaling" + } }, - "ClientCertificateConfig": { - "description": "Configuration for client certificates on the cluster.", + "SetMasterAuthRequest": { + "id": "SetMasterAuthRequest", + "description": "SetMasterAuthRequest updates the admin password of a cluster.", "type": "object", "properties": { - "issueClientCertificate": { - "description": "Issue a client certificate.", - "type": "boolean" + "action": { + "description": "The exact form of action to be taken on the master auth.", + "type": "string", + "enumDescriptions": [ + "Operation is unknown and will error out.", + "Set the password to a user generated value.", + "Generate a new password and set it to that.", + "Set the username. If an empty username is provided, basic authentication\nis disabled for the cluster. If a non-empty username is provided, basic\nauthentication is enabled, with either a provided password or a generated\none." + ], + "enum": [ + "UNKNOWN", + "SET_PASSWORD", + "GENERATE_PASSWORD", + "SET_USERNAME" + ] + }, + "update": { + "$ref": "MasterAuth", + "description": "A description of the update." 
} - }, - "id": "ClientCertificateConfig" + } }, "IPAllocationPolicy": { + "id": "IPAllocationPolicy", "description": "Configuration for controlling how IPs are allocated in the cluster.", "type": "object", "properties": { + "createSubnetwork": { + "description": "Whether a new subnetwork will be created automatically for the cluster.\n\nThis field is only applicable when `use_ip_aliases` is true.", + "type": "boolean" + }, + "useIpAliases": { + "description": "Whether alias IPs will be used for pod IPs in the cluster.", + "type": "boolean" + }, + "servicesSecondaryRangeName": { + "description": "The name of the secondary range to be used as for the services\nCIDR block. The secondary range will be used for service\nClusterIPs. This must be an existing secondary range associated\nwith the cluster subnetwork.\n\nThis field is only applicable with use_ip_aliases is true and\ncreate_subnetwork is false.", + "type": "string" + }, + "subnetworkName": { + "description": "A custom subnetwork name to be used if `create_subnetwork` is true. If\nthis field is empty, then an automatic name will be chosen for the new\nsubnetwork.", + "type": "string" + }, + "servicesIpv4CidrBlock": { + "description": "The IP address range of the services IPs in this cluster. If blank, a range\nwill be automatically chosen with the default size.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. 
`10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "type": "string" + }, + "clusterIpv4Cidr": { + "description": "This field is deprecated, use cluster_ipv4_cidr_block.", + "type": "string" + }, + "nodeIpv4Cidr": { + "description": "This field is deprecated, use node_ipv4_cidr_block.", + "type": "string" + }, "clusterIpv4CidrBlock": { "description": "The IP address range for the cluster pod IPs. If this field is set, then\n`cluster.cluster_ipv4_cidr` must be left blank.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", "type": "string" @@ -2019,68 +2443,31 @@ "servicesIpv4Cidr": { "description": "This field is deprecated, use services_ipv4_cidr_block.", "type": "string" - }, - "createSubnetwork": { - "description": "Whether a new subnetwork will be created automatically for the cluster.\n\nThis field is only applicable when `use_ip_aliases` is true.", - "type": "boolean" - }, - "useIpAliases": { - "description": "Whether alias IPs will be used for pod IPs in the cluster.", - "type": "boolean" - }, - "servicesSecondaryRangeName": { - "description": "The name of the secondary range to be used as for the services\nCIDR block. The secondary range will be used for service\nClusterIPs. This must be an existing secondary range associated\nwith the cluster subnetwork.\n\nThis field is only applicable with use_ip_aliases is true and\ncreate_subnetwork is false.", - "type": "string" - }, - "servicesIpv4CidrBlock": { - "description": "The IP address range of the services IPs in this cluster. 
If blank, a range\nwill be automatically chosen with the default size.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", - "type": "string" - }, - "subnetworkName": { - "description": "A custom subnetwork name to be used if `create_subnetwork` is true. If\nthis field is empty, then an automatic name will be chosen for the new\nsubnetwork.", - "type": "string" - }, - "clusterIpv4Cidr": { - "description": "This field is deprecated, use cluster_ipv4_cidr_block.", - "type": "string" - }, - "nodeIpv4Cidr": { - "description": "This field is deprecated, use node_ipv4_cidr_block.", - "type": "string" } - }, - "id": "IPAllocationPolicy" + } }, "ClusterUpdate": { "description": "ClusterUpdate describes an update to the cluster. Exactly one update can\nbe applied to a cluster with each request, so at most one field can be\nprovided.", "type": "object", "properties": { - "desiredNodePoolId": { - "description": "The node pool to be upgraded. This field is mandatory if\n\"desired_node_version\", \"desired_image_family\" or\n\"desired_node_pool_autoscaling\" is specified and there is more than one\nnode pool on the cluster.", - "type": "string" - }, - "desiredNodeVersion": { - "description": "The Kubernetes version to change the nodes to (typically an\nupgrade). Use `-` to upgrade to the latest version supported by\nthe server.", - "type": "string" - }, "desiredMasterVersion": { "description": "The Kubernetes version to change the master to. The only valid value is the\nlatest supported version. 
Use \"-\" to have the server automatically select\nthe latest version.", "type": "string" }, "desiredMasterAuthorizedNetworksConfig": { - "$ref": "MasterAuthorizedNetworksConfig", - "description": "Master authorized networks is a Beta feature.\nThe desired configuration options for master authorized networks feature." + "description": "Master authorized networks is a Beta feature.\nThe desired configuration options for master authorized networks feature.", + "$ref": "MasterAuthorizedNetworksConfig" }, "desiredLocations": { "description": "The desired list of Google Compute Engine\n[locations](/compute/docs/zones#available) in which the cluster's nodes\nshould be located. Changing the locations a cluster is in will result\nin nodes being either created or removed from the cluster, depending on\nwhether locations are being added or removed.\n\nThis list must always include the cluster's primary zone.", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } }, "desiredNodePoolAutoscaling": { - "description": "Autoscaler configuration for the node pool specified in\ndesired_node_pool_id. If there is only one pool in the\ncluster and desired_node_pool_id is not provided then\nthe change applies to that single node pool.", - "$ref": "NodePoolAutoscaling" + "$ref": "NodePoolAutoscaling", + "description": "Autoscaler configuration for the node pool specified in\ndesired_node_pool_id. If there is only one pool in the\ncluster and desired_node_pool_id is not provided then\nthe change applies to that single node pool." }, "desiredMonitoringService": { "description": "The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* \"monitoring.googleapis.com\" - the Google Cloud Monitoring service\n* \"none\" - no metrics will be exported from the cluster", @@ -2093,22 +2480,20 @@ "desiredAddonsConfig": { "$ref": "AddonsConfig", "description": "Configurations for the various addons available to run in the cluster." 
+ }, + "desiredNodePoolId": { + "description": "The node pool to be upgraded. This field is mandatory if\n\"desired_node_version\", \"desired_image_family\" or\n\"desired_node_pool_autoscaling\" is specified and there is more than one\nnode pool on the cluster.", + "type": "string" + }, + "desiredNodeVersion": { + "description": "The Kubernetes version to change the nodes to (typically an\nupgrade). Use `-` to upgrade to the latest version supported by\nthe server.", + "type": "string" } }, "id": "ClusterUpdate" }, - "SetLoggingServiceRequest": { - "description": "SetLoggingServiceRequest sets the logging service of a cluster.", - "type": "object", - "properties": { - "loggingService": { - "description": "The logging service the cluster should use to write metrics.\nCurrently available options:\n\n* \"logging.googleapis.com\" - the Google Cloud Logging service\n* \"none\" - no metrics will be exported from the cluster", - "type": "string" - } - }, - "id": "SetLoggingServiceRequest" - }, "HorizontalPodAutoscaling": { + "id": "HorizontalPodAutoscaling", "description": "Configuration options for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods.", "type": "object", "properties": { @@ -2116,281 +2501,13 @@ "description": "Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.\nWhen enabled, it ensures that a Heapster pod is running in the cluster,\nwhich is also used by the Cloud Monitoring service.", "type": "boolean" } - }, - "id": "HorizontalPodAutoscaling" - }, - "MasterAuthorizedNetworksConfig": { - "description": "Master authorized networks is a Beta feature.\nConfiguration options for the master authorized networks feature. 
Enabled\nmaster authorized networks will disallow all external traffic to access\nKubernetes master through HTTPS except traffic from the given CIDR blocks,\nGoogle Compute Engine Public IPs and Google Prod IPs.", - "type": "object", - "properties": { - "enabled": { - "description": "Whether or not master authorized networks is enabled.", - "type": "boolean" - }, - "cidrBlocks": { - "description": "cidr_blocks define up to 10 external networks that could access\nKubernetes master through HTTPS.", - "items": { - "$ref": "CidrBlock" - }, - "type": "array" - } - }, - "id": "MasterAuthorizedNetworksConfig" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "Empty" - }, - "SetNodePoolManagementRequest": { - "description": "SetNodePoolManagementRequest sets the node management properties of a node\npool.", - "type": "object", - "properties": { - "management": { - "$ref": "NodeManagement", - "description": "NodeManagement configuration for the node pool." - } - }, - "id": "SetNodePoolManagementRequest" - }, - "SetNodePoolAutoscalingRequest": { - "description": "SetNodePoolAutoscalingRequest sets the autoscaler settings of a node pool.", - "type": "object", - "properties": { - "autoscaling": { - "$ref": "NodePoolAutoscaling", - "description": "Autoscaling configuration for the node pool." 
- } - }, - "id": "SetNodePoolAutoscalingRequest" - }, - "CreateClusterRequest": { - "description": "CreateClusterRequest creates a cluster.", - "type": "object", - "properties": { - "cluster": { - "description": "A [cluster\nresource](/container-engine/reference/rest/v1/projects.zones.clusters)", - "$ref": "Cluster" - } - }, - "id": "CreateClusterRequest" - }, - "ListNodePoolsResponse": { - "description": "ListNodePoolsResponse is the result of ListNodePoolsRequest.", - "type": "object", - "properties": { - "nodePools": { - "description": "A list of node pools for a cluster.", - "items": { - "$ref": "NodePool" - }, - "type": "array" - } - }, - "id": "ListNodePoolsResponse" - }, - "CompleteIPRotationRequest": { - "description": "CompleteIPRotationRequest moves the cluster master back into single-IP mode.", - "type": "object", - "properties": {}, - "id": "CompleteIPRotationRequest" - }, - "StartIPRotationRequest": { - "description": "StartIPRotationRequest creates a new IP for the cluster and then performs\na node upgrade on each node pool to point to the new IP.", - "type": "object", - "properties": {}, - "id": "StartIPRotationRequest" - }, - "LegacyAbac": { - "description": "Configuration for the legacy Attribute Based Access Control authorization\nmode.", - "type": "object", - "properties": { - "enabled": { - "description": "Whether the ABAC authorizer is enabled for this cluster. 
When enabled,\nidentities in the system, including service accounts, nodes, and\ncontrollers, will have statically granted permissions beyond those\nprovided by the RBAC configuration or IAM.", - "type": "boolean" - } - }, - "id": "LegacyAbac" - }, - "UpdateNodePoolRequest": { - "description": "UpdateNodePoolRequests update a node pool's image and/or version.", - "type": "object", - "properties": { - "imageType": { - "description": "The desired image type for the node pool.", - "type": "string" - }, - "nodeVersion": { - "description": "The Kubernetes version to change the nodes to (typically an\nupgrade). Use `-` to upgrade to the latest version supported by\nthe server.", - "type": "string" - } - }, - "id": "UpdateNodePoolRequest" - }, - "AcceleratorConfig": { - "description": "AcceleratorConfig represents a Hardware Accelerator request.", - "type": "object", - "properties": { - "acceleratorType": { - "description": "The accelerator type resource name. List of supported accelerators\n[here](/compute/docs/gpus/#Introduction)", - "type": "string" - }, - "acceleratorCount": { - "format": "int64", - "description": "The number of the accelerator cards exposed to an instance.", - "type": "string" - } - }, - "id": "AcceleratorConfig" - }, - "SetAddonsConfigRequest": { - "description": "SetAddonsConfigRequest sets the addons associated with the cluster.", - "type": "object", - "properties": { - "addonsConfig": { - "description": "The desired configurations for the various addons available to run in the\ncluster.", - "$ref": "AddonsConfig" - } - }, - "id": "SetAddonsConfigRequest" - }, - "SetLabelsRequest": { - "description": "SetLabelsRequest sets the Google Cloud Platform labels on a Google Container\nEngine cluster, which will in turn set them for Google Compute Engine\nresources used by that cluster", - "type": "object", - "properties": { - "resourceLabels": { - "additionalProperties": { - "type": "string" - }, - "description": "The labels to set for that cluster.", - 
"type": "object" - }, - "labelFingerprint": { - "description": "The fingerprint of the previous set of labels for this resource,\nused to detect conflicts. The fingerprint is initially generated by\nContainer Engine and changes after every request to modify or update\nlabels. You must always provide an up-to-date fingerprint hash when\nupdating or changing labels. Make a \u003ccode\u003eget()\u003c/code\u003e request to the\nresource to get the latest fingerprint.", - "type": "string" - } - }, - "id": "SetLabelsRequest" - }, - "NodePool": { - "description": "NodePool contains the name and configuration for a cluster's node pool.\nNode pools are a set of nodes (i.e. VM's), with a common configuration and\nspecification, under the control of the cluster master. They may have a set\nof Kubernetes labels applied to them, which may be used to reference them\nduring pod scheduling. They may also be resized up or down, to accommodate\nthe workload.", - "type": "object", - "properties": { - "autoscaling": { - "description": "Autoscaler configuration for this NodePool. Autoscaler is enabled\nonly if a valid configuration is present.", - "$ref": "NodePoolAutoscaling" - }, - "management": { - "$ref": "NodeManagement", - "description": "NodeManagement configuration for this NodePool." - }, - "initialNodeCount": { - "format": "int32", - "description": "The initial node count for the pool. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. 
You must also have available\nfirewall and routes quota.", - "type": "integer" - }, - "selfLink": { - "description": "[Output only] Server-defined URL for the resource.", - "type": "string" - }, - "instanceGroupUrls": { - "description": "[Output only] The resource URLs of [instance\ngroups](/compute/docs/instance-groups/) associated with this\nnode pool.", - "items": { - "type": "string" - }, - "type": "array" - }, - "version": { - "description": "[Output only] The version of the Kubernetes of this node.", - "type": "string" - }, - "status": { - "enumDescriptions": [ - "Not set.", - "The PROVISIONING state indicates the node pool is being created.", - "The RUNNING state indicates the node pool has been created\nand is fully usable.", - "The RUNNING_WITH_ERROR state indicates the node pool has been created\nand is partially usable. Some error state has occurred and some\nfunctionality may be impaired. Customer may need to reissue a request\nor trigger a new update.", - "The RECONCILING state indicates that some work is actively being done on\nthe node pool, such as upgrading node software. Details can\nbe found in the `statusMessage` field.", - "The STOPPING state indicates the node pool is being deleted.", - "The ERROR state indicates the node pool may be unusable. Details\ncan be found in the `statusMessage` field." 
- ], - "enum": [ - "STATUS_UNSPECIFIED", - "PROVISIONING", - "RUNNING", - "RUNNING_WITH_ERROR", - "RECONCILING", - "STOPPING", - "ERROR" - ], - "description": "[Output only] The status of the nodes in this pool instance.", - "type": "string" - }, - "config": { - "description": "The node configuration of the pool.", - "$ref": "NodeConfig" - }, - "name": { - "description": "The name of the node pool.", - "type": "string" - }, - "statusMessage": { - "description": "[Output only] Additional information about the current status of this\nnode pool instance, if available.", - "type": "string" - } - }, - "id": "NodePool" - }, - "NodeManagement": { - "description": "NodeManagement defines the set of node management services turned on for the\nnode pool.", - "type": "object", - "properties": { - "autoRepair": { - "description": "A flag that specifies whether the node auto-repair is enabled for the node\npool. If enabled, the nodes in this node pool will be monitored and, if\nthey fail health checks too many times, an automatic repair action will be\ntriggered.", - "type": "boolean" - }, - "autoUpgrade": { - "description": "A flag that specifies whether node auto-upgrade is enabled for the node\npool. If enabled, node auto-upgrade helps keep the nodes in your node pool\nup to date with the latest release version of Kubernetes.", - "type": "boolean" - }, - "upgradeOptions": { - "$ref": "AutoUpgradeOptions", - "description": "Specifies the Auto Upgrade knobs for the node pool." 
- } - }, - "id": "NodeManagement" + } } }, - "protocol": "rest", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "version": "v1", - "baseUrl": "https://container.googleapis.com/", - "canonicalName": "Container", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "servicePath": "", - "description": "The Google Container Engine API is used for building and managing container based applications, powered by the open source Kubernetes technology.", - "kind": "discovery#restDescription", - "rootUrl": "https://container.googleapis.com/", - "basePath": "", - "ownerDomain": "google.com", - "name": "container", - "batchPath": "batch", - "id": "container:v1", - "documentationLink": "https://cloud.google.com/container-engine/", - "revision": "20170825", - "title": "Google Container Engine API", - "discoveryVersion": "v1", - "ownerName": "Google" + "protocol": "rest", + "canonicalName": "Container" } diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go index 89add684e4a..00e74adf45d 100644 --- a/vendor/google.golang.org/api/container/v1/container-gen.go +++ b/vendor/google.golang.org/api/container/v1/container-gen.go @@ -163,8 +163,8 @@ type AcceleratorConfig struct { } func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorConfig - raw := noMethod(*s) + type NoMethod AcceleratorConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -188,6 +188,13 @@ type AddonsConfig struct { // KubernetesDashboard: Configuration for the Kubernetes Dashboard. KubernetesDashboard *KubernetesDashboard `json:"kubernetesDashboard,omitempty"` + // NetworkPolicyConfig: Configuration for NetworkPolicy. 
This only + // tracks whether the addon + // is enabled or not on the Master, it does not track whether network + // policy + // is enabled for the nodes. + NetworkPolicyConfig *NetworkPolicyConfig `json:"networkPolicyConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. // "HorizontalPodAutoscaling") to unconditionally include in API // requests. By default, fields with empty values are omitted from API @@ -208,8 +215,8 @@ type AddonsConfig struct { } func (s *AddonsConfig) MarshalJSON() ([]byte, error) { - type noMethod AddonsConfig - raw := noMethod(*s) + type NoMethod AddonsConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -249,8 +256,8 @@ type AutoUpgradeOptions struct { } func (s *AutoUpgradeOptions) MarshalJSON() ([]byte, error) { - type noMethod AutoUpgradeOptions - raw := noMethod(*s) + type NoMethod AutoUpgradeOptions + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -286,8 +293,8 @@ type CidrBlock struct { } func (s *CidrBlock) MarshalJSON() ([]byte, error) { - type noMethod CidrBlock - raw := noMethod(*s) + type NoMethod CidrBlock + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -317,12 +324,12 @@ type ClientCertificateConfig struct { } func (s *ClientCertificateConfig) MarshalJSON() ([]byte, error) { - type noMethod ClientCertificateConfig - raw := noMethod(*s) + type NoMethod ClientCertificateConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Cluster: A Google Container Engine cluster. +// Cluster: A Google Kubernetes Engine cluster. type Cluster struct { // AddonsConfig: Configurations for the various addons available to run // in the cluster. @@ -414,10 +421,7 @@ type Cluster struct { // time. 
InitialNodeCount int64 `json:"initialNodeCount,omitempty"` - // InstanceGroupUrls: [Output only] The resource URLs of - // [instance - // groups](/compute/docs/instance-groups/) associated with this - // cluster. + // InstanceGroupUrls: Deprecated. Use node_pools.instance_group_urls. InstanceGroupUrls []string `json:"instanceGroupUrls,omitempty"` // IpAllocationPolicy: Configuration for cluster IP allocation. @@ -446,6 +450,9 @@ type Cluster struct { // * if left as an empty string,`logging.googleapis.com` will be used. LoggingService string `json:"loggingService,omitempty"` + // MaintenancePolicy: Configure the maintenance policy for this cluster. + MaintenancePolicy *MaintenancePolicy `json:"maintenancePolicy,omitempty"` + // MasterAuth: The authentication information for accessing the master // endpoint. MasterAuth *MasterAuth `json:"masterAuth,omitempty"` @@ -594,8 +601,8 @@ type Cluster struct { } func (s *Cluster) MarshalJSON() ([]byte, error) { - type noMethod Cluster - raw := noMethod(*s) + type NoMethod Cluster + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -688,8 +695,8 @@ type ClusterUpdate struct { } func (s *ClusterUpdate) MarshalJSON() ([]byte, error) { - type noMethod ClusterUpdate - raw := noMethod(*s) + type NoMethod ClusterUpdate + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -724,8 +731,8 @@ type CreateClusterRequest struct { } func (s *CreateClusterRequest) MarshalJSON() ([]byte, error) { - type noMethod CreateClusterRequest - raw := noMethod(*s) + type NoMethod CreateClusterRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -753,8 +760,49 @@ type CreateNodePoolRequest struct { } func (s *CreateNodePoolRequest) MarshalJSON() ([]byte, error) { - type noMethod CreateNodePoolRequest - raw := noMethod(*s) + type NoMethod CreateNodePoolRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) +} + +// DailyMaintenanceWindow: Time window specified for daily maintenance +// operations. +type DailyMaintenanceWindow struct { + // Duration: [Output only] Duration of the time window, automatically + // chosen to be + // smallest possible in the given scenario. + // Duration will be in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) + // format "PTnHnMnS". + Duration string `json:"duration,omitempty"` + + // StartTime: Time within the maintenance window to start the + // maintenance operations. + // Time format should be in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) + // format "HH:MM”, where HH : [00-23] and MM : [00-59] GMT. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Duration") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Duration") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *DailyMaintenanceWindow) MarshalJSON() ([]byte, error) { + type NoMethod DailyMaintenanceWindow + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -807,8 +855,8 @@ type HorizontalPodAutoscaling struct { } func (s *HorizontalPodAutoscaling) MarshalJSON() ([]byte, error) { - type noMethod HorizontalPodAutoscaling - raw := noMethod(*s) + type NoMethod HorizontalPodAutoscaling + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -842,8 +890,8 @@ type HttpLoadBalancing struct { } func (s *HttpLoadBalancing) MarshalJSON() ([]byte, error) { - type noMethod HttpLoadBalancing - raw := noMethod(*s) + type NoMethod HttpLoadBalancing + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -987,8 +1035,8 @@ type IPAllocationPolicy struct { } func (s *IPAllocationPolicy) MarshalJSON() ([]byte, error) { - type noMethod IPAllocationPolicy - raw := noMethod(*s) + type NoMethod IPAllocationPolicy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1016,8 +1064,8 @@ type KubernetesDashboard struct { } func (s *KubernetesDashboard) MarshalJSON() ([]byte, error) { - type noMethod KubernetesDashboard - raw := noMethod(*s) + type NoMethod KubernetesDashboard + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1052,8 +1100,8 @@ type LegacyAbac struct { } func (s *LegacyAbac) MarshalJSON() ([]byte, error) { - type noMethod LegacyAbac - raw := noMethod(*s) + type NoMethod LegacyAbac + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1092,8 +1140,8 @@ type ListClustersResponse struct { } func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { - type noMethod ListClustersResponse - raw := noMethod(*s) + type NoMethod ListClustersResponse + raw := NoMethod(*s) return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1125,8 +1173,8 @@ type ListNodePoolsResponse struct { } func (s *ListNodePoolsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListNodePoolsResponse - raw := noMethod(*s) + type NoMethod ListNodePoolsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1164,8 +1212,70 @@ type ListOperationsResponse struct { } func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListOperationsResponse - raw := noMethod(*s) + type NoMethod ListOperationsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MaintenancePolicy: MaintenancePolicy defines the maintenance policy +// to be used for the cluster. +type MaintenancePolicy struct { + // Window: Specifies the maintenance window in which maintenance may be + // performed. + Window *MaintenanceWindow `json:"window,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Window") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Window") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *MaintenancePolicy) MarshalJSON() ([]byte, error) { + type NoMethod MaintenancePolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MaintenanceWindow: MaintenanceWindow defines the maintenance window +// to be used for the cluster. +type MaintenanceWindow struct { + // DailyMaintenanceWindow: DailyMaintenanceWindow specifies a daily + // maintenance operation window. + DailyMaintenanceWindow *DailyMaintenanceWindow `json:"dailyMaintenanceWindow,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DailyMaintenanceWindow") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DailyMaintenanceWindow") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *MaintenanceWindow) MarshalJSON() ([]byte, error) { + type NoMethod MaintenanceWindow + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1230,8 +1340,8 @@ type MasterAuth struct { } func (s *MasterAuth) MarshalJSON() ([]byte, error) { - type noMethod MasterAuth - raw := noMethod(*s) + type NoMethod MasterAuth + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1271,8 +1381,8 @@ type MasterAuthorizedNetworksConfig struct { } func (s *MasterAuthorizedNetworksConfig) MarshalJSON() ([]byte, error) { - type noMethod MasterAuthorizedNetworksConfig - raw := noMethod(*s) + type NoMethod MasterAuthorizedNetworksConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1309,8 +1419,40 @@ type NetworkPolicy struct { } func (s *NetworkPolicy) MarshalJSON() ([]byte, error) { - type noMethod NetworkPolicy - raw := noMethod(*s) + type NoMethod NetworkPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkPolicyConfig: Configuration for NetworkPolicy. This only +// tracks whether the addon +// is enabled or not on the Master, it does not track whether network +// policy +// is enabled for the nodes. +type NetworkPolicyConfig struct { + // Disabled: Whether NetworkPolicy is enabled for this cluster. + Disabled bool `json:"disabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Disabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkPolicyConfig) MarshalJSON() ([]byte, error) { + type NoMethod NetworkPolicyConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1394,6 +1536,20 @@ type NodeConfig struct { // The total size of all keys and values must be less than 512 KB. Metadata map[string]string `json:"metadata,omitempty"` + // MinCpuPlatform: Minimum CPU platform to be used by this instance. The + // instance may be + // scheduled on the specified or newer CPU platform. Applicable values + // are the + // friendly names of CPU platforms, such as + // minCpuPlatform: "Intel Haswell" + // or + // minCpuPlatform: "Intel Sandy Bridge". For + // more + // information, read [how to specify min CPU + // platform](https://cloud.google.com/compute/docs/instances/specify-min- + // cpu-platform) + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + // OauthScopes: The set of Google API scopes to be made available on all // of the // node VMs under the "default" service account. 
@@ -1457,8 +1613,8 @@ type NodeConfig struct { } func (s *NodeConfig) MarshalJSON() ([]byte, error) { - type noMethod NodeConfig - raw := noMethod(*s) + type NoMethod NodeConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1503,8 +1659,8 @@ type NodeManagement struct { } func (s *NodeManagement) MarshalJSON() ([]byte, error) { - type noMethod NodeManagement - raw := noMethod(*s) + type NoMethod NodeManagement + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1537,10 +1693,11 @@ type NodePool struct { // firewall and routes quota. InitialNodeCount int64 `json:"initialNodeCount,omitempty"` - // InstanceGroupUrls: [Output only] The resource URLs of - // [instance - // groups](/compute/docs/instance-groups/) associated with this - // node pool. + // InstanceGroupUrls: [Output only] The resource URLs of the [managed + // instance + // groups](/compute/docs/instance-groups/creating-groups-of-mana + // ged-instances) + // associated with this node pool. InstanceGroupUrls []string `json:"instanceGroupUrls,omitempty"` // Management: NodeManagement configuration for this NodePool. @@ -1584,7 +1741,7 @@ type NodePool struct { // node pool instance, if available. StatusMessage string `json:"statusMessage,omitempty"` - // Version: [Output only] The version of the Kubernetes of this node. + // Version: The version of the Kubernetes of this node. 
Version string `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1609,8 +1766,8 @@ type NodePool struct { } func (s *NodePool) MarshalJSON() ([]byte, error) { - type noMethod NodePool - raw := noMethod(*s) + type NoMethod NodePool + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1649,8 +1806,8 @@ type NodePoolAutoscaling struct { } func (s *NodePoolAutoscaling) MarshalJSON() ([]byte, error) { - type noMethod NodePoolAutoscaling - raw := noMethod(*s) + type NoMethod NodePoolAutoscaling + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1688,6 +1845,7 @@ type Operation struct { // "SET_MASTER_AUTH" - Set/generate master auth materials // "SET_NODE_POOL_SIZE" - Set node pool size. // "SET_NETWORK_POLICY" - Updates network policy for a cluster. + // "SET_MAINTENANCE_POLICY" - Set the maintenance policy. OperationType string `json:"operationType,omitempty"` // SelfLink: Server-defined URL for the resource. @@ -1743,8 +1901,8 @@ type Operation struct { } func (s *Operation) MarshalJSON() ([]byte, error) { - type noMethod Operation - raw := noMethod(*s) + type NoMethod Operation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1756,7 +1914,7 @@ func (s *Operation) MarshalJSON() ([]byte, error) { type RollbackNodePoolUpgradeRequest struct { } -// ServerConfig: Container Engine service configuration. +// ServerConfig: Kubernetes Engine service configuration. type ServerConfig struct { // DefaultClusterVersion: Version of Kubernetes the service deploys by // default. 
@@ -1798,8 +1956,8 @@ type ServerConfig struct { } func (s *ServerConfig) MarshalJSON() ([]byte, error) { - type noMethod ServerConfig - raw := noMethod(*s) + type NoMethod ServerConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1829,8 +1987,8 @@ type SetAddonsConfigRequest struct { } func (s *SetAddonsConfigRequest) MarshalJSON() ([]byte, error) { - type noMethod SetAddonsConfigRequest - raw := noMethod(*s) + type NoMethod SetAddonsConfigRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1844,7 +2002,7 @@ type SetLabelsRequest struct { // this resource, // used to detect conflicts. The fingerprint is initially generated // by - // Container Engine and changes after every request to modify or + // Kubernetes Engine and changes after every request to modify or // update // labels. You must always provide an up-to-date fingerprint hash // when @@ -1875,8 +2033,8 @@ type SetLabelsRequest struct { } func (s *SetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod SetLabelsRequest - raw := noMethod(*s) + type NoMethod SetLabelsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1905,8 +2063,8 @@ type SetLegacyAbacRequest struct { } func (s *SetLegacyAbacRequest) MarshalJSON() ([]byte, error) { - type noMethod SetLegacyAbacRequest - raw := noMethod(*s) + type NoMethod SetLegacyAbacRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1944,8 +2102,8 @@ type SetLocationsRequest struct { } func (s *SetLocationsRequest) MarshalJSON() ([]byte, error) { - type noMethod SetLocationsRequest - raw := noMethod(*s) + type NoMethod SetLocationsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1979,20 +2137,59 @@ type SetLoggingServiceRequest struct { } func (s *SetLoggingServiceRequest) MarshalJSON() ([]byte, error) 
{ - type noMethod SetLoggingServiceRequest - raw := noMethod(*s) + type NoMethod SetLoggingServiceRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SetMaintenancePolicyRequest: SetMaintenancePolicyRequest sets the +// maintenance policy for a cluster. +type SetMaintenancePolicyRequest struct { + // MaintenancePolicy: The maintenance policy to be set for the cluster. + // An empty field + // clears the existing maintenance policy. + MaintenancePolicy *MaintenancePolicy `json:"maintenancePolicy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaintenancePolicy") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaintenancePolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SetMaintenancePolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod SetMaintenancePolicyRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SetMasterAuthRequest: SetMasterAuthRequest updates the admin password // of a cluster. type SetMasterAuthRequest struct { - // Action: The exact form of action to be taken on the master auth + // Action: The exact form of action to be taken on the master auth. 
// // Possible values: - // "UNKNOWN" - Operation is unknown and will error out + // "UNKNOWN" - Operation is unknown and will error out. // "SET_PASSWORD" - Set the password to a user generated value. // "GENERATE_PASSWORD" - Generate a new password and set it to that. + // "SET_USERNAME" - Set the username. If an empty username is + // provided, basic authentication + // is disabled for the cluster. If a non-empty username is provided, + // basic + // authentication is enabled, with either a provided password or a + // generated + // one. Action string `json:"action,omitempty"` // Update: A description of the update. @@ -2016,8 +2213,8 @@ type SetMasterAuthRequest struct { } func (s *SetMasterAuthRequest) MarshalJSON() ([]byte, error) { - type noMethod SetMasterAuthRequest - raw := noMethod(*s) + type NoMethod SetMasterAuthRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2051,8 +2248,8 @@ type SetMonitoringServiceRequest struct { } func (s *SetMonitoringServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod SetMonitoringServiceRequest - raw := noMethod(*s) + type NoMethod SetMonitoringServiceRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2080,8 +2277,8 @@ type SetNetworkPolicyRequest struct { } func (s *SetNetworkPolicyRequest) MarshalJSON() ([]byte, error) { - type noMethod SetNetworkPolicyRequest - raw := noMethod(*s) + type NoMethod SetNetworkPolicyRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2109,8 +2306,8 @@ type SetNodePoolAutoscalingRequest struct { } func (s *SetNodePoolAutoscalingRequest) MarshalJSON() ([]byte, error) { - type noMethod SetNodePoolAutoscalingRequest - raw := noMethod(*s) + type NoMethod SetNodePoolAutoscalingRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2139,8 +2336,8 @@ type SetNodePoolManagementRequest 
struct { } func (s *SetNodePoolManagementRequest) MarshalJSON() ([]byte, error) { - type noMethod SetNodePoolManagementRequest - raw := noMethod(*s) + type NoMethod SetNodePoolManagementRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2169,8 +2366,8 @@ type SetNodePoolSizeRequest struct { } func (s *SetNodePoolSizeRequest) MarshalJSON() ([]byte, error) { - type noMethod SetNodePoolSizeRequest - raw := noMethod(*s) + type NoMethod SetNodePoolSizeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2204,19 +2401,17 @@ type UpdateClusterRequest struct { } func (s *UpdateClusterRequest) MarshalJSON() ([]byte, error) { - type noMethod UpdateClusterRequest - raw := noMethod(*s) + type NoMethod UpdateClusterRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // UpdateMasterRequest: UpdateMasterRequest updates the master of the // cluster. type UpdateMasterRequest struct { - // MasterVersion: The Kubernetes version to change the master to. The - // only valid value is the - // latest supported version. Use "-" to have the server automatically - // select - // the latest version. + // MasterVersion: The Kubernetes version to change the master to. Use + // "-" to have the server + // automatically select the default version. MasterVersion string `json:"masterVersion,omitempty"` // ForceSendFields is a list of field names (e.g. 
"MasterVersion") to @@ -2237,8 +2432,8 @@ type UpdateMasterRequest struct { } func (s *UpdateMasterRequest) MarshalJSON() ([]byte, error) { - type noMethod UpdateMasterRequest - raw := noMethod(*s) + type NoMethod UpdateMasterRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2272,8 +2467,8 @@ type UpdateNodePoolRequest struct { } func (s *UpdateNodePoolRequest) MarshalJSON() ([]byte, error) { - type noMethod UpdateNodePoolRequest - raw := noMethod(*s) + type NoMethod UpdateNodePoolRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2289,7 +2484,7 @@ type ProjectsZonesGetServerconfigCall struct { header_ http.Header } -// GetServerconfig: Returns configuration info about the Container +// GetServerconfig: Returns configuration info about the Kubernetes // Engine service. func (r *ProjectsZonesService) GetServerconfig(projectId string, zone string) *ProjectsZonesGetServerconfigCall { c := &ProjectsZonesGetServerconfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -2388,12 +2583,12 @@ func (c *ProjectsZonesGetServerconfigCall) Do(opts ...googleapi.CallOption) (*Se }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Returns configuration info about the Container Engine service.", + // "description": "Returns configuration info about the Kubernetes Engine service.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/serverconfig", // "httpMethod": "GET", // "id": "container.projects.zones.getServerconfig", @@ -2532,7 +2727,7 @@ func (c *ProjectsZonesClustersAddonsCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ 
-2686,7 +2881,7 @@ func (c *ProjectsZonesClustersCompleteIpRotationCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2853,7 +3048,7 @@ func (c *ProjectsZonesClustersCreateCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3004,7 +3199,7 @@ func (c *ProjectsZonesClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3162,7 +3357,7 @@ func (c *ProjectsZonesClustersGetCall) Do(opts ...googleapi.CallOption) (*Cluste }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3314,7 +3509,7 @@ func (c *ProjectsZonesClustersLegacyAbacCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3474,7 +3669,7 @@ func (c *ProjectsZonesClustersListCall) Do(opts ...googleapi.CallOption) (*ListC }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3618,7 +3813,7 @@ func (c *ProjectsZonesClustersLocationsCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, 
err } return ret, nil @@ -3772,7 +3967,7 @@ func (c *ProjectsZonesClustersLoggingCall) Do(opts ...googleapi.CallOption) (*Op }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3926,7 +4121,7 @@ func (c *ProjectsZonesClustersMasterCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4080,7 +4275,7 @@ func (c *ProjectsZonesClustersMonitoringCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4234,7 +4429,7 @@ func (c *ProjectsZonesClustersResourceLabelsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4282,6 +4477,160 @@ func (c *ProjectsZonesClustersResourceLabelsCall) Do(opts ...googleapi.CallOptio } +// method id "container.projects.zones.clusters.setMaintenancePolicy": + +type ProjectsZonesClustersSetMaintenancePolicyCall struct { + s *Service + projectId string + zone string + clusterId string + setmaintenancepolicyrequest *SetMaintenancePolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetMaintenancePolicy: Sets the maintenance policy for a cluster. 
+func (r *ProjectsZonesClustersService) SetMaintenancePolicy(projectId string, zone string, clusterId string, setmaintenancepolicyrequest *SetMaintenancePolicyRequest) *ProjectsZonesClustersSetMaintenancePolicyCall { + c := &ProjectsZonesClustersSetMaintenancePolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.clusterId = clusterId + c.setmaintenancepolicyrequest = setmaintenancepolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersSetMaintenancePolicyCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersSetMaintenancePolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesClustersSetMaintenancePolicyCall) Context(ctx context.Context) *ProjectsZonesClustersSetMaintenancePolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsZonesClustersSetMaintenancePolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsZonesClustersSetMaintenancePolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setmaintenancepolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMaintenancePolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.zones.clusters.setMaintenancePolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesClustersSetMaintenancePolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the maintenance policy for a cluster.", + // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMaintenancePolicy", + // "httpMethod": "POST", + // "id": "container.projects.zones.clusters.setMaintenancePolicy", + // "parameterOrder": [ + // "projectId", + // "zone", + // "clusterId" + // ], + // "parameters": { + // "clusterId": { + // "description": "The name of the cluster to update.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMaintenancePolicy", + // "request": { + // "$ref": "SetMaintenancePolicyRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "container.projects.zones.clusters.setMasterAuth": type 
ProjectsZonesClustersSetMasterAuthCall struct { @@ -4392,7 +4741,7 @@ func (c *ProjectsZonesClustersSetMasterAuthCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4546,7 +4895,7 @@ func (c *ProjectsZonesClustersSetNetworkPolicyCall) Do(opts ...googleapi.CallOpt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4700,7 +5049,7 @@ func (c *ProjectsZonesClustersStartIpRotationCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4854,7 +5203,7 @@ func (c *ProjectsZonesClustersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5011,7 +5360,7 @@ func (c *ProjectsZonesClustersNodePoolsAutoscalingCall) Do(opts ...googleapi.Cal }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5172,7 +5521,7 @@ func (c *ProjectsZonesClustersNodePoolsCreateCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5322,7 +5671,7 @@ func (c *ProjectsZonesClustersNodePoolsDeleteCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5490,7 +5839,7 @@ func (c *ProjectsZonesClustersNodePoolsGetCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5655,7 +6004,7 @@ func (c *ProjectsZonesClustersNodePoolsListCall) Do(opts ...googleapi.CallOption }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5811,7 +6160,7 @@ func (c *ProjectsZonesClustersNodePoolsRollbackCall) Do(opts ...googleapi.CallOp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -5975,7 +6324,7 @@ func (c *ProjectsZonesClustersNodePoolsSetManagementCall) Do(opts ...googleapi.C }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -6139,7 +6488,7 @@ func (c *ProjectsZonesClustersNodePoolsSetSizeCall) Do(opts ...googleapi.CallOpt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -6304,7 +6653,7 @@ func (c *ProjectsZonesClustersNodePoolsUpdateCall) Do(opts ...googleapi.CallOpti }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -6465,7 +6814,7 @@ func (c *ProjectsZonesOperationsCancelCall) Do(opts ...googleapi.CallOption) (*E }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); 
err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -6626,7 +6975,7 @@ func (c *ProjectsZonesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Oper }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -6782,7 +7131,7 @@ func (c *ProjectsZonesOperationsListCall) Do(opts ...googleapi.CallOption) (*Lis }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go index 092044f448c..0f75aa86792 100644 --- a/vendor/google.golang.org/api/gensupport/send.go +++ b/vendor/google.golang.org/api/gensupport/send.go @@ -5,6 +5,7 @@ package gensupport import ( + "encoding/json" "errors" "net/http" @@ -59,3 +60,12 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (* } return resp, err } + +// DecodeResponse decodes the body of res into target. If there is no body, +// target is unchanged. 
+func DecodeResponse(target interface{}, res *http.Response) error { + if res.StatusCode == http.StatusNoContent { + return nil + } + return json.NewDecoder(res.Body).Decode(target) +} diff --git a/vendor/google.golang.org/api/logging/v2beta1/logging-api.json b/vendor/google.golang.org/api/logging/v2beta1/logging-api.json index db0da1a73fc..49fa9f4beb1 100644 --- a/vendor/google.golang.org/api/logging/v2beta1/logging-api.json +++ b/vendor/google.golang.org/api/logging/v2beta1/logging-api.json @@ -1,12 +1,887 @@ { + "batchPath": "batch", + "fullyEncodeReservedExpansion": true, + "title": "Stackdriver Logging API", + "ownerName": "Google", + "resources": { + "organizations": { + "resources": { + "logs": { + "methods": { + "delete": { + "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "logName" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "parameters": { + "logName": { + "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
For more information about log names, see LogEntry.", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+/logs/[^/]+$", + "location": "path" + } + }, + "flatPath": "v2beta1/organizations/{organizationsId}/logs/{logsId}", + "path": "v2beta1/{+logName}", + "id": "logging.organizations.logs.delete" + }, + "list": { + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + "httpMethod": "GET", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "ListLogsResponse" + }, + "parameters": { + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer" + }, + "parent": { + "location": "path", + "description": "Required. 
The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/organizations/{organizationsId}/logs", + "id": "logging.organizations.logs.list", + "path": "v2beta1/{+parent}/logs" + } + } + } + } + }, + "entries": { + "methods": { + "list": { + "response": { + "$ref": "ListLogEntriesResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/entries:list", + "path": "v2beta1/entries:list", + "id": "logging.entries.list", + "request": { + "$ref": "ListLogEntriesRequest" + }, + "description": "Lists log entries. Use this method to retrieve log entries from Stackdriver Logging. For ways to export log entries, see Exporting Logs." + }, + "write": { + "request": { + "$ref": "WriteLogEntriesRequest" + }, + "description": "Log entry resourcesWrites log entries to Stackdriver Logging. This API method is the only way to send log entries to Stackdriver Logging. 
This method is used, directly or indirectly, by the Stackdriver Logging agent (fluentd) and all logging libraries configured to use Stackdriver Logging.", + "response": { + "$ref": "WriteLogEntriesResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "parameters": {}, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" + ], + "flatPath": "v2beta1/entries:write", + "path": "v2beta1/entries:write", + "id": "logging.entries.write" + } + } + }, + "projects": { + "resources": { + "logs": { + "methods": { + "list": { + "response": { + "$ref": "ListLogsResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "parameters": { + "pageToken": { + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + }, + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "parent": { + "location": "path", + "description": "Required. 
The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/projects/{projectsId}/logs", + "path": "v2beta1/{+parent}/logs", + "id": "logging.projects.logs.list", + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed." + }, + "delete": { + "flatPath": "v2beta1/projects/{projectsId}/logs/{logsId}", + "path": "v2beta1/{+logName}", + "id": "logging.projects.logs.delete", + "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "logName" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "parameters": { + "logName": { + "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
For more information about log names, see LogEntry.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/logs/[^/]+$", + "location": "path" + } + } + } + } + }, + "sinks": { + "methods": { + "list": { + "description": "Lists sinks.", + "response": { + "$ref": "ListSinksResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "parent": { + "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + }, + "pageToken": { + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/projects/{projectsId}/sinks", + "path": "v2beta1/{+parent}/sinks", + "id": "logging.projects.sinks.list" + }, + "get": { + "httpMethod": "GET", + "parameterOrder": [ + "sinkName" + ], + "response": { + "$ref": "LogSink" + }, + "parameters": { + "sinkName": { + "description": "Required. 
The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/sinks/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", + "id": "logging.projects.sinks.get", + "path": "v2beta1/{+sinkName}", + "description": "Gets a sink." + }, + "update": { + "response": { + "$ref": "LogSink" + }, + "parameterOrder": [ + "sinkName" + ], + "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "parameters": { + "sinkName": { + "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/sinks/[^/]+$", + "location": "path" + }, + "uniqueWriterIdentity": { + "description": "Optional. See sinks.create for a description of this field. 
When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false.", + "type": "boolean", + "location": "query" + }, + "updateMask": { + "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmaskExample: updateMask=filter.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", + "path": "v2beta1/{+sinkName}", + "id": "logging.projects.sinks.update", + "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.", + "request": { + "$ref": "LogSink" + } + }, + "create": { + "request": { + "$ref": "LogSink" + }, + "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. 
A sink can export log entries only from the resource owning the sink.", + "response": { + "$ref": "LogSink" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "POST", + "parameters": { + "uniqueWriterIdentity": { + "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", + "type": "boolean", + "location": "query" + }, + "parent": { + "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "flatPath": "v2beta1/projects/{projectsId}/sinks", + "path": "v2beta1/{+parent}/sinks", + "id": "logging.projects.sinks.create" + }, + "delete": { + "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", + "path": "v2beta1/{+sinkName}", + "id": "logging.projects.sinks.delete", + "description": "Deletes a sink. 
If the sink has a unique writer_identity, then that service account is also deleted.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "sinkName" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "parameters": { + "sinkName": { + "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/sinks/[^/]+$", + "location": "path" + } + } + } + } + }, + "metrics": { + "methods": { + "delete": { + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "metricName" + ], + "httpMethod": "DELETE", + "parameters": { + "metricName": { + "description": "The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/metrics/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" + ], + "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", + "path": "v2beta1/{+metricName}", + "id": "logging.projects.metrics.delete", + "description": "Deletes a logs-based metric." + }, + "list": { + "response": { + "$ref": "ListLogMetricsResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "parameters": { + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. 
The values of other method parameters should be identical to those in the previous call.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer" + }, + "parent": { + "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/projects/{projectsId}/metrics", + "path": "v2beta1/{+parent}/metrics", + "id": "logging.projects.metrics.list", + "description": "Lists logs-based metrics." 
+ }, + "get": { + "description": "Gets a logs-based metric.", + "response": { + "$ref": "LogMetric" + }, + "parameterOrder": [ + "metricName" + ], + "httpMethod": "GET", + "parameters": { + "metricName": { + "description": "The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/metrics/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", + "path": "v2beta1/{+metricName}", + "id": "logging.projects.metrics.get" + }, + "update": { + "description": "Creates or updates a logs-based metric.", + "request": { + "$ref": "LogMetric" + }, + "response": { + "$ref": "LogMetric" + }, + "parameterOrder": [ + "metricName" + ], + "httpMethod": "PUT", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" + ], + "parameters": { + "metricName": { + "location": "path", + "description": "The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/metrics/[^/]+$" + } + }, + "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", + "path": "v2beta1/{+metricName}", + "id": "logging.projects.metrics.update" + }, + "create": { + "request": { + "$ref": "LogMetric" + }, + "description": "Creates a logs-based metric.", + "response": { + "$ref": "LogMetric" + }, + "parameterOrder": [ + "parent" + ], + 
"httpMethod": "POST", + "parameters": { + "parent": { + "description": "The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.write" + ], + "flatPath": "v2beta1/projects/{projectsId}/metrics", + "path": "v2beta1/{+parent}/metrics", + "id": "logging.projects.metrics.create" + } + } + } + } + }, + "billingAccounts": { + "resources": { + "logs": { + "methods": { + "delete": { + "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", + "httpMethod": "DELETE", + "parameterOrder": [ + "logName" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "logName": { + "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
For more information about log names, see LogEntry.", + "required": true, + "type": "string", + "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/logging.admin" + ], + "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs/{logsId}", + "id": "logging.billingAccounts.logs.delete", + "path": "v2beta1/{+logName}" + }, + "list": { + "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs", + "path": "v2beta1/{+parent}/logs", + "id": "logging.billingAccounts.logs.list", + "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", + "response": { + "$ref": "ListLogsResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "parent": { + "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", + "required": true, + "type": "string", + "pattern": "^billingAccounts/[^/]+$", + "location": "path" + }, + "pageToken": { + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. 
The values of other method parameters should be identical to those in the previous call.", + "type": "string" + } + } + } + } + } + } + }, + "monitoredResourceDescriptors": { + "methods": { + "list": { + "description": "Lists the descriptors for monitored resource types used by Stackdriver Logging.", + "response": { + "$ref": "ListMonitoredResourceDescriptorsResponse" + }, + "parameterOrder": [], + "httpMethod": "GET", + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "pageToken": { + "location": "query", + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read" + ], + "flatPath": "v2beta1/monitoredResourceDescriptors", + "path": "v2beta1/monitoredResourceDescriptors", + "id": "logging.monitoredResourceDescriptors.list" + } + } + } + }, + "parameters": { + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" + }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" + }, + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "$.xgafv": { + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] + }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, + "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json" + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", + "type": "string" + } + }, + "version": "v2beta1", + "baseUrl": "https://logging.googleapis.com/", + "kind": "discovery#restDescription", + "description": "Writes log entries and manages your Stackdriver Logging configuration.", + "servicePath": "", "basePath": "", - "revision": "20170905", - "documentationLink": "https://cloud.google.com/logging/docs/", "id": "logging:v2beta1", + "documentationLink": "https://cloud.google.com/logging/docs/", + "revision": "20171219", "discoveryVersion": "v1", "version_module": true, "schemas": { + "LogLine": { + "description": "Application log line emitted while processing a request.", + "type": "object", + "properties": { + "time": { + "description": "Approximate time when this log entry was made.", + "format": "google-datetime", + "type": "string" + }, + "severity": { + "enumDescriptions": [ + "(0) The log entry has no assigned severity level.", + "(100) Debug or trace information.", + "(200) Routine information, such as ongoing status or performance.", + "(300) Normal but significant events, such as start up, shut down, or a configuration change.", + "(400) Warning events might cause problems.", + "(500) Error events are likely to cause problems.", + "(600) Critical events cause more severe problems or outages.", + "(700) A person must take an action immediately.", + "(800) One or more systems are unusable." + ], + "enum": [ + "DEFAULT", + "DEBUG", + "INFO", + "NOTICE", + "WARNING", + "ERROR", + "CRITICAL", + "ALERT", + "EMERGENCY" + ], + "description": "Severity of this log entry.", + "type": "string" + }, + "logMessage": { + "description": "App-provided log message.", + "type": "string" + }, + "sourceLocation": { + "$ref": "SourceLocation", + "description": "Where in the source code this log message was written." + } + }, + "id": "LogLine" + }, + "Linear": { + "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). 
Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", + "type": "object", + "properties": { + "numFiniteBuckets": { + "description": "Must be greater than 0.", + "format": "int32", + "type": "integer" + }, + "width": { + "description": "Must be greater than 0.", + "format": "double", + "type": "number" + }, + "offset": { + "description": "Lower bound of the first bucket.", + "format": "double", + "type": "number" + } + }, + "id": "Linear" + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "SourceLocation": { + "description": "Specifies a location in a source code file.", + "type": "object", + "properties": { + "file": { + "description": "Source file name. Depending on the runtime environment, this might be a simple name or a fully-qualified name.", + "type": "string" + }, + "functionName": { + "description": "Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information is used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. 
For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python).", + "type": "string" + }, + "line": { + "description": "Line within the source file.", + "format": "int64", + "type": "string" + } + }, + "id": "SourceLocation" + }, + "ListLogEntriesRequest": { + "description": "The parameters to ListLogEntries.", + "type": "object", + "properties": { + "resourceNames": { + "description": "Required. Names of one or more parent resources from which to retrieve log entries:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nProjects listed in the project_ids field are added to this list.", + "type": "array", + "items": { + "type": "string" + } + }, + "projectIds": { + "description": "Deprecated. Use resource_names instead. One or more project identifiers or project numbers from which to retrieve log entries. Example: \"my-project-1A\". If present, these project identifiers are converted to resource name format and added to the list of resources in resource_names.", + "type": "array", + "items": { + "type": "string" + } + }, + "filter": { + "description": "Optional. A filter that chooses which log entries to return. See Advanced Logs Filters. Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of the filter is 20000 characters.", + "type": "string" + }, + "pageToken": { + "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. page_token must be the value of next_page_token from the previous response. The values of other method parameters should be identical to those in the previous call.", + "type": "string" + }, + "pageSize": { + "description": "Optional. 
The maximum number of results to return from this request. Non-positive values are ignored. The presence of next_page_token in the response indicates that more results might be available.", + "format": "int32", + "type": "integer" + }, + "orderBy": { + "description": "Optional. How the results should be sorted. Presently, the only permitted values are \"timestamp asc\" (default) and \"timestamp desc\". The first option returns entries in order of increasing values of LogEntry.timestamp (oldest first), and the second option returns entries in order of decreasing timestamps (newest first). Entries with equal timestamps are returned in order of their insert_id values.", + "type": "string" + } + }, + "id": "ListLogEntriesRequest" + }, + "Explicit": { + "description": "Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): boundsi Lower bound (1 \u003c= i \u003c N); boundsi - 1The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", + "type": "object", + "properties": { + "bounds": { + "description": "The values must be monotonically increasing.", + "type": "array", + "items": { + "format": "double", + "type": "number" + } + } + }, + "id": "Explicit" + }, "SourceReference": { + "description": "A reference to a particular snapshot of the source tree used to build and deploy an application.", + "type": "object", "properties": { "repository": { "description": "Optional. A URI string identifying the repository. 
Example: \"https://github.com/GoogleCloudPlatform/kubernetes.git\"", @@ -17,34 +892,32 @@ "type": "string" } }, - "id": "SourceReference", - "description": "A reference to a particular snapshot of the source tree used to build and deploy an application.", - "type": "object" + "id": "SourceReference" }, "WriteLogEntriesResponse": { - "id": "WriteLogEntriesResponse", "description": "Result returned from WriteLogEntries. empty", "type": "object", - "properties": {} + "properties": {}, + "id": "WriteLogEntriesResponse" }, "Exponential": { "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", "type": "object", "properties": { - "numFiniteBuckets": { - "type": "integer", - "format": "int32", - "description": "Must be greater than 0." - }, "growthFactor": { - "format": "double", "description": "Must be greater than 1.", + "format": "double", "type": "number" }, "scale": { - "format": "double", "description": "Must be greater than 0.", + "format": "double", "type": "number" + }, + "numFiniteBuckets": { + "description": "Must be greater than 0.", + "format": "int32", + "type": "integer" } }, "id": "Exponential" @@ -53,10 +926,6 @@ "description": "The parameters to WriteLogEntries.", "type": "object", "properties": { - "partialSuccess": { - "description": "Optional. Whether valid entries should be written even if some other entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. 
If any entry is not written, then the response status is the error associated with one of the failed entries and the response includes error details keyed by the entries' zero-based index in the entries.write method.", - "type": "boolean" - }, "labels": { "additionalProperties": { "type": "string" @@ -65,19 +934,23 @@ "type": "object" }, "resource": { - "$ref": "MonitoredResource", - "description": "Optional. A default monitored resource object that is assigned to all log entries in entries that do not specify a value for resource. Example:\n{ \"type\": \"gce_instance\",\n \"labels\": {\n \"zone\": \"us-central1-a\", \"instance_id\": \"00000000000000000000\" }}\nSee LogEntry." - }, - "entries": { - "description": "Required. The log entries to send to Stackdriver Logging. The order of log entries in this list does not matter. Values supplied in this method's log_name, resource, and labels fields are copied into those log entries in this list that do not include values for their corresponding fields. For more information, see the LogEntry type.If the timestamp or insert_id fields are missing in log entries, then this method supplies the current time or a unique identifier, respectively. The supplied values are chosen so that, among the log entries that did not supply their own values, the entries earlier in the list will sort before the entries later in the list. See the entries.list method.Log entries with timestamps that are more than the logs retention period in the past or more than 24 hours in the future might be discarded. Discarding does not return an error.To improve throughput and to avoid exceeding the quota limit for calls to entries.write, you should try to include several log entries in this list, rather than calling this method for each individual log entry.", - "items": { - "$ref": "LogEntry" - }, - "type": "array" + "description": "Optional. 
A default monitored resource object that is assigned to all log entries in entries that do not specify a value for resource. Example:\n{ \"type\": \"gce_instance\",\n \"labels\": {\n \"zone\": \"us-central1-a\", \"instance_id\": \"00000000000000000000\" }}\nSee LogEntry.", + "$ref": "MonitoredResource" }, "logName": { - "type": "string", - "description": "Optional. A default log resource name that is assigned to all log entries in entries that do not specify a value for log_name:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\" or \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry." + "description": "Optional. A default log resource name that is assigned to all log entries in entries that do not specify a value for log_name:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\" or \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", + "type": "string" + }, + "entries": { + "description": "Required. The log entries to send to Stackdriver Logging. The order of log entries in this list does not matter. Values supplied in this method's log_name, resource, and labels fields are copied into those log entries in this list that do not include values for their corresponding fields. For more information, see the LogEntry type.If the timestamp or insert_id fields are missing in log entries, then this method supplies the current time or a unique identifier, respectively. 
The supplied values are chosen so that, among the log entries that did not supply their own values, the entries earlier in the list will sort before the entries later in the list. See the entries.list method.Log entries with timestamps that are more than the logs retention period in the past or more than 24 hours in the future will not be available when calling entries.list. However, those log entries can still be exported with LogSinks.To improve throughput and to avoid exceeding the quota limit for calls to entries.write, you should try to include several log entries in this list, rather than calling this method for each individual log entry.", + "type": "array", + "items": { + "$ref": "LogEntry" + } + }, + "partialSuccess": { + "description": "Optional. Whether valid entries should be written even if some other entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any entry is not written, then the response status is the error associated with one of the failed entries and the response includes error details keyed by the entries' zero-based index in the entries.write method.", + "type": "boolean" } }, "id": "WriteLogEntriesRequest" @@ -86,6 +959,14 @@ "description": "A description of a label.", "type": "object", "properties": { + "key": { + "description": "The label key.", + "type": "string" + }, + "description": { + "description": "A human-readable description for the label.", + "type": "string" + }, "valueType": { "description": "The type of data that can be assigned to the label.", "type": "string", @@ -99,14 +980,6 @@ "BOOL", "INT64" ] - }, - "key": { - "description": "The label key.", - "type": "string" - }, - "description": { - "description": "A human-readable description for the label.", - "type": "string" } }, "id": "LabelDescriptor" @@ -115,17 +988,17 @@ "description": "BucketOptions describes the bucket boundaries used to create a histogram for the distribution. 
The buckets can be in a linear sequence, an exponential sequence, or each bucket can be specified explicitly. BucketOptions does not include the number of values in each bucket.A bucket has an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket must be strictly greater than the lower bound. The sequence of N buckets for a distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i \u003e 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. The finite buckets are so-called because both bounds are finite.", "type": "object", "properties": { - "exponentialBuckets": { - "$ref": "Exponential", - "description": "The exponential buckets." + "linearBuckets": { + "$ref": "Linear", + "description": "The linear bucket." }, "explicitBuckets": { "$ref": "Explicit", "description": "The explicit buckets." }, - "linearBuckets": { - "description": "The linear bucket.", - "$ref": "Linear" + "exponentialBuckets": { + "$ref": "Exponential", + "description": "The exponential buckets." } }, "id": "BucketOptions" @@ -135,11 +1008,11 @@ "type": "object", "properties": { "metrics": { + "description": "A list of logs-based metrics.", + "type": "array", "items": { "$ref": "LogMetric" - }, - "type": "array", - "description": "A list of logs-based metrics." + } }, "nextPageToken": { "description": "If there might be more results than appear in this response, then nextPageToken is included. 
To get the next set of results, call this method again using the value of nextPageToken as pageToken.", @@ -153,12 +1026,6 @@ "type": "object", "properties": { "metricKind": { - "enum": [ - "METRIC_KIND_UNSPECIFIED", - "GAUGE", - "DELTA", - "CUMULATIVE" - ], "description": "Whether the metric records instantaneous values, changes to a value, etc. Some combinations of metric_kind and value_type might not be supported.", "type": "string", "enumDescriptions": [ @@ -166,29 +1033,35 @@ "An instantaneous measurement of a value.", "The change in a value during a time interval.", "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." + ], + "enum": [ + "METRIC_KIND_UNSPECIFIED", + "GAUGE", + "DELTA", + "CUMULATIVE" ] }, + "displayName": { + "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\". This field is optional but it is recommended to be set for any metrics associated with user-visible concepts, such as Quota.", + "type": "string" + }, "description": { "description": "A detailed description of the metric, which can be used in documentation.", "type": "string" }, - "displayName": { - "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\".", - "type": "string" - }, "unit": { "description": "The unit in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. 
The supported units are a subset of The Unified Code for Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT)\nbit bit\nBy byte\ns second\nmin minute\nh hour\nd dayPrefixes (PREFIX)\nk kilo (10**3)\nM mega (10**6)\nG giga (10**9)\nT tera (10**12)\nP peta (10**15)\nE exa (10**18)\nZ zetta (10**21)\nY yotta (10**24)\nm milli (10**-3)\nu micro (10**-6)\nn nano (10**-9)\np pico (10**-12)\nf femto (10**-15)\na atto (10**-18)\nz zepto (10**-21)\ny yocto (10**-24)\nKi kibi (2**10)\nMi mebi (2**20)\nGi gibi (2**30)\nTi tebi (2**40)GrammarThe grammar includes the dimensionless unit 1, such as 1/s.The grammar also includes these connectors:\n/ division (as an infix operator, e.g. 1/s).\n. multiplication (as an infix operator, e.g. GBy.d)The grammar for a unit is as follows:\nExpression = Component { \".\" Component } { \"/\" Component } ;\n\nComponent = [ PREFIX ] UNIT [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\nAnnotation = \"{\" NAME \"}\" ;\nNotes:\nAnnotation is just a comment if it follows a UNIT and is equivalent to 1 if it is used alone. For examples, {requests}/s == 1/s, By{transmitted}/s == By/s.\nNAME is a sequence of non-blank printable ASCII characters not containing '{' or '}'.", "type": "string" }, "labels": { "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.", + "type": "array", "items": { "$ref": "LabelDescriptor" - }, - "type": "array" + } }, "name": { - "description": "The resource name of the metric descriptor. 
Depending on the implementation, the name typically includes: (1) the parent resource name that defines the scope of the metric type or of its data; and (2) the metric's URL-encoded type, which also appears in the type field of this descriptor. For example, following is the resource name of a custom metric within the GCP project my-project-id:\n\"projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount\"\n", + "description": "The resource name of the metric descriptor.", "type": "string" }, "type": { @@ -196,6 +1069,15 @@ "type": "string" }, "valueType": { + "enumDescriptions": [ + "Do not use this default value.", + "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", + "The value is a signed 64-bit integer.", + "The value is a double precision floating point number.", + "The value is a text string. This value type can be used only if the metric kind is GAUGE.", + "The value is a Distribution.", + "The value is money." + ], "enum": [ "VALUE_TYPE_UNSPECIFIED", "BOOL", @@ -206,16 +1088,7 @@ "MONEY" ], "description": "Whether the measurement is an integer, a floating-point number, etc. Some combinations of metric_kind and value_type might not be supported.", - "type": "string", - "enumDescriptions": [ - "Do not use this default value.", - "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", - "The value is a signed 64-bit integer.", - "The value is a double precision floating point number.", - "The value is a text string. This value type can be used only if the metric kind is GAUGE.", - "The value is a Distribution.", - "The value is money." - ] + "type": "string" } }, "id": "MetricDescriptor" @@ -224,12 +1097,48 @@ "description": "An individual entry in a log.", "type": "object", "properties": { + "resource": { + "$ref": "MonitoredResource", + "description": "Required. The monitored resource associated with this log entry. 
Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error." + }, + "httpRequest": { + "description": "Optional. Information about the HTTP request associated with this log entry, if applicable.", + "$ref": "HttpRequest" + }, + "jsonPayload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "The log entry payload, represented as a structure that is expressed as a JSON object.", + "type": "object" + }, + "insertId": { + "description": "Optional. A unique identifier for the log entry. If you provide a value, then Stackdriver Logging considers other log entries in the same project, with the same timestamp, and with the same insert_id to be duplicates which can be removed. If omitted in new log entries, then Stackdriver Logging assigns its own unique identifier. The insert_id is also used to order log entries that have the same timestamp value.", + "type": "string" + }, + "operation": { + "$ref": "LogEntryOperation", + "description": "Optional. Information about an operation associated with the log entry, if applicable." + }, + "textPayload": { + "description": "The log entry payload, represented as a Unicode string (UTF-8).", + "type": "string" + }, + "protoPayload": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The log entry payload, represented as a protocol buffer. Some Google Cloud Platform services use this field for their log entry payloads.", + "type": "object" + }, "labels": { + "description": "Optional. A set of user-defined (key, value) data that provides additional information about the log entry.", + "type": "object", "additionalProperties": { "type": "string" - }, - "description": "Optional. 
A set of user-defined (key, value) data that provides additional information about the log entry.", - "type": "object" + } }, "trace": { "description": "Optional. Resource name of the trace associated with the log entry, if any. If it contains a relative resource name, the name is assumed to be relative to //tracing.googleapis.com. Example: projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824", @@ -262,58 +1171,26 @@ ] }, "sourceLocation": { - "$ref": "LogEntrySourceLocation", - "description": "Optional. Source code location information associated with the log entry, if any." + "description": "Optional. Source code location information associated with the log entry, if any.", + "$ref": "LogEntrySourceLocation" }, - "receiveTimestamp": { - "type": "string", - "format": "google-datetime", - "description": "Output only. The time the log entry was received by Stackdriver Logging." + "spanId": { + "description": "Optional. The span ID within the trace associated with the log entry. For Stackdriver Trace spans, this is the same format that the Stackdriver Trace API v2 uses: a 16-character hexadecimal encoding of an 8-byte array, such as \u003ccode\u003e\"000000000000004a\"\u003c/code\u003e.", + "type": "string" }, "timestamp": { + "description": "Optional. The time the event described by the log entry occurred. This time is used to compute the log entry's age and to enforce the logs retention period. If this field is omitted in a new log entry, then Stackdriver Logging assigns it the current time.Incoming log entries should have timestamps that are no more than the logs retention period in the past, and no more than 24 hours in the future. Log entries outside those time boundaries will not be available when calling entries.list, but those log entries can still be exported with LogSinks.", + "format": "google-datetime", + "type": "string" + }, + "receiveTimestamp": { + "description": "Output only. 
The time the log entry was received by Stackdriver Logging.", "format": "google-datetime", - "description": "Optional. The time the event described by the log entry occurred. This time is used to compute the log entry's age and to enforce the logs retention period. If this field is omitted in a new log entry, then Stackdriver Logging assigns it the current time.Incoming log entries should have timestamps that are no more than the logs retention period in the past, and no more than 24 hours in the future. See the entries.write API method for more information.", "type": "string" }, "logName": { - "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.", + "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\nA project number may optionally be used in place of PROJECT_ID. 
The project number is translated to its corresponding PROJECT_ID internally and the log_name field will contain PROJECT_ID in queries and exports.[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.", "type": "string" - }, - "httpRequest": { - "$ref": "HttpRequest", - "description": "Optional. Information about the HTTP request associated with this log entry, if applicable." - }, - "resource": { - "description": "Required. The monitored resource associated with this log entry. Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error.", - "$ref": "MonitoredResource" - }, - "jsonPayload": { - "additionalProperties": { - "description": "Properties of the object.", - "type": "any" - }, - "description": "The log entry payload, represented as a structure that is expressed as a JSON object.", - "type": "object" - }, - "operation": { - "description": "Optional. Information about an operation associated with the log entry, if applicable.", - "$ref": "LogEntryOperation" - }, - "insertId": { - "type": "string", - "description": "Optional. A unique identifier for the log entry. If you provide a value, then Stackdriver Logging considers other log entries in the same project, with the same timestamp, and with the same insert_id to be duplicates which can be removed. 
If omitted in new log entries, then Stackdriver Logging assigns its own unique identifier. The insert_id is also used to order log entries that have the same timestamp value." - }, - "textPayload": { - "description": "The log entry payload, represented as a Unicode string (UTF-8).", - "type": "string" - }, - "protoPayload": { - "description": "The log entry payload, represented as a protocol buffer. Some Google Cloud Platform services use this field for their log entry payloads.", - "type": "object", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - } } }, "id": "LogEntry" @@ -322,18 +1199,100 @@ "description": "Complete log information about a single HTTP request to an App Engine application.", "type": "object", "properties": { + "userAgent": { + "description": "User agent that made the request.", + "type": "string" + }, + "wasLoadingRequest": { + "description": "Whether this was a loading request for the instance.", + "type": "boolean" + }, + "sourceReference": { + "description": "Source code for the application that handled this request. 
There can be more than one source reference per deployed application if source code is distributed among multiple repositories.", + "type": "array", + "items": { + "$ref": "SourceReference" + } + }, + "responseSize": { + "description": "Size in bytes sent back to client by request.", + "format": "int64", + "type": "string" + }, + "traceId": { + "description": "Stackdriver Trace identifier for this request.", + "type": "string" + }, + "line": { + "description": "A list of log lines emitted by the application while serving this request.", + "type": "array", + "items": { + "$ref": "LogLine" + } + }, + "taskQueueName": { + "description": "Queue name of the request, in the case of an offline request.", + "type": "string" + }, + "referrer": { + "description": "Referrer URL of request.", + "type": "string" + }, + "requestId": { + "description": "Globally unique identifier for a request, which is based on the request start time. Request IDs for requests which started later will compare greater as strings than those for requests which started earlier.", + "type": "string" + }, + "nickname": { + "description": "The logged-in user who made the request.Most likely, this is the part of the user's email before the @ sign. The field value is the same for different requests from the same user, but different users can have similar names. This information is also available to the application via the App Engine Users API.This field will be populated starting with App Engine 1.9.21.", + "type": "string" + }, + "status": { + "description": "HTTP response status code. Example: 200, 404.", + "format": "int32", + "type": "integer" + }, + "resource": { + "description": "Contains the path and query portion of the URL that was requested. For example, if the URL was \"http://example.com/app?name=val\", the resource would be \"/app?name=val\". 
The fragment identifier, which is identified by the # character, is not included.", + "type": "string" + }, + "pendingTime": { + "description": "Time this request spent in the pending request queue.", + "format": "google-duration", + "type": "string" + }, + "taskName": { + "description": "Task name of the request, in the case of an offline request.", + "type": "string" + }, + "urlMapEntry": { + "description": "File or class that handled the request.", + "type": "string" + }, + "instanceIndex": { + "description": "If the instance processing this request belongs to a manually scaled module, then this is the 0-based index of the instance. Otherwise, this value is -1.", + "format": "int32", + "type": "integer" + }, + "finished": { + "description": "Whether this request is finished or active.", + "type": "boolean" + }, + "host": { + "description": "Internet host and port number of the resource being requested.", + "type": "string" + }, "httpVersion": { "description": "HTTP version of request. Example: \"HTTP/1.1\".", "type": "string" }, "startTime": { - "type": "string", + "description": "Time when the request started.", "format": "google-datetime", - "description": "Time when the request started." + "type": "string" }, "latency": { - "format": "google-duration", "description": "Latency of the request.", + "format": "google-duration", "type": "string" }, "ip": { @@ -353,8 +1312,8 @@ "type": "string" }, "cost": { - "format": "double", "description": "An indication of the relative cost of serving this request.", + "format": "double", "type": "number" }, "instanceId": { @@ -362,8 +1321,8 @@ "type": "string" }, "megaCycles": { - "format": "int64", "description": "Number of CPU megacycles used to process request.", + "format": "int64", "type": "string" }, "first": { @@ -375,95 +1334,13 @@ "type": "string" }, "moduleId": { - "type": "string", - "description": "Module of the application that handled this request." 
+ "description": "Module of the application that handled this request.", + "type": "string" }, "endTime": { - "type": "string", + "description": "Time when the request finished.", "format": "google-datetime", - "description": "Time when the request finished." - }, - "userAgent": { - "description": "User agent that made the request.", "type": "string" - }, - "wasLoadingRequest": { - "description": "Whether this was a loading request for the instance.", - "type": "boolean" - }, - "sourceReference": { - "description": "Source code for the application that handled this request. There can be more than one source reference per deployed application if source code is distributed among multiple repositories.", - "items": { - "$ref": "SourceReference" - }, - "type": "array" - }, - "responseSize": { - "format": "int64", - "description": "Size in bytes sent back to client by request.", - "type": "string" - }, - "traceId": { - "description": "Stackdriver Trace identifier for this request.", - "type": "string" - }, - "line": { - "description": "A list of log lines emitted by the application while serving this request.", - "items": { - "$ref": "LogLine" - }, - "type": "array" - }, - "taskQueueName": { - "description": "Queue name of the request, in the case of an offline request.", - "type": "string" - }, - "referrer": { - "type": "string", - "description": "Referrer URL of request." - }, - "requestId": { - "description": "Globally unique identifier for a request, which is based on the request start time. Request IDs for requests which started later will compare greater as strings than those for requests which started earlier.", - "type": "string" - }, - "nickname": { - "description": "The logged-in user who made the request.Most likely, this is the part of the user's email before the @ sign. The field value is the same for different requests from the same user, but different users can have similar names. 
This information is also available to the application via the App Engine Users API.This field will be populated starting with App Engine 1.9.21.", - "type": "string" - }, - "status": { - "type": "integer", - "format": "int32", - "description": "HTTP response status code. Example: 200, 404." - }, - "pendingTime": { - "format": "google-duration", - "description": "Time this request spent in the pending request queue.", - "type": "string" - }, - "resource": { - "description": "Contains the path and query portion of the URL that was requested. For example, if the URL was \"http://example.com/app?name=val\", the resource would be \"/app?name=val\". The fragment identifier, which is identified by the # character, is not included.", - "type": "string" - }, - "taskName": { - "description": "Task name of the request, in the case of an offline request.", - "type": "string" - }, - "urlMapEntry": { - "description": "File or class that handled the request.", - "type": "string" - }, - "instanceIndex": { - "format": "int32", - "description": "If the instance processing this request belongs to a manually scaled module, then this is the 0-based index of the instance. Otherwise, this value is -1.", - "type": "integer" - }, - "host": { - "description": "Internet host and port number of the resource being requested.", - "type": "string" - }, - "finished": { - "description": "Whether this request is finished or active.", - "type": "boolean" } }, "id": "RequestLog" @@ -473,11 +1350,11 @@ "type": "object", "properties": { "resourceDescriptors": { + "description": "A list of resource descriptors.", + "type": "array", "items": { "$ref": "MonitoredResourceDescriptor" - }, - "type": "array", - "description": "A list of resource descriptors." + } }, "nextPageToken": { "description": "If there might be more results than those appearing in this response, then nextPageToken is included. 
To get the next set of results, call this method again using the value of nextPageToken as pageToken.", @@ -487,6 +1364,8 @@ "id": "ListMonitoredResourceDescriptorsResponse" }, "LogEntryOperation": { + "description": "Additional information about a potentially long-running operation with which a log entry is associated.", + "type": "object", "properties": { "last": { "description": "Optional. Set this to True if this is the last log entry in the operation.", @@ -496,39 +1375,21 @@ "description": "Optional. An arbitrary operation identifier. Log entries with the same identifier are assumed to be part of the same operation.", "type": "string" }, - "first": { - "description": "Optional. Set this to True if this is the first log entry in the operation.", - "type": "boolean" - }, "producer": { "description": "Optional. An arbitrary producer identifier. The combination of id and producer must be globally unique. Examples for producer: \"MyDivision.MyBigCompany.com\", \"github.com/MyProject/MyApplication\".", "type": "string" + }, + "first": { + "description": "Optional. Set this to True if this is the first log entry in the operation.", + "type": "boolean" } }, - "id": "LogEntryOperation", - "description": "Additional information about a potentially long-running operation with which a log entry is associated.", - "type": "object" + "id": "LogEntryOperation" }, "LogMetric": { "description": "Describes a logs-based metric. The value of the metric is the number of log entries that match a logs filter in a given time interval.Logs-based metric can also be used to extract values from logs and create a a distribution of the values. The distribution records the statistics of the extracted values along with an optional histogram of the values as specified by the bucket options.", "type": "object", "properties": { - "description": { - "description": "Optional. 
A description of this metric, which is used in documentation.", - "type": "string" - }, - "valueExtractor": { - "description": "Optional. A value_extractor is required when using a distribution logs-based metric to extract the values to record from a log entry. Two functions are supported for value extraction: EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument are: 1. field: The name of the log entry field from which the value is to be extracted. 2. regex: A regular expression using the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with a single capture group to extract data from the specified log entry field. The value of the field is converted to a string before applying the regex. It is an error to specify a regex that does not include exactly one capture group.The result of the extraction must be convertible to a double type, as the distribution always records double values. If either the extraction or the conversion to double fails, then those values are not recorded in the distribution.Example: REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(\\d+).*\")", - "type": "string" - }, - "bucketOptions": { - "description": "Optional. The bucket_options are required when the logs-based metric is using a DISTRIBUTION value type and it describes the bucket boundaries used to create a histogram of the extracted values.", - "$ref": "BucketOptions" - }, - "name": { - "type": "string", - "description": "Required. The client-assigned metric identifier. Examples: \"error_count\", \"nginx/requests\".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.The metric identifier in this field must not be URL-encoded (https://en.wikipedia.org/wiki/Percent-encoding). 
However, when the metric identifier appears as the [METRIC_ID] part of a metric_name API parameter, then the metric identifier must be URL-encoded. Example: \"projects/my-project/metrics/nginx%2Frequests\"." - }, "labelExtractors": { "additionalProperties": { "type": "string" @@ -537,8 +1398,8 @@ "type": "object" }, "metricDescriptor": { - "$ref": "MetricDescriptor", - "description": "Optional. The metric descriptor associated with the logs-based metric. If unspecified, it uses a default metric descriptor with a DELTA metric kind, INT64 value type, with no labels and a unit of \"1\". Such a metric counts the number of log entries matching the filter expression.The name, type, and description fields in the metric_descriptor are output only, and is constructed using the name and description field in the LogMetric.To create a logs-based metric that records a distribution of log values, a DELTA metric kind with a DISTRIBUTION value type must be used along with a value_extractor expression in the LogMetric.Each label in the metric descriptor must have a matching label name as the key and an extractor expression as the value in the label_extractors map.The metric_kind and value_type fields in the metric_descriptor cannot be updated once initially configured. New labels can be added in the metric_descriptor, but existing labels cannot be modified except for their description." + "description": "Optional. The metric descriptor associated with the logs-based metric. If unspecified, it uses a default metric descriptor with a DELTA metric kind, INT64 value type, with no labels and a unit of \"1\". 
Such a metric counts the number of log entries matching the filter expression.The name, type, and description fields in the metric_descriptor are output only, and is constructed using the name and description field in the LogMetric.To create a logs-based metric that records a distribution of log values, a DELTA metric kind with a DISTRIBUTION value type must be used along with a value_extractor expression in the LogMetric.Each label in the metric descriptor must have a matching label name as the key and an extractor expression as the value in the label_extractors map.The metric_kind and value_type fields in the metric_descriptor cannot be updated once initially configured. New labels can be added in the metric_descriptor, but existing labels cannot be modified except for their description.", + "$ref": "MetricDescriptor" }, "version": { "description": "Deprecated. The API version that created or updated this metric. The v2 format is used by default and cannot be changed.", @@ -555,42 +1416,48 @@ "filter": { "description": "Required. An advanced logs filter which is used to match log entries. Example:\n\"resource.type=gae_app AND severity\u003e=ERROR\"\nThe maximum length of the filter is 20000 characters.", "type": "string" + }, + "description": { + "description": "Optional. A description of this metric, which is used in documentation.", + "type": "string" + }, + "bucketOptions": { + "$ref": "BucketOptions", + "description": "Optional. The bucket_options are required when the logs-based metric is using a DISTRIBUTION value type and it describes the bucket boundaries used to create a histogram of the extracted values." + }, + "valueExtractor": { + "description": "Optional. A value_extractor is required when using a distribution logs-based metric to extract the values to record from a log entry. Two functions are supported for value extraction: EXTRACT(field) or REGEXP_EXTRACT(field, regex). The argument are: 1. 
field: The name of the log entry field from which the value is to be extracted. 2. regex: A regular expression using the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with a single capture group to extract data from the specified log entry field. The value of the field is converted to a string before applying the regex. It is an error to specify a regex that does not include exactly one capture group.The result of the extraction must be convertible to a double type, as the distribution always records double values. If either the extraction or the conversion to double fails, then those values are not recorded in the distribution.Example: REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(\\d+).*\")", + "type": "string" + }, + "name": { + "description": "Required. The client-assigned metric identifier. Examples: \"error_count\", \"nginx/requests\".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.The metric identifier in this field must not be URL-encoded (https://en.wikipedia.org/wiki/Percent-encoding). However, when the metric identifier appears as the [METRIC_ID] part of a metric_name API parameter, then the metric identifier must be URL-encoded. Example: \"projects/my-project/metrics/nginx%2Frequests\".", + "type": "string" } }, "id": "LogMetric" }, "MonitoredResource": { + "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. 
For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", "type": "object", "properties": { + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels \"project_id\", \"instance_id\", and \"zone\".", + "type": "object" + }, "type": { "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Compute Engine VM instance is gce_instance.", "type": "string" - }, - "labels": { - "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels \"project_id\", \"instance_id\", and \"zone\".", - "type": "object", - "additionalProperties": { - "type": "string" - } } }, - "id": "MonitoredResource", - "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. 
For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n" + "id": "MonitoredResource" }, "LogSink": { - "id": "LogSink", "description": "Describes a sink used to export log entries to one of the following destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls which log entries are exported. The sink must be created within a project, organization, billing account, or folder.", "type": "object", "properties": { - "writerIdentity": { - "description": "Output only. An IAM identity—a service account or group—under which Stackdriver Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update, based on the setting of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting access for a resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.", - "type": "string" - }, - "startTime": { - "type": "string", - "format": "google-datetime", - "description": "Deprecated. This field is ignored when creating or updating sinks." - }, "outputVersionFormat": { "description": "Deprecated. The log entry format to use for this sink's exported log entries. The v2 format is used by default and cannot be changed.", "type": "string", @@ -613,20 +1480,30 @@ "description": "Optional. This field applies only to sinks owned by organizations and folders. If the field is false, the default, only the logs owned by the sink's parent resource are available for export. 
If the field is true, then logs from all the projects, folders, and billing accounts contained in the sink's parent resource are also available for export. Whether a particular log entry from the children is exported depends on the sink's filter expression. For example, if this field is true, then the filter resource.type=gce_instance would export all Compute Engine VM instance log entries from all projects in the sink's parent. To only export entries from certain child projects, filter on the project part of the log name:\nlogName:(\"projects/test-project1/\" OR \"projects/test-project2/\") AND\nresource.type=gce_instance\n", "type": "boolean" }, - "filter": { - "type": "string", - "description": "Optional. An advanced logs filter. The only exported log entries are those that are in the resource owning the sink and that match the filter. For example:\nlogName=\"projects/[PROJECT_ID]/logs/[LOG_ID]\" AND severity\u003e=ERROR\n" - }, "destination": { "description": "Required. The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs With Sinks.", "type": "string" }, + "filter": { + "description": "Optional. An advanced logs filter. The only exported log entries are those that are in the resource owning the sink and that match the filter. For example:\nlogName=\"projects/[PROJECT_ID]/logs/[LOG_ID]\" AND severity\u003e=ERROR\n", + "type": "string" + }, "endTime": { - "format": "google-datetime", "description": "Deprecated. This field is ignored when creating or updating sinks.", + "format": "google-datetime", + "type": "string" + }, + "startTime": { + "description": "Deprecated. 
This field is ignored when creating or updating sinks.", + "format": "google-datetime", + "type": "string" + }, + "writerIdentity": { + "description": "Output only. An IAM identity—a service account or group—under which Stackdriver Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update, based on the setting of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting access for a resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.", "type": "string" } - } + }, + "id": "LogSink" }, "ListLogsResponse": { "description": "Result returned from ListLogs.", @@ -638,45 +1515,57 @@ }, "logNames": { "description": "A list of log names. For example, \"projects/my-project/syslog\" or \"organizations/123/cloudresourcemanager.googleapis.com%2Factivity\".", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } } }, "id": "ListLogsResponse" }, "HttpRequest": { - "id": "HttpRequest", "description": "A common proto for logging HTTP requests. Only contains semantics defined by the HTTP specification. Product-specific logging information MUST be defined in a separate message.", "type": "object", "properties": { + "referer": { + "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).", + "type": "string" + }, + "latency": { + "description": "The request processing latency on the server, from the time the request was received until the response was sent.", + "format": "google-duration", + "type": "string" + }, + "userAgent": { + "description": "The user agent sent by the client. 
Example: \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)\".", + "type": "string" + }, "cacheFillBytes": { - "format": "int64", "description": "The number of HTTP response bytes inserted into cache. Set only when a cache fill was attempted.", + "format": "int64", "type": "string" }, "requestMethod": { - "type": "string", - "description": "The request method. Examples: \"GET\", \"HEAD\", \"PUT\", \"POST\"." - }, - "responseSize": { - "format": "int64", - "description": "The size of the HTTP response message sent back to the client, in bytes, including the response headers and the response body.", - "type": "string" - }, - "requestSize": { - "format": "int64", - "description": "The size of the HTTP request message in bytes, including the request headers and the request body.", + "description": "The request method. Examples: \"GET\", \"HEAD\", \"PUT\", \"POST\".", "type": "string" }, "protocol": { "description": "Protocol used for the request. Examples: \"HTTP/1.1\", \"HTTP/2\", \"websocket\"", "type": "string" }, + "responseSize": { + "description": "The size of the HTTP response message sent back to the client, in bytes, including the response headers and the response body.", + "format": "int64", + "type": "string" + }, + "requestSize": { + "description": "The size of the HTTP request message in bytes, including the request headers and the request body.", + "format": "int64", + "type": "string" + }, "requestUrl": { - "type": "string", - "description": "The scheme (http, https), the host name, the path and the query portion of the URL that was requested. Example: \"http://example.com/some/info?color=red\"." + "description": "The scheme (http, https), the host name, the path and the query portion of the URL that was requested. Example: \"http://example.com/some/info?color=red\".", + "type": "string" }, "remoteIp": { "description": "The IP address (IPv4 or IPv6) of the client that issued the HTTP request. 
Examples: \"192.168.1.1\", \"FE80::0202:B3FF:FE1E:8329\".", @@ -699,74 +1588,63 @@ "type": "boolean" }, "status": { - "format": "int32", "description": "The response code indicating the status of response. Examples: 200, 404.", + "format": "int32", "type": "integer" - }, - "referer": { - "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).", - "type": "string" - }, - "userAgent": { - "type": "string", - "description": "The user agent sent by the client. Example: \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)\"." - }, - "latency": { - "type": "string", - "format": "google-duration", - "description": "The request processing latency on the server, from the time the request was received until the response was sent." } - } + }, + "id": "HttpRequest" }, "ListSinksResponse": { "description": "Result returned from ListSinks.", "type": "object", "properties": { + "sinks": { + "description": "A list of sinks.", + "type": "array", + "items": { + "$ref": "LogSink" + } + }, "nextPageToken": { "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call the same method again using the value of nextPageToken as pageToken.", "type": "string" - }, - "sinks": { - "description": "A list of sinks.", - "items": { - "$ref": "LogSink" - }, - "type": "array" } }, "id": "ListSinksResponse" }, "MonitoredResourceDescriptor": { + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. 
APIs generally provide a list method that returns the monitored resource descriptors used by the API.", + "type": "object", "properties": { - "type": { - "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters.", - "type": "string" - }, "labels": { "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", + "type": "array", "items": { "$ref": "LabelDescriptor" - }, - "type": "array" + } }, "name": { "description": "Optional. The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", "type": "string" }, - "description": { - "type": "string", - "description": "Optional. A detailed description of the monitored resource type that might be used in documentation." - }, "displayName": { "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, \"Google Cloud SQL Database\".", "type": "string" + }, + "description": { + "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", + "type": "string" + }, + "type": { + "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. 
The maximum length of this value is 256 characters.", + "type": "string" } }, - "id": "MonitoredResourceDescriptor", - "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", - "type": "object" + "id": "MonitoredResourceDescriptor" }, "LogEntrySourceLocation": { + "description": "Additional information about the source code location that produced the log entry.", "type": "object", "properties": { "file": { @@ -778,13 +1656,12 @@ "type": "string" }, "line": { - "format": "int64", "description": "Optional. Line within the source file. 1-based; 0 indicates no line number available.", + "format": "int64", "type": "string" } }, - "id": "LogEntrySourceLocation", - "description": "Additional information about the source code location that produced the log entry." + "id": "LogEntrySourceLocation" }, "ListLogEntriesResponse": { "description": "Result returned from ListLogEntries.", @@ -796,915 +1673,43 @@ }, "entries": { "description": "A list of log entries. If entries is empty, nextPageToken may still be returned, indicating that more entries may exist. See nextPageToken for more information.", + "type": "array", "items": { "$ref": "LogEntry" - }, - "type": "array" + } } }, "id": "ListLogEntriesResponse" - }, - "LogLine": { - "description": "Application log line emitted while processing a request.", - "type": "object", - "properties": { - "sourceLocation": { - "$ref": "SourceLocation", - "description": "Where in the source code this log message was written." 
- }, - "time": { - "type": "string", - "format": "google-datetime", - "description": "Approximate time when this log entry was made." - }, - "logMessage": { - "type": "string", - "description": "App-provided log message." - }, - "severity": { - "description": "Severity of this log entry.", - "type": "string", - "enumDescriptions": [ - "(0) The log entry has no assigned severity level.", - "(100) Debug or trace information.", - "(200) Routine information, such as ongoing status or performance.", - "(300) Normal but significant events, such as start up, shut down, or a configuration change.", - "(400) Warning events might cause problems.", - "(500) Error events are likely to cause problems.", - "(600) Critical events cause more severe problems or outages.", - "(700) A person must take an action immediately.", - "(800) One or more systems are unusable." - ], - "enum": [ - "DEFAULT", - "DEBUG", - "INFO", - "NOTICE", - "WARNING", - "ERROR", - "CRITICAL", - "ALERT", - "EMERGENCY" - ] - } - }, - "id": "LogLine" - }, - "Linear": { - "type": "object", - "properties": { - "numFiniteBuckets": { - "format": "int32", - "description": "Must be greater than 0.", - "type": "integer" - }, - "width": { - "format": "double", - "description": "Must be greater than 0.", - "type": "number" - }, - "offset": { - "format": "double", - "description": "Lower bound of the first bucket.", - "type": "number" - } - }, - "id": "Linear", - "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1))." 
- }, - "Empty": { - "type": "object", - "properties": {}, - "id": "Empty", - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}." - }, - "SourceLocation": { - "description": "Specifies a location in a source code file.", - "type": "object", - "properties": { - "functionName": { - "description": "Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information is used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python).", - "type": "string" - }, - "line": { - "format": "int64", - "description": "Line within the source file.", - "type": "string" - }, - "file": { - "description": "Source file name. Depending on the runtime environment, this might be a simple name or a fully-qualified name.", - "type": "string" - } - }, - "id": "SourceLocation" - }, - "ListLogEntriesRequest": { - "description": "The parameters to ListLogEntries.", - "type": "object", - "properties": { - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. page_token must be the value of next_page_token from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" - }, - "pageSize": { - "format": "int32", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. 
The presence of next_page_token in the response indicates that more results might be available.", - "type": "integer" - }, - "orderBy": { - "description": "Optional. How the results should be sorted. Presently, the only permitted values are \"timestamp asc\" (default) and \"timestamp desc\". The first option returns entries in order of increasing values of LogEntry.timestamp (oldest first), and the second option returns entries in order of decreasing timestamps (newest first). Entries with equal timestamps are returned in order of their insert_id values.", - "type": "string" - }, - "resourceNames": { - "description": "Required. Names of one or more parent resources from which to retrieve log entries:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nProjects listed in the project_ids field are added to this list.", - "items": { - "type": "string" - }, - "type": "array" - }, - "projectIds": { - "description": "Deprecated. Use resource_names instead. One or more project identifiers or project numbers from which to retrieve log entries. Example: \"my-project-1A\". If present, these project identifiers are converted to resource name format and added to the list of resources in resource_names.", - "items": { - "type": "string" - }, - "type": "array" - }, - "filter": { - "description": "Optional. A filter that chooses which log entries to return. See Advanced Logs Filters. Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of the filter is 20000 characters.", - "type": "string" - } - }, - "id": "ListLogEntriesRequest" - }, - "Explicit": { - "id": "Explicit", - "description": "Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. 
Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): boundsi Lower bound (1 \u003c= i \u003c N); boundsi - 1The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", - "type": "object", - "properties": { - "bounds": { - "description": "The values must be monotonically increasing.", - "items": { - "format": "double", - "type": "number" - }, - "type": "array" - } - } } }, + "protocol": "rest", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, - "protocol": "rest", "canonicalName": "Logging", "auth": { "oauth2": { "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/logging.write": { - "description": "Submit log data for your projects" - }, - "https://www.googleapis.com/auth/logging.read": { - "description": "View log data for your projects" + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" }, "https://www.googleapis.com/auth/logging.admin": { "description": "Administrate log data for your projects" }, - "https://www.googleapis.com/auth/cloud-platform.read-only": { - "description": "View your data across Google Cloud Platform services" + "https://www.googleapis.com/auth/logging.read": { + "description": "View log data for your projects" + }, + "https://www.googleapis.com/auth/logging.write": { + "description": "Submit log data for your projects" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" } } } }, "rootUrl": "https://logging.googleapis.com/", "ownerDomain": "google.com", - "name": "logging", 
- "batchPath": "batch", - "title": "Stackdriver Logging API", - "ownerName": "Google", - "resources": { - "projects": { - "resources": { - "metrics": { - "methods": { - "create": { - "response": { - "$ref": "LogMetric" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "POST", - "parameters": { - "parent": { - "description": "The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "flatPath": "v2beta1/projects/{projectsId}/metrics", - "id": "logging.projects.metrics.create", - "path": "v2beta1/{+parent}/metrics", - "description": "Creates a logs-based metric.", - "request": { - "$ref": "LogMetric" - } - }, - "delete": { - "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", - "id": "logging.projects.metrics.delete", - "path": "v2beta1/{+metricName}", - "description": "Deletes a logs-based metric.", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "metricName" - ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "parameters": { - "metricName": { - "pattern": "^projects/[^/]+/metrics/[^/]+$", - "location": "path", - "description": "The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", - "type": "string", - "required": true - } - } - }, - "get": { - "id": "logging.projects.metrics.get", - "path": "v2beta1/{+metricName}", - "description": "Gets a logs-based metric.", - "response": { - "$ref": "LogMetric" - }, - "parameterOrder": [ - "metricName" - ], - "httpMethod": "GET", - "scopes": [ - 
"https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "metricName": { - "description": "The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/metrics/[^/]+$", - "location": "path" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}" - }, - "list": { - "description": "Lists logs-based metrics.", - "response": { - "$ref": "ListLogMetricsResponse" - }, - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "pageSize": { - "format": "int32", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "type": "integer", - "location": "query" - }, - "parent": { - "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "pageToken": { - "type": "string", - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." 
- } - }, - "flatPath": "v2beta1/projects/{projectsId}/metrics", - "id": "logging.projects.metrics.list", - "path": "v2beta1/{+parent}/metrics" - }, - "update": { - "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", - "id": "logging.projects.metrics.update", - "path": "v2beta1/{+metricName}", - "description": "Creates or updates a logs-based metric.", - "request": { - "$ref": "LogMetric" - }, - "response": { - "$ref": "LogMetric" - }, - "parameterOrder": [ - "metricName" - ], - "httpMethod": "PUT", - "parameters": { - "metricName": { - "location": "path", - "description": "The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/metrics/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ] - } - } - }, - "sinks": { - "methods": { - "create": { - "response": { - "$ref": "LogSink" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "parameters": { - "parent": { - "location": "path", - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$" - }, - "uniqueWriterIdentity": { - "location": "query", - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. 
If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", - "type": "boolean" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks", - "id": "logging.projects.sinks.create", - "path": "v2beta1/{+parent}/sinks", - "request": { - "$ref": "LogSink" - }, - "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink." - }, - "delete": { - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "sinkName" - ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "parameters": { - "sinkName": { - "description": "Required. 
The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/sinks/[^/]+$", - "location": "path" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", - "id": "logging.projects.sinks.delete", - "path": "v2beta1/{+sinkName}", - "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted." - }, - "get": { - "id": "logging.projects.sinks.get", - "path": "v2beta1/{+sinkName}", - "description": "Gets a sink.", - "response": { - "$ref": "LogSink" - }, - "parameterOrder": [ - "sinkName" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "sinkName": { - "pattern": "^projects/[^/]+/sinks/[^/]+$", - "location": "path", - "description": "Required. 
The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - "type": "string", - "required": true - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}" - }, - "list": { - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListSinksResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "location": "query", - "format": "int32", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "type": "integer" - }, - "parent": { - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks", - "path": "v2beta1/{+parent}/sinks", - "id": "logging.projects.sinks.list", - "description": "Lists sinks." 
- }, - "update": { - "parameters": { - "updateMask": { - "format": "google-fieldmask", - "description": "Optional. Field mask that specifies the fields in sink that need an update. A sink field will be overwritten if, and only if, it is in the update mask. name and output only fields cannot be updated.An empty updateMask is temporarily treated as using the following mask for backwards compatibility purposes: destination,filter,includeChildren At some point in the future, behavior will be removed and specifying an empty updateMask will be an error.For a detailed FieldMask definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmaskExample: updateMask=filter.", - "type": "string", - "location": "query" - }, - "uniqueWriterIdentity": { - "type": "boolean", - "location": "query", - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is set to false or defaulted to false." - }, - "sinkName": { - "location": "path", - "description": "Required. 
The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/sinks/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", - "path": "v2beta1/{+sinkName}", - "id": "logging.projects.sinks.update", - "description": "Updates a sink. This method replaces the following fields in the existing sink with values from the new sink: destination, and filter. The updated sink might also have a new writer_identity; see the unique_writer_identity field.", - "request": { - "$ref": "LogSink" - }, - "httpMethod": "PUT", - "parameterOrder": [ - "sinkName" - ], - "response": { - "$ref": "LogSink" - } - } - } - }, - "logs": { - "methods": { - "delete": { - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "logName" - ], - "httpMethod": "DELETE", - "parameters": { - "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
For more information about log names, see LogEntry.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/logs/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/projects/{projectsId}/logs/{logsId}", - "id": "logging.projects.logs.delete", - "path": "v2beta1/{+logName}" - }, - "list": { - "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", - "response": { - "$ref": "ListLogsResponse" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "parent": { - "location": "path", - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$" - }, - "pageToken": { - "type": "string", - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call." - }, - "pageSize": { - "format": "int32", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", - "type": "integer", - "location": "query" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/logs", - "id": "logging.projects.logs.list", - "path": "v2beta1/{+parent}/logs" - } - } - } - } - }, - "billingAccounts": { - "resources": { - "logs": { - "methods": { - "list": { - "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", - "response": { - "$ref": "ListLogsResponse" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "GET", - "parameters": { - "pageToken": { - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" - }, - "pageSize": { - "type": "integer", - "location": "query", - "format": "int32", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available." - }, - "parent": { - "pattern": "^billingAccounts/[^/]+$", - "location": "path", - "description": "Required. 
The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - "type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs", - "id": "logging.billingAccounts.logs.list", - "path": "v2beta1/{+parent}/logs" - }, - "delete": { - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", - "httpMethod": "DELETE", - "parameterOrder": [ - "logName" - ], - "response": { - "$ref": "Empty" - }, - "parameters": { - "logName": { - "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", - "location": "path", - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
For more information about log names, see LogEntry.", - "type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs/{logsId}", - "path": "v2beta1/{+logName}", - "id": "logging.billingAccounts.logs.delete" - } - } - } - } - }, - "monitoredResourceDescriptors": { - "methods": { - "list": { - "httpMethod": "GET", - "parameterOrder": [], - "response": { - "$ref": "ListMonitoredResourceDescriptorsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "format": "int32", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "type": "integer", - "location": "query" - } - }, - "flatPath": "v2beta1/monitoredResourceDescriptors", - "path": "v2beta1/monitoredResourceDescriptors", - "id": "logging.monitoredResourceDescriptors.list", - "description": "Lists the descriptors for monitored resource types used by Stackdriver Logging." 
- } - } - }, - "organizations": { - "resources": { - "logs": { - "methods": { - "delete": { - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "logName" - ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "parameters": { - "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - "type": "string", - "required": true, - "pattern": "^organizations/[^/]+/logs/[^/]+$", - "location": "path" - } - }, - "flatPath": "v2beta1/organizations/{organizationsId}/logs/{logsId}", - "id": "logging.organizations.logs.delete", - "path": "v2beta1/{+logName}", - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted." - }, - "list": { - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "pageSize": { - "format": "int32", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "type": "integer", - "location": "query" - }, - "parent": { - "description": "Required. 
The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - "type": "string", - "required": true, - "pattern": "^organizations/[^/]+$", - "location": "path" - }, - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string", - "location": "query" - } - }, - "flatPath": "v2beta1/organizations/{organizationsId}/logs", - "path": "v2beta1/{+parent}/logs", - "id": "logging.organizations.logs.list", - "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListLogsResponse" - } - } - } - } - } - }, - "entries": { - "methods": { - "list": { - "id": "logging.entries.list", - "path": "v2beta1/entries:list", - "request": { - "$ref": "ListLogEntriesRequest" - }, - "description": "Lists log entries. Use this method to retrieve log entries from Stackdriver Logging. 
For ways to export log entries, see Exporting Logs.", - "response": { - "$ref": "ListLogEntriesResponse" - }, - "parameterOrder": [], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": {}, - "flatPath": "v2beta1/entries:list" - }, - "write": { - "response": { - "$ref": "WriteLogEntriesResponse" - }, - "parameterOrder": [], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "parameters": {}, - "flatPath": "v2beta1/entries:write", - "id": "logging.entries.write", - "path": "v2beta1/entries:write", - "request": { - "$ref": "WriteLogEntriesRequest" - }, - "description": "Log entry resourcesWrites log entries to Stackdriver Logging. This API method is the only way to send log entries to Stackdriver Logging. This method is used, directly or indirectly, by the Stackdriver Logging agent (fluentd) and all logging libraries configured to use Stackdriver Logging." - } - } - } - }, - "parameters": { - "upload_protocol": { - "location": "query", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string" - }, - "prettyPrint": { - "location": "query", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "fields": { - "location": "query", - "description": "Selector specifying which fields to include in a partial response.", - "type": "string" - }, - "$.xgafv": { - "enum": [ - "1", - "2" - ], - "description": "V1 error format.", - "type": "string", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" - }, - "callback": { - "location": "query", - "description": "JSONP", - "type": "string" - }, - "alt": { - "enum": [ - "json", - "media", - "proto" - ], - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response.", - "default": "json" - }, - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "location": "query", - "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "bearer_token": { - "location": "query", - "description": "OAuth bearer token.", - "type": "string" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - } - }, - "version": "v2beta1", - "baseUrl": "https://logging.googleapis.com/", - "kind": "discovery#restDescription", - "description": "Writes log entries and manages your Stackdriver Logging configuration.", - "servicePath": "" + "name": "logging" } diff --git a/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go b/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go index 08c611bff6e..d7f761ac854 100644 --- a/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go +++ b/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go @@ -248,8 +248,8 @@ type BucketOptions struct { } func (s *BucketOptions) MarshalJSON() ([]byte, error) { - type noMethod BucketOptions - raw := noMethod(*s) + type NoMethod BucketOptions + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -297,8 +297,8 @@ type Explicit struct { } func (s *Explicit) MarshalJSON() ([]byte, error) { - type noMethod Explicit - raw := noMethod(*s) + type NoMethod Explicit + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -337,19 +337,19 @@ type Exponential struct { } func (s *Exponential) MarshalJSON() ([]byte, error) { - type noMethod Exponential - raw := noMethod(*s) + type NoMethod Exponential + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Exponential) UnmarshalJSON(data []byte) error { - type noMethod Exponential + type NoMethod Exponential var s1 struct { GrowthFactor gensupport.JSONFloat64 
`json:"growthFactor"` Scale gensupport.JSONFloat64 `json:"scale"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -444,8 +444,8 @@ type HttpRequest struct { } func (s *HttpRequest) MarshalJSON() ([]byte, error) { - type noMethod HttpRequest - raw := noMethod(*s) + type NoMethod HttpRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -483,8 +483,8 @@ type LabelDescriptor struct { } func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { - type noMethod LabelDescriptor - raw := noMethod(*s) + type NoMethod LabelDescriptor + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -523,19 +523,19 @@ type Linear struct { } func (s *Linear) MarshalJSON() ([]byte, error) { - type noMethod Linear - raw := noMethod(*s) + type NoMethod Linear + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Linear) UnmarshalJSON(data []byte) error { - type noMethod Linear + type NoMethod Linear var s1 struct { Offset gensupport.JSONFloat64 `json:"offset"` Width gensupport.JSONFloat64 `json:"width"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -612,8 +612,8 @@ type ListLogEntriesRequest struct { } func (s *ListLogEntriesRequest) MarshalJSON() ([]byte, error) { - type noMethod ListLogEntriesRequest - raw := noMethod(*s) + type NoMethod ListLogEntriesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -658,8 +658,8 @@ type ListLogEntriesResponse struct { } func (s *ListLogEntriesResponse) MarshalJSON() ([]byte, error) { - type noMethod ListLogEntriesResponse - raw := noMethod(*s) + type NoMethod ListLogEntriesResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, 
s.NullFields) } @@ -696,8 +696,8 @@ type ListLogMetricsResponse struct { } func (s *ListLogMetricsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListLogMetricsResponse - raw := noMethod(*s) + type NoMethod ListLogMetricsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -736,8 +736,8 @@ type ListLogsResponse struct { } func (s *ListLogsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListLogsResponse - raw := noMethod(*s) + type NoMethod ListLogsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -775,8 +775,8 @@ type ListMonitoredResourceDescriptorsResponse struct { } func (s *ListMonitoredResourceDescriptorsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListMonitoredResourceDescriptorsResponse - raw := noMethod(*s) + type NoMethod ListMonitoredResourceDescriptorsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -813,8 +813,8 @@ type ListSinksResponse struct { } func (s *ListSinksResponse) MarshalJSON() ([]byte, error) { - type noMethod ListSinksResponse - raw := noMethod(*s) + type NoMethod ListSinksResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -850,7 +850,10 @@ type LogEntry struct { // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[L // OG_ID]" // "folders/[FOLDER_ID]/logs/[LOG_ID]" - // [LOG_ID] must be URL-encoded within log_name. Example: + // A project number may optionally be used in place of PROJECT_ID. The + // project number is translated to its corresponding PROJECT_ID + // internally and the log_name field will contain PROJECT_ID in queries + // and exports.[LOG_ID] must be URL-encoded within log_name. Example: // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa // ctivity". 
[LOG_ID] must be less than 512 characters long and can only // include the following characters: upper and lower case alphanumeric @@ -903,6 +906,12 @@ type LogEntry struct { // with the log entry, if any. SourceLocation *LogEntrySourceLocation `json:"sourceLocation,omitempty"` + // SpanId: Optional. The span ID within the trace associated with the + // log entry. For Stackdriver Trace spans, this is the same format that + // the Stackdriver Trace API v2 uses: a 16-character hexadecimal + // encoding of an 8-byte array, such as "000000000000004a". + SpanId string `json:"spanId,omitempty"` + // TextPayload: The log entry payload, represented as a Unicode string // (UTF-8). TextPayload string `json:"textPayload,omitempty"` @@ -913,7 +922,9 @@ type LogEntry struct { // log entry, then Stackdriver Logging assigns it the current // time.Incoming log entries should have timestamps that are no more // than the logs retention period in the past, and no more than 24 hours - // in the future. See the entries.write API method for more information. + // in the future. Log entries outside those time boundaries will not be + // available when calling entries.list, but those log entries can still + // be exported with LogSinks. Timestamp string `json:"timestamp,omitempty"` // Trace: Optional. 
Resource name of the trace associated with the log @@ -940,8 +951,8 @@ type LogEntry struct { } func (s *LogEntry) MarshalJSON() ([]byte, error) { - type noMethod LogEntry - raw := noMethod(*s) + type NoMethod LogEntry + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -983,8 +994,8 @@ type LogEntryOperation struct { } func (s *LogEntryOperation) MarshalJSON() ([]byte, error) { - type noMethod LogEntryOperation - raw := noMethod(*s) + type NoMethod LogEntryOperation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1025,8 +1036,8 @@ type LogEntrySourceLocation struct { } func (s *LogEntrySourceLocation) MarshalJSON() ([]byte, error) { - type noMethod LogEntrySourceLocation - raw := noMethod(*s) + type NoMethod LogEntrySourceLocation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1077,8 +1088,8 @@ type LogLine struct { } func (s *LogLine) MarshalJSON() ([]byte, error) { - type noMethod LogLine - raw := noMethod(*s) + type NoMethod LogLine + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1198,8 +1209,8 @@ type LogMetric struct { } func (s *LogMetric) MarshalJSON() ([]byte, error) { - type noMethod LogMetric - raw := noMethod(*s) + type NoMethod LogMetric + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1306,8 +1317,8 @@ type LogSink struct { } func (s *LogSink) MarshalJSON() ([]byte, error) { - type noMethod LogSink - raw := noMethod(*s) + type NoMethod LogSink + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1321,7 +1332,9 @@ type MetricDescriptor struct { // DisplayName: A concise name for the metric, which can be displayed in // user interfaces. Use sentence case without an ending period, for - // example "Request count". + // example "Request count". 
This field is optional but it is recommended + // to be set for any metrics associated with user-visible concepts, such + // as Quota. DisplayName string `json:"displayName,omitempty"` // Labels: The set of labels that can be used to describe a specific @@ -1346,16 +1359,7 @@ type MetricDescriptor struct { // zero and sets a new start time for the following points. MetricKind string `json:"metricKind,omitempty"` - // Name: The resource name of the metric descriptor. Depending on the - // implementation, the name typically includes: (1) the parent resource - // name that defines the scope of the metric type or of its data; and - // (2) the metric's URL-encoded type, which also appears in the type - // field of this descriptor. For example, following is the resource name - // of a custom metric within the GCP project - // my-project-id: - // "projects/my-project-id/metricDescriptors/custom.google - // apis.com%2Finvoice%2Fpaid%2Famount" - // + // Name: The resource name of the metric descriptor. Name string `json:"name,omitempty"` // Type: The metric type, including its DNS name prefix. 
The type is not @@ -1454,8 +1458,8 @@ type MetricDescriptor struct { } func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { - type noMethod MetricDescriptor - raw := noMethod(*s) + type NoMethod MetricDescriptor + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1502,8 +1506,8 @@ type MonitoredResource struct { } func (s *MonitoredResource) MarshalJSON() ([]byte, error) { - type noMethod MonitoredResource - raw := noMethod(*s) + type NoMethod MonitoredResource + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1564,8 +1568,8 @@ type MonitoredResourceDescriptor struct { } func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { - type noMethod MonitoredResourceDescriptor - raw := noMethod(*s) + type NoMethod MonitoredResourceDescriptor + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1711,18 +1715,18 @@ type RequestLog struct { } func (s *RequestLog) MarshalJSON() ([]byte, error) { - type noMethod RequestLog - raw := noMethod(*s) + type NoMethod RequestLog + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *RequestLog) UnmarshalJSON(data []byte) error { - type noMethod RequestLog + type NoMethod RequestLog var s1 struct { Cost gensupport.JSONFloat64 `json:"cost"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -1765,8 +1769,8 @@ type SourceLocation struct { } func (s *SourceLocation) MarshalJSON() ([]byte, error) { - type noMethod SourceLocation - raw := noMethod(*s) + type NoMethod SourceLocation + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1799,8 +1803,8 @@ type SourceReference struct { } func (s *SourceReference) MarshalJSON() ([]byte, error) { - type noMethod SourceReference - raw := noMethod(*s) + type 
NoMethod SourceReference + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1818,11 +1822,12 @@ type WriteLogEntriesRequest struct { // entries earlier in the list will sort before the entries later in the // list. See the entries.list method.Log entries with timestamps that // are more than the logs retention period in the past or more than 24 - // hours in the future might be discarded. Discarding does not return an - // error.To improve throughput and to avoid exceeding the quota limit - // for calls to entries.write, you should try to include several log - // entries in this list, rather than calling this method for each - // individual log entry. + // hours in the future will not be available when calling entries.list. + // However, those log entries can still be exported with LogSinks.To + // improve throughput and to avoid exceeding the quota limit for calls + // to entries.write, you should try to include several log entries in + // this list, rather than calling this method for each individual log + // entry. Entries []*LogEntry `json:"entries,omitempty"` // Labels: Optional. 
Default labels that are added to the labels field @@ -1882,8 +1887,8 @@ type WriteLogEntriesRequest struct { } func (s *WriteLogEntriesRequest) MarshalJSON() ([]byte, error) { - type noMethod WriteLogEntriesRequest - raw := noMethod(*s) + type NoMethod WriteLogEntriesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1989,7 +1994,7 @@ func (c *BillingAccountsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2149,7 +2154,7 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2313,7 +2318,7 @@ func (c *EntriesListCall) Do(opts ...googleapi.CallOption) (*ListLogEntriesRespo }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2461,7 +2466,7 @@ func (c *EntriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteLogEntriesRes }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2612,7 +2617,7 @@ func (c *MonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2765,7 +2770,7 @@ func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil 
{ + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2925,7 +2930,7 @@ func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsR }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3087,7 +3092,7 @@ func (c *ProjectsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3247,7 +3252,7 @@ func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespon }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3414,7 +3419,7 @@ func (c *ProjectsMetricsCreateCall) Do(opts ...googleapi.CallOption) (*LogMetric }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3544,7 +3549,7 @@ func (c *ProjectsMetricsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3685,7 +3690,7 @@ func (c *ProjectsMetricsGetCall) Do(opts ...googleapi.CallOption) (*LogMetric, e }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3846,7 +3851,7 @@ func (c *ProjectsMetricsListCall) Do(opts ...googleapi.CallOption) (*ListLogMetr }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4013,7 +4018,7 @@ func (c *ProjectsMetricsUpdateCall) Do(opts ...googleapi.CallOption) (*LogMetric }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4171,7 +4176,7 @@ func (c *ProjectsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4306,7 +4311,7 @@ func (c *ProjectsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, erro }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4446,7 +4451,7 @@ func (c *ProjectsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4607,7 +4612,7 @@ func (c *ProjectsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4808,7 +4813,7 @@ func (c *ProjectsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, er }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil diff --git a/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json 
b/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json index 7bfd143b2a7..c5cbe6b1b08 100644 --- a/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json +++ b/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json @@ -1,15 +1,1711 @@ { + "ownerName": "Google", + "resources": { + "uptimeCheckIps": { + "methods": { + "list": { + "path": "v3/uptimeCheckIps", + "id": "monitoring.uptimeCheckIps.list", + "description": "Returns the list of IPs that checkers run from", + "response": { + "$ref": "ListUptimeCheckIpsResponse" + }, + "parameterOrder": [], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "parameters": { + "pageToken": { + "location": "query", + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return more results from the previous method call. NOTE: this field is not yet implemented", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "The maximum number of results to return in a single response. The server may further constrain the maximum number of results returned in a single page. If the page_size is \u003c=0, the server will decide the number of results to be returned. NOTE: this field is not yet implemented", + "format": "int32", + "type": "integer" + } + }, + "flatPath": "v3/uptimeCheckIps" + } + } + }, + "projects": { + "resources": { + "uptimeCheckConfigs": { + "methods": { + "delete": { + "description": "Deletes an uptime check configuration. 
Note that this method will fail if the uptime check configuration is referenced by an alert policy or other dependent configs that would be rendered invalid by the deletion.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$", + "location": "path", + "description": "The uptime check configuration to delete. The format isprojects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].", + "required": true, + "type": "string" + } + }, + "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs/{uptimeCheckConfigsId}", + "path": "v3/{+name}", + "id": "monitoring.projects.uptimeCheckConfigs.delete" + }, + "list": { + "description": "Lists the existing valid uptime check configurations for the project, leaving out any invalid configurations.", + "response": { + "$ref": "ListUptimeCheckConfigsResponse" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "parameters": { + "pageToken": { + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return more results from the previous method call.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "The maximum number of results to return in a single response. The server may further constrain the maximum number of results returned in a single page. 
If the page_size is \u003c=0, the server will decide the number of results to be returned.", + "format": "int32", + "type": "integer" + }, + "parent": { + "description": "The project whose uptime check configurations are listed. The format isprojects/[PROJECT_ID].", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs", + "path": "v3/{+parent}/uptimeCheckConfigs", + "id": "monitoring.projects.uptimeCheckConfigs.list" + }, + "get": { + "path": "v3/{+name}", + "id": "monitoring.projects.uptimeCheckConfigs.get", + "description": "Gets a single uptime check configuration.", + "response": { + "$ref": "UptimeCheckConfig" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "parameters": { + "name": { + "location": "path", + "description": "The uptime check configuration to retrieve. The format isprojects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$" + } + }, + "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs/{uptimeCheckConfigsId}" + }, + "patch": { + "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs/{uptimeCheckConfigsId}", + "path": "v3/{+name}", + "id": "monitoring.projects.uptimeCheckConfigs.patch", + "description": "Updates an uptime check configuration. You can either replace the entire configuration with a new one or replace only certain fields in the current configuration by specifying the fields to be updated via \"updateMask\". 
Returns the updated configuration.", + "request": { + "$ref": "UptimeCheckConfig" + }, + "response": { + "$ref": "UptimeCheckConfig" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ], + "parameters": { + "updateMask": { + "description": "Optional. If present, only the listed fields in the current uptime check configuration are updated with values from the new configuration. If this field is empty, then the current configuration is completely replaced with the new configuration.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + }, + "name": { + "location": "path", + "description": "A unique resource name for this UptimeCheckConfig. The format is:projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].This field should be omitted when creating the uptime check configuration; on create, the resource name is assigned by the server and included in the response.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$" + } + } + }, + "create": { + "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs", + "id": "monitoring.projects.uptimeCheckConfigs.create", + "path": "v3/{+parent}/uptimeCheckConfigs", + "request": { + "$ref": "UptimeCheckConfig" + }, + "description": "Creates a new uptime check configuration.", + "httpMethod": "POST", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "UptimeCheckConfig" + }, + "parameters": { + "parent": { + "location": "path", + "description": "The project in which to create the uptime check. 
The format is:projects/[PROJECT_ID].", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ] + } + } + }, + "monitoredResourceDescriptors": { + "methods": { + "list": { + "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors", + "id": "monitoring.projects.monitoredResourceDescriptors.list", + "path": "v3/{+name}/monitoredResourceDescriptors", + "description": "Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ListMonitoredResourceDescriptorsResponse" + }, + "parameters": { + "name": { + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + "type": "string" + }, + "pageSize": { + "description": "A positive number that is the maximum number of results to return.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "filter": { + "location": "query", + "description": "An optional filter describing the descriptors to be returned. The filter can reference the descriptor's type and labels. 
For example, the following filter returns only Google Compute Engine descriptors that have an id label:\nresource.type = starts_with(\"gce_\") AND resource.label:id\n", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write" + ] + }, + "get": { + "path": "v3/{+name}", + "id": "monitoring.projects.monitoredResourceDescriptors.get", + "description": "Gets a single monitored resource descriptor. This method does not require a Stackdriver account.", + "response": { + "$ref": "MonitoredResourceDescriptor" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "description": "The monitored resource descriptor to get. The format is \"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\". The {resource_type} is a predefined type, such as cloudsql_database.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/monitoredResourceDescriptors/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write" + ], + "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors/{monitoredResourceDescriptorsId}" + } + } + }, + "groups": { + "methods": { + "delete": { + "description": "Deletes an existing group.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ], + "parameters": { + "name": { + "description": "The group to delete. 
The format is \"projects/{project_id_or_number}/groups/{group_id}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/groups/[^/]+$", + "location": "path" + } + }, + "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", + "path": "v3/{+name}", + "id": "monitoring.projects.groups.delete" + }, + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "ListGroupsResponse" + }, + "parameters": { + "pageToken": { + "type": "string", + "location": "query", + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call." + }, + "pageSize": { + "description": "A positive number that is the maximum number of results to return.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "ancestorsOfGroup": { + "location": "query", + "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty.", + "type": "string" + }, + "name": { + "description": "The project whose groups are to be listed. The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + }, + "childrenOfGroup": { + "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups whose parentName field contains the group name. If no groups have this parent, the results are empty.", + "type": "string", + "location": "query" + }, + "descendantsOfGroup": { + "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns the descendants of the specified group. 
This is a superset of the results returned by the childrenOfGroup filter, and includes children-of-children, and so forth.", + "type": "string", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "flatPath": "v3/projects/{projectsId}/groups", + "id": "monitoring.projects.groups.list", + "path": "v3/{+name}/groups", + "description": "Lists the existing groups." + }, + "get": { + "path": "v3/{+name}", + "id": "monitoring.projects.groups.get", + "description": "Gets a single group.", + "response": { + "$ref": "Group" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/groups/[^/]+$", + "location": "path", + "description": "The group to retrieve. The format is \"projects/{project_id_or_number}/groups/{group_id}\"." + } + }, + "flatPath": "v3/projects/{projectsId}/groups/{groupsId}" + }, + "update": { + "response": { + "$ref": "Group" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "PUT", + "parameters": { + "name": { + "pattern": "^projects/[^/]+/groups/[^/]+$", + "location": "path", + "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". 
When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.", + "required": true, + "type": "string" + }, + "validateOnly": { + "description": "If true, validate this request but do not update the existing group.", + "type": "boolean", + "location": "query" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ], + "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", + "path": "v3/{+name}", + "id": "monitoring.projects.groups.update", + "request": { + "$ref": "Group" + }, + "description": "Updates an existing group. You can change any group attributes except name." + }, + "create": { + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The project in which to create the group. The format is \"projects/{project_id_or_number}\"." 
+ }, + "validateOnly": { + "location": "query", + "description": "If true, validate this request but do not create the group.", + "type": "boolean" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ], + "flatPath": "v3/projects/{projectsId}/groups", + "path": "v3/{+name}/groups", + "id": "monitoring.projects.groups.create", + "request": { + "$ref": "Group" + }, + "description": "Creates a new group.", + "response": { + "$ref": "Group" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST" + } + }, + "resources": { + "members": { + "methods": { + "list": { + "description": "Lists the monitored resources that are members of a group.", + "response": { + "$ref": "ListGroupMembersResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "pageToken": { + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + "type": "string", + "location": "query" + }, + "interval.startTime": { + "location": "query", + "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", + "format": "google-datetime", + "type": "string" + }, + "pageSize": { + "description": "A positive number that is the maximum number of results to return.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "name": { + "description": "The group whose members are listed. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/groups/[^/]+$", + "location": "path" + }, + "interval.endTime": { + "location": "query", + "description": "Required. 
The end of the time interval.", + "format": "google-datetime", + "type": "string" + }, + "filter": { + "type": "string", + "location": "query", + "description": "An optional list filter describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter:\nresource.type = \"gce_instance\"\n" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "flatPath": "v3/projects/{projectsId}/groups/{groupsId}/members", + "path": "v3/{+name}/members", + "id": "monitoring.projects.groups.members.list" + } + } + } + } + }, + "collectdTimeSeries": { + "methods": { + "create": { + "description": "Stackdriver Monitoring Agent only: Creates a new time series.\u003caside class=\"caution\"\u003eThis method is only for use by the Stackdriver Monitoring Agent. Use projects.timeSeries.create instead.\u003c/aside\u003e", + "request": { + "$ref": "CreateCollectdTimeSeriesRequest" + }, + "response": { + "$ref": "CreateCollectdTimeSeriesResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.write" + ], + "parameters": { + "name": { + "location": "path", + "description": "The project in which to create the time series. 
The format is \"projects/PROJECT_ID_OR_NUMBER\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + } + }, + "flatPath": "v3/projects/{projectsId}/collectdTimeSeries", + "path": "v3/{+name}/collectdTimeSeries", + "id": "monitoring.projects.collectdTimeSeries.create" + } + } + }, + "metricDescriptors": { + "methods": { + "create": { + "id": "monitoring.projects.metricDescriptors.create", + "path": "v3/{+name}/metricDescriptors", + "request": { + "$ref": "MetricDescriptor" + }, + "description": "Creates a new metric descriptor. User-created metric descriptors define custom metrics.", + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "MetricDescriptor" + }, + "parameters": { + "name": { + "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.write" + ], + "flatPath": "v3/projects/{projectsId}/metricDescriptors" + }, + "delete": { + "description": "Deletes a metric descriptor. Only user-created custom metrics can be deleted.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ], + "parameters": { + "name": { + "location": "path", + "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". 
An example of {metric_id} is: \"custom.googleapis.com/my_test_metric\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/metricDescriptors/.+$" + } + }, + "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", + "path": "v3/{+name}", + "id": "monitoring.projects.metricDescriptors.delete" + }, + "list": { + "httpMethod": "GET", + "response": { + "$ref": "ListMetricDescriptorsResponse" + }, + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the filter specifies which metric descriptors are to be returned. For example, the following filter matches all custom metrics:\nmetric.type = starts_with(\"custom.googleapis.com/\")\n", + "type": "string", + "location": "query" + }, + "name": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\"." + }, + "pageToken": { + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. 
Using this field causes the method to return additional results from the previous method call.", + "type": "string", + "location": "query" + }, + "pageSize": { + "location": "query", + "description": "A positive number that is the maximum number of results to return.", + "format": "int32", + "type": "integer" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write" + ], + "flatPath": "v3/projects/{projectsId}/metricDescriptors", + "id": "monitoring.projects.metricDescriptors.list", + "path": "v3/{+name}/metricDescriptors", + "description": "Lists metric descriptors that match a filter. This method does not require a Stackdriver account." + }, + "get": { + "response": { + "$ref": "MetricDescriptor" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". An example value of {metric_id} is \"compute.googleapis.com/instance/disk/read_bytes_count\".", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/metricDescriptors/.+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write" + ], + "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", + "path": "v3/{+name}", + "id": "monitoring.projects.metricDescriptors.get", + "description": "Gets a single metric descriptor. This method does not require a Stackdriver account." 
+ } + } + }, + "timeSeries": { + "methods": { + "list": { + "response": { + "$ref": "ListTimeSeriesResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ], + "parameters": { + "interval.endTime": { + "description": "Required. The end of the time interval.", + "format": "google-datetime", + "type": "string", + "location": "query" + }, + "aggregation.alignmentPeriod": { + "location": "query", + "description": "The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.", + "format": "google-duration", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "A positive number that is the maximum number of results to return. When view field sets to FULL, it limits the number of Points server will return; if view field is HEADERS, it limits the number of TimeSeries server will return.", + "format": "int32", + "type": "integer" + }, + "orderBy": { + "description": "Specifies the order in which the points of the time series should be returned. By default, results are not ordered. 
Currently, this field must be left blank.", + "type": "string", + "location": "query" + }, + "aggregation.crossSeriesReducer": { + "location": "query", + "enum": [ + "REDUCE_NONE", + "REDUCE_MEAN", + "REDUCE_MIN", + "REDUCE_MAX", + "REDUCE_SUM", + "REDUCE_STDDEV", + "REDUCE_COUNT", + "REDUCE_COUNT_TRUE", + "REDUCE_FRACTION_TRUE", + "REDUCE_PERCENTILE_99", + "REDUCE_PERCENTILE_95", + "REDUCE_PERCENTILE_50", + "REDUCE_PERCENTILE_05" + ], + "description": "The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type or value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", + "type": "string" + }, + "filter": { + "description": "A monitoring filter that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example:\nmetric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND\n metric.label.instance_name = \"my-instance-name\"\n", + "type": "string", + "location": "query" + }, + "pageToken": { + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. 
Using this field causes the method to return additional results from the previous method call.", + "type": "string", + "location": "query" + }, + "aggregation.perSeriesAligner": { + "type": "string", + "location": "query", + "enum": [ + "ALIGN_NONE", + "ALIGN_DELTA", + "ALIGN_RATE", + "ALIGN_INTERPOLATE", + "ALIGN_NEXT_OLDER", + "ALIGN_MIN", + "ALIGN_MAX", + "ALIGN_MEAN", + "ALIGN_COUNT", + "ALIGN_SUM", + "ALIGN_STDDEV", + "ALIGN_COUNT_TRUE", + "ALIGN_FRACTION_TRUE", + "ALIGN_PERCENTILE_99", + "ALIGN_PERCENTILE_95", + "ALIGN_PERCENTILE_50", + "ALIGN_PERCENTILE_05" + ], + "description": "The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned." + }, + "interval.startTime": { + "type": "string", + "location": "query", + "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", + "format": "google-datetime" + }, + "view": { + "description": "Specifies which information is returned about the time series.", + "type": "string", + "location": "query", + "enum": [ + "FULL", + "HEADERS" + ] + }, + "aggregation.groupByFields": { + "type": "string", + "repeated": true, + "location": "query", + "description": "The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. 
Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored." + }, + "name": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\"." + } + }, + "flatPath": "v3/projects/{projectsId}/timeSeries", + "path": "v3/{+name}/timeSeries", + "id": "monitoring.projects.timeSeries.list", + "description": "Lists time series that match a filter. This method does not require a Stackdriver account." + }, + "create": { + "flatPath": "v3/projects/{projectsId}/timeSeries", + "id": "monitoring.projects.timeSeries.create", + "path": "v3/{+name}/timeSeries", + "request": { + "$ref": "CreateTimeSeriesRequest" + }, + "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.", + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "name": { + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The project on which to execute the request. 
The format is \"projects/{project_id_or_number}\".", + "required": true, + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.write" + ] + } + } + } + } + } + }, + "parameters": { + "fields": { + "location": "query", + "description": "Selector specifying which fields to include in a partial response.", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string", + "location": "query" + }, + "callback": { + "location": "query", + "description": "JSONP", + "type": "string" + }, + "$.xgafv": { + "description": "V1 error format.", + "type": "string", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "enum": [ + "1", + "2" + ] + }, + "alt": { + "type": "string", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "description": "Data format for response.", + "default": "json", + "enum": [ + "json", + "media", + "proto" + ] + }, + "access_token": { + "description": "OAuth access token.", + "type": "string", + "location": "query" + }, + "key": { + "type": "string", + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token." + }, + "quotaUser": { + "location": "query", + "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string" + }, + "pp": { + "location": "query", + "description": "Pretty-print response.", + "type": "boolean", + "default": "true" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "oauth_token": { + "type": "string", + "location": "query", + "description": "OAuth 2.0 token for the current user." + }, + "upload_protocol": { + "location": "query", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + } + }, + "version": "v3", + "baseUrl": "https://monitoring.googleapis.com/", "servicePath": "", "description": "Manages your Stackdriver Monitoring data and configurations. Most projects must be associated with a Stackdriver account, with a few exceptions as noted on the individual method pages.", "kind": "discovery#restDescription", "basePath": "", "id": "monitoring:v3", "documentationLink": "https://cloud.google.com/monitoring/api/", - "revision": "20170911", + "revision": "20171218", "discoveryVersion": "v1", "version_module": true, "schemas": { + "BucketOptions": { + "description": "BucketOptions describes the bucket boundaries used to create a histogram for the distribution. The buckets can be in a linear sequence, an exponential sequence, or each bucket can be specified explicitly. BucketOptions does not include the number of values in each bucket.A bucket has an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket must be strictly greater than the lower bound. 
The sequence of N buckets for a distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i \u003e 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. The finite buckets are so-called because both bounds are finite.", + "type": "object", + "properties": { + "exponentialBuckets": { + "$ref": "Exponential", + "description": "The exponential buckets." + }, + "linearBuckets": { + "$ref": "Linear", + "description": "The linear bucket." + }, + "explicitBuckets": { + "description": "The explicit buckets.", + "$ref": "Explicit" + } + }, + "id": "BucketOptions" + }, + "HttpCheck": { + "description": "Information involved in an HTTP/HTTPS uptime check request.", + "type": "object", + "properties": { + "port": { + "description": "The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) and path to construct the full URL. Optional (defaults to 80 without SSL, or 443 with SSL).", + "format": "int32", + "type": "integer" + }, + "headers": { + "additionalProperties": { + "type": "string" + }, + "description": "The list of headers to send as part of the uptime check request. If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two separate headers with the same key in a Create call will cause the first to be overwritten by the second.", + "type": "object" + }, + "path": { + "description": "The path to the page to run the check against. 
Will be combined with the host (specified within the MonitoredResource) and port to construct the full URL. Optional (defaults to \"/\").", + "type": "string" + }, + "useSsl": { + "description": "If true, use HTTPS instead of HTTP to run the check.", + "type": "boolean" + }, + "maskHeaders": { + "description": "Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will be responsible for encrypting the headers. On Get/List calls, if mask_headers is set to True then the headers will be obscured with ******.", + "type": "boolean" + }, + "authInfo": { + "$ref": "BasicAuthentication", + "description": "The authentication information. Optional when creating an HTTP check; defaults to empty." + } + }, + "id": "HttpCheck" + }, + "Status": { + "type": "object", + "properties": { + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "type": "any", + "description": "Properties of the object. Contains field @type with type URL." + } + } + }, + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + } + }, + "id": "Status", + "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). 
The error model is designed to be:\nSimple to use and understand for most users\nFlexible enough to meet unexpected needsOverviewThe Status message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers understand and resolve the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package google.rpc that can be used for common error conditions.Language mappingThe Status message is the logical representation of the error model, but it is not necessarily the actual wire format. When the Status message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.Other usesThe error model and the Status message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.Example uses of this error model include:\nPartial errors. If a service needs to return partial errors to the client, it may embed the Status in the normal response to indicate the partial errors.\nWorkflow errors. A typical workflow has multiple steps. Each step may have a Status message for error reporting.\nBatch operations. If a client uses batch request and batch response, the Status message should be used directly inside batch response, one for each error sub-response.\nAsynchronous operations. 
If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the Status message.\nLogging. If some API errors are stored in logs, the message Status could be used directly after any stripping needed for security/privacy reasons." + }, + "CollectdValue": { + "type": "object", + "properties": { + "value": { + "description": "The measurement value.", + "$ref": "TypedValue" + }, + "dataSourceType": { + "enumDescriptions": [ + "An unspecified data source type. This corresponds to google.api.MetricDescriptor.MetricKind.METRIC_KIND_UNSPECIFIED.", + "An instantaneous measurement of a varying quantity. This corresponds to google.api.MetricDescriptor.MetricKind.GAUGE.", + "A cumulative value over time. This corresponds to google.api.MetricDescriptor.MetricKind.CUMULATIVE.", + "A rate of change of the measurement.", + "An amount of change since the last measurement interval. This corresponds to google.api.MetricDescriptor.MetricKind.DELTA." + ], + "enum": [ + "UNSPECIFIED_DATA_SOURCE_TYPE", + "GAUGE", + "COUNTER", + "DERIVE", + "ABSOLUTE" + ], + "description": "The type of measurement.", + "type": "string" + }, + "dataSourceName": { + "description": "The data source for the collectd value. For example there are two data sources for network measurements: \"rx\" and \"tx\".", + "type": "string" + } + }, + "id": "CollectdValue", + "description": "A single data point from a collectd-based plugin." + }, + "MetricDescriptor": { + "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.", + "type": "object", + "properties": { + "metricKind": { + "enumDescriptions": [ + "Do not use this default value.", + "An instantaneous measurement of a value.", + "The change in a value during a time interval.", + "A value accumulated over a time interval. 
Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." + ], + "enum": [ + "METRIC_KIND_UNSPECIFIED", + "GAUGE", + "DELTA", + "CUMULATIVE" + ], + "description": "Whether the metric records instantaneous values, changes to a value, etc. Some combinations of metric_kind and value_type might not be supported.", + "type": "string" + }, + "description": { + "description": "A detailed description of the metric, which can be used in documentation.", + "type": "string" + }, + "displayName": { + "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\". This field is optional but it is recommended to be set for any metrics associated with user-visible concepts, such as Quota.", + "type": "string" + }, + "unit": { + "description": "Optional. The unit in which the metric value is reported. For example, kBy/s means kilobytes/sec, and 1 is the dimensionless unit. The supported units are a subset of The Unified Code for Units of Measure standard (http://unitsofmeasure.org/ucum.html).\u003cbr\u003e\u003cbr\u003e This field is part of the metric's documentation, but it is ignored by Stackdriver.", + "type": "string" + }, + "labels": { + "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.", + "type": "array", + "items": { + "$ref": "LabelDescriptor" + } + }, + "name": { + "description": "The resource name of the metric descriptor.", + "type": "string" + }, + "type": { + "description": "The metric type, including its DNS name prefix. 
The type is not URL-encoded. All user-defined custom metric types have the DNS name custom.googleapis.com. Metric types should use a natural hierarchical grouping. For example:\n\"custom.googleapis.com/invoice/paid/amount\"\n\"appengine.googleapis.com/http/server/response_latencies\"\n", + "type": "string" + }, + "valueType": { + "type": "string", + "enumDescriptions": [ + "Do not use this default value.", + "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", + "The value is a signed 64-bit integer.", + "The value is a double precision floating point number.", + "The value is a text string. This value type can be used only if the metric kind is GAUGE.", + "The value is a Distribution.", + "The value is money." + ], + "enum": [ + "VALUE_TYPE_UNSPECIFIED", + "BOOL", + "INT64", + "DOUBLE", + "STRING", + "DISTRIBUTION", + "MONEY" + ], + "description": "Whether the measurement is an integer, a floating-point number, etc. Some combinations of metric_kind and value_type might not be supported." + } + }, + "id": "MetricDescriptor" + }, + "SourceContext": { + "description": "SourceContext represents information about the source of a protobuf element, like the file in which it is defined.", + "type": "object", + "properties": { + "fileName": { + "description": "The path-qualified name of the .proto file that contained the associated protobuf element. 
For example: \"google/protobuf/source_context.proto\".", + "type": "string" + } + }, + "id": "SourceContext" + }, + "Range": { + "description": "The range of the population values.", + "type": "object", + "properties": { + "max": { + "description": "The maximum of the population values.", + "format": "double", + "type": "number" + }, + "min": { + "description": "The minimum of the population values.", + "format": "double", + "type": "number" + } + }, + "id": "Range" + }, + "UptimeCheckConfig": { + "id": "UptimeCheckConfig", + "description": "This message configures which resources and services to monitor for availability.", + "type": "object", + "properties": { + "contentMatchers": { + "description": "The expected content on the page the check is run against. Currently, only the first entry in the list is supported, and other entries will be ignored. The server will look for an exact match of the string in the page response's content. This field is optional and should only be specified if a content match is required.", + "type": "array", + "items": { + "$ref": "ContentMatcher" + } + }, + "timeout": { + "description": "The maximum amount of time to wait for the request to complete (must be between 1 and 60 seconds). Required.", + "format": "google-duration", + "type": "string" + }, + "period": { + "description": "How often the uptime check is performed. Currently, only 1, 5, 10, and 15 minutes are supported. Required.", + "format": "google-duration", + "type": "string" + }, + "name": { + "description": "A unique resource name for this UptimeCheckConfig. The format is:projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].This field should be omitted when creating the uptime check configuration; on create, the resource name is assigned by the server and included in the response.", + "type": "string" + }, + "httpCheck": { + "$ref": "HttpCheck", + "description": "Contains information needed to make an HTTP or HTTPS check." 
+ }, + "resourceGroup": { + "$ref": "ResourceGroup", + "description": "The group resource associated with the configuration." + }, + "tcpCheck": { + "$ref": "TcpCheck", + "description": "Contains information needed to make a TCP check." + }, + "internalCheckers": { + "description": "The internal checkers that this check will egress from. If is_internal is true and this list is empty, the check will egress from all InternalCheckers configured for the project that owns this CheckConfig.", + "type": "array", + "items": { + "$ref": "InternalChecker" + } + }, + "isInternal": { + "type": "boolean", + "description": "Denotes whether this is a check that egresses from InternalCheckers." + }, + "selectedRegions": { + "enumDescriptions": [ + "Default value if no region is specified. Will result in uptime checks running from all regions.", + "Allows checks to run from locations within the United States of America.", + "Allows checks to run from locations within the continent of Europe.", + "Allows checks to run from locations within the continent of South America.", + "Allows checks to run from locations within the Asia Pacific area (ex: Singapore)." + ], + "description": "The list of regions from which the check will be run. If this field is specified, enough regions to include a minimum of 3 locations must be provided, or an error message is returned. Not specifying this field will result in uptime checks running from all regions.", + "type": "array", + "items": { + "enum": [ + "REGION_UNSPECIFIED", + "USA", + "EUROPE", + "SOUTH_AMERICA", + "ASIA_PACIFIC" + ], + "type": "string" + } + }, + "displayName": { + "description": "A human-friendly name for the uptime check configuration. The display name should be unique within a Stackdriver Account in order to make it easier to identify; however, uniqueness is not enforced. 
Required.", + "type": "string" + }, + "monitoredResource": { + "description": "The monitored resource associated with the configuration.", + "$ref": "MonitoredResource" + } + } + }, + "ListGroupsResponse": { + "properties": { + "group": { + "description": "The groups that match the specified filters.", + "type": "array", + "items": { + "$ref": "Group" + } + }, + "nextPageToken": { + "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", + "type": "string" + } + }, + "id": "ListGroupsResponse", + "description": "The ListGroups response.", + "type": "object" + }, + "CreateCollectdTimeSeriesRequest": { + "properties": { + "collectdVersion": { + "description": "The version of collectd that collected the data. Example: \"5.3.0-192.el6\".", + "type": "string" + }, + "resource": { + "$ref": "MonitoredResource", + "description": "The monitored resource associated with the time series." + }, + "collectdPayloads": { + "description": "The collectd payloads representing the time series data. You must not include more than a single point for each time series, so no two payloads can have the same values for all of the fields plugin, plugin_instance, type, and type_instance.", + "type": "array", + "items": { + "$ref": "CollectdPayload" + } + } + }, + "id": "CreateCollectdTimeSeriesRequest", + "description": "The CreateCollectdTimeSeries request.", + "type": "object" + }, + "ListGroupMembersResponse": { + "description": "The ListGroupMembers response.", + "type": "object", + "properties": { + "members": { + "description": "A set of monitored resources in the group.", + "type": "array", + "items": { + "$ref": "MonitoredResource" + } + }, + "nextPageToken": { + "description": "If there are more results than have been returned, then this field is set to a non-empty value. 
To see the additional results, use that value as pageToken in the next call to this method.", + "type": "string" + }, + "totalSize": { + "description": "The total number of elements matching this request.", + "format": "int32", + "type": "integer" + } + }, + "id": "ListGroupMembersResponse" + }, + "ListMonitoredResourceDescriptorsResponse": { + "properties": { + "nextPageToken": { + "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", + "type": "string" + }, + "resourceDescriptors": { + "description": "The monitored resource descriptors that are available to this project and that match filter, if present.", + "type": "array", + "items": { + "$ref": "MonitoredResourceDescriptor" + } + } + }, + "id": "ListMonitoredResourceDescriptorsResponse", + "description": "The ListMonitoredResourceDescriptors response.", + "type": "object" + }, + "TimeSeries": { + "description": "A collection of data points that describes the time-varying values of a metric. A time series is identified by a combination of a fully-specified monitored resource and a fully-specified metric. This type is used for both listing and creating time series.", + "type": "object", + "properties": { + "metadata": { + "$ref": "MonitoredResourceMetadata", + "description": "Output only. The associated monitored resource metadata. When reading a timeseries, this field will include metadata labels that are explicitly named in the reduction. When creating a timeseries, this field is ignored." + }, + "valueType": { + "type": "string", + "enumDescriptions": [ + "Do not use this default value.", + "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", + "The value is a signed 64-bit integer.", + "The value is a double precision floating point number.", + "The value is a text string. 
This value type can be used only if the metric kind is GAUGE.", + "The value is a Distribution.", + "The value is money." + ], + "enum": [ + "VALUE_TYPE_UNSPECIFIED", + "BOOL", + "INT64", + "DOUBLE", + "STRING", + "DISTRIBUTION", + "MONEY" + ], + "description": "The value type of the time series. When listing time series, this value type might be different from the value type of the associated metric if this time series is an alignment or reduction of other time series.When creating a time series, this field is optional. If present, it must be the same as the type of the data in the points field." + }, + "metricKind": { + "enumDescriptions": [ + "Do not use this default value.", + "An instantaneous measurement of a value.", + "The change in a value during a time interval.", + "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." + ], + "enum": [ + "METRIC_KIND_UNSPECIFIED", + "GAUGE", + "DELTA", + "CUMULATIVE" + ], + "description": "The metric kind of the time series. When listing time series, this metric kind might be different from the metric kind of the associated metric if this time series is an alignment or reduction of other time series.When creating a time series, this field is optional. If present, it must be the same as the metric kind of the associated metric. If the associated metric's descriptor must be auto-created, then this field specifies the metric kind of the new descriptor and must be either GAUGE (the default) or CUMULATIVE.", + "type": "string" + }, + "points": { + "description": "The data points of this time series. When listing time series, the order of the points is specified by the list method.When creating a time series, this field must contain exactly one point and the point's type must be the same as the value type of the associated metric. 
If the associated metric's descriptor must be auto-created, then the value type of the descriptor is determined by the point's type, which must be BOOL, INT64, DOUBLE, or DISTRIBUTION.", + "type": "array", + "items": { + "$ref": "Point" + } + }, + "metric": { + "$ref": "Metric", + "description": "The associated metric. A fully-specified metric used to identify the time series." + }, + "resource": { + "description": "The associated monitored resource. Custom metrics can use only certain monitored resource types in their time series data.", + "$ref": "MonitoredResource" + } + }, + "id": "TimeSeries" + }, + "CreateTimeSeriesRequest": { + "description": "The CreateTimeSeries request.", + "type": "object", + "properties": { + "timeSeries": { + "type": "array", + "items": { + "$ref": "TimeSeries" + }, + "description": "The new data to be added to a list of time series. Adds at most one data point to each of several time series. The new data point must be more recent than any other point in its time series. Each TimeSeries value must fully specify a unique time series by supplying all label values for the metric and the monitored resource." + } + }, + "id": "CreateTimeSeriesRequest" + }, + "Distribution": { + "properties": { + "bucketCounts": { + "description": "Required in the Stackdriver Monitoring API v3. The values for each bucket specified in bucket_options. The sum of the values in bucketCounts must equal the value in the count field of the Distribution object. The order of the bucket counts follows the numbering schemes described for the three bucket types. The underflow bucket has number 0; the finite buckets, if any, have numbers 1 through N-2; and the overflow bucket has number N-1. The size of bucket_counts must not be greater than N. 
If the size is less than N, then the remaining buckets are assigned values of zero.", + "type": "array", + "items": { + "format": "int64", + "type": "string" + } + }, + "bucketOptions": { + "$ref": "BucketOptions", + "description": "Required in the Stackdriver Monitoring API v3. Defines the histogram bucket boundaries." + }, + "sumOfSquaredDeviation": { + "description": "The sum of squared deviations from the mean of the values in the population. For values x_i this is:\nSum[i=1..n]((x_i - mean)^2)\nKnuth, \"The Art of Computer Programming\", Vol. 2, page 323, 3rd edition describes Welford's method for accumulating this sum in one pass.If count is zero then this field must be zero.", + "format": "double", + "type": "number" + }, + "range": { + "description": "If specified, contains the range of the population values. The field must not be present if the count is zero. This field is presently ignored by the Stackdriver Monitoring API v3.", + "$ref": "Range" + }, + "count": { + "description": "The number of values in the population. Must be non-negative. This value must equal the sum of the values in bucket_counts if a histogram is provided.", + "format": "int64", + "type": "string" + }, + "mean": { + "description": "The arithmetic mean of the values in the population. If count is zero then this field must be zero.", + "format": "double", + "type": "number" + } + }, + "id": "Distribution", + "description": "Distribution contains summary statistics for a population of values. It optionally contains a histogram representing the distribution of those values across a set of buckets.The summary statistics are the count, mean, sum of the squared deviation from the mean, the minimum, and the maximum of the set of population of values. The histogram is based on a sequence of buckets and gives a count of values that fall into each bucket. 
The boundaries of the buckets are given either explicitly or by formulas for buckets of fixed or exponentially increasing widths.Although it is not forbidden, it is generally a bad idea to include non-finite values (infinities or NaNs) in the population of values, as this will render the mean and sum_of_squared_deviation fields meaningless.", + "type": "object" + }, + "MonitoredResource": { + "properties": { + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels \"project_id\", \"instance_id\", and \"zone\".", + "type": "object" + }, + "type": { + "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Compute Engine VM instance is gce_instance.", + "type": "string" + } + }, + "id": "MonitoredResource", + "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. 
For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", + "type": "object" + }, + "ListMetricDescriptorsResponse": { + "description": "The ListMetricDescriptors response.", + "type": "object", + "properties": { + "metricDescriptors": { + "description": "The metric descriptors that are available to the project and that match the value of filter, if present.", + "type": "array", + "items": { + "$ref": "MetricDescriptor" + } + }, + "nextPageToken": { + "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", + "type": "string" + } + }, + "id": "ListMetricDescriptorsResponse" + }, + "CollectdPayloadError": { + "type": "object", + "properties": { + "index": { + "description": "The zero-based index in CreateCollectdTimeSeriesRequest.collectd_payloads.", + "format": "int32", + "type": "integer" + }, + "error": { + "$ref": "Status", + "description": "Records the error status for the payload. If this field is present, the partial errors for nested values won't be populated." + }, + "valueErrors": { + "description": "Records the error status for values that were not written due to an error.Failed payloads for which nothing is written will not include partial value errors.", + "type": "array", + "items": { + "$ref": "CollectdValueError" + } + } + }, + "id": "CollectdPayloadError", + "description": "Describes the error status for payloads that were not written." + }, + "MonitoredResourceDescriptor": { + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. 
For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", + "type": "object", + "properties": { + "labels": { + "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", + "type": "array", + "items": { + "$ref": "LabelDescriptor" + } + }, + "name": { + "description": "Optional. The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", + "type": "string" + }, + "displayName": { + "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, \"Google Cloud SQL Database\".", + "type": "string" + }, + "description": { + "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", + "type": "string" + }, + "type": { + "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters.", + "type": "string" + } + }, + "id": "MonitoredResourceDescriptor" + }, + "ResourceGroup": { + "description": "The resource submessage for group checks. 
It can be used instead of a monitored resource, when multiple resources are being monitored.", + "type": "object", + "properties": { + "resourceType": { + "enumDescriptions": [ + "Default value (not valid).", + "A group of instances from Google Cloud Platform (GCP) or Amazon Web Services (AWS).", + "A group of AWS load balancers." + ], + "enum": [ + "RESOURCE_TYPE_UNSPECIFIED", + "INSTANCE", + "AWS_ELB_LOAD_BALANCER" + ], + "description": "The resource type of the group members.", + "type": "string" + }, + "groupId": { + "type": "string", + "description": "The group of resources being monitored. Should be only the group_id, not projects/\u003cproject_id\u003e/groups/\u003cgroup_id\u003e." + } + }, + "id": "ResourceGroup" + }, + "TypedValue": { + "description": "A single strongly-typed value.", + "type": "object", + "properties": { + "doubleValue": { + "description": "A 64-bit double-precision floating-point number. Its magnitude is approximately ±10\u003csup\u003e±300\u003c/sup\u003e and it has 16 significant digits of precision.", + "format": "double", + "type": "number" + }, + "int64Value": { + "description": "A 64-bit integer. Its range is approximately ±9.2x10\u003csup\u003e18\u003c/sup\u003e.", + "format": "int64", + "type": "string" + }, + "distributionValue": { + "$ref": "Distribution", + "description": "A distribution value." + }, + "boolValue": { + "description": "A Boolean value: true or false.", + "type": "boolean" + }, + "stringValue": { + "description": "A variable-length string value.", + "type": "string" + } + }, + "id": "TypedValue" + }, + "UptimeCheckIp": { + "description": "Contains the region, location, and list of IP addresses where checkers in the location run from.", + "type": "object", + "properties": { + "ipAddress": { + "description": "The IP address from which the uptime check originates. This is a full IP address (not an IP address range). 
Most IP addresses, as of this publication, are in IPv4 format; however, one should not rely on the IP addresses being in IPv4 format indefinitely and should support interpreting this field in either IPv4 or IPv6 format.", + "type": "string" + }, + "region": { + "enum": [ + "REGION_UNSPECIFIED", + "USA", + "EUROPE", + "SOUTH_AMERICA", + "ASIA_PACIFIC" + ], + "description": "A broad region category in which the IP address is located.", + "type": "string", + "enumDescriptions": [ + "Default value if no region is specified. Will result in uptime checks running from all regions.", + "Allows checks to run from locations within the United States of America.", + "Allows checks to run from locations within the continent of Europe.", + "Allows checks to run from locations within the continent of South America.", + "Allows checks to run from locations within the Asia Pacific area (ex: Singapore)." + ] + }, + "location": { + "description": "A more specific location within the region that typically encodes a particular city/town/metro (and its containing state/province or country) within the broader umbrella region category.", + "type": "string" + } + }, + "id": "UptimeCheckIp" + }, + "CollectdValueError": { + "description": "Describes the error status for values that were not written.", + "type": "object", + "properties": { + "index": { + "description": "The zero-based index in CollectdPayload.values within the parent CreateCollectdTimeSeriesRequest.collectd_payloads.", + "format": "int32", + "type": "integer" + }, + "error": { + "$ref": "Status", + "description": "Records the error status for the value." + } + }, + "id": "CollectdValueError" + }, + "CollectdPayload": { + "description": "A collection of data points sent from a collectd-based plugin. See the collectd documentation for more information.", + "type": "object", + "properties": { + "typeInstance": { + "description": "The measurement type instance. 
Example: \"used\".", + "type": "string" + }, + "metadata": { + "description": "The measurement metadata. Example: \"process_id\" -\u003e 12345", + "type": "object", + "additionalProperties": { + "$ref": "TypedValue" + } + }, + "type": { + "description": "The measurement type. Example: \"memory\".", + "type": "string" + }, + "plugin": { + "description": "The name of the plugin. Example: \"disk\".", + "type": "string" + }, + "pluginInstance": { + "description": "The instance name of the plugin Example: \"hdcl\".", + "type": "string" + }, + "endTime": { + "description": "The end time of the interval.", + "format": "google-datetime", + "type": "string" + }, + "startTime": { + "description": "The start time of the interval.", + "format": "google-datetime", + "type": "string" + }, + "values": { + "description": "The measured values during this time interval. Each value must have a different dataSourceName.", + "type": "array", + "items": { + "$ref": "CollectdValue" + } + } + }, + "id": "CollectdPayload" + }, + "CreateCollectdTimeSeriesResponse": { + "description": "The CreateCollectdTimeSeries response.", + "type": "object", + "properties": { + "payloadErrors": { + "description": "Records the error status for points that were not written due to an error.Failed requests for which nothing is written will return an error response instead.", + "type": "array", + "items": { + "$ref": "CollectdPayloadError" + } + } + }, + "id": "CreateCollectdTimeSeriesResponse" + }, + "Linear": { + "id": "Linear", + "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). 
Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", + "type": "object", + "properties": { + "width": { + "description": "Must be greater than 0.", + "format": "double", + "type": "number" + }, + "offset": { + "description": "Lower bound of the first bucket.", + "format": "double", + "type": "number" + }, + "numFiniteBuckets": { + "description": "Must be greater than 0.", + "format": "int32", + "type": "integer" + } + } + }, "Option": { + "description": "A protocol buffer option, which can be attached to a message, field, enumeration, etc.", "type": "object", "properties": { "value": { @@ -25,71 +1721,137 @@ "type": "string" } }, - "id": "Option", - "description": "A protocol buffer option, which can be attached to a message, field, enumeration, etc." + "id": "Option" + }, + "ContentMatcher": { + "description": "Used to perform string matching. Currently, this matches on the exact content. In the future, it can be expanded to allow for regular expressions and more complex matching.", + "type": "object", + "properties": { + "content": { + "description": "String content to match", + "type": "string" + } + }, + "id": "ContentMatcher" }, "Empty": { - "properties": {}, - "id": "Empty", "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "InternalChecker": { + "description": "Nimbus InternalCheckers.", + "type": "object", + "properties": { + "displayName": { + "description": "The checker's human-readable name.", + "type": "string" + }, + "projectId": { + "description": "The GCP project ID. 
Not necessarily the same as the project_id for the config.", + "type": "string" + }, + "checkerId": { + "description": "The checker ID.", + "type": "string" + }, + "gcpZone": { + "type": "string", + "description": "The GCP zone the uptime check should egress from. Only respected for internal uptime checks, where internal_network is specified." + }, + "network": { + "description": "The internal network to perform this uptime check on.", + "type": "string" + } + }, + "id": "InternalChecker" + }, + "TcpCheck": { + "description": "Information required for a TCP uptime check request.", + "type": "object", + "properties": { + "port": { + "type": "integer", + "description": "The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) to construct the full URL. Required.", + "format": "int32" + } + }, + "id": "TcpCheck" + }, + "Explicit": { + "properties": { + "bounds": { + "description": "The values must be monotonically increasing.", + "type": "array", + "items": { + "format": "double", + "type": "number" + } + } + }, + "id": "Explicit", + "description": "Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): boundsi Lower bound (1 \u003c= i \u003c N); boundsi - 1The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", "type": "object" }, "TimeInterval": { "type": "object", "properties": { "endTime": { - "format": "google-datetime", "description": "Required. The end of the time interval.", + "format": "google-datetime", "type": "string" }, "startTime": { - "type": "string", + "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. 
The start time must not be later than the end time.", "format": "google-datetime", - "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time." + "type": "string" } }, "id": "TimeInterval", "description": "A time interval extending just after a start time through an end time. If the start time is the same as the end time, then the interval represents a single point in time." }, - "Explicit": { - "description": "Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): boundsi Lower bound (1 \u003c= i \u003c N); boundsi - 1The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", + "ListUptimeCheckIpsResponse": { "type": "object", "properties": { - "bounds": { - "items": { - "format": "double", - "type": "number" - }, + "uptimeCheckIps": { "type": "array", - "description": "The values must be monotonically increasing." + "items": { + "$ref": "UptimeCheckIp" + }, + "description": "The returned list of IP addresses (including region and location) that the checkers run from." + }, + "nextPageToken": { + "description": "This field represents the pagination token to retrieve the next page of results. If the value is empty, it means no further results for the request. To retrieve the next page of results, the value of the next_page_token is passed to the subsequent List method call (in the request message's page_token field). NOTE: this field is not yet implemented", + "type": "string" } }, - "id": "Explicit" + "id": "ListUptimeCheckIpsResponse", + "description": "The protocol for the ListUptimeCheckIps response." 
}, "Exponential": { "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", "type": "object", "properties": { - "growthFactor": { - "format": "double", - "description": "Must be greater than 1.", - "type": "number" - }, "scale": { - "format": "double", "description": "Must be greater than 0.", + "format": "double", "type": "number" }, "numFiniteBuckets": { - "format": "int32", "description": "Must be greater than 0.", + "format": "int32", "type": "integer" + }, + "growthFactor": { + "description": "Must be greater than 1.", + "format": "double", + "type": "number" } }, "id": "Exponential" }, "Point": { - "type": "object", "properties": { "value": { "$ref": "TypedValue", @@ -101,50 +1863,49 @@ } }, "id": "Point", - "description": "A single data point in a time series." + "description": "A single data point in a time series.", + "type": "object" }, "Metric": { "description": "A specific metric, identified by specifying values for all of the labels of a MetricDescriptor.", "type": "object", "properties": { - "type": { - "description": "An existing metric type, see google.api.MetricDescriptor. For example, custom.googleapis.com/invoice/paid/amount.", - "type": "string" - }, "labels": { "additionalProperties": { "type": "string" }, "description": "The set of label values that uniquely identify this metric. All labels listed in the MetricDescriptor must be assigned values.", "type": "object" + }, + "type": { + "description": "An existing metric type, see google.api.MetricDescriptor. 
For example, custom.googleapis.com/invoice/paid/amount.", + "type": "string" } }, "id": "Metric" }, + "ListUptimeCheckConfigsResponse": { + "description": "The protocol for the ListUptimeCheckConfigs response.", + "type": "object", + "properties": { + "uptimeCheckConfigs": { + "description": "The returned uptime check configurations.", + "type": "array", + "items": { + "$ref": "UptimeCheckConfig" + } + }, + "nextPageToken": { + "type": "string", + "description": "This field represents the pagination token to retrieve the next page of results. If the value is empty, it means no further results for the request. To retrieve the next page of results, the value of the next_page_token is passed to the subsequent List method call (in the request message's page_token field)." + } + }, + "id": "ListUptimeCheckConfigsResponse" + }, "Field": { "description": "A single field of a message type.", "type": "object", "properties": { - "packed": { - "description": "Whether to use alternative packed wire representation.", - "type": "boolean" - }, - "cardinality": { - "enumDescriptions": [ - "For fields with unknown cardinality.", - "For optional fields.", - "For required fields. Proto2 syntax only.", - "For repeated fields." - ], - "enum": [ - "CARDINALITY_UNKNOWN", - "CARDINALITY_OPTIONAL", - "CARDINALITY_REQUIRED", - "CARDINALITY_REPEATED" - ], - "description": "The field cardinality.", - "type": "string" - }, "defaultValue": { "description": "The string value of the default value of this field. Proto2 syntax only.", "type": "string" @@ -154,12 +1915,12 @@ "type": "string" }, "typeUrl": { - "description": "The field type URL, without the scheme, for message or enumeration types. Example: \"type.googleapis.com/google.protobuf.Timestamp\".", - "type": "string" + "type": "string", + "description": "The field type URL, without the scheme, for message or enumeration types. Example: \"type.googleapis.com/google.protobuf.Timestamp\"." 
}, "number": { - "format": "int32", "description": "The field number.", + "format": "int32", "type": "integer" }, "jsonName": { @@ -167,8 +1928,6 @@ "type": "string" }, "kind": { - "description": "The field type.", - "type": "string", "enumDescriptions": [ "Field type unknown.", "Field type double.", @@ -210,25 +1969,46 @@ "TYPE_SFIXED64", "TYPE_SINT32", "TYPE_SINT64" - ] + ], + "description": "The field type.", + "type": "string" }, "options": { "description": "The protocol buffer options.", + "type": "array", "items": { "$ref": "Option" - }, - "type": "array" + } }, "oneofIndex": { - "format": "int32", "description": "The index of the field type in Type.oneofs, for message or enumeration types. The first type has index 1; zero means the type is not in the list.", + "format": "int32", "type": "integer" + }, + "cardinality": { + "enumDescriptions": [ + "For fields with unknown cardinality.", + "For optional fields.", + "For required fields. Proto2 syntax only.", + "For repeated fields." + ], + "enum": [ + "CARDINALITY_UNKNOWN", + "CARDINALITY_OPTIONAL", + "CARDINALITY_REQUIRED", + "CARDINALITY_REPEATED" + ], + "description": "The field cardinality.", + "type": "string" + }, + "packed": { + "description": "Whether to use alternative packed wire representation.", + "type": "boolean" } }, "id": "Field" }, "LabelDescriptor": { - "id": "LabelDescriptor", "description": "A description of a label.", "type": "object", "properties": { @@ -241,35 +2021,36 @@ "type": "string" }, "valueType": { - "enum": [ - "STRING", - "BOOL", - "INT64" - ], "description": "The type of data that can be assigned to the label.", "type": "string", "enumDescriptions": [ "A variable-length string. This is the default.", "Boolean; true or false.", "A 64-bit signed integer." 
+ ], + "enum": [ + "STRING", + "BOOL", + "INT64" ] } - } + }, + "id": "LabelDescriptor" }, "ListTimeSeriesResponse": { "description": "The ListTimeSeries response.", "type": "object", "properties": { - "timeSeries": { - "description": "One or more time series that match the filter included in the request.", - "items": { - "$ref": "TimeSeries" - }, - "type": "array" - }, "nextPageToken": { "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", "type": "string" + }, + "timeSeries": { + "description": "One or more time series that match the filter included in the request.", + "type": "array", + "items": { + "$ref": "TimeSeries" + } } }, "id": "ListTimeSeriesResponse" @@ -279,17 +2060,17 @@ "type": "object", "properties": { "filter": { - "type": "string", - "description": "The filter used to determine which monitored resources belong to this group." - }, - "parentName": { - "type": "string", - "description": "The name of the group's parent, if it has one. The format is \"projects/{project_id_or_number}/groups/{group_id}\". For groups with no parent, parentName is the empty string, \"\"." + "description": "The filter used to determine which monitored resources belong to this group.", + "type": "string" }, "name": { "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.", "type": "string" }, + "parentName": { + "description": "The name of the group's parent, if it has one. The format is \"projects/{project_id_or_number}/groups/{group_id}\". 
For groups with no parent, parentName is the empty string, \"\".", + "type": "string" + }, "displayName": { "description": "A user-assigned name for this group, used only for display purposes.", "type": "string" @@ -305,35 +2086,12 @@ "description": "A protocol buffer message type.", "type": "object", "properties": { - "syntax": { - "description": "The source syntax.", - "type": "string", - "enumDescriptions": [ - "Syntax proto2.", - "Syntax proto3." - ], - "enum": [ - "SYNTAX_PROTO2", - "SYNTAX_PROTO3" - ] - }, - "sourceContext": { - "$ref": "SourceContext", - "description": "The source context." - }, - "options": { - "description": "The protocol buffer options.", - "items": { - "$ref": "Option" - }, - "type": "array" - }, "fields": { "description": "The list of fields.", + "type": "array", "items": { "$ref": "Field" - }, - "type": "array" + } }, "name": { "description": "The fully qualified message name.", @@ -341,624 +2099,95 @@ }, "oneofs": { "description": "The list of types appearing in oneof definitions in this type.", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } + }, + "sourceContext": { + "description": "The source context.", + "$ref": "SourceContext" + }, + "syntax": { + "enumDescriptions": [ + "Syntax proto2.", + "Syntax proto3." + ], + "enum": [ + "SYNTAX_PROTO2", + "SYNTAX_PROTO3" + ], + "description": "The source syntax.", + "type": "string" + }, + "options": { + "description": "The protocol buffer options.", + "type": "array", + "items": { + "$ref": "Option" + } } }, "id": "Type" }, - "BucketOptions": { - "id": "BucketOptions", - "description": "BucketOptions describes the bucket boundaries used to create a histogram for the distribution. The buckets can be in a linear sequence, an exponential sequence, or each bucket can be specified explicitly. 
BucketOptions does not include the number of values in each bucket.A bucket has an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket must be strictly greater than the lower bound. The sequence of N buckets for a distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i \u003e 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. The finite buckets are so-called because both bounds are finite.", + "BasicAuthentication": { + "description": "A type of authentication to perform against the specified resource or URL that uses username and password. Currently, only Basic authentication is supported in Uptime Monitoring.", "type": "object", "properties": { - "explicitBuckets": { - "description": "The explicit buckets.", - "$ref": "Explicit" - }, - "linearBuckets": { - "description": "The linear bucket.", - "$ref": "Linear" - }, - "exponentialBuckets": { - "$ref": "Exponential", - "description": "The exponential buckets." - } - } - }, - "CollectdValue": { - "description": "A single data point from a collectd-based plugin.", - "type": "object", - "properties": { - "dataSourceType": { - "enum": [ - "UNSPECIFIED_DATA_SOURCE_TYPE", - "GAUGE", - "COUNTER", - "DERIVE", - "ABSOLUTE" - ], - "description": "The type of measurement.", - "type": "string", - "enumDescriptions": [ - "An unspecified data source type. This corresponds to google.api.MetricDescriptor.MetricKind.METRIC_KIND_UNSPECIFIED.", - "An instantaneous measurement of a varying quantity. This corresponds to google.api.MetricDescriptor.MetricKind.GAUGE.", - "A cumulative value over time. 
This corresponds to google.api.MetricDescriptor.MetricKind.CUMULATIVE.", - "A rate of change of the measurement.", - "An amount of change since the last measurement interval. This corresponds to google.api.MetricDescriptor.MetricKind.DELTA." - ] - }, - "dataSourceName": { - "description": "The data source for the collectd value. For example there are two data sources for network measurements: \"rx\" and \"tx\".", + "password": { + "description": "The password to authenticate.", "type": "string" }, - "value": { - "$ref": "TypedValue", - "description": "The measurement value." + "username": { + "description": "The username to authenticate.", + "type": "string" } }, - "id": "CollectdValue" + "id": "BasicAuthentication" }, - "Status": { - "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). The error model is designed to be:\nSimple to use and understand for most users\nFlexible enough to meet unexpected needsOverviewThe Status message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers understand and resolve the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package google.rpc that can be used for common error conditions.Language mappingThe Status message is the logical representation of the error model, but it is not necessarily the actual wire format. When the Status message is exposed in different client libraries and different wire protocols, it can be mapped differently. 
For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.Other usesThe error model and the Status message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.Example uses of this error model include:\nPartial errors. If a service needs to return partial errors to the client, it may embed the Status in the normal response to indicate the partial errors.\nWorkflow errors. A typical workflow has multiple steps. Each step may have a Status message for error reporting.\nBatch operations. If a client uses batch request and batch response, the Status message should be used directly inside batch response, one for each error sub-response.\nAsynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the Status message.\nLogging. If some API errors are stored in logs, the message Status could be used directly after any stripping needed for security/privacy reasons.", + "MonitoredResourceMetadata": { + "description": "Auxiliary metadata for a MonitoredResource object. MonitoredResource objects contain the minimum set of information to uniquely identify a monitored resource instance. There is some other useful auxiliary metadata. Google Stackdriver Monitoring & Logging uses an ingestion pipeline to extract metadata for cloud resources of all types , and stores the metadata in this message.", "type": "object", "properties": { - "code": { - "format": "int32", - "description": "The status code, which should be an enum value of google.rpc.Code.", - "type": "integer" - }, - "message": { - "description": "A developer-facing error message, which should be in English. 
Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", - "type": "string" - }, - "details": { - "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", - "items": { - "type": "object", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - } + "systemLabels": { + "additionalProperties": { + "type": "any", + "description": "Properties of the object." }, - "type": "array" - } - }, - "id": "Status" - }, - "SourceContext": { - "description": "SourceContext represents information about the source of a protobuf element, like the file in which it is defined.", - "type": "object", - "properties": { - "fileName": { - "type": "string", - "description": "The path-qualified name of the .proto file that contained the associated protobuf element. For example: \"google/protobuf/source_context.proto\"." - } - }, - "id": "SourceContext" - }, - "MetricDescriptor": { - "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.", - "type": "object", - "properties": { - "metricKind": { - "enumDescriptions": [ - "Do not use this default value.", - "An instantaneous measurement of a value.", - "The change in a value during a time interval.", - "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." - ], - "enum": [ - "METRIC_KIND_UNSPECIFIED", - "GAUGE", - "DELTA", - "CUMULATIVE" - ], - "description": "Whether the metric records instantaneous values, changes to a value, etc. 
Some combinations of metric_kind and value_type might not be supported.", - "type": "string" + "description": "Output only. Values for predefined system metadata labels. System labels are a kind of metadata extracted by Google Stackdriver. Stackdriver determines what system labels are useful and how to obtain their values. Some examples: \"machine_image\", \"vpc\", \"subnet_id\", \"security_group\", \"name\", etc. System label values can be only strings, Boolean values, or a list of strings. For example:\n{ \"name\": \"my-test-instance\",\n \"security_group\": [\"a\", \"b\", \"c\"],\n \"spot_instance\": false }\n", + "type": "object" }, - "description": { - "type": "string", - "description": "A detailed description of the metric, which can be used in documentation." - }, - "displayName": { - "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\".", - "type": "string" - }, - "unit": { - "description": "The unit in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The supported units are a subset of The Unified Code for Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT)\nbit bit\nBy byte\ns second\nmin minute\nh hour\nd dayPrefixes (PREFIX)\nk kilo (10**3)\nM mega (10**6)\nG giga (10**9)\nT tera (10**12)\nP peta (10**15)\nE exa (10**18)\nZ zetta (10**21)\nY yotta (10**24)\nm milli (10**-3)\nu micro (10**-6)\nn nano (10**-9)\np pico (10**-12)\nf femto (10**-15)\na atto (10**-18)\nz zepto (10**-21)\ny yocto (10**-24)\nKi kibi (2**10)\nMi mebi (2**20)\nGi gibi (2**30)\nTi tebi (2**40)GrammarThe grammar includes the dimensionless unit 1, such as 1/s.The grammar also includes these connectors:\n/ division (as an infix operator, e.g. 1/s).\n. multiplication (as an infix operator, e.g. 
GBy.d)The grammar for a unit is as follows:\nExpression = Component { \".\" Component } { \"/\" Component } ;\n\nComponent = [ PREFIX ] UNIT [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\nAnnotation = \"{\" NAME \"}\" ;\nNotes:\nAnnotation is just a comment if it follows a UNIT and is equivalent to 1 if it is used alone. For examples, {requests}/s == 1/s, By{transmitted}/s == By/s.\nNAME is a sequence of non-blank printable ASCII characters not containing '{' or '}'.", - "type": "string" - }, - "labels": { - "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.", - "items": { - "$ref": "LabelDescriptor" - }, - "type": "array" - }, - "name": { - "description": "The resource name of the metric descriptor. Depending on the implementation, the name typically includes: (1) the parent resource name that defines the scope of the metric type or of its data; and (2) the metric's URL-encoded type, which also appears in the type field of this descriptor. For example, following is the resource name of a custom metric within the GCP project my-project-id:\n\"projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount\"\n", - "type": "string" - }, - "type": { - "type": "string", - "description": "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined custom metric types have the DNS name custom.googleapis.com. Metric types should use a natural hierarchical grouping. For example:\n\"custom.googleapis.com/invoice/paid/amount\"\n\"appengine.googleapis.com/http/server/response_latencies\"\n" - }, - "valueType": { - "enumDescriptions": [ - "Do not use this default value.", - "The value is a boolean. 
This value type can be used only if the metric kind is GAUGE.", - "The value is a signed 64-bit integer.", - "The value is a double precision floating point number.", - "The value is a text string. This value type can be used only if the metric kind is GAUGE.", - "The value is a Distribution.", - "The value is money." - ], - "enum": [ - "VALUE_TYPE_UNSPECIFIED", - "BOOL", - "INT64", - "DOUBLE", - "STRING", - "DISTRIBUTION", - "MONEY" - ], - "description": "Whether the measurement is an integer, a floating-point number, etc. Some combinations of metric_kind and value_type might not be supported.", - "type": "string" - } - }, - "id": "MetricDescriptor" - }, - "Range": { - "type": "object", - "properties": { - "min": { - "format": "double", - "description": "The minimum of the population values.", - "type": "number" - }, - "max": { - "format": "double", - "description": "The maximum of the population values.", - "type": "number" - } - }, - "id": "Range", - "description": "The range of the population values." - }, - "ListGroupsResponse": { - "description": "The ListGroups response.", - "type": "object", - "properties": { - "group": { - "description": "The groups that match the specified filters.", - "items": { - "$ref": "Group" - }, - "type": "array" - }, - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - } - }, - "id": "ListGroupsResponse" - }, - "CreateCollectdTimeSeriesRequest": { - "description": "The CreateCollectdTimeSeries request.", - "type": "object", - "properties": { - "collectdVersion": { - "description": "The version of collectd that collected the data. Example: \"5.3.0-192.el6\".", - "type": "string" - }, - "collectdPayloads": { - "description": "The collectd payloads representing the time series data. 
You must not include more than a single point for each time series, so no two payloads can have the same values for all of the fields plugin, plugin_instance, type, and type_instance.", - "items": { - "$ref": "CollectdPayload" - }, - "type": "array" - }, - "resource": { - "$ref": "MonitoredResource", - "description": "The monitored resource associated with the time series." - } - }, - "id": "CreateCollectdTimeSeriesRequest" - }, - "ListGroupMembersResponse": { - "id": "ListGroupMembersResponse", - "description": "The ListGroupMembers response.", - "type": "object", - "properties": { - "members": { - "description": "A set of monitored resources in the group.", - "items": { - "$ref": "MonitoredResource" - }, - "type": "array" - }, - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - }, - "totalSize": { - "format": "int32", - "description": "The total number of elements matching this request.", - "type": "integer" - } - } - }, - "ListMonitoredResourceDescriptorsResponse": { - "id": "ListMonitoredResourceDescriptorsResponse", - "description": "The ListMonitoredResourceDescriptors response.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - }, - "resourceDescriptors": { - "description": "The monitored resource descriptors that are available to this project and that match filter, if present.", - "items": { - "$ref": "MonitoredResourceDescriptor" - }, - "type": "array" - } - } - }, - "TimeSeries": { - "description": "A collection of data points that describes the time-varying values of a metric. 
A time series is identified by a combination of a fully-specified monitored resource and a fully-specified metric. This type is used for both listing and creating time series.", - "type": "object", - "properties": { - "metricKind": { - "enumDescriptions": [ - "Do not use this default value.", - "An instantaneous measurement of a value.", - "The change in a value during a time interval.", - "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." - ], - "enum": [ - "METRIC_KIND_UNSPECIFIED", - "GAUGE", - "DELTA", - "CUMULATIVE" - ], - "description": "The metric kind of the time series. When listing time series, this metric kind might be different from the metric kind of the associated metric if this time series is an alignment or reduction of other time series.When creating a time series, this field is optional. If present, it must be the same as the metric kind of the associated metric. If the associated metric's descriptor must be auto-created, then this field specifies the metric kind of the new descriptor and must be either GAUGE (the default) or CUMULATIVE.", - "type": "string" - }, - "points": { - "description": "The data points of this time series. When listing time series, the order of the points is specified by the list method.When creating a time series, this field must contain exactly one point and the point's type must be the same as the value type of the associated metric. If the associated metric's descriptor must be auto-created, then the value type of the descriptor is determined by the point's type, which must be BOOL, INT64, DOUBLE, or DISTRIBUTION.", - "items": { - "$ref": "Point" - }, - "type": "array" - }, - "metric": { - "description": "The associated metric. 
A fully-specified metric used to identify the time series.", - "$ref": "Metric" - }, - "valueType": { - "description": "The value type of the time series. When listing time series, this value type might be different from the value type of the associated metric if this time series is an alignment or reduction of other time series.When creating a time series, this field is optional. If present, it must be the same as the type of the data in the points field.", - "type": "string", - "enumDescriptions": [ - "Do not use this default value.", - "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", - "The value is a signed 64-bit integer.", - "The value is a double precision floating point number.", - "The value is a text string. This value type can be used only if the metric kind is GAUGE.", - "The value is a Distribution.", - "The value is money." - ], - "enum": [ - "VALUE_TYPE_UNSPECIFIED", - "BOOL", - "INT64", - "DOUBLE", - "STRING", - "DISTRIBUTION", - "MONEY" - ] - }, - "resource": { - "$ref": "MonitoredResource", - "description": "The associated monitored resource. Custom metrics can use only certain monitored resource types in their time series data." - } - }, - "id": "TimeSeries" - }, - "CreateTimeSeriesRequest": { - "description": "The CreateTimeSeries request.", - "type": "object", - "properties": { - "timeSeries": { - "description": "The new data to be added to a list of time series. Adds at most one data point to each of several time series. The new data point must be more recent than any other point in its time series. 
Each TimeSeries value must fully specify a unique time series by supplying all label values for the metric and the monitored resource.", - "items": { - "$ref": "TimeSeries" - }, - "type": "array" - } - }, - "id": "CreateTimeSeriesRequest" - }, - "Distribution": { - "properties": { - "sumOfSquaredDeviation": { - "format": "double", - "description": "The sum of squared deviations from the mean of the values in the population. For values x_i this is:\nSum[i=1..n]((x_i - mean)^2)\nKnuth, \"The Art of Computer Programming\", Vol. 2, page 323, 3rd edition describes Welford's method for accumulating this sum in one pass.If count is zero then this field must be zero.", - "type": "number" - }, - "range": { - "$ref": "Range", - "description": "If specified, contains the range of the population values. The field must not be present if the count is zero. This field is presently ignored by the Stackdriver Monitoring API v3." - }, - "count": { - "format": "int64", - "description": "The number of values in the population. Must be non-negative. This value must equal the sum of the values in bucket_counts if a histogram is provided.", - "type": "string" - }, - "mean": { - "format": "double", - "description": "The arithmetic mean of the values in the population. If count is zero then this field must be zero.", - "type": "number" - }, - "bucketCounts": { - "description": "Required in the Stackdriver Monitoring API v3. The values for each bucket specified in bucket_options. The sum of the values in bucketCounts must equal the value in the count field of the Distribution object. The order of the bucket counts follows the numbering schemes described for the three bucket types. The underflow bucket has number 0; the finite buckets, if any, have numbers 1 through N-2; and the overflow bucket has number N-1. The size of bucket_counts must not be greater than N. 
If the size is less than N, then the remaining buckets are assigned values of zero.", - "items": { - "format": "int64", - "type": "string" - }, - "type": "array" - }, - "bucketOptions": { - "$ref": "BucketOptions", - "description": "Required in the Stackdriver Monitoring API v3. Defines the histogram bucket boundaries." - } - }, - "id": "Distribution", - "description": "Distribution contains summary statistics for a population of values. It optionally contains a histogram representing the distribution of those values across a set of buckets.The summary statistics are the count, mean, sum of the squared deviation from the mean, the minimum, and the maximum of the set of population of values. The histogram is based on a sequence of buckets and gives a count of values that fall into each bucket. The boundaries of the buckets are given either explicitly or by formulas for buckets of fixed or exponentially increasing widths.Although it is not forbidden, it is generally a bad idea to include non-finite values (infinities or NaNs) in the population of values, as this will render the mean and sum_of_squared_deviation fields meaningless.", - "type": "object" - }, - "MonitoredResource": { - "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. 
For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", - "type": "object", - "properties": { - "type": { - "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Compute Engine VM instance is gce_instance.", - "type": "string" - }, - "labels": { + "userLabels": { "additionalProperties": { "type": "string" }, - "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels \"project_id\", \"instance_id\", and \"zone\".", + "description": "Output only. A map of user-defined metadata labels.", "type": "object" } }, - "id": "MonitoredResource" - }, - "ListMetricDescriptorsResponse": { - "description": "The ListMetricDescriptors response.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. 
To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - }, - "metricDescriptors": { - "description": "The metric descriptors that are available to the project and that match the value of filter, if present.", - "items": { - "$ref": "MetricDescriptor" - }, - "type": "array" - } - }, - "id": "ListMetricDescriptorsResponse" - }, - "CollectdPayloadError": { - "description": "Describes the error status for payloads that were not written.", - "type": "object", - "properties": { - "valueErrors": { - "description": "Records the error status for values that were not written due to an error.Failed payloads for which nothing is written will not include partial value errors.", - "items": { - "$ref": "CollectdValueError" - }, - "type": "array" - }, - "error": { - "$ref": "Status", - "description": "Records the error status for the payload. If this field is present, the partial errors for nested values won't be populated." - }, - "index": { - "type": "integer", - "format": "int32", - "description": "The zero-based index in CreateCollectdTimeSeriesRequest.collectd_payloads." - } - }, - "id": "CollectdPayloadError" - }, - "MonitoredResourceDescriptor": { - "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", - "type": "object", - "properties": { - "labels": { - "description": "Required. A set of labels used to describe instances of this monitored resource type. 
For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", - "items": { - "$ref": "LabelDescriptor" - }, - "type": "array" - }, - "name": { - "description": "Optional. The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", - "type": "string" - }, - "description": { - "type": "string", - "description": "Optional. A detailed description of the monitored resource type that might be used in documentation." - }, - "displayName": { - "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, \"Google Cloud SQL Database\".", - "type": "string" - }, - "type": { - "type": "string", - "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters." - } - }, - "id": "MonitoredResourceDescriptor" - }, - "TypedValue": { - "description": "A single strongly-typed value.", - "type": "object", - "properties": { - "distributionValue": { - "$ref": "Distribution", - "description": "A distribution value." - }, - "stringValue": { - "description": "A variable-length string value.", - "type": "string" - }, - "boolValue": { - "description": "A Boolean value: true or false.", - "type": "boolean" - }, - "doubleValue": { - "format": "double", - "description": "A 64-bit double-precision floating-point number. 
Its magnitude is approximately ±10\u003csup\u003e±300\u003c/sup\u003e and it has 16 significant digits of precision.", - "type": "number" - }, - "int64Value": { - "format": "int64", - "description": "A 64-bit integer. Its range is approximately ±9.2x10\u003csup\u003e18\u003c/sup\u003e.", - "type": "string" - } - }, - "id": "TypedValue" - }, - "CollectdValueError": { - "description": "Describes the error status for values that were not written.", - "type": "object", - "properties": { - "index": { - "format": "int32", - "description": "The zero-based index in CollectdPayload.values within the parent CreateCollectdTimeSeriesRequest.collectd_payloads.", - "type": "integer" - }, - "error": { - "description": "Records the error status for the value.", - "$ref": "Status" - } - }, - "id": "CollectdValueError" - }, - "CollectdPayload": { - "description": "A collection of data points sent from a collectd-based plugin. See the collectd documentation for more information.", - "type": "object", - "properties": { - "endTime": { - "type": "string", - "format": "google-datetime", - "description": "The end time of the interval." - }, - "startTime": { - "format": "google-datetime", - "description": "The start time of the interval.", - "type": "string" - }, - "values": { - "description": "The measured values during this time interval. Each value must have a different dataSourceName.", - "items": { - "$ref": "CollectdValue" - }, - "type": "array" - }, - "typeInstance": { - "description": "The measurement type instance. Example: \"used\".", - "type": "string" - }, - "metadata": { - "description": "The measurement metadata. Example: \"process_id\" -\u003e 12345", - "type": "object", - "additionalProperties": { - "$ref": "TypedValue" - } - }, - "type": { - "description": "The measurement type. Example: \"memory\".", - "type": "string" - }, - "plugin": { - "description": "The name of the plugin. 
Example: \"disk\".", - "type": "string" - }, - "pluginInstance": { - "description": "The instance name of the plugin Example: \"hdcl\".", - "type": "string" - } - }, - "id": "CollectdPayload" - }, - "CreateCollectdTimeSeriesResponse": { - "id": "CreateCollectdTimeSeriesResponse", - "description": "The CreateCollectdTimeSeries response.", - "type": "object", - "properties": { - "payloadErrors": { - "description": "Records the error status for points that were not written due to an error.Failed requests for which nothing is written will return an error response instead.", - "items": { - "$ref": "CollectdPayloadError" - }, - "type": "array" - } - } - }, - "Linear": { - "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", - "type": "object", - "properties": { - "numFiniteBuckets": { - "type": "integer", - "format": "int32", - "description": "Must be greater than 0." 
- }, - "width": { - "format": "double", - "description": "Must be greater than 0.", - "type": "number" - }, - "offset": { - "format": "double", - "description": "Lower bound of the first bucket.", - "type": "number" - } - }, - "id": "Linear" + "id": "MonitoredResourceMetadata" } }, "protocol": "rest", "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" }, "canonicalName": "Monitoring", "auth": { "oauth2": { "scopes": { + "https://www.googleapis.com/auth/monitoring": { + "description": "View and write monitoring data for all of your Google and third-party Cloud and API projects" + }, "https://www.googleapis.com/auth/monitoring.write": { "description": "Publish metric data to your Google Cloud projects" }, - "https://www.googleapis.com/auth/monitoring.read": { - "description": "View monitoring data for all of your Google Cloud and third-party projects" - }, "https://www.googleapis.com/auth/cloud-platform": { "description": "View and manage your data across Google Cloud Platform services" }, - "https://www.googleapis.com/auth/monitoring": { - "description": "View and write monitoring data for all of your Google and third-party Cloud and API projects" + "https://www.googleapis.com/auth/monitoring.read": { + "description": "View monitoring data for all of your Google Cloud and third-party projects" } } } @@ -968,742 +2197,5 @@ "name": "monitoring", "batchPath": "batch", "fullyEncodeReservedExpansion": true, - "title": "Stackdriver Monitoring API", - "ownerName": "Google", - "resources": { - "projects": { - "resources": { - "collectdTimeSeries": { - "methods": { - "create": { - "request": { - "$ref": "CreateCollectdTimeSeriesRequest" - }, - "description": "Stackdriver Monitoring Agent only: Creates a new time series.\u003caside 
class=\"caution\"\u003eThis method is only for use by the Stackdriver Monitoring Agent. Use projects.timeSeries.create instead.\u003c/aside\u003e", - "response": { - "$ref": "CreateCollectdTimeSeriesResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "parameters": { - "name": { - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project in which to create the time series. The format is \"projects/PROJECT_ID_OR_NUMBER\"." - } - }, - "flatPath": "v3/projects/{projectsId}/collectdTimeSeries", - "id": "monitoring.projects.collectdTimeSeries.create", - "path": "v3/{+name}/collectdTimeSeries" - } - } - }, - "timeSeries": { - "methods": { - "create": { - "id": "monitoring.projects.timeSeries.create", - "path": "v3/{+name}/timeSeries", - "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.", - "request": { - "$ref": "CreateTimeSeriesRequest" - }, - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "location": "path", - "description": "The project on which to execute the request. 
The format is \"projects/{project_id_or_number}\".", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/timeSeries" - }, - "list": { - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "response": { - "$ref": "ListTimeSeriesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "parameters": { - "filter": { - "location": "query", - "description": "A monitoring filter that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example:\nmetric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND\n metric.label.instance_name = \"my-instance-name\"\n", - "type": "string" - }, - "aggregation.perSeriesAligner": { - "location": "query", - "enum": [ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05" - ], - "description": "The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. 
If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - "type": "string" - }, - "pageToken": { - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string", - "location": "query" - }, - "interval.startTime": { - "location": "query", - "format": "google-datetime", - "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", - "type": "string" - }, - "view": { - "description": "Specifies which information is returned about the time series.", - "type": "string", - "location": "query", - "enum": [ - "FULL", - "HEADERS" - ] - }, - "aggregation.groupByFields": { - "repeated": true, - "location": "query", - "description": "The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.", - "type": "string" - }, - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project on which to execute the request. 
The format is \"projects/{project_id_or_number}\".", - "type": "string", - "required": true - }, - "interval.endTime": { - "format": "google-datetime", - "description": "Required. The end of the time interval.", - "type": "string", - "location": "query" - }, - "aggregation.alignmentPeriod": { - "format": "google-duration", - "description": "The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.", - "type": "string", - "location": "query" - }, - "pageSize": { - "location": "query", - "format": "int32", - "description": "A positive number that is the maximum number of results to return. When view field sets to FULL, it limits the number of Points server will return; if view field is HEADERS, it limits the number of TimeSeries server will return.", - "type": "integer" - }, - "orderBy": { - "type": "string", - "location": "query", - "description": "Specifies the order in which the points of the time series should be returned. By default, results are not ordered. Currently, this field must be left blank." - }, - "aggregation.crossSeriesReducer": { - "type": "string", - "location": "query", - "enum": [ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05" - ], - "description": "The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. 
Reduction may change the metric type of value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned." - } - }, - "flatPath": "v3/projects/{projectsId}/timeSeries", - "id": "monitoring.projects.timeSeries.list", - "path": "v3/{+name}/timeSeries", - "description": "Lists time series that match a filter. This method does not require a Stackdriver account." - } - } - }, - "metricDescriptors": { - "methods": { - "get": { - "response": { - "$ref": "MetricDescriptor" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "parameters": { - "name": { - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/metricDescriptors/.+$", - "location": "path", - "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". An example value of {metric_id} is \"compute.googleapis.com/instance/disk/read_bytes_count\"." - } - }, - "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", - "id": "monitoring.projects.metricDescriptors.get", - "path": "v3/{+name}", - "description": "Gets a single metric descriptor. This method does not require a Stackdriver account." 
- }, - "list": { - "response": { - "$ref": "ListMetricDescriptorsResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "parameters": { - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "type": "string", - "required": true - }, - "pageSize": { - "location": "query", - "format": "int32", - "description": "A positive number that is the maximum number of results to return.", - "type": "integer" - }, - "filter": { - "description": "If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the filter specifies which metric descriptors are to be returned. For example, the following filter matches all custom metrics:\nmetric.type = starts_with(\"custom.googleapis.com/\")\n", - "type": "string", - "location": "query" - } - }, - "flatPath": "v3/projects/{projectsId}/metricDescriptors", - "id": "monitoring.projects.metricDescriptors.list", - "path": "v3/{+name}/metricDescriptors", - "description": "Lists metric descriptors that match a filter. This method does not require a Stackdriver account." - }, - "create": { - "id": "monitoring.projects.metricDescriptors.create", - "path": "v3/{+name}/metricDescriptors", - "description": "Creates a new metric descriptor. 
User-created metric descriptors define custom metrics.", - "request": { - "$ref": "MetricDescriptor" - }, - "response": { - "$ref": "MetricDescriptor" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/metricDescriptors" - }, - "delete": { - "path": "v3/{+name}", - "id": "monitoring.projects.metricDescriptors.delete", - "description": "Deletes a metric descriptor. Only user-created custom metrics can be deleted.", - "httpMethod": "DELETE", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/metricDescriptors/.+$", - "location": "path", - "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". An example of {metric_id} is: \"custom.googleapis.com/my_test_metric\".", - "type": "string", - "required": true - } - }, - "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}" - } - } - }, - "monitoredResourceDescriptors": { - "methods": { - "get": { - "id": "monitoring.projects.monitoredResourceDescriptors.get", - "path": "v3/{+name}", - "description": "Gets a single monitored resource descriptor. 
This method does not require a Stackdriver account.", - "response": { - "$ref": "MonitoredResourceDescriptor" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "parameters": { - "name": { - "description": "The monitored resource descriptor to get. The format is \"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\". The {resource_type} is a predefined type, such as cloudsql_database.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/monitoredResourceDescriptors/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors/{monitoredResourceDescriptorsId}" - }, - "list": { - "response": { - "$ref": "ListMonitoredResourceDescriptorsResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "parameters": { - "pageToken": { - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string", - "location": "query" - }, - "name": { - "location": "path", - "description": "The project on which to execute the request. 
The format is \"projects/{project_id_or_number}\".", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$" - }, - "pageSize": { - "location": "query", - "format": "int32", - "description": "A positive number that is the maximum number of results to return.", - "type": "integer" - }, - "filter": { - "description": "An optional filter describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an id label:\nresource.type = starts_with(\"gce_\") AND resource.label:id\n", - "type": "string", - "location": "query" - } - }, - "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors", - "id": "monitoring.projects.monitoredResourceDescriptors.list", - "path": "v3/{+name}/monitoredResourceDescriptors", - "description": "Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account." - } - } - }, - "groups": { - "methods": { - "create": { - "description": "Creates a new group.", - "request": { - "$ref": "Group" - }, - "httpMethod": "POST", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Group" - }, - "parameters": { - "validateOnly": { - "description": "If true, validate this request but do not create the group.", - "type": "boolean", - "location": "query" - }, - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project in which to create the group. 
The format is \"projects/{project_id_or_number}\".", - "type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "flatPath": "v3/projects/{projectsId}/groups", - "path": "v3/{+name}/groups", - "id": "monitoring.projects.groups.create" - }, - "delete": { - "description": "Deletes an existing group.", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "DELETE", - "parameters": { - "name": { - "location": "path", - "description": "The group to delete. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/groups/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - "id": "monitoring.projects.groups.delete", - "path": "v3/{+name}" - }, - "get": { - "description": "Gets a single group.", - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Group" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path", - "description": "The group to retrieve. 
The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - "type": "string", - "required": true - } - }, - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - "path": "v3/{+name}", - "id": "monitoring.projects.groups.get" - }, - "list": { - "description": "Lists the existing groups.", - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ListGroupsResponse" - }, - "parameters": { - "name": { - "location": "path", - "description": "The project whose groups are to be listed. The format is \"projects/{project_id_or_number}\".", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$" - }, - "childrenOfGroup": { - "location": "query", - "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups whose parentName field contains the group name. If no groups have this parent, the results are empty.", - "type": "string" - }, - "descendantsOfGroup": { - "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns the descendants of the specified group. This is a superset of the results returned by the childrenOfGroup filter, and includes children-of-children, and so forth.", - "type": "string", - "location": "query" - }, - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "pageSize": { - "location": "query", - "format": "int32", - "description": "A positive number that is the maximum number of results to return.", - "type": "integer" - }, - "ancestorsOfGroup": { - "location": "query", - "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups that are ancestors of the specified group. 
The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty.", - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "flatPath": "v3/projects/{projectsId}/groups", - "path": "v3/{+name}/groups", - "id": "monitoring.projects.groups.list" - }, - "update": { - "httpMethod": "PUT", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Group" - }, - "parameters": { - "validateOnly": { - "location": "query", - "description": "If true, validate this request but do not update the existing group.", - "type": "boolean" - }, - "name": { - "location": "path", - "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/groups/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - "path": "v3/{+name}", - "id": "monitoring.projects.groups.update", - "description": "Updates an existing group. 
You can change any group attributes except name.", - "request": { - "$ref": "Group" - } - } - }, - "resources": { - "members": { - "methods": { - "list": { - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ListGroupMembersResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "parameters": { - "interval.endTime": { - "format": "google-datetime", - "description": "Required. The end of the time interval.", - "type": "string", - "location": "query" - }, - "filter": { - "location": "query", - "description": "An optional list filter describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter:\nresource.type = \"gce_instance\"\n", - "type": "string" - }, - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "interval.startTime": { - "type": "string", - "location": "query", - "format": "google-datetime", - "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time." - }, - "pageSize": { - "format": "int32", - "description": "A positive number that is the maximum number of results to return.", - "type": "integer", - "location": "query" - }, - "name": { - "description": "The group whose members are listed. 
The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path" - } - }, - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}/members", - "path": "v3/{+name}/members", - "id": "monitoring.projects.groups.members.list", - "description": "Lists the monitored resources that are members of a group." - } - } - } - } - } - } - } - }, - "parameters": { - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "default": "true", - "type": "boolean", - "location": "query", - "description": "Returns response with indentations and line breaks." - }, - "fields": { - "type": "string", - "location": "query", - "description": "Selector specifying which fields to include in a partial response." - }, - "uploadType": { - "location": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "$.xgafv": { - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query", - "enum": [ - "1", - "2" - ], - "description": "V1 error format.", - "type": "string" - }, - "alt": { - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response.", - "default": "json", - "enum": [ - "json", - "media", - "proto" - ], - "type": "string" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "type": "string", - "location": "query", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters." - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "bearer_token": { - "location": "query", - "description": "OAuth bearer token.", - "type": "string" - } - }, - "version": "v3", - "baseUrl": "https://monitoring.googleapis.com/" + "title": "Stackdriver Monitoring API" } diff --git a/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go b/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go index 27ee9ebee3d..459e8d1531b 100644 --- a/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go +++ b/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go @@ -68,6 +68,7 @@ func New(client *http.Client) (*Service, error) { } s := &Service{client: client, BasePath: basePath} s.Projects = NewProjectsService(s) + s.UptimeCheckIps = NewUptimeCheckIpsService(s) return s, nil } @@ -77,6 +78,8 @@ type Service struct { UserAgent string // optional additional User-Agent fragment Projects *ProjectsService + + UptimeCheckIps *UptimeCheckIpsService } func (s *Service) userAgent() string { @@ -93,6 +96,7 @@ func NewProjectsService(s *Service) *ProjectsService { rs.MetricDescriptors = NewProjectsMetricDescriptorsService(s) rs.MonitoredResourceDescriptors = NewProjectsMonitoredResourceDescriptorsService(s) rs.TimeSeries = NewProjectsTimeSeriesService(s) + rs.UptimeCheckConfigs = NewProjectsUptimeCheckConfigsService(s) return rs } @@ -108,6 
+112,8 @@ type ProjectsService struct { MonitoredResourceDescriptors *ProjectsMonitoredResourceDescriptorsService TimeSeries *ProjectsTimeSeriesService + + UptimeCheckConfigs *ProjectsUptimeCheckConfigsService } func NewProjectsCollectdTimeSeriesService(s *Service) *ProjectsCollectdTimeSeriesService { @@ -167,6 +173,57 @@ type ProjectsTimeSeriesService struct { s *Service } +func NewProjectsUptimeCheckConfigsService(s *Service) *ProjectsUptimeCheckConfigsService { + rs := &ProjectsUptimeCheckConfigsService{s: s} + return rs +} + +type ProjectsUptimeCheckConfigsService struct { + s *Service +} + +func NewUptimeCheckIpsService(s *Service) *UptimeCheckIpsService { + rs := &UptimeCheckIpsService{s: s} + return rs +} + +type UptimeCheckIpsService struct { + s *Service +} + +// BasicAuthentication: A type of authentication to perform against the +// specified resource or URL that uses username and password. Currently, +// only Basic authentication is supported in Uptime Monitoring. +type BasicAuthentication struct { + // Password: The password to authenticate. + Password string `json:"password,omitempty"` + + // Username: The username to authenticate. + Username string `json:"username,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Password") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Password") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BasicAuthentication) MarshalJSON() ([]byte, error) { + type NoMethod BasicAuthentication + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BucketOptions: BucketOptions describes the bucket boundaries used to // create a histogram for the distribution. The buckets can be in a // linear sequence, an exponential sequence, or each bucket can be @@ -211,8 +268,8 @@ type BucketOptions struct { } func (s *BucketOptions) MarshalJSON() ([]byte, error) { - type noMethod BucketOptions - raw := noMethod(*s) + type NoMethod BucketOptions + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -263,8 +320,8 @@ type CollectdPayload struct { } func (s *CollectdPayload) MarshalJSON() ([]byte, error) { - type noMethod CollectdPayload - raw := noMethod(*s) + type NoMethod CollectdPayload + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -302,8 +359,8 @@ type CollectdPayloadError struct { } func (s *CollectdPayloadError) MarshalJSON() ([]byte, error) { - type noMethod CollectdPayloadError - raw := noMethod(*s) + type NoMethod CollectdPayloadError + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -351,8 +408,8 @@ type CollectdValue struct { } func (s *CollectdValue) MarshalJSON() ([]byte, error) { - type noMethod CollectdValue - raw := noMethod(*s) + type NoMethod CollectdValue + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -384,8 +441,38 @@ type CollectdValueError struct { } func (s *CollectdValueError) MarshalJSON() ([]byte, error) { - type noMethod CollectdValueError - raw := noMethod(*s) + type NoMethod CollectdValueError + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, 
s.ForceSendFields, s.NullFields) +} + +// ContentMatcher: Used to perform string matching. Currently, this +// matches on the exact content. In the future, it can be expanded to +// allow for regular expressions and more complex matching. +type ContentMatcher struct { + // Content: String content to match + Content string `json:"content,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Content") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Content") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ContentMatcher) MarshalJSON() ([]byte, error) { + type NoMethod ContentMatcher + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -424,8 +511,8 @@ type CreateCollectdTimeSeriesRequest struct { } func (s *CreateCollectdTimeSeriesRequest) MarshalJSON() ([]byte, error) { - type noMethod CreateCollectdTimeSeriesRequest - raw := noMethod(*s) + type NoMethod CreateCollectdTimeSeriesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -459,8 +546,8 @@ type CreateCollectdTimeSeriesResponse struct { } func (s *CreateCollectdTimeSeriesResponse) MarshalJSON() ([]byte, error) { - type noMethod CreateCollectdTimeSeriesResponse - raw := noMethod(*s) + type NoMethod CreateCollectdTimeSeriesResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -491,8 +578,8 @@ type CreateTimeSeriesRequest struct { } func (s *CreateTimeSeriesRequest) MarshalJSON() ([]byte, error) { - type noMethod CreateTimeSeriesRequest - raw := noMethod(*s) + type NoMethod CreateTimeSeriesRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -565,19 +652,19 @@ type Distribution struct { } func (s *Distribution) MarshalJSON() ([]byte, error) { - type noMethod Distribution - raw := noMethod(*s) + type NoMethod Distribution + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Distribution) UnmarshalJSON(data []byte) error { - type noMethod Distribution + type NoMethod Distribution var s1 struct { Mean gensupport.JSONFloat64 `json:"mean"` SumOfSquaredDeviation gensupport.JSONFloat64 `json:"sumOfSquaredDeviation"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -630,8 +717,8 @@ type Explicit struct { } func (s *Explicit) 
MarshalJSON() ([]byte, error) { - type noMethod Explicit - raw := noMethod(*s) + type NoMethod Explicit + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -670,19 +757,19 @@ type Exponential struct { } func (s *Exponential) MarshalJSON() ([]byte, error) { - type noMethod Exponential - raw := noMethod(*s) + type NoMethod Exponential + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Exponential) UnmarshalJSON(data []byte) error { - type noMethod Exponential + type NoMethod Exponential var s1 struct { GrowthFactor gensupport.JSONFloat64 `json:"growthFactor"` Scale gensupport.JSONFloat64 `json:"scale"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -774,8 +861,8 @@ type Field struct { } func (s *Field) MarshalJSON() ([]byte, error) { - type noMethod Field - raw := noMethod(*s) + type NoMethod Field + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -850,8 +937,112 @@ type Group struct { } func (s *Group) MarshalJSON() ([]byte, error) { - type noMethod Group - raw := noMethod(*s) + type NoMethod Group + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpCheck: Information involved in an HTTP/HTTPS uptime check +// request. +type HttpCheck struct { + // AuthInfo: The authentication information. Optional when creating an + // HTTP check; defaults to empty. + AuthInfo *BasicAuthentication `json:"authInfo,omitempty"` + + // Headers: The list of headers to send as part of the uptime check + // request. If two headers have the same key and different values, they + // should be entered as a single header, with the value being a + // comma-separated list of all the desired values as described at + // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). 
Entering + // two separate headers with the same key in a Create call will cause + // the first to be overwritten by the second. + Headers map[string]string `json:"headers,omitempty"` + + // MaskHeaders: Boolean specifiying whether to encrypt the header + // information. Encryption should be specified for any headers related + // to authentication that you do not wish to be seen when retrieving the + // configuration. The server will be responsible for encrypting the + // headers. On Get/List calls, if mask_headers is set to True then the + // headers will be obscured with ******. + MaskHeaders bool `json:"maskHeaders,omitempty"` + + // Path: The path to the page to run the check against. Will be combined + // with the host (specified within the MonitoredResource) and port to + // construct the full URL. Optional (defaults to "/"). + Path string `json:"path,omitempty"` + + // Port: The port to the page to run the check against. Will be combined + // with host (specified within the MonitoredResource) and path to + // construct the full URL. Optional (defaults to 80 without SSL, or 443 + // with SSL). + Port int64 `json:"port,omitempty"` + + // UseSsl: If true, use HTTPS instead of HTTP to run the check. + UseSsl bool `json:"useSsl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuthInfo") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuthInfo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpCheck) MarshalJSON() ([]byte, error) { + type NoMethod HttpCheck + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InternalChecker: Nimbus InternalCheckers. +type InternalChecker struct { + // CheckerId: The checker ID. + CheckerId string `json:"checkerId,omitempty"` + + // DisplayName: The checker's human-readable name. + DisplayName string `json:"displayName,omitempty"` + + // GcpZone: The GCP zone the uptime check should egress from. Only + // respected for internal uptime checks, where internal_network is + // specified. + GcpZone string `json:"gcpZone,omitempty"` + + // Network: The internal network to perform this uptime check on. + Network string `json:"network,omitempty"` + + // ProjectId: The GCP project ID. Not necessarily the same as the + // project_id for the config. + ProjectId string `json:"projectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CheckerId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckerId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InternalChecker) MarshalJSON() ([]byte, error) { + type NoMethod InternalChecker + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -889,8 +1080,8 @@ type LabelDescriptor struct { } func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { - type noMethod LabelDescriptor - raw := noMethod(*s) + type NoMethod LabelDescriptor + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -929,19 +1120,19 @@ type Linear struct { } func (s *Linear) MarshalJSON() ([]byte, error) { - type noMethod Linear - raw := noMethod(*s) + type NoMethod Linear + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Linear) UnmarshalJSON(data []byte) error { - type noMethod Linear + type NoMethod Linear var s1 struct { Offset gensupport.JSONFloat64 `json:"offset"` Width gensupport.JSONFloat64 `json:"width"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -985,8 +1176,8 @@ type ListGroupMembersResponse struct { } func (s *ListGroupMembersResponse) MarshalJSON() ([]byte, error) { - type noMethod ListGroupMembersResponse - raw := noMethod(*s) + type NoMethod ListGroupMembersResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1022,8 +1213,8 @@ type ListGroupsResponse struct { } func (s *ListGroupsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListGroupsResponse - raw := noMethod(*s) + type NoMethod ListGroupsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1061,8 +1252,8 @@ type ListMetricDescriptorsResponse struct { } func (s *ListMetricDescriptorsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListMetricDescriptorsResponse - 
raw := noMethod(*s) + type NoMethod ListMetricDescriptorsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1100,8 +1291,8 @@ type ListMonitoredResourceDescriptorsResponse struct { } func (s *ListMonitoredResourceDescriptorsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListMonitoredResourceDescriptorsResponse - raw := noMethod(*s) + type NoMethod ListMonitoredResourceDescriptorsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1138,8 +1329,90 @@ type ListTimeSeriesResponse struct { } func (s *ListTimeSeriesResponse) MarshalJSON() ([]byte, error) { - type noMethod ListTimeSeriesResponse - raw := noMethod(*s) + type NoMethod ListTimeSeriesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListUptimeCheckConfigsResponse: The protocol for the +// ListUptimeCheckConfigs response. +type ListUptimeCheckConfigsResponse struct { + // NextPageToken: This field represents the pagination token to retrieve + // the next page of results. If the value is empty, it means no further + // results for the request. To retrieve the next page of results, the + // value of the next_page_token is passed to the subsequent List method + // call (in the request message's page_token field). + NextPageToken string `json:"nextPageToken,omitempty"` + + // UptimeCheckConfigs: The returned uptime check configurations. + UptimeCheckConfigs []*UptimeCheckConfig `json:"uptimeCheckConfigs,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListUptimeCheckConfigsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListUptimeCheckConfigsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListUptimeCheckIpsResponse: The protocol for the ListUptimeCheckIps +// response. +type ListUptimeCheckIpsResponse struct { + // NextPageToken: This field represents the pagination token to retrieve + // the next page of results. If the value is empty, it means no further + // results for the request. To retrieve the next page of results, the + // value of the next_page_token is passed to the subsequent List method + // call (in the request message's page_token field). NOTE: this field is + // not yet implemented + NextPageToken string `json:"nextPageToken,omitempty"` + + // UptimeCheckIps: The returned list of IP addresses (including region + // and location) that the checkers run from. + UptimeCheckIps []*UptimeCheckIp `json:"uptimeCheckIps,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListUptimeCheckIpsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListUptimeCheckIpsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1172,8 +1445,8 @@ type Metric struct { } func (s *Metric) MarshalJSON() ([]byte, error) { - type noMethod Metric - raw := noMethod(*s) + type NoMethod Metric + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1187,7 +1460,9 @@ type MetricDescriptor struct { // DisplayName: A concise name for the metric, which can be displayed in // user interfaces. Use sentence case without an ending period, for - // example "Request count". + // example "Request count". This field is optional but it is recommended + // to be set for any metrics associated with user-visible concepts, such + // as Quota. DisplayName string `json:"displayName,omitempty"` // Labels: The set of labels that can be used to describe a specific @@ -1212,16 +1487,7 @@ type MetricDescriptor struct { // zero and sets a new start time for the following points. MetricKind string `json:"metricKind,omitempty"` - // Name: The resource name of the metric descriptor. 
Depending on the - // implementation, the name typically includes: (1) the parent resource - // name that defines the scope of the metric type or of its data; and - // (2) the metric's URL-encoded type, which also appears in the type - // field of this descriptor. For example, following is the resource name - // of a custom metric within the GCP project - // my-project-id: - // "projects/my-project-id/metricDescriptors/custom.google - // apis.com%2Finvoice%2Fpaid%2Famount" - // + // Name: The resource name of the metric descriptor. Name string `json:"name,omitempty"` // Type: The metric type, including its DNS name prefix. The type is not @@ -1235,55 +1501,12 @@ type MetricDescriptor struct { // Type string `json:"type,omitempty"` - // Unit: The unit in which the metric value is reported. It is only - // applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The - // supported units are a subset of The Unified Code for Units of Measure - // (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT) - // bit bit - // By byte - // s second - // min minute - // h hour - // d dayPrefixes (PREFIX) - // k kilo (10**3) - // M mega (10**6) - // G giga (10**9) - // T tera (10**12) - // P peta (10**15) - // E exa (10**18) - // Z zetta (10**21) - // Y yotta (10**24) - // m milli (10**-3) - // u micro (10**-6) - // n nano (10**-9) - // p pico (10**-12) - // f femto (10**-15) - // a atto (10**-18) - // z zepto (10**-21) - // y yocto (10**-24) - // Ki kibi (2**10) - // Mi mebi (2**20) - // Gi gibi (2**30) - // Ti tebi (2**40)GrammarThe grammar includes the dimensionless unit 1, - // such as 1/s.The grammar also includes these connectors: - // / division (as an infix operator, e.g. 1/s). - // . multiplication (as an infix operator, e.g. GBy.d)The grammar for a - // unit is as follows: - // Expression = Component { "." 
Component } { "/" Component } - // ; - // - // Component = [ PREFIX ] UNIT [ Annotation ] - // | Annotation - // | "1" - // ; - // - // Annotation = "{" NAME "}" ; - // Notes: - // Annotation is just a comment if it follows a UNIT and is equivalent - // to 1 if it is used alone. For examples, {requests}/s == 1/s, - // By{transmitted}/s == By/s. - // NAME is a sequence of non-blank printable ASCII characters not - // containing '{' or '}'. + // Unit: Optional. The unit in which the metric value is reported. For + // example, kBy/s means kilobytes/sec, and 1 is the dimensionless unit. + // The supported units are a subset of The Unified Code for Units of + // Measure standard (http://unitsofmeasure.org/ucum.html).

This + // field is part of the metric's documentation, but it is ignored by + // Stackdriver. Unit string `json:"unit,omitempty"` // ValueType: Whether the measurement is an integer, a floating-point @@ -1324,8 +1547,8 @@ type MetricDescriptor struct { } func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { - type noMethod MetricDescriptor - raw := noMethod(*s) + type NoMethod MetricDescriptor + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1372,8 +1595,8 @@ type MonitoredResource struct { } func (s *MonitoredResource) MarshalJSON() ([]byte, error) { - type noMethod MonitoredResource - raw := noMethod(*s) + type NoMethod MonitoredResource + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1438,8 +1661,54 @@ type MonitoredResourceDescriptor struct { } func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { - type noMethod MonitoredResourceDescriptor - raw := noMethod(*s) + type NoMethod MonitoredResourceDescriptor + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MonitoredResourceMetadata: Auxiliary metadata for a MonitoredResource +// object. MonitoredResource objects contain the minimum set of +// information to uniquely identify a monitored resource instance. There +// is some other useful auxiliary metadata. Google Stackdriver +// Monitoring & Logging uses an ingestion pipeline to extract metadata +// for cloud resources of all types , and stores the metadata in this +// message. +type MonitoredResourceMetadata struct { + // SystemLabels: Output only. Values for predefined system metadata + // labels. System labels are a kind of metadata extracted by Google + // Stackdriver. Stackdriver determines what system labels are useful and + // how to obtain their values. Some examples: "machine_image", "vpc", + // "subnet_id", "security_group", "name", etc. 
System label values can + // be only strings, Boolean values, or a list of strings. For example: + // { "name": "my-test-instance", + // "security_group": ["a", "b", "c"], + // "spot_instance": false } + // + SystemLabels googleapi.RawMessage `json:"systemLabels,omitempty"` + + // UserLabels: Output only. A map of user-defined metadata labels. + UserLabels map[string]string `json:"userLabels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SystemLabels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SystemLabels") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *MonitoredResourceMetadata) MarshalJSON() ([]byte, error) { + type NoMethod MonitoredResourceMetadata + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1477,8 +1746,8 @@ type Option struct { } func (s *Option) MarshalJSON() ([]byte, error) { - type noMethod Option - raw := noMethod(*s) + type NoMethod Option + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1516,8 +1785,8 @@ type Point struct { } func (s *Point) MarshalJSON() ([]byte, error) { - type noMethod Point - raw := noMethod(*s) + type NoMethod Point + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1547,19 +1816,19 @@ type Range struct { } func (s *Range) MarshalJSON() ([]byte, error) { - type noMethod Range - raw := noMethod(*s) + type NoMethod Range + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *Range) UnmarshalJSON(data []byte) error { - type noMethod Range + type NoMethod Range var s1 struct { Max gensupport.JSONFloat64 `json:"max"` Min gensupport.JSONFloat64 `json:"min"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -1568,6 +1837,46 @@ func (s *Range) UnmarshalJSON(data []byte) error { return nil } +// ResourceGroup: The resource submessage for group checks. It can be +// used instead of a monitored resource, when multiple resources are +// being monitored. +type ResourceGroup struct { + // GroupId: The group of resources being monitored. Should be only the + // group_id, not projects//groups/. + GroupId string `json:"groupId,omitempty"` + + // ResourceType: The resource type of the group members. + // + // Possible values: + // "RESOURCE_TYPE_UNSPECIFIED" - Default value (not valid). 
+ // "INSTANCE" - A group of instances from Google Cloud Platform (GCP) + // or Amazon Web Services (AWS). + // "AWS_ELB_LOAD_BALANCER" - A group of AWS load balancers. + ResourceType string `json:"resourceType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "GroupId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GroupId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResourceGroup) MarshalJSON() ([]byte, error) { + type NoMethod ResourceGroup + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SourceContext: SourceContext represents information about the source // of a protobuf element, like the file in which it is defined. 
type SourceContext struct { @@ -1594,8 +1903,8 @@ type SourceContext struct { } func (s *SourceContext) MarshalJSON() ([]byte, error) { - type noMethod SourceContext - raw := noMethod(*s) + type NoMethod SourceContext + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1670,8 +1979,38 @@ type Status struct { } func (s *Status) MarshalJSON() ([]byte, error) { - type noMethod Status - raw := noMethod(*s) + type NoMethod Status + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TcpCheck: Information required for a TCP uptime check request. +type TcpCheck struct { + // Port: The port to the page to run the check against. Will be combined + // with host (specified within the MonitoredResource) to construct the + // full URL. Required. + Port int64 `json:"port,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Port") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TcpCheck) MarshalJSON() ([]byte, error) { + type NoMethod TcpCheck + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1705,8 +2044,8 @@ type TimeInterval struct { } func (s *TimeInterval) MarshalJSON() ([]byte, error) { - type noMethod TimeInterval - raw := noMethod(*s) + type NoMethod TimeInterval + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1716,6 +2055,12 @@ func (s *TimeInterval) MarshalJSON() ([]byte, error) { // fully-specified metric. This type is used for both listing and // creating time series. type TimeSeries struct { + // Metadata: Output only. The associated monitored resource metadata. + // When reading a a timeseries, this field will include metadata labels + // that are explicitly named in the reduction. When creating a + // timeseries, this field is ignored. + Metadata *MonitoredResourceMetadata `json:"metadata,omitempty"` + // Metric: The associated metric. A fully-specified metric used to // identify the time series. Metric *Metric `json:"metric,omitempty"` @@ -1771,7 +2116,7 @@ type TimeSeries struct { // "MONEY" - The value is money. ValueType string `json:"valueType,omitempty"` - // ForceSendFields is a list of field names (e.g. "Metric") to + // ForceSendFields is a list of field names (e.g. "Metadata") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1779,8 +2124,8 @@ type TimeSeries struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Metric") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. 
"Metadata") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -1789,8 +2134,8 @@ type TimeSeries struct { } func (s *TimeSeries) MarshalJSON() ([]byte, error) { - type noMethod TimeSeries - raw := noMethod(*s) + type NoMethod TimeSeries + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1837,8 +2182,8 @@ type Type struct { } func (s *Type) MarshalJSON() ([]byte, error) { - type noMethod Type - raw := noMethod(*s) + type NoMethod Type + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1880,18 +2225,18 @@ type TypedValue struct { } func (s *TypedValue) MarshalJSON() ([]byte, error) { - type noMethod TypedValue - raw := noMethod(*s) + type NoMethod TypedValue + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *TypedValue) UnmarshalJSON(data []byte) error { - type noMethod TypedValue + type NoMethod TypedValue var s1 struct { DoubleValue *gensupport.JSONFloat64 `json:"doubleValue"` - *noMethod + *NoMethod } - s1.noMethod = (*noMethod)(s) + s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } @@ -1901,6 +2246,163 @@ func (s *TypedValue) UnmarshalJSON(data []byte) error { return nil } +// UptimeCheckConfig: This message configures which resources and +// services to monitor for availability. +type UptimeCheckConfig struct { + // ContentMatchers: The expected content on the page the check is run + // against. Currently, only the first entry in the list is supported, + // and other entries will be ignored. The server will look for an exact + // match of the string in the page response's content. 
This field is + // optional and should only be specified if a content match is required. + ContentMatchers []*ContentMatcher `json:"contentMatchers,omitempty"` + + // DisplayName: A human-friendly name for the uptime check + // configuration. The display name should be unique within a Stackdriver + // Account in order to make it easier to identify; however, uniqueness + // is not enforced. Required. + DisplayName string `json:"displayName,omitempty"` + + // HttpCheck: Contains information needed to make an HTTP or HTTPS + // check. + HttpCheck *HttpCheck `json:"httpCheck,omitempty"` + + // InternalCheckers: The internal checkers that this check will egress + // from. If is_internal is true and this list is empty, the check will + // egress from all InternalCheckers configured for the project that owns + // this CheckConfig. + InternalCheckers []*InternalChecker `json:"internalCheckers,omitempty"` + + // IsInternal: Denotes whether this is a check that egresses from + // InternalCheckers. + IsInternal bool `json:"isInternal,omitempty"` + + // MonitoredResource: The monitored resource associated with the + // configuration. + MonitoredResource *MonitoredResource `json:"monitoredResource,omitempty"` + + // Name: A unique resource name for this UptimeCheckConfig. The format + // is:projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].This + // field should be omitted when creating the uptime check configuration; + // on create, the resource name is assigned by the server and included + // in the response. + Name string `json:"name,omitempty"` + + // Period: How often the uptime check is performed. Currently, only 1, + // 5, 10, and 15 minutes are supported. Required. + Period string `json:"period,omitempty"` + + // ResourceGroup: The group resource associated with the configuration. + ResourceGroup *ResourceGroup `json:"resourceGroup,omitempty"` + + // SelectedRegions: The list of regions from which the check will be + // run. 
If this field is specified, enough regions to include a minimum + // of 3 locations must be provided, or an error message is returned. Not + // specifying this field will result in uptime checks running from all + // regions. + // + // Possible values: + // "REGION_UNSPECIFIED" - Default value if no region is specified. + // Will result in uptime checks running from all regions. + // "USA" - Allows checks to run from locations within the United + // States of America. + // "EUROPE" - Allows checks to run from locations within the continent + // of Europe. + // "SOUTH_AMERICA" - Allows checks to run from locations within the + // continent of South America. + // "ASIA_PACIFIC" - Allows checks to run from locations within the + // Asia Pacific area (ex: Singapore). + SelectedRegions []string `json:"selectedRegions,omitempty"` + + // TcpCheck: Contains information needed to make a TCP check. + TcpCheck *TcpCheck `json:"tcpCheck,omitempty"` + + // Timeout: The maximum amount of time to wait for the request to + // complete (must be between 1 and 60 seconds). Required. + Timeout string `json:"timeout,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ContentMatchers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ContentMatchers") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UptimeCheckConfig) MarshalJSON() ([]byte, error) { + type NoMethod UptimeCheckConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UptimeCheckIp: Contains the region, location, and list of IP +// addresses where checkers in the location run from. +type UptimeCheckIp struct { + // IpAddress: The IP address from which the uptime check originates. + // This is a full IP address (not an IP address range). Most IP + // addresses, as of this publication, are in IPv4 format; however, one + // should not rely on the IP addresses being in IPv4 format indefinitely + // and should support interpreting this field in either IPv4 or IPv6 + // format. + IpAddress string `json:"ipAddress,omitempty"` + + // Location: A more specific location within the region that typically + // encodes a particular city/town/metro (and its containing + // state/province or country) within the broader umbrella region + // category. + Location string `json:"location,omitempty"` + + // Region: A broad region category in which the IP address is located. + // + // Possible values: + // "REGION_UNSPECIFIED" - Default value if no region is specified. + // Will result in uptime checks running from all regions. + // "USA" - Allows checks to run from locations within the United + // States of America. + // "EUROPE" - Allows checks to run from locations within the continent + // of Europe. + // "SOUTH_AMERICA" - Allows checks to run from locations within the + // continent of South America. + // "ASIA_PACIFIC" - Allows checks to run from locations within the + // Asia Pacific area (ex: Singapore). 
+ Region string `json:"region,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpAddress") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpAddress") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UptimeCheckIp) MarshalJSON() ([]byte, error) { + type NoMethod UptimeCheckIp + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // method id "monitoring.projects.collectdTimeSeries.create": type ProjectsCollectdTimeSeriesCreateCall struct { @@ -2004,7 +2506,7 @@ func (c *ProjectsCollectdTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2148,7 +2650,7 @@ func (c *ProjectsGroupsCreateCall) Do(opts ...googleapi.CallOption) (*Group, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2282,7 +2784,7 @@ func (c *ProjectsGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, err }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2422,7 +2924,7 @@ func (c *ProjectsGroupsGetCall) Do(opts ...googleapi.CallOption) (*Group, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2609,7 +3111,7 @@ func (c *ProjectsGroupsListCall) Do(opts ...googleapi.CallOption) (*ListGroupsRe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2798,7 +3300,7 @@ func (c *ProjectsGroupsUpdateCall) Do(opts ...googleapi.CallOption) (*Group, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2989,7 +3491,7 @@ func (c *ProjectsGroupsMembersListCall) Do(opts ...googleapi.CallOption) (*ListG }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3173,7 +3675,7 @@ func (c *ProjectsMetricDescriptorsCreateCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3304,7 +3806,7 @@ func (c *ProjectsMetricDescriptorsDeleteCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3445,7 +3947,7 @@ func (c *ProjectsMetricDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*Me }, } 
target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3615,7 +4117,7 @@ func (c *ProjectsMetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*L }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3795,7 +4297,7 @@ func (c *ProjectsMonitoredResourceDescriptorsGetCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3967,7 +4469,7 @@ func (c *ProjectsMonitoredResourceDescriptorsListCall) Do(opts ...googleapi.Call }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4142,7 +4644,7 @@ func (c *ProjectsTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) (*Empty, }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4449,7 +4951,7 @@ func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTime }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4601,3 +5103,936 @@ func (c *ProjectsTimeSeriesListCall) Pages(ctx context.Context, f func(*ListTime c.PageToken(x.NextPageToken) } } + +// method id "monitoring.projects.uptimeCheckConfigs.create": + +type ProjectsUptimeCheckConfigsCreateCall struct { + s *Service + parent string + uptimecheckconfig *UptimeCheckConfig + urlParams_ gensupport.URLParams + ctx_ 
context.Context + header_ http.Header +} + +// Create: Creates a new uptime check configuration. +func (r *ProjectsUptimeCheckConfigsService) Create(parent string, uptimecheckconfig *UptimeCheckConfig) *ProjectsUptimeCheckConfigsCreateCall { + c := &ProjectsUptimeCheckConfigsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.uptimecheckconfig = uptimecheckconfig + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsUptimeCheckConfigsCreateCall) Fields(s ...googleapi.Field) *ProjectsUptimeCheckConfigsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsUptimeCheckConfigsCreateCall) Context(ctx context.Context) *ProjectsUptimeCheckConfigsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsUptimeCheckConfigsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsUptimeCheckConfigsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.uptimecheckconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/uptimeCheckConfigs") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.projects.uptimeCheckConfigs.create" call. +// Exactly one of *UptimeCheckConfig or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *UptimeCheckConfig.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsUptimeCheckConfigsCreateCall) Do(opts ...googleapi.CallOption) (*UptimeCheckConfig, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UptimeCheckConfig{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new uptime check configuration.", + // "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs", + // "httpMethod": "POST", + // "id": "monitoring.projects.uptimeCheckConfigs.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The project in which to create the uptime check. 
The format is:projects/[PROJECT_ID].", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v3/{+parent}/uptimeCheckConfigs", + // "request": { + // "$ref": "UptimeCheckConfig" + // }, + // "response": { + // "$ref": "UptimeCheckConfig" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// method id "monitoring.projects.uptimeCheckConfigs.delete": + +type ProjectsUptimeCheckConfigsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an uptime check configuration. Note that this method +// will fail if the uptime check configuration is referenced by an alert +// policy or other dependent configs that would be rendered invalid by +// the deletion. +func (r *ProjectsUptimeCheckConfigsService) Delete(name string) *ProjectsUptimeCheckConfigsDeleteCall { + c := &ProjectsUptimeCheckConfigsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsUptimeCheckConfigsDeleteCall) Fields(s ...googleapi.Field) *ProjectsUptimeCheckConfigsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsUptimeCheckConfigsDeleteCall) Context(ctx context.Context) *ProjectsUptimeCheckConfigsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsUptimeCheckConfigsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsUptimeCheckConfigsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.projects.uptimeCheckConfigs.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsUptimeCheckConfigsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an uptime check configuration. Note that this method will fail if the uptime check configuration is referenced by an alert policy or other dependent configs that would be rendered invalid by the deletion.", + // "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs/{uptimeCheckConfigsId}", + // "httpMethod": "DELETE", + // "id": "monitoring.projects.uptimeCheckConfigs.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The uptime check configuration to delete. The format isprojects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].", + // "location": "path", + // "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v3/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// method id "monitoring.projects.uptimeCheckConfigs.get": + +type ProjectsUptimeCheckConfigsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a single uptime check configuration. 
+func (r *ProjectsUptimeCheckConfigsService) Get(name string) *ProjectsUptimeCheckConfigsGetCall { + c := &ProjectsUptimeCheckConfigsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsUptimeCheckConfigsGetCall) Fields(s ...googleapi.Field) *ProjectsUptimeCheckConfigsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsUptimeCheckConfigsGetCall) IfNoneMatch(entityTag string) *ProjectsUptimeCheckConfigsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsUptimeCheckConfigsGetCall) Context(ctx context.Context) *ProjectsUptimeCheckConfigsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsUptimeCheckConfigsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsUptimeCheckConfigsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.projects.uptimeCheckConfigs.get" call. +// Exactly one of *UptimeCheckConfig or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *UptimeCheckConfig.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsUptimeCheckConfigsGetCall) Do(opts ...googleapi.CallOption) (*UptimeCheckConfig, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UptimeCheckConfig{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a single uptime check configuration.", + // "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs/{uptimeCheckConfigsId}", + // "httpMethod": "GET", + // "id": "monitoring.projects.uptimeCheckConfigs.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The uptime check configuration to retrieve. The format isprojects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].", + // "location": "path", + // "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v3/{+name}", + // "response": { + // "$ref": "UptimeCheckConfig" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" + // ] + // } + +} + +// method id "monitoring.projects.uptimeCheckConfigs.list": + +type ProjectsUptimeCheckConfigsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the existing valid uptime check configurations for the +// project, leaving out any invalid configurations. 
+func (r *ProjectsUptimeCheckConfigsService) List(parent string) *ProjectsUptimeCheckConfigsListCall { + c := &ProjectsUptimeCheckConfigsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return in a single response. The server may further +// constrain the maximum number of results returned in a single page. If +// the page_size is <=0, the server will decide the number of results to +// be returned. +func (c *ProjectsUptimeCheckConfigsListCall) PageSize(pageSize int64) *ProjectsUptimeCheckConfigsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If this field is +// not empty then it must contain the nextPageToken value returned by a +// previous call to this method. Using this field causes the method to +// return more results from the previous method call. +func (c *ProjectsUptimeCheckConfigsListCall) PageToken(pageToken string) *ProjectsUptimeCheckConfigsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsUptimeCheckConfigsListCall) Fields(s ...googleapi.Field) *ProjectsUptimeCheckConfigsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ProjectsUptimeCheckConfigsListCall) IfNoneMatch(entityTag string) *ProjectsUptimeCheckConfigsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsUptimeCheckConfigsListCall) Context(ctx context.Context) *ProjectsUptimeCheckConfigsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsUptimeCheckConfigsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsUptimeCheckConfigsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/uptimeCheckConfigs") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.projects.uptimeCheckConfigs.list" call. +// Exactly one of *ListUptimeCheckConfigsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListUptimeCheckConfigsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ProjectsUptimeCheckConfigsListCall) Do(opts ...googleapi.CallOption) (*ListUptimeCheckConfigsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListUptimeCheckConfigsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the existing valid uptime check configurations for the project, leaving out any invalid configurations.", + // "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs", + // "httpMethod": "GET", + // "id": "monitoring.projects.uptimeCheckConfigs.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "The maximum number of results to return in a single response. The server may further constrain the maximum number of results returned in a single page. If the page_size is \u003c=0, the server will decide the number of results to be returned.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return more results from the previous method call.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The project whose uptime check configurations are listed. 
The format isprojects/[PROJECT_ID].", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v3/{+parent}/uptimeCheckConfigs", + // "response": { + // "$ref": "ListUptimeCheckConfigsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsUptimeCheckConfigsListCall) Pages(ctx context.Context, f func(*ListUptimeCheckConfigsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "monitoring.projects.uptimeCheckConfigs.patch": + +type ProjectsUptimeCheckConfigsPatchCall struct { + s *Service + name string + uptimecheckconfig *UptimeCheckConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an uptime check configuration. You can either replace +// the entire configuration with a new one or replace only certain +// fields in the current configuration by specifying the fields to be +// updated via "updateMask". Returns the updated configuration. 
+func (r *ProjectsUptimeCheckConfigsService) Patch(name string, uptimecheckconfig *UptimeCheckConfig) *ProjectsUptimeCheckConfigsPatchCall { + c := &ProjectsUptimeCheckConfigsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.uptimecheckconfig = uptimecheckconfig + return c +} + +// UpdateMask sets the optional parameter "updateMask": If present, only +// the listed fields in the current uptime check configuration are +// updated with values from the new configuration. If this field is +// empty, then the current configuration is completely replaced with the +// new configuration. +func (c *ProjectsUptimeCheckConfigsPatchCall) UpdateMask(updateMask string) *ProjectsUptimeCheckConfigsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsUptimeCheckConfigsPatchCall) Fields(s ...googleapi.Field) *ProjectsUptimeCheckConfigsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsUptimeCheckConfigsPatchCall) Context(ctx context.Context) *ProjectsUptimeCheckConfigsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsUptimeCheckConfigsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsUptimeCheckConfigsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.uptimecheckconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.projects.uptimeCheckConfigs.patch" call. +// Exactly one of *UptimeCheckConfig or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *UptimeCheckConfig.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsUptimeCheckConfigsPatchCall) Do(opts ...googleapi.CallOption) (*UptimeCheckConfig, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UptimeCheckConfig{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an uptime check configuration. You can either replace the entire configuration with a new one or replace only certain fields in the current configuration by specifying the fields to be updated via \"updateMask\". Returns the updated configuration.", + // "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs/{uptimeCheckConfigsId}", + // "httpMethod": "PATCH", + // "id": "monitoring.projects.uptimeCheckConfigs.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "A unique resource name for this UptimeCheckConfig. The format is:projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].This field should be omitted when creating the uptime check configuration; on create, the resource name is assigned by the server and included in the response.", + // "location": "path", + // "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Optional. If present, only the listed fields in the current uptime check configuration are updated with values from the new configuration. 
If this field is empty, then the current configuration is completely replaced with the new configuration.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v3/{+name}", + // "request": { + // "$ref": "UptimeCheckConfig" + // }, + // "response": { + // "$ref": "UptimeCheckConfig" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// method id "monitoring.uptimeCheckIps.list": + +type UptimeCheckIpsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Returns the list of IPs that checkers run from +func (r *UptimeCheckIpsService) List() *UptimeCheckIpsListCall { + c := &UptimeCheckIpsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return in a single response. The server may further +// constrain the maximum number of results returned in a single page. If +// the page_size is <=0, the server will decide the number of results to +// be returned. NOTE: this field is not yet implemented +func (c *UptimeCheckIpsListCall) PageSize(pageSize int64) *UptimeCheckIpsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If this field is +// not empty then it must contain the nextPageToken value returned by a +// previous call to this method. Using this field causes the method to +// return more results from the previous method call. NOTE: this field +// is not yet implemented +func (c *UptimeCheckIpsListCall) PageToken(pageToken string) *UptimeCheckIpsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UptimeCheckIpsListCall) Fields(s ...googleapi.Field) *UptimeCheckIpsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *UptimeCheckIpsListCall) IfNoneMatch(entityTag string) *UptimeCheckIpsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UptimeCheckIpsListCall) Context(ctx context.Context) *UptimeCheckIpsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UptimeCheckIpsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UptimeCheckIpsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/uptimeCheckIps") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.uptimeCheckIps.list" call. +// Exactly one of *ListUptimeCheckIpsResponse or error will be non-nil. 
+// Any non-2xx status code is an error. Response headers are in either +// *ListUptimeCheckIpsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *UptimeCheckIpsListCall) Do(opts ...googleapi.CallOption) (*ListUptimeCheckIpsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListUptimeCheckIpsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the list of IPs that checkers run from", + // "flatPath": "v3/uptimeCheckIps", + // "httpMethod": "GET", + // "id": "monitoring.uptimeCheckIps.list", + // "parameterOrder": [], + // "parameters": { + // "pageSize": { + // "description": "The maximum number of results to return in a single response. The server may further constrain the maximum number of results returned in a single page. If the page_size is \u003c=0, the server will decide the number of results to be returned. NOTE: this field is not yet implemented", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. 
Using this field causes the method to return more results from the previous method call. NOTE: this field is not yet implemented", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v3/uptimeCheckIps", + // "response": { + // "$ref": "ListUptimeCheckIpsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *UptimeCheckIpsListCall) Pages(ctx context.Context, f func(*ListUptimeCheckIpsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json b/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json index 0ccf0544e0b..a65431d9707 100644 --- a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json +++ b/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json @@ -2,490 +2,9 @@ "resources": { "projects": { "resources": { - "snapshots": { - "methods": { - "setIamPolicy": { - "id": "pubsub.projects.snapshots.setIamPolicy", - "path": "v1/{+resource}:setIamPolicy", - "description": "Sets the access control policy on the specified resource. 
Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "parameters": { - "resource": { - "location": "path", - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/snapshots/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:setIamPolicy" - }, - "testIamPermissions": { - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/snapshots/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:testIamPermissions", - "id": "pubsub.projects.snapshots.testIamPermissions", - "path": "v1/{+resource}:testIamPermissions" - }, - "getIamPolicy": { - "httpMethod": "GET", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/snapshots/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:getIamPolicy", - "path": "v1/{+resource}:getIamPolicy", - "id": "pubsub.projects.snapshots.getIamPolicy", - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset." - } - } - }, - "subscriptions": { - "methods": { - "acknowledge": { - "request": { - "$ref": "AcknowledgeRequest" - }, - "description": "Acknowledges the messages associated with the `ack_ids` in the\n`AcknowledgeRequest`. 
The Pub/Sub system can remove the relevant messages\nfrom the subscription.\n\nAcknowledging a message whose ack deadline has expired may succeed,\nbut such a message may be redelivered later. Acknowledging a message more\nthan once will not result in an error.", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "subscription" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "subscription": { - "description": "The subscription whose message is being acknowledged.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:acknowledge", - "id": "pubsub.projects.subscriptions.acknowledge", - "path": "v1/{+subscription}:acknowledge" - }, - "modifyAckDeadline": { - "description": "Modifies the ack deadline for a specific message. This method is useful\nto indicate that more time is needed to process a message by the\nsubscriber, or to make the message available for redelivery if the\nprocessing was interrupted. 
Note that this does not modify the\nsubscription-level `ackDeadlineSeconds` used for subsequent messages.", - "request": { - "$ref": "ModifyAckDeadlineRequest" - }, - "httpMethod": "POST", - "parameterOrder": [ - "subscription" - ], - "response": { - "$ref": "Empty" - }, - "parameters": { - "subscription": { - "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyAckDeadline", - "path": "v1/{+subscription}:modifyAckDeadline", - "id": "pubsub.projects.subscriptions.modifyAckDeadline" - }, - "getIamPolicy": { - "path": "v1/{+resource}:getIamPolicy", - "id": "pubsub.projects.subscriptions.getIamPolicy", - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "httpMethod": "GET", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:getIamPolicy" - }, - "get": { - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - "id": "pubsub.projects.subscriptions.get", - "path": "v1/{+subscription}", - "description": "Gets the configuration details of a subscription.", - "response": { - "$ref": 
"Subscription" - }, - "parameterOrder": [ - "subscription" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "subscription": { - "description": "The name of the subscription to get.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path" - } - } - }, - "testIamPermissions": { - "httpMethod": "POST", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameters": { - "resource": { - "location": "path", - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:testIamPermissions", - "path": "v1/{+resource}:testIamPermissions", - "id": "pubsub.projects.subscriptions.testIamPermissions", - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", - "request": { - "$ref": "TestIamPermissionsRequest" - } - }, - "modifyPushConfig": { - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "subscription": { - "location": "path", - "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$" - } - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyPushConfig", - "id": "pubsub.projects.subscriptions.modifyPushConfig", - "path": "v1/{+subscription}:modifyPushConfig", - "request": { - "$ref": "ModifyPushConfigRequest" - }, - "description": "Modifies the `PushConfig` for a specified subscription.\n\nThis may be used to change a push subscription to a pull one (signified by\nan empty `PushConfig`) or vice versa, or change the endpoint URL and other\nattributes of a push subscription. Messages will accumulate for delivery\ncontinuously through the call regardless of changes to the `PushConfig`.", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "subscription" - ], - "httpMethod": "POST" - }, - "pull": { - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:pull", - "path": "v1/{+subscription}:pull", - "id": "pubsub.projects.subscriptions.pull", - "description": "Pulls messages from the server. Returns an empty list if there are no\nmessages available in the backlog. 
The server may return `UNAVAILABLE` if\nthere are too many concurrent pull requests pending for the given\nsubscription.", - "request": { - "$ref": "PullRequest" - }, - "httpMethod": "POST", - "parameterOrder": [ - "subscription" - ], - "response": { - "$ref": "PullResponse" - }, - "parameters": { - "subscription": { - "description": "The subscription from which messages should be pulled.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "delete": { - "httpMethod": "DELETE", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "subscription" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "subscription": { - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path", - "description": "The subscription to delete.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "type": "string", - "required": true - } - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - "path": "v1/{+subscription}", - "id": "pubsub.projects.subscriptions.delete", - "description": "Deletes an existing subscription. All messages retained in the subscription\nare immediately dropped. Calls to `Pull` after deletion will return\n`NOT_FOUND`. After a subscription is deleted, a new one may be created with\nthe same name, but the new one has no association with the old\nsubscription or its topic unless the same topic is specified." 
- }, - "list": { - "description": "Lists matching subscriptions.", - "httpMethod": "GET", - "response": { - "$ref": "ListSubscriptionsResponse" - }, - "parameterOrder": [ - "project" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "pageToken": { - "location": "query", - "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that\nthis is a continuation of a prior `ListSubscriptions` call, and that the\nsystem should return the next page of data.", - "type": "string" - }, - "pageSize": { - "format": "int32", - "description": "Maximum number of subscriptions to return.", - "type": "integer", - "location": "query" - }, - "project": { - "description": "The name of the cloud project that subscriptions belong to.\nFormat is `projects/{project}`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/subscriptions", - "path": "v1/{+project}/subscriptions", - "id": "pubsub.projects.subscriptions.list" - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. 
Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:setIamPolicy", - "id": "pubsub.projects.subscriptions.setIamPolicy", - "path": "v1/{+resource}:setIamPolicy" - }, - "create": { - "request": { - "$ref": "Subscription" - }, - "description": "Creates a subscription to a given topic.\nIf the subscription already exists, returns `ALREADY_EXISTS`.\nIf the corresponding topic doesn't exist, returns `NOT_FOUND`.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic, conforming\nto the\n[resource name format](https://cloud.google.com/pubsub/docs/overview#names).\nThe generated name is populated in the returned Subscription object.\nNote that for REST API requests, you must specify a name in the request.", - "response": { - "$ref": "Subscription" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "PUT", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "name": { - "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. 
`{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - "id": "pubsub.projects.subscriptions.create", - "path": "v1/{+name}" - } - } - }, "topics": { - "resources": { - "subscriptions": { - "methods": { - "list": { - "id": "pubsub.projects.topics.subscriptions.list", - "path": "v1/{+topic}/subscriptions", - "description": "Lists the name of the subscriptions for this topic.", - "response": { - "$ref": "ListTopicSubscriptionsResponse" - }, - "parameterOrder": [ - "topic" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "pageToken": { - "location": "query", - "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSubscriptions` call, and\nthat the system should return the next page of data.", - "type": "string" - }, - "pageSize": { - "format": "int32", - "description": "Maximum number of subscription names to return.", - "type": "integer", - "location": "query" - }, - "topic": { - "location": "path", - "description": "The name of the topic that subscriptions are attached to.\nFormat is `projects/{project}/topics/{topic}`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$" - } - }, - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/subscriptions" - } - } - } - }, "methods": { - "delete": { - "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic\ndoes not exist. 
After a topic is deleted, a new topic may be created with\nthe same name; this is an entirely new topic with none of the old\nconfiguration or subscriptions. Existing subscriptions to this topic are\nnot deleted, but their `topic` field is set to `_deleted-topic_`.", - "httpMethod": "DELETE", - "parameterOrder": [ - "topic" - ], - "response": { - "$ref": "Empty" - }, - "parameters": { - "topic": { - "description": "Name of the topic to delete.\nFormat is `projects/{project}/topics/{topic}`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", - "path": "v1/{+topic}", - "id": "pubsub.projects.topics.delete" - }, "list": { - "id": "pubsub.projects.topics.list", - "path": "v1/{+project}/topics", "description": "Lists matching topics.", "response": { "$ref": "ListTopicsResponse" @@ -500,69 +19,40 @@ ], "parameters": { "pageToken": { + "location": "query", "description": "The value returned by the last `ListTopicsResponse`; indicates that this is\na continuation of a prior `ListTopics` call, and that the system should\nreturn the next page of data.", - "type": "string", - "location": "query" + "type": "string" }, "pageSize": { - "format": "int32", + "location": "query", "description": "Maximum number of topics to return.", - "type": "integer", - "location": "query" + "format": "int32", + "type": "integer" }, "project": { "description": "The name of the cloud project that topics belong to.\nFormat is `projects/{project}`.", - "type": "string", "required": true, + "type": "string", "pattern": "^projects/[^/]+$", "location": "path" } }, - "flatPath": "v1/projects/{projectsId}/topics" - }, - "create": { - "request": { - "$ref": "Topic" - }, - "description": "Creates the given topic with the given name.", - "response": { - "$ref": "Topic" - 
}, - "parameterOrder": [ - "name" - ], - "httpMethod": "PUT", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "name": { - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path", - "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`." - } - }, - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", - "id": "pubsub.projects.topics.create", - "path": "v1/{+name}" + "flatPath": "v1/projects/{projectsId}/topics", + "path": "v1/{+project}/topics", + "id": "pubsub.projects.topics.list" }, "setIamPolicy": { - "path": "v1/{+resource}:setIamPolicy", - "id": "pubsub.projects.topics.setIamPolicy", + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", "request": { "$ref": "SetIamPolicyRequest" }, - "description": "Sets the access control policy on the specified resource. 
Replaces any\nexisting policy.", - "httpMethod": "POST", - "parameterOrder": [ - "resource" - ], "response": { "$ref": "Policy" }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" @@ -570,28 +60,29 @@ "parameters": { "resource": { "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", "required": true, + "type": "string", "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path" } }, - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:setIamPolicy" + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "id": "pubsub.projects.topics.setIamPolicy" }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "create": { "response": { - "$ref": "Policy" + "$ref": "Topic" }, "parameterOrder": [ - "resource" + "name" ], - "httpMethod": "GET", + "httpMethod": "PUT", "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", + "name": { + "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). 
It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", "required": true, + "type": "string", "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path" } @@ -600,20 +91,57 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub" ], + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", + "path": "v1/{+name}", + "id": "pubsub.projects.topics.create", + "request": { + "$ref": "Topic" + }, + "description": "Creates the given topic with the given name." + }, + "getIamPolicy": { "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:getIamPolicy", "id": "pubsub.projects.topics.getIamPolicy", - "path": "v1/{+resource}:getIamPolicy" - }, - "get": { + "path": "v1/{+resource}:getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "httpMethod": "GET", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, "parameters": { - "topic": { - "description": "The name of the topic to get.\nFormat is `projects/{project}/topics/{topic}`.", - "type": "string", + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", "required": true, + "type": "string", "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path" } }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "get": { + "httpMethod": "GET", + "response": { + "$ref": "Topic" + }, + "parameterOrder": [ + "topic" + ], + "parameters": { + "topic": { + "location": "path", + "description": "The name of the topic to get.\nFormat is `projects/{project}/topics/{topic}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$" + } + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", 
"https://www.googleapis.com/auth/pubsub" @@ -621,36 +149,29 @@ "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", "id": "pubsub.projects.topics.get", "path": "v1/{+topic}", - "description": "Gets the configuration of a topic.", - "response": { - "$ref": "Topic" - }, - "httpMethod": "GET", - "parameterOrder": [ - "topic" - ] + "description": "Gets the configuration of a topic." }, "publish": { + "httpMethod": "POST", + "parameterOrder": [ + "topic" + ], "response": { "$ref": "PublishResponse" }, - "parameterOrder": [ - "topic" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], "parameters": { "topic": { "description": "The messages in the request will be published on this topic.\nFormat is `projects/{project}/topics/{topic}`.", - "type": "string", "required": true, + "type": "string", "pattern": "^projects/[^/]+/topics/[^/]+$", "location": "path" } }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:publish", "id": "pubsub.projects.topics.publish", "path": "v1/{+topic}:publish", @@ -660,10 +181,13 @@ "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic\ndoes not exist. The message payload must not be empty; it must contain\n either a non-empty data field, or at least one attribute." }, "testIamPermissions": { + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "pubsub.projects.topics.testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", "request": { "$ref": "TestIamPermissionsRequest" }, - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", "response": { "$ref": "TestIamPermissionsResponse" }, @@ -677,16 +201,712 @@ ], "parameters": { "resource": { - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path", "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "required": true, "type": "string", - "required": true + "pattern": "^projects/[^/]+/topics/[^/]+$", + "location": "path" + } + } + }, + "delete": { + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", + "path": "v1/{+topic}", + "id": "pubsub.projects.topics.delete", + "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic\ndoes not exist. After a topic is deleted, a new topic may be created with\nthe same name; this is an entirely new topic with none of the old\nconfiguration or subscriptions. 
Existing subscriptions to this topic are\nnot deleted, but their `topic` field is set to `_deleted-topic_`.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "topic" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "topic": { + "location": "path", + "description": "Name of the topic to delete.\nFormat is `projects/{project}/topics/{topic}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$" + } + } + } + }, + "resources": { + "subscriptions": { + "methods": { + "list": { + "description": "Lists the names of the subscriptions on this topic.", + "httpMethod": "GET", + "response": { + "$ref": "ListTopicSubscriptionsResponse" + }, + "parameterOrder": [ + "topic" + ], + "parameters": { + "pageToken": { + "location": "query", + "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSubscriptions` call, and\nthat the system should return the next page of data.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Maximum number of subscription names to return.", + "format": "int32", + "type": "integer" + }, + "topic": { + "location": "path", + "description": "The name of the topic that subscriptions are attached to.\nFormat is `projects/{project}/topics/{topic}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/subscriptions", + "id": "pubsub.projects.topics.subscriptions.list", + "path": "v1/{+topic}/subscriptions" + } + } + }, + "snapshots": { + "methods": { + "list": { + "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/snapshots", + "id": 
"pubsub.projects.topics.snapshots.list", + "path": "v1/{+topic}/snapshots", + "description": "Lists the names of the snapshots on this topic.", + "httpMethod": "GET", + "parameterOrder": [ + "topic" + ], + "response": { + "$ref": "ListTopicSnapshotsResponse" + }, + "parameters": { + "pageToken": { + "location": "query", + "description": "The value returned by the last `ListTopicSnapshotsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSnapshots` call, and\nthat the system should return the next page of data.", + "type": "string" + }, + "pageSize": { + "description": "Maximum number of snapshot names to return.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "topic": { + "description": "The name of the topic that snapshots are attached to.\nFormat is `projects/{project}/topics/{topic}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/topics/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + } + } + } + } + }, + "subscriptions": { + "methods": { + "acknowledge": { + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:acknowledge", + "id": "pubsub.projects.subscriptions.acknowledge", + "path": "v1/{+subscription}:acknowledge", + "request": { + "$ref": "AcknowledgeRequest" + }, + "description": "Acknowledges the messages associated with the `ack_ids` in the\n`AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages\nfrom the subscription.\n\nAcknowledging a message whose ack deadline has expired may succeed,\nbut such a message may be redelivered later. 
Acknowledging a message more\nthan once will not result in an error.", + "httpMethod": "POST", + "parameterOrder": [ + "subscription" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "subscription": { + "location": "path", + "description": "The subscription whose message is being acknowledged.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$" } }, - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:testIamPermissions", - "id": "pubsub.projects.topics.testIamPermissions", - "path": "v1/{+resource}:testIamPermissions" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "patch": { + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", + "path": "v1/{+name}", + "id": "pubsub.projects.subscriptions.patch", + "request": { + "$ref": "UpdateSubscriptionRequest" + }, + "description": "Updates an existing subscription. Note that certain properties of a\nsubscription, such as its topic, are not modifiable.", + "response": { + "$ref": "Subscription" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "PATCH", + "parameters": { + "name": { + "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). 
It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ] + }, + "get": { + "description": "Gets the configuration details of a subscription.", + "httpMethod": "GET", + "response": { + "$ref": "Subscription" + }, + "parameterOrder": [ + "subscription" + ], + "parameters": { + "subscription": { + "description": "The name of the subscription to get.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", + "id": "pubsub.projects.subscriptions.get", + "path": "v1/{+subscription}" + }, + "testIamPermissions": { + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "pubsub.projects.subscriptions.testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + } + }, + "modifyPushConfig": { + "request": { + "$ref": "ModifyPushConfigRequest" + }, + "description": "Modifies the `PushConfig` for a specified subscription.\n\nThis may be used to change a push subscription to a pull one (signified by\nan empty `PushConfig`) or vice versa, or change the endpoint URL and other\nattributes of a push subscription. 
Messages will accumulate for delivery\ncontinuously through the call regardless of changes to the `PushConfig`.", + "httpMethod": "POST", + "parameterOrder": [ + "subscription" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "subscription": { + "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyPushConfig", + "id": "pubsub.projects.subscriptions.modifyPushConfig", + "path": "v1/{+subscription}:modifyPushConfig" + }, + "seek": { + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:seek", + "path": "v1/{+subscription}:seek", + "id": "pubsub.projects.subscriptions.seek", + "description": "Seeks an existing subscription to a point in time or to a given snapshot,\nwhichever is provided in the request.", + "request": { + "$ref": "SeekRequest" + }, + "response": { + "$ref": "SeekResponse" + }, + "parameterOrder": [ + "subscription" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "subscription": { + "description": "The subscription to affect.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + } + }, + "delete": { + "description": "Deletes an existing subscription. All messages retained in the subscription\nare immediately dropped. Calls to `Pull` after deletion will return\n`NOT_FOUND`. 
After a subscription is deleted, a new one may be created with\nthe same name, but the new one has no association with the old\nsubscription or its topic unless the same topic is specified.", + "httpMethod": "DELETE", + "parameterOrder": [ + "subscription" + ], + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "subscription": { + "location": "path", + "description": "The subscription to delete.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", + "id": "pubsub.projects.subscriptions.delete", + "path": "v1/{+subscription}" + }, + "setIamPolicy": { + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "parameters": { + "resource": { + "location": "path", + "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "id": "pubsub.projects.subscriptions.setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy." 
+ }, + "getIamPolicy": { + "httpMethod": "GET", + "parameterOrder": [ + "resource" + ], + "response": { + "$ref": "Policy" + }, + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:getIamPolicy", + "id": "pubsub.projects.subscriptions.getIamPolicy", + "path": "v1/{+resource}:getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset." + }, + "modifyAckDeadline": { + "httpMethod": "POST", + "parameterOrder": [ + "subscription" + ], + "response": { + "$ref": "Empty" + }, + "parameters": { + "subscription": { + "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyAckDeadline", + "id": "pubsub.projects.subscriptions.modifyAckDeadline", + "path": "v1/{+subscription}:modifyAckDeadline", + "request": { + "$ref": "ModifyAckDeadlineRequest" + }, + "description": "Modifies the ack deadline for a specific message. This method is useful\nto indicate that more time is needed to process a message by the\nsubscriber, or to make the message available for redelivery if the\nprocessing was interrupted. 
Note that this does not modify the\nsubscription-level `ackDeadlineSeconds` used for subsequent messages." + }, + "pull": { + "response": { + "$ref": "PullResponse" + }, + "parameterOrder": [ + "subscription" + ], + "httpMethod": "POST", + "parameters": { + "subscription": { + "description": "The subscription from which messages should be pulled.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:pull", + "path": "v1/{+subscription}:pull", + "id": "pubsub.projects.subscriptions.pull", + "request": { + "$ref": "PullRequest" + }, + "description": "Pulls messages from the server. Returns an empty list if there are no\nmessages available in the backlog. The server may return `UNAVAILABLE` if\nthere are too many concurrent pull requests pending for the given\nsubscription." 
+ }, + "list": { + "description": "Lists matching subscriptions.", + "httpMethod": "GET", + "response": { + "$ref": "ListSubscriptionsResponse" + }, + "parameterOrder": [ + "project" + ], + "parameters": { + "pageSize": { + "location": "query", + "description": "Maximum number of subscriptions to return.", + "format": "int32", + "type": "integer" + }, + "project": { + "description": "The name of the cloud project that subscriptions belong to.\nFormat is `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + }, + "pageToken": { + "location": "query", + "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that\nthis is a continuation of a prior `ListSubscriptions` call, and that the\nsystem should return the next page of data.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/subscriptions", + "id": "pubsub.projects.subscriptions.list", + "path": "v1/{+project}/subscriptions" + }, + "create": { + "response": { + "$ref": "Subscription" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "PUT", + "parameters": { + "name": { + "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). 
It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", + "path": "v1/{+name}", + "id": "pubsub.projects.subscriptions.create", + "request": { + "$ref": "Subscription" + }, + "description": "Creates a subscription to a given topic.\nIf the subscription already exists, returns `ALREADY_EXISTS`.\nIf the corresponding topic doesn't exist, returns `NOT_FOUND`.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic, conforming\nto the\n[resource name format](https://cloud.google.com/pubsub/docs/overview#names).\nThe generated name is populated in the returned Subscription object.\nNote that for REST API requests, you must specify a name in the request." 
+ } + } + }, + "snapshots": { + "methods": { + "getIamPolicy": { + "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:getIamPolicy", + "path": "v1/{+resource}:getIamPolicy", + "id": "pubsub.projects.snapshots.getIamPolicy", + "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/snapshots/[^/]+$", + "location": "path" + } + } + }, + "patch": { + "description": "Updates an existing snapshot. Note that certain properties of a\nsnapshot are not modifiable.", + "request": { + "$ref": "UpdateSnapshotRequest" + }, + "httpMethod": "PATCH", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Snapshot" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "name": { + "description": "The name of the snapshot.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/snapshots/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", + "id": "pubsub.projects.snapshots.patch", + "path": "v1/{+name}" + }, + "testIamPermissions": { + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "resource": { + "location": "path", + "description": "REQUIRED: The 
resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/snapshots/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:testIamPermissions", + "path": "v1/{+resource}:testIamPermissions", + "id": "pubsub.projects.snapshots.testIamPermissions", + "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", + "request": { + "$ref": "TestIamPermissionsRequest" + } + }, + "delete": { + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "snapshot" + ], + "httpMethod": "DELETE", + "parameters": { + "snapshot": { + "location": "path", + "description": "The name of the snapshot to delete.\nFormat is `projects/{project}/snapshots/{snap}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/snapshots/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", + "path": "v1/{+snapshot}", + "id": "pubsub.projects.snapshots.delete", + "description": "Removes an existing snapshot. All messages retained in the snapshot\nare immediately dropped. After a snapshot is deleted, a new one may be\ncreated with the same name, but the new one has no association with the old\nsnapshot or its subscription, unless the same subscription is specified." 
+ }, + "list": { + "flatPath": "v1/projects/{projectsId}/snapshots", + "path": "v1/{+project}/snapshots", + "id": "pubsub.projects.snapshots.list", + "description": "Lists the existing snapshots.", + "response": { + "$ref": "ListSnapshotsResponse" + }, + "parameterOrder": [ + "project" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "parameters": { + "pageToken": { + "location": "query", + "description": "The value returned by the last `ListSnapshotsResponse`; indicates that this\nis a continuation of a prior `ListSnapshots` call, and that the system\nshould return the next page of data.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Maximum number of snapshots to return.", + "format": "int32", + "type": "integer" + }, + "project": { + "location": "path", + "description": "The name of the cloud project that snapshots belong to.\nFormat is `projects/{project}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + } + } + }, + "create": { + "request": { + "$ref": "CreateSnapshotRequest" + }, + "description": "Creates a snapshot from the requested subscription.\nIf the snapshot already exists, returns `ALREADY_EXISTS`.\nIf the requested subscription doesn't exist, returns `NOT_FOUND`.\nIf the backlog in the subscription is too old -- and the resulting snapshot\nwould expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned.\nSee also the `Snapshot.expire_time` field.\n\nIf the name is not provided in the request, the server will assign a random\nname for this snapshot on the same project as the subscription, conforming\nto the\n[resource name\nformat](https://cloud.google.com/pubsub/docs/overview#names). The generated\nname is populated in the returned Snapshot object. 
Note that for REST API\nrequests, you must specify a name in the request.", + "httpMethod": "PUT", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Snapshot" + }, + "parameters": { + "name": { + "description": "Optional user-provided name for this snapshot.\nIf the name is not provided in the request, the server will assign a random\nname for this snapshot on the same project as the subscription.\nNote that for REST API requests, you must specify a name.\nFormat is `projects/{project}/snapshots/{snap}`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/snapshots/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", + "id": "pubsub.projects.snapshots.create", + "path": "v1/{+name}" + }, + "setIamPolicy": { + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "parameters": { + "resource": { + "location": "path", + "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/snapshots/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub" + ], + "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "id": "pubsub.projects.snapshots.setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy." } } } @@ -694,47 +914,16 @@ } }, "parameters": { - "key": { - "location": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", - "type": "string" - }, - "access_token": { - "type": "string", - "location": "query", - "description": "OAuth access token." - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "pp": { - "description": "Pretty-print response.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "bearer_token": { - "location": "query", - "description": "OAuth bearer token.", - "type": "string" - }, - "oauth_token": { - "location": "query", - "description": "OAuth 2.0 token for the current user.", - "type": "string" - }, "upload_protocol": { "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", "type": "string", "location": "query" }, "prettyPrint": { + "location": "query", "description": "Returns response with indentations and line breaks.", - "default": "true", "type": "boolean", - "location": "query" + "default": "true" }, "fields": { "location": "query", @@ -742,34 +931,29 @@ "type": "string" }, "uploadType": { + "location": "query", "description": "Legacy upload protocol for media (e.g. 
\"media\", \"multipart\").", + "type": "string" + }, + "callback": { + "description": "JSONP", "type": "string", "location": "query" }, "$.xgafv": { - "enum": [ - "1", - "2" - ], "description": "V1 error format.", "type": "string", "enumDescriptions": [ "v1 error format", "v2 error format" ], - "location": "query" - }, - "callback": { - "type": "string", "location": "query", - "description": "JSONP" + "enum": [ + "1", + "2" + ] }, "alt": { - "enum": [ - "json", - "media", - "proto" - ], "type": "string", "enumDescriptions": [ "Responses with Content-Type of application/json", @@ -778,28 +962,65 @@ ], "location": "query", "description": "Data format for response.", - "default": "json" + "default": "json", + "enum": [ + "json", + "media", + "proto" + ] + }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "type": "string", + "location": "query" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "type": "string", + "location": "query" + }, + "pp": { + "description": "Pretty-print response.", + "type": "boolean", + "default": "true", + "location": "query" + }, + "bearer_token": { + "location": "query", + "description": "OAuth bearer token.", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "type": "string", + "location": "query" } }, "schemas": { - "PullRequest": { - "description": "Request for the `Pull` method.", + "ListSnapshotsResponse": { + "description": "Response for the `ListSnapshots` method.", "type": "object", "properties": { - "maxMessages": { - "format": "int32", - "description": "The maximum number of messages returned for this request. The Pub/Sub\nsystem may return fewer than the number specified.", - "type": "integer" + "nextPageToken": { + "description": "If not empty, indicates that there may be more snapshot that match the\nrequest; this value should be passed in a new `ListSnapshotsRequest`.", + "type": "string" }, - "returnImmediately": { - "description": "If this field set to true, the system will respond immediately even if\nit there are no messages available to return in the `Pull` response.\nOtherwise, the system may wait (for a bounded amount of time) until at\nleast one message is available, rather than returning no messages. 
The\nclient may cancel the request if it does not wish to wait any longer for\nthe response.", - "type": "boolean" + "snapshots": { + "description": "The resulting snapshots.", + "type": "array", + "items": { + "$ref": "Snapshot" + } } }, - "id": "PullRequest" + "id": "ListSnapshotsResponse" }, "ListSubscriptionsResponse": { - "id": "ListSubscriptionsResponse", "description": "Response for the `ListSubscriptions` method.", "type": "object", "properties": { @@ -809,26 +1030,218 @@ }, "subscriptions": { "description": "The subscriptions that match the request.", + "type": "array", "items": { "$ref": "Subscription" - }, - "type": "array" + } } - } + }, + "id": "ListSubscriptionsResponse" + }, + "ListTopicSnapshotsResponse": { + "description": "Response for the `ListTopicSnapshots` method.", + "type": "object", + "properties": { + "nextPageToken": { + "description": "If not empty, indicates that there may be more snapshots that match\nthe request; this value should be passed in a new\n`ListTopicSnapshotsRequest` to get more snapshots.", + "type": "string" + }, + "snapshots": { + "description": "The names of the snapshots that match the request.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "ListTopicSnapshotsResponse" + }, + "CreateSnapshotRequest": { + "description": "Request for the `CreateSnapshot` method.", + "type": "object", + "properties": { + "subscription": { + "description": "The subscription whose backlog the snapshot retains.\nSpecifically, the created snapshot is guaranteed to retain:\n (a) The existing backlog on the subscription. 
More precisely, this is\n defined as the messages in the subscription's backlog that are\n unacknowledged upon the successful completion of the\n `CreateSnapshot` request; as well as:\n (b) Any messages published to the subscription's topic following the\n successful completion of the CreateSnapshot request.\nFormat is `projects/{project}/subscriptions/{sub}`.", + "type": "string" + } + }, + "id": "CreateSnapshotRequest" + }, + "TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsRequest" + }, + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. 
A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", + "type": "object", + "properties": { + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", + "type": "string" + }, + "version": { + "description": "Version of the `Policy`. The default version is 0.", + "format": "int32", + "type": "integer" + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\n`bindings` with no members will result in an error.", + "type": "array", + "items": { + "$ref": "Binding" + } + } + }, + "id": "Policy" + }, + "Topic": { + "description": "A topic resource.", + "type": "object", + "properties": { + "name": { + "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). 
It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", + "type": "string" + } + }, + "id": "Topic" + }, + "Binding": { + "description": "Associates `members` with a `role`.", + "type": "object", + "properties": { + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "type": "array", + "items": { + "type": "string" + } + }, + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "type": "string" + } + }, + "id": "Binding" + }, + "SeekRequest": { + "description": "Request for the `Seek` method.", + "type": "object", + "properties": { + "time": { + "description": "The time to seek to.\nMessages retained in the subscription that were published before this\ntime are marked as acknowledged, and messages retained in the\nsubscription that were published after this time are marked as\nunacknowledged. 
Note that this operation affects only those messages\nretained in the subscription (configured by the combination of\n`message_retention_duration` and `retain_acked_messages`). For example,\nif `time` corresponds to a point before the message retention\nwindow (or to a point before the system's notion of the subscription\ncreation time), only retained messages will be marked as unacknowledged,\nand already-expunged messages will not be restored.", + "format": "google-datetime", + "type": "string" + }, + "snapshot": { + "description": "The snapshot to seek to. The snapshot's topic must be the same as that of\nthe provided subscription.\nFormat is `projects/{project}/snapshots/{snap}`.", + "type": "string" + } + }, + "id": "SeekRequest" + }, + "ReceivedMessage": { + "description": "A message and its corresponding acknowledgment ID.", + "type": "object", + "properties": { + "message": { + "description": "The message.", + "$ref": "PubsubMessage" + }, + "ackId": { + "description": "This ID can be used to acknowledge the received message.", + "type": "string" + } + }, + "id": "ReceivedMessage" + }, + "PushConfig": { + "description": "Configuration for a push delivery endpoint.", + "type": "object", + "properties": { + "pushEndpoint": { + "description": "A URL locating the endpoint to which messages should be pushed.\nFor example, a Webhook endpoint might use \"https://example.com/push\".", + "type": "string" + }, + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Endpoint configuration attributes.\n\nEvery endpoint has a set of API supported attributes that can be used to\ncontrol different aspects of the message delivery.\n\nThe currently supported attribute is `x-goog-version`, which you can\nuse to change the format of the pushed message. This attribute\nindicates the version of the data expected by the endpoint. 
This\ncontrols the shape of the pushed message (i.e., its fields and metadata).\nThe endpoint version is based on the version of the Pub/Sub API.\n\nIf not present during the `CreateSubscription` call, it will default to\nthe version of the API used to make such call. If not present during a\n`ModifyPushConfig` call, its value will not be changed. `GetSubscription`\ncalls will always return a valid version, even if the subscription was\ncreated without this attribute.\n\nThe possible values for this attribute are:\n\n* `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.\n* `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.", + "type": "object" + } + }, + "id": "PushConfig" + }, + "TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsResponse" + }, + "PullRequest": { + "description": "Request for the `Pull` method.", + "type": "object", + "properties": { + "maxMessages": { + "description": "The maximum number of messages returned for this request. The Pub/Sub\nsystem may return fewer than the number specified.", + "format": "int32", + "type": "integer" + }, + "returnImmediately": { + "description": "If this field set to true, the system will respond immediately even if\nit there are no messages available to return in the `Pull` response.\nOtherwise, the system may wait (for a bounded amount of time) until at\nleast one message is available, rather than returning no messages. 
The\nclient may cancel the request if it does not wish to wait any longer for\nthe response.", + "type": "boolean" + } + }, + "id": "PullRequest" }, "PublishRequest": { + "description": "Request for the Publish method.", "type": "object", "properties": { "messages": { "description": "The messages to publish.", + "type": "array", "items": { "$ref": "PubsubMessage" - }, - "type": "array" + } } }, - "id": "PublishRequest", - "description": "Request for the Publish method." + "id": "PublishRequest" + }, + "UpdateSubscriptionRequest": { + "description": "Request for the UpdateSubscription method.", + "type": "object", + "properties": { + "subscription": { + "$ref": "Subscription", + "description": "The updated subscription object." + }, + "updateMask": { + "description": "Indicates which fields in the provided subscription to update.\nMust be specified and non-empty.", + "format": "google-fieldmask", + "type": "string" + } + }, + "id": "UpdateSubscriptionRequest" }, "PublishResponse": { "description": "Response for the `Publish` method.", @@ -836,10 +1249,10 @@ "properties": { "messageIds": { "description": "The server-assigned ID of each published message, in the same order as\nthe messages in the request. IDs are guaranteed to be unique within\nthe topic.", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } } }, "id": "PublishResponse" @@ -848,112 +1261,97 @@ "description": "A subscription resource.", "type": "object", "properties": { - "pushConfig": { - "description": "If push delivery is used with this subscription, this field is\nused to configure it. An empty `pushConfig` signifies that the subscriber\nwill pull and ack messages using API methods.", - "$ref": "PushConfig" - }, "ackDeadlineSeconds": { + "description": "This value is the maximum time after a subscriber receives a message\nbefore the subscriber should acknowledge the message. 
After message\ndelivery but before the ack deadline expires and before the message is\nacknowledged, it is an outstanding message and will not be delivered\nagain during that time (on a best-effort basis).\n\nFor pull subscriptions, this value is used as the initial value for the ack\ndeadline. To override this value for a given message, call\n`ModifyAckDeadline` with the corresponding `ack_id` if using\nnon-streaming pull or send the `ack_id` in a\n`StreamingModifyAckDeadlineRequest` if using streaming pull.\nThe minimum custom deadline you can specify is 10 seconds.\nThe maximum custom deadline you can specify is 600 seconds (10 minutes).\nIf this parameter is 0, a default value of 10 seconds is used.\n\nFor push delivery, this value is also used to set the request timeout for\nthe call to the push endpoint.\n\nIf the subscriber never acknowledges the message, the Pub/Sub\nsystem will eventually redeliver the message.", "format": "int32", - "description": "This value is the maximum time after a subscriber receives a message\nbefore the subscriber should acknowledge the message. After message\ndelivery but before the ack deadline expires and before the message is\nacknowledged, it is an outstanding message and will not be delivered\nagain during that time (on a best-effort basis).\n\nFor pull subscriptions, this value is used as the initial value for the ack\ndeadline. 
To override this value for a given message, call\n`ModifyAckDeadline` with the corresponding `ack_id` if using\npull.\nThe minimum custom deadline you can specify is 10 seconds.\nThe maximum custom deadline you can specify is 600 seconds (10 minutes).\nIf this parameter is 0, a default value of 10 seconds is used.\n\nFor push delivery, this value is also used to set the request timeout for\nthe call to the push endpoint.\n\nIf the subscriber never acknowledges the message, the Pub/Sub\nsystem will eventually redeliver the message.", "type": "integer" }, + "messageRetentionDuration": { + "description": "How long to retain unacknowledged messages in the subscription's backlog,\nfrom the moment a message is published.\nIf `retain_acked_messages` is true, then this also configures the retention\nof acknowledged messages, and thus configures how far back in time a `Seek`\ncan be done. Defaults to 7 days. Cannot be more than 7 days or less than 10\nminutes.", + "format": "google-duration", + "type": "string" + }, + "retainAckedMessages": { + "description": "Indicates whether to retain acknowledged messages. If true, then\nmessages are not expunged from the subscription's backlog, even if they are\nacknowledged, until they fall out of the `message_retention_duration`\nwindow.", + "type": "boolean" + }, "name": { - "type": "string", - "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`." + "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. 
`{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + "type": "string" }, "topic": { "description": "The name of the topic from which this subscription is receiving messages.\nFormat is `projects/{project}/topics/{topic}`.\nThe value of this field will be `_deleted-topic_` if the topic has been\ndeleted.", "type": "string" + }, + "pushConfig": { + "description": "If push delivery is used with this subscription, this field is\nused to configure it. An empty `pushConfig` signifies that the subscriber\nwill pull and ack messages using API methods.", + "$ref": "PushConfig" } }, "id": "Subscription" }, - "TestIamPermissionsRequest": { + "SeekResponse": { "type": "object", - "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "id": "TestIamPermissionsRequest", - "description": "Request message for `TestIamPermissions` method." - }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. 
A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", - "type": "object", - "properties": { - "etag": { - "type": "string", - "format": "byte", - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly." - }, - "version": { - "format": "int32", - "description": "Version of the `Policy`. The default version is 0.", - "type": "integer" - }, - "bindings": { - "description": "Associates a list of `members` to a `role`.\n`bindings` with no members will result in an error.", - "items": { - "$ref": "Binding" - }, - "type": "array" - } - }, - "id": "Policy" - }, - "Topic": { - "properties": { - "name": { - "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). 
It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", - "type": "string" - } - }, - "id": "Topic", - "description": "A topic resource.", - "type": "object" + "properties": {}, + "id": "SeekResponse" }, "ModifyAckDeadlineRequest": { + "description": "Request for the ModifyAckDeadline method.", "type": "object", "properties": { "ackIds": { "description": "List of acknowledgment IDs.", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } }, "ackDeadlineSeconds": { - "format": "int32", "description": "The new ack deadline with respect to the time this request was sent to\nthe Pub/Sub system. For example, if the value is 10, the new\nack deadline will expire 10 seconds after the `ModifyAckDeadline` call\nwas made. Specifying zero may immediately make the message available for\nanother pull request.\nThe minimum deadline you can specify is 0 seconds.\nThe maximum deadline you can specify is 600 seconds (10 minutes).", + "format": "int32", "type": "integer" } }, - "id": "ModifyAckDeadlineRequest", - "description": "Request for the ModifyAckDeadline method." + "id": "ModifyAckDeadlineRequest" + }, + "Snapshot": { + "description": "A snapshot resource.", + "type": "object", + "properties": { + "expireTime": { + "description": "The snapshot is guaranteed to exist up until this time.\nA newly-created snapshot expires no later than 7 days from the time of its\ncreation. Its exact lifetime is determined at creation by the existing\nbacklog in the source subscription. Specifically, the lifetime of the\nsnapshot is `7 days - (age of oldest unacked message in the subscription)`.\nFor example, consider a subscription whose oldest unacked message is 3 days\nold. If a snapshot is created from this subscription, the snapshot -- which\nwill always capture this 3-day-old backlog as long as the snapshot\nexists -- will expire in 4 days. 
The service will refuse to create a\nsnapshot that would expire in less than 1 hour after creation.", + "format": "google-datetime", + "type": "string" + }, + "name": { + "description": "The name of the snapshot.", + "type": "string" + }, + "topic": { + "description": "The name of the topic from which this snapshot is retaining messages.", + "type": "string" + } + }, + "id": "Snapshot" }, "SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", "type": "object", "properties": { "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", + "$ref": "Policy" } }, - "id": "SetIamPolicyRequest", - "description": "Request message for `SetIamPolicy` method." + "id": "SetIamPolicyRequest" }, "ModifyPushConfigRequest": { "description": "Request for the ModifyPushConfig method.", "type": "object", "properties": { "pushConfig": { - "$ref": "PushConfig", - "description": "The push configuration for future deliveries.\n\nAn empty `pushConfig` indicates that the Pub/Sub system should\nstop pushing messages from the given subscription and allow\nmessages to be pulled and acknowledged - effectively pausing\nthe subscription if `Pull` is not called." 
+ "description": "The push configuration for future deliveries.\n\nAn empty `pushConfig` indicates that the Pub/Sub system should\nstop pushing messages from the given subscription and allow\nmessages to be pulled and acknowledged - effectively pausing\nthe subscription if `Pull` or `StreamingPull` is not called.", + "$ref": "PushConfig" } }, "id": "ModifyPushConfigRequest" @@ -962,10 +1360,6 @@ "description": "A message data and its attributes. The message payload must not be empty;\nit must contain either a non-empty data field, or at least one attribute.", "type": "object", "properties": { - "messageId": { - "description": "ID of this message, assigned by the server when the message is published.\nGuaranteed to be unique within the topic. This value may be read by a\nsubscriber that receives a `PubsubMessage` via a `Pull` call or a push\ndelivery. It must not be populated by the publisher in a `Publish` call.", - "type": "string" - }, "attributes": { "description": "Optional attributes for this message.", "type": "object", @@ -973,77 +1367,62 @@ "type": "string" } }, + "messageId": { + "description": "ID of this message, assigned by the server when the message is published.\nGuaranteed to be unique within the topic. This value may be read by a\nsubscriber that receives a `PubsubMessage` via a `Pull` call or a push\ndelivery. It must not be populated by the publisher in a `Publish` call.", + "type": "string" + }, "publishTime": { - "format": "google-datetime", "description": "The time at which the message was published, populated by the server when\nit receives the `Publish` call. 
It must not be populated by the\npublisher in a `Publish` call.", + "format": "google-datetime", "type": "string" }, "data": { - "format": "byte", "description": "The message payload.", + "format": "byte", "type": "string" } }, "id": "PubsubMessage" }, - "Binding": { - "properties": { - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", - "type": "string" - } - }, - "id": "Binding", - "description": "Associates `members` with a `role`.", - "type": "object" - }, "AcknowledgeRequest": { "description": "Request for the Acknowledge method.", "type": "object", "properties": { "ackIds": { "description": "The acknowledgment ID for the messages being acknowledged that was returned\nby the Pub/Sub system in the `Pull` response. 
Must not be empty.", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } } }, "id": "AcknowledgeRequest" }, "Empty": { - "id": "Empty", "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", "type": "object", - "properties": {} + "properties": {}, + "id": "Empty" }, "ListTopicsResponse": { "description": "Response for the `ListTopics` method.", "type": "object", "properties": { "nextPageToken": { - "type": "string", - "description": "If not empty, indicates that there may be more topics that match the\nrequest; this value should be passed in a new `ListTopicsRequest`." + "description": "If not empty, indicates that there may be more topics that match the\nrequest; this value should be passed in a new `ListTopicsRequest`.", + "type": "string" }, "topics": { "description": "The resulting topics.", + "type": "array", "items": { "$ref": "Topic" - }, - "type": "array" + } } }, "id": "ListTopicsResponse" }, "ListTopicSubscriptionsResponse": { - "id": "ListTopicSubscriptionsResponse", "description": "Response for the `ListTopicSubscriptions` method.", "type": "object", "properties": { @@ -1053,80 +1432,50 @@ }, "subscriptions": { "description": "The names of the subscriptions that match the request.", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } } - } + }, + "id": "ListTopicSubscriptionsResponse" }, "PullResponse": { - "id": "PullResponse", "description": "Response for the `Pull` method.", "type": "object", "properties": { "receivedMessages": { "description": "Received Pub/Sub messages. The Pub/Sub system will return zero messages if\nthere are no more available in the backlog. 
The Pub/Sub system may return\nfewer than the `maxMessages` requested even if there are more messages\navailable in the backlog.", + "type": "array", "items": { "$ref": "ReceivedMessage" - }, - "type": "array" - } - } - }, - "ReceivedMessage": { - "properties": { - "ackId": { - "description": "This ID can be used to acknowledge the received message.", - "type": "string" - }, - "message": { - "description": "The message.", - "$ref": "PubsubMessage" - } - }, - "id": "ReceivedMessage", - "description": "A message and its corresponding acknowledgment ID.", - "type": "object" - }, - "PushConfig": { - "type": "object", - "properties": { - "attributes": { - "description": "Endpoint configuration attributes.\n\nEvery endpoint has a set of API supported attributes that can be used to\ncontrol different aspects of the message delivery.\n\nThe currently supported attribute is `x-goog-version`, which you can\nuse to change the format of the pushed message. This attribute\nindicates the version of the data expected by the endpoint. This\ncontrols the shape of the pushed message (i.e., its fields and metadata).\nThe endpoint version is based on the version of the Pub/Sub API.\n\nIf not present during the `CreateSubscription` call, it will default to\nthe version of the API used to make such call. If not present during a\n`ModifyPushConfig` call, its value will not be changed. 
`GetSubscription`\ncalls will always return a valid version, even if the subscription was\ncreated without this attribute.\n\nThe possible values for this attribute are:\n\n* `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.\n* `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.", - "type": "object", - "additionalProperties": { - "type": "string" } - }, - "pushEndpoint": { - "description": "A URL locating the endpoint to which messages should be pushed.\nFor example, a Webhook endpoint might use \"https://example.com/push\".", - "type": "string" } }, - "id": "PushConfig", - "description": "Configuration for a push delivery endpoint." + "id": "PullResponse" }, - "TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", + "UpdateSnapshotRequest": { + "description": "Request for the UpdateSnapshot method.", "type": "object", "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", - "items": { - "type": "string" - }, - "type": "array" + "updateMask": { + "description": "Indicates which fields in the provided snapshot to update.\nMust be specified and non-empty.", + "format": "google-fieldmask", + "type": "string" + }, + "snapshot": { + "description": "The updated snpashot object.", + "$ref": "Snapshot" } }, - "id": "TestIamPermissionsResponse" + "id": "UpdateSnapshotRequest" } }, - "protocol": "rest", "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", "x32": "http://www.google.com/images/icons/product/search-32.gif" }, + "protocol": "rest", "version": "v1", "baseUrl": "https://pubsub.googleapis.com/", "canonicalName": "Pubsub", @@ -1150,10 +1499,10 @@ "ownerDomain": "google.com", "name": "pubsub", "batchPath": "batch", + "revision": "20171205", "documentationLink": "https://cloud.google.com/pubsub/docs", "id": "pubsub:v1", - "revision": "20170829", "title": "Google Cloud Pub/Sub API", - 
"ownerName": "Google", - "discoveryVersion": "v1" + "discoveryVersion": "v1", + "ownerName": "Google" } diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go b/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go index 12ea3588d8a..9511d37953c 100644 --- a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go +++ b/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go @@ -116,6 +116,7 @@ type ProjectsSubscriptionsService struct { func NewProjectsTopicsService(s *Service) *ProjectsTopicsService { rs := &ProjectsTopicsService{s: s} + rs.Snapshots = NewProjectsTopicsSnapshotsService(s) rs.Subscriptions = NewProjectsTopicsSubscriptionsService(s) return rs } @@ -123,9 +124,20 @@ func NewProjectsTopicsService(s *Service) *ProjectsTopicsService { type ProjectsTopicsService struct { s *Service + Snapshots *ProjectsTopicsSnapshotsService + Subscriptions *ProjectsTopicsSubscriptionsService } +func NewProjectsTopicsSnapshotsService(s *Service) *ProjectsTopicsSnapshotsService { + rs := &ProjectsTopicsSnapshotsService{s: s} + return rs +} + +type ProjectsTopicsSnapshotsService struct { + s *Service +} + func NewProjectsTopicsSubscriptionsService(s *Service) *ProjectsTopicsSubscriptionsService { rs := &ProjectsTopicsSubscriptionsService{s: s} return rs @@ -160,8 +172,8 @@ type AcknowledgeRequest struct { } func (s *AcknowledgeRequest) MarshalJSON() ([]byte, error) { - type noMethod AcknowledgeRequest - raw := noMethod(*s) + type NoMethod AcknowledgeRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -226,8 +238,47 @@ type Binding struct { } func (s *Binding) MarshalJSON() ([]byte, error) { - type noMethod Binding - raw := noMethod(*s) + type NoMethod Binding + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CreateSnapshotRequest: Request for the `CreateSnapshot` method. 
+type CreateSnapshotRequest struct { + // Subscription: The subscription whose backlog the snapshot + // retains. + // Specifically, the created snapshot is guaranteed to retain: + // (a) The existing backlog on the subscription. More precisely, this + // is + // defined as the messages in the subscription's backlog that are + // unacknowledged upon the successful completion of the + // `CreateSnapshot` request; as well as: + // (b) Any messages published to the subscription's topic following + // the + // successful completion of the CreateSnapshot request. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `json:"subscription,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Subscription") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Subscription") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CreateSnapshotRequest) MarshalJSON() ([]byte, error) { + type NoMethod CreateSnapshotRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -249,6 +300,43 @@ type Empty struct { googleapi.ServerResponse `json:"-"` } +// ListSnapshotsResponse: Response for the `ListSnapshots` method. 
+type ListSnapshotsResponse struct { + // NextPageToken: If not empty, indicates that there may be more + // snapshot that match the + // request; this value should be passed in a new `ListSnapshotsRequest`. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Snapshots: The resulting snapshots. + Snapshots []*Snapshot `json:"snapshots,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListSnapshotsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListSnapshotsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListSubscriptionsResponse: Response for the `ListSubscriptions` // method. 
type ListSubscriptionsResponse struct { @@ -284,8 +372,48 @@ type ListSubscriptionsResponse struct { } func (s *ListSubscriptionsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListSubscriptionsResponse - raw := noMethod(*s) + type NoMethod ListSubscriptionsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListTopicSnapshotsResponse: Response for the `ListTopicSnapshots` +// method. +type ListTopicSnapshotsResponse struct { + // NextPageToken: If not empty, indicates that there may be more + // snapshots that match + // the request; this value should be passed in a + // new + // `ListTopicSnapshotsRequest` to get more snapshots. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Snapshots: The names of the snapshots that match the request. + Snapshots []string `json:"snapshots,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ListTopicSnapshotsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListTopicSnapshotsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -324,8 +452,8 @@ type ListTopicSubscriptionsResponse struct { } func (s *ListTopicSubscriptionsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListTopicSubscriptionsResponse - raw := noMethod(*s) + type NoMethod ListTopicSubscriptionsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -361,8 +489,8 @@ type ListTopicsResponse struct { } func (s *ListTopicsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListTopicsResponse - raw := noMethod(*s) + type NoMethod ListTopicsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -402,8 +530,8 @@ type ModifyAckDeadlineRequest struct { } func (s *ModifyAckDeadlineRequest) MarshalJSON() ([]byte, error) { - type noMethod ModifyAckDeadlineRequest - raw := noMethod(*s) + type NoMethod ModifyAckDeadlineRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -414,7 +542,7 @@ type ModifyPushConfigRequest struct { // An empty `pushConfig` indicates that the Pub/Sub system should // stop pushing messages from the given subscription and allow // messages to be pulled and acknowledged - effectively pausing - // the subscription if `Pull` is not called. + // the subscription if `Pull` or `StreamingPull` is not called. PushConfig *PushConfig `json:"pushConfig,omitempty"` // ForceSendFields is a list of field names (e.g. 
"PushConfig") to @@ -435,8 +563,8 @@ type ModifyPushConfigRequest struct { } func (s *ModifyPushConfigRequest) MarshalJSON() ([]byte, error) { - type noMethod ModifyPushConfigRequest - raw := noMethod(*s) + type NoMethod ModifyPushConfigRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -526,8 +654,8 @@ type Policy struct { } func (s *Policy) MarshalJSON() ([]byte, error) { - type noMethod Policy - raw := noMethod(*s) + type NoMethod Policy + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -554,8 +682,8 @@ type PublishRequest struct { } func (s *PublishRequest) MarshalJSON() ([]byte, error) { - type noMethod PublishRequest - raw := noMethod(*s) + type NoMethod PublishRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -590,8 +718,8 @@ type PublishResponse struct { } func (s *PublishResponse) MarshalJSON() ([]byte, error) { - type noMethod PublishResponse - raw := noMethod(*s) + type NoMethod PublishResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -641,8 +769,8 @@ type PubsubMessage struct { } func (s *PubsubMessage) MarshalJSON() ([]byte, error) { - type noMethod PubsubMessage - raw := noMethod(*s) + type NoMethod PubsubMessage + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -684,8 +812,8 @@ type PullRequest struct { } func (s *PullRequest) MarshalJSON() ([]byte, error) { - type noMethod PullRequest - raw := noMethod(*s) + type NoMethod PullRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -723,8 +851,8 @@ type PullResponse struct { } func (s *PullResponse) MarshalJSON() ([]byte, error) { - type noMethod PullResponse - raw := noMethod(*s) + type NoMethod PullResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -787,8 
+915,8 @@ type PushConfig struct { } func (s *PushConfig) MarshalJSON() ([]byte, error) { - type noMethod PushConfig - raw := noMethod(*s) + type NoMethod PushConfig + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -818,11 +946,69 @@ type ReceivedMessage struct { } func (s *ReceivedMessage) MarshalJSON() ([]byte, error) { - type noMethod ReceivedMessage - raw := noMethod(*s) + type NoMethod ReceivedMessage + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SeekRequest: Request for the `Seek` method. +type SeekRequest struct { + // Snapshot: The snapshot to seek to. The snapshot's topic must be the + // same as that of + // the provided subscription. + // Format is `projects/{project}/snapshots/{snap}`. + Snapshot string `json:"snapshot,omitempty"` + + // Time: The time to seek to. + // Messages retained in the subscription that were published before + // this + // time are marked as acknowledged, and messages retained in + // the + // subscription that were published after this time are marked + // as + // unacknowledged. Note that this operation affects only those + // messages + // retained in the subscription (configured by the combination + // of + // `message_retention_duration` and `retain_acked_messages`). For + // example, + // if `time` corresponds to a point before the message retention + // window (or to a point before the system's notion of the + // subscription + // creation time), only retained messages will be marked as + // unacknowledged, + // and already-expunged messages will not be restored. + Time string `json:"time,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Snapshot") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Snapshot") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SeekRequest) MarshalJSON() ([]byte, error) { + type NoMethod SeekRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SeekResponse struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the @@ -851,8 +1037,64 @@ type SetIamPolicyRequest struct { } func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { - type noMethod SetIamPolicyRequest - raw := noMethod(*s) + type NoMethod SetIamPolicyRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Snapshot: A snapshot resource. +type Snapshot struct { + // ExpireTime: The snapshot is guaranteed to exist up until this time. + // A newly-created snapshot expires no later than 7 days from the time + // of its + // creation. Its exact lifetime is determined at creation by the + // existing + // backlog in the source subscription. 
Specifically, the lifetime of + // the + // snapshot is `7 days - (age of oldest unacked message in the + // subscription)`. + // For example, consider a subscription whose oldest unacked message is + // 3 days + // old. If a snapshot is created from this subscription, the snapshot -- + // which + // will always capture this 3-day-old backlog as long as the + // snapshot + // exists -- will expire in 4 days. The service will refuse to create + // a + // snapshot that would expire in less than 1 hour after creation. + ExpireTime string `json:"expireTime,omitempty"` + + // Name: The name of the snapshot. + Name string `json:"name,omitempty"` + + // Topic: The name of the topic from which this snapshot is retaining + // messages. + Topic string `json:"topic,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ExpireTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExpireTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *Snapshot) MarshalJSON() ([]byte, error) { + type NoMethod Snapshot + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -874,7 +1116,9 @@ type Subscription struct { // call // `ModifyAckDeadline` with the corresponding `ack_id` if // using - // pull. + // non-streaming pull or send the `ack_id` in + // a + // `StreamingModifyAckDeadlineRequest` if using streaming pull. // The minimum custom deadline you can specify is 10 seconds. // The maximum custom deadline you can specify is 600 seconds (10 // minutes). @@ -888,6 +1132,18 @@ type Subscription struct { // system will eventually redeliver the message. AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` + // MessageRetentionDuration: How long to retain unacknowledged messages + // in the subscription's backlog, + // from the moment a message is published. + // If `retain_acked_messages` is true, then this also configures the + // retention + // of acknowledged messages, and thus configures how far back in time a + // `Seek` + // can be done. Defaults to 7 days. Cannot be more than 7 days or less + // than 10 + // minutes. + MessageRetentionDuration string `json:"messageRetentionDuration,omitempty"` + // Name: The name of the subscription. It must have the // format // "projects/{project}/subscriptions/{subscription}". `{subscription}` @@ -908,6 +1164,15 @@ type Subscription struct { // will pull and ack messages using API methods. PushConfig *PushConfig `json:"pushConfig,omitempty"` + // RetainAckedMessages: Indicates whether to retain acknowledged + // messages. If true, then + // messages are not expunged from the subscription's backlog, even if + // they are + // acknowledged, until they fall out of the + // `message_retention_duration` + // window. + RetainAckedMessages bool `json:"retainAckedMessages,omitempty"` + // Topic: The name of the topic from which this subscription is // receiving messages. 
// Format is `projects/{project}/topics/{topic}`. @@ -939,8 +1204,8 @@ type Subscription struct { } func (s *Subscription) MarshalJSON() ([]byte, error) { - type noMethod Subscription - raw := noMethod(*s) + type NoMethod Subscription + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -974,8 +1239,8 @@ type TestIamPermissionsRequest struct { } func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { - type noMethod TestIamPermissionsRequest - raw := noMethod(*s) + type NoMethod TestIamPermissionsRequest + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1009,8 +1274,8 @@ type TestIamPermissionsResponse struct { } func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { - type noMethod TestIamPermissionsResponse - raw := noMethod(*s) + type NoMethod TestIamPermissionsResponse + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -1051,11 +1316,366 @@ type Topic struct { } func (s *Topic) MarshalJSON() ([]byte, error) { - type noMethod Topic - raw := noMethod(*s) + type NoMethod Topic + raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UpdateSnapshotRequest: Request for the UpdateSnapshot method. +type UpdateSnapshotRequest struct { + // Snapshot: The updated snpashot object. + Snapshot *Snapshot `json:"snapshot,omitempty"` + + // UpdateMask: Indicates which fields in the provided snapshot to + // update. + // Must be specified and non-empty. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Snapshot") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Snapshot") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UpdateSnapshotRequest) MarshalJSON() ([]byte, error) { + type NoMethod UpdateSnapshotRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UpdateSubscriptionRequest: Request for the UpdateSubscription method. +type UpdateSubscriptionRequest struct { + // Subscription: The updated subscription object. + Subscription *Subscription `json:"subscription,omitempty"` + + // UpdateMask: Indicates which fields in the provided subscription to + // update. + // Must be specified and non-empty. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Subscription") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Subscription") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UpdateSubscriptionRequest) MarshalJSON() ([]byte, error) { + type NoMethod UpdateSubscriptionRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "pubsub.projects.snapshots.create": + +type ProjectsSnapshotsCreateCall struct { + s *Service + name string + createsnapshotrequest *CreateSnapshotRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a snapshot from the requested subscription. +// If the snapshot already exists, returns `ALREADY_EXISTS`. +// If the requested subscription doesn't exist, returns `NOT_FOUND`. +// If the backlog in the subscription is too old -- and the resulting +// snapshot +// would expire in less than 1 hour -- then `FAILED_PRECONDITION` is +// returned. +// See also the `Snapshot.expire_time` field. +// +// If the name is not provided in the request, the server will assign a +// random +// name for this snapshot on the same project as the subscription, +// conforming +// to the +// [resource +// name +// format](https://cloud.google.com/pubsub/docs/overview#names). The +// generated +// name is populated in the returned Snapshot object. Note that for REST +// API +// requests, you must specify a name in the request. +func (r *ProjectsSnapshotsService) Create(name string, createsnapshotrequest *CreateSnapshotRequest) *ProjectsSnapshotsCreateCall { + c := &ProjectsSnapshotsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.createsnapshotrequest = createsnapshotrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsSnapshotsCreateCall) Fields(s ...googleapi.Field) *ProjectsSnapshotsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSnapshotsCreateCall) Context(ctx context.Context) *ProjectsSnapshotsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSnapshotsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSnapshotsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createsnapshotrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "pubsub.projects.snapshots.create" call. +// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Snapshot.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *ProjectsSnapshotsCreateCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Snapshot{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a snapshot from the requested subscription.\nIf the snapshot already exists, returns `ALREADY_EXISTS`.\nIf the requested subscription doesn't exist, returns `NOT_FOUND`.\nIf the backlog in the subscription is too old -- and the resulting snapshot\nwould expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned.\nSee also the `Snapshot.expire_time` field.\n\nIf the name is not provided in the request, the server will assign a random\nname for this snapshot on the same project as the subscription, conforming\nto the\n[resource name\nformat](https://cloud.google.com/pubsub/docs/overview#names). The generated\nname is populated in the returned Snapshot object. 
Note that for REST API\nrequests, you must specify a name in the request.", + // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", + // "httpMethod": "PUT", + // "id": "pubsub.projects.snapshots.create", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Optional user-provided name for this snapshot.\nIf the name is not provided in the request, the server will assign a random\nname for this snapshot on the same project as the subscription.\nNote that for REST API requests, you must specify a name.\nFormat is `projects/{project}/snapshots/{snap}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/snapshots/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "CreateSnapshotRequest" + // }, + // "response": { + // "$ref": "Snapshot" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// method id "pubsub.projects.snapshots.delete": + +type ProjectsSnapshotsDeleteCall struct { + s *Service + snapshot string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Removes an existing snapshot. All messages retained in the +// snapshot +// are immediately dropped. After a snapshot is deleted, a new one may +// be +// created with the same name, but the new one has no association with +// the old +// snapshot or its subscription, unless the same subscription is +// specified. +func (r *ProjectsSnapshotsService) Delete(snapshot string) *ProjectsSnapshotsDeleteCall { + c := &ProjectsSnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.snapshot = snapshot + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsSnapshotsDeleteCall) Fields(s ...googleapi.Field) *ProjectsSnapshotsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSnapshotsDeleteCall) Context(ctx context.Context) *ProjectsSnapshotsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSnapshotsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+snapshot}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "snapshot": c.snapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "pubsub.projects.snapshots.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsSnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes an existing snapshot. All messages retained in the snapshot\nare immediately dropped. After a snapshot is deleted, a new one may be\ncreated with the same name, but the new one has no association with the old\nsnapshot or its subscription, unless the same subscription is specified.", + // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", + // "httpMethod": "DELETE", + // "id": "pubsub.projects.snapshots.delete", + // "parameterOrder": [ + // "snapshot" + // ], + // "parameters": { + // "snapshot": { + // "description": "The name of the snapshot to delete.\nFormat is `projects/{project}/snapshots/{snap}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/snapshots/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+snapshot}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + // method id "pubsub.projects.snapshots.getIamPolicy": type ProjectsSnapshotsGetIamPolicyCall struct { @@ -1166,7 +1786,7 @@ func (c *ProjectsSnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != 
nil { return nil, err } return ret, nil @@ -1199,6 +1819,333 @@ func (c *ProjectsSnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } +// method id "pubsub.projects.snapshots.list": + +type ProjectsSnapshotsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the existing snapshots. +func (r *ProjectsSnapshotsService) List(project string) *ProjectsSnapshotsListCall { + c := &ProjectsSnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// snapshots to return. +func (c *ProjectsSnapshotsListCall) PageSize(pageSize int64) *ProjectsSnapshotsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The value returned +// by the last `ListSnapshotsResponse`; indicates that this +// is a continuation of a prior `ListSnapshots` call, and that the +// system +// should return the next page of data. +func (c *ProjectsSnapshotsListCall) PageToken(pageToken string) *ProjectsSnapshotsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSnapshotsListCall) Fields(s ...googleapi.Field) *ProjectsSnapshotsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ProjectsSnapshotsListCall) IfNoneMatch(entityTag string) *ProjectsSnapshotsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSnapshotsListCall) Context(ctx context.Context) *ProjectsSnapshotsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSnapshotsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSnapshotsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+project}/snapshots") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "pubsub.projects.snapshots.list" call. +// Exactly one of *ListSnapshotsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListSnapshotsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*ListSnapshotsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListSnapshotsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the existing snapshots.", + // "flatPath": "v1/projects/{projectsId}/snapshots", + // "httpMethod": "GET", + // "id": "pubsub.projects.snapshots.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "pageSize": { + // "description": "Maximum number of snapshots to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The value returned by the last `ListSnapshotsResponse`; indicates that this\nis a continuation of a prior `ListSnapshots` call, and that the system\nshould return the next page of data.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "The name of the cloud project that snapshots belong to.\nFormat is `projects/{project}`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+project}/snapshots", + // "response": { + // "$ref": "ListSnapshotsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *ProjectsSnapshotsListCall) Pages(ctx context.Context, f func(*ListSnapshotsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "pubsub.projects.snapshots.patch": + +type ProjectsSnapshotsPatchCall struct { + s *Service + name string + updatesnapshotrequest *UpdateSnapshotRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an existing snapshot. Note that certain properties of +// a +// snapshot are not modifiable. +func (r *ProjectsSnapshotsService) Patch(name string, updatesnapshotrequest *UpdateSnapshotRequest) *ProjectsSnapshotsPatchCall { + c := &ProjectsSnapshotsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.updatesnapshotrequest = updatesnapshotrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSnapshotsPatchCall) Fields(s ...googleapi.Field) *ProjectsSnapshotsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSnapshotsPatchCall) Context(ctx context.Context) *ProjectsSnapshotsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsSnapshotsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSnapshotsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.updatesnapshotrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "pubsub.projects.snapshots.patch" call. +// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Snapshot.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSnapshotsPatchCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Snapshot{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an existing snapshot. Note that certain properties of a\nsnapshot are not modifiable.", + // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", + // "httpMethod": "PATCH", + // "id": "pubsub.projects.snapshots.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the snapshot.", + // "location": "path", + // "pattern": "^projects/[^/]+/snapshots/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "UpdateSnapshotRequest" + // }, + // "response": { + // "$ref": "Snapshot" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + // method id "pubsub.projects.snapshots.setIamPolicy": type ProjectsSnapshotsSetIamPolicyCall struct { @@ -1301,7 +2248,7 @@ func (c *ProjectsSnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -1447,7 +2394,7 @@ func (c *ProjectsSnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOptio }, } target := &ret - if err := 
json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -1593,7 +2540,7 @@ func (c *ProjectsSubscriptionsAcknowledgeCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -1743,7 +2690,7 @@ func (c *ProjectsSubscriptionsCreateCall) Do(opts ...googleapi.CallOption) (*Sub }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -1880,7 +2827,7 @@ func (c *ProjectsSubscriptionsDeleteCall) Do(opts ...googleapi.CallOption) (*Emp }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2020,7 +2967,7 @@ func (c *ProjectsSubscriptionsGetCall) Do(opts ...googleapi.CallOption) (*Subscr }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2163,7 +3110,7 @@ func (c *ProjectsSubscriptionsGetIamPolicyCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2320,7 +3267,7 @@ func (c *ProjectsSubscriptionsListCall) Do(opts ...googleapi.CallOption) (*ListS }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2493,7 +3440,7 @@ func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Do(opts ...googleapi.CallOp }, } 
target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2639,7 +3586,7 @@ func (c *ProjectsSubscriptionsModifyPushConfigCall) Do(opts ...googleapi.CallOpt }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2675,6 +3622,144 @@ func (c *ProjectsSubscriptionsModifyPushConfigCall) Do(opts ...googleapi.CallOpt } +// method id "pubsub.projects.subscriptions.patch": + +type ProjectsSubscriptionsPatchCall struct { + s *Service + name string + updatesubscriptionrequest *UpdateSubscriptionRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an existing subscription. Note that certain properties +// of a +// subscription, such as its topic, are not modifiable. +func (r *ProjectsSubscriptionsService) Patch(name string, updatesubscriptionrequest *UpdateSubscriptionRequest) *ProjectsSubscriptionsPatchCall { + c := &ProjectsSubscriptionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.updatesubscriptionrequest = updatesubscriptionrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSubscriptionsPatchCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsSubscriptionsPatchCall) Context(ctx context.Context) *ProjectsSubscriptionsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSubscriptionsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSubscriptionsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.updatesubscriptionrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "pubsub.projects.subscriptions.patch" call. +// Exactly one of *Subscription or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Subscription.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSubscriptionsPatchCall) Do(opts ...googleapi.CallOption) (*Subscription, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Subscription{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an existing subscription. Note that certain properties of a\nsubscription, such as its topic, are not modifiable.", + // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", + // "httpMethod": "PATCH", + // "id": "pubsub.projects.subscriptions.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). 
It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", + // "location": "path", + // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "UpdateSubscriptionRequest" + // }, + // "response": { + // "$ref": "Subscription" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + // method id "pubsub.projects.subscriptions.pull": type ProjectsSubscriptionsPullCall struct { @@ -2781,7 +3866,7 @@ func (c *ProjectsSubscriptionsPullCall) Do(opts ...googleapi.CallOption) (*PullR }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -2817,6 +3902,144 @@ func (c *ProjectsSubscriptionsPullCall) Do(opts ...googleapi.CallOption) (*PullR } +// method id "pubsub.projects.subscriptions.seek": + +type ProjectsSubscriptionsSeekCall struct { + s *Service + subscription string + seekrequest *SeekRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Seek: Seeks an existing subscription to a point in time or to a given +// snapshot, +// whichever is provided in the request. +func (r *ProjectsSubscriptionsService) Seek(subscription string, seekrequest *SeekRequest) *ProjectsSubscriptionsSeekCall { + c := &ProjectsSubscriptionsSeekCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.subscription = subscription + c.seekrequest = seekrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsSubscriptionsSeekCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsSeekCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSubscriptionsSeekCall) Context(ctx context.Context) *ProjectsSubscriptionsSeekCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSubscriptionsSeekCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSubscriptionsSeekCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.seekrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:seek") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "subscription": c.subscription, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "pubsub.projects.subscriptions.seek" call. +// Exactly one of *SeekResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SeekResponse.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *ProjectsSubscriptionsSeekCall) Do(opts ...googleapi.CallOption) (*SeekResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SeekResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Seeks an existing subscription to a point in time or to a given snapshot,\nwhichever is provided in the request.", + // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:seek", + // "httpMethod": "POST", + // "id": "pubsub.projects.subscriptions.seek", + // "parameterOrder": [ + // "subscription" + // ], + // "parameters": { + // "subscription": { + // "description": "The subscription to affect.", + // "location": "path", + // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+subscription}:seek", + // "request": { + // "$ref": "SeekRequest" + // }, + // "response": { + // "$ref": "SeekResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + // method id "pubsub.projects.subscriptions.setIamPolicy": type ProjectsSubscriptionsSetIamPolicyCall struct { @@ -2919,7 +4142,7 @@ func (c *ProjectsSubscriptionsSetIamPolicyCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := 
gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3065,7 +4288,7 @@ func (c *ProjectsSubscriptionsTestIamPermissionsCall) Do(opts ...googleapi.CallO }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3201,7 +4424,7 @@ func (c *ProjectsTopicsCreateCall) Do(opts ...googleapi.CallOption) (*Topic, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3338,7 +4561,7 @@ func (c *ProjectsTopicsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, err }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3478,7 +4701,7 @@ func (c *ProjectsTopicsGetCall) Do(opts ...googleapi.CallOption) (*Topic, error) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3621,7 +4844,7 @@ func (c *ProjectsTopicsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3778,7 +5001,7 @@ func (c *ProjectsTopicsListCall) Do(opts ...googleapi.CallOption) (*ListTopicsRe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -3947,7 +5170,7 @@ func (c *ProjectsTopicsPublishCall) Do(opts ...googleapi.CallOption) (*PublishRe }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); 
err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4085,7 +5308,7 @@ func (c *ProjectsTopicsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4231,7 +5454,7 @@ func (c *ProjectsTopicsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil @@ -4267,6 +5490,195 @@ func (c *ProjectsTopicsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } +// method id "pubsub.projects.topics.snapshots.list": + +type ProjectsTopicsSnapshotsListCall struct { + s *Service + topic string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the names of the snapshots on this topic. +func (r *ProjectsTopicsSnapshotsService) List(topic string) *ProjectsTopicsSnapshotsListCall { + c := &ProjectsTopicsSnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.topic = topic + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// snapshot names to return. +func (c *ProjectsTopicsSnapshotsListCall) PageSize(pageSize int64) *ProjectsTopicsSnapshotsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The value returned +// by the last `ListTopicSnapshotsResponse`; indicates +// that this is a continuation of a prior `ListTopicSnapshots` call, +// and +// that the system should return the next page of data. 
+func (c *ProjectsTopicsSnapshotsListCall) PageToken(pageToken string) *ProjectsTopicsSnapshotsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsTopicsSnapshotsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsSnapshotsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsTopicsSnapshotsListCall) IfNoneMatch(entityTag string) *ProjectsTopicsSnapshotsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsTopicsSnapshotsListCall) Context(ctx context.Context) *ProjectsTopicsSnapshotsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsTopicsSnapshotsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsTopicsSnapshotsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}/snapshots") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "topic": c.topic, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "pubsub.projects.topics.snapshots.list" call. +// Exactly one of *ListTopicSnapshotsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ListTopicSnapshotsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsTopicsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*ListTopicSnapshotsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListTopicSnapshotsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the names of the snapshots on this topic.", + // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/snapshots", + // "httpMethod": "GET", + // "id": "pubsub.projects.topics.snapshots.list", + // "parameterOrder": [ + // "topic" + // ], + // "parameters": { + // "pageSize": { + // "description": "Maximum number of snapshot names to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // 
"description": "The value returned by the last `ListTopicSnapshotsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSnapshots` call, and\nthat the system should return the next page of data.", + // "location": "query", + // "type": "string" + // }, + // "topic": { + // "description": "The name of the topic that snapshots are attached to.\nFormat is `projects/{project}/topics/{topic}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/topics/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+topic}/snapshots", + // "response": { + // "$ref": "ListTopicSnapshotsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/pubsub" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsTopicsSnapshotsListCall) Pages(ctx context.Context, f func(*ListTopicSnapshotsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "pubsub.projects.topics.subscriptions.list": type ProjectsTopicsSubscriptionsListCall struct { @@ -4278,7 +5690,7 @@ type ProjectsTopicsSubscriptionsListCall struct { header_ http.Header } -// List: Lists the name of the subscriptions for this topic. +// List: Lists the names of the subscriptions on this topic. 
func (r *ProjectsTopicsSubscriptionsService) List(topic string) *ProjectsTopicsSubscriptionsListCall { c := &ProjectsTopicsSubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.topic = topic @@ -4391,12 +5803,12 @@ func (c *ProjectsTopicsSubscriptionsListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { + if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { - // "description": "Lists the name of the subscriptions for this topic.", + // "description": "Lists the names of the subscriptions on this topic.", // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/subscriptions", // "httpMethod": "GET", // "id": "pubsub.projects.topics.subscriptions.list", From 3909dc134172ee49f4ad92a0d9228d01df9de0f5 Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Tue, 19 Dec 2017 16:18:19 -0800 Subject: [PATCH 475/794] Avoid array growth in FilteredList. The method (*schedulerCache).FilteredList builds an array of *v1.Pod that contains every pod in the cluster except for those filtered out by a predicate. Today, it starts with a nil slice and appends to it. Based on current usage, FilteredList is expected to return every pod in the cluster or omit some pods from a single node. This change reserves array capacity equal to the total number of pods in the cluster. 
--- plugin/pkg/scheduler/schedulercache/cache.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/plugin/pkg/scheduler/schedulercache/cache.go b/plugin/pkg/scheduler/schedulercache/cache.go index 7e409903331..f891707d505 100644 --- a/plugin/pkg/scheduler/schedulercache/cache.go +++ b/plugin/pkg/scheduler/schedulercache/cache.go @@ -104,7 +104,14 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) { func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error) { cache.mu.Lock() defer cache.mu.Unlock() - var pods []*v1.Pod + // podFilter is expected to return true for most or all of the pods. We + // can avoid expensive array growth without wasting too much memory by + // pre-allocating capacity. + maxSize := 0 + for _, info := range cache.nodes { + maxSize += len(info.pods) + } + pods := make([]*v1.Pod, 0, maxSize) for _, info := range cache.nodes { for _, pod := range info.pods { if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) { From 4df4c8bdd2207104c4596b789186ac4f29fde6aa Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Thu, 21 Dec 2017 09:42:54 -0800 Subject: [PATCH 476/794] Allow integration test timeout override. This allows the test timeout to be overridden at the command line for integration tests. The default behavior is unchanged. e.g. make test-integration WHAT="./test/integration/scheduler" \ KUBE_TEST_ARGS="-run=. 
-count=10" \ KUBE_TIMEOUT="-timeout 1h" --- hack/make-rules/test-integration.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/hack/make-rules/test-integration.sh b/hack/make-rules/test-integration.sh index c180f45e89e..b545e2574c2 100755 --- a/hack/make-rules/test-integration.sh +++ b/hack/make-rules/test-integration.sh @@ -30,10 +30,8 @@ source "${KUBE_ROOT}/hack/lib/init.sh" ALL_VERSIONS_CSV=$(IFS=',';echo "${KUBE_AVAILABLE_GROUP_VERSIONS[*]// /,}";IFS=$) KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS:-${ALL_VERSIONS_CSV}}" -# Give integration tests longer to run -# TODO: allow a larger value to be passed in -#KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 240s} -KUBE_TIMEOUT="-timeout 600s" +# Give integration tests longer to run by default. +KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 600s} KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY:-"-1"} LOG_LEVEL=${LOG_LEVEL:-2} KUBE_TEST_ARGS=${KUBE_TEST_ARGS:-} From df60789a7e2ff02eb042f01727c92a5fb0291605 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Thu, 21 Dec 2017 02:56:09 -0500 Subject: [PATCH 477/794] Requeue unobserved nodes in attemptToDelete --- .../garbagecollector/garbagecollector.go | 40 +++++++------------ pkg/controller/garbagecollector/graph.go | 14 +++++++ .../garbagecollector/graph_builder.go | 20 +++++++++- 3 files changed, 46 insertions(+), 28 deletions(-) diff --git a/pkg/controller/garbagecollector/garbagecollector.go b/pkg/controller/garbagecollector/garbagecollector.go index 87f98d09357..957775177ae 100644 --- a/pkg/controller/garbagecollector/garbagecollector.go +++ b/pkg/controller/garbagecollector/garbagecollector.go @@ -38,7 +38,6 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly" _ "k8s.io/kubernetes/pkg/util/reflector/prometheus" // for reflector metric registration // install the prometheus plugin _ 
"k8s.io/kubernetes/pkg/util/workqueue/prometheus" @@ -259,24 +258,16 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool { } // retry if garbage collection of an object failed. gc.attemptToDelete.AddRateLimited(item) + } else if !n.isObserved() { + // requeue if item hasn't been observed via an informer event yet. + // otherwise a virtual node for an item added AND removed during watch reestablishment can get stuck in the graph and never removed. + // see https://issue.k8s.io/56121 + glog.V(5).Infof("item %s hasn't been observed via informer yet", n.identity) + gc.attemptToDelete.AddRateLimited(item) } return true } -func objectReferenceToMetadataOnlyObject(ref objectReference) *metaonly.MetadataOnlyObject { - return &metaonly.MetadataOnlyObject{ - TypeMeta: metav1.TypeMeta{ - APIVersion: ref.APIVersion, - Kind: ref.Kind, - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: ref.Namespace, - UID: ref.UID, - Name: ref.Name, - }, - } -} - // isDangling check if a reference is pointing to an object that doesn't exist. // If isDangling looks up the referenced object at the API server, it also // returns its latest state. @@ -353,15 +344,6 @@ func (gc *GarbageCollector) classifyReferences(item *node, latestReferences []me return solid, dangling, waitingForDependentsDeletion, nil } -func (gc *GarbageCollector) generateVirtualDeleteEvent(identity objectReference) { - event := &event{ - eventType: deleteEvent, - obj: objectReferenceToMetadataOnlyObject(identity), - } - glog.V(5).Infof("generating virtual delete event for %s\n\n", event.obj) - gc.dependencyGraphBuilder.enqueueChanges(event) -} - func ownerRefsToUIDs(refs []metav1.OwnerReference) []types.UID { var ret []types.UID for _, ref := range refs { @@ -387,7 +369,10 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error { // exist yet, so we need to enqueue a virtual Delete event to remove // the virtual node from GraphBuilder.uidToNode. 
glog.V(5).Infof("item %v not found, generating a virtual delete event", item.identity) - gc.generateVirtualDeleteEvent(item.identity) + gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity) + // since we're manually inserting a delete event to remove this node, + // we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete + item.markObserved() return nil case err != nil: return err @@ -395,7 +380,10 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error { if latest.GetUID() != item.identity.UID { glog.V(5).Infof("UID doesn't match, item %v not found, generating a virtual delete event", item.identity) - gc.generateVirtualDeleteEvent(item.identity) + gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity) + // since we're manually inserting a delete event to remove this node, + // we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete + item.markObserved() return nil } diff --git a/pkg/controller/garbagecollector/graph.go b/pkg/controller/garbagecollector/graph.go index 282256cbdba..7282d51b6f6 100644 --- a/pkg/controller/garbagecollector/graph.go +++ b/pkg/controller/garbagecollector/graph.go @@ -53,6 +53,9 @@ type node struct { // this records if the object's deletionTimestamp is non-nil. beingDeleted bool beingDeletedLock sync.RWMutex + // this records if the object was constructed virtually and never observed via informer event + virtual bool + virtualLock sync.RWMutex // when processing an Update event, we need to compare the updated // ownerReferences with the owners recorded in the graph. 
owners []metav1.OwnerReference @@ -72,6 +75,17 @@ func (n *node) isBeingDeleted() bool { return n.beingDeleted } +func (n *node) markObserved() { + n.virtualLock.Lock() + defer n.virtualLock.Unlock() + n.virtual = false +} +func (n *node) isObserved() bool { + n.virtualLock.RLock() + defer n.virtualLock.RUnlock() + return n.virtual == false +} + func (n *node) markDeletingDependents() { n.deletingDependentsLock.Lock() defer n.deletingDependentsLock.Unlock() diff --git a/pkg/controller/garbagecollector/graph_builder.go b/pkg/controller/garbagecollector/graph_builder.go index 355d3dea5fa..56afc5f5ce9 100644 --- a/pkg/controller/garbagecollector/graph_builder.go +++ b/pkg/controller/garbagecollector/graph_builder.go @@ -37,6 +37,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly" ) type eventType int @@ -369,8 +370,16 @@ func DefaultIgnoredResources() map[schema.GroupResource]struct{} { return ignoredResources } -func (gb *GraphBuilder) enqueueChanges(e *event) { - gb.graphChanges.Add(e) +// enqueueVirtualDeleteEvent is used to add a virtual delete event to be processed for virtual nodes +// once it is determined they do not have backing objects in storage +func (gb *GraphBuilder) enqueueVirtualDeleteEvent(ref objectReference) { + gb.graphChanges.Add(&event{ + eventType: deleteEvent, + obj: &metaonly.MetadataOnlyObject{ + TypeMeta: metav1.TypeMeta{APIVersion: ref.APIVersion, Kind: ref.Kind}, + ObjectMeta: metav1.ObjectMeta{Namespace: ref.Namespace, UID: ref.UID, Name: ref.Name}, + }, + }) } // addDependentToOwners adds n to owners' dependents list. 
If the owner does not @@ -389,6 +398,7 @@ func (gb *GraphBuilder) addDependentToOwners(n *node, owners []metav1.OwnerRefer Namespace: n.identity.Namespace, }, dependents: make(map[*node]struct{}), + virtual: true, } glog.V(5).Infof("add virtual node.identity: %s\n\n", ownerNode.identity) gb.uidToNode.Write(ownerNode) @@ -591,6 +601,12 @@ func (gb *GraphBuilder) processGraphChanges() bool { glog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, uid %s, event type %v", event.gvk.GroupVersion().String(), event.gvk.Kind, accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType) // Check if the node already exsits existingNode, found := gb.uidToNode.Read(accessor.GetUID()) + if found { + // this marks the node as having been observed via an informer event + // 1. this depends on graphChanges only containing add/update events from the actual informer + // 2. this allows things tracking virtual nodes' existence to stop polling and rely on informer events + existingNode.markObserved() + } switch { case (event.eventType == addEvent || event.eventType == updateEvent) && !found: newNode := &node{ From d0f262444f1acec9ea21d61808be041401228270 Mon Sep 17 00:00:00 2001 From: jennybuckley Date: Thu, 21 Dec 2017 11:10:05 -0800 Subject: [PATCH 478/794] add podtolerationrestriction config to scheme --- plugin/pkg/admission/podtolerationrestriction/admission.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugin/pkg/admission/podtolerationrestriction/admission.go b/plugin/pkg/admission/podtolerationrestriction/admission.go index 14ff93f221e..88345a2fbd8 100644 --- a/plugin/pkg/admission/podtolerationrestriction/admission.go +++ b/plugin/pkg/admission/podtolerationrestriction/admission.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/kubeapiserver/admission/util" "k8s.io/kubernetes/pkg/util/tolerations" pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" + 
pluginapiv1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) @@ -49,6 +50,9 @@ func Register(plugins *admission.Plugins) { } return NewPodTolerationsPlugin(pluginConfig), nil }) + // add our config types + pluginapi.AddToScheme(plugins.ConfigScheme) + pluginapiv1alpha1.AddToScheme(plugins.ConfigScheme) } // The annotation keys for default and whitelist of tolerations From 6ee191ab743d5f01cc4a135c183d5cfad98f20ff Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Wed, 20 Dec 2017 13:01:52 -0800 Subject: [PATCH 479/794] Refactor kubelet config controller bootstrap process This makes the bootstrap feel much more linear and as a result it is easier to read. Also simplifies status reporting for local config. --- pkg/kubelet/kubeletconfig/BUILD | 1 - pkg/kubelet/kubeletconfig/configsync.go | 2 +- pkg/kubelet/kubeletconfig/controller.go | 237 ++++++++++++------- pkg/kubelet/kubeletconfig/rollback.go | 70 ------ pkg/kubelet/kubeletconfig/status/status.go | 37 ++- test/e2e_node/dynamic_kubelet_config_test.go | 19 +- 6 files changed, 176 insertions(+), 190 deletions(-) delete mode 100644 pkg/kubelet/kubeletconfig/rollback.go diff --git a/pkg/kubelet/kubeletconfig/BUILD b/pkg/kubelet/kubeletconfig/BUILD index ba381a6fed7..d5ea255591a 100644 --- a/pkg/kubelet/kubeletconfig/BUILD +++ b/pkg/kubelet/kubeletconfig/BUILD @@ -10,7 +10,6 @@ go_library( srcs = [ "configsync.go", "controller.go", - "rollback.go", "watch.go", ], importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig", diff --git a/pkg/kubelet/kubeletconfig/configsync.go b/pkg/kubelet/kubeletconfig/configsync.go index 66f9eb9160e..ce6b7e38bc3 100644 --- a/pkg/kubelet/kubeletconfig/configsync.go +++ b/pkg/kubelet/kubeletconfig/configsync.go @@ -168,7 +168,7 @@ func (cc *Controller) setCurrentConfig(source checkpoint.RemoteConfigSource) (bo updated, err := cc.checkpointStore.SetCurrentUpdated(source) if 
err != nil { if source == nil { - return false, status.FailSyncReasonSetCurrentDefault, err + return false, status.FailSyncReasonSetCurrentLocal, err } return false, fmt.Sprintf(status.FailSyncReasonSetCurrentUIDFmt, source.UID()), err } diff --git a/pkg/kubelet/kubeletconfig/controller.go b/pkg/kubelet/kubeletconfig/controller.go index 5fe998b1960..9b959b4c94b 100644 --- a/pkg/kubelet/kubeletconfig/controller.go +++ b/pkg/kubelet/kubeletconfig/controller.go @@ -28,6 +28,8 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation" + + "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status" @@ -55,11 +57,8 @@ type Controller struct { // defaultConfig is the configuration to use if no initConfig is provided defaultConfig *kubeletconfig.KubeletConfiguration - // initConfig is the unmarshaled init config, this will be loaded by the Controller if an initConfigDir is provided - initConfig *kubeletconfig.KubeletConfiguration - - // initLoader is for loading the Kubelet's init configuration files from disk - initLoader configfiles.Loader + // fileLoader is for loading the Kubelet's local config files from disk + fileLoader configfiles.Loader // pendingConfigSource; write to this channel to indicate that the config source needs to be synced from the API server pendingConfigSource chan bool @@ -85,9 +84,9 @@ func NewController(defaultConfig *kubeletconfig.KubeletConfiguration, fs := utilfs.DefaultFs{} - var initLoader configfiles.Loader + var fileLoader configfiles.Loader if len(initConfigDir) > 0 { - initLoader, err = configfiles.NewFsLoader(fs, initConfigDir) + fileLoader, err = configfiles.NewFsLoader(fs, initConfigDir) if err != nil { return nil, err } @@ -104,7 +103,7 @@ func NewController(defaultConfig 
*kubeletconfig.KubeletConfiguration, pendingConfigSource: make(chan bool, 1), configOK: status.NewConfigOKCondition(), checkpointStore: store.NewFsStore(fs, filepath.Join(dynamicConfigDir, checkpointsDir)), - initLoader: initLoader, + fileLoader: fileLoader, }, nil } @@ -113,88 +112,72 @@ func NewController(defaultConfig *kubeletconfig.KubeletConfiguration, func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) { utillog.Infof("starting controller") - // ALWAYS validate the local (default and init) configs. This makes incorrectly provisioned nodes an error. - // These must be valid because they are the foundational last-known-good configs. - utillog.Infof("validating combination of defaults and flags") - if err := validation.ValidateKubeletConfiguration(cc.defaultConfig); err != nil { - return nil, fmt.Errorf("combination of defaults and flags failed validation, error: %v", err) - } - // only attempt to load and validate the init config if the user provided a path - if cc.initLoader != nil { - utillog.Infof("loading init config") - kc, err := cc.initLoader.Load() - if err != nil { - return nil, err - } - // validate the init config - utillog.Infof("validating init config") - if err := validation.ValidateKubeletConfiguration(kc); err != nil { - return nil, fmt.Errorf("failed to validate the init config, error: %v", err) - } - cc.initConfig = kc - } - // Assert: the default and init configs are both valid + // Load and validate the local config (defaults + flags, file) + local, err := cc.loadLocalConfig() + if err != nil { + return nil, err + } // Assert: the default and init configs are both valid - // if dynamic config is disabled, skip trying to load any checkpoints because they won't exist + // if dynamic config is disabled, we just stop here if !cc.dynamicConfig { - return cc.localConfig(), nil - } + // NOTE(mtaufen): We still need to update the status. 
+ // We expect to be able to disable dynamic config but still get a status update about the config. + // This is because the feature gate covers dynamic config AND config status reporting, while the + // --dynamic-config-dir flag just covers dynamic config. + cc.configOK.Set(status.NotDynamicLocalMessage, status.NotDynamicLocalReason, apiv1.ConditionTrue) + return local, nil + } // Assert: dynamic config is enabled - // assert: now we know that a dynamicConfigDir was provided, and we can rely on that existing - - // make sure the filesystem is set up properly + // ensure the filesystem is initialized if err := cc.initializeDynamicConfigDir(); err != nil { return nil, err } - // determine UID of the current config source - curUID := "" - if curSource, err := cc.checkpointStore.Current(); err != nil { - return nil, err - } else if curSource != nil { - curUID = curSource.UID() - } + assigned, curSource, reason, err := cc.loadAssignedConfig(local) + if err == nil { + // set the status to indicate we will use the assigned config + if curSource != nil { + cc.configOK.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curSource.UID()), reason, apiv1.ConditionTrue) + } else { + cc.configOK.Set(status.CurLocalMessage, reason, apiv1.ConditionTrue) + } - // if curUID indicates the local config should be used, return the correct one of those - if len(curUID) == 0 { - return cc.localConfig(), nil - } // Assert: we will not use the local configurations, unless we roll back to lkg; curUID is non-empty + // when the trial period is over, the assigned config becomes the last-known-good + if trial, err := cc.inTrial(assigned.ConfigTrialDuration.Duration); err != nil { + utillog.Errorf("failed to check trial period for assigned config, error: %v", err) + } else if !trial { + utillog.Infof("assigned config passed trial period, will set as last-known-good") + if err := cc.graduateAssignedToLastKnownGood(); err != nil { + utillog.Errorf("failed to set last-known-good to assigned config, error: 
%v", err) + } + } - // TODO(mtaufen): consider re-verifying integrity and re-attempting download when a load/verify/parse/validate + return assigned, nil + } // Assert: the assigned config failed to load, parse, or validate + + // TODO(mtaufen): consider re-attempting download when a load/verify/parse/validate // error happens outside trial period, we already made it past the trial so it's probably filesystem corruption // or something else scary (unless someone is using a 0-length trial period) + // load from checkpoint - // load the current config - checkpoint, err := cc.checkpointStore.Load(curUID) + // log the reason and error details for the failure to load the assigned config + utillog.Errorf(fmt.Sprintf("%s, error: %v", reason, err)) + + // load the last-known-good config + lkg, lkgSource, err := cc.loadLastKnownGoodConfig(local) if err != nil { - // TODO(mtaufen): rollback for now, but this could reasonably be handled by re-attempting a download, - // it probably indicates some sort of corruption - return cc.lkgRollback(fmt.Sprintf(status.CurFailLoadReasonFmt, curUID), fmt.Sprintf("error: %v", err)) - } - - // parse the checkpoint into a KubeletConfiguration - cur, err := checkpoint.Parse() - if err != nil { - return cc.lkgRollback(fmt.Sprintf(status.CurFailParseReasonFmt, curUID), fmt.Sprintf("error: %v", err)) - } - - // validate current config - if err := validation.ValidateKubeletConfiguration(cur); err != nil { - return cc.lkgRollback(fmt.Sprintf(status.CurFailValidateReasonFmt, curUID), fmt.Sprintf("error: %v", err)) - } - - // when the trial period is over, the current config becomes the last-known-good - if trial, err := cc.inTrial(cur.ConfigTrialDuration.Duration); err != nil { return nil, err - } else if !trial { - if err := cc.graduateCurrentToLastKnownGood(); err != nil { - return nil, err - } } - // update the status to note that we will use the current config - cc.configOK.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curUID), 
status.CurRemoteOKReason, apiv1.ConditionTrue) - return cur, nil + // set the status to indicate that we had to roll back to the lkg for the reason reported when we tried to load the assigned config + if lkgSource != nil { + cc.configOK.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgSource.UID()), reason, apiv1.ConditionFalse) + } else { + cc.configOK.Set(status.LkgLocalMessage, reason, apiv1.ConditionFalse) + } + + // return the last-known-good config + return lkg, nil } // StartSync launches the controller's sync loops if `client` is non-nil and `nodeName` is non-empty. @@ -243,6 +226,93 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E } } +// loadLocalConfig returns the local config: either the defaults provided to the controller or +// a local config file, if the Kubelet is configured to use the local file +func (cc *Controller) loadLocalConfig() (*kubeletconfig.KubeletConfiguration, error) { + // ALWAYS validate the local configs. This makes incorrectly provisioned nodes an error. + // These must be valid because they are the default last-known-good configs. 
+ utillog.Infof("validating combination of defaults and flags") + if err := validation.ValidateKubeletConfiguration(cc.defaultConfig); err != nil { + return nil, fmt.Errorf("combination of defaults and flags failed validation, error: %v", err) + } + // only attempt to load and validate the Kubelet config file if the user provided a path + if cc.fileLoader != nil { + utillog.Infof("loading Kubelet config file") + kc, err := cc.fileLoader.Load() + if err != nil { + return nil, err + } + // validate the Kubelet config file config + utillog.Infof("validating Kubelet config file") + if err := validation.ValidateKubeletConfiguration(kc); err != nil { + return nil, fmt.Errorf("failed to validate the Kubelet config file, error: %v", err) + } + return kc, nil + } + // if no Kubelet config file config, just return the default + return cc.defaultConfig, nil +} + +// loadAssignedConfig loads the Kubelet's currently assigned config, +// based on the setting in the local checkpoint store. +// It returns the loaded configuration, the checkpoint store's config source record, +// a clean success or failure reason that can be reported in the status, and any error that occurs. +// If the local config should be used, it will be returned. You should validate local before passing it to this function. 
+func (cc *Controller) loadAssignedConfig(local *kubeletconfig.KubeletConfiguration) (*kubeletconfig.KubeletConfiguration, checkpoint.RemoteConfigSource, string, error) { + src, err := cc.checkpointStore.Current() + if err != nil { + return nil, nil, fmt.Sprintf(status.CurFailLoadReasonFmt, "unknown"), err + } + // nil source is the signal to use the local config + if src == nil { + return local, src, status.CurLocalOkayReason, nil + } + curUID := src.UID() + // load from checkpoint + checkpoint, err := cc.checkpointStore.Load(curUID) + if err != nil { + return nil, src, fmt.Sprintf(status.CurFailLoadReasonFmt, curUID), err + } + cur, err := checkpoint.Parse() + if err != nil { + return nil, src, fmt.Sprintf(status.CurFailParseReasonFmt, curUID), err + } + if err := validation.ValidateKubeletConfiguration(cur); err != nil { + return nil, src, fmt.Sprintf(status.CurFailValidateReasonFmt, curUID), err + } + return cur, src, status.CurRemoteOkayReason, nil +} + +// loadLastKnownGoodConfig loads the Kubelet's last-known-good config, +// based on the setting in the local checkpoint store. +// It returns the loaded configuration, the checkpoint store's config source record, +// and any error that occurs. +// If the local config should be used, it will be returned. You should validate local before passing it to this function. 
+func (cc *Controller) loadLastKnownGoodConfig(local *kubeletconfig.KubeletConfiguration) (*kubeletconfig.KubeletConfiguration, checkpoint.RemoteConfigSource, error) { + src, err := cc.checkpointStore.LastKnownGood() + if err != nil { + return nil, nil, fmt.Errorf("unable to determine last-known-good config, error: %v", err) + } + // nil source is the signal to use the local config + if src == nil { + return local, src, nil + } + lkgUID := src.UID() + // load from checkpoint + checkpoint, err := cc.checkpointStore.Load(lkgUID) + if err != nil { + return nil, src, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailLoadReasonFmt, lkgUID), err) + } + lkg, err := checkpoint.Parse() + if err != nil { + return nil, src, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailParseReasonFmt, lkgUID), err) + } + if err := validation.ValidateKubeletConfiguration(lkg); err != nil { + return nil, src, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailValidateReasonFmt, lkgUID), err) + } + return lkg, src, nil +} + // initializeDynamicConfigDir makes sure that the storage layers for various controller components are set up correctly func (cc *Controller) initializeDynamicConfigDir() error { utillog.Infof("ensuring filesystem is set up correctly") @@ -250,17 +320,6 @@ func (cc *Controller) initializeDynamicConfigDir() error { return cc.checkpointStore.Initialize() } -// localConfig returns the initConfig if it is loaded, otherwise returns the defaultConfig. -// It also sets the local configOK condition to match the returned config. 
-func (cc *Controller) localConfig() *kubeletconfig.KubeletConfiguration { - if cc.initConfig != nil { - cc.configOK.Set(status.CurInitMessage, status.CurInitOKReason, apiv1.ConditionTrue) - return cc.initConfig - } - cc.configOK.Set(status.CurDefaultMessage, status.CurDefaultOKReason, apiv1.ConditionTrue) - return cc.defaultConfig -} - // inTrial returns true if the time elapsed since the last modification of the current config does not exceed `trialDur`, false otherwise func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) { now := time.Now() @@ -274,16 +333,16 @@ func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) { return false, nil } -// graduateCurrentToLastKnownGood sets the last-known-good UID on the checkpointStore +// graduateAssignedToLastKnownGood sets the last-known-good UID on the checkpointStore // to the same value as the current UID maintained by the checkpointStore -func (cc *Controller) graduateCurrentToLastKnownGood() error { +func (cc *Controller) graduateAssignedToLastKnownGood() error { curUID, err := cc.checkpointStore.Current() if err != nil { - return fmt.Errorf("could not graduate last-known-good config to current config, error: %v", err) + return err } err = cc.checkpointStore.SetLastKnownGood(curUID) if err != nil { - return fmt.Errorf("could not graduate last-known-good config to current config, error: %v", err) + return err } return nil } diff --git a/pkg/kubelet/kubeletconfig/rollback.go b/pkg/kubelet/kubeletconfig/rollback.go deleted file mode 100644 index a2789566345..00000000000 --- a/pkg/kubelet/kubeletconfig/rollback.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubeletconfig - -import ( - "fmt" - - apiv1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" - "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation" - "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status" - utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" -) - -// lkgRollback returns a valid last-known-good configuration, and updates the `cc.configOK` condition -// regarding the `reason` for the rollback, or returns an error if a valid last-known-good could not be produced -func (cc *Controller) lkgRollback(reason, detail string) (*kubeletconfig.KubeletConfiguration, error) { - utillog.Errorf(fmt.Sprintf("%s, %s", reason, detail)) - - lkgUID := "" - if lkgSource, err := cc.checkpointStore.LastKnownGood(); err != nil { - return nil, fmt.Errorf("unable to determine last-known-good config, error: %v", err) - } else if lkgSource != nil { - lkgUID = lkgSource.UID() - } - - // if lkgUID indicates the default should be used, return initConfig or defaultConfig - if len(lkgUID) == 0 { - if cc.initConfig != nil { - cc.configOK.Set(status.LkgInitMessage, reason, apiv1.ConditionFalse) - return cc.initConfig, nil - } - cc.configOK.Set(status.LkgDefaultMessage, reason, apiv1.ConditionFalse) - return cc.defaultConfig, nil - } - - // load - checkpoint, err := cc.checkpointStore.Load(lkgUID) - if err != nil { - return nil, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailLoadReasonFmt, lkgUID), err) - } - - // parse - lkg, err := checkpoint.Parse() - if err != nil { - return nil, fmt.Errorf("%s, error: %v", 
fmt.Sprintf(status.LkgFailParseReasonFmt, lkgUID), err) - } - - // validate - if err := validation.ValidateKubeletConfiguration(lkg); err != nil { - return nil, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailValidateReasonFmt, lkgUID), err) - } - - cc.configOK.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgUID), reason, apiv1.ConditionFalse) - return lkg, nil -} diff --git a/pkg/kubelet/kubeletconfig/status/status.go b/pkg/kubelet/kubeletconfig/status/status.go index b83e6891d78..94598cae6eb 100644 --- a/pkg/kubelet/kubeletconfig/status/status.go +++ b/pkg/kubelet/kubeletconfig/status/status.go @@ -32,28 +32,27 @@ import ( utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" ) +// TODO(mtaufen): s/current/assigned, as this is more accurate e.g. if you are using lkg, you aren't currently using "current" :) const ( - // CurDefaultMessage indicates the Kubelet is using it's current config, which is the default - CurDefaultMessage = "using current (default)" - // LkgDefaultMessage indicates the Kubelet is using it's last-known-good config, which is the default - LkgDefaultMessage = "using last-known-good (default)" + // NotDynamicLocalMessage indicates that the Kubelet is using its local config - we send this when dynamic Kubelet config is disabled by omitting the --dynamic-config-dir flag + NotDynamicLocalMessage = "using local config" + // NotDynamicLocalReason indicates that the Kubelet is using its local config - we send this when dynamic Kubelet config is disabled by omitting the --dynamic-config-dir flag + NotDynamicLocalReason = "dynamic config is currently disabled by omission of --dynamic-config-dir Kubelet flag" - // CurInitMessage indicates the Kubelet is using it's current config, which is from the init config files - CurInitMessage = "using current (init)" - // LkgInitMessage indicates the Kubelet is using it's last-known-good config, which is from the init config files - LkgInitMessage = "using last-known-good (init)" + // CurLocalMessage 
indicates that the Kubelet is using its local config, which consists of defaults, flags, and/or local files + CurLocalMessage = "using current (local)" + // LkgLocalMessage indicates that the Kubelet is using its local config, which consists of defaults, flags, and/or local files + LkgLocalMessage = "using last-known-good (local)" - // CurRemoteMessageFmt indicates the Kubelet is usin it's current config, which is from an API source + // CurRemoteMessageFmt indicates the Kubelet is using its current config, which is from an API source CurRemoteMessageFmt = "using current (UID: %q)" - // LkgRemoteMessageFmt indicates the Kubelet is using it's last-known-good config, which is from an API source + // LkgRemoteMessageFmt indicates the Kubelet is using its last-known-good config, which is from an API source LkgRemoteMessageFmt = "using last-known-good (UID: %q)" - // CurDefaultOKReason indicates that no init config files were provided - CurDefaultOKReason = "current is set to the local default, and no init config was provided" - // CurInitOKReason indicates that init config files were provided - CurInitOKReason = "current is set to the local default, and an init config was provided" - // CurRemoteOKReason indicates that the config referenced by Node.ConfigSource is currently passing all checks - CurRemoteOKReason = "passing all checks" + // CurLocalOkayReason indicates that the Kubelet is using its local config + CurLocalOkayReason = "when the config source is nil, the Kubelet uses its local config" + // CurRemoteOkayReason indicates that the config referenced by Node.ConfigSource is currently passing all checks + CurRemoteOkayReason = "passing all checks" // CurFailLoadReasonFmt indicates that the Kubelet failed to load the current config checkpoint for an API source CurFailLoadReasonFmt = "failed to load current (UID: %q)" @@ -61,8 +60,6 @@ const ( CurFailParseReasonFmt = "failed to parse current (UID: %q)" // CurFailValidateReasonFmt indicates that the Kubelet failed 
to validate the current config checkpoint for an API source CurFailValidateReasonFmt = "failed to validate current (UID: %q)" - // CurFailCrashLoopReasonFmt indicates that the Kubelet experienced a crash loop while using the current config checkpoint for an API source - CurFailCrashLoopReasonFmt = "current failed trial period due to crash loop (UID: %q)" // LkgFail*ReasonFmt reasons are currently used to print errors in the Kubelet log, but do not appear in Node.Status.Conditions @@ -87,13 +84,13 @@ const ( // FailSyncReasonInformer is used when the informer fails to report the Node object FailSyncReasonInformer = "failed to read Node from informer object cache" // FailSyncReasonReset is used when we can't reset the local configuration references, e.g. due to filesystem issues - FailSyncReasonReset = "failed to reset to local (default or init) config" + FailSyncReasonReset = "failed to reset to local config" // FailSyncReasonCheckpointExistenceFmt is used when we can't determine if a checkpoint already exists, e.g. due to filesystem issues FailSyncReasonCheckpointExistenceFmt = "failed to determine whether object with UID %q was already checkpointed" // FailSyncReasonSaveCheckpointFmt is used when we can't save a checkpoint, e.g. due to filesystem issues FailSyncReasonSaveCheckpointFmt = "failed to save config checkpoint for object with UID %q" // FailSyncReasonSetCurrentDefault is used when we can't set the current config checkpoint to the local default, e.g. due to filesystem issues - FailSyncReasonSetCurrentDefault = "failed to set current config checkpoint to default" + FailSyncReasonSetCurrentLocal = "failed to set current config checkpoint to local config" // FailSyncReasonSetCurrentUIDFmt is used when we can't set the current config checkpoint to a checkpointed object, e.g. 
due to filesystem issues FailSyncReasonSetCurrentUIDFmt = "failed to set current config checkpoint to object with UID %q" diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index a0e9da80857..2ab9f7d8195 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -87,7 +87,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Name: originalConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: fmt.Sprintf(status.CurRemoteMessageFmt, originalConfigMap.UID), - Reason: status.CurRemoteOKReason}, + Reason: status.CurRemoteOkayReason}, expectConfig: originalKC, }, false) }) @@ -126,8 +126,8 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube {desc: "Node.Spec.ConfigSource is nil", configSource: nil, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, - Message: status.CurDefaultMessage, - Reason: status.CurDefaultOKReason}, + Message: status.CurLocalMessage, + Reason: status.CurLocalOkayReason}, expectConfig: nil, event: true, }, @@ -175,7 +175,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Name: correctConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: fmt.Sprintf(status.CurRemoteMessageFmt, correctConfigMap.UID), - Reason: status.CurRemoteOKReason}, + Reason: status.CurRemoteOkayReason}, expectConfig: correctKC, event: true, }, @@ -187,7 +187,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Namespace: failParseConfigMap.Namespace, Name: failParseConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, - Message: status.LkgDefaultMessage, + Message: status.LkgLocalMessage, Reason: 
fmt.Sprintf(status.CurFailParseReasonFmt, failParseConfigMap.UID)}, expectConfig: nil, event: true, @@ -200,7 +200,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Namespace: failValidateConfigMap.Namespace, Name: failValidateConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, - Message: status.LkgDefaultMessage, + Message: status.LkgLocalMessage, Reason: fmt.Sprintf(status.CurFailValidateReasonFmt, failValidateConfigMap.UID)}, expectConfig: nil, event: true, @@ -245,7 +245,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Name: lkgConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: fmt.Sprintf(status.CurRemoteMessageFmt, lkgConfigMap.UID), - Reason: status.CurRemoteOKReason}, + Reason: status.CurRemoteOkayReason}, expectConfig: lkgKC, event: true, }, @@ -295,10 +295,11 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Name: cm1.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm1.UID), - Reason: status.CurRemoteOKReason}, + Reason: status.CurRemoteOkayReason}, expectConfig: kc1, event: true, }, + {desc: "cm2", configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{ UID: cm2.UID, @@ -306,7 +307,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Name: cm2.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm2.UID), - Reason: status.CurRemoteOKReason}, + Reason: status.CurRemoteOkayReason}, expectConfig: kc2, event: true, }, From 1b814c43ad105194742ad2add4e07045f90b15f2 Mon Sep 17 00:00:00 2001 From: Mike Wilson Date: Thu, 21 Dec 2017 18:22:43 -0500 Subject: [PATCH 480/794] Changing ingress 
from manually scaling rc to a daemon set. --- .../reactive/kubernetes_worker.py | 84 ++++++++++++++----- ...ontroller.yaml => ingress-daemon-set.yaml} | 41 ++++----- 2 files changed, 83 insertions(+), 42 deletions(-) rename cluster/juju/layers/kubernetes-worker/templates/{ingress-replication-controller.yaml => ingress-daemon-set.yaml} (74%) diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index 3ff17998c83..917e4291406 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -20,6 +20,7 @@ import random import shutil import subprocess import time +import json from shlex import split from subprocess import check_call, check_output @@ -63,6 +64,10 @@ def upgrade_charm(): cleanup_pre_snap_services() check_resources_for_upgrade_needed() + # Remove the RC for nginx ingress if it exists + if hookenv.config().get('ingress'): + kubectl_success('delete', 'rc', 'nginx-ingress-controller') + # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags, # since they can differ between k8s versions remove_state('kubernetes-worker.gpu.enabled') @@ -373,7 +378,7 @@ def sdn_changed(): @when('kubernetes-worker.config.created') @when_not('kubernetes-worker.ingress.available') def render_and_launch_ingress(): - ''' If configuration has ingress RC enabled, launch the ingress load + ''' If configuration has ingress daemon set enabled, launch the ingress load balancer and default http backend. Otherwise attempt deletion. 
''' config = hookenv.config() # If ingress is enabled, launch the ingress controller @@ -384,23 +389,11 @@ def render_and_launch_ingress(): kubectl_manifest('delete', '/root/cdk/addons/default-http-backend.yaml') kubectl_manifest('delete', - '/root/cdk/addons/ingress-replication-controller.yaml') # noqa + '/root/cdk/addons/ingress-daemon-set.yaml') # noqa hookenv.close_port(80) hookenv.close_port(443) -@when('kubernetes-worker.ingress.available') -def scale_ingress_controller(): - ''' Scale the number of ingress controller replicas to match the number of - nodes. ''' - try: - output = kubectl('get', 'nodes', '-o', 'name') - count = len(output.splitlines()) - kubectl('scale', '--replicas=%d' % count, 'rc/nginx-ingress-controller') # noqa - except CalledProcessError: - hookenv.log('Failed to scale ingress controllers. Will attempt again next update.') # noqa - - @when('config.changed.labels', 'kubernetes-worker.config.created') def apply_node_labels(): ''' Parse the labels configuration option and apply the labels to the node. 
@@ -429,6 +422,10 @@ def apply_node_labels(): for label in user_labels: _apply_node_label(label, overwrite=True) + # Set label for application name + _apply_node_label('juju-application={}'.format(hookenv.service_name()), + overwrite=True) + @when_any('config.changed.kubelet-extra-args', 'config.changed.proxy-extra-args') @@ -653,15 +650,16 @@ def launch_default_ingress_controller(): hookenv.close_port(443) return - # Render the ingress replication controller manifest + # Render the ingress daemon set controller manifest context['ingress_image'] = \ "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.13" if arch() == 's390x': context['ingress_image'] = \ "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13" - manifest = addon_path.format('ingress-replication-controller.yaml') - render('ingress-replication-controller.yaml', manifest, context) - hookenv.log('Creating the ingress replication controller.') + context['juju_application'] = hookenv.service_name() + manifest = addon_path.format('ingress-daemon-set.yaml') + render('ingress-daemon-set.yaml', manifest, context) + hookenv.log('Creating the ingress daemon set.') try: kubectl('apply', '-f', manifest) except CalledProcessError as e: @@ -934,24 +932,66 @@ def _systemctl_is_active(application): return False +class GetNodeNameFailed(Exception): + pass + + +def get_node_name(): + # Get all the nodes in the cluster + cmd = 'kubectl --kubeconfig={} get no -o=json'.format(kubeconfig_path) + cmd = cmd.split() + deadline = time.time() + 60 + while time.time() < deadline: + try: + raw = check_output(cmd) + break + except CalledProcessError: + hookenv.log('Failed to get node name for node %s.' + ' Will retry.' 
% (gethostname())) + time.sleep(1) + else: + msg = 'Failed to get node name for node %s' % gethostname() + raise GetNodeNameFailed(msg) + + result = json.loads(raw.decode('utf-8')) + if 'items' in result: + for node in result['items']: + if 'status' not in node: + continue + if 'addresses' not in node['status']: + continue + + # find the hostname + for address in node['status']['addresses']: + if address['type'] == 'Hostname': + if address['address'] == gethostname(): + return node['metadata']['name'] + + # if we didn't match, just bail to the next node + break + return "" + + class ApplyNodeLabelFailed(Exception): pass def _apply_node_label(label, delete=False, overwrite=False): ''' Invoke kubectl to apply node label changes ''' + nodename = get_node_name() + if nodename == "": + msg = 'Unable to get node name for node {}'.format(gethostname()) + raise ApplyNodeLabelFailed(msg) - # k8s lowercases hostnames and uses them as node names - hostname = gethostname().lower() # TODO: Make this part of the kubectl calls instead of a special string cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}' if delete is True: label_key = label.split('=')[0] - cmd = cmd_base.format(kubeconfig_path, hostname, label_key) + cmd = cmd_base.format(kubeconfig_path, nodename, label_key) cmd = cmd + '-' else: - cmd = cmd_base.format(kubeconfig_path, hostname, label) + cmd = cmd_base.format(kubeconfig_path, nodename, label) if overwrite: cmd = '{} --overwrite'.format(cmd) cmd = cmd.split() diff --git a/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml b/cluster/juju/layers/kubernetes-worker/templates/ingress-daemon-set.yaml similarity index 74% rename from cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml rename to cluster/juju/layers/kubernetes-worker/templates/ingress-daemon-set.yaml index 933b1e2c00b..1254f6d41db 100644 --- a/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml +++ 
b/cluster/juju/layers/kubernetes-worker/templates/ingress-daemon-set.yaml @@ -1,12 +1,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: nginx-ingress-serviceaccount + name: nginx-ingress-{{ juju_application }}-serviceaccount --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: - name: nginx-ingress-clusterrole + name: nginx-ingress-{{ juju_application }}-clusterrole rules: - apiGroups: - "" @@ -58,7 +58,7 @@ rules: apiVersion: rbac.authorization.k8s.io/v1beta1 kind: Role metadata: - name: nginx-ingress-role + name: nginx-ingress-{{ juju_application }}-role rules: - apiGroups: - "" @@ -100,57 +100,58 @@ rules: apiVersion: rbac.authorization.k8s.io/v1beta1 kind: RoleBinding metadata: - name: nginx-ingress-role-nisa-binding + name: nginx-ingress-role-nisa-{{ juju_application }}-binding roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: nginx-ingress-role + name: nginx-ingress-{{ juju_application }}-role subjects: - kind: ServiceAccount - name: nginx-ingress-serviceaccount + name: nginx-ingress-{{ juju_application }}-serviceaccount --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: - name: nginx-ingress-clusterrole-nisa-binding + name: nginx-ingress-clusterrole-nisa-{{ juju_application }}-binding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: nginx-ingress-clusterrole + name: nginx-ingress-{{ juju_application }}-clusterrole subjects: - kind: ServiceAccount - name: nginx-ingress-serviceaccount + name: nginx-ingress-{{ juju_application }}-serviceaccount namespace: default --- apiVersion: v1 kind: ConfigMap metadata: - name: nginx-load-balancer-conf + name: nginx-load-balancer-{{ juju_application }}-conf --- -apiVersion: v1 -kind: ReplicationController +apiVersion: apps/v1beta2 +kind: DaemonSet metadata: - name: nginx-ingress-controller + name: nginx-ingress-{{ juju_application }}-controller labels: - k8s-app: nginx-ingress-lb + juju-application: nginx-ingress-{{ 
juju_application }} spec: - replicas: 1 selector: - k8s-app: nginx-ingress-lb + matchLabels: + name: nginx-ingress-{{ juju_application }} template: metadata: labels: - k8s-app: nginx-ingress-lb - name: nginx-ingress-lb + name: nginx-ingress-{{ juju_application }} spec: + nodeSelector: + juju-application: {{ juju_application }} terminationGracePeriodSeconds: 60 # hostPort doesn't work with CNI, so we have to use hostNetwork instead # see https://github.com/kubernetes/kubernetes/issues/23920 hostNetwork: true - serviceAccountName: nginx-ingress-serviceaccount + serviceAccountName: nginx-ingress-{{ juju_application }}-serviceaccount containers: - image: {{ ingress_image }} - name: nginx-ingress-lb + name: nginx-ingress-{{ juju_application }} imagePullPolicy: Always livenessProbe: httpGet: From 732e785e0a976bc1f31ced4bd4263438a9b865c7 Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Mon, 18 Dec 2017 10:14:01 -0800 Subject: [PATCH 481/794] Performance improvement for affinity term matching. When a PodAffinityTerm uses TopologyKey=kubernetes.io/hostname, we can avoid searching the entire cluster for a match by only listing pods on the given node. --- .../scheduler/algorithm/predicates/predicates.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index 7ae07750b29..eaac3e77b97 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -1116,7 +1116,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm // First return value indicates whether a matching pod exists on a node that matches the topology key, // while the second return value indicates whether a matching pod exists anywhere. // TODO: Do we really need any pod matching, or all pods matching? I think the latter. 
-func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, allPods []*v1.Pod, node *v1.Node, term *v1.PodAffinityTerm) (bool, bool, error) { +func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, pods []*v1.Pod, nodeInfo *schedulercache.NodeInfo, term *v1.PodAffinityTerm) (bool, bool, error) { if len(term.TopologyKey) == 0 { return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity") } @@ -1126,7 +1126,12 @@ func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, allPods [ if err != nil { return false, false, err } - for _, existingPod := range allPods { + // Special case: When the topological domain is node, we can limit our + // search to pods on that node without searching the entire cluster. + if term.TopologyKey == kubeletapis.LabelHostname { + pods = nodeInfo.Pods() + } + for _, existingPod := range pods { match := priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) if match { matchingPodExists = true @@ -1134,7 +1139,7 @@ func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, allPods [ if err != nil { return false, matchingPodExists, err } - if priorityutil.NodesHaveSameTopologyKey(node, existingPodNode, term.TopologyKey) { + if priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), existingPodNode, term.TopologyKey) { return true, matchingPodExists, nil } } @@ -1334,7 +1339,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node // Check all affinity terms. 
for _, term := range GetPodAffinityTerms(affinity.PodAffinity) { - termMatches, matchingPodExists, err := c.anyPodMatchesPodAffinityTerm(pod, filteredPods, node, &term) + termMatches, matchingPodExists, err := c.anyPodMatchesPodAffinityTerm(pod, filteredPods, nodeInfo, &term) if err != nil { errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinityTerm %v, err: %v", podName(pod), node.Name, term, err) glog.Error(errMessage) @@ -1367,7 +1372,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node // Check all anti-affinity terms. for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { - termMatches, _, err := c.anyPodMatchesPodAffinityTerm(pod, filteredPods, node, &term) + termMatches, _, err := c.anyPodMatchesPodAffinityTerm(pod, filteredPods, nodeInfo, &term) if err != nil || termMatches { glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm %v, err: %v", podName(pod), node.Name, term, err) From 33d4fe4074f8af9ebd11460a8836a90166446649 Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Thu, 21 Dec 2017 16:05:50 -0800 Subject: [PATCH 482/794] dynamic config test: use a hyphen between the config name and the unique suffix --- test/e2e_node/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index de6f25d02e8..0d00f88944e 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -302,7 +302,7 @@ func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfigura framework.ExpectNoError(err) cmap := &apiv1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{GenerateName: name}, + ObjectMeta: metav1.ObjectMeta{GenerateName: name + "-"}, Data: map[string]string{ "kubelet": string(data), }, From 6ac0f4198a6ec81a1e289c6e5cf5015c6e426c3f Mon Sep 17 00:00:00 2001 From: tanshanshan Date: Thu, 21 Dec 2017 16:06:45 +0800 Subject: [PATCH 483/794] rename key --- 
plugin/pkg/scheduler/core/generic_scheduler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/pkg/scheduler/core/generic_scheduler.go b/plugin/pkg/scheduler/core/generic_scheduler.go index e9e7e95a6c2..f97e1e2feb6 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler.go +++ b/plugin/pkg/scheduler/core/generic_scheduler.go @@ -64,7 +64,7 @@ const ( // The scheduler uses the annotation to find that the pod shouldn't preempt more pods // when it gets to the head of scheduling queue again. // See podEligibleToPreemptOthers() for more information. - NominatedNodeAnnotationKey = "NominatedNodeName" + NominatedNodeAnnotationKey = "scheduler.kubernetes.io/nominated-node-name" ) // Error returns detailed information of why the pod failed to fit on each node From a7504795da765a0e0f1a8df0b6d6adf23c4bf3c1 Mon Sep 17 00:00:00 2001 From: Malhar Vora Date: Thu, 21 Dec 2017 19:38:47 -0800 Subject: [PATCH 484/794] Correct wrong error message for kubeadm dns addon --- cmd/kubeadm/app/phases/addons/dns/dns.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/phases/addons/dns/dns.go b/cmd/kubeadm/app/phases/addons/dns/dns.go index 072359a8e5b..40ab0d71fac 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns.go @@ -203,7 +203,7 @@ func createCoreDNSAddon(deploymentBytes, serviceBytes, configBytes []byte, clien coreDNSServiceAccount := &v1.ServiceAccount{} if err := kuberuntime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), []byte(CoreDNSServiceAccount), coreDNSServiceAccount); err != nil { - return fmt.Errorf("unable to decode CoreDNS configmap %v", err) + return fmt.Errorf("unable to decode CoreDNS serviceaccount %v", err) } // Create the ConfigMap for CoreDNS or update it in case it already exists From 5052f3aed0d8d975bf7745f73141ef96e86862a0 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Fri, 22 Dec 2017 13:49:47 +0800 Subject: [PATCH 485/794] add error string reference 
--- pkg/util/ipset/ipset.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/util/ipset/ipset.go b/pkg/util/ipset/ipset.go index 56993c26305..7ae28050097 100644 --- a/pkg/util/ipset/ipset.go +++ b/pkg/util/ipset/ipset.go @@ -329,10 +329,13 @@ func IsNotFoundError(err error) bool { es := err.Error() if strings.Contains(es, "does not exist") { // set with the same name already exists + // xref: https://github.com/Olipro/ipset/blob/master/lib/errcode.c#L32-L33 return true } if strings.Contains(es, "element is missing") { // entry is missing from the set + // xref: https://github.com/Olipro/ipset/blob/master/lib/parse.c#L1904 + // https://github.com/Olipro/ipset/blob/master/lib/parse.c#L1925 return true } return false From 068564a876e4bd8f9712a6cbfc5c90f7e20effb9 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Fri, 22 Dec 2017 15:10:20 +0800 Subject: [PATCH 486/794] Add generic interface for VirtualMachineScaleSetsClient and VirtualMachineScaleSetVMsClient --- pkg/cloudprovider/providers/azure/azure.go | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 2d8322ee58b..844651d6fc3 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -181,6 +181,23 @@ type SecurityGroupsClient interface { List(resourceGroupName string) (result network.SecurityGroupListResult, err error) } +// VirtualMachineScaleSetsClient defines needed functions for azure compute.VirtualMachineScaleSetsClient +type VirtualMachineScaleSetsClient interface { + CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) + Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) + List(resourceGroupName string) (result 
compute.VirtualMachineScaleSetListResult, err error) + ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) + UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) +} + +// VirtualMachineScaleSetVMsClient defines needed functions for azure compute.VirtualMachineScaleSetVMsClient +type VirtualMachineScaleSetVMsClient interface { + Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) + GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) + List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) + ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) +} + // Cloud holds the config and clients type Cloud struct { Config @@ -201,8 +218,8 @@ type Cloud struct { vmSet VMSet // Clients for vmss. 
- VirtualMachineScaleSetsClient compute.VirtualMachineScaleSetsClient - VirtualMachineScaleSetVMsClient compute.VirtualMachineScaleSetVMsClient + VirtualMachineScaleSetsClient VirtualMachineScaleSetsClient + VirtualMachineScaleSetVMsClient VirtualMachineScaleSetVMsClient *BlobDiskController *ManagedDiskController From 11fae2186b76c6e8a6db2bd000ea7f6f5b3d0432 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Fri, 22 Dec 2017 15:10:30 +0800 Subject: [PATCH 487/794] Add fake clients for VirtualMachineScaleSetsClient and VirtualMachineScaleSetVMsClient --- .../providers/azure/azure_fakes.go | 160 ++++++++++++++++++ .../providers/azure/azure_test.go | 2 + 2 files changed, 162 insertions(+) diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index 1bc0d0c811d..18e7281fa43 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -625,3 +625,163 @@ func getRandomIPPtr() *string { rand.Seed(time.Now().UnixNano()) return to.StringPtr(fmt.Sprintf("%d.%d.%d.%d", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256))) } + +type fakeVirtualMachineScaleSetVMsClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]compute.VirtualMachineScaleSetVM +} + +func newFakeVirtualMachineScaleSetVMsClient() fakeVirtualMachineScaleSetVMsClient { + fVMC := fakeVirtualMachineScaleSetVMsClient{} + fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachineScaleSetVM) + fVMC.mutex = &sync.Mutex{} + + return fVMC +} + +func (fVMC fakeVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) { + fVMC.mutex.Lock() + defer fVMC.mutex.Unlock() + + value := []compute.VirtualMachineScaleSetVM{} + if _, ok := fVMC.FakeStore[resourceGroupName]; ok { + for _, v := range 
fVMC.FakeStore[resourceGroupName] { + value = append(value, v) + } + } + + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + result.NextLink = nil + result.Value = &value + return result, nil +} + +func (fVMC fakeVirtualMachineScaleSetVMsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { + return result, nil +} + +func (fVMC fakeVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) { + fVMC.mutex.Lock() + defer fVMC.mutex.Unlock() + + vmKey := fmt.Sprintf("%s-%s", VMScaleSetName, instanceID) + if scaleSetMap, ok := fVMC.FakeStore[resourceGroupName]; ok { + if entity, ok := scaleSetMap[vmKey]; ok { + return entity, nil + } + } + + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "No such VirtualMachineScaleSetVM", + } +} + +func (fVMC fakeVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) { + _, err = fVMC.Get(resourceGroupName, VMScaleSetName, instanceID) + if err != nil { + return result, err + } + + return result, nil +} + +type fakeVirtualMachineScaleSetsClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]compute.VirtualMachineScaleSet +} + +func newFakeVirtualMachineScaleSetsClient() fakeVirtualMachineScaleSetsClient { + fVMSSC := fakeVirtualMachineScaleSetsClient{} + fVMSSC.FakeStore = make(map[string]map[string]compute.VirtualMachineScaleSet) + fVMSSC.mutex = &sync.Mutex{} + + return fVMSSC +} + +func (fVMSSC fakeVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) { + fVMSSC.mutex.Lock() + defer 
fVMSSC.mutex.Unlock() + + resultChan := make(chan compute.VirtualMachineScaleSet, 1) + errChan := make(chan error, 1) + var result compute.VirtualMachineScaleSet + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + + if _, ok := fVMSSC.FakeStore[resourceGroupName]; !ok { + fVMSSC.FakeStore[resourceGroupName] = make(map[string]compute.VirtualMachineScaleSet) + } + fVMSSC.FakeStore[resourceGroupName][VMScaleSetName] = parameters + result = fVMSSC.FakeStore[resourceGroupName][VMScaleSetName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} + +func (fVMSSC fakeVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { + fVMSSC.mutex.Lock() + defer fVMSSC.mutex.Unlock() + + if scaleSetMap, ok := fVMSSC.FakeStore[resourceGroupName]; ok { + if entity, ok := scaleSetMap[VMScaleSetName]; ok { + return entity, nil + } + } + + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "No such ScaleSet", + } +} + +func (fVMSSC fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) { + fVMSSC.mutex.Lock() + defer fVMSSC.mutex.Unlock() + + value := []compute.VirtualMachineScaleSet{} + if _, ok := fVMSSC.FakeStore[resourceGroupName]; ok { + for _, v := range fVMSSC.FakeStore[resourceGroupName] { + value = append(value, v) + } + } + + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + result.NextLink = nil + result.Value = &value + return result, nil +} + +func (fVMSSC fakeVirtualMachineScaleSetsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { + return result, nil +} + +func (fVMSSC fakeVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName 
string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) { + resultChan := make(chan compute.OperationStatusResponse, 1) + errChan := make(chan error, 1) + var result compute.OperationStatusResponse + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index f141ae2d1b0..7960b21509e 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -870,6 +870,8 @@ func getTestCloud() (az *Cloud) { az.SecurityGroupsClient = newFakeAzureNSGClient() az.VirtualMachinesClient = newFakeAzureVirtualMachinesClient() az.InterfacesClient = newFakeAzureInterfacesClient() + az.VirtualMachineScaleSetsClient = newFakeVirtualMachineScaleSetsClient() + az.VirtualMachineScaleSetVMsClient = newFakeVirtualMachineScaleSetVMsClient() az.vmSet = newAvailabilitySet(az) return az From 636a181d48ae6826c46e799f4e3d32e40c891084 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 22 Dec 2017 08:19:11 +0000 Subject: [PATCH 488/794] use /dev/disk/by-id instead of /dev/sd* for azure disk change uuid to id --- pkg/volume/azure_dd/azure_common_linux.go | 36 +++++++++++++++++++++-- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/pkg/volume/azure_dd/azure_common_linux.go b/pkg/volume/azure_dd/azure_common_linux.go index 304a2bc2855..792e4cbae55 100644 --- a/pkg/volume/azure_dd/azure_common_linux.go +++ b/pkg/volume/azure_dd/azure_common_linux.go @@ -19,6 +19,7 @@ limitations under the License. 
package azure_dd import ( + "fmt" "path" "strconv" libstrings "strings" @@ -45,6 +46,27 @@ func listAzureDiskPath(io ioHandler) []string { return azureDiskList } +// getDiskIDByPath get disk id by device name from /dev/disk/by-id +func getDiskIDByPath(io ioHandler, devName string) (string, error) { + diskIDPath := "/dev/disk/by-id/" + dirs, err := io.ReadDir(diskIDPath) + if err == nil { + for _, f := range dirs { + diskPath := diskIDPath + f.Name() + link, linkErr := io.Readlink(diskPath) + if linkErr != nil { + glog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr) + continue + } + if libstrings.HasSuffix(link, devName) { + return diskPath, nil + } + } + return "", fmt.Errorf("device name(%s) is not found under %s", devName, diskIDPath) + } + return "", fmt.Errorf("read %s error: %v", diskIDPath, err) +} + func scsiHostRescan(io ioHandler, exec mount.Exec) { scsi_path := "/sys/class/scsi_host/" if dirs, err := io.ReadDir(scsi_path); err == nil { @@ -129,15 +151,23 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st dir := path.Join(sys_path, name, "block") if dev, err := io.ReadDir(dir); err == nil { found := false + devName := dev[0].Name() for _, diskName := range azureDisks { - glog.V(12).Infof("azure disk - validating disk %q with sys disk %q", dev[0].Name(), diskName) - if string(dev[0].Name()) == diskName { + glog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName) + if string(devName) == diskName { found = true break } } if !found { - return "/dev/" + dev[0].Name(), nil + diskPath, err := getDiskIDByPath(io, devName) + if err == nil { + glog.V(4).Infof("azureDisk - found %s by %s under /dev/disk/by-id", diskPath, devName) + return diskPath, nil + } else { + glog.Warningf("azureDisk - getDiskIDByPath by %s failed, error: %v", devName, err) + return "/dev/" + devName, nil + } } } } From a8cdeb4681975aded9d78a1f59cadb40e21e041b Mon Sep 17 00:00:00 2001 From: Dong Liu Date: Fri, 22 
Dec 2017 17:11:30 +0800 Subject: [PATCH 489/794] Add cache for VirtualMachinesClient.Get in azure cloud provider --- pkg/cloudprovider/providers/azure/BUILD | 3 + .../providers/azure/azure_controllerCommon.go | 4 + .../providers/azure/azure_util.go | 6 +- .../providers/azure/azure_util_cache.go | 81 ++++++++++++++++ .../providers/azure/azure_util_cache_test.go | 96 +++++++++++++++++++ .../providers/azure/azure_wrap.go | 66 ++++++++++--- 6 files changed, 243 insertions(+), 13 deletions(-) create mode 100644 pkg/cloudprovider/providers/azure/azure_util_cache.go create mode 100644 pkg/cloudprovider/providers/azure/azure_util_cache_test.go diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index 322024f2c61..83cbf462c58 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -23,6 +23,7 @@ go_library( "azure_storage.go", "azure_storageaccount.go", "azure_util.go", + "azure_util_cache.go", "azure_util_vmss.go", "azure_vmsets.go", "azure_wrap.go", @@ -53,6 +54,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", ], ) @@ -62,6 +64,7 @@ go_test( srcs = [ "azure_loadbalancer_test.go", "azure_test.go", + "azure_util_cache_test.go", "azure_util_test.go", ], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", diff --git a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go index 881a7dbb2c4..fdb78e2af7b 100644 --- a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go +++ b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go @@ -134,6 +134,8 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, 
diskName, diskURI stri } } else { glog.V(4).Infof("azureDisk - azure attach succeeded") + // Invalidate the cache right after updating + vmCache.Delete(vmName) } return err } @@ -192,6 +194,8 @@ func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName t glog.Errorf("azureDisk - azure disk detach failed, err: %v", err) } else { glog.V(4).Infof("azureDisk - azure disk detach succeeded") + // Invalidate the cache right after updating + vmCache.Delete(vmName) } return err } diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_util.go index 6181550571b..77996a0e5b5 100644 --- a/pkg/cloudprovider/providers/azure/azure_util.go +++ b/pkg/cloudprovider/providers/azure/azure_util.go @@ -435,11 +435,15 @@ func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error // GetZoneByNodeName gets zone from instance view. func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { - vm, err := as.VirtualMachinesClient.Get(as.ResourceGroup, name, compute.InstanceView) + vm, exists, err := as.getVirtualMachine(types.NodeName(name)) if err != nil { return cloudprovider.Zone{}, err } + if !exists { + return cloudprovider.Zone{}, cloudprovider.InstanceNotFound + } + failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain)) zone := cloudprovider.Zone{ FailureDomain: failureDomain, diff --git a/pkg/cloudprovider/providers/azure/azure_util_cache.go b/pkg/cloudprovider/providers/azure/azure_util_cache.go new file mode 100644 index 00000000000..8e416601ad8 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_util_cache.go @@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "sync" + "time" + + "k8s.io/client-go/tools/cache" +) + +type timedcacheEntry struct { + key string + data interface{} +} + +type timedcache struct { + store cache.Store + lock sync.Mutex +} + +// ttl time.Duration +func newTimedcache(ttl time.Duration) timedcache { + return timedcache{ + store: cache.NewTTLStore(cacheKeyFunc, ttl), + } +} + +func cacheKeyFunc(obj interface{}) (string, error) { + return obj.(*timedcacheEntry).key, nil +} + +func (t *timedcache) GetOrCreate(key string, createFunc func() interface{}) (interface{}, error) { + entry, exists, err := t.store.GetByKey(key) + if err != nil { + return nil, err + } + if exists { + return (entry.(*timedcacheEntry)).data, nil + } + + t.lock.Lock() + defer t.lock.Unlock() + entry, exists, err = t.store.GetByKey(key) + if err != nil { + return nil, err + } + if exists { + return (entry.(*timedcacheEntry)).data, nil + } + + if createFunc == nil { + return nil, nil + } + created := createFunc() + t.store.Add(&timedcacheEntry{ + key: key, + data: created, + }) + return created, nil +} + +func (t *timedcache) Delete(key string) { + _ = t.store.Delete(&timedcacheEntry{ + key: key, + }) +} diff --git a/pkg/cloudprovider/providers/azure/azure_util_cache_test.go b/pkg/cloudprovider/providers/azure/azure_util_cache_test.go new file mode 100644 index 00000000000..0ac26d2e98a --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_util_cache_test.go @@ -0,0 +1,96 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "sync/atomic" + "testing" + "time" +) + +func TestCacheReturnsSameObject(t *testing.T) { + type cacheTestingStruct struct{} + c := newTimedcache(1 * time.Minute) + o1 := cacheTestingStruct{} + get1, _ := c.GetOrCreate("b1", func() interface{} { + return o1 + }) + o2 := cacheTestingStruct{} + get2, _ := c.GetOrCreate("b1", func() interface{} { + return o2 + }) + if get1 != get2 { + t.Error("Get not equal") + } +} + +func TestCacheCallsCreateFuncOnce(t *testing.T) { + var callsCount uint32 + f1 := func() interface{} { + atomic.AddUint32(&callsCount, 1) + return 1 + } + c := newTimedcache(500 * time.Millisecond) + for index := 0; index < 20; index++ { + _, _ = c.GetOrCreate("b1", f1) + } + + if callsCount != 1 { + t.Error("Count not match") + } + time.Sleep(500 * time.Millisecond) + c.GetOrCreate("b1", f1) + if callsCount != 2 { + t.Error("Count not match") + } +} + +func TestCacheExpires(t *testing.T) { + f1 := func() interface{} { + return 1 + } + c := newTimedcache(500 * time.Millisecond) + get1, _ := c.GetOrCreate("b1", f1) + if get1 != 1 { + t.Error("Value not equal") + } + time.Sleep(500 * time.Millisecond) + get1, _ = c.GetOrCreate("b1", nil) + if get1 != nil { + t.Error("value not expired") + } +} + +func TestCacheDelete(t *testing.T) { + f1 := func() interface{} { + return 1 + } + c := newTimedcache(500 * time.Millisecond) + get1, _ := c.GetOrCreate("b1", f1) + if get1 != 1 { + t.Error("Value not 
equal") + } + get1, _ = c.GetOrCreate("b1", nil) + if get1 != 1 { + t.Error("Value not equal") + } + c.Delete("b1") + get1, _ = c.GetOrCreate("b1", nil) + if get1 != nil { + t.Error("value not deleted") + } +} diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index f1aa0def597..7c28d57e8f4 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -18,6 +18,8 @@ package azure import ( "net/http" + "sync" + "time" "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/network" @@ -54,25 +56,65 @@ func ignoreStatusNotFoundFromError(err error) error { return err } +// cache used by getVirtualMachine +// 15s for expiration duration +var vmCache = newTimedcache(15 * time.Second) + +type vmRequest struct { + lock *sync.Mutex + vm *compute.VirtualMachine +} + +/// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache +/// The service side has throttling control that delays responses if there're multiple requests onto certain vm +/// resource request in short period. 
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, exists bool, err error) { var realErr error vmName := string(nodeName) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.Get(%s): start", vmName) - vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "") - glog.V(10).Infof("VirtualMachinesClient.Get(%s): end", vmName) - exists, realErr = checkResourceExistsFromError(err) - if realErr != nil { - return vm, false, realErr + cachedRequest, err := vmCache.GetOrCreate(vmName, func() interface{} { + return &vmRequest{ + lock: &sync.Mutex{}, + vm: nil, + } + }) + if err != nil { + return compute.VirtualMachine{}, false, err + } + request := cachedRequest.(*vmRequest) + + if request.vm == nil { + request.lock.Lock() + defer request.lock.Unlock() + if request.vm == nil { + // Currently InstanceView request are used by azure_zones, while the calls come after non-InstanceView + // request. If we first send an InstanceView request and then a non InstanceView request, the second + // request will still hit throttling. This is what happens now for cloud controller manager: In this + // case we do get instance view every time to fulfill the azure_zones requirement without hitting + // throttling. 
+ // Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachinesClient.Get(%s): start", vmName) + vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, compute.InstanceView) + glog.V(10).Infof("VirtualMachinesClient.Get(%s): end", vmName) + + exists, realErr = checkResourceExistsFromError(err) + if realErr != nil { + return vm, false, realErr + } + + if !exists { + return vm, false, nil + } + + request.vm = &vm + } + return vm, exists, err } - if !exists { - return vm, false, nil - } - - return vm, exists, err + glog.V(6).Infof("getVirtualMachine hits cache for(%s)", vmName) + return *request.vm, true, nil } func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) { From bdab091476370e223421a6bf3c35d6a713528f72 Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Fri, 22 Dec 2017 11:04:23 +0100 Subject: [PATCH 490/794] Disable the DNS autoscaler test in large clusters. This is to prevent collateral damage while details are being investigated. --- test/e2e/autoscaling/dns_autoscaling.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index 9c57a3da8be..77316826425 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -157,7 +157,8 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() { Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred()) }) - It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() { + // TODO: Get rid of [DisabledForLargeClusters] tag when issue #55779 is fixed. 
+ It("[DisabledForLargeClusters] kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() { By("Replace the dns autoscaling parameters with testing parameters") err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1))) From 301488b235388d4a5396c4abe18597c61195e52d Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Fri, 22 Dec 2017 11:20:57 -0800 Subject: [PATCH 491/794] Expose all GCE cloud provider services versions, not just the GA API --- pkg/cloudprovider/providers/gce/gce.go | 14 +++++++++++--- test/e2e/framework/firewall_util.go | 4 ++-- test/e2e/framework/util.go | 2 +- test/e2e/storage/volume_provisioning.go | 2 +- 4 files changed, 15 insertions(+), 7 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 9a2d92f6ef0..833ff56d500 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -211,9 +211,17 @@ func init() { }) } -// Raw access to the underlying GCE service, probably should only be used for e2e tests -func (g *GCECloud) GetComputeService() *compute.Service { - return g.service +// Services is the set of all versions of the compute service. +type Services struct { + // GA, Alpha, Beta versions of the compute API. + GA *compute.Service + Alpha *computealpha.Service + Beta *computebeta.Service +} + +// ComputeServices returns access to the internal compute services. +func (g *GCECloud) ComputeServices() *Services { + return &Services{g.service, g.serviceAlpha, g.serviceBeta} } // newGCECloud creates a new instance of GCECloud. 
diff --git a/test/e2e/framework/firewall_util.go b/test/e2e/framework/firewall_util.go index 08bca6a26ca..69ab831c157 100644 --- a/test/e2e/framework/firewall_util.go +++ b/test/e2e/framework/firewall_util.go @@ -100,7 +100,7 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, // GetInstanceTags gets tags from GCE instance with given name. func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags { gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud) - res, err := gceCloud.GetComputeService().Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone, + res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone, instanceName).Do() if err != nil { Failf("Failed to get instance tags for %v: %v", instanceName, err) @@ -113,7 +113,7 @@ func SetInstanceTags(cloudConfig CloudConfig, instanceName, zone string, tags [] gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud) // Re-get instance everytime because we need the latest fingerprint for updating metadata resTags := GetInstanceTags(cloudConfig, instanceName) - _, err := gceCloud.GetComputeService().Instances.SetTags( + _, err := gceCloud.ComputeServices().GA.Instances.SetTags( cloudConfig.ProjectID, zone, instanceName, &compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do() if err != nil { diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index e2f298d50d8..bdf52dfdd98 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -4335,7 +4335,7 @@ func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error { } return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { - service := gceCloud.GetComputeService() + service := gceCloud.ComputeServices().GA list, err := service.ForwardingRules.List(project, region).Do() if err != nil { return false, err diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 
be7efd9cd45..3fb7c59f2df 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -501,7 +501,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Expect(err).NotTo(HaveOccurred()) // Get a list of all zones in the project - zones, err := gceCloud.GetComputeService().Zones.List(framework.TestContext.CloudConfig.ProjectID).Do() + zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do() Expect(err).NotTo(HaveOccurred()) for _, z := range zones.Items { allZones.Insert(z.Name) From e55938940d2d95e9cb1ff1def63cc54d7725f774 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Fri, 22 Dec 2017 13:24:00 -0500 Subject: [PATCH 492/794] Rewrite go_install_from_commit to handle pkgs that aren't in HEAD This is brittle and really only intended to workaround the fact that gazelle has moved out of the bazelbuild/rules_go repo to its own repo. I would rather see this reverted once we move to the same version of gazelle as used by kubernetes/test-infra --- hack/lib/util.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/hack/lib/util.sh b/hack/lib/util.sh index 1114cca8902..3595a0c6650 100755 --- a/hack/lib/util.sh +++ b/hack/lib/util.sh @@ -475,10 +475,15 @@ kube::util::go_install_from_commit() { kube::util::ensure-temp-dir mkdir -p "${KUBE_TEMP}/go/src" - GOPATH="${KUBE_TEMP}/go" go get -d -u "${pkg}" + # TODO(spiffxp): remove this brittle workaround for go getting a package that doesn't exist at HEAD + repo=$(echo ${pkg} | cut -d/ -f1-3) + git clone "https://${repo}" "${KUBE_TEMP}/go/src/${repo}" + # GOPATH="${KUBE_TEMP}/go" go get -d -u "${pkg}" ( - cd "${KUBE_TEMP}/go/src/${pkg}" + cd "${KUBE_TEMP}/go/src/${repo}" + git fetch # TODO(spiffxp): workaround git checkout -q "${commit}" + GOPATH="${KUBE_TEMP}/go" go get -d "${pkg}" #TODO(spiffxp): workaround GOPATH="${KUBE_TEMP}/go" go install "${pkg}" ) PATH="${KUBE_TEMP}/go/bin:${PATH}" From 
e9dd8a68f60d2565069caed068f8fe4d493ea5ce Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Fri, 22 Dec 2017 14:36:16 -0800 Subject: [PATCH 493/794] Revert k8s.gcr.io vanity domain This reverts commit eba5b6092afcae27a7c925afea76b85d903e87a9. Fixes https://github.com/kubernetes/kubernetes/issues/57526 --- CHANGELOG-1.4.md | 8 +- CHANGELOG-1.5.md | 16 ++-- CHANGELOG-1.6.md | 8 +- api/openapi-spec/swagger.json | 2 +- api/swagger-spec/v1.json | 2 +- build/BUILD | 6 +- build/build-image/Dockerfile | 2 +- build/build-image/cross/Makefile | 4 +- build/common.sh | 10 +- build/debian-base/Makefile | 2 +- build/debian-hyperkube-base/Makefile | 6 +- build/debian-hyperkube-base/README.md | 12 +-- build/debian-iptables/Makefile | 4 +- build/debian-iptables/README.md | 12 +-- build/lib/release.sh | 13 +-- build/pause/Makefile | 4 +- build/root/WORKSPACE | 8 +- cluster/addons/addon-manager/Makefile | 2 +- cluster/addons/addon-manager/README.md | 12 +-- ...o-node-vertical-autoscaler-deployment.yaml | 2 +- ...ypha-horizontal-autoscaler-deployment.yaml | 2 +- .../typha-vertical-autoscaler-deployment.yaml | 2 +- .../glbc/default-svc-controller.yaml | 2 +- .../google/heapster-controller.yaml | 8 +- .../heapster-controller-combined.yaml | 9 +- .../influxdb/heapster-controller.yaml | 8 +- .../influxdb/influxdb-grafana-controller.yaml | 4 +- .../stackdriver/heapster-controller.yaml | 6 +- .../standalone/heapster-controller.yaml | 4 +- .../dashboard/dashboard-controller.yaml | 2 +- .../dns-horizontal-autoscaler.yaml | 2 +- cluster/addons/dns/kube-dns.yaml.base | 6 +- cluster/addons/dns/kube-dns.yaml.in | 6 +- cluster/addons/dns/kube-dns.yaml.sed | 6 +- .../etcd-empty-dir-cleanup.yaml | 2 +- .../fluentd-elasticsearch/es-image/Makefile | 2 +- .../fluentd-elasticsearch/es-statefulset.yaml | 2 +- .../fluentd-elasticsearch/fluentd-es-ds.yaml | 2 +- .../fluentd-es-image/Makefile | 2 +- .../fluentd-es-image/README.md | 4 +- .../addons/fluentd-gcp/event-exporter.yaml | 4 +- 
.../addons/fluentd-gcp/fluentd-gcp-ds.yaml | 4 +- .../addons/ip-masq-agent/ip-masq-agent.yaml | 2 +- .../metadata-proxy/gce/metadata-proxy.yaml | 4 +- .../metrics-server-deployment.yaml | 4 +- cluster/addons/node-problem-detector/npd.yaml | 2 +- cluster/addons/python-image/Makefile | 2 +- cluster/addons/registry/README.md | 2 +- cluster/addons/registry/images/Makefile | 2 +- cluster/common.sh | 4 +- cluster/gce/config-test.sh | 2 +- .../gce/container-linux/configure-helper.sh | 4 +- cluster/gce/container-linux/configure.sh | 8 +- cluster/gce/gci/configure-helper.sh | 6 +- cluster/gce/gci/configure.sh | 8 +- cluster/gce/gci/mounter/Makefile | 2 +- cluster/gce/util.sh | 6 +- cluster/get-kube-local.sh | 2 +- .../images/etcd-empty-dir-cleanup/Makefile | 2 +- cluster/images/etcd-version-monitor/Makefile | 12 +-- cluster/images/etcd-version-monitor/README.md | 2 +- .../etcd-version-monitor.yaml | 2 +- cluster/images/etcd/Makefile | 4 +- cluster/images/etcd/README.md | 12 +-- cluster/images/hyperkube/BUILD | 2 +- cluster/images/hyperkube/Dockerfile | 2 +- cluster/images/hyperkube/Makefile | 6 +- cluster/images/hyperkube/README.md | 14 +-- cluster/images/kubemark/Makefile | 4 +- .../reactive/kubernetes_worker.py | 6 +- cluster/kubemark/gce/config-default.sh | 2 +- cluster/log-dump/logexporter-daemonset.yaml | 2 +- cluster/restore-from-backup.sh | 4 +- cluster/saltbase/install.sh | 10 +- .../cluster-autoscaler.manifest | 6 +- .../e2e-image-puller.manifest | 96 +++++++++---------- cluster/saltbase/salt/etcd/etcd.manifest | 2 +- .../salt/kube-addons/kube-addon-manager.yaml | 2 +- .../kube-registry-proxy.yaml | 2 +- cluster/saltbase/salt/l7-gcp/glbc.manifest | 2 +- .../salt/rescheduler/rescheduler.manifest | 2 +- .../app/apis/kubeadm/v1alpha1/defaults.go | 2 +- cmd/kubeadm/app/images/images_test.go | 2 +- .../phases/selfhosting/selfhosting_test.go | 18 ++-- .../app/phases/upgrade/staticpods_test.go | 2 +- cmd/kubeadm/app/util/template_test.go | 8 +- 
cmd/kubelet/app/options/container_runtime.go | 2 +- docs/api-reference/v1/definitions.html | 2 +- examples/cluster-dns/dns-backend-rc.yaml | 2 +- examples/cluster-dns/dns-frontend-pod.yaml | 2 +- examples/cluster-dns/images/backend/Makefile | 2 +- examples/cluster-dns/images/frontend/Makefile | 2 +- examples/explorer/Makefile | 4 +- examples/explorer/pod.yaml | 2 +- examples/guestbook-go/Makefile | 4 +- .../guestbook-go/guestbook-controller.json | 2 +- .../all-in-one/guestbook-all-in-one.yaml | 2 +- .../legacy/redis-master-controller.yaml | 2 +- .../guestbook/redis-master-deployment.yaml | 2 +- examples/kubectl-container/Makefile | 4 +- examples/kubectl-container/pod.json | 4 +- .../spark-master-controller.yaml | 2 +- .../spark-worker-controller.yaml | 2 +- examples/spark/spark-master-controller.yaml | 2 +- examples/spark/spark-worker-controller.yaml | 2 +- examples/spark/zeppelin-controller.yaml | 2 +- examples/storage/cassandra/image/Dockerfile | 2 +- examples/storage/redis/redis-controller.yaml | 2 +- examples/storage/redis/redis-master.yaml | 2 +- .../redis/redis-sentinel-controller.yaml | 2 +- examples/storage/rethinkdb/admin-pod.yaml | 2 +- examples/storage/rethinkdb/rc.yaml | 2 +- .../volumes/portworx/portworx-volume-pod.yaml | 2 +- .../portworx/portworx-volume-pvcpod.yaml | 2 +- .../portworx/portworx-volume-pvcscpod.yaml | 2 +- examples/volumes/scaleio/pod-sc-pvc.yaml | 2 +- examples/volumes/scaleio/pod.yaml | 2 +- .../volumes/vsphere/simple-statefulset.yaml | 2 +- .../volumes/vsphere/vsphere-volume-pod.yaml | 2 +- .../vsphere/vsphere-volume-pvcpod.yaml | 2 +- .../vsphere/vsphere-volume-pvcscpod.yaml | 2 +- hack/gen-swagger-doc/README.md | 2 +- hack/lib/swagger.sh | 2 +- hack/local-up-cluster.sh | 2 +- hack/make-rules/test-cmd-util.sh | 38 ++++---- .../deployment-multicontainer-resources.yaml | 4 +- hack/testdata/deployment-multicontainer.yaml | 4 +- hack/testdata/deployment-revision1.yaml | 2 +- hack/testdata/deployment-revision2.yaml | 2 +- 
hack/testdata/filter/pod-apply-selector.yaml | 2 +- hack/testdata/filter/pod-dont-apply.yaml | 2 +- hack/testdata/multi-resource-json-modify.json | 2 +- hack/testdata/multi-resource-json.json | 2 +- hack/testdata/multi-resource-list-modify.json | 2 +- hack/testdata/multi-resource-list.json | 2 +- .../multi-resource-rclist-modify.json | 4 +- hack/testdata/multi-resource-rclist.json | 4 +- hack/testdata/multi-resource-yaml-modify.yaml | 2 +- hack/testdata/multi-resource-yaml.yaml | 2 +- .../null-propagation/deployment-l1.yaml | 2 +- .../null-propagation/deployment-l2.yaml | 2 +- hack/testdata/pod-apply.yaml | 2 +- hack/testdata/pod-with-api-env.yaml | 2 +- hack/testdata/pod-with-precision.json | 2 +- hack/testdata/pod.yaml | 2 +- hack/testdata/prune/a.yaml | 2 +- hack/testdata/prune/b.yaml | 2 +- .../deployment/deployment/nginx-broken.yaml | 2 +- .../deployment/deployment/nginx.yaml | 2 +- hack/testdata/recursive/deployment/nginx.yaml | 2 +- .../testdata/rollingupdate-daemonset-rv2.yaml | 4 +- hack/testdata/rollingupdate-daemonset.yaml | 2 +- .../rollingupdate-statefulset-rv2.yaml | 4 +- hack/testdata/rollingupdate-statefulset.yaml | 2 +- hack/testdata/sorted-pods/sorted-pod1.yaml | 2 +- hack/testdata/sorted-pods/sorted-pod2.yaml | 2 +- hack/testdata/sorted-pods/sorted-pod3.yaml | 2 +- pkg/api/testing/deep_copy_test.go | 4 +- .../replication_controller_example.json | 2 +- .../validation/testdata/v1/invalidPod1.json | 2 +- .../validation/testdata/v1/invalidPod3.json | 2 +- .../validation/testdata/v1/invalidPod4.yaml | 2 +- pkg/kubelet/dockershim/docker_sandbox.go | 2 +- pkg/kubelet/kubelet_node_status_test.go | 10 +- pkg/kubelet/kubelet_test.go | 4 +- .../operationexecutor/operation_executor.go | 4 +- .../operation_executor_test.go | 4 +- pkg/volume/util/util_test.go | 6 +- pkg/volume/util_test.go | 8 +- .../src/k8s.io/api/core/v1/generated.proto | 2 +- staging/src/k8s.io/api/core/v1/types.go | 2 +- .../core/v1/types_swagger_doc_generated.go | 2 +- 
.../testing-manifests/ingress/http/rc.yaml | 2 +- .../testing-manifests/ingress/nginx/rc.yaml | 2 +- .../ingress/static-ip/rc.yaml | 2 +- .../serviceloadbalancer/haproxyrc.yaml | 2 +- .../serviceloadbalancer/netexecrc.yaml | 2 +- .../statefulset/cassandra/tester.yaml | 2 +- .../statefulset/etcd/statefulset.yaml | 2 +- .../statefulset/etcd/tester.yaml | 2 +- .../statefulset/mysql-galera/statefulset.yaml | 6 +- .../statefulset/mysql-upgrade/tester.yaml | 2 +- .../statefulset/nginx/statefulset.yaml | 4 +- .../statefulset/redis/statefulset.yaml | 2 +- .../statefulset/zookeeper/statefulset.yaml | 2 +- test/e2e_node/conformance/build/Makefile | 4 +- test/e2e_node/conformance/run_test.sh | 2 +- test/e2e_node/gke_environment_test.go | 6 +- test/e2e_node/image_id_test.go | 2 +- test/e2e_node/image_list.go | 6 +- test/e2e_node/jenkins/gci-init-gpu.yaml | 2 +- test/e2e_node/memory_eviction_test.go | 2 +- test/e2e_node/node_problem_detector_linux.go | 2 +- test/e2e_node/remote/node_conformance.go | 2 +- test/e2e_node/runtime_conformance_test.go | 4 +- .../admin/high-availability/etcd.yaml | 2 +- .../high-availability/kube-apiserver.yaml | 2 +- .../kube-controller-manager.yaml | 2 +- .../high-availability/kube-scheduler.yaml | 2 +- .../admin/limitrange/invalid-pod.yaml | 2 +- .../doc-yaml/admin/limitrange/valid-pod.yaml | 2 +- .../user-guide/downward-api/dapi-pod.yaml | 2 +- .../user-guide/liveness/exec-liveness.yaml | 2 +- .../user-guide/liveness/http-liveness.yaml | 2 +- .../doc-yaml/user-guide/multi-pod.yaml | 2 +- .../user-guide/secrets/secret-env-pod.yaml | 2 +- .../user-guide/secrets/secret-pod.yaml | 2 +- .../pkg/kubectl/builder/kitten-rc.yaml | 2 +- .../kubectl/cmd/auth/rbac-resource-plus.yaml | 2 +- test/images/iperf/BASEIMAGE | 8 +- test/images/logs-generator/README.md | 6 +- test/images/pets/peer-finder/BASEIMAGE | 8 +- test/images/pets/redis-installer/BASEIMAGE | 8 +- test/images/pets/redis-installer/README.md | 2 +- .../images/pets/zookeeper-installer/BASEIMAGE | 8 
+- .../images/pets/zookeeper-installer/README.md | 2 +- test/images/resource-consumer/BASEIMAGE | 8 +- test/images/resource-consumer/README.md | 4 +- test/images/serve-hostname/README.md | 10 +- test/images/volumes-tester/ceph/Makefile | 2 +- test/images/volumes-tester/gluster/Makefile | 2 +- test/images/volumes-tester/iscsi/Makefile | 2 +- test/images/volumes-tester/nfs/Makefile | 2 +- test/images/volumes-tester/rbd/Makefile | 2 +- test/integration/benchmark-controller.json | 2 +- test/integration/framework/util.go | 2 +- .../master/synthetic_master_test.go | 4 +- .../scheduler/volume_binding_test.go | 2 +- .../cluster-autoscaler_template.json | 2 +- .../kubemark/resources/heapster_template.json | 4 +- .../resources/hollow-node_template.yaml | 2 +- .../resources/start-kubemark-master.sh | 2 +- test/utils/image/manifest.go | 2 +- 233 files changed, 470 insertions(+), 486 deletions(-) diff --git a/CHANGELOG-1.4.md b/CHANGELOG-1.4.md index 5f7adc42124..8c229de490b 100644 --- a/CHANGELOG-1.4.md +++ b/CHANGELOG-1.4.md @@ -177,15 +177,15 @@ filename | sha256 hash ### Other notable changes * kube-apiserver now drops unneeded path information if an older version of Windows kubectl sends it. ([#44586](https://github.com/kubernetes/kubernetes/pull/44586), [@mml](https://github.com/mml)) -* Bump k8s.gcr.io/glbc from 0.8.0 to 0.9.2. Release notes: [0.9.0](https://github.com/kubernetes/ingress/releases/tag/0.9.0), [0.9.1](https://github.com/kubernetes/ingress/releases/tag/0.9.1), [0.9.2](https://github.com/kubernetes/ingress/releases/tag/0.9.2) ([#43098](https://github.com/kubernetes/kubernetes/pull/43098), [@timstclair](https://github.com/timstclair)) +* Bump gcr.io/google_containers/glbc from 0.8.0 to 0.9.2. 
Release notes: [0.9.0](https://github.com/kubernetes/ingress/releases/tag/0.9.0), [0.9.1](https://github.com/kubernetes/ingress/releases/tag/0.9.1), [0.9.2](https://github.com/kubernetes/ingress/releases/tag/0.9.2) ([#43098](https://github.com/kubernetes/kubernetes/pull/43098), [@timstclair](https://github.com/timstclair)) * Patch CVE-2016-8859 in alpine based images: ([#42937](https://github.com/kubernetes/kubernetes/pull/42937), [@timstclair](https://github.com/timstclair)) - * - k8s.gcr.io/etcd-empty-dir-cleanup - * - k8s.gcr.io/kube-dnsmasq-amd64 + * - gcr.io/google-containers/etcd-empty-dir-cleanup + * - gcr.io/google-containers/kube-dnsmasq-amd64 * Check if pathExists before performing Unmount ([#39311](https://github.com/kubernetes/kubernetes/pull/39311), [@rkouj](https://github.com/rkouj)) * Unmount operation should not fail if volume is already unmounted ([#38547](https://github.com/kubernetes/kubernetes/pull/38547), [@rkouj](https://github.com/rkouj)) * Updates base image used for `kube-addon-manager` to latest `python:2.7-slim` and embedded `kubectl` to `v1.3.10`. No functionality changes expected. ([#42842](https://github.com/kubernetes/kubernetes/pull/42842), [@ixdy](https://github.com/ixdy)) * list-resources: don't fail if the grep fails to match any resources ([#41933](https://github.com/kubernetes/kubernetes/pull/41933), [@ixdy](https://github.com/ixdy)) -* Update k8s.gcr.io/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. ([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) +* Update gcr.io/google-containers/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. 
([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) * Backporting TPR fix to 1.4 ([#42380](https://github.com/kubernetes/kubernetes/pull/42380), [@foxish](https://github.com/foxish)) * Fix AWS device allocator to only use valid device names ([#41455](https://github.com/kubernetes/kubernetes/pull/41455), [@gnufied](https://github.com/gnufied)) * Reverts to looking up the current VM in vSphere using the machine's UUID, either obtained via sysfs or via the `vm-uuid` parameter in the cloud configuration file. ([#40892](https://github.com/kubernetes/kubernetes/pull/40892), [@robdaemon](https://github.com/robdaemon)) diff --git a/CHANGELOG-1.5.md b/CHANGELOG-1.5.md index 351ef979904..dd3b580688e 100644 --- a/CHANGELOG-1.5.md +++ b/CHANGELOG-1.5.md @@ -249,18 +249,18 @@ filename | sha256 hash * kube-up (with gce/gci and gce/coreos providers) now ensures the authentication token file contains correct tokens for the control plane components, even if the file already exists (ensures upgrades and downgrades work successfully) ([#43676](https://github.com/kubernetes/kubernetes/pull/43676), [@liggitt](https://github.com/liggitt)) * Patch CVE-2016-8859 in alpine based images: ([#42936](https://github.com/kubernetes/kubernetes/pull/42936), [@timstclair](https://github.com/timstclair)) - * - k8s.gcr.io/cluster-proportional-autoscaler-amd64 - * - k8s.gcr.io/dnsmasq-metrics-amd64 - * - k8s.gcr.io/etcd-empty-dir-cleanup - * - k8s.gcr.io/kube-addon-manager - * - k8s.gcr.io/kube-dnsmasq-amd64 + * - gcr.io/google-containers/cluster-proportional-autoscaler-amd64 + * - gcr.io/google-containers/dnsmasq-metrics-amd64 + * - gcr.io/google-containers/etcd-empty-dir-cleanup + * - gcr.io/google-containers/kube-addon-manager + * - gcr.io/google-containers/kube-dnsmasq-amd64 * - Disable thin_ls due to excessive iops ([#43113](https://github.com/kubernetes/kubernetes/pull/43113), [@dashpole](https://github.com/dashpole)) * - Ignore .mount cgroups, 
fixing dissappearing stats * - Fix wc goroutine leak * - Update aws-sdk-go dependency to 1.6.10 * PodSecurityPolicy authorization is correctly enforced by the PodSecurityPolicy admission plugin. ([#43489](https://github.com/kubernetes/kubernetes/pull/43489), [@liggitt](https://github.com/liggitt)) -* Bump k8s.gcr.io/glbc from 0.9.1 to 0.9.2. Release notes: [0.9.2](https://github.com/kubernetes/ingress/releases/tag/0.9.2) ([#43097](https://github.com/kubernetes/kubernetes/pull/43097), [@timstclair](https://github.com/timstclair)) -* Update k8s.gcr.io/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. ([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) +* Bump gcr.io/google_containers/glbc from 0.9.1 to 0.9.2. Release notes: [0.9.2](https://github.com/kubernetes/ingress/releases/tag/0.9.2) ([#43097](https://github.com/kubernetes/kubernetes/pull/43097), [@timstclair](https://github.com/timstclair)) +* Update gcr.io/google-containers/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. ([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) * restored normalization of custom `--etcd-prefix` when `--storage-backend` is set to etcd3 ([#42506](https://github.com/kubernetes/kubernetes/pull/42506), [@liggitt](https://github.com/liggitt)) @@ -655,7 +655,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( - [alpha] Introducing the v1alpha1 CRI API to allow pluggable container runtimes; an experimental docker-CRI integration is ready for testing and feedback. 
([docs](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md)) ([kubernetes/features#54](https://github.com/kubernetes/features/issues/54)) - [alpha] Kubelet launches container in a per pod cgroup hierarchy based on quality of service tier ([kubernetes/features#126](https://github.com/kubernetes/features/issues/126)) - [beta] Kubelet integrates with memcg notification API to detect when a hard eviction threshold is crossed ([kubernetes/features#125](https://github.com/kubernetes/features/issues/125)) - - [beta] Introducing the beta version containerized node conformance test k8s.gcr.io/node-test:0.2 for users to verify node setup. ([docs](http://kubernetes.io/docs/admin/node-conformance/)) ([kubernetes/features#84](https://github.com/kubernetes/features/issues/84)) + - [beta] Introducing the beta version containerized node conformance test gcr.io/google_containers/node-test:0.2 for users to verify node setup. ([docs](http://kubernetes.io/docs/admin/node-conformance/)) ([kubernetes/features#84](https://github.com/kubernetes/features/issues/84)) - **Scheduling** - [alpha] Added support for accounting opaque integer resources. 
([docs](http://kubernetes.io/docs/user-guide/compute-resources/#opaque-integer-resources-alpha-feature)) ([kubernetes/features#76](https://github.com/kubernetes/features/issues/76)) - [beta] PodDisruptionBudget has been promoted to beta, can be used to safely drain nodes while respecting application SLO's ([docs](http://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/)) ([kubernetes/features#85](https://github.com/kubernetes/features/issues/85)) diff --git a/CHANGELOG-1.6.md b/CHANGELOG-1.6.md index e7d5a4011f4..ae2ce75e411 100644 --- a/CHANGELOG-1.6.md +++ b/CHANGELOG-1.6.md @@ -1935,7 +1935,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( * Fixes a bug in the OpenStack-Heat kubernetes provider, in the handling of differences between the Identity v2 and Identity v3 APIs ([#40105](https://github.com/kubernetes/kubernetes/pull/40105), [@sc68cal](https://github.com/sc68cal)) ### Container Images -* Update k8s.gcr.io/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. ([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) +* Update gcr.io/google-containers/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. 
([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) * Remove unnecessary metrics (http/process/go) from being exposed by etcd-version-monitor ([#41807](https://github.com/kubernetes/kubernetes/pull/41807), [@shyamjvs](https://github.com/shyamjvs)) * Align the hyperkube image to support running binaries at /usr/local/bin/ like the other server images ([#41017](https://github.com/kubernetes/kubernetes/pull/41017), [@luxas](https://github.com/luxas)) * Bump up GLBC version from 0.9.0-beta to 0.9.1 ([#41037](https://github.com/kubernetes/kubernetes/pull/41037), [@bprashanth](https://github.com/bprashanth)) @@ -1982,7 +1982,7 @@ Features for this release were tracked via the use of the [kubernetes/features]( * Use kube-dns:1.11.0 ([#39925](https://github.com/kubernetes/kubernetes/pull/39925), [@sadlil](https://github.com/sadlil)) ### DNS Autoscaler -* Patch CVE-2016-8859 in k8s.gcr.io/cluster-proportional-autoscaler-amd64 ([#42933](https://github.com/kubernetes/kubernetes/pull/42933), [@timstclair](https://github.com/timstclair)) +* Patch CVE-2016-8859 in gcr.io/google-containers/cluster-proportional-autoscaler-amd64 ([#42933](https://github.com/kubernetes/kubernetes/pull/42933), [@timstclair](https://github.com/timstclair)) ### Cluster Autoscaler * Allow the Horizontal Pod Autoscaler controller to talk to the metrics API and custom metrics API as standard APIs. ([#41824](https://github.com/kubernetes/kubernetes/pull/41824), [@DirectXMan12](https://github.com/DirectXMan12)) @@ -2149,7 +2149,7 @@ filename | sha256 hash * Rescheduler uses taints in v1beta1 and will remove old ones (in version v1alpha1) right after its start. 
([#43106](https://github.com/kubernetes/kubernetes/pull/43106), [@piosz](https://github.com/piosz)) * kubeadm: `kubeadm reset` won't drain and remove the current node anymore ([#42713](https://github.com/kubernetes/kubernetes/pull/42713), [@luxas](https://github.com/luxas)) * hack/godep-restore.sh: use godep v79 which works ([#42965](https://github.com/kubernetes/kubernetes/pull/42965), [@sttts](https://github.com/sttts)) -* Patch CVE-2016-8859 in k8s.gcr.io/cluster-proportional-autoscaler-amd64 ([#42933](https://github.com/kubernetes/kubernetes/pull/42933), [@timstclair](https://github.com/timstclair)) +* Patch CVE-2016-8859 in gcr.io/google-containers/cluster-proportional-autoscaler-amd64 ([#42933](https://github.com/kubernetes/kubernetes/pull/42933), [@timstclair](https://github.com/timstclair)) * Disable devicemapper thin_ls due to excessive iops ([#42899](https://github.com/kubernetes/kubernetes/pull/42899), [@dashpole](https://github.com/dashpole)) @@ -2383,7 +2383,7 @@ filename | sha256 hash * Add configurable limits to CronJob resource to specify how many successful and failed jobs are preserved. ([#40932](https://github.com/kubernetes/kubernetes/pull/40932), [@peay](https://github.com/peay)) * Deprecate outofdisk-transition-frequency and low-diskspace-threshold-mb flags ([#41941](https://github.com/kubernetes/kubernetes/pull/41941), [@dashpole](https://github.com/dashpole)) * Add OWNERS for sample-apiserver in staging ([#42094](https://github.com/kubernetes/kubernetes/pull/42094), [@sttts](https://github.com/sttts)) -* Update k8s.gcr.io/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. ([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) +* Update gcr.io/google-containers/rescheduler to v0.2.2, which uses busybox as a base image instead of ubuntu. 
([#41911](https://github.com/kubernetes/kubernetes/pull/41911), [@ixdy](https://github.com/ixdy)) * Add storage.k8s.io/v1 API ([#40088](https://github.com/kubernetes/kubernetes/pull/40088), [@jsafrane](https://github.com/jsafrane)) * Juju - K8s master charm now properly keeps distributed master files in sync for an HA control plane. ([#41351](https://github.com/kubernetes/kubernetes/pull/41351), [@chuckbutler](https://github.com/chuckbutler)) * Fix zsh completion: unknown file attribute error ([#38104](https://github.com/kubernetes/kubernetes/pull/38104), [@elipapa](https://github.com/elipapa)) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 513f45afc68..1e463c37288 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -75018,7 +75018,7 @@ ], "properties": { "names": { - "description": "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "description": "Names by which this image is known. e.g. [\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", "type": "array", "items": { "type": "string" diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 206efd37e2f..cc2cebe67cf 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -20257,7 +20257,7 @@ "items": { "type": "string" }, - "description": "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]" + "description": "Names by which this image is known. e.g. 
[\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]" }, "sizeBytes": { "type": "integer", diff --git a/build/BUILD b/build/BUILD index cfd10691b9d..5f531a1f663 100644 --- a/build/BUILD +++ b/build/BUILD @@ -62,11 +62,7 @@ DOCKERIZED_BINARIES = { [docker_bundle( name = binary, - # TODO(thockin): remove the google_containers name after release 1.10. - images = { - "k8s.gcr.io/%s:{STABLE_DOCKER_TAG}" % binary: binary + "-internal", - "gcr.io/google_containers/%s:{STABLE_DOCKER_TAG}" % binary: binary + "-internal", - }, + images = {"gcr.io/google_containers/%s:{STABLE_DOCKER_TAG}" % binary: binary + "-internal"}, stamp = True, ) for binary in DOCKERIZED_BINARIES.keys()] diff --git a/build/build-image/Dockerfile b/build/build-image/Dockerfile index f9d0adaf034..59be59e4091 100644 --- a/build/build-image/Dockerfile +++ b/build/build-image/Dockerfile @@ -13,7 +13,7 @@ # limitations under the License. # This file creates a standard build environment for building Kubernetes -FROM k8s.gcr.io/kube-cross:KUBE_BUILD_IMAGE_CROSS_TAG +FROM gcr.io/google_containers/kube-cross:KUBE_BUILD_IMAGE_CROSS_TAG # Mark this as a kube-build container RUN touch /kube-build-image diff --git a/build/build-image/cross/Makefile b/build/build-image/cross/Makefile index a9dbc53a7ee..c8ed25375ca 100644 --- a/build/build-image/cross/Makefile +++ b/build/build-image/cross/Makefile @@ -21,7 +21,7 @@ TAG=$(shell cat VERSION) all: push build: - docker build --pull -t k8s.gcr.io/$(IMAGE):$(TAG) . + docker build --pull -t gcr.io/google_containers/$(IMAGE):$(TAG) . 
push: build - gcloud docker -- push k8s.gcr.io/$(IMAGE):$(TAG) + gcloud docker -- push gcr.io/google_containers/$(IMAGE):$(TAG) diff --git a/build/common.sh b/build/common.sh index 6489f83c783..8f8254ca228 100755 --- a/build/common.sh +++ b/build/common.sh @@ -96,7 +96,7 @@ kube::build::get_docker_wrapped_binaries() { kube-controller-manager,busybox kube-scheduler,busybox kube-aggregator,busybox - kube-proxy,k8s.gcr.io/debian-iptables-amd64:${debian_iptables_version} + kube-proxy,gcr.io/google-containers/debian-iptables-amd64:${debian_iptables_version} );; "arm") local targets=( @@ -105,7 +105,7 @@ kube::build::get_docker_wrapped_binaries() { kube-controller-manager,arm32v7/busybox kube-scheduler,arm32v7/busybox kube-aggregator,arm32v7/busybox - kube-proxy,k8s.gcr.io/debian-iptables-arm:${debian_iptables_version} + kube-proxy,gcr.io/google-containers/debian-iptables-arm:${debian_iptables_version} );; "arm64") local targets=( @@ -114,7 +114,7 @@ kube::build::get_docker_wrapped_binaries() { kube-controller-manager,arm64v8/busybox kube-scheduler,arm64v8/busybox kube-aggregator,arm64v8/busybox - kube-proxy,k8s.gcr.io/debian-iptables-arm64:${debian_iptables_version} + kube-proxy,gcr.io/google-containers/debian-iptables-arm64:${debian_iptables_version} );; "ppc64le") local targets=( @@ -123,7 +123,7 @@ kube::build::get_docker_wrapped_binaries() { kube-controller-manager,ppc64le/busybox kube-scheduler,ppc64le/busybox kube-aggregator,ppc64le/busybox - kube-proxy,k8s.gcr.io/debian-iptables-ppc64le:${debian_iptables_version} + kube-proxy,gcr.io/google-containers/debian-iptables-ppc64le:${debian_iptables_version} );; "s390x") local targets=( @@ -132,7 +132,7 @@ kube::build::get_docker_wrapped_binaries() { kube-controller-manager,s390x/busybox kube-scheduler,s390x/busybox kube-aggregator,s390x/busybox - kube-proxy,k8s.gcr.io/debian-iptables-s390x:${debian_iptables_version} + kube-proxy,gcr.io/google-containers/debian-iptables-s390x:${debian_iptables_version} );; esac diff 
--git a/build/debian-base/Makefile b/build/debian-base/Makefile index c3db5eab27e..47eafa5c645 100755 --- a/build/debian-base/Makefile +++ b/build/debian-base/Makefile @@ -14,7 +14,7 @@ all: build -REGISTRY ?= k8s.gcr.io +REGISTRY ?= gcr.io/google-containers IMAGE ?= debian-base BUILD_IMAGE ?= debian-build diff --git a/build/debian-hyperkube-base/Makefile b/build/debian-hyperkube-base/Makefile index 0c89c1ded76..54ca29f7e9a 100644 --- a/build/debian-hyperkube-base/Makefile +++ b/build/debian-hyperkube-base/Makefile @@ -15,15 +15,15 @@ # Build the hyperkube base image. This image is used to build the hyperkube image. # # Usage: -# [ARCH=amd64] [REGISTRY="k8s.gcr.io"] make (build|push) +# [ARCH=amd64] [REGISTRY="gcr.io/google-containers"] make (build|push) -REGISTRY?=k8s.gcr.io +REGISTRY?=gcr.io/google-containers IMAGE?=debian-hyperkube-base TAG=0.8 ARCH?=amd64 CACHEBUST?=1 -BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.3 +BASEIMAGE=gcr.io/google-containers/debian-base-$(ARCH):0.3 CNI_VERSION=v0.6.0 TEMP_DIR:=$(shell mktemp -d) diff --git a/build/debian-hyperkube-base/README.md b/build/debian-hyperkube-base/README.md index c5a1216e6bd..2cef311cb6d 100644 --- a/build/debian-hyperkube-base/README.md +++ b/build/debian-hyperkube-base/README.md @@ -1,6 +1,6 @@ ### debian-hyperkube-base -Serves as the base image for `k8s.gcr.io/hyperkube-${ARCH}` +Serves as the base image for `gcr.io/google-containers/hyperkube-${ARCH}` images. This image is compiled for multiple architectures. 
@@ -12,19 +12,19 @@ If you're editing the Dockerfile or some other thing, please bump the `TAG` in t ```console # Build for linux/amd64 (default) $ make push ARCH=amd64 -# ---> k8s.gcr.io/debian-hyperkube-base-amd64:TAG +# ---> gcr.io/google-containers/debian-hyperkube-base-amd64:TAG $ make push ARCH=arm -# ---> k8s.gcr.io/debian-hyperkube-base-arm:TAG +# ---> gcr.io/google-containers/debian-hyperkube-base-arm:TAG $ make push ARCH=arm64 -# ---> k8s.gcr.io/debian-hyperkube-base-arm64:TAG +# ---> gcr.io/google-containers/debian-hyperkube-base-arm64:TAG $ make push ARCH=ppc64le -# ---> k8s.gcr.io/debian-hyperkube-base-ppc64le:TAG +# ---> gcr.io/google-containers/debian-hyperkube-base-ppc64le:TAG $ make push ARCH=s390x -# ---> k8s.gcr.io/debian-hyperkube-base-s390x:TAG +# ---> gcr.io/google-containers/debian-hyperkube-base-s390x:TAG ``` If you don't want to push the images, run `make build` instead diff --git a/build/debian-iptables/Makefile b/build/debian-iptables/Makefile index b672f04752d..1cc75edf17a 100644 --- a/build/debian-iptables/Makefile +++ b/build/debian-iptables/Makefile @@ -14,7 +14,7 @@ .PHONY: build push -REGISTRY?="k8s.gcr.io" +REGISTRY?="gcr.io/google-containers" IMAGE=debian-iptables TAG=v10 ARCH?=amd64 @@ -34,7 +34,7 @@ ifeq ($(ARCH),s390x) QEMUARCH=s390x endif -BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.3 +BASEIMAGE=gcr.io/google-containers/debian-base-$(ARCH):0.3 build: cp ./* $(TEMP_DIR) diff --git a/build/debian-iptables/README.md b/build/debian-iptables/README.md index 04ea0c26633..f5c0b3c7e0f 100644 --- a/build/debian-iptables/README.md +++ b/build/debian-iptables/README.md @@ -1,6 +1,6 @@ ### debian-iptables -Serves as the base image for `k8s.gcr.io/kube-proxy-${ARCH}` and multiarch (not `amd64`) `k8s.gcr.io/flannel-${ARCH}` images. +Serves as the base image for `gcr.io/google_containers/kube-proxy-${ARCH}` and multiarch (not `amd64`) `gcr.io/google_containers/flannel-${ARCH}` images. This image is compiled for multiple architectures. 
@@ -11,19 +11,19 @@ If you're editing the Dockerfile or some other thing, please bump the `TAG` in t ```console # Build for linux/amd64 (default) $ make push ARCH=amd64 -# ---> k8s.gcr.io/debian-iptables-amd64:TAG +# ---> gcr.io/google_containers/debian-iptables-amd64:TAG $ make push ARCH=arm -# ---> k8s.gcr.io/debian-iptables-arm:TAG +# ---> gcr.io/google_containers/debian-iptables-arm:TAG $ make push ARCH=arm64 -# ---> k8s.gcr.io/debian-iptables-arm64:TAG +# ---> gcr.io/google_containers/debian-iptables-arm64:TAG $ make push ARCH=ppc64le -# ---> k8s.gcr.io/debian-iptables-ppc64le:TAG +# ---> gcr.io/google_containers/debian-iptables-ppc64le:TAG $ make push ARCH=s390x -# ---> k8s.gcr.io/debian-iptables-s390x:TAG +# ---> gcr.io/google_containers/debian-iptables-s390x:TAG ``` If you don't want to push the images, run `make` or `make build` instead diff --git a/build/lib/release.sh b/build/lib/release.sh index ce466a98d37..870451601f6 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -285,11 +285,7 @@ function kube::release::create_docker_images_for_server() { local images_dir="${RELEASE_IMAGES}/${arch}" mkdir -p "${images_dir}" - local -r docker_registry="k8s.gcr.io" - # TODO(thockin): Remove all traces of this after 1.10 release. - # The following is the old non-indirected registry name. To ease the - # transition to the new name (above), we are double-tagging saved images. 
- local -r deprecated_registry="gcr.io/google_containers" + local -r docker_registry="gcr.io/google_containers" # Docker tags cannot contain '+' local docker_tag="${KUBE_GIT_VERSION/+/_}" if [[ -z "${docker_tag}" ]]; then @@ -310,17 +306,14 @@ function kube::release::create_docker_images_for_server() { local docker_file_path="${docker_build_path}/Dockerfile" local binary_file_path="${binary_dir}/${binary_name}" local docker_image_tag="${docker_registry}" - local deprecated_image_tag="${deprecated_registry}" if [[ ${arch} == "amd64" ]]; then # If we are building a amd64 docker image, preserve the original # image name docker_image_tag+="/${binary_name}:${docker_tag}" - deprecated_image_tag+="/${binary_name}:${docker_tag}" else # If we are building a docker image for another architecture, # append the arch in the image tag docker_image_tag+="/${binary_name}-${arch}:${docker_tag}" - deprecated_image_tag+="/${binary_name}-${arch}:${docker_tag}" fi @@ -332,8 +325,7 @@ function kube::release::create_docker_images_for_server() { printf " FROM ${base_image} \n ADD ${binary_name} /usr/local/bin/${binary_name}\n" > ${docker_file_path} "${DOCKER[@]}" build --pull -q -t "${docker_image_tag}" ${docker_build_path} >/dev/null - "${DOCKER[@]}" tag "${docker_image_tag}" ${deprecated_image_tag} >/dev/null - "${DOCKER[@]}" save "${docker_image_tag}" ${deprecated_image_tag} > "${binary_dir}/${binary_name}.tar" + "${DOCKER[@]}" save "${docker_image_tag}" > "${binary_dir}/${binary_name}.tar" echo "${docker_tag}" > ${binary_dir}/${binary_name}.docker_tag rm -rf ${docker_build_path} ln "${binary_dir}/${binary_name}.tar" "${images_dir}/" @@ -352,7 +344,6 @@ function kube::release::create_docker_images_for_server() { # not a release kube::log::status "Deleting docker image ${docker_image_tag}" "${DOCKER[@]}" rmi ${docker_image_tag} &>/dev/null || true - "${DOCKER[@]}" rmi ${deprecated_image_tag} &>/dev/null || true fi ) & done diff --git a/build/pause/Makefile b/build/pause/Makefile index 
151396d4c80..93240e8fd33 100644 --- a/build/pause/Makefile +++ b/build/pause/Makefile @@ -14,7 +14,7 @@ .PHONY: all push push-legacy container clean orphan -REGISTRY ?= k8s.gcr.io +REGISTRY ?= gcr.io/google_containers IMAGE = $(REGISTRY)/pause-$(ARCH) LEGACY_AMD64_IMAGE = $(REGISTRY)/pause @@ -27,7 +27,7 @@ ARCH ?= amd64 ALL_ARCH = amd64 arm arm64 ppc64le s390x CFLAGS = -Os -Wall -Werror -static -DVERSION=v$(TAG)-$(REV) -KUBE_CROSS_IMAGE ?= k8s.gcr.io/kube-cross +KUBE_CROSS_IMAGE ?= gcr.io/google_containers/kube-cross KUBE_CROSS_VERSION ?= $(shell cat ../build-image/cross/VERSION) BIN = pause diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index 2a000c552df..7de0ffaf334 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -81,16 +81,16 @@ http_file( docker_pull( name = "debian-iptables-amd64", digest = "sha256:a3b936c0fb98a934eecd2cfb91f73658d402b29116084e778ce9ddb68e55383e", - registry = "k8s.gcr.io", - repository = "debian-iptables-amd64", + registry = "gcr.io", + repository = "google-containers/debian-iptables-amd64", tag = "v10", # ignored, but kept here for documentation ) docker_pull( name = "debian-hyperkube-base-amd64", digest = "sha256:fc1b461367730660ac5a40c1eb2d1b23221829acf8a892981c12361383b3742b", - registry = "k8s.gcr.io", - repository = "debian-hyperkube-base-amd64", + registry = "gcr.io", + repository = "google-containers/debian-hyperkube-base-amd64", tag = "0.8", # ignored, but kept here for documentation ) diff --git a/cluster/addons/addon-manager/Makefile b/cluster/addons/addon-manager/Makefile index ceed1c9697b..854cd4e2557 100644 --- a/cluster/addons/addon-manager/Makefile +++ b/cluster/addons/addon-manager/Makefile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-IMAGE=k8s.gcr.io/kube-addon-manager +IMAGE=gcr.io/google-containers/kube-addon-manager ARCH?=amd64 TEMP_DIR:=$(shell mktemp -d) VERSION=v8.4 diff --git a/cluster/addons/addon-manager/README.md b/cluster/addons/addon-manager/README.md index d8b231b6c5a..e9ae53458c9 100644 --- a/cluster/addons/addon-manager/README.md +++ b/cluster/addons/addon-manager/README.md @@ -40,20 +40,20 @@ The `addon-manager` is built for multiple architectures. ```console # Build for linux/amd64 (default) $ make push ARCH=amd64 -# ---> k8s.gcr.io/kube-addon-manager-amd64:VERSION -# ---> k8s.gcr.io/kube-addon-manager:VERSION (image with backwards-compatible naming) +# ---> gcr.io/google-containers/kube-addon-manager-amd64:VERSION +# ---> gcr.io/google-containers/kube-addon-manager:VERSION (image with backwards-compatible naming) $ make push ARCH=arm -# ---> k8s.gcr.io/kube-addon-manager-arm:VERSION +# ---> gcr.io/google-containers/kube-addon-manager-arm:VERSION $ make push ARCH=arm64 -# ---> k8s.gcr.io/kube-addon-manager-arm64:VERSION +# ---> gcr.io/google-containers/kube-addon-manager-arm64:VERSION $ make push ARCH=ppc64le -# ---> k8s.gcr.io/kube-addon-manager-ppc64le:VERSION +# ---> gcr.io/google-containers/kube-addon-manager-ppc64le:VERSION $ make push ARCH=s390x -# ---> k8s.gcr.io/kube-addon-manager-s390x:VERSION +# ---> gcr.io/google-containers/kube-addon-manager-s390x:VERSION ``` If you don't want to push the images, run `make` or `make build` instead diff --git a/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml index 50ffcab83d9..c66c3e07200 100644 --- a/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/calico-node-vertical-autoscaler-deployment.yaml @@ -17,7 +17,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: 
k8s.gcr.io/cpvpa-amd64:v0.6.0 + - image: gcr.io/google_containers/cpvpa-amd64:v0.6.0 name: autoscaler command: - /cpvpa diff --git a/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml index b338df79df0..4f493b8bcf3 100644 --- a/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/typha-horizontal-autoscaler-deployment.yaml @@ -17,7 +17,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2 + - image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2 name: autoscaler command: - /cluster-proportional-autoscaler diff --git a/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml b/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml index 4f559626855..c59be9af62a 100644 --- a/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml +++ b/cluster/addons/calico-policy-controller/typha-vertical-autoscaler-deployment.yaml @@ -17,7 +17,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: k8s.gcr.io/cpvpa-amd64:v0.6.0 + - image: gcr.io/google_containers/cpvpa-amd64:v0.6.0 name: autoscaler command: - /cpvpa diff --git a/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml b/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml index a2955031360..febec626fc1 100644 --- a/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml +++ b/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml @@ -24,7 +24,7 @@ spec: # Any image is permissible as long as: # 1. It serves a 404 page at / # 2. 
It serves 200 on a /healthz endpoint - image: k8s.gcr.io/defaultbackend:1.3 + image: gcr.io/google_containers/defaultbackend:1.3 livenessProbe: httpGet: path: /healthz diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 2bf907c6f0c..81b513281e6 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -71,7 +71,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: k8s.gcr.io/heapster-amd64:v1.5.0 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -84,13 +84,13 @@ spec: - /heapster - --source=kubernetes.summary_api:'' - --sink=gcm - - image: k8s.gcr.io/heapster-amd64:v1.5.0 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: eventer command: - /eventer - --source=kubernetes:'' - --sink=gcl - - image: k8s.gcr.io/addon-resizer:1.8.1 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -123,7 +123,7 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential - - image: k8s.gcr.io/addon-resizer:1.8.1 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: eventer-nanny resources: limits: diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index 67f0f3ac2ad..6896fccc232 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -71,7 +71,8 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: k8s.gcr.io/heapster-amd64:v1.5.0 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 + name: heapster livenessProbe: httpGet: @@ 
-85,13 +86,13 @@ spec: - --source=kubernetes.summary_api:'' - --sink=influxdb:http://monitoring-influxdb:8086 - --sink=gcm:?metrics=autoscaling - - image: k8s.gcr.io/heapster-amd64:v1.5.0 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: eventer command: - /eventer - --source=kubernetes:'' - --sink=gcl - - image: k8s.gcr.io/addon-resizer:1.8.1 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -124,7 +125,7 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential - - image: k8s.gcr.io/addon-resizer:1.8.1 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: eventer-nanny resources: limits: diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml index 9f19bf36cd2..2c389a340b8 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -71,7 +71,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: k8s.gcr.io/heapster-amd64:v1.5.0 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -84,13 +84,13 @@ spec: - /heapster - --source=kubernetes.summary_api:'' - --sink=influxdb:http://monitoring-influxdb:8086 - - image: k8s.gcr.io/heapster-amd64:v1.5.0 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: eventer command: - /eventer - --source=kubernetes:'' - --sink=influxdb:http://monitoring-influxdb:8086 - - image: k8s.gcr.io/addon-resizer:1.8.1 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: heapster-nanny resources: limits: @@ -123,7 +123,7 @@ spec: - --container=heapster - --poll-period=300000 - --estimator=exponential - - image: k8s.gcr.io/addon-resizer:1.8.1 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: eventer-nanny resources: limits: diff --git 
a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml index 7c78ed2c49c..d562c748471 100644 --- a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml @@ -29,7 +29,7 @@ spec: operator: "Exists" containers: - name: influxdb - image: k8s.gcr.io/heapster-influxdb-amd64:v1.3.3 + image: gcr.io/google_containers/heapster-influxdb-amd64:v1.3.3 resources: limits: cpu: 100m @@ -46,7 +46,7 @@ spec: - name: influxdb-persistent-storage mountPath: /data - name: grafana - image: k8s.gcr.io/heapster-grafana-amd64:v4.4.3 + image: gcr.io/google_containers/heapster-grafana-amd64:v4.4.3 env: resources: # keep request = limit to keep this container in guaranteed class diff --git a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml index 5b1b38bec81..85e8383adf8 100644 --- a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml @@ -56,7 +56,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: k8s.gcr.io/heapster-amd64:v1.5.0 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -71,7 +71,7 @@ spec: - --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110 # BEGIN_PROMETHEUS_TO_SD - name: prom-to-sd - image: k8s.gcr.io/prometheus-to-sd:v0.2.2 + image: gcr.io/google-containers/prometheus-to-sd:v0.2.2 command: - /monitor - --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count @@ -89,7 +89,7 @@ spec: fieldRef: fieldPath: metadata.namespace # 
END_PROMETHEUS_TO_SD - - image: k8s.gcr.io/addon-resizer:1.8.1 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: heapster-nanny resources: limits: diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml index 7eacad1e6f3..f82d1b70f22 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -56,7 +56,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: containers: - - image: k8s.gcr.io/heapster-amd64:v1.5.0 + - image: gcr.io/google_containers/heapster-amd64:v1.5.0 name: heapster livenessProbe: httpGet: @@ -68,7 +68,7 @@ spec: command: - /heapster - --source=kubernetes.summary_api:'' - - image: k8s.gcr.io/addon-resizer:1.8.1 + - image: gcr.io/google_containers/addon-resizer:1.8.1 name: heapster-nanny resources: limits: diff --git a/cluster/addons/dashboard/dashboard-controller.yaml b/cluster/addons/dashboard/dashboard-controller.yaml index 31bf7a24fd0..59bf7c4daf8 100644 --- a/cluster/addons/dashboard/dashboard-controller.yaml +++ b/cluster/addons/dashboard/dashboard-controller.yaml @@ -29,7 +29,7 @@ spec: spec: containers: - name: kubernetes-dashboard - image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.0 + image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.0 resources: limits: cpu: 100m diff --git a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml index cb413e8723c..9e4e38b8760 100644 --- a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml +++ b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml @@ -77,7 +77,7 @@ spec: spec: containers: - name: autoscaler - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 + image: 
gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2-r2 resources: requests: cpu: "20m" diff --git a/cluster/addons/dns/kube-dns.yaml.base b/cluster/addons/dns/kube-dns.yaml.base index c7fdf3afa22..e93884df0ca 100644 --- a/cluster/addons/dns/kube-dns.yaml.base +++ b/cluster/addons/dns/kube-dns.yaml.base @@ -94,7 +94,7 @@ spec: optional: true containers: - name: kubedns - image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -145,7 +145,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -184,7 +184,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/dns/kube-dns.yaml.in b/cluster/addons/dns/kube-dns.yaml.in index 0aa1196e631..12b09236723 100644 --- a/cluster/addons/dns/kube-dns.yaml.in +++ b/cluster/addons/dns/kube-dns.yaml.in @@ -94,7 +94,7 @@ spec: optional: true containers: - name: kubedns - image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -145,7 +145,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -184,7 +184,7 @@ spec: 
- name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/dns/kube-dns.yaml.sed b/cluster/addons/dns/kube-dns.yaml.sed index cf7794beb8f..101cf588e2d 100644 --- a/cluster/addons/dns/kube-dns.yaml.sed +++ b/cluster/addons/dns/kube-dns.yaml.sed @@ -94,7 +94,7 @@ spec: optional: true containers: - name: kubedns - image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -145,7 +145,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -184,7 +184,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml b/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml index 66b3caa36ab..05943c8c416 100644 --- a/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml +++ b/cluster/addons/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml @@ -23,4 +23,4 @@ spec: dnsPolicy: Default containers: - name: etcd-empty-dir-cleanup - image: k8s.gcr.io/etcd-empty-dir-cleanup:3.0.14.0 + image: gcr.io/google-containers/etcd-empty-dir-cleanup:3.0.14.0 diff --git a/cluster/addons/fluentd-elasticsearch/es-image/Makefile b/cluster/addons/fluentd-elasticsearch/es-image/Makefile index e6d950beae3..51222ad3800 100755 --- 
a/cluster/addons/fluentd-elasticsearch/es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/es-image/Makefile @@ -14,7 +14,7 @@ .PHONY: binary build push -PREFIX = k8s.gcr.io +PREFIX = gcr.io/google-containers IMAGE = elasticsearch TAG = v5.6.4 diff --git a/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml b/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml index 7e611010990..b6357f47ef3 100644 --- a/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml +++ b/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml @@ -73,7 +73,7 @@ spec: spec: serviceAccountName: elasticsearch-logging containers: - - image: k8s.gcr.io/elasticsearch:v5.6.4 + - image: gcr.io/google-containers/elasticsearch:v5.6.4 name: elasticsearch-logging resources: # need more cpu upon initialization, therefore burstable class diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml index 3690fe1271b..74242adce74 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml @@ -75,7 +75,7 @@ spec: serviceAccountName: fluentd-es containers: - name: fluentd-es - image: k8s.gcr.io/fluentd-elasticsearch:v2.0.2 + image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.2 env: - name: FLUENTD_ARGS value: --no-supervisor -q diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile index 8f95ecde77d..0b5fa8a487c 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile @@ -14,7 +14,7 @@ .PHONY: build push -PREFIX = k8s.gcr.io +PREFIX = gcr.io/google-containers IMAGE = fluentd-elasticsearch TAG = v2.0.2 diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md index 9a651d522f9..8b97511a009 
100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md @@ -4,11 +4,11 @@ that collects Docker container log files using [Fluentd][fluentd] and sends them to an instance of [Elasticsearch][elasticsearch]. This image is designed to be used as part of the [Kubernetes][kubernetes] cluster bring up process. The image resides at GCR under the name -[k8s.gcr.io/fluentd-elasticsearch][image]. +[gcr.io/google-containers/fluentd-elasticsearch][image]. [fluentd]: http://www.fluentd.org/ [elasticsearch]: https://www.elastic.co/products/elasticsearch [kubernetes]: https://kubernetes.io -[image]: https://k8s.gcr.io/fluentd-elasticsearch +[image]: https://gcr.io/google-containers/fluentd-elasticsearch [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md?pixel)]() diff --git a/cluster/addons/fluentd-gcp/event-exporter.yaml b/cluster/addons/fluentd-gcp/event-exporter.yaml index 6321b3844f3..246fa8c42bc 100644 --- a/cluster/addons/fluentd-gcp/event-exporter.yaml +++ b/cluster/addons/fluentd-gcp/event-exporter.yaml @@ -47,12 +47,12 @@ spec: serviceAccountName: event-exporter-sa containers: - name: event-exporter - image: k8s.gcr.io/event-exporter:v0.1.7 + image: gcr.io/google-containers/event-exporter:v0.1.7 command: - '/event-exporter' # BEGIN_PROMETHEUS_TO_SD - name: prometheus-to-sd-exporter - image: k8s.gcr.io/prometheus-to-sd:v0.2.2 + image: gcr.io/google-containers/prometheus-to-sd:v0.2.2 command: - /monitor - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index 6996462685d..dd516db77e0 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -27,7 +27,7 @@ spec: dnsPolicy: Default containers: - name: fluentd-gcp - image: 
k8s.gcr.io/fluentd-gcp:2.0.11 + image: gcr.io/google-containers/fluentd-gcp:2.0.11 env: - name: FLUENTD_ARGS value: --no-supervisor -q @@ -82,7 +82,7 @@ spec: fi; # BEGIN_PROMETHEUS_TO_SD - name: prometheus-to-sd-exporter - image: k8s.gcr.io/prometheus-to-sd:v0.2.2 + image: gcr.io/google-containers/prometheus-to-sd:v0.2.2 command: - /monitor - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons diff --git a/cluster/addons/ip-masq-agent/ip-masq-agent.yaml b/cluster/addons/ip-masq-agent/ip-masq-agent.yaml index c27a802cb75..f6bb21c01b9 100644 --- a/cluster/addons/ip-masq-agent/ip-masq-agent.yaml +++ b/cluster/addons/ip-masq-agent/ip-masq-agent.yaml @@ -28,7 +28,7 @@ spec: hostNetwork: true containers: - name: ip-masq-agent - image: k8s.gcr.io/ip-masq-agent-amd64:v2.0.2 + image: gcr.io/google-containers/ip-masq-agent-amd64:v2.0.2 resources: requests: cpu: 10m diff --git a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml index f18033b8d69..a710f917cd4 100644 --- a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml +++ b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml @@ -38,7 +38,7 @@ spec: dnsPolicy: Default containers: - name: metadata-proxy - image: k8s.gcr.io/metadata-proxy:v0.1.6 + image: gcr.io/google_containers/metadata-proxy:v0.1.6 securityContext: privileged: true # Request and limit resources to get guaranteed QoS. @@ -51,7 +51,7 @@ spec: cpu: "30m" # BEGIN_PROMETHEUS_TO_SD - name: prometheus-to-sd-exporter - image: k8s.gcr.io/prometheus-to-sd:v0.2.2 + image: gcr.io/google_containers/prometheus-to-sd:v0.2.2 # Request and limit resources to get guaranteed QoS. 
resources: requests: diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml index 0463ce8eacf..73375b2202b 100644 --- a/cluster/addons/metrics-server/metrics-server-deployment.yaml +++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml @@ -47,7 +47,7 @@ spec: serviceAccountName: metrics-server containers: - name: metrics-server - image: k8s.gcr.io/metrics-server-amd64:v0.2.0 + image: gcr.io/google_containers/metrics-server-amd64:v0.2.0 command: - /metrics-server - --source=kubernetes.summary_api:'' @@ -56,7 +56,7 @@ spec: name: https protocol: TCP - name: metrics-server-nanny - image: k8s.gcr.io/addon-resizer:1.8.1 + image: gcr.io/google_containers/addon-resizer:1.8.1 resources: limits: cpu: 100m diff --git a/cluster/addons/node-problem-detector/npd.yaml b/cluster/addons/node-problem-detector/npd.yaml index 714125ea58c..87365ad17f7 100644 --- a/cluster/addons/node-problem-detector/npd.yaml +++ b/cluster/addons/node-problem-detector/npd.yaml @@ -43,7 +43,7 @@ spec: spec: containers: - name: node-problem-detector - image: k8s.gcr.io/node-problem-detector:v0.4.1 + image: gcr.io/google_containers/node-problem-detector:v0.4.1 command: - "/bin/sh" - "-c" diff --git a/cluster/addons/python-image/Makefile b/cluster/addons/python-image/Makefile index d8f927a5b8d..6da4f7d3ce6 100644 --- a/cluster/addons/python-image/Makefile +++ b/cluster/addons/python-image/Makefile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-IMAGE=k8s.gcr.io/python +IMAGE=gcr.io/google_containers/python VERSION=v1 .PHONY: build push diff --git a/cluster/addons/registry/README.md b/cluster/addons/registry/README.md index 009e11f5863..59542355eda 100644 --- a/cluster/addons/registry/README.md +++ b/cluster/addons/registry/README.md @@ -199,7 +199,7 @@ spec: spec: containers: - name: kube-registry-proxy - image: k8s.gcr.io/kube-registry-proxy:0.4 + image: gcr.io/google_containers/kube-registry-proxy:0.4 resources: limits: cpu: 100m diff --git a/cluster/addons/registry/images/Makefile b/cluster/addons/registry/images/Makefile index 566bf5a3744..c1b64de1c20 100644 --- a/cluster/addons/registry/images/Makefile +++ b/cluster/addons/registry/images/Makefile @@ -15,7 +15,7 @@ .PHONY: build push vet test clean TAG = 0.4 -REPO = k8s.gcr.io/kube-registry-proxy +REPO = gcr.io/google_containers/kube-registry-proxy build: docker build --pull -t $(REPO):$(TAG) . diff --git a/cluster/common.sh b/cluster/common.sh index 088de5dd680..95e286e89d0 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -478,7 +478,7 @@ function stage-images() { local docker_cmd=("docker") - if [[ "${KUBE_DOCKER_REGISTRY}" =~ "gcr.io/" ]]; then + if [[ "${KUBE_DOCKER_REGISTRY}" == "gcr.io/"* ]]; then local docker_push_cmd=("gcloud" "docker") else local docker_push_cmd=("${docker_cmd[@]}") @@ -493,7 +493,7 @@ function stage-images() { ( "${docker_cmd[@]}" load -i "${temp_dir}/kubernetes/server/bin/${binary}.tar" "${docker_cmd[@]}" rmi "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}" 2>/dev/null || true - "${docker_cmd[@]}" tag "k8s.gcr.io/${binary}:${docker_tag}" "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}" + "${docker_cmd[@]}" tag "gcr.io/google_containers/${binary}:${docker_tag}" "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}" "${docker_push_cmd[@]}" push "${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_IMAGE_TAG}" ) &> "${temp_dir}/${binary}-push.log" & done diff --git a/cluster/gce/config-test.sh 
b/cluster/gce/config-test.sh index 6b87d53a2a7..124ea4b7a83 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -156,7 +156,7 @@ ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}" # Useful for scheduling heapster in large clusters with nodes of small size. HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}" -# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.1.10) if you need +# Set etcd image (e.g. gcr.io/google_containers/etcd) and version (e.g. 3.1.10) if you need # non-default version. ETCD_IMAGE="${TEST_ETCD_IMAGE:-}" ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}" diff --git a/cluster/gce/container-linux/configure-helper.sh b/cluster/gce/container-linux/configure-helper.sh index abb1d4a749f..4bd5811bbf8 100755 --- a/cluster/gce/container-linux/configure-helper.sh +++ b/cluster/gce/container-linux/configure-helper.sh @@ -677,7 +677,7 @@ function prepare-kube-proxy-manifest-variables { remove-salt-config-comments "${src_file}" local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig" - local kube_docker_registry="k8s.gcr.io" + local kube_docker_registry="gcr.io/google_containers" if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then kube_docker_registry=${KUBE_DOCKER_REGISTRY} fi @@ -859,7 +859,7 @@ function compute-master-manifest-variables { CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}}," CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}," fi - DOCKER_REGISTRY="k8s.gcr.io" + DOCKER_REGISTRY="gcr.io/google_containers" if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}" fi diff --git a/cluster/gce/container-linux/configure.sh b/cluster/gce/container-linux/configure.sh index 962a5c03312..16dcf27a044 100755 --- a/cluster/gce/container-linux/configure.sh +++ b/cluster/gce/container-linux/configure.sh @@ -146,12 +146,12 @@ function 
install-kube-binary-config { echo "Downloading k8s manifests tar" download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}" tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite - local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-k8s.gcr.io}" - if [[ "${kube_addon_registry}" != "k8s.gcr.io" ]]; then + local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}" + if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \ - xargs sed -ri "s@(image:\s.*)k8s.gcr.io@\1${kube_addon_registry}@" + xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@" find "${dst_dir}" -name \*.manifest -or -name \*.json | \ - xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@" + xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@" fi cp "${dst_dir}/kubernetes/gci-trusty/container-linux-configure-helper.sh" "${KUBE_HOME}/bin/configure-helper.sh" chmod -R 755 "${kube_bin}" diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 0bf30051c9e..930cbe19234 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1280,7 +1280,7 @@ function prepare-kube-proxy-manifest-variables { remove-salt-config-comments "${src_file}" local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig" - local kube_docker_registry="k8s.gcr.io" + local kube_docker_registry="gcr.io/google_containers" if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then kube_docker_registry=${KUBE_DOCKER_REGISTRY} fi @@ -1452,7 +1452,7 @@ function compute-master-manifest-variables { CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}}," CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}," fi - DOCKER_REGISTRY="k8s.gcr.io" + 
DOCKER_REGISTRY="gcr.io/google_containers" if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}" fi @@ -2331,7 +2331,7 @@ spec: - name: vol containers: - name: pv-recycler - image: k8s.gcr.io/busybox:1.27 + image: gcr.io/google_containers/busybox:1.27 command: - /bin/sh args: diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index f8ac61b6136..317abe9afbb 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -227,12 +227,12 @@ function install-kube-manifests { echo "Downloading k8s manifests tar" download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}" tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite - local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-k8s.gcr.io}" - if [[ "${kube_addon_registry}" != "k8s.gcr.io" ]]; then + local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}" + if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \ - xargs sed -ri "s@(image:\s.*)k8s.gcr.io@\1${kube_addon_registry}@" + xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@" find "${dst_dir}" -name \*.manifest -or -name \*.json | \ - xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@" + xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@" fi cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_BIN}/configure-helper.sh" cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_BIN}/health-monitor.sh" diff --git a/cluster/gce/gci/mounter/Makefile b/cluster/gce/gci/mounter/Makefile index 2af09207b77..72efa3b77c4 100644 --- a/cluster/gce/gci/mounter/Makefile +++ b/cluster/gce/gci/mounter/Makefile @@ -13,7 +13,7 @@ # limitations under the License. 
TAG=v2 -REGISTRY=k8s.gcr.io +REGISTRY=gcr.io/google_containers IMAGE=gci-mounter all: container diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 43d258e962e..34d710ce7fe 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -236,12 +236,12 @@ function set-preferred-region() { fi # If we're using regional GCR, and we're outside the US, go to the - # regional registry. The k8s.gcr.io registry is + # regional registry. The gcr.io/google_containers registry is # appropriate for US (for now). if [[ "${REGIONAL_KUBE_ADDONS}" == "true" ]] && [[ "${preferred}" != "us" ]]; then - KUBE_ADDON_REGISTRY="${preferred}.k8s.gcr.io" + KUBE_ADDON_REGISTRY="${preferred}.gcr.io/google_containers" else - KUBE_ADDON_REGISTRY="k8s.gcr.io" + KUBE_ADDON_REGISTRY="gcr.io/google_containers" fi if [[ "${ENABLE_DOCKER_REGISTRY_CACHE:-}" == "true" ]]; then diff --git a/cluster/get-kube-local.sh b/cluster/get-kube-local.sh index 21a8243dd94..972ebce6d70 100755 --- a/cluster/get-kube-local.sh +++ b/cluster/get-kube-local.sh @@ -97,7 +97,7 @@ function create_cluster { --pid=host \ --privileged=true \ -d \ - k8s.gcr.io/hyperkube-${arch}:${release} \ + gcr.io/google_containers/hyperkube-${arch}:${release} \ /hyperkube kubelet \ --containerized \ --hostname-override="127.0.0.1" \ diff --git a/cluster/images/etcd-empty-dir-cleanup/Makefile b/cluster/images/etcd-empty-dir-cleanup/Makefile index 0142216f2a0..d84a353ce00 100644 --- a/cluster/images/etcd-empty-dir-cleanup/Makefile +++ b/cluster/images/etcd-empty-dir-cleanup/Makefile @@ -15,7 +15,7 @@ .PHONY: build push ETCD_VERSION = 3.0.14 -IMAGE = k8s.gcr.io/etcd-empty-dir-cleanup +IMAGE = gcr.io/google-containers/etcd-empty-dir-cleanup TAG = 3.0.14.0 clean: diff --git a/cluster/images/etcd-version-monitor/Makefile b/cluster/images/etcd-version-monitor/Makefile index aea20eaa2b8..e061b900151 100644 --- a/cluster/images/etcd-version-monitor/Makefile +++ b/cluster/images/etcd-version-monitor/Makefile @@ -15,11 +15,11 @@ # Build the 
etcd-version-monitor image # # Usage: -# [GOLANG_VERSION=1.8.3] [REGISTRY=k8s.gcr.io] [TAG=test] make (build|push) +# [GOLANG_VERSION=1.8.3] [REGISTRY=gcr.io/google-containers] [TAG=test] make (build|push) # TODO(shyamjvs): Support architectures other than amd64 if needed. ARCH:=amd64 GOLANG_VERSION?=1.8.3 -REGISTRY?=k8s.gcr.io +REGISTRY?=gcr.io/google-containers TAG?=0.1.1 IMAGE:=$(REGISTRY)/etcd-version-monitor:$(TAG) CURRENT_DIR:=$(pwd) @@ -30,12 +30,8 @@ build: cp etcd-version-monitor.go Dockerfile $(TEMP_DIR) # Compile etcd-version-monitor. - docker run -it \ - -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes \ - -v $(TEMP_DIR):/build \ - -e GOARCH=$(ARCH) \ - golang:$(GOLANG_VERSION) \ - /bin/bash -c "CGO_ENABLED=0 go build -o /build/etcd-version-monitor k8s.io/kubernetes/cluster/images/etcd-version-monitor" + docker run -it -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \ + /bin/bash -c "CGO_ENABLED=0 go build -o /build/etcd-version-monitor k8s.io/kubernetes/cluster/images/etcd-version-monitor" docker build -t $(IMAGE) $(TEMP_DIR) diff --git a/cluster/images/etcd-version-monitor/README.md b/cluster/images/etcd-version-monitor/README.md index 16bd1d88ff2..3cfb675837d 100644 --- a/cluster/images/etcd-version-monitor/README.md +++ b/cluster/images/etcd-version-monitor/README.md @@ -18,7 +18,7 @@ latency metrics (`etcd_grpc_unary_requests_duration_seconds`) to be exposed. 
To run this tool as a docker container: - make build -- docker run --net=host -i -t k8s.gcr.io/etcd-version-monitor:test /etcd-version-monitor --logtostderr +- docker run --net=host -i -t gcr.io/google_containers/etcd-version-monitor:test /etcd-version-monitor --logtostderr To run this as a pod on the kubernetes cluster: - Place the 'etcd-version-monitor.yaml' in the manifests directory of diff --git a/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml b/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml index f632ac2ed6d..49f1db39819 100644 --- a/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml +++ b/cluster/images/etcd-version-monitor/etcd-version-monitor.yaml @@ -7,7 +7,7 @@ spec: hostNetwork: true containers: - name: etcd-version-monitor - image: k8s.gcr.io/etcd-version-monitor:0.1.0 + image: gcr.io/google-containers/etcd-version-monitor:0.1.0 command: - /etcd-version-monitor - --logtostderr diff --git a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile index 2681d3a0794..f972fbb757f 100644 --- a/cluster/images/etcd/Makefile +++ b/cluster/images/etcd/Makefile @@ -15,7 +15,7 @@ # Build the etcd image # # Usage: -# [TAGS=2.2.1 2.3.7 3.0.17 3.1.11] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push) +# [TAGS=2.2.1 2.3.7 3.0.17 3.1.11] [REGISTRY=gcr.io/google_containers] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push) # The image contains different etcd versions to simplify # upgrades. Thus be careful when removing any tag from here. @@ -29,7 +29,7 @@ TAGS?=2.2.1 2.3.7 3.0.17 3.1.11 REGISTRY_TAG?=3.1.11 ARCH?=amd64 -REGISTRY?=k8s.gcr.io +REGISTRY?=gcr.io/google_containers GOLANG_VERSION?=1.7.6 GOARM=7 TEMP_DIR:=$(shell mktemp -d) diff --git a/cluster/images/etcd/README.md b/cluster/images/etcd/README.md index 3b7f11ed7e8..e7dfde8bf61 100644 --- a/cluster/images/etcd/README.md +++ b/cluster/images/etcd/README.md @@ -10,20 +10,20 @@ For other architectures, `etcd` is cross-compiled from source. 
Arch-specific `bu ```console # Build for linux/amd64 (default) $ make push ARCH=amd64 -# ---> k8s.gcr.io/etcd-amd64:TAG -# ---> k8s.gcr.io/etcd:TAG +# ---> gcr.io/google_containers/etcd-amd64:TAG +# ---> gcr.io/google_containers/etcd:TAG $ make push ARCH=arm -# ---> k8s.gcr.io/etcd-arm:TAG +# ---> gcr.io/google_containers/etcd-arm:TAG $ make push ARCH=arm64 -# ---> k8s.gcr.io/etcd-arm64:TAG +# ---> gcr.io/google_containers/etcd-arm64:TAG $ make push ARCH=ppc64le -# ---> k8s.gcr.io/etcd-ppc64le:TAG +# ---> gcr.io/google_containers/etcd-ppc64le:TAG $ make push ARCH=s390x -# ---> k8s.gcr.io/etcd-s390x:TAG +# ---> gcr.io/google_containers/etcd-s390x:TAG ``` If you don't want to push the images, run `make` or `make build` instead diff --git a/cluster/images/hyperkube/BUILD b/cluster/images/hyperkube/BUILD index 9d77f735a2b..6ca2ff6d61e 100644 --- a/cluster/images/hyperkube/BUILD +++ b/cluster/images/hyperkube/BUILD @@ -27,7 +27,7 @@ docker_build( docker_bundle( name = "hyperkube", - images = {"k8s.gcr.io/hyperkube-amd64:{STABLE_DOCKER_TAG}": "hyperkube-internal"}, + images = {"gcr.io/google-containers/hyperkube-amd64:{STABLE_DOCKER_TAG}": "hyperkube-internal"}, stamp = True, ) diff --git a/cluster/images/hyperkube/Dockerfile b/cluster/images/hyperkube/Dockerfile index ab905603410..71d2300a04b 100644 --- a/cluster/images/hyperkube/Dockerfile +++ b/cluster/images/hyperkube/Dockerfile @@ -16,7 +16,7 @@ FROM BASEIMAGE # Create symlinks for each hyperkube server # Also create symlinks to /usr/local/bin/ where the server image binaries live, so the hyperkube image may be -# used instead of k8s.gcr.io/kube-* without any modifications. +# used instead of gcr.io/google_containers/kube-* without any modifications. # TODO: replace manual symlink creation with --make-symlink command once # cross-building with qemu supports go binaries. 
See #28702 # RUN /hyperkube --make-symlinks diff --git a/cluster/images/hyperkube/Makefile b/cluster/images/hyperkube/Makefile index 0dc17bbe45d..d7a8c7d936b 100644 --- a/cluster/images/hyperkube/Makefile +++ b/cluster/images/hyperkube/Makefile @@ -15,13 +15,13 @@ # Build the hyperkube image. # # Usage: -# [ARCH=amd64] [REGISTRY="k8s.gcr.io"] make (build|push) VERSION={some_released_version_of_kubernetes} +# [ARCH=amd64] [REGISTRY="gcr.io/google-containers"] make (build|push) VERSION={some_released_version_of_kubernetes} -REGISTRY?=k8s.gcr.io +REGISTRY?=gcr.io/google-containers ARCH?=amd64 HYPERKUBE_BIN?=_output/dockerized/bin/linux/$(ARCH)/hyperkube -BASEIMAGE=k8s.gcr.io/debian-hyperkube-base-$(ARCH):0.8 +BASEIMAGE=gcr.io/google-containers/debian-hyperkube-base-$(ARCH):0.8 TEMP_DIR:=$(shell mktemp -d -t hyperkubeXXXXXX) all: build diff --git a/cluster/images/hyperkube/README.md b/cluster/images/hyperkube/README.md index deed34b2c43..60fba63aa4b 100644 --- a/cluster/images/hyperkube/README.md +++ b/cluster/images/hyperkube/README.md @@ -10,23 +10,23 @@ $ build/run.sh make cross # Build for linux/amd64 (default) -# export REGISTRY=$HOST/$ORG to switch from k8s.gcr.io +# export REGISTRY=$HOST/$ORG to switch from gcr.io/google_containers $ make push VERSION={target_version} ARCH=amd64 -# ---> k8s.gcr.io/hyperkube-amd64:VERSION -# ---> k8s.gcr.io/hyperkube:VERSION (image with backwards-compatible naming) +# ---> gcr.io/google_containers/hyperkube-amd64:VERSION +# ---> gcr.io/google_containers/hyperkube:VERSION (image with backwards-compatible naming) $ make push VERSION={target_version} ARCH=arm -# ---> k8s.gcr.io/hyperkube-arm:VERSION +# ---> gcr.io/google_containers/hyperkube-arm:VERSION $ make push VERSION={target_version} ARCH=arm64 -# ---> k8s.gcr.io/hyperkube-arm64:VERSION +# ---> gcr.io/google_containers/hyperkube-arm64:VERSION $ make push VERSION={target_version} ARCH=ppc64le -# ---> k8s.gcr.io/hyperkube-ppc64le:VERSION +# ---> 
gcr.io/google_containers/hyperkube-ppc64le:VERSION $ make push VERSION={target_version} ARCH=s390x -# ---> k8s.gcr.io/hyperkube-s390x:VERSION +# ---> gcr.io/google_containers/hyperkube-s390x:VERSION ``` If you don't want to push the images, run `make` or `make build` instead diff --git a/cluster/images/kubemark/Makefile b/cluster/images/kubemark/Makefile index 83b00d5f244..6786aeea245 100644 --- a/cluster/images/kubemark/Makefile +++ b/cluster/images/kubemark/Makefile @@ -15,8 +15,8 @@ # build Kubemark image from currently built binaries containing both 'real' master and Hollow Node. # This makefile assumes that the kubemark binary is present in this directory. -REGISTRY ?= gcr.io -PROJECT ?= +REGISTRY?=gcr.io +PROJECT?=google_containers all: gcloudpush diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index 3ff17998c83..d89fa62448d 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -635,10 +635,10 @@ def launch_default_ingress_controller(): addon_path = '/root/cdk/addons/{}' context['defaultbackend_image'] = \ - "k8s.gcr.io/defaultbackend:1.4" + "gcr.io/google_containers/defaultbackend:1.4" if arch() == 's390x': context['defaultbackend_image'] = \ - "k8s.gcr.io/defaultbackend-s390x:1.4" + "gcr.io/google_containers/defaultbackend-s390x:1.4" # Render the default http backend (404) replicationcontroller manifest manifest = addon_path.format('default-http-backend.yaml') @@ -655,7 +655,7 @@ def launch_default_ingress_controller(): # Render the ingress replication controller manifest context['ingress_image'] = \ - "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.13" + "gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13" if arch() == 's390x': context['ingress_image'] = \ "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13" diff --git 
a/cluster/kubemark/gce/config-default.sh b/cluster/kubemark/gce/config-default.sh index 432465dd5cb..9d159603a94 100644 --- a/cluster/kubemark/gce/config-default.sh +++ b/cluster/kubemark/gce/config-default.sh @@ -61,7 +61,7 @@ RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100} KUBE_APISERVER_REQUEST_TIMEOUT=300 -# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.1.10) if you need +# Set etcd image (e.g. gcr.io/google_containers/etcd) and version (e.g. 3.1.10) if you need # non-default version. ETCD_IMAGE="${TEST_ETCD_IMAGE:-}" ETCD_VERSION="${TEST_ETCD_VERSION:-}" diff --git a/cluster/log-dump/logexporter-daemonset.yaml b/cluster/log-dump/logexporter-daemonset.yaml index 27aafa876f4..8099da1b9de 100644 --- a/cluster/log-dump/logexporter-daemonset.yaml +++ b/cluster/log-dump/logexporter-daemonset.yaml @@ -36,7 +36,7 @@ spec: spec: containers: - name: logexporter-test - image: k8s.gcr.io/logexporter:v0.1.1 + image: gcr.io/google-containers/logexporter:v0.1.1 env: - name: NODE_NAME valueFrom: diff --git a/cluster/restore-from-backup.sh b/cluster/restore-from-backup.sh index fec9efee3ab..912e877de98 100755 --- a/cluster/restore-from-backup.sh +++ b/cluster/restore-from-backup.sh @@ -160,7 +160,7 @@ if [ "${ETCD_API}" == "etcd2" ]; then echo "Starting etcd ${ETCD_VERSION} to restore data" image=$(docker run -d -v ${BACKUP_DIR}:/var/etcd/data \ --net=host -p ${etcd_port}:${etcd_port} \ - "k8s.gcr.io/etcd:${ETCD_VERSION}" /bin/sh -c \ + "gcr.io/google_containers/etcd:${ETCD_VERSION}" /bin/sh -c \ "/usr/local/bin/etcd --data-dir /var/etcd/data --force-new-cluster") if [ "$?" -ne "0" ]; then echo "Docker container didn't started correctly" @@ -191,7 +191,7 @@ elif [ "${ETCD_API}" == "etcd3" ]; then # Run etcdctl snapshot restore command and wait until it is finished. # setting with --name in the etcd manifest file and then it seems to work. 
docker run -v ${BACKUP_DIR}:/var/tmp/backup --env ETCDCTL_API=3 \ - "k8s.gcr.io/etcd:${ETCD_VERSION}" /bin/sh -c \ + "gcr.io/google_containers/etcd:${ETCD_VERSION}" /bin/sh -c \ "/usr/local/bin/etcdctl snapshot restore ${BACKUP_DIR}/${snapshot} --name ${NAME} --initial-cluster ${INITIAL_CLUSTER} --initial-advertise-peer-urls ${INITIAL_ADVERTISE_PEER_URLS}; mv /${NAME}.etcd/member /var/tmp/backup/" if [ "$?" -ne "0" ]; then echo "Docker container didn't started correctly" diff --git a/cluster/saltbase/install.sh b/cluster/saltbase/install.sh index 3d730388527..fdf6c634d15 100755 --- a/cluster/saltbase/install.sh +++ b/cluster/saltbase/install.sh @@ -81,7 +81,7 @@ for docker_file in "${KUBE_DOCKER_WRAPPED_BINARIES[@]}"; do done cat <>"${docker_images_sls_file}" -kube_docker_registry: '$(echo ${KUBE_DOCKER_REGISTRY:-k8s.gcr.io})' +kube_docker_registry: '$(echo ${KUBE_DOCKER_REGISTRY:-gcr.io/google_containers})' EOF # TODO(zmerlynn): Forgive me, this is really gross. But in order to @@ -89,13 +89,13 @@ EOF # have to templatize a couple of the add-ons anyways, manually # templatize the addon registry for regional support. When we get # better templating, we can fix this. -readonly kube_addon_registry="${KUBE_ADDON_REGISTRY:-k8s.gcr.io}" -if [[ "${kube_addon_registry}" != "k8s.gcr.io" ]]; then +readonly kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}" +if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then find /srv/salt-new -name \*.yaml -or -name \*.yaml.in | \ - xargs sed -ri "s@(image:\s.*)k8s.gcr.io@\1${kube_addon_registry}@" + xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@" # All the legacy .manifest files with hardcoded gcr.io are JSON. 
find /srv/salt-new -name \*.manifest -or -name \*.json | \ - xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@" + xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@" fi echo "+++ Swapping in new configs" diff --git a/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest b/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest index 20961835065..6e6ed2c2508 100644 --- a/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest +++ b/cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest @@ -25,15 +25,15 @@ "containers": [ { "name": "cluster-autoscaler", - "image": "k8s.gcr.io/cluster-autoscaler:v1.1.0", + "image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0", "livenessProbe": { "httpGet": { "path": "/health-check", "port": 8085 - }, + }, "initialDelaySeconds": 600, "periodSeconds": 60 - }, + }, "command": [ "./run.sh", "--kubernetes=http://127.0.0.1:8080?inClusterConfig=f", diff --git a/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest b/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest index 835b61a8c6f..d7d5a430642 100644 --- a/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest +++ b/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest @@ -14,13 +14,13 @@ spec: cpu: 100m limits: cpu: 100m - image: k8s.gcr.io/busybox:1.24 + image: gcr.io/google_containers/busybox:1.24 # TODO: Replace this with a go script that pulls in parallel? # Currently it takes ~5m to pull all e2e images, so this is OK, and # fewer moving parts is always better. # TODO: Replace the hardcoded image list with an autogen list; the list is # currently hard-coded for static verification. 
It was generated via: - # grep -Iiroh "gcr.io/.*" "${KUBE_ROOT}/test/e2e" | \ + # grep -Iiroh "gcr.io/google_.*" "${KUBE_ROOT}/test/e2e" | \ # sed -e "s/[,\")}]//g" | awk '{print $1}' | sort | uniq | tr '\n' ' ' # We always want the subshell to exit 0 so this pod doesn't end up # blocking tests in an Error state. @@ -29,52 +29,52 @@ spec: - -c - > for i in - k8s.gcr.io/alpine-with-bash:1.0 - k8s.gcr.io/apparmor-loader:0.1 - k8s.gcr.io/busybox:1.24 - k8s.gcr.io/dnsutils:e2e - k8s.gcr.io/e2e-net-amd64:1.0 - k8s.gcr.io/echoserver:1.6 - k8s.gcr.io/eptest:0.1 - k8s.gcr.io/fakegitserver:0.1 - k8s.gcr.io/galera-install:0.1 - k8s.gcr.io/hostexec:1.2 - k8s.gcr.io/invalid-image:invalid-tag - k8s.gcr.io/iperf:e2e - k8s.gcr.io/jessie-dnsutils:e2e - k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5 - k8s.gcr.io/liveness:e2e - k8s.gcr.io/logs-generator:v0.1.0 - k8s.gcr.io/mounttest:0.8 - k8s.gcr.io/mounttest-user:0.5 - k8s.gcr.io/mysql-galera:e2e - k8s.gcr.io/mysql-healthz:1.0 - k8s.gcr.io/netexec:1.4 - k8s.gcr.io/netexec:1.5 - k8s.gcr.io/netexec:1.7 - k8s.gcr.io/nettest:1.7 - k8s.gcr.io/nginx:1.7.9 - k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.1 - k8s.gcr.io/nginx-slim:0.7 - k8s.gcr.io/nginx-slim:0.8 - k8s.gcr.io/node-problem-detector:v0.3.0 - k8s.gcr.io/pause - k8s.gcr.io/porter:4524579c0eb935c056c8e75563b4e1eda31587e0 - k8s.gcr.io/portforwardtester:1.2 - k8s.gcr.io/redis-install-3.2.0:e2e - k8s.gcr.io/resource_consumer:beta4 - k8s.gcr.io/resource_consumer/controller:beta4 + gcr.io/google_containers/alpine-with-bash:1.0 + gcr.io/google_containers/apparmor-loader:0.1 + gcr.io/google_containers/busybox:1.24 + gcr.io/google_containers/dnsutils:e2e + gcr.io/google_containers/e2e-net-amd64:1.0 + gcr.io/google_containers/echoserver:1.6 + gcr.io/google_containers/eptest:0.1 + gcr.io/google_containers/fakegitserver:0.1 + gcr.io/google_containers/galera-install:0.1 + gcr.io/google_containers/hostexec:1.2 + gcr.io/google_containers/invalid-image:invalid-tag + gcr.io/google_containers/iperf:e2e 
+ gcr.io/google_containers/jessie-dnsutils:e2e + gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5 + gcr.io/google_containers/liveness:e2e + gcr.io/google_containers/logs-generator:v0.1.0 + gcr.io/google_containers/mounttest:0.8 + gcr.io/google_containers/mounttest-user:0.5 + gcr.io/google_containers/mysql-galera:e2e + gcr.io/google_containers/mysql-healthz:1.0 + gcr.io/google_containers/netexec:1.4 + gcr.io/google_containers/netexec:1.5 + gcr.io/google_containers/netexec:1.7 + gcr.io/google_containers/nettest:1.7 + gcr.io/google_containers/nginx:1.7.9 + gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.1 + gcr.io/google_containers/nginx-slim:0.7 + gcr.io/google_containers/nginx-slim:0.8 + gcr.io/google_containers/node-problem-detector:v0.3.0 + gcr.io/google_containers/pause + gcr.io/google_containers/porter:4524579c0eb935c056c8e75563b4e1eda31587e0 + gcr.io/google_containers/portforwardtester:1.2 + gcr.io/google_containers/redis-install-3.2.0:e2e + gcr.io/google_containers/resource_consumer:beta4 + gcr.io/google_containers/resource_consumer/controller:beta4 gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1 - k8s.gcr.io/servicelb:0.1 - k8s.gcr.io/test-webserver:e2e - k8s.gcr.io/update-demo:kitten - k8s.gcr.io/update-demo:nautilus - k8s.gcr.io/volume-ceph:0.1 - k8s.gcr.io/volume-gluster:0.2 - k8s.gcr.io/volume-iscsi:0.1 - k8s.gcr.io/volume-nfs:0.8 - k8s.gcr.io/volume-rbd:0.1 - k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e + gcr.io/google_containers/servicelb:0.1 + gcr.io/google_containers/test-webserver:e2e + gcr.io/google_containers/update-demo:kitten + gcr.io/google_containers/update-demo:nautilus + gcr.io/google_containers/volume-ceph:0.1 + gcr.io/google_containers/volume-gluster:0.2 + gcr.io/google_containers/volume-iscsi:0.1 + gcr.io/google_containers/volume-nfs:0.8 + gcr.io/google_containers/volume-rbd:0.1 + gcr.io/google_containers/zookeeper-install-3.5.0-alpha:e2e gcr.io/google_samples/gb-redisslave:nonexistent ; do echo $(date '+%X') 
pulling $i; docker pull $i 1>/dev/null; done; exit 0; securityContext: @@ -91,7 +91,7 @@ spec: cpu: 100m limits: cpu: 100m - image: k8s.gcr.io/kube-nethealth-amd64:1.0 + image: gcr.io/google_containers/kube-nethealth-amd64:1.0 command: - /bin/sh - -c diff --git a/cluster/saltbase/salt/etcd/etcd.manifest b/cluster/saltbase/salt/etcd/etcd.manifest index e5f940c723c..44419aa744f 100644 --- a/cluster/saltbase/salt/etcd/etcd.manifest +++ b/cluster/saltbase/salt/etcd/etcd.manifest @@ -39,7 +39,7 @@ "containers":[ { "name": "etcd-container", - "image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.1.10') }}", + "image": "{{ pillar.get('etcd_docker_repository', 'gcr.io/google_containers/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.1.10') }}", "resources": { "requests": { "cpu": {{ cpulimit }} diff --git a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml b/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml index d09d6e5df34..0b115c1f737 100644 --- a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml +++ b/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml @@ -13,7 +13,7 @@ spec: - name: kube-addon-manager # When updating version also bump it in: # - test/kubemark/resources/manifests/kube-addon-manager.yaml - image: k8s.gcr.io/kube-addon-manager:v6.5 + image: gcr.io/google-containers/kube-addon-manager:v6.5 command: - /bin/bash - -c diff --git a/cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml b/cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml index 9f05f3ac151..f00142ad65f 100644 --- a/cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml +++ b/cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml @@ -18,7 +18,7 @@ spec: spec: containers: - name: kube-registry-proxy - image: k8s.gcr.io/kube-registry-proxy:0.4 + image: gcr.io/google_containers/kube-registry-proxy:0.4 resources: limits: cpu: 100m diff --git 
a/cluster/saltbase/salt/l7-gcp/glbc.manifest b/cluster/saltbase/salt/l7-gcp/glbc.manifest index 4cc81331675..c808e5ee0e6 100644 --- a/cluster/saltbase/salt/l7-gcp/glbc.manifest +++ b/cluster/saltbase/salt/l7-gcp/glbc.manifest @@ -13,7 +13,7 @@ spec: terminationGracePeriodSeconds: 600 hostNetwork: true containers: - - image: k8s.gcr.io/glbc:0.9.7 + - image: gcr.io/google_containers/glbc:0.9.7 livenessProbe: httpGet: path: /healthz diff --git a/cluster/saltbase/salt/rescheduler/rescheduler.manifest b/cluster/saltbase/salt/rescheduler/rescheduler.manifest index 3ff18eb6bb4..ef9af1f5f7f 100644 --- a/cluster/saltbase/salt/rescheduler/rescheduler.manifest +++ b/cluster/saltbase/salt/rescheduler/rescheduler.manifest @@ -13,7 +13,7 @@ metadata: spec: hostNetwork: true containers: - - image: k8s.gcr.io/rescheduler:v0.3.1 + - image: gcr.io/google-containers/rescheduler:v0.3.1 name: rescheduler volumeMounts: - mountPath: /var/log/rescheduler.log diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go index 83dc4b19d04..ea28af88e5f 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go @@ -47,7 +47,7 @@ const ( // DefaultCertificatesDir defines default certificate directory DefaultCertificatesDir = "/etc/kubernetes/pki" // DefaultImageRepository defines default image registry - DefaultImageRepository = "k8s.gcr.io" + DefaultImageRepository = "gcr.io/google_containers" // DefaultManifestsDir defines default manifests directory DefaultManifestsDir = "/etc/kubernetes/manifests" diff --git a/cmd/kubeadm/app/images/images_test.go b/cmd/kubeadm/app/images/images_test.go index 466f204500d..6c1173e6c25 100644 --- a/cmd/kubeadm/app/images/images_test.go +++ b/cmd/kubeadm/app/images/images_test.go @@ -27,7 +27,7 @@ import ( const ( testversion = "v10.1.2-alpha.1.100+0123456789abcdef+SOMETHING" expected = "v10.1.2-alpha.1.100_0123456789abcdef_SOMETHING" - gcrPrefix = 
"k8s.gcr.io" + gcrPrefix = "gcr.io/google_containers" ) func TestGetCoreImage(t *testing.T) { diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go index 7921ce0696f..8a6ebff8dfa 100644 --- a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go @@ -66,7 +66,7 @@ spec: - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key - --authorization-mode=Node,RBAC - --etcd-servers=http://127.0.0.1:2379 - image: k8s.gcr.io/kube-apiserver-amd64:v1.7.4 + image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -148,7 +148,7 @@ spec: - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key - --authorization-mode=Node,RBAC - --etcd-servers=http://127.0.0.1:2379 - image: k8s.gcr.io/kube-apiserver-amd64:v1.7.4 + image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -220,7 +220,7 @@ spec: - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key - --address=127.0.0.1 - --use-service-account-credentials=true - image: k8s.gcr.io/kube-controller-manager-amd64:v1.7.4 + image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -295,7 +295,7 @@ spec: - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key - --address=127.0.0.1 - --use-service-account-credentials=true - image: k8s.gcr.io/kube-controller-manager-amd64:v1.7.4 + image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -368,7 +368,7 @@ spec: - --leader-elect=true - --kubeconfig=/etc/kubernetes/scheduler.conf - --address=127.0.0.1 - image: k8s.gcr.io/kube-scheduler-amd64:v1.7.4 + image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -419,7 +419,7 @@ spec: - --leader-elect=true - 
--kubeconfig=/etc/kubernetes/scheduler.conf - --address=127.0.0.1 - image: k8s.gcr.io/kube-scheduler-amd64:v1.7.4 + image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4 livenessProbe: failureThreshold: 8 httpGet: @@ -521,7 +521,7 @@ metadata: name: testpod spec: containers: - - image: k8s.gcr.io/busybox + - image: gcr.io/google_containers/busybox `, expectError: false, }, @@ -537,7 +537,7 @@ spec: "spec": { "containers": [ { - "image": "k8s.gcr.io/busybox" + "image": "gcr.io/google_containers/busybox" } ] } @@ -552,7 +552,7 @@ kind: Pod metadata: name: testpod spec: - - image: k8s.gcr.io/busybox + - image: gcr.io/google_containers/busybox `, expectError: true, }, diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index 5b131fbca1b..e51a1295792 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -61,7 +61,7 @@ etcd: image: "" keyFile: "" featureFlags: null -imageRepository: k8s.gcr.io +imageRepository: gcr.io/google_containers kubernetesVersion: %s networking: dnsDomain: cluster.local diff --git a/cmd/kubeadm/app/util/template_test.go b/cmd/kubeadm/app/util/template_test.go index 6856539d2f5..3a00e05e601 100644 --- a/cmd/kubeadm/app/util/template_test.go +++ b/cmd/kubeadm/app/util/template_test.go @@ -22,8 +22,8 @@ import ( const ( validTmpl = "image: {{ .ImageRepository }}/pause-{{ .Arch }}:3.0" - validTmplOut = "image: k8s.gcr.io/pause-amd64:3.0" - doNothing = "image: k8s.gcr.io/pause-amd64:3.0" + validTmplOut = "image: gcr.io/google_containers/pause-amd64:3.0" + doNothing = "image: gcr.io/google_containers/pause-amd64:3.0" invalidTmpl1 = "{{ .baz }/d}" invalidTmpl2 = "{{ !foobar }}" ) @@ -39,7 +39,7 @@ func TestParseTemplate(t *testing.T) { { template: validTmpl, data: struct{ ImageRepository, Arch string }{ - ImageRepository: "k8s.gcr.io", + ImageRepository: "gcr.io/google_containers", Arch: "amd64", }, output: 
validTmplOut, @@ -49,7 +49,7 @@ func TestParseTemplate(t *testing.T) { { template: doNothing, data: struct{ ImageRepository, Arch string }{ - ImageRepository: "k8s.gcr.io", + ImageRepository: "gcr.io/google_containers", Arch: "amd64", }, output: doNothing, diff --git a/cmd/kubelet/app/options/container_runtime.go b/cmd/kubelet/app/options/container_runtime.go index c760ceb0dc3..d1174ea044c 100644 --- a/cmd/kubelet/app/options/container_runtime.go +++ b/cmd/kubelet/app/options/container_runtime.go @@ -27,7 +27,7 @@ import ( const ( // When these values are updated, also update test/e2e/framework/util.go - defaultPodSandboxImageName = "k8s.gcr.io/pause" + defaultPodSandboxImageName = "gcr.io/google_containers/pause" defaultPodSandboxImageVersion = "3.0" // From pkg/kubelet/rkt/rkt.go to avoid circular import defaultRktAPIServiceEndpoint = "localhost:15441" diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index 717e5493163..cbb2f013aad 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -5194,7 +5194,7 @@ Examples:

names

-

Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]

+

Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]

true

string array

diff --git a/examples/cluster-dns/dns-backend-rc.yaml b/examples/cluster-dns/dns-backend-rc.yaml index 4af1b0dcbaa..9649d367b54 100644 --- a/examples/cluster-dns/dns-backend-rc.yaml +++ b/examples/cluster-dns/dns-backend-rc.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: dns-backend - image: k8s.gcr.io/example-dns-backend:v1 + image: gcr.io/google_containers/example-dns-backend:v1 ports: - name: backend-port containerPort: 8000 diff --git a/examples/cluster-dns/dns-frontend-pod.yaml b/examples/cluster-dns/dns-frontend-pod.yaml index 4a7695f1138..b424478383a 100644 --- a/examples/cluster-dns/dns-frontend-pod.yaml +++ b/examples/cluster-dns/dns-frontend-pod.yaml @@ -7,7 +7,7 @@ metadata: spec: containers: - name: dns-frontend - image: k8s.gcr.io/example-dns-frontend:v1 + image: gcr.io/google_containers/example-dns-frontend:v1 command: - python - client.py diff --git a/examples/cluster-dns/images/backend/Makefile b/examples/cluster-dns/images/backend/Makefile index 6a9fe53de77..67992ec2666 100644 --- a/examples/cluster-dns/images/backend/Makefile +++ b/examples/cluster-dns/images/backend/Makefile @@ -13,7 +13,7 @@ # limitations under the License. TAG = v1 -PREFIX = k8s.gcr.io +PREFIX = gcr.io/google_containers IMAGE = example-dns-backend all: push diff --git a/examples/cluster-dns/images/frontend/Makefile b/examples/cluster-dns/images/frontend/Makefile index 9b375525007..2f6337545fa 100644 --- a/examples/cluster-dns/images/frontend/Makefile +++ b/examples/cluster-dns/images/frontend/Makefile @@ -13,7 +13,7 @@ # limitations under the License. 
TAG = v1 -PREFIX = k8s.gcr.io +PREFIX = gcr.io/google_containers IMAGE = example-dns-frontend all: push diff --git a/examples/explorer/Makefile b/examples/explorer/Makefile index 59bef7c2f4a..35dd5bd7e7a 100644 --- a/examples/explorer/Makefile +++ b/examples/explorer/Makefile @@ -21,10 +21,10 @@ explorer: explorer.go CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' ./explorer.go container: explorer - docker build --pull -t k8s.gcr.io/explorer:$(TAG) . + docker build --pull -t gcr.io/google_containers/explorer:$(TAG) . push: container - gcloud docker -- push k8s.gcr.io/explorer:$(TAG) + gcloud docker -- push gcr.io/google_containers/explorer:$(TAG) clean: rm -f explorer diff --git a/examples/explorer/pod.yaml b/examples/explorer/pod.yaml index 0437a249229..2c26c3e1744 100644 --- a/examples/explorer/pod.yaml +++ b/examples/explorer/pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: explorer - image: k8s.gcr.io/explorer:1.0 + image: gcr.io/google_containers/explorer:1.0 args: ["-port=8080"] ports: - containerPort: 8080 diff --git a/examples/guestbook-go/Makefile b/examples/guestbook-go/Makefile index 605916fdff3..9c63819ebc0 100644 --- a/examples/guestbook-go/Makefile +++ b/examples/guestbook-go/Makefile @@ -15,9 +15,9 @@ # Build the guestbook-go example # Usage: -# [VERSION=v3] [REGISTRY="k8s.gcr.io"] make build +# [VERSION=v3] [REGISTRY="gcr.io/google_containers"] make build VERSION?=v3 -REGISTRY?=k8s.gcr.io +REGISTRY?=gcr.io/google_containers release: clean build push clean diff --git a/examples/guestbook-go/guestbook-controller.json b/examples/guestbook-go/guestbook-controller.json index 0e3553c865a..82c0e9134fd 100644 --- a/examples/guestbook-go/guestbook-controller.json +++ b/examples/guestbook-go/guestbook-controller.json @@ -22,7 +22,7 @@ "containers":[ { "name":"guestbook", - "image":"k8s.gcr.io/guestbook:v3", + "image":"gcr.io/google_containers/guestbook:v3", "ports":[ { "name":"http-server", diff --git 
a/examples/guestbook/all-in-one/guestbook-all-in-one.yaml b/examples/guestbook/all-in-one/guestbook-all-in-one.yaml index a3415d6a99d..7735c798983 100644 --- a/examples/guestbook/all-in-one/guestbook-all-in-one.yaml +++ b/examples/guestbook/all-in-one/guestbook-all-in-one.yaml @@ -30,7 +30,7 @@ spec: spec: containers: - name: master - image: k8s.gcr.io/redis:e2e # or just image: redis + image: gcr.io/google_containers/redis:e2e # or just image: redis resources: requests: cpu: 100m diff --git a/examples/guestbook/legacy/redis-master-controller.yaml b/examples/guestbook/legacy/redis-master-controller.yaml index 28208103b43..0bdf9761752 100644 --- a/examples/guestbook/legacy/redis-master-controller.yaml +++ b/examples/guestbook/legacy/redis-master-controller.yaml @@ -17,7 +17,7 @@ spec: spec: containers: - name: master - image: k8s.gcr.io/redis:e2e # or just image: redis + image: gcr.io/google_containers/redis:e2e # or just image: redis resources: requests: cpu: 100m diff --git a/examples/guestbook/redis-master-deployment.yaml b/examples/guestbook/redis-master-deployment.yaml index d457a09934f..3fbcc0f01cc 100644 --- a/examples/guestbook/redis-master-deployment.yaml +++ b/examples/guestbook/redis-master-deployment.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: master - image: k8s.gcr.io/redis:e2e # or just image: redis + image: gcr.io/google_containers/redis:e2e # or just image: redis resources: requests: cpu: 100m diff --git a/examples/kubectl-container/Makefile b/examples/kubectl-container/Makefile index 47fbfff923d..ea127f15142 100644 --- a/examples/kubectl-container/Makefile +++ b/examples/kubectl-container/Makefile @@ -37,11 +37,11 @@ tag: .tag container: $(if $(TAG),,$(error TAG is not defined. Use 'make tag' to see a suggestion)) - docker build --pull -t k8s.gcr.io/kubectl:$(TAG) . + docker build --pull -t gcr.io/google_containers/kubectl:$(TAG) . push: container $(if $(TAG),,$(error TAG is not defined. 
Use 'make tag' to see a suggestion)) - gcloud docker -- push k8s.gcr.io/kubectl:$(TAG) + gcloud docker -- push gcr.io/google_containers/kubectl:$(TAG) clean: rm -f kubectl diff --git a/examples/kubectl-container/pod.json b/examples/kubectl-container/pod.json index 540715a6c53..ed0ec6599c3 100644 --- a/examples/kubectl-container/pod.json +++ b/examples/kubectl-container/pod.json @@ -8,7 +8,7 @@ "containers": [ { "name": "bb", - "image": "k8s.gcr.io/busybox", + "image": "gcr.io/google_containers/busybox", "command": [ "sh", "-c", "sleep 5; wget -O - ${KUBERNETES_RO_SERVICE_HOST}:${KUBERNETES_RO_SERVICE_PORT}/api/v1/pods/; sleep 10000" ], @@ -36,7 +36,7 @@ }, { "name": "kubectl", - "image": "k8s.gcr.io/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty", + "image": "gcr.io/google_containers/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty", "imagePullPolicy": "Always", "args": [ "proxy", "-p", "8001" diff --git a/examples/spark/spark-gluster/spark-master-controller.yaml b/examples/spark/spark-gluster/spark-master-controller.yaml index 28cb6ac3fb7..d0b365b7135 100644 --- a/examples/spark/spark-gluster/spark-master-controller.yaml +++ b/examples/spark/spark-gluster/spark-master-controller.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: spark-master - image: k8s.gcr.io/spark:1.5.2_v1 + image: gcr.io/google_containers/spark:1.5.2_v1 command: ["/start-master"] ports: - containerPort: 7077 diff --git a/examples/spark/spark-gluster/spark-worker-controller.yaml b/examples/spark/spark-gluster/spark-worker-controller.yaml index 0030d8a6674..69cc3cec95e 100644 --- a/examples/spark/spark-gluster/spark-worker-controller.yaml +++ b/examples/spark/spark-gluster/spark-worker-controller.yaml @@ -17,7 +17,7 @@ spec: spec: containers: - name: spark-worker - image: k8s.gcr.io/spark:1.5.2_v1 + image: gcr.io/google_containers/spark:1.5.2_v1 command: ["/start-worker"] ports: - containerPort: 8888 diff --git a/examples/spark/spark-master-controller.yaml b/examples/spark/spark-master-controller.yaml 
index ceaef078d24..60fb7ba8a15 100644 --- a/examples/spark/spark-master-controller.yaml +++ b/examples/spark/spark-master-controller.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: spark-master - image: k8s.gcr.io/spark:1.5.2_v1 + image: gcr.io/google_containers/spark:1.5.2_v1 command: ["/start-master"] ports: - containerPort: 7077 diff --git a/examples/spark/spark-worker-controller.yaml b/examples/spark/spark-worker-controller.yaml index 3e5ed50ce05..9c748b3e048 100644 --- a/examples/spark/spark-worker-controller.yaml +++ b/examples/spark/spark-worker-controller.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: spark-worker - image: k8s.gcr.io/spark:1.5.2_v1 + image: gcr.io/google_containers/spark:1.5.2_v1 command: ["/start-worker"] ports: - containerPort: 8081 diff --git a/examples/spark/zeppelin-controller.yaml b/examples/spark/zeppelin-controller.yaml index 2f578fcfc2c..56bb90d421d 100644 --- a/examples/spark/zeppelin-controller.yaml +++ b/examples/spark/zeppelin-controller.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: zeppelin - image: k8s.gcr.io/zeppelin:v0.5.6_v1 + image: gcr.io/google_containers/zeppelin:v0.5.6_v1 ports: - containerPort: 8080 resources: diff --git a/examples/storage/cassandra/image/Dockerfile b/examples/storage/cassandra/image/Dockerfile index 5c4658d7f18..45b75951235 100644 --- a/examples/storage/cassandra/image/Dockerfile +++ b/examples/storage/cassandra/image/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM k8s.gcr.io/ubuntu-slim:0.9 +FROM gcr.io/google_containers/ubuntu-slim:0.9 ARG BUILD_DATE ARG VCS_REF diff --git a/examples/storage/redis/redis-controller.yaml b/examples/storage/redis/redis-controller.yaml index dab2f7f1ab8..fcb5e67cd6f 100644 --- a/examples/storage/redis/redis-controller.yaml +++ b/examples/storage/redis/redis-controller.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: redis - image: k8s.gcr.io/redis:v1 + image: gcr.io/google_containers/redis:v1 ports: - containerPort: 6379 resources: diff --git a/examples/storage/redis/redis-master.yaml b/examples/storage/redis/redis-master.yaml index 589de648f5f..57305a7a352 100644 --- a/examples/storage/redis/redis-master.yaml +++ b/examples/storage/redis/redis-master.yaml @@ -9,7 +9,7 @@ metadata: spec: containers: - name: master - image: k8s.gcr.io/redis:v1 + image: gcr.io/google_containers/redis:v1 env: - name: MASTER value: "true" diff --git a/examples/storage/redis/redis-sentinel-controller.yaml b/examples/storage/redis/redis-sentinel-controller.yaml index 6c4b14347a4..da09e10cbbc 100644 --- a/examples/storage/redis/redis-sentinel-controller.yaml +++ b/examples/storage/redis/redis-sentinel-controller.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: sentinel - image: k8s.gcr.io/redis:v1 + image: gcr.io/google_containers/redis:v1 env: - name: SENTINEL value: "true" diff --git a/examples/storage/rethinkdb/admin-pod.yaml b/examples/storage/rethinkdb/admin-pod.yaml index 12163909d61..eac07f33a5d 100644 --- a/examples/storage/rethinkdb/admin-pod.yaml +++ b/examples/storage/rethinkdb/admin-pod.yaml @@ -7,7 +7,7 @@ metadata: name: rethinkdb-admin spec: containers: - - image: k8s.gcr.io/rethinkdb:1.16.0_1 + - image: gcr.io/google_containers/rethinkdb:1.16.0_1 name: rethinkdb env: - name: POD_NAMESPACE diff --git a/examples/storage/rethinkdb/rc.yaml b/examples/storage/rethinkdb/rc.yaml index 23becb6e887..36b319191cd 100644 --- a/examples/storage/rethinkdb/rc.yaml +++ 
b/examples/storage/rethinkdb/rc.yaml @@ -16,7 +16,7 @@ spec: role: replicas spec: containers: - - image: k8s.gcr.io/rethinkdb:1.16.0_1 + - image: gcr.io/google_containers/rethinkdb:1.16.0_1 name: rethinkdb env: - name: POD_NAMESPACE diff --git a/examples/volumes/portworx/portworx-volume-pod.yaml b/examples/volumes/portworx/portworx-volume-pod.yaml index f44302f59ee..c5f195911a6 100644 --- a/examples/volumes/portworx/portworx-volume-pod.yaml +++ b/examples/volumes/portworx/portworx-volume-pod.yaml @@ -4,7 +4,7 @@ metadata: name: test-portworx-volume-pod spec: containers: - - image: k8s.gcr.io/test-webserver + - image: gcr.io/google_containers/test-webserver name: test-container volumeMounts: - mountPath: /test-portworx-volume diff --git a/examples/volumes/portworx/portworx-volume-pvcpod.yaml b/examples/volumes/portworx/portworx-volume-pvcpod.yaml index bef2d496567..fb92b320f10 100644 --- a/examples/volumes/portworx/portworx-volume-pvcpod.yaml +++ b/examples/volumes/portworx/portworx-volume-pvcpod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: k8s.gcr.io/test-webserver + image: gcr.io/google_containers/test-webserver volumeMounts: - name: test-volume mountPath: /test-portworx-volume diff --git a/examples/volumes/portworx/portworx-volume-pvcscpod.yaml b/examples/volumes/portworx/portworx-volume-pvcscpod.yaml index 8bdd5131927..464bf5d8fdd 100644 --- a/examples/volumes/portworx/portworx-volume-pvcscpod.yaml +++ b/examples/volumes/portworx/portworx-volume-pvcscpod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: k8s.gcr.io/test-webserver + image: gcr.io/google_containers/test-webserver volumeMounts: - name: test-volume mountPath: /test-portworx-volume diff --git a/examples/volumes/scaleio/pod-sc-pvc.yaml b/examples/volumes/scaleio/pod-sc-pvc.yaml index c94e7bc393e..ceed7b567e8 100644 --- a/examples/volumes/scaleio/pod-sc-pvc.yaml +++ b/examples/volumes/scaleio/pod-sc-pvc.yaml @@ -5,7 +5,7 @@ metadata: 
spec: containers: - name: pod-sio-small-container - image: k8s.gcr.io/test-webserver + image: gcr.io/google_containers/test-webserver volumeMounts: - mountPath: /test name: test-data diff --git a/examples/volumes/scaleio/pod.yaml b/examples/volumes/scaleio/pod.yaml index b13ec668c78..4b53b2b53cc 100644 --- a/examples/volumes/scaleio/pod.yaml +++ b/examples/volumes/scaleio/pod.yaml @@ -4,7 +4,7 @@ metadata: name: pod-0 spec: containers: - - image: k8s.gcr.io/test-webserver + - image: gcr.io/google_containers/test-webserver name: pod-0 volumeMounts: - mountPath: /test-pd diff --git a/examples/volumes/vsphere/simple-statefulset.yaml b/examples/volumes/vsphere/simple-statefulset.yaml index d9de93c2574..3684a3b64e1 100644 --- a/examples/volumes/vsphere/simple-statefulset.yaml +++ b/examples/volumes/vsphere/simple-statefulset.yaml @@ -27,7 +27,7 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx-slim:0.8 + image: gcr.io/google_containers/nginx-slim:0.8 ports: - containerPort: 80 name: web diff --git a/examples/volumes/vsphere/vsphere-volume-pod.yaml b/examples/volumes/vsphere/vsphere-volume-pod.yaml index 0204ad3a59b..8660d62e493 100644 --- a/examples/volumes/vsphere/vsphere-volume-pod.yaml +++ b/examples/volumes/vsphere/vsphere-volume-pod.yaml @@ -4,7 +4,7 @@ metadata: name: test-vmdk spec: containers: - - image: k8s.gcr.io/test-webserver + - image: gcr.io/google_containers/test-webserver name: test-container volumeMounts: - mountPath: /test-vmdk diff --git a/examples/volumes/vsphere/vsphere-volume-pvcpod.yaml b/examples/volumes/vsphere/vsphere-volume-pvcpod.yaml index 326c0031f32..291664adaa9 100644 --- a/examples/volumes/vsphere/vsphere-volume-pvcpod.yaml +++ b/examples/volumes/vsphere/vsphere-volume-pvcpod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: k8s.gcr.io/test-webserver + image: gcr.io/google_containers/test-webserver volumeMounts: - name: test-volume mountPath: /test-vmdk diff --git 
a/examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml b/examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml index c569a0b36d9..036aeb280cb 100644 --- a/examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml +++ b/examples/volumes/vsphere/vsphere-volume-pvcscpod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: k8s.gcr.io/test-webserver + image: gcr.io/google_containers/test-webserver volumeMounts: - name: test-volume mountPath: /test-vmdk diff --git a/hack/gen-swagger-doc/README.md b/hack/gen-swagger-doc/README.md index 5b35be4aff7..eacf5119d84 100644 --- a/hack/gen-swagger-doc/README.md +++ b/hack/gen-swagger-doc/README.md @@ -3,7 +3,7 @@ This folder contains the sources needed to build the gen-swagger-doc container. To build the container image, ``` -$ sudo docker build -t k8s.gcr.io/gen-swagger-docs:v1 . +$ sudo docker build -t gcr.io/google_containers/gen-swagger-docs:v1 . ``` To generate the html docs, diff --git a/hack/lib/swagger.sh b/hack/lib/swagger.sh index af872d807e5..25bf16d8181 100644 --- a/hack/lib/swagger.sh +++ b/hack/lib/swagger.sh @@ -117,7 +117,7 @@ kube::swagger::gen_api_ref_docs() { -v "${swagger_spec_path}":/swagger-source:z \ -v "${register_file}":/register.go:z \ --net=host -e "https_proxy=${KUBERNETES_HTTPS_PROXY:-}" \ - k8s.gcr.io/gen-swagger-docs:v8 \ + gcr.io/google_containers/gen-swagger-docs:v8 \ "${swagger_json_name}" done diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 609968bca33..98a0a6b958e 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -752,7 +752,7 @@ function start_kubelet { --privileged=true \ -i \ --cidfile=$KUBELET_CIDFILE \ - k8s.gcr.io/kubelet \ + gcr.io/google_containers/kubelet \ /kubelet --v=${LOG_LEVEL} --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --pod-manifest-path="${POD_MANIFEST_PATH}" --hostname-override="${HOSTNAME_OVERRIDE}" --cloud-provider="${CLOUD_PROVIDER}" --cloud-config="${CLOUD_CONFIG}" \ 
--address="127.0.0.1" --kubeconfig "$CERT_DIR"/kubelet.kubeconfig --port="$KUBELET_PORT" --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" &> $KUBELET_LOG & fi } diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index 9e05ffa867d..bce02c25fb0 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -37,15 +37,15 @@ KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248} CTLRMGR_PORT=${CTLRMGR_PORT:-10252} PROXY_HOST=127.0.0.1 # kubectl only serves on localhost. -IMAGE_NGINX="k8s.gcr.io/nginx:1.7.9" -IMAGE_DEPLOYMENT_R1="k8s.gcr.io/nginx:test-cmd" # deployment-revision1.yaml +IMAGE_NGINX="gcr.io/google-containers/nginx:1.7.9" +IMAGE_DEPLOYMENT_R1="gcr.io/google-containers/nginx:test-cmd" # deployment-revision1.yaml IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml -IMAGE_PERL="k8s.gcr.io/perl" -IMAGE_PAUSE_V2="k8s.gcr.io/pause:2.0" -IMAGE_DAEMONSET_R2="k8s.gcr.io/pause:latest" -IMAGE_DAEMONSET_R2_2="k8s.gcr.io/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml -IMAGE_STATEFULSET_R1="k8s.gcr.io/nginx-slim:0.7" -IMAGE_STATEFULSET_R2="k8s.gcr.io/nginx-slim:0.8" +IMAGE_PERL="gcr.io/google-containers/perl" +IMAGE_PAUSE_V2="gcr.io/google-containers/pause:2.0" +IMAGE_DAEMONSET_R2="gcr.io/google-containers/pause:latest" +IMAGE_DAEMONSET_R2_2="gcr.io/google-containers/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml +IMAGE_STATEFULSET_R1="gcr.io/google_containers/nginx-slim:0.7" +IMAGE_STATEFULSET_R2="gcr.io/google_containers/nginx-slim:0.8" # Expose kubectl directly for readability PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH @@ -719,9 +719,9 @@ run_pod_tests() { kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:' ## Patch pod from JSON can change image # Command - kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": 
"k8s.gcr.io/pause-amd64:3.0"}]}}' - # Post-condition: valid-pod POD has image k8s.gcr.io/pause-amd64:3.0 - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/pause-amd64:3.0:' + kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}' + # Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0 + kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:' ## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected ERROR_FILE="${KUBE_TEMP}/conflict-error" @@ -802,13 +802,13 @@ __EOF__ kubectl delete node node-v1-test "${kube_flags[@]}" ## kubectl edit can update the image field of a POD. 
tmp-editor.sh is a fake editor - echo -e "#!/bin/bash\n${SED} -i \"s/nginx/k8s.gcr.io\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh + echo -e "#!/bin/bash\n${SED} -i \"s/nginx/gcr.io\/google_containers\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh chmod +x /tmp/tmp-editor.sh # Pre-condition: valid-pod POD has image nginx kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' [[ "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true | grep Patch:)" ]] - # Post-condition: valid-pod POD has image k8s.gcr.io/serve_hostname - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/serve_hostname:' + # Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname + kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:' # cleaning rm /tmp/tmp-editor.sh @@ -2746,7 +2746,7 @@ run_deployment_tests() { create_and_use_new_namespace kube::log::status "Testing deployments" # Test kubectl create deployment (using default - old generator) - kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd + kubectl create deployment test-nginx-extensions --image=gcr.io/google-containers/nginx:test-cmd # Post-Condition: Deployment "nginx" is created. kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx' # and old generator was used, iow. old defaults are applied @@ -2761,7 +2761,7 @@ run_deployment_tests() { kubectl delete deployment test-nginx-extensions "${kube_flags[@]}" # Test kubectl create deployment - kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1 + kubectl create deployment test-nginx-apps --image=gcr.io/google-containers/nginx:test-cmd --generator=deployment-basic/apps.v1beta1 # Post-Condition: Deployment "nginx" is created. 
kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx' # and new generator was used, iow. new defaults are applied @@ -2806,7 +2806,7 @@ run_deployment_tests() { kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' # Create deployment - kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd + kubectl create deployment nginx-deployment --image=gcr.io/google-containers/nginx:test-cmd # Wait for rs to come up. kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1' # Delete the deployment with cascade set to false. @@ -3064,7 +3064,7 @@ run_rs_tests() { # Test set commands # Pre-condition: frontend replica set exists at generation 1 kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1' - kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd + kubectl set image rs/frontend "${kube_flags[@]}" *=gcr.io/google-containers/pause:test-cmd kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2' kubectl set env rs/frontend "${kube_flags[@]}" foo=bar kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3' @@ -3151,7 +3151,7 @@ run_daemonset_tests() { # Template Generation should stay 1 kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1' # Test set commands - kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd + kubectl set image daemonsets/bind "${kube_flags[@]}" *=gcr.io/google-containers/pause:test-cmd kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2' kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3' diff --git a/hack/testdata/deployment-multicontainer-resources.yaml 
b/hack/testdata/deployment-multicontainer-resources.yaml index b36ace094b3..533e2e46f47 100644 --- a/hack/testdata/deployment-multicontainer-resources.yaml +++ b/hack/testdata/deployment-multicontainer-resources.yaml @@ -16,9 +16,9 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx:test-cmd + image: gcr.io/google-containers/nginx:test-cmd ports: - containerPort: 80 - name: perl - image: k8s.gcr.io/perl + image: gcr.io/google-containers/perl terminationGracePeriodSeconds: 0 diff --git a/hack/testdata/deployment-multicontainer.yaml b/hack/testdata/deployment-multicontainer.yaml index 117483957fd..115888a0290 100644 --- a/hack/testdata/deployment-multicontainer.yaml +++ b/hack/testdata/deployment-multicontainer.yaml @@ -16,8 +16,8 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx:test-cmd + image: gcr.io/google-containers/nginx:test-cmd ports: - containerPort: 80 - name: perl - image: k8s.gcr.io/perl + image: gcr.io/google-containers/perl diff --git a/hack/testdata/deployment-revision1.yaml b/hack/testdata/deployment-revision1.yaml index 2bcb8edea73..cfbec36c454 100644 --- a/hack/testdata/deployment-revision1.yaml +++ b/hack/testdata/deployment-revision1.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx:test-cmd + image: gcr.io/google-containers/nginx:test-cmd ports: - containerPort: 80 diff --git a/hack/testdata/deployment-revision2.yaml b/hack/testdata/deployment-revision2.yaml index 1ed91c1375a..4b171f604bc 100644 --- a/hack/testdata/deployment-revision2.yaml +++ b/hack/testdata/deployment-revision2.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx:1.7.9 + image: gcr.io/google-containers/nginx:1.7.9 ports: - containerPort: 80 diff --git a/hack/testdata/filter/pod-apply-selector.yaml b/hack/testdata/filter/pod-apply-selector.yaml index f296b68b9af..73b83d6ba37 100644 --- a/hack/testdata/filter/pod-apply-selector.yaml +++ 
b/hack/testdata/filter/pod-apply-selector.yaml @@ -8,4 +8,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 diff --git a/hack/testdata/filter/pod-dont-apply.yaml b/hack/testdata/filter/pod-dont-apply.yaml index 9b8f9f6e900..7ea1610de90 100644 --- a/hack/testdata/filter/pod-dont-apply.yaml +++ b/hack/testdata/filter/pod-dont-apply.yaml @@ -8,4 +8,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 diff --git a/hack/testdata/multi-resource-json-modify.json b/hack/testdata/multi-resource-json-modify.json index 2f88c0ac8e3..3ff562e6d9a 100644 --- a/hack/testdata/multi-resource-json-modify.json +++ b/hack/testdata/multi-resource-json-modify.json @@ -43,7 +43,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "k8s.gcr.io/pause:2.0", + "image": "gcr.io/google-containers/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-json.json b/hack/testdata/multi-resource-json.json index 869fdc7cbfc..8ba2198e6d2 100644 --- a/hack/testdata/multi-resource-json.json +++ b/hack/testdata/multi-resource-json.json @@ -41,7 +41,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "k8s.gcr.io/pause:2.0", + "image": "gcr.io/google-containers/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-list-modify.json b/hack/testdata/multi-resource-list-modify.json index af902efcfc3..e0885c10241 100644 --- a/hack/testdata/multi-resource-list-modify.json +++ b/hack/testdata/multi-resource-list-modify.json @@ -47,7 +47,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "k8s.gcr.io/pause:2.0", + "image": "gcr.io/google-containers/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-list.json b/hack/testdata/multi-resource-list.json index 
17abca285aa..8918223aede 100644 --- a/hack/testdata/multi-resource-list.json +++ b/hack/testdata/multi-resource-list.json @@ -45,7 +45,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "k8s.gcr.io/pause:2.0", + "image": "gcr.io/google-containers/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-rclist-modify.json b/hack/testdata/multi-resource-rclist-modify.json index a28169c8ee5..369d73399b9 100644 --- a/hack/testdata/multi-resource-rclist-modify.json +++ b/hack/testdata/multi-resource-rclist-modify.json @@ -26,7 +26,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "k8s.gcr.io/pause:2.0", + "image": "gcr.io/google-containers/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" @@ -60,7 +60,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "k8s.gcr.io/pause:2.0", + "image": "gcr.io/google-containers/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-rclist.json b/hack/testdata/multi-resource-rclist.json index a28169c8ee5..369d73399b9 100644 --- a/hack/testdata/multi-resource-rclist.json +++ b/hack/testdata/multi-resource-rclist.json @@ -26,7 +26,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "k8s.gcr.io/pause:2.0", + "image": "gcr.io/google-containers/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" @@ -60,7 +60,7 @@ "spec":{ "containers":[{ "name": "mock-container", - "image": "k8s.gcr.io/pause:2.0", + "image": "gcr.io/google-containers/pause:2.0", "ports":[{ "containerPort":9949, "protocol":"TCP" diff --git a/hack/testdata/multi-resource-yaml-modify.yaml b/hack/testdata/multi-resource-yaml-modify.yaml index 067b75630ba..86fe824197b 100644 --- a/hack/testdata/multi-resource-yaml-modify.yaml +++ b/hack/testdata/multi-resource-yaml-modify.yaml @@ -29,7 +29,7 @@ spec: spec: containers: - name: mock-container - image: k8s.gcr.io/pause:2.0 + image: 
gcr.io/google-containers/pause:2.0 ports: - containerPort: 9949 protocol: TCP diff --git a/hack/testdata/multi-resource-yaml.yaml b/hack/testdata/multi-resource-yaml.yaml index 642ebdb47ed..bef9e88b2c2 100644 --- a/hack/testdata/multi-resource-yaml.yaml +++ b/hack/testdata/multi-resource-yaml.yaml @@ -27,7 +27,7 @@ spec: spec: containers: - name: mock-container - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 ports: - containerPort: 9949 protocol: TCP diff --git a/hack/testdata/null-propagation/deployment-l1.yaml b/hack/testdata/null-propagation/deployment-l1.yaml index 051fba91f8e..c5123abc5ee 100644 --- a/hack/testdata/null-propagation/deployment-l1.yaml +++ b/hack/testdata/null-propagation/deployment-l1.yaml @@ -10,4 +10,4 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx:1.7.9 + image: gcr.io/google-containers/nginx:1.7.9 diff --git a/hack/testdata/null-propagation/deployment-l2.yaml b/hack/testdata/null-propagation/deployment-l2.yaml index 3b2426a768d..ffcbcc099b1 100644 --- a/hack/testdata/null-propagation/deployment-l2.yaml +++ b/hack/testdata/null-propagation/deployment-l2.yaml @@ -14,5 +14,5 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx:1.7.9 + image: gcr.io/google-containers/nginx:1.7.9 terminationMessagePolicy: null diff --git a/hack/testdata/pod-apply.yaml b/hack/testdata/pod-apply.yaml index 235d1c8f0f3..a736a599d86 100644 --- a/hack/testdata/pod-apply.yaml +++ b/hack/testdata/pod-apply.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 diff --git a/hack/testdata/pod-with-api-env.yaml b/hack/testdata/pod-with-api-env.yaml index 3f76d210dfb..aef451ac2fe 100644 --- a/hack/testdata/pod-with-api-env.yaml +++ b/hack/testdata/pod-with-api-env.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: k8s.gcr.io/busybox + image: gcr.io/google_containers/busybox command: [ 
"/bin/sh", "-c", "env" ] env: - name: TEST_CMD_1 diff --git a/hack/testdata/pod-with-precision.json b/hack/testdata/pod-with-precision.json index 31a43896999..5aac946cd2b 100644 --- a/hack/testdata/pod-with-precision.json +++ b/hack/testdata/pod-with-precision.json @@ -9,7 +9,7 @@ "containers": [ { "name": "kubernetes-pause", - "image": "k8s.gcr.io/pause-amd64:3.0" + "image": "gcr.io/google_containers/pause-amd64:3.0" } ], "restartPolicy": "Never", diff --git a/hack/testdata/pod.yaml b/hack/testdata/pod.yaml index 92b504ef77a..8ccadcecdbd 100644 --- a/hack/testdata/pod.yaml +++ b/hack/testdata/pod.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 diff --git a/hack/testdata/prune/a.yaml b/hack/testdata/prune/a.yaml index badd10caa8b..aa86f28df41 100644 --- a/hack/testdata/prune/a.yaml +++ b/hack/testdata/prune/a.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 diff --git a/hack/testdata/prune/b.yaml b/hack/testdata/prune/b.yaml index f92fbc47f0a..6d212ead91f 100644 --- a/hack/testdata/prune/b.yaml +++ b/hack/testdata/prune/b.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 diff --git a/hack/testdata/recursive/deployment/deployment/nginx-broken.yaml b/hack/testdata/recursive/deployment/deployment/nginx-broken.yaml index 18c0d4ea7a2..6d98eda02ce 100644 --- a/hack/testdata/recursive/deployment/deployment/nginx-broken.yaml +++ b/hack/testdata/recursive/deployment/deployment/nginx-broken.yaml @@ -13,6 +13,6 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx:1.7.9 + image: gcr.io/google-containers/nginx:1.7.9 ports: - containerPort: 80 diff --git a/hack/testdata/recursive/deployment/deployment/nginx.yaml b/hack/testdata/recursive/deployment/deployment/nginx.yaml 
index f416d6c25cd..f956f0272b1 100644 --- a/hack/testdata/recursive/deployment/deployment/nginx.yaml +++ b/hack/testdata/recursive/deployment/deployment/nginx.yaml @@ -13,6 +13,6 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx:1.7.9 + image: gcr.io/google-containers/nginx:1.7.9 ports: - containerPort: 80 diff --git a/hack/testdata/recursive/deployment/nginx.yaml b/hack/testdata/recursive/deployment/nginx.yaml index 7988317f466..9842a65da02 100644 --- a/hack/testdata/recursive/deployment/nginx.yaml +++ b/hack/testdata/recursive/deployment/nginx.yaml @@ -13,6 +13,6 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx:1.7.9 + image: gcr.io/google-containers/nginx:1.7.9 ports: - containerPort: 80 diff --git a/hack/testdata/rollingupdate-daemonset-rv2.yaml b/hack/testdata/rollingupdate-daemonset-rv2.yaml index b807e958dd2..3214dcffe91 100644 --- a/hack/testdata/rollingupdate-daemonset-rv2.yaml +++ b/hack/testdata/rollingupdate-daemonset-rv2.yaml @@ -24,6 +24,6 @@ spec: namespaces: [] containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:latest + image: gcr.io/google-containers/pause:latest - name: app - image: k8s.gcr.io/nginx:test-cmd + image: gcr.io/google-containers/nginx:test-cmd diff --git a/hack/testdata/rollingupdate-daemonset.yaml b/hack/testdata/rollingupdate-daemonset.yaml index 4bc77ed5bfe..c8a9cdca21d 100644 --- a/hack/testdata/rollingupdate-daemonset.yaml +++ b/hack/testdata/rollingupdate-daemonset.yaml @@ -24,4 +24,4 @@ spec: namespaces: [] containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 diff --git a/hack/testdata/rollingupdate-statefulset-rv2.yaml b/hack/testdata/rollingupdate-statefulset-rv2.yaml index 4e4fc4e6091..fec5493ab69 100644 --- a/hack/testdata/rollingupdate-statefulset-rv2.yaml +++ b/hack/testdata/rollingupdate-statefulset-rv2.yaml @@ -18,7 +18,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: nginx - image: 
k8s.gcr.io/nginx-slim:0.8 + image: gcr.io/google_containers/nginx-slim:0.8 ports: - containerPort: 80 name: web @@ -27,7 +27,7 @@ spec: - -c - 'while true; do sleep 1; done' - name: pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 ports: - containerPort: 81 name: web-2 diff --git a/hack/testdata/rollingupdate-statefulset.yaml b/hack/testdata/rollingupdate-statefulset.yaml index ffdc242e17c..2acbf0f322b 100644 --- a/hack/testdata/rollingupdate-statefulset.yaml +++ b/hack/testdata/rollingupdate-statefulset.yaml @@ -18,7 +18,7 @@ spec: terminationGracePeriodSeconds: 5 containers: - name: nginx - image: k8s.gcr.io/nginx-slim:0.7 + image: gcr.io/google_containers/nginx-slim:0.7 ports: - containerPort: 80 name: web diff --git a/hack/testdata/sorted-pods/sorted-pod1.yaml b/hack/testdata/sorted-pods/sorted-pod1.yaml index aa767a2ea24..fba02b9017c 100644 --- a/hack/testdata/sorted-pods/sorted-pod1.yaml +++ b/hack/testdata/sorted-pods/sorted-pod1.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 diff --git a/hack/testdata/sorted-pods/sorted-pod2.yaml b/hack/testdata/sorted-pods/sorted-pod2.yaml index f05040569b1..f0ab7e906ea 100644 --- a/hack/testdata/sorted-pods/sorted-pod2.yaml +++ b/hack/testdata/sorted-pods/sorted-pod2.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 diff --git a/hack/testdata/sorted-pods/sorted-pod3.yaml b/hack/testdata/sorted-pods/sorted-pod3.yaml index e02c501fa12..5d166786c77 100644 --- a/hack/testdata/sorted-pods/sorted-pod3.yaml +++ b/hack/testdata/sorted-pods/sorted-pod3.yaml @@ -7,4 +7,4 @@ metadata: spec: containers: - name: kubernetes-pause - image: k8s.gcr.io/pause:2.0 + image: gcr.io/google-containers/pause:2.0 diff --git a/pkg/api/testing/deep_copy_test.go b/pkg/api/testing/deep_copy_test.go index 
a93ccd00aac..27c9f42f6aa 100644 --- a/pkg/api/testing/deep_copy_test.go +++ b/pkg/api/testing/deep_copy_test.go @@ -64,7 +64,7 @@ var benchmarkPod api.Pod = api.Pod{ Containers: []api.Container{ { Name: "etcd-container", - Image: "k8s.gcr.io/etcd:2.0.9", + Image: "gcr.io/google_containers/etcd:2.0.9", Command: []string{ "/usr/local/bin/etcd", "--addr", @@ -120,7 +120,7 @@ var benchmarkPod api.Pod = api.Pod{ }, Ready: true, RestartCount: 0, - Image: "k8s.gcr.io/etcd:2.0.9", + Image: "gcr.io/google_containers/etcd:2.0.9", ImageID: "docker://b6b9a86dc06aa1361357ca1b105feba961f6a4145adca6c54e142c0be0fe87b0", ContainerID: "docker://3cbbf818f1addfc252957b4504f56ef2907a313fe6afc47fc75373674255d46d", }, diff --git a/pkg/api/testing/replication_controller_example.json b/pkg/api/testing/replication_controller_example.json index 8f858c023f2..70eef1cff32 100644 --- a/pkg/api/testing/replication_controller_example.json +++ b/pkg/api/testing/replication_controller_example.json @@ -47,7 +47,7 @@ "containers": [ { "name": "elasticsearch-logging", - "image": "k8s.gcr.io/elasticsearch:1.0", + "image": "gcr.io/google_containers/elasticsearch:1.0", "ports": [ { "name": "db", diff --git a/pkg/kubectl/validation/testdata/v1/invalidPod1.json b/pkg/kubectl/validation/testdata/v1/invalidPod1.json index 384d1857916..d935742d77c 100644 --- a/pkg/kubectl/validation/testdata/v1/invalidPod1.json +++ b/pkg/kubectl/validation/testdata/v1/invalidPod1.json @@ -11,7 +11,7 @@ "containers": [ { "name": "master", - "image": "gcr.io/fake_project/fake_image:fake_tag", + "image": "gcr.io/fake_project/fake_image:fake_tag", "args": "this is a bad command" } ] diff --git a/pkg/kubectl/validation/testdata/v1/invalidPod3.json b/pkg/kubectl/validation/testdata/v1/invalidPod3.json index 69e0e853898..4d99181dc07 100644 --- a/pkg/kubectl/validation/testdata/v1/invalidPod3.json +++ b/pkg/kubectl/validation/testdata/v1/invalidPod3.json @@ -14,7 +14,7 @@ "containers": [ { "name": "apache-php", - "image": 
"gcr.io/fake_project/fake_image:fake_tag", + "image": "gcr.io/fake_project/fake_image:fake_tag", "ports": [ { "name": "apache", diff --git a/pkg/kubectl/validation/testdata/v1/invalidPod4.yaml b/pkg/kubectl/validation/testdata/v1/invalidPod4.yaml index a6958db5eb6..f02bf7b336b 100644 --- a/pkg/kubectl/validation/testdata/v1/invalidPod4.yaml +++ b/pkg/kubectl/validation/testdata/v1/invalidPod4.yaml @@ -11,4 +11,4 @@ spec: args: - command: - - + - \ No newline at end of file diff --git a/pkg/kubelet/dockershim/docker_sandbox.go b/pkg/kubelet/dockershim/docker_sandbox.go index bc81d5a8e00..b595e310096 100644 --- a/pkg/kubelet/dockershim/docker_sandbox.go +++ b/pkg/kubelet/dockershim/docker_sandbox.go @@ -36,7 +36,7 @@ import ( ) const ( - defaultSandboxImage = "k8s.gcr.io/pause-amd64:3.0" + defaultSandboxImage = "gcr.io/google_containers/pause-amd64:3.0" // Various default sandbox resources requests/limits. defaultSandboxCPUshares int64 = 2 diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index 79ee6e88e25..38636f70e1d 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -96,7 +96,7 @@ func generateImageTags() []string { // that kubelet report up to maxNamesPerImageInNodeStatus tags. count := rand.IntnRange(maxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1) for ; count > 0; count-- { - tagList = append(tagList, "k8s.gcr.io:v"+strconv.Itoa(count)) + tagList = append(tagList, "gcr.io/google_containers:v"+strconv.Itoa(count)) } return tagList } @@ -492,11 +492,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) { // images will be sorted from max to min in node status. 
Images: []v1.ContainerImage{ { - Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"}, + Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, SizeBytes: 456, }, { - Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"}, + Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"}, SizeBytes: 123, }, }, @@ -680,11 +680,11 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { }, Images: []v1.ContainerImage{ { - Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"}, + Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, SizeBytes: 456, }, { - Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"}, + Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"}, SizeBytes: 123, }, }, diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 823a2bb8d13..5019121d43a 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -127,12 +127,12 @@ func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubel imageList := []kubecontainer.Image{ { ID: "abc", - RepoTags: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"}, + RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"}, Size: 123, }, { ID: "efg", - RepoTags: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"}, + RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, Size: 456, }, } diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index 1d35438c306..7df9f43d79a 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -453,7 +453,7 @@ type MountedVolume struct { // name: test-pd // spec: // containers: - // - image: k8s.gcr.io/test-webserver + // - image: gcr.io/google_containers/test-webserver // name: test-container // volumeMounts: // - mountPath: /test-pd @@ -491,7 +491,7 @@ type 
MountedVolume struct { // name: test-pd // spec: // containers: - // - image: k8s.gcr.io/test-webserver + // - image: gcr.io/google_containers/test-webserver // name: test-container // volumeMounts: // - mountPath: /test-pd diff --git a/pkg/volume/util/operationexecutor/operation_executor_test.go b/pkg/volume/util/operationexecutor/operation_executor_test.go index 6aabd54b1fb..18e68a3ab0d 100644 --- a/pkg/volume/util/operationexecutor/operation_executor_test.go +++ b/pkg/volume/util/operationexecutor/operation_executor_test.go @@ -493,7 +493,7 @@ func getTestPodWithSecret(podName, secretName string) *v1.Pod { Containers: []v1.Container{ { Name: "secret-volume-test", - Image: "k8s.gcr.io/mounttest:0.8", + Image: "gcr.io/google_containers/mounttest:0.8", Args: []string{ "--file_content=/etc/secret-volume/data-1", "--file_mode=/etc/secret-volume/data-1"}, @@ -532,7 +532,7 @@ func getTestPodWithGCEPD(podName, pdName string) *v1.Pod { Containers: []v1.Container{ { Name: "pd-volume-test", - Image: "k8s.gcr.io/mounttest:0.8", + Image: "gcr.io/google_containers/mounttest:0.8", Args: []string{ "--file_content=/etc/pd-volume/data-1", }, diff --git a/pkg/volume/util/util_test.go b/pkg/volume/util/util_test.go index 1b56485e137..b11be33eeb2 100644 --- a/pkg/volume/util/util_test.go +++ b/pkg/volume/util/util_test.go @@ -162,7 +162,7 @@ metadata: name: testpod spec: containers: - - image: k8s.gcr.io/busybox + - image: gcr.io/google_containers/busybox `, false, }, @@ -179,7 +179,7 @@ spec: "spec": { "containers": [ { - "image": "k8s.gcr.io/busybox" + "image": "gcr.io/google_containers/busybox" } ] } @@ -195,7 +195,7 @@ kind: Pod metadata: name: testpod spec: - - image: k8s.gcr.io/busybox + - image: gcr.io/google_containers/busybox `, true, }, diff --git a/pkg/volume/util_test.go b/pkg/volume/util_test.go index 011793defe0..273722a0c3d 100644 --- a/pkg/volume/util_test.go +++ b/pkg/volume/util_test.go @@ -94,8 +94,8 @@ func TestRecyclerPod(t *testing.T) { // Pod gets Running and 
Succeeded newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""), newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"), - newEvent(v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""), - newEvent(v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""), + newEvent(v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""), + newEvent(v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""), newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"), newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"), newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""), @@ -103,8 +103,8 @@ func TestRecyclerPod(t *testing.T) { }, expectedEvents: []mockEvent{ {v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"}, - {v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""}, - {v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""}, + {v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""}, + {v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""}, {v1.EventTypeNormal, "Created container with docker id 83d929aeac82"}, {v1.EventTypeNormal, "Started container with docker id 83d929aeac82"}, }, diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index ab42a130c38..2cf7941e9dd 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -618,7 +618,7 @@ message Container { // Describe a container image message ContainerImage { // Names by which this image is known. - // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // e.g. 
["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] repeated string names = 1; // The size of the image in bytes. diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 6cfbfc8eaa6..49ef6109276 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -3910,7 +3910,7 @@ type PodSignature struct { // Describe a container image type ContainerImage struct { // Names by which this image is known. - // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` // The size of the image in bytes. // +optional diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index d89be951551..80cacc974e5 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -308,7 +308,7 @@ func (Container) SwaggerDoc() map[string]string { var map_ContainerImage = map[string]string{ "": "Describe a container image", - "names": "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "names": "Names by which this image is known. e.g. 
[\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", "sizeBytes": "The size of the image in bytes.", } diff --git a/test/e2e/testing-manifests/ingress/http/rc.yaml b/test/e2e/testing-manifests/ingress/http/rc.yaml index a4bcd5e3959..9b7d3b624db 100644 --- a/test/e2e/testing-manifests/ingress/http/rc.yaml +++ b/test/e2e/testing-manifests/ingress/http/rc.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: echoheaders - image: k8s.gcr.io/echoserver:1.6 + image: gcr.io/google_containers/echoserver:1.6 ports: - containerPort: 8080 readinessProbe: diff --git a/test/e2e/testing-manifests/ingress/nginx/rc.yaml b/test/e2e/testing-manifests/ingress/nginx/rc.yaml index b80b0837a17..9d21ff55003 100644 --- a/test/e2e/testing-manifests/ingress/nginx/rc.yaml +++ b/test/e2e/testing-manifests/ingress/nginx/rc.yaml @@ -17,7 +17,7 @@ spec: spec: terminationGracePeriodSeconds: 0 containers: - - image: k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.1 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.1 livenessProbe: httpGet: path: /healthz diff --git a/test/e2e/testing-manifests/ingress/static-ip/rc.yaml b/test/e2e/testing-manifests/ingress/static-ip/rc.yaml index 391ad674448..abf9b036edd 100644 --- a/test/e2e/testing-manifests/ingress/static-ip/rc.yaml +++ b/test/e2e/testing-manifests/ingress/static-ip/rc.yaml @@ -11,6 +11,6 @@ spec: spec: containers: - name: echoheaders-https - image: k8s.gcr.io/echoserver:1.6 + image: gcr.io/google_containers/echoserver:1.6 ports: - containerPort: 8080 diff --git a/test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml b/test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml index b1ce4229cb0..528aa25a125 100644 --- a/test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml +++ b/test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml @@ -17,7 +17,7 @@ spec: version: v1 spec: containers: - - image: k8s.gcr.io/servicelb:0.1 + - image: 
gcr.io/google_containers/servicelb:0.1 imagePullPolicy: Always livenessProbe: httpGet: diff --git a/test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml b/test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml index f09c090d5b3..c2aa34ca1fa 100644 --- a/test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml +++ b/test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml @@ -12,7 +12,7 @@ spec: spec: containers: - name: netexec - image: k8s.gcr.io/netexec:1.4 + image: gcr.io/google_containers/netexec:1.4 ports: - containerPort: 8080 # This is to force these pods to land on different hosts. diff --git a/test/e2e/testing-manifests/statefulset/cassandra/tester.yaml b/test/e2e/testing-manifests/statefulset/cassandra/tester.yaml index 8139cf5f99a..65699f8e3cf 100644 --- a/test/e2e/testing-manifests/statefulset/cassandra/tester.yaml +++ b/test/e2e/testing-manifests/statefulset/cassandra/tester.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: test-server - image: k8s.gcr.io/cassandra-e2e-test:0.1 + image: gcr.io/google-containers/cassandra-e2e-test:0.1 imagePullPolicy: Always ports: - containerPort: 8080 diff --git a/test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml b/test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml index d91eda411a4..4ddeb8be398 100644 --- a/test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml +++ b/test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: etcd - image: k8s.gcr.io/etcd-amd64:2.2.5 + image: gcr.io/google_containers/etcd-amd64:2.2.5 imagePullPolicy: Always ports: - containerPort: 2380 diff --git a/test/e2e/testing-manifests/statefulset/etcd/tester.yaml b/test/e2e/testing-manifests/statefulset/etcd/tester.yaml index ee53bc14196..c5ea0b90c14 100644 --- a/test/e2e/testing-manifests/statefulset/etcd/tester.yaml +++ b/test/e2e/testing-manifests/statefulset/etcd/tester.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: 
test-server - image: k8s.gcr.io/etcd-statefulset-e2e-test:0.0 + image: gcr.io/google-containers/etcd-statefulset-e2e-test:0.0 imagePullPolicy: Always ports: - containerPort: 8080 diff --git a/test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml b/test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml index facfb82802e..4d982ba218a 100644 --- a/test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml +++ b/test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml @@ -12,7 +12,7 @@ spec: spec: initContainers: - name: install - image: k8s.gcr.io/galera-install:0.1 + image: gcr.io/google_containers/galera-install:0.1 imagePullPolicy: Always args: - "--work-dir=/work-dir" @@ -41,7 +41,7 @@ spec: mountPath: "/etc/mysql" containers: - name: mysql - image: k8s.gcr.io/mysql-galera:e2e + image: gcr.io/google_containers/mysql-galera:e2e ports: - containerPort: 3306 name: mysql @@ -55,7 +55,7 @@ spec: - --defaults-file=/etc/mysql/my-galera.cnf - --user=root readinessProbe: - # TODO: If docker exec is buggy just use k8s.gcr.io/mysql-healthz:1.0 + # TODO: If docker exec is buggy just use gcr.io/google_containers/mysql-healthz:1.0 exec: command: - sh diff --git a/test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml b/test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml index 3a60b3473a6..c7e7ff8d4f8 100644 --- a/test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml +++ b/test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: test-server - image: k8s.gcr.io/mysql-e2e-test:0.1 + image: gcr.io/google-containers/mysql-e2e-test:0.1 imagePullPolicy: Always ports: - containerPort: 8080 diff --git a/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml b/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml index 0436fe25bab..a483fd5dd3f 100644 --- a/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml +++ 
b/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: nginx - image: k8s.gcr.io/nginx-slim:0.8 + image: gcr.io/google_containers/nginx-slim:0.8 ports: - containerPort: 80 name: web @@ -31,4 +31,4 @@ spec: accessModes: [ "ReadWriteOnce" ] resources: requests: - storage: 1Gi + storage: 1Gi \ No newline at end of file diff --git a/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml b/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml index 7870a7df0d2..e324ef5613f 100644 --- a/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml +++ b/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml @@ -12,7 +12,7 @@ spec: spec: initContainers: - name: install - image: k8s.gcr.io/redis-install-3.2.0:e2e + image: gcr.io/google_containers/redis-install-3.2.0:e2e imagePullPolicy: Always args: - "--install-into=/opt" diff --git a/test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml b/test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml index d2656bb74d0..4160bdff42d 100644 --- a/test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml +++ b/test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml @@ -12,7 +12,7 @@ spec: spec: initContainers: - name: install - image: k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e + image: gcr.io/google_containers/zookeeper-install-3.5.0-alpha:e2e imagePullPolicy: Always args: - "--install-into=/opt" diff --git a/test/e2e_node/conformance/build/Makefile b/test/e2e_node/conformance/build/Makefile index aa52d9eae4e..abc03366bc8 100644 --- a/test/e2e_node/conformance/build/Makefile +++ b/test/e2e_node/conformance/build/Makefile @@ -15,7 +15,7 @@ # Build the node-test image. # # Usage: -# [ARCH=amd64] [REGISTRY="k8s.gcr.io"] [BIN_DIR="../../../../_output/bin"] make (build|push) VERSION={some_version_number e.g. 
0.1} +# [ARCH=amd64] [REGISTRY="gcr.io/google_containers"] [BIN_DIR="../../../../_output/bin"] make (build|push) VERSION={some_version_number e.g. 0.1} # SYSTEM_SPEC_NAME is the name of the system spec used for the node conformance # test. The specs are expected to be in SYSTEM_SPEC_DIR. @@ -23,7 +23,7 @@ SYSTEM_SPEC_NAME?= SYSTEM_SPEC_DIR?=../../system/specs # TODO(random-liu): Add this into release progress. -REGISTRY?=k8s.gcr.io +REGISTRY?=gcr.io/google_containers ARCH?=amd64 # BIN_DIR is the directory to find binaries, overwrite with ../../../../_output/bin # for local development. diff --git a/test/e2e_node/conformance/run_test.sh b/test/e2e_node/conformance/run_test.sh index 6ddb2478832..af9f2febc83 100755 --- a/test/e2e_node/conformance/run_test.sh +++ b/test/e2e_node/conformance/run_test.sh @@ -44,7 +44,7 @@ SKIP=${SKIP:-""} TEST_ARGS=${TEST_ARGS:-""} # REGISTRY is the image registry for node test image. -REGISTRY=${REGISTRY:-"k8s.gcr.io"} +REGISTRY=${REGISTRY:-"gcr.io/google_containers"} # ARCH is the architecture of current machine, the script will use this to # select corresponding test container image. diff --git a/test/e2e_node/gke_environment_test.go b/test/e2e_node/gke_environment_test.go index dd88976b39c..ee1ddafd4ee 100644 --- a/test/e2e_node/gke_environment_test.go +++ b/test/e2e_node/gke_environment_test.go @@ -84,7 +84,7 @@ func checkIPTables() (err error) { // checkPublicGCR checks the access to the public Google Container Registry by // pulling the busybox image. func checkPublicGCR() error { - const image = "k8s.gcr.io/busybox" + const image = "gcr.io/google-containers/busybox" output, err := runCommand("docker", "images", "-q", image) if len(output) != 0 { if _, err := runCommand("docker", "rmi", "-f", image); err != nil { @@ -170,7 +170,7 @@ func checkDockerConfig() error { // checkDockerNetworkClient checks client networking by pinging an external IP // address from a container. 
func checkDockerNetworkClient() error { - const imageName = "k8s.gcr.io/busybox" + const imageName = "gcr.io/google-containers/busybox" output, err := runCommand("docker", "run", "--rm", imageName, "sh", "-c", "ping -w 5 -q google.com") if err != nil { return err @@ -185,7 +185,7 @@ func checkDockerNetworkClient() error { // within a container and accessing it from outside. func checkDockerNetworkServer() error { const ( - imageName = "k8s.gcr.io/nginx:1.7.9" + imageName = "gcr.io/google-containers/nginx:1.7.9" hostAddr = "127.0.0.1" hostPort = "8088" containerPort = "80" diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index 090eafa2863..a131c398b82 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -28,7 +28,7 @@ import ( var _ = framework.KubeDescribe("ImageID", func() { - busyBoxImage := "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff" + busyBoxImage := "gcr.io/google_containers/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff" f := framework.NewDefaultFramework("image-id-test") diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index fa323d90fd6..01360451fcb 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -47,10 +47,10 @@ const ( // before test running so that the image pulling won't fail in actual test. 
var NodeImageWhiteList = sets.NewString( "google/cadvisor:latest", - "k8s.gcr.io/stress:v1", + "gcr.io/google-containers/stress:v1", busyboxImage, - "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff", - "k8s.gcr.io/node-problem-detector:v0.4.1", + "gcr.io/google_containers/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff", + "gcr.io/google_containers/node-problem-detector:v0.4.1", imageutils.GetE2EImage(imageutils.NginxSlim), imageutils.GetE2EImage(imageutils.ServeHostname), imageutils.GetE2EImage(imageutils.Netexec), diff --git a/test/e2e_node/jenkins/gci-init-gpu.yaml b/test/e2e_node/jenkins/gci-init-gpu.yaml index 3b943de33f2..064b77095c9 100644 --- a/test/e2e_node/jenkins/gci-init-gpu.yaml +++ b/test/e2e_node/jenkins/gci-init-gpu.yaml @@ -2,7 +2,7 @@ runcmd: - modprobe configs - - docker run -v /dev:/dev -v /home/kubernetes/bin/nvidia:/rootfs/nvidia -v /etc/os-release:/rootfs/etc/os-release -v /proc/sysrq-trigger:/sysrq -e BASE_DIR=/rootfs/nvidia --privileged k8s.gcr.io/cos-nvidia-driver-install@sha256:cb55c7971c337fece62f2bfe858662522a01e43ac9984a2dd1dd5c71487d225c + - docker run -v /dev:/dev -v /home/kubernetes/bin/nvidia:/rootfs/nvidia -v /etc/os-release:/rootfs/etc/os-release -v /proc/sysrq-trigger:/sysrq -e BASE_DIR=/rootfs/nvidia --privileged gcr.io/google_containers/cos-nvidia-driver-install@sha256:cb55c7971c337fece62f2bfe858662522a01e43ac9984a2dd1dd5c71487d225c - mount /tmp /tmp -o remount,exec,suid - usermod -a -G docker jenkins - mkdir -p /var/lib/kubelet diff --git a/test/e2e_node/memory_eviction_test.go b/test/e2e_node/memory_eviction_test.go index 41749098183..63489e2d389 100644 --- a/test/e2e_node/memory_eviction_test.go +++ b/test/e2e_node/memory_eviction_test.go @@ -272,7 +272,7 @@ func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) * Containers: []v1.Container{ { Name: ctnName, - Image: "k8s.gcr.io/stress:v1", + Image: 
"gcr.io/google-containers/stress:v1", ImagePullPolicy: "Always", Env: env, // 60 min timeout * 60s / tick per 10s = 360 ticks before timeout => ~11.11Mi/tick diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 9333b66cba7..3c9b6453ec9 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { pollInterval = 1 * time.Second pollConsistent = 5 * time.Second pollTimeout = 1 * time.Minute - image = "k8s.gcr.io/node-problem-detector:v0.4.1" + image = "gcr.io/google_containers/node-problem-detector:v0.4.1" ) f := framework.NewDefaultFramework("node-problem-detector") var c clientset.Interface diff --git a/test/e2e_node/remote/node_conformance.go b/test/e2e_node/remote/node_conformance.go index 625940bf190..59e322a77b7 100644 --- a/test/e2e_node/remote/node_conformance.go +++ b/test/e2e_node/remote/node_conformance.go @@ -53,7 +53,7 @@ func commandToString(c *exec.Cmd) string { // Image path constants. 
const ( - conformanceRegistry = "k8s.gcr.io" + conformanceRegistry = "gcr.io/google_containers" conformanceArch = runtime.GOARCH conformanceTarfile = "node_conformance.tar" conformanceTestBinary = "e2e_node.test" diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index 0a8b7ac57d6..a616dd9b1e1 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -275,13 +275,13 @@ while true; do sleep 1; done }, { description: "should not be able to pull non-existing image from gcr.io", - image: "k8s.gcr.io/invalid-image:invalid-tag", + image: "gcr.io/google_containers/invalid-image:invalid-tag", phase: v1.PodPending, waiting: true, }, { description: "should be able to pull image from gcr.io", - image: "k8s.gcr.io/alpine-with-bash:1.0", + image: "gcr.io/google_containers/alpine-with-bash:1.0", phase: v1.PodRunning, waiting: false, }, diff --git a/test/fixtures/doc-yaml/admin/high-availability/etcd.yaml b/test/fixtures/doc-yaml/admin/high-availability/etcd.yaml index d9f2b8e64d2..3c73984ba91 100644 --- a/test/fixtures/doc-yaml/admin/high-availability/etcd.yaml +++ b/test/fixtures/doc-yaml/admin/high-availability/etcd.yaml @@ -5,7 +5,7 @@ metadata: spec: hostNetwork: true containers: - - image: k8s.gcr.io/etcd:2.0.9 + - image: gcr.io/google_containers/etcd:2.0.9 name: etcd-container command: - /usr/local/bin/etcd diff --git a/test/fixtures/doc-yaml/admin/high-availability/kube-apiserver.yaml b/test/fixtures/doc-yaml/admin/high-availability/kube-apiserver.yaml index a415ebc19d9..a41a2c666dc 100644 --- a/test/fixtures/doc-yaml/admin/high-availability/kube-apiserver.yaml +++ b/test/fixtures/doc-yaml/admin/high-availability/kube-apiserver.yaml @@ -6,7 +6,7 @@ spec: hostNetwork: true containers: - name: kube-apiserver - image: k8s.gcr.io/kube-apiserver:9680e782e08a1a1c94c656190011bd02 + image: gcr.io/google_containers/kube-apiserver:9680e782e08a1a1c94c656190011bd02 command: - /bin/sh - -c 
diff --git a/test/fixtures/doc-yaml/admin/high-availability/kube-controller-manager.yaml b/test/fixtures/doc-yaml/admin/high-availability/kube-controller-manager.yaml index 79851929788..a7cfbd5785b 100644 --- a/test/fixtures/doc-yaml/admin/high-availability/kube-controller-manager.yaml +++ b/test/fixtures/doc-yaml/admin/high-availability/kube-controller-manager.yaml @@ -10,7 +10,7 @@ spec: - /usr/local/bin/kube-controller-manager --master=127.0.0.1:8080 --cluster-name=e2e-test-bburns --cluster-cidr=10.245.0.0/16 --allocate-node-cidrs=true --cloud-provider=gce --service-account-private-key-file=/srv/kubernetes/server.key --v=2 1>>/var/log/kube-controller-manager.log --leader-elect 2>&1 - image: k8s.gcr.io/kube-controller-manager:fda24638d51a48baa13c35337fcd4793 + image: gcr.io/google_containers/kube-controller-manager:fda24638d51a48baa13c35337fcd4793 livenessProbe: httpGet: path: /healthz diff --git a/test/fixtures/doc-yaml/admin/high-availability/kube-scheduler.yaml b/test/fixtures/doc-yaml/admin/high-availability/kube-scheduler.yaml index 1da30918598..eeef9bb6286 100644 --- a/test/fixtures/doc-yaml/admin/high-availability/kube-scheduler.yaml +++ b/test/fixtures/doc-yaml/admin/high-availability/kube-scheduler.yaml @@ -6,7 +6,7 @@ spec: hostNetwork: true containers: - name: kube-scheduler - image: k8s.gcr.io/kube-scheduler:34d0b8f8b31e27937327961528739bc9 + image: gcr.io/google_containers/kube-scheduler:34d0b8f8b31e27937327961528739bc9 command: - /bin/sh - -c diff --git a/test/fixtures/doc-yaml/admin/limitrange/invalid-pod.yaml b/test/fixtures/doc-yaml/admin/limitrange/invalid-pod.yaml index ecb45dd95fe..b63f25debab 100644 --- a/test/fixtures/doc-yaml/admin/limitrange/invalid-pod.yaml +++ b/test/fixtures/doc-yaml/admin/limitrange/invalid-pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: kubernetes-serve-hostname - image: k8s.gcr.io/serve_hostname + image: gcr.io/google_containers/serve_hostname resources: limits: cpu: "3" diff --git 
a/test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml b/test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml index d83e91267a4..c1ec54183be 100644 --- a/test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml +++ b/test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml @@ -7,7 +7,7 @@ metadata: spec: containers: - name: kubernetes-serve-hostname - image: k8s.gcr.io/serve_hostname + image: gcr.io/google_containers/serve_hostname resources: limits: cpu: "1" diff --git a/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml b/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml index 64145553558..a4796fc24c0 100644 --- a/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml +++ b/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: k8s.gcr.io/busybox + image: gcr.io/google_containers/busybox command: [ "/bin/sh", "-c", "env" ] env: - name: MY_POD_NAME diff --git a/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml b/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml index 204c4ddf59d..461691df221 100644 --- a/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml +++ b/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml @@ -10,7 +10,7 @@ spec: - /bin/sh - -c - echo ok > /tmp/health; sleep 10; rm -rf /tmp/health; sleep 600 - image: k8s.gcr.io/busybox + image: gcr.io/google_containers/busybox livenessProbe: exec: command: diff --git a/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml b/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml index 1a6ef7bc64c..e7196c65593 100644 --- a/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml +++ b/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml @@ -8,7 +8,7 @@ spec: containers: - args: - /server - image: k8s.gcr.io/liveness + image: gcr.io/google_containers/liveness livenessProbe: httpGet: path: /healthz diff --git 
a/test/fixtures/doc-yaml/user-guide/multi-pod.yaml b/test/fixtures/doc-yaml/user-guide/multi-pod.yaml index c795ce6f4d2..1a1d758e994 100644 --- a/test/fixtures/doc-yaml/user-guide/multi-pod.yaml +++ b/test/fixtures/doc-yaml/user-guide/multi-pod.yaml @@ -42,7 +42,7 @@ metadata: spec: containers: - name: kubernetes-serve-hostname - image: k8s.gcr.io/serve_hostname + image: gcr.io/google_containers/serve_hostname resources: limits: cpu: "1" diff --git a/test/fixtures/doc-yaml/user-guide/secrets/secret-env-pod.yaml b/test/fixtures/doc-yaml/user-guide/secrets/secret-env-pod.yaml index d93d4095e14..a5d9c0ff758 100644 --- a/test/fixtures/doc-yaml/user-guide/secrets/secret-env-pod.yaml +++ b/test/fixtures/doc-yaml/user-guide/secrets/secret-env-pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: k8s.gcr.io/busybox + image: gcr.io/google_containers/busybox command: [ "/bin/sh", "-c", "env" ] env: - name: MY_SECRET_DATA diff --git a/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml b/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml index 1ff2e8652f1..8f87a8dea5e 100644 --- a/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml +++ b/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: k8s.gcr.io/mounttest:0.8 + image: gcr.io/google_containers/mounttest:0.8 command: [ "/mt", "--file_content=/etc/secret-volume/data-1" ] volumeMounts: # name must match the volume name below diff --git a/test/fixtures/pkg/kubectl/builder/kitten-rc.yaml b/test/fixtures/pkg/kubectl/builder/kitten-rc.yaml index 48b15cc190c..91f1aa06c30 100644 --- a/test/fixtures/pkg/kubectl/builder/kitten-rc.yaml +++ b/test/fixtures/pkg/kubectl/builder/kitten-rc.yaml @@ -13,7 +13,7 @@ spec: version: kitten spec: containers: - - image: k8s.gcr.io/update-demo:kitten + - image: gcr.io/google_containers/update-demo:kitten name: update-demo ports: - containerPort: 80 diff --git 
a/test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml b/test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml index 958e054ec51..c358d8aa282 100644 --- a/test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml +++ b/test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml @@ -30,7 +30,7 @@ items: spec: containers: - name: kubernetes-serve-hostname - image: k8s.gcr.io/serve_hostname + image: gcr.io/google_containers/serve_hostname resources: limits: cpu: "1" diff --git a/test/images/iperf/BASEIMAGE b/test/images/iperf/BASEIMAGE index 7f7f90c89a0..1bf3907f760 100644 --- a/test/images/iperf/BASEIMAGE +++ b/test/images/iperf/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=k8s.gcr.io/ubuntu-slim:0.12 -arm=k8s.gcr.io/ubuntu-slim-arm:0.12 -arm64=k8s.gcr.io/ubuntu-slim-arm64:0.12 -ppc64le=k8s.gcr.io/ubuntu-slim-ppc64le:0.12 +amd64=gcr.io/google_containers/ubuntu-slim:0.12 +arm=gcr.io/google_containers/ubuntu-slim-arm:0.12 +arm64=gcr.io/google_containers/ubuntu-slim-arm64:0.12 +ppc64le=gcr.io/google_containers/ubuntu-slim-ppc64le:0.12 diff --git a/test/images/logs-generator/README.md b/test/images/logs-generator/README.md index 889ba2a7bdb..8f89bb4a4b5 100644 --- a/test/images/logs-generator/README.md +++ b/test/images/logs-generator/README.md @@ -33,7 +33,7 @@ line in a given run of the container. 
Image is located in the public repository of Google Container Registry under the name ``` -k8s.gcr.io/logs-generator:v0.1.1 +gcr.io/google_containers/logs-generator:v0.1.1 ``` ## Examples @@ -42,13 +42,13 @@ k8s.gcr.io/logs-generator:v0.1.1 docker run -i \ -e "LOGS_GENERATOR_LINES_TOTAL=10" \ -e "LOGS_GENERATOR_DURATION=1s" \ - k8s.gcr.io/logs-generator:v0.1.1 + gcr.io/google_containers/logs-generator:v0.1.1 ``` ``` kubectl run logs-generator \ --generator=run-pod/v1 \ - --image=k8s.gcr.io/logs-generator:v0.1.1 \ + --image=gcr.io/google_containers/logs-generator:v0.1.1 \ --restart=Never \ --env "LOGS_GENERATOR_LINES_TOTAL=1000" \ --env "LOGS_GENERATOR_DURATION=1m" diff --git a/test/images/pets/peer-finder/BASEIMAGE b/test/images/pets/peer-finder/BASEIMAGE index 0b04ef6e4d5..425fce2600a 100644 --- a/test/images/pets/peer-finder/BASEIMAGE +++ b/test/images/pets/peer-finder/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=k8s.gcr.io/debian-base-amd64:0.3 -arm=k8s.gcr.io/debian-base-arm:0.3 -arm64=k8s.gcr.io/debian-base-arm64:0.3 -ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3 +amd64=gcr.io/google-containers/debian-base-amd64:0.3 +arm=gcr.io/google-containers/debian-base-arm:0.3 +arm64=gcr.io/google-containers/debian-base-arm64:0.3 +ppc64le=gcr.io/google-containers/debian-base-ppc64le:0.3 diff --git a/test/images/pets/redis-installer/BASEIMAGE b/test/images/pets/redis-installer/BASEIMAGE index 0b04ef6e4d5..425fce2600a 100644 --- a/test/images/pets/redis-installer/BASEIMAGE +++ b/test/images/pets/redis-installer/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=k8s.gcr.io/debian-base-amd64:0.3 -arm=k8s.gcr.io/debian-base-arm:0.3 -arm64=k8s.gcr.io/debian-base-arm64:0.3 -ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3 +amd64=gcr.io/google-containers/debian-base-amd64:0.3 +arm=gcr.io/google-containers/debian-base-arm:0.3 +arm64=gcr.io/google-containers/debian-base-arm64:0.3 +ppc64le=gcr.io/google-containers/debian-base-ppc64le:0.3 diff --git a/test/images/pets/redis-installer/README.md 
b/test/images/pets/redis-installer/README.md index e1c3587af34..04024c13592 100644 --- a/test/images/pets/redis-installer/README.md +++ b/test/images/pets/redis-installer/README.md @@ -4,7 +4,7 @@ The image in this directory is the init container for contrib/pets/redis but for You can execute the image locally via: ``` -$ docker run -it k8s.gcr.io/redis-install-3.2.0:e2e --cmd --install-into=/opt --work-dir=/work-dir +$ docker run -it gcr.io/google_containers/redis-install-3.2.0:e2e --cmd --install-into=/opt --work-dir=/work-dir ``` To share the installation with other containers mount the appropriate volumes as `--install-into` and `--work-dir`, where `install-into` is the directory to install redis into, and `work-dir` is the directory to install the user/admin supplied on-{start,change} hook scripts. diff --git a/test/images/pets/zookeeper-installer/BASEIMAGE b/test/images/pets/zookeeper-installer/BASEIMAGE index 0b04ef6e4d5..425fce2600a 100644 --- a/test/images/pets/zookeeper-installer/BASEIMAGE +++ b/test/images/pets/zookeeper-installer/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=k8s.gcr.io/debian-base-amd64:0.3 -arm=k8s.gcr.io/debian-base-arm:0.3 -arm64=k8s.gcr.io/debian-base-arm64:0.3 -ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3 +amd64=gcr.io/google-containers/debian-base-amd64:0.3 +arm=gcr.io/google-containers/debian-base-arm:0.3 +arm64=gcr.io/google-containers/debian-base-arm64:0.3 +ppc64le=gcr.io/google-containers/debian-base-ppc64le:0.3 diff --git a/test/images/pets/zookeeper-installer/README.md b/test/images/pets/zookeeper-installer/README.md index 598ca7afe9b..071bc7aa484 100644 --- a/test/images/pets/zookeeper-installer/README.md +++ b/test/images/pets/zookeeper-installer/README.md @@ -4,7 +4,7 @@ The image in this directory is the init container for contrib/pets/zookeeper but You can execute the image locally via: ``` -$ docker run -it k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e --cmd --install-into=/opt --work-dir=/work-dir +$ docker run -it 
gcr.io/google_containers/zookeeper-install-3.5.0-alpha:e2e --cmd --install-into=/opt --work-dir=/work-dir ``` To share the installation with other containers mount the appropriate volumes as `--install-into` and `--work-dir`, where `install-into` is the directory to install zookeeper into, and `work-dir` is the directory to install the user/admin supplied on-{start,change} hook scripts. diff --git a/test/images/resource-consumer/BASEIMAGE b/test/images/resource-consumer/BASEIMAGE index 0b04ef6e4d5..425fce2600a 100644 --- a/test/images/resource-consumer/BASEIMAGE +++ b/test/images/resource-consumer/BASEIMAGE @@ -1,4 +1,4 @@ -amd64=k8s.gcr.io/debian-base-amd64:0.3 -arm=k8s.gcr.io/debian-base-arm:0.3 -arm64=k8s.gcr.io/debian-base-arm64:0.3 -ppc64le=k8s.gcr.io/debian-base-ppc64le:0.3 +amd64=gcr.io/google-containers/debian-base-amd64:0.3 +arm=gcr.io/google-containers/debian-base-arm:0.3 +arm64=gcr.io/google-containers/debian-base-arm64:0.3 +ppc64le=gcr.io/google-containers/debian-base-ppc64le:0.3 diff --git a/test/images/resource-consumer/README.md b/test/images/resource-consumer/README.md index 742bafee369..19801af8e3d 100644 --- a/test/images/resource-consumer/README.md +++ b/test/images/resource-consumer/README.md @@ -48,7 +48,7 @@ Custom metrics in Prometheus format are exposed on "/metrics" endpoint. 
###CURL example ```console -$ kubectl run resource-consumer --image=k8s.gcr.io/resource_consumer:beta --expose --service-overrides='{ "spec": { "type": "LoadBalancer" } }' --port 8080 +$ kubectl run resource-consumer --image=gcr.io/google_containers/resource_consumer:beta --expose --service-overrides='{ "spec": { "type": "LoadBalancer" } }' --port 8080 $ kubectl get services resource-consumer ``` @@ -62,7 +62,7 @@ $ curl --data "millicores=300&durationSec=600" http://:8080/Consume ## Image -Docker image of Resource Consumer can be found in Google Container Registry as k8s.gcr.io/resource_consumer:beta +Docker image of Resource Consumer can be found in Google Container Registry as gcr.io/google_containers/resource_consumer:beta ## Use cases diff --git a/test/images/serve-hostname/README.md b/test/images/serve-hostname/README.md index 957e039610d..28d37336642 100644 --- a/test/images/serve-hostname/README.md +++ b/test/images/serve-hostname/README.md @@ -15,19 +15,19 @@ $ make all-push # Build for linux/amd64 (default) $ make push ARCH=amd64 -# ---> k8s.gcr.io/serve_hostname-amd64:TAG +# ---> gcr.io/google_containers/serve_hostname-amd64:TAG $ make push ARCH=arm -# ---> k8s.gcr.io/serve_hostname-arm:TAG +# ---> gcr.io/google_containers/serve_hostname-arm:TAG $ make push ARCH=arm64 -# ---> k8s.gcr.io/serve_hostname-arm64:TAG +# ---> gcr.io/google_containers/serve_hostname-arm64:TAG $ make push ARCH=ppc64le -# ---> k8s.gcr.io/serve_hostname-ppc64le:TAG +# ---> gcr.io/google_containers/serve_hostname-ppc64le:TAG $ make push ARCH=s390x -# ---> k8s.gcr.io/serve_hostname-s390x:TAG +# ---> gcr.io/google_containers/serve_hostname-s390x:TAG ``` Of course, if you don't want to push the images, run `make all-container` or `make container ARCH={target_arch}` instead. 
diff --git a/test/images/volumes-tester/ceph/Makefile b/test/images/volumes-tester/ceph/Makefile index b23e5383a3d..2883aafb646 100644 --- a/test/images/volumes-tester/ceph/Makefile +++ b/test/images/volumes-tester/ceph/Makefile @@ -13,7 +13,7 @@ # limitations under the License. TAG = 0.1 -PREFIX = k8s.gcr.io +PREFIX = gcr.io/google_containers all: push diff --git a/test/images/volumes-tester/gluster/Makefile b/test/images/volumes-tester/gluster/Makefile index d18769d254e..4aa5b11351e 100644 --- a/test/images/volumes-tester/gluster/Makefile +++ b/test/images/volumes-tester/gluster/Makefile @@ -13,7 +13,7 @@ # limitations under the License. TAG = 0.4 -PREFIX = k8s.gcr.io +PREFIX = gcr.io/google_containers all: push diff --git a/test/images/volumes-tester/iscsi/Makefile b/test/images/volumes-tester/iscsi/Makefile index 00c69ef141a..dd830d4fd2b 100644 --- a/test/images/volumes-tester/iscsi/Makefile +++ b/test/images/volumes-tester/iscsi/Makefile @@ -13,7 +13,7 @@ # limitations under the License. TAG = 0.1 -PREFIX = k8s.gcr.io +PREFIX = gcr.io/google_containers all: push diff --git a/test/images/volumes-tester/nfs/Makefile b/test/images/volumes-tester/nfs/Makefile index 29123f0856d..1e53b19c8b0 100644 --- a/test/images/volumes-tester/nfs/Makefile +++ b/test/images/volumes-tester/nfs/Makefile @@ -13,7 +13,7 @@ # limitations under the License. TAG = 0.8 -PREFIX = k8s.gcr.io +PREFIX = gcr.io/google_containers all: push diff --git a/test/images/volumes-tester/rbd/Makefile b/test/images/volumes-tester/rbd/Makefile index 68629fffaa2..dcf3c69f371 100644 --- a/test/images/volumes-tester/rbd/Makefile +++ b/test/images/volumes-tester/rbd/Makefile @@ -13,7 +13,7 @@ # limitations under the License. 
TAG = 0.1 -PREFIX = k8s.gcr.io +PREFIX = gcr.io/google_containers all: push diff --git a/test/integration/benchmark-controller.json b/test/integration/benchmark-controller.json index 6b0c8feb9ac..00444f8900f 100644 --- a/test/integration/benchmark-controller.json +++ b/test/integration/benchmark-controller.json @@ -17,7 +17,7 @@ "spec": { "containers": [{ "name": "test-container", - "image": "k8s.gcr.io/pause-amd64:3.0" + "image": "gcr.io/google_containers/pause-amd64:3.0" }] } } diff --git a/test/integration/framework/util.go b/test/integration/framework/util.go index 7639d58eb9d..88f4ac52497 100644 --- a/test/integration/framework/util.go +++ b/test/integration/framework/util.go @@ -40,7 +40,7 @@ import ( const ( // When these values are updated, also update cmd/kubelet/app/options/options.go // A copy of these values exist in e2e/framework/util.go. - currentPodInfraContainerImageName = "k8s.gcr.io/pause" + currentPodInfraContainerImageName = "gcr.io/google_containers/pause" currentPodInfraContainerImageVersion = "3.0" ) diff --git a/test/integration/master/synthetic_master_test.go b/test/integration/master/synthetic_master_test.go index aa063be5d2e..d00f8766b21 100644 --- a/test/integration/master/synthetic_master_test.go +++ b/test/integration/master/synthetic_master_test.go @@ -279,7 +279,7 @@ var deploymentExtensions string = ` "spec": { "containers": [{ "name": "nginx", - "image": "k8s.gcr.io/nginx:1.7.9" + "image": "gcr.io/google-containers/nginx:1.7.9" }] } } @@ -306,7 +306,7 @@ var deploymentApps string = ` "spec": { "containers": [{ "name": "nginx", - "image": "k8s.gcr.io/nginx:1.7.9" + "image": "gcr.io/google-containers/nginx:1.7.9" }] } } diff --git a/test/integration/scheduler/volume_binding_test.go b/test/integration/scheduler/volume_binding_test.go index dd13d9ef4fc..e185ce72b77 100644 --- a/test/integration/scheduler/volume_binding_test.go +++ b/test/integration/scheduler/volume_binding_test.go @@ -461,7 +461,7 @@ func makePod(name, ns string, pvcs 
[]string) *v1.Pod { Containers: []v1.Container{ { Name: "write-pod", - Image: "k8s.gcr.io/busybox:1.24", + Image: "gcr.io/google_containers/busybox:1.24", Command: []string{"/bin/sh"}, Args: []string{"-c", "while true; do sleep 1; done"}, }, diff --git a/test/kubemark/resources/cluster-autoscaler_template.json b/test/kubemark/resources/cluster-autoscaler_template.json index ffe4a61f8b3..f42c060e259 100644 --- a/test/kubemark/resources/cluster-autoscaler_template.json +++ b/test/kubemark/resources/cluster-autoscaler_template.json @@ -14,7 +14,7 @@ "containers": [ { "name": "cluster-autoscaler", - "image": "k8s.gcr.io/cluster-autoscaler:v1.0.0", + "image": "gcr.io/google_containers/cluster-autoscaler:v1.0.0", "command": [ "./run.sh", "--kubernetes=https://{{master_ip}}:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/cluster_autoscaler.kubeconfig", diff --git a/test/kubemark/resources/heapster_template.json b/test/kubemark/resources/heapster_template.json index 5a9bb512ea1..491596b2ff8 100644 --- a/test/kubemark/resources/heapster_template.json +++ b/test/kubemark/resources/heapster_template.json @@ -33,7 +33,7 @@ "containers": [ { "name": "heapster", - "image": "k8s.gcr.io/heapster:v1.3.0", + "image": "gcr.io/google_containers/heapster:v1.3.0", "resources": { "requests": { "cpu": "{{METRICS_CPU}}m", @@ -55,7 +55,7 @@ }, { "name": "eventer", - "image": "k8s.gcr.io/heapster:v1.3.0", + "image": "gcr.io/google_containers/heapster:v1.3.0", "resources": { "requests": { "memory": "{{EVENTER_MEM}}Ki" diff --git a/test/kubemark/resources/hollow-node_template.yaml b/test/kubemark/resources/hollow-node_template.yaml index 3a16f1f3538..06352442c00 100644 --- a/test/kubemark/resources/hollow-node_template.yaml +++ b/test/kubemark/resources/hollow-node_template.yaml @@ -93,7 +93,7 @@ spec: cpu: {{HOLLOW_PROXY_CPU}}m memory: {{HOLLOW_PROXY_MEM}}Ki - name: hollow-node-problem-detector - image: k8s.gcr.io/node-problem-detector:v0.4.1 + image: 
gcr.io/google_containers/node-problem-detector:v0.4.1 env: - name: NODE_NAME valueFrom: diff --git a/test/kubemark/resources/start-kubemark-master.sh b/test/kubemark/resources/start-kubemark-master.sh index 18419667f54..4eddb383094 100755 --- a/test/kubemark/resources/start-kubemark-master.sh +++ b/test/kubemark/resources/start-kubemark-master.sh @@ -692,7 +692,7 @@ fi # Setup docker flags and load images of the master components. assemble-docker-flags -DOCKER_REGISTRY="k8s.gcr.io" +DOCKER_REGISTRY="gcr.io/google_containers" load-docker-images readonly audit_policy_file="/etc/audit_policy.config" diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index bde65ac80d4..84919b8953a 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -23,7 +23,7 @@ import ( const ( e2eRegistry = "gcr.io/kubernetes-e2e-test-images" - gcRegistry = "k8s.gcr.io" + gcRegistry = "gcr.io/google-containers" PrivateRegistry = "gcr.io/k8s-authenticated-test" sampleRegistry = "gcr.io/google-samples" ) From 6bb16c65853aaa3f6f48678e8511fc68aa73a3c6 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Fri, 22 Dec 2017 23:37:33 +0100 Subject: [PATCH 494/794] metrics: make IMPLEMENTATIONS.md and CONTRIBUTING.md authorative in k/k --- staging/src/k8s.io/metrics/CONTRIBUTING.md | 26 +++++++++++++++++++ staging/src/k8s.io/metrics/IMPLEMENTATIONS.md | 24 +++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 staging/src/k8s.io/metrics/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/metrics/IMPLEMENTATIONS.md diff --git a/staging/src/k8s.io/metrics/CONTRIBUTING.md b/staging/src/k8s.io/metrics/CONTRIBUTING.md new file mode 100644 index 00000000000..18eca2f0726 --- /dev/null +++ b/staging/src/k8s.io/metrics/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing guidelines + +## How to become a contributor and submit your own code + +### Contributor License Agreements + +We'd love to accept your patches! 
Before we can take them, we have to jump a couple of legal hurdles. + +Please fill out either the individual or corporate Contributor License Agreement (CLA). + + * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](https://identity.linuxfoundation.org/node/285/node/285/individual-signup). + * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](https://identity.linuxfoundation.org/node/285/organization-signup). + +Follow either of the two links above to access the appropriate CLA and instructions for how to sign and return it. Once we receive it, we'll be able to accept your pull requests. + +### Contributing A Patch + +1. Submit an issue describing your proposed change to the repo in question. +1. The [repo owners](OWNERS) will respond to your issue promptly. +1. If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above). +1. Fork the desired repo, develop and test your code changes. +1. Submit a pull request. + +### Adding dependencies + +If your patch depends on new packages, add that package with [`godep`](https://github.com/tools/godep). Follow the [instructions to add a dependency](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/development.md#godep-and-dependency-management). diff --git a/staging/src/k8s.io/metrics/IMPLEMENTATIONS.md b/staging/src/k8s.io/metrics/IMPLEMENTATIONS.md new file mode 100644 index 00000000000..cb5a565c583 --- /dev/null +++ b/staging/src/k8s.io/metrics/IMPLEMENTATIONS.md @@ -0,0 +1,24 @@ +# Implementations + +## Resource Metrics API + +- [Heapster](https://github.com/kubernetes/heapster): a application which + gathers metrics, writes them to metrics storage "sinks", and exposes the + resource metrics API from in-memory storage. 
+ +- [Metrics Server](https://github.com/kubernetes-incubator/metrics-server): + a lighter-weight in-memory server specifically for the resource metrics + API. + +## Custom Metrics API + +***NB: None of the below implemenations are officially part of Kubernetes. +They are listed here for convenience.*** + +- [Prometheus + Adapter](https://github.com/directxman12/k8s-prometheus-adapter). An + implementation of the custom metrics API that attempts to support + arbitrary metrics following a set label and naming scheme. + +- [Google Stackdriver (coming + soon)](https://github.com/GoogleCloudPlatform/k8s-stackdriver) From 8749c5c989080acd3da8b04c93171b2dd19ada93 Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Mon, 18 Dec 2017 23:31:07 -0500 Subject: [PATCH 495/794] Revert back #57278 --- pkg/kubelet/cm/deviceplugin/manager.go | 9 +++++---- pkg/kubelet/cm/deviceplugin/manager_test.go | 16 ++++++++++------ 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/manager.go b/pkg/kubelet/cm/deviceplugin/manager.go index 5e1137c1149..df5e36e2187 100644 --- a/pkg/kubelet/cm/deviceplugin/manager.go +++ b/pkg/kubelet/cm/deviceplugin/manager.go @@ -550,10 +550,11 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont podUID := string(pod.UID) contName := container.Name allocatedDevicesUpdated := false - // NOTE: Skipping the Resources.Limits is safe here because: - // 1. If container Spec mentions Limits only, implicitly Requests, equal to Limits, will get added to the Spec. - // 2. If container Spec mentions Limits, which are greater than or less than Requests, will fail at validation. - for k, v := range container.Resources.Requests { + // Extended resources are not allowed to be overcommitted. + // Since device plugin advertises extended resources, + // therefore Requests must be equal to Limits and iterating + // over the Limits should be sufficient. 
+ for k, v := range container.Resources.Limits { resource := string(k) needed := int(v.Value()) glog.V(3).Infof("needs %d %s", needed, resource) diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index d7a032694c5..de4c0264a08 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -366,7 +366,7 @@ func (m *MockEndpoint) allocate(devs []string) (*pluginapi.AllocateResponse, err return nil, nil } -func makePod(requests v1.ResourceList) *v1.Pod { +func makePod(limits v1.ResourceList) *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), @@ -375,7 +375,7 @@ func makePod(requests v1.ResourceList) *v1.Pod { Containers: []v1.Container{ { Resources: v1.ResourceRequirements{ - Requests: requests, + Limits: limits, }, }, }, @@ -616,7 +616,7 @@ func TestInitContainerDeviceAllocation(t *testing.T) { { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ + Limits: v1.ResourceList{ v1.ResourceName(res1.resourceName): res2.resourceQuantity, }, }, @@ -624,7 +624,7 @@ func TestInitContainerDeviceAllocation(t *testing.T) { { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ + Limits: v1.ResourceList{ v1.ResourceName(res1.resourceName): res1.resourceQuantity, }, }, @@ -634,7 +634,7 @@ func TestInitContainerDeviceAllocation(t *testing.T) { { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ + Limits: v1.ResourceList{ v1.ResourceName(res1.resourceName): res2.resourceQuantity, v1.ResourceName(res2.resourceName): res2.resourceQuantity, }, @@ -643,7 +643,7 @@ func TestInitContainerDeviceAllocation(t *testing.T) { { Name: string(uuid.NewUUID()), Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ + Limits: v1.ResourceList{ v1.ResourceName(res1.resourceName): res2.resourceQuantity, v1.ResourceName(res2.resourceName): 
res2.resourceQuantity, }, @@ -664,6 +664,10 @@ func TestInitContainerDeviceAllocation(t *testing.T) { initCont2Devices := testManager.podDevices.containerDevices(podUID, initCont2, res1.resourceName) normalCont1Devices := testManager.podDevices.containerDevices(podUID, normalCont1, res1.resourceName) normalCont2Devices := testManager.podDevices.containerDevices(podUID, normalCont2, res1.resourceName) + as.Equal(1, initCont1Devices.Len()) + as.Equal(2, initCont2Devices.Len()) + as.Equal(1, normalCont1Devices.Len()) + as.Equal(1, normalCont2Devices.Len()) as.True(initCont2Devices.IsSuperset(initCont1Devices)) as.True(initCont2Devices.IsSuperset(normalCont1Devices)) as.True(initCont2Devices.IsSuperset(normalCont2Devices)) From fa57f03f3e9f9c11687b73e2087cfac8ec22c31e Mon Sep 17 00:00:00 2001 From: Manjunath A Kumatagi Date: Sat, 23 Dec 2017 12:26:47 +0530 Subject: [PATCH 496/794] Use multi-arch pause image for tests --- test/utils/image/manifest.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index 84919b8953a..73b586ff616 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -69,7 +69,7 @@ var ( JessieDnsutils = ImageConfig{e2eRegistry, "jessie-dnsutils", "1.0", true} Kitten = ImageConfig{e2eRegistry, "kitten", "1.0", true} Liveness = ImageConfig{e2eRegistry, "liveness", "1.0", true} - LogsGenerator = ImageConfig{gcRegistry, "logs-generator", "v0.1.0", false} + LogsGenerator = ImageConfig{e2eRegistry, "logs-generator", "1.0", true} Mounttest = ImageConfig{e2eRegistry, "mounttest", "1.0", true} MounttestUser = ImageConfig{e2eRegistry, "mounttest-user", "1.0", true} Nautilus = ImageConfig{e2eRegistry, "nautilus", "1.0", true} @@ -83,7 +83,7 @@ var ( NoSnatTestProxy = ImageConfig{e2eRegistry, "no-snat-test-proxy", "1.0", true} NWayHTTP = ImageConfig{e2eRegistry, "n-way-http", "1.0", true} // When these values are updated, also update 
cmd/kubelet/app/options/options.go - Pause = ImageConfig{gcRegistry, "pause", "3.0", false} + Pause = ImageConfig{gcRegistry, "pause", "3.0", true} Porter = ImageConfig{e2eRegistry, "porter", "1.0", true} PortForwardTester = ImageConfig{e2eRegistry, "port-forward-tester", "1.0", true} Redis = ImageConfig{e2eRegistry, "redis", "1.0", true} From cc4d2cbe9d80e75796b7bfd7444caa0030ea8d7c Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Sat, 23 Dec 2017 03:02:33 -0500 Subject: [PATCH 497/794] Fix a race in the endpoint.go --- pkg/kubelet/cm/deviceplugin/endpoint.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/kubelet/cm/deviceplugin/endpoint.go b/pkg/kubelet/cm/deviceplugin/endpoint.go index 29feaf52852..523922d9dc0 100644 --- a/pkg/kubelet/cm/deviceplugin/endpoint.go +++ b/pkg/kubelet/cm/deviceplugin/endpoint.go @@ -164,7 +164,11 @@ func (e *endpointImpl) run() { } e.mutex.Lock() - e.devices = devices + // NOTE: Return a copy of 'devices' instead of returning a direct reference to local 'devices' + e.devices = make(map[string]pluginapi.Device) + for _, d := range devices { + e.devices[d.ID] = d + } e.mutex.Unlock() e.callback(e.resourceName, added, updated, deleted) From 4dcc92e472a365814e676260c0deaeedc5dca270 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Sat, 23 Dec 2017 17:56:21 +0800 Subject: [PATCH 498/794] remove dead code in pkg/api --- pkg/api/endpoints/util.go | 8 -------- pkg/api/v1/endpoints/util.go | 8 -------- 2 files changed, 16 deletions(-) diff --git a/pkg/api/endpoints/util.go b/pkg/api/endpoints/util.go index 3d7b6e514f6..8fa72b56819 100644 --- a/pkg/api/endpoints/util.go +++ b/pkg/api/endpoints/util.go @@ -168,14 +168,6 @@ func LessEndpointAddress(a, b *api.EndpointAddress) bool { return a.TargetRef.UID < b.TargetRef.UID } -type addrPtrsByIpAndUID []*api.EndpointAddress - -func (sl addrPtrsByIpAndUID) Len() int { return len(sl) } -func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func 
(sl addrPtrsByIpAndUID) Less(i, j int) bool { - return LessEndpointAddress(sl[i], sl[j]) -} - // SortSubsets sorts an array of EndpointSubset objects in place. For ease of // use it returns the input slice. func SortSubsets(subsets []api.EndpointSubset) []api.EndpointSubset { diff --git a/pkg/api/v1/endpoints/util.go b/pkg/api/v1/endpoints/util.go index 89b8d9e16ae..833af440c32 100644 --- a/pkg/api/v1/endpoints/util.go +++ b/pkg/api/v1/endpoints/util.go @@ -169,14 +169,6 @@ func LessEndpointAddress(a, b *v1.EndpointAddress) bool { return a.TargetRef.UID < b.TargetRef.UID } -type addrPtrsByIpAndUID []*v1.EndpointAddress - -func (sl addrPtrsByIpAndUID) Len() int { return len(sl) } -func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrPtrsByIpAndUID) Less(i, j int) bool { - return LessEndpointAddress(sl[i], sl[j]) -} - // SortSubsets sorts an array of EndpointSubset objects in place. For ease of // use it returns the input slice. func SortSubsets(subsets []v1.EndpointSubset) []v1.EndpointSubset { From b13263eeb3384e1857e7db46132d7fcc9ea66300 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Mon, 13 Nov 2017 16:11:10 -0800 Subject: [PATCH 499/794] Bump rules_go to 0.8.1 --- build/root/WORKSPACE | 34 ++++++++-------------------------- hack/update-bazel.sh | 2 +- 2 files changed, 9 insertions(+), 27 deletions(-) diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index 7de0ffaf334..c6ba9c71059 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -1,15 +1,15 @@ http_archive( name = "io_bazel_rules_go", - sha256 = "441e560e947d8011f064bd7348d86940d6b6131ae7d7c4425a538e8d9f884274", - strip_prefix = "rules_go-c72631a220406c4fae276861ee286aaec82c5af2", - urls = ["https://github.com/bazelbuild/rules_go/archive/c72631a220406c4fae276861ee286aaec82c5af2.tar.gz"], + sha256 = "e8c7f1fda9ee482745a5b35e8314ac3ae744d4ba30f3e6de28148fd166044306", + strip_prefix = "rules_go-737df20c53499fd84b67f04c6ca9ccdee2e77089", + urls = 
["https://github.com/bazelbuild/rules_go/archive/737df20c53499fd84b67f04c6ca9ccdee2e77089.tar.gz"], ) http_archive( name = "io_kubernetes_build", - sha256 = "89788eb30f10258ae0c6ab8b8625a28cb4c101fba93a8a6725ba227bb778ff27", - strip_prefix = "repo-infra-653485c1a6d554513266d55683da451bd41f7d65", - urls = ["https://github.com/kubernetes/repo-infra/archive/653485c1a6d554513266d55683da451bd41f7d65.tar.gz"], + sha256 = "cf138e48871629345548b4aaf23101314b5621c1bdbe45c4e75edb45b08891f0", + strip_prefix = "repo-infra-1fb0a3ff0cc5308a6d8e2f3f9c57d1f2f940354e", + urls = ["https://github.com/kubernetes/repo-infra/archive/1fb0a3ff0cc5308a6d8e2f3f9c57d1f2f940354e.tar.gz"], ) ETCD_VERSION = "3.1.10" @@ -41,33 +41,15 @@ http_archive( load("@io_kubernetes_build//defs:bazel_version.bzl", "check_version") -check_version("0.6.0") +check_version("0.8.0") load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains", "go_download_sdk") load("@io_bazel_rules_docker//docker:docker.bzl", "docker_repositories", "docker_pull") go_rules_dependencies() -# The upstream version of rules_go is broken in a number of ways. Until it's fixed, explicitly download and use go1.9.2 ourselves. 
-go_download_sdk( - name = "go_sdk", - sdks = { - "darwin_amd64": ("go1.9.2.darwin-amd64.tar.gz", "73fd5840d55f5566d8db6c0ffdd187577e8ebe650c783f68bd27cbf95bde6743"), - "linux_386": ("go1.9.2.linux-386.tar.gz", "574b2c4b1a248e58ef7d1f825beda15429610a2316d9cbd3096d8d3fa8c0bc1a"), - "linux_amd64": ("go1.9.2.linux-amd64.tar.gz", "de874549d9a8d8d8062be05808509c09a88a248e77ec14eb77453530829ac02b"), - "linux_armv6l": ("go1.9.2.linux-armv6l.tar.gz", "8a6758c8d390e28ef2bcea511f62dcb43056f38c1addc06a8bc996741987e7bb"), - "windows_386": ("go1.9.2.windows-386.zip", "35d3be5d7b97c6d11ffb76c1b19e20a824e427805ee918e82c08a2e5793eda20"), - "windows_amd64": ("go1.9.2.windows-amd64.zip", "682ec3626a9c45b657c2456e35cadad119057408d37f334c6c24d88389c2164c"), - "freebsd_386": ("go1.9.2.freebsd-386.tar.gz", "809dcb0a8457c8d0abf954f20311a1ee353486d0ae3f921e9478189721d37677"), - "freebsd_amd64": ("go1.9.2.freebsd-amd64.tar.gz", "8be985c3e251c8e007fa6ecd0189bc53e65cc519f4464ddf19fa11f7ed251134"), - "linux_arm64": ("go1.9.2.linux-arm64.tar.gz", "0016ac65ad8340c84f51bc11dbb24ee8265b0a4597dbfdf8d91776fc187456fa"), - "linux_ppc64le": ("go1.9.2.linux-ppc64le.tar.gz", "adb440b2b6ae9e448c253a20836d8e8aa4236f731d87717d9c7b241998dc7f9d"), - "linux_s390x": ("go1.9.2.linux-s390x.tar.gz", "a7137b4fbdec126823a12a4b696eeee2f04ec616e9fb8a54654c51d5884c1345"), - }, -) - go_register_toolchains( - go_version = "overridden by go_download_sdk", + go_version = "1.9.2", ) docker_repositories() diff --git a/hack/update-bazel.sh b/hack/update-bazel.sh index a01643356df..e569d94e9c7 100755 --- a/hack/update-bazel.sh +++ b/hack/update-bazel.sh @@ -32,7 +32,7 @@ kube::util::go_install_from_commit \ ae4e9a3906ace4ba657b7a09242610c6266e832c kube::util::go_install_from_commit \ github.com/bazelbuild/rules_go/go/tools/gazelle/gazelle \ - c72631a220406c4fae276861ee286aaec82c5af2 + 737df20c53499fd84b67f04c6ca9ccdee2e77089 touch "${KUBE_ROOT}/vendor/BUILD" From efee0704c60a2ee3049268a41535aaee7f661f6c Mon Sep 17 00:00:00 
2001 From: Jeff Grafton Date: Sat, 23 Dec 2017 13:06:26 -0800 Subject: [PATCH 500/794] Autogenerate BUILD files --- .../fluentd-elasticsearch/es-image/BUILD | 2 +- cluster/gce/gci/mounter/BUILD | 2 +- cluster/images/etcd-version-monitor/BUILD | 2 +- cluster/images/etcd/attachlease/BUILD | 2 +- cluster/images/etcd/rollback/BUILD | 2 +- cmd/clicheck/BUILD | 2 +- cmd/cloud-controller-manager/BUILD | 2 +- .../app/options/BUILD | 2 +- cmd/gendocs/BUILD | 2 +- cmd/genkubedocs/BUILD | 4 +- cmd/genman/BUILD | 2 +- cmd/genswaggertypedocs/BUILD | 2 +- cmd/genutils/BUILD | 2 +- cmd/genyaml/BUILD | 2 +- cmd/gke-certificates-controller/BUILD | 2 +- cmd/gke-certificates-controller/app/BUILD | 2 +- cmd/hyperkube/BUILD | 4 +- cmd/importverifier/BUILD | 2 +- cmd/kube-apiserver/BUILD | 2 +- cmd/kube-apiserver/app/options/BUILD | 2 +- cmd/kube-controller-manager/BUILD | 2 +- cmd/kube-controller-manager/app/BUILD | 2 +- cmd/kube-controller-manager/app/options/BUILD | 2 +- cmd/kube-proxy/BUILD | 2 +- cmd/kube-proxy/app/BUILD | 114 +++++- cmd/kubeadm/BUILD | 2 +- cmd/kubeadm/app/apis/kubeadm/install/BUILD | 2 +- cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD | 33 +- cmd/kubeadm/app/apis/kubeadm/validation/BUILD | 2 +- cmd/kubeadm/app/cmd/BUILD | 2 +- cmd/kubeadm/app/cmd/phases/BUILD | 2 +- cmd/kubeadm/app/cmd/upgrade/BUILD | 2 +- cmd/kubeadm/app/cmd/util/BUILD | 2 +- cmd/kubeadm/app/constants/BUILD | 2 +- cmd/kubeadm/app/discovery/BUILD | 2 +- cmd/kubeadm/app/discovery/token/BUILD | 2 +- cmd/kubeadm/app/features/BUILD | 2 +- cmd/kubeadm/app/images/BUILD | 2 +- cmd/kubeadm/app/phases/addons/dns/BUILD | 2 +- cmd/kubeadm/app/phases/addons/proxy/BUILD | 2 +- .../phases/bootstraptoken/clusterinfo/BUILD | 2 +- .../app/phases/bootstraptoken/node/BUILD | 2 +- cmd/kubeadm/app/phases/certs/BUILD | 2 +- cmd/kubeadm/app/phases/certs/pkiutil/BUILD | 2 +- cmd/kubeadm/app/phases/controlplane/BUILD | 2 +- cmd/kubeadm/app/phases/etcd/BUILD | 2 +- cmd/kubeadm/app/phases/kubeconfig/BUILD | 2 +- 
cmd/kubeadm/app/phases/kubelet/BUILD | 2 +- cmd/kubeadm/app/phases/markmaster/BUILD | 2 +- cmd/kubeadm/app/phases/selfhosting/BUILD | 2 +- cmd/kubeadm/app/phases/upgrade/BUILD | 2 +- cmd/kubeadm/app/phases/uploadconfig/BUILD | 2 +- cmd/kubeadm/app/preflight/BUILD | 35 +- cmd/kubeadm/app/util/BUILD | 2 +- cmd/kubeadm/app/util/apiclient/BUILD | 2 +- cmd/kubeadm/app/util/config/BUILD | 2 +- cmd/kubeadm/app/util/kubeconfig/BUILD | 2 +- cmd/kubeadm/app/util/pubkeypin/BUILD | 2 +- cmd/kubeadm/app/util/staticpod/BUILD | 2 +- cmd/kubeadm/app/util/token/BUILD | 2 +- cmd/kubeadm/test/cmd/BUILD | 2 +- cmd/kubectl/BUILD | 2 +- cmd/kubelet/BUILD | 2 +- cmd/kubelet/app/BUILD | 37 +- cmd/kubelet/app/options/BUILD | 2 +- cmd/kubemark/BUILD | 2 +- cmd/linkcheck/BUILD | 2 +- examples/explorer/BUILD | 2 +- examples/guestbook-go/BUILD | 2 +- examples/https-nginx/BUILD | 2 +- examples/sharing-clusters/BUILD | 2 +- hack/BUILD | 4 +- hack/cmd/teststale/BUILD | 4 +- pkg/api/endpoints/BUILD | 2 +- pkg/api/events/BUILD | 2 +- pkg/api/persistentvolume/BUILD | 2 +- pkg/api/persistentvolumeclaim/BUILD | 2 +- pkg/api/pod/BUILD | 2 +- pkg/api/ref/BUILD | 2 +- pkg/api/resource/BUILD | 2 +- pkg/api/service/BUILD | 2 +- pkg/api/testapi/BUILD | 2 +- pkg/api/testing/BUILD | 2 +- pkg/api/v1/endpoints/BUILD | 2 +- pkg/api/v1/pod/BUILD | 2 +- pkg/api/v1/resource/BUILD | 2 +- pkg/api/v1/service/BUILD | 2 +- .../admissionregistration/validation/BUILD | 2 +- pkg/apis/apps/validation/BUILD | 2 +- pkg/apis/authorization/validation/BUILD | 2 +- pkg/apis/autoscaling/validation/BUILD | 2 +- pkg/apis/batch/validation/BUILD | 2 +- pkg/apis/componentconfig/BUILD | 2 +- pkg/apis/componentconfig/v1alpha1/BUILD | 2 +- pkg/apis/core/BUILD | 2 +- pkg/apis/core/helper/BUILD | 2 +- pkg/apis/core/install/BUILD | 2 +- pkg/apis/core/pods/BUILD | 2 +- pkg/apis/core/v1/helper/BUILD | 2 +- pkg/apis/core/v1/helper/qos/BUILD | 2 +- pkg/apis/core/v1/validation/BUILD | 2 +- pkg/apis/core/validation/BUILD | 2 +- 
pkg/apis/extensions/BUILD | 2 +- pkg/apis/extensions/validation/BUILD | 2 +- pkg/apis/networking/validation/BUILD | 2 +- pkg/apis/policy/validation/BUILD | 2 +- pkg/apis/rbac/validation/BUILD | 2 +- pkg/apis/scheduling/validation/BUILD | 2 +- pkg/apis/settings/validation/BUILD | 2 +- pkg/apis/storage/util/BUILD | 2 +- pkg/apis/storage/validation/BUILD | 2 +- pkg/auth/authorizer/abac/BUILD | 2 +- pkg/auth/nodeidentifier/BUILD | 2 +- pkg/bootstrap/api/BUILD | 2 +- pkg/capabilities/BUILD | 2 +- pkg/client/chaosclient/BUILD | 2 +- .../listers/batch/internalversion/BUILD | 2 +- .../listers/extensions/internalversion/BUILD | 2 +- pkg/client/tests/BUILD | 2 +- pkg/client/unversioned/BUILD | 2 +- pkg/cloudprovider/providers/aws/BUILD | 2 +- pkg/cloudprovider/providers/azure/BUILD | 2 +- pkg/cloudprovider/providers/cloudstack/BUILD | 73 +++- pkg/cloudprovider/providers/gce/BUILD | 2 +- pkg/cloudprovider/providers/openstack/BUILD | 2 +- pkg/cloudprovider/providers/ovirt/BUILD | 2 +- pkg/cloudprovider/providers/photon/BUILD | 2 +- pkg/cloudprovider/providers/vsphere/BUILD | 2 +- pkg/controller/BUILD | 2 +- pkg/controller/bootstrap/BUILD | 2 +- pkg/controller/certificates/BUILD | 2 +- pkg/controller/certificates/approver/BUILD | 2 +- pkg/controller/certificates/cleaner/BUILD | 2 +- pkg/controller/certificates/signer/BUILD | 2 +- pkg/controller/cloud/BUILD | 2 +- pkg/controller/clusterroleaggregation/BUILD | 2 +- pkg/controller/cronjob/BUILD | 2 +- pkg/controller/daemon/BUILD | 2 +- pkg/controller/daemon/util/BUILD | 2 +- pkg/controller/deployment/BUILD | 2 +- pkg/controller/deployment/util/BUILD | 2 +- pkg/controller/disruption/BUILD | 2 +- pkg/controller/endpoint/BUILD | 2 +- pkg/controller/garbagecollector/BUILD | 2 +- .../garbagecollector/metaonly/BUILD | 2 +- pkg/controller/history/BUILD | 2 +- pkg/controller/job/BUILD | 2 +- pkg/controller/namespace/deletion/BUILD | 2 +- pkg/controller/node/BUILD | 2 +- pkg/controller/node/ipam/BUILD | 2 +- 
pkg/controller/node/ipam/cidrset/BUILD | 2 +- pkg/controller/node/ipam/sync/BUILD | 2 +- pkg/controller/node/scheduler/BUILD | 2 +- pkg/controller/podautoscaler/BUILD | 2 +- pkg/controller/podautoscaler/metrics/BUILD | 2 +- pkg/controller/podgc/BUILD | 2 +- pkg/controller/replicaset/BUILD | 2 +- pkg/controller/replication/BUILD | 2 +- pkg/controller/resourcequota/BUILD | 2 +- pkg/controller/route/BUILD | 2 +- pkg/controller/service/BUILD | 2 +- pkg/controller/serviceaccount/BUILD | 2 +- pkg/controller/statefulset/BUILD | 2 +- pkg/controller/ttl/BUILD | 2 +- pkg/controller/volume/attachdetach/BUILD | 2 +- .../volume/attachdetach/cache/BUILD | 2 +- .../volume/attachdetach/populator/BUILD | 2 +- .../volume/attachdetach/reconciler/BUILD | 2 +- pkg/controller/volume/expand/cache/BUILD | 2 +- pkg/controller/volume/persistentvolume/BUILD | 2 +- pkg/controller/volume/pvcprotection/BUILD | 2 +- pkg/credentialprovider/BUILD | 2 +- pkg/credentialprovider/aws/BUILD | 2 +- pkg/credentialprovider/azure/BUILD | 2 +- pkg/credentialprovider/gcp/BUILD | 2 +- pkg/credentialprovider/rancher/BUILD | 2 +- pkg/fieldpath/BUILD | 2 +- pkg/kubeapiserver/BUILD | 2 +- pkg/kubeapiserver/admission/BUILD | 2 +- pkg/kubeapiserver/authorizer/BUILD | 2 +- pkg/kubeapiserver/authorizer/modes/BUILD | 2 +- pkg/kubeapiserver/options/BUILD | 2 +- pkg/kubectl/BUILD | 2 +- pkg/kubectl/categories/BUILD | 2 +- pkg/kubectl/cmd/BUILD | 2 +- pkg/kubectl/cmd/auth/BUILD | 2 +- pkg/kubectl/cmd/config/BUILD | 2 +- pkg/kubectl/cmd/resource/BUILD | 2 +- pkg/kubectl/cmd/set/BUILD | 2 +- pkg/kubectl/cmd/testdata/edit/BUILD | 2 +- pkg/kubectl/cmd/util/BUILD | 2 +- pkg/kubectl/cmd/util/editor/BUILD | 2 +- pkg/kubectl/cmd/util/env/BUILD | 2 +- pkg/kubectl/cmd/util/openapi/validation/BUILD | 2 +- pkg/kubectl/explain/BUILD | 2 +- pkg/kubectl/plugins/BUILD | 2 +- pkg/kubectl/proxy/BUILD | 2 +- pkg/kubectl/resource/BUILD | 2 +- pkg/kubectl/util/BUILD | 70 +++- pkg/kubectl/util/hash/BUILD | 2 +- pkg/kubectl/util/i18n/BUILD | 2 
+- pkg/kubectl/util/slice/BUILD | 2 +- pkg/kubectl/util/term/BUILD | 70 +++- pkg/kubectl/validation/BUILD | 2 +- pkg/kubelet/BUILD | 4 +- pkg/kubelet/apis/kubeletconfig/BUILD | 2 +- pkg/kubelet/apis/kubeletconfig/scheme/BUILD | 2 +- .../apis/kubeletconfig/validation/BUILD | 2 +- pkg/kubelet/cadvisor/BUILD | 59 ++- pkg/kubelet/certificate/BUILD | 2 +- pkg/kubelet/certificate/bootstrap/BUILD | 2 +- pkg/kubelet/checkpoint/BUILD | 2 +- pkg/kubelet/client/BUILD | 2 +- pkg/kubelet/cm/BUILD | 129 +++++- pkg/kubelet/cm/cpumanager/BUILD | 2 +- pkg/kubelet/cm/cpumanager/state/BUILD | 2 +- pkg/kubelet/cm/cpumanager/topology/BUILD | 2 +- pkg/kubelet/cm/cpuset/BUILD | 2 +- pkg/kubelet/cm/deviceplugin/BUILD | 2 +- pkg/kubelet/cm/util/BUILD | 38 +- pkg/kubelet/config/BUILD | 41 +- pkg/kubelet/configmap/BUILD | 2 +- pkg/kubelet/container/BUILD | 37 +- pkg/kubelet/custommetrics/BUILD | 2 +- pkg/kubelet/dockershim/BUILD | 56 ++- pkg/kubelet/dockershim/cm/BUILD | 69 +++- pkg/kubelet/dockershim/libdocker/BUILD | 2 +- pkg/kubelet/eviction/BUILD | 37 +- pkg/kubelet/gpu/nvidia/BUILD | 2 +- pkg/kubelet/images/BUILD | 2 +- pkg/kubelet/kubeletconfig/checkpoint/BUILD | 2 +- .../kubeletconfig/checkpoint/store/BUILD | 2 +- pkg/kubelet/kubeletconfig/configfiles/BUILD | 2 +- pkg/kubelet/kuberuntime/BUILD | 2 +- pkg/kubelet/kuberuntime/logs/BUILD | 2 +- pkg/kubelet/lifecycle/BUILD | 2 +- pkg/kubelet/mountpod/BUILD | 2 +- pkg/kubelet/network/cni/BUILD | 41 +- pkg/kubelet/network/dns/BUILD | 2 +- pkg/kubelet/network/hairpin/BUILD | 2 +- pkg/kubelet/network/hostport/BUILD | 2 +- pkg/kubelet/network/kubenet/BUILD | 100 ++++- pkg/kubelet/network/testing/BUILD | 2 +- pkg/kubelet/pleg/BUILD | 2 +- pkg/kubelet/pod/BUILD | 2 +- pkg/kubelet/preemption/BUILD | 2 +- pkg/kubelet/prober/BUILD | 2 +- pkg/kubelet/prober/results/BUILD | 2 +- pkg/kubelet/qos/BUILD | 2 +- pkg/kubelet/remote/BUILD | 2 +- pkg/kubelet/remote/fake/BUILD | 33 +- pkg/kubelet/rkt/BUILD | 2 +- pkg/kubelet/rktshim/BUILD | 2 +- 
pkg/kubelet/secret/BUILD | 2 +- pkg/kubelet/server/BUILD | 2 +- pkg/kubelet/server/portforward/BUILD | 2 +- pkg/kubelet/server/stats/BUILD | 2 +- pkg/kubelet/server/streaming/BUILD | 2 +- pkg/kubelet/stats/BUILD | 2 +- pkg/kubelet/status/BUILD | 2 +- pkg/kubelet/sysctl/BUILD | 2 +- pkg/kubelet/types/BUILD | 2 +- pkg/kubelet/util/BUILD | 41 +- pkg/kubelet/util/cache/BUILD | 2 +- pkg/kubelet/util/format/BUILD | 2 +- pkg/kubelet/util/queue/BUILD | 2 +- pkg/kubelet/util/sliceutils/BUILD | 2 +- pkg/kubelet/util/store/BUILD | 2 +- pkg/kubelet/volumemanager/BUILD | 2 +- pkg/kubelet/volumemanager/cache/BUILD | 2 +- pkg/kubelet/volumemanager/populator/BUILD | 2 +- pkg/kubelet/volumemanager/reconciler/BUILD | 2 +- pkg/kubelet/winstats/BUILD | 6 +- pkg/master/BUILD | 2 +- pkg/master/controller/crdregistration/BUILD | 2 +- pkg/master/reconcilers/BUILD | 2 +- pkg/master/tunneler/BUILD | 2 +- pkg/printers/BUILD | 2 +- pkg/printers/internalversion/BUILD | 2 +- pkg/probe/exec/BUILD | 2 +- pkg/probe/http/BUILD | 2 +- pkg/probe/tcp/BUILD | 2 +- .../apis/kubeproxyconfig/validation/BUILD | 2 +- pkg/proxy/config/BUILD | 2 +- pkg/proxy/healthcheck/BUILD | 2 +- pkg/proxy/iptables/BUILD | 2 +- pkg/proxy/ipvs/BUILD | 37 +- pkg/proxy/ipvs/testing/BUILD | 2 +- pkg/proxy/userspace/BUILD | 70 +++- pkg/proxy/util/BUILD | 2 +- pkg/proxy/winkernel/BUILD | 4 +- pkg/proxy/winuserspace/BUILD | 2 +- pkg/quota/BUILD | 2 +- pkg/quota/evaluator/core/BUILD | 2 +- pkg/registry/apps/controllerrevision/BUILD | 2 +- .../apps/controllerrevision/storage/BUILD | 2 +- pkg/registry/apps/statefulset/BUILD | 2 +- pkg/registry/apps/statefulset/storage/BUILD | 2 +- .../authorization/subjectaccessreview/BUILD | 2 +- pkg/registry/authorization/util/BUILD | 2 +- .../horizontalpodautoscaler/storage/BUILD | 2 +- pkg/registry/batch/cronjob/BUILD | 2 +- pkg/registry/batch/cronjob/storage/BUILD | 2 +- pkg/registry/batch/job/BUILD | 2 +- pkg/registry/batch/job/storage/BUILD | 2 +- pkg/registry/certificates/certificates/BUILD 
| 2 +- pkg/registry/core/componentstatus/BUILD | 2 +- pkg/registry/core/configmap/BUILD | 2 +- pkg/registry/core/configmap/storage/BUILD | 2 +- pkg/registry/core/endpoint/storage/BUILD | 2 +- pkg/registry/core/event/BUILD | 2 +- pkg/registry/core/event/storage/BUILD | 2 +- pkg/registry/core/limitrange/storage/BUILD | 2 +- pkg/registry/core/namespace/BUILD | 2 +- pkg/registry/core/namespace/storage/BUILD | 2 +- pkg/registry/core/node/BUILD | 2 +- pkg/registry/core/node/storage/BUILD | 2 +- pkg/registry/core/persistentvolume/BUILD | 2 +- .../core/persistentvolume/storage/BUILD | 2 +- pkg/registry/core/persistentvolumeclaim/BUILD | 2 +- .../core/persistentvolumeclaim/storage/BUILD | 2 +- pkg/registry/core/pod/BUILD | 2 +- pkg/registry/core/pod/rest/BUILD | 2 +- pkg/registry/core/pod/storage/BUILD | 2 +- pkg/registry/core/podtemplate/storage/BUILD | 2 +- pkg/registry/core/replicationcontroller/BUILD | 2 +- .../core/replicationcontroller/storage/BUILD | 2 +- pkg/registry/core/resourcequota/BUILD | 2 +- pkg/registry/core/resourcequota/storage/BUILD | 2 +- pkg/registry/core/rest/BUILD | 2 +- pkg/registry/core/secret/BUILD | 2 +- pkg/registry/core/secret/storage/BUILD | 2 +- pkg/registry/core/service/BUILD | 2 +- pkg/registry/core/service/allocator/BUILD | 2 +- .../core/service/allocator/storage/BUILD | 2 +- pkg/registry/core/service/ipallocator/BUILD | 2 +- .../core/service/ipallocator/controller/BUILD | 2 +- .../core/service/ipallocator/storage/BUILD | 2 +- pkg/registry/core/service/portallocator/BUILD | 2 +- .../service/portallocator/controller/BUILD | 2 +- pkg/registry/core/service/storage/BUILD | 2 +- .../core/serviceaccount/storage/BUILD | 2 +- .../extensions/controller/storage/BUILD | 2 +- pkg/registry/extensions/daemonset/BUILD | 2 +- .../extensions/daemonset/storage/BUILD | 2 +- pkg/registry/extensions/deployment/BUILD | 2 +- .../extensions/deployment/storage/BUILD | 2 +- pkg/registry/extensions/ingress/BUILD | 2 +- pkg/registry/extensions/ingress/storage/BUILD | 
2 +- .../podsecuritypolicy/storage/BUILD | 2 +- pkg/registry/extensions/replicaset/BUILD | 2 +- .../extensions/replicaset/storage/BUILD | 2 +- .../networking/networkpolicy/storage/BUILD | 2 +- pkg/registry/policy/poddisruptionbudget/BUILD | 2 +- .../policy/poddisruptionbudget/storage/BUILD | 2 +- pkg/registry/rbac/BUILD | 2 +- pkg/registry/rbac/reconciliation/BUILD | 2 +- pkg/registry/rbac/validation/BUILD | 2 +- pkg/registry/scheduling/priorityclass/BUILD | 2 +- .../scheduling/priorityclass/storage/BUILD | 2 +- pkg/registry/settings/podpreset/storage/BUILD | 2 +- pkg/registry/storage/storageclass/BUILD | 2 +- .../storage/storageclass/storage/BUILD | 2 +- pkg/registry/storage/volumeattachment/BUILD | 2 +- .../storage/volumeattachment/storage/BUILD | 2 +- pkg/security/apparmor/BUILD | 37 +- pkg/security/podsecuritypolicy/BUILD | 2 +- pkg/security/podsecuritypolicy/apparmor/BUILD | 2 +- .../podsecuritypolicy/capabilities/BUILD | 2 +- pkg/security/podsecuritypolicy/group/BUILD | 2 +- pkg/security/podsecuritypolicy/seccomp/BUILD | 2 +- pkg/security/podsecuritypolicy/selinux/BUILD | 2 +- pkg/security/podsecuritypolicy/sysctl/BUILD | 2 +- pkg/security/podsecuritypolicy/user/BUILD | 2 +- pkg/security/podsecuritypolicy/util/BUILD | 2 +- pkg/securitycontext/BUILD | 2 +- pkg/ssh/BUILD | 2 +- pkg/util/async/BUILD | 2 +- pkg/util/bandwidth/BUILD | 41 +- pkg/util/config/BUILD | 2 +- pkg/util/configz/BUILD | 2 +- pkg/util/dbus/BUILD | 2 +- pkg/util/ebtables/BUILD | 2 +- pkg/util/env/BUILD | 2 +- pkg/util/file/BUILD | 2 +- pkg/util/flock/BUILD | 51 ++- pkg/util/goroutinemap/BUILD | 2 +- pkg/util/hash/BUILD | 2 +- pkg/util/ipconfig/BUILD | 2 +- pkg/util/ipset/BUILD | 2 +- pkg/util/ipset/testing/BUILD | 2 +- pkg/util/iptables/BUILD | 41 +- pkg/util/ipvs/BUILD | 76 +++- pkg/util/ipvs/testing/BUILD | 2 +- pkg/util/keymutex/BUILD | 2 +- pkg/util/labels/BUILD | 2 +- pkg/util/limitwriter/BUILD | 2 +- pkg/util/metrics/BUILD | 2 +- pkg/util/mount/BUILD | 66 +++- pkg/util/net/sets/BUILD | 
2 +- pkg/util/netsh/BUILD | 2 +- pkg/util/node/BUILD | 2 +- pkg/util/nsenter/BUILD | 73 +++- pkg/util/oom/BUILD | 41 +- pkg/util/parsers/BUILD | 2 +- pkg/util/pointer/BUILD | 2 +- pkg/util/procfs/BUILD | 41 +- pkg/util/removeall/BUILD | 2 +- pkg/util/resizefs/BUILD | 73 +++- pkg/util/resourcecontainer/BUILD | 38 +- pkg/util/rlimit/BUILD | 38 +- pkg/util/selinux/BUILD | 35 +- pkg/util/slice/BUILD | 2 +- pkg/util/strings/BUILD | 2 +- pkg/util/system/BUILD | 2 +- pkg/util/tail/BUILD | 2 +- pkg/util/taints/BUILD | 2 +- pkg/util/template/BUILD | 2 +- pkg/util/term/BUILD | 86 +++- pkg/util/threading/BUILD | 2 +- pkg/util/tolerations/BUILD | 2 +- pkg/util/version/BUILD | 2 +- pkg/volume/BUILD | 39 +- pkg/volume/aws_ebs/BUILD | 2 +- pkg/volume/azure_dd/BUILD | 34 +- pkg/volume/azure_file/BUILD | 2 +- pkg/volume/cephfs/BUILD | 2 +- pkg/volume/cinder/BUILD | 2 +- pkg/volume/configmap/BUILD | 2 +- pkg/volume/csi/BUILD | 2 +- pkg/volume/downwardapi/BUILD | 2 +- pkg/volume/empty_dir/BUILD | 41 +- pkg/volume/fc/BUILD | 2 +- pkg/volume/flexvolume/BUILD | 2 +- pkg/volume/flocker/BUILD | 2 +- pkg/volume/gce_pd/BUILD | 2 +- pkg/volume/git_repo/BUILD | 2 +- pkg/volume/glusterfs/BUILD | 2 +- pkg/volume/host_path/BUILD | 2 +- pkg/volume/iscsi/BUILD | 2 +- pkg/volume/local/BUILD | 2 +- pkg/volume/nfs/BUILD | 2 +- pkg/volume/photon_pd/BUILD | 2 +- pkg/volume/portworx/BUILD | 2 +- pkg/volume/projected/BUILD | 2 +- pkg/volume/quobyte/BUILD | 2 +- pkg/volume/rbd/BUILD | 2 +- pkg/volume/scaleio/BUILD | 2 +- pkg/volume/secret/BUILD | 2 +- pkg/volume/storageos/BUILD | 2 +- pkg/volume/util/BUILD | 98 ++++- pkg/volume/util/nestedpendingoperations/BUILD | 2 +- pkg/volume/util/operationexecutor/BUILD | 2 +- pkg/volume/validation/BUILD | 2 +- pkg/volume/vsphere_volume/BUILD | 2 +- plugin/cmd/kube-scheduler/BUILD | 2 +- plugin/pkg/admission/admit/BUILD | 2 +- plugin/pkg/admission/alwayspullimages/BUILD | 2 +- plugin/pkg/admission/antiaffinity/BUILD | 2 +- .../admission/defaulttolerationseconds/BUILD 
| 2 +- plugin/pkg/admission/deny/BUILD | 2 +- plugin/pkg/admission/eventratelimit/BUILD | 2 +- .../apis/eventratelimit/validation/BUILD | 2 +- plugin/pkg/admission/exec/BUILD | 2 +- .../extendedresourcetoleration/BUILD | 2 +- plugin/pkg/admission/gc/BUILD | 2 +- plugin/pkg/admission/imagepolicy/BUILD | 2 +- plugin/pkg/admission/initialresources/BUILD | 2 +- plugin/pkg/admission/limitranger/BUILD | 2 +- .../admission/namespace/autoprovision/BUILD | 2 +- plugin/pkg/admission/namespace/exists/BUILD | 2 +- plugin/pkg/admission/noderestriction/BUILD | 2 +- .../admission/persistentvolume/label/BUILD | 2 +- .../admission/persistentvolume/resize/BUILD | 2 +- .../persistentvolumeclaim/pvcprotection/BUILD | 2 +- plugin/pkg/admission/podnodeselector/BUILD | 2 +- plugin/pkg/admission/podpreset/BUILD | 2 +- .../admission/podtolerationrestriction/BUILD | 2 +- .../podtolerationrestriction/validation/BUILD | 2 +- plugin/pkg/admission/priority/BUILD | 2 +- plugin/pkg/admission/resourcequota/BUILD | 2 +- .../apis/resourcequota/validation/BUILD | 2 +- .../security/podsecuritypolicy/BUILD | 2 +- .../admission/securitycontext/scdeny/BUILD | 2 +- plugin/pkg/admission/serviceaccount/BUILD | 2 +- .../admission/storageclass/setdefault/BUILD | 2 +- .../auth/authenticator/token/bootstrap/BUILD | 2 +- plugin/pkg/auth/authorizer/node/BUILD | 2 +- plugin/pkg/auth/authorizer/rbac/BUILD | 2 +- .../authorizer/rbac/bootstrappolicy/BUILD | 2 +- plugin/pkg/scheduler/BUILD | 2 +- plugin/pkg/scheduler/algorithm/BUILD | 2 +- .../pkg/scheduler/algorithm/predicates/BUILD | 2 +- .../pkg/scheduler/algorithm/priorities/BUILD | 2 +- .../scheduler/algorithm/priorities/util/BUILD | 2 +- plugin/pkg/scheduler/algorithmprovider/BUILD | 2 +- .../algorithmprovider/defaults/BUILD | 2 +- plugin/pkg/scheduler/api/validation/BUILD | 2 +- plugin/pkg/scheduler/core/BUILD | 2 +- plugin/pkg/scheduler/factory/BUILD | 2 +- plugin/pkg/scheduler/schedulercache/BUILD | 2 +- plugin/pkg/scheduler/util/BUILD | 2 +- 
staging/src/k8s.io/api/core/v1/BUILD | 2 +- .../src/k8s.io/apiextensions-apiserver/BUILD | 2 +- .../pkg/apis/apiextensions/BUILD | 2 +- .../pkg/apis/apiextensions/install/BUILD | 2 +- .../pkg/apis/apiextensions/v1beta1/BUILD | 2 +- .../pkg/apis/apiextensions/validation/BUILD | 2 +- .../pkg/apiserver/BUILD | 2 +- .../pkg/apiserver/validation/BUILD | 2 +- .../pkg/controller/status/BUILD | 2 +- .../k8s.io/apimachinery/pkg/api/errors/BUILD | 2 +- .../k8s.io/apimachinery/pkg/api/meta/BUILD | 2 +- .../apimachinery/pkg/api/resource/BUILD | 2 +- .../apimachinery/pkg/api/testing/fuzzer/BUILD | 2 +- .../apimachinery/pkg/api/validation/BUILD | 2 +- .../pkg/api/validation/path/BUILD | 2 +- .../apimachinery/pkg/apimachinery/BUILD | 2 +- .../pkg/apimachinery/announced/BUILD | 2 +- .../pkg/apimachinery/registered/BUILD | 2 +- .../pkg/apis/meta/internalversion/BUILD | 2 +- .../apimachinery/pkg/apis/meta/v1/BUILD | 2 +- .../pkg/apis/meta/v1/unstructured/BUILD | 2 +- .../pkg/apis/meta/v1/validation/BUILD | 2 +- .../pkg/apis/testapigroup/install/BUILD | 2 +- .../k8s.io/apimachinery/pkg/conversion/BUILD | 2 +- .../src/k8s.io/apimachinery/pkg/fields/BUILD | 2 +- .../src/k8s.io/apimachinery/pkg/labels/BUILD | 2 +- .../src/k8s.io/apimachinery/pkg/runtime/BUILD | 2 +- .../apimachinery/pkg/runtime/schema/BUILD | 2 +- .../apimachinery/pkg/runtime/serializer/BUILD | 2 +- .../pkg/runtime/serializer/json/BUILD | 2 +- .../pkg/runtime/serializer/streaming/BUILD | 2 +- .../pkg/runtime/serializer/versioning/BUILD | 2 +- .../src/k8s.io/apimachinery/pkg/test/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/cache/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/clock/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/diff/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/errors/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/framer/BUILD | 2 +- .../apimachinery/pkg/util/httpstream/BUILD | 2 +- .../pkg/util/httpstream/spdy/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/intstr/BUILD | 2 +- 
.../k8s.io/apimachinery/pkg/util/json/BUILD | 2 +- .../pkg/util/jsonmergepatch/BUILD | 2 +- .../apimachinery/pkg/util/mergepatch/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/net/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/proxy/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/rand/BUILD | 2 +- .../apimachinery/pkg/util/runtime/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/sets/BUILD | 2 +- .../pkg/util/strategicpatch/BUILD | 2 +- .../apimachinery/pkg/util/validation/BUILD | 2 +- .../pkg/util/validation/field/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/wait/BUILD | 2 +- .../apimachinery/pkg/util/waitgroup/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/yaml/BUILD | 2 +- .../src/k8s.io/apimachinery/pkg/watch/BUILD | 2 +- .../third_party/forked/golang/json/BUILD | 2 +- .../third_party/forked/golang/reflect/BUILD | 2 +- .../src/k8s.io/apiserver/pkg/admission/BUILD | 2 +- .../pkg/admission/configuration/BUILD | 2 +- .../apiserver/pkg/admission/metrics/BUILD | 2 +- .../pkg/admission/plugin/initialization/BUILD | 2 +- .../plugin/namespace/lifecycle/BUILD | 2 +- .../pkg/admission/plugin/webhook/config/BUILD | 2 +- .../pkg/admission/plugin/webhook/errors/BUILD | 2 +- .../plugin/webhook/initializer/BUILD | 2 +- .../admission/plugin/webhook/mutating/BUILD | 2 +- .../admission/plugin/webhook/namespace/BUILD | 2 +- .../pkg/admission/plugin/webhook/rules/BUILD | 2 +- .../admission/plugin/webhook/validating/BUILD | 2 +- .../admission/plugin/webhook/versioned/BUILD | 2 +- .../apiserver/pkg/apis/audit/install/BUILD | 2 +- .../apiserver/pkg/apis/audit/v1alpha1/BUILD | 2 +- .../apiserver/pkg/apis/audit/v1beta1/BUILD | 2 +- .../apiserver/pkg/apis/audit/validation/BUILD | 2 +- .../apiserver/pkg/apis/example/install/BUILD | 2 +- .../apiserver/pkg/apis/example2/install/BUILD | 2 +- staging/src/k8s.io/apiserver/pkg/audit/BUILD | 2 +- .../k8s.io/apiserver/pkg/audit/policy/BUILD | 2 +- .../apiserver/pkg/authentication/group/BUILD | 2 +- .../authentication/request/anonymous/BUILD | 2 +- 
.../authentication/request/bearertoken/BUILD | 2 +- .../request/headerrequest/BUILD | 2 +- .../pkg/authentication/request/union/BUILD | 2 +- .../authentication/request/websocket/BUILD | 2 +- .../pkg/authentication/request/x509/BUILD | 2 +- .../pkg/authentication/serviceaccount/BUILD | 2 +- .../pkg/authentication/token/cache/BUILD | 2 +- .../pkg/authentication/token/tokenfile/BUILD | 2 +- .../pkg/authentication/token/union/BUILD | 2 +- .../pkg/authorization/authorizerfactory/BUILD | 2 +- .../apiserver/pkg/authorization/union/BUILD | 2 +- .../src/k8s.io/apiserver/pkg/endpoints/BUILD | 2 +- .../apiserver/pkg/endpoints/discovery/BUILD | 2 +- .../apiserver/pkg/endpoints/filters/BUILD | 2 +- .../apiserver/pkg/endpoints/handlers/BUILD | 2 +- .../pkg/endpoints/handlers/negotiation/BUILD | 2 +- .../endpoints/handlers/responsewriters/BUILD | 2 +- .../apiserver/pkg/endpoints/metrics/BUILD | 2 +- .../apiserver/pkg/endpoints/openapi/BUILD | 2 +- .../apiserver/pkg/endpoints/request/BUILD | 2 +- .../pkg/registry/generic/registry/BUILD | 2 +- .../apiserver/pkg/registry/generic/rest/BUILD | 2 +- .../k8s.io/apiserver/pkg/registry/rest/BUILD | 2 +- staging/src/k8s.io/apiserver/pkg/server/BUILD | 35 +- .../k8s.io/apiserver/pkg/server/filters/BUILD | 2 +- .../k8s.io/apiserver/pkg/server/healthz/BUILD | 2 +- .../k8s.io/apiserver/pkg/server/httplog/BUILD | 2 +- .../src/k8s.io/apiserver/pkg/server/mux/BUILD | 2 +- .../k8s.io/apiserver/pkg/server/options/BUILD | 2 +- .../pkg/server/options/encryptionconfig/BUILD | 2 +- .../k8s.io/apiserver/pkg/server/storage/BUILD | 2 +- .../src/k8s.io/apiserver/pkg/storage/BUILD | 2 +- .../k8s.io/apiserver/pkg/storage/etcd/BUILD | 2 +- .../apiserver/pkg/storage/etcd/util/BUILD | 2 +- .../k8s.io/apiserver/pkg/storage/etcd3/BUILD | 2 +- .../pkg/storage/etcd3/preflight/BUILD | 2 +- .../k8s.io/apiserver/pkg/storage/names/BUILD | 2 +- .../pkg/storage/storagebackend/factory/BUILD | 2 +- .../k8s.io/apiserver/pkg/storage/tests/BUILD | 2 +- 
.../k8s.io/apiserver/pkg/storage/value/BUILD | 2 +- .../pkg/storage/value/encrypt/aes/BUILD | 2 +- .../pkg/storage/value/encrypt/envelope/BUILD | 2 +- .../pkg/storage/value/encrypt/secretbox/BUILD | 2 +- .../k8s.io/apiserver/pkg/util/feature/BUILD | 2 +- .../src/k8s.io/apiserver/pkg/util/flag/BUILD | 2 +- .../apiserver/pkg/util/flushwriter/BUILD | 2 +- .../src/k8s.io/apiserver/pkg/util/proxy/BUILD | 2 +- .../k8s.io/apiserver/pkg/util/webhook/BUILD | 2 +- .../k8s.io/apiserver/pkg/util/wsstream/BUILD | 2 +- .../apiserver/plugin/pkg/audit/log/BUILD | 2 +- .../apiserver/plugin/pkg/audit/webhook/BUILD | 2 +- .../pkg/authenticator/password/allow/BUILD | 2 +- .../authenticator/password/passwordfile/BUILD | 2 +- .../pkg/authenticator/request/basicauth/BUILD | 2 +- .../plugin/pkg/authenticator/token/oidc/BUILD | 2 +- .../pkg/authenticator/token/webhook/BUILD | 2 +- .../plugin/pkg/authorizer/webhook/BUILD | 2 +- .../k8s.io/client-go/discovery/cached/BUILD | 2 +- staging/src/k8s.io/client-go/dynamic/BUILD | 2 +- .../create-update-delete-deployment/BUILD | 2 +- .../in-cluster-client-configuration/BUILD | 2 +- .../out-of-cluster-client-configuration/BUILD | 2 +- .../k8s.io/client-go/examples/workqueue/BUILD | 2 +- .../listers/extensions/v1beta1/BUILD | 2 +- .../plugin/pkg/client/auth/azure/BUILD | 2 +- .../plugin/pkg/client/auth/gcp/BUILD | 2 +- .../plugin/pkg/client/auth/oidc/BUILD | 2 +- .../plugin/pkg/client/auth/openstack/BUILD | 2 +- staging/src/k8s.io/client-go/rest/BUILD | 2 +- staging/src/k8s.io/client-go/scale/BUILD | 2 +- .../src/k8s.io/client-go/tools/cache/BUILD | 2 +- .../client-go/tools/cache/testing/BUILD | 2 +- .../k8s.io/client-go/tools/clientcmd/BUILD | 2 +- .../client-go/tools/clientcmd/api/BUILD | 2 +- .../client-go/tools/leaderelection/BUILD | 2 +- .../src/k8s.io/client-go/tools/pager/BUILD | 2 +- .../k8s.io/client-go/tools/portforward/BUILD | 2 +- .../src/k8s.io/client-go/tools/record/BUILD | 2 +- .../client-go/tools/remotecommand/BUILD | 2 +- 
staging/src/k8s.io/client-go/transport/BUILD | 2 +- .../src/k8s.io/client-go/util/buffer/BUILD | 2 +- staging/src/k8s.io/client-go/util/cert/BUILD | 2 +- .../k8s.io/client-go/util/certificate/BUILD | 2 +- .../client-go/util/certificate/csr/BUILD | 2 +- .../k8s.io/client-go/util/flowcontrol/BUILD | 2 +- .../src/k8s.io/client-go/util/integer/BUILD | 2 +- .../src/k8s.io/client-go/util/jsonpath/BUILD | 2 +- staging/src/k8s.io/client-go/util/retry/BUILD | 2 +- .../src/k8s.io/client-go/util/testing/BUILD | 2 +- .../src/k8s.io/client-go/util/workqueue/BUILD | 2 +- .../code-generator/cmd/client-gen/BUILD | 2 +- .../code-generator/cmd/client-gen/args/BUILD | 2 +- .../cmd/client-gen/generators/util/BUILD | 2 +- .../code-generator/cmd/client-gen/types/BUILD | 2 +- .../code-generator/cmd/conversion-gen/BUILD | 2 +- .../code-generator/cmd/deepcopy-gen/BUILD | 2 +- .../code-generator/cmd/defaulter-gen/BUILD | 2 +- .../code-generator/cmd/go-to-protobuf/BUILD | 2 +- .../cmd/go-to-protobuf/protobuf/BUILD | 2 +- .../cmd/go-to-protobuf/protoc-gen-gogo/BUILD | 2 +- .../code-generator/cmd/import-boss/BUILD | 2 +- .../code-generator/cmd/informer-gen/BUILD | 2 +- .../code-generator/cmd/lister-gen/BUILD | 2 +- .../code-generator/cmd/openapi-gen/BUILD | 2 +- .../k8s.io/code-generator/cmd/set-gen/BUILD | 2 +- staging/src/k8s.io/kube-aggregator/BUILD | 2 +- .../kube-aggregator/pkg/apiserver/BUILD | 2 +- .../pkg/controllers/autoregister/BUILD | 2 +- .../pkg/controllers/openapi/BUILD | 2 +- .../pkg/controllers/status/BUILD | 2 +- staging/src/k8s.io/sample-apiserver/BUILD | 2 +- .../pkg/apis/wardle/install/BUILD | 2 +- .../sample-apiserver/pkg/apiserver/BUILD | 2 +- staging/src/k8s.io/sample-controller/BUILD | 2 +- .../sample-controller/pkg/signals/BUILD | 33 +- test/conformance/BUILD | 2 +- test/e2e/BUILD | 2 +- test/e2e/chaosmonkey/BUILD | 2 +- test/e2e/framework/timer/BUILD | 2 +- test/e2e/scheduling/BUILD | 2 +- test/e2e_node/BUILD | 10 +- test/e2e_node/environment/BUILD | 2 +- 
test/e2e_node/runner/local/BUILD | 2 +- test/e2e_node/runner/remote/BUILD | 2 +- test/e2e_node/system/BUILD | 35 +- test/images/clusterapi-tester/BUILD | 2 +- test/images/entrypoint-tester/BUILD | 2 +- test/images/fakegitserver/BUILD | 2 +- test/images/goproxy/BUILD | 2 +- test/images/liveness/BUILD | 2 +- test/images/logs-generator/BUILD | 2 +- test/images/mounttest/BUILD | 2 +- test/images/n-way-http/BUILD | 2 +- test/images/net/BUILD | 2 +- test/images/netexec/BUILD | 2 +- test/images/nettest/BUILD | 2 +- test/images/no-snat-test-proxy/BUILD | 2 +- test/images/no-snat-test/BUILD | 2 +- test/images/nonewprivs/BUILD | 2 +- test/images/pets/peer-finder/BUILD | 2 +- test/images/port-forward-tester/BUILD | 2 +- test/images/porter/BUILD | 2 +- test/images/resource-consumer/BUILD | 2 +- .../resource-consumer/consume-cpu/BUILD | 2 +- .../images/resource-consumer/controller/BUILD | 2 +- test/images/serve-hostname/BUILD | 2 +- test/images/test-webserver/BUILD | 2 +- test/images/webhook/BUILD | 4 +- test/integration/deployment/BUILD | 2 +- test/integration/metrics/BUILD | 2 +- test/integration/scheduler/BUILD | 2 +- test/integration/scheduler_perf/BUILD | 2 +- test/list/BUILD | 4 +- test/soak/cauldron/BUILD | 2 +- test/soak/serve_hostnames/BUILD | 2 +- third_party/forked/etcd221/pkg/fileutil/BUILD | 50 ++- third_party/forked/etcd237/pkg/fileutil/BUILD | 61 ++- third_party/forked/golang/expansion/BUILD | 2 +- third_party/forked/golang/reflect/BUILD | 2 +- third_party/forked/gonum/graph/simple/BUILD | 2 +- .../bitbucket.org/bertimus9/systemstat/BUILD | 33 +- .../Azure/go-ansiterm/winterm/BUILD | 4 +- .../Azure/go-autorest/autorest/adal/BUILD | 33 +- vendor/github.com/JeffAshton/win_pdh/BUILD | 2 +- vendor/github.com/Microsoft/go-winio/BUILD | 4 +- .../aws/aws-sdk-go/aws/request/BUILD | 38 +- vendor/github.com/boltdb/bolt/BUILD | 71 +++- .../containerd/containerd/dialer/BUILD | 35 +- .../containernetworking/cni/pkg/invoke/BUILD | 18 +- 
.../github.com/coreos/etcd/mvcc/backend/BUILD | 33 +- .../github.com/coreos/etcd/pkg/fileutil/BUILD | 76 +++- .../github.com/coreos/etcd/pkg/netutil/BUILD | 46 ++- .../github.com/coreos/etcd/pkg/runtime/BUILD | 36 +- vendor/github.com/coreos/etcd/wal/BUILD | 33 +- .../github.com/coreos/go-systemd/util/BUILD | 1 + vendor/github.com/coreos/pkg/capnslog/BUILD | 89 ++++- vendor/github.com/coreos/pkg/dlopen/BUILD | 2 +- vendor/github.com/d2g/dhcp4client/BUILD | 4 +- .../github.com/daviddengcn/go-colortext/BUILD | 33 +- vendor/github.com/dchest/safefile/BUILD | 34 +- vendor/github.com/docker/docker/api/BUILD | 33 +- .../docker/docker/api/types/container/BUILD | 33 +- vendor/github.com/docker/docker/client/BUILD | 15 +- .../docker/docker/pkg/ioutils/BUILD | 35 +- .../github.com/docker/docker/pkg/mount/BUILD | 68 +++- .../docker/docker/pkg/symlink/BUILD | 35 +- .../github.com/docker/docker/pkg/system/BUILD | 195 ++++++++- .../github.com/docker/docker/pkg/term/BUILD | 94 ++++- .../docker/docker/pkg/term/windows/BUILD | 4 +- .../docker/go-connections/sockets/BUILD | 46 ++- .../github.com/docker/libnetwork/ipvs/BUILD | 4 +- vendor/github.com/fsnotify/fsnotify/BUILD | 59 ++- vendor/github.com/godbus/dbus/BUILD | 45 ++- vendor/github.com/google/cadvisor/fs/BUILD | 4 +- .../certificate-transparency/go/x509/BUILD | 26 +- vendor/github.com/howeyc/gopass/BUILD | 75 +++- .../inconshreveable/mousetrap/BUILD | 36 +- .../jteeuwen/go-bindata/go-bindata/BUILD | 2 +- vendor/github.com/kardianos/osext/BUILD | 24 +- vendor/github.com/kr/pty/BUILD | 71 +++- vendor/github.com/miekg/dns/BUILD | 43 +- vendor/github.com/onsi/ginkgo/ginkgo/BUILD | 2 +- .../onsi/ginkgo/ginkgo/interrupthandler/BUILD | 21 +- .../onsi/ginkgo/internal/remote/BUILD | 139 ++++++- .../stenographer/support/go-colorable/BUILD | 35 +- .../stenographer/support/go-isatty/BUILD | 24 +- .../opencontainers/runc/libcontainer/BUILD | 15 +- .../runc/libcontainer/apparmor/BUILD | 37 +- .../runc/libcontainer/cgroups/BUILD | 38 +- 
.../runc/libcontainer/cgroups/fs/BUILD | 38 +- .../runc/libcontainer/cgroups/rootless/BUILD | 4 +- .../runc/libcontainer/cgroups/systemd/BUILD | 85 +++- .../runc/libcontainer/configs/BUILD | 55 ++- .../runc/libcontainer/keys/BUILD | 4 +- .../runc/libcontainer/seccomp/BUILD | 42 +- .../runc/libcontainer/system/BUILD | 72 +++- .../runc/libcontainer/user/BUILD | 51 ++- .../runc/libcontainer/utils/BUILD | 35 +- .../opencontainers/selinux/go-selinux/BUILD | 2 +- .../selinux/go-selinux/label/BUILD | 37 +- vendor/github.com/pkg/sftp/BUILD | 60 ++- .../seccomp/libseccomp-golang/BUILD | 4 +- vendor/github.com/sirupsen/logrus/BUILD | 32 +- vendor/github.com/spf13/afero/BUILD | 33 +- vendor/github.com/spf13/cobra/BUILD | 35 +- vendor/github.com/storageos/go-api/BUILD | 35 +- .../syndtr/gocapability/capability/BUILD | 33 +- vendor/github.com/tools/godep/BUILD | 2 +- vendor/github.com/vishvananda/netlink/BUILD | 126 +++++- .../github.com/vishvananda/netlink/nl/BUILD | 35 +- vendor/github.com/vishvananda/netns/BUILD | 33 +- .../photon-controller-go-sdk/SSPI/BUILD | 36 +- .../photon/lightwave/BUILD | 35 +- vendor/golang.org/x/crypto/curve25519/BUILD | 52 ++- vendor/golang.org/x/crypto/poly1305/BUILD | 100 ++++- .../golang.org/x/crypto/salsa20/salsa/BUILD | 40 +- vendor/golang.org/x/crypto/ssh/terminal/BUILD | 49 ++- vendor/golang.org/x/exp/inotify/BUILD | 2 +- vendor/golang.org/x/sys/unix/BUILD | 371 +++++++++++++++++- vendor/golang.org/x/sys/windows/BUILD | 14 +- .../x/tools/container/intsets/BUILD | 40 +- vendor/gopkg.in/natefinch/lumberjack.v2/BUILD | 33 +- 825 files changed, 6006 insertions(+), 1311 deletions(-) diff --git a/cluster/addons/fluentd-elasticsearch/es-image/BUILD b/cluster/addons/fluentd-elasticsearch/es-image/BUILD index be785094150..6ac051f91cf 100644 --- a/cluster/addons/fluentd-elasticsearch/es-image/BUILD +++ b/cluster/addons/fluentd-elasticsearch/es-image/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "es-image", + embed = [":go_default_library"], importpath 
= "k8s.io/kubernetes/cluster/addons/fluentd-elasticsearch/es-image", - library = ":go_default_library", ) go_library( diff --git a/cluster/gce/gci/mounter/BUILD b/cluster/gce/gci/mounter/BUILD index 94653e86b05..9600d6e441d 100644 --- a/cluster/gce/gci/mounter/BUILD +++ b/cluster/gce/gci/mounter/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "mounter", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cluster/gce/gci/mounter", - library = ":go_default_library", ) go_library( diff --git a/cluster/images/etcd-version-monitor/BUILD b/cluster/images/etcd-version-monitor/BUILD index a97642e25dc..c946b1194d9 100644 --- a/cluster/images/etcd-version-monitor/BUILD +++ b/cluster/images/etcd-version-monitor/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "etcd-version-monitor", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cluster/images/etcd-version-monitor", - library = ":go_default_library", ) go_library( diff --git a/cluster/images/etcd/attachlease/BUILD b/cluster/images/etcd/attachlease/BUILD index 0e2cb9efbaf..abb4a3c831e 100644 --- a/cluster/images/etcd/attachlease/BUILD +++ b/cluster/images/etcd/attachlease/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "attachlease", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cluster/images/etcd/attachlease", - library = ":go_default_library", ) go_library( diff --git a/cluster/images/etcd/rollback/BUILD b/cluster/images/etcd/rollback/BUILD index bdf4514b563..252e974829e 100644 --- a/cluster/images/etcd/rollback/BUILD +++ b/cluster/images/etcd/rollback/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "rollback", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cluster/images/etcd/rollback", - library = ":go_default_library", ) go_library( diff --git a/cmd/clicheck/BUILD b/cmd/clicheck/BUILD index 7c0565e03bc..2d1df07eab8 100644 --- a/cmd/clicheck/BUILD +++ b/cmd/clicheck/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "clicheck", + embed = 
[":go_default_library"], importpath = "k8s.io/kubernetes/cmd/clicheck", - library = ":go_default_library", ) go_library( diff --git a/cmd/cloud-controller-manager/BUILD b/cmd/cloud-controller-manager/BUILD index 0a30843ab69..dbbae5c6f15 100644 --- a/cmd/cloud-controller-manager/BUILD +++ b/cmd/cloud-controller-manager/BUILD @@ -9,6 +9,7 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "cloud-controller-manager", + embed = [":go_default_library"], gc_linkopts = [ "-linkmode", "external", @@ -16,7 +17,6 @@ go_binary( "-static", ], importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager", - library = ":go_default_library", x_defs = version_x_defs(), ) diff --git a/cmd/cloud-controller-manager/app/options/BUILD b/cmd/cloud-controller-manager/app/options/BUILD index b02da213f59..9d9c0bb19cd 100644 --- a/cmd/cloud-controller-manager/app/options/BUILD +++ b/cmd/cloud-controller-manager/app/options/BUILD @@ -37,8 +37,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["options_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager/app/options", - library = ":go_default_library", deps = [ "//cmd/controller-manager/app/options:go_default_library", "//pkg/apis/componentconfig:go_default_library", diff --git a/cmd/gendocs/BUILD b/cmd/gendocs/BUILD index 372300d965f..aa36f4f8cb4 100644 --- a/cmd/gendocs/BUILD +++ b/cmd/gendocs/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "gendocs", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/gendocs", - library = ":go_default_library", ) go_library( diff --git a/cmd/genkubedocs/BUILD b/cmd/genkubedocs/BUILD index 4f70ec76780..ef6c78a34e9 100644 --- a/cmd/genkubedocs/BUILD +++ b/cmd/genkubedocs/BUILD @@ -9,8 +9,8 @@ load( go_binary( name = "genkubedocs", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/genkubedocs", - library = ":go_default_library", ) go_library( @@ -51,6 +51,6 @@ filegroup( go_test( name = 
"go_default_test", srcs = ["postprocessing_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/genkubedocs", - library = ":go_default_library", ) diff --git a/cmd/genman/BUILD b/cmd/genman/BUILD index 71dcfe750e3..e2e4f9df623 100644 --- a/cmd/genman/BUILD +++ b/cmd/genman/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "genman", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/genman", - library = ":go_default_library", ) go_library( diff --git a/cmd/genswaggertypedocs/BUILD b/cmd/genswaggertypedocs/BUILD index 135dd03e678..8f493a171d6 100644 --- a/cmd/genswaggertypedocs/BUILD +++ b/cmd/genswaggertypedocs/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "genswaggertypedocs", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/genswaggertypedocs", - library = ":go_default_library", ) go_library( diff --git a/cmd/genutils/BUILD b/cmd/genutils/BUILD index 343b221f72c..47c67d9cb16 100644 --- a/cmd/genutils/BUILD +++ b/cmd/genutils/BUILD @@ -15,8 +15,8 @@ go_library( go_test( name = "go_default_test", srcs = ["genutils_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/genutils", - library = ":go_default_library", ) filegroup( diff --git a/cmd/genyaml/BUILD b/cmd/genyaml/BUILD index 46855dfe317..527d060e3a2 100644 --- a/cmd/genyaml/BUILD +++ b/cmd/genyaml/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "genyaml", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/genyaml", - library = ":go_default_library", ) go_library( diff --git a/cmd/gke-certificates-controller/BUILD b/cmd/gke-certificates-controller/BUILD index e77d8b0cb2a..2799b64369e 100644 --- a/cmd/gke-certificates-controller/BUILD +++ b/cmd/gke-certificates-controller/BUILD @@ -37,6 +37,6 @@ filegroup( go_binary( name = "gke-certificates-controller", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/gke-certificates-controller", - library = ":go_default_library", ) diff --git 
a/cmd/gke-certificates-controller/app/BUILD b/cmd/gke-certificates-controller/app/BUILD index a6017f57810..f7f1d24af68 100644 --- a/cmd/gke-certificates-controller/app/BUILD +++ b/cmd/gke-certificates-controller/app/BUILD @@ -53,8 +53,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["gke_signer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/gke-certificates-controller/app", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", diff --git a/cmd/hyperkube/BUILD b/cmd/hyperkube/BUILD index af3f17aae7f..e806fae7438 100644 --- a/cmd/hyperkube/BUILD +++ b/cmd/hyperkube/BUILD @@ -10,16 +10,16 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "hyperkube", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/hyperkube", - library = ":go_default_library", x_defs = version_x_defs(), ) go_test( name = "go_default_test", srcs = ["hyperkube_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/hyperkube", - library = ":go_default_library", deps = [ "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/cmd/importverifier/BUILD b/cmd/importverifier/BUILD index b3c71160e62..e5e1fe26d09 100644 --- a/cmd/importverifier/BUILD +++ b/cmd/importverifier/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "importverifier", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/importverifier", - library = ":go_default_library", ) go_library( diff --git a/cmd/kube-apiserver/BUILD b/cmd/kube-apiserver/BUILD index 81e46d79d24..55c7d08f305 100644 --- a/cmd/kube-apiserver/BUILD +++ b/cmd/kube-apiserver/BUILD @@ -9,6 +9,7 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kube-apiserver", + embed = [":go_default_library"], gc_linkopts = [ "-linkmode", "external", @@ 
-16,7 +17,6 @@ go_binary( "-static", ], importpath = "k8s.io/kubernetes/cmd/kube-apiserver", - library = ":go_default_library", x_defs = version_x_defs(), ) diff --git a/cmd/kube-apiserver/app/options/BUILD b/cmd/kube-apiserver/app/options/BUILD index 96087a582eb..ef12f9230ab 100644 --- a/cmd/kube-apiserver/app/options/BUILD +++ b/cmd/kube-apiserver/app/options/BUILD @@ -61,8 +61,8 @@ go_library( go_test( name = "go_default_test", srcs = ["options_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kube-apiserver/app/options", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/cmd/kube-controller-manager/BUILD b/cmd/kube-controller-manager/BUILD index 64d5a1ca3d8..94fa7797003 100644 --- a/cmd/kube-controller-manager/BUILD +++ b/cmd/kube-controller-manager/BUILD @@ -9,6 +9,7 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kube-controller-manager", + embed = [":go_default_library"], gc_linkopts = [ "-linkmode", "external", @@ -16,7 +17,6 @@ go_binary( "-static", ], importpath = "k8s.io/kubernetes/cmd/kube-controller-manager", - library = ":go_default_library", x_defs = version_x_defs(), ) diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index 2d4ede18b41..7bee9288b4a 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -154,8 +154,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["controller_manager_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/cmd/kube-controller-manager/app/options/BUILD b/cmd/kube-controller-manager/app/options/BUILD index 41eeba489fa..cd814a31116 100644 --- 
a/cmd/kube-controller-manager/app/options/BUILD +++ b/cmd/kube-controller-manager/app/options/BUILD @@ -40,8 +40,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["options_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app/options", - library = ":go_default_library", tags = ["automanaged"], deps = [ "//cmd/controller-manager/app/options:go_default_library", diff --git a/cmd/kube-proxy/BUILD b/cmd/kube-proxy/BUILD index fbc765cd357..ac19c0bd512 100644 --- a/cmd/kube-proxy/BUILD +++ b/cmd/kube-proxy/BUILD @@ -9,6 +9,7 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kube-proxy", + embed = [":go_default_library"], gc_linkopts = [ "-linkmode", "external", @@ -16,7 +17,6 @@ go_binary( "-static", ], importpath = "k8s.io/kubernetes/cmd/kube-proxy", - library = ":go_default_library", x_defs = version_x_defs(), ) diff --git a/cmd/kube-proxy/app/BUILD b/cmd/kube-proxy/app/BUILD index 02bbd56e48a..6a785fa8114 100644 --- a/cmd/kube-proxy/app/BUILD +++ b/cmd/kube-proxy/app/BUILD @@ -11,9 +11,38 @@ go_library( srcs = [ "conntrack.go", "server.go", - "server_others.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "server_others.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "server_others.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "server_others.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "server_others.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "server_others.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "server_others.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "server_others.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "server_others.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "server_others.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "server_others.go", + ], + 
"@io_bazel_rules_go//go/platform:windows": [ "server_windows.go", ], "//conditions:default": [], @@ -24,7 +53,6 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/features:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubelet/qos:go_default_library", "//pkg/master/ports:go_default_library", @@ -37,10 +65,8 @@ go_library( "//pkg/proxy/healthcheck:go_default_library", "//pkg/proxy/iptables:go_default_library", "//pkg/proxy/ipvs:go_default_library", - "//pkg/proxy/metrics:go_default_library", "//pkg/proxy/userspace:go_default_library", "//pkg/util/configz:go_default_library", - "//pkg/util/dbus:go_default_library", "//pkg/util/ipset:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/ipvs:go_default_library", @@ -61,8 +87,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", @@ -75,10 +99,82 @@ go_library( "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/features:go_default_library", + "//pkg/proxy/metrics:go_default_library", + "//pkg/util/dbus:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//pkg/features:go_default_library", + "//pkg/proxy/metrics:go_default_library", + "//pkg/util/dbus:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//pkg/features:go_default_library", + "//pkg/proxy/metrics:go_default_library", + "//pkg/util/dbus:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//pkg/features:go_default_library", + "//pkg/proxy/metrics:go_default_library", + "//pkg/util/dbus:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//pkg/features:go_default_library", + "//pkg/proxy/metrics:go_default_library", + "//pkg/util/dbus:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//pkg/features:go_default_library", + "//pkg/proxy/metrics:go_default_library", + "//pkg/util/dbus:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//pkg/features:go_default_library", + "//pkg/proxy/metrics:go_default_library", + "//pkg/util/dbus:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//pkg/features:go_default_library", + 
"//pkg/proxy/metrics:go_default_library", + "//pkg/util/dbus:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//pkg/features:go_default_library", + "//pkg/proxy/metrics:go_default_library", + "//pkg/util/dbus:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//pkg/features:go_default_library", + "//pkg/proxy/metrics:go_default_library", + "//pkg/util/dbus:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ "//pkg/proxy/winkernel:go_default_library", "//pkg/proxy/winuserspace:go_default_library", "//pkg/util/netsh:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", ], "//conditions:default": [], }), @@ -87,8 +183,8 @@ go_library( go_test( name = "go_default_test", srcs = ["server_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kube-proxy/app", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/proxy/apis/kubeproxyconfig:go_default_library", diff --git a/cmd/kubeadm/BUILD b/cmd/kubeadm/BUILD index c04f62145f5..7c2dd89f035 100644 --- a/cmd/kubeadm/BUILD +++ b/cmd/kubeadm/BUILD @@ -9,6 +9,7 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kubeadm", + embed = [":go_default_library"], gc_linkopts = [ "-linkmode", "external", @@ -16,7 +17,6 @@ go_binary( "-static", ], importpath = "k8s.io/kubernetes/cmd/kubeadm", - library = ":go_default_library", x_defs = version_x_defs(), ) diff --git a/cmd/kubeadm/app/apis/kubeadm/install/BUILD 
b/cmd/kubeadm/app/apis/kubeadm/install/BUILD index 529b36d749e..89505db81c8 100644 --- a/cmd/kubeadm/app/apis/kubeadm/install/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/install/BUILD @@ -39,8 +39,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["install_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm/fuzzer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD index 70c1dee7788..edeb99e0b06 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD @@ -4,7 +4,6 @@ go_library( name = "go_default_library", srcs = [ "defaults.go", - "defaults_unix.go", "doc.go", "register.go", "types.go", @@ -12,7 +11,37 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "defaults_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "defaults_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "defaults_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "defaults_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "defaults_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "defaults_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "defaults_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "defaults_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "defaults_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "defaults_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "defaults_windows.go", ], "//conditions:default": [], diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/BUILD 
b/cmd/kubeadm/app/apis/kubeadm/validation/BUILD index 75cbf735129..01f8f8a4fc4 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/validation/BUILD @@ -31,8 +31,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library", diff --git a/cmd/kubeadm/app/cmd/BUILD b/cmd/kubeadm/app/cmd/BUILD index 8c8e3a8e2d4..83eb272bf78 100644 --- a/cmd/kubeadm/app/cmd/BUILD +++ b/cmd/kubeadm/app/cmd/BUILD @@ -85,8 +85,8 @@ go_test( "reset_test.go", "token_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/cmd", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/preflight:go_default_library", diff --git a/cmd/kubeadm/app/cmd/phases/BUILD b/cmd/kubeadm/app/cmd/phases/BUILD index 74709de119c..ec3ae74f96d 100644 --- a/cmd/kubeadm/app/cmd/phases/BUILD +++ b/cmd/kubeadm/app/cmd/phases/BUILD @@ -69,8 +69,8 @@ go_test( "etcd_test.go", "kubeconfig_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/install:go_default_library", diff --git a/cmd/kubeadm/app/cmd/upgrade/BUILD b/cmd/kubeadm/app/cmd/upgrade/BUILD index 0787faf9465..709a1e7d5fa 100644 --- a/cmd/kubeadm/app/cmd/upgrade/BUILD +++ b/cmd/kubeadm/app/cmd/upgrade/BUILD @@ -42,8 +42,8 @@ go_test( "common_test.go", "plan_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/cmd/upgrade", - library = ":go_default_library", deps = [ 
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", "//cmd/kubeadm/app/phases/upgrade:go_default_library", diff --git a/cmd/kubeadm/app/cmd/util/BUILD b/cmd/kubeadm/app/cmd/util/BUILD index f7a9b69cf5c..951960a273a 100644 --- a/cmd/kubeadm/app/cmd/util/BUILD +++ b/cmd/kubeadm/app/cmd/util/BUILD @@ -17,8 +17,8 @@ go_library( go_test( name = "go_default_test", srcs = ["cmdutil_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util", - library = ":go_default_library", ) filegroup( diff --git a/cmd/kubeadm/app/constants/BUILD b/cmd/kubeadm/app/constants/BUILD index c1a584258ea..d0f284c5ebb 100644 --- a/cmd/kubeadm/app/constants/BUILD +++ b/cmd/kubeadm/app/constants/BUILD @@ -33,7 +33,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["constants_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/constants", - library = ":go_default_library", deps = ["//pkg/util/version:go_default_library"], ) diff --git a/cmd/kubeadm/app/discovery/BUILD b/cmd/kubeadm/app/discovery/BUILD index 3c3f591ba14..14fd0f3f4f6 100644 --- a/cmd/kubeadm/app/discovery/BUILD +++ b/cmd/kubeadm/app/discovery/BUILD @@ -23,8 +23,8 @@ go_library( go_test( name = "go_default_test", srcs = ["discovery_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/discovery", - library = ":go_default_library", deps = ["//cmd/kubeadm/app/apis/kubeadm:go_default_library"], ) diff --git a/cmd/kubeadm/app/discovery/token/BUILD b/cmd/kubeadm/app/discovery/token/BUILD index fe59263de60..cb4de3f600f 100644 --- a/cmd/kubeadm/app/discovery/token/BUILD +++ b/cmd/kubeadm/app/discovery/token/BUILD @@ -41,8 +41,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["token_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/discovery/token", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/util/kubeconfig:go_default_library", 
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", diff --git a/cmd/kubeadm/app/features/BUILD b/cmd/kubeadm/app/features/BUILD index 3bf69a4b2b9..8808be9d42b 100644 --- a/cmd/kubeadm/app/features/BUILD +++ b/cmd/kubeadm/app/features/BUILD @@ -32,7 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["features_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/features", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library"], ) diff --git a/cmd/kubeadm/app/images/BUILD b/cmd/kubeadm/app/images/BUILD index 05c83ab87fa..51d7334530d 100644 --- a/cmd/kubeadm/app/images/BUILD +++ b/cmd/kubeadm/app/images/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["images_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/images", - library = ":go_default_library", deps = ["//cmd/kubeadm/app/constants:go_default_library"], ) diff --git a/cmd/kubeadm/app/phases/addons/dns/BUILD b/cmd/kubeadm/app/phases/addons/dns/BUILD index f1279872ea1..ef8c19b2b8d 100644 --- a/cmd/kubeadm/app/phases/addons/dns/BUILD +++ b/cmd/kubeadm/app/phases/addons/dns/BUILD @@ -12,8 +12,8 @@ go_test( "dns_test.go", "versions_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util:go_default_library", diff --git a/cmd/kubeadm/app/phases/addons/proxy/BUILD b/cmd/kubeadm/app/phases/addons/proxy/BUILD index 457cb360bff..170b09364fd 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/BUILD +++ b/cmd/kubeadm/app/phases/addons/proxy/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["proxy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy", - library = 
":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", "//cmd/kubeadm/app/util:go_default_library", diff --git a/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/BUILD b/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/BUILD index 9cc4d702e7c..df3f70f55f9 100644 --- a/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/BUILD +++ b/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["clusterinfo_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/cmd/kubeadm/app/phases/bootstraptoken/node/BUILD b/cmd/kubeadm/app/phases/bootstraptoken/node/BUILD index 175ea3575af..e20d68b5d57 100644 --- a/cmd/kubeadm/app/phases/bootstraptoken/node/BUILD +++ b/cmd/kubeadm/app/phases/bootstraptoken/node/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["token_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node", - library = ":go_default_library", deps = ["//cmd/kubeadm/app/apis/kubeadm:go_default_library"], ) diff --git a/cmd/kubeadm/app/phases/certs/BUILD b/cmd/kubeadm/app/phases/certs/BUILD index a1009be44dd..9e50fc2c73d 100644 --- a/cmd/kubeadm/app/phases/certs/BUILD +++ b/cmd/kubeadm/app/phases/certs/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["certs_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", diff --git a/cmd/kubeadm/app/phases/certs/pkiutil/BUILD b/cmd/kubeadm/app/phases/certs/pkiutil/BUILD index 
6f0798ab08d..56f7a9383a5 100644 --- a/cmd/kubeadm/app/phases/certs/pkiutil/BUILD +++ b/cmd/kubeadm/app/phases/certs/pkiutil/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["pki_helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil", - library = ":go_default_library", deps = ["//vendor/k8s.io/client-go/util/cert:go_default_library"], ) diff --git a/cmd/kubeadm/app/phases/controlplane/BUILD b/cmd/kubeadm/app/phases/controlplane/BUILD index 958b5cd9e09..830d6ae90bd 100644 --- a/cmd/kubeadm/app/phases/controlplane/BUILD +++ b/cmd/kubeadm/app/phases/controlplane/BUILD @@ -12,8 +12,8 @@ go_test( "manifests_test.go", "volumes_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", diff --git a/cmd/kubeadm/app/phases/etcd/BUILD b/cmd/kubeadm/app/phases/etcd/BUILD index 45f491b2cef..918598a6d0c 100644 --- a/cmd/kubeadm/app/phases/etcd/BUILD +++ b/cmd/kubeadm/app/phases/etcd/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["local_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", diff --git a/cmd/kubeadm/app/phases/kubeconfig/BUILD b/cmd/kubeadm/app/phases/kubeconfig/BUILD index b044091a5bc..a402634a580 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/BUILD +++ b/cmd/kubeadm/app/phases/kubeconfig/BUILD @@ -41,8 +41,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["kubeconfig_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig", - library = ":go_default_library", deps = [ 
"//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", diff --git a/cmd/kubeadm/app/phases/kubelet/BUILD b/cmd/kubeadm/app/phases/kubelet/BUILD index 9c462bd2fd8..8da2b8243e1 100644 --- a/cmd/kubeadm/app/phases/kubelet/BUILD +++ b/cmd/kubeadm/app/phases/kubelet/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["kubelet_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", diff --git a/cmd/kubeadm/app/phases/markmaster/BUILD b/cmd/kubeadm/app/phases/markmaster/BUILD index 6a1d55d9474..8dbc4b44457 100644 --- a/cmd/kubeadm/app/phases/markmaster/BUILD +++ b/cmd/kubeadm/app/phases/markmaster/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["markmaster_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/constants:go_default_library", "//pkg/kubelet/apis:go_default_library", diff --git a/cmd/kubeadm/app/phases/selfhosting/BUILD b/cmd/kubeadm/app/phases/selfhosting/BUILD index 3374322b9a1..ef1ad56d0f2 100644 --- a/cmd/kubeadm/app/phases/selfhosting/BUILD +++ b/cmd/kubeadm/app/phases/selfhosting/BUILD @@ -13,8 +13,8 @@ go_test( "selfhosting_test.go", "selfhosting_volumes_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util:go_default_library", diff --git a/cmd/kubeadm/app/phases/upgrade/BUILD b/cmd/kubeadm/app/phases/upgrade/BUILD index e8bd736c103..d1c6135b93e 100644 --- a/cmd/kubeadm/app/phases/upgrade/BUILD +++ b/cmd/kubeadm/app/phases/upgrade/BUILD @@ -75,8 +75,8 @@ 
go_test( "prepull_test.go", "staticpods_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", diff --git a/cmd/kubeadm/app/phases/uploadconfig/BUILD b/cmd/kubeadm/app/phases/uploadconfig/BUILD index 332592656ed..880fd4b4c37 100644 --- a/cmd/kubeadm/app/phases/uploadconfig/BUILD +++ b/cmd/kubeadm/app/phases/uploadconfig/BUILD @@ -39,8 +39,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["uploadconfig_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", diff --git a/cmd/kubeadm/app/preflight/BUILD b/cmd/kubeadm/app/preflight/BUILD index baaea43aa3a..ec8e552e1e9 100644 --- a/cmd/kubeadm/app/preflight/BUILD +++ b/cmd/kubeadm/app/preflight/BUILD @@ -10,10 +10,39 @@ go_library( name = "go_default_library", srcs = [ "checks.go", - "checks_unix.go", "utils.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "checks_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "checks_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "checks_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "checks_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "checks_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "checks_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "checks_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "checks_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "checks_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "checks_unix.go", + ], + 
"@io_bazel_rules_go//go/platform:windows": [ "checks_windows.go", ], "//conditions:default": [], @@ -47,8 +76,8 @@ go_test( "checks_test.go", "utils_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/preflight", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//vendor/github.com/renstrom/dedent:go_default_library", diff --git a/cmd/kubeadm/app/util/BUILD b/cmd/kubeadm/app/util/BUILD index b97951e73bc..5c3a8173b60 100644 --- a/cmd/kubeadm/app/util/BUILD +++ b/cmd/kubeadm/app/util/BUILD @@ -40,8 +40,8 @@ go_test( "template_test.go", "version_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/preflight:go_default_library", diff --git a/cmd/kubeadm/app/util/apiclient/BUILD b/cmd/kubeadm/app/util/apiclient/BUILD index 2ad41a61e82..598c1eec29f 100644 --- a/cmd/kubeadm/app/util/apiclient/BUILD +++ b/cmd/kubeadm/app/util/apiclient/BUILD @@ -58,8 +58,8 @@ go_test( "dryrunclient_test.go", "init_dryrun_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", diff --git a/cmd/kubeadm/app/util/config/BUILD b/cmd/kubeadm/app/util/config/BUILD index 10c2574dd0c..90805eba5a0 100644 --- a/cmd/kubeadm/app/util/config/BUILD +++ b/cmd/kubeadm/app/util/config/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["masterconfig_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util/config", - library = ":go_default_library", ) filegroup( diff --git a/cmd/kubeadm/app/util/kubeconfig/BUILD b/cmd/kubeadm/app/util/kubeconfig/BUILD index 23e626ea97d..a6a93b95b14 100644 
--- a/cmd/kubeadm/app/util/kubeconfig/BUILD +++ b/cmd/kubeadm/app/util/kubeconfig/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["kubeconfig_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig", - library = ":go_default_library", ) go_library( diff --git a/cmd/kubeadm/app/util/pubkeypin/BUILD b/cmd/kubeadm/app/util/pubkeypin/BUILD index 017088f47a1..046cd8acd79 100644 --- a/cmd/kubeadm/app/util/pubkeypin/BUILD +++ b/cmd/kubeadm/app/util/pubkeypin/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["pubkeypin_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util/pubkeypin", - library = ":go_default_library", ) go_library( diff --git a/cmd/kubeadm/app/util/staticpod/BUILD b/cmd/kubeadm/app/util/staticpod/BUILD index 343f457a60c..afa35d2712e 100644 --- a/cmd/kubeadm/app/util/staticpod/BUILD +++ b/cmd/kubeadm/app/util/staticpod/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["utils_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod", - library = ":go_default_library", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", diff --git a/cmd/kubeadm/app/util/token/BUILD b/cmd/kubeadm/app/util/token/BUILD index 6ec7aad0c6e..e5268563799 100644 --- a/cmd/kubeadm/app/util/token/BUILD +++ b/cmd/kubeadm/app/util/token/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["tokens_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util/token", - library = ":go_default_library", deps = ["//cmd/kubeadm/app/apis/kubeadm:go_default_library"], ) diff --git a/cmd/kubeadm/test/cmd/BUILD b/cmd/kubeadm/test/cmd/BUILD index ce5920d53e3..e9334489056 100644 --- a/cmd/kubeadm/test/cmd/BUILD +++ b/cmd/kubeadm/test/cmd/BUILD @@ -24,8 +24,8 @@ go_test( ], 
args = ["--kubeadm-path=../../kubeadm"], data = ["//cmd/kubeadm"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubeadm/test/cmd", - library = ":go_default_library", tags = [ "integration", "skip", diff --git a/cmd/kubectl/BUILD b/cmd/kubectl/BUILD index 1e26c979678..f51b7909434 100644 --- a/cmd/kubectl/BUILD +++ b/cmd/kubectl/BUILD @@ -7,6 +7,7 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kubectl", + embed = [":go_default_library"], gc_linkopts = select({ # Mac OS X doesn't support static binaries: # https://developer.apple.com/library/content/qa/qa1118/_index.html @@ -19,7 +20,6 @@ go_binary( ], }), importpath = "k8s.io/kubernetes/cmd/kubectl", - library = ":go_default_library", visibility = ["//visibility:public"], x_defs = version_x_defs(), ) diff --git a/cmd/kubelet/BUILD b/cmd/kubelet/BUILD index fbcb8a7f6d3..a5b72a1e9ec 100644 --- a/cmd/kubelet/BUILD +++ b/cmd/kubelet/BUILD @@ -9,8 +9,8 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kubelet", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubelet", - library = ":go_default_library", x_defs = version_x_defs(), ) diff --git a/cmd/kubelet/app/BUILD b/cmd/kubelet/app/BUILD index c7e482f49ea..8bb2d3f58bc 100644 --- a/cmd/kubelet/app/BUILD +++ b/cmd/kubelet/app/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["server_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubelet/app", - library = ":go_default_library", ) go_library( @@ -19,11 +19,40 @@ go_library( "auth.go", "plugins.go", "server.go", - "server_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "server_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "server_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "server_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ 
+ "server_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "server_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "server_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "server_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "server_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "server_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "server_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "server_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/cmd/kubelet/app", @@ -126,7 +155,7 @@ go_library( "//vendor/k8s.io/client-go/util/cert:go_default_library", "//vendor/k8s.io/client-go/util/certificate:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/exp/inotify:go_default_library", ], "//conditions:default": [], diff --git a/cmd/kubelet/app/options/BUILD b/cmd/kubelet/app/options/BUILD index 846d42e9c45..aca4a34882f 100644 --- a/cmd/kubelet/app/options/BUILD +++ b/cmd/kubelet/app/options/BUILD @@ -47,8 +47,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["options_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubelet/app/options", - library = ":go_default_library", deps = [ "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", diff --git a/cmd/kubemark/BUILD b/cmd/kubemark/BUILD index 8a60c3607fb..1948949aa07 100644 --- a/cmd/kubemark/BUILD +++ b/cmd/kubemark/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "kubemark", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/kubemark", - library = ":go_default_library", ) go_library( diff --git a/cmd/linkcheck/BUILD b/cmd/linkcheck/BUILD index 14e4b3fd6d1..16f3d84b518 100644 --- a/cmd/linkcheck/BUILD +++ 
b/cmd/linkcheck/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "linkcheck", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/cmd/linkcheck", - library = ":go_default_library", ) go_library( diff --git a/examples/explorer/BUILD b/examples/explorer/BUILD index c4c7bc13024..9485a3b6aaa 100644 --- a/examples/explorer/BUILD +++ b/examples/explorer/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "explorer", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/examples/explorer", - library = ":go_default_library", ) go_library( diff --git a/examples/guestbook-go/BUILD b/examples/guestbook-go/BUILD index a2a5b55ab1e..d18db9f7fc2 100644 --- a/examples/guestbook-go/BUILD +++ b/examples/guestbook-go/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "guestbook-go", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/examples/guestbook-go", - library = ":go_default_library", ) go_library( diff --git a/examples/https-nginx/BUILD b/examples/https-nginx/BUILD index d3c41df7bd3..d380f2eeb17 100644 --- a/examples/https-nginx/BUILD +++ b/examples/https-nginx/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "https-nginx", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/examples/https-nginx", - library = ":go_default_library", ) go_library( diff --git a/examples/sharing-clusters/BUILD b/examples/sharing-clusters/BUILD index 22171505818..e2ff3a26a88 100644 --- a/examples/sharing-clusters/BUILD +++ b/examples/sharing-clusters/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "sharing-clusters", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/examples/sharing-clusters", - library = ":go_default_library", ) go_library( diff --git a/hack/BUILD b/hack/BUILD index cec56b8df59..f8800daed27 100644 --- a/hack/BUILD +++ b/hack/BUILD @@ -57,16 +57,16 @@ test_suite( go_binary( name = "hack", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/hack", - library = ":go_default_library", ) go_test( name = 
"go_default_test", srcs = ["e2e_test.go"], data = glob(["testdata/**"]), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/hack", - library = ":go_default_library", ) go_library( diff --git a/hack/cmd/teststale/BUILD b/hack/cmd/teststale/BUILD index 7bf76a42cc7..c1976a57267 100644 --- a/hack/cmd/teststale/BUILD +++ b/hack/cmd/teststale/BUILD @@ -9,15 +9,15 @@ load( go_binary( name = "teststale", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/hack/cmd/teststale", - library = ":go_default_library", ) go_test( name = "go_default_test", srcs = ["teststale_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/hack/cmd/teststale", - library = ":go_default_library", ) go_library( diff --git a/pkg/api/endpoints/BUILD b/pkg/api/endpoints/BUILD index 07e3aeef187..e88d60d2880 100644 --- a/pkg/api/endpoints/BUILD +++ b/pkg/api/endpoints/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/endpoints", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", diff --git a/pkg/api/events/BUILD b/pkg/api/events/BUILD index 96becd0b701..64ab6d316f4 100644 --- a/pkg/api/events/BUILD +++ b/pkg/api/events/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["sorted_event_list_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/events", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/api/persistentvolume/BUILD b/pkg/api/persistentvolume/BUILD index f65e81e9f27..87cfdf076b8 100644 --- a/pkg/api/persistentvolume/BUILD +++ b/pkg/api/persistentvolume/BUILD @@ -33,8 +33,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = 
[":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/persistentvolume", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/features:go_default_library", diff --git a/pkg/api/persistentvolumeclaim/BUILD b/pkg/api/persistentvolumeclaim/BUILD index 6c1cea847cd..064f9c82ace 100644 --- a/pkg/api/persistentvolumeclaim/BUILD +++ b/pkg/api/persistentvolumeclaim/BUILD @@ -33,8 +33,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/persistentvolumeclaim", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/features:go_default_library", diff --git a/pkg/api/pod/BUILD b/pkg/api/pod/BUILD index 7b189d02eff..d29ddd25d64 100644 --- a/pkg/api/pod/BUILD +++ b/pkg/api/pod/BUILD @@ -34,8 +34,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/pod", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/features:go_default_library", diff --git a/pkg/api/ref/BUILD b/pkg/api/ref/BUILD index f4d23d86f80..84bbbf74261 100644 --- a/pkg/api/ref/BUILD +++ b/pkg/api/ref/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["ref_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/ref", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/api/resource/BUILD b/pkg/api/resource/BUILD index 47850ca889a..c27900c7a1e 100644 --- a/pkg/api/resource/BUILD +++ b/pkg/api/resource/BUILD @@ -32,8 +32,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/resource", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/api/service/BUILD b/pkg/api/service/BUILD index 735bf8d5a47..b57c5872f04 100644 --- a/pkg/api/service/BUILD +++ b/pkg/api/service/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/service", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/util/net/sets:go_default_library", diff --git a/pkg/api/testapi/BUILD b/pkg/api/testapi/BUILD index 8424f53a4fa..c1747a9236a 100644 --- a/pkg/api/testapi/BUILD +++ b/pkg/api/testapi/BUILD @@ -58,8 +58,8 @@ go_library( go_test( name = "go_default_test", srcs = ["testapi_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/testapi", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/api/testing/BUILD b/pkg/api/testing/BUILD index 8d52ce4c2b0..589cfa4ff9b 100644 --- a/pkg/api/testing/BUILD +++ b/pkg/api/testing/BUILD @@ -72,8 +72,8 @@ go_test( "serialization_test.go", "unstructured_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/testing", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/api/v1/endpoints/BUILD b/pkg/api/v1/endpoints/BUILD index f9597b402ea..d23918e050b 100644 --- a/pkg/api/v1/endpoints/BUILD +++ b/pkg/api/v1/endpoints/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/v1/endpoints", - library = ":go_default_library", deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git 
a/pkg/api/v1/pod/BUILD b/pkg/api/v1/pod/BUILD index 86a70f7b2c7..d9a5b0ef708 100644 --- a/pkg/api/v1/pod/BUILD +++ b/pkg/api/v1/pod/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/v1/pod", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/api/v1/resource/BUILD b/pkg/api/v1/resource/BUILD index 21979e2b6cb..93380294b42 100644 --- a/pkg/api/v1/resource/BUILD +++ b/pkg/api/v1/resource/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/v1/resource", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/api/v1/service/BUILD b/pkg/api/v1/service/BUILD index 6c56835787b..eff8992766f 100644 --- a/pkg/api/v1/service/BUILD +++ b/pkg/api/v1/service/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/api/v1/service", - library = ":go_default_library", deps = [ "//pkg/util/net/sets:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/apis/admissionregistration/validation/BUILD b/pkg/apis/admissionregistration/validation/BUILD index fa9e0a0fe95..d43f5b59054 100644 --- a/pkg/apis/admissionregistration/validation/BUILD +++ b/pkg/apis/admissionregistration/validation/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/validation", - library = ":go_default_library", deps = [ 
"//pkg/apis/admissionregistration:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/apis/apps/validation/BUILD b/pkg/apis/apps/validation/BUILD index 3d80e8e2bb4..2a81a0eb6a4 100644 --- a/pkg/apis/apps/validation/BUILD +++ b/pkg/apis/apps/validation/BUILD @@ -24,8 +24,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/apps/validation", - library = ":go_default_library", deps = [ "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/apis/authorization/validation/BUILD b/pkg/apis/authorization/validation/BUILD index 58559f83da9..c5f76a8be4d 100644 --- a/pkg/apis/authorization/validation/BUILD +++ b/pkg/apis/authorization/validation/BUILD @@ -21,8 +21,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/authorization/validation", - library = ":go_default_library", deps = [ "//pkg/apis/authorization:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/apis/autoscaling/validation/BUILD b/pkg/apis/autoscaling/validation/BUILD index c6be1eff025..03c70d4c17b 100644 --- a/pkg/apis/autoscaling/validation/BUILD +++ b/pkg/apis/autoscaling/validation/BUILD @@ -22,8 +22,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/validation", - library = ":go_default_library", deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/apis/batch/validation/BUILD b/pkg/apis/batch/validation/BUILD index 318426b359b..1a6268f9f22 100644 --- a/pkg/apis/batch/validation/BUILD +++ b/pkg/apis/batch/validation/BUILD @@ -26,8 +26,8 @@ go_library( go_test( name = 
"go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/batch/validation", - library = ":go_default_library", deps = [ "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/apis/componentconfig/BUILD b/pkg/apis/componentconfig/BUILD index 19d17029cb8..143d8b0223a 100644 --- a/pkg/apis/componentconfig/BUILD +++ b/pkg/apis/componentconfig/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/componentconfig", - library = ":go_default_library", deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) diff --git a/pkg/apis/componentconfig/v1alpha1/BUILD b/pkg/apis/componentconfig/v1alpha1/BUILD index 9726d08a84a..74755fbc998 100644 --- a/pkg/apis/componentconfig/v1alpha1/BUILD +++ b/pkg/apis/componentconfig/v1alpha1/BUILD @@ -46,7 +46,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["defaults_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", - library = ":go_default_library", deps = ["//pkg/apis/componentconfig:go_default_library"], ) diff --git a/pkg/apis/core/BUILD b/pkg/apis/core/BUILD index bd2944f31b1..874383ef18d 100644 --- a/pkg/apis/core/BUILD +++ b/pkg/apis/core/BUILD @@ -36,8 +36,8 @@ go_test( "taint_test.go", "toleration_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/core", - library = ":go_default_library", ) filegroup( diff --git a/pkg/apis/core/helper/BUILD b/pkg/apis/core/helper/BUILD index 81b0a2fbffa..7f4ec626543 100644 --- a/pkg/apis/core/helper/BUILD +++ b/pkg/apis/core/helper/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/core/helper", - library = ":go_default_library", deps = [ 
"//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/apis/core/install/BUILD b/pkg/apis/core/install/BUILD index 9ed882df994..1e760dbe8ea 100644 --- a/pkg/apis/core/install/BUILD +++ b/pkg/apis/core/install/BUILD @@ -24,8 +24,8 @@ go_library( go_test( name = "go_default_test", srcs = ["install_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/core/install", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/apis/core/pods/BUILD b/pkg/apis/core/pods/BUILD index b78e256e7c1..34e8b8f3644 100644 --- a/pkg/apis/core/pods/BUILD +++ b/pkg/apis/core/pods/BUILD @@ -11,8 +11,8 @@ go_library( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/core/pods", - library = ":go_default_library", ) filegroup( diff --git a/pkg/apis/core/v1/helper/BUILD b/pkg/apis/core/v1/helper/BUILD index ea7e2ae974a..fec9c1baaf0 100644 --- a/pkg/apis/core/v1/helper/BUILD +++ b/pkg/apis/core/v1/helper/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/core/v1/helper", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/pkg/apis/core/v1/helper/qos/BUILD b/pkg/apis/core/v1/helper/qos/BUILD index 7dd60de18c5..57ab07f7252 100644 --- a/pkg/apis/core/v1/helper/qos/BUILD +++ b/pkg/apis/core/v1/helper/qos/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["qos_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", 
"//pkg/apis/core/helper/qos:go_default_library", diff --git a/pkg/apis/core/v1/validation/BUILD b/pkg/apis/core/v1/validation/BUILD index c5b694af7bd..21cf28f303f 100644 --- a/pkg/apis/core/v1/validation/BUILD +++ b/pkg/apis/core/v1/validation/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/core/v1/validation", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/apis/core/validation/BUILD b/pkg/apis/core/validation/BUILD index 19169985a54..69e50541a98 100644 --- a/pkg/apis/core/validation/BUILD +++ b/pkg/apis/core/validation/BUILD @@ -51,8 +51,8 @@ go_test( "events_test.go", "validation_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/core/validation", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/apis/extensions/BUILD b/pkg/apis/extensions/BUILD index c52d2a9ac9e..17076d4720e 100644 --- a/pkg/apis/extensions/BUILD +++ b/pkg/apis/extensions/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/extensions", - library = ":go_default_library", ) go_library( diff --git a/pkg/apis/extensions/validation/BUILD b/pkg/apis/extensions/validation/BUILD index d7ea42609a0..b9439bfac6d 100644 --- a/pkg/apis/extensions/validation/BUILD +++ b/pkg/apis/extensions/validation/BUILD @@ -31,8 +31,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/extensions/validation", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", 
"//pkg/apis/extensions:go_default_library", diff --git a/pkg/apis/networking/validation/BUILD b/pkg/apis/networking/validation/BUILD index 5f1ccf04174..44a0d65766b 100644 --- a/pkg/apis/networking/validation/BUILD +++ b/pkg/apis/networking/validation/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/networking/validation", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/networking:go_default_library", diff --git a/pkg/apis/policy/validation/BUILD b/pkg/apis/policy/validation/BUILD index 1f372c738c1..692d0ca0c15 100644 --- a/pkg/apis/policy/validation/BUILD +++ b/pkg/apis/policy/validation/BUILD @@ -22,8 +22,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/policy/validation", - library = ":go_default_library", deps = [ "//pkg/apis/policy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/pkg/apis/rbac/validation/BUILD b/pkg/apis/rbac/validation/BUILD index 3c34083071e..8b7d835d3a4 100644 --- a/pkg/apis/rbac/validation/BUILD +++ b/pkg/apis/rbac/validation/BUILD @@ -23,8 +23,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/rbac/validation", - library = ":go_default_library", deps = [ "//pkg/apis/rbac:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/apis/scheduling/validation/BUILD b/pkg/apis/scheduling/validation/BUILD index fc4680377ee..875e4f75cfd 100644 --- a/pkg/apis/scheduling/validation/BUILD +++ b/pkg/apis/scheduling/validation/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = 
"k8s.io/kubernetes/pkg/apis/scheduling/validation", - library = ":go_default_library", deps = [ "//pkg/apis/scheduling:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/apis/settings/validation/BUILD b/pkg/apis/settings/validation/BUILD index f0e6d67f00c..74813488201 100644 --- a/pkg/apis/settings/validation/BUILD +++ b/pkg/apis/settings/validation/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/settings/validation", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/settings:go_default_library", diff --git a/pkg/apis/storage/util/BUILD b/pkg/apis/storage/util/BUILD index 490e8372149..3e8c883b105 100644 --- a/pkg/apis/storage/util/BUILD +++ b/pkg/apis/storage/util/BUILD @@ -37,8 +37,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/storage/util", - library = ":go_default_library", deps = [ "//pkg/apis/storage:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", diff --git a/pkg/apis/storage/validation/BUILD b/pkg/apis/storage/validation/BUILD index e80f7838d6b..b345c86a906 100644 --- a/pkg/apis/storage/validation/BUILD +++ b/pkg/apis/storage/validation/BUILD @@ -26,8 +26,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/apis/storage/validation", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", diff --git a/pkg/auth/authorizer/abac/BUILD b/pkg/auth/authorizer/abac/BUILD index efbea8e2568..f61659c62ac 100644 --- a/pkg/auth/authorizer/abac/BUILD +++ b/pkg/auth/authorizer/abac/BUILD @@ -35,8 +35,8 @@ go_test( data = [ ":example_policy", ], + 
embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/auth/authorizer/abac", - library = ":go_default_library", deps = [ "//pkg/apis/abac:go_default_library", "//pkg/apis/abac/v0:go_default_library", diff --git a/pkg/auth/nodeidentifier/BUILD b/pkg/auth/nodeidentifier/BUILD index f51a1dc6119..a573310579c 100644 --- a/pkg/auth/nodeidentifier/BUILD +++ b/pkg/auth/nodeidentifier/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["default_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/auth/nodeidentifier", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) diff --git a/pkg/bootstrap/api/BUILD b/pkg/bootstrap/api/BUILD index 0beb8e73331..f72517262a5 100644 --- a/pkg/bootstrap/api/BUILD +++ b/pkg/bootstrap/api/BUILD @@ -36,6 +36,6 @@ filegroup( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/bootstrap/api", - library = ":go_default_library", ) diff --git a/pkg/capabilities/BUILD b/pkg/capabilities/BUILD index 6ab539698cf..f011d7aa17d 100644 --- a/pkg/capabilities/BUILD +++ b/pkg/capabilities/BUILD @@ -18,8 +18,8 @@ go_library( go_test( name = "go_default_test", srcs = ["capabilities_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/capabilities", - library = ":go_default_library", ) filegroup( diff --git a/pkg/client/chaosclient/BUILD b/pkg/client/chaosclient/BUILD index 8ba5b300d30..81179c06740 100644 --- a/pkg/client/chaosclient/BUILD +++ b/pkg/client/chaosclient/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["chaosclient_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/client/chaosclient", - library = ":go_default_library", ) filegroup( diff --git a/pkg/client/listers/batch/internalversion/BUILD b/pkg/client/listers/batch/internalversion/BUILD 
index 1f0b2079958..c0f5a4f8b27 100644 --- a/pkg/client/listers/batch/internalversion/BUILD +++ b/pkg/client/listers/batch/internalversion/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["job_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/client/listers/batch/internalversion", - library = ":go_default_library", deps = [ "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/client/listers/extensions/internalversion/BUILD b/pkg/client/listers/extensions/internalversion/BUILD index 2e76730a7c3..f521b0c0df6 100644 --- a/pkg/client/listers/extensions/internalversion/BUILD +++ b/pkg/client/listers/extensions/internalversion/BUILD @@ -46,8 +46,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["daemonset_expansion_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/client/tests/BUILD b/pkg/client/tests/BUILD index cf7b5074753..d22b547f68f 100644 --- a/pkg/client/tests/BUILD +++ b/pkg/client/tests/BUILD @@ -14,8 +14,8 @@ go_test( "portfoward_test.go", "remotecommand_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/client/tests", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/client/unversioned/BUILD b/pkg/client/unversioned/BUILD index 4ed4a551f17..0d97be77f39 100644 --- a/pkg/client/unversioned/BUILD +++ b/pkg/client/unversioned/BUILD @@ -37,8 +37,8 @@ go_library( go_test( name = "go_default_test", srcs = ["helper_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/client/unversioned", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", 
"//pkg/api/testapi:go_default_library", diff --git a/pkg/cloudprovider/providers/aws/BUILD b/pkg/cloudprovider/providers/aws/BUILD index 9a4a6a76539..7fb2d5e4eb5 100644 --- a/pkg/cloudprovider/providers/aws/BUILD +++ b/pkg/cloudprovider/providers/aws/BUILD @@ -73,8 +73,8 @@ go_test( "retry_handler_test.go", "tags_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index eea1107d602..fa4d39f9f64 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -65,8 +65,8 @@ go_test( "azure_util_test.go", "azure_wrap_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", - library = ":go_default_library", deps = [ "//pkg/api/v1/service:go_default_library", "//pkg/kubelet/apis:go_default_library", diff --git a/pkg/cloudprovider/providers/cloudstack/BUILD b/pkg/cloudprovider/providers/cloudstack/BUILD index 9798a63fb13..0ad40d29ea0 100644 --- a/pkg/cloudprovider/providers/cloudstack/BUILD +++ b/pkg/cloudprovider/providers/cloudstack/BUILD @@ -13,11 +13,40 @@ go_library( "cloudstack_instances.go", "cloudstack_loadbalancer.go", "metadata.go", - "metadata_other.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "metadata_other.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "metadata_other.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "metadata_other.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "metadata_other.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "metadata_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "metadata_other.go", + ], + 
"@io_bazel_rules_go//go/platform:netbsd": [ + "metadata_other.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "metadata_other.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "metadata_other.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "metadata_other.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "metadata_other.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack", @@ -25,21 +54,55 @@ go_library( "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", "//vendor/github.com/d2g/dhcp4:go_default_library", - "//vendor/github.com/d2g/dhcp4client:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/kardianos/osext:go_default_library", "//vendor/github.com/xanzy/go-cloudstack/cloudstack:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ 
+ "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/d2g/dhcp4client:go_default_library", + ], + "//conditions:default": [], + }), ) go_test( name = "go_default_test", srcs = ["cloudstack_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index 69509a2243a..18205b9fcad 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -98,8 +98,8 @@ go_test( "gce_util_test.go", "metrics_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", - library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/kubelet/apis:go_default_library", diff --git a/pkg/cloudprovider/providers/openstack/BUILD b/pkg/cloudprovider/providers/openstack/BUILD index 0b90b0d399b..42a185e2f4c 100644 --- a/pkg/cloudprovider/providers/openstack/BUILD +++ b/pkg/cloudprovider/providers/openstack/BUILD @@ -72,8 +72,8 @@ go_test( "openstack_routes_test.go", "openstack_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack", - library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", diff --git a/pkg/cloudprovider/providers/ovirt/BUILD b/pkg/cloudprovider/providers/ovirt/BUILD index a3ec5a0034a..35390b0e711 100644 --- a/pkg/cloudprovider/providers/ovirt/BUILD +++ b/pkg/cloudprovider/providers/ovirt/BUILD @@ -22,8 +22,8 @@ 
go_library( go_test( name = "go_default_test", srcs = ["ovirt_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt", - library = ":go_default_library", deps = ["//pkg/cloudprovider:go_default_library"], ) diff --git a/pkg/cloudprovider/providers/photon/BUILD b/pkg/cloudprovider/providers/photon/BUILD index 052159a13e8..3dc4b33d4b3 100644 --- a/pkg/cloudprovider/providers/photon/BUILD +++ b/pkg/cloudprovider/providers/photon/BUILD @@ -25,8 +25,8 @@ go_library( go_test( name = "go_default_test", srcs = ["photon_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/photon", - library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/cloudprovider/providers/vsphere/BUILD b/pkg/cloudprovider/providers/vsphere/BUILD index 91de27e7adc..2ccae8c2f2d 100644 --- a/pkg/cloudprovider/providers/vsphere/BUILD +++ b/pkg/cloudprovider/providers/vsphere/BUILD @@ -37,8 +37,8 @@ go_library( go_test( name = "go_default_test", srcs = ["vsphere_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere", - library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", diff --git a/pkg/controller/BUILD b/pkg/controller/BUILD index fe5a3adc085..f1e808f2ee3 100644 --- a/pkg/controller/BUILD +++ b/pkg/controller/BUILD @@ -12,8 +12,8 @@ go_test( "controller_ref_manager_test.go", "controller_utils_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/controller/bootstrap/BUILD b/pkg/controller/bootstrap/BUILD index 737dd4eae99..6776adbe55c 100644 --- 
a/pkg/controller/bootstrap/BUILD +++ b/pkg/controller/bootstrap/BUILD @@ -15,8 +15,8 @@ go_test( "tokencleaner_test.go", "util_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/bootstrap", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", diff --git a/pkg/controller/certificates/BUILD b/pkg/controller/certificates/BUILD index b4470499d06..a6940009acc 100644 --- a/pkg/controller/certificates/BUILD +++ b/pkg/controller/certificates/BUILD @@ -54,8 +54,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["certificate_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/certificates", - library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", diff --git a/pkg/controller/certificates/approver/BUILD b/pkg/controller/certificates/approver/BUILD index 1e05a1408ac..81294579020 100644 --- a/pkg/controller/certificates/approver/BUILD +++ b/pkg/controller/certificates/approver/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["sarapprove_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/certificates/approver", - library = ":go_default_library", deps = [ "//pkg/apis/certificates/v1beta1:go_default_library", "//vendor/k8s.io/api/authorization/v1beta1:go_default_library", diff --git a/pkg/controller/certificates/cleaner/BUILD b/pkg/controller/certificates/cleaner/BUILD index 14ca29e60ee..ada38c62c26 100644 --- a/pkg/controller/certificates/cleaner/BUILD +++ b/pkg/controller/certificates/cleaner/BUILD @@ -35,8 +35,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["cleaner_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/certificates/cleaner", - library = ":go_default_library", deps = [ 
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/certificates/signer/BUILD b/pkg/controller/certificates/signer/BUILD index d7da4d31e07..d4119b034aa 100644 --- a/pkg/controller/certificates/signer/BUILD +++ b/pkg/controller/certificates/signer/BUILD @@ -14,8 +14,8 @@ go_test( "testdata/ca.key", "testdata/kubelet.csr", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/certificates/signer", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", diff --git a/pkg/controller/cloud/BUILD b/pkg/controller/cloud/BUILD index 2202e0823ce..a482039381d 100644 --- a/pkg/controller/cloud/BUILD +++ b/pkg/controller/cloud/BUILD @@ -48,8 +48,8 @@ go_test( "node_controller_test.go", "pvlcontroller_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/cloud", - library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/fake:go_default_library", diff --git a/pkg/controller/clusterroleaggregation/BUILD b/pkg/controller/clusterroleaggregation/BUILD index 94a93af996d..965c04691a6 100644 --- a/pkg/controller/clusterroleaggregation/BUILD +++ b/pkg/controller/clusterroleaggregation/BUILD @@ -40,8 +40,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["clusterroleaggregation_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/clusterroleaggregation", - library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", diff --git a/pkg/controller/cronjob/BUILD b/pkg/controller/cronjob/BUILD index e484d2c50a2..d1fee235557 100644 --- a/pkg/controller/cronjob/BUILD +++ b/pkg/controller/cronjob/BUILD @@ -46,8 +46,8 @@ go_test( 
"cronjob_controller_test.go", "utils_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/cronjob", - library = ":go_default_library", deps = [ "//pkg/apis/batch/install:go_default_library", "//pkg/apis/core/install:go_default_library", diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD index dd082c9ac27..47a123cd929 100644 --- a/pkg/controller/daemon/BUILD +++ b/pkg/controller/daemon/BUILD @@ -65,8 +65,8 @@ go_test( "daemon_controller_test.go", "update_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/daemon", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/controller/daemon/util/BUILD b/pkg/controller/daemon/util/BUILD index 4a87a1eacd9..d3e5dde3dc4 100644 --- a/pkg/controller/daemon/util/BUILD +++ b/pkg/controller/daemon/util/BUILD @@ -40,8 +40,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["daemonset_util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/daemon/util", - library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/controller/deployment/BUILD b/pkg/controller/deployment/BUILD index 6995ca4c33d..41b1c163df3 100644 --- a/pkg/controller/deployment/BUILD +++ b/pkg/controller/deployment/BUILD @@ -56,8 +56,8 @@ go_test( "rolling_test.go", "sync_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/deployment", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps/install:go_default_library", diff --git a/pkg/controller/deployment/util/BUILD b/pkg/controller/deployment/util/BUILD index fd967e837f8..693faa90c1a 100644 --- a/pkg/controller/deployment/util/BUILD +++ b/pkg/controller/deployment/util/BUILD @@ -45,8 +45,8 @@ 
go_test( "deployment_util_test.go", "hash_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/deployment/util", - library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", "//pkg/util/hash:go_default_library", diff --git a/pkg/controller/disruption/BUILD b/pkg/controller/disruption/BUILD index b92a2e2c949..a8e49825899 100644 --- a/pkg/controller/disruption/BUILD +++ b/pkg/controller/disruption/BUILD @@ -45,8 +45,8 @@ go_library( go_test( name = "go_default_test", srcs = ["disruption_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/disruption", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/install:go_default_library", diff --git a/pkg/controller/endpoint/BUILD b/pkg/controller/endpoint/BUILD index ed8acb206d5..0cfedaba881 100644 --- a/pkg/controller/endpoint/BUILD +++ b/pkg/controller/endpoint/BUILD @@ -40,8 +40,8 @@ go_library( go_test( name = "go_default_test", srcs = ["endpoints_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/endpoint", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/controller/garbagecollector/BUILD b/pkg/controller/garbagecollector/BUILD index 1ea321cce93..4541ca39b0a 100644 --- a/pkg/controller/garbagecollector/BUILD +++ b/pkg/controller/garbagecollector/BUILD @@ -50,8 +50,8 @@ go_library( go_test( name = "go_default_test", srcs = ["garbagecollector_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/install:go_default_library", diff --git a/pkg/controller/garbagecollector/metaonly/BUILD b/pkg/controller/garbagecollector/metaonly/BUILD index 
157f4ba9a14..097f327d34d 100644 --- a/pkg/controller/garbagecollector/metaonly/BUILD +++ b/pkg/controller/garbagecollector/metaonly/BUILD @@ -26,8 +26,8 @@ go_library( go_test( name = "go_default_test", srcs = ["metaonly_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly", - library = ":go_default_library", deps = [ "//pkg/apis/core/install:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/controller/history/BUILD b/pkg/controller/history/BUILD index 5a5f4ebae9c..fa3fe327bcb 100644 --- a/pkg/controller/history/BUILD +++ b/pkg/controller/history/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["controller_history_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/history", - library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", "//pkg/controller:go_default_library", diff --git a/pkg/controller/job/BUILD b/pkg/controller/job/BUILD index a5ab7ba7aea..49b22b6707c 100644 --- a/pkg/controller/job/BUILD +++ b/pkg/controller/job/BUILD @@ -45,8 +45,8 @@ go_test( "job_controller_test.go", "utils_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/job", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/install:go_default_library", diff --git a/pkg/controller/namespace/deletion/BUILD b/pkg/controller/namespace/deletion/BUILD index 3ff183f2e42..40a0517cd79 100644 --- a/pkg/controller/namespace/deletion/BUILD +++ b/pkg/controller/namespace/deletion/BUILD @@ -29,8 +29,8 @@ go_library( go_test( name = "go_default_test", srcs = ["namespaced_resources_deleter_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/namespace/deletion", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", 
"//pkg/apis/core:go_default_library", diff --git a/pkg/controller/node/BUILD b/pkg/controller/node/BUILD index 7ed48e2dc62..57a684c8feb 100644 --- a/pkg/controller/node/BUILD +++ b/pkg/controller/node/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["nodecontroller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/node", - library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/fake:go_default_library", diff --git a/pkg/controller/node/ipam/BUILD b/pkg/controller/node/ipam/BUILD index e72f938e941..667f29f6b0c 100644 --- a/pkg/controller/node/ipam/BUILD +++ b/pkg/controller/node/ipam/BUILD @@ -13,8 +13,8 @@ go_test( "range_allocator_test.go", "timeout_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/node/ipam", - library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", "//pkg/controller/node/ipam/cidrset:go_default_library", diff --git a/pkg/controller/node/ipam/cidrset/BUILD b/pkg/controller/node/ipam/cidrset/BUILD index 5ea716c59ff..e3accb73bc6 100644 --- a/pkg/controller/node/ipam/cidrset/BUILD +++ b/pkg/controller/node/ipam/cidrset/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["cidr_set_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset", - library = ":go_default_library", deps = ["//vendor/github.com/golang/glog:go_default_library"], ) diff --git a/pkg/controller/node/ipam/sync/BUILD b/pkg/controller/node/ipam/sync/BUILD index bc34f435e84..6530b5d8126 100644 --- a/pkg/controller/node/ipam/sync/BUILD +++ b/pkg/controller/node/ipam/sync/BUILD @@ -15,8 +15,8 @@ go_library( go_test( name = "go_default_test", srcs = ["sync_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/sync", - library = ":go_default_library", deps = [ 
"//pkg/controller/node/ipam/cidrset:go_default_library", "//pkg/controller/node/ipam/test:go_default_library", diff --git a/pkg/controller/node/scheduler/BUILD b/pkg/controller/node/scheduler/BUILD index eb646535140..efe8ad0b8c0 100644 --- a/pkg/controller/node/scheduler/BUILD +++ b/pkg/controller/node/scheduler/BUILD @@ -13,8 +13,8 @@ go_test( "taint_controller_test.go", "timed_workers_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/node/scheduler", - library = ":go_default_library", deps = [ "//pkg/controller/testutil:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/controller/podautoscaler/BUILD b/pkg/controller/podautoscaler/BUILD index 08701232be6..6f923b2c845 100644 --- a/pkg/controller/podautoscaler/BUILD +++ b/pkg/controller/podautoscaler/BUILD @@ -55,8 +55,8 @@ go_test( "legacy_replica_calculator_test.go", "replica_calculator_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/autoscaling:go_default_library", diff --git a/pkg/controller/podautoscaler/metrics/BUILD b/pkg/controller/podautoscaler/metrics/BUILD index f0d328fdb57..8592a565082 100644 --- a/pkg/controller/podautoscaler/metrics/BUILD +++ b/pkg/controller/podautoscaler/metrics/BUILD @@ -38,8 +38,8 @@ go_test( "legacy_metrics_client_test.go", "rest_metrics_client_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/extensions/install:go_default_library", diff --git a/pkg/controller/podgc/BUILD b/pkg/controller/podgc/BUILD index 01209d81fc7..da2b2317aee 100644 --- a/pkg/controller/podgc/BUILD +++ b/pkg/controller/podgc/BUILD @@ -33,8 +33,8 @@ go_library( go_test( name = "go_default_test", srcs 
= ["gc_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/podgc", - library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", "//pkg/controller/testutil:go_default_library", diff --git a/pkg/controller/replicaset/BUILD b/pkg/controller/replicaset/BUILD index 3711a751aa5..7ad0a4ca3df 100644 --- a/pkg/controller/replicaset/BUILD +++ b/pkg/controller/replicaset/BUILD @@ -48,8 +48,8 @@ go_test( "replica_set_test.go", "replica_set_utils_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/replicaset", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/controller:go_default_library", diff --git a/pkg/controller/replication/BUILD b/pkg/controller/replication/BUILD index 17eb6701a8d..6e9e1710be5 100644 --- a/pkg/controller/replication/BUILD +++ b/pkg/controller/replication/BUILD @@ -47,8 +47,8 @@ go_library( go_test( name = "go_default_test", srcs = ["replication_controller_utils_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/replication", - library = ":go_default_library", deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], ) diff --git a/pkg/controller/resourcequota/BUILD b/pkg/controller/resourcequota/BUILD index 2b4bf45648d..001f1dbfa79 100644 --- a/pkg/controller/resourcequota/BUILD +++ b/pkg/controller/resourcequota/BUILD @@ -46,8 +46,8 @@ go_library( go_test( name = "go_default_test", srcs = ["resource_quota_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/resourcequota", - library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", "//pkg/quota:go_default_library", diff --git a/pkg/controller/route/BUILD b/pkg/controller/route/BUILD index 02efb7343d9..9f9fea10732 100644 --- a/pkg/controller/route/BUILD +++ b/pkg/controller/route/BUILD @@ -40,8 +40,8 @@ go_library( go_test( name 
= "go_default_test", srcs = ["route_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/route", - library = ":go_default_library", deps = [ "//pkg/api/v1/node:go_default_library", "//pkg/cloudprovider:go_default_library", diff --git a/pkg/controller/service/BUILD b/pkg/controller/service/BUILD index cfb191f9ca0..459108f05ac 100644 --- a/pkg/controller/service/BUILD +++ b/pkg/controller/service/BUILD @@ -39,8 +39,8 @@ go_library( go_test( name = "go_default_test", srcs = ["service_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/service", - library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", "//pkg/cloudprovider/providers/fake:go_default_library", diff --git a/pkg/controller/serviceaccount/BUILD b/pkg/controller/serviceaccount/BUILD index ef389de512c..dc5bc8fd5fe 100644 --- a/pkg/controller/serviceaccount/BUILD +++ b/pkg/controller/serviceaccount/BUILD @@ -51,8 +51,8 @@ go_test( "serviceaccounts_controller_test.go", "tokens_controller_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/serviceaccount", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/controller:go_default_library", diff --git a/pkg/controller/statefulset/BUILD b/pkg/controller/statefulset/BUILD index d35a9de0780..b38f1119d94 100644 --- a/pkg/controller/statefulset/BUILD +++ b/pkg/controller/statefulset/BUILD @@ -54,8 +54,8 @@ go_test( "stateful_set_test.go", "stateful_set_utils_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/statefulset", - library = ":go_default_library", deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/apis/apps/install:go_default_library", diff --git a/pkg/controller/ttl/BUILD b/pkg/controller/ttl/BUILD index 7c96860f757..7ba7c587065 100644 --- a/pkg/controller/ttl/BUILD +++ b/pkg/controller/ttl/BUILD @@ -44,8 
+44,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["ttl_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/ttl", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/controller/volume/attachdetach/BUILD b/pkg/controller/volume/attachdetach/BUILD index d3b76f22ff4..77a6feacd8f 100644 --- a/pkg/controller/volume/attachdetach/BUILD +++ b/pkg/controller/volume/attachdetach/BUILD @@ -42,8 +42,8 @@ go_library( go_test( name = "go_default_test", srcs = ["attach_detach_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach", - library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", "//pkg/controller/volume/attachdetach/cache:go_default_library", diff --git a/pkg/controller/volume/attachdetach/cache/BUILD b/pkg/controller/volume/attachdetach/cache/BUILD index 6bc3d938c54..91ab48ee3f0 100644 --- a/pkg/controller/volume/attachdetach/cache/BUILD +++ b/pkg/controller/volume/attachdetach/cache/BUILD @@ -30,8 +30,8 @@ go_test( "actual_state_of_world_test.go", "desired_state_of_world_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", - library = ":go_default_library", deps = [ "//pkg/controller/volume/attachdetach/testing:go_default_library", "//pkg/volume/testing:go_default_library", diff --git a/pkg/controller/volume/attachdetach/populator/BUILD b/pkg/controller/volume/attachdetach/populator/BUILD index 78f4ac36473..e479149bf12 100644 --- a/pkg/controller/volume/attachdetach/populator/BUILD +++ b/pkg/controller/volume/attachdetach/populator/BUILD @@ -41,8 +41,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["desired_state_of_world_populator_test.go"], + embed = [":go_default_library"], importpath = 
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator", - library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", "//pkg/controller/volume/attachdetach/cache:go_default_library", diff --git a/pkg/controller/volume/attachdetach/reconciler/BUILD b/pkg/controller/volume/attachdetach/reconciler/BUILD index 16379f5c0b9..f36d643138a 100644 --- a/pkg/controller/volume/attachdetach/reconciler/BUILD +++ b/pkg/controller/volume/attachdetach/reconciler/BUILD @@ -27,8 +27,8 @@ go_library( go_test( name = "go_default_test", srcs = ["reconciler_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler", - library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", "//pkg/controller/volume/attachdetach/cache:go_default_library", diff --git a/pkg/controller/volume/expand/cache/BUILD b/pkg/controller/volume/expand/cache/BUILD index c9f64da1a97..2085dcda258 100644 --- a/pkg/controller/volume/expand/cache/BUILD +++ b/pkg/controller/volume/expand/cache/BUILD @@ -39,8 +39,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["volume_resize_map_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/volume/expand/cache", - library = ":go_default_library", deps = [ "//pkg/volume/util/types:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/controller/volume/persistentvolume/BUILD b/pkg/controller/volume/persistentvolume/BUILD index 01ab29857e6..eaef2490e52 100644 --- a/pkg/controller/volume/persistentvolume/BUILD +++ b/pkg/controller/volume/persistentvolume/BUILD @@ -71,8 +71,8 @@ go_test( "scheduler_binder_cache_test.go", "scheduler_binder_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", - library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", 
"//pkg/apis/core:go_default_library", diff --git a/pkg/controller/volume/pvcprotection/BUILD b/pkg/controller/volume/pvcprotection/BUILD index a296dd22c0c..ebc1bb28def 100644 --- a/pkg/controller/volume/pvcprotection/BUILD +++ b/pkg/controller/volume/pvcprotection/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["pvc_protection_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/volume/pvcprotection", - library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", "//pkg/volume/util:go_default_library", diff --git a/pkg/credentialprovider/BUILD b/pkg/credentialprovider/BUILD index 89f836f4905..42ec2cb9ddc 100644 --- a/pkg/credentialprovider/BUILD +++ b/pkg/credentialprovider/BUILD @@ -31,8 +31,8 @@ go_test( "keyring_test.go", "provider_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/credentialprovider", - library = ":go_default_library", deps = ["//vendor/github.com/docker/docker/api/types:go_default_library"], ) diff --git a/pkg/credentialprovider/aws/BUILD b/pkg/credentialprovider/aws/BUILD index 5cb5b7647f7..5c66c309dbb 100644 --- a/pkg/credentialprovider/aws/BUILD +++ b/pkg/credentialprovider/aws/BUILD @@ -23,8 +23,8 @@ go_library( go_test( name = "go_default_test", srcs = ["aws_credentials_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/credentialprovider/aws", - library = ":go_default_library", deps = [ "//pkg/credentialprovider:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", diff --git a/pkg/credentialprovider/azure/BUILD b/pkg/credentialprovider/azure/BUILD index ee3b570baf0..c7213861b7f 100644 --- a/pkg/credentialprovider/azure/BUILD +++ b/pkg/credentialprovider/azure/BUILD @@ -29,8 +29,8 @@ go_library( go_test( name = "go_default_test", srcs = ["azure_credentials_test.go"], + embed = [":go_default_library"], importpath = 
"k8s.io/kubernetes/pkg/credentialprovider/azure", - library = ":go_default_library", deps = [ "//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", diff --git a/pkg/credentialprovider/gcp/BUILD b/pkg/credentialprovider/gcp/BUILD index 4d903e0dcf7..0a3b647a963 100644 --- a/pkg/credentialprovider/gcp/BUILD +++ b/pkg/credentialprovider/gcp/BUILD @@ -31,8 +31,8 @@ go_test( "jwt_test.go", "metadata_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/credentialprovider/gcp", - library = ":go_default_library", deps = [ "//pkg/credentialprovider:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", diff --git a/pkg/credentialprovider/rancher/BUILD b/pkg/credentialprovider/rancher/BUILD index afc26b6ddef..8d5150e6aaa 100644 --- a/pkg/credentialprovider/rancher/BUILD +++ b/pkg/credentialprovider/rancher/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["rancher_registry_credentials_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/credentialprovider/rancher", - library = ":go_default_library", deps = [ "//pkg/credentialprovider:go_default_library", "//vendor/github.com/rancher/go-rancher/client:go_default_library", diff --git a/pkg/fieldpath/BUILD b/pkg/fieldpath/BUILD index 0e4dae6a921..00cda4dc143 100644 --- a/pkg/fieldpath/BUILD +++ b/pkg/fieldpath/BUILD @@ -22,8 +22,8 @@ go_library( go_test( name = "go_default_test", srcs = ["fieldpath_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/fieldpath", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/kubeapiserver/BUILD b/pkg/kubeapiserver/BUILD index e72de1f9f93..63cc305a016 100644 --- a/pkg/kubeapiserver/BUILD +++ b/pkg/kubeapiserver/BUILD @@ -46,8 
+46,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["default_storage_factory_builder_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubeapiserver", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/install:go_default_library", diff --git a/pkg/kubeapiserver/admission/BUILD b/pkg/kubeapiserver/admission/BUILD index b3ec0cb6f9f..eb3da8d7b3c 100644 --- a/pkg/kubeapiserver/admission/BUILD +++ b/pkg/kubeapiserver/admission/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["initializer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubeapiserver/admission", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/admission:go_default_library"], ) diff --git a/pkg/kubeapiserver/authorizer/BUILD b/pkg/kubeapiserver/authorizer/BUILD index 5d5acebfb76..d15558e2079 100644 --- a/pkg/kubeapiserver/authorizer/BUILD +++ b/pkg/kubeapiserver/authorizer/BUILD @@ -12,8 +12,8 @@ go_test( data = [ "//pkg/auth/authorizer/abac:example_policy", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubeapiserver/authorizer", - library = ":go_default_library", deps = ["//pkg/kubeapiserver/authorizer/modes:go_default_library"], ) diff --git a/pkg/kubeapiserver/authorizer/modes/BUILD b/pkg/kubeapiserver/authorizer/modes/BUILD index 9be3d02c9a8..038d64aa1f2 100644 --- a/pkg/kubeapiserver/authorizer/modes/BUILD +++ b/pkg/kubeapiserver/authorizer/modes/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["modes_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes", - library = ":go_default_library", ) go_library( diff --git a/pkg/kubeapiserver/options/BUILD b/pkg/kubeapiserver/options/BUILD index 6d26b666571..8236765c55e 100644 --- a/pkg/kubeapiserver/options/BUILD +++ b/pkg/kubeapiserver/options/BUILD @@ -52,7 +52,7 @@ 
filegroup( go_test( name = "go_default_test", srcs = ["storage_versions_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubeapiserver/options", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library"], ) diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index c0180bf03c4..ecfbc750b11 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -36,8 +36,8 @@ go_test( "serviceaccount_test.go", "sorting_printer_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/kubectl/categories/BUILD b/pkg/kubectl/categories/BUILD index d26c196dd58..433cad9130d 100644 --- a/pkg/kubectl/categories/BUILD +++ b/pkg/kubectl/categories/BUILD @@ -14,8 +14,8 @@ go_library( go_test( name = "go_default_test", srcs = ["categories_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/categories", - library = ":go_default_library", deps = [ "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index 19adc5220d1..0bf32689830 100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -203,8 +203,8 @@ go_test( "//examples:config", "//test/fixtures", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/ref:go_default_library", diff --git a/pkg/kubectl/cmd/auth/BUILD b/pkg/kubectl/cmd/auth/BUILD index c702b790d7f..3393ed5aab6 100644 --- a/pkg/kubectl/cmd/auth/BUILD +++ b/pkg/kubectl/cmd/auth/BUILD @@ -50,8 +50,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["cani_test.go"], + embed = 
[":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/auth", - library = ":go_default_library", deps = [ "//pkg/kubectl/cmd/testing:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/pkg/kubectl/cmd/config/BUILD b/pkg/kubectl/cmd/config/BUILD index e5d53853ea2..5bdadef9ce7 100644 --- a/pkg/kubectl/cmd/config/BUILD +++ b/pkg/kubectl/cmd/config/BUILD @@ -61,8 +61,8 @@ go_test( "use_context_test.go", "view_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/config", - library = ":go_default_library", deps = [ "//pkg/kubectl/cmd/util:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/pkg/kubectl/cmd/resource/BUILD b/pkg/kubectl/cmd/resource/BUILD index 9c97f41d24a..cea1a70a4d8 100644 --- a/pkg/kubectl/cmd/resource/BUILD +++ b/pkg/kubectl/cmd/resource/BUILD @@ -36,8 +36,8 @@ go_test( "//examples:config", "//test/fixtures", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/resource", - library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", diff --git a/pkg/kubectl/cmd/set/BUILD b/pkg/kubectl/cmd/set/BUILD index f0f1848d898..64f1e6dc7d8 100644 --- a/pkg/kubectl/cmd/set/BUILD +++ b/pkg/kubectl/cmd/set/BUILD @@ -56,8 +56,8 @@ go_test( "//examples:config", "//test/fixtures", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/set", - library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", "//pkg/apis/rbac:go_default_library", diff --git a/pkg/kubectl/cmd/testdata/edit/BUILD b/pkg/kubectl/cmd/testdata/edit/BUILD index ce8c860c43c..18cfd71b784 100644 --- a/pkg/kubectl/cmd/testdata/edit/BUILD +++ b/pkg/kubectl/cmd/testdata/edit/BUILD @@ -6,8 +6,8 @@ load( go_binary( name = "edit", + embed = [":go_default_library"], importpath = 
"k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit", - library = ":go_default_library", visibility = ["//visibility:public"], ) diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index bf458175abb..63406eaf30d 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -94,8 +94,8 @@ go_test( data = [ "//api/swagger-spec", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util", - library = ":go_default_library", visibility = [ "//build/visible_to:COMMON_testing", ], diff --git a/pkg/kubectl/cmd/util/editor/BUILD b/pkg/kubectl/cmd/util/editor/BUILD index c156ddb6ebf..7ccb044444e 100644 --- a/pkg/kubectl/cmd/util/editor/BUILD +++ b/pkg/kubectl/cmd/util/editor/BUILD @@ -42,8 +42,8 @@ go_library( go_test( name = "go_default_test", srcs = ["editor_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor", - library = ":go_default_library", ) filegroup( diff --git a/pkg/kubectl/cmd/util/env/BUILD b/pkg/kubectl/cmd/util/env/BUILD index c6084cfea45..076e0b3a046 100644 --- a/pkg/kubectl/cmd/util/env/BUILD +++ b/pkg/kubectl/cmd/util/env/BUILD @@ -37,6 +37,6 @@ filegroup( go_test( name = "go_default_test", srcs = ["env_parse_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/env", - library = ":go_default_library", ) diff --git a/pkg/kubectl/cmd/util/openapi/validation/BUILD b/pkg/kubectl/cmd/util/openapi/validation/BUILD index cf05b074a9a..c0a473ebff4 100644 --- a/pkg/kubectl/cmd/util/openapi/validation/BUILD +++ b/pkg/kubectl/cmd/util/openapi/validation/BUILD @@ -27,8 +27,8 @@ go_test( "validation_test.go", ], data = ["//api/openapi-spec:swagger-spec"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation", - library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", "//pkg/kubectl/cmd/util/openapi:go_default_library", diff --git 
a/pkg/kubectl/explain/BUILD b/pkg/kubectl/explain/BUILD index 7143f21b354..c2a86e3b8e2 100644 --- a/pkg/kubectl/explain/BUILD +++ b/pkg/kubectl/explain/BUILD @@ -47,8 +47,8 @@ go_test( "typename_test.go", ], data = ["test-swagger.json"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/explain", - library = ":go_default_library", deps = [ "//pkg/kubectl/cmd/testing:go_default_library", "//pkg/kubectl/cmd/util/openapi/testing:go_default_library", diff --git a/pkg/kubectl/plugins/BUILD b/pkg/kubectl/plugins/BUILD index 4c497cac940..a2762649a90 100644 --- a/pkg/kubectl/plugins/BUILD +++ b/pkg/kubectl/plugins/BUILD @@ -44,7 +44,7 @@ go_test( "plugins_test.go", "runner_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/plugins", - library = ":go_default_library", deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) diff --git a/pkg/kubectl/proxy/BUILD b/pkg/kubectl/proxy/BUILD index 58a68c07a6b..a1b5e471bf0 100644 --- a/pkg/kubectl/proxy/BUILD +++ b/pkg/kubectl/proxy/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["proxy_server_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/proxy", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/proxy:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", diff --git a/pkg/kubectl/resource/BUILD b/pkg/kubectl/resource/BUILD index 2df1069b41f..e10135dcda6 100644 --- a/pkg/kubectl/resource/BUILD +++ b/pkg/kubectl/resource/BUILD @@ -54,8 +54,8 @@ go_test( "//examples:config", "//test/fixtures", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/resource", - library = ":go_default_library", deps = [ "//pkg/kubectl/categories:go_default_library", "//pkg/kubectl/scheme:go_default_library", diff --git a/pkg/kubectl/util/BUILD b/pkg/kubectl/util/BUILD index 535db1587a2..96457f550ef 100644 --- 
a/pkg/kubectl/util/BUILD +++ b/pkg/kubectl/util/BUILD @@ -7,10 +7,39 @@ load( go_library( name = "go_default_library", srcs = [ - "umask.go", "util.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "umask.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "umask.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "umask.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "umask.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "umask.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "umask.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "umask.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "umask.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "umask.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "umask.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "umask_windows.go", ], "//conditions:default": [], @@ -18,10 +47,41 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubectl/util", visibility = ["//build/visible_to:pkg_kubectl_util_CONSUMERS"], deps = [ - "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( @@ -49,6 +109,6 @@ filegroup( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/util", - library = ":go_default_library", ) diff --git a/pkg/kubectl/util/hash/BUILD b/pkg/kubectl/util/hash/BUILD index 7e8885b6535..6e6804a3844 100644 --- a/pkg/kubectl/util/hash/BUILD +++ b/pkg/kubectl/util/hash/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["hash_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/util/hash", - library = ":go_default_library", deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], ) diff --git a/pkg/kubectl/util/i18n/BUILD b/pkg/kubectl/util/i18n/BUILD index 30168608640..365ff2d7e12 100644 --- a/pkg/kubectl/util/i18n/BUILD +++ b/pkg/kubectl/util/i18n/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["i18n_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/util/i18n", - library = ":go_default_library", ) filegroup( diff --git a/pkg/kubectl/util/slice/BUILD b/pkg/kubectl/util/slice/BUILD index 03567ebbaf9..2e7cd8e01ee 100644 --- a/pkg/kubectl/util/slice/BUILD +++ b/pkg/kubectl/util/slice/BUILD @@ -15,8 +15,8 @@ go_library( go_test( name = "go_default_test", srcs = ["slice_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/util/slice", - library = ":go_default_library", ) filegroup( diff --git a/pkg/kubectl/util/term/BUILD 
b/pkg/kubectl/util/term/BUILD index d05dc402edd..8d0e6cfcc67 100644 --- a/pkg/kubectl/util/term/BUILD +++ b/pkg/kubectl/util/term/BUILD @@ -10,11 +10,40 @@ go_library( name = "go_default_library", srcs = [ "resize.go", - "resizeevents.go", "term.go", "term_writer.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "resizeevents.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "resizeevents.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "resizeevents.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "resizeevents.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "resizeevents.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "resizeevents.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "resizeevents.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "resizeevents.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "resizeevents.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "resizeevents.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "resizeevents_windows.go", ], "//conditions:default": [], @@ -24,17 +53,48 @@ go_library( "//pkg/util/interrupt:go_default_library", "//vendor/github.com/docker/docker/pkg/term:go_default_library", "//vendor/github.com/mitchellh/go-wordwrap:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + 
"//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), ) go_test( name = "go_default_test", srcs = ["term_writer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/util/term", - library = ":go_default_library", ) filegroup( diff --git a/pkg/kubectl/validation/BUILD b/pkg/kubectl/validation/BUILD index 0854e64950c..647f0a8e203 100644 --- a/pkg/kubectl/validation/BUILD +++ b/pkg/kubectl/validation/BUILD @@ -17,8 +17,8 @@ go_test( data = [ ":testdata", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubectl/validation", - library = ":go_default_library", ) go_library( diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index 3dc3fab3661..5ab4a2d6b6c 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -156,13 +156,13 @@ go_test( "reason_cache_test.go", "runonce_test.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "kubelet_pods_windows_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/install:go_default_library", diff --git a/pkg/kubelet/apis/kubeletconfig/BUILD 
b/pkg/kubelet/apis/kubeletconfig/BUILD index c62439d6261..bb8b5d591d3 100644 --- a/pkg/kubelet/apis/kubeletconfig/BUILD +++ b/pkg/kubelet/apis/kubeletconfig/BUILD @@ -45,8 +45,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/pkg/kubelet/apis/kubeletconfig/scheme/BUILD b/pkg/kubelet/apis/kubeletconfig/scheme/BUILD index 9bc425ac6b0..1b318508c9a 100644 --- a/pkg/kubelet/apis/kubeletconfig/scheme/BUILD +++ b/pkg/kubelet/apis/kubeletconfig/scheme/BUILD @@ -30,8 +30,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["scheme_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/kubeletconfig/fuzzer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", diff --git a/pkg/kubelet/apis/kubeletconfig/validation/BUILD b/pkg/kubelet/apis/kubeletconfig/validation/BUILD index cbbd5b9f70b..cbcbb789c81 100644 --- a/pkg/kubelet/apis/kubeletconfig/validation/BUILD +++ b/pkg/kubelet/apis/kubeletconfig/validation/BUILD @@ -34,8 +34,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", diff --git a/pkg/kubelet/cadvisor/BUILD b/pkg/kubelet/cadvisor/BUILD index faa9d5005b2..3c430e1ed6b 100644 --- a/pkg/kubelet/cadvisor/BUILD +++ b/pkg/kubelet/cadvisor/BUILD @@ -9,18 +9,55 @@ load( 
go_library( name = "go_default_library", srcs = [ - "cadvisor_unsupported.go", "doc.go", - "helpers_unsupported.go", "types.go", "util.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "cadvisor_linux.go", - "helpers_linux.go", + "@io_bazel_rules_go//go/platform:android": [ + "cadvisor_unsupported.go", + "helpers_unsupported.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ + "cadvisor_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "cadvisor_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "cadvisor_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "cadvisor_linux.go", + "cadvisor_unsupported.go", + "helpers_linux.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "cadvisor_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "cadvisor_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "cadvisor_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "cadvisor_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "cadvisor_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "cadvisor_windows.go", + "helpers_unsupported.go", ], "//conditions:default": [], }), @@ -36,7 +73,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/cache/memory:go_default_library", 
"//vendor/github.com/google/cadvisor/container:go_default_library", @@ -47,7 +84,7 @@ go_library( "//vendor/github.com/google/cadvisor/utils/sysfs:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//pkg/kubelet/winstats:go_default_library", ], "//conditions:default": [], @@ -57,15 +94,15 @@ go_library( go_test( name = "go_default_test", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "cadvisor_linux_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/cadvisor", - library = ":go_default_library", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/types:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/metrics:go_default_library", diff --git a/pkg/kubelet/certificate/BUILD b/pkg/kubelet/certificate/BUILD index 4d30fec92ec..ae9d656c342 100644 --- a/pkg/kubelet/certificate/BUILD +++ b/pkg/kubelet/certificate/BUILD @@ -32,8 +32,8 @@ go_library( go_test( name = "go_default_test", srcs = ["transport_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/certificate", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", diff --git a/pkg/kubelet/certificate/bootstrap/BUILD b/pkg/kubelet/certificate/bootstrap/BUILD index 05d24e0c0f4..ac22f80b2cc 100644 --- a/pkg/kubelet/certificate/bootstrap/BUILD +++ b/pkg/kubelet/certificate/bootstrap/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["bootstrap_test.go"], + embed = [":go_default_library"], importpath = 
"k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", diff --git a/pkg/kubelet/checkpoint/BUILD b/pkg/kubelet/checkpoint/BUILD index c8035650883..8886a834dcd 100644 --- a/pkg/kubelet/checkpoint/BUILD +++ b/pkg/kubelet/checkpoint/BUILD @@ -18,8 +18,8 @@ go_library( go_test( name = "go_default_test", srcs = ["checkpoint_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/checkpoint", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/kubelet/client/BUILD b/pkg/kubelet/client/BUILD index 3bf79dc1f08..036817bf99c 100644 --- a/pkg/kubelet/client/BUILD +++ b/pkg/kubelet/client/BUILD @@ -25,8 +25,8 @@ go_test( name = "go_default_test", srcs = ["kubelet_client_test.go"], data = ["//pkg/client/testdata"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/client", - library = ":go_default_library", deps = [ "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD index 7c4f78db12b..a331a8be770 100644 --- a/pkg/kubelet/cm/BUILD +++ b/pkg/kubelet/cm/BUILD @@ -3,18 +3,38 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ - "cgroup_manager_unsupported.go", "container_manager.go", "container_manager_stub.go", - "container_manager_unsupported.go", "fake_internal_container_lifecycle.go", - "helpers_unsupported.go", "internal_container_lifecycle.go", "pod_container_manager_stub.go", - "pod_container_manager_unsupported.go", "types.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + 
"cgroup_manager_unsupported.go", + "container_manager_unsupported.go", + "helpers_unsupported.go", + "pod_container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "cgroup_manager_unsupported.go", + "container_manager_unsupported.go", + "helpers_unsupported.go", + "pod_container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "cgroup_manager_unsupported.go", + "container_manager_unsupported.go", + "helpers_unsupported.go", + "pod_container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "cgroup_manager_unsupported.go", + "container_manager_unsupported.go", + "helpers_unsupported.go", + "pod_container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "cgroup_manager_linux.go", "container_manager_linux.go", "helpers_linux.go", @@ -22,8 +42,41 @@ go_library( "pod_container_manager_linux.go", "qos_container_manager_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "cgroup_manager_unsupported.go", + "container_manager_unsupported.go", + "helpers_unsupported.go", + "pod_container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "cgroup_manager_unsupported.go", + "container_manager_unsupported.go", + "helpers_unsupported.go", + "pod_container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "cgroup_manager_unsupported.go", + "container_manager_unsupported.go", + "helpers_unsupported.go", + "pod_container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "cgroup_manager_unsupported.go", + "container_manager_unsupported.go", + "helpers_unsupported.go", + "pod_container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "cgroup_manager_unsupported.go", + "container_manager_unsupported.go", + "helpers_unsupported.go", + "pod_container_manager_unsupported.go", + ], + 
"@io_bazel_rules_go//go/platform:windows": [ + "cgroup_manager_unsupported.go", "container_manager_windows.go", + "helpers_unsupported.go", + "pod_container_manager_unsupported.go", ], "//conditions:default": [], }), @@ -32,26 +85,44 @@ go_library( deps = [ "//pkg/features:go_default_library", "//pkg/kubelet/apis/cri:go_default_library", - "//pkg/kubelet/cadvisor:go_default_library", "//pkg/kubelet/cm/cpumanager:go_default_library", "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/eviction/api:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/status:go_default_library", - "//pkg/util/mount:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], 
+ "@io_bazel_rules_go//go/platform:linux": [ "//pkg/api/v1/resource:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper/qos:go_default_library", + "//pkg/kubelet/cadvisor:go_default_library", "//pkg/kubelet/cm/deviceplugin:go_default_library", "//pkg/kubelet/cm/util:go_default_library", "//pkg/kubelet/events:go_default_library", @@ -59,6 +130,7 @@ go_library( "//pkg/kubelet/qos:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/util/file:go_default_library", + "//pkg/util/mount:go_default_library", "//pkg/util/oom:go_default_library", "//pkg/util/procfs:go_default_library", "//pkg/util/sysctl:go_default_library", @@ -71,6 +143,37 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:windows": [ + "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/util/mount:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", ], "//conditions:default": [], }), @@ -79,7 +182,7 @@ go_library( go_test( name = "go_default_test", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "cgroup_manager_linux_test.go", "cgroup_manager_test.go", "container_manager_linux_test.go", @@ -88,10 +191,10 @@ go_test( ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/cm", - library = ":go_default_library", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/eviction/api:go_default_library", "//pkg/util/mount:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/cm/cpumanager/BUILD b/pkg/kubelet/cm/cpumanager/BUILD index f76a6a9312e..d3330084825 100644 --- a/pkg/kubelet/cm/cpumanager/BUILD +++ b/pkg/kubelet/cm/cpumanager/BUILD @@ -36,8 +36,8 @@ go_test( "policy_static_test.go", "policy_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/cm/cpumanager/state:go_default_library", diff --git a/pkg/kubelet/cm/cpumanager/state/BUILD b/pkg/kubelet/cm/cpumanager/state/BUILD index 8af631c9ad6..31f18d0a3a0 100644 --- a/pkg/kubelet/cm/cpumanager/state/BUILD +++ b/pkg/kubelet/cm/cpumanager/state/BUILD @@ -18,8 +18,8 @@ go_library( go_test( name = "go_default_test", srcs = ["state_file_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state", - library = ":go_default_library", deps = ["//pkg/kubelet/cm/cpuset:go_default_library"], ) diff --git 
a/pkg/kubelet/cm/cpumanager/topology/BUILD b/pkg/kubelet/cm/cpumanager/topology/BUILD index eb6ba3d6e63..e50c09ffd00 100644 --- a/pkg/kubelet/cm/cpumanager/topology/BUILD +++ b/pkg/kubelet/cm/cpumanager/topology/BUILD @@ -32,7 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["topology_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology", - library = ":go_default_library", deps = ["//vendor/github.com/google/cadvisor/info/v1:go_default_library"], ) diff --git a/pkg/kubelet/cm/cpuset/BUILD b/pkg/kubelet/cm/cpuset/BUILD index 7ed863a5ee6..eadcb49b2b8 100644 --- a/pkg/kubelet/cm/cpuset/BUILD +++ b/pkg/kubelet/cm/cpuset/BUILD @@ -11,8 +11,8 @@ go_library( go_test( name = "go_default_test", srcs = ["cpuset_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpuset", - library = ":go_default_library", ) filegroup( diff --git a/pkg/kubelet/cm/deviceplugin/BUILD b/pkg/kubelet/cm/deviceplugin/BUILD index 8474da6fac7..eb780952c41 100644 --- a/pkg/kubelet/cm/deviceplugin/BUILD +++ b/pkg/kubelet/cm/deviceplugin/BUILD @@ -55,8 +55,8 @@ go_test( "endpoint_test.go", "manager_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/deviceplugin/v1alpha:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", diff --git a/pkg/kubelet/cm/util/BUILD b/pkg/kubelet/cm/util/BUILD index 9de2c5ba011..e344dc6e66e 100644 --- a/pkg/kubelet/cm/util/BUILD +++ b/pkg/kubelet/cm/util/BUILD @@ -7,17 +7,45 @@ load( go_library( name = "go_default_library", - srcs = [ - "cgroups_unsupported.go", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "cgroups_unsupported.go", + ], + 
"@io_bazel_rules_go//go/platform:dragonfly": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "cgroups_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "cgroups_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/kubelet/cm/util", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/utils:go_default_library", ], diff --git a/pkg/kubelet/config/BUILD b/pkg/kubelet/config/BUILD index f5e082e7c25..f7023cb08c6 100644 --- a/pkg/kubelet/config/BUILD +++ b/pkg/kubelet/config/BUILD @@ -9,14 +9,43 @@ go_library( "defaults.go", "doc.go", "file.go", - "file_unsupported.go", "flags.go", "http.go", "sources.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "file_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "file_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "file_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "file_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "file_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "file_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "file_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + 
"file_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "file_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "file_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "file_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/kubelet/config", @@ -49,7 +78,7 @@ go_library( "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/exp/inotify:go_default_library", ], "//conditions:default": [], @@ -64,13 +93,13 @@ go_test( "config_test.go", "http_test.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "file_linux_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/config", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", @@ -90,7 +119,7 @@ go_test( "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], "//conditions:default": [], diff --git a/pkg/kubelet/configmap/BUILD b/pkg/kubelet/configmap/BUILD index 27612f889f2..f7ad67e8ee8 100644 --- a/pkg/kubelet/configmap/BUILD +++ b/pkg/kubelet/configmap/BUILD @@ -42,8 +42,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["configmap_manager_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/configmap", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", 
"//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/kubelet/container/BUILD b/pkg/kubelet/container/BUILD index ac38ebd8004..00c403b2ca6 100644 --- a/pkg/kubelet/container/BUILD +++ b/pkg/kubelet/container/BUILD @@ -8,7 +8,6 @@ go_library( "container_reference_manager.go", "helpers.go", "os.go", - "pty_unsupported.go", "ref.go", "resize.go", "runtime.go", @@ -16,9 +15,39 @@ go_library( "runtime_cache_fake.go", "sync_result.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "pty_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "pty_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/kubelet/container", @@ -43,7 +72,7 @@ go_library( "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/kr/pty:go_default_library", ], "//conditions:default": [], @@ -58,8 +87,8 @@ go_test( "ref_test.go", "sync_result_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/container", - library = ":go_default_library", deps = [ 
"//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/install:go_default_library", diff --git a/pkg/kubelet/custommetrics/BUILD b/pkg/kubelet/custommetrics/BUILD index 2e1efaed2f9..4ef23949667 100644 --- a/pkg/kubelet/custommetrics/BUILD +++ b/pkg/kubelet/custommetrics/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["custom_metrics_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/custommetrics", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/kubelet/dockershim/BUILD b/pkg/kubelet/dockershim/BUILD index 748a74c79e8..a201ddd57e6 100644 --- a/pkg/kubelet/dockershim/BUILD +++ b/pkg/kubelet/dockershim/BUILD @@ -14,24 +14,66 @@ go_library( "docker_checkpoint.go", "docker_container.go", "docker_image.go", - "docker_image_unsupported.go", "docker_sandbox.go", "docker_service.go", - "docker_stats_unsupported.go", "docker_streaming.go", "exec.go", "helpers.go", - "helpers_unsupported.go", "naming.go", "security_context.go", "selinux_util.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "docker_image_unsupported.go", + "docker_stats_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "docker_image_unsupported.go", + "docker_stats_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "docker_image_unsupported.go", + "docker_stats_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "docker_image_unsupported.go", + "docker_stats_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "docker_image_linux.go", "docker_stats_linux.go", "helpers_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + 
"@io_bazel_rules_go//go/platform:nacl": [ + "docker_image_unsupported.go", + "docker_stats_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "docker_image_unsupported.go", + "docker_stats_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "docker_image_unsupported.go", + "docker_stats_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "docker_image_unsupported.go", + "docker_stats_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "docker_image_unsupported.go", + "docker_stats_unsupported.go", + "helpers_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "docker_image_windows.go", "docker_stats_windows.go", "helpers_windows.go", @@ -95,15 +137,15 @@ go_test( "security_context_test.go", "selinux_util_test.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "helpers_linux_test.go", ], "//conditions:default": [], }), data = [ ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/container:go_default_library", diff --git a/pkg/kubelet/dockershim/cm/BUILD b/pkg/kubelet/dockershim/cm/BUILD index fb5661cd50a..871d6f9b1be 100644 --- a/pkg/kubelet/dockershim/cm/BUILD +++ b/pkg/kubelet/dockershim/cm/BUILD @@ -9,22 +9,59 @@ go_library( name = "go_default_library", srcs = [ "container_manager.go", - "container_manager_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + 
"container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "container_manager_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "container_manager_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "container_manager_windows.go", ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/cm", - deps = [ - "//pkg/kubelet/dockershim/libdocker:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/cm:go_default_library", + "//pkg/kubelet/dockershim/libdocker:go_default_library", "//pkg/kubelet/qos:go_default_library", "//pkg/util/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -32,6 +69,24 @@ go_library( "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], + "@io_bazel_rules_go//go/platform:nacl": [ + 
"//pkg/kubelet/dockershim/libdocker:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//pkg/kubelet/dockershim/libdocker:go_default_library", + ], "//conditions:default": [], }), ) diff --git a/pkg/kubelet/dockershim/libdocker/BUILD b/pkg/kubelet/dockershim/libdocker/BUILD index 574ca6eca9d..364600ea0b2 100644 --- a/pkg/kubelet/dockershim/libdocker/BUILD +++ b/pkg/kubelet/dockershim/libdocker/BUILD @@ -12,8 +12,8 @@ go_test( "helpers_test.go", "kube_docker_client_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker", - library = ":go_default_library", deps = [ "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/eviction/BUILD b/pkg/kubelet/eviction/BUILD index 44f167752f1..dba5bf9540a 100644 --- a/pkg/kubelet/eviction/BUILD +++ b/pkg/kubelet/eviction/BUILD @@ -12,8 +12,8 @@ go_test( "eviction_manager_test.go", "helpers_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/eviction", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/features:go_default_library", @@ -38,12 +38,41 @@ go_library( "doc.go", "eviction_manager.go", "helpers.go", - "threshold_notifier_unsupported.go", "types.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "threshold_notifier_unsupported.go", + ], + 
"@io_bazel_rules_go//go/platform:darwin": [ + "threshold_notifier_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "threshold_notifier_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "threshold_notifier_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "threshold_notifier_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "threshold_notifier_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "threshold_notifier_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "threshold_notifier_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "threshold_notifier_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "threshold_notifier_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "threshold_notifier_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/kubelet/eviction", @@ -70,7 +99,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/pkg/kubelet/gpu/nvidia/BUILD b/pkg/kubelet/gpu/nvidia/BUILD index 62082545e71..8f8dd72a4c4 100644 --- a/pkg/kubelet/gpu/nvidia/BUILD +++ b/pkg/kubelet/gpu/nvidia/BUILD @@ -40,8 +40,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["nvidia_gpu_manager_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/gpu/nvidia", - library = ":go_default_library", deps = [ "//pkg/kubelet/dockershim:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", diff --git a/pkg/kubelet/images/BUILD b/pkg/kubelet/images/BUILD index 8a8684920f4..c47aabd43fc 100644 --- a/pkg/kubelet/images/BUILD +++ 
b/pkg/kubelet/images/BUILD @@ -39,8 +39,8 @@ go_test( "image_gc_manager_test.go", "image_manager_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/images", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/container:go_default_library", diff --git a/pkg/kubelet/kubeletconfig/checkpoint/BUILD b/pkg/kubelet/kubeletconfig/checkpoint/BUILD index 4a2f55aa6a7..96ac5db59d5 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/BUILD +++ b/pkg/kubelet/kubeletconfig/checkpoint/BUILD @@ -13,8 +13,8 @@ go_test( "configmap_test.go", "download_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", diff --git a/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD b/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD index 37529c46abb..1c61c026a2c 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD +++ b/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD @@ -12,8 +12,8 @@ go_test( "fsstore_test.go", "store_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store", - library = ":go_default_library", deps = [ "//pkg/kubelet/kubeletconfig/checkpoint:go_default_library", "//pkg/kubelet/kubeletconfig/util/files:go_default_library", diff --git a/pkg/kubelet/kubeletconfig/configfiles/BUILD b/pkg/kubelet/kubeletconfig/configfiles/BUILD index 939e728f012..db3b6cd8242 100644 --- a/pkg/kubelet/kubeletconfig/configfiles/BUILD +++ b/pkg/kubelet/kubeletconfig/configfiles/BUILD @@ -35,8 +35,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["configfiles_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles", - library = 
":go_default_library", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", diff --git a/pkg/kubelet/kuberuntime/BUILD b/pkg/kubelet/kuberuntime/BUILD index e46a4dcc412..c2719a329d1 100644 --- a/pkg/kubelet/kuberuntime/BUILD +++ b/pkg/kubelet/kuberuntime/BUILD @@ -78,8 +78,8 @@ go_test( "legacy_test.go", "security_context_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime", - library = ":go_default_library", deps = [ "//pkg/credentialprovider:go_default_library", "//pkg/kubelet/apis/cri/testing:go_default_library", diff --git a/pkg/kubelet/kuberuntime/logs/BUILD b/pkg/kubelet/kuberuntime/logs/BUILD index 211a0b2c0fe..6385e448859 100644 --- a/pkg/kubelet/kuberuntime/logs/BUILD +++ b/pkg/kubelet/kuberuntime/logs/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["logs_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/lifecycle/BUILD b/pkg/kubelet/lifecycle/BUILD index d2463e5de9d..f3f4255321f 100644 --- a/pkg/kubelet/lifecycle/BUILD +++ b/pkg/kubelet/lifecycle/BUILD @@ -35,8 +35,8 @@ go_library( go_test( name = "go_default_test", srcs = ["handlers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/lifecycle", - library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/util/format:go_default_library", diff --git a/pkg/kubelet/mountpod/BUILD b/pkg/kubelet/mountpod/BUILD index d21981739a7..cd187e515bd 100644 --- a/pkg/kubelet/mountpod/BUILD +++ b/pkg/kubelet/mountpod/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["mount_pod_test.go"], + embed = 
[":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/mountpod", - library = ":go_default_library", deps = [ "//pkg/kubelet/configmap:go_default_library", "//pkg/kubelet/pod:go_default_library", diff --git a/pkg/kubelet/network/cni/BUILD b/pkg/kubelet/network/cni/BUILD index b91992366ec..ce04e2d465b 100644 --- a/pkg/kubelet/network/cni/BUILD +++ b/pkg/kubelet/network/cni/BUILD @@ -10,9 +10,38 @@ go_library( name = "go_default_library", srcs = [ "cni.go", - "cni_others.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "cni_others.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "cni_others.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "cni_others.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "cni_others.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "cni_others.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "cni_others.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "cni_others.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "cni_others.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "cni_others.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "cni_others.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "cni_windows.go", ], "//conditions:default": [], @@ -27,7 +56,7 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/containernetworking/cni/pkg/types/020:go_default_library", ], "//conditions:default": [], @@ -37,15 +66,15 @@ go_library( go_test( name = "go_default_test", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "cni_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = 
"k8s.io/kubernetes/pkg/kubelet/network/cni", - library = ":go_default_library", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/container/testing:go_default_library", diff --git a/pkg/kubelet/network/dns/BUILD b/pkg/kubelet/network/dns/BUILD index c0a4c05463a..3088a32b803 100644 --- a/pkg/kubelet/network/dns/BUILD +++ b/pkg/kubelet/network/dns/BUILD @@ -21,8 +21,8 @@ go_library( go_test( name = "go_default_test", srcs = ["dns_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/network/dns", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/network/hairpin/BUILD b/pkg/kubelet/network/hairpin/BUILD index d10f32a5c86..8ef0197c84b 100644 --- a/pkg/kubelet/network/hairpin/BUILD +++ b/pkg/kubelet/network/hairpin/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["hairpin_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/network/hairpin", - library = ":go_default_library", deps = [ "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", diff --git a/pkg/kubelet/network/hostport/BUILD b/pkg/kubelet/network/hostport/BUILD index a6bb4d0e14b..24864a3a538 100644 --- a/pkg/kubelet/network/hostport/BUILD +++ b/pkg/kubelet/network/hostport/BUILD @@ -33,8 +33,8 @@ go_test( "hostport_syncer_test.go", "hostport_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/network/hostport", - library = ":go_default_library", deps = [ "//pkg/util/iptables:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git 
a/pkg/kubelet/network/kubenet/BUILD b/pkg/kubelet/network/kubenet/BUILD index 13c4bce3db3..7b451dae9b0 100644 --- a/pkg/kubelet/network/kubenet/BUILD +++ b/pkg/kubelet/network/kubenet/BUILD @@ -10,20 +10,68 @@ go_library( name = "go_default_library", srcs = [ "kubenet.go", - "kubenet_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "kubenet_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "kubenet_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "kubenet_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "kubenet_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "kubenet_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "kubenet_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "kubenet_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "kubenet_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "kubenet_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "kubenet_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "kubenet_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/kubelet/network/kubenet", - deps = [ - "//pkg/kubelet/apis/kubeletconfig:go_default_library", - "//pkg/kubelet/container:go_default_library", - "//pkg/kubelet/network:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:dragonfly": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", "//pkg/kubelet/network/hostport:go_default_library", "//pkg/util/bandwidth:go_default_library", "//pkg/util/dbus:go_default_library", @@ -42,6 +90,36 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + 
"//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/network:go_default_library", + ], "//conditions:default": [], }), ) @@ -49,15 +127,15 @@ go_library( go_test( name = "go_default_test", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "kubenet_linux_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/network/kubenet", - library = ":go_default_library", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/network:go_default_library", diff --git a/pkg/kubelet/network/testing/BUILD b/pkg/kubelet/network/testing/BUILD index 9107cd2297e..2e4525df632 100644 --- a/pkg/kubelet/network/testing/BUILD +++ b/pkg/kubelet/network/testing/BUILD @@ -29,8 +29,8 @@ go_library( go_test( name = "go_default_test", srcs = ["plugins_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/network/testing", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/container:go_default_library", diff --git a/pkg/kubelet/pleg/BUILD b/pkg/kubelet/pleg/BUILD index 122fd02436d..2033ea4be74 100644 --- a/pkg/kubelet/pleg/BUILD +++ b/pkg/kubelet/pleg/BUILD @@ -29,8 +29,8 @@ go_library( go_test( name = "go_default_test", srcs = ["generic_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/pleg", - library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/container/testing:go_default_library", diff --git a/pkg/kubelet/pod/BUILD b/pkg/kubelet/pod/BUILD index 2287b7cf9e9..aa3f1feecb1 100644 --- a/pkg/kubelet/pod/BUILD +++ b/pkg/kubelet/pod/BUILD @@ -34,8 +34,8 @@ go_test( "mirror_client_test.go", 
"pod_manager_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/pod", - library = ":go_default_library", deps = [ "//pkg/kubelet/configmap:go_default_library", "//pkg/kubelet/container:go_default_library", diff --git a/pkg/kubelet/preemption/BUILD b/pkg/kubelet/preemption/BUILD index 736a457de10..7c0b90843c3 100644 --- a/pkg/kubelet/preemption/BUILD +++ b/pkg/kubelet/preemption/BUILD @@ -44,8 +44,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["preemption_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/preemption", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/kubelet/types:go_default_library", diff --git a/pkg/kubelet/prober/BUILD b/pkg/kubelet/prober/BUILD index ef21ca9173f..4e0d70e290a 100644 --- a/pkg/kubelet/prober/BUILD +++ b/pkg/kubelet/prober/BUILD @@ -45,8 +45,8 @@ go_test( "prober_test.go", "worker_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/prober", - library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/container/testing:go_default_library", diff --git a/pkg/kubelet/prober/results/BUILD b/pkg/kubelet/prober/results/BUILD index 2e73fb74483..7aaf9701dc9 100644 --- a/pkg/kubelet/prober/results/BUILD +++ b/pkg/kubelet/prober/results/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["results_manager_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/prober/results", - library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/qos/BUILD b/pkg/kubelet/qos/BUILD index 6160d7b4137..30fd70d5a2b 100644 --- a/pkg/kubelet/qos/BUILD +++ b/pkg/kubelet/qos/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["policy_test.go"], + embed = 
[":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/qos", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/kubelet/remote/BUILD b/pkg/kubelet/remote/BUILD index cee27f87387..ad83fa72b9c 100644 --- a/pkg/kubelet/remote/BUILD +++ b/pkg/kubelet/remote/BUILD @@ -45,8 +45,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["remote_runtime_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/remote", - library = ":go_default_library", tags = ["automanaged"], deps = [ "//pkg/kubelet/apis/cri:go_default_library", diff --git a/pkg/kubelet/remote/fake/BUILD b/pkg/kubelet/remote/fake/BUILD index 265aced9de7..909c88edb2b 100644 --- a/pkg/kubelet/remote/fake/BUILD +++ b/pkg/kubelet/remote/fake/BUILD @@ -11,11 +11,40 @@ go_library( name = "go_default_library", srcs = [ "doc.go", - "endpoint.go", "fake_image_service.go", "fake_runtime.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "endpoint.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "endpoint.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "endpoint.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "endpoint.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "endpoint.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "endpoint.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "endpoint.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "endpoint.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "endpoint.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "endpoint.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "endpoint_windows.go", ], "//conditions:default": [], diff --git a/pkg/kubelet/rkt/BUILD b/pkg/kubelet/rkt/BUILD index 3b687934fd3..989583b9272 100644 --- 
a/pkg/kubelet/rkt/BUILD +++ b/pkg/kubelet/rkt/BUILD @@ -66,8 +66,8 @@ go_test( "fake_rkt_interface_test.go", "rkt_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/rkt", - library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/container/testing:go_default_library", diff --git a/pkg/kubelet/rktshim/BUILD b/pkg/kubelet/rktshim/BUILD index 403d4dfdd89..cf6baa7dcda 100644 --- a/pkg/kubelet/rktshim/BUILD +++ b/pkg/kubelet/rktshim/BUILD @@ -26,8 +26,8 @@ go_library( go_test( name = "go_default_test", srcs = ["imagestore_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/rktshim", - library = ":go_default_library", deps = ["//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library"], ) diff --git a/pkg/kubelet/secret/BUILD b/pkg/kubelet/secret/BUILD index 26a879d8fa6..6ece4b21f3a 100644 --- a/pkg/kubelet/secret/BUILD +++ b/pkg/kubelet/secret/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["secret_manager_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/secret", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/kubelet/server/BUILD b/pkg/kubelet/server/BUILD index 7584f70d522..bfc788686a2 100644 --- a/pkg/kubelet/server/BUILD +++ b/pkg/kubelet/server/BUILD @@ -56,8 +56,8 @@ go_test( "server_test.go", "server_websocket_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/server", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/core/install:go_default_library", diff --git a/pkg/kubelet/server/portforward/BUILD b/pkg/kubelet/server/portforward/BUILD index 190ff72197f..b2ceea570ee 100644 --- a/pkg/kubelet/server/portforward/BUILD +++ b/pkg/kubelet/server/portforward/BUILD @@ 
-33,8 +33,8 @@ go_test( "httpstream_test.go", "websocket_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/server/portforward", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", diff --git a/pkg/kubelet/server/stats/BUILD b/pkg/kubelet/server/stats/BUILD index 00140b342bf..d6d400af483 100644 --- a/pkg/kubelet/server/stats/BUILD +++ b/pkg/kubelet/server/stats/BUILD @@ -35,8 +35,8 @@ go_test( "summary_test.go", "volume_stat_calculator_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/server/stats", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/cm:go_default_library", diff --git a/pkg/kubelet/server/streaming/BUILD b/pkg/kubelet/server/streaming/BUILD index 70fe31d096a..1f0e2949ec9 100644 --- a/pkg/kubelet/server/streaming/BUILD +++ b/pkg/kubelet/server/streaming/BUILD @@ -34,8 +34,8 @@ go_test( "request_cache_test.go", "server_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/server/streaming", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", diff --git a/pkg/kubelet/stats/BUILD b/pkg/kubelet/stats/BUILD index 07732d9ad09..e272ed4f00c 100644 --- a/pkg/kubelet/stats/BUILD +++ b/pkg/kubelet/stats/BUILD @@ -54,8 +54,8 @@ go_test( "helper_test.go", "stats_provider_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/stats", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis/cri/testing:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", diff --git a/pkg/kubelet/status/BUILD b/pkg/kubelet/status/BUILD index 0483a6a3b6a..99f5a3ad36c 100644 --- a/pkg/kubelet/status/BUILD +++ b/pkg/kubelet/status/BUILD @@ 
-37,8 +37,8 @@ go_test( "generate_test.go", "status_manager_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/status", - library = ":go_default_library", deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/kubelet/sysctl/BUILD b/pkg/kubelet/sysctl/BUILD index 7d7cc778dc7..d36e7675b50 100644 --- a/pkg/kubelet/sysctl/BUILD +++ b/pkg/kubelet/sysctl/BUILD @@ -30,8 +30,8 @@ go_test( "namespace_test.go", "whitelist_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/sysctl", - library = ":go_default_library", deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], ) diff --git a/pkg/kubelet/types/BUILD b/pkg/kubelet/types/BUILD index 515d1e8a096..bebe62b0937 100644 --- a/pkg/kubelet/types/BUILD +++ b/pkg/kubelet/types/BUILD @@ -31,8 +31,8 @@ go_test( "pod_update_test.go", "types_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/types", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/pkg/kubelet/util/BUILD b/pkg/kubelet/util/BUILD index ad092614a9b..0d67713211d 100644 --- a/pkg/kubelet/util/BUILD +++ b/pkg/kubelet/util/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/util", - library = ":go_default_library", deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], ) @@ -19,15 +19,38 @@ go_library( srcs = [ "doc.go", "util.go", - "util_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ "util_unix.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + 
"@io_bazel_rules_go//go/platform:dragonfly": [ + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ "util_unix.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ + "util_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "util_windows.go", ], "//conditions:default": [], @@ -36,11 +59,15 @@ go_library( deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", ], diff --git a/pkg/kubelet/util/cache/BUILD b/pkg/kubelet/util/cache/BUILD index 47910609800..5fc8634d1f9 100644 --- a/pkg/kubelet/util/cache/BUILD +++ b/pkg/kubelet/util/cache/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["object_cache_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/util/cache", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", diff --git 
a/pkg/kubelet/util/format/BUILD b/pkg/kubelet/util/format/BUILD index 887330b96aa..7c008a863cd 100644 --- a/pkg/kubelet/util/format/BUILD +++ b/pkg/kubelet/util/format/BUILD @@ -22,8 +22,8 @@ go_library( go_test( name = "go_default_test", srcs = ["resources_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/util/format", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/kubelet/util/queue/BUILD b/pkg/kubelet/util/queue/BUILD index 0b43effc87a..b241e4687b7 100644 --- a/pkg/kubelet/util/queue/BUILD +++ b/pkg/kubelet/util/queue/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["work_queue_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/util/queue", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", diff --git a/pkg/kubelet/util/sliceutils/BUILD b/pkg/kubelet/util/sliceutils/BUILD index 4134706c80e..04c56005ca8 100644 --- a/pkg/kubelet/util/sliceutils/BUILD +++ b/pkg/kubelet/util/sliceutils/BUILD @@ -32,8 +32,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["sliceutils_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/util/sliceutils", - library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/kubelet/util/store/BUILD b/pkg/kubelet/util/store/BUILD index 66640ca1975..f056a5ae692 100644 --- a/pkg/kubelet/util/store/BUILD +++ b/pkg/kubelet/util/store/BUILD @@ -18,8 +18,8 @@ go_test( "filestore_test.go", "store_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/util/store", - library = ":go_default_library", deps = [ 
"//pkg/util/filesystem:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/volumemanager/BUILD b/pkg/kubelet/volumemanager/BUILD index 9139cae32c3..5393d2becfc 100644 --- a/pkg/kubelet/volumemanager/BUILD +++ b/pkg/kubelet/volumemanager/BUILD @@ -39,8 +39,8 @@ go_library( go_test( name = "go_default_test", srcs = ["volume_manager_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager", - library = ":go_default_library", deps = [ "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/configmap:go_default_library", diff --git a/pkg/kubelet/volumemanager/cache/BUILD b/pkg/kubelet/volumemanager/cache/BUILD index cf5e351aa3b..dc2899147a5 100644 --- a/pkg/kubelet/volumemanager/cache/BUILD +++ b/pkg/kubelet/volumemanager/cache/BUILD @@ -30,8 +30,8 @@ go_test( "actual_state_of_world_test.go", "desired_state_of_world_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", - library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", diff --git a/pkg/kubelet/volumemanager/populator/BUILD b/pkg/kubelet/volumemanager/populator/BUILD index f8938c1ce3d..78639a6f749 100644 --- a/pkg/kubelet/volumemanager/populator/BUILD +++ b/pkg/kubelet/volumemanager/populator/BUILD @@ -47,8 +47,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["desired_state_of_world_populator_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator", - library = ":go_default_library", deps = [ "//pkg/kubelet/configmap:go_default_library", "//pkg/kubelet/container/testing:go_default_library", diff --git a/pkg/kubelet/volumemanager/reconciler/BUILD b/pkg/kubelet/volumemanager/reconciler/BUILD index 6fca7c17d82..23b243dd796 100644 --- a/pkg/kubelet/volumemanager/reconciler/BUILD +++ 
b/pkg/kubelet/volumemanager/reconciler/BUILD @@ -36,8 +36,8 @@ go_library( go_test( name = "go_default_test", srcs = ["reconciler_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler", - library = ":go_default_library", deps = [ "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/util/mount:go_default_library", diff --git a/pkg/kubelet/winstats/BUILD b/pkg/kubelet/winstats/BUILD index 049fae2385f..34211f896b0 100644 --- a/pkg/kubelet/winstats/BUILD +++ b/pkg/kubelet/winstats/BUILD @@ -22,7 +22,7 @@ go_library( srcs = [ "winstats.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "perfcounter_nodestats.go", "perfcounters.go", "version.go", @@ -34,7 +34,7 @@ go_library( "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/JeffAshton/win_pdh:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/windows:go_default_library", @@ -47,8 +47,8 @@ go_library( go_test( name = "go_default_test", srcs = ["winstats_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/kubelet/winstats", - library = ":go_default_library", deps = [ "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", diff --git a/pkg/master/BUILD b/pkg/master/BUILD index bcb36660bba..71380163d2a 100644 --- a/pkg/master/BUILD +++ b/pkg/master/BUILD @@ -119,9 +119,9 @@ go_test( "master_openapi_test.go", "master_test.go", ], + embed = [":go_default_library"], features = ["-race"], importpath = "k8s.io/kubernetes/pkg/master", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", 
"//pkg/api/testapi:go_default_library", diff --git a/pkg/master/controller/crdregistration/BUILD b/pkg/master/controller/crdregistration/BUILD index b902953bb81..ad9b5c2ea59 100644 --- a/pkg/master/controller/crdregistration/BUILD +++ b/pkg/master/controller/crdregistration/BUILD @@ -43,8 +43,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["crdregistration_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/master/controller/crdregistration", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", diff --git a/pkg/master/reconcilers/BUILD b/pkg/master/reconcilers/BUILD index 3fbb514b0e6..95f7d7ec631 100644 --- a/pkg/master/reconcilers/BUILD +++ b/pkg/master/reconcilers/BUILD @@ -30,8 +30,8 @@ go_library( go_test( name = "go_default_test", srcs = ["lease_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/master/reconcilers", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/master/tunneler/BUILD b/pkg/master/tunneler/BUILD index dcaa51b8ea7..6e817fe31c3 100644 --- a/pkg/master/tunneler/BUILD +++ b/pkg/master/tunneler/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["ssh_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/master/tunneler", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", diff --git a/pkg/printers/BUILD b/pkg/printers/BUILD index c59f547fc22..f4b90a98054 100644 --- a/pkg/printers/BUILD +++ b/pkg/printers/BUILD @@ -70,8 +70,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["humanreadable_test.go"], + embed = 
[":go_default_library"], importpath = "k8s.io/kubernetes/pkg/printers", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/printers/internalversion/BUILD b/pkg/printers/internalversion/BUILD index 522478a731e..eb2e66920a1 100644 --- a/pkg/printers/internalversion/BUILD +++ b/pkg/printers/internalversion/BUILD @@ -13,8 +13,8 @@ go_test( "printers_test.go", "sorted_resource_name_list_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/printers/internalversion", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/probe/exec/BUILD b/pkg/probe/exec/BUILD index 7d69fa359ec..223945cf080 100644 --- a/pkg/probe/exec/BUILD +++ b/pkg/probe/exec/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["exec_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/probe/exec", - library = ":go_default_library", deps = ["//pkg/probe:go_default_library"], ) diff --git a/pkg/probe/http/BUILD b/pkg/probe/http/BUILD index 7ef26023546..a86e7f981e0 100644 --- a/pkg/probe/http/BUILD +++ b/pkg/probe/http/BUILD @@ -21,8 +21,8 @@ go_library( go_test( name = "go_default_test", srcs = ["http_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/probe/http", - library = ":go_default_library", deps = ["//pkg/probe:go_default_library"], ) diff --git a/pkg/probe/tcp/BUILD b/pkg/probe/tcp/BUILD index 5238534016f..6ff7817654d 100644 --- a/pkg/probe/tcp/BUILD +++ b/pkg/probe/tcp/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["tcp_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/probe/tcp", - library = ":go_default_library", deps = ["//pkg/probe:go_default_library"], ) diff --git 
a/pkg/proxy/apis/kubeproxyconfig/validation/BUILD b/pkg/proxy/apis/kubeproxyconfig/validation/BUILD index 9737c96624a..319f8034534 100644 --- a/pkg/proxy/apis/kubeproxyconfig/validation/BUILD +++ b/pkg/proxy/apis/kubeproxyconfig/validation/BUILD @@ -35,8 +35,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation", - library = ":go_default_library", deps = [ "//pkg/proxy/apis/kubeproxyconfig:go_default_library", "//pkg/util/pointer:go_default_library", diff --git a/pkg/proxy/config/BUILD b/pkg/proxy/config/BUILD index a3f59aa45b2..4585bccddad 100644 --- a/pkg/proxy/config/BUILD +++ b/pkg/proxy/config/BUILD @@ -30,8 +30,8 @@ go_test( "api_test.go", "config_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/proxy/config", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", diff --git a/pkg/proxy/healthcheck/BUILD b/pkg/proxy/healthcheck/BUILD index a909cef8b54..badc9fdb7fd 100644 --- a/pkg/proxy/healthcheck/BUILD +++ b/pkg/proxy/healthcheck/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["healthcheck_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/proxy/healthcheck", - library = ":go_default_library", deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/proxy/iptables/BUILD b/pkg/proxy/iptables/BUILD index cd7e76524e6..64e1dc78a5c 100644 --- a/pkg/proxy/iptables/BUILD +++ b/pkg/proxy/iptables/BUILD @@ -39,8 +39,8 @@ go_library( go_test( name = "go_default_test", srcs = ["proxier_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/proxy/iptables", - library = ":go_default_library", deps = [ 
"//pkg/apis/core:go_default_library", "//pkg/proxy:go_default_library", diff --git a/pkg/proxy/ipvs/BUILD b/pkg/proxy/ipvs/BUILD index 520ffee5178..01b3fd4d6bc 100644 --- a/pkg/proxy/ipvs/BUILD +++ b/pkg/proxy/ipvs/BUILD @@ -12,8 +12,8 @@ go_test( "ipset_test.go", "proxier_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/proxy/ipvs", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/proxy:go_default_library", @@ -40,12 +40,41 @@ go_library( srcs = [ "ipset.go", "netlink.go", - "netlink_unsupported.go", "proxier.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "netlink_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "netlink_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "netlink_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "netlink_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "netlink_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "netlink_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "netlink_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "netlink_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "netlink_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "netlink_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "netlink_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/proxy/ipvs", @@ -73,7 +102,7 @@ go_library( "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/vishvananda/netlink:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", ], diff --git 
a/pkg/proxy/ipvs/testing/BUILD b/pkg/proxy/ipvs/testing/BUILD index 90c05cb0c11..0e5b33f4b8b 100644 --- a/pkg/proxy/ipvs/testing/BUILD +++ b/pkg/proxy/ipvs/testing/BUILD @@ -32,7 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["fake_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/proxy/ipvs/testing", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], ) diff --git a/pkg/proxy/userspace/BUILD b/pkg/proxy/userspace/BUILD index f533d0daa20..dc03a2daa02 100644 --- a/pkg/proxy/userspace/BUILD +++ b/pkg/proxy/userspace/BUILD @@ -13,11 +13,40 @@ go_library( "port_allocator.go", "proxier.go", "proxysocket.go", - "rlimit.go", "roundrobin.go", "udp_server.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "rlimit.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "rlimit.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "rlimit.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "rlimit.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "rlimit.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "rlimit.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "rlimit.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "rlimit.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "rlimit.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "rlimit.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "rlimit_windows.go", ], "//conditions:default": [], @@ -31,7 +60,6 @@ go_library( "//pkg/util/iptables:go_default_library", "//pkg/util/slice:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -39,7 +67,39 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), ) go_test( @@ -49,8 +109,8 @@ go_test( "proxier_test.go", "roundrobin_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/proxy/userspace", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/proxy:go_default_library", diff --git a/pkg/proxy/util/BUILD b/pkg/proxy/util/BUILD index 2956b521269..5d92739ec2d 100644 --- a/pkg/proxy/util/BUILD +++ b/pkg/proxy/util/BUILD @@ -27,8 +27,8 @@ go_test( "port_test.go", "utils_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/proxy/util", - library = 
":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/proxy/winkernel/BUILD b/pkg/proxy/winkernel/BUILD index 8fd2b1f1509..427371d9331 100644 --- a/pkg/proxy/winkernel/BUILD +++ b/pkg/proxy/winkernel/BUILD @@ -5,7 +5,7 @@ go_library( srcs = [ "metrics.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "proxier.go", ], "//conditions:default": [], @@ -15,7 +15,7 @@ go_library( deps = [ "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//pkg/api/service:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", diff --git a/pkg/proxy/winuserspace/BUILD b/pkg/proxy/winuserspace/BUILD index cd583ef2695..94c05e18749 100644 --- a/pkg/proxy/winuserspace/BUILD +++ b/pkg/proxy/winuserspace/BUILD @@ -39,8 +39,8 @@ go_test( "proxier_test.go", "roundrobin_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/proxy/winuserspace", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/proxy:go_default_library", diff --git a/pkg/quota/BUILD b/pkg/quota/BUILD index 7d5cd09601d..0ae4478e655 100644 --- a/pkg/quota/BUILD +++ b/pkg/quota/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["resources_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/quota", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/quota/evaluator/core/BUILD b/pkg/quota/evaluator/core/BUILD index 85d50d0855a..40a50ed5f64 100644 --- a/pkg/quota/evaluator/core/BUILD +++ b/pkg/quota/evaluator/core/BUILD @@ -48,8 +48,8 @@ go_test( 
"pods_test.go", "services_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/quota/evaluator/core", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/quota:go_default_library", diff --git a/pkg/registry/apps/controllerrevision/BUILD b/pkg/registry/apps/controllerrevision/BUILD index 24e6e13da2c..3d9b303ec03 100644 --- a/pkg/registry/apps/controllerrevision/BUILD +++ b/pkg/registry/apps/controllerrevision/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/apps/controllerrevision", - library = ":go_default_library", deps = [ "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/apps/controllerrevision/storage/BUILD b/pkg/registry/apps/controllerrevision/storage/BUILD index 083eb3f06d1..4e79bf9fec2 100644 --- a/pkg/registry/apps/controllerrevision/storage/BUILD +++ b/pkg/registry/apps/controllerrevision/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage", - library = ":go_default_library", deps = [ "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/apps/statefulset/BUILD b/pkg/registry/apps/statefulset/BUILD index deb73b96dc6..6b0b73a9c8b 100644 --- a/pkg/registry/apps/statefulset/BUILD +++ b/pkg/registry/apps/statefulset/BUILD @@ -38,8 +38,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/apps/statefulset", - library = ":go_default_library", deps = [ "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/apps/statefulset/storage/BUILD 
b/pkg/registry/apps/statefulset/storage/BUILD index 6bbc1eee204..da6e4a68788 100644 --- a/pkg/registry/apps/statefulset/storage/BUILD +++ b/pkg/registry/apps/statefulset/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/apps/statefulset/storage", - library = ":go_default_library", deps = [ "//pkg/apis/apps:go_default_library", "//pkg/apis/autoscaling:go_default_library", diff --git a/pkg/registry/authorization/subjectaccessreview/BUILD b/pkg/registry/authorization/subjectaccessreview/BUILD index fb4474b4398..569a8065188 100644 --- a/pkg/registry/authorization/subjectaccessreview/BUILD +++ b/pkg/registry/authorization/subjectaccessreview/BUILD @@ -38,8 +38,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["rest_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview", - library = ":go_default_library", deps = [ "//pkg/apis/authorization:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/pkg/registry/authorization/util/BUILD b/pkg/registry/authorization/util/BUILD index 0d1fc9be2de..285ea9dd9b3 100644 --- a/pkg/registry/authorization/util/BUILD +++ b/pkg/registry/authorization/util/BUILD @@ -33,8 +33,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/authorization/util", - library = ":go_default_library", deps = [ "//pkg/apis/authorization:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/pkg/registry/autoscaling/horizontalpodautoscaler/storage/BUILD b/pkg/registry/autoscaling/horizontalpodautoscaler/storage/BUILD index 209cfeeea2e..6883f986284 100644 --- a/pkg/registry/autoscaling/horizontalpodautoscaler/storage/BUILD +++ 
b/pkg/registry/autoscaling/horizontalpodautoscaler/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage", - library = ":go_default_library", deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/batch/cronjob/BUILD b/pkg/registry/batch/cronjob/BUILD index 4b037adf2d7..794a014ff95 100644 --- a/pkg/registry/batch/cronjob/BUILD +++ b/pkg/registry/batch/cronjob/BUILD @@ -29,8 +29,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/batch/cronjob", - library = ":go_default_library", deps = [ "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/batch/cronjob/storage/BUILD b/pkg/registry/batch/cronjob/storage/BUILD index c5c5c535c7f..0705136a135 100644 --- a/pkg/registry/batch/cronjob/storage/BUILD +++ b/pkg/registry/batch/cronjob/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/batch/cronjob/storage", - library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", "//pkg/apis/batch:go_default_library", diff --git a/pkg/registry/batch/job/BUILD b/pkg/registry/batch/job/BUILD index d5d3f23ffb3..c440213b87b 100644 --- a/pkg/registry/batch/job/BUILD +++ b/pkg/registry/batch/job/BUILD @@ -34,8 +34,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/batch/job", - library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", diff --git a/pkg/registry/batch/job/storage/BUILD 
b/pkg/registry/batch/job/storage/BUILD index 40d02ffecd8..393ecfa3d67 100644 --- a/pkg/registry/batch/job/storage/BUILD +++ b/pkg/registry/batch/job/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/batch/job/storage", - library = ":go_default_library", deps = [ "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/certificates/certificates/BUILD b/pkg/registry/certificates/certificates/BUILD index 91af9ed73cd..b4800e75e9b 100644 --- a/pkg/registry/certificates/certificates/BUILD +++ b/pkg/registry/certificates/certificates/BUILD @@ -32,8 +32,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/certificates/certificates", - library = ":go_default_library", deps = [ "//pkg/apis/certificates:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/registry/core/componentstatus/BUILD b/pkg/registry/core/componentstatus/BUILD index 48e5f6c73bf..640e3b9483e 100644 --- a/pkg/registry/core/componentstatus/BUILD +++ b/pkg/registry/core/componentstatus/BUILD @@ -33,8 +33,8 @@ go_test( "rest_test.go", "validator_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/componentstatus", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/probe:go_default_library", diff --git a/pkg/registry/core/configmap/BUILD b/pkg/registry/core/configmap/BUILD index 7af8141a14a..01985ff76c4 100644 --- a/pkg/registry/core/configmap/BUILD +++ b/pkg/registry/core/configmap/BUILD @@ -32,8 +32,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/configmap", - library = 
":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/registry/core/configmap/storage/BUILD b/pkg/registry/core/configmap/storage/BUILD index 948fa36104a..966388a5d23 100644 --- a/pkg/registry/core/configmap/storage/BUILD +++ b/pkg/registry/core/configmap/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/configmap/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/endpoint/storage/BUILD b/pkg/registry/core/endpoint/storage/BUILD index 93b2770d254..d4a4b4fcbc5 100644 --- a/pkg/registry/core/endpoint/storage/BUILD +++ b/pkg/registry/core/endpoint/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/endpoint/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/event/BUILD b/pkg/registry/core/event/BUILD index 8bdfa1d2d71..9e8b3ea38d6 100644 --- a/pkg/registry/core/event/BUILD +++ b/pkg/registry/core/event/BUILD @@ -32,8 +32,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/event", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/registry/core/event/storage/BUILD b/pkg/registry/core/event/storage/BUILD index efb61b4676f..990e1cfd2a8 100644 --- a/pkg/registry/core/event/storage/BUILD +++ b/pkg/registry/core/event/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = 
"go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/event/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/limitrange/storage/BUILD b/pkg/registry/core/limitrange/storage/BUILD index 6de922d9d2a..636d6244678 100644 --- a/pkg/registry/core/limitrange/storage/BUILD +++ b/pkg/registry/core/limitrange/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/limitrange/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/namespace/BUILD b/pkg/registry/core/namespace/BUILD index d9c43922006..82f4203eb92 100644 --- a/pkg/registry/core/namespace/BUILD +++ b/pkg/registry/core/namespace/BUILD @@ -36,8 +36,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/namespace", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/registry/core/namespace/storage/BUILD b/pkg/registry/core/namespace/storage/BUILD index 51a01d93ca8..1a3c6b2641c 100644 --- a/pkg/registry/core/namespace/storage/BUILD +++ b/pkg/registry/core/namespace/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/namespace/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/node/BUILD b/pkg/registry/core/node/BUILD index 
ad3fb4995df..d82f0c41a47 100644 --- a/pkg/registry/core/node/BUILD +++ b/pkg/registry/core/node/BUILD @@ -42,8 +42,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/node", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/registry/core/node/storage/BUILD b/pkg/registry/core/node/storage/BUILD index 1cb9081d261..d444a05f464 100644 --- a/pkg/registry/core/node/storage/BUILD +++ b/pkg/registry/core/node/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/node/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/kubelet/client:go_default_library", diff --git a/pkg/registry/core/persistentvolume/BUILD b/pkg/registry/core/persistentvolume/BUILD index b98e547e596..6f43c442fac 100644 --- a/pkg/registry/core/persistentvolume/BUILD +++ b/pkg/registry/core/persistentvolume/BUILD @@ -33,8 +33,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolume", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/registry/core/persistentvolume/storage/BUILD b/pkg/registry/core/persistentvolume/storage/BUILD index d8cf0d27fdc..7c2b907ee65 100644 --- a/pkg/registry/core/persistentvolume/storage/BUILD +++ b/pkg/registry/core/persistentvolume/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage", - library = ":go_default_library", deps = [ 
"//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/persistentvolumeclaim/BUILD b/pkg/registry/core/persistentvolumeclaim/BUILD index 6da3f612b1a..d4eba81e137 100644 --- a/pkg/registry/core/persistentvolumeclaim/BUILD +++ b/pkg/registry/core/persistentvolumeclaim/BUILD @@ -32,8 +32,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/registry/core/persistentvolumeclaim/storage/BUILD b/pkg/registry/core/persistentvolumeclaim/storage/BUILD index 0ef87b1507c..f8eb094f446 100644 --- a/pkg/registry/core/persistentvolumeclaim/storage/BUILD +++ b/pkg/registry/core/persistentvolumeclaim/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/pod/BUILD b/pkg/registry/core/pod/BUILD index cf35e0ba5bb..e124ee08fcc 100644 --- a/pkg/registry/core/pod/BUILD +++ b/pkg/registry/core/pod/BUILD @@ -42,8 +42,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/pod", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/registry/core/pod/rest/BUILD b/pkg/registry/core/pod/rest/BUILD index 122911086bc..dcdc7a925b1 100644 --- a/pkg/registry/core/pod/rest/BUILD +++ b/pkg/registry/core/pod/rest/BUILD @@ -35,8 +35,8 @@ 
go_library( go_test( name = "go_default_test", srcs = ["log_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/pod/rest", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/pod/storage/BUILD b/pkg/registry/core/pod/storage/BUILD index 8545fae9445..a005dc937f0 100644 --- a/pkg/registry/core/pod/storage/BUILD +++ b/pkg/registry/core/pod/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/pod/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/podtemplate/storage/BUILD b/pkg/registry/core/podtemplate/storage/BUILD index 103296b4a23..5bce6101070 100644 --- a/pkg/registry/core/podtemplate/storage/BUILD +++ b/pkg/registry/core/podtemplate/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/podtemplate/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/replicationcontroller/BUILD b/pkg/registry/core/replicationcontroller/BUILD index 931c1bb2877..2e0a9e0aa7a 100644 --- a/pkg/registry/core/replicationcontroller/BUILD +++ b/pkg/registry/core/replicationcontroller/BUILD @@ -39,8 +39,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/replicationcontroller", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git 
a/pkg/registry/core/replicationcontroller/storage/BUILD b/pkg/registry/core/replicationcontroller/storage/BUILD index d5177a1ff90..3d75f1f6004 100644 --- a/pkg/registry/core/replicationcontroller/storage/BUILD +++ b/pkg/registry/core/replicationcontroller/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage", - library = ":go_default_library", deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/core/resourcequota/BUILD b/pkg/registry/core/resourcequota/BUILD index 77150f2f29b..b4b79d00985 100644 --- a/pkg/registry/core/resourcequota/BUILD +++ b/pkg/registry/core/resourcequota/BUILD @@ -27,8 +27,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/resourcequota", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/registry/core/resourcequota/storage/BUILD b/pkg/registry/core/resourcequota/storage/BUILD index 38b63c3b8ab..f4781b7a963 100644 --- a/pkg/registry/core/resourcequota/storage/BUILD +++ b/pkg/registry/core/resourcequota/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/resourcequota/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/rest/BUILD b/pkg/registry/core/rest/BUILD index 9e81ccda779..cde65909195 100644 --- a/pkg/registry/core/rest/BUILD +++ b/pkg/registry/core/rest/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = 
["storage_core_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/rest", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library", diff --git a/pkg/registry/core/secret/BUILD b/pkg/registry/core/secret/BUILD index 48332003ed0..235615911ac 100644 --- a/pkg/registry/core/secret/BUILD +++ b/pkg/registry/core/secret/BUILD @@ -37,8 +37,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/secret", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/pkg/registry/core/secret/storage/BUILD b/pkg/registry/core/secret/storage/BUILD index 8e73a457f1c..b1b981e1096 100644 --- a/pkg/registry/core/secret/storage/BUILD +++ b/pkg/registry/core/secret/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/secret/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/service/BUILD b/pkg/registry/core/service/BUILD index ca6517abf27..590aac0bd3d 100644 --- a/pkg/registry/core/service/BUILD +++ b/pkg/registry/core/service/BUILD @@ -50,8 +50,8 @@ go_test( "rest_test.go", "strategy_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/service", - library = ":go_default_library", deps = [ "//pkg/api/service:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/core/service/allocator/BUILD b/pkg/registry/core/service/allocator/BUILD index ef863f8c3a2..ff4bab7d02b 100644 --- 
a/pkg/registry/core/service/allocator/BUILD +++ b/pkg/registry/core/service/allocator/BUILD @@ -22,8 +22,8 @@ go_test( "bitmap_test.go", "utils_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/service/allocator", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], ) diff --git a/pkg/registry/core/service/allocator/storage/BUILD b/pkg/registry/core/service/allocator/storage/BUILD index 18a175e63bb..55ad4895e2c 100644 --- a/pkg/registry/core/service/allocator/storage/BUILD +++ b/pkg/registry/core/service/allocator/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/service/allocator/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/core/service/allocator:go_default_library", diff --git a/pkg/registry/core/service/ipallocator/BUILD b/pkg/registry/core/service/ipallocator/BUILD index 17b5d492552..eef62fec40b 100644 --- a/pkg/registry/core/service/ipallocator/BUILD +++ b/pkg/registry/core/service/ipallocator/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["allocator_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/service/ipallocator", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/pkg/registry/core/service/ipallocator/controller/BUILD b/pkg/registry/core/service/ipallocator/controller/BUILD index 43498a212ec..9962ffb437e 100644 --- a/pkg/registry/core/service/ipallocator/controller/BUILD +++ b/pkg/registry/core/service/ipallocator/controller/BUILD @@ -30,8 +30,8 @@ go_library( go_test( name = "go_default_test", srcs = ["repair_test.go"], + embed = [":go_default_library"], importpath = 
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", diff --git a/pkg/registry/core/service/ipallocator/storage/BUILD b/pkg/registry/core/service/ipallocator/storage/BUILD index e836d56a911..76902d0e553 100644 --- a/pkg/registry/core/service/ipallocator/storage/BUILD +++ b/pkg/registry/core/service/ipallocator/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/service/ipallocator/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/core/service/allocator:go_default_library", diff --git a/pkg/registry/core/service/portallocator/BUILD b/pkg/registry/core/service/portallocator/BUILD index 4d038b33add..e51791669ae 100644 --- a/pkg/registry/core/service/portallocator/BUILD +++ b/pkg/registry/core/service/portallocator/BUILD @@ -24,8 +24,8 @@ go_library( go_test( name = "go_default_test", srcs = ["allocator_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/service/portallocator", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", diff --git a/pkg/registry/core/service/portallocator/controller/BUILD b/pkg/registry/core/service/portallocator/controller/BUILD index 3483044bcd7..39c66aefb7a 100644 --- a/pkg/registry/core/service/portallocator/controller/BUILD +++ b/pkg/registry/core/service/portallocator/controller/BUILD @@ -31,8 +31,8 @@ go_library( go_test( name = "go_default_test", srcs = ["repair_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller", - library = ":go_default_library", deps = [ 
"//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", diff --git a/pkg/registry/core/service/storage/BUILD b/pkg/registry/core/service/storage/BUILD index 49826b6330f..97af1d4f0be 100644 --- a/pkg/registry/core/service/storage/BUILD +++ b/pkg/registry/core/service/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/service/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/core/serviceaccount/storage/BUILD b/pkg/registry/core/serviceaccount/storage/BUILD index 441485c32ef..002a862e8f7 100644 --- a/pkg/registry/core/serviceaccount/storage/BUILD +++ b/pkg/registry/core/serviceaccount/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/extensions/controller/storage/BUILD b/pkg/registry/extensions/controller/storage/BUILD index 8f4cf02eec6..2f40b995f78 100644 --- a/pkg/registry/extensions/controller/storage/BUILD +++ b/pkg/registry/extensions/controller/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/extensions/controller/storage", - library = ":go_default_library", deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/extensions/daemonset/BUILD b/pkg/registry/extensions/daemonset/BUILD index 76dcae9bd98..9d2773c6eb3 100644 --- 
a/pkg/registry/extensions/daemonset/BUILD +++ b/pkg/registry/extensions/daemonset/BUILD @@ -34,8 +34,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/extensions/daemonset", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/registry/extensions/daemonset/storage/BUILD b/pkg/registry/extensions/daemonset/storage/BUILD index b1dd9b2ceb8..daeb8305f34 100644 --- a/pkg/registry/extensions/daemonset/storage/BUILD +++ b/pkg/registry/extensions/daemonset/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/registry/extensions/deployment/BUILD b/pkg/registry/extensions/deployment/BUILD index 850dda20a54..8d365a15109 100644 --- a/pkg/registry/extensions/deployment/BUILD +++ b/pkg/registry/extensions/deployment/BUILD @@ -38,8 +38,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/extensions/deployment", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/registry/extensions/deployment/storage/BUILD b/pkg/registry/extensions/deployment/storage/BUILD index 81b341d8208..3663653fc26 100644 --- a/pkg/registry/extensions/deployment/storage/BUILD +++ b/pkg/registry/extensions/deployment/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = 
"k8s.io/kubernetes/pkg/registry/extensions/deployment/storage", - library = ":go_default_library", deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/extensions/ingress/BUILD b/pkg/registry/extensions/ingress/BUILD index 277a12e4a8a..d93aabfddb0 100644 --- a/pkg/registry/extensions/ingress/BUILD +++ b/pkg/registry/extensions/ingress/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/extensions/ingress", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/registry/extensions/ingress/storage/BUILD b/pkg/registry/extensions/ingress/storage/BUILD index cc12d254a9f..78b07c4abb1 100644 --- a/pkg/registry/extensions/ingress/storage/BUILD +++ b/pkg/registry/extensions/ingress/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/extensions/ingress/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/registry/extensions/podsecuritypolicy/storage/BUILD b/pkg/registry/extensions/podsecuritypolicy/storage/BUILD index b57880b72fa..fb7ecda243e 100644 --- a/pkg/registry/extensions/podsecuritypolicy/storage/BUILD +++ b/pkg/registry/extensions/podsecuritypolicy/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage", - library = ":go_default_library", deps = [ "//pkg/apis/extensions:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/extensions/replicaset/BUILD 
b/pkg/registry/extensions/replicaset/BUILD index fd4e7eaa367..5d7c8c4b814 100644 --- a/pkg/registry/extensions/replicaset/BUILD +++ b/pkg/registry/extensions/replicaset/BUILD @@ -42,8 +42,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/extensions/replicaset", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/registry/extensions/replicaset/storage/BUILD b/pkg/registry/extensions/replicaset/storage/BUILD index 77500a739a8..4c65ff3644a 100644 --- a/pkg/registry/extensions/replicaset/storage/BUILD +++ b/pkg/registry/extensions/replicaset/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage", - library = ":go_default_library", deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/registry/networking/networkpolicy/storage/BUILD b/pkg/registry/networking/networkpolicy/storage/BUILD index 1570fd609d4..b8d6a19af8f 100644 --- a/pkg/registry/networking/networkpolicy/storage/BUILD +++ b/pkg/registry/networking/networkpolicy/storage/BUILD @@ -36,8 +36,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/networking:go_default_library", diff --git a/pkg/registry/policy/poddisruptionbudget/BUILD b/pkg/registry/policy/poddisruptionbudget/BUILD index e0be96921f5..d7e8e00e6cb 100644 --- a/pkg/registry/policy/poddisruptionbudget/BUILD +++ b/pkg/registry/policy/poddisruptionbudget/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = 
"go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget", - library = ":go_default_library", deps = [ "//pkg/apis/policy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/registry/policy/poddisruptionbudget/storage/BUILD b/pkg/registry/policy/poddisruptionbudget/storage/BUILD index a4ce1e3d40d..fc91a4cde03 100644 --- a/pkg/registry/policy/poddisruptionbudget/storage/BUILD +++ b/pkg/registry/policy/poddisruptionbudget/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage", - library = ":go_default_library", deps = [ "//pkg/apis/policy:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/rbac/BUILD b/pkg/registry/rbac/BUILD index 10bb6820839..353211bf0da 100644 --- a/pkg/registry/rbac/BUILD +++ b/pkg/registry/rbac/BUILD @@ -50,8 +50,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/rbac", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", diff --git a/pkg/registry/rbac/reconciliation/BUILD b/pkg/registry/rbac/reconciliation/BUILD index 72d6ae1d5b1..f26d72e35de 100644 --- a/pkg/registry/rbac/reconciliation/BUILD +++ b/pkg/registry/rbac/reconciliation/BUILD @@ -12,8 +12,8 @@ go_test( "reconcile_role_test.go", "reconcile_rolebindings_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/rbac/reconciliation", - library = ":go_default_library", deps = [ "//pkg/apis/core/helper:go_default_library", "//pkg/apis/rbac:go_default_library", diff --git a/pkg/registry/rbac/validation/BUILD 
b/pkg/registry/rbac/validation/BUILD index 7618b8ff4ec..b48ca3d71c5 100644 --- a/pkg/registry/rbac/validation/BUILD +++ b/pkg/registry/rbac/validation/BUILD @@ -13,8 +13,8 @@ go_test( "policy_comparator_test.go", "rule_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/rbac/validation", - library = ":go_default_library", deps = [ "//pkg/apis/rbac:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/registry/scheduling/priorityclass/BUILD b/pkg/registry/scheduling/priorityclass/BUILD index 0ed675fcd9e..f8b8351970b 100644 --- a/pkg/registry/scheduling/priorityclass/BUILD +++ b/pkg/registry/scheduling/priorityclass/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass", - library = ":go_default_library", deps = [ "//pkg/apis/scheduling:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/registry/scheduling/priorityclass/storage/BUILD b/pkg/registry/scheduling/priorityclass/storage/BUILD index cec9fe2e28f..bf7a5ca6347 100644 --- a/pkg/registry/scheduling/priorityclass/storage/BUILD +++ b/pkg/registry/scheduling/priorityclass/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage", - library = ":go_default_library", deps = [ "//pkg/apis/scheduling:go_default_library", "//pkg/registry/registrytest:go_default_library", diff --git a/pkg/registry/settings/podpreset/storage/BUILD b/pkg/registry/settings/podpreset/storage/BUILD index 0057bb4acac..a53d46c8639 100644 --- a/pkg/registry/settings/podpreset/storage/BUILD +++ b/pkg/registry/settings/podpreset/storage/BUILD @@ -35,8 +35,8 @@ filegroup( go_test( name = "go_default_test", srcs 
= ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/settings/podpreset/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/settings:go_default_library", diff --git a/pkg/registry/storage/storageclass/BUILD b/pkg/registry/storage/storageclass/BUILD index d8d1d0a375c..611efaee3ed 100644 --- a/pkg/registry/storage/storageclass/BUILD +++ b/pkg/registry/storage/storageclass/BUILD @@ -30,8 +30,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/storage/storageclass", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", diff --git a/pkg/registry/storage/storageclass/storage/BUILD b/pkg/registry/storage/storageclass/storage/BUILD index 1d52ca0fb37..a72f4a80875 100644 --- a/pkg/registry/storage/storageclass/storage/BUILD +++ b/pkg/registry/storage/storageclass/storage/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/storage/storageclass/storage", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", diff --git a/pkg/registry/storage/volumeattachment/BUILD b/pkg/registry/storage/volumeattachment/BUILD index 7c014068604..8170812f29d 100644 --- a/pkg/registry/storage/volumeattachment/BUILD +++ b/pkg/registry/storage/volumeattachment/BUILD @@ -22,8 +22,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/storage/volumeattachment", - library = ":go_default_library", deps = [ "//pkg/apis/storage:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git 
a/pkg/registry/storage/volumeattachment/storage/BUILD b/pkg/registry/storage/volumeattachment/storage/BUILD index 5601437087c..1442fae44e6 100644 --- a/pkg/registry/storage/volumeattachment/storage/BUILD +++ b/pkg/registry/storage/volumeattachment/storage/BUILD @@ -17,8 +17,8 @@ go_library( go_test( name = "go_default_test", srcs = ["storage_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage", - library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", "//pkg/apis/storage:go_default_library", diff --git a/pkg/security/apparmor/BUILD b/pkg/security/apparmor/BUILD index ad686ff2ab2..70877c9fcee 100644 --- a/pkg/security/apparmor/BUILD +++ b/pkg/security/apparmor/BUILD @@ -11,8 +11,39 @@ go_library( srcs = [ "helpers.go", "validate.go", - "validate_disabled.go", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "validate_disabled.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "validate_disabled.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "validate_disabled.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "validate_disabled.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "validate_disabled.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "validate_disabled.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "validate_disabled.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "validate_disabled.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "validate_disabled.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "validate_disabled.go", + ], + "//conditions:default": [], + }), importpath = "k8s.io/kubernetes/pkg/security/apparmor", deps = [ "//pkg/features:go_default_library", @@ -29,8 +60,8 @@ go_test( data = [ "testdata/profiles", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/security/apparmor", - library = ":go_default_library", deps = [ 
"//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/security/podsecuritypolicy/BUILD b/pkg/security/podsecuritypolicy/BUILD index e76448d470d..06e0e0af5fd 100644 --- a/pkg/security/podsecuritypolicy/BUILD +++ b/pkg/security/podsecuritypolicy/BUILD @@ -35,8 +35,8 @@ go_library( go_test( name = "go_default_test", srcs = ["provider_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1:go_default_library", diff --git a/pkg/security/podsecuritypolicy/apparmor/BUILD b/pkg/security/podsecuritypolicy/apparmor/BUILD index d1cf962c8fa..28f2fcb440e 100644 --- a/pkg/security/podsecuritypolicy/apparmor/BUILD +++ b/pkg/security/podsecuritypolicy/apparmor/BUILD @@ -21,8 +21,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/security/apparmor:go_default_library", diff --git a/pkg/security/podsecuritypolicy/capabilities/BUILD b/pkg/security/podsecuritypolicy/capabilities/BUILD index 2d307a808d2..931ae7e3935 100644 --- a/pkg/security/podsecuritypolicy/capabilities/BUILD +++ b/pkg/security/podsecuritypolicy/capabilities/BUILD @@ -25,8 +25,8 @@ go_library( go_test( name = "go_default_test", srcs = ["capabilities_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/security/podsecuritypolicy/group/BUILD b/pkg/security/podsecuritypolicy/group/BUILD index 2a7bbe66e3f..601e7476dee 100644 --- 
a/pkg/security/podsecuritypolicy/group/BUILD +++ b/pkg/security/podsecuritypolicy/group/BUILD @@ -29,8 +29,8 @@ go_test( "mustrunas_test.go", "runasany_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/group", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/security/podsecuritypolicy/seccomp/BUILD b/pkg/security/podsecuritypolicy/seccomp/BUILD index c49f6fc6996..995943e4041 100644 --- a/pkg/security/podsecuritypolicy/seccomp/BUILD +++ b/pkg/security/podsecuritypolicy/seccomp/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/security/podsecuritypolicy/selinux/BUILD b/pkg/security/podsecuritypolicy/selinux/BUILD index 97f5e5292c0..628436d8163 100644 --- a/pkg/security/podsecuritypolicy/selinux/BUILD +++ b/pkg/security/podsecuritypolicy/selinux/BUILD @@ -28,8 +28,8 @@ go_test( "mustrunas_test.go", "runasany_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/security/podsecuritypolicy/sysctl/BUILD b/pkg/security/podsecuritypolicy/sysctl/BUILD index dbd473ec837..63001584a90 100644 --- a/pkg/security/podsecuritypolicy/sysctl/BUILD +++ b/pkg/security/podsecuritypolicy/sysctl/BUILD @@ -23,8 +23,8 @@ go_library( go_test( name = "go_default_test", srcs = ["mustmatchpatterns_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl", - 
library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", diff --git a/pkg/security/podsecuritypolicy/user/BUILD b/pkg/security/podsecuritypolicy/user/BUILD index f4e57284ad0..f6c2e123548 100644 --- a/pkg/security/podsecuritypolicy/user/BUILD +++ b/pkg/security/podsecuritypolicy/user/BUILD @@ -31,8 +31,8 @@ go_test( "nonroot_test.go", "runasany_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/user", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/security/podsecuritypolicy/util/BUILD b/pkg/security/podsecuritypolicy/util/BUILD index 118784e9f0f..5515c016352 100644 --- a/pkg/security/podsecuritypolicy/util/BUILD +++ b/pkg/security/podsecuritypolicy/util/BUILD @@ -23,8 +23,8 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/securitycontext/BUILD b/pkg/securitycontext/BUILD index fa4eb424a0e..50d0e5eda41 100644 --- a/pkg/securitycontext/BUILD +++ b/pkg/securitycontext/BUILD @@ -27,8 +27,8 @@ go_test( "accessors_test.go", "util_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/securitycontext", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/ssh/BUILD b/pkg/ssh/BUILD index 28794b702c4..d1b42b6b4f5 100644 --- a/pkg/ssh/BUILD +++ b/pkg/ssh/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["ssh_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/ssh", - library = ":go_default_library", deps = [ 
"//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/crypto/ssh:go_default_library", diff --git a/pkg/util/async/BUILD b/pkg/util/async/BUILD index a00d4843d7b..5cb7bfa0722 100644 --- a/pkg/util/async/BUILD +++ b/pkg/util/async/BUILD @@ -25,8 +25,8 @@ go_test( "bounded_frequency_runner_test.go", "runner_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/async", - library = ":go_default_library", ) filegroup( diff --git a/pkg/util/bandwidth/BUILD b/pkg/util/bandwidth/BUILD index f440a61cef4..af79e9d87f4 100644 --- a/pkg/util/bandwidth/BUILD +++ b/pkg/util/bandwidth/BUILD @@ -12,19 +12,48 @@ go_library( "doc.go", "fake_shaper.go", "interfaces.go", - "unsupported.go", "utils.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/bandwidth", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", 
"//vendor/k8s.io/utils/exec:go_default_library", @@ -38,19 +67,19 @@ go_test( srcs = [ "utils_test.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "linux_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/bandwidth", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", ], diff --git a/pkg/util/config/BUILD b/pkg/util/config/BUILD index 68bf1ccc107..6567106eea0 100644 --- a/pkg/util/config/BUILD +++ b/pkg/util/config/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["config_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/config", - library = ":go_default_library", ) filegroup( diff --git a/pkg/util/configz/BUILD b/pkg/util/configz/BUILD index 2fbc52d4c92..cf5a458b9fd 100644 --- a/pkg/util/configz/BUILD +++ b/pkg/util/configz/BUILD @@ -15,8 +15,8 @@ go_library( go_test( name = "go_default_test", srcs = ["configz_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/configz", - library = ":go_default_library", ) filegroup( diff --git a/pkg/util/dbus/BUILD b/pkg/util/dbus/BUILD index d596d30649f..e0079ff6a64 100644 --- a/pkg/util/dbus/BUILD +++ b/pkg/util/dbus/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["dbus_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/dbus", - library = ":go_default_library", deps = ["//vendor/github.com/godbus/dbus:go_default_library"], ) diff --git 
a/pkg/util/ebtables/BUILD b/pkg/util/ebtables/BUILD index 6a9f7f0a71e..68fb9b3ad52 100644 --- a/pkg/util/ebtables/BUILD +++ b/pkg/util/ebtables/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["ebtables_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/ebtables", - library = ":go_default_library", deps = [ "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", diff --git a/pkg/util/env/BUILD b/pkg/util/env/BUILD index 3e864094f69..d1d623aaf1c 100644 --- a/pkg/util/env/BUILD +++ b/pkg/util/env/BUILD @@ -15,8 +15,8 @@ go_library( go_test( name = "go_default_test", srcs = ["env_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/env", - library = ":go_default_library", deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], ) diff --git a/pkg/util/file/BUILD b/pkg/util/file/BUILD index 325d1da146e..059895a66b4 100644 --- a/pkg/util/file/BUILD +++ b/pkg/util/file/BUILD @@ -28,8 +28,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["file_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/file", - library = ":go_default_library", deps = [ "//vendor/github.com/spf13/afero:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/util/flock/BUILD b/pkg/util/flock/BUILD index ef9212a2075..040ed5d0c97 100644 --- a/pkg/util/flock/BUILD +++ b/pkg/util/flock/BUILD @@ -7,23 +7,60 @@ load( go_library( name = "go_default_library", - srcs = [ - "flock_other.go", - ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "flock_other.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ "flock_unix.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ "flock_unix.go", ], + 
"@io_bazel_rules_go//go/platform:freebsd": [ + "flock_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "flock_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "flock_other.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "flock_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "flock_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "flock_other.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "flock_other.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "flock_other.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/flock", deps = select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/pkg/util/goroutinemap/BUILD b/pkg/util/goroutinemap/BUILD index fe6895f0a6c..1085fe19d99 100644 --- a/pkg/util/goroutinemap/BUILD +++ b/pkg/util/goroutinemap/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["goroutinemap_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/goroutinemap", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library"], ) diff --git a/pkg/util/hash/BUILD b/pkg/util/hash/BUILD index bafca6ba8bc..f719b40347f 100644 --- 
a/pkg/util/hash/BUILD +++ b/pkg/util/hash/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["hash_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/hash", - library = ":go_default_library", deps = ["//vendor/github.com/davecgh/go-spew/spew:go_default_library"], ) diff --git a/pkg/util/ipconfig/BUILD b/pkg/util/ipconfig/BUILD index f452283eab7..af4bf61b044 100644 --- a/pkg/util/ipconfig/BUILD +++ b/pkg/util/ipconfig/BUILD @@ -22,8 +22,8 @@ go_library( go_test( name = "go_default_test", srcs = ["ipconfig_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/ipconfig", - library = ":go_default_library", deps = ["//vendor/k8s.io/utils/exec:go_default_library"], ) diff --git a/pkg/util/ipset/BUILD b/pkg/util/ipset/BUILD index 20f172c010e..9baf26f5809 100644 --- a/pkg/util/ipset/BUILD +++ b/pkg/util/ipset/BUILD @@ -14,8 +14,8 @@ go_library( go_test( name = "go_default_test", srcs = ["ipset_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/ipset", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/pkg/util/ipset/testing/BUILD b/pkg/util/ipset/testing/BUILD index 001a6887b23..43a42038c9b 100644 --- a/pkg/util/ipset/testing/BUILD +++ b/pkg/util/ipset/testing/BUILD @@ -28,8 +28,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["fake_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/ipset/testing", - library = ":go_default_library", deps = [ "//pkg/util/ipset:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/pkg/util/iptables/BUILD b/pkg/util/iptables/BUILD index 0f3dbcef4bc..feb59d5c9fb 100644 --- a/pkg/util/iptables/BUILD +++ b/pkg/util/iptables/BUILD @@ -11,12 +11,41 @@ go_library( srcs = [ "doc.go", "iptables.go", - 
"iptables_unsupported.go", "save_restore.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "iptables_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "iptables_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "iptables_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "iptables_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "iptables_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "iptables_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "iptables_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "iptables_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "iptables_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "iptables_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "iptables_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/iptables", @@ -28,7 +57,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", @@ -40,15 +69,15 @@ go_library( go_test( name = "go_default_test", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "iptables_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/iptables", - library = ":go_default_library", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ 
"//pkg/util/dbus:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/pkg/util/ipvs/BUILD b/pkg/util/ipvs/BUILD index 21c27d219c7..252abb61296 100644 --- a/pkg/util/ipvs/BUILD +++ b/pkg/util/ipvs/BUILD @@ -11,15 +11,15 @@ go_test( srcs = [ "ipvs_test.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "ipvs_linux_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/ipvs", - library = ":go_default_library", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", ], "//conditions:default": [], @@ -30,20 +30,78 @@ go_library( name = "go_default_library", srcs = [ "ipvs.go", - "ipvs_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "ipvs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "ipvs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "ipvs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "ipvs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "ipvs_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "ipvs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "ipvs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "ipvs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "ipvs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "ipvs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "ipvs_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/ipvs", - deps = [ - "//vendor/k8s.io/utils/exec:go_default_library", - ] + select({ 
- "@io_bazel_rules_go//go/platform:linux_amd64": [ + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/k8s.io/utils/exec:go_default_library", ], "//conditions:default": [], }), diff --git a/pkg/util/ipvs/testing/BUILD b/pkg/util/ipvs/testing/BUILD index cb64dd03d74..8d1dae2ca5d 100644 --- a/pkg/util/ipvs/testing/BUILD +++ b/pkg/util/ipvs/testing/BUILD @@ -29,7 +29,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["fake_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/ipvs/testing", - library = ":go_default_library", deps = ["//pkg/util/ipvs:go_default_library"], ) diff --git a/pkg/util/keymutex/BUILD b/pkg/util/keymutex/BUILD index c954729108d..1f0d644afad 100644 --- a/pkg/util/keymutex/BUILD +++ b/pkg/util/keymutex/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = 
"go_default_test", srcs = ["keymutex_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/keymutex", - library = ":go_default_library", ) filegroup( diff --git a/pkg/util/labels/BUILD b/pkg/util/labels/BUILD index 96f42ae579d..1438ee12451 100644 --- a/pkg/util/labels/BUILD +++ b/pkg/util/labels/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["labels_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/labels", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], ) diff --git a/pkg/util/limitwriter/BUILD b/pkg/util/limitwriter/BUILD index e04f353a41b..e29c8910078 100644 --- a/pkg/util/limitwriter/BUILD +++ b/pkg/util/limitwriter/BUILD @@ -18,8 +18,8 @@ go_library( go_test( name = "go_default_test", srcs = ["limitwriter_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/limitwriter", - library = ":go_default_library", ) filegroup( diff --git a/pkg/util/metrics/BUILD b/pkg/util/metrics/BUILD index d0629b2b359..f14811992a6 100644 --- a/pkg/util/metrics/BUILD +++ b/pkg/util/metrics/BUILD @@ -21,8 +21,8 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/metrics", - library = ":go_default_library", deps = [ "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", ], diff --git a/pkg/util/mount/BUILD b/pkg/util/mount/BUILD index af94fd0380e..4cc9f1c27d5 100644 --- a/pkg/util/mount/BUILD +++ b/pkg/util/mount/BUILD @@ -11,19 +11,63 @@ go_library( srcs = [ "doc.go", "exec.go", - "exec_mount_unsupported.go", "fake.go", "mount.go", - "mount_unsupported.go", - "nsenter_mount_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "exec_mount_unsupported.go", + "mount_unsupported.go", + 
"nsenter_mount_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "exec_mount_unsupported.go", + "mount_unsupported.go", + "nsenter_mount_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "exec_mount_unsupported.go", + "mount_unsupported.go", + "nsenter_mount_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "exec_mount_unsupported.go", + "mount_unsupported.go", + "nsenter_mount_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "exec_mount.go", "mount_linux.go", "nsenter_mount.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "exec_mount_unsupported.go", + "mount_unsupported.go", + "nsenter_mount_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "exec_mount_unsupported.go", + "mount_unsupported.go", + "nsenter_mount_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "exec_mount_unsupported.go", + "mount_unsupported.go", + "nsenter_mount_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "exec_mount_unsupported.go", + "mount_unsupported.go", + "nsenter_mount_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "exec_mount_unsupported.go", + "mount_unsupported.go", + "nsenter_mount_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "exec_mount_unsupported.go", "mount_windows.go", + "nsenter_mount_unsupported.go", ], "//conditions:default": [], }), @@ -32,7 +76,7 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//pkg/util/io:go_default_library", "//pkg/util/nsenter:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", @@ -47,19 +91,21 @@ go_test( srcs = [ "safe_format_and_mount_test.go", ] + select({ - 
"@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "exec_mount_test.go", "mount_linux_test.go", "nsenter_mount_test.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "mount_windows_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/mount", - library = ":go_default_library", - deps = ["//vendor/k8s.io/utils/exec/testing:go_default_library"], + deps = [ + "//vendor/k8s.io/utils/exec/testing:go_default_library", + ], ) filegroup( diff --git a/pkg/util/net/sets/BUILD b/pkg/util/net/sets/BUILD index 413f5e39053..035282d5c13 100644 --- a/pkg/util/net/sets/BUILD +++ b/pkg/util/net/sets/BUILD @@ -18,8 +18,8 @@ go_library( go_test( name = "go_default_test", srcs = ["ipnet_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/net/sets", - library = ":go_default_library", ) filegroup( diff --git a/pkg/util/netsh/BUILD b/pkg/util/netsh/BUILD index 88c4ef7de78..2781e51d3cf 100644 --- a/pkg/util/netsh/BUILD +++ b/pkg/util/netsh/BUILD @@ -38,8 +38,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["netsh_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/netsh", - library = ":go_default_library", deps = [ "//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/util/node/BUILD b/pkg/util/node/BUILD index 6ce01d7e6fb..5188b9e6f80 100644 --- a/pkg/util/node/BUILD +++ b/pkg/util/node/BUILD @@ -26,8 +26,8 @@ go_library( go_test( name = "go_default_test", srcs = ["node_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/node", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/util/nsenter/BUILD b/pkg/util/nsenter/BUILD index 
b3f2d2b031d..988fef01b59 100644 --- a/pkg/util/nsenter/BUILD +++ b/pkg/util/nsenter/BUILD @@ -2,21 +2,78 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "nsenter_unsupported.go", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "nsenter_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "nsenter_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "nsenter_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "nsenter_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "nsenter.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "nsenter_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "nsenter_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "nsenter_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "nsenter_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "nsenter_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "nsenter_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/nsenter", visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/utils/exec:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/golang/glog:go_default_library", + 
"//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/k8s.io/utils/exec:go_default_library", ], "//conditions:default": [], }), diff --git a/pkg/util/oom/BUILD b/pkg/util/oom/BUILD index 9e027c7f482..127a3bdb4c9 100644 --- a/pkg/util/oom/BUILD +++ b/pkg/util/oom/BUILD @@ -12,16 +12,45 @@ go_library( "doc.go", "oom.go", "oom_fake.go", - "oom_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "oom_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "oom_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "oom_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "oom_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "oom_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "oom_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "oom_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "oom_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "oom_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "oom_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "oom_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/oom", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": 
[ "//pkg/kubelet/cm/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], @@ -32,15 +61,15 @@ go_library( go_test( name = "go_default_test", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "oom_linux_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/oom", - library = ":go_default_library", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/stretchr/testify/assert:go_default_library", ], "//conditions:default": [], diff --git a/pkg/util/parsers/BUILD b/pkg/util/parsers/BUILD index 7070f4b5f89..a60c591351e 100644 --- a/pkg/util/parsers/BUILD +++ b/pkg/util/parsers/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["parsers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/parsers", - library = ":go_default_library", ) filegroup( diff --git a/pkg/util/pointer/BUILD b/pkg/util/pointer/BUILD index 57cde0c2f07..615acda2dac 100644 --- a/pkg/util/pointer/BUILD +++ b/pkg/util/pointer/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["pointer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/pointer", - library = ":go_default_library", ) go_library( diff --git a/pkg/util/procfs/BUILD b/pkg/util/procfs/BUILD index ea7c3be29f8..2f486249167 100644 --- a/pkg/util/procfs/BUILD +++ b/pkg/util/procfs/BUILD @@ -12,16 +12,45 @@ go_library( "doc.go", "procfs.go", "procfs_fake.go", - "procfs_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "procfs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "procfs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "procfs_unsupported.go", + ], + 
"@io_bazel_rules_go//go/platform:freebsd": [ + "procfs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "procfs_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "procfs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "procfs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "procfs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "procfs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "procfs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "procfs_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/procfs", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", ], @@ -32,7 +61,7 @@ go_library( go_test( name = "go_default_test", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "procfs_linux_test.go", ], "//conditions:default": [], @@ -40,10 +69,10 @@ go_test( data = [ "example_proc_cgroup", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/procfs", - library = ":go_default_library", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/stretchr/testify/assert:go_default_library", ], "//conditions:default": [], diff --git a/pkg/util/removeall/BUILD b/pkg/util/removeall/BUILD index 54d7332eb57..682b646bada 100644 --- a/pkg/util/removeall/BUILD +++ b/pkg/util/removeall/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["removeall_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/removeall", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", 
"//vendor/k8s.io/client-go/util/testing:go_default_library", diff --git a/pkg/util/resizefs/BUILD b/pkg/util/resizefs/BUILD index aaa1f9aa5f4..6301c603a99 100644 --- a/pkg/util/resizefs/BUILD +++ b/pkg/util/resizefs/BUILD @@ -2,23 +2,80 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "resizefs_unsupported.go", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "resizefs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "resizefs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "resizefs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "resizefs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "resizefs_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "resizefs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "resizefs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "resizefs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "resizefs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "resizefs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "resizefs_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/resizefs", visibility = ["//visibility:public"], - deps = [ - "//pkg/util/mount:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//pkg/util/mount:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:linux": [ + "//pkg/util/mount:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//pkg/util/mount:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//pkg/util/mount:go_default_library", + ], "//conditions:default": [], }), ) diff --git a/pkg/util/resourcecontainer/BUILD b/pkg/util/resourcecontainer/BUILD index 05b817b4b12..e13b8aea09e 100644 --- a/pkg/util/resourcecontainer/BUILD +++ b/pkg/util/resourcecontainer/BUILD @@ -7,17 +7,45 @@ load( go_library( name = "go_default_library", - srcs = [ - "resource_container_unsupported.go", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "resource_container_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "resource_container_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "resource_container_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "resource_container_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "resource_container_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "resource_container_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "resource_container_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "resource_container_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "resource_container_unsupported.go", + ], + 
"@io_bazel_rules_go//go/platform:solaris": [ + "resource_container_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "resource_container_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/resourcecontainer", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", ], diff --git a/pkg/util/rlimit/BUILD b/pkg/util/rlimit/BUILD index 52fe550b103..7735de7909e 100644 --- a/pkg/util/rlimit/BUILD +++ b/pkg/util/rlimit/BUILD @@ -7,17 +7,45 @@ load( go_library( name = "go_default_library", - srcs = [ - "rlimit_unsupported.go", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "rlimit_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "rlimit_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "rlimit_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "rlimit_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "rlimit_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "rlimit_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "rlimit_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "rlimit_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "rlimit_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "rlimit_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "rlimit_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/rlimit", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], 
"//conditions:default": [], diff --git a/pkg/util/selinux/BUILD b/pkg/util/selinux/BUILD index f5eab6022f0..dae3792f4c2 100644 --- a/pkg/util/selinux/BUILD +++ b/pkg/util/selinux/BUILD @@ -10,16 +10,45 @@ go_library( srcs = [ "doc.go", "selinux.go", - "selinux_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "selinux_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "selinux_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "selinux_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "selinux_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "selinux_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "selinux_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "selinux_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "selinux_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "selinux_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "selinux_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "selinux_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/selinux", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/opencontainers/selinux/go-selinux:go_default_library", ], "//conditions:default": [], diff --git a/pkg/util/slice/BUILD b/pkg/util/slice/BUILD index 611545484c0..5b13d93e4d9 100644 --- a/pkg/util/slice/BUILD +++ b/pkg/util/slice/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["slice_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/slice", - library = ":go_default_library", ) filegroup( diff --git a/pkg/util/strings/BUILD b/pkg/util/strings/BUILD index f71a3326e39..b50d78b265b 100644 --- 
a/pkg/util/strings/BUILD +++ b/pkg/util/strings/BUILD @@ -23,8 +23,8 @@ go_test( "line_delimiter_test.go", "strings_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/strings", - library = ":go_default_library", ) filegroup( diff --git a/pkg/util/system/BUILD b/pkg/util/system/BUILD index 1c2f1a067f1..58caa161917 100644 --- a/pkg/util/system/BUILD +++ b/pkg/util/system/BUILD @@ -15,8 +15,8 @@ go_library( go_test( name = "go_default_test", srcs = ["system_utils_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/system", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/util/tail/BUILD b/pkg/util/tail/BUILD index 4e81446dd40..253a66e4aa0 100644 --- a/pkg/util/tail/BUILD +++ b/pkg/util/tail/BUILD @@ -22,8 +22,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["tail_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/tail", - library = ":go_default_library", ) go_library( diff --git a/pkg/util/taints/BUILD b/pkg/util/taints/BUILD index 957af3c05f2..09fdb4b2f71 100644 --- a/pkg/util/taints/BUILD +++ b/pkg/util/taints/BUILD @@ -23,8 +23,8 @@ go_library( go_test( name = "go_default_test", srcs = ["taints_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/taints", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/pkg/util/template/BUILD b/pkg/util/template/BUILD index 015aafcd846..30ed5135c01 100644 --- a/pkg/util/template/BUILD +++ b/pkg/util/template/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["template_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/template", - library = ":go_default_library", deps = 
["//vendor/github.com/stretchr/testify/assert:go_default_library"], ) diff --git a/pkg/util/term/BUILD b/pkg/util/term/BUILD index 01b8ca19dc5..894c6008ced 100644 --- a/pkg/util/term/BUILD +++ b/pkg/util/term/BUILD @@ -7,19 +7,89 @@ load( go_library( name = "go_default_library", - srcs = [ - "setsize.go", - ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "setsize.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "setsize.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "setsize.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "setsize.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "setsize.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "setsize.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "setsize.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "setsize.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "setsize.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "setsize.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "setsize_unsupported.go", ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/util/term", - deps = [ - "//vendor/github.com/docker/docker/pkg/term:go_default_library", - "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", - ], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/pkg/util/threading/BUILD b/pkg/util/threading/BUILD index 79af21a79bb..2f031931b21 100644 --- a/pkg/util/threading/BUILD +++ b/pkg/util/threading/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["deadlock-detector_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/threading", - library = ":go_default_library", ) filegroup( diff --git a/pkg/util/tolerations/BUILD b/pkg/util/tolerations/BUILD index 1e3b1189b0d..a4f8868176a 100644 --- 
a/pkg/util/tolerations/BUILD +++ b/pkg/util/tolerations/BUILD @@ -32,7 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["tolerations_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/tolerations", - library = ":go_default_library", deps = ["//pkg/apis/core:go_default_library"], ) diff --git a/pkg/util/version/BUILD b/pkg/util/version/BUILD index 9b3cf2bc921..7ab62aae5bd 100644 --- a/pkg/util/version/BUILD +++ b/pkg/util/version/BUILD @@ -18,8 +18,8 @@ go_library( go_test( name = "go_default_test", srcs = ["version_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/util/version", - library = ":go_default_library", ) filegroup( diff --git a/pkg/volume/BUILD b/pkg/volume/BUILD index 8b7ef61afc3..4656b9cff97 100644 --- a/pkg/volume/BUILD +++ b/pkg/volume/BUILD @@ -18,11 +18,40 @@ go_library( "plugins.go", "util.go", "volume.go", - "volume_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "volume_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "volume_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "volume_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "volume_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "volume_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "volume_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "volume_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "volume_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "volume_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "volume_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "volume_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/volume", @@ -53,8 +82,8 @@ go_test( "plugins_test.go", 
"util_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/util/slice:go_default_library", @@ -73,7 +102,7 @@ go_test( srcs = [ "metrics_statfs_test.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "metrics_du_test.go", ], "//conditions:default": [], @@ -84,7 +113,7 @@ go_test( "//pkg/volume/testing:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/pkg/volume/aws_ebs/BUILD b/pkg/volume/aws_ebs/BUILD index e61d65a9da2..9fd852900bb 100644 --- a/pkg/volume/aws_ebs/BUILD +++ b/pkg/volume/aws_ebs/BUILD @@ -37,8 +37,8 @@ go_test( "attacher_test.go", "aws_ebs_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/aws_ebs", - library = ":go_default_library", deps = [ "//pkg/cloudprovider/providers/aws:go_default_library", "//pkg/util/mount:go_default_library", diff --git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD index 7a71a2c4bb2..1751afd4d50 100644 --- a/pkg/volume/azure_dd/BUILD +++ b/pkg/volume/azure_dd/BUILD @@ -11,15 +11,41 @@ go_library( srcs = [ "attacher.go", "azure_common.go", - "azure_common_unsupported.go", "azure_dd.go", "azure_mounter.go", "azure_provision.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "azure_common_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "azure_common_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "azure_common_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "azure_common_unsupported.go", + ], + 
"@io_bazel_rules_go//go/platform:linux": [ "azure_common_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "azure_common_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "azure_common_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "azure_common_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "azure_common_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "azure_common_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "azure_common_windows.go", ], "//conditions:default": [], @@ -66,8 +92,8 @@ go_test( "azure_common_test.go", "azure_dd_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/azure_dd", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/azure_file/BUILD b/pkg/volume/azure_file/BUILD index fec0c0a9504..ea904ced6ce 100644 --- a/pkg/volume/azure_file/BUILD +++ b/pkg/volume/azure_file/BUILD @@ -35,8 +35,8 @@ go_library( go_test( name = "go_default_test", srcs = ["azure_file_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/azure_file", - library = ":go_default_library", deps = [ "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/cloudprovider/providers/fake:go_default_library", diff --git a/pkg/volume/cephfs/BUILD b/pkg/volume/cephfs/BUILD index a61e1fb8595..ca4cf8c54fc 100644 --- a/pkg/volume/cephfs/BUILD +++ b/pkg/volume/cephfs/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["cephfs_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/cephfs", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/cinder/BUILD b/pkg/volume/cinder/BUILD index 
35ef7a2a21f..4606eb515b5 100644 --- a/pkg/volume/cinder/BUILD +++ b/pkg/volume/cinder/BUILD @@ -43,8 +43,8 @@ go_test( "attacher_test.go", "cinder_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/cinder", - library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/util/mount:go_default_library", diff --git a/pkg/volume/configmap/BUILD b/pkg/volume/configmap/BUILD index 7cdad062d83..b3a3b3b9574 100644 --- a/pkg/volume/configmap/BUILD +++ b/pkg/volume/configmap/BUILD @@ -30,8 +30,8 @@ go_library( go_test( name = "go_default_test", srcs = ["configmap_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/configmap", - library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", "//pkg/volume/empty_dir:go_default_library", diff --git a/pkg/volume/csi/BUILD b/pkg/volume/csi/BUILD index 88a2e857fe5..f1bd2c1e7be 100644 --- a/pkg/volume/csi/BUILD +++ b/pkg/volume/csi/BUILD @@ -36,8 +36,8 @@ go_test( "csi_mounter_test.go", "csi_plugin_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/csi", - library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", "//pkg/volume/csi/fake:go_default_library", diff --git a/pkg/volume/downwardapi/BUILD b/pkg/volume/downwardapi/BUILD index d84c809751a..3b5bd7a6e06 100644 --- a/pkg/volume/downwardapi/BUILD +++ b/pkg/volume/downwardapi/BUILD @@ -26,8 +26,8 @@ go_library( go_test( name = "go_default_test", srcs = ["downwardapi_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/downwardapi", - library = ":go_default_library", deps = [ "//pkg/fieldpath:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/empty_dir/BUILD b/pkg/volume/empty_dir/BUILD index c20dd4f39f5..b57aa47ffe4 100644 --- a/pkg/volume/empty_dir/BUILD +++ b/pkg/volume/empty_dir/BUILD @@ -11,11 +11,40 @@ go_library( srcs = [ "doc.go", 
"empty_dir.go", - "empty_dir_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "empty_dir_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "empty_dir_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "empty_dir_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "empty_dir_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "empty_dir_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "empty_dir_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "empty_dir_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "empty_dir_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "empty_dir_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "empty_dir_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "empty_dir_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/volume/empty_dir", @@ -31,7 +60,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], @@ -41,15 +70,15 @@ go_library( go_test( name = "go_default_test", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "empty_dir_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/empty_dir", - library = ":go_default_library", deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", 
"//pkg/volume/testing:go_default_library", diff --git a/pkg/volume/fc/BUILD b/pkg/volume/fc/BUILD index f6e513d34a3..2085bc4f2b3 100644 --- a/pkg/volume/fc/BUILD +++ b/pkg/volume/fc/BUILD @@ -37,8 +37,8 @@ go_test( "fc_test.go", "fc_util_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/fc", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/flexvolume/BUILD b/pkg/volume/flexvolume/BUILD index b55261ee62b..5d28f4963cc 100644 --- a/pkg/volume/flexvolume/BUILD +++ b/pkg/volume/flexvolume/BUILD @@ -54,8 +54,8 @@ go_test( "probe_test.go", "unmounter_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/flexvolume", - library = ":go_default_library", deps = [ "//pkg/util/filesystem:go_default_library", "//pkg/util/mount:go_default_library", diff --git a/pkg/volume/flocker/BUILD b/pkg/volume/flocker/BUILD index 3d21d92dc91..a213590e937 100644 --- a/pkg/volume/flocker/BUILD +++ b/pkg/volume/flocker/BUILD @@ -39,8 +39,8 @@ go_test( "flocker_util_test.go", "flocker_volume_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/flocker", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/gce_pd/BUILD b/pkg/volume/gce_pd/BUILD index 8c83b4d0d70..7a4feab0d1c 100644 --- a/pkg/volume/gce_pd/BUILD +++ b/pkg/volume/gce_pd/BUILD @@ -39,8 +39,8 @@ go_test( "attacher_test.go", "gce_pd_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/gce_pd", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/git_repo/BUILD b/pkg/volume/git_repo/BUILD index cd24945cf33..b726378cf1b 100644 --- a/pkg/volume/git_repo/BUILD +++ b/pkg/volume/git_repo/BUILD @@ -26,8 +26,8 @@ go_library( 
go_test( name = "go_default_test", srcs = ["git_repo_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/git_repo", - library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", "//pkg/volume/empty_dir:go_default_library", diff --git a/pkg/volume/glusterfs/BUILD b/pkg/volume/glusterfs/BUILD index b8371b5e8df..a57114c412e 100644 --- a/pkg/volume/glusterfs/BUILD +++ b/pkg/volume/glusterfs/BUILD @@ -42,8 +42,8 @@ go_test( "glusterfs_minmax_test.go", "glusterfs_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/glusterfs", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/host_path/BUILD b/pkg/volume/host_path/BUILD index b158f6de8b1..4297d1b19e5 100644 --- a/pkg/volume/host_path/BUILD +++ b/pkg/volume/host_path/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["host_path_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/host_path", - library = ":go_default_library", deps = [ "//pkg/util/file:go_default_library", "//pkg/util/mount:go_default_library", diff --git a/pkg/volume/iscsi/BUILD b/pkg/volume/iscsi/BUILD index 3c7a7598136..e056ff00889 100644 --- a/pkg/volume/iscsi/BUILD +++ b/pkg/volume/iscsi/BUILD @@ -34,8 +34,8 @@ go_test( "iscsi_test.go", "iscsi_util_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/iscsi", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/local/BUILD b/pkg/volume/local/BUILD index 8c8d3b1d8a7..477e5adb88d 100644 --- a/pkg/volume/local/BUILD +++ b/pkg/volume/local/BUILD @@ -33,8 +33,8 @@ go_library( go_test( name = "go_default_test", srcs = ["local_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/local", - library = 
":go_default_library", deps = [ "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", diff --git a/pkg/volume/nfs/BUILD b/pkg/volume/nfs/BUILD index ad091ffb4d7..8a3253f8e0f 100644 --- a/pkg/volume/nfs/BUILD +++ b/pkg/volume/nfs/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["nfs_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/nfs", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/photon_pd/BUILD b/pkg/volume/photon_pd/BUILD index bc72ab05def..3d6dc74f2c1 100644 --- a/pkg/volume/photon_pd/BUILD +++ b/pkg/volume/photon_pd/BUILD @@ -36,8 +36,8 @@ go_test( "attacher_test.go", "photon_pd_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/photon_pd", - library = ":go_default_library", deps = [ "//pkg/cloudprovider/providers/photon:go_default_library", "//pkg/util/mount:go_default_library", diff --git a/pkg/volume/portworx/BUILD b/pkg/volume/portworx/BUILD index 36714e9635f..6cf06417976 100644 --- a/pkg/volume/portworx/BUILD +++ b/pkg/volume/portworx/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["portworx_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/portworx", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/projected/BUILD b/pkg/volume/projected/BUILD index 219b8aa59d4..4e9511aa644 100644 --- a/pkg/volume/projected/BUILD +++ b/pkg/volume/projected/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["projected_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/projected", - library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", "//pkg/volume/empty_dir:go_default_library", diff --git a/pkg/volume/quobyte/BUILD 
b/pkg/volume/quobyte/BUILD index 7adf8b7d7aa..7b6ea3a0bae 100644 --- a/pkg/volume/quobyte/BUILD +++ b/pkg/volume/quobyte/BUILD @@ -33,8 +33,8 @@ go_library( go_test( name = "go_default_test", srcs = ["quobyte_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/quobyte", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/rbd/BUILD b/pkg/volume/rbd/BUILD index e97b0fc7567..ea058ed7c67 100644 --- a/pkg/volume/rbd/BUILD +++ b/pkg/volume/rbd/BUILD @@ -38,8 +38,8 @@ go_library( go_test( name = "go_default_test", srcs = ["rbd_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/rbd", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/scaleio/BUILD b/pkg/volume/scaleio/BUILD index 5c103e6e268..c744150d0ef 100644 --- a/pkg/volume/scaleio/BUILD +++ b/pkg/volume/scaleio/BUILD @@ -13,8 +13,8 @@ go_test( "sio_util_test.go", "sio_volume_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/scaleio", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/secret/BUILD b/pkg/volume/secret/BUILD index 9fc423f420a..61998067431 100644 --- a/pkg/volume/secret/BUILD +++ b/pkg/volume/secret/BUILD @@ -30,8 +30,8 @@ go_library( go_test( name = "go_default_test", srcs = ["secret_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/secret", - library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", "//pkg/volume/empty_dir:go_default_library", diff --git a/pkg/volume/storageos/BUILD b/pkg/volume/storageos/BUILD index a66bdc936e3..f20e382beb7 100644 --- a/pkg/volume/storageos/BUILD +++ b/pkg/volume/storageos/BUILD @@ -37,8 +37,8 @@ go_test( "storageos_test.go", 
"storageos_util_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/storageos", - library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/util/BUILD b/pkg/volume/util/BUILD index 7a4ed937692..22c8fd82cd4 100644 --- a/pkg/volume/util/BUILD +++ b/pkg/volume/util/BUILD @@ -11,24 +11,68 @@ go_library( srcs = [ "atomic_writer.go", "device_util.go", - "device_util_unsupported.go", "doc.go", "error.go", "finalizer.go", - "fs_unsupported.go", "io_util.go", "metrics.go", "util.go", - "util_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ - "fs.go", + "@io_bazel_rules_go//go/platform:android": [ + "device_util_unsupported.go", + "fs_unsupported.go", + "util_unsupported.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ + "device_util_unsupported.go", + "fs.go", + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "device_util_unsupported.go", + "fs_unsupported.go", + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "device_util_unsupported.go", + "fs_unsupported.go", + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "device_util_linux.go", "fs.go", "util_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "device_util_unsupported.go", + "fs_unsupported.go", + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "device_util_unsupported.go", + "fs_unsupported.go", + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "device_util_unsupported.go", + "fs_unsupported.go", + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "device_util_unsupported.go", + "fs_unsupported.go", + "util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "device_util_unsupported.go", + "fs_unsupported.go", + 
"util_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "device_util_unsupported.go", + "fs_unsupported.go", + "util_unsupported.go", + ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/volume/util", @@ -41,7 +85,6 @@ go_library( "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -49,11 +92,40 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ - "//vendor/golang.org/x/sys/unix:go_default_library", + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", ], "//conditions:default": [], }), @@ -64,14 +136,14 @@ go_test( srcs = [ "util_test.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "atomic_writer_test.go", "device_util_linux_test.go", ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/util", - library = ":go_default_library", deps = [ "//pkg/apis/core/install:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", @@ -79,7 +151,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/k8s.io/client-go/util/testing:go_default_library", ], "//conditions:default": [], diff --git a/pkg/volume/util/nestedpendingoperations/BUILD b/pkg/volume/util/nestedpendingoperations/BUILD index c0623b47e1d..b048f8121c3 100644 --- a/pkg/volume/util/nestedpendingoperations/BUILD +++ b/pkg/volume/util/nestedpendingoperations/BUILD @@ -22,8 +22,8 @@ go_library( go_test( name = "go_default_test", srcs = ["nestedpendingoperations_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations", - library = ":go_default_library", deps = [ "//pkg/volume/util/types:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git 
a/pkg/volume/util/operationexecutor/BUILD b/pkg/volume/util/operationexecutor/BUILD index 15c2ac27d83..4133ca39449 100644 --- a/pkg/volume/util/operationexecutor/BUILD +++ b/pkg/volume/util/operationexecutor/BUILD @@ -39,8 +39,8 @@ go_library( go_test( name = "go_default_test", srcs = ["operation_executor_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/util/operationexecutor", - library = ":go_default_library", deps = [ "//pkg/controller/volume/expand/cache:go_default_library", "//pkg/util/mount:go_default_library", diff --git a/pkg/volume/validation/BUILD b/pkg/volume/validation/BUILD index a3cc585f7a4..aa814925ee7 100644 --- a/pkg/volume/validation/BUILD +++ b/pkg/volume/validation/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["pv_validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/validation", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/volume/vsphere_volume/BUILD b/pkg/volume/vsphere_volume/BUILD index a2bd11e8fbc..bde1c437bdf 100644 --- a/pkg/volume/vsphere_volume/BUILD +++ b/pkg/volume/vsphere_volume/BUILD @@ -38,8 +38,8 @@ go_test( "attacher_test.go", "vsphere_volume_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/volume/vsphere_volume", - library = ":go_default_library", deps = [ "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//pkg/util/mount:go_default_library", diff --git a/plugin/cmd/kube-scheduler/BUILD b/plugin/cmd/kube-scheduler/BUILD index 1e51fd6a99d..c89e49a18fc 100644 --- a/plugin/cmd/kube-scheduler/BUILD +++ b/plugin/cmd/kube-scheduler/BUILD @@ -9,6 +9,7 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kube-scheduler", + embed = [":go_default_library"], gc_linkopts = [ "-linkmode", "external", @@ -16,7 +17,6 @@ go_binary( "-static", ], 
importpath = "k8s.io/kubernetes/plugin/cmd/kube-scheduler", - library = ":go_default_library", x_defs = version_x_defs(), ) diff --git a/plugin/pkg/admission/admit/BUILD b/plugin/pkg/admission/admit/BUILD index d660932d219..957b91d3e4c 100644 --- a/plugin/pkg/admission/admit/BUILD +++ b/plugin/pkg/admission/admit/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/admit", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/plugin/pkg/admission/alwayspullimages/BUILD b/plugin/pkg/admission/alwayspullimages/BUILD index ab8a3fd14db..2d1d398f810 100644 --- a/plugin/pkg/admission/alwayspullimages/BUILD +++ b/plugin/pkg/admission/alwayspullimages/BUILD @@ -21,8 +21,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/plugin/pkg/admission/antiaffinity/BUILD b/plugin/pkg/admission/antiaffinity/BUILD index c0e381995ab..5b42d1afb67 100644 --- a/plugin/pkg/admission/antiaffinity/BUILD +++ b/plugin/pkg/admission/antiaffinity/BUILD @@ -24,8 +24,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/antiaffinity", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/kubelet/apis:go_default_library", diff --git a/plugin/pkg/admission/defaulttolerationseconds/BUILD b/plugin/pkg/admission/defaulttolerationseconds/BUILD index 00bfbc9d5dd..fa601d0f0bb 100644 --- 
a/plugin/pkg/admission/defaulttolerationseconds/BUILD +++ b/plugin/pkg/admission/defaulttolerationseconds/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", diff --git a/plugin/pkg/admission/deny/BUILD b/plugin/pkg/admission/deny/BUILD index c5eb8bf5af3..9bb617c49a4 100644 --- a/plugin/pkg/admission/deny/BUILD +++ b/plugin/pkg/admission/deny/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/deny", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/plugin/pkg/admission/eventratelimit/BUILD b/plugin/pkg/admission/eventratelimit/BUILD index 7dbb98cfc80..7a392d0c143 100644 --- a/plugin/pkg/admission/eventratelimit/BUILD +++ b/plugin/pkg/admission/eventratelimit/BUILD @@ -12,8 +12,8 @@ go_test( "admission_test.go", "cache_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/eventratelimit/apis/eventratelimit:go_default_library", diff --git a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation/BUILD b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation/BUILD index 1a272b78321..964679a062d 100644 --- a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation/BUILD +++ b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation/BUILD @@ -32,7 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = 
[":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation", - library = ":go_default_library", deps = ["//plugin/pkg/admission/eventratelimit/apis/eventratelimit:go_default_library"], ) diff --git a/plugin/pkg/admission/exec/BUILD b/plugin/pkg/admission/exec/BUILD index 61fe9109954..b47eea96e42 100644 --- a/plugin/pkg/admission/exec/BUILD +++ b/plugin/pkg/admission/exec/BUILD @@ -24,8 +24,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/exec", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", diff --git a/plugin/pkg/admission/extendedresourcetoleration/BUILD b/plugin/pkg/admission/extendedresourcetoleration/BUILD index 882b966cd7b..f6c8b5b0ad3 100644 --- a/plugin/pkg/admission/extendedresourcetoleration/BUILD +++ b/plugin/pkg/admission/extendedresourcetoleration/BUILD @@ -17,8 +17,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", diff --git a/plugin/pkg/admission/gc/BUILD b/plugin/pkg/admission/gc/BUILD index e4d316030fe..944eeebb50e 100644 --- a/plugin/pkg/admission/gc/BUILD +++ b/plugin/pkg/admission/gc/BUILD @@ -25,8 +25,8 @@ go_library( go_test( name = "go_default_test", srcs = ["gc_admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/gc", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/plugin/pkg/admission/imagepolicy/BUILD 
b/plugin/pkg/admission/imagepolicy/BUILD index 9ea88c749b9..64ca678a745 100644 --- a/plugin/pkg/admission/imagepolicy/BUILD +++ b/plugin/pkg/admission/imagepolicy/BUILD @@ -37,8 +37,8 @@ go_test( "certs_test.go", "config_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/imagepolicy", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/imagepolicy/install:go_default_library", diff --git a/plugin/pkg/admission/initialresources/BUILD b/plugin/pkg/admission/initialresources/BUILD index a10961168a1..b159034690b 100644 --- a/plugin/pkg/admission/initialresources/BUILD +++ b/plugin/pkg/admission/initialresources/BUILD @@ -43,8 +43,8 @@ go_test( "hawkular_test.go", "influxdb_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/initialresources", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/plugin/pkg/admission/limitranger/BUILD b/plugin/pkg/admission/limitranger/BUILD index f858867d3a6..238ae838747 100644 --- a/plugin/pkg/admission/limitranger/BUILD +++ b/plugin/pkg/admission/limitranger/BUILD @@ -33,8 +33,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/limitranger", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/plugin/pkg/admission/namespace/autoprovision/BUILD b/plugin/pkg/admission/namespace/autoprovision/BUILD index 649aa56785b..7891b9d2535 100644 --- a/plugin/pkg/admission/namespace/autoprovision/BUILD +++ b/plugin/pkg/admission/namespace/autoprovision/BUILD @@ -25,8 +25,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = 
[":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/plugin/pkg/admission/namespace/exists/BUILD b/plugin/pkg/admission/namespace/exists/BUILD index 8da3f51b78f..615ec29263e 100644 --- a/plugin/pkg/admission/namespace/exists/BUILD +++ b/plugin/pkg/admission/namespace/exists/BUILD @@ -25,8 +25,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/namespace/exists", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/plugin/pkg/admission/noderestriction/BUILD b/plugin/pkg/admission/noderestriction/BUILD index 8d2777ea41f..cab4287a3dd 100644 --- a/plugin/pkg/admission/noderestriction/BUILD +++ b/plugin/pkg/admission/noderestriction/BUILD @@ -31,8 +31,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/noderestriction", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/policy:go_default_library", diff --git a/plugin/pkg/admission/persistentvolume/label/BUILD b/plugin/pkg/admission/persistentvolume/label/BUILD index b88f19f863e..3afaf06391c 100644 --- a/plugin/pkg/admission/persistentvolume/label/BUILD +++ b/plugin/pkg/admission/persistentvolume/label/BUILD @@ -29,8 +29,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", 
"//pkg/cloudprovider/providers/aws:go_default_library", diff --git a/plugin/pkg/admission/persistentvolume/resize/BUILD b/plugin/pkg/admission/persistentvolume/resize/BUILD index 90a12f1331c..95f1d3d5a64 100644 --- a/plugin/pkg/admission/persistentvolume/resize/BUILD +++ b/plugin/pkg/admission/persistentvolume/resize/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", diff --git a/plugin/pkg/admission/persistentvolumeclaim/pvcprotection/BUILD b/plugin/pkg/admission/persistentvolumeclaim/pvcprotection/BUILD index e13f63e5934..c7cece04286 100644 --- a/plugin/pkg/admission/persistentvolumeclaim/pvcprotection/BUILD +++ b/plugin/pkg/admission/persistentvolumeclaim/pvcprotection/BUILD @@ -21,8 +21,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", diff --git a/plugin/pkg/admission/podnodeselector/BUILD b/plugin/pkg/admission/podnodeselector/BUILD index f0c03fb8c71..289c0814501 100644 --- a/plugin/pkg/admission/podnodeselector/BUILD +++ b/plugin/pkg/admission/podnodeselector/BUILD @@ -29,8 +29,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/podnodeselector", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/plugin/pkg/admission/podpreset/BUILD 
b/plugin/pkg/admission/podpreset/BUILD index ca7376afd71..862a5cbe6e6 100644 --- a/plugin/pkg/admission/podpreset/BUILD +++ b/plugin/pkg/admission/podpreset/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/podpreset", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/settings:go_default_library", diff --git a/plugin/pkg/admission/podtolerationrestriction/BUILD b/plugin/pkg/admission/podtolerationrestriction/BUILD index d3af4b936c4..2dd2627f147 100644 --- a/plugin/pkg/admission/podtolerationrestriction/BUILD +++ b/plugin/pkg/admission/podtolerationrestriction/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/BUILD b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/BUILD index 42a0a1328b9..ee21ec69553 100644 --- a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/BUILD +++ b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/BUILD @@ -33,8 +33,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", diff --git 
a/plugin/pkg/admission/priority/BUILD b/plugin/pkg/admission/priority/BUILD index db47cedb027..753e5bf2096 100644 --- a/plugin/pkg/admission/priority/BUILD +++ b/plugin/pkg/admission/priority/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/priority", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/scheduling:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/BUILD b/plugin/pkg/admission/resourcequota/BUILD index d2518c15c88..b51287670b4 100644 --- a/plugin/pkg/admission/resourcequota/BUILD +++ b/plugin/pkg/admission/resourcequota/BUILD @@ -52,8 +52,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/validation/BUILD b/plugin/pkg/admission/resourcequota/apis/resourcequota/validation/BUILD index 7067a2ed9c5..a812219624e 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/validation/BUILD +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/validation/BUILD @@ -32,7 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation", - library = ":go_default_library", deps = ["//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library"], ) diff --git a/plugin/pkg/admission/security/podsecuritypolicy/BUILD b/plugin/pkg/admission/security/podsecuritypolicy/BUILD index a2f5467e019..6d96b49e456 100644 --- 
a/plugin/pkg/admission/security/podsecuritypolicy/BUILD +++ b/plugin/pkg/admission/security/podsecuritypolicy/BUILD @@ -34,8 +34,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/plugin/pkg/admission/securitycontext/scdeny/BUILD b/plugin/pkg/admission/securitycontext/scdeny/BUILD index a90da5e8f7f..61f5962b09c 100644 --- a/plugin/pkg/admission/securitycontext/scdeny/BUILD +++ b/plugin/pkg/admission/securitycontext/scdeny/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/plugin/pkg/admission/serviceaccount/BUILD b/plugin/pkg/admission/serviceaccount/BUILD index aaeedacddc4..392c6a1b9c6 100644 --- a/plugin/pkg/admission/serviceaccount/BUILD +++ b/plugin/pkg/admission/serviceaccount/BUILD @@ -35,8 +35,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", diff --git a/plugin/pkg/admission/storageclass/setdefault/BUILD b/plugin/pkg/admission/storageclass/setdefault/BUILD index 27b114d0643..a3840f47e94 100644 --- a/plugin/pkg/admission/storageclass/setdefault/BUILD +++ b/plugin/pkg/admission/storageclass/setdefault/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", 
srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", diff --git a/plugin/pkg/auth/authenticator/token/bootstrap/BUILD b/plugin/pkg/auth/authenticator/token/bootstrap/BUILD index b6f0426ab13..d2f3a239d45 100644 --- a/plugin/pkg/auth/authenticator/token/bootstrap/BUILD +++ b/plugin/pkg/auth/authenticator/token/bootstrap/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["bootstrap_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/bootstrap/api:go_default_library", diff --git a/plugin/pkg/auth/authorizer/node/BUILD b/plugin/pkg/auth/authorizer/node/BUILD index 46bb915ba8a..029fbcf5f43 100644 --- a/plugin/pkg/auth/authorizer/node/BUILD +++ b/plugin/pkg/auth/authorizer/node/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["node_authorizer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/node", - library = ":go_default_library", deps = [ "//pkg/apis/core:go_default_library", "//pkg/auth/nodeidentifier:go_default_library", diff --git a/plugin/pkg/auth/authorizer/rbac/BUILD b/plugin/pkg/auth/authorizer/rbac/BUILD index 73dff482900..bd96795f0e7 100644 --- a/plugin/pkg/auth/authorizer/rbac/BUILD +++ b/plugin/pkg/auth/authorizer/rbac/BUILD @@ -31,8 +31,8 @@ go_test( "rbac_test.go", "subject_locator_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac", - library = ":go_default_library", deps = [ "//pkg/apis/rbac:go_default_library", "//pkg/registry/rbac/validation:go_default_library", diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD 
b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD index e503bb50c65..87651f3ad36 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD @@ -55,8 +55,8 @@ go_test( name = "go_default_test", srcs = ["controller_policy_test.go"], data = glob(["testdata/**"]), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/plugin/pkg/scheduler/BUILD b/plugin/pkg/scheduler/BUILD index e20acec4060..9e8ae5b5902 100644 --- a/plugin/pkg/scheduler/BUILD +++ b/plugin/pkg/scheduler/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["scheduler_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/controller/volume/persistentvolume:go_default_library", diff --git a/plugin/pkg/scheduler/algorithm/BUILD b/plugin/pkg/scheduler/algorithm/BUILD index 58a4684e198..644d0e1c04a 100644 --- a/plugin/pkg/scheduler/algorithm/BUILD +++ b/plugin/pkg/scheduler/algorithm/BUILD @@ -31,8 +31,8 @@ go_test( "scheduler_interface_test.go", "types_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", - library = ":go_default_library", deps = [ "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/plugin/pkg/scheduler/algorithm/predicates/BUILD b/plugin/pkg/scheduler/algorithm/predicates/BUILD index 453fbafb726..ebaf3171942 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/BUILD +++ b/plugin/pkg/scheduler/algorithm/predicates/BUILD @@ -50,8 +50,8 @@ go_test( "predicates_test.go", "utils_test.go", ], + 
embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", - library = ":go_default_library", deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/kubelet/apis:go_default_library", diff --git a/plugin/pkg/scheduler/algorithm/priorities/BUILD b/plugin/pkg/scheduler/algorithm/priorities/BUILD index b6e8d618cd2..027d0112e27 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/BUILD +++ b/plugin/pkg/scheduler/algorithm/priorities/BUILD @@ -60,8 +60,8 @@ go_test( "selector_spreading_test.go", "taint_toleration_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", - library = ":go_default_library", deps = [ "//pkg/kubelet/apis:go_default_library", "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/BUILD b/plugin/pkg/scheduler/algorithm/priorities/util/BUILD index 9638eeab511..d997e4d68b5 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/util/BUILD +++ b/plugin/pkg/scheduler/algorithm/priorities/util/BUILD @@ -13,8 +13,8 @@ go_test( "topologies_test.go", "util_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/plugin/pkg/scheduler/algorithmprovider/BUILD b/plugin/pkg/scheduler/algorithmprovider/BUILD index 904796b74aa..e9fe7f977f3 100644 --- a/plugin/pkg/scheduler/algorithmprovider/BUILD +++ b/plugin/pkg/scheduler/algorithmprovider/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["plugins_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider", - library = ":go_default_library", deps = [ 
"//plugin/pkg/scheduler/factory:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/BUILD b/plugin/pkg/scheduler/algorithmprovider/defaults/BUILD index e72930b90f9..15cbd49b81d 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/BUILD +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/BUILD @@ -29,8 +29,8 @@ go_test( "compatibility_test.go", "defaults_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/install:go_default_library", diff --git a/plugin/pkg/scheduler/api/validation/BUILD b/plugin/pkg/scheduler/api/validation/BUILD index 2120ba3dc09..3ec8db39c74 100644 --- a/plugin/pkg/scheduler/api/validation/BUILD +++ b/plugin/pkg/scheduler/api/validation/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation", - library = ":go_default_library", deps = ["//plugin/pkg/scheduler/api:go_default_library"], ) diff --git a/plugin/pkg/scheduler/core/BUILD b/plugin/pkg/scheduler/core/BUILD index ee42cd1fdb3..ba8f7875106 100644 --- a/plugin/pkg/scheduler/core/BUILD +++ b/plugin/pkg/scheduler/core/BUILD @@ -14,8 +14,8 @@ go_test( "generic_scheduler_test.go", "scheduling_queue_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/core", - library = ":go_default_library", deps = [ "//plugin/pkg/scheduler/algorithm:go_default_library", "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", diff --git a/plugin/pkg/scheduler/factory/BUILD b/plugin/pkg/scheduler/factory/BUILD index c4fe5102bb5..41700276833 100644 --- a/plugin/pkg/scheduler/factory/BUILD +++ b/plugin/pkg/scheduler/factory/BUILD 
@@ -62,8 +62,8 @@ go_test( "factory_test.go", "plugins_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/factory", - library = ":go_default_library", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testing:go_default_library", diff --git a/plugin/pkg/scheduler/schedulercache/BUILD b/plugin/pkg/scheduler/schedulercache/BUILD index 6d5e6204f55..b8d18e6c424 100644 --- a/plugin/pkg/scheduler/schedulercache/BUILD +++ b/plugin/pkg/scheduler/schedulercache/BUILD @@ -27,8 +27,8 @@ go_library( go_test( name = "go_default_test", srcs = ["cache_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", - library = ":go_default_library", deps = [ "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", diff --git a/plugin/pkg/scheduler/util/BUILD b/plugin/pkg/scheduler/util/BUILD index 1eed06de183..94174d181c7 100644 --- a/plugin/pkg/scheduler/util/BUILD +++ b/plugin/pkg/scheduler/util/BUILD @@ -13,8 +13,8 @@ go_test( "testutil_test.go", "utils_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/util", - library = ":go_default_library", deps = [ "//pkg/apis/scheduling:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/staging/src/k8s.io/api/core/v1/BUILD b/staging/src/k8s.io/api/core/v1/BUILD index 2ffdd345309..80409236152 100644 --- a/staging/src/k8s.io/api/core/v1/BUILD +++ b/staging/src/k8s.io/api/core/v1/BUILD @@ -12,8 +12,8 @@ go_test( "taint_test.go", "toleration_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/api/core/v1", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiextensions-apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/BUILD index ad62604f378..12f3029b253 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/BUILD +++ 
b/staging/src/k8s.io/apiextensions-apiserver/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "apiextensions-apiserver", + embed = [":go_default_library"], importpath = "k8s.io/apiextensions-apiserver", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/BUILD index 303d94b9382..5f6161f0cbb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/BUILD @@ -28,8 +28,8 @@ go_library( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install/BUILD index 3a0a056a283..1ae69bfa797 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["roundtrip_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/BUILD index 32f2ab8fed5..46ee524ea10 100644 --- 
a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/BUILD @@ -60,8 +60,8 @@ go_test( "conversion_test.go", "marshal_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD index 25b1bbd5919..15884eb628e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD @@ -24,8 +24,8 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD index 7b5d2d28337..a3842388097 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD @@ -85,7 +85,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["customresource_handler_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiextensions-apiserver/pkg/apiserver", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library"], ) 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/BUILD index a85afaef513..0fff89c68a6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/BUILD @@ -33,8 +33,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiextensions-apiserver/pkg/apiserver/validation", - library = ":go_default_library", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD index 2abd62ff18d..a2e00d653b0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["naming_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiextensions-apiserver/pkg/controller/status", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/api/errors/BUILD b/staging/src/k8s.io/apimachinery/pkg/api/errors/BUILD index 80e205320ad..384b432bd99 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/errors/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/errors/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["errors_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/api/errors", - library = 
":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD b/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD index f42cd77fbec..aeff38a57bd 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD @@ -14,8 +14,8 @@ go_test( "priority_test.go", "restmapper_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/api/meta", - library = ":go_default_library", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD b/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD index 1fb88704e32..fab98203507 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD @@ -15,8 +15,8 @@ go_test( "quantity_test.go", "scale_int_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/api/resource", - library = ":go_default_library", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/api/testing/fuzzer/BUILD b/staging/src/k8s.io/apimachinery/pkg/api/testing/fuzzer/BUILD index 3b58ce2489b..00a56d25050 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/testing/fuzzer/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/testing/fuzzer/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["valuefuzz_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/api/testing/fuzzer", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/api/validation/BUILD 
b/staging/src/k8s.io/apimachinery/pkg/api/validation/BUILD index 8546d1a5932..8f6cd93d4c0 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/validation/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/validation/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["objectmeta_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/api/validation", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/api/validation/path/BUILD b/staging/src/k8s.io/apimachinery/pkg/api/validation/path/BUILD index e50632c5092..33546216a38 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/validation/path/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/validation/path/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["name_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/api/validation/path", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/apimachinery/BUILD b/staging/src/k8s.io/apimachinery/pkg/apimachinery/BUILD index 2c6753d141e..90d016fab84 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apimachinery/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/apimachinery/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["types_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/apimachinery", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library"], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD b/staging/src/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD index 314ddcad227..27734e8f529 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD +++ 
b/staging/src/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["announced_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/apimachinery/announced", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD b/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD index 8982b562911..873b34838c0 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["registered_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/apimachinery/registered", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apimachinery:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD index 636707fe2dc..5f11cb1a997 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD @@ -12,8 +12,8 @@ go_test( "register_test.go", "roundtrip_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/apis/meta/internalversion", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/fuzzer:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD index 4a96c3f948c..c851816d782 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD +++ 
b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -18,8 +18,8 @@ go_test( "time_test.go", "types_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/apis/meta/v1", - library = ":go_default_library", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/json-iterator/go:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD index 7ae9cb0f9d8..4f3bf562de9 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD @@ -12,8 +12,8 @@ go_test( "helpers_test.go", "unstructured_list_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD index 216ef6368b2..436ae33a5c3 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/validation", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library"], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install/BUILD index d0e74e87091..b2107d46f02 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install/BUILD +++ 
b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/install/BUILD @@ -22,8 +22,8 @@ go_library( go_test( name = "go_default_test", srcs = ["roundtrip_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/apis/testapigroup/install", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/testapigroup/fuzzer:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/conversion/BUILD b/staging/src/k8s.io/apimachinery/pkg/conversion/BUILD index 184dafbff1d..653418164c3 100644 --- a/staging/src/k8s.io/apimachinery/pkg/conversion/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/conversion/BUILD @@ -12,8 +12,8 @@ go_test( "converter_test.go", "helper_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/conversion", - library = ":go_default_library", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/fields/BUILD b/staging/src/k8s.io/apimachinery/pkg/fields/BUILD index 2bae1350393..addb286a230 100644 --- a/staging/src/k8s.io/apimachinery/pkg/fields/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/fields/BUILD @@ -12,8 +12,8 @@ go_test( "fields_test.go", "selector_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/fields", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/labels/BUILD b/staging/src/k8s.io/apimachinery/pkg/labels/BUILD index fba6648e00f..dc6af2643d9 100644 --- a/staging/src/k8s.io/apimachinery/pkg/labels/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/labels/BUILD @@ -12,8 +12,8 @@ go_test( "labels_test.go", "selector_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/labels", - library = ":go_default_library", deps = [ 
"//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD index 93c6dcbfc77..ab87922aa45 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["swagger_doc_generator_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/runtime", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/schema/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/schema/BUILD index 032d866edb4..91ead696c1b 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/schema/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/schema/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["group_version_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/runtime/schema", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/BUILD index 9403c3376f4..43d36a79277 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["codec_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/runtime/serializer", - library = ":go_default_library", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD index 0d43ce95429..0fdceeda9ce 100644 --- 
a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["meta_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/runtime/serializer/json", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD index 4903338fa6b..f1a2f341b7f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["streaming_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/runtime/serializer/streaming", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD index a1b0e6eb277..2e262cb6b19 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["versioning_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/runtime/serializer/versioning", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/test/BUILD b/staging/src/k8s.io/apimachinery/pkg/test/BUILD index 4af395d3ce4..c4946d52ac5 100644 --- 
a/staging/src/k8s.io/apimachinery/pkg/test/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/test/BUILD @@ -16,8 +16,8 @@ go_test( "runtime_serializer_protobuf_protobuf_test.go", "runtime_unversioned_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/test", - library = ":go_default_library", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/util/cache/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/cache/BUILD index d589c0d152a..3b868ef57e8 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/cache/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/cache/BUILD @@ -12,8 +12,8 @@ go_test( "cache_test.go", "lruexpirecache_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/cache", - library = ":go_default_library", deps = [ "//vendor/github.com/golang/groupcache/lru:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/util/clock/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/clock/BUILD index 62ad5a87b18..e5c117d6646 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/clock/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/clock/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["clock_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/clock", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/diff/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/diff/BUILD index 4ba69bc6354..47d9732b093 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/diff/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/diff/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["diff_test.go"], + embed = [":go_default_library"], importpath = 
"k8s.io/apimachinery/pkg/util/diff", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/errors/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/errors/BUILD index d13ff240719..61999329a19 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/errors/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/errors/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["errors_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/errors", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/framer/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/framer/BUILD index f0b7cdec52a..8022f0aba20 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/framer/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/framer/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["framer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/framer", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/BUILD index 94c1d94a249..6450c3a6d75 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["httpstream_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/httpstream", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD index 8342083ad9f..278bf12ae8e 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD @@ -13,8 +13,8 @@ go_test( "roundtripper_test.go", "upgrade_test.go", 
], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/httpstream/spdy", - library = ":go_default_library", deps = [ "//vendor/github.com/elazarl/goproxy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD index 2e3fe651619..8c66be54fc8 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["intstr_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/intstr", - library = ":go_default_library", deps = ["//vendor/github.com/ghodss/yaml:go_default_library"], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/json/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/json/BUILD index c9b57bcba3f..5838be3f7b1 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/json/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/json/BUILD @@ -15,8 +15,8 @@ go_library( go_test( name = "go_default_test", srcs = ["json_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/json", - library = ":go_default_library", ) filegroup( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch/BUILD index 79b5e54d6a4..233ccad91d1 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["patch_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/jsonmergepatch", - library = ":go_default_library", deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", diff --git 
a/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/BUILD index 00715956646..3f50c7618d9 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/mergepatch", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD index d7390ed5c29..8f6999c92e6 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD @@ -15,8 +15,8 @@ go_test( "port_split_test.go", "util_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/net", - library = ":go_default_library", deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD index 368915c2c81..24083b248d2 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD @@ -13,8 +13,8 @@ go_test( "transport_test.go", "upgradeaware_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/proxy", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/util/rand/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/rand/BUILD index b7769be86e4..12254b4a20d 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/rand/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/rand/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["rand_test.go"], 
+ embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/rand", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/runtime/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/runtime/BUILD index 40892fa783c..521efc220e4 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/runtime/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/runtime/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["runtime_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/runtime", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/sets/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/sets/BUILD index 5a6175ad4fd..17bb4010ed2 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/sets/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/sets/BUILD @@ -51,8 +51,8 @@ $(location //vendor/k8s.io/code-generator/cmd/set-gen) \ go_test( name = "go_default_test", srcs = ["set_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/sets", - library = ":go_default_library", ) filegroup( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD index e69fe0abb1d..2f4bcea4d8d 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD @@ -13,8 +13,8 @@ go_test( "testdata/swagger-merge-item.json", "testdata/swagger-precision-item.json", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/strategicpatch", - library = ":go_default_library", deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/pkg/util/validation/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/validation/BUILD index 
9680c1fa7b7..40ee235010c 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/validation/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/validation/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/validation", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library"], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/validation/field/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/validation/field/BUILD index 5508ab94c8e..6a2f815ed88 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/validation/field/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/validation/field/BUILD @@ -12,8 +12,8 @@ go_test( "errors_test.go", "path_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/validation/field", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/wait/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/wait/BUILD index 6eca13c02b5..20046645a33 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/wait/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/wait/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["wait_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/wait", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/waitgroup/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/waitgroup/BUILD index a35c520103b..a7ecb9d68e4 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/waitgroup/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/waitgroup/BUILD @@ -13,8 +13,8 @@ go_library( go_test( name = "go_default_test", srcs = ["waitgroup_test.go"], + embed = [":go_default_library"], importpath = 
"k8s.io/apimachinery/pkg/util/waitgroup", - library = ":go_default_library", ) filegroup( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/yaml/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/yaml/BUILD index e660edfe7cb..0208039a88c 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/yaml/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/yaml/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["decoder_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/util/yaml", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/watch/BUILD b/staging/src/k8s.io/apimachinery/pkg/watch/BUILD index 3e850d2dda0..36c4ad64e3f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/watch/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/watch/BUILD @@ -48,8 +48,8 @@ go_test( go_test( name = "go_default_test", srcs = ["until_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/pkg/watch", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/staging/src/k8s.io/apimachinery/third_party/forked/golang/json/BUILD b/staging/src/k8s.io/apimachinery/third_party/forked/golang/json/BUILD index 4c20d9771d1..d4b5f696ffa 100644 --- a/staging/src/k8s.io/apimachinery/third_party/forked/golang/json/BUILD +++ b/staging/src/k8s.io/apimachinery/third_party/forked/golang/json/BUILD @@ -15,8 +15,8 @@ go_library( go_test( name = "go_default_test", srcs = ["fields_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/third_party/forked/golang/json", - library = ":go_default_library", ) filegroup( diff --git a/staging/src/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD b/staging/src/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD index 9f09628b627..1069d9b93d2 100644 --- 
a/staging/src/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD +++ b/staging/src/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["deep_equal_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apimachinery/third_party/forked/golang/reflect", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiserver/pkg/admission/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/BUILD index 65542c02016..aab87e45791 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/BUILD @@ -14,8 +14,8 @@ go_test( "errors_test.go", "handler_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD index d6d2f3ee199..c892344c3e7 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD @@ -14,8 +14,8 @@ go_test( "mutating_webhook_manager_test.go", "validating_webhook_manager_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/configuration", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/metrics/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/metrics/BUILD index 32feb2f9f24..22e4b1f3b2d 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/metrics/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/metrics/BUILD @@ -17,8 +17,8 @@ 
go_test( "metrics_test.go", "testutil_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/metrics", - library = ":go_default_library", deps = [ "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/prometheus/client_model/go:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD index 6479a44881c..9410779b8f7 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD @@ -46,8 +46,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["initialization_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/plugin/initialization", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD index 1c821c23cc0..101cd77000b 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD @@ -30,8 +30,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD index 76c2fd3dcad..b4cfcdfb6e6 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD @@ -32,8 +32,8 @@ go_test( "authentication_test.go", "serviceresolver_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/config", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD index 28800723843..60b64519b11 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD @@ -18,8 +18,8 @@ go_library( go_test( name = "go_default_test", srcs = ["statuserror_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/errors", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/BUILD index 14fa71007c7..f81cea70d67 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/BUILD @@ -14,8 +14,8 @@ go_library( go_test( name = "go_default_test", srcs = ["initializer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/initializer", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", diff --git 
a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD index d94dc0f6b00..0d46b5d7627 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD @@ -38,8 +38,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/admission/v1beta1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD index a75286a6886..3c25272691b 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD @@ -24,8 +24,8 @@ go_library( go_test( name = "go_default_test", srcs = ["matcher_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD index 6152811d136..4e62c6ac0f2 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD @@ -14,8 +14,8 @@ go_library( go_test( name = "go_default_test", srcs = ["rules_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/rules", - library 
= ":go_default_library", deps = [ "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD index f4a982d121a..5ab45072db6 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD @@ -38,8 +38,8 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/validating", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/admission/v1beta1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD index 4bd5efbf383..f16c617ac15 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["conversion_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/versioned", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/install/BUILD b/staging/src/k8s.io/apiserver/pkg/apis/audit/install/BUILD index f6489a53416..c4acc260e50 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/install/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/install/BUILD @@ -37,8 +37,8 @@ 
filegroup( go_test( name = "go_default_test", srcs = ["roundtrip_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/apis/audit/install", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/fuzzer:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD index 04b9b943db9..93ebb21c528 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD @@ -53,8 +53,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["conversion_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/apis/audit/v1alpha1", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD index 07e14e74c03..93cc89ad620 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD @@ -54,8 +54,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["conversion_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/apis/audit/v1beta1", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/BUILD b/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/BUILD index 956db3591a8..60740ace095 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/validation/BUILD @@ -9,8 
+9,8 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/apis/audit/validation", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/install/BUILD b/staging/src/k8s.io/apiserver/pkg/apis/example/install/BUILD index 69f1cbb7b70..51f33af0079 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/install/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/install/BUILD @@ -22,8 +22,8 @@ go_library( go_test( name = "go_default_test", srcs = ["roundtrip_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/apis/example/install", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/example/fuzzer:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example2/install/BUILD b/staging/src/k8s.io/apiserver/pkg/apis/example2/install/BUILD index 30d67d3136c..9909a6a7beb 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example2/install/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/apis/example2/install/BUILD @@ -23,8 +23,8 @@ go_library( go_test( name = "go_default_test", srcs = ["roundtrip_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/apis/example2/install", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/example/fuzzer:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/audit/BUILD b/staging/src/k8s.io/apiserver/pkg/audit/BUILD index 525424cdded..0df9fd91f06 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/audit/BUILD @@ -39,8 +39,8 @@ go_library( go_test( name = "go_default_test", srcs = ["union_test.go"], + 
embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/audit", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/audit/policy/BUILD b/staging/src/k8s.io/apiserver/pkg/audit/policy/BUILD index 17d5881b066..0873ac9955f 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/policy/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/audit/policy/BUILD @@ -12,8 +12,8 @@ go_test( "checker_test.go", "reader_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/audit/policy", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/group/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/group/BUILD index ea84fb9dcd9..cdb74e4e42e 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/group/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/group/BUILD @@ -12,8 +12,8 @@ go_test( "group_adder_test.go", "token_group_adder_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authentication/group", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD index c20d90c88c8..78165e4f4c8 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["anonymous_test.go"], + embed = [":go_default_library"], 
importpath = "k8s.io/apiserver/pkg/authentication/request/anonymous", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD index 93f6ad9e932..b338a0c7862 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["bearertoken_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authentication/request/bearertoken", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD index a37735742f5..351b1b64520 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["requestheader_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authentication/request/headerrequest", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/request/union/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/request/union/BUILD index 4b2dcadf2df..ce7c9bb5007 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/request/union/BUILD +++ 
b/staging/src/k8s.io/apiserver/pkg/authentication/request/union/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["unionauth_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authentication/request/union", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD index 1ea8e4c5b4c..bab85829524 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["protocol_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authentication/request/websocket", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/request/x509/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/request/x509/BUILD index 2114dc00bd7..4297b8c4e7c 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/request/x509/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/request/x509/BUILD @@ -15,8 +15,8 @@ go_test( "testdata/intermediate.pem", "testdata/root.pem", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authentication/request/x509", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD index 
db06fbde332..21dc23720c6 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authentication/serviceaccount", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/token/cache/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/token/cache/BUILD index 59cd4322f72..55387ec4696 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/token/cache/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/token/cache/BUILD @@ -12,8 +12,8 @@ go_test( "cache_test.go", "cached_token_authenticator_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authentication/token/cache", - library = ":go_default_library", deps = [ "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD index 4d32dd1d31c..6b2233591c5 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["tokenfile_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authentication/token/tokenfile", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/token/union/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/token/union/BUILD index 0f940c33475..3167a6bc1df 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/authentication/token/union/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/token/union/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["unionauth_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authentication/token/union", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD b/staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD index 7fff4c90a32..660c4daacfc 100644 --- a/staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["builtin_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authorization/authorizerfactory", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/authorization/union/BUILD b/staging/src/k8s.io/apiserver/pkg/authorization/union/BUILD index 0614f330cc5..84f5aac6476 100644 --- a/staging/src/k8s.io/apiserver/pkg/authorization/union/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authorization/union/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["union_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/authorization/union", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD index f73d619255a..f2c1ce34509 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD @@ -15,8 +15,8 @@ go_test( "proxy_test.go", "watch_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/endpoints", - library = ":go_default_library", deps = [ "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/golang.org/x/net/websocket:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/discovery/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/discovery/BUILD index 008dcff5679..d3d12009205 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/discovery/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/discovery/BUILD @@ -12,8 +12,8 @@ go_test( "addresses_test.go", "root_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/endpoints/discovery", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD index 664756aba15..0e64044e688 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD @@ -17,8 +17,8 @@ go_test( "legacy_audit_test.go", "requestinfo_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/endpoints/filters", - library = ":go_default_library", deps = [ "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD index 7e2cdcc1b19..16901d6a6b0 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD @@ -12,8 +12,8 @@ 
go_test( "namer_test.go", "rest_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/endpoints/handlers", - library = ":go_default_library", deps = [ "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD index c0ceb323d8d..936a35639d0 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["negotiate_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/endpoints/handlers/negotiation", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD index 31963fd3b6d..a1ee2eda2f4 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD @@ -12,8 +12,8 @@ go_test( "errors_test.go", "status_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/BUILD index 8d57d273e3f..57b4aa6e92a 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/BUILD +++ 
b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["metrics_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/endpoints/metrics", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/BUILD index 0e972d7f988..a4b24bdc2b3 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["openapi_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/endpoints/openapi", - library = ":go_default_library", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD index ca759c1b66b..182f87ee25d 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["requestinfo_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/endpoints/request", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/BUILD b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/BUILD index 02c6becc733..5472bb807a1 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/BUILD @@ -12,8 +12,8 @@ go_test( "decorated_watcher_test.go", "store_test.go", ], + embed = [":go_default_library"], importpath = 
"k8s.io/apiserver/pkg/registry/generic/registry", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/BUILD b/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/BUILD index 2bf06aae705..32cb78d8adb 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/BUILD @@ -12,8 +12,8 @@ go_test( "response_checker_test.go", "streamer_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/registry/generic/rest", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/registry/rest/BUILD b/staging/src/k8s.io/apiserver/pkg/registry/rest/BUILD index e662a4ee7d7..30b23efa2ce 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/rest/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/registry/rest/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["meta_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/registry/rest", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/server/BUILD b/staging/src/k8s.io/apiserver/pkg/server/BUILD index 40bc126b194..5a5214003b9 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/BUILD @@ -13,8 +13,8 @@ go_test( "config_test.go", "genericapiserver_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/server", - library = ":go_default_library", deps = [ 
"//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", @@ -56,9 +56,38 @@ go_library( "plugins.go", "serve.go", "signal.go", - "signal_posix.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "signal_windows.go", ], "//conditions:default": [], diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/BUILD b/staging/src/k8s.io/apiserver/pkg/server/filters/BUILD index bf72edc372f..69272f623e1 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/BUILD @@ -14,8 +14,8 @@ go_test( "maxinflight_test.go", "timeout_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/server/filters", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/server/healthz/BUILD b/staging/src/k8s.io/apiserver/pkg/server/healthz/BUILD index 8b3901bb8b2..c95bc6d5161 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/healthz/BUILD +++ 
b/staging/src/k8s.io/apiserver/pkg/server/healthz/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["healthz_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/server/healthz", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiserver/pkg/server/httplog/BUILD b/staging/src/k8s.io/apiserver/pkg/server/httplog/BUILD index 16ea0eb730a..638975d0390 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/httplog/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/httplog/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["httplog_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/server/httplog", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiserver/pkg/server/mux/BUILD b/staging/src/k8s.io/apiserver/pkg/server/mux/BUILD index fecbbe7b722..0df54a3f068 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/mux/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/mux/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["pathrecorder_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/server/mux", - library = ":go_default_library", deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/BUILD b/staging/src/k8s.io/apiserver/pkg/server/options/BUILD index 7a9196f14d6..63446af4771 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/options/BUILD @@ -72,8 +72,8 @@ go_test( "serving_test.go", ], data = glob(["testdata/**"]), + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/server/options", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git 
a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD index bfea74f8843..94de9c826c0 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD @@ -29,8 +29,8 @@ go_library( go_test( name = "go_default_test", srcs = ["config_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/server/options/encryptionconfig", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/server/storage/BUILD b/staging/src/k8s.io/apiserver/pkg/server/storage/BUILD index 18676472107..6bedc3672f0 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/storage/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/storage/BUILD @@ -12,8 +12,8 @@ go_test( "resource_config_test.go", "storage_factory_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/server/storage", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/BUILD index 1838e84d85c..0eba66022a0 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/BUILD @@ -15,8 +15,8 @@ go_test( "util_test.go", "watch_cache_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/BUILD 
b/staging/src/k8s.io/apiserver/pkg/storage/etcd/BUILD index 7ac2e710240..77f87b633d0 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/BUILD @@ -13,8 +13,8 @@ go_test( "etcd_helper_test.go", "etcd_watcher_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/etcd", - library = ":go_default_library", deps = [ "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/util/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/etcd/util/BUILD index f1405e87296..537638e61e1 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/util/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/util/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["etcd_util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/etcd/util", - library = ":go_default_library", deps = [ "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD index 0b7c4311fdc..27b429e70fd 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD @@ -13,8 +13,8 @@ go_test( "store_test.go", "watcher_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/etcd3", - library = ":go_default_library", deps = [ "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/preflight/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/preflight/BUILD index 864a1141d65..6396c47e344 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/preflight/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/preflight/BUILD @@ -15,8 +15,8 @@ go_library( go_test( name = "go_default_test", srcs = ["checks_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/etcd3/preflight", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/names/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/names/BUILD index 4e7511282f3..020b441038b 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/names/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/names/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["generate_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/names", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD index 474d8990339..d604cf5e870 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["tls_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/storagebackend/factory", - library = ":go_default_library", deps = [ "//vendor/github.com/coreos/etcd/integration:go_default_library", "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/tests/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/tests/BUILD index d1ac9608352..5637b197ce5 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/tests/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/tests/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["cacher_test.go"], 
+ embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/tests", - library = ":go_default_library", deps = [ "//vendor/golang.org/x/net/context:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/value/BUILD index 22d5ec1761a..cfb01af40e0 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["transformer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/value", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes/BUILD index 4c3bc7a875f..43fdb6a81b7 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["aes_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/value/encrypt/aes", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD index 3ceb65a02c6..cb5d4db5948 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["envelope_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/value/encrypt/envelope", - library = ":go_default_library", deps = [ 
"//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/BUILD index 7c26c0808c0..bbd06a77d1d 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["secretbox_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/storage/value/encrypt/secretbox", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/util/feature/BUILD b/staging/src/k8s.io/apiserver/pkg/util/feature/BUILD index 60a1a866d83..73ed3f79568 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/feature/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/feature/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["feature_gate_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/util/feature", - library = ":go_default_library", deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/util/flag/BUILD b/staging/src/k8s.io/apiserver/pkg/util/flag/BUILD index 57ab4058da1..7bf54563110 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flag/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/flag/BUILD @@ -15,8 +15,8 @@ go_test( "map_string_string_test.go", "namedcertkey_flag_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/util/flag", - library = ":go_default_library", deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/pkg/util/flushwriter/BUILD 
b/staging/src/k8s.io/apiserver/pkg/util/flushwriter/BUILD index 707628ae1ec..b8f71d9eb1c 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flushwriter/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/flushwriter/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["writer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/util/flushwriter", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/BUILD b/staging/src/k8s.io/apiserver/pkg/util/proxy/BUILD index ca57a14dcce..ae20ee258e4 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/proxy/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/proxy/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["proxy_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/util/proxy", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/util/webhook/BUILD b/staging/src/k8s.io/apiserver/pkg/util/webhook/BUILD index e3624b96b5e..7904c80d38d 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/webhook/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/webhook/BUILD @@ -29,8 +29,8 @@ go_test( "certs_test.go", "webhook_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/util/webhook", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/util/wsstream/BUILD b/staging/src/k8s.io/apiserver/pkg/util/wsstream/BUILD index 3dfab49fdee..d55cd63e4e8 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/wsstream/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/wsstream/BUILD @@ -12,8 +12,8 @@ go_test( "conn_test.go", "stream_test.go", ], + embed = 
[":go_default_library"], importpath = "k8s.io/apiserver/pkg/util/wsstream", - library = ":go_default_library", deps = ["//vendor/golang.org/x/net/websocket:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/audit/log/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/audit/log/BUILD index a9011fc5692..2a813130e99 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/audit/log/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/audit/log/BUILD @@ -34,8 +34,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["backend_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/plugin/pkg/audit/log", - library = ":go_default_library", deps = [ "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/audit/webhook/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/audit/webhook/BUILD index c233394a878..da2537d3163 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/audit/webhook/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/audit/webhook/BUILD @@ -12,8 +12,8 @@ go_test( "webhook_test.go", "webhook_v1alpha1_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/plugin/pkg/audit/webhook", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/allow/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/allow/BUILD index 780baffe0fb..e30c73595f8 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/allow/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/allow/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["allow_test.go"], + embed = [":go_default_library"], importpath = 
"k8s.io/apiserver/plugin/pkg/authenticator/password/allow", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/BUILD index e06ad49b9d3..e1c08c1c882 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["passwordfile_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile", - library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth/BUILD index 219a41d2dd4..70b75a247a3 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["basicauth_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/BUILD index 0d75a259855..8618d20de0a 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/BUILD @@ -9,8 +9,8 @@ load( go_test( name = 
"go_default_test", srcs = ["oidc_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/plugin/pkg/authenticator/token/oidc", - library = ":go_default_library", deps = [ "//vendor/github.com/coreos/go-oidc/jose:go_default_library", "//vendor/github.com/coreos/go-oidc/oidc:go_default_library", diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD index f064d07c5d7..be598daa9e7 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD @@ -12,8 +12,8 @@ go_test( "certs_test.go", "webhook_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD index 662b710d838..2cee86fd344 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD @@ -12,8 +12,8 @@ go_test( "certs_test.go", "webhook_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/apiserver/plugin/pkg/authorizer/webhook", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/authorization/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/client-go/discovery/cached/BUILD b/staging/src/k8s.io/client-go/discovery/cached/BUILD index 7f95f83e41a..03fa7edcf83 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/BUILD +++ b/staging/src/k8s.io/client-go/discovery/cached/BUILD @@ -9,8 +9,8 @@ load( go_test( 
name = "go_default_test", srcs = ["memcache_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/discovery/cached", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/client-go/discovery/fake:go_default_library", diff --git a/staging/src/k8s.io/client-go/dynamic/BUILD b/staging/src/k8s.io/client-go/dynamic/BUILD index a0271859e3b..c8d61b292b4 100644 --- a/staging/src/k8s.io/client-go/dynamic/BUILD +++ b/staging/src/k8s.io/client-go/dynamic/BUILD @@ -12,8 +12,8 @@ go_test( "client_test.go", "dynamic_util_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/dynamic", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", diff --git a/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/BUILD b/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/BUILD index 741403b314a..752e68ed54b 100644 --- a/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/BUILD +++ b/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "create-update-delete-deployment", + embed = [":go_default_library"], importpath = "k8s.io/client-go/examples/create-update-delete-deployment", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/BUILD b/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/BUILD index d6c8dacc1da..509d79a810c 100644 --- a/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/BUILD +++ b/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "in-cluster-client-configuration", + embed = [":go_default_library"], importpath = 
"k8s.io/client-go/examples/in-cluster-client-configuration", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/BUILD b/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/BUILD index cf8f05e2bc8..1cee29c83fe 100644 --- a/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/BUILD +++ b/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "out-of-cluster-client-configuration", + embed = [":go_default_library"], importpath = "k8s.io/client-go/examples/out-of-cluster-client-configuration", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/client-go/examples/workqueue/BUILD b/staging/src/k8s.io/client-go/examples/workqueue/BUILD index b0890ae792a..d65b8a2f1fe 100644 --- a/staging/src/k8s.io/client-go/examples/workqueue/BUILD +++ b/staging/src/k8s.io/client-go/examples/workqueue/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "workqueue", + embed = [":go_default_library"], importpath = "k8s.io/client-go/examples/workqueue", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/BUILD b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/BUILD index 9bab4a64f83..510c8df6d8e 100644 --- a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/BUILD +++ b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/BUILD @@ -35,8 +35,8 @@ go_library( go_test( name = "go_default_test", srcs = ["daemonset_expansion_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/listers/extensions/v1beta1", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD 
b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD index 135ba355f99..768919b5a2b 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["azure_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/plugin/pkg/client/auth/azure", - library = ":go_default_library", deps = ["//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library"], ) diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD index d9ef7bded3e..30c710fc1ac 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["gcp_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/plugin/pkg/client/auth/gcp", - library = ":go_default_library", deps = ["//vendor/golang.org/x/oauth2:go_default_library"], ) diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD index 03ccf499ce0..d2ac1466432 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["oidc_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/plugin/pkg/client/auth/oidc", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD index 6ebaf302d2e..1ee50f38aed 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD +++ 
b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["openstack_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/plugin/pkg/client/auth/openstack", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/client-go/rest/BUILD b/staging/src/k8s.io/client-go/rest/BUILD index 44958631da1..3d0972dd577 100644 --- a/staging/src/k8s.io/client-go/rest/BUILD +++ b/staging/src/k8s.io/client-go/rest/BUILD @@ -16,8 +16,8 @@ go_test( "url_utils_test.go", "urlbackoff_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/rest", - library = ":go_default_library", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", diff --git a/staging/src/k8s.io/client-go/scale/BUILD b/staging/src/k8s.io/client-go/scale/BUILD index 4fb0a949e40..1f1e0b2a44f 100644 --- a/staging/src/k8s.io/client-go/scale/BUILD +++ b/staging/src/k8s.io/client-go/scale/BUILD @@ -35,8 +35,8 @@ go_test( "client_test.go", "roundtrip_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/scale", - library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", diff --git a/staging/src/k8s.io/client-go/tools/cache/BUILD b/staging/src/k8s.io/client-go/tools/cache/BUILD index 79e21e1ea51..3b5a6081350 100644 --- a/staging/src/k8s.io/client-go/tools/cache/BUILD +++ b/staging/src/k8s.io/client-go/tools/cache/BUILD @@ -22,9 +22,9 @@ go_test( "store_test.go", "undelta_store_test.go", ], + embed = [":go_default_library"], features = ["-race"], importpath = "k8s.io/client-go/tools/cache", - library = ":go_default_library", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git 
a/staging/src/k8s.io/client-go/tools/cache/testing/BUILD b/staging/src/k8s.io/client-go/tools/cache/testing/BUILD index f19cb7f3cff..b4816f9ed41 100644 --- a/staging/src/k8s.io/client-go/tools/cache/testing/BUILD +++ b/staging/src/k8s.io/client-go/tools/cache/testing/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["fake_controller_source_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/tools/cache/testing", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/BUILD b/staging/src/k8s.io/client-go/tools/clientcmd/BUILD index 77a8d2229f6..4e3ad95b15f 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/BUILD +++ b/staging/src/k8s.io/client-go/tools/clientcmd/BUILD @@ -15,8 +15,8 @@ go_test( "overrides_test.go", "validation_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/tools/clientcmd", - library = ":go_default_library", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/imdario/mergo:go_default_library", diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/api/BUILD b/staging/src/k8s.io/client-go/tools/clientcmd/api/BUILD index d46f4e28fb1..ecb65fa30eb 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/api/BUILD +++ b/staging/src/k8s.io/client-go/tools/clientcmd/api/BUILD @@ -12,8 +12,8 @@ go_test( "helpers_test.go", "types_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/tools/clientcmd/api", - library = ":go_default_library", deps = ["//vendor/github.com/ghodss/yaml:go_default_library"], ) diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/BUILD b/staging/src/k8s.io/client-go/tools/leaderelection/BUILD index 5ea32efdebf..d48e68690d4 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/BUILD +++ 
b/staging/src/k8s.io/client-go/tools/leaderelection/BUILD @@ -23,8 +23,8 @@ go_library( go_test( name = "go_default_test", srcs = ["leaderelection_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/tools/leaderelection", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/staging/src/k8s.io/client-go/tools/pager/BUILD b/staging/src/k8s.io/client-go/tools/pager/BUILD index c4a2d4d1cac..2bbc814890d 100644 --- a/staging/src/k8s.io/client-go/tools/pager/BUILD +++ b/staging/src/k8s.io/client-go/tools/pager/BUILD @@ -37,8 +37,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["pager_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/tools/pager", - library = ":go_default_library", deps = [ "//vendor/golang.org/x/net/context:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/staging/src/k8s.io/client-go/tools/portforward/BUILD b/staging/src/k8s.io/client-go/tools/portforward/BUILD index 1232b7ebb30..c9239116723 100644 --- a/staging/src/k8s.io/client-go/tools/portforward/BUILD +++ b/staging/src/k8s.io/client-go/tools/portforward/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["portforward_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/tools/portforward", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library"], ) diff --git a/staging/src/k8s.io/client-go/tools/record/BUILD b/staging/src/k8s.io/client-go/tools/record/BUILD index f89aa3e2896..5fbd06bbf1d 100644 --- a/staging/src/k8s.io/client-go/tools/record/BUILD +++ b/staging/src/k8s.io/client-go/tools/record/BUILD @@ -12,8 +12,8 @@ go_test( "event_test.go", "events_cache_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/tools/record", - library = 
":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/BUILD b/staging/src/k8s.io/client-go/tools/remotecommand/BUILD index e665af3f4d8..8d856e93e60 100644 --- a/staging/src/k8s.io/client-go/tools/remotecommand/BUILD +++ b/staging/src/k8s.io/client-go/tools/remotecommand/BUILD @@ -12,8 +12,8 @@ go_test( "v2_test.go", "v4_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/tools/remotecommand", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", diff --git a/staging/src/k8s.io/client-go/transport/BUILD b/staging/src/k8s.io/client-go/transport/BUILD index 91c3831b234..e5a5a570939 100644 --- a/staging/src/k8s.io/client-go/transport/BUILD +++ b/staging/src/k8s.io/client-go/transport/BUILD @@ -13,8 +13,8 @@ go_test( "round_trippers_test.go", "transport_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/transport", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/client-go/util/buffer/BUILD b/staging/src/k8s.io/client-go/util/buffer/BUILD index b5629d5cb97..da23420e936 100644 --- a/staging/src/k8s.io/client-go/util/buffer/BUILD +++ b/staging/src/k8s.io/client-go/util/buffer/BUILD @@ -10,8 +10,8 @@ go_library( go_test( name = "go_default_test", srcs = ["ring_growing_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/util/buffer", - library = ":go_default_library", deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], ) diff --git a/staging/src/k8s.io/client-go/util/cert/BUILD b/staging/src/k8s.io/client-go/util/cert/BUILD index 93ca7c9c6c5..121de49fb9d 100644 --- a/staging/src/k8s.io/client-go/util/cert/BUILD +++ b/staging/src/k8s.io/client-go/util/cert/BUILD @@ -13,8 +13,8 @@ 
go_test( "pem_test.go", ], data = glob(["testdata/**"]), + embed = [":go_default_library"], importpath = "k8s.io/client-go/util/cert", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/client-go/util/certificate/BUILD b/staging/src/k8s.io/client-go/util/certificate/BUILD index f10a2d9e21e..7f54ae3df0b 100644 --- a/staging/src/k8s.io/client-go/util/certificate/BUILD +++ b/staging/src/k8s.io/client-go/util/certificate/BUILD @@ -14,8 +14,8 @@ go_test( "certificate_manager_test.go", "certificate_store_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/util/certificate", - library = ":go_default_library", tags = ["automanaged"], deps = [ "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", diff --git a/staging/src/k8s.io/client-go/util/certificate/csr/BUILD b/staging/src/k8s.io/client-go/util/certificate/csr/BUILD index c6def5bbf0c..b45251b98fd 100644 --- a/staging/src/k8s.io/client-go/util/certificate/csr/BUILD +++ b/staging/src/k8s.io/client-go/util/certificate/csr/BUILD @@ -42,8 +42,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["csr_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/util/certificate/csr", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/client-go/util/flowcontrol/BUILD b/staging/src/k8s.io/client-go/util/flowcontrol/BUILD index d74b3f55446..410c369c6e6 100644 --- a/staging/src/k8s.io/client-go/util/flowcontrol/BUILD +++ b/staging/src/k8s.io/client-go/util/flowcontrol/BUILD @@ -12,8 +12,8 @@ go_test( "backoff_test.go", "throttle_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/util/flowcontrol", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library"], ) diff --git 
a/staging/src/k8s.io/client-go/util/integer/BUILD b/staging/src/k8s.io/client-go/util/integer/BUILD index 67f050e422d..6de17569a93 100644 --- a/staging/src/k8s.io/client-go/util/integer/BUILD +++ b/staging/src/k8s.io/client-go/util/integer/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["integer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/util/integer", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/client-go/util/jsonpath/BUILD b/staging/src/k8s.io/client-go/util/jsonpath/BUILD index 0856e29caff..2686d5ee86e 100644 --- a/staging/src/k8s.io/client-go/util/jsonpath/BUILD +++ b/staging/src/k8s.io/client-go/util/jsonpath/BUILD @@ -12,8 +12,8 @@ go_test( "jsonpath_test.go", "parser_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/util/jsonpath", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/client-go/util/retry/BUILD b/staging/src/k8s.io/client-go/util/retry/BUILD index 9f6f4b84886..bf008c9162f 100644 --- a/staging/src/k8s.io/client-go/util/retry/BUILD +++ b/staging/src/k8s.io/client-go/util/retry/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/util/retry", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/staging/src/k8s.io/client-go/util/testing/BUILD b/staging/src/k8s.io/client-go/util/testing/BUILD index 1280a1ed1ea..956b9d1ae8e 100644 --- a/staging/src/k8s.io/client-go/util/testing/BUILD +++ b/staging/src/k8s.io/client-go/util/testing/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["fake_handler_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/client-go/util/testing", - library = ":go_default_library", ) go_library( 
diff --git a/staging/src/k8s.io/client-go/util/workqueue/BUILD b/staging/src/k8s.io/client-go/util/workqueue/BUILD index 2abd2f82d8b..5cc87e6b36a 100644 --- a/staging/src/k8s.io/client-go/util/workqueue/BUILD +++ b/staging/src/k8s.io/client-go/util/workqueue/BUILD @@ -13,8 +13,8 @@ go_test( "delaying_queue_test.go", "rate_limitting_queue_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/client-go/util/workqueue", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD index c4734980e4f..c32c5bf367c 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "client-gen", + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/client-gen", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/args/BUILD b/staging/src/k8s.io/code-generator/cmd/client-gen/args/BUILD index fc7ed844947..d9068b6677a 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/args/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/args/BUILD @@ -38,8 +38,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["gvpackages_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/client-gen/args", - library = ":go_default_library", deps = [ "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/code-generator/cmd/client-gen/types:go_default_library", diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/util/BUILD b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/util/BUILD index e25c24631a5..e355fb513e2 100644 --- 
a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/util/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/util/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["tags_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/client-gen/generators/util", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/types/BUILD b/staging/src/k8s.io/code-generator/cmd/client-gen/types/BUILD index 681d4baf8c4..d5d6e8607e7 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/types/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/types/BUILD @@ -19,8 +19,8 @@ go_library( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/client-gen/types", - library = ":go_default_library", ) filegroup( diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD index a244fae00f8..2a2572b8a0e 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "conversion-gen", + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/conversion-gen", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD index e399aab102c..8b004d683a3 100644 --- a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "deepcopy-gen", + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/deepcopy-gen", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD 
b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD index 8306dd8992a..329da12e79c 100644 --- a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "defaulter-gen", + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/defaulter-gen", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/BUILD b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/BUILD index 4ad6c27bf79..8c334b01444 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "go-to-protobuf", + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/go-to-protobuf", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD index 5750481bf7a..17b25ab0066 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD @@ -33,8 +33,8 @@ go_library( go_test( name = "go_default_test", srcs = ["namer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/go-to-protobuf/protobuf", - library = ":go_default_library", ) filegroup( diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo/BUILD b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo/BUILD index 83383bf6c87..af0299b4a98 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "protoc-gen-gogo", + embed = [":go_default_library"], importpath = 
"k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD b/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD index 3e3e3a0356e..189cc09f12c 100644 --- a/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "import-boss", + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/import-boss", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD index cd80dab10fe..c5b4f917029 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "informer-gen", + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/informer-gen", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD index 5d44c37176b..1d7e9a65c73 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "lister-gen", + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/lister-gen", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD index 533b7cb5d01..2a464aae535 100644 --- a/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/openapi-gen/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "openapi-gen", + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/openapi-gen", - library = 
":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD index 74bfefd5444..50c3e45930b 100644 --- a/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "set-gen", + embed = [":go_default_library"], importpath = "k8s.io/code-generator/cmd/set-gen", - library = ":go_default_library", ) exports_files([ diff --git a/staging/src/k8s.io/kube-aggregator/BUILD b/staging/src/k8s.io/kube-aggregator/BUILD index d41da9d03de..df4b3e9ad69 100644 --- a/staging/src/k8s.io/kube-aggregator/BUILD +++ b/staging/src/k8s.io/kube-aggregator/BUILD @@ -9,6 +9,7 @@ load("//vendor/k8s.io/client-go/pkg/version:def.bzl", "version_x_defs") go_binary( name = "kube-aggregator", + embed = [":go_default_library"], gc_linkopts = [ "-linkmode", "external", @@ -16,7 +17,6 @@ go_binary( "-static", ], importpath = "k8s.io/kube-aggregator", - library = ":go_default_library", x_defs = version_x_defs(), ) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD index caddde3de3b..41f11c57c3b 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD @@ -12,8 +12,8 @@ go_test( "handler_apis_test.go", "handler_proxy_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kube-aggregator/pkg/apiserver", - library = ":go_default_library", deps = [ "//vendor/golang.org/x/net/websocket:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD index 3d8a16741b2..4d8f673cf16 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD +++ 
b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["autoregister_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kube-aggregator/pkg/controllers/autoregister", - library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD index 48dda150497..a85d6224f92 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD @@ -30,8 +30,8 @@ go_library( go_test( name = "go_default_test", srcs = ["aggregator_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kube-aggregator/pkg/controllers/openapi", - library = ":go_default_library", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD index c34f9c18053..dcd7cf8fa65 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD @@ -35,8 +35,8 @@ go_library( go_test( name = "go_default_test", srcs = ["available_controller_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kube-aggregator/pkg/controllers/status", - library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/staging/src/k8s.io/sample-apiserver/BUILD b/staging/src/k8s.io/sample-apiserver/BUILD index 0b6bad23c3d..12c7076a7cb 100644 --- 
a/staging/src/k8s.io/sample-apiserver/BUILD +++ b/staging/src/k8s.io/sample-apiserver/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "sample-apiserver", + embed = [":go_default_library"], importpath = "k8s.io/sample-apiserver", - library = ":go_default_library", ) go_library( diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/install/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/install/BUILD index 5b74397c466..88655f34413 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/install/BUILD +++ b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/install/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["roundtrip_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/sample-apiserver/pkg/apis/wardle/install", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library"], ) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apiserver/BUILD b/staging/src/k8s.io/sample-apiserver/pkg/apiserver/BUILD index a36767eb3ea..603c014fa53 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/sample-apiserver/pkg/apiserver/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["scheme_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/sample-apiserver/pkg/apiserver", - library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library"], ) diff --git a/staging/src/k8s.io/sample-controller/BUILD b/staging/src/k8s.io/sample-controller/BUILD index 3bdd428f777..fcda819a6cc 100644 --- a/staging/src/k8s.io/sample-controller/BUILD +++ b/staging/src/k8s.io/sample-controller/BUILD @@ -37,8 +37,8 @@ go_library( go_binary( name = "sample-controller", + embed = [":go_default_library"], importpath = "k8s.io/sample-controller", - library = ":go_default_library", visibility = ["//visibility:public"], ) diff --git 
a/staging/src/k8s.io/sample-controller/pkg/signals/BUILD b/staging/src/k8s.io/sample-controller/pkg/signals/BUILD index 190d148992e..d0a33feb0b4 100644 --- a/staging/src/k8s.io/sample-controller/pkg/signals/BUILD +++ b/staging/src/k8s.io/sample-controller/pkg/signals/BUILD @@ -4,9 +4,38 @@ go_library( name = "go_default_library", srcs = [ "signal.go", - "signal_posix.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "signal_posix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "signal_windows.go", ], "//conditions:default": [], diff --git a/test/conformance/BUILD b/test/conformance/BUILD index 828fb29402b..3d3b8018c8c 100644 --- a/test/conformance/BUILD +++ b/test/conformance/BUILD @@ -9,8 +9,8 @@ go_library( go_binary( name = "conformance", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/conformance", - library = ":go_default_library", visibility = ["//visibility:public"], ) diff --git a/test/e2e/BUILD b/test/e2e/BUILD index 8210ef377f7..a619f53166f 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["e2e_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/e2e", - library = ":go_default_library", tags = ["e2e"], deps = [ 
"//test/e2e/apimachinery:go_default_library", diff --git a/test/e2e/chaosmonkey/BUILD b/test/e2e/chaosmonkey/BUILD index d30fbdbf7fb..f7b42fc966b 100644 --- a/test/e2e/chaosmonkey/BUILD +++ b/test/e2e/chaosmonkey/BUILD @@ -16,8 +16,8 @@ go_library( go_test( name = "go_default_test", srcs = ["chaosmonkey_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/e2e/chaosmonkey", - library = ":go_default_library", tags = ["e2e"], ) diff --git a/test/e2e/framework/timer/BUILD b/test/e2e/framework/timer/BUILD index afdc516b975..27517f4d200 100644 --- a/test/e2e/framework/timer/BUILD +++ b/test/e2e/framework/timer/BUILD @@ -14,8 +14,8 @@ go_library( go_test( name = "go_default_test", srcs = ["timer_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/e2e/framework/timer", - library = ":go_default_library", deps = ["//vendor/github.com/onsi/gomega:go_default_library"], ) diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index d81922bedbd..64f27ed2008 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -49,8 +49,8 @@ go_library( go_test( name = "go_default_test", srcs = ["taints_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/e2e/scheduling", - library = ":go_default_library", deps = [ "//test/e2e/framework:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index df312031edf..e6431d05dee 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -19,7 +19,7 @@ go_library( "simple_mount.go", "util.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "benchmark_util.go", "node_problem_detector_linux.go", "resource_collector.go", @@ -58,7 +58,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", ] + select({ - 
"@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//pkg/api/v1/node:go_default_library", "//pkg/util/procfs:go_default_library", "//test/e2e/perftype:go_default_library", @@ -103,7 +103,7 @@ go_test( "summary_test.go", "volume_manager_test.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "container_manager_test.go", "density_test.go", "node_container_manager_test.go", @@ -112,8 +112,8 @@ go_test( ], "//conditions:default": [], }), + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/e2e_node", - library = ":go_default_library", tags = ["e2e"], deps = [ "//pkg/api/v1/node:go_default_library", @@ -164,7 +164,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//test/e2e/framework/metrics:go_default_library", "//test/utils:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/test/e2e_node/environment/BUILD b/test/e2e_node/environment/BUILD index 7c918ef0e3c..a1aea56fabe 100644 --- a/test/e2e_node/environment/BUILD +++ b/test/e2e_node/environment/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "environment", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/e2e_node/environment", - library = ":go_default_library", ) go_library( diff --git a/test/e2e_node/runner/local/BUILD b/test/e2e_node/runner/local/BUILD index a1479bc7fdd..2293886aad4 100644 --- a/test/e2e_node/runner/local/BUILD +++ b/test/e2e_node/runner/local/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "local", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/e2e_node/runner/local", - library = ":go_default_library", ) go_library( diff --git a/test/e2e_node/runner/remote/BUILD b/test/e2e_node/runner/remote/BUILD 
index 4d0db332a59..79d4dfb0c2b 100644 --- a/test/e2e_node/runner/remote/BUILD +++ b/test/e2e_node/runner/remote/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "remote", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/e2e_node/runner/remote", - library = ":go_default_library", ) go_library( diff --git a/test/e2e_node/system/BUILD b/test/e2e_node/system/BUILD index 8c5b957da02..b99caf0698c 100644 --- a/test/e2e_node/system/BUILD +++ b/test/e2e_node/system/BUILD @@ -17,10 +17,39 @@ go_library( "package_validator.go", "report.go", "types.go", - "types_unix.go", "validators.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "types_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "types_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "types_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "types_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "types_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "types_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "types_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "types_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "types_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "types_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "types_windows.go", ], "//conditions:default": [], @@ -45,8 +74,8 @@ go_test( "os_validator_test.go", "package_validator_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/e2e_node/system", - library = ":go_default_library", tags = ["e2e"], deps = [ "//vendor/github.com/docker/docker/api/types:go_default_library", diff --git a/test/images/clusterapi-tester/BUILD b/test/images/clusterapi-tester/BUILD index e5a03afe29b..c99cb3fc95e 100644 --- a/test/images/clusterapi-tester/BUILD +++ b/test/images/clusterapi-tester/BUILD @@ -8,8 +8,8 @@ 
load( go_binary( name = "clusterapi-tester", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/clusterapi-tester", - library = ":go_default_library", ) go_library( diff --git a/test/images/entrypoint-tester/BUILD b/test/images/entrypoint-tester/BUILD index abaf8e740fd..4616e5952d4 100644 --- a/test/images/entrypoint-tester/BUILD +++ b/test/images/entrypoint-tester/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "entrypoint-tester", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/entrypoint-tester", - library = ":go_default_library", ) go_library( diff --git a/test/images/fakegitserver/BUILD b/test/images/fakegitserver/BUILD index eda1526c909..bd35428c494 100644 --- a/test/images/fakegitserver/BUILD +++ b/test/images/fakegitserver/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "fakegitserver", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/fakegitserver", - library = ":go_default_library", ) go_library( diff --git a/test/images/goproxy/BUILD b/test/images/goproxy/BUILD index c9dba31c5af..f4497a29223 100644 --- a/test/images/goproxy/BUILD +++ b/test/images/goproxy/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "goproxy", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/goproxy", - library = ":go_default_library", ) go_library( diff --git a/test/images/liveness/BUILD b/test/images/liveness/BUILD index d385a02f18d..9bdc8d873a2 100644 --- a/test/images/liveness/BUILD +++ b/test/images/liveness/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "liveness", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/liveness", - library = ":go_default_library", ) go_library( diff --git a/test/images/logs-generator/BUILD b/test/images/logs-generator/BUILD index da5c6efa070..74441a32205 100644 --- a/test/images/logs-generator/BUILD +++ b/test/images/logs-generator/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "logs-generator", + embed = 
[":go_default_library"], importpath = "k8s.io/kubernetes/test/images/logs-generator", - library = ":go_default_library", ) go_library( diff --git a/test/images/mounttest/BUILD b/test/images/mounttest/BUILD index 23e2f8f2cdb..5b6c0ce023c 100644 --- a/test/images/mounttest/BUILD +++ b/test/images/mounttest/BUILD @@ -27,6 +27,6 @@ filegroup( go_binary( name = "mounttest", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/mounttest", - library = ":go_default_library", ) diff --git a/test/images/n-way-http/BUILD b/test/images/n-way-http/BUILD index a32c7ec330d..5567ca5358d 100644 --- a/test/images/n-way-http/BUILD +++ b/test/images/n-way-http/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "n-way-http", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/n-way-http", - library = ":go_default_library", ) go_library( diff --git a/test/images/net/BUILD b/test/images/net/BUILD index 9fbd62c85c1..e1fd501e19b 100644 --- a/test/images/net/BUILD +++ b/test/images/net/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "net", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/net", - library = ":go_default_library", ) go_library( diff --git a/test/images/netexec/BUILD b/test/images/netexec/BUILD index 741d1869aa4..6de769ecfb7 100644 --- a/test/images/netexec/BUILD +++ b/test/images/netexec/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "netexec", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/netexec", - library = ":go_default_library", ) go_library( diff --git a/test/images/nettest/BUILD b/test/images/nettest/BUILD index 6d1eccbd2bb..e439da6437c 100644 --- a/test/images/nettest/BUILD +++ b/test/images/nettest/BUILD @@ -33,6 +33,6 @@ filegroup( go_binary( name = "nettest", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/nettest", - library = ":go_default_library", ) diff --git a/test/images/no-snat-test-proxy/BUILD 
b/test/images/no-snat-test-proxy/BUILD index 2c2d960479c..3aa65fce09f 100644 --- a/test/images/no-snat-test-proxy/BUILD +++ b/test/images/no-snat-test-proxy/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "no-snat-test-proxy", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/no-snat-test-proxy", - library = ":go_default_library", ) go_library( diff --git a/test/images/no-snat-test/BUILD b/test/images/no-snat-test/BUILD index 45df92da701..9c8e29cc6d8 100644 --- a/test/images/no-snat-test/BUILD +++ b/test/images/no-snat-test/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "no-snat-test", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/no-snat-test", - library = ":go_default_library", ) go_library( diff --git a/test/images/nonewprivs/BUILD b/test/images/nonewprivs/BUILD index 7722f5d9276..fb1d2a20afe 100644 --- a/test/images/nonewprivs/BUILD +++ b/test/images/nonewprivs/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "nonewprivs", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/nonewprivs", - library = ":go_default_library", ) go_library( diff --git a/test/images/pets/peer-finder/BUILD b/test/images/pets/peer-finder/BUILD index d9b3f1402f0..cd468b08726 100644 --- a/test/images/pets/peer-finder/BUILD +++ b/test/images/pets/peer-finder/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "peer-finder", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/pets/peer-finder", - library = ":go_default_library", ) go_library( diff --git a/test/images/port-forward-tester/BUILD b/test/images/port-forward-tester/BUILD index 19612bade97..1f589d7f989 100644 --- a/test/images/port-forward-tester/BUILD +++ b/test/images/port-forward-tester/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "port-forward-tester", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/port-forward-tester", - library = ":go_default_library", ) go_library( diff --git 
a/test/images/porter/BUILD b/test/images/porter/BUILD index 614a1a08bb7..290b0749bab 100644 --- a/test/images/porter/BUILD +++ b/test/images/porter/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "porter", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/porter", - library = ":go_default_library", ) go_library( diff --git a/test/images/resource-consumer/BUILD b/test/images/resource-consumer/BUILD index bb84e85849c..59ebefec274 100644 --- a/test/images/resource-consumer/BUILD +++ b/test/images/resource-consumer/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "resource-consumer", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/resource-consumer", - library = ":go_default_library", ) go_library( diff --git a/test/images/resource-consumer/consume-cpu/BUILD b/test/images/resource-consumer/consume-cpu/BUILD index cb504d3efc0..fbb2b23c385 100644 --- a/test/images/resource-consumer/consume-cpu/BUILD +++ b/test/images/resource-consumer/consume-cpu/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "consume-cpu", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/resource-consumer/consume-cpu", - library = ":go_default_library", ) go_library( diff --git a/test/images/resource-consumer/controller/BUILD b/test/images/resource-consumer/controller/BUILD index d40191d8c18..5c74e58b1cf 100644 --- a/test/images/resource-consumer/controller/BUILD +++ b/test/images/resource-consumer/controller/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "controller", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/resource-consumer/controller", - library = ":go_default_library", ) go_library( diff --git a/test/images/serve-hostname/BUILD b/test/images/serve-hostname/BUILD index d1eb795eb37..f97f427bc4f 100644 --- a/test/images/serve-hostname/BUILD +++ b/test/images/serve-hostname/BUILD @@ -27,6 +27,6 @@ filegroup( go_binary( name = "serve-hostname", + embed = [":go_default_library"], 
importpath = "k8s.io/kubernetes/test/images/serve-hostname", - library = ":go_default_library", ) diff --git a/test/images/test-webserver/BUILD b/test/images/test-webserver/BUILD index f74e5641f21..19f660268fd 100644 --- a/test/images/test-webserver/BUILD +++ b/test/images/test-webserver/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "test-webserver", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/test-webserver", - library = ":go_default_library", ) go_library( diff --git a/test/images/webhook/BUILD b/test/images/webhook/BUILD index 249b807ad83..384cce5f429 100644 --- a/test/images/webhook/BUILD +++ b/test/images/webhook/BUILD @@ -24,8 +24,8 @@ go_library( go_binary( name = "webhook", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/webhook", - library = ":go_default_library", visibility = ["//visibility:public"], ) @@ -46,8 +46,8 @@ filegroup( go_test( name = "go_default_test", srcs = ["patch_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/images/webhook", - library = ":go_default_library", deps = [ "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/test/integration/deployment/BUILD b/test/integration/deployment/BUILD index 4e88a855b86..28c4a79b388 100644 --- a/test/integration/deployment/BUILD +++ b/test/integration/deployment/BUILD @@ -13,8 +13,8 @@ go_test( "deployment_test.go", "main_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/integration/deployment", - library = ":go_default_library", tags = ["integration"], deps = [ "//pkg/controller/deployment/util:go_default_library", diff --git a/test/integration/metrics/BUILD b/test/integration/metrics/BUILD index 768bf419fd2..c51ec2246b0 100644 --- a/test/integration/metrics/BUILD +++ b/test/integration/metrics/BUILD @@ -32,8 +32,8 @@ go_test( "main_test.go", "metrics_test.go", ], + embed = 
[":go_default_library"], importpath = "k8s.io/kubernetes/test/integration/metrics", - library = ":go_default_library", tags = ["integration"], deps = [ "//pkg/api/testapi:go_default_library", diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index 2e01d918b6f..9bebcc6055f 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -19,8 +19,8 @@ go_test( "taint_test.go", "volume_binding_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/integration/scheduler", - library = ":go_default_library", tags = ["integration"], deps = [ "//pkg/api/legacyscheme:go_default_library", diff --git a/test/integration/scheduler_perf/BUILD b/test/integration/scheduler_perf/BUILD index 435f10919aa..56b23a44f7b 100644 --- a/test/integration/scheduler_perf/BUILD +++ b/test/integration/scheduler_perf/BUILD @@ -38,8 +38,8 @@ go_test( "scheduler_bench_test.go", "scheduler_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/integration/scheduler_perf", - library = ":go_default_library", tags = ["integration"], deps = [ "//pkg/kubelet/apis:go_default_library", diff --git a/test/list/BUILD b/test/list/BUILD index 4635d8c897a..92227d64ba4 100644 --- a/test/list/BUILD +++ b/test/list/BUILD @@ -9,15 +9,15 @@ load( go_binary( name = "list", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/list", - library = ":go_default_library", ) go_test( name = "go_default_test", srcs = ["main_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/list", - library = ":go_default_library", ) go_library( diff --git a/test/soak/cauldron/BUILD b/test/soak/cauldron/BUILD index 2c82694ae37..777e64a7305 100644 --- a/test/soak/cauldron/BUILD +++ b/test/soak/cauldron/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "cauldron", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/soak/cauldron", - library = ":go_default_library", 
) go_library( diff --git a/test/soak/serve_hostnames/BUILD b/test/soak/serve_hostnames/BUILD index 86569314953..b51cd019fd2 100644 --- a/test/soak/serve_hostnames/BUILD +++ b/test/soak/serve_hostnames/BUILD @@ -8,8 +8,8 @@ load( go_binary( name = "serve_hostnames", + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/test/soak/serve_hostnames", - library = ":go_default_library", ) go_library( diff --git a/third_party/forked/etcd221/pkg/fileutil/BUILD b/third_party/forked/etcd221/pkg/fileutil/BUILD index 31fd2462035..dfeb309341f 100644 --- a/third_party/forked/etcd221/pkg/fileutil/BUILD +++ b/third_party/forked/etcd221/pkg/fileutil/BUILD @@ -16,28 +16,66 @@ go_test( "preallocate_test.go", "purge_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/third_party/forked/etcd221/pkg/fileutil", - library = ":go_default_library", ) go_library( name = "go_default_library", srcs = [ "fileutil.go", - "lock_unix.go", - "perallocate_unsupported.go", "purge.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "lock_unix.go", + "perallocate_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "lock_unix.go", + "perallocate_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "lock_unix.go", + "perallocate_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "lock_unix.go", + "perallocate_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "lock_unix.go", "preallocate.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "lock_unix.go", + "perallocate_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "lock_unix.go", + "perallocate_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "lock_unix.go", + "perallocate_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "lock_plan9.go", + 
"perallocate_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "lock_solaris.go", + "perallocate_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "lock_windows.go", + "perallocate_unsupported.go", ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/third_party/forked/etcd221/pkg/fileutil", - deps = ["//vendor/github.com/coreos/pkg/capnslog:go_default_library"], + deps = [ + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + ], ) filegroup( diff --git a/third_party/forked/etcd237/pkg/fileutil/BUILD b/third_party/forked/etcd237/pkg/fileutil/BUILD index 11afc094617..721b960b12d 100644 --- a/third_party/forked/etcd237/pkg/fileutil/BUILD +++ b/third_party/forked/etcd237/pkg/fileutil/BUILD @@ -16,8 +16,8 @@ go_test( "preallocate_test.go", "purge_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/third_party/forked/etcd237/pkg/fileutil", - library = ":go_default_library", ) go_library( @@ -25,22 +25,69 @@ go_library( srcs = [ "fileutil.go", "lock.go", - "lock_unix.go", - "perallocate_unsupported.go", "purge.go", - "sync.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "lock_unix.go", + "perallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "lock_unix.go", + "perallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "lock_unix.go", + "perallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "lock_unix.go", + "perallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "lock_unix.go", "preallocate.go", "sync_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "lock_unix.go", + "perallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "lock_unix.go", + 
"perallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "lock_unix.go", + "perallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "lock_plan9.go", + "perallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "lock_solaris.go", + "perallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "lock_windows.go", + "perallocate_unsupported.go", + "sync.go", ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/third_party/forked/etcd237/pkg/fileutil", - deps = ["//vendor/github.com/coreos/pkg/capnslog:go_default_library"], + deps = [ + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + ], ) filegroup( diff --git a/third_party/forked/golang/expansion/BUILD b/third_party/forked/golang/expansion/BUILD index 41a2e08cc63..f660ebfaad9 100644 --- a/third_party/forked/golang/expansion/BUILD +++ b/third_party/forked/golang/expansion/BUILD @@ -17,8 +17,8 @@ go_library( go_test( name = "go_default_test", srcs = ["expand_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/third_party/forked/golang/expansion", - library = ":go_default_library", deps = ["//pkg/apis/core:go_default_library"], ) diff --git a/third_party/forked/golang/reflect/BUILD b/third_party/forked/golang/reflect/BUILD index 19ed2746319..26ff8e4bff7 100644 --- a/third_party/forked/golang/reflect/BUILD +++ b/third_party/forked/golang/reflect/BUILD @@ -20,8 +20,8 @@ go_library( go_test( name = "go_default_test", srcs = ["deep_equal_test.go"], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/third_party/forked/golang/reflect", - library = ":go_default_library", ) filegroup( diff --git a/third_party/forked/gonum/graph/simple/BUILD b/third_party/forked/gonum/graph/simple/BUILD index d363239f26b..fb17c408703 100644 --- a/third_party/forked/gonum/graph/simple/BUILD +++ 
b/third_party/forked/gonum/graph/simple/BUILD @@ -15,8 +15,8 @@ go_test( "edgeholder_test.go", "undirected_test.go", ], + embed = [":go_default_library"], importpath = "k8s.io/kubernetes/third_party/forked/gonum/graph/simple", - library = ":go_default_library", deps = ["//third_party/forked/gonum/graph:go_default_library"], ) diff --git a/vendor/bitbucket.org/bertimus9/systemstat/BUILD b/vendor/bitbucket.org/bertimus9/systemstat/BUILD index 70507f160b1..5bec8520e75 100644 --- a/vendor/bitbucket.org/bertimus9/systemstat/BUILD +++ b/vendor/bitbucket.org/bertimus9/systemstat/BUILD @@ -4,12 +4,41 @@ go_library( name = "go_default_library", srcs = [ "systemstat.go", - "systemstat_ex.go", "utils.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "systemstat_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "systemstat_ex.go", + ], "//conditions:default": [], }), importpath = "bitbucket.org/bertimus9/systemstat", diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/BUILD b/vendor/github.com/Azure/go-ansiterm/winterm/BUILD index d1a144f05bc..31c24079a9d 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/BUILD +++ b/vendor/github.com/Azure/go-ansiterm/winterm/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") 
go_library( name = "go_default_library", srcs = select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "ansi.go", "api.go", "attr_translation.go", @@ -18,7 +18,7 @@ go_library( importpath = "github.com/Azure/go-ansiterm/winterm", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/Azure/go-ansiterm:go_default_library", "//vendor/github.com/sirupsen/logrus:go_default_library", ], diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD b/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD index 6b93feee9f5..6879a67fac9 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD @@ -5,12 +5,41 @@ go_library( srcs = [ "config.go", "devicetoken.go", - "msi.go", "persist.go", "sender.go", "token.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "msi.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "msi.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "msi.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "msi.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "msi.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "msi.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "msi.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "msi.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "msi.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "msi.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "msi_windows.go", ], "//conditions:default": [], diff --git a/vendor/github.com/JeffAshton/win_pdh/BUILD b/vendor/github.com/JeffAshton/win_pdh/BUILD index 8ae9ca0438a..9dde55f2bce 100644 --- a/vendor/github.com/JeffAshton/win_pdh/BUILD +++ 
b/vendor/github.com/JeffAshton/win_pdh/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "pdh.go", ], "//conditions:default": [], diff --git a/vendor/github.com/Microsoft/go-winio/BUILD b/vendor/github.com/Microsoft/go-winio/BUILD index cd2e317d072..17d79cb0699 100644 --- a/vendor/github.com/Microsoft/go-winio/BUILD +++ b/vendor/github.com/Microsoft/go-winio/BUILD @@ -7,7 +7,7 @@ go_library( "reparse.go", "syscall.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "backup.go", "file.go", "fileinfo.go", @@ -21,7 +21,7 @@ go_library( importpath = "github.com/Microsoft/go-winio", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/golang.org/x/sys/windows:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/BUILD b/vendor/github.com/aws/aws-sdk-go/aws/request/BUILD index 17604f67b07..50ac74ef45a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/BUILD +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/BUILD @@ -3,7 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "connection_reset_error.go", "handlers.go", "http_request.go", "offset_reader.go", @@ -17,7 +16,42 @@ go_library( "timeout_read_closer.go", "validation.go", "waiter.go", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "connection_reset_error.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "connection_reset_error.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "connection_reset_error.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "connection_reset_error.go", + ], + 
"@io_bazel_rules_go//go/platform:linux": [ + "connection_reset_error.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "connection_reset_error.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "connection_reset_error.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "connection_reset_error.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "connection_reset_error_other.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "connection_reset_error.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "connection_reset_error.go", + ], + "//conditions:default": [], + }), importpath = "github.com/aws/aws-sdk-go/aws/request", visibility = ["//visibility:public"], deps = [ diff --git a/vendor/github.com/boltdb/bolt/BUILD b/vendor/github.com/boltdb/bolt/BUILD index d29a61e9df0..a3885567cf2 100644 --- a/vendor/github.com/boltdb/bolt/BUILD +++ b/vendor/github.com/boltdb/bolt/BUILD @@ -3,8 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "bolt_unix.go", - "boltsync_unix.go", "bucket.go", "cursor.go", "db.go", @@ -15,21 +13,78 @@ go_library( "page.go", "tx.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ - "bolt_amd64.go", + "@io_bazel_rules_go//go/platform:android": [ + "bolt_unix.go", + "boltsync_unix.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "bolt_amd64.go", + "@io_bazel_rules_go//go/platform:darwin": [ + "bolt_unix.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "bolt_unix.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "bolt_unix.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "bolt_linux.go", + "bolt_unix.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "bolt_amd64.go", + "@io_bazel_rules_go//go/platform:nacl": [ + "bolt_unix.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "bolt_unix.go", + 
"boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "bolt_openbsd.go", + "bolt_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "bolt_unix_solaris.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "bolt_windows.go", ], "//conditions:default": [], + }) + select({ + "@io_bazel_rules_go//go/platform:386": [ + "bolt_386.go", + ], + "@io_bazel_rules_go//go/platform:amd64": [ + "bolt_amd64.go", + ], + "@io_bazel_rules_go//go/platform:arm": [ + "bolt_arm.go", + ], + "@io_bazel_rules_go//go/platform:arm64": [ + "bolt_arm64.go", + ], + "@io_bazel_rules_go//go/platform:ppc64": [ + "bolt_ppc64.go", + ], + "@io_bazel_rules_go//go/platform:ppc64le": [ + "bolt_ppc64le.go", + ], + "@io_bazel_rules_go//go/platform:s390x": [ + "bolt_s390x.go", + ], + "//conditions:default": [], }), importpath = "github.com/boltdb/bolt", visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/vendor/github.com/containerd/containerd/dialer/BUILD b/vendor/github.com/containerd/containerd/dialer/BUILD index 6824e72ac85..e20c8da7b8f 100644 --- a/vendor/github.com/containerd/containerd/dialer/BUILD +++ b/vendor/github.com/containerd/containerd/dialer/BUILD @@ -4,9 +4,38 @@ go_library( name = "go_default_library", srcs = [ "dialer.go", - "dialer_unix.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "dialer_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "dialer_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "dialer_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "dialer_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "dialer_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "dialer_unix.go", + ], + 
"@io_bazel_rules_go//go/platform:netbsd": [ + "dialer_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "dialer_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "dialer_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "dialer_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "dialer_windows.go", ], "//conditions:default": [], @@ -16,7 +45,7 @@ go_library( deps = [ "//vendor/github.com/pkg/errors:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/Microsoft/go-winio:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/BUILD b/vendor/github.com/containernetworking/cni/pkg/invoke/BUILD index 77ceab71a04..fc0ae0798b3 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/BUILD +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/BUILD @@ -9,13 +9,25 @@ go_library( "find.go", "raw_exec.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "os_unix.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ "os_unix.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:freebsd": [ + "os_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "os_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "os_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "os_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "os_windows.go", ], "//conditions:default": [], diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD index 137f4aacc21..e8456f33cca 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD +++ b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD @@ -5,13 +5,42 @@ go_library( srcs = [ 
"backend.go", "batch_tx.go", - "boltoption_default.go", "doc.go", "metrics.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "boltoption_default.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "boltoption_default.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "boltoption_default.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "boltoption_default.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "boltoption_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "boltoption_default.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "boltoption_default.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "boltoption_default.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "boltoption_default.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "boltoption_default.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "boltoption_default.go", + ], "//conditions:default": [], }), importpath = "github.com/coreos/etcd/mvcc/backend", diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/BUILD b/vendor/github.com/coreos/etcd/pkg/fileutil/BUILD index e442b3fbd95..058eb06def9 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/BUILD @@ -3,34 +3,92 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "dir_unix.go", "fileutil.go", "lock.go", - "lock_flock.go", - "lock_unix.go", "preallocate.go", - "preallocate_unsupported.go", "purge.go", - "sync.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", "preallocate_darwin.go", "sync_darwin.go", ], - 
"@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "dir_unix.go", + "lock_flock.go", "lock_linux.go", "preallocate_unix.go", "sync_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "dir_unix.go", + "lock_plan9.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "dir_unix.go", + "lock_solaris.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "dir_windows.go", "lock_windows.go", + "preallocate_unsupported.go", + "sync.go", ], "//conditions:default": [], }), importpath = "github.com/coreos/etcd/pkg/fileutil", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/pkg/capnslog:go_default_library"], + deps = [ + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + ], ) filegroup( diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD index 2455b3c3edd..7c84c7d5abe 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD @@ -3,14 +3,52 @@ load("@io_bazel_rules_go//go:def.bzl", 
"go_library") go_library( name = "go_default_library", srcs = [ - "isolate_stub.go", "netutil.go", - "routes.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "isolate_linux.go", "routes_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "isolate_stub.go", + "routes.go", + ], "//conditions:default": [], }), importpath = "github.com/coreos/etcd/pkg/netutil", @@ -20,7 +58,7 @@ go_library( "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/coreos/etcd/pkg/cpuutil:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/BUILD b/vendor/github.com/coreos/etcd/pkg/runtime/BUILD index 44884204d7a..80885d945ec 100644 --- a/vendor/github.com/coreos/etcd/pkg/runtime/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/runtime/BUILD @@ -2,12 +2,40 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "fds_other.go", - ] + select({ - 
"@io_bazel_rules_go//go/platform:linux_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "fds_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "fds_other.go", + ], "//conditions:default": [], }), importpath = "github.com/coreos/etcd/pkg/runtime", diff --git a/vendor/github.com/coreos/etcd/wal/BUILD b/vendor/github.com/coreos/etcd/wal/BUILD index d1c717a5637..ff8d471353e 100644 --- a/vendor/github.com/coreos/etcd/wal/BUILD +++ b/vendor/github.com/coreos/etcd/wal/BUILD @@ -11,9 +11,38 @@ go_library( "repair.go", "util.go", "wal.go", - "wal_unix.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "wal_unix.go", + ], + 
"@io_bazel_rules_go//go/platform:windows": [ "wal_windows.go", ], "//conditions:default": [], diff --git a/vendor/github.com/coreos/go-systemd/util/BUILD b/vendor/github.com/coreos/go-systemd/util/BUILD index f75507e6135..7826ab124c0 100644 --- a/vendor/github.com/coreos/go-systemd/util/BUILD +++ b/vendor/github.com/coreos/go-systemd/util/BUILD @@ -5,6 +5,7 @@ go_library( srcs = [ "util.go", "util_cgo.go", + "util_stub.go", ], cgo = True, importpath = "github.com/coreos/go-systemd/util", diff --git a/vendor/github.com/coreos/pkg/capnslog/BUILD b/vendor/github.com/coreos/pkg/capnslog/BUILD index 55019bf78a1..c00b0b02df4 100644 --- a/vendor/github.com/coreos/pkg/capnslog/BUILD +++ b/vendor/github.com/coreos/pkg/capnslog/BUILD @@ -5,21 +5,100 @@ go_library( srcs = [ "formatters.go", "glog_formatter.go", - "init.go", - "journald_formatter.go", "log_hijack.go", "logmap.go", "pkg_logger.go", - "syslog_formatter.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "init.go", + 
"journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "init_windows.go", ], "//conditions:default": [], }), importpath = "github.com/coreos/pkg/capnslog", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/go-systemd/journal:go_default_library"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/vendor/github.com/coreos/pkg/dlopen/BUILD b/vendor/github.com/coreos/pkg/dlopen/BUILD index 4e1144355fb..d8a835aa06b 100644 --- a/vendor/github.com/coreos/pkg/dlopen/BUILD +++ b/vendor/github.com/coreos/pkg/dlopen/BUILD @@ -5,7 +5,7 @@ go_library( srcs = [ "dlopen.go", 
] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "dlopen_example.go", ], "//conditions:default": [], diff --git a/vendor/github.com/d2g/dhcp4client/BUILD b/vendor/github.com/d2g/dhcp4client/BUILD index 50d0eafeb09..4442a3dc035 100644 --- a/vendor/github.com/d2g/dhcp4client/BUILD +++ b/vendor/github.com/d2g/dhcp4client/BUILD @@ -7,7 +7,7 @@ go_library( "generatexid.go", "inetsock.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "pktsock_linux.go", ], "//conditions:default": [], @@ -17,7 +17,7 @@ go_library( deps = [ "//vendor/github.com/d2g/dhcp4:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/daviddengcn/go-colortext/BUILD b/vendor/github.com/daviddengcn/go-colortext/BUILD index 5ba8f252db4..eea5e175282 100644 --- a/vendor/github.com/daviddengcn/go-colortext/BUILD +++ b/vendor/github.com/daviddengcn/go-colortext/BUILD @@ -4,9 +4,38 @@ go_library( name = "go_default_library", srcs = [ "ct.go", - "ct_ansi.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "ct_ansi.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "ct_ansi.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "ct_ansi.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "ct_ansi.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "ct_ansi.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "ct_ansi.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "ct_ansi.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "ct_ansi.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "ct_ansi.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "ct_ansi.go", + ], + 
"@io_bazel_rules_go//go/platform:windows": [ "ct_win.go", ], "//conditions:default": [], diff --git a/vendor/github.com/dchest/safefile/BUILD b/vendor/github.com/dchest/safefile/BUILD index f7640e74ce6..5378474f3e9 100644 --- a/vendor/github.com/dchest/safefile/BUILD +++ b/vendor/github.com/dchest/safefile/BUILD @@ -3,10 +3,40 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "rename.go", "safefile.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "rename.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "rename.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "rename.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "rename.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "rename.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "rename.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "rename.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "rename.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "rename_nonatomic.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "rename.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "rename.go", "rename_nonatomic.go", ], "//conditions:default": [], diff --git a/vendor/github.com/docker/docker/api/BUILD b/vendor/github.com/docker/docker/api/BUILD index d5b2371a6fc..717fb17de37 100644 --- a/vendor/github.com/docker/docker/api/BUILD +++ b/vendor/github.com/docker/docker/api/BUILD @@ -4,10 +4,39 @@ go_library( name = "go_default_library", srcs = [ "common.go", - "common_unix.go", "names.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "common_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "common_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "common_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + 
"common_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "common_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "common_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "common_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "common_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "common_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "common_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "common_windows.go", ], "//conditions:default": [], diff --git a/vendor/github.com/docker/docker/api/types/container/BUILD b/vendor/github.com/docker/docker/api/types/container/BUILD index de9ed87a2af..ac7fe21cfbe 100644 --- a/vendor/github.com/docker/docker/api/types/container/BUILD +++ b/vendor/github.com/docker/docker/api/types/container/BUILD @@ -10,10 +10,39 @@ go_library( "container_update.go", "container_wait.go", "host_config.go", - "hostconfig_unix.go", "waitcondition.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "hostconfig_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "hostconfig_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "hostconfig_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "hostconfig_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "hostconfig_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "hostconfig_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "hostconfig_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "hostconfig_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "hostconfig_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "hostconfig_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "hostconfig_windows.go", ], "//conditions:default": [], diff --git a/vendor/github.com/docker/docker/client/BUILD 
b/vendor/github.com/docker/docker/client/BUILD index 9a91a4fadae..0201de78d74 100644 --- a/vendor/github.com/docker/docker/client/BUILD +++ b/vendor/github.com/docker/docker/client/BUILD @@ -116,13 +116,22 @@ go_library( "volume_prune.go", "volume_remove.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "client_unix.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:freebsd": [ "client_unix.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "client_windows.go", ], "//conditions:default": [], diff --git a/vendor/github.com/docker/docker/pkg/ioutils/BUILD b/vendor/github.com/docker/docker/pkg/ioutils/BUILD index 6810d304a11..0b6d1f736cb 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/BUILD +++ b/vendor/github.com/docker/docker/pkg/ioutils/BUILD @@ -7,11 +7,40 @@ go_library( "bytespipe.go", "fswriters.go", "readers.go", - "temp_unix.go", "writeflusher.go", "writers.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "temp_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "temp_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "temp_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "temp_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "temp_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "temp_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "temp_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "temp_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "temp_unix.go", + ], + 
"@io_bazel_rules_go//go/platform:solaris": [ + "temp_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "temp_windows.go", ], "//conditions:default": [], @@ -21,7 +50,7 @@ go_library( deps = [ "//vendor/golang.org/x/net/context:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/docker/docker/pkg/longpath:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/docker/docker/pkg/mount/BUILD b/vendor/github.com/docker/docker/pkg/mount/BUILD index 88bb24cc6bc..6f48ef46a35 100644 --- a/vendor/github.com/docker/docker/pkg/mount/BUILD +++ b/vendor/github.com/docker/docker/pkg/mount/BUILD @@ -4,19 +4,69 @@ go_library( name = "go_default_library", srcs = [ "flags.go", - "flags_unsupported.go", "mount.go", - "mounter_unsupported.go", "mountinfo.go", - "mountinfo_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "flags_unsupported.go", + "mounter_unsupported.go", + "mountinfo_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "flags_unsupported.go", + "mounter_unsupported.go", + "mountinfo_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "flags_unsupported.go", + "mounter_unsupported.go", + "mountinfo_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "flags_freebsd.go", + "flags_unsupported.go", + "mounter_freebsd.go", + "mounter_unsupported.go", + "mountinfo_freebsd.go", + "mountinfo_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "flags_linux.go", "mounter_linux.go", "mountinfo_linux.go", "sharedsubtree_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "flags_unsupported.go", + "mounter_unsupported.go", + "mountinfo_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "flags_unsupported.go", + 
"mounter_unsupported.go", + "mountinfo_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "flags_unsupported.go", + "mounter_unsupported.go", + "mountinfo_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "flags_unsupported.go", + "mounter_unsupported.go", + "mountinfo_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "flags_unsupported.go", + "mounter_solaris.go", + "mounter_unsupported.go", + "mountinfo_solaris.go", + "mountinfo_unsupported.go", + "sharedsubtree_solaris.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "flags_unsupported.go", + "mounter_unsupported.go", "mountinfo_windows.go", ], "//conditions:default": [], @@ -25,7 +75,13 @@ go_library( importpath = "github.com/docker/docker/pkg/mount", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/docker/docker/pkg/symlink/BUILD b/vendor/github.com/docker/docker/pkg/symlink/BUILD index 1029760c17d..f15ddab2d8d 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/BUILD +++ b/vendor/github.com/docker/docker/pkg/symlink/BUILD @@ -4,9 +4,38 @@ go_library( name = "go_default_library", srcs = [ "fs.go", - "fs_unix.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "fs_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "fs_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "fs_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "fs_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "fs_unix.go", + ], + 
"@io_bazel_rules_go//go/platform:nacl": [ + "fs_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "fs_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "fs_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "fs_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "fs_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "fs_windows.go", ], "//conditions:default": [], @@ -16,7 +45,7 @@ go_library( deps = [ "//vendor/github.com/docker/docker/pkg/system:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/docker/docker/pkg/longpath:go_default_library", "//vendor/golang.org/x/sys/windows:go_default_library", ], diff --git a/vendor/github.com/docker/docker/pkg/system/BUILD b/vendor/github.com/docker/docker/pkg/system/BUILD index 75854dfb444..50bd68494da 100644 --- a/vendor/github.com/docker/docker/pkg/system/BUILD +++ b/vendor/github.com/docker/docker/pkg/system/BUILD @@ -4,37 +4,155 @@ go_library( name = "go_default_library", srcs = [ "chtimes.go", - "chtimes_unix.go", "errors.go", "exitcode.go", - "filesys.go", "init.go", - "lcow_unix.go", - "lstat_unix.go", "meminfo.go", - "meminfo_unsupported.go", - "mknod.go", "path.go", - "path_unix.go", "rm.go", - "stat_unix.go", - "umask.go", - "utimes_unsupported.go", - "xattrs_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "chtimes_unix.go", + "filesys.go", + "lcow_unix.go", + "lstat_unix.go", + "meminfo_unsupported.go", + "mknod.go", + "path_unix.go", + "stat_unix.go", + "umask.go", + "utimes_unsupported.go", + "xattrs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "chtimes_unix.go", + "filesys.go", + "lcow_unix.go", + "lstat_unix.go", + "meminfo_unsupported.go", + "mknod.go", + "path_unix.go", "process_unix.go", "stat_darwin.go", + "stat_unix.go", + "umask.go", + 
"utimes_unsupported.go", + "xattrs_unsupported.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "chtimes_unix.go", + "filesys.go", + "lcow_unix.go", + "lstat_unix.go", + "meminfo_unsupported.go", + "mknod.go", + "path_unix.go", + "stat_unix.go", + "umask.go", + "utimes_unsupported.go", + "xattrs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "chtimes_unix.go", + "filesys.go", + "lcow_unix.go", + "lstat_unix.go", + "meminfo_unsupported.go", + "mknod.go", + "path_unix.go", + "process_unix.go", + "stat_freebsd.go", + "stat_unix.go", + "syscall_unix.go", + "umask.go", + "utimes_freebsd.go", + "xattrs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "chtimes_unix.go", + "filesys.go", + "lcow_unix.go", + "lstat_unix.go", "meminfo_linux.go", + "mknod.go", + "path_unix.go", "process_unix.go", "stat_linux.go", + "stat_unix.go", "syscall_unix.go", + "umask.go", "utimes_linux.go", "xattrs_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "chtimes_unix.go", + "filesys.go", + "lcow_unix.go", + "lstat_unix.go", + "meminfo_unsupported.go", + "mknod.go", + "path_unix.go", + "stat_unix.go", + "umask.go", + "utimes_unsupported.go", + "xattrs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "chtimes_unix.go", + "filesys.go", + "lcow_unix.go", + "lstat_unix.go", + "meminfo_unsupported.go", + "mknod.go", + "path_unix.go", + "stat_unix.go", + "umask.go", + "utimes_unsupported.go", + "xattrs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "chtimes_unix.go", + "filesys.go", + "lcow_unix.go", + "lstat_unix.go", + "meminfo_unsupported.go", + "mknod.go", + "path_unix.go", + "stat_openbsd.go", + "stat_unix.go", + "umask.go", + "utimes_unsupported.go", + "xattrs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "chtimes_unix.go", + "filesys.go", + "lcow_unix.go", + 
"lstat_unix.go", + "meminfo_unsupported.go", + "mknod.go", + "path_unix.go", + "stat_unix.go", + "umask.go", + "utimes_unsupported.go", + "xattrs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "chtimes_unix.go", + "filesys.go", + "lcow_unix.go", + "lstat_unix.go", + "meminfo_solaris.go", + "mknod.go", + "path_unix.go", + "process_unix.go", + "stat_solaris.go", + "stat_unix.go", + "umask.go", + "utimes_unsupported.go", + "xattrs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "chtimes_windows.go", "events_windows.go", "filesys_windows.go", @@ -47,21 +165,62 @@ go_library( "stat_windows.go", "syscall_windows.go", "umask_windows.go", + "utimes_unsupported.go", + "xattrs_unsupported.go", ], "//conditions:default": [], }), cgo = True, + clinkopts = select({ + "@io_bazel_rules_go//go/platform:solaris": [ + "-lkstat", + ], + "//conditions:default": [], + }), + copts = select({ + "@io_bazel_rules_go//go/platform:solaris": [ + "-std=c99", + ], + "//conditions:default": [], + }), importpath = "github.com/docker/docker/pkg/system", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/docker/docker/pkg/mount:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "//vendor/github.com/docker/go-units:go_default_library", + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + 
"//vendor/github.com/docker/go-units:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/Microsoft/go-winio:go_default_library", "//vendor/github.com/sirupsen/logrus:go_default_library", "//vendor/golang.org/x/sys/windows:go_default_library", diff --git a/vendor/github.com/docker/docker/pkg/term/BUILD b/vendor/github.com/docker/docker/pkg/term/BUILD index a0d16b50ce4..77c3b107482 100644 --- a/vendor/github.com/docker/docker/pkg/term/BUILD +++ b/vendor/github.com/docker/docker/pkg/term/BUILD @@ -5,17 +5,63 @@ go_library( srcs = [ "ascii.go", "proxy.go", - "tc.go", - "term.go", - "winsize.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "tc.go", + "term.go", + "winsize.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "tc.go", + "term.go", "termios_bsd.go", + "winsize.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "tc.go", + "term.go", + "winsize.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "tc.go", + "term.go", + "termios_bsd.go", + "winsize.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "tc.go", + "term.go", "termios_linux.go", + "winsize.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "tc.go", + "term.go", + "winsize.go", + ], + 
"@io_bazel_rules_go//go/platform:netbsd": [ + "tc.go", + "term.go", + "winsize.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "tc.go", + "term.go", + "termios_bsd.go", + "winsize.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "tc.go", + "term.go", + "winsize.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "tc.go", + "tc_solaris_cgo.go", + "term.go", + "winsize_solaris_cgo.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "term_windows.go", ], "//conditions:default": [], @@ -23,10 +69,38 @@ go_library( cgo = True, importpath = "github.com/docker/docker/pkg/term", visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/Azure/go-ansiterm/winterm:go_default_library", 
"//vendor/github.com/docker/docker/pkg/term/windows:go_default_library", ], diff --git a/vendor/github.com/docker/docker/pkg/term/windows/BUILD b/vendor/github.com/docker/docker/pkg/term/windows/BUILD index 3de0b85824d..5477a5b5aa8 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/BUILD +++ b/vendor/github.com/docker/docker/pkg/term/windows/BUILD @@ -5,7 +5,7 @@ go_library( srcs = [ "windows.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "ansi_reader.go", "ansi_writer.go", "console.go", @@ -18,7 +18,7 @@ go_library( "//vendor/github.com/Azure/go-ansiterm:go_default_library", "//vendor/github.com/sirupsen/logrus:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/Azure/go-ansiterm/winterm:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/docker/go-connections/sockets/BUILD b/vendor/github.com/docker/go-connections/sockets/BUILD index 23965c13e93..42273ac0d49 100644 --- a/vendor/github.com/docker/go-connections/sockets/BUILD +++ b/vendor/github.com/docker/go-connections/sockets/BUILD @@ -6,11 +6,49 @@ go_library( "inmem_socket.go", "proxy.go", "sockets.go", - "sockets_unix.go", "tcp_socket.go", - "unix_socket.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "sockets_unix.go", + "unix_socket.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "sockets_unix.go", + "unix_socket.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "sockets_unix.go", + "unix_socket.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "sockets_unix.go", + "unix_socket.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "sockets_unix.go", + "unix_socket.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "sockets_unix.go", + "unix_socket.go", + ], + 
"@io_bazel_rules_go//go/platform:netbsd": [ + "sockets_unix.go", + "unix_socket.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "sockets_unix.go", + "unix_socket.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "sockets_unix.go", + "unix_socket.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "sockets_unix.go", + "unix_socket.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "sockets_windows.go", ], "//conditions:default": [], @@ -20,7 +58,7 @@ go_library( deps = [ "//vendor/golang.org/x/net/proxy:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/Microsoft/go-winio:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/docker/libnetwork/ipvs/BUILD b/vendor/github.com/docker/libnetwork/ipvs/BUILD index 23a6aae5fee..a878d43e1ad 100644 --- a/vendor/github.com/docker/libnetwork/ipvs/BUILD +++ b/vendor/github.com/docker/libnetwork/ipvs/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "constants.go", "ipvs.go", "netlink.go", @@ -13,7 +13,7 @@ go_library( importpath = "github.com/docker/libnetwork/ipvs", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/sirupsen/logrus:go_default_library", "//vendor/github.com/vishvananda/netlink/nl:go_default_library", "//vendor/github.com/vishvananda/netns:go_default_library", diff --git a/vendor/github.com/fsnotify/fsnotify/BUILD b/vendor/github.com/fsnotify/fsnotify/BUILD index 54ed45cea5e..f0823d4e35b 100644 --- a/vendor/github.com/fsnotify/fsnotify/BUILD +++ b/vendor/github.com/fsnotify/fsnotify/BUILD @@ -2,18 +2,49 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") 
go_library( name = "go_default_library", - srcs = [ - "fsnotify.go", - ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "fsnotify.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "fsnotify.go", "kqueue.go", "open_mode_darwin.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "fsnotify.go", + "kqueue.go", + "open_mode_bsd.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "fsnotify.go", + "kqueue.go", + "open_mode_bsd.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "fsnotify.go", "inotify.go", "inotify_poller.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "fsnotify.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "fsnotify.go", + "kqueue.go", + "open_mode_bsd.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "fsnotify.go", + "kqueue.go", + "open_mode_bsd.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "fen.go", + "fsnotify.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "fsnotify.go", "windows.go", ], "//conditions:default": [], @@ -21,10 +52,22 @@ go_library( importpath = "github.com/fsnotify/fsnotify", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:openbsd": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/godbus/dbus/BUILD b/vendor/github.com/godbus/dbus/BUILD index 03e63f4dc3c..c4339751d0f 100644 --- a/vendor/github.com/godbus/dbus/BUILD +++ b/vendor/github.com/godbus/dbus/BUILD @@ -8,7 +8,6 @@ go_library( "auth_sha1.go", "call.go", "conn.go", - "conn_other.go", "dbus.go", "decoder.go", "doc.go", @@ -20,18 +19,56 @@ go_library( "object.go", "sig.go", "transport_generic.go", - "transport_unix.go", "variant.go", "variant_lexer.go", "variant_parser.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "conn_other.go", + "transport_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ "conn_darwin.go", "transport_darwin.go", + "transport_unix.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "conn_other.go", + "transport_unix.go", + "transport_unixcred_dragonfly.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "conn_other.go", + "transport_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "conn_other.go", + "transport_unix.go", "transport_unixcred_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "conn_other.go", + "transport_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "conn_other.go", + "transport_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "conn_other.go", + "transport_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "conn_other.go", + "transport_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "conn_other.go", + "transport_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "conn_other.go", + ], "//conditions:default": [], }), cgo = True, diff --git a/vendor/github.com/google/cadvisor/fs/BUILD b/vendor/github.com/google/cadvisor/fs/BUILD index 72d6b14ce07..a5d4bdae7b9 
100644 --- a/vendor/github.com/google/cadvisor/fs/BUILD +++ b/vendor/github.com/google/cadvisor/fs/BUILD @@ -5,7 +5,7 @@ go_library( srcs = [ "types.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "fs.go", ], "//conditions:default": [], @@ -13,7 +13,7 @@ go_library( importpath = "github.com/google/cadvisor/fs", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/docker/docker/pkg/mount:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/devicemapper:go_default_library", diff --git a/vendor/github.com/google/certificate-transparency/go/x509/BUILD b/vendor/github.com/google/certificate-transparency/go/x509/BUILD index cf3f89191cc..89ddc43348f 100644 --- a/vendor/github.com/google/certificate-transparency/go/x509/BUILD +++ b/vendor/github.com/google/certificate-transparency/go/x509/BUILD @@ -12,26 +12,42 @@ go_library( "verify.go", "x509.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "root_darwin.go", + "root_stub.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ "root_unix.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:freebsd": [ + "root_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "root_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "root_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "root_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "root_plan9.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "root_windows.go", ], "//conditions:default": [], }), cgo = True, clinkopts = select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ 
"-framework CoreFoundation -framework Security", ], "//conditions:default": [], }), copts = select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "-mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1060", ], "//conditions:default": [], diff --git a/vendor/github.com/howeyc/gopass/BUILD b/vendor/github.com/howeyc/gopass/BUILD index 71fc2ee5234..13c81e84512 100644 --- a/vendor/github.com/howeyc/gopass/BUILD +++ b/vendor/github.com/howeyc/gopass/BUILD @@ -4,11 +4,80 @@ go_library( name = "go_default_library", srcs = [ "pass.go", - "terminal.go", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "terminal_solaris.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "terminal.go", + ], + "//conditions:default": [], + }), importpath = "github.com/howeyc/gopass", visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/crypto/ssh/terminal:go_default_library"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ 
+ "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/vendor/github.com/inconshreveable/mousetrap/BUILD b/vendor/github.com/inconshreveable/mousetrap/BUILD index 2b8e060c6e9..f687bb2512b 100644 --- a/vendor/github.com/inconshreveable/mousetrap/BUILD +++ b/vendor/github.com/inconshreveable/mousetrap/BUILD @@ -2,10 +2,38 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "trap_others.go", - ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "trap_others.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "trap_others.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "trap_others.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "trap_others.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "trap_others.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "trap_others.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "trap_others.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "trap_others.go", + ], + 
"@io_bazel_rules_go//go/platform:plan9": [ + "trap_others.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "trap_others.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "trap_windows.go", "trap_windows_1.4.go", ], diff --git a/vendor/github.com/jteeuwen/go-bindata/go-bindata/BUILD b/vendor/github.com/jteeuwen/go-bindata/go-bindata/BUILD index 07f2d1ce955..494eb6a414a 100644 --- a/vendor/github.com/jteeuwen/go-bindata/go-bindata/BUILD +++ b/vendor/github.com/jteeuwen/go-bindata/go-bindata/BUILD @@ -14,8 +14,8 @@ go_library( go_binary( name = "go-bindata", + embed = [":go_default_library"], importpath = "github.com/jteeuwen/go-bindata/go-bindata", - library = ":go_default_library", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/kardianos/osext/BUILD b/vendor/github.com/kardianos/osext/BUILD index b3ea55b8001..d67ff74992a 100644 --- a/vendor/github.com/kardianos/osext/BUILD +++ b/vendor/github.com/kardianos/osext/BUILD @@ -5,13 +5,31 @@ go_library( srcs = [ "osext.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "osext_sysctl.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ "osext_procfs.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:freebsd": [ + "osext_sysctl.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "osext_procfs.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "osext_procfs.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "osext_procfs.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "osext_plan9.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "osext_procfs.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "osext_windows.go", ], "//conditions:default": [], diff --git a/vendor/github.com/kr/pty/BUILD b/vendor/github.com/kr/pty/BUILD index 8f703442009..e3733e1b578 100644 --- 
a/vendor/github.com/kr/pty/BUILD +++ b/vendor/github.com/kr/pty/BUILD @@ -5,21 +5,80 @@ go_library( srcs = [ "doc.go", "ioctl.go", - "pty_unsupported.go", "run.go", "util.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ "ioctl_bsd.go", "pty_darwin.go", - "ztypes_amd64.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "ioctl_bsd.go", + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "ioctl_bsd.go", + "pty_freebsd.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "pty_linux.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "ioctl_bsd.go", + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "ioctl_bsd.go", + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "pty_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "pty_unsupported.go", + ], + "//conditions:default": [], + }) + select({ + "@io_bazel_rules_go//go/platform:386": [ + "ztypes_386.go", + ], + "@io_bazel_rules_go//go/platform:amd64": [ "ztypes_amd64.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "ztypes_amd64.go", + "@io_bazel_rules_go//go/platform:arm": [ + "ztypes_arm.go", + ], + "@io_bazel_rules_go//go/platform:arm64": [ + "ztypes_arm64.go", + ], + "@io_bazel_rules_go//go/platform:ppc64": [ + "ztypes_ppc64.go", + ], + "@io_bazel_rules_go//go/platform:ppc64le": [ + "ztypes_ppc64le.go", + ], + "@io_bazel_rules_go//go/platform:s390x": [ + "ztypes_s390x.go", + ], + "//conditions:default": [], + }) + select({ + "@io_bazel_rules_go//go/platform:freebsd_386": [ + "ztypes_freebsd_386.go", + ], + "@io_bazel_rules_go//go/platform:freebsd_amd64": [ 
+ "ztypes_freebsd_amd64.go", + ], + "@io_bazel_rules_go//go/platform:freebsd_arm": [ + "ztypes_freebsd_arm.go", ], "//conditions:default": [], }), diff --git a/vendor/github.com/miekg/dns/BUILD b/vendor/github.com/miekg/dns/BUILD index 859dd77c9b2..c721621636f 100644 --- a/vendor/github.com/miekg/dns/BUILD +++ b/vendor/github.com/miekg/dns/BUILD @@ -32,17 +32,52 @@ go_library( "tlsa.go", "tsig.go", "types.go", - "udp.go", - "udp_other.go", "update.go", "xfr.go", "zmsg.go", "ztypes.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "udp.go", + "udp_other.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "udp.go", + "udp_other.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "udp.go", + "udp_other.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "udp.go", + "udp_other.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "udp.go", "udp_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "udp.go", + "udp_other.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "udp.go", + "udp_other.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "udp.go", + "udp_other.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "udp_plan9.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "udp.go", + "udp_other.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "udp_other.go", "udp_windows.go", ], "//conditions:default": [], diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/BUILD b/vendor/github.com/onsi/ginkgo/ginkgo/BUILD index 279447c5321..9f90145eb02 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/BUILD +++ b/vendor/github.com/onsi/ginkgo/ginkgo/BUILD @@ -35,8 +35,8 @@ go_library( go_binary( name = "ginkgo", + embed = [":go_default_library"], importpath = "github.com/onsi/ginkgo/ginkgo", - library = ":go_default_library", visibility = ["//visibility:public"], ) diff --git 
a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/BUILD b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/BUILD index e3050f15c47..4912b767086 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/BUILD +++ b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/BUILD @@ -5,13 +5,28 @@ go_library( srcs = [ "interrupt_handler.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "sigquit_swallower_unix.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ "sigquit_swallower_unix.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:freebsd": [ + "sigquit_swallower_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "sigquit_swallower_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "sigquit_swallower_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "sigquit_swallower_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "sigquit_swallower_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "sigquit_swallower_windows.go", ], "//conditions:default": [], diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/BUILD b/vendor/github.com/onsi/ginkgo/internal/remote/BUILD index c9a33667a31..bddb9ca9f68 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/BUILD +++ b/vendor/github.com/onsi/ginkgo/internal/remote/BUILD @@ -7,16 +7,138 @@ go_library( "forwarding_reporter.go", "output_interceptor.go", "server.go", - "syscall_dup_unix.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "output_interceptor_unix.go", ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "output_interceptor_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "output_interceptor_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "output_interceptor_unix.go", + ], + 
"@io_bazel_rules_go//go/platform:netbsd": [ + "output_interceptor_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "output_interceptor_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "output_interceptor_unix.go", + "syscall_dup_solaris.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "output_interceptor_win.go", + ], + "//conditions:default": [], + }) + select({ + "@io_bazel_rules_go//go/platform:android_386": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:android_amd64": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:android_arm": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:android_arm64": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin_386": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin_arm": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin_arm64": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly_amd64": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd_386": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd_amd64": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd_arm": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux_386": [ + "syscall_dup_unix.go", + ], "@io_bazel_rules_go//go/platform:linux_amd64": [ - "output_interceptor_unix.go", + "syscall_dup_unix.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "output_interceptor_win.go", + "@io_bazel_rules_go//go/platform:linux_arm": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux_arm64": [ + "syscall_dup_linux_arm64.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips64": [ + "syscall_dup_unix.go", + ], + 
"@io_bazel_rules_go//go/platform:linux_mips64le": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux_mipsle": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux_ppc64": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux_ppc64le": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux_s390x": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl_386": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl_amd64p32": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl_arm": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd_386": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd_amd64": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd_arm": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd_386": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd_amd64": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd_arm": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9_386": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9_amd64": [ + "syscall_dup_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9_arm": [ + "syscall_dup_unix.go", ], "//conditions:default": [], }), @@ -28,7 +150,12 @@ go_library( "//vendor/github.com/onsi/ginkgo/reporters:go_default_library", "//vendor/github.com/onsi/ginkgo/reporters/stenographer:go_default_library", "//vendor/github.com/onsi/ginkgo/types:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/BUILD b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/BUILD index 
670394f9774..3088ed1c331 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/BUILD +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/BUILD @@ -3,10 +3,39 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "colorable_others.go", "noncolorable.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "colorable_others.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "colorable_others.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "colorable_others.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "colorable_others.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "colorable_others.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "colorable_others.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "colorable_others.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "colorable_others.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "colorable_others.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "colorable_others.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "colorable_windows.go", ], "//conditions:default": [], @@ -14,7 +43,7 @@ go_library( importpath = "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/BUILD b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/BUILD index 19150011002..bb13c628622 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/BUILD +++ 
b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/BUILD @@ -5,19 +5,37 @@ go_library( srcs = [ "doc.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "isatty_bsd.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:freebsd": [ + "isatty_bsd.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "isatty_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:netbsd": [ + "isatty_bsd.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "isatty_bsd.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "isatty_solaris.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "isatty_windows.go", ], "//conditions:default": [], }), importpath = "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty", visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/vendor/github.com/opencontainers/runc/libcontainer/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/BUILD index b9370c1ec8e..78d32f02804 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/BUILD @@ -12,7 +12,11 @@ go_library( "stats.go", "sync.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:freebsd": [ + "console_freebsd.go", + "stats_freebsd.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "capabilities_linux.go", "compat_1.5_linux.go", "console_linux.go", @@ -32,7 +36,12 @@ go_library( "state_linux.go", "stats_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:solaris": [ + "console_solaris.go", + "container_solaris.go", + "stats_solaris.go", + ], + 
"@io_bazel_rules_go//go/platform:windows": [ "console_windows.go", "container_windows.go", "criu_opts_windows.go", @@ -47,7 +56,7 @@ go_library( "//vendor/github.com/opencontainers/runc/libcontainer/stacktrace:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/utils:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/docker/docker/pkg/mount:go_default_library", "//vendor/github.com/docker/docker/pkg/symlink:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD index cb7b06dc7e4..db8484c7c8c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD @@ -2,7 +2,42 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["apparmor_disabled.go"], + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "apparmor_disabled.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "apparmor_disabled.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "apparmor_disabled.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "apparmor_disabled.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "apparmor_disabled.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "apparmor_disabled.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "apparmor_disabled.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "apparmor_disabled.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "apparmor_disabled.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "apparmor_disabled.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "apparmor_disabled.go", + ], + "//conditions:default": [], + }), 
cgo = True, importpath = "github.com/opencontainers/runc/libcontainer/apparmor", visibility = ["//visibility:public"], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD index a0a04f1cf81..95dd26783e7 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD @@ -2,20 +2,48 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "cgroups_unsupported.go", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "cgroups.go", "stats.go", "utils.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "cgroups_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "cgroups_unsupported.go", + ], "//conditions:default": [], }), importpath = "github.com/opencontainers/runc/libcontainer/cgroups", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/docker/go-units:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", ], diff --git 
a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/BUILD index f64d682eabe..5a646950da0 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/BUILD @@ -2,10 +2,20 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "fs_unsupported.go", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "apply_raw.go", "blkio.go", "cpu.go", @@ -22,12 +32,30 @@ go_library( "pids.go", "utils.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "fs_unsupported.go", + ], "//conditions:default": [], }), importpath = "github.com/opencontainers/runc/libcontainer/cgroups/fs", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", diff --git 
a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/BUILD index 73326d81f6e..82406c88c1a 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "rootless.go", ], "//conditions:default": [], @@ -11,7 +11,7 @@ go_library( importpath = "github.com/opencontainers/runc/libcontainer/cgroups/rootless", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/BUILD index bf89b78c74c..1edd23adf4f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/BUILD @@ -2,25 +2,92 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "apply_nosystemd.go", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "apply_nosystemd.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "apply_nosystemd.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "apply_nosystemd.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + 
"apply_nosystemd.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "apply_systemd.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "apply_nosystemd.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "apply_nosystemd.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "apply_nosystemd.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "apply_nosystemd.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "apply_nosystemd.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "apply_nosystemd.go", + ], "//conditions:default": [], }), importpath = "github.com/opencontainers/runc/libcontainer/cgroups/systemd", visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/coreos/go-systemd/dbus:go_default_library", 
"//vendor/github.com/coreos/go-systemd/util:go_default_library", "//vendor/github.com/godbus/dbus:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", ], "//conditions:default": [], }), diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD index 0b532942161..01bd8e8c03b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD +++ 
b/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD @@ -4,26 +4,69 @@ go_library( name = "go_default_library", srcs = [ "blkio_device.go", - "cgroup_unsupported.go", "config.go", "device.go", "hugepage_limit.go", "interface_priority_map.go", "mount.go", "namespaces.go", - "namespaces_syscall_unsupported.go", - "namespaces_unsupported.go", "network.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "cgroup_unsupported.go", + "namespaces_syscall_unsupported.go", + "namespaces_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "cgroup_unsupported.go", + "namespaces_syscall_unsupported.go", + "namespaces_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "cgroup_unsupported.go", + "namespaces_syscall_unsupported.go", + "namespaces_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "device_defaults.go", + "namespaces_syscall_unsupported.go", + "namespaces_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "cgroup_linux.go", "config_linux.go", "device_defaults.go", "namespaces_linux.go", "namespaces_syscall.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ + "cgroup_unsupported.go", + "namespaces_syscall_unsupported.go", + "namespaces_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "cgroup_unsupported.go", + "namespaces_syscall_unsupported.go", + "namespaces_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "cgroup_unsupported.go", + "namespaces_syscall_unsupported.go", + "namespaces_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "cgroup_unsupported.go", + "namespaces_syscall_unsupported.go", + "namespaces_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "cgroup_unsupported.go", + "namespaces_syscall_unsupported.go", + "namespaces_unsupported.go", + ], + 
"@io_bazel_rules_go//go/platform:windows": [ "cgroup_windows.go", + "namespaces_unsupported.go", ], "//conditions:default": [], }), @@ -33,7 +76,7 @@ go_library( "//vendor/github.com/opencontainers/runtime-spec/specs-go:go_default_library", "//vendor/github.com/sirupsen/logrus:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/keys/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/keys/BUILD index 175883b5cb7..0ccecb93f79 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/keys/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/keys/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "keyctl.go", ], "//conditions:default": [], @@ -11,7 +11,7 @@ go_library( importpath = "github.com/opencontainers/runc/libcontainer/keys", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/BUILD index b75de219600..f4b7e1fb426 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/BUILD @@ -4,11 +4,47 @@ go_library( name = "go_default_library", srcs = [ "config.go", - "seccomp_unsupported.go", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "seccomp_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + 
"seccomp_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "seccomp_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "seccomp_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "seccomp_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "seccomp_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "seccomp_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "seccomp_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "seccomp_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "seccomp_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "seccomp_unsupported.go", + ], + "//conditions:default": [], + }), importpath = "github.com/opencontainers/runc/libcontainer/seccomp", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library"], + deps = [ + "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + ], ) filegroup( diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD index 545e460904e..7ba719fd195 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD @@ -4,16 +4,76 @@ go_library( name = "go_default_library", srcs = [ "proc.go", - "unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "linux.go", - "syscall_linux_64.go", + "@io_bazel_rules_go//go/platform:android": [ + "sysconfig_notcgo.go", + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "sysconfig_notcgo.go", + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "sysconfig_notcgo.go", + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ "sysconfig.go", + "sysconfig_notcgo.go", + 
"unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "linux.go", + "sysconfig.go", + "sysconfig_notcgo.go", "xattrs_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ "sysconfig_notcgo.go", + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "sysconfig_notcgo.go", + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "sysconfig_notcgo.go", + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "sysconfig_notcgo.go", + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "sysconfig_notcgo.go", + "unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "sysconfig_notcgo.go", + "unsupported.go", + ], + "//conditions:default": [], + }) + select({ + "@io_bazel_rules_go//go/platform:linux_386": [ + "syscall_linux_386.go", + ], + "@io_bazel_rules_go//go/platform:linux_amd64": [ + "syscall_linux_64.go", + ], + "@io_bazel_rules_go//go/platform:linux_arm": [ + "syscall_linux_arm.go", + ], + "@io_bazel_rules_go//go/platform:linux_arm64": [ + "syscall_linux_64.go", + ], + "@io_bazel_rules_go//go/platform:linux_ppc64": [ + "syscall_linux_64.go", + ], + "@io_bazel_rules_go//go/platform:linux_ppc64le": [ + "syscall_linux_64.go", + ], + "@io_bazel_rules_go//go/platform:linux_s390x": [ + "syscall_linux_64.go", ], "//conditions:default": [], }), @@ -21,7 +81,7 @@ go_library( importpath = "github.com/opencontainers/runc/libcontainer/system", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD index b5c876f6eb6..d70f0eabb1d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD +++ 
b/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD @@ -4,24 +4,65 @@ go_library( name = "go_default_library", srcs = [ "lookup.go", - "lookup_unsupported.go", "user.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "lookup_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ "lookup_unix.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ "lookup_unix.go", ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "lookup_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "lookup_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "lookup_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "lookup_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "lookup_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "lookup_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "lookup_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "lookup_unsupported.go", + ], "//conditions:default": [], }), importpath = "github.com/opencontainers/runc/libcontainer/user", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:solaris": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/utils/BUILD index 5837fc6883f..8dd78ccef89 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/BUILD @@ -4,16 +4,45 @@ go_library( name = "go_default_library", srcs = [ "utils.go", - "utils_unix.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "utils_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "utils_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "utils_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "utils_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "cmsg.go", + "utils_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "utils_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "utils_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "utils_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "utils_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "utils_unix.go", ], "//conditions:default": [], }), importpath = "github.com/opencontainers/runc/libcontainer/utils", visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/sys/unix:go_default_library"], + deps = [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], ) filegroup( diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/BUILD b/vendor/github.com/opencontainers/selinux/go-selinux/BUILD index 108a7fc2342..06d41a6b0b3 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/BUILD +++ b/vendor/github.com/opencontainers/selinux/go-selinux/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = 
"go_default_library", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "selinux.go", "xattrs.go", ], diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/BUILD b/vendor/github.com/opencontainers/selinux/go-selinux/label/BUILD index 451f734cb75..4185ed76249 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/BUILD +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/BUILD @@ -2,7 +2,42 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["label.go"], + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "label.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "label.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "label.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "label.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "label.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "label.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "label.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "label.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "label.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "label.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "label.go", + ], + "//conditions:default": [], + }), importpath = "github.com/opencontainers/selinux/go-selinux/label", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/pkg/sftp/BUILD b/vendor/github.com/pkg/sftp/BUILD index cba7fdb1432..db838e7e686 100644 --- a/vendor/github.com/pkg/sftp/BUILD +++ b/vendor/github.com/pkg/sftp/BUILD @@ -9,23 +9,75 @@ go_library( "packet.go", "release.go", "server.go", - "server_statvfs_stubs.go", "sftp.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "attrs_stubs.go", + "server_statvfs_stubs.go", + "server_stubs.go", + ], + 
"@io_bazel_rules_go//go/platform:darwin": [ + "attrs_stubs.go", "attrs_unix.go", "server_statvfs_darwin.go", "server_statvfs_impl.go", + "server_stubs.go", "server_unix.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "attrs_stubs.go", + "attrs_unix.go", + "server_statvfs_stubs.go", + "server_stubs.go", + "server_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "attrs_stubs.go", + "attrs_unix.go", + "server_statvfs_stubs.go", + "server_stubs.go", + "server_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "attrs_stubs.go", "attrs_unix.go", "server_statvfs_impl.go", "server_statvfs_linux.go", + "server_stubs.go", "server_unix.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:nacl": [ "attrs_stubs.go", + "server_statvfs_stubs.go", + "server_stubs.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "attrs_stubs.go", + "attrs_unix.go", + "server_statvfs_stubs.go", + "server_stubs.go", + "server_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "attrs_stubs.go", + "attrs_unix.go", + "server_statvfs_stubs.go", + "server_stubs.go", + "server_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "server_statvfs_stubs.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "attrs_stubs.go", + "attrs_unix.go", + "server_statvfs_stubs.go", + "server_stubs.go", + "server_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "attrs_stubs.go", + "server_statvfs_stubs.go", "server_stubs.go", ], "//conditions:default": [], diff --git a/vendor/github.com/seccomp/libseccomp-golang/BUILD b/vendor/github.com/seccomp/libseccomp-golang/BUILD index a5bb375e580..5e2c35f1876 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/BUILD +++ b/vendor/github.com/seccomp/libseccomp-golang/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = select({ - 
"@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "seccomp.go", "seccomp_internal.go", ], @@ -11,7 +11,7 @@ go_library( }), cgo = True, clinkopts = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "-lseccomp", "-lseccomp", ], diff --git a/vendor/github.com/sirupsen/logrus/BUILD b/vendor/github.com/sirupsen/logrus/BUILD index 4efd4d3ba2a..c20a77c250a 100644 --- a/vendor/github.com/sirupsen/logrus/BUILD +++ b/vendor/github.com/sirupsen/logrus/BUILD @@ -15,12 +15,24 @@ go_library( "text_formatter.go", "writer.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "terminal_bsd.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "terminal_bsd.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "terminal_bsd.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "terminal_linux.go", ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "terminal_bsd.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "terminal_bsd.go", + ], "//conditions:default": [], }), importpath = "github.com/sirupsen/logrus", @@ -28,10 +40,22 @@ go_library( deps = [ "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:openbsd": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/spf13/afero/BUILD b/vendor/github.com/spf13/afero/BUILD index 2aae323c6c4..d8a292e095c 100644 --- a/vendor/github.com/spf13/afero/BUILD +++ b/vendor/github.com/spf13/afero/BUILD @@ -6,7 +6,6 @@ go_library( "afero.go", "basepath.go", "cacheOnReadFs.go", - "const_win_unix.go", "copyOnWriteFs.go", "httpFs.go", "ioutil.go", @@ -20,9 +19,39 @@ go_library( "unionFile.go", "util.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "const_win_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ "const_bsds.go", ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "const_bsds.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "const_bsds.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "const_win_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "const_win_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "const_bsds.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "const_bsds.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "const_win_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "const_win_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "const_win_unix.go", + ], "//conditions:default": [], }), importpath = "github.com/spf13/afero", diff --git a/vendor/github.com/spf13/cobra/BUILD b/vendor/github.com/spf13/cobra/BUILD index 6efad8565dc..5c16bacbce2 100644 --- a/vendor/github.com/spf13/cobra/BUILD +++ b/vendor/github.com/spf13/cobra/BUILD @@ -6,9 +6,38 @@ go_library( "bash_completions.go", "cobra.go", "command.go", - "command_notwin.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "command_notwin.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "command_notwin.go", + ], + 
"@io_bazel_rules_go//go/platform:dragonfly": [ + "command_notwin.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "command_notwin.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "command_notwin.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "command_notwin.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "command_notwin.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "command_notwin.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "command_notwin.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "command_notwin.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "command_win.go", ], "//conditions:default": [], @@ -18,7 +47,7 @@ go_library( deps = [ "//vendor/github.com/spf13/pflag:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/inconshreveable/mousetrap:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/storageos/go-api/BUILD b/vendor/github.com/storageos/go-api/BUILD index 938677061e8..8422771fa29 100644 --- a/vendor/github.com/storageos/go-api/BUILD +++ b/vendor/github.com/storageos/go-api/BUILD @@ -4,7 +4,6 @@ go_library( name = "go_default_library", srcs = [ "client.go", - "client_unix.go", "controller.go", "event.go", "namespace.go", @@ -16,7 +15,37 @@ go_library( "validation.go", "volume.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "client_unix.go", + ], + 
"@io_bazel_rules_go//go/platform:openbsd": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "client_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "client_windows.go", ], "//conditions:default": [], @@ -27,7 +56,7 @@ go_library( "//vendor/github.com/gorilla/websocket:go_default_library", "//vendor/github.com/storageos/go-api/types:go_default_library", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/Microsoft/go-winio:go_default_library", ], "//conditions:default": [], diff --git a/vendor/github.com/syndtr/gocapability/capability/BUILD b/vendor/github.com/syndtr/gocapability/capability/BUILD index 3d1995516e4..de18a5ecbf6 100644 --- a/vendor/github.com/syndtr/gocapability/capability/BUILD +++ b/vendor/github.com/syndtr/gocapability/capability/BUILD @@ -4,14 +4,43 @@ go_library( name = "go_default_library", srcs = [ "capability.go", - "capability_noop.go", "enum.go", "enum_gen.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "capability_noop.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "capability_noop.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "capability_noop.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "capability_noop.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "capability_linux.go", "syscall_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "capability_noop.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "capability_noop.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "capability_noop.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "capability_noop.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "capability_noop.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "capability_noop.go", + 
], "//conditions:default": [], }), importpath = "github.com/syndtr/gocapability/capability", diff --git a/vendor/github.com/tools/godep/BUILD b/vendor/github.com/tools/godep/BUILD index 35a0f42230e..5c75f8aae65 100644 --- a/vendor/github.com/tools/godep/BUILD +++ b/vendor/github.com/tools/godep/BUILD @@ -36,8 +36,8 @@ go_library( go_binary( name = "godep", + embed = [":go_default_library"], importpath = "github.com/tools/godep", - library = ":go_default_library", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/vishvananda/netlink/BUILD b/vendor/github.com/vishvananda/netlink/BUILD index 7ed3253d2f2..c7a7de7342c 100644 --- a/vendor/github.com/vishvananda/netlink/BUILD +++ b/vendor/github.com/vishvananda/netlink/BUILD @@ -5,28 +5,54 @@ go_library( srcs = [ "addr.go", "class.go", - "conntrack_unspecified.go", "filter.go", "fou.go", - "fou_unspecified.go", - "genetlink_unspecified.go", - "handle_unspecified.go", "link.go", "neigh.go", "netlink.go", - "netlink_unspecified.go", "order.go", "protinfo.go", "qdisc.go", "route.go", - "route_unspecified.go", "rule.go", "socket.go", "xfrm.go", "xfrm_policy.go", "xfrm_state.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "conntrack_unspecified.go", + "fou_unspecified.go", + "genetlink_unspecified.go", + "handle_unspecified.go", + "netlink_unspecified.go", + "route_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "conntrack_unspecified.go", + "fou_unspecified.go", + "genetlink_unspecified.go", + "handle_unspecified.go", + "netlink_unspecified.go", + "route_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "conntrack_unspecified.go", + "fou_unspecified.go", + "genetlink_unspecified.go", + "handle_unspecified.go", + "netlink_unspecified.go", + "route_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "conntrack_unspecified.go", + "fou_unspecified.go", + 
"genetlink_unspecified.go", + "handle_unspecified.go", + "netlink_unspecified.go", + "route_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "addr_linux.go", "bpf_linux.go", "bridge_linux.go", @@ -50,6 +76,54 @@ go_library( "xfrm_policy_linux.go", "xfrm_state_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "conntrack_unspecified.go", + "fou_unspecified.go", + "genetlink_unspecified.go", + "handle_unspecified.go", + "netlink_unspecified.go", + "route_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "conntrack_unspecified.go", + "fou_unspecified.go", + "genetlink_unspecified.go", + "handle_unspecified.go", + "netlink_unspecified.go", + "route_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "conntrack_unspecified.go", + "fou_unspecified.go", + "genetlink_unspecified.go", + "handle_unspecified.go", + "netlink_unspecified.go", + "route_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "conntrack_unspecified.go", + "fou_unspecified.go", + "genetlink_unspecified.go", + "handle_unspecified.go", + "netlink_unspecified.go", + "route_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "conntrack_unspecified.go", + "fou_unspecified.go", + "genetlink_unspecified.go", + "handle_unspecified.go", + "netlink_unspecified.go", + "route_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "conntrack_unspecified.go", + "fou_unspecified.go", + "genetlink_unspecified.go", + "handle_unspecified.go", + "netlink_unspecified.go", + "route_unspecified.go", + ], "//conditions:default": [], }), cgo = True, @@ -57,9 +131,43 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/vishvananda/netlink/nl:go_default_library", - "//vendor/github.com/vishvananda/netns:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + 
"//vendor/github.com/vishvananda/netns:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/github.com/vishvananda/netns:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/github.com/vishvananda/netns:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/vishvananda/netns:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/vishvananda/netns:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/github.com/vishvananda/netns:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/github.com/vishvananda/netns:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/github.com/vishvananda/netns:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/github.com/vishvananda/netns:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/github.com/vishvananda/netns:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/vishvananda/netns:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/vendor/github.com/vishvananda/netlink/nl/BUILD b/vendor/github.com/vishvananda/netlink/nl/BUILD index 11f21534cc4..b203fd4be8a 100644 --- a/vendor/github.com/vishvananda/netlink/nl/BUILD +++ b/vendor/github.com/vishvananda/netlink/nl/BUILD @@ -3,10 +3,21 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "nl_unspecified.go", "syscall.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "nl_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "nl_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "nl_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + 
"nl_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "addr_linux.go", "bridge_linux.go", "conntrack_linux.go", @@ -22,12 +33,30 @@ go_library( "xfrm_policy_linux.go", "xfrm_state_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "nl_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "nl_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "nl_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "nl_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "nl_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "nl_unspecified.go", + ], "//conditions:default": [], }), importpath = "github.com/vishvananda/netlink/nl", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/vishvananda/netns:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", ], diff --git a/vendor/github.com/vishvananda/netns/BUILD b/vendor/github.com/vishvananda/netns/BUILD index 03987084b58..1b0b236f71e 100644 --- a/vendor/github.com/vishvananda/netns/BUILD +++ b/vendor/github.com/vishvananda/netns/BUILD @@ -4,11 +4,40 @@ go_library( name = "go_default_library", srcs = [ "netns.go", - "netns_unspecified.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "netns_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "netns_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "netns_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "netns_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "netns_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "netns_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "netns_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + 
"netns_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "netns_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "netns_unspecified.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "netns_unspecified.go", + ], "//conditions:default": [], }), importpath = "github.com/vishvananda/netns", diff --git a/vendor/github.com/vmware/photon-controller-go-sdk/SSPI/BUILD b/vendor/github.com/vmware/photon-controller-go-sdk/SSPI/BUILD index a48aae3acf3..4754568a402 100644 --- a/vendor/github.com/vmware/photon-controller-go-sdk/SSPI/BUILD +++ b/vendor/github.com/vmware/photon-controller-go-sdk/SSPI/BUILD @@ -2,10 +2,38 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "sspi_unsupported.go", - ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "sspi.go", ], "//conditions:default": [], diff --git a/vendor/github.com/vmware/photon-controller-go-sdk/photon/lightwave/BUILD b/vendor/github.com/vmware/photon-controller-go-sdk/photon/lightwave/BUILD index 2a87b4d3d3f..7584ef54d0d 100644 --- 
a/vendor/github.com/vmware/photon-controller-go-sdk/photon/lightwave/BUILD +++ b/vendor/github.com/vmware/photon-controller-go-sdk/photon/lightwave/BUILD @@ -5,9 +5,38 @@ go_library( srcs = [ "jwttoken.go", "oidcclient.go", - "oidcclient_sspi_unsupported.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "oidcclient_sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "oidcclient_sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "oidcclient_sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "oidcclient_sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "oidcclient_sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "oidcclient_sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "oidcclient_sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "oidcclient_sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "oidcclient_sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "oidcclient_sspi_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "oidcclient_sspi.go", ], "//conditions:default": [], @@ -15,7 +44,7 @@ go_library( importpath = "github.com/vmware/photon-controller-go-sdk/photon/lightwave", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/vmware/photon-controller-go-sdk/SSPI:go_default_library", ], "//conditions:default": [], diff --git a/vendor/golang.org/x/crypto/curve25519/BUILD b/vendor/golang.org/x/crypto/curve25519/BUILD index 2c5638b2dcc..959d1692330 100644 --- a/vendor/golang.org/x/crypto/curve25519/BUILD +++ b/vendor/golang.org/x/crypto/curve25519/BUILD @@ -3,10 +3,12 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = 
"go_default_library", srcs = [ - "curve25519.go", "doc.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:386": [ + "curve25519.go", + ], + "@io_bazel_rules_go//go/platform:amd64": [ "const_amd64.h", "const_amd64.s", "cswap_amd64.s", @@ -16,25 +18,35 @@ go_library( "mul_amd64.s", "square_amd64.s", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "const_amd64.h", - "const_amd64.s", - "cswap_amd64.s", - "freeze_amd64.s", - "ladderstep_amd64.s", - "mont25519_amd64.go", - "mul_amd64.s", - "square_amd64.s", + "@io_bazel_rules_go//go/platform:amd64p32": [ + "curve25519.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "const_amd64.h", - "const_amd64.s", - "cswap_amd64.s", - "freeze_amd64.s", - "ladderstep_amd64.s", - "mont25519_amd64.go", - "mul_amd64.s", - "square_amd64.s", + "@io_bazel_rules_go//go/platform:arm": [ + "curve25519.go", + ], + "@io_bazel_rules_go//go/platform:arm64": [ + "curve25519.go", + ], + "@io_bazel_rules_go//go/platform:mips": [ + "curve25519.go", + ], + "@io_bazel_rules_go//go/platform:mips64": [ + "curve25519.go", + ], + "@io_bazel_rules_go//go/platform:mips64le": [ + "curve25519.go", + ], + "@io_bazel_rules_go//go/platform:mipsle": [ + "curve25519.go", + ], + "@io_bazel_rules_go//go/platform:ppc64": [ + "curve25519.go", + ], + "@io_bazel_rules_go//go/platform:ppc64le": [ + "curve25519.go", + ], + "@io_bazel_rules_go//go/platform:s390x": [ + "curve25519.go", ], "//conditions:default": [], }), diff --git a/vendor/golang.org/x/crypto/poly1305/BUILD b/vendor/golang.org/x/crypto/poly1305/BUILD index d320fdfca10..53ccf324a03 100644 --- a/vendor/golang.org/x/crypto/poly1305/BUILD +++ b/vendor/golang.org/x/crypto/poly1305/BUILD @@ -4,19 +4,103 @@ go_library( name = "go_default_library", srcs = [ "poly1305.go", - "sum_ref.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:amd64": [ "sum_amd64.go", "sum_amd64.s", ], - 
"@io_bazel_rules_go//go/platform:linux_amd64": [ - "sum_amd64.go", - "sum_amd64.s", + "//conditions:default": [], + }) + select({ + "@io_bazel_rules_go//go/platform:android_386": [ + "sum_ref.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "sum_amd64.go", - "sum_amd64.s", + "@io_bazel_rules_go//go/platform:android_arm": [ + "sum_arm.go", + "sum_arm.s", + ], + "@io_bazel_rules_go//go/platform:android_arm64": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:darwin_386": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:darwin_arm": [ + "sum_arm.go", + "sum_arm.s", + ], + "@io_bazel_rules_go//go/platform:darwin_arm64": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:freebsd_386": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:freebsd_arm": [ + "sum_arm.go", + "sum_arm.s", + ], + "@io_bazel_rules_go//go/platform:linux_386": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:linux_arm": [ + "sum_arm.go", + "sum_arm.s", + ], + "@io_bazel_rules_go//go/platform:linux_arm64": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips64": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips64le": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:linux_mipsle": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:linux_ppc64": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:linux_ppc64le": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:linux_s390x": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:nacl_386": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:nacl_amd64p32": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:nacl_arm": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:netbsd_386": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:netbsd_arm": [ + "sum_arm.go", + "sum_arm.s", + ], + "@io_bazel_rules_go//go/platform:openbsd_386": [ + 
"sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:openbsd_arm": [ + "sum_arm.go", + "sum_arm.s", + ], + "@io_bazel_rules_go//go/platform:plan9_386": [ + "sum_ref.go", + ], + "@io_bazel_rules_go//go/platform:plan9_arm": [ + "sum_arm.go", + "sum_arm.s", + ], + "@io_bazel_rules_go//go/platform:windows_386": [ + "sum_ref.go", ], "//conditions:default": [], }), diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/BUILD b/vendor/golang.org/x/crypto/salsa20/salsa/BUILD index fc814b4046d..abe5b83aa36 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/BUILD +++ b/vendor/golang.org/x/crypto/salsa20/salsa/BUILD @@ -5,19 +5,43 @@ go_library( srcs = [ "hsalsa20.go", "salsa208.go", - "salsa20_ref.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:386": [ + "salsa20_ref.go", + ], + "@io_bazel_rules_go//go/platform:amd64": [ "salsa2020_amd64.s", "salsa20_amd64.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "salsa2020_amd64.s", - "salsa20_amd64.go", + "@io_bazel_rules_go//go/platform:amd64p32": [ + "salsa20_ref.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "salsa2020_amd64.s", - "salsa20_amd64.go", + "@io_bazel_rules_go//go/platform:arm": [ + "salsa20_ref.go", + ], + "@io_bazel_rules_go//go/platform:arm64": [ + "salsa20_ref.go", + ], + "@io_bazel_rules_go//go/platform:mips": [ + "salsa20_ref.go", + ], + "@io_bazel_rules_go//go/platform:mips64": [ + "salsa20_ref.go", + ], + "@io_bazel_rules_go//go/platform:mips64le": [ + "salsa20_ref.go", + ], + "@io_bazel_rules_go//go/platform:mipsle": [ + "salsa20_ref.go", + ], + "@io_bazel_rules_go//go/platform:ppc64": [ + "salsa20_ref.go", + ], + "@io_bazel_rules_go//go/platform:ppc64le": [ + "salsa20_ref.go", + ], + "@io_bazel_rules_go//go/platform:s390x": [ + "salsa20_ref.go", ], "//conditions:default": [], }), diff --git a/vendor/golang.org/x/crypto/ssh/terminal/BUILD b/vendor/golang.org/x/crypto/ssh/terminal/BUILD index 8230d6d09c3..540ec7afd65 
100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/BUILD +++ b/vendor/golang.org/x/crypto/ssh/terminal/BUILD @@ -5,15 +5,37 @@ go_library( srcs = [ "terminal.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "util.go", "util_bsd.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ + "util.go", + "util_bsd.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "util.go", + "util_bsd.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "util.go", "util_linux.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:netbsd": [ + "util.go", + "util_bsd.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "util.go", + "util_bsd.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "util_plan9.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "util_solaris.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ "util_windows.go", ], "//conditions:default": [], @@ -21,13 +43,28 @@ go_library( importpath = "golang.org/x/crypto/ssh/terminal", visibility = ["//visibility:public"], deps = select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:darwin": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:dragonfly": [ "//vendor/golang.org/x/sys/unix:go_default_library", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ "//vendor/golang.org/x/sys/windows:go_default_library", ], "//conditions:default": [], diff --git a/vendor/golang.org/x/exp/inotify/BUILD b/vendor/golang.org/x/exp/inotify/BUILD index 2564a0f4fac..90cd3ecd0aa 100644 --- a/vendor/golang.org/x/exp/inotify/BUILD +++ b/vendor/golang.org/x/exp/inotify/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:linux": [ "inotify_linux.go", ], "//conditions:default": [], diff --git a/vendor/golang.org/x/sys/unix/BUILD b/vendor/golang.org/x/sys/unix/BUILD index 4448575b4f6..a474a47ac9d 100644 --- a/vendor/golang.org/x/sys/unix/BUILD +++ b/vendor/golang.org/x/sys/unix/BUILD @@ -6,12 +6,10 @@ go_library( "env_unset.go", "file_unix.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ - "asm_darwin_amd64.s", + "@io_bazel_rules_go//go/platform:darwin": [ "constants.go", "dev_darwin.go", "dirent.go", - "endian_little.go", "env_unix.go", "flock.go", "pagesize_unix.go", @@ -21,22 +19,52 @@ go_library( "syscall.go", "syscall_bsd.go", "syscall_darwin.go", - "syscall_darwin_amd64.go", "syscall_unix.go", "syscall_unix_gc.go", "timestruct.go", - "zerrors_darwin_amd64.go", - "zsyscall_darwin_amd64.go", - "zsysnum_darwin_amd64.go", - "ztypes_darwin_amd64.go", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "asm_linux_amd64.s", + "@io_bazel_rules_go//go/platform:dragonfly": [ + "constants.go", + "dev_dragonfly.go", + "dirent.go", + "env_unix.go", + "flock.go", + "pagesize_unix.go", + "race0.go", + "sockcmsg_unix.go", + "str.go", + "syscall.go", + "syscall_bsd.go", + "syscall_dragonfly.go", + "syscall_no_getwd.go", + "syscall_unix.go", + "syscall_unix_gc.go", + "timestruct.go", + ], + 
"@io_bazel_rules_go//go/platform:freebsd": [ + "cap_freebsd.go", + "constants.go", + "dev_freebsd.go", + "dirent.go", + "env_unix.go", + "flock.go", + "pagesize_unix.go", + "race0.go", + "sockcmsg_unix.go", + "str.go", + "syscall.go", + "syscall_bsd.go", + "syscall_freebsd.go", + "syscall_no_getwd.go", + "syscall_unix.go", + "syscall_unix_gc.go", + "timestruct.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "bluetooth_linux.go", "constants.go", "dev_linux.go", "dirent.go", - "endian_little.go", "env_unix.go", "flock.go", "pagesize_unix.go", @@ -46,19 +74,332 @@ go_library( "str.go", "syscall.go", "syscall_linux.go", - "syscall_linux_amd64.go", - "syscall_linux_amd64_gc.go", "syscall_unix.go", "syscall_unix_gc.go", "timestruct.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "dirent.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "constants.go", + "dev_netbsd.go", + "dirent.go", + "env_unix.go", + "flock.go", + "pagesize_unix.go", + "race0.go", + "sockcmsg_unix.go", + "str.go", + "syscall.go", + "syscall_bsd.go", + "syscall_netbsd.go", + "syscall_no_getwd.go", + "syscall_unix.go", + "syscall_unix_gc.go", + "timestruct.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "constants.go", + "dev_openbsd.go", + "dirent.go", + "env_unix.go", + "flock.go", + "pagesize_unix.go", + "race0.go", + "sockcmsg_unix.go", + "str.go", + "syscall.go", + "syscall_bsd.go", + "syscall_no_getwd.go", + "syscall_openbsd.go", + "syscall_unix.go", + "syscall_unix_gc.go", + "timestruct.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "constants.go", + "dirent.go", + "env_unix.go", + "pagesize_unix.go", + "race0.go", + "sockcmsg_unix.go", + "str.go", + "syscall.go", + "syscall_solaris.go", + "syscall_unix.go", + "syscall_unix_gc.go", + "timestruct.go", + ], + "//conditions:default": [], + }) + select({ + "@io_bazel_rules_go//go/platform:386": [ + "endian_little.go", + ], + "@io_bazel_rules_go//go/platform:amd64": [ + "endian_little.go", + ], + 
"@io_bazel_rules_go//go/platform:amd64p32": [ + "endian_little.go", + ], + "@io_bazel_rules_go//go/platform:arm": [ + "endian_little.go", + ], + "@io_bazel_rules_go//go/platform:arm64": [ + "endian_little.go", + ], + "@io_bazel_rules_go//go/platform:mips": [ + "endian_big.go", + ], + "@io_bazel_rules_go//go/platform:mips64": [ + "endian_big.go", + ], + "@io_bazel_rules_go//go/platform:mips64le": [ + "endian_little.go", + ], + "@io_bazel_rules_go//go/platform:mipsle": [ + "endian_little.go", + ], + "@io_bazel_rules_go//go/platform:ppc64": [ + "endian_big.go", + ], + "@io_bazel_rules_go//go/platform:ppc64le": [ + "endian_little.go", + ], + "@io_bazel_rules_go//go/platform:s390x": [ + "endian_big.go", + ], + "//conditions:default": [], + }) + select({ + "@io_bazel_rules_go//go/platform:darwin_386": [ + "asm_darwin_386.s", + "syscall_darwin_386.go", + "zerrors_darwin_386.go", + "zsyscall_darwin_386.go", + "zsysnum_darwin_386.go", + "ztypes_darwin_386.go", + ], + "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "asm_darwin_amd64.s", + "syscall_darwin_amd64.go", + "zerrors_darwin_amd64.go", + "zsyscall_darwin_amd64.go", + "zsysnum_darwin_amd64.go", + "ztypes_darwin_amd64.go", + ], + "@io_bazel_rules_go//go/platform:darwin_arm": [ + "asm_darwin_arm.s", + "syscall_darwin_arm.go", + "zerrors_darwin_arm.go", + "zsyscall_darwin_arm.go", + "zsysnum_darwin_arm.go", + "ztypes_darwin_arm.go", + ], + "@io_bazel_rules_go//go/platform:darwin_arm64": [ + "asm_darwin_arm64.s", + "syscall_darwin_arm64.go", + "zerrors_darwin_arm64.go", + "zsyscall_darwin_arm64.go", + "zsysnum_darwin_arm64.go", + "ztypes_darwin_arm64.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly_amd64": [ + "asm_dragonfly_amd64.s", + "syscall_dragonfly_amd64.go", + "zerrors_dragonfly_amd64.go", + "zsyscall_dragonfly_amd64.go", + "zsysnum_dragonfly_amd64.go", + "ztypes_dragonfly_amd64.go", + ], + "@io_bazel_rules_go//go/platform:freebsd_386": [ + "asm_freebsd_386.s", + "errors_freebsd_386.go", + 
"syscall_freebsd_386.go", + "zerrors_freebsd_386.go", + "zsyscall_freebsd_386.go", + "zsysnum_freebsd_386.go", + "ztypes_freebsd_386.go", + ], + "@io_bazel_rules_go//go/platform:freebsd_amd64": [ + "asm_freebsd_amd64.s", + "errors_freebsd_amd64.go", + "syscall_freebsd_amd64.go", + "zerrors_freebsd_amd64.go", + "zsyscall_freebsd_amd64.go", + "zsysnum_freebsd_amd64.go", + "ztypes_freebsd_amd64.go", + ], + "@io_bazel_rules_go//go/platform:freebsd_arm": [ + "asm_freebsd_arm.s", + "errors_freebsd_arm.go", + "syscall_freebsd_arm.go", + "zerrors_freebsd_arm.go", + "zsyscall_freebsd_arm.go", + "zsysnum_freebsd_arm.go", + "ztypes_freebsd_arm.go", + ], + "@io_bazel_rules_go//go/platform:linux_386": [ + "asm_linux_386.s", + "flock_linux_32bit.go", + "syscall_linux_386.go", + "zerrors_linux_386.go", + "zptrace386_linux.go", + "zsyscall_linux_386.go", + "zsysnum_linux_386.go", + "ztypes_linux_386.go", + ], + "@io_bazel_rules_go//go/platform:linux_amd64": [ + "asm_linux_amd64.s", + "syscall_linux_amd64.go", + "syscall_linux_amd64_gc.go", "zerrors_linux_amd64.go", "zptrace386_linux.go", "zsyscall_linux_amd64.go", "zsysnum_linux_amd64.go", "ztypes_linux_amd64.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "endian_little.go", + "@io_bazel_rules_go//go/platform:linux_arm": [ + "asm_linux_arm.s", + "flock_linux_32bit.go", + "syscall_linux_arm.go", + "zerrors_linux_arm.go", + "zptracearm_linux.go", + "zsyscall_linux_arm.go", + "zsysnum_linux_arm.go", + "ztypes_linux_arm.go", + ], + "@io_bazel_rules_go//go/platform:linux_arm64": [ + "asm_linux_arm64.s", + "syscall_linux_arm64.go", + "zerrors_linux_arm64.go", + "zptracearm_linux.go", + "zsyscall_linux_arm64.go", + "zsysnum_linux_arm64.go", + "ztypes_linux_arm64.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips": [ + "asm_linux_mipsx.s", + "flock_linux_32bit.go", + "syscall_linux_mipsx.go", + "zerrors_linux_mips.go", + "zptracemips_linux.go", + "zsyscall_linux_mips.go", + "zsysnum_linux_mips.go", + 
"ztypes_linux_mips.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips64": [ + "asm_linux_mips64x.s", + "syscall_linux_mips64x.go", + "zerrors_linux_mips64.go", + "zptracemips_linux.go", + "zsyscall_linux_mips64.go", + "zsysnum_linux_mips64.go", + "ztypes_linux_mips64.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips64le": [ + "asm_linux_mips64x.s", + "syscall_linux_mips64x.go", + "zerrors_linux_mips64le.go", + "zptracemipsle_linux.go", + "zsyscall_linux_mips64le.go", + "zsysnum_linux_mips64le.go", + "ztypes_linux_mips64le.go", + ], + "@io_bazel_rules_go//go/platform:linux_mipsle": [ + "asm_linux_mipsx.s", + "flock_linux_32bit.go", + "syscall_linux_mipsx.go", + "zerrors_linux_mipsle.go", + "zptracemipsle_linux.go", + "zsyscall_linux_mipsle.go", + "zsysnum_linux_mipsle.go", + "ztypes_linux_mipsle.go", + ], + "@io_bazel_rules_go//go/platform:linux_ppc64": [ + "asm_linux_ppc64x.s", + "syscall_linux_ppc64x.go", + "zerrors_linux_ppc64.go", + "zsyscall_linux_ppc64.go", + "zsysnum_linux_ppc64.go", + "ztypes_linux_ppc64.go", + ], + "@io_bazel_rules_go//go/platform:linux_ppc64le": [ + "asm_linux_ppc64x.s", + "syscall_linux_ppc64x.go", + "zerrors_linux_ppc64le.go", + "zsyscall_linux_ppc64le.go", + "zsysnum_linux_ppc64le.go", + "ztypes_linux_ppc64le.go", + ], + "@io_bazel_rules_go//go/platform:linux_s390x": [ + "asm_linux_s390x.s", + "syscall_linux_s390x.go", + "zerrors_linux_s390x.go", + "zsyscall_linux_s390x.go", + "zsysnum_linux_s390x.go", + "ztypes_linux_s390x.go", + ], + "@io_bazel_rules_go//go/platform:netbsd_386": [ + "asm_netbsd_386.s", + "syscall_netbsd_386.go", + "zerrors_netbsd_386.go", + "zsyscall_netbsd_386.go", + "zsysnum_netbsd_386.go", + "ztypes_netbsd_386.go", + ], + "@io_bazel_rules_go//go/platform:netbsd_amd64": [ + "asm_netbsd_amd64.s", + "syscall_netbsd_amd64.go", + "zerrors_netbsd_amd64.go", + "zsyscall_netbsd_amd64.go", + "zsysnum_netbsd_amd64.go", + "ztypes_netbsd_amd64.go", + ], + "@io_bazel_rules_go//go/platform:netbsd_arm": [ + 
"asm_netbsd_arm.s", + "syscall_netbsd_arm.go", + "zerrors_netbsd_arm.go", + "zsyscall_netbsd_arm.go", + "zsysnum_netbsd_arm.go", + "ztypes_netbsd_arm.go", + ], + "@io_bazel_rules_go//go/platform:openbsd_386": [ + "asm_openbsd_386.s", + "openbsd_pledge.go", + "syscall_openbsd_386.go", + "zerrors_openbsd_386.go", + "zsyscall_openbsd_386.go", + "zsysctl_openbsd_386.go", + "zsysnum_openbsd_386.go", + "ztypes_openbsd_386.go", + ], + "@io_bazel_rules_go//go/platform:openbsd_amd64": [ + "asm_openbsd_amd64.s", + "openbsd_pledge.go", + "syscall_openbsd_amd64.go", + "zerrors_openbsd_amd64.go", + "zsyscall_openbsd_amd64.go", + "zsysctl_openbsd_amd64.go", + "zsysnum_openbsd_amd64.go", + "ztypes_openbsd_amd64.go", + ], + "@io_bazel_rules_go//go/platform:openbsd_arm": [ + "asm_openbsd_arm.s", + "openbsd_pledge.go", + "syscall_openbsd_arm.go", + "zerrors_openbsd_arm.go", + "zsyscall_openbsd_arm.go", + "zsysctl_openbsd_arm.go", + "zsysnum_openbsd_arm.go", + "ztypes_openbsd_arm.go", + ], + "@io_bazel_rules_go//go/platform:solaris_amd64": [ + "asm_solaris_amd64.s", + "syscall_solaris_amd64.go", + "zerrors_solaris_amd64.go", + "zsyscall_solaris_amd64.go", + "zsysnum_solaris_amd64.go", + "ztypes_solaris_amd64.go", ], "//conditions:default": [], }), diff --git a/vendor/golang.org/x/sys/windows/BUILD b/vendor/golang.org/x/sys/windows/BUILD index af77e37ec71..01bb8b3b616 100644 --- a/vendor/golang.org/x/sys/windows/BUILD +++ b/vendor/golang.org/x/sys/windows/BUILD @@ -5,8 +5,7 @@ go_library( srcs = [ "mksyscall.go", ] + select({ - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "asm_windows_amd64.s", + "@io_bazel_rules_go//go/platform:windows": [ "dll_windows.go", "env_unset.go", "env_windows.go", @@ -20,10 +19,19 @@ go_library( "syscall.go", "syscall_windows.go", "types_windows.go", - "types_windows_amd64.go", "zsyscall_windows.go", ], "//conditions:default": [], + }) + select({ + "@io_bazel_rules_go//go/platform:windows_386": [ + "asm_windows_386.s", + "types_windows_386.go", + ], 
+ "@io_bazel_rules_go//go/platform:windows_amd64": [ + "asm_windows_amd64.s", + "types_windows_amd64.go", + ], + "//conditions:default": [], }), importpath = "golang.org/x/sys/windows", visibility = ["//visibility:public"], diff --git a/vendor/golang.org/x/tools/container/intsets/BUILD b/vendor/golang.org/x/tools/container/intsets/BUILD index 4b0f952c7bb..08b7fd583f2 100644 --- a/vendor/golang.org/x/tools/container/intsets/BUILD +++ b/vendor/golang.org/x/tools/container/intsets/BUILD @@ -3,21 +3,45 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "popcnt_generic.go", "sparse.go", "util.go", ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "@io_bazel_rules_go//go/platform:386": [ + "popcnt_generic.go", + ], + "@io_bazel_rules_go//go/platform:amd64": [ "popcnt_amd64.go", "popcnt_amd64.s", ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "popcnt_amd64.go", - "popcnt_amd64.s", + "@io_bazel_rules_go//go/platform:amd64p32": [ + "popcnt_generic.go", ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "popcnt_amd64.go", - "popcnt_amd64.s", + "@io_bazel_rules_go//go/platform:arm": [ + "popcnt_generic.go", + ], + "@io_bazel_rules_go//go/platform:arm64": [ + "popcnt_generic.go", + ], + "@io_bazel_rules_go//go/platform:mips": [ + "popcnt_generic.go", + ], + "@io_bazel_rules_go//go/platform:mips64": [ + "popcnt_generic.go", + ], + "@io_bazel_rules_go//go/platform:mips64le": [ + "popcnt_generic.go", + ], + "@io_bazel_rules_go//go/platform:mipsle": [ + "popcnt_generic.go", + ], + "@io_bazel_rules_go//go/platform:ppc64": [ + "popcnt_generic.go", + ], + "@io_bazel_rules_go//go/platform:ppc64le": [ + "popcnt_generic.go", + ], + "@io_bazel_rules_go//go/platform:s390x": [ + "popcnt_generic.go", ], "//conditions:default": [], }), diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/BUILD b/vendor/gopkg.in/natefinch/lumberjack.v2/BUILD index e75d45953ec..66beaa1e90b 100644 --- 
a/vendor/gopkg.in/natefinch/lumberjack.v2/BUILD +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/BUILD @@ -3,12 +3,41 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "chown.go", "lumberjack.go", ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ + "@io_bazel_rules_go//go/platform:android": [ + "chown.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "chown.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "chown.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "chown.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ "chown_linux.go", ], + "@io_bazel_rules_go//go/platform:nacl": [ + "chown.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "chown.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "chown.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "chown.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "chown.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "chown.go", + ], "//conditions:default": [], }), importpath = "gopkg.in/natefinch/lumberjack.v2", From a532ecd7041f59962f8fb29d4628be07edb02741 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Mon, 4 Dec 2017 14:23:44 -0800 Subject: [PATCH 501/794] Use race="off" mode instead of disabling race feature --- pkg/master/BUILD | 2 +- staging/src/k8s.io/client-go/tools/cache/BUILD | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/master/BUILD b/pkg/master/BUILD index 71380163d2a..3162a764c33 100644 --- a/pkg/master/BUILD +++ b/pkg/master/BUILD @@ -120,8 +120,8 @@ go_test( "master_test.go", ], embed = [":go_default_library"], - features = ["-race"], importpath = "k8s.io/kubernetes/pkg/master", + race = "off", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", diff --git a/staging/src/k8s.io/client-go/tools/cache/BUILD b/staging/src/k8s.io/client-go/tools/cache/BUILD index 3b5a6081350..0fbb4469595 100644 
--- a/staging/src/k8s.io/client-go/tools/cache/BUILD +++ b/staging/src/k8s.io/client-go/tools/cache/BUILD @@ -23,8 +23,8 @@ go_test( "undelta_store_test.go", ], embed = [":go_default_library"], - features = ["-race"], importpath = "k8s.io/client-go/tools/cache", + race = "off", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", From 924fd3b058d86b84b6c4fe4c94a4c652478ae8f4 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Sat, 16 Dec 2017 11:57:17 -0800 Subject: [PATCH 502/794] Update helper scripts to find binaries in new bazel-bin paths --- cluster/clientbin.sh | 7 ++++++- hack/lib/util.sh | 7 ++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/cluster/clientbin.sh b/cluster/clientbin.sh index b34a6e65866..a03a2ca3e36 100755 --- a/cluster/clientbin.sh +++ b/cluster/clientbin.sh @@ -84,9 +84,14 @@ function get_bin() { "${KUBE_ROOT}/_output/bin/${bin}" "${KUBE_ROOT}/_output/dockerized/bin/${host_os}/${host_arch}/${bin}" "${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}/${bin}" - "${KUBE_ROOT}/bazel-bin/${srcdir}/${bin}" "${KUBE_ROOT}/platforms/${host_os}/${host_arch}/${bin}" ) + # Also search for binary in bazel build tree. + # The bazel go rules place binaries in subtrees like + # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure + # the platform name is matched in the path. + locations+=($(find "${KUBE_ROOT}/bazel-bin/${srcdir}" -type f -executable \ + -path "*/${host_os}_${host_arch}*/${bin}" 2>/dev/null || true) ) echo $( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 ) } diff --git a/hack/lib/util.sh b/hack/lib/util.sh index 3595a0c6650..0035ced815f 100755 --- a/hack/lib/util.sh +++ b/hack/lib/util.sh @@ -148,10 +148,11 @@ kube::util::find-binary-for-platform() { "${KUBE_ROOT}/platforms/${platform}/${lookfor}" ) # Also search for binary in bazel build tree. 
- # In some cases we have to name the binary $BINARY_bin, since there was a - # directory named $BINARY next to it. + # The bazel go rules place binaries in subtrees like + # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure + # the platform name is matched in the path. locations+=($(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \ - \( -name "${lookfor}" -o -name "${lookfor}_bin" \) 2>/dev/null || true) ) + -path "*/${platform/\//_}*/${lookfor}" 2>/dev/null || true) ) # List most recently-updated location. local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 ) From 46e894bfd3bd23cb67a4324f86fc7bd91a7423ce Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Tue, 14 Nov 2017 17:07:05 -0800 Subject: [PATCH 503/794] Switch go binaries from (hacky) static to pure Go --- cmd/cloud-controller-manager/BUILD | 7 +------ cmd/kube-apiserver/BUILD | 7 +------ cmd/kube-controller-manager/BUILD | 7 +------ cmd/kube-proxy/BUILD | 7 +------ cmd/kubeadm/BUILD | 7 +------ cmd/kubectl/BUILD | 12 +----------- plugin/cmd/kube-scheduler/BUILD | 7 +------ staging/src/k8s.io/kube-aggregator/BUILD | 7 +------ 8 files changed, 8 insertions(+), 53 deletions(-) diff --git a/cmd/cloud-controller-manager/BUILD b/cmd/cloud-controller-manager/BUILD index dbbae5c6f15..7ab8666cab0 100644 --- a/cmd/cloud-controller-manager/BUILD +++ b/cmd/cloud-controller-manager/BUILD @@ -10,13 +10,8 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "cloud-controller-manager", embed = [":go_default_library"], - gc_linkopts = [ - "-linkmode", - "external", - "-extldflags", - "-static", - ], importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager", + pure = "on", x_defs = version_x_defs(), ) diff --git a/cmd/kube-apiserver/BUILD b/cmd/kube-apiserver/BUILD index 55c7d08f305..29a454f06e0 100644 --- a/cmd/kube-apiserver/BUILD +++ b/cmd/kube-apiserver/BUILD @@ -10,13 +10,8 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = 
"kube-apiserver", embed = [":go_default_library"], - gc_linkopts = [ - "-linkmode", - "external", - "-extldflags", - "-static", - ], importpath = "k8s.io/kubernetes/cmd/kube-apiserver", + pure = "on", x_defs = version_x_defs(), ) diff --git a/cmd/kube-controller-manager/BUILD b/cmd/kube-controller-manager/BUILD index 94fa7797003..d5152c0e1cf 100644 --- a/cmd/kube-controller-manager/BUILD +++ b/cmd/kube-controller-manager/BUILD @@ -10,13 +10,8 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kube-controller-manager", embed = [":go_default_library"], - gc_linkopts = [ - "-linkmode", - "external", - "-extldflags", - "-static", - ], importpath = "k8s.io/kubernetes/cmd/kube-controller-manager", + pure = "on", x_defs = version_x_defs(), ) diff --git a/cmd/kube-proxy/BUILD b/cmd/kube-proxy/BUILD index ac19c0bd512..92f618488e7 100644 --- a/cmd/kube-proxy/BUILD +++ b/cmd/kube-proxy/BUILD @@ -10,13 +10,8 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kube-proxy", embed = [":go_default_library"], - gc_linkopts = [ - "-linkmode", - "external", - "-extldflags", - "-static", - ], importpath = "k8s.io/kubernetes/cmd/kube-proxy", + pure = "on", x_defs = version_x_defs(), ) diff --git a/cmd/kubeadm/BUILD b/cmd/kubeadm/BUILD index 7c2dd89f035..fad320975ef 100644 --- a/cmd/kubeadm/BUILD +++ b/cmd/kubeadm/BUILD @@ -10,13 +10,8 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kubeadm", embed = [":go_default_library"], - gc_linkopts = [ - "-linkmode", - "external", - "-extldflags", - "-static", - ], importpath = "k8s.io/kubernetes/cmd/kubeadm", + pure = "on", x_defs = version_x_defs(), ) diff --git a/cmd/kubectl/BUILD b/cmd/kubectl/BUILD index f51b7909434..74501eed97b 100644 --- a/cmd/kubectl/BUILD +++ b/cmd/kubectl/BUILD @@ -8,18 +8,8 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kubectl", embed = [":go_default_library"], - gc_linkopts = select({ - # Mac OS X doesn't support static binaries: - # 
https://developer.apple.com/library/content/qa/qa1118/_index.html - "@io_bazel_rules_go//go/platform:darwin_amd64": [], - "//conditions:default": [ - "-linkmode", - "external", - "-extldflags", - "-static", - ], - }), importpath = "k8s.io/kubernetes/cmd/kubectl", + pure = "on", visibility = ["//visibility:public"], x_defs = version_x_defs(), ) diff --git a/plugin/cmd/kube-scheduler/BUILD b/plugin/cmd/kube-scheduler/BUILD index c89e49a18fc..ae3a6e3a117 100644 --- a/plugin/cmd/kube-scheduler/BUILD +++ b/plugin/cmd/kube-scheduler/BUILD @@ -10,13 +10,8 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kube-scheduler", embed = [":go_default_library"], - gc_linkopts = [ - "-linkmode", - "external", - "-extldflags", - "-static", - ], importpath = "k8s.io/kubernetes/plugin/cmd/kube-scheduler", + pure = "on", x_defs = version_x_defs(), ) diff --git a/staging/src/k8s.io/kube-aggregator/BUILD b/staging/src/k8s.io/kube-aggregator/BUILD index df4b3e9ad69..f3ca649c442 100644 --- a/staging/src/k8s.io/kube-aggregator/BUILD +++ b/staging/src/k8s.io/kube-aggregator/BUILD @@ -10,13 +10,8 @@ load("//vendor/k8s.io/client-go/pkg/version:def.bzl", "version_x_defs") go_binary( name = "kube-aggregator", embed = [":go_default_library"], - gc_linkopts = [ - "-linkmode", - "external", - "-extldflags", - "-static", - ], importpath = "k8s.io/kube-aggregator", + pure = "on", x_defs = version_x_defs(), ) From 205cbf470efa0fd0e0a906f8e02da1ef2db14e30 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Sat, 16 Dec 2017 13:09:09 +0800 Subject: [PATCH 504/794] Update DNS version in kubeadm of 1.10 cycle. 
--- cmd/kubeadm/app/phases/addons/dns/dns.go | 3 +- cmd/kubeadm/app/phases/addons/dns/dns_test.go | 3 +- .../app/phases/addons/dns/manifests.go | 4 +- cmd/kubeadm/app/phases/addons/dns/versions.go | 28 +----- .../app/phases/addons/dns/versions_test.go | 92 ++++--------------- 5 files changed, 24 insertions(+), 106 deletions(-) diff --git a/cmd/kubeadm/app/phases/addons/dns/dns.go b/cmd/kubeadm/app/phases/addons/dns/dns.go index ffa7250c119..4f1db3d045e 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns.go @@ -75,7 +75,7 @@ func kubeDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interfac // Get the YAML manifest conditionally based on the k8s version kubeDNSDeploymentBytes := GetKubeDNSManifest(k8sVersion) dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(kubeDNSDeploymentBytes, - struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, DNSProbeType, MasterTaintKey string }{ + struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{ ImageRepository: cfg.ImageRepository, Arch: runtime.GOARCH, // Get the kube-dns version conditionally based on the k8s version @@ -83,7 +83,6 @@ func kubeDNSAddon(cfg *kubeadmapi.MasterConfiguration, client clientset.Interfac DNSBindAddr: dnsBindAddr, DNSProbeAddr: dnsProbeAddr, DNSDomain: cfg.Networking.DNSDomain, - DNSProbeType: GetKubeDNSProbeType(k8sVersion), MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster, }) if err != nil { diff --git a/cmd/kubeadm/app/phases/addons/dns/dns_test.go b/cmd/kubeadm/app/phases/addons/dns/dns_test.go index 55967aae3d7..c144dc49e04 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns_test.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns_test.go @@ -92,14 +92,13 @@ func TestCompileManifests(t *testing.T) { }{ { manifest: v180AndAboveKubeDNSDeployment, - data: struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, DNSProbeType, MasterTaintKey string }{ 
+ data: struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{ ImageRepository: "foo", Arch: "foo", Version: "foo", DNSBindAddr: "foo", DNSProbeAddr: "foo", DNSDomain: "foo", - DNSProbeType: "foo", MasterTaintKey: "foo", }, expected: true, diff --git a/cmd/kubeadm/app/phases/addons/dns/manifests.go b/cmd/kubeadm/app/phases/addons/dns/manifests.go index ce2de307400..e2d0f60a3d1 100644 --- a/cmd/kubeadm/app/phases/addons/dns/manifests.go +++ b/cmd/kubeadm/app/phases/addons/dns/manifests.go @@ -156,8 +156,8 @@ spec: args: - --v=2 - --logtostderr - - --probe=kubedns,{{ .DNSProbeAddr }}:10053,kubernetes.default.svc.{{ .DNSDomain }},5,{{ .DNSProbeType }} - - --probe=dnsmasq,{{ .DNSProbeAddr }}:53,kubernetes.default.svc.{{ .DNSDomain }},5,{{ .DNSProbeType }} + - --probe=kubedns,{{ .DNSProbeAddr }}:10053,kubernetes.default.svc.{{ .DNSDomain }},5,SRV + - --probe=dnsmasq,{{ .DNSProbeAddr }}:53,kubernetes.default.svc.{{ .DNSDomain }},5,SRV ports: - containerPort: 10054 name: metrics diff --git a/cmd/kubeadm/app/phases/addons/dns/versions.go b/cmd/kubeadm/app/phases/addons/dns/versions.go index 96267a9f7f8..c728f55f09c 100644 --- a/cmd/kubeadm/app/phases/addons/dns/versions.go +++ b/cmd/kubeadm/app/phases/addons/dns/versions.go @@ -22,48 +22,26 @@ import ( ) const ( - kubeDNSv180AndAboveVersion = "1.14.5" kubeDNSv190AndAboveVersion = "1.14.7" - - kubeDNSProbeSRV = "SRV" - kubeDNSProbeA = "A" - coreDNSVersion = "1.0.1" + coreDNSVersion = "1.0.1" ) // GetDNSVersion returns the right kube-dns version for a specific k8s version func GetDNSVersion(kubeVersion *version.Version, dns string) string { - // v1.8.0+ uses kube-dns 1.14.5 // v1.9.0+ uses kube-dns 1.14.7 - // v1.9.0+ uses CoreDNS 1.0.1 + // v1.9.0+ uses CoreDNS 1.0.1 if feature gate "CoreDNS" is enabled. 
// In the future when the version is bumped at HEAD; add conditional logic to return the right versions // Also, the version might be bumped for different k8s releases on the same branch switch dns { - case kubeadmconstants.KubeDNS: - // return the kube-dns version - if kubeVersion.Major() == 1 && kubeVersion.Minor() >= 9 { - return kubeDNSv190AndAboveVersion - } - return kubeDNSv180AndAboveVersion case kubeadmconstants.CoreDNS: // return the CoreDNS version return coreDNSVersion default: - return kubeDNSv180AndAboveVersion + return kubeDNSv190AndAboveVersion } } -// GetKubeDNSProbeType returns the right kube-dns probe for a specific k8s version -func GetKubeDNSProbeType(kubeVersion *version.Version) string { - // v1.8.0+ uses type A, just return that here - // In the future when the kube-dns version is bumped at HEAD; add conditional logic to return the right versions - // Also, the version might be bumped for different k8s releases on the same branch - if kubeVersion.Major() == 1 && kubeVersion.Minor() >= 9 { - return kubeDNSProbeSRV - } - return kubeDNSProbeA -} - // GetKubeDNSManifest returns the right kube-dns YAML manifest for a specific k8s version func GetKubeDNSManifest(kubeVersion *version.Version) string { // v1.8.0+ has only one known YAML manifest spec, just return that here diff --git a/cmd/kubeadm/app/phases/addons/dns/versions_test.go b/cmd/kubeadm/app/phases/addons/dns/versions_test.go index 7580ba19a31..c4ca3e78f89 100644 --- a/cmd/kubeadm/app/phases/addons/dns/versions_test.go +++ b/cmd/kubeadm/app/phases/addons/dns/versions_test.go @@ -25,45 +25,38 @@ import ( func TestGetKubeDNSVersion(t *testing.T) { var tests = []struct { - k8sVersion, expected string + k8sVersion string + dns string + expected string }{ { - k8sVersion: "v1.7.0", - expected: "1.14.5", + k8sVersion: "v1.9.0", + dns: kubeadmconstants.KubeDNS, + expected: kubeDNSv190AndAboveVersion, }, { - k8sVersion: "v1.7.1", - expected: "1.14.5", - }, - { - k8sVersion: "v1.7.2", - expected: 
"1.14.5", - }, - { - k8sVersion: "v1.7.3", - expected: "1.14.5", - }, - { - k8sVersion: "v1.8.0-alpha.2", - expected: "1.14.5", - }, - { - k8sVersion: "v1.8.0", - expected: "1.14.5", + k8sVersion: "v1.10.0", + dns: kubeadmconstants.KubeDNS, + expected: kubeDNSv190AndAboveVersion, }, { k8sVersion: "v1.9.0", - expected: "1.14.7", + dns: kubeadmconstants.CoreDNS, + expected: coreDNSVersion, + }, + { + k8sVersion: "v1.10.0", + dns: kubeadmconstants.CoreDNS, + expected: coreDNSVersion, }, } for _, rt := range tests { - k8sVersion, err := version.ParseSemantic(rt.k8sVersion) if err != nil { t.Fatalf("couldn't parse kubernetes version %q: %v", rt.k8sVersion, err) } - actualDNSVersion := GetDNSVersion(k8sVersion, kubeadmconstants.KubeDNS) + actualDNSVersion := GetDNSVersion(k8sVersion, rt.dns) if actualDNSVersion != rt.expected { t.Errorf( "failed GetDNSVersion:\n\texpected: %s\n\t actual: %s", @@ -73,54 +66,3 @@ func TestGetKubeDNSVersion(t *testing.T) { } } } - -func TestGetKubeDNSProbeType(t *testing.T) { - var tests = []struct { - k8sVersion, expected string - }{ - { - k8sVersion: "v1.7.0", - expected: "A", - }, - { - k8sVersion: "v1.7.1", - expected: "A", - }, - { - k8sVersion: "v1.7.2", - expected: "A", - }, - { - k8sVersion: "v1.7.3", - expected: "A", - }, - { - k8sVersion: "v1.8.0-alpha.2", - expected: "A", - }, - { - k8sVersion: "v1.8.0", - expected: "A", - }, - { - k8sVersion: "v1.9.0", - expected: "SRV", - }, - } - for _, rt := range tests { - - k8sVersion, err := version.ParseSemantic(rt.k8sVersion) - if err != nil { - t.Fatalf("couldn't parse kubernetes version %q: %v", rt.k8sVersion, err) - } - - actualDNSProbeType := GetKubeDNSProbeType(k8sVersion) - if actualDNSProbeType != rt.expected { - t.Errorf( - "failed GetKubeDNSProbeType:\n\texpected: %s\n\t actual: %s", - rt.expected, - actualDNSProbeType, - ) - } - } -} From c07ab6800500e6d1ce23e8ee42ec5a8a3e44a8a3 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Thu, 21 Dec 2017 10:12:46 +0800 Subject: [PATCH 
505/794] Support multiple scale sets in same cluster --- .../providers/azure/azure_backoff.go | 17 - .../providers/azure/azure_loadbalancer.go | 10 +- .../providers/azure/azure_util_vmss.go | 370 +++++++++++------- 3 files changed, 239 insertions(+), 158 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 33fc9064095..0494201b6b2 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -58,23 +58,6 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua return machine, exists, err } -// GetScaleSetsVMWithRetry invokes ss.getScaleSetVM with exponential backoff retry -func (ss *scaleSet) GetScaleSetsVMWithRetry(name types.NodeName, scaleSetName string) (compute.VirtualMachineScaleSetVM, bool, error) { - var machine compute.VirtualMachineScaleSetVM - var exists bool - err := wait.ExponentialBackoff(ss.resourceRequestBackoff, func() (bool, error) { - var retryErr error - machine, exists, retryErr = ss.getScaleSetVM(string(name), scaleSetName) - if retryErr != nil { - glog.Errorf("GetScaleSetsVMWithRetry backoff: failure, will retry,err=%v", retryErr) - return false, nil - } - glog.V(10).Infof("GetScaleSetsVMWithRetry backoff: success") - return true, nil - }) - return machine, exists, err -} - // VirtualMachineClientGetWithRetry invokes az.VirtualMachinesClient.Get with exponential backoff retry func (az *Cloud) VirtualMachineClientGetWithRetry(resourceGroup, vmName string, types compute.InstanceViewTypes) (compute.VirtualMachine, error) { var machine compute.VirtualMachine diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 54e108fcc84..d6f4bdfac38 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -753,12 +753,18 @@ func (az 
*Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) - az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName) + glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName) + err := az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName) + if err != nil { + glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err) + return nil, err + } + glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName) // Remove the LB. az.operationPollRateLimiter.Accept() glog.V(10).Infof("LoadBalancerClient.Delete(%q): start", lbName) - err := az.DeleteLBWithRetry(lbName) + err = az.DeleteLBWithRetry(lbName) if err != nil { glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) return nil, err diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index 45f631af951..71a881c9fef 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -23,6 +23,8 @@ import ( "sort" "strconv" "strings" + "sync" + "time" "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/network" @@ -40,9 +42,29 @@ var ( // ErrorNotVmssInstance indicates an instance is not belongint to any vmss. ErrorNotVmssInstance = errors.New("not a vmss instance") - scaleSetNameRE = regexp.MustCompile(`^/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`) + scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`) ) +// scaleSetVMInfo includes basic information of a virtual machine. 
+type scaleSetVMInfo struct { + // The ID of the machine. + ID string + // Instance ID of the machine (only for scale sets vm). + InstanceID string + // Node name of the machine. + NodeName string + // Set name of the machine. + ScaleSetName string + // The type of the machine. + Type string + // The region of the machine. + Region string + // Primary interface ID of the machine. + PrimaryInterfaceID string + // Fault domain of the machine. + FaultDomain string +} + // scaleSet implements VMSet interface for Azure scale set. type scaleSet struct { *Cloud @@ -50,23 +72,173 @@ type scaleSet struct { // availabilitySet is also required for scaleSet because some instances // (e.g. master nodes) may not belong to any scale sets. availabilitySet VMSet + + cacheMutex sync.Mutex + // A local cache of scale sets. The key is scale set name and the value is a + // list of virtual machines belonging to the scale set. + cache map[string][]scaleSetVMInfo } // newScaleSet creates a new scaleSet. func newScaleSet(az *Cloud) VMSet { - return &scaleSet{ + ss := &scaleSet{ Cloud: az, availabilitySet: newAvailabilitySet(az), + cache: make(map[string][]scaleSetVMInfo), } + + go wait.Until(func() { + ss.cacheMutex.Lock() + defer ss.cacheMutex.Unlock() + + if err := ss.updateCache(); err != nil { + glog.Errorf("updateCache failed: %v", err) + } + }, 5*time.Minute, wait.NeverStop) + + return ss +} + +// updateCache updates scale sets cache. It should be called within a lock. 
+func (ss *scaleSet) updateCache() error { + scaleSetNames, err := ss.listScaleSetsWithRetry() + if err != nil { + return err + } + + localCache := make(map[string][]scaleSetVMInfo) + for _, scaleSetName := range scaleSetNames { + if _, ok := localCache[scaleSetName]; !ok { + localCache[scaleSetName] = make([]scaleSetVMInfo, 0) + } + vms, err := ss.listScaleSetVMsWithRetry(scaleSetName) + if err != nil { + return err + } + + for _, vm := range vms { + nodeName := "" + if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil { + nodeName = *vm.OsProfile.ComputerName + } + + vmSize := "" + if vm.Sku != nil && vm.Sku.Name != nil { + vmSize = *vm.Sku.Name + } + + primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm) + if err != nil { + glog.Errorf("getPrimaryInterfaceID for %s failed: %v", nodeName, err) + return err + } + + faultDomain := "" + if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil { + faultDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)) + } + + localCache[scaleSetName] = append(localCache[scaleSetName], scaleSetVMInfo{ + ID: *vm.ID, + Type: vmSize, + NodeName: nodeName, + FaultDomain: faultDomain, + ScaleSetName: scaleSetName, + Region: *vm.Location, + InstanceID: *vm.InstanceID, + PrimaryInterfaceID: primaryInterfaceID, + }) + } + } + + // Only update cache after all steps are success. + ss.cache = localCache + + return nil +} + +// getCachedVirtualMachine gets virtualMachine by nodeName from cache. 
+func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, error) { + ss.cacheMutex.Lock() + defer ss.cacheMutex.Unlock() + + getVMFromCache := func(nodeName string) (scaleSetVMInfo, error) { + for scaleSetName := range ss.cache { + for _, vm := range ss.cache[scaleSetName] { + if vm.NodeName == nodeName { + return vm, nil + } + } + } + + return scaleSetVMInfo{}, cloudprovider.InstanceNotFound + } + + vm, err := getVMFromCache(nodeName) + if err == nil { + return vm, nil + } + + // Update cache and try again. + if err = ss.updateCache(); err != nil { + return scaleSetVMInfo{}, err + } + vm, err = getVMFromCache(nodeName) + if err == nil { + return vm, nil + } + + return scaleSetVMInfo{}, cloudprovider.InstanceNotFound +} + +func (ss *scaleSet) getCachedVirtualMachineByInstanceID(scaleSetName, instanceID string) (scaleSetVMInfo, error) { + ss.cacheMutex.Lock() + defer ss.cacheMutex.Unlock() + + getVMByID := func(scaleSetName, instanceID string) (scaleSetVMInfo, error) { + vms, ok := ss.cache[scaleSetName] + if !ok { + glog.V(4).Infof("scale set (%s) not found", scaleSetName) + return scaleSetVMInfo{}, cloudprovider.InstanceNotFound + } + + for _, vm := range vms { + if vm.InstanceID == instanceID { + glog.V(4).Infof("getCachedVirtualMachineByInstanceID gets vm (%s) by instanceID (%s) within scale set (%s)", vm.NodeName, instanceID, scaleSetName) + return vm, nil + } + } + + glog.V(4).Infof("instanceID (%s) not found in scale set (%s)", instanceID, scaleSetName) + return scaleSetVMInfo{}, cloudprovider.InstanceNotFound + } + + vm, err := getVMByID(scaleSetName, instanceID) + if err == nil { + return vm, nil + } + + // Update cache and try again. + if err = ss.updateCache(); err != nil { + return scaleSetVMInfo{}, err + } + vm, err = getVMByID(scaleSetName, instanceID) + if err == nil { + return vm, nil + } + + return scaleSetVMInfo{}, cloudprovider.InstanceNotFound } // GetInstanceIDByNodeName gets the cloud provider ID by node name. 
// It must return ("", cloudprovider.InstanceNotFound) if the instance does // not exist or is no longer running. func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { - instanceID, err := ss.getScaleSetInstanceIDByName(name, ss.PrimaryScaleSetName) + vm, err := ss.getCachedVirtualMachine(name) if err != nil { - if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { + if err == cloudprovider.InstanceNotFound { + glog.V(4).Infof("GetInstanceIDByNodeName: node %q is not found in scale sets, assuming it is managed by availability set", name) + // Retry with standard type because master nodes may not belong to any vmss. // TODO: find a better way to identify the type of VM. return ss.availabilitySet.GetInstanceIDByNodeName(name) @@ -75,89 +247,39 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { return "", err } - return instanceID, nil -} - -func (ss *scaleSet) getScaleSetInstanceIDByName(name, scaleSetName string) (string, error) { - var machine compute.VirtualMachineScaleSetVM - var exists bool - var err error - - ss.operationPollRateLimiter.Accept() - machine, exists, err = ss.getScaleSetVM(name, scaleSetName) - if err != nil { - if ss.CloudProviderBackoff { - glog.V(2).Infof("InstanceID(%s) backing off", name) - machine, exists, err = ss.GetScaleSetsVMWithRetry(types.NodeName(name), scaleSetName) - if err != nil { - glog.V(2).Infof("InstanceID(%s) abort backoff", name) - return "", err - } - } else { - return "", err - } - } else if !exists { - return "", cloudprovider.InstanceNotFound - } - - return *machine.ID, nil -} - -func (ss *scaleSet) getScaleSetVM(nodeName, scaleSetName string) (vm compute.VirtualMachineScaleSetVM, exists bool, err error) { - instanceID, err := getScaleSetVMInstanceID(nodeName) - if err != nil { - return vm, false, err - } - - return ss.getScaleSetVMByID(instanceID, scaleSetName) -} - -func (ss *scaleSet) getScaleSetVMByID(instanceID, scaleSetName string) (vm 
compute.VirtualMachineScaleSetVM, exists bool, err error) { - var realErr error - - // scaleSetName is required to query VM info. - if scaleSetName == "" { - scaleSetName = ss.PrimaryScaleSetName - } - - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): start", instanceID) - vm, err = ss.VirtualMachineScaleSetVMsClient.Get(ss.ResourceGroup, scaleSetName, instanceID) - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): end", instanceID) - - exists, realErr = checkResourceExistsFromError(err) - if realErr != nil { - return vm, false, realErr - } - - if !exists { - return vm, false, nil - } - - return vm, exists, err + return vm.ID, nil } // GetNodeNameByProviderID gets the node name by provider ID. func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) { // NodeName is not part of providerID for vmss instances. - parts := strings.Split(providerID, "/") - instanceID := parts[len(parts)-1] - machine, exist, err := ss.getScaleSetVMByID(instanceID, ss.PrimaryScaleSetName) - if !exist { - return "", cloudprovider.InstanceNotFound + scaleSetName, err := extractScaleSetNameByVMID(providerID) + if err != nil { + glog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is managed by availability set: %v", providerID, err) + return ss.availabilitySet.GetNodeNameByProviderID(providerID) } + + instanceID, err := getLastSegment(providerID) + if err != nil { + glog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is managed by availability set: %v", providerID, err) + return ss.availabilitySet.GetNodeNameByProviderID(providerID) + } + + vm, err := ss.getCachedVirtualMachineByInstanceID(scaleSetName, instanceID) if err != nil { return "", err } - return types.NodeName(*machine.OsProfile.ComputerName), nil + return types.NodeName(vm.NodeName), nil } // GetInstanceTypeByNodeName gets the instance type by node name. 
func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { - instanceType, err := ss.getScaleSetInstanceTypeByNodeName(name) + vm, err := ss.getCachedVirtualMachine(name) if err != nil { - if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { + if err == cloudprovider.InstanceNotFound { + glog.V(4).Infof("GetInstanceTypeByNodeName: node %q is not found in scale sets, assuming it is managed by availability set", name) + // Retry with standard type because master nodes may not belong to any vmss. // TODO: find a better way to identify the type of VM. return ss.availabilitySet.GetInstanceTypeByNodeName(name) @@ -166,30 +288,15 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { return "", err } - return instanceType, nil -} - -func (ss *scaleSet) getScaleSetInstanceTypeByNodeName(name string) (string, error) { - machine, exists, err := ss.getScaleSetVM(name, ss.PrimaryScaleSetName) - if err != nil { - glog.Errorf("error: ss.getScaleSetInstanceTypeByNodeName(%s), ss.getScaleSetVM(%s) err=%v", name, name, err) - return "", err - } else if !exists { - return "", cloudprovider.InstanceNotFound - } - - if machine.Sku.Name != nil { - return *machine.Sku.Name, nil - } - - return "", fmt.Errorf("instance type is not defined") + return vm.Type, nil } // GetZoneByNodeName gets cloudprovider.Zone by node name. func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { - instanceID, err := getScaleSetVMInstanceID(name) + vm, err := ss.getCachedVirtualMachine(name) if err != nil { - if err == ErrorNotVmssInstance { + if err == cloudprovider.InstanceNotFound { + glog.V(4).Infof("GetZoneByNodeName: node %q is not found in scale sets, assuming it is managed by availability set", name) // Retry with standard type because master nodes may not belong to any vmss. // TODO: find a better way to identify the type of VM. 
return ss.availabilitySet.GetZoneByNodeName(name) @@ -197,23 +304,10 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { return cloudprovider.Zone{}, err } - vm, err := ss.VirtualMachineScaleSetVMsClient.Get(ss.ResourceGroup, ss.Config.PrimaryScaleSetName, instanceID) - if err != nil { - return cloudprovider.Zone{}, err - } - - // PlatformFaultDomain is not included in VirtualMachineScaleSetVM, so we get it from VirtualMachineScaleSetVMInstanceView. - vmView, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(ss.ResourceGroup, ss.Config.PrimaryScaleSetName, instanceID) - if err != nil { - return cloudprovider.Zone{}, err - } - - failureDomain := strconv.Itoa(int(*vmView.PlatformFaultDomain)) - zone := cloudprovider.Zone{ - FailureDomain: failureDomain, - Region: *(vm.Location), - } - return zone, nil + return cloudprovider.Zone{ + FailureDomain: vm.FaultDomain, + Region: vm.Region, + }, nil } // GetPrimaryVMSetName returns the VM set name depending on the configured vmType. 
@@ -345,7 +439,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { ss.operationPollRateLimiter.Accept() glog.V(10).Infof("VirtualMachineScaleSetVMsClient.List start for %v", scaleSetName) - result, err = ss.VirtualMachineScaleSetVMsClient.List(ss.ResourceGroup, scaleSetName, "", "", "") + result, err = ss.VirtualMachineScaleSetVMsClient.List(ss.ResourceGroup, scaleSetName, "", "", string(compute.InstanceView)) glog.V(10).Infof("VirtualMachineScaleSetVMsClient.List end for %v", scaleSetName) if err != nil { glog.Errorf("VirtualMachineScaleSetVMsClient.List for %v failed: %v", scaleSetName, err) @@ -388,25 +482,24 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir return allVMs, nil } -// getAgentPoolAvailabiliySets lists the virtual machines for for the resource group and then builds -// a list of availability sets that match the nodes available to k8s. +// getAgentPoolScaleSets lists the virtual machines for the resource group and then builds +// a list of scale sets that match the nodes available to k8s. func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) { - scaleSetNames, err := ss.listScaleSetsWithRetry() - if err != nil { + ss.cacheMutex.Lock() + defer ss.cacheMutex.Unlock() + + // Always update cache to get latest lists of scale sets and virtual machines. 
+ if err := ss.updateCache(); err != nil { return nil, err } - vmNameToScaleSetName := make(map[string]string, len(scaleSetNames)) - for _, scaleSetName := range scaleSetNames { - vms, err := ss.listScaleSetVMsWithRetry(scaleSetName) - if err != nil { - return nil, err - } - + vmNameToScaleSetName := make(map[string]string) + for scaleSetName := range ss.cache { + vms := ss.cache[scaleSetName] for idx := range vms { vm := vms[idx] - if vm.OsProfile != nil || vm.OsProfile.ComputerName != nil { - vmNameToScaleSetName[*vm.OsProfile.ComputerName] = scaleSetName + if vm.NodeName != "" { + vmNameToScaleSetName[vm.NodeName] = scaleSetName } } } @@ -488,43 +581,42 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN // GetPrimaryInterface gets machine primary network interface by node name and vmSet. func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) { - ss.operationPollRateLimiter.Accept() - machine, exists, err := ss.getScaleSetVM(nodeName, vmSetName) - if !exists || err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { - // Retry with standard type because master nodes may not belong to any vmss. - // TODO: find a better way to identify the type of VM. - return ss.availabilitySet.GetPrimaryInterface(nodeName, "") - } + vm, err := ss.getCachedVirtualMachine(nodeName) if err != nil { - glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getScaleSetVM(%s), err=%v", nodeName, nodeName, err) + if err == cloudprovider.InstanceNotFound { + // Retry with standard type because master nodes may not belong to any vmss. + // TODO: find a better way to identify the type of VM. 
+ return ss.availabilitySet.GetPrimaryInterface(nodeName, "") + } + + glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getCachedVirtualMachine(%s), err=%v", nodeName, nodeName, err) return network.Interface{}, err } - nicID, err := ss.getPrimaryInterfaceID(machine) - if err != nil { - glog.Errorf("error: ss.GetPrimaryInterface(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err) - return network.Interface{}, err + // Check scale set name. + if vmSetName != "" && !strings.EqualFold(vm.ScaleSetName, vmSetName) { + return network.Interface{}, errNotInVMSet } - nicName, err := getLastSegment(nicID) + nicName, err := getLastSegment(vm.PrimaryInterfaceID) if err != nil { - glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, nicID, err) + glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, vm.PrimaryInterfaceID, err) return network.Interface{}, err } ss.operationPollRateLimiter.Accept() glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName) - nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, ss.Config.PrimaryScaleSetName, *machine.InstanceID, nicName, "") + nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, vm.ScaleSetName, vm.InstanceID, nicName, "") glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) if err != nil { - glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, nicName, "", err) + glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, vm.ScaleSetName, nicName, err) return network.Interface{}, err } // Fix interface's location, which is required when updating the interface. // TODO: is this a bug of azure SDK? 
if nic.Location == nil || *nic.Location == "" { - nic.Location = &ss.Config.Location + nic.Location = &vm.Region } return nic, nil From 8b2da625fe273edeb0f2bd41d470af2aacd8b5fe Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Mon, 25 Dec 2017 10:53:33 +0800 Subject: [PATCH 506/794] Fix kubeadm upgrade unit test failure. --- cmd/kubeadm/app/constants/constants.go | 1 + .../app/phases/upgrade/compute_test.go | 320 +++++++++--------- 2 files changed, 161 insertions(+), 160 deletions(-) diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index d8b9c763eb7..36584da4add 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -236,6 +236,7 @@ var ( 8: "3.0.17", 9: "3.1.10", 10: "3.1.10", + 11: "3.1.10", } ) diff --git a/cmd/kubeadm/app/phases/upgrade/compute_test.go b/cmd/kubeadm/app/phases/upgrade/compute_test.go index 0ca86fa184c..7835493f79a 100644 --- a/cmd/kubeadm/app/phases/upgrade/compute_test.go +++ b/cmd/kubeadm/app/phases/upgrade/compute_test.go @@ -47,7 +47,7 @@ func (f *fakeVersionGetter) VersionFromCILabel(ciVersionLabel, _ string) (string if ciVersionLabel == "latest" { return f.latestVersion, versionutil.MustParseSemantic(f.latestVersion), nil } - if ciVersionLabel == "latest-1.9" { + if ciVersionLabel == "latest-1.10" { return f.latestDevBranchVersion, versionutil.MustParseSemantic(f.latestDevBranchVersion), nil } return f.stablePatchVersion, versionutil.MustParseSemantic(f.stablePatchVersion), nil @@ -64,7 +64,7 @@ type fakeEtcdCluster struct{} func (f fakeEtcdCluster) GetEtcdClusterStatus() (*clientv3.StatusResponse, error) { client := &clientv3.StatusResponse{} - client.Version = "3.0.14" + client.Version = "3.1.10" return client, nil } @@ -78,12 +78,12 @@ func TestGetAvailableUpgrades(t *testing.T) { }{ { // no action needed, already up-to-date vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", - kubeadmVersion: "v1.8.3", + 
clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", + kubeadmVersion: "v1.9.3", - stablePatchVersion: "v1.8.3", - stableVersion: "v1.8.3", + stablePatchVersion: "v1.9.3", + stableVersion: "v1.9.3", }, expectedUpgrades: []Upgrade{}, allowExperimental: false, @@ -91,30 +91,30 @@ func TestGetAvailableUpgrades(t *testing.T) { }, { // simple patch version upgrade vg: &fakeVersionGetter{ - clusterVersion: "v1.8.1", - kubeletVersion: "v1.8.1", // the kubelet are on the same version as the control plane - kubeadmVersion: "v1.8.2", + clusterVersion: "v1.9.1", + kubeletVersion: "v1.9.1", // the kubelet are on the same version as the control plane + kubeadmVersion: "v1.9.2", - stablePatchVersion: "v1.8.3", - stableVersion: "v1.8.3", + stablePatchVersion: "v1.9.3", + stableVersion: "v1.9.3", }, expectedUpgrades: []Upgrade{ { - Description: "version in the v1.8 series", + Description: "version in the v1.9 series", Before: ClusterState{ - KubeVersion: "v1.8.1", + KubeVersion: "v1.9.1", KubeletVersions: map[string]uint16{ - "v1.8.1": 1, + "v1.9.1": 1, }, - KubeadmVersion: "v1.8.2", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.14", + KubeadmVersion: "v1.9.2", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.8.3", - KubeadmVersion: "v1.8.3", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.17", + KubeVersion: "v1.9.3", + KubeadmVersion: "v1.9.3", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, }, }, @@ -123,28 +123,28 @@ func TestGetAvailableUpgrades(t *testing.T) { }, { // minor version upgrade only vg: &fakeVersionGetter{ - clusterVersion: "v1.8.1", - kubeletVersion: "v1.8.1", // the kubelet are on the same version as the control plane - kubeadmVersion: "v1.9.0", + clusterVersion: "v1.9.1", + kubeletVersion: "v1.9.1", // the kubelet are on the same version as the control plane + kubeadmVersion: "v1.10.0", - stablePatchVersion: "v1.8.1", - stableVersion: "v1.9.0", + stablePatchVersion: "v1.9.1", + stableVersion: "v1.10.0", }, 
expectedUpgrades: []Upgrade{ { Description: "stable version", Before: ClusterState{ - KubeVersion: "v1.8.1", + KubeVersion: "v1.9.1", KubeletVersions: map[string]uint16{ - "v1.8.1": 1, + "v1.9.1": 1, }, - KubeadmVersion: "v1.9.0", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.14", + KubeadmVersion: "v1.10.0", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.9.0", - KubeadmVersion: "v1.9.0", + KubeVersion: "v1.10.0", + KubeadmVersion: "v1.10.0", DNSVersion: "1.14.7", EtcdVersion: "3.1.10", }, @@ -155,46 +155,46 @@ func TestGetAvailableUpgrades(t *testing.T) { }, { // both minor version upgrade and patch version upgrade available vg: &fakeVersionGetter{ - clusterVersion: "v1.8.3", - kubeletVersion: "v1.8.3", // the kubelet are on the same version as the control plane - kubeadmVersion: "v1.8.5", + clusterVersion: "v1.9.3", + kubeletVersion: "v1.9.3", // the kubelet are on the same version as the control plane + kubeadmVersion: "v1.9.5", - stablePatchVersion: "v1.8.5", - stableVersion: "v1.9.1", + stablePatchVersion: "v1.9.5", + stableVersion: "v1.10.1", }, expectedUpgrades: []Upgrade{ { - Description: "version in the v1.8 series", + Description: "version in the v1.9 series", Before: ClusterState{ - KubeVersion: "v1.8.3", + KubeVersion: "v1.9.3", KubeletVersions: map[string]uint16{ - "v1.8.3": 1, + "v1.9.3": 1, }, - KubeadmVersion: "v1.8.5", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.14", + KubeadmVersion: "v1.9.5", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.8.5", - KubeadmVersion: "v1.8.5", // Note: The kubeadm version mustn't be "downgraded" here - DNSVersion: "1.14.5", - EtcdVersion: "3.0.17", + KubeVersion: "v1.9.5", + KubeadmVersion: "v1.9.5", // Note: The kubeadm version mustn't be "downgraded" here + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, }, { Description: "stable version", Before: ClusterState{ - KubeVersion: "v1.8.3", + KubeVersion: "v1.9.3", KubeletVersions: 
map[string]uint16{ - "v1.8.3": 1, + "v1.9.3": 1, }, - KubeadmVersion: "v1.8.5", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.14", + KubeadmVersion: "v1.9.5", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.9.1", - KubeadmVersion: "v1.9.1", + KubeVersion: "v1.10.1", + KubeadmVersion: "v1.10.1", DNSVersion: "1.14.7", EtcdVersion: "3.1.10", }, @@ -205,13 +205,13 @@ func TestGetAvailableUpgrades(t *testing.T) { }, { // allow experimental upgrades, but no upgrade available vg: &fakeVersionGetter{ - clusterVersion: "v1.9.0-alpha.2", - kubeletVersion: "v1.8.5", - kubeadmVersion: "v1.8.5", + clusterVersion: "v1.10.0-alpha.2", + kubeletVersion: "v1.9.5", + kubeadmVersion: "v1.9.5", - stablePatchVersion: "v1.8.5", - stableVersion: "v1.8.5", - latestVersion: "v1.9.0-alpha.2", + stablePatchVersion: "v1.9.5", + stableVersion: "v1.9.5", + latestVersion: "v1.10.0-alpha.2", }, expectedUpgrades: []Upgrade{}, allowExperimental: true, @@ -219,29 +219,29 @@ func TestGetAvailableUpgrades(t *testing.T) { }, { // upgrade to an unstable version should be supported vg: &fakeVersionGetter{ - clusterVersion: "v1.8.5", - kubeletVersion: "v1.8.5", - kubeadmVersion: "v1.8.5", + clusterVersion: "v1.9.5", + kubeletVersion: "v1.9.5", + kubeadmVersion: "v1.9.5", - stablePatchVersion: "v1.8.5", - stableVersion: "v1.8.5", - latestVersion: "v1.9.0-alpha.2", + stablePatchVersion: "v1.9.5", + stableVersion: "v1.9.5", + latestVersion: "v1.10.0-alpha.2", }, expectedUpgrades: []Upgrade{ { Description: "experimental version", Before: ClusterState{ - KubeVersion: "v1.8.5", + KubeVersion: "v1.9.5", KubeletVersions: map[string]uint16{ - "v1.8.5": 1, + "v1.9.5": 1, }, - KubeadmVersion: "v1.8.5", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.14", + KubeadmVersion: "v1.9.5", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.9.0-alpha.2", - KubeadmVersion: "v1.9.0-alpha.2", + KubeVersion: "v1.10.0-alpha.2", + KubeadmVersion: 
"v1.10.0-alpha.2", DNSVersion: "1.14.7", EtcdVersion: "3.1.10", }, @@ -252,29 +252,29 @@ func TestGetAvailableUpgrades(t *testing.T) { }, { // upgrade from an unstable version to an unstable version should be supported vg: &fakeVersionGetter{ - clusterVersion: "v1.9.0-alpha.1", - kubeletVersion: "v1.8.5", - kubeadmVersion: "v1.8.5", + clusterVersion: "v1.10.0-alpha.1", + kubeletVersion: "v1.9.5", + kubeadmVersion: "v1.9.5", - stablePatchVersion: "v1.8.5", - stableVersion: "v1.8.5", - latestVersion: "v1.9.0-alpha.2", + stablePatchVersion: "v1.9.5", + stableVersion: "v1.9.5", + latestVersion: "v1.10.0-alpha.2", }, expectedUpgrades: []Upgrade{ { Description: "experimental version", Before: ClusterState{ - KubeVersion: "v1.9.0-alpha.1", + KubeVersion: "v1.10.0-alpha.1", KubeletVersions: map[string]uint16{ - "v1.8.5": 1, + "v1.9.5": 1, }, - KubeadmVersion: "v1.8.5", + KubeadmVersion: "v1.9.5", DNSVersion: "1.14.7", - EtcdVersion: "3.0.14", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.9.0-alpha.2", - KubeadmVersion: "v1.9.0-alpha.2", + KubeVersion: "v1.10.0-alpha.2", + KubeadmVersion: "v1.10.0-alpha.2", DNSVersion: "1.14.7", EtcdVersion: "3.1.10", }, @@ -285,30 +285,30 @@ func TestGetAvailableUpgrades(t *testing.T) { }, { // v1.X.0-alpha.0 should be ignored vg: &fakeVersionGetter{ - clusterVersion: "v1.8.5", - kubeletVersion: "v1.8.5", - kubeadmVersion: "v1.8.5", + clusterVersion: "v1.9.5", + kubeletVersion: "v1.9.5", + kubeadmVersion: "v1.9.5", - stablePatchVersion: "v1.8.5", - stableVersion: "v1.8.5", - latestDevBranchVersion: "v1.9.0-beta.1", - latestVersion: "v1.10.0-alpha.0", + stablePatchVersion: "v1.9.5", + stableVersion: "v1.9.5", + latestDevBranchVersion: "v1.10.0-beta.1", + latestVersion: "v1.11.0-alpha.0", }, expectedUpgrades: []Upgrade{ { Description: "experimental version", Before: ClusterState{ - KubeVersion: "v1.8.5", + KubeVersion: "v1.9.5", KubeletVersions: map[string]uint16{ - "v1.8.5": 1, + "v1.9.5": 1, }, - KubeadmVersion: 
"v1.8.5", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.14", + KubeadmVersion: "v1.9.5", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.9.0-beta.1", - KubeadmVersion: "v1.9.0-beta.1", + KubeVersion: "v1.10.0-beta.1", + KubeadmVersion: "v1.10.0-beta.1", DNSVersion: "1.14.7", EtcdVersion: "3.1.10", }, @@ -319,30 +319,30 @@ func TestGetAvailableUpgrades(t *testing.T) { }, { // upgrade to an RC version should be supported vg: &fakeVersionGetter{ - clusterVersion: "v1.8.5", - kubeletVersion: "v1.8.5", - kubeadmVersion: "v1.8.5", + clusterVersion: "v1.9.5", + kubeletVersion: "v1.9.5", + kubeadmVersion: "v1.9.5", - stablePatchVersion: "v1.8.5", - stableVersion: "v1.8.5", - latestDevBranchVersion: "v1.9.0-rc.1", - latestVersion: "v1.10.0-alpha.1", + stablePatchVersion: "v1.9.5", + stableVersion: "v1.9.5", + latestDevBranchVersion: "v1.10.0-rc.1", + latestVersion: "v1.11.0-alpha.1", }, expectedUpgrades: []Upgrade{ { Description: "release candidate version", Before: ClusterState{ - KubeVersion: "v1.8.5", + KubeVersion: "v1.9.5", KubeletVersions: map[string]uint16{ - "v1.8.5": 1, + "v1.9.5": 1, }, - KubeadmVersion: "v1.8.5", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.14", + KubeadmVersion: "v1.9.5", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.9.0-rc.1", - KubeadmVersion: "v1.9.0-rc.1", + KubeVersion: "v1.10.0-rc.1", + KubeadmVersion: "v1.10.0-rc.1", DNSVersion: "1.14.7", EtcdVersion: "3.1.10", }, @@ -353,30 +353,30 @@ func TestGetAvailableUpgrades(t *testing.T) { }, { // it is possible (but very uncommon) that the latest version from the previous branch is an rc and the current latest version is alpha.0. 
In that case, show the RC vg: &fakeVersionGetter{ - clusterVersion: "v1.8.5", - kubeletVersion: "v1.8.5", - kubeadmVersion: "v1.8.5", + clusterVersion: "v1.9.5", + kubeletVersion: "v1.9.5", + kubeadmVersion: "v1.9.5", - stablePatchVersion: "v1.8.5", - stableVersion: "v1.8.5", - latestDevBranchVersion: "v1.9.6-rc.1", - latestVersion: "v1.10.1-alpha.0", + stablePatchVersion: "v1.9.5", + stableVersion: "v1.9.5", + latestDevBranchVersion: "v1.10.6-rc.1", + latestVersion: "v1.11.1-alpha.0", }, expectedUpgrades: []Upgrade{ { Description: "experimental version", // Note that this is considered an experimental version in this uncommon scenario Before: ClusterState{ - KubeVersion: "v1.8.5", + KubeVersion: "v1.9.5", KubeletVersions: map[string]uint16{ - "v1.8.5": 1, + "v1.9.5": 1, }, - KubeadmVersion: "v1.8.5", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.14", + KubeadmVersion: "v1.9.5", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.9.6-rc.1", - KubeadmVersion: "v1.9.6-rc.1", + KubeVersion: "v1.10.6-rc.1", + KubeadmVersion: "v1.10.6-rc.1", DNSVersion: "1.14.7", EtcdVersion: "3.1.10", }, @@ -387,30 +387,30 @@ func TestGetAvailableUpgrades(t *testing.T) { }, { // upgrade to an RC version should be supported. There may also be an even newer unstable version. 
vg: &fakeVersionGetter{ - clusterVersion: "v1.8.5", - kubeletVersion: "v1.8.5", - kubeadmVersion: "v1.8.5", + clusterVersion: "v1.9.5", + kubeletVersion: "v1.9.5", + kubeadmVersion: "v1.9.5", - stablePatchVersion: "v1.8.5", - stableVersion: "v1.8.5", - latestDevBranchVersion: "v1.9.0-rc.1", - latestVersion: "v1.10.0-alpha.2", + stablePatchVersion: "v1.9.5", + stableVersion: "v1.9.5", + latestDevBranchVersion: "v1.10.0-rc.1", + latestVersion: "v1.11.0-alpha.2", }, expectedUpgrades: []Upgrade{ { Description: "release candidate version", Before: ClusterState{ - KubeVersion: "v1.8.5", + KubeVersion: "v1.9.5", KubeletVersions: map[string]uint16{ - "v1.8.5": 1, + "v1.9.5": 1, }, - KubeadmVersion: "v1.8.5", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.14", + KubeadmVersion: "v1.9.5", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.9.0-rc.1", - KubeadmVersion: "v1.9.0-rc.1", + KubeVersion: "v1.10.0-rc.1", + KubeadmVersion: "v1.10.0-rc.1", DNSVersion: "1.14.7", EtcdVersion: "3.1.10", }, @@ -418,17 +418,17 @@ func TestGetAvailableUpgrades(t *testing.T) { { Description: "experimental version", Before: ClusterState{ - KubeVersion: "v1.8.5", + KubeVersion: "v1.9.5", KubeletVersions: map[string]uint16{ - "v1.8.5": 1, + "v1.9.5": 1, }, - KubeadmVersion: "v1.8.5", - DNSVersion: "1.14.5", - EtcdVersion: "3.0.14", + KubeadmVersion: "v1.9.5", + DNSVersion: "1.14.7", + EtcdVersion: "3.1.10", }, After: ClusterState{ - KubeVersion: "v1.10.0-alpha.2", - KubeadmVersion: "v1.10.0-alpha.2", + KubeVersion: "v1.11.0-alpha.2", + KubeadmVersion: "v1.11.0-alpha.2", DNSVersion: "1.14.7", EtcdVersion: "3.1.10", }, @@ -463,36 +463,36 @@ func TestKubeletUpgrade(t *testing.T) { }{ { // upgrade available before: map[string]uint16{ - "v1.7.1": 1, + "v1.9.1": 1, }, - after: "v1.7.3", + after: "v1.9.3", expected: true, }, { // upgrade available before: map[string]uint16{ - "v1.7.1": 1, - "v1.7.3": 100, + "v1.9.1": 1, + "v1.9.3": 100, }, - after: "v1.7.3", + 
after: "v1.9.3", expected: true, }, { // upgrade not available before: map[string]uint16{ - "v1.7.3": 1, + "v1.9.3": 1, }, - after: "v1.7.3", + after: "v1.9.3", expected: false, }, { // upgrade not available before: map[string]uint16{ - "v1.7.3": 100, + "v1.9.3": 100, }, - after: "v1.7.3", + after: "v1.9.3", expected: false, }, { // upgrade not available if we don't know anything about the earlier state before: map[string]uint16{}, - after: "v1.7.3", + after: "v1.9.3", expected: false, }, } From 035598b94a5ceef1bd2ddd3ad3dcb45f393cc8c4 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Sat, 23 Dec 2017 17:06:28 +0800 Subject: [PATCH 507/794] fix wrong hairpin-mode value --- pkg/kubelet/apis/kubeletconfig/types.go | 2 +- pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/apis/kubeletconfig/types.go b/pkg/kubelet/apis/kubeletconfig/types.go index 3b7f42509a5..f4392f6ed6f 100644 --- a/pkg/kubelet/apis/kubeletconfig/types.go +++ b/pkg/kubelet/apis/kubeletconfig/types.go @@ -192,7 +192,7 @@ type KubeletConfiguration struct { // "promiscuous-bridge": make the container bridge promiscuous. // "hairpin-veth": set the hairpin flag on container veth interfaces. // "none": do nothing. - // Generally, one must set --hairpin-mode=veth-flag to achieve hairpin NAT, + // Generally, one must set --hairpin-mode=hairpin-veth to achieve hairpin NAT, // because promiscous-bridge assumes the existence of a container bridge named cbr0. HairpinMode string // maxPods is the number of pods that can run on this Kubelet. diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go index 0c661f8a0ee..96f92d4465f 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go @@ -189,7 +189,7 @@ type KubeletConfiguration struct { // "promiscuous-bridge": make the container bridge promiscuous. 
// "hairpin-veth": set the hairpin flag on container veth interfaces. // "none": do nothing. - // Generally, one must set --hairpin-mode=veth-flag to achieve hairpin NAT, + // Generally, one must set --hairpin-mode=hairpin-veth to achieve hairpin NAT, // because promiscous-bridge assumes the existence of a container bridge named cbr0. HairpinMode string `json:"hairpinMode"` // maxPods is the number of pods that can run on this Kubelet. From 3406af70bc4a0289046e37c7b9fee1c660a4300e Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Sat, 23 Dec 2017 17:05:51 +0800 Subject: [PATCH 508/794] validate --hairpin-mode in kubelet config --- pkg/kubelet/apis/kubeletconfig/validation/validation.go | 8 ++++++++ .../apis/kubeletconfig/validation/validation_test.go | 6 ++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/apis/kubeletconfig/validation/validation.go b/pkg/kubelet/apis/kubeletconfig/validation/validation.go index d3f95a0b88c..cb4d65bd782 100644 --- a/pkg/kubelet/apis/kubeletconfig/validation/validation.go +++ b/pkg/kubelet/apis/kubeletconfig/validation/validation.go @@ -100,5 +100,13 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration) error val, kubetypes.NodeAllocatableEnforcementKey, kubetypes.SystemReservedEnforcementKey, kubetypes.KubeReservedEnforcementKey)) } } + switch kc.HairpinMode { + case kubeletconfig.HairpinNone: + case kubeletconfig.HairpinVeth: + case kubeletconfig.PromiscuousBridge: + default: + allErrors = append(allErrors, fmt.Errorf("Invalid option %q specified for HairpinMode (--hairpin-mode) setting. 
Valid options are %q, %q or %q", + kc.HairpinMode, kubeletconfig.HairpinNone, kubeletconfig.HairpinVeth, kubeletconfig.PromiscuousBridge)) + } return utilerrors.NewAggregate(allErrors) } diff --git a/pkg/kubelet/apis/kubeletconfig/validation/validation_test.go b/pkg/kubelet/apis/kubeletconfig/validation/validation_test.go index 1e07e416120..134771fcfb0 100644 --- a/pkg/kubelet/apis/kubeletconfig/validation/validation_test.go +++ b/pkg/kubelet/apis/kubeletconfig/validation/validation_test.go @@ -47,6 +47,7 @@ func TestValidateKubeletConfiguration(t *testing.T) { ReadOnlyPort: 0, RegistryBurst: 10, RegistryPullQPS: 5, + HairpinMode: kubeletconfig.PromiscuousBridge, } if allErrors := ValidateKubeletConfiguration(successCase); allErrors != nil { t.Errorf("expect no errors got %v", allErrors) @@ -75,8 +76,9 @@ func TestValidateKubeletConfiguration(t *testing.T) { ReadOnlyPort: -10, RegistryBurst: -10, RegistryPullQPS: -10, + HairpinMode: "foo", } - if allErrors := ValidateKubeletConfiguration(errorCase); len(allErrors.(utilerrors.Aggregate).Errors()) != 21 { - t.Errorf("expect 21 errors got %v", len(allErrors.(utilerrors.Aggregate).Errors())) + if allErrors := ValidateKubeletConfiguration(errorCase); len(allErrors.(utilerrors.Aggregate).Errors()) != 22 { + t.Errorf("expect 22 errors got %v", len(allErrors.(utilerrors.Aggregate).Errors())) } } From 120a23a02578c5744716634f7515131ed44dd190 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Mon, 25 Dec 2017 11:36:22 +0800 Subject: [PATCH 509/794] update kubeadm validation test to fix test error --- cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go index 085065f36be..d23215d8590 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go @@ -591,6 +591,7 @@ func 
TestValidateKubeletConfiguration(t *testing.T) { ReadOnlyPort: utilpointer.Int32Ptr(0), RegistryBurst: 10, RegistryPullQPS: utilpointer.Int32Ptr(5), + HairpinMode: "promiscuous-bridge", }, } if allErrors := ValidateKubeletConfiguration(successCase, nil); len(allErrors) != 0 { From 392903c9f3571cf603509cfe1187f877847a18fd Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Mon, 25 Dec 2017 19:12:08 -0600 Subject: [PATCH 510/794] Fix PodCIDR flag: defaults come from the object, not as literal args to the flag function --- cmd/kubelet/app/options/options.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 6d8e13abe8c..584a9a2467e 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -452,7 +452,7 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat fs.StringVar(&c.HairpinMode, "hairpin-mode", c.HairpinMode, "How should the kubelet setup hairpin NAT. This allows endpoints of a Service to loadbalance back to themselves if they should try to access their own Service. Valid values are \"promiscuous-bridge\", \"hairpin-veth\" and \"none\".") fs.Int32Var(&c.MaxPods, "max-pods", c.MaxPods, "Number of Pods that can run on this Kubelet.") - fs.StringVar(&c.PodCIDR, "pod-cidr", "", "The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.") + fs.StringVar(&c.PodCIDR, "pod-cidr", c.PodCIDR, "The CIDR to use for pod IP addresses, only used in standalone mode. 
In cluster mode, this is obtained from the master.") fs.StringVar(&c.ResolverConfig, "resolv-conf", c.ResolverConfig, "Resolver configuration file used as the basis for the container DNS resolution configuration.") fs.BoolVar(&c.CPUCFSQuota, "cpu-cfs-quota", c.CPUCFSQuota, "Enable CPU CFS quota enforcement for containers that specify CPU limits") fs.BoolVar(&c.EnableControllerAttachDetach, "enable-controller-attach-detach", c.EnableControllerAttachDetach, "Enables the Attach/Detach controller to manage attachment/detachment of volumes scheduled to this node, and disables kubelet from executing any attach/detach operations") From 926baf5fe7e1dec98ca189cb70046dd157abc8ea Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Thu, 23 Nov 2017 20:05:28 +0800 Subject: [PATCH 511/794] Refactoring ValidateUsages for for bootstrap tokens. --- cmd/kubeadm/app/constants/constants.go | 3 ++- pkg/bootstrap/api/helpers.go | 7 +++---- pkg/bootstrap/api/types.go | 3 +++ 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 36584da4add..390b8c5f1d8 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -25,6 +25,7 @@ import ( "time" "k8s.io/api/core/v1" + bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/util/version" ) @@ -220,7 +221,7 @@ var ( AuthorizationWebhookConfigPath = filepath.Join(KubernetesDir, "webhook_authz.conf") // DefaultTokenUsages specifies the default functions a token will get - DefaultTokenUsages = []string{"signing", "authentication"} + DefaultTokenUsages = bootstrapapi.KnownTokenUsages // MasterComponents defines the master component names MasterComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler} diff --git a/pkg/bootstrap/api/helpers.go b/pkg/bootstrap/api/helpers.go index 01859bc37fe..c750cf75830 100644 --- 
a/pkg/bootstrap/api/helpers.go +++ b/pkg/bootstrap/api/helpers.go @@ -37,16 +37,15 @@ func ValidateBootstrapGroupName(name string) error { // ValidateUsages validates that the passed in string are valid usage strings for bootstrap tokens. func ValidateUsages(usages []string) error { - usageAuthentication := strings.TrimPrefix(BootstrapTokenUsageAuthentication, BootstrapTokenUsagePrefix) - usageSigning := strings.TrimPrefix(BootstrapTokenUsageSigningKey, BootstrapTokenUsagePrefix) + validUsages := sets.NewString(KnownTokenUsages...) invalidUsages := sets.NewString() for _, usage := range usages { - if usage != usageAuthentication && usage != usageSigning { + if !validUsages.Has(usage) { invalidUsages.Insert(usage) } } if len(invalidUsages) > 0 { - return fmt.Errorf("invalide bootstrap token usage string: %s, valid usage option: %s, %s", strings.Join(invalidUsages.List(), ","), usageAuthentication, usageSigning) + return fmt.Errorf("invalide bootstrap token usage string: %s, valid usage options: %s", strings.Join(invalidUsages.List(), ","), strings.Join(KnownTokenUsages, ",")) } return nil } diff --git a/pkg/bootstrap/api/types.go b/pkg/bootstrap/api/types.go index a4e67a1c249..c30814c0e26 100644 --- a/pkg/bootstrap/api/types.go +++ b/pkg/bootstrap/api/types.go @@ -95,3 +95,6 @@ const ( // tokens (in addition to any groups from BootstrapTokenExtraGroupsKey). BootstrapDefaultGroup = "system:bootstrappers" ) + +// KnownTokenUsages specifies the known functions a token will get. +var KnownTokenUsages = []string{"signing", "authentication"} From 347cdcf198e5311699caf26c5cf59af29a949b3f Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Mon, 27 Nov 2017 12:05:31 +0800 Subject: [PATCH 512/794] Auto generated BUILD files. 
--- cmd/kubeadm/app/constants/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/kubeadm/app/constants/BUILD b/cmd/kubeadm/app/constants/BUILD index d0f284c5ebb..30d303111a7 100644 --- a/cmd/kubeadm/app/constants/BUILD +++ b/cmd/kubeadm/app/constants/BUILD @@ -11,6 +11,7 @@ go_library( srcs = ["constants.go"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/constants", deps = [ + "//pkg/bootstrap/api:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//pkg/util/version:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", From 1cb195759f644e2060db190e199970945cf9cdf5 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Tue, 26 Dec 2017 10:21:13 +0800 Subject: [PATCH 513/794] Add OWNERS file to pkg/bootstrap/api --- pkg/bootstrap/api/OWNERS | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 pkg/bootstrap/api/OWNERS diff --git a/pkg/bootstrap/api/OWNERS b/pkg/bootstrap/api/OWNERS new file mode 100644 index 00000000000..8a2f5b5915b --- /dev/null +++ b/pkg/bootstrap/api/OWNERS @@ -0,0 +1,5 @@ +approvers: +- jbeda +- luxas +reviewers: +- mattmoyer From b0aff1ebeaa67be8a91bc297f71bb798e873cd6b Mon Sep 17 00:00:00 2001 From: Manjunath A Kumatagi Date: Tue, 26 Dec 2017 20:44:45 +0530 Subject: [PATCH 514/794] Add owners file for test images --- test/images/OWNERS | 7 +++++++ test/utils/image/OWNERS | 7 +++++++ 2 files changed, 14 insertions(+) create mode 100644 test/images/OWNERS create mode 100644 test/utils/image/OWNERS diff --git a/test/images/OWNERS b/test/images/OWNERS new file mode 100644 index 00000000000..1e4e74e7d50 --- /dev/null +++ b/test/images/OWNERS @@ -0,0 +1,7 @@ +reviewers: + - luxas + - mkumatag + - ixdy +approvers: + - luxas + - ixdy diff --git a/test/utils/image/OWNERS b/test/utils/image/OWNERS new file mode 100644 index 00000000000..1e4e74e7d50 --- /dev/null +++ b/test/utils/image/OWNERS @@ -0,0 +1,7 @@ +reviewers: + - luxas + - mkumatag + - ixdy +approvers: + - luxas + - ixdy From 
49e01b05e7791d8bbb4863f83c0b2f420cec73ab Mon Sep 17 00:00:00 2001 From: andrewsykim Date: Mon, 20 Nov 2017 13:45:50 -0500 Subject: [PATCH 515/794] kubeadm: set kube-apiserver advertise address using downward API --- .../phases/selfhosting/podspec_mutation.go | 22 ++++++ .../selfhosting/podspec_mutation_test.go | 79 ++++++++++++++++++- .../phases/selfhosting/selfhosting_test.go | 7 +- 3 files changed, 106 insertions(+), 2 deletions(-) diff --git a/cmd/kubeadm/app/phases/selfhosting/podspec_mutation.go b/cmd/kubeadm/app/phases/selfhosting/podspec_mutation.go index 78c54fd51b3..fd01d8c391a 100644 --- a/cmd/kubeadm/app/phases/selfhosting/podspec_mutation.go +++ b/cmd/kubeadm/app/phases/selfhosting/podspec_mutation.go @@ -18,6 +18,7 @@ package selfhosting import ( "path/filepath" + "strings" "k8s.io/api/core/v1" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -42,6 +43,7 @@ func GetDefaultMutators() map[string][]PodSpecMutatorFunc { addNodeSelectorToPodSpec, setMasterTolerationOnPodSpec, setRightDNSPolicyOnPodSpec, + setHostIPOnPodSpec, }, kubeadmconstants.KubeControllerManager: { addNodeSelectorToPodSpec, @@ -101,6 +103,26 @@ func setMasterTolerationOnPodSpec(podSpec *v1.PodSpec) { podSpec.Tolerations = append(podSpec.Tolerations, kubeadmconstants.MasterToleration) } +// setHostIPOnPodSpec sets the environment variable HOST_IP using downward API +func setHostIPOnPodSpec(podSpec *v1.PodSpec) { + envVar := v1.EnvVar{ + Name: "HOST_IP", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "status.hostIP", + }, + }, + } + + podSpec.Containers[0].Env = append(podSpec.Containers[0].Env, envVar) + + for i := range podSpec.Containers[0].Command { + if strings.Contains(podSpec.Containers[0].Command[i], "advertise-address") { + podSpec.Containers[0].Command[i] = "--advertise-address=$(HOST_IP)" + } + } +} + // setRightDNSPolicyOnPodSpec makes sure the self-hosted components can look up things via kube-dns if necessary func 
setRightDNSPolicyOnPodSpec(podSpec *v1.PodSpec) { podSpec.DNSPolicy = v1.DNSClusterFirstWithHostNet diff --git a/cmd/kubeadm/app/phases/selfhosting/podspec_mutation_test.go b/cmd/kubeadm/app/phases/selfhosting/podspec_mutation_test.go index 391ea4bad0e..6a5f1da8a8b 100644 --- a/cmd/kubeadm/app/phases/selfhosting/podspec_mutation_test.go +++ b/cmd/kubeadm/app/phases/selfhosting/podspec_mutation_test.go @@ -33,8 +33,36 @@ func TestMutatePodSpec(t *testing.T) { }{ { component: kubeadmconstants.KubeAPIServer, - podSpec: &v1.PodSpec{}, + podSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "kube-apiserver", + Command: []string{ + "--advertise-address=10.0.0.1", + }, + }, + }, + }, expected: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "kube-apiserver", + Command: []string{ + "--advertise-address=$(HOST_IP)", + }, + Env: []v1.EnvVar{ + { + Name: "HOST_IP", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "status.hostIP", + }, + }, + }, + }, + }, + }, + NodeSelector: map[string]string{ kubeadmconstants.LabelNodeRoleMaster: "", }, @@ -185,6 +213,55 @@ func TestSetRightDNSPolicyOnPodSpec(t *testing.T) { } } +func TestSetHostIPOnPodSpec(t *testing.T) { + var tests = []struct { + podSpec *v1.PodSpec + expected v1.PodSpec + }{ + { + podSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "kube-apiserver", + Command: []string{ + "--advertise-address=10.0.0.1", + }, + Env: []v1.EnvVar{}, + }, + }, + }, + expected: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "kube-apiserver", + Command: []string{ + "--advertise-address=$(HOST_IP)", + }, + Env: []v1.EnvVar{ + { + Name: "HOST_IP", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "status.hostIP", + }, + }, + }, + }, + }, + }, + }, + }, + } + + for _, rt := range tests { + setHostIPOnPodSpec(rt.podSpec) + + if !reflect.DeepEqual(*rt.podSpec, rt.expected) { + t.Errorf("failed setHostIPOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, 
*rt.podSpec) + } + } +} + func TestSetSelfHostedVolumesForAPIServer(t *testing.T) { hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate var tests = []struct { diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go index 8a6ebff8dfa..9b1b3306b56 100644 --- a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go @@ -134,7 +134,7 @@ spec: - --service-cluster-ip-range=10.96.0.0/12 - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt - - --advertise-address=192.168.1.115 + - --advertise-address=$(HOST_IP) - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt - --insecure-port=0 - --experimental-bootstrap-token-auth=true @@ -148,6 +148,11 @@ spec: - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key - --authorization-mode=Node,RBAC - --etcd-servers=http://127.0.0.1:2379 + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4 livenessProbe: failureThreshold: 8 From 39418b175f19e21b9e0b4550b4800aea02001486 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 26 Dec 2017 19:59:47 -0500 Subject: [PATCH 516/794] Fix TestCadvisorListPodStats failure under mac/darwin GetPodCgroupNameSuffix is not really implemented under darwin (or windows for that matter). So let's just skip over the check for CPU and Memory if that is not set. 
--- pkg/kubelet/stats/cadvisor_stats_provider_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/stats/cadvisor_stats_provider_test.go b/pkg/kubelet/stats/cadvisor_stats_provider_test.go index 2eb9d4a25c9..942fa2e2c50 100644 --- a/pkg/kubelet/stats/cadvisor_stats_provider_test.go +++ b/pkg/kubelet/stats/cadvisor_stats_provider_test.go @@ -230,8 +230,12 @@ func TestCadvisorListPodStats(t *testing.T) { assert.EqualValues(t, testTime(creationTime, seedPod0Infra).Unix(), ps.StartTime.Time.Unix()) checkNetworkStats(t, "Pod0", seedPod0Infra, ps.Network) checkEphemeralStats(t, "Pod0", []int{seedPod0Container0, seedPod0Container1}, []int{seedEphemeralVolume1, seedEphemeralVolume2}, ps.EphemeralStorage) - checkCPUStats(t, "Pod0", seedPod0Infra, ps.CPU) - checkMemoryStats(t, "Pod0", seedPod0Infra, infos["/pod0-i"], ps.Memory) + if ps.CPU != nil { + checkCPUStats(t, "Pod0", seedPod0Infra, ps.CPU) + } + if ps.Memory != nil { + checkMemoryStats(t, "Pod0", seedPod0Infra, infos["/pod0-i"], ps.Memory) + } // Validate Pod1 Results ps, found = indexPods[prf1] From 96f30d49dc21daff649811272070c408c726c9c9 Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Mon, 25 Dec 2017 20:39:10 -0600 Subject: [PATCH 517/794] Replace --init-config-dir with --config --- cmd/kubelet/app/options/options.go | 12 +++++----- cmd/kubelet/app/options/options_test.go | 2 +- cmd/kubelet/app/server.go | 14 ++++++------ cmd/kubelet/kubelet.go | 2 +- .../kubeletconfig/configfiles/configfiles.go | 22 ++++++++----------- .../configfiles/configfiles_test.go | 6 +++-- pkg/kubelet/kubeletconfig/controller.go | 11 +++++----- 7 files changed, 33 insertions(+), 36 deletions(-) diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 6d8e13abe8c..5c432dc7361 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -101,11 +101,11 @@ type KubeletFlags struct { // To use this flag, the DynamicKubeletConfig 
feature gate must be enabled. DynamicConfigDir flag.StringFlag - // The Kubelet will look in this directory for an init configuration. + // The Kubelet will load its initial configuration from this file. // The path may be absolute or relative; relative paths are under the Kubelet's current working directory. // Omit this flag to use the combination of built-in default configuration values and flags. // To use this flag, the KubeletConfigFile feature gate must be enabled. - InitConfigDir flag.StringFlag + KubeletConfigFile flag.StringFlag // registerNode enables automatic registration with the apiserver. RegisterNode bool @@ -229,9 +229,9 @@ func ValidateKubeletFlags(f *KubeletFlags) error { if f.DynamicConfigDir.Provided() && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { return fmt.Errorf("the DynamicKubeletConfig feature gate must be enabled in order to use the --dynamic-config-dir flag") } - // ensure that nobody sets InitConfigDir if the KubeletConfigFile feature gate is turned off - if f.InitConfigDir.Provided() && !utilfeature.DefaultFeatureGate.Enabled(features.KubeletConfigFile) { - return fmt.Errorf("the KubeletConfigFile feature gate must be enabled in order to use the --init-config-dir flag") + // ensure that nobody sets KubeletConfigFile if the KubeletConfigFile feature gate is turned off + if f.KubeletConfigFile.Provided() && !utilfeature.DefaultFeatureGate.Enabled(features.KubeletConfigFile) { + return fmt.Errorf("the KubeletConfigFile feature gate must be enabled in order to use the --config flag") } return nil } @@ -325,7 +325,7 @@ func (f *KubeletFlags) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&f.RootDirectory, "root-dir", f.RootDirectory, "Directory path for managing kubelet files (volume mounts,etc).") fs.Var(&f.DynamicConfigDir, "dynamic-config-dir", "The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. 
The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. Presently, you must also enable the DynamicKubeletConfig feature gate to pass this flag.") - fs.Var(&f.InitConfigDir, "init-config-dir", "The Kubelet will look in this directory for the init configuration. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Omit this argument to use the built-in default configuration values. Presently, you must also enable the KubeletConfigFile feature gate to pass this flag.") + fs.Var(&f.KubeletConfigFile, "config", "The Kubelet will load its initial configuration from this file. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Omit this flag to use the built-in default configuration values. You must also enable the KubeletConfigFile feature gate to pass this flag.") fs.BoolVar(&f.RegisterNode, "register-node", f.RegisterNode, "Register the node with the apiserver. If --kubeconfig is not provided, this flag is irrelevant, as the Kubelet won't have an apiserver to register with. Default=true.") fs.Var(utiltaints.NewTaintsVar(&f.RegisterWithTaints), "register-with-taints", "Register the node with the given list of taints (comma separated \"=:\"). 
No-op if register-node is false.") diff --git a/cmd/kubelet/app/options/options_test.go b/cmd/kubelet/app/options/options_test.go index 5a82a336335..33fd10b9d9f 100644 --- a/cmd/kubelet/app/options/options_test.go +++ b/cmd/kubelet/app/options/options_test.go @@ -38,7 +38,7 @@ func newKubeletServerOrDie() *KubeletServer { func cleanFlags(s *KubeletServer) { s.KubeConfig = utilflag.NewStringFlag(s.KubeConfig.Value()) s.DynamicConfigDir = utilflag.NewStringFlag(s.DynamicConfigDir.Value()) - s.InitConfigDir = utilflag.NewStringFlag(s.InitConfigDir.Value()) + s.KubeletConfigFile = utilflag.NewStringFlag(s.KubeletConfigFile.Value()) } // TestRoundTrip ensures that flag values from the Kubelet can be serialized diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 680894a5c1c..00156112825 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -866,16 +866,16 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) { // BootstrapKubeletConfigController constructs and bootstrap a configuration controller func BootstrapKubeletConfigController(defaultConfig *kubeletconfiginternal.KubeletConfiguration, - initConfigDirFlag flag.StringFlag, + kubeletConfigFileFlag flag.StringFlag, dynamicConfigDirFlag flag.StringFlag) (*kubeletconfiginternal.KubeletConfiguration, *kubeletconfig.Controller, error) { var err error // Alpha Dynamic Configuration Implementation; this section only loads config from disk, it does not contact the API server // compute absolute paths based on current working dir - initConfigDir := "" - if utilfeature.DefaultFeatureGate.Enabled(features.KubeletConfigFile) && initConfigDirFlag.Provided() { - initConfigDir, err = filepath.Abs(initConfigDirFlag.Value()) + kubeletConfigFile := "" + if utilfeature.DefaultFeatureGate.Enabled(features.KubeletConfigFile) && kubeletConfigFileFlag.Provided() { + kubeletConfigFile, err = filepath.Abs(kubeletConfigFileFlag.Value()) if err != nil { - return nil, nil, 
fmt.Errorf("failed to get absolute path for --init-config-dir") + return nil, nil, fmt.Errorf("failed to get absolute path for --config") } } dynamicConfigDir := "" @@ -886,8 +886,8 @@ func BootstrapKubeletConfigController(defaultConfig *kubeletconfiginternal.Kubel } } - // get the latest KubeletConfiguration checkpoint from disk, or load the init or default config if no valid checkpoints exist - kubeletConfigController, err := kubeletconfig.NewController(defaultConfig, initConfigDir, dynamicConfigDir) + // get the latest KubeletConfiguration checkpoint from disk, or load the kubelet config file or default config if no valid checkpoints exist + kubeletConfigController, err := kubeletconfig.NewController(defaultConfig, kubeletConfigFile, dynamicConfigDir) if err != nil { return nil, nil, fmt.Errorf("failed to construct controller, error: %v", err) } diff --git a/cmd/kubelet/kubelet.go b/cmd/kubelet/kubelet.go index 33ed06f3f81..891aace197c 100644 --- a/cmd/kubelet/kubelet.go +++ b/cmd/kubelet/kubelet.go @@ -75,7 +75,7 @@ func main() { // bootstrap the kubelet config controller, app.BootstrapKubeletConfigController will check // feature gates and only turn on relevant parts of the controller kubeletConfig, kubeletConfigController, err := app.BootstrapKubeletConfigController( - defaultConfig, kubeletFlags.InitConfigDir, kubeletFlags.DynamicConfigDir) + defaultConfig, kubeletFlags.KubeletConfigFile, kubeletFlags.DynamicConfigDir) if err != nil { die(err) } diff --git a/pkg/kubelet/kubeletconfig/configfiles/configfiles.go b/pkg/kubelet/kubeletconfig/configfiles/configfiles.go index fe55e0b938f..c3b5fc9b479 100644 --- a/pkg/kubelet/kubeletconfig/configfiles/configfiles.go +++ b/pkg/kubelet/kubeletconfig/configfiles/configfiles.go @@ -27,8 +27,6 @@ import ( utilfs "k8s.io/kubernetes/pkg/util/filesystem" ) -const kubeletFile = "kubelet" - // Loader loads configuration from a storage layer type Loader interface { // Load loads and returns the KubeletConfiguration from the 
storage layer, or an error if a configuration could not be loaded @@ -41,12 +39,12 @@ type fsLoader struct { fs utilfs.Filesystem // kubeletCodecs is the scheme used to decode config files kubeletCodecs *serializer.CodecFactory - // configDir is the absolute path to the directory containing the configuration files - configDir string + // kubeletFile is an absolute path to the file containing a serialized KubeletConfiguration + kubeletFile string } -// NewFsLoader returns a Loader that loads a KubeletConfiguration from the files in `configDir` -func NewFsLoader(fs utilfs.Filesystem, configDir string) (Loader, error) { +// NewFsLoader returns a Loader that loads a KubeletConfiguration from the `kubeletFile` +func NewFsLoader(fs utilfs.Filesystem, kubeletFile string) (Loader, error) { _, kubeletCodecs, err := kubeletscheme.NewSchemeAndCodecs() if err != nil { return nil, err @@ -55,21 +53,19 @@ func NewFsLoader(fs utilfs.Filesystem, configDir string) (Loader, error) { return &fsLoader{ fs: fs, kubeletCodecs: kubeletCodecs, - configDir: configDir, + kubeletFile: kubeletFile, }, nil } func (loader *fsLoader) Load() (*kubeletconfig.KubeletConfiguration, error) { - // require the config be in a file called "kubelet" - path := filepath.Join(loader.configDir, kubeletFile) - data, err := loader.fs.ReadFile(path) + data, err := loader.fs.ReadFile(loader.kubeletFile) if err != nil { - return nil, fmt.Errorf("failed to read init config file %q, error: %v", path, err) + return nil, fmt.Errorf("failed to read kubelet config file %q, error: %v", loader.kubeletFile, err) } // no configuration is an error, some parameters are required if len(data) == 0 { - return nil, fmt.Errorf("init config file %q was empty, but some parameters are required", path) + return nil, fmt.Errorf("kubelet config file %q was empty", loader.kubeletFile) } kc, err := utilcodec.DecodeKubeletConfiguration(loader.kubeletCodecs, data) @@ -78,7 +74,7 @@ func (loader *fsLoader) Load() 
(*kubeletconfig.KubeletConfiguration, error) { } // make all paths absolute - resolveRelativePaths(kubeletconfig.KubeletConfigurationPathRefs(kc), loader.configDir) + resolveRelativePaths(kubeletconfig.KubeletConfigurationPathRefs(kc), filepath.Dir(loader.kubeletFile)) return kc, nil } diff --git a/pkg/kubelet/kubeletconfig/configfiles/configfiles_test.go b/pkg/kubelet/kubeletconfig/configfiles/configfiles_test.go index 6c92e8bc9b3..77f20373ead 100644 --- a/pkg/kubelet/kubeletconfig/configfiles/configfiles_test.go +++ b/pkg/kubelet/kubeletconfig/configfiles/configfiles_test.go @@ -32,6 +32,7 @@ import ( const configDir = "/test-config-dir" const relativePath = "relative/path/test" +const kubeletFile = "kubelet" func TestLoad(t *testing.T) { cases := []struct { @@ -136,12 +137,13 @@ podManifestPath: %s`, relativePath)), for _, c := range cases { t.Run(c.desc, func(t *testing.T) { fs := utilfs.NewFakeFs() + path := filepath.Join(configDir, kubeletFile) if c.file != nil { - if err := addFile(fs, filepath.Join(configDir, kubeletFile), *c.file); err != nil { + if err := addFile(fs, path, *c.file); err != nil { t.Fatalf("unexpected error: %v", err) } } - loader, err := NewFsLoader(fs, configDir) + loader, err := NewFsLoader(fs, path) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/pkg/kubelet/kubeletconfig/controller.go b/pkg/kubelet/kubeletconfig/controller.go index 9b959b4c94b..be93e3d4db9 100644 --- a/pkg/kubelet/kubeletconfig/controller.go +++ b/pkg/kubelet/kubeletconfig/controller.go @@ -40,7 +40,6 @@ import ( const ( checkpointsDir = "checkpoints" - initConfigDir = "init" ) // Controller is the controller which, among other things: @@ -74,19 +73,19 @@ type Controller struct { } // NewController constructs a new Controller object and returns it. Directory paths must be absolute. -// If the `initConfigDir` is an empty string, skips trying to load the init config. 
+// If the `kubeletConfigFile` is an empty string, skips trying to load the kubelet config file. // If the `dynamicConfigDir` is an empty string, skips trying to load checkpoints or download new config, // but will still sync the ConfigOK condition if you call StartSync with a non-nil client. func NewController(defaultConfig *kubeletconfig.KubeletConfiguration, - initConfigDir string, + kubeletConfigFile string, dynamicConfigDir string) (*Controller, error) { var err error fs := utilfs.DefaultFs{} var fileLoader configfiles.Loader - if len(initConfigDir) > 0 { - fileLoader, err = configfiles.NewFsLoader(fs, initConfigDir) + if len(kubeletConfigFile) > 0 { + fileLoader, err = configfiles.NewFsLoader(fs, kubeletConfigFile) if err != nil { return nil, err } @@ -116,7 +115,7 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) { local, err := cc.loadLocalConfig() if err != nil { return nil, err - } // Assert: the default and init configs are both valid + } // Assert: the default and file configs are both valid // if dynamic config is disabled, we just stop here if !cc.dynamicConfig { From c690ab8cd0b7d669df3895ed2c8452fdfc80da3c Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 27 Dec 2017 13:40:33 +0800 Subject: [PATCH 518/794] Fix typo of compute.VirtualMachinesClient --- pkg/cloudprovider/providers/azure/azure.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 844651d6fc3..eab6b95f047 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -132,7 +132,7 @@ type Config struct { MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount"` } -// VirtualMachinesClient defines needed functions for azure network.VirtualMachinesClient +// VirtualMachinesClient defines needed functions for azure compute.VirtualMachinesClient type VirtualMachinesClient interface { 
CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) From 80d89e022158d16d40f48e3f1e827581ef1af5a6 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 27 Dec 2017 13:50:49 +0800 Subject: [PATCH 519/794] Reduce VirtualMachineScaleSetsClient#List calls --- .../providers/azure/azure_util_vmss.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index 71a881c9fef..01a4d928767 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -76,15 +76,17 @@ type scaleSet struct { cacheMutex sync.Mutex // A local cache of scale sets. The key is scale set name and the value is a // list of virtual machines belonging to the scale set. - cache map[string][]scaleSetVMInfo + cache map[string][]scaleSetVMInfo + availabilitySetNodesCache sets.String } // newScaleSet creates a new scaleSet. func newScaleSet(az *Cloud) VMSet { ss := &scaleSet{ - Cloud: az, - availabilitySet: newAvailabilitySet(az), - cache: make(map[string][]scaleSetVMInfo), + Cloud: az, + availabilitySet: newAvailabilitySet(az), + availabilitySetNodesCache: sets.NewString(), + cache: make(map[string][]scaleSetVMInfo), } go wait.Until(func() { @@ -179,6 +181,11 @@ func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, er return vm, nil } + // Known node not managed by scale sets. + if ss.availabilitySetNodesCache.Has(nodeName) { + return scaleSetVMInfo{}, cloudprovider.InstanceNotFound + } + // Update cache and try again. 
if err = ss.updateCache(); err != nil { return scaleSetVMInfo{}, err @@ -188,6 +195,8 @@ func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, er return vm, nil } + // Node still not found, assuming it is not managed by scale sets. + ss.availabilitySetNodesCache.Insert(nodeName) return scaleSetVMInfo{}, cloudprovider.InstanceNotFound } From 5d10dcd9836622db9235e3ccac1924c3947ed32c Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Sun, 24 Dec 2017 19:03:53 -0500 Subject: [PATCH 520/794] Remove redundant sleep from ReRegistration unit test case --- pkg/kubelet/cm/deviceplugin/manager_test.go | 29 +++++++-------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index d7a032694c5..9f52351125d 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -24,7 +24,6 @@ import ( "reflect" "sync/atomic" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -68,36 +67,29 @@ func TestDevicePluginReRegistration(t *testing.T) { {ID: "Dev3", Health: pluginapi.Healthy}, } - callbackCount := 0 - callbackChan := make(chan int) - var stopping int32 - stopping = 0 + expCallbackCount := int32(0) + callbackCount := int32(0) + callbackChan := make(chan int32) callback := func(n string, a, u, r []pluginapi.Device) { - // Should be called three times, one for each plugin registration, till we are stopping. - if callbackCount > 2 && atomic.LoadInt32(&stopping) <= 0 { + callbackCount++ + if callbackCount > atomic.LoadInt32(&expCallbackCount) { t.FailNow() } - callbackCount++ callbackChan <- callbackCount } m, p1 := setup(t, devs, callback) + atomic.StoreInt32(&expCallbackCount, 1) p1.Register(socketName, testResourceName) // Wait for the first callback to be issued. <-callbackChan - // Wait till the endpoint is added to the manager. 
- for i := 0; i < 20; i++ { - if len(m.Devices()) > 0 { - break - } - time.Sleep(1) - } devices := m.Devices() require.Equal(t, 2, len(devices[testResourceName]), "Devices are not updated.") p2 := NewDevicePluginStub(devs, pluginSocketName+".new") err := p2.Start() require.NoError(t, err) + atomic.StoreInt32(&expCallbackCount, 2) p2.Register(socketName, testResourceName) // Wait for the second callback to be issued. <-callbackChan @@ -109,20 +101,17 @@ func TestDevicePluginReRegistration(t *testing.T) { p3 := NewDevicePluginStub(devsForRegistration, pluginSocketName+".third") err = p3.Start() require.NoError(t, err) + atomic.StoreInt32(&expCallbackCount, 3) p3.Register(socketName, testResourceName) // Wait for the second callback to be issued. <-callbackChan devices3 := m.Devices() require.Equal(t, 1, len(devices3[testResourceName]), "Devices of plugin previously registered should be removed.") - // Wait long enough to catch unexpected callbacks. - time.Sleep(5 * time.Second) - - atomic.StoreInt32(&stopping, 1) p2.Stop() p3.Stop() cleanup(t, m, p1) - + close(callbackChan) } func setup(t *testing.T, devs []*pluginapi.Device, callback monitorCallback) (Manager, *Stub) { From bb1e797b2816701ed316a48846eb27e4182220e5 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 27 Dec 2017 16:20:54 +0800 Subject: [PATCH 521/794] Remove useInstanceMetadata param from Azure cloud provider --- pkg/cloudprovider/providers/azure/azure.go | 6 - .../azure/azure_instance_metadata.go | 113 ------------------ .../providers/azure/azure_instances.go | 52 -------- .../providers/azure/azure_test.go | 71 ----------- 4 files changed, 242 deletions(-) delete mode 100644 pkg/cloudprovider/providers/azure/azure_instance_metadata.go diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 844651d6fc3..1429e4e9331 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -122,9 +122,6 @@ type Config 
struct { // Rate limit Bucket Size CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket" yaml:"cloudProviderRateLimitBucket"` - // Use instance metadata service where possible - UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"` - // Use managed service identity for the virtual machine to access Azure ARM APIs UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"` @@ -214,7 +211,6 @@ type Cloud struct { DisksClient disk.DisksClient operationPollRateLimiter flowcontrol.RateLimiter resourceRequestBackoff wait.Backoff - metadata *InstanceMetadata vmSet VMSet // Clients for vmss. @@ -433,8 +429,6 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.CloudProviderBackoffJitter) } - az.metadata = NewInstanceMetadata() - if az.MaximumLoadBalancerRuleCount == 0 { az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount } diff --git a/pkg/cloudprovider/providers/azure/azure_instance_metadata.go b/pkg/cloudprovider/providers/azure/azure_instance_metadata.go deleted file mode 100644 index 6df99083272..00000000000 --- a/pkg/cloudprovider/providers/azure/azure_instance_metadata.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package azure - -import ( - "encoding/json" - "io/ioutil" - "net/http" -) - -const metadataURL = "http://169.254.169.254/metadata/" - -// NetworkMetadata contains metadata about an instance's network -type NetworkMetadata struct { - Interface []NetworkInterface `json:"interface"` -} - -// NetworkInterface represents an instances network interface. -type NetworkInterface struct { - IPV4 NetworkData `json:"ipv4"` - IPV6 NetworkData `json:"ipv6"` - MAC string `json:"macAddress"` -} - -// NetworkData contains IP information for a network. -type NetworkData struct { - IPAddress []IPAddress `json:"ipAddress"` - Subnet []Subnet `json:"subnet"` -} - -// IPAddress represents IP address information. -type IPAddress struct { - PrivateIP string `json:"privateIPAddress"` - PublicIP string `json:"publicIPAddress"` -} - -// Subnet represents subnet information. -type Subnet struct { - Address string `json:"address"` - Prefix string `json:"prefix"` -} - -// InstanceMetadata knows how to query the Azure instance metadata server. -type InstanceMetadata struct { - baseURL string -} - -// NewInstanceMetadata creates an instance of the InstanceMetadata accessor object. -func NewInstanceMetadata() *InstanceMetadata { - return &InstanceMetadata{ - baseURL: metadataURL, - } -} - -// makeMetadataURL makes a complete metadata URL from the given path. 
-func (i *InstanceMetadata) makeMetadataURL(path string) string { - return i.baseURL + path -} - -// Object queries the metadata server and populates the passed in object -func (i *InstanceMetadata) Object(path string, obj interface{}) error { - data, err := i.queryMetadataBytes(path, "json") - if err != nil { - return err - } - return json.Unmarshal(data, obj) -} - -// Text queries the metadata server and returns the corresponding text -func (i *InstanceMetadata) Text(path string) (string, error) { - data, err := i.queryMetadataBytes(path, "text") - if err != nil { - return "", err - } - return string(data), err -} - -func (i *InstanceMetadata) queryMetadataBytes(path, format string) ([]byte, error) { - client := &http.Client{} - - req, err := http.NewRequest("GET", i.makeMetadataURL(path), nil) - if err != nil { - return nil, err - } - req.Header.Add("Metadata", "True") - - q := req.URL.Query() - q.Add("format", format) - q.Add("api-version", "2017-04-02") - req.URL.RawQuery = q.Encode() - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ioutil.ReadAll(resp.Body) -} diff --git a/pkg/cloudprovider/providers/azure/azure_instances.go b/pkg/cloudprovider/providers/azure/azure_instances.go index 9c5976c3dfc..9f93c80750c 100644 --- a/pkg/cloudprovider/providers/azure/azure_instances.go +++ b/pkg/cloudprovider/providers/azure/azure_instances.go @@ -28,26 +28,6 @@ import ( // NodeAddresses returns the addresses of the specified instance. 
func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { - if az.UseInstanceMetadata { - ipAddress := IPAddress{} - err := az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress) - if err != nil { - return nil, err - } - addresses := []v1.NodeAddress{ - {Type: v1.NodeInternalIP, Address: ipAddress.PrivateIP}, - {Type: v1.NodeHostName, Address: string(name)}, - } - if len(ipAddress.PublicIP) > 0 { - addr := v1.NodeAddress{ - Type: v1.NodeExternalIP, - Address: ipAddress.PublicIP, - } - addresses = append(addresses, addr) - } - return addresses, nil - } - ip, err := az.GetIPForMachineWithRetry(name) if err != nil { glog.V(2).Infof("NodeAddresses(%s) abort backoff", name) @@ -96,28 +76,9 @@ func (az *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) { return true, nil } -func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) { - nodeName := mapNodeNameToVMName(name) - metadataName, err := az.metadata.Text("instance/compute/name") - return (metadataName == nodeName), err -} - // InstanceID returns the cloud provider ID of the specified instance. // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound) func (az *Cloud) InstanceID(name types.NodeName) (string, error) { - if az.UseInstanceMetadata { - isLocalInstance, err := az.isCurrentInstance(name) - if err != nil { - return "", err - } - if isLocalInstance { - externalInstanceID, err := az.metadata.Text("instance/compute/vmId") - if err == nil { - return externalInstanceID, nil - } - } - } - return az.vmSet.GetInstanceIDByNodeName(string(name)) } @@ -138,19 +99,6 @@ func (az *Cloud) InstanceTypeByProviderID(providerID string) (string, error) { // (Implementer Note): This is used by kubelet. Kubelet will label the node. 
Real log from kubelet: // Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value] func (az *Cloud) InstanceType(name types.NodeName) (string, error) { - if az.UseInstanceMetadata { - isLocalInstance, err := az.isCurrentInstance(name) - if err != nil { - return "", err - } - if isLocalInstance { - machineType, err := az.metadata.Text("instance/compute/vmSize") - if err == nil { - return machineType, nil - } - } - } - return az.vmSet.GetInstanceTypeByNodeName(string(name)) } diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index 7960b21509e..91b1d48549f 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -17,12 +17,8 @@ limitations under the License. package azure import ( - "encoding/json" "fmt" "math" - "net/http" - "net/http/httptest" - "reflect" "strings" "testing" @@ -1686,73 +1682,6 @@ func TestGetNodeNameByProviderID(t *testing.T) { } } -func TestMetadataURLGeneration(t *testing.T) { - metadata := NewInstanceMetadata() - fullPath := metadata.makeMetadataURL("some/path") - if fullPath != "http://169.254.169.254/metadata/some/path" { - t.Errorf("Expected http://169.254.169.254/metadata/some/path saw %s", fullPath) - } -} - -func TestMetadataParsing(t *testing.T) { - data := ` -{ - "interface": [ - { - "ipv4": { - "ipAddress": [ - { - "privateIpAddress": "10.0.1.4", - "publicIpAddress": "X.X.X.X" - } - ], - "subnet": [ - { - "address": "10.0.1.0", - "prefix": "24" - } - ] - }, - "ipv6": { - "ipAddress": [ - - ] - }, - "macAddress": "002248020E1E" - } - ] -} -` - - network := NetworkMetadata{} - if err := json.Unmarshal([]byte(data), &network); err != nil { - t.Errorf("Unexpected error: %v", err) - } - - ip := network.Interface[0].IPV4.IPAddress[0].PrivateIP - if ip != "10.0.1.4" { - t.Errorf("Unexpected value: %s, expected 10.0.1.4", ip) - } - - server := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, data) - })) - defer server.Close() - - metadata := &InstanceMetadata{ - baseURL: server.URL, - } - - networkJSON := NetworkMetadata{} - if err := metadata.Object("/some/path", &networkJSON); err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if !reflect.DeepEqual(network, networkJSON) { - t.Errorf("Unexpected inequality:\n%#v\nvs\n%#v", network, networkJSON) - } -} - func addTestSubnet(t *testing.T, az *Cloud, svc *v1.Service) { if svc.Annotations[ServiceAnnotationLoadBalancerInternal] != "true" { t.Error("Subnet added to non-internal service") From 5614b7fc610ece1aae9a4da887f8f5cee60141b9 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 27 Dec 2017 16:32:32 +0800 Subject: [PATCH 522/794] update bazel build files --- pkg/cloudprovider/providers/azure/BUILD | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index fa4d39f9f64..811cef603ea 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -15,7 +15,6 @@ go_library( "azure_controllerCommon.go", "azure_fakes.go", "azure_file.go", - "azure_instance_metadata.go", "azure_instances.go", "azure_loadbalancer.go", "azure_managedDiskController.go", From 075e8dce0002559f9db771896153645b64e70598 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Wed, 27 Dec 2017 16:06:04 +0800 Subject: [PATCH 523/794] fix local up cluster startup flag bug --- plugin/cmd/kube-scheduler/BUILD | 1 + plugin/cmd/kube-scheduler/app/BUILD | 1 - plugin/cmd/kube-scheduler/app/server.go | 5 +---- plugin/cmd/kube-scheduler/scheduler.go | 2 ++ 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/plugin/cmd/kube-scheduler/BUILD b/plugin/cmd/kube-scheduler/BUILD index 74f35ec92fa..905919efa49 100644 --- a/plugin/cmd/kube-scheduler/BUILD +++ b/plugin/cmd/kube-scheduler/BUILD @@ -23,6 +23,7 @@ go_library( 
"//pkg/client/metrics/prometheus:go_default_library", "//pkg/version/prometheus:go_default_library", "//plugin/cmd/kube-scheduler/app:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", ], ) diff --git a/plugin/cmd/kube-scheduler/app/BUILD b/plugin/cmd/kube-scheduler/app/BUILD index 5584eebff35..9de3152c18c 100644 --- a/plugin/cmd/kube-scheduler/app/BUILD +++ b/plugin/cmd/kube-scheduler/app/BUILD @@ -38,7 +38,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library", diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go index e8d80644627..a304257248a 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/plugin/cmd/kube-scheduler/app/server.go @@ -68,11 +68,9 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/factory" "github.com/golang/glog" + "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" "github.com/spf13/pflag" - utilflag "k8s.io/apiserver/pkg/util/flag" - - "github.com/prometheus/client_golang/prometheus" ) // SchedulerServer has all the context and params needed to run a Scheduler @@ -344,7 +342,6 @@ through the API as necessary.`, } opts.AddFlags(pflag.CommandLine) - utilflag.InitFlags() cmd.MarkFlagFilename("config", "yaml", "yml", "json") diff --git a/plugin/cmd/kube-scheduler/scheduler.go b/plugin/cmd/kube-scheduler/scheduler.go index f9e93db9c8c..047ef86cffa 100644 --- a/plugin/cmd/kube-scheduler/scheduler.go +++ b/plugin/cmd/kube-scheduler/scheduler.go @@ -19,6 +19,7 @@ package main 
import ( "os" + utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/apiserver/pkg/util/logs" _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration _ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration @@ -28,6 +29,7 @@ import ( func main() { command := app.NewSchedulerCommand() + utilflag.InitFlags() logs.InitLogs() defer logs.FlushLogs() From afb23afdb9f60593ae88dd7c172cbc0273e52676 Mon Sep 17 00:00:00 2001 From: Krishnakumar R <29471693+kkmsft@users.noreply.github.com> Date: Wed, 27 Dec 2017 11:24:06 +0000 Subject: [PATCH 524/794] Add 'ProviderID' to the output of kubectl describe node.... --- pkg/printers/internalversion/describe.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index e231df79692..85334b0e8d5 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -2635,6 +2635,9 @@ func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events if len(node.Spec.ExternalID) > 0 { w.Write(LEVEL_0, "ExternalID:\t%s\n", node.Spec.ExternalID) } + if len(node.Spec.ProviderID) > 0 { + w.Write(LEVEL_0, "ProviderID:\t%s\n", node.Spec.ProviderID) + } if canViewPods && nodeNonTerminatedPodsList != nil { if err := describeNodeResource(nodeNonTerminatedPodsList, node, w); err != nil { return err From d6ed5d3107a35507d129916a28d0491eb752ff52 Mon Sep 17 00:00:00 2001 From: Nick Sardo Date: Wed, 27 Dec 2017 11:27:43 -0800 Subject: [PATCH 525/794] Use GA API for managing addresses --- .../providers/gce/gce_address_manager.go | 14 ++++++------ .../providers/gce/gce_address_manager_test.go | 22 +++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/gce_address_manager.go b/pkg/cloudprovider/providers/gce/gce_address_manager.go index 004148f249b..ad7c38b89d2 100644 --- 
a/pkg/cloudprovider/providers/gce/gce_address_manager.go +++ b/pkg/cloudprovider/providers/gce/gce_address_manager.go @@ -20,7 +20,7 @@ import ( "fmt" "net/http" - computebeta "google.golang.org/api/compute/v0.beta" + compute "google.golang.org/api/compute/v1" "github.com/golang/glog" ) @@ -63,7 +63,7 @@ func (am *addressManager) HoldAddress() (string, error) { // calls since it indicates whether a Delete is necessary before Reserve. glog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType) // Get the address in case it was orphaned earlier - addr, err := am.svc.GetBetaRegionAddress(am.name, am.region) + addr, err := am.svc.GetRegionAddress(am.name, am.region) if err != nil && !isNotFound(err) { return "", err } @@ -118,7 +118,7 @@ func (am *addressManager) ReleaseAddress() error { func (am *addressManager) ensureAddressReservation() (string, error) { // Try reserving the IP with controller-owned address name // If am.targetIP is an empty string, a new IP will be created. - newAddr := &computebeta.Address{ + newAddr := &compute.Address{ Name: am.name, Description: fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, am.serviceName), Address: am.targetIP, @@ -126,7 +126,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) { Subnetwork: am.subnetURL, } - reserveErr := am.svc.ReserveBetaRegionAddress(newAddr, am.region) + reserveErr := am.svc.ReserveRegionAddress(newAddr, am.region) if reserveErr == nil { if newAddr.Address != "" { glog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name) @@ -155,7 +155,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) { // Reserving the address failed due to a conflict or bad request. The address manager just checked that no address // exists with the name, so it may belong to the user. 
- addr, err := am.svc.GetBetaRegionAddressByIP(am.region, am.targetIP) + addr, err := am.svc.GetRegionAddressByIP(am.region, am.targetIP) if err != nil { return "", fmt.Errorf("failed to get address by IP %q after reservation attempt, err: %q, reservation err: %q", am.targetIP, err, reserveErr) } @@ -178,7 +178,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) { return addr.Address, nil } -func (am *addressManager) validateAddress(addr *computebeta.Address) error { +func (am *addressManager) validateAddress(addr *compute.Address) error { if am.targetIP != "" && am.targetIP != addr.Address { return fmt.Errorf("address %q does not have the expected IP %q, actual: %q", addr.Name, am.targetIP, addr.Address) } @@ -189,7 +189,7 @@ func (am *addressManager) validateAddress(addr *computebeta.Address) error { return nil } -func (am *addressManager) isManagedAddress(addr *computebeta.Address) bool { +func (am *addressManager) isManagedAddress(addr *compute.Address) bool { return addr.Name == am.name } diff --git a/pkg/cloudprovider/providers/gce/gce_address_manager_test.go b/pkg/cloudprovider/providers/gce/gce_address_manager_test.go index 6a83171074d..1eee47cae93 100644 --- a/pkg/cloudprovider/providers/gce/gce_address_manager_test.go +++ b/pkg/cloudprovider/providers/gce/gce_address_manager_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - computebeta "google.golang.org/api/compute/v0.beta" + compute "google.golang.org/api/compute/v1" ) const testSvcName = "my-service" @@ -55,8 +55,8 @@ func TestAddressManagerOrphaned(t *testing.T) { svc := NewFakeCloudAddressService() targetIP := "1.1.1.1" - addr := &computebeta.Address{Name: testLBName, Address: targetIP, AddressType: string(schemeInternal)} - err := svc.ReserveBetaRegionAddress(addr, testRegion) + addr := &compute.Address{Name: testLBName, Address: targetIP, AddressType: string(schemeInternal)} + err := svc.ReserveRegionAddress(addr, 
testRegion) require.NoError(t, err) mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal) @@ -71,8 +71,8 @@ func TestAddressManagerOutdatedOrphan(t *testing.T) { previousAddress := "1.1.0.0" targetIP := "1.1.1.1" - addr := &computebeta.Address{Name: testLBName, Address: previousAddress, AddressType: string(schemeExternal)} - err := svc.ReserveBetaRegionAddress(addr, testRegion) + addr := &compute.Address{Name: testLBName, Address: previousAddress, AddressType: string(schemeExternal)} + err := svc.ReserveRegionAddress(addr, testRegion) require.NoError(t, err) mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal) @@ -86,8 +86,8 @@ func TestAddressManagerExternallyOwned(t *testing.T) { svc := NewFakeCloudAddressService() targetIP := "1.1.1.1" - addr := &computebeta.Address{Name: "my-important-address", Address: targetIP, AddressType: string(schemeInternal)} - err := svc.ReserveBetaRegionAddress(addr, testRegion) + addr := &compute.Address{Name: "my-important-address", Address: targetIP, AddressType: string(schemeInternal)} + err := svc.ReserveRegionAddress(addr, testRegion) require.NoError(t, err) mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal) @@ -107,8 +107,8 @@ func TestAddressManagerBadExternallyOwned(t *testing.T) { svc := NewFakeCloudAddressService() targetIP := "1.1.1.1" - addr := &computebeta.Address{Name: "my-important-address", Address: targetIP, AddressType: string(schemeExternal)} - err := svc.ReserveBetaRegionAddress(addr, testRegion) + addr := &compute.Address{Name: "my-important-address", Address: targetIP, AddressType: string(schemeExternal)} + err := svc.ReserveRegionAddress(addr, testRegion) require.NoError(t, err) mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal) @@ -121,7 +121,7 @@ func testHoldAddress(t *testing.T, mgr *addressManager, 
svc CloudAddressService, require.NoError(t, err) assert.NotEmpty(t, ipToUse) - addr, err := svc.GetBetaRegionAddress(name, region) + addr, err := svc.GetRegionAddress(name, region) require.NoError(t, err) if targetIP != "" { assert.EqualValues(t, targetIP, addr.Address) @@ -132,6 +132,6 @@ func testHoldAddress(t *testing.T, mgr *addressManager, svc CloudAddressService, func testReleaseAddress(t *testing.T, mgr *addressManager, svc CloudAddressService, name, region string) { err := mgr.ReleaseAddress() require.NoError(t, err) - _, err = svc.GetBetaRegionAddress(name, region) + _, err = svc.GetRegionAddress(name, region) assert.True(t, isNotFound(err)) } From 3087a10b7b3e7d46a9480b9d166757e1a4dd3d1c Mon Sep 17 00:00:00 2001 From: Ian Chakeres Date: Sat, 23 Dec 2017 20:06:34 -0800 Subject: [PATCH 526/794] Updated local-volume bootstrapper/provisioner e2e test for new config format The local-volume bootstrapper/provisioner configuration format changed in https://github.com/kubernetes-incubator/external-storage/pull/352 This format is exposed in v2.0.0 of the provisioner and bootstrapper images. This PR updates the e2e test config, so that the existing tests continue to work. 
--- test/e2e/storage/BUILD | 1 + test/e2e/storage/persistent_volumes-local.go | 30 ++++++++++++++------ 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index f7c76328535..1aa80260956 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -40,6 +40,7 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 4c09ed26099..70c4fcbb1cb 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -17,7 +17,6 @@ limitations under the License. package storage import ( - "encoding/json" "fmt" "math/rand" "path" @@ -26,6 +25,7 @@ import ( "strings" "time" + "github.com/ghodss/yaml" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -118,8 +118,8 @@ const ( // volumeConfigName is the configmap passed to bootstrapper and provisioner volumeConfigName = "local-volume-config" // bootstrapper and provisioner images used for e2e tests - bootstrapperImageName = "quay.io/external_storage/local-volume-provisioner-bootstrap:v1.0.1" - provisionerImageName = "quay.io/external_storage/local-volume-provisioner:v1.0.1" + bootstrapperImageName = "quay.io/external_storage/local-volume-provisioner-bootstrap:v2.0.0" + provisionerImageName = "quay.io/external_storage/local-volume-provisioner:v2.0.0" // provisioner daemonSetName name, must match the one defined in bootstrapper daemonSetName = "local-volume-provisioner" // provisioner node/pv cluster role binding, must match the one defined in bootstrapper @@ -959,12 +959,24 @@ func deleteClusterRoleBinding(config *localTestConfig) { } func createVolumeConfigMap(config *localTestConfig) { - mountConfig := struct { - HostDir string `json:"hostDir"` - }{ - HostDir: path.Join(hostBase, discoveryDir), + // MountConfig and ProvisionerConfiguration from + // https://github.com/kubernetes-incubator/external-storage/blob/master/local-volume/provisioner/pkg/common/common.go + type MountConfig struct { + // The hostpath directory + HostDir string `json:"hostDir" yaml:"hostDir"` } - data, err := json.Marshal(&mountConfig) + type ProvisionerConfiguration struct { + // StorageClassConfig defines configuration of Provisioner's storage classes + StorageClassConfig map[string]MountConfig `json:"storageClassMap" yaml:"storageClassMap"` + } + var provisionerConfig ProvisionerConfiguration + provisionerConfig.StorageClassConfig = map[string]MountConfig{ + config.scName: { + HostDir: path.Join(hostBase, discoveryDir), + }, + } + + data, err := yaml.Marshal(&provisionerConfig.StorageClassConfig) Expect(err).NotTo(HaveOccurred()) configMap := v1.ConfigMap{ @@ -977,7 +989,7 @@ func createVolumeConfigMap(config *localTestConfig) { Namespace: config.ns, }, Data: 
map[string]string{ - config.scName: string(data), + "storageClassMap": string(data), }, } _, err = config.client.CoreV1().ConfigMaps(config.ns).Create(&configMap) From c2c845f5224c5e661ef483a92c7aeaf3aff826df Mon Sep 17 00:00:00 2001 From: Jake Sanders <1200829+dekkagaijin@users.noreply.github.com> Date: Wed, 27 Dec 2017 15:47:09 -0800 Subject: [PATCH 527/794] Use the regionless mirror alias --- cluster/gce/util.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 34d710ce7fe..a0d7d8c66d8 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -243,12 +243,12 @@ function set-preferred-region() { else KUBE_ADDON_REGISTRY="gcr.io/google_containers" fi - - if [[ "${ENABLE_DOCKER_REGISTRY_CACHE:-}" == "true" ]]; then - DOCKER_REGISTRY_MIRROR_URL="https://${preferred}-mirror.gcr.io" - fi } +if [[ "${ENABLE_DOCKER_REGISTRY_CACHE:-}" == "true" ]]; then + DOCKER_REGISTRY_MIRROR_URL="https://mirror.gcr.io" +fi + # Take the local tar files and upload them to Google Storage. They will then be # downloaded by the master as part of the start up script for the master. 
# From bfa462a8c013d8bfd6a86ea004315ef7fc9db168 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Thu, 28 Dec 2017 14:44:36 +0800 Subject: [PATCH 528/794] optimize volumeResizeMap lock --- .../volume/expand/cache/volume_resize_map.go | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/pkg/controller/volume/expand/cache/volume_resize_map.go b/pkg/controller/volume/expand/cache/volume_resize_map.go index 2645a294c23..c35af900f02 100644 --- a/pkg/controller/volume/expand/cache/volume_resize_map.go +++ b/pkg/controller/volume/expand/cache/volume_resize_map.go @@ -52,7 +52,7 @@ type volumeResizeMap struct { // kube client for making API calls kubeClient clientset.Interface // for guarding access to pvcrs map - sync.RWMutex + sync.Mutex } // PVCWithResizeRequest struct defines data structure that stores state needed for @@ -103,9 +103,6 @@ func (resizeMap *volumeResizeMap) AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv return } - resizeMap.Lock() - defer resizeMap.Unlock() - pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcStatusSize := pvc.Status.Capacity[v1.ResourceStorage] @@ -121,6 +118,9 @@ func (resizeMap *volumeResizeMap) AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv ExpectedSize: pvcSize, PersistentVolume: pv, } + + resizeMap.Lock() + defer resizeMap.Unlock() resizeMap.pvcrs[types.UniquePVCName(pvc.UID)] = pvcRequest } @@ -141,18 +141,15 @@ func (resizeMap *volumeResizeMap) GetPVCsWithResizeRequest() []*PVCWithResizeReq // DeletePVC removes given pvc object from list of pvcs that needs resizing. // deleting a pvc in this map doesn't affect operations that are already inflight. 
func (resizeMap *volumeResizeMap) DeletePVC(pvc *v1.PersistentVolumeClaim) { - resizeMap.Lock() - defer resizeMap.Unlock() pvcUniqueName := types.UniquePVCName(pvc.UID) glog.V(5).Infof("Removing PVC %v from resize map", pvcUniqueName) + resizeMap.Lock() + defer resizeMap.Unlock() delete(resizeMap.pvcrs, pvcUniqueName) } // MarkAsResized marks a pvc as fully resized func (resizeMap *volumeResizeMap) MarkAsResized(pvcr *PVCWithResizeRequest, newSize resource.Quantity) error { - resizeMap.Lock() - defer resizeMap.Unlock() - emptyCondition := []v1.PersistentVolumeClaimCondition{} err := resizeMap.updatePVCCapacityAndConditions(pvcr, newSize, emptyCondition) @@ -165,9 +162,6 @@ func (resizeMap *volumeResizeMap) MarkAsResized(pvcr *PVCWithResizeRequest, newS // UpdatePVSize updates just pv size after cloudprovider resizing is successful func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSize resource.Quantity) error { - resizeMap.Lock() - defer resizeMap.Unlock() - oldPv := pvcr.PersistentVolume pvClone := oldPv.DeepCopy() @@ -201,7 +195,6 @@ func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSi } func (resizeMap *volumeResizeMap) updatePVCCapacityAndConditions(pvcr *PVCWithResizeRequest, newSize resource.Quantity, pvcConditions []v1.PersistentVolumeClaimCondition) error { - claimClone := pvcr.PVC.DeepCopy() claimClone.Status.Capacity[v1.ResourceStorage] = newSize From 6dd9cc6dbfe7a92a26597e0b3ba736591f6a7eff Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Fri, 29 Dec 2017 10:50:57 +0800 Subject: [PATCH 529/794] Fix vmss listing for Azure cloud provider --- .../providers/azure/azure_util_vmss.go | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index 01a4d928767..1e03f70187d 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ 
b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -164,20 +164,20 @@ func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, er ss.cacheMutex.Lock() defer ss.cacheMutex.Unlock() - getVMFromCache := func(nodeName string) (scaleSetVMInfo, error) { + getVMFromCache := func(nodeName string) (scaleSetVMInfo, bool) { for scaleSetName := range ss.cache { for _, vm := range ss.cache[scaleSetName] { if vm.NodeName == nodeName { - return vm, nil + return vm, true } } } - return scaleSetVMInfo{}, cloudprovider.InstanceNotFound + return scaleSetVMInfo{}, false } - vm, err := getVMFromCache(nodeName) - if err == nil { + vm, found := getVMFromCache(nodeName) + if found { return vm, nil } @@ -187,11 +187,11 @@ func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, er } // Update cache and try again. - if err = ss.updateCache(); err != nil { + if err := ss.updateCache(); err != nil { return scaleSetVMInfo{}, err } - vm, err = getVMFromCache(nodeName) - if err == nil { + vm, found = getVMFromCache(nodeName) + if found { return vm, nil } @@ -204,35 +204,35 @@ func (ss *scaleSet) getCachedVirtualMachineByInstanceID(scaleSetName, instanceID ss.cacheMutex.Lock() defer ss.cacheMutex.Unlock() - getVMByID := func(scaleSetName, instanceID string) (scaleSetVMInfo, error) { + getVMByID := func(scaleSetName, instanceID string) (scaleSetVMInfo, bool) { vms, ok := ss.cache[scaleSetName] if !ok { glog.V(4).Infof("scale set (%s) not found", scaleSetName) - return scaleSetVMInfo{}, cloudprovider.InstanceNotFound + return scaleSetVMInfo{}, false } for _, vm := range vms { if vm.InstanceID == instanceID { glog.V(4).Infof("getCachedVirtualMachineByInstanceID gets vm (%s) by instanceID (%s) within scale set (%s)", vm.NodeName, instanceID, scaleSetName) - return vm, nil + return vm, true } } glog.V(4).Infof("instanceID (%s) not found in scale set (%s)", instanceID, scaleSetName) - return scaleSetVMInfo{}, cloudprovider.InstanceNotFound + return 
scaleSetVMInfo{}, false } - vm, err := getVMByID(scaleSetName, instanceID) - if err == nil { + vm, found := getVMByID(scaleSetName, instanceID) + if found { return vm, nil } // Update cache and try again. - if err = ss.updateCache(); err != nil { + if err := ss.updateCache(); err != nil { return scaleSetVMInfo{}, err } - vm, err = getVMByID(scaleSetName, instanceID) - if err == nil { + vm, found = getVMByID(scaleSetName, instanceID) + if found { return vm, nil } @@ -407,7 +407,7 @@ func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) { return nil, backoffError } - appendResults := (result.Value != nil && len(*result.Value) > 1) + appendResults := (result.Value != nil && len(*result.Value) > 0) for appendResults { for _, scaleSet := range *result.Value { allScaleSets = append(allScaleSets, *scaleSet.Name) @@ -431,7 +431,7 @@ func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) { return nil, backoffError } - appendResults = (result.Value != nil && len(*result.Value) > 1) + appendResults = (result.Value != nil && len(*result.Value) > 0) } } @@ -461,7 +461,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir return nil, backoffError } - appendResults := (result.Value != nil && len(*result.Value) > 1) + appendResults := (result.Value != nil && len(*result.Value) > 0) for appendResults { allVMs = append(allVMs, *result.Value...) 
appendResults = false @@ -483,7 +483,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir return nil, backoffError } - appendResults = (result.Value != nil && len(*result.Value) > 1) + appendResults = (result.Value != nil && len(*result.Value) > 0) } } From b842f008fcbd20be5c37670c38eef905db2d48b6 Mon Sep 17 00:00:00 2001 From: Lion-Wei Date: Thu, 28 Dec 2017 21:54:58 +0800 Subject: [PATCH 530/794] Modify ipvs real server equal --- pkg/util/ipvs/testing/fake.go | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/pkg/util/ipvs/testing/fake.go b/pkg/util/ipvs/testing/fake.go index b33f091213d..6e015a20ee6 100644 --- a/pkg/util/ipvs/testing/fake.go +++ b/pkg/util/ipvs/testing/fake.go @@ -18,6 +18,8 @@ package testing import ( "fmt" + "net" + "strconv" utilipvs "k8s.io/kubernetes/pkg/util/ipvs" ) @@ -39,6 +41,15 @@ func (s *serviceKey) String() string { return fmt.Sprintf("%s:%d/%s", s.IP, s.Port, s.Protocol) } +type realServerKey struct { + Address net.IP + Port uint16 +} + +func (r *realServerKey) String() string { + return net.JoinHostPort(r.Address.String(), strconv.Itoa(int(r.Port))) +} + //NewFake creates a fake ipvs implementation - a cache store. func NewFake() *FakeIPVS { return &FakeIPVS{ @@ -55,6 +66,13 @@ func toServiceKey(serv *utilipvs.VirtualServer) serviceKey { } } +func toRealServerKey(rs *utilipvs.RealServer) *realServerKey { + return &realServerKey{ + Address: rs.Address, + Port: rs.Port, + } +} + //AddVirtualServer is a fake implementation, it simply adds the VirtualServer into the cache store. 
func (f *FakeIPVS) AddVirtualServer(serv *utilipvs.VirtualServer) error { if serv == nil { @@ -159,18 +177,19 @@ func (f *FakeIPVS) DeleteRealServer(serv *utilipvs.VirtualServer, dest *utilipvs return fmt.Errorf("Failed to delete destination for service %v, service not found", key.String()) } dests := f.Destinations[key] - var i int - for i = range dests { - if dests[i].Equal(dest) { + exist := false + for i := range dests { + if toRealServerKey(dests[i]).String() == toRealServerKey(dest).String() { + // Delete one element + f.Destinations[key] = append(f.Destinations[key][:i], f.Destinations[key][i+1:]...) + exist = true break } } // Not Found - if i >= len(f.Destinations[key]) { + if !exist { return fmt.Errorf("Failed to delete real server for service %v, real server not found", key.String()) } - // Delete one element - f.Destinations[key] = append(f.Destinations[key][:i], f.Destinations[key][i+1:]...) return nil } From e852ad38f3a885ed87bf25028b336daf845efd06 Mon Sep 17 00:00:00 2001 From: Lion-Wei Date: Thu, 28 Dec 2017 21:55:15 +0800 Subject: [PATCH 531/794] add fake.DeleteRealServer UT --- pkg/util/ipvs/testing/fake_test.go | 79 +++++++++++++++++------------- 1 file changed, 46 insertions(+), 33 deletions(-) diff --git a/pkg/util/ipvs/testing/fake_test.go b/pkg/util/ipvs/testing/fake_test.go index 2682bf25825..a38d40840e0 100644 --- a/pkg/util/ipvs/testing/fake_test.go +++ b/pkg/util/ipvs/testing/fake_test.go @@ -113,46 +113,59 @@ func TestRealServer(t *testing.T) { Port: uint16(80), Protocol: string("TCP"), } + rss := []*utilipvs.RealServer{ + {net.ParseIP("172.16.2.1"), 8080, 1}, + {net.ParseIP("172.16.2.2"), 8080, 2}, + {net.ParseIP("172.16.2.3"), 8080, 3}, + } err := fake.AddVirtualServer(vs) if err != nil { t.Errorf("Fail to add virutal server, error: %v", err) } - // Add a real server to the virtual server - rs1 := &utilipvs.RealServer{ - Address: net.ParseIP("172.16.2.1"), + // Add real server to the virtual server + for i := range rss { + if err = 
fake.AddRealServer(vs, rss[i]); err != nil { + t.Errorf("Fail to add real server, error: %v", err) + } + } + // Delete a real server of the virtual server + // Make sure any position of the list can be real deleted + rssLen := len(rss) + for i := range rss { + // List all real servers of the virtual server + list, err := fake.GetRealServers(vs) + if err != nil { + t.Errorf("Fail to get real servers of the virtual server, error: %v", err) + } + if len(list) != rssLen { + t.Errorf("Expect %d virutal servers, got: %d", len(rss), len(list)) + } + rsToDel := list[i] + if err = fake.DeleteRealServer(vs, rsToDel); err != nil { + t.Errorf("Fail to delete real server of the virtual server, error: %v", err) + } else { + dests, err := fake.GetRealServers(vs) + if err != nil { + t.Errorf("Fail to get real servers of the virtual server, error: %v", err) + } + for _, dest := range dests { + if toRealServerKey(dest).String() == toRealServerKey(rsToDel).String() { + t.Errorf("Expect real server %q be deleted.", rsToDel.String()) + } + } + if err = fake.AddRealServer(vs, rsToDel); err != nil { + t.Errorf("Fail to add real server, error: %v", err) + } + } + } + // Test delete real server that not exist + rs := &utilipvs.RealServer{ + Address: net.ParseIP("172.16.2.4"), Port: uint16(8080), Weight: 1, } - err = fake.AddRealServer(vs, rs1) - if err != nil { - t.Errorf("Fail to add real server, error: %v", err) - } - // Add another real server to the virtual server - rs2 := &utilipvs.RealServer{ - Address: net.ParseIP("172.16.3.2"), - Port: uint16(8080), - Weight: 2, - } - err = fake.AddRealServer(vs, rs2) - if err != nil { - t.Errorf("Fail to add real server, error: %v", err) - } - // List all real servers of the virtual server - list, err := fake.GetRealServers(vs) - if err != nil { - t.Errorf("Fail to get real servers of the virtual server, error: %v", err) - } - if len(list) != 2 { - t.Errorf("Expect 2 virutal servers, got: %d", len(list)) - } - // Delete a real server of the 
virtual server - err = fake.DeleteRealServer(vs, rs2) - list, err = fake.GetRealServers(vs) - if err != nil { - t.Errorf("Fail to get real servers of the virtual server, error: %v", err) - } - if len(list) != 1 { - t.Errorf("Expect 1 real server, got: %d", len(list) + if err = fake.DeleteRealServer(vs, rs); err == nil { + t.Errorf("Delete real server that not exist, Expect error, got nil") } // Delete the virtual server err = fake.DeleteVirtualServer(vs) From e2918f08011d79a027693b7d8c16bc26eb1ce4b5 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Fri, 29 Dec 2017 14:36:25 +0800 Subject: [PATCH 532/794] fix ipvs virtual server update --- pkg/proxy/ipvs/proxier.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 9f0def1e1de..2ae7ad45b05 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -1589,7 +1589,7 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer, // IPVS service was changed, update the existing one // During updates, service VIP will not go down glog.V(3).Infof("IPVS service %s was changed", svcName) - if err := proxier.ipvs.UpdateVirtualServer(appliedVirtualServer); err != nil { + if err := proxier.ipvs.UpdateVirtualServer(vs); err != nil { glog.Errorf("Failed to update IPVS service, err:%v", err) return err } From 5bd0d98d355df983090f888f5c4a81774924b0cb Mon Sep 17 00:00:00 2001 From: Wojciech Tyczynski Date: Fri, 29 Dec 2017 13:42:49 +0100 Subject: [PATCH 533/794] Update CHANGELOG-1.7.md for v1.7.12. 
--- CHANGELOG-1.7.md | 208 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 143 insertions(+), 65 deletions(-) diff --git a/CHANGELOG-1.7.md b/CHANGELOG-1.7.md index 68f2541844b..dce11c3515d 100644 --- a/CHANGELOG-1.7.md +++ b/CHANGELOG-1.7.md @@ -1,86 +1,93 @@ -- [v1.7.11](#v1711) - - [Downloads for v1.7.11](#downloads-for-v1711) +- [v1.7.12](#v1712) + - [Downloads for v1.7.12](#downloads-for-v1712) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.7.10](#changelog-since-v1710) + - [Changelog since v1.7.11](#changelog-since-v1711) - [Other notable changes](#other-notable-changes) -- [v1.7.10](#v1710) - - [Downloads for v1.7.10](#downloads-for-v1710) +- [v1.7.11](#v1711) + - [Downloads for v1.7.11](#downloads-for-v1711) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.7.9](#changelog-since-v179) + - [Changelog since v1.7.10](#changelog-since-v1710) - [Other notable changes](#other-notable-changes-1) -- [v1.7.9](#v179) - - [Downloads for v1.7.9](#downloads-for-v179) +- [v1.7.10](#v1710) + - [Downloads for v1.7.10](#downloads-for-v1710) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.7.8](#changelog-since-v178) + - [Changelog since v1.7.9](#changelog-since-v179) - [Other notable changes](#other-notable-changes-2) -- [v1.7.8](#v178) - - [Downloads for v1.7.8](#downloads-for-v178) +- [v1.7.9](#v179) + - [Downloads for v1.7.9](#downloads-for-v179) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.7.7](#changelog-since-v177) + - [Changelog since v1.7.8](#changelog-since-v178) - [Other notable changes](#other-notable-changes-3) -- [v1.7.7](#v177) - - [Downloads for v1.7.7](#downloads-for-v177) +- 
[v1.7.8](#v178) + - [Downloads for v1.7.8](#downloads-for-v178) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.7.6](#changelog-since-v176) + - [Changelog since v1.7.7](#changelog-since-v177) - [Other notable changes](#other-notable-changes-4) -- [v1.7.6](#v176) - - [Downloads for v1.7.6](#downloads-for-v176) +- [v1.7.7](#v177) + - [Downloads for v1.7.7](#downloads-for-v177) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - - [Changelog since v1.7.5](#changelog-since-v175) + - [Changelog since v1.7.6](#changelog-since-v176) - [Other notable changes](#other-notable-changes-5) -- [v1.7.5](#v175) - - [Downloads for v1.7.5](#downloads-for-v175) +- [v1.7.6](#v176) + - [Downloads for v1.7.6](#downloads-for-v176) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - - [Changelog since v1.7.4](#changelog-since-v174) + - [Changelog since v1.7.5](#changelog-since-v175) - [Other notable changes](#other-notable-changes-6) -- [v1.7.4](#v174) - - [Downloads for v1.7.4](#downloads-for-v174) +- [v1.7.5](#v175) + - [Downloads for v1.7.5](#downloads-for-v175) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - - [Changelog since v1.7.3](#changelog-since-v173) + - [Changelog since v1.7.4](#changelog-since-v174) - [Other notable changes](#other-notable-changes-7) -- [v1.7.3](#v173) - - [Downloads for v1.7.3](#downloads-for-v173) +- [v1.7.4](#v174) + - [Downloads for v1.7.4](#downloads-for-v174) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - - [Changelog since v1.7.2](#changelog-since-v172) + - [Changelog since v1.7.3](#changelog-since-v173) - [Other notable changes](#other-notable-changes-8) -- [v1.7.2](#v172) - - [Downloads for 
v1.7.2](#downloads-for-v172) +- [v1.7.3](#v173) + - [Downloads for v1.7.3](#downloads-for-v173) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - - [Changelog since v1.7.1](#changelog-since-v171) + - [Changelog since v1.7.2](#changelog-since-v172) - [Other notable changes](#other-notable-changes-9) -- [v1.7.1](#v171) - - [Downloads for v1.7.1](#downloads-for-v171) +- [v1.7.2](#v172) + - [Downloads for v1.7.2](#downloads-for-v172) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) - - [Changelog since v1.7.0](#changelog-since-v170) + - [Changelog since v1.7.1](#changelog-since-v171) - [Other notable changes](#other-notable-changes-10) -- [v1.7.0](#v170) - - [Downloads for v1.7.0](#downloads-for-v170) +- [v1.7.1](#v171) + - [Downloads for v1.7.1](#downloads-for-v171) - [Client Binaries](#client-binaries-11) - [Server Binaries](#server-binaries-11) - [Node Binaries](#node-binaries-11) + - [Changelog since v1.7.0](#changelog-since-v170) + - [Other notable changes](#other-notable-changes-11) +- [v1.7.0](#v170) + - [Downloads for v1.7.0](#downloads-for-v170) + - [Client Binaries](#client-binaries-12) + - [Server Binaries](#server-binaries-12) + - [Node Binaries](#node-binaries-12) - [**Major Themes**](#major-themes) - [**Action Required Before Upgrading**](#action-required-before-upgrading) - [Network](#network) @@ -136,7 +143,7 @@ - [Local Storage](#local-storage) - [Volume Plugins](#volume-plugins) - [Metrics](#metrics) - - [**Other notable changes**](#other-notable-changes-11) + - [**Other notable changes**](#other-notable-changes-12) - [Admission plugin](#admission-plugin) - [API Machinery](#api-machinery-1) - [Application autoscaling](#application-autoscaling-1) @@ -164,62 +171,133 @@ - [Previous Releases Included in v1.7.0](#previous-releases-included-in-v170) - [v1.7.0-rc.1](#v170-rc1) - [Downloads for 
v1.7.0-rc.1](#downloads-for-v170-rc1) - - [Client Binaries](#client-binaries-12) - - [Server Binaries](#server-binaries-12) - - [Node Binaries](#node-binaries-12) - - [Changelog since v1.7.0-beta.2](#changelog-since-v170-beta2) - - [Action Required](#action-required) - - [Other notable changes](#other-notable-changes-12) -- [v1.7.0-beta.2](#v170-beta2) - - [Downloads for v1.7.0-beta.2](#downloads-for-v170-beta2) - [Client Binaries](#client-binaries-13) - [Server Binaries](#server-binaries-13) - [Node Binaries](#node-binaries-13) - - [Changelog since v1.7.0-beta.1](#changelog-since-v170-beta1) - - [Action Required](#action-required-1) + - [Changelog since v1.7.0-beta.2](#changelog-since-v170-beta2) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-13) -- [v1.7.0-beta.1](#v170-beta1) - - [Downloads for v1.7.0-beta.1](#downloads-for-v170-beta1) +- [v1.7.0-beta.2](#v170-beta2) + - [Downloads for v1.7.0-beta.2](#downloads-for-v170-beta2) - [Client Binaries](#client-binaries-14) - [Server Binaries](#server-binaries-14) - [Node Binaries](#node-binaries-14) - - [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4) - - [Action Required](#action-required-2) + - [Changelog since v1.7.0-beta.1](#changelog-since-v170-beta1) + - [Action Required](#action-required-1) - [Other notable changes](#other-notable-changes-14) -- [v1.7.0-alpha.4](#v170-alpha4) - - [Downloads for v1.7.0-alpha.4](#downloads-for-v170-alpha4) +- [v1.7.0-beta.1](#v170-beta1) + - [Downloads for v1.7.0-beta.1](#downloads-for-v170-beta1) - [Client Binaries](#client-binaries-15) - [Server Binaries](#server-binaries-15) - [Node Binaries](#node-binaries-15) - - [Changelog since v1.7.0-alpha.3](#changelog-since-v170-alpha3) - - [Action Required](#action-required-3) + - [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4) + - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-15) -- [v1.7.0-alpha.3](#v170-alpha3) - - 
[Downloads for v1.7.0-alpha.3](#downloads-for-v170-alpha3) +- [v1.7.0-alpha.4](#v170-alpha4) + - [Downloads for v1.7.0-alpha.4](#downloads-for-v170-alpha4) - [Client Binaries](#client-binaries-16) - [Server Binaries](#server-binaries-16) - [Node Binaries](#node-binaries-16) - - [Changelog since v1.7.0-alpha.2](#changelog-since-v170-alpha2) - - [Action Required](#action-required-4) + - [Changelog since v1.7.0-alpha.3](#changelog-since-v170-alpha3) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-16) -- [v1.7.0-alpha.2](#v170-alpha2) - - [Downloads for v1.7.0-alpha.2](#downloads-for-v170-alpha2) +- [v1.7.0-alpha.3](#v170-alpha3) + - [Downloads for v1.7.0-alpha.3](#downloads-for-v170-alpha3) - [Client Binaries](#client-binaries-17) - [Server Binaries](#server-binaries-17) - - [Changelog since v1.7.0-alpha.1](#changelog-since-v170-alpha1) - - [Action Required](#action-required-5) + - [Node Binaries](#node-binaries-17) + - [Changelog since v1.7.0-alpha.2](#changelog-since-v170-alpha2) + - [Action Required](#action-required-4) - [Other notable changes](#other-notable-changes-17) -- [v1.7.0-alpha.1](#v170-alpha1) - - [Downloads for v1.7.0-alpha.1](#downloads-for-v170-alpha1) +- [v1.7.0-alpha.2](#v170-alpha2) + - [Downloads for v1.7.0-alpha.2](#downloads-for-v170-alpha2) - [Client Binaries](#client-binaries-18) - [Server Binaries](#server-binaries-18) - - [Changelog since v1.6.0](#changelog-since-v160) + - [Changelog since v1.7.0-alpha.1](#changelog-since-v170-alpha1) + - [Action Required](#action-required-5) - [Other notable changes](#other-notable-changes-18) +- [v1.7.0-alpha.1](#v170-alpha1) + - [Downloads for v1.7.0-alpha.1](#downloads-for-v170-alpha1) + - [Client Binaries](#client-binaries-19) + - [Server Binaries](#server-binaries-19) + - [Changelog since v1.6.0](#changelog-since-v160) + - [Other notable changes](#other-notable-changes-19) +# v1.7.12 + +[Documentation](https://docs.k8s.io) & 
[Examples](https://releases.k8s.io/release-1.7/examples) + +## Downloads for v1.7.12 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes.tar.gz) | `749f811fb77daca197ecce2eacfea13f28e9fa69748d1b9fa7521850a5e77b93` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-src.tar.gz) | `86804d5a20a929429f1a8ed4aecba78d391a0dbaee7ffca914724b37e56eeebe` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-client-darwin-386.tar.gz) | `7fa3e25fa63a31955de12f1cfa67bb94bcc09ccd3e90e5c5ad090b2ea9d90f94` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-client-darwin-amd64.tar.gz) | `107fa0f038b3530f57a6b04512262cbde04c888b771a1b931c6ff0a98adc1bc9` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-client-linux-386.tar.gz) | `22827bee712441a57dfa2c6d87182128c82a0f0ded34970910d1aebdb968d4db` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-client-linux-amd64.tar.gz) | `01e87c03e4c928a105ac64618a8923d9d5afa321f9ce2c4d739dad5aa564da72` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-client-linux-arm64.tar.gz) | `5d44328b0f2070885102fd15e9bb142d53b8b0c431cc5bfc5018fe07642c0380` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-client-linux-arm.tar.gz) | `30986808b540706a88855e87bd997103b506635dcc62b02e34e6d6ac507301ef` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-client-linux-ppc64le.tar.gz) | `d577a244e0f09f47d926fbcbd097e149a53488406952089225545f591f2c1945` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-client-linux-s390x.tar.gz) | `2f5eab8cb47eb467727649ef2683abe72232f9b6f481384244c535507d15a3d7` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-client-windows-386.tar.gz) | 
`e0c060c5fa1fa61ff6477485fb40329d57e6dd20cc6a1bbc50a5f98f54f61d1a` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-client-windows-amd64.tar.gz) | `bc824cf320dc94a96998665fad5925fb1b6c66569aa9bb34b12e7dfa7d437c73` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-server-linux-amd64.tar.gz) | `2bf0fee82996eaae55547852c5082ecbc2389356b4c929294ed3bc198f80ec33` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-server-linux-arm64.tar.gz) | `b7b193a53650bac279fed535fa6e5a0cb4cff6376731ef4ca3a383af97b94486` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-server-linux-arm.tar.gz) | `ecee8f65c62f4a79c423b585bf0f78e3c64ed4bb1afc7a87f0ac6dfcfb262908` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-server-linux-ppc64le.tar.gz) | `eb9058d726fd48eb6797e99ba2d9353ab2bae4dec21836deaafb2ded0b412acc` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-server-linux-s390x.tar.gz) | `b6eb522fb1aac7ea82ae2d04b456e4e69740ce40dd48eb205c5d071f4aa49d76` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-node-linux-amd64.tar.gz) | `1ab49460eb34ebab60a9109479e2f43194c763ae24a1922889e301d8c1b0644e` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-node-linux-arm64.tar.gz) | `16bf9e50d74d8b66e791ee9d23498e7b4a6e49f499df02f84baaf277128da9c2` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-node-linux-arm.tar.gz) | `c64fe4901f94076f6df2d464e13799f6399f68bc439ad966357ea3790e73a22e` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-node-linux-ppc64le.tar.gz) | `4c641014245741fd0835e430c6cc61bae0c1f30526ec07313343d59eee462a01` 
+[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-node-linux-s390x.tar.gz) | `9262f3821d02ac6a6d3d5fe51fc56cb264e2bf1adaa4b63b8b87612f1e01411d` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.7.12/kubernetes-node-windows-amd64.tar.gz) | `266b57c417190621ee9583fa556336dfe447ce8847f8be64d383fa48a81b22e2` + +## Changelog since v1.7.11 + +### Other notable changes + +* fix azure disk storage account init issue ([#55927](https://github.com/kubernetes/kubernetes/pull/55927), [@andyzhangx](https://github.com/andyzhangx)) +* Fixes a bug where if an error was returned that was not an `autorest.DetailedError` we would return `"not found", nil` which caused nodes to go to `NotReady` state. ([#57484](https://github.com/kubernetes/kubernetes/pull/57484), [@brendandburns](https://github.com/brendandburns)) +* Retry 'connection refused' errors when setting up clusters on GCE. ([#57394](https://github.com/kubernetes/kubernetes/pull/57394), [@mborsz](https://github.com/mborsz)) +* Retry 'connection refused' errors when setting up clusters on GCE. ([#57394](https://github.com/kubernetes/kubernetes/pull/57394), [@mborsz](https://github.com/mborsz)) +* Retry 'connection refused' errors when setting up clusters on GCE. ([#57394](https://github.com/kubernetes/kubernetes/pull/57394), [@mborsz](https://github.com/mborsz)) +* Fix a problem of not respecting TerminationGracePeriodSeconds of the Pods created by DaemonSet controller. 
([#51279](https://github.com/kubernetes/kubernetes/pull/51279), [@kow3ns](https://github.com/kow3ns)) +* BUG FIX: Check both name and ports for azure health probes ([#56918](https://github.com/kubernetes/kubernetes/pull/56918), [@feiskyer](https://github.com/feiskyer)) +* Provides compatibility of fields SizeLimit in types.EmptyDirVolumeSource since v1.7.8 ([#56505](https://github.com/kubernetes/kubernetes/pull/56505), [@yue9944882](https://github.com/yue9944882)) +* Fixes issue where masquerade rules are flushed in GCE k8s clusters. ([#56728](https://github.com/kubernetes/kubernetes/pull/56728), [@dnardo](https://github.com/dnardo)) +* kubelet: fix bug where `runAsUser: MustRunAsNonRoot` strategy didn't reject a pod with a non-numeric `USER`. ([#56711](https://github.com/kubernetes/kubernetes/pull/56711), [@php-coder](https://github.com/php-coder)) +* Fix a bug in GCE multizonal clusters where PersistentVolumes were sometimes created in zones without nodes. ([#52322](https://github.com/kubernetes/kubernetes/pull/52322), [@davidz627](https://github.com/davidz627)) +* Fix validation of NetworkPolicy ([#56223](https://github.com/kubernetes/kubernetes/pull/56223), [@deads2k](https://github.com/deads2k)) +* add GRS, RAGRS storage account type support for azure disk ([#55931](https://github.com/kubernetes/kubernetes/pull/55931), [@andyzhangx](https://github.com/andyzhangx)) +* Fixes server name verification of aggregated API servers and webhook admission endpoints ([#56415](https://github.com/kubernetes/kubernetes/pull/56415), [@liggitt](https://github.com/liggitt)) +* Fix a typo in prometheus-to-sd configuration, that drops some stackdriver metrics. 
([#56473](https://github.com/kubernetes/kubernetes/pull/56473), [@loburm](https://github.com/loburm)) +* Update jquery and bootstrap dependencies ([#56447](https://github.com/kubernetes/kubernetes/pull/56447), [@dashpole](https://github.com/dashpole)) + + + # v1.7.11 [Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.7/examples) From 5805a7fefbea8f3a4042af7cfdc1405baa02be90 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Fri, 29 Dec 2017 10:59:54 +0800 Subject: [PATCH 534/794] Add more verbose logs --- pkg/cloudprovider/providers/azure/azure_util_vmss.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index 1e03f70187d..2c8630c2c83 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -160,11 +160,13 @@ func (ss *scaleSet) updateCache() error { } // getCachedVirtualMachine gets virtualMachine by nodeName from cache. +// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets. func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, error) { ss.cacheMutex.Lock() defer ss.cacheMutex.Unlock() getVMFromCache := func(nodeName string) (scaleSetVMInfo, bool) { + glog.V(8).Infof("Getting scaleSetVMInfo for %q from cache %v", nodeName, ss.cache) for scaleSetName := range ss.cache { for _, vm := range ss.cache[scaleSetName] { if vm.NodeName == nodeName { @@ -183,11 +185,13 @@ func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, er // Known node not managed by scale sets. if ss.availabilitySetNodesCache.Has(nodeName) { + glog.V(10).Infof("Found node %q in availabilitySetNodesCache", nodeName) return scaleSetVMInfo{}, cloudprovider.InstanceNotFound } // Update cache and try again. 
if err := ss.updateCache(); err != nil { + glog.Errorf("updateCache failed with error: %v", err) return scaleSetVMInfo{}, err } vm, found = getVMFromCache(nodeName) @@ -196,15 +200,19 @@ func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, er } // Node still not found, assuming it is not managed by scale sets. + glog.V(8).Infof("Node %q doesn't belong to any scale sets, adding it to availabilitySetNodesCache", nodeName) ss.availabilitySetNodesCache.Insert(nodeName) return scaleSetVMInfo{}, cloudprovider.InstanceNotFound } +// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. +// The node must belong to one of scale sets. func (ss *scaleSet) getCachedVirtualMachineByInstanceID(scaleSetName, instanceID string) (scaleSetVMInfo, error) { ss.cacheMutex.Lock() defer ss.cacheMutex.Unlock() getVMByID := func(scaleSetName, instanceID string) (scaleSetVMInfo, bool) { + glog.V(8).Infof("Getting scaleSetVMInfo with scaleSetName: %q and instanceID %q from cache %v", scaleSetName, instanceID, ss.cache) vms, ok := ss.cache[scaleSetName] if !ok { glog.V(4).Infof("scale set (%s) not found", scaleSetName) @@ -229,6 +237,7 @@ func (ss *scaleSet) getCachedVirtualMachineByInstanceID(scaleSetName, instanceID // Update cache and try again. 
if err := ss.updateCache(); err != nil { + glog.Errorf("updateCache failed with error: %v", err) return scaleSetVMInfo{}, err } vm, found = getVMByID(scaleSetName, instanceID) From 9cae364ce7fa0c1355ff03d31e34befcfb247658 Mon Sep 17 00:00:00 2001 From: m1093782566 Date: Fri, 29 Dec 2017 14:30:11 +0800 Subject: [PATCH 535/794] add test for syncvirtualServer --- pkg/proxy/ipvs/proxier_test.go | 108 +++++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index b962685440e..88f7df51897 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -2408,3 +2408,111 @@ func Test_endpointsToEndpointsMap(t *testing.T) { } } } + +func Test_syncService(t *testing.T) { + testCases := []struct { + oldVirtualServer *utilipvs.VirtualServer + svcName string + newVirtualServer *utilipvs.VirtualServer + bindAddr bool + }{ + { + // case 0, old virtual server is same as new virtual server + oldVirtualServer: &utilipvs.VirtualServer{ + Address: net.ParseIP("1.2.3.4"), + Protocol: string(api.ProtocolTCP), + Port: 80, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + svcName: "foo", + newVirtualServer: &utilipvs.VirtualServer{ + Address: net.ParseIP("1.2.3.4"), + Protocol: string(api.ProtocolTCP), + Port: 80, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + bindAddr: false, + }, + { + // case 1, old virtual server is different from new virtual server + oldVirtualServer: &utilipvs.VirtualServer{ + Address: net.ParseIP("1.2.3.4"), + Protocol: string(api.ProtocolTCP), + Port: 8080, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + svcName: "bar", + newVirtualServer: &utilipvs.VirtualServer{ + Address: net.ParseIP("1.2.3.4"), + Protocol: string(api.ProtocolTCP), + Port: 8080, + Scheduler: "rr", + Flags: utilipvs.FlagPersistent, + }, + bindAddr: false, + }, + { + // case 2, old virtual server is different from new virtual server + oldVirtualServer: 
&utilipvs.VirtualServer{ + Address: net.ParseIP("1.2.3.4"), + Protocol: string(api.ProtocolTCP), + Port: 8080, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + svcName: "bar", + newVirtualServer: &utilipvs.VirtualServer{ + Address: net.ParseIP("1.2.3.4"), + Protocol: string(api.ProtocolTCP), + Port: 8080, + Scheduler: "wlc", + Flags: utilipvs.FlagHashed, + }, + bindAddr: false, + }, + { + // case 3, old virtual server is nil, and create new virtual server + oldVirtualServer: nil, + svcName: "baz", + newVirtualServer: &utilipvs.VirtualServer{ + Address: net.ParseIP("1.2.3.4"), + Protocol: string(api.ProtocolUDP), + Port: 53, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + }, + bindAddr: true, + }, + } + + for i := range testCases { + ipt := iptablestest.NewFake() + ipvs := ipvstest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) + proxier := NewFakeProxier(ipt, ipvs, ipset, nil) + + if testCases[i].oldVirtualServer != nil { + if err := proxier.ipvs.AddVirtualServer(testCases[i].oldVirtualServer); err != nil { + t.Errorf("Case [%d], unexpected add IPVS virtual server error: %v", i, err) + } + } + if err := proxier.syncService(testCases[i].svcName, testCases[i].newVirtualServer, testCases[i].bindAddr); err != nil { + t.Errorf("Case [%d], unexpected sync IPVS virutal server error: %v", i, err) + } + // check + list, err := proxier.ipvs.GetVirtualServers() + if err != nil { + t.Errorf("Case [%d], unexpected list IPVS virtual server error: %v", i, err) + } + if len(list) != 1 { + t.Errorf("Case [%d], expect %d virtual servers, got %d", i, 1, len(list)) + continue + } + if !list[0].Equal(testCases[i].newVirtualServer) { + t.Errorf("Case [%d], unexpected mismatch, expect: %#v, got: %#v", i, testCases[i].newVirtualServer, list[0]) + } + } +} From 38a8c72f8adc7ba7d2fc2b2478fa50f9cc02ebd2 Mon Sep 17 00:00:00 2001 From: xiangpengzhao Date: Tue, 2 Jan 2018 14:33:48 +0800 Subject: [PATCH 536/794] Print the full path of Kubeconfig files. 
--- cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go index 1d1474417ba..451fb0c0f19 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go +++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go @@ -231,7 +231,7 @@ func createKubeConfigFileIfNotExists(outDir, filename string, config *clientcmda return fmt.Errorf("failed to save kubeconfig file %s on disk: %v", kubeConfigFilePath, err) } - fmt.Printf("[kubeconfig] Wrote KubeConfig file to disk: %q\n", filename) + fmt.Printf("[kubeconfig] Wrote KubeConfig file to disk: %q\n", kubeConfigFilePath) return nil } @@ -258,7 +258,7 @@ func createKubeConfigFileIfNotExists(outDir, filename string, config *clientcmda // kubeadm doesn't validate the existing kubeconfig file more than this (kubeadm trusts the client certs to be valid) // Basically, if we find a kubeconfig file with the same path; the same CA cert and the same server URL; // kubeadm thinks those files are equal and doesn't bother writing a new file - fmt.Printf("[kubeconfig] Using existing up-to-date KubeConfig file: %q\n", filename) + fmt.Printf("[kubeconfig] Using existing up-to-date KubeConfig file: %q\n", kubeConfigFilePath) return nil } From bec420875ea5c64a9c4fa1269ada2a856b3d5ab7 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Mon, 1 Jan 2018 22:57:59 -0800 Subject: [PATCH 537/794] Update boilerplate for 2018 --- hack/boilerplate/boilerplate.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hack/boilerplate/boilerplate.py b/hack/boilerplate/boilerplate.py index a5205f47002..9ff1d776da3 100755 --- a/hack/boilerplate/boilerplate.py +++ b/hack/boilerplate/boilerplate.py @@ -107,7 +107,7 @@ def file_passes(filename, refs, regexs): print('File %s is missing the year' % filename, file=verbose_out) return False - # Replace all occurrences of the regex 
"2017|2016|2015|2014" with "YEAR" + # Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR" p = regexs["date"] for i, d in enumerate(data): (data[i], found) = p.subn('YEAR', d) @@ -175,8 +175,8 @@ def get_regexs(): regexs = {} # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing regexs["year"] = re.compile( 'YEAR' ) - # dates can be 2014, 2015, 2016, or 2017; company holder names can be anything - regexs["date"] = re.compile( '(2014|2015|2016|2017)' ) + # dates can be 2014, 2015, 2016, 2017, or 2018; company holder names can be anything + regexs["date"] = re.compile( '(2014|2015|2016|2017|2018)' ) # strip // +build \n\n build constraints regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE) # strip #!.* from shell scripts From cccd18333ba2daf3842eb9454b7c9af0ad2bf9ef Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Tue, 2 Jan 2018 10:22:32 +0800 Subject: [PATCH 538/794] fix type error in cteate Memory Threshold Notifier --- pkg/kubelet/eviction/eviction_manager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index 90b1038cf15..b27d0e156b4 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -251,7 +251,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act m.synchronize(diskInfoProvider, podFunc, capacityProvider) }) if err != nil { - glog.Warningf("eviction manager: failed to create hard memory threshold notifier: %v", err) + glog.Warningf("eviction manager: failed to create soft memory threshold notifier: %v", err) } // start hard memory notification err = startMemoryThresholdNotifier(m.config.Thresholds, observations, true, func(desc string) { @@ -259,7 +259,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act m.synchronize(diskInfoProvider, podFunc, 
capacityProvider) }) if err != nil { - glog.Warningf("eviction manager: failed to create soft memory threshold notifier: %v", err) + glog.Warningf("eviction manager: failed to create hard memory threshold notifier: %v", err) } } From 83bc631bee60ef2c6f9024869a8747ba9eddf556 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Tue, 2 Jan 2018 00:15:05 -0800 Subject: [PATCH 539/794] Add generated runtime and generated device plugin to update-all --- hack/update-all.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hack/update-all.sh b/hack/update-all.sh index e62795e787d..71c58f28807 100755 --- a/hack/update-all.sh +++ b/hack/update-all.sh @@ -58,6 +58,8 @@ fi BASH_TARGETS=" update-generated-protobuf update-codegen + update-generated-runtime + update-generated-device-plugin update-generated-docs update-generated-swagger-docs update-swagger-spec From 80e344644e2b6222296f2f03551a8d0273c7cbce Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Tue, 2 Jan 2018 00:21:07 -0800 Subject: [PATCH 540/794] Regenerate all generated code --- .../app/apis/kubeadm/v1alpha1/zz_generated.conversion.go | 2 +- cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.deepcopy.go | 2 +- cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go | 2 +- cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go | 2 +- cmd/kubeadm/app/phases/etcd/spec/zz_generated.deepcopy.go | 2 +- pkg/apis/abac/v0/zz_generated.deepcopy.go | 2 +- pkg/apis/abac/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/abac/v1beta1/zz_generated.deepcopy.go | 2 +- pkg/apis/abac/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/abac/zz_generated.deepcopy.go | 2 +- pkg/apis/admission/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/admission/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/admission/zz_generated.deepcopy.go | 2 +- .../admissionregistration/v1alpha1/zz_generated.conversion.go | 2 +- .../admissionregistration/v1alpha1/zz_generated.defaults.go | 2 +- 
.../admissionregistration/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/admissionregistration/zz_generated.deepcopy.go | 2 +- pkg/apis/apps/v1/zz_generated.conversion.go | 2 +- pkg/apis/apps/v1/zz_generated.defaults.go | 2 +- pkg/apis/apps/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/apps/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/apps/v1beta2/zz_generated.conversion.go | 2 +- pkg/apis/apps/v1beta2/zz_generated.defaults.go | 2 +- pkg/apis/apps/zz_generated.deepcopy.go | 2 +- pkg/apis/authentication/v1/zz_generated.conversion.go | 2 +- pkg/apis/authentication/v1/zz_generated.defaults.go | 2 +- pkg/apis/authentication/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/authentication/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/authentication/zz_generated.deepcopy.go | 2 +- pkg/apis/authorization/v1/zz_generated.conversion.go | 2 +- pkg/apis/authorization/v1/zz_generated.defaults.go | 2 +- pkg/apis/authorization/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/authorization/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/authorization/zz_generated.deepcopy.go | 2 +- pkg/apis/autoscaling/v1/zz_generated.conversion.go | 2 +- pkg/apis/autoscaling/v1/zz_generated.defaults.go | 2 +- pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go | 2 +- pkg/apis/autoscaling/v2beta1/zz_generated.defaults.go | 2 +- pkg/apis/autoscaling/zz_generated.deepcopy.go | 2 +- pkg/apis/batch/v1/zz_generated.conversion.go | 2 +- pkg/apis/batch/v1/zz_generated.defaults.go | 2 +- pkg/apis/batch/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/batch/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/batch/v2alpha1/zz_generated.conversion.go | 2 +- pkg/apis/batch/v2alpha1/zz_generated.defaults.go | 2 +- pkg/apis/batch/zz_generated.deepcopy.go | 2 +- pkg/apis/certificates/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/certificates/v1beta1/zz_generated.defaults.go | 2 +- 
pkg/apis/certificates/zz_generated.deepcopy.go | 2 +- pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go | 2 +- pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go | 2 +- pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go | 2 +- pkg/apis/componentconfig/zz_generated.deepcopy.go | 2 +- pkg/apis/core/v1/zz_generated.conversion.go | 2 +- pkg/apis/core/v1/zz_generated.defaults.go | 2 +- pkg/apis/core/zz_generated.deepcopy.go | 2 +- pkg/apis/events/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/events/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/extensions/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/extensions/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/extensions/zz_generated.deepcopy.go | 2 +- pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go | 2 +- pkg/apis/imagepolicy/v1alpha1/zz_generated.defaults.go | 2 +- pkg/apis/imagepolicy/zz_generated.deepcopy.go | 2 +- pkg/apis/networking/v1/zz_generated.conversion.go | 2 +- pkg/apis/networking/v1/zz_generated.defaults.go | 2 +- pkg/apis/networking/zz_generated.deepcopy.go | 2 +- pkg/apis/policy/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/policy/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/policy/zz_generated.deepcopy.go | 2 +- pkg/apis/rbac/v1/zz_generated.conversion.go | 2 +- pkg/apis/rbac/v1/zz_generated.defaults.go | 2 +- pkg/apis/rbac/v1alpha1/zz_generated.conversion.go | 2 +- pkg/apis/rbac/v1alpha1/zz_generated.defaults.go | 2 +- pkg/apis/rbac/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/rbac/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/rbac/zz_generated.deepcopy.go | 2 +- pkg/apis/scheduling/v1alpha1/zz_generated.conversion.go | 2 +- pkg/apis/scheduling/v1alpha1/zz_generated.defaults.go | 2 +- pkg/apis/scheduling/zz_generated.deepcopy.go | 2 +- pkg/apis/settings/v1alpha1/zz_generated.conversion.go | 2 +- pkg/apis/settings/v1alpha1/zz_generated.defaults.go | 2 +- pkg/apis/settings/zz_generated.deepcopy.go | 2 +- 
pkg/apis/storage/v1/zz_generated.conversion.go | 2 +- pkg/apis/storage/v1/zz_generated.defaults.go | 2 +- pkg/apis/storage/v1alpha1/zz_generated.conversion.go | 2 +- pkg/apis/storage/v1alpha1/zz_generated.defaults.go | 2 +- pkg/apis/storage/v1beta1/zz_generated.conversion.go | 2 +- pkg/apis/storage/v1beta1/zz_generated.defaults.go | 2 +- pkg/apis/storage/zz_generated.deepcopy.go | 2 +- pkg/client/clientset_generated/internalclientset/clientset.go | 2 +- pkg/client/clientset_generated/internalclientset/doc.go | 2 +- .../internalclientset/fake/clientset_generated.go | 2 +- pkg/client/clientset_generated/internalclientset/fake/doc.go | 2 +- .../clientset_generated/internalclientset/fake/register.go | 2 +- pkg/client/clientset_generated/internalclientset/scheme/doc.go | 2 +- .../clientset_generated/internalclientset/scheme/register.go | 2 +- .../internalversion/admissionregistration_client.go | 2 +- .../typed/admissionregistration/internalversion/doc.go | 2 +- .../typed/admissionregistration/internalversion/fake/doc.go | 2 +- .../internalversion/fake/fake_admissionregistration_client.go | 2 +- .../internalversion/fake/fake_initializerconfiguration.go | 2 +- .../internalversion/fake/fake_mutatingwebhookconfiguration.go | 2 +- .../internalversion/fake/fake_validatingwebhookconfiguration.go | 2 +- .../internalversion/generated_expansion.go | 2 +- .../internalversion/initializerconfiguration.go | 2 +- .../internalversion/mutatingwebhookconfiguration.go | 2 +- .../internalversion/validatingwebhookconfiguration.go | 2 +- .../internalclientset/typed/apps/internalversion/apps_client.go | 2 +- .../typed/apps/internalversion/controllerrevision.go | 2 +- .../internalclientset/typed/apps/internalversion/doc.go | 2 +- .../internalclientset/typed/apps/internalversion/fake/doc.go | 2 +- .../typed/apps/internalversion/fake/fake_apps_client.go | 2 +- .../typed/apps/internalversion/fake/fake_controllerrevision.go | 2 +- .../typed/apps/internalversion/fake/fake_statefulset.go | 2 +- 
.../typed/apps/internalversion/generated_expansion.go | 2 +- .../internalclientset/typed/apps/internalversion/statefulset.go | 2 +- .../authentication/internalversion/authentication_client.go | 2 +- .../typed/authentication/internalversion/doc.go | 2 +- .../typed/authentication/internalversion/fake/doc.go | 2 +- .../internalversion/fake/fake_authentication_client.go | 2 +- .../authentication/internalversion/fake/fake_tokenreview.go | 2 +- .../typed/authentication/internalversion/generated_expansion.go | 2 +- .../typed/authentication/internalversion/tokenreview.go | 2 +- .../typed/authorization/internalversion/authorization_client.go | 2 +- .../typed/authorization/internalversion/doc.go | 2 +- .../typed/authorization/internalversion/fake/doc.go | 2 +- .../internalversion/fake/fake_authorization_client.go | 2 +- .../internalversion/fake/fake_localsubjectaccessreview.go | 2 +- .../internalversion/fake/fake_selfsubjectaccessreview.go | 2 +- .../internalversion/fake/fake_selfsubjectrulesreview.go | 2 +- .../internalversion/fake/fake_subjectaccessreview.go | 2 +- .../typed/authorization/internalversion/generated_expansion.go | 2 +- .../authorization/internalversion/localsubjectaccessreview.go | 2 +- .../authorization/internalversion/selfsubjectaccessreview.go | 2 +- .../authorization/internalversion/selfsubjectrulesreview.go | 2 +- .../typed/authorization/internalversion/subjectaccessreview.go | 2 +- .../typed/autoscaling/internalversion/autoscaling_client.go | 2 +- .../internalclientset/typed/autoscaling/internalversion/doc.go | 2 +- .../typed/autoscaling/internalversion/fake/doc.go | 2 +- .../autoscaling/internalversion/fake/fake_autoscaling_client.go | 2 +- .../internalversion/fake/fake_horizontalpodautoscaler.go | 2 +- .../typed/autoscaling/internalversion/generated_expansion.go | 2 +- .../autoscaling/internalversion/horizontalpodautoscaler.go | 2 +- .../typed/batch/internalversion/batch_client.go | 2 +- .../internalclientset/typed/batch/internalversion/cronjob.go | 
2 +- .../internalclientset/typed/batch/internalversion/doc.go | 2 +- .../internalclientset/typed/batch/internalversion/fake/doc.go | 2 +- .../typed/batch/internalversion/fake/fake_batch_client.go | 2 +- .../typed/batch/internalversion/fake/fake_cronjob.go | 2 +- .../typed/batch/internalversion/fake/fake_job.go | 2 +- .../typed/batch/internalversion/generated_expansion.go | 2 +- .../internalclientset/typed/batch/internalversion/job.go | 2 +- .../typed/certificates/internalversion/certificates_client.go | 2 +- .../certificates/internalversion/certificatesigningrequest.go | 2 +- .../internalclientset/typed/certificates/internalversion/doc.go | 2 +- .../typed/certificates/internalversion/fake/doc.go | 2 +- .../internalversion/fake/fake_certificates_client.go | 2 +- .../internalversion/fake/fake_certificatesigningrequest.go | 2 +- .../typed/certificates/internalversion/generated_expansion.go | 2 +- .../typed/core/internalversion/componentstatus.go | 2 +- .../internalclientset/typed/core/internalversion/configmap.go | 2 +- .../internalclientset/typed/core/internalversion/core_client.go | 2 +- .../internalclientset/typed/core/internalversion/doc.go | 2 +- .../internalclientset/typed/core/internalversion/endpoints.go | 2 +- .../internalclientset/typed/core/internalversion/event.go | 2 +- .../internalclientset/typed/core/internalversion/fake/doc.go | 2 +- .../typed/core/internalversion/fake/fake_componentstatus.go | 2 +- .../typed/core/internalversion/fake/fake_configmap.go | 2 +- .../typed/core/internalversion/fake/fake_core_client.go | 2 +- .../typed/core/internalversion/fake/fake_endpoints.go | 2 +- .../typed/core/internalversion/fake/fake_event.go | 2 +- .../typed/core/internalversion/fake/fake_limitrange.go | 2 +- .../typed/core/internalversion/fake/fake_namespace.go | 2 +- .../typed/core/internalversion/fake/fake_node.go | 2 +- .../typed/core/internalversion/fake/fake_persistentvolume.go | 2 +- .../core/internalversion/fake/fake_persistentvolumeclaim.go | 2 +- 
.../typed/core/internalversion/fake/fake_pod.go | 2 +- .../typed/core/internalversion/fake/fake_podtemplate.go | 2 +- .../core/internalversion/fake/fake_replicationcontroller.go | 2 +- .../typed/core/internalversion/fake/fake_resourcequota.go | 2 +- .../typed/core/internalversion/fake/fake_secret.go | 2 +- .../typed/core/internalversion/fake/fake_service.go | 2 +- .../typed/core/internalversion/fake/fake_serviceaccount.go | 2 +- .../typed/core/internalversion/generated_expansion.go | 2 +- .../internalclientset/typed/core/internalversion/limitrange.go | 2 +- .../internalclientset/typed/core/internalversion/namespace.go | 2 +- .../internalclientset/typed/core/internalversion/node.go | 2 +- .../typed/core/internalversion/persistentvolume.go | 2 +- .../typed/core/internalversion/persistentvolumeclaim.go | 2 +- .../internalclientset/typed/core/internalversion/pod.go | 2 +- .../internalclientset/typed/core/internalversion/podtemplate.go | 2 +- .../typed/core/internalversion/replicationcontroller.go | 2 +- .../typed/core/internalversion/resourcequota.go | 2 +- .../internalclientset/typed/core/internalversion/secret.go | 2 +- .../internalclientset/typed/core/internalversion/service.go | 2 +- .../typed/core/internalversion/serviceaccount.go | 2 +- .../internalclientset/typed/events/internalversion/doc.go | 2 +- .../typed/events/internalversion/events_client.go | 2 +- .../internalclientset/typed/events/internalversion/fake/doc.go | 2 +- .../typed/events/internalversion/fake/fake_events_client.go | 2 +- .../typed/events/internalversion/generated_expansion.go | 2 +- .../typed/extensions/internalversion/daemonset.go | 2 +- .../typed/extensions/internalversion/deployment.go | 2 +- .../internalclientset/typed/extensions/internalversion/doc.go | 2 +- .../typed/extensions/internalversion/extensions_client.go | 2 +- .../typed/extensions/internalversion/fake/doc.go | 2 +- .../typed/extensions/internalversion/fake/fake_daemonset.go | 2 +- 
.../typed/extensions/internalversion/fake/fake_deployment.go | 2 +- .../extensions/internalversion/fake/fake_extensions_client.go | 2 +- .../typed/extensions/internalversion/fake/fake_ingress.go | 2 +- .../extensions/internalversion/fake/fake_podsecuritypolicy.go | 2 +- .../typed/extensions/internalversion/fake/fake_replicaset.go | 2 +- .../typed/extensions/internalversion/generated_expansion.go | 2 +- .../typed/extensions/internalversion/ingress.go | 2 +- .../typed/extensions/internalversion/podsecuritypolicy.go | 2 +- .../typed/extensions/internalversion/replicaset.go | 2 +- .../internalclientset/typed/networking/internalversion/doc.go | 2 +- .../typed/networking/internalversion/fake/doc.go | 2 +- .../networking/internalversion/fake/fake_networking_client.go | 2 +- .../typed/networking/internalversion/fake/fake_networkpolicy.go | 2 +- .../typed/networking/internalversion/generated_expansion.go | 2 +- .../typed/networking/internalversion/networking_client.go | 2 +- .../typed/networking/internalversion/networkpolicy.go | 2 +- .../internalclientset/typed/policy/internalversion/doc.go | 2 +- .../internalclientset/typed/policy/internalversion/eviction.go | 2 +- .../internalclientset/typed/policy/internalversion/fake/doc.go | 2 +- .../typed/policy/internalversion/fake/fake_eviction.go | 2 +- .../policy/internalversion/fake/fake_poddisruptionbudget.go | 2 +- .../typed/policy/internalversion/fake/fake_policy_client.go | 2 +- .../typed/policy/internalversion/generated_expansion.go | 2 +- .../typed/policy/internalversion/poddisruptionbudget.go | 2 +- .../typed/policy/internalversion/policy_client.go | 2 +- .../internalclientset/typed/rbac/internalversion/clusterrole.go | 2 +- .../typed/rbac/internalversion/clusterrolebinding.go | 2 +- .../internalclientset/typed/rbac/internalversion/doc.go | 2 +- .../internalclientset/typed/rbac/internalversion/fake/doc.go | 2 +- .../typed/rbac/internalversion/fake/fake_clusterrole.go | 2 +- 
.../typed/rbac/internalversion/fake/fake_clusterrolebinding.go | 2 +- .../typed/rbac/internalversion/fake/fake_rbac_client.go | 2 +- .../typed/rbac/internalversion/fake/fake_role.go | 2 +- .../typed/rbac/internalversion/fake/fake_rolebinding.go | 2 +- .../typed/rbac/internalversion/generated_expansion.go | 2 +- .../internalclientset/typed/rbac/internalversion/rbac_client.go | 2 +- .../internalclientset/typed/rbac/internalversion/role.go | 2 +- .../internalclientset/typed/rbac/internalversion/rolebinding.go | 2 +- .../internalclientset/typed/scheduling/internalversion/doc.go | 2 +- .../typed/scheduling/internalversion/fake/doc.go | 2 +- .../typed/scheduling/internalversion/fake/fake_priorityclass.go | 2 +- .../scheduling/internalversion/fake/fake_scheduling_client.go | 2 +- .../typed/scheduling/internalversion/generated_expansion.go | 2 +- .../typed/scheduling/internalversion/priorityclass.go | 2 +- .../typed/scheduling/internalversion/scheduling_client.go | 2 +- .../internalclientset/typed/settings/internalversion/doc.go | 2 +- .../typed/settings/internalversion/fake/doc.go | 2 +- .../typed/settings/internalversion/fake/fake_podpreset.go | 2 +- .../typed/settings/internalversion/fake/fake_settings_client.go | 2 +- .../typed/settings/internalversion/generated_expansion.go | 2 +- .../typed/settings/internalversion/podpreset.go | 2 +- .../typed/settings/internalversion/settings_client.go | 2 +- .../internalclientset/typed/storage/internalversion/doc.go | 2 +- .../internalclientset/typed/storage/internalversion/fake/doc.go | 2 +- .../typed/storage/internalversion/fake/fake_storage_client.go | 2 +- .../typed/storage/internalversion/fake/fake_storageclass.go | 2 +- .../typed/storage/internalversion/fake/fake_volumeattachment.go | 2 +- .../typed/storage/internalversion/generated_expansion.go | 2 +- .../typed/storage/internalversion/storage_client.go | 2 +- .../typed/storage/internalversion/storageclass.go | 2 +- .../typed/storage/internalversion/volumeattachment.go | 2 +- 
.../internalversion/admissionregistration/interface.go | 2 +- .../internalversion/initializerconfiguration.go | 2 +- .../admissionregistration/internalversion/interface.go | 2 +- .../internalversion/mutatingwebhookconfiguration.go | 2 +- .../internalversion/validatingwebhookconfiguration.go | 2 +- .../informers_generated/internalversion/apps/interface.go | 2 +- .../internalversion/apps/internalversion/controllerrevision.go | 2 +- .../internalversion/apps/internalversion/interface.go | 2 +- .../internalversion/apps/internalversion/statefulset.go | 2 +- .../internalversion/autoscaling/interface.go | 2 +- .../autoscaling/internalversion/horizontalpodautoscaler.go | 2 +- .../internalversion/autoscaling/internalversion/interface.go | 2 +- .../informers_generated/internalversion/batch/interface.go | 2 +- .../internalversion/batch/internalversion/cronjob.go | 2 +- .../internalversion/batch/internalversion/interface.go | 2 +- .../internalversion/batch/internalversion/job.go | 2 +- .../internalversion/certificates/interface.go | 2 +- .../certificates/internalversion/certificatesigningrequest.go | 2 +- .../internalversion/certificates/internalversion/interface.go | 2 +- .../informers_generated/internalversion/core/interface.go | 2 +- .../internalversion/core/internalversion/componentstatus.go | 2 +- .../internalversion/core/internalversion/configmap.go | 2 +- .../internalversion/core/internalversion/endpoints.go | 2 +- .../internalversion/core/internalversion/event.go | 2 +- .../internalversion/core/internalversion/interface.go | 2 +- .../internalversion/core/internalversion/limitrange.go | 2 +- .../internalversion/core/internalversion/namespace.go | 2 +- .../internalversion/core/internalversion/node.go | 2 +- .../internalversion/core/internalversion/persistentvolume.go | 2 +- .../core/internalversion/persistentvolumeclaim.go | 2 +- .../internalversion/core/internalversion/pod.go | 2 +- .../internalversion/core/internalversion/podtemplate.go | 2 +- 
.../core/internalversion/replicationcontroller.go | 2 +- .../internalversion/core/internalversion/resourcequota.go | 2 +- .../internalversion/core/internalversion/secret.go | 2 +- .../internalversion/core/internalversion/service.go | 2 +- .../internalversion/core/internalversion/serviceaccount.go | 2 +- .../informers_generated/internalversion/extensions/interface.go | 2 +- .../internalversion/extensions/internalversion/daemonset.go | 2 +- .../internalversion/extensions/internalversion/deployment.go | 2 +- .../internalversion/extensions/internalversion/ingress.go | 2 +- .../internalversion/extensions/internalversion/interface.go | 2 +- .../extensions/internalversion/podsecuritypolicy.go | 2 +- .../internalversion/extensions/internalversion/replicaset.go | 2 +- .../informers/informers_generated/internalversion/factory.go | 2 +- .../informers/informers_generated/internalversion/generic.go | 2 +- .../internalversion/internalinterfaces/factory_interfaces.go | 2 +- .../informers_generated/internalversion/networking/interface.go | 2 +- .../internalversion/networking/internalversion/interface.go | 2 +- .../internalversion/networking/internalversion/networkpolicy.go | 2 +- .../informers_generated/internalversion/policy/interface.go | 2 +- .../internalversion/policy/internalversion/interface.go | 2 +- .../policy/internalversion/poddisruptionbudget.go | 2 +- .../informers_generated/internalversion/rbac/interface.go | 2 +- .../internalversion/rbac/internalversion/clusterrole.go | 2 +- .../internalversion/rbac/internalversion/clusterrolebinding.go | 2 +- .../internalversion/rbac/internalversion/interface.go | 2 +- .../internalversion/rbac/internalversion/role.go | 2 +- .../internalversion/rbac/internalversion/rolebinding.go | 2 +- .../informers_generated/internalversion/scheduling/interface.go | 2 +- .../internalversion/scheduling/internalversion/interface.go | 2 +- .../internalversion/scheduling/internalversion/priorityclass.go | 2 +- 
.../informers_generated/internalversion/settings/interface.go | 2 +- .../internalversion/settings/internalversion/interface.go | 2 +- .../internalversion/settings/internalversion/podpreset.go | 2 +- .../informers_generated/internalversion/storage/interface.go | 2 +- .../internalversion/storage/internalversion/interface.go | 2 +- .../internalversion/storage/internalversion/storageclass.go | 2 +- .../internalversion/storage/internalversion/volumeattachment.go | 2 +- .../internalversion/expansion_generated.go | 2 +- .../internalversion/initializerconfiguration.go | 2 +- .../internalversion/mutatingwebhookconfiguration.go | 2 +- .../internalversion/validatingwebhookconfiguration.go | 2 +- pkg/client/listers/apps/internalversion/controllerrevision.go | 2 +- pkg/client/listers/apps/internalversion/expansion_generated.go | 2 +- pkg/client/listers/apps/internalversion/statefulset.go | 2 +- .../authentication/internalversion/expansion_generated.go | 2 +- .../listers/authentication/internalversion/tokenreview.go | 2 +- .../authorization/internalversion/expansion_generated.go | 2 +- .../authorization/internalversion/localsubjectaccessreview.go | 2 +- .../authorization/internalversion/selfsubjectaccessreview.go | 2 +- .../authorization/internalversion/selfsubjectrulesreview.go | 2 +- .../authorization/internalversion/subjectaccessreview.go | 2 +- .../listers/autoscaling/internalversion/expansion_generated.go | 2 +- .../autoscaling/internalversion/horizontalpodautoscaler.go | 2 +- pkg/client/listers/batch/internalversion/cronjob.go | 2 +- pkg/client/listers/batch/internalversion/expansion_generated.go | 2 +- pkg/client/listers/batch/internalversion/job.go | 2 +- .../certificates/internalversion/certificatesigningrequest.go | 2 +- .../listers/certificates/internalversion/expansion_generated.go | 2 +- pkg/client/listers/core/internalversion/componentstatus.go | 2 +- pkg/client/listers/core/internalversion/configmap.go | 2 +- pkg/client/listers/core/internalversion/endpoints.go | 
2 +- pkg/client/listers/core/internalversion/event.go | 2 +- pkg/client/listers/core/internalversion/expansion_generated.go | 2 +- pkg/client/listers/core/internalversion/limitrange.go | 2 +- pkg/client/listers/core/internalversion/namespace.go | 2 +- pkg/client/listers/core/internalversion/node.go | 2 +- pkg/client/listers/core/internalversion/persistentvolume.go | 2 +- .../listers/core/internalversion/persistentvolumeclaim.go | 2 +- pkg/client/listers/core/internalversion/pod.go | 2 +- pkg/client/listers/core/internalversion/podtemplate.go | 2 +- .../listers/core/internalversion/replicationcontroller.go | 2 +- pkg/client/listers/core/internalversion/resourcequota.go | 2 +- pkg/client/listers/core/internalversion/secret.go | 2 +- pkg/client/listers/core/internalversion/service.go | 2 +- pkg/client/listers/core/internalversion/serviceaccount.go | 2 +- pkg/client/listers/extensions/internalversion/daemonset.go | 2 +- pkg/client/listers/extensions/internalversion/deployment.go | 2 +- .../listers/extensions/internalversion/expansion_generated.go | 2 +- pkg/client/listers/extensions/internalversion/ingress.go | 2 +- .../listers/extensions/internalversion/podsecuritypolicy.go | 2 +- pkg/client/listers/extensions/internalversion/replicaset.go | 2 +- .../listers/imagepolicy/internalversion/expansion_generated.go | 2 +- pkg/client/listers/imagepolicy/internalversion/imagereview.go | 2 +- .../listers/networking/internalversion/expansion_generated.go | 2 +- pkg/client/listers/networking/internalversion/networkpolicy.go | 2 +- pkg/client/listers/policy/internalversion/eviction.go | 2 +- .../listers/policy/internalversion/expansion_generated.go | 2 +- .../listers/policy/internalversion/poddisruptionbudget.go | 2 +- pkg/client/listers/rbac/internalversion/clusterrole.go | 2 +- pkg/client/listers/rbac/internalversion/clusterrolebinding.go | 2 +- pkg/client/listers/rbac/internalversion/expansion_generated.go | 2 +- pkg/client/listers/rbac/internalversion/role.go | 2 +- 
pkg/client/listers/rbac/internalversion/rolebinding.go | 2 +- .../listers/scheduling/internalversion/expansion_generated.go | 2 +- pkg/client/listers/scheduling/internalversion/priorityclass.go | 2 +- .../listers/settings/internalversion/expansion_generated.go | 2 +- pkg/client/listers/settings/internalversion/podpreset.go | 2 +- .../listers/storage/internalversion/expansion_generated.go | 2 +- pkg/client/listers/storage/internalversion/storageclass.go | 2 +- pkg/client/listers/storage/internalversion/volumeattachment.go | 2 +- .../garbagecollector/metaonly/zz_generated.deepcopy.go | 2 +- pkg/kubectl/cmd/testing/zz_generated.deepcopy.go | 2 +- pkg/kubectl/testing/zz_generated.deepcopy.go | 2 +- pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go | 2 +- pkg/kubelet/apis/deviceplugin/v1alpha/api.pb.go | 2 +- .../apis/kubeletconfig/v1alpha1/zz_generated.conversion.go | 2 +- .../apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go | 2 +- .../apis/kubeletconfig/v1alpha1/zz_generated.defaults.go | 2 +- pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go | 2 +- .../apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go | 2 +- .../apis/kubeproxyconfig/v1alpha1/zz_generated.deepcopy.go | 2 +- .../apis/kubeproxyconfig/v1alpha1/zz_generated.defaults.go | 2 +- pkg/proxy/apis/kubeproxyconfig/zz_generated.deepcopy.go | 2 +- pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go | 2 +- .../apis/eventratelimit/v1alpha1/zz_generated.conversion.go | 2 +- .../apis/eventratelimit/v1alpha1/zz_generated.deepcopy.go | 2 +- .../apis/eventratelimit/v1alpha1/zz_generated.defaults.go | 2 +- .../eventratelimit/apis/eventratelimit/zz_generated.deepcopy.go | 2 +- .../v1alpha1/zz_generated.conversion.go | 2 +- .../podtolerationrestriction/v1alpha1/zz_generated.deepcopy.go | 2 +- .../podtolerationrestriction/v1alpha1/zz_generated.defaults.go | 2 +- .../apis/podtolerationrestriction/zz_generated.deepcopy.go | 2 +- .../apis/resourcequota/v1alpha1/zz_generated.conversion.go | 2 +- 
.../apis/resourcequota/v1alpha1/zz_generated.deepcopy.go | 2 +- .../apis/resourcequota/v1alpha1/zz_generated.defaults.go | 2 +- .../resourcequota/apis/resourcequota/zz_generated.deepcopy.go | 2 +- plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go | 2 +- plugin/pkg/scheduler/api/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/admission/v1beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/admission/v1beta1/generated.proto | 2 +- .../src/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go | 2 +- .../k8s.io/api/admissionregistration/v1alpha1/generated.pb.go | 2 +- .../k8s.io/api/admissionregistration/v1alpha1/generated.proto | 2 +- .../api/admissionregistration/v1alpha1/zz_generated.deepcopy.go | 2 +- .../k8s.io/api/admissionregistration/v1beta1/generated.pb.go | 2 +- .../k8s.io/api/admissionregistration/v1beta1/generated.proto | 2 +- .../api/admissionregistration/v1beta1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/apps/v1/generated.pb.go | 2 +- staging/src/k8s.io/api/apps/v1/generated.proto | 2 +- staging/src/k8s.io/api/apps/v1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/apps/v1beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/apps/v1beta1/generated.proto | 2 +- staging/src/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/apps/v1beta2/generated.pb.go | 2 +- staging/src/k8s.io/api/apps/v1beta2/generated.proto | 2 +- staging/src/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/authentication/v1/generated.pb.go | 2 +- staging/src/k8s.io/api/authentication/v1/generated.proto | 2 +- .../src/k8s.io/api/authentication/v1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/authentication/v1beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/authentication/v1beta1/generated.proto | 2 +- .../k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/authorization/v1/generated.pb.go | 2 +- staging/src/k8s.io/api/authorization/v1/generated.proto | 2 +- 
.../src/k8s.io/api/authorization/v1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/authorization/v1beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/authorization/v1beta1/generated.proto | 2 +- .../k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/autoscaling/v1/generated.pb.go | 2 +- staging/src/k8s.io/api/autoscaling/v1/generated.proto | 2 +- staging/src/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/autoscaling/v2beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/autoscaling/v2beta1/generated.proto | 2 +- .../src/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/batch/v1/generated.pb.go | 2 +- staging/src/k8s.io/api/batch/v1/generated.proto | 2 +- staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/batch/v1beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/batch/v1beta1/generated.proto | 2 +- staging/src/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/batch/v2alpha1/generated.pb.go | 2 +- staging/src/k8s.io/api/batch/v2alpha1/generated.proto | 2 +- staging/src/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/certificates/v1beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/certificates/v1beta1/generated.proto | 2 +- .../k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/core/v1/generated.pb.go | 2 +- staging/src/k8s.io/api/core/v1/generated.proto | 2 +- staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/events/v1beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/events/v1beta1/generated.proto | 2 +- staging/src/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/extensions/v1beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/extensions/v1beta1/generated.proto | 2 +- .../src/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go | 2 +- 
staging/src/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go | 2 +- staging/src/k8s.io/api/imagepolicy/v1alpha1/generated.proto | 2 +- .../k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/networking/v1/generated.pb.go | 2 +- staging/src/k8s.io/api/networking/v1/generated.proto | 2 +- staging/src/k8s.io/api/networking/v1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/policy/v1beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/policy/v1beta1/generated.proto | 2 +- staging/src/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/rbac/v1/generated.pb.go | 2 +- staging/src/k8s.io/api/rbac/v1/generated.proto | 2 +- staging/src/k8s.io/api/rbac/v1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/rbac/v1alpha1/generated.pb.go | 2 +- staging/src/k8s.io/api/rbac/v1alpha1/generated.proto | 2 +- staging/src/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/rbac/v1beta1/generated.pb.go | 2 +- staging/src/k8s.io/api/rbac/v1beta1/generated.proto | 2 +- staging/src/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/scheduling/v1alpha1/generated.pb.go | 2 +- staging/src/k8s.io/api/scheduling/v1alpha1/generated.proto | 2 +- .../src/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/settings/v1alpha1/generated.pb.go | 2 +- staging/src/k8s.io/api/settings/v1alpha1/generated.proto | 2 +- .../src/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/storage/v1/generated.pb.go | 2 +- staging/src/k8s.io/api/storage/v1/generated.proto | 2 +- staging/src/k8s.io/api/storage/v1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/storage/v1alpha1/generated.pb.go | 2 +- staging/src/k8s.io/api/storage/v1alpha1/generated.proto | 2 +- .../src/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/api/storage/v1beta1/generated.pb.go | 2 +- 
staging/src/k8s.io/api/storage/v1beta1/generated.proto | 2 +- staging/src/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go | 2 +- .../examples/client-go/pkg/apis/cr/v1/zz_generated.deepcopy.go | 2 +- .../pkg/apis/apiextensions/v1beta1/generated.pb.go | 2 +- .../pkg/apis/apiextensions/v1beta1/generated.proto | 2 +- .../pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go | 2 +- .../pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go | 2 +- .../pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go | 2 +- .../pkg/apis/apiextensions/zz_generated.deepcopy.go | 2 +- .../pkg/client/clientset/clientset/clientset.go | 2 +- .../pkg/client/clientset/clientset/doc.go | 2 +- .../pkg/client/clientset/clientset/fake/clientset_generated.go | 2 +- .../pkg/client/clientset/clientset/fake/doc.go | 2 +- .../pkg/client/clientset/clientset/fake/register.go | 2 +- .../pkg/client/clientset/clientset/scheme/doc.go | 2 +- .../pkg/client/clientset/clientset/scheme/register.go | 2 +- .../typed/apiextensions/v1beta1/apiextensions_client.go | 2 +- .../typed/apiextensions/v1beta1/customresourcedefinition.go | 2 +- .../clientset/clientset/typed/apiextensions/v1beta1/doc.go | 2 +- .../clientset/clientset/typed/apiextensions/v1beta1/fake/doc.go | 2 +- .../apiextensions/v1beta1/fake/fake_apiextensions_client.go | 2 +- .../apiextensions/v1beta1/fake/fake_customresourcedefinition.go | 2 +- .../typed/apiextensions/v1beta1/generated_expansion.go | 2 +- .../pkg/client/clientset/internalclientset/clientset.go | 2 +- .../pkg/client/clientset/internalclientset/doc.go | 2 +- .../clientset/internalclientset/fake/clientset_generated.go | 2 +- .../pkg/client/clientset/internalclientset/fake/doc.go | 2 +- .../pkg/client/clientset/internalclientset/fake/register.go | 2 +- .../pkg/client/clientset/internalclientset/scheme/doc.go | 2 +- .../pkg/client/clientset/internalclientset/scheme/register.go | 2 +- .../typed/apiextensions/internalversion/apiextensions_client.go | 2 +- 
.../apiextensions/internalversion/customresourcedefinition.go | 2 +- .../typed/apiextensions/internalversion/doc.go | 2 +- .../typed/apiextensions/internalversion/fake/doc.go | 2 +- .../internalversion/fake/fake_apiextensions_client.go | 2 +- .../internalversion/fake/fake_customresourcedefinition.go | 2 +- .../typed/apiextensions/internalversion/generated_expansion.go | 2 +- .../informers/externalversions/apiextensions/interface.go | 2 +- .../apiextensions/v1beta1/customresourcedefinition.go | 2 +- .../externalversions/apiextensions/v1beta1/interface.go | 2 +- .../pkg/client/informers/externalversions/factory.go | 2 +- .../pkg/client/informers/externalversions/generic.go | 2 +- .../externalversions/internalinterfaces/factory_interfaces.go | 2 +- .../client/informers/internalversion/apiextensions/interface.go | 2 +- .../apiextensions/internalversion/customresourcedefinition.go | 2 +- .../internalversion/apiextensions/internalversion/interface.go | 2 +- .../pkg/client/informers/internalversion/factory.go | 2 +- .../pkg/client/informers/internalversion/generic.go | 2 +- .../internalversion/internalinterfaces/factory_interfaces.go | 2 +- .../apiextensions/internalversion/customresourcedefinition.go | 2 +- .../apiextensions/internalversion/expansion_generated.go | 2 +- .../listers/apiextensions/v1beta1/customresourcedefinition.go | 2 +- .../client/listers/apiextensions/v1beta1/expansion_generated.go | 2 +- .../src/k8s.io/apimachinery/pkg/api/resource/generated.pb.go | 2 +- .../src/k8s.io/apimachinery/pkg/api/resource/generated.proto | 2 +- .../apimachinery/pkg/api/resource/zz_generated.deepcopy.go | 2 +- .../pkg/apis/meta/internalversion/zz_generated.deepcopy.go | 2 +- .../src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go | 2 +- .../src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto | 2 +- .../pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go | 2 +- .../apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go | 2 +- 
.../apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go | 2 +- .../k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.pb.go | 2 +- .../k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto | 2 +- .../pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go | 2 +- .../pkg/apis/meta/v1alpha1/zz_generated.defaults.go | 2 +- .../pkg/apis/testapigroup/v1/zz_generated.conversion.go | 2 +- .../pkg/apis/testapigroup/v1/zz_generated.deepcopy.go | 2 +- .../pkg/apis/testapigroup/v1/zz_generated.defaults.go | 2 +- .../apimachinery/pkg/apis/testapigroup/zz_generated.deepcopy.go | 2 +- .../src/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/apimachinery/pkg/runtime/generated.pb.go | 2 +- staging/src/k8s.io/apimachinery/pkg/runtime/generated.proto | 2 +- .../src/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go | 2 +- .../src/k8s.io/apimachinery/pkg/runtime/schema/generated.proto | 2 +- .../pkg/runtime/serializer/testing/zz_generated.deepcopy.go | 2 +- .../apimachinery/pkg/runtime/testing/zz_generated.deepcopy.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go | 2 +- .../src/k8s.io/apimachinery/pkg/test/zz_generated.deepcopy.go | 2 +- staging/src/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go | 2 +- staging/src/k8s.io/apimachinery/pkg/util/intstr/generated.proto | 2 +- .../src/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go | 2 +- .../apis/webhookadmission/v1alpha1/zz_generated.conversion.go | 2 +- .../apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go | 2 +- .../apis/webhookadmission/v1alpha1/zz_generated.defaults.go | 2 +- .../config/apis/webhookadmission/zz_generated.deepcopy.go | 2 +- .../pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go | 2 +- .../pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go | 2 +- .../pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go | 2 +- .../apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go | 2 +- .../k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go | 
2 +- .../k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto | 2 +- .../pkg/apis/audit/v1alpha1/zz_generated.conversion.go | 2 +- .../apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go | 2 +- .../apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go | 2 +- .../src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go | 2 +- .../src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto | 2 +- .../apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go | 2 +- .../apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go | 2 +- .../apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go | 2 +- .../k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go | 2 +- .../src/k8s.io/apiserver/pkg/apis/example/v1/generated.pb.go | 2 +- .../src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto | 2 +- .../apiserver/pkg/apis/example/v1/zz_generated.conversion.go | 2 +- .../apiserver/pkg/apis/example/v1/zz_generated.deepcopy.go | 2 +- .../apiserver/pkg/apis/example/v1/zz_generated.defaults.go | 2 +- .../k8s.io/apiserver/pkg/apis/example/zz_generated.deepcopy.go | 2 +- .../src/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go | 2 +- .../src/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto | 2 +- .../apiserver/pkg/apis/example2/v1/zz_generated.conversion.go | 2 +- .../apiserver/pkg/apis/example2/v1/zz_generated.deepcopy.go | 2 +- .../apiserver/pkg/apis/example2/v1/zz_generated.defaults.go | 2 +- .../k8s.io/apiserver/pkg/apis/example2/zz_generated.deepcopy.go | 2 +- .../pkg/endpoints/openapi/testing/zz_generated.deepcopy.go | 2 +- .../apiserver/pkg/endpoints/testing/zz_generated.deepcopy.go | 2 +- .../k8s.io/apiserver/pkg/registry/rest/zz_generated.deepcopy.go | 2 +- .../apiserver/pkg/storage/testing/zz_generated.deepcopy.go | 2 +- .../client-go/informers/admissionregistration/interface.go | 2 +- .../admissionregistration/v1alpha1/initializerconfiguration.go | 2 +- .../informers/admissionregistration/v1alpha1/interface.go | 2 +- 
.../informers/admissionregistration/v1beta1/interface.go | 2 +- .../v1beta1/mutatingwebhookconfiguration.go | 2 +- .../v1beta1/validatingwebhookconfiguration.go | 2 +- staging/src/k8s.io/client-go/informers/apps/interface.go | 2 +- .../k8s.io/client-go/informers/apps/v1/controllerrevision.go | 2 +- staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go | 2 +- staging/src/k8s.io/client-go/informers/apps/v1/deployment.go | 2 +- staging/src/k8s.io/client-go/informers/apps/v1/interface.go | 2 +- staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go | 2 +- staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go | 2 +- .../client-go/informers/apps/v1beta1/controllerrevision.go | 2 +- .../src/k8s.io/client-go/informers/apps/v1beta1/deployment.go | 2 +- .../src/k8s.io/client-go/informers/apps/v1beta1/interface.go | 2 +- .../src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go | 2 +- .../client-go/informers/apps/v1beta2/controllerrevision.go | 2 +- .../src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go | 2 +- .../src/k8s.io/client-go/informers/apps/v1beta2/deployment.go | 2 +- .../src/k8s.io/client-go/informers/apps/v1beta2/interface.go | 2 +- .../src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go | 2 +- .../src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go | 2 +- staging/src/k8s.io/client-go/informers/autoscaling/interface.go | 2 +- .../informers/autoscaling/v1/horizontalpodautoscaler.go | 2 +- .../src/k8s.io/client-go/informers/autoscaling/v1/interface.go | 2 +- .../informers/autoscaling/v2beta1/horizontalpodautoscaler.go | 2 +- .../k8s.io/client-go/informers/autoscaling/v2beta1/interface.go | 2 +- staging/src/k8s.io/client-go/informers/batch/interface.go | 2 +- staging/src/k8s.io/client-go/informers/batch/v1/interface.go | 2 +- staging/src/k8s.io/client-go/informers/batch/v1/job.go | 2 +- staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go | 2 +- .../src/k8s.io/client-go/informers/batch/v1beta1/interface.go | 2 +- 
.../src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go | 2 +- .../src/k8s.io/client-go/informers/batch/v2alpha1/interface.go | 2 +- .../src/k8s.io/client-go/informers/certificates/interface.go | 2 +- .../informers/certificates/v1beta1/certificatesigningrequest.go | 2 +- .../client-go/informers/certificates/v1beta1/interface.go | 2 +- staging/src/k8s.io/client-go/informers/core/interface.go | 2 +- .../src/k8s.io/client-go/informers/core/v1/componentstatus.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/configmap.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/endpoints.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/event.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/interface.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/limitrange.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/namespace.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/node.go | 2 +- .../src/k8s.io/client-go/informers/core/v1/persistentvolume.go | 2 +- .../k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/pod.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go | 2 +- .../k8s.io/client-go/informers/core/v1/replicationcontroller.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/secret.go | 2 +- staging/src/k8s.io/client-go/informers/core/v1/service.go | 2 +- .../src/k8s.io/client-go/informers/core/v1/serviceaccount.go | 2 +- staging/src/k8s.io/client-go/informers/events/interface.go | 2 +- staging/src/k8s.io/client-go/informers/events/v1beta1/event.go | 2 +- .../src/k8s.io/client-go/informers/events/v1beta1/interface.go | 2 +- staging/src/k8s.io/client-go/informers/extensions/interface.go | 2 +- .../k8s.io/client-go/informers/extensions/v1beta1/daemonset.go | 2 +- .../k8s.io/client-go/informers/extensions/v1beta1/deployment.go | 2 +- 
.../k8s.io/client-go/informers/extensions/v1beta1/ingress.go | 2 +- .../k8s.io/client-go/informers/extensions/v1beta1/interface.go | 2 +- .../client-go/informers/extensions/v1beta1/podsecuritypolicy.go | 2 +- .../k8s.io/client-go/informers/extensions/v1beta1/replicaset.go | 2 +- staging/src/k8s.io/client-go/informers/factory.go | 2 +- staging/src/k8s.io/client-go/informers/generic.go | 2 +- .../informers/internalinterfaces/factory_interfaces.go | 2 +- staging/src/k8s.io/client-go/informers/networking/interface.go | 2 +- .../src/k8s.io/client-go/informers/networking/v1/interface.go | 2 +- .../k8s.io/client-go/informers/networking/v1/networkpolicy.go | 2 +- staging/src/k8s.io/client-go/informers/policy/interface.go | 2 +- .../src/k8s.io/client-go/informers/policy/v1beta1/interface.go | 2 +- .../client-go/informers/policy/v1beta1/poddisruptionbudget.go | 2 +- staging/src/k8s.io/client-go/informers/rbac/interface.go | 2 +- staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go | 2 +- .../k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go | 2 +- staging/src/k8s.io/client-go/informers/rbac/v1/interface.go | 2 +- staging/src/k8s.io/client-go/informers/rbac/v1/role.go | 2 +- staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go | 2 +- .../src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go | 2 +- .../client-go/informers/rbac/v1alpha1/clusterrolebinding.go | 2 +- .../src/k8s.io/client-go/informers/rbac/v1alpha1/interface.go | 2 +- staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go | 2 +- .../src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go | 2 +- .../src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go | 2 +- .../client-go/informers/rbac/v1beta1/clusterrolebinding.go | 2 +- .../src/k8s.io/client-go/informers/rbac/v1beta1/interface.go | 2 +- staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go | 2 +- .../src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go | 2 +- 
staging/src/k8s.io/client-go/informers/scheduling/interface.go | 2 +- .../k8s.io/client-go/informers/scheduling/v1alpha1/interface.go | 2 +- .../client-go/informers/scheduling/v1alpha1/priorityclass.go | 2 +- staging/src/k8s.io/client-go/informers/settings/interface.go | 2 +- .../k8s.io/client-go/informers/settings/v1alpha1/interface.go | 2 +- .../k8s.io/client-go/informers/settings/v1alpha1/podpreset.go | 2 +- staging/src/k8s.io/client-go/informers/storage/interface.go | 2 +- staging/src/k8s.io/client-go/informers/storage/v1/interface.go | 2 +- .../src/k8s.io/client-go/informers/storage/v1/storageclass.go | 2 +- .../k8s.io/client-go/informers/storage/v1alpha1/interface.go | 2 +- .../client-go/informers/storage/v1alpha1/volumeattachment.go | 2 +- .../src/k8s.io/client-go/informers/storage/v1beta1/interface.go | 2 +- .../k8s.io/client-go/informers/storage/v1beta1/storageclass.go | 2 +- staging/src/k8s.io/client-go/kubernetes/clientset.go | 2 +- staging/src/k8s.io/client-go/kubernetes/doc.go | 2 +- .../src/k8s.io/client-go/kubernetes/fake/clientset_generated.go | 2 +- staging/src/k8s.io/client-go/kubernetes/fake/doc.go | 2 +- staging/src/k8s.io/client-go/kubernetes/fake/register.go | 2 +- staging/src/k8s.io/client-go/kubernetes/scheme/doc.go | 2 +- staging/src/k8s.io/client-go/kubernetes/scheme/register.go | 2 +- .../v1alpha1/admissionregistration_client.go | 2 +- .../kubernetes/typed/admissionregistration/v1alpha1/doc.go | 2 +- .../kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go | 2 +- .../v1alpha1/fake/fake_admissionregistration_client.go | 2 +- .../v1alpha1/fake/fake_initializerconfiguration.go | 2 +- .../typed/admissionregistration/v1alpha1/generated_expansion.go | 2 +- .../admissionregistration/v1alpha1/initializerconfiguration.go | 2 +- .../v1beta1/admissionregistration_client.go | 2 +- .../kubernetes/typed/admissionregistration/v1beta1/doc.go | 2 +- .../kubernetes/typed/admissionregistration/v1beta1/fake/doc.go | 2 +- 
.../v1beta1/fake/fake_admissionregistration_client.go | 2 +- .../v1beta1/fake/fake_mutatingwebhookconfiguration.go | 2 +- .../v1beta1/fake/fake_validatingwebhookconfiguration.go | 2 +- .../typed/admissionregistration/v1beta1/generated_expansion.go | 2 +- .../v1beta1/mutatingwebhookconfiguration.go | 2 +- .../v1beta1/validatingwebhookconfiguration.go | 2 +- .../k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go | 2 +- .../client-go/kubernetes/typed/apps/v1/controllerrevision.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go | 2 +- staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go | 2 +- .../client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go | 2 +- .../kubernetes/typed/apps/v1/fake/fake_controllerrevision.go | 2 +- .../client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go | 2 +- .../client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go | 2 +- .../client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go | 2 +- .../client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go | 2 +- .../client-go/kubernetes/typed/apps/v1/generated_expansion.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go | 2 +- .../k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go | 2 +- .../client-go/kubernetes/typed/apps/v1beta1/apps_client.go | 2 +- .../kubernetes/typed/apps/v1beta1/controllerrevision.go | 2 +- .../client-go/kubernetes/typed/apps/v1beta1/deployment.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go | 2 +- .../k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go | 2 +- .../kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go | 2 +- .../typed/apps/v1beta1/fake/fake_controllerrevision.go | 2 +- .../kubernetes/typed/apps/v1beta1/fake/fake_deployment.go | 2 +- .../client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go | 2 +- 
.../kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go | 2 +- .../kubernetes/typed/apps/v1beta1/generated_expansion.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go | 2 +- .../client-go/kubernetes/typed/apps/v1beta1/statefulset.go | 2 +- .../client-go/kubernetes/typed/apps/v1beta2/apps_client.go | 2 +- .../kubernetes/typed/apps/v1beta2/controllerrevision.go | 2 +- .../k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go | 2 +- .../client-go/kubernetes/typed/apps/v1beta2/deployment.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go | 2 +- .../k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go | 2 +- .../kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go | 2 +- .../typed/apps/v1beta2/fake/fake_controllerrevision.go | 2 +- .../kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go | 2 +- .../kubernetes/typed/apps/v1beta2/fake/fake_deployment.go | 2 +- .../kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go | 2 +- .../client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go | 2 +- .../kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go | 2 +- .../kubernetes/typed/apps/v1beta2/generated_expansion.go | 2 +- .../client-go/kubernetes/typed/apps/v1beta2/replicaset.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go | 2 +- .../client-go/kubernetes/typed/apps/v1beta2/statefulset.go | 2 +- .../kubernetes/typed/authentication/v1/authentication_client.go | 2 +- .../k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go | 2 +- .../client-go/kubernetes/typed/authentication/v1/fake/doc.go | 2 +- .../typed/authentication/v1/fake/fake_authentication_client.go | 2 +- .../kubernetes/typed/authentication/v1/fake/fake_tokenreview.go | 2 +- .../kubernetes/typed/authentication/v1/generated_expansion.go | 2 +- .../client-go/kubernetes/typed/authentication/v1/tokenreview.go | 2 +- .../typed/authentication/v1beta1/authentication_client.go | 2 +- 
.../client-go/kubernetes/typed/authentication/v1beta1/doc.go | 2 +- .../kubernetes/typed/authentication/v1beta1/fake/doc.go | 2 +- .../authentication/v1beta1/fake/fake_authentication_client.go | 2 +- .../typed/authentication/v1beta1/fake/fake_tokenreview.go | 2 +- .../typed/authentication/v1beta1/generated_expansion.go | 2 +- .../kubernetes/typed/authentication/v1beta1/tokenreview.go | 2 +- .../kubernetes/typed/authorization/v1/authorization_client.go | 2 +- .../k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go | 2 +- .../client-go/kubernetes/typed/authorization/v1/fake/doc.go | 2 +- .../typed/authorization/v1/fake/fake_authorization_client.go | 2 +- .../authorization/v1/fake/fake_localsubjectaccessreview.go | 2 +- .../typed/authorization/v1/fake/fake_selfsubjectaccessreview.go | 2 +- .../typed/authorization/v1/fake/fake_selfsubjectrulesreview.go | 2 +- .../typed/authorization/v1/fake/fake_subjectaccessreview.go | 2 +- .../kubernetes/typed/authorization/v1/generated_expansion.go | 2 +- .../typed/authorization/v1/localsubjectaccessreview.go | 2 +- .../typed/authorization/v1/selfsubjectaccessreview.go | 2 +- .../kubernetes/typed/authorization/v1/selfsubjectrulesreview.go | 2 +- .../kubernetes/typed/authorization/v1/subjectaccessreview.go | 2 +- .../typed/authorization/v1beta1/authorization_client.go | 2 +- .../client-go/kubernetes/typed/authorization/v1beta1/doc.go | 2 +- .../kubernetes/typed/authorization/v1beta1/fake/doc.go | 2 +- .../authorization/v1beta1/fake/fake_authorization_client.go | 2 +- .../authorization/v1beta1/fake/fake_localsubjectaccessreview.go | 2 +- .../authorization/v1beta1/fake/fake_selfsubjectaccessreview.go | 2 +- .../authorization/v1beta1/fake/fake_selfsubjectrulesreview.go | 2 +- .../authorization/v1beta1/fake/fake_subjectaccessreview.go | 2 +- .../typed/authorization/v1beta1/generated_expansion.go | 2 +- .../typed/authorization/v1beta1/localsubjectaccessreview.go | 2 +- .../typed/authorization/v1beta1/selfsubjectaccessreview.go | 2 
+- .../typed/authorization/v1beta1/selfsubjectrulesreview.go | 2 +- .../typed/authorization/v1beta1/subjectaccessreview.go | 2 +- .../kubernetes/typed/autoscaling/v1/autoscaling_client.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go | 2 +- .../client-go/kubernetes/typed/autoscaling/v1/fake/doc.go | 2 +- .../typed/autoscaling/v1/fake/fake_autoscaling_client.go | 2 +- .../typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go | 2 +- .../kubernetes/typed/autoscaling/v1/generated_expansion.go | 2 +- .../kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go | 2 +- .../kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go | 2 +- .../client-go/kubernetes/typed/autoscaling/v2beta1/doc.go | 2 +- .../client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go | 2 +- .../typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go | 2 +- .../autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go | 2 +- .../kubernetes/typed/autoscaling/v2beta1/generated_expansion.go | 2 +- .../typed/autoscaling/v2beta1/horizontalpodautoscaler.go | 2 +- .../k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go | 2 +- staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go | 2 +- .../kubernetes/typed/batch/v1/fake/fake_batch_client.go | 2 +- .../k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go | 2 +- .../client-go/kubernetes/typed/batch/v1/generated_expansion.go | 2 +- staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/job.go | 2 +- .../client-go/kubernetes/typed/batch/v1beta1/batch_client.go | 2 +- .../k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go | 2 +- .../k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go | 2 +- .../kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go | 2 +- .../kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go | 2 +- 
.../kubernetes/typed/batch/v1beta1/generated_expansion.go | 2 +- .../client-go/kubernetes/typed/batch/v2alpha1/batch_client.go | 2 +- .../k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go | 2 +- .../client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go | 2 +- .../kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go | 2 +- .../kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go | 2 +- .../kubernetes/typed/batch/v2alpha1/generated_expansion.go | 2 +- .../typed/certificates/v1beta1/certificates_client.go | 2 +- .../typed/certificates/v1beta1/certificatesigningrequest.go | 2 +- .../client-go/kubernetes/typed/certificates/v1beta1/doc.go | 2 +- .../client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go | 2 +- .../typed/certificates/v1beta1/fake/fake_certificates_client.go | 2 +- .../certificates/v1beta1/fake/fake_certificatesigningrequest.go | 2 +- .../typed/certificates/v1beta1/generated_expansion.go | 2 +- .../client-go/kubernetes/typed/core/v1/componentstatus.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go | 2 +- .../k8s.io/client-go/kubernetes/typed/core/v1/core_client.go | 2 +- staging/src/k8s.io/client-go/kubernetes/typed/core/v1/doc.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go | 2 +- staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go | 2 +- .../kubernetes/typed/core/v1/fake/fake_componentstatus.go | 2 +- .../client-go/kubernetes/typed/core/v1/fake/fake_configmap.go | 2 +- .../client-go/kubernetes/typed/core/v1/fake/fake_core_client.go | 2 +- .../client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go | 2 +- .../client-go/kubernetes/typed/core/v1/fake/fake_event.go | 2 +- .../client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go | 2 +- .../client-go/kubernetes/typed/core/v1/fake/fake_namespace.go | 2 +- 
.../k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go | 2 +- .../kubernetes/typed/core/v1/fake/fake_persistentvolume.go | 2 +- .../kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go | 2 +- .../k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go | 2 +- .../client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go | 2 +- .../kubernetes/typed/core/v1/fake/fake_replicationcontroller.go | 2 +- .../kubernetes/typed/core/v1/fake/fake_resourcequota.go | 2 +- .../client-go/kubernetes/typed/core/v1/fake/fake_secret.go | 2 +- .../client-go/kubernetes/typed/core/v1/fake/fake_service.go | 2 +- .../kubernetes/typed/core/v1/fake/fake_serviceaccount.go | 2 +- .../client-go/kubernetes/typed/core/v1/generated_expansion.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go | 2 +- staging/src/k8s.io/client-go/kubernetes/typed/core/v1/node.go | 2 +- .../client-go/kubernetes/typed/core/v1/persistentvolume.go | 2 +- .../client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go | 2 +- staging/src/k8s.io/client-go/kubernetes/typed/core/v1/pod.go | 2 +- .../k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go | 2 +- .../client-go/kubernetes/typed/core/v1/replicationcontroller.go | 2 +- .../k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go | 2 +- staging/src/k8s.io/client-go/kubernetes/typed/core/v1/secret.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/core/v1/service.go | 2 +- .../k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go | 2 +- .../k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go | 2 +- .../client-go/kubernetes/typed/events/v1beta1/events_client.go | 2 +- .../client-go/kubernetes/typed/events/v1beta1/fake/doc.go | 2 +- .../kubernetes/typed/events/v1beta1/fake/fake_event.go | 2 +- .../kubernetes/typed/events/v1beta1/fake/fake_events_client.go | 2 +- 
.../kubernetes/typed/events/v1beta1/generated_expansion.go | 2 +- .../client-go/kubernetes/typed/extensions/v1beta1/daemonset.go | 2 +- .../client-go/kubernetes/typed/extensions/v1beta1/deployment.go | 2 +- .../k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go | 2 +- .../kubernetes/typed/extensions/v1beta1/extensions_client.go | 2 +- .../client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go | 2 +- .../kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go | 2 +- .../kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go | 2 +- .../typed/extensions/v1beta1/fake/fake_extensions_client.go | 2 +- .../kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go | 2 +- .../typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go | 2 +- .../kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go | 2 +- .../kubernetes/typed/extensions/v1beta1/fake/fake_scale.go | 2 +- .../kubernetes/typed/extensions/v1beta1/generated_expansion.go | 2 +- .../client-go/kubernetes/typed/extensions/v1beta1/ingress.go | 2 +- .../kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go | 2 +- .../client-go/kubernetes/typed/extensions/v1beta1/replicaset.go | 2 +- .../client-go/kubernetes/typed/extensions/v1beta1/scale.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go | 2 +- .../k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go | 2 +- .../typed/networking/v1/fake/fake_networking_client.go | 2 +- .../kubernetes/typed/networking/v1/fake/fake_networkpolicy.go | 2 +- .../kubernetes/typed/networking/v1/generated_expansion.go | 2 +- .../kubernetes/typed/networking/v1/networking_client.go | 2 +- .../client-go/kubernetes/typed/networking/v1/networkpolicy.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go | 2 +- .../client-go/kubernetes/typed/policy/v1beta1/eviction.go | 2 +- .../client-go/kubernetes/typed/policy/v1beta1/fake/doc.go | 2 +- .../kubernetes/typed/policy/v1beta1/fake/fake_eviction.go | 2 +- 
.../typed/policy/v1beta1/fake/fake_poddisruptionbudget.go | 2 +- .../kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go | 2 +- .../kubernetes/typed/policy/v1beta1/generated_expansion.go | 2 +- .../kubernetes/typed/policy/v1beta1/poddisruptionbudget.go | 2 +- .../client-go/kubernetes/typed/policy/v1beta1/policy_client.go | 2 +- .../k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go | 2 +- .../client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go | 2 +- staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go | 2 +- .../client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go | 2 +- .../kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go | 2 +- .../client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go | 2 +- .../k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go | 2 +- .../client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go | 2 +- .../client-go/kubernetes/typed/rbac/v1/generated_expansion.go | 2 +- .../k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go | 2 +- staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go | 2 +- .../k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go | 2 +- .../client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go | 2 +- .../kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go | 2 +- .../k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go | 2 +- .../kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go | 2 +- .../typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go | 2 +- .../kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go | 2 +- .../client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go | 2 +- .../kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go | 2 +- .../kubernetes/typed/rbac/v1alpha1/generated_expansion.go | 2 +- .../client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go | 2 +- 
.../src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go | 2 +- .../client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go | 2 +- .../client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go | 2 +- .../kubernetes/typed/rbac/v1beta1/clusterrolebinding.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go | 2 +- .../k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go | 2 +- .../kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go | 2 +- .../typed/rbac/v1beta1/fake/fake_clusterrolebinding.go | 2 +- .../kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go | 2 +- .../client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go | 2 +- .../kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go | 2 +- .../kubernetes/typed/rbac/v1beta1/generated_expansion.go | 2 +- .../client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go | 2 +- .../src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go | 2 +- .../client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go | 2 +- .../client-go/kubernetes/typed/scheduling/v1alpha1/doc.go | 2 +- .../client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go | 2 +- .../typed/scheduling/v1alpha1/fake/fake_priorityclass.go | 2 +- .../typed/scheduling/v1alpha1/fake/fake_scheduling_client.go | 2 +- .../kubernetes/typed/scheduling/v1alpha1/generated_expansion.go | 2 +- .../kubernetes/typed/scheduling/v1alpha1/priorityclass.go | 2 +- .../kubernetes/typed/scheduling/v1alpha1/scheduling_client.go | 2 +- .../k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go | 2 +- .../client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go | 2 +- .../kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go | 2 +- .../typed/settings/v1alpha1/fake/fake_settings_client.go | 2 +- .../kubernetes/typed/settings/v1alpha1/generated_expansion.go | 2 +- .../client-go/kubernetes/typed/settings/v1alpha1/podpreset.go | 2 +- .../kubernetes/typed/settings/v1alpha1/settings_client.go | 2 +- 
staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go | 2 +- .../k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go | 2 +- .../kubernetes/typed/storage/v1/fake/fake_storage_client.go | 2 +- .../kubernetes/typed/storage/v1/fake/fake_storageclass.go | 2 +- .../kubernetes/typed/storage/v1/generated_expansion.go | 2 +- .../client-go/kubernetes/typed/storage/v1/storage_client.go | 2 +- .../client-go/kubernetes/typed/storage/v1/storageclass.go | 2 +- .../k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go | 2 +- .../client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go | 2 +- .../typed/storage/v1alpha1/fake/fake_storage_client.go | 2 +- .../typed/storage/v1alpha1/fake/fake_volumeattachment.go | 2 +- .../kubernetes/typed/storage/v1alpha1/generated_expansion.go | 2 +- .../kubernetes/typed/storage/v1alpha1/storage_client.go | 2 +- .../kubernetes/typed/storage/v1alpha1/volumeattachment.go | 2 +- .../k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go | 2 +- .../client-go/kubernetes/typed/storage/v1beta1/fake/doc.go | 2 +- .../typed/storage/v1beta1/fake/fake_storage_client.go | 2 +- .../kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go | 2 +- .../kubernetes/typed/storage/v1beta1/generated_expansion.go | 2 +- .../kubernetes/typed/storage/v1beta1/storage_client.go | 2 +- .../client-go/kubernetes/typed/storage/v1beta1/storageclass.go | 2 +- .../admissionregistration/v1alpha1/expansion_generated.go | 2 +- .../admissionregistration/v1alpha1/initializerconfiguration.go | 2 +- .../admissionregistration/v1beta1/expansion_generated.go | 2 +- .../v1beta1/mutatingwebhookconfiguration.go | 2 +- .../v1beta1/validatingwebhookconfiguration.go | 2 +- .../src/k8s.io/client-go/listers/apps/v1/controllerrevision.go | 2 +- staging/src/k8s.io/client-go/listers/apps/v1/daemonset.go | 2 +- staging/src/k8s.io/client-go/listers/apps/v1/deployment.go | 2 +- .../src/k8s.io/client-go/listers/apps/v1/expansion_generated.go | 2 +- 
staging/src/k8s.io/client-go/listers/apps/v1/replicaset.go | 2 +- staging/src/k8s.io/client-go/listers/apps/v1/statefulset.go | 2 +- .../k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go | 2 +- staging/src/k8s.io/client-go/listers/apps/v1beta1/deployment.go | 2 +- .../client-go/listers/apps/v1beta1/expansion_generated.go | 2 +- staging/src/k8s.io/client-go/listers/apps/v1beta1/scale.go | 2 +- .../src/k8s.io/client-go/listers/apps/v1beta1/statefulset.go | 2 +- .../k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go | 2 +- staging/src/k8s.io/client-go/listers/apps/v1beta2/daemonset.go | 2 +- staging/src/k8s.io/client-go/listers/apps/v1beta2/deployment.go | 2 +- .../client-go/listers/apps/v1beta2/expansion_generated.go | 2 +- staging/src/k8s.io/client-go/listers/apps/v1beta2/replicaset.go | 2 +- staging/src/k8s.io/client-go/listers/apps/v1beta2/scale.go | 2 +- .../src/k8s.io/client-go/listers/apps/v1beta2/statefulset.go | 2 +- .../client-go/listers/authentication/v1/expansion_generated.go | 2 +- .../k8s.io/client-go/listers/authentication/v1/tokenreview.go | 2 +- .../listers/authentication/v1beta1/expansion_generated.go | 2 +- .../client-go/listers/authentication/v1beta1/tokenreview.go | 2 +- .../client-go/listers/authorization/v1/expansion_generated.go | 2 +- .../listers/authorization/v1/localsubjectaccessreview.go | 2 +- .../listers/authorization/v1/selfsubjectaccessreview.go | 2 +- .../listers/authorization/v1/selfsubjectrulesreview.go | 2 +- .../client-go/listers/authorization/v1/subjectaccessreview.go | 2 +- .../listers/authorization/v1beta1/expansion_generated.go | 2 +- .../listers/authorization/v1beta1/localsubjectaccessreview.go | 2 +- .../listers/authorization/v1beta1/selfsubjectaccessreview.go | 2 +- .../listers/authorization/v1beta1/selfsubjectrulesreview.go | 2 +- .../listers/authorization/v1beta1/subjectaccessreview.go | 2 +- .../client-go/listers/autoscaling/v1/expansion_generated.go | 2 +- 
.../client-go/listers/autoscaling/v1/horizontalpodautoscaler.go | 2 +- .../listers/autoscaling/v2beta1/expansion_generated.go | 2 +- .../listers/autoscaling/v2beta1/horizontalpodautoscaler.go | 2 +- .../k8s.io/client-go/listers/batch/v1/expansion_generated.go | 2 +- staging/src/k8s.io/client-go/listers/batch/v1/job.go | 2 +- staging/src/k8s.io/client-go/listers/batch/v1beta1/cronjob.go | 2 +- .../client-go/listers/batch/v1beta1/expansion_generated.go | 2 +- staging/src/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go | 2 +- .../client-go/listers/batch/v2alpha1/expansion_generated.go | 2 +- .../listers/certificates/v1beta1/certificatesigningrequest.go | 2 +- .../listers/certificates/v1beta1/expansion_generated.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/componentstatus.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/configmap.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/endpoints.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/event.go | 2 +- .../src/k8s.io/client-go/listers/core/v1/expansion_generated.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/limitrange.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/namespace.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/node.go | 2 +- .../src/k8s.io/client-go/listers/core/v1/persistentvolume.go | 2 +- .../k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/pod.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/podtemplate.go | 2 +- .../k8s.io/client-go/listers/core/v1/replicationcontroller.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/resourcequota.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/secret.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/service.go | 2 +- staging/src/k8s.io/client-go/listers/core/v1/serviceaccount.go | 2 +- staging/src/k8s.io/client-go/listers/events/v1beta1/event.go | 2 +- .../client-go/listers/events/v1beta1/expansion_generated.go | 2 +- 
.../k8s.io/client-go/listers/extensions/v1beta1/daemonset.go | 2 +- .../k8s.io/client-go/listers/extensions/v1beta1/deployment.go | 2 +- .../client-go/listers/extensions/v1beta1/expansion_generated.go | 2 +- .../src/k8s.io/client-go/listers/extensions/v1beta1/ingress.go | 2 +- .../client-go/listers/extensions/v1beta1/podsecuritypolicy.go | 2 +- .../k8s.io/client-go/listers/extensions/v1beta1/replicaset.go | 2 +- .../src/k8s.io/client-go/listers/extensions/v1beta1/scale.go | 2 +- .../listers/imagepolicy/v1alpha1/expansion_generated.go | 2 +- .../client-go/listers/imagepolicy/v1alpha1/imagereview.go | 2 +- .../client-go/listers/networking/v1/expansion_generated.go | 2 +- .../src/k8s.io/client-go/listers/networking/v1/networkpolicy.go | 2 +- staging/src/k8s.io/client-go/listers/policy/v1beta1/eviction.go | 2 +- .../client-go/listers/policy/v1beta1/expansion_generated.go | 2 +- .../client-go/listers/policy/v1beta1/poddisruptionbudget.go | 2 +- staging/src/k8s.io/client-go/listers/rbac/v1/clusterrole.go | 2 +- .../src/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go | 2 +- .../src/k8s.io/client-go/listers/rbac/v1/expansion_generated.go | 2 +- staging/src/k8s.io/client-go/listers/rbac/v1/role.go | 2 +- staging/src/k8s.io/client-go/listers/rbac/v1/rolebinding.go | 2 +- .../src/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go | 2 +- .../client-go/listers/rbac/v1alpha1/clusterrolebinding.go | 2 +- .../client-go/listers/rbac/v1alpha1/expansion_generated.go | 2 +- staging/src/k8s.io/client-go/listers/rbac/v1alpha1/role.go | 2 +- .../src/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go | 2 +- .../src/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go | 2 +- .../k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go | 2 +- .../client-go/listers/rbac/v1beta1/expansion_generated.go | 2 +- staging/src/k8s.io/client-go/listers/rbac/v1beta1/role.go | 2 +- .../src/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go | 2 +- 
.../listers/scheduling/v1alpha1/expansion_generated.go | 2 +- .../client-go/listers/scheduling/v1alpha1/priorityclass.go | 2 +- .../client-go/listers/settings/v1alpha1/expansion_generated.go | 2 +- .../src/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go | 2 +- .../k8s.io/client-go/listers/storage/v1/expansion_generated.go | 2 +- staging/src/k8s.io/client-go/listers/storage/v1/storageclass.go | 2 +- .../client-go/listers/storage/v1alpha1/expansion_generated.go | 2 +- .../client-go/listers/storage/v1alpha1/volumeattachment.go | 2 +- .../client-go/listers/storage/v1beta1/expansion_generated.go | 2 +- .../k8s.io/client-go/listers/storage/v1beta1/storageclass.go | 2 +- staging/src/k8s.io/client-go/rest/zz_generated.deepcopy.go | 2 +- .../scale/scheme/appsv1beta1/zz_generated.conversion.go | 2 +- .../scale/scheme/appsv1beta2/zz_generated.conversion.go | 2 +- .../scale/scheme/autoscalingv1/zz_generated.conversion.go | 2 +- .../scale/scheme/extensionsv1beta1/zz_generated.conversion.go | 2 +- .../src/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go | 2 +- .../client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go | 2 +- .../client-go/tools/clientcmd/api/zz_generated.deepcopy.go | 2 +- .../apiserver/apis/example/v1/zz_generated.conversion.go | 2 +- .../apiserver/apis/example/v1/zz_generated.deepcopy.go | 2 +- .../apiserver/apis/example/v1/zz_generated.defaults.go | 2 +- .../_examples/apiserver/apis/example/zz_generated.deepcopy.go | 2 +- .../apiserver/apis/example2/v1/zz_generated.conversion.go | 2 +- .../apiserver/apis/example2/v1/zz_generated.deepcopy.go | 2 +- .../apiserver/apis/example2/v1/zz_generated.defaults.go | 2 +- .../_examples/apiserver/apis/example2/zz_generated.deepcopy.go | 2 +- .../_examples/apiserver/clientset/internalversion/clientset.go | 2 +- .../_examples/apiserver/clientset/internalversion/doc.go | 2 +- .../clientset/internalversion/fake/clientset_generated.go | 2 +- .../_examples/apiserver/clientset/internalversion/fake/doc.go | 2 +- 
.../apiserver/clientset/internalversion/fake/register.go | 2 +- .../_examples/apiserver/clientset/internalversion/scheme/doc.go | 2 +- .../apiserver/clientset/internalversion/scheme/register.go | 2 +- .../internalversion/typed/example/internalversion/doc.go | 2 +- .../typed/example/internalversion/example_client.go | 2 +- .../internalversion/typed/example/internalversion/fake/doc.go | 2 +- .../typed/example/internalversion/fake/fake_example_client.go | 2 +- .../typed/example/internalversion/fake/fake_testtype.go | 2 +- .../typed/example/internalversion/generated_expansion.go | 2 +- .../internalversion/typed/example/internalversion/testtype.go | 2 +- .../internalversion/typed/example2/internalversion/doc.go | 2 +- .../typed/example2/internalversion/example2_client.go | 2 +- .../internalversion/typed/example2/internalversion/fake/doc.go | 2 +- .../typed/example2/internalversion/fake/fake_example2_client.go | 2 +- .../typed/example2/internalversion/fake/fake_testtype.go | 2 +- .../typed/example2/internalversion/generated_expansion.go | 2 +- .../internalversion/typed/example2/internalversion/testtype.go | 2 +- .../_examples/apiserver/clientset/versioned/clientset.go | 2 +- .../_examples/apiserver/clientset/versioned/doc.go | 2 +- .../apiserver/clientset/versioned/fake/clientset_generated.go | 2 +- .../_examples/apiserver/clientset/versioned/fake/doc.go | 2 +- .../_examples/apiserver/clientset/versioned/fake/register.go | 2 +- .../_examples/apiserver/clientset/versioned/scheme/doc.go | 2 +- .../_examples/apiserver/clientset/versioned/scheme/register.go | 2 +- .../apiserver/clientset/versioned/typed/example/v1/doc.go | 2 +- .../clientset/versioned/typed/example/v1/example_client.go | 2 +- .../apiserver/clientset/versioned/typed/example/v1/fake/doc.go | 2 +- .../versioned/typed/example/v1/fake/fake_example_client.go | 2 +- .../clientset/versioned/typed/example/v1/fake/fake_testtype.go | 2 +- .../clientset/versioned/typed/example/v1/generated_expansion.go | 2 +- 
.../apiserver/clientset/versioned/typed/example/v1/testtype.go | 2 +- .../apiserver/clientset/versioned/typed/example2/v1/doc.go | 2 +- .../clientset/versioned/typed/example2/v1/example2_client.go | 2 +- .../apiserver/clientset/versioned/typed/example2/v1/fake/doc.go | 2 +- .../versioned/typed/example2/v1/fake/fake_example2_client.go | 2 +- .../clientset/versioned/typed/example2/v1/fake/fake_testtype.go | 2 +- .../versioned/typed/example2/v1/generated_expansion.go | 2 +- .../apiserver/clientset/versioned/typed/example2/v1/testtype.go | 2 +- .../apiserver/informers/externalversions/example/interface.go | 2 +- .../informers/externalversions/example/v1/interface.go | 2 +- .../apiserver/informers/externalversions/example/v1/testtype.go | 2 +- .../apiserver/informers/externalversions/example2/interface.go | 2 +- .../informers/externalversions/example2/v1/interface.go | 2 +- .../informers/externalversions/example2/v1/testtype.go | 2 +- .../_examples/apiserver/informers/externalversions/factory.go | 2 +- .../_examples/apiserver/informers/externalversions/generic.go | 2 +- .../externalversions/internalinterfaces/factory_interfaces.go | 2 +- .../apiserver/informers/internalversion/example/interface.go | 2 +- .../internalversion/example/internalversion/interface.go | 2 +- .../internalversion/example/internalversion/testtype.go | 2 +- .../apiserver/informers/internalversion/example2/interface.go | 2 +- .../internalversion/example2/internalversion/interface.go | 2 +- .../internalversion/example2/internalversion/testtype.go | 2 +- .../_examples/apiserver/informers/internalversion/factory.go | 2 +- .../_examples/apiserver/informers/internalversion/generic.go | 2 +- .../internalversion/internalinterfaces/factory_interfaces.go | 2 +- .../listers/example/internalversion/expansion_generated.go | 2 +- .../apiserver/listers/example/internalversion/testtype.go | 2 +- .../apiserver/listers/example/v1/expansion_generated.go | 2 +- .../_examples/apiserver/listers/example/v1/testtype.go | 
2 +- .../listers/example2/internalversion/expansion_generated.go | 2 +- .../apiserver/listers/example2/internalversion/testtype.go | 2 +- .../apiserver/listers/example2/v1/expansion_generated.go | 2 +- .../_examples/apiserver/listers/example2/v1/testtype.go | 2 +- .../_examples/crd/apis/example/v1/zz_generated.deepcopy.go | 2 +- .../_examples/crd/apis/example2/v1/zz_generated.deepcopy.go | 2 +- .../_examples/crd/clientset/versioned/clientset.go | 2 +- .../code-generator/_examples/crd/clientset/versioned/doc.go | 2 +- .../crd/clientset/versioned/fake/clientset_generated.go | 2 +- .../_examples/crd/clientset/versioned/fake/doc.go | 2 +- .../_examples/crd/clientset/versioned/fake/register.go | 2 +- .../_examples/crd/clientset/versioned/scheme/doc.go | 2 +- .../_examples/crd/clientset/versioned/scheme/register.go | 2 +- .../_examples/crd/clientset/versioned/typed/example/v1/doc.go | 2 +- .../crd/clientset/versioned/typed/example/v1/example_client.go | 2 +- .../crd/clientset/versioned/typed/example/v1/fake/doc.go | 2 +- .../versioned/typed/example/v1/fake/fake_example_client.go | 2 +- .../clientset/versioned/typed/example/v1/fake/fake_testtype.go | 2 +- .../clientset/versioned/typed/example/v1/generated_expansion.go | 2 +- .../crd/clientset/versioned/typed/example/v1/testtype.go | 2 +- .../_examples/crd/clientset/versioned/typed/example2/v1/doc.go | 2 +- .../clientset/versioned/typed/example2/v1/example2_client.go | 2 +- .../crd/clientset/versioned/typed/example2/v1/fake/doc.go | 2 +- .../versioned/typed/example2/v1/fake/fake_example2_client.go | 2 +- .../clientset/versioned/typed/example2/v1/fake/fake_testtype.go | 2 +- .../versioned/typed/example2/v1/generated_expansion.go | 2 +- .../crd/clientset/versioned/typed/example2/v1/testtype.go | 2 +- .../crd/informers/externalversions/example/interface.go | 2 +- .../crd/informers/externalversions/example/v1/interface.go | 2 +- .../crd/informers/externalversions/example/v1/testtype.go | 2 +- 
.../crd/informers/externalversions/example2/interface.go | 2 +- .../crd/informers/externalversions/example2/v1/interface.go | 2 +- .../crd/informers/externalversions/example2/v1/testtype.go | 2 +- .../_examples/crd/informers/externalversions/factory.go | 2 +- .../_examples/crd/informers/externalversions/generic.go | 2 +- .../externalversions/internalinterfaces/factory_interfaces.go | 2 +- .../_examples/crd/listers/example/v1/expansion_generated.go | 2 +- .../code-generator/_examples/crd/listers/example/v1/testtype.go | 2 +- .../_examples/crd/listers/example2/v1/expansion_generated.go | 2 +- .../_examples/crd/listers/example2/v1/testtype.go | 2 +- .../pkg/apis/apiregistration/v1beta1/generated.pb.go | 2 +- .../pkg/apis/apiregistration/v1beta1/generated.proto | 2 +- .../pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go | 2 +- .../pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go | 2 +- .../pkg/apis/apiregistration/zz_generated.deepcopy.go | 2 +- .../pkg/client/clientset_generated/clientset/clientset.go | 2 +- .../pkg/client/clientset_generated/clientset/doc.go | 2 +- .../clientset_generated/clientset/fake/clientset_generated.go | 2 +- .../pkg/client/clientset_generated/clientset/fake/doc.go | 2 +- .../pkg/client/clientset_generated/clientset/fake/register.go | 2 +- .../pkg/client/clientset_generated/clientset/scheme/doc.go | 2 +- .../pkg/client/clientset_generated/clientset/scheme/register.go | 2 +- .../typed/apiregistration/v1beta1/apiregistration_client.go | 2 +- .../clientset/typed/apiregistration/v1beta1/apiservice.go | 2 +- .../clientset/typed/apiregistration/v1beta1/doc.go | 2 +- .../clientset/typed/apiregistration/v1beta1/fake/doc.go | 2 +- .../apiregistration/v1beta1/fake/fake_apiregistration_client.go | 2 +- .../typed/apiregistration/v1beta1/fake/fake_apiservice.go | 2 +- .../typed/apiregistration/v1beta1/generated_expansion.go | 2 +- .../client/clientset_generated/internalclientset/clientset.go | 2 +- 
.../pkg/client/clientset_generated/internalclientset/doc.go | 2 +- .../internalclientset/fake/clientset_generated.go | 2 +- .../client/clientset_generated/internalclientset/fake/doc.go | 2 +- .../clientset_generated/internalclientset/fake/register.go | 2 +- .../client/clientset_generated/internalclientset/scheme/doc.go | 2 +- .../clientset_generated/internalclientset/scheme/register.go | 2 +- .../apiregistration/internalversion/apiregistration_client.go | 2 +- .../typed/apiregistration/internalversion/apiservice.go | 2 +- .../typed/apiregistration/internalversion/doc.go | 2 +- .../typed/apiregistration/internalversion/fake/doc.go | 2 +- .../internalversion/fake/fake_apiregistration_client.go | 2 +- .../apiregistration/internalversion/fake/fake_apiservice.go | 2 +- .../apiregistration/internalversion/generated_expansion.go | 2 +- .../informers/externalversions/apiregistration/interface.go | 2 +- .../externalversions/apiregistration/v1beta1/apiservice.go | 2 +- .../externalversions/apiregistration/v1beta1/interface.go | 2 +- .../pkg/client/informers/externalversions/factory.go | 2 +- .../pkg/client/informers/externalversions/generic.go | 2 +- .../externalversions/internalinterfaces/factory_interfaces.go | 2 +- .../informers/internalversion/apiregistration/interface.go | 2 +- .../apiregistration/internalversion/apiservice.go | 2 +- .../apiregistration/internalversion/interface.go | 2 +- .../pkg/client/informers/internalversion/factory.go | 2 +- .../pkg/client/informers/internalversion/generic.go | 2 +- .../internalversion/internalinterfaces/factory_interfaces.go | 2 +- .../listers/apiregistration/internalversion/apiservice.go | 2 +- .../apiregistration/internalversion/expansion_generated.go | 2 +- .../pkg/client/listers/apiregistration/v1beta1/apiservice.go | 2 +- .../listers/apiregistration/v1beta1/expansion_generated.go | 2 +- .../metrics/pkg/apis/custom_metrics/v1beta1/generated.pb.go | 2 +- .../metrics/pkg/apis/custom_metrics/v1beta1/generated.proto | 2 +- 
.../pkg/apis/custom_metrics/v1beta1/zz_generated.conversion.go | 2 +- .../pkg/apis/custom_metrics/v1beta1/zz_generated.deepcopy.go | 2 +- .../metrics/pkg/apis/custom_metrics/zz_generated.deepcopy.go | 2 +- .../k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go | 2 +- .../k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto | 2 +- .../pkg/apis/metrics/v1alpha1/zz_generated.conversion.go | 2 +- .../metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go | 2 +- .../src/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go | 2 +- .../src/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto | 2 +- .../metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go | 2 +- .../metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go | 2 +- .../k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go | 2 +- .../pkg/client/clientset_generated/clientset/clientset.go | 2 +- .../metrics/pkg/client/clientset_generated/clientset/doc.go | 2 +- .../clientset_generated/clientset/fake/clientset_generated.go | 2 +- .../pkg/client/clientset_generated/clientset/fake/doc.go | 2 +- .../pkg/client/clientset_generated/clientset/fake/register.go | 2 +- .../pkg/client/clientset_generated/clientset/scheme/doc.go | 2 +- .../pkg/client/clientset_generated/clientset/scheme/register.go | 2 +- .../clientset_generated/clientset/typed/metrics/v1alpha1/doc.go | 2 +- .../clientset/typed/metrics/v1alpha1/fake/doc.go | 2 +- .../typed/metrics/v1alpha1/fake/fake_metrics_client.go | 2 +- .../clientset/typed/metrics/v1alpha1/fake/fake_nodemetrics.go | 2 +- .../clientset/typed/metrics/v1alpha1/fake/fake_podmetrics.go | 2 +- .../clientset/typed/metrics/v1alpha1/generated_expansion.go | 2 +- .../clientset/typed/metrics/v1alpha1/metrics_client.go | 2 +- .../clientset/typed/metrics/v1alpha1/nodemetrics.go | 2 +- .../clientset/typed/metrics/v1alpha1/podmetrics.go | 2 +- .../clientset_generated/clientset/typed/metrics/v1beta1/doc.go | 2 +- .../clientset/typed/metrics/v1beta1/fake/doc.go | 2 +- 
.../clientset/typed/metrics/v1beta1/fake/fake_metrics_client.go | 2 +- .../clientset/typed/metrics/v1beta1/fake/fake_nodemetrics.go | 2 +- .../clientset/typed/metrics/v1beta1/fake/fake_podmetrics.go | 2 +- .../clientset/typed/metrics/v1beta1/generated_expansion.go | 2 +- .../clientset/typed/metrics/v1beta1/metrics_client.go | 2 +- .../clientset/typed/metrics/v1beta1/nodemetrics.go | 2 +- .../clientset/typed/metrics/v1beta1/podmetrics.go | 2 +- .../pkg/apis/wardle/v1alpha1/zz_generated.conversion.go | 2 +- .../pkg/apis/wardle/v1alpha1/zz_generated.deepcopy.go | 2 +- .../pkg/apis/wardle/v1alpha1/zz_generated.defaults.go | 2 +- .../sample-apiserver/pkg/apis/wardle/zz_generated.deepcopy.go | 2 +- .../pkg/client/clientset/internalversion/clientset.go | 2 +- .../pkg/client/clientset/internalversion/doc.go | 2 +- .../clientset/internalversion/fake/clientset_generated.go | 2 +- .../pkg/client/clientset/internalversion/fake/doc.go | 2 +- .../pkg/client/clientset/internalversion/fake/register.go | 2 +- .../pkg/client/clientset/internalversion/scheme/doc.go | 2 +- .../pkg/client/clientset/internalversion/scheme/register.go | 2 +- .../internalversion/typed/wardle/internalversion/doc.go | 2 +- .../internalversion/typed/wardle/internalversion/fake/doc.go | 2 +- .../typed/wardle/internalversion/fake/fake_fischer.go | 2 +- .../typed/wardle/internalversion/fake/fake_flunder.go | 2 +- .../typed/wardle/internalversion/fake/fake_wardle_client.go | 2 +- .../internalversion/typed/wardle/internalversion/fischer.go | 2 +- .../internalversion/typed/wardle/internalversion/flunder.go | 2 +- .../typed/wardle/internalversion/generated_expansion.go | 2 +- .../typed/wardle/internalversion/wardle_client.go | 2 +- .../pkg/client/clientset/versioned/clientset.go | 2 +- .../sample-apiserver/pkg/client/clientset/versioned/doc.go | 2 +- .../pkg/client/clientset/versioned/fake/clientset_generated.go | 2 +- .../sample-apiserver/pkg/client/clientset/versioned/fake/doc.go | 2 +- 
.../pkg/client/clientset/versioned/fake/register.go | 2 +- .../pkg/client/clientset/versioned/scheme/doc.go | 2 +- .../pkg/client/clientset/versioned/scheme/register.go | 2 +- .../pkg/client/clientset/versioned/typed/wardle/v1alpha1/doc.go | 2 +- .../clientset/versioned/typed/wardle/v1alpha1/fake/doc.go | 2 +- .../versioned/typed/wardle/v1alpha1/fake/fake_fischer.go | 2 +- .../versioned/typed/wardle/v1alpha1/fake/fake_flunder.go | 2 +- .../versioned/typed/wardle/v1alpha1/fake/fake_wardle_client.go | 2 +- .../client/clientset/versioned/typed/wardle/v1alpha1/fischer.go | 2 +- .../client/clientset/versioned/typed/wardle/v1alpha1/flunder.go | 2 +- .../versioned/typed/wardle/v1alpha1/generated_expansion.go | 2 +- .../clientset/versioned/typed/wardle/v1alpha1/wardle_client.go | 2 +- .../pkg/client/informers/externalversions/factory.go | 2 +- .../pkg/client/informers/externalversions/generic.go | 2 +- .../externalversions/internalinterfaces/factory_interfaces.go | 2 +- .../pkg/client/informers/externalversions/wardle/interface.go | 2 +- .../informers/externalversions/wardle/v1alpha1/fischer.go | 2 +- .../informers/externalversions/wardle/v1alpha1/flunder.go | 2 +- .../informers/externalversions/wardle/v1alpha1/interface.go | 2 +- .../pkg/client/informers/internalversion/factory.go | 2 +- .../pkg/client/informers/internalversion/generic.go | 2 +- .../internalversion/internalinterfaces/factory_interfaces.go | 2 +- .../pkg/client/informers/internalversion/wardle/interface.go | 2 +- .../informers/internalversion/wardle/internalversion/fischer.go | 2 +- .../informers/internalversion/wardle/internalversion/flunder.go | 2 +- .../internalversion/wardle/internalversion/interface.go | 2 +- .../listers/wardle/internalversion/expansion_generated.go | 2 +- .../pkg/client/listers/wardle/internalversion/fischer.go | 2 +- .../pkg/client/listers/wardle/internalversion/flunder.go | 2 +- .../pkg/client/listers/wardle/v1alpha1/expansion_generated.go | 2 +- 
.../pkg/client/listers/wardle/v1alpha1/fischer.go | 2 +- .../pkg/client/listers/wardle/v1alpha1/flunder.go | 2 +- .../pkg/apis/samplecontroller/v1alpha1/zz_generated.deepcopy.go | 2 +- .../pkg/client/clientset/versioned/clientset.go | 2 +- .../sample-controller/pkg/client/clientset/versioned/doc.go | 2 +- .../pkg/client/clientset/versioned/fake/clientset_generated.go | 2 +- .../pkg/client/clientset/versioned/fake/doc.go | 2 +- .../pkg/client/clientset/versioned/fake/register.go | 2 +- .../pkg/client/clientset/versioned/scheme/doc.go | 2 +- .../pkg/client/clientset/versioned/scheme/register.go | 2 +- .../clientset/versioned/typed/samplecontroller/v1alpha1/doc.go | 2 +- .../versioned/typed/samplecontroller/v1alpha1/fake/doc.go | 2 +- .../versioned/typed/samplecontroller/v1alpha1/fake/fake_foo.go | 2 +- .../v1alpha1/fake/fake_samplecontroller_client.go | 2 +- .../clientset/versioned/typed/samplecontroller/v1alpha1/foo.go | 2 +- .../typed/samplecontroller/v1alpha1/generated_expansion.go | 2 +- .../typed/samplecontroller/v1alpha1/samplecontroller_client.go | 2 +- .../pkg/client/informers/externalversions/factory.go | 2 +- .../pkg/client/informers/externalversions/generic.go | 2 +- .../externalversions/internalinterfaces/factory_interfaces.go | 2 +- .../informers/externalversions/samplecontroller/interface.go | 2 +- .../informers/externalversions/samplecontroller/v1alpha1/foo.go | 2 +- .../externalversions/samplecontroller/v1alpha1/interface.go | 2 +- .../listers/samplecontroller/v1alpha1/expansion_generated.go | 2 +- .../pkg/client/listers/samplecontroller/v1alpha1/foo.go | 2 +- 1434 files changed, 1434 insertions(+), 1434 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go index 5cebdd5d5bc..0d7f685c3b8 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ 
// +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.deepcopy.go index 444eb489fe0..720496b407e 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go index 098153aa823..cf5182a6cf9 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go index 5c574f5f027..aa8c30e4b78 100644 --- a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/cmd/kubeadm/app/phases/etcd/spec/zz_generated.deepcopy.go b/cmd/kubeadm/app/phases/etcd/spec/zz_generated.deepcopy.go index 16d00bf0e8b..79217085e3f 100644 --- a/cmd/kubeadm/app/phases/etcd/spec/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/phases/etcd/spec/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/abac/v0/zz_generated.deepcopy.go b/pkg/apis/abac/v0/zz_generated.deepcopy.go index 1c0fe0c8b82..a4154af9825 100644 --- a/pkg/apis/abac/v0/zz_generated.deepcopy.go +++ b/pkg/apis/abac/v0/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/abac/v1beta1/zz_generated.conversion.go b/pkg/apis/abac/v1beta1/zz_generated.conversion.go index fe6204d2f59..f1eaf8a678b 100644 --- a/pkg/apis/abac/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/abac/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go b/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go index 05fbab899e9..3af42c83fcf 100644 --- a/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/abac/v1beta1/zz_generated.defaults.go b/pkg/apis/abac/v1beta1/zz_generated.defaults.go index e24e70be38b..b61dda74c23 100644 --- a/pkg/apis/abac/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/abac/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/abac/zz_generated.deepcopy.go b/pkg/apis/abac/zz_generated.deepcopy.go index dc41bc7cebb..791e9c2c6fd 100644 --- a/pkg/apis/abac/zz_generated.deepcopy.go +++ b/pkg/apis/abac/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/admission/v1beta1/zz_generated.conversion.go b/pkg/apis/admission/v1beta1/zz_generated.conversion.go index 55dec435d60..c0c245859cc 100644 --- a/pkg/apis/admission/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/admission/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/admission/v1beta1/zz_generated.defaults.go b/pkg/apis/admission/v1beta1/zz_generated.defaults.go index e24e70be38b..b61dda74c23 100644 --- a/pkg/apis/admission/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/admission/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/admission/zz_generated.deepcopy.go b/pkg/apis/admission/zz_generated.deepcopy.go index 3814ccc2920..84ce688d4b5 100644 --- a/pkg/apis/admission/zz_generated.deepcopy.go +++ b/pkg/apis/admission/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go b/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go index ef1d0fee81f..95f8559570a 100644 --- a/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go b/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go index 7e6df29d4ae..5e24d22cacd 100644 --- a/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go +++ b/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go b/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go index ec0300f27a7..ad65e647291 100644 --- a/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go b/pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go index a88a86e6bf9..16620a79f1a 100644 --- a/pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/admissionregistration/zz_generated.deepcopy.go b/pkg/apis/admissionregistration/zz_generated.deepcopy.go index 8bb1ab3fb6a..83562365c20 100644 --- a/pkg/apis/admissionregistration/zz_generated.deepcopy.go +++ b/pkg/apis/admissionregistration/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/apps/v1/zz_generated.conversion.go b/pkg/apis/apps/v1/zz_generated.conversion.go index e7503b94787..4f1398c55f0 100644 --- a/pkg/apis/apps/v1/zz_generated.conversion.go +++ b/pkg/apis/apps/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/apps/v1/zz_generated.defaults.go b/pkg/apis/apps/v1/zz_generated.defaults.go index d05eff3c5d6..8eb29e8d12b 100644 --- a/pkg/apis/apps/v1/zz_generated.defaults.go +++ b/pkg/apis/apps/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/apps/v1beta1/zz_generated.conversion.go b/pkg/apis/apps/v1beta1/zz_generated.conversion.go index 0bfb1e3f209..95bdc87c29d 100644 --- a/pkg/apis/apps/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/apps/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/apps/v1beta1/zz_generated.defaults.go b/pkg/apis/apps/v1beta1/zz_generated.defaults.go index 47c74ab2c4a..b28d0de74fc 100644 --- a/pkg/apis/apps/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/apps/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/apps/v1beta2/zz_generated.conversion.go b/pkg/apis/apps/v1beta2/zz_generated.conversion.go index cbe3ccaa91b..b253aede958 100644 --- a/pkg/apis/apps/v1beta2/zz_generated.conversion.go +++ b/pkg/apis/apps/v1beta2/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/apps/v1beta2/zz_generated.defaults.go b/pkg/apis/apps/v1beta2/zz_generated.defaults.go index ddf7f6a169e..5d79d92d8b1 100644 --- a/pkg/apis/apps/v1beta2/zz_generated.defaults.go +++ b/pkg/apis/apps/v1beta2/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/apps/zz_generated.deepcopy.go b/pkg/apis/apps/zz_generated.deepcopy.go index 5e8a7474685..74fab2edd20 100644 --- a/pkg/apis/apps/zz_generated.deepcopy.go +++ b/pkg/apis/apps/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/authentication/v1/zz_generated.conversion.go b/pkg/apis/authentication/v1/zz_generated.conversion.go index 9f335d61457..d98ef4a6fea 100644 --- a/pkg/apis/authentication/v1/zz_generated.conversion.go +++ b/pkg/apis/authentication/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/authentication/v1/zz_generated.defaults.go b/pkg/apis/authentication/v1/zz_generated.defaults.go index 6df448eb9fd..88d7af085be 100644 --- a/pkg/apis/authentication/v1/zz_generated.defaults.go +++ b/pkg/apis/authentication/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/authentication/v1beta1/zz_generated.conversion.go b/pkg/apis/authentication/v1beta1/zz_generated.conversion.go index 0fc0bf7ba79..c04aba53cbf 100644 --- a/pkg/apis/authentication/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/authentication/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/authentication/v1beta1/zz_generated.defaults.go b/pkg/apis/authentication/v1beta1/zz_generated.defaults.go index e24e70be38b..b61dda74c23 100644 --- a/pkg/apis/authentication/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/authentication/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/authentication/zz_generated.deepcopy.go b/pkg/apis/authentication/zz_generated.deepcopy.go index 8f3ca0afe71..8a7904b19cd 100644 --- a/pkg/apis/authentication/zz_generated.deepcopy.go +++ b/pkg/apis/authentication/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/authorization/v1/zz_generated.conversion.go b/pkg/apis/authorization/v1/zz_generated.conversion.go index 00caf7872a0..415a50ec84d 100644 --- a/pkg/apis/authorization/v1/zz_generated.conversion.go +++ b/pkg/apis/authorization/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/authorization/v1/zz_generated.defaults.go b/pkg/apis/authorization/v1/zz_generated.defaults.go index 6df448eb9fd..88d7af085be 100644 --- a/pkg/apis/authorization/v1/zz_generated.defaults.go +++ b/pkg/apis/authorization/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/authorization/v1beta1/zz_generated.conversion.go b/pkg/apis/authorization/v1beta1/zz_generated.conversion.go index 234b4ff0261..a6b08d93db5 100644 --- a/pkg/apis/authorization/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/authorization/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/authorization/v1beta1/zz_generated.defaults.go b/pkg/apis/authorization/v1beta1/zz_generated.defaults.go index e24e70be38b..b61dda74c23 100644 --- a/pkg/apis/authorization/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/authorization/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/authorization/zz_generated.deepcopy.go b/pkg/apis/authorization/zz_generated.deepcopy.go index cdc23a2097c..b35902ecec8 100644 --- a/pkg/apis/authorization/zz_generated.deepcopy.go +++ b/pkg/apis/authorization/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/autoscaling/v1/zz_generated.conversion.go b/pkg/apis/autoscaling/v1/zz_generated.conversion.go index bb1bedd5208..6a1c65489f4 100644 --- a/pkg/apis/autoscaling/v1/zz_generated.conversion.go +++ b/pkg/apis/autoscaling/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/autoscaling/v1/zz_generated.defaults.go b/pkg/apis/autoscaling/v1/zz_generated.defaults.go index 192e8f5011c..0cfceabce22 100644 --- a/pkg/apis/autoscaling/v1/zz_generated.defaults.go +++ b/pkg/apis/autoscaling/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go b/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go index 7b1f977df95..04e256fda7b 100644 --- a/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go +++ b/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/autoscaling/v2beta1/zz_generated.defaults.go b/pkg/apis/autoscaling/v2beta1/zz_generated.defaults.go index 5dfd8218dbe..42787931817 100644 --- a/pkg/apis/autoscaling/v2beta1/zz_generated.defaults.go +++ b/pkg/apis/autoscaling/v2beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/autoscaling/zz_generated.deepcopy.go b/pkg/apis/autoscaling/zz_generated.deepcopy.go index c0bccf3b90d..520fa8a577d 100644 --- a/pkg/apis/autoscaling/zz_generated.deepcopy.go +++ b/pkg/apis/autoscaling/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/batch/v1/zz_generated.conversion.go b/pkg/apis/batch/v1/zz_generated.conversion.go index dc346c5274d..e9e6f9e20e4 100644 --- a/pkg/apis/batch/v1/zz_generated.conversion.go +++ b/pkg/apis/batch/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/batch/v1/zz_generated.defaults.go b/pkg/apis/batch/v1/zz_generated.defaults.go index 976735a1a4e..8140c28fa60 100644 --- a/pkg/apis/batch/v1/zz_generated.defaults.go +++ b/pkg/apis/batch/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/batch/v1beta1/zz_generated.conversion.go b/pkg/apis/batch/v1beta1/zz_generated.conversion.go index b977a5e7419..677dad829cf 100644 --- a/pkg/apis/batch/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/batch/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/batch/v1beta1/zz_generated.defaults.go b/pkg/apis/batch/v1beta1/zz_generated.defaults.go index e809e80967c..938bb14eb47 100644 --- a/pkg/apis/batch/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/batch/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/batch/v2alpha1/zz_generated.conversion.go b/pkg/apis/batch/v2alpha1/zz_generated.conversion.go index 5f6896debc9..119b705ccc0 100644 --- a/pkg/apis/batch/v2alpha1/zz_generated.conversion.go +++ b/pkg/apis/batch/v2alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/batch/v2alpha1/zz_generated.defaults.go b/pkg/apis/batch/v2alpha1/zz_generated.defaults.go index 8983fcddade..427ba5cff0f 100644 --- a/pkg/apis/batch/v2alpha1/zz_generated.defaults.go +++ b/pkg/apis/batch/v2alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/batch/zz_generated.deepcopy.go b/pkg/apis/batch/zz_generated.deepcopy.go index 4e68dbd6d6d..27a03db2724 100644 --- a/pkg/apis/batch/zz_generated.deepcopy.go +++ b/pkg/apis/batch/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/certificates/v1beta1/zz_generated.conversion.go b/pkg/apis/certificates/v1beta1/zz_generated.conversion.go index 96780e6730d..8379477bf36 100644 --- a/pkg/apis/certificates/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/certificates/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/certificates/v1beta1/zz_generated.defaults.go b/pkg/apis/certificates/v1beta1/zz_generated.defaults.go index 155ea36921d..edab232d23c 100644 --- a/pkg/apis/certificates/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/certificates/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/certificates/zz_generated.deepcopy.go b/pkg/apis/certificates/zz_generated.deepcopy.go index 5704a4e6c62..4b3e1d556da 100644 --- a/pkg/apis/certificates/zz_generated.deepcopy.go +++ b/pkg/apis/certificates/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go b/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go index 296fa21d042..1b78cf1c2a3 100644 --- a/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go index 9e6a3b36d19..be2509123a4 100644 --- a/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go b/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go index 746d4d98026..96685c35ba4 100644 --- a/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go +++ b/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/componentconfig/zz_generated.deepcopy.go b/pkg/apis/componentconfig/zz_generated.deepcopy.go index 3722020a327..31fad659dfc 100644 --- a/pkg/apis/componentconfig/zz_generated.deepcopy.go +++ b/pkg/apis/componentconfig/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/core/v1/zz_generated.conversion.go b/pkg/apis/core/v1/zz_generated.conversion.go index 84a602bc465..34fb3a5ad80 100644 --- a/pkg/apis/core/v1/zz_generated.conversion.go +++ b/pkg/apis/core/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/core/v1/zz_generated.defaults.go b/pkg/apis/core/v1/zz_generated.defaults.go index c1ad46cd38a..468969442e0 100644 --- a/pkg/apis/core/v1/zz_generated.defaults.go +++ b/pkg/apis/core/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/core/zz_generated.deepcopy.go b/pkg/apis/core/zz_generated.deepcopy.go index c2b4e7b6a52..22913309a3e 100644 --- a/pkg/apis/core/zz_generated.deepcopy.go +++ b/pkg/apis/core/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/events/v1beta1/zz_generated.conversion.go b/pkg/apis/events/v1beta1/zz_generated.conversion.go index 345e3e96041..c588a11747a 100644 --- a/pkg/apis/events/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/events/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/events/v1beta1/zz_generated.defaults.go b/pkg/apis/events/v1beta1/zz_generated.defaults.go index e24e70be38b..b61dda74c23 100644 --- a/pkg/apis/events/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/events/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/pkg/apis/extensions/v1beta1/zz_generated.conversion.go index 320ad17fb63..14a4f09993b 100644 --- a/pkg/apis/extensions/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/extensions/v1beta1/zz_generated.defaults.go b/pkg/apis/extensions/v1beta1/zz_generated.defaults.go index f22282f85f8..6eb0bbedc54 100644 --- a/pkg/apis/extensions/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/extensions/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/extensions/zz_generated.deepcopy.go b/pkg/apis/extensions/zz_generated.deepcopy.go index ea801b3e8f5..852d4f09a9c 100644 --- a/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/pkg/apis/extensions/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go b/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go index 3cf6299140a..b38027736d9 100644 --- a/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/imagepolicy/v1alpha1/zz_generated.defaults.go b/pkg/apis/imagepolicy/v1alpha1/zz_generated.defaults.go index 7e6df29d4ae..5e24d22cacd 100644 --- a/pkg/apis/imagepolicy/v1alpha1/zz_generated.defaults.go +++ b/pkg/apis/imagepolicy/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/imagepolicy/zz_generated.deepcopy.go b/pkg/apis/imagepolicy/zz_generated.deepcopy.go index 985d6775372..790256b218f 100644 --- a/pkg/apis/imagepolicy/zz_generated.deepcopy.go +++ b/pkg/apis/imagepolicy/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/networking/v1/zz_generated.conversion.go b/pkg/apis/networking/v1/zz_generated.conversion.go index 0b1c29ddbf1..3a3b414b7a6 100644 --- a/pkg/apis/networking/v1/zz_generated.conversion.go +++ b/pkg/apis/networking/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/networking/v1/zz_generated.defaults.go b/pkg/apis/networking/v1/zz_generated.defaults.go index 0c7b575db98..5843b99ed54 100644 --- a/pkg/apis/networking/v1/zz_generated.defaults.go +++ b/pkg/apis/networking/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/networking/zz_generated.deepcopy.go b/pkg/apis/networking/zz_generated.deepcopy.go index aa5958efb1d..8ab8e33ec18 100644 --- a/pkg/apis/networking/zz_generated.deepcopy.go +++ b/pkg/apis/networking/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/policy/v1beta1/zz_generated.conversion.go b/pkg/apis/policy/v1beta1/zz_generated.conversion.go index aa07a7bd7d5..4e776634a2f 100644 --- a/pkg/apis/policy/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/policy/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/policy/v1beta1/zz_generated.defaults.go b/pkg/apis/policy/v1beta1/zz_generated.defaults.go index e24e70be38b..b61dda74c23 100644 --- a/pkg/apis/policy/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/policy/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/policy/zz_generated.deepcopy.go b/pkg/apis/policy/zz_generated.deepcopy.go index 564329c6019..539854524ca 100644 --- a/pkg/apis/policy/zz_generated.deepcopy.go +++ b/pkg/apis/policy/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/rbac/v1/zz_generated.conversion.go b/pkg/apis/rbac/v1/zz_generated.conversion.go index c46056a9263..e58d18435f4 100644 --- a/pkg/apis/rbac/v1/zz_generated.conversion.go +++ b/pkg/apis/rbac/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/rbac/v1/zz_generated.defaults.go b/pkg/apis/rbac/v1/zz_generated.defaults.go index 756d155b33b..32fae3fa9f0 100644 --- a/pkg/apis/rbac/v1/zz_generated.defaults.go +++ b/pkg/apis/rbac/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go b/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go index 6076e0d3da2..7d84a6ce5a6 100644 --- a/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go b/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go index 123ba49e915..ee2303e6554 100644 --- a/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go +++ b/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/rbac/v1beta1/zz_generated.conversion.go b/pkg/apis/rbac/v1beta1/zz_generated.conversion.go index d031b15fa34..952a4e4069b 100644 --- a/pkg/apis/rbac/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/rbac/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/rbac/v1beta1/zz_generated.defaults.go b/pkg/apis/rbac/v1beta1/zz_generated.defaults.go index 08ff0c71480..8486ab7a4bc 100644 --- a/pkg/apis/rbac/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/rbac/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/rbac/zz_generated.deepcopy.go b/pkg/apis/rbac/zz_generated.deepcopy.go index 8454f23086d..2d84b8bbeb6 100644 --- a/pkg/apis/rbac/zz_generated.deepcopy.go +++ b/pkg/apis/rbac/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/scheduling/v1alpha1/zz_generated.conversion.go b/pkg/apis/scheduling/v1alpha1/zz_generated.conversion.go index 6a99b58a6e5..988c8679a34 100644 --- a/pkg/apis/scheduling/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/scheduling/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/scheduling/v1alpha1/zz_generated.defaults.go b/pkg/apis/scheduling/v1alpha1/zz_generated.defaults.go index 7e6df29d4ae..5e24d22cacd 100644 --- a/pkg/apis/scheduling/v1alpha1/zz_generated.defaults.go +++ b/pkg/apis/scheduling/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/scheduling/zz_generated.deepcopy.go b/pkg/apis/scheduling/zz_generated.deepcopy.go index 7eb3fea0672..93b95da65d2 100644 --- a/pkg/apis/scheduling/zz_generated.deepcopy.go +++ b/pkg/apis/scheduling/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/settings/v1alpha1/zz_generated.conversion.go b/pkg/apis/settings/v1alpha1/zz_generated.conversion.go index de7bb8672ff..f55bd43926c 100644 --- a/pkg/apis/settings/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/settings/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/settings/v1alpha1/zz_generated.defaults.go b/pkg/apis/settings/v1alpha1/zz_generated.defaults.go index af5867ce3a4..ef7e61652fa 100644 --- a/pkg/apis/settings/v1alpha1/zz_generated.defaults.go +++ b/pkg/apis/settings/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/settings/zz_generated.deepcopy.go b/pkg/apis/settings/zz_generated.deepcopy.go index 742ebb50632..90ec26ed20c 100644 --- a/pkg/apis/settings/zz_generated.deepcopy.go +++ b/pkg/apis/settings/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/apis/storage/v1/zz_generated.conversion.go b/pkg/apis/storage/v1/zz_generated.conversion.go index 1e63af36e67..19e33a11ea6 100644 --- a/pkg/apis/storage/v1/zz_generated.conversion.go +++ b/pkg/apis/storage/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/storage/v1/zz_generated.defaults.go b/pkg/apis/storage/v1/zz_generated.defaults.go index 4db23e8cfba..3c2083f7f1f 100644 --- a/pkg/apis/storage/v1/zz_generated.defaults.go +++ b/pkg/apis/storage/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/storage/v1alpha1/zz_generated.conversion.go b/pkg/apis/storage/v1alpha1/zz_generated.conversion.go index 9bea7b3ad91..652cc2f82f6 100644 --- a/pkg/apis/storage/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/storage/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/storage/v1alpha1/zz_generated.defaults.go b/pkg/apis/storage/v1alpha1/zz_generated.defaults.go index 7e6df29d4ae..5e24d22cacd 100644 --- a/pkg/apis/storage/v1alpha1/zz_generated.defaults.go +++ b/pkg/apis/storage/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/storage/v1beta1/zz_generated.conversion.go b/pkg/apis/storage/v1beta1/zz_generated.conversion.go index 9637b2ea494..1338df136b5 100644 --- a/pkg/apis/storage/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/storage/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/storage/v1beta1/zz_generated.defaults.go b/pkg/apis/storage/v1beta1/zz_generated.defaults.go index 1200af6b071..07847aebdfd 100644 --- a/pkg/apis/storage/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/storage/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/storage/zz_generated.deepcopy.go b/pkg/apis/storage/zz_generated.deepcopy.go index 287623fd426..61ced04a4c4 100644 --- a/pkg/apis/storage/zz_generated.deepcopy.go +++ b/pkg/apis/storage/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/clientset.go b/pkg/client/clientset_generated/internalclientset/clientset.go index 8c3b8a3cb62..77314b2e714 100644 --- a/pkg/client/clientset_generated/internalclientset/clientset.go +++ b/pkg/client/clientset_generated/internalclientset/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/doc.go b/pkg/client/clientset_generated/internalclientset/doc.go index b667dd5157a..4ede718dee1 100644 --- a/pkg/client/clientset_generated/internalclientset/doc.go +++ b/pkg/client/clientset_generated/internalclientset/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go b/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go index 28efaac5a73..65bf12a7c04 100644 --- a/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go +++ b/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/fake/doc.go b/pkg/client/clientset_generated/internalclientset/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/pkg/client/clientset_generated/internalclientset/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/fake/register.go b/pkg/client/clientset_generated/internalclientset/fake/register.go index fe95ca73dd0..b5d85f76975 100644 --- a/pkg/client/clientset_generated/internalclientset/fake/register.go +++ b/pkg/client/clientset_generated/internalclientset/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/scheme/doc.go b/pkg/client/clientset_generated/internalclientset/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/pkg/client/clientset_generated/internalclientset/scheme/doc.go +++ b/pkg/client/clientset_generated/internalclientset/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/scheme/register.go b/pkg/client/clientset_generated/internalclientset/scheme/register.go index e9ef85e38b3..3bbf1846c60 100644 --- a/pkg/client/clientset_generated/internalclientset/scheme/register.go +++ b/pkg/client/clientset_generated/internalclientset/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go index 319d5ebd994..b2a30fa0d5c 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_admissionregistration_client.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_admissionregistration_client.go index 7e1a6978b85..097363daad6 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_admissionregistration_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_admissionregistration_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_initializerconfiguration.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_initializerconfiguration.go index 5d0d153491a..b69df0f6eb7 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_initializerconfiguration.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_initializerconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_mutatingwebhookconfiguration.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_mutatingwebhookconfiguration.go index 3660ccfbac8..ac62029d39b 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_mutatingwebhookconfiguration.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_mutatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_validatingwebhookconfiguration.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_validatingwebhookconfiguration.go index 4056e54b75d..a78bbfdd25c 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_validatingwebhookconfiguration.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake/fake_validatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go index d41780a7a66..06f4a44cf86 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go index 365bfdb4344..45e63241186 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go index a58d0fd1cdd..06f3a5f0b38 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go index 090cd6f6746..9a57c9a8174 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go +++ b/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go index 718e82c478c..184ebd38968 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go index 1cec65d626d..120a56cfd36 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go +++ b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_apps_client.go b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_apps_client.go index e677f9d283a..52151df20fa 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_apps_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_apps_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_controllerrevision.go b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_controllerrevision.go index f3ea25bb53d..9ca9e8cab23 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_controllerrevision.go +++ b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_statefulset.go b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_statefulset.go index 680142275d4..612832c9395 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_statefulset.go +++ b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake/fake_statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go index 3b3f37336a7..7fd67b1f712 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go index ae64cc00881..cb496b53a89 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go +++ b/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/authentication_client.go b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/authentication_client.go index 709621a10da..68c3efadc47 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/authentication_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_authentication_client.go b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_authentication_client.go index ec72845a061..d92fa512a4f 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_authentication_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_tokenreview.go b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_tokenreview.go index f88556a649b..1854aed7e88 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_tokenreview.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake/fake_tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/generated_expansion.go index b0f76eeede5..36261d29dad 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview.go b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview.go index c0880edb874..16cbd1aaf6d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/authorization_client.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/authorization_client.go index a5aeb4831ec..3da3b1f5277 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/authorization_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/authorization_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_authorization_client.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_authorization_client.go index 65689a76490..d5be9aba1da 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_authorization_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_authorization_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_localsubjectaccessreview.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_localsubjectaccessreview.go index ddc53b8dd6c..e7f649e8d4c 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_localsubjectaccessreview.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectaccessreview.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectaccessreview.go index 7af01048ec7..4f5f5269277 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectaccessreview.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectrulesreview.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectrulesreview.go index 59841af9dbd..4c1f7f02858 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectrulesreview.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_subjectaccessreview.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_subjectaccessreview.go index e0a92c737a8..e799e0e7ee1 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_subjectaccessreview.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake/fake_subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/generated_expansion.go index b0f76eeede5..36261d29dad 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview.go index bf02fad768f..5a2916aa82b 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview.go index cb19357c487..dd3120e199c 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectrulesreview.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectrulesreview.go index de80117b57b..89015541d51 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectrulesreview.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview.go b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview.go index 456b469b196..67a2d9304ef 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview.go +++ b/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/autoscaling_client.go b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/autoscaling_client.go index e0752ab5ff5..4f7d5ab377c 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/autoscaling_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_autoscaling_client.go b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_autoscaling_client.go index a6098274bcd..1da546a7d0f 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_autoscaling_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_horizontalpodautoscaler.go b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_horizontalpodautoscaler.go index 519a50971e7..81afddd06d3 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_horizontalpodautoscaler.go +++ b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake/fake_horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/generated_expansion.go index bf7296c51a1..413f0db67f9 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go index 4fb58f612fb..9657f0408b1 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go +++ b/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/batch_client.go b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/batch_client.go index 97be023a7ba..152ab445681 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/batch_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go index fb0737ef811..12ef15132bb 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go +++ b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_batch_client.go b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_batch_client.go index 34e3f16e40d..770887f60f6 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_batch_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_cronjob.go b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_cronjob.go index 8da7ded25db..6b91892c1ee 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_cronjob.go +++ b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_job.go b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_job.go index 6e10cfd1384..154e2932932 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_job.go +++ b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake/fake_job.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/generated_expansion.go index 28a62926cd0..9fbcb660708 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go index 8098046f4f5..a1a97718d51 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go +++ b/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificates_client.go b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificates_client.go index 53e6142d0c0..4ccb172fa74 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificates_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificates_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go index 0dcbe23f8b2..51232f9f5c6 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go +++ b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificates_client.go b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificates_client.go index e38333579cf..f3a423f9609 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificates_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificates_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificatesigningrequest.go b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificatesigningrequest.go index ad1379ddb39..d02bc624b74 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificatesigningrequest.go +++ b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake/fake_certificatesigningrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/generated_expansion.go index b0f76eeede5..36261d29dad 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go index 66b4b464ffe..290ee6ebba6 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go index 7f5adbc4498..84f39b2ffb0 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/core_client.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/core_client.go index 10313c3cb62..593b8872abf 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/core_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/core_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go index 8a35c807a06..4f1a2b76776 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go index cad5b84a3c9..0d7e427a295 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_componentstatus.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_componentstatus.go index 48151941c76..d6171dce0d4 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_componentstatus.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_componentstatus.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_configmap.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_configmap.go index 94b5e59b932..798efa92308 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_configmap.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_configmap.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_core_client.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_core_client.go index 511cb36dbd1..6575e13da10 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_core_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_core_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_endpoints.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_endpoints.go index bfe1ee92487..4918baee871 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_endpoints.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_endpoints.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event.go index 52dfe166742..04240b59f92 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_limitrange.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_limitrange.go index 89287b620ba..75c2516dd53 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_limitrange.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_limitrange.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_namespace.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_namespace.go index 7c5e8353ea1..c58b99c6c67 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_namespace.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_node.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_node.go index 4fdff82f6cd..fa049258100 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_node.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_node.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolume.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolume.go index a91ff70b31e..046e83d6843 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolume.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolume.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolumeclaim.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolumeclaim.go index db5659ea95a..141cca4bbfe 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolumeclaim.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_persistentvolumeclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_pod.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_pod.go index 2d4536ac666..7af6f9dc8ae 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_pod.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_pod.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_podtemplate.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_podtemplate.go index 4da291103f2..6de3800bb65 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_podtemplate.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_podtemplate.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_replicationcontroller.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_replicationcontroller.go index 9e14923e84d..8d0eb00074e 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_replicationcontroller.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_replicationcontroller.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_resourcequota.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_resourcequota.go index b73040db664..3db010b7651 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_resourcequota.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_secret.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_secret.go index 5920dba25b9..13a47e4e748 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_secret.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_secret.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_service.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_service.go index 4c15e2e2fcf..8bc747d7465 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_service.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_service.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_serviceaccount.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_serviceaccount.go index 1776b4ed671..d71ad3e4816 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_serviceaccount.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake/fake_serviceaccount.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/generated_expansion.go index 6591a3af0da..bba3e73d108 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go index de59dda9981..41ab70187b6 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go index f4ae5388303..9e2e54b1ffe 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go index b5c92087bf9..a315e0f5c68 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go index 9f8921c4811..7ea769f1a0d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go index d8e2e5a3e9b..8c94f646554 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go index 7c855532909..e7f6a4099bd 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go index 49723c12070..0919e64081d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go index 5c787f7764c..9302f18f51a 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go index 5c5c3f13646..20e3ed8e9bd 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go index 36a1a16e0a9..478430f51f8 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go index 6a9cfc0cb67..239feca1f43 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go index 75f0191f6fa..d405955d172 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go +++ b/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/events_client.go b/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/events_client.go index e2181c74f48..dcadcff69ea 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/events_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/events_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/fake/fake_events_client.go b/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/fake/fake_events_client.go index 19fd2366b76..0bcfe38d006 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/fake/fake_events_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/fake/fake_events_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/generated_expansion.go index b0f76eeede5..36261d29dad 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go index c97e59d31a1..567e2d8d446 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go index 919ab32e2ae..4078b837dd1 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go index 1a26d5d3028..48f3fef1ea8 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_daemonset.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_daemonset.go index c2c4de2516b..9605d275d98 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_daemonset.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_deployment.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_deployment.go index 73d8730c16f..69dc3ae4e8a 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_deployment.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_extensions_client.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_extensions_client.go index 1032452a79e..144418c738b 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_extensions_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_extensions_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_ingress.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_ingress.go index f6293c03943..07ad3f67a3b 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_ingress.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_podsecuritypolicy.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_podsecuritypolicy.go index 1e75456f141..c58bbf27a4b 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_podsecuritypolicy.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_podsecuritypolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_replicaset.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_replicaset.go index 3a5a59f1160..588d06e2ea4 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_replicaset.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake/fake_replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go index cf6ee02f033..de02b2c1a4b 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go index 1b7babfe88d..6a7fb410b2c 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/podsecuritypolicy.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/podsecuritypolicy.go index 2dc679c35c3..14995cd4a7e 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/podsecuritypolicy.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/podsecuritypolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go index 5da973b7168..231826f1ac7 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go +++ b/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/fake_networking_client.go b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/fake_networking_client.go index 9c0941a3108..6be9641fa81 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/fake_networking_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/fake_networking_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/fake_networkpolicy.go b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/fake_networkpolicy.go index 5585a50e3fb..357ebb82f02 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/fake_networkpolicy.go +++ b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake/fake_networkpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/generated_expansion.go index c2c8709d691..343fed42cb4 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networking_client.go b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networking_client.go index 4469af17165..53a927adf41 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networking_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networking_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go index 23cf7bcf649..e6aa62cc13e 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go +++ b/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction.go b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction.go index a43dabbb19d..42760568b35 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction.go +++ b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/eviction.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction.go b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction.go index 52abe617c5d..1d699bd1959 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction.go +++ b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_poddisruptionbudget.go b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_poddisruptionbudget.go index e6e411e7ddc..d8d34ed6f4d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_poddisruptionbudget.go +++ b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_poddisruptionbudget.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_policy_client.go b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_policy_client.go index 75012322141..6f8f5901af1 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_policy_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_policy_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/generated_expansion.go index d323ea6c626..a5d8910e60e 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go index 232a5508209..2ae24ea5620 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go +++ b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/policy_client.go b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/policy_client.go index 04c7b36e352..86eebbdf86e 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/policy_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/policy_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go index 245d36d9309..2d9c34ffe07 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go index cdad43a63a3..103133da3af 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrole.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrole.go index cf1cea4f220..3fe3b2c16a3 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrole.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrolebinding.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrolebinding.go index 6dfbde29a6e..819dd653a28 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrolebinding.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rbac_client.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rbac_client.go index ab1f8b30a86..e6431ed0f6f 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rbac_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_role.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_role.go index 6f67a77766c..8361b08a877 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_role.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rolebinding.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rolebinding.go index f92aafdd2bd..4bbf33bbea5 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rolebinding.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake/fake_rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/generated_expansion.go index 749e7d2ca82..35e44fd40cd 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rbac_client.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rbac_client.go index 6e5a608cf32..c1a5bce5691 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rbac_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go index c94637bbd86..46f2d54922c 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go index 1248a99f7ce..6fff7a16405 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go +++ b/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/fake_priorityclass.go b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/fake_priorityclass.go index b39c6994de8..fe59ea5a445 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/fake_priorityclass.go +++ b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/fake_priorityclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/fake_scheduling_client.go b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/fake_scheduling_client.go index 0452e4f7a2f..6618b268725 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/fake_scheduling_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/fake/fake_scheduling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/generated_expansion.go index 83f1102b3d6..8a32bee4ef4 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go index 20014824544..094a77c48e2 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go +++ b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/scheduling_client.go b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/scheduling_client.go index c17b733f894..5500c19a22a 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/scheduling_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/scheduling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/fake_podpreset.go b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/fake_podpreset.go index 76d13d6a997..ef3bd6d2740 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/fake_podpreset.go +++ b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/fake_podpreset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/fake_settings_client.go b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/fake_settings_client.go index 04776e59dda..cefb9e443cc 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/fake_settings_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake/fake_settings_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/generated_expansion.go index b82b5f75184..5cc4c47d171 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go index c2b4220a592..c230b097417 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go +++ b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/settings_client.go b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/settings_client.go index 448280d54b4..797355725d4 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/settings_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/settings_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/doc.go b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/doc.go +++ b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storage_client.go b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storage_client.go index 0dce78b5a63..3b64d29d68f 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storage_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storageclass.go b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storageclass.go index f076b9ef1d0..4c52ec24faa 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storageclass.go +++ b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_volumeattachment.go b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_volumeattachment.go index 68c421d88ac..0218d1dd92f 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_volumeattachment.go +++ b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake/fake_volumeattachment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go index a1b2c0f5b84..17366dbd688 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go +++ b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go index 2eca72b8f53..b482df23b10 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go +++ b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go index a23b7b0082e..704036ad7bd 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go +++ b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go index da7fb9e246a..b3dd6f6b5cc 100644 --- a/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go +++ b/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/admissionregistration/interface.go b/pkg/client/informers/informers_generated/internalversion/admissionregistration/interface.go index 30943e156ee..1a2aad20530 100644 --- a/pkg/client/informers/informers_generated/internalversion/admissionregistration/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/admissionregistration/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go index bc31ce10214..68466616583 100644 --- a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go +++ b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/interface.go index 0d7bec0173c..4afa5cd9d1c 100644 --- a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go index 409f36b965d..075caa32531 100644 --- a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go +++ b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go index 3242e317f50..f513b63487d 100644 --- a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go +++ b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/apps/interface.go b/pkg/client/informers/informers_generated/internalversion/apps/interface.go index 7467b3eec09..a847b158afd 100644 --- a/pkg/client/informers/informers_generated/internalversion/apps/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/apps/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go b/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go index 27407a12d6b..e03fe17f6e3 100644 --- a/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go +++ b/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/apps/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/apps/internalversion/interface.go index 3ceeae009b1..1c75f780edb 100644 --- a/pkg/client/informers/informers_generated/internalversion/apps/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/apps/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go b/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go index 055612f8d52..6e3bb29b802 100644 --- a/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go +++ b/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/autoscaling/interface.go b/pkg/client/informers/informers_generated/internalversion/autoscaling/interface.go index 00e9e7f0c9a..930736e5bc7 100644 --- a/pkg/client/informers/informers_generated/internalversion/autoscaling/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/autoscaling/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go b/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go index 4ba40676aae..7232de82217 100644 --- a/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go +++ b/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/interface.go index 3ac44fa6304..37690fd99a3 100644 --- a/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/batch/interface.go b/pkg/client/informers/informers_generated/internalversion/batch/interface.go index ec3bef48d40..afb31334d9f 100644 --- a/pkg/client/informers/informers_generated/internalversion/batch/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/batch/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go b/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go index eeff85dadc0..1f234cfb80c 100644 --- a/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go +++ b/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/batch/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/batch/internalversion/interface.go index ca29cc526b1..7064bd64c77 100644 --- a/pkg/client/informers/informers_generated/internalversion/batch/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/batch/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go b/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go index 1430f5b128c..023ca84faaf 100644 --- a/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go +++ b/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/certificates/interface.go b/pkg/client/informers/informers_generated/internalversion/certificates/interface.go index 881fc209e75..8aa1b6eb878 100644 --- a/pkg/client/informers/informers_generated/internalversion/certificates/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/certificates/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go b/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go index d4608c9e3a1..9ec66c83e01 100644 --- a/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go +++ b/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/interface.go index d017cfceae1..b319d89c87c 100644 --- a/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/interface.go b/pkg/client/informers/informers_generated/internalversion/core/interface.go index 12dab3466f3..5be46b908f7 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/core/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go index 805e45bb53d..ec94828e3b7 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go index f96cdf8d9d3..1681a23540a 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go index f45d518174e..20a9319bf00 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go index 936b6018664..0a3d085ac29 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/interface.go index 295600cb5a9..48a4e4a25a6 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go index e167da6e37b..37181403c94 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go index da9f88eaa8c..8d321e22264 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go index 5a112bf8986..55ea586661e 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go index ecccd769ff7..90c539dc293 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go index bc0c55a12c2..57ceee071e3 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go index cf72a57fcd4..53029da5d25 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go index 536fafd63f8..d7b3551d550 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go index 4741b32b41c..defb354ec7a 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go index 2a50f2c498b..ea1f1fa026d 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go index 35c4d2067d1..41bb46aaa92 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go index 086529166d6..55bc3328d3b 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go index 54d4509c33b..aff62b4bc5c 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/interface.go b/pkg/client/informers/informers_generated/internalversion/extensions/interface.go index b909a563ff3..55b3aa788b7 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go index 21479d96ee9..ac77b8d34be 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go index 6b1044a465d..36b8eda664a 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go index f4024449441..41ca2e32821 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/interface.go index 2c5f3212d42..7d5d8573584 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go index c1015ae1018..dd4254f7337 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go index 7a4b889f6d0..5ec97fc57d1 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/factory.go b/pkg/client/informers/informers_generated/internalversion/factory.go index e66407f88cc..8d006ad7d8a 100644 --- a/pkg/client/informers/informers_generated/internalversion/factory.go +++ b/pkg/client/informers/informers_generated/internalversion/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/generic.go b/pkg/client/informers/informers_generated/internalversion/generic.go index 1de5711673f..ee1c51ce8ec 100644 --- a/pkg/client/informers/informers_generated/internalversion/generic.go +++ b/pkg/client/informers/informers_generated/internalversion/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go b/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go index 9c7a9190588..15fe3e9964a 100644 --- a/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go +++ b/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/networking/interface.go b/pkg/client/informers/informers_generated/internalversion/networking/interface.go index e6b77621297..bf6917bb85d 100644 --- a/pkg/client/informers/informers_generated/internalversion/networking/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/networking/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/networking/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/networking/internalversion/interface.go index 8a87627750b..48b5b4852cc 100644 --- a/pkg/client/informers/informers_generated/internalversion/networking/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/networking/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go b/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go index 0b094a7ce0f..2e9a688a79b 100644 --- a/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go +++ b/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/policy/interface.go b/pkg/client/informers/informers_generated/internalversion/policy/interface.go index 2860283749b..93b04cf43f3 100644 --- a/pkg/client/informers/informers_generated/internalversion/policy/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/policy/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/policy/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/policy/internalversion/interface.go index 74773e72e39..38239a69eb2 100644 --- a/pkg/client/informers/informers_generated/internalversion/policy/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/policy/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go b/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go index 431d5898b4a..01c5b7be820 100644 --- a/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go +++ b/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/rbac/interface.go b/pkg/client/informers/informers_generated/internalversion/rbac/interface.go index bcd51670144..4d6cec593f4 100644 --- a/pkg/client/informers/informers_generated/internalversion/rbac/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/rbac/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go index d723beeeb4f..42b69e1377a 100644 --- a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go +++ b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go index 07b15deaadf..7ba4297f2a7 100644 --- a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go +++ b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/interface.go index ac8b2040c0f..920ebda64dc 100644 --- a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go index 7a736633d06..f09a5a230be 100644 --- a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go +++ b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go index 6b0560953e7..5382f2d130e 100644 --- a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go +++ b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/scheduling/interface.go b/pkg/client/informers/informers_generated/internalversion/scheduling/interface.go index 54b493e2372..e9f36dc1e02 100644 --- a/pkg/client/informers/informers_generated/internalversion/scheduling/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/scheduling/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/interface.go index 37dd2d17d94..4e856b0b671 100644 --- a/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go b/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go index 68600f23b5b..addf24cd4aa 100644 --- a/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go +++ b/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/settings/interface.go b/pkg/client/informers/informers_generated/internalversion/settings/interface.go index 6683fbdf54b..7e3ce2132c9 100644 --- a/pkg/client/informers/informers_generated/internalversion/settings/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/settings/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/settings/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/settings/internalversion/interface.go index 679c8a2d563..b518c14f2f4 100644 --- a/pkg/client/informers/informers_generated/internalversion/settings/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/settings/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go b/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go index 5ecfe1e6d0b..93a1dbe2bd8 100644 --- a/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go +++ b/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/storage/interface.go b/pkg/client/informers/informers_generated/internalversion/storage/interface.go index 01c55b1b6af..2bffda1951c 100644 --- a/pkg/client/informers/informers_generated/internalversion/storage/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/storage/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/storage/internalversion/interface.go b/pkg/client/informers/informers_generated/internalversion/storage/internalversion/interface.go index cea995f8cdc..71960e41431 100644 --- a/pkg/client/informers/informers_generated/internalversion/storage/internalversion/interface.go +++ b/pkg/client/informers/informers_generated/internalversion/storage/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go b/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go index 01e93262789..6c0d43ea527 100644 --- a/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go +++ b/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go b/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go index 54d411f4322..62a4bca5993 100644 --- a/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go +++ b/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/admissionregistration/internalversion/expansion_generated.go b/pkg/client/listers/admissionregistration/internalversion/expansion_generated.go index 366db3d7745..c8cb4b85ea4 100644 --- a/pkg/client/listers/admissionregistration/internalversion/expansion_generated.go +++ b/pkg/client/listers/admissionregistration/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/admissionregistration/internalversion/initializerconfiguration.go b/pkg/client/listers/admissionregistration/internalversion/initializerconfiguration.go index 0a67f7d8e8b..34641e94dcb 100644 --- a/pkg/client/listers/admissionregistration/internalversion/initializerconfiguration.go +++ b/pkg/client/listers/admissionregistration/internalversion/initializerconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/admissionregistration/internalversion/mutatingwebhookconfiguration.go b/pkg/client/listers/admissionregistration/internalversion/mutatingwebhookconfiguration.go index 31fcbdba72d..0f69668f5bb 100644 --- a/pkg/client/listers/admissionregistration/internalversion/mutatingwebhookconfiguration.go +++ b/pkg/client/listers/admissionregistration/internalversion/mutatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/admissionregistration/internalversion/validatingwebhookconfiguration.go b/pkg/client/listers/admissionregistration/internalversion/validatingwebhookconfiguration.go index 9b4693584f8..e17c0bec989 100644 --- a/pkg/client/listers/admissionregistration/internalversion/validatingwebhookconfiguration.go +++ b/pkg/client/listers/admissionregistration/internalversion/validatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/apps/internalversion/controllerrevision.go b/pkg/client/listers/apps/internalversion/controllerrevision.go index 89efbf8c8cb..9d7be2425ae 100644 --- a/pkg/client/listers/apps/internalversion/controllerrevision.go +++ b/pkg/client/listers/apps/internalversion/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/apps/internalversion/expansion_generated.go b/pkg/client/listers/apps/internalversion/expansion_generated.go index 9aa9ff54cfd..caae093af0d 100644 --- a/pkg/client/listers/apps/internalversion/expansion_generated.go +++ b/pkg/client/listers/apps/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/apps/internalversion/statefulset.go b/pkg/client/listers/apps/internalversion/statefulset.go index 0548d14b0ed..b43156f59e3 100644 --- a/pkg/client/listers/apps/internalversion/statefulset.go +++ b/pkg/client/listers/apps/internalversion/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/authentication/internalversion/expansion_generated.go b/pkg/client/listers/authentication/internalversion/expansion_generated.go index 18211cd78de..bf33d4c5df4 100644 --- a/pkg/client/listers/authentication/internalversion/expansion_generated.go +++ b/pkg/client/listers/authentication/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/authentication/internalversion/tokenreview.go b/pkg/client/listers/authentication/internalversion/tokenreview.go index ca878ccd343..312161ba729 100644 --- a/pkg/client/listers/authentication/internalversion/tokenreview.go +++ b/pkg/client/listers/authentication/internalversion/tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/authorization/internalversion/expansion_generated.go b/pkg/client/listers/authorization/internalversion/expansion_generated.go index 7715a480aec..985fe34a01e 100644 --- a/pkg/client/listers/authorization/internalversion/expansion_generated.go +++ b/pkg/client/listers/authorization/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/authorization/internalversion/localsubjectaccessreview.go b/pkg/client/listers/authorization/internalversion/localsubjectaccessreview.go index 41e780e80e3..dbf71c0faa3 100644 --- a/pkg/client/listers/authorization/internalversion/localsubjectaccessreview.go +++ b/pkg/client/listers/authorization/internalversion/localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/authorization/internalversion/selfsubjectaccessreview.go b/pkg/client/listers/authorization/internalversion/selfsubjectaccessreview.go index b883ef6035e..d6fc6ec032d 100644 --- a/pkg/client/listers/authorization/internalversion/selfsubjectaccessreview.go +++ b/pkg/client/listers/authorization/internalversion/selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/authorization/internalversion/selfsubjectrulesreview.go b/pkg/client/listers/authorization/internalversion/selfsubjectrulesreview.go index 9efca6cbf81..e34695af6fa 100644 --- a/pkg/client/listers/authorization/internalversion/selfsubjectrulesreview.go +++ b/pkg/client/listers/authorization/internalversion/selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/authorization/internalversion/subjectaccessreview.go b/pkg/client/listers/authorization/internalversion/subjectaccessreview.go index d6980c8d69d..f8ef9460cee 100644 --- a/pkg/client/listers/authorization/internalversion/subjectaccessreview.go +++ b/pkg/client/listers/authorization/internalversion/subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/autoscaling/internalversion/expansion_generated.go b/pkg/client/listers/autoscaling/internalversion/expansion_generated.go index 5f8b6b6d5ba..c1756faa9eb 100644 --- a/pkg/client/listers/autoscaling/internalversion/expansion_generated.go +++ b/pkg/client/listers/autoscaling/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/autoscaling/internalversion/horizontalpodautoscaler.go b/pkg/client/listers/autoscaling/internalversion/horizontalpodautoscaler.go index 280424c881b..69269d12fc4 100644 --- a/pkg/client/listers/autoscaling/internalversion/horizontalpodautoscaler.go +++ b/pkg/client/listers/autoscaling/internalversion/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/batch/internalversion/cronjob.go b/pkg/client/listers/batch/internalversion/cronjob.go index 8e5dcb8982b..3df69bf9932 100644 --- a/pkg/client/listers/batch/internalversion/cronjob.go +++ b/pkg/client/listers/batch/internalversion/cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/batch/internalversion/expansion_generated.go b/pkg/client/listers/batch/internalversion/expansion_generated.go index 5499876f60b..5334bff9279 100644 --- a/pkg/client/listers/batch/internalversion/expansion_generated.go +++ b/pkg/client/listers/batch/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/batch/internalversion/job.go b/pkg/client/listers/batch/internalversion/job.go index e02319336bb..139f794f23b 100644 --- a/pkg/client/listers/batch/internalversion/job.go +++ b/pkg/client/listers/batch/internalversion/job.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/certificates/internalversion/certificatesigningrequest.go b/pkg/client/listers/certificates/internalversion/certificatesigningrequest.go index 4ded13d7a60..6b3d96bb6b0 100644 --- a/pkg/client/listers/certificates/internalversion/certificatesigningrequest.go +++ b/pkg/client/listers/certificates/internalversion/certificatesigningrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/certificates/internalversion/expansion_generated.go b/pkg/client/listers/certificates/internalversion/expansion_generated.go index c13b24a7846..9d8c9b52e85 100644 --- a/pkg/client/listers/certificates/internalversion/expansion_generated.go +++ b/pkg/client/listers/certificates/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/componentstatus.go b/pkg/client/listers/core/internalversion/componentstatus.go index 0be8bc557de..34672845411 100644 --- a/pkg/client/listers/core/internalversion/componentstatus.go +++ b/pkg/client/listers/core/internalversion/componentstatus.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/configmap.go b/pkg/client/listers/core/internalversion/configmap.go index 539a9751d40..e3cb54f6896 100644 --- a/pkg/client/listers/core/internalversion/configmap.go +++ b/pkg/client/listers/core/internalversion/configmap.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/endpoints.go b/pkg/client/listers/core/internalversion/endpoints.go index a65c98b0bb0..181f2ef52de 100644 --- a/pkg/client/listers/core/internalversion/endpoints.go +++ b/pkg/client/listers/core/internalversion/endpoints.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/event.go b/pkg/client/listers/core/internalversion/event.go index 2c04cd1ddd0..0f029d19744 100644 --- a/pkg/client/listers/core/internalversion/event.go +++ b/pkg/client/listers/core/internalversion/event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/expansion_generated.go b/pkg/client/listers/core/internalversion/expansion_generated.go index d184d4aa34f..506f5a84ab7 100644 --- a/pkg/client/listers/core/internalversion/expansion_generated.go +++ b/pkg/client/listers/core/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/limitrange.go b/pkg/client/listers/core/internalversion/limitrange.go index 92bcf032e0d..caf17946c6a 100644 --- a/pkg/client/listers/core/internalversion/limitrange.go +++ b/pkg/client/listers/core/internalversion/limitrange.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/core/internalversion/namespace.go b/pkg/client/listers/core/internalversion/namespace.go index a429b48a616..db1b724bf5c 100644 --- a/pkg/client/listers/core/internalversion/namespace.go +++ b/pkg/client/listers/core/internalversion/namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/node.go b/pkg/client/listers/core/internalversion/node.go index e722b0fe65b..3e870fa437b 100644 --- a/pkg/client/listers/core/internalversion/node.go +++ b/pkg/client/listers/core/internalversion/node.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/persistentvolume.go b/pkg/client/listers/core/internalversion/persistentvolume.go index e5b0f7fff07..a4acc918165 100644 --- a/pkg/client/listers/core/internalversion/persistentvolume.go +++ b/pkg/client/listers/core/internalversion/persistentvolume.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/persistentvolumeclaim.go b/pkg/client/listers/core/internalversion/persistentvolumeclaim.go index bfd54a63f82..0e844e2a6e7 100644 --- a/pkg/client/listers/core/internalversion/persistentvolumeclaim.go +++ b/pkg/client/listers/core/internalversion/persistentvolumeclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/pod.go b/pkg/client/listers/core/internalversion/pod.go index c89ae6f2084..50c423cc761 100644 --- a/pkg/client/listers/core/internalversion/pod.go +++ b/pkg/client/listers/core/internalversion/pod.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/podtemplate.go b/pkg/client/listers/core/internalversion/podtemplate.go index ec834331435..b9107bae96e 100644 --- a/pkg/client/listers/core/internalversion/podtemplate.go +++ b/pkg/client/listers/core/internalversion/podtemplate.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/replicationcontroller.go b/pkg/client/listers/core/internalversion/replicationcontroller.go index 77cf24677f0..acd50236613 100644 --- a/pkg/client/listers/core/internalversion/replicationcontroller.go +++ b/pkg/client/listers/core/internalversion/replicationcontroller.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/core/internalversion/resourcequota.go b/pkg/client/listers/core/internalversion/resourcequota.go index 2b695099bed..af9f1af09cf 100644 --- a/pkg/client/listers/core/internalversion/resourcequota.go +++ b/pkg/client/listers/core/internalversion/resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/secret.go b/pkg/client/listers/core/internalversion/secret.go index e5585d9e0d2..18cd65735d0 100644 --- a/pkg/client/listers/core/internalversion/secret.go +++ b/pkg/client/listers/core/internalversion/secret.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/service.go b/pkg/client/listers/core/internalversion/service.go index 34f3d7747ff..a2d0644af48 100644 --- a/pkg/client/listers/core/internalversion/service.go +++ b/pkg/client/listers/core/internalversion/service.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/core/internalversion/serviceaccount.go b/pkg/client/listers/core/internalversion/serviceaccount.go index 59a0e8aef7b..78e8e96e42b 100644 --- a/pkg/client/listers/core/internalversion/serviceaccount.go +++ b/pkg/client/listers/core/internalversion/serviceaccount.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/extensions/internalversion/daemonset.go b/pkg/client/listers/extensions/internalversion/daemonset.go index a9716d75f3b..36f6b1e3918 100644 --- a/pkg/client/listers/extensions/internalversion/daemonset.go +++ b/pkg/client/listers/extensions/internalversion/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/extensions/internalversion/deployment.go b/pkg/client/listers/extensions/internalversion/deployment.go index b393add6882..da8b8efd0c0 100644 --- a/pkg/client/listers/extensions/internalversion/deployment.go +++ b/pkg/client/listers/extensions/internalversion/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/extensions/internalversion/expansion_generated.go b/pkg/client/listers/extensions/internalversion/expansion_generated.go index 1872c99f048..6b63294a7e1 100644 --- a/pkg/client/listers/extensions/internalversion/expansion_generated.go +++ b/pkg/client/listers/extensions/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/extensions/internalversion/ingress.go b/pkg/client/listers/extensions/internalversion/ingress.go index 8e295804a11..79892903afc 100644 --- a/pkg/client/listers/extensions/internalversion/ingress.go +++ b/pkg/client/listers/extensions/internalversion/ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/extensions/internalversion/podsecuritypolicy.go b/pkg/client/listers/extensions/internalversion/podsecuritypolicy.go index f57b903989e..8db690522cd 100644 --- a/pkg/client/listers/extensions/internalversion/podsecuritypolicy.go +++ b/pkg/client/listers/extensions/internalversion/podsecuritypolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/extensions/internalversion/replicaset.go b/pkg/client/listers/extensions/internalversion/replicaset.go index 03d53d77c1e..a6d0f4c26e9 100644 --- a/pkg/client/listers/extensions/internalversion/replicaset.go +++ b/pkg/client/listers/extensions/internalversion/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/imagepolicy/internalversion/expansion_generated.go b/pkg/client/listers/imagepolicy/internalversion/expansion_generated.go index 1beaad497da..42ff47094a6 100644 --- a/pkg/client/listers/imagepolicy/internalversion/expansion_generated.go +++ b/pkg/client/listers/imagepolicy/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/imagepolicy/internalversion/imagereview.go b/pkg/client/listers/imagepolicy/internalversion/imagereview.go index 68b75375902..e51f20c8de7 100644 --- a/pkg/client/listers/imagepolicy/internalversion/imagereview.go +++ b/pkg/client/listers/imagepolicy/internalversion/imagereview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/networking/internalversion/expansion_generated.go b/pkg/client/listers/networking/internalversion/expansion_generated.go index 085ac329ce1..55b40392ba6 100644 --- a/pkg/client/listers/networking/internalversion/expansion_generated.go +++ b/pkg/client/listers/networking/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/networking/internalversion/networkpolicy.go b/pkg/client/listers/networking/internalversion/networkpolicy.go index 3b7a618e398..c1abebedc18 100644 --- a/pkg/client/listers/networking/internalversion/networkpolicy.go +++ b/pkg/client/listers/networking/internalversion/networkpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/policy/internalversion/eviction.go b/pkg/client/listers/policy/internalversion/eviction.go index 38e15234a7e..743271fda85 100644 --- a/pkg/client/listers/policy/internalversion/eviction.go +++ b/pkg/client/listers/policy/internalversion/eviction.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/policy/internalversion/expansion_generated.go b/pkg/client/listers/policy/internalversion/expansion_generated.go index 65e4ddd42b0..7591e3eb6bd 100644 --- a/pkg/client/listers/policy/internalversion/expansion_generated.go +++ b/pkg/client/listers/policy/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/policy/internalversion/poddisruptionbudget.go b/pkg/client/listers/policy/internalversion/poddisruptionbudget.go index b1828e57be7..9ea20e4f728 100644 --- a/pkg/client/listers/policy/internalversion/poddisruptionbudget.go +++ b/pkg/client/listers/policy/internalversion/poddisruptionbudget.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/rbac/internalversion/clusterrole.go b/pkg/client/listers/rbac/internalversion/clusterrole.go index ae4089520d3..c18f546052b 100644 --- a/pkg/client/listers/rbac/internalversion/clusterrole.go +++ b/pkg/client/listers/rbac/internalversion/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/rbac/internalversion/clusterrolebinding.go b/pkg/client/listers/rbac/internalversion/clusterrolebinding.go index 258c41768e1..53e6ba1fd8a 100644 --- a/pkg/client/listers/rbac/internalversion/clusterrolebinding.go +++ b/pkg/client/listers/rbac/internalversion/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/rbac/internalversion/expansion_generated.go b/pkg/client/listers/rbac/internalversion/expansion_generated.go index 2e88685425f..a4ddf2437d9 100644 --- a/pkg/client/listers/rbac/internalversion/expansion_generated.go +++ b/pkg/client/listers/rbac/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/rbac/internalversion/role.go b/pkg/client/listers/rbac/internalversion/role.go index 8d4ee98b79a..2a1355214f5 100644 --- a/pkg/client/listers/rbac/internalversion/role.go +++ b/pkg/client/listers/rbac/internalversion/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/rbac/internalversion/rolebinding.go b/pkg/client/listers/rbac/internalversion/rolebinding.go index ad9a3d4785c..02392a00661 100644 --- a/pkg/client/listers/rbac/internalversion/rolebinding.go +++ b/pkg/client/listers/rbac/internalversion/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/scheduling/internalversion/expansion_generated.go b/pkg/client/listers/scheduling/internalversion/expansion_generated.go index a919cca03fd..e846b204bea 100644 --- a/pkg/client/listers/scheduling/internalversion/expansion_generated.go +++ b/pkg/client/listers/scheduling/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/scheduling/internalversion/priorityclass.go b/pkg/client/listers/scheduling/internalversion/priorityclass.go index 1692ba758dd..e2d510b1247 100644 --- a/pkg/client/listers/scheduling/internalversion/priorityclass.go +++ b/pkg/client/listers/scheduling/internalversion/priorityclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/settings/internalversion/expansion_generated.go b/pkg/client/listers/settings/internalversion/expansion_generated.go index c28f8d51958..169001b1738 100644 --- a/pkg/client/listers/settings/internalversion/expansion_generated.go +++ b/pkg/client/listers/settings/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/settings/internalversion/podpreset.go b/pkg/client/listers/settings/internalversion/podpreset.go index 024b599625b..afecfe1ddfe 100644 --- a/pkg/client/listers/settings/internalversion/podpreset.go +++ b/pkg/client/listers/settings/internalversion/podpreset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/storage/internalversion/expansion_generated.go b/pkg/client/listers/storage/internalversion/expansion_generated.go index e5a64c5d022..7b5bff6ba32 100644 --- a/pkg/client/listers/storage/internalversion/expansion_generated.go +++ b/pkg/client/listers/storage/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/listers/storage/internalversion/storageclass.go b/pkg/client/listers/storage/internalversion/storageclass.go index b14973fe250..a1f2109bc01 100644 --- a/pkg/client/listers/storage/internalversion/storageclass.go +++ b/pkg/client/listers/storage/internalversion/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/storage/internalversion/volumeattachment.go b/pkg/client/listers/storage/internalversion/volumeattachment.go index 16fd281d032..c0d91fdbc90 100644 --- a/pkg/client/listers/storage/internalversion/volumeattachment.go +++ b/pkg/client/listers/storage/internalversion/volumeattachment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/controller/garbagecollector/metaonly/zz_generated.deepcopy.go b/pkg/controller/garbagecollector/metaonly/zz_generated.deepcopy.go index 9c766f26307..edc16e4f13d 100644 --- a/pkg/controller/garbagecollector/metaonly/zz_generated.deepcopy.go +++ b/pkg/controller/garbagecollector/metaonly/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/kubectl/cmd/testing/zz_generated.deepcopy.go b/pkg/kubectl/cmd/testing/zz_generated.deepcopy.go index db572448711..f9b1342384a 100644 --- a/pkg/kubectl/cmd/testing/zz_generated.deepcopy.go +++ b/pkg/kubectl/cmd/testing/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/kubectl/testing/zz_generated.deepcopy.go b/pkg/kubectl/testing/zz_generated.deepcopy.go index bfa6c1d7880..eb5bbb34af2 100644 --- a/pkg/kubectl/testing/zz_generated.deepcopy.go +++ b/pkg/kubectl/testing/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go b/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go index f0b41f00a5d..9df3390c369 100644 --- a/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go +++ b/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/kubelet/apis/deviceplugin/v1alpha/api.pb.go b/pkg/kubelet/apis/deviceplugin/v1alpha/api.pb.go index 6a939acde52..c29447ccf97 100644 --- a/pkg/kubelet/apis/deviceplugin/v1alpha/api.pb.go +++ b/pkg/kubelet/apis/deviceplugin/v1alpha/api.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go index b4701bb564c..fd5d707b072 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go index e27e7bdd857..123cee3f73c 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.defaults.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.defaults.go index c23f8622ecb..739dadbd606 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.defaults.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go b/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go index 944e7b8985b..8a6b1a2a515 100644 --- a/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go +++ b/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go b/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go index 1b3a91c5aaa..d91bac70ab2 100644 --- a/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go +++ b/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.deepcopy.go b/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.deepcopy.go index 44b2a69902f..65ba9af1ba5 100644 --- a/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.defaults.go b/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.defaults.go index 843337ed89c..93d5f80fec4 100644 --- a/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.defaults.go +++ b/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/proxy/apis/kubeproxyconfig/zz_generated.deepcopy.go b/pkg/proxy/apis/kubeproxyconfig/zz_generated.deepcopy.go index 7527116372a..989455bcd8e 100644 --- a/pkg/proxy/apis/kubeproxyconfig/zz_generated.deepcopy.go +++ b/pkg/proxy/apis/kubeproxyconfig/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go b/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go index f6a288fdd9c..d165cb3b1ec 100644 --- a/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go +++ b/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.conversion.go b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.conversion.go index b8da46acb6d..4f06087d785 100644 --- a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.conversion.go +++ b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.deepcopy.go b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.deepcopy.go index 3ed408c1a33..38307fe6132 100644 --- a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.deepcopy.go +++ b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.defaults.go b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.defaults.go index 53f9cb92ef7..35985182f5c 100644 --- a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.defaults.go +++ b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/zz_generated.deepcopy.go b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/zz_generated.deepcopy.go index 006fcbc04ec..91ebbcca1b8 100644 --- a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/zz_generated.deepcopy.go +++ b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go index d72f98f2fa5..7dfc7b0d993 100644 --- a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go +++ b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.deepcopy.go b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.deepcopy.go index 98ab9907278..71857b1199a 100644 --- a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.deepcopy.go +++ b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.defaults.go b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.defaults.go index 7e6df29d4ae..5e24d22cacd 100644 --- a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.defaults.go +++ b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/zz_generated.deepcopy.go b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/zz_generated.deepcopy.go index afe5bccec21..9f45f4afb3f 100644 --- a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/zz_generated.deepcopy.go +++ b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go index 43b75e6b832..5406a11f501 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.deepcopy.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.deepcopy.go index 130bdd7021c..9d36704613c 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.deepcopy.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.defaults.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.defaults.go index 53f9cb92ef7..35985182f5c 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.defaults.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go index 5dc53616ce7..4f8250a504a 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go b/plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go index 0e23a656d0c..dab87e7e681 100644 --- a/plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go +++ b/plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/plugin/pkg/scheduler/api/zz_generated.deepcopy.go b/plugin/pkg/scheduler/api/zz_generated.deepcopy.go index df5e3e972e6..ad8c0c23743 100644 --- a/plugin/pkg/scheduler/api/zz_generated.deepcopy.go +++ b/plugin/pkg/scheduler/api/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/admission/v1beta1/generated.pb.go b/staging/src/k8s.io/api/admission/v1beta1/generated.pb.go index 6f5edcb85da..f56a0f063c0 100644 --- a/staging/src/k8s.io/api/admission/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/admission/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/admission/v1beta1/generated.proto b/staging/src/k8s.io/api/admission/v1beta1/generated.proto index 159325f37d1..a4e4ca266e7 100644 --- a/staging/src/k8s.io/api/admission/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/admission/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go index ad9cb851b8f..275a326d2fa 100644 --- a/staging/src/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/staging/src/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index bdc26637f68..f5d4941d367 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/staging/src/k8s.io/api/admissionregistration/v1alpha1/generated.proto index 82508ca60ef..11de02ff405 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index 850de37c487..667b262ab6d 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 392ebf99504..51e5762a871 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.proto b/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.proto index 513a3d167ee..cb1270ea47d 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index fbd9ad32901..fb0cf9c477f 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/apps/v1/generated.pb.go b/staging/src/k8s.io/api/apps/v1/generated.pb.go index 38e7415b726..02123859bf7 100644 --- a/staging/src/k8s.io/api/apps/v1/generated.pb.go +++ b/staging/src/k8s.io/api/apps/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/apps/v1/generated.proto b/staging/src/k8s.io/api/apps/v1/generated.proto index c0499d3258c..184c868e389 100644 --- a/staging/src/k8s.io/api/apps/v1/generated.proto +++ b/staging/src/k8s.io/api/apps/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/apps/v1/zz_generated.deepcopy.go index c41db298155..9419e8e72eb 100644 --- a/staging/src/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/apps/v1beta1/generated.pb.go b/staging/src/k8s.io/api/apps/v1beta1/generated.pb.go index baee7a9750a..04183fc2db8 100644 --- a/staging/src/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/apps/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/apps/v1beta1/generated.proto b/staging/src/k8s.io/api/apps/v1beta1/generated.proto index 68397a026ba..da160922d7b 100644 --- a/staging/src/k8s.io/api/apps/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/apps/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index cad744ce0bb..d83e9d6fe5a 100644 --- a/staging/src/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/apps/v1beta2/generated.pb.go b/staging/src/k8s.io/api/apps/v1beta2/generated.pb.go index 2572ab08136..cfd422edbbe 100644 --- a/staging/src/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/staging/src/k8s.io/api/apps/v1beta2/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/apps/v1beta2/generated.proto b/staging/src/k8s.io/api/apps/v1beta2/generated.proto index d8fac403ac8..4a8b28c150c 100644 --- a/staging/src/k8s.io/api/apps/v1beta2/generated.proto +++ b/staging/src/k8s.io/api/apps/v1beta2/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/staging/src/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index d25c869bb7f..f2d847ed597 100644 --- a/staging/src/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/authentication/v1/generated.pb.go b/staging/src/k8s.io/api/authentication/v1/generated.pb.go index e7367893981..2e66666eb01 100644 --- a/staging/src/k8s.io/api/authentication/v1/generated.pb.go +++ b/staging/src/k8s.io/api/authentication/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/authentication/v1/generated.proto b/staging/src/k8s.io/api/authentication/v1/generated.proto index fb7888b6323..ea7b3b2885a 100644 --- a/staging/src/k8s.io/api/authentication/v1/generated.proto +++ b/staging/src/k8s.io/api/authentication/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index 243de7590fc..f9b32192c3d 100644 --- a/staging/src/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/authentication/v1beta1/generated.pb.go b/staging/src/k8s.io/api/authentication/v1beta1/generated.pb.go index da085dd20fd..86e362b87b7 100644 --- a/staging/src/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/authentication/v1beta1/generated.proto b/staging/src/k8s.io/api/authentication/v1beta1/generated.proto index 300e53488a1..3d0abd15dac 100644 --- a/staging/src/k8s.io/api/authentication/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/authentication/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index aa8d2ef3adb..65aabe7c422 100644 --- a/staging/src/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/authorization/v1/generated.pb.go b/staging/src/k8s.io/api/authorization/v1/generated.pb.go index d34d105f4c4..bdb606c0bf0 100644 --- a/staging/src/k8s.io/api/authorization/v1/generated.pb.go +++ b/staging/src/k8s.io/api/authorization/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/authorization/v1/generated.proto b/staging/src/k8s.io/api/authorization/v1/generated.proto index 28c8d660365..2cd4af0c077 100644 --- a/staging/src/k8s.io/api/authorization/v1/generated.proto +++ b/staging/src/k8s.io/api/authorization/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/authorization/v1/zz_generated.deepcopy.go index a415b2b1d62..06a78643fce 100644 --- a/staging/src/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/authorization/v1beta1/generated.pb.go b/staging/src/k8s.io/api/authorization/v1beta1/generated.pb.go index 9b1ad299e2a..a9a8116b3af 100644 --- a/staging/src/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/authorization/v1beta1/generated.proto b/staging/src/k8s.io/api/authorization/v1beta1/generated.proto index 59df0b6d44f..b64c0642cbd 100644 --- a/staging/src/k8s.io/api/authorization/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/authorization/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go index fcb0067a381..fed07fbb216 100644 --- a/staging/src/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/autoscaling/v1/generated.pb.go b/staging/src/k8s.io/api/autoscaling/v1/generated.pb.go index db580cdabbc..4c6a171279b 100644 --- a/staging/src/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/staging/src/k8s.io/api/autoscaling/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/autoscaling/v1/generated.proto b/staging/src/k8s.io/api/autoscaling/v1/generated.proto index f5e4471317a..33eecf4de06 100644 --- a/staging/src/k8s.io/api/autoscaling/v1/generated.proto +++ b/staging/src/k8s.io/api/autoscaling/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index de4e6daa382..3622cf450a9 100644 --- a/staging/src/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/staging/src/k8s.io/api/autoscaling/v2beta1/generated.pb.go index 11eb55f3f0c..908c049ed3d 100644 --- a/staging/src/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/staging/src/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/autoscaling/v2beta1/generated.proto b/staging/src/k8s.io/api/autoscaling/v2beta1/generated.proto index 332502fce1f..0c682b35ea2 100644 --- a/staging/src/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/staging/src/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index 0bb3dd30c63..70bec2b5b6c 100644 --- a/staging/src/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/batch/v1/generated.pb.go b/staging/src/k8s.io/api/batch/v1/generated.pb.go index 5909ab76633..8599b67339f 100644 --- a/staging/src/k8s.io/api/batch/v1/generated.pb.go +++ b/staging/src/k8s.io/api/batch/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/batch/v1/generated.proto b/staging/src/k8s.io/api/batch/v1/generated.proto index 0f43d2fd5a5..b0306f3bc97 100644 --- a/staging/src/k8s.io/api/batch/v1/generated.proto +++ b/staging/src/k8s.io/api/batch/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go index fa9ff4f96cb..53392f45afd 100644 --- a/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/batch/v1beta1/generated.pb.go b/staging/src/k8s.io/api/batch/v1beta1/generated.pb.go index 29a624b094a..6544184dbd6 100644 --- a/staging/src/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/batch/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/batch/v1beta1/generated.proto b/staging/src/k8s.io/api/batch/v1beta1/generated.proto index 11fa4e2aea1..9278a3d9bfc 100644 --- a/staging/src/k8s.io/api/batch/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/batch/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index 0f8562f8f43..5282837731d 100644 --- a/staging/src/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/batch/v2alpha1/generated.pb.go b/staging/src/k8s.io/api/batch/v2alpha1/generated.pb.go index af90de0f569..2560953eb8b 100644 --- a/staging/src/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/staging/src/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/batch/v2alpha1/generated.proto b/staging/src/k8s.io/api/batch/v2alpha1/generated.proto index cc90d419096..e4de3644c38 100644 --- a/staging/src/k8s.io/api/batch/v2alpha1/generated.proto +++ b/staging/src/k8s.io/api/batch/v2alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go index 5474235d199..387e5610ef6 100644 --- a/staging/src/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/certificates/v1beta1/generated.pb.go b/staging/src/k8s.io/api/certificates/v1beta1/generated.pb.go index 4e09a4bd3cc..7f704bf82ae 100644 --- a/staging/src/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/certificates/v1beta1/generated.proto b/staging/src/k8s.io/api/certificates/v1beta1/generated.proto index e90f4f9cc35..e3cd9000fa8 100644 --- a/staging/src/k8s.io/api/certificates/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/certificates/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index de0715db624..53634ad17b6 100644 --- a/staging/src/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/core/v1/generated.pb.go b/staging/src/k8s.io/api/core/v1/generated.pb.go index 78afdc75084..61aa1833f85 100644 --- a/staging/src/k8s.io/api/core/v1/generated.pb.go +++ b/staging/src/k8s.io/api/core/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index 2cf7941e9dd..6bef759dece 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go index a366d0ded9a..8bf91804510 100644 --- a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/events/v1beta1/generated.pb.go b/staging/src/k8s.io/api/events/v1beta1/generated.pb.go index 4861697c09d..9aac8420f80 100644 --- a/staging/src/k8s.io/api/events/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/events/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/events/v1beta1/generated.proto b/staging/src/k8s.io/api/events/v1beta1/generated.proto index c13a12a5eb7..81be470f053 100644 --- a/staging/src/k8s.io/api/events/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/events/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go index 2886eba13c8..626feacf081 100644 --- a/staging/src/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/extensions/v1beta1/generated.pb.go b/staging/src/k8s.io/api/extensions/v1beta1/generated.pb.go index ba5998e2773..fcb80615f05 100644 --- a/staging/src/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/extensions/v1beta1/generated.proto b/staging/src/k8s.io/api/extensions/v1beta1/generated.proto index 7a3e70290d9..8308786d147 100644 --- a/staging/src/k8s.io/api/extensions/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/extensions/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 02a84cccd2c..564d4177d0a 100644 --- a/staging/src/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/staging/src/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go index e695bb5e70c..f521979b776 100644 --- a/staging/src/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/imagepolicy/v1alpha1/generated.proto b/staging/src/k8s.io/api/imagepolicy/v1alpha1/generated.proto index 9a09cba267d..a19967cbe94 100644 --- a/staging/src/k8s.io/api/imagepolicy/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/imagepolicy/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go index 21fcd39e418..f0463d29d25 100644 --- a/staging/src/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/networking/v1/generated.pb.go b/staging/src/k8s.io/api/networking/v1/generated.pb.go index df4d9f97959..05aaf1d9a8d 100644 --- a/staging/src/k8s.io/api/networking/v1/generated.pb.go +++ b/staging/src/k8s.io/api/networking/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/networking/v1/generated.proto b/staging/src/k8s.io/api/networking/v1/generated.proto index ae28d2f2df2..06365ebe3fc 100644 --- a/staging/src/k8s.io/api/networking/v1/generated.proto +++ b/staging/src/k8s.io/api/networking/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 955e74344ef..3dbe87055cc 100644 --- a/staging/src/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/policy/v1beta1/generated.pb.go b/staging/src/k8s.io/api/policy/v1beta1/generated.pb.go index a66aeff354c..4ed4d29ca6b 100644 --- a/staging/src/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/policy/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/policy/v1beta1/generated.proto b/staging/src/k8s.io/api/policy/v1beta1/generated.proto index a276be1c93d..2e01cf3d9b6 100644 --- a/staging/src/k8s.io/api/policy/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/policy/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 70872f09817..78a597b5b9b 100644 --- a/staging/src/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/rbac/v1/generated.pb.go b/staging/src/k8s.io/api/rbac/v1/generated.pb.go index 1530d379c08..5343731cc68 100644 --- a/staging/src/k8s.io/api/rbac/v1/generated.pb.go +++ b/staging/src/k8s.io/api/rbac/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/rbac/v1/generated.proto b/staging/src/k8s.io/api/rbac/v1/generated.proto index 6edb2779a64..2f8d863df1d 100644 --- a/staging/src/k8s.io/api/rbac/v1/generated.proto +++ b/staging/src/k8s.io/api/rbac/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index e1aab58145d..085edaa121c 100644 --- a/staging/src/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/rbac/v1alpha1/generated.pb.go b/staging/src/k8s.io/api/rbac/v1alpha1/generated.pb.go index c66cadd95b2..c07cdc75dc7 100644 --- a/staging/src/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/rbac/v1alpha1/generated.proto b/staging/src/k8s.io/api/rbac/v1alpha1/generated.proto index 28a4ae3d010..41a193f55d0 100644 --- a/staging/src/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/rbac/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index abbb994fdad..3037f666cb3 100644 --- a/staging/src/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/rbac/v1beta1/generated.pb.go b/staging/src/k8s.io/api/rbac/v1beta1/generated.pb.go index 8cb2c4bec6e..c2525e0dff2 100644 --- a/staging/src/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/rbac/v1beta1/generated.proto b/staging/src/k8s.io/api/rbac/v1beta1/generated.proto index 975de1096b4..aa9960b8ec4 100644 --- a/staging/src/k8s.io/api/rbac/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/rbac/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index ac23895615d..7e035cd27a4 100644 --- a/staging/src/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/staging/src/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 1a68ffe1130..39c0b9e6a4e 100644 --- a/staging/src/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/scheduling/v1alpha1/generated.proto b/staging/src/k8s.io/api/scheduling/v1alpha1/generated.proto index 625cae7bee0..75b4968cc31 100644 --- a/staging/src/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go index fad4db662d5..344e6cc622d 100644 --- a/staging/src/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/settings/v1alpha1/generated.pb.go b/staging/src/k8s.io/api/settings/v1alpha1/generated.pb.go index 47e24af73b2..bfc6a5a11b4 100644 --- a/staging/src/k8s.io/api/settings/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/settings/v1alpha1/generated.proto b/staging/src/k8s.io/api/settings/v1alpha1/generated.proto index 430319d7dca..098e8dd9bf8 100644 --- a/staging/src/k8s.io/api/settings/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/settings/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go index 2c925622b7e..5376686586c 100644 --- a/staging/src/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/storage/v1/generated.pb.go b/staging/src/k8s.io/api/storage/v1/generated.pb.go index 7157b72ff28..31988b70dc7 100644 --- a/staging/src/k8s.io/api/storage/v1/generated.pb.go +++ b/staging/src/k8s.io/api/storage/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/storage/v1/generated.proto b/staging/src/k8s.io/api/storage/v1/generated.proto index 939ebde685f..72b3ceb832a 100644 --- a/staging/src/k8s.io/api/storage/v1/generated.proto +++ b/staging/src/k8s.io/api/storage/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/storage/v1/zz_generated.deepcopy.go index a2b2f8e7152..63bdcc43c62 100644 --- a/staging/src/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/storage/v1alpha1/generated.pb.go b/staging/src/k8s.io/api/storage/v1alpha1/generated.pb.go index 3d55c6ec430..586a1b67244 100644 --- a/staging/src/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/storage/v1alpha1/generated.proto b/staging/src/k8s.io/api/storage/v1alpha1/generated.proto index c9421cf06bb..289ef5f3ee6 100644 --- a/staging/src/k8s.io/api/storage/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/storage/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index 19ae948535d..e1561dba1c4 100644 --- a/staging/src/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/storage/v1beta1/generated.pb.go b/staging/src/k8s.io/api/storage/v1beta1/generated.pb.go index f2c8ea96005..c9ae2c8b718 100644 --- a/staging/src/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/api/storage/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/api/storage/v1beta1/generated.proto b/staging/src/k8s.io/api/storage/v1beta1/generated.proto index b0e030c01b4..f9e1d29503c 100644 --- a/staging/src/k8s.io/api/storage/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/storage/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 9d1e79823b0..715b17f43da 100644 --- a/staging/src/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/zz_generated.deepcopy.go index fe626ef90d1..97a35e3f9df 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index e9b16e6db84..488f5356022 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 7cfc44c4cf1..c4d2b2dde31 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index 26fcb6cc04c..5b045113828 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index 697f87417fc..363e970dae6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go index 55798082edc..5c30d5b0a5f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index f7d57c22e8d..f94bbae2c92 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go index bae98c78bf3..441572a4ffb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go index 7f670fed477..05edbcd1c2a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go index 473f88f47da..3d67b5e6ddd 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go index 857e10e624c..1b0ef276dec 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go index 95f4bb41ab0..af2951a76db 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go index 6a15dadd7b9..90d280f892b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go index 73238715b48..e1477df5f47 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_apiextensions_client.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_apiextensions_client.go index 252845f99ff..c88a38f548f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_apiextensions_client.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_apiextensions_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go index 7a2edf9ab10..6ee92631ff8 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake/fake_customresourcedefinition.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go index 2f721078b07..35554a068d2 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/clientset.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/clientset.go index 72803e76923..076c61b1c3c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/clientset.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/doc.go index b667dd5157a..4ede718dee1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go index fdc0beee577..3bf97b3dac8 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/register.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/register.go index 1ebf1f9d071..3c89961f165 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/register.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/register.go index 92e65a67dc8..2f49e2e3c82 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/apiextensions_client.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/apiextensions_client.go index 28890867903..fedecdaa536 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/apiextensions_client.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/apiextensions_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/customresourcedefinition.go index 7758b635f74..b4f14bc16f0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/customresourcedefinition.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/fake_apiextensions_client.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/fake_apiextensions_client.go index e90e8c6b45a..3c9d1e02ec0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/fake_apiextensions_client.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/fake_apiextensions_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/fake_customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/fake_customresourcedefinition.go index 6cf8f7e7d3e..790b172e6f5 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/fake_customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake/fake_customresourcedefinition.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/generated_expansion.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/generated_expansion.go index b1721b0d04f..a1df3b36f56 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/generated_expansion.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go index e1ad6afcc08..0a7e72223df 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go index cc86c42e957..32bc315c5b9 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go index 2a582702655..667ce98e4f2 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go index fbaa2c65d3c..ff396648168 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go index 059fd57247a..4d741b800d2 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 80b53e77253..810ac9d4381 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/interface.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/interface.go index e422eb4404b..923953a4809 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/interface.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go index 20c86312598..f82a1c757c8 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/interface.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/interface.go index 4183e973d4a..d80ab3abbe8 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/interface.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go index ee83d6dfc85..2cc9ffbbba8 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go index 4a013283ae5..77f4303a7b7 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go index e93f43f5003..12acf883077 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/customresourcedefinition.go index 1bd8480514e..0c3159fa6bb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/customresourcedefinition.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/expansion_generated.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/expansion_generated.go index 3be1e916499..33e966b806a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/expansion_generated.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go index 316721bd68e..ba735065702 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go index 5723fc39b73..a4aa2c7efa1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 8b2e338a7ef..6de71e5087d 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto b/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto index 091d11bdba1..40185777e7e 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go index eb49f819943..186d9007e66 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index deaf5309d63..2bc1c3f9686 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index b8218469f40..1fa478f5aea 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index ab23a0713b3..b37a445c2a8 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go index 8f6a17bf6a6..e3bae45ecc0 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index ed458550216..2aa20902547 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go index 6df448eb9fd..88d7af085be 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.pb.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.pb.go index 2d43bf94f65..4fcddb3ab3f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto index f3aedd8014c..7509f6e867b 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go index 4ae545d9119..1b3172004d6 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.defaults.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.defaults.go index 7e6df29d4ae..5e24d22cacd 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go index bc060c309bf..461e960eaab 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.deepcopy.go index 42e689f6366..d10f8865a2f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.defaults.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.defaults.go index 6df448eb9fd..88d7af085be 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/zz_generated.deepcopy.go index 43452928ac4..77a7bcae8f3 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go index 823ef32a34e..d22cddbff76 100644 --- a/staging/src/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/staging/src/k8s.io/apimachinery/pkg/runtime/generated.pb.go index bce8336a8ad..f561fd476e6 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/generated.proto b/staging/src/k8s.io/apimachinery/pkg/runtime/generated.proto index b3fd09c3c5e..02e388e9087 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/staging/src/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index e2cc1216617..5357628adde 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/staging/src/k8s.io/apimachinery/pkg/runtime/schema/generated.proto index ebc1a263d29..50c2f2a632e 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/zz_generated.deepcopy.go index 1385a8c569c..d43ae7f0449 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/testing/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/runtime/testing/zz_generated.deepcopy.go index 569903b51ee..1927e34c554 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/testing/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/testing/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index 929c67a9d13..82cf19ce1a8 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apimachinery/pkg/test/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/test/zz_generated.deepcopy.go index 30cf84983b7..bf67d16817a 100644 --- a/staging/src/k8s.io/apimachinery/pkg/test/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/test/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/staging/src/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 433dfa5cd9b..161e9a6f8a5 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/staging/src/k8s.io/apimachinery/pkg/util/intstr/generated.proto index cccaf6f6891..6819d468d3f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/staging/src/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go index ab590e13531..738d0a29cbb 100644 --- a/staging/src/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go index 1adab17ac9e..d7e35ffed5e 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go index 7c875e12022..c9ef335c700 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go index 7e6df29d4ae..5e24d22cacd 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go index 19da07207b2..890594a7d57 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go index 8b6a1695634..b1af97ec392 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go index ba634e485ce..c8b46fac5d8 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go index 7e6df29d4ae..5e24d22cacd 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go index 5a67e612dfc..7e5fb6edb45 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go index a4fc41537e6..12e674bb187 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto index 32a5c73640f..926eb65edc9 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go index 166c17bd940..b3b27c31c14 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go index e914b98ec35..bb4ea53105f 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go index 7e6df29d4ae..5e24d22cacd 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go index 1a663941691..b982df3ed8f 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto index dfe4e947803..bbac7f2b708 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go index e608d13817d..c29f61e743b 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go index e088db37ab1..67f5fdac322 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go index e24e70be38b..b61dda74c23 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go index ebc89e66b7d..c52aea05a07 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.pb.go b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.pb.go index 2d6596bef94..d0055ba037f 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.pb.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto index 72075566c79..de59bc3ddaa 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go index 8fa603fde76..e1bfe92b2c5 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.deepcopy.go index e3b7b26fc95..8a28b30b557 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.defaults.go b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.defaults.go index 6df448eb9fd..88d7af085be 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/example/zz_generated.deepcopy.go index 7d9cf5f2dbf..7a102f3e1ec 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go index c65d6f1da23..310a41f6c29 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto index d362291870e..e0bcac5e542 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto +++ b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.conversion.go index 609ebef2c2a..7e22d8e0d72 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.deepcopy.go index 0208e85669c..f843a758d52 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.defaults.go b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.defaults.go index 6df448eb9fd..88d7af085be 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example2/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/example2/zz_generated.deepcopy.go index f100811697f..3d312899b0d 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example2/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example2/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/testing/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/testing/zz_generated.deepcopy.go index b454e1b6772..bd7fed91538 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/testing/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/testing/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/testing/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/endpoints/testing/zz_generated.deepcopy.go index 2bc8d7d2f2d..bccd43c92c1 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/testing/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/testing/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/apiserver/pkg/registry/rest/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/registry/rest/zz_generated.deepcopy.go index fd5212b3f34..b4e3fd8226a 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/rest/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/rest/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/apiserver/pkg/storage/testing/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/storage/testing/zz_generated.deepcopy.go index bbd0846f42d..464b48c606d 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/testing/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/testing/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/interface.go b/staging/src/k8s.io/client-go/informers/admissionregistration/interface.go index 74bfb60198a..2f6d695384e 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/interface.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go index 0f55c737f53..aa73b27a8e0 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go index 44da0479673..d932ff5e188 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go index 4f08d69a01d..3b4f3b992f2 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 31a2a865cb6..518403e998a 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index d87ab900285..35f15a24f26 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/interface.go b/staging/src/k8s.io/client-go/informers/apps/interface.go index fdd32de0f3f..cd954192863 100644 --- a/staging/src/k8s.io/client-go/informers/apps/interface.go +++ b/staging/src/k8s.io/client-go/informers/apps/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go index a69be9c70f4..97c5595dfa6 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go b/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go index 1c7abf7d09f..7ba662d42fe 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go b/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go index 9f6beed6e07..07396bb974e 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/interface.go b/staging/src/k8s.io/client-go/informers/apps/v1/interface.go index 6145fd6ccd9..8af8a25db38 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/interface.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go b/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go index 1ac50607f2e..089fff08494 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go b/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go index 535790df974..07c384512de 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go index 1e2de416bc5..da0b32509c2 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go index 4d2dea575ad..48cbf23e58e 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta1/interface.go b/staging/src/k8s.io/client-go/informers/apps/v1beta1/interface.go index 3a51a1f5b48..b6bc410243c 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta1/interface.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go index 779ae2c60da..ba9ba5c025a 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go index a7d55ab4c6d..8649636b765 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go index 5d3288026e4..87f03a12d64 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go index 6b6cd60352d..cfecc891c7c 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/interface.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/interface.go index 59a6e73d4ad..88c3b05bd4a 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/interface.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go index 988a3e4fbb9..f460c206f97 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go index dff9c24083f..6be9bbb2351 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/autoscaling/interface.go b/staging/src/k8s.io/client-go/informers/autoscaling/interface.go index 63a5c0ccda2..8d9caaf2248 100644 --- a/staging/src/k8s.io/client-go/informers/autoscaling/interface.go +++ b/staging/src/k8s.io/client-go/informers/autoscaling/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go index 7d875e73566..cf9c7cdc340 100644 --- a/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/autoscaling/v1/interface.go b/staging/src/k8s.io/client-go/informers/autoscaling/v1/interface.go index 5ba90701207..e927a8ce3c1 100644 --- a/staging/src/k8s.io/client-go/informers/autoscaling/v1/interface.go +++ b/staging/src/k8s.io/client-go/informers/autoscaling/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go index 9865f8e1333..73bf5a12a75 100644 --- a/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go b/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go index 4c9ea84999f..ab7b04f7425 100644 --- a/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go +++ b/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/batch/interface.go b/staging/src/k8s.io/client-go/informers/batch/interface.go index bbaec796483..c06b0b19e70 100644 --- a/staging/src/k8s.io/client-go/informers/batch/interface.go +++ b/staging/src/k8s.io/client-go/informers/batch/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/batch/v1/interface.go b/staging/src/k8s.io/client-go/informers/batch/v1/interface.go index 41c08ea2d97..60464d46118 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v1/interface.go +++ b/staging/src/k8s.io/client-go/informers/batch/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/batch/v1/job.go b/staging/src/k8s.io/client-go/informers/batch/v1/job.go index 8a2e5f0d8bf..1ab68fef445 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v1/job.go +++ b/staging/src/k8s.io/client-go/informers/batch/v1/job.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go index 4edfd4153d0..03a4e1598fe 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go +++ b/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/batch/v1beta1/interface.go b/staging/src/k8s.io/client-go/informers/batch/v1beta1/interface.go index 0ba1935dc6d..785a62f7ba8 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v1beta1/interface.go +++ b/staging/src/k8s.io/client-go/informers/batch/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go b/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go index 03a6e6f883e..c0ea43a5f06 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go +++ b/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/batch/v2alpha1/interface.go b/staging/src/k8s.io/client-go/informers/batch/v2alpha1/interface.go index 39b6f33f05f..2bc64945f53 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v2alpha1/interface.go +++ b/staging/src/k8s.io/client-go/informers/batch/v2alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/certificates/interface.go b/staging/src/k8s.io/client-go/informers/certificates/interface.go index 1eefe479737..d99ff192dd3 100644 --- a/staging/src/k8s.io/client-go/informers/certificates/interface.go +++ b/staging/src/k8s.io/client-go/informers/certificates/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go index 44aac5c7245..6d356371e61 100644 --- a/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go +++ b/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/certificates/v1beta1/interface.go b/staging/src/k8s.io/client-go/informers/certificates/v1beta1/interface.go index 8578023c789..4b8d9b1c7d8 100644 --- a/staging/src/k8s.io/client-go/informers/certificates/v1beta1/interface.go +++ b/staging/src/k8s.io/client-go/informers/certificates/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/core/interface.go b/staging/src/k8s.io/client-go/informers/core/interface.go index 7fc2a5cd5fa..82b4917e8ac 100644 --- a/staging/src/k8s.io/client-go/informers/core/interface.go +++ b/staging/src/k8s.io/client-go/informers/core/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go b/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go index 77b17fd3ee8..d03cf2f801f 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/configmap.go b/staging/src/k8s.io/client-go/informers/core/v1/configmap.go index ed0f4c2d92c..6a903a45f18 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/configmap.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/configmap.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go b/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go index 8a7228bafb4..a6e36051e2f 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/event.go b/staging/src/k8s.io/client-go/informers/core/v1/event.go index 23f5ead665d..7c33925303a 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/event.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/interface.go b/staging/src/k8s.io/client-go/informers/core/v1/interface.go index e560b12f809..0b460d6b568 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/interface.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go b/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go index 9588b940217..fe938ec2d1c 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/core/v1/namespace.go b/staging/src/k8s.io/client-go/informers/core/v1/namespace.go index eb841b157b4..da2af6206e3 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/namespace.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/node.go b/staging/src/k8s.io/client-go/informers/core/v1/node.go index 3c70e52b039..d9df80bcbb7 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/node.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/node.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go index e944560f793..13c154dff7d 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go index 136884d4c99..8017ce1dca1 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/pod.go b/staging/src/k8s.io/client-go/informers/core/v1/pod.go index b9720829024..1d1c6fc0f46 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/pod.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/pod.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go b/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go index c05753850c7..f7151e30f5b 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go index e04cd146992..68127512ad2 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go b/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go index 3ef4f4c12cc..d80660022b6 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/secret.go b/staging/src/k8s.io/client-go/informers/core/v1/secret.go index 7bc6395a448..69874bc4cdb 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/secret.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/secret.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/core/v1/service.go b/staging/src/k8s.io/client-go/informers/core/v1/service.go index d1b5ed02f98..082b5925638 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/service.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/service.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go b/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go index fb9c50aa355..b5cd59b4f1f 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/events/interface.go b/staging/src/k8s.io/client-go/informers/events/interface.go index 81f6646f7be..cfb8d375f9d 100644 --- a/staging/src/k8s.io/client-go/informers/events/interface.go +++ b/staging/src/k8s.io/client-go/informers/events/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go b/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go index d604b4cf0da..fb44ff8c548 100644 --- a/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go +++ b/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/events/v1beta1/interface.go b/staging/src/k8s.io/client-go/informers/events/v1beta1/interface.go index d079afed593..8138b0a0b51 100644 --- a/staging/src/k8s.io/client-go/informers/events/v1beta1/interface.go +++ b/staging/src/k8s.io/client-go/informers/events/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/extensions/interface.go b/staging/src/k8s.io/client-go/informers/extensions/interface.go index a6bfc3b44d3..3a5d8a52e63 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/interface.go +++ b/staging/src/k8s.io/client-go/informers/extensions/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go index c64b14c3da7..911f51edf5c 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go index 4bcfc5c252b..e37ec7eff2a 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go index 22dac92b9ce..7ba79228aed 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/interface.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/interface.go index ce060e0d90f..a3af9309fae 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/interface.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go index 18ef2735b57..52126d57774 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go index 856cb30bab5..23e50435f2e 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/factory.go b/staging/src/k8s.io/client-go/informers/factory.go index e922c1276cb..642e86cc0b0 100644 --- a/staging/src/k8s.io/client-go/informers/factory.go +++ b/staging/src/k8s.io/client-go/informers/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/generic.go b/staging/src/k8s.io/client-go/informers/generic.go index 70ed43317d7..c1d01bec2f5 100644 --- a/staging/src/k8s.io/client-go/informers/generic.go +++ b/staging/src/k8s.io/client-go/informers/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go index 61155f7404d..8410cca2caf 100644 --- a/staging/src/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/networking/interface.go b/staging/src/k8s.io/client-go/informers/networking/interface.go index 79e0d0c1512..bd0b3aaa7b8 100644 --- a/staging/src/k8s.io/client-go/informers/networking/interface.go +++ b/staging/src/k8s.io/client-go/informers/networking/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/networking/v1/interface.go b/staging/src/k8s.io/client-go/informers/networking/v1/interface.go index 980a7be9938..aa75a918cab 100644 --- a/staging/src/k8s.io/client-go/informers/networking/v1/interface.go +++ b/staging/src/k8s.io/client-go/informers/networking/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go index b712ba0305b..d3233c265b1 100644 --- a/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go +++ b/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/policy/interface.go b/staging/src/k8s.io/client-go/informers/policy/interface.go index f893c3d5b9f..5908e63c512 100644 --- a/staging/src/k8s.io/client-go/informers/policy/interface.go +++ b/staging/src/k8s.io/client-go/informers/policy/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/policy/v1beta1/interface.go b/staging/src/k8s.io/client-go/informers/policy/v1beta1/interface.go index f235ee1d0c8..e59a4aa9cd0 100644 --- a/staging/src/k8s.io/client-go/informers/policy/v1beta1/interface.go +++ b/staging/src/k8s.io/client-go/informers/policy/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go index ba0da35b1ee..935c1b63efa 100644 --- a/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go +++ b/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/interface.go b/staging/src/k8s.io/client-go/informers/rbac/interface.go index df7adfcd3cb..edf21fa6a84 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/interface.go +++ b/staging/src/k8s.io/client-go/informers/rbac/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go index ac75abbc8ce..9c747345343 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go index a3c73e586c3..6d0c1a68ca2 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/interface.go b/staging/src/k8s.io/client-go/informers/rbac/v1/interface.go index 1e46b039bdc..969dd563ceb 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/interface.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/role.go b/staging/src/k8s.io/client-go/informers/rbac/v1/role.go index fb1de46145a..8ed43fb750f 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/role.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go index 78c78fa1ac8..3bc54f08732 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go index ec257965b75..2fa374454a9 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go index a2d0c396072..2fa4e2d7ffb 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/interface.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/interface.go index 586283d4a2e..2f4945f9a37 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/interface.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go index 4564b336165..d4730a00405 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go index 556f966a86f..abfae250887 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go index 821746b90d7..16e597d04dd 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go index c517ac45610..4fee022cd86 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/interface.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/interface.go index 9d375d947c2..92e440ba0f8 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/interface.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go index 0f13d3aaf6a..39c37568ba7 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go index c951d97d501..8ff036b0948 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/scheduling/interface.go b/staging/src/k8s.io/client-go/informers/scheduling/interface.go index 60b63e8e5c3..1ea9a2e7163 100644 --- a/staging/src/k8s.io/client-go/informers/scheduling/interface.go +++ b/staging/src/k8s.io/client-go/informers/scheduling/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go b/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go index 1cceef7b256..dabe3fb63f7 100644 --- a/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go +++ b/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go index 5c90f43df0a..2a82206ea81 100644 --- a/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go +++ b/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/settings/interface.go b/staging/src/k8s.io/client-go/informers/settings/interface.go index 53bc6621709..205dbbf4d9e 100644 --- a/staging/src/k8s.io/client-go/informers/settings/interface.go +++ b/staging/src/k8s.io/client-go/informers/settings/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/settings/v1alpha1/interface.go b/staging/src/k8s.io/client-go/informers/settings/v1alpha1/interface.go index 39007ebe25f..5714bf06c75 100644 --- a/staging/src/k8s.io/client-go/informers/settings/v1alpha1/interface.go +++ b/staging/src/k8s.io/client-go/informers/settings/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go b/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go index 2e630c73d03..b87f0e6138e 100644 --- a/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go +++ b/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/storage/interface.go b/staging/src/k8s.io/client-go/informers/storage/interface.go index b91613a9216..bf95b0b92f2 100644 --- a/staging/src/k8s.io/client-go/informers/storage/interface.go +++ b/staging/src/k8s.io/client-go/informers/storage/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/storage/v1/interface.go b/staging/src/k8s.io/client-go/informers/storage/v1/interface.go index fadb1a0739b..ea84ebabfa7 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1/interface.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go b/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go index 341549f0fb7..c6338fe67cd 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/storage/v1alpha1/interface.go b/staging/src/k8s.io/client-go/informers/storage/v1alpha1/interface.go index d84eb5fd27e..86d49a952d0 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1alpha1/interface.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go index cab9ffc4690..413dc4ed008 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/storage/v1beta1/interface.go b/staging/src/k8s.io/client-go/informers/storage/v1beta1/interface.go index 7fa1abf5f60..144e4e176f7 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1beta1/interface.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go index 3e96b282000..0a17418fb52 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/clientset.go b/staging/src/k8s.io/client-go/kubernetes/clientset.go index 7dcf86a7028..b6885590965 100644 --- a/staging/src/k8s.io/client-go/kubernetes/clientset.go +++ b/staging/src/k8s.io/client-go/kubernetes/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/doc.go b/staging/src/k8s.io/client-go/kubernetes/doc.go index d8e920a5cda..2c07131b1c2 100644 --- a/staging/src/k8s.io/client-go/kubernetes/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 12cfac0a8f2..762d1405c2c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/register.go b/staging/src/k8s.io/client-go/kubernetes/fake/register.go index 7c78f5670d7..f9feb2cfbb3 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/register.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/scheme/doc.go b/staging/src/k8s.io/client-go/kubernetes/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/client-go/kubernetes/scheme/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/scheme/register.go b/staging/src/k8s.io/client-go/kubernetes/scheme/register.go index 7bfd3361859..6cc1bd32871 100644 --- a/staging/src/k8s.io/client-go/kubernetes/scheme/register.go +++ b/staging/src/k8s.io/client-go/kubernetes/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go index 5150fee3c55..e6f1a81e2ac 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go index cdaaf620786..08a9c7ceba4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go index d73267ee68f..7fc65625658 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go index a06b59f632a..34d56925712 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go index ccdfb43f6b8..bfd47e53c45 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go index 757a1c1d9e2..11cc502f24f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index 8d3774b4e4d..63e84abc515 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go index 148eaf9f871..e08020e4294 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go index a02bf7b7c56..2b8829f9b4c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go index 422a0a24128..e0fd7ca656f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go index 012a8da7e77..a7780738b34 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 36711a50092..f6afa033977 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index d66849225d6..860ee16b5cf 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index 07936304ea2..0c81c1d96d9 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index 1d9f831346e..5f2d90b63dd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index fd38c53c5ff..f9577d78783 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index 34c06c8de51..af837714d31 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go index d4e34411bd9..79f62f1848e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go index ae9d4f3ff1e..3fbea7ff44e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go index 3f32ed1f319..87167b688ac 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go index b0e8f82d7fe..99b90371e85 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go index ba8539578c7..1330460ad5e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go index 39d3c59fbe1..44845d31bbd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go index 500d67dd365..6d07e293dd3 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index 5047b0c5f8d..bf5330f065f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index 2c927ac0cd1..05496f31165 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index 7d1fd9e6ad9..eaf6c8b4514 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index 348c955a05d..86cab3bb329 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 8a72cefbc66..1827d92dbea 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go index f1280bc3f36..22d280d2eb8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go index f75db1bad1a..3d09b1a3a66 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index 2907c2a8c62..98260c46746 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go index 8abb429acc6..19dfda1c47d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index 1ae9c6a58f5..b34b8a8fc5b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go index a091b902adf..44edefdcd8a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go index d3bf9e10323..d67f31431ee 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 8e5b9f57032..cf96d006c2c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 771c7447e0b..cb44f155fe1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index d28240339e6..9fe5d129aa7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index 4657c1b2c57..e062d392f6d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 7da8d2696dc..0e292ca17c4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go index e8d65fa241c..8d618920171 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go index 20bd67d2460..fff0882a1fe 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go index 62b89e20921..1209c868997 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go index dfd1c4abe32..b94e4c92064 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go index 3b204f2a7ea..6571c9e525e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go index 338f92c1147..8158ba983d3 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go index a6ad9f1e71f..d873efede36 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go index bdff8330067..937c38978aa 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go index b0fe7ef70aa..98356ca7102 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index dc6ce105215..e0cd3a2864b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go index 2c2aa185f11..0295d4144b7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index 516587faa1e..8bac75600a2 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index dcc31d654bc..4d0af8c2629 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go index 85eb00ddfda..fbf88f6f504 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go index b1c527a7113..a5e5bf53385 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go index 42e76d5e43c..94adbfbce31 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go index 9cfef4e6ac0..ab95e6a8342 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 6004b0e3011..996de350a56 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go index 8f66d8c5b12..6dd34cca6d0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go index e8c57210b31..6089e2f949b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go index 2b7e8ca0bfb..74ad7fa90d6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go index 7f9f1e9fa08..eef5a968deb 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go index 385dffcd173..c75cc05b6d6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go index 7ee26250b2c..0a0594179b9 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go index a49fc9b7295..7ea7aac8862 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go index 26d9011b4c9..1b977f35419 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go index dfe947b90ad..1acf4f52b21 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go index 778d06e593b..4301290b544 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go index 42e76d5e43c..94adbfbce31 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go index b2085bceb23..7c9ff014f30 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go index cfb019eaaf2..0957b231cd5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go index fbd374a137c..9caaee2a0c4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go index 08f6d60952c..f4557d2baef 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go index 7c05341235b..5bf1261ad10 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go index 33d5746a53f..3d3dc67adf1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go index 11987f1256c..ceb5c499fea 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go index aeba4389536..11415d8438e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go index 4e221c282e7..b85ee6ddb06 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go index 4413c6d34d8..43229d5723c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go index 2b7e8ca0bfb..74ad7fa90d6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go index 9b8e1034193..18821d72e8f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go index 1ef3e49afea..7bc2a021194 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go index 7f640d91bfe..7dfd314d2a6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go index cd60e9df6be..aa4dfcaabb9 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go index 5464ab59df8..1e504bd0f4e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go index c3e0d373491..3eb03bca59b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index e7e660bdd79..25064f6487f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go index effefbd50b6..15ea315c7c6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 4191c920fad..29da3e74051 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go index 753e5dc7708..95eab71372d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go index de272bd126d..57c8c308606 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go index e997033c402..f1712975462 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index 6b70822ad46..c2c2e5f3835 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go index eddac4a7808..9101766f627 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 9ff497c1070..b625fdafd32 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go index 5cf75f9c3a5..0973bdc5ac4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go index 4e2d361e809..95ddc6b1f7a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index b51c8f5cd6b..5686550ce5f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go index 68d7741fa0b..a43cb4a958e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index d80205ddf7a..8454e576ed8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go index 88baefe3b36..c46dc6f5dd1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index 48b68988bde..2c61fcf686a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go index fe95cd4c948..84959c363d5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go index 5bcb84e8ac8..d2382696845 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go index 22939f58d05..5b7e871b0e7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go index 1dd5b82c7c7..7d286099989 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go index a8aaa9474bc..5f92d35d793 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go index 7f1ef91b160..8739e8628af 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go index c8f5a40de82..25c4a4bac8f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go index 9ea0d3459b7..33bc87cdc83 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go index 078027ef495..d30c055a608 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go index 7226d92d765..552056b7001 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index 769f71c677e..b8c3a5b0704 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go index 550c5bba158..78e1dbb6a66 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index 7b515240f77..08cbd943dbe 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go index 2b7e8ca0bfb..74ad7fa90d6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index 89c645a0e52..26e0a7f17d1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index 899e492ade8..df75d2a81d6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go index e5b901e5c5d..b75a3dc161c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index 3c463daaa1f..f5bc3443a65 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 857c885ad86..bec36116e35 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index 1f358b7e3da..aeaaf40bfbe 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index 262a4c5e800..ab5d7f10868 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go index 0956b4c63c1..5daba9b6835 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index d1fad00ddf2..d7413bfb81a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index 60d6b45a588..05ace4f2c58 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index dc58c335c6d..efb03a01e61 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index 8c03925a7fe..4b8b8ebd1dd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index 76d2ac1e5a0..5639ec0c9ce 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index 65b76e95e53..cb8c70afac4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index cb55df14bd0..97d104e02f6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index ed35ed7cbff..a2548fe55df 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index fa6233850d8..b8086addd40 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index 2fe0a6cf335..f9b9beeb1aa 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index 36a64903d6a..97ef36a0f3f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index 58c6dc9c6da..39bf08d4a78 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index c734ff5b56f..46678052eb7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index 4c5f67b4953..cd74e93c735 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go index 5fe0585b419..3f4b5f89c7e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index 63ac612b48c..a21d09273d2 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index 0effd3e0972..a1c59959d62 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/node.go index 202a91df35c..d7625137374 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index 175f3882165..36a881772ef 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index a6159f1255f..d60566f8305 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 267cd7f60cf..3825a1cc760 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index c286e2964c9..028423adc81 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 196c8d05a9b..36b3141c86f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 311d0e8cc47..99032622666 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index db4a70d95e7..a21a2167cb3 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/service.go index ff956020125..744714740cc 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index 9e3b126b6a8..c3fbebd5e99 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index 3473e99c420..b186b967f76 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go index 05cee7fb26c..5ae7fbc99dd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go index 4b9ebd6e5e4..20ec2a7d685 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go index 54b28b6ee89..27ede5cd8b5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go index 82b2fb4a1f7..e2522111d97 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index 95ca28afbcf..f33ce3eb478 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 65f3820b9c4..91eab950e7c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index b4f8886ad25..73128ed3da3 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index 3a3220a0593..4fb3df94fe1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index d5abace975d..804ffabf74f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go index 8ec490c502b..6590b69fd55 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index 5a6f93e0e05..56bbfb0138d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go index 28bbbbb7f26..250a17eaa51 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index e8d22c6b49e..fcfb5dcbdaf 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go index 77f8c61a7dc..caa72a34a3e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index e693fe68c1b..e7062ca2cbb 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index 0bef3972aac..e9ba108af3e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go index 071f78eac57..a1c274c2e76 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 8daa1cc01dc..ed68f458bc1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go index 733012adee1..8d62590caf6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go index d9733a0bffd..67ba6372e7f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index 6fc226e187d..881b8a926ad 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go index cdd70ae3536..6c72ca5c50a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go index 1b9099eeb8f..d1059d02bf7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index 96487885c5d..8b1518234c7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go index 9c4133e3695..63e3a6821d7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go index a091d8de397..657fd93d155 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index b00af71881c..cc45f524393 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go index c9039b5196c..f0f1c6a1fd5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go index 511adc6ef7a..a119239ca9c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 47b391d80b9..45d2c17662b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 40588ce97b1..f9020d0b734 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index e0ea45148e0..ab27fdf42f9 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index 11b2e216533..9bd87e91a88 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go index 645126f2c81..6ac4da0fc33 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go index a9c0bb08917..a19b2b40c7d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go index cddaf6d5067..a27b6b8e164 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go index f64ede63897..12a9f0fe751 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go index e499cf95b31..e26ff9661b9 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go index aa3df0df2fc..48b91811fc1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go index dd0a0cb0dc5..82eb8f1f140 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index d83e722b734..1dc65551f0b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index a424365807f..d1287da7b53 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index e6c65ba99d9..9eb8bc78979 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index a7156214a95..6cf383b4f7d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go index cdaaf620786..08a9c7ceba4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go index 1b6bfc311ba..0b500c43ebd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go index 0aadc16b42a..64fdd845ff6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go index 11470696d07..33de3d3047d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go index 42de5487477..51dd9ba6a53 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go index 958da4a92a6..9b650eacfc9 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go index f506fc3468a..9b61a750661 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go index 936008d4a64..76ebf316691 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index 8591128af74..0c7e49f0d6d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index 0616691bb52..1bcfaf88e25 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index 6fdb04379ed..89494ed13ef 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index 1aedea6ea4f..1602f7fce7a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go index 643988bb823..0058c37e4b5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go index 461b5466538..fe8c2617297 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go index 929d0352937..0f33d6c567e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index 7038bcf0dd9..0167d5396ea 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index a04a2f89ac6..476a6973059 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go index d7f80c00423..8820cfb4633 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go index 87932b58d74..9d4259e4382 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index 95e252937c9..d3d500b5e1c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index e8e611a954a..05da7329ce7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go index cdaaf620786..08a9c7ceba4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go index 8c82c186a52..fdf18546adb 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go index cd0d80d4332..e3a5c47b6d1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go index 3eb42e01871..832addf07f9 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index 2b77c44eb4a..0422dc96ab7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go index 030e9db3298..a36c44928df 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go index cdaaf620786..08a9c7ceba4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go index 292458ee4ef..56f52b0e6c4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go index 6feb9b218f1..dc926bb7ed9 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go index d599b2935c2..dc95d90d1a0 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go index ce38680831d..38434cfa565 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go index 595b23f0449..153069dbdb8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go index 97972f0f74f..5d7cf3eda14 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go index 7e3378ad739..9e9effb2314 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go index 39df9fb8791..c378b8b2178 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index 1102653ae05..b010a58c974 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 4e7c5b71b01..6d9da9b42d5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go index cdaaf620786..08a9c7ceba4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go index 1d06eba6b8a..558b6fab5d7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go index 5213d10bee4..daa75e5a7c2 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go index afa636a2f09..f50925a4306 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index 3e8c70bf1e9..a8d65f8fc3c 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index 08bdfb2596d..fd0c3d4bf4f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go index a178091d98e..a8bd0e215a5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index 9a830881a5d..1c80168301f 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go index 6f3f0c55e65..3aec572fadd 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 4db3d137edd..c4d8baa1901 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index 7c900b3e3da..bf18a6c87bb 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go index fb3b0098749..827c06371ca 100644 --- a/staging/src/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go b/staging/src/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go index 60b004ef9f7..4fa845728d6 100644 --- a/staging/src/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go +++ b/staging/src/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go index c9bf0fa5da6..42d7e50ad5f 100644 --- a/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 753dd18565c..2bb3bccb810 100644 --- a/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 6cb6067a167..910e4f48e7d 100644 --- a/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1/controllerrevision.go b/staging/src/k8s.io/client-go/listers/apps/v1/controllerrevision.go index c05d14c2543..ce53507f018 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1/daemonset.go b/staging/src/k8s.io/client-go/listers/apps/v1/daemonset.go index 307f8bc7c6c..527cab1c927 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1/daemonset.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/apps/v1/deployment.go b/staging/src/k8s.io/client-go/listers/apps/v1/deployment.go index 36af9009478..85c5668617f 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1/deployment.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/apps/v1/expansion_generated.go index 48917c2c051..447c08796e2 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1/replicaset.go b/staging/src/k8s.io/client-go/listers/apps/v1/replicaset.go index 7e316d6b4dd..d68f15ce435 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1/replicaset.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1/statefulset.go b/staging/src/k8s.io/client-go/listers/apps/v1/statefulset.go index fe584038e25..b4bdff2e9a5 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1/statefulset.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go b/staging/src/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go index f3c85bfa77c..c0db171c011 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta1/deployment.go b/staging/src/k8s.io/client-go/listers/apps/v1beta1/deployment.go index f59f3a96248..fb1796bad0a 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go index 441ceecdd43..338fcd6334c 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta1/scale.go b/staging/src/k8s.io/client-go/listers/apps/v1beta1/scale.go index ec9a419a096..a0e7086e179 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta1/scale.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta1/scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta1/statefulset.go b/staging/src/k8s.io/client-go/listers/apps/v1beta1/statefulset.go index f10ef7318c9..2fc4042b44e 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta1/statefulset.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta1/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go b/staging/src/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go index f9f1ef06a31..02ad95d6cbf 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta2/daemonset.go b/staging/src/k8s.io/client-go/listers/apps/v1beta2/daemonset.go index cbdb13ef54f..c05957b4cba 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta2/daemonset.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta2/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta2/deployment.go b/staging/src/k8s.io/client-go/listers/apps/v1beta2/deployment.go index 0778a9fdd4e..7184a7468e9 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta2/deployment.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta2/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go b/staging/src/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go index 6db63d4b094..846f1f621a9 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta2/replicaset.go b/staging/src/k8s.io/client-go/listers/apps/v1beta2/replicaset.go index f76e2eeb546..8cdf0dccdf3 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta2/replicaset.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta2/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta2/scale.go b/staging/src/k8s.io/client-go/listers/apps/v1beta2/scale.go index 11cb3e19569..27e76c36673 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta2/scale.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta2/scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta2/statefulset.go b/staging/src/k8s.io/client-go/listers/apps/v1beta2/statefulset.go index 13ef28f8536..fd051043504 100644 --- a/staging/src/k8s.io/client-go/listers/apps/v1beta2/statefulset.go +++ b/staging/src/k8s.io/client-go/listers/apps/v1beta2/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/authentication/v1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/authentication/v1/expansion_generated.go index 3dcc8028926..ae7db9abc96 100644 --- a/staging/src/k8s.io/client-go/listers/authentication/v1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/authentication/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/authentication/v1/tokenreview.go b/staging/src/k8s.io/client-go/listers/authentication/v1/tokenreview.go index cfae0476f37..aa060720518 100644 --- a/staging/src/k8s.io/client-go/listers/authentication/v1/tokenreview.go +++ b/staging/src/k8s.io/client-go/listers/authentication/v1/tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/authentication/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/authentication/v1beta1/expansion_generated.go index 2ce7f7f92eb..525e8b91e3b 100644 --- a/staging/src/k8s.io/client-go/listers/authentication/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/authentication/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/authentication/v1beta1/tokenreview.go b/staging/src/k8s.io/client-go/listers/authentication/v1beta1/tokenreview.go index 035ad62da3d..b377215b7ff 100644 --- a/staging/src/k8s.io/client-go/listers/authentication/v1beta1/tokenreview.go +++ b/staging/src/k8s.io/client-go/listers/authentication/v1beta1/tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/authorization/v1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/authorization/v1/expansion_generated.go index 0c91dd0c1c2..e0fa0208157 100644 --- a/staging/src/k8s.io/client-go/listers/authorization/v1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/authorization/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/authorization/v1/localsubjectaccessreview.go b/staging/src/k8s.io/client-go/listers/authorization/v1/localsubjectaccessreview.go index d0729f11206..6ef9963a058 100644 --- a/staging/src/k8s.io/client-go/listers/authorization/v1/localsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/listers/authorization/v1/localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/authorization/v1/selfsubjectaccessreview.go b/staging/src/k8s.io/client-go/listers/authorization/v1/selfsubjectaccessreview.go index 64ff1744be4..29481b71c9c 100644 --- a/staging/src/k8s.io/client-go/listers/authorization/v1/selfsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/listers/authorization/v1/selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/authorization/v1/selfsubjectrulesreview.go b/staging/src/k8s.io/client-go/listers/authorization/v1/selfsubjectrulesreview.go index 63ad0f1ff74..42b54197b2b 100644 --- a/staging/src/k8s.io/client-go/listers/authorization/v1/selfsubjectrulesreview.go +++ b/staging/src/k8s.io/client-go/listers/authorization/v1/selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/authorization/v1/subjectaccessreview.go b/staging/src/k8s.io/client-go/listers/authorization/v1/subjectaccessreview.go index f389e07b1fb..1ebffcd6586 100644 --- a/staging/src/k8s.io/client-go/listers/authorization/v1/subjectaccessreview.go +++ b/staging/src/k8s.io/client-go/listers/authorization/v1/subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/authorization/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/authorization/v1beta1/expansion_generated.go index 771d7740000..57c5ce75782 100644 --- a/staging/src/k8s.io/client-go/listers/authorization/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/authorization/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/authorization/v1beta1/localsubjectaccessreview.go b/staging/src/k8s.io/client-go/listers/authorization/v1beta1/localsubjectaccessreview.go index b2478381bf6..984c505fe2a 100644 --- a/staging/src/k8s.io/client-go/listers/authorization/v1beta1/localsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/listers/authorization/v1beta1/localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/authorization/v1beta1/selfsubjectaccessreview.go b/staging/src/k8s.io/client-go/listers/authorization/v1beta1/selfsubjectaccessreview.go index 4936b9b6c99..cbbcc0552c5 100644 --- a/staging/src/k8s.io/client-go/listers/authorization/v1beta1/selfsubjectaccessreview.go +++ b/staging/src/k8s.io/client-go/listers/authorization/v1beta1/selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/authorization/v1beta1/selfsubjectrulesreview.go b/staging/src/k8s.io/client-go/listers/authorization/v1beta1/selfsubjectrulesreview.go index 8eb7a21b506..ea885a6ec10 100644 --- a/staging/src/k8s.io/client-go/listers/authorization/v1beta1/selfsubjectrulesreview.go +++ b/staging/src/k8s.io/client-go/listers/authorization/v1beta1/selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/authorization/v1beta1/subjectaccessreview.go b/staging/src/k8s.io/client-go/listers/authorization/v1beta1/subjectaccessreview.go index 4b155de1575..21dec7cd2a3 100644 --- a/staging/src/k8s.io/client-go/listers/authorization/v1beta1/subjectaccessreview.go +++ b/staging/src/k8s.io/client-go/listers/authorization/v1beta1/subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go index f7b00603f5d..831f9adff1f 100644 --- a/staging/src/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go index 48012203eae..d9a840385be 100644 --- a/staging/src/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go index 9e84ef13e94..dc3da8a0ec3 100644 --- a/staging/src/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go index c8fbdecd710..b1b593931f6 100644 --- a/staging/src/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/batch/v1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/batch/v1/expansion_generated.go index 38b7e2720e2..6abde7f8912 100644 --- a/staging/src/k8s.io/client-go/listers/batch/v1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/batch/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/batch/v1/job.go b/staging/src/k8s.io/client-go/listers/batch/v1/job.go index 89280d9fa6c..948d53a734b 100644 --- a/staging/src/k8s.io/client-go/listers/batch/v1/job.go +++ b/staging/src/k8s.io/client-go/listers/batch/v1/job.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/batch/v1beta1/cronjob.go b/staging/src/k8s.io/client-go/listers/batch/v1beta1/cronjob.go index a8fa51ecfb0..10c3c3839b6 100644 --- a/staging/src/k8s.io/client-go/listers/batch/v1beta1/cronjob.go +++ b/staging/src/k8s.io/client-go/listers/batch/v1beta1/cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go index 3d84d249a59..debf39dcdcf 100644 --- a/staging/src/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go b/staging/src/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go index 51f5eef5549..fe144014a44 100644 --- a/staging/src/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go +++ b/staging/src/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go index 38ac70cdfbb..f630c1a7cee 100644 --- a/staging/src/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go b/staging/src/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go index 425dc6b4d94..08550be05d2 100644 --- a/staging/src/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go +++ b/staging/src/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go index c240be44ff0..5c241556b0d 100644 --- a/staging/src/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/componentstatus.go b/staging/src/k8s.io/client-go/listers/core/v1/componentstatus.go index 6ba67d0bd56..76f097f3769 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/componentstatus.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/componentstatus.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/core/v1/configmap.go b/staging/src/k8s.io/client-go/listers/core/v1/configmap.go index e976928d935..6e45dfc7ae3 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/configmap.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/configmap.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/endpoints.go b/staging/src/k8s.io/client-go/listers/core/v1/endpoints.go index 6f5a1133c73..f6215474d48 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/endpoints.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/endpoints.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/event.go b/staging/src/k8s.io/client-go/listers/core/v1/event.go index b087cd8bd95..533e656c310 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/event.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/core/v1/expansion_generated.go index a96db8dc966..247610a8cc1 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/limitrange.go b/staging/src/k8s.io/client-go/listers/core/v1/limitrange.go index f19943751a0..f872726b3cf 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/limitrange.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/limitrange.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/namespace.go b/staging/src/k8s.io/client-go/listers/core/v1/namespace.go index 21be6878a38..909b4a87c28 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/namespace.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/node.go b/staging/src/k8s.io/client-go/listers/core/v1/node.go index d43a682c90b..3c79200f31b 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/node.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/node.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/core/v1/persistentvolume.go b/staging/src/k8s.io/client-go/listers/core/v1/persistentvolume.go index 593ba14ed18..f3231d2e3e2 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/persistentvolume.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/persistentvolume.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go b/staging/src/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go index 72ddac93e6f..e7298f18b23 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/pod.go b/staging/src/k8s.io/client-go/listers/core/v1/pod.go index 6cf4a8424a1..0762cd802de 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/pod.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/pod.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/podtemplate.go b/staging/src/k8s.io/client-go/listers/core/v1/podtemplate.go index d825c7475ae..14774acbf83 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/podtemplate.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/podtemplate.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/replicationcontroller.go b/staging/src/k8s.io/client-go/listers/core/v1/replicationcontroller.go index 6670a9d9253..a1cbe21fa53 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/replicationcontroller.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/replicationcontroller.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/resourcequota.go b/staging/src/k8s.io/client-go/listers/core/v1/resourcequota.go index 713a41511b9..3da91223b8d 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/resourcequota.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/secret.go b/staging/src/k8s.io/client-go/listers/core/v1/secret.go index 26ef13d9ebe..8f7ce949cae 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/secret.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/secret.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/core/v1/service.go b/staging/src/k8s.io/client-go/listers/core/v1/service.go index 895a6922310..5b464ac2f50 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/service.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/service.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/core/v1/serviceaccount.go b/staging/src/k8s.io/client-go/listers/core/v1/serviceaccount.go index 2245d5d43d0..35de315e2ee 100644 --- a/staging/src/k8s.io/client-go/listers/core/v1/serviceaccount.go +++ b/staging/src/k8s.io/client-go/listers/core/v1/serviceaccount.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/events/v1beta1/event.go b/staging/src/k8s.io/client-go/listers/events/v1beta1/event.go index bca3c452adf..0f1dcfe5013 100644 --- a/staging/src/k8s.io/client-go/listers/events/v1beta1/event.go +++ b/staging/src/k8s.io/client-go/listers/events/v1beta1/event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go index 7e8fb62b1bc..dae23607bf5 100644 --- a/staging/src/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go index 4672a5cb990..aa6741df2bf 100644 --- a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go +++ b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/deployment.go b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/deployment.go index 4c17085dd7e..09ce2e20a2a 100644 --- a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go index 060c7a35aec..2c99c42ca46 100644 --- a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/ingress.go b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/ingress.go index 5615dfccc39..8489e0b2491 100644 --- a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/ingress.go +++ b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go index 3189ff7c9da..8b44f179318 100644 --- a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go index 44de996e4fd..111bc02a6be 100644 --- a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go +++ b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/scale.go b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/scale.go index a027af8283f..2f6c9f977f0 100644 --- a/staging/src/k8s.io/client-go/listers/extensions/v1beta1/scale.go +++ b/staging/src/k8s.io/client-go/listers/extensions/v1beta1/scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/imagepolicy/v1alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/imagepolicy/v1alpha1/expansion_generated.go index 6f635d70489..a4691152840 100644 --- a/staging/src/k8s.io/client-go/listers/imagepolicy/v1alpha1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/imagepolicy/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/imagepolicy/v1alpha1/imagereview.go b/staging/src/k8s.io/client-go/listers/imagepolicy/v1alpha1/imagereview.go index 8ffd9d05f61..00fa27f7449 100644 --- a/staging/src/k8s.io/client-go/listers/imagepolicy/v1alpha1/imagereview.go +++ b/staging/src/k8s.io/client-go/listers/imagepolicy/v1alpha1/imagereview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/networking/v1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/networking/v1/expansion_generated.go index 91fe5e772f8..f8a9d153389 100644 --- a/staging/src/k8s.io/client-go/listers/networking/v1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/networking/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/networking/v1/networkpolicy.go b/staging/src/k8s.io/client-go/listers/networking/v1/networkpolicy.go index 59e17eecbd1..c385aac625b 100644 --- a/staging/src/k8s.io/client-go/listers/networking/v1/networkpolicy.go +++ b/staging/src/k8s.io/client-go/listers/networking/v1/networkpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/policy/v1beta1/eviction.go b/staging/src/k8s.io/client-go/listers/policy/v1beta1/eviction.go index 742775f64b5..2dc7f575744 100644 --- a/staging/src/k8s.io/client-go/listers/policy/v1beta1/eviction.go +++ b/staging/src/k8s.io/client-go/listers/policy/v1beta1/eviction.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go index 4785fbc06f6..090199adeb2 100644 --- a/staging/src/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go b/staging/src/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go index 6512f29f125..dfad3515a5e 100644 --- a/staging/src/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go +++ b/staging/src/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1/clusterrole.go b/staging/src/k8s.io/client-go/listers/rbac/v1/clusterrole.go index 5dc9a225e70..c806bb610ee 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1/clusterrole.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go b/staging/src/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go index bb3186a067b..b7d43faea6b 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/rbac/v1/expansion_generated.go index 4d9872d3e25..998b5739a45 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1/role.go b/staging/src/k8s.io/client-go/listers/rbac/v1/role.go index 8d7625dbe30..7f6e43bfb59 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1/role.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1/rolebinding.go b/staging/src/k8s.io/client-go/listers/rbac/v1/rolebinding.go index b8209d85120..f32f6e4cc4d 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1/rolebinding.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go b/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go index 9e20a6d1629..cf6ec258794 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go b/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go index 155666aba02..9ddd6bf3401 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go index 0ab4fb991f1..0ab9deba2d8 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/role.go b/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/role.go index 72ab79c9443..5aad1e1c804 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/role.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go b/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go index 7f9cfd45831..f1a539e67a1 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go b/staging/src/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go index 65ec3eb978f..4e3bd46bb60 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go b/staging/src/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go index 146f2d7f294..911c8616738 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go index b6eeae833a9..9352061dd20 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1beta1/role.go b/staging/src/k8s.io/client-go/listers/rbac/v1beta1/role.go index b795e98b029..694a29759f2 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1beta1/role.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1beta1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go b/staging/src/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go index d27ea2eb59d..8feb8358f15 100644 --- a/staging/src/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go +++ b/staging/src/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go index 8a644c804ed..a92884a8cdb 100644 --- a/staging/src/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go b/staging/src/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go index 9ed04fd2ae1..1dbe2f24281 100644 --- a/staging/src/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go +++ b/staging/src/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go index 7a5ce38e92c..39f5b2d5704 100644 --- a/staging/src/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go b/staging/src/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go index 18f62249313..fc8cbbe3838 100644 --- a/staging/src/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go +++ b/staging/src/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/storage/v1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/storage/v1/expansion_generated.go index 2353b59d3f5..a4141351dbe 100644 --- a/staging/src/k8s.io/client-go/listers/storage/v1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/storage/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/storage/v1/storageclass.go b/staging/src/k8s.io/client-go/listers/storage/v1/storageclass.go index 7c37321fd9e..023a55d52fc 100644 --- a/staging/src/k8s.io/client-go/listers/storage/v1/storageclass.go +++ b/staging/src/k8s.io/client-go/listers/storage/v1/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go index 63abe94ab22..ca8816b4d33 100644 --- a/staging/src/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go b/staging/src/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go index 02004629a67..2300bd24a25 100644 --- a/staging/src/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go +++ b/staging/src/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go index 84e0f9c440b..e4ec7df6dad 100644 --- a/staging/src/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/listers/storage/v1beta1/storageclass.go b/staging/src/k8s.io/client-go/listers/storage/v1beta1/storageclass.go index 9253319bec6..f52ac413c9e 100644 --- a/staging/src/k8s.io/client-go/listers/storage/v1beta1/storageclass.go +++ b/staging/src/k8s.io/client-go/listers/storage/v1beta1/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/rest/zz_generated.deepcopy.go b/staging/src/k8s.io/client-go/rest/zz_generated.deepcopy.go index 59050fc491a..02d3b606e5c 100644 --- a/staging/src/k8s.io/client-go/rest/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go b/staging/src/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go index 93e0e61640d..85e497e7f02 100644 --- a/staging/src/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go b/staging/src/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go index 410a0d90c5e..faa7319d71e 100644 --- a/staging/src/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go +++ b/staging/src/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go b/staging/src/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go index 1eaa0d18027..8b0d525624e 100644 --- a/staging/src/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go +++ b/staging/src/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go b/staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go index 848cb8d2aa3..1c8e68dc59f 100644 --- a/staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go b/staging/src/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go index 19b67958350..61c553df6dd 100644 --- a/staging/src/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go index 8d634671b05..dbd943d151d 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/staging/src/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go index 51668f05bbe..e575b23d724 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.conversion.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.conversion.go index 183921e9dc7..85dd5b954e6 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.conversion.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.deepcopy.go index 35ebeb23743..96771586c79 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.defaults.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.defaults.go index 6df448eb9fd..88d7af085be 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.defaults.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/zz_generated.deepcopy.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/zz_generated.deepcopy.go index c1a8a027912..7f46d2bbe20 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.conversion.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.conversion.go index 6a62f96dd43..738d2d9bc84 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.conversion.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.deepcopy.go index 35ebeb23743..96771586c79 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.defaults.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.defaults.go index 6df448eb9fd..88d7af085be 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.defaults.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/zz_generated.deepcopy.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/zz_generated.deepcopy.go index 0039b2702c9..55746bfeb5e 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/clientset.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/clientset.go index ec9559eaaa9..cf71a677266 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/clientset.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/doc.go index 5a63b0e724b..6a6cae177b1 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go index 8b474ba6d7e..62ab5446f21 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/register.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/register.go index b1dac6b54bb..910e117d7ed 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/register.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go index 97269e17f51..5ee4dddba20 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/example_client.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/example_client.go index b2c73bb9a00..906aa3f47a8 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/example_client.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/example_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_example_client.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_example_client.go index c0973fb8f29..b8c081d9554 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_example_client.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_example_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go index df00542e9b6..bee016ecf5c 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/generated_expansion.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/generated_expansion.go index 163992082d6..189d4e23f26 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/generated_expansion.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go index ad30acf3ef8..5077c0ca77c 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/example2_client.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/example2_client.go index 2aba4085781..4939e1ec234 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/example2_client.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/example2_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_example2_client.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_example2_client.go index 76011b4b520..ea617dc8efe 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_example2_client.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_example2_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go index 24f8989b1e0..0691a49567a 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/generated_expansion.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/generated_expansion.go index 163992082d6..189d4e23f26 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/generated_expansion.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go index abfc86affc5..10725f494bc 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go index 2d403968194..86cca3b19fd 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/doc.go index 7d2f4d80d3a..9c6dfafa732 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go index 5cadbe26140..2a692a5ca46 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/register.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/register.go index de6b67abc5e..b4a0ec52fa8 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/register.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/register.go index d56db85bde0..cad0ee75cb5 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/example_client.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/example_client.go index c9512a69ca7..407fa9e2662 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/example_client.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/example_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_example_client.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_example_client.go index 51e0c93e328..1a86363746c 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_example_client.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_example_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go index 2c1b295db74..063d8cc79d8 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/generated_expansion.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/generated_expansion.go index 426aa27c768..4e2775e3a69 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/generated_expansion.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go index 05761001ac9..70fded42a2e 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/example2_client.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/example2_client.go index a650d87b833..552a87f8af3 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/example2_client.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/example2_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_example2_client.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_example2_client.go index 8095faf8238..43ad4e988cc 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_example2_client.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_example2_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go index 31a51e7380c..12bbdcbe529 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/generated_expansion.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/generated_expansion.go index 426aa27c768..4e2775e3a69 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/generated_expansion.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go index 00e68ae0885..fa39745adf4 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/interface.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/interface.go index 74626cd1e15..21d8874a0db 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/interface.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/interface.go index 613797aea26..21e19c623b6 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go index e6c7fca347a..0beb10fa72c 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/interface.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/interface.go index bb0656b32eb..c6a0444687f 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/interface.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/interface.go index 613797aea26..21e19c623b6 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go index 4f90a6c68a7..c07397c98ab 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go index da22407d4e2..6b77479ee51 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go index 33b226db0f8..6fd29b0b41f 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go index dd26ca972d4..31df120f6a6 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/interface.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/interface.go index 646527b5a02..836e7e01b6c 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/interface.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/interface.go index 5fb90301bad..5abe3852308 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go index c25308cf8ad..5c1c1fe8dc7 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/interface.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/interface.go index 71d3de4664a..0656ffbdcdc 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/interface.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/interface.go index 5fb90301bad..5abe3852308 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go index 5b10a96ed37..2c00c3d8269 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go index cc7b7551d09..fee4e7910b3 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go index f93c4d8e1a9..bddd60f7b72 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go index 52c5ba46967..973d73d1de3 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/internalversion/expansion_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/internalversion/expansion_generated.go index 6fb3372a39a..f308c16734c 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/internalversion/expansion_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/internalversion/testtype.go index 47c28eae75e..ccfe8f80fa1 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/internalversion/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/v1/expansion_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/v1/expansion_generated.go index f4ece18ccbe..d258c22f06b 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/v1/expansion_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/v1/testtype.go index 0fbf7df1c00..5e420f12cf0 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/internalversion/expansion_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/internalversion/expansion_generated.go index 6fb3372a39a..f308c16734c 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/internalversion/expansion_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/internalversion/testtype.go index 0a645aaf101..35abf86610d 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/internalversion/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/v1/expansion_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/v1/expansion_generated.go index f4ece18ccbe..d258c22f06b 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/v1/expansion_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/v1/testtype.go index db56b8074f6..65f5a58b93e 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example2/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/apis/example/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/code-generator/_examples/crd/apis/example/v1/zz_generated.deepcopy.go index 35ebeb23743..96771586c79 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/apis/example/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/apis/example/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/apis/example2/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/code-generator/_examples/crd/apis/example2/v1/zz_generated.deepcopy.go index 35ebeb23743..96771586c79 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/apis/example2/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/apis/example2/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go index 9d9697fe8a4..c0c581df828 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/doc.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/doc.go index 7d2f4d80d3a..9c6dfafa732 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go index 04135606b8c..7fd60de7b26 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/doc.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/register.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/register.go index 9780e030d75..79d9250433e 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/scheme/doc.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/scheme/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/scheme/register.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/scheme/register.go index 6edae5dc48a..1b55c1cf0d3 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/doc.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/example_client.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/example_client.go index 6382db1d1ee..1e282aa17ae 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/example_client.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/example_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/doc.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_example_client.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_example_client.go index caa8af94ca3..c7bf2dfc421 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_example_client.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_example_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go index 067f69931b1..124396f2f25 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/generated_expansion.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/generated_expansion.go index 426aa27c768..4e2775e3a69 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/generated_expansion.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go index f5aa7c6b218..460b68abfb8 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/doc.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/doc.go index b6a2a467285..95b44dfa87f 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/example2_client.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/example2_client.go index 373fad03734..a753b9689b6 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/example2_client.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/example2_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/doc.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_example2_client.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_example2_client.go index 05ec52f32ad..89e64d6484d 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_example2_client.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_example2_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go index 594c69d1eca..ccba168aa3b 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/generated_expansion.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/generated_expansion.go index 426aa27c768..4e2775e3a69 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/generated_expansion.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go index ca9ce1c54d1..bf6b4de43c9 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/interface.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/interface.go index 26d9165d27b..d74e15f5cf7 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/interface.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/interface.go index ff4e55ec920..d0a447bda6d 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go index e8e305df7e4..7fe59ce3e3b 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/interface.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/interface.go index 863361fa3c2..d58b0edb4d3 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/interface.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/interface.go index ff4e55ec920..d0a447bda6d 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/interface.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go index 11ca2b9b2bd..303b3473642 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/factory.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/factory.go index ff06d7c32a5..2d0b35f9694 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/factory.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/generic.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/generic.go index 3e11410876f..25acc5078e1 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/generic.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go index 18b364af986..b8da4f9fc75 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/listers/example/v1/expansion_generated.go b/staging/src/k8s.io/code-generator/_examples/crd/listers/example/v1/expansion_generated.go index f4ece18ccbe..d258c22f06b 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/listers/example/v1/expansion_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/listers/example/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/listers/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/listers/example/v1/testtype.go index d452e9ec432..ce132fd2f9c 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/listers/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/listers/example/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/listers/example2/v1/expansion_generated.go b/staging/src/k8s.io/code-generator/_examples/crd/listers/example2/v1/expansion_generated.go index f4ece18ccbe..d258c22f06b 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/listers/example2/v1/expansion_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/listers/example2/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/code-generator/_examples/crd/listers/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/listers/example2/v1/testtype.go index 1e1c0efd1d5..a7ad6062cc1 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/listers/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/listers/example2/v1/testtype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go index 53162a52871..881f5647e9e 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto index dabc9e0fec3..9a069e2c5e4 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go index e283d20b7c7..27385cbb226 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go index dd64fbd2115..b449aad3464 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go index ee9db9426ef..629193beb86 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/clientset.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/clientset.go index 2770ab732a9..bb1570f10d5 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/clientset.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/doc.go index 7f670fed477..05edbcd1c2a 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go index bae14284200..3a6435645ce 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go index 08c83e75d70..76706cdf32a 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go index 0b5de5ba5b1..9e93e1be9e6 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiregistration_client.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiregistration_client.go index c7df301b84f..bd74a832b1c 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiregistration_client.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiregistration_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go index 5ff6b2313d6..3cd2130c7aa 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiregistration_client.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiregistration_client.go index f3d12953bc3..1febe7c761f 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiregistration_client.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiregistration_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go index d2fd96fec42..fb24961cbb5 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/generated_expansion.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/generated_expansion.go index 2f3741792eb..c883a457021 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/clientset.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/clientset.go index 1c16da25441..9a55bbe63c7 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/clientset.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/doc.go index b667dd5157a..4ede718dee1 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go index d938683de71..6e304607749 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/register.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/register.go index 89ddd7e7c33..f494b0b56d1 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/register.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/register.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/register.go index df7b9ba2713..37cf87b0984 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/register.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/apiregistration_client.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/apiregistration_client.go index 1c0fc64a725..67d398f2504 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/apiregistration_client.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/apiregistration_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/apiservice.go index 8d4dad86e2b..6ecb68572de 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/apiservice.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/fake_apiregistration_client.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/fake_apiregistration_client.go index 3525e01db70..0cdcfcba9aa 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/fake_apiregistration_client.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/fake_apiregistration_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/fake_apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/fake_apiservice.go index 1dfbcab9543..151cc90daa6 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/fake_apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/fake/fake_apiservice.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/generated_expansion.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/generated_expansion.go index 84d4644188c..22fbf1acb05 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/generated_expansion.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/interface.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/interface.go index c0af56d3589..fe844982044 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/interface.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go index 291fdc536b4..a73586143d8 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/interface.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/interface.go index b89a77964ee..a234db31c7f 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/interface.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go index 15100697973..321a5aea285 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/generic.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/generic.go index ffd303c7f26..2236d78af9a 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/generic.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 66970555034..8becb25b4e9 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/interface.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/interface.go index 9d5c0dda9cc..11e3b1bab66 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/interface.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go index f0e0cfdebca..6987342a674 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/interface.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/interface.go index a1d6f9da5ea..6d65d490231 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/interface.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go index 141c95f35f1..6a524bfc76e 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go index e82fcf4b84c..4ef2786d3c9 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go index 9cbdb8008e9..244ae5c9df1 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/apiservice.go index 675d8228a96..c3baf3d8c0f 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/apiservice.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/expansion_generated.go b/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/expansion_generated.go index 9a845ee0fa9..499f8ddc4b4 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/expansion_generated.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/apiservice.go index 4ccaa66956d..0babf3672f1 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/apiservice.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/expansion_generated.go b/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/expansion_generated.go index 12f959ed47d..9988cb28c23 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/expansion_generated.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/generated.pb.go b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/generated.pb.go index 2a10d06d4b6..be45d55320d 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/generated.proto b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/generated.proto index 35789c2d4c6..1b8ad2f7910 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/generated.proto +++ b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.conversion.go index ce07cbb0216..e169bb0d739 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.deepcopy.go index 615905874cb..00130c62909 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/zz_generated.deepcopy.go b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/zz_generated.deepcopy.go index 546411634fc..a74e50b2ddb 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go index 7fc447d5234..c92780182f3 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto index b0e1d9ed2a4..8546cf8be28 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go index 3ec0eba6b32..cb04bee119e 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go index d15c9ce8e46..3da622e3ec5 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go index 115df3ed250..184e7aa32db 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto index dad68d4d169..1d414f5f57e 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go index c44a1d5ce3c..f6a1c47e773 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go index 2a0571a9027..069ab546d94 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go b/staging/src/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go index d551298c2d5..0bb9936ee23 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/clientset.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/clientset.go index fa5da317aa8..8ca71f5722a 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/clientset.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/doc.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/doc.go index 7f670fed477..05edbcd1c2a 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/doc.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go index a3e65fa49f1..6f60466ddf4 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/doc.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/doc.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/register.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/register.go index ebf9a844b3e..14804fd51b5 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/register.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/doc.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/doc.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/register.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/register.go index 74d09ac8645..772cd9d19ba 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/register.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/doc.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/doc.go index cdaaf620786..08a9c7ceba4 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/doc.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/doc.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/doc.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_metrics_client.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_metrics_client.go index c81439eb6f4..450af71b72c 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_metrics_client.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_metrics_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_nodemetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_nodemetrics.go index f9e5c8849fd..1984cbaf8fe 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_nodemetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_nodemetrics.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_podmetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_podmetrics.go index 6bfc0675c53..91d6523441a 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_podmetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake/fake_podmetrics.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/generated_expansion.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/generated_expansion.go index 919bdc2677e..f325d33dcd4 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/generated_expansion.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/metrics_client.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/metrics_client.go index b838b83e1ee..f31235ee5b7 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/metrics_client.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/metrics_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/nodemetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/nodemetrics.go index 4883f07c38d..b2ea591f7df 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/nodemetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/nodemetrics.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/podmetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/podmetrics.go index 0fbb38c68bc..c0b617e228c 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/podmetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/podmetrics.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/doc.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/doc.go index 1b50aa19970..35b3db3f354 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/doc.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/doc.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/doc.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_metrics_client.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_metrics_client.go index 17b070a58d4..44b2d4d470f 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_metrics_client.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_metrics_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_nodemetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_nodemetrics.go index bbefda32584..5df8c4be3db 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_nodemetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_nodemetrics.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_podmetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_podmetrics.go index 14c8c388c7e..2fea838dbf4 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_podmetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/fake/fake_podmetrics.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/generated_expansion.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/generated_expansion.go index 6c64173c476..419975b555b 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/generated_expansion.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/metrics_client.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/metrics_client.go index 53fe0d87f60..a540a03c4a6 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/metrics_client.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/metrics_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/nodemetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/nodemetrics.go index d23bc4053c3..18adf9dd40c 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/nodemetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/nodemetrics.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/podmetrics.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/podmetrics.go index 459664cbf19..5d5e19e039e 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/podmetrics.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/podmetrics.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.conversion.go index 8a4fbab3321..ad1f0d83c4b 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.deepcopy.go index 7be1df5b7ba..2fe16ecc977 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.defaults.go b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.defaults.go index 7e6df29d4ae..5e24d22cacd 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.defaults.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/zz_generated.deepcopy.go b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/zz_generated.deepcopy.go index a9cfabd34d6..851821e68b9 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/clientset.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/clientset.go index 0e5459813db..e071d1590ae 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/clientset.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/doc.go index 5a63b0e724b..6a6cae177b1 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go index b4030b659e1..5b7346494b7 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/register.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/register.go index d22f921cdc1..3d0a24bb304 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/register.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go index 85772f9a952..a0fc576c663 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/doc.go index 3adf06d8934..8615019757d 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_fischer.go index 30a3725c330..88a40551d57 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_fischer.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_flunder.go index 89e217e5c01..d911afd9770 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_flunder.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_wardle_client.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_wardle_client.go index 1d447b3d40c..3abbc4d86f5 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_wardle_client.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake/fake_wardle_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fischer.go index 9ee3f03b73e..cd030c7a64e 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fischer.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/flunder.go index 6ed19e118eb..0d6748e1f11 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/flunder.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/generated_expansion.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/generated_expansion.go index 1d7892b7735..53c098daadc 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/generated_expansion.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/wardle_client.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/wardle_client.go index 7f0bea92bb2..02aa4727925 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/wardle_client.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/wardle_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/clientset.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/clientset.go index 2fb69551ed0..3637dee81db 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/doc.go index 7d2f4d80d3a..9c6dfafa732 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go index 6b627a3c7a3..5fce6f43f92 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/register.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/register.go index efe631b0779..9933ce8c25a 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/scheme/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/scheme/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/scheme/register.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/scheme/register.go index 7df989f1141..8d2a3772b5f 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/doc.go index cdaaf620786..08a9c7ceba4 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_fischer.go index c75a5940ad7..71211bb12a1 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_fischer.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_flunder.go index 3e19debe531..bc5bab9d608 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_flunder.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_wardle_client.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_wardle_client.go index 6c7e27b5a75..c815d546260 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_wardle_client.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake/fake_wardle_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fischer.go index d7dff8c6a34..459d0eac419 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fischer.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/flunder.go index 06de3c5ed1e..97552af9260 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/flunder.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/generated_expansion.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/generated_expansion.go index 233381c19e8..fb9790cd9e7 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/generated_expansion.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/wardle_client.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/wardle_client.go index 6f646d08cfb..b9b4867e536 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/wardle_client.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/wardle_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/factory.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/factory.go index 1db54b25ec9..498c864a190 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/factory.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/generic.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/generic.go index f27d806ffd3..2ac41c341a2 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/generic.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index b1ccf0b1849..23cb3ff63ab 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/interface.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/interface.go index 818e02ee18c..29ed2a0076a 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/interface.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/fischer.go index bea7a84175a..dfefb18466b 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/fischer.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/flunder.go index 99949638fd3..df0cbd92946 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/flunder.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/interface.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/interface.go index 11e117b07aa..6b4e70d6b57 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/interface.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go index f8ded4cb719..4fabf593a57 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go index 56df2a786a0..18c673087b4 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go index 249e618b3e8..d6dc4ab7964 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/interface.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/interface.go index 331860b4b5e..701c97772ba 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/interface.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go index 76107758576..8c2b54daf60 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go index fe6324b7799..b9019e3e107 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/interface.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/interface.go index bb986f03edd..2af8588cdce 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/interface.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/expansion_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/expansion_generated.go index d9421450c75..da059bb6311 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/expansion_generated.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/fischer.go index 9968bade8ca..6f181bda919 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/fischer.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/flunder.go index 32447b480eb..56197050aba 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion/flunder.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/expansion_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/expansion_generated.go index 0bb761ff63a..cf945e828bf 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/expansion_generated.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/fischer.go index e5d452cf175..a6b38c917b2 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/fischer.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/flunder.go index 942896a38c8..90eebcd42fa 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1/flunder.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/zz_generated.deepcopy.go index 1022261f542..ad850baa4c8 100644 --- a/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/clientset.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/clientset.go index ee1011cb303..f73c0a1c7d4 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/clientset.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/doc.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/doc.go index 7d2f4d80d3a..9c6dfafa732 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/doc.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go index 864cfe59b70..07930962879 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/doc.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/doc.go index 3fd8e1e2cdc..8a3101e3981 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/doc.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/register.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/register.go index 97db71e50e4..0d178ac73db 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme/doc.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme/doc.go index 3ec2200d099..3d3ab5f4edf 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme/doc.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme/register.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme/register.go index 6afb0553192..572b50de277 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/doc.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/doc.go index cdaaf620786..08a9c7ceba4 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/doc.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/doc.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/doc.go index c58fac35e4b..63e2c8a0821 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/doc.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_foo.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_foo.go index 66a5dfbf735..2089d53a365 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_foo.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_foo.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_samplecontroller_client.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_samplecontroller_client.go index 3d04e5df30b..07b7e8f3089 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_samplecontroller_client.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake/fake_samplecontroller_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/foo.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/foo.go index 1c631da7a59..9900ef03e5a 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/foo.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/foo.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/generated_expansion.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/generated_expansion.go index 5bc2b313352..9e338f35f7c 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/generated_expansion.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/samplecontroller_client.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/samplecontroller_client.go index 323b9ebcfd9..b0fd9f76bf9 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/samplecontroller_client.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/samplecontroller_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/factory.go b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/factory.go index 64cb979356b..f85d3eece2f 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/factory.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/generic.go b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/generic.go index 355cca9da61..68728f478a3 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/generic.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 294c902a3a8..230e22f352b 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/interface.go b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/interface.go index e5e7d464a67..8c21aae0d1a 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/interface.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/foo.go b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/foo.go index 90bd8ff4ad0..35068e2c63a 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/foo.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/foo.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/interface.go b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/interface.go index cea72010a71..eb37a51ff6d 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/interface.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/staging/src/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1/expansion_generated.go b/staging/src/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1/expansion_generated.go index 8829daba673..1873fd4d294 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1/expansion_generated.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/staging/src/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1/foo.go b/staging/src/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1/foo.go index 646a7e38a5e..7c7c2264178 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1/foo.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1/foo.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
From 6c39b570fbbc9a18fa8c615c2923a0bcd8bf809c Mon Sep 17 00:00:00 2001 From: Karol Wychowaniec Date: Wed, 20 Dec 2017 14:44:39 +0100 Subject: [PATCH 541/794] Bump Metrics Server to version v0.2.1 --- .../metrics-server/metrics-server-deployment.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml index 73375b2202b..e85afcbff9f 100644 --- a/cluster/addons/metrics-server/metrics-server-deployment.yaml +++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml @@ -23,31 +23,31 @@ data: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: metrics-server-v0.2.0 + name: metrics-server-v0.2.1 namespace: kube-system labels: k8s-app: metrics-server kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v0.2.0 + version: v0.2.1 spec: selector: matchLabels: k8s-app: metrics-server - version: v0.2.0 + version: v0.2.1 template: metadata: name: metrics-server labels: k8s-app: metrics-server - version: v0.2.0 + version: v0.2.1 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: serviceAccountName: metrics-server containers: - name: metrics-server - image: gcr.io/google_containers/metrics-server-amd64:v0.2.0 + image: gcr.io/google_containers/metrics-server-amd64:v0.2.1 command: - /metrics-server - --source=kubernetes.summary_api:'' @@ -84,7 +84,7 @@ spec: - --memory=140Mi - --extra-memory=4Mi - --threshold=5 - - --deployment=metrics-server-v0.2.0 + - --deployment=metrics-server-v0.2.1 - --container=metrics-server - --poll-period=300000 - --estimator=exponential From 93952fb39ebb7a479ae82c0dd874e93c2097d774 Mon Sep 17 00:00:00 2001 From: Lion-Wei Date: Thu, 28 Dec 2017 11:47:27 +0800 Subject: [PATCH 542/794] create ipvs clusterIP rules in onlyNodeLocalEndpoints mode --- pkg/proxy/ipvs/proxier.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff 
--git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 9f0def1e1de..621d7a96f24 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -1158,7 +1158,9 @@ func (proxier *Proxier) syncProxyRules() { // We need to bind ClusterIP to dummy interface, so set `bindAddr` parameter to `true` in syncService() if err := proxier.syncService(svcNameString, serv, true); err == nil { activeIPVSServices[serv.String()] = true - if err := proxier.syncEndpoint(svcName, svcInfo.onlyNodeLocalEndpoints, serv); err != nil { + // ExternalTrafficPolicy only works for NodePort and external LB traffic, does not affect ClusterIP + // So we still need clusterIP rules in onlyNodeLocalEndpoints mode. + if err := proxier.syncEndpoint(svcName, false, serv); err != nil { glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { From 9aca4057eb736ac6b6dfd16d6b7483e86cda63a7 Mon Sep 17 00:00:00 2001 From: qiu Date: Tue, 2 Jan 2018 20:29:11 +0800 Subject: [PATCH 543/794] edit line138 --- plugin/pkg/scheduler/factory/plugins.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/pkg/scheduler/factory/plugins.go b/plugin/pkg/scheduler/factory/plugins.go index 6c7a7ab7d5f..ded02c90fab 100644 --- a/plugin/pkg/scheduler/factory/plugins.go +++ b/plugin/pkg/scheduler/factory/plugins.go @@ -135,7 +135,7 @@ func RemovePredicateKeyFromAlgoProvider(providerName, key string) error { return nil } -// RemovePredicateKeyFromAlgoProvider removes a fit predicate key from all algorithmProviders which in algorithmProviderMap. +// RemovePredicateKeyFromAlgorithmProviderMap removes a fit predicate key from all algorithmProviders which in algorithmProviderMap. 
func RemovePredicateKeyFromAlgorithmProviderMap(key string) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() From ad0674702713fc36ebe53c99f2bab1d54d7a82ff Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 2 Jan 2018 20:35:30 +0800 Subject: [PATCH 544/794] remove redundant deleting endpoint explicitly in endpoint controller --- pkg/controller/endpoint/endpoints_controller.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index 8aa41da8a83..b7d46c3e07e 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -394,15 +394,7 @@ func (e *EndpointController) syncService(key string) error { } service, err := e.serviceLister.Services(namespace).Get(name) if err != nil { - // Delete the corresponding endpoint, as the service has been deleted. - // TODO: Please note that this will delete an endpoint when a - // service is deleted. However, if we're down at the time when - // the service is deleted, we will miss that deletion, so this - // doesn't completely solve the problem. See #6877. - err = e.client.CoreV1().Endpoints(namespace).Delete(name, nil) - if err != nil && !errors.IsNotFound(err) { - return err - } + // Service has been deleted. So no need to do any more operations. return nil } From aa4fd0b69aa7804b0f3c666aa734243cdc11c51d Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Tue, 2 Jan 2018 14:42:01 +0100 Subject: [PATCH 545/794] Do not time-out profiler requests. 
--- .../src/k8s.io/apiserver/pkg/server/filters/longrunning.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/longrunning.go b/staging/src/k8s.io/apiserver/pkg/server/filters/longrunning.go index 21c4562aa04..1b58f163865 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/longrunning.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/longrunning.go @@ -18,12 +18,13 @@ package filters import ( "net/http" + "strings" "k8s.io/apimachinery/pkg/util/sets" apirequest "k8s.io/apiserver/pkg/endpoints/request" ) -// BasicLongRunningRequestCheck returns true if the given request has one of the specified verbs or one of the specified subresources +// BasicLongRunningRequestCheck returns true if the given request has one of the specified verbs or one of the specified subresources, or is a profiler request. func BasicLongRunningRequestCheck(longRunningVerbs, longRunningSubresources sets.String) apirequest.LongRunningRequestCheck { return func(r *http.Request, requestInfo *apirequest.RequestInfo) bool { if longRunningVerbs.Has(requestInfo.Verb) { @@ -32,6 +33,9 @@ func BasicLongRunningRequestCheck(longRunningVerbs, longRunningSubresources sets if requestInfo.IsResourceRequest && longRunningSubresources.Has(requestInfo.Subresource) { return true } + if !requestInfo.IsResourceRequest && strings.HasPrefix(requestInfo.Path, "/debug/pprof/") { + return true + } return false } } From c3b0a83a5c6a4c3ba1d3a980c436feaafbde0a8e Mon Sep 17 00:00:00 2001 From: Wojciech Tyczynski Date: Tue, 2 Jan 2018 13:13:03 +0100 Subject: [PATCH 546/794] Configurable liveness probe initial delays for etcd and kube-apiserver in GCE --- cluster/common.sh | 10 ++++++++++ cluster/gce/configure-vm.sh | 10 ++++++++++ cluster/gce/container-linux/configure-helper.sh | 2 ++ cluster/gce/gci/configure-helper.sh | 2 ++ cluster/saltbase/salt/etcd/etcd.manifest | 3 ++- .../salt/kube-apiserver/kube-apiserver.manifest | 4 +++- 6 files 
changed, 29 insertions(+), 2 deletions(-) diff --git a/cluster/common.sh b/cluster/common.sh index 95e286e89d0..a2b947f1748 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -838,6 +838,16 @@ EOF if [ -n "${ETCD_HOSTNAME:-}" ]; then cat >>$file <>$file <>$file <>/srv/salt-overlay/pillar/cluster-params.sls kube_apiserver_request_timeout_sec: '$(echo "$KUBE_APISERVER_REQUEST_TIMEOUT_SEC" | sed -e "s/'/''/g")' +EOF + fi + if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then + cat <>/srv/salt-overlay/pillar/cluster-params.sls +etcd_liveness_probe_initial_delay: '$(echo "$ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")' +EOF + fi + if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then + cat <>/srv/salt-overlay/pillar/cluster-params.sls +kube_apiserver_liveness_probe_initial_delay: '$(echo "$KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")' EOF fi if [ -n "${ADMISSION_CONTROL:-}" ] && [ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]; then diff --git a/cluster/gce/container-linux/configure-helper.sh b/cluster/gce/container-linux/configure-helper.sh index 4bd5811bbf8..90f677847e8 100755 --- a/cluster/gce/container-linux/configure-helper.sh +++ b/cluster/gce/container-linux/configure-helper.sh @@ -778,6 +778,7 @@ function prepare-etcd-manifest { sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}" sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}" sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}" + sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}" # Get default storage backend from manifest file. 
local -r default_storage_backend=$(cat "${temp_file}" | \ grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \ @@ -1060,6 +1061,7 @@ function start-kube-apiserver { sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" "${src_file}" sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}" + sed -i -e "s@{{liveness_probe_initial_delay}}@${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${src_file}" sed -i -e "s@{{secure_port}}@443@g" "${src_file}" sed -i -e "s@{{secure_port}}@8080@g" "${src_file}" sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}" diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 930cbe19234..93f00292c0e 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1372,6 +1372,7 @@ function prepare-etcd-manifest { sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}" sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}" sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}" + sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}" # Get default storage backend from manifest file. 
local -r default_storage_backend=$(cat "${temp_file}" | \ grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \ @@ -1747,6 +1748,7 @@ function start-kube-apiserver { sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" "${src_file}" sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}" + sed -i -e "s@{{liveness_probe_initial_delay}}@${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${src_file}" sed -i -e "s@{{secure_port}}@443@g" "${src_file}" sed -i -e "s@{{secure_port}}@8080@g" "${src_file}" sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}" diff --git a/cluster/saltbase/salt/etcd/etcd.manifest b/cluster/saltbase/salt/etcd/etcd.manifest index 44419aa744f..6a940ea9073 100644 --- a/cluster/saltbase/salt/etcd/etcd.manifest +++ b/cluster/saltbase/salt/etcd/etcd.manifest @@ -22,6 +22,7 @@ {% if pillar.get('storage_backend', 'etcd3') == 'etcd3' -%} {% set quota_bytes = '--quota-backend-bytes=4294967296' -%} {% endif -%} +{% set liveness_probe_initial_delay = pillar.get('etcd_liveness_probe_initial_delay', 15) -%} {% set srv_kube_path = "/srv/kubernetes" -%} { @@ -67,7 +68,7 @@ "port": {{ port }}, "path": "/health" }, - "initialDelaySeconds": 15, + "initialDelaySeconds": {{ liveness_probe_initial_delay }}, "timeoutSeconds": 15 }, "ports": [ diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest index 8b1fdc630c3..9a779050d89 100644 --- a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest +++ b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest @@ -66,6 +66,8 @@ {% set storage_media_type = "--storage-media-type=" + pillar['storage_media_type'] -%} {% endif -%} +{% set liveness_probe_initial_delay = pillar.get('kube_apiserver_liveness_probe_initial_delay', 15) -%} + {% set request_timeout = "" -%} {% if 
pillar['kube_apiserver_request_timeout_sec'] is defined -%} {% set request_timeout = "--request-timeout=" + pillar['kube_apiserver_request_timeout_sec'] + "s" -%} @@ -248,7 +250,7 @@ "port": 8080, "path": "/healthz" }, - "initialDelaySeconds": 15, + "initialDelaySeconds": {{liveness_probe_initial_delay}}, "timeoutSeconds": 15 }, "ports":[ From 05b0821f3cbb9f59001700583ad2616f0e61ba34 Mon Sep 17 00:00:00 2001 From: Maciej Borsz Date: Tue, 2 Jan 2018 17:12:48 +0100 Subject: [PATCH 547/794] Add 'exec' in all saltbase manifests using '/bin/sh -c'. Right now, if docker sends SIGTERM, /bin/sh doesn't pass it to underlying process, which breaks graceful process shutdown. Changing '/bin/sh -c CMD > /var/log/FILE.log' pattern to '/bin/sh -c exec CMD > /var/log/FILE.log' still allows to redirect output to log file, but also passes all signals to CMD process. --- cluster/saltbase/salt/etcd/etcd.manifest | 2 +- cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml | 2 +- cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest | 2 +- .../kube-controller-manager/kube-controller-manager.manifest | 2 +- cluster/saltbase/salt/kube-proxy/kube-proxy.manifest | 2 +- cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest | 2 +- cluster/saltbase/salt/l7-gcp/glbc.manifest | 2 +- cluster/saltbase/salt/rescheduler/rescheduler.manifest | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cluster/saltbase/salt/etcd/etcd.manifest b/cluster/saltbase/salt/etcd/etcd.manifest index 44419aa744f..4998af620c1 100644 --- a/cluster/saltbase/salt/etcd/etcd.manifest +++ b/cluster/saltbase/salt/etcd/etcd.manifest @@ -48,7 +48,7 @@ "command": [ "/bin/sh", "-c", - "if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname 
}}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} 1>>/var/log/etcd{{ suffix }}.log 2>&1" + "if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd{{ suffix }}.log 2>&1; fi; exec /usr/local/bin/etcd --name etcd-{{ hostname }} --listen-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --initial-advertise-peer-urls {{ etcd_protocol }}://{{ hostname }}:{{ server_port }} --advertise-client-urls http://127.0.0.1:{{ port }} --listen-client-urls http://127.0.0.1:{{ port }} {{ quota_bytes }} --data-dir /var/etcd/data{{ suffix }} --initial-cluster-state {{ cluster_state }} --initial-cluster {{ etcd_cluster }} {{ etcd_creds }} 1>>/var/log/etcd{{ suffix }}.log 2>&1" ], "env": [ { "name": "TARGET_STORAGE", diff --git a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml b/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml index 0b115c1f737..d345a366aa9 100644 --- a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml +++ b/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml @@ -17,7 +17,7 @@ spec: command: - /bin/bash - -c - - /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1 + - exec /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1 resources: requests: cpu: 5m diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest index 8b1fdc630c3..9a19c52c1e3 100644 --- a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest +++ b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest @@ -239,7 +239,7 @@ "command": [ "/bin/sh", "-c", - "/usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1" + 
"exec /usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1" ], {{container_env}} "livenessProbe": { diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest index 8b7b0d4e45c..e037d880177 100644 --- a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest +++ b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest @@ -118,7 +118,7 @@ "command": [ "/bin/sh", "-c", - "/usr/local/bin/kube-controller-manager {{params}} 1>>/var/log/kube-controller-manager.log 2>&1" + "exec /usr/local/bin/kube-controller-manager {{params}} 1>>/var/log/kube-controller-manager.log 2>&1" ], {{container_env}} "livenessProbe": { diff --git a/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest b/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest index 69075cb9d04..d35692a3fd4 100644 --- a/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest +++ b/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest @@ -79,7 +79,7 @@ spec: command: - /bin/sh - -c - - kube-proxy {{api_servers_with_port}} {{kubeconfig}} {{cluster_cidr}} --resource-container="" --oom-score-adj=-998 {{params}} 1>>/var/log/kube-proxy.log 2>&1 + - exec kube-proxy {{api_servers_with_port}} {{kubeconfig}} {{cluster_cidr}} --resource-container="" --oom-score-adj=-998 {{params}} 1>>/var/log/kube-proxy.log 2>&1 {{container_env}} {{kube_cache_mutation_detector_env_name}} {{kube_cache_mutation_detector_env_value}} diff --git a/cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest b/cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest index 6f946bd8c86..26436657ede 100644 --- a/cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest +++ b/cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest @@ -51,7 +51,7 @@ "command": [ "/bin/sh", "-c", - "/usr/local/bin/kube-scheduler 
{{params}} 1>>/var/log/kube-scheduler.log 2>&1" + "exec /usr/local/bin/kube-scheduler {{params}} 1>>/var/log/kube-scheduler.log 2>&1" ], "livenessProbe": { "httpGet": { diff --git a/cluster/saltbase/salt/l7-gcp/glbc.manifest b/cluster/saltbase/salt/l7-gcp/glbc.manifest index c808e5ee0e6..0ad0dc3de07 100644 --- a/cluster/saltbase/salt/l7-gcp/glbc.manifest +++ b/cluster/saltbase/salt/l7-gcp/glbc.manifest @@ -44,7 +44,7 @@ spec: # TODO: split this out into args when we no longer need to pipe stdout to a file #6428 - sh - -c - - '/glbc --verbose=true --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1' + - 'exec /glbc --verbose=true --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1' volumes: - hostPath: path: /etc/gce.conf diff --git a/cluster/saltbase/salt/rescheduler/rescheduler.manifest b/cluster/saltbase/salt/rescheduler/rescheduler.manifest index ef9af1f5f7f..584d35ca797 100644 --- a/cluster/saltbase/salt/rescheduler/rescheduler.manifest +++ b/cluster/saltbase/salt/rescheduler/rescheduler.manifest @@ -28,7 +28,7 @@ spec: # TODO: split this out into args when we no longer need to pipe stdout to a file #6428 - sh - -c - - '/rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1' + - 'exec /rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1' volumes: - hostPath: path: /var/log/rescheduler.log From 406ef92623ea3c81e1101f11c5ffae59ab32f688 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Fri, 22 Dec 2017 17:12:51 -0500 Subject: [PATCH 548/794] Update to latest gophercloud Catch up with all the latest stuff from gophercloud 4b7db606 - only try to 
reauth once d13755e6 - BlockStorage v3: Rename VolumeType PublicAccess to IsPublic 614da04d - Add UPDATE support in V3 volume types (#656) be3fd784 - Flavor Extra Specs Create c2cafb46 - Flavor Extra Specs: List / Get 7b1b8775 - Compute v2: Flavor Access Add cf81d92c - Add DELETE support in V3 volume types a879b375 - Fix incorrect variable name 2997913a - Add pagination support in snapshots a5c71868 - Support pagination in volume resources 1db0312e - TrivialFix incorrect variable name 69194d93 - Add basic CRUD acceptance testcases in snapshot V3 22c7abce - Add CREATE support in V3 volume types aed60e9f - Add basic CRUD acceptance in volume V3 7cbf4661 - BlockStorage v3: volumetype get/list acc test bcab0f79 - Update README with Thank Yous f85e7c0f - Docs: Updating Contributing and Style Guides be1b616c - Fix a small syntax error of TestShareTypeExtraSpecs test 3f38a1ee - Add List/Get support for volume type in volume V3 48a40399 - Support for setting availability_zone_hints to a router 747776a7 - Fix the undefined function error of TestPortsbindingCRUD test a7ec61ea - Fix the undefined function error of TestNetworksProviderCRUD test 25e18920 - Compute v2: Add the extended status information API b63d2fd3 - availability_zone_hints for network(s) 157d7511 - Add support for ipv6_address_mode and ipv6_ra_mode in subnets ed468967 - DBv1: configurations acceptance test 578e2aab - Configuration group time parsing error 669959f8 - Compute v2: attachinterfaces acceptance test 8113f0cb - Add Nova interface-detach support d6484abc - Add Nova interface-attach support 7883fd95 - fix reauth deadlock by not calling Token() during reauth 4d0f8253 - Add support to get interface of a server 7dc13e0d - AccTests: BlockStorage v2 ForceDelete 1e86e54d - Refactor blockstorage actionURL e30da231 - Feature/support force delete e193578c - add UseTokenLock method in ProviderClient to allow safe concurrent access e6a5f874 - ObjectStorage v1: Rename ExtractLastMarker to extractLastMarker 
c47bb004 - BlockStorage v2/v3: Reorder snapshot/volume ListOpts and update godoc 2c05d0e4 - Add 'tenant' support in volume&snapshot API 639d71fd - Networking v2: Port Security Extension 755794a7 - ObjectStorage v1: Subdir and Marker detection a043441f - fixed bug with endless loop when using delimiter on folded directory a4799293 - OpenStack: support OS_PROJECT_* variables --- Godeps/Godeps.json | 60 +++++++------- .../src/k8s.io/apiserver/Godeps/Godeps.json | 14 ++-- .../src/k8s.io/client-go/Godeps/Godeps.json | 14 ++-- .../gophercloud/gophercloud/README.md | 16 ++++ .../gophercloud/gophercloud/STYLEGUIDE.md | 5 ++ .../gophercloud/openstack/auth_env.go | 17 +++- .../extensions/volumeactions/requests.go | 24 +++--- .../extensions/volumeactions/results.go | 5 ++ .../extensions/volumeactions/urls.go | 34 +------- .../blockstorage/v2/volumes/requests.go | 15 +++- .../blockstorage/v3/volumes/requests.go | 30 +++++-- .../blockstorage/v3/volumes/results.go | 13 +++- .../gophercloud/openstack/client.go | 52 +++++++++++-- .../v2/extensions/attachinterfaces/doc.go | 30 +++++++ .../extensions/attachinterfaces/requests.go | 59 ++++++++++++++ .../v2/extensions/attachinterfaces/results.go | 32 ++++++++ .../v2/extensions/attachinterfaces/urls.go | 11 +++ .../openstack/compute/v2/flavors/doc.go | 40 ++++++++++ .../openstack/compute/v2/flavors/requests.go | 69 ++++++++++++++++ .../openstack/compute/v2/flavors/results.go | 67 ++++++++++++++++ .../openstack/compute/v2/flavors/urls.go | 16 ++++ .../v2/extensions/layer3/routers/requests.go | 11 +-- .../v2/extensions/layer3/routers/results.go | 4 + .../networking/v2/networks/requests.go | 9 ++- .../networking/v2/networks/results.go | 4 + .../gophercloud/provider_client.go | 78 ++++++++++++++++--- 26 files changed, 605 insertions(+), 124 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 691bd76f46e..9a9d3aa2c2a 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1633,123 +1633,123 @@ }, { "ImportPath": 
"github.com/gophercloud/gophercloud", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/common/extensions", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/images", - "Rev": 
"8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/servers", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners", - 
"Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/networks", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/ports", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/utils", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/pagination", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gorilla/context", diff --git 
a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 83c6b74dbaa..49ef5aaa071 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -408,31 +408,31 @@ }, { "ImportPath": "github.com/gophercloud/gophercloud", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/utils", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/pagination", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gregjones/httpcache", diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index d9c1c4f0410..c3d9ca80a46 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -172,31 +172,31 @@ }, { "ImportPath": "github.com/gophercloud/gophercloud", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": 
"8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/utils", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gophercloud/gophercloud/pagination", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, { "ImportPath": "github.com/gregjones/httpcache", diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md index 60ca479de89..bb218c3fe9e 100644 --- a/vendor/github.com/gophercloud/gophercloud/README.md +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -141,3 +141,19 @@ See the [contributing guide](./.github/CONTRIBUTING.md). If you're struggling with something or have spotted a potential bug, feel free to submit an issue to our [bug tracker](/issues). + +## Thank You + +We'd like to extend special thanks and appreciation to the following: + +### OpenLab + + + +OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases. 
+ +### VEXXHOST + + + +VEXXHOST is providing their services to assist with the development and testing of Gophercloud. diff --git a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md b/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md index e7531a83d9d..22a29009412 100644 --- a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md +++ b/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md @@ -1,6 +1,8 @@ ## On Pull Requests +- Please make sure to read our [contributing guide](/.github/CONTRIBUTING.md). + - Before you start a PR there needs to be a Github issue and a discussion about it on that issue with a core contributor, even if it's just a 'SGTM'. @@ -34,6 +36,9 @@ append. It makes it difficult for the reviewer to see what's changed from one review to the next. +- See [#583](https://github.com/gophercloud/gophercloud/issues/583) as an example of a + well-formatted issue which contains all relevant information we need to review and approve. + ## On Code - In re design: follow as closely as is reasonable the code already in the library. diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go index 95286041d66..b5482ba8c9f 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go @@ -16,7 +16,12 @@ The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME. Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings, -or an error will result. OS_TENANT_ID and OS_TENANT_NAME are optional. +or an error will result. OS_TENANT_ID, OS_TENANT_NAME, OS_PROJECT_ID, and +OS_PROJECT_NAME are optional. + +OS_TENANT_ID and OS_TENANT_NAME are mutually exclusive to OS_PROJECT_ID and +OS_PROJECT_NAME. If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will +still be referred as "tenant" in Gophercloud. 
To use this function, first set the OS_* environment variables (for example, by sourcing an `openrc` file), then: @@ -34,6 +39,16 @@ func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { domainID := os.Getenv("OS_DOMAIN_ID") domainName := os.Getenv("OS_DOMAIN_NAME") + // If OS_PROJECT_ID is set, overwrite tenantID with the value. + if v := os.Getenv("OS_PROJECT_ID"); v != "" { + tenantID = v + } + + // If OS_PROJECT_NAME is set, overwrite tenantName with the value. + if v := os.Getenv("OS_PROJECT_NAME"); v != "" { + tenantName = v + } + if authURL == "" { err := gophercloud.ErrMissingInput{Argument: "authURL"} return nilOptions, err diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go index a3916c77c16..d18bff555b5 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go @@ -47,7 +47,7 @@ func Attach(client *gophercloud.ServiceClient, id string, opts AttachOptsBuilder r.Err = err return } - _, r.Err = client.Post(attachURL(client, id), b, nil, &gophercloud.RequestOpts{ + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ OkCodes: []int{202}, }) return @@ -56,7 +56,7 @@ func Attach(client *gophercloud.ServiceClient, id string, opts AttachOptsBuilder // BeginDetach will mark the volume as detaching. 
func BeginDetaching(client *gophercloud.ServiceClient, id string) (r BeginDetachingResult) { b := map[string]interface{}{"os-begin_detaching": make(map[string]interface{})} - _, r.Err = client.Post(beginDetachingURL(client, id), b, nil, &gophercloud.RequestOpts{ + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ OkCodes: []int{202}, }) return @@ -87,7 +87,7 @@ func Detach(client *gophercloud.ServiceClient, id string, opts DetachOptsBuilder r.Err = err return } - _, r.Err = client.Post(detachURL(client, id), b, nil, &gophercloud.RequestOpts{ + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ OkCodes: []int{202}, }) return @@ -96,7 +96,7 @@ func Detach(client *gophercloud.ServiceClient, id string, opts DetachOptsBuilder // Reserve will reserve a volume based on volume ID. func Reserve(client *gophercloud.ServiceClient, id string) (r ReserveResult) { b := map[string]interface{}{"os-reserve": make(map[string]interface{})} - _, r.Err = client.Post(reserveURL(client, id), b, nil, &gophercloud.RequestOpts{ + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ OkCodes: []int{200, 201, 202}, }) return @@ -105,7 +105,7 @@ func Reserve(client *gophercloud.ServiceClient, id string) (r ReserveResult) { // Unreserve will unreserve a volume based on volume ID. 
func Unreserve(client *gophercloud.ServiceClient, id string) (r UnreserveResult) { b := map[string]interface{}{"os-unreserve": make(map[string]interface{})} - _, r.Err = client.Post(unreserveURL(client, id), b, nil, &gophercloud.RequestOpts{ + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ OkCodes: []int{200, 201, 202}, }) return @@ -145,7 +145,7 @@ func InitializeConnection(client *gophercloud.ServiceClient, id string, opts Ini r.Err = err return } - _, r.Err = client.Post(initializeConnectionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ OkCodes: []int{200, 201, 202}, }) return @@ -183,7 +183,7 @@ func TerminateConnection(client *gophercloud.ServiceClient, id string, opts Term r.Err = err return } - _, r.Err = client.Post(teminateConnectionURL(client, id), b, nil, &gophercloud.RequestOpts{ + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ OkCodes: []int{202}, }) return @@ -216,7 +216,7 @@ func ExtendSize(client *gophercloud.ServiceClient, id string, opts ExtendSizeOpt r.Err = err return } - _, r.Err = client.Post(extendSizeURL(client, id), b, nil, &gophercloud.RequestOpts{ + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ OkCodes: []int{202}, }) return @@ -256,8 +256,14 @@ func UploadImage(client *gophercloud.ServiceClient, id string, opts UploadImageO r.Err = err return } - _, r.Err = client.Post(uploadURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ OkCodes: []int{202}, }) return } + +// ForceDelete will delete the volume regardless of state. 
+func ForceDelete(client *gophercloud.ServiceClient, id string) (r ForceDeleteResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-force_delete": ""}, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go index 9815f0c26ac..5cadd360f20 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go @@ -184,3 +184,8 @@ func (r UploadImageResult) Extract() (VolumeImage, error) { err := r.ExtractInto(&s) return s.VolumeImage, err } + +// ForceDeleteResult contains the response body and error from a ForceDelete request. +type ForceDeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go index 5efd2b25c05..20486ed7194 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go @@ -2,38 +2,6 @@ package volumeactions import "github.com/gophercloud/gophercloud" -func attachURL(c *gophercloud.ServiceClient, id string) string { +func actionURL(c *gophercloud.ServiceClient, id string) string { return c.ServiceURL("volumes", id, "action") } - -func beginDetachingURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func detachURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func uploadURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func reserveURL(c 
*gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func unreserveURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func initializeConnectionURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func teminateConnectionURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} - -func extendSizeURL(c *gophercloud.ServiceClient, id string) string { - return attachURL(c, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go index 18c9cb272ec..2ec10ad55ed 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go @@ -83,14 +83,21 @@ type ListOptsBuilder interface { // ListOpts holds options for listing Volumes. It is passed to the volumes.List // function. type ListOpts struct { - // admin-only option. Set it to true to see all tenant volumes. + // AllTenants will retrieve volumes of all tenants/projects. AllTenants bool `q:"all_tenants"` - // List only volumes that contain Metadata. + + // Metadata will filter results based on specified metadata. Metadata map[string]string `q:"metadata"` - // List only volumes that have Name as the display name. + + // Name will filter by the specified volume name. Name string `q:"name"` - // List only volumes that have a status of Status. + + // Status will filter by the specified status. Status string `q:"status"` + + // TenantID will filter by a specific tenant/project ID. + // Setting AllTenants is required for this. + TenantID string `q:"project_id"` } // ToVolumeListQuery formats a ListOpts into a query string. 
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go index 18c9cb272ec..43727409dd9 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go @@ -83,14 +83,34 @@ type ListOptsBuilder interface { // ListOpts holds options for listing Volumes. It is passed to the volumes.List // function. type ListOpts struct { - // admin-only option. Set it to true to see all tenant volumes. + // AllTenants will retrieve volumes of all tenants/projects. AllTenants bool `q:"all_tenants"` - // List only volumes that contain Metadata. + + // Metadata will filter results based on specified metadata. Metadata map[string]string `q:"metadata"` - // List only volumes that have Name as the display name. + + // Name will filter by the specified volume name. Name string `q:"name"` - // List only volumes that have a status of Status. + + // Status will filter by the specified status. Status string `q:"status"` + + // TenantID will filter by a specific tenant/project ID. + // Setting AllTenants is required for this. + TenantID string `q:"project_id"` + + // Comma-separated list of sort keys and optional sort directions in the + // form of [:]. + Sort string `q:"sort"` + + // Requests a page size of items. + Limit int `q:"limit"` + + // Used in conjunction with limit to return a slice of items. + Offset int `q:"offset"` + + // The ID of the last-seen item. + Marker string `q:"marker"` } // ToVolumeListQuery formats a ListOpts into a query string. 
@@ -111,7 +131,7 @@ func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pa } return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return VolumePage{pagination.SinglePageBase(r)} + return VolumePage{pagination.LinkedPageBase{PageResult: r}} }) } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go index 5ebe36a3385..87f71262c1d 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go @@ -101,7 +101,7 @@ func (r *Volume) UnmarshalJSON(b []byte) error { // VolumePage is a pagination.pager that is returned from a call to the List function. type VolumePage struct { - pagination.SinglePageBase + pagination.LinkedPageBase } // IsEmpty returns true if a ListResult contains no Volumes. @@ -110,6 +110,17 @@ func (r VolumePage) IsEmpty() (bool, error) { return len(volumes) == 0, err } +func (page VolumePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"volumes_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + // ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. 
func ExtractVolumes(r pagination.Page) ([]Volume, error) { var s []Volume diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go index c796795b8ee..5a52e579148 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/client.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go @@ -56,11 +56,12 @@ func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { endpoint = gophercloud.NormalizeURL(endpoint) base = gophercloud.NormalizeURL(base) - return &gophercloud.ProviderClient{ - IdentityBase: base, - IdentityEndpoint: endpoint, - }, nil + p := new(gophercloud.ProviderClient) + p.IdentityBase = base + p.IdentityEndpoint = endpoint + p.UseTokenLock() + return p, nil } /* @@ -158,9 +159,21 @@ func v2auth(client *gophercloud.ProviderClient, endpoint string, options gopherc } if options.AllowReauth { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.ReauthFunc = nil + tac.TokenID = "" + tao := options + tao.AllowReauth = false client.ReauthFunc = func() error { - client.TokenID = "" - return v2auth(client, endpoint, options, eo) + err := v2auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.TokenID = tac.TokenID + return nil } } client.TokenID = token.ID @@ -202,9 +215,32 @@ func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.Au client.TokenID = token.ID if opts.CanReauth() { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. 
combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.ReauthFunc = nil + tac.TokenID = "" + var tao tokens3.AuthOptionsBuilder + switch ot := opts.(type) { + case *gophercloud.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + case *tokens3.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + default: + tao = opts + } client.ReauthFunc = func() error { - client.TokenID = "" - return v3auth(client, endpoint, opts, eo) + err := v3auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.TokenID = tac.TokenID + return nil } } client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/doc.go index a9960137117..3653122bf30 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/doc.go @@ -18,5 +18,35 @@ Example of Listing a Server's Interfaces for _, interface := range allInterfaces { fmt.Printf("%+v\n", interface) } + +Example to Get a Server's Interface + + portID = "0dde1598-b374-474e-986f-5b8dd1df1d4e" + serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f" + interface, err := attachinterfaces.Get(computeClient, serverID, portID).Extract() + if err != nil { + panic(err) + } + +Example to Create a new Interface attachment on the Server + + networkID := "8a5fe506-7e9f-4091-899b-96336909d93c" + serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f" + attachOpts := attachinterfaces.CreateOpts{ + NetworkID: networkID, + } + interface, err := attachinterfaces.Create(computeClient, serverID, attachOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete an Interface attachment from the Server + + 
portID = "0dde1598-b374-474e-986f-5b8dd1df1d4e" + serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f" + err := attachinterfaces.Delete(computeClient, serverID, portID).ExtractErr() + if err != nil { + panic(err) + } */ package attachinterfaces diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/requests.go index faf2747246a..18dade837c1 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/requests.go @@ -11,3 +11,62 @@ func List(client *gophercloud.ServiceClient, serverID string) pagination.Pager { return InterfacePage{pagination.SinglePageBase(r)} }) } + +// Get requests details on a single interface attachment by the server and port IDs. +func Get(client *gophercloud.ServiceClient, serverID, portID string) (r GetResult) { + _, r.Err = client.Get(getInterfaceURL(client, serverID, portID), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToAttachInterfacesCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies parameters of a new interface attachment. +type CreateOpts struct { + + // PortID is the ID of the port for which you want to create an interface. + // The NetworkID and PortID parameters are mutually exclusive. + // If you do not specify the PortID parameter, the OpenStack Networking API + // v2.0 allocates a port and creates an interface for it on the network. + PortID string `json:"port_id,omitempty"` + + // NetworkID is the ID of the network for which you want to create an interface. + // The NetworkID and PortID parameters are mutually exclusive. 
+ // If you do not specify the NetworkID parameter, the OpenStack Networking + // API v2.0 uses the network information cache that is associated with the instance. + NetworkID string `json:"net_id,omitempty"` + + // Slice of FixedIPs. If you request a specific FixedIP address without a + // NetworkID, the request returns a Bad Request (400) response code. + FixedIPs []FixedIP `json:"fixed_ips,omitempty"` +} + +// ToAttachInterfacesCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToAttachInterfacesCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "interfaceAttachment") +} + +// Create requests the creation of a new interface attachment on the server. +func Create(client *gophercloud.ServiceClient, serverID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToAttachInterfacesCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createInterfaceURL(client, serverID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete makes a request against the nova API to detach a single interface from the server. +// It needs server and port IDs to make a such request. 
+func Delete(client *gophercloud.ServiceClient, serverID, portID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteInterfaceURL(client, serverID, portID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/results.go index e3987eaca84..a16fa14f759 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/results.go @@ -1,9 +1,41 @@ package attachinterfaces import ( + "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/pagination" ) +type attachInterfaceResult struct { + gophercloud.Result +} + +// Extract interprets any attachInterfaceResult as an Interface, if possible. +func (r attachInterfaceResult) Extract() (*Interface, error) { + var s struct { + Interface *Interface `json:"interfaceAttachment"` + } + err := r.ExtractInto(&s) + return s.Interface, err +} + +// GetResult is the response from a Get operation. Call its Extract +// method to interpret it as an Interface. +type GetResult struct { + attachInterfaceResult +} + +// CreateResult is the response from a Create operation. Call its Extract +// method to interpret it as an Interface. +type CreateResult struct { + attachInterfaceResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + // FixedIP represents a Fixed IP Address. 
type FixedIP struct { SubnetID string `json:"subnet_id"` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/urls.go index 7d376f99bb5..50292e8b5a5 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/urls.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/urls.go @@ -5,3 +5,14 @@ import "github.com/gophercloud/gophercloud" func listInterfaceURL(client *gophercloud.ServiceClient, serverID string) string { return client.ServiceURL("servers", serverID, "os-interface") } + +func getInterfaceURL(client *gophercloud.ServiceClient, serverID, portID string) string { + return client.ServiceURL("servers", serverID, "os-interface", portID) +} + +func createInterfaceURL(client *gophercloud.ServiceClient, serverID string) string { + return client.ServiceURL("servers", serverID, "os-interface") +} +func deleteInterfaceURL(client *gophercloud.ServiceClient, serverID, portID string) string { + return client.ServiceURL("servers", serverID, "os-interface", portID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go index a7bc15c3e5c..867d53a8190 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go @@ -59,5 +59,45 @@ Example to List Flavor Access for _, access := range allAccesses { fmt.Printf("%+v", access) } + +Example to Grant Access to a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + accessOpts := flavors.AddAccessOpts{ + Tenant: "15153a0979884b59b0592248ef947921", + } + + accessList, err := flavors.AddAccess(computeClient, flavor.ID, accessOpts).Extract() + if err != nil 
{ + panic(err) + } + +Example to Create Extra Specs for a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + createOpts := flavors.ExtraSpecsOpts{ + "hw:cpu_policy": "CPU-POLICY", + "hw:cpu_thread_policy": "CPU-THREAD-POLICY", + } + createdExtraSpecs, err := flavors.CreateExtraSpecs(computeClient, flavorID, createOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", createdExtraSpecs) + +Example to Get Extra Specs for a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + extraSpecs, err := flavors.ListExtraSpecs(computeClient, flavorID).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", extraSpecs) + */ package flavors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go index 6eb3678b2bd..965d271d1d3 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go @@ -162,6 +162,75 @@ func ListAccesses(client *gophercloud.ServiceClient, id string) pagination.Pager }) } +// AddAccessOptsBuilder allows extensions to add additional parameters to the +// AddAccess requests. +type AddAccessOptsBuilder interface { + ToAddAccessMap() (map[string]interface{}, error) +} + +// AddAccessOpts represents options for adding access to a flavor. +type AddAccessOpts struct { + // Tenant is the project/tenant ID to grant access. + Tenant string `json:"tenant"` +} + +// ToAddAccessMap constructs a request body from AddAccessOpts. +func (opts AddAccessOpts) ToAddAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "addTenantAccess") +} + +// AddAccess grants a tenant/project access to a flavor. 
+func AddAccess(client *gophercloud.ServiceClient, id string, opts AddAccessOptsBuilder) (r AddAccessResult) { + b, err := opts.ToAddAccessMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(accessActionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// ExtraSpecs requests all the extra-specs for the given flavor ID. +func ListExtraSpecs(client *gophercloud.ServiceClient, flavorID string) (r ListExtraSpecsResult) { + _, r.Err = client.Get(extraSpecsListURL(client, flavorID), &r.Body, nil) + return +} + +func GetExtraSpec(client *gophercloud.ServiceClient, flavorID string, key string) (r GetExtraSpecResult) { + _, r.Err = client.Get(extraSpecsGetURL(client, flavorID, key), &r.Body, nil) + return +} + +// CreateExtraSpecsOptsBuilder allows extensions to add additional parameters to the +// CreateExtraSpecs requests. +type CreateExtraSpecsOptsBuilder interface { + ToExtraSpecsCreateMap() (map[string]interface{}, error) +} + +// ExtraSpecsOpts is a map that contains key-value pairs. +type ExtraSpecsOpts map[string]string + +// ToExtraSpecsCreateMap assembles a body for a Create request based on the +// contents of a ExtraSpecsOpts +func (opts ExtraSpecsOpts) ToExtraSpecsCreateMap() (map[string]interface{}, error) { + return map[string]interface{}{"extra_specs": opts}, nil +} + +// CreateExtraSpecs will create or update the extra-specs key-value pairs for the specified Flavor +func CreateExtraSpecs(client *gophercloud.ServiceClient, flavorID string, opts CreateExtraSpecsOptsBuilder) (r CreateExtraSpecsResult) { + b, err := opts.ToExtraSpecsCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(extraSpecsCreateURL(client, flavorID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + // IDFromName is a convienience function that returns a flavor's ID given its // name. 
func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go index fb5c335b8ea..4451be38c92 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go @@ -154,6 +154,26 @@ func ExtractAccesses(r pagination.Page) ([]FlavorAccess, error) { return s.FlavorAccesses, err } +type accessResult struct { + gophercloud.Result +} + +// AddAccessResult is the response of an AddAccess operations. Call its +// Extract method to interpret it as a slice of FlavorAccess. +type AddAccessResult struct { + accessResult +} + +// Extract provides access to the result of an access create or delete. +// The result will be all accesses that the flavor has. +func (r accessResult) Extract() ([]FlavorAccess, error) { + var s struct { + FlavorAccesses []FlavorAccess `json:"flavor_access"` + } + err := r.ExtractInto(&s) + return s.FlavorAccesses, err +} + // FlavorAccess represents an ACL of tenant access to a specific Flavor. type FlavorAccess struct { // FlavorID is the unique ID of the flavor. @@ -162,3 +182,50 @@ type FlavorAccess struct { // TenantID is the unique ID of the tenant. TenantID string `json:"tenant_id"` } + +// Extract interprets any extraSpecsResult as ExtraSpecs, if possible. +func (r extraSpecsResult) Extract() (map[string]string, error) { + var s struct { + ExtraSpecs map[string]string `json:"extra_specs"` + } + err := r.ExtractInto(&s) + return s.ExtraSpecs, err +} + +// extraSpecsResult contains the result of a call for (potentially) multiple +// key-value pairs. Call its Extract method to interpret it as a +// map[string]interface. +type extraSpecsResult struct { + gophercloud.Result +} + +// ListExtraSpecsResult contains the result of a Get operation. 
Call its Extract +// method to interpret it as a map[string]interface. +type ListExtraSpecsResult struct { + extraSpecsResult +} + +// CreateExtraSpecResult contains the result of a Create operation. Call its +// Extract method to interpret it as a map[string]interface. +type CreateExtraSpecsResult struct { + extraSpecsResult +} + +// extraSpecResult contains the result of a call for individual a single +// key-value pair. +type extraSpecResult struct { + gophercloud.Result +} + +// GetExtraSpecResult contains the result of a Get operation. Call its Extract +// method to interpret it as a map[string]interface. +type GetExtraSpecResult struct { + extraSpecResult +} + +// Extract interprets any extraSpecResult as an ExtraSpec, if possible. +func (r extraSpecResult) Extract() (map[string]string, error) { + var s map[string]string + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go index 04d33bf1279..b74f81625d8 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go @@ -23,3 +23,19 @@ func deleteURL(client *gophercloud.ServiceClient, id string) string { func accessURL(client *gophercloud.ServiceClient, id string) string { return client.ServiceURL("flavors", id, "os-flavor-access") } + +func accessActionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "action") +} + +func extraSpecsListURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "os-extra_specs") +} + +func extraSpecsGetURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("flavors", id, "os-extra_specs", key) +} + +func extraSpecsCreateURL(client *gophercloud.ServiceClient, id string) string 
{ + return client.ServiceURL("flavors", id, "os-extra_specs") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go index 6799d200b7a..fa346c8555f 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go @@ -49,11 +49,12 @@ type CreateOptsBuilder interface { // CreateOpts contains all the values needed to create a new router. There are // no required values. type CreateOpts struct { - Name string `json:"name,omitempty"` - AdminStateUp *bool `json:"admin_state_up,omitempty"` - Distributed *bool `json:"distributed,omitempty"` - TenantID string `json:"tenant_id,omitempty"` - GatewayInfo *GatewayInfo `json:"external_gateway_info,omitempty"` + Name string `json:"name,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Distributed *bool `json:"distributed,omitempty"` + TenantID string `json:"tenant_id,omitempty"` + GatewayInfo *GatewayInfo `json:"external_gateway_info,omitempty"` + AvailabilityZoneHints []string `json:"availability_zone_hints,omitempty"` } // ToRouterCreateMap builds a create request body from CreateOpts. diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go index e19c8e74c47..da1b9e4bdfa 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go @@ -60,6 +60,10 @@ type Router struct { // Routes are a collection of static routes that the router will host. 
Routes []Route `json:"routes"` + + // Availability zone hints groups network nodes that run services like DHCP, L3, FW, and others. + // Used to make network resources highly available. + AvailabilityZoneHints []string `json:"availability_zone_hints"` } // RouterPage is the page returned by a pager when traversing over a diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go index 5b61b247192..040f32183b4 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go @@ -66,10 +66,11 @@ type CreateOptsBuilder interface { // CreateOpts represents options used to create a network. type CreateOpts struct { - AdminStateUp *bool `json:"admin_state_up,omitempty"` - Name string `json:"name,omitempty"` - Shared *bool `json:"shared,omitempty"` - TenantID string `json:"tenant_id,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Name string `json:"name,omitempty"` + Shared *bool `json:"shared,omitempty"` + TenantID string `json:"tenant_id,omitempty"` + AvailabilityZoneHints []string `json:"availability_zone_hints,omitempty"` } // ToNetworkCreateMap builds a request body from CreateOpts. diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go index ffd0259f1d2..c73f9e1a63f 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go @@ -69,6 +69,10 @@ type Network struct { // Specifies whether the network resource can be accessed by any tenant. 
Shared bool `json:"shared"` + + // Availability zone hints groups network nodes that run services like DHCP, L3, FW, and others. + // Used to make network resources highly available. + AvailabilityZoneHints []string `json:"availability_zone_hints"` } // NetworkPage is the page returned by a pager when traversing over a diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index 01b3010739a..72daeb0a3eb 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "net/http" "strings" + "sync" ) // DefaultUserAgent is the default User-Agent string set in the request header. @@ -51,6 +52,8 @@ type ProviderClient struct { IdentityEndpoint string // TokenID is the ID of the most recently issued valid token. + // NOTE: Aside from within a custom ReauthFunc, this field shouldn't be set by an application. + // To safely read or write this value, call `Token` or `SetToken`, respectively TokenID string // EndpointLocator describes how this provider discovers the endpoints for @@ -68,16 +71,59 @@ type ProviderClient struct { // authentication functions for different Identity service versions. ReauthFunc func() error - Debug bool + mut *sync.RWMutex + + reauthmut *reauthlock +} + +type reauthlock struct { + sync.RWMutex + reauthing bool } // AuthenticatedHeaders returns a map of HTTP headers that are common for all // authenticated service requests. 
-func (client *ProviderClient) AuthenticatedHeaders() map[string]string { - if client.TokenID == "" { - return map[string]string{} +func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) { + if client.reauthmut != nil { + client.reauthmut.RLock() + if client.reauthmut.reauthing { + client.reauthmut.RUnlock() + return + } + client.reauthmut.RUnlock() } - return map[string]string{"X-Auth-Token": client.TokenID} + t := client.Token() + if t == "" { + return + } + return map[string]string{"X-Auth-Token": t} +} + +// UseTokenLock creates a mutex that is used to allow safe concurrent access to the auth token. +// If the application's ProviderClient is not used concurrently, this doesn't need to be called. +func (client *ProviderClient) UseTokenLock() { + client.mut = new(sync.RWMutex) + client.reauthmut = new(reauthlock) +} + +// Token safely reads the value of the auth token from the ProviderClient. Applications should +// call this method to access the token instead of the TokenID field +func (client *ProviderClient) Token() string { + if client.mut != nil { + client.mut.RLock() + defer client.mut.RUnlock() + } + return client.TokenID +} + +// SetToken safely sets the value of the auth token in the ProviderClient. Applications may +// use this method in a custom ReauthFunc +func (client *ProviderClient) SetToken(t string) { + if client.mut != nil { + client.mut.Lock() + defer client.mut.Unlock() + } + client.TokenID = t } // RequestOpts customizes the behavior of the provider.Request() method. @@ -166,6 +212,8 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) // Set connection parameter to close the connection immediately when we've got the response req.Close = true + prereqtok := req.Header.Get("X-Auth-Token") + // Issue the request. 
resp, err := client.HTTPClient.Do(req) if err != nil { @@ -189,9 +237,6 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) if !ok { body, _ := ioutil.ReadAll(resp.Body) resp.Body.Close() - //pc := make([]uintptr, 1) - //runtime.Callers(2, pc) - //f := runtime.FuncForPC(pc[0]) respErr := ErrUnexpectedResponseCode{ URL: url, Method: method, @@ -199,7 +244,6 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) Actual: resp.StatusCode, Body: body, } - //respErr.Function = "gophercloud.ProviderClient.Request" errType := options.ErrorContext switch resp.StatusCode { @@ -210,7 +254,21 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) } case http.StatusUnauthorized: if client.ReauthFunc != nil { - err = client.ReauthFunc() + if client.mut != nil { + client.mut.Lock() + client.reauthmut.Lock() + client.reauthmut.reauthing = true + client.reauthmut.Unlock() + if curtok := client.TokenID; curtok == prereqtok { + err = client.ReauthFunc() + } + client.reauthmut.Lock() + client.reauthmut.reauthing = false + client.reauthmut.Unlock() + client.mut.Unlock() + } else { + err = client.ReauthFunc() + } if err != nil { e := &ErrUnableToReauthenticate{} e.ErrOriginal = respErr From 52daac8083b60cab84898745c60c1223676dd89a Mon Sep 17 00:00:00 2001 From: steveperry-53 Date: Tue, 2 Jan 2018 17:32:15 +0000 Subject: [PATCH 549/794] Fix typo in field description. --- staging/src/k8s.io/api/apps/v1/types.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/api/apps/v1/types.go b/staging/src/k8s.io/api/apps/v1/types.go index f32300fe99f..b5df22c6fd4 100644 --- a/staging/src/k8s.io/api/apps/v1/types.go +++ b/staging/src/k8s.io/api/apps/v1/types.go @@ -362,7 +362,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. 
Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } From 5880a40acab1db33ad5dfeda3dad67db5a1f5f54 Mon Sep 17 00:00:00 2001 From: steveperry-53 Date: Tue, 2 Jan 2018 17:59:53 +0000 Subject: [PATCH 550/794] Generate specs after fixing typo in documentation. --- api/openapi-spec/swagger.json | 2 +- api/swagger-spec/apps_v1.json | 2 +- staging/src/k8s.io/api/apps/v1/generated.proto | 2 +- staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 1e463c37288..795486975c8 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -70501,7 +70501,7 @@ "description": "Spec to control the desired behavior of rolling update.", "properties": { "maxSurge": { - "description": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + "description": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). 
This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" }, "maxUnavailable": { diff --git a/api/swagger-spec/apps_v1.json b/api/swagger-spec/apps_v1.json index 8109d5910fb..ebebe36d148 100644 --- a/api/swagger-spec/apps_v1.json +++ b/api/swagger-spec/apps_v1.json @@ -8893,7 +8893,7 @@ }, "maxSurge": { "type": "string", - "description": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods." + "description": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods." } } }, diff --git a/staging/src/k8s.io/api/apps/v1/generated.proto b/staging/src/k8s.io/api/apps/v1/generated.proto index 184c868e389..46473baa821 100644 --- a/staging/src/k8s.io/api/apps/v1/generated.proto +++ b/staging/src/k8s.io/api/apps/v1/generated.proto @@ -519,7 +519,7 @@ message RollingUpdateDeployment { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } diff --git a/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 76305393e21..4dec6f2f89e 100644 --- a/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -272,7 +272,7 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. 
Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { From e042cd8ccd9649390739e036f8d8628441daa0ef Mon Sep 17 00:00:00 2001 From: Isaac Hollander McCreery Date: Tue, 2 Jan 2018 11:40:10 -0800 Subject: [PATCH 551/794] Bump metadata proxy and test versions --- cluster/addons/metadata-proxy/gce/metadata-proxy.yaml | 2 +- test/utils/image/manifest.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml index a710f917cd4..ced6e43f9db 100644 --- a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml +++ b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml @@ -38,7 +38,7 @@ spec: dnsPolicy: Default containers: - name: metadata-proxy - image: gcr.io/google_containers/metadata-proxy:v0.1.6 + image: gcr.io/google_containers/metadata-proxy:v0.1.7 securityContext: privileged: true # Request and limit resources to get guaranteed QoS. 
diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index 73b586ff616..4dd4e8c66fe 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -52,7 +52,7 @@ var ( APIServer = ImageConfig{e2eRegistry, "k8s-aggregator-sample-apiserver", "1.7v2", true} AppArmorLoader = ImageConfig{gcRegistry, "apparmor-loader", "0.1", false} BusyBox = ImageConfig{gcRegistry, "busybox", "1.24", false} - CheckMetadataConcealment = ImageConfig{gcRegistry, "check-metadata-concealment", "v0.0.2", false} + CheckMetadataConcealment = ImageConfig{gcRegistry, "check-metadata-concealment", "v0.0.3", false} ClusterTester = ImageConfig{e2eRegistry, "clusterapi-tester", "1.0", true} CudaVectorAdd = ImageConfig{e2eRegistry, "cuda-vector-add", "1.0", true} Dnsutils = ImageConfig{e2eRegistry, "dnsutils", "1.0", true} From 916631f8c3713dddd2cfb01f7ed490b55e8869f4 Mon Sep 17 00:00:00 2001 From: Serguei Bezverkhi Date: Sun, 31 Dec 2017 18:47:07 -0500 Subject: [PATCH 552/794] Removing bootstrapper related e2e tests --- test/e2e/storage/BUILD | 2 +- test/e2e/storage/persistent_volumes-local.go | 142 ++++++++++++++----- 2 files changed, 104 insertions(+), 40 deletions(-) diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 1aa80260956..fa9b93b43bf 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -45,8 +45,8 @@ go_library( "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 
70c4fcbb1cb..98528498a04 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -30,8 +30,8 @@ import ( . "github.com/onsi/gomega" appsv1beta1 "k8s.io/api/apps/v1beta1" - batchv1 "k8s.io/api/batch/v1" "k8s.io/api/core/v1" + extv1beta1 "k8s.io/api/extensions/v1beta1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -112,19 +112,21 @@ const ( // Following are constants used for provisioner e2e tests. // // testServiceAccount is the service account for bootstrapper - testServiceAccount = "local-storage-bootstrapper" - // testRoleBinding is the cluster-admin rolebinding for bootstrapper - testRoleBinding = "local-storage:bootstrapper" + testServiceAccount = "local-storage-admin" // volumeConfigName is the configmap passed to bootstrapper and provisioner volumeConfigName = "local-volume-config" - // bootstrapper and provisioner images used for e2e tests - bootstrapperImageName = "quay.io/external_storage/local-volume-provisioner-bootstrap:v2.0.0" - provisionerImageName = "quay.io/external_storage/local-volume-provisioner:v2.0.0" - // provisioner daemonSetName name, must match the one defined in bootstrapper + // provisioner image used for e2e tests + provisionerImageName = "quay.io/external_storage/local-volume-provisioner:v2.0.0" + // provisioner daemonSetName name daemonSetName = "local-volume-provisioner" - // provisioner node/pv cluster role binding, must match the one defined in bootstrapper - nodeBindingName = "local-storage:provisioner-node-binding" - pvBindingName = "local-storage:provisioner-pv-binding" + // provisioner default mount point folder + provisionerDefaultMountRoot = "/mnt-local-storage" + // provisioner 
node/pv cluster role binding + nodeBindingName = "local-storage:provisioner-node-binding" + pvBindingName = "local-storage:provisioner-pv-binding" + systemRoleNode = "system:node" + systemRolePVProvisioner = "system:persistent-volume-provisioner" + // A sample request size testRequestSize = "10Mi" ) @@ -342,8 +344,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolum }) It("should create and recreate local persistent volume", func() { - By("Creating bootstrapper pod to start provisioner daemonset") - createBootstrapperJob(config) + By("Starting a provisioner daemonset") + createProvisionerDaemonset(config) kind := schema.GroupKind{Group: "extensions", Kind: "DaemonSet"} framework.WaitForControlledPodsRunning(config.client, config.ns, daemonSetName, kind) @@ -862,7 +864,7 @@ func setupLocalVolumesPVCsPVs( func setupLocalVolumeProvisioner(config *localTestConfig) { By("Bootstrapping local volume provisioner") createServiceAccount(config) - createClusterRoleBinding(config) + createProvisionerClusterRoleBinding(config) createVolumeConfigMap(config) By("Initializing local volume discovery base path") @@ -920,7 +922,10 @@ func createServiceAccount(config *localTestConfig) { Expect(err).NotTo(HaveOccurred()) } -func createClusterRoleBinding(config *localTestConfig) { +// createProvisionerClusterRoleBinding creates two cluster role bindings for local volume provisioner's +// service account: systemRoleNode and systemRolePVProvisioner. These are required for +// provisioner to get node information and create persistent volumes. 
+func createProvisionerClusterRoleBinding(config *localTestConfig) { subjects := []rbacv1beta1.Subject{ { Kind: rbacv1beta1.ServiceAccountKind, @@ -929,29 +934,44 @@ func createClusterRoleBinding(config *localTestConfig) { }, } - binding := rbacv1beta1.ClusterRoleBinding{ + pvBinding := rbacv1beta1.ClusterRoleBinding{ TypeMeta: metav1.TypeMeta{ APIVersion: "rbac.authorization.k8s.io/v1beta1", Kind: "ClusterRoleBinding", }, ObjectMeta: metav1.ObjectMeta{ - Name: testRoleBinding, + Name: pvBindingName, }, RoleRef: rbacv1beta1.RoleRef{ APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", - Name: "cluster-admin", + Name: systemRolePVProvisioner, + }, + Subjects: subjects, + } + nodeBinding := rbacv1beta1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1beta1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: nodeBindingName, + }, + RoleRef: rbacv1beta1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: systemRoleNode, }, Subjects: subjects, } - _, err := config.client.RbacV1beta1().ClusterRoleBindings().Create(&binding) + _, err := config.client.RbacV1beta1().ClusterRoleBindings().Create(&pvBinding) + Expect(err).NotTo(HaveOccurred()) + _, err = config.client.RbacV1beta1().ClusterRoleBindings().Create(&nodeBinding) Expect(err).NotTo(HaveOccurred()) } func deleteClusterRoleBinding(config *localTestConfig) { - err := config.client.RbacV1beta1().ClusterRoleBindings().Delete(testRoleBinding, metav1.NewDeleteOptions(0)) - Expect(err).NotTo(HaveOccurred()) // These role bindings are created in provisioner; we just ensure it's // deleted and do not panic on error. 
config.client.RbacV1beta1().ClusterRoleBindings().Delete(nodeBindingName, metav1.NewDeleteOptions(0)) @@ -963,7 +983,8 @@ func createVolumeConfigMap(config *localTestConfig) { // https://github.com/kubernetes-incubator/external-storage/blob/master/local-volume/provisioner/pkg/common/common.go type MountConfig struct { // The hostpath directory - HostDir string `json:"hostDir" yaml:"hostDir"` + HostDir string `json:"hostDir" yaml:"hostDir"` + MountDir string `json:"mountDir" yaml:"mountDir"` } type ProvisionerConfiguration struct { // StorageClassConfig defines configuration of Provisioner's storage classes @@ -972,7 +993,8 @@ func createVolumeConfigMap(config *localTestConfig) { var provisionerConfig ProvisionerConfiguration provisionerConfig.StorageClassConfig = map[string]MountConfig{ config.scName: { - HostDir: path.Join(hostBase, discoveryDir), + HostDir: path.Join(hostBase, discoveryDir), + MountDir: provisionerDefaultMountRoot, }, } @@ -996,25 +1018,43 @@ func createVolumeConfigMap(config *localTestConfig) { Expect(err).NotTo(HaveOccurred()) } -func createBootstrapperJob(config *localTestConfig) { - bootJob := &batchv1.Job{ +func createProvisionerDaemonset(config *localTestConfig) { + provisionerPrivileged := true + provisioner := &extv1beta1.DaemonSet{ TypeMeta: metav1.TypeMeta{ - Kind: "Job", - APIVersion: "batch/v1", + Kind: "DaemonSet", + APIVersion: "extensions/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ - GenerateName: "local-volume-tester-", + Name: daemonSetName, }, - Spec: batchv1.JobSpec{ + Spec: extv1beta1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "local-volume-provisioner"}, + }, Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "local-volume-provisioner"}, + }, Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyNever, ServiceAccountName: testServiceAccount, Containers: []v1.Container{ { - Name: "volume-tester", - Image: bootstrapperImageName, + Name: 
"provisioner", + Image: provisionerImageName, + ImagePullPolicy: "Always", + SecurityContext: &v1.SecurityContext{ + Privileged: &provisionerPrivileged, + }, Env: []v1.EnvVar{ + { + Name: "MY_NODE_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, { Name: "MY_NAMESPACE", ValueFrom: &v1.EnvVarSource{ @@ -1024,9 +1064,35 @@ func createBootstrapperJob(config *localTestConfig) { }, }, }, - Args: []string{ - fmt.Sprintf("--image=%v", provisionerImageName), - fmt.Sprintf("--volume-config=%v", volumeConfigName), + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeConfigName, + MountPath: "/etc/provisioner/config/", + }, + { + Name: "local-disks", + MountPath: "/mnt/local-storage", + }, + }, + }, + }, + Volumes: []v1.Volume{ + { + Name: volumeConfigName, + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: volumeConfigName, + }, + }, + }, + }, + { + Name: "local-disks", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: path.Join(hostBase, discoveryDir), + }, }, }, }, @@ -1034,9 +1100,7 @@ func createBootstrapperJob(config *localTestConfig) { }, }, } - job, err := config.client.BatchV1().Jobs(config.ns).Create(bootJob) - Expect(err).NotTo(HaveOccurred()) - err = framework.WaitForJobFinish(config.client, config.ns, job.Name, 1) + _, err := config.client.ExtensionsV1beta1().DaemonSets(config.ns).Create(provisioner) Expect(err).NotTo(HaveOccurred()) } From e21ed03bd121d2c8cb1fbac306cffea4af955c53 Mon Sep 17 00:00:00 2001 From: steveperry-53 Date: Wed, 3 Jan 2018 00:18:03 +0000 Subject: [PATCH 553/794] Run update-api-reference-docs.sh. 
--- docs/api-reference/apps/v1/definitions.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api-reference/apps/v1/definitions.html b/docs/api-reference/apps/v1/definitions.html index af89c45aa2c..f4f24b8d229 100755 --- a/docs/api-reference/apps/v1/definitions.html +++ b/docs/api-reference/apps/v1/definitions.html @@ -5903,7 +5903,7 @@ Examples:

maxSurge

-

The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.

+

The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.

false

string

From 071200128f55776379690adef917392bebcf0d13 Mon Sep 17 00:00:00 2001 From: Lion-Wei Date: Fri, 8 Dec 2017 15:24:25 +0800 Subject: [PATCH 554/794] remove /k8s.io/kubernetes/pkg/kubectl/testing --- hack/.golint_failures | 1 - pkg/kubectl/BUILD | 1 - pkg/kubectl/testing/BUILD | 30 -------- pkg/kubectl/testing/doc.go | 19 ----- pkg/kubectl/testing/types.go | 33 --------- pkg/kubectl/testing/zz_generated.deepcopy.go | 69 ------------------- pkg/printers/internalversion/BUILD | 1 - pkg/printers/internalversion/printers_test.go | 28 +++++--- 8 files changed, 20 insertions(+), 162 deletions(-) delete mode 100644 pkg/kubectl/testing/BUILD delete mode 100644 pkg/kubectl/testing/doc.go delete mode 100644 pkg/kubectl/testing/types.go delete mode 100644 pkg/kubectl/testing/zz_generated.deepcopy.go diff --git a/hack/.golint_failures b/hack/.golint_failures index 659d1c25490..1d88b07b7f6 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -154,7 +154,6 @@ pkg/kubectl/cmd/util/jsonmerge pkg/kubectl/cmd/util/sanity pkg/kubectl/metricsutil pkg/kubectl/resource -pkg/kubectl/testing pkg/kubectl/util pkg/kubectl/util/crlf pkg/kubectl/util/slice diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index ecfbc750b11..a67266440bc 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -219,7 +219,6 @@ filegroup( "//pkg/kubectl/proxy:all-srcs", "//pkg/kubectl/resource:all-srcs", "//pkg/kubectl/scheme:all-srcs", - "//pkg/kubectl/testing:all-srcs", "//pkg/kubectl/util:all-srcs", "//pkg/kubectl/validation:all-srcs", ], diff --git a/pkg/kubectl/testing/BUILD b/pkg/kubectl/testing/BUILD deleted file mode 100644 index f6fff93f036..00000000000 --- a/pkg/kubectl/testing/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importpath = "k8s.io/kubernetes/pkg/kubectl/testing", - visibility = ["//visibility:public"], - deps = [ - 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/pkg/kubectl/testing/doc.go b/pkg/kubectl/testing/doc.go deleted file mode 100644 index 0ea50660981..00000000000 --- a/pkg/kubectl/testing/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package - -package testing diff --git a/pkg/kubectl/testing/types.go b/pkg/kubectl/testing/types.go deleted file mode 100644 index 23e417e130a..00000000000 --- a/pkg/kubectl/testing/types.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package testing - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type TestStruct struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - Key string `json:"Key"` - Map map[string]int `json:"Map"` - StringList []string `json:"StringList"` - IntList []int `json:"IntList"` -} diff --git a/pkg/kubectl/testing/zz_generated.deepcopy.go b/pkg/kubectl/testing/zz_generated.deepcopy.go deleted file mode 100644 index eb5bbb34af2..00000000000 --- a/pkg/kubectl/testing/zz_generated.deepcopy.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package testing - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TestStruct) DeepCopyInto(out *TestStruct) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Map != nil { - in, out := &in.Map, &out.Map - *out = make(map[string]int, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.StringList != nil { - in, out := &in.StringList, &out.StringList - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IntList != nil { - in, out := &in.IntList, &out.IntList - *out = make([]int, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestStruct. -func (in *TestStruct) DeepCopy() *TestStruct { - if in == nil { - return nil - } - out := new(TestStruct) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TestStruct) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} diff --git a/pkg/printers/internalversion/BUILD b/pkg/printers/internalversion/BUILD index eb2e66920a1..edbd167520d 100644 --- a/pkg/printers/internalversion/BUILD +++ b/pkg/printers/internalversion/BUILD @@ -28,7 +28,6 @@ go_test( "//pkg/apis/storage:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/kubectl/testing:go_default_library", "//pkg/printers:go_default_library", "//pkg/util/pointer:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index 8662c91fc37..0f2c530f863 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -50,30 +50,42 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" 
"k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/apis/storage" - kubectltesting "k8s.io/kubernetes/pkg/kubectl/testing" "k8s.io/kubernetes/pkg/printers" ) func init() { - legacyscheme.Scheme.AddKnownTypes(testapi.Default.InternalGroupVersion(), &kubectltesting.TestStruct{}) - legacyscheme.Scheme.AddKnownTypes(legacyscheme.Registry.GroupOrDie(api.GroupName).GroupVersion, &kubectltesting.TestStruct{}) + legacyscheme.Scheme.AddKnownTypes(testapi.Default.InternalGroupVersion(), &TestPrintType{}) + legacyscheme.Scheme.AddKnownTypes(legacyscheme.Registry.GroupOrDie(api.GroupName).GroupVersion, &TestPrintType{}) } -var testData = kubectltesting.TestStruct{ +var testData = TestStruct{ Key: "testValue", Map: map[string]int{"TestSubkey": 1}, StringList: []string{"a", "b", "c"}, IntList: []int{1, 2, 3}, } +type TestStruct struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Key string `json:"Key"` + Map map[string]int `json:"Map"` + StringList []string `json:"StringList"` + IntList []int `json:"IntList"` +} + +func (in *TestStruct) DeepCopyObject() runtime.Object { + panic("never called") +} + func TestVersionedPrinter(t *testing.T) { - original := &kubectltesting.TestStruct{Key: "value"} + original := &TestPrintType{Data: "value"} p := printers.NewVersionedPrinter( printers.ResourcePrinterFunc(func(obj runtime.Object, w io.Writer) error { if obj == original { t.Fatalf("object should not be identical: %#v", obj) } - if obj.(*kubectltesting.TestStruct).Key != "value" { + if obj.(*TestPrintType).Data != "value" { t.Fatalf("object was not converted: %#v", obj) } return nil @@ -315,14 +327,14 @@ func testPrinter(t *testing.T, printer printers.ResourcePrinter, unmarshalFunc f if err != nil { t.Fatal(err) } - var poutput kubectltesting.TestStruct + var poutput TestStruct // Verify that given function runs without error. 
err = unmarshalFunc(buf.Bytes(), &poutput) if err != nil { t.Fatal(err) } // Use real decode function to undo the versioning process. - poutput = kubectltesting.TestStruct{} + poutput = TestStruct{} s := yamlserializer.NewDecodingSerializer(testapi.Default.Codec()) if err := runtime.DecodeInto(s, buf.Bytes(), &poutput); err != nil { t.Fatal(err) From 7eafa215f55a4b4f143f81bee5fc493250ffdd61 Mon Sep 17 00:00:00 2001 From: Dong Liu Date: Tue, 19 Dec 2017 15:18:37 +0800 Subject: [PATCH 555/794] Split auth related config for Azure --- pkg/cloudprovider/providers/azure/BUILD | 9 +- pkg/cloudprovider/providers/azure/auth/BUILD | 28 ++++ .../providers/azure/auth/azure_auth.go | 124 +++++++++++++++++ pkg/cloudprovider/providers/azure/azure.go | 129 +++--------------- .../providers/azure/azure_controllerCommon.go | 1 - .../providers/azure/azure_test.go | 7 +- pkg/credentialprovider/azure/BUILD | 3 +- .../azure/azure_credentials.go | 40 +++++- 8 files changed, 218 insertions(+), 123 deletions(-) create mode 100644 pkg/cloudprovider/providers/azure/auth/BUILD create mode 100644 pkg/cloudprovider/providers/azure/auth/azure_auth.go diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index accc2594e30..fe131367e0c 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -32,6 +32,7 @@ go_library( deps = [ "//pkg/api/v1/service:go_default_library", "//pkg/cloudprovider:go_default_library", + "//pkg/cloudprovider/providers/azure/auth:go_default_library", "//pkg/controller:go_default_library", "//pkg/version:go_default_library", "//pkg/volume:go_default_library", @@ -41,13 +42,11 @@ go_library( "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", - "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", 
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library", - "//vendor/golang.org/x/crypto/pkcs12:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", @@ -71,6 +70,7 @@ go_test( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", deps = [ "//pkg/api/v1/service:go_default_library", + "//pkg/cloudprovider/providers/azure/auth:go_default_library", "//pkg/kubelet/apis:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library", @@ -93,6 +93,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/cloudprovider/providers/azure/auth:all-srcs", + ], tags = ["automanaged"], ) diff --git a/pkg/cloudprovider/providers/azure/auth/BUILD b/pkg/cloudprovider/providers/azure/auth/BUILD new file mode 100644 index 00000000000..cc733d385aa --- /dev/null +++ b/pkg/cloudprovider/providers/azure/auth/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["azure_auth.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/golang.org/x/crypto/pkcs12:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + 
tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/cloudprovider/providers/azure/auth/azure_auth.go b/pkg/cloudprovider/providers/azure/auth/azure_auth.go new file mode 100644 index 00000000000..948206f8eb6 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/auth/azure_auth.go @@ -0,0 +1,124 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package auth + +import ( + "crypto/rsa" + "crypto/x509" + "fmt" + "io/ioutil" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/golang/glog" + "golang.org/x/crypto/pkcs12" +) + +// AzureAuthConfig holds auth related part of cloud config +type AzureAuthConfig struct { + // The cloud environment identifier. 
Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13 + Cloud string `json:"cloud" yaml:"cloud"` + // The AAD Tenant ID for the Subscription that the cluster is deployed in + TenantID string `json:"tenantId" yaml:"tenantId"` + // The ClientID for an AAD application with RBAC access to talk to Azure RM APIs + AADClientID string `json:"aadClientId" yaml:"aadClientId"` + // The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs + AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"` + // The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs + AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"` + // The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs + AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"` + // Use managed service identity for the virtual machine to access Azure ARM APIs + UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"` + // The ID of the Azure Subscription that the cluster is deployed in + SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"` +} + +// GetServicePrincipalToken creates a new service principal token based on the configuration +func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID) + if err != nil { + return nil, fmt.Errorf("creating the OAuth config: %v", err) + } + + if config.UseManagedIdentityExtension { + glog.V(2).Infoln("azure: using managed identity extension to retrieve access token") + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err) + } + return 
adal.NewServicePrincipalTokenFromMSI( + msiEndpoint, + env.ServiceManagementEndpoint) + } + + if len(config.AADClientSecret) > 0 { + glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token") + return adal.NewServicePrincipalToken( + *oauthConfig, + config.AADClientID, + config.AADClientSecret, + env.ServiceManagementEndpoint) + } + + if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { + glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") + certData, err := ioutil.ReadFile(config.AADClientCertPath) + if err != nil { + return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err) + } + certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword) + if err != nil { + return nil, fmt.Errorf("decoding the client certificate: %v", err) + } + return adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, + config.AADClientID, + certificate, + privateKey, + env.ServiceManagementEndpoint) + } + + return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID) +} + +// ParseAzureEnvironment returns azure environment by name +func ParseAzureEnvironment(cloudName string) (*azure.Environment, error) { + var env azure.Environment + var err error + if cloudName == "" { + env = azure.PublicCloud + } else { + env, err = azure.EnvironmentFromName(cloudName) + } + return &env, err +} + +// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and +// the private RSA key +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err) + } + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 
certificate must contain a RSA private key") + } + + return certificate, rsaPrivateKey, nil +} diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 936c3cfbb67..170091cace9 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -17,16 +17,16 @@ limitations under the License. package azure import ( - "crypto/rsa" - "crypto/x509" "fmt" "io" "io/ioutil" "strings" "time" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/flowcontrol" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/version" @@ -35,12 +35,9 @@ import ( "github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" "github.com/ghodss/yaml" "github.com/golang/glog" - "golang.org/x/crypto/pkcs12" - "k8s.io/apimachinery/pkg/util/wait" ) const ( @@ -61,12 +58,8 @@ const ( // Config holds the configuration parsed from the --cloud-config flag // All fields are required unless otherwise specified type Config struct { - // The cloud environment identifier. 
Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13 - Cloud string `json:"cloud" yaml:"cloud"` - // The AAD Tenant ID for the Subscription that the cluster is deployed in - TenantID string `json:"tenantId" yaml:"tenantId"` - // The ID of the Azure Subscription that the cluster is deployed in - SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"` + auth.AzureAuthConfig + // The name of the resource group that the cluster is deployed in ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"` // The location of the resource group that the cluster is deployed in @@ -96,15 +89,6 @@ type Config struct { // the cloudprovider will try to add all nodes to a single backend pool which is forbidden. // In other words, if you use multiple agent pools (scale sets), you MUST set this field. PrimaryScaleSetName string `json:"primaryScaleSetName" yaml:"primaryScaleSetName"` - - // The ClientID for an AAD application with RBAC access to talk to Azure RM APIs - AADClientID string `json:"aadClientId" yaml:"aadClientId"` - // The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs - AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"` - // The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs - AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"` - // The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs - AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"` // Enable exponential backoff to manage resource request retries CloudProviderBackoff bool `json:"cloudProviderBackoff" yaml:"cloudProviderBackoff"` // Backoff retry limit @@ -122,9 +106,6 @@ type Config struct { // Rate limit Bucket Size CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket" 
yaml:"cloudProviderRateLimitBucket"` - // Use managed service identity for the virtual machine to access Azure ARM APIs - UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"` - // Maximum allowed LoadBalancer Rule Count is the limit enforced by Azure Load balancer MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount"` } @@ -226,81 +207,24 @@ func init() { cloudprovider.RegisterCloudProvider(CloudProviderName, NewCloud) } -// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and -// the private RSA key -func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { - privateKey, certificate, err := pkcs12.Decode(pkcs, password) - if err != nil { - return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err) - } - rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) - if !isRsaKey { - return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key") - } - - return certificate, rsaPrivateKey, nil -} - -// GetServicePrincipalToken creates a new service principal token based on the configuration -func GetServicePrincipalToken(config *Config, env *azure.Environment) (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID) - if err != nil { - return nil, fmt.Errorf("creating the OAuth config: %v", err) - } - - if config.UseManagedIdentityExtension { - glog.V(2).Infoln("azure: using managed identity extension to retrieve access token") - msiEndpoint, err := adal.GetMSIVMEndpoint() - if err != nil { - return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err) - } - return adal.NewServicePrincipalTokenFromMSI( - msiEndpoint, - env.ServiceManagementEndpoint) - } - - if len(config.AADClientSecret) > 0 { - glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token") - return adal.NewServicePrincipalToken( - *oauthConfig, - 
config.AADClientID, - config.AADClientSecret, - env.ServiceManagementEndpoint) - } - - if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { - glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") - certData, err := ioutil.ReadFile(config.AADClientCertPath) - if err != nil { - return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err) - } - certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword) - if err != nil { - return nil, fmt.Errorf("decoding the client certificate: %v", err) - } - return adal.NewServicePrincipalTokenFromCertificate( - *oauthConfig, - config.AADClientID, - certificate, - privateKey, - env.ServiceManagementEndpoint) - } - - return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID) -} - // NewCloud returns a Cloud with initialized clients func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { - config, env, err := ParseConfig(configReader) + config, err := parseConfig(configReader) if err != nil { return nil, err } + + env, err := auth.ParseAzureEnvironment(config.Cloud) + if err != nil { + return nil, err + } + az := Cloud{ Config: *config, Environment: *env, } - servicePrincipalToken, err := GetServicePrincipalToken(config, env) + servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env) if err != nil { return nil, err } @@ -433,7 +357,7 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount } - if az.Config.VMType == vmTypeVMSS { + if strings.EqualFold(vmTypeVMSS, az.Config.VMType) { az.vmSet = newScaleSet(&az) } else { az.vmSet = newAvailabilitySet(&az) @@ -445,38 +369,24 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { return &az, nil } -// ParseConfig returns a parsed configuration and 
azure.Environment for an Azure cloudprovider config file -func ParseConfig(configReader io.Reader) (*Config, *azure.Environment, error) { +// parseConfig returns a parsed configuration for an Azure cloudprovider config file +func parseConfig(configReader io.Reader) (*Config, error) { var config Config - var env azure.Environment if configReader == nil { - return &config, &env, nil + return &config, nil } configContents, err := ioutil.ReadAll(configReader) if err != nil { - return nil, nil, err + return nil, err } err = yaml.Unmarshal(configContents, &config) if err != nil { - return nil, nil, err + return nil, err } - if config.Cloud == "" { - env = azure.PublicCloud - } else { - env, err = azure.EnvironmentFromName(config.Cloud) - if err != nil { - return nil, nil, err - } - } - - if config.VMType != "" { - config.VMType = strings.ToLower(config.VMType) - } - - return &config, &env, nil + return &config, nil } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider @@ -538,7 +448,6 @@ func initDiskControllers(az *Cloud) error { storageEndpointSuffix: az.Environment.StorageEndpointSuffix, managementEndpoint: az.Environment.ResourceManagerEndpoint, resourceGroup: az.ResourceGroup, - tenantID: az.TenantID, tokenEndPoint: az.Environment.ActiveDirectoryEndpoint, subscriptionID: az.SubscriptionID, cloud: az, diff --git a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go index fdb78e2af7b..c445c2783ec 100644 --- a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go +++ b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go @@ -53,7 +53,6 @@ var defaultBackOff = kwait.Backoff{ } type controllerCommon struct { - tenantID string subscriptionID string location string storageEndpointSuffix string diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index 91b1d48549f..47181bb0d3f 100644 --- 
a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/flowcontrol" serviceapi "k8s.io/kubernetes/pkg/api/v1/service" + "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "github.com/Azure/azure-sdk-for-go/arm/compute" @@ -846,8 +847,10 @@ func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) { func getTestCloud() (az *Cloud) { az = &Cloud{ Config: Config{ - TenantID: "tenant", - SubscriptionID: "subscription", + AzureAuthConfig: auth.AzureAuthConfig{ + TenantID: "tenant", + SubscriptionID: "subscription", + }, ResourceGroup: "rg", VnetResourceGroup: "rg", Location: "westus", diff --git a/pkg/credentialprovider/azure/BUILD b/pkg/credentialprovider/azure/BUILD index c7213861b7f..afb23f31b7d 100644 --- a/pkg/credentialprovider/azure/BUILD +++ b/pkg/credentialprovider/azure/BUILD @@ -14,13 +14,14 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/credentialprovider/azure", deps = [ - "//pkg/cloudprovider/providers/azure:go_default_library", + "//pkg/cloudprovider/providers/azure/auth:go_default_library", "//pkg/credentialprovider:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/dgrijalva/jwt-go:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", ], diff --git a/pkg/credentialprovider/azure/azure_credentials.go b/pkg/credentialprovider/azure/azure_credentials.go index ff251497ce3..e48d8133f55 100644 --- 
a/pkg/credentialprovider/azure/azure_credentials.go +++ b/pkg/credentialprovider/azure/azure_credentials.go @@ -18,16 +18,18 @@ package azure import ( "io" + "io/ioutil" "os" "time" "github.com/Azure/azure-sdk-for-go/arm/containerregistry" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" - azureapi "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/ghodss/yaml" "github.com/golang/glog" "github.com/spf13/pflag" - "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" + "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" "k8s.io/kubernetes/pkg/credentialprovider" ) @@ -60,18 +62,44 @@ func NewACRProvider(configFile *string) credentialprovider.DockerConfigProvider type acrProvider struct { file *string - config *azure.Config - environment *azureapi.Environment + config *auth.AzureAuthConfig + environment *azure.Environment registryClient RegistriesClient servicePrincipalToken *adal.ServicePrincipalToken } +// ParseConfig returns a parsed configuration for an Azure cloudprovider config file +func parseConfig(configReader io.Reader) (*auth.AzureAuthConfig, error) { + var config auth.AzureAuthConfig + + if configReader == nil { + return &config, nil + } + + configContents, err := ioutil.ReadAll(configReader) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(configContents, &config) + if err != nil { + return nil, err + } + + return &config, nil +} + func (a *acrProvider) loadConfig(rdr io.Reader) error { var err error - a.config, a.environment, err = azure.ParseConfig(rdr) + a.config, err = parseConfig(rdr) if err != nil { glog.Errorf("Failed to load azure credential file: %v", err) } + + a.environment, err = auth.ParseAzureEnvironment(a.config.Cloud) + if err != nil { + return err + } + return nil } @@ -94,7 +122,7 @@ func (a *acrProvider) Enabled() bool { return false } - a.servicePrincipalToken, err = azure.GetServicePrincipalToken(a.config, a.environment) + 
a.servicePrincipalToken, err = auth.GetServicePrincipalToken(a.config, a.environment) if err != nil { glog.Errorf("Failed to create service principal token: %v", err) return false From dca1447f5fff20687575e90d3ba46d55114a3f23 Mon Sep 17 00:00:00 2001 From: chentao1596 Date: Thu, 2 Nov 2017 14:56:55 +0800 Subject: [PATCH 556/794] Optimizing the implementation of the error check for PriorityClass --- plugin/pkg/admission/priority/admission.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/plugin/pkg/admission/priority/admission.go b/plugin/pkg/admission/priority/admission.go index 5105809e909..c6416fea577 100644 --- a/plugin/pkg/admission/priority/admission.go +++ b/plugin/pkg/admission/priority/admission.go @@ -162,12 +162,15 @@ func (p *PriorityPlugin) admitPod(a admission.Attributes) error { if !ok { // Now that we didn't find any system priority, try resolving by user defined priority classes. pc, err := p.lister.Get(pod.Spec.PriorityClassName) + if err != nil { - return fmt.Errorf("failed to get default priority class %s: %v", pod.Spec.PriorityClassName, err) - } - if pc == nil { - return admission.NewForbidden(a, fmt.Errorf("no PriorityClass with name %v was found", pod.Spec.PriorityClassName)) + if errors.IsNotFound(err) { + return admission.NewForbidden(a, fmt.Errorf("no PriorityClass with name %v was found", pod.Spec.PriorityClassName)) + } + + return fmt.Errorf("failed to get PriorityClass with name %s: %v", pod.Spec.PriorityClassName, err) } + priority = pc.Value } } From 2bf6b54f05dce7ec68e15c413ae65c6f3361a3c7 Mon Sep 17 00:00:00 2001 From: mlmhl Date: Fri, 29 Dec 2017 16:32:01 +0800 Subject: [PATCH 557/794] format error message and remove duplicated event for resize volume failure --- .../operationexecutor/operation_generator.go | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/pkg/volume/util/operationexecutor/operation_generator.go 
b/pkg/volume/util/operationexecutor/operation_generator.go index 2353e12615f..bf26620248d 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -497,10 +497,10 @@ func (og *operationGenerator) GenerateMountVolumeFunc( // resizeFileSystem will resize the file system if user has requested a resize of // underlying persistent volume and is allowed to do so. - resizeError := og.resizeFileSystem(volumeToMount, devicePath, volumePlugin.GetPluginName()) + resizeSimpleError, resizeDetailedError := og.resizeFileSystem(volumeToMount, devicePath, volumePlugin.GetPluginName()) - if resizeError != nil { - return volumeToMount.GenerateError("MountVolume.Resize failed", resizeError) + if resizeSimpleError != nil || resizeDetailedError != nil { + return resizeSimpleError, resizeDetailedError } deviceMountPath, err := @@ -586,10 +586,10 @@ func (og *operationGenerator) GenerateMountVolumeFunc( }, nil } -func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devicePath string, pluginName string) error { +func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devicePath string, pluginName string) (simpleErr, detailedErr error) { if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { glog.V(6).Infof("Resizing is not enabled for this volume %s", volumeToMount.VolumeName) - return nil + return nil, nil } mounter := og.volumePluginMgr.Host.GetMounter(pluginName) @@ -604,7 +604,7 @@ func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devi pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(pv.Spec.ClaimRef.Name, metav1.GetOptions{}) if err != nil { // Return error rather than leave the file system un-resized, caller will log and retry - return volumeToMount.GenerateErrorDetailed("MountVolume get PVC failed", err) + return volumeToMount.GenerateError("MountVolume.resizeFileSystem get 
PVC failed", err) } pvcStatusCap := pvc.Status.Capacity[v1.ResourceStorage] @@ -617,7 +617,7 @@ func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devi simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem failed", "requested read-only file system") glog.Warningf(detailedMsg) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg) - return nil + return nil, nil } diskFormatter := &mount.SafeFormatAndMount{ @@ -629,10 +629,7 @@ func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devi resizeStatus, resizeErr := resizer.Resize(devicePath) if resizeErr != nil { - resizeDetailedError := volumeToMount.GenerateErrorDetailed("MountVolume.resizeFileSystem failed", resizeErr) - glog.Error(resizeDetailedError) - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, resizeDetailedError.Error()) - return resizeDetailedError + return volumeToMount.GenerateError("MountVolume.resizeFileSystem failed", resizeErr) } if resizeStatus { @@ -645,12 +642,12 @@ func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devi err = updatePVCStatusCapacity(pvc.Name, pvc, pv.Spec.Capacity, og.kubeClient) if err != nil { // On retry, resizeFileSystem will be called again but do nothing - return volumeToMount.GenerateErrorDetailed("MountVolume update PVC status failed", err) + return volumeToMount.GenerateError("MountVolume.resizeFileSystem update PVC status failed", err) } - return nil + return nil, nil } } - return nil + return nil, nil } func (og *operationGenerator) GenerateUnmountVolumeFunc( From 74b197e7feae9f68fc23b2271a243923be80ba75 Mon Sep 17 00:00:00 2001 From: NickrenREN Date: Wed, 3 Jan 2018 10:31:57 +0800 Subject: [PATCH 558/794] fix expand panic --- pkg/volume/util/operationexecutor/operation_generator.go | 3 +++ 1 file changed, 3 insertions(+) diff --git 
a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 2353e12615f..2b090a958d1 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -1253,6 +1253,9 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( if err != nil { return volumetypes.GeneratedOperations{}, fmt.Errorf("Error finding plugin for expanding volume: %q with error %v", pvcWithResizeRequest.QualifiedName(), err) } + if volumePlugin == nil { + return volumetypes.GeneratedOperations{}, fmt.Errorf("Can not find plugin for expanding volume: %q", pvcWithResizeRequest.QualifiedName()) + } expandVolumeFunc := func() (error, error) { newSize := pvcWithResizeRequest.ExpectedSize From 4b738a7b400291b48191cef6c806f40a57f13b0f Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Tue, 5 Dec 2017 17:05:56 +0800 Subject: [PATCH 559/794] [PSP] always check validated policy first for update operation When update a pod with `kubernetes.io/psp` annotation set, we should check this policy first. Because this saved policy is `usually` the one we are looking for. 
--- .../security/podsecuritypolicy/BUILD | 1 + .../security/podsecuritypolicy/admission.go | 25 ++-- .../podsecuritypolicy/admission_test.go | 115 +++++++++++++++++- 3 files changed, 130 insertions(+), 11 deletions(-) diff --git a/plugin/pkg/admission/security/podsecuritypolicy/BUILD b/plugin/pkg/admission/security/podsecuritypolicy/BUILD index 6d96b49e456..5c256b008b2 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/BUILD +++ b/plugin/pkg/admission/security/podsecuritypolicy/BUILD @@ -56,6 +56,7 @@ go_test( "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library", ], ) diff --git a/plugin/pkg/admission/security/podsecuritypolicy/admission.go b/plugin/pkg/admission/security/podsecuritypolicy/admission.go index a8d80515535..380cf2b63a0 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/admission.go +++ b/plugin/pkg/admission/security/podsecuritypolicy/admission.go @@ -122,8 +122,8 @@ func (c *PodSecurityPolicyPlugin) Admit(a admission.Attributes) error { pod := a.GetObject().(*api.Pod) - // compute the context - allowedPod, pspName, validationErrs, err := c.computeSecurityContext(a, pod, true) + // compute the context. Mutation is allowed. ValidatedPSPAnnotation is not taken into account. + allowedPod, pspName, validationErrs, err := c.computeSecurityContext(a, pod, true, "") if err != nil { return admission.NewForbidden(a, err) } @@ -152,8 +152,8 @@ func (c *PodSecurityPolicyPlugin) Validate(a admission.Attributes) error { pod := a.GetObject().(*api.Pod) - // compute the context. Mutation is not allowed. - allowedPod, _, validationErrs, err := c.computeSecurityContext(a, pod, false) + // compute the context. Mutation is not allowed. 
ValidatedPSPAnnotation is used as a hint to gain same speed-up. + allowedPod, _, validationErrs, err := c.computeSecurityContext(a, pod, false, pod.ObjectMeta.Annotations[psputil.ValidatedPSPAnnotation]) if err != nil { return admission.NewForbidden(a, err) } @@ -193,8 +193,9 @@ func shouldIgnore(a admission.Attributes) (bool, error) { // computeSecurityContext derives a valid security context while trying to avoid any changes to the given pod. I.e. // if there is a matching policy with the same security context as given, it will be reused. If there is no -// matching policy the returned pod will be nil and the pspName empty. -func (c *PodSecurityPolicyPlugin) computeSecurityContext(a admission.Attributes, pod *api.Pod, specMutationAllowed bool) (*api.Pod, string, field.ErrorList, error) { +// matching policy the returned pod will be nil and the pspName empty. validatedPSPHint is the validated psp name +// saved in kubernetes.io/psp annotation. This psp is usually the one we are looking for. +func (c *PodSecurityPolicyPlugin) computeSecurityContext(a admission.Attributes, pod *api.Pod, specMutationAllowed bool, validatedPSPHint string) (*api.Pod, string, field.ErrorList, error) { // get all constraints that are usable by the user glog.V(4).Infof("getting pod security policies for pod %s (generate: %s)", pod.Name, pod.GenerateName) var saInfo user.Info @@ -213,9 +214,18 @@ func (c *PodSecurityPolicyPlugin) computeSecurityContext(a admission.Attributes, return pod, "", nil, nil } - // sort by name to make order deterministic + // sort policies by name to make order deterministic + // If mutation is not allowed and validatedPSPHint is provided, check the validated policy first. 
// TODO(liggitt): add priority field to allow admins to bucket differently sort.SliceStable(policies, func(i, j int) bool { + if !specMutationAllowed { + if policies[i].Name == validatedPSPHint { + return true + } + if policies[j].Name == validatedPSPHint { + return false + } + } return strings.Compare(policies[i].Name, policies[j].Name) < 0 }) @@ -244,7 +254,6 @@ func (c *PodSecurityPolicyPlugin) computeSecurityContext(a admission.Attributes, } // the entire pod validated - mutated := !apiequality.Semantic.DeepEqual(pod, podCopy) if mutated && !specMutationAllowed { continue diff --git a/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go b/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go index 8b2c2b701ed..48febd89769 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go +++ b/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authorization/authorizerfactory" "k8s.io/kubernetes/pkg/api/legacyscheme" kapi "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" @@ -65,7 +66,7 @@ func NewTestAdmission(psps []*extensions.PodSecurityPolicy, authz authorizer.Aut } } -// TestAlwaysAllowedAuthorizer is a testing struct for testing that fulfills the authorizer interface. +// TestAuthorizer is a testing struct for testing that fulfills the authorizer interface. type TestAuthorizer struct { // usernameToNamespaceToAllowedPSPs contains the map of allowed PSPs. // if nil, all PSPs are allowed. 
@@ -344,6 +345,13 @@ func TestAdmitPreferNonmutating(t *testing.T) { gcChangedPod.OwnerReferences = []metav1.OwnerReference{{Kind: "Foo", Name: "bar"}} gcChangedPod.Finalizers = []string{"foo"} + podWithAnnotation := unprivilegedRunAsAnyPod.DeepCopy() + podWithAnnotation.ObjectMeta.Annotations = map[string]string{ + // "mutating2" is lexicographically behind "mutating1", so "mutating1" should be + // chosen because it's the canonical PSP order. + psputil.ValidatedPSPAnnotation: mutating2.Name, + } + tests := map[string]struct { operation kadmission.Operation pod *kapi.Pod @@ -381,6 +389,15 @@ func TestAdmitPreferNonmutating(t *testing.T) { expectedContainerUser: &mutating1.Spec.RunAsUser.Ranges[0].Min, expectedPSP: mutating1.Name, }, + "pod should use deterministic mutating PSP on create even if ValidatedPSPAnnotation is set": { + operation: kadmission.Create, + pod: podWithAnnotation, + psps: []*extensions.PodSecurityPolicy{mutating2, mutating1}, + shouldPassValidate: true, + expectMutation: true, + expectedContainerUser: &mutating1.Spec.RunAsUser.Ranges[0].Min, + expectedPSP: mutating1.Name, + }, "pod should prefer non-mutating PSP on update": { operation: kadmission.Update, pod: changedPodWithSC.DeepCopy(), @@ -2130,7 +2147,6 @@ func TestPolicyAuthorizationErrors(t *testing.T) { ) tests := map[string]struct { - priviliged bool inPolicies []*extensions.PodSecurityPolicy allowed map[string]map[string]map[string]bool expectValidationErrs int @@ -2197,7 +2213,7 @@ func TestPolicyAuthorizationErrors(t *testing.T) { plugin := NewTestAdmission(tc.inPolicies, authz) attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), ns, "", kapi.Resource("pods").WithVersion("version"), "", kadmission.Create, &user.DefaultInfo{Name: userName}) - allowedPod, _, validationErrs, err := plugin.computeSecurityContext(attrs, pod, true) + allowedPod, _, validationErrs, err := plugin.computeSecurityContext(attrs, pod, true, "") assert.Nil(t, allowedPod) 
assert.NoError(t, err) assert.Len(t, validationErrs, tc.expectValidationErrs) @@ -2205,6 +2221,99 @@ func TestPolicyAuthorizationErrors(t *testing.T) { } } +func TestPreferValidatedPSP(t *testing.T) { + restrictivePSPWithName := func(name string) *extensions.PodSecurityPolicy { + p := restrictivePSP() + p.Name = name + return p + } + + permissivePSPWithName := func(name string) *extensions.PodSecurityPolicy { + p := permissivePSP() + p.Name = name + return p + } + + tests := map[string]struct { + inPolicies []*extensions.PodSecurityPolicy + expectValidationErrs int + validatedPSPHint string + expectedPSP string + }{ + "no policy saved in annotations, PSPs are ordered lexicographically": { + inPolicies: []*extensions.PodSecurityPolicy{ + restrictivePSPWithName("001restrictive"), + restrictivePSPWithName("002restrictive"), + permissivePSPWithName("002permissive"), + permissivePSPWithName("001permissive"), + permissivePSPWithName("003permissive"), + }, + expectValidationErrs: 0, + validatedPSPHint: "", + expectedPSP: "001permissive", + }, + "policy saved in annotations is prefered": { + inPolicies: []*extensions.PodSecurityPolicy{ + restrictivePSPWithName("001restrictive"), + restrictivePSPWithName("002restrictive"), + permissivePSPWithName("001permissive"), + permissivePSPWithName("002permissive"), + permissivePSPWithName("003permissive"), + }, + expectValidationErrs: 0, + validatedPSPHint: "002permissive", + expectedPSP: "002permissive", + }, + "policy saved in annotations is invalid": { + inPolicies: []*extensions.PodSecurityPolicy{ + restrictivePSPWithName("001restrictive"), + restrictivePSPWithName("002restrictive"), + }, + expectValidationErrs: 2, + validatedPSPHint: "foo", + expectedPSP: "", + }, + "policy saved in annotations is disallowed anymore": { + inPolicies: []*extensions.PodSecurityPolicy{ + restrictivePSPWithName("001restrictive"), + restrictivePSPWithName("002restrictive"), + }, + expectValidationErrs: 2, + validatedPSPHint: "001restrictive", + 
expectedPSP: "", + }, + "policy saved in annotations is disallowed anymore, but find another one": { + inPolicies: []*extensions.PodSecurityPolicy{ + restrictivePSPWithName("001restrictive"), + restrictivePSPWithName("002restrictive"), + permissivePSPWithName("002permissive"), + permissivePSPWithName("001permissive"), + }, + expectValidationErrs: 0, + validatedPSPHint: "001restrictive", + expectedPSP: "001permissive", + }, + } + for desc, tc := range tests { + t.Run(desc, func(t *testing.T) { + authz := authorizerfactory.NewAlwaysAllowAuthorizer() + allowPrivilegeEscalation := true + pod := goodPod() + pod.Namespace = "ns" + pod.Spec.ServiceAccountName = "sa" + pod.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = &allowPrivilegeEscalation + + plugin := NewTestAdmission(tc.inPolicies, authz) + attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "ns", "", kapi.Resource("pods").WithVersion("version"), "", kadmission.Update, &user.DefaultInfo{Name: "test"}) + + _, pspName, validationErrs, err := plugin.computeSecurityContext(attrs, pod, false, tc.validatedPSPHint) + assert.NoError(t, err) + assert.Len(t, validationErrs, tc.expectValidationErrs) + assert.Equal(t, pspName, tc.expectedPSP) + }) + } +} + func restrictivePSP() *extensions.PodSecurityPolicy { return &extensions.PodSecurityPolicy{ ObjectMeta: metav1.ObjectMeta{ From b196301b67195b66fb28a12feef767ab86383a02 Mon Sep 17 00:00:00 2001 From: Yecheng Fu Date: Wed, 3 Jan 2018 12:30:01 +0800 Subject: [PATCH 560/794] RBD Plugin: Fix comments and remove unnecessary locking code. 
--- pkg/volume/rbd/rbd_util.go | 97 ++++++++++---------------------------- 1 file changed, 24 insertions(+), 73 deletions(-) diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index 94c5f4efa9f..915e0e2e496 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -28,7 +28,6 @@ import ( "os" "os/exec" "path" - "regexp" "strconv" "strings" "time" @@ -49,10 +48,6 @@ const ( kubeLockMagic = "kubelet_lock_magic_" ) -var ( - clientKubeLockMagicRe = regexp.MustCompile("client.* " + kubeLockMagic + ".*") -) - // search /sys/bus for rbd device that matches given pool and image func getDevFromImageAndPool(pool, image string) (string, bool) { // /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool @@ -132,20 +127,21 @@ func rbdErrors(runErr, resultErr error) error { return resultErr } -// 'rbd' utility simply pass '-m ' parameter to kernel rbd/libceph -// modules, which takes a comma-seprated list of one or more monitor addresses -// (e.g. ip1[:port1][,ip2[:port2]...]) in its first version in linux (see -// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/ceph_common.c#L239) -// Also, libceph choose monitor randomly, so we can simply pass all addresses -// without randomization (see +// 'rbd' utility builds a comma-separated list of monitor addresses from '-m' / +// '--mon_host` parameter (comma, semi-colon, or white-space delimited monitor +// addresses) and send it to kernel rbd/libceph modules, which can accept +// comma-seprated list of monitor addresses (e.g. ip1[:port1][,ip2[:port2]...]) +// in theirs first version in linux (see +// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/ceph_common.c#L239). +// Also, libceph module choose monitor randomly, so we can simply pass all +// addresses without randomization (see // https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/mon_client.c#L132). 
func (util *RBDUtil) kernelRBDMonitorsOpt(mons []string) string { return strings.Join(mons, ",") } -// rbdLock acquires a lock on image if lock is true, otherwise releases if a -// lock is found on image. -func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error { +// rbdUnlock releases a lock on image if found. +func (util *RBDUtil) rbdUnlock(b rbdMounter) error { var err error var output, locker string var cmd []byte @@ -168,10 +164,7 @@ func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error { mon := util.kernelRBDMonitorsOpt(b.Mon) - // cmd "rbd lock list" serves two purposes: - // for fencing, check if lock already held for this host - // this edge case happens if host crashes in the middle of acquiring lock and mounting rbd - // for defencing, get the locker name, something like "client.1234" + // get the locker name, something like "client.1234" args := []string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon} args = append(args, secret_opt...) cmd, err = b.exec.Run("rbd", args...) 
@@ -180,62 +173,21 @@ func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error { if err != nil { return err } + ind := strings.LastIndex(output, lock_id) - 1 + for i := ind; i >= 0; i-- { + if output[i] == '\n' { + locker = output[(i + 1):ind] + break + } + } - if lock { - // check if lock is already held for this host by matching lock_id and rbd lock id - if strings.Contains(output, lock_id) { - // this host already holds the lock, exit - glog.V(1).Infof("rbd: lock already held for %s", lock_id) - return nil - } - // clean up orphaned lock if no watcher on the image - used, rbdOutput, statusErr := util.rbdStatus(&b) - if statusErr != nil { - return fmt.Errorf("rbdStatus failed error %v, rbd output: %v", statusErr, rbdOutput) - } - if used { - // this image is already used by a node other than this node - return fmt.Errorf("rbd image: %s/%s is already used by a node other than this node, rbd output: %v", b.Image, b.Pool, output) - } - - // best effort clean up orphaned locked if not used - locks := clientKubeLockMagicRe.FindAllStringSubmatch(output, -1) - for _, v := range locks { - if len(v) > 0 { - lockInfo := strings.Split(v[0], " ") - if len(lockInfo) > 2 { - args := []string{"lock", "remove", b.Image, lockInfo[1], lockInfo[0], "--pool", b.Pool, "--id", b.Id, "-m", mon} - args = append(args, secret_opt...) - cmd, err = b.exec.Run("rbd", args...) - glog.Infof("remove orphaned locker %s from client %s: err %v, rbd output: %s", lockInfo[1], lockInfo[0], err, string(cmd)) - } - } - } - - // hold a lock: rbd lock add - args := []string{"lock", "add", b.Image, lock_id, "--pool", b.Pool, "--id", b.Id, "-m", mon} + // remove a lock if found: rbd lock remove + if len(locker) > 0 { + args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon} args = append(args, secret_opt...) cmd, err = b.exec.Run("rbd", args...) 
if err == nil { - glog.V(4).Infof("rbd: successfully add lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) - } - } else { - // defencing, find locker name - ind := strings.LastIndex(output, lock_id) - 1 - for i := ind; i >= 0; i-- { - if output[i] == '\n' { - locker = output[(i + 1):ind] - break - } - } - // remove a lock if found: rbd lock remove - if len(locker) > 0 { - args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon} - args = append(args, secret_opt...) - cmd, err = b.exec.Run("rbd", args...) - if err == nil { - glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) - } + glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) } } @@ -243,7 +195,6 @@ func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error { } // AttachDisk attaches the disk on the node. -// If Volume is not read-only, acquire a lock on image first. func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) { var err error var output []byte @@ -334,7 +285,7 @@ func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, devic // At last, it removes rbd.json file. func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error { mounter := &rbdMounter{ - // util.rbdLock needs it to run command. + // util.rbdUnlock needs it to run command. 
rbd: newRBD("", "", "", "", false, plugin, util), } fp, err := os.Open(rbdFile) @@ -355,7 +306,7 @@ func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error { // remove rbd lock if found // the disk is not attached to this node anymore, so the lock on image // for this node can be removed safely - err = util.rbdLock(*mounter, false) + err = util.rbdUnlock(*mounter) if err == nil { os.Remove(rbdFile) } From f0b1dfd33fc390f9e3d81bddb0a8ef77cd6904d1 Mon Sep 17 00:00:00 2001 From: Allen Petersen Date: Tue, 2 Jan 2018 21:25:05 -0800 Subject: [PATCH 561/794] Update gengo version --- Godeps/Godeps.json | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 9a9d3aa2c2a..572d4e12423 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -2864,10 +2864,18 @@ "ImportPath": "golang.org/x/tools/container/intsets", "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" }, + { + "ImportPath": "golang.org/x/tools/go/ast/astutil", + "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" + }, { "ImportPath": "golang.org/x/tools/go/vcs", "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" }, + { + "ImportPath": "golang.org/x/tools/imports", + "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" + }, { "ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2", "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" @@ -3042,43 +3050,43 @@ }, { "ImportPath": "k8s.io/gengo/args", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/examples/deepcopy-gen/generators", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/examples/defaulter-gen/generators", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/examples/import-boss/generators", - 
"Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/examples/set-gen/generators", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/examples/set-gen/sets", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/generator", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/namer", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/parser", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/types", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/heapster/metrics/api/v1/types", @@ -3134,4 +3142,4 @@ "Rev": "db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394" } ] -} +} \ No newline at end of file From 92c9c9ad30df94f24f04740f5bf3603a07ba5832 Mon Sep 17 00:00:00 2001 From: Di Xu Date: Fri, 29 Dec 2017 11:07:20 +0800 Subject: [PATCH 562/794] ignore nonexistent ns net file error when deleting container network --- pkg/kubelet/network/cni/cni.go | 4 +++- pkg/kubelet/network/kubenet/kubenet_linux.go | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/network/cni/cni.go b/pkg/kubelet/network/cni/cni.go index 540be4b70cf..15724afc486 100644 --- a/pkg/kubelet/network/cni/cni.go +++ b/pkg/kubelet/network/cni/cni.go @@ -273,7 +273,9 @@ func (plugin *cniNetworkPlugin) deleteFromNetwork(network *cniNetwork, podName s netConf, cniNet := network.NetworkConfig, network.CNIConfig glog.V(4).Infof("About to del CNI network %v (type=%v)", netConf.Name, 
netConf.Plugins[0].Network.Type) err = cniNet.DelNetworkList(netConf, rt) - if err != nil { + // The pod may not get deleted successfully at the first time. + // Ignore "no such file or directory" error in case the network has already been deleted in previous attempts. + if err != nil && !strings.Contains(err.Error(), "no such file or directory") { glog.Errorf("Error deleting network: %v", err) return err } diff --git a/pkg/kubelet/network/kubenet/kubenet_linux.go b/pkg/kubelet/network/kubenet/kubenet_linux.go index f41c59d843e..35e0a1b0cc7 100644 --- a/pkg/kubelet/network/kubenet/kubenet_linux.go +++ b/pkg/kubelet/network/kubenet/kubenet_linux.go @@ -765,7 +765,10 @@ func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.Netwo } glog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) - if err := plugin.cniConfig.DelNetwork(config, rt); err != nil { + err = plugin.cniConfig.DelNetwork(config, rt) + // The pod may not get deleted successfully at the first time. + // Ignore "no such file or directory" error in case the network has already been deleted in previous attempts. 
+ if err != nil && !strings.Contains(err.Error(), "no such file or directory") { return fmt.Errorf("Error removing container from network: %v", err) } return nil From 6d19de343f40092c2e1511848b8e43df81da97fe Mon Sep 17 00:00:00 2001 From: Allen Petersen Date: Tue, 2 Jan 2018 21:55:25 -0800 Subject: [PATCH 563/794] Run godep-save.sh and update-staging-godeps.sh --- Godeps/Godeps.json | 164 +- Godeps/LICENSES | 70 + .../k8s.io/code-generator/Godeps/Godeps.json | 28 +- vendor/BUILD | 2 + .../golang.org/x/tools/go/ast/astutil/BUILD | 26 + .../x/tools/go/ast/astutil/enclosing.go | 627 ++ .../x/tools/go/ast/astutil/imports.go | 450 + .../golang.org/x/tools/go/ast/astutil/util.go | 14 + vendor/golang.org/x/tools/imports/BUILD | 71 + vendor/golang.org/x/tools/imports/fastwalk.go | 187 + .../x/tools/imports/fastwalk_dirent_fileno.go | 13 + .../x/tools/imports/fastwalk_dirent_ino.go | 13 + .../x/tools/imports/fastwalk_portable.go | 29 + .../x/tools/imports/fastwalk_unix.go | 122 + vendor/golang.org/x/tools/imports/fix.go | 974 ++ vendor/golang.org/x/tools/imports/imports.go | 289 + vendor/golang.org/x/tools/imports/mkindex.go | 173 + vendor/golang.org/x/tools/imports/mkstdlib.go | 104 + .../golang.org/x/tools/imports/sortimports.go | 212 + vendor/golang.org/x/tools/imports/zstdlib.go | 9376 +++++++++++++++++ vendor/k8s.io/gengo/generator/BUILD | 1 + vendor/k8s.io/gengo/generator/execute.go | 9 +- 22 files changed, 12859 insertions(+), 95 deletions(-) create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/BUILD create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go create mode 100644 vendor/golang.org/x/tools/imports/BUILD create mode 100644 vendor/golang.org/x/tools/imports/fastwalk.go create mode 100644 vendor/golang.org/x/tools/imports/fastwalk_dirent_fileno.go create mode 100644 
vendor/golang.org/x/tools/imports/fastwalk_dirent_ino.go create mode 100644 vendor/golang.org/x/tools/imports/fastwalk_portable.go create mode 100644 vendor/golang.org/x/tools/imports/fastwalk_unix.go create mode 100644 vendor/golang.org/x/tools/imports/fix.go create mode 100644 vendor/golang.org/x/tools/imports/imports.go create mode 100644 vendor/golang.org/x/tools/imports/mkindex.go create mode 100644 vendor/golang.org/x/tools/imports/mkstdlib.go create mode 100644 vendor/golang.org/x/tools/imports/sortimports.go create mode 100644 vendor/golang.org/x/tools/imports/zstdlib.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 572d4e12423..dc0ec589a51 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -442,47 +442,47 @@ }, { "ImportPath": "github.com/containerd/containerd/api/services/containers/v1", - "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Comment": "v1.0.0-beta.2-159-g27d450a", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/api/services/tasks/v1", - "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Comment": "v1.0.0-beta.2-159-g27d450a", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/api/services/version/v1", - "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Comment": "v1.0.0-beta.2-159-g27d450a", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/api/types", - "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Comment": "v1.0.0-beta.2-159-g27d450a", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/api/types/task", - "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Comment": "v1.0.0-beta.2-159-g27d450a", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/containers", - "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Comment": "v1.0.0-beta.2-159-g27d450a", "Rev": 
"27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/dialer", - "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Comment": "v1.0.0-beta.2-159-g27d450a", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/errdefs", - "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Comment": "v1.0.0-beta.2-159-g27d450a", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { "ImportPath": "github.com/containerd/containerd/namespaces", - "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Comment": "v1.0.0-beta.2-159-g27d450a", "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" }, { @@ -907,152 +907,152 @@ }, { "ImportPath": "github.com/docker/distribution/digestset", - "Comment": "v2.6.0-rc.1-209-gedc3ab29", + "Comment": "v2.6.0-rc.1-209-gedc3ab2", "Rev": "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" }, { "ImportPath": "github.com/docker/distribution/reference", - "Comment": "v2.6.0-rc.1-209-gedc3ab29", + "Comment": "v2.6.0-rc.1-209-gedc3ab2", "Rev": "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" }, { "ImportPath": "github.com/docker/docker/api", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/blkiodev", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/container", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": 
"github.com/docker/docker/api/types/events", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/filters", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/image", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/network", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/registry", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/strslice", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/swarm", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/swarm/runtime", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": 
"docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/time", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/versions", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/api/types/volume", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/client", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/ioutils", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/jsonlog", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/jsonmessage", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/longpath", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": 
"docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/stdcopy", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/symlink", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/system", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/term/windows", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { "ImportPath": "github.com/docker/docker/pkg/tlsconfig", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { @@ -1077,7 +1077,7 @@ }, { "ImportPath": "github.com/docker/libnetwork/ipvs", - "Comment": "v0.8.0-dev.2-910-gba46b928", + "Comment": "v0.8.0-dev.2-910-gba46b92", "Rev": "ba46b928444931e6865d8618dc03622cac79aa6f" }, { @@ -1204,132 +1204,132 @@ }, { "ImportPath": "github.com/gogo/protobuf/gogoproto", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": 
"github.com/gogo/protobuf/plugin/compare", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/description", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/embedcheck", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/enumstringer", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/equal", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/face", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/gostring", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/marshalto", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/populate", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": 
"github.com/gogo/protobuf/plugin/size", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/stringer", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/testgen", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/union", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/plugin/unmarshal", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/proto", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/sortkeys", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": 
"github.com/gogo/protobuf/types", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/vanity", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/vanity/command", - "Comment": "v0.4-3-gc0656edd", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { @@ -2251,77 +2251,77 @@ }, { "ImportPath": "github.com/opencontainers/runc/libcontainer", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/rootless", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": 
"github.com/opencontainers/runc/libcontainer/configs/validate", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/keys", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/system", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/user", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/utils", - "Comment": "v1.0.0-rc4-50-g4d6e6720", + "Comment": "v1.0.0-rc4-50-g4d6e672", "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" }, { @@ -3142,4 +3142,4 @@ "Rev": "db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394" } ] -} \ No newline at end of file +} diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 2b40e59609c..ecc5c08027a 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -85305,6 +85305,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================================================ +================================================================================ += vendor/golang.org/x/tools/go/ast/astutil licensed under: = + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ += vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707 +================================================================================ + + ================================================================================ = vendor/golang.org/x/tools/go/vcs licensed under: = @@ -85340,6 +85375,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/golang.org/x/tools/imports licensed under: = + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707 +================================================================================ + + ================================================================================ = vendor/google.golang.org/api/cloudmonitoring/v2beta2 licensed under: = diff --git a/staging/src/k8s.io/code-generator/Godeps/Godeps.json b/staging/src/k8s.io/code-generator/Godeps/Godeps.json index 239636cbe3d..506e4b88556 100644 --- a/staging/src/k8s.io/code-generator/Godeps/Godeps.json +++ b/staging/src/k8s.io/code-generator/Godeps/Godeps.json @@ -206,49 +206,57 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "golang.org/x/tools/go/ast/astutil", + "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" + }, + { + "ImportPath": "golang.org/x/tools/imports", + "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" + }, { "ImportPath": "gopkg.in/yaml.v2", "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77" }, { "ImportPath": "k8s.io/gengo/args", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/examples/deepcopy-gen/generators", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/examples/defaulter-gen/generators", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": 
"b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/examples/import-boss/generators", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/examples/set-gen/generators", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/examples/set-gen/sets", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/generator", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/namer", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/parser", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/gengo/types", - "Rev": "b58fc7edb82e0c6ffc9b8aef61813c7261b785d4" + "Rev": "b6c426f7730e6d66e6e476a85d1c3eb7633880e0" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", diff --git a/vendor/BUILD b/vendor/BUILD index 5b92af906f8..2f2776e4b3c 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -380,7 +380,9 @@ filegroup( "//vendor/golang.org/x/text/width:all-srcs", "//vendor/golang.org/x/time/rate:all-srcs", "//vendor/golang.org/x/tools/container/intsets:all-srcs", + "//vendor/golang.org/x/tools/go/ast/astutil:all-srcs", "//vendor/golang.org/x/tools/go/vcs:all-srcs", + "//vendor/golang.org/x/tools/imports:all-srcs", "//vendor/google.golang.org/api/cloudmonitoring/v2beta2:all-srcs", "//vendor/google.golang.org/api/compute/v0.alpha:all-srcs", "//vendor/google.golang.org/api/compute/v0.beta:all-srcs", diff --git a/vendor/golang.org/x/tools/go/ast/astutil/BUILD b/vendor/golang.org/x/tools/go/ast/astutil/BUILD new file mode 100644 index 00000000000..0f7c6304b4f --- /dev/null +++ 
b/vendor/golang.org/x/tools/go/ast/astutil/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "enclosing.go", + "imports.go", + "util.go", + ], + importpath = "golang.org/x/tools/go/ast/astutil", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 00000000000..6b7052b892c --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,627 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. 
+// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// Precondition: [start, end) both lie within the same file as root. +// TODO(adonovan): return (nil, false) in this case and remove precond. +// Requires FileSet; see loader.tokenFileContainsPos. +// +// Postcondition: path is never nil; it always contains at least 'root'. +// +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. 
+ augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + _, isToken := child.(tokenNode) + return isToken || visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. 
+// +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +// +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. + switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case 
ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), + tok(n.Closing, len(")"))) + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. 
+ // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? 
+ + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("{")), + tok(n.Rbrack, len("}"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? 
+ + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. + sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +// +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: 
+ return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case 
*ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 00000000000..0f5db8b57e5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,450 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) { + return AddNamedImport(fset, f, "", ipath) +} + +// AddNamedImport adds the import path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// AddNamedImport(fset, f, "pathpkg", "path") +// adds +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) { + if imports(f, ipath) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(ipath), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. 
+ // The goal is to find an existing import + // whose import path has the longest shared + // prefix with ipath. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group. + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + n := matchLen(importPath(impspec), ipath) + if n > bestMatch { + bestMatch = n + impDecl = gen + impIndex = j + } + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import goes after the package declaration and after + // the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + impDecl.TokPos = c.End() + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. 
+ insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) 
+ i-- + } + + return true +} + +// DeleteImport deletes the import path from the file f, if present. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if impspec.Name == nil && name != "" { + continue + } + if impspec.Name != nil && impspec.Name.Name != name { + continue + } + if importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. 
+ if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.Position(lastImpspec.Path.ValuePos).Line + line := fset.Position(impspec.Path.ValuePos).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. + // Do nothing. + } else { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. + for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. 
+func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports returns true if f imports path. +func imports(f *ast.File, path string) bool { + return importSpec(f, path) != nil +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err == nil { + return t + } + return "" +} + +// declImports reports whether gen contains an import of path. 
+func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. +func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 00000000000..7630629824a --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,14 @@ +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. 
+func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/imports/BUILD b/vendor/golang.org/x/tools/imports/BUILD new file mode 100644 index 00000000000..fa0d99ebe39 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/BUILD @@ -0,0 +1,71 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "fastwalk.go", + "fix.go", + "imports.go", + "sortimports.go", + "zstdlib.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "fastwalk_portable.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "fastwalk_dirent_ino.go", + "fastwalk_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "fastwalk_portable.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "fastwalk_dirent_fileno.go", + "fastwalk_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "fastwalk_dirent_ino.go", + "fastwalk_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "fastwalk_portable.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "fastwalk_dirent_fileno.go", + "fastwalk_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "fastwalk_dirent_fileno.go", + "fastwalk_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "fastwalk_portable.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "fastwalk_portable.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "fastwalk_portable.go", + ], + "//conditions:default": [], + }), + importpath = "golang.org/x/tools/imports", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/tools/go/ast/astutil:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) 
diff --git a/vendor/golang.org/x/tools/imports/fastwalk.go b/vendor/golang.org/x/tools/imports/fastwalk.go new file mode 100644 index 00000000000..31e6e27b0d5 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/fastwalk.go @@ -0,0 +1,187 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// A faster implementation of filepath.Walk. +// +// filepath.Walk's design necessarily calls os.Lstat on each file, +// even if the caller needs less info. And goimports only need to know +// the type of each file. The kernel interface provides the type in +// the Readdir call but the standard library ignored it. +// fastwalk_unix.go contains a fork of the syscall routines. +// +// See golang.org/issue/16399 + +package imports + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "sync" +) + +// traverseLink is a sentinel error for fastWalk, similar to filepath.SkipDir. +var traverseLink = errors.New("traverse symlink, assuming target is a directory") + +// fastWalk walks the file tree rooted at root, calling walkFn for +// each file or directory in the tree, including root. +// +// If fastWalk returns filepath.SkipDir, the directory is skipped. +// +// Unlike filepath.Walk: +// * file stat calls must be done by the user. +// The only provided metadata is the file type, which does not include +// any permission bits. +// * multiple goroutines stat the filesystem concurrently. The provided +// walkFn must be safe for concurrent use. +// * fastWalk can follow symlinks if walkFn returns the traverseLink +// sentinel error. It is the walkFn's responsibility to prevent +// fastWalk from going into symlink cycles. +func fastWalk(root string, walkFn func(path string, typ os.FileMode) error) error { + // TODO(bradfitz): make numWorkers configurable? 
We used a + // minimum of 4 to give the kernel more info about multiple + // things we want, in hopes its I/O scheduling can take + // advantage of that. Hopefully most are in cache. Maybe 4 is + // even too low of a minimum. Profile more. + numWorkers := 4 + if n := runtime.NumCPU(); n > numWorkers { + numWorkers = n + } + + // Make sure to wait for all workers to finish, otherwise + // walkFn could still be called after returning. This Wait call + // runs after close(e.donec) below. + var wg sync.WaitGroup + defer wg.Wait() + + w := &walker{ + fn: walkFn, + enqueuec: make(chan walkItem, numWorkers), // buffered for performance + workc: make(chan walkItem, numWorkers), // buffered for performance + donec: make(chan struct{}), + + // buffered for correctness & not leaking goroutines: + resc: make(chan error, numWorkers), + } + defer close(w.donec) + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go w.doWork(&wg) + } + todo := []walkItem{{dir: root}} + out := 0 + for { + workc := w.workc + var workItem walkItem + if len(todo) == 0 { + workc = nil + } else { + workItem = todo[len(todo)-1] + } + select { + case workc <- workItem: + todo = todo[:len(todo)-1] + out++ + case it := <-w.enqueuec: + todo = append(todo, it) + case err := <-w.resc: + out-- + if err != nil { + return err + } + if out == 0 && len(todo) == 0 { + // It's safe to quit here, as long as the buffered + // enqueue channel isn't also readable, which might + // happen if the worker sends both another unit of + // work and its result before the other select was + // scheduled and both w.resc and w.enqueuec were + // readable. + select { + case it := <-w.enqueuec: + todo = append(todo, it) + default: + return nil + } + } + } + } +} + +// doWork reads directories as instructed (via workc) and runs the +// user's callback function. 
+func (w *walker) doWork(wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-w.donec: + return + case it := <-w.workc: + select { + case <-w.donec: + return + case w.resc <- w.walk(it.dir, !it.callbackDone): + } + } + } +} + +type walker struct { + fn func(path string, typ os.FileMode) error + + donec chan struct{} // closed on fastWalk's return + workc chan walkItem // to workers + enqueuec chan walkItem // from workers + resc chan error // from workers +} + +type walkItem struct { + dir string + callbackDone bool // callback already called; don't do it again +} + +func (w *walker) enqueue(it walkItem) { + select { + case w.enqueuec <- it: + case <-w.donec: + } +} + +func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { + joined := dirName + string(os.PathSeparator) + baseName + if typ == os.ModeDir { + w.enqueue(walkItem{dir: joined}) + return nil + } + + err := w.fn(joined, typ) + if typ == os.ModeSymlink { + if err == traverseLink { + // Set callbackDone so we don't call it twice for both the + // symlink-as-symlink and the symlink-as-directory later: + w.enqueue(walkItem{dir: joined, callbackDone: true}) + return nil + } + if err == filepath.SkipDir { + // Permit SkipDir on symlinks too. + return nil + } + } + return err +} + +func (w *walker) walk(root string, runUserCallback bool) error { + if runUserCallback { + err := w.fn(root, os.ModeDir) + if err == filepath.SkipDir { + return nil + } + if err != nil { + return err + } + } + + return readDir(root, w.onDirEnt) +} diff --git a/vendor/golang.org/x/tools/imports/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/imports/fastwalk_dirent_fileno.go new file mode 100644 index 00000000000..f1fd64949db --- /dev/null +++ b/vendor/golang.org/x/tools/imports/fastwalk_dirent_fileno.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build freebsd openbsd netbsd + +package imports + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Fileno) +} diff --git a/vendor/golang.org/x/tools/imports/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/imports/fastwalk_dirent_ino.go new file mode 100644 index 00000000000..ee85bc4dd4d --- /dev/null +++ b/vendor/golang.org/x/tools/imports/fastwalk_dirent_ino.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!appengine darwin + +package imports + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Ino) +} diff --git a/vendor/golang.org/x/tools/imports/fastwalk_portable.go b/vendor/golang.org/x/tools/imports/fastwalk_portable.go new file mode 100644 index 00000000000..6c2658347d1 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/fastwalk_portable.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd + +package imports + +import ( + "io/ioutil" + "os" +) + +// readDir calls fn for each directory entry in dirName. +// It does not descend into directories or follow symlinks. +// If fn returns a non-nil error, readDir returns with that error +// immediately. 
+func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fis, err := ioutil.ReadDir(dirName) + if err != nil { + return err + } + for _, fi := range fis { + if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { + return err + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/imports/fastwalk_unix.go b/vendor/golang.org/x/tools/imports/fastwalk_unix.go new file mode 100644 index 00000000000..5854233db92 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/fastwalk_unix.go @@ -0,0 +1,122 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!appengine darwin freebsd openbsd netbsd + +package imports + +import ( + "bytes" + "fmt" + "os" + "syscall" + "unsafe" +) + +const blockSize = 8 << 10 + +// unknownFileMode is a sentinel (and bogus) os.FileMode +// value used to represent a syscall.DT_UNKNOWN Dirent.Type. +const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := syscall.Open(dirName, 0, 0) + if err != nil { + return err + } + defer syscall.Close(fd) + + // The buffer must be at least a block long. + buf := make([]byte, blockSize) // stack-allocated; doesn't escape + bufp := 0 // starting read position in buf + nbuf := 0 // end valid data in buf + for { + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(fd, buf) + if err != nil { + return os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + return nil + } + } + consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) + bufp += consumed + if name == "" || name == "." || name == ".." { + continue + } + // Fallback for filesystems (like old XFS) that don't + // support Dirent.Type and have DT_UNKNOWN (0) there + // instead. 
+ if typ == unknownFileMode { + fi, err := os.Lstat(dirName + "/" + name) + if err != nil { + // It got deleted in the meantime. + if os.IsNotExist(err) { + continue + } + return err + } + typ = fi.Mode() & os.ModeType + } + if err := fn(dirName, name, typ); err != nil { + return err + } + } +} + +func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { + // golang.org/issue/15653 + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { + panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) + } + if len(buf) < int(dirent.Reclen) { + panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) + } + consumed = int(dirent.Reclen) + if direntInode(dirent) == 0 { // File absent in directory. + return + } + switch dirent.Type { + case syscall.DT_REG: + typ = 0 + case syscall.DT_DIR: + typ = os.ModeDir + case syscall.DT_LNK: + typ = os.ModeSymlink + case syscall.DT_BLK: + typ = os.ModeDevice + case syscall.DT_FIFO: + typ = os.ModeNamedPipe + case syscall.DT_SOCK: + typ = os.ModeSocket + case syscall.DT_UNKNOWN: + typ = unknownFileMode + default: + // Skip weird things. + // It's probably a DT_WHT (http://lwn.net/Articles/325369/) + // or something. Revisit if/when this package is moved outside + // of goimports. goimports only cares about regular files, + // symlinks, and directories. + return + } + + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := bytes.IndexByte(nameBuf[:], 0) + if nameLen < 0 { + panic("failed to find terminating 0 byte in dirent") + } + + // Special cases for common things: + if nameLen == 1 && nameBuf[0] == '.' { + name = "." + } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { + name = ".." 
+ } else { + name = string(nameBuf[:nameLen]) + } + return +} diff --git a/vendor/golang.org/x/tools/imports/fix.go b/vendor/golang.org/x/tools/imports/fix.go new file mode 100644 index 00000000000..61a5f062a62 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/fix.go @@ -0,0 +1,974 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "sort" + "strings" + "sync" + + "golang.org/x/tools/go/ast/astutil" +) + +// Debug controls verbose logging. +var Debug = false + +var ( + inTests = false // set true by fix_test.go; if false, no need to use testMu + testMu sync.RWMutex // guards globals reset by tests; used only if inTests +) + +// If set, LocalPrefix instructs Process to sort import paths with the given +// prefix into another group after 3rd-party packages. +var LocalPrefix string + +// importToGroup is a list of functions which map from an import path to +// a group number. +var importToGroup = []func(importPath string) (num int, ok bool){ + func(importPath string) (num int, ok bool) { + if LocalPrefix != "" && strings.HasPrefix(importPath, LocalPrefix) { + return 3, true + } + return + }, + func(importPath string) (num int, ok bool) { + if strings.HasPrefix(importPath, "appengine") { + return 2, true + } + return + }, + func(importPath string) (num int, ok bool) { + if strings.Contains(importPath, ".") { + return 1, true + } + return + }, +} + +func importGroup(importPath string) int { + for _, fn := range importToGroup { + if n, ok := fn(importPath); ok { + return n + } + } + return 0 +} + +// packageInfo is a summary of features found in a package. 
+type packageInfo struct { + Globals map[string]bool // symbol => true +} + +// dirPackageInfo exposes the dirPackageInfoFile function so that it can be overridden. +var dirPackageInfo = dirPackageInfoFile + +// dirPackageInfoFile gets information from other files in the package. +func dirPackageInfoFile(pkgName, srcDir, filename string) (*packageInfo, error) { + considerTests := strings.HasSuffix(filename, "_test.go") + + // Handle file from stdin + if _, err := os.Stat(filename); err != nil { + if os.IsNotExist(err) { + return &packageInfo{}, nil + } + return nil, err + } + + fileBase := filepath.Base(filename) + packageFileInfos, err := ioutil.ReadDir(srcDir) + if err != nil { + return nil, err + } + + info := &packageInfo{Globals: make(map[string]bool)} + for _, fi := range packageFileInfos { + if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") { + continue + } + if !considerTests && strings.HasSuffix(fi.Name(), "_test.go") { + continue + } + + fileSet := token.NewFileSet() + root, err := parser.ParseFile(fileSet, filepath.Join(srcDir, fi.Name()), nil, 0) + if err != nil { + continue + } + + for _, decl := range root.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + + for _, spec := range genDecl.Specs { + valueSpec, ok := spec.(*ast.ValueSpec) + if !ok { + continue + } + info.Globals[valueSpec.Names[0].Name] = true + } + } + } + return info, nil +} + +func fixImports(fset *token.FileSet, f *ast.File, filename string) (added []string, err error) { + // refs are a set of possible package references currently unsatisfied by imports. + // first key: either base package (e.g. "fmt") or renamed package + // second key: referenced package symbol (e.g. "Println") + refs := make(map[string]map[string]bool) + + // decls are the current package imports. key is base package or renamed package. 
+ decls := make(map[string]*ast.ImportSpec) + + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + if Debug { + log.Printf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + } + + var packageInfo *packageInfo + var loadedPackageInfo bool + + // collect potential uses of packages. + var visitor visitFn + visitor = visitFn(func(node ast.Node) ast.Visitor { + if node == nil { + return visitor + } + switch v := node.(type) { + case *ast.ImportSpec: + if v.Name != nil { + decls[v.Name.Name] = v + break + } + ipath := strings.Trim(v.Path.Value, `"`) + if ipath == "C" { + break + } + local := importPathToName(ipath, srcDir) + decls[local] = v + case *ast.SelectorExpr: + xident, ok := v.X.(*ast.Ident) + if !ok { + break + } + if xident.Obj != nil { + // if the parser can resolve it, it's not a package ref + break + } + pkgName := xident.Name + if refs[pkgName] == nil { + refs[pkgName] = make(map[string]bool) + } + if !loadedPackageInfo { + loadedPackageInfo = true + packageInfo, _ = dirPackageInfo(f.Name.Name, srcDir, filename) + } + if decls[pkgName] == nil && (packageInfo == nil || !packageInfo.Globals[pkgName]) { + refs[pkgName][v.Sel.Name] = true + } + } + return visitor + }) + ast.Walk(visitor, f) + + // Nil out any unused ImportSpecs, to be removed in following passes + unusedImport := map[string]string{} + for pkg, is := range decls { + if refs[pkg] == nil && pkg != "_" && pkg != "." { + name := "" + if is.Name != nil { + name = is.Name.Name + } + unusedImport[strings.Trim(is.Path.Value, `"`)] = name + } + } + for ipath, name := range unusedImport { + if ipath == "C" { + // Don't remove cgo stuff. + continue + } + astutil.DeleteNamedImport(fset, f, name, ipath) + } + + for pkgName, symbols := range refs { + if len(symbols) == 0 { + // skip over packages already imported + delete(refs, pkgName) + } + } + + // Search for imports matching potential package references. 
+ searches := 0 + type result struct { + ipath string // import path (if err == nil) + name string // optional name to rename import as + err error + } + results := make(chan result) + for pkgName, symbols := range refs { + go func(pkgName string, symbols map[string]bool) { + ipath, rename, err := findImport(pkgName, symbols, filename) + r := result{ipath: ipath, err: err} + if rename { + r.name = pkgName + } + results <- r + }(pkgName, symbols) + searches++ + } + for i := 0; i < searches; i++ { + result := <-results + if result.err != nil { + return nil, result.err + } + if result.ipath != "" { + if result.name != "" { + astutil.AddNamedImport(fset, f, result.name, result.ipath) + } else { + astutil.AddImport(fset, f, result.ipath) + } + added = append(added, result.ipath) + } + } + + return added, nil +} + +// importPathToName returns the package name for the given import path. +var importPathToName func(importPath, srcDir string) (packageName string) = importPathToNameGoPath + +// importPathToNameBasic assumes the package name is the base of import path. +func importPathToNameBasic(importPath, srcDir string) (packageName string) { + return path.Base(importPath) +} + +// importPathToNameGoPath finds out the actual package name, as declared in its .go files. +// If there's a problem, it falls back to using importPathToNameBasic. +func importPathToNameGoPath(importPath, srcDir string) (packageName string) { + // Fast path for standard library without going to disk. + if pkg, ok := stdImportPackage[importPath]; ok { + return pkg + } + + pkgName, err := importPathToNameGoPathParse(importPath, srcDir) + if Debug { + log.Printf("importPathToNameGoPathParse(%q, srcDir=%q) = %q, %v", importPath, srcDir, pkgName, err) + } + if err == nil { + return pkgName + } + return importPathToNameBasic(importPath, srcDir) +} + +// importPathToNameGoPathParse is a faster version of build.Import if +// the only thing desired is the package name. 
It uses build.FindOnly +// to find the directory and then only parses one file in the package, +// trusting that the files in the directory are consistent. +func importPathToNameGoPathParse(importPath, srcDir string) (packageName string, err error) { + buildPkg, err := build.Import(importPath, srcDir, build.FindOnly) + if err != nil { + return "", err + } + d, err := os.Open(buildPkg.Dir) + if err != nil { + return "", err + } + names, err := d.Readdirnames(-1) + d.Close() + if err != nil { + return "", err + } + sort.Strings(names) // to have predictable behavior + var lastErr error + var nfile int + for _, name := range names { + if !strings.HasSuffix(name, ".go") { + continue + } + if strings.HasSuffix(name, "_test.go") { + continue + } + nfile++ + fullFile := filepath.Join(buildPkg.Dir, name) + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly) + if err != nil { + lastErr = err + continue + } + pkgName := f.Name.Name + if pkgName == "documentation" { + // Special case from go/build.ImportDir, not + // handled by ctx.MatchFile. + continue + } + if pkgName == "main" { + // Also skip package main, assuming it's a +build ignore generator or example. + // Since you can't import a package main anyway, there's no harm here. + continue + } + return pkgName, nil + } + if lastErr != nil { + return "", lastErr + } + return "", fmt.Errorf("no importable package found in %d Go files", nfile) +} + +var stdImportPackage = map[string]string{} // "net/http" => "http" + +func init() { + // Nothing in the standard library has a package name not + // matching its import base name. + for _, pkg := range stdlib { + if _, ok := stdImportPackage[pkg]; !ok { + stdImportPackage[pkg] = path.Base(pkg) + } + } +} + +// Directory-scanning state. 
+var ( + // scanGoRootOnce guards calling scanGoRoot (for $GOROOT) + scanGoRootOnce sync.Once + // scanGoPathOnce guards calling scanGoPath (for $GOPATH) + scanGoPathOnce sync.Once + + // populateIgnoreOnce guards calling populateIgnore + populateIgnoreOnce sync.Once + ignoredDirs []os.FileInfo + + dirScanMu sync.RWMutex + dirScan map[string]*pkg // abs dir path => *pkg +) + +type pkg struct { + dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPath string // full pkg import path ("net/http", "foo/bar/vendor/a/b") + importPathShort string // vendorless import path ("net/http", "a/b") +} + +// byImportPathShortLength sorts by the short import path length, breaking ties on the +// import string itself. +type byImportPathShortLength []*pkg + +func (s byImportPathShortLength) Len() int { return len(s) } +func (s byImportPathShortLength) Less(i, j int) bool { + vi, vj := s[i].importPathShort, s[j].importPathShort + return len(vi) < len(vj) || (len(vi) == len(vj) && vi < vj) + +} +func (s byImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// guarded by populateIgnoreOnce; populates ignoredDirs. +func populateIgnore() { + for _, srcDir := range build.Default.SrcDirs() { + if srcDir == filepath.Join(build.Default.GOROOT, "src") { + continue + } + populateIgnoredDirs(srcDir) + } +} + +// populateIgnoredDirs reads an optional config file at /.goimportsignore +// of relative directories to ignore when scanning for go files. +// The provided path is one of the $GOPATH entries with "src" appended. 
+func populateIgnoredDirs(path string) { + file := filepath.Join(path, ".goimportsignore") + slurp, err := ioutil.ReadFile(file) + if Debug { + if err != nil { + log.Print(err) + } else { + log.Printf("Read %s", file) + } + } + if err != nil { + return + } + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + full := filepath.Join(path, line) + if fi, err := os.Stat(full); err == nil { + ignoredDirs = append(ignoredDirs, fi) + if Debug { + log.Printf("Directory added to ignore list: %s", full) + } + } else if Debug { + log.Printf("Error statting entry in .goimportsignore: %v", err) + } + } +} + +func skipDir(fi os.FileInfo) bool { + for _, ignoredDir := range ignoredDirs { + if os.SameFile(fi, ignoredDir) { + return true + } + } + return false +} + +// shouldTraverse reports whether the symlink fi, found in dir, +// should be followed. It makes sure symlinks were never visited +// before to avoid symlink loops. +func shouldTraverse(dir string, fi os.FileInfo) bool { + path := filepath.Join(dir, fi.Name()) + target, err := filepath.EvalSymlinks(path) + if err != nil { + if !os.IsNotExist(err) { + fmt.Fprintln(os.Stderr, err) + } + return false + } + ts, err := os.Stat(target) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + if !ts.IsDir() { + return false + } + if skipDir(ts) { + return false + } + // Check for symlink loops by statting each directory component + // and seeing if any are the same file as ts. + for { + parent := filepath.Dir(path) + if parent == path { + // Made it to the root without seeing a cycle. + // Use this symlink. + return true + } + parentInfo, err := os.Stat(parent) + if err != nil { + return false + } + if os.SameFile(ts, parentInfo) { + // Cycle. Don't traverse. 
+ return false + } + path = parent + } + +} + +var testHookScanDir = func(dir string) {} + +var scanGoRootDone = make(chan struct{}) // closed when scanGoRoot is done + +func scanGoRoot() { + go func() { + scanGoDirs(true) + close(scanGoRootDone) + }() +} + +func scanGoPath() { scanGoDirs(false) } + +func scanGoDirs(goRoot bool) { + if Debug { + which := "$GOROOT" + if !goRoot { + which = "$GOPATH" + } + log.Printf("scanning " + which) + defer log.Printf("scanned " + which) + } + dirScanMu.Lock() + if dirScan == nil { + dirScan = make(map[string]*pkg) + } + dirScanMu.Unlock() + + for _, srcDir := range build.Default.SrcDirs() { + isGoroot := srcDir == filepath.Join(build.Default.GOROOT, "src") + if isGoroot != goRoot { + continue + } + testHookScanDir(srcDir) + walkFn := func(path string, typ os.FileMode) error { + dir := filepath.Dir(path) + if typ.IsRegular() { + if dir == srcDir { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. + return nil + } + if !strings.HasSuffix(path, ".go") { + return nil + } + dirScanMu.Lock() + if _, dup := dirScan[dir]; !dup { + importpath := filepath.ToSlash(dir[len(srcDir)+len("/"):]) + dirScan[dir] = &pkg{ + importPath: importpath, + importPathShort: vendorlessImportPath(importpath), + dir: dir, + } + } + dirScanMu.Unlock() + return nil + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || base == "node_modules" { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && skipDir(fi) { + if Debug { + log.Printf("skipping directory %q under %s", fi.Name(), dir) + } + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + fi, err := os.Lstat(path) + if err != nil { + // Just ignore it. 
+ return nil + } + if shouldTraverse(dir, fi) { + return traverseLink + } + } + return nil + } + if err := fastWalk(srcDir, walkFn); err != nil { + log.Printf("goimports: scanning directory %v: %v", srcDir, err) + } + } +} + +// vendorlessImportPath returns the devendorized version of the provided import path. +// e.g. "foo/bar/vendor/a/b" => "a/b" +func vendorlessImportPath(ipath string) string { + // Devendorize for use in import statement. + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + if strings.HasPrefix(ipath, "vendor/") { + return ipath[len("vendor/"):] + } + return ipath +} + +// loadExports returns the set of exported symbols in the package at dir. +// It returns nil on error or if the package name in dir does not match expectPackage. +var loadExports func(expectPackage, dir string) map[string]bool = loadExportsGoPath + +func loadExportsGoPath(expectPackage, dir string) map[string]bool { + if Debug { + log.Printf("loading exports in dir %s (seeking package %s)", dir, expectPackage) + } + exports := make(map[string]bool) + + ctx := build.Default + + // ReadDir is like ioutil.ReadDir, but only returns *.go files + // and filters out _test.go files since they're not relevant + // and only slow things down. 
+ ctx.ReadDir = func(dir string) (notTests []os.FileInfo, err error) { + all, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + notTests = all[:0] + for _, fi := range all { + name := fi.Name() + if strings.HasSuffix(name, ".go") && !strings.HasSuffix(name, "_test.go") { + notTests = append(notTests, fi) + } + } + return notTests, nil + } + + files, err := ctx.ReadDir(dir) + if err != nil { + log.Print(err) + return nil + } + + fset := token.NewFileSet() + + for _, fi := range files { + match, err := ctx.MatchFile(dir, fi.Name()) + if err != nil || !match { + continue + } + fullFile := filepath.Join(dir, fi.Name()) + f, err := parser.ParseFile(fset, fullFile, nil, 0) + if err != nil { + if Debug { + log.Printf("Parsing %s: %v", fullFile, err) + } + return nil + } + pkgName := f.Name.Name + if pkgName == "documentation" { + // Special case from go/build.ImportDir, not + // handled by ctx.MatchFile. + continue + } + if pkgName != expectPackage { + if Debug { + log.Printf("scan of dir %v is not expected package %v (actually %v)", dir, expectPackage, pkgName) + } + return nil + } + for name := range f.Scope.Objects { + if ast.IsExported(name) { + exports[name] = true + } + } + } + + if Debug { + exportList := make([]string, 0, len(exports)) + for k := range exports { + exportList = append(exportList, k) + } + sort.Strings(exportList) + log.Printf("loaded exports in dir %v (package %v): %v", dir, expectPackage, strings.Join(exportList, ", ")) + } + return exports +} + +// findImport searches for a package with the given symbols. +// If no package is found, findImport returns ("", false, nil) +// +// This is declared as a variable rather than a function so goimports +// can be easily extended by adding a file with an init function. +// +// The rename value tells goimports whether to use the package name as +// a local qualifier in an import. 
For example, if findImports("pkg", +// "X") returns ("foo/bar", rename=true), then goimports adds the +// import line: +// import pkg "foo/bar" +// to satisfy uses of pkg.X in the file. +var findImport func(pkgName string, symbols map[string]bool, filename string) (foundPkg string, rename bool, err error) = findImportGoPath + +// findImportGoPath is the normal implementation of findImport. +// (Some companies have their own internally.) +func findImportGoPath(pkgName string, symbols map[string]bool, filename string) (foundPkg string, rename bool, err error) { + if inTests { + testMu.RLock() + defer testMu.RUnlock() + } + + // Fast path for the standard library. + // In the common case we hopefully never have to scan the GOPATH, which can + // be slow with moving disks. + if pkg, rename, ok := findImportStdlib(pkgName, symbols); ok { + return pkg, rename, nil + } + if pkgName == "rand" && symbols["Read"] { + // Special-case rand.Read. + // + // If findImportStdlib didn't find it above, don't go + // searching for it, lest it find and pick math/rand + // in GOROOT (new as of Go 1.6) + // + // crypto/rand is the safer choice. + return "", false, nil + } + + // TODO(sameer): look at the import lines for other Go files in the + // local directory, since the user is likely to import the same packages + // in the current Go file. Return rename=true when the other Go files + // use a renamed package that's also used in the current file. + + // Read all the $GOPATH/src/.goimportsignore files before scanning directories. + populateIgnoreOnce.Do(populateIgnore) + + // Start scanning the $GOROOT asynchronously, then run the + // GOPATH scan synchronously if needed, and then wait for the + // $GOROOT to finish. + // + // TODO(bradfitz): run each $GOPATH entry async. But nobody + // really has more than one anyway, so low priority. 
+ scanGoRootOnce.Do(scanGoRoot) // async + if !fileInDir(filename, build.Default.GOROOT) { + scanGoPathOnce.Do(scanGoPath) // blocking + } + <-scanGoRootDone + + // Find candidate packages, looking only at their directory names first. + var candidates []*pkg + for _, pkg := range dirScan { + if pkgIsCandidate(filename, pkgName, pkg) { + candidates = append(candidates, pkg) + } + } + + // Sort the candidates by their import package length, + // assuming that shorter package names are better than long + // ones. Note that this sorts by the de-vendored name, so + // there's no "penalty" for vendoring. + sort.Sort(byImportPathShortLength(candidates)) + if Debug { + for i, pkg := range candidates { + log.Printf("%s candidate %d/%d: %v", pkgName, i+1, len(candidates), pkg.importPathShort) + } + } + + // Collect exports for packages with matching names. + + done := make(chan struct{}) // closed when we find the answer + defer close(done) + + rescv := make([]chan *pkg, len(candidates)) + for i := range candidates { + rescv[i] = make(chan *pkg) + } + const maxConcurrentPackageImport = 4 + loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) + + go func() { + for i, pkg := range candidates { + select { + case loadExportsSem <- struct{}{}: + select { + case <-done: + return + default: + } + case <-done: + return + } + pkg := pkg + resc := rescv[i] + go func() { + if inTests { + testMu.RLock() + defer testMu.RUnlock() + } + defer func() { <-loadExportsSem }() + exports := loadExports(pkgName, pkg.dir) + + // If it doesn't have the right + // symbols, send nil to mean no match. 
+ for symbol := range symbols { + if !exports[symbol] { + pkg = nil + break + } + } + select { + case resc <- pkg: + case <-done: + } + }() + } + }() + for _, resc := range rescv { + pkg := <-resc + if pkg == nil { + continue + } + // If the package name in the source doesn't match the import path's base, + // return true so the rewriter adds a name (import foo "github.com/bar/go-foo") + needsRename := path.Base(pkg.importPath) != pkgName + return pkg.importPathShort, needsRename, nil + } + return "", false, nil +} + +// pkgIsCandidate reports whether pkg is a candidate for the package +// that the identifier pkgIdent in the file named by filename is trying +// to refer to. +// +// This check is purely lexical and is meant to be as fast as possible +// because it's run over all $GOPATH directories to filter out poor +// candidates in order to limit the CPU and I/O later parsing the +// exports in candidate packages. +// +// filename is the file being formatted. +// pkgIdent is the package being searched for, like "client" (if +// searching for "client.New") +func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { + // Check "internal" and "vendor" visibility: + if !canUse(filename, pkg.dir) { + return false + } + + // Speed optimization to minimize disk I/O: + // the last two components on disk must contain the + // package name somewhere. + // + // This permits mismatch naming like directory + // "go-foo" being package "foo", or "pkg.v3" being "pkg", + // or directory "google.golang.org/api/cloudbilling/v1" + // being package "cloudbilling", but doesn't + // permit a directory "foo" to be package + // "bar", which is strongly discouraged + // anyway. There's no reason goimports needs + // to be slow just to accommodate that. 
+ lastTwo := lastTwoComponents(pkg.importPathShort) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + } + + return false +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// canUse reports whether the package in dir is usable from filename, +// respecting the Go "internal" and "vendor" visibility rules. +func canUse(filename, dir string) bool { + // Fast path check, before any allocations. If it doesn't contain vendor + // or internal, it's not tricky: + // Note that this can false-negative on directories like "notinternal", + // but we check it correctly below. This is just a fast path. + if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") { + return true + } + + dirSlash := filepath.ToSlash(dir) + if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") { + return true + } + // Vendor or internal directory only visible from children of parent. + // That means the path from the current directory to the target directory + // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal + // or bar/vendor or bar/internal. + // After stripping all the leading ../, the only okay place to see vendor or internal + // is at the very beginning of the path. 
+ absfile, err := filepath.Abs(filename) + if err != nil { + return false + } + absdir, err := filepath.Abs(dir) + if err != nil { + return false + } + rel, err := filepath.Rel(absfile, absdir) + if err != nil { + return false + } + relSlash := filepath.ToSlash(rel) + if i := strings.LastIndex(relSlash, "../"); i >= 0 { + relSlash = relSlash[i+len("../"):] + } + return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. +func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +type visitFn func(node ast.Node) ast.Visitor + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + return fn(node) +} + +func findImportStdlib(shortPkg string, symbols map[string]bool) (importPath string, rename, ok bool) { + for symbol := range symbols { + key := shortPkg + "." + symbol + path := stdlib[key] + if path == "" { + if key == "rand.Read" { + continue + } + return "", false, false + } + if importPath != "" && importPath != path { + // Ambiguous. Symbols pointed to different things. + return "", false, false + } + importPath = path + } + if importPath == "" && shortPkg == "rand" && symbols["Read"] { + return "crypto/rand", false, true + } + return importPath, false, importPath != "" +} + +// fileInDir reports whether the provided file path looks like +// it's in dir. (without hitting the filesystem) +func fileInDir(file, dir string) bool { + rest := strings.TrimPrefix(file, dir) + if len(rest) == len(file) { + // dir is not a prefix of file. + return false + } + // Check for boundary: either nothing (file == dir), or a slash. 
+ return len(rest) == 0 || rest[0] == '/' || rest[0] == '\\' +} diff --git a/vendor/golang.org/x/tools/imports/imports.go b/vendor/golang.org/x/tools/imports/imports.go new file mode 100644 index 00000000000..2c2eb5c62df --- /dev/null +++ b/vendor/golang.org/x/tools/imports/imports.go @@ -0,0 +1,289 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run mkstdlib.go + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. +package imports + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/astutil" +) + +// Options specifies options for processing files. +type Options struct { + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Process formats and adjusts imports for the provided file. +// If opt is nil the defaults are used. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +// To process data ``as if'' it were in filename, pass the data as a non-nil src. 
+func Process(filename string, src []byte, opt *Options) ([]byte, error) { + if opt == nil { + opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} + } + + fileSet := token.NewFileSet() + file, adjust, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + if !opt.FormatOnly { + _, err = fixImports(fileSet, file, filename) + if err != nil { + return nil, err + } + } + + sortImports(fileSet, file) + imps := astutil.Imports(fileSet, file) + + var spacesBefore []string // import paths we need spaces before + for _, impSection := range imps { + // Within each block of contiguous imports, see if any + // import lines are in different group numbers. If so, + // we'll need to put a space between them so it's + // compatible with gofmt. + lastGroup := -1 + for _, importSpec := range impSection { + importPath, _ := strconv.Unquote(importSpec.Path.Value) + groupNum := importGroup(importPath) + if groupNum != lastGroup && lastGroup != -1 { + spacesBefore = append(spacesBefore, importPath) + } + lastGroup = groupNum + } + + } + + printerMode := printer.UseSpaces + if opt.TabIndent { + printerMode |= printer.TabIndent + } + printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth} + + var buf bytes.Buffer + err = printConfig.Fprint(&buf, fileSet, file) + if err != nil { + return nil, err + } + out := buf.Bytes() + if adjust != nil { + out = adjust(src, out) + } + if len(spacesBefore) > 0 { + out = addImportSpaces(bytes.NewReader(out), spacesBefore) + } + + out, err = format.Source(out) + if err != nil { + return nil, err + } + return out, nil +} + +// parse parses src, which was read from filename, +// as a Go source file or statement list. 
+func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + + // Try as whole source file. + file, err := parser.ParseFile(fset, filename, src, parserMode) + if err == nil { + return file, nil, nil + } + // If the error is that the source file didn't begin with a + // package line and we accept fragmented input, fall through to + // try as a source fragment. Stop and return on any other error. + if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { + return nil, nil, err + } + + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ;, not a newline, so that the line numbers + // in psrc match the ones in src. + psrc := append([]byte("package main;"), src...) + file, err = parser.ParseFile(fset, filename, psrc, parserMode) + if err == nil { + // If a main function exists, we will assume this is a main + // package and leave the file. + if containsMainFunc(file) { + return file, nil, nil + } + + adjust := func(orig, src []byte) []byte { + // Remove the package clause. + // Gofmt has turned the ; into a \n. + src = src[len("package main\n"):] + return matchSpace(orig, src) + } + return file, adjust, nil + } + // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. + if !strings.Contains(err.Error(), "expected declaration") { + return nil, nil, err + } + + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ;, not a newline, so that the line numbers + // in fsrc match the ones in src. 
+ fsrc := append(append([]byte("package p; func _() {"), src...), '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) + if err == nil { + adjust := func(orig, src []byte) []byte { + // Remove the wrapping. + // Gofmt has turned the ; into a \n\n. + src = src[len("package p\n\nfunc _() {"):] + src = src[:len(src)-len("}\n")] + // Gofmt has also indented the function body one level. + // Remove that indent. + src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1) + return matchSpace(orig, src) + } + return file, adjust, nil + } + + // Failed, and out of options. + return nil, nil, err +} + +// containsMainFunc checks if a file contains a function declaration with the +// function signature 'func main()' +func containsMainFunc(file *ast.File) bool { + for _, decl := range file.Decls { + if f, ok := decl.(*ast.FuncDecl); ok { + if f.Name.Name != "main" { + continue + } + + if len(f.Type.Params.List) != 0 { + continue + } + + if f.Type.Results != nil && len(f.Type.Results.List) != 0 { + continue + } + + return true + } + } + + return false +} + +func cutSpace(b []byte) (before, middle, after []byte) { + i := 0 + for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') { + i++ + } + j := len(b) + for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') { + j-- + } + if i <= j { + return b[:i], b[i:j], b[j:] + } + return nil, nil, b[j:] +} + +// matchSpace reformats src to use the same space context as orig. +// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src. +// 2) matchSpace copies the indentation of the first non-blank line in orig +// to every non-blank line in src. +// 3) matchSpace copies the trailing space from orig and uses it in place +// of src's trailing space. 
+func matchSpace(orig []byte, src []byte) []byte { + before, _, after := cutSpace(orig) + i := bytes.LastIndex(before, []byte{'\n'}) + before, indent := before[:i+1], before[i+1:] + + _, src, _ = cutSpace(src) + + var b bytes.Buffer + b.Write(before) + for len(src) > 0 { + line := src + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, src = line[:i+1], line[i+1:] + } else { + src = nil + } + if len(line) > 0 && line[0] != '\n' { // not blank + b.Write(indent) + } + b.Write(line) + } + b.Write(after) + return b.Bytes() +} + +var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`) + +func addImportSpaces(r io.Reader, breaks []string) []byte { + var out bytes.Buffer + sc := bufio.NewScanner(r) + inImports := false + done := false + for sc.Scan() { + s := sc.Text() + + if !inImports && !done && strings.HasPrefix(s, "import") { + inImports = true + } + if inImports && (strings.HasPrefix(s, "var") || + strings.HasPrefix(s, "func") || + strings.HasPrefix(s, "const") || + strings.HasPrefix(s, "type")) { + done = true + inImports = false + } + if inImports && len(breaks) > 0 { + if m := impLine.FindStringSubmatch(s); m != nil { + if m[1] == breaks[0] { + out.WriteByte('\n') + breaks = breaks[1:] + } + } + } + + fmt.Fprintln(&out, s) + } + return out.Bytes() +} diff --git a/vendor/golang.org/x/tools/imports/mkindex.go b/vendor/golang.org/x/tools/imports/mkindex.go new file mode 100644 index 00000000000..755e2394f2d --- /dev/null +++ b/vendor/golang.org/x/tools/imports/mkindex.go @@ -0,0 +1,173 @@ +// +build ignore + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Command mkindex creates the file "pkgindex.go" containing an index of the Go +// standard library. The file is intended to be built as part of the imports +// package, so that the package may be used in environments where a GOROOT is +// not available (such as App Engine). 
+package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/build" + "go/format" + "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "strings" +) + +var ( + pkgIndex = make(map[string][]pkg) + exports = make(map[string]map[string]bool) +) + +func main() { + // Don't use GOPATH. + ctx := build.Default + ctx.GOPATH = "" + + // Populate pkgIndex global from GOROOT. + for _, path := range ctx.SrcDirs() { + f, err := os.Open(path) + if err != nil { + log.Print(err) + continue + } + children, err := f.Readdir(-1) + f.Close() + if err != nil { + log.Print(err) + continue + } + for _, child := range children { + if child.IsDir() { + loadPkg(path, child.Name()) + } + } + } + // Populate exports global. + for _, ps := range pkgIndex { + for _, p := range ps { + e := loadExports(p.dir) + if e != nil { + exports[p.dir] = e + } + } + } + + // Construct source file. + var buf bytes.Buffer + fmt.Fprint(&buf, pkgIndexHead) + fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex) + fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports) + src := buf.Bytes() + + // Replace main.pkg type name with pkg. + src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1) + // Replace actual GOROOT with "/go". + src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1) + // Add some line wrapping. + src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1) + src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1) + + var err error + src, err = format.Source(src) + if err != nil { + log.Fatal(err) + } + + // Write out source file. + err = ioutil.WriteFile("pkgindex.go", src, 0644) + if err != nil { + log.Fatal(err) + } +} + +const pkgIndexHead = `package imports + +func init() { + pkgIndexOnce.Do(func() { + pkgIndex.m = pkgIndexMaster + }) + loadExports = func(dir string) map[string]bool { + return exportsMaster[dir] + } +} +` + +type pkg struct { + importpath string // full pkg import path, e.g. 
"net/http" + dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt" +} + +var fset = token.NewFileSet() + +func loadPkg(root, importpath string) { + shortName := path.Base(importpath) + if shortName == "testdata" { + return + } + + dir := filepath.Join(root, importpath) + pkgIndex[shortName] = append(pkgIndex[shortName], pkg{ + importpath: importpath, + dir: dir, + }) + + pkgDir, err := os.Open(dir) + if err != nil { + return + } + children, err := pkgDir.Readdir(-1) + pkgDir.Close() + if err != nil { + return + } + for _, child := range children { + name := child.Name() + if name == "" { + continue + } + if c := name[0]; c == '.' || ('0' <= c && c <= '9') { + continue + } + if child.IsDir() { + loadPkg(root, filepath.Join(importpath, name)) + } + } +} + +func loadExports(dir string) map[string]bool { + exports := make(map[string]bool) + buildPkg, err := build.ImportDir(dir, 0) + if err != nil { + if strings.Contains(err.Error(), "no buildable Go source files in") { + return nil + } + log.Printf("could not import %q: %v", dir, err) + return nil + } + for _, file := range buildPkg.GoFiles { + f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0) + if err != nil { + log.Printf("could not parse %q: %v", file, err) + continue + } + for name := range f.Scope.Objects { + if ast.IsExported(name) { + exports[name] = true + } + } + } + return exports +} diff --git a/vendor/golang.org/x/tools/imports/mkstdlib.go b/vendor/golang.org/x/tools/imports/mkstdlib.go new file mode 100644 index 00000000000..5602244a7e8 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/mkstdlib.go @@ -0,0 +1,104 @@ +// +build ignore + +// mkstdlib generates the zstdlib.go file, containing the Go standard +// library API symbols. It's baked into the binary to avoid scanning +// GOPATH in the common case. 
+package main + +import ( + "bufio" + "bytes" + "fmt" + "go/format" + "io" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "regexp" + "sort" + "strings" +) + +func mustOpen(name string) io.Reader { + f, err := os.Open(name) + if err != nil { + log.Fatal(err) + } + return f +} + +func api(base string) string { + return filepath.Join(os.Getenv("GOROOT"), "api", base) +} + +var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`) + +func main() { + var buf bytes.Buffer + outf := func(format string, args ...interface{}) { + fmt.Fprintf(&buf, format, args...) + } + outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n") + outf("package imports\n") + outf("var stdlib = map[string]string{\n") + f := io.MultiReader( + mustOpen(api("go1.txt")), + mustOpen(api("go1.1.txt")), + mustOpen(api("go1.2.txt")), + mustOpen(api("go1.3.txt")), + mustOpen(api("go1.4.txt")), + mustOpen(api("go1.5.txt")), + mustOpen(api("go1.6.txt")), + mustOpen(api("go1.7.txt")), + mustOpen(api("go1.8.txt")), + ) + sc := bufio.NewScanner(f) + fullImport := map[string]string{} // "zip.NewReader" => "archive/zip" + ambiguous := map[string]bool{} + var keys []string + for sc.Scan() { + l := sc.Text() + has := func(v string) bool { return strings.Contains(l, v) } + if has("struct, ") || has("interface, ") || has(", method (") { + continue + } + if m := sym.FindStringSubmatch(l); m != nil { + full := m[1] + key := path.Base(full) + "." 
+ m[2] + if exist, ok := fullImport[key]; ok { + if exist != full { + ambiguous[key] = true + } + } else { + fullImport[key] = full + keys = append(keys, key) + } + } + } + if err := sc.Err(); err != nil { + log.Fatal(err) + } + sort.Strings(keys) + for _, key := range keys { + if ambiguous[key] { + outf("\t// %q is ambiguous\n", key) + } else { + outf("\t%q: %q,\n", key, fullImport[key]) + } + } + outf("\n") + for _, sym := range [...]string{"Alignof", "ArbitraryType", "Offsetof", "Pointer", "Sizeof"} { + outf("\t%q: %q,\n", "unsafe."+sym, "unsafe") + } + outf("}\n") + fmtbuf, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile("zstdlib.go", fmtbuf, 0666) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/tools/imports/sortimports.go b/vendor/golang.org/x/tools/imports/sortimports.go new file mode 100644 index 00000000000..653afc51776 --- /dev/null +++ b/vendor/golang.org/x/tools/imports/sortimports.go @@ -0,0 +1,212 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hacked up copy of go/ast/import.go + +package imports + +import ( + "go/ast" + "go/token" + "sort" + "strconv" +) + +// sortImports sorts runs of consecutive import lines in import blocks in f. +// It also removes duplicate imports when it is possible to do so without data loss. +func sortImports(fset *token.FileSet, f *ast.File) { + for i, d := range f.Decls { + d, ok := d.(*ast.GenDecl) + if !ok || d.Tok != token.IMPORT { + // Not an import declaration, so we're done. + // Imports are always first. + break + } + + if len(d.Specs) == 0 { + // Empty import block, remove it. + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + } + + if !d.Lparen.IsValid() { + // Not a block: sorted by default. + continue + } + + // Identify and sort runs of specs on successive lines. 
+ i := 0 + specs := d.Specs[:0] + for j, s := range d.Specs { + if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { + // j begins a new run. End this one. + specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...) + i = j + } + } + specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...) + d.Specs = specs + + // Deduping can leave a blank line before the rparen; clean that up. + if len(d.Specs) > 0 { + lastSpec := d.Specs[len(d.Specs)-1] + lastLine := fset.Position(lastSpec.Pos()).Line + if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 { + fset.File(d.Rparen).MergeLine(rParenLine - 1) + } + } + } +} + +func importPath(s ast.Spec) string { + t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value) + if err == nil { + return t + } + return "" +} + +func importName(s ast.Spec) string { + n := s.(*ast.ImportSpec).Name + if n == nil { + return "" + } + return n.Name +} + +func importComment(s ast.Spec) string { + c := s.(*ast.ImportSpec).Comment + if c == nil { + return "" + } + return c.Text() +} + +// collapse indicates whether prev may be removed, leaving only next. +func collapse(prev, next ast.Spec) bool { + if importPath(next) != importPath(prev) || importName(next) != importName(prev) { + return false + } + return prev.(*ast.ImportSpec).Comment == nil +} + +type posSpan struct { + Start token.Pos + End token.Pos +} + +func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { + // Can't short-circuit here even if specs are already sorted, + // since they might yet need deduplication. + // A lone import, however, may be safely ignored. + if len(specs) <= 1 { + return specs + } + + // Record positions for specs. + pos := make([]posSpan, len(specs)) + for i, s := range specs { + pos[i] = posSpan{s.Pos(), s.End()} + } + + // Identify comments in this range. + // Any comment from pos[0].Start to the final line counts. 
+ lastLine := fset.Position(pos[len(pos)-1].End).Line + cstart := len(f.Comments) + cend := len(f.Comments) + for i, g := range f.Comments { + if g.Pos() < pos[0].Start { + continue + } + if i < cstart { + cstart = i + } + if fset.Position(g.End()).Line > lastLine { + cend = i + break + } + } + comments := f.Comments[cstart:cend] + + // Assign each comment to the import spec preceding it. + importComment := map[*ast.ImportSpec][]*ast.CommentGroup{} + specIndex := 0 + for _, g := range comments { + for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() { + specIndex++ + } + s := specs[specIndex].(*ast.ImportSpec) + importComment[s] = append(importComment[s], g) + } + + // Sort the import specs by import path. + // Remove duplicates, when possible without data loss. + // Reassign the import paths to have the same position sequence. + // Reassign each comment to abut the end of its spec. + // Sort the comments by new position. + sort.Sort(byImportSpec(specs)) + + // Dedup. Thanks to our sorting, we can just consider + // adjacent pairs of imports. 
+ deduped := specs[:0] + for i, s := range specs { + if i == len(specs)-1 || !collapse(s, specs[i+1]) { + deduped = append(deduped, s) + } else { + p := s.Pos() + fset.File(p).MergeLine(fset.Position(p).Line) + } + } + specs = deduped + + // Fix up comment positions + for i, s := range specs { + s := s.(*ast.ImportSpec) + if s.Name != nil { + s.Name.NamePos = pos[i].Start + } + s.Path.ValuePos = pos[i].Start + s.EndPos = pos[i].End + for _, g := range importComment[s] { + for _, c := range g.List { + c.Slash = pos[i].End + } + } + } + + sort.Sort(byCommentPos(comments)) + + return specs +} + +type byImportSpec []ast.Spec // slice of *ast.ImportSpec + +func (x byImportSpec) Len() int { return len(x) } +func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byImportSpec) Less(i, j int) bool { + ipath := importPath(x[i]) + jpath := importPath(x[j]) + + igroup := importGroup(ipath) + jgroup := importGroup(jpath) + if igroup != jgroup { + return igroup < jgroup + } + + if ipath != jpath { + return ipath < jpath + } + iname := importName(x[i]) + jname := importName(x[j]) + + if iname != jname { + return iname < jname + } + return importComment(x[i]) < importComment(x[j]) +} + +type byCommentPos []*ast.CommentGroup + +func (x byCommentPos) Len() int { return len(x) } +func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() } diff --git a/vendor/golang.org/x/tools/imports/zstdlib.go b/vendor/golang.org/x/tools/imports/zstdlib.go new file mode 100644 index 00000000000..5b66a6cd42a --- /dev/null +++ b/vendor/golang.org/x/tools/imports/zstdlib.go @@ -0,0 +1,9376 @@ +// Code generated by mkstdlib.go. DO NOT EDIT. 
+ +package imports + +var stdlib = map[string]string{ + "adler32.Checksum": "hash/adler32", + "adler32.New": "hash/adler32", + "adler32.Size": "hash/adler32", + "aes.BlockSize": "crypto/aes", + "aes.KeySizeError": "crypto/aes", + "aes.NewCipher": "crypto/aes", + "ascii85.CorruptInputError": "encoding/ascii85", + "ascii85.Decode": "encoding/ascii85", + "ascii85.Encode": "encoding/ascii85", + "ascii85.MaxEncodedLen": "encoding/ascii85", + "ascii85.NewDecoder": "encoding/ascii85", + "ascii85.NewEncoder": "encoding/ascii85", + "asn1.BitString": "encoding/asn1", + "asn1.ClassApplication": "encoding/asn1", + "asn1.ClassContextSpecific": "encoding/asn1", + "asn1.ClassPrivate": "encoding/asn1", + "asn1.ClassUniversal": "encoding/asn1", + "asn1.Enumerated": "encoding/asn1", + "asn1.Flag": "encoding/asn1", + "asn1.Marshal": "encoding/asn1", + "asn1.ObjectIdentifier": "encoding/asn1", + "asn1.RawContent": "encoding/asn1", + "asn1.RawValue": "encoding/asn1", + "asn1.StructuralError": "encoding/asn1", + "asn1.SyntaxError": "encoding/asn1", + "asn1.TagBitString": "encoding/asn1", + "asn1.TagBoolean": "encoding/asn1", + "asn1.TagEnum": "encoding/asn1", + "asn1.TagGeneralString": "encoding/asn1", + "asn1.TagGeneralizedTime": "encoding/asn1", + "asn1.TagIA5String": "encoding/asn1", + "asn1.TagInteger": "encoding/asn1", + "asn1.TagOID": "encoding/asn1", + "asn1.TagOctetString": "encoding/asn1", + "asn1.TagPrintableString": "encoding/asn1", + "asn1.TagSequence": "encoding/asn1", + "asn1.TagSet": "encoding/asn1", + "asn1.TagT61String": "encoding/asn1", + "asn1.TagUTCTime": "encoding/asn1", + "asn1.TagUTF8String": "encoding/asn1", + "asn1.Unmarshal": "encoding/asn1", + "asn1.UnmarshalWithParams": "encoding/asn1", + "ast.ArrayType": "go/ast", + "ast.AssignStmt": "go/ast", + "ast.Bad": "go/ast", + "ast.BadDecl": "go/ast", + "ast.BadExpr": "go/ast", + "ast.BadStmt": "go/ast", + "ast.BasicLit": "go/ast", + "ast.BinaryExpr": "go/ast", + "ast.BlockStmt": "go/ast", + "ast.BranchStmt": 
"go/ast", + "ast.CallExpr": "go/ast", + "ast.CaseClause": "go/ast", + "ast.ChanDir": "go/ast", + "ast.ChanType": "go/ast", + "ast.CommClause": "go/ast", + "ast.Comment": "go/ast", + "ast.CommentGroup": "go/ast", + "ast.CommentMap": "go/ast", + "ast.CompositeLit": "go/ast", + "ast.Con": "go/ast", + "ast.DeclStmt": "go/ast", + "ast.DeferStmt": "go/ast", + "ast.Ellipsis": "go/ast", + "ast.EmptyStmt": "go/ast", + "ast.ExprStmt": "go/ast", + "ast.Field": "go/ast", + "ast.FieldFilter": "go/ast", + "ast.FieldList": "go/ast", + "ast.File": "go/ast", + "ast.FileExports": "go/ast", + "ast.Filter": "go/ast", + "ast.FilterDecl": "go/ast", + "ast.FilterFile": "go/ast", + "ast.FilterFuncDuplicates": "go/ast", + "ast.FilterImportDuplicates": "go/ast", + "ast.FilterPackage": "go/ast", + "ast.FilterUnassociatedComments": "go/ast", + "ast.ForStmt": "go/ast", + "ast.Fprint": "go/ast", + "ast.Fun": "go/ast", + "ast.FuncDecl": "go/ast", + "ast.FuncLit": "go/ast", + "ast.FuncType": "go/ast", + "ast.GenDecl": "go/ast", + "ast.GoStmt": "go/ast", + "ast.Ident": "go/ast", + "ast.IfStmt": "go/ast", + "ast.ImportSpec": "go/ast", + "ast.Importer": "go/ast", + "ast.IncDecStmt": "go/ast", + "ast.IndexExpr": "go/ast", + "ast.Inspect": "go/ast", + "ast.InterfaceType": "go/ast", + "ast.IsExported": "go/ast", + "ast.KeyValueExpr": "go/ast", + "ast.LabeledStmt": "go/ast", + "ast.Lbl": "go/ast", + "ast.MapType": "go/ast", + "ast.MergeMode": "go/ast", + "ast.MergePackageFiles": "go/ast", + "ast.NewCommentMap": "go/ast", + "ast.NewIdent": "go/ast", + "ast.NewObj": "go/ast", + "ast.NewPackage": "go/ast", + "ast.NewScope": "go/ast", + "ast.Node": "go/ast", + "ast.NotNilFilter": "go/ast", + "ast.ObjKind": "go/ast", + "ast.Object": "go/ast", + "ast.Package": "go/ast", + "ast.PackageExports": "go/ast", + "ast.ParenExpr": "go/ast", + "ast.Pkg": "go/ast", + "ast.Print": "go/ast", + "ast.RECV": "go/ast", + "ast.RangeStmt": "go/ast", + "ast.ReturnStmt": "go/ast", + "ast.SEND": "go/ast", + "ast.Scope": "go/ast", 
+ "ast.SelectStmt": "go/ast", + "ast.SelectorExpr": "go/ast", + "ast.SendStmt": "go/ast", + "ast.SliceExpr": "go/ast", + "ast.SortImports": "go/ast", + "ast.StarExpr": "go/ast", + "ast.StructType": "go/ast", + "ast.SwitchStmt": "go/ast", + "ast.Typ": "go/ast", + "ast.TypeAssertExpr": "go/ast", + "ast.TypeSpec": "go/ast", + "ast.TypeSwitchStmt": "go/ast", + "ast.UnaryExpr": "go/ast", + "ast.ValueSpec": "go/ast", + "ast.Var": "go/ast", + "ast.Visitor": "go/ast", + "ast.Walk": "go/ast", + "atomic.AddInt32": "sync/atomic", + "atomic.AddInt64": "sync/atomic", + "atomic.AddUint32": "sync/atomic", + "atomic.AddUint64": "sync/atomic", + "atomic.AddUintptr": "sync/atomic", + "atomic.CompareAndSwapInt32": "sync/atomic", + "atomic.CompareAndSwapInt64": "sync/atomic", + "atomic.CompareAndSwapPointer": "sync/atomic", + "atomic.CompareAndSwapUint32": "sync/atomic", + "atomic.CompareAndSwapUint64": "sync/atomic", + "atomic.CompareAndSwapUintptr": "sync/atomic", + "atomic.LoadInt32": "sync/atomic", + "atomic.LoadInt64": "sync/atomic", + "atomic.LoadPointer": "sync/atomic", + "atomic.LoadUint32": "sync/atomic", + "atomic.LoadUint64": "sync/atomic", + "atomic.LoadUintptr": "sync/atomic", + "atomic.StoreInt32": "sync/atomic", + "atomic.StoreInt64": "sync/atomic", + "atomic.StorePointer": "sync/atomic", + "atomic.StoreUint32": "sync/atomic", + "atomic.StoreUint64": "sync/atomic", + "atomic.StoreUintptr": "sync/atomic", + "atomic.SwapInt32": "sync/atomic", + "atomic.SwapInt64": "sync/atomic", + "atomic.SwapPointer": "sync/atomic", + "atomic.SwapUint32": "sync/atomic", + "atomic.SwapUint64": "sync/atomic", + "atomic.SwapUintptr": "sync/atomic", + "atomic.Value": "sync/atomic", + "base32.CorruptInputError": "encoding/base32", + "base32.Encoding": "encoding/base32", + "base32.HexEncoding": "encoding/base32", + "base32.NewDecoder": "encoding/base32", + "base32.NewEncoder": "encoding/base32", + "base32.NewEncoding": "encoding/base32", + "base32.StdEncoding": "encoding/base32", + 
"base64.CorruptInputError": "encoding/base64", + "base64.Encoding": "encoding/base64", + "base64.NewDecoder": "encoding/base64", + "base64.NewEncoder": "encoding/base64", + "base64.NewEncoding": "encoding/base64", + "base64.NoPadding": "encoding/base64", + "base64.RawStdEncoding": "encoding/base64", + "base64.RawURLEncoding": "encoding/base64", + "base64.StdEncoding": "encoding/base64", + "base64.StdPadding": "encoding/base64", + "base64.URLEncoding": "encoding/base64", + "big.Above": "math/big", + "big.Accuracy": "math/big", + "big.AwayFromZero": "math/big", + "big.Below": "math/big", + "big.ErrNaN": "math/big", + "big.Exact": "math/big", + "big.Float": "math/big", + "big.Int": "math/big", + "big.Jacobi": "math/big", + "big.MaxBase": "math/big", + "big.MaxExp": "math/big", + "big.MaxPrec": "math/big", + "big.MinExp": "math/big", + "big.NewFloat": "math/big", + "big.NewInt": "math/big", + "big.NewRat": "math/big", + "big.ParseFloat": "math/big", + "big.Rat": "math/big", + "big.RoundingMode": "math/big", + "big.ToNearestAway": "math/big", + "big.ToNearestEven": "math/big", + "big.ToNegativeInf": "math/big", + "big.ToPositiveInf": "math/big", + "big.ToZero": "math/big", + "big.Word": "math/big", + "binary.BigEndian": "encoding/binary", + "binary.ByteOrder": "encoding/binary", + "binary.LittleEndian": "encoding/binary", + "binary.MaxVarintLen16": "encoding/binary", + "binary.MaxVarintLen32": "encoding/binary", + "binary.MaxVarintLen64": "encoding/binary", + "binary.PutUvarint": "encoding/binary", + "binary.PutVarint": "encoding/binary", + "binary.Read": "encoding/binary", + "binary.ReadUvarint": "encoding/binary", + "binary.ReadVarint": "encoding/binary", + "binary.Size": "encoding/binary", + "binary.Uvarint": "encoding/binary", + "binary.Varint": "encoding/binary", + "binary.Write": "encoding/binary", + "bufio.ErrAdvanceTooFar": "bufio", + "bufio.ErrBufferFull": "bufio", + "bufio.ErrFinalToken": "bufio", + "bufio.ErrInvalidUnreadByte": "bufio", + 
"bufio.ErrInvalidUnreadRune": "bufio", + "bufio.ErrNegativeAdvance": "bufio", + "bufio.ErrNegativeCount": "bufio", + "bufio.ErrTooLong": "bufio", + "bufio.MaxScanTokenSize": "bufio", + "bufio.NewReadWriter": "bufio", + "bufio.NewReader": "bufio", + "bufio.NewReaderSize": "bufio", + "bufio.NewScanner": "bufio", + "bufio.NewWriter": "bufio", + "bufio.NewWriterSize": "bufio", + "bufio.ReadWriter": "bufio", + "bufio.Reader": "bufio", + "bufio.ScanBytes": "bufio", + "bufio.ScanLines": "bufio", + "bufio.ScanRunes": "bufio", + "bufio.ScanWords": "bufio", + "bufio.Scanner": "bufio", + "bufio.SplitFunc": "bufio", + "bufio.Writer": "bufio", + "build.AllowBinary": "go/build", + "build.ArchChar": "go/build", + "build.Context": "go/build", + "build.Default": "go/build", + "build.FindOnly": "go/build", + "build.IgnoreVendor": "go/build", + "build.Import": "go/build", + "build.ImportComment": "go/build", + "build.ImportDir": "go/build", + "build.ImportMode": "go/build", + "build.IsLocalImport": "go/build", + "build.MultiplePackageError": "go/build", + "build.NoGoError": "go/build", + "build.Package": "go/build", + "build.ToolDir": "go/build", + "bytes.Buffer": "bytes", + "bytes.Compare": "bytes", + "bytes.Contains": "bytes", + "bytes.ContainsAny": "bytes", + "bytes.ContainsRune": "bytes", + "bytes.Count": "bytes", + "bytes.Equal": "bytes", + "bytes.EqualFold": "bytes", + "bytes.ErrTooLarge": "bytes", + "bytes.Fields": "bytes", + "bytes.FieldsFunc": "bytes", + "bytes.HasPrefix": "bytes", + "bytes.HasSuffix": "bytes", + "bytes.Index": "bytes", + "bytes.IndexAny": "bytes", + "bytes.IndexByte": "bytes", + "bytes.IndexFunc": "bytes", + "bytes.IndexRune": "bytes", + "bytes.Join": "bytes", + "bytes.LastIndex": "bytes", + "bytes.LastIndexAny": "bytes", + "bytes.LastIndexByte": "bytes", + "bytes.LastIndexFunc": "bytes", + "bytes.Map": "bytes", + "bytes.MinRead": "bytes", + "bytes.NewBuffer": "bytes", + "bytes.NewBufferString": "bytes", + "bytes.NewReader": "bytes", + "bytes.Reader": 
"bytes", + "bytes.Repeat": "bytes", + "bytes.Replace": "bytes", + "bytes.Runes": "bytes", + "bytes.Split": "bytes", + "bytes.SplitAfter": "bytes", + "bytes.SplitAfterN": "bytes", + "bytes.SplitN": "bytes", + "bytes.Title": "bytes", + "bytes.ToLower": "bytes", + "bytes.ToLowerSpecial": "bytes", + "bytes.ToTitle": "bytes", + "bytes.ToTitleSpecial": "bytes", + "bytes.ToUpper": "bytes", + "bytes.ToUpperSpecial": "bytes", + "bytes.Trim": "bytes", + "bytes.TrimFunc": "bytes", + "bytes.TrimLeft": "bytes", + "bytes.TrimLeftFunc": "bytes", + "bytes.TrimPrefix": "bytes", + "bytes.TrimRight": "bytes", + "bytes.TrimRightFunc": "bytes", + "bytes.TrimSpace": "bytes", + "bytes.TrimSuffix": "bytes", + "bzip2.NewReader": "compress/bzip2", + "bzip2.StructuralError": "compress/bzip2", + "cgi.Handler": "net/http/cgi", + "cgi.Request": "net/http/cgi", + "cgi.RequestFromMap": "net/http/cgi", + "cgi.Serve": "net/http/cgi", + "cipher.AEAD": "crypto/cipher", + "cipher.Block": "crypto/cipher", + "cipher.BlockMode": "crypto/cipher", + "cipher.NewCBCDecrypter": "crypto/cipher", + "cipher.NewCBCEncrypter": "crypto/cipher", + "cipher.NewCFBDecrypter": "crypto/cipher", + "cipher.NewCFBEncrypter": "crypto/cipher", + "cipher.NewCTR": "crypto/cipher", + "cipher.NewGCM": "crypto/cipher", + "cipher.NewGCMWithNonceSize": "crypto/cipher", + "cipher.NewOFB": "crypto/cipher", + "cipher.Stream": "crypto/cipher", + "cipher.StreamReader": "crypto/cipher", + "cipher.StreamWriter": "crypto/cipher", + "cmplx.Abs": "math/cmplx", + "cmplx.Acos": "math/cmplx", + "cmplx.Acosh": "math/cmplx", + "cmplx.Asin": "math/cmplx", + "cmplx.Asinh": "math/cmplx", + "cmplx.Atan": "math/cmplx", + "cmplx.Atanh": "math/cmplx", + "cmplx.Conj": "math/cmplx", + "cmplx.Cos": "math/cmplx", + "cmplx.Cosh": "math/cmplx", + "cmplx.Cot": "math/cmplx", + "cmplx.Exp": "math/cmplx", + "cmplx.Inf": "math/cmplx", + "cmplx.IsInf": "math/cmplx", + "cmplx.IsNaN": "math/cmplx", + "cmplx.Log": "math/cmplx", + "cmplx.Log10": "math/cmplx", + 
"cmplx.NaN": "math/cmplx", + "cmplx.Phase": "math/cmplx", + "cmplx.Polar": "math/cmplx", + "cmplx.Pow": "math/cmplx", + "cmplx.Rect": "math/cmplx", + "cmplx.Sin": "math/cmplx", + "cmplx.Sinh": "math/cmplx", + "cmplx.Sqrt": "math/cmplx", + "cmplx.Tan": "math/cmplx", + "cmplx.Tanh": "math/cmplx", + "color.Alpha": "image/color", + "color.Alpha16": "image/color", + "color.Alpha16Model": "image/color", + "color.AlphaModel": "image/color", + "color.Black": "image/color", + "color.CMYK": "image/color", + "color.CMYKModel": "image/color", + "color.CMYKToRGB": "image/color", + "color.Color": "image/color", + "color.Gray": "image/color", + "color.Gray16": "image/color", + "color.Gray16Model": "image/color", + "color.GrayModel": "image/color", + "color.Model": "image/color", + "color.ModelFunc": "image/color", + "color.NRGBA": "image/color", + "color.NRGBA64": "image/color", + "color.NRGBA64Model": "image/color", + "color.NRGBAModel": "image/color", + "color.NYCbCrA": "image/color", + "color.NYCbCrAModel": "image/color", + "color.Opaque": "image/color", + "color.Palette": "image/color", + "color.RGBA": "image/color", + "color.RGBA64": "image/color", + "color.RGBA64Model": "image/color", + "color.RGBAModel": "image/color", + "color.RGBToCMYK": "image/color", + "color.RGBToYCbCr": "image/color", + "color.Transparent": "image/color", + "color.White": "image/color", + "color.YCbCr": "image/color", + "color.YCbCrModel": "image/color", + "color.YCbCrToRGB": "image/color", + "constant.BinaryOp": "go/constant", + "constant.BitLen": "go/constant", + "constant.Bool": "go/constant", + "constant.BoolVal": "go/constant", + "constant.Bytes": "go/constant", + "constant.Compare": "go/constant", + "constant.Complex": "go/constant", + "constant.Denom": "go/constant", + "constant.Float": "go/constant", + "constant.Float32Val": "go/constant", + "constant.Float64Val": "go/constant", + "constant.Imag": "go/constant", + "constant.Int": "go/constant", + "constant.Int64Val": "go/constant", + 
"constant.Kind": "go/constant", + "constant.MakeBool": "go/constant", + "constant.MakeFloat64": "go/constant", + "constant.MakeFromBytes": "go/constant", + "constant.MakeFromLiteral": "go/constant", + "constant.MakeImag": "go/constant", + "constant.MakeInt64": "go/constant", + "constant.MakeString": "go/constant", + "constant.MakeUint64": "go/constant", + "constant.MakeUnknown": "go/constant", + "constant.Num": "go/constant", + "constant.Real": "go/constant", + "constant.Shift": "go/constant", + "constant.Sign": "go/constant", + "constant.String": "go/constant", + "constant.StringVal": "go/constant", + "constant.ToComplex": "go/constant", + "constant.ToFloat": "go/constant", + "constant.ToInt": "go/constant", + "constant.Uint64Val": "go/constant", + "constant.UnaryOp": "go/constant", + "constant.Unknown": "go/constant", + "context.Background": "context", + "context.CancelFunc": "context", + "context.Canceled": "context", + "context.Context": "context", + "context.DeadlineExceeded": "context", + "context.TODO": "context", + "context.WithCancel": "context", + "context.WithDeadline": "context", + "context.WithTimeout": "context", + "context.WithValue": "context", + "cookiejar.Jar": "net/http/cookiejar", + "cookiejar.New": "net/http/cookiejar", + "cookiejar.Options": "net/http/cookiejar", + "cookiejar.PublicSuffixList": "net/http/cookiejar", + "crc32.Castagnoli": "hash/crc32", + "crc32.Checksum": "hash/crc32", + "crc32.ChecksumIEEE": "hash/crc32", + "crc32.IEEE": "hash/crc32", + "crc32.IEEETable": "hash/crc32", + "crc32.Koopman": "hash/crc32", + "crc32.MakeTable": "hash/crc32", + "crc32.New": "hash/crc32", + "crc32.NewIEEE": "hash/crc32", + "crc32.Size": "hash/crc32", + "crc32.Table": "hash/crc32", + "crc32.Update": "hash/crc32", + "crc64.Checksum": "hash/crc64", + "crc64.ECMA": "hash/crc64", + "crc64.ISO": "hash/crc64", + "crc64.MakeTable": "hash/crc64", + "crc64.New": "hash/crc64", + "crc64.Size": "hash/crc64", + "crc64.Table": "hash/crc64", + "crc64.Update": 
"hash/crc64", + "crypto.Decrypter": "crypto", + "crypto.DecrypterOpts": "crypto", + "crypto.Hash": "crypto", + "crypto.MD4": "crypto", + "crypto.MD5": "crypto", + "crypto.MD5SHA1": "crypto", + "crypto.PrivateKey": "crypto", + "crypto.PublicKey": "crypto", + "crypto.RIPEMD160": "crypto", + "crypto.RegisterHash": "crypto", + "crypto.SHA1": "crypto", + "crypto.SHA224": "crypto", + "crypto.SHA256": "crypto", + "crypto.SHA384": "crypto", + "crypto.SHA3_224": "crypto", + "crypto.SHA3_256": "crypto", + "crypto.SHA3_384": "crypto", + "crypto.SHA3_512": "crypto", + "crypto.SHA512": "crypto", + "crypto.SHA512_224": "crypto", + "crypto.SHA512_256": "crypto", + "crypto.Signer": "crypto", + "crypto.SignerOpts": "crypto", + "csv.ErrBareQuote": "encoding/csv", + "csv.ErrFieldCount": "encoding/csv", + "csv.ErrQuote": "encoding/csv", + "csv.ErrTrailingComma": "encoding/csv", + "csv.NewReader": "encoding/csv", + "csv.NewWriter": "encoding/csv", + "csv.ParseError": "encoding/csv", + "csv.Reader": "encoding/csv", + "csv.Writer": "encoding/csv", + "debug.FreeOSMemory": "runtime/debug", + "debug.GCStats": "runtime/debug", + "debug.PrintStack": "runtime/debug", + "debug.ReadGCStats": "runtime/debug", + "debug.SetGCPercent": "runtime/debug", + "debug.SetMaxStack": "runtime/debug", + "debug.SetMaxThreads": "runtime/debug", + "debug.SetPanicOnFault": "runtime/debug", + "debug.SetTraceback": "runtime/debug", + "debug.Stack": "runtime/debug", + "debug.WriteHeapDump": "runtime/debug", + "des.BlockSize": "crypto/des", + "des.KeySizeError": "crypto/des", + "des.NewCipher": "crypto/des", + "des.NewTripleDESCipher": "crypto/des", + "doc.AllDecls": "go/doc", + "doc.AllMethods": "go/doc", + "doc.Example": "go/doc", + "doc.Examples": "go/doc", + "doc.Filter": "go/doc", + "doc.Func": "go/doc", + "doc.IllegalPrefixes": "go/doc", + "doc.IsPredeclared": "go/doc", + "doc.Mode": "go/doc", + "doc.New": "go/doc", + "doc.Note": "go/doc", + "doc.Package": "go/doc", + "doc.Synopsis": "go/doc", + "doc.ToHTML": 
"go/doc", + "doc.ToText": "go/doc", + "doc.Type": "go/doc", + "doc.Value": "go/doc", + "draw.Draw": "image/draw", + "draw.DrawMask": "image/draw", + "draw.Drawer": "image/draw", + "draw.FloydSteinberg": "image/draw", + "draw.Image": "image/draw", + "draw.Op": "image/draw", + "draw.Over": "image/draw", + "draw.Quantizer": "image/draw", + "draw.Src": "image/draw", + "driver.Bool": "database/sql/driver", + "driver.ColumnConverter": "database/sql/driver", + "driver.Conn": "database/sql/driver", + "driver.ConnBeginTx": "database/sql/driver", + "driver.ConnPrepareContext": "database/sql/driver", + "driver.DefaultParameterConverter": "database/sql/driver", + "driver.Driver": "database/sql/driver", + "driver.ErrBadConn": "database/sql/driver", + "driver.ErrSkip": "database/sql/driver", + "driver.Execer": "database/sql/driver", + "driver.ExecerContext": "database/sql/driver", + "driver.Int32": "database/sql/driver", + "driver.IsScanValue": "database/sql/driver", + "driver.IsValue": "database/sql/driver", + "driver.IsolationLevel": "database/sql/driver", + "driver.NamedValue": "database/sql/driver", + "driver.NotNull": "database/sql/driver", + "driver.Null": "database/sql/driver", + "driver.Pinger": "database/sql/driver", + "driver.Queryer": "database/sql/driver", + "driver.QueryerContext": "database/sql/driver", + "driver.Result": "database/sql/driver", + "driver.ResultNoRows": "database/sql/driver", + "driver.Rows": "database/sql/driver", + "driver.RowsAffected": "database/sql/driver", + "driver.RowsColumnTypeDatabaseTypeName": "database/sql/driver", + "driver.RowsColumnTypeLength": "database/sql/driver", + "driver.RowsColumnTypeNullable": "database/sql/driver", + "driver.RowsColumnTypePrecisionScale": "database/sql/driver", + "driver.RowsColumnTypeScanType": "database/sql/driver", + "driver.RowsNextResultSet": "database/sql/driver", + "driver.Stmt": "database/sql/driver", + "driver.StmtExecContext": "database/sql/driver", + "driver.StmtQueryContext": 
"database/sql/driver", + "driver.String": "database/sql/driver", + "driver.Tx": "database/sql/driver", + "driver.TxOptions": "database/sql/driver", + "driver.Value": "database/sql/driver", + "driver.ValueConverter": "database/sql/driver", + "driver.Valuer": "database/sql/driver", + "dsa.ErrInvalidPublicKey": "crypto/dsa", + "dsa.GenerateKey": "crypto/dsa", + "dsa.GenerateParameters": "crypto/dsa", + "dsa.L1024N160": "crypto/dsa", + "dsa.L2048N224": "crypto/dsa", + "dsa.L2048N256": "crypto/dsa", + "dsa.L3072N256": "crypto/dsa", + "dsa.ParameterSizes": "crypto/dsa", + "dsa.Parameters": "crypto/dsa", + "dsa.PrivateKey": "crypto/dsa", + "dsa.PublicKey": "crypto/dsa", + "dsa.Sign": "crypto/dsa", + "dsa.Verify": "crypto/dsa", + "dwarf.AddrType": "debug/dwarf", + "dwarf.ArrayType": "debug/dwarf", + "dwarf.Attr": "debug/dwarf", + "dwarf.AttrAbstractOrigin": "debug/dwarf", + "dwarf.AttrAccessibility": "debug/dwarf", + "dwarf.AttrAddrClass": "debug/dwarf", + "dwarf.AttrAllocated": "debug/dwarf", + "dwarf.AttrArtificial": "debug/dwarf", + "dwarf.AttrAssociated": "debug/dwarf", + "dwarf.AttrBaseTypes": "debug/dwarf", + "dwarf.AttrBitOffset": "debug/dwarf", + "dwarf.AttrBitSize": "debug/dwarf", + "dwarf.AttrByteSize": "debug/dwarf", + "dwarf.AttrCallColumn": "debug/dwarf", + "dwarf.AttrCallFile": "debug/dwarf", + "dwarf.AttrCallLine": "debug/dwarf", + "dwarf.AttrCalling": "debug/dwarf", + "dwarf.AttrCommonRef": "debug/dwarf", + "dwarf.AttrCompDir": "debug/dwarf", + "dwarf.AttrConstValue": "debug/dwarf", + "dwarf.AttrContainingType": "debug/dwarf", + "dwarf.AttrCount": "debug/dwarf", + "dwarf.AttrDataLocation": "debug/dwarf", + "dwarf.AttrDataMemberLoc": "debug/dwarf", + "dwarf.AttrDeclColumn": "debug/dwarf", + "dwarf.AttrDeclFile": "debug/dwarf", + "dwarf.AttrDeclLine": "debug/dwarf", + "dwarf.AttrDeclaration": "debug/dwarf", + "dwarf.AttrDefaultValue": "debug/dwarf", + "dwarf.AttrDescription": "debug/dwarf", + "dwarf.AttrDiscr": "debug/dwarf", + "dwarf.AttrDiscrList": 
"debug/dwarf", + "dwarf.AttrDiscrValue": "debug/dwarf", + "dwarf.AttrEncoding": "debug/dwarf", + "dwarf.AttrEntrypc": "debug/dwarf", + "dwarf.AttrExtension": "debug/dwarf", + "dwarf.AttrExternal": "debug/dwarf", + "dwarf.AttrFrameBase": "debug/dwarf", + "dwarf.AttrFriend": "debug/dwarf", + "dwarf.AttrHighpc": "debug/dwarf", + "dwarf.AttrIdentifierCase": "debug/dwarf", + "dwarf.AttrImport": "debug/dwarf", + "dwarf.AttrInline": "debug/dwarf", + "dwarf.AttrIsOptional": "debug/dwarf", + "dwarf.AttrLanguage": "debug/dwarf", + "dwarf.AttrLocation": "debug/dwarf", + "dwarf.AttrLowerBound": "debug/dwarf", + "dwarf.AttrLowpc": "debug/dwarf", + "dwarf.AttrMacroInfo": "debug/dwarf", + "dwarf.AttrName": "debug/dwarf", + "dwarf.AttrNamelistItem": "debug/dwarf", + "dwarf.AttrOrdering": "debug/dwarf", + "dwarf.AttrPriority": "debug/dwarf", + "dwarf.AttrProducer": "debug/dwarf", + "dwarf.AttrPrototyped": "debug/dwarf", + "dwarf.AttrRanges": "debug/dwarf", + "dwarf.AttrReturnAddr": "debug/dwarf", + "dwarf.AttrSegment": "debug/dwarf", + "dwarf.AttrSibling": "debug/dwarf", + "dwarf.AttrSpecification": "debug/dwarf", + "dwarf.AttrStartScope": "debug/dwarf", + "dwarf.AttrStaticLink": "debug/dwarf", + "dwarf.AttrStmtList": "debug/dwarf", + "dwarf.AttrStride": "debug/dwarf", + "dwarf.AttrStrideSize": "debug/dwarf", + "dwarf.AttrStringLength": "debug/dwarf", + "dwarf.AttrTrampoline": "debug/dwarf", + "dwarf.AttrType": "debug/dwarf", + "dwarf.AttrUpperBound": "debug/dwarf", + "dwarf.AttrUseLocation": "debug/dwarf", + "dwarf.AttrUseUTF8": "debug/dwarf", + "dwarf.AttrVarParam": "debug/dwarf", + "dwarf.AttrVirtuality": "debug/dwarf", + "dwarf.AttrVisibility": "debug/dwarf", + "dwarf.AttrVtableElemLoc": "debug/dwarf", + "dwarf.BasicType": "debug/dwarf", + "dwarf.BoolType": "debug/dwarf", + "dwarf.CharType": "debug/dwarf", + "dwarf.Class": "debug/dwarf", + "dwarf.ClassAddress": "debug/dwarf", + "dwarf.ClassBlock": "debug/dwarf", + "dwarf.ClassConstant": "debug/dwarf", + "dwarf.ClassExprLoc": 
"debug/dwarf", + "dwarf.ClassFlag": "debug/dwarf", + "dwarf.ClassLinePtr": "debug/dwarf", + "dwarf.ClassLocListPtr": "debug/dwarf", + "dwarf.ClassMacPtr": "debug/dwarf", + "dwarf.ClassRangeListPtr": "debug/dwarf", + "dwarf.ClassReference": "debug/dwarf", + "dwarf.ClassReferenceAlt": "debug/dwarf", + "dwarf.ClassReferenceSig": "debug/dwarf", + "dwarf.ClassString": "debug/dwarf", + "dwarf.ClassStringAlt": "debug/dwarf", + "dwarf.ClassUnknown": "debug/dwarf", + "dwarf.CommonType": "debug/dwarf", + "dwarf.ComplexType": "debug/dwarf", + "dwarf.Data": "debug/dwarf", + "dwarf.DecodeError": "debug/dwarf", + "dwarf.DotDotDotType": "debug/dwarf", + "dwarf.Entry": "debug/dwarf", + "dwarf.EnumType": "debug/dwarf", + "dwarf.EnumValue": "debug/dwarf", + "dwarf.ErrUnknownPC": "debug/dwarf", + "dwarf.Field": "debug/dwarf", + "dwarf.FloatType": "debug/dwarf", + "dwarf.FuncType": "debug/dwarf", + "dwarf.IntType": "debug/dwarf", + "dwarf.LineEntry": "debug/dwarf", + "dwarf.LineFile": "debug/dwarf", + "dwarf.LineReader": "debug/dwarf", + "dwarf.LineReaderPos": "debug/dwarf", + "dwarf.New": "debug/dwarf", + "dwarf.Offset": "debug/dwarf", + "dwarf.PtrType": "debug/dwarf", + "dwarf.QualType": "debug/dwarf", + "dwarf.Reader": "debug/dwarf", + "dwarf.StructField": "debug/dwarf", + "dwarf.StructType": "debug/dwarf", + "dwarf.Tag": "debug/dwarf", + "dwarf.TagAccessDeclaration": "debug/dwarf", + "dwarf.TagArrayType": "debug/dwarf", + "dwarf.TagBaseType": "debug/dwarf", + "dwarf.TagCatchDwarfBlock": "debug/dwarf", + "dwarf.TagClassType": "debug/dwarf", + "dwarf.TagCommonDwarfBlock": "debug/dwarf", + "dwarf.TagCommonInclusion": "debug/dwarf", + "dwarf.TagCompileUnit": "debug/dwarf", + "dwarf.TagCondition": "debug/dwarf", + "dwarf.TagConstType": "debug/dwarf", + "dwarf.TagConstant": "debug/dwarf", + "dwarf.TagDwarfProcedure": "debug/dwarf", + "dwarf.TagEntryPoint": "debug/dwarf", + "dwarf.TagEnumerationType": "debug/dwarf", + "dwarf.TagEnumerator": "debug/dwarf", + "dwarf.TagFileType": 
"debug/dwarf", + "dwarf.TagFormalParameter": "debug/dwarf", + "dwarf.TagFriend": "debug/dwarf", + "dwarf.TagImportedDeclaration": "debug/dwarf", + "dwarf.TagImportedModule": "debug/dwarf", + "dwarf.TagImportedUnit": "debug/dwarf", + "dwarf.TagInheritance": "debug/dwarf", + "dwarf.TagInlinedSubroutine": "debug/dwarf", + "dwarf.TagInterfaceType": "debug/dwarf", + "dwarf.TagLabel": "debug/dwarf", + "dwarf.TagLexDwarfBlock": "debug/dwarf", + "dwarf.TagMember": "debug/dwarf", + "dwarf.TagModule": "debug/dwarf", + "dwarf.TagMutableType": "debug/dwarf", + "dwarf.TagNamelist": "debug/dwarf", + "dwarf.TagNamelistItem": "debug/dwarf", + "dwarf.TagNamespace": "debug/dwarf", + "dwarf.TagPackedType": "debug/dwarf", + "dwarf.TagPartialUnit": "debug/dwarf", + "dwarf.TagPointerType": "debug/dwarf", + "dwarf.TagPtrToMemberType": "debug/dwarf", + "dwarf.TagReferenceType": "debug/dwarf", + "dwarf.TagRestrictType": "debug/dwarf", + "dwarf.TagRvalueReferenceType": "debug/dwarf", + "dwarf.TagSetType": "debug/dwarf", + "dwarf.TagSharedType": "debug/dwarf", + "dwarf.TagStringType": "debug/dwarf", + "dwarf.TagStructType": "debug/dwarf", + "dwarf.TagSubprogram": "debug/dwarf", + "dwarf.TagSubrangeType": "debug/dwarf", + "dwarf.TagSubroutineType": "debug/dwarf", + "dwarf.TagTemplateAlias": "debug/dwarf", + "dwarf.TagTemplateTypeParameter": "debug/dwarf", + "dwarf.TagTemplateValueParameter": "debug/dwarf", + "dwarf.TagThrownType": "debug/dwarf", + "dwarf.TagTryDwarfBlock": "debug/dwarf", + "dwarf.TagTypeUnit": "debug/dwarf", + "dwarf.TagTypedef": "debug/dwarf", + "dwarf.TagUnionType": "debug/dwarf", + "dwarf.TagUnspecifiedParameters": "debug/dwarf", + "dwarf.TagUnspecifiedType": "debug/dwarf", + "dwarf.TagVariable": "debug/dwarf", + "dwarf.TagVariant": "debug/dwarf", + "dwarf.TagVariantPart": "debug/dwarf", + "dwarf.TagVolatileType": "debug/dwarf", + "dwarf.TagWithStmt": "debug/dwarf", + "dwarf.Type": "debug/dwarf", + "dwarf.TypedefType": "debug/dwarf", + "dwarf.UcharType": "debug/dwarf", + 
"dwarf.UintType": "debug/dwarf", + "dwarf.UnspecifiedType": "debug/dwarf", + "dwarf.VoidType": "debug/dwarf", + "ecdsa.GenerateKey": "crypto/ecdsa", + "ecdsa.PrivateKey": "crypto/ecdsa", + "ecdsa.PublicKey": "crypto/ecdsa", + "ecdsa.Sign": "crypto/ecdsa", + "ecdsa.Verify": "crypto/ecdsa", + "elf.ARM_MAGIC_TRAMP_NUMBER": "debug/elf", + "elf.COMPRESS_HIOS": "debug/elf", + "elf.COMPRESS_HIPROC": "debug/elf", + "elf.COMPRESS_LOOS": "debug/elf", + "elf.COMPRESS_LOPROC": "debug/elf", + "elf.COMPRESS_ZLIB": "debug/elf", + "elf.Chdr32": "debug/elf", + "elf.Chdr64": "debug/elf", + "elf.Class": "debug/elf", + "elf.CompressionType": "debug/elf", + "elf.DF_BIND_NOW": "debug/elf", + "elf.DF_ORIGIN": "debug/elf", + "elf.DF_STATIC_TLS": "debug/elf", + "elf.DF_SYMBOLIC": "debug/elf", + "elf.DF_TEXTREL": "debug/elf", + "elf.DT_BIND_NOW": "debug/elf", + "elf.DT_DEBUG": "debug/elf", + "elf.DT_ENCODING": "debug/elf", + "elf.DT_FINI": "debug/elf", + "elf.DT_FINI_ARRAY": "debug/elf", + "elf.DT_FINI_ARRAYSZ": "debug/elf", + "elf.DT_FLAGS": "debug/elf", + "elf.DT_HASH": "debug/elf", + "elf.DT_HIOS": "debug/elf", + "elf.DT_HIPROC": "debug/elf", + "elf.DT_INIT": "debug/elf", + "elf.DT_INIT_ARRAY": "debug/elf", + "elf.DT_INIT_ARRAYSZ": "debug/elf", + "elf.DT_JMPREL": "debug/elf", + "elf.DT_LOOS": "debug/elf", + "elf.DT_LOPROC": "debug/elf", + "elf.DT_NEEDED": "debug/elf", + "elf.DT_NULL": "debug/elf", + "elf.DT_PLTGOT": "debug/elf", + "elf.DT_PLTREL": "debug/elf", + "elf.DT_PLTRELSZ": "debug/elf", + "elf.DT_PREINIT_ARRAY": "debug/elf", + "elf.DT_PREINIT_ARRAYSZ": "debug/elf", + "elf.DT_REL": "debug/elf", + "elf.DT_RELA": "debug/elf", + "elf.DT_RELAENT": "debug/elf", + "elf.DT_RELASZ": "debug/elf", + "elf.DT_RELENT": "debug/elf", + "elf.DT_RELSZ": "debug/elf", + "elf.DT_RPATH": "debug/elf", + "elf.DT_RUNPATH": "debug/elf", + "elf.DT_SONAME": "debug/elf", + "elf.DT_STRSZ": "debug/elf", + "elf.DT_STRTAB": "debug/elf", + "elf.DT_SYMBOLIC": "debug/elf", + "elf.DT_SYMENT": "debug/elf", + 
"elf.DT_SYMTAB": "debug/elf", + "elf.DT_TEXTREL": "debug/elf", + "elf.DT_VERNEED": "debug/elf", + "elf.DT_VERNEEDNUM": "debug/elf", + "elf.DT_VERSYM": "debug/elf", + "elf.Data": "debug/elf", + "elf.Dyn32": "debug/elf", + "elf.Dyn64": "debug/elf", + "elf.DynFlag": "debug/elf", + "elf.DynTag": "debug/elf", + "elf.EI_ABIVERSION": "debug/elf", + "elf.EI_CLASS": "debug/elf", + "elf.EI_DATA": "debug/elf", + "elf.EI_NIDENT": "debug/elf", + "elf.EI_OSABI": "debug/elf", + "elf.EI_PAD": "debug/elf", + "elf.EI_VERSION": "debug/elf", + "elf.ELFCLASS32": "debug/elf", + "elf.ELFCLASS64": "debug/elf", + "elf.ELFCLASSNONE": "debug/elf", + "elf.ELFDATA2LSB": "debug/elf", + "elf.ELFDATA2MSB": "debug/elf", + "elf.ELFDATANONE": "debug/elf", + "elf.ELFMAG": "debug/elf", + "elf.ELFOSABI_86OPEN": "debug/elf", + "elf.ELFOSABI_AIX": "debug/elf", + "elf.ELFOSABI_ARM": "debug/elf", + "elf.ELFOSABI_FREEBSD": "debug/elf", + "elf.ELFOSABI_HPUX": "debug/elf", + "elf.ELFOSABI_HURD": "debug/elf", + "elf.ELFOSABI_IRIX": "debug/elf", + "elf.ELFOSABI_LINUX": "debug/elf", + "elf.ELFOSABI_MODESTO": "debug/elf", + "elf.ELFOSABI_NETBSD": "debug/elf", + "elf.ELFOSABI_NONE": "debug/elf", + "elf.ELFOSABI_NSK": "debug/elf", + "elf.ELFOSABI_OPENBSD": "debug/elf", + "elf.ELFOSABI_OPENVMS": "debug/elf", + "elf.ELFOSABI_SOLARIS": "debug/elf", + "elf.ELFOSABI_STANDALONE": "debug/elf", + "elf.ELFOSABI_TRU64": "debug/elf", + "elf.EM_386": "debug/elf", + "elf.EM_486": "debug/elf", + "elf.EM_68HC12": "debug/elf", + "elf.EM_68K": "debug/elf", + "elf.EM_860": "debug/elf", + "elf.EM_88K": "debug/elf", + "elf.EM_960": "debug/elf", + "elf.EM_AARCH64": "debug/elf", + "elf.EM_ALPHA": "debug/elf", + "elf.EM_ALPHA_STD": "debug/elf", + "elf.EM_ARC": "debug/elf", + "elf.EM_ARM": "debug/elf", + "elf.EM_COLDFIRE": "debug/elf", + "elf.EM_FR20": "debug/elf", + "elf.EM_H8S": "debug/elf", + "elf.EM_H8_300": "debug/elf", + "elf.EM_H8_300H": "debug/elf", + "elf.EM_H8_500": "debug/elf", + "elf.EM_IA_64": "debug/elf", + "elf.EM_M32": 
"debug/elf", + "elf.EM_ME16": "debug/elf", + "elf.EM_MIPS": "debug/elf", + "elf.EM_MIPS_RS3_LE": "debug/elf", + "elf.EM_MIPS_RS4_BE": "debug/elf", + "elf.EM_MIPS_X": "debug/elf", + "elf.EM_MMA": "debug/elf", + "elf.EM_NCPU": "debug/elf", + "elf.EM_NDR1": "debug/elf", + "elf.EM_NONE": "debug/elf", + "elf.EM_PARISC": "debug/elf", + "elf.EM_PCP": "debug/elf", + "elf.EM_PPC": "debug/elf", + "elf.EM_PPC64": "debug/elf", + "elf.EM_RCE": "debug/elf", + "elf.EM_RH32": "debug/elf", + "elf.EM_S370": "debug/elf", + "elf.EM_S390": "debug/elf", + "elf.EM_SH": "debug/elf", + "elf.EM_SPARC": "debug/elf", + "elf.EM_SPARC32PLUS": "debug/elf", + "elf.EM_SPARCV9": "debug/elf", + "elf.EM_ST100": "debug/elf", + "elf.EM_STARCORE": "debug/elf", + "elf.EM_TINYJ": "debug/elf", + "elf.EM_TRICORE": "debug/elf", + "elf.EM_V800": "debug/elf", + "elf.EM_VPP500": "debug/elf", + "elf.EM_X86_64": "debug/elf", + "elf.ET_CORE": "debug/elf", + "elf.ET_DYN": "debug/elf", + "elf.ET_EXEC": "debug/elf", + "elf.ET_HIOS": "debug/elf", + "elf.ET_HIPROC": "debug/elf", + "elf.ET_LOOS": "debug/elf", + "elf.ET_LOPROC": "debug/elf", + "elf.ET_NONE": "debug/elf", + "elf.ET_REL": "debug/elf", + "elf.EV_CURRENT": "debug/elf", + "elf.EV_NONE": "debug/elf", + "elf.ErrNoSymbols": "debug/elf", + "elf.File": "debug/elf", + "elf.FileHeader": "debug/elf", + "elf.FormatError": "debug/elf", + "elf.Header32": "debug/elf", + "elf.Header64": "debug/elf", + "elf.ImportedSymbol": "debug/elf", + "elf.Machine": "debug/elf", + "elf.NT_FPREGSET": "debug/elf", + "elf.NT_PRPSINFO": "debug/elf", + "elf.NT_PRSTATUS": "debug/elf", + "elf.NType": "debug/elf", + "elf.NewFile": "debug/elf", + "elf.OSABI": "debug/elf", + "elf.Open": "debug/elf", + "elf.PF_MASKOS": "debug/elf", + "elf.PF_MASKPROC": "debug/elf", + "elf.PF_R": "debug/elf", + "elf.PF_W": "debug/elf", + "elf.PF_X": "debug/elf", + "elf.PT_DYNAMIC": "debug/elf", + "elf.PT_HIOS": "debug/elf", + "elf.PT_HIPROC": "debug/elf", + "elf.PT_INTERP": "debug/elf", + "elf.PT_LOAD": 
"debug/elf", + "elf.PT_LOOS": "debug/elf", + "elf.PT_LOPROC": "debug/elf", + "elf.PT_NOTE": "debug/elf", + "elf.PT_NULL": "debug/elf", + "elf.PT_PHDR": "debug/elf", + "elf.PT_SHLIB": "debug/elf", + "elf.PT_TLS": "debug/elf", + "elf.Prog": "debug/elf", + "elf.Prog32": "debug/elf", + "elf.Prog64": "debug/elf", + "elf.ProgFlag": "debug/elf", + "elf.ProgHeader": "debug/elf", + "elf.ProgType": "debug/elf", + "elf.R_386": "debug/elf", + "elf.R_386_32": "debug/elf", + "elf.R_386_COPY": "debug/elf", + "elf.R_386_GLOB_DAT": "debug/elf", + "elf.R_386_GOT32": "debug/elf", + "elf.R_386_GOTOFF": "debug/elf", + "elf.R_386_GOTPC": "debug/elf", + "elf.R_386_JMP_SLOT": "debug/elf", + "elf.R_386_NONE": "debug/elf", + "elf.R_386_PC32": "debug/elf", + "elf.R_386_PLT32": "debug/elf", + "elf.R_386_RELATIVE": "debug/elf", + "elf.R_386_TLS_DTPMOD32": "debug/elf", + "elf.R_386_TLS_DTPOFF32": "debug/elf", + "elf.R_386_TLS_GD": "debug/elf", + "elf.R_386_TLS_GD_32": "debug/elf", + "elf.R_386_TLS_GD_CALL": "debug/elf", + "elf.R_386_TLS_GD_POP": "debug/elf", + "elf.R_386_TLS_GD_PUSH": "debug/elf", + "elf.R_386_TLS_GOTIE": "debug/elf", + "elf.R_386_TLS_IE": "debug/elf", + "elf.R_386_TLS_IE_32": "debug/elf", + "elf.R_386_TLS_LDM": "debug/elf", + "elf.R_386_TLS_LDM_32": "debug/elf", + "elf.R_386_TLS_LDM_CALL": "debug/elf", + "elf.R_386_TLS_LDM_POP": "debug/elf", + "elf.R_386_TLS_LDM_PUSH": "debug/elf", + "elf.R_386_TLS_LDO_32": "debug/elf", + "elf.R_386_TLS_LE": "debug/elf", + "elf.R_386_TLS_LE_32": "debug/elf", + "elf.R_386_TLS_TPOFF": "debug/elf", + "elf.R_386_TLS_TPOFF32": "debug/elf", + "elf.R_390": "debug/elf", + "elf.R_390_12": "debug/elf", + "elf.R_390_16": "debug/elf", + "elf.R_390_20": "debug/elf", + "elf.R_390_32": "debug/elf", + "elf.R_390_64": "debug/elf", + "elf.R_390_8": "debug/elf", + "elf.R_390_COPY": "debug/elf", + "elf.R_390_GLOB_DAT": "debug/elf", + "elf.R_390_GOT12": "debug/elf", + "elf.R_390_GOT16": "debug/elf", + "elf.R_390_GOT20": "debug/elf", + "elf.R_390_GOT32": 
"debug/elf", + "elf.R_390_GOT64": "debug/elf", + "elf.R_390_GOTENT": "debug/elf", + "elf.R_390_GOTOFF": "debug/elf", + "elf.R_390_GOTOFF16": "debug/elf", + "elf.R_390_GOTOFF64": "debug/elf", + "elf.R_390_GOTPC": "debug/elf", + "elf.R_390_GOTPCDBL": "debug/elf", + "elf.R_390_GOTPLT12": "debug/elf", + "elf.R_390_GOTPLT16": "debug/elf", + "elf.R_390_GOTPLT20": "debug/elf", + "elf.R_390_GOTPLT32": "debug/elf", + "elf.R_390_GOTPLT64": "debug/elf", + "elf.R_390_GOTPLTENT": "debug/elf", + "elf.R_390_GOTPLTOFF16": "debug/elf", + "elf.R_390_GOTPLTOFF32": "debug/elf", + "elf.R_390_GOTPLTOFF64": "debug/elf", + "elf.R_390_JMP_SLOT": "debug/elf", + "elf.R_390_NONE": "debug/elf", + "elf.R_390_PC16": "debug/elf", + "elf.R_390_PC16DBL": "debug/elf", + "elf.R_390_PC32": "debug/elf", + "elf.R_390_PC32DBL": "debug/elf", + "elf.R_390_PC64": "debug/elf", + "elf.R_390_PLT16DBL": "debug/elf", + "elf.R_390_PLT32": "debug/elf", + "elf.R_390_PLT32DBL": "debug/elf", + "elf.R_390_PLT64": "debug/elf", + "elf.R_390_RELATIVE": "debug/elf", + "elf.R_390_TLS_DTPMOD": "debug/elf", + "elf.R_390_TLS_DTPOFF": "debug/elf", + "elf.R_390_TLS_GD32": "debug/elf", + "elf.R_390_TLS_GD64": "debug/elf", + "elf.R_390_TLS_GDCALL": "debug/elf", + "elf.R_390_TLS_GOTIE12": "debug/elf", + "elf.R_390_TLS_GOTIE20": "debug/elf", + "elf.R_390_TLS_GOTIE32": "debug/elf", + "elf.R_390_TLS_GOTIE64": "debug/elf", + "elf.R_390_TLS_IE32": "debug/elf", + "elf.R_390_TLS_IE64": "debug/elf", + "elf.R_390_TLS_IEENT": "debug/elf", + "elf.R_390_TLS_LDCALL": "debug/elf", + "elf.R_390_TLS_LDM32": "debug/elf", + "elf.R_390_TLS_LDM64": "debug/elf", + "elf.R_390_TLS_LDO32": "debug/elf", + "elf.R_390_TLS_LDO64": "debug/elf", + "elf.R_390_TLS_LE32": "debug/elf", + "elf.R_390_TLS_LE64": "debug/elf", + "elf.R_390_TLS_LOAD": "debug/elf", + "elf.R_390_TLS_TPOFF": "debug/elf", + "elf.R_AARCH64": "debug/elf", + "elf.R_AARCH64_ABS16": "debug/elf", + "elf.R_AARCH64_ABS32": "debug/elf", + "elf.R_AARCH64_ABS64": "debug/elf", + 
"elf.R_AARCH64_ADD_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_ADR_GOT_PAGE": "debug/elf", + "elf.R_AARCH64_ADR_PREL_LO21": "debug/elf", + "elf.R_AARCH64_ADR_PREL_PG_HI21": "debug/elf", + "elf.R_AARCH64_ADR_PREL_PG_HI21_NC": "debug/elf", + "elf.R_AARCH64_CALL26": "debug/elf", + "elf.R_AARCH64_CONDBR19": "debug/elf", + "elf.R_AARCH64_COPY": "debug/elf", + "elf.R_AARCH64_GLOB_DAT": "debug/elf", + "elf.R_AARCH64_GOT_LD_PREL19": "debug/elf", + "elf.R_AARCH64_IRELATIVE": "debug/elf", + "elf.R_AARCH64_JUMP26": "debug/elf", + "elf.R_AARCH64_JUMP_SLOT": "debug/elf", + "elf.R_AARCH64_LD64_GOT_LO12_NC": "debug/elf", + "elf.R_AARCH64_LDST128_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_LDST16_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_LDST32_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_LDST64_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_LDST8_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_LD_PREL_LO19": "debug/elf", + "elf.R_AARCH64_MOVW_SABS_G0": "debug/elf", + "elf.R_AARCH64_MOVW_SABS_G1": "debug/elf", + "elf.R_AARCH64_MOVW_SABS_G2": "debug/elf", + "elf.R_AARCH64_MOVW_UABS_G0": "debug/elf", + "elf.R_AARCH64_MOVW_UABS_G0_NC": "debug/elf", + "elf.R_AARCH64_MOVW_UABS_G1": "debug/elf", + "elf.R_AARCH64_MOVW_UABS_G1_NC": "debug/elf", + "elf.R_AARCH64_MOVW_UABS_G2": "debug/elf", + "elf.R_AARCH64_MOVW_UABS_G2_NC": "debug/elf", + "elf.R_AARCH64_MOVW_UABS_G3": "debug/elf", + "elf.R_AARCH64_NONE": "debug/elf", + "elf.R_AARCH64_NULL": "debug/elf", + "elf.R_AARCH64_P32_ABS16": "debug/elf", + "elf.R_AARCH64_P32_ABS32": "debug/elf", + "elf.R_AARCH64_P32_ADD_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_ADR_GOT_PAGE": "debug/elf", + "elf.R_AARCH64_P32_ADR_PREL_LO21": "debug/elf", + "elf.R_AARCH64_P32_ADR_PREL_PG_HI21": "debug/elf", + "elf.R_AARCH64_P32_CALL26": "debug/elf", + "elf.R_AARCH64_P32_CONDBR19": "debug/elf", + "elf.R_AARCH64_P32_COPY": "debug/elf", + "elf.R_AARCH64_P32_GLOB_DAT": "debug/elf", + "elf.R_AARCH64_P32_GOT_LD_PREL19": "debug/elf", + "elf.R_AARCH64_P32_IRELATIVE": 
"debug/elf", + "elf.R_AARCH64_P32_JUMP26": "debug/elf", + "elf.R_AARCH64_P32_JUMP_SLOT": "debug/elf", + "elf.R_AARCH64_P32_LD32_GOT_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_LDST128_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_LDST16_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_LDST32_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_LDST64_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_LDST8_ABS_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_LD_PREL_LO19": "debug/elf", + "elf.R_AARCH64_P32_MOVW_SABS_G0": "debug/elf", + "elf.R_AARCH64_P32_MOVW_UABS_G0": "debug/elf", + "elf.R_AARCH64_P32_MOVW_UABS_G0_NC": "debug/elf", + "elf.R_AARCH64_P32_MOVW_UABS_G1": "debug/elf", + "elf.R_AARCH64_P32_PREL16": "debug/elf", + "elf.R_AARCH64_P32_PREL32": "debug/elf", + "elf.R_AARCH64_P32_RELATIVE": "debug/elf", + "elf.R_AARCH64_P32_TLSDESC": "debug/elf", + "elf.R_AARCH64_P32_TLSDESC_ADD_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_TLSDESC_ADR_PAGE21": "debug/elf", + "elf.R_AARCH64_P32_TLSDESC_ADR_PREL21": "debug/elf", + "elf.R_AARCH64_P32_TLSDESC_CALL": "debug/elf", + "elf.R_AARCH64_P32_TLSDESC_LD32_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_TLSDESC_LD_PREL19": "debug/elf", + "elf.R_AARCH64_P32_TLSGD_ADD_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_TLSGD_ADR_PAGE21": "debug/elf", + "elf.R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21": "debug/elf", + "elf.R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19": "debug/elf", + "elf.R_AARCH64_P32_TLSLE_ADD_TPREL_HI12": "debug/elf", + "elf.R_AARCH64_P32_TLSLE_ADD_TPREL_LO12": "debug/elf", + "elf.R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC": "debug/elf", + "elf.R_AARCH64_P32_TLSLE_MOVW_TPREL_G0": "debug/elf", + "elf.R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC": "debug/elf", + "elf.R_AARCH64_P32_TLSLE_MOVW_TPREL_G1": "debug/elf", + "elf.R_AARCH64_P32_TLS_DTPMOD": "debug/elf", + "elf.R_AARCH64_P32_TLS_DTPREL": "debug/elf", + "elf.R_AARCH64_P32_TLS_TPREL": "debug/elf", + "elf.R_AARCH64_P32_TSTBR14": 
"debug/elf", + "elf.R_AARCH64_PREL16": "debug/elf", + "elf.R_AARCH64_PREL32": "debug/elf", + "elf.R_AARCH64_PREL64": "debug/elf", + "elf.R_AARCH64_RELATIVE": "debug/elf", + "elf.R_AARCH64_TLSDESC": "debug/elf", + "elf.R_AARCH64_TLSDESC_ADD": "debug/elf", + "elf.R_AARCH64_TLSDESC_ADD_LO12_NC": "debug/elf", + "elf.R_AARCH64_TLSDESC_ADR_PAGE21": "debug/elf", + "elf.R_AARCH64_TLSDESC_ADR_PREL21": "debug/elf", + "elf.R_AARCH64_TLSDESC_CALL": "debug/elf", + "elf.R_AARCH64_TLSDESC_LD64_LO12_NC": "debug/elf", + "elf.R_AARCH64_TLSDESC_LDR": "debug/elf", + "elf.R_AARCH64_TLSDESC_LD_PREL19": "debug/elf", + "elf.R_AARCH64_TLSDESC_OFF_G0_NC": "debug/elf", + "elf.R_AARCH64_TLSDESC_OFF_G1": "debug/elf", + "elf.R_AARCH64_TLSGD_ADD_LO12_NC": "debug/elf", + "elf.R_AARCH64_TLSGD_ADR_PAGE21": "debug/elf", + "elf.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21": "debug/elf", + "elf.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC": "debug/elf", + "elf.R_AARCH64_TLSIE_LD_GOTTPREL_PREL19": "debug/elf", + "elf.R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC": "debug/elf", + "elf.R_AARCH64_TLSIE_MOVW_GOTTPREL_G1": "debug/elf", + "elf.R_AARCH64_TLSLE_ADD_TPREL_HI12": "debug/elf", + "elf.R_AARCH64_TLSLE_ADD_TPREL_LO12": "debug/elf", + "elf.R_AARCH64_TLSLE_ADD_TPREL_LO12_NC": "debug/elf", + "elf.R_AARCH64_TLSLE_MOVW_TPREL_G0": "debug/elf", + "elf.R_AARCH64_TLSLE_MOVW_TPREL_G0_NC": "debug/elf", + "elf.R_AARCH64_TLSLE_MOVW_TPREL_G1": "debug/elf", + "elf.R_AARCH64_TLSLE_MOVW_TPREL_G1_NC": "debug/elf", + "elf.R_AARCH64_TLSLE_MOVW_TPREL_G2": "debug/elf", + "elf.R_AARCH64_TLS_DTPMOD64": "debug/elf", + "elf.R_AARCH64_TLS_DTPREL64": "debug/elf", + "elf.R_AARCH64_TLS_TPREL64": "debug/elf", + "elf.R_AARCH64_TSTBR14": "debug/elf", + "elf.R_ALPHA": "debug/elf", + "elf.R_ALPHA_BRADDR": "debug/elf", + "elf.R_ALPHA_COPY": "debug/elf", + "elf.R_ALPHA_GLOB_DAT": "debug/elf", + "elf.R_ALPHA_GPDISP": "debug/elf", + "elf.R_ALPHA_GPREL32": "debug/elf", + "elf.R_ALPHA_GPRELHIGH": "debug/elf", + "elf.R_ALPHA_GPRELLOW": "debug/elf", + 
"elf.R_ALPHA_GPVALUE": "debug/elf", + "elf.R_ALPHA_HINT": "debug/elf", + "elf.R_ALPHA_IMMED_BR_HI32": "debug/elf", + "elf.R_ALPHA_IMMED_GP_16": "debug/elf", + "elf.R_ALPHA_IMMED_GP_HI32": "debug/elf", + "elf.R_ALPHA_IMMED_LO32": "debug/elf", + "elf.R_ALPHA_IMMED_SCN_HI32": "debug/elf", + "elf.R_ALPHA_JMP_SLOT": "debug/elf", + "elf.R_ALPHA_LITERAL": "debug/elf", + "elf.R_ALPHA_LITUSE": "debug/elf", + "elf.R_ALPHA_NONE": "debug/elf", + "elf.R_ALPHA_OP_PRSHIFT": "debug/elf", + "elf.R_ALPHA_OP_PSUB": "debug/elf", + "elf.R_ALPHA_OP_PUSH": "debug/elf", + "elf.R_ALPHA_OP_STORE": "debug/elf", + "elf.R_ALPHA_REFLONG": "debug/elf", + "elf.R_ALPHA_REFQUAD": "debug/elf", + "elf.R_ALPHA_RELATIVE": "debug/elf", + "elf.R_ALPHA_SREL16": "debug/elf", + "elf.R_ALPHA_SREL32": "debug/elf", + "elf.R_ALPHA_SREL64": "debug/elf", + "elf.R_ARM": "debug/elf", + "elf.R_ARM_ABS12": "debug/elf", + "elf.R_ARM_ABS16": "debug/elf", + "elf.R_ARM_ABS32": "debug/elf", + "elf.R_ARM_ABS8": "debug/elf", + "elf.R_ARM_AMP_VCALL9": "debug/elf", + "elf.R_ARM_COPY": "debug/elf", + "elf.R_ARM_GLOB_DAT": "debug/elf", + "elf.R_ARM_GNU_VTENTRY": "debug/elf", + "elf.R_ARM_GNU_VTINHERIT": "debug/elf", + "elf.R_ARM_GOT32": "debug/elf", + "elf.R_ARM_GOTOFF": "debug/elf", + "elf.R_ARM_GOTPC": "debug/elf", + "elf.R_ARM_JUMP_SLOT": "debug/elf", + "elf.R_ARM_NONE": "debug/elf", + "elf.R_ARM_PC13": "debug/elf", + "elf.R_ARM_PC24": "debug/elf", + "elf.R_ARM_PLT32": "debug/elf", + "elf.R_ARM_RABS32": "debug/elf", + "elf.R_ARM_RBASE": "debug/elf", + "elf.R_ARM_REL32": "debug/elf", + "elf.R_ARM_RELATIVE": "debug/elf", + "elf.R_ARM_RPC24": "debug/elf", + "elf.R_ARM_RREL32": "debug/elf", + "elf.R_ARM_RSBREL32": "debug/elf", + "elf.R_ARM_SBREL32": "debug/elf", + "elf.R_ARM_SWI24": "debug/elf", + "elf.R_ARM_THM_ABS5": "debug/elf", + "elf.R_ARM_THM_PC22": "debug/elf", + "elf.R_ARM_THM_PC8": "debug/elf", + "elf.R_ARM_THM_RPC22": "debug/elf", + "elf.R_ARM_THM_SWI8": "debug/elf", + "elf.R_ARM_THM_XPC22": "debug/elf", + 
"elf.R_ARM_XPC25": "debug/elf", + "elf.R_INFO": "debug/elf", + "elf.R_INFO32": "debug/elf", + "elf.R_MIPS": "debug/elf", + "elf.R_MIPS_16": "debug/elf", + "elf.R_MIPS_26": "debug/elf", + "elf.R_MIPS_32": "debug/elf", + "elf.R_MIPS_64": "debug/elf", + "elf.R_MIPS_ADD_IMMEDIATE": "debug/elf", + "elf.R_MIPS_CALL16": "debug/elf", + "elf.R_MIPS_CALL_HI16": "debug/elf", + "elf.R_MIPS_CALL_LO16": "debug/elf", + "elf.R_MIPS_DELETE": "debug/elf", + "elf.R_MIPS_GOT16": "debug/elf", + "elf.R_MIPS_GOT_DISP": "debug/elf", + "elf.R_MIPS_GOT_HI16": "debug/elf", + "elf.R_MIPS_GOT_LO16": "debug/elf", + "elf.R_MIPS_GOT_OFST": "debug/elf", + "elf.R_MIPS_GOT_PAGE": "debug/elf", + "elf.R_MIPS_GPREL16": "debug/elf", + "elf.R_MIPS_GPREL32": "debug/elf", + "elf.R_MIPS_HI16": "debug/elf", + "elf.R_MIPS_HIGHER": "debug/elf", + "elf.R_MIPS_HIGHEST": "debug/elf", + "elf.R_MIPS_INSERT_A": "debug/elf", + "elf.R_MIPS_INSERT_B": "debug/elf", + "elf.R_MIPS_JALR": "debug/elf", + "elf.R_MIPS_LITERAL": "debug/elf", + "elf.R_MIPS_LO16": "debug/elf", + "elf.R_MIPS_NONE": "debug/elf", + "elf.R_MIPS_PC16": "debug/elf", + "elf.R_MIPS_PJUMP": "debug/elf", + "elf.R_MIPS_REL16": "debug/elf", + "elf.R_MIPS_REL32": "debug/elf", + "elf.R_MIPS_RELGOT": "debug/elf", + "elf.R_MIPS_SCN_DISP": "debug/elf", + "elf.R_MIPS_SHIFT5": "debug/elf", + "elf.R_MIPS_SHIFT6": "debug/elf", + "elf.R_MIPS_SUB": "debug/elf", + "elf.R_MIPS_TLS_DTPMOD32": "debug/elf", + "elf.R_MIPS_TLS_DTPMOD64": "debug/elf", + "elf.R_MIPS_TLS_DTPREL32": "debug/elf", + "elf.R_MIPS_TLS_DTPREL64": "debug/elf", + "elf.R_MIPS_TLS_DTPREL_HI16": "debug/elf", + "elf.R_MIPS_TLS_DTPREL_LO16": "debug/elf", + "elf.R_MIPS_TLS_GD": "debug/elf", + "elf.R_MIPS_TLS_GOTTPREL": "debug/elf", + "elf.R_MIPS_TLS_LDM": "debug/elf", + "elf.R_MIPS_TLS_TPREL32": "debug/elf", + "elf.R_MIPS_TLS_TPREL64": "debug/elf", + "elf.R_MIPS_TLS_TPREL_HI16": "debug/elf", + "elf.R_MIPS_TLS_TPREL_LO16": "debug/elf", + "elf.R_PPC": "debug/elf", + "elf.R_PPC64": "debug/elf", + 
"elf.R_PPC64_ADDR14": "debug/elf", + "elf.R_PPC64_ADDR14_BRNTAKEN": "debug/elf", + "elf.R_PPC64_ADDR14_BRTAKEN": "debug/elf", + "elf.R_PPC64_ADDR16": "debug/elf", + "elf.R_PPC64_ADDR16_DS": "debug/elf", + "elf.R_PPC64_ADDR16_HA": "debug/elf", + "elf.R_PPC64_ADDR16_HI": "debug/elf", + "elf.R_PPC64_ADDR16_HIGHER": "debug/elf", + "elf.R_PPC64_ADDR16_HIGHERA": "debug/elf", + "elf.R_PPC64_ADDR16_HIGHEST": "debug/elf", + "elf.R_PPC64_ADDR16_HIGHESTA": "debug/elf", + "elf.R_PPC64_ADDR16_LO": "debug/elf", + "elf.R_PPC64_ADDR16_LO_DS": "debug/elf", + "elf.R_PPC64_ADDR24": "debug/elf", + "elf.R_PPC64_ADDR32": "debug/elf", + "elf.R_PPC64_ADDR64": "debug/elf", + "elf.R_PPC64_DTPMOD64": "debug/elf", + "elf.R_PPC64_DTPREL16": "debug/elf", + "elf.R_PPC64_DTPREL16_DS": "debug/elf", + "elf.R_PPC64_DTPREL16_HA": "debug/elf", + "elf.R_PPC64_DTPREL16_HI": "debug/elf", + "elf.R_PPC64_DTPREL16_HIGHER": "debug/elf", + "elf.R_PPC64_DTPREL16_HIGHERA": "debug/elf", + "elf.R_PPC64_DTPREL16_HIGHEST": "debug/elf", + "elf.R_PPC64_DTPREL16_HIGHESTA": "debug/elf", + "elf.R_PPC64_DTPREL16_LO": "debug/elf", + "elf.R_PPC64_DTPREL16_LO_DS": "debug/elf", + "elf.R_PPC64_DTPREL64": "debug/elf", + "elf.R_PPC64_GOT16": "debug/elf", + "elf.R_PPC64_GOT16_DS": "debug/elf", + "elf.R_PPC64_GOT16_HA": "debug/elf", + "elf.R_PPC64_GOT16_HI": "debug/elf", + "elf.R_PPC64_GOT16_LO": "debug/elf", + "elf.R_PPC64_GOT16_LO_DS": "debug/elf", + "elf.R_PPC64_GOT_DTPREL16_DS": "debug/elf", + "elf.R_PPC64_GOT_DTPREL16_HA": "debug/elf", + "elf.R_PPC64_GOT_DTPREL16_HI": "debug/elf", + "elf.R_PPC64_GOT_DTPREL16_LO_DS": "debug/elf", + "elf.R_PPC64_GOT_TLSGD16": "debug/elf", + "elf.R_PPC64_GOT_TLSGD16_HA": "debug/elf", + "elf.R_PPC64_GOT_TLSGD16_HI": "debug/elf", + "elf.R_PPC64_GOT_TLSGD16_LO": "debug/elf", + "elf.R_PPC64_GOT_TLSLD16": "debug/elf", + "elf.R_PPC64_GOT_TLSLD16_HA": "debug/elf", + "elf.R_PPC64_GOT_TLSLD16_HI": "debug/elf", + "elf.R_PPC64_GOT_TLSLD16_LO": "debug/elf", + "elf.R_PPC64_GOT_TPREL16_DS": "debug/elf", + 
"elf.R_PPC64_GOT_TPREL16_HA": "debug/elf", + "elf.R_PPC64_GOT_TPREL16_HI": "debug/elf", + "elf.R_PPC64_GOT_TPREL16_LO_DS": "debug/elf", + "elf.R_PPC64_JMP_SLOT": "debug/elf", + "elf.R_PPC64_NONE": "debug/elf", + "elf.R_PPC64_REL14": "debug/elf", + "elf.R_PPC64_REL14_BRNTAKEN": "debug/elf", + "elf.R_PPC64_REL14_BRTAKEN": "debug/elf", + "elf.R_PPC64_REL16": "debug/elf", + "elf.R_PPC64_REL16_HA": "debug/elf", + "elf.R_PPC64_REL16_HI": "debug/elf", + "elf.R_PPC64_REL16_LO": "debug/elf", + "elf.R_PPC64_REL24": "debug/elf", + "elf.R_PPC64_REL32": "debug/elf", + "elf.R_PPC64_REL64": "debug/elf", + "elf.R_PPC64_TLS": "debug/elf", + "elf.R_PPC64_TLSGD": "debug/elf", + "elf.R_PPC64_TLSLD": "debug/elf", + "elf.R_PPC64_TOC": "debug/elf", + "elf.R_PPC64_TOC16": "debug/elf", + "elf.R_PPC64_TOC16_DS": "debug/elf", + "elf.R_PPC64_TOC16_HA": "debug/elf", + "elf.R_PPC64_TOC16_HI": "debug/elf", + "elf.R_PPC64_TOC16_LO": "debug/elf", + "elf.R_PPC64_TOC16_LO_DS": "debug/elf", + "elf.R_PPC64_TPREL16": "debug/elf", + "elf.R_PPC64_TPREL16_DS": "debug/elf", + "elf.R_PPC64_TPREL16_HA": "debug/elf", + "elf.R_PPC64_TPREL16_HI": "debug/elf", + "elf.R_PPC64_TPREL16_HIGHER": "debug/elf", + "elf.R_PPC64_TPREL16_HIGHERA": "debug/elf", + "elf.R_PPC64_TPREL16_HIGHEST": "debug/elf", + "elf.R_PPC64_TPREL16_HIGHESTA": "debug/elf", + "elf.R_PPC64_TPREL16_LO": "debug/elf", + "elf.R_PPC64_TPREL16_LO_DS": "debug/elf", + "elf.R_PPC64_TPREL64": "debug/elf", + "elf.R_PPC_ADDR14": "debug/elf", + "elf.R_PPC_ADDR14_BRNTAKEN": "debug/elf", + "elf.R_PPC_ADDR14_BRTAKEN": "debug/elf", + "elf.R_PPC_ADDR16": "debug/elf", + "elf.R_PPC_ADDR16_HA": "debug/elf", + "elf.R_PPC_ADDR16_HI": "debug/elf", + "elf.R_PPC_ADDR16_LO": "debug/elf", + "elf.R_PPC_ADDR24": "debug/elf", + "elf.R_PPC_ADDR32": "debug/elf", + "elf.R_PPC_COPY": "debug/elf", + "elf.R_PPC_DTPMOD32": "debug/elf", + "elf.R_PPC_DTPREL16": "debug/elf", + "elf.R_PPC_DTPREL16_HA": "debug/elf", + "elf.R_PPC_DTPREL16_HI": "debug/elf", + "elf.R_PPC_DTPREL16_LO": 
"debug/elf", + "elf.R_PPC_DTPREL32": "debug/elf", + "elf.R_PPC_EMB_BIT_FLD": "debug/elf", + "elf.R_PPC_EMB_MRKREF": "debug/elf", + "elf.R_PPC_EMB_NADDR16": "debug/elf", + "elf.R_PPC_EMB_NADDR16_HA": "debug/elf", + "elf.R_PPC_EMB_NADDR16_HI": "debug/elf", + "elf.R_PPC_EMB_NADDR16_LO": "debug/elf", + "elf.R_PPC_EMB_NADDR32": "debug/elf", + "elf.R_PPC_EMB_RELSDA": "debug/elf", + "elf.R_PPC_EMB_RELSEC16": "debug/elf", + "elf.R_PPC_EMB_RELST_HA": "debug/elf", + "elf.R_PPC_EMB_RELST_HI": "debug/elf", + "elf.R_PPC_EMB_RELST_LO": "debug/elf", + "elf.R_PPC_EMB_SDA21": "debug/elf", + "elf.R_PPC_EMB_SDA2I16": "debug/elf", + "elf.R_PPC_EMB_SDA2REL": "debug/elf", + "elf.R_PPC_EMB_SDAI16": "debug/elf", + "elf.R_PPC_GLOB_DAT": "debug/elf", + "elf.R_PPC_GOT16": "debug/elf", + "elf.R_PPC_GOT16_HA": "debug/elf", + "elf.R_PPC_GOT16_HI": "debug/elf", + "elf.R_PPC_GOT16_LO": "debug/elf", + "elf.R_PPC_GOT_TLSGD16": "debug/elf", + "elf.R_PPC_GOT_TLSGD16_HA": "debug/elf", + "elf.R_PPC_GOT_TLSGD16_HI": "debug/elf", + "elf.R_PPC_GOT_TLSGD16_LO": "debug/elf", + "elf.R_PPC_GOT_TLSLD16": "debug/elf", + "elf.R_PPC_GOT_TLSLD16_HA": "debug/elf", + "elf.R_PPC_GOT_TLSLD16_HI": "debug/elf", + "elf.R_PPC_GOT_TLSLD16_LO": "debug/elf", + "elf.R_PPC_GOT_TPREL16": "debug/elf", + "elf.R_PPC_GOT_TPREL16_HA": "debug/elf", + "elf.R_PPC_GOT_TPREL16_HI": "debug/elf", + "elf.R_PPC_GOT_TPREL16_LO": "debug/elf", + "elf.R_PPC_JMP_SLOT": "debug/elf", + "elf.R_PPC_LOCAL24PC": "debug/elf", + "elf.R_PPC_NONE": "debug/elf", + "elf.R_PPC_PLT16_HA": "debug/elf", + "elf.R_PPC_PLT16_HI": "debug/elf", + "elf.R_PPC_PLT16_LO": "debug/elf", + "elf.R_PPC_PLT32": "debug/elf", + "elf.R_PPC_PLTREL24": "debug/elf", + "elf.R_PPC_PLTREL32": "debug/elf", + "elf.R_PPC_REL14": "debug/elf", + "elf.R_PPC_REL14_BRNTAKEN": "debug/elf", + "elf.R_PPC_REL14_BRTAKEN": "debug/elf", + "elf.R_PPC_REL24": "debug/elf", + "elf.R_PPC_REL32": "debug/elf", + "elf.R_PPC_RELATIVE": "debug/elf", + "elf.R_PPC_SDAREL16": "debug/elf", + "elf.R_PPC_SECTOFF": 
"debug/elf", + "elf.R_PPC_SECTOFF_HA": "debug/elf", + "elf.R_PPC_SECTOFF_HI": "debug/elf", + "elf.R_PPC_SECTOFF_LO": "debug/elf", + "elf.R_PPC_TLS": "debug/elf", + "elf.R_PPC_TPREL16": "debug/elf", + "elf.R_PPC_TPREL16_HA": "debug/elf", + "elf.R_PPC_TPREL16_HI": "debug/elf", + "elf.R_PPC_TPREL16_LO": "debug/elf", + "elf.R_PPC_TPREL32": "debug/elf", + "elf.R_PPC_UADDR16": "debug/elf", + "elf.R_PPC_UADDR32": "debug/elf", + "elf.R_SPARC": "debug/elf", + "elf.R_SPARC_10": "debug/elf", + "elf.R_SPARC_11": "debug/elf", + "elf.R_SPARC_13": "debug/elf", + "elf.R_SPARC_16": "debug/elf", + "elf.R_SPARC_22": "debug/elf", + "elf.R_SPARC_32": "debug/elf", + "elf.R_SPARC_5": "debug/elf", + "elf.R_SPARC_6": "debug/elf", + "elf.R_SPARC_64": "debug/elf", + "elf.R_SPARC_7": "debug/elf", + "elf.R_SPARC_8": "debug/elf", + "elf.R_SPARC_COPY": "debug/elf", + "elf.R_SPARC_DISP16": "debug/elf", + "elf.R_SPARC_DISP32": "debug/elf", + "elf.R_SPARC_DISP64": "debug/elf", + "elf.R_SPARC_DISP8": "debug/elf", + "elf.R_SPARC_GLOB_DAT": "debug/elf", + "elf.R_SPARC_GLOB_JMP": "debug/elf", + "elf.R_SPARC_GOT10": "debug/elf", + "elf.R_SPARC_GOT13": "debug/elf", + "elf.R_SPARC_GOT22": "debug/elf", + "elf.R_SPARC_H44": "debug/elf", + "elf.R_SPARC_HH22": "debug/elf", + "elf.R_SPARC_HI22": "debug/elf", + "elf.R_SPARC_HIPLT22": "debug/elf", + "elf.R_SPARC_HIX22": "debug/elf", + "elf.R_SPARC_HM10": "debug/elf", + "elf.R_SPARC_JMP_SLOT": "debug/elf", + "elf.R_SPARC_L44": "debug/elf", + "elf.R_SPARC_LM22": "debug/elf", + "elf.R_SPARC_LO10": "debug/elf", + "elf.R_SPARC_LOPLT10": "debug/elf", + "elf.R_SPARC_LOX10": "debug/elf", + "elf.R_SPARC_M44": "debug/elf", + "elf.R_SPARC_NONE": "debug/elf", + "elf.R_SPARC_OLO10": "debug/elf", + "elf.R_SPARC_PC10": "debug/elf", + "elf.R_SPARC_PC22": "debug/elf", + "elf.R_SPARC_PCPLT10": "debug/elf", + "elf.R_SPARC_PCPLT22": "debug/elf", + "elf.R_SPARC_PCPLT32": "debug/elf", + "elf.R_SPARC_PC_HH22": "debug/elf", + "elf.R_SPARC_PC_HM10": "debug/elf", + "elf.R_SPARC_PC_LM22": 
"debug/elf", + "elf.R_SPARC_PLT32": "debug/elf", + "elf.R_SPARC_PLT64": "debug/elf", + "elf.R_SPARC_REGISTER": "debug/elf", + "elf.R_SPARC_RELATIVE": "debug/elf", + "elf.R_SPARC_UA16": "debug/elf", + "elf.R_SPARC_UA32": "debug/elf", + "elf.R_SPARC_UA64": "debug/elf", + "elf.R_SPARC_WDISP16": "debug/elf", + "elf.R_SPARC_WDISP19": "debug/elf", + "elf.R_SPARC_WDISP22": "debug/elf", + "elf.R_SPARC_WDISP30": "debug/elf", + "elf.R_SPARC_WPLT30": "debug/elf", + "elf.R_SYM32": "debug/elf", + "elf.R_SYM64": "debug/elf", + "elf.R_TYPE32": "debug/elf", + "elf.R_TYPE64": "debug/elf", + "elf.R_X86_64": "debug/elf", + "elf.R_X86_64_16": "debug/elf", + "elf.R_X86_64_32": "debug/elf", + "elf.R_X86_64_32S": "debug/elf", + "elf.R_X86_64_64": "debug/elf", + "elf.R_X86_64_8": "debug/elf", + "elf.R_X86_64_COPY": "debug/elf", + "elf.R_X86_64_DTPMOD64": "debug/elf", + "elf.R_X86_64_DTPOFF32": "debug/elf", + "elf.R_X86_64_DTPOFF64": "debug/elf", + "elf.R_X86_64_GLOB_DAT": "debug/elf", + "elf.R_X86_64_GOT32": "debug/elf", + "elf.R_X86_64_GOTPCREL": "debug/elf", + "elf.R_X86_64_GOTTPOFF": "debug/elf", + "elf.R_X86_64_JMP_SLOT": "debug/elf", + "elf.R_X86_64_NONE": "debug/elf", + "elf.R_X86_64_PC16": "debug/elf", + "elf.R_X86_64_PC32": "debug/elf", + "elf.R_X86_64_PC8": "debug/elf", + "elf.R_X86_64_PLT32": "debug/elf", + "elf.R_X86_64_RELATIVE": "debug/elf", + "elf.R_X86_64_TLSGD": "debug/elf", + "elf.R_X86_64_TLSLD": "debug/elf", + "elf.R_X86_64_TPOFF32": "debug/elf", + "elf.R_X86_64_TPOFF64": "debug/elf", + "elf.Rel32": "debug/elf", + "elf.Rel64": "debug/elf", + "elf.Rela32": "debug/elf", + "elf.Rela64": "debug/elf", + "elf.SHF_ALLOC": "debug/elf", + "elf.SHF_COMPRESSED": "debug/elf", + "elf.SHF_EXECINSTR": "debug/elf", + "elf.SHF_GROUP": "debug/elf", + "elf.SHF_INFO_LINK": "debug/elf", + "elf.SHF_LINK_ORDER": "debug/elf", + "elf.SHF_MASKOS": "debug/elf", + "elf.SHF_MASKPROC": "debug/elf", + "elf.SHF_MERGE": "debug/elf", + "elf.SHF_OS_NONCONFORMING": "debug/elf", + "elf.SHF_STRINGS": 
"debug/elf", + "elf.SHF_TLS": "debug/elf", + "elf.SHF_WRITE": "debug/elf", + "elf.SHN_ABS": "debug/elf", + "elf.SHN_COMMON": "debug/elf", + "elf.SHN_HIOS": "debug/elf", + "elf.SHN_HIPROC": "debug/elf", + "elf.SHN_HIRESERVE": "debug/elf", + "elf.SHN_LOOS": "debug/elf", + "elf.SHN_LOPROC": "debug/elf", + "elf.SHN_LORESERVE": "debug/elf", + "elf.SHN_UNDEF": "debug/elf", + "elf.SHN_XINDEX": "debug/elf", + "elf.SHT_DYNAMIC": "debug/elf", + "elf.SHT_DYNSYM": "debug/elf", + "elf.SHT_FINI_ARRAY": "debug/elf", + "elf.SHT_GNU_ATTRIBUTES": "debug/elf", + "elf.SHT_GNU_HASH": "debug/elf", + "elf.SHT_GNU_LIBLIST": "debug/elf", + "elf.SHT_GNU_VERDEF": "debug/elf", + "elf.SHT_GNU_VERNEED": "debug/elf", + "elf.SHT_GNU_VERSYM": "debug/elf", + "elf.SHT_GROUP": "debug/elf", + "elf.SHT_HASH": "debug/elf", + "elf.SHT_HIOS": "debug/elf", + "elf.SHT_HIPROC": "debug/elf", + "elf.SHT_HIUSER": "debug/elf", + "elf.SHT_INIT_ARRAY": "debug/elf", + "elf.SHT_LOOS": "debug/elf", + "elf.SHT_LOPROC": "debug/elf", + "elf.SHT_LOUSER": "debug/elf", + "elf.SHT_NOBITS": "debug/elf", + "elf.SHT_NOTE": "debug/elf", + "elf.SHT_NULL": "debug/elf", + "elf.SHT_PREINIT_ARRAY": "debug/elf", + "elf.SHT_PROGBITS": "debug/elf", + "elf.SHT_REL": "debug/elf", + "elf.SHT_RELA": "debug/elf", + "elf.SHT_SHLIB": "debug/elf", + "elf.SHT_STRTAB": "debug/elf", + "elf.SHT_SYMTAB": "debug/elf", + "elf.SHT_SYMTAB_SHNDX": "debug/elf", + "elf.STB_GLOBAL": "debug/elf", + "elf.STB_HIOS": "debug/elf", + "elf.STB_HIPROC": "debug/elf", + "elf.STB_LOCAL": "debug/elf", + "elf.STB_LOOS": "debug/elf", + "elf.STB_LOPROC": "debug/elf", + "elf.STB_WEAK": "debug/elf", + "elf.STT_COMMON": "debug/elf", + "elf.STT_FILE": "debug/elf", + "elf.STT_FUNC": "debug/elf", + "elf.STT_HIOS": "debug/elf", + "elf.STT_HIPROC": "debug/elf", + "elf.STT_LOOS": "debug/elf", + "elf.STT_LOPROC": "debug/elf", + "elf.STT_NOTYPE": "debug/elf", + "elf.STT_OBJECT": "debug/elf", + "elf.STT_SECTION": "debug/elf", + "elf.STT_TLS": "debug/elf", + "elf.STV_DEFAULT": 
"debug/elf", + "elf.STV_HIDDEN": "debug/elf", + "elf.STV_INTERNAL": "debug/elf", + "elf.STV_PROTECTED": "debug/elf", + "elf.ST_BIND": "debug/elf", + "elf.ST_INFO": "debug/elf", + "elf.ST_TYPE": "debug/elf", + "elf.ST_VISIBILITY": "debug/elf", + "elf.Section": "debug/elf", + "elf.Section32": "debug/elf", + "elf.Section64": "debug/elf", + "elf.SectionFlag": "debug/elf", + "elf.SectionHeader": "debug/elf", + "elf.SectionIndex": "debug/elf", + "elf.SectionType": "debug/elf", + "elf.Sym32": "debug/elf", + "elf.Sym32Size": "debug/elf", + "elf.Sym64": "debug/elf", + "elf.Sym64Size": "debug/elf", + "elf.SymBind": "debug/elf", + "elf.SymType": "debug/elf", + "elf.SymVis": "debug/elf", + "elf.Symbol": "debug/elf", + "elf.Type": "debug/elf", + "elf.Version": "debug/elf", + "elliptic.Curve": "crypto/elliptic", + "elliptic.CurveParams": "crypto/elliptic", + "elliptic.GenerateKey": "crypto/elliptic", + "elliptic.Marshal": "crypto/elliptic", + "elliptic.P224": "crypto/elliptic", + "elliptic.P256": "crypto/elliptic", + "elliptic.P384": "crypto/elliptic", + "elliptic.P521": "crypto/elliptic", + "elliptic.Unmarshal": "crypto/elliptic", + "encoding.BinaryMarshaler": "encoding", + "encoding.BinaryUnmarshaler": "encoding", + "encoding.TextMarshaler": "encoding", + "encoding.TextUnmarshaler": "encoding", + "errors.New": "errors", + "exec.Cmd": "os/exec", + "exec.Command": "os/exec", + "exec.CommandContext": "os/exec", + "exec.ErrNotFound": "os/exec", + "exec.Error": "os/exec", + "exec.ExitError": "os/exec", + "exec.LookPath": "os/exec", + "expvar.Do": "expvar", + "expvar.Float": "expvar", + "expvar.Func": "expvar", + "expvar.Get": "expvar", + "expvar.Handler": "expvar", + "expvar.Int": "expvar", + "expvar.KeyValue": "expvar", + "expvar.Map": "expvar", + "expvar.NewFloat": "expvar", + "expvar.NewInt": "expvar", + "expvar.NewMap": "expvar", + "expvar.NewString": "expvar", + "expvar.Publish": "expvar", + "expvar.String": "expvar", + "expvar.Var": "expvar", + "fcgi.ErrConnClosed": 
"net/http/fcgi", + "fcgi.ErrRequestAborted": "net/http/fcgi", + "fcgi.Serve": "net/http/fcgi", + "filepath.Abs": "path/filepath", + "filepath.Base": "path/filepath", + "filepath.Clean": "path/filepath", + "filepath.Dir": "path/filepath", + "filepath.ErrBadPattern": "path/filepath", + "filepath.EvalSymlinks": "path/filepath", + "filepath.Ext": "path/filepath", + "filepath.FromSlash": "path/filepath", + "filepath.Glob": "path/filepath", + "filepath.HasPrefix": "path/filepath", + "filepath.IsAbs": "path/filepath", + "filepath.Join": "path/filepath", + "filepath.ListSeparator": "path/filepath", + "filepath.Match": "path/filepath", + "filepath.Rel": "path/filepath", + "filepath.Separator": "path/filepath", + "filepath.SkipDir": "path/filepath", + "filepath.Split": "path/filepath", + "filepath.SplitList": "path/filepath", + "filepath.ToSlash": "path/filepath", + "filepath.VolumeName": "path/filepath", + "filepath.Walk": "path/filepath", + "filepath.WalkFunc": "path/filepath", + "flag.Arg": "flag", + "flag.Args": "flag", + "flag.Bool": "flag", + "flag.BoolVar": "flag", + "flag.CommandLine": "flag", + "flag.ContinueOnError": "flag", + "flag.Duration": "flag", + "flag.DurationVar": "flag", + "flag.ErrHelp": "flag", + "flag.ErrorHandling": "flag", + "flag.ExitOnError": "flag", + "flag.Flag": "flag", + "flag.FlagSet": "flag", + "flag.Float64": "flag", + "flag.Float64Var": "flag", + "flag.Getter": "flag", + "flag.Int": "flag", + "flag.Int64": "flag", + "flag.Int64Var": "flag", + "flag.IntVar": "flag", + "flag.Lookup": "flag", + "flag.NArg": "flag", + "flag.NFlag": "flag", + "flag.NewFlagSet": "flag", + "flag.PanicOnError": "flag", + "flag.Parse": "flag", + "flag.Parsed": "flag", + "flag.PrintDefaults": "flag", + "flag.Set": "flag", + "flag.String": "flag", + "flag.StringVar": "flag", + "flag.Uint": "flag", + "flag.Uint64": "flag", + "flag.Uint64Var": "flag", + "flag.UintVar": "flag", + "flag.UnquoteUsage": "flag", + "flag.Usage": "flag", + "flag.Value": "flag", + "flag.Var": 
"flag", + "flag.Visit": "flag", + "flag.VisitAll": "flag", + "flate.BestCompression": "compress/flate", + "flate.BestSpeed": "compress/flate", + "flate.CorruptInputError": "compress/flate", + "flate.DefaultCompression": "compress/flate", + "flate.HuffmanOnly": "compress/flate", + "flate.InternalError": "compress/flate", + "flate.NewReader": "compress/flate", + "flate.NewReaderDict": "compress/flate", + "flate.NewWriter": "compress/flate", + "flate.NewWriterDict": "compress/flate", + "flate.NoCompression": "compress/flate", + "flate.ReadError": "compress/flate", + "flate.Reader": "compress/flate", + "flate.Resetter": "compress/flate", + "flate.WriteError": "compress/flate", + "flate.Writer": "compress/flate", + "fmt.Errorf": "fmt", + "fmt.Formatter": "fmt", + "fmt.Fprint": "fmt", + "fmt.Fprintf": "fmt", + "fmt.Fprintln": "fmt", + "fmt.Fscan": "fmt", + "fmt.Fscanf": "fmt", + "fmt.Fscanln": "fmt", + "fmt.GoStringer": "fmt", + "fmt.Print": "fmt", + "fmt.Printf": "fmt", + "fmt.Println": "fmt", + "fmt.Scan": "fmt", + "fmt.ScanState": "fmt", + "fmt.Scanf": "fmt", + "fmt.Scanln": "fmt", + "fmt.Scanner": "fmt", + "fmt.Sprint": "fmt", + "fmt.Sprintf": "fmt", + "fmt.Sprintln": "fmt", + "fmt.Sscan": "fmt", + "fmt.Sscanf": "fmt", + "fmt.Sscanln": "fmt", + "fmt.State": "fmt", + "fmt.Stringer": "fmt", + "fnv.New32": "hash/fnv", + "fnv.New32a": "hash/fnv", + "fnv.New64": "hash/fnv", + "fnv.New64a": "hash/fnv", + "format.Node": "go/format", + "format.Source": "go/format", + "gif.Decode": "image/gif", + "gif.DecodeAll": "image/gif", + "gif.DecodeConfig": "image/gif", + "gif.DisposalBackground": "image/gif", + "gif.DisposalNone": "image/gif", + "gif.DisposalPrevious": "image/gif", + "gif.Encode": "image/gif", + "gif.EncodeAll": "image/gif", + "gif.GIF": "image/gif", + "gif.Options": "image/gif", + "gob.CommonType": "encoding/gob", + "gob.Decoder": "encoding/gob", + "gob.Encoder": "encoding/gob", + "gob.GobDecoder": "encoding/gob", + "gob.GobEncoder": "encoding/gob", + 
"gob.NewDecoder": "encoding/gob", + "gob.NewEncoder": "encoding/gob", + "gob.Register": "encoding/gob", + "gob.RegisterName": "encoding/gob", + "gosym.DecodingError": "debug/gosym", + "gosym.Func": "debug/gosym", + "gosym.LineTable": "debug/gosym", + "gosym.NewLineTable": "debug/gosym", + "gosym.NewTable": "debug/gosym", + "gosym.Obj": "debug/gosym", + "gosym.Sym": "debug/gosym", + "gosym.Table": "debug/gosym", + "gosym.UnknownFileError": "debug/gosym", + "gosym.UnknownLineError": "debug/gosym", + "gzip.BestCompression": "compress/gzip", + "gzip.BestSpeed": "compress/gzip", + "gzip.DefaultCompression": "compress/gzip", + "gzip.ErrChecksum": "compress/gzip", + "gzip.ErrHeader": "compress/gzip", + "gzip.Header": "compress/gzip", + "gzip.HuffmanOnly": "compress/gzip", + "gzip.NewReader": "compress/gzip", + "gzip.NewWriter": "compress/gzip", + "gzip.NewWriterLevel": "compress/gzip", + "gzip.NoCompression": "compress/gzip", + "gzip.Reader": "compress/gzip", + "gzip.Writer": "compress/gzip", + "hash.Hash": "hash", + "hash.Hash32": "hash", + "hash.Hash64": "hash", + "heap.Fix": "container/heap", + "heap.Init": "container/heap", + "heap.Interface": "container/heap", + "heap.Pop": "container/heap", + "heap.Push": "container/heap", + "heap.Remove": "container/heap", + "hex.Decode": "encoding/hex", + "hex.DecodeString": "encoding/hex", + "hex.DecodedLen": "encoding/hex", + "hex.Dump": "encoding/hex", + "hex.Dumper": "encoding/hex", + "hex.Encode": "encoding/hex", + "hex.EncodeToString": "encoding/hex", + "hex.EncodedLen": "encoding/hex", + "hex.ErrLength": "encoding/hex", + "hex.InvalidByteError": "encoding/hex", + "hmac.Equal": "crypto/hmac", + "hmac.New": "crypto/hmac", + "html.EscapeString": "html", + "html.UnescapeString": "html", + "http.CanonicalHeaderKey": "net/http", + "http.Client": "net/http", + "http.CloseNotifier": "net/http", + "http.ConnState": "net/http", + "http.Cookie": "net/http", + "http.CookieJar": "net/http", + "http.DefaultClient": "net/http", + 
"http.DefaultMaxHeaderBytes": "net/http", + "http.DefaultMaxIdleConnsPerHost": "net/http", + "http.DefaultServeMux": "net/http", + "http.DefaultTransport": "net/http", + "http.DetectContentType": "net/http", + "http.Dir": "net/http", + "http.ErrAbortHandler": "net/http", + "http.ErrBodyNotAllowed": "net/http", + "http.ErrBodyReadAfterClose": "net/http", + "http.ErrContentLength": "net/http", + "http.ErrHandlerTimeout": "net/http", + "http.ErrHeaderTooLong": "net/http", + "http.ErrHijacked": "net/http", + "http.ErrLineTooLong": "net/http", + "http.ErrMissingBoundary": "net/http", + "http.ErrMissingContentLength": "net/http", + "http.ErrMissingFile": "net/http", + "http.ErrNoCookie": "net/http", + "http.ErrNoLocation": "net/http", + "http.ErrNotMultipart": "net/http", + "http.ErrNotSupported": "net/http", + "http.ErrServerClosed": "net/http", + "http.ErrShortBody": "net/http", + "http.ErrSkipAltProtocol": "net/http", + "http.ErrUnexpectedTrailer": "net/http", + "http.ErrUseLastResponse": "net/http", + "http.ErrWriteAfterFlush": "net/http", + "http.Error": "net/http", + "http.File": "net/http", + "http.FileServer": "net/http", + "http.FileSystem": "net/http", + "http.Flusher": "net/http", + "http.Get": "net/http", + "http.Handle": "net/http", + "http.HandleFunc": "net/http", + "http.Handler": "net/http", + "http.HandlerFunc": "net/http", + "http.Head": "net/http", + "http.Header": "net/http", + "http.Hijacker": "net/http", + "http.ListenAndServe": "net/http", + "http.ListenAndServeTLS": "net/http", + "http.LocalAddrContextKey": "net/http", + "http.MaxBytesReader": "net/http", + "http.MethodConnect": "net/http", + "http.MethodDelete": "net/http", + "http.MethodGet": "net/http", + "http.MethodHead": "net/http", + "http.MethodOptions": "net/http", + "http.MethodPatch": "net/http", + "http.MethodPost": "net/http", + "http.MethodPut": "net/http", + "http.MethodTrace": "net/http", + "http.NewFileTransport": "net/http", + "http.NewRequest": "net/http", + "http.NewServeMux": 
"net/http", + "http.NoBody": "net/http", + "http.NotFound": "net/http", + "http.NotFoundHandler": "net/http", + "http.ParseHTTPVersion": "net/http", + "http.ParseTime": "net/http", + "http.Post": "net/http", + "http.PostForm": "net/http", + "http.ProtocolError": "net/http", + "http.ProxyFromEnvironment": "net/http", + "http.ProxyURL": "net/http", + "http.PushOptions": "net/http", + "http.Pusher": "net/http", + "http.ReadRequest": "net/http", + "http.ReadResponse": "net/http", + "http.Redirect": "net/http", + "http.RedirectHandler": "net/http", + "http.Request": "net/http", + "http.Response": "net/http", + "http.ResponseWriter": "net/http", + "http.RoundTripper": "net/http", + "http.Serve": "net/http", + "http.ServeContent": "net/http", + "http.ServeFile": "net/http", + "http.ServeMux": "net/http", + "http.Server": "net/http", + "http.ServerContextKey": "net/http", + "http.SetCookie": "net/http", + "http.StateActive": "net/http", + "http.StateClosed": "net/http", + "http.StateHijacked": "net/http", + "http.StateIdle": "net/http", + "http.StateNew": "net/http", + "http.StatusAccepted": "net/http", + "http.StatusAlreadyReported": "net/http", + "http.StatusBadGateway": "net/http", + "http.StatusBadRequest": "net/http", + "http.StatusConflict": "net/http", + "http.StatusContinue": "net/http", + "http.StatusCreated": "net/http", + "http.StatusExpectationFailed": "net/http", + "http.StatusFailedDependency": "net/http", + "http.StatusForbidden": "net/http", + "http.StatusFound": "net/http", + "http.StatusGatewayTimeout": "net/http", + "http.StatusGone": "net/http", + "http.StatusHTTPVersionNotSupported": "net/http", + "http.StatusIMUsed": "net/http", + "http.StatusInsufficientStorage": "net/http", + "http.StatusInternalServerError": "net/http", + "http.StatusLengthRequired": "net/http", + "http.StatusLocked": "net/http", + "http.StatusLoopDetected": "net/http", + "http.StatusMethodNotAllowed": "net/http", + "http.StatusMovedPermanently": "net/http", + 
"http.StatusMultiStatus": "net/http", + "http.StatusMultipleChoices": "net/http", + "http.StatusNetworkAuthenticationRequired": "net/http", + "http.StatusNoContent": "net/http", + "http.StatusNonAuthoritativeInfo": "net/http", + "http.StatusNotAcceptable": "net/http", + "http.StatusNotExtended": "net/http", + "http.StatusNotFound": "net/http", + "http.StatusNotImplemented": "net/http", + "http.StatusNotModified": "net/http", + "http.StatusOK": "net/http", + "http.StatusPartialContent": "net/http", + "http.StatusPaymentRequired": "net/http", + "http.StatusPermanentRedirect": "net/http", + "http.StatusPreconditionFailed": "net/http", + "http.StatusPreconditionRequired": "net/http", + "http.StatusProcessing": "net/http", + "http.StatusProxyAuthRequired": "net/http", + "http.StatusRequestEntityTooLarge": "net/http", + "http.StatusRequestHeaderFieldsTooLarge": "net/http", + "http.StatusRequestTimeout": "net/http", + "http.StatusRequestURITooLong": "net/http", + "http.StatusRequestedRangeNotSatisfiable": "net/http", + "http.StatusResetContent": "net/http", + "http.StatusSeeOther": "net/http", + "http.StatusServiceUnavailable": "net/http", + "http.StatusSwitchingProtocols": "net/http", + "http.StatusTeapot": "net/http", + "http.StatusTemporaryRedirect": "net/http", + "http.StatusText": "net/http", + "http.StatusTooManyRequests": "net/http", + "http.StatusUnauthorized": "net/http", + "http.StatusUnavailableForLegalReasons": "net/http", + "http.StatusUnprocessableEntity": "net/http", + "http.StatusUnsupportedMediaType": "net/http", + "http.StatusUpgradeRequired": "net/http", + "http.StatusUseProxy": "net/http", + "http.StatusVariantAlsoNegotiates": "net/http", + "http.StripPrefix": "net/http", + "http.TimeFormat": "net/http", + "http.TimeoutHandler": "net/http", + "http.TrailerPrefix": "net/http", + "http.Transport": "net/http", + "httptest.DefaultRemoteAddr": "net/http/httptest", + "httptest.NewRecorder": "net/http/httptest", + "httptest.NewRequest": "net/http/httptest", + 
"httptest.NewServer": "net/http/httptest", + "httptest.NewTLSServer": "net/http/httptest", + "httptest.NewUnstartedServer": "net/http/httptest", + "httptest.ResponseRecorder": "net/http/httptest", + "httptest.Server": "net/http/httptest", + "httptrace.ClientTrace": "net/http/httptrace", + "httptrace.ContextClientTrace": "net/http/httptrace", + "httptrace.DNSDoneInfo": "net/http/httptrace", + "httptrace.DNSStartInfo": "net/http/httptrace", + "httptrace.GotConnInfo": "net/http/httptrace", + "httptrace.WithClientTrace": "net/http/httptrace", + "httptrace.WroteRequestInfo": "net/http/httptrace", + "httputil.BufferPool": "net/http/httputil", + "httputil.ClientConn": "net/http/httputil", + "httputil.DumpRequest": "net/http/httputil", + "httputil.DumpRequestOut": "net/http/httputil", + "httputil.DumpResponse": "net/http/httputil", + "httputil.ErrClosed": "net/http/httputil", + "httputil.ErrLineTooLong": "net/http/httputil", + "httputil.ErrPersistEOF": "net/http/httputil", + "httputil.ErrPipeline": "net/http/httputil", + "httputil.NewChunkedReader": "net/http/httputil", + "httputil.NewChunkedWriter": "net/http/httputil", + "httputil.NewClientConn": "net/http/httputil", + "httputil.NewProxyClientConn": "net/http/httputil", + "httputil.NewServerConn": "net/http/httputil", + "httputil.NewSingleHostReverseProxy": "net/http/httputil", + "httputil.ReverseProxy": "net/http/httputil", + "httputil.ServerConn": "net/http/httputil", + "image.Alpha": "image", + "image.Alpha16": "image", + "image.Black": "image", + "image.CMYK": "image", + "image.Config": "image", + "image.Decode": "image", + "image.DecodeConfig": "image", + "image.ErrFormat": "image", + "image.Gray": "image", + "image.Gray16": "image", + "image.Image": "image", + "image.NRGBA": "image", + "image.NRGBA64": "image", + "image.NYCbCrA": "image", + "image.NewAlpha": "image", + "image.NewAlpha16": "image", + "image.NewCMYK": "image", + "image.NewGray": "image", + "image.NewGray16": "image", + "image.NewNRGBA": "image", + 
"image.NewNRGBA64": "image", + "image.NewNYCbCrA": "image", + "image.NewPaletted": "image", + "image.NewRGBA": "image", + "image.NewRGBA64": "image", + "image.NewUniform": "image", + "image.NewYCbCr": "image", + "image.Opaque": "image", + "image.Paletted": "image", + "image.PalettedImage": "image", + "image.Point": "image", + "image.Pt": "image", + "image.RGBA": "image", + "image.RGBA64": "image", + "image.Rect": "image", + "image.Rectangle": "image", + "image.RegisterFormat": "image", + "image.Transparent": "image", + "image.Uniform": "image", + "image.White": "image", + "image.YCbCr": "image", + "image.YCbCrSubsampleRatio": "image", + "image.YCbCrSubsampleRatio410": "image", + "image.YCbCrSubsampleRatio411": "image", + "image.YCbCrSubsampleRatio420": "image", + "image.YCbCrSubsampleRatio422": "image", + "image.YCbCrSubsampleRatio440": "image", + "image.YCbCrSubsampleRatio444": "image", + "image.ZP": "image", + "image.ZR": "image", + "importer.Default": "go/importer", + "importer.For": "go/importer", + "importer.Lookup": "go/importer", + "io.ByteReader": "io", + "io.ByteScanner": "io", + "io.ByteWriter": "io", + "io.Closer": "io", + "io.Copy": "io", + "io.CopyBuffer": "io", + "io.CopyN": "io", + "io.EOF": "io", + "io.ErrClosedPipe": "io", + "io.ErrNoProgress": "io", + "io.ErrShortBuffer": "io", + "io.ErrShortWrite": "io", + "io.ErrUnexpectedEOF": "io", + "io.LimitReader": "io", + "io.LimitedReader": "io", + "io.MultiReader": "io", + "io.MultiWriter": "io", + "io.NewSectionReader": "io", + "io.Pipe": "io", + "io.PipeReader": "io", + "io.PipeWriter": "io", + "io.ReadAtLeast": "io", + "io.ReadCloser": "io", + "io.ReadFull": "io", + "io.ReadSeeker": "io", + "io.ReadWriteCloser": "io", + "io.ReadWriteSeeker": "io", + "io.ReadWriter": "io", + "io.Reader": "io", + "io.ReaderAt": "io", + "io.ReaderFrom": "io", + "io.RuneReader": "io", + "io.RuneScanner": "io", + "io.SectionReader": "io", + "io.SeekCurrent": "io", + "io.SeekEnd": "io", + "io.SeekStart": "io", + 
"io.Seeker": "io", + "io.TeeReader": "io", + "io.WriteCloser": "io", + "io.WriteSeeker": "io", + "io.WriteString": "io", + "io.Writer": "io", + "io.WriterAt": "io", + "io.WriterTo": "io", + "iotest.DataErrReader": "testing/iotest", + "iotest.ErrTimeout": "testing/iotest", + "iotest.HalfReader": "testing/iotest", + "iotest.NewReadLogger": "testing/iotest", + "iotest.NewWriteLogger": "testing/iotest", + "iotest.OneByteReader": "testing/iotest", + "iotest.TimeoutReader": "testing/iotest", + "iotest.TruncateWriter": "testing/iotest", + "ioutil.Discard": "io/ioutil", + "ioutil.NopCloser": "io/ioutil", + "ioutil.ReadAll": "io/ioutil", + "ioutil.ReadDir": "io/ioutil", + "ioutil.ReadFile": "io/ioutil", + "ioutil.TempDir": "io/ioutil", + "ioutil.TempFile": "io/ioutil", + "ioutil.WriteFile": "io/ioutil", + "jpeg.Decode": "image/jpeg", + "jpeg.DecodeConfig": "image/jpeg", + "jpeg.DefaultQuality": "image/jpeg", + "jpeg.Encode": "image/jpeg", + "jpeg.FormatError": "image/jpeg", + "jpeg.Options": "image/jpeg", + "jpeg.Reader": "image/jpeg", + "jpeg.UnsupportedError": "image/jpeg", + "json.Compact": "encoding/json", + "json.Decoder": "encoding/json", + "json.Delim": "encoding/json", + "json.Encoder": "encoding/json", + "json.HTMLEscape": "encoding/json", + "json.Indent": "encoding/json", + "json.InvalidUTF8Error": "encoding/json", + "json.InvalidUnmarshalError": "encoding/json", + "json.Marshal": "encoding/json", + "json.MarshalIndent": "encoding/json", + "json.Marshaler": "encoding/json", + "json.MarshalerError": "encoding/json", + "json.NewDecoder": "encoding/json", + "json.NewEncoder": "encoding/json", + "json.Number": "encoding/json", + "json.RawMessage": "encoding/json", + "json.SyntaxError": "encoding/json", + "json.Token": "encoding/json", + "json.Unmarshal": "encoding/json", + "json.UnmarshalFieldError": "encoding/json", + "json.UnmarshalTypeError": "encoding/json", + "json.Unmarshaler": "encoding/json", + "json.UnsupportedTypeError": "encoding/json", + 
"json.UnsupportedValueError": "encoding/json", + "jsonrpc.Dial": "net/rpc/jsonrpc", + "jsonrpc.NewClient": "net/rpc/jsonrpc", + "jsonrpc.NewClientCodec": "net/rpc/jsonrpc", + "jsonrpc.NewServerCodec": "net/rpc/jsonrpc", + "jsonrpc.ServeConn": "net/rpc/jsonrpc", + "list.Element": "container/list", + "list.List": "container/list", + "list.New": "container/list", + "log.Fatal": "log", + "log.Fatalf": "log", + "log.Fatalln": "log", + "log.Flags": "log", + "log.LUTC": "log", + "log.Ldate": "log", + "log.Llongfile": "log", + "log.Lmicroseconds": "log", + "log.Logger": "log", + "log.Lshortfile": "log", + "log.LstdFlags": "log", + "log.Ltime": "log", + "log.New": "log", + "log.Output": "log", + "log.Panic": "log", + "log.Panicf": "log", + "log.Panicln": "log", + "log.Prefix": "log", + "log.Print": "log", + "log.Printf": "log", + "log.Println": "log", + "log.SetFlags": "log", + "log.SetOutput": "log", + "log.SetPrefix": "log", + "lzw.LSB": "compress/lzw", + "lzw.MSB": "compress/lzw", + "lzw.NewReader": "compress/lzw", + "lzw.NewWriter": "compress/lzw", + "lzw.Order": "compress/lzw", + "macho.Cpu": "debug/macho", + "macho.Cpu386": "debug/macho", + "macho.CpuAmd64": "debug/macho", + "macho.CpuArm": "debug/macho", + "macho.CpuPpc": "debug/macho", + "macho.CpuPpc64": "debug/macho", + "macho.Dylib": "debug/macho", + "macho.DylibCmd": "debug/macho", + "macho.Dysymtab": "debug/macho", + "macho.DysymtabCmd": "debug/macho", + "macho.ErrNotFat": "debug/macho", + "macho.FatArch": "debug/macho", + "macho.FatArchHeader": "debug/macho", + "macho.FatFile": "debug/macho", + "macho.File": "debug/macho", + "macho.FileHeader": "debug/macho", + "macho.FormatError": "debug/macho", + "macho.Load": "debug/macho", + "macho.LoadBytes": "debug/macho", + "macho.LoadCmd": "debug/macho", + "macho.LoadCmdDylib": "debug/macho", + "macho.LoadCmdDylinker": "debug/macho", + "macho.LoadCmdDysymtab": "debug/macho", + "macho.LoadCmdSegment": "debug/macho", + "macho.LoadCmdSegment64": "debug/macho", + 
"macho.LoadCmdSymtab": "debug/macho", + "macho.LoadCmdThread": "debug/macho", + "macho.LoadCmdUnixThread": "debug/macho", + "macho.Magic32": "debug/macho", + "macho.Magic64": "debug/macho", + "macho.MagicFat": "debug/macho", + "macho.NewFatFile": "debug/macho", + "macho.NewFile": "debug/macho", + "macho.Nlist32": "debug/macho", + "macho.Nlist64": "debug/macho", + "macho.Open": "debug/macho", + "macho.OpenFat": "debug/macho", + "macho.Regs386": "debug/macho", + "macho.RegsAMD64": "debug/macho", + "macho.Section": "debug/macho", + "macho.Section32": "debug/macho", + "macho.Section64": "debug/macho", + "macho.SectionHeader": "debug/macho", + "macho.Segment": "debug/macho", + "macho.Segment32": "debug/macho", + "macho.Segment64": "debug/macho", + "macho.SegmentHeader": "debug/macho", + "macho.Symbol": "debug/macho", + "macho.Symtab": "debug/macho", + "macho.SymtabCmd": "debug/macho", + "macho.Thread": "debug/macho", + "macho.Type": "debug/macho", + "macho.TypeBundle": "debug/macho", + "macho.TypeDylib": "debug/macho", + "macho.TypeExec": "debug/macho", + "macho.TypeObj": "debug/macho", + "mail.Address": "net/mail", + "mail.AddressParser": "net/mail", + "mail.ErrHeaderNotPresent": "net/mail", + "mail.Header": "net/mail", + "mail.Message": "net/mail", + "mail.ParseAddress": "net/mail", + "mail.ParseAddressList": "net/mail", + "mail.ParseDate": "net/mail", + "mail.ReadMessage": "net/mail", + "math.Abs": "math", + "math.Acos": "math", + "math.Acosh": "math", + "math.Asin": "math", + "math.Asinh": "math", + "math.Atan": "math", + "math.Atan2": "math", + "math.Atanh": "math", + "math.Cbrt": "math", + "math.Ceil": "math", + "math.Copysign": "math", + "math.Cos": "math", + "math.Cosh": "math", + "math.Dim": "math", + "math.E": "math", + "math.Erf": "math", + "math.Erfc": "math", + "math.Exp": "math", + "math.Exp2": "math", + "math.Expm1": "math", + "math.Float32bits": "math", + "math.Float32frombits": "math", + "math.Float64bits": "math", + "math.Float64frombits": "math", + 
"math.Floor": "math", + "math.Frexp": "math", + "math.Gamma": "math", + "math.Hypot": "math", + "math.Ilogb": "math", + "math.Inf": "math", + "math.IsInf": "math", + "math.IsNaN": "math", + "math.J0": "math", + "math.J1": "math", + "math.Jn": "math", + "math.Ldexp": "math", + "math.Lgamma": "math", + "math.Ln10": "math", + "math.Ln2": "math", + "math.Log": "math", + "math.Log10": "math", + "math.Log10E": "math", + "math.Log1p": "math", + "math.Log2": "math", + "math.Log2E": "math", + "math.Logb": "math", + "math.Max": "math", + "math.MaxFloat32": "math", + "math.MaxFloat64": "math", + "math.MaxInt16": "math", + "math.MaxInt32": "math", + "math.MaxInt64": "math", + "math.MaxInt8": "math", + "math.MaxUint16": "math", + "math.MaxUint32": "math", + "math.MaxUint64": "math", + "math.MaxUint8": "math", + "math.Min": "math", + "math.MinInt16": "math", + "math.MinInt32": "math", + "math.MinInt64": "math", + "math.MinInt8": "math", + "math.Mod": "math", + "math.Modf": "math", + "math.NaN": "math", + "math.Nextafter": "math", + "math.Nextafter32": "math", + "math.Phi": "math", + "math.Pi": "math", + "math.Pow": "math", + "math.Pow10": "math", + "math.Remainder": "math", + "math.Signbit": "math", + "math.Sin": "math", + "math.Sincos": "math", + "math.Sinh": "math", + "math.SmallestNonzeroFloat32": "math", + "math.SmallestNonzeroFloat64": "math", + "math.Sqrt": "math", + "math.Sqrt2": "math", + "math.SqrtE": "math", + "math.SqrtPhi": "math", + "math.SqrtPi": "math", + "math.Tan": "math", + "math.Tanh": "math", + "math.Trunc": "math", + "math.Y0": "math", + "math.Y1": "math", + "math.Yn": "math", + "md5.BlockSize": "crypto/md5", + "md5.New": "crypto/md5", + "md5.Size": "crypto/md5", + "md5.Sum": "crypto/md5", + "mime.AddExtensionType": "mime", + "mime.BEncoding": "mime", + "mime.ExtensionsByType": "mime", + "mime.FormatMediaType": "mime", + "mime.ParseMediaType": "mime", + "mime.QEncoding": "mime", + "mime.TypeByExtension": "mime", + "mime.WordDecoder": "mime", + 
"mime.WordEncoder": "mime", + "multipart.File": "mime/multipart", + "multipart.FileHeader": "mime/multipart", + "multipart.Form": "mime/multipart", + "multipart.NewReader": "mime/multipart", + "multipart.NewWriter": "mime/multipart", + "multipart.Part": "mime/multipart", + "multipart.Reader": "mime/multipart", + "multipart.Writer": "mime/multipart", + "net.Addr": "net", + "net.AddrError": "net", + "net.Buffers": "net", + "net.CIDRMask": "net", + "net.Conn": "net", + "net.DNSConfigError": "net", + "net.DNSError": "net", + "net.DefaultResolver": "net", + "net.Dial": "net", + "net.DialIP": "net", + "net.DialTCP": "net", + "net.DialTimeout": "net", + "net.DialUDP": "net", + "net.DialUnix": "net", + "net.Dialer": "net", + "net.ErrWriteToConnected": "net", + "net.Error": "net", + "net.FileConn": "net", + "net.FileListener": "net", + "net.FilePacketConn": "net", + "net.FlagBroadcast": "net", + "net.FlagLoopback": "net", + "net.FlagMulticast": "net", + "net.FlagPointToPoint": "net", + "net.FlagUp": "net", + "net.Flags": "net", + "net.HardwareAddr": "net", + "net.IP": "net", + "net.IPAddr": "net", + "net.IPConn": "net", + "net.IPMask": "net", + "net.IPNet": "net", + "net.IPv4": "net", + "net.IPv4Mask": "net", + "net.IPv4allrouter": "net", + "net.IPv4allsys": "net", + "net.IPv4bcast": "net", + "net.IPv4len": "net", + "net.IPv4zero": "net", + "net.IPv6interfacelocalallnodes": "net", + "net.IPv6len": "net", + "net.IPv6linklocalallnodes": "net", + "net.IPv6linklocalallrouters": "net", + "net.IPv6loopback": "net", + "net.IPv6unspecified": "net", + "net.IPv6zero": "net", + "net.Interface": "net", + "net.InterfaceAddrs": "net", + "net.InterfaceByIndex": "net", + "net.InterfaceByName": "net", + "net.Interfaces": "net", + "net.InvalidAddrError": "net", + "net.JoinHostPort": "net", + "net.Listen": "net", + "net.ListenIP": "net", + "net.ListenMulticastUDP": "net", + "net.ListenPacket": "net", + "net.ListenTCP": "net", + "net.ListenUDP": "net", + "net.ListenUnix": "net", + 
"net.ListenUnixgram": "net", + "net.Listener": "net", + "net.LookupAddr": "net", + "net.LookupCNAME": "net", + "net.LookupHost": "net", + "net.LookupIP": "net", + "net.LookupMX": "net", + "net.LookupNS": "net", + "net.LookupPort": "net", + "net.LookupSRV": "net", + "net.LookupTXT": "net", + "net.MX": "net", + "net.NS": "net", + "net.OpError": "net", + "net.PacketConn": "net", + "net.ParseCIDR": "net", + "net.ParseError": "net", + "net.ParseIP": "net", + "net.ParseMAC": "net", + "net.Pipe": "net", + "net.ResolveIPAddr": "net", + "net.ResolveTCPAddr": "net", + "net.ResolveUDPAddr": "net", + "net.ResolveUnixAddr": "net", + "net.Resolver": "net", + "net.SRV": "net", + "net.SplitHostPort": "net", + "net.TCPAddr": "net", + "net.TCPConn": "net", + "net.TCPListener": "net", + "net.UDPAddr": "net", + "net.UDPConn": "net", + "net.UnixAddr": "net", + "net.UnixConn": "net", + "net.UnixListener": "net", + "net.UnknownNetworkError": "net", + "os.Args": "os", + "os.Chdir": "os", + "os.Chmod": "os", + "os.Chown": "os", + "os.Chtimes": "os", + "os.Clearenv": "os", + "os.Create": "os", + "os.DevNull": "os", + "os.Environ": "os", + "os.ErrClosed": "os", + "os.ErrExist": "os", + "os.ErrInvalid": "os", + "os.ErrNotExist": "os", + "os.ErrPermission": "os", + "os.Executable": "os", + "os.Exit": "os", + "os.Expand": "os", + "os.ExpandEnv": "os", + "os.File": "os", + "os.FileInfo": "os", + "os.FileMode": "os", + "os.FindProcess": "os", + "os.Getegid": "os", + "os.Getenv": "os", + "os.Geteuid": "os", + "os.Getgid": "os", + "os.Getgroups": "os", + "os.Getpagesize": "os", + "os.Getpid": "os", + "os.Getppid": "os", + "os.Getuid": "os", + "os.Getwd": "os", + "os.Hostname": "os", + "os.Interrupt": "os", + "os.IsExist": "os", + "os.IsNotExist": "os", + "os.IsPathSeparator": "os", + "os.IsPermission": "os", + "os.Kill": "os", + "os.Lchown": "os", + "os.Link": "os", + "os.LinkError": "os", + "os.LookupEnv": "os", + "os.Lstat": "os", + "os.Mkdir": "os", + "os.MkdirAll": "os", + "os.ModeAppend": 
"os", + "os.ModeCharDevice": "os", + "os.ModeDevice": "os", + "os.ModeDir": "os", + "os.ModeExclusive": "os", + "os.ModeNamedPipe": "os", + "os.ModePerm": "os", + "os.ModeSetgid": "os", + "os.ModeSetuid": "os", + "os.ModeSocket": "os", + "os.ModeSticky": "os", + "os.ModeSymlink": "os", + "os.ModeTemporary": "os", + "os.ModeType": "os", + "os.NewFile": "os", + "os.NewSyscallError": "os", + "os.O_APPEND": "os", + "os.O_CREATE": "os", + "os.O_EXCL": "os", + "os.O_RDONLY": "os", + "os.O_RDWR": "os", + "os.O_SYNC": "os", + "os.O_TRUNC": "os", + "os.O_WRONLY": "os", + "os.Open": "os", + "os.OpenFile": "os", + "os.PathError": "os", + "os.PathListSeparator": "os", + "os.PathSeparator": "os", + "os.Pipe": "os", + "os.ProcAttr": "os", + "os.Process": "os", + "os.ProcessState": "os", + "os.Readlink": "os", + "os.Remove": "os", + "os.RemoveAll": "os", + "os.Rename": "os", + "os.SEEK_CUR": "os", + "os.SEEK_END": "os", + "os.SEEK_SET": "os", + "os.SameFile": "os", + "os.Setenv": "os", + "os.Signal": "os", + "os.StartProcess": "os", + "os.Stat": "os", + "os.Stderr": "os", + "os.Stdin": "os", + "os.Stdout": "os", + "os.Symlink": "os", + "os.SyscallError": "os", + "os.TempDir": "os", + "os.Truncate": "os", + "os.Unsetenv": "os", + "palette.Plan9": "image/color/palette", + "palette.WebSafe": "image/color/palette", + "parse.ActionNode": "text/template/parse", + "parse.BoolNode": "text/template/parse", + "parse.BranchNode": "text/template/parse", + "parse.ChainNode": "text/template/parse", + "parse.CommandNode": "text/template/parse", + "parse.DotNode": "text/template/parse", + "parse.FieldNode": "text/template/parse", + "parse.IdentifierNode": "text/template/parse", + "parse.IfNode": "text/template/parse", + "parse.IsEmptyTree": "text/template/parse", + "parse.ListNode": "text/template/parse", + "parse.New": "text/template/parse", + "parse.NewIdentifier": "text/template/parse", + "parse.NilNode": "text/template/parse", + "parse.Node": "text/template/parse", + "parse.NodeAction": 
"text/template/parse", + "parse.NodeBool": "text/template/parse", + "parse.NodeChain": "text/template/parse", + "parse.NodeCommand": "text/template/parse", + "parse.NodeDot": "text/template/parse", + "parse.NodeField": "text/template/parse", + "parse.NodeIdentifier": "text/template/parse", + "parse.NodeIf": "text/template/parse", + "parse.NodeList": "text/template/parse", + "parse.NodeNil": "text/template/parse", + "parse.NodeNumber": "text/template/parse", + "parse.NodePipe": "text/template/parse", + "parse.NodeRange": "text/template/parse", + "parse.NodeString": "text/template/parse", + "parse.NodeTemplate": "text/template/parse", + "parse.NodeText": "text/template/parse", + "parse.NodeType": "text/template/parse", + "parse.NodeVariable": "text/template/parse", + "parse.NodeWith": "text/template/parse", + "parse.NumberNode": "text/template/parse", + "parse.Parse": "text/template/parse", + "parse.PipeNode": "text/template/parse", + "parse.Pos": "text/template/parse", + "parse.RangeNode": "text/template/parse", + "parse.StringNode": "text/template/parse", + "parse.TemplateNode": "text/template/parse", + "parse.TextNode": "text/template/parse", + "parse.Tree": "text/template/parse", + "parse.VariableNode": "text/template/parse", + "parse.WithNode": "text/template/parse", + "parser.AllErrors": "go/parser", + "parser.DeclarationErrors": "go/parser", + "parser.ImportsOnly": "go/parser", + "parser.Mode": "go/parser", + "parser.PackageClauseOnly": "go/parser", + "parser.ParseComments": "go/parser", + "parser.ParseDir": "go/parser", + "parser.ParseExpr": "go/parser", + "parser.ParseExprFrom": "go/parser", + "parser.ParseFile": "go/parser", + "parser.SpuriousErrors": "go/parser", + "parser.Trace": "go/parser", + "path.Base": "path", + "path.Clean": "path", + "path.Dir": "path", + "path.ErrBadPattern": "path", + "path.Ext": "path", + "path.IsAbs": "path", + "path.Join": "path", + "path.Match": "path", + "path.Split": "path", + "pe.COFFSymbol": "debug/pe", + 
"pe.COFFSymbolSize": "debug/pe", + "pe.DataDirectory": "debug/pe", + "pe.File": "debug/pe", + "pe.FileHeader": "debug/pe", + "pe.FormatError": "debug/pe", + "pe.IMAGE_FILE_MACHINE_AM33": "debug/pe", + "pe.IMAGE_FILE_MACHINE_AMD64": "debug/pe", + "pe.IMAGE_FILE_MACHINE_ARM": "debug/pe", + "pe.IMAGE_FILE_MACHINE_EBC": "debug/pe", + "pe.IMAGE_FILE_MACHINE_I386": "debug/pe", + "pe.IMAGE_FILE_MACHINE_IA64": "debug/pe", + "pe.IMAGE_FILE_MACHINE_M32R": "debug/pe", + "pe.IMAGE_FILE_MACHINE_MIPS16": "debug/pe", + "pe.IMAGE_FILE_MACHINE_MIPSFPU": "debug/pe", + "pe.IMAGE_FILE_MACHINE_MIPSFPU16": "debug/pe", + "pe.IMAGE_FILE_MACHINE_POWERPC": "debug/pe", + "pe.IMAGE_FILE_MACHINE_POWERPCFP": "debug/pe", + "pe.IMAGE_FILE_MACHINE_R4000": "debug/pe", + "pe.IMAGE_FILE_MACHINE_SH3": "debug/pe", + "pe.IMAGE_FILE_MACHINE_SH3DSP": "debug/pe", + "pe.IMAGE_FILE_MACHINE_SH4": "debug/pe", + "pe.IMAGE_FILE_MACHINE_SH5": "debug/pe", + "pe.IMAGE_FILE_MACHINE_THUMB": "debug/pe", + "pe.IMAGE_FILE_MACHINE_UNKNOWN": "debug/pe", + "pe.IMAGE_FILE_MACHINE_WCEMIPSV2": "debug/pe", + "pe.ImportDirectory": "debug/pe", + "pe.NewFile": "debug/pe", + "pe.Open": "debug/pe", + "pe.OptionalHeader32": "debug/pe", + "pe.OptionalHeader64": "debug/pe", + "pe.Reloc": "debug/pe", + "pe.Section": "debug/pe", + "pe.SectionHeader": "debug/pe", + "pe.SectionHeader32": "debug/pe", + "pe.StringTable": "debug/pe", + "pe.Symbol": "debug/pe", + "pem.Block": "encoding/pem", + "pem.Decode": "encoding/pem", + "pem.Encode": "encoding/pem", + "pem.EncodeToMemory": "encoding/pem", + "pkix.AlgorithmIdentifier": "crypto/x509/pkix", + "pkix.AttributeTypeAndValue": "crypto/x509/pkix", + "pkix.AttributeTypeAndValueSET": "crypto/x509/pkix", + "pkix.CertificateList": "crypto/x509/pkix", + "pkix.Extension": "crypto/x509/pkix", + "pkix.Name": "crypto/x509/pkix", + "pkix.RDNSequence": "crypto/x509/pkix", + "pkix.RelativeDistinguishedNameSET": "crypto/x509/pkix", + "pkix.RevokedCertificate": "crypto/x509/pkix", + "pkix.TBSCertificateList": 
"crypto/x509/pkix", + "plan9obj.File": "debug/plan9obj", + "plan9obj.FileHeader": "debug/plan9obj", + "plan9obj.Magic386": "debug/plan9obj", + "plan9obj.Magic64": "debug/plan9obj", + "plan9obj.MagicAMD64": "debug/plan9obj", + "plan9obj.MagicARM": "debug/plan9obj", + "plan9obj.NewFile": "debug/plan9obj", + "plan9obj.Open": "debug/plan9obj", + "plan9obj.Section": "debug/plan9obj", + "plan9obj.SectionHeader": "debug/plan9obj", + "plan9obj.Sym": "debug/plan9obj", + "plugin.Open": "plugin", + "plugin.Plugin": "plugin", + "plugin.Symbol": "plugin", + "png.BestCompression": "image/png", + "png.BestSpeed": "image/png", + "png.CompressionLevel": "image/png", + "png.Decode": "image/png", + "png.DecodeConfig": "image/png", + "png.DefaultCompression": "image/png", + "png.Encode": "image/png", + "png.Encoder": "image/png", + "png.FormatError": "image/png", + "png.NoCompression": "image/png", + "png.UnsupportedError": "image/png", + "pprof.Cmdline": "net/http/pprof", + "pprof.Handler": "net/http/pprof", + "pprof.Index": "net/http/pprof", + "pprof.Lookup": "runtime/pprof", + "pprof.NewProfile": "runtime/pprof", + // "pprof.Profile" is ambiguous + "pprof.Profiles": "runtime/pprof", + "pprof.StartCPUProfile": "runtime/pprof", + "pprof.StopCPUProfile": "runtime/pprof", + "pprof.Symbol": "net/http/pprof", + "pprof.Trace": "net/http/pprof", + "pprof.WriteHeapProfile": "runtime/pprof", + "printer.CommentedNode": "go/printer", + "printer.Config": "go/printer", + "printer.Fprint": "go/printer", + "printer.Mode": "go/printer", + "printer.RawFormat": "go/printer", + "printer.SourcePos": "go/printer", + "printer.TabIndent": "go/printer", + "printer.UseSpaces": "go/printer", + "quick.Check": "testing/quick", + "quick.CheckEqual": "testing/quick", + "quick.CheckEqualError": "testing/quick", + "quick.CheckError": "testing/quick", + "quick.Config": "testing/quick", + "quick.Generator": "testing/quick", + "quick.SetupError": "testing/quick", + "quick.Value": "testing/quick", + 
"quotedprintable.NewReader": "mime/quotedprintable", + "quotedprintable.NewWriter": "mime/quotedprintable", + "quotedprintable.Reader": "mime/quotedprintable", + "quotedprintable.Writer": "mime/quotedprintable", + "rand.ExpFloat64": "math/rand", + "rand.Float32": "math/rand", + "rand.Float64": "math/rand", + // "rand.Int" is ambiguous + "rand.Int31": "math/rand", + "rand.Int31n": "math/rand", + "rand.Int63": "math/rand", + "rand.Int63n": "math/rand", + "rand.Intn": "math/rand", + "rand.New": "math/rand", + "rand.NewSource": "math/rand", + "rand.NewZipf": "math/rand", + "rand.NormFloat64": "math/rand", + "rand.Perm": "math/rand", + "rand.Prime": "crypto/rand", + "rand.Rand": "math/rand", + // "rand.Read" is ambiguous + "rand.Reader": "crypto/rand", + "rand.Seed": "math/rand", + "rand.Source": "math/rand", + "rand.Source64": "math/rand", + "rand.Uint32": "math/rand", + "rand.Uint64": "math/rand", + "rand.Zipf": "math/rand", + "rc4.Cipher": "crypto/rc4", + "rc4.KeySizeError": "crypto/rc4", + "rc4.NewCipher": "crypto/rc4", + "reflect.Append": "reflect", + "reflect.AppendSlice": "reflect", + "reflect.Array": "reflect", + "reflect.ArrayOf": "reflect", + "reflect.Bool": "reflect", + "reflect.BothDir": "reflect", + "reflect.Chan": "reflect", + "reflect.ChanDir": "reflect", + "reflect.ChanOf": "reflect", + "reflect.Complex128": "reflect", + "reflect.Complex64": "reflect", + "reflect.Copy": "reflect", + "reflect.DeepEqual": "reflect", + "reflect.Float32": "reflect", + "reflect.Float64": "reflect", + "reflect.Func": "reflect", + "reflect.FuncOf": "reflect", + "reflect.Indirect": "reflect", + "reflect.Int": "reflect", + "reflect.Int16": "reflect", + "reflect.Int32": "reflect", + "reflect.Int64": "reflect", + "reflect.Int8": "reflect", + "reflect.Interface": "reflect", + "reflect.Invalid": "reflect", + "reflect.Kind": "reflect", + "reflect.MakeChan": "reflect", + "reflect.MakeFunc": "reflect", + "reflect.MakeMap": "reflect", + "reflect.MakeSlice": "reflect", + "reflect.Map": 
"reflect", + "reflect.MapOf": "reflect", + "reflect.Method": "reflect", + "reflect.New": "reflect", + "reflect.NewAt": "reflect", + "reflect.Ptr": "reflect", + "reflect.PtrTo": "reflect", + "reflect.RecvDir": "reflect", + "reflect.Select": "reflect", + "reflect.SelectCase": "reflect", + "reflect.SelectDefault": "reflect", + "reflect.SelectDir": "reflect", + "reflect.SelectRecv": "reflect", + "reflect.SelectSend": "reflect", + "reflect.SendDir": "reflect", + "reflect.Slice": "reflect", + "reflect.SliceHeader": "reflect", + "reflect.SliceOf": "reflect", + "reflect.String": "reflect", + "reflect.StringHeader": "reflect", + "reflect.Struct": "reflect", + "reflect.StructField": "reflect", + "reflect.StructOf": "reflect", + "reflect.StructTag": "reflect", + "reflect.Swapper": "reflect", + "reflect.TypeOf": "reflect", + "reflect.Uint": "reflect", + "reflect.Uint16": "reflect", + "reflect.Uint32": "reflect", + "reflect.Uint64": "reflect", + "reflect.Uint8": "reflect", + "reflect.Uintptr": "reflect", + "reflect.UnsafePointer": "reflect", + "reflect.Value": "reflect", + "reflect.ValueError": "reflect", + "reflect.ValueOf": "reflect", + "reflect.Zero": "reflect", + "regexp.Compile": "regexp", + "regexp.CompilePOSIX": "regexp", + "regexp.Match": "regexp", + "regexp.MatchReader": "regexp", + "regexp.MatchString": "regexp", + "regexp.MustCompile": "regexp", + "regexp.MustCompilePOSIX": "regexp", + "regexp.QuoteMeta": "regexp", + "regexp.Regexp": "regexp", + "ring.New": "container/ring", + "ring.Ring": "container/ring", + "rpc.Accept": "net/rpc", + "rpc.Call": "net/rpc", + "rpc.Client": "net/rpc", + "rpc.ClientCodec": "net/rpc", + "rpc.DefaultDebugPath": "net/rpc", + "rpc.DefaultRPCPath": "net/rpc", + "rpc.DefaultServer": "net/rpc", + "rpc.Dial": "net/rpc", + "rpc.DialHTTP": "net/rpc", + "rpc.DialHTTPPath": "net/rpc", + "rpc.ErrShutdown": "net/rpc", + "rpc.HandleHTTP": "net/rpc", + "rpc.NewClient": "net/rpc", + "rpc.NewClientWithCodec": "net/rpc", + "rpc.NewServer": "net/rpc", + 
"rpc.Register": "net/rpc", + "rpc.RegisterName": "net/rpc", + "rpc.Request": "net/rpc", + "rpc.Response": "net/rpc", + "rpc.ServeCodec": "net/rpc", + "rpc.ServeConn": "net/rpc", + "rpc.ServeRequest": "net/rpc", + "rpc.Server": "net/rpc", + "rpc.ServerCodec": "net/rpc", + "rpc.ServerError": "net/rpc", + "rsa.CRTValue": "crypto/rsa", + "rsa.DecryptOAEP": "crypto/rsa", + "rsa.DecryptPKCS1v15": "crypto/rsa", + "rsa.DecryptPKCS1v15SessionKey": "crypto/rsa", + "rsa.EncryptOAEP": "crypto/rsa", + "rsa.EncryptPKCS1v15": "crypto/rsa", + "rsa.ErrDecryption": "crypto/rsa", + "rsa.ErrMessageTooLong": "crypto/rsa", + "rsa.ErrVerification": "crypto/rsa", + "rsa.GenerateKey": "crypto/rsa", + "rsa.GenerateMultiPrimeKey": "crypto/rsa", + "rsa.OAEPOptions": "crypto/rsa", + "rsa.PKCS1v15DecryptOptions": "crypto/rsa", + "rsa.PSSOptions": "crypto/rsa", + "rsa.PSSSaltLengthAuto": "crypto/rsa", + "rsa.PSSSaltLengthEqualsHash": "crypto/rsa", + "rsa.PrecomputedValues": "crypto/rsa", + "rsa.PrivateKey": "crypto/rsa", + "rsa.PublicKey": "crypto/rsa", + "rsa.SignPKCS1v15": "crypto/rsa", + "rsa.SignPSS": "crypto/rsa", + "rsa.VerifyPKCS1v15": "crypto/rsa", + "rsa.VerifyPSS": "crypto/rsa", + "runtime.BlockProfile": "runtime", + "runtime.BlockProfileRecord": "runtime", + "runtime.Breakpoint": "runtime", + "runtime.CPUProfile": "runtime", + "runtime.Caller": "runtime", + "runtime.Callers": "runtime", + "runtime.CallersFrames": "runtime", + "runtime.Compiler": "runtime", + "runtime.Error": "runtime", + "runtime.Frame": "runtime", + "runtime.Frames": "runtime", + "runtime.Func": "runtime", + "runtime.FuncForPC": "runtime", + "runtime.GC": "runtime", + "runtime.GOARCH": "runtime", + "runtime.GOMAXPROCS": "runtime", + "runtime.GOOS": "runtime", + "runtime.GOROOT": "runtime", + "runtime.Goexit": "runtime", + "runtime.GoroutineProfile": "runtime", + "runtime.Gosched": "runtime", + "runtime.KeepAlive": "runtime", + "runtime.LockOSThread": "runtime", + "runtime.MemProfile": "runtime", + 
"runtime.MemProfileRate": "runtime", + "runtime.MemProfileRecord": "runtime", + "runtime.MemStats": "runtime", + "runtime.MutexProfile": "runtime", + "runtime.NumCPU": "runtime", + "runtime.NumCgoCall": "runtime", + "runtime.NumGoroutine": "runtime", + "runtime.ReadMemStats": "runtime", + "runtime.ReadTrace": "runtime", + "runtime.SetBlockProfileRate": "runtime", + "runtime.SetCPUProfileRate": "runtime", + "runtime.SetCgoTraceback": "runtime", + "runtime.SetFinalizer": "runtime", + "runtime.SetMutexProfileFraction": "runtime", + "runtime.Stack": "runtime", + "runtime.StackRecord": "runtime", + "runtime.StartTrace": "runtime", + "runtime.StopTrace": "runtime", + "runtime.ThreadCreateProfile": "runtime", + "runtime.TypeAssertionError": "runtime", + "runtime.UnlockOSThread": "runtime", + "runtime.Version": "runtime", + "scanner.Char": "text/scanner", + "scanner.Comment": "text/scanner", + "scanner.EOF": "text/scanner", + "scanner.Error": "go/scanner", + "scanner.ErrorHandler": "go/scanner", + "scanner.ErrorList": "go/scanner", + "scanner.Float": "text/scanner", + "scanner.GoTokens": "text/scanner", + "scanner.GoWhitespace": "text/scanner", + "scanner.Ident": "text/scanner", + "scanner.Int": "text/scanner", + "scanner.Mode": "go/scanner", + "scanner.Position": "text/scanner", + "scanner.PrintError": "go/scanner", + "scanner.RawString": "text/scanner", + "scanner.ScanChars": "text/scanner", + // "scanner.ScanComments" is ambiguous + "scanner.ScanFloats": "text/scanner", + "scanner.ScanIdents": "text/scanner", + "scanner.ScanInts": "text/scanner", + "scanner.ScanRawStrings": "text/scanner", + "scanner.ScanStrings": "text/scanner", + // "scanner.Scanner" is ambiguous + "scanner.SkipComments": "text/scanner", + "scanner.String": "text/scanner", + "scanner.TokenString": "text/scanner", + "sha1.BlockSize": "crypto/sha1", + "sha1.New": "crypto/sha1", + "sha1.Size": "crypto/sha1", + "sha1.Sum": "crypto/sha1", + "sha256.BlockSize": "crypto/sha256", + "sha256.New": 
"crypto/sha256", + "sha256.New224": "crypto/sha256", + "sha256.Size": "crypto/sha256", + "sha256.Size224": "crypto/sha256", + "sha256.Sum224": "crypto/sha256", + "sha256.Sum256": "crypto/sha256", + "sha512.BlockSize": "crypto/sha512", + "sha512.New": "crypto/sha512", + "sha512.New384": "crypto/sha512", + "sha512.New512_224": "crypto/sha512", + "sha512.New512_256": "crypto/sha512", + "sha512.Size": "crypto/sha512", + "sha512.Size224": "crypto/sha512", + "sha512.Size256": "crypto/sha512", + "sha512.Size384": "crypto/sha512", + "sha512.Sum384": "crypto/sha512", + "sha512.Sum512": "crypto/sha512", + "sha512.Sum512_224": "crypto/sha512", + "sha512.Sum512_256": "crypto/sha512", + "signal.Ignore": "os/signal", + "signal.Notify": "os/signal", + "signal.Reset": "os/signal", + "signal.Stop": "os/signal", + "smtp.Auth": "net/smtp", + "smtp.CRAMMD5Auth": "net/smtp", + "smtp.Client": "net/smtp", + "smtp.Dial": "net/smtp", + "smtp.NewClient": "net/smtp", + "smtp.PlainAuth": "net/smtp", + "smtp.SendMail": "net/smtp", + "smtp.ServerInfo": "net/smtp", + "sort.Float64Slice": "sort", + "sort.Float64s": "sort", + "sort.Float64sAreSorted": "sort", + "sort.IntSlice": "sort", + "sort.Interface": "sort", + "sort.Ints": "sort", + "sort.IntsAreSorted": "sort", + "sort.IsSorted": "sort", + "sort.Reverse": "sort", + "sort.Search": "sort", + "sort.SearchFloat64s": "sort", + "sort.SearchInts": "sort", + "sort.SearchStrings": "sort", + "sort.Slice": "sort", + "sort.SliceIsSorted": "sort", + "sort.SliceStable": "sort", + "sort.Sort": "sort", + "sort.Stable": "sort", + "sort.StringSlice": "sort", + "sort.Strings": "sort", + "sort.StringsAreSorted": "sort", + "sql.ColumnType": "database/sql", + "sql.DB": "database/sql", + "sql.DBStats": "database/sql", + "sql.Drivers": "database/sql", + "sql.ErrNoRows": "database/sql", + "sql.ErrTxDone": "database/sql", + "sql.IsolationLevel": "database/sql", + "sql.LevelDefault": "database/sql", + "sql.LevelLinearizable": "database/sql", + 
"sql.LevelReadCommitted": "database/sql", + "sql.LevelReadUncommitted": "database/sql", + "sql.LevelRepeatableRead": "database/sql", + "sql.LevelSerializable": "database/sql", + "sql.LevelSnapshot": "database/sql", + "sql.LevelWriteCommitted": "database/sql", + "sql.Named": "database/sql", + "sql.NamedArg": "database/sql", + "sql.NullBool": "database/sql", + "sql.NullFloat64": "database/sql", + "sql.NullInt64": "database/sql", + "sql.NullString": "database/sql", + "sql.Open": "database/sql", + "sql.RawBytes": "database/sql", + "sql.Register": "database/sql", + "sql.Result": "database/sql", + "sql.Row": "database/sql", + "sql.Rows": "database/sql", + "sql.Scanner": "database/sql", + "sql.Stmt": "database/sql", + "sql.Tx": "database/sql", + "sql.TxOptions": "database/sql", + "strconv.AppendBool": "strconv", + "strconv.AppendFloat": "strconv", + "strconv.AppendInt": "strconv", + "strconv.AppendQuote": "strconv", + "strconv.AppendQuoteRune": "strconv", + "strconv.AppendQuoteRuneToASCII": "strconv", + "strconv.AppendQuoteRuneToGraphic": "strconv", + "strconv.AppendQuoteToASCII": "strconv", + "strconv.AppendQuoteToGraphic": "strconv", + "strconv.AppendUint": "strconv", + "strconv.Atoi": "strconv", + "strconv.CanBackquote": "strconv", + "strconv.ErrRange": "strconv", + "strconv.ErrSyntax": "strconv", + "strconv.FormatBool": "strconv", + "strconv.FormatFloat": "strconv", + "strconv.FormatInt": "strconv", + "strconv.FormatUint": "strconv", + "strconv.IntSize": "strconv", + "strconv.IsGraphic": "strconv", + "strconv.IsPrint": "strconv", + "strconv.Itoa": "strconv", + "strconv.NumError": "strconv", + "strconv.ParseBool": "strconv", + "strconv.ParseFloat": "strconv", + "strconv.ParseInt": "strconv", + "strconv.ParseUint": "strconv", + "strconv.Quote": "strconv", + "strconv.QuoteRune": "strconv", + "strconv.QuoteRuneToASCII": "strconv", + "strconv.QuoteRuneToGraphic": "strconv", + "strconv.QuoteToASCII": "strconv", + "strconv.QuoteToGraphic": "strconv", + "strconv.Unquote": 
"strconv", + "strconv.UnquoteChar": "strconv", + "strings.Compare": "strings", + "strings.Contains": "strings", + "strings.ContainsAny": "strings", + "strings.ContainsRune": "strings", + "strings.Count": "strings", + "strings.EqualFold": "strings", + "strings.Fields": "strings", + "strings.FieldsFunc": "strings", + "strings.HasPrefix": "strings", + "strings.HasSuffix": "strings", + "strings.Index": "strings", + "strings.IndexAny": "strings", + "strings.IndexByte": "strings", + "strings.IndexFunc": "strings", + "strings.IndexRune": "strings", + "strings.Join": "strings", + "strings.LastIndex": "strings", + "strings.LastIndexAny": "strings", + "strings.LastIndexByte": "strings", + "strings.LastIndexFunc": "strings", + "strings.Map": "strings", + "strings.NewReader": "strings", + "strings.NewReplacer": "strings", + "strings.Reader": "strings", + "strings.Repeat": "strings", + "strings.Replace": "strings", + "strings.Replacer": "strings", + "strings.Split": "strings", + "strings.SplitAfter": "strings", + "strings.SplitAfterN": "strings", + "strings.SplitN": "strings", + "strings.Title": "strings", + "strings.ToLower": "strings", + "strings.ToLowerSpecial": "strings", + "strings.ToTitle": "strings", + "strings.ToTitleSpecial": "strings", + "strings.ToUpper": "strings", + "strings.ToUpperSpecial": "strings", + "strings.Trim": "strings", + "strings.TrimFunc": "strings", + "strings.TrimLeft": "strings", + "strings.TrimLeftFunc": "strings", + "strings.TrimPrefix": "strings", + "strings.TrimRight": "strings", + "strings.TrimRightFunc": "strings", + "strings.TrimSpace": "strings", + "strings.TrimSuffix": "strings", + "subtle.ConstantTimeByteEq": "crypto/subtle", + "subtle.ConstantTimeCompare": "crypto/subtle", + "subtle.ConstantTimeCopy": "crypto/subtle", + "subtle.ConstantTimeEq": "crypto/subtle", + "subtle.ConstantTimeLessOrEq": "crypto/subtle", + "subtle.ConstantTimeSelect": "crypto/subtle", + "suffixarray.Index": "index/suffixarray", + "suffixarray.New": 
"index/suffixarray", + "sync.Cond": "sync", + "sync.Locker": "sync", + "sync.Mutex": "sync", + "sync.NewCond": "sync", + "sync.Once": "sync", + "sync.Pool": "sync", + "sync.RWMutex": "sync", + "sync.WaitGroup": "sync", + "syntax.ClassNL": "regexp/syntax", + "syntax.Compile": "regexp/syntax", + "syntax.DotNL": "regexp/syntax", + "syntax.EmptyBeginLine": "regexp/syntax", + "syntax.EmptyBeginText": "regexp/syntax", + "syntax.EmptyEndLine": "regexp/syntax", + "syntax.EmptyEndText": "regexp/syntax", + "syntax.EmptyNoWordBoundary": "regexp/syntax", + "syntax.EmptyOp": "regexp/syntax", + "syntax.EmptyOpContext": "regexp/syntax", + "syntax.EmptyWordBoundary": "regexp/syntax", + "syntax.ErrInternalError": "regexp/syntax", + "syntax.ErrInvalidCharClass": "regexp/syntax", + "syntax.ErrInvalidCharRange": "regexp/syntax", + "syntax.ErrInvalidEscape": "regexp/syntax", + "syntax.ErrInvalidNamedCapture": "regexp/syntax", + "syntax.ErrInvalidPerlOp": "regexp/syntax", + "syntax.ErrInvalidRepeatOp": "regexp/syntax", + "syntax.ErrInvalidRepeatSize": "regexp/syntax", + "syntax.ErrInvalidUTF8": "regexp/syntax", + "syntax.ErrMissingBracket": "regexp/syntax", + "syntax.ErrMissingParen": "regexp/syntax", + "syntax.ErrMissingRepeatArgument": "regexp/syntax", + "syntax.ErrTrailingBackslash": "regexp/syntax", + "syntax.ErrUnexpectedParen": "regexp/syntax", + "syntax.Error": "regexp/syntax", + "syntax.ErrorCode": "regexp/syntax", + "syntax.Flags": "regexp/syntax", + "syntax.FoldCase": "regexp/syntax", + "syntax.Inst": "regexp/syntax", + "syntax.InstAlt": "regexp/syntax", + "syntax.InstAltMatch": "regexp/syntax", + "syntax.InstCapture": "regexp/syntax", + "syntax.InstEmptyWidth": "regexp/syntax", + "syntax.InstFail": "regexp/syntax", + "syntax.InstMatch": "regexp/syntax", + "syntax.InstNop": "regexp/syntax", + "syntax.InstOp": "regexp/syntax", + "syntax.InstRune": "regexp/syntax", + "syntax.InstRune1": "regexp/syntax", + "syntax.InstRuneAny": "regexp/syntax", + "syntax.InstRuneAnyNotNL": 
"regexp/syntax", + "syntax.IsWordChar": "regexp/syntax", + "syntax.Literal": "regexp/syntax", + "syntax.MatchNL": "regexp/syntax", + "syntax.NonGreedy": "regexp/syntax", + "syntax.OneLine": "regexp/syntax", + "syntax.Op": "regexp/syntax", + "syntax.OpAlternate": "regexp/syntax", + "syntax.OpAnyChar": "regexp/syntax", + "syntax.OpAnyCharNotNL": "regexp/syntax", + "syntax.OpBeginLine": "regexp/syntax", + "syntax.OpBeginText": "regexp/syntax", + "syntax.OpCapture": "regexp/syntax", + "syntax.OpCharClass": "regexp/syntax", + "syntax.OpConcat": "regexp/syntax", + "syntax.OpEmptyMatch": "regexp/syntax", + "syntax.OpEndLine": "regexp/syntax", + "syntax.OpEndText": "regexp/syntax", + "syntax.OpLiteral": "regexp/syntax", + "syntax.OpNoMatch": "regexp/syntax", + "syntax.OpNoWordBoundary": "regexp/syntax", + "syntax.OpPlus": "regexp/syntax", + "syntax.OpQuest": "regexp/syntax", + "syntax.OpRepeat": "regexp/syntax", + "syntax.OpStar": "regexp/syntax", + "syntax.OpWordBoundary": "regexp/syntax", + "syntax.POSIX": "regexp/syntax", + "syntax.Parse": "regexp/syntax", + "syntax.Perl": "regexp/syntax", + "syntax.PerlX": "regexp/syntax", + "syntax.Prog": "regexp/syntax", + "syntax.Regexp": "regexp/syntax", + "syntax.Simple": "regexp/syntax", + "syntax.UnicodeGroups": "regexp/syntax", + "syntax.WasDollar": "regexp/syntax", + "syscall.AF_ALG": "syscall", + "syscall.AF_APPLETALK": "syscall", + "syscall.AF_ARP": "syscall", + "syscall.AF_ASH": "syscall", + "syscall.AF_ATM": "syscall", + "syscall.AF_ATMPVC": "syscall", + "syscall.AF_ATMSVC": "syscall", + "syscall.AF_AX25": "syscall", + "syscall.AF_BLUETOOTH": "syscall", + "syscall.AF_BRIDGE": "syscall", + "syscall.AF_CAIF": "syscall", + "syscall.AF_CAN": "syscall", + "syscall.AF_CCITT": "syscall", + "syscall.AF_CHAOS": "syscall", + "syscall.AF_CNT": "syscall", + "syscall.AF_COIP": "syscall", + "syscall.AF_DATAKIT": "syscall", + "syscall.AF_DECnet": "syscall", + "syscall.AF_DLI": "syscall", + "syscall.AF_E164": "syscall", + 
"syscall.AF_ECMA": "syscall", + "syscall.AF_ECONET": "syscall", + "syscall.AF_ENCAP": "syscall", + "syscall.AF_FILE": "syscall", + "syscall.AF_HYLINK": "syscall", + "syscall.AF_IEEE80211": "syscall", + "syscall.AF_IEEE802154": "syscall", + "syscall.AF_IMPLINK": "syscall", + "syscall.AF_INET": "syscall", + "syscall.AF_INET6": "syscall", + "syscall.AF_INET6_SDP": "syscall", + "syscall.AF_INET_SDP": "syscall", + "syscall.AF_IPX": "syscall", + "syscall.AF_IRDA": "syscall", + "syscall.AF_ISDN": "syscall", + "syscall.AF_ISO": "syscall", + "syscall.AF_IUCV": "syscall", + "syscall.AF_KEY": "syscall", + "syscall.AF_LAT": "syscall", + "syscall.AF_LINK": "syscall", + "syscall.AF_LLC": "syscall", + "syscall.AF_LOCAL": "syscall", + "syscall.AF_MAX": "syscall", + "syscall.AF_MPLS": "syscall", + "syscall.AF_NATM": "syscall", + "syscall.AF_NDRV": "syscall", + "syscall.AF_NETBEUI": "syscall", + "syscall.AF_NETBIOS": "syscall", + "syscall.AF_NETGRAPH": "syscall", + "syscall.AF_NETLINK": "syscall", + "syscall.AF_NETROM": "syscall", + "syscall.AF_NS": "syscall", + "syscall.AF_OROUTE": "syscall", + "syscall.AF_OSI": "syscall", + "syscall.AF_PACKET": "syscall", + "syscall.AF_PHONET": "syscall", + "syscall.AF_PPP": "syscall", + "syscall.AF_PPPOX": "syscall", + "syscall.AF_PUP": "syscall", + "syscall.AF_RDS": "syscall", + "syscall.AF_RESERVED_36": "syscall", + "syscall.AF_ROSE": "syscall", + "syscall.AF_ROUTE": "syscall", + "syscall.AF_RXRPC": "syscall", + "syscall.AF_SCLUSTER": "syscall", + "syscall.AF_SECURITY": "syscall", + "syscall.AF_SIP": "syscall", + "syscall.AF_SLOW": "syscall", + "syscall.AF_SNA": "syscall", + "syscall.AF_SYSTEM": "syscall", + "syscall.AF_TIPC": "syscall", + "syscall.AF_UNIX": "syscall", + "syscall.AF_UNSPEC": "syscall", + "syscall.AF_VENDOR00": "syscall", + "syscall.AF_VENDOR01": "syscall", + "syscall.AF_VENDOR02": "syscall", + "syscall.AF_VENDOR03": "syscall", + "syscall.AF_VENDOR04": "syscall", + "syscall.AF_VENDOR05": "syscall", + "syscall.AF_VENDOR06": 
"syscall", + "syscall.AF_VENDOR07": "syscall", + "syscall.AF_VENDOR08": "syscall", + "syscall.AF_VENDOR09": "syscall", + "syscall.AF_VENDOR10": "syscall", + "syscall.AF_VENDOR11": "syscall", + "syscall.AF_VENDOR12": "syscall", + "syscall.AF_VENDOR13": "syscall", + "syscall.AF_VENDOR14": "syscall", + "syscall.AF_VENDOR15": "syscall", + "syscall.AF_VENDOR16": "syscall", + "syscall.AF_VENDOR17": "syscall", + "syscall.AF_VENDOR18": "syscall", + "syscall.AF_VENDOR19": "syscall", + "syscall.AF_VENDOR20": "syscall", + "syscall.AF_VENDOR21": "syscall", + "syscall.AF_VENDOR22": "syscall", + "syscall.AF_VENDOR23": "syscall", + "syscall.AF_VENDOR24": "syscall", + "syscall.AF_VENDOR25": "syscall", + "syscall.AF_VENDOR26": "syscall", + "syscall.AF_VENDOR27": "syscall", + "syscall.AF_VENDOR28": "syscall", + "syscall.AF_VENDOR29": "syscall", + "syscall.AF_VENDOR30": "syscall", + "syscall.AF_VENDOR31": "syscall", + "syscall.AF_VENDOR32": "syscall", + "syscall.AF_VENDOR33": "syscall", + "syscall.AF_VENDOR34": "syscall", + "syscall.AF_VENDOR35": "syscall", + "syscall.AF_VENDOR36": "syscall", + "syscall.AF_VENDOR37": "syscall", + "syscall.AF_VENDOR38": "syscall", + "syscall.AF_VENDOR39": "syscall", + "syscall.AF_VENDOR40": "syscall", + "syscall.AF_VENDOR41": "syscall", + "syscall.AF_VENDOR42": "syscall", + "syscall.AF_VENDOR43": "syscall", + "syscall.AF_VENDOR44": "syscall", + "syscall.AF_VENDOR45": "syscall", + "syscall.AF_VENDOR46": "syscall", + "syscall.AF_VENDOR47": "syscall", + "syscall.AF_WANPIPE": "syscall", + "syscall.AF_X25": "syscall", + "syscall.AI_CANONNAME": "syscall", + "syscall.AI_NUMERICHOST": "syscall", + "syscall.AI_PASSIVE": "syscall", + "syscall.APPLICATION_ERROR": "syscall", + "syscall.ARPHRD_ADAPT": "syscall", + "syscall.ARPHRD_APPLETLK": "syscall", + "syscall.ARPHRD_ARCNET": "syscall", + "syscall.ARPHRD_ASH": "syscall", + "syscall.ARPHRD_ATM": "syscall", + "syscall.ARPHRD_AX25": "syscall", + "syscall.ARPHRD_BIF": "syscall", + "syscall.ARPHRD_CHAOS": "syscall", 
+ "syscall.ARPHRD_CISCO": "syscall", + "syscall.ARPHRD_CSLIP": "syscall", + "syscall.ARPHRD_CSLIP6": "syscall", + "syscall.ARPHRD_DDCMP": "syscall", + "syscall.ARPHRD_DLCI": "syscall", + "syscall.ARPHRD_ECONET": "syscall", + "syscall.ARPHRD_EETHER": "syscall", + "syscall.ARPHRD_ETHER": "syscall", + "syscall.ARPHRD_EUI64": "syscall", + "syscall.ARPHRD_FCAL": "syscall", + "syscall.ARPHRD_FCFABRIC": "syscall", + "syscall.ARPHRD_FCPL": "syscall", + "syscall.ARPHRD_FCPP": "syscall", + "syscall.ARPHRD_FDDI": "syscall", + "syscall.ARPHRD_FRAD": "syscall", + "syscall.ARPHRD_FRELAY": "syscall", + "syscall.ARPHRD_HDLC": "syscall", + "syscall.ARPHRD_HIPPI": "syscall", + "syscall.ARPHRD_HWX25": "syscall", + "syscall.ARPHRD_IEEE1394": "syscall", + "syscall.ARPHRD_IEEE802": "syscall", + "syscall.ARPHRD_IEEE80211": "syscall", + "syscall.ARPHRD_IEEE80211_PRISM": "syscall", + "syscall.ARPHRD_IEEE80211_RADIOTAP": "syscall", + "syscall.ARPHRD_IEEE802154": "syscall", + "syscall.ARPHRD_IEEE802154_PHY": "syscall", + "syscall.ARPHRD_IEEE802_TR": "syscall", + "syscall.ARPHRD_INFINIBAND": "syscall", + "syscall.ARPHRD_IPDDP": "syscall", + "syscall.ARPHRD_IPGRE": "syscall", + "syscall.ARPHRD_IRDA": "syscall", + "syscall.ARPHRD_LAPB": "syscall", + "syscall.ARPHRD_LOCALTLK": "syscall", + "syscall.ARPHRD_LOOPBACK": "syscall", + "syscall.ARPHRD_METRICOM": "syscall", + "syscall.ARPHRD_NETROM": "syscall", + "syscall.ARPHRD_NONE": "syscall", + "syscall.ARPHRD_PIMREG": "syscall", + "syscall.ARPHRD_PPP": "syscall", + "syscall.ARPHRD_PRONET": "syscall", + "syscall.ARPHRD_RAWHDLC": "syscall", + "syscall.ARPHRD_ROSE": "syscall", + "syscall.ARPHRD_RSRVD": "syscall", + "syscall.ARPHRD_SIT": "syscall", + "syscall.ARPHRD_SKIP": "syscall", + "syscall.ARPHRD_SLIP": "syscall", + "syscall.ARPHRD_SLIP6": "syscall", + "syscall.ARPHRD_STRIP": "syscall", + "syscall.ARPHRD_TUNNEL": "syscall", + "syscall.ARPHRD_TUNNEL6": "syscall", + "syscall.ARPHRD_VOID": "syscall", + "syscall.ARPHRD_X25": "syscall", + 
"syscall.AUTHTYPE_CLIENT": "syscall", + "syscall.AUTHTYPE_SERVER": "syscall", + "syscall.Accept": "syscall", + "syscall.Accept4": "syscall", + "syscall.AcceptEx": "syscall", + "syscall.Access": "syscall", + "syscall.Acct": "syscall", + "syscall.AddrinfoW": "syscall", + "syscall.Adjtime": "syscall", + "syscall.Adjtimex": "syscall", + "syscall.AttachLsf": "syscall", + "syscall.B0": "syscall", + "syscall.B1000000": "syscall", + "syscall.B110": "syscall", + "syscall.B115200": "syscall", + "syscall.B1152000": "syscall", + "syscall.B1200": "syscall", + "syscall.B134": "syscall", + "syscall.B14400": "syscall", + "syscall.B150": "syscall", + "syscall.B1500000": "syscall", + "syscall.B1800": "syscall", + "syscall.B19200": "syscall", + "syscall.B200": "syscall", + "syscall.B2000000": "syscall", + "syscall.B230400": "syscall", + "syscall.B2400": "syscall", + "syscall.B2500000": "syscall", + "syscall.B28800": "syscall", + "syscall.B300": "syscall", + "syscall.B3000000": "syscall", + "syscall.B3500000": "syscall", + "syscall.B38400": "syscall", + "syscall.B4000000": "syscall", + "syscall.B460800": "syscall", + "syscall.B4800": "syscall", + "syscall.B50": "syscall", + "syscall.B500000": "syscall", + "syscall.B57600": "syscall", + "syscall.B576000": "syscall", + "syscall.B600": "syscall", + "syscall.B7200": "syscall", + "syscall.B75": "syscall", + "syscall.B76800": "syscall", + "syscall.B921600": "syscall", + "syscall.B9600": "syscall", + "syscall.BASE_PROTOCOL": "syscall", + "syscall.BIOCFEEDBACK": "syscall", + "syscall.BIOCFLUSH": "syscall", + "syscall.BIOCGBLEN": "syscall", + "syscall.BIOCGDIRECTION": "syscall", + "syscall.BIOCGDIRFILT": "syscall", + "syscall.BIOCGDLT": "syscall", + "syscall.BIOCGDLTLIST": "syscall", + "syscall.BIOCGETBUFMODE": "syscall", + "syscall.BIOCGETIF": "syscall", + "syscall.BIOCGETZMAX": "syscall", + "syscall.BIOCGFEEDBACK": "syscall", + "syscall.BIOCGFILDROP": "syscall", + "syscall.BIOCGHDRCMPLT": "syscall", + "syscall.BIOCGRSIG": "syscall", + 
"syscall.BIOCGRTIMEOUT": "syscall", + "syscall.BIOCGSEESENT": "syscall", + "syscall.BIOCGSTATS": "syscall", + "syscall.BIOCGSTATSOLD": "syscall", + "syscall.BIOCGTSTAMP": "syscall", + "syscall.BIOCIMMEDIATE": "syscall", + "syscall.BIOCLOCK": "syscall", + "syscall.BIOCPROMISC": "syscall", + "syscall.BIOCROTZBUF": "syscall", + "syscall.BIOCSBLEN": "syscall", + "syscall.BIOCSDIRECTION": "syscall", + "syscall.BIOCSDIRFILT": "syscall", + "syscall.BIOCSDLT": "syscall", + "syscall.BIOCSETBUFMODE": "syscall", + "syscall.BIOCSETF": "syscall", + "syscall.BIOCSETFNR": "syscall", + "syscall.BIOCSETIF": "syscall", + "syscall.BIOCSETWF": "syscall", + "syscall.BIOCSETZBUF": "syscall", + "syscall.BIOCSFEEDBACK": "syscall", + "syscall.BIOCSFILDROP": "syscall", + "syscall.BIOCSHDRCMPLT": "syscall", + "syscall.BIOCSRSIG": "syscall", + "syscall.BIOCSRTIMEOUT": "syscall", + "syscall.BIOCSSEESENT": "syscall", + "syscall.BIOCSTCPF": "syscall", + "syscall.BIOCSTSTAMP": "syscall", + "syscall.BIOCSUDPF": "syscall", + "syscall.BIOCVERSION": "syscall", + "syscall.BPF_A": "syscall", + "syscall.BPF_ABS": "syscall", + "syscall.BPF_ADD": "syscall", + "syscall.BPF_ALIGNMENT": "syscall", + "syscall.BPF_ALIGNMENT32": "syscall", + "syscall.BPF_ALU": "syscall", + "syscall.BPF_AND": "syscall", + "syscall.BPF_B": "syscall", + "syscall.BPF_BUFMODE_BUFFER": "syscall", + "syscall.BPF_BUFMODE_ZBUF": "syscall", + "syscall.BPF_DFLTBUFSIZE": "syscall", + "syscall.BPF_DIRECTION_IN": "syscall", + "syscall.BPF_DIRECTION_OUT": "syscall", + "syscall.BPF_DIV": "syscall", + "syscall.BPF_H": "syscall", + "syscall.BPF_IMM": "syscall", + "syscall.BPF_IND": "syscall", + "syscall.BPF_JA": "syscall", + "syscall.BPF_JEQ": "syscall", + "syscall.BPF_JGE": "syscall", + "syscall.BPF_JGT": "syscall", + "syscall.BPF_JMP": "syscall", + "syscall.BPF_JSET": "syscall", + "syscall.BPF_K": "syscall", + "syscall.BPF_LD": "syscall", + "syscall.BPF_LDX": "syscall", + "syscall.BPF_LEN": "syscall", + "syscall.BPF_LSH": "syscall", + 
"syscall.BPF_MAJOR_VERSION": "syscall", + "syscall.BPF_MAXBUFSIZE": "syscall", + "syscall.BPF_MAXINSNS": "syscall", + "syscall.BPF_MEM": "syscall", + "syscall.BPF_MEMWORDS": "syscall", + "syscall.BPF_MINBUFSIZE": "syscall", + "syscall.BPF_MINOR_VERSION": "syscall", + "syscall.BPF_MISC": "syscall", + "syscall.BPF_MSH": "syscall", + "syscall.BPF_MUL": "syscall", + "syscall.BPF_NEG": "syscall", + "syscall.BPF_OR": "syscall", + "syscall.BPF_RELEASE": "syscall", + "syscall.BPF_RET": "syscall", + "syscall.BPF_RSH": "syscall", + "syscall.BPF_ST": "syscall", + "syscall.BPF_STX": "syscall", + "syscall.BPF_SUB": "syscall", + "syscall.BPF_TAX": "syscall", + "syscall.BPF_TXA": "syscall", + "syscall.BPF_T_BINTIME": "syscall", + "syscall.BPF_T_BINTIME_FAST": "syscall", + "syscall.BPF_T_BINTIME_MONOTONIC": "syscall", + "syscall.BPF_T_BINTIME_MONOTONIC_FAST": "syscall", + "syscall.BPF_T_FAST": "syscall", + "syscall.BPF_T_FLAG_MASK": "syscall", + "syscall.BPF_T_FORMAT_MASK": "syscall", + "syscall.BPF_T_MICROTIME": "syscall", + "syscall.BPF_T_MICROTIME_FAST": "syscall", + "syscall.BPF_T_MICROTIME_MONOTONIC": "syscall", + "syscall.BPF_T_MICROTIME_MONOTONIC_FAST": "syscall", + "syscall.BPF_T_MONOTONIC": "syscall", + "syscall.BPF_T_MONOTONIC_FAST": "syscall", + "syscall.BPF_T_NANOTIME": "syscall", + "syscall.BPF_T_NANOTIME_FAST": "syscall", + "syscall.BPF_T_NANOTIME_MONOTONIC": "syscall", + "syscall.BPF_T_NANOTIME_MONOTONIC_FAST": "syscall", + "syscall.BPF_T_NONE": "syscall", + "syscall.BPF_T_NORMAL": "syscall", + "syscall.BPF_W": "syscall", + "syscall.BPF_X": "syscall", + "syscall.BRKINT": "syscall", + "syscall.Bind": "syscall", + "syscall.BindToDevice": "syscall", + "syscall.BpfBuflen": "syscall", + "syscall.BpfDatalink": "syscall", + "syscall.BpfHdr": "syscall", + "syscall.BpfHeadercmpl": "syscall", + "syscall.BpfInsn": "syscall", + "syscall.BpfInterface": "syscall", + "syscall.BpfJump": "syscall", + "syscall.BpfProgram": "syscall", + "syscall.BpfStat": "syscall", + 
"syscall.BpfStats": "syscall", + "syscall.BpfStmt": "syscall", + "syscall.BpfTimeout": "syscall", + "syscall.BpfTimeval": "syscall", + "syscall.BpfVersion": "syscall", + "syscall.BpfZbuf": "syscall", + "syscall.BpfZbufHeader": "syscall", + "syscall.ByHandleFileInformation": "syscall", + "syscall.BytePtrFromString": "syscall", + "syscall.ByteSliceFromString": "syscall", + "syscall.CCR0_FLUSH": "syscall", + "syscall.CERT_CHAIN_POLICY_AUTHENTICODE": "syscall", + "syscall.CERT_CHAIN_POLICY_AUTHENTICODE_TS": "syscall", + "syscall.CERT_CHAIN_POLICY_BASE": "syscall", + "syscall.CERT_CHAIN_POLICY_BASIC_CONSTRAINTS": "syscall", + "syscall.CERT_CHAIN_POLICY_EV": "syscall", + "syscall.CERT_CHAIN_POLICY_MICROSOFT_ROOT": "syscall", + "syscall.CERT_CHAIN_POLICY_NT_AUTH": "syscall", + "syscall.CERT_CHAIN_POLICY_SSL": "syscall", + "syscall.CERT_E_CN_NO_MATCH": "syscall", + "syscall.CERT_E_EXPIRED": "syscall", + "syscall.CERT_E_PURPOSE": "syscall", + "syscall.CERT_E_ROLE": "syscall", + "syscall.CERT_E_UNTRUSTEDROOT": "syscall", + "syscall.CERT_STORE_ADD_ALWAYS": "syscall", + "syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG": "syscall", + "syscall.CERT_STORE_PROV_MEMORY": "syscall", + "syscall.CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT": "syscall", + "syscall.CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT": "syscall", + "syscall.CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT": "syscall", + "syscall.CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT": "syscall", + "syscall.CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT": "syscall", + "syscall.CERT_TRUST_INVALID_BASIC_CONSTRAINTS": "syscall", + "syscall.CERT_TRUST_INVALID_EXTENSION": "syscall", + "syscall.CERT_TRUST_INVALID_NAME_CONSTRAINTS": "syscall", + "syscall.CERT_TRUST_INVALID_POLICY_CONSTRAINTS": "syscall", + "syscall.CERT_TRUST_IS_CYCLIC": "syscall", + "syscall.CERT_TRUST_IS_EXPLICIT_DISTRUST": "syscall", + "syscall.CERT_TRUST_IS_NOT_SIGNATURE_VALID": "syscall", + "syscall.CERT_TRUST_IS_NOT_TIME_VALID": "syscall", + 
"syscall.CERT_TRUST_IS_NOT_VALID_FOR_USAGE": "syscall", + "syscall.CERT_TRUST_IS_OFFLINE_REVOCATION": "syscall", + "syscall.CERT_TRUST_IS_REVOKED": "syscall", + "syscall.CERT_TRUST_IS_UNTRUSTED_ROOT": "syscall", + "syscall.CERT_TRUST_NO_ERROR": "syscall", + "syscall.CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY": "syscall", + "syscall.CERT_TRUST_REVOCATION_STATUS_UNKNOWN": "syscall", + "syscall.CFLUSH": "syscall", + "syscall.CLOCAL": "syscall", + "syscall.CLONE_CHILD_CLEARTID": "syscall", + "syscall.CLONE_CHILD_SETTID": "syscall", + "syscall.CLONE_CSIGNAL": "syscall", + "syscall.CLONE_DETACHED": "syscall", + "syscall.CLONE_FILES": "syscall", + "syscall.CLONE_FS": "syscall", + "syscall.CLONE_IO": "syscall", + "syscall.CLONE_NEWIPC": "syscall", + "syscall.CLONE_NEWNET": "syscall", + "syscall.CLONE_NEWNS": "syscall", + "syscall.CLONE_NEWPID": "syscall", + "syscall.CLONE_NEWUSER": "syscall", + "syscall.CLONE_NEWUTS": "syscall", + "syscall.CLONE_PARENT": "syscall", + "syscall.CLONE_PARENT_SETTID": "syscall", + "syscall.CLONE_PID": "syscall", + "syscall.CLONE_PTRACE": "syscall", + "syscall.CLONE_SETTLS": "syscall", + "syscall.CLONE_SIGHAND": "syscall", + "syscall.CLONE_SYSVSEM": "syscall", + "syscall.CLONE_THREAD": "syscall", + "syscall.CLONE_UNTRACED": "syscall", + "syscall.CLONE_VFORK": "syscall", + "syscall.CLONE_VM": "syscall", + "syscall.CPUID_CFLUSH": "syscall", + "syscall.CREAD": "syscall", + "syscall.CREATE_ALWAYS": "syscall", + "syscall.CREATE_NEW": "syscall", + "syscall.CREATE_NEW_PROCESS_GROUP": "syscall", + "syscall.CREATE_UNICODE_ENVIRONMENT": "syscall", + "syscall.CRYPT_DEFAULT_CONTAINER_OPTIONAL": "syscall", + "syscall.CRYPT_DELETEKEYSET": "syscall", + "syscall.CRYPT_MACHINE_KEYSET": "syscall", + "syscall.CRYPT_NEWKEYSET": "syscall", + "syscall.CRYPT_SILENT": "syscall", + "syscall.CRYPT_VERIFYCONTEXT": "syscall", + "syscall.CS5": "syscall", + "syscall.CS6": "syscall", + "syscall.CS7": "syscall", + "syscall.CS8": "syscall", + "syscall.CSIZE": "syscall", + 
"syscall.CSTART": "syscall", + "syscall.CSTATUS": "syscall", + "syscall.CSTOP": "syscall", + "syscall.CSTOPB": "syscall", + "syscall.CSUSP": "syscall", + "syscall.CTL_MAXNAME": "syscall", + "syscall.CTL_NET": "syscall", + "syscall.CTL_QUERY": "syscall", + "syscall.CTRL_BREAK_EVENT": "syscall", + "syscall.CTRL_C_EVENT": "syscall", + "syscall.CancelIo": "syscall", + "syscall.CancelIoEx": "syscall", + "syscall.CertAddCertificateContextToStore": "syscall", + "syscall.CertChainContext": "syscall", + "syscall.CertChainElement": "syscall", + "syscall.CertChainPara": "syscall", + "syscall.CertChainPolicyPara": "syscall", + "syscall.CertChainPolicyStatus": "syscall", + "syscall.CertCloseStore": "syscall", + "syscall.CertContext": "syscall", + "syscall.CertCreateCertificateContext": "syscall", + "syscall.CertEnhKeyUsage": "syscall", + "syscall.CertEnumCertificatesInStore": "syscall", + "syscall.CertFreeCertificateChain": "syscall", + "syscall.CertFreeCertificateContext": "syscall", + "syscall.CertGetCertificateChain": "syscall", + "syscall.CertOpenStore": "syscall", + "syscall.CertOpenSystemStore": "syscall", + "syscall.CertRevocationInfo": "syscall", + "syscall.CertSimpleChain": "syscall", + "syscall.CertTrustStatus": "syscall", + "syscall.CertUsageMatch": "syscall", + "syscall.CertVerifyCertificateChainPolicy": "syscall", + "syscall.Chdir": "syscall", + "syscall.CheckBpfVersion": "syscall", + "syscall.Chflags": "syscall", + "syscall.Chmod": "syscall", + "syscall.Chown": "syscall", + "syscall.Chroot": "syscall", + "syscall.Clearenv": "syscall", + "syscall.Close": "syscall", + "syscall.CloseHandle": "syscall", + "syscall.CloseOnExec": "syscall", + "syscall.Closesocket": "syscall", + "syscall.CmsgLen": "syscall", + "syscall.CmsgSpace": "syscall", + "syscall.Cmsghdr": "syscall", + "syscall.CommandLineToArgv": "syscall", + "syscall.ComputerName": "syscall", + "syscall.Connect": "syscall", + "syscall.ConnectEx": "syscall", + "syscall.ConvertSidToStringSid": "syscall", + 
"syscall.ConvertStringSidToSid": "syscall", + "syscall.CopySid": "syscall", + "syscall.Creat": "syscall", + "syscall.CreateDirectory": "syscall", + "syscall.CreateFile": "syscall", + "syscall.CreateFileMapping": "syscall", + "syscall.CreateHardLink": "syscall", + "syscall.CreateIoCompletionPort": "syscall", + "syscall.CreatePipe": "syscall", + "syscall.CreateProcess": "syscall", + "syscall.CreateSymbolicLink": "syscall", + "syscall.CreateToolhelp32Snapshot": "syscall", + "syscall.Credential": "syscall", + "syscall.CryptAcquireContext": "syscall", + "syscall.CryptGenRandom": "syscall", + "syscall.CryptReleaseContext": "syscall", + "syscall.DIOCBSFLUSH": "syscall", + "syscall.DIOCOSFPFLUSH": "syscall", + "syscall.DLL": "syscall", + "syscall.DLLError": "syscall", + "syscall.DLT_A429": "syscall", + "syscall.DLT_A653_ICM": "syscall", + "syscall.DLT_AIRONET_HEADER": "syscall", + "syscall.DLT_AOS": "syscall", + "syscall.DLT_APPLE_IP_OVER_IEEE1394": "syscall", + "syscall.DLT_ARCNET": "syscall", + "syscall.DLT_ARCNET_LINUX": "syscall", + "syscall.DLT_ATM_CLIP": "syscall", + "syscall.DLT_ATM_RFC1483": "syscall", + "syscall.DLT_AURORA": "syscall", + "syscall.DLT_AX25": "syscall", + "syscall.DLT_AX25_KISS": "syscall", + "syscall.DLT_BACNET_MS_TP": "syscall", + "syscall.DLT_BLUETOOTH_HCI_H4": "syscall", + "syscall.DLT_BLUETOOTH_HCI_H4_WITH_PHDR": "syscall", + "syscall.DLT_CAN20B": "syscall", + "syscall.DLT_CAN_SOCKETCAN": "syscall", + "syscall.DLT_CHAOS": "syscall", + "syscall.DLT_CHDLC": "syscall", + "syscall.DLT_CISCO_IOS": "syscall", + "syscall.DLT_C_HDLC": "syscall", + "syscall.DLT_C_HDLC_WITH_DIR": "syscall", + "syscall.DLT_DBUS": "syscall", + "syscall.DLT_DECT": "syscall", + "syscall.DLT_DOCSIS": "syscall", + "syscall.DLT_DVB_CI": "syscall", + "syscall.DLT_ECONET": "syscall", + "syscall.DLT_EN10MB": "syscall", + "syscall.DLT_EN3MB": "syscall", + "syscall.DLT_ENC": "syscall", + "syscall.DLT_ERF": "syscall", + "syscall.DLT_ERF_ETH": "syscall", + "syscall.DLT_ERF_POS": 
"syscall", + "syscall.DLT_FC_2": "syscall", + "syscall.DLT_FC_2_WITH_FRAME_DELIMS": "syscall", + "syscall.DLT_FDDI": "syscall", + "syscall.DLT_FLEXRAY": "syscall", + "syscall.DLT_FRELAY": "syscall", + "syscall.DLT_FRELAY_WITH_DIR": "syscall", + "syscall.DLT_GCOM_SERIAL": "syscall", + "syscall.DLT_GCOM_T1E1": "syscall", + "syscall.DLT_GPF_F": "syscall", + "syscall.DLT_GPF_T": "syscall", + "syscall.DLT_GPRS_LLC": "syscall", + "syscall.DLT_GSMTAP_ABIS": "syscall", + "syscall.DLT_GSMTAP_UM": "syscall", + "syscall.DLT_HDLC": "syscall", + "syscall.DLT_HHDLC": "syscall", + "syscall.DLT_HIPPI": "syscall", + "syscall.DLT_IBM_SN": "syscall", + "syscall.DLT_IBM_SP": "syscall", + "syscall.DLT_IEEE802": "syscall", + "syscall.DLT_IEEE802_11": "syscall", + "syscall.DLT_IEEE802_11_RADIO": "syscall", + "syscall.DLT_IEEE802_11_RADIO_AVS": "syscall", + "syscall.DLT_IEEE802_15_4": "syscall", + "syscall.DLT_IEEE802_15_4_LINUX": "syscall", + "syscall.DLT_IEEE802_15_4_NOFCS": "syscall", + "syscall.DLT_IEEE802_15_4_NONASK_PHY": "syscall", + "syscall.DLT_IEEE802_16_MAC_CPS": "syscall", + "syscall.DLT_IEEE802_16_MAC_CPS_RADIO": "syscall", + "syscall.DLT_IPFILTER": "syscall", + "syscall.DLT_IPMB": "syscall", + "syscall.DLT_IPMB_LINUX": "syscall", + "syscall.DLT_IPNET": "syscall", + "syscall.DLT_IPOIB": "syscall", + "syscall.DLT_IPV4": "syscall", + "syscall.DLT_IPV6": "syscall", + "syscall.DLT_IP_OVER_FC": "syscall", + "syscall.DLT_JUNIPER_ATM1": "syscall", + "syscall.DLT_JUNIPER_ATM2": "syscall", + "syscall.DLT_JUNIPER_ATM_CEMIC": "syscall", + "syscall.DLT_JUNIPER_CHDLC": "syscall", + "syscall.DLT_JUNIPER_ES": "syscall", + "syscall.DLT_JUNIPER_ETHER": "syscall", + "syscall.DLT_JUNIPER_FIBRECHANNEL": "syscall", + "syscall.DLT_JUNIPER_FRELAY": "syscall", + "syscall.DLT_JUNIPER_GGSN": "syscall", + "syscall.DLT_JUNIPER_ISM": "syscall", + "syscall.DLT_JUNIPER_MFR": "syscall", + "syscall.DLT_JUNIPER_MLFR": "syscall", + "syscall.DLT_JUNIPER_MLPPP": "syscall", + "syscall.DLT_JUNIPER_MONITOR": 
"syscall", + "syscall.DLT_JUNIPER_PIC_PEER": "syscall", + "syscall.DLT_JUNIPER_PPP": "syscall", + "syscall.DLT_JUNIPER_PPPOE": "syscall", + "syscall.DLT_JUNIPER_PPPOE_ATM": "syscall", + "syscall.DLT_JUNIPER_SERVICES": "syscall", + "syscall.DLT_JUNIPER_SRX_E2E": "syscall", + "syscall.DLT_JUNIPER_ST": "syscall", + "syscall.DLT_JUNIPER_VP": "syscall", + "syscall.DLT_JUNIPER_VS": "syscall", + "syscall.DLT_LAPB_WITH_DIR": "syscall", + "syscall.DLT_LAPD": "syscall", + "syscall.DLT_LIN": "syscall", + "syscall.DLT_LINUX_EVDEV": "syscall", + "syscall.DLT_LINUX_IRDA": "syscall", + "syscall.DLT_LINUX_LAPD": "syscall", + "syscall.DLT_LINUX_PPP_WITHDIRECTION": "syscall", + "syscall.DLT_LINUX_SLL": "syscall", + "syscall.DLT_LOOP": "syscall", + "syscall.DLT_LTALK": "syscall", + "syscall.DLT_MATCHING_MAX": "syscall", + "syscall.DLT_MATCHING_MIN": "syscall", + "syscall.DLT_MFR": "syscall", + "syscall.DLT_MOST": "syscall", + "syscall.DLT_MPEG_2_TS": "syscall", + "syscall.DLT_MPLS": "syscall", + "syscall.DLT_MTP2": "syscall", + "syscall.DLT_MTP2_WITH_PHDR": "syscall", + "syscall.DLT_MTP3": "syscall", + "syscall.DLT_MUX27010": "syscall", + "syscall.DLT_NETANALYZER": "syscall", + "syscall.DLT_NETANALYZER_TRANSPARENT": "syscall", + "syscall.DLT_NFC_LLCP": "syscall", + "syscall.DLT_NFLOG": "syscall", + "syscall.DLT_NG40": "syscall", + "syscall.DLT_NULL": "syscall", + "syscall.DLT_PCI_EXP": "syscall", + "syscall.DLT_PFLOG": "syscall", + "syscall.DLT_PFSYNC": "syscall", + "syscall.DLT_PPI": "syscall", + "syscall.DLT_PPP": "syscall", + "syscall.DLT_PPP_BSDOS": "syscall", + "syscall.DLT_PPP_ETHER": "syscall", + "syscall.DLT_PPP_PPPD": "syscall", + "syscall.DLT_PPP_SERIAL": "syscall", + "syscall.DLT_PPP_WITH_DIR": "syscall", + "syscall.DLT_PPP_WITH_DIRECTION": "syscall", + "syscall.DLT_PRISM_HEADER": "syscall", + "syscall.DLT_PRONET": "syscall", + "syscall.DLT_RAIF1": "syscall", + "syscall.DLT_RAW": "syscall", + "syscall.DLT_RAWAF_MASK": "syscall", + "syscall.DLT_RIO": "syscall", + 
"syscall.DLT_SCCP": "syscall", + "syscall.DLT_SITA": "syscall", + "syscall.DLT_SLIP": "syscall", + "syscall.DLT_SLIP_BSDOS": "syscall", + "syscall.DLT_STANAG_5066_D_PDU": "syscall", + "syscall.DLT_SUNATM": "syscall", + "syscall.DLT_SYMANTEC_FIREWALL": "syscall", + "syscall.DLT_TZSP": "syscall", + "syscall.DLT_USB": "syscall", + "syscall.DLT_USB_LINUX": "syscall", + "syscall.DLT_USB_LINUX_MMAPPED": "syscall", + "syscall.DLT_USER0": "syscall", + "syscall.DLT_USER1": "syscall", + "syscall.DLT_USER10": "syscall", + "syscall.DLT_USER11": "syscall", + "syscall.DLT_USER12": "syscall", + "syscall.DLT_USER13": "syscall", + "syscall.DLT_USER14": "syscall", + "syscall.DLT_USER15": "syscall", + "syscall.DLT_USER2": "syscall", + "syscall.DLT_USER3": "syscall", + "syscall.DLT_USER4": "syscall", + "syscall.DLT_USER5": "syscall", + "syscall.DLT_USER6": "syscall", + "syscall.DLT_USER7": "syscall", + "syscall.DLT_USER8": "syscall", + "syscall.DLT_USER9": "syscall", + "syscall.DLT_WIHART": "syscall", + "syscall.DLT_X2E_SERIAL": "syscall", + "syscall.DLT_X2E_XORAYA": "syscall", + "syscall.DNSMXData": "syscall", + "syscall.DNSPTRData": "syscall", + "syscall.DNSRecord": "syscall", + "syscall.DNSSRVData": "syscall", + "syscall.DNSTXTData": "syscall", + "syscall.DNS_INFO_NO_RECORDS": "syscall", + "syscall.DNS_TYPE_A": "syscall", + "syscall.DNS_TYPE_A6": "syscall", + "syscall.DNS_TYPE_AAAA": "syscall", + "syscall.DNS_TYPE_ADDRS": "syscall", + "syscall.DNS_TYPE_AFSDB": "syscall", + "syscall.DNS_TYPE_ALL": "syscall", + "syscall.DNS_TYPE_ANY": "syscall", + "syscall.DNS_TYPE_ATMA": "syscall", + "syscall.DNS_TYPE_AXFR": "syscall", + "syscall.DNS_TYPE_CERT": "syscall", + "syscall.DNS_TYPE_CNAME": "syscall", + "syscall.DNS_TYPE_DHCID": "syscall", + "syscall.DNS_TYPE_DNAME": "syscall", + "syscall.DNS_TYPE_DNSKEY": "syscall", + "syscall.DNS_TYPE_DS": "syscall", + "syscall.DNS_TYPE_EID": "syscall", + "syscall.DNS_TYPE_GID": "syscall", + "syscall.DNS_TYPE_GPOS": "syscall", + "syscall.DNS_TYPE_HINFO": 
"syscall", + "syscall.DNS_TYPE_ISDN": "syscall", + "syscall.DNS_TYPE_IXFR": "syscall", + "syscall.DNS_TYPE_KEY": "syscall", + "syscall.DNS_TYPE_KX": "syscall", + "syscall.DNS_TYPE_LOC": "syscall", + "syscall.DNS_TYPE_MAILA": "syscall", + "syscall.DNS_TYPE_MAILB": "syscall", + "syscall.DNS_TYPE_MB": "syscall", + "syscall.DNS_TYPE_MD": "syscall", + "syscall.DNS_TYPE_MF": "syscall", + "syscall.DNS_TYPE_MG": "syscall", + "syscall.DNS_TYPE_MINFO": "syscall", + "syscall.DNS_TYPE_MR": "syscall", + "syscall.DNS_TYPE_MX": "syscall", + "syscall.DNS_TYPE_NAPTR": "syscall", + "syscall.DNS_TYPE_NBSTAT": "syscall", + "syscall.DNS_TYPE_NIMLOC": "syscall", + "syscall.DNS_TYPE_NS": "syscall", + "syscall.DNS_TYPE_NSAP": "syscall", + "syscall.DNS_TYPE_NSAPPTR": "syscall", + "syscall.DNS_TYPE_NSEC": "syscall", + "syscall.DNS_TYPE_NULL": "syscall", + "syscall.DNS_TYPE_NXT": "syscall", + "syscall.DNS_TYPE_OPT": "syscall", + "syscall.DNS_TYPE_PTR": "syscall", + "syscall.DNS_TYPE_PX": "syscall", + "syscall.DNS_TYPE_RP": "syscall", + "syscall.DNS_TYPE_RRSIG": "syscall", + "syscall.DNS_TYPE_RT": "syscall", + "syscall.DNS_TYPE_SIG": "syscall", + "syscall.DNS_TYPE_SINK": "syscall", + "syscall.DNS_TYPE_SOA": "syscall", + "syscall.DNS_TYPE_SRV": "syscall", + "syscall.DNS_TYPE_TEXT": "syscall", + "syscall.DNS_TYPE_TKEY": "syscall", + "syscall.DNS_TYPE_TSIG": "syscall", + "syscall.DNS_TYPE_UID": "syscall", + "syscall.DNS_TYPE_UINFO": "syscall", + "syscall.DNS_TYPE_UNSPEC": "syscall", + "syscall.DNS_TYPE_WINS": "syscall", + "syscall.DNS_TYPE_WINSR": "syscall", + "syscall.DNS_TYPE_WKS": "syscall", + "syscall.DNS_TYPE_X25": "syscall", + "syscall.DT_BLK": "syscall", + "syscall.DT_CHR": "syscall", + "syscall.DT_DIR": "syscall", + "syscall.DT_FIFO": "syscall", + "syscall.DT_LNK": "syscall", + "syscall.DT_REG": "syscall", + "syscall.DT_SOCK": "syscall", + "syscall.DT_UNKNOWN": "syscall", + "syscall.DT_WHT": "syscall", + "syscall.DUPLICATE_CLOSE_SOURCE": "syscall", + "syscall.DUPLICATE_SAME_ACCESS": 
"syscall", + "syscall.DeleteFile": "syscall", + "syscall.DetachLsf": "syscall", + "syscall.DeviceIoControl": "syscall", + "syscall.Dirent": "syscall", + "syscall.DnsNameCompare": "syscall", + "syscall.DnsQuery": "syscall", + "syscall.DnsRecordListFree": "syscall", + "syscall.DnsSectionAdditional": "syscall", + "syscall.DnsSectionAnswer": "syscall", + "syscall.DnsSectionAuthority": "syscall", + "syscall.DnsSectionQuestion": "syscall", + "syscall.Dup": "syscall", + "syscall.Dup2": "syscall", + "syscall.Dup3": "syscall", + "syscall.DuplicateHandle": "syscall", + "syscall.E2BIG": "syscall", + "syscall.EACCES": "syscall", + "syscall.EADDRINUSE": "syscall", + "syscall.EADDRNOTAVAIL": "syscall", + "syscall.EADV": "syscall", + "syscall.EAFNOSUPPORT": "syscall", + "syscall.EAGAIN": "syscall", + "syscall.EALREADY": "syscall", + "syscall.EAUTH": "syscall", + "syscall.EBADARCH": "syscall", + "syscall.EBADE": "syscall", + "syscall.EBADEXEC": "syscall", + "syscall.EBADF": "syscall", + "syscall.EBADFD": "syscall", + "syscall.EBADMACHO": "syscall", + "syscall.EBADMSG": "syscall", + "syscall.EBADR": "syscall", + "syscall.EBADRPC": "syscall", + "syscall.EBADRQC": "syscall", + "syscall.EBADSLT": "syscall", + "syscall.EBFONT": "syscall", + "syscall.EBUSY": "syscall", + "syscall.ECANCELED": "syscall", + "syscall.ECAPMODE": "syscall", + "syscall.ECHILD": "syscall", + "syscall.ECHO": "syscall", + "syscall.ECHOCTL": "syscall", + "syscall.ECHOE": "syscall", + "syscall.ECHOK": "syscall", + "syscall.ECHOKE": "syscall", + "syscall.ECHONL": "syscall", + "syscall.ECHOPRT": "syscall", + "syscall.ECHRNG": "syscall", + "syscall.ECOMM": "syscall", + "syscall.ECONNABORTED": "syscall", + "syscall.ECONNREFUSED": "syscall", + "syscall.ECONNRESET": "syscall", + "syscall.EDEADLK": "syscall", + "syscall.EDEADLOCK": "syscall", + "syscall.EDESTADDRREQ": "syscall", + "syscall.EDEVERR": "syscall", + "syscall.EDOM": "syscall", + "syscall.EDOOFUS": "syscall", + "syscall.EDOTDOT": "syscall", + "syscall.EDQUOT": 
"syscall", + "syscall.EEXIST": "syscall", + "syscall.EFAULT": "syscall", + "syscall.EFBIG": "syscall", + "syscall.EFER_LMA": "syscall", + "syscall.EFER_LME": "syscall", + "syscall.EFER_NXE": "syscall", + "syscall.EFER_SCE": "syscall", + "syscall.EFTYPE": "syscall", + "syscall.EHOSTDOWN": "syscall", + "syscall.EHOSTUNREACH": "syscall", + "syscall.EHWPOISON": "syscall", + "syscall.EIDRM": "syscall", + "syscall.EILSEQ": "syscall", + "syscall.EINPROGRESS": "syscall", + "syscall.EINTR": "syscall", + "syscall.EINVAL": "syscall", + "syscall.EIO": "syscall", + "syscall.EIPSEC": "syscall", + "syscall.EISCONN": "syscall", + "syscall.EISDIR": "syscall", + "syscall.EISNAM": "syscall", + "syscall.EKEYEXPIRED": "syscall", + "syscall.EKEYREJECTED": "syscall", + "syscall.EKEYREVOKED": "syscall", + "syscall.EL2HLT": "syscall", + "syscall.EL2NSYNC": "syscall", + "syscall.EL3HLT": "syscall", + "syscall.EL3RST": "syscall", + "syscall.ELAST": "syscall", + "syscall.ELF_NGREG": "syscall", + "syscall.ELF_PRARGSZ": "syscall", + "syscall.ELIBACC": "syscall", + "syscall.ELIBBAD": "syscall", + "syscall.ELIBEXEC": "syscall", + "syscall.ELIBMAX": "syscall", + "syscall.ELIBSCN": "syscall", + "syscall.ELNRNG": "syscall", + "syscall.ELOOP": "syscall", + "syscall.EMEDIUMTYPE": "syscall", + "syscall.EMFILE": "syscall", + "syscall.EMLINK": "syscall", + "syscall.EMSGSIZE": "syscall", + "syscall.EMT_TAGOVF": "syscall", + "syscall.EMULTIHOP": "syscall", + "syscall.EMUL_ENABLED": "syscall", + "syscall.EMUL_LINUX": "syscall", + "syscall.EMUL_LINUX32": "syscall", + "syscall.EMUL_MAXID": "syscall", + "syscall.EMUL_NATIVE": "syscall", + "syscall.ENAMETOOLONG": "syscall", + "syscall.ENAVAIL": "syscall", + "syscall.ENDRUNDISC": "syscall", + "syscall.ENEEDAUTH": "syscall", + "syscall.ENETDOWN": "syscall", + "syscall.ENETRESET": "syscall", + "syscall.ENETUNREACH": "syscall", + "syscall.ENFILE": "syscall", + "syscall.ENOANO": "syscall", + "syscall.ENOATTR": "syscall", + "syscall.ENOBUFS": "syscall", + 
"syscall.ENOCSI": "syscall", + "syscall.ENODATA": "syscall", + "syscall.ENODEV": "syscall", + "syscall.ENOENT": "syscall", + "syscall.ENOEXEC": "syscall", + "syscall.ENOKEY": "syscall", + "syscall.ENOLCK": "syscall", + "syscall.ENOLINK": "syscall", + "syscall.ENOMEDIUM": "syscall", + "syscall.ENOMEM": "syscall", + "syscall.ENOMSG": "syscall", + "syscall.ENONET": "syscall", + "syscall.ENOPKG": "syscall", + "syscall.ENOPOLICY": "syscall", + "syscall.ENOPROTOOPT": "syscall", + "syscall.ENOSPC": "syscall", + "syscall.ENOSR": "syscall", + "syscall.ENOSTR": "syscall", + "syscall.ENOSYS": "syscall", + "syscall.ENOTBLK": "syscall", + "syscall.ENOTCAPABLE": "syscall", + "syscall.ENOTCONN": "syscall", + "syscall.ENOTDIR": "syscall", + "syscall.ENOTEMPTY": "syscall", + "syscall.ENOTNAM": "syscall", + "syscall.ENOTRECOVERABLE": "syscall", + "syscall.ENOTSOCK": "syscall", + "syscall.ENOTSUP": "syscall", + "syscall.ENOTTY": "syscall", + "syscall.ENOTUNIQ": "syscall", + "syscall.ENXIO": "syscall", + "syscall.EN_SW_CTL_INF": "syscall", + "syscall.EN_SW_CTL_PREC": "syscall", + "syscall.EN_SW_CTL_ROUND": "syscall", + "syscall.EN_SW_DATACHAIN": "syscall", + "syscall.EN_SW_DENORM": "syscall", + "syscall.EN_SW_INVOP": "syscall", + "syscall.EN_SW_OVERFLOW": "syscall", + "syscall.EN_SW_PRECLOSS": "syscall", + "syscall.EN_SW_UNDERFLOW": "syscall", + "syscall.EN_SW_ZERODIV": "syscall", + "syscall.EOPNOTSUPP": "syscall", + "syscall.EOVERFLOW": "syscall", + "syscall.EOWNERDEAD": "syscall", + "syscall.EPERM": "syscall", + "syscall.EPFNOSUPPORT": "syscall", + "syscall.EPIPE": "syscall", + "syscall.EPOLLERR": "syscall", + "syscall.EPOLLET": "syscall", + "syscall.EPOLLHUP": "syscall", + "syscall.EPOLLIN": "syscall", + "syscall.EPOLLMSG": "syscall", + "syscall.EPOLLONESHOT": "syscall", + "syscall.EPOLLOUT": "syscall", + "syscall.EPOLLPRI": "syscall", + "syscall.EPOLLRDBAND": "syscall", + "syscall.EPOLLRDHUP": "syscall", + "syscall.EPOLLRDNORM": "syscall", + "syscall.EPOLLWRBAND": "syscall", + 
"syscall.EPOLLWRNORM": "syscall", + "syscall.EPOLL_CLOEXEC": "syscall", + "syscall.EPOLL_CTL_ADD": "syscall", + "syscall.EPOLL_CTL_DEL": "syscall", + "syscall.EPOLL_CTL_MOD": "syscall", + "syscall.EPOLL_NONBLOCK": "syscall", + "syscall.EPROCLIM": "syscall", + "syscall.EPROCUNAVAIL": "syscall", + "syscall.EPROGMISMATCH": "syscall", + "syscall.EPROGUNAVAIL": "syscall", + "syscall.EPROTO": "syscall", + "syscall.EPROTONOSUPPORT": "syscall", + "syscall.EPROTOTYPE": "syscall", + "syscall.EPWROFF": "syscall", + "syscall.ERANGE": "syscall", + "syscall.EREMCHG": "syscall", + "syscall.EREMOTE": "syscall", + "syscall.EREMOTEIO": "syscall", + "syscall.ERESTART": "syscall", + "syscall.ERFKILL": "syscall", + "syscall.EROFS": "syscall", + "syscall.ERPCMISMATCH": "syscall", + "syscall.ERROR_ACCESS_DENIED": "syscall", + "syscall.ERROR_ALREADY_EXISTS": "syscall", + "syscall.ERROR_BROKEN_PIPE": "syscall", + "syscall.ERROR_BUFFER_OVERFLOW": "syscall", + "syscall.ERROR_DIR_NOT_EMPTY": "syscall", + "syscall.ERROR_ENVVAR_NOT_FOUND": "syscall", + "syscall.ERROR_FILE_EXISTS": "syscall", + "syscall.ERROR_FILE_NOT_FOUND": "syscall", + "syscall.ERROR_HANDLE_EOF": "syscall", + "syscall.ERROR_INSUFFICIENT_BUFFER": "syscall", + "syscall.ERROR_IO_PENDING": "syscall", + "syscall.ERROR_MOD_NOT_FOUND": "syscall", + "syscall.ERROR_MORE_DATA": "syscall", + "syscall.ERROR_NETNAME_DELETED": "syscall", + "syscall.ERROR_NOT_FOUND": "syscall", + "syscall.ERROR_NO_MORE_FILES": "syscall", + "syscall.ERROR_OPERATION_ABORTED": "syscall", + "syscall.ERROR_PATH_NOT_FOUND": "syscall", + "syscall.ERROR_PRIVILEGE_NOT_HELD": "syscall", + "syscall.ERROR_PROC_NOT_FOUND": "syscall", + "syscall.ESHLIBVERS": "syscall", + "syscall.ESHUTDOWN": "syscall", + "syscall.ESOCKTNOSUPPORT": "syscall", + "syscall.ESPIPE": "syscall", + "syscall.ESRCH": "syscall", + "syscall.ESRMNT": "syscall", + "syscall.ESTALE": "syscall", + "syscall.ESTRPIPE": "syscall", + "syscall.ETHERCAP_JUMBO_MTU": "syscall", + 
"syscall.ETHERCAP_VLAN_HWTAGGING": "syscall", + "syscall.ETHERCAP_VLAN_MTU": "syscall", + "syscall.ETHERMIN": "syscall", + "syscall.ETHERMTU": "syscall", + "syscall.ETHERMTU_JUMBO": "syscall", + "syscall.ETHERTYPE_8023": "syscall", + "syscall.ETHERTYPE_AARP": "syscall", + "syscall.ETHERTYPE_ACCTON": "syscall", + "syscall.ETHERTYPE_AEONIC": "syscall", + "syscall.ETHERTYPE_ALPHA": "syscall", + "syscall.ETHERTYPE_AMBER": "syscall", + "syscall.ETHERTYPE_AMOEBA": "syscall", + "syscall.ETHERTYPE_AOE": "syscall", + "syscall.ETHERTYPE_APOLLO": "syscall", + "syscall.ETHERTYPE_APOLLODOMAIN": "syscall", + "syscall.ETHERTYPE_APPLETALK": "syscall", + "syscall.ETHERTYPE_APPLITEK": "syscall", + "syscall.ETHERTYPE_ARGONAUT": "syscall", + "syscall.ETHERTYPE_ARP": "syscall", + "syscall.ETHERTYPE_AT": "syscall", + "syscall.ETHERTYPE_ATALK": "syscall", + "syscall.ETHERTYPE_ATOMIC": "syscall", + "syscall.ETHERTYPE_ATT": "syscall", + "syscall.ETHERTYPE_ATTSTANFORD": "syscall", + "syscall.ETHERTYPE_AUTOPHON": "syscall", + "syscall.ETHERTYPE_AXIS": "syscall", + "syscall.ETHERTYPE_BCLOOP": "syscall", + "syscall.ETHERTYPE_BOFL": "syscall", + "syscall.ETHERTYPE_CABLETRON": "syscall", + "syscall.ETHERTYPE_CHAOS": "syscall", + "syscall.ETHERTYPE_COMDESIGN": "syscall", + "syscall.ETHERTYPE_COMPUGRAPHIC": "syscall", + "syscall.ETHERTYPE_COUNTERPOINT": "syscall", + "syscall.ETHERTYPE_CRONUS": "syscall", + "syscall.ETHERTYPE_CRONUSVLN": "syscall", + "syscall.ETHERTYPE_DCA": "syscall", + "syscall.ETHERTYPE_DDE": "syscall", + "syscall.ETHERTYPE_DEBNI": "syscall", + "syscall.ETHERTYPE_DECAM": "syscall", + "syscall.ETHERTYPE_DECCUST": "syscall", + "syscall.ETHERTYPE_DECDIAG": "syscall", + "syscall.ETHERTYPE_DECDNS": "syscall", + "syscall.ETHERTYPE_DECDTS": "syscall", + "syscall.ETHERTYPE_DECEXPER": "syscall", + "syscall.ETHERTYPE_DECLAST": "syscall", + "syscall.ETHERTYPE_DECLTM": "syscall", + "syscall.ETHERTYPE_DECMUMPS": "syscall", + "syscall.ETHERTYPE_DECNETBIOS": "syscall", + 
"syscall.ETHERTYPE_DELTACON": "syscall", + "syscall.ETHERTYPE_DIDDLE": "syscall", + "syscall.ETHERTYPE_DLOG1": "syscall", + "syscall.ETHERTYPE_DLOG2": "syscall", + "syscall.ETHERTYPE_DN": "syscall", + "syscall.ETHERTYPE_DOGFIGHT": "syscall", + "syscall.ETHERTYPE_DSMD": "syscall", + "syscall.ETHERTYPE_ECMA": "syscall", + "syscall.ETHERTYPE_ENCRYPT": "syscall", + "syscall.ETHERTYPE_ES": "syscall", + "syscall.ETHERTYPE_EXCELAN": "syscall", + "syscall.ETHERTYPE_EXPERDATA": "syscall", + "syscall.ETHERTYPE_FLIP": "syscall", + "syscall.ETHERTYPE_FLOWCONTROL": "syscall", + "syscall.ETHERTYPE_FRARP": "syscall", + "syscall.ETHERTYPE_GENDYN": "syscall", + "syscall.ETHERTYPE_HAYES": "syscall", + "syscall.ETHERTYPE_HIPPI_FP": "syscall", + "syscall.ETHERTYPE_HITACHI": "syscall", + "syscall.ETHERTYPE_HP": "syscall", + "syscall.ETHERTYPE_IEEEPUP": "syscall", + "syscall.ETHERTYPE_IEEEPUPAT": "syscall", + "syscall.ETHERTYPE_IMLBL": "syscall", + "syscall.ETHERTYPE_IMLBLDIAG": "syscall", + "syscall.ETHERTYPE_IP": "syscall", + "syscall.ETHERTYPE_IPAS": "syscall", + "syscall.ETHERTYPE_IPV6": "syscall", + "syscall.ETHERTYPE_IPX": "syscall", + "syscall.ETHERTYPE_IPXNEW": "syscall", + "syscall.ETHERTYPE_KALPANA": "syscall", + "syscall.ETHERTYPE_LANBRIDGE": "syscall", + "syscall.ETHERTYPE_LANPROBE": "syscall", + "syscall.ETHERTYPE_LAT": "syscall", + "syscall.ETHERTYPE_LBACK": "syscall", + "syscall.ETHERTYPE_LITTLE": "syscall", + "syscall.ETHERTYPE_LLDP": "syscall", + "syscall.ETHERTYPE_LOGICRAFT": "syscall", + "syscall.ETHERTYPE_LOOPBACK": "syscall", + "syscall.ETHERTYPE_MATRA": "syscall", + "syscall.ETHERTYPE_MAX": "syscall", + "syscall.ETHERTYPE_MERIT": "syscall", + "syscall.ETHERTYPE_MICP": "syscall", + "syscall.ETHERTYPE_MOPDL": "syscall", + "syscall.ETHERTYPE_MOPRC": "syscall", + "syscall.ETHERTYPE_MOTOROLA": "syscall", + "syscall.ETHERTYPE_MPLS": "syscall", + "syscall.ETHERTYPE_MPLS_MCAST": "syscall", + "syscall.ETHERTYPE_MUMPS": "syscall", + "syscall.ETHERTYPE_NBPCC": "syscall", + 
"syscall.ETHERTYPE_NBPCLAIM": "syscall", + "syscall.ETHERTYPE_NBPCLREQ": "syscall", + "syscall.ETHERTYPE_NBPCLRSP": "syscall", + "syscall.ETHERTYPE_NBPCREQ": "syscall", + "syscall.ETHERTYPE_NBPCRSP": "syscall", + "syscall.ETHERTYPE_NBPDG": "syscall", + "syscall.ETHERTYPE_NBPDGB": "syscall", + "syscall.ETHERTYPE_NBPDLTE": "syscall", + "syscall.ETHERTYPE_NBPRAR": "syscall", + "syscall.ETHERTYPE_NBPRAS": "syscall", + "syscall.ETHERTYPE_NBPRST": "syscall", + "syscall.ETHERTYPE_NBPSCD": "syscall", + "syscall.ETHERTYPE_NBPVCD": "syscall", + "syscall.ETHERTYPE_NBS": "syscall", + "syscall.ETHERTYPE_NCD": "syscall", + "syscall.ETHERTYPE_NESTAR": "syscall", + "syscall.ETHERTYPE_NETBEUI": "syscall", + "syscall.ETHERTYPE_NOVELL": "syscall", + "syscall.ETHERTYPE_NS": "syscall", + "syscall.ETHERTYPE_NSAT": "syscall", + "syscall.ETHERTYPE_NSCOMPAT": "syscall", + "syscall.ETHERTYPE_NTRAILER": "syscall", + "syscall.ETHERTYPE_OS9": "syscall", + "syscall.ETHERTYPE_OS9NET": "syscall", + "syscall.ETHERTYPE_PACER": "syscall", + "syscall.ETHERTYPE_PAE": "syscall", + "syscall.ETHERTYPE_PCS": "syscall", + "syscall.ETHERTYPE_PLANNING": "syscall", + "syscall.ETHERTYPE_PPP": "syscall", + "syscall.ETHERTYPE_PPPOE": "syscall", + "syscall.ETHERTYPE_PPPOEDISC": "syscall", + "syscall.ETHERTYPE_PRIMENTS": "syscall", + "syscall.ETHERTYPE_PUP": "syscall", + "syscall.ETHERTYPE_PUPAT": "syscall", + "syscall.ETHERTYPE_QINQ": "syscall", + "syscall.ETHERTYPE_RACAL": "syscall", + "syscall.ETHERTYPE_RATIONAL": "syscall", + "syscall.ETHERTYPE_RAWFR": "syscall", + "syscall.ETHERTYPE_RCL": "syscall", + "syscall.ETHERTYPE_RDP": "syscall", + "syscall.ETHERTYPE_RETIX": "syscall", + "syscall.ETHERTYPE_REVARP": "syscall", + "syscall.ETHERTYPE_SCA": "syscall", + "syscall.ETHERTYPE_SECTRA": "syscall", + "syscall.ETHERTYPE_SECUREDATA": "syscall", + "syscall.ETHERTYPE_SGITW": "syscall", + "syscall.ETHERTYPE_SG_BOUNCE": "syscall", + "syscall.ETHERTYPE_SG_DIAG": "syscall", + "syscall.ETHERTYPE_SG_NETGAMES": "syscall", + 
"syscall.ETHERTYPE_SG_RESV": "syscall", + "syscall.ETHERTYPE_SIMNET": "syscall", + "syscall.ETHERTYPE_SLOW": "syscall", + "syscall.ETHERTYPE_SLOWPROTOCOLS": "syscall", + "syscall.ETHERTYPE_SNA": "syscall", + "syscall.ETHERTYPE_SNMP": "syscall", + "syscall.ETHERTYPE_SONIX": "syscall", + "syscall.ETHERTYPE_SPIDER": "syscall", + "syscall.ETHERTYPE_SPRITE": "syscall", + "syscall.ETHERTYPE_STP": "syscall", + "syscall.ETHERTYPE_TALARIS": "syscall", + "syscall.ETHERTYPE_TALARISMC": "syscall", + "syscall.ETHERTYPE_TCPCOMP": "syscall", + "syscall.ETHERTYPE_TCPSM": "syscall", + "syscall.ETHERTYPE_TEC": "syscall", + "syscall.ETHERTYPE_TIGAN": "syscall", + "syscall.ETHERTYPE_TRAIL": "syscall", + "syscall.ETHERTYPE_TRANSETHER": "syscall", + "syscall.ETHERTYPE_TYMSHARE": "syscall", + "syscall.ETHERTYPE_UBBST": "syscall", + "syscall.ETHERTYPE_UBDEBUG": "syscall", + "syscall.ETHERTYPE_UBDIAGLOOP": "syscall", + "syscall.ETHERTYPE_UBDL": "syscall", + "syscall.ETHERTYPE_UBNIU": "syscall", + "syscall.ETHERTYPE_UBNMC": "syscall", + "syscall.ETHERTYPE_VALID": "syscall", + "syscall.ETHERTYPE_VARIAN": "syscall", + "syscall.ETHERTYPE_VAXELN": "syscall", + "syscall.ETHERTYPE_VEECO": "syscall", + "syscall.ETHERTYPE_VEXP": "syscall", + "syscall.ETHERTYPE_VGLAB": "syscall", + "syscall.ETHERTYPE_VINES": "syscall", + "syscall.ETHERTYPE_VINESECHO": "syscall", + "syscall.ETHERTYPE_VINESLOOP": "syscall", + "syscall.ETHERTYPE_VITAL": "syscall", + "syscall.ETHERTYPE_VLAN": "syscall", + "syscall.ETHERTYPE_VLTLMAN": "syscall", + "syscall.ETHERTYPE_VPROD": "syscall", + "syscall.ETHERTYPE_VURESERVED": "syscall", + "syscall.ETHERTYPE_WATERLOO": "syscall", + "syscall.ETHERTYPE_WELLFLEET": "syscall", + "syscall.ETHERTYPE_X25": "syscall", + "syscall.ETHERTYPE_X75": "syscall", + "syscall.ETHERTYPE_XNSSM": "syscall", + "syscall.ETHERTYPE_XTP": "syscall", + "syscall.ETHER_ADDR_LEN": "syscall", + "syscall.ETHER_ALIGN": "syscall", + "syscall.ETHER_CRC_LEN": "syscall", + "syscall.ETHER_CRC_POLY_BE": "syscall", + 
"syscall.ETHER_CRC_POLY_LE": "syscall", + "syscall.ETHER_HDR_LEN": "syscall", + "syscall.ETHER_MAX_DIX_LEN": "syscall", + "syscall.ETHER_MAX_LEN": "syscall", + "syscall.ETHER_MAX_LEN_JUMBO": "syscall", + "syscall.ETHER_MIN_LEN": "syscall", + "syscall.ETHER_PPPOE_ENCAP_LEN": "syscall", + "syscall.ETHER_TYPE_LEN": "syscall", + "syscall.ETHER_VLAN_ENCAP_LEN": "syscall", + "syscall.ETH_P_1588": "syscall", + "syscall.ETH_P_8021Q": "syscall", + "syscall.ETH_P_802_2": "syscall", + "syscall.ETH_P_802_3": "syscall", + "syscall.ETH_P_AARP": "syscall", + "syscall.ETH_P_ALL": "syscall", + "syscall.ETH_P_AOE": "syscall", + "syscall.ETH_P_ARCNET": "syscall", + "syscall.ETH_P_ARP": "syscall", + "syscall.ETH_P_ATALK": "syscall", + "syscall.ETH_P_ATMFATE": "syscall", + "syscall.ETH_P_ATMMPOA": "syscall", + "syscall.ETH_P_AX25": "syscall", + "syscall.ETH_P_BPQ": "syscall", + "syscall.ETH_P_CAIF": "syscall", + "syscall.ETH_P_CAN": "syscall", + "syscall.ETH_P_CONTROL": "syscall", + "syscall.ETH_P_CUST": "syscall", + "syscall.ETH_P_DDCMP": "syscall", + "syscall.ETH_P_DEC": "syscall", + "syscall.ETH_P_DIAG": "syscall", + "syscall.ETH_P_DNA_DL": "syscall", + "syscall.ETH_P_DNA_RC": "syscall", + "syscall.ETH_P_DNA_RT": "syscall", + "syscall.ETH_P_DSA": "syscall", + "syscall.ETH_P_ECONET": "syscall", + "syscall.ETH_P_EDSA": "syscall", + "syscall.ETH_P_FCOE": "syscall", + "syscall.ETH_P_FIP": "syscall", + "syscall.ETH_P_HDLC": "syscall", + "syscall.ETH_P_IEEE802154": "syscall", + "syscall.ETH_P_IEEEPUP": "syscall", + "syscall.ETH_P_IEEEPUPAT": "syscall", + "syscall.ETH_P_IP": "syscall", + "syscall.ETH_P_IPV6": "syscall", + "syscall.ETH_P_IPX": "syscall", + "syscall.ETH_P_IRDA": "syscall", + "syscall.ETH_P_LAT": "syscall", + "syscall.ETH_P_LINK_CTL": "syscall", + "syscall.ETH_P_LOCALTALK": "syscall", + "syscall.ETH_P_LOOP": "syscall", + "syscall.ETH_P_MOBITEX": "syscall", + "syscall.ETH_P_MPLS_MC": "syscall", + "syscall.ETH_P_MPLS_UC": "syscall", + "syscall.ETH_P_PAE": "syscall", + 
"syscall.ETH_P_PAUSE": "syscall", + "syscall.ETH_P_PHONET": "syscall", + "syscall.ETH_P_PPPTALK": "syscall", + "syscall.ETH_P_PPP_DISC": "syscall", + "syscall.ETH_P_PPP_MP": "syscall", + "syscall.ETH_P_PPP_SES": "syscall", + "syscall.ETH_P_PUP": "syscall", + "syscall.ETH_P_PUPAT": "syscall", + "syscall.ETH_P_RARP": "syscall", + "syscall.ETH_P_SCA": "syscall", + "syscall.ETH_P_SLOW": "syscall", + "syscall.ETH_P_SNAP": "syscall", + "syscall.ETH_P_TEB": "syscall", + "syscall.ETH_P_TIPC": "syscall", + "syscall.ETH_P_TRAILER": "syscall", + "syscall.ETH_P_TR_802_2": "syscall", + "syscall.ETH_P_WAN_PPP": "syscall", + "syscall.ETH_P_WCCP": "syscall", + "syscall.ETH_P_X25": "syscall", + "syscall.ETIME": "syscall", + "syscall.ETIMEDOUT": "syscall", + "syscall.ETOOMANYREFS": "syscall", + "syscall.ETXTBSY": "syscall", + "syscall.EUCLEAN": "syscall", + "syscall.EUNATCH": "syscall", + "syscall.EUSERS": "syscall", + "syscall.EVFILT_AIO": "syscall", + "syscall.EVFILT_FS": "syscall", + "syscall.EVFILT_LIO": "syscall", + "syscall.EVFILT_MACHPORT": "syscall", + "syscall.EVFILT_PROC": "syscall", + "syscall.EVFILT_READ": "syscall", + "syscall.EVFILT_SIGNAL": "syscall", + "syscall.EVFILT_SYSCOUNT": "syscall", + "syscall.EVFILT_THREADMARKER": "syscall", + "syscall.EVFILT_TIMER": "syscall", + "syscall.EVFILT_USER": "syscall", + "syscall.EVFILT_VM": "syscall", + "syscall.EVFILT_VNODE": "syscall", + "syscall.EVFILT_WRITE": "syscall", + "syscall.EV_ADD": "syscall", + "syscall.EV_CLEAR": "syscall", + "syscall.EV_DELETE": "syscall", + "syscall.EV_DISABLE": "syscall", + "syscall.EV_DISPATCH": "syscall", + "syscall.EV_DROP": "syscall", + "syscall.EV_ENABLE": "syscall", + "syscall.EV_EOF": "syscall", + "syscall.EV_ERROR": "syscall", + "syscall.EV_FLAG0": "syscall", + "syscall.EV_FLAG1": "syscall", + "syscall.EV_ONESHOT": "syscall", + "syscall.EV_OOBAND": "syscall", + "syscall.EV_POLL": "syscall", + "syscall.EV_RECEIPT": "syscall", + "syscall.EV_SYSFLAGS": "syscall", + "syscall.EWINDOWS": 
"syscall", + "syscall.EWOULDBLOCK": "syscall", + "syscall.EXDEV": "syscall", + "syscall.EXFULL": "syscall", + "syscall.EXTA": "syscall", + "syscall.EXTB": "syscall", + "syscall.EXTPROC": "syscall", + "syscall.Environ": "syscall", + "syscall.EpollCreate": "syscall", + "syscall.EpollCreate1": "syscall", + "syscall.EpollCtl": "syscall", + "syscall.EpollEvent": "syscall", + "syscall.EpollWait": "syscall", + "syscall.Errno": "syscall", + "syscall.EscapeArg": "syscall", + "syscall.Exchangedata": "syscall", + "syscall.Exec": "syscall", + "syscall.Exit": "syscall", + "syscall.ExitProcess": "syscall", + "syscall.FD_CLOEXEC": "syscall", + "syscall.FD_SETSIZE": "syscall", + "syscall.FILE_ACTION_ADDED": "syscall", + "syscall.FILE_ACTION_MODIFIED": "syscall", + "syscall.FILE_ACTION_REMOVED": "syscall", + "syscall.FILE_ACTION_RENAMED_NEW_NAME": "syscall", + "syscall.FILE_ACTION_RENAMED_OLD_NAME": "syscall", + "syscall.FILE_APPEND_DATA": "syscall", + "syscall.FILE_ATTRIBUTE_ARCHIVE": "syscall", + "syscall.FILE_ATTRIBUTE_DIRECTORY": "syscall", + "syscall.FILE_ATTRIBUTE_HIDDEN": "syscall", + "syscall.FILE_ATTRIBUTE_NORMAL": "syscall", + "syscall.FILE_ATTRIBUTE_READONLY": "syscall", + "syscall.FILE_ATTRIBUTE_REPARSE_POINT": "syscall", + "syscall.FILE_ATTRIBUTE_SYSTEM": "syscall", + "syscall.FILE_BEGIN": "syscall", + "syscall.FILE_CURRENT": "syscall", + "syscall.FILE_END": "syscall", + "syscall.FILE_FLAG_BACKUP_SEMANTICS": "syscall", + "syscall.FILE_FLAG_OPEN_REPARSE_POINT": "syscall", + "syscall.FILE_FLAG_OVERLAPPED": "syscall", + "syscall.FILE_LIST_DIRECTORY": "syscall", + "syscall.FILE_MAP_COPY": "syscall", + "syscall.FILE_MAP_EXECUTE": "syscall", + "syscall.FILE_MAP_READ": "syscall", + "syscall.FILE_MAP_WRITE": "syscall", + "syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES": "syscall", + "syscall.FILE_NOTIFY_CHANGE_CREATION": "syscall", + "syscall.FILE_NOTIFY_CHANGE_DIR_NAME": "syscall", + "syscall.FILE_NOTIFY_CHANGE_FILE_NAME": "syscall", + "syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS": 
"syscall", + "syscall.FILE_NOTIFY_CHANGE_LAST_WRITE": "syscall", + "syscall.FILE_NOTIFY_CHANGE_SIZE": "syscall", + "syscall.FILE_SHARE_DELETE": "syscall", + "syscall.FILE_SHARE_READ": "syscall", + "syscall.FILE_SHARE_WRITE": "syscall", + "syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS": "syscall", + "syscall.FILE_SKIP_SET_EVENT_ON_HANDLE": "syscall", + "syscall.FILE_TYPE_CHAR": "syscall", + "syscall.FILE_TYPE_DISK": "syscall", + "syscall.FILE_TYPE_PIPE": "syscall", + "syscall.FILE_TYPE_REMOTE": "syscall", + "syscall.FILE_TYPE_UNKNOWN": "syscall", + "syscall.FILE_WRITE_ATTRIBUTES": "syscall", + "syscall.FLUSHO": "syscall", + "syscall.FORMAT_MESSAGE_ALLOCATE_BUFFER": "syscall", + "syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY": "syscall", + "syscall.FORMAT_MESSAGE_FROM_HMODULE": "syscall", + "syscall.FORMAT_MESSAGE_FROM_STRING": "syscall", + "syscall.FORMAT_MESSAGE_FROM_SYSTEM": "syscall", + "syscall.FORMAT_MESSAGE_IGNORE_INSERTS": "syscall", + "syscall.FORMAT_MESSAGE_MAX_WIDTH_MASK": "syscall", + "syscall.FSCTL_GET_REPARSE_POINT": "syscall", + "syscall.F_ADDFILESIGS": "syscall", + "syscall.F_ADDSIGS": "syscall", + "syscall.F_ALLOCATEALL": "syscall", + "syscall.F_ALLOCATECONTIG": "syscall", + "syscall.F_CANCEL": "syscall", + "syscall.F_CHKCLEAN": "syscall", + "syscall.F_CLOSEM": "syscall", + "syscall.F_DUP2FD": "syscall", + "syscall.F_DUP2FD_CLOEXEC": "syscall", + "syscall.F_DUPFD": "syscall", + "syscall.F_DUPFD_CLOEXEC": "syscall", + "syscall.F_EXLCK": "syscall", + "syscall.F_FLUSH_DATA": "syscall", + "syscall.F_FREEZE_FS": "syscall", + "syscall.F_FSCTL": "syscall", + "syscall.F_FSDIRMASK": "syscall", + "syscall.F_FSIN": "syscall", + "syscall.F_FSINOUT": "syscall", + "syscall.F_FSOUT": "syscall", + "syscall.F_FSPRIV": "syscall", + "syscall.F_FSVOID": "syscall", + "syscall.F_FULLFSYNC": "syscall", + "syscall.F_GETFD": "syscall", + "syscall.F_GETFL": "syscall", + "syscall.F_GETLEASE": "syscall", + "syscall.F_GETLK": "syscall", + "syscall.F_GETLK64": "syscall", + 
"syscall.F_GETLKPID": "syscall", + "syscall.F_GETNOSIGPIPE": "syscall", + "syscall.F_GETOWN": "syscall", + "syscall.F_GETOWN_EX": "syscall", + "syscall.F_GETPATH": "syscall", + "syscall.F_GETPATH_MTMINFO": "syscall", + "syscall.F_GETPIPE_SZ": "syscall", + "syscall.F_GETPROTECTIONCLASS": "syscall", + "syscall.F_GETSIG": "syscall", + "syscall.F_GLOBAL_NOCACHE": "syscall", + "syscall.F_LOCK": "syscall", + "syscall.F_LOG2PHYS": "syscall", + "syscall.F_LOG2PHYS_EXT": "syscall", + "syscall.F_MARKDEPENDENCY": "syscall", + "syscall.F_MAXFD": "syscall", + "syscall.F_NOCACHE": "syscall", + "syscall.F_NODIRECT": "syscall", + "syscall.F_NOTIFY": "syscall", + "syscall.F_OGETLK": "syscall", + "syscall.F_OK": "syscall", + "syscall.F_OSETLK": "syscall", + "syscall.F_OSETLKW": "syscall", + "syscall.F_PARAM_MASK": "syscall", + "syscall.F_PARAM_MAX": "syscall", + "syscall.F_PATHPKG_CHECK": "syscall", + "syscall.F_PEOFPOSMODE": "syscall", + "syscall.F_PREALLOCATE": "syscall", + "syscall.F_RDADVISE": "syscall", + "syscall.F_RDAHEAD": "syscall", + "syscall.F_RDLCK": "syscall", + "syscall.F_READAHEAD": "syscall", + "syscall.F_READBOOTSTRAP": "syscall", + "syscall.F_SETBACKINGSTORE": "syscall", + "syscall.F_SETFD": "syscall", + "syscall.F_SETFL": "syscall", + "syscall.F_SETLEASE": "syscall", + "syscall.F_SETLK": "syscall", + "syscall.F_SETLK64": "syscall", + "syscall.F_SETLKW": "syscall", + "syscall.F_SETLKW64": "syscall", + "syscall.F_SETLK_REMOTE": "syscall", + "syscall.F_SETNOSIGPIPE": "syscall", + "syscall.F_SETOWN": "syscall", + "syscall.F_SETOWN_EX": "syscall", + "syscall.F_SETPIPE_SZ": "syscall", + "syscall.F_SETPROTECTIONCLASS": "syscall", + "syscall.F_SETSIG": "syscall", + "syscall.F_SETSIZE": "syscall", + "syscall.F_SHLCK": "syscall", + "syscall.F_TEST": "syscall", + "syscall.F_THAW_FS": "syscall", + "syscall.F_TLOCK": "syscall", + "syscall.F_ULOCK": "syscall", + "syscall.F_UNLCK": "syscall", + "syscall.F_UNLCKSYS": "syscall", + "syscall.F_VOLPOSMODE": "syscall", + 
"syscall.F_WRITEBOOTSTRAP": "syscall", + "syscall.F_WRLCK": "syscall", + "syscall.Faccessat": "syscall", + "syscall.Fallocate": "syscall", + "syscall.Fbootstraptransfer_t": "syscall", + "syscall.Fchdir": "syscall", + "syscall.Fchflags": "syscall", + "syscall.Fchmod": "syscall", + "syscall.Fchmodat": "syscall", + "syscall.Fchown": "syscall", + "syscall.Fchownat": "syscall", + "syscall.FcntlFlock": "syscall", + "syscall.FdSet": "syscall", + "syscall.Fdatasync": "syscall", + "syscall.FileNotifyInformation": "syscall", + "syscall.Filetime": "syscall", + "syscall.FindClose": "syscall", + "syscall.FindFirstFile": "syscall", + "syscall.FindNextFile": "syscall", + "syscall.Flock": "syscall", + "syscall.Flock_t": "syscall", + "syscall.FlushBpf": "syscall", + "syscall.FlushFileBuffers": "syscall", + "syscall.FlushViewOfFile": "syscall", + "syscall.ForkExec": "syscall", + "syscall.ForkLock": "syscall", + "syscall.FormatMessage": "syscall", + "syscall.Fpathconf": "syscall", + "syscall.FreeAddrInfoW": "syscall", + "syscall.FreeEnvironmentStrings": "syscall", + "syscall.FreeLibrary": "syscall", + "syscall.Fsid": "syscall", + "syscall.Fstat": "syscall", + "syscall.Fstatfs": "syscall", + "syscall.Fstore_t": "syscall", + "syscall.Fsync": "syscall", + "syscall.Ftruncate": "syscall", + "syscall.FullPath": "syscall", + "syscall.Futimes": "syscall", + "syscall.Futimesat": "syscall", + "syscall.GENERIC_ALL": "syscall", + "syscall.GENERIC_EXECUTE": "syscall", + "syscall.GENERIC_READ": "syscall", + "syscall.GENERIC_WRITE": "syscall", + "syscall.GUID": "syscall", + "syscall.GetAcceptExSockaddrs": "syscall", + "syscall.GetAdaptersInfo": "syscall", + "syscall.GetAddrInfoW": "syscall", + "syscall.GetCommandLine": "syscall", + "syscall.GetComputerName": "syscall", + "syscall.GetConsoleMode": "syscall", + "syscall.GetCurrentDirectory": "syscall", + "syscall.GetCurrentProcess": "syscall", + "syscall.GetEnvironmentStrings": "syscall", + "syscall.GetEnvironmentVariable": "syscall", + 
"syscall.GetExitCodeProcess": "syscall", + "syscall.GetFileAttributes": "syscall", + "syscall.GetFileAttributesEx": "syscall", + "syscall.GetFileExInfoStandard": "syscall", + "syscall.GetFileExMaxInfoLevel": "syscall", + "syscall.GetFileInformationByHandle": "syscall", + "syscall.GetFileType": "syscall", + "syscall.GetFullPathName": "syscall", + "syscall.GetHostByName": "syscall", + "syscall.GetIfEntry": "syscall", + "syscall.GetLastError": "syscall", + "syscall.GetLengthSid": "syscall", + "syscall.GetLongPathName": "syscall", + "syscall.GetProcAddress": "syscall", + "syscall.GetProcessTimes": "syscall", + "syscall.GetProtoByName": "syscall", + "syscall.GetQueuedCompletionStatus": "syscall", + "syscall.GetServByName": "syscall", + "syscall.GetShortPathName": "syscall", + "syscall.GetStartupInfo": "syscall", + "syscall.GetStdHandle": "syscall", + "syscall.GetSystemTimeAsFileTime": "syscall", + "syscall.GetTempPath": "syscall", + "syscall.GetTimeZoneInformation": "syscall", + "syscall.GetTokenInformation": "syscall", + "syscall.GetUserNameEx": "syscall", + "syscall.GetUserProfileDirectory": "syscall", + "syscall.GetVersion": "syscall", + "syscall.Getcwd": "syscall", + "syscall.Getdents": "syscall", + "syscall.Getdirentries": "syscall", + "syscall.Getdtablesize": "syscall", + "syscall.Getegid": "syscall", + "syscall.Getenv": "syscall", + "syscall.Geteuid": "syscall", + "syscall.Getfsstat": "syscall", + "syscall.Getgid": "syscall", + "syscall.Getgroups": "syscall", + "syscall.Getpagesize": "syscall", + "syscall.Getpeername": "syscall", + "syscall.Getpgid": "syscall", + "syscall.Getpgrp": "syscall", + "syscall.Getpid": "syscall", + "syscall.Getppid": "syscall", + "syscall.Getpriority": "syscall", + "syscall.Getrlimit": "syscall", + "syscall.Getrusage": "syscall", + "syscall.Getsid": "syscall", + "syscall.Getsockname": "syscall", + "syscall.Getsockopt": "syscall", + "syscall.GetsockoptByte": "syscall", + "syscall.GetsockoptICMPv6Filter": "syscall", + 
"syscall.GetsockoptIPMreq": "syscall", + "syscall.GetsockoptIPMreqn": "syscall", + "syscall.GetsockoptIPv6MTUInfo": "syscall", + "syscall.GetsockoptIPv6Mreq": "syscall", + "syscall.GetsockoptInet4Addr": "syscall", + "syscall.GetsockoptInt": "syscall", + "syscall.GetsockoptUcred": "syscall", + "syscall.Gettid": "syscall", + "syscall.Gettimeofday": "syscall", + "syscall.Getuid": "syscall", + "syscall.Getwd": "syscall", + "syscall.Getxattr": "syscall", + "syscall.HANDLE_FLAG_INHERIT": "syscall", + "syscall.HKEY_CLASSES_ROOT": "syscall", + "syscall.HKEY_CURRENT_CONFIG": "syscall", + "syscall.HKEY_CURRENT_USER": "syscall", + "syscall.HKEY_DYN_DATA": "syscall", + "syscall.HKEY_LOCAL_MACHINE": "syscall", + "syscall.HKEY_PERFORMANCE_DATA": "syscall", + "syscall.HKEY_USERS": "syscall", + "syscall.HUPCL": "syscall", + "syscall.Handle": "syscall", + "syscall.Hostent": "syscall", + "syscall.ICANON": "syscall", + "syscall.ICMP6_FILTER": "syscall", + "syscall.ICMPV6_FILTER": "syscall", + "syscall.ICMPv6Filter": "syscall", + "syscall.ICRNL": "syscall", + "syscall.IEXTEN": "syscall", + "syscall.IFAN_ARRIVAL": "syscall", + "syscall.IFAN_DEPARTURE": "syscall", + "syscall.IFA_ADDRESS": "syscall", + "syscall.IFA_ANYCAST": "syscall", + "syscall.IFA_BROADCAST": "syscall", + "syscall.IFA_CACHEINFO": "syscall", + "syscall.IFA_F_DADFAILED": "syscall", + "syscall.IFA_F_DEPRECATED": "syscall", + "syscall.IFA_F_HOMEADDRESS": "syscall", + "syscall.IFA_F_NODAD": "syscall", + "syscall.IFA_F_OPTIMISTIC": "syscall", + "syscall.IFA_F_PERMANENT": "syscall", + "syscall.IFA_F_SECONDARY": "syscall", + "syscall.IFA_F_TEMPORARY": "syscall", + "syscall.IFA_F_TENTATIVE": "syscall", + "syscall.IFA_LABEL": "syscall", + "syscall.IFA_LOCAL": "syscall", + "syscall.IFA_MAX": "syscall", + "syscall.IFA_MULTICAST": "syscall", + "syscall.IFA_ROUTE": "syscall", + "syscall.IFA_UNSPEC": "syscall", + "syscall.IFF_ALLMULTI": "syscall", + "syscall.IFF_ALTPHYS": "syscall", + "syscall.IFF_AUTOMEDIA": "syscall", + 
"syscall.IFF_BROADCAST": "syscall", + "syscall.IFF_CANTCHANGE": "syscall", + "syscall.IFF_CANTCONFIG": "syscall", + "syscall.IFF_DEBUG": "syscall", + "syscall.IFF_DRV_OACTIVE": "syscall", + "syscall.IFF_DRV_RUNNING": "syscall", + "syscall.IFF_DYING": "syscall", + "syscall.IFF_DYNAMIC": "syscall", + "syscall.IFF_LINK0": "syscall", + "syscall.IFF_LINK1": "syscall", + "syscall.IFF_LINK2": "syscall", + "syscall.IFF_LOOPBACK": "syscall", + "syscall.IFF_MASTER": "syscall", + "syscall.IFF_MONITOR": "syscall", + "syscall.IFF_MULTICAST": "syscall", + "syscall.IFF_NOARP": "syscall", + "syscall.IFF_NOTRAILERS": "syscall", + "syscall.IFF_NO_PI": "syscall", + "syscall.IFF_OACTIVE": "syscall", + "syscall.IFF_ONE_QUEUE": "syscall", + "syscall.IFF_POINTOPOINT": "syscall", + "syscall.IFF_POINTTOPOINT": "syscall", + "syscall.IFF_PORTSEL": "syscall", + "syscall.IFF_PPROMISC": "syscall", + "syscall.IFF_PROMISC": "syscall", + "syscall.IFF_RENAMING": "syscall", + "syscall.IFF_RUNNING": "syscall", + "syscall.IFF_SIMPLEX": "syscall", + "syscall.IFF_SLAVE": "syscall", + "syscall.IFF_SMART": "syscall", + "syscall.IFF_STATICARP": "syscall", + "syscall.IFF_TAP": "syscall", + "syscall.IFF_TUN": "syscall", + "syscall.IFF_TUN_EXCL": "syscall", + "syscall.IFF_UP": "syscall", + "syscall.IFF_VNET_HDR": "syscall", + "syscall.IFLA_ADDRESS": "syscall", + "syscall.IFLA_BROADCAST": "syscall", + "syscall.IFLA_COST": "syscall", + "syscall.IFLA_IFALIAS": "syscall", + "syscall.IFLA_IFNAME": "syscall", + "syscall.IFLA_LINK": "syscall", + "syscall.IFLA_LINKINFO": "syscall", + "syscall.IFLA_LINKMODE": "syscall", + "syscall.IFLA_MAP": "syscall", + "syscall.IFLA_MASTER": "syscall", + "syscall.IFLA_MAX": "syscall", + "syscall.IFLA_MTU": "syscall", + "syscall.IFLA_NET_NS_PID": "syscall", + "syscall.IFLA_OPERSTATE": "syscall", + "syscall.IFLA_PRIORITY": "syscall", + "syscall.IFLA_PROTINFO": "syscall", + "syscall.IFLA_QDISC": "syscall", + "syscall.IFLA_STATS": "syscall", + "syscall.IFLA_TXQLEN": "syscall", + 
"syscall.IFLA_UNSPEC": "syscall", + "syscall.IFLA_WEIGHT": "syscall", + "syscall.IFLA_WIRELESS": "syscall", + "syscall.IFNAMSIZ": "syscall", + "syscall.IFT_1822": "syscall", + "syscall.IFT_A12MPPSWITCH": "syscall", + "syscall.IFT_AAL2": "syscall", + "syscall.IFT_AAL5": "syscall", + "syscall.IFT_ADSL": "syscall", + "syscall.IFT_AFLANE8023": "syscall", + "syscall.IFT_AFLANE8025": "syscall", + "syscall.IFT_ARAP": "syscall", + "syscall.IFT_ARCNET": "syscall", + "syscall.IFT_ARCNETPLUS": "syscall", + "syscall.IFT_ASYNC": "syscall", + "syscall.IFT_ATM": "syscall", + "syscall.IFT_ATMDXI": "syscall", + "syscall.IFT_ATMFUNI": "syscall", + "syscall.IFT_ATMIMA": "syscall", + "syscall.IFT_ATMLOGICAL": "syscall", + "syscall.IFT_ATMRADIO": "syscall", + "syscall.IFT_ATMSUBINTERFACE": "syscall", + "syscall.IFT_ATMVCIENDPT": "syscall", + "syscall.IFT_ATMVIRTUAL": "syscall", + "syscall.IFT_BGPPOLICYACCOUNTING": "syscall", + "syscall.IFT_BLUETOOTH": "syscall", + "syscall.IFT_BRIDGE": "syscall", + "syscall.IFT_BSC": "syscall", + "syscall.IFT_CARP": "syscall", + "syscall.IFT_CCTEMUL": "syscall", + "syscall.IFT_CELLULAR": "syscall", + "syscall.IFT_CEPT": "syscall", + "syscall.IFT_CES": "syscall", + "syscall.IFT_CHANNEL": "syscall", + "syscall.IFT_CNR": "syscall", + "syscall.IFT_COFFEE": "syscall", + "syscall.IFT_COMPOSITELINK": "syscall", + "syscall.IFT_DCN": "syscall", + "syscall.IFT_DIGITALPOWERLINE": "syscall", + "syscall.IFT_DIGITALWRAPPEROVERHEADCHANNEL": "syscall", + "syscall.IFT_DLSW": "syscall", + "syscall.IFT_DOCSCABLEDOWNSTREAM": "syscall", + "syscall.IFT_DOCSCABLEMACLAYER": "syscall", + "syscall.IFT_DOCSCABLEUPSTREAM": "syscall", + "syscall.IFT_DOCSCABLEUPSTREAMCHANNEL": "syscall", + "syscall.IFT_DS0": "syscall", + "syscall.IFT_DS0BUNDLE": "syscall", + "syscall.IFT_DS1FDL": "syscall", + "syscall.IFT_DS3": "syscall", + "syscall.IFT_DTM": "syscall", + "syscall.IFT_DUMMY": "syscall", + "syscall.IFT_DVBASILN": "syscall", + "syscall.IFT_DVBASIOUT": "syscall", + 
"syscall.IFT_DVBRCCDOWNSTREAM": "syscall", + "syscall.IFT_DVBRCCMACLAYER": "syscall", + "syscall.IFT_DVBRCCUPSTREAM": "syscall", + "syscall.IFT_ECONET": "syscall", + "syscall.IFT_ENC": "syscall", + "syscall.IFT_EON": "syscall", + "syscall.IFT_EPLRS": "syscall", + "syscall.IFT_ESCON": "syscall", + "syscall.IFT_ETHER": "syscall", + "syscall.IFT_FAITH": "syscall", + "syscall.IFT_FAST": "syscall", + "syscall.IFT_FASTETHER": "syscall", + "syscall.IFT_FASTETHERFX": "syscall", + "syscall.IFT_FDDI": "syscall", + "syscall.IFT_FIBRECHANNEL": "syscall", + "syscall.IFT_FRAMERELAYINTERCONNECT": "syscall", + "syscall.IFT_FRAMERELAYMPI": "syscall", + "syscall.IFT_FRDLCIENDPT": "syscall", + "syscall.IFT_FRELAY": "syscall", + "syscall.IFT_FRELAYDCE": "syscall", + "syscall.IFT_FRF16MFRBUNDLE": "syscall", + "syscall.IFT_FRFORWARD": "syscall", + "syscall.IFT_G703AT2MB": "syscall", + "syscall.IFT_G703AT64K": "syscall", + "syscall.IFT_GIF": "syscall", + "syscall.IFT_GIGABITETHERNET": "syscall", + "syscall.IFT_GR303IDT": "syscall", + "syscall.IFT_GR303RDT": "syscall", + "syscall.IFT_H323GATEKEEPER": "syscall", + "syscall.IFT_H323PROXY": "syscall", + "syscall.IFT_HDH1822": "syscall", + "syscall.IFT_HDLC": "syscall", + "syscall.IFT_HDSL2": "syscall", + "syscall.IFT_HIPERLAN2": "syscall", + "syscall.IFT_HIPPI": "syscall", + "syscall.IFT_HIPPIINTERFACE": "syscall", + "syscall.IFT_HOSTPAD": "syscall", + "syscall.IFT_HSSI": "syscall", + "syscall.IFT_HY": "syscall", + "syscall.IFT_IBM370PARCHAN": "syscall", + "syscall.IFT_IDSL": "syscall", + "syscall.IFT_IEEE1394": "syscall", + "syscall.IFT_IEEE80211": "syscall", + "syscall.IFT_IEEE80212": "syscall", + "syscall.IFT_IEEE8023ADLAG": "syscall", + "syscall.IFT_IFGSN": "syscall", + "syscall.IFT_IMT": "syscall", + "syscall.IFT_INFINIBAND": "syscall", + "syscall.IFT_INTERLEAVE": "syscall", + "syscall.IFT_IP": "syscall", + "syscall.IFT_IPFORWARD": "syscall", + "syscall.IFT_IPOVERATM": "syscall", + "syscall.IFT_IPOVERCDLC": "syscall", + 
"syscall.IFT_IPOVERCLAW": "syscall", + "syscall.IFT_IPSWITCH": "syscall", + "syscall.IFT_IPXIP": "syscall", + "syscall.IFT_ISDN": "syscall", + "syscall.IFT_ISDNBASIC": "syscall", + "syscall.IFT_ISDNPRIMARY": "syscall", + "syscall.IFT_ISDNS": "syscall", + "syscall.IFT_ISDNU": "syscall", + "syscall.IFT_ISO88022LLC": "syscall", + "syscall.IFT_ISO88023": "syscall", + "syscall.IFT_ISO88024": "syscall", + "syscall.IFT_ISO88025": "syscall", + "syscall.IFT_ISO88025CRFPINT": "syscall", + "syscall.IFT_ISO88025DTR": "syscall", + "syscall.IFT_ISO88025FIBER": "syscall", + "syscall.IFT_ISO88026": "syscall", + "syscall.IFT_ISUP": "syscall", + "syscall.IFT_L2VLAN": "syscall", + "syscall.IFT_L3IPVLAN": "syscall", + "syscall.IFT_L3IPXVLAN": "syscall", + "syscall.IFT_LAPB": "syscall", + "syscall.IFT_LAPD": "syscall", + "syscall.IFT_LAPF": "syscall", + "syscall.IFT_LINEGROUP": "syscall", + "syscall.IFT_LOCALTALK": "syscall", + "syscall.IFT_LOOP": "syscall", + "syscall.IFT_MEDIAMAILOVERIP": "syscall", + "syscall.IFT_MFSIGLINK": "syscall", + "syscall.IFT_MIOX25": "syscall", + "syscall.IFT_MODEM": "syscall", + "syscall.IFT_MPC": "syscall", + "syscall.IFT_MPLS": "syscall", + "syscall.IFT_MPLSTUNNEL": "syscall", + "syscall.IFT_MSDSL": "syscall", + "syscall.IFT_MVL": "syscall", + "syscall.IFT_MYRINET": "syscall", + "syscall.IFT_NFAS": "syscall", + "syscall.IFT_NSIP": "syscall", + "syscall.IFT_OPTICALCHANNEL": "syscall", + "syscall.IFT_OPTICALTRANSPORT": "syscall", + "syscall.IFT_OTHER": "syscall", + "syscall.IFT_P10": "syscall", + "syscall.IFT_P80": "syscall", + "syscall.IFT_PARA": "syscall", + "syscall.IFT_PDP": "syscall", + "syscall.IFT_PFLOG": "syscall", + "syscall.IFT_PFLOW": "syscall", + "syscall.IFT_PFSYNC": "syscall", + "syscall.IFT_PLC": "syscall", + "syscall.IFT_PON155": "syscall", + "syscall.IFT_PON622": "syscall", + "syscall.IFT_POS": "syscall", + "syscall.IFT_PPP": "syscall", + "syscall.IFT_PPPMULTILINKBUNDLE": "syscall", + "syscall.IFT_PROPATM": "syscall", + 
"syscall.IFT_PROPBWAP2MP": "syscall", + "syscall.IFT_PROPCNLS": "syscall", + "syscall.IFT_PROPDOCSWIRELESSDOWNSTREAM": "syscall", + "syscall.IFT_PROPDOCSWIRELESSMACLAYER": "syscall", + "syscall.IFT_PROPDOCSWIRELESSUPSTREAM": "syscall", + "syscall.IFT_PROPMUX": "syscall", + "syscall.IFT_PROPVIRTUAL": "syscall", + "syscall.IFT_PROPWIRELESSP2P": "syscall", + "syscall.IFT_PTPSERIAL": "syscall", + "syscall.IFT_PVC": "syscall", + "syscall.IFT_Q2931": "syscall", + "syscall.IFT_QLLC": "syscall", + "syscall.IFT_RADIOMAC": "syscall", + "syscall.IFT_RADSL": "syscall", + "syscall.IFT_REACHDSL": "syscall", + "syscall.IFT_RFC1483": "syscall", + "syscall.IFT_RS232": "syscall", + "syscall.IFT_RSRB": "syscall", + "syscall.IFT_SDLC": "syscall", + "syscall.IFT_SDSL": "syscall", + "syscall.IFT_SHDSL": "syscall", + "syscall.IFT_SIP": "syscall", + "syscall.IFT_SIPSIG": "syscall", + "syscall.IFT_SIPTG": "syscall", + "syscall.IFT_SLIP": "syscall", + "syscall.IFT_SMDSDXI": "syscall", + "syscall.IFT_SMDSICIP": "syscall", + "syscall.IFT_SONET": "syscall", + "syscall.IFT_SONETOVERHEADCHANNEL": "syscall", + "syscall.IFT_SONETPATH": "syscall", + "syscall.IFT_SONETVT": "syscall", + "syscall.IFT_SRP": "syscall", + "syscall.IFT_SS7SIGLINK": "syscall", + "syscall.IFT_STACKTOSTACK": "syscall", + "syscall.IFT_STARLAN": "syscall", + "syscall.IFT_STF": "syscall", + "syscall.IFT_T1": "syscall", + "syscall.IFT_TDLC": "syscall", + "syscall.IFT_TELINK": "syscall", + "syscall.IFT_TERMPAD": "syscall", + "syscall.IFT_TR008": "syscall", + "syscall.IFT_TRANSPHDLC": "syscall", + "syscall.IFT_TUNNEL": "syscall", + "syscall.IFT_ULTRA": "syscall", + "syscall.IFT_USB": "syscall", + "syscall.IFT_V11": "syscall", + "syscall.IFT_V35": "syscall", + "syscall.IFT_V36": "syscall", + "syscall.IFT_V37": "syscall", + "syscall.IFT_VDSL": "syscall", + "syscall.IFT_VIRTUALIPADDRESS": "syscall", + "syscall.IFT_VIRTUALTG": "syscall", + "syscall.IFT_VOICEDID": "syscall", + "syscall.IFT_VOICEEM": "syscall", + 
"syscall.IFT_VOICEEMFGD": "syscall", + "syscall.IFT_VOICEENCAP": "syscall", + "syscall.IFT_VOICEFGDEANA": "syscall", + "syscall.IFT_VOICEFXO": "syscall", + "syscall.IFT_VOICEFXS": "syscall", + "syscall.IFT_VOICEOVERATM": "syscall", + "syscall.IFT_VOICEOVERCABLE": "syscall", + "syscall.IFT_VOICEOVERFRAMERELAY": "syscall", + "syscall.IFT_VOICEOVERIP": "syscall", + "syscall.IFT_X213": "syscall", + "syscall.IFT_X25": "syscall", + "syscall.IFT_X25DDN": "syscall", + "syscall.IFT_X25HUNTGROUP": "syscall", + "syscall.IFT_X25MLP": "syscall", + "syscall.IFT_X25PLE": "syscall", + "syscall.IFT_XETHER": "syscall", + "syscall.IGNBRK": "syscall", + "syscall.IGNCR": "syscall", + "syscall.IGNORE": "syscall", + "syscall.IGNPAR": "syscall", + "syscall.IMAXBEL": "syscall", + "syscall.INFINITE": "syscall", + "syscall.INLCR": "syscall", + "syscall.INPCK": "syscall", + "syscall.INVALID_FILE_ATTRIBUTES": "syscall", + "syscall.IN_ACCESS": "syscall", + "syscall.IN_ALL_EVENTS": "syscall", + "syscall.IN_ATTRIB": "syscall", + "syscall.IN_CLASSA_HOST": "syscall", + "syscall.IN_CLASSA_MAX": "syscall", + "syscall.IN_CLASSA_NET": "syscall", + "syscall.IN_CLASSA_NSHIFT": "syscall", + "syscall.IN_CLASSB_HOST": "syscall", + "syscall.IN_CLASSB_MAX": "syscall", + "syscall.IN_CLASSB_NET": "syscall", + "syscall.IN_CLASSB_NSHIFT": "syscall", + "syscall.IN_CLASSC_HOST": "syscall", + "syscall.IN_CLASSC_NET": "syscall", + "syscall.IN_CLASSC_NSHIFT": "syscall", + "syscall.IN_CLASSD_HOST": "syscall", + "syscall.IN_CLASSD_NET": "syscall", + "syscall.IN_CLASSD_NSHIFT": "syscall", + "syscall.IN_CLOEXEC": "syscall", + "syscall.IN_CLOSE": "syscall", + "syscall.IN_CLOSE_NOWRITE": "syscall", + "syscall.IN_CLOSE_WRITE": "syscall", + "syscall.IN_CREATE": "syscall", + "syscall.IN_DELETE": "syscall", + "syscall.IN_DELETE_SELF": "syscall", + "syscall.IN_DONT_FOLLOW": "syscall", + "syscall.IN_EXCL_UNLINK": "syscall", + "syscall.IN_IGNORED": "syscall", + "syscall.IN_ISDIR": "syscall", + "syscall.IN_LINKLOCALNETNUM": 
"syscall", + "syscall.IN_LOOPBACKNET": "syscall", + "syscall.IN_MASK_ADD": "syscall", + "syscall.IN_MODIFY": "syscall", + "syscall.IN_MOVE": "syscall", + "syscall.IN_MOVED_FROM": "syscall", + "syscall.IN_MOVED_TO": "syscall", + "syscall.IN_MOVE_SELF": "syscall", + "syscall.IN_NONBLOCK": "syscall", + "syscall.IN_ONESHOT": "syscall", + "syscall.IN_ONLYDIR": "syscall", + "syscall.IN_OPEN": "syscall", + "syscall.IN_Q_OVERFLOW": "syscall", + "syscall.IN_RFC3021_HOST": "syscall", + "syscall.IN_RFC3021_MASK": "syscall", + "syscall.IN_RFC3021_NET": "syscall", + "syscall.IN_RFC3021_NSHIFT": "syscall", + "syscall.IN_UNMOUNT": "syscall", + "syscall.IOC_IN": "syscall", + "syscall.IOC_INOUT": "syscall", + "syscall.IOC_OUT": "syscall", + "syscall.IOC_VENDOR": "syscall", + "syscall.IOC_WS2": "syscall", + "syscall.IO_REPARSE_TAG_SYMLINK": "syscall", + "syscall.IPMreq": "syscall", + "syscall.IPMreqn": "syscall", + "syscall.IPPROTO_3PC": "syscall", + "syscall.IPPROTO_ADFS": "syscall", + "syscall.IPPROTO_AH": "syscall", + "syscall.IPPROTO_AHIP": "syscall", + "syscall.IPPROTO_APES": "syscall", + "syscall.IPPROTO_ARGUS": "syscall", + "syscall.IPPROTO_AX25": "syscall", + "syscall.IPPROTO_BHA": "syscall", + "syscall.IPPROTO_BLT": "syscall", + "syscall.IPPROTO_BRSATMON": "syscall", + "syscall.IPPROTO_CARP": "syscall", + "syscall.IPPROTO_CFTP": "syscall", + "syscall.IPPROTO_CHAOS": "syscall", + "syscall.IPPROTO_CMTP": "syscall", + "syscall.IPPROTO_COMP": "syscall", + "syscall.IPPROTO_CPHB": "syscall", + "syscall.IPPROTO_CPNX": "syscall", + "syscall.IPPROTO_DCCP": "syscall", + "syscall.IPPROTO_DDP": "syscall", + "syscall.IPPROTO_DGP": "syscall", + "syscall.IPPROTO_DIVERT": "syscall", + "syscall.IPPROTO_DIVERT_INIT": "syscall", + "syscall.IPPROTO_DIVERT_RESP": "syscall", + "syscall.IPPROTO_DONE": "syscall", + "syscall.IPPROTO_DSTOPTS": "syscall", + "syscall.IPPROTO_EGP": "syscall", + "syscall.IPPROTO_EMCON": "syscall", + "syscall.IPPROTO_ENCAP": "syscall", + "syscall.IPPROTO_EON": "syscall", 
+ "syscall.IPPROTO_ESP": "syscall", + "syscall.IPPROTO_ETHERIP": "syscall", + "syscall.IPPROTO_FRAGMENT": "syscall", + "syscall.IPPROTO_GGP": "syscall", + "syscall.IPPROTO_GMTP": "syscall", + "syscall.IPPROTO_GRE": "syscall", + "syscall.IPPROTO_HELLO": "syscall", + "syscall.IPPROTO_HMP": "syscall", + "syscall.IPPROTO_HOPOPTS": "syscall", + "syscall.IPPROTO_ICMP": "syscall", + "syscall.IPPROTO_ICMPV6": "syscall", + "syscall.IPPROTO_IDP": "syscall", + "syscall.IPPROTO_IDPR": "syscall", + "syscall.IPPROTO_IDRP": "syscall", + "syscall.IPPROTO_IGMP": "syscall", + "syscall.IPPROTO_IGP": "syscall", + "syscall.IPPROTO_IGRP": "syscall", + "syscall.IPPROTO_IL": "syscall", + "syscall.IPPROTO_INLSP": "syscall", + "syscall.IPPROTO_INP": "syscall", + "syscall.IPPROTO_IP": "syscall", + "syscall.IPPROTO_IPCOMP": "syscall", + "syscall.IPPROTO_IPCV": "syscall", + "syscall.IPPROTO_IPEIP": "syscall", + "syscall.IPPROTO_IPIP": "syscall", + "syscall.IPPROTO_IPPC": "syscall", + "syscall.IPPROTO_IPV4": "syscall", + "syscall.IPPROTO_IPV6": "syscall", + "syscall.IPPROTO_IPV6_ICMP": "syscall", + "syscall.IPPROTO_IRTP": "syscall", + "syscall.IPPROTO_KRYPTOLAN": "syscall", + "syscall.IPPROTO_LARP": "syscall", + "syscall.IPPROTO_LEAF1": "syscall", + "syscall.IPPROTO_LEAF2": "syscall", + "syscall.IPPROTO_MAX": "syscall", + "syscall.IPPROTO_MAXID": "syscall", + "syscall.IPPROTO_MEAS": "syscall", + "syscall.IPPROTO_MH": "syscall", + "syscall.IPPROTO_MHRP": "syscall", + "syscall.IPPROTO_MICP": "syscall", + "syscall.IPPROTO_MOBILE": "syscall", + "syscall.IPPROTO_MPLS": "syscall", + "syscall.IPPROTO_MTP": "syscall", + "syscall.IPPROTO_MUX": "syscall", + "syscall.IPPROTO_ND": "syscall", + "syscall.IPPROTO_NHRP": "syscall", + "syscall.IPPROTO_NONE": "syscall", + "syscall.IPPROTO_NSP": "syscall", + "syscall.IPPROTO_NVPII": "syscall", + "syscall.IPPROTO_OLD_DIVERT": "syscall", + "syscall.IPPROTO_OSPFIGP": "syscall", + "syscall.IPPROTO_PFSYNC": "syscall", + "syscall.IPPROTO_PGM": "syscall", + 
"syscall.IPPROTO_PIGP": "syscall", + "syscall.IPPROTO_PIM": "syscall", + "syscall.IPPROTO_PRM": "syscall", + "syscall.IPPROTO_PUP": "syscall", + "syscall.IPPROTO_PVP": "syscall", + "syscall.IPPROTO_RAW": "syscall", + "syscall.IPPROTO_RCCMON": "syscall", + "syscall.IPPROTO_RDP": "syscall", + "syscall.IPPROTO_ROUTING": "syscall", + "syscall.IPPROTO_RSVP": "syscall", + "syscall.IPPROTO_RVD": "syscall", + "syscall.IPPROTO_SATEXPAK": "syscall", + "syscall.IPPROTO_SATMON": "syscall", + "syscall.IPPROTO_SCCSP": "syscall", + "syscall.IPPROTO_SCTP": "syscall", + "syscall.IPPROTO_SDRP": "syscall", + "syscall.IPPROTO_SEND": "syscall", + "syscall.IPPROTO_SEP": "syscall", + "syscall.IPPROTO_SKIP": "syscall", + "syscall.IPPROTO_SPACER": "syscall", + "syscall.IPPROTO_SRPC": "syscall", + "syscall.IPPROTO_ST": "syscall", + "syscall.IPPROTO_SVMTP": "syscall", + "syscall.IPPROTO_SWIPE": "syscall", + "syscall.IPPROTO_TCF": "syscall", + "syscall.IPPROTO_TCP": "syscall", + "syscall.IPPROTO_TLSP": "syscall", + "syscall.IPPROTO_TP": "syscall", + "syscall.IPPROTO_TPXX": "syscall", + "syscall.IPPROTO_TRUNK1": "syscall", + "syscall.IPPROTO_TRUNK2": "syscall", + "syscall.IPPROTO_TTP": "syscall", + "syscall.IPPROTO_UDP": "syscall", + "syscall.IPPROTO_UDPLITE": "syscall", + "syscall.IPPROTO_VINES": "syscall", + "syscall.IPPROTO_VISA": "syscall", + "syscall.IPPROTO_VMTP": "syscall", + "syscall.IPPROTO_VRRP": "syscall", + "syscall.IPPROTO_WBEXPAK": "syscall", + "syscall.IPPROTO_WBMON": "syscall", + "syscall.IPPROTO_WSN": "syscall", + "syscall.IPPROTO_XNET": "syscall", + "syscall.IPPROTO_XTP": "syscall", + "syscall.IPV6_2292DSTOPTS": "syscall", + "syscall.IPV6_2292HOPLIMIT": "syscall", + "syscall.IPV6_2292HOPOPTS": "syscall", + "syscall.IPV6_2292NEXTHOP": "syscall", + "syscall.IPV6_2292PKTINFO": "syscall", + "syscall.IPV6_2292PKTOPTIONS": "syscall", + "syscall.IPV6_2292RTHDR": "syscall", + "syscall.IPV6_ADDRFORM": "syscall", + "syscall.IPV6_ADD_MEMBERSHIP": "syscall", + "syscall.IPV6_AUTHHDR": 
"syscall", + "syscall.IPV6_AUTH_LEVEL": "syscall", + "syscall.IPV6_AUTOFLOWLABEL": "syscall", + "syscall.IPV6_BINDANY": "syscall", + "syscall.IPV6_BINDV6ONLY": "syscall", + "syscall.IPV6_BOUND_IF": "syscall", + "syscall.IPV6_CHECKSUM": "syscall", + "syscall.IPV6_DEFAULT_MULTICAST_HOPS": "syscall", + "syscall.IPV6_DEFAULT_MULTICAST_LOOP": "syscall", + "syscall.IPV6_DEFHLIM": "syscall", + "syscall.IPV6_DONTFRAG": "syscall", + "syscall.IPV6_DROP_MEMBERSHIP": "syscall", + "syscall.IPV6_DSTOPTS": "syscall", + "syscall.IPV6_ESP_NETWORK_LEVEL": "syscall", + "syscall.IPV6_ESP_TRANS_LEVEL": "syscall", + "syscall.IPV6_FAITH": "syscall", + "syscall.IPV6_FLOWINFO_MASK": "syscall", + "syscall.IPV6_FLOWLABEL_MASK": "syscall", + "syscall.IPV6_FRAGTTL": "syscall", + "syscall.IPV6_FW_ADD": "syscall", + "syscall.IPV6_FW_DEL": "syscall", + "syscall.IPV6_FW_FLUSH": "syscall", + "syscall.IPV6_FW_GET": "syscall", + "syscall.IPV6_FW_ZERO": "syscall", + "syscall.IPV6_HLIMDEC": "syscall", + "syscall.IPV6_HOPLIMIT": "syscall", + "syscall.IPV6_HOPOPTS": "syscall", + "syscall.IPV6_IPCOMP_LEVEL": "syscall", + "syscall.IPV6_IPSEC_POLICY": "syscall", + "syscall.IPV6_JOIN_ANYCAST": "syscall", + "syscall.IPV6_JOIN_GROUP": "syscall", + "syscall.IPV6_LEAVE_ANYCAST": "syscall", + "syscall.IPV6_LEAVE_GROUP": "syscall", + "syscall.IPV6_MAXHLIM": "syscall", + "syscall.IPV6_MAXOPTHDR": "syscall", + "syscall.IPV6_MAXPACKET": "syscall", + "syscall.IPV6_MAX_GROUP_SRC_FILTER": "syscall", + "syscall.IPV6_MAX_MEMBERSHIPS": "syscall", + "syscall.IPV6_MAX_SOCK_SRC_FILTER": "syscall", + "syscall.IPV6_MIN_MEMBERSHIPS": "syscall", + "syscall.IPV6_MMTU": "syscall", + "syscall.IPV6_MSFILTER": "syscall", + "syscall.IPV6_MTU": "syscall", + "syscall.IPV6_MTU_DISCOVER": "syscall", + "syscall.IPV6_MULTICAST_HOPS": "syscall", + "syscall.IPV6_MULTICAST_IF": "syscall", + "syscall.IPV6_MULTICAST_LOOP": "syscall", + "syscall.IPV6_NEXTHOP": "syscall", + "syscall.IPV6_OPTIONS": "syscall", + "syscall.IPV6_PATHMTU": "syscall", + 
"syscall.IPV6_PIPEX": "syscall", + "syscall.IPV6_PKTINFO": "syscall", + "syscall.IPV6_PMTUDISC_DO": "syscall", + "syscall.IPV6_PMTUDISC_DONT": "syscall", + "syscall.IPV6_PMTUDISC_PROBE": "syscall", + "syscall.IPV6_PMTUDISC_WANT": "syscall", + "syscall.IPV6_PORTRANGE": "syscall", + "syscall.IPV6_PORTRANGE_DEFAULT": "syscall", + "syscall.IPV6_PORTRANGE_HIGH": "syscall", + "syscall.IPV6_PORTRANGE_LOW": "syscall", + "syscall.IPV6_PREFER_TEMPADDR": "syscall", + "syscall.IPV6_RECVDSTOPTS": "syscall", + "syscall.IPV6_RECVDSTPORT": "syscall", + "syscall.IPV6_RECVERR": "syscall", + "syscall.IPV6_RECVHOPLIMIT": "syscall", + "syscall.IPV6_RECVHOPOPTS": "syscall", + "syscall.IPV6_RECVPATHMTU": "syscall", + "syscall.IPV6_RECVPKTINFO": "syscall", + "syscall.IPV6_RECVRTHDR": "syscall", + "syscall.IPV6_RECVTCLASS": "syscall", + "syscall.IPV6_ROUTER_ALERT": "syscall", + "syscall.IPV6_RTABLE": "syscall", + "syscall.IPV6_RTHDR": "syscall", + "syscall.IPV6_RTHDRDSTOPTS": "syscall", + "syscall.IPV6_RTHDR_LOOSE": "syscall", + "syscall.IPV6_RTHDR_STRICT": "syscall", + "syscall.IPV6_RTHDR_TYPE_0": "syscall", + "syscall.IPV6_RXDSTOPTS": "syscall", + "syscall.IPV6_RXHOPOPTS": "syscall", + "syscall.IPV6_SOCKOPT_RESERVED1": "syscall", + "syscall.IPV6_TCLASS": "syscall", + "syscall.IPV6_UNICAST_HOPS": "syscall", + "syscall.IPV6_USE_MIN_MTU": "syscall", + "syscall.IPV6_V6ONLY": "syscall", + "syscall.IPV6_VERSION": "syscall", + "syscall.IPV6_VERSION_MASK": "syscall", + "syscall.IPV6_XFRM_POLICY": "syscall", + "syscall.IP_ADD_MEMBERSHIP": "syscall", + "syscall.IP_ADD_SOURCE_MEMBERSHIP": "syscall", + "syscall.IP_AUTH_LEVEL": "syscall", + "syscall.IP_BINDANY": "syscall", + "syscall.IP_BLOCK_SOURCE": "syscall", + "syscall.IP_BOUND_IF": "syscall", + "syscall.IP_DEFAULT_MULTICAST_LOOP": "syscall", + "syscall.IP_DEFAULT_MULTICAST_TTL": "syscall", + "syscall.IP_DF": "syscall", + "syscall.IP_DIVERTFL": "syscall", + "syscall.IP_DONTFRAG": "syscall", + "syscall.IP_DROP_MEMBERSHIP": "syscall", + 
"syscall.IP_DROP_SOURCE_MEMBERSHIP": "syscall", + "syscall.IP_DUMMYNET3": "syscall", + "syscall.IP_DUMMYNET_CONFIGURE": "syscall", + "syscall.IP_DUMMYNET_DEL": "syscall", + "syscall.IP_DUMMYNET_FLUSH": "syscall", + "syscall.IP_DUMMYNET_GET": "syscall", + "syscall.IP_EF": "syscall", + "syscall.IP_ERRORMTU": "syscall", + "syscall.IP_ESP_NETWORK_LEVEL": "syscall", + "syscall.IP_ESP_TRANS_LEVEL": "syscall", + "syscall.IP_FAITH": "syscall", + "syscall.IP_FREEBIND": "syscall", + "syscall.IP_FW3": "syscall", + "syscall.IP_FW_ADD": "syscall", + "syscall.IP_FW_DEL": "syscall", + "syscall.IP_FW_FLUSH": "syscall", + "syscall.IP_FW_GET": "syscall", + "syscall.IP_FW_NAT_CFG": "syscall", + "syscall.IP_FW_NAT_DEL": "syscall", + "syscall.IP_FW_NAT_GET_CONFIG": "syscall", + "syscall.IP_FW_NAT_GET_LOG": "syscall", + "syscall.IP_FW_RESETLOG": "syscall", + "syscall.IP_FW_TABLE_ADD": "syscall", + "syscall.IP_FW_TABLE_DEL": "syscall", + "syscall.IP_FW_TABLE_FLUSH": "syscall", + "syscall.IP_FW_TABLE_GETSIZE": "syscall", + "syscall.IP_FW_TABLE_LIST": "syscall", + "syscall.IP_FW_ZERO": "syscall", + "syscall.IP_HDRINCL": "syscall", + "syscall.IP_IPCOMP_LEVEL": "syscall", + "syscall.IP_IPSECFLOWINFO": "syscall", + "syscall.IP_IPSEC_LOCAL_AUTH": "syscall", + "syscall.IP_IPSEC_LOCAL_CRED": "syscall", + "syscall.IP_IPSEC_LOCAL_ID": "syscall", + "syscall.IP_IPSEC_POLICY": "syscall", + "syscall.IP_IPSEC_REMOTE_AUTH": "syscall", + "syscall.IP_IPSEC_REMOTE_CRED": "syscall", + "syscall.IP_IPSEC_REMOTE_ID": "syscall", + "syscall.IP_MAXPACKET": "syscall", + "syscall.IP_MAX_GROUP_SRC_FILTER": "syscall", + "syscall.IP_MAX_MEMBERSHIPS": "syscall", + "syscall.IP_MAX_SOCK_MUTE_FILTER": "syscall", + "syscall.IP_MAX_SOCK_SRC_FILTER": "syscall", + "syscall.IP_MAX_SOURCE_FILTER": "syscall", + "syscall.IP_MF": "syscall", + "syscall.IP_MINFRAGSIZE": "syscall", + "syscall.IP_MINTTL": "syscall", + "syscall.IP_MIN_MEMBERSHIPS": "syscall", + "syscall.IP_MSFILTER": "syscall", + "syscall.IP_MSS": "syscall", + 
"syscall.IP_MTU": "syscall", + "syscall.IP_MTU_DISCOVER": "syscall", + "syscall.IP_MULTICAST_IF": "syscall", + "syscall.IP_MULTICAST_IFINDEX": "syscall", + "syscall.IP_MULTICAST_LOOP": "syscall", + "syscall.IP_MULTICAST_TTL": "syscall", + "syscall.IP_MULTICAST_VIF": "syscall", + "syscall.IP_NAT__XXX": "syscall", + "syscall.IP_OFFMASK": "syscall", + "syscall.IP_OLD_FW_ADD": "syscall", + "syscall.IP_OLD_FW_DEL": "syscall", + "syscall.IP_OLD_FW_FLUSH": "syscall", + "syscall.IP_OLD_FW_GET": "syscall", + "syscall.IP_OLD_FW_RESETLOG": "syscall", + "syscall.IP_OLD_FW_ZERO": "syscall", + "syscall.IP_ONESBCAST": "syscall", + "syscall.IP_OPTIONS": "syscall", + "syscall.IP_ORIGDSTADDR": "syscall", + "syscall.IP_PASSSEC": "syscall", + "syscall.IP_PIPEX": "syscall", + "syscall.IP_PKTINFO": "syscall", + "syscall.IP_PKTOPTIONS": "syscall", + "syscall.IP_PMTUDISC": "syscall", + "syscall.IP_PMTUDISC_DO": "syscall", + "syscall.IP_PMTUDISC_DONT": "syscall", + "syscall.IP_PMTUDISC_PROBE": "syscall", + "syscall.IP_PMTUDISC_WANT": "syscall", + "syscall.IP_PORTRANGE": "syscall", + "syscall.IP_PORTRANGE_DEFAULT": "syscall", + "syscall.IP_PORTRANGE_HIGH": "syscall", + "syscall.IP_PORTRANGE_LOW": "syscall", + "syscall.IP_RECVDSTADDR": "syscall", + "syscall.IP_RECVDSTPORT": "syscall", + "syscall.IP_RECVERR": "syscall", + "syscall.IP_RECVIF": "syscall", + "syscall.IP_RECVOPTS": "syscall", + "syscall.IP_RECVORIGDSTADDR": "syscall", + "syscall.IP_RECVPKTINFO": "syscall", + "syscall.IP_RECVRETOPTS": "syscall", + "syscall.IP_RECVRTABLE": "syscall", + "syscall.IP_RECVTOS": "syscall", + "syscall.IP_RECVTTL": "syscall", + "syscall.IP_RETOPTS": "syscall", + "syscall.IP_RF": "syscall", + "syscall.IP_ROUTER_ALERT": "syscall", + "syscall.IP_RSVP_OFF": "syscall", + "syscall.IP_RSVP_ON": "syscall", + "syscall.IP_RSVP_VIF_OFF": "syscall", + "syscall.IP_RSVP_VIF_ON": "syscall", + "syscall.IP_RTABLE": "syscall", + "syscall.IP_SENDSRCADDR": "syscall", + "syscall.IP_STRIPHDR": "syscall", + "syscall.IP_TOS": 
"syscall", + "syscall.IP_TRAFFIC_MGT_BACKGROUND": "syscall", + "syscall.IP_TRANSPARENT": "syscall", + "syscall.IP_TTL": "syscall", + "syscall.IP_UNBLOCK_SOURCE": "syscall", + "syscall.IP_XFRM_POLICY": "syscall", + "syscall.IPv6MTUInfo": "syscall", + "syscall.IPv6Mreq": "syscall", + "syscall.ISIG": "syscall", + "syscall.ISTRIP": "syscall", + "syscall.IUCLC": "syscall", + "syscall.IUTF8": "syscall", + "syscall.IXANY": "syscall", + "syscall.IXOFF": "syscall", + "syscall.IXON": "syscall", + "syscall.IfAddrmsg": "syscall", + "syscall.IfAnnounceMsghdr": "syscall", + "syscall.IfData": "syscall", + "syscall.IfInfomsg": "syscall", + "syscall.IfMsghdr": "syscall", + "syscall.IfaMsghdr": "syscall", + "syscall.IfmaMsghdr": "syscall", + "syscall.IfmaMsghdr2": "syscall", + "syscall.ImplementsGetwd": "syscall", + "syscall.Inet4Pktinfo": "syscall", + "syscall.Inet6Pktinfo": "syscall", + "syscall.InotifyAddWatch": "syscall", + "syscall.InotifyEvent": "syscall", + "syscall.InotifyInit": "syscall", + "syscall.InotifyInit1": "syscall", + "syscall.InotifyRmWatch": "syscall", + "syscall.InterfaceAddrMessage": "syscall", + "syscall.InterfaceAnnounceMessage": "syscall", + "syscall.InterfaceInfo": "syscall", + "syscall.InterfaceMessage": "syscall", + "syscall.InterfaceMulticastAddrMessage": "syscall", + "syscall.InvalidHandle": "syscall", + "syscall.Ioperm": "syscall", + "syscall.Iopl": "syscall", + "syscall.Iovec": "syscall", + "syscall.IpAdapterInfo": "syscall", + "syscall.IpAddrString": "syscall", + "syscall.IpAddressString": "syscall", + "syscall.IpMaskString": "syscall", + "syscall.Issetugid": "syscall", + "syscall.KEY_ALL_ACCESS": "syscall", + "syscall.KEY_CREATE_LINK": "syscall", + "syscall.KEY_CREATE_SUB_KEY": "syscall", + "syscall.KEY_ENUMERATE_SUB_KEYS": "syscall", + "syscall.KEY_EXECUTE": "syscall", + "syscall.KEY_NOTIFY": "syscall", + "syscall.KEY_QUERY_VALUE": "syscall", + "syscall.KEY_READ": "syscall", + "syscall.KEY_SET_VALUE": "syscall", + "syscall.KEY_WOW64_32KEY": 
"syscall", + "syscall.KEY_WOW64_64KEY": "syscall", + "syscall.KEY_WRITE": "syscall", + "syscall.Kevent": "syscall", + "syscall.Kevent_t": "syscall", + "syscall.Kill": "syscall", + "syscall.Klogctl": "syscall", + "syscall.Kqueue": "syscall", + "syscall.LANG_ENGLISH": "syscall", + "syscall.LAYERED_PROTOCOL": "syscall", + "syscall.LCNT_OVERLOAD_FLUSH": "syscall", + "syscall.LINUX_REBOOT_CMD_CAD_OFF": "syscall", + "syscall.LINUX_REBOOT_CMD_CAD_ON": "syscall", + "syscall.LINUX_REBOOT_CMD_HALT": "syscall", + "syscall.LINUX_REBOOT_CMD_KEXEC": "syscall", + "syscall.LINUX_REBOOT_CMD_POWER_OFF": "syscall", + "syscall.LINUX_REBOOT_CMD_RESTART": "syscall", + "syscall.LINUX_REBOOT_CMD_RESTART2": "syscall", + "syscall.LINUX_REBOOT_CMD_SW_SUSPEND": "syscall", + "syscall.LINUX_REBOOT_MAGIC1": "syscall", + "syscall.LINUX_REBOOT_MAGIC2": "syscall", + "syscall.LOCK_EX": "syscall", + "syscall.LOCK_NB": "syscall", + "syscall.LOCK_SH": "syscall", + "syscall.LOCK_UN": "syscall", + "syscall.LazyDLL": "syscall", + "syscall.LazyProc": "syscall", + "syscall.Lchown": "syscall", + "syscall.Linger": "syscall", + "syscall.Link": "syscall", + "syscall.Listen": "syscall", + "syscall.Listxattr": "syscall", + "syscall.LoadCancelIoEx": "syscall", + "syscall.LoadConnectEx": "syscall", + "syscall.LoadCreateSymbolicLink": "syscall", + "syscall.LoadDLL": "syscall", + "syscall.LoadGetAddrInfo": "syscall", + "syscall.LoadLibrary": "syscall", + "syscall.LoadSetFileCompletionNotificationModes": "syscall", + "syscall.LocalFree": "syscall", + "syscall.Log2phys_t": "syscall", + "syscall.LookupAccountName": "syscall", + "syscall.LookupAccountSid": "syscall", + "syscall.LookupSID": "syscall", + "syscall.LsfJump": "syscall", + "syscall.LsfSocket": "syscall", + "syscall.LsfStmt": "syscall", + "syscall.Lstat": "syscall", + "syscall.MADV_AUTOSYNC": "syscall", + "syscall.MADV_CAN_REUSE": "syscall", + "syscall.MADV_CORE": "syscall", + "syscall.MADV_DOFORK": "syscall", + "syscall.MADV_DONTFORK": "syscall", + 
"syscall.MADV_DONTNEED": "syscall", + "syscall.MADV_FREE": "syscall", + "syscall.MADV_FREE_REUSABLE": "syscall", + "syscall.MADV_FREE_REUSE": "syscall", + "syscall.MADV_HUGEPAGE": "syscall", + "syscall.MADV_HWPOISON": "syscall", + "syscall.MADV_MERGEABLE": "syscall", + "syscall.MADV_NOCORE": "syscall", + "syscall.MADV_NOHUGEPAGE": "syscall", + "syscall.MADV_NORMAL": "syscall", + "syscall.MADV_NOSYNC": "syscall", + "syscall.MADV_PROTECT": "syscall", + "syscall.MADV_RANDOM": "syscall", + "syscall.MADV_REMOVE": "syscall", + "syscall.MADV_SEQUENTIAL": "syscall", + "syscall.MADV_SPACEAVAIL": "syscall", + "syscall.MADV_UNMERGEABLE": "syscall", + "syscall.MADV_WILLNEED": "syscall", + "syscall.MADV_ZERO_WIRED_PAGES": "syscall", + "syscall.MAP_32BIT": "syscall", + "syscall.MAP_ALIGNED_SUPER": "syscall", + "syscall.MAP_ALIGNMENT_16MB": "syscall", + "syscall.MAP_ALIGNMENT_1TB": "syscall", + "syscall.MAP_ALIGNMENT_256TB": "syscall", + "syscall.MAP_ALIGNMENT_4GB": "syscall", + "syscall.MAP_ALIGNMENT_64KB": "syscall", + "syscall.MAP_ALIGNMENT_64PB": "syscall", + "syscall.MAP_ALIGNMENT_MASK": "syscall", + "syscall.MAP_ALIGNMENT_SHIFT": "syscall", + "syscall.MAP_ANON": "syscall", + "syscall.MAP_ANONYMOUS": "syscall", + "syscall.MAP_COPY": "syscall", + "syscall.MAP_DENYWRITE": "syscall", + "syscall.MAP_EXECUTABLE": "syscall", + "syscall.MAP_FILE": "syscall", + "syscall.MAP_FIXED": "syscall", + "syscall.MAP_FLAGMASK": "syscall", + "syscall.MAP_GROWSDOWN": "syscall", + "syscall.MAP_HASSEMAPHORE": "syscall", + "syscall.MAP_HUGETLB": "syscall", + "syscall.MAP_INHERIT": "syscall", + "syscall.MAP_INHERIT_COPY": "syscall", + "syscall.MAP_INHERIT_DEFAULT": "syscall", + "syscall.MAP_INHERIT_DONATE_COPY": "syscall", + "syscall.MAP_INHERIT_NONE": "syscall", + "syscall.MAP_INHERIT_SHARE": "syscall", + "syscall.MAP_JIT": "syscall", + "syscall.MAP_LOCKED": "syscall", + "syscall.MAP_NOCACHE": "syscall", + "syscall.MAP_NOCORE": "syscall", + "syscall.MAP_NOEXTEND": "syscall", + 
"syscall.MAP_NONBLOCK": "syscall", + "syscall.MAP_NORESERVE": "syscall", + "syscall.MAP_NOSYNC": "syscall", + "syscall.MAP_POPULATE": "syscall", + "syscall.MAP_PREFAULT_READ": "syscall", + "syscall.MAP_PRIVATE": "syscall", + "syscall.MAP_RENAME": "syscall", + "syscall.MAP_RESERVED0080": "syscall", + "syscall.MAP_RESERVED0100": "syscall", + "syscall.MAP_SHARED": "syscall", + "syscall.MAP_STACK": "syscall", + "syscall.MAP_TRYFIXED": "syscall", + "syscall.MAP_TYPE": "syscall", + "syscall.MAP_WIRED": "syscall", + "syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE": "syscall", + "syscall.MAXLEN_IFDESCR": "syscall", + "syscall.MAXLEN_PHYSADDR": "syscall", + "syscall.MAX_ADAPTER_ADDRESS_LENGTH": "syscall", + "syscall.MAX_ADAPTER_DESCRIPTION_LENGTH": "syscall", + "syscall.MAX_ADAPTER_NAME_LENGTH": "syscall", + "syscall.MAX_COMPUTERNAME_LENGTH": "syscall", + "syscall.MAX_INTERFACE_NAME_LEN": "syscall", + "syscall.MAX_LONG_PATH": "syscall", + "syscall.MAX_PATH": "syscall", + "syscall.MAX_PROTOCOL_CHAIN": "syscall", + "syscall.MCL_CURRENT": "syscall", + "syscall.MCL_FUTURE": "syscall", + "syscall.MNT_DETACH": "syscall", + "syscall.MNT_EXPIRE": "syscall", + "syscall.MNT_FORCE": "syscall", + "syscall.MSG_BCAST": "syscall", + "syscall.MSG_CMSG_CLOEXEC": "syscall", + "syscall.MSG_COMPAT": "syscall", + "syscall.MSG_CONFIRM": "syscall", + "syscall.MSG_CONTROLMBUF": "syscall", + "syscall.MSG_CTRUNC": "syscall", + "syscall.MSG_DONTROUTE": "syscall", + "syscall.MSG_DONTWAIT": "syscall", + "syscall.MSG_EOF": "syscall", + "syscall.MSG_EOR": "syscall", + "syscall.MSG_ERRQUEUE": "syscall", + "syscall.MSG_FASTOPEN": "syscall", + "syscall.MSG_FIN": "syscall", + "syscall.MSG_FLUSH": "syscall", + "syscall.MSG_HAVEMORE": "syscall", + "syscall.MSG_HOLD": "syscall", + "syscall.MSG_IOVUSRSPACE": "syscall", + "syscall.MSG_LENUSRSPACE": "syscall", + "syscall.MSG_MCAST": "syscall", + "syscall.MSG_MORE": "syscall", + "syscall.MSG_NAMEMBUF": "syscall", + "syscall.MSG_NBIO": "syscall", + "syscall.MSG_NEEDSA": 
"syscall", + "syscall.MSG_NOSIGNAL": "syscall", + "syscall.MSG_NOTIFICATION": "syscall", + "syscall.MSG_OOB": "syscall", + "syscall.MSG_PEEK": "syscall", + "syscall.MSG_PROXY": "syscall", + "syscall.MSG_RCVMORE": "syscall", + "syscall.MSG_RST": "syscall", + "syscall.MSG_SEND": "syscall", + "syscall.MSG_SYN": "syscall", + "syscall.MSG_TRUNC": "syscall", + "syscall.MSG_TRYHARD": "syscall", + "syscall.MSG_USERFLAGS": "syscall", + "syscall.MSG_WAITALL": "syscall", + "syscall.MSG_WAITFORONE": "syscall", + "syscall.MSG_WAITSTREAM": "syscall", + "syscall.MS_ACTIVE": "syscall", + "syscall.MS_ASYNC": "syscall", + "syscall.MS_BIND": "syscall", + "syscall.MS_DEACTIVATE": "syscall", + "syscall.MS_DIRSYNC": "syscall", + "syscall.MS_INVALIDATE": "syscall", + "syscall.MS_I_VERSION": "syscall", + "syscall.MS_KERNMOUNT": "syscall", + "syscall.MS_KILLPAGES": "syscall", + "syscall.MS_MANDLOCK": "syscall", + "syscall.MS_MGC_MSK": "syscall", + "syscall.MS_MGC_VAL": "syscall", + "syscall.MS_MOVE": "syscall", + "syscall.MS_NOATIME": "syscall", + "syscall.MS_NODEV": "syscall", + "syscall.MS_NODIRATIME": "syscall", + "syscall.MS_NOEXEC": "syscall", + "syscall.MS_NOSUID": "syscall", + "syscall.MS_NOUSER": "syscall", + "syscall.MS_POSIXACL": "syscall", + "syscall.MS_PRIVATE": "syscall", + "syscall.MS_RDONLY": "syscall", + "syscall.MS_REC": "syscall", + "syscall.MS_RELATIME": "syscall", + "syscall.MS_REMOUNT": "syscall", + "syscall.MS_RMT_MASK": "syscall", + "syscall.MS_SHARED": "syscall", + "syscall.MS_SILENT": "syscall", + "syscall.MS_SLAVE": "syscall", + "syscall.MS_STRICTATIME": "syscall", + "syscall.MS_SYNC": "syscall", + "syscall.MS_SYNCHRONOUS": "syscall", + "syscall.MS_UNBINDABLE": "syscall", + "syscall.Madvise": "syscall", + "syscall.MapViewOfFile": "syscall", + "syscall.MaxTokenInfoClass": "syscall", + "syscall.Mclpool": "syscall", + "syscall.MibIfRow": "syscall", + "syscall.Mkdir": "syscall", + "syscall.Mkdirat": "syscall", + "syscall.Mkfifo": "syscall", + "syscall.Mknod": 
"syscall", + "syscall.Mknodat": "syscall", + "syscall.Mlock": "syscall", + "syscall.Mlockall": "syscall", + "syscall.Mmap": "syscall", + "syscall.Mount": "syscall", + "syscall.MoveFile": "syscall", + "syscall.Mprotect": "syscall", + "syscall.Msghdr": "syscall", + "syscall.Munlock": "syscall", + "syscall.Munlockall": "syscall", + "syscall.Munmap": "syscall", + "syscall.MustLoadDLL": "syscall", + "syscall.NAME_MAX": "syscall", + "syscall.NETLINK_ADD_MEMBERSHIP": "syscall", + "syscall.NETLINK_AUDIT": "syscall", + "syscall.NETLINK_BROADCAST_ERROR": "syscall", + "syscall.NETLINK_CONNECTOR": "syscall", + "syscall.NETLINK_DNRTMSG": "syscall", + "syscall.NETLINK_DROP_MEMBERSHIP": "syscall", + "syscall.NETLINK_ECRYPTFS": "syscall", + "syscall.NETLINK_FIB_LOOKUP": "syscall", + "syscall.NETLINK_FIREWALL": "syscall", + "syscall.NETLINK_GENERIC": "syscall", + "syscall.NETLINK_INET_DIAG": "syscall", + "syscall.NETLINK_IP6_FW": "syscall", + "syscall.NETLINK_ISCSI": "syscall", + "syscall.NETLINK_KOBJECT_UEVENT": "syscall", + "syscall.NETLINK_NETFILTER": "syscall", + "syscall.NETLINK_NFLOG": "syscall", + "syscall.NETLINK_NO_ENOBUFS": "syscall", + "syscall.NETLINK_PKTINFO": "syscall", + "syscall.NETLINK_RDMA": "syscall", + "syscall.NETLINK_ROUTE": "syscall", + "syscall.NETLINK_SCSITRANSPORT": "syscall", + "syscall.NETLINK_SELINUX": "syscall", + "syscall.NETLINK_UNUSED": "syscall", + "syscall.NETLINK_USERSOCK": "syscall", + "syscall.NETLINK_XFRM": "syscall", + "syscall.NET_RT_DUMP": "syscall", + "syscall.NET_RT_DUMP2": "syscall", + "syscall.NET_RT_FLAGS": "syscall", + "syscall.NET_RT_IFLIST": "syscall", + "syscall.NET_RT_IFLIST2": "syscall", + "syscall.NET_RT_IFLISTL": "syscall", + "syscall.NET_RT_IFMALIST": "syscall", + "syscall.NET_RT_MAXID": "syscall", + "syscall.NET_RT_OIFLIST": "syscall", + "syscall.NET_RT_OOIFLIST": "syscall", + "syscall.NET_RT_STAT": "syscall", + "syscall.NET_RT_STATS": "syscall", + "syscall.NET_RT_TABLE": "syscall", + "syscall.NET_RT_TRASH": "syscall", + 
"syscall.NLA_ALIGNTO": "syscall", + "syscall.NLA_F_NESTED": "syscall", + "syscall.NLA_F_NET_BYTEORDER": "syscall", + "syscall.NLA_HDRLEN": "syscall", + "syscall.NLMSG_ALIGNTO": "syscall", + "syscall.NLMSG_DONE": "syscall", + "syscall.NLMSG_ERROR": "syscall", + "syscall.NLMSG_HDRLEN": "syscall", + "syscall.NLMSG_MIN_TYPE": "syscall", + "syscall.NLMSG_NOOP": "syscall", + "syscall.NLMSG_OVERRUN": "syscall", + "syscall.NLM_F_ACK": "syscall", + "syscall.NLM_F_APPEND": "syscall", + "syscall.NLM_F_ATOMIC": "syscall", + "syscall.NLM_F_CREATE": "syscall", + "syscall.NLM_F_DUMP": "syscall", + "syscall.NLM_F_ECHO": "syscall", + "syscall.NLM_F_EXCL": "syscall", + "syscall.NLM_F_MATCH": "syscall", + "syscall.NLM_F_MULTI": "syscall", + "syscall.NLM_F_REPLACE": "syscall", + "syscall.NLM_F_REQUEST": "syscall", + "syscall.NLM_F_ROOT": "syscall", + "syscall.NOFLSH": "syscall", + "syscall.NOTE_ABSOLUTE": "syscall", + "syscall.NOTE_ATTRIB": "syscall", + "syscall.NOTE_CHILD": "syscall", + "syscall.NOTE_DELETE": "syscall", + "syscall.NOTE_EOF": "syscall", + "syscall.NOTE_EXEC": "syscall", + "syscall.NOTE_EXIT": "syscall", + "syscall.NOTE_EXITSTATUS": "syscall", + "syscall.NOTE_EXTEND": "syscall", + "syscall.NOTE_FFAND": "syscall", + "syscall.NOTE_FFCOPY": "syscall", + "syscall.NOTE_FFCTRLMASK": "syscall", + "syscall.NOTE_FFLAGSMASK": "syscall", + "syscall.NOTE_FFNOP": "syscall", + "syscall.NOTE_FFOR": "syscall", + "syscall.NOTE_FORK": "syscall", + "syscall.NOTE_LINK": "syscall", + "syscall.NOTE_LOWAT": "syscall", + "syscall.NOTE_NONE": "syscall", + "syscall.NOTE_NSECONDS": "syscall", + "syscall.NOTE_PCTRLMASK": "syscall", + "syscall.NOTE_PDATAMASK": "syscall", + "syscall.NOTE_REAP": "syscall", + "syscall.NOTE_RENAME": "syscall", + "syscall.NOTE_RESOURCEEND": "syscall", + "syscall.NOTE_REVOKE": "syscall", + "syscall.NOTE_SECONDS": "syscall", + "syscall.NOTE_SIGNAL": "syscall", + "syscall.NOTE_TRACK": "syscall", + "syscall.NOTE_TRACKERR": "syscall", + "syscall.NOTE_TRIGGER": "syscall", + 
"syscall.NOTE_TRUNCATE": "syscall", + "syscall.NOTE_USECONDS": "syscall", + "syscall.NOTE_VM_ERROR": "syscall", + "syscall.NOTE_VM_PRESSURE": "syscall", + "syscall.NOTE_VM_PRESSURE_SUDDEN_TERMINATE": "syscall", + "syscall.NOTE_VM_PRESSURE_TERMINATE": "syscall", + "syscall.NOTE_WRITE": "syscall", + "syscall.NameCanonical": "syscall", + "syscall.NameCanonicalEx": "syscall", + "syscall.NameDisplay": "syscall", + "syscall.NameDnsDomain": "syscall", + "syscall.NameFullyQualifiedDN": "syscall", + "syscall.NameSamCompatible": "syscall", + "syscall.NameServicePrincipal": "syscall", + "syscall.NameUniqueId": "syscall", + "syscall.NameUnknown": "syscall", + "syscall.NameUserPrincipal": "syscall", + "syscall.Nanosleep": "syscall", + "syscall.NetApiBufferFree": "syscall", + "syscall.NetGetJoinInformation": "syscall", + "syscall.NetSetupDomainName": "syscall", + "syscall.NetSetupUnjoined": "syscall", + "syscall.NetSetupUnknownStatus": "syscall", + "syscall.NetSetupWorkgroupName": "syscall", + "syscall.NetUserGetInfo": "syscall", + "syscall.NetlinkMessage": "syscall", + "syscall.NetlinkRIB": "syscall", + "syscall.NetlinkRouteAttr": "syscall", + "syscall.NetlinkRouteRequest": "syscall", + "syscall.NewCallback": "syscall", + "syscall.NewCallbackCDecl": "syscall", + "syscall.NewLazyDLL": "syscall", + "syscall.NlAttr": "syscall", + "syscall.NlMsgerr": "syscall", + "syscall.NlMsghdr": "syscall", + "syscall.NsecToFiletime": "syscall", + "syscall.NsecToTimespec": "syscall", + "syscall.NsecToTimeval": "syscall", + "syscall.Ntohs": "syscall", + "syscall.OCRNL": "syscall", + "syscall.OFDEL": "syscall", + "syscall.OFILL": "syscall", + "syscall.OFIOGETBMAP": "syscall", + "syscall.OID_PKIX_KP_SERVER_AUTH": "syscall", + "syscall.OID_SERVER_GATED_CRYPTO": "syscall", + "syscall.OID_SGC_NETSCAPE": "syscall", + "syscall.OLCUC": "syscall", + "syscall.ONLCR": "syscall", + "syscall.ONLRET": "syscall", + "syscall.ONOCR": "syscall", + "syscall.ONOEOT": "syscall", + "syscall.OPEN_ALWAYS": "syscall", + 
"syscall.OPEN_EXISTING": "syscall", + "syscall.OPOST": "syscall", + "syscall.O_ACCMODE": "syscall", + "syscall.O_ALERT": "syscall", + "syscall.O_ALT_IO": "syscall", + "syscall.O_APPEND": "syscall", + "syscall.O_ASYNC": "syscall", + "syscall.O_CLOEXEC": "syscall", + "syscall.O_CREAT": "syscall", + "syscall.O_DIRECT": "syscall", + "syscall.O_DIRECTORY": "syscall", + "syscall.O_DSYNC": "syscall", + "syscall.O_EVTONLY": "syscall", + "syscall.O_EXCL": "syscall", + "syscall.O_EXEC": "syscall", + "syscall.O_EXLOCK": "syscall", + "syscall.O_FSYNC": "syscall", + "syscall.O_LARGEFILE": "syscall", + "syscall.O_NDELAY": "syscall", + "syscall.O_NOATIME": "syscall", + "syscall.O_NOCTTY": "syscall", + "syscall.O_NOFOLLOW": "syscall", + "syscall.O_NONBLOCK": "syscall", + "syscall.O_NOSIGPIPE": "syscall", + "syscall.O_POPUP": "syscall", + "syscall.O_RDONLY": "syscall", + "syscall.O_RDWR": "syscall", + "syscall.O_RSYNC": "syscall", + "syscall.O_SHLOCK": "syscall", + "syscall.O_SYMLINK": "syscall", + "syscall.O_SYNC": "syscall", + "syscall.O_TRUNC": "syscall", + "syscall.O_TTY_INIT": "syscall", + "syscall.O_WRONLY": "syscall", + "syscall.Open": "syscall", + "syscall.OpenCurrentProcessToken": "syscall", + "syscall.OpenProcess": "syscall", + "syscall.OpenProcessToken": "syscall", + "syscall.Openat": "syscall", + "syscall.Overlapped": "syscall", + "syscall.PACKET_ADD_MEMBERSHIP": "syscall", + "syscall.PACKET_BROADCAST": "syscall", + "syscall.PACKET_DROP_MEMBERSHIP": "syscall", + "syscall.PACKET_FASTROUTE": "syscall", + "syscall.PACKET_HOST": "syscall", + "syscall.PACKET_LOOPBACK": "syscall", + "syscall.PACKET_MR_ALLMULTI": "syscall", + "syscall.PACKET_MR_MULTICAST": "syscall", + "syscall.PACKET_MR_PROMISC": "syscall", + "syscall.PACKET_MULTICAST": "syscall", + "syscall.PACKET_OTHERHOST": "syscall", + "syscall.PACKET_OUTGOING": "syscall", + "syscall.PACKET_RECV_OUTPUT": "syscall", + "syscall.PACKET_RX_RING": "syscall", + "syscall.PACKET_STATISTICS": "syscall", + 
"syscall.PAGE_EXECUTE_READ": "syscall", + "syscall.PAGE_EXECUTE_READWRITE": "syscall", + "syscall.PAGE_EXECUTE_WRITECOPY": "syscall", + "syscall.PAGE_READONLY": "syscall", + "syscall.PAGE_READWRITE": "syscall", + "syscall.PAGE_WRITECOPY": "syscall", + "syscall.PARENB": "syscall", + "syscall.PARMRK": "syscall", + "syscall.PARODD": "syscall", + "syscall.PENDIN": "syscall", + "syscall.PFL_HIDDEN": "syscall", + "syscall.PFL_MATCHES_PROTOCOL_ZERO": "syscall", + "syscall.PFL_MULTIPLE_PROTO_ENTRIES": "syscall", + "syscall.PFL_NETWORKDIRECT_PROVIDER": "syscall", + "syscall.PFL_RECOMMENDED_PROTO_ENTRY": "syscall", + "syscall.PF_FLUSH": "syscall", + "syscall.PKCS_7_ASN_ENCODING": "syscall", + "syscall.PMC5_PIPELINE_FLUSH": "syscall", + "syscall.PRIO_PGRP": "syscall", + "syscall.PRIO_PROCESS": "syscall", + "syscall.PRIO_USER": "syscall", + "syscall.PRI_IOFLUSH": "syscall", + "syscall.PROCESS_QUERY_INFORMATION": "syscall", + "syscall.PROCESS_TERMINATE": "syscall", + "syscall.PROT_EXEC": "syscall", + "syscall.PROT_GROWSDOWN": "syscall", + "syscall.PROT_GROWSUP": "syscall", + "syscall.PROT_NONE": "syscall", + "syscall.PROT_READ": "syscall", + "syscall.PROT_WRITE": "syscall", + "syscall.PROV_DH_SCHANNEL": "syscall", + "syscall.PROV_DSS": "syscall", + "syscall.PROV_DSS_DH": "syscall", + "syscall.PROV_EC_ECDSA_FULL": "syscall", + "syscall.PROV_EC_ECDSA_SIG": "syscall", + "syscall.PROV_EC_ECNRA_FULL": "syscall", + "syscall.PROV_EC_ECNRA_SIG": "syscall", + "syscall.PROV_FORTEZZA": "syscall", + "syscall.PROV_INTEL_SEC": "syscall", + "syscall.PROV_MS_EXCHANGE": "syscall", + "syscall.PROV_REPLACE_OWF": "syscall", + "syscall.PROV_RNG": "syscall", + "syscall.PROV_RSA_AES": "syscall", + "syscall.PROV_RSA_FULL": "syscall", + "syscall.PROV_RSA_SCHANNEL": "syscall", + "syscall.PROV_RSA_SIG": "syscall", + "syscall.PROV_SPYRUS_LYNKS": "syscall", + "syscall.PROV_SSL": "syscall", + "syscall.PR_CAPBSET_DROP": "syscall", + "syscall.PR_CAPBSET_READ": "syscall", + "syscall.PR_CLEAR_SECCOMP_FILTER": 
"syscall", + "syscall.PR_ENDIAN_BIG": "syscall", + "syscall.PR_ENDIAN_LITTLE": "syscall", + "syscall.PR_ENDIAN_PPC_LITTLE": "syscall", + "syscall.PR_FPEMU_NOPRINT": "syscall", + "syscall.PR_FPEMU_SIGFPE": "syscall", + "syscall.PR_FP_EXC_ASYNC": "syscall", + "syscall.PR_FP_EXC_DISABLED": "syscall", + "syscall.PR_FP_EXC_DIV": "syscall", + "syscall.PR_FP_EXC_INV": "syscall", + "syscall.PR_FP_EXC_NONRECOV": "syscall", + "syscall.PR_FP_EXC_OVF": "syscall", + "syscall.PR_FP_EXC_PRECISE": "syscall", + "syscall.PR_FP_EXC_RES": "syscall", + "syscall.PR_FP_EXC_SW_ENABLE": "syscall", + "syscall.PR_FP_EXC_UND": "syscall", + "syscall.PR_GET_DUMPABLE": "syscall", + "syscall.PR_GET_ENDIAN": "syscall", + "syscall.PR_GET_FPEMU": "syscall", + "syscall.PR_GET_FPEXC": "syscall", + "syscall.PR_GET_KEEPCAPS": "syscall", + "syscall.PR_GET_NAME": "syscall", + "syscall.PR_GET_PDEATHSIG": "syscall", + "syscall.PR_GET_SECCOMP": "syscall", + "syscall.PR_GET_SECCOMP_FILTER": "syscall", + "syscall.PR_GET_SECUREBITS": "syscall", + "syscall.PR_GET_TIMERSLACK": "syscall", + "syscall.PR_GET_TIMING": "syscall", + "syscall.PR_GET_TSC": "syscall", + "syscall.PR_GET_UNALIGN": "syscall", + "syscall.PR_MCE_KILL": "syscall", + "syscall.PR_MCE_KILL_CLEAR": "syscall", + "syscall.PR_MCE_KILL_DEFAULT": "syscall", + "syscall.PR_MCE_KILL_EARLY": "syscall", + "syscall.PR_MCE_KILL_GET": "syscall", + "syscall.PR_MCE_KILL_LATE": "syscall", + "syscall.PR_MCE_KILL_SET": "syscall", + "syscall.PR_SECCOMP_FILTER_EVENT": "syscall", + "syscall.PR_SECCOMP_FILTER_SYSCALL": "syscall", + "syscall.PR_SET_DUMPABLE": "syscall", + "syscall.PR_SET_ENDIAN": "syscall", + "syscall.PR_SET_FPEMU": "syscall", + "syscall.PR_SET_FPEXC": "syscall", + "syscall.PR_SET_KEEPCAPS": "syscall", + "syscall.PR_SET_NAME": "syscall", + "syscall.PR_SET_PDEATHSIG": "syscall", + "syscall.PR_SET_PTRACER": "syscall", + "syscall.PR_SET_SECCOMP": "syscall", + "syscall.PR_SET_SECCOMP_FILTER": "syscall", + "syscall.PR_SET_SECUREBITS": "syscall", + 
"syscall.PR_SET_TIMERSLACK": "syscall", + "syscall.PR_SET_TIMING": "syscall", + "syscall.PR_SET_TSC": "syscall", + "syscall.PR_SET_UNALIGN": "syscall", + "syscall.PR_TASK_PERF_EVENTS_DISABLE": "syscall", + "syscall.PR_TASK_PERF_EVENTS_ENABLE": "syscall", + "syscall.PR_TIMING_STATISTICAL": "syscall", + "syscall.PR_TIMING_TIMESTAMP": "syscall", + "syscall.PR_TSC_ENABLE": "syscall", + "syscall.PR_TSC_SIGSEGV": "syscall", + "syscall.PR_UNALIGN_NOPRINT": "syscall", + "syscall.PR_UNALIGN_SIGBUS": "syscall", + "syscall.PTRACE_ARCH_PRCTL": "syscall", + "syscall.PTRACE_ATTACH": "syscall", + "syscall.PTRACE_CONT": "syscall", + "syscall.PTRACE_DETACH": "syscall", + "syscall.PTRACE_EVENT_CLONE": "syscall", + "syscall.PTRACE_EVENT_EXEC": "syscall", + "syscall.PTRACE_EVENT_EXIT": "syscall", + "syscall.PTRACE_EVENT_FORK": "syscall", + "syscall.PTRACE_EVENT_VFORK": "syscall", + "syscall.PTRACE_EVENT_VFORK_DONE": "syscall", + "syscall.PTRACE_GETCRUNCHREGS": "syscall", + "syscall.PTRACE_GETEVENTMSG": "syscall", + "syscall.PTRACE_GETFPREGS": "syscall", + "syscall.PTRACE_GETFPXREGS": "syscall", + "syscall.PTRACE_GETHBPREGS": "syscall", + "syscall.PTRACE_GETREGS": "syscall", + "syscall.PTRACE_GETREGSET": "syscall", + "syscall.PTRACE_GETSIGINFO": "syscall", + "syscall.PTRACE_GETVFPREGS": "syscall", + "syscall.PTRACE_GETWMMXREGS": "syscall", + "syscall.PTRACE_GET_THREAD_AREA": "syscall", + "syscall.PTRACE_KILL": "syscall", + "syscall.PTRACE_OLDSETOPTIONS": "syscall", + "syscall.PTRACE_O_MASK": "syscall", + "syscall.PTRACE_O_TRACECLONE": "syscall", + "syscall.PTRACE_O_TRACEEXEC": "syscall", + "syscall.PTRACE_O_TRACEEXIT": "syscall", + "syscall.PTRACE_O_TRACEFORK": "syscall", + "syscall.PTRACE_O_TRACESYSGOOD": "syscall", + "syscall.PTRACE_O_TRACEVFORK": "syscall", + "syscall.PTRACE_O_TRACEVFORKDONE": "syscall", + "syscall.PTRACE_PEEKDATA": "syscall", + "syscall.PTRACE_PEEKTEXT": "syscall", + "syscall.PTRACE_PEEKUSR": "syscall", + "syscall.PTRACE_POKEDATA": "syscall", + 
"syscall.PTRACE_POKETEXT": "syscall", + "syscall.PTRACE_POKEUSR": "syscall", + "syscall.PTRACE_SETCRUNCHREGS": "syscall", + "syscall.PTRACE_SETFPREGS": "syscall", + "syscall.PTRACE_SETFPXREGS": "syscall", + "syscall.PTRACE_SETHBPREGS": "syscall", + "syscall.PTRACE_SETOPTIONS": "syscall", + "syscall.PTRACE_SETREGS": "syscall", + "syscall.PTRACE_SETREGSET": "syscall", + "syscall.PTRACE_SETSIGINFO": "syscall", + "syscall.PTRACE_SETVFPREGS": "syscall", + "syscall.PTRACE_SETWMMXREGS": "syscall", + "syscall.PTRACE_SET_SYSCALL": "syscall", + "syscall.PTRACE_SET_THREAD_AREA": "syscall", + "syscall.PTRACE_SINGLEBLOCK": "syscall", + "syscall.PTRACE_SINGLESTEP": "syscall", + "syscall.PTRACE_SYSCALL": "syscall", + "syscall.PTRACE_SYSEMU": "syscall", + "syscall.PTRACE_SYSEMU_SINGLESTEP": "syscall", + "syscall.PTRACE_TRACEME": "syscall", + "syscall.PT_ATTACH": "syscall", + "syscall.PT_ATTACHEXC": "syscall", + "syscall.PT_CONTINUE": "syscall", + "syscall.PT_DATA_ADDR": "syscall", + "syscall.PT_DENY_ATTACH": "syscall", + "syscall.PT_DETACH": "syscall", + "syscall.PT_FIRSTMACH": "syscall", + "syscall.PT_FORCEQUOTA": "syscall", + "syscall.PT_KILL": "syscall", + "syscall.PT_MASK": "syscall", + "syscall.PT_READ_D": "syscall", + "syscall.PT_READ_I": "syscall", + "syscall.PT_READ_U": "syscall", + "syscall.PT_SIGEXC": "syscall", + "syscall.PT_STEP": "syscall", + "syscall.PT_TEXT_ADDR": "syscall", + "syscall.PT_TEXT_END_ADDR": "syscall", + "syscall.PT_THUPDATE": "syscall", + "syscall.PT_TRACE_ME": "syscall", + "syscall.PT_WRITE_D": "syscall", + "syscall.PT_WRITE_I": "syscall", + "syscall.PT_WRITE_U": "syscall", + "syscall.ParseDirent": "syscall", + "syscall.ParseNetlinkMessage": "syscall", + "syscall.ParseNetlinkRouteAttr": "syscall", + "syscall.ParseRoutingMessage": "syscall", + "syscall.ParseRoutingSockaddr": "syscall", + "syscall.ParseSocketControlMessage": "syscall", + "syscall.ParseUnixCredentials": "syscall", + "syscall.ParseUnixRights": "syscall", + "syscall.PathMax": "syscall", + 
"syscall.Pathconf": "syscall", + "syscall.Pause": "syscall", + "syscall.Pipe": "syscall", + "syscall.Pipe2": "syscall", + "syscall.PivotRoot": "syscall", + "syscall.PostQueuedCompletionStatus": "syscall", + "syscall.Pread": "syscall", + "syscall.Proc": "syscall", + "syscall.ProcAttr": "syscall", + "syscall.Process32First": "syscall", + "syscall.Process32Next": "syscall", + "syscall.ProcessEntry32": "syscall", + "syscall.ProcessInformation": "syscall", + "syscall.Protoent": "syscall", + "syscall.PtraceAttach": "syscall", + "syscall.PtraceCont": "syscall", + "syscall.PtraceDetach": "syscall", + "syscall.PtraceGetEventMsg": "syscall", + "syscall.PtraceGetRegs": "syscall", + "syscall.PtracePeekData": "syscall", + "syscall.PtracePeekText": "syscall", + "syscall.PtracePokeData": "syscall", + "syscall.PtracePokeText": "syscall", + "syscall.PtraceRegs": "syscall", + "syscall.PtraceSetOptions": "syscall", + "syscall.PtraceSetRegs": "syscall", + "syscall.PtraceSingleStep": "syscall", + "syscall.PtraceSyscall": "syscall", + "syscall.Pwrite": "syscall", + "syscall.REG_BINARY": "syscall", + "syscall.REG_DWORD": "syscall", + "syscall.REG_DWORD_BIG_ENDIAN": "syscall", + "syscall.REG_DWORD_LITTLE_ENDIAN": "syscall", + "syscall.REG_EXPAND_SZ": "syscall", + "syscall.REG_FULL_RESOURCE_DESCRIPTOR": "syscall", + "syscall.REG_LINK": "syscall", + "syscall.REG_MULTI_SZ": "syscall", + "syscall.REG_NONE": "syscall", + "syscall.REG_QWORD": "syscall", + "syscall.REG_QWORD_LITTLE_ENDIAN": "syscall", + "syscall.REG_RESOURCE_LIST": "syscall", + "syscall.REG_RESOURCE_REQUIREMENTS_LIST": "syscall", + "syscall.REG_SZ": "syscall", + "syscall.RLIMIT_AS": "syscall", + "syscall.RLIMIT_CORE": "syscall", + "syscall.RLIMIT_CPU": "syscall", + "syscall.RLIMIT_DATA": "syscall", + "syscall.RLIMIT_FSIZE": "syscall", + "syscall.RLIMIT_NOFILE": "syscall", + "syscall.RLIMIT_STACK": "syscall", + "syscall.RLIM_INFINITY": "syscall", + "syscall.RTAX_ADVMSS": "syscall", + "syscall.RTAX_AUTHOR": "syscall", + 
"syscall.RTAX_BRD": "syscall", + "syscall.RTAX_CWND": "syscall", + "syscall.RTAX_DST": "syscall", + "syscall.RTAX_FEATURES": "syscall", + "syscall.RTAX_FEATURE_ALLFRAG": "syscall", + "syscall.RTAX_FEATURE_ECN": "syscall", + "syscall.RTAX_FEATURE_SACK": "syscall", + "syscall.RTAX_FEATURE_TIMESTAMP": "syscall", + "syscall.RTAX_GATEWAY": "syscall", + "syscall.RTAX_GENMASK": "syscall", + "syscall.RTAX_HOPLIMIT": "syscall", + "syscall.RTAX_IFA": "syscall", + "syscall.RTAX_IFP": "syscall", + "syscall.RTAX_INITCWND": "syscall", + "syscall.RTAX_INITRWND": "syscall", + "syscall.RTAX_LABEL": "syscall", + "syscall.RTAX_LOCK": "syscall", + "syscall.RTAX_MAX": "syscall", + "syscall.RTAX_MTU": "syscall", + "syscall.RTAX_NETMASK": "syscall", + "syscall.RTAX_REORDERING": "syscall", + "syscall.RTAX_RTO_MIN": "syscall", + "syscall.RTAX_RTT": "syscall", + "syscall.RTAX_RTTVAR": "syscall", + "syscall.RTAX_SRC": "syscall", + "syscall.RTAX_SRCMASK": "syscall", + "syscall.RTAX_SSTHRESH": "syscall", + "syscall.RTAX_TAG": "syscall", + "syscall.RTAX_UNSPEC": "syscall", + "syscall.RTAX_WINDOW": "syscall", + "syscall.RTA_ALIGNTO": "syscall", + "syscall.RTA_AUTHOR": "syscall", + "syscall.RTA_BRD": "syscall", + "syscall.RTA_CACHEINFO": "syscall", + "syscall.RTA_DST": "syscall", + "syscall.RTA_FLOW": "syscall", + "syscall.RTA_GATEWAY": "syscall", + "syscall.RTA_GENMASK": "syscall", + "syscall.RTA_IFA": "syscall", + "syscall.RTA_IFP": "syscall", + "syscall.RTA_IIF": "syscall", + "syscall.RTA_LABEL": "syscall", + "syscall.RTA_MAX": "syscall", + "syscall.RTA_METRICS": "syscall", + "syscall.RTA_MULTIPATH": "syscall", + "syscall.RTA_NETMASK": "syscall", + "syscall.RTA_OIF": "syscall", + "syscall.RTA_PREFSRC": "syscall", + "syscall.RTA_PRIORITY": "syscall", + "syscall.RTA_SRC": "syscall", + "syscall.RTA_SRCMASK": "syscall", + "syscall.RTA_TABLE": "syscall", + "syscall.RTA_TAG": "syscall", + "syscall.RTA_UNSPEC": "syscall", + "syscall.RTCF_DIRECTSRC": "syscall", + "syscall.RTCF_DOREDIRECT": "syscall", 
+ "syscall.RTCF_LOG": "syscall", + "syscall.RTCF_MASQ": "syscall", + "syscall.RTCF_NAT": "syscall", + "syscall.RTCF_VALVE": "syscall", + "syscall.RTF_ADDRCLASSMASK": "syscall", + "syscall.RTF_ADDRCONF": "syscall", + "syscall.RTF_ALLONLINK": "syscall", + "syscall.RTF_ANNOUNCE": "syscall", + "syscall.RTF_BLACKHOLE": "syscall", + "syscall.RTF_BROADCAST": "syscall", + "syscall.RTF_CACHE": "syscall", + "syscall.RTF_CLONED": "syscall", + "syscall.RTF_CLONING": "syscall", + "syscall.RTF_CONDEMNED": "syscall", + "syscall.RTF_DEFAULT": "syscall", + "syscall.RTF_DELCLONE": "syscall", + "syscall.RTF_DONE": "syscall", + "syscall.RTF_DYNAMIC": "syscall", + "syscall.RTF_FLOW": "syscall", + "syscall.RTF_FMASK": "syscall", + "syscall.RTF_GATEWAY": "syscall", + "syscall.RTF_GWFLAG_COMPAT": "syscall", + "syscall.RTF_HOST": "syscall", + "syscall.RTF_IFREF": "syscall", + "syscall.RTF_IFSCOPE": "syscall", + "syscall.RTF_INTERFACE": "syscall", + "syscall.RTF_IRTT": "syscall", + "syscall.RTF_LINKRT": "syscall", + "syscall.RTF_LLDATA": "syscall", + "syscall.RTF_LLINFO": "syscall", + "syscall.RTF_LOCAL": "syscall", + "syscall.RTF_MASK": "syscall", + "syscall.RTF_MODIFIED": "syscall", + "syscall.RTF_MPATH": "syscall", + "syscall.RTF_MPLS": "syscall", + "syscall.RTF_MSS": "syscall", + "syscall.RTF_MTU": "syscall", + "syscall.RTF_MULTICAST": "syscall", + "syscall.RTF_NAT": "syscall", + "syscall.RTF_NOFORWARD": "syscall", + "syscall.RTF_NONEXTHOP": "syscall", + "syscall.RTF_NOPMTUDISC": "syscall", + "syscall.RTF_PERMANENT_ARP": "syscall", + "syscall.RTF_PINNED": "syscall", + "syscall.RTF_POLICY": "syscall", + "syscall.RTF_PRCLONING": "syscall", + "syscall.RTF_PROTO1": "syscall", + "syscall.RTF_PROTO2": "syscall", + "syscall.RTF_PROTO3": "syscall", + "syscall.RTF_REINSTATE": "syscall", + "syscall.RTF_REJECT": "syscall", + "syscall.RTF_RNH_LOCKED": "syscall", + "syscall.RTF_SOURCE": "syscall", + "syscall.RTF_SRC": "syscall", + "syscall.RTF_STATIC": "syscall", + "syscall.RTF_STICKY": "syscall", + 
"syscall.RTF_THROW": "syscall", + "syscall.RTF_TUNNEL": "syscall", + "syscall.RTF_UP": "syscall", + "syscall.RTF_USETRAILERS": "syscall", + "syscall.RTF_WASCLONED": "syscall", + "syscall.RTF_WINDOW": "syscall", + "syscall.RTF_XRESOLVE": "syscall", + "syscall.RTM_ADD": "syscall", + "syscall.RTM_BASE": "syscall", + "syscall.RTM_CHANGE": "syscall", + "syscall.RTM_CHGADDR": "syscall", + "syscall.RTM_DELACTION": "syscall", + "syscall.RTM_DELADDR": "syscall", + "syscall.RTM_DELADDRLABEL": "syscall", + "syscall.RTM_DELETE": "syscall", + "syscall.RTM_DELLINK": "syscall", + "syscall.RTM_DELMADDR": "syscall", + "syscall.RTM_DELNEIGH": "syscall", + "syscall.RTM_DELQDISC": "syscall", + "syscall.RTM_DELROUTE": "syscall", + "syscall.RTM_DELRULE": "syscall", + "syscall.RTM_DELTCLASS": "syscall", + "syscall.RTM_DELTFILTER": "syscall", + "syscall.RTM_DESYNC": "syscall", + "syscall.RTM_F_CLONED": "syscall", + "syscall.RTM_F_EQUALIZE": "syscall", + "syscall.RTM_F_NOTIFY": "syscall", + "syscall.RTM_F_PREFIX": "syscall", + "syscall.RTM_GET": "syscall", + "syscall.RTM_GET2": "syscall", + "syscall.RTM_GETACTION": "syscall", + "syscall.RTM_GETADDR": "syscall", + "syscall.RTM_GETADDRLABEL": "syscall", + "syscall.RTM_GETANYCAST": "syscall", + "syscall.RTM_GETDCB": "syscall", + "syscall.RTM_GETLINK": "syscall", + "syscall.RTM_GETMULTICAST": "syscall", + "syscall.RTM_GETNEIGH": "syscall", + "syscall.RTM_GETNEIGHTBL": "syscall", + "syscall.RTM_GETQDISC": "syscall", + "syscall.RTM_GETROUTE": "syscall", + "syscall.RTM_GETRULE": "syscall", + "syscall.RTM_GETTCLASS": "syscall", + "syscall.RTM_GETTFILTER": "syscall", + "syscall.RTM_IEEE80211": "syscall", + "syscall.RTM_IFANNOUNCE": "syscall", + "syscall.RTM_IFINFO": "syscall", + "syscall.RTM_IFINFO2": "syscall", + "syscall.RTM_LLINFO_UPD": "syscall", + "syscall.RTM_LOCK": "syscall", + "syscall.RTM_LOSING": "syscall", + "syscall.RTM_MAX": "syscall", + "syscall.RTM_MAXSIZE": "syscall", + "syscall.RTM_MISS": "syscall", + "syscall.RTM_NEWACTION": 
"syscall", + "syscall.RTM_NEWADDR": "syscall", + "syscall.RTM_NEWADDRLABEL": "syscall", + "syscall.RTM_NEWLINK": "syscall", + "syscall.RTM_NEWMADDR": "syscall", + "syscall.RTM_NEWMADDR2": "syscall", + "syscall.RTM_NEWNDUSEROPT": "syscall", + "syscall.RTM_NEWNEIGH": "syscall", + "syscall.RTM_NEWNEIGHTBL": "syscall", + "syscall.RTM_NEWPREFIX": "syscall", + "syscall.RTM_NEWQDISC": "syscall", + "syscall.RTM_NEWROUTE": "syscall", + "syscall.RTM_NEWRULE": "syscall", + "syscall.RTM_NEWTCLASS": "syscall", + "syscall.RTM_NEWTFILTER": "syscall", + "syscall.RTM_NR_FAMILIES": "syscall", + "syscall.RTM_NR_MSGTYPES": "syscall", + "syscall.RTM_OIFINFO": "syscall", + "syscall.RTM_OLDADD": "syscall", + "syscall.RTM_OLDDEL": "syscall", + "syscall.RTM_OOIFINFO": "syscall", + "syscall.RTM_REDIRECT": "syscall", + "syscall.RTM_RESOLVE": "syscall", + "syscall.RTM_RTTUNIT": "syscall", + "syscall.RTM_SETDCB": "syscall", + "syscall.RTM_SETGATE": "syscall", + "syscall.RTM_SETLINK": "syscall", + "syscall.RTM_SETNEIGHTBL": "syscall", + "syscall.RTM_VERSION": "syscall", + "syscall.RTNH_ALIGNTO": "syscall", + "syscall.RTNH_F_DEAD": "syscall", + "syscall.RTNH_F_ONLINK": "syscall", + "syscall.RTNH_F_PERVASIVE": "syscall", + "syscall.RTNLGRP_IPV4_IFADDR": "syscall", + "syscall.RTNLGRP_IPV4_MROUTE": "syscall", + "syscall.RTNLGRP_IPV4_ROUTE": "syscall", + "syscall.RTNLGRP_IPV4_RULE": "syscall", + "syscall.RTNLGRP_IPV6_IFADDR": "syscall", + "syscall.RTNLGRP_IPV6_IFINFO": "syscall", + "syscall.RTNLGRP_IPV6_MROUTE": "syscall", + "syscall.RTNLGRP_IPV6_PREFIX": "syscall", + "syscall.RTNLGRP_IPV6_ROUTE": "syscall", + "syscall.RTNLGRP_IPV6_RULE": "syscall", + "syscall.RTNLGRP_LINK": "syscall", + "syscall.RTNLGRP_ND_USEROPT": "syscall", + "syscall.RTNLGRP_NEIGH": "syscall", + "syscall.RTNLGRP_NONE": "syscall", + "syscall.RTNLGRP_NOTIFY": "syscall", + "syscall.RTNLGRP_TC": "syscall", + "syscall.RTN_ANYCAST": "syscall", + "syscall.RTN_BLACKHOLE": "syscall", + "syscall.RTN_BROADCAST": "syscall", + 
"syscall.RTN_LOCAL": "syscall", + "syscall.RTN_MAX": "syscall", + "syscall.RTN_MULTICAST": "syscall", + "syscall.RTN_NAT": "syscall", + "syscall.RTN_PROHIBIT": "syscall", + "syscall.RTN_THROW": "syscall", + "syscall.RTN_UNICAST": "syscall", + "syscall.RTN_UNREACHABLE": "syscall", + "syscall.RTN_UNSPEC": "syscall", + "syscall.RTN_XRESOLVE": "syscall", + "syscall.RTPROT_BIRD": "syscall", + "syscall.RTPROT_BOOT": "syscall", + "syscall.RTPROT_DHCP": "syscall", + "syscall.RTPROT_DNROUTED": "syscall", + "syscall.RTPROT_GATED": "syscall", + "syscall.RTPROT_KERNEL": "syscall", + "syscall.RTPROT_MRT": "syscall", + "syscall.RTPROT_NTK": "syscall", + "syscall.RTPROT_RA": "syscall", + "syscall.RTPROT_REDIRECT": "syscall", + "syscall.RTPROT_STATIC": "syscall", + "syscall.RTPROT_UNSPEC": "syscall", + "syscall.RTPROT_XORP": "syscall", + "syscall.RTPROT_ZEBRA": "syscall", + "syscall.RTV_EXPIRE": "syscall", + "syscall.RTV_HOPCOUNT": "syscall", + "syscall.RTV_MTU": "syscall", + "syscall.RTV_RPIPE": "syscall", + "syscall.RTV_RTT": "syscall", + "syscall.RTV_RTTVAR": "syscall", + "syscall.RTV_SPIPE": "syscall", + "syscall.RTV_SSTHRESH": "syscall", + "syscall.RTV_WEIGHT": "syscall", + "syscall.RT_CACHING_CONTEXT": "syscall", + "syscall.RT_CLASS_DEFAULT": "syscall", + "syscall.RT_CLASS_LOCAL": "syscall", + "syscall.RT_CLASS_MAIN": "syscall", + "syscall.RT_CLASS_MAX": "syscall", + "syscall.RT_CLASS_UNSPEC": "syscall", + "syscall.RT_DEFAULT_FIB": "syscall", + "syscall.RT_NORTREF": "syscall", + "syscall.RT_SCOPE_HOST": "syscall", + "syscall.RT_SCOPE_LINK": "syscall", + "syscall.RT_SCOPE_NOWHERE": "syscall", + "syscall.RT_SCOPE_SITE": "syscall", + "syscall.RT_SCOPE_UNIVERSE": "syscall", + "syscall.RT_TABLEID_MAX": "syscall", + "syscall.RT_TABLE_COMPAT": "syscall", + "syscall.RT_TABLE_DEFAULT": "syscall", + "syscall.RT_TABLE_LOCAL": "syscall", + "syscall.RT_TABLE_MAIN": "syscall", + "syscall.RT_TABLE_MAX": "syscall", + "syscall.RT_TABLE_UNSPEC": "syscall", + "syscall.RUSAGE_CHILDREN": 
"syscall", + "syscall.RUSAGE_SELF": "syscall", + "syscall.RUSAGE_THREAD": "syscall", + "syscall.Radvisory_t": "syscall", + "syscall.RawSockaddr": "syscall", + "syscall.RawSockaddrAny": "syscall", + "syscall.RawSockaddrDatalink": "syscall", + "syscall.RawSockaddrInet4": "syscall", + "syscall.RawSockaddrInet6": "syscall", + "syscall.RawSockaddrLinklayer": "syscall", + "syscall.RawSockaddrNetlink": "syscall", + "syscall.RawSockaddrUnix": "syscall", + "syscall.RawSyscall": "syscall", + "syscall.RawSyscall6": "syscall", + "syscall.Read": "syscall", + "syscall.ReadConsole": "syscall", + "syscall.ReadDirectoryChanges": "syscall", + "syscall.ReadDirent": "syscall", + "syscall.ReadFile": "syscall", + "syscall.Readlink": "syscall", + "syscall.Reboot": "syscall", + "syscall.Recvfrom": "syscall", + "syscall.Recvmsg": "syscall", + "syscall.RegCloseKey": "syscall", + "syscall.RegEnumKeyEx": "syscall", + "syscall.RegOpenKeyEx": "syscall", + "syscall.RegQueryInfoKey": "syscall", + "syscall.RegQueryValueEx": "syscall", + "syscall.RemoveDirectory": "syscall", + "syscall.Removexattr": "syscall", + "syscall.Rename": "syscall", + "syscall.Renameat": "syscall", + "syscall.Revoke": "syscall", + "syscall.Rlimit": "syscall", + "syscall.Rmdir": "syscall", + "syscall.RouteMessage": "syscall", + "syscall.RouteRIB": "syscall", + "syscall.RtAttr": "syscall", + "syscall.RtGenmsg": "syscall", + "syscall.RtMetrics": "syscall", + "syscall.RtMsg": "syscall", + "syscall.RtMsghdr": "syscall", + "syscall.RtNexthop": "syscall", + "syscall.Rusage": "syscall", + "syscall.SCM_BINTIME": "syscall", + "syscall.SCM_CREDENTIALS": "syscall", + "syscall.SCM_CREDS": "syscall", + "syscall.SCM_RIGHTS": "syscall", + "syscall.SCM_TIMESTAMP": "syscall", + "syscall.SCM_TIMESTAMPING": "syscall", + "syscall.SCM_TIMESTAMPNS": "syscall", + "syscall.SCM_TIMESTAMP_MONOTONIC": "syscall", + "syscall.SHUT_RD": "syscall", + "syscall.SHUT_RDWR": "syscall", + "syscall.SHUT_WR": "syscall", + "syscall.SID": "syscall", + 
"syscall.SIDAndAttributes": "syscall", + "syscall.SIGABRT": "syscall", + "syscall.SIGALRM": "syscall", + "syscall.SIGBUS": "syscall", + "syscall.SIGCHLD": "syscall", + "syscall.SIGCLD": "syscall", + "syscall.SIGCONT": "syscall", + "syscall.SIGEMT": "syscall", + "syscall.SIGFPE": "syscall", + "syscall.SIGHUP": "syscall", + "syscall.SIGILL": "syscall", + "syscall.SIGINFO": "syscall", + "syscall.SIGINT": "syscall", + "syscall.SIGIO": "syscall", + "syscall.SIGIOT": "syscall", + "syscall.SIGKILL": "syscall", + "syscall.SIGLIBRT": "syscall", + "syscall.SIGLWP": "syscall", + "syscall.SIGPIPE": "syscall", + "syscall.SIGPOLL": "syscall", + "syscall.SIGPROF": "syscall", + "syscall.SIGPWR": "syscall", + "syscall.SIGQUIT": "syscall", + "syscall.SIGSEGV": "syscall", + "syscall.SIGSTKFLT": "syscall", + "syscall.SIGSTOP": "syscall", + "syscall.SIGSYS": "syscall", + "syscall.SIGTERM": "syscall", + "syscall.SIGTHR": "syscall", + "syscall.SIGTRAP": "syscall", + "syscall.SIGTSTP": "syscall", + "syscall.SIGTTIN": "syscall", + "syscall.SIGTTOU": "syscall", + "syscall.SIGUNUSED": "syscall", + "syscall.SIGURG": "syscall", + "syscall.SIGUSR1": "syscall", + "syscall.SIGUSR2": "syscall", + "syscall.SIGVTALRM": "syscall", + "syscall.SIGWINCH": "syscall", + "syscall.SIGXCPU": "syscall", + "syscall.SIGXFSZ": "syscall", + "syscall.SIOCADDDLCI": "syscall", + "syscall.SIOCADDMULTI": "syscall", + "syscall.SIOCADDRT": "syscall", + "syscall.SIOCAIFADDR": "syscall", + "syscall.SIOCAIFGROUP": "syscall", + "syscall.SIOCALIFADDR": "syscall", + "syscall.SIOCARPIPLL": "syscall", + "syscall.SIOCATMARK": "syscall", + "syscall.SIOCAUTOADDR": "syscall", + "syscall.SIOCAUTONETMASK": "syscall", + "syscall.SIOCBRDGADD": "syscall", + "syscall.SIOCBRDGADDS": "syscall", + "syscall.SIOCBRDGARL": "syscall", + "syscall.SIOCBRDGDADDR": "syscall", + "syscall.SIOCBRDGDEL": "syscall", + "syscall.SIOCBRDGDELS": "syscall", + "syscall.SIOCBRDGFLUSH": "syscall", + "syscall.SIOCBRDGFRL": "syscall", + "syscall.SIOCBRDGGCACHE": 
"syscall", + "syscall.SIOCBRDGGFD": "syscall", + "syscall.SIOCBRDGGHT": "syscall", + "syscall.SIOCBRDGGIFFLGS": "syscall", + "syscall.SIOCBRDGGMA": "syscall", + "syscall.SIOCBRDGGPARAM": "syscall", + "syscall.SIOCBRDGGPRI": "syscall", + "syscall.SIOCBRDGGRL": "syscall", + "syscall.SIOCBRDGGSIFS": "syscall", + "syscall.SIOCBRDGGTO": "syscall", + "syscall.SIOCBRDGIFS": "syscall", + "syscall.SIOCBRDGRTS": "syscall", + "syscall.SIOCBRDGSADDR": "syscall", + "syscall.SIOCBRDGSCACHE": "syscall", + "syscall.SIOCBRDGSFD": "syscall", + "syscall.SIOCBRDGSHT": "syscall", + "syscall.SIOCBRDGSIFCOST": "syscall", + "syscall.SIOCBRDGSIFFLGS": "syscall", + "syscall.SIOCBRDGSIFPRIO": "syscall", + "syscall.SIOCBRDGSMA": "syscall", + "syscall.SIOCBRDGSPRI": "syscall", + "syscall.SIOCBRDGSPROTO": "syscall", + "syscall.SIOCBRDGSTO": "syscall", + "syscall.SIOCBRDGSTXHC": "syscall", + "syscall.SIOCDARP": "syscall", + "syscall.SIOCDELDLCI": "syscall", + "syscall.SIOCDELMULTI": "syscall", + "syscall.SIOCDELRT": "syscall", + "syscall.SIOCDEVPRIVATE": "syscall", + "syscall.SIOCDIFADDR": "syscall", + "syscall.SIOCDIFGROUP": "syscall", + "syscall.SIOCDIFPHYADDR": "syscall", + "syscall.SIOCDLIFADDR": "syscall", + "syscall.SIOCDRARP": "syscall", + "syscall.SIOCGARP": "syscall", + "syscall.SIOCGDRVSPEC": "syscall", + "syscall.SIOCGETKALIVE": "syscall", + "syscall.SIOCGETLABEL": "syscall", + "syscall.SIOCGETPFLOW": "syscall", + "syscall.SIOCGETPFSYNC": "syscall", + "syscall.SIOCGETSGCNT": "syscall", + "syscall.SIOCGETVIFCNT": "syscall", + "syscall.SIOCGETVLAN": "syscall", + "syscall.SIOCGHIWAT": "syscall", + "syscall.SIOCGIFADDR": "syscall", + "syscall.SIOCGIFADDRPREF": "syscall", + "syscall.SIOCGIFALIAS": "syscall", + "syscall.SIOCGIFALTMTU": "syscall", + "syscall.SIOCGIFASYNCMAP": "syscall", + "syscall.SIOCGIFBOND": "syscall", + "syscall.SIOCGIFBR": "syscall", + "syscall.SIOCGIFBRDADDR": "syscall", + "syscall.SIOCGIFCAP": "syscall", + "syscall.SIOCGIFCONF": "syscall", + "syscall.SIOCGIFCOUNT": 
"syscall", + "syscall.SIOCGIFDATA": "syscall", + "syscall.SIOCGIFDESCR": "syscall", + "syscall.SIOCGIFDEVMTU": "syscall", + "syscall.SIOCGIFDLT": "syscall", + "syscall.SIOCGIFDSTADDR": "syscall", + "syscall.SIOCGIFENCAP": "syscall", + "syscall.SIOCGIFFIB": "syscall", + "syscall.SIOCGIFFLAGS": "syscall", + "syscall.SIOCGIFGATTR": "syscall", + "syscall.SIOCGIFGENERIC": "syscall", + "syscall.SIOCGIFGMEMB": "syscall", + "syscall.SIOCGIFGROUP": "syscall", + "syscall.SIOCGIFHARDMTU": "syscall", + "syscall.SIOCGIFHWADDR": "syscall", + "syscall.SIOCGIFINDEX": "syscall", + "syscall.SIOCGIFKPI": "syscall", + "syscall.SIOCGIFMAC": "syscall", + "syscall.SIOCGIFMAP": "syscall", + "syscall.SIOCGIFMEDIA": "syscall", + "syscall.SIOCGIFMEM": "syscall", + "syscall.SIOCGIFMETRIC": "syscall", + "syscall.SIOCGIFMTU": "syscall", + "syscall.SIOCGIFNAME": "syscall", + "syscall.SIOCGIFNETMASK": "syscall", + "syscall.SIOCGIFPDSTADDR": "syscall", + "syscall.SIOCGIFPFLAGS": "syscall", + "syscall.SIOCGIFPHYS": "syscall", + "syscall.SIOCGIFPRIORITY": "syscall", + "syscall.SIOCGIFPSRCADDR": "syscall", + "syscall.SIOCGIFRDOMAIN": "syscall", + "syscall.SIOCGIFRTLABEL": "syscall", + "syscall.SIOCGIFSLAVE": "syscall", + "syscall.SIOCGIFSTATUS": "syscall", + "syscall.SIOCGIFTIMESLOT": "syscall", + "syscall.SIOCGIFTXQLEN": "syscall", + "syscall.SIOCGIFVLAN": "syscall", + "syscall.SIOCGIFWAKEFLAGS": "syscall", + "syscall.SIOCGIFXFLAGS": "syscall", + "syscall.SIOCGLIFADDR": "syscall", + "syscall.SIOCGLIFPHYADDR": "syscall", + "syscall.SIOCGLIFPHYRTABLE": "syscall", + "syscall.SIOCGLIFPHYTTL": "syscall", + "syscall.SIOCGLINKSTR": "syscall", + "syscall.SIOCGLOWAT": "syscall", + "syscall.SIOCGPGRP": "syscall", + "syscall.SIOCGPRIVATE_0": "syscall", + "syscall.SIOCGPRIVATE_1": "syscall", + "syscall.SIOCGRARP": "syscall", + "syscall.SIOCGSPPPPARAMS": "syscall", + "syscall.SIOCGSTAMP": "syscall", + "syscall.SIOCGSTAMPNS": "syscall", + "syscall.SIOCGVH": "syscall", + "syscall.SIOCGVNETID": "syscall", + 
"syscall.SIOCIFCREATE": "syscall", + "syscall.SIOCIFCREATE2": "syscall", + "syscall.SIOCIFDESTROY": "syscall", + "syscall.SIOCIFGCLONERS": "syscall", + "syscall.SIOCINITIFADDR": "syscall", + "syscall.SIOCPROTOPRIVATE": "syscall", + "syscall.SIOCRSLVMULTI": "syscall", + "syscall.SIOCRTMSG": "syscall", + "syscall.SIOCSARP": "syscall", + "syscall.SIOCSDRVSPEC": "syscall", + "syscall.SIOCSETKALIVE": "syscall", + "syscall.SIOCSETLABEL": "syscall", + "syscall.SIOCSETPFLOW": "syscall", + "syscall.SIOCSETPFSYNC": "syscall", + "syscall.SIOCSETVLAN": "syscall", + "syscall.SIOCSHIWAT": "syscall", + "syscall.SIOCSIFADDR": "syscall", + "syscall.SIOCSIFADDRPREF": "syscall", + "syscall.SIOCSIFALTMTU": "syscall", + "syscall.SIOCSIFASYNCMAP": "syscall", + "syscall.SIOCSIFBOND": "syscall", + "syscall.SIOCSIFBR": "syscall", + "syscall.SIOCSIFBRDADDR": "syscall", + "syscall.SIOCSIFCAP": "syscall", + "syscall.SIOCSIFDESCR": "syscall", + "syscall.SIOCSIFDSTADDR": "syscall", + "syscall.SIOCSIFENCAP": "syscall", + "syscall.SIOCSIFFIB": "syscall", + "syscall.SIOCSIFFLAGS": "syscall", + "syscall.SIOCSIFGATTR": "syscall", + "syscall.SIOCSIFGENERIC": "syscall", + "syscall.SIOCSIFHWADDR": "syscall", + "syscall.SIOCSIFHWBROADCAST": "syscall", + "syscall.SIOCSIFKPI": "syscall", + "syscall.SIOCSIFLINK": "syscall", + "syscall.SIOCSIFLLADDR": "syscall", + "syscall.SIOCSIFMAC": "syscall", + "syscall.SIOCSIFMAP": "syscall", + "syscall.SIOCSIFMEDIA": "syscall", + "syscall.SIOCSIFMEM": "syscall", + "syscall.SIOCSIFMETRIC": "syscall", + "syscall.SIOCSIFMTU": "syscall", + "syscall.SIOCSIFNAME": "syscall", + "syscall.SIOCSIFNETMASK": "syscall", + "syscall.SIOCSIFPFLAGS": "syscall", + "syscall.SIOCSIFPHYADDR": "syscall", + "syscall.SIOCSIFPHYS": "syscall", + "syscall.SIOCSIFPRIORITY": "syscall", + "syscall.SIOCSIFRDOMAIN": "syscall", + "syscall.SIOCSIFRTLABEL": "syscall", + "syscall.SIOCSIFRVNET": "syscall", + "syscall.SIOCSIFSLAVE": "syscall", + "syscall.SIOCSIFTIMESLOT": "syscall", + 
"syscall.SIOCSIFTXQLEN": "syscall", + "syscall.SIOCSIFVLAN": "syscall", + "syscall.SIOCSIFVNET": "syscall", + "syscall.SIOCSIFXFLAGS": "syscall", + "syscall.SIOCSLIFPHYADDR": "syscall", + "syscall.SIOCSLIFPHYRTABLE": "syscall", + "syscall.SIOCSLIFPHYTTL": "syscall", + "syscall.SIOCSLINKSTR": "syscall", + "syscall.SIOCSLOWAT": "syscall", + "syscall.SIOCSPGRP": "syscall", + "syscall.SIOCSRARP": "syscall", + "syscall.SIOCSSPPPPARAMS": "syscall", + "syscall.SIOCSVH": "syscall", + "syscall.SIOCSVNETID": "syscall", + "syscall.SIOCZIFDATA": "syscall", + "syscall.SIO_GET_EXTENSION_FUNCTION_POINTER": "syscall", + "syscall.SIO_GET_INTERFACE_LIST": "syscall", + "syscall.SIO_KEEPALIVE_VALS": "syscall", + "syscall.SIO_UDP_CONNRESET": "syscall", + "syscall.SOCK_CLOEXEC": "syscall", + "syscall.SOCK_DCCP": "syscall", + "syscall.SOCK_DGRAM": "syscall", + "syscall.SOCK_FLAGS_MASK": "syscall", + "syscall.SOCK_MAXADDRLEN": "syscall", + "syscall.SOCK_NONBLOCK": "syscall", + "syscall.SOCK_NOSIGPIPE": "syscall", + "syscall.SOCK_PACKET": "syscall", + "syscall.SOCK_RAW": "syscall", + "syscall.SOCK_RDM": "syscall", + "syscall.SOCK_SEQPACKET": "syscall", + "syscall.SOCK_STREAM": "syscall", + "syscall.SOL_AAL": "syscall", + "syscall.SOL_ATM": "syscall", + "syscall.SOL_DECNET": "syscall", + "syscall.SOL_ICMPV6": "syscall", + "syscall.SOL_IP": "syscall", + "syscall.SOL_IPV6": "syscall", + "syscall.SOL_IRDA": "syscall", + "syscall.SOL_PACKET": "syscall", + "syscall.SOL_RAW": "syscall", + "syscall.SOL_SOCKET": "syscall", + "syscall.SOL_TCP": "syscall", + "syscall.SOL_X25": "syscall", + "syscall.SOMAXCONN": "syscall", + "syscall.SO_ACCEPTCONN": "syscall", + "syscall.SO_ACCEPTFILTER": "syscall", + "syscall.SO_ATTACH_FILTER": "syscall", + "syscall.SO_BINDANY": "syscall", + "syscall.SO_BINDTODEVICE": "syscall", + "syscall.SO_BINTIME": "syscall", + "syscall.SO_BROADCAST": "syscall", + "syscall.SO_BSDCOMPAT": "syscall", + "syscall.SO_DEBUG": "syscall", + "syscall.SO_DETACH_FILTER": "syscall", + 
"syscall.SO_DOMAIN": "syscall", + "syscall.SO_DONTROUTE": "syscall", + "syscall.SO_DONTTRUNC": "syscall", + "syscall.SO_ERROR": "syscall", + "syscall.SO_KEEPALIVE": "syscall", + "syscall.SO_LABEL": "syscall", + "syscall.SO_LINGER": "syscall", + "syscall.SO_LINGER_SEC": "syscall", + "syscall.SO_LISTENINCQLEN": "syscall", + "syscall.SO_LISTENQLEN": "syscall", + "syscall.SO_LISTENQLIMIT": "syscall", + "syscall.SO_MARK": "syscall", + "syscall.SO_NETPROC": "syscall", + "syscall.SO_NKE": "syscall", + "syscall.SO_NOADDRERR": "syscall", + "syscall.SO_NOHEADER": "syscall", + "syscall.SO_NOSIGPIPE": "syscall", + "syscall.SO_NOTIFYCONFLICT": "syscall", + "syscall.SO_NO_CHECK": "syscall", + "syscall.SO_NO_DDP": "syscall", + "syscall.SO_NO_OFFLOAD": "syscall", + "syscall.SO_NP_EXTENSIONS": "syscall", + "syscall.SO_NREAD": "syscall", + "syscall.SO_NWRITE": "syscall", + "syscall.SO_OOBINLINE": "syscall", + "syscall.SO_OVERFLOWED": "syscall", + "syscall.SO_PASSCRED": "syscall", + "syscall.SO_PASSSEC": "syscall", + "syscall.SO_PEERCRED": "syscall", + "syscall.SO_PEERLABEL": "syscall", + "syscall.SO_PEERNAME": "syscall", + "syscall.SO_PEERSEC": "syscall", + "syscall.SO_PRIORITY": "syscall", + "syscall.SO_PROTOCOL": "syscall", + "syscall.SO_PROTOTYPE": "syscall", + "syscall.SO_RANDOMPORT": "syscall", + "syscall.SO_RCVBUF": "syscall", + "syscall.SO_RCVBUFFORCE": "syscall", + "syscall.SO_RCVLOWAT": "syscall", + "syscall.SO_RCVTIMEO": "syscall", + "syscall.SO_RESTRICTIONS": "syscall", + "syscall.SO_RESTRICT_DENYIN": "syscall", + "syscall.SO_RESTRICT_DENYOUT": "syscall", + "syscall.SO_RESTRICT_DENYSET": "syscall", + "syscall.SO_REUSEADDR": "syscall", + "syscall.SO_REUSEPORT": "syscall", + "syscall.SO_REUSESHAREUID": "syscall", + "syscall.SO_RTABLE": "syscall", + "syscall.SO_RXQ_OVFL": "syscall", + "syscall.SO_SECURITY_AUTHENTICATION": "syscall", + "syscall.SO_SECURITY_ENCRYPTION_NETWORK": "syscall", + "syscall.SO_SECURITY_ENCRYPTION_TRANSPORT": "syscall", + "syscall.SO_SETFIB": 
"syscall", + "syscall.SO_SNDBUF": "syscall", + "syscall.SO_SNDBUFFORCE": "syscall", + "syscall.SO_SNDLOWAT": "syscall", + "syscall.SO_SNDTIMEO": "syscall", + "syscall.SO_SPLICE": "syscall", + "syscall.SO_TIMESTAMP": "syscall", + "syscall.SO_TIMESTAMPING": "syscall", + "syscall.SO_TIMESTAMPNS": "syscall", + "syscall.SO_TIMESTAMP_MONOTONIC": "syscall", + "syscall.SO_TYPE": "syscall", + "syscall.SO_UPCALLCLOSEWAIT": "syscall", + "syscall.SO_UPDATE_ACCEPT_CONTEXT": "syscall", + "syscall.SO_UPDATE_CONNECT_CONTEXT": "syscall", + "syscall.SO_USELOOPBACK": "syscall", + "syscall.SO_USER_COOKIE": "syscall", + "syscall.SO_VENDOR": "syscall", + "syscall.SO_WANTMORE": "syscall", + "syscall.SO_WANTOOBFLAG": "syscall", + "syscall.SSLExtraCertChainPolicyPara": "syscall", + "syscall.STANDARD_RIGHTS_ALL": "syscall", + "syscall.STANDARD_RIGHTS_EXECUTE": "syscall", + "syscall.STANDARD_RIGHTS_READ": "syscall", + "syscall.STANDARD_RIGHTS_REQUIRED": "syscall", + "syscall.STANDARD_RIGHTS_WRITE": "syscall", + "syscall.STARTF_USESHOWWINDOW": "syscall", + "syscall.STARTF_USESTDHANDLES": "syscall", + "syscall.STD_ERROR_HANDLE": "syscall", + "syscall.STD_INPUT_HANDLE": "syscall", + "syscall.STD_OUTPUT_HANDLE": "syscall", + "syscall.SUBLANG_ENGLISH_US": "syscall", + "syscall.SW_FORCEMINIMIZE": "syscall", + "syscall.SW_HIDE": "syscall", + "syscall.SW_MAXIMIZE": "syscall", + "syscall.SW_MINIMIZE": "syscall", + "syscall.SW_NORMAL": "syscall", + "syscall.SW_RESTORE": "syscall", + "syscall.SW_SHOW": "syscall", + "syscall.SW_SHOWDEFAULT": "syscall", + "syscall.SW_SHOWMAXIMIZED": "syscall", + "syscall.SW_SHOWMINIMIZED": "syscall", + "syscall.SW_SHOWMINNOACTIVE": "syscall", + "syscall.SW_SHOWNA": "syscall", + "syscall.SW_SHOWNOACTIVATE": "syscall", + "syscall.SW_SHOWNORMAL": "syscall", + "syscall.SYMBOLIC_LINK_FLAG_DIRECTORY": "syscall", + "syscall.SYNCHRONIZE": "syscall", + "syscall.SYSCTL_VERSION": "syscall", + "syscall.SYSCTL_VERS_0": "syscall", + "syscall.SYSCTL_VERS_1": "syscall", + 
"syscall.SYSCTL_VERS_MASK": "syscall", + "syscall.SYS_ABORT2": "syscall", + "syscall.SYS_ACCEPT": "syscall", + "syscall.SYS_ACCEPT4": "syscall", + "syscall.SYS_ACCEPT_NOCANCEL": "syscall", + "syscall.SYS_ACCESS": "syscall", + "syscall.SYS_ACCESS_EXTENDED": "syscall", + "syscall.SYS_ACCT": "syscall", + "syscall.SYS_ADD_KEY": "syscall", + "syscall.SYS_ADD_PROFIL": "syscall", + "syscall.SYS_ADJFREQ": "syscall", + "syscall.SYS_ADJTIME": "syscall", + "syscall.SYS_ADJTIMEX": "syscall", + "syscall.SYS_AFS_SYSCALL": "syscall", + "syscall.SYS_AIO_CANCEL": "syscall", + "syscall.SYS_AIO_ERROR": "syscall", + "syscall.SYS_AIO_FSYNC": "syscall", + "syscall.SYS_AIO_READ": "syscall", + "syscall.SYS_AIO_RETURN": "syscall", + "syscall.SYS_AIO_SUSPEND": "syscall", + "syscall.SYS_AIO_SUSPEND_NOCANCEL": "syscall", + "syscall.SYS_AIO_WRITE": "syscall", + "syscall.SYS_ALARM": "syscall", + "syscall.SYS_ARCH_PRCTL": "syscall", + "syscall.SYS_ARM_FADVISE64_64": "syscall", + "syscall.SYS_ARM_SYNC_FILE_RANGE": "syscall", + "syscall.SYS_ATGETMSG": "syscall", + "syscall.SYS_ATPGETREQ": "syscall", + "syscall.SYS_ATPGETRSP": "syscall", + "syscall.SYS_ATPSNDREQ": "syscall", + "syscall.SYS_ATPSNDRSP": "syscall", + "syscall.SYS_ATPUTMSG": "syscall", + "syscall.SYS_ATSOCKET": "syscall", + "syscall.SYS_AUDIT": "syscall", + "syscall.SYS_AUDITCTL": "syscall", + "syscall.SYS_AUDITON": "syscall", + "syscall.SYS_AUDIT_SESSION_JOIN": "syscall", + "syscall.SYS_AUDIT_SESSION_PORT": "syscall", + "syscall.SYS_AUDIT_SESSION_SELF": "syscall", + "syscall.SYS_BDFLUSH": "syscall", + "syscall.SYS_BIND": "syscall", + "syscall.SYS_BINDAT": "syscall", + "syscall.SYS_BREAK": "syscall", + "syscall.SYS_BRK": "syscall", + "syscall.SYS_BSDTHREAD_CREATE": "syscall", + "syscall.SYS_BSDTHREAD_REGISTER": "syscall", + "syscall.SYS_BSDTHREAD_TERMINATE": "syscall", + "syscall.SYS_CAPGET": "syscall", + "syscall.SYS_CAPSET": "syscall", + "syscall.SYS_CAP_ENTER": "syscall", + "syscall.SYS_CAP_FCNTLS_GET": "syscall", + 
"syscall.SYS_CAP_FCNTLS_LIMIT": "syscall", + "syscall.SYS_CAP_GETMODE": "syscall", + "syscall.SYS_CAP_GETRIGHTS": "syscall", + "syscall.SYS_CAP_IOCTLS_GET": "syscall", + "syscall.SYS_CAP_IOCTLS_LIMIT": "syscall", + "syscall.SYS_CAP_NEW": "syscall", + "syscall.SYS_CAP_RIGHTS_GET": "syscall", + "syscall.SYS_CAP_RIGHTS_LIMIT": "syscall", + "syscall.SYS_CHDIR": "syscall", + "syscall.SYS_CHFLAGS": "syscall", + "syscall.SYS_CHFLAGSAT": "syscall", + "syscall.SYS_CHMOD": "syscall", + "syscall.SYS_CHMOD_EXTENDED": "syscall", + "syscall.SYS_CHOWN": "syscall", + "syscall.SYS_CHOWN32": "syscall", + "syscall.SYS_CHROOT": "syscall", + "syscall.SYS_CHUD": "syscall", + "syscall.SYS_CLOCK_ADJTIME": "syscall", + "syscall.SYS_CLOCK_GETCPUCLOCKID2": "syscall", + "syscall.SYS_CLOCK_GETRES": "syscall", + "syscall.SYS_CLOCK_GETTIME": "syscall", + "syscall.SYS_CLOCK_NANOSLEEP": "syscall", + "syscall.SYS_CLOCK_SETTIME": "syscall", + "syscall.SYS_CLONE": "syscall", + "syscall.SYS_CLOSE": "syscall", + "syscall.SYS_CLOSEFROM": "syscall", + "syscall.SYS_CLOSE_NOCANCEL": "syscall", + "syscall.SYS_CONNECT": "syscall", + "syscall.SYS_CONNECTAT": "syscall", + "syscall.SYS_CONNECT_NOCANCEL": "syscall", + "syscall.SYS_COPYFILE": "syscall", + "syscall.SYS_CPUSET": "syscall", + "syscall.SYS_CPUSET_GETAFFINITY": "syscall", + "syscall.SYS_CPUSET_GETID": "syscall", + "syscall.SYS_CPUSET_SETAFFINITY": "syscall", + "syscall.SYS_CPUSET_SETID": "syscall", + "syscall.SYS_CREAT": "syscall", + "syscall.SYS_CREATE_MODULE": "syscall", + "syscall.SYS_CSOPS": "syscall", + "syscall.SYS_DELETE": "syscall", + "syscall.SYS_DELETE_MODULE": "syscall", + "syscall.SYS_DUP": "syscall", + "syscall.SYS_DUP2": "syscall", + "syscall.SYS_DUP3": "syscall", + "syscall.SYS_EACCESS": "syscall", + "syscall.SYS_EPOLL_CREATE": "syscall", + "syscall.SYS_EPOLL_CREATE1": "syscall", + "syscall.SYS_EPOLL_CTL": "syscall", + "syscall.SYS_EPOLL_CTL_OLD": "syscall", + "syscall.SYS_EPOLL_PWAIT": "syscall", + "syscall.SYS_EPOLL_WAIT": "syscall", 
+ "syscall.SYS_EPOLL_WAIT_OLD": "syscall", + "syscall.SYS_EVENTFD": "syscall", + "syscall.SYS_EVENTFD2": "syscall", + "syscall.SYS_EXCHANGEDATA": "syscall", + "syscall.SYS_EXECVE": "syscall", + "syscall.SYS_EXIT": "syscall", + "syscall.SYS_EXIT_GROUP": "syscall", + "syscall.SYS_EXTATTRCTL": "syscall", + "syscall.SYS_EXTATTR_DELETE_FD": "syscall", + "syscall.SYS_EXTATTR_DELETE_FILE": "syscall", + "syscall.SYS_EXTATTR_DELETE_LINK": "syscall", + "syscall.SYS_EXTATTR_GET_FD": "syscall", + "syscall.SYS_EXTATTR_GET_FILE": "syscall", + "syscall.SYS_EXTATTR_GET_LINK": "syscall", + "syscall.SYS_EXTATTR_LIST_FD": "syscall", + "syscall.SYS_EXTATTR_LIST_FILE": "syscall", + "syscall.SYS_EXTATTR_LIST_LINK": "syscall", + "syscall.SYS_EXTATTR_SET_FD": "syscall", + "syscall.SYS_EXTATTR_SET_FILE": "syscall", + "syscall.SYS_EXTATTR_SET_LINK": "syscall", + "syscall.SYS_FACCESSAT": "syscall", + "syscall.SYS_FADVISE64": "syscall", + "syscall.SYS_FADVISE64_64": "syscall", + "syscall.SYS_FALLOCATE": "syscall", + "syscall.SYS_FANOTIFY_INIT": "syscall", + "syscall.SYS_FANOTIFY_MARK": "syscall", + "syscall.SYS_FCHDIR": "syscall", + "syscall.SYS_FCHFLAGS": "syscall", + "syscall.SYS_FCHMOD": "syscall", + "syscall.SYS_FCHMODAT": "syscall", + "syscall.SYS_FCHMOD_EXTENDED": "syscall", + "syscall.SYS_FCHOWN": "syscall", + "syscall.SYS_FCHOWN32": "syscall", + "syscall.SYS_FCHOWNAT": "syscall", + "syscall.SYS_FCHROOT": "syscall", + "syscall.SYS_FCNTL": "syscall", + "syscall.SYS_FCNTL64": "syscall", + "syscall.SYS_FCNTL_NOCANCEL": "syscall", + "syscall.SYS_FDATASYNC": "syscall", + "syscall.SYS_FEXECVE": "syscall", + "syscall.SYS_FFCLOCK_GETCOUNTER": "syscall", + "syscall.SYS_FFCLOCK_GETESTIMATE": "syscall", + "syscall.SYS_FFCLOCK_SETESTIMATE": "syscall", + "syscall.SYS_FFSCTL": "syscall", + "syscall.SYS_FGETATTRLIST": "syscall", + "syscall.SYS_FGETXATTR": "syscall", + "syscall.SYS_FHOPEN": "syscall", + "syscall.SYS_FHSTAT": "syscall", + "syscall.SYS_FHSTATFS": "syscall", + 
"syscall.SYS_FILEPORT_MAKEFD": "syscall", + "syscall.SYS_FILEPORT_MAKEPORT": "syscall", + "syscall.SYS_FKTRACE": "syscall", + "syscall.SYS_FLISTXATTR": "syscall", + "syscall.SYS_FLOCK": "syscall", + "syscall.SYS_FORK": "syscall", + "syscall.SYS_FPATHCONF": "syscall", + "syscall.SYS_FREEBSD6_FTRUNCATE": "syscall", + "syscall.SYS_FREEBSD6_LSEEK": "syscall", + "syscall.SYS_FREEBSD6_MMAP": "syscall", + "syscall.SYS_FREEBSD6_PREAD": "syscall", + "syscall.SYS_FREEBSD6_PWRITE": "syscall", + "syscall.SYS_FREEBSD6_TRUNCATE": "syscall", + "syscall.SYS_FREMOVEXATTR": "syscall", + "syscall.SYS_FSCTL": "syscall", + "syscall.SYS_FSETATTRLIST": "syscall", + "syscall.SYS_FSETXATTR": "syscall", + "syscall.SYS_FSGETPATH": "syscall", + "syscall.SYS_FSTAT": "syscall", + "syscall.SYS_FSTAT64": "syscall", + "syscall.SYS_FSTAT64_EXTENDED": "syscall", + "syscall.SYS_FSTATAT": "syscall", + "syscall.SYS_FSTATAT64": "syscall", + "syscall.SYS_FSTATFS": "syscall", + "syscall.SYS_FSTATFS64": "syscall", + "syscall.SYS_FSTATV": "syscall", + "syscall.SYS_FSTATVFS1": "syscall", + "syscall.SYS_FSTAT_EXTENDED": "syscall", + "syscall.SYS_FSYNC": "syscall", + "syscall.SYS_FSYNC_NOCANCEL": "syscall", + "syscall.SYS_FSYNC_RANGE": "syscall", + "syscall.SYS_FTIME": "syscall", + "syscall.SYS_FTRUNCATE": "syscall", + "syscall.SYS_FTRUNCATE64": "syscall", + "syscall.SYS_FUTEX": "syscall", + "syscall.SYS_FUTIMENS": "syscall", + "syscall.SYS_FUTIMES": "syscall", + "syscall.SYS_FUTIMESAT": "syscall", + "syscall.SYS_GETATTRLIST": "syscall", + "syscall.SYS_GETAUDIT": "syscall", + "syscall.SYS_GETAUDIT_ADDR": "syscall", + "syscall.SYS_GETAUID": "syscall", + "syscall.SYS_GETCONTEXT": "syscall", + "syscall.SYS_GETCPU": "syscall", + "syscall.SYS_GETCWD": "syscall", + "syscall.SYS_GETDENTS": "syscall", + "syscall.SYS_GETDENTS64": "syscall", + "syscall.SYS_GETDIRENTRIES": "syscall", + "syscall.SYS_GETDIRENTRIES64": "syscall", + "syscall.SYS_GETDIRENTRIESATTR": "syscall", + "syscall.SYS_GETDTABLECOUNT": "syscall", + 
"syscall.SYS_GETDTABLESIZE": "syscall", + "syscall.SYS_GETEGID": "syscall", + "syscall.SYS_GETEGID32": "syscall", + "syscall.SYS_GETEUID": "syscall", + "syscall.SYS_GETEUID32": "syscall", + "syscall.SYS_GETFH": "syscall", + "syscall.SYS_GETFSSTAT": "syscall", + "syscall.SYS_GETFSSTAT64": "syscall", + "syscall.SYS_GETGID": "syscall", + "syscall.SYS_GETGID32": "syscall", + "syscall.SYS_GETGROUPS": "syscall", + "syscall.SYS_GETGROUPS32": "syscall", + "syscall.SYS_GETHOSTUUID": "syscall", + "syscall.SYS_GETITIMER": "syscall", + "syscall.SYS_GETLCID": "syscall", + "syscall.SYS_GETLOGIN": "syscall", + "syscall.SYS_GETLOGINCLASS": "syscall", + "syscall.SYS_GETPEERNAME": "syscall", + "syscall.SYS_GETPGID": "syscall", + "syscall.SYS_GETPGRP": "syscall", + "syscall.SYS_GETPID": "syscall", + "syscall.SYS_GETPMSG": "syscall", + "syscall.SYS_GETPPID": "syscall", + "syscall.SYS_GETPRIORITY": "syscall", + "syscall.SYS_GETRESGID": "syscall", + "syscall.SYS_GETRESGID32": "syscall", + "syscall.SYS_GETRESUID": "syscall", + "syscall.SYS_GETRESUID32": "syscall", + "syscall.SYS_GETRLIMIT": "syscall", + "syscall.SYS_GETRTABLE": "syscall", + "syscall.SYS_GETRUSAGE": "syscall", + "syscall.SYS_GETSGROUPS": "syscall", + "syscall.SYS_GETSID": "syscall", + "syscall.SYS_GETSOCKNAME": "syscall", + "syscall.SYS_GETSOCKOPT": "syscall", + "syscall.SYS_GETTHRID": "syscall", + "syscall.SYS_GETTID": "syscall", + "syscall.SYS_GETTIMEOFDAY": "syscall", + "syscall.SYS_GETUID": "syscall", + "syscall.SYS_GETUID32": "syscall", + "syscall.SYS_GETVFSSTAT": "syscall", + "syscall.SYS_GETWGROUPS": "syscall", + "syscall.SYS_GETXATTR": "syscall", + "syscall.SYS_GET_KERNEL_SYMS": "syscall", + "syscall.SYS_GET_MEMPOLICY": "syscall", + "syscall.SYS_GET_ROBUST_LIST": "syscall", + "syscall.SYS_GET_THREAD_AREA": "syscall", + "syscall.SYS_GTTY": "syscall", + "syscall.SYS_IDENTITYSVC": "syscall", + "syscall.SYS_IDLE": "syscall", + "syscall.SYS_INITGROUPS": "syscall", + "syscall.SYS_INIT_MODULE": "syscall", + 
"syscall.SYS_INOTIFY_ADD_WATCH": "syscall", + "syscall.SYS_INOTIFY_INIT": "syscall", + "syscall.SYS_INOTIFY_INIT1": "syscall", + "syscall.SYS_INOTIFY_RM_WATCH": "syscall", + "syscall.SYS_IOCTL": "syscall", + "syscall.SYS_IOPERM": "syscall", + "syscall.SYS_IOPL": "syscall", + "syscall.SYS_IOPOLICYSYS": "syscall", + "syscall.SYS_IOPRIO_GET": "syscall", + "syscall.SYS_IOPRIO_SET": "syscall", + "syscall.SYS_IO_CANCEL": "syscall", + "syscall.SYS_IO_DESTROY": "syscall", + "syscall.SYS_IO_GETEVENTS": "syscall", + "syscall.SYS_IO_SETUP": "syscall", + "syscall.SYS_IO_SUBMIT": "syscall", + "syscall.SYS_IPC": "syscall", + "syscall.SYS_ISSETUGID": "syscall", + "syscall.SYS_JAIL": "syscall", + "syscall.SYS_JAIL_ATTACH": "syscall", + "syscall.SYS_JAIL_GET": "syscall", + "syscall.SYS_JAIL_REMOVE": "syscall", + "syscall.SYS_JAIL_SET": "syscall", + "syscall.SYS_KDEBUG_TRACE": "syscall", + "syscall.SYS_KENV": "syscall", + "syscall.SYS_KEVENT": "syscall", + "syscall.SYS_KEVENT64": "syscall", + "syscall.SYS_KEXEC_LOAD": "syscall", + "syscall.SYS_KEYCTL": "syscall", + "syscall.SYS_KILL": "syscall", + "syscall.SYS_KLDFIND": "syscall", + "syscall.SYS_KLDFIRSTMOD": "syscall", + "syscall.SYS_KLDLOAD": "syscall", + "syscall.SYS_KLDNEXT": "syscall", + "syscall.SYS_KLDSTAT": "syscall", + "syscall.SYS_KLDSYM": "syscall", + "syscall.SYS_KLDUNLOAD": "syscall", + "syscall.SYS_KLDUNLOADF": "syscall", + "syscall.SYS_KQUEUE": "syscall", + "syscall.SYS_KQUEUE1": "syscall", + "syscall.SYS_KTIMER_CREATE": "syscall", + "syscall.SYS_KTIMER_DELETE": "syscall", + "syscall.SYS_KTIMER_GETOVERRUN": "syscall", + "syscall.SYS_KTIMER_GETTIME": "syscall", + "syscall.SYS_KTIMER_SETTIME": "syscall", + "syscall.SYS_KTRACE": "syscall", + "syscall.SYS_LCHFLAGS": "syscall", + "syscall.SYS_LCHMOD": "syscall", + "syscall.SYS_LCHOWN": "syscall", + "syscall.SYS_LCHOWN32": "syscall", + "syscall.SYS_LGETFH": "syscall", + "syscall.SYS_LGETXATTR": "syscall", + "syscall.SYS_LINK": "syscall", + "syscall.SYS_LINKAT": "syscall", + 
"syscall.SYS_LIO_LISTIO": "syscall", + "syscall.SYS_LISTEN": "syscall", + "syscall.SYS_LISTXATTR": "syscall", + "syscall.SYS_LLISTXATTR": "syscall", + "syscall.SYS_LOCK": "syscall", + "syscall.SYS_LOOKUP_DCOOKIE": "syscall", + "syscall.SYS_LPATHCONF": "syscall", + "syscall.SYS_LREMOVEXATTR": "syscall", + "syscall.SYS_LSEEK": "syscall", + "syscall.SYS_LSETXATTR": "syscall", + "syscall.SYS_LSTAT": "syscall", + "syscall.SYS_LSTAT64": "syscall", + "syscall.SYS_LSTAT64_EXTENDED": "syscall", + "syscall.SYS_LSTATV": "syscall", + "syscall.SYS_LSTAT_EXTENDED": "syscall", + "syscall.SYS_LUTIMES": "syscall", + "syscall.SYS_MAC_SYSCALL": "syscall", + "syscall.SYS_MADVISE": "syscall", + "syscall.SYS_MADVISE1": "syscall", + "syscall.SYS_MAXSYSCALL": "syscall", + "syscall.SYS_MBIND": "syscall", + "syscall.SYS_MIGRATE_PAGES": "syscall", + "syscall.SYS_MINCORE": "syscall", + "syscall.SYS_MINHERIT": "syscall", + "syscall.SYS_MKCOMPLEX": "syscall", + "syscall.SYS_MKDIR": "syscall", + "syscall.SYS_MKDIRAT": "syscall", + "syscall.SYS_MKDIR_EXTENDED": "syscall", + "syscall.SYS_MKFIFO": "syscall", + "syscall.SYS_MKFIFOAT": "syscall", + "syscall.SYS_MKFIFO_EXTENDED": "syscall", + "syscall.SYS_MKNOD": "syscall", + "syscall.SYS_MKNODAT": "syscall", + "syscall.SYS_MLOCK": "syscall", + "syscall.SYS_MLOCKALL": "syscall", + "syscall.SYS_MMAP": "syscall", + "syscall.SYS_MMAP2": "syscall", + "syscall.SYS_MODCTL": "syscall", + "syscall.SYS_MODFIND": "syscall", + "syscall.SYS_MODFNEXT": "syscall", + "syscall.SYS_MODIFY_LDT": "syscall", + "syscall.SYS_MODNEXT": "syscall", + "syscall.SYS_MODSTAT": "syscall", + "syscall.SYS_MODWATCH": "syscall", + "syscall.SYS_MOUNT": "syscall", + "syscall.SYS_MOVE_PAGES": "syscall", + "syscall.SYS_MPROTECT": "syscall", + "syscall.SYS_MPX": "syscall", + "syscall.SYS_MQUERY": "syscall", + "syscall.SYS_MQ_GETSETATTR": "syscall", + "syscall.SYS_MQ_NOTIFY": "syscall", + "syscall.SYS_MQ_OPEN": "syscall", + "syscall.SYS_MQ_TIMEDRECEIVE": "syscall", + 
"syscall.SYS_MQ_TIMEDSEND": "syscall", + "syscall.SYS_MQ_UNLINK": "syscall", + "syscall.SYS_MREMAP": "syscall", + "syscall.SYS_MSGCTL": "syscall", + "syscall.SYS_MSGGET": "syscall", + "syscall.SYS_MSGRCV": "syscall", + "syscall.SYS_MSGRCV_NOCANCEL": "syscall", + "syscall.SYS_MSGSND": "syscall", + "syscall.SYS_MSGSND_NOCANCEL": "syscall", + "syscall.SYS_MSGSYS": "syscall", + "syscall.SYS_MSYNC": "syscall", + "syscall.SYS_MSYNC_NOCANCEL": "syscall", + "syscall.SYS_MUNLOCK": "syscall", + "syscall.SYS_MUNLOCKALL": "syscall", + "syscall.SYS_MUNMAP": "syscall", + "syscall.SYS_NAME_TO_HANDLE_AT": "syscall", + "syscall.SYS_NANOSLEEP": "syscall", + "syscall.SYS_NEWFSTATAT": "syscall", + "syscall.SYS_NFSCLNT": "syscall", + "syscall.SYS_NFSSERVCTL": "syscall", + "syscall.SYS_NFSSVC": "syscall", + "syscall.SYS_NFSTAT": "syscall", + "syscall.SYS_NICE": "syscall", + "syscall.SYS_NLSTAT": "syscall", + "syscall.SYS_NMOUNT": "syscall", + "syscall.SYS_NSTAT": "syscall", + "syscall.SYS_NTP_ADJTIME": "syscall", + "syscall.SYS_NTP_GETTIME": "syscall", + "syscall.SYS_OABI_SYSCALL_BASE": "syscall", + "syscall.SYS_OBREAK": "syscall", + "syscall.SYS_OLDFSTAT": "syscall", + "syscall.SYS_OLDLSTAT": "syscall", + "syscall.SYS_OLDOLDUNAME": "syscall", + "syscall.SYS_OLDSTAT": "syscall", + "syscall.SYS_OLDUNAME": "syscall", + "syscall.SYS_OPEN": "syscall", + "syscall.SYS_OPENAT": "syscall", + "syscall.SYS_OPENBSD_POLL": "syscall", + "syscall.SYS_OPEN_BY_HANDLE_AT": "syscall", + "syscall.SYS_OPEN_EXTENDED": "syscall", + "syscall.SYS_OPEN_NOCANCEL": "syscall", + "syscall.SYS_OVADVISE": "syscall", + "syscall.SYS_PACCEPT": "syscall", + "syscall.SYS_PATHCONF": "syscall", + "syscall.SYS_PAUSE": "syscall", + "syscall.SYS_PCICONFIG_IOBASE": "syscall", + "syscall.SYS_PCICONFIG_READ": "syscall", + "syscall.SYS_PCICONFIG_WRITE": "syscall", + "syscall.SYS_PDFORK": "syscall", + "syscall.SYS_PDGETPID": "syscall", + "syscall.SYS_PDKILL": "syscall", + "syscall.SYS_PERF_EVENT_OPEN": "syscall", + 
"syscall.SYS_PERSONALITY": "syscall", + "syscall.SYS_PID_HIBERNATE": "syscall", + "syscall.SYS_PID_RESUME": "syscall", + "syscall.SYS_PID_SHUTDOWN_SOCKETS": "syscall", + "syscall.SYS_PID_SUSPEND": "syscall", + "syscall.SYS_PIPE": "syscall", + "syscall.SYS_PIPE2": "syscall", + "syscall.SYS_PIVOT_ROOT": "syscall", + "syscall.SYS_PMC_CONTROL": "syscall", + "syscall.SYS_PMC_GET_INFO": "syscall", + "syscall.SYS_POLL": "syscall", + "syscall.SYS_POLLTS": "syscall", + "syscall.SYS_POLL_NOCANCEL": "syscall", + "syscall.SYS_POSIX_FADVISE": "syscall", + "syscall.SYS_POSIX_FALLOCATE": "syscall", + "syscall.SYS_POSIX_OPENPT": "syscall", + "syscall.SYS_POSIX_SPAWN": "syscall", + "syscall.SYS_PPOLL": "syscall", + "syscall.SYS_PRCTL": "syscall", + "syscall.SYS_PREAD": "syscall", + "syscall.SYS_PREAD64": "syscall", + "syscall.SYS_PREADV": "syscall", + "syscall.SYS_PREAD_NOCANCEL": "syscall", + "syscall.SYS_PRLIMIT64": "syscall", + "syscall.SYS_PROCCTL": "syscall", + "syscall.SYS_PROCESS_POLICY": "syscall", + "syscall.SYS_PROCESS_VM_READV": "syscall", + "syscall.SYS_PROCESS_VM_WRITEV": "syscall", + "syscall.SYS_PROC_INFO": "syscall", + "syscall.SYS_PROF": "syscall", + "syscall.SYS_PROFIL": "syscall", + "syscall.SYS_PSELECT": "syscall", + "syscall.SYS_PSELECT6": "syscall", + "syscall.SYS_PSET_ASSIGN": "syscall", + "syscall.SYS_PSET_CREATE": "syscall", + "syscall.SYS_PSET_DESTROY": "syscall", + "syscall.SYS_PSYNCH_CVBROAD": "syscall", + "syscall.SYS_PSYNCH_CVCLRPREPOST": "syscall", + "syscall.SYS_PSYNCH_CVSIGNAL": "syscall", + "syscall.SYS_PSYNCH_CVWAIT": "syscall", + "syscall.SYS_PSYNCH_MUTEXDROP": "syscall", + "syscall.SYS_PSYNCH_MUTEXWAIT": "syscall", + "syscall.SYS_PSYNCH_RW_DOWNGRADE": "syscall", + "syscall.SYS_PSYNCH_RW_LONGRDLOCK": "syscall", + "syscall.SYS_PSYNCH_RW_RDLOCK": "syscall", + "syscall.SYS_PSYNCH_RW_UNLOCK": "syscall", + "syscall.SYS_PSYNCH_RW_UNLOCK2": "syscall", + "syscall.SYS_PSYNCH_RW_UPGRADE": "syscall", + "syscall.SYS_PSYNCH_RW_WRLOCK": "syscall", + 
"syscall.SYS_PSYNCH_RW_YIELDWRLOCK": "syscall", + "syscall.SYS_PTRACE": "syscall", + "syscall.SYS_PUTPMSG": "syscall", + "syscall.SYS_PWRITE": "syscall", + "syscall.SYS_PWRITE64": "syscall", + "syscall.SYS_PWRITEV": "syscall", + "syscall.SYS_PWRITE_NOCANCEL": "syscall", + "syscall.SYS_QUERY_MODULE": "syscall", + "syscall.SYS_QUOTACTL": "syscall", + "syscall.SYS_RASCTL": "syscall", + "syscall.SYS_RCTL_ADD_RULE": "syscall", + "syscall.SYS_RCTL_GET_LIMITS": "syscall", + "syscall.SYS_RCTL_GET_RACCT": "syscall", + "syscall.SYS_RCTL_GET_RULES": "syscall", + "syscall.SYS_RCTL_REMOVE_RULE": "syscall", + "syscall.SYS_READ": "syscall", + "syscall.SYS_READAHEAD": "syscall", + "syscall.SYS_READDIR": "syscall", + "syscall.SYS_READLINK": "syscall", + "syscall.SYS_READLINKAT": "syscall", + "syscall.SYS_READV": "syscall", + "syscall.SYS_READV_NOCANCEL": "syscall", + "syscall.SYS_READ_NOCANCEL": "syscall", + "syscall.SYS_REBOOT": "syscall", + "syscall.SYS_RECV": "syscall", + "syscall.SYS_RECVFROM": "syscall", + "syscall.SYS_RECVFROM_NOCANCEL": "syscall", + "syscall.SYS_RECVMMSG": "syscall", + "syscall.SYS_RECVMSG": "syscall", + "syscall.SYS_RECVMSG_NOCANCEL": "syscall", + "syscall.SYS_REMAP_FILE_PAGES": "syscall", + "syscall.SYS_REMOVEXATTR": "syscall", + "syscall.SYS_RENAME": "syscall", + "syscall.SYS_RENAMEAT": "syscall", + "syscall.SYS_REQUEST_KEY": "syscall", + "syscall.SYS_RESTART_SYSCALL": "syscall", + "syscall.SYS_REVOKE": "syscall", + "syscall.SYS_RFORK": "syscall", + "syscall.SYS_RMDIR": "syscall", + "syscall.SYS_RTPRIO": "syscall", + "syscall.SYS_RTPRIO_THREAD": "syscall", + "syscall.SYS_RT_SIGACTION": "syscall", + "syscall.SYS_RT_SIGPENDING": "syscall", + "syscall.SYS_RT_SIGPROCMASK": "syscall", + "syscall.SYS_RT_SIGQUEUEINFO": "syscall", + "syscall.SYS_RT_SIGRETURN": "syscall", + "syscall.SYS_RT_SIGSUSPEND": "syscall", + "syscall.SYS_RT_SIGTIMEDWAIT": "syscall", + "syscall.SYS_RT_TGSIGQUEUEINFO": "syscall", + "syscall.SYS_SBRK": "syscall", + 
"syscall.SYS_SCHED_GETAFFINITY": "syscall", + "syscall.SYS_SCHED_GETPARAM": "syscall", + "syscall.SYS_SCHED_GETSCHEDULER": "syscall", + "syscall.SYS_SCHED_GET_PRIORITY_MAX": "syscall", + "syscall.SYS_SCHED_GET_PRIORITY_MIN": "syscall", + "syscall.SYS_SCHED_RR_GET_INTERVAL": "syscall", + "syscall.SYS_SCHED_SETAFFINITY": "syscall", + "syscall.SYS_SCHED_SETPARAM": "syscall", + "syscall.SYS_SCHED_SETSCHEDULER": "syscall", + "syscall.SYS_SCHED_YIELD": "syscall", + "syscall.SYS_SCTP_GENERIC_RECVMSG": "syscall", + "syscall.SYS_SCTP_GENERIC_SENDMSG": "syscall", + "syscall.SYS_SCTP_GENERIC_SENDMSG_IOV": "syscall", + "syscall.SYS_SCTP_PEELOFF": "syscall", + "syscall.SYS_SEARCHFS": "syscall", + "syscall.SYS_SECURITY": "syscall", + "syscall.SYS_SELECT": "syscall", + "syscall.SYS_SELECT_NOCANCEL": "syscall", + "syscall.SYS_SEMCONFIG": "syscall", + "syscall.SYS_SEMCTL": "syscall", + "syscall.SYS_SEMGET": "syscall", + "syscall.SYS_SEMOP": "syscall", + "syscall.SYS_SEMSYS": "syscall", + "syscall.SYS_SEMTIMEDOP": "syscall", + "syscall.SYS_SEM_CLOSE": "syscall", + "syscall.SYS_SEM_DESTROY": "syscall", + "syscall.SYS_SEM_GETVALUE": "syscall", + "syscall.SYS_SEM_INIT": "syscall", + "syscall.SYS_SEM_OPEN": "syscall", + "syscall.SYS_SEM_POST": "syscall", + "syscall.SYS_SEM_TRYWAIT": "syscall", + "syscall.SYS_SEM_UNLINK": "syscall", + "syscall.SYS_SEM_WAIT": "syscall", + "syscall.SYS_SEM_WAIT_NOCANCEL": "syscall", + "syscall.SYS_SEND": "syscall", + "syscall.SYS_SENDFILE": "syscall", + "syscall.SYS_SENDFILE64": "syscall", + "syscall.SYS_SENDMMSG": "syscall", + "syscall.SYS_SENDMSG": "syscall", + "syscall.SYS_SENDMSG_NOCANCEL": "syscall", + "syscall.SYS_SENDTO": "syscall", + "syscall.SYS_SENDTO_NOCANCEL": "syscall", + "syscall.SYS_SETATTRLIST": "syscall", + "syscall.SYS_SETAUDIT": "syscall", + "syscall.SYS_SETAUDIT_ADDR": "syscall", + "syscall.SYS_SETAUID": "syscall", + "syscall.SYS_SETCONTEXT": "syscall", + "syscall.SYS_SETDOMAINNAME": "syscall", + "syscall.SYS_SETEGID": "syscall", + 
"syscall.SYS_SETEUID": "syscall", + "syscall.SYS_SETFIB": "syscall", + "syscall.SYS_SETFSGID": "syscall", + "syscall.SYS_SETFSGID32": "syscall", + "syscall.SYS_SETFSUID": "syscall", + "syscall.SYS_SETFSUID32": "syscall", + "syscall.SYS_SETGID": "syscall", + "syscall.SYS_SETGID32": "syscall", + "syscall.SYS_SETGROUPS": "syscall", + "syscall.SYS_SETGROUPS32": "syscall", + "syscall.SYS_SETHOSTNAME": "syscall", + "syscall.SYS_SETITIMER": "syscall", + "syscall.SYS_SETLCID": "syscall", + "syscall.SYS_SETLOGIN": "syscall", + "syscall.SYS_SETLOGINCLASS": "syscall", + "syscall.SYS_SETNS": "syscall", + "syscall.SYS_SETPGID": "syscall", + "syscall.SYS_SETPRIORITY": "syscall", + "syscall.SYS_SETPRIVEXEC": "syscall", + "syscall.SYS_SETREGID": "syscall", + "syscall.SYS_SETREGID32": "syscall", + "syscall.SYS_SETRESGID": "syscall", + "syscall.SYS_SETRESGID32": "syscall", + "syscall.SYS_SETRESUID": "syscall", + "syscall.SYS_SETRESUID32": "syscall", + "syscall.SYS_SETREUID": "syscall", + "syscall.SYS_SETREUID32": "syscall", + "syscall.SYS_SETRLIMIT": "syscall", + "syscall.SYS_SETRTABLE": "syscall", + "syscall.SYS_SETSGROUPS": "syscall", + "syscall.SYS_SETSID": "syscall", + "syscall.SYS_SETSOCKOPT": "syscall", + "syscall.SYS_SETTID": "syscall", + "syscall.SYS_SETTID_WITH_PID": "syscall", + "syscall.SYS_SETTIMEOFDAY": "syscall", + "syscall.SYS_SETUID": "syscall", + "syscall.SYS_SETUID32": "syscall", + "syscall.SYS_SETWGROUPS": "syscall", + "syscall.SYS_SETXATTR": "syscall", + "syscall.SYS_SET_MEMPOLICY": "syscall", + "syscall.SYS_SET_ROBUST_LIST": "syscall", + "syscall.SYS_SET_THREAD_AREA": "syscall", + "syscall.SYS_SET_TID_ADDRESS": "syscall", + "syscall.SYS_SGETMASK": "syscall", + "syscall.SYS_SHARED_REGION_CHECK_NP": "syscall", + "syscall.SYS_SHARED_REGION_MAP_AND_SLIDE_NP": "syscall", + "syscall.SYS_SHMAT": "syscall", + "syscall.SYS_SHMCTL": "syscall", + "syscall.SYS_SHMDT": "syscall", + "syscall.SYS_SHMGET": "syscall", + "syscall.SYS_SHMSYS": "syscall", + "syscall.SYS_SHM_OPEN": 
"syscall", + "syscall.SYS_SHM_UNLINK": "syscall", + "syscall.SYS_SHUTDOWN": "syscall", + "syscall.SYS_SIGACTION": "syscall", + "syscall.SYS_SIGALTSTACK": "syscall", + "syscall.SYS_SIGNAL": "syscall", + "syscall.SYS_SIGNALFD": "syscall", + "syscall.SYS_SIGNALFD4": "syscall", + "syscall.SYS_SIGPENDING": "syscall", + "syscall.SYS_SIGPROCMASK": "syscall", + "syscall.SYS_SIGQUEUE": "syscall", + "syscall.SYS_SIGQUEUEINFO": "syscall", + "syscall.SYS_SIGRETURN": "syscall", + "syscall.SYS_SIGSUSPEND": "syscall", + "syscall.SYS_SIGSUSPEND_NOCANCEL": "syscall", + "syscall.SYS_SIGTIMEDWAIT": "syscall", + "syscall.SYS_SIGWAIT": "syscall", + "syscall.SYS_SIGWAITINFO": "syscall", + "syscall.SYS_SOCKET": "syscall", + "syscall.SYS_SOCKETCALL": "syscall", + "syscall.SYS_SOCKETPAIR": "syscall", + "syscall.SYS_SPLICE": "syscall", + "syscall.SYS_SSETMASK": "syscall", + "syscall.SYS_SSTK": "syscall", + "syscall.SYS_STACK_SNAPSHOT": "syscall", + "syscall.SYS_STAT": "syscall", + "syscall.SYS_STAT64": "syscall", + "syscall.SYS_STAT64_EXTENDED": "syscall", + "syscall.SYS_STATFS": "syscall", + "syscall.SYS_STATFS64": "syscall", + "syscall.SYS_STATV": "syscall", + "syscall.SYS_STATVFS1": "syscall", + "syscall.SYS_STAT_EXTENDED": "syscall", + "syscall.SYS_STIME": "syscall", + "syscall.SYS_STTY": "syscall", + "syscall.SYS_SWAPCONTEXT": "syscall", + "syscall.SYS_SWAPCTL": "syscall", + "syscall.SYS_SWAPOFF": "syscall", + "syscall.SYS_SWAPON": "syscall", + "syscall.SYS_SYMLINK": "syscall", + "syscall.SYS_SYMLINKAT": "syscall", + "syscall.SYS_SYNC": "syscall", + "syscall.SYS_SYNCFS": "syscall", + "syscall.SYS_SYNC_FILE_RANGE": "syscall", + "syscall.SYS_SYSARCH": "syscall", + "syscall.SYS_SYSCALL": "syscall", + "syscall.SYS_SYSCALL_BASE": "syscall", + "syscall.SYS_SYSFS": "syscall", + "syscall.SYS_SYSINFO": "syscall", + "syscall.SYS_SYSLOG": "syscall", + "syscall.SYS_TEE": "syscall", + "syscall.SYS_TGKILL": "syscall", + "syscall.SYS_THREAD_SELFID": "syscall", + "syscall.SYS_THR_CREATE": "syscall", + 
"syscall.SYS_THR_EXIT": "syscall", + "syscall.SYS_THR_KILL": "syscall", + "syscall.SYS_THR_KILL2": "syscall", + "syscall.SYS_THR_NEW": "syscall", + "syscall.SYS_THR_SELF": "syscall", + "syscall.SYS_THR_SET_NAME": "syscall", + "syscall.SYS_THR_SUSPEND": "syscall", + "syscall.SYS_THR_WAKE": "syscall", + "syscall.SYS_TIME": "syscall", + "syscall.SYS_TIMERFD_CREATE": "syscall", + "syscall.SYS_TIMERFD_GETTIME": "syscall", + "syscall.SYS_TIMERFD_SETTIME": "syscall", + "syscall.SYS_TIMER_CREATE": "syscall", + "syscall.SYS_TIMER_DELETE": "syscall", + "syscall.SYS_TIMER_GETOVERRUN": "syscall", + "syscall.SYS_TIMER_GETTIME": "syscall", + "syscall.SYS_TIMER_SETTIME": "syscall", + "syscall.SYS_TIMES": "syscall", + "syscall.SYS_TKILL": "syscall", + "syscall.SYS_TRUNCATE": "syscall", + "syscall.SYS_TRUNCATE64": "syscall", + "syscall.SYS_TUXCALL": "syscall", + "syscall.SYS_UGETRLIMIT": "syscall", + "syscall.SYS_ULIMIT": "syscall", + "syscall.SYS_UMASK": "syscall", + "syscall.SYS_UMASK_EXTENDED": "syscall", + "syscall.SYS_UMOUNT": "syscall", + "syscall.SYS_UMOUNT2": "syscall", + "syscall.SYS_UNAME": "syscall", + "syscall.SYS_UNDELETE": "syscall", + "syscall.SYS_UNLINK": "syscall", + "syscall.SYS_UNLINKAT": "syscall", + "syscall.SYS_UNMOUNT": "syscall", + "syscall.SYS_UNSHARE": "syscall", + "syscall.SYS_USELIB": "syscall", + "syscall.SYS_USTAT": "syscall", + "syscall.SYS_UTIME": "syscall", + "syscall.SYS_UTIMENSAT": "syscall", + "syscall.SYS_UTIMES": "syscall", + "syscall.SYS_UTRACE": "syscall", + "syscall.SYS_UUIDGEN": "syscall", + "syscall.SYS_VADVISE": "syscall", + "syscall.SYS_VFORK": "syscall", + "syscall.SYS_VHANGUP": "syscall", + "syscall.SYS_VM86": "syscall", + "syscall.SYS_VM86OLD": "syscall", + "syscall.SYS_VMSPLICE": "syscall", + "syscall.SYS_VM_PRESSURE_MONITOR": "syscall", + "syscall.SYS_VSERVER": "syscall", + "syscall.SYS_WAIT4": "syscall", + "syscall.SYS_WAIT4_NOCANCEL": "syscall", + "syscall.SYS_WAIT6": "syscall", + "syscall.SYS_WAITEVENT": "syscall", + 
"syscall.SYS_WAITID": "syscall", + "syscall.SYS_WAITID_NOCANCEL": "syscall", + "syscall.SYS_WAITPID": "syscall", + "syscall.SYS_WATCHEVENT": "syscall", + "syscall.SYS_WORKQ_KERNRETURN": "syscall", + "syscall.SYS_WORKQ_OPEN": "syscall", + "syscall.SYS_WRITE": "syscall", + "syscall.SYS_WRITEV": "syscall", + "syscall.SYS_WRITEV_NOCANCEL": "syscall", + "syscall.SYS_WRITE_NOCANCEL": "syscall", + "syscall.SYS_YIELD": "syscall", + "syscall.SYS__LLSEEK": "syscall", + "syscall.SYS__LWP_CONTINUE": "syscall", + "syscall.SYS__LWP_CREATE": "syscall", + "syscall.SYS__LWP_CTL": "syscall", + "syscall.SYS__LWP_DETACH": "syscall", + "syscall.SYS__LWP_EXIT": "syscall", + "syscall.SYS__LWP_GETNAME": "syscall", + "syscall.SYS__LWP_GETPRIVATE": "syscall", + "syscall.SYS__LWP_KILL": "syscall", + "syscall.SYS__LWP_PARK": "syscall", + "syscall.SYS__LWP_SELF": "syscall", + "syscall.SYS__LWP_SETNAME": "syscall", + "syscall.SYS__LWP_SETPRIVATE": "syscall", + "syscall.SYS__LWP_SUSPEND": "syscall", + "syscall.SYS__LWP_UNPARK": "syscall", + "syscall.SYS__LWP_UNPARK_ALL": "syscall", + "syscall.SYS__LWP_WAIT": "syscall", + "syscall.SYS__LWP_WAKEUP": "syscall", + "syscall.SYS__NEWSELECT": "syscall", + "syscall.SYS__PSET_BIND": "syscall", + "syscall.SYS__SCHED_GETAFFINITY": "syscall", + "syscall.SYS__SCHED_GETPARAM": "syscall", + "syscall.SYS__SCHED_SETAFFINITY": "syscall", + "syscall.SYS__SCHED_SETPARAM": "syscall", + "syscall.SYS__SYSCTL": "syscall", + "syscall.SYS__UMTX_LOCK": "syscall", + "syscall.SYS__UMTX_OP": "syscall", + "syscall.SYS__UMTX_UNLOCK": "syscall", + "syscall.SYS___ACL_ACLCHECK_FD": "syscall", + "syscall.SYS___ACL_ACLCHECK_FILE": "syscall", + "syscall.SYS___ACL_ACLCHECK_LINK": "syscall", + "syscall.SYS___ACL_DELETE_FD": "syscall", + "syscall.SYS___ACL_DELETE_FILE": "syscall", + "syscall.SYS___ACL_DELETE_LINK": "syscall", + "syscall.SYS___ACL_GET_FD": "syscall", + "syscall.SYS___ACL_GET_FILE": "syscall", + "syscall.SYS___ACL_GET_LINK": "syscall", + "syscall.SYS___ACL_SET_FD": 
"syscall", + "syscall.SYS___ACL_SET_FILE": "syscall", + "syscall.SYS___ACL_SET_LINK": "syscall", + "syscall.SYS___CLONE": "syscall", + "syscall.SYS___DISABLE_THREADSIGNAL": "syscall", + "syscall.SYS___GETCWD": "syscall", + "syscall.SYS___GETLOGIN": "syscall", + "syscall.SYS___GET_TCB": "syscall", + "syscall.SYS___MAC_EXECVE": "syscall", + "syscall.SYS___MAC_GETFSSTAT": "syscall", + "syscall.SYS___MAC_GET_FD": "syscall", + "syscall.SYS___MAC_GET_FILE": "syscall", + "syscall.SYS___MAC_GET_LCID": "syscall", + "syscall.SYS___MAC_GET_LCTX": "syscall", + "syscall.SYS___MAC_GET_LINK": "syscall", + "syscall.SYS___MAC_GET_MOUNT": "syscall", + "syscall.SYS___MAC_GET_PID": "syscall", + "syscall.SYS___MAC_GET_PROC": "syscall", + "syscall.SYS___MAC_MOUNT": "syscall", + "syscall.SYS___MAC_SET_FD": "syscall", + "syscall.SYS___MAC_SET_FILE": "syscall", + "syscall.SYS___MAC_SET_LCTX": "syscall", + "syscall.SYS___MAC_SET_LINK": "syscall", + "syscall.SYS___MAC_SET_PROC": "syscall", + "syscall.SYS___MAC_SYSCALL": "syscall", + "syscall.SYS___OLD_SEMWAIT_SIGNAL": "syscall", + "syscall.SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL": "syscall", + "syscall.SYS___POSIX_CHOWN": "syscall", + "syscall.SYS___POSIX_FCHOWN": "syscall", + "syscall.SYS___POSIX_LCHOWN": "syscall", + "syscall.SYS___POSIX_RENAME": "syscall", + "syscall.SYS___PTHREAD_CANCELED": "syscall", + "syscall.SYS___PTHREAD_CHDIR": "syscall", + "syscall.SYS___PTHREAD_FCHDIR": "syscall", + "syscall.SYS___PTHREAD_KILL": "syscall", + "syscall.SYS___PTHREAD_MARKCANCEL": "syscall", + "syscall.SYS___PTHREAD_SIGMASK": "syscall", + "syscall.SYS___QUOTACTL": "syscall", + "syscall.SYS___SEMCTL": "syscall", + "syscall.SYS___SEMWAIT_SIGNAL": "syscall", + "syscall.SYS___SEMWAIT_SIGNAL_NOCANCEL": "syscall", + "syscall.SYS___SETLOGIN": "syscall", + "syscall.SYS___SETUGID": "syscall", + "syscall.SYS___SET_TCB": "syscall", + "syscall.SYS___SIGACTION_SIGTRAMP": "syscall", + "syscall.SYS___SIGTIMEDWAIT": "syscall", + "syscall.SYS___SIGWAIT": "syscall", + 
"syscall.SYS___SIGWAIT_NOCANCEL": "syscall", + "syscall.SYS___SYSCTL": "syscall", + "syscall.SYS___TFORK": "syscall", + "syscall.SYS___THREXIT": "syscall", + "syscall.SYS___THRSIGDIVERT": "syscall", + "syscall.SYS___THRSLEEP": "syscall", + "syscall.SYS___THRWAKEUP": "syscall", + "syscall.S_ARCH1": "syscall", + "syscall.S_ARCH2": "syscall", + "syscall.S_BLKSIZE": "syscall", + "syscall.S_IEXEC": "syscall", + "syscall.S_IFBLK": "syscall", + "syscall.S_IFCHR": "syscall", + "syscall.S_IFDIR": "syscall", + "syscall.S_IFIFO": "syscall", + "syscall.S_IFLNK": "syscall", + "syscall.S_IFMT": "syscall", + "syscall.S_IFREG": "syscall", + "syscall.S_IFSOCK": "syscall", + "syscall.S_IFWHT": "syscall", + "syscall.S_IREAD": "syscall", + "syscall.S_IRGRP": "syscall", + "syscall.S_IROTH": "syscall", + "syscall.S_IRUSR": "syscall", + "syscall.S_IRWXG": "syscall", + "syscall.S_IRWXO": "syscall", + "syscall.S_IRWXU": "syscall", + "syscall.S_ISGID": "syscall", + "syscall.S_ISTXT": "syscall", + "syscall.S_ISUID": "syscall", + "syscall.S_ISVTX": "syscall", + "syscall.S_IWGRP": "syscall", + "syscall.S_IWOTH": "syscall", + "syscall.S_IWRITE": "syscall", + "syscall.S_IWUSR": "syscall", + "syscall.S_IXGRP": "syscall", + "syscall.S_IXOTH": "syscall", + "syscall.S_IXUSR": "syscall", + "syscall.S_LOGIN_SET": "syscall", + "syscall.SecurityAttributes": "syscall", + "syscall.Seek": "syscall", + "syscall.Select": "syscall", + "syscall.Sendfile": "syscall", + "syscall.Sendmsg": "syscall", + "syscall.SendmsgN": "syscall", + "syscall.Sendto": "syscall", + "syscall.Servent": "syscall", + "syscall.SetBpf": "syscall", + "syscall.SetBpfBuflen": "syscall", + "syscall.SetBpfDatalink": "syscall", + "syscall.SetBpfHeadercmpl": "syscall", + "syscall.SetBpfImmediate": "syscall", + "syscall.SetBpfInterface": "syscall", + "syscall.SetBpfPromisc": "syscall", + "syscall.SetBpfTimeout": "syscall", + "syscall.SetCurrentDirectory": "syscall", + "syscall.SetEndOfFile": "syscall", + "syscall.SetEnvironmentVariable": 
"syscall", + "syscall.SetFileAttributes": "syscall", + "syscall.SetFileCompletionNotificationModes": "syscall", + "syscall.SetFilePointer": "syscall", + "syscall.SetFileTime": "syscall", + "syscall.SetHandleInformation": "syscall", + "syscall.SetKevent": "syscall", + "syscall.SetLsfPromisc": "syscall", + "syscall.SetNonblock": "syscall", + "syscall.Setdomainname": "syscall", + "syscall.Setegid": "syscall", + "syscall.Setenv": "syscall", + "syscall.Seteuid": "syscall", + "syscall.Setfsgid": "syscall", + "syscall.Setfsuid": "syscall", + "syscall.Setgid": "syscall", + "syscall.Setgroups": "syscall", + "syscall.Sethostname": "syscall", + "syscall.Setlogin": "syscall", + "syscall.Setpgid": "syscall", + "syscall.Setpriority": "syscall", + "syscall.Setprivexec": "syscall", + "syscall.Setregid": "syscall", + "syscall.Setresgid": "syscall", + "syscall.Setresuid": "syscall", + "syscall.Setreuid": "syscall", + "syscall.Setrlimit": "syscall", + "syscall.Setsid": "syscall", + "syscall.Setsockopt": "syscall", + "syscall.SetsockoptByte": "syscall", + "syscall.SetsockoptICMPv6Filter": "syscall", + "syscall.SetsockoptIPMreq": "syscall", + "syscall.SetsockoptIPMreqn": "syscall", + "syscall.SetsockoptIPv6Mreq": "syscall", + "syscall.SetsockoptInet4Addr": "syscall", + "syscall.SetsockoptInt": "syscall", + "syscall.SetsockoptLinger": "syscall", + "syscall.SetsockoptString": "syscall", + "syscall.SetsockoptTimeval": "syscall", + "syscall.Settimeofday": "syscall", + "syscall.Setuid": "syscall", + "syscall.Setxattr": "syscall", + "syscall.Shutdown": "syscall", + "syscall.SidTypeAlias": "syscall", + "syscall.SidTypeComputer": "syscall", + "syscall.SidTypeDeletedAccount": "syscall", + "syscall.SidTypeDomain": "syscall", + "syscall.SidTypeGroup": "syscall", + "syscall.SidTypeInvalid": "syscall", + "syscall.SidTypeLabel": "syscall", + "syscall.SidTypeUnknown": "syscall", + "syscall.SidTypeUser": "syscall", + "syscall.SidTypeWellKnownGroup": "syscall", + "syscall.Signal": "syscall", + 
"syscall.SizeofBpfHdr": "syscall", + "syscall.SizeofBpfInsn": "syscall", + "syscall.SizeofBpfProgram": "syscall", + "syscall.SizeofBpfStat": "syscall", + "syscall.SizeofBpfVersion": "syscall", + "syscall.SizeofBpfZbuf": "syscall", + "syscall.SizeofBpfZbufHeader": "syscall", + "syscall.SizeofCmsghdr": "syscall", + "syscall.SizeofICMPv6Filter": "syscall", + "syscall.SizeofIPMreq": "syscall", + "syscall.SizeofIPMreqn": "syscall", + "syscall.SizeofIPv6MTUInfo": "syscall", + "syscall.SizeofIPv6Mreq": "syscall", + "syscall.SizeofIfAddrmsg": "syscall", + "syscall.SizeofIfAnnounceMsghdr": "syscall", + "syscall.SizeofIfData": "syscall", + "syscall.SizeofIfInfomsg": "syscall", + "syscall.SizeofIfMsghdr": "syscall", + "syscall.SizeofIfaMsghdr": "syscall", + "syscall.SizeofIfmaMsghdr": "syscall", + "syscall.SizeofIfmaMsghdr2": "syscall", + "syscall.SizeofInet4Pktinfo": "syscall", + "syscall.SizeofInet6Pktinfo": "syscall", + "syscall.SizeofInotifyEvent": "syscall", + "syscall.SizeofLinger": "syscall", + "syscall.SizeofMsghdr": "syscall", + "syscall.SizeofNlAttr": "syscall", + "syscall.SizeofNlMsgerr": "syscall", + "syscall.SizeofNlMsghdr": "syscall", + "syscall.SizeofRtAttr": "syscall", + "syscall.SizeofRtGenmsg": "syscall", + "syscall.SizeofRtMetrics": "syscall", + "syscall.SizeofRtMsg": "syscall", + "syscall.SizeofRtMsghdr": "syscall", + "syscall.SizeofRtNexthop": "syscall", + "syscall.SizeofSockFilter": "syscall", + "syscall.SizeofSockFprog": "syscall", + "syscall.SizeofSockaddrAny": "syscall", + "syscall.SizeofSockaddrDatalink": "syscall", + "syscall.SizeofSockaddrInet4": "syscall", + "syscall.SizeofSockaddrInet6": "syscall", + "syscall.SizeofSockaddrLinklayer": "syscall", + "syscall.SizeofSockaddrNetlink": "syscall", + "syscall.SizeofSockaddrUnix": "syscall", + "syscall.SizeofTCPInfo": "syscall", + "syscall.SizeofUcred": "syscall", + "syscall.SlicePtrFromStrings": "syscall", + "syscall.SockFilter": "syscall", + "syscall.SockFprog": "syscall", + "syscall.SockaddrDatalink": 
"syscall", + "syscall.SockaddrGen": "syscall", + "syscall.SockaddrInet4": "syscall", + "syscall.SockaddrInet6": "syscall", + "syscall.SockaddrLinklayer": "syscall", + "syscall.SockaddrNetlink": "syscall", + "syscall.SockaddrUnix": "syscall", + "syscall.Socket": "syscall", + "syscall.SocketControlMessage": "syscall", + "syscall.SocketDisableIPv6": "syscall", + "syscall.Socketpair": "syscall", + "syscall.Splice": "syscall", + "syscall.StartProcess": "syscall", + "syscall.StartupInfo": "syscall", + "syscall.Stat": "syscall", + "syscall.Stat_t": "syscall", + "syscall.Statfs": "syscall", + "syscall.Statfs_t": "syscall", + "syscall.Stderr": "syscall", + "syscall.Stdin": "syscall", + "syscall.Stdout": "syscall", + "syscall.StringBytePtr": "syscall", + "syscall.StringByteSlice": "syscall", + "syscall.StringSlicePtr": "syscall", + "syscall.StringToSid": "syscall", + "syscall.StringToUTF16": "syscall", + "syscall.StringToUTF16Ptr": "syscall", + "syscall.Symlink": "syscall", + "syscall.Sync": "syscall", + "syscall.SyncFileRange": "syscall", + "syscall.SysProcAttr": "syscall", + "syscall.SysProcIDMap": "syscall", + "syscall.Syscall": "syscall", + "syscall.Syscall12": "syscall", + "syscall.Syscall15": "syscall", + "syscall.Syscall6": "syscall", + "syscall.Syscall9": "syscall", + "syscall.Sysctl": "syscall", + "syscall.SysctlUint32": "syscall", + "syscall.Sysctlnode": "syscall", + "syscall.Sysinfo": "syscall", + "syscall.Sysinfo_t": "syscall", + "syscall.Systemtime": "syscall", + "syscall.TCGETS": "syscall", + "syscall.TCIFLUSH": "syscall", + "syscall.TCIOFLUSH": "syscall", + "syscall.TCOFLUSH": "syscall", + "syscall.TCPInfo": "syscall", + "syscall.TCPKeepalive": "syscall", + "syscall.TCP_CA_NAME_MAX": "syscall", + "syscall.TCP_CONGCTL": "syscall", + "syscall.TCP_CONGESTION": "syscall", + "syscall.TCP_CONNECTIONTIMEOUT": "syscall", + "syscall.TCP_CORK": "syscall", + "syscall.TCP_DEFER_ACCEPT": "syscall", + "syscall.TCP_INFO": "syscall", + "syscall.TCP_KEEPALIVE": "syscall", + 
"syscall.TCP_KEEPCNT": "syscall", + "syscall.TCP_KEEPIDLE": "syscall", + "syscall.TCP_KEEPINIT": "syscall", + "syscall.TCP_KEEPINTVL": "syscall", + "syscall.TCP_LINGER2": "syscall", + "syscall.TCP_MAXBURST": "syscall", + "syscall.TCP_MAXHLEN": "syscall", + "syscall.TCP_MAXOLEN": "syscall", + "syscall.TCP_MAXSEG": "syscall", + "syscall.TCP_MAXWIN": "syscall", + "syscall.TCP_MAX_SACK": "syscall", + "syscall.TCP_MAX_WINSHIFT": "syscall", + "syscall.TCP_MD5SIG": "syscall", + "syscall.TCP_MD5SIG_MAXKEYLEN": "syscall", + "syscall.TCP_MINMSS": "syscall", + "syscall.TCP_MINMSSOVERLOAD": "syscall", + "syscall.TCP_MSS": "syscall", + "syscall.TCP_NODELAY": "syscall", + "syscall.TCP_NOOPT": "syscall", + "syscall.TCP_NOPUSH": "syscall", + "syscall.TCP_NSTATES": "syscall", + "syscall.TCP_QUICKACK": "syscall", + "syscall.TCP_RXT_CONNDROPTIME": "syscall", + "syscall.TCP_RXT_FINDROP": "syscall", + "syscall.TCP_SACK_ENABLE": "syscall", + "syscall.TCP_SYNCNT": "syscall", + "syscall.TCP_VENDOR": "syscall", + "syscall.TCP_WINDOW_CLAMP": "syscall", + "syscall.TCSAFLUSH": "syscall", + "syscall.TCSETS": "syscall", + "syscall.TF_DISCONNECT": "syscall", + "syscall.TF_REUSE_SOCKET": "syscall", + "syscall.TF_USE_DEFAULT_WORKER": "syscall", + "syscall.TF_USE_KERNEL_APC": "syscall", + "syscall.TF_USE_SYSTEM_THREAD": "syscall", + "syscall.TF_WRITE_BEHIND": "syscall", + "syscall.TH32CS_INHERIT": "syscall", + "syscall.TH32CS_SNAPALL": "syscall", + "syscall.TH32CS_SNAPHEAPLIST": "syscall", + "syscall.TH32CS_SNAPMODULE": "syscall", + "syscall.TH32CS_SNAPMODULE32": "syscall", + "syscall.TH32CS_SNAPPROCESS": "syscall", + "syscall.TH32CS_SNAPTHREAD": "syscall", + "syscall.TIME_ZONE_ID_DAYLIGHT": "syscall", + "syscall.TIME_ZONE_ID_STANDARD": "syscall", + "syscall.TIME_ZONE_ID_UNKNOWN": "syscall", + "syscall.TIOCCBRK": "syscall", + "syscall.TIOCCDTR": "syscall", + "syscall.TIOCCONS": "syscall", + "syscall.TIOCDCDTIMESTAMP": "syscall", + "syscall.TIOCDRAIN": "syscall", + "syscall.TIOCDSIMICROCODE": 
"syscall", + "syscall.TIOCEXCL": "syscall", + "syscall.TIOCEXT": "syscall", + "syscall.TIOCFLAG_CDTRCTS": "syscall", + "syscall.TIOCFLAG_CLOCAL": "syscall", + "syscall.TIOCFLAG_CRTSCTS": "syscall", + "syscall.TIOCFLAG_MDMBUF": "syscall", + "syscall.TIOCFLAG_PPS": "syscall", + "syscall.TIOCFLAG_SOFTCAR": "syscall", + "syscall.TIOCFLUSH": "syscall", + "syscall.TIOCGDEV": "syscall", + "syscall.TIOCGDRAINWAIT": "syscall", + "syscall.TIOCGETA": "syscall", + "syscall.TIOCGETD": "syscall", + "syscall.TIOCGFLAGS": "syscall", + "syscall.TIOCGICOUNT": "syscall", + "syscall.TIOCGLCKTRMIOS": "syscall", + "syscall.TIOCGLINED": "syscall", + "syscall.TIOCGPGRP": "syscall", + "syscall.TIOCGPTN": "syscall", + "syscall.TIOCGQSIZE": "syscall", + "syscall.TIOCGRANTPT": "syscall", + "syscall.TIOCGRS485": "syscall", + "syscall.TIOCGSERIAL": "syscall", + "syscall.TIOCGSID": "syscall", + "syscall.TIOCGSIZE": "syscall", + "syscall.TIOCGSOFTCAR": "syscall", + "syscall.TIOCGTSTAMP": "syscall", + "syscall.TIOCGWINSZ": "syscall", + "syscall.TIOCINQ": "syscall", + "syscall.TIOCIXOFF": "syscall", + "syscall.TIOCIXON": "syscall", + "syscall.TIOCLINUX": "syscall", + "syscall.TIOCMBIC": "syscall", + "syscall.TIOCMBIS": "syscall", + "syscall.TIOCMGDTRWAIT": "syscall", + "syscall.TIOCMGET": "syscall", + "syscall.TIOCMIWAIT": "syscall", + "syscall.TIOCMODG": "syscall", + "syscall.TIOCMODS": "syscall", + "syscall.TIOCMSDTRWAIT": "syscall", + "syscall.TIOCMSET": "syscall", + "syscall.TIOCM_CAR": "syscall", + "syscall.TIOCM_CD": "syscall", + "syscall.TIOCM_CTS": "syscall", + "syscall.TIOCM_DCD": "syscall", + "syscall.TIOCM_DSR": "syscall", + "syscall.TIOCM_DTR": "syscall", + "syscall.TIOCM_LE": "syscall", + "syscall.TIOCM_RI": "syscall", + "syscall.TIOCM_RNG": "syscall", + "syscall.TIOCM_RTS": "syscall", + "syscall.TIOCM_SR": "syscall", + "syscall.TIOCM_ST": "syscall", + "syscall.TIOCNOTTY": "syscall", + "syscall.TIOCNXCL": "syscall", + "syscall.TIOCOUTQ": "syscall", + "syscall.TIOCPKT": "syscall", + 
"syscall.TIOCPKT_DATA": "syscall", + "syscall.TIOCPKT_DOSTOP": "syscall", + "syscall.TIOCPKT_FLUSHREAD": "syscall", + "syscall.TIOCPKT_FLUSHWRITE": "syscall", + "syscall.TIOCPKT_IOCTL": "syscall", + "syscall.TIOCPKT_NOSTOP": "syscall", + "syscall.TIOCPKT_START": "syscall", + "syscall.TIOCPKT_STOP": "syscall", + "syscall.TIOCPTMASTER": "syscall", + "syscall.TIOCPTMGET": "syscall", + "syscall.TIOCPTSNAME": "syscall", + "syscall.TIOCPTYGNAME": "syscall", + "syscall.TIOCPTYGRANT": "syscall", + "syscall.TIOCPTYUNLK": "syscall", + "syscall.TIOCRCVFRAME": "syscall", + "syscall.TIOCREMOTE": "syscall", + "syscall.TIOCSBRK": "syscall", + "syscall.TIOCSCONS": "syscall", + "syscall.TIOCSCTTY": "syscall", + "syscall.TIOCSDRAINWAIT": "syscall", + "syscall.TIOCSDTR": "syscall", + "syscall.TIOCSERCONFIG": "syscall", + "syscall.TIOCSERGETLSR": "syscall", + "syscall.TIOCSERGETMULTI": "syscall", + "syscall.TIOCSERGSTRUCT": "syscall", + "syscall.TIOCSERGWILD": "syscall", + "syscall.TIOCSERSETMULTI": "syscall", + "syscall.TIOCSERSWILD": "syscall", + "syscall.TIOCSER_TEMT": "syscall", + "syscall.TIOCSETA": "syscall", + "syscall.TIOCSETAF": "syscall", + "syscall.TIOCSETAW": "syscall", + "syscall.TIOCSETD": "syscall", + "syscall.TIOCSFLAGS": "syscall", + "syscall.TIOCSIG": "syscall", + "syscall.TIOCSLCKTRMIOS": "syscall", + "syscall.TIOCSLINED": "syscall", + "syscall.TIOCSPGRP": "syscall", + "syscall.TIOCSPTLCK": "syscall", + "syscall.TIOCSQSIZE": "syscall", + "syscall.TIOCSRS485": "syscall", + "syscall.TIOCSSERIAL": "syscall", + "syscall.TIOCSSIZE": "syscall", + "syscall.TIOCSSOFTCAR": "syscall", + "syscall.TIOCSTART": "syscall", + "syscall.TIOCSTAT": "syscall", + "syscall.TIOCSTI": "syscall", + "syscall.TIOCSTOP": "syscall", + "syscall.TIOCSTSTAMP": "syscall", + "syscall.TIOCSWINSZ": "syscall", + "syscall.TIOCTIMESTAMP": "syscall", + "syscall.TIOCUCNTL": "syscall", + "syscall.TIOCVHANGUP": "syscall", + "syscall.TIOCXMTFRAME": "syscall", + "syscall.TOKEN_ADJUST_DEFAULT": "syscall", + 
"syscall.TOKEN_ADJUST_GROUPS": "syscall", + "syscall.TOKEN_ADJUST_PRIVILEGES": "syscall", + "syscall.TOKEN_ALL_ACCESS": "syscall", + "syscall.TOKEN_ASSIGN_PRIMARY": "syscall", + "syscall.TOKEN_DUPLICATE": "syscall", + "syscall.TOKEN_EXECUTE": "syscall", + "syscall.TOKEN_IMPERSONATE": "syscall", + "syscall.TOKEN_QUERY": "syscall", + "syscall.TOKEN_QUERY_SOURCE": "syscall", + "syscall.TOKEN_READ": "syscall", + "syscall.TOKEN_WRITE": "syscall", + "syscall.TOSTOP": "syscall", + "syscall.TRUNCATE_EXISTING": "syscall", + "syscall.TUNATTACHFILTER": "syscall", + "syscall.TUNDETACHFILTER": "syscall", + "syscall.TUNGETFEATURES": "syscall", + "syscall.TUNGETIFF": "syscall", + "syscall.TUNGETSNDBUF": "syscall", + "syscall.TUNGETVNETHDRSZ": "syscall", + "syscall.TUNSETDEBUG": "syscall", + "syscall.TUNSETGROUP": "syscall", + "syscall.TUNSETIFF": "syscall", + "syscall.TUNSETLINK": "syscall", + "syscall.TUNSETNOCSUM": "syscall", + "syscall.TUNSETOFFLOAD": "syscall", + "syscall.TUNSETOWNER": "syscall", + "syscall.TUNSETPERSIST": "syscall", + "syscall.TUNSETSNDBUF": "syscall", + "syscall.TUNSETTXFILTER": "syscall", + "syscall.TUNSETVNETHDRSZ": "syscall", + "syscall.Tee": "syscall", + "syscall.TerminateProcess": "syscall", + "syscall.Termios": "syscall", + "syscall.Tgkill": "syscall", + "syscall.Time": "syscall", + "syscall.Time_t": "syscall", + "syscall.Times": "syscall", + "syscall.Timespec": "syscall", + "syscall.TimespecToNsec": "syscall", + "syscall.Timeval": "syscall", + "syscall.Timeval32": "syscall", + "syscall.TimevalToNsec": "syscall", + "syscall.Timex": "syscall", + "syscall.Timezoneinformation": "syscall", + "syscall.Tms": "syscall", + "syscall.Token": "syscall", + "syscall.TokenAccessInformation": "syscall", + "syscall.TokenAuditPolicy": "syscall", + "syscall.TokenDefaultDacl": "syscall", + "syscall.TokenElevation": "syscall", + "syscall.TokenElevationType": "syscall", + "syscall.TokenGroups": "syscall", + "syscall.TokenGroupsAndPrivileges": "syscall", + 
"syscall.TokenHasRestrictions": "syscall", + "syscall.TokenImpersonationLevel": "syscall", + "syscall.TokenIntegrityLevel": "syscall", + "syscall.TokenLinkedToken": "syscall", + "syscall.TokenLogonSid": "syscall", + "syscall.TokenMandatoryPolicy": "syscall", + "syscall.TokenOrigin": "syscall", + "syscall.TokenOwner": "syscall", + "syscall.TokenPrimaryGroup": "syscall", + "syscall.TokenPrivileges": "syscall", + "syscall.TokenRestrictedSids": "syscall", + "syscall.TokenSandBoxInert": "syscall", + "syscall.TokenSessionId": "syscall", + "syscall.TokenSessionReference": "syscall", + "syscall.TokenSource": "syscall", + "syscall.TokenStatistics": "syscall", + "syscall.TokenType": "syscall", + "syscall.TokenUIAccess": "syscall", + "syscall.TokenUser": "syscall", + "syscall.TokenVirtualizationAllowed": "syscall", + "syscall.TokenVirtualizationEnabled": "syscall", + "syscall.Tokenprimarygroup": "syscall", + "syscall.Tokenuser": "syscall", + "syscall.TranslateAccountName": "syscall", + "syscall.TranslateName": "syscall", + "syscall.TransmitFile": "syscall", + "syscall.TransmitFileBuffers": "syscall", + "syscall.Truncate": "syscall", + "syscall.USAGE_MATCH_TYPE_AND": "syscall", + "syscall.USAGE_MATCH_TYPE_OR": "syscall", + "syscall.UTF16FromString": "syscall", + "syscall.UTF16PtrFromString": "syscall", + "syscall.UTF16ToString": "syscall", + "syscall.Ucred": "syscall", + "syscall.Umask": "syscall", + "syscall.Uname": "syscall", + "syscall.Undelete": "syscall", + "syscall.UnixCredentials": "syscall", + "syscall.UnixRights": "syscall", + "syscall.Unlink": "syscall", + "syscall.Unlinkat": "syscall", + "syscall.UnmapViewOfFile": "syscall", + "syscall.Unmount": "syscall", + "syscall.Unsetenv": "syscall", + "syscall.Unshare": "syscall", + "syscall.UserInfo10": "syscall", + "syscall.Ustat": "syscall", + "syscall.Ustat_t": "syscall", + "syscall.Utimbuf": "syscall", + "syscall.Utime": "syscall", + "syscall.Utimes": "syscall", + "syscall.UtimesNano": "syscall", + "syscall.Utsname": 
"syscall", + "syscall.VDISCARD": "syscall", + "syscall.VDSUSP": "syscall", + "syscall.VEOF": "syscall", + "syscall.VEOL": "syscall", + "syscall.VEOL2": "syscall", + "syscall.VERASE": "syscall", + "syscall.VERASE2": "syscall", + "syscall.VINTR": "syscall", + "syscall.VKILL": "syscall", + "syscall.VLNEXT": "syscall", + "syscall.VMIN": "syscall", + "syscall.VQUIT": "syscall", + "syscall.VREPRINT": "syscall", + "syscall.VSTART": "syscall", + "syscall.VSTATUS": "syscall", + "syscall.VSTOP": "syscall", + "syscall.VSUSP": "syscall", + "syscall.VSWTC": "syscall", + "syscall.VT0": "syscall", + "syscall.VT1": "syscall", + "syscall.VTDLY": "syscall", + "syscall.VTIME": "syscall", + "syscall.VWERASE": "syscall", + "syscall.VirtualLock": "syscall", + "syscall.VirtualUnlock": "syscall", + "syscall.WAIT_ABANDONED": "syscall", + "syscall.WAIT_FAILED": "syscall", + "syscall.WAIT_OBJECT_0": "syscall", + "syscall.WAIT_TIMEOUT": "syscall", + "syscall.WALL": "syscall", + "syscall.WALLSIG": "syscall", + "syscall.WALTSIG": "syscall", + "syscall.WCLONE": "syscall", + "syscall.WCONTINUED": "syscall", + "syscall.WCOREFLAG": "syscall", + "syscall.WEXITED": "syscall", + "syscall.WLINUXCLONE": "syscall", + "syscall.WNOHANG": "syscall", + "syscall.WNOTHREAD": "syscall", + "syscall.WNOWAIT": "syscall", + "syscall.WNOZOMBIE": "syscall", + "syscall.WOPTSCHECKED": "syscall", + "syscall.WORDSIZE": "syscall", + "syscall.WSABuf": "syscall", + "syscall.WSACleanup": "syscall", + "syscall.WSADESCRIPTION_LEN": "syscall", + "syscall.WSAData": "syscall", + "syscall.WSAEACCES": "syscall", + "syscall.WSAECONNRESET": "syscall", + "syscall.WSAEnumProtocols": "syscall", + "syscall.WSAID_CONNECTEX": "syscall", + "syscall.WSAIoctl": "syscall", + "syscall.WSAPROTOCOL_LEN": "syscall", + "syscall.WSAProtocolChain": "syscall", + "syscall.WSAProtocolInfo": "syscall", + "syscall.WSARecv": "syscall", + "syscall.WSARecvFrom": "syscall", + "syscall.WSASYS_STATUS_LEN": "syscall", + "syscall.WSASend": "syscall", + 
"syscall.WSASendTo": "syscall", + "syscall.WSASendto": "syscall", + "syscall.WSAStartup": "syscall", + "syscall.WSTOPPED": "syscall", + "syscall.WTRAPPED": "syscall", + "syscall.WUNTRACED": "syscall", + "syscall.Wait4": "syscall", + "syscall.WaitForSingleObject": "syscall", + "syscall.WaitStatus": "syscall", + "syscall.Win32FileAttributeData": "syscall", + "syscall.Win32finddata": "syscall", + "syscall.Write": "syscall", + "syscall.WriteConsole": "syscall", + "syscall.WriteFile": "syscall", + "syscall.X509_ASN_ENCODING": "syscall", + "syscall.XCASE": "syscall", + "syscall.XP1_CONNECTIONLESS": "syscall", + "syscall.XP1_CONNECT_DATA": "syscall", + "syscall.XP1_DISCONNECT_DATA": "syscall", + "syscall.XP1_EXPEDITED_DATA": "syscall", + "syscall.XP1_GRACEFUL_CLOSE": "syscall", + "syscall.XP1_GUARANTEED_DELIVERY": "syscall", + "syscall.XP1_GUARANTEED_ORDER": "syscall", + "syscall.XP1_IFS_HANDLES": "syscall", + "syscall.XP1_MESSAGE_ORIENTED": "syscall", + "syscall.XP1_MULTIPOINT_CONTROL_PLANE": "syscall", + "syscall.XP1_MULTIPOINT_DATA_PLANE": "syscall", + "syscall.XP1_PARTIAL_MESSAGE": "syscall", + "syscall.XP1_PSEUDO_STREAM": "syscall", + "syscall.XP1_QOS_SUPPORTED": "syscall", + "syscall.XP1_SAN_SUPPORT_SDP": "syscall", + "syscall.XP1_SUPPORT_BROADCAST": "syscall", + "syscall.XP1_SUPPORT_MULTIPOINT": "syscall", + "syscall.XP1_UNI_RECV": "syscall", + "syscall.XP1_UNI_SEND": "syscall", + "syslog.Dial": "log/syslog", + "syslog.LOG_ALERT": "log/syslog", + "syslog.LOG_AUTH": "log/syslog", + "syslog.LOG_AUTHPRIV": "log/syslog", + "syslog.LOG_CRIT": "log/syslog", + "syslog.LOG_CRON": "log/syslog", + "syslog.LOG_DAEMON": "log/syslog", + "syslog.LOG_DEBUG": "log/syslog", + "syslog.LOG_EMERG": "log/syslog", + "syslog.LOG_ERR": "log/syslog", + "syslog.LOG_FTP": "log/syslog", + "syslog.LOG_INFO": "log/syslog", + "syslog.LOG_KERN": "log/syslog", + "syslog.LOG_LOCAL0": "log/syslog", + "syslog.LOG_LOCAL1": "log/syslog", + "syslog.LOG_LOCAL2": "log/syslog", + "syslog.LOG_LOCAL3": 
"log/syslog", + "syslog.LOG_LOCAL4": "log/syslog", + "syslog.LOG_LOCAL5": "log/syslog", + "syslog.LOG_LOCAL6": "log/syslog", + "syslog.LOG_LOCAL7": "log/syslog", + "syslog.LOG_LPR": "log/syslog", + "syslog.LOG_MAIL": "log/syslog", + "syslog.LOG_NEWS": "log/syslog", + "syslog.LOG_NOTICE": "log/syslog", + "syslog.LOG_SYSLOG": "log/syslog", + "syslog.LOG_USER": "log/syslog", + "syslog.LOG_UUCP": "log/syslog", + "syslog.LOG_WARNING": "log/syslog", + "syslog.New": "log/syslog", + "syslog.NewLogger": "log/syslog", + "syslog.Priority": "log/syslog", + "syslog.Writer": "log/syslog", + "tabwriter.AlignRight": "text/tabwriter", + "tabwriter.Debug": "text/tabwriter", + "tabwriter.DiscardEmptyColumns": "text/tabwriter", + "tabwriter.Escape": "text/tabwriter", + "tabwriter.FilterHTML": "text/tabwriter", + "tabwriter.NewWriter": "text/tabwriter", + "tabwriter.StripEscape": "text/tabwriter", + "tabwriter.TabIndent": "text/tabwriter", + "tabwriter.Writer": "text/tabwriter", + "tar.ErrFieldTooLong": "archive/tar", + "tar.ErrHeader": "archive/tar", + "tar.ErrWriteAfterClose": "archive/tar", + "tar.ErrWriteTooLong": "archive/tar", + "tar.FileInfoHeader": "archive/tar", + "tar.Header": "archive/tar", + "tar.NewReader": "archive/tar", + "tar.NewWriter": "archive/tar", + "tar.Reader": "archive/tar", + "tar.TypeBlock": "archive/tar", + "tar.TypeChar": "archive/tar", + "tar.TypeCont": "archive/tar", + "tar.TypeDir": "archive/tar", + "tar.TypeFifo": "archive/tar", + "tar.TypeGNULongLink": "archive/tar", + "tar.TypeGNULongName": "archive/tar", + "tar.TypeGNUSparse": "archive/tar", + "tar.TypeLink": "archive/tar", + "tar.TypeReg": "archive/tar", + "tar.TypeRegA": "archive/tar", + "tar.TypeSymlink": "archive/tar", + "tar.TypeXGlobalHeader": "archive/tar", + "tar.TypeXHeader": "archive/tar", + "tar.Writer": "archive/tar", + "template.CSS": "html/template", + "template.ErrAmbigContext": "html/template", + "template.ErrBadHTML": "html/template", + "template.ErrBranchEnd": "html/template", + 
"template.ErrEndContext": "html/template", + "template.ErrNoSuchTemplate": "html/template", + "template.ErrOutputContext": "html/template", + "template.ErrPartialCharset": "html/template", + "template.ErrPartialEscape": "html/template", + "template.ErrRangeLoopReentry": "html/template", + "template.ErrSlashAmbig": "html/template", + "template.Error": "html/template", + "template.ErrorCode": "html/template", + "template.ExecError": "text/template", + // "template.FuncMap" is ambiguous + "template.HTML": "html/template", + "template.HTMLAttr": "html/template", + // "template.HTMLEscape" is ambiguous + // "template.HTMLEscapeString" is ambiguous + // "template.HTMLEscaper" is ambiguous + // "template.IsTrue" is ambiguous + "template.JS": "html/template", + // "template.JSEscape" is ambiguous + // "template.JSEscapeString" is ambiguous + // "template.JSEscaper" is ambiguous + "template.JSStr": "html/template", + // "template.Must" is ambiguous + // "template.New" is ambiguous + "template.OK": "html/template", + // "template.ParseFiles" is ambiguous + // "template.ParseGlob" is ambiguous + // "template.Template" is ambiguous + "template.URL": "html/template", + // "template.URLQueryEscaper" is ambiguous + "testing.AllocsPerRun": "testing", + "testing.B": "testing", + "testing.Benchmark": "testing", + "testing.BenchmarkResult": "testing", + "testing.Cover": "testing", + "testing.CoverBlock": "testing", + "testing.CoverMode": "testing", + "testing.Coverage": "testing", + "testing.InternalBenchmark": "testing", + "testing.InternalExample": "testing", + "testing.InternalTest": "testing", + "testing.M": "testing", + "testing.Main": "testing", + "testing.MainStart": "testing", + "testing.PB": "testing", + "testing.RegisterCover": "testing", + "testing.RunBenchmarks": "testing", + "testing.RunExamples": "testing", + "testing.RunTests": "testing", + "testing.Short": "testing", + "testing.T": "testing", + "testing.Verbose": "testing", + "textproto.CanonicalMIMEHeaderKey": 
"net/textproto", + "textproto.Conn": "net/textproto", + "textproto.Dial": "net/textproto", + "textproto.Error": "net/textproto", + "textproto.MIMEHeader": "net/textproto", + "textproto.NewConn": "net/textproto", + "textproto.NewReader": "net/textproto", + "textproto.NewWriter": "net/textproto", + "textproto.Pipeline": "net/textproto", + "textproto.ProtocolError": "net/textproto", + "textproto.Reader": "net/textproto", + "textproto.TrimBytes": "net/textproto", + "textproto.TrimString": "net/textproto", + "textproto.Writer": "net/textproto", + "time.ANSIC": "time", + "time.After": "time", + "time.AfterFunc": "time", + "time.April": "time", + "time.August": "time", + "time.Date": "time", + "time.December": "time", + "time.Duration": "time", + "time.February": "time", + "time.FixedZone": "time", + "time.Friday": "time", + "time.Hour": "time", + "time.January": "time", + "time.July": "time", + "time.June": "time", + "time.Kitchen": "time", + "time.LoadLocation": "time", + "time.Local": "time", + "time.Location": "time", + "time.March": "time", + "time.May": "time", + "time.Microsecond": "time", + "time.Millisecond": "time", + "time.Minute": "time", + "time.Monday": "time", + "time.Month": "time", + "time.Nanosecond": "time", + "time.NewTicker": "time", + "time.NewTimer": "time", + "time.November": "time", + "time.Now": "time", + "time.October": "time", + "time.Parse": "time", + "time.ParseDuration": "time", + "time.ParseError": "time", + "time.ParseInLocation": "time", + "time.RFC1123": "time", + "time.RFC1123Z": "time", + "time.RFC3339": "time", + "time.RFC3339Nano": "time", + "time.RFC822": "time", + "time.RFC822Z": "time", + "time.RFC850": "time", + "time.RubyDate": "time", + "time.Saturday": "time", + "time.Second": "time", + "time.September": "time", + "time.Since": "time", + "time.Sleep": "time", + "time.Stamp": "time", + "time.StampMicro": "time", + "time.StampMilli": "time", + "time.StampNano": "time", + "time.Sunday": "time", + "time.Thursday": "time", + 
"time.Tick": "time", + "time.Ticker": "time", + "time.Time": "time", + "time.Timer": "time", + "time.Tuesday": "time", + "time.UTC": "time", + "time.Unix": "time", + "time.UnixDate": "time", + "time.Until": "time", + "time.Wednesday": "time", + "time.Weekday": "time", + "tls.Certificate": "crypto/tls", + "tls.CertificateRequestInfo": "crypto/tls", + "tls.Client": "crypto/tls", + "tls.ClientAuthType": "crypto/tls", + "tls.ClientHelloInfo": "crypto/tls", + "tls.ClientSessionCache": "crypto/tls", + "tls.ClientSessionState": "crypto/tls", + "tls.Config": "crypto/tls", + "tls.Conn": "crypto/tls", + "tls.ConnectionState": "crypto/tls", + "tls.CurveID": "crypto/tls", + "tls.CurveP256": "crypto/tls", + "tls.CurveP384": "crypto/tls", + "tls.CurveP521": "crypto/tls", + "tls.Dial": "crypto/tls", + "tls.DialWithDialer": "crypto/tls", + "tls.ECDSAWithP256AndSHA256": "crypto/tls", + "tls.ECDSAWithP384AndSHA384": "crypto/tls", + "tls.ECDSAWithP521AndSHA512": "crypto/tls", + "tls.Listen": "crypto/tls", + "tls.LoadX509KeyPair": "crypto/tls", + "tls.NewLRUClientSessionCache": "crypto/tls", + "tls.NewListener": "crypto/tls", + "tls.NoClientCert": "crypto/tls", + "tls.PKCS1WithSHA1": "crypto/tls", + "tls.PKCS1WithSHA256": "crypto/tls", + "tls.PKCS1WithSHA384": "crypto/tls", + "tls.PKCS1WithSHA512": "crypto/tls", + "tls.PSSWithSHA256": "crypto/tls", + "tls.PSSWithSHA384": "crypto/tls", + "tls.PSSWithSHA512": "crypto/tls", + "tls.RecordHeaderError": "crypto/tls", + "tls.RenegotiateFreelyAsClient": "crypto/tls", + "tls.RenegotiateNever": "crypto/tls", + "tls.RenegotiateOnceAsClient": "crypto/tls", + "tls.RenegotiationSupport": "crypto/tls", + "tls.RequestClientCert": "crypto/tls", + "tls.RequireAndVerifyClientCert": "crypto/tls", + "tls.RequireAnyClientCert": "crypto/tls", + "tls.Server": "crypto/tls", + "tls.SignatureScheme": "crypto/tls", + "tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": "crypto/tls", + "tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": "crypto/tls", + 
"tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": "crypto/tls", + "tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": "crypto/tls", + "tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": "crypto/tls", + "tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": "crypto/tls", + "tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": "crypto/tls", + "tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": "crypto/tls", + "tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": "crypto/tls", + "tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": "crypto/tls", + "tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": "crypto/tls", + "tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": "crypto/tls", + "tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": "crypto/tls", + "tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": "crypto/tls", + "tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA": "crypto/tls", + "tls.TLS_FALLBACK_SCSV": "crypto/tls", + "tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA": "crypto/tls", + "tls.TLS_RSA_WITH_AES_128_CBC_SHA": "crypto/tls", + "tls.TLS_RSA_WITH_AES_128_CBC_SHA256": "crypto/tls", + "tls.TLS_RSA_WITH_AES_128_GCM_SHA256": "crypto/tls", + "tls.TLS_RSA_WITH_AES_256_CBC_SHA": "crypto/tls", + "tls.TLS_RSA_WITH_AES_256_GCM_SHA384": "crypto/tls", + "tls.TLS_RSA_WITH_RC4_128_SHA": "crypto/tls", + "tls.VerifyClientCertIfGiven": "crypto/tls", + "tls.VersionSSL30": "crypto/tls", + "tls.VersionTLS10": "crypto/tls", + "tls.VersionTLS11": "crypto/tls", + "tls.VersionTLS12": "crypto/tls", + "tls.X25519": "crypto/tls", + "tls.X509KeyPair": "crypto/tls", + "token.ADD": "go/token", + "token.ADD_ASSIGN": "go/token", + "token.AND": "go/token", + "token.AND_ASSIGN": "go/token", + "token.AND_NOT": "go/token", + "token.AND_NOT_ASSIGN": "go/token", + "token.ARROW": "go/token", + "token.ASSIGN": "go/token", + "token.BREAK": "go/token", + "token.CASE": "go/token", + "token.CHAN": "go/token", + "token.CHAR": "go/token", + "token.COLON": "go/token", + "token.COMMA": "go/token", + "token.COMMENT": "go/token", + "token.CONST": "go/token", + "token.CONTINUE": "go/token", + "token.DEC": "go/token", + "token.DEFAULT": 
"go/token", + "token.DEFER": "go/token", + "token.DEFINE": "go/token", + "token.ELLIPSIS": "go/token", + "token.ELSE": "go/token", + "token.EOF": "go/token", + "token.EQL": "go/token", + "token.FALLTHROUGH": "go/token", + "token.FLOAT": "go/token", + "token.FOR": "go/token", + "token.FUNC": "go/token", + "token.File": "go/token", + "token.FileSet": "go/token", + "token.GEQ": "go/token", + "token.GO": "go/token", + "token.GOTO": "go/token", + "token.GTR": "go/token", + "token.HighestPrec": "go/token", + "token.IDENT": "go/token", + "token.IF": "go/token", + "token.ILLEGAL": "go/token", + "token.IMAG": "go/token", + "token.IMPORT": "go/token", + "token.INC": "go/token", + "token.INT": "go/token", + "token.INTERFACE": "go/token", + "token.LAND": "go/token", + "token.LBRACE": "go/token", + "token.LBRACK": "go/token", + "token.LEQ": "go/token", + "token.LOR": "go/token", + "token.LPAREN": "go/token", + "token.LSS": "go/token", + "token.Lookup": "go/token", + "token.LowestPrec": "go/token", + "token.MAP": "go/token", + "token.MUL": "go/token", + "token.MUL_ASSIGN": "go/token", + "token.NEQ": "go/token", + "token.NOT": "go/token", + "token.NewFileSet": "go/token", + "token.NoPos": "go/token", + "token.OR": "go/token", + "token.OR_ASSIGN": "go/token", + "token.PACKAGE": "go/token", + "token.PERIOD": "go/token", + "token.Pos": "go/token", + "token.Position": "go/token", + "token.QUO": "go/token", + "token.QUO_ASSIGN": "go/token", + "token.RANGE": "go/token", + "token.RBRACE": "go/token", + "token.RBRACK": "go/token", + "token.REM": "go/token", + "token.REM_ASSIGN": "go/token", + "token.RETURN": "go/token", + "token.RPAREN": "go/token", + "token.SELECT": "go/token", + "token.SEMICOLON": "go/token", + "token.SHL": "go/token", + "token.SHL_ASSIGN": "go/token", + "token.SHR": "go/token", + "token.SHR_ASSIGN": "go/token", + "token.STRING": "go/token", + "token.STRUCT": "go/token", + "token.SUB": "go/token", + "token.SUB_ASSIGN": "go/token", + "token.SWITCH": "go/token", + 
"token.TYPE": "go/token", + "token.Token": "go/token", + "token.UnaryPrec": "go/token", + "token.VAR": "go/token", + "token.XOR": "go/token", + "token.XOR_ASSIGN": "go/token", + "trace.Start": "runtime/trace", + "trace.Stop": "runtime/trace", + "types.Array": "go/types", + "types.AssertableTo": "go/types", + "types.AssignableTo": "go/types", + "types.Basic": "go/types", + "types.BasicInfo": "go/types", + "types.BasicKind": "go/types", + "types.Bool": "go/types", + "types.Builtin": "go/types", + "types.Byte": "go/types", + "types.Chan": "go/types", + "types.ChanDir": "go/types", + "types.Checker": "go/types", + "types.Comparable": "go/types", + "types.Complex128": "go/types", + "types.Complex64": "go/types", + "types.Config": "go/types", + "types.Const": "go/types", + "types.ConvertibleTo": "go/types", + "types.DefPredeclaredTestFuncs": "go/types", + "types.Default": "go/types", + "types.Error": "go/types", + "types.Eval": "go/types", + "types.ExprString": "go/types", + "types.FieldVal": "go/types", + "types.Float32": "go/types", + "types.Float64": "go/types", + "types.Func": "go/types", + "types.Id": "go/types", + "types.Identical": "go/types", + "types.IdenticalIgnoreTags": "go/types", + "types.Implements": "go/types", + "types.ImportMode": "go/types", + "types.Importer": "go/types", + "types.ImporterFrom": "go/types", + "types.Info": "go/types", + "types.Initializer": "go/types", + "types.Int": "go/types", + "types.Int16": "go/types", + "types.Int32": "go/types", + "types.Int64": "go/types", + "types.Int8": "go/types", + "types.Interface": "go/types", + "types.Invalid": "go/types", + "types.IsBoolean": "go/types", + "types.IsComplex": "go/types", + "types.IsConstType": "go/types", + "types.IsFloat": "go/types", + "types.IsInteger": "go/types", + "types.IsInterface": "go/types", + "types.IsNumeric": "go/types", + "types.IsOrdered": "go/types", + "types.IsString": "go/types", + "types.IsUnsigned": "go/types", + "types.IsUntyped": "go/types", + "types.Label": 
"go/types", + "types.LookupFieldOrMethod": "go/types", + "types.Map": "go/types", + "types.MethodExpr": "go/types", + "types.MethodSet": "go/types", + "types.MethodVal": "go/types", + "types.MissingMethod": "go/types", + "types.Named": "go/types", + "types.NewArray": "go/types", + "types.NewChan": "go/types", + "types.NewChecker": "go/types", + "types.NewConst": "go/types", + "types.NewField": "go/types", + "types.NewFunc": "go/types", + "types.NewInterface": "go/types", + "types.NewLabel": "go/types", + "types.NewMap": "go/types", + "types.NewMethodSet": "go/types", + "types.NewNamed": "go/types", + "types.NewPackage": "go/types", + "types.NewParam": "go/types", + "types.NewPkgName": "go/types", + "types.NewPointer": "go/types", + "types.NewScope": "go/types", + "types.NewSignature": "go/types", + "types.NewSlice": "go/types", + "types.NewStruct": "go/types", + "types.NewTuple": "go/types", + "types.NewTypeName": "go/types", + "types.NewVar": "go/types", + "types.Nil": "go/types", + "types.ObjectString": "go/types", + "types.Package": "go/types", + "types.PkgName": "go/types", + "types.Pointer": "go/types", + "types.Qualifier": "go/types", + "types.RecvOnly": "go/types", + "types.RelativeTo": "go/types", + "types.Rune": "go/types", + "types.Scope": "go/types", + "types.Selection": "go/types", + "types.SelectionKind": "go/types", + "types.SelectionString": "go/types", + "types.SendOnly": "go/types", + "types.SendRecv": "go/types", + "types.Signature": "go/types", + "types.Sizes": "go/types", + "types.Slice": "go/types", + "types.StdSizes": "go/types", + "types.String": "go/types", + "types.Struct": "go/types", + "types.Tuple": "go/types", + "types.Typ": "go/types", + "types.Type": "go/types", + "types.TypeAndValue": "go/types", + "types.TypeName": "go/types", + "types.TypeString": "go/types", + "types.Uint": "go/types", + "types.Uint16": "go/types", + "types.Uint32": "go/types", + "types.Uint64": "go/types", + "types.Uint8": "go/types", + "types.Uintptr": 
"go/types", + "types.Universe": "go/types", + "types.Unsafe": "go/types", + "types.UnsafePointer": "go/types", + "types.UntypedBool": "go/types", + "types.UntypedComplex": "go/types", + "types.UntypedFloat": "go/types", + "types.UntypedInt": "go/types", + "types.UntypedNil": "go/types", + "types.UntypedRune": "go/types", + "types.UntypedString": "go/types", + "types.Var": "go/types", + "types.WriteExpr": "go/types", + "types.WriteSignature": "go/types", + "types.WriteType": "go/types", + "unicode.ASCII_Hex_Digit": "unicode", + "unicode.Adlam": "unicode", + "unicode.Ahom": "unicode", + "unicode.Anatolian_Hieroglyphs": "unicode", + "unicode.Arabic": "unicode", + "unicode.Armenian": "unicode", + "unicode.Avestan": "unicode", + "unicode.AzeriCase": "unicode", + "unicode.Balinese": "unicode", + "unicode.Bamum": "unicode", + "unicode.Bassa_Vah": "unicode", + "unicode.Batak": "unicode", + "unicode.Bengali": "unicode", + "unicode.Bhaiksuki": "unicode", + "unicode.Bidi_Control": "unicode", + "unicode.Bopomofo": "unicode", + "unicode.Brahmi": "unicode", + "unicode.Braille": "unicode", + "unicode.Buginese": "unicode", + "unicode.Buhid": "unicode", + "unicode.C": "unicode", + "unicode.Canadian_Aboriginal": "unicode", + "unicode.Carian": "unicode", + "unicode.CaseRange": "unicode", + "unicode.CaseRanges": "unicode", + "unicode.Categories": "unicode", + "unicode.Caucasian_Albanian": "unicode", + "unicode.Cc": "unicode", + "unicode.Cf": "unicode", + "unicode.Chakma": "unicode", + "unicode.Cham": "unicode", + "unicode.Cherokee": "unicode", + "unicode.Co": "unicode", + "unicode.Common": "unicode", + "unicode.Coptic": "unicode", + "unicode.Cs": "unicode", + "unicode.Cuneiform": "unicode", + "unicode.Cypriot": "unicode", + "unicode.Cyrillic": "unicode", + "unicode.Dash": "unicode", + "unicode.Deprecated": "unicode", + "unicode.Deseret": "unicode", + "unicode.Devanagari": "unicode", + "unicode.Diacritic": "unicode", + "unicode.Digit": "unicode", + "unicode.Duployan": "unicode", + 
"unicode.Egyptian_Hieroglyphs": "unicode", + "unicode.Elbasan": "unicode", + "unicode.Ethiopic": "unicode", + "unicode.Extender": "unicode", + "unicode.FoldCategory": "unicode", + "unicode.FoldScript": "unicode", + "unicode.Georgian": "unicode", + "unicode.Glagolitic": "unicode", + "unicode.Gothic": "unicode", + "unicode.Grantha": "unicode", + "unicode.GraphicRanges": "unicode", + "unicode.Greek": "unicode", + "unicode.Gujarati": "unicode", + "unicode.Gurmukhi": "unicode", + "unicode.Han": "unicode", + "unicode.Hangul": "unicode", + "unicode.Hanunoo": "unicode", + "unicode.Hatran": "unicode", + "unicode.Hebrew": "unicode", + "unicode.Hex_Digit": "unicode", + "unicode.Hiragana": "unicode", + "unicode.Hyphen": "unicode", + "unicode.IDS_Binary_Operator": "unicode", + "unicode.IDS_Trinary_Operator": "unicode", + "unicode.Ideographic": "unicode", + "unicode.Imperial_Aramaic": "unicode", + "unicode.In": "unicode", + "unicode.Inherited": "unicode", + "unicode.Inscriptional_Pahlavi": "unicode", + "unicode.Inscriptional_Parthian": "unicode", + "unicode.Is": "unicode", + "unicode.IsControl": "unicode", + "unicode.IsDigit": "unicode", + "unicode.IsGraphic": "unicode", + "unicode.IsLetter": "unicode", + "unicode.IsLower": "unicode", + "unicode.IsMark": "unicode", + "unicode.IsNumber": "unicode", + "unicode.IsOneOf": "unicode", + "unicode.IsPrint": "unicode", + "unicode.IsPunct": "unicode", + "unicode.IsSpace": "unicode", + "unicode.IsSymbol": "unicode", + "unicode.IsTitle": "unicode", + "unicode.IsUpper": "unicode", + "unicode.Javanese": "unicode", + "unicode.Join_Control": "unicode", + "unicode.Kaithi": "unicode", + "unicode.Kannada": "unicode", + "unicode.Katakana": "unicode", + "unicode.Kayah_Li": "unicode", + "unicode.Kharoshthi": "unicode", + "unicode.Khmer": "unicode", + "unicode.Khojki": "unicode", + "unicode.Khudawadi": "unicode", + "unicode.L": "unicode", + "unicode.Lao": "unicode", + "unicode.Latin": "unicode", + "unicode.Lepcha": "unicode", + "unicode.Letter": 
"unicode", + "unicode.Limbu": "unicode", + "unicode.Linear_A": "unicode", + "unicode.Linear_B": "unicode", + "unicode.Lisu": "unicode", + "unicode.Ll": "unicode", + "unicode.Lm": "unicode", + "unicode.Lo": "unicode", + "unicode.Logical_Order_Exception": "unicode", + "unicode.Lower": "unicode", + "unicode.LowerCase": "unicode", + "unicode.Lt": "unicode", + "unicode.Lu": "unicode", + "unicode.Lycian": "unicode", + "unicode.Lydian": "unicode", + "unicode.M": "unicode", + "unicode.Mahajani": "unicode", + "unicode.Malayalam": "unicode", + "unicode.Mandaic": "unicode", + "unicode.Manichaean": "unicode", + "unicode.Marchen": "unicode", + "unicode.Mark": "unicode", + "unicode.MaxASCII": "unicode", + "unicode.MaxCase": "unicode", + "unicode.MaxLatin1": "unicode", + "unicode.MaxRune": "unicode", + "unicode.Mc": "unicode", + "unicode.Me": "unicode", + "unicode.Meetei_Mayek": "unicode", + "unicode.Mende_Kikakui": "unicode", + "unicode.Meroitic_Cursive": "unicode", + "unicode.Meroitic_Hieroglyphs": "unicode", + "unicode.Miao": "unicode", + "unicode.Mn": "unicode", + "unicode.Modi": "unicode", + "unicode.Mongolian": "unicode", + "unicode.Mro": "unicode", + "unicode.Multani": "unicode", + "unicode.Myanmar": "unicode", + "unicode.N": "unicode", + "unicode.Nabataean": "unicode", + "unicode.Nd": "unicode", + "unicode.New_Tai_Lue": "unicode", + "unicode.Newa": "unicode", + "unicode.Nko": "unicode", + "unicode.Nl": "unicode", + "unicode.No": "unicode", + "unicode.Noncharacter_Code_Point": "unicode", + "unicode.Number": "unicode", + "unicode.Ogham": "unicode", + "unicode.Ol_Chiki": "unicode", + "unicode.Old_Hungarian": "unicode", + "unicode.Old_Italic": "unicode", + "unicode.Old_North_Arabian": "unicode", + "unicode.Old_Permic": "unicode", + "unicode.Old_Persian": "unicode", + "unicode.Old_South_Arabian": "unicode", + "unicode.Old_Turkic": "unicode", + "unicode.Oriya": "unicode", + "unicode.Osage": "unicode", + "unicode.Osmanya": "unicode", + "unicode.Other": "unicode", + 
"unicode.Other_Alphabetic": "unicode", + "unicode.Other_Default_Ignorable_Code_Point": "unicode", + "unicode.Other_Grapheme_Extend": "unicode", + "unicode.Other_ID_Continue": "unicode", + "unicode.Other_ID_Start": "unicode", + "unicode.Other_Lowercase": "unicode", + "unicode.Other_Math": "unicode", + "unicode.Other_Uppercase": "unicode", + "unicode.P": "unicode", + "unicode.Pahawh_Hmong": "unicode", + "unicode.Palmyrene": "unicode", + "unicode.Pattern_Syntax": "unicode", + "unicode.Pattern_White_Space": "unicode", + "unicode.Pau_Cin_Hau": "unicode", + "unicode.Pc": "unicode", + "unicode.Pd": "unicode", + "unicode.Pe": "unicode", + "unicode.Pf": "unicode", + "unicode.Phags_Pa": "unicode", + "unicode.Phoenician": "unicode", + "unicode.Pi": "unicode", + "unicode.Po": "unicode", + "unicode.Prepended_Concatenation_Mark": "unicode", + "unicode.PrintRanges": "unicode", + "unicode.Properties": "unicode", + "unicode.Ps": "unicode", + "unicode.Psalter_Pahlavi": "unicode", + "unicode.Punct": "unicode", + "unicode.Quotation_Mark": "unicode", + "unicode.Radical": "unicode", + "unicode.Range16": "unicode", + "unicode.Range32": "unicode", + "unicode.RangeTable": "unicode", + "unicode.Rejang": "unicode", + "unicode.ReplacementChar": "unicode", + "unicode.Runic": "unicode", + "unicode.S": "unicode", + "unicode.STerm": "unicode", + "unicode.Samaritan": "unicode", + "unicode.Saurashtra": "unicode", + "unicode.Sc": "unicode", + "unicode.Scripts": "unicode", + "unicode.Sentence_Terminal": "unicode", + "unicode.Sharada": "unicode", + "unicode.Shavian": "unicode", + "unicode.Siddham": "unicode", + "unicode.SignWriting": "unicode", + "unicode.SimpleFold": "unicode", + "unicode.Sinhala": "unicode", + "unicode.Sk": "unicode", + "unicode.Sm": "unicode", + "unicode.So": "unicode", + "unicode.Soft_Dotted": "unicode", + "unicode.Sora_Sompeng": "unicode", + "unicode.Space": "unicode", + "unicode.SpecialCase": "unicode", + "unicode.Sundanese": "unicode", + "unicode.Syloti_Nagri": "unicode", + 
"unicode.Symbol": "unicode", + "unicode.Syriac": "unicode", + "unicode.Tagalog": "unicode", + "unicode.Tagbanwa": "unicode", + "unicode.Tai_Le": "unicode", + "unicode.Tai_Tham": "unicode", + "unicode.Tai_Viet": "unicode", + "unicode.Takri": "unicode", + "unicode.Tamil": "unicode", + "unicode.Tangut": "unicode", + "unicode.Telugu": "unicode", + "unicode.Terminal_Punctuation": "unicode", + "unicode.Thaana": "unicode", + "unicode.Thai": "unicode", + "unicode.Tibetan": "unicode", + "unicode.Tifinagh": "unicode", + "unicode.Tirhuta": "unicode", + "unicode.Title": "unicode", + "unicode.TitleCase": "unicode", + "unicode.To": "unicode", + "unicode.ToLower": "unicode", + "unicode.ToTitle": "unicode", + "unicode.ToUpper": "unicode", + "unicode.TurkishCase": "unicode", + "unicode.Ugaritic": "unicode", + "unicode.Unified_Ideograph": "unicode", + "unicode.Upper": "unicode", + "unicode.UpperCase": "unicode", + "unicode.UpperLower": "unicode", + "unicode.Vai": "unicode", + "unicode.Variation_Selector": "unicode", + "unicode.Version": "unicode", + "unicode.Warang_Citi": "unicode", + "unicode.White_Space": "unicode", + "unicode.Yi": "unicode", + "unicode.Z": "unicode", + "unicode.Zl": "unicode", + "unicode.Zp": "unicode", + "unicode.Zs": "unicode", + "url.Error": "net/url", + "url.EscapeError": "net/url", + "url.InvalidHostError": "net/url", + "url.Parse": "net/url", + "url.ParseQuery": "net/url", + "url.ParseRequestURI": "net/url", + "url.PathEscape": "net/url", + "url.PathUnescape": "net/url", + "url.QueryEscape": "net/url", + "url.QueryUnescape": "net/url", + "url.URL": "net/url", + "url.User": "net/url", + "url.UserPassword": "net/url", + "url.Userinfo": "net/url", + "url.Values": "net/url", + "user.Current": "os/user", + "user.Group": "os/user", + "user.Lookup": "os/user", + "user.LookupGroup": "os/user", + "user.LookupGroupId": "os/user", + "user.LookupId": "os/user", + "user.UnknownGroupError": "os/user", + "user.UnknownGroupIdError": "os/user", + "user.UnknownUserError": 
"os/user", + "user.UnknownUserIdError": "os/user", + "user.User": "os/user", + "utf16.Decode": "unicode/utf16", + "utf16.DecodeRune": "unicode/utf16", + "utf16.Encode": "unicode/utf16", + "utf16.EncodeRune": "unicode/utf16", + "utf16.IsSurrogate": "unicode/utf16", + "utf8.DecodeLastRune": "unicode/utf8", + "utf8.DecodeLastRuneInString": "unicode/utf8", + "utf8.DecodeRune": "unicode/utf8", + "utf8.DecodeRuneInString": "unicode/utf8", + "utf8.EncodeRune": "unicode/utf8", + "utf8.FullRune": "unicode/utf8", + "utf8.FullRuneInString": "unicode/utf8", + "utf8.MaxRune": "unicode/utf8", + "utf8.RuneCount": "unicode/utf8", + "utf8.RuneCountInString": "unicode/utf8", + "utf8.RuneError": "unicode/utf8", + "utf8.RuneLen": "unicode/utf8", + "utf8.RuneSelf": "unicode/utf8", + "utf8.RuneStart": "unicode/utf8", + "utf8.UTFMax": "unicode/utf8", + "utf8.Valid": "unicode/utf8", + "utf8.ValidRune": "unicode/utf8", + "utf8.ValidString": "unicode/utf8", + "x509.CANotAuthorizedForThisName": "crypto/x509", + "x509.CertPool": "crypto/x509", + "x509.Certificate": "crypto/x509", + "x509.CertificateInvalidError": "crypto/x509", + "x509.CertificateRequest": "crypto/x509", + "x509.ConstraintViolationError": "crypto/x509", + "x509.CreateCertificate": "crypto/x509", + "x509.CreateCertificateRequest": "crypto/x509", + "x509.DSA": "crypto/x509", + "x509.DSAWithSHA1": "crypto/x509", + "x509.DSAWithSHA256": "crypto/x509", + "x509.DecryptPEMBlock": "crypto/x509", + "x509.ECDSA": "crypto/x509", + "x509.ECDSAWithSHA1": "crypto/x509", + "x509.ECDSAWithSHA256": "crypto/x509", + "x509.ECDSAWithSHA384": "crypto/x509", + "x509.ECDSAWithSHA512": "crypto/x509", + "x509.EncryptPEMBlock": "crypto/x509", + "x509.ErrUnsupportedAlgorithm": "crypto/x509", + "x509.Expired": "crypto/x509", + "x509.ExtKeyUsage": "crypto/x509", + "x509.ExtKeyUsageAny": "crypto/x509", + "x509.ExtKeyUsageClientAuth": "crypto/x509", + "x509.ExtKeyUsageCodeSigning": "crypto/x509", + "x509.ExtKeyUsageEmailProtection": "crypto/x509", + 
"x509.ExtKeyUsageIPSECEndSystem": "crypto/x509", + "x509.ExtKeyUsageIPSECTunnel": "crypto/x509", + "x509.ExtKeyUsageIPSECUser": "crypto/x509", + "x509.ExtKeyUsageMicrosoftServerGatedCrypto": "crypto/x509", + "x509.ExtKeyUsageNetscapeServerGatedCrypto": "crypto/x509", + "x509.ExtKeyUsageOCSPSigning": "crypto/x509", + "x509.ExtKeyUsageServerAuth": "crypto/x509", + "x509.ExtKeyUsageTimeStamping": "crypto/x509", + "x509.HostnameError": "crypto/x509", + "x509.IncompatibleUsage": "crypto/x509", + "x509.IncorrectPasswordError": "crypto/x509", + "x509.InsecureAlgorithmError": "crypto/x509", + "x509.InvalidReason": "crypto/x509", + "x509.IsEncryptedPEMBlock": "crypto/x509", + "x509.KeyUsage": "crypto/x509", + "x509.KeyUsageCRLSign": "crypto/x509", + "x509.KeyUsageCertSign": "crypto/x509", + "x509.KeyUsageContentCommitment": "crypto/x509", + "x509.KeyUsageDataEncipherment": "crypto/x509", + "x509.KeyUsageDecipherOnly": "crypto/x509", + "x509.KeyUsageDigitalSignature": "crypto/x509", + "x509.KeyUsageEncipherOnly": "crypto/x509", + "x509.KeyUsageKeyAgreement": "crypto/x509", + "x509.KeyUsageKeyEncipherment": "crypto/x509", + "x509.MD2WithRSA": "crypto/x509", + "x509.MD5WithRSA": "crypto/x509", + "x509.MarshalECPrivateKey": "crypto/x509", + "x509.MarshalPKCS1PrivateKey": "crypto/x509", + "x509.MarshalPKIXPublicKey": "crypto/x509", + "x509.NameMismatch": "crypto/x509", + "x509.NewCertPool": "crypto/x509", + "x509.NotAuthorizedToSign": "crypto/x509", + "x509.PEMCipher": "crypto/x509", + "x509.PEMCipher3DES": "crypto/x509", + "x509.PEMCipherAES128": "crypto/x509", + "x509.PEMCipherAES192": "crypto/x509", + "x509.PEMCipherAES256": "crypto/x509", + "x509.PEMCipherDES": "crypto/x509", + "x509.ParseCRL": "crypto/x509", + "x509.ParseCertificate": "crypto/x509", + "x509.ParseCertificateRequest": "crypto/x509", + "x509.ParseCertificates": "crypto/x509", + "x509.ParseDERCRL": "crypto/x509", + "x509.ParseECPrivateKey": "crypto/x509", + "x509.ParsePKCS1PrivateKey": "crypto/x509", + 
"x509.ParsePKCS8PrivateKey": "crypto/x509", + "x509.ParsePKIXPublicKey": "crypto/x509", + "x509.PublicKeyAlgorithm": "crypto/x509", + "x509.RSA": "crypto/x509", + "x509.SHA1WithRSA": "crypto/x509", + "x509.SHA256WithRSA": "crypto/x509", + "x509.SHA256WithRSAPSS": "crypto/x509", + "x509.SHA384WithRSA": "crypto/x509", + "x509.SHA384WithRSAPSS": "crypto/x509", + "x509.SHA512WithRSA": "crypto/x509", + "x509.SHA512WithRSAPSS": "crypto/x509", + "x509.SignatureAlgorithm": "crypto/x509", + "x509.SystemCertPool": "crypto/x509", + "x509.SystemRootsError": "crypto/x509", + "x509.TooManyIntermediates": "crypto/x509", + "x509.UnhandledCriticalExtension": "crypto/x509", + "x509.UnknownAuthorityError": "crypto/x509", + "x509.UnknownPublicKeyAlgorithm": "crypto/x509", + "x509.UnknownSignatureAlgorithm": "crypto/x509", + "x509.VerifyOptions": "crypto/x509", + "xml.Attr": "encoding/xml", + "xml.CharData": "encoding/xml", + "xml.Comment": "encoding/xml", + "xml.CopyToken": "encoding/xml", + "xml.Decoder": "encoding/xml", + "xml.Directive": "encoding/xml", + "xml.Encoder": "encoding/xml", + "xml.EndElement": "encoding/xml", + "xml.Escape": "encoding/xml", + "xml.EscapeText": "encoding/xml", + "xml.HTMLAutoClose": "encoding/xml", + "xml.HTMLEntity": "encoding/xml", + "xml.Header": "encoding/xml", + "xml.Marshal": "encoding/xml", + "xml.MarshalIndent": "encoding/xml", + "xml.Marshaler": "encoding/xml", + "xml.MarshalerAttr": "encoding/xml", + "xml.Name": "encoding/xml", + "xml.NewDecoder": "encoding/xml", + "xml.NewEncoder": "encoding/xml", + "xml.ProcInst": "encoding/xml", + "xml.StartElement": "encoding/xml", + "xml.SyntaxError": "encoding/xml", + "xml.TagPathError": "encoding/xml", + "xml.Token": "encoding/xml", + "xml.Unmarshal": "encoding/xml", + "xml.UnmarshalError": "encoding/xml", + "xml.Unmarshaler": "encoding/xml", + "xml.UnmarshalerAttr": "encoding/xml", + "xml.UnsupportedTypeError": "encoding/xml", + "zip.Compressor": "archive/zip", + "zip.Decompressor": "archive/zip", + 
"zip.Deflate": "archive/zip", + "zip.ErrAlgorithm": "archive/zip", + "zip.ErrChecksum": "archive/zip", + "zip.ErrFormat": "archive/zip", + "zip.File": "archive/zip", + "zip.FileHeader": "archive/zip", + "zip.FileInfoHeader": "archive/zip", + "zip.NewReader": "archive/zip", + "zip.NewWriter": "archive/zip", + "zip.OpenReader": "archive/zip", + "zip.ReadCloser": "archive/zip", + "zip.Reader": "archive/zip", + "zip.RegisterCompressor": "archive/zip", + "zip.RegisterDecompressor": "archive/zip", + "zip.Store": "archive/zip", + "zip.Writer": "archive/zip", + "zlib.BestCompression": "compress/zlib", + "zlib.BestSpeed": "compress/zlib", + "zlib.DefaultCompression": "compress/zlib", + "zlib.ErrChecksum": "compress/zlib", + "zlib.ErrDictionary": "compress/zlib", + "zlib.ErrHeader": "compress/zlib", + "zlib.HuffmanOnly": "compress/zlib", + "zlib.NewReader": "compress/zlib", + "zlib.NewReaderDict": "compress/zlib", + "zlib.NewWriter": "compress/zlib", + "zlib.NewWriterLevel": "compress/zlib", + "zlib.NewWriterLevelDict": "compress/zlib", + "zlib.NoCompression": "compress/zlib", + "zlib.Resetter": "compress/zlib", + "zlib.Writer": "compress/zlib", + + "unsafe.Alignof": "unsafe", + "unsafe.ArbitraryType": "unsafe", + "unsafe.Offsetof": "unsafe", + "unsafe.Pointer": "unsafe", + "unsafe.Sizeof": "unsafe", +} diff --git a/vendor/k8s.io/gengo/generator/BUILD b/vendor/k8s.io/gengo/generator/BUILD index 66401e9357d..1906ba3bfcd 100644 --- a/vendor/k8s.io/gengo/generator/BUILD +++ b/vendor/k8s.io/gengo/generator/BUILD @@ -16,6 +16,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/golang/glog:go_default_library", + "//vendor/golang.org/x/tools/imports:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/parser:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", diff --git a/vendor/k8s.io/gengo/generator/execute.go b/vendor/k8s.io/gengo/generator/execute.go index 489bc666acf..38dffff975a 
100644 --- a/vendor/k8s.io/gengo/generator/execute.go +++ b/vendor/k8s.io/gengo/generator/execute.go @@ -19,13 +19,13 @@ package generator import ( "bytes" "fmt" - "go/format" "io" "io/ioutil" "os" "path/filepath" "strings" + "golang.org/x/tools/imports" "k8s.io/gengo/namer" "k8s.io/gengo/types" @@ -131,7 +131,6 @@ func assembleGolangFile(w io.Writer, f *File) { if len(f.Imports) > 0 { fmt.Fprint(w, "import (\n") - // TODO: sort imports like goimports does. for i := range f.Imports { if strings.Contains(i, "\"") { // they included quotes, or are using the @@ -159,9 +158,13 @@ func assembleGolangFile(w io.Writer, f *File) { w.Write(f.Body.Bytes()) } +func importsWrapper(src []byte) ([]byte, error) { + return imports.Process("", src, nil) +} + func NewGolangFile() *DefaultFileType { return &DefaultFileType{ - Format: format.Source, + Format: importsWrapper, Assemble: assembleGolangFile, } } From 3d69cea1e589add1d24fc72e9a8c46081664a719 Mon Sep 17 00:00:00 2001 From: Allen Petersen Date: Tue, 2 Jan 2018 22:07:30 -0800 Subject: [PATCH 564/794] Update generated files --- .../app/apis/kubeadm/v1alpha1/zz_generated.conversion.go | 3 ++- pkg/apis/admission/v1beta1/zz_generated.conversion.go | 3 ++- .../v1alpha1/zz_generated.conversion.go | 3 ++- .../v1beta1/zz_generated.conversion.go | 3 ++- pkg/apis/apps/v1/zz_generated.conversion.go | 3 ++- pkg/apis/apps/v1beta1/zz_generated.conversion.go | 3 ++- pkg/apis/apps/v1beta2/zz_generated.conversion.go | 3 ++- pkg/apis/authentication/v1/zz_generated.conversion.go | 3 ++- pkg/apis/authentication/v1beta1/zz_generated.conversion.go | 3 ++- pkg/apis/authorization/v1/zz_generated.conversion.go | 3 ++- pkg/apis/authorization/v1beta1/zz_generated.conversion.go | 3 ++- pkg/apis/autoscaling/v1/zz_generated.conversion.go | 3 ++- pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go | 3 ++- pkg/apis/batch/v1/zz_generated.conversion.go | 3 ++- pkg/apis/batch/v1beta1/zz_generated.conversion.go | 3 ++- 
pkg/apis/batch/v2alpha1/zz_generated.conversion.go | 3 ++- pkg/apis/certificates/v1beta1/zz_generated.conversion.go | 3 ++- .../componentconfig/v1alpha1/zz_generated.conversion.go | 3 ++- pkg/apis/core/v1/zz_generated.conversion.go | 3 ++- pkg/apis/events/v1beta1/zz_generated.conversion.go | 3 ++- pkg/apis/extensions/v1beta1/zz_generated.conversion.go | 3 ++- pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go | 3 ++- pkg/apis/networking/v1/zz_generated.conversion.go | 3 ++- pkg/apis/policy/v1beta1/zz_generated.conversion.go | 3 ++- pkg/apis/rbac/v1/zz_generated.conversion.go | 3 ++- pkg/apis/rbac/v1alpha1/zz_generated.conversion.go | 3 ++- pkg/apis/rbac/v1beta1/zz_generated.conversion.go | 3 ++- pkg/apis/scheduling/v1alpha1/zz_generated.conversion.go | 3 ++- pkg/apis/settings/v1alpha1/zz_generated.conversion.go | 3 ++- pkg/apis/storage/v1/zz_generated.conversion.go | 3 ++- pkg/apis/storage/v1alpha1/zz_generated.conversion.go | 3 ++- pkg/apis/storage/v1beta1/zz_generated.conversion.go | 3 ++- .../internalclientset/scheme/register.go | 3 ++- .../internalversion/initializerconfiguration.go | 3 ++- .../internalversion/mutatingwebhookconfiguration.go | 3 ++- .../internalversion/validatingwebhookconfiguration.go | 3 ++- .../apps/internalversion/controllerrevision.go | 3 ++- .../internalversion/apps/internalversion/statefulset.go | 3 ++- .../autoscaling/internalversion/horizontalpodautoscaler.go | 3 ++- .../internalversion/batch/internalversion/cronjob.go | 3 ++- .../internalversion/batch/internalversion/job.go | 3 ++- .../internalversion/certificatesigningrequest.go | 3 ++- .../core/internalversion/componentstatus.go | 3 ++- .../internalversion/core/internalversion/configmap.go | 3 ++- .../internalversion/core/internalversion/endpoints.go | 3 ++- .../internalversion/core/internalversion/event.go | 3 ++- .../internalversion/core/internalversion/limitrange.go | 3 ++- .../internalversion/core/internalversion/namespace.go | 3 ++- 
.../internalversion/core/internalversion/node.go | 3 ++- .../core/internalversion/persistentvolume.go | 3 ++- .../core/internalversion/persistentvolumeclaim.go | 3 ++- .../internalversion/core/internalversion/pod.go | 3 ++- .../internalversion/core/internalversion/podtemplate.go | 3 ++- .../core/internalversion/replicationcontroller.go | 3 ++- .../internalversion/core/internalversion/resourcequota.go | 3 ++- .../internalversion/core/internalversion/secret.go | 3 ++- .../internalversion/core/internalversion/service.go | 3 ++- .../internalversion/core/internalversion/serviceaccount.go | 3 ++- .../extensions/internalversion/daemonset.go | 3 ++- .../extensions/internalversion/deployment.go | 3 ++- .../internalversion/extensions/internalversion/ingress.go | 3 ++- .../extensions/internalversion/podsecuritypolicy.go | 3 ++- .../extensions/internalversion/replicaset.go | 3 ++- .../informers_generated/internalversion/factory.go | 7 ++++--- .../informers_generated/internalversion/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- .../networking/internalversion/networkpolicy.go | 3 ++- .../policy/internalversion/poddisruptionbudget.go | 3 ++- .../internalversion/rbac/internalversion/clusterrole.go | 3 ++- .../rbac/internalversion/clusterrolebinding.go | 3 ++- .../internalversion/rbac/internalversion/role.go | 3 ++- .../internalversion/rbac/internalversion/rolebinding.go | 3 ++- .../scheduling/internalversion/priorityclass.go | 3 ++- .../internalversion/settings/internalversion/podpreset.go | 3 ++- .../storage/internalversion/storageclass.go | 3 ++- .../storage/internalversion/volumeattachment.go | 3 ++- .../apis/kubeletconfig/v1alpha1/zz_generated.conversion.go | 3 ++- .../kubeproxyconfig/v1alpha1/zz_generated.conversion.go | 3 ++- .../eventratelimit/v1alpha1/zz_generated.conversion.go | 3 ++- .../v1alpha1/zz_generated.conversion.go | 3 ++- .../apis/resourcequota/v1alpha1/zz_generated.conversion.go | 3 ++- 
.../apis/apiextensions/v1beta1/zz_generated.conversion.go | 3 ++- .../client/clientset/internalclientset/scheme/register.go | 3 ++- .../apiextensions/v1beta1/customresourcedefinition.go | 3 ++- .../pkg/client/informers/externalversions/factory.go | 7 ++++--- .../pkg/client/informers/externalversions/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- .../internalversion/customresourcedefinition.go | 3 ++- .../pkg/client/informers/internalversion/factory.go | 7 ++++--- .../pkg/client/informers/internalversion/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- .../pkg/apis/testapigroup/v1/zz_generated.conversion.go | 3 ++- .../pkg/apis/audit/v1alpha1/zz_generated.conversion.go | 3 ++- .../pkg/apis/audit/v1beta1/zz_generated.conversion.go | 3 ++- .../pkg/apis/example/v1/zz_generated.conversion.go | 3 ++- .../v1alpha1/initializerconfiguration.go | 3 ++- .../v1beta1/mutatingwebhookconfiguration.go | 3 ++- .../v1beta1/validatingwebhookconfiguration.go | 3 ++- .../client-go/informers/apps/v1/controllerrevision.go | 3 ++- .../src/k8s.io/client-go/informers/apps/v1/daemonset.go | 3 ++- .../src/k8s.io/client-go/informers/apps/v1/deployment.go | 3 ++- .../src/k8s.io/client-go/informers/apps/v1/replicaset.go | 3 ++- .../src/k8s.io/client-go/informers/apps/v1/statefulset.go | 3 ++- .../client-go/informers/apps/v1beta1/controllerrevision.go | 3 ++- .../k8s.io/client-go/informers/apps/v1beta1/deployment.go | 3 ++- .../k8s.io/client-go/informers/apps/v1beta1/statefulset.go | 3 ++- .../client-go/informers/apps/v1beta2/controllerrevision.go | 3 ++- .../k8s.io/client-go/informers/apps/v1beta2/daemonset.go | 3 ++- .../k8s.io/client-go/informers/apps/v1beta2/deployment.go | 3 ++- .../k8s.io/client-go/informers/apps/v1beta2/replicaset.go | 3 ++- .../k8s.io/client-go/informers/apps/v1beta2/statefulset.go | 3 ++- .../informers/autoscaling/v1/horizontalpodautoscaler.go | 3 ++- .../autoscaling/v2beta1/horizontalpodautoscaler.go | 3 ++- 
staging/src/k8s.io/client-go/informers/batch/v1/job.go | 3 ++- .../k8s.io/client-go/informers/batch/v1beta1/cronjob.go | 3 ++- .../k8s.io/client-go/informers/batch/v2alpha1/cronjob.go | 3 ++- .../certificates/v1beta1/certificatesigningrequest.go | 3 ++- .../k8s.io/client-go/informers/core/v1/componentstatus.go | 3 ++- .../src/k8s.io/client-go/informers/core/v1/configmap.go | 3 ++- .../src/k8s.io/client-go/informers/core/v1/endpoints.go | 3 ++- staging/src/k8s.io/client-go/informers/core/v1/event.go | 3 ++- .../src/k8s.io/client-go/informers/core/v1/limitrange.go | 3 ++- .../src/k8s.io/client-go/informers/core/v1/namespace.go | 3 ++- staging/src/k8s.io/client-go/informers/core/v1/node.go | 3 ++- .../k8s.io/client-go/informers/core/v1/persistentvolume.go | 3 ++- .../client-go/informers/core/v1/persistentvolumeclaim.go | 3 ++- staging/src/k8s.io/client-go/informers/core/v1/pod.go | 3 ++- .../src/k8s.io/client-go/informers/core/v1/podtemplate.go | 3 ++- .../client-go/informers/core/v1/replicationcontroller.go | 3 ++- .../k8s.io/client-go/informers/core/v1/resourcequota.go | 3 ++- staging/src/k8s.io/client-go/informers/core/v1/secret.go | 3 ++- staging/src/k8s.io/client-go/informers/core/v1/service.go | 3 ++- .../k8s.io/client-go/informers/core/v1/serviceaccount.go | 3 ++- .../src/k8s.io/client-go/informers/events/v1beta1/event.go | 3 ++- .../client-go/informers/extensions/v1beta1/daemonset.go | 3 ++- .../client-go/informers/extensions/v1beta1/deployment.go | 3 ++- .../client-go/informers/extensions/v1beta1/ingress.go | 3 ++- .../informers/extensions/v1beta1/podsecuritypolicy.go | 3 ++- .../client-go/informers/extensions/v1beta1/replicaset.go | 3 ++- staging/src/k8s.io/client-go/informers/factory.go | 7 ++++--- staging/src/k8s.io/client-go/informers/generic.go | 1 + .../informers/internalinterfaces/factory_interfaces.go | 3 ++- .../client-go/informers/networking/v1/networkpolicy.go | 3 ++- .../informers/policy/v1beta1/poddisruptionbudget.go | 3 ++- 
.../src/k8s.io/client-go/informers/rbac/v1/clusterrole.go | 3 ++- .../client-go/informers/rbac/v1/clusterrolebinding.go | 3 ++- staging/src/k8s.io/client-go/informers/rbac/v1/role.go | 3 ++- .../src/k8s.io/client-go/informers/rbac/v1/rolebinding.go | 3 ++- .../client-go/informers/rbac/v1alpha1/clusterrole.go | 3 ++- .../informers/rbac/v1alpha1/clusterrolebinding.go | 3 ++- .../src/k8s.io/client-go/informers/rbac/v1alpha1/role.go | 3 ++- .../client-go/informers/rbac/v1alpha1/rolebinding.go | 3 ++- .../k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go | 3 ++- .../client-go/informers/rbac/v1beta1/clusterrolebinding.go | 3 ++- .../src/k8s.io/client-go/informers/rbac/v1beta1/role.go | 3 ++- .../k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go | 3 ++- .../informers/scheduling/v1alpha1/priorityclass.go | 3 ++- .../client-go/informers/settings/v1alpha1/podpreset.go | 3 ++- .../k8s.io/client-go/informers/storage/v1/storageclass.go | 3 ++- .../informers/storage/v1alpha1/volumeattachment.go | 3 ++- .../client-go/informers/storage/v1beta1/storageclass.go | 3 ++- .../apiserver/apis/example/v1/zz_generated.conversion.go | 3 ++- .../apiserver/apis/example2/v1/zz_generated.conversion.go | 3 ++- .../apiserver/clientset/internalversion/scheme/register.go | 3 ++- .../informers/externalversions/example/v1/testtype.go | 3 ++- .../informers/externalversions/example2/v1/testtype.go | 3 ++- .../apiserver/informers/externalversions/factory.go | 7 ++++--- .../apiserver/informers/externalversions/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- .../internalversion/example/internalversion/testtype.go | 3 ++- .../internalversion/example2/internalversion/testtype.go | 3 ++- .../apiserver/informers/internalversion/factory.go | 7 ++++--- .../apiserver/informers/internalversion/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- .../crd/informers/externalversions/example/v1/testtype.go | 3 ++- 
.../crd/informers/externalversions/example2/v1/testtype.go | 3 ++- .../_examples/crd/informers/externalversions/factory.go | 7 ++++--- .../_examples/crd/informers/externalversions/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- .../apiregistration/v1beta1/zz_generated.conversion.go | 3 ++- .../internalclientset/scheme/register.go | 3 ++- .../externalversions/apiregistration/v1beta1/apiservice.go | 3 ++- .../pkg/client/informers/externalversions/factory.go | 7 ++++--- .../pkg/client/informers/externalversions/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- .../apiregistration/internalversion/apiservice.go | 3 ++- .../pkg/client/informers/internalversion/factory.go | 7 ++++--- .../pkg/client/informers/internalversion/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- .../apis/custom_metrics/v1beta1/zz_generated.conversion.go | 3 ++- .../pkg/apis/metrics/v1alpha1/zz_generated.conversion.go | 3 ++- .../pkg/apis/metrics/v1beta1/zz_generated.conversion.go | 3 ++- .../pkg/apis/wardle/v1alpha1/zz_generated.conversion.go | 3 ++- .../client/clientset/internalversion/scheme/register.go | 3 ++- .../pkg/client/informers/externalversions/factory.go | 7 ++++--- .../pkg/client/informers/externalversions/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- .../informers/externalversions/wardle/v1alpha1/fischer.go | 3 ++- .../informers/externalversions/wardle/v1alpha1/flunder.go | 3 ++- .../pkg/client/informers/internalversion/factory.go | 7 ++++--- .../pkg/client/informers/internalversion/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- .../internalversion/wardle/internalversion/fischer.go | 3 ++- .../internalversion/wardle/internalversion/flunder.go | 3 ++- .../pkg/client/informers/externalversions/factory.go | 7 ++++--- .../pkg/client/informers/externalversions/generic.go | 1 + .../internalinterfaces/factory_interfaces.go | 3 ++- 
.../externalversions/samplecontroller/v1alpha1/foo.go | 3 ++- 208 files changed, 428 insertions(+), 220 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go index 0d7f685c3b8..79e487a0802 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go @@ -21,13 +21,14 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeletconfig_v1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" kubeproxyconfig_v1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/admission/v1beta1/zz_generated.conversion.go b/pkg/apis/admission/v1beta1/zz_generated.conversion.go index c0c245859cc..971a4849ab2 100644 --- a/pkg/apis/admission/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/admission/v1beta1/zz_generated.conversion.go @@ -21,13 +21,14 @@ limitations under the License. package v1beta1 import ( + unsafe "unsafe" + v1beta1 "k8s.io/api/admission/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" admission "k8s.io/kubernetes/pkg/apis/admission" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go b/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go index 95f8559570a..49ece4122ff 100644 --- a/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. 
package v1alpha1 import ( + unsafe "unsafe" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go b/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go index ad65e647291..d130633cacb 100644 --- a/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go @@ -21,12 +21,13 @@ limitations under the License. package v1beta1 import ( + unsafe "unsafe" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/apps/v1/zz_generated.conversion.go b/pkg/apis/apps/v1/zz_generated.conversion.go index 4f1398c55f0..62aaaf390e5 100644 --- a/pkg/apis/apps/v1/zz_generated.conversion.go +++ b/pkg/apis/apps/v1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. package v1 import ( + unsafe "unsafe" + v1 "k8s.io/api/apps/v1" core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,7 +32,6 @@ import ( core "k8s.io/kubernetes/pkg/apis/core" apis_core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/apps/v1beta1/zz_generated.conversion.go b/pkg/apis/apps/v1beta1/zz_generated.conversion.go index 95bdc87c29d..56639c17dce 100644 --- a/pkg/apis/apps/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/apps/v1beta1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. 
package v1beta1 import ( + unsafe "unsafe" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,7 +33,6 @@ import ( core "k8s.io/kubernetes/pkg/apis/core" core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/apps/v1beta2/zz_generated.conversion.go b/pkg/apis/apps/v1beta2/zz_generated.conversion.go index b253aede958..a6325356007 100644 --- a/pkg/apis/apps/v1beta2/zz_generated.conversion.go +++ b/pkg/apis/apps/v1beta2/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. package v1beta2 import ( + unsafe "unsafe" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,7 +33,6 @@ import ( core "k8s.io/kubernetes/pkg/apis/core" core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/authentication/v1/zz_generated.conversion.go b/pkg/apis/authentication/v1/zz_generated.conversion.go index d98ef4a6fea..43acd2c41ed 100644 --- a/pkg/apis/authentication/v1/zz_generated.conversion.go +++ b/pkg/apis/authentication/v1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. package v1 import ( + unsafe "unsafe" + v1 "k8s.io/api/authentication/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" authentication "k8s.io/kubernetes/pkg/apis/authentication" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/authentication/v1beta1/zz_generated.conversion.go b/pkg/apis/authentication/v1beta1/zz_generated.conversion.go index c04aba53cbf..392ea76c011 100644 --- a/pkg/apis/authentication/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/authentication/v1beta1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. 
package v1beta1 import ( + unsafe "unsafe" + v1beta1 "k8s.io/api/authentication/v1beta1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" authentication "k8s.io/kubernetes/pkg/apis/authentication" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/authorization/v1/zz_generated.conversion.go b/pkg/apis/authorization/v1/zz_generated.conversion.go index 415a50ec84d..0cab2b3a2a9 100644 --- a/pkg/apis/authorization/v1/zz_generated.conversion.go +++ b/pkg/apis/authorization/v1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. package v1 import ( + unsafe "unsafe" + v1 "k8s.io/api/authorization/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" authorization "k8s.io/kubernetes/pkg/apis/authorization" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/authorization/v1beta1/zz_generated.conversion.go b/pkg/apis/authorization/v1beta1/zz_generated.conversion.go index a6b08d93db5..c9c1bd5e5ea 100644 --- a/pkg/apis/authorization/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/authorization/v1beta1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. package v1beta1 import ( + unsafe "unsafe" + v1beta1 "k8s.io/api/authorization/v1beta1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" authorization "k8s.io/kubernetes/pkg/apis/authorization" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/autoscaling/v1/zz_generated.conversion.go b/pkg/apis/autoscaling/v1/zz_generated.conversion.go index 6a1c65489f4..71fac630477 100644 --- a/pkg/apis/autoscaling/v1/zz_generated.conversion.go +++ b/pkg/apis/autoscaling/v1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. 
package v1 import ( + unsafe "unsafe" + v1 "k8s.io/api/autoscaling/v1" core_v1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" @@ -29,7 +31,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" core "k8s.io/kubernetes/pkg/apis/core" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go b/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go index 04e256fda7b..2de9ae9b433 100644 --- a/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go +++ b/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. package v2beta1 import ( + unsafe "unsafe" + v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" @@ -29,7 +31,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" core "k8s.io/kubernetes/pkg/apis/core" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/batch/v1/zz_generated.conversion.go b/pkg/apis/batch/v1/zz_generated.conversion.go index e9e6f9e20e4..3852fcd9bbf 100644 --- a/pkg/apis/batch/v1/zz_generated.conversion.go +++ b/pkg/apis/batch/v1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. package v1 import ( + unsafe "unsafe" + v1 "k8s.io/api/batch/v1" core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,7 +31,6 @@ import ( batch "k8s.io/kubernetes/pkg/apis/batch" core "k8s.io/kubernetes/pkg/apis/core" apis_core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/batch/v1beta1/zz_generated.conversion.go b/pkg/apis/batch/v1beta1/zz_generated.conversion.go index 677dad829cf..8399c047dc3 100644 --- a/pkg/apis/batch/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/batch/v1beta1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. 
package v1beta1 import ( + unsafe "unsafe" + v1beta1 "k8s.io/api/batch/v1beta1" core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,7 +31,6 @@ import ( batch "k8s.io/kubernetes/pkg/apis/batch" batch_v1 "k8s.io/kubernetes/pkg/apis/batch/v1" core "k8s.io/kubernetes/pkg/apis/core" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/batch/v2alpha1/zz_generated.conversion.go b/pkg/apis/batch/v2alpha1/zz_generated.conversion.go index 119b705ccc0..b5d03d747c5 100644 --- a/pkg/apis/batch/v2alpha1/zz_generated.conversion.go +++ b/pkg/apis/batch/v2alpha1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. package v2alpha1 import ( + unsafe "unsafe" + v2alpha1 "k8s.io/api/batch/v2alpha1" core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,7 +31,6 @@ import ( batch "k8s.io/kubernetes/pkg/apis/batch" batch_v1 "k8s.io/kubernetes/pkg/apis/batch/v1" core "k8s.io/kubernetes/pkg/apis/core" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/certificates/v1beta1/zz_generated.conversion.go b/pkg/apis/certificates/v1beta1/zz_generated.conversion.go index 8379477bf36..b1df67c2ec0 100644 --- a/pkg/apis/certificates/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/certificates/v1beta1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. package v1beta1 import ( + unsafe "unsafe" + v1beta1 "k8s.io/api/certificates/v1beta1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" certificates "k8s.io/kubernetes/pkg/apis/certificates" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go b/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go index 1b78cf1c2a3..96555ba9eeb 100644 --- a/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. 
package v1alpha1 import ( + unsafe "unsafe" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" componentconfig "k8s.io/kubernetes/pkg/apis/componentconfig" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/core/v1/zz_generated.conversion.go b/pkg/apis/core/v1/zz_generated.conversion.go index 34fb3a5ad80..e8ddfac91fb 100644 --- a/pkg/apis/core/v1/zz_generated.conversion.go +++ b/pkg/apis/core/v1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. package v1 import ( + unsafe "unsafe" + v1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,7 +30,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" core "k8s.io/kubernetes/pkg/apis/core" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/events/v1beta1/zz_generated.conversion.go b/pkg/apis/events/v1beta1/zz_generated.conversion.go index c588a11747a..e73b2081338 100644 --- a/pkg/apis/events/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/events/v1beta1/zz_generated.conversion.go @@ -21,12 +21,13 @@ limitations under the License. package v1beta1 import ( + unsafe "unsafe" + v1 "k8s.io/api/core/v1" v1beta1 "k8s.io/api/events/v1beta1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" core "k8s.io/kubernetes/pkg/apis/core" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/pkg/apis/extensions/v1beta1/zz_generated.conversion.go index 14a4f09993b..7053558193e 100644 --- a/pkg/apis/extensions/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. 
package v1beta1 import ( + unsafe "unsafe" + v1 "k8s.io/api/core/v1" v1beta1 "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,7 +32,6 @@ import ( core "k8s.io/kubernetes/pkg/apis/core" core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go b/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go index b38027736d9..ad1a808f71c 100644 --- a/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/imagepolicy/v1alpha1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + v1alpha1 "k8s.io/api/imagepolicy/v1alpha1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" imagepolicy "k8s.io/kubernetes/pkg/apis/imagepolicy" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/networking/v1/zz_generated.conversion.go b/pkg/apis/networking/v1/zz_generated.conversion.go index 3a3b414b7a6..df0a508f2aa 100644 --- a/pkg/apis/networking/v1/zz_generated.conversion.go +++ b/pkg/apis/networking/v1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. package v1 import ( + unsafe "unsafe" + core_v1 "k8s.io/api/core/v1" v1 "k8s.io/api/networking/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,7 +31,6 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" core "k8s.io/kubernetes/pkg/apis/core" networking "k8s.io/kubernetes/pkg/apis/networking" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/policy/v1beta1/zz_generated.conversion.go b/pkg/apis/policy/v1beta1/zz_generated.conversion.go index 4e776634a2f..7c897efb1b6 100644 --- a/pkg/apis/policy/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/policy/v1beta1/zz_generated.conversion.go @@ -21,13 +21,14 @@ limitations under the License. 
package v1beta1 import ( + unsafe "unsafe" + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" policy "k8s.io/kubernetes/pkg/apis/policy" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/rbac/v1/zz_generated.conversion.go b/pkg/apis/rbac/v1/zz_generated.conversion.go index e58d18435f4..39e0c380e62 100644 --- a/pkg/apis/rbac/v1/zz_generated.conversion.go +++ b/pkg/apis/rbac/v1/zz_generated.conversion.go @@ -21,12 +21,13 @@ limitations under the License. package v1 import ( + unsafe "unsafe" + v1 "k8s.io/api/rbac/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" rbac "k8s.io/kubernetes/pkg/apis/rbac" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go b/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go index 7d84a6ce5a6..6bd6c4e76e6 100644 --- a/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go @@ -21,12 +21,13 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" rbac "k8s.io/kubernetes/pkg/apis/rbac" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/rbac/v1beta1/zz_generated.conversion.go b/pkg/apis/rbac/v1beta1/zz_generated.conversion.go index 952a4e4069b..88803f86fb6 100644 --- a/pkg/apis/rbac/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/rbac/v1beta1/zz_generated.conversion.go @@ -21,12 +21,13 @@ limitations under the License. 
package v1beta1 import ( + unsafe "unsafe" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" rbac "k8s.io/kubernetes/pkg/apis/rbac" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/scheduling/v1alpha1/zz_generated.conversion.go b/pkg/apis/scheduling/v1alpha1/zz_generated.conversion.go index 988c8679a34..e3a3602bca5 100644 --- a/pkg/apis/scheduling/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/scheduling/v1alpha1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + v1alpha1 "k8s.io/api/scheduling/v1alpha1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" scheduling "k8s.io/kubernetes/pkg/apis/scheduling" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/settings/v1alpha1/zz_generated.conversion.go b/pkg/apis/settings/v1alpha1/zz_generated.conversion.go index f55bd43926c..08ab0049f66 100644 --- a/pkg/apis/settings/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/settings/v1alpha1/zz_generated.conversion.go @@ -21,13 +21,14 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + v1 "k8s.io/api/core/v1" v1alpha1 "k8s.io/api/settings/v1alpha1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" core "k8s.io/kubernetes/pkg/apis/core" settings "k8s.io/kubernetes/pkg/apis/settings" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/storage/v1/zz_generated.conversion.go b/pkg/apis/storage/v1/zz_generated.conversion.go index 19e33a11ea6..cc86dc53505 100644 --- a/pkg/apis/storage/v1/zz_generated.conversion.go +++ b/pkg/apis/storage/v1/zz_generated.conversion.go @@ -21,13 +21,14 @@ limitations under the License. 
package v1 import ( + unsafe "unsafe" + core_v1 "k8s.io/api/core/v1" v1 "k8s.io/api/storage/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" core "k8s.io/kubernetes/pkg/apis/core" storage "k8s.io/kubernetes/pkg/apis/storage" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/storage/v1alpha1/zz_generated.conversion.go b/pkg/apis/storage/v1alpha1/zz_generated.conversion.go index 652cc2f82f6..d1a73f48ae1 100644 --- a/pkg/apis/storage/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/storage/v1alpha1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + v1alpha1 "k8s.io/api/storage/v1alpha1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" storage "k8s.io/kubernetes/pkg/apis/storage" - unsafe "unsafe" ) func init() { diff --git a/pkg/apis/storage/v1beta1/zz_generated.conversion.go b/pkg/apis/storage/v1beta1/zz_generated.conversion.go index 1338df136b5..5b72dda9f7a 100644 --- a/pkg/apis/storage/v1beta1/zz_generated.conversion.go +++ b/pkg/apis/storage/v1beta1/zz_generated.conversion.go @@ -21,13 +21,14 @@ limitations under the License. package v1beta1 import ( + unsafe "unsafe" + v1 "k8s.io/api/core/v1" v1beta1 "k8s.io/api/storage/v1beta1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" core "k8s.io/kubernetes/pkg/apis/core" storage "k8s.io/kubernetes/pkg/apis/storage" - unsafe "unsafe" ) func init() { diff --git a/pkg/client/clientset_generated/internalclientset/scheme/register.go b/pkg/client/clientset_generated/internalclientset/scheme/register.go index 3bbf1846c60..69fb9e80a90 100644 --- a/pkg/client/clientset_generated/internalclientset/scheme/register.go +++ b/pkg/client/clientset_generated/internalclientset/scheme/register.go @@ -17,6 +17,8 @@ limitations under the License. 
package scheme import ( + os "os" + announced "k8s.io/apimachinery/pkg/apimachinery/announced" registered "k8s.io/apimachinery/pkg/apimachinery/registered" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,7 +41,6 @@ import ( scheduling "k8s.io/kubernetes/pkg/apis/scheduling/install" settings "k8s.io/kubernetes/pkg/apis/settings/install" storage "k8s.io/kubernetes/pkg/apis/storage/install" - os "os" ) var Scheme = runtime.NewScheme() diff --git a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go index 68466616583..b71f19f8e35 100644 --- a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go +++ b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion" - time "time" ) // InitializerConfigurationInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go index 075caa32531..cca3c29d54c 100644 --- a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go +++ b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion" - time "time" ) // MutatingWebhookConfigurationInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go index f513b63487d..f523b07c4ed 100644 --- a/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go +++ b/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion" - time "time" ) // ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go b/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go index e03fe17f6e3..19b79bce962 100644 --- a/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go +++ b/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/apps/internalversion" - time "time" ) // ControllerRevisionInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go b/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go index 6e3bb29b802..a3db402fb53 100644 --- a/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go +++ b/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/apps/internalversion" - time "time" ) // StatefulSetInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go b/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go index 7232de82217..a604fd981a5 100644 --- a/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go +++ b/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion" - time "time" ) // HorizontalPodAutoscalerInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go b/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go index 1f234cfb80c..f47bfb24c30 100644 --- a/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go +++ b/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/batch/internalversion" - time "time" ) // CronJobInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go b/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go index 023ca84faaf..3ec10ba6a16 100644 --- a/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go +++ b/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/batch/internalversion" - time "time" ) // JobInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go b/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go index 9ec66c83e01..703bd632bc1 100644 --- a/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go +++ b/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/certificates/internalversion" - time "time" ) // CertificateSigningRequestInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go index ec94828e3b7..f61b6a082bf 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // ComponentStatusInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go index 1681a23540a..2f1e3837b64 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // ConfigMapInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go index 20a9319bf00..fa400679836 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // EndpointsInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go index 0a3d085ac29..87a3288956b 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // EventInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go index 37181403c94..843754a3744 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // LimitRangeInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go index 8d321e22264..668c5186210 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // NamespaceInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go index 55ea586661e..fddcf541b5c 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // NodeInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go index 90c539dc293..64a6014480d 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // PersistentVolumeInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go index 57ceee071e3..3d4092a37cf 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // PersistentVolumeClaimInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go index 53029da5d25..ec324fef2d7 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // PodInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go index d7b3551d550..69e51de17d9 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // PodTemplateInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go index defb354ec7a..d6bae5f9107 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // ReplicationControllerInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go index ea1f1fa026d..c43cbdc6db9 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // ResourceQuotaInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go index 41bb46aaa92..3e306f79b2e 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // SecretInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go index 55bc3328d3b..be412ab982b 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // ServiceInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go b/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go index aff62b4bc5c..a3247a566e1 100644 --- a/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go +++ b/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - time "time" ) // ServiceAccountInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go index ac77b8d34be..7a6ad9f2570 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion" - time "time" ) // DaemonSetInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go index 36b8eda664a..c9d09f0a3c9 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion" - time "time" ) // DeploymentInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go index 41ca2e32821..5f12d307f5e 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion" - time "time" ) // IngressInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go index dd4254f7337..02248fda6fe 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion" - time "time" ) // PodSecurityPolicyInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go index 5ec97fc57d1..fa51fc575cb 100644 --- a/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go +++ b/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion" - time "time" ) // ReplicaSetInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/factory.go b/pkg/client/informers/informers_generated/internalversion/factory.go index 8d006ad7d8a..4b8394e9b15 100644 --- a/pkg/client/informers/informers_generated/internalversion/factory.go +++ b/pkg/client/informers/informers_generated/internalversion/factory.go @@ -19,6 +19,10 @@ limitations under the License. package internalversion import ( + reflect "reflect" + sync "sync" + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -38,9 +42,6 @@ import ( scheduling "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling" settings "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings" storage "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/pkg/client/informers/informers_generated/internalversion/generic.go b/pkg/client/informers/informers_generated/internalversion/generic.go index ee1c51ce8ec..31370e7861c 100644 --- a/pkg/client/informers/informers_generated/internalversion/generic.go +++ b/pkg/client/informers/informers_generated/internalversion/generic.go @@ -20,6 +20,7 @@ package internalversion import ( "fmt" + schema 
"k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" diff --git a/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go b/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go index 15fe3e9964a..bf129d9217d 100644 --- a/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go +++ b/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. package internalinterfaces import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - time "time" ) type NewInformerFunc func(internalclientset.Interface, time.Duration) cache.SharedIndexInformer diff --git a/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go b/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go index 2e9a688a79b..5220eabd7ef 100644 --- a/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go +++ b/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/networking/internalversion" - time "time" ) // NetworkPolicyInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go b/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go index 01c5b7be820..db2fe0410a6 100644 --- a/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go +++ b/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/policy/internalversion" - time "time" ) // PodDisruptionBudgetInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go index 42b69e1377a..2d7e68d9520 100644 --- a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go +++ b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion" - time "time" ) // ClusterRoleInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go index 7ba4297f2a7..11f05913eaf 100644 --- a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go +++ b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion" - time "time" ) // ClusterRoleBindingInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go index f09a5a230be..687bf1dc4aa 100644 --- a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go +++ b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion" - time "time" ) // RoleInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go index 5382f2d130e..7b7163a1082 100644 --- a/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go +++ b/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion" - time "time" ) // RoleBindingInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go b/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go index addf24cd4aa..40b1437ba51 100644 --- a/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go +++ b/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion" - time "time" ) // PriorityClassInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go b/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go index 93a1dbe2bd8..b4a0f89b9f2 100644 --- a/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go +++ b/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/settings/internalversion" - time "time" ) // PodPresetInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go b/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go index 6c0d43ea527..dbcb0c74f78 100644 --- a/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go +++ b/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/storage/internalversion" - time "time" ) // StorageClassInformer provides access to a shared informer and lister for diff --git a/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go b/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go index 62a4bca5993..fc8e809742f 100644 --- a/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go +++ b/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/storage/internalversion" - time "time" ) // VolumeAttachmentInformer provides access to a shared informer and lister for diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go index fd5d707b072..7dec76e6c8f 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" - unsafe "unsafe" ) func init() { diff --git a/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go b/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go index d91bac70ab2..ae2742d277c 100644 --- a/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go +++ b/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. 
package v1alpha1 import ( + unsafe "unsafe" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" - unsafe "unsafe" ) func init() { diff --git a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.conversion.go b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.conversion.go index 4f06087d785..d425411e1cc 100644 --- a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.conversion.go +++ b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.conversion.go @@ -21,10 +21,11 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" eventratelimit "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit" - unsafe "unsafe" ) func init() { diff --git a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go index 7dfc7b0d993..36414a7425c 100644 --- a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go +++ b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go @@ -21,12 +21,13 @@ limitations under the License. 
package v1alpha1 import ( + unsafe "unsafe" + v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" core "k8s.io/kubernetes/pkg/apis/core" podtolerationrestriction "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" - unsafe "unsafe" ) func init() { diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go index 5406a11f501..d26f8028166 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go @@ -21,10 +21,11 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" resourcequota "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index 5b045113828..f7d46f6e502 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -21,10 +21,11 @@ limitations under the License. 
package v1beta1 import ( + unsafe "unsafe" + apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/register.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/register.go index 2f49e2e3c82..388104e0048 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/register.go @@ -17,6 +17,8 @@ limitations under the License. package scheme import ( + os "os" + apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install" announced "k8s.io/apimachinery/pkg/apimachinery/announced" registered "k8s.io/apimachinery/pkg/apimachinery/registered" @@ -24,7 +26,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" - os "os" ) var Scheme = runtime.NewScheme() diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go index 32bc315c5b9..a0bffb88f4b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + apiextensions_v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces" @@ -27,7 +29,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - time "time" ) // CustomResourceDefinitionInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go index ff396648168..e39d41c13f3 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go @@ -19,6 +19,10 @@ limitations under the License. 
package externalversions import ( + reflect "reflect" + sync "sync" + time "time" + clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextensions "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions" internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces" @@ -26,9 +30,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go index 4d741b800d2..4485a199b29 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go @@ -20,6 +20,7 @@ package externalversions import ( "fmt" + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 810ac9d4381..14f84b3063d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. 
package internalinterfaces import ( + time "time" + clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" - time "time" ) type NewInformerFunc func(clientset.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go index f82a1c757c8..f814ba73d2e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" internalclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset" internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces" @@ -27,7 +29,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - time "time" ) // CustomResourceDefinitionInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go index 2cc9ffbbba8..dffd521ac66 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go @@ -19,6 +19,10 @@ limitations under the License. 
package internalversion import ( + reflect "reflect" + sync "sync" + time "time" + internalclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset" apiextensions "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions" internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces" @@ -26,9 +30,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go index 77f4303a7b7..78d66fae20f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go @@ -20,6 +20,7 @@ package internalversion import ( "fmt" + apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go index 12acf883077..71a1acfc527 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. 
package internalinterfaces import ( + time "time" + internalclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" - time "time" ) type NewInformerFunc func(internalclientset.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go index 461e960eaab..67f5f803e67 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. package v1 import ( + unsafe "unsafe" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" testapigroup "k8s.io/apimachinery/pkg/apis/testapigroup" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go index b3b27c31c14..38aaf458349 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go @@ -21,13 +21,14 @@ limitations under the License. 
package v1alpha1 import ( + unsafe "unsafe" + authentication_v1 "k8s.io/api/authentication/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" audit "k8s.io/apiserver/pkg/apis/audit" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go index c29f61e743b..3bb79c7b7ae 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go @@ -21,13 +21,14 @@ limitations under the License. package v1beta1 import ( + unsafe "unsafe" + authentication_v1 "k8s.io/api/authentication/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" audit "k8s.io/apiserver/pkg/apis/audit" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go index e1bfe92b2c5..848690ea1a8 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. 
package v1 import ( + unsafe "unsafe" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" example "k8s.io/apiserver/pkg/apis/example" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go index aa73b27a8e0..96b410d7693 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + time "time" + admissionregistration_v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" cache "k8s.io/client-go/tools/cache" - time "time" ) // InitializerConfigurationInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 518403e998a..d22e03759e5 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + admissionregistration_v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // MutatingWebhookConfigurationInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 35f15a24f26..0520d74c32b 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + time "time" + admissionregistration_v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go index 97c5595dfa6..234aa90e1bf 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + apps_v1 "k8s.io/api/apps/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ControllerRevisionInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go b/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go index 7ba662d42fe..066b44a3024 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + apps_v1 "k8s.io/api/apps/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // DaemonSetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go b/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go index 07396bb974e..209cbf4028a 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + apps_v1 "k8s.io/api/apps/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // DeploymentInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go b/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go index 089fff08494..c7c9d8940f1 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + apps_v1 "k8s.io/api/apps/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ReplicaSetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go b/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go index 07c384512de..f421da82b4f 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + apps_v1 "k8s.io/api/apps/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // StatefulSetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go index da0b32509c2..04ef7201faa 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + time "time" + apps_v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/apps/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ControllerRevisionInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go index 48cbf23e58e..b5735542ea4 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + apps_v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/apps/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // DeploymentInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go index ba9ba5c025a..f19ddc8acf6 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + time "time" + apps_v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/apps/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // StatefulSetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go index 8649636b765..15b70e4e4a1 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta2 import ( + time "time" + apps_v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" - time "time" ) // ControllerRevisionInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go index 87f03a12d64..8b9d1584f70 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + time "time" + apps_v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" - time "time" ) // DaemonSetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go index cfecc891c7c..3f0688c4899 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta2 import ( + time "time" + apps_v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" - time "time" ) // DeploymentInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go index f460c206f97..5a82ecdb002 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + time "time" + apps_v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" - time "time" ) // ReplicaSetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go index 6be9bbb2351..7cc1dd5bf40 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta2 import ( + time "time" + apps_v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" - time "time" ) // StatefulSetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go index cf9c7cdc340..9d0a429e3fb 100644 --- a/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + autoscaling_v1 "k8s.io/api/autoscaling/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/autoscaling/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // HorizontalPodAutoscalerInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go index 73bf5a12a75..98bfbde6208 100644 --- a/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. 
package v2beta1 import ( + time "time" + autoscaling_v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // HorizontalPodAutoscalerInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/batch/v1/job.go b/staging/src/k8s.io/client-go/informers/batch/v1/job.go index 1ab68fef445..ad3b2c6b7f6 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v1/job.go +++ b/staging/src/k8s.io/client-go/informers/batch/v1/job.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + batch_v1 "k8s.io/api/batch/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/batch/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // JobInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go index 03a4e1598fe..933930fd943 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go +++ b/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + batch_v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/batch/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // CronJobInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go b/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go index c0ea43a5f06..94e47e18013 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go +++ b/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go @@ -19,6 +19,8 @@ limitations under the License. package v2alpha1 import ( + time "time" + batch_v2alpha1 "k8s.io/api/batch/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v2alpha1 "k8s.io/client-go/listers/batch/v2alpha1" cache "k8s.io/client-go/tools/cache" - time "time" ) // CronJobInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go index 6d356371e61..3fd95ed837b 100644 --- a/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go +++ b/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + certificates_v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/certificates/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // CertificateSigningRequestInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go b/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go index d03cf2f801f..5c1b4b1724c 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ComponentStatusInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/configmap.go b/staging/src/k8s.io/client-go/informers/core/v1/configmap.go index 6a903a45f18..db58f2ab3b9 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/configmap.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/configmap.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ConfigMapInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go b/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go index a6e36051e2f..a184e558699 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // EndpointsInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/event.go b/staging/src/k8s.io/client-go/informers/core/v1/event.go index 7c33925303a..02712adc54d 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/event.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/event.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // EventInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go b/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go index fe938ec2d1c..82b21ba1b53 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // LimitRangeInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/namespace.go b/staging/src/k8s.io/client-go/informers/core/v1/namespace.go index da2af6206e3..ea36024bcd0 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/namespace.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/namespace.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // NamespaceInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/node.go b/staging/src/k8s.io/client-go/informers/core/v1/node.go index d9df80bcbb7..66ae4548a83 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/node.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/node.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // NodeInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go index 13c154dff7d..df8a09d395d 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // PersistentVolumeInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go index 8017ce1dca1..2fbef8a6dff 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // PersistentVolumeClaimInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/pod.go b/staging/src/k8s.io/client-go/informers/core/v1/pod.go index 1d1c6fc0f46..b70999bb7a2 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/pod.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/pod.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // PodInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go b/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go index f7151e30f5b..4e2fde7348a 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // PodTemplateInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go index 68127512ad2..9c0bac1c70e 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ReplicationControllerInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go b/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go index d80660022b6..c1f593c2f98 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ResourceQuotaInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/secret.go b/staging/src/k8s.io/client-go/informers/core/v1/secret.go index 69874bc4cdb..c45f1c738ba 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/secret.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/secret.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // SecretInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/service.go b/staging/src/k8s.io/client-go/informers/core/v1/service.go index 082b5925638..f4cd7091fe1 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/service.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/service.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ServiceInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go b/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go index b5cd59b4f1f..99729262529 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ServiceAccountInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go b/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go index fb44ff8c548..223e0e3dd6f 100644 --- a/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go +++ b/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + time "time" + events_v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/events/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // EventInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go index 911f51edf5c..4017b8d3e84 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // DaemonSetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go index e37ec7eff2a..01794d7c72d 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + time "time" + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // DeploymentInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go index 7ba79228aed..d5c9f167152 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // IngressInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go index 52126d57774..7cf8d5bbffa 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + time "time" + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // PodSecurityPolicyInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go index 23e50435f2e..b826a99c303 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ReplicaSetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/factory.go b/staging/src/k8s.io/client-go/informers/factory.go index 642e86cc0b0..4a9296d07fa 100644 --- a/staging/src/k8s.io/client-go/informers/factory.go +++ b/staging/src/k8s.io/client-go/informers/factory.go @@ -19,6 +19,10 @@ limitations under the License. package informers import ( + reflect "reflect" + sync "sync" + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -39,9 +43,6 @@ import ( storage "k8s.io/client-go/informers/storage" kubernetes "k8s.io/client-go/kubernetes" cache "k8s.io/client-go/tools/cache" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/client-go/informers/generic.go b/staging/src/k8s.io/client-go/informers/generic.go index c1d01bec2f5..9fd51d78c3c 100644 --- a/staging/src/k8s.io/client-go/informers/generic.go +++ b/staging/src/k8s.io/client-go/informers/generic.go @@ -20,6 +20,7 @@ package informers import ( "fmt" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/api/apps/v1" diff --git a/staging/src/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go index 8410cca2caf..ce6e8994462 100644 --- a/staging/src/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go +++ 
b/staging/src/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. package internalinterfaces import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" kubernetes "k8s.io/client-go/kubernetes" cache "k8s.io/client-go/tools/cache" - time "time" ) type NewInformerFunc func(kubernetes.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go index d3233c265b1..7d091cbb996 100644 --- a/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go +++ b/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + networking_v1 "k8s.io/api/networking/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/networking/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // NetworkPolicyInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go index 935c1b63efa..aefdc7681ea 100644 --- a/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go +++ b/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + policy_v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/policy/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // PodDisruptionBudgetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go index 9c747345343..0f83c32ce9f 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + rbac_v1 "k8s.io/api/rbac/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ClusterRoleInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go index 6d0c1a68ca2..f4bbe989897 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + rbac_v1 "k8s.io/api/rbac/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ClusterRoleBindingInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/role.go b/staging/src/k8s.io/client-go/informers/rbac/v1/role.go index 8ed43fb750f..3b50e182125 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/role.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + rbac_v1 "k8s.io/api/rbac/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // RoleInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go index 3bc54f08732..32af822f6fb 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + rbac_v1 "k8s.io/api/rbac/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // RoleBindingInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go index 2fa374454a9..b64bc8d43b3 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + time "time" + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ClusterRoleInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go index 2fa4e2d7ffb..ad8fe6007d4 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1alpha1 import ( + time "time" + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ClusterRoleBindingInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go index d4730a00405..56f19a03dff 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + time "time" + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" - time "time" ) // RoleInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go index abfae250887..22318e98294 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1alpha1 import ( + time "time" + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" - time "time" ) // RoleBindingInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go index 16e597d04dd..21fb3b6d956 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + time "time" + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ClusterRoleInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go index 4fee022cd86..3c78ac34c70 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // ClusterRoleBindingInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go index 39c37568ba7..3bd67d84bf4 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + time "time" + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // RoleInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go index 8ff036b0948..5180c1cffb7 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // RoleBindingInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go index 2a82206ea81..0aeee10a3c0 100644 --- a/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go +++ b/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + time "time" + scheduling_v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1alpha1 "k8s.io/client-go/listers/scheduling/v1alpha1" cache "k8s.io/client-go/tools/cache" - time "time" ) // PriorityClassInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go b/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go index b87f0e6138e..7e8140585c5 100644 --- a/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go +++ b/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1alpha1 import ( + time "time" + settings_v1alpha1 "k8s.io/api/settings/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1alpha1 "k8s.io/client-go/listers/settings/v1alpha1" cache "k8s.io/client-go/tools/cache" - time "time" ) // PodPresetInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go b/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go index c6338fe67cd..f356b590201 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + storage_v1 "k8s.io/api/storage/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" - time "time" ) // StorageClassInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go index 413dc4ed008..6b5eeb3c542 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1alpha1 import ( + time "time" + storage_v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" cache "k8s.io/client-go/tools/cache" - time "time" ) // VolumeAttachmentInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go index 0a17418fb52..af6641afb3c 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + time "time" + storage_v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -27,7 +29,6 @@ import ( kubernetes "k8s.io/client-go/kubernetes" v1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" - time "time" ) // StorageClassInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.conversion.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.conversion.go index 85dd5b954e6..f430e1b32e5 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.conversion.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.conversion.go @@ -21,10 +21,11 @@ limitations under the License. 
package v1 import ( + unsafe "unsafe" + conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" example "k8s.io/code-generator/_examples/apiserver/apis/example" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.conversion.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.conversion.go index 738d2d9bc84..432b17d446b 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.conversion.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.conversion.go @@ -21,10 +21,11 @@ limitations under the License. package v1 import ( + unsafe "unsafe" + conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" example2 "k8s.io/code-generator/_examples/apiserver/apis/example2" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go index 5ee4dddba20..07a154bcc83 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go @@ -17,6 +17,8 @@ limitations under the License. 
package scheme import ( + os "os" + announced "k8s.io/apimachinery/pkg/apimachinery/announced" registered "k8s.io/apimachinery/pkg/apimachinery/registered" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,7 +27,6 @@ import ( serializer "k8s.io/apimachinery/pkg/runtime/serializer" example "k8s.io/code-generator/_examples/apiserver/apis/example/install" secondexample "k8s.io/code-generator/_examples/apiserver/apis/example2/install" - os "os" ) var Scheme = runtime.NewScheme() diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go index 0beb10fa72c..8faaeb4d7e5 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( versioned "k8s.io/code-generator/_examples/apiserver/clientset/versioned" internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces" v1 "k8s.io/code-generator/_examples/apiserver/listers/example/v1" - time "time" ) // TestTypeInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go index c07397c98ab..a1574521c08 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( versioned "k8s.io/code-generator/_examples/apiserver/clientset/versioned" internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces" v1 "k8s.io/code-generator/_examples/apiserver/listers/example2/v1" - time "time" ) // TestTypeInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go index 6b77479ee51..4575655e1f2 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go @@ -19,6 +19,10 @@ limitations under the License. 
package externalversions import ( + reflect "reflect" + sync "sync" + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -27,9 +31,6 @@ import ( example "k8s.io/code-generator/_examples/apiserver/informers/externalversions/example" example2 "k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2" internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go index 6fd29b0b41f..4a22dcdfd6d 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go @@ -20,6 +20,7 @@ package externalversions import ( "fmt" + schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" v1 "k8s.io/code-generator/_examples/apiserver/apis/example/v1" diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go index 31df120f6a6..d659b6f64d1 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. 
package internalinterfaces import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" versioned "k8s.io/code-generator/_examples/apiserver/clientset/versioned" - time "time" ) type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go index 5c1c1fe8dc7..56619360b0c 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( clientset_internalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion" internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces" internalversion "k8s.io/code-generator/_examples/apiserver/listers/example/internalversion" - time "time" ) // TestTypeInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go index 2c00c3d8269..0024c91cd04 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go +++ 
b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( clientset_internalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion" internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces" internalversion "k8s.io/code-generator/_examples/apiserver/listers/example2/internalversion" - time "time" ) // TestTypeInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go index fee4e7910b3..69fd861aeed 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go @@ -19,6 +19,10 @@ limitations under the License. 
package internalversion import ( + reflect "reflect" + sync "sync" + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -27,9 +31,6 @@ import ( example "k8s.io/code-generator/_examples/apiserver/informers/internalversion/example" example2 "k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2" internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go index bddd60f7b72..3486e0cd36a 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go @@ -20,6 +20,7 @@ package internalversion import ( "fmt" + schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" example "k8s.io/code-generator/_examples/apiserver/apis/example" diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go index 973d73d1de3..1c770ab0cce 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. 
package internalinterfaces import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" internalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion" - time "time" ) type NewInformerFunc func(internalversion.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go index 7fe59ce3e3b..b5e307151db 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + time "time" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( versioned "k8s.io/code-generator/_examples/crd/clientset/versioned" internalinterfaces "k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces" v1 "k8s.io/code-generator/_examples/crd/listers/example/v1" - time "time" ) // TestTypeInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go index 303b3473642..b42802ffd27 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + time "time" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( versioned "k8s.io/code-generator/_examples/crd/clientset/versioned" internalinterfaces "k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces" v1 "k8s.io/code-generator/_examples/crd/listers/example2/v1" - time "time" ) // TestTypeInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/factory.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/factory.go index 2d0b35f9694..7a1c864f223 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/factory.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/factory.go @@ -19,6 +19,10 @@ limitations under the License. package externalversions import ( + reflect "reflect" + sync "sync" + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -27,9 +31,6 @@ import ( example "k8s.io/code-generator/_examples/crd/informers/externalversions/example" example2 "k8s.io/code-generator/_examples/crd/informers/externalversions/example2" internalinterfaces "k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/generic.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/generic.go index 25acc5078e1..f9459e9bced 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/generic.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/generic.go @@ -20,6 +20,7 @@ package 
externalversions import ( "fmt" + schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" v1 "k8s.io/code-generator/_examples/crd/apis/example/v1" diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go index b8da4f9fc75..d784ac1d208 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. package internalinterfaces import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" versioned "k8s.io/code-generator/_examples/crd/clientset/versioned" - time "time" ) type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go index 27385cbb226..51d62b2b877 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go @@ -21,10 +21,11 @@ limitations under the License. 
package v1beta1 import ( + unsafe "unsafe" + conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" apiregistration "k8s.io/kube-aggregator/pkg/apis/apiregistration" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/register.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/register.go index 37cf87b0984..5406c9f6b72 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/register.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/register.go @@ -17,6 +17,8 @@ limitations under the License. package scheme import ( + os "os" + announced "k8s.io/apimachinery/pkg/apimachinery/announced" registered "k8s.io/apimachinery/pkg/apimachinery/registered" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,7 +26,6 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" apiregistration "k8s.io/kube-aggregator/pkg/apis/apiregistration/install" - os "os" ) var Scheme = runtime.NewScheme() diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go index a73586143d8..e90c59384d8 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( clientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" internalinterfaces "k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces" v1beta1 "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1" - time "time" ) // APIServiceInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go index 321a5aea285..581c9fff97b 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go @@ -19,6 +19,10 @@ limitations under the License. package externalversions import ( + reflect "reflect" + sync "sync" + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -26,9 +30,6 @@ import ( clientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" apiregistration "k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration" internalinterfaces "k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/generic.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/generic.go index 2236d78af9a..f8ff52b1f9d 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/generic.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/generic.go @@ -20,6 
+20,7 @@ package externalversions import ( "fmt" + schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" v1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 8becb25b4e9..5b5774a2a7c 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. package internalinterfaces import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" clientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" - time "time" ) type NewInformerFunc func(clientset.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go index 6987342a674..21bcb0a30c7 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( internalclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces" internalversion "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion" - time "time" ) // APIServiceInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go index 6a524bfc76e..cc051e9d1bb 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go @@ -19,6 +19,10 @@ limitations under the License. 
package internalversion import ( + reflect "reflect" + sync "sync" + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -26,9 +30,6 @@ import ( internalclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset" apiregistration "k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration" internalinterfaces "k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go index 4ef2786d3c9..a81c0be9e10 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go @@ -20,6 +20,7 @@ package internalversion import ( "fmt" + schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" apiregistration "k8s.io/kube-aggregator/pkg/apis/apiregistration" diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go index 244ae5c9df1..e3621d938db 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. 
package internalinterfaces import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" internalclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset" - time "time" ) type NewInformerFunc func(internalclientset.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.conversion.go index e169bb0d739..5a573b10e12 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.conversion.go @@ -21,10 +21,11 @@ limitations under the License. package v1beta1 import ( + unsafe "unsafe" + conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" custom_metrics "k8s.io/metrics/pkg/apis/custom_metrics" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go index cb04bee119e..6fcee2f6c78 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. 
package v1alpha1 import ( + unsafe "unsafe" + v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" metrics "k8s.io/metrics/pkg/apis/metrics" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go index f6a1c47e773..65f23a787ab 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go @@ -21,11 +21,12 @@ limitations under the License. package v1beta1 import ( + unsafe "unsafe" + v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" metrics "k8s.io/metrics/pkg/apis/metrics" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.conversion.go index ad1f0d83c4b..8d14fb0a5ef 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/zz_generated.conversion.go @@ -21,10 +21,11 @@ limitations under the License. 
package v1alpha1 import ( + unsafe "unsafe" + conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" wardle "k8s.io/sample-apiserver/pkg/apis/wardle" - unsafe "unsafe" ) func init() { diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go index a0fc576c663..007c94ceb5b 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme/register.go @@ -17,6 +17,8 @@ limitations under the License. package scheme import ( + os "os" + announced "k8s.io/apimachinery/pkg/apimachinery/announced" registered "k8s.io/apimachinery/pkg/apimachinery/registered" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,7 +26,6 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" wardle "k8s.io/sample-apiserver/pkg/apis/wardle/install" - os "os" ) var Scheme = runtime.NewScheme() diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/factory.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/factory.go index 498c864a190..eca47a09069 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/factory.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/factory.go @@ -19,6 +19,10 @@ limitations under the License. 
package externalversions import ( + reflect "reflect" + sync "sync" + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -26,9 +30,6 @@ import ( versioned "k8s.io/sample-apiserver/pkg/client/clientset/versioned" internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces" wardle "k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/generic.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/generic.go index 2ac41c341a2..92551a625c4 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/generic.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/generic.go @@ -20,6 +20,7 @@ package externalversions import ( "fmt" + schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" v1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 23cb3ff63ab..3b71c410a05 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. 
package internalinterfaces import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" versioned "k8s.io/sample-apiserver/pkg/client/clientset/versioned" - time "time" ) type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/fischer.go index dfefb18466b..3ea3ccc6eb0 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/fischer.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( versioned "k8s.io/sample-apiserver/pkg/client/clientset/versioned" internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces" v1alpha1 "k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1" - time "time" ) // FischerInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/flunder.go index df0cbd92946..7ce9dc9ff86 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/wardle/v1alpha1/flunder.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1alpha1 import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( versioned "k8s.io/sample-apiserver/pkg/client/clientset/versioned" internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces" v1alpha1 "k8s.io/sample-apiserver/pkg/client/listers/wardle/v1alpha1" - time "time" ) // FlunderInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go index 4fabf593a57..378b324f7c8 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/factory.go @@ -19,6 +19,10 @@ limitations under the License. package internalversion import ( + reflect "reflect" + sync "sync" + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -26,9 +30,6 @@ import ( internalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion" internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces" wardle "k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go index 18c673087b4..7ec4041dc19 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/generic.go @@ -20,6 +20,7 @@ package internalversion 
import ( "fmt" + schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" wardle "k8s.io/sample-apiserver/pkg/apis/wardle" diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go index d6dc4ab7964..e7e568824b9 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. package internalinterfaces import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" internalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion" - time "time" ) type NewInformerFunc func(internalversion.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go index 8c2b54daf60..89d998f8486 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/fischer.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( clientset_internalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion" internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces" internalversion "k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion" - time "time" ) // FischerInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go index b9019e3e107..c32019b48b7 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/wardle/internalversion/flunder.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( clientset_internalversion "k8s.io/sample-apiserver/pkg/client/clientset/internalversion" internalinterfaces "k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces" internalversion "k8s.io/sample-apiserver/pkg/client/listers/wardle/internalversion" - time "time" ) // FlunderInformer provides access to a shared informer and lister for diff --git a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/factory.go b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/factory.go index f85d3eece2f..b65e22edec2 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/factory.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/factory.go @@ -19,6 +19,10 @@ limitations under the License. 
package externalversions import ( + reflect "reflect" + sync "sync" + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -26,9 +30,6 @@ import ( versioned "k8s.io/sample-controller/pkg/client/clientset/versioned" internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces" samplecontroller "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller" - reflect "reflect" - sync "sync" - time "time" ) type sharedInformerFactory struct { diff --git a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/generic.go b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/generic.go index 68728f478a3..fccb9a20420 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/generic.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/generic.go @@ -20,6 +20,7 @@ package externalversions import ( "fmt" + schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1" diff --git a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 230e22f352b..fc89f84f25f 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -19,11 +19,12 @@ limitations under the License. 
package internalinterfaces import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" versioned "k8s.io/sample-controller/pkg/client/clientset/versioned" - time "time" ) type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer diff --git a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/foo.go b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/foo.go index 35068e2c63a..952ae604a15 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/foo.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1/foo.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + time "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -27,7 +29,6 @@ import ( versioned "k8s.io/sample-controller/pkg/client/clientset/versioned" internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces" v1alpha1 "k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1" - time "time" ) // FooInformer provides access to a shared informer and lister for From 1ac4be5841021faf14a17ead0173f4b5bae4b621 Mon Sep 17 00:00:00 2001 From: Dong Liu Date: Wed, 3 Jan 2018 15:14:20 +0800 Subject: [PATCH 565/794] Remove exists return value from getVirtualMachine --- .../providers/azure/azure_backoff.go | 7 +++---- .../providers/azure/azure_controllerCommon.go | 21 +++++++------------ .../providers/azure/azure_util.go | 17 ++++----------- .../providers/azure/azure_wrap.go | 17 +++++++-------- 4 files changed, 23 insertions(+), 39 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go 
b/pkg/cloudprovider/providers/azure/azure_backoff.go index 0494201b6b2..7fda4cc78ae 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -42,12 +42,11 @@ func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) { } // GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry -func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, bool, error) { +func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) { var machine compute.VirtualMachine - var exists bool err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - machine, exists, retryErr = az.getVirtualMachine(name) + machine, retryErr = az.getVirtualMachine(name) if retryErr != nil { glog.Errorf("backoff: failure, will retry,err=%v", retryErr) return false, nil @@ -55,7 +54,7 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua glog.V(2).Infof("backoff: success") return true, nil }) - return machine, exists, err + return machine, err } // VirtualMachineClientGetWithRetry invokes az.VirtualMachinesClient.Get with exponential backoff retry diff --git a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go index fdb78e2af7b..0c4e22d4f96 100644 --- a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go +++ b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go @@ -71,12 +71,11 @@ type controllerCommon struct { // AttachDisk attaches a vhd to vm // the vhd must exist, can be identified by diskName, diskURI, and lun. 
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { - vm, exists, err := c.cloud.getVirtualMachine(nodeName) + vm, err := c.cloud.getVirtualMachine(nodeName) if err != nil { return err - } else if !exists { - return cloudprovider.InstanceNotFound } + disks := *vm.StorageProfile.DataDisks if isManagedDisk { disks = append(disks, @@ -143,8 +142,8 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri // DetachDiskByName detaches a vhd from host // the vhd can be identified by diskName or diskURI func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { - vm, exists, err := c.cloud.getVirtualMachine(nodeName) - if err != nil || !exists { + vm, err := c.cloud.getVirtualMachine(nodeName) + if err != nil { // if host doesn't exist, no need to detach glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName) return nil @@ -202,11 +201,9 @@ func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName t // GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) { - vm, exists, err := c.cloud.getVirtualMachine(nodeName) + vm, err := c.cloud.getVirtualMachine(nodeName) if err != nil { return -1, err - } else if !exists { - return -1, cloudprovider.InstanceNotFound } disks := *vm.StorageProfile.DataDisks for _, disk := range disks { @@ -224,11 +221,9 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N // GetNextDiskLun searches all vhd attachment on the host and find unused lun // return -1 if all luns are used func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) { - vm, exists, err := c.cloud.getVirtualMachine(nodeName) + vm, err := 
c.cloud.getVirtualMachine(nodeName) if err != nil { return -1, err - } else if !exists { - return -1, cloudprovider.InstanceNotFound } used := make([]bool, maxLUN) disks := *vm.StorageProfile.DataDisks @@ -251,8 +246,8 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N for _, diskName := range diskNames { attached[diskName] = false } - vm, exists, err := c.cloud.getVirtualMachine(nodeName) - if !exists { + vm, err := c.cloud.getVirtualMachine(nodeName) + if err == cloudprovider.InstanceNotFound { // if host doesn't exist, no need to detach glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.", nodeName, diskNames) diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_util.go index 77996a0e5b5..d0b062846d6 100644 --- a/pkg/cloudprovider/providers/azure/azure_util.go +++ b/pkg/cloudprovider/providers/azure/azure_util.go @@ -387,15 +387,14 @@ func newAvailabilitySet(az *Cloud) VMSet { // not exist or is no longer running. 
func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) { var machine compute.VirtualMachine - var exists bool var err error as.operationPollRateLimiter.Accept() - machine, exists, err = as.getVirtualMachine(types.NodeName(name)) + machine, err = as.getVirtualMachine(types.NodeName(name)) if err != nil { if as.CloudProviderBackoff { glog.V(2).Infof("InstanceID(%s) backing off", name) - machine, exists, err = as.GetVirtualMachineWithRetry(types.NodeName(name)) + machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name)) if err != nil { glog.V(2).Infof("InstanceID(%s) abort backoff", name) return "", err @@ -403,8 +402,6 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) } else { return "", err } - } else if !exists { - return "", cloudprovider.InstanceNotFound } return *machine.ID, nil } @@ -422,12 +419,10 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod // GetInstanceTypeByNodeName gets the instance type by node name. func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) { - machine, exists, err := as.getVirtualMachine(types.NodeName(name)) + machine, err := as.getVirtualMachine(types.NodeName(name)) if err != nil { glog.Errorf("error: as.GetInstanceTypeByNodeName(%s), as.getVirtualMachine(%s) err=%v", name, name, err) return "", err - } else if !exists { - return "", cloudprovider.InstanceNotFound } return string(machine.HardwareProfile.VMSize), nil @@ -435,15 +430,11 @@ func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error // GetZoneByNodeName gets zone from instance view. 
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { - vm, exists, err := as.getVirtualMachine(types.NodeName(name)) + vm, err := as.getVirtualMachine(types.NodeName(name)) if err != nil { return cloudprovider.Zone{}, err } - if !exists { - return cloudprovider.Zone{}, cloudprovider.InstanceNotFound - } - failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain)) zone := cloudprovider.Zone{ FailureDomain: failureDomain, diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index 9d66293ae6b..85b67c456b0 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -27,6 +27,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/cloudprovider" ) // checkExistsFromError inspects an error and returns a true if err is nil, @@ -71,9 +72,7 @@ type vmRequest struct { /// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache /// The service side has throttling control that delays responses if there're multiple requests onto certain vm /// resource request in short period. 
-func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, exists bool, err error) { - var realErr error - +func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, err error) { vmName := string(nodeName) cachedRequest, err := vmCache.GetOrCreate(vmName, func() interface{} { @@ -83,7 +82,7 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM } }) if err != nil { - return compute.VirtualMachine{}, false, err + return compute.VirtualMachine{}, err } request := cachedRequest.(*vmRequest) @@ -102,22 +101,22 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, compute.InstanceView) glog.V(10).Infof("VirtualMachinesClient.Get(%s): end", vmName) - exists, realErr = checkResourceExistsFromError(err) + exists, realErr := checkResourceExistsFromError(err) if realErr != nil { - return vm, false, realErr + return vm, realErr } if !exists { - return vm, false, nil + return vm, cloudprovider.InstanceNotFound } request.vm = &vm } - return vm, exists, err + return vm, nil } glog.V(6).Infof("getVirtualMachine hits cache for(%s)", vmName) - return *request.vm, true, nil + return *request.vm, nil } func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) { From a8127df3bb396717b4fb2a7f688c1f98e6bef6b4 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Wed, 20 Dec 2017 12:17:44 +0100 Subject: [PATCH 566/794] Simplify extra initializer logic --- .../src/k8s.io/apiserver/pkg/server/config.go | 5 -- .../apiserver/pkg/server/options/admission.go | 10 ++++ .../pkg/server/options/recommended.go | 57 ++++++------------- .../sample-apiserver/pkg/cmd/server/start.go | 7 +-- 4 files changed, 30 insertions(+), 49 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index cd98717d2c0..877071ad3b2 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -188,13 +188,9 @@ type Config struct { PublicAddress net.IP } -type AdmissionInitializersInitFunc func() (admission.PluginInitializer, error) - type RecommendedConfig struct { Config - ExtraAdmissionInitializersInitFunc []AdmissionInitializersInitFunc - // SharedInformerFactory provides shared informers for Kubernetes resources. This value is set by // RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. It uses an in-cluster client config // by default, or the kubeconfig given with kubeconfig command line flag. @@ -263,7 +259,6 @@ func NewConfig(codecs serializer.CodecFactory) *Config { func NewRecommendedConfig(codecs serializer.CodecFactory) *RecommendedConfig { return &RecommendedConfig{ Config: *NewConfig(codecs), - ExtraAdmissionInitializersInitFunc: make([]AdmissionInitializersInitFunc, 0), } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go index 30716869146..2565c0a69cf 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go @@ -104,6 +104,16 @@ func (a *AdmissionOptions) ApplyTo( return nil } + // Admission need scheme to construct admission initializer. 
+ if scheme == nil { + return fmt.Errorf("admission depends on a scheme, it cannot be nil") + } + + // Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig. + if informers == nil { + return fmt.Errorf("admission depends on a Kubernetes core API shared informer, it cannot be nil") + } + pluginNames := a.PluginNames if len(a.PluginNames) == 0 { pluginNames = a.enabledPluginNames() diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go index eff7cde33d3..829647a2490 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go @@ -17,8 +17,6 @@ limitations under the License. package options import ( - "fmt" - "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" @@ -38,19 +36,24 @@ type RecommendedOptions struct { Audit *AuditOptions Features *FeatureOptions CoreAPI *CoreAPIOptions - Admission *AdmissionOptions + + // ExtraAdmissionInitializers is called once after all ApplyTo from the options above, to pass the returned + // admission plugin initializers to Admission.ApplyTo. 
+ ExtraAdmissionInitializers func() ([]admission.PluginInitializer, error) + Admission *AdmissionOptions } func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptions { return &RecommendedOptions{ - Etcd: NewEtcdOptions(storagebackend.NewDefaultConfig(prefix, codec)), - SecureServing: NewSecureServingOptions(), - Authentication: NewDelegatingAuthenticationOptions(), - Authorization: NewDelegatingAuthorizationOptions(), - Audit: NewAuditOptions(), - Features: NewFeatureOptions(), - CoreAPI: NewCoreAPIOptions(), - Admission: NewAdmissionOptions(), + Etcd: NewEtcdOptions(storagebackend.NewDefaultConfig(prefix, codec)), + SecureServing: NewSecureServingOptions(), + Authentication: NewDelegatingAuthenticationOptions(), + Authorization: NewDelegatingAuthorizationOptions(), + Audit: NewAuditOptions(), + Features: NewFeatureOptions(), + CoreAPI: NewCoreAPIOptions(), + ExtraAdmissionInitializers: func() ([]admission.PluginInitializer, error) { return nil, nil }, + Admission: NewAdmissionOptions(), } } @@ -90,34 +93,10 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig, scheme *r if err := o.CoreAPI.ApplyTo(config); err != nil { return err } - if o.Admission != nil { - // Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig. - if o.CoreAPI == nil { - return fmt.Errorf("admission depends on CoreAPI, so it must be set") - } - // Admission need scheme to construct admission initializer. - if scheme == nil { - return fmt.Errorf("admission depends on shceme, so it must be set") - } - - pluginInitializers := []admission.PluginInitializer{} - for _, initFunc := range config.ExtraAdmissionInitializersInitFunc { - intializer, err := initFunc() - if err != nil { - return err - } - pluginInitializers = append(pluginInitializers, intializer) - } - - err := o.Admission.ApplyTo( - &config.Config, - config.SharedInformerFactory, - config.ClientConfig, - scheme, - pluginInitializers...) 
- if err != nil { - return err - } + if initializers, err := o.ExtraAdmissionInitializers(); err != nil { + return err + } else if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, scheme, initializers...); err != nil { + return err } return nil diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index b375d46e609..e38eb749359 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -104,18 +104,15 @@ func (o *WardleServerOptions) Config() (*apiserver.Config, error) { serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - admissionInitializerInitFunc := func() (admission.PluginInitializer, error) { + o.RecommendedOptions.ExtraAdmissionInitializers = func() ([]admission.PluginInitializer, error) { client, err := clientset.NewForConfig(serverConfig.LoopbackClientConfig) if err != nil { return nil, err } informerFactory := informers.NewSharedInformerFactory(client, serverConfig.LoopbackClientConfig.Timeout) o.SharedInformerFactory = informerFactory - return wardleinitializer.New(informerFactory), nil + return []admission.PluginInitializer{wardleinitializer.New(informerFactory)}, nil } - - serverConfig.ExtraAdmissionInitializersInitFunc = []genericapiserver.AdmissionInitializersInitFunc{admissionInitializerInitFunc} - if err := o.RecommendedOptions.ApplyTo(serverConfig, apiserver.Scheme); err != nil { return nil, err } From 5a3cfd27ed818b971f36032d85e2de2db586a4e5 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Tue, 2 Jan 2018 09:32:04 +0100 Subject: [PATCH 567/794] Pass RecommendedConfig into ExtraAdmissionInitializers --- .../k8s.io/apiserver/pkg/server/options/recommended.go | 6 +++--- .../k8s.io/sample-apiserver/pkg/cmd/server/start.go | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go index 829647a2490..148bfbdce5a 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go @@ -39,7 +39,7 @@ type RecommendedOptions struct { // ExtraAdmissionInitializers is called once after all ApplyTo from the options above, to pass the returned // admission plugin initializers to Admission.ApplyTo. - ExtraAdmissionInitializers func() ([]admission.PluginInitializer, error) + ExtraAdmissionInitializers func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) Admission *AdmissionOptions } @@ -52,7 +52,7 @@ func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptio Audit: NewAuditOptions(), Features: NewFeatureOptions(), CoreAPI: NewCoreAPIOptions(), - ExtraAdmissionInitializers: func() ([]admission.PluginInitializer, error) { return nil, nil }, + ExtraAdmissionInitializers: func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) { return nil, nil }, Admission: NewAdmissionOptions(), } } @@ -93,7 +93,7 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig, scheme *r if err := o.CoreAPI.ApplyTo(config); err != nil { return err } - if initializers, err := o.ExtraAdmissionInitializers(); err != nil { + if initializers, err := o.ExtraAdmissionInitializers(config); err != nil { return err } else if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, scheme, initializers...); err != nil { return err diff --git 
a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index e38eb749359..0a6188305a1 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -102,17 +102,17 @@ func (o *WardleServerOptions) Config() (*apiserver.Config, error) { return nil, fmt.Errorf("error creating self-signed certificates: %v", err) } - serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - - o.RecommendedOptions.ExtraAdmissionInitializers = func() ([]admission.PluginInitializer, error) { - client, err := clientset.NewForConfig(serverConfig.LoopbackClientConfig) + o.RecommendedOptions.ExtraAdmissionInitializers = func(c *genericapiserver.RecommendedConfig) ([]admission.PluginInitializer, error) { + client, err := clientset.NewForConfig(c.LoopbackClientConfig) if err != nil { return nil, err } - informerFactory := informers.NewSharedInformerFactory(client, serverConfig.LoopbackClientConfig.Timeout) + informerFactory := informers.NewSharedInformerFactory(client, c.LoopbackClientConfig.Timeout) o.SharedInformerFactory = informerFactory return []admission.PluginInitializer{wardleinitializer.New(informerFactory)}, nil } + + serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) if err := o.RecommendedOptions.ApplyTo(serverConfig, apiserver.Scheme); err != nil { return nil, err } From 71d3cffd1fb4896bd65b3896d8e3a22f956f58f0 Mon Sep 17 00:00:00 2001 From: Dong Liu Date: Wed, 3 Jan 2018 15:16:16 +0800 Subject: [PATCH 568/794] Remove VirtualMachineClientGetWithRetry --- .../providers/azure/azure_backoff.go | 17 ----------------- pkg/cloudprovider/providers/azure/azure_util.go | 4 +--- 2 files changed, 1 insertion(+), 20 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 7fda4cc78ae..5eb622950eb 100644 --- 
a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -57,23 +57,6 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua return machine, err } -// VirtualMachineClientGetWithRetry invokes az.VirtualMachinesClient.Get with exponential backoff retry -func (az *Cloud) VirtualMachineClientGetWithRetry(resourceGroup, vmName string, types compute.InstanceViewTypes) (compute.VirtualMachine, error) { - var machine compute.VirtualMachine - err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - var retryErr error - az.operationPollRateLimiter.Accept() - machine, retryErr = az.VirtualMachinesClient.Get(resourceGroup, vmName, types) - if retryErr != nil { - glog.Errorf("backoff: failure, will retry,err=%v", retryErr) - return false, nil - } - glog.V(2).Infof("backoff: success") - return true, nil - }) - return machine, err -} - // VirtualMachineClientListWithRetry invokes az.VirtualMachinesClient.List with exponential backoff retry func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, error) { allNodes := []compute.VirtualMachine{} diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_util.go index d0b062846d6..c7fcef10ebd 100644 --- a/pkg/cloudprovider/providers/azure/azure_util.go +++ b/pkg/cloudprovider/providers/azure/azure_util.go @@ -564,13 +564,11 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (netw var machine compute.VirtualMachine as.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.Get(%q): start", nodeName) - machine, err := as.VirtualMachineClientGetWithRetry(as.ResourceGroup, nodeName, "") + machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName)) if err != nil { glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) return network.Interface{}, err } - 
glog.V(10).Infof("VirtualMachinesClient.Get(%q): end", nodeName) primaryNicID, err := getPrimaryInterfaceID(machine) if err != nil { From eb35cee9f8ef4aa782746ba3b62fb3b44135f5cf Mon Sep 17 00:00:00 2001 From: jolestar Date: Wed, 16 Aug 2017 16:40:32 +0800 Subject: [PATCH 569/794] Remove unused command waitfordetach from flex volume driver --- pkg/volume/flexvolume/detacher.go | 13 ------------- pkg/volume/flexvolume/driver-call.go | 1 - 2 files changed, 14 deletions(-) diff --git a/pkg/volume/flexvolume/detacher.go b/pkg/volume/flexvolume/detacher.go index d2aba85b93f..c55ffbfa473 100644 --- a/pkg/volume/flexvolume/detacher.go +++ b/pkg/volume/flexvolume/detacher.go @@ -19,7 +19,6 @@ package flexvolume import ( "fmt" "os" - "time" "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" @@ -47,18 +46,6 @@ func (d *flexVolumeDetacher) Detach(volumeName string, hostName types.NodeName) return err } -// WaitForDetach is part of the volume.Detacher interface. -func (d *flexVolumeDetacher) WaitForDetach(devicePath string, timeout time.Duration) error { - call := d.plugin.NewDriverCallWithTimeout(waitForDetachCmd, timeout) - call.Append(devicePath) - - _, err := call.Run() - if isCmdNotSupportedErr(err) { - return (*detacherDefaults)(d).WaitForDetach(devicePath, timeout) - } - return err -} - // UnmountDevice is part of the volume.Detacher interface. 
func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error { diff --git a/pkg/volume/flexvolume/driver-call.go b/pkg/volume/flexvolume/driver-call.go index 98e5640224e..5b089df07a6 100644 --- a/pkg/volume/flexvolume/driver-call.go +++ b/pkg/volume/flexvolume/driver-call.go @@ -39,7 +39,6 @@ const ( mountDeviceCmd = "mountdevice" detachCmd = "detach" - waitForDetachCmd = "waitfordetach" unmountDeviceCmd = "unmountdevice" mountCmd = "mount" From 6cf819165f6cb00faf06b9627c48d9de5d3791df Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Wed, 3 Jan 2018 16:58:12 +0800 Subject: [PATCH 570/794] Double check before setKubeletConfiguration --- test/e2e_node/util.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index 0d00f88944e..bc08f03d27f 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -107,6 +107,10 @@ func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(ini framework.ExpectNoError(err) newCfg := oldCfg.DeepCopy() updateFunction(newCfg) + if reflect.DeepEqual(*newCfg, *oldCfg) { + return + } + framework.ExpectNoError(setKubeletConfiguration(f, newCfg)) }) AfterEach(func() { From a4786fc8b082de7a46ba890c98a802a67646fcea Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 3 Jan 2018 08:54:49 +0000 Subject: [PATCH 571/794] prefer /dev/disk/azure/scsi1/ over by-id for azure disk remove string conversion --- pkg/volume/azure_dd/azure_common_linux.go | 33 ++++++++++++----------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/pkg/volume/azure_dd/azure_common_linux.go b/pkg/volume/azure_dd/azure_common_linux.go index 792e4cbae55..3c19b9244b5 100644 --- a/pkg/volume/azure_dd/azure_common_linux.go +++ b/pkg/volume/azure_dd/azure_common_linux.go @@ -46,13 +46,14 @@ func listAzureDiskPath(io ioHandler) []string { return azureDiskList } -// getDiskIDByPath get disk id by device name from /dev/disk/by-id -func getDiskIDByPath(io ioHandler, devName string) 
(string, error) { - diskIDPath := "/dev/disk/by-id/" - dirs, err := io.ReadDir(diskIDPath) +// getDiskLinkByDevName get disk link by device name from devLinkPath, e.g. /dev/disk/azure/, /dev/disk/by-id/ +func getDiskLinkByDevName(io ioHandler, devLinkPath, devName string) (string, error) { + dirs, err := io.ReadDir(devLinkPath) + glog.V(12).Infof("azureDisk - begin to find %s from %s", devName, devLinkPath) if err == nil { for _, f := range dirs { - diskPath := diskIDPath + f.Name() + diskPath := devLinkPath + f.Name() + glog.V(12).Infof("azureDisk - begin to Readlink: %s", diskPath) link, linkErr := io.Readlink(diskPath) if linkErr != nil { glog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr) @@ -62,9 +63,9 @@ func getDiskIDByPath(io ioHandler, devName string) (string, error) { return diskPath, nil } } - return "", fmt.Errorf("device name(%s) is not found under %s", devName, diskIDPath) + return "", fmt.Errorf("device name(%s) is not found under %s", devName, devLinkPath) } - return "", fmt.Errorf("read %s error: %v", diskIDPath, err) + return "", fmt.Errorf("read %s error: %v", devLinkPath, err) } func scsiHostRescan(io ioHandler, exec mount.Exec) { @@ -154,20 +155,22 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st devName := dev[0].Name() for _, diskName := range azureDisks { glog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName) - if string(devName) == diskName { + if devName == diskName { found = true break } } if !found { - diskPath, err := getDiskIDByPath(io, devName) - if err == nil { - glog.V(4).Infof("azureDisk - found %s by %s under /dev/disk/by-id", diskPath, devName) - return diskPath, nil - } else { - glog.Warningf("azureDisk - getDiskIDByPath by %s failed, error: %v", devName, err) - return "/dev/" + devName, nil + devLinkPaths := []string{"/dev/disk/azure/scsi1/", "/dev/disk/by-id/"} + for _, devLinkPath := range devLinkPaths { + diskPath, err := 
getDiskLinkByDevName(io, devLinkPath, devName) + if err == nil { + glog.V(4).Infof("azureDisk - found %s by %s under %s", diskPath, devName, devLinkPath) + return diskPath, nil + } + glog.Warningf("azureDisk - getDiskLinkByDevName by %s under %s failed, error: %v", devName, devLinkPath, err) } + return "/dev/" + devName, nil } } } From 2eded687beab7daf509c412722ad2a7e56c13076 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20K=C5=82obuszewski?= Date: Wed, 3 Jan 2018 11:46:13 +0100 Subject: [PATCH 572/794] Bump fluentd-gcp version --- cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index dd516db77e0..fa039413d39 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -1,13 +1,13 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: - name: fluentd-gcp-v2.0.11 + name: fluentd-gcp-v2.0.12 namespace: kube-system labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v2.0.11 + version: v2.0.12 spec: updateStrategy: type: RollingUpdate @@ -16,7 +16,7 @@ spec: labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" - version: v2.0.11 + version: v2.0.12 # This annotation ensures that fluentd does not get evicted if the node # supports critical pod annotation based priority scheme. # Note that this does not guarantee admission on the nodes (#40573). 
@@ -27,7 +27,7 @@ spec: dnsPolicy: Default containers: - name: fluentd-gcp - image: gcr.io/google-containers/fluentd-gcp:2.0.11 + image: gcr.io/google-containers/fluentd-gcp:2.0.12 env: - name: FLUENTD_ARGS value: --no-supervisor -q From 300ceadf3984578a3002713098589130276cb92b Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Tue, 2 Jan 2018 16:18:54 -0800 Subject: [PATCH 573/794] More default fixups for Kubelet flags Similar to #57621, this fixes some other Kubelet flags that were defaulted wrong. --- cmd/kubelet/app/options/options.go | 6 +++--- pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go | 3 +++ pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go | 2 +- .../kubeletconfig/v1alpha1/zz_generated.conversion.go | 8 ++++++-- .../apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go | 9 +++++++++ 5 files changed, 22 insertions(+), 6 deletions(-) diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 78b43a5f80d..a0562b20c33 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -371,8 +371,8 @@ func (f *KubeletFlags) AddFlags(fs *pflag.FlagSet) { // AddKubeletConfigFlags adds flags for a specific kubeletconfig.KubeletConfiguration to the specified FlagSet func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfiguration) { - fs.BoolVar(&c.FailSwapOn, "fail-swap-on", true, "Makes the Kubelet fail to start if swap is enabled on the node. ") - fs.BoolVar(&c.FailSwapOn, "experimental-fail-swap-on", true, "DEPRECATED: please use --fail-swap-on instead.") + fs.BoolVar(&c.FailSwapOn, "fail-swap-on", c.FailSwapOn, "Makes the Kubelet fail to start if swap is enabled on the node. ") + fs.BoolVar(&c.FailSwapOn, "experimental-fail-swap-on", c.FailSwapOn, "DEPRECATED: please use --fail-swap-on instead.") fs.MarkDeprecated("experimental-fail-swap-on", "This flag is deprecated and will be removed in future releases. 
please use --fail-swap-on instead.") fs.StringVar(&c.PodManifestPath, "pod-manifest-path", c.PodManifestPath, "Path to the directory containing pod manifest files to run, or the path to a single pod manifest file. Files starting with dots will be ignored.") @@ -424,7 +424,7 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat fs.Int32Var(&c.EventBurst, "event-burst", c.EventBurst, "Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0") fs.BoolVar(&c.EnableDebuggingHandlers, "enable-debugging-handlers", c.EnableDebuggingHandlers, "Enables server endpoints for log collection and local running of containers and commands") - fs.BoolVar(&c.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled") + fs.BoolVar(&c.EnableContentionProfiling, "contention-profiling", c.EnableContentionProfiling, "Enable lock contention profiling, if profiling is enabled") fs.Int32Var(&c.CAdvisorPort, "cadvisor-port", c.CAdvisorPort, "The port of the localhost cAdvisor endpoint (set to 0 to disable)") fs.Int32Var(&c.HealthzPort, "healthz-port", c.HealthzPort, "The port of the localhost healthz endpoint (set to 0 to disable)") fs.Var(componentconfig.IPVar{Val: &c.HealthzBindAddress}, "healthz-bind-address", "The IP address for the healthz server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go index 94780a441f7..030892ea908 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go @@ -211,6 +211,9 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { temp := int32(DefaultIPTablesDropBit) obj.IPTablesDropBit = &temp } + if obj.FailSwapOn == nil { + 
obj.FailSwapOn = utilpointer.BoolPtr(true) + } if obj.CgroupsPerQOS == nil { temp := true obj.CgroupsPerQOS = &temp diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go index 96f92d4465f..bc49f3361a2 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go @@ -258,7 +258,7 @@ type KubeletConfiguration struct { // featureGates is a map of feature names to bools that enable or disable alpha/experimental features. FeatureGates map[string]bool `json:"featureGates,omitempty"` // Tells the Kubelet to fail to start if swap is enabled on the node. - FailSwapOn bool `json:"failSwapOn,omitempty"` + FailSwapOn *bool `json:"failSwapOn,omitempty"` /* following flags are meant for Node Allocatable */ diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go index fd5d707b072..cabede5eb10 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go @@ -250,7 +250,9 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura return err } out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) - out.FailSwapOn = in.FailSwapOn + if err := v1.Convert_Pointer_bool_To_bool(&in.FailSwapOn, &out.FailSwapOn, s); err != nil { + return err + } out.SystemReserved = *(*map[string]string)(unsafe.Pointer(&in.SystemReserved)) out.KubeReserved = *(*map[string]string)(unsafe.Pointer(&in.KubeReserved)) out.SystemReservedCgroup = in.SystemReservedCgroup @@ -375,7 +377,9 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura return err } out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) - out.FailSwapOn = in.FailSwapOn + if err := v1.Convert_bool_To_Pointer_bool(&in.FailSwapOn, &out.FailSwapOn, s); err != nil { 
+ return err + } out.SystemReserved = *(*map[string]string)(unsafe.Pointer(&in.SystemReserved)) out.KubeReserved = *(*map[string]string)(unsafe.Pointer(&in.KubeReserved)) out.SystemReservedCgroup = in.SystemReservedCgroup diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go index 123cee3f73c..6165a73880b 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go @@ -349,6 +349,15 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { (*out)[key] = val } } + if in.FailSwapOn != nil { + in, out := &in.FailSwapOn, &out.FailSwapOn + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } if in.SystemReserved != nil { in, out := &in.SystemReserved, &out.SystemReserved *out = make(map[string]string, len(*in)) From dddee9139263775670d3ed94b788c8abc06d5c2b Mon Sep 17 00:00:00 2001 From: Michalis Kargakis Date: Wed, 3 Jan 2018 17:17:39 +0100 Subject: [PATCH 574/794] Add myself in kubeadm reviewers --- cmd/kubeadm/OWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/kubeadm/OWNERS b/cmd/kubeadm/OWNERS index ef4129c0b80..8df7e7cd3ee 100644 --- a/cmd/kubeadm/OWNERS +++ b/cmd/kubeadm/OWNERS @@ -15,3 +15,4 @@ reviewers: - kad - xiangpengzhao - mattmoyer +- kargakis From a57d49271355d63d058463e0b723180a3c463bad Mon Sep 17 00:00:00 2001 From: Andrew Pilloud Date: Thu, 28 Dec 2017 19:19:11 -0800 Subject: [PATCH 575/794] Move DefaultMaxEBSVolumes constant into scheduler A constant only used by the scheduler lives in the aws cloudprovider package. Moving the constant into the only package where it is used reduces import bloat. 
--- pkg/cloudprovider/providers/aws/aws.go | 5 ----- plugin/pkg/scheduler/algorithm/predicates/BUILD | 1 - plugin/pkg/scheduler/algorithm/predicates/predicates.go | 8 ++++++-- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index 41a4a4b5041..2c2a953f117 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -247,11 +247,6 @@ const MaxReadThenCreateRetries = 30 // need hardcoded defaults. const DefaultVolumeType = "gp2" -// DefaultMaxEBSVolumes is the limit for volumes attached to an instance. -// Amazon recommends no more than 40; the system root volume uses at least one. -// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits -const DefaultMaxEBSVolumes = 39 - // Used to call RecognizeWellKnownRegions just once var once sync.Once diff --git a/plugin/pkg/scheduler/algorithm/predicates/BUILD b/plugin/pkg/scheduler/algorithm/predicates/BUILD index ebaf3171942..ed72a2878a3 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/BUILD +++ b/plugin/pkg/scheduler/algorithm/predicates/BUILD @@ -19,7 +19,6 @@ go_library( deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper/qos:go_default_library", - "//pkg/cloudprovider/providers/aws:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/volume/util:go_default_library", diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index eaac3e77b97..67c4caf7b45 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -35,7 +35,6 @@ import ( "k8s.io/client-go/util/workqueue" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" - 
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -68,6 +67,11 @@ const ( NoVolumeZoneConflictPred = "NoVolumeZoneConflict" CheckNodeMemoryPressurePred = "CheckNodeMemoryPressure" CheckNodeDiskPressurePred = "CheckNodeDiskPressure" + + // DefaultMaxEBSVolumes is the limit for volumes attached to an instance. + // Amazon recommends no more than 40; the system root volume uses at least one. + // See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits + DefaultMaxEBSVolumes = 39 // DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE // GCE instances can have up to 16 PD volumes attached. DefaultMaxGCEPDVolumes = 16 @@ -283,7 +287,7 @@ func NewMaxPDVolumeCountPredicate(filterName string, pvInfo PersistentVolumeInfo case EBSVolumeFilterType: filter = EBSVolumeFilter - maxVolumes = getMaxVols(aws.DefaultMaxEBSVolumes) + maxVolumes = getMaxVols(DefaultMaxEBSVolumes) case GCEPDVolumeFilterType: filter = GCEPDVolumeFilter maxVolumes = getMaxVols(DefaultMaxGCEPDVolumes) From 2a401c7ec7e37f5013098b4da30dc6b908092922 Mon Sep 17 00:00:00 2001 From: Kai Chen Date: Wed, 3 Jan 2018 12:34:15 -0800 Subject: [PATCH 576/794] Improve comments for Azure Blob Disk Controller --- .../azure/azure_blobDiskController.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index 5a8e7058a03..1fa074d5bb7 100644 --- a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -303,7 +303,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { func (c *BlobDiskController) setUniqueStrings() { uniqueString := c.common.resourceGroup + c.common.location + 
c.common.subscriptionID hash := MakeCRC32(uniqueString) - //used to generate a unqie container name used by this cluster PVC + //used to generate a unique container name used by this cluster PVC defaultContainerName = hash storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash) @@ -359,13 +359,13 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e var err error var blobSvc azstorage.BlobStorageClient - // short circut the check via local cache + // short circuit the check via local cache // we are forgiving the fact that account may not be in cache yet if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated { return nil } - // not cached, check existance and readiness + // not cached, check existence and readiness bExist, provisionState, _ := c.getStorageAccountState(storageAccountName) // account does not exist @@ -392,7 +392,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e c.accounts[storageAccountName].isValidating = 0 }() - // short circut the check again. + // short circuit the check again. if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated { return nil } @@ -559,9 +559,9 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam continue } - // note: we compute avge stratified by type. - // this to enable user to grow per SA type to avoid low - //avg utilization on one account type skewing all data. + // note: we compute avg stratified by type. + // this is to enable user to grow per SA type to avoid low + // avg utilization on one account type skewing all data. 
if v.saType == storageAccountType { // compute average @@ -574,7 +574,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // empty account if dCount == 0 { glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name) - return v.name, nil // shortcircut, avg is good and no need to adjust + return v.name, nil // short circuit, avg is good and no need to adjust } // if this account is less allocated if dCount < maxDiskCount { @@ -600,7 +600,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts) aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing) - // avg are not create and we should craete more accounts if we can + // avg are not create and we should create more accounts if we can if aboveAvg && countAccounts < maxStorageAccounts { glog.V(2).Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) SAName = getAccountNameForNum(c.getNextAccountNum()) From 66c1c5e22272098faaeb4ae20b1d3478dec31ed4 Mon Sep 17 00:00:00 2001 From: Jiaying Zhang Date: Wed, 13 Dec 2017 17:56:51 -0800 Subject: [PATCH 577/794] Invalidate resource requirements on extended resources with only request set. --- pkg/apis/core/validation/validation.go | 6 +++--- pkg/apis/core/validation/validation_test.go | 22 +++++++++++++++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 655d0bf2163..b158654ce5b 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -4366,14 +4366,14 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa // Check that request <= limit. 
limitQuantity, exists := requirements.Limits[resourceName] if exists { - // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. + // For non overcommitable resources, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) { allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName))) } else if quantity.Cmp(limitQuantity) > 0 { allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName))) } - } else if resourceName == core.ResourceNvidiaGPU { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", core.ResourceNvidiaGPU))) + } else if !helper.IsOvercommitAllowed(resourceName) { + allErrs = append(allErrs, field.Required(limPath, "Limit must be set for non overcommitable resources")) } } diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 25dd774ab86..0a92675acd1 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -6848,6 +6848,28 @@ func TestValidatePod(t *testing.T) { }, }, }, + "invalid extended resource requirement without limit": { + expectedError: "Limit must be set", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"}, + Spec: core.PodSpec{ + Containers: []core.Container{ + { + Name: "invalid", + Image: "image", + ImagePullPolicy: "IfNotPresent", + Resources: core.ResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceName("example.com/a"): resource.MustParse("2"), + }, + }, + }, + }, + RestartPolicy: core.RestartPolicyAlways, + DNSPolicy: core.DNSClusterFirst, + }, + }, + }, "invalid fractional extended resource in container request": { expectedError: "must be 
an integer", spec: core.Pod{ From 0d5b9dd3d407f486c51ddf9d0c0782c049c631ec Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Wed, 3 Jan 2018 16:40:57 -0500 Subject: [PATCH 578/794] Avoid error on closed pipe --- hack/lib/test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/lib/test.sh b/hack/lib/test.sh index b9e28912fa0..86c49105c48 100644 --- a/hack/lib/test.sh +++ b/hack/lib/test.sh @@ -265,7 +265,7 @@ kube::test::if_has_string() { local message=$1 local match=$2 - if echo "$message" | grep -q "$match"; then + if grep -q "${match}" <<< "${message}"; then echo "Successful" echo "message:$message" echo "has:$match" @@ -283,7 +283,7 @@ kube::test::if_has_not_string() { local message=$1 local match=$2 - if echo "$message" | grep -q "$match"; then + if grep -q "${match}" <<< "${message}"; then echo "FAIL!" echo "message:$message" echo "has:$match" From fe17387f1b87a31b4589838e24f6f51f342e5119 Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Thu, 21 Dec 2017 16:32:07 -0800 Subject: [PATCH 579/794] periodically check whether assigned kubelet config should become last-known-good --- pkg/kubelet/kubeletconfig/controller.go | 27 ++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/pkg/kubelet/kubeletconfig/controller.go b/pkg/kubelet/kubeletconfig/controller.go index be93e3d4db9..2bd7c31b79a 100644 --- a/pkg/kubelet/kubeletconfig/controller.go +++ b/pkg/kubelet/kubeletconfig/controller.go @@ -141,15 +141,11 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) { cc.configOK.Set(status.CurLocalMessage, reason, apiv1.ConditionTrue) } - // when the trial period is over, the assigned config becomes the last-known-good - if trial, err := cc.inTrial(assigned.ConfigTrialDuration.Duration); err != nil { - utillog.Errorf("failed to check trial period for assigned config, error: %v", err) - } else if !trial { - utillog.Infof("assigned config passed trial period, will set as 
last-known-good") - if err := cc.graduateAssignedToLastKnownGood(); err != nil { - utillog.Errorf("failed to set last-known-good to assigned config, error: %v", err) - } - } + // update the last-known-good config if necessary, and start a timer that + // periodically checks whether the last-known good needs to be updated + // we only do this when the assigned config loads and passes validation + // wait.Forever will call the func once before starting the timer + go wait.Forever(func() { cc.checkTrial(assigned.ConfigTrialDuration.Duration) }, 10*time.Second) return assigned, nil } // Assert: the assigned config failed to load, parse, or validate @@ -319,6 +315,19 @@ func (cc *Controller) initializeDynamicConfigDir() error { return cc.checkpointStore.Initialize() } +// checkTrial checks whether the trial duration has passed, and updates the last-known-good config if necessary +func (cc *Controller) checkTrial(duration time.Duration) { + // when the trial period is over, the assigned config becomes the last-known-good + if trial, err := cc.inTrial(duration); err != nil { + utillog.Errorf("failed to check trial period for assigned config, error: %v", err) + } else if !trial { + utillog.Infof("assigned config passed trial period, will set as last-known-good") + if err := cc.graduateAssignedToLastKnownGood(); err != nil { + utillog.Errorf("failed to set last-known-good to assigned config, error: %v", err) + } + } +} + // inTrial returns true if the time elapsed since the last modification of the current config does not exceed `trialDur`, false otherwise func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) { now := time.Now() From dd74a397009d35c3f9a49f44b6eb0cf3fa53395e Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Thu, 21 Dec 2017 09:27:30 -0800 Subject: [PATCH 580/794] Make ConfigOK status messages more human readable by including the API path to the object instead of the UID --- .../kubeletconfig/checkpoint/download.go | 4 +-- 
.../kubeletconfig/checkpoint/download_test.go | 4 +-- pkg/kubelet/kubeletconfig/configsync.go | 6 ++-- pkg/kubelet/kubeletconfig/controller.go | 16 +++++----- pkg/kubelet/kubeletconfig/status/status.go | 30 +++++++++---------- test/e2e_node/dynamic_kubelet_config_test.go | 25 +++++++++------- 6 files changed, 45 insertions(+), 40 deletions(-) diff --git a/pkg/kubelet/kubeletconfig/checkpoint/download.go b/pkg/kubelet/kubeletconfig/checkpoint/download.go index 9778f318b7c..9b516a0eb72 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/download.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/download.go @@ -128,13 +128,13 @@ func (r *remoteConfigMap) Download(client clientset.Interface) (Checkpoint, stri // get the ConfigMap via namespace/name, there doesn't seem to be a way to get it by UID cm, err := client.CoreV1().ConfigMaps(r.source.ConfigMapRef.Namespace).Get(r.source.ConfigMapRef.Name, metav1.GetOptions{}) if err != nil { - reason = fmt.Sprintf(status.FailSyncReasonDownloadFmt, r.source.ConfigMapRef.Name, r.source.ConfigMapRef.Namespace) + reason = fmt.Sprintf(status.FailSyncReasonDownloadFmt, r.APIPath()) return nil, reason, fmt.Errorf("%s, error: %v", reason, err) } // ensure that UID matches the UID on the reference, the ObjectReference must be unambiguous if r.source.ConfigMapRef.UID != cm.UID { - reason = fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, r.source.ConfigMapRef.UID, cm.UID) + reason = fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, r.source.ConfigMapRef.UID, r.APIPath(), cm.UID) return nil, reason, fmt.Errorf(reason) } diff --git a/pkg/kubelet/kubeletconfig/checkpoint/download_test.go b/pkg/kubelet/kubeletconfig/checkpoint/download_test.go index ccca6c3161c..345319fd37c 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/download_test.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/download_test.go @@ -131,11 +131,11 @@ func TestRemoteConfigMapDownload(t *testing.T) { // object doesn't exist {"object doesn't exist", 
&remoteConfigMap{&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "bogus", Namespace: "namespace", UID: "bogus"}}}, - nil, "failed to download ConfigMap"}, + nil, "not found"}, // UID of downloaded object doesn't match UID of referent found via namespace/name {"UID is incorrect for namespace/name", &remoteConfigMap{&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "bogus"}}}, - nil, "does not match UID"}, + nil, "does not match"}, // successful download {"object exists and reference is correct", &remoteConfigMap{&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "uid"}}}, diff --git a/pkg/kubelet/kubeletconfig/configsync.go b/pkg/kubelet/kubeletconfig/configsync.go index ce6b7e38bc3..b128e95b92b 100644 --- a/pkg/kubelet/kubeletconfig/configsync.go +++ b/pkg/kubelet/kubeletconfig/configsync.go @@ -139,7 +139,7 @@ func (cc *Controller) checkpointConfigSource(client clientset.Interface, source // if the checkpoint already exists, skip downloading if ok, err := cc.checkpointStore.Exists(uid); err != nil { - reason := fmt.Sprintf(status.FailSyncReasonCheckpointExistenceFmt, uid) + reason := fmt.Sprintf(status.FailSyncReasonCheckpointExistenceFmt, source.APIPath(), uid) return reason, fmt.Errorf("%s, error: %v", reason, err) } else if ok { utillog.Infof("checkpoint already exists for object with UID %q, skipping download", uid) @@ -155,7 +155,7 @@ func (cc *Controller) checkpointConfigSource(client clientset.Interface, source // save err = cc.checkpointStore.Save(checkpoint) if err != nil { - reason := fmt.Sprintf(status.FailSyncReasonSaveCheckpointFmt, checkpoint.UID()) + reason := fmt.Sprintf(status.FailSyncReasonSaveCheckpointFmt, source.APIPath(), checkpoint.UID()) return reason, fmt.Errorf("%s, error: %v", reason, err) } @@ -170,7 +170,7 @@ func (cc *Controller) setCurrentConfig(source checkpoint.RemoteConfigSource) (bo if source == nil { 
return false, status.FailSyncReasonSetCurrentLocal, err } - return false, fmt.Sprintf(status.FailSyncReasonSetCurrentUIDFmt, source.UID()), err + return false, fmt.Sprintf(status.FailSyncReasonSetCurrentUIDFmt, source.APIPath(), source.UID()), err } return updated, "", nil } diff --git a/pkg/kubelet/kubeletconfig/controller.go b/pkg/kubelet/kubeletconfig/controller.go index 9b959b4c94b..83569622b81 100644 --- a/pkg/kubelet/kubeletconfig/controller.go +++ b/pkg/kubelet/kubeletconfig/controller.go @@ -137,7 +137,7 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) { if err == nil { // set the status to indicate we will use the assigned config if curSource != nil { - cc.configOK.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curSource.UID()), reason, apiv1.ConditionTrue) + cc.configOK.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curSource.APIPath()), reason, apiv1.ConditionTrue) } else { cc.configOK.Set(status.CurLocalMessage, reason, apiv1.ConditionTrue) } @@ -171,7 +171,7 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) { // set the status to indicate that we had to roll back to the lkg for the reason reported when we tried to load the assigned config if lkgSource != nil { - cc.configOK.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgSource.UID()), reason, apiv1.ConditionFalse) + cc.configOK.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgSource.APIPath()), reason, apiv1.ConditionFalse) } else { cc.configOK.Set(status.LkgLocalMessage, reason, apiv1.ConditionFalse) } @@ -271,14 +271,14 @@ func (cc *Controller) loadAssignedConfig(local *kubeletconfig.KubeletConfigurati // load from checkpoint checkpoint, err := cc.checkpointStore.Load(curUID) if err != nil { - return nil, src, fmt.Sprintf(status.CurFailLoadReasonFmt, curUID), err + return nil, src, fmt.Sprintf(status.CurFailLoadReasonFmt, src.APIPath()), err } cur, err := checkpoint.Parse() if err != nil { - return nil, src, 
fmt.Sprintf(status.CurFailParseReasonFmt, curUID), err + return nil, src, fmt.Sprintf(status.CurFailParseReasonFmt, src.APIPath()), err } if err := validation.ValidateKubeletConfiguration(cur); err != nil { - return nil, src, fmt.Sprintf(status.CurFailValidateReasonFmt, curUID), err + return nil, src, fmt.Sprintf(status.CurFailValidateReasonFmt, src.APIPath()), err } return cur, src, status.CurRemoteOkayReason, nil } @@ -301,14 +301,14 @@ func (cc *Controller) loadLastKnownGoodConfig(local *kubeletconfig.KubeletConfig // load from checkpoint checkpoint, err := cc.checkpointStore.Load(lkgUID) if err != nil { - return nil, src, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailLoadReasonFmt, lkgUID), err) + return nil, src, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailLoadReasonFmt, src.APIPath()), err) } lkg, err := checkpoint.Parse() if err != nil { - return nil, src, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailParseReasonFmt, lkgUID), err) + return nil, src, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailParseReasonFmt, src.APIPath()), err) } if err := validation.ValidateKubeletConfiguration(lkg); err != nil { - return nil, src, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailValidateReasonFmt, lkgUID), err) + return nil, src, fmt.Errorf("%s, error: %v", fmt.Sprintf(status.LkgFailValidateReasonFmt, src.APIPath()), err) } return lkg, src, nil } diff --git a/pkg/kubelet/kubeletconfig/status/status.go b/pkg/kubelet/kubeletconfig/status/status.go index 94598cae6eb..797f7bbcb5a 100644 --- a/pkg/kubelet/kubeletconfig/status/status.go +++ b/pkg/kubelet/kubeletconfig/status/status.go @@ -40,14 +40,14 @@ const ( NotDynamicLocalReason = "dynamic config is currently disabled by omission of --dynamic-config-dir Kubelet flag" // CurLocalMessage indicates that the Kubelet is using its local config, which consists of defaults, flags, and/or local files - CurLocalMessage = "using current (local)" + CurLocalMessage = "using current: local" // 
LkgLocalMessage indicates that the Kubelet is using its local config, which consists of defaults, flags, and/or local files - LkgLocalMessage = "using last-known-good (local)" + LkgLocalMessage = "using last-known-good: local" // CurRemoteMessageFmt indicates the Kubelet is using its current config, which is from an API source - CurRemoteMessageFmt = "using current (UID: %q)" + CurRemoteMessageFmt = "using current: %s" // LkgRemoteMessageFmt indicates the Kubelet is using its last-known-good config, which is from an API source - LkgRemoteMessageFmt = "using last-known-good (UID: %q)" + LkgRemoteMessageFmt = "using last-known-good: %s" // CurLocalOkayReason indicates that the Kubelet is using its local config CurLocalOkayReason = "when the config source is nil, the Kubelet uses its local config" @@ -55,20 +55,20 @@ const ( CurRemoteOkayReason = "passing all checks" // CurFailLoadReasonFmt indicates that the Kubelet failed to load the current config checkpoint for an API source - CurFailLoadReasonFmt = "failed to load current (UID: %q)" + CurFailLoadReasonFmt = "failed to load current: %s" // CurFailParseReasonFmt indicates that the Kubelet failed to parse the current config checkpoint for an API source - CurFailParseReasonFmt = "failed to parse current (UID: %q)" + CurFailParseReasonFmt = "failed to parse current: %s" // CurFailValidateReasonFmt indicates that the Kubelet failed to validate the current config checkpoint for an API source - CurFailValidateReasonFmt = "failed to validate current (UID: %q)" + CurFailValidateReasonFmt = "failed to validate current: %s" // LkgFail*ReasonFmt reasons are currently used to print errors in the Kubelet log, but do not appear in Node.Status.Conditions // LkgFailLoadReasonFmt indicates that the Kubelet failed to load the last-known-good config checkpoint for an API source - LkgFailLoadReasonFmt = "failed to load last-known-good (UID: %q)" + LkgFailLoadReasonFmt = "failed to load last-known-good: %s" // LkgFailParseReasonFmt 
indicates that the Kubelet failed to parse the last-known-good config checkpoint for an API source - LkgFailParseReasonFmt = "failed to parse last-known-good (UID: %q)" + LkgFailParseReasonFmt = "failed to parse last-known-good: %s" // LkgFailValidateReasonFmt indicates that the Kubelet failed to validate the last-known-good config checkpoint for an API source - LkgFailValidateReasonFmt = "failed to validate last-known-good (UID: %q)" + LkgFailValidateReasonFmt = "failed to validate last-known-good: %s" // FailSyncReasonFmt is used when the system couldn't sync the config, due to a malformed Node.Spec.ConfigSource, a download failure, etc. FailSyncReasonFmt = "failed to sync, reason: %s" @@ -78,21 +78,21 @@ const ( FailSyncReasonPartialObjectReference = "invalid ObjectReference, all of UID, Name, and Namespace must be specified" // FailSyncReasonUIDMismatchFmt is used when there is a UID mismatch between the referenced and downloaded ConfigMaps, // this can happen because objects must be downloaded by namespace/name, rather than by UID - FailSyncReasonUIDMismatchFmt = "invalid ObjectReference, UID %q does not match UID of downloaded ConfigMap %q" + FailSyncReasonUIDMismatchFmt = "invalid ConfigSource.ConfigMapRef.UID: %s does not match %s.UID: %s" // FailSyncReasonDownloadFmt is used when the download fails, e.g. due to network issues - FailSyncReasonDownloadFmt = "failed to download ConfigMap with name %q from namespace %q" + FailSyncReasonDownloadFmt = "failed to download: %s" // FailSyncReasonInformer is used when the informer fails to report the Node object FailSyncReasonInformer = "failed to read Node from informer object cache" // FailSyncReasonReset is used when we can't reset the local configuration references, e.g. due to filesystem issues FailSyncReasonReset = "failed to reset to local config" // FailSyncReasonCheckpointExistenceFmt is used when we can't determine if a checkpoint already exists, e.g. 
due to filesystem issues - FailSyncReasonCheckpointExistenceFmt = "failed to determine whether object with UID %q was already checkpointed" + FailSyncReasonCheckpointExistenceFmt = "failed to determine whether object %s with UID %s was already checkpointed" // FailSyncReasonSaveCheckpointFmt is used when we can't save a checkpoint, e.g. due to filesystem issues - FailSyncReasonSaveCheckpointFmt = "failed to save config checkpoint for object with UID %q" + FailSyncReasonSaveCheckpointFmt = "failed to save config checkpoint for object %s with UID %s" // FailSyncReasonSetCurrentDefault is used when we can't set the current config checkpoint to the local default, e.g. due to filesystem issues FailSyncReasonSetCurrentLocal = "failed to set current config checkpoint to local config" // FailSyncReasonSetCurrentUIDFmt is used when we can't set the current config checkpoint to a checkpointed object, e.g. due to filesystem issues - FailSyncReasonSetCurrentUIDFmt = "failed to set current config checkpoint to object with UID %q" + FailSyncReasonSetCurrentUIDFmt = "failed to set current config checkpoint to object %s with UID %s" // EmptyMessage is a placeholder in the case that we accidentally set the condition's message to the empty string. // Doing so can result in a partial patch, and thus a confusing status; this makes it clear that the message was not provided. 
diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index 2ab9f7d8195..09debcee4fb 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -86,7 +86,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Namespace: originalConfigMap.Namespace, Name: originalConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, - Message: fmt.Sprintf(status.CurRemoteMessageFmt, originalConfigMap.UID), + Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(originalConfigMap)), Reason: status.CurRemoteOkayReason}, expectConfig: originalKC, }, false) @@ -162,7 +162,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Name: correctConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, Message: "", - Reason: fmt.Sprintf(status.FailSyncReasonFmt, fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, "foo", correctConfigMap.UID))}, + Reason: fmt.Sprintf(status.FailSyncReasonFmt, fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, "foo", configMapAPIPath(correctConfigMap), correctConfigMap.UID))}, expectConfig: nil, event: false, }, @@ -174,7 +174,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Namespace: correctConfigMap.Namespace, Name: correctConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, - Message: fmt.Sprintf(status.CurRemoteMessageFmt, correctConfigMap.UID), + Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(correctConfigMap)), Reason: status.CurRemoteOkayReason}, expectConfig: correctKC, event: true, @@ -188,7 +188,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Name: failParseConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: 
apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, Message: status.LkgLocalMessage, - Reason: fmt.Sprintf(status.CurFailParseReasonFmt, failParseConfigMap.UID)}, + Reason: fmt.Sprintf(status.CurFailParseReasonFmt, configMapAPIPath(failParseConfigMap))}, expectConfig: nil, event: true, }, @@ -201,7 +201,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Name: failValidateConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, Message: status.LkgLocalMessage, - Reason: fmt.Sprintf(status.CurFailValidateReasonFmt, failValidateConfigMap.UID)}, + Reason: fmt.Sprintf(status.CurFailValidateReasonFmt, configMapAPIPath(failValidateConfigMap))}, expectConfig: nil, event: true, }, @@ -244,7 +244,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Namespace: lkgConfigMap.Namespace, Name: lkgConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, - Message: fmt.Sprintf(status.CurRemoteMessageFmt, lkgConfigMap.UID), + Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(lkgConfigMap)), Reason: status.CurRemoteOkayReason}, expectConfig: lkgKC, event: true, @@ -257,8 +257,8 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Namespace: badConfigMap.Namespace, Name: badConfigMap.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse, - Message: fmt.Sprintf(status.LkgRemoteMessageFmt, lkgConfigMap.UID), - Reason: fmt.Sprintf(status.CurFailParseReasonFmt, badConfigMap.UID)}, + Message: fmt.Sprintf(status.LkgRemoteMessageFmt, configMapAPIPath(lkgConfigMap)), + Reason: fmt.Sprintf(status.CurFailParseReasonFmt, configMapAPIPath(badConfigMap))}, expectConfig: lkgKC, event: true, }, @@ -294,7 +294,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Namespace: cm1.Namespace, Name: cm1.Name}}, 
expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, - Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm1.UID), + Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(cm1)), Reason: status.CurRemoteOkayReason}, expectConfig: kc1, event: true, @@ -306,7 +306,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube Namespace: cm2.Namespace, Name: cm2.Name}}, expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue, - Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm2.UID), + Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(cm2)), Reason: status.CurRemoteOkayReason}, expectConfig: kc2, event: true, @@ -488,3 +488,8 @@ func checkEvent(f *framework.Framework, desc string, expect *apiv1.NodeConfigSou return nil }, timeout, interval).Should(BeNil()) } + +// constructs the expected SelfLink for a config map +func configMapAPIPath(cm *apiv1.ConfigMap) string { + return fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", cm.Namespace, cm.Name) +} From d65005bdb2c3e057e215320a557f1d3f96a7eb21 Mon Sep 17 00:00:00 2001 From: David McMahon Date: Wed, 3 Jan 2018 17:46:03 -0800 Subject: [PATCH 581/794] Honor make variable OUT_DIR. 
--- cluster/images/hyperkube/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cluster/images/hyperkube/Makefile b/cluster/images/hyperkube/Makefile index d7a8c7d936b..ccecb9b80e0 100644 --- a/cluster/images/hyperkube/Makefile +++ b/cluster/images/hyperkube/Makefile @@ -19,7 +19,8 @@ REGISTRY?=gcr.io/google-containers ARCH?=amd64 -HYPERKUBE_BIN?=_output/dockerized/bin/linux/$(ARCH)/hyperkube +OUT_DIR?=_output +HYPERKUBE_BIN?=$(OUT_DIR)/dockerized/bin/linux/$(ARCH)/hyperkube BASEIMAGE=gcr.io/google-containers/debian-hyperkube-base-$(ARCH):0.8 TEMP_DIR:=$(shell mktemp -d -t hyperkubeXXXXXX) From 64a7c60e00a1f6cf92710415e0e3dee133ebab7c Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Thu, 30 Nov 2017 14:34:36 +0800 Subject: [PATCH 582/794] validate admission-control param --- cmd/kube-apiserver/app/options/validation.go | 3 +++ .../apiserver/pkg/server/options/admission.go | 13 +++++++++++++ 2 files changed, 16 insertions(+) diff --git a/cmd/kube-apiserver/app/options/validation.go b/cmd/kube-apiserver/app/options/validation.go index fb937bdf7db..481552d3b29 100644 --- a/cmd/kube-apiserver/app/options/validation.go +++ b/cmd/kube-apiserver/app/options/validation.go @@ -66,6 +66,9 @@ func (options *ServerRunOptions) Validate() []error { if errs := options.Audit.Validate(); len(errs) > 0 { errors = append(errors, errs...) } + if errs := options.Admission.Validate(); len(errs) > 0 { + errors = append(errors, errs...) + } if errs := options.InsecureServing.Validate("insecure-port"); len(errs) > 0 { errors = append(errors, errs...) 
} diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go index 6232567f7a4..9466254b4d6 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/admission/initializer" admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" @@ -129,7 +130,19 @@ func (a *AdmissionOptions) ApplyTo( } func (a *AdmissionOptions) Validate() []error { + if a == nil { + return nil + } + errs := []error{} + + registeredPlugins := sets.NewString(a.Plugins.Registered()...) + for _, name := range a.PluginNames { + if !registeredPlugins.Has(name) { + errs = append(errs, fmt.Errorf("admission-control plugin %q is invalid", name)) + } + } + return errs } From 8ef2a8737168566ac47af12dfefff5b802b6061f Mon Sep 17 00:00:00 2001 From: Serguei Bezverkhi Date: Fri, 22 Dec 2017 17:10:56 -0500 Subject: [PATCH 583/794] Move local PV negative scheduling tests to integratiom Closes: #56088 --- test/integration/scheduler/BUILD | 1 + .../scheduler/local-pv-neg-affinity_test.go | 322 ++++++++++++++++++ .../scheduler/volume_binding_test.go | 145 +------- 3 files changed, 327 insertions(+), 141 deletions(-) create mode 100644 test/integration/scheduler/local-pv-neg-affinity_test.go diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index 9bebcc6055f..0fbdc42db6b 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -11,6 +11,7 @@ go_test( size = "large", srcs = [ "extender_test.go", + "local-pv-neg-affinity_test.go", "main_test.go", "predicates_test.go", "preemption_test.go", diff --git a/test/integration/scheduler/local-pv-neg-affinity_test.go b/test/integration/scheduler/local-pv-neg-affinity_test.go new 
file mode 100644 index 00000000000..589089f1a3e --- /dev/null +++ b/test/integration/scheduler/local-pv-neg-affinity_test.go @@ -0,0 +1,322 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +// This file tests the VolumeScheduling feature. + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" + "k8s.io/kubernetes/plugin/pkg/scheduler" + "k8s.io/kubernetes/plugin/pkg/scheduler/factory" + "k8s.io/kubernetes/test/integration/framework" +) + +const ( + affinityLabelKey = "kubernetes.io/hostname" +) + +func TestLocalPVNegativeAffinity(t *testing.T) { + config := setupNodes(t, "volume-scheduling", 3) + defer config.teardown() + + pv := makeHostBoundPV(t, "local-pv", classImmediate, "", "", "node-1") + pvc := makePVC("local-pvc", config.ns, &classImmediate, "") + + // Create PV + if _, 
err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) + } + + // Create PVC + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) + } + + nodeMarkers := []interface{}{ + markNodeAffinity, + markNodeSelector, + } + for i := 0; i < len(nodeMarkers); i++ { + podName := "local-pod-" + strconv.Itoa(i+1) + pod := makePod(podName, config.ns, []string{"local-pvc"}) + nodeMarkers[i].(func(*v1.Pod, string))(pod, "node-2") + // Create Pod + if _, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil { + t.Fatalf("Failed to create Pod %q: %v", pod.Name, err) + } + // Give time to shceduler to attempt to schedule pod + if err := waitForPodToSchedule(config.client, pod); err == nil { + t.Errorf("Failed as Pod %s was scheduled sucessfully but expected to fail", pod.Name) + } + // Deleting test pod + p, err := config.client.CoreV1().Pods(config.ns).Get(podName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to access Pod %s status: %v", podName, err) + } + if strings.Compare(string(p.Status.Phase), "Pending") != 0 { + t.Fatalf("Failed as Pod %s was in: %s state and not in expected: Pending state", podName, p.Status.Phase) + } + if strings.Compare(p.Status.Conditions[0].Reason, "Unschedulable") != 0 { + t.Fatalf("Failed as Pod %s reason was: %s but expected: Unschedulable", podName, p.Status.Conditions[0].Reason) + } + if !strings.Contains(p.Status.Conditions[0].Message, "MatchNodeSelector") || !strings.Contains(p.Status.Conditions[0].Message, "VolumeNodeAffinityConflict") { + t.Fatalf("Failed as Pod's %s failure message does not contain expected keywords: MatchNodeSelector, VolumeNodeAffinityConflict", podName) + } + if err := config.client.CoreV1().Pods(config.ns).Delete(podName, &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete 
Pod %s: %v", podName, err) + } + } +} + +func setupNodes(t *testing.T, nsName string, numberOfNodes int) *testConfig { + h := &framework.MasterHolder{Initialized: make(chan struct{})} + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + <-h.Initialized + h.M.GenericAPIServer.Handler.ServeHTTP(w, req) + })) + + // Enable feature gates + utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,PersistentLocalVolumes=true") + + // Build clientset and informers for controllers. + clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}}) + informers := informers.NewSharedInformerFactory(clientset, time.Second) + + // Start master + masterConfig := framework.NewIntegrationTestMasterConfig() + + _, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h) + ns := framework.CreateTestingNamespace(nsName, s, t).Name + + controllerCh := make(chan struct{}) + + // Start PV controller for volume binding. 
+ params := persistentvolume.ControllerParameters{ + KubeClient: clientset, + SyncPeriod: time.Hour, // test shouldn't need to resync + VolumePlugins: nil, // TODO; need later for dynamic provisioning + Cloud: nil, + ClusterName: "volume-test-cluster", + VolumeInformer: informers.Core().V1().PersistentVolumes(), + ClaimInformer: informers.Core().V1().PersistentVolumeClaims(), + ClassInformer: informers.Storage().V1().StorageClasses(), + EventRecorder: nil, // TODO: add one so we can test PV events + EnableDynamicProvisioning: true, + } + ctrl, err := persistentvolume.NewController(params) + if err != nil { + t.Fatalf("Failed to create PV controller: %v", err) + } + go ctrl.Run(controllerCh) + + // Start scheduler + configurator := factory.NewConfigFactory( + v1.DefaultSchedulerName, + clientset, + informers.Core().V1().Nodes(), + informers.Core().V1().Pods(), + informers.Core().V1().PersistentVolumes(), + informers.Core().V1().PersistentVolumeClaims(), + informers.Core().V1().ReplicationControllers(), + informers.Extensions().V1beta1().ReplicaSets(), + informers.Apps().V1beta1().StatefulSets(), + informers.Core().V1().Services(), + informers.Policy().V1beta1().PodDisruptionBudgets(), + informers.Storage().V1().StorageClasses(), + v1.DefaultHardPodAffinitySymmetricWeight, + true, // Enable EqualCache by default. + ) + + eventBroadcaster := record.NewBroadcaster() + sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) { + cfg.StopEverything = controllerCh + cfg.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientset.CoreV1().RESTClient()).Events("")}) + }) + if err != nil { + t.Fatalf("Failed to create scheduler: %v.", err) + } + + go sched.Run() + + // Waiting for all controller sync. 
+ informers.Start(controllerCh) + informers.WaitForCacheSync(controllerCh) + + // Create shared objects + // Create nodes + for i := 0; i < numberOfNodes; i++ { + testNode := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("node-%d", i+1), + Labels: map[string]string{affinityLabelKey: fmt.Sprintf("node-%d", i+1)}, + }, + Spec: v1.NodeSpec{Unschedulable: false}, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(podLimit, resource.DecimalSI), + }, + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + Reason: fmt.Sprintf("schedulable condition"), + LastHeartbeatTime: metav1.Time{Time: time.Now()}, + }, + }, + }, + } + if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil { + t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) + } + } + + // Create SCs + scs := []*storagev1.StorageClass{ + makeStorageClass(classImmediate, &modeImmediate), + } + for _, sc := range scs { + if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil { + t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) + } + } + + return &testConfig{ + client: clientset, + ns: ns, + stop: controllerCh, + teardown: func() { + clientset.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{}) + clientset.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(nil, metav1.ListOptions{}) + clientset.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + clientset.StorageV1().StorageClasses().DeleteCollection(nil, metav1.ListOptions{}) + clientset.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + close(controllerCh) + closeFn() + utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,LocalPersistentVolumes=false") + }, + } +} + +func makeHostBoundPV(t *testing.T, name, scName, pvcName, ns string, node string) *v1.PersistentVolume { + pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: 
map[string]string{}, + }, + Spec: v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse("5Gi"), + }, + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + }, + StorageClassName: scName, + PersistentVolumeSource: v1.PersistentVolumeSource{ + Local: &v1.LocalVolumeSource{ + Path: "/tmp/" + node + "/test-path", + }, + }, + }, + } + + if pvcName != "" { + pv.Spec.ClaimRef = &v1.ObjectReference{Name: pvcName, Namespace: ns} + } + + testNodeAffinity := &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: affinityLabelKey, + Operator: v1.NodeSelectorOpIn, + Values: []string{node}, + }, + }, + }, + }, + }, + } + err := helper.StorageNodeAffinityToAlphaAnnotation(pv.Annotations, testNodeAffinity) + if err != nil { + t.Fatalf("Setting storage node affinity failed: %v", err) + } + + return pv +} + +func markNodeAffinity(pod *v1.Pod, node string) { + affinity := &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/hostname", + Operator: v1.NodeSelectorOpIn, + Values: []string{node}, + }, + }, + }, + }, + }, + }, + } + pod.Spec.Affinity = affinity +} + +func markNodeSelector(pod *v1.Pod, node string) { + ns := map[string]string{ + "kubernetes.io/hostname": node, + } + pod.Spec.NodeSelector = ns +} + +func printIndentedJson(data interface{}) string { + var indentedJSON []byte + + indentedJSON, err := json.MarshalIndent(data, "", "\t") + if err != nil { + return fmt.Sprintf("JSON parse error: %v", err) + } + return string(indentedJSON) +} diff --git a/test/integration/scheduler/volume_binding_test.go b/test/integration/scheduler/volume_binding_test.go index e185ce72b77..35f60f81023 
100644 --- a/test/integration/scheduler/volume_binding_test.go +++ b/test/integration/scheduler/volume_binding_test.go @@ -20,10 +20,7 @@ package scheduler import ( "fmt" - "net/http" - "net/http/httptest" "testing" - "time" "github.com/golang/glog" @@ -31,17 +28,8 @@ import ( storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" - "k8s.io/kubernetes/plugin/pkg/scheduler" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" - "k8s.io/kubernetes/test/integration/framework" ) type testConfig struct { @@ -64,15 +52,15 @@ var ( ) const ( - labelKey = "test-label" - labelValue = "test-value" + labelKey = "kubernetes.io/hostname" + labelValue = "node-1" nodeName = "node1" podLimit = 100 volsPerPod = 5 ) func TestVolumeBinding(t *testing.T) { - config := setup(t, "volume-scheduling") + config := setupNodes(t, "volume-scheduling", 1) defer config.teardown() cases := map[string]struct { @@ -181,7 +169,7 @@ func TestVolumeBinding(t *testing.T) { // TestVolumeBindingStress creates pods, each with unbound PVCs. 
func TestVolumeBindingStress(t *testing.T) { - config := setup(t, "volume-binding-stress") + config := setupNodes(t, "volume-binding-stress", 1) defer config.teardown() // Create enough PVs and PVCs for all the pods @@ -235,131 +223,6 @@ func TestVolumeBindingStress(t *testing.T) { // TODO: validate events on Pods and PVCs } -func setup(t *testing.T, nsName string) *testConfig { - h := &framework.MasterHolder{Initialized: make(chan struct{})} - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - <-h.Initialized - h.M.GenericAPIServer.Handler.ServeHTTP(w, req) - })) - - // Enable feature gates - utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,PersistentLocalVolumes=true") - - // Build clientset and informers for controllers. - clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}}) - informers := informers.NewSharedInformerFactory(clientset, time.Second) - - // Start master - masterConfig := framework.NewIntegrationTestMasterConfig() - _, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h) - ns := framework.CreateTestingNamespace(nsName, s, t).Name - - controllerCh := make(chan struct{}) - - // Start PV controller for volume binding. 
- params := persistentvolume.ControllerParameters{ - KubeClient: clientset, - SyncPeriod: time.Hour, // test shouldn't need to resync - VolumePlugins: nil, // TODO; need later for dynamic provisioning - Cloud: nil, - ClusterName: "volume-test-cluster", - VolumeInformer: informers.Core().V1().PersistentVolumes(), - ClaimInformer: informers.Core().V1().PersistentVolumeClaims(), - ClassInformer: informers.Storage().V1().StorageClasses(), - EventRecorder: nil, // TODO: add one so we can test PV events - EnableDynamicProvisioning: true, - } - ctrl, err := persistentvolume.NewController(params) - if err != nil { - t.Fatalf("Failed to create PV controller: %v", err) - } - go ctrl.Run(controllerCh) - - // Start scheduler - configurator := factory.NewConfigFactory( - v1.DefaultSchedulerName, - clientset, - informers.Core().V1().Nodes(), - informers.Core().V1().Pods(), - informers.Core().V1().PersistentVolumes(), - informers.Core().V1().PersistentVolumeClaims(), - informers.Core().V1().ReplicationControllers(), - informers.Extensions().V1beta1().ReplicaSets(), - informers.Apps().V1beta1().StatefulSets(), - informers.Core().V1().Services(), - informers.Policy().V1beta1().PodDisruptionBudgets(), - informers.Storage().V1().StorageClasses(), - v1.DefaultHardPodAffinitySymmetricWeight, - true, // Enable EqualCache by default. - ) - - sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) { - cfg.StopEverything = controllerCh - cfg.Recorder = &record.FakeRecorder{} - }) - if err != nil { - t.Fatalf("Failed to create scheduler: %v.", err) - } - go sched.Run() - - // Waiting for all controller sync. 
- informers.Start(controllerCh) - informers.WaitForCacheSync(controllerCh) - - // Create shared objects - // Create node - testNode := &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - Labels: map[string]string{labelKey: labelValue}, - }, - Spec: v1.NodeSpec{Unschedulable: false}, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{ - v1.ResourcePods: *resource.NewQuantity(podLimit, resource.DecimalSI), - }, - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - Status: v1.ConditionTrue, - Reason: fmt.Sprintf("schedulable condition"), - LastHeartbeatTime: metav1.Time{Time: time.Now()}, - }, - }, - }, - } - if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil { - t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) - } - - // Create SCs - scs := []*storagev1.StorageClass{ - makeStorageClass(classWait, &modeWait), - makeStorageClass(classImmediate, &modeImmediate), - } - for _, sc := range scs { - if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil { - t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) - } - } - - return &testConfig{ - client: clientset, - ns: ns, - stop: controllerCh, - teardown: func() { - clientset.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{}) - clientset.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(nil, metav1.ListOptions{}) - clientset.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) - clientset.StorageV1().StorageClasses().DeleteCollection(nil, metav1.ListOptions{}) - clientset.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) - close(controllerCh) - closeFn() - utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,LocalPersistentVolumes=false") - }, - } -} - func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass { return &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ From 77e651aed16c69cd162774dd1d36427f1000ece4 Mon Sep 17 00:00:00 2001 From: 
mattjmcnaughton Date: Tue, 2 Jan 2018 08:48:03 -0500 Subject: [PATCH 584/794] Clarify error messages in HPA metrics With the introduction of the RESTMetrics client, there are two ways to fetch metrics for auto-scaling. However, they previously shared error messages. This could be misleading. Make the error message more clearly show which method is in use. --- .../podautoscaler/metrics/rest_metrics_client.go | 8 ++++---- .../podautoscaler/metrics/rest_metrics_client_test.go | 2 +- pkg/controller/podautoscaler/replica_calculator_test.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/controller/podautoscaler/metrics/rest_metrics_client.go b/pkg/controller/podautoscaler/metrics/rest_metrics_client.go index 07dd290a563..e8894bfcc95 100644 --- a/pkg/controller/podautoscaler/metrics/rest_metrics_client.go +++ b/pkg/controller/podautoscaler/metrics/rest_metrics_client.go @@ -58,11 +58,11 @@ type resourceMetricsClient struct { func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) { metrics, err := c.client.PodMetricses(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { - return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from API: %v", err) + return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from resource metrics API: %v", err) } if len(metrics.Items) == 0 { - return nil, time.Time{}, fmt.Errorf("no metrics returned from heapster") + return nil, time.Time{}, fmt.Errorf("no metrics returned from resource metrics API") } res := make(PodMetricsInfo, len(metrics.Items)) @@ -101,7 +101,7 @@ type customMetricsClient struct { func (c *customMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) { metrics, err := c.client.NamespacedMetrics(namespace).GetForObjects(schema.GroupKind{Kind: "Pod"}, selector, metricName) if err != nil { 
- return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from API: %v", err) + return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from custom metrics API: %v", err) } if len(metrics.Items) == 0 { @@ -134,7 +134,7 @@ func (c *customMetricsClient) GetObjectMetric(metricName string, namespace strin } if err != nil { - return 0, time.Time{}, fmt.Errorf("unable to fetch metrics from API: %v", err) + return 0, time.Time{}, fmt.Errorf("unable to fetch metrics from custom metrics API: %v", err) } return metricValue.Value.MilliValue(), metricValue.Timestamp.Time, nil diff --git a/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go b/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go index d9a178a7554..289b93f04d7 100644 --- a/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go +++ b/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go @@ -252,7 +252,7 @@ func TestRESTClientQpsSumEqualZero(t *testing.T) { func TestRESTClientCPUEmptyMetrics(t *testing.T) { tc := restClientTestCase{ resourceName: v1.ResourceCPU, - desiredError: fmt.Errorf("no metrics returned from heapster"), + desiredError: fmt.Errorf("no metrics returned from resource metrics API"), reportedMetricPoints: []metricPoint{}, reportedPodMetrics: [][]int64{}, } diff --git a/pkg/controller/podautoscaler/replica_calculator_test.go b/pkg/controller/podautoscaler/replica_calculator_test.go index b12b62519b9..a5c3bf03872 100644 --- a/pkg/controller/podautoscaler/replica_calculator_test.go +++ b/pkg/controller/podautoscaler/replica_calculator_test.go @@ -579,7 +579,7 @@ func TestReplicaCalcMissingMetrics(t *testing.T) { func TestReplicaCalcEmptyMetrics(t *testing.T) { tc := replicaCalcTestCase{ currentReplicas: 4, - expectedError: fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"), + expectedError: fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from resource metrics API"), resource: 
&resourceInfo{ name: v1.ResourceCPU, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, From ec6b9eb956dab11b4a4ead20bfae755671d91add Mon Sep 17 00:00:00 2001 From: zhangxiaoyu-zidif Date: Thu, 4 Jan 2018 11:08:40 +0800 Subject: [PATCH 585/794] fix-binary-check-cephfs --- pkg/volume/cephfs/cephfs.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go index bbe681d71b5..59f72a248d1 100644 --- a/pkg/volume/cephfs/cephfs.go +++ b/pkg/volume/cephfs/cephfs.go @@ -331,9 +331,8 @@ func (cephfsMounter *cephfsMounter) checkFuseMount() bool { execute := cephfsMounter.plugin.host.GetExec(cephfsMounter.plugin.GetPluginName()) switch runtime.GOOS { case "linux": - retBytes, err := execute.Run("/bin/ls", "/sbin/mount.fuse.ceph") - if err == nil && string(retBytes) == "/sbin/mount.fuse.ceph\n" { - glog.V(4).Infof("/sbin/mount.fuse.ceph exists, it should be fuse mount") + if _, err := execute.Run("/usr/bin/test", "-x", "/sbin/mount.fuse.ceph"); err == nil { + glog.V(4).Infof("/sbin/mount.fuse.ceph exists, it should be fuse mount.") return true } return false @@ -356,7 +355,7 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error { payload := make(map[string]util.FileProjection, 1) var fileProjection util.FileProjection - keyring := fmt.Sprintf("[client.%s]\n", cephfsVolume.id) + "key = " + cephfsVolume.secret + "\n" + keyring := fmt.Sprintf("[client.%s]\nkey = %s\n", cephfsVolume.id, cephfsVolume.secret) fileProjection.Data = []byte(keyring) fileProjection.Mode = int32(0644) From 1836e567a9899d3f8ab4060317aa18d6155c62fd Mon Sep 17 00:00:00 2001 From: Dong Liu Date: Thu, 4 Jan 2018 11:46:29 +0800 Subject: [PATCH 586/794] Return actual error when backoff fails --- pkg/cloudprovider/providers/azure/azure_backoff.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 5eb622950eb..099fea81fe1 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -44,8 +44,8 @@ func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) { // GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) { var machine compute.VirtualMachine + var retryErr error err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - var retryErr error machine, retryErr = az.getVirtualMachine(name) if retryErr != nil { glog.Errorf("backoff: failure, will retry,err=%v", retryErr) @@ -54,6 +54,10 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua glog.V(2).Infof("backoff: success") return true, nil }) + if err == wait.ErrWaitTimeout { + err = retryErr + } + return machine, err } From 44e2e7a906d6c6dfe1a98647a47a903dcd61b872 Mon Sep 17 00:00:00 2001 From: lcfang Date: Mon, 11 Dec 2017 14:53:26 +0800 Subject: [PATCH 587/794] delete the unused function in kubectl --- pkg/kubectl/cmd/cmd.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index 35baaaaaf7e..04429a76b4e 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -368,13 +368,3 @@ func deprecatedAlias(deprecatedVersion string, cmd *cobra.Command) *cobra.Comman cmd.Hidden = true return cmd } - -// deprecated is similar to deprecatedAlias, but it is used for deprecations -// that are not simple aliases; this command is actually a different -// (deprecated) codepath. 
-func deprecated(baseName, to string, parent, cmd *cobra.Command) string { - cmd.Long = fmt.Sprintf("Deprecated: all functionality can be found in \"%s %s\"", baseName, to) - cmd.Short = fmt.Sprintf("Deprecated: use %s", to) - parent.AddCommand(cmd) - return cmd.Name() -} From 62f29fcb398853749fbd3fc102225858936a872d Mon Sep 17 00:00:00 2001 From: lcfang Date: Tue, 12 Dec 2017 09:22:27 +0800 Subject: [PATCH 588/794] fixed the some typo in eviction_manager --- pkg/kubelet/eviction/eviction_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index 90b1038cf15..3378bf864c1 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -312,7 +312,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act m.Unlock() // evict pods if there is a resource usage violation from local volume temporary storage - // If eviction happens in localVolumeEviction function, skip the rest of eviction action + // If eviction happens in localStorageEviction function, skip the rest of eviction action if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { if evictedPods := m.localStorageEviction(activePods); len(evictedPods) > 0 { return evictedPods From 9669acc38e8782b4d3f497e8e37e257952731f7f Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Thu, 4 Jan 2018 11:57:40 +0800 Subject: [PATCH 589/794] remove hard coding Namespace --- .../src/k8s.io/apiserver/pkg/endpoints/request/context.go | 7 +++---- .../k8s.io/apiserver/pkg/endpoints/request/requestinfo.go | 3 ++- .../apiserver/pkg/endpoints/request/requestinfo_test.go | 3 ++- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/context.go b/staging/src/k8s.io/apiserver/pkg/endpoints/request/context.go index b63b84dc7db..e64facc5c4b 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/endpoints/request/context.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/context.go @@ -21,6 +21,7 @@ import ( "time" "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/authentication/user" @@ -66,8 +67,6 @@ const ( // auditKey is the context key for the audit event. auditKey - - namespaceDefault = "default" // TODO(sttts): solve import cycle when using metav1.NamespaceDefault ) // NewContext instantiates a base context object for request flows. @@ -77,7 +76,7 @@ func NewContext() Context { // NewDefaultContext instantiates a base context object for request flows in the default namespace func NewDefaultContext() Context { - return WithNamespace(NewContext(), namespaceDefault) + return WithNamespace(NewContext(), metav1.NamespaceDefault) } // WithValue returns a copy of parent in which the value associated with key is val. @@ -110,7 +109,7 @@ func NamespaceValue(ctx Context) string { func WithNamespaceDefaultIfNone(parent Context) Context { namespace, ok := NamespaceFrom(parent) if !ok || len(namespace) == 0 { - return WithNamespace(parent, namespaceDefault) + return WithNamespace(parent, metav1.NamespaceDefault) } return parent } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go index bb0f0604d21..ddbbde3991d 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -21,6 +21,7 @@ import ( "net/http" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" ) @@ -178,7 +179,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er } } } else { - requestInfo.Namespace = "" // TODO(sttts): solve import cycle when using metav1.NamespaceNone + 
requestInfo.Namespace = metav1.NamespaceNone } // parsing successful, so we now know the proper value for .Parts diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo_test.go index 8354cca06d7..9d5a4dbf9fa 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo_test.go @@ -21,6 +21,7 @@ import ( "reflect" "testing" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" ) @@ -31,7 +32,7 @@ func (f fakeRL) TryAccept() bool { return bool(f) } func (f fakeRL) Accept() {} func TestGetAPIRequestInfo(t *testing.T) { - namespaceAll := "" // TODO(sttts): solve import cycle when using metav1.NamespaceAll + namespaceAll := metav1.NamespaceAll successCases := []struct { method string url string From 8b501cc364a446ab4b4f09c9a0b35e47720657bf Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Thu, 4 Jan 2018 12:08:28 +0800 Subject: [PATCH 590/794] update bazel --- staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD index 182f87ee25d..ff4914bb250 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD @@ -11,7 +11,10 @@ go_test( srcs = ["requestinfo_test.go"], embed = [":go_default_library"], importpath = "k8s.io/apiserver/pkg/endpoints/request", - deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], ) go_library( @@ -26,6 +29,7 @@ go_library( deps = [ "//vendor/github.com/golang/glog:go_default_library", 
"//vendor/golang.org/x/net/context:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", From e29a45003868e429ac750053286f9579fde461b3 Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Wed, 3 Jan 2018 22:07:32 -0800 Subject: [PATCH 591/794] Remove comments in get-kube.sh that imply support for environments that were removed long ago. --- cluster/get-kube.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/cluster/get-kube.sh b/cluster/get-kube.sh index 39770b6d543..f0492d45ac6 100755 --- a/cluster/get-kube.sh +++ b/cluster/get-kube.sh @@ -24,14 +24,8 @@ # Set KUBERNETES_PROVIDER to choose between different providers: # Google Compute Engine [default] # * export KUBERNETES_PROVIDER=gce; wget -q -O - https://get.k8s.io | bash -# Google Container Engine -# * export KUBERNETES_PROVIDER=gke; wget -q -O - https://get.k8s.io | bash -# Amazon EC2 -# * export KUBERNETES_PROVIDER=aws; wget -q -O - https://get.k8s.io | bash # Libvirt (with CoreOS as a guest operating system) # * export KUBERNETES_PROVIDER=libvirt-coreos; wget -q -O - https://get.k8s.io | bash -# Microsoft Azure -# * export KUBERNETES_PROVIDER=azure-legacy; wget -q -O - https://get.k8s.io | bash # Vagrant (local virtual machines) # * export KUBERNETES_PROVIDER=vagrant; wget -q -O - https://get.k8s.io | bash # VMWare Photon Controller From 0a8948d8a9a579aabc980e23ab280d9919e2405c Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Thu, 4 Jan 2018 14:18:31 +0800 Subject: [PATCH 592/794] Update Azure GO SDK to v12.1.0 --- Godeps/Godeps.json | 30 ++++---- Godeps/LICENSES | 4 +- vendor/BUILD | 2 +- .../arm/compute/availabilitysets.go | 20 +++-- .../azure-sdk-for-go/arm/compute/client.go | 2 + .../arm/compute/containerservices.go | 14 +++- .../azure-sdk-for-go/arm/compute/disks.go | 17 ++++- 
.../azure-sdk-for-go/arm/compute/images.go | 14 +++- .../arm/compute/resourceskus.go | 4 +- .../azure-sdk-for-go/arm/compute/snapshots.go | 17 ++++- .../azure-sdk-for-go/arm/compute/usage.go | 4 +- .../azure-sdk-for-go/arm/compute/version.go | 4 +- .../compute/virtualmachineextensionimages.go | 12 ++- .../arm/compute/virtualmachineextensions.go | 6 +- .../arm/compute/virtualmachineimages.go | 20 +++-- .../arm/compute/virtualmachineruncommands.go | 8 +- .../arm/compute/virtualmachines.go | 35 +++++++-- .../virtualmachinescalesetextensions.go | 10 ++- .../virtualmachinescalesetrollingupgrades.go | 6 +- .../arm/compute/virtualmachinescalesets.go | 31 ++++++-- .../arm/compute/virtualmachinescalesetvms.go | 19 ++++- .../arm/compute/virtualmachinesizes.go | 4 +- .../arm/containerregistry/client.go | 2 +- .../arm/containerregistry/operations.go | 4 +- .../arm/containerregistry/registries.go | 31 ++++++-- .../arm/containerregistry/replications.go | 11 ++- .../arm/containerregistry/version.go | 4 +- .../arm/containerregistry/webhooks.go | 23 ++++-- .../Azure/azure-sdk-for-go/arm/disk/client.go | 2 + .../azure-sdk-for-go/arm/disk/version.go | 4 +- .../arm/network/applicationgateways.go | 33 ++++++-- .../arm/network/applicationsecuritygroups.go | 14 +++- .../arm/network/availableendpointservices.go | 4 +- .../arm/network/bgpservicecommunities.go | 4 +- .../azure-sdk-for-go/arm/network/client.go | 6 +- .../arm/network/defaultsecurityrules.go | 8 +- .../expressroutecircuitauthorizations.go | 10 ++- .../network/expressroutecircuitpeerings.go | 10 ++- .../arm/network/expressroutecircuits.go | 25 +++++-- .../network/expressrouteserviceproviders.go | 4 +- .../arm/network/inboundnatrules.go | 10 ++- .../arm/network/interfaceipconfigurations.go | 8 +- .../arm/network/interfaceloadbalancers.go | 4 +- .../arm/network/interfaces.go | 28 +++++-- .../loadbalancerbackendaddresspools.go | 8 +- .../loadbalancerfrontendipconfigurations.go | 8 +- .../network/loadbalancerloadbalancingrules.go | 
8 +- .../network/loadbalancernetworkinterfaces.go | 4 +- .../arm/network/loadbalancerprobes.go | 8 +- .../arm/network/loadbalancers.go | 14 +++- .../arm/network/localnetworkgateways.go | 10 ++- .../arm/network/packetcaptures.go | 12 ++- .../arm/network/publicipaddresses.go | 26 +++++-- .../arm/network/routefilterrules.go | 11 ++- .../arm/network/routefilters.go | 15 +++- .../azure-sdk-for-go/arm/network/routes.go | 10 ++- .../arm/network/routetables.go | 14 +++- .../arm/network/securitygroups.go | 14 +++- .../arm/network/securityrules.go | 10 ++- .../azure-sdk-for-go/arm/network/subnets.go | 10 ++- .../azure-sdk-for-go/arm/network/usages.go | 4 +- .../azure-sdk-for-go/arm/network/version.go | 4 +- .../virtualnetworkgatewayconnections.go | 16 +++- .../arm/network/virtualnetworkgateways.go | 29 +++++-- .../arm/network/virtualnetworkpeerings.go | 10 ++- .../arm/network/virtualnetworks.go | 22 ++++-- .../azure-sdk-for-go/arm/network/watchers.go | 31 ++++++-- .../azure-sdk-for-go/arm/storage/accounts.go | 41 +++++++--- .../azure-sdk-for-go/arm/storage/client.go | 2 + .../arm/storage/operations.go | 4 +- .../azure-sdk-for-go/arm/storage/skus.go | 4 +- .../azure-sdk-for-go/arm/storage/usage.go | 4 +- .../azure-sdk-for-go/arm/storage/version.go | 4 +- .../Azure/azure-sdk-for-go/storage/BUILD | 4 +- .../Azure/azure-sdk-for-go/storage/README.md | 4 +- .../Azure/azure-sdk-for-go/storage/blob.go | 22 +----- .../storage/blobserviceclient.go | 66 ++++++++++++++-- .../Azure/azure-sdk-for-go/storage/client.go | 58 ++++++++++++-- .../azure-sdk-for-go/storage/container.go | 75 +++++++++++++++++++ .../Azure/azure-sdk-for-go/storage/entity.go | 2 +- .../azure-sdk-for-go/storage/pageblob.go | 6 +- .../azure-sdk-for-go/storage/table_batch.go | 2 +- .../Azure/azure-sdk-for-go/storage/util.go | 6 +- .../azure-sdk-for-go/storage/util_1.7.go | 26 ------- .../azure-sdk-for-go/storage/util_1.8.go | 32 -------- .../Azure/azure-sdk-for-go/storage/version.go | 2 +- .../satori/{uuid => 
go.uuid}/.travis.yml | 7 -- .../github.com/satori/{uuid => go.uuid}/BUILD | 2 +- .../satori/{uuid => go.uuid}/LICENSE | 0 .../satori/{uuid => go.uuid}/README.md | 0 .../satori/{uuid => go.uuid}/uuid.go | 17 +++-- 91 files changed, 904 insertions(+), 316 deletions(-) delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go rename vendor/github.com/satori/{uuid => go.uuid}/.travis.yml (73%) rename vendor/github.com/satori/{uuid => go.uuid}/BUILD (90%) rename vendor/github.com/satori/{uuid => go.uuid}/LICENSE (100%) rename vendor/github.com/satori/{uuid => go.uuid}/README.md (100%) rename vendor/github.com/satori/{uuid => go.uuid}/uuid.go (97%) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index dc0ec589a51..2ddf4896e85 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -30,33 +30,33 @@ }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/compute", - "Comment": "v11.1.1-beta", - "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" + "Comment": "v12.1.0-beta", + "Rev": "934e2462aeb6e0c14186dcfeedd73a026d1b8eeb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/containerregistry", - "Comment": "v11.1.1-beta", - "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" + "Comment": "v12.1.0-beta", + "Rev": "934e2462aeb6e0c14186dcfeedd73a026d1b8eeb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/disk", - "Comment": "v11.1.1-beta", - "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" + "Comment": "v12.1.0-beta", + "Rev": "934e2462aeb6e0c14186dcfeedd73a026d1b8eeb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network", - "Comment": "v11.1.1-beta", - "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" + "Comment": "v12.1.0-beta", + "Rev": "934e2462aeb6e0c14186dcfeedd73a026d1b8eeb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/storage", - "Comment": "v11.1.1-beta", - "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" + 
"Comment": "v12.1.0-beta", + "Rev": "934e2462aeb6e0c14186dcfeedd73a026d1b8eeb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", - "Comment": "v11.1.1-beta", - "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" + "Comment": "v12.1.0-beta", + "Rev": "934e2462aeb6e0c14186dcfeedd73a026d1b8eeb" }, { "ImportPath": "github.com/Azure/go-ansiterm", @@ -2435,9 +2435,9 @@ "Rev": "300106c228d52c8941d4b3de6054a6062a86dda3" }, { - "ImportPath": "github.com/satori/uuid", - "Comment": "v1.1.0-8-g5bf94b6", - "Rev": "5bf94b69c6b68ee1b541973bb8e1144db23a194b" + "ImportPath": "github.com/satori/go.uuid", + "Comment": "v1.1.0", + "Rev": "879c5887cd475cd7864858769793b2ceb0d44feb" }, { "ImportPath": "github.com/seccomp/libseccomp-golang", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index ecc5c08027a..587fa18e32d 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -76983,7 +76983,7 @@ Blackfriday is distributed under the Simplified BSD License: ================================================================================ -= vendor/github.com/satori/uuid licensed under: = += vendor/github.com/satori/go.uuid licensed under: = Copyright (C) 2013-2016 by Maxim Bublis @@ -77006,7 +77006,7 @@ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-= vendor/github.com/satori/uuid/LICENSE 02d5d17de0c82a23a09863acccc026f6 += vendor/github.com/satori/go.uuid/LICENSE 02d5d17de0c82a23a09863acccc026f6 ================================================================================ diff --git a/vendor/BUILD b/vendor/BUILD index 2f2776e4b3c..c8313075d4a 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -318,7 +318,7 @@ filegroup( "//vendor/github.com/robfig/cron:all-srcs", "//vendor/github.com/rubiojr/go-vhd/vhd:all-srcs", "//vendor/github.com/russross/blackfriday:all-srcs", - "//vendor/github.com/satori/uuid:all-srcs", + "//vendor/github.com/satori/go.uuid:all-srcs", "//vendor/github.com/seccomp/libseccomp-golang:all-srcs", "//vendor/github.com/shurcooL/sanitized_anchor_name:all-srcs", "//vendor/github.com/sirupsen/logrus:all-srcs", diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go index bf1a8fc34e3..76c20408fb1 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go @@ -90,7 +90,9 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName st // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -155,7 +157,9 @@ func (client AvailabilitySetsClient) DeletePreparer(resourceGroupName string, av // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // DeleteResponder handles the response to the Delete request. The method always @@ -220,7 +224,9 @@ func (client AvailabilitySetsClient) GetPreparer(resourceGroupName string, avail // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -284,7 +290,9 @@ func (client AvailabilitySetsClient) ListPreparer(resourceGroupName string) (*ht // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -350,7 +358,9 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(resourceGroupNam // ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAvailableSizesResponder handles the response to the ListAvailableSizes request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go index 98b1c0dad64..93a44f2f20b 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go @@ -1,6 +1,8 @@ // Package compute implements the Azure ARM Compute service API version . // // Compute Client +// +// Deprecated: Please instead use github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-03-30/compute package compute // Copyright (c) Microsoft and contributors. All rights reserved. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go index 98c1706eef7..a09f4c3357f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go @@ -144,6 +144,7 @@ func (client ContainerServicesClient) CreateOrUpdatePreparer(resourceGroupName s func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -229,6 +230,7 @@ func (client ContainerServicesClient) DeletePreparer(resourceGroupName string, c func (client ContainerServicesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -296,7 +298,9 @@ func (client ContainerServicesClient) GetPreparer(resourceGroupName string, cont // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -358,7 +362,9 @@ func (client ContainerServicesClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -493,7 +499,9 @@ func (client ContainerServicesClient) ListByResourceGroupPreparer(resourceGroupN // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go index 47e9e4029ce..18efc73b229 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go @@ -131,6 +131,7 @@ func (client DisksClient) CreateOrUpdatePreparer(resourceGroupName string, diskN func (client DisksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -213,6 +214,7 @@ func (client DisksClient) DeletePreparer(resourceGroupName string, diskName stri func (client DisksClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -279,7 +281,9 @@ func (client DisksClient) GetPreparer(resourceGroupName string, diskName string) // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -372,6 +376,7 @@ func (client DisksClient) GrantAccessPreparer(resourceGroupName string, diskName func (client DisksClient) GrantAccessSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -433,7 +438,9 @@ func (client DisksClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. 
The method will close the // http.Response Body if it receives an error. func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -566,7 +573,9 @@ func (client DisksClient) ListByResourceGroupPreparer(resourceGroupName string) // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -717,6 +726,7 @@ func (client DisksClient) RevokeAccessPreparer(resourceGroupName string, diskNam func (client DisksClient) RevokeAccessSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -801,6 +811,7 @@ func (client DisksClient) UpdatePreparer(resourceGroupName string, diskName stri func (client DisksClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go index bbde2bc7f95..6a5c06ad153 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go @@ -119,6 +119,7 @@ func (client ImagesClient) 
CreateOrUpdatePreparer(resourceGroupName string, imag func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -200,6 +201,7 @@ func (client ImagesClient) DeletePreparer(resourceGroupName string, imageName st func (client ImagesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -269,7 +271,9 @@ func (client ImagesClient) GetPreparer(resourceGroupName string, imageName strin // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -331,7 +335,9 @@ func (client ImagesClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -464,7 +470,9 @@ func (client ImagesClient) ListByResourceGroupPreparer(resourceGroupName string) // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. 
func (client ImagesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go index 9dd7fffa856..c92374bfaa5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go @@ -83,7 +83,9 @@ func (client ResourceSkusClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go index 2c713529215..2c77c51ce8e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go @@ -131,6 +131,7 @@ func (client SnapshotsClient) CreateOrUpdatePreparer(resourceGroupName string, s func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -213,6 +214,7 @@ func (client SnapshotsClient) DeletePreparer(resourceGroupName string, snapshotN func (client SnapshotsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -279,7 +281,9 @@ func (client SnapshotsClient) GetPreparer(resourceGroupName string, snapshotName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -373,6 +377,7 @@ func (client SnapshotsClient) GrantAccessPreparer(resourceGroupName string, snap func (client SnapshotsClient) GrantAccessSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -434,7 +439,9 @@ func (client SnapshotsClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -567,7 +574,9 @@ func (client SnapshotsClient) ListByResourceGroupPreparer(resourceGroupName stri // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. 
The method always @@ -718,6 +727,7 @@ func (client SnapshotsClient) RevokeAccessPreparer(resourceGroupName string, sna func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -802,6 +812,7 @@ func (client SnapshotsClient) UpdatePreparer(resourceGroupName string, snapshotN func (client SnapshotsClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go index 86e9ebabf3b..ddf8917e3cb 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go @@ -94,7 +94,9 @@ func (client UsageClient) ListPreparer(location string) (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go index 2f9cac6f3bc..60188e9f1b2 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go @@ -19,10 +19,10 @@ package compute // UserAgent returns the UserAgent string to use when sending http.Requests. 
func UserAgent() string { - return "Azure-SDK-For-Go/v11.0.0-beta arm-compute/" + return "Azure-SDK-For-Go/v12.1.0-beta arm-compute/" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return "v11.0.0-beta" + return "v12.1.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go index c486f63ced5..2a9a4d28930 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go @@ -90,7 +90,9 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(location string, p // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -155,7 +157,9 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(location str // ListTypesSender sends the ListTypes request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListTypesResponder handles the response to the ListTypes request. The method always @@ -230,7 +234,9 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(location // ListVersionsSender sends the ListVersions request. 
The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListVersionsResponder handles the response to the ListVersions request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go index e8f0b27f1b0..4b8af16c901 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go @@ -109,6 +109,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGrou func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -192,6 +193,7 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName st func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -263,7 +265,9 @@ func (client VirtualMachineExtensionsClient) GetPreparer(resourceGroupName strin // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go index 9eda416e958..2e1e20b0442 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go @@ -91,7 +91,9 @@ func (client VirtualMachineImagesClient) GetPreparer(location string, publisherN // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -168,7 +170,9 @@ func (client VirtualMachineImagesClient) ListPreparer(location string, publisher // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -233,7 +237,9 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(location string, pub // ListOffersSender sends the ListOffers request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListOffersResponder handles the response to the ListOffers request. The method always @@ -297,7 +303,9 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(location string) // ListPublishersSender sends the ListPublishers request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListPublishersResponder handles the response to the ListPublishers request. The method always @@ -364,7 +372,9 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(location string, publi // ListSkusSender sends the ListSkus request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListSkusResponder handles the response to the ListSkus request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go index f422c92f58c..287566a6a98 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go @@ -94,7 +94,9 @@ func (client VirtualMachineRunCommandsClient) GetPreparer(location string, comma // GetSender sends the Get request. 
The method will close the // http.Response Body if it receives an error. func (client VirtualMachineRunCommandsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -164,7 +166,9 @@ func (client VirtualMachineRunCommandsClient) ListPreparer(location string) (*ht // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineRunCommandsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go index ad1d829d14d..086463240a4 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go @@ -119,6 +119,7 @@ func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, VM func (client VirtualMachinesClient) CaptureSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -201,6 +202,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(resourceGroupN func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -310,6 +312,7 @@ func 
(client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName str func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -392,6 +395,7 @@ func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -473,6 +477,7 @@ func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, VMN func (client VirtualMachinesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -538,7 +543,9 @@ func (client VirtualMachinesClient) GeneralizePreparer(resourceGroupName string, // GeneralizeSender sends the Generalize request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GeneralizeResponder handles the response to the Generalize request. The method always @@ -607,7 +614,9 @@ func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, VMName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -672,7 +681,9 @@ func (client VirtualMachinesClient) InstanceViewPreparer(resourceGroupName strin // InstanceViewSender sends the InstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) InstanceViewSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // InstanceViewResponder handles the response to the InstanceView request. The method always @@ -737,7 +748,9 @@ func (client VirtualMachinesClient) ListPreparer(resourceGroupName string) (*htt // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -868,7 +881,9 @@ func (client VirtualMachinesClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. 
The method always @@ -1002,7 +1017,9 @@ func (client VirtualMachinesClient) ListAvailableSizesPreparer(resourceGroupName // ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always @@ -1084,6 +1101,7 @@ func (client VirtualMachinesClient) PerformMaintenancePreparer(resourceGroupName func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1167,6 +1185,7 @@ func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, V func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1249,6 +1268,7 @@ func (client VirtualMachinesClient) RedeployPreparer(resourceGroupName string, V func (client VirtualMachinesClient) RedeploySender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1330,6 +1350,7 @@ func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, VM func (client VirtualMachinesClient) RestartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ 
-1423,6 +1444,7 @@ func (client VirtualMachinesClient) RunCommandPreparer(resourceGroupName string, func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1504,6 +1526,7 @@ func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, VMNa func (client VirtualMachinesClient) StartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go index 9a198f148d2..882339202b1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go @@ -110,6 +110,7 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(reso func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -193,6 +194,7 @@ func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(resourceGrou func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -264,7 +266,9 @@ func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(resourceGroupNa // GetSender sends the Get request. 
The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -330,7 +334,9 @@ func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(resourceGroupN // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetExtensionsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go index 45cc6e5c51c..230b5da939c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go @@ -106,6 +106,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(resourc func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -171,7 +172,9 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(reso // GetLatestSender sends the GetLatest request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetLatestResponder handles the response to the GetLatest request. The method always @@ -254,6 +257,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go index 72a868579ac..64c5c6d7a17 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go @@ -133,6 +133,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroup func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -222,6 +223,7 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -303,6 +305,7 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(resourceGroupName str func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) 
(*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -397,6 +400,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGrou func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -462,7 +466,9 @@ func (client VirtualMachineScaleSetsClient) GetPreparer(resourceGroupName string // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -527,7 +533,9 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(resourceGrou // GetInstanceViewSender sends the GetInstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetInstanceViewResponder handles the response to the GetInstanceView request. The method always @@ -591,7 +599,9 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(resourceGroupName strin // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -723,7 +733,9 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer() (*http.Request, er // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -858,7 +870,9 @@ func (client VirtualMachineScaleSetsClient) ListSkusPreparer(resourceGroupName s // ListSkusSender sends the ListSkus request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListSkusResponder handles the response to the ListSkus request. 
The method always @@ -1016,6 +1030,7 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName s func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1104,6 +1119,7 @@ func (client VirtualMachineScaleSetsClient) ReimagePreparer(resourceGroupName st func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1192,6 +1208,7 @@ func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(resourceGroupName func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1280,6 +1297,7 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName st func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1368,6 +1386,7 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName stri func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1452,6 +1471,7 @@ func (client VirtualMachineScaleSetsClient) UpdatePreparer(resourceGroupName str func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (*http.Response, error) { return 
autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1546,6 +1566,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGrou func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go index 5a15edae913..0be36c9ac63 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go @@ -107,6 +107,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(resourceGroupNa func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -191,6 +192,7 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(resourceGroupName s func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -258,7 +260,9 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(resourceGroupName stri // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -325,7 +329,9 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(resourceGr // GetInstanceViewSender sends the GetInstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetInstanceViewResponder handles the response to the GetInstanceView request. The method always @@ -401,7 +407,9 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(resourceGroupName str // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -555,6 +563,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(resourceGroupName func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -639,6 +648,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(resourceGroupName func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -723,6 +733,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(resourceGroupNa func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -807,6 +818,7 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(resourceGroupName func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -890,6 +902,7 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(resourceGroupName st func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go index dc2f2778bea..0833578d5a6 100755 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go @@ -93,7 +93,9 @@ func (client VirtualMachineSizesClient) ListPreparer(location string) (*http.Req // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go index b3eea6cb2f4..08485cae614 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go @@ -1,6 +1,6 @@ // Package containerregistry implements the Azure ARM Containerregistry service API version 2017-10-01. // -// +// Deprecated: Please instead use github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry package containerregistry // Copyright (c) Microsoft and contributors. All rights reserved. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go index a9143e73966..5a5aa26f42c 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go @@ -79,7 +79,9 @@ func (client OperationsClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go index c1c9af59e83..955b6ae1c43 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go @@ -100,7 +100,9 @@ func (client RegistriesClient) CheckNameAvailabilityPreparer(registryNameCheckRe // CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the // http.Response Body if it receives an error. func (client RegistriesClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. 
The method always @@ -202,6 +204,7 @@ func (client RegistriesClient) CreatePreparer(resourceGroupName string, registry func (client RegistriesClient) CreateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -295,6 +298,7 @@ func (client RegistriesClient) DeletePreparer(resourceGroupName string, registry func (client RegistriesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -368,7 +372,9 @@ func (client RegistriesClient) GetPreparer(resourceGroupName string, registryNam // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RegistriesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -429,7 +435,9 @@ func (client RegistriesClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RegistriesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -562,7 +570,9 @@ func (client RegistriesClient) ListByResourceGroupPreparer(resourceGroupName str // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. 
func (client RegistriesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -705,7 +715,9 @@ func (client RegistriesClient) ListCredentialsPreparer(resourceGroupName string, // ListCredentialsSender sends the ListCredentials request. The method will close the // http.Response Body if it receives an error. func (client RegistriesClient) ListCredentialsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListCredentialsResponder handles the response to the ListCredentials request. The method always @@ -779,7 +791,9 @@ func (client RegistriesClient) ListUsagesPreparer(resourceGroupName string, regi // ListUsagesSender sends the ListUsages request. The method will close the // http.Response Body if it receives an error. func (client RegistriesClient) ListUsagesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListUsagesResponder handles the response to the ListUsages request. The method always @@ -856,7 +870,9 @@ func (client RegistriesClient) RegenerateCredentialPreparer(resourceGroupName st // RegenerateCredentialSender sends the RegenerateCredential request. The method will close the // http.Response Body if it receives an error. 
func (client RegistriesClient) RegenerateCredentialSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // RegenerateCredentialResponder handles the response to the RegenerateCredential request. The method always @@ -952,6 +968,7 @@ func (client RegistriesClient) UpdatePreparer(resourceGroupName string, registry func (client RegistriesClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go index 79fce2d21ee..5b82038ef1d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go @@ -125,6 +125,7 @@ func (client ReplicationsClient) CreatePreparer(resourceGroupName string, regist func (client ReplicationsClient) CreateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -224,6 +225,7 @@ func (client ReplicationsClient) DeletePreparer(resourceGroupName string, regist func (client ReplicationsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -302,7 +304,9 @@ func (client ReplicationsClient) GetPreparer(resourceGroupName string, registryN // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client ReplicationsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -376,7 +380,9 @@ func (client ReplicationsClient) ListPreparer(resourceGroupName string, registry // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ReplicationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -547,6 +553,7 @@ func (client ReplicationsClient) UpdatePreparer(resourceGroupName string, regist func (client ReplicationsClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go index 6e3ee863831..b31ec726f21 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go @@ -19,10 +19,10 @@ package containerregistry // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v11.1.0-beta arm-containerregistry/2017-10-01" + return "Azure-SDK-For-Go/v12.1.0-beta arm-containerregistry/2017-10-01" } // Version returns the semantic version (see http://semver.org) of the client. 
func Version() string { - return "v11.1.0-beta" + return "v12.1.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go index ff5f9ce7844..e61ce83a973 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go @@ -131,6 +131,7 @@ func (client WebhooksClient) CreatePreparer(resourceGroupName string, registryNa func (client WebhooksClient) CreateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -229,6 +230,7 @@ func (client WebhooksClient) DeletePreparer(resourceGroupName string, registryNa func (client WebhooksClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -307,7 +309,9 @@ func (client WebhooksClient) GetPreparer(resourceGroupName string, registryName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client WebhooksClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -386,7 +390,9 @@ func (client WebhooksClient) GetCallbackConfigPreparer(resourceGroupName string, // GetCallbackConfigSender sends the GetCallbackConfig request. The method will close the // http.Response Body if it receives an error. 
func (client WebhooksClient) GetCallbackConfigSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetCallbackConfigResponder handles the response to the GetCallbackConfig request. The method always @@ -460,7 +466,9 @@ func (client WebhooksClient) ListPreparer(resourceGroupName string, registryName // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client WebhooksClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -608,7 +616,9 @@ func (client WebhooksClient) ListEventsPreparer(resourceGroupName string, regist // ListEventsSender sends the ListEvents request. The method will close the // http.Response Body if it receives an error. func (client WebhooksClient) ListEventsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListEventsResponder handles the response to the ListEvents request. The method always @@ -756,7 +766,9 @@ func (client WebhooksClient) PingPreparer(resourceGroupName string, registryName // PingSender sends the Ping request. The method will close the // http.Response Body if it receives an error. func (client WebhooksClient) PingSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // PingResponder handles the response to the Ping request. 
The method always @@ -858,6 +870,7 @@ func (client WebhooksClient) UpdatePreparer(resourceGroupName string, registryNa func (client WebhooksClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go index 8bab7acc132..54634794d67 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go @@ -2,6 +2,8 @@ // 2016-04-30-preview. // // The Disk Resource Provider Client. +// +// Deprecated: Please instead use github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2016-04-30-preview/compute package disk // Copyright (c) Microsoft and contributors. All rights reserved. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go index 11c4a35ee9c..ac74fb4baf8 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go @@ -19,10 +19,10 @@ package disk // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v10.2.0-beta arm-disk/2016-04-30-preview" + return "Azure-SDK-For-Go/v12.1.0-beta arm-disk/2016-04-30-preview" } // Version returns the semantic version (see http://semver.org) of the client. 
func Version() string { - return "v10.2.0-beta" + return "v12.1.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go index c000be117bc..0d1605ac297 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go @@ -109,6 +109,7 @@ func (client ApplicationGatewaysClient) BackendHealthPreparer(resourceGroupName func (client ApplicationGatewaysClient) BackendHealthSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -209,6 +210,7 @@ func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(resourceGroupName func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -290,6 +292,7 @@ func (client ApplicationGatewaysClient) DeletePreparer(resourceGroupName string, func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -354,7 +357,9 @@ func (client ApplicationGatewaysClient) GetPreparer(resourceGroupName string, ap // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -418,7 +423,9 @@ func (client ApplicationGatewaysClient) GetSslPredefinedPolicyPreparer(predefine // GetSslPredefinedPolicySender sends the GetSslPredefinedPolicy request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) GetSslPredefinedPolicySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetSslPredefinedPolicyResponder handles the response to the GetSslPredefinedPolicy request. The method always @@ -482,7 +489,9 @@ func (client ApplicationGatewaysClient) ListPreparer(resourceGroupName string) ( // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -612,7 +621,9 @@ func (client ApplicationGatewaysClient) ListAllPreparer() (*http.Request, error) // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -742,7 +753,9 @@ func (client ApplicationGatewaysClient) ListAvailableSslOptionsPreparer() (*http // ListAvailableSslOptionsSender sends the ListAvailableSslOptions request. The method will close the // http.Response Body if it receives an error. 
func (client ApplicationGatewaysClient) ListAvailableSslOptionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAvailableSslOptionsResponder handles the response to the ListAvailableSslOptions request. The method always @@ -803,7 +816,9 @@ func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPoliciesPrepar // ListAvailableSslPredefinedPoliciesSender sends the ListAvailableSslPredefinedPolicies request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPoliciesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAvailableSslPredefinedPoliciesResponder handles the response to the ListAvailableSslPredefinedPolicies request. The method always @@ -933,7 +948,9 @@ func (client ApplicationGatewaysClient) ListAvailableWafRuleSetsPreparer() (*htt // ListAvailableWafRuleSetsSender sends the ListAvailableWafRuleSets request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) ListAvailableWafRuleSetsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAvailableWafRuleSetsResponder handles the response to the ListAvailableWafRuleSets request. 
The method always @@ -1014,6 +1031,7 @@ func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string, func (client ApplicationGatewaysClient) StartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1095,6 +1113,7 @@ func (client ApplicationGatewaysClient) StopPreparer(resourceGroupName string, a func (client ApplicationGatewaysClient) StopSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go index 1e84e706466..954b113d422 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go @@ -107,6 +107,7 @@ func (client ApplicationSecurityGroupsClient) CreateOrUpdatePreparer(resourceGro func (client ApplicationSecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -190,6 +191,7 @@ func (client ApplicationSecurityGroupsClient) DeletePreparer(resourceGroupName s func (client ApplicationSecurityGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -255,7 +257,9 @@ func (client ApplicationSecurityGroupsClient) GetPreparer(resourceGroupName stri // GetSender sends the Get request. 
The method will close the // http.Response Body if it receives an error. func (client ApplicationSecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -319,7 +323,9 @@ func (client ApplicationSecurityGroupsClient) ListPreparer(resourceGroupName str // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ApplicationSecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -449,7 +455,9 @@ func (client ApplicationSecurityGroupsClient) ListAllPreparer() (*http.Request, // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client ApplicationSecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go index 26bcaf8b647..a010418e9de 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go @@ -86,7 +86,9 @@ func (client AvailableEndpointServicesClient) ListPreparer(location string) (*ht // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client AvailableEndpointServicesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go index 10cfa2cdd55..5c4f4786a90 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go @@ -83,7 +83,9 @@ func (client BgpServiceCommunitiesClient) ListPreparer() (*http.Request, error) // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client BgpServiceCommunitiesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go index a3576c393aa..ea0952c7ca9 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go @@ -1,6 +1,8 @@ // Package network implements the Azure ARM Network service API version . // // Network Client +// +// Deprecated: Please instead use github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network package network // Copyright (c) Microsoft and contributors. All rights reserved. @@ -102,7 +104,9 @@ func (client ManagementClient) CheckDNSNameAvailabilityPreparer(location string, // CheckDNSNameAvailabilitySender sends the CheckDNSNameAvailability request. The method will close the // http.Response Body if it receives an error. func (client ManagementClient) CheckDNSNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // CheckDNSNameAvailabilityResponder handles the response to the CheckDNSNameAvailability request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go index 9a3ded19d5c..1fd3e18d92a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go @@ -89,7 +89,9 @@ func (client DefaultSecurityRulesClient) GetPreparer(resourceGroupName string, n // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client DefaultSecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -155,7 +157,9 @@ func (client DefaultSecurityRulesClient) ListPreparer(resourceGroupName string, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client DefaultSecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go index 9b7fd524e73..3ea822813dc 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go @@ -111,6 +111,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdatePreparer(res func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -195,6 +196,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) DeletePreparer(resourceGro func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ 
-261,7 +263,9 @@ func (client ExpressRouteCircuitAuthorizationsClient) GetPreparer(resourceGroupN // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitAuthorizationsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -326,7 +330,9 @@ func (client ExpressRouteCircuitAuthorizationsClient) ListPreparer(resourceGroup // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitAuthorizationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go index af9fc67e039..c454167020d 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go @@ -109,6 +109,7 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdatePreparer(resourceG func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -193,6 +194,7 @@ func (client ExpressRouteCircuitPeeringsClient) DeletePreparer(resourceGroupName func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -259,7 +261,9 @@ func (client ExpressRouteCircuitPeeringsClient) GetPreparer(resourceGroupName st // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitPeeringsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -324,7 +328,9 @@ func (client ExpressRouteCircuitPeeringsClient) ListPreparer(resourceGroupName s // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client ExpressRouteCircuitPeeringsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go index 63cfeff9e0a..8a497d166af 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go @@ -107,6 +107,7 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(resourceGroupNam func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -188,6 +189,7 @@ func (client ExpressRouteCircuitsClient) DeletePreparer(resourceGroupName string func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -252,7 +254,9 @@ func (client ExpressRouteCircuitsClient) GetPreparer(resourceGroupName string, c // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -319,7 +323,9 @@ func (client ExpressRouteCircuitsClient) GetPeeringStatsPreparer(resourceGroupNa // GetPeeringStatsSender sends the GetPeeringStats request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) GetPeeringStatsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetPeeringStatsResponder handles the response to the GetPeeringStats request. The method always @@ -384,7 +390,9 @@ func (client ExpressRouteCircuitsClient) GetStatsPreparer(resourceGroupName stri // GetStatsSender sends the GetStats request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) GetStatsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetStatsResponder handles the response to the GetStats request. The method always @@ -448,7 +456,9 @@ func (client ExpressRouteCircuitsClient) ListPreparer(resourceGroupName string) // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -578,7 +588,9 @@ func (client ExpressRouteCircuitsClient) ListAllPreparer() (*http.Request, error // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. 
func (client ExpressRouteCircuitsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -732,6 +744,7 @@ func (client ExpressRouteCircuitsClient) ListArpTablePreparer(resourceGroupName func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -817,6 +830,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(resourceGroupNa func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -902,6 +916,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryPreparer(resource func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go index 5e39087b2a5..ccd76e9968d 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go @@ -84,7 +84,9 @@ func (client ExpressRouteServiceProvidersClient) ListPreparer() (*http.Request, // ListSender sends the List request. 
The method will close the // http.Response Body if it receives an error. func (client ExpressRouteServiceProvidersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go index eab80ac5766..8a43aee1b91 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go @@ -132,6 +132,7 @@ func (client InboundNatRulesClient) CreateOrUpdatePreparer(resourceGroupName str func (client InboundNatRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -216,6 +217,7 @@ func (client InboundNatRulesClient) DeletePreparer(resourceGroupName string, loa func (client InboundNatRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -285,7 +287,9 @@ func (client InboundNatRulesClient) GetPreparer(resourceGroupName string, loadBa // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client InboundNatRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -350,7 +354,9 @@ func (client InboundNatRulesClient) ListPreparer(resourceGroupName string, loadB // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client InboundNatRulesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go index ca0359f0900..4885110b27f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go @@ -89,7 +89,9 @@ func (client InterfaceIPConfigurationsClient) GetPreparer(resourceGroupName stri // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client InterfaceIPConfigurationsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -154,7 +156,9 @@ func (client InterfaceIPConfigurationsClient) ListPreparer(resourceGroupName str // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client InterfaceIPConfigurationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go index c7b7d272f72..e9f00e491e5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go @@ -87,7 +87,9 @@ func (client InterfaceLoadBalancersClient) ListPreparer(resourceGroupName string // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client InterfaceLoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go index c2fdb3e901b..cfe3238c53a 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go @@ -107,6 +107,7 @@ func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -188,6 +189,7 @@ func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkI func (client InterfacesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -256,7 +258,9 @@ func (client InterfacesClient) 
GetPreparer(resourceGroupName string, networkInte // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -338,6 +342,7 @@ func (client InterfacesClient) GetEffectiveRouteTablePreparer(resourceGroupName func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -410,7 +415,9 @@ func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer // GetVirtualMachineScaleSetNetworkInterfaceSender sends the GetVirtualMachineScaleSetNetworkInterface request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetVirtualMachineScaleSetNetworkInterfaceResponder handles the response to the GetVirtualMachineScaleSetNetworkInterface request. The method always @@ -474,7 +481,9 @@ func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Req // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -604,7 +613,9 @@ func (client InterfacesClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -755,6 +766,7 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsPreparer(resour func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -821,7 +833,9 @@ func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPrepar // ListVirtualMachineScaleSetNetworkInterfacesSender sends the ListVirtualMachineScaleSetNetworkInterfaces request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListVirtualMachineScaleSetNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetNetworkInterfaces request. 
The method always @@ -958,7 +972,9 @@ func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPrep // ListVirtualMachineScaleSetVMNetworkInterfacesSender sends the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListVirtualMachineScaleSetVMNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go index de14c355608..e15b18fc583 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go @@ -90,7 +90,9 @@ func (client LoadBalancerBackendAddressPoolsClient) GetPreparer(resourceGroupNam // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerBackendAddressPoolsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -155,7 +157,9 @@ func (client LoadBalancerBackendAddressPoolsClient) ListPreparer(resourceGroupNa // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client LoadBalancerBackendAddressPoolsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go index 515b875a183..7ba6f72fb53 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go @@ -91,7 +91,9 @@ func (client LoadBalancerFrontendIPConfigurationsClient) GetPreparer(resourceGro // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerFrontendIPConfigurationsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -156,7 +158,9 @@ func (client LoadBalancerFrontendIPConfigurationsClient) ListPreparer(resourceGr // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerFrontendIPConfigurationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go index 2ecd6be0d4f..4a4747ccda1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go @@ -90,7 +90,9 @@ func (client LoadBalancerLoadBalancingRulesClient) GetPreparer(resourceGroupName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerLoadBalancingRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -155,7 +157,9 @@ func (client LoadBalancerLoadBalancingRulesClient) ListPreparer(resourceGroupNam // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerLoadBalancingRulesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go index 35650354255..def71135275 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go @@ -88,7 +88,9 @@ func (client LoadBalancerNetworkInterfacesClient) ListPreparer(resourceGroupName // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerNetworkInterfacesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go index daee51e0f35..2b43d041b02 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go @@ -89,7 +89,9 @@ func (client LoadBalancerProbesClient) GetPreparer(resourceGroupName string, loa // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerProbesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -154,7 +156,9 @@ func (client LoadBalancerProbesClient) ListPreparer(resourceGroupName string, lo // ListSender sends the List request. 
The method will close the // http.Response Body if it receives an error. func (client LoadBalancerProbesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go index 3a2528d85f4..980478937e8 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go @@ -106,6 +106,7 @@ func (client LoadBalancersClient) CreateOrUpdatePreparer(resourceGroupName strin func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -187,6 +188,7 @@ func (client LoadBalancersClient) DeletePreparer(resourceGroupName string, loadB func (client LoadBalancersClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -255,7 +257,9 @@ func (client LoadBalancersClient) GetPreparer(resourceGroupName string, loadBala // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancersClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -319,7 +323,9 @@ func (client LoadBalancersClient) ListPreparer(resourceGroupName string) (*http. // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -449,7 +455,9 @@ func (client LoadBalancersClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancersClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go index 64cdea20a72..5431d244efe 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go @@ -119,6 +119,7 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupNam func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -210,6 +211,7 @@ func (client LocalNetworkGatewaysClient) DeletePreparer(resourceGroupName string func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -281,7 +283,9 @@ func (client LocalNetworkGatewaysClient) GetPreparer(resourceGroupName string, l // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LocalNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -345,7 +349,9 @@ func (client LocalNetworkGatewaysClient) ListPreparer(resourceGroupName string) // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client LocalNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go index 1ad97a5e2aa..d089ca61dbb 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go @@ -122,6 +122,7 @@ func (client PacketCapturesClient) CreatePreparer(resourceGroupName string, netw func (client PacketCapturesClient) CreateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -205,6 +206,7 @@ func (client PacketCapturesClient) DeletePreparer(resourceGroupName string, netw func (client PacketCapturesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -271,7 +273,9 @@ func (client PacketCapturesClient) GetPreparer(resourceGroupName string, network // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client PacketCapturesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -355,6 +359,7 @@ func (client PacketCapturesClient) GetStatusPreparer(resourceGroupName string, n func (client PacketCapturesClient) GetStatusSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -420,7 +425,9 @@ func (client PacketCapturesClient) ListPreparer(resourceGroupName string, networ // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client PacketCapturesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -503,6 +510,7 @@ func (client PacketCapturesClient) StopPreparer(resourceGroupName string, networ func (client PacketCapturesClient) StopSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go index af7e7ec85b6..63733fbd482 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go @@ -122,6 +122,7 @@ func (client PublicIPAddressesClient) CreateOrUpdatePreparer(resourceGroupName s func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -203,6 +204,7 @@ func (client PublicIPAddressesClient) 
DeletePreparer(resourceGroupName string, p func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -271,7 +273,9 @@ func (client PublicIPAddressesClient) GetPreparer(resourceGroupName string, publ // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -346,7 +350,9 @@ func (client PublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddressPr // GetVirtualMachineScaleSetPublicIPAddressSender sends the GetVirtualMachineScaleSetPublicIPAddress request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddressSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetVirtualMachineScaleSetPublicIPAddressResponder handles the response to the GetVirtualMachineScaleSetPublicIPAddress request. The method always @@ -410,7 +416,9 @@ func (client PublicIPAddressesClient) ListPreparer(resourceGroupName string) (*h // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client PublicIPAddressesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -540,7 +548,9 @@ func (client PublicIPAddressesClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -676,7 +686,9 @@ func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddresse // ListVirtualMachineScaleSetPublicIPAddressesSender sends the ListVirtualMachineScaleSetPublicIPAddresses request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddressesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListVirtualMachineScaleSetPublicIPAddressesResponder handles the response to the ListVirtualMachineScaleSetPublicIPAddresses request. The method always @@ -816,7 +828,9 @@ func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddres // ListVirtualMachineScaleSetVMPublicIPAddressesSender sends the ListVirtualMachineScaleSetVMPublicIPAddresses request. The method will close the // http.Response Body if it receives an error. 
func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddressesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListVirtualMachineScaleSetVMPublicIPAddressesResponder handles the response to the ListVirtualMachineScaleSetVMPublicIPAddresses request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go index 7c0ab17904c..b3dc3883bec 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go @@ -122,6 +122,7 @@ func (client RouteFilterRulesClient) CreateOrUpdatePreparer(resourceGroupName st func (client RouteFilterRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -206,6 +207,7 @@ func (client RouteFilterRulesClient) DeletePreparer(resourceGroupName string, ro func (client RouteFilterRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -272,7 +274,9 @@ func (client RouteFilterRulesClient) GetPreparer(resourceGroupName string, route // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RouteFilterRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -337,7 +341,9 @@ func (client RouteFilterRulesClient) ListByRouteFilterPreparer(resourceGroupName // ListByRouteFilterSender sends the ListByRouteFilter request. The method will close the // http.Response Body if it receives an error. func (client RouteFilterRulesClient) ListByRouteFilterSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByRouteFilterResponder handles the response to the ListByRouteFilter request. The method always @@ -493,6 +499,7 @@ func (client RouteFilterRulesClient) UpdatePreparer(resourceGroupName string, ro func (client RouteFilterRulesClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go index 711e58fc9c9..0d35b6d9c06 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go @@ -107,6 +107,7 @@ func (client RouteFiltersClient) CreateOrUpdatePreparer(resourceGroupName string func (client RouteFiltersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -188,6 +189,7 @@ func (client RouteFiltersClient) DeletePreparer(resourceGroupName string, routeF func (client RouteFiltersClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -256,7 +258,9 @@ func 
(client RouteFiltersClient) GetPreparer(resourceGroupName string, routeFilt // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RouteFiltersClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -317,7 +321,9 @@ func (client RouteFiltersClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RouteFiltersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -450,7 +456,9 @@ func (client RouteFiltersClient) ListByResourceGroupPreparer(resourceGroupName s // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client RouteFiltersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. 
The method always @@ -604,6 +612,7 @@ func (client RouteFiltersClient) UpdatePreparer(resourceGroupName string, routeF func (client RouteFiltersClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go index 0dcde6fa01d..48d11f3afce 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go @@ -108,6 +108,7 @@ func (client RoutesClient) CreateOrUpdatePreparer(resourceGroupName string, rout func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -192,6 +193,7 @@ func (client RoutesClient) DeletePreparer(resourceGroupName string, routeTableNa func (client RoutesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -258,7 +260,9 @@ func (client RoutesClient) GetPreparer(resourceGroupName string, routeTableName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RoutesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -323,7 +327,9 @@ func (client RoutesClient) ListPreparer(resourceGroupName string, routeTableName // ListSender sends the List request. 
The method will close the // http.Response Body if it receives an error. func (client RoutesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go index e9b557bc0b3..3a2f4ca69e6 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go @@ -107,6 +107,7 @@ func (client RouteTablesClient) CreateOrUpdatePreparer(resourceGroupName string, func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -188,6 +189,7 @@ func (client RouteTablesClient) DeletePreparer(resourceGroupName string, routeTa func (client RouteTablesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -256,7 +258,9 @@ func (client RouteTablesClient) GetPreparer(resourceGroupName string, routeTable // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -320,7 +324,9 @@ func (client RouteTablesClient) ListPreparer(resourceGroupName string) (*http.Re // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -450,7 +456,9 @@ func (client RouteTablesClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go index b1a5ae4a6e2..3606e6d82c8 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go @@ -107,6 +107,7 @@ func (client SecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName stri func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -189,6 +190,7 @@ func (client SecurityGroupsClient) DeletePreparer(resourceGroupName string, netw func (client SecurityGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -257,7 +259,9 @@ func (client SecurityGroupsClient) GetPreparer(resourceGroupName string, network // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -321,7 +325,9 @@ func (client SecurityGroupsClient) ListPreparer(resourceGroupName string) (*http // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client SecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -451,7 +457,9 @@ func (client SecurityGroupsClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client SecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go index 9c76ef54e8b..6fcecae5f9c 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go @@ -122,6 +122,7 @@ func (client SecurityRulesClient) CreateOrUpdatePreparer(resourceGroupName strin func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -205,6 +206,7 @@ func (client SecurityRulesClient) DeletePreparer(resourceGroupName string, netwo func (client SecurityRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -271,7 +273,9 @@ func (client SecurityRulesClient) GetPreparer(resourceGroupName string, networkS // GetSender sends the 
Get request. The method will close the // http.Response Body if it receives an error. func (client SecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -337,7 +341,9 @@ func (client SecurityRulesClient) ListPreparer(resourceGroupName string, network // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go index 62a1789e591..298cb989ffa 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go @@ -109,6 +109,7 @@ func (client SubnetsClient) CreateOrUpdatePreparer(resourceGroupName string, vir func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -192,6 +193,7 @@ func (client SubnetsClient) DeletePreparer(resourceGroupName string, virtualNetw func (client SubnetsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -261,7 +263,9 @@ func (client SubnetsClient) GetPreparer(resourceGroupName string, 
virtualNetwork // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SubnetsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -326,7 +330,9 @@ func (client SubnetsClient) ListPreparer(resourceGroupName string, virtualNetwor // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SubnetsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go index 85c22006540..ab2c9b8fcb3 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go @@ -93,7 +93,9 @@ func (client UsagesClient) ListPreparer(location string) (*http.Request, error) // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsagesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go index 7f510b4e4c6..7d4ca0abc1c 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go @@ -19,10 +19,10 @@ package network // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v11.0.0-beta arm-network/" + return "Azure-SDK-For-Go/v12.1.0-beta arm-network/" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return "v11.0.0-beta" + return "v12.1.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go index 7db4d5de6c3..faac6f8a679 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go @@ -126,6 +126,7 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(reso func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -209,6 +210,7 @@ func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(resourceGrou func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -274,7 +276,9 @@ func (client VirtualNetworkGatewayConnectionsClient) GetPreparer(resourceGroupNa // GetSender 
sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewayConnectionsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -341,7 +345,9 @@ func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(resour // GetSharedKeySender sends the GetSharedKey request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetSharedKeyResponder handles the response to the GetSharedKey request. The method always @@ -406,7 +412,9 @@ func (client VirtualNetworkGatewayConnectionsClient) ListPreparer(resourceGroupN // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewayConnectionsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -574,6 +582,7 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(reso func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -670,6 +679,7 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(resour func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go index 5f56a204bad..6edd19644c5 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go @@ -117,6 +117,7 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupN func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -200,6 +201,7 @@ func (client VirtualNetworkGatewaysClient) DeletePreparer(resourceGroupName stri func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -284,6 +286,7 @@ func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(reso func (client VirtualNetworkGatewaysClient) 
GeneratevpnclientpackageSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -370,6 +373,7 @@ func (client VirtualNetworkGatewaysClient) GenerateVpnProfilePreparer(resourceGr func (client VirtualNetworkGatewaysClient) GenerateVpnProfileSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -436,7 +440,9 @@ func (client VirtualNetworkGatewaysClient) GetPreparer(resourceGroupName string, // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -520,6 +526,7 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesPreparer(resourceG func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -606,6 +613,7 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusPreparer(resourceGrou func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -689,6 +697,7 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutesPreparer(resourceGrou func (client VirtualNetworkGatewaysClient) GetLearnedRoutesSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -773,6 +782,7 @@ func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLPreparer(resou func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -837,7 +847,9 @@ func (client VirtualNetworkGatewaysClient) ListPreparer(resourceGroupName string // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -972,7 +984,9 @@ func (client VirtualNetworkGatewaysClient) ListConnectionsPreparer(resourceGroup // ListConnectionsSender sends the ListConnections request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) ListConnectionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListConnectionsResponder handles the response to the ListConnections request. The method always @@ -1128,6 +1142,7 @@ func (client VirtualNetworkGatewaysClient) ResetPreparer(resourceGroupName strin func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1194,7 +1209,9 @@ func (client VirtualNetworkGatewaysClient) SupportedVpnDevicesPreparer(resourceG // SupportedVpnDevicesSender sends the SupportedVpnDevices request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) SupportedVpnDevicesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // SupportedVpnDevicesResponder handles the response to the SupportedVpnDevices request. The method always @@ -1263,7 +1280,9 @@ func (client VirtualNetworkGatewaysClient) VpnDeviceConfigurationScriptPreparer( // VpnDeviceConfigurationScriptSender sends the VpnDeviceConfigurationScript request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualNetworkGatewaysClient) VpnDeviceConfigurationScriptSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // VpnDeviceConfigurationScriptResponder handles the response to the VpnDeviceConfigurationScript request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go index 0a945c1bb8d..30bed99c02c 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go @@ -109,6 +109,7 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdatePreparer(resourceGroupN func (client VirtualNetworkPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -193,6 +194,7 @@ func (client VirtualNetworkPeeringsClient) DeletePreparer(resourceGroupName stri func (client VirtualNetworkPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -259,7 +261,9 @@ func (client VirtualNetworkPeeringsClient) GetPreparer(resourceGroupName string, // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkPeeringsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -324,7 +328,9 @@ func (client VirtualNetworkPeeringsClient) ListPreparer(resourceGroupName string // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkPeeringsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go index 58ac8ab1122..47c71480661 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go @@ -91,7 +91,9 @@ func (client VirtualNetworksClient) CheckIPAddressAvailabilityPreparer(resourceG // CheckIPAddressAvailabilitySender sends the CheckIPAddressAvailability request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) CheckIPAddressAvailabilitySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // CheckIPAddressAvailabilityResponder handles the response to the CheckIPAddressAvailability request. 
The method always @@ -176,6 +178,7 @@ func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName str func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -257,6 +260,7 @@ func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, vir func (client VirtualNetworksClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -325,7 +329,9 @@ func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtua // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -389,7 +395,9 @@ func (client VirtualNetworksClient) ListPreparer(resourceGroupName string) (*htt // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -519,7 +527,9 @@ func (client VirtualNetworksClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualNetworksClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -653,7 +663,9 @@ func (client VirtualNetworksClient) ListUsagePreparer(resourceGroupName string, // ListUsageSender sends the ListUsage request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) ListUsageSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListUsageResponder handles the response to the ListUsage request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go index 798d8c1bb22..b075417ddbe 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go @@ -120,6 +120,7 @@ func (client WatchersClient) CheckConnectivityPreparer(resourceGroupName string, func (client WatchersClient) CheckConnectivitySender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -188,7 +189,9 @@ func (client WatchersClient) CreateOrUpdatePreparer(resourceGroupName string, ne // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client WatchersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -270,6 +273,7 @@ func (client WatchersClient) DeletePreparer(resourceGroupName string, networkWat func (client WatchersClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -334,7 +338,9 @@ func (client WatchersClient) GetPreparer(resourceGroupName string, networkWatche // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client WatchersClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -431,6 +437,7 @@ func (client WatchersClient) GetAzureReachabilityReportPreparer(resourceGroupNam func (client WatchersClient) GetAzureReachabilityReportSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -525,6 +532,7 @@ func (client WatchersClient) GetFlowLogStatusPreparer(resourceGroupName string, func (client WatchersClient) GetFlowLogStatusSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -620,6 +628,7 @@ func (client WatchersClient) GetNextHopPreparer(resourceGroupName string, networ func (client WatchersClient) GetNextHopSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -694,7 +703,9 @@ func (client WatchersClient) GetTopologyPreparer(resourceGroupName string, netwo // GetTopologySender sends the GetTopology request. The method will close the // http.Response Body if it receives an error. func (client WatchersClient) GetTopologySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetTopologyResponder handles the response to the GetTopology request. 
The method always @@ -792,6 +803,7 @@ func (client WatchersClient) GetTroubleshootingPreparer(resourceGroupName string func (client WatchersClient) GetTroubleshootingSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -886,6 +898,7 @@ func (client WatchersClient) GetTroubleshootingResultPreparer(resourceGroupName func (client WatchersClient) GetTroubleshootingResultSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -980,6 +993,7 @@ func (client WatchersClient) GetVMSecurityRulesPreparer(resourceGroupName string func (client WatchersClient) GetVMSecurityRulesSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1044,7 +1058,9 @@ func (client WatchersClient) ListPreparer(resourceGroupName string) (*http.Reque // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client WatchersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -1105,7 +1121,9 @@ func (client WatchersClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. 
func (client WatchersClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -1190,6 +1208,7 @@ func (client WatchersClient) ListAvailableProvidersPreparer(resourceGroupName st func (client WatchersClient) ListAvailableProvidersSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1288,6 +1307,7 @@ func (client WatchersClient) SetFlowLogConfigurationPreparer(resourceGroupName s func (client WatchersClient) SetFlowLogConfigurationSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1386,6 +1406,7 @@ func (client WatchersClient) VerifyIPFlowPreparer(resourceGroupName string, netw func (client WatchersClient) VerifyIPFlowSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go index 0870a03ded6..dfa59f95783 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go @@ -96,7 +96,9 @@ func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCh // CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the // http.Response Body if it receives an error. 
func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always @@ -208,6 +210,7 @@ func (client AccountsClient) CreatePreparer(resourceGroupName string, accountNam func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -286,7 +289,9 @@ func (client AccountsClient) DeletePreparer(resourceGroupName string, accountNam // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // DeleteResponder handles the response to the Delete request. The method always @@ -364,7 +369,9 @@ func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, acc // GetPropertiesSender sends the GetProperties request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetPropertiesResponder handles the response to the GetProperties request. The method always @@ -426,7 +433,9 @@ func (client AccountsClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -509,7 +518,9 @@ func (client AccountsClient) ListAccountSASPreparer(resourceGroupName string, ac // ListAccountSASSender sends the ListAccountSAS request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListAccountSASSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAccountSASResponder handles the response to the ListAccountSAS request. The method always @@ -582,7 +593,9 @@ func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName strin // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -660,7 +673,9 @@ func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountN // ListKeysSender sends the ListKeys request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListKeysResponder handles the response to the ListKeys request. 
The method always @@ -745,7 +760,9 @@ func (client AccountsClient) ListServiceSASPreparer(resourceGroupName string, ac // ListServiceSASSender sends the ListServiceSAS request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListServiceSASSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListServiceSASResponder handles the response to the ListServiceSAS request. The method always @@ -828,7 +845,9 @@ func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, acc // RegenerateKeySender sends the RegenerateKey request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // RegenerateKeyResponder handles the response to the RegenerateKey request. The method always @@ -914,7 +933,9 @@ func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountNam // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // UpdateResponder handles the response to the Update request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go index 133386ddbeb..1c54682152d 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go @@ -1,6 +1,8 @@ // Package storage implements the Azure ARM Storage service API version 2017-06-01. // // The Azure Storage Management API. +// +// Deprecated: Please instead use github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-06-01/storage package storage // Copyright (c) Microsoft and contributors. All rights reserved. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go index cc46c699792..ca46f8e1360 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go @@ -79,7 +79,9 @@ func (client OperationsClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go index 94d4d6f83ec..cfad757fb22 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go @@ -83,7 +83,9 @@ func (client SkusClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go index 682e5c16c36..933c5d9617e 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go @@ -83,7 +83,9 @@ func (client UsageClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go index 467102d5973..95733c048c1 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go @@ -19,10 +19,10 @@ package storage // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v11.0.0-beta arm-storage/2017-06-01" + return "Azure-SDK-For-Go/v12.1.0-beta arm-storage/2017-06-01" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return "v11.0.0-beta" + return "v12.1.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/BUILD b/vendor/github.com/Azure/azure-sdk-for-go/storage/BUILD index 3e01e59510b..42f492a4869 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/BUILD +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/BUILD @@ -31,8 +31,6 @@ go_library( "table_batch.go", "tableserviceclient.go", "util.go", - "util_1.7.go", - "util_1.8.go", "version.go", ], importpath = "github.com/Azure/azure-sdk-for-go/storage", @@ -40,7 +38,7 @@ go_library( deps = [ "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", - "//vendor/github.com/satori/uuid:go_default_library", + "//vendor/github.com/satori/go.uuid:go_default_library", ], ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md index 6dc348e02af..85a0482d6ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -6,7 +6,7 @@ This package includes support for [Azure Storage Emulator](https://azure.microso # Getting Started - 1. 
Go get the SDK `go get -u github.com/Azure/azure-sdk-for=go/storage` + 1. Go get the SDK `go get -u github.com/Azure/azure-sdk-for-go/storage` 1. If you don't already have one, [create a Storage Account](https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account). - Take note of your Azure Storage Account Name and Azure Storage Account Key. They'll both be necessary for using this library. - This option is production ready, but can also be used for development. @@ -70,4 +70,4 @@ ok, err = queue2.Exists() c.Assert(err, chk.IsNil) c.Assert(ok, chk.Equals, true) } -``` \ No newline at end of file +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index a9d3cfccb68..5047bfbb24b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -549,27 +549,7 @@ func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error { } func (b *Blob) writeMetadata(h http.Header) { - metadata := make(map[string]string) - for k, v := range h { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". 
- k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["lol"] = content of the last X-Ms-Meta-Lol header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - - b.Metadata = BlobMetadata(metadata) + b.Metadata = BlobMetadata(writeMetadata(h)) } // DeleteBlobOptions includes the options for a delete blob operation diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go index 8fe21b0cfd9..e6b9704ee18 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go @@ -15,6 +15,7 @@ package storage // limitations under the License. import ( + "encoding/xml" "fmt" "net/http" "net/url" @@ -85,21 +86,53 @@ func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*Con uri := b.client.getEndpoint(blobServiceName, "", q) headers := b.client.getStandardHeaders() - var out ContainerListResponse + type ContainerAlias struct { + bsc *BlobStorageClient + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + Metadata BlobMetadata + sasuri url.URL + } + type ContainerListResponseAlias struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []ContainerAlias `xml:"Containers>Container"` + } + + var outAlias ContainerListResponseAlias resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) if err != nil { return nil, err } defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) + err = xmlUnmarshal(resp.body, &outAlias) if err != nil { return nil, err } - // assign our client to the newly created Container objects - for i := range 
out.Containers { - out.Containers[i].bsc = &b + out := ContainerListResponse{ + XMLName: outAlias.XMLName, + Xmlns: outAlias.Xmlns, + Prefix: outAlias.Prefix, + Marker: outAlias.Marker, + NextMarker: outAlias.NextMarker, + MaxResults: outAlias.MaxResults, + Containers: make([]Container, len(outAlias.Containers)), } + for i, cnt := range outAlias.Containers { + out.Containers[i] = Container{ + bsc: &b, + Name: cnt.Name, + Properties: cnt.Properties, + Metadata: map[string]string(cnt.Metadata), + sasuri: cnt.sasuri, + } + } + return &out, err } @@ -124,3 +157,26 @@ func (p ListContainersParameters) getParameters() url.Values { return out } + +func writeMetadata(h http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range h { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". 
+ k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["lol"] = content of the last X-Ms-Meta-Lol header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index 8f6cd95da71..a9ae9d11fc3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -31,6 +31,7 @@ import ( "net/url" "regexp" "runtime" + "strconv" "strings" "time" @@ -69,6 +70,11 @@ const ( userAgentHeader = "User-Agent" userDefinedMetadataHeaderPrefix = "x-ms-meta-" + + connectionStringAccountName = "accountname" + connectionStringAccountKey = "accountkey" + connectionStringEndpointSuffix = "endpointsuffix" + connectionStringEndpointProtocol = "defaultendpointsprotocol" ) var ( @@ -204,6 +210,45 @@ func (e UnexpectedStatusCodeError) Got() int { return e.got } +// NewClientFromConnectionString creates a Client from the connection string. 
+func NewClientFromConnectionString(input string) (Client, error) { + var ( + accountName, accountKey, endpointSuffix string + useHTTPS = defaultUseHTTPS + ) + + for _, pair := range strings.Split(input, ";") { + if pair == "" { + continue + } + + equalDex := strings.IndexByte(pair, '=') + if equalDex <= 0 { + return Client{}, fmt.Errorf("Invalid connection segment %q", pair) + } + + value := pair[equalDex+1:] + key := strings.ToLower(pair[:equalDex]) + switch key { + case connectionStringAccountName: + accountName = value + case connectionStringAccountKey: + accountKey = value + case connectionStringEndpointSuffix: + endpointSuffix = value + case connectionStringEndpointProtocol: + useHTTPS = value == "https" + default: + // ignored + } + } + + if accountName == StorageEmulatorAccountName { + return NewEmulatorClient() + } + return NewClient(accountName, accountKey, endpointSuffix, DefaultAPIVersion, useHTTPS) +} + // NewBasicClient constructs a Client with given storage service name and // key. func NewBasicClient(accountName, accountKey string) (Client, error) { @@ -613,12 +658,13 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader return nil, errors.New("azure/storage: error creating request: " + err.Error()) } - // if a body was provided ensure that the content length was set. - // http.NewRequest() will automatically do this for a handful of types - // and for those that it doesn't we will handle here. - if body != nil && req.ContentLength < 1 { - if lr, ok := body.(*io.LimitedReader); ok { - setContentLengthFromLimitedReader(req, lr) + // http.NewRequest() will automatically set req.ContentLength for a handful of types + // otherwise we will handle here. 
+ if req.ContentLength < 1 { + if clstr, ok := headers["Content-Length"]; ok { + if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil { + req.ContentLength = cl + } } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go index 8963c7a89b3..9f23248836f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -512,6 +512,81 @@ func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, err return out, err } +// ContainerMetadataOptions includes options for container metadata operations +type ContainerMetadataOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetMetadata replaces the metadata for the specified container. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata +func (c *Container) SetMetadata(options *ContainerMetadataOptions) error { + params := url.Values{ + "comp": {"metadata"}, + "restype": {"container"}, + } + headers := c.bsc.client.getStandardHeaders() + headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// GetMetadata returns all user-defined metadata for the specified container. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata +func (c *Container) GetMetadata(options *ContainerMetadataOptions) error { + params := url.Values{ + "comp": {"metadata"}, + "restype": {"container"}, + } + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return err + } + + c.writeMetadata(resp.headers) + return nil +} + +func (c *Container) writeMetadata(h http.Header) { + c.Metadata = writeMetadata(h) +} + func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) { sil := SignedIdentifiers{ 
SignedIdentifiers: []SignedIdentifier{}, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go index 9668ea66949..4533d7d5edf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "github.com/satori/uuid" + "github.com/satori/go.uuid" ) // Annotating as secure for gas scanning diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go index c59fd4b50b9..f0716652169 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go @@ -87,10 +87,10 @@ func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPag return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") } if blobRange.Start%512 != 0 { - return errors.New("the value for rangeStart must be a modulus of 512") + return errors.New("the value for rangeStart must be a multiple of 512") } if blobRange.End%512 != 511 { - return errors.New("the value for rangeEnd must be a modulus of 511") + return errors.New("the value for rangeEnd must be a multiple of 512 - 1") } params := url.Values{"comp": {"page"}} @@ -147,7 +147,7 @@ func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesRespon params = addTimeout(params, options.Timeout) params = addSnapshot(params, options.Snapshot) if options.PreviousSnapshot != nil { - params.Add("prevsnapshot", timeRfc1123Formatted(*options.PreviousSnapshot)) + params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot)) } if options.Range != nil { headers["Range"] = options.Range.String() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go index 
3f882417c65..155de0f274b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go @@ -26,7 +26,7 @@ import ( "sort" "strings" - "github.com/satori/uuid" + "github.com/satori/go.uuid" ) // Operation type. Insert, Delete, Replace etc. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go index 7734b8f886f..089a74a8cc6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -71,6 +71,10 @@ func timeRfc1123Formatted(t time.Time) string { return t.Format(http.TimeFormat) } +func timeRFC3339Formatted(t time.Time) string { + return t.Format("2006-01-02T15:04:05.0000000Z") +} + func mergeParams(v1, v2 url.Values) url.Values { out := url.Values{} for k, v := range v1 { @@ -172,7 +176,7 @@ func addTimeout(params url.Values, timeout uint) url.Values { func addSnapshot(params url.Values, snapshot *time.Time) url.Values { if snapshot != nil { - params.Add("snapshot", snapshot.Format("2006-01-02T15:04:05.0000000Z")) + params.Add("snapshot", timeRFC3339Formatted(*snapshot)) } return params } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go deleted file mode 100644 index 67ff6ca03fe..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "io" - "net/http" -) - -func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { - req.ContentLength = lr.N -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go deleted file mode 100644 index eada102c0cf..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package storage - -import ( - "io" - "io/ioutil" - "net/http" -) - -func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { - req.ContentLength = lr.N - snapshot := *lr - req.GetBody = func() (io.ReadCloser, error) { - r := snapshot - return ioutil.NopCloser(&r), nil - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go index 1cd3e03d12a..cf2c7bdd5bc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go @@ -15,5 +15,5 @@ package storage // limitations under the License. var ( - sdkVersion = "10.0.2" + sdkVersion = "v12.1.0-beta" ) diff --git a/vendor/github.com/satori/uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml similarity index 73% rename from vendor/github.com/satori/uuid/.travis.yml rename to vendor/github.com/satori/go.uuid/.travis.yml index fdf960e86b5..38517e2ed90 100644 --- a/vendor/github.com/satori/uuid/.travis.yml +++ b/vendor/github.com/satori/go.uuid/.travis.yml @@ -6,13 +6,6 @@ go: - 1.4 - 1.5 - 1.6 - - 1.7 - - 1.8 - - tip -matrix: - allow_failures: - - go: tip - fast_finish: true before_install: - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/satori/uuid/BUILD b/vendor/github.com/satori/go.uuid/BUILD similarity index 90% rename from vendor/github.com/satori/uuid/BUILD rename to vendor/github.com/satori/go.uuid/BUILD index 9dd4cfea1ad..98cc6415394 100644 --- a/vendor/github.com/satori/uuid/BUILD +++ b/vendor/github.com/satori/go.uuid/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["uuid.go"], - importpath = "github.com/satori/uuid", + importpath = "github.com/satori/go.uuid", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/satori/uuid/LICENSE 
b/vendor/github.com/satori/go.uuid/LICENSE similarity index 100% rename from vendor/github.com/satori/uuid/LICENSE rename to vendor/github.com/satori/go.uuid/LICENSE diff --git a/vendor/github.com/satori/uuid/README.md b/vendor/github.com/satori/go.uuid/README.md similarity index 100% rename from vendor/github.com/satori/uuid/README.md rename to vendor/github.com/satori/go.uuid/README.md diff --git a/vendor/github.com/satori/uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go similarity index 97% rename from vendor/github.com/satori/uuid/uuid.go rename to vendor/github.com/satori/go.uuid/uuid.go index 295f3fc2c57..9c7fbaa54e6 100644 --- a/vendor/github.com/satori/uuid/uuid.go +++ b/vendor/github.com/satori/go.uuid/uuid.go @@ -251,12 +251,18 @@ func (u *UUID) UnmarshalText(text []byte) (err error) { b := u[:] for i, byteGroup := range byteGroups { - if i > 0 { - if t[0] != '-' { - err = fmt.Errorf("uuid: invalid string format") + if i > 0 && t[0] == '-' { + t = t[1:] + } else if i > 0 && t[0] != '-' { + err = fmt.Errorf("uuid: invalid string format") + return + } + + if i == 2 { + if !bytes.Contains([]byte("012345"), []byte{t[0]}) { + err = fmt.Errorf("uuid: invalid version number: %s", t[0]) return } - t = t[1:] } if len(t) < byteGroup { @@ -266,11 +272,12 @@ func (u *UUID) UnmarshalText(text []byte) (err error) { if i == 4 && len(t) > byteGroup && ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) { - err = fmt.Errorf("uuid: UUID string too long: %s", text) + err = fmt.Errorf("uuid: UUID string too long: %s", t) return } _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup]) + if err != nil { return } From 86dc79373be2053f351ba1a79a579af8d3e69be5 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Thu, 4 Jan 2018 14:19:03 +0800 Subject: [PATCH 593/794] Add workaround for removing VMSS reference from LB --- .../providers/azure/azure_util_vmss.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git 
a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index 2c8630c2c83..c9b827c308e 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -939,5 +939,24 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error { return err } + // Update virtualMachineScaleSet again. This is a workaround for removing VMSS reference from LB. + // TODO: remove this workaround when figuring out the root cause. + if len(newBackendPools) == 0 { + glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", vmSetName) + ss.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): start", vmSetName) + respChan, errChan = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, vmSetName, virtualMachineScaleSet, nil) + resp = <-respChan + err = <-errChan + glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) + if ss.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { + glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", vmSetName, err) + retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet) + if retryErr != nil { + glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", vmSetName) + } + } + } + return nil } From 91dc55562c9e7742a1cfdaa6d1db0b1ce262ff95 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Thu, 4 Jan 2018 15:39:18 +0800 Subject: [PATCH 594/794] fix possible panic --- .../src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go index 7c179ab7cf3..d1e90acff77 100644 --- 
a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go @@ -207,7 +207,7 @@ func (r *proxyHandler) updateAPIService(apiService *apiregistrationapi.APIServic serviceNamespace: apiService.Spec.Service.Namespace, } newInfo.proxyRoundTripper, newInfo.transportBuildingError = restclient.TransportFor(newInfo.restConfig) - if newInfo.transportBuildingError == nil && r.proxyTransport.Dial != nil { + if newInfo.transportBuildingError == nil && r.proxyTransport != nil && r.proxyTransport.Dial != nil { switch transport := newInfo.proxyRoundTripper.(type) { case *http.Transport: transport.Dial = r.proxyTransport.Dial From 94d75929b68a6c65ab4ab7b28df3e356e06a2a79 Mon Sep 17 00:00:00 2001 From: Gavin Date: Wed, 3 Jan 2018 15:22:32 +0800 Subject: [PATCH 595/794] refactor function CalculateAntiAffinityPriority by using map/reduce pattern --- .../algorithm/priorities/metadata.go | 37 ++++-- .../priorities/selector_spreading.go | 105 +++++++++++------- .../priorities/selector_spreading_test.go | 24 +++- plugin/pkg/scheduler/factory/plugins.go | 2 +- 4 files changed, 111 insertions(+), 57 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/priorities/metadata.go b/plugin/pkg/scheduler/algorithm/priorities/metadata.go index fb561241798..3a4d7831182 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/metadata.go +++ b/plugin/pkg/scheduler/algorithm/priorities/metadata.go @@ -44,11 +44,12 @@ func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controlle // priorityMetadata is a type that is passed as metadata for priority functions type priorityMetadata struct { - nonZeroRequest *schedulercache.Resource - podTolerations []v1.Toleration - affinity *v1.Affinity - podSelectors []labels.Selector - controllerRef *metav1.OwnerReference + nonZeroRequest *schedulercache.Resource + podTolerations []v1.Toleration + affinity *v1.Affinity + podSelectors []labels.Selector + controllerRef 
*metav1.OwnerReference + podFirstServiceSelector labels.Selector } // PriorityMetadata is a MetadataProducer. Node info can be nil. @@ -57,30 +58,40 @@ func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo if pod == nil { return nil } - tolerationsPreferNoSchedule := getAllTolerationPreferNoSchedule(pod.Spec.Tolerations) - podSelectors := getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister) return &priorityMetadata{ - nonZeroRequest: getNonZeroRequests(pod), - podTolerations: tolerationsPreferNoSchedule, - affinity: pod.Spec.Affinity, - podSelectors: podSelectors, - controllerRef: priorityutil.GetControllerRef(pod), + nonZeroRequest: getNonZeroRequests(pod), + podTolerations: getAllTolerationPreferNoSchedule(pod.Spec.Tolerations), + affinity: pod.Spec.Affinity, + podSelectors: getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister), + controllerRef: priorityutil.GetControllerRef(pod), + podFirstServiceSelector: getFirstServiceSelector(pod, pmf.serviceLister), } } +// getFirstServiceSelector returns one selector of services the given pod. +func getFirstServiceSelector(pod *v1.Pod, sl algorithm.ServiceLister) (firstServiceSelector labels.Selector) { + if services, err := sl.GetPodServices(pod); err == nil && len(services) > 0 { + return labels.SelectorFromSet(services[0].Spec.Selector) + } + return nil +} + // getSelectors returns selectors of services, RCs and RSs matching the given pod. 
func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister, ssl algorithm.StatefulSetLister) []labels.Selector { var selectors []labels.Selector + if services, err := sl.GetPodServices(pod); err == nil { for _, service := range services { selectors = append(selectors, labels.SelectorFromSet(service.Spec.Selector)) } } + if rcs, err := cl.GetPodControllers(pod); err == nil { for _, rc := range rcs { selectors = append(selectors, labels.SelectorFromSet(rc.Spec.Selector)) } } + if rss, err := rsl.GetPodReplicaSets(pod); err == nil { for _, rs := range rss { if selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil { @@ -88,6 +99,7 @@ func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.Controll } } } + if sss, err := ssl.GetPodStatefulSets(pod); err == nil { for _, ss := range sss { if selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil { @@ -95,5 +107,6 @@ func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.Controll } } } + return selectors } diff --git a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go index 7258d52ea7b..53c5c3719ed 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go +++ b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go @@ -177,13 +177,13 @@ type ServiceAntiAffinity struct { label string } -func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, label string) algorithm.PriorityFunction { +func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, label string) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) { antiAffinity := &ServiceAntiAffinity{ podLister: podLister, serviceLister: serviceLister, label: label, } - return antiAffinity.CalculateAntiAffinityPriority + 
return antiAffinity.CalculateAntiAffinityPriorityMap, antiAffinity.CalculateAntiAffinityPriorityReduce } // Classifies nodes into ones with labels and without labels. @@ -201,52 +201,79 @@ func (s *ServiceAntiAffinity) getNodeClassificationByLabels(nodes []*v1.Node) (m return labeledNodes, nonLabeledNodes } -// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service -// on machines with the same value for a particular label. -// The label to be considered is provided to the struct (ServiceAntiAffinity). -func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { - var nsServicePods []*v1.Pod - if services, err := s.serviceLister.GetPodServices(pod); err == nil && len(services) > 0 { - // just use the first service and get the other pods within the service - // TODO: a separate predicate can be created that tries to handle all services for the pod - selector := labels.SelectorFromSet(services[0].Spec.Selector) - pods, err := s.podLister.List(selector) - if err != nil { - return nil, err - } - // consider only the pods that belong to the same namespace - for _, nsPod := range pods { - if nsPod.Namespace == pod.Namespace { - nsServicePods = append(nsServicePods, nsPod) - } +// filteredPod get pods based on namespace and selector +func filteredPod(namespace string, selector labels.Selector, nodeInfo *schedulercache.NodeInfo) (pods []*v1.Pod) { + if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || selector == nil { + return []*v1.Pod{} + } + for _, pod := range nodeInfo.Pods() { + if namespace == pod.Namespace && selector.Matches(labels.Set(pod.Labels)) { + pods = append(pods, pod) } } + return +} - // separate out the nodes that have the label from the ones that don't - labeledNodes, nonLabeledNodes := s.getNodeClassificationByLabels(nodes) +// CalculateAntiAffinityPriorityMap spreads pods 
by minimizing the number of pods belonging to the same service +// on given machine +func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { + var firstServiceSelector labels.Selector + + node := nodeInfo.Node() + if node == nil { + return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + } + priorityMeta, ok := meta.(*priorityMetadata) + if ok { + firstServiceSelector = priorityMeta.podFirstServiceSelector + } else { + firstServiceSelector = getFirstServiceSelector(pod, s.serviceLister) + } + //pods matched namespace,selector on current node + matchedPodsOfNode := filteredPod(pod.Namespace, firstServiceSelector, nodeInfo) + + return schedulerapi.HostPriority{ + Host: node.Name, + Score: int(len(matchedPodsOfNode)), + }, nil +} + +// CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label. +// The label to be considered is provided to the struct (ServiceAntiAffinity). 
+func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { + var numServicePods int + var label string podCounts := map[string]int{} - for _, pod := range nsServicePods { - label, exists := labeledNodes[pod.Spec.NodeName] - if !exists { + labelNodesStatus := map[string]string{} + maxPriorityFloat64 := float64(schedulerapi.MaxPriority) + + for _, hostPriority := range result { + numServicePods += hostPriority.Score + if !labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Has(s.label) { continue } - podCounts[label]++ + label = labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Get(s.label) + labelNodesStatus[hostPriority.Host] = label + podCounts[label] += hostPriority.Score } - numServicePods := len(nsServicePods) - result := []schedulerapi.HostPriority{} + //score int - scale of 0-maxPriority // 0 being the lowest priority and maxPriority being the highest - for node := range labeledNodes { - // initializing to the default/max node score of maxPriority - fScore := float64(schedulerapi.MaxPriority) - if numServicePods > 0 { - fScore = float64(schedulerapi.MaxPriority) * (float64(numServicePods-podCounts[labeledNodes[node]]) / float64(numServicePods)) + for i, hostPriority := range result { + label, ok := labelNodesStatus[hostPriority.Host] + if !ok { + result[i].Host = hostPriority.Host + result[i].Score = int(0) + continue } - result = append(result, schedulerapi.HostPriority{Host: node, Score: int(fScore)}) + // initializing to the default/max node score of maxPriority + fScore := maxPriorityFloat64 + if numServicePods > 0 { + fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[label]) / float64(numServicePods)) + } + result[i].Host = hostPriority.Host + result[i].Score = int(fScore) } - // add the open nodes with a score of 0 - for _, node := range nonLabeledNodes { - result = append(result, 
schedulerapi.HostPriority{Host: node, Score: 0}) - } - return result, nil + + return nil } diff --git a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go index d3cb19cb635..85e547dae4a 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go @@ -757,19 +757,33 @@ func TestZoneSpreadPriority(t *testing.T) { test: "service pod on non-zoned node", }, } + // these local variables just make sure controllerLister\replicaSetLister\statefulSetLister not nil + // when construct mataDataProducer + sss := []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}} + rcs := []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}} + rss := []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}} - for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nil) + for i, test := range tests { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes)) zoneSpread := ServiceAntiAffinity{podLister: schedulertesting.FakePodLister(test.pods), serviceLister: schedulertesting.FakeServiceLister(test.services), label: "zone"} - list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, nodeNameToInfo, makeLabeledNodeList(test.nodes)) + + mataDataProducer := NewPriorityMetadataFactory( + schedulertesting.FakeServiceLister(test.services), + schedulertesting.FakeControllerLister(rcs), + schedulertesting.FakeReplicaSetLister(rss), + schedulertesting.FakeStatefulSetLister(sss)) + mataData := mataDataProducer(test.pod, nodeNameToInfo) + ttp := priorityFunction(zoneSpread.CalculateAntiAffinityPriorityMap, 
zoneSpread.CalculateAntiAffinityPriorityReduce, mataData) + list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(test.nodes)) if err != nil { - t.Errorf("unexpected error: %v", err) + t.Errorf("unexpected error: %v index : %d", err, i) } + // sort the two lists to avoid failures on account of different ordering sort.Sort(test.expectedList) sort.Sort(list) if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) + t.Errorf("test index %d (%s): expected %#v, got %#v", i, test.test, test.expectedList, list) } } } diff --git a/plugin/pkg/scheduler/factory/plugins.go b/plugin/pkg/scheduler/factory/plugins.go index 6c7a7ab7d5f..3bae125c351 100644 --- a/plugin/pkg/scheduler/factory/plugins.go +++ b/plugin/pkg/scheduler/factory/plugins.go @@ -305,7 +305,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string { if policy.Argument != nil { if policy.Argument.ServiceAntiAffinity != nil { pcf = &PriorityConfigFactory{ - Function: func(args PluginFactoryArgs) algorithm.PriorityFunction { + MapReduceFunction: func(args PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) { return priorities.NewServiceAntiAffinityPriority( args.PodLister, args.ServiceLister, From 2f9532f047034d607ec44217e5baaeaa3b604a6d Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Tue, 2 Jan 2018 12:17:29 +0100 Subject: [PATCH 596/794] Allow kubectl set image/env on a cronjob --- pkg/kubectl/cmd/util/BUILD | 1 + pkg/kubectl/cmd/util/factory_client_access.go | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index 63406eaf30d..282666a1be4 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -53,6 +53,7 @@ go_library( "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", + 
"//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index a0e8b042752..d03ebdc165c 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -38,6 +38,7 @@ import ( appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" batchv2alpha1 "k8s.io/api/batch/v2alpha1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -275,6 +276,11 @@ func (f *ring0Factory) UpdatePodSpecForObject(obj runtime.Object, fn func(*v1.Po // Job case *batchv1.Job: return true, fn(&t.Spec.Template.Spec) + // CronJob + case *batchv1beta1.CronJob: + return true, fn(&t.Spec.JobTemplate.Spec.Template.Spec) + case *batchv2alpha1.CronJob: + return true, fn(&t.Spec.JobTemplate.Spec.Template.Spec) default: return false, fmt.Errorf("the object is not a pod or does not have a pod template") } From 083671dfa04d1a33c7903d629aefe6858abf508d Mon Sep 17 00:00:00 2001 From: Kai Chen Date: Thu, 4 Jan 2018 01:29:46 -0800 Subject: [PATCH 597/794] Minor commenting fixes for Azure Disk Controllers from CR --- pkg/cloudprovider/providers/azure/azure_blobDiskController.go | 2 +- .../providers/azure/azure_managedDiskController.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index 1fa074d5bb7..e68c23b896c 100644 --- a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -611,7 +611,7 @@ func (c *BlobDiskController) 
findSANameForDisk(storageAccountType storage.SkuNam return SAName, nil } - // avergates are not ok and we are at capacity(max storage accounts allowed) + // averages are not ok and we are at capacity (max storage accounts allowed) if aboveAvg && countAccounts == maxStorageAccounts { glog.Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts", avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts) diff --git a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go index 5acdf583583..71b341d9d3f 100644 --- a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go @@ -106,8 +106,8 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error { if err != nil { return err } - // We don't need poll here, k8s will immediatly stop referencing the disk - // the disk will be evantually deleted - cleanly - by ARM + // We don't need poll here, k8s will immediately stop referencing the disk + // the disk will be eventually deleted - cleanly - by ARM glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI) From 1ea697044adceac31f9bc5e148c8c1a7c1392b2d Mon Sep 17 00:00:00 2001 From: Lee Verberne Date: Thu, 21 Dec 2017 16:37:12 +0100 Subject: [PATCH 598/794] Update pause container version to 3.1 This updates the version of the pause container used by the kubelet and various test utilities to 3.1. 
This also adds a CHANGELOG.md for build/pause --- build/pause/CHANGELOG.md | 8 ++++++++ cmd/kubeadm/app/util/template_test.go | 6 +++--- cmd/kubelet/app/options/container_runtime.go | 4 ++-- hack/make-rules/test-cmd-util.sh | 6 +++--- hack/testdata/pod-with-precision.json | 2 +- pkg/kubelet/dockershim/docker_sandbox.go | 2 +- test/integration/benchmark-controller.json | 2 +- test/integration/framework/util.go | 6 +++--- test/utils/image/manifest.go | 4 ++-- 9 files changed, 24 insertions(+), 16 deletions(-) create mode 100644 build/pause/CHANGELOG.md diff --git a/build/pause/CHANGELOG.md b/build/pause/CHANGELOG.md new file mode 100644 index 00000000000..8f58bcdac0a --- /dev/null +++ b/build/pause/CHANGELOG.md @@ -0,0 +1,8 @@ +# 3.1 + +* The pause container gains a signal handler to clean up orphaned zombie processes. ([#36853](https://prs.k8s.io/36853), [@verb](https://github.com/verb)) +* `pause -v` will return build information for the pause binary. ([#56762](https://prs.k8s.io/56762), [@verb](https://github.com/verb)) + +# 3.0 + +* The pause container was rewritten entirely in C. 
([#23009](https://prs.k8s.io/23009), [@uluyol](https://github.com/uluyol)) diff --git a/cmd/kubeadm/app/util/template_test.go b/cmd/kubeadm/app/util/template_test.go index 3a00e05e601..ed5fee9c330 100644 --- a/cmd/kubeadm/app/util/template_test.go +++ b/cmd/kubeadm/app/util/template_test.go @@ -21,9 +21,9 @@ import ( ) const ( - validTmpl = "image: {{ .ImageRepository }}/pause-{{ .Arch }}:3.0" - validTmplOut = "image: gcr.io/google_containers/pause-amd64:3.0" - doNothing = "image: gcr.io/google_containers/pause-amd64:3.0" + validTmpl = "image: {{ .ImageRepository }}/pause-{{ .Arch }}:3.1" + validTmplOut = "image: gcr.io/google_containers/pause-amd64:3.1" + doNothing = "image: gcr.io/google_containers/pause-amd64:3.1" invalidTmpl1 = "{{ .baz }/d}" invalidTmpl2 = "{{ !foobar }}" ) diff --git a/cmd/kubelet/app/options/container_runtime.go b/cmd/kubelet/app/options/container_runtime.go index d1174ea044c..b57a4e20bca 100644 --- a/cmd/kubelet/app/options/container_runtime.go +++ b/cmd/kubelet/app/options/container_runtime.go @@ -26,9 +26,9 @@ import ( ) const ( - // When these values are updated, also update test/e2e/framework/util.go + // When these values are updated, also update test/utils/image/manifest.go defaultPodSandboxImageName = "gcr.io/google_containers/pause" - defaultPodSandboxImageVersion = "3.0" + defaultPodSandboxImageVersion = "3.1" // From pkg/kubelet/rkt/rkt.go to avoid circular import defaultRktAPIServiceEndpoint = "localhost:15441" ) diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index bce02c25fb0..aaed0980807 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -719,9 +719,9 @@ run_pod_tests() { kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:' ## Patch pod from JSON can change image # Command - kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": 
"kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}' - # Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0 - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:' + kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.1"}]}}' + # Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.1 + kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.1:' ## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected ERROR_FILE="${KUBE_TEMP}/conflict-error" diff --git a/hack/testdata/pod-with-precision.json b/hack/testdata/pod-with-precision.json index 5aac946cd2b..ce59d9c100b 100644 --- a/hack/testdata/pod-with-precision.json +++ b/hack/testdata/pod-with-precision.json @@ -9,7 +9,7 @@ "containers": [ { "name": "kubernetes-pause", - "image": "gcr.io/google_containers/pause-amd64:3.0" + "image": "gcr.io/google_containers/pause-amd64:3.1" } ], "restartPolicy": "Never", diff --git a/pkg/kubelet/dockershim/docker_sandbox.go b/pkg/kubelet/dockershim/docker_sandbox.go index b595e310096..be478d5e88e 100644 --- a/pkg/kubelet/dockershim/docker_sandbox.go +++ b/pkg/kubelet/dockershim/docker_sandbox.go @@ -36,7 +36,7 @@ import ( ) const ( - defaultSandboxImage = "gcr.io/google_containers/pause-amd64:3.0" + defaultSandboxImage = "gcr.io/google_containers/pause-amd64:3.1" // Various default sandbox resources requests/limits. 
defaultSandboxCPUshares int64 = 2 diff --git a/test/integration/benchmark-controller.json b/test/integration/benchmark-controller.json index 00444f8900f..47ac4ae0cdf 100644 --- a/test/integration/benchmark-controller.json +++ b/test/integration/benchmark-controller.json @@ -17,7 +17,7 @@ "spec": { "containers": [{ "name": "test-container", - "image": "gcr.io/google_containers/pause-amd64:3.0" + "image": "gcr.io/google_containers/pause-amd64:3.1" }] } } diff --git a/test/integration/framework/util.go b/test/integration/framework/util.go index 88f4ac52497..afb1d68961e 100644 --- a/test/integration/framework/util.go +++ b/test/integration/framework/util.go @@ -38,10 +38,10 @@ import ( ) const ( - // When these values are updated, also update cmd/kubelet/app/options/options.go - // A copy of these values exist in e2e/framework/util.go. + // When these values are updated, also update cmd/kubelet/app/options/container_runtime.go + // A copy of these values exist in test/utils/image/manifest.go currentPodInfraContainerImageName = "gcr.io/google_containers/pause" - currentPodInfraContainerImageVersion = "3.0" + currentPodInfraContainerImageVersion = "3.1" ) // GetServerArchitecture fetches the architecture of the cluster's apiserver. 
diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index 73b586ff616..f4bdb5f784c 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -82,8 +82,8 @@ var ( NoSnatTest = ImageConfig{e2eRegistry, "no-snat-test", "1.0", true} NoSnatTestProxy = ImageConfig{e2eRegistry, "no-snat-test-proxy", "1.0", true} NWayHTTP = ImageConfig{e2eRegistry, "n-way-http", "1.0", true} - // When these values are updated, also update cmd/kubelet/app/options/options.go - Pause = ImageConfig{gcRegistry, "pause", "3.0", true} + // When these values are updated, also update cmd/kubelet/app/options/container_runtime.go + Pause = ImageConfig{gcRegistry, "pause", "3.1", true} Porter = ImageConfig{e2eRegistry, "porter", "1.0", true} PortForwardTester = ImageConfig{e2eRegistry, "port-forward-tester", "1.0", true} Redis = ImageConfig{e2eRegistry, "redis", "1.0", true} From fe7a0c5a00e826630db9b5bb8384474a7a543ab7 Mon Sep 17 00:00:00 2001 From: Serguei Bezverkhi Date: Wed, 3 Jan 2018 20:34:07 -0500 Subject: [PATCH 599/794] Fixing typo in e2e test variable --- test/e2e/storage/persistent_volumes-local.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 98528498a04..79fd1f7c636 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -120,7 +120,7 @@ const ( // provisioner daemonSetName name daemonSetName = "local-volume-provisioner" // provisioner default mount point folder - provisionerDefaultMountRoot = "/mnt-local-storage" + provisionerDefaultMountRoot = "/mnt/local-storage" // provisioner node/pv cluster role binding nodeBindingName = "local-storage:provisioner-node-binding" pvBindingName = "local-storage:provisioner-pv-binding" @@ -1071,7 +1071,7 @@ func createProvisionerDaemonset(config *localTestConfig) { }, { Name: "local-disks", - MountPath: "/mnt/local-storage", + 
MountPath: provisionerDefaultMountRoot, }, }, }, From 059fa35a842ae21f5ade374bf1f076e3b6b34ec3 Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Thu, 4 Jan 2018 10:22:16 -0800 Subject: [PATCH 600/794] dockershim: bump the minimum supported docker version to 1.11 Drop the 1.10 compatibilty code. --- pkg/kubelet/dockershim/docker_container.go | 5 ++-- pkg/kubelet/dockershim/docker_sandbox.go | 10 ++------ pkg/kubelet/dockershim/helpers.go | 28 ++-------------------- pkg/kubelet/dockershim/helpers_test.go | 25 ------------------- pkg/kubelet/dockershim/libdocker/client.go | 4 ++-- 5 files changed, 8 insertions(+), 64 deletions(-) diff --git a/pkg/kubelet/dockershim/docker_container.go b/pkg/kubelet/dockershim/docker_container.go index 453c18c7566..33a05556dfd 100644 --- a/pkg/kubelet/dockershim/docker_container.go +++ b/pkg/kubelet/dockershim/docker_container.go @@ -102,7 +102,6 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi if err != nil { return "", fmt.Errorf("unable to get the docker API version: %v", err) } - securityOptSep := getSecurityOptSeparator(apiVersion) image := "" if iSpec := config.GetImage(); iSpec != nil { @@ -134,7 +133,7 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi } hc := createConfig.HostConfig - ds.updateCreateConfig(&createConfig, config, sandboxConfig, podSandboxID, securityOptSep, apiVersion) + ds.updateCreateConfig(&createConfig, config, sandboxConfig, podSandboxID, securityOptSeparator, apiVersion) // Set devices for container. 
devices := make([]dockercontainer.DeviceMapping, len(config.Devices)) for i, device := range config.Devices { @@ -146,7 +145,7 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi } hc.Resources.Devices = devices - securityOpts, err := ds.getSecurityOpts(config.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSep) + securityOpts, err := ds.getSecurityOpts(config.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSeparator) if err != nil { return "", fmt.Errorf("failed to generate security options for container %q: %v", config.Metadata.Name, err) } diff --git a/pkg/kubelet/dockershim/docker_sandbox.go b/pkg/kubelet/dockershim/docker_sandbox.go index b595e310096..e388a16e3d9 100644 --- a/pkg/kubelet/dockershim/docker_sandbox.go +++ b/pkg/kubelet/dockershim/docker_sandbox.go @@ -528,12 +528,6 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, // TODO(random-liu): Deprecate this label once container metrics is directly got from CRI. labels[types.KubernetesContainerNameLabel] = sandboxContainerName - apiVersion, err := ds.getDockerAPIVersion() - if err != nil { - return nil, fmt.Errorf("unable to get the docker API version: %v", err) - } - securityOptSep := getSecurityOptSeparator(apiVersion) - hc := &dockercontainer.HostConfig{} createConfig := &dockertypes.ContainerCreateConfig{ Name: makeSandboxName(c), @@ -547,7 +541,7 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, } // Apply linux-specific options. - if err := ds.applySandboxLinuxOptions(hc, c.GetLinux(), createConfig, image, securityOptSep); err != nil { + if err := ds.applySandboxLinuxOptions(hc, c.GetLinux(), createConfig, image, securityOptSeparator); err != nil { return nil, err } @@ -565,7 +559,7 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, } // Set security options. 
- securityOpts, err := ds.getSecurityOpts(c.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSep) + securityOpts, err := ds.getSecurityOpts(c.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSeparator) if err != nil { return nil, fmt.Errorf("failed to generate sandbox security options for sandbox %q: %v", c.Metadata.Name, err) } diff --git a/pkg/kubelet/dockershim/helpers.go b/pkg/kubelet/dockershim/helpers.go index 595263a840b..8066b7b03c6 100644 --- a/pkg/kubelet/dockershim/helpers.go +++ b/pkg/kubelet/dockershim/helpers.go @@ -22,7 +22,6 @@ import ( "strconv" "strings" - "github.com/blang/semver" dockertypes "github.com/docker/docker/api/types" dockercontainer "github.com/docker/docker/api/types/container" dockerfilters "github.com/docker/docker/api/types/filters" @@ -39,12 +38,8 @@ import ( ) const ( - annotationPrefix = "annotation." - - // Docker changed the API for specifying options in v1.11 - securityOptSeparatorChangeVersion = "1.23.0" // Corresponds to docker 1.11.x - securityOptSeparatorOld = ':' - securityOptSeparatorNew = '=' + annotationPrefix = "annotation." + securityOptSeparator = '=' ) var ( @@ -54,10 +49,6 @@ var ( // if a container starts but the executable file is not found, runc gives a message that matches startRE = regexp.MustCompile(`\\\\\\\"(.*)\\\\\\\": executable file not found`) - // Docker changes the security option separator from ':' to '=' in the 1.23 - // API version. - optsSeparatorChangeVersion = semver.MustParse(securityOptSeparatorChangeVersion) - defaultSeccompOpt = []dockerOpt{{"seccomp", "unconfined", ""}} ) @@ -321,21 +312,6 @@ func transformStartContainerError(err error) error { return err } -// getSecurityOptSeparator returns the security option separator based on the -// docker API version. -// TODO: Remove this function along with the relevant code when we no longer -// need to support docker 1.10. 
-func getSecurityOptSeparator(v *semver.Version) rune { - switch v.Compare(optsSeparatorChangeVersion) { - case -1: - // Current version is less than the API change version; use the old - // separator. - return securityOptSeparatorOld - default: - return securityOptSeparatorNew - } -} - // ensureSandboxImageExists pulls the sandbox image when it's not present. func ensureSandboxImageExists(client libdocker.Interface, image string) error { _, err := client.InspectImageByRef(image) diff --git a/pkg/kubelet/dockershim/helpers_test.go b/pkg/kubelet/dockershim/helpers_test.go index 9532b6bc042..03809c4c5ff 100644 --- a/pkg/kubelet/dockershim/helpers_test.go +++ b/pkg/kubelet/dockershim/helpers_test.go @@ -23,7 +23,6 @@ import ( "path/filepath" "testing" - "github.com/blang/semver" dockertypes "github.com/docker/docker/api/types" dockernat "github.com/docker/go-connections/nat" "github.com/stretchr/testify/assert" @@ -129,30 +128,6 @@ func TestParsingCreationConflictError(t *testing.T) { require.Equal(t, matches[1], "24666ab8c814d16f986449e504ea0159468ddf8da01897144a770f66dce0e14e") } -func TestGetSecurityOptSeparator(t *testing.T) { - for c, test := range map[string]struct { - desc string - version *semver.Version - expected rune - }{ - "older docker version": { - version: &semver.Version{Major: 1, Minor: 22, Patch: 0}, - expected: ':', - }, - "changed docker version": { - version: &semver.Version{Major: 1, Minor: 23, Patch: 0}, - expected: '=', - }, - "newer docker version": { - version: &semver.Version{Major: 1, Minor: 24, Patch: 0}, - expected: '=', - }, - } { - actual := getSecurityOptSeparator(test.version) - assert.Equal(t, test.expected, actual, c) - } -} - // writeDockerConfig will write a config file into a temporary dir, and return that dir. // Caller is responsible for deleting the dir and its contents. 
func writeDockerConfig(cfg string) (string, error) { diff --git a/pkg/kubelet/dockershim/libdocker/client.go b/pkg/kubelet/dockershim/libdocker/client.go index 0400bbb9179..99de9084239 100644 --- a/pkg/kubelet/dockershim/libdocker/client.go +++ b/pkg/kubelet/dockershim/libdocker/client.go @@ -29,8 +29,8 @@ import ( const ( // https://docs.docker.com/engine/reference/api/docker_remote_api/ - // docker version should be at least 1.10.x - MinimumDockerAPIVersion = "1.22.0" + // docker version should be at least 1.11.x + MinimumDockerAPIVersion = "1.23.0" // Status of a container returned by ListContainers. StatusRunningPrefix = "Up" From 38f63321d7046cf793734a0e19347bac4cc7f82a Mon Sep 17 00:00:00 2001 From: mbohlool Date: Thu, 4 Jan 2018 04:54:12 -0800 Subject: [PATCH 601/794] Update CHANGELOG-1.9.md for v1.9.1. --- CHANGELOG-1.9.md | 120 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 98 insertions(+), 22 deletions(-) diff --git a/CHANGELOG-1.9.md b/CHANGELOG-1.9.md index 1ee4998990e..0094d62b279 100644 --- a/CHANGELOG-1.9.md +++ b/CHANGELOG-1.9.md @@ -1,9 +1,16 @@ -- [v1.9.0](#v190) - - [Downloads for v1.9.0](#downloads-for-v190) +- [v1.9.1](#v191) + - [Downloads for v1.9.1](#downloads-for-v191) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) + - [Changelog since v1.9.0](#changelog-since-v190) + - [Other notable changes](#other-notable-changes) +- [v1.9.0](#v190) + - [Downloads for v1.9.0](#downloads-for-v190) + - [Client Binaries](#client-binaries-1) + - [Server Binaries](#server-binaries-1) + - [Node Binaries](#node-binaries-1) - [1.9 Release Notes](#19-release-notes) - [WARNING: etcd backup strongly recommended](#warning-etcd-backup-strongly-recommended) - [Introduction to 1.9.0](#introduction-to-190) @@ -91,48 +98,117 @@ - [External Dependencies](#external-dependencies) - [v1.9.0-beta.2](#v190-beta2) - [Downloads for v1.9.0-beta.2](#downloads-for-v190-beta2) - - [Client 
Binaries](#client-binaries-1) - - [Server Binaries](#server-binaries-1) - - [Node Binaries](#node-binaries-1) - - [Changelog since v1.9.0-beta.1](#changelog-since-v190-beta1) - - [Other notable changes](#other-notable-changes) -- [v1.9.0-beta.1](#v190-beta1) - - [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3) - - [Action Required](#action-required) + - [Changelog since v1.9.0-beta.1](#changelog-since-v190-beta1) - [Other notable changes](#other-notable-changes-1) -- [v1.9.0-alpha.3](#v190-alpha3) - - [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3) +- [v1.9.0-beta.1](#v190-beta1) + - [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2) - - [Action Required](#action-required-1) + - [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-2) -- [v1.9.0-alpha.2](#v190-alpha2) - - [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2) +- [v1.9.0-alpha.3](#v190-alpha3) + - [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.8.0](#changelog-since-v180) - - [Action Required](#action-required-2) + - [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2) + - [Action Required](#action-required-1) - [Other notable changes](#other-notable-changes-3) -- [v1.9.0-alpha.1](#v190-alpha1) - - [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1) +- [v1.9.0-alpha.2](#v190-alpha2) + - [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2) - 
[Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) + - [Changelog since v1.8.0](#changelog-since-v180) + - [Action Required](#action-required-2) + - [Other notable changes](#other-notable-changes-4) +- [v1.9.0-alpha.1](#v190-alpha1) + - [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1) + - [Client Binaries](#client-binaries-6) + - [Server Binaries](#server-binaries-6) + - [Node Binaries](#node-binaries-6) - [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3) - [Action Required](#action-required-3) - - [Other notable changes](#other-notable-changes-4) + - [Other notable changes](#other-notable-changes-5) +# v1.9.1 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples) + +## Downloads for v1.9.1 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes.tar.gz) | `0eece0e6c1f68535ea71b58b87e239019bb57fdd61118f3d7defa6bbf4fad5ee` +[kubernetes-src.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-src.tar.gz) | `625ebb79412bd12feccf12e8b6a15d9c71ea681b571f34deaa59fe6c9ba55935` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-darwin-386.tar.gz) | `909556ed9b8445703d0124f2d8c1901b00afaba63a9123a4296be8663c3a2b2d` +[kubernetes-client-darwin-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-darwin-amd64.tar.gz) | `71e191d99d3ac1426e23e087b8d0875e793e5615d3aa7ac1e175b250f9707c48` +[kubernetes-client-linux-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-386.tar.gz) | `1c4e60c0c056a3300c7fcc9faccd1b1ea2b337e1360c20c5b1c25fdc47923cf0` 
+[kubernetes-client-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-amd64.tar.gz) | `fe8fe40148df404b33069931ea30937699758ed4611ef6baddb4c21b7b19db5e` +[kubernetes-client-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-arm64.tar.gz) | `921f5711b97f0b4de69784d9c79f95e80f75a550f28fc1f26597aa0ef6faa471` +[kubernetes-client-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-arm.tar.gz) | `77b010cadef98dc832a2f560afe15e57a675ed9fbc59ffad5e19878510997874` +[kubernetes-client-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-ppc64le.tar.gz) | `02aa71ddcbe8b711814af7287aac79de5d99c1c143c0d3af5e14b1ff195b8bdc` +[kubernetes-client-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-s390x.tar.gz) | `7e315024267306a620045d003785ecc8d7f2e763a6108ae806d5d384aa7552cc` +[kubernetes-client-windows-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-windows-386.tar.gz) | `99b2a81b7876498e119db4cb34c434b3790bc41cd882384037c1c1b18cba9f99` +[kubernetes-client-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-windows-amd64.tar.gz) | `d89d303cbbf9e57e5a540277158e4d83ad18ca7402b5b54665f1378bb4528599` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-amd64.tar.gz) | `5acf2527461419ba883ac352f7c36c3fa0b86a618dbede187054ad90fa233b0e` +[kubernetes-server-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-arm64.tar.gz) | 
`e1f61b4dc6e0c9986e95ec25f876f9a89966215ee8cc7f4a3539ec391b217587` +[kubernetes-server-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-arm.tar.gz) | `441c45e16e63e9bdf99887a896a99b3a376af778cb778cc1d0e6afc505237200` +[kubernetes-server-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-ppc64le.tar.gz) | `c0175f02180d9c88028ee5ad4e3ea04af8a6741a97f4900b02615f7f83c4d1c5` +[kubernetes-server-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-s390x.tar.gz) | `2178150d31197ad7f59d44ffea37d682c2675b3a4ea2fc3fa1eaa0e768b993f7` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-amd64.tar.gz) | `b8ff0ae693ecca4d55669c66786d6c585f8c77b41a270d65f8175eba8729663a` +[kubernetes-node-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-arm64.tar.gz) | `f0f63baaace463dc663c98cbc9a41e52233d1ef33410571ce3f3e78bd485787e` +[kubernetes-node-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-arm.tar.gz) | `554bdd11deaf390de85830c7c888dfd4d75d9de8ac147799df12993f27bde905` +[kubernetes-node-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-ppc64le.tar.gz) | `913af8ca8b258930e76fd3368acc83608e36e7e270638fa01a6e3be4f682d8bd` +[kubernetes-node-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-s390x.tar.gz) | `8192c1c80563230d727fab71514105571afa52cde8520b3d90af58e6daf0e19c` 
+[kubernetes-node-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-windows-amd64.tar.gz) | `4408e6d741c6008044584c0d7235e608c596e836d51346ee773589d9b4589fdc` + +## Changelog since v1.9.0 + +### Other notable changes + +* Compare correct file names for volume detach operation ([#57053](https://github.com/kubernetes/kubernetes/pull/57053), [@prashima](https://github.com/prashima)) +* Fixed a garbage collection race condition where objects with ownerRefs pointing to cluster-scoped objects could be deleted incorrectly. ([#57211](https://github.com/kubernetes/kubernetes/pull/57211), [@liggitt](https://github.com/liggitt)) +* Free up CPU and memory requested but unused by Metrics Server Pod Nanny. ([#57252](https://github.com/kubernetes/kubernetes/pull/57252), [@kawych](https://github.com/kawych)) +* Configurable liveness probe initial delays for etcd and kube-apiserver in GCE ([#57749](https://github.com/kubernetes/kubernetes/pull/57749), [@wojtek-t](https://github.com/wojtek-t)) +* Fixed garbage collection hang ([#57503](https://github.com/kubernetes/kubernetes/pull/57503), [@liggitt](https://github.com/liggitt)) +* GCE: Fixes ILB creation on automatic networks with manually created subnetworks. ([#57351](https://github.com/kubernetes/kubernetes/pull/57351), [@nicksardo](https://github.com/nicksardo)) +* Check for known manifests during preflight instead of only checking for non-empty manifests directory. 
([#57287](https://github.com/kubernetes/kubernetes/pull/57287), [@mattkelly](https://github.com/mattkelly)) +* enable flexvolume on Windows node ([#56921](https://github.com/kubernetes/kubernetes/pull/56921), [@andyzhangx](https://github.com/andyzhangx)) +* change default azure file/dir mode to 0755 ([#56551](https://github.com/kubernetes/kubernetes/pull/56551), [@andyzhangx](https://github.com/andyzhangx)) +* fix incorrect error info when creating an azure file PVC failed ([#56550](https://github.com/kubernetes/kubernetes/pull/56550), [@andyzhangx](https://github.com/andyzhangx)) +* Retry 'connection refused' errors when setting up clusters on GCE. ([#57394](https://github.com/kubernetes/kubernetes/pull/57394), [@mborsz](https://github.com/mborsz)) +* Fixes issue creating docker secrets with kubectl 1.9 for accessing docker private registries. ([#57463](https://github.com/kubernetes/kubernetes/pull/57463), [@dims](https://github.com/dims)) +* Fixes a bug where if an error was returned that was not an `autorest.DetailedError` we would return `"not found", nil` which caused nodes to go to `NotReady` state. ([#57484](https://github.com/kubernetes/kubernetes/pull/57484), [@brendandburns](https://github.com/brendandburns)) +* Fix Heapster configuration and Metrics Server configuration to enable overriding default resource requirements. 
([#56965](https://github.com/kubernetes/kubernetes/pull/56965), [@kawych](https://github.com/kawych))



# v1.9.0

[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples) From c258d4df84089b08b9cbd37b1dee4b00576a2532 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 4 Jan 2018 14:00:04 -0500 Subject: [PATCH 602/794] Fix ExternalAddress parsing problem under IPv6 `!strings.Contains(host, ":")` will fail miserably under ipv6 --- staging/src/k8s.io/apiserver/pkg/server/config.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index fe912a94d24..35acaa8125e 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -342,10 +342,10 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo if host == "" && c.PublicAddress != nil { host = c.PublicAddress.String() } - if !strings.Contains(host, ":") { - if c.ReadWritePort != 0 { - host = net.JoinHostPort(host, strconv.Itoa(c.ReadWritePort)) - } + + // if there is no port, and we have a ReadWritePort, use that + if _, _, err := net.SplitHostPort(host); err != nil && c.ReadWritePort != 0 { + host = net.JoinHostPort(host, strconv.Itoa(c.ReadWritePort)) } c.ExternalAddress = host From 9187b343e17db2bf8f3470cd6e1ef7f661814c15 Mon Sep 17 00:00:00 2001 From: Walter Fender Date: Wed, 11 Oct 2017 16:36:39 -0700 Subject: [PATCH 603/794] Split the NodeController into lifecycle and ipam pieces. Preparatory work for removing cloud provider dependency from node controller running in Kube Controller Manager. Splitting the node controller into its two major pieces life-cycle and CIDR/IP management. Both pieces currently need the cloud system to do their work. Removing lifecycles dependency on cloud will be fixed in a followup PR.
Moved node scheduler code to live with node lifecycle controller. Got the IPAM/Lifecycle split completed. Still need to rename pieces. Made changes to the utils and tests so they would be in the appropriate package. Moved the node based ipam code to nodeipam. Made the relevant tests pass. Moved common node controller util code to nodeutil. Removed unneeded pod informer sync from node ipam controller. Fixed linter issues. Factored in feedback from @gmarek. Factored in feedback from @mtaufen. Undoing unneeded change. --- cmd/kube-controller-manager/app/BUILD | 5 +- .../app/controllermanager.go | 6 +- cmd/kube-controller-manager/app/core.go | 44 +- pkg/controller/BUILD | 4 +- pkg/controller/nodeipam/BUILD | 48 ++ pkg/controller/{node => nodeipam}/OWNERS | 0 pkg/controller/{node => nodeipam}/doc.go | 4 +- pkg/controller/{node => nodeipam}/ipam/BUILD | 20 +- pkg/controller/{node => nodeipam}/ipam/OWNERS | 0 .../{node => nodeipam}/ipam/adapter.go | 0 .../{node => nodeipam}/ipam/cidr_allocator.go | 0 .../{node => nodeipam}/ipam/cidrset/BUILD | 4 +- .../ipam/cidrset/cidr_set.go | 0 .../ipam/cidrset/cidr_set_test.go | 0 .../ipam/cloud_cidr_allocator.go | 14 +- .../{node => nodeipam}/ipam/controller.go | 12 +- .../ipam/controller_test.go | 4 +- pkg/controller/{node => nodeipam}/ipam/doc.go | 0 .../ipam/range_allocator.go | 18 +- .../ipam/range_allocator_test.go | 0 .../{node => nodeipam}/ipam/sync/BUILD | 10 +- .../{node => nodeipam}/ipam/sync/sync.go | 2 +- .../{node => nodeipam}/ipam/sync/sync_test.go | 4 +- .../{node => nodeipam}/ipam/test/BUILD | 2 +- .../{node => nodeipam}/ipam/test/utils.go | 0 .../{node => nodeipam}/ipam/timeout.go | 0 .../{node => nodeipam}/ipam/timeout_test.go | 0 pkg/controller/nodeipam/metrics.go | 21 + .../nodeipam/node_ipam_controller.go | 187 +++++ pkg/controller/{node => nodelifecycle}/BUILD | 134 ++-- .../{node => nodelifecycle}/metrics.go | 4 +- .../node_lifecycle_controller.go} | 753 ++++++++---------- .../node_lifecycle_controller_test.go} 
| 285 ++++--- .../{node => nodelifecycle}/scheduler/BUILD | 54 +- .../scheduler/rate_limited_queue.go | 0 .../scheduler/rate_limited_queue_test.go | 0 .../scheduler/taint_manager.go} | 5 +- .../scheduler/taint_manager_test.go} | 0 .../scheduler/timed_workers.go | 0 .../scheduler/timed_workers_test.go | 0 pkg/controller/{node/util => util/node}/BUILD | 11 +- .../util => util/node}/controller_utils.go | 12 +- test/e2e/apps/BUILD | 2 +- test/e2e/apps/network_partition.go | 2 +- test/e2e/framework/BUILD | 2 +- test/e2e/framework/util.go | 2 +- test/integration/garbagecollector/BUILD | 8 +- test/integration/scheduler/BUILD | 3 +- test/integration/scheduler/taint_test.go | 24 +- 49 files changed, 972 insertions(+), 738 deletions(-) create mode 100644 pkg/controller/nodeipam/BUILD rename pkg/controller/{node => nodeipam}/OWNERS (100%) rename pkg/controller/{node => nodeipam}/doc.go (80%) rename pkg/controller/{node => nodeipam}/ipam/BUILD (82%) rename pkg/controller/{node => nodeipam}/ipam/OWNERS (100%) rename pkg/controller/{node => nodeipam}/ipam/adapter.go (100%) rename pkg/controller/{node => nodeipam}/ipam/cidr_allocator.go (100%) rename pkg/controller/{node => nodeipam}/ipam/cidrset/BUILD (80%) rename pkg/controller/{node => nodeipam}/ipam/cidrset/cidr_set.go (100%) rename pkg/controller/{node => nodeipam}/ipam/cidrset/cidr_set_test.go (100%) rename pkg/controller/{node => nodeipam}/ipam/cloud_cidr_allocator.go (94%) rename pkg/controller/{node => nodeipam}/ipam/controller.go (93%) rename pkg/controller/{node => nodeipam}/ipam/controller_test.go (94%) rename pkg/controller/{node => nodeipam}/ipam/doc.go (100%) rename pkg/controller/{node => nodeipam}/ipam/range_allocator.go (94%) rename pkg/controller/{node => nodeipam}/ipam/range_allocator_test.go (100%) rename pkg/controller/{node => nodeipam}/ipam/sync/BUILD (72%) rename pkg/controller/{node => nodeipam}/ipam/sync/sync.go (99%) rename pkg/controller/{node => nodeipam}/ipam/sync/sync_test.go (98%) rename 
pkg/controller/{node => nodeipam}/ipam/test/BUILD (85%) rename pkg/controller/{node => nodeipam}/ipam/test/utils.go (100%) rename pkg/controller/{node => nodeipam}/ipam/timeout.go (100%) rename pkg/controller/{node => nodeipam}/ipam/timeout_test.go (100%) create mode 100644 pkg/controller/nodeipam/metrics.go create mode 100644 pkg/controller/nodeipam/node_ipam_controller.go rename pkg/controller/{node => nodelifecycle}/BUILD (78%) rename pkg/controller/{node => nodelifecycle}/metrics.go (97%) rename pkg/controller/{node/node_controller.go => nodelifecycle/node_lifecycle_controller.go} (84%) rename pkg/controller/{node/nodecontroller_test.go => nodelifecycle/node_lifecycle_controller_test.go} (93%) rename pkg/controller/{node => nodelifecycle}/scheduler/BUILD (85%) rename pkg/controller/{node => nodelifecycle}/scheduler/rate_limited_queue.go (100%) rename pkg/controller/{node => nodelifecycle}/scheduler/rate_limited_queue_test.go (100%) rename pkg/controller/{node/scheduler/taint_controller.go => nodelifecycle/scheduler/taint_manager.go} (99%) rename pkg/controller/{node/scheduler/taint_controller_test.go => nodelifecycle/scheduler/taint_manager_test.go} (100%) rename pkg/controller/{node => nodelifecycle}/scheduler/timed_workers.go (100%) rename pkg/controller/{node => nodelifecycle}/scheduler/timed_workers_test.go (100%) rename pkg/controller/{node/util => util/node}/BUILD (87%) rename pkg/controller/{node/util => util/node}/controller_utils.go (96%) diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index 7bee9288b4a..d6940c30d3b 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -58,8 +58,9 @@ go_library( "//pkg/controller/garbagecollector:go_default_library", "//pkg/controller/job:go_default_library", "//pkg/controller/namespace:go_default_library", - "//pkg/controller/node:go_default_library", - "//pkg/controller/node/ipam:go_default_library", + 
"//pkg/controller/nodeipam:go_default_library", + "//pkg/controller/nodeipam/ipam:go_default_library", + "//pkg/controller/nodelifecycle:go_default_library", "//pkg/controller/podautoscaler:go_default_library", "//pkg/controller/podautoscaler/metrics:go_default_library", "//pkg/controller/podgc:go_default_library", diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 1f7832ed9d7..85dc51de5c8 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -368,10 +368,12 @@ func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc controllers["tokencleaner"] = startTokenCleanerController if loopMode == IncludeCloudLoops { controllers["service"] = startServiceController + controllers["nodeipam"] = startNodeIpamController controllers["route"] = startRouteController - // TODO: Move node controller and volume controller into the IncludeCloudLoops only set. + // TODO: volume controller into the IncludeCloudLoops only set. + // TODO: Separate cluster in cloud check from node lifecycle controller. 
} - controllers["node"] = startNodeController + controllers["nodelifecycle"] = startNodeLifecycleController controllers["persistentvolume-binder"] = startPersistentVolumeBinderController controllers["attachdetach"] = startAttachDetachController controllers["persistentvolume-expander"] = startVolumeExpandController diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index 819d18e0f01..2fceb2370bc 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -41,8 +41,9 @@ import ( endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/controller/garbagecollector" namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace" - nodecontroller "k8s.io/kubernetes/pkg/controller/node" - "k8s.io/kubernetes/pkg/controller/node/ipam" + nodeipamcontroller "k8s.io/kubernetes/pkg/controller/nodeipam" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" + lifecyclecontroller "k8s.io/kubernetes/pkg/controller/nodelifecycle" "k8s.io/kubernetes/pkg/controller/podgc" replicationcontroller "k8s.io/kubernetes/pkg/controller/replication" resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota" @@ -77,7 +78,7 @@ func startServiceController(ctx ControllerContext) (bool, error) { return true, nil } -func startNodeController(ctx ControllerContext) (bool, error) { +func startNodeIpamController(ctx ControllerContext) (bool, error) { var clusterCIDR *net.IPNet = nil var serviceCIDR *net.IPNet = nil if ctx.Options.AllocateNodeCIDRs { @@ -97,25 +98,38 @@ func startNodeController(ctx ControllerContext) (bool, error) { } } - nodeController, err := nodecontroller.NewNodeController( - ctx.InformerFactory.Core().V1().Pods(), + nodeIpamController, err := nodeipamcontroller.NewNodeIpamController( ctx.InformerFactory.Core().V1().Nodes(), - ctx.InformerFactory.Extensions().V1beta1().DaemonSets(), ctx.Cloud, ctx.ClientBuilder.ClientOrDie("node-controller"), - 
ctx.Options.PodEvictionTimeout.Duration, - ctx.Options.NodeEvictionRate, - ctx.Options.SecondaryNodeEvictionRate, - ctx.Options.LargeClusterSizeThreshold, - ctx.Options.UnhealthyZoneThreshold, - ctx.Options.NodeMonitorGracePeriod.Duration, - ctx.Options.NodeStartupGracePeriod.Duration, - ctx.Options.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, int(ctx.Options.NodeCIDRMaskSize), ctx.Options.AllocateNodeCIDRs, ipam.CIDRAllocatorType(ctx.Options.CIDRAllocatorType), + ) + if err != nil { + return true, err + } + go nodeIpamController.Run(ctx.Stop) + return true, nil +} + +func startNodeLifecycleController(ctx ControllerContext) (bool, error) { + lifecycleController, err := lifecyclecontroller.NewNodeLifecycleController( + ctx.InformerFactory.Core().V1().Pods(), + ctx.InformerFactory.Core().V1().Nodes(), + ctx.InformerFactory.Extensions().V1beta1().DaemonSets(), + ctx.Cloud, + ctx.ClientBuilder.ClientOrDie("node-controller"), + ctx.Options.NodeMonitorPeriod.Duration, + ctx.Options.NodeStartupGracePeriod.Duration, + ctx.Options.NodeMonitorGracePeriod.Duration, + ctx.Options.PodEvictionTimeout.Duration, + ctx.Options.NodeEvictionRate, + ctx.Options.SecondaryNodeEvictionRate, + ctx.Options.LargeClusterSizeThreshold, + ctx.Options.UnhealthyZoneThreshold, ctx.Options.EnableTaintManager, utilfeature.DefaultFeatureGate.Enabled(features.TaintBasedEvictions), utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition), @@ -123,7 +137,7 @@ func startNodeController(ctx ControllerContext) (bool, error) { if err != nil { return true, err } - go nodeController.Run(ctx.Stop) + go lifecycleController.Run(ctx.Stop) return true, nil } diff --git a/pkg/controller/BUILD b/pkg/controller/BUILD index f1e808f2ee3..7c2db32b5d4 100644 --- a/pkg/controller/BUILD +++ b/pkg/controller/BUILD @@ -117,7 +117,8 @@ filegroup( "//pkg/controller/history:all-srcs", "//pkg/controller/job:all-srcs", "//pkg/controller/namespace:all-srcs", - "//pkg/controller/node:all-srcs", + 
"//pkg/controller/nodeipam:all-srcs", + "//pkg/controller/nodelifecycle:all-srcs", "//pkg/controller/podautoscaler:all-srcs", "//pkg/controller/podgc:all-srcs", "//pkg/controller/replicaset:all-srcs", @@ -129,6 +130,7 @@ filegroup( "//pkg/controller/statefulset:all-srcs", "//pkg/controller/testutil:all-srcs", "//pkg/controller/ttl:all-srcs", + "//pkg/controller/util/node:all-srcs", "//pkg/controller/volume/attachdetach:all-srcs", "//pkg/controller/volume/events:all-srcs", "//pkg/controller/volume/expand:all-srcs", diff --git a/pkg/controller/nodeipam/BUILD b/pkg/controller/nodeipam/BUILD new file mode 100644 index 00000000000..46a62bb93b6 --- /dev/null +++ b/pkg/controller/nodeipam/BUILD @@ -0,0 +1,48 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "metrics.go", + "node_ipam_controller.go", + ], + importpath = "k8s.io/kubernetes/pkg/controller/nodeipam", + deps = [ + "//pkg/cloudprovider:go_default_library", + "//pkg/controller:go_default_library", + "//pkg/controller/nodeipam/ipam:go_default_library", + "//pkg/controller/nodeipam/ipam/sync:go_default_library", + "//pkg/util/metrics:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name 
= "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/controller/nodeipam/ipam:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/pkg/controller/node/OWNERS b/pkg/controller/nodeipam/OWNERS similarity index 100% rename from pkg/controller/node/OWNERS rename to pkg/controller/nodeipam/OWNERS diff --git a/pkg/controller/node/doc.go b/pkg/controller/nodeipam/doc.go similarity index 80% rename from pkg/controller/node/doc.go rename to pkg/controller/nodeipam/doc.go index b649f1dda49..a7b2d12db8e 100644 --- a/pkg/controller/node/doc.go +++ b/pkg/controller/nodeipam/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package node contains code for syncing cloud instances with +// Package nodeipam contains code for syncing cloud instances with // node registry -package node // import "k8s.io/kubernetes/pkg/controller/node" +package nodeipam // import "k8s.io/kubernetes/pkg/controller/nodeipam" diff --git a/pkg/controller/node/ipam/BUILD b/pkg/controller/nodeipam/ipam/BUILD similarity index 82% rename from pkg/controller/node/ipam/BUILD rename to pkg/controller/nodeipam/ipam/BUILD index 667f29f6b0c..5a1e1018a20 100644 --- a/pkg/controller/node/ipam/BUILD +++ b/pkg/controller/nodeipam/ipam/BUILD @@ -14,11 +14,11 @@ go_test( "timeout_test.go", ], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/pkg/controller/node/ipam", + importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam", deps = [ "//pkg/controller:go_default_library", - "//pkg/controller/node/ipam/cidrset:go_default_library", - "//pkg/controller/node/ipam/test:go_default_library", + "//pkg/controller/nodeipam/ipam/cidrset:go_default_library", + "//pkg/controller/nodeipam/ipam/test:go_default_library", "//pkg/controller/testutil:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -40,15 +40,15 @@ go_library( 
"range_allocator.go", "timeout.go", ], - importpath = "k8s.io/kubernetes/pkg/controller/node/ipam", + importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam", deps = [ "//pkg/api/v1/node:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/gce:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/node/ipam/cidrset:go_default_library", - "//pkg/controller/node/ipam/sync:go_default_library", - "//pkg/controller/node/util:go_default_library", + "//pkg/controller/nodeipam/ipam/cidrset:go_default_library", + "//pkg/controller/nodeipam/ipam/sync:go_default_library", + "//pkg/controller/util/node:go_default_library", "//pkg/util/node:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -82,9 +82,9 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/controller/node/ipam/cidrset:all-srcs", - "//pkg/controller/node/ipam/sync:all-srcs", - "//pkg/controller/node/ipam/test:all-srcs", + "//pkg/controller/nodeipam/ipam/cidrset:all-srcs", + "//pkg/controller/nodeipam/ipam/sync:all-srcs", + "//pkg/controller/nodeipam/ipam/test:all-srcs", ], tags = ["automanaged"], ) diff --git a/pkg/controller/node/ipam/OWNERS b/pkg/controller/nodeipam/ipam/OWNERS similarity index 100% rename from pkg/controller/node/ipam/OWNERS rename to pkg/controller/nodeipam/ipam/OWNERS diff --git a/pkg/controller/node/ipam/adapter.go b/pkg/controller/nodeipam/ipam/adapter.go similarity index 100% rename from pkg/controller/node/ipam/adapter.go rename to pkg/controller/nodeipam/ipam/adapter.go diff --git a/pkg/controller/node/ipam/cidr_allocator.go b/pkg/controller/nodeipam/ipam/cidr_allocator.go similarity index 100% rename from pkg/controller/node/ipam/cidr_allocator.go rename to pkg/controller/nodeipam/ipam/cidr_allocator.go diff --git a/pkg/controller/node/ipam/cidrset/BUILD b/pkg/controller/nodeipam/ipam/cidrset/BUILD similarity index 80% rename 
from pkg/controller/node/ipam/cidrset/BUILD rename to pkg/controller/nodeipam/ipam/cidrset/BUILD index e3accb73bc6..c1bbda1c697 100644 --- a/pkg/controller/node/ipam/cidrset/BUILD +++ b/pkg/controller/nodeipam/ipam/cidrset/BUILD @@ -10,14 +10,14 @@ go_test( name = "go_default_test", srcs = ["cidr_set_test.go"], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset", + importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset", deps = ["//vendor/github.com/golang/glog:go_default_library"], ) go_library( name = "go_default_library", srcs = ["cidr_set.go"], - importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset", + importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset", ) filegroup( diff --git a/pkg/controller/node/ipam/cidrset/cidr_set.go b/pkg/controller/nodeipam/ipam/cidrset/cidr_set.go similarity index 100% rename from pkg/controller/node/ipam/cidrset/cidr_set.go rename to pkg/controller/nodeipam/ipam/cidrset/cidr_set.go diff --git a/pkg/controller/node/ipam/cidrset/cidr_set_test.go b/pkg/controller/nodeipam/ipam/cidrset/cidr_set_test.go similarity index 100% rename from pkg/controller/node/ipam/cidrset/cidr_set_test.go rename to pkg/controller/nodeipam/ipam/cidrset/cidr_set_test.go diff --git a/pkg/controller/node/ipam/cloud_cidr_allocator.go b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go similarity index 94% rename from pkg/controller/node/ipam/cloud_cidr_allocator.go rename to pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go index 98b65b22e67..7a07409c7cd 100644 --- a/pkg/controller/node/ipam/cloud_cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go @@ -40,7 +40,7 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/node/util" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" utilnode "k8s.io/kubernetes/pkg/util/node" 
) @@ -101,8 +101,8 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter } nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: util.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR), - UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { + AddFunc: nodeutil.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR), + UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { if newNode.Spec.PodCIDR == "" { return ca.AllocateOrOccupyCIDR(newNode) } @@ -114,7 +114,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter } return nil }), - DeleteFunc: util.CreateDeleteNodeHandler(ca.ReleaseCIDR), + DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR), }) glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName()) @@ -197,11 +197,11 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { cidrs, err := ca.cloud.AliasRanges(types.NodeName(nodeName)) if err != nil { - util.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable") + nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable") return fmt.Errorf("failed to allocate cidr: %v", err) } if len(cidrs) == 0 { - util.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable") + nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable") return fmt.Errorf("failed to allocate cidr: Node %v has no CIDRs", node.Name) } _, cidr, err := net.ParseCIDR(cidrs[0]) @@ -237,7 +237,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { glog.Errorf("Failed to update node %v PodCIDR to %v (%d retries left): %v", node.Name, podCIDR, cidrUpdateRetries-rep-1, err) } if err != nil { - util.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed") + nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed") glog.Errorf("CIDR assignment for node %v failed: %v.", nodeName, err) return err } 
diff --git a/pkg/controller/node/ipam/controller.go b/pkg/controller/nodeipam/ipam/controller.go similarity index 93% rename from pkg/controller/node/ipam/controller.go rename to pkg/controller/nodeipam/ipam/controller.go index 4b1221b6781..6ab18d69f65 100644 --- a/pkg/controller/node/ipam/controller.go +++ b/pkg/controller/nodeipam/ipam/controller.go @@ -30,9 +30,9 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" - "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset" - nodesync "k8s.io/kubernetes/pkg/controller/node/ipam/sync" - "k8s.io/kubernetes/pkg/controller/node/util" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" + nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" ) // Config for the IPAM controller. @@ -128,9 +128,9 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error { } nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: util.CreateAddNodeHandler(c.onAdd), - UpdateFunc: util.CreateUpdateNodeHandler(c.onUpdate), - DeleteFunc: util.CreateDeleteNodeHandler(c.onDelete), + AddFunc: nodeutil.CreateAddNodeHandler(c.onAdd), + UpdateFunc: nodeutil.CreateUpdateNodeHandler(c.onUpdate), + DeleteFunc: nodeutil.CreateDeleteNodeHandler(c.onDelete), }) return nil diff --git a/pkg/controller/node/ipam/controller_test.go b/pkg/controller/nodeipam/ipam/controller_test.go similarity index 94% rename from pkg/controller/node/ipam/controller_test.go rename to pkg/controller/nodeipam/ipam/controller_test.go index 14fbb4340f3..6e5a6f99571 100644 --- a/pkg/controller/node/ipam/controller_test.go +++ b/pkg/controller/nodeipam/ipam/controller_test.go @@ -20,8 +20,8 @@ import ( "net" "testing" - "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset" - "k8s.io/kubernetes/pkg/controller/node/ipam/test" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" + 
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" ) func TestOccupyServiceCIDR(t *testing.T) { diff --git a/pkg/controller/node/ipam/doc.go b/pkg/controller/nodeipam/ipam/doc.go similarity index 100% rename from pkg/controller/node/ipam/doc.go rename to pkg/controller/nodeipam/ipam/doc.go diff --git a/pkg/controller/node/ipam/range_allocator.go b/pkg/controller/nodeipam/ipam/range_allocator.go similarity index 94% rename from pkg/controller/node/ipam/range_allocator.go rename to pkg/controller/nodeipam/ipam/range_allocator.go index d3037b1d1d1..5de2195854b 100644 --- a/pkg/controller/node/ipam/range_allocator.go +++ b/pkg/controller/nodeipam/ipam/range_allocator.go @@ -36,9 +36,9 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset" - "k8s.io/kubernetes/pkg/controller/node/util" - nodeutil "k8s.io/kubernetes/pkg/util/node" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" + utilnode "k8s.io/kubernetes/pkg/util/node" ) type rangeAllocator struct { @@ -119,8 +119,8 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No } nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: util.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR), - UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { + AddFunc: nodeutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR), + UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { // If the PodCIDR is not empty we either: // - already processed a Node that already had a CIDR after NC restarted // (cidr is marked as used), @@ -145,7 +145,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No } return nil }), - DeleteFunc: util.CreateDeleteNodeHandler(ra.ReleaseCIDR), + DeleteFunc: nodeutil.CreateDeleteNodeHandler(ra.ReleaseCIDR), }) 
return ra, nil @@ -234,7 +234,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { podCIDR, err := r.cidrs.AllocateNext() if err != nil { r.removeNodeFromProcessing(node.Name) - util.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable") + nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable") return fmt.Errorf("failed to allocate cidr: %v", err) } @@ -303,14 +303,14 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { } return nil } - if err = nodeutil.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil { + if err = utilnode.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil { glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) break } glog.Errorf("Failed to update node %v PodCIDR to %v (%d retries left): %v", node.Name, podCIDR, cidrUpdateRetries-rep-1, err) } if err != nil { - util.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed") + nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed") // We accept the fact that we may leek CIDRs here. This is safer than releasing // them in case when we don't know if request went through. // NodeController restart will return all falsely allocated CIDRs to the pool. 
diff --git a/pkg/controller/node/ipam/range_allocator_test.go b/pkg/controller/nodeipam/ipam/range_allocator_test.go similarity index 100% rename from pkg/controller/node/ipam/range_allocator_test.go rename to pkg/controller/nodeipam/ipam/range_allocator_test.go diff --git a/pkg/controller/node/ipam/sync/BUILD b/pkg/controller/nodeipam/ipam/sync/BUILD similarity index 72% rename from pkg/controller/node/ipam/sync/BUILD rename to pkg/controller/nodeipam/ipam/sync/BUILD index 6530b5d8126..2ecba089f4e 100644 --- a/pkg/controller/node/ipam/sync/BUILD +++ b/pkg/controller/nodeipam/ipam/sync/BUILD @@ -3,10 +3,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["sync.go"], - importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/sync", + importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync", visibility = ["//visibility:public"], deps = [ - "//pkg/controller/node/ipam/cidrset:go_default_library", + "//pkg/controller/nodeipam/ipam/cidrset:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", ], @@ -16,10 +16,10 @@ go_test( name = "go_default_test", srcs = ["sync_test.go"], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/sync", + importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync", deps = [ - "//pkg/controller/node/ipam/cidrset:go_default_library", - "//pkg/controller/node/ipam/test:go_default_library", + "//pkg/controller/nodeipam/ipam/cidrset:go_default_library", + "//pkg/controller/nodeipam/ipam/test:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/node/ipam/sync/sync.go b/pkg/controller/nodeipam/ipam/sync/sync.go similarity index 99% rename from pkg/controller/node/ipam/sync/sync.go rename to 
pkg/controller/nodeipam/ipam/sync/sync.go index 4995f425543..fabc3a11260 100644 --- a/pkg/controller/node/ipam/sync/sync.go +++ b/pkg/controller/nodeipam/ipam/sync/sync.go @@ -25,7 +25,7 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" ) const ( diff --git a/pkg/controller/node/ipam/sync/sync_test.go b/pkg/controller/nodeipam/ipam/sync/sync_test.go similarity index 98% rename from pkg/controller/node/ipam/sync/sync_test.go rename to pkg/controller/nodeipam/ipam/sync/sync_test.go index d3268480439..4a47280d94b 100644 --- a/pkg/controller/node/ipam/sync/sync_test.go +++ b/pkg/controller/nodeipam/ipam/sync/sync_test.go @@ -26,8 +26,8 @@ import ( "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset" - "k8s.io/kubernetes/pkg/controller/node/ipam/test" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" "k8s.io/api/core/v1" ) diff --git a/pkg/controller/node/ipam/test/BUILD b/pkg/controller/nodeipam/ipam/test/BUILD similarity index 85% rename from pkg/controller/node/ipam/test/BUILD rename to pkg/controller/nodeipam/ipam/test/BUILD index 38155ed0970..0c6fd3a2816 100644 --- a/pkg/controller/node/ipam/test/BUILD +++ b/pkg/controller/nodeipam/ipam/test/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["utils.go"], - importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/test", + importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test", visibility = ["//visibility:public"], ) diff --git a/pkg/controller/node/ipam/test/utils.go b/pkg/controller/nodeipam/ipam/test/utils.go similarity index 100% rename from pkg/controller/node/ipam/test/utils.go rename to pkg/controller/nodeipam/ipam/test/utils.go diff --git 
a/pkg/controller/node/ipam/timeout.go b/pkg/controller/nodeipam/ipam/timeout.go similarity index 100% rename from pkg/controller/node/ipam/timeout.go rename to pkg/controller/nodeipam/ipam/timeout.go diff --git a/pkg/controller/node/ipam/timeout_test.go b/pkg/controller/nodeipam/ipam/timeout_test.go similarity index 100% rename from pkg/controller/node/ipam/timeout_test.go rename to pkg/controller/nodeipam/ipam/timeout_test.go diff --git a/pkg/controller/nodeipam/metrics.go b/pkg/controller/nodeipam/metrics.go new file mode 100644 index 00000000000..9211ce3f382 --- /dev/null +++ b/pkg/controller/nodeipam/metrics.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeipam + +// Register the metrics that are to be monitored. +func Register() { +} diff --git a/pkg/controller/nodeipam/node_ipam_controller.go b/pkg/controller/nodeipam/node_ipam_controller.go new file mode 100644 index 00000000000..e2dad9e4f58 --- /dev/null +++ b/pkg/controller/nodeipam/node_ipam_controller.go @@ -0,0 +1,187 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeipam + +import ( + "net" + "time" + + "github.com/golang/glog" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + + "k8s.io/api/core/v1" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" + nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync" + "k8s.io/kubernetes/pkg/util/metrics" +) + +func init() { + // Register prometheus metrics + Register() +} + +const ( + // ipamResyncInterval is the amount of time between when the cloud and node + // CIDR range assignments are synchronized. + ipamResyncInterval = 30 * time.Second + // ipamMaxBackoff is the maximum backoff for retrying synchronization of a + // given node in the error state. + ipamMaxBackoff = 10 * time.Second + // ipamInitialBackoff is the initial retry interval for retrying synchronization of a + // given node in the error state. + ipamInitialBackoff = 250 * time.Millisecond +) + +// Controller is the controller that manages node ipam state. +type Controller struct { + allocateNodeCIDRs bool + allocatorType ipam.CIDRAllocatorType + + cloud cloudprovider.Interface + clusterCIDR *net.IPNet + serviceCIDR *net.IPNet + kubeClient clientset.Interface + // Method for easy mocking in unittest. 
+ lookupIP func(host string) ([]net.IP, error) + + nodeLister corelisters.NodeLister + nodeInformerSynced cache.InformerSynced + + cidrAllocator ipam.CIDRAllocator + + forcefullyDeletePod func(*v1.Pod) error +} + +// NewNodeIpamController returns a new node IP Address Management controller to +// sync instances from cloudprovider. +// This method returns an error if it is unable to initialize the CIDR bitmap with +// podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes +// currently, this should be handled as a fatal error. +func NewNodeIpamController( + nodeInformer coreinformers.NodeInformer, + cloud cloudprovider.Interface, + kubeClient clientset.Interface, + clusterCIDR *net.IPNet, + serviceCIDR *net.IPNet, + nodeCIDRMaskSize int, + allocateNodeCIDRs bool, + allocatorType ipam.CIDRAllocatorType) (*Controller, error) { + + if kubeClient == nil { + glog.Fatalf("kubeClient is nil when starting Controller") + } + + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(glog.Infof) + + glog.V(0).Infof("Sending events to api server.") + eventBroadcaster.StartRecordingToSink( + &v1core.EventSinkImpl{ + Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""), + }) + + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("node_ipam_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) + } + + if allocateNodeCIDRs { + if clusterCIDR == nil { + glog.Fatal("Controller: Must specify clusterCIDR if allocateNodeCIDRs == true.") + } + mask := clusterCIDR.Mask + if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize { + glog.Fatal("Controller: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.") + } + } + + ic := &Controller{ + cloud: cloud, + kubeClient: kubeClient, + lookupIP: net.LookupIP, + clusterCIDR: clusterCIDR, + serviceCIDR: serviceCIDR, + allocateNodeCIDRs: allocateNodeCIDRs, + 
allocatorType: allocatorType, + } + + // TODO: Abstract this check into a generic controller manager should run method. + if ic.allocateNodeCIDRs { + if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType { + cfg := &ipam.Config{ + Resync: ipamResyncInterval, + MaxBackoff: ipamMaxBackoff, + InitialRetry: ipamInitialBackoff, + } + switch ic.allocatorType { + case ipam.IPAMFromClusterAllocatorType: + cfg.Mode = nodesync.SyncFromCluster + case ipam.IPAMFromCloudAllocatorType: + cfg.Mode = nodesync.SyncFromCloud + } + ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize) + if err != nil { + glog.Fatalf("Error creating ipam controller: %v", err) + } + if err := ipamc.Start(nodeInformer); err != nil { + glog.Fatalf("Error trying to Init(): %v", err) + } + } else { + var err error + ic.cidrAllocator, err = ipam.New( + kubeClient, cloud, nodeInformer, ic.allocatorType, ic.clusterCIDR, ic.serviceCIDR, nodeCIDRMaskSize) + if err != nil { + return nil, err + } + } + } + + ic.nodeLister = nodeInformer.Lister() + ic.nodeInformerSynced = nodeInformer.Informer().HasSynced + + return ic, nil +} + +// Run starts an asynchronous loop that monitors the status of cluster nodes. +func (nc *Controller) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + glog.Infof("Starting ipam controller") + defer glog.Infof("Shutting down ipam controller") + + if !controller.WaitForCacheSync("node", stopCh, nc.nodeInformerSynced) { + return + } + + // TODO: Abstract this check into a generic controller manager should run method. 
+ if nc.allocateNodeCIDRs { + if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType { + go nc.cidrAllocator.Run(stopCh) + } + } + + <-stopCh +} diff --git a/pkg/controller/node/BUILD b/pkg/controller/nodelifecycle/BUILD similarity index 78% rename from pkg/controller/node/BUILD rename to pkg/controller/nodelifecycle/BUILD index 57a684c8feb..25d036e3cfb 100644 --- a/pkg/controller/node/BUILD +++ b/pkg/controller/nodelifecycle/BUILD @@ -1,24 +1,76 @@ -package(default_visibility = ["//visibility:public"]) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "node_lifecycle_controller.go", + ], + importpath = "k8s.io/kubernetes/pkg/controller/nodelifecycle", + visibility = ["//visibility:public"], + deps = [ + "//pkg/api/v1/node:go_default_library", + "//pkg/cloudprovider:go_default_library", + "//pkg/controller:go_default_library", + "//pkg/controller/nodelifecycle/scheduler:go_default_library", + "//pkg/controller/util/node:go_default_library", + "//pkg/util/metrics:go_default_library", + "//pkg/util/node:go_default_library", + "//pkg/util/system:go_default_library", + "//pkg/util/taints:go_default_library", + "//pkg/util/version:go_default_library", + "//plugin/pkg/scheduler/algorithm:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", + "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", + "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/controller/nodelifecycle/scheduler:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], ) go_test( name = "go_default_test", - srcs = ["nodecontroller_test.go"], + srcs = ["node_lifecycle_controller_test.go"], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/pkg/controller/node", + importpath = "k8s.io/kubernetes/pkg/controller/nodelifecycle", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/node/ipam:go_default_library", - "//pkg/controller/node/scheduler:go_default_library", - "//pkg/controller/node/util:go_default_library", + "//pkg/controller/nodelifecycle/scheduler:go_default_library", "//pkg/controller/testutil:go_default_library", + "//pkg/controller/util/node:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/taints:go_default_library", @@ -39,65 +91,3 @@ go_test( 
"//vendor/k8s.io/client-go/testing:go_default_library", ], ) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "metrics.go", - "node_controller.go", - ], - importpath = "k8s.io/kubernetes/pkg/controller/node", - deps = [ - "//pkg/api/v1/node:go_default_library", - "//pkg/cloudprovider:go_default_library", - "//pkg/controller:go_default_library", - "//pkg/controller/node/ipam:go_default_library", - "//pkg/controller/node/ipam/sync:go_default_library", - "//pkg/controller/node/scheduler:go_default_library", - "//pkg/controller/node/util:go_default_library", - "//pkg/util/metrics:go_default_library", - "//pkg/util/node:go_default_library", - "//pkg/util/system:go_default_library", - "//pkg/util/taints:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library", - 
"//vendor/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/controller/node/ipam:all-srcs", - "//pkg/controller/node/scheduler:all-srcs", - "//pkg/controller/node/util:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/pkg/controller/node/metrics.go b/pkg/controller/nodelifecycle/metrics.go similarity index 97% rename from pkg/controller/node/metrics.go rename to pkg/controller/nodelifecycle/metrics.go index 31bba5b2332..ae61266c8ad 100644 --- a/pkg/controller/node/metrics.go +++ b/pkg/controller/nodelifecycle/metrics.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package node +package nodelifecycle import ( "sync" diff --git a/pkg/controller/node/node_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go similarity index 84% rename from pkg/controller/node/node_controller.go rename to pkg/controller/nodelifecycle/node_lifecycle_controller.go index ab490d6a052..e2a47f2076b 100644 --- a/pkg/controller/node/node_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,16 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package node +// The Controller sets tainted annotations on nodes. +// Tainted nodes should not be used for new work loads and +// some effort should be given to getting existing work +// loads off of tainted nodes. + +package nodelifecycle import ( - "fmt" - "net" - "sync" - "time" - - "github.com/golang/glog" - + "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,31 +30,31 @@ import ( "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - - "k8s.io/client-go/kubernetes/scheme" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/flowcontrol" - - "k8s.io/api/core/v1" coreinformers "k8s.io/client-go/informers/core/v1" extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" corelisters "k8s.io/client-go/listers/core/v1" extensionslisters "k8s.io/client-go/listers/extensions/v1beta1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/flowcontrol" v1node "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/node/ipam" - nodesync "k8s.io/kubernetes/pkg/controller/node/ipam/sync" - "k8s.io/kubernetes/pkg/controller/node/scheduler" - "k8s.io/kubernetes/pkg/controller/node/util" + "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" "k8s.io/kubernetes/pkg/util/metrics" utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/system" taintutils "k8s.io/kubernetes/pkg/util/taints" + 
utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + + "fmt" + "github.com/golang/glog" + "sync" + "time" ) func init() { @@ -64,11 +63,14 @@ func init() { } var ( + gracefulDeletionVersion = utilversion.MustParseSemantic("v1.1.0") + // UnreachableTaintTemplate is the taint for when a node becomes unreachable. UnreachableTaintTemplate = &v1.Taint{ Key: algorithm.TaintNodeUnreachable, Effect: v1.TaintEffectNoExecute, } + // NotReadyTaintTemplate is the taint for when a node is not ready for // executing pods NotReadyTaintTemplate = &v1.Taint{ @@ -91,23 +93,6 @@ var ( } ) -const ( - // The amount of time the nodecontroller polls on the list nodes endpoint. - apiserverStartupGracePeriod = 10 * time.Minute - // The amount of time the nodecontroller should sleep between retrying NodeStatus updates - retrySleepTime = 20 * time.Millisecond - - // ipamResyncInterval is the amount of time between when the cloud and node - // CIDR range assignments are synchronized. - ipamResyncInterval = 30 * time.Second - // ipamMaxBackoff is the maximum backoff for retrying synchronization of a - // given in the error state. - ipamMaxBackoff = 10 * time.Second - // ipamInitialRetry is the initial retry interval for retrying synchronization of a - // given in the error state. - ipamInitialBackoff = 250 * time.Millisecond -) - // ZoneState is the state of a given zone. type ZoneState string @@ -118,24 +103,68 @@ const ( statePartialDisruption = ZoneState("PartialDisruption") ) +const ( + // The amount of time the nodecontroller polls on the list nodes endpoint. + apiserverStartupGracePeriod = 10 * time.Minute + // The amount of time the nodecontroller should sleep between retrying NodeStatus updates + retrySleepTime = 20 * time.Millisecond +) + type nodeStatusData struct { probeTimestamp metav1.Time readyTransitionTimestamp metav1.Time status v1.NodeStatus } -// Controller is the controller that manages node related cluster state. 
+// Controller is the controller that manages node's life cycle. type Controller struct { - allocateNodeCIDRs bool - allocatorType ipam.CIDRAllocatorType + taintManager *scheduler.NoExecuteTaintManager + + podInformerSynced cache.InformerSynced + cloud cloudprovider.Interface + kubeClient clientset.Interface + + // This timestamp is to be used instead of LastProbeTime stored in Condition. We do this + // to aviod the problem with time skew across the cluster. + now func() metav1.Time + + enterPartialDisruptionFunc func(nodeNum int) float32 + enterFullDisruptionFunc func(nodeNum int) float32 + computeZoneStateFunc func(nodeConditions []*v1.NodeCondition) (int, ZoneState) - cloud cloudprovider.Interface - clusterCIDR *net.IPNet - serviceCIDR *net.IPNet knownNodeSet map[string]*v1.Node - kubeClient clientset.Interface - // Method for easy mocking in unittest. - lookupIP func(host string) ([]net.IP, error) + // per Node map storing last observed Status together with a local time when it was observed. + nodeStatusMap map[string]nodeStatusData + + // Lock to access evictor workers + evictorLock sync.Mutex + + // workers that evicts pods from unresponsive nodes. + zonePodEvictor map[string]*scheduler.RateLimitedTimedQueue + + // workers that are responsible for tainting nodes. + zoneNoExecuteTainter map[string]*scheduler.RateLimitedTimedQueue + + zoneStates map[string]ZoneState + + daemonSetStore extensionslisters.DaemonSetLister + daemonSetInformerSynced cache.InformerSynced + + nodeLister corelisters.NodeLister + nodeInformerSynced cache.InformerSynced + nodeExistsInCloudProvider func(types.NodeName) (bool, error) + + recorder record.EventRecorder + + // Value controlling Controller monitoring period, i.e. how often does Controller + // check node status posted from kubelet. This value should be lower than nodeMonitorGracePeriod. + // TODO: Change node status monitor to watch based. 
+ nodeMonitorPeriod time.Duration + + // Value used if sync_nodes_status=False, only for node startup. When node + // is just created, e.g. cluster bootstrap or node creation, we give a longer grace period. + nodeStartupGracePeriod time.Duration + // Value used if sync_nodes_status=False. Controller will not proactively // sync node status in this case, but will monitor node status updated from kubelet. If // it doesn't receive update for this amount of time, it will start posting "NodeReady== @@ -151,45 +180,8 @@ type Controller struct { // 2. nodeMonitorGracePeriod can't be too large for user experience - larger value takes // longer for user to see up-to-date node status. nodeMonitorGracePeriod time.Duration - // Value controlling Controller monitoring period, i.e. how often does Controller - // check node status posted from kubelet. This value should be lower than nodeMonitorGracePeriod. - // TODO: Change node status monitor to watch based. - nodeMonitorPeriod time.Duration - // Value used if sync_nodes_status=False, only for node startup. When node - // is just created, e.g. cluster bootstrap or node creation, we give a longer grace period. - nodeStartupGracePeriod time.Duration - // per Node map storing last observed Status together with a local time when it was observed. - nodeStatusMap map[string]nodeStatusData - // This timestamp is to be used instead of LastProbeTime stored in Condition. We do this - // to aviod the problem with time skew across the cluster. - now func() metav1.Time - // Lock to access evictor workers - evictorLock sync.Mutex - // workers that evicts pods from unresponsive nodes. - zonePodEvictor map[string]*scheduler.RateLimitedTimedQueue - // workers that are responsible for tainting nodes. - zoneNoExecuteTainer map[string]*scheduler.RateLimitedTimedQueue - podEvictionTimeout time.Duration - // The maximum duration before a pod evicted from a node can be forcefully terminated. 
- maximumGracePeriod time.Duration - recorder record.EventRecorder - nodeLister corelisters.NodeLister - nodeInformerSynced cache.InformerSynced - - daemonSetStore extensionslisters.DaemonSetLister - daemonSetInformerSynced cache.InformerSynced - - podInformerSynced cache.InformerSynced - cidrAllocator ipam.CIDRAllocator - taintManager *scheduler.NoExecuteTaintManager - - nodeExistsInCloudProvider func(types.NodeName) (bool, error) - computeZoneStateFunc func(nodeConditions []*v1.NodeCondition) (int, ZoneState) - enterPartialDisruptionFunc func(nodeNum int) float32 - enterFullDisruptionFunc func(nodeNum int) float32 - - zoneStates map[string]ZoneState + podEvictionTimeout time.Duration evictionLimiterQPS float32 secondaryEvictionLimiterQPS float32 largeClusterThreshold int32 @@ -208,29 +200,20 @@ type Controller struct { taintNodeByCondition bool } -// NewNodeController returns a new node controller to sync instances from cloudprovider. -// This method returns an error if it is unable to initialize the CIDR bitmap with -// podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes -// currently, this should be handled as a fatal error. -func NewNodeController( - podInformer coreinformers.PodInformer, +// NewNodeLifecycleController returns a new taint controller. 
+func NewNodeLifecycleController(podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, daemonSetInformer extensionsinformers.DaemonSetInformer, cloud cloudprovider.Interface, kubeClient clientset.Interface, + nodeMonitorPeriod time.Duration, + nodeStartupGracePeriod time.Duration, + nodeMonitorGracePeriod time.Duration, podEvictionTimeout time.Duration, evictionLimiterQPS float32, secondaryEvictionLimiterQPS float32, largeClusterThreshold int32, unhealthyZoneThreshold float32, - nodeMonitorGracePeriod time.Duration, - nodeStartupGracePeriod time.Duration, - nodeMonitorPeriod time.Duration, - clusterCIDR *net.IPNet, - serviceCIDR *net.IPNet, - nodeCIDRMaskSize int, - allocateNodeCIDRs bool, - allocatorType ipam.CIDRAllocatorType, runTaintManager bool, useTaintBasedEvictions bool, taintNodeByCondition bool) (*Controller, error) { @@ -241,55 +224,32 @@ func NewNodeController( eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"}) - eventBroadcaster.StartLogging(glog.Infof) - - glog.V(0).Infof("Sending events to api server.") - eventBroadcaster.StartRecordingToSink( - &v1core.EventSinkImpl{ - Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""), - }) if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) - } - - if allocateNodeCIDRs { - if clusterCIDR == nil { - glog.Fatal("Controller: Must specify clusterCIDR if allocateNodeCIDRs == true.") - } - mask := clusterCIDR.Mask - if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize { - glog.Fatalf("Controller: Invalid clusterCIDR, mask size of clusterCIDR(%d) must be less than nodeCIDRMaskSize(%d).", maskSize, nodeCIDRMaskSize) - } + metrics.RegisterMetricAndTrackRateLimiterUsage("node_lifecycle_controller", 
kubeClient.CoreV1().RESTClient().GetRateLimiter()) } nc := &Controller{ - cloud: cloud, - knownNodeSet: make(map[string]*v1.Node), - kubeClient: kubeClient, - recorder: recorder, - podEvictionTimeout: podEvictionTimeout, - maximumGracePeriod: 5 * time.Minute, - zonePodEvictor: make(map[string]*scheduler.RateLimitedTimedQueue), - zoneNoExecuteTainer: make(map[string]*scheduler.RateLimitedTimedQueue), - nodeStatusMap: make(map[string]nodeStatusData), - nodeMonitorGracePeriod: nodeMonitorGracePeriod, - nodeMonitorPeriod: nodeMonitorPeriod, - nodeStartupGracePeriod: nodeStartupGracePeriod, - lookupIP: net.LookupIP, - now: metav1.Now, - clusterCIDR: clusterCIDR, - serviceCIDR: serviceCIDR, - allocateNodeCIDRs: allocateNodeCIDRs, - allocatorType: allocatorType, + cloud: cloud, + kubeClient: kubeClient, + now: metav1.Now, + knownNodeSet: make(map[string]*v1.Node), + nodeStatusMap: make(map[string]nodeStatusData), nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) { - return util.NodeExistsInCloudProvider(cloud, nodeName) + return nodeutil.ExistsInCloudProvider(cloud, nodeName) }, + recorder: recorder, + nodeMonitorPeriod: nodeMonitorPeriod, + nodeStartupGracePeriod: nodeStartupGracePeriod, + nodeMonitorGracePeriod: nodeMonitorGracePeriod, + zonePodEvictor: make(map[string]*scheduler.RateLimitedTimedQueue), + zoneNoExecuteTainter: make(map[string]*scheduler.RateLimitedTimedQueue), + zoneStates: make(map[string]ZoneState), + podEvictionTimeout: podEvictionTimeout, evictionLimiterQPS: evictionLimiterQPS, secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS, largeClusterThreshold: largeClusterThreshold, unhealthyZoneThreshold: unhealthyZoneThreshold, - zoneStates: make(map[string]ZoneState), runTaintManager: runTaintManager, useTaintBasedEvictions: useTaintBasedEvictions && runTaintManager, taintNodeByCondition: taintNodeByCondition, @@ -297,6 +257,7 @@ func NewNodeController( if useTaintBasedEvictions { glog.Infof("Controller is using taint based 
evictions.") } + nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc nc.enterFullDisruptionFunc = nc.HealthyQPSFunc nc.computeZoneStateFunc = nc.ComputeZoneState @@ -337,48 +298,18 @@ func NewNodeController( }) nc.podInformerSynced = podInformer.Informer().HasSynced - if nc.allocateNodeCIDRs { - if nc.allocatorType == ipam.IPAMFromClusterAllocatorType || nc.allocatorType == ipam.IPAMFromCloudAllocatorType { - cfg := &ipam.Config{ - Resync: ipamResyncInterval, - MaxBackoff: ipamMaxBackoff, - InitialRetry: ipamInitialBackoff, - } - switch nc.allocatorType { - case ipam.IPAMFromClusterAllocatorType: - cfg.Mode = nodesync.SyncFromCluster - case ipam.IPAMFromCloudAllocatorType: - cfg.Mode = nodesync.SyncFromCloud - } - ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize) - if err != nil { - glog.Fatalf("Error creating ipam controller: %v", err) - } - if err := ipamc.Start(nodeInformer); err != nil { - glog.Fatalf("Error trying to Init(): %v", err) - } - } else { - var err error - nc.cidrAllocator, err = ipam.New( - kubeClient, cloud, nodeInformer, nc.allocatorType, nc.clusterCIDR, nc.serviceCIDR, nodeCIDRMaskSize) - if err != nil { - return nil, err - } - } - } - if nc.runTaintManager { nc.taintManager = scheduler.NewNoExecuteTaintManager(kubeClient) nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: util.CreateAddNodeHandler(func(node *v1.Node) error { + AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error { nc.taintManager.NodeUpdated(nil, node) return nil }), - UpdateFunc: util.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) error { + UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) error { nc.taintManager.NodeUpdated(oldNode, newNode) return nil }), - DeleteFunc: util.CreateDeleteNodeHandler(func(node *v1.Node) error { + DeleteFunc: nodeutil.CreateDeleteNodeHandler(func(node *v1.Node) error { nc.taintManager.NodeUpdated(node, nil) return nil 
}), @@ -388,10 +319,10 @@ func NewNodeController( if nc.taintNodeByCondition { glog.Infof("Controller will taint node by condition.") nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: util.CreateAddNodeHandler(func(node *v1.Node) error { + AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error { return nc.doNoScheduleTaintingPass(node) }), - UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { + UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { return nc.doNoScheduleTaintingPass(newNode) }), }) @@ -400,10 +331,10 @@ func NewNodeController( // NOTE(resouer): nodeInformer to substitute deprecated taint key (notReady -> not-ready). // Remove this logic when we don't need this backwards compatibility nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: util.CreateAddNodeHandler(func(node *v1.Node) error { + AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error { return nc.doFixDeprecatedTaintKeyPass(node) }), - UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { + UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { return nc.doFixDeprecatedTaintKeyPass(newNode) }), }) @@ -417,33 +348,40 @@ func NewNodeController( return nc, nil } -func (nc *Controller) doEvictionPass() { - nc.evictorLock.Lock() - defer nc.evictorLock.Unlock() - for k := range nc.zonePodEvictor { - // Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded). 
- nc.zonePodEvictor[k].Try(func(value scheduler.TimedValue) (bool, time.Duration) { - node, err := nc.nodeLister.Get(value.Value) - if apierrors.IsNotFound(err) { - glog.Warningf("Node %v no longer present in nodeLister!", value.Value) - } else if err != nil { - glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) - } else { - zone := utilnode.GetZoneKey(node) - evictionsNumber.WithLabelValues(zone).Inc() - } - nodeUID, _ := value.UID.(string) - remaining, err := util.DeletePods(nc.kubeClient, nc.recorder, value.Value, nodeUID, nc.daemonSetStore) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err)) - return false, 0 - } - if remaining { - glog.Infof("Pods awaiting deletion due to Controller eviction") - } - return true, 0 - }) +// Run starts an asynchronous loop that monitors the status of cluster nodes. +func (nc *Controller) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + glog.Infof("Starting node controller") + defer glog.Infof("Shutting down node controller") + + if !controller.WaitForCacheSync("taint", stopCh, nc.nodeInformerSynced, nc.podInformerSynced, nc.daemonSetInformerSynced) { + return } + + if nc.runTaintManager { + go nc.taintManager.Run(wait.NeverStop) + } + + if nc.useTaintBasedEvictions { + // Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated + // taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints. + go wait.Until(nc.doNoExecuteTaintingPass, scheduler.NodeEvictionPeriod, wait.NeverStop) + } else { + // Managing eviction of nodes: + // When we delete pods off a node, if the node was not empty at the time we then + // queue an eviction watcher. If we hit an error, retry deletion. + go wait.Until(nc.doEvictionPass, scheduler.NodeEvictionPeriod, wait.NeverStop) + } + + // Incorporate the results of node status pushed from kubelet to master. 
+ go wait.Until(func() { + if err := nc.monitorNodeStatus(); err != nil { + glog.Errorf("Error monitoring node status: %v", err) + } + }, nc.nodeMonitorPeriod, wait.NeverStop) + + <-stopCh } // doFixDeprecatedTaintKeyPass checks and replaces deprecated taint key with proper key name if needed. @@ -478,7 +416,7 @@ func (nc *Controller) doFixDeprecatedTaintKeyPass(node *v1.Node) error { glog.Warningf("Detected deprecated taint keys: %v on node: %v, will substitute them with %v", taintsToDel, node.GetName(), taintsToAdd) - if !util.SwapNodeControllerTaint(nc.kubeClient, taintsToAdd, taintsToDel, node) { + if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, taintsToAdd, taintsToDel, node) { return fmt.Errorf("failed to swap taints of node %+v", node) } return nil @@ -506,7 +444,7 @@ func (nc *Controller) doNoScheduleTaintingPass(node *v1.Node) error { if len(taintsToAdd) == 0 && len(taintsToDel) == 0 { return nil } - if !util.SwapNodeControllerTaint(nc.kubeClient, taintsToAdd, taintsToDel, node) { + if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, taintsToAdd, taintsToDel, node) { return fmt.Errorf("failed to swap taints of node %+v", node) } return nil @@ -515,9 +453,9 @@ func (nc *Controller) doNoScheduleTaintingPass(node *v1.Node) error { func (nc *Controller) doNoExecuteTaintingPass() { nc.evictorLock.Lock() defer nc.evictorLock.Unlock() - for k := range nc.zoneNoExecuteTainer { + for k := range nc.zoneNoExecuteTainter { // Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded). 
- nc.zoneNoExecuteTainer[k].Try(func(value scheduler.TimedValue) (bool, time.Duration) { + nc.zoneNoExecuteTainter[k].Try(func(value scheduler.TimedValue) (bool, time.Duration) { node, err := nc.nodeLister.Get(value.Value) if apierrors.IsNotFound(err) { glog.Warningf("Node %v no longer present in nodeLister!", value.Value) @@ -546,70 +484,37 @@ func (nc *Controller) doNoExecuteTaintingPass() { return true, 0 } - return util.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{&oppositeTaint}, node), 0 + return nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{&oppositeTaint}, node), 0 }) } } -// Run starts an asynchronous loop that monitors the status of cluster nodes. -func (nc *Controller) Run(stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - - glog.Infof("Starting node controller") - defer glog.Infof("Shutting down node controller") - - if !controller.WaitForCacheSync("node", stopCh, nc.nodeInformerSynced, nc.podInformerSynced, nc.daemonSetInformerSynced) { - return - } - - // Incorporate the results of node status pushed from kubelet to master. - go wait.Until(func() { - if err := nc.monitorNodeStatus(); err != nil { - glog.Errorf("Error monitoring node status: %v", err) - } - }, nc.nodeMonitorPeriod, wait.NeverStop) - - if nc.runTaintManager { - go nc.taintManager.Run(wait.NeverStop) - } - - if nc.useTaintBasedEvictions { - // Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated - // taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints. - go wait.Until(nc.doNoExecuteTaintingPass, scheduler.NodeEvictionPeriod, wait.NeverStop) - } else { - // Managing eviction of nodes: - // When we delete pods off a node, if the node was not empty at the time we then - // queue an eviction watcher. If we hit an error, retry deletion. 
- go wait.Until(nc.doEvictionPass, scheduler.NodeEvictionPeriod, wait.NeverStop) - } - - if nc.allocateNodeCIDRs { - if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType { - go nc.cidrAllocator.Run(wait.NeverStop) - } - } - - <-stopCh -} - -// addPodEvictorForNewZone checks if new zone appeared, and if so add new evictor. -func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) { - zone := utilnode.GetZoneKey(node) - if _, found := nc.zoneStates[zone]; !found { - nc.zoneStates[zone] = stateInitial - if !nc.useTaintBasedEvictions { - nc.zonePodEvictor[zone] = - scheduler.NewRateLimitedTimedQueue( - flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, scheduler.EvictionRateLimiterBurst)) - } else { - nc.zoneNoExecuteTainer[zone] = - scheduler.NewRateLimitedTimedQueue( - flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, scheduler.EvictionRateLimiterBurst)) - } - // Init the metric for the new zone. - glog.Infof("Initializing eviction metric for zone: %v", zone) - evictionsNumber.WithLabelValues(zone).Add(0) +func (nc *Controller) doEvictionPass() { + nc.evictorLock.Lock() + defer nc.evictorLock.Unlock() + for k := range nc.zonePodEvictor { + // Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded). 
+ nc.zonePodEvictor[k].Try(func(value scheduler.TimedValue) (bool, time.Duration) { + node, err := nc.nodeLister.Get(value.Value) + if apierrors.IsNotFound(err) { + glog.Warningf("Node %v no longer present in nodeLister!", value.Value) + } else if err != nil { + glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) + } else { + zone := utilnode.GetZoneKey(node) + evictionsNumber.WithLabelValues(zone).Inc() + } + nodeUID, _ := value.UID.(string) + remaining, err := nodeutil.DeletePods(nc.kubeClient, nc.recorder, value.Value, nodeUID, nc.daemonSetStore) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err)) + return false, 0 + } + if remaining { + glog.Infof("Pods awaiting deletion due to Controller eviction") + } + return true, 0 + }) } } @@ -631,7 +536,7 @@ func (nc *Controller) monitorNodeStatus() error { for i := range added { glog.V(1).Infof("Controller observed a new Node: %#v", added[i].Name) - util.RecordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in Controller", added[i].Name)) + nodeutil.RecordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in Controller", added[i].Name)) nc.knownNodeSet[added[i].Name] = added[i] nc.addPodEvictorForNewZone(added[i]) if nc.useTaintBasedEvictions { @@ -643,7 +548,7 @@ func (nc *Controller) monitorNodeStatus() error { for i := range deleted { glog.V(1).Infof("Controller observed a Node deletion: %v", deleted[i].Name) - util.RecordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from Controller", deleted[i].Name)) + nodeutil.RecordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from Controller", deleted[i].Name)) 
delete(nc.knownNodeSet, deleted[i].Name) } @@ -684,7 +589,7 @@ func (nc *Controller) monitorNodeStatus() error { // We want to update the taint straight away if Node is already tainted with the UnreachableTaint if taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) { taintToAdd := *NotReadyTaintTemplate - if !util.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) { + if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) { glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.") } } else if nc.markNodeForTainting(node) { @@ -711,7 +616,7 @@ func (nc *Controller) monitorNodeStatus() error { // We want to update the taint straight away if Node is already tainted with the UnreachableTaint if taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) { taintToAdd := *UnreachableTaintTemplate - if !util.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) { + if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) { glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.") } } else if nc.markNodeForTainting(node) { @@ -751,8 +656,8 @@ func (nc *Controller) monitorNodeStatus() error { // Report node event. 
if currentReadyCondition.Status != v1.ConditionTrue && observedReadyCondition.Status == v1.ConditionTrue { - util.RecordNodeStatusChange(nc.recorder, node, "NodeNotReady") - if err = util.MarkAllPodsNotReady(nc.kubeClient, node); err != nil { + nodeutil.RecordNodeStatusChange(nc.recorder, node, "NodeNotReady") + if err = nodeutil.MarkAllPodsNotReady(nc.kubeClient, node); err != nil { utilruntime.HandleError(fmt.Errorf("Unable to mark all pods NotReady on node %v: %v", node.Name, err)) } } @@ -767,13 +672,13 @@ func (nc *Controller) monitorNodeStatus() error { } if !exists { glog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name) - util.RecordNodeEvent(nc.recorder, node.Name, string(node.UID), v1.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name)) + nodeutil.RecordNodeEvent(nc.recorder, node.Name, string(node.UID), v1.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name)) go func(nodeName string) { defer utilruntime.HandleCrash() // Kubelet is not reporting and Cloud Provider says node // is gone. Delete it without worrying about grace // periods. 
- if err := util.ForcefullyDeleteNode(nc.kubeClient, nodeName); err != nil { + if err := nodeutil.ForcefullyDeleteNode(nc.kubeClient, nodeName); err != nil { glog.Errorf("Unable to forcefully delete node %q: %v", nodeName, err) } }(node.Name) @@ -786,131 +691,6 @@ func (nc *Controller) monitorNodeStatus() error { return nil } -func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.NodeCondition, nodes []*v1.Node) { - newZoneStates := map[string]ZoneState{} - allAreFullyDisrupted := true - for k, v := range zoneToNodeConditions { - zoneSize.WithLabelValues(k).Set(float64(len(v))) - unhealthy, newState := nc.computeZoneStateFunc(v) - zoneHealth.WithLabelValues(k).Set(float64(100*(len(v)-unhealthy)) / float64(len(v))) - unhealthyNodes.WithLabelValues(k).Set(float64(unhealthy)) - if newState != stateFullDisruption { - allAreFullyDisrupted = false - } - newZoneStates[k] = newState - if _, had := nc.zoneStates[k]; !had { - glog.Errorf("Setting initial state for unseen zone: %v", k) - nc.zoneStates[k] = stateInitial - } - } - - allWasFullyDisrupted := true - for k, v := range nc.zoneStates { - if _, have := zoneToNodeConditions[k]; !have { - zoneSize.WithLabelValues(k).Set(0) - zoneHealth.WithLabelValues(k).Set(100) - unhealthyNodes.WithLabelValues(k).Set(0) - delete(nc.zoneStates, k) - continue - } - if v != stateFullDisruption { - allWasFullyDisrupted = false - break - } - } - - // At least one node was responding in previous pass or in the current pass. Semantics is as follows: - // - if the new state is "partialDisruption" we call a user defined function that returns a new limiter to use, - // - if the new state is "normal" we resume normal operation (go back to default limiter settings), - // - if new state is "fullDisruption" we restore normal eviction rate, - // - unless all zones in the cluster are in "fullDisruption" - in that case we stop all evictions. 
- if !allAreFullyDisrupted || !allWasFullyDisrupted { - // We're switching to full disruption mode - if allAreFullyDisrupted { - glog.V(0).Info("Controller detected that all Nodes are not-Ready. Entering master disruption mode.") - for i := range nodes { - if nc.useTaintBasedEvictions { - _, err := nc.markNodeAsReachable(nodes[i]) - if err != nil { - glog.Errorf("Failed to remove taints from Node %v", nodes[i].Name) - } - } else { - nc.cancelPodEviction(nodes[i]) - } - } - // We stop all evictions. - for k := range nc.zoneStates { - if nc.useTaintBasedEvictions { - nc.zoneNoExecuteTainer[k].SwapLimiter(0) - } else { - nc.zonePodEvictor[k].SwapLimiter(0) - } - } - for k := range nc.zoneStates { - nc.zoneStates[k] = stateFullDisruption - } - // All rate limiters are updated, so we can return early here. - return - } - // We're exiting full disruption mode - if allWasFullyDisrupted { - glog.V(0).Info("Controller detected that some Nodes are Ready. Exiting master disruption mode.") - // When exiting disruption mode update probe timestamps on all Nodes. - now := nc.now() - for i := range nodes { - v := nc.nodeStatusMap[nodes[i].Name] - v.probeTimestamp = now - v.readyTransitionTimestamp = now - nc.nodeStatusMap[nodes[i].Name] = v - } - // We reset all rate limiters to settings appropriate for the given state. 
- for k := range nc.zoneStates { - nc.setLimiterInZone(k, len(zoneToNodeConditions[k]), newZoneStates[k]) - nc.zoneStates[k] = newZoneStates[k] - } - return - } - // We know that there's at least one not-fully disrupted so, - // we can use default behavior for rate limiters - for k, v := range nc.zoneStates { - newState := newZoneStates[k] - if v == newState { - continue - } - glog.V(0).Infof("Controller detected that zone %v is now in state %v.", k, newState) - nc.setLimiterInZone(k, len(zoneToNodeConditions[k]), newState) - nc.zoneStates[k] = newState - } - } -} - -func (nc *Controller) setLimiterInZone(zone string, zoneSize int, state ZoneState) { - switch state { - case stateNormal: - if nc.useTaintBasedEvictions { - nc.zoneNoExecuteTainer[zone].SwapLimiter(nc.evictionLimiterQPS) - } else { - nc.zonePodEvictor[zone].SwapLimiter(nc.evictionLimiterQPS) - } - case statePartialDisruption: - if nc.useTaintBasedEvictions { - nc.zoneNoExecuteTainer[zone].SwapLimiter( - nc.enterPartialDisruptionFunc(zoneSize)) - } else { - nc.zonePodEvictor[zone].SwapLimiter( - nc.enterPartialDisruptionFunc(zoneSize)) - } - case stateFullDisruption: - if nc.useTaintBasedEvictions { - nc.zoneNoExecuteTainer[zone].SwapLimiter( - nc.enterFullDisruptionFunc(zoneSize)) - } else { - nc.zonePodEvictor[zone].SwapLimiter( - nc.enterFullDisruptionFunc(zoneSize)) - } - } -} - // tryUpdateNodeStatus checks a given node's conditions and tries to update it. Returns grace period to // which given node is entitled, state of current and last observed Ready Condition, and an error if it occurred. 
func (nc *Controller) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.NodeCondition, *v1.NodeCondition, error) { @@ -1082,6 +862,131 @@ func (nc *Controller) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.Node return gracePeriod, observedReadyCondition, currentReadyCondition, err } +func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.NodeCondition, nodes []*v1.Node) { + newZoneStates := map[string]ZoneState{} + allAreFullyDisrupted := true + for k, v := range zoneToNodeConditions { + zoneSize.WithLabelValues(k).Set(float64(len(v))) + unhealthy, newState := nc.computeZoneStateFunc(v) + zoneHealth.WithLabelValues(k).Set(float64(100*(len(v)-unhealthy)) / float64(len(v))) + unhealthyNodes.WithLabelValues(k).Set(float64(unhealthy)) + if newState != stateFullDisruption { + allAreFullyDisrupted = false + } + newZoneStates[k] = newState + if _, had := nc.zoneStates[k]; !had { + glog.Errorf("Setting initial state for unseen zone: %v", k) + nc.zoneStates[k] = stateInitial + } + } + + allWasFullyDisrupted := true + for k, v := range nc.zoneStates { + if _, have := zoneToNodeConditions[k]; !have { + zoneSize.WithLabelValues(k).Set(0) + zoneHealth.WithLabelValues(k).Set(100) + unhealthyNodes.WithLabelValues(k).Set(0) + delete(nc.zoneStates, k) + continue + } + if v != stateFullDisruption { + allWasFullyDisrupted = false + break + } + } + + // At least one node was responding in previous pass or in the current pass. Semantics is as follows: + // - if the new state is "partialDisruption" we call a user defined function that returns a new limiter to use, + // - if the new state is "normal" we resume normal operation (go back to default limiter settings), + // - if new state is "fullDisruption" we restore normal eviction rate, + // - unless all zones in the cluster are in "fullDisruption" - in that case we stop all evictions. 
+ if !allAreFullyDisrupted || !allWasFullyDisrupted { + // We're switching to full disruption mode + if allAreFullyDisrupted { + glog.V(0).Info("Controller detected that all Nodes are not-Ready. Entering master disruption mode.") + for i := range nodes { + if nc.useTaintBasedEvictions { + _, err := nc.markNodeAsReachable(nodes[i]) + if err != nil { + glog.Errorf("Failed to remove taints from Node %v", nodes[i].Name) + } + } else { + nc.cancelPodEviction(nodes[i]) + } + } + // We stop all evictions. + for k := range nc.zoneStates { + if nc.useTaintBasedEvictions { + nc.zoneNoExecuteTainter[k].SwapLimiter(0) + } else { + nc.zonePodEvictor[k].SwapLimiter(0) + } + } + for k := range nc.zoneStates { + nc.zoneStates[k] = stateFullDisruption + } + // All rate limiters are updated, so we can return early here. + return + } + // We're exiting full disruption mode + if allWasFullyDisrupted { + glog.V(0).Info("Controller detected that some Nodes are Ready. Exiting master disruption mode.") + // When exiting disruption mode update probe timestamps on all Nodes. + now := nc.now() + for i := range nodes { + v := nc.nodeStatusMap[nodes[i].Name] + v.probeTimestamp = now + v.readyTransitionTimestamp = now + nc.nodeStatusMap[nodes[i].Name] = v + } + // We reset all rate limiters to settings appropriate for the given state. 
+ for k := range nc.zoneStates { + nc.setLimiterInZone(k, len(zoneToNodeConditions[k]), newZoneStates[k]) + nc.zoneStates[k] = newZoneStates[k] + } + return + } + // We know that there's at least one not-fully disrupted so, + // we can use default behavior for rate limiters + for k, v := range nc.zoneStates { + newState := newZoneStates[k] + if v == newState { + continue + } + glog.V(0).Infof("Controller detected that zone %v is now in state %v.", k, newState) + nc.setLimiterInZone(k, len(zoneToNodeConditions[k]), newState) + nc.zoneStates[k] = newState + } + } +} + +func (nc *Controller) setLimiterInZone(zone string, zoneSize int, state ZoneState) { + switch state { + case stateNormal: + if nc.useTaintBasedEvictions { + nc.zoneNoExecuteTainter[zone].SwapLimiter(nc.evictionLimiterQPS) + } else { + nc.zonePodEvictor[zone].SwapLimiter(nc.evictionLimiterQPS) + } + case statePartialDisruption: + if nc.useTaintBasedEvictions { + nc.zoneNoExecuteTainter[zone].SwapLimiter( + nc.enterPartialDisruptionFunc(zoneSize)) + } else { + nc.zonePodEvictor[zone].SwapLimiter( + nc.enterPartialDisruptionFunc(zoneSize)) + } + case stateFullDisruption: + if nc.useTaintBasedEvictions { + nc.zoneNoExecuteTainter[zone].SwapLimiter( + nc.enterFullDisruptionFunc(zoneSize)) + } else { + nc.zonePodEvictor[zone].SwapLimiter( + nc.enterFullDisruptionFunc(zoneSize)) + } + } +} + // classifyNodes classifies the allNodes to three categories: // 1. added: the nodes that in 'allNodes', but not in 'knownNodeSet' // 2. deleted: the nodes that in 'knownNodeSet', but not in 'allNodes' @@ -1116,6 +1021,41 @@ func (nc *Controller) classifyNodes(allNodes []*v1.Node) (added, deleted, newZon return } +// HealthyQPSFunc returns the default value for cluster eviction rate - we take +// nodeNum for consistency with ReducedQPSFunc. 
+func (nc *Controller) HealthyQPSFunc(nodeNum int) float32 { + return nc.evictionLimiterQPS +} + +// ReducedQPSFunc returns the QPS for when a the cluster is large make +// evictions slower, if they're small stop evictions altogether. +func (nc *Controller) ReducedQPSFunc(nodeNum int) float32 { + if int32(nodeNum) > nc.largeClusterThreshold { + return nc.secondaryEvictionLimiterQPS + } + return 0 +} + +// addPodEvictorForNewZone checks if new zone appeared, and if so add new evictor. +func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) { + zone := utilnode.GetZoneKey(node) + if _, found := nc.zoneStates[zone]; !found { + nc.zoneStates[zone] = stateInitial + if !nc.useTaintBasedEvictions { + nc.zonePodEvictor[zone] = + scheduler.NewRateLimitedTimedQueue( + flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, scheduler.EvictionRateLimiterBurst)) + } else { + nc.zoneNoExecuteTainter[zone] = + scheduler.NewRateLimitedTimedQueue( + flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, scheduler.EvictionRateLimiterBurst)) + } + // Init the metric for the new zone. + glog.Infof("Initializing eviction metric for zone: %v", zone) + evictionsNumber.WithLabelValues(zone).Add(0) + } +} + // cancelPodEviction removes any queued evictions, typically because the node is available again. It // returns true if an eviction was queued. 
func (nc *Controller) cancelPodEviction(node *v1.Node) bool { @@ -1141,7 +1081,7 @@ func (nc *Controller) evictPods(node *v1.Node) bool { func (nc *Controller) markNodeForTainting(node *v1.Node) bool { nc.evictorLock.Lock() defer nc.evictorLock.Unlock() - return nc.zoneNoExecuteTainer[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID)) + return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID)) } func (nc *Controller) markNodeAsReachable(node *v1.Node) (bool, error) { @@ -1157,22 +1097,7 @@ func (nc *Controller) markNodeAsReachable(node *v1.Node) (bool, error) { glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err) return false, err } - return nc.zoneNoExecuteTainer[utilnode.GetZoneKey(node)].Remove(node.Name), nil -} - -// HealthyQPSFunc returns the default value for cluster eviction rate - we take -// nodeNum for consistency with ReducedQPSFunc. -func (nc *Controller) HealthyQPSFunc(nodeNum int) float32 { - return nc.evictionLimiterQPS -} - -// ReducedQPSFunc returns the QPS for when a the cluster is large make -// evictions slower, if they're small stop evictions altogether. -func (nc *Controller) ReducedQPSFunc(nodeNum int) float32 { - if int32(nodeNum) > nc.largeClusterThreshold { - return nc.secondaryEvictionLimiterQPS - } - return 0 + return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name), nil } // ComputeZoneState returns a slice of NodeReadyConditions for all Nodes in a given zone. diff --git a/pkg/controller/node/nodecontroller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go similarity index 93% rename from pkg/controller/node/nodecontroller_test.go rename to pkg/controller/nodelifecycle/node_lifecycle_controller_test.go index a871dcbc6ef..93affbb9d6f 100644 --- a/pkg/controller/node/nodecontroller_test.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. 
+Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -package node +package nodelifecycle import ( - "net" "strings" "testing" "time" @@ -39,10 +38,9 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/node/ipam" - "k8s.io/kubernetes/pkg/controller/node/scheduler" - "k8s.io/kubernetes/pkg/controller/node/util" + "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler" "k8s.io/kubernetes/pkg/controller/testutil" + nodeutil "k8s.io/kubernetes/pkg/controller/util/node" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/node" taintutils "k8s.io/kubernetes/pkg/util/taints" @@ -60,13 +58,46 @@ const ( func alwaysReady() bool { return true } -type nodeController struct { +type nodeLifecycleController struct { *Controller nodeInformer coreinformers.NodeInformer daemonSetInformer extensionsinformers.DaemonSetInformer } -func newNodeControllerFromClient( +// doEviction does the fake eviction and returns the status of eviction operation. 
+func (nc *nodeLifecycleController) doEviction(fakeNodeHandler *testutil.FakeNodeHandler) bool { + var podEvicted bool + zones := testutil.GetZones(fakeNodeHandler) + for _, zone := range zones { + nc.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) { + uid, _ := value.UID.(string) + nodeutil.DeletePods(fakeNodeHandler, nc.recorder, value.Value, uid, nc.daemonSetStore) + return true, 0 + }) + } + + for _, action := range fakeNodeHandler.Actions() { + if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" { + podEvicted = true + return podEvicted + } + } + return podEvicted +} + +func (nc *nodeLifecycleController) syncNodeStore(fakeNodeHandler *testutil.FakeNodeHandler) error { + nodes, err := fakeNodeHandler.List(metav1.ListOptions{}) + if err != nil { + return err + } + newElems := make([]interface{}, 0, len(nodes.Items)) + for i := range nodes.Items { + newElems = append(newElems, &nodes.Items[i]) + } + return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV") +} + +func newNodeLifecycleControllerFromClient( cloud cloudprovider.Interface, kubeClient clientset.Interface, podEvictionTimeout time.Duration, @@ -77,37 +108,28 @@ func newNodeControllerFromClient( nodeMonitorGracePeriod time.Duration, nodeStartupGracePeriod time.Duration, nodeMonitorPeriod time.Duration, - clusterCIDR *net.IPNet, - serviceCIDR *net.IPNet, - nodeCIDRMaskSize int, - allocateNodeCIDRs bool, useTaints bool, -) (*nodeController, error) { +) (*nodeLifecycleController, error) { factory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) nodeInformer := factory.Core().V1().Nodes() daemonSetInformer := factory.Extensions().V1beta1().DaemonSets() - nc, err := NewNodeController( + nc, err := NewNodeLifecycleController( factory.Core().V1().Pods(), nodeInformer, daemonSetInformer, cloud, kubeClient, + nodeMonitorPeriod, + nodeStartupGracePeriod, + nodeMonitorGracePeriod, podEvictionTimeout, 
evictionLimiterQPS, secondaryEvictionLimiterQPS, largeClusterThreshold, unhealthyZoneThreshold, - nodeMonitorGracePeriod, - nodeStartupGracePeriod, - nodeMonitorPeriod, - clusterCIDR, - serviceCIDR, - nodeCIDRMaskSize, - allocateNodeCIDRs, - ipam.RangeAllocatorType, useTaints, useTaints, useTaints, @@ -120,19 +142,7 @@ func newNodeControllerFromClient( nc.nodeInformerSynced = alwaysReady nc.daemonSetInformerSynced = alwaysReady - return &nodeController{nc, nodeInformer, daemonSetInformer}, nil -} - -func syncNodeStore(nc *nodeController, fakeNodeHandler *testutil.FakeNodeHandler) error { - nodes, err := fakeNodeHandler.List(metav1.ListOptions{}) - if err != nil { - return err - } - newElems := make([]interface{}, 0, len(nodes.Items)) - for i := range nodes.Items { - newElems = append(newElems, &nodes.Items[i]) - } - return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV") + return &nodeLifecycleController{nc, nodeInformer, daemonSetInformer}, nil } func TestMonitorNodeStatusEvictPods(t *testing.T) { @@ -597,7 +607,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) { } for _, item := range table { - nodeController, _ := newNodeControllerFromClient( + nodeController, _ := newNodeLifecycleControllerFromClient( nil, item.fakeNodeHandler, evictionTimeout, @@ -608,17 +618,13 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) { testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, - nil, - nil, - 0, - false, false) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() for _, ds := range item.daemonSets { nodeController.daemonSetInformer.Informer().GetStore().Add(&ds) } - if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -633,7 +639,7 @@ func 
TestMonitorNodeStatusEvictPods(t *testing.T) { item.fakeNodeHandler.Existing[0].Labels = labels item.fakeNodeHandler.Existing[1].Labels = labels } - if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -644,7 +650,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) { if _, ok := nodeController.zonePodEvictor[zone]; ok { nodeController.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) { nodeUID, _ := value.UID.(string) - util.DeletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister()) + nodeutil.DeletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister()) return true, 0 }) } else { @@ -763,12 +769,21 @@ func TestPodStatusChange(t *testing.T) { } for _, item := range table { - nodeController, _ := newNodeControllerFromClient(nil, item.fakeNodeHandler, - evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod, - testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false) + nodeController, _ := newNodeLifecycleControllerFromClient( + nil, + item.fakeNodeHandler, + evictionTimeout, + testRateLimiterQPS, + testRateLimiterQPS, + testLargeClusterThreshold, + testUnhealthyThreshold, + testNodeMonitorGracePeriod, + testNodeStartupGracePeriod, + testNodeMonitorPeriod, + false) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() - if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err 
!= nil { @@ -779,7 +794,7 @@ func TestPodStatusChange(t *testing.T) { item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus item.fakeNodeHandler.Existing[1].Status = item.secondNodeNewStatus } - if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -789,7 +804,7 @@ func TestPodStatusChange(t *testing.T) { for _, zone := range zones { nodeController.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) { nodeUID, _ := value.UID.(string) - util.DeletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore) + nodeutil.DeletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore) return true, 0 }) } @@ -809,7 +824,6 @@ func TestPodStatusChange(t *testing.T) { t.Errorf("expected pod update: %+v, got %+v for %+v", podReasonUpdate, item.expectedPodUpdate, item.description) } } - } func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) { @@ -1280,9 +1294,18 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) { Existing: item.nodeList, Clientset: fake.NewSimpleClientset(&v1.PodList{Items: item.podList}), } - nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler, - evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod, - testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false) + nodeController, _ := newNodeLifecycleControllerFromClient( + nil, + fakeNodeHandler, + evictionTimeout, + testRateLimiterQPS, + testRateLimiterQPS, + testLargeClusterThreshold, + testUnhealthyThreshold, + testNodeMonitorGracePeriod, + testNodeStartupGracePeriod, + testNodeMonitorPeriod, + false) nodeController.now = func() metav1.Time { 
return fakeNow } nodeController.enterPartialDisruptionFunc = func(nodeNum int) float32 { return testRateLimiterQPS @@ -1291,7 +1314,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) { nodeController.enterFullDisruptionFunc = func(nodeNum int) float32 { return testRateLimiterQPS } - if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -1309,7 +1332,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) { fakeNodeHandler.Existing[i].Status = item.updatedNodeStatuses[i] } - if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -1337,27 +1360,6 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) { } } -// doEviction does the fake eviction and returns the status of eviction operation. -func (nc *nodeController) doEviction(fakeNodeHandler *testutil.FakeNodeHandler) bool { - var podEvicted bool - zones := testutil.GetZones(fakeNodeHandler) - for _, zone := range zones { - nc.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) { - uid, _ := value.UID.(string) - util.DeletePods(fakeNodeHandler, nc.recorder, value.Value, uid, nc.daemonSetStore) - return true, 0 - }) - } - - for _, action := range fakeNodeHandler.Actions() { - if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" { - podEvicted = true - return podEvicted - } - } - return podEvicted -} - // TestCloudProviderNoRateLimit tests that monitorNodes() immediately deletes // pods and the node when kubelet has not reported, and the cloudprovider says // the node is gone. 
@@ -1384,10 +1386,18 @@ func TestCloudProviderNoRateLimit(t *testing.T) { Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0"), *testutil.NewPod("pod1", "node0")}}), DeleteWaitChan: make(chan struct{}), } - nodeController, _ := newNodeControllerFromClient(nil, fnh, 10*time.Minute, - testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, - testNodeMonitorGracePeriod, testNodeStartupGracePeriod, - testNodeMonitorPeriod, nil, nil, 0, false, false) + nodeController, _ := newNodeLifecycleControllerFromClient( + nil, + fnh, + 10*time.Minute, + testRateLimiterQPS, + testRateLimiterQPS, + testLargeClusterThreshold, + testUnhealthyThreshold, + testNodeMonitorGracePeriod, + testNodeStartupGracePeriod, + testNodeMonitorPeriod, + false) nodeController.cloud = &fakecloud.FakeCloud{} nodeController.now = func() metav1.Time { return metav1.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) } nodeController.recorder = testutil.NewFakeRecorder() @@ -1395,7 +1405,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) { return false, nil } // monitorNodeStatus should allow this node to be immediately deleted - if err := syncNodeStore(nodeController, fnh); err != nil { + if err := nodeController.syncNodeStore(fnh); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -1624,12 +1634,21 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) { } for i, item := range table { - nodeController, _ := newNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute, - testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, - testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false) + nodeController, _ := newNodeLifecycleControllerFromClient( + nil, + item.fakeNodeHandler, + 5*time.Minute, + testRateLimiterQPS, + testRateLimiterQPS, + testLargeClusterThreshold, + 
testUnhealthyThreshold, + testNodeMonitorGracePeriod, + testNodeStartupGracePeriod, + testNodeMonitorPeriod, + false) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() - if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -1638,7 +1657,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) { if item.timeToPass > 0 { nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(item.timeToPass)} } item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus - if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -1768,12 +1787,21 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) { } for i, item := range table { - nodeController, _ := newNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute, - testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, - testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false) + nodeController, _ := newNodeLifecycleControllerFromClient( + nil, + item.fakeNodeHandler, + 5*time.Minute, + testRateLimiterQPS, + testRateLimiterQPS, + testLargeClusterThreshold, + testUnhealthyThreshold, + testNodeMonitorGracePeriod, + testNodeStartupGracePeriod, + testNodeMonitorPeriod, + false) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() - if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil { t.Errorf("unexpected 
error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -1782,7 +1810,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) { if item.timeToPass > 0 { nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(item.timeToPass)} } item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus - if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -1879,12 +1907,21 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) { originalTaint := UnreachableTaintTemplate updatedTaint := NotReadyTaintTemplate - nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler, - evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod, - testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, true) + nodeController, _ := newNodeLifecycleControllerFromClient( + nil, + fakeNodeHandler, + evictionTimeout, + testRateLimiterQPS, + testRateLimiterQPS, + testLargeClusterThreshold, + testUnhealthyThreshold, + testNodeMonitorGracePeriod, + testNodeStartupGracePeriod, + testNodeMonitorPeriod, + true) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() - if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -1922,7 +1959,7 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) { return } - if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := 
nodeController.monitorNodeStatus(); err != nil { @@ -1972,9 +2009,18 @@ func TestTaintsNodeByCondition(t *testing.T) { Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}), } - nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler, evictionTimeout, - testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod, - testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, true) + nodeController, _ := newNodeLifecycleControllerFromClient( + nil, + fakeNodeHandler, + evictionTimeout, + testRateLimiterQPS, + testRateLimiterQPS, + testLargeClusterThreshold, + testUnhealthyThreshold, + testNodeMonitorGracePeriod, + testNodeStartupGracePeriod, + testNodeMonitorPeriod, + true) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() @@ -2098,11 +2144,11 @@ func TestTaintsNodeByCondition(t *testing.T) { for _, test := range tests { fakeNodeHandler.Update(test.Node) - if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } nodeController.doNoScheduleTaintingPass(test.Node) - if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } node0, err := nodeController.nodeLister.Get("node0") @@ -2150,10 +2196,18 @@ func TestNodeEventGeneration(t *testing.T) { Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}), } - nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler, 5*time.Minute, - testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, - testNodeMonitorGracePeriod, testNodeStartupGracePeriod, - testNodeMonitorPeriod, nil, nil, 0, false, false) + 
nodeController, _ := newNodeLifecycleControllerFromClient( + nil, + fakeNodeHandler, + 5*time.Minute, + testRateLimiterQPS, + testRateLimiterQPS, + testLargeClusterThreshold, + testUnhealthyThreshold, + testNodeMonitorGracePeriod, + testNodeStartupGracePeriod, + testNodeMonitorPeriod, + false) nodeController.cloud = &fakecloud.FakeCloud{} nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) { return false, nil @@ -2161,7 +2215,7 @@ func TestNodeEventGeneration(t *testing.T) { nodeController.now = func() metav1.Time { return fakeNow } fakeRecorder := testutil.NewFakeRecorder() nodeController.recorder = fakeRecorder - if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } if err := nodeController.monitorNodeStatus(); err != nil { @@ -2208,9 +2262,18 @@ func TestFixDeprecatedTaintKey(t *testing.T) { Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}), } - nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler, evictionTimeout, - testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod, - testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, true) + nodeController, _ := newNodeLifecycleControllerFromClient( + nil, + fakeNodeHandler, + evictionTimeout, + testRateLimiterQPS, + testRateLimiterQPS, + testLargeClusterThreshold, + testUnhealthyThreshold, + testNodeMonitorGracePeriod, + testNodeStartupGracePeriod, + testNodeMonitorPeriod, + true) nodeController.now = func() metav1.Time { return fakeNow } nodeController.recorder = testutil.NewFakeRecorder() @@ -2319,11 +2382,11 @@ func TestFixDeprecatedTaintKey(t *testing.T) { for _, test := range tests { fakeNodeHandler.Update(test.Node) - if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + if err := 
nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } nodeController.doFixDeprecatedTaintKeyPass(test.Node) - if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil { + if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } node, err := nodeController.nodeLister.Get(test.Node.GetName()) diff --git a/pkg/controller/node/scheduler/BUILD b/pkg/controller/nodelifecycle/scheduler/BUILD similarity index 85% rename from pkg/controller/node/scheduler/BUILD rename to pkg/controller/nodelifecycle/scheduler/BUILD index efe8ad0b8c0..c9d54bd628b 100644 --- a/pkg/controller/node/scheduler/BUILD +++ b/pkg/controller/nodelifecycle/scheduler/BUILD @@ -1,39 +1,14 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = [ - "rate_limited_queue_test.go", - "taint_controller_test.go", - "timed_workers_test.go", - ], - embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/pkg/controller/node/scheduler", - deps = [ - "//pkg/controller/testutil:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", - "//vendor/k8s.io/client-go/testing:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ "rate_limited_queue.go", - "taint_controller.go", + "taint_manager.go", "timed_workers.go", ], - importpath = "k8s.io/kubernetes/pkg/controller/node/scheduler", + importpath = "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler", + visibility = ["//visibility:public"], deps = [ 
"//pkg/apis/core/helper:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", @@ -53,6 +28,26 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = [ + "rate_limited_queue_test.go", + "taint_manager_test.go", + "timed_workers_test.go", + ], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler", + deps = [ + "//pkg/controller/testutil:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -64,4 +59,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/pkg/controller/node/scheduler/rate_limited_queue.go b/pkg/controller/nodelifecycle/scheduler/rate_limited_queue.go similarity index 100% rename from pkg/controller/node/scheduler/rate_limited_queue.go rename to pkg/controller/nodelifecycle/scheduler/rate_limited_queue.go diff --git a/pkg/controller/node/scheduler/rate_limited_queue_test.go b/pkg/controller/nodelifecycle/scheduler/rate_limited_queue_test.go similarity index 100% rename from pkg/controller/node/scheduler/rate_limited_queue_test.go rename to pkg/controller/nodelifecycle/scheduler/rate_limited_queue_test.go diff --git a/pkg/controller/node/scheduler/taint_controller.go b/pkg/controller/nodelifecycle/scheduler/taint_manager.go similarity index 99% rename from pkg/controller/node/scheduler/taint_controller.go rename to pkg/controller/nodelifecycle/scheduler/taint_manager.go index bf641f69fb4..a71fa8fc788 100644 --- a/pkg/controller/node/scheduler/taint_controller.go +++ 
b/pkg/controller/nodelifecycle/scheduler/taint_manager.go @@ -18,9 +18,6 @@ package scheduler import ( "fmt" - "sync" - "time" - "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/apis/core/helper" @@ -30,6 +27,8 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" + "sync" + "time" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" diff --git a/pkg/controller/node/scheduler/taint_controller_test.go b/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go similarity index 100% rename from pkg/controller/node/scheduler/taint_controller_test.go rename to pkg/controller/nodelifecycle/scheduler/taint_manager_test.go diff --git a/pkg/controller/node/scheduler/timed_workers.go b/pkg/controller/nodelifecycle/scheduler/timed_workers.go similarity index 100% rename from pkg/controller/node/scheduler/timed_workers.go rename to pkg/controller/nodelifecycle/scheduler/timed_workers.go diff --git a/pkg/controller/node/scheduler/timed_workers_test.go b/pkg/controller/nodelifecycle/scheduler/timed_workers_test.go similarity index 100% rename from pkg/controller/node/scheduler/timed_workers_test.go rename to pkg/controller/nodelifecycle/scheduler/timed_workers_test.go diff --git a/pkg/controller/node/util/BUILD b/pkg/controller/util/node/BUILD similarity index 87% rename from pkg/controller/node/util/BUILD rename to pkg/controller/util/node/BUILD index b14d0cb9260..8a462510406 100644 --- a/pkg/controller/node/util/BUILD +++ b/pkg/controller/util/node/BUILD @@ -1,14 +1,10 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["controller_utils.go"], - importpath = "k8s.io/kubernetes/pkg/controller/node/util", + importpath = "k8s.io/kubernetes/pkg/controller/util/node", + visibility = 
["//visibility:public"], deps = [ "//pkg/apis/core:go_default_library", "//pkg/cloudprovider:go_default_library", @@ -41,4 +37,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/pkg/controller/node/util/controller_utils.go b/pkg/controller/util/node/controller_utils.go similarity index 96% rename from pkg/controller/node/util/controller_utils.go rename to pkg/controller/util/node/controller_utils.go index 643defce961..4c9a9279acc 100644 --- a/pkg/controller/node/util/controller_utils.go +++ b/pkg/controller/util/node/controller_utils.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package node import ( "errors" @@ -170,9 +170,9 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error { return fmt.Errorf("%v", strings.Join(errMsg, "; ")) } -// NodeExistsInCloudProvider returns true if the node exists in the +// ExistsInCloudProvider returns true if the node exists in the // cloud provider. -func NodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.NodeName) (bool, error) { +func ExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.NodeName) (bool, error) { instances, ok := cloud.Instances() if !ok { return false, fmt.Errorf("%v", ErrCloudInstance) @@ -198,7 +198,7 @@ func RecordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event) } -// RecordNodeStatusChange records a event related to a node status change. +// RecordNodeStatusChange records a event related to a node status change. 
(Common to lifecycle and ipam) func RecordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, newStatus string) { ref := &v1.ObjectReference{ Kind: "Node", @@ -257,7 +257,7 @@ func CreateAddNodeHandler(f func(node *v1.Node) error) func(obj interface{}) { } } -// CreateUpdateNodeHandler creates a node update handler. +// CreateUpdateNodeHandler creates a node update handler. (Common to lifecycle and ipam) func CreateUpdateNodeHandler(f func(oldNode, newNode *v1.Node) error) func(oldObj, newObj interface{}) { return func(origOldObj, origNewObj interface{}) { node := origNewObj.(*v1.Node).DeepCopy() @@ -269,7 +269,7 @@ func CreateUpdateNodeHandler(f func(oldNode, newNode *v1.Node) error) func(oldOb } } -// CreateDeleteNodeHandler creates a delete node handler. +// CreateDeleteNodeHandler creates a delete node handler. (Common to lifecycle and ipam) func CreateDeleteNodeHandler(f func(node *v1.Node) error) func(obj interface{}) { return func(originalObj interface{}) { originalNode, isNode := originalObj.(*v1.Node) diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index b6bb7d4fbe4..d7194290d61 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -33,7 +33,7 @@ go_library( "//pkg/controller/daemon:go_default_library", "//pkg/controller/deployment/util:go_default_library", "//pkg/controller/job:go_default_library", - "//pkg/controller/node:go_default_library", + "//pkg/controller/nodelifecycle:go_default_library", "//pkg/controller/replicaset:go_default_library", "//pkg/controller/replication:go_default_library", "//pkg/kubectl:go_default_library", diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index 4cabc2b46b1..b9a22afc483 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -33,7 +33,7 @@ import ( "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" api "k8s.io/kubernetes/pkg/apis/core" - nodepkg "k8s.io/kubernetes/pkg/controller/node" + nodepkg 
"k8s.io/kubernetes/pkg/controller/nodelifecycle" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 66bab803c10..9f7e222fed8 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -57,7 +57,7 @@ go_library( "//pkg/cloudprovider/providers/gce:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/deployment/util:go_default_library", - "//pkg/controller/node:go_default_library", + "//pkg/controller/nodelifecycle:go_default_library", "//pkg/features:go_default_library", "//pkg/kubectl:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index bdf52dfdd98..58c01bf61da 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -86,7 +86,7 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/controller" - nodectlr "k8s.io/kubernetes/pkg/controller/node" + nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubelet/util/format" diff --git a/test/integration/garbagecollector/BUILD b/test/integration/garbagecollector/BUILD index 8bd1651d139..d6a537df07f 100644 --- a/test/integration/garbagecollector/BUILD +++ b/test/integration/garbagecollector/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_test") go_test( name = "go_default_test", @@ -52,4 +47,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index 
0fbdc42db6b..ec4b7bb01ae 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -30,8 +30,7 @@ go_test( "//pkg/apis/core/v1/helper:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/controller/node:go_default_library", - "//pkg/controller/node/ipam:go_default_library", + "//pkg/controller/nodelifecycle:go_default_library", "//pkg/controller/volume/persistentvolume:go_default_library", "//pkg/features:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", diff --git a/test/integration/scheduler/taint_test.go b/test/integration/scheduler/taint_test.go index da8fc51aa02..bb227ba4dd2 100644 --- a/test/integration/scheduler/taint_test.go +++ b/test/integration/scheduler/taint_test.go @@ -37,8 +37,7 @@ import ( "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - "k8s.io/kubernetes/pkg/controller/node" - "k8s.io/kubernetes/pkg/controller/node/ipam" + "k8s.io/kubernetes/pkg/controller/nodelifecycle" kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction" pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" @@ -85,29 +84,24 @@ func TestTaintNodeByCondition(t *testing.T) { controllerCh := make(chan struct{}) defer close(controllerCh) - // Start NodeController for taint. - nc, err := node.NewNodeController( + // Start NodeLifecycleController for taint. 
+ nc, err := nodelifecycle.NewNodeLifecycleController( informers.Core().V1().Pods(), informers.Core().V1().Nodes(), informers.Extensions().V1beta1().DaemonSets(), nil, // CloudProvider clientset, + time.Second, // Node monitor grace period + time.Second, // Node startup grace period + time.Second, // Node monitor period time.Second, // Pod eviction timeout 100, // Eviction limiter QPS 100, // Secondary eviction limiter QPS 100, // Large cluster threshold 100, // Unhealthy zone threshold - time.Second, // Node monitor grace period - time.Second, // Node startup grace period - time.Second, // Node monitor period - nil, // Cluster CIDR - nil, // Service CIDR - 0, // Node CIDR mask size - false, // Allocate node CIDRs - ipam.RangeAllocatorType, // Allocator type - true, // Run taint manger - true, // Enabled taint based eviction - true, // Enabled TaintNodeByCondition feature + true, // Run taint manager + true, // Use taint based evictions + true, // Enabled TaintNodeByCondition feature ) if err != nil { t.Errorf("Failed to create node controller: %v", err) From 806759cfc9e3c4888815eccd9de6fc3726619f10 Mon Sep 17 00:00:00 2001 From: Mike Wilson Date: Thu, 4 Jan 2018 17:45:40 -0500 Subject: [PATCH 604/794] Changed return of empty string to raise an exception as it should have been from the beginning. 
--- .../layers/kubernetes-worker/reactive/kubernetes_worker.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index 917e4291406..c5e54144d86 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -969,7 +969,8 @@ def get_node_name(): # if we didn't match, just bail to the next node break - return "" + msg = 'Failed to get node name for node %s' % gethostname() + raise GetNodeNameFailed(msg) class ApplyNodeLabelFailed(Exception): @@ -979,9 +980,6 @@ class ApplyNodeLabelFailed(Exception): def _apply_node_label(label, delete=False, overwrite=False): ''' Invoke kubectl to apply node label changes ''' nodename = get_node_name() - if nodename == "": - msg = 'Unable to get node name for node {}'.format(gethostname()) - raise ApplyNodeLabelFailed(msg) # TODO: Make this part of the kubectl calls instead of a special string cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}' From 90814b3a9768791e262840e5c71e0401d37b832c Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Mon, 25 Dec 2017 21:55:26 -0600 Subject: [PATCH 605/794] e2e node framework can generate a base kubelet config file This allows the e2e node test framework to generate a kubelet config file containing the defaults it would typically pass to a test via flags, rather than passing these defaults as flags. 
--- .../jenkins/jenkins-ci-ubuntu.properties | 3 +- test/e2e_node/jenkins/jenkins-ci.properties | 3 +- .../e2e_node/jenkins/jenkins-flaky.properties | 4 +- test/e2e_node/jenkins/jenkins-pull.properties | 3 +- .../jenkins/jenkins-serial-ubuntu.properties | 4 +- .../jenkins/jenkins-serial.properties | 4 +- test/e2e_node/services/BUILD | 8 + test/e2e_node/services/kubelet.go | 202 +++++++++++++++--- 8 files changed, 191 insertions(+), 40 deletions(-) diff --git a/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties b/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties index 1e0c76fa14d..14e30d3d7e4 100644 --- a/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties +++ b/test/e2e_node/jenkins/jenkins-ci-ubuntu.properties @@ -6,7 +6,8 @@ GCE_ZONE=us-central1-f GCE_PROJECT=k8s-jkns-ubuntu-node CLEANUP=true GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"' -KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/' +TEST_ARGS='--feature-gates=KubeletConfigFile=true --generate-kubelet-config-file=true' +KUBELET_ARGS='' TIMEOUT=1h # Use the system spec defined in test/e2e_node/system/specs/gke.yaml. 
SYSTEM_SPEC_NAME=gke diff --git a/test/e2e_node/jenkins/jenkins-ci.properties b/test/e2e_node/jenkins/jenkins-ci.properties index 9c563b6a050..148f0cb8580 100644 --- a/test/e2e_node/jenkins/jenkins-ci.properties +++ b/test/e2e_node/jenkins/jenkins-ci.properties @@ -4,5 +4,6 @@ GCE_ZONE=us-central1-f GCE_PROJECT=k8s-jkns-ci-node-e2e CLEANUP=true GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"' -KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/' +TEST_ARGS='--feature-gates=KubeletConfigFile=true --generate-kubelet-config-file=true' +KUBELET_ARGS='' TIMEOUT=1h diff --git a/test/e2e_node/jenkins/jenkins-flaky.properties b/test/e2e_node/jenkins/jenkins-flaky.properties index 824c1309dcf..4689ebc90e4 100644 --- a/test/e2e_node/jenkins/jenkins-flaky.properties +++ b/test/e2e_node/jenkins/jenkins-flaky.properties @@ -4,8 +4,8 @@ GCE_ZONE=us-central1-f GCE_PROJECT=k8s-jkns-ci-node-e2e CLEANUP=true GINKGO_FLAGS='--focus="\[Flaky\]"' -TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,LocalStorageCapacityIsolation=true,PodPriority=true' -KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/' +TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,LocalStorageCapacityIsolation=true,PodPriority=true,KubeletConfigFile=true --generate-kubelet-config-file=true' +KUBELET_ARGS='' PARALLELISM=1 TIMEOUT=3h diff --git a/test/e2e_node/jenkins/jenkins-pull.properties b/test/e2e_node/jenkins/jenkins-pull.properties index 884e45884f1..3db3dbc7d75 100644 --- a/test/e2e_node/jenkins/jenkins-pull.properties +++ b/test/e2e_node/jenkins/jenkins-pull.properties @@ -4,5 +4,6 @@ GCE_ZONE=us-central1-f GCE_PROJECT=k8s-jkns-pr-node-e2e CLEANUP=true GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2' -KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/' +TEST_ARGS='--feature-gates=KubeletConfigFile=true --generate-kubelet-config-file=true' +KUBELET_ARGS='' diff --git a/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties b/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties 
index 5333bb8b037..7043d308f6f 100644 --- a/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties +++ b/test/e2e_node/jenkins/jenkins-serial-ubuntu.properties @@ -6,8 +6,8 @@ GCE_ZONE=us-central1-f GCE_PROJECT=k8s-jkns-ubuntu-node-serial CLEANUP=true GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"' -TEST_ARGS='--feature-gates=DynamicKubeletConfig=true' -KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/' +TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,KubeletConfigFile=true --generate-kubelet-config-file=true' +KUBELET_ARGS='' PARALLELISM=1 TIMEOUT=3h # Use the system spec defined at test/e2e_node/system/specs/gke.yaml. diff --git a/test/e2e_node/jenkins/jenkins-serial.properties b/test/e2e_node/jenkins/jenkins-serial.properties index 31bded6deb2..9cc243ce193 100644 --- a/test/e2e_node/jenkins/jenkins-serial.properties +++ b/test/e2e_node/jenkins/jenkins-serial.properties @@ -4,7 +4,7 @@ GCE_ZONE=us-west1-b GCE_PROJECT=k8s-jkns-ci-node-e2e CLEANUP=true GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"' -TEST_ARGS='--feature-gates=DynamicKubeletConfig=true' -KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/' +TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,KubeletConfigFile=true --generate-kubelet-config-file=true' +KUBELET_ARGS='' PARALLELISM=1 TIMEOUT=3h diff --git a/test/e2e_node/services/BUILD b/test/e2e_node/services/BUILD index b0852a5828a..e0ab73e37d7 100644 --- a/test/e2e_node/services/BUILD +++ b/test/e2e_node/services/BUILD @@ -22,9 +22,13 @@ go_library( deps = [ "//cmd/kube-apiserver/app:go_default_library", "//cmd/kube-apiserver/app/options:go_default_library", + "//cmd/kubelet/app/options:go_default_library", "//pkg/api/legacyscheme:go_default_library", "//pkg/controller/namespace:go_default_library", "//pkg/features:go_default_library", + "//pkg/kubelet/apis/kubeletconfig:go_default_library", + "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", + 
"//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e_node/builder:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", @@ -33,8 +37,12 @@ go_library( "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/kardianos/osext:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", diff --git a/test/e2e_node/services/kubelet.go b/test/e2e_node/services/kubelet.go index 42be2558c87..aa803859a39 100644 --- a/test/e2e_node/services/kubelet.go +++ b/test/e2e_node/services/kubelet.go @@ -25,11 +25,20 @@ import ( "os/exec" "path/filepath" "strings" + "time" "github.com/golang/glog" + "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilflag "k8s.io/apiserver/pkg/util/flag" + "k8s.io/kubernetes/cmd/kubelet/app/options" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e_node/builder" ) @@ -62,11 +71,14 @@ func (a *args) Set(value string) error { var kubeletArgs args var kubeletContainerized bool var hyperkubeImage string +var genKubeletConfigFile bool func init() { 
flag.Var(&kubeletArgs, "kubelet-flags", "Kubelet flags passed to kubelet, this will override default kubelet flags in the test. Flags specified in multiple kubelet-flags will be concatenate.") flag.BoolVar(&kubeletContainerized, "kubelet-containerized", false, "Run kubelet in a docker container") flag.StringVar(&hyperkubeImage, "hyperkube-image", "", "Docker image with containerized kubelet") + flag.BoolVar(&genKubeletConfigFile, "generate-kubelet-config-file", false, "The test runner will generate a Kubelet config file containing test defaults instead of passing default flags to the Kubelet. "+ + "If you use this test framework feature, ensure that the KubeletConfigFile feature gate is enabled.") } // RunKubelet starts kubelet and waits for termination signal. Once receives the @@ -112,6 +124,12 @@ func (e *E2EServices) startKubelet() (*server, error) { return nil, err } + // KubeletConfiguration file path + kubeletConfigPath, err := kubeletConfigCWDPath() + if err != nil { + return nil, err + } + // Create pod manifest path manifestPath, err := createPodManifestDirectory() if err != nil { @@ -122,6 +140,58 @@ func (e *E2EServices) startKubelet() (*server, error) { if err != nil { return nil, err } + + // PLEASE NOTE: If you set new KubeletConfiguration values or stop setting values here, + // you must also update the flag names in kubeletConfigFlags! 
+ kubeletConfigFlags := []string{} + + // set up the default kubeletconfiguration + kc, err := options.NewKubeletConfiguration() + if err != nil { + return nil, err + } + + kc.CgroupRoot = "/" + kubeletConfigFlags = append(kubeletConfigFlags, "cgroup-root") + + kc.VolumeStatsAggPeriod = metav1.Duration{Duration: 10 * time.Second} // Aggregate volumes frequently so tests don't need to wait as long + kubeletConfigFlags = append(kubeletConfigFlags, "volume-stats-agg-period") + + kc.AllowPrivileged = true + kubeletConfigFlags = append(kubeletConfigFlags, "allow-privileged") + + kc.SerializeImagePulls = false + kubeletConfigFlags = append(kubeletConfigFlags, "serialize-image-pulls") + + kc.PodManifestPath = manifestPath + kubeletConfigFlags = append(kubeletConfigFlags, "pod-manifest-path") + + kc.FileCheckFrequency = metav1.Duration{Duration: 10 * time.Second} // Check file frequently so tests won't wait too long + kubeletConfigFlags = append(kubeletConfigFlags, "file-check-frequency") + + // Assign a fixed CIDR to the node because there is no node controller. + // Note: this MUST be in sync with with the IP in + // - cluster/gce/config-test.sh and + // - test/e2e_node/conformance/run_test.sh. + kc.PodCIDR = "10.100.0.0/24" + kubeletConfigFlags = append(kubeletConfigFlags, "pod-cidr") + + kc.EvictionPressureTransitionPeriod = metav1.Duration{Duration: 30 * time.Second} + kubeletConfigFlags = append(kubeletConfigFlags, "eviction-pressure-transition-period") + + kc.EvictionHard = map[string]string{ + "memory.available": "250Mi", + "nodefs.available": "10%", + "nodefs.inodesFree": "5%", + } + kubeletConfigFlags = append(kubeletConfigFlags, "eviction-hard") + + kc.EvictionMinimumReclaim = map[string]string{ + "nodefs.available": "5%", + "nodefs.inodesFree": "5%", + } + kubeletConfigFlags = append(kubeletConfigFlags, "eviction-minimum-reclaim") + var killCommand, restartCommand *exec.Cmd var isSystemd bool // Apply default kubelet flags. 
@@ -151,12 +221,22 @@ func (e *E2EServices) startKubelet() (*server, error) { "-v", "/var/lib/kubelet:/var/lib/kubelet:rw,rslave", "-v", "/var/log:/var/log", "-v", manifestPath+":"+manifestPath+":rw", - hyperkubeImage, "/hyperkube", "kubelet", - "--containerized", ) + + // if we will generate a kubelet config file, we need to mount that path into the container too + if genKubeletConfigFile { + cmdArgs = append(cmdArgs, "-v", filepath.Dir(kubeletConfigPath)+":"+filepath.Dir(kubeletConfigPath)+":ro") + } + + cmdArgs = append(cmdArgs, hyperkubeImage, "/hyperkube", "kubelet", "--containerized") kubeconfigPath = "/etc/kubernetes/kubeconfig" } else { - cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--slice=runtime.slice", "--remain-after-exit", builder.GetKubeletServerBin()) + cmdArgs = append(cmdArgs, + systemdRun, + "--unit="+unitName, + "--slice=runtime.slice", + "--remain-after-exit", + builder.GetKubeletServerBin()) } killCommand = exec.Command("systemctl", "kill", unitName) @@ -165,41 +245,24 @@ func (e *E2EServices) startKubelet() (*server, error) { Name: "kubelet.log", JournalctlCommand: []string{"-u", unitName}, } - cmdArgs = append(cmdArgs, - "--kubelet-cgroups=/kubelet.slice", - "--cgroup-root=/", - ) + + kc.KubeletCgroups = "/kubelet.slice" + kubeletConfigFlags = append(kubeletConfigFlags, "kubelet-cgroups") } else { cmdArgs = append(cmdArgs, builder.GetKubeletServerBin()) - cmdArgs = append(cmdArgs, - // TODO(random-liu): Get rid of this docker specific thing. - "--runtime-cgroups=/docker-daemon", - "--kubelet-cgroups=/kubelet", - "--cgroup-root=/", - "--system-cgroups=/system", - ) + // TODO(random-liu): Get rid of this docker specific thing. 
+ cmdArgs = append(cmdArgs, "--runtime-cgroups=/docker-daemon") + + kc.KubeletCgroups = "/kubelet" + kubeletConfigFlags = append(kubeletConfigFlags, "kubelet-cgroups") + + kc.SystemCgroups = "/system" + kubeletConfigFlags = append(kubeletConfigFlags, "system-cgroups") } cmdArgs = append(cmdArgs, "--kubeconfig", kubeconfigPath, - "--address", "0.0.0.0", - "--port", kubeletPort, - "--read-only-port", kubeletReadOnlyPort, "--root-dir", KubeletRootDirectory, - "--volume-stats-agg-period", "10s", // Aggregate volumes frequently so tests don't need to wait as long - "--allow-privileged", "true", - "--serialize-image-pulls", "false", - "--pod-manifest-path", manifestPath, - "--file-check-frequency", "10s", // Check file frequently so tests won't wait too long "--docker-disable-shared-pid=false", - // Assign a fixed CIDR to the node because there is no node controller. - // - // Note: this MUST be in sync with with the IP in - // - cluster/gce/config-test.sh and - // - test/e2e_node/conformance/run_test.sh. - "--pod-cidr", "10.100.0.0/24", - "--eviction-pressure-transition-period", "30s", - "--eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", // The hard eviction thresholds. - "--eviction-minimum-reclaim", "nodefs.available=5%,nodefs.inodesFree=5%", // The minimum reclaimed resources after eviction. "--v", LOG_VERBOSITY_LEVEL, "--logtostderr", ) @@ -207,6 +270,7 @@ func (e *E2EServices) startKubelet() (*server, error) { // by kubelet-flags. 
if framework.TestContext.FeatureGates != "" { cmdArgs = append(cmdArgs, "--feature-gates", framework.TestContext.FeatureGates) + utilflag.NewMapStringBool(&kc.FeatureGates).Set(framework.TestContext.FeatureGates) } if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { @@ -239,6 +303,18 @@ func (e *E2EServices) startKubelet() (*server, error) { cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName) } + // Write config file or flags, depending on whether --generate-kubelet-config-file was provided + if genKubeletConfigFile { + if err := writeKubeletConfigFile(kc, kubeletConfigPath); err != nil { + return nil, err + } + // add the flag to load config from a file + cmdArgs = append(cmdArgs, "--config", kubeletConfigPath) + } else { + // generate command line flags from the default config, since --generate-kubelet-config-file was not provided + addKubeletConfigFlags(&cmdArgs, kc, kubeletConfigFlags) + } + // Override the default kubelet flags. cmdArgs = append(cmdArgs, kubeletArgs...) 
@@ -260,6 +336,61 @@ func (e *E2EServices) startKubelet() (*server, error) { return server, server.start() } +// addKubeletConfigFlags adds the flags we care about from the provided kubelet configuration object +func addKubeletConfigFlags(cmdArgs *[]string, kc *kubeletconfig.KubeletConfiguration, flags []string) { + fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError) + options.AddKubeletConfigFlags(fs, kc) + for _, name := range flags { + *cmdArgs = append(*cmdArgs, "--"+name, fs.Lookup(name).Value.String()) + } +} + +// writeKubeletConfigFile writes the kubelet config file based on the args and returns the filename +func writeKubeletConfigFile(internal *kubeletconfig.KubeletConfiguration, path string) error { + // extract the KubeletConfiguration and convert to versioned + versioned := &v1alpha1.KubeletConfiguration{} + scheme, _, err := scheme.NewSchemeAndCodecs() + if err != nil { + return err + } + if err := scheme.Convert(internal, versioned, nil); err != nil { + return err + } + // encode + encoder, err := newKubeletConfigJSONEncoder() + if err != nil { + return err + } + data, err := runtime.Encode(encoder, versioned) + if err != nil { + return err + } + // create the directory, if it does not exist + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + // write the file + if err := ioutil.WriteFile(path, data, 0755); err != nil { + return err + } + return nil +} + +func newKubeletConfigJSONEncoder() (runtime.Encoder, error) { + _, kubeletCodecs, err := scheme.NewSchemeAndCodecs() + if err != nil { + return nil, err + } + + mediaType := "application/json" + info, ok := runtime.SerializerInfoForMediaType(kubeletCodecs.SupportedMediaTypes(), mediaType) + if !ok { + return nil, fmt.Errorf("unsupported media type %q", mediaType) + } + return kubeletCodecs.EncoderForVersion(info.Serializer, v1alpha1.SchemeGroupVersion), nil +} + // createPodManifestDirectory creates pod manifest directory. 
func createPodManifestDirectory() (string, error) { cwd, err := os.Getwd() @@ -316,6 +447,15 @@ func kubeconfigCWDPath() (string, error) { return filepath.Join(cwd, "kubeconfig"), nil } +func kubeletConfigCWDPath() (string, error) { + cwd, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("failed to get current working directory: %v", err) + } + // DO NOT name this file "kubelet" - you will overwrite the the kubelet binary and be very confused :) + return filepath.Join(cwd, "kubelet-config"), nil +} + // like createKubeconfig, but creates kubeconfig at current-working-directory/kubeconfig // returns a fully-qualified path to the kubeconfig file func createKubeconfigCWD() (string, error) { From 51f033a4f14b9e0ea8a6c59a361cb7d24478415d Mon Sep 17 00:00:00 2001 From: ymqytw Date: Thu, 4 Jan 2018 16:53:42 -0800 Subject: [PATCH 606/794] fix bug of swallowing missing merge key error --- .../pkg/util/strategicpatch/patch.go | 24 +++++++++---------- .../pkg/util/strategicpatch/patch_test.go | 15 ++++++++++++ 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index fd08759f391..09dcd0fd594 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1322,23 +1322,23 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me // If they're both maps or lists, recurse into the value. 
switch originalType.Kind() { case reflect.Map: - subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(k) - if err != nil { - return nil, err + subschema, patchMeta, err2 := schema.LookupPatchMetadataForStruct(k) + if err2 != nil { + return nil, err2 } - _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err != nil { - return nil, err + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 } original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) case reflect.Slice: - subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) - if err != nil { - return nil, err + subschema, patchMeta, err2 := schema.LookupPatchMetadataForSlice(k) + if err2 != nil { + return nil, err2 } - _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err != nil { - return nil, err + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 } original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) default: diff --git a/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go b/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go index 03b661d93e6..4721803ccd1 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go @@ -654,6 +654,21 @@ mergingIntList: ExpectedError: "doesn't match", }, }, + { + Description: "missing merge key should error out", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + value: a +`), + TwoWay: []byte(` +mergingList: + - value: b +`), + ExpectedError: "does not contain declared merge key", + }, + }, } func 
TestCustomStrategicMergePatch(t *testing.T) { From 42bd794654b303258621f3230e651ae64cf0b3a9 Mon Sep 17 00:00:00 2001 From: chentao1596 Date: Mon, 6 Nov 2017 15:26:09 +0800 Subject: [PATCH 607/794] fix-bug: version info should be printed when failed to execute 'kubectl apply -f XXXXX' --- pkg/controller/garbagecollector/operations.go | 2 +- .../k8s.io/apimachinery/pkg/api/meta/BUILD | 1 + .../apimachinery/pkg/api/meta/errors.go | 20 ++++- .../pkg/api/meta/multirestmapper.go | 4 +- .../pkg/api/meta/multirestmapper_test.go | 84 +++++++++++++------ .../apimachinery/pkg/api/meta/priority.go | 2 +- .../pkg/api/meta/priority_test.go | 6 +- .../apimachinery/pkg/api/meta/restmapper.go | 4 +- 8 files changed, 88 insertions(+), 35 deletions(-) diff --git a/pkg/controller/garbagecollector/operations.go b/pkg/controller/garbagecollector/operations.go index 16f631f489b..1c898431dc0 100644 --- a/pkg/controller/garbagecollector/operations.go +++ b/pkg/controller/garbagecollector/operations.go @@ -34,7 +34,7 @@ import ( // namespace> tuple to a unversioned.APIResource struct. 
func (gc *GarbageCollector) apiResource(apiVersion, kind string) (*metav1.APIResource, error) { fqKind := schema.FromAPIVersionAndKind(apiVersion, kind) - mapping, err := gc.restMapper.RESTMapping(fqKind.GroupKind(), apiVersion) + mapping, err := gc.restMapper.RESTMapping(fqKind.GroupKind(), fqKind.Version) if err != nil { return nil, newRESTMappingError(kind, apiVersion) } diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD b/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD index aeff38a57bd..21097f9b9d4 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD @@ -52,6 +52,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/errors.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/errors.go index 1503bd6d846..cbf5d0263c6 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/errors.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/errors.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" ) // AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource @@ -85,11 +86,26 @@ func (e *NoResourceMatchError) Error() string { // NoKindMatchError is returned if the RESTMapper can't find any match for a kind type NoKindMatchError struct { - PartialKind schema.GroupVersionKind + // GroupKind is the API group and kind that was searched + GroupKind schema.GroupKind + // SearchedVersions is the optional list of versions the search was restricted to + SearchedVersions []string } func (e *NoKindMatchError) Error() string { - return fmt.Sprintf("no matches for %v", e.PartialKind) + searchedVersions := sets.NewString() 
+ for _, v := range e.SearchedVersions { + searchedVersions.Insert(schema.GroupVersion{Group: e.GroupKind.Group, Version: v}.String()) + } + + switch len(searchedVersions) { + case 0: + return fmt.Sprintf("no matches for kind %q in group %q", e.GroupKind.Kind, e.GroupKind.Group) + case 1: + return fmt.Sprintf("no matches for kind %q in version %q", e.GroupKind.Kind, searchedVersions.List()[0]) + default: + return fmt.Sprintf("no matches for kind %q in versions %q", e.GroupKind.Kind, searchedVersions.List()) + } } func IsNoMatchError(err error) bool { diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go index 679098fe56f..6b01bf197fa 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go @@ -179,7 +179,7 @@ func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (* if len(errors) > 0 { return nil, utilerrors.NewAggregate(errors) } - return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} } // RESTMappings returns all possible RESTMappings for the provided group kind, or an error @@ -204,7 +204,7 @@ func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ( return nil, utilerrors.NewAggregate(errors) } if len(allMappings) == 0 { - return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} } return allMappings, nil } diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/multirestmapper_test.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/multirestmapper_test.go index dec07a16f7b..b71ca468d32 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/multirestmapper_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/multirestmapper_test.go @@ -261,42 +261,78 @@ func 
TestMultiRESTMapperRESTMappings(t *testing.T) { tcs := []struct { name string - mapper MultiRESTMapper - input schema.GroupKind - result []*RESTMapping - err error + mapper MultiRESTMapper + groupKind schema.GroupKind + versions []string + result []*RESTMapping + err error }{ { - name: "empty", - mapper: MultiRESTMapper{}, - input: schema.GroupKind{Kind: "Foo"}, - result: nil, - err: &NoKindMatchError{PartialKind: schema.GroupVersionKind{Kind: "Foo"}}, + name: "empty with no versions", + mapper: MultiRESTMapper{}, + groupKind: schema.GroupKind{Kind: "Foo"}, + result: nil, + err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "Foo"}}, }, { - name: "ignore not found", - mapper: MultiRESTMapper{fixedRESTMapper{err: &NoKindMatchError{PartialKind: schema.GroupVersionKind{Kind: "IGNORE_THIS"}}}}, - input: schema.GroupKind{Kind: "Foo"}, - result: nil, - err: &NoKindMatchError{PartialKind: schema.GroupVersionKind{Kind: "Foo"}}, + name: "empty with one version", + mapper: MultiRESTMapper{}, + groupKind: schema.GroupKind{Kind: "Foo"}, + versions: []string{"v1beta"}, + result: nil, + err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "Foo"}, SearchedVersions: []string{"v1beta"}}, }, { - name: "accept first failure", - mapper: MultiRESTMapper{fixedRESTMapper{err: errors.New("fail on this")}, fixedRESTMapper{mappings: []*RESTMapping{mapping1}}}, - input: schema.GroupKind{Kind: "Foo"}, - result: nil, - err: errors.New("fail on this"), + name: "empty with multi(two) vesions", + mapper: MultiRESTMapper{}, + groupKind: schema.GroupKind{Kind: "Foo"}, + versions: []string{"v1beta", "v2"}, + result: nil, + err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "Foo"}, SearchedVersions: []string{"v1beta", "v2"}}, }, { - name: "return both", - mapper: MultiRESTMapper{fixedRESTMapper{mappings: []*RESTMapping{mapping1}}, fixedRESTMapper{mappings: []*RESTMapping{mapping2}}}, - input: schema.GroupKind{Kind: "Foo"}, - result: []*RESTMapping{mapping1, mapping2}, + name: "ignore 
not found with kind not exist", + mapper: MultiRESTMapper{fixedRESTMapper{err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "IGNORE_THIS"}}}}, + groupKind: schema.GroupKind{Kind: "Foo"}, + versions: nil, + result: nil, + err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "Foo"}}, + }, + { + name: "ignore not found with version not exist", + mapper: MultiRESTMapper{fixedRESTMapper{err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "Foo"}, SearchedVersions: []string{"v1"}}}}, + groupKind: schema.GroupKind{Kind: "Foo"}, + versions: []string{"v1beta"}, + result: nil, + err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "Foo"}, SearchedVersions: []string{"v1beta"}}, + }, + { + name: "ignore not found with multi versions not exist", + mapper: MultiRESTMapper{fixedRESTMapper{err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "Foo"}, SearchedVersions: []string{"v1"}}}}, + groupKind: schema.GroupKind{Kind: "Foo"}, + versions: []string{"v1beta", "v2"}, + result: nil, + err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "Foo"}, SearchedVersions: []string{"v1beta", "v2"}}, + }, + { + name: "accept first failure", + mapper: MultiRESTMapper{fixedRESTMapper{err: errors.New("fail on this")}, fixedRESTMapper{mappings: []*RESTMapping{mapping1}}}, + groupKind: schema.GroupKind{Kind: "Foo"}, + versions: []string{"v1beta"}, + result: nil, + err: errors.New("fail on this"), + }, + { + name: "return both", + mapper: MultiRESTMapper{fixedRESTMapper{mappings: []*RESTMapping{mapping1}}, fixedRESTMapper{mappings: []*RESTMapping{mapping2}}}, + groupKind: schema.GroupKind{Kind: "Foo"}, + versions: []string{"v1beta"}, + result: []*RESTMapping{mapping1, mapping2}, }, } for _, tc := range tcs { - actualResult, actualErr := tc.mapper.RESTMappings(tc.input) + actualResult, actualErr := tc.mapper.RESTMappings(tc.groupKind, tc.versions...) 
if e, a := tc.result, actualResult; !reflect.DeepEqual(e, a) { t.Errorf("%s: expected %v, got %v", tc.name, e, a) } diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/priority.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/priority.go index 2a14aa7ab17..df28e64ffaa 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/priority.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/priority.go @@ -153,7 +153,7 @@ func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) } func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) { - mappings, err := m.Delegate.RESTMappings(gk) + mappings, err := m.Delegate.RESTMappings(gk, versions...) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/priority_test.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/priority_test.go index f273a39f9f6..098d53bd513 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/priority_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/priority_test.go @@ -234,13 +234,13 @@ func TestPriorityRESTMapperRESTMapping(t *testing.T) { name: "empty", mapper: PriorityRESTMapper{Delegate: MultiRESTMapper{}}, input: schema.GroupKind{Kind: "Foo"}, - err: &NoKindMatchError{PartialKind: schema.GroupVersionKind{Kind: "Foo"}}, + err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "Foo"}}, }, { name: "ignore not found", - mapper: PriorityRESTMapper{Delegate: MultiRESTMapper{fixedRESTMapper{err: &NoKindMatchError{PartialKind: schema.GroupVersionKind{Kind: "IGNORE_THIS"}}}}}, + mapper: PriorityRESTMapper{Delegate: MultiRESTMapper{fixedRESTMapper{err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "IGNORE_THIS"}}}}}, input: schema.GroupKind{Kind: "Foo"}, - err: &NoKindMatchError{PartialKind: schema.GroupVersionKind{Kind: "Foo"}}, + err: &NoKindMatchError{GroupKind: schema.GroupKind{Kind: "Foo"}}, }, { name: "accept first failure", diff --git 
a/staging/src/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/restmapper.go index 55155a6e437..ff945acd147 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -472,7 +472,7 @@ func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) return nil, err } if len(mappings) == 0 { - return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} } // since we rely on RESTMappings method // take the first match and return to the caller @@ -510,7 +510,7 @@ func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string } if len(potentialGVK) == 0 { - return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} } for _, gvk := range potentialGVK { From 44f65449eb0cb0045d090adf15334b2897af436e Mon Sep 17 00:00:00 2001 From: zouyee Date: Fri, 5 Jan 2018 11:09:54 +0800 Subject: [PATCH 608/794] Update defaultbackend image to 1.4 and deployment apiVersion to apps/v1 --- .../cluster-loadbalancing/glbc/default-svc-controller.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml b/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml index febec626fc1..28c4a1f6edc 100644 --- a/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml +++ b/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml @@ -1,4 +1,4 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: l7-default-backend @@ -24,7 +24,7 @@ spec: # Any image is permissible as long as: # 1. It serves a 404 page at / # 2. 
It serves 200 on a /healthz endpoint - image: gcr.io/google_containers/defaultbackend:1.3 + image: gcr.io/google_containers/defaultbackend:1.4 livenessProbe: httpGet: path: /healthz From fd16e37f44d3fc3d8f88a0e31acabb391f0709d2 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Fri, 5 Jan 2018 11:44:22 +0800 Subject: [PATCH 609/794] Add generic interface for Azure clients --- pkg/cloudprovider/providers/azure/azure.go | 76 ++++++++++++++++------ 1 file changed, 56 insertions(+), 20 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 170091cace9..5435036e6b8 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -176,20 +176,48 @@ type VirtualMachineScaleSetVMsClient interface { ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) } +// RoutesClient defines needed functions for azure network.RoutesClient +type RoutesClient interface { + CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) + Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) +} + +// RouteTablesClient defines needed functions for azure network.RouteTablesClient +type RouteTablesClient interface { + CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) + Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) +} + +// StorageAccountClient defines needed functions for azure storage.AccountsClient +type StorageAccountClient interface { + Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan 
struct{}) (<-chan storage.Account, <-chan error) + Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) + ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) + ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) + GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) +} + +// DisksClient defines needed functions for azure disk.DisksClient +type DisksClient interface { + CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) + Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) + Get(resourceGroupName string, diskName string) (result disk.Model, err error) +} + // Cloud holds the config and clients type Cloud struct { Config Environment azure.Environment - RoutesClient network.RoutesClient + RoutesClient RoutesClient SubnetsClient SubnetsClient InterfacesClient InterfacesClient - RouteTablesClient network.RouteTablesClient + RouteTablesClient RouteTablesClient LoadBalancerClient LoadBalancersClient PublicIPAddressesClient PublicIPAddressesClient SecurityGroupsClient SecurityGroupsClient VirtualMachinesClient VirtualMachinesClient - StorageAccountClient storage.AccountsClient - DisksClient disk.DisksClient + StorageAccountClient StorageAccountClient + DisksClient DisksClient operationPollRateLimiter flowcontrol.RateLimiter resourceRequestBackoff wait.Backoff vmSet VMSet @@ -236,17 +264,19 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { configureUserAgent(&subnetsClient.Client) az.SubnetsClient = subnetsClient - az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID) - az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.RouteTablesClient.Authorizer = 
autorest.NewBearerAuthorizer(servicePrincipalToken) - az.RouteTablesClient.PollingDelay = 5 * time.Second - configureUserAgent(&az.RouteTablesClient.Client) + routeTablesClient := network.NewRouteTablesClient(az.SubscriptionID) + routeTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint + routeTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + routeTablesClient.PollingDelay = 5 * time.Second + configureUserAgent(&routeTablesClient.Client) + az.RouteTablesClient = routeTablesClient - az.RoutesClient = network.NewRoutesClient(az.SubscriptionID) - az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.RoutesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - az.RoutesClient.PollingDelay = 5 * time.Second - configureUserAgent(&az.RoutesClient.Client) + routesClient := network.NewRoutesClient(az.SubscriptionID) + routesClient.BaseURI = az.Environment.ResourceManagerEndpoint + routesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + routesClient.PollingDelay = 5 * time.Second + configureUserAgent(&routesClient.Client) + az.RoutesClient = routesClient interfacesClient := network.NewInterfacesClient(az.SubscriptionID) interfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint @@ -297,13 +327,19 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { configureUserAgent(&virtualMachineScaleSetsClient.Client) az.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient - az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) - az.StorageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - configureUserAgent(&az.StorageAccountClient.Client) + storageAccountClient := storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) + storageAccountClient.BaseURI = az.Environment.ResourceManagerEndpoint + 
storageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + storageAccountClient.PollingDelay = 5 * time.Second + configureUserAgent(&storageAccountClient.Client) + az.StorageAccountClient = storageAccountClient - az.DisksClient = disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) - az.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - configureUserAgent(&az.DisksClient.Client) + disksClient := disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) + disksClient.BaseURI = az.Environment.ResourceManagerEndpoint + disksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + disksClient.PollingDelay = 5 * time.Second + configureUserAgent(&disksClient.Client) + az.DisksClient = disksClient // Conditionally configure rate limits if az.CloudProviderRateLimit { From 61d6084c97188532eb2e1d2bb964ae7f850b638d Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Fri, 5 Jan 2018 14:24:44 +0800 Subject: [PATCH 610/794] Add fake clients --- .../providers/azure/azure_fakes.go | 321 +++++++++++++++++- 1 file changed, 319 insertions(+), 2 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index 18e7281fa43..dd66d509f17 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -24,11 +24,12 @@ import ( "sync" "time" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/azure-sdk-for-go/arm/disk" "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" ) type fakeAzureLBClient struct { @@ -785,3 +786,319 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupNam err = nil return resultChan, errChan } 
+ +type fakeRoutesClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]network.Route +} + +func newFakeRoutesClient() fakeRoutesClient { + fRC := fakeRoutesClient{} + fRC.FakeStore = make(map[string]map[string]network.Route) + fRC.mutex = &sync.Mutex{} + return fRC +} + +func (fRC fakeRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) { + fRC.mutex.Lock() + defer fRC.mutex.Unlock() + + resultChan := make(chan network.Route, 1) + errChan := make(chan error, 1) + var result network.Route + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + + if _, ok := fRC.FakeStore[routeTableName]; !ok { + fRC.FakeStore[routeTableName] = make(map[string]network.Route) + } + fRC.FakeStore[routeTableName][routeName] = routeParameters + result = fRC.FakeStore[routeTableName][routeName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} + +func (fRC fakeRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + fRC.mutex.Lock() + defer fRC.mutex.Unlock() + + respChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + var resp autorest.Response + var err error + defer func() { + respChan <- resp + errChan <- err + close(respChan) + close(errChan) + }() + if routes, ok := fRC.FakeStore[routeTableName]; ok { + if _, ok := routes[routeName]; ok { + delete(routes, routeName) + resp.Response = &http.Response{ + StatusCode: http.StatusAccepted, + } + + err = nil + return respChan, errChan + } + } + resp.Response = &http.Response{ + StatusCode: http.StatusNotFound, + } + err = autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such Route", + } + return respChan, errChan 
+} + +type fakeRouteTablesClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]network.RouteTable +} + +func newFakeRouteTablesClient() fakeRouteTablesClient { + fRTC := fakeRouteTablesClient{} + fRTC.FakeStore = make(map[string]map[string]network.RouteTable) + fRTC.mutex = &sync.Mutex{} + return fRTC +} + +func (fRTC fakeRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) { + fRTC.mutex.Lock() + defer fRTC.mutex.Unlock() + + resultChan := make(chan network.RouteTable, 1) + errChan := make(chan error, 1) + var result network.RouteTable + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + + if _, ok := fRTC.FakeStore[resourceGroupName]; !ok { + fRTC.FakeStore[resourceGroupName] = make(map[string]network.RouteTable) + } + fRTC.FakeStore[resourceGroupName][routeTableName] = parameters + result = fRTC.FakeStore[resourceGroupName][routeTableName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} + +func (fRTC fakeRouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { + fRTC.mutex.Lock() + defer fRTC.mutex.Unlock() + if _, ok := fRTC.FakeStore[resourceGroupName]; ok { + if entity, ok := fRTC.FakeStore[resourceGroupName][routeTableName]; ok { + return entity, nil + } + } + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such RouteTable", + } +} + +type fakeStorageAccountClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]storage.Account +} + +func newFakeStorageAccountClient() fakeStorageAccountClient { + fSAC := fakeStorageAccountClient{} + fSAC.FakeStore = make(map[string]map[string]storage.Account) + fSAC.mutex = &sync.Mutex{} + return fSAC +} + +func (fSAC 
fakeStorageAccountClient) Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) { + fSAC.mutex.Lock() + defer fSAC.mutex.Unlock() + + resultChan := make(chan storage.Account, 1) + errChan := make(chan error, 1) + var result storage.Account + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + + if _, ok := fSAC.FakeStore[resourceGroupName]; !ok { + fSAC.FakeStore[resourceGroupName] = make(map[string]storage.Account) + } + fSAC.FakeStore[resourceGroupName][accountName] = storage.Account{ + Name: &accountName, + Sku: parameters.Sku, + Kind: parameters.Kind, + Location: parameters.Location, + Identity: parameters.Identity, + Tags: parameters.Tags, + AccountProperties: &storage.AccountProperties{}, + } + result = fSAC.FakeStore[resourceGroupName][accountName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} + +func (fSAC fakeStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { + fSAC.mutex.Lock() + defer fSAC.mutex.Unlock() + + if rgAccounts, ok := fSAC.FakeStore[resourceGroupName]; ok { + if _, ok := rgAccounts[accountName]; ok { + delete(rgAccounts, accountName) + result.Response = &http.Response{ + StatusCode: http.StatusAccepted, + } + return result, nil + } + } + + result.Response = &http.Response{ + StatusCode: http.StatusNotFound, + } + err = autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such StorageAccount", + } + return result, err +} + +func (fSAC fakeStorageAccountClient) ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) { + return storage.AccountListKeysResult{}, nil +} + +func (fSAC fakeStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result 
storage.AccountListResult, err error) { + return storage.AccountListResult{}, nil +} + +func (fSAC fakeStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) { + fSAC.mutex.Lock() + defer fSAC.mutex.Unlock() + + if _, ok := fSAC.FakeStore[resourceGroupName]; ok { + if entity, ok := fSAC.FakeStore[resourceGroupName][accountName]; ok { + return entity, nil + } + } + + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such StorageAccount", + } +} + +type fakeDisksClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]disk.Model +} + +func newFakeDisksClient() fakeDisksClient { + fDC := fakeDisksClient{} + fDC.FakeStore = make(map[string]map[string]disk.Model) + fDC.mutex = &sync.Mutex{} + return fDC +} + +func (fDC fakeDisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) { + fDC.mutex.Lock() + defer fDC.mutex.Unlock() + + resultChan := make(chan disk.Model, 1) + errChan := make(chan error, 1) + var result disk.Model + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + + if _, ok := fDC.FakeStore[resourceGroupName]; !ok { + fDC.FakeStore[resourceGroupName] = make(map[string]disk.Model) + } + fDC.FakeStore[resourceGroupName][diskName] = diskParameter + result = fDC.FakeStore[resourceGroupName][diskName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} + +func (fDC fakeDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) { + fDC.mutex.Lock() + defer fDC.mutex.Unlock() + + respChan := make(chan disk.OperationStatusResponse, 1) + errChan := make(chan error, 1) + var resp disk.OperationStatusResponse + var err error + defer func() { + 
respChan <- resp + errChan <- err + close(respChan) + close(errChan) + }() + if rgDisks, ok := fDC.FakeStore[resourceGroupName]; ok { + if _, ok := rgDisks[diskName]; ok { + delete(rgDisks, diskName) + resp.Response = autorest.Response{ + Response: &http.Response{ + StatusCode: http.StatusAccepted, + }, + } + + err = nil + return respChan, errChan + } + } + resp.Response = autorest.Response{ + Response: &http.Response{ + StatusCode: http.StatusNotFound, + }, + } + err = autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such Disk", + } + return respChan, errChan +} + +func (fDC fakeDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) { + fDC.mutex.Lock() + defer fDC.mutex.Unlock() + + if _, ok := fDC.FakeStore[resourceGroupName]; ok { + if entity, ok := fDC.FakeStore[resourceGroupName][diskName]; ok { + return entity, nil + } + } + + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such Disk", + } +} From 9e949b11aa001c4c10da28d7341385652fd32728 Mon Sep 17 00:00:00 2001 From: Kai Chen Date: Fri, 5 Jan 2018 01:01:49 -0800 Subject: [PATCH 611/794] Fix a broken link in the fluentd-elasticsearch addon README --- cluster/addons/fluentd-elasticsearch/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/addons/fluentd-elasticsearch/README.md b/cluster/addons/fluentd-elasticsearch/README.md index ed012ac1fef..d51b3b142d2 100644 --- a/cluster/addons/fluentd-elasticsearch/README.md +++ b/cluster/addons/fluentd-elasticsearch/README.md @@ -71,7 +71,7 @@ avoid Fluentd pods scheduling there. 
[setupCreds]: https://www.elastic.co/guide/en/x-pack/current/setting-up-authentication.html#reset-built-in-user-passwords [fluentdCreds]: https://github.com/uken/fluent-plugin-elasticsearch#user-password-path-scheme-ssl_verify [fluentdEnvVar]: https://docs.fluentd.org/v0.12/articles/faq#how-can-i-use-environment-variables-to-configure-parameters-dynamically -[configMap]: https://kubernetes.io/docs/tasks/configure-pod-container/configmap/ +[configMap]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/ [secret]: https://kubernetes.io/docs/concepts/configuration/secret/ [statefulSet]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset [initContainer]: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ From 2780b8f9e4bba0a27b44e0647a579b1c435987b1 Mon Sep 17 00:00:00 2001 From: David Chang Date: Thu, 21 Dec 2017 17:10:47 +0800 Subject: [PATCH 612/794] Enable list option modification when create list watch --- .../src/k8s.io/client-go/tools/cache/listwatch.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/client-go/tools/cache/listwatch.go b/staging/src/k8s.io/client-go/tools/cache/listwatch.go index db2329c55a2..06657a3b062 100644 --- a/staging/src/k8s.io/client-go/tools/cache/listwatch.go +++ b/staging/src/k8s.io/client-go/tools/cache/listwatch.go @@ -63,8 +63,18 @@ type Getter interface { // NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector. 
func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch { - listFunc := func(options metav1.ListOptions) (runtime.Object, error) { + optionsModifier := func(options *metav1.ListOptions) { options.FieldSelector = fieldSelector.String() + } + return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier) +} + +// NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier. +// Option modifier is a function takes a ListOptions and modifies the consumed ListOptions. Provide customized modifier function +// to apply modification to ListOptions with a field selector, a label selector, or any other desired options. +func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch { + listFunc := func(options metav1.ListOptions) (runtime.Object, error) { + optionsModifier(&options) return c.Get(). Namespace(namespace). Resource(resource). @@ -74,7 +84,7 @@ func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSe } watchFunc := func(options metav1.ListOptions) (watch.Interface, error) { options.Watch = true - options.FieldSelector = fieldSelector.String() + optionsModifier(&options) return c.Get(). Namespace(namespace). Resource(resource). 
From 93b11ad12d6dca4da479facaf1cddddc85f2133e Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Thu, 21 Dec 2017 20:07:59 +0800 Subject: [PATCH 613/794] Small improvement of showKind get --- pkg/kubectl/cmd/resource/get.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/kubectl/cmd/resource/get.go b/pkg/kubectl/cmd/resource/get.go index 4013cb42874..cf90b120210 100644 --- a/pkg/kubectl/cmd/resource/get.go +++ b/pkg/kubectl/cmd/resource/get.go @@ -301,11 +301,7 @@ func (options *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []str useOpenAPIPrintColumns := cmdutil.GetFlagBool(cmd, useOpenAPIPrintColumnFlagLabel) - showKind := options.ShowKind - // TODO: abstract more cleanly - if resource.MultipleTypesRequested(args) || cmdutil.MustPrintWithKinds(objs, infos, sorter) { - showKind = true - } + showKind := options.ShowKind || resource.MultipleTypesRequested(args) || cmdutil.MustPrintWithKinds(objs, infos, sorter) filteredResourceCount := 0 noHeaders := cmdutil.GetFlagBool(cmd, "no-headers") From eb688e098f5abd095584d84a50dba1beeea97db5 Mon Sep 17 00:00:00 2001 From: mattjmcnaughton Date: Fri, 5 Jan 2018 08:40:24 -0500 Subject: [PATCH 614/794] Add RESTClient Custom metrics empty test Add testing for a previously untested path, which is tested when getting resource metrics. 
--- .../podautoscaler/metrics/rest_metrics_client_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go b/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go index 289b93f04d7..e51c5309f8c 100644 --- a/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go +++ b/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go @@ -249,6 +249,16 @@ func TestRESTClientQpsSumEqualZero(t *testing.T) { tc.runTest(t) } +func TestRESTClientQpsEmptyMetrics(t *testing.T) { + tc := restClientTestCase{ + metricName: "qps", + desiredError: fmt.Errorf("no metrics returned from custom metrics API"), + reportedMetricPoints: []metricPoint{}, + } + + tc.runTest(t) +} + func TestRESTClientCPUEmptyMetrics(t *testing.T) { tc := restClientTestCase{ resourceName: v1.ResourceCPU, From 4aef85aab4aba7c9e9e5a1ffaf66ef4c98a59959 Mon Sep 17 00:00:00 2001 From: Mike Wilson Date: Fri, 5 Jan 2018 09:41:23 -0500 Subject: [PATCH 615/794] Removing duplicate import --- .../juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index c5e54144d86..ccb8e083b6c 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -20,7 +20,6 @@ import random import shutil import subprocess import time -import json from shlex import split from subprocess import check_call, check_output From cd7e57848995c1268c56e8d32ae71183d53358d3 Mon Sep 17 00:00:00 2001 From: Marek Grabowski Date: Fri, 5 Jan 2018 16:08:30 +0000 Subject: [PATCH 616/794] Re-add nodecontroller OWNERS file --- pkg/controller/nodelifecycle/OWNERS | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100755 pkg/controller/nodelifecycle/OWNERS diff --git 
a/pkg/controller/nodelifecycle/OWNERS b/pkg/controller/nodelifecycle/OWNERS new file mode 100755 index 00000000000..99dd2eda0e1 --- /dev/null +++ b/pkg/controller/nodelifecycle/OWNERS @@ -0,0 +1,9 @@ +approvers: +- gmarek +- bowei +reviewers: +- gmarek +- smarterclayton +- ingvagabund +- aveshagarwal +- k82cn From 5312989b04a8cea5128e78bb7f33975037ad3383 Mon Sep 17 00:00:00 2001 From: Karol Wychowaniec Date: Fri, 5 Jan 2018 17:37:56 +0100 Subject: [PATCH 617/794] Fix errors in Heapster deployment for google sink --- .../cluster-monitoring/google/heapster-controller.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 81b513281e6..769e36112e9 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -101,7 +101,7 @@ spec: memory: {{ nanny_memory }} volumeMounts: - name: heapster-config-volume - mountMath: /etc/config + mountPath: /etc/config env: - name: MY_POD_NAME valueFrom: @@ -143,7 +143,7 @@ spec: fieldPath: metadata.namespace volumeMounts: - name: eventer-config-volume - mountMath: /etc/config + mountPath: /etc/config command: - /pod_nanny - --config-dir=/etc/config @@ -160,7 +160,6 @@ spec: - name: heapster-config-volume configMap: name: heapster-config - volumes: - name: eventer-config-volume configMap: name: eventer-config From 64c20676ac731f2247766846a9ac7ac298538c06 Mon Sep 17 00:00:00 2001 From: Nick Sardo Date: Thu, 4 Jan 2018 17:48:52 -0800 Subject: [PATCH 618/794] Use existing subnetwork of forwarding rule --- .../providers/gce/gce_loadbalancer_internal.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go index d4acf9fa031..4b0c02c6925 
100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go @@ -82,10 +82,18 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s requestedIP := determineRequestedIP(svc, existingFwdRule) ipToUse := requestedIP + // If the ILB already exists, continue using the subnet that it's already using. + // This is to support existing ILBs that were setup using the wrong subnet. + subnetworkURL := gce.SubnetworkURL() + if existingFwdRule != nil && existingFwdRule.Subnetwork != "" { + // external LBs have an empty Subnetwork field. + subnetworkURL = existingFwdRule.Subnetwork + } + var addrMgr *addressManager // If the network is not a legacy network, use the address manager if !gce.IsLegacyNetwork() { - addrMgr = newAddressManager(gce, nm.String(), gce.Region(), gce.SubnetworkURL(), loadBalancerName, requestedIP, schemeInternal) + addrMgr = newAddressManager(gce, nm.String(), gce.Region(), subnetworkURL, loadBalancerName, requestedIP, schemeInternal) ipToUse, err = addrMgr.HoldAddress() if err != nil { return nil, err @@ -108,9 +116,10 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s LoadBalancingScheme: string(scheme), } - // Specify subnetwork if known - if len(gce.subnetworkURL) > 0 { - expectedFwdRule.Subnetwork = gce.subnetworkURL + // Given that CreateGCECloud will attempt to determine the subnet based off the network, + // the subnetwork should rarely be unknown. 
+ if subnetworkURL != "" { + expectedFwdRule.Subnetwork = subnetworkURL } else { expectedFwdRule.Network = gce.networkURL } From fc8020fd9e1cf73caa3f620b1a0df54dc70b4eb4 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Fri, 5 Jan 2018 11:29:28 -0800 Subject: [PATCH 619/794] Fix local e2e test with changed error message --- test/e2e/storage/persistent_volumes-local.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 79fd1f7c636..59def78cb62 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -317,7 +317,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolum reason: "FailedMount", pattern: make([]string, 2)} ep.pattern = append(ep.pattern, "NodeSelectorTerm") - ep.pattern = append(ep.pattern, "Storage node affinity check failed") + ep.pattern = append(ep.pattern, "MountVolume.NodeAffinity check failed") It("should not be able to mount due to different NodeName", func() { testPodWithNodeName(config, testVolType, ep, config.nodes[1].Name, makeLocalPodWithNodeName, testMode) From 761decc3b5911513825953b9dddae7cee73123e8 Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Fri, 5 Jan 2018 14:46:14 -0800 Subject: [PATCH 620/794] Remove mikedanese from kubeadm owners since he's no longer actively working on the project. 
--- cmd/kubeadm/OWNERS | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/kubeadm/OWNERS b/cmd/kubeadm/OWNERS index 8df7e7cd3ee..8da4cfe9766 100644 --- a/cmd/kubeadm/OWNERS +++ b/cmd/kubeadm/OWNERS @@ -1,11 +1,9 @@ approvers: - jbeda - luxas -- mikedanese - krousey - timothysc reviewers: -- mikedanese - luxas - dmmcquay - krousey From 5210e6fefd58c08ca5071fa76f4dd86989fc4b86 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Fri, 5 Jan 2018 12:08:09 -0500 Subject: [PATCH 621/794] Remove dependency on v1 API in base credential provider Credential provider is useful without the v1 API, move the only dependency out so that we can more easily move credential provider to a utility library in the future (other callers besides Kubelet may need to load pull secrets like Docker). --- pkg/credentialprovider/BUILD | 2 +- pkg/credentialprovider/keyring.go | 46 ++-------------- pkg/credentialprovider/secrets/BUILD | 26 +++++++++ pkg/credentialprovider/secrets/secrets.go | 58 ++++++++++++++++++++ pkg/kubelet/kuberuntime/BUILD | 1 + pkg/kubelet/kuberuntime/kuberuntime_image.go | 3 +- pkg/kubelet/rkt/BUILD | 1 + pkg/kubelet/rkt/image.go | 3 +- 8 files changed, 95 insertions(+), 45 deletions(-) create mode 100644 pkg/credentialprovider/secrets/BUILD create mode 100644 pkg/credentialprovider/secrets/secrets.go diff --git a/pkg/credentialprovider/BUILD b/pkg/credentialprovider/BUILD index 42ec2cb9ddc..4cec1e472ce 100644 --- a/pkg/credentialprovider/BUILD +++ b/pkg/credentialprovider/BUILD @@ -19,7 +19,6 @@ go_library( deps = [ "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], ) @@ -51,6 +50,7 @@ filegroup( "//pkg/credentialprovider/azure:all-srcs", "//pkg/credentialprovider/gcp:all-srcs", "//pkg/credentialprovider/rancher:all-srcs", + "//pkg/credentialprovider/secrets:all-srcs", ], tags = 
["automanaged"], ) diff --git a/pkg/credentialprovider/keyring.go b/pkg/credentialprovider/keyring.go index 9ec96312577..b269f474600 100644 --- a/pkg/credentialprovider/keyring.go +++ b/pkg/credentialprovider/keyring.go @@ -17,7 +17,6 @@ limitations under the License. package credentialprovider import ( - "encoding/json" "net" "net/url" "path/filepath" @@ -27,7 +26,6 @@ import ( "github.com/golang/glog" dockertypes "github.com/docker/docker/api/types" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" ) @@ -284,14 +282,12 @@ func (f *FakeKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { return f.auth, f.ok } -// unionDockerKeyring delegates to a set of keyrings. -type unionDockerKeyring struct { - keyrings []DockerKeyring -} +// UnionDockerKeyring delegates to a set of keyrings. +type UnionDockerKeyring []DockerKeyring -func (k *unionDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { +func (k UnionDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { authConfigs := []LazyAuthConfiguration{} - for _, subKeyring := range k.keyrings { + for _, subKeyring := range k { if subKeyring == nil { continue } @@ -302,37 +298,3 @@ func (k *unionDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool return authConfigs, (len(authConfigs) > 0) } - -// MakeDockerKeyring inspects the passedSecrets to see if they contain any DockerConfig secrets. If they do, -// then a DockerKeyring is built based on every hit and unioned with the defaultKeyring. 
-// If they do not, then the default keyring is returned -func MakeDockerKeyring(passedSecrets []v1.Secret, defaultKeyring DockerKeyring) (DockerKeyring, error) { - passedCredentials := []DockerConfig{} - for _, passedSecret := range passedSecrets { - if dockerConfigJsonBytes, dockerConfigJsonExists := passedSecret.Data[v1.DockerConfigJsonKey]; (passedSecret.Type == v1.SecretTypeDockerConfigJson) && dockerConfigJsonExists && (len(dockerConfigJsonBytes) > 0) { - dockerConfigJson := DockerConfigJson{} - if err := json.Unmarshal(dockerConfigJsonBytes, &dockerConfigJson); err != nil { - return nil, err - } - - passedCredentials = append(passedCredentials, dockerConfigJson.Auths) - } else if dockercfgBytes, dockercfgExists := passedSecret.Data[v1.DockerConfigKey]; (passedSecret.Type == v1.SecretTypeDockercfg) && dockercfgExists && (len(dockercfgBytes) > 0) { - dockercfg := DockerConfig{} - if err := json.Unmarshal(dockercfgBytes, &dockercfg); err != nil { - return nil, err - } - - passedCredentials = append(passedCredentials, dockercfg) - } - } - - if len(passedCredentials) > 0 { - basicKeyring := &BasicDockerKeyring{} - for _, currCredentials := range passedCredentials { - basicKeyring.Add(currCredentials) - } - return &unionDockerKeyring{[]DockerKeyring{basicKeyring, defaultKeyring}}, nil - } - - return defaultKeyring, nil -} diff --git a/pkg/credentialprovider/secrets/BUILD b/pkg/credentialprovider/secrets/BUILD new file mode 100644 index 00000000000..15d41a63711 --- /dev/null +++ b/pkg/credentialprovider/secrets/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["secrets.go"], + importpath = "k8s.io/kubernetes/pkg/credentialprovider/secrets", + visibility = ["//visibility:public"], + deps = [ + "//pkg/credentialprovider:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], 
+ visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/credentialprovider/secrets/secrets.go b/pkg/credentialprovider/secrets/secrets.go new file mode 100644 index 00000000000..d5397d931c2 --- /dev/null +++ b/pkg/credentialprovider/secrets/secrets.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package secrets + +import ( + "encoding/json" + + "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/credentialprovider" +) + +// MakeDockerKeyring inspects the passedSecrets to see if they contain any DockerConfig secrets. If they do, +// then a DockerKeyring is built based on every hit and unioned with the defaultKeyring. 
+// If they do not, then the default keyring is returned +func MakeDockerKeyring(passedSecrets []v1.Secret, defaultKeyring credentialprovider.DockerKeyring) (credentialprovider.DockerKeyring, error) { + passedCredentials := []credentialprovider.DockerConfig{} + for _, passedSecret := range passedSecrets { + if dockerConfigJSONBytes, dockerConfigJSONExists := passedSecret.Data[v1.DockerConfigJsonKey]; (passedSecret.Type == v1.SecretTypeDockerConfigJson) && dockerConfigJSONExists && (len(dockerConfigJSONBytes) > 0) { + dockerConfigJSON := credentialprovider.DockerConfigJson{} + if err := json.Unmarshal(dockerConfigJSONBytes, &dockerConfigJSON); err != nil { + return nil, err + } + + passedCredentials = append(passedCredentials, dockerConfigJSON.Auths) + } else if dockercfgBytes, dockercfgExists := passedSecret.Data[v1.DockerConfigKey]; (passedSecret.Type == v1.SecretTypeDockercfg) && dockercfgExists && (len(dockercfgBytes) > 0) { + dockercfg := credentialprovider.DockerConfig{} + if err := json.Unmarshal(dockercfgBytes, &dockercfg); err != nil { + return nil, err + } + + passedCredentials = append(passedCredentials, dockercfg) + } + } + + if len(passedCredentials) > 0 { + basicKeyring := &credentialprovider.BasicDockerKeyring{} + for _, currCredentials := range passedCredentials { + basicKeyring.Add(currCredentials) + } + return credentialprovider.UnionDockerKeyring{basicKeyring, defaultKeyring}, nil + } + + return defaultKeyring, nil +} diff --git a/pkg/kubelet/kuberuntime/BUILD b/pkg/kubelet/kuberuntime/BUILD index c2719a329d1..9438ec410d3 100644 --- a/pkg/kubelet/kuberuntime/BUILD +++ b/pkg/kubelet/kuberuntime/BUILD @@ -28,6 +28,7 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", "//pkg/credentialprovider:go_default_library", + "//pkg/credentialprovider/secrets:go_default_library", "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", 
"//pkg/kubelet/cm:go_default_library", diff --git a/pkg/kubelet/kuberuntime/kuberuntime_image.go b/pkg/kubelet/kuberuntime/kuberuntime_image.go index 66ca5c7145a..7d7249034b5 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_image.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_image.go @@ -21,6 +21,7 @@ import ( "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/pkg/credentialprovider" + credentialprovidersecrets "k8s.io/kubernetes/pkg/credentialprovider/secrets" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/parsers" @@ -35,7 +36,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul return "", err } - keyring, err := credentialprovider.MakeDockerKeyring(pullSecrets, m.keyring) + keyring, err := credentialprovidersecrets.MakeDockerKeyring(pullSecrets, m.keyring) if err != nil { return "", err } diff --git a/pkg/kubelet/rkt/BUILD b/pkg/kubelet/rkt/BUILD index 989583b9272..40d41ba48e6 100644 --- a/pkg/kubelet/rkt/BUILD +++ b/pkg/kubelet/rkt/BUILD @@ -22,6 +22,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/rkt", deps = [ "//pkg/credentialprovider:go_default_library", + "//pkg/credentialprovider/secrets:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/kubelet/images:go_default_library", diff --git a/pkg/kubelet/rkt/image.go b/pkg/kubelet/rkt/image.go index 225604c6129..180dcfa51a3 100644 --- a/pkg/kubelet/rkt/image.go +++ b/pkg/kubelet/rkt/image.go @@ -35,6 +35,7 @@ import ( "golang.org/x/net/context" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/credentialprovider" + credentialprovidersecrets "k8s.io/kubernetes/pkg/credentialprovider/secrets" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/parsers" ) @@ -54,7 +55,7 @@ func (r *Runtime) PullImage(image kubecontainer.ImageSpec, 
pullSecrets []v1.Secr return "", err } - keyring, err := credentialprovider.MakeDockerKeyring(pullSecrets, r.dockerKeyring) + keyring, err := credentialprovidersecrets.MakeDockerKeyring(pullSecrets, r.dockerKeyring) if err != nil { return "", err } From ce40f8db7ebe3b84158a58665ad1a7d272516abf Mon Sep 17 00:00:00 2001 From: Rohit Ramkumar Date: Fri, 5 Jan 2018 15:00:40 -0800 Subject: [PATCH 622/794] Update kube-dns to 1.14.8 --- cluster/addons/dns/kube-dns.yaml.base | 6 +++--- cluster/addons/dns/kube-dns.yaml.in | 6 +++--- cluster/addons/dns/kube-dns.yaml.sed | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cluster/addons/dns/kube-dns.yaml.base b/cluster/addons/dns/kube-dns.yaml.base index e93884df0ca..edf77c3569c 100644 --- a/cluster/addons/dns/kube-dns.yaml.base +++ b/cluster/addons/dns/kube-dns.yaml.base @@ -94,7 +94,7 @@ spec: optional: true containers: - name: kubedns - image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.8 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -145,7 +145,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.8 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -184,7 +184,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.8 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/dns/kube-dns.yaml.in b/cluster/addons/dns/kube-dns.yaml.in index 12b09236723..ea20f6d0ffd 100644 --- a/cluster/addons/dns/kube-dns.yaml.in +++ b/cluster/addons/dns/kube-dns.yaml.in @@ -94,7 +94,7 @@ spec: optional: true containers: - 
name: kubedns - image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.8 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -145,7 +145,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.8 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -184,7 +184,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.8 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/dns/kube-dns.yaml.sed b/cluster/addons/dns/kube-dns.yaml.sed index 101cf588e2d..af5772fc341 100644 --- a/cluster/addons/dns/kube-dns.yaml.sed +++ b/cluster/addons/dns/kube-dns.yaml.sed @@ -94,7 +94,7 @@ spec: optional: true containers: - name: kubedns - image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.8 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -145,7 +145,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.8 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -184,7 +184,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.8 livenessProbe: httpGet: path: /metrics From 30b89d830b7fd0827576853d6a0db44b66a90d3d Mon Sep 17 
00:00:00 2001 From: Jonathan Basseri Date: Wed, 3 Jan 2018 18:12:18 -0800 Subject: [PATCH 623/794] Move scheduler code out of plugin directory. This moves plugin/pkg/scheduler to pkg/scheduler and plugin/cmd/kube-scheduler to cmd/kube-scheduler. Bulk of the work was done with gomvpkg, except for kube-scheduler main package. --- build/root/Makefile | 15 ---------- cmd/genkubedocs/gen_kube_docs.go | 2 +- cmd/genman/gen_kube_man.go | 2 +- cmd/hyperkube/kube-scheduler.go | 2 +- {plugin/cmd => cmd}/kube-scheduler/BUILD | 0 {plugin/cmd => cmd}/kube-scheduler/OWNERS | 0 {plugin/cmd => cmd}/kube-scheduler/app/BUILD | 0 .../cmd => cmd}/kube-scheduler/app/server.go | 10 +++---- .../cmd => cmd}/kube-scheduler/scheduler.go | 2 +- cmd/kubeadm/app/phases/addons/proxy/proxy.go | 2 +- cmd/kubeadm/app/preflight/checks.go | 2 +- examples/examples_test.go | 4 +-- hack/.golint_failures | 28 +++++++++---------- hack/lib/golang.sh | 2 +- hack/make-rules/make-help.sh | 16 ----------- pkg/controller/cloud/node_controller.go | 2 +- pkg/controller/cloud/node_controller_test.go | 2 +- pkg/controller/daemon/daemon_controller.go | 6 ++-- .../daemon/daemon_controller_test.go | 2 +- pkg/controller/daemon/util/daemonset_util.go | 2 +- .../node_lifecycle_controller.go | 2 +- .../node_lifecycle_controller_test.go | 2 +- pkg/kubectl/.import-restrictions | 15 +++++----- pkg/kubelet/cm/container_manager.go | 2 +- pkg/kubelet/cm/container_manager_linux.go | 2 +- pkg/kubelet/cm/container_manager_stub.go | 2 +- pkg/kubelet/cm/deviceplugin/manager.go | 2 +- pkg/kubelet/cm/deviceplugin/manager_stub.go | 2 +- pkg/kubelet/cm/deviceplugin/manager_test.go | 2 +- pkg/kubelet/cm/deviceplugin/types.go | 2 +- pkg/kubelet/eviction/helpers.go | 2 +- pkg/kubelet/kubelet.go | 2 +- pkg/kubelet/kubelet_node_status.go | 2 +- pkg/kubelet/kubelet_test.go | 2 +- .../admission_failure_handler_stub.go | 2 +- pkg/kubelet/lifecycle/predicate.go | 6 ++-- pkg/kubelet/preemption/preemption.go | 4 +-- {plugin/pkg => 
pkg}/scheduler/BUILD | 0 {plugin/pkg => pkg}/scheduler/OWNERS | 0 {plugin/pkg => pkg}/scheduler/algorithm/BUILD | 0 .../pkg => pkg}/scheduler/algorithm/doc.go | 2 +- .../scheduler/algorithm/predicates/BUILD | 0 .../scheduler/algorithm/predicates/error.go | 0 .../algorithm/predicates/metadata.go | 6 ++-- .../algorithm/predicates/metadata_test.go | 4 +-- .../algorithm/predicates/predicates.go | 10 +++---- .../algorithm/predicates/predicates_test.go | 8 +++--- .../algorithm/predicates/testing_helper.go | 0 .../scheduler/algorithm/predicates/utils.go | 4 +-- .../algorithm/predicates/utils_test.go | 0 .../scheduler/algorithm/priorities/BUILD | 0 .../balanced_resource_allocation.go | 4 +-- .../balanced_resource_allocation_test.go | 4 +-- .../algorithm/priorities/image_locality.go | 4 +-- .../priorities/image_locality_test.go | 4 +-- .../algorithm/priorities/interpod_affinity.go | 10 +++---- .../priorities/interpod_affinity_test.go | 6 ++-- .../algorithm/priorities/least_requested.go | 4 +-- .../priorities/least_requested_test.go | 4 +-- .../algorithm/priorities/metadata.go | 6 ++-- .../algorithm/priorities/metadata_test.go | 6 ++-- .../algorithm/priorities/most_requested.go | 4 +-- .../priorities/most_requested_test.go | 4 +-- .../algorithm/priorities/node_affinity.go | 4 +-- .../priorities/node_affinity_test.go | 4 +-- .../algorithm/priorities/node_label.go | 6 ++-- .../algorithm/priorities/node_label_test.go | 4 +-- .../priorities/node_prefer_avoid_pods.go | 6 ++-- .../priorities/node_prefer_avoid_pods_test.go | 4 +-- .../scheduler/algorithm/priorities/reduce.go | 6 ++-- .../priorities/resource_allocation.go | 6 ++-- .../algorithm/priorities/resource_limits.go | 4 +-- .../priorities/resource_limits_test.go | 4 +-- .../priorities/selector_spreading.go | 6 ++-- .../priorities/selector_spreading_test.go | 6 ++-- .../algorithm/priorities/taint_toleration.go | 4 +-- .../priorities/taint_toleration_test.go | 4 +-- .../algorithm/priorities/test_util.go | 6 ++-- 
.../scheduler/algorithm/priorities/util/BUILD | 0 .../algorithm/priorities/util/non_zero.go | 0 .../priorities/util/non_zero_test.go | 0 .../algorithm/priorities/util/topologies.go | 0 .../priorities/util/topologies_test.go | 0 .../algorithm/priorities/util/util.go | 0 .../algorithm/priorities/util/util_test.go | 0 .../algorithm/scheduler_interface.go | 4 +-- .../algorithm/scheduler_interface_test.go | 0 .../pkg => pkg}/scheduler/algorithm/types.go | 4 +-- .../scheduler/algorithm/types_test.go | 2 +- .../scheduler/algorithm/well_known_labels.go | 0 .../scheduler/algorithmprovider/BUILD | 0 .../algorithmprovider/defaults/BUILD | 0 .../defaults/compatibility_test.go | 6 ++-- .../algorithmprovider/defaults/defaults.go | 10 +++---- .../defaults/defaults_test.go | 2 +- .../scheduler/algorithmprovider/plugins.go | 2 +- .../algorithmprovider/plugins_test.go | 2 +- {plugin/pkg => pkg}/scheduler/api/BUILD | 0 {plugin/pkg => pkg}/scheduler/api/doc.go | 4 +-- .../pkg => pkg}/scheduler/api/latest/BUILD | 0 .../scheduler/api/latest/latest.go | 4 +-- {plugin/pkg => pkg}/scheduler/api/register.go | 0 {plugin/pkg => pkg}/scheduler/api/types.go | 0 {plugin/pkg => pkg}/scheduler/api/v1/BUILD | 0 {plugin/pkg => pkg}/scheduler/api/v1/doc.go | 4 +-- .../pkg => pkg}/scheduler/api/v1/register.go | 2 +- {plugin/pkg => pkg}/scheduler/api/v1/types.go | 0 .../scheduler/api/v1/zz_generated.deepcopy.go | 0 .../scheduler/api/validation/BUILD | 0 .../scheduler/api/validation/validation.go | 2 +- .../api/validation/validation_test.go | 2 +- .../scheduler/api/zz_generated.deepcopy.go | 0 {plugin/pkg => pkg}/scheduler/core/BUILD | 0 .../scheduler/core/equivalence_cache.go | 2 +- .../scheduler/core/equivalence_cache_test.go | 4 +-- .../pkg => pkg}/scheduler/core/extender.go | 6 ++-- .../scheduler/core/extender_test.go | 8 +++--- .../scheduler/core/generic_scheduler.go | 12 ++++---- .../scheduler/core/generic_scheduler_test.go | 18 ++++++------ .../scheduler/core/scheduling_queue.go | 6 ++-- 
.../scheduler/core/scheduling_queue_test.go | 2 +- {plugin/pkg => pkg}/scheduler/factory/BUILD | 0 .../pkg => pkg}/scheduler/factory/factory.go | 22 +++++++-------- .../scheduler/factory/factory_test.go | 14 +++++----- .../pkg => pkg}/scheduler/factory/plugins.go | 10 +++---- .../scheduler/factory/plugins_test.go | 4 +-- {plugin/pkg => pkg}/scheduler/metrics/BUILD | 0 .../pkg => pkg}/scheduler/metrics/metrics.go | 0 {plugin/pkg => pkg}/scheduler/scheduler.go | 16 +++++------ .../pkg => pkg}/scheduler/scheduler_test.go | 14 +++++----- .../scheduler/schedulercache/BUILD | 0 .../scheduler/schedulercache/cache.go | 0 .../scheduler/schedulercache/cache_test.go | 4 +-- .../scheduler/schedulercache/interface.go | 0 .../scheduler/schedulercache/node_info.go | 4 +-- .../scheduler/schedulercache/util.go | 0 {plugin/pkg => pkg}/scheduler/testing/BUILD | 0 .../scheduler/testing/fake_cache.go | 2 +- .../scheduler/testing/fake_lister.go | 4 +-- .../scheduler/testing/pods_to_cache.go | 2 +- {plugin/pkg => pkg}/scheduler/testutil.go | 8 +++--- {plugin/pkg => pkg}/scheduler/util/BUILD | 0 .../scheduler/util/backoff_utils.go | 0 .../scheduler/util/backoff_utils_test.go | 0 .../pkg => pkg}/scheduler/util/testutil.go | 0 .../scheduler/util/testutil_test.go | 0 {plugin/pkg => pkg}/scheduler/util/utils.go | 0 .../pkg => pkg}/scheduler/util/utils_test.go | 0 .../pkg => pkg}/scheduler/volumebinder/BUILD | 0 .../scheduler/volumebinder/volume_binder.go | 0 .../defaulttolerationseconds/admission.go | 2 +- .../admission_test.go | 2 +- .../podtolerationrestriction/admission.go | 2 +- .../admission_test.go | 2 +- test/e2e/apps/daemon_set.go | 2 +- test/e2e/framework/util.go | 4 +-- test/e2e/scheduling/priorities.go | 2 +- .../defaulttolerationseconds_test.go | 2 +- test/integration/scheduler/extender_test.go | 8 +++--- .../scheduler/local-pv-neg-affinity_test.go | 4 +-- test/integration/scheduler/preemption_test.go | 4 +-- test/integration/scheduler/scheduler_test.go | 14 +++++----- 
test/integration/scheduler/taint_test.go | 8 +++--- test/integration/scheduler/util.go | 6 ++-- .../scheduler_perf/scheduler_test.go | 2 +- test/integration/scheduler_perf/util.go | 6 ++-- test/test_owners.csv | 24 ++++++++-------- 167 files changed, 299 insertions(+), 329 deletions(-) rename {plugin/cmd => cmd}/kube-scheduler/BUILD (100%) rename {plugin/cmd => cmd}/kube-scheduler/OWNERS (100%) rename {plugin/cmd => cmd}/kube-scheduler/app/BUILD (100%) rename {plugin/cmd => cmd}/kube-scheduler/app/server.go (98%) rename {plugin/cmd => cmd}/kube-scheduler/scheduler.go (95%) rename {plugin/pkg => pkg}/scheduler/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/OWNERS (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/doc.go (89%) rename {plugin/pkg => pkg}/scheduler/algorithm/predicates/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/predicates/error.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/predicates/metadata.go (97%) rename {plugin/pkg => pkg}/scheduler/algorithm/predicates/metadata_test.go (99%) rename {plugin/pkg => pkg}/scheduler/algorithm/predicates/predicates.go (99%) rename {plugin/pkg => pkg}/scheduler/algorithm/predicates/predicates_test.go (99%) rename {plugin/pkg => pkg}/scheduler/algorithm/predicates/testing_helper.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/predicates/utils.go (98%) rename {plugin/pkg => pkg}/scheduler/algorithm/predicates/utils_test.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/balanced_resource_allocation.go (95%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/balanced_resource_allocation_test.go (98%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/image_locality.go (96%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/image_locality_test.go (97%) rename {plugin/pkg => 
pkg}/scheduler/algorithm/priorities/interpod_affinity.go (96%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/interpod_affinity_test.go (99%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/least_requested.go (93%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/least_requested_test.go (98%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/metadata.go (95%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/metadata_test.go (95%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/most_requested.go (94%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/most_requested_test.go (98%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/node_affinity.go (96%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/node_affinity_test.go (97%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/node_label.go (91%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/node_label_test.go (97%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/node_prefer_avoid_pods.go (91%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go (97%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/reduce.go (89%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/resource_allocation.go (91%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/resource_limits.go (97%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/resource_limits_test.go (97%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/selector_spreading.go (98%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/selector_spreading_test.go (99%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/taint_toleration.go (96%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/taint_toleration_test.go (98%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/test_util.go (92%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/util/BUILD (100%) 
rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/util/non_zero.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/util/non_zero_test.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/util/topologies.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/util/topologies_test.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/util/util.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/priorities/util/util_test.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/scheduler_interface.go (95%) rename {plugin/pkg => pkg}/scheduler/algorithm/scheduler_interface_test.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithm/types.go (98%) rename {plugin/pkg => pkg}/scheduler/algorithm/types_test.go (97%) rename {plugin/pkg => pkg}/scheduler/algorithm/well_known_labels.go (100%) rename {plugin/pkg => pkg}/scheduler/algorithmprovider/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/algorithmprovider/defaults/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/algorithmprovider/defaults/compatibility_test.go (99%) rename {plugin/pkg => pkg}/scheduler/algorithmprovider/defaults/defaults.go (97%) rename {plugin/pkg => pkg}/scheduler/algorithmprovider/defaults/defaults_test.go (97%) rename {plugin/pkg => pkg}/scheduler/algorithmprovider/plugins.go (91%) rename {plugin/pkg => pkg}/scheduler/algorithmprovider/plugins_test.go (98%) rename {plugin/pkg => pkg}/scheduler/api/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/api/doc.go (83%) rename {plugin/pkg => pkg}/scheduler/api/latest/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/api/latest/latest.go (94%) rename {plugin/pkg => pkg}/scheduler/api/register.go (100%) rename {plugin/pkg => pkg}/scheduler/api/types.go (100%) rename {plugin/pkg => pkg}/scheduler/api/v1/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/api/v1/doc.go (83%) rename {plugin/pkg => pkg}/scheduler/api/v1/register.go (96%) rename {plugin/pkg => pkg}/scheduler/api/v1/types.go 
(100%) rename {plugin/pkg => pkg}/scheduler/api/v1/zz_generated.deepcopy.go (100%) rename {plugin/pkg => pkg}/scheduler/api/validation/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/api/validation/validation.go (96%) rename {plugin/pkg => pkg}/scheduler/api/validation/validation_test.go (98%) rename {plugin/pkg => pkg}/scheduler/api/zz_generated.deepcopy.go (100%) rename {plugin/pkg => pkg}/scheduler/core/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/core/equivalence_cache.go (99%) rename {plugin/pkg => pkg}/scheduler/core/equivalence_cache_test.go (99%) rename {plugin/pkg => pkg}/scheduler/core/extender.go (97%) rename {plugin/pkg => pkg}/scheduler/core/extender_test.go (97%) rename {plugin/pkg => pkg}/scheduler/core/generic_scheduler.go (99%) rename {plugin/pkg => pkg}/scheduler/core/generic_scheduler_test.go (98%) rename {plugin/pkg => pkg}/scheduler/core/scheduling_queue.go (99%) rename {plugin/pkg => pkg}/scheduler/core/scheduling_queue_test.go (99%) rename {plugin/pkg => pkg}/scheduler/factory/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/factory/factory.go (98%) rename {plugin/pkg => pkg}/scheduler/factory/factory_test.go (97%) rename {plugin/pkg => pkg}/scheduler/factory/plugins.go (98%) rename {plugin/pkg => pkg}/scheduler/factory/plugins_test.go (95%) rename {plugin/pkg => pkg}/scheduler/metrics/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/metrics/metrics.go (100%) rename {plugin/pkg => pkg}/scheduler/scheduler.go (97%) rename {plugin/pkg => pkg}/scheduler/scheduler_test.go (98%) rename {plugin/pkg => pkg}/scheduler/schedulercache/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/schedulercache/cache.go (100%) rename {plugin/pkg => pkg}/scheduler/schedulercache/cache_test.go (99%) rename {plugin/pkg => pkg}/scheduler/schedulercache/interface.go (100%) rename {plugin/pkg => pkg}/scheduler/schedulercache/node_info.go (99%) rename {plugin/pkg => pkg}/scheduler/schedulercache/util.go (100%) rename {plugin/pkg => pkg}/scheduler/testing/BUILD 
(100%) rename {plugin/pkg => pkg}/scheduler/testing/fake_cache.go (97%) rename {plugin/pkg => pkg}/scheduler/testing/fake_lister.go (98%) rename {plugin/pkg => pkg}/scheduler/testing/pods_to_cache.go (96%) rename {plugin/pkg => pkg}/scheduler/testutil.go (94%) rename {plugin/pkg => pkg}/scheduler/util/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/util/backoff_utils.go (100%) rename {plugin/pkg => pkg}/scheduler/util/backoff_utils_test.go (100%) rename {plugin/pkg => pkg}/scheduler/util/testutil.go (100%) rename {plugin/pkg => pkg}/scheduler/util/testutil_test.go (100%) rename {plugin/pkg => pkg}/scheduler/util/utils.go (100%) rename {plugin/pkg => pkg}/scheduler/util/utils_test.go (100%) rename {plugin/pkg => pkg}/scheduler/volumebinder/BUILD (100%) rename {plugin/pkg => pkg}/scheduler/volumebinder/volume_binder.go (100%) diff --git a/build/root/Makefile b/build/root/Makefile index ba50e2983e7..23636241f9b 100644 --- a/build/root/Makefile +++ b/build/root/Makefile @@ -466,21 +466,6 @@ $(filter-out %$(EXCLUDE_TARGET),$(notdir $(abspath $(wildcard cmd/*/)))): genera hack/make-rules/build.sh cmd/$@ endif -define PLUGIN_CMD_HELP_INFO -# Add rules for all directories in plugin/cmd/ -# -# Example: -# make kube-scheduler -endef -.PHONY: $(notdir $(abspath $(wildcard plugin/cmd/*/))) -ifeq ($(PRINT_HELP),y) -$(notdir $(abspath $(wildcard plugin/cmd/*/))): - @echo "$$PLUGIN_CMD_HELP_INFO" -else -$(notdir $(abspath $(wildcard plugin/cmd/*/))): generated_files - hack/make-rules/build.sh plugin/cmd/$@ -endif - define GENERATED_FILES_HELP_INFO # Produce auto-generated files needed for the build. 
# diff --git a/cmd/genkubedocs/gen_kube_docs.go b/cmd/genkubedocs/gen_kube_docs.go index 2285fe4d96d..975466f4418 100644 --- a/cmd/genkubedocs/gen_kube_docs.go +++ b/cmd/genkubedocs/gen_kube_docs.go @@ -27,9 +27,9 @@ import ( apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app" cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app" proxyapp "k8s.io/kubernetes/cmd/kube-proxy/app" + schapp "k8s.io/kubernetes/cmd/kube-scheduler/app" kubeadmapp "k8s.io/kubernetes/cmd/kubeadm/app/cmd" kubeletapp "k8s.io/kubernetes/cmd/kubelet/app" - schapp "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app" ) func main() { diff --git a/cmd/genman/gen_kube_man.go b/cmd/genman/gen_kube_man.go index 182a2a1c141..416d4a9f9fa 100644 --- a/cmd/genman/gen_kube_man.go +++ b/cmd/genman/gen_kube_man.go @@ -31,11 +31,11 @@ import ( apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app" cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app" proxyapp "k8s.io/kubernetes/cmd/kube-proxy/app" + schapp "k8s.io/kubernetes/cmd/kube-scheduler/app" kubeadmapp "k8s.io/kubernetes/cmd/kubeadm/app/cmd" kubeletapp "k8s.io/kubernetes/cmd/kubelet/app" kubectlcmd "k8s.io/kubernetes/pkg/kubectl/cmd" kubectlcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - schapp "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app" ) func main() { diff --git a/cmd/hyperkube/kube-scheduler.go b/cmd/hyperkube/kube-scheduler.go index cfd68dc15dc..ba48aebb2b7 100644 --- a/cmd/hyperkube/kube-scheduler.go +++ b/cmd/hyperkube/kube-scheduler.go @@ -20,7 +20,7 @@ import ( "flag" "k8s.io/apiserver/pkg/server/healthz" - "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app" + "k8s.io/kubernetes/cmd/kube-scheduler/app" ) // NewScheduler creates a new hyperkube Server object that includes the diff --git a/plugin/cmd/kube-scheduler/BUILD b/cmd/kube-scheduler/BUILD similarity index 100% rename from plugin/cmd/kube-scheduler/BUILD rename to cmd/kube-scheduler/BUILD diff --git a/plugin/cmd/kube-scheduler/OWNERS b/cmd/kube-scheduler/OWNERS similarity 
index 100% rename from plugin/cmd/kube-scheduler/OWNERS rename to cmd/kube-scheduler/OWNERS diff --git a/plugin/cmd/kube-scheduler/app/BUILD b/cmd/kube-scheduler/app/BUILD similarity index 100% rename from plugin/cmd/kube-scheduler/app/BUILD rename to cmd/kube-scheduler/app/BUILD diff --git a/plugin/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go similarity index 98% rename from plugin/cmd/kube-scheduler/app/server.go rename to cmd/kube-scheduler/app/server.go index a304257248a..a80a376f9f3 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/cmd/kube-scheduler/app/server.go @@ -57,15 +57,15 @@ import ( "k8s.io/kubernetes/pkg/features" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/master/ports" + "k8s.io/kubernetes/pkg/scheduler" + "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest" "k8s.io/kubernetes/pkg/util/configz" "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/version/verflag" - "k8s.io/kubernetes/plugin/pkg/scheduler" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - latestschedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" + "k8s.io/kubernetes/pkg/scheduler/factory" "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" diff --git a/plugin/cmd/kube-scheduler/scheduler.go b/cmd/kube-scheduler/scheduler.go similarity index 95% rename from plugin/cmd/kube-scheduler/scheduler.go rename to cmd/kube-scheduler/scheduler.go index 047ef86cffa..1f6fcf7d608 100644 --- a/plugin/cmd/kube-scheduler/scheduler.go +++ b/cmd/kube-scheduler/scheduler.go @@ -21,9 +21,9 @@ import ( utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/apiserver/pkg/util/logs" + "k8s.io/kubernetes/cmd/kube-scheduler/app" _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" 
// for client metric registration _ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration - "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app" ) func main() { diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy.go b/cmd/kubeadm/app/phases/addons/proxy/proxy.go index e8f71d11be1..2f8f27e6d9f 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy.go @@ -33,7 +33,7 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" kubeproxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme" kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm" ) const ( diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 8c53d700dc3..48462b80679 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -44,6 +44,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" apiservoptions "k8s.io/kubernetes/cmd/kube-apiserver/app/options" cmoptions "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" + schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/pkg/apis/core/validation" @@ -52,7 +53,6 @@ import ( "k8s.io/kubernetes/pkg/util/initsystem" versionutil "k8s.io/kubernetes/pkg/util/version" kubeadmversion "k8s.io/kubernetes/pkg/version" - schedulerapp "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app" "k8s.io/kubernetes/test/e2e_node/system" utilsexec "k8s.io/utils/exec" ) diff --git a/examples/examples_test.go b/examples/examples_test.go index c47be91a47a..33ec647fff0 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -42,8 +42,8 @@ import ( expvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" 
"k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/registry/batch/job" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - schedulerapilatest "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + schedulerapilatest "k8s.io/kubernetes/pkg/scheduler/api/latest" ) func validateObject(obj runtime.Object) (errors field.ErrorList) { diff --git a/hack/.golint_failures b/hack/.golint_failures index 1d88b07b7f6..c7cd6939add 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -3,6 +3,7 @@ cmd/gke-certificates-controller/app cmd/hyperkube cmd/kube-controller-manager/app cmd/kube-proxy/app +cmd/kube-scheduler/app cmd/kubeadm/app cmd/kubeadm/app/apis/kubeadm cmd/kubeadm/app/apis/kubeadm/v1alpha1 @@ -330,6 +331,19 @@ pkg/registry/storage/rest pkg/registry/storage/storageclass pkg/registry/storage/storageclass/storage pkg/routes +pkg/scheduler/algorithm +pkg/scheduler/algorithm/predicates +pkg/scheduler/algorithm/priorities +pkg/scheduler/algorithm/priorities/util +pkg/scheduler/api +pkg/scheduler/api/latest +pkg/scheduler/api/v1 +pkg/scheduler/core +pkg/scheduler/factory +pkg/scheduler/metrics +pkg/scheduler/schedulercache +pkg/scheduler/testing +pkg/scheduler/util pkg/security/apparmor pkg/security/podsecuritypolicy pkg/security/podsecuritypolicy/group @@ -399,7 +413,6 @@ pkg/volume/storageos pkg/volume/testing pkg/volume/util pkg/volume/vsphere_volume -plugin/cmd/kube-scheduler/app plugin/pkg/admission/antiaffinity plugin/pkg/admission/eventratelimit/apis/eventratelimit plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1 @@ -421,19 +434,6 @@ plugin/pkg/admission/serviceaccount plugin/pkg/admission/storageclass/setdefault plugin/pkg/auth/authorizer/node plugin/pkg/auth/authorizer/rbac -plugin/pkg/scheduler/algorithm -plugin/pkg/scheduler/algorithm/predicates -plugin/pkg/scheduler/algorithm/priorities -plugin/pkg/scheduler/algorithm/priorities/util -plugin/pkg/scheduler/api 
-plugin/pkg/scheduler/api/latest -plugin/pkg/scheduler/api/v1 -plugin/pkg/scheduler/core -plugin/pkg/scheduler/factory -plugin/pkg/scheduler/metrics -plugin/pkg/scheduler/schedulercache -plugin/pkg/scheduler/testing -plugin/pkg/scheduler/util staging/src/k8s.io/api/admission/v1beta1 staging/src/k8s.io/api/admissionregistration/v1alpha1 staging/src/k8s.io/api/admissionregistration/v1beta1 diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index b9649ce6d0a..3e12b3170a4 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -29,9 +29,9 @@ kube::golang::server_targets() { cmd/kubelet cmd/kubeadm cmd/hyperkube + cmd/kube-scheduler vendor/k8s.io/kube-aggregator vendor/k8s.io/apiextensions-apiserver - plugin/cmd/kube-scheduler cluster/gce/gci/mounter ) echo "${targets[@]}" diff --git a/hack/make-rules/make-help.sh b/hack/make-rules/make-help.sh index f1261955a88..e34c4170865 100755 --- a/hack/make-rules/make-help.sh +++ b/hack/make-rules/make-help.sh @@ -24,7 +24,6 @@ readonly reset=$(tput sgr0) KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. 
ALL_TARGETS=$(make -C "${KUBE_ROOT}" PRINT_HELP=y -rpn | sed -n -e '/^$/ { n ; /^[^ .#][^ ]*:/ { s/:.*$// ; p ; } ; }' | sort) CMD_TARGETS=$(ls -l "${KUBE_ROOT}/cmd" |awk '/^d/ {print $NF}') -PLUGIN_CMD_TARGETS=$(ls -l "${KUBE_ROOT}/plugin/cmd" |awk '/^d/ {print $NF}') CMD_FLAG=false PLUGIN_CMD_FLAG=false @@ -45,21 +44,6 @@ for tar in $ALL_TARGETS; do fi done - for plugincmdtar in $PLUGIN_CMD_TARGETS; do - if [ $tar = $plugincmdtar ]; then - if [ $PLUGIN_CMD_FLAG = true ]; then - continue 2; - fi - - echo -e "${red}${PLUGIN_CMD_TARGETS}${reset}" - make -C "${KUBE_ROOT}" $tar PRINT_HELP=y - echo "---------------------------------------------------------------------------------" - - PLUGIN_CMD_FLAG=true - continue 2 - fi - done - echo -e "${red}${tar}${reset}" make -C "${KUBE_ROOT}" $tar PRINT_HELP=y echo "---------------------------------------------------------------------------------" diff --git a/pkg/controller/cloud/node_controller.go b/pkg/controller/cloud/node_controller.go index 5475e5858e5..0f548e7d954 100644 --- a/pkg/controller/cloud/node_controller.go +++ b/pkg/controller/cloud/node_controller.go @@ -37,8 +37,8 @@ import ( nodeutilv1 "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/cloudprovider" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + "k8s.io/kubernetes/pkg/scheduler/algorithm" nodeutil "k8s.io/kubernetes/pkg/util/node" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) var UpdateNodeSpecBackoff = wait.Backoff{ diff --git a/pkg/controller/cloud/node_controller_test.go b/pkg/controller/cloud/node_controller_test.go index 5e249140660..a8faf8352f4 100644 --- a/pkg/controller/cloud/node_controller_test.go +++ b/pkg/controller/cloud/node_controller_test.go @@ -35,7 +35,7 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/testutil" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm" "github.com/golang/glog" 
"github.com/stretchr/testify/assert" diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index 72308b93b5a..c316c52a933 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -54,10 +54,10 @@ import ( "k8s.io/kubernetes/pkg/controller/daemon/util" "k8s.io/kubernetes/pkg/features" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" "k8s.io/kubernetes/pkg/util/metrics" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" "github.com/golang/glog" ) diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 1a4ab365a3a..cd5c4f07376 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -45,9 +45,9 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/controller" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/securitycontext" labelsutil "k8s.io/kubernetes/pkg/util/labels" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) var ( diff --git a/pkg/controller/daemon/util/daemonset_util.go b/pkg/controller/daemon/util/daemonset_util.go index 1ea30b713fd..3c69e8a3a0b 100644 --- a/pkg/controller/daemon/util/daemonset_util.go +++ b/pkg/controller/daemon/util/daemonset_util.go @@ -27,8 +27,8 @@ import ( v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/scheduler/algorithm" labelsutil "k8s.io/kubernetes/pkg/util/labels" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) // 
CreatePodTemplate returns copy of provided template with additional diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go index e2a47f2076b..17f87bad292 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -44,12 +44,12 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler" nodeutil "k8s.io/kubernetes/pkg/controller/util/node" + "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/util/metrics" utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/system" taintutils "k8s.io/kubernetes/pkg/util/taints" utilversion "k8s.io/kubernetes/pkg/util/version" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "fmt" "github.com/golang/glog" diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go index 93affbb9d6f..6813a0909d3 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go @@ -42,9 +42,9 @@ import ( "k8s.io/kubernetes/pkg/controller/testutil" nodeutil "k8s.io/kubernetes/pkg/controller/util/node" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/util/node" taintutils "k8s.io/kubernetes/pkg/util/taints" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) const ( diff --git a/pkg/kubectl/.import-restrictions b/pkg/kubectl/.import-restrictions index 875f997dd52..6d8fdf7cb8c 100644 --- a/pkg/kubectl/.import-restrictions +++ b/pkg/kubectl/.import-restrictions @@ -117,6 +117,13 @@ "k8s.io/kubernetes/pkg/printers/internalversion", "k8s.io/kubernetes/pkg/registry/rbac/reconciliation", "k8s.io/kubernetes/pkg/registry/rbac/validation", + "k8s.io/kubernetes/pkg/scheduler/algorithm", + 
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", + "k8s.io/kubernetes/pkg/scheduler/api", + "k8s.io/kubernetes/pkg/scheduler/schedulercache", + "k8s.io/kubernetes/pkg/scheduler/util", + "k8s.io/kubernetes/pkg/scheduler/volumebinder", "k8s.io/kubernetes/pkg/security/apparmor", "k8s.io/kubernetes/pkg/serviceaccount", "k8s.io/kubernetes/pkg/util/file", @@ -137,13 +144,7 @@ "k8s.io/kubernetes/pkg/version", "k8s.io/kubernetes/pkg/version/prometheus", "k8s.io/kubernetes/pkg/volume", - "k8s.io/kubernetes/pkg/volume/util", - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", - "k8s.io/kubernetes/plugin/pkg/scheduler/api", - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", - "k8s.io/kubernetes/plugin/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/volume/util" ], "ForbiddenPrefixes": [] }] diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index da7bf4a4642..fd61f2a751b 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -28,7 +28,7 @@ import ( evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" "fmt" "strconv" diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 6c6c7068172..f72ec699c96 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -52,13 +52,13 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/status" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" utilfile "k8s.io/kubernetes/pkg/util/file" 
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/procfs" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" utilversion "k8s.io/kubernetes/pkg/util/version" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) const ( diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index 27a86849582..83df4cfaad8 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -26,7 +26,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) type containerManagerStub struct{} diff --git a/pkg/kubelet/cm/deviceplugin/manager.go b/pkg/kubelet/cm/deviceplugin/manager.go index df5e36e2187..646dd658793 100644 --- a/pkg/kubelet/cm/deviceplugin/manager.go +++ b/pkg/kubelet/cm/deviceplugin/manager.go @@ -38,8 +38,8 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/metrics" utilstore "k8s.io/kubernetes/pkg/kubelet/util/store" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" utilfs "k8s.io/kubernetes/pkg/util/filesystem" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) // ActivePodsFunc is a function that returns a list of pods to reconcile. 
diff --git a/pkg/kubelet/cm/deviceplugin/manager_stub.go b/pkg/kubelet/cm/deviceplugin/manager_stub.go index 903a0077a2c..5d7a4b74c5d 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_stub.go +++ b/pkg/kubelet/cm/deviceplugin/manager_stub.go @@ -21,7 +21,7 @@ import ( pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/lifecycle" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // ManagerStub provides a simple stub implementation for the Device Manager. diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index 69699cf2d84..88147077c3a 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -35,8 +35,8 @@ import ( pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" "k8s.io/kubernetes/pkg/kubelet/lifecycle" utilstore "k8s.io/kubernetes/pkg/kubelet/util/store" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" utilfs "k8s.io/kubernetes/pkg/util/filesystem" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) const ( diff --git a/pkg/kubelet/cm/deviceplugin/types.go b/pkg/kubelet/cm/deviceplugin/types.go index c4465a8be4c..3c6b30206f1 100644 --- a/pkg/kubelet/cm/deviceplugin/types.go +++ b/pkg/kubelet/cm/deviceplugin/types.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/lifecycle" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // Manager manages all the Device Plugins running on a node. 
diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index 8be0c49a28e..3ae9c0306a6 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -32,7 +32,7 @@ import ( evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" "k8s.io/kubernetes/pkg/kubelet/server/stats" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - schedulerutils "k8s.io/kubernetes/plugin/pkg/scheduler/util" + schedulerutils "k8s.io/kubernetes/pkg/scheduler/util" ) const ( diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index eb335301f31..2ed2dd9c7ac 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -95,6 +95,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/queue" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils" "k8s.io/kubernetes/pkg/kubelet/volumemanager" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/security/apparmor" utildbus "k8s.io/kubernetes/pkg/util/dbus" kubeio "k8s.io/kubernetes/pkg/util/io" @@ -103,7 +104,6 @@ import ( nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" utilexec "k8s.io/utils/exec" ) diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index c775a665acc..7094b66f996 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -42,10 +42,10 @@ import ( "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils" + "k8s.io/kubernetes/pkg/scheduler/algorithm" nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/volume/util/volumehelper" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) const ( diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 5019121d43a..c1061355baa 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go 
@@ -67,12 +67,12 @@ import ( kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/queue" kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" _ "k8s.io/kubernetes/pkg/volume/host_path" volumetest "k8s.io/kubernetes/pkg/volume/testing" "k8s.io/kubernetes/pkg/volume/util/volumehelper" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) func init() { diff --git a/pkg/kubelet/lifecycle/admission_failure_handler_stub.go b/pkg/kubelet/lifecycle/admission_failure_handler_stub.go index c98339782e4..58e675e9a9b 100644 --- a/pkg/kubelet/lifecycle/admission_failure_handler_stub.go +++ b/pkg/kubelet/lifecycle/admission_failure_handler_stub.go @@ -18,7 +18,7 @@ package lifecycle import ( "k8s.io/api/core/v1" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm" ) // AdmissionFailureHandlerStub is an AdmissionFailureHandler that does not perform any handling of admission failure. 
diff --git a/pkg/kubelet/lifecycle/predicate.go b/pkg/kubelet/lifecycle/predicate.go index 9b8ad4d3cc1..e3890055f13 100644 --- a/pkg/kubelet/lifecycle/predicate.go +++ b/pkg/kubelet/lifecycle/predicate.go @@ -22,9 +22,9 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/kubelet/util/format" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) type getNodeAnyWayFuncType func() (*v1.Node, error) diff --git a/pkg/kubelet/preemption/preemption.go b/pkg/kubelet/preemption/preemption.go index 1f9214b8306..96d829e4f8c 100644 --- a/pkg/kubelet/preemption/preemption.go +++ b/pkg/kubelet/preemption/preemption.go @@ -32,8 +32,8 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" ) const message = "Preempted in order to admit critical pod" diff --git a/plugin/pkg/scheduler/BUILD b/pkg/scheduler/BUILD similarity index 100% rename from plugin/pkg/scheduler/BUILD rename to pkg/scheduler/BUILD diff --git a/plugin/pkg/scheduler/OWNERS b/pkg/scheduler/OWNERS similarity index 100% rename from plugin/pkg/scheduler/OWNERS rename to pkg/scheduler/OWNERS diff --git a/plugin/pkg/scheduler/algorithm/BUILD b/pkg/scheduler/algorithm/BUILD similarity index 100% rename from plugin/pkg/scheduler/algorithm/BUILD rename to pkg/scheduler/algorithm/BUILD diff --git a/plugin/pkg/scheduler/algorithm/doc.go b/pkg/scheduler/algorithm/doc.go similarity index 89% rename from 
plugin/pkg/scheduler/algorithm/doc.go rename to pkg/scheduler/algorithm/doc.go index 299051b0f8a..59c2cc4aa7c 100644 --- a/plugin/pkg/scheduler/algorithm/doc.go +++ b/pkg/scheduler/algorithm/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package scheduler contains a generic Scheduler interface and several // implementations. -package algorithm // import "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" +package algorithm // import "k8s.io/kubernetes/pkg/scheduler/algorithm" diff --git a/plugin/pkg/scheduler/algorithm/predicates/BUILD b/pkg/scheduler/algorithm/predicates/BUILD similarity index 100% rename from plugin/pkg/scheduler/algorithm/predicates/BUILD rename to pkg/scheduler/algorithm/predicates/BUILD diff --git a/plugin/pkg/scheduler/algorithm/predicates/error.go b/pkg/scheduler/algorithm/predicates/error.go similarity index 100% rename from plugin/pkg/scheduler/algorithm/predicates/error.go rename to pkg/scheduler/algorithm/predicates/error.go diff --git a/plugin/pkg/scheduler/algorithm/predicates/metadata.go b/pkg/scheduler/algorithm/predicates/metadata.go similarity index 97% rename from plugin/pkg/scheduler/algorithm/predicates/metadata.go rename to pkg/scheduler/algorithm/predicates/metadata.go index c0eda6a24de..af8c32e2c4c 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/metadata.go +++ b/pkg/scheduler/algorithm/predicates/metadata.go @@ -22,9 +22,9 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedutil "k8s.io/kubernetes/pkg/scheduler/util" "github.com/golang/glog" ) diff --git a/plugin/pkg/scheduler/algorithm/predicates/metadata_test.go b/pkg/scheduler/algorithm/predicates/metadata_test.go similarity index 99% rename from 
plugin/pkg/scheduler/algorithm/predicates/metadata_test.go rename to pkg/scheduler/algorithm/predicates/metadata_test.go index 0a96f5a0c91..31b88411015 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/metadata_test.go +++ b/pkg/scheduler/algorithm/predicates/metadata_test.go @@ -24,8 +24,8 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) // sortableAntiAffinityTerms lets us to sort anti-affinity terms. diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go similarity index 99% rename from plugin/pkg/scheduler/algorithm/predicates/predicates.go rename to pkg/scheduler/algorithm/predicates/predicates.go index 67c4caf7b45..48d7b509285 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/pkg/scheduler/algorithm/predicates/predicates.go @@ -37,14 +37,14 @@ import ( v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedutil "k8s.io/kubernetes/pkg/scheduler/util" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util" "github.com/golang/glog" - "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" + "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) const ( diff --git 
a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/pkg/scheduler/algorithm/predicates/predicates_test.go similarity index 99% rename from plugin/pkg/scheduler/algorithm/predicates/predicates_test.go rename to pkg/scheduler/algorithm/predicates/predicates_test.go index 0ed3709e51b..1b05d9e4fde 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -29,10 +29,10 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" - schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" + schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) var ( diff --git a/plugin/pkg/scheduler/algorithm/predicates/testing_helper.go b/pkg/scheduler/algorithm/predicates/testing_helper.go similarity index 100% rename from plugin/pkg/scheduler/algorithm/predicates/testing_helper.go rename to pkg/scheduler/algorithm/predicates/testing_helper.go diff --git a/plugin/pkg/scheduler/algorithm/predicates/utils.go b/pkg/scheduler/algorithm/predicates/utils.go similarity index 98% rename from plugin/pkg/scheduler/algorithm/predicates/utils.go rename to pkg/scheduler/algorithm/predicates/utils.go index 622bdc68359..9a25c85d9ac 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/utils.go +++ b/pkg/scheduler/algorithm/predicates/utils.go @@ -25,8 +25,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - schedutil 
"k8s.io/kubernetes/plugin/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) // FindLabelsInSet gets as many key/value pairs as possible out of a label set. diff --git a/plugin/pkg/scheduler/algorithm/predicates/utils_test.go b/pkg/scheduler/algorithm/predicates/utils_test.go similarity index 100% rename from plugin/pkg/scheduler/algorithm/predicates/utils_test.go rename to pkg/scheduler/algorithm/predicates/utils_test.go diff --git a/plugin/pkg/scheduler/algorithm/priorities/BUILD b/pkg/scheduler/algorithm/priorities/BUILD similarity index 100% rename from plugin/pkg/scheduler/algorithm/priorities/BUILD rename to pkg/scheduler/algorithm/priorities/BUILD diff --git a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go similarity index 95% rename from plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go rename to pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go index c57bcbfc68c..0f3d98f6385 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go +++ b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go @@ -19,8 +19,8 @@ package priorities import ( "math" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) var ( diff --git a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go similarity index 98% rename from plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go rename to pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go index 381ff05307a..9b109d109c3 100644 --- 
a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go +++ b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go @@ -23,8 +23,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func TestBalancedResourceAllocation(t *testing.T) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/image_locality.go b/pkg/scheduler/algorithm/priorities/image_locality.go similarity index 96% rename from plugin/pkg/scheduler/algorithm/priorities/image_locality.go rename to pkg/scheduler/algorithm/priorities/image_locality.go index 86b3bb6c54f..5df5d35308d 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/image_locality.go +++ b/pkg/scheduler/algorithm/priorities/image_locality.go @@ -20,8 +20,8 @@ import ( "fmt" "k8s.io/api/core/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // This is a reasonable size range of all container images. 90%ile of images on dockerhub drops into this range. 
diff --git a/plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go b/pkg/scheduler/algorithm/priorities/image_locality_test.go similarity index 97% rename from plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go rename to pkg/scheduler/algorithm/priorities/image_locality_test.go index f957e86375b..5a3bb66e792 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go +++ b/pkg/scheduler/algorithm/priorities/image_locality_test.go @@ -23,8 +23,8 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func TestImageLocalityPriority(t *testing.T) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/pkg/scheduler/algorithm/priorities/interpod_affinity.go similarity index 96% rename from plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go rename to pkg/scheduler/algorithm/priorities/interpod_affinity.go index 16214ae1003..59ed117f82e 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -23,11 +23,11 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + schedulerapi 
"k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" "github.com/golang/glog" ) diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go b/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go similarity index 99% rename from plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go rename to pkg/scheduler/algorithm/priorities/interpod_affinity_test.go index 6dd34299273..6987e1d9ef4 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go +++ b/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go @@ -23,9 +23,9 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) type FakeNodeListInfo []*v1.Node diff --git a/plugin/pkg/scheduler/algorithm/priorities/least_requested.go b/pkg/scheduler/algorithm/priorities/least_requested.go similarity index 93% rename from plugin/pkg/scheduler/algorithm/priorities/least_requested.go rename to pkg/scheduler/algorithm/priorities/least_requested.go index 39d3208f6fa..0c2f0481f93 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/least_requested.go +++ b/pkg/scheduler/algorithm/priorities/least_requested.go @@ -17,8 +17,8 @@ limitations under the License. 
package priorities import ( - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) var ( diff --git a/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go b/pkg/scheduler/algorithm/priorities/least_requested_test.go similarity index 98% rename from plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go rename to pkg/scheduler/algorithm/priorities/least_requested_test.go index f71ef43d1bc..3b5308d7ba1 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go +++ b/pkg/scheduler/algorithm/priorities/least_requested_test.go @@ -23,8 +23,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func TestLeastRequested(t *testing.T) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/metadata.go b/pkg/scheduler/algorithm/priorities/metadata.go similarity index 95% rename from plugin/pkg/scheduler/algorithm/priorities/metadata.go rename to pkg/scheduler/algorithm/priorities/metadata.go index 3a4d7831182..fe9dce79f47 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/metadata.go +++ b/pkg/scheduler/algorithm/priorities/metadata.go @@ -20,9 +20,9 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + 
"k8s.io/kubernetes/pkg/scheduler/schedulercache" ) type PriorityMetadataFactory struct { diff --git a/plugin/pkg/scheduler/algorithm/priorities/metadata_test.go b/pkg/scheduler/algorithm/priorities/metadata_test.go similarity index 95% rename from plugin/pkg/scheduler/algorithm/priorities/metadata_test.go rename to pkg/scheduler/algorithm/priorities/metadata_test.go index dbcb562598a..ada1a3c46aa 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/metadata_test.go +++ b/pkg/scheduler/algorithm/priorities/metadata_test.go @@ -25,9 +25,9 @@ import ( extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) func TestPriorityMetadata(t *testing.T) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/most_requested.go b/pkg/scheduler/algorithm/priorities/most_requested.go similarity index 94% rename from plugin/pkg/scheduler/algorithm/priorities/most_requested.go rename to pkg/scheduler/algorithm/priorities/most_requested.go index 9cba1a32ee5..ed9053aa1e6 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/most_requested.go +++ b/pkg/scheduler/algorithm/priorities/most_requested.go @@ -17,8 +17,8 @@ limitations under the License. 
package priorities import ( - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) var ( diff --git a/plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go b/pkg/scheduler/algorithm/priorities/most_requested_test.go similarity index 98% rename from plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go rename to pkg/scheduler/algorithm/priorities/most_requested_test.go index 0cffea5a33b..4869ad85eb6 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go +++ b/pkg/scheduler/algorithm/priorities/most_requested_test.go @@ -23,8 +23,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func TestMostRequested(t *testing.T) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go b/pkg/scheduler/algorithm/priorities/node_affinity.go similarity index 96% rename from plugin/pkg/scheduler/algorithm/priorities/node_affinity.go rename to pkg/scheduler/algorithm/priorities/node_affinity.go index 0ed713a3b5c..d1c79353614 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go +++ b/pkg/scheduler/algorithm/priorities/node_affinity.go @@ -22,8 +22,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // CalculateNodeAffinityPriority prioritizes nodes according to node 
affinity scheduling preferences diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go b/pkg/scheduler/algorithm/priorities/node_affinity_test.go similarity index 97% rename from plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go rename to pkg/scheduler/algorithm/priorities/node_affinity_test.go index f5474134364..e7054b1514e 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go +++ b/pkg/scheduler/algorithm/priorities/node_affinity_test.go @@ -22,8 +22,8 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func TestNodeAffinityPriority(t *testing.T) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_label.go b/pkg/scheduler/algorithm/priorities/node_label.go similarity index 91% rename from plugin/pkg/scheduler/algorithm/priorities/node_label.go rename to pkg/scheduler/algorithm/priorities/node_label.go index 7eef5a3bd76..8c1a86f5900 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_label.go +++ b/pkg/scheduler/algorithm/priorities/node_label.go @@ -21,9 +21,9 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) type NodeLabelPrioritizer struct { diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_label_test.go b/pkg/scheduler/algorithm/priorities/node_label_test.go similarity index 97% rename from plugin/pkg/scheduler/algorithm/priorities/node_label_test.go rename to 
pkg/scheduler/algorithm/priorities/node_label_test.go index 7acc6ea7076..416fc9cc092 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_label_test.go +++ b/pkg/scheduler/algorithm/priorities/node_label_test.go @@ -23,8 +23,8 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func TestNewNodeLabelPriority(t *testing.T) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go similarity index 91% rename from plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go rename to pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go index 7392f76b716..c0f40490822 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go +++ b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go @@ -22,9 +22,9 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go similarity index 97% rename from 
plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go rename to pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go index 0766b9e5488..8fb852fc6f5 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go +++ b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go @@ -23,8 +23,8 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func TestNodePreferAvoidPriority(t *testing.T) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/reduce.go b/pkg/scheduler/algorithm/priorities/reduce.go similarity index 89% rename from plugin/pkg/scheduler/algorithm/priorities/reduce.go rename to pkg/scheduler/algorithm/priorities/reduce.go index 9ce84fd9f6b..608a83355e9 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/reduce.go +++ b/pkg/scheduler/algorithm/priorities/reduce.go @@ -18,9 +18,9 @@ package priorities import ( "k8s.io/api/core/v1" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // NormalizeReduce generates a PriorityReduceFunction that can normalize the result diff --git a/plugin/pkg/scheduler/algorithm/priorities/resource_allocation.go b/pkg/scheduler/algorithm/priorities/resource_allocation.go similarity index 91% rename from plugin/pkg/scheduler/algorithm/priorities/resource_allocation.go rename to pkg/scheduler/algorithm/priorities/resource_allocation.go index 9723eff142e..c938cedfddb 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/resource_allocation.go +++ 
b/pkg/scheduler/algorithm/priorities/resource_allocation.go @@ -21,9 +21,9 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) type ResourceAllocationPriority struct { diff --git a/plugin/pkg/scheduler/algorithm/priorities/resource_limits.go b/pkg/scheduler/algorithm/priorities/resource_limits.go similarity index 97% rename from plugin/pkg/scheduler/algorithm/priorities/resource_limits.go rename to pkg/scheduler/algorithm/priorities/resource_limits.go index 77ae0dca923..3267368d2f9 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/resource_limits.go +++ b/pkg/scheduler/algorithm/priorities/resource_limits.go @@ -21,8 +21,8 @@ import ( "k8s.io/api/core/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" "github.com/golang/glog" ) diff --git a/plugin/pkg/scheduler/algorithm/priorities/resource_limits_test.go b/pkg/scheduler/algorithm/priorities/resource_limits_test.go similarity index 97% rename from plugin/pkg/scheduler/algorithm/priorities/resource_limits_test.go rename to pkg/scheduler/algorithm/priorities/resource_limits_test.go index 0a48cc73308..e3056dcc6ba 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/resource_limits_test.go +++ b/pkg/scheduler/algorithm/priorities/resource_limits_test.go @@ -23,8 +23,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi 
"k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func TestResourceLimistPriority(t *testing.T) { diff --git a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go b/pkg/scheduler/algorithm/priorities/selector_spreading.go similarity index 98% rename from plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go rename to pkg/scheduler/algorithm/priorities/selector_spreading.go index 53c5c3719ed..3b8eb609380 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go +++ b/pkg/scheduler/algorithm/priorities/selector_spreading.go @@ -21,10 +21,10 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" utilnode "k8s.io/kubernetes/pkg/util/node" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" "github.com/golang/glog" ) diff --git a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go similarity index 99% rename from plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go rename to pkg/scheduler/algorithm/priorities/selector_spreading_test.go index 85e547dae4a..e6eff8cc275 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go +++ b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go @@ -26,9 +26,9 @@ import ( extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - 
schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) func controllerRef(kind, name, uid string) []metav1.OwnerReference { diff --git a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go b/pkg/scheduler/algorithm/priorities/taint_toleration.go similarity index 96% rename from plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go rename to pkg/scheduler/algorithm/priorities/taint_toleration.go index 9e2905e8edb..c6847c0ea1f 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go +++ b/pkg/scheduler/algorithm/priorities/taint_toleration.go @@ -21,8 +21,8 @@ import ( "k8s.io/api/core/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule diff --git a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go b/pkg/scheduler/algorithm/priorities/taint_toleration_test.go similarity index 98% rename from plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go rename to pkg/scheduler/algorithm/priorities/taint_toleration_test.go index f54ce45613c..3ef61dd987e 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go +++ b/pkg/scheduler/algorithm/priorities/taint_toleration_test.go @@ -22,8 +22,8 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + 
"k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node { diff --git a/plugin/pkg/scheduler/algorithm/priorities/test_util.go b/pkg/scheduler/algorithm/priorities/test_util.go similarity index 92% rename from plugin/pkg/scheduler/algorithm/priorities/test_util.go rename to pkg/scheduler/algorithm/priorities/test_util.go index 312c7619410..d1756c0383c 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/test_util.go +++ b/pkg/scheduler/algorithm/priorities/test_util.go @@ -20,9 +20,9 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) func makeNode(node string, milliCPU, memory int64) *v1.Node { diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/BUILD b/pkg/scheduler/algorithm/priorities/util/BUILD similarity index 100% rename from plugin/pkg/scheduler/algorithm/priorities/util/BUILD rename to pkg/scheduler/algorithm/priorities/util/BUILD diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/non_zero.go b/pkg/scheduler/algorithm/priorities/util/non_zero.go similarity index 100% rename from plugin/pkg/scheduler/algorithm/priorities/util/non_zero.go rename to pkg/scheduler/algorithm/priorities/util/non_zero.go diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/non_zero_test.go b/pkg/scheduler/algorithm/priorities/util/non_zero_test.go similarity index 100% rename from plugin/pkg/scheduler/algorithm/priorities/util/non_zero_test.go rename to pkg/scheduler/algorithm/priorities/util/non_zero_test.go diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/topologies.go 
b/pkg/scheduler/algorithm/priorities/util/topologies.go similarity index 100% rename from plugin/pkg/scheduler/algorithm/priorities/util/topologies.go rename to pkg/scheduler/algorithm/priorities/util/topologies.go diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/topologies_test.go b/pkg/scheduler/algorithm/priorities/util/topologies_test.go similarity index 100% rename from plugin/pkg/scheduler/algorithm/priorities/util/topologies_test.go rename to pkg/scheduler/algorithm/priorities/util/topologies_test.go diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/util.go b/pkg/scheduler/algorithm/priorities/util/util.go similarity index 100% rename from plugin/pkg/scheduler/algorithm/priorities/util/util.go rename to pkg/scheduler/algorithm/priorities/util/util.go diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/util_test.go b/pkg/scheduler/algorithm/priorities/util/util_test.go similarity index 100% rename from plugin/pkg/scheduler/algorithm/priorities/util/util_test.go rename to pkg/scheduler/algorithm/priorities/util/util_test.go diff --git a/plugin/pkg/scheduler/algorithm/scheduler_interface.go b/pkg/scheduler/algorithm/scheduler_interface.go similarity index 95% rename from plugin/pkg/scheduler/algorithm/scheduler_interface.go rename to pkg/scheduler/algorithm/scheduler_interface.go index 5ef4fd6f407..d64e8842601 100644 --- a/plugin/pkg/scheduler/algorithm/scheduler_interface.go +++ b/pkg/scheduler/algorithm/scheduler_interface.go @@ -18,8 +18,8 @@ package algorithm import ( "k8s.io/api/core/v1" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // SchedulerExtender is an interface for external processes to influence scheduling diff --git a/plugin/pkg/scheduler/algorithm/scheduler_interface_test.go b/pkg/scheduler/algorithm/scheduler_interface_test.go similarity 
index 100% rename from plugin/pkg/scheduler/algorithm/scheduler_interface_test.go rename to pkg/scheduler/algorithm/scheduler_interface_test.go diff --git a/plugin/pkg/scheduler/algorithm/types.go b/pkg/scheduler/algorithm/types.go similarity index 98% rename from plugin/pkg/scheduler/algorithm/types.go rename to pkg/scheduler/algorithm/types.go index b3e34e02401..5fb2981f110 100644 --- a/plugin/pkg/scheduler/algorithm/types.go +++ b/pkg/scheduler/algorithm/types.go @@ -21,8 +21,8 @@ import ( "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/labels" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // FitPredicate is a function that indicates if a pod fits into an existing node. diff --git a/plugin/pkg/scheduler/algorithm/types_test.go b/pkg/scheduler/algorithm/types_test.go similarity index 97% rename from plugin/pkg/scheduler/algorithm/types_test.go rename to pkg/scheduler/algorithm/types_test.go index 30b322caff5..862425f7218 100644 --- a/plugin/pkg/scheduler/algorithm/types_test.go +++ b/pkg/scheduler/algorithm/types_test.go @@ -21,7 +21,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // EmptyMetadataProducer should returns a no-op MetadataProducer type. 
diff --git a/plugin/pkg/scheduler/algorithm/well_known_labels.go b/pkg/scheduler/algorithm/well_known_labels.go similarity index 100% rename from plugin/pkg/scheduler/algorithm/well_known_labels.go rename to pkg/scheduler/algorithm/well_known_labels.go diff --git a/plugin/pkg/scheduler/algorithmprovider/BUILD b/pkg/scheduler/algorithmprovider/BUILD similarity index 100% rename from plugin/pkg/scheduler/algorithmprovider/BUILD rename to pkg/scheduler/algorithmprovider/BUILD diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/BUILD b/pkg/scheduler/algorithmprovider/defaults/BUILD similarity index 100% rename from plugin/pkg/scheduler/algorithmprovider/defaults/BUILD rename to pkg/scheduler/algorithmprovider/defaults/BUILD diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go b/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go similarity index 99% rename from plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go rename to pkg/scheduler/algorithmprovider/defaults/compatibility_test.go index 332fb4f6796..77c421bc850 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go +++ b/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go @@ -31,9 +31,9 @@ import ( utiltesting "k8s.io/client-go/util/testing" "k8s.io/kubernetes/pkg/api/legacyscheme" _ "k8s.io/kubernetes/pkg/apis/core/install" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - latestschedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest" + "k8s.io/kubernetes/pkg/scheduler/factory" ) const enableEquivalenceCache = true diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/pkg/scheduler/algorithmprovider/defaults/defaults.go similarity index 97% rename from 
plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go rename to pkg/scheduler/algorithmprovider/defaults/defaults.go index 6ba618f3950..6cbc772ac99 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -21,11 +21,11 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities" - "k8s.io/kubernetes/plugin/pkg/scheduler/core" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/core" + "k8s.io/kubernetes/pkg/scheduler/factory" "github.com/golang/glog" ) diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults_test.go b/pkg/scheduler/algorithmprovider/defaults/defaults_test.go similarity index 97% rename from plugin/pkg/scheduler/algorithmprovider/defaults/defaults_test.go rename to pkg/scheduler/algorithmprovider/defaults/defaults_test.go index 16fc1b75e76..d78bb62835a 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults_test.go +++ b/pkg/scheduler/algorithmprovider/defaults/defaults_test.go @@ -20,7 +20,7 @@ import ( "testing" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" ) func TestCopyAndReplace(t *testing.T) { diff --git a/plugin/pkg/scheduler/algorithmprovider/plugins.go b/pkg/scheduler/algorithmprovider/plugins.go similarity index 91% rename from plugin/pkg/scheduler/algorithmprovider/plugins.go rename to pkg/scheduler/algorithmprovider/plugins.go index f357a12d5a9..e2784f62609 100644 --- a/plugin/pkg/scheduler/algorithmprovider/plugins.go 
+++ b/pkg/scheduler/algorithmprovider/plugins.go @@ -17,7 +17,7 @@ limitations under the License. package algorithmprovider import ( - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults" + "k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults" ) // ApplyFeatureGates applies algorithm by feature gates. diff --git a/plugin/pkg/scheduler/algorithmprovider/plugins_test.go b/pkg/scheduler/algorithmprovider/plugins_test.go similarity index 98% rename from plugin/pkg/scheduler/algorithmprovider/plugins_test.go rename to pkg/scheduler/algorithmprovider/plugins_test.go index 4044bfa52cd..16a1e8f5ac2 100644 --- a/plugin/pkg/scheduler/algorithmprovider/plugins_test.go +++ b/pkg/scheduler/algorithmprovider/plugins_test.go @@ -20,7 +20,7 @@ import ( "testing" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" + "k8s.io/kubernetes/pkg/scheduler/factory" ) var ( diff --git a/plugin/pkg/scheduler/api/BUILD b/pkg/scheduler/api/BUILD similarity index 100% rename from plugin/pkg/scheduler/api/BUILD rename to pkg/scheduler/api/BUILD diff --git a/plugin/pkg/scheduler/api/doc.go b/pkg/scheduler/api/doc.go similarity index 83% rename from plugin/pkg/scheduler/api/doc.go rename to pkg/scheduler/api/doc.go index 28c50754576..c768a8c92cf 100644 --- a/plugin/pkg/scheduler/api/doc.go +++ b/pkg/scheduler/api/doc.go @@ -16,5 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package -// Package api contains scheduler plugin API objects. -package api // import "k8s.io/kubernetes/plugin/pkg/scheduler/api" +// Package api contains scheduler API objects. 
+package api // import "k8s.io/kubernetes/pkg/scheduler/api" diff --git a/plugin/pkg/scheduler/api/latest/BUILD b/pkg/scheduler/api/latest/BUILD similarity index 100% rename from plugin/pkg/scheduler/api/latest/BUILD rename to pkg/scheduler/api/latest/BUILD diff --git a/plugin/pkg/scheduler/api/latest/latest.go b/pkg/scheduler/api/latest/latest.go similarity index 94% rename from plugin/pkg/scheduler/api/latest/latest.go rename to pkg/scheduler/api/latest/latest.go index f225847737a..4fa4bfb6cc9 100644 --- a/plugin/pkg/scheduler/api/latest/latest.go +++ b/pkg/scheduler/api/latest/latest.go @@ -21,8 +21,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - _ "k8s.io/kubernetes/plugin/pkg/scheduler/api/v1" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + _ "k8s.io/kubernetes/pkg/scheduler/api/v1" ) // Version is the string that represents the current external default version. diff --git a/plugin/pkg/scheduler/api/register.go b/pkg/scheduler/api/register.go similarity index 100% rename from plugin/pkg/scheduler/api/register.go rename to pkg/scheduler/api/register.go diff --git a/plugin/pkg/scheduler/api/types.go b/pkg/scheduler/api/types.go similarity index 100% rename from plugin/pkg/scheduler/api/types.go rename to pkg/scheduler/api/types.go diff --git a/plugin/pkg/scheduler/api/v1/BUILD b/pkg/scheduler/api/v1/BUILD similarity index 100% rename from plugin/pkg/scheduler/api/v1/BUILD rename to pkg/scheduler/api/v1/BUILD diff --git a/plugin/pkg/scheduler/api/v1/doc.go b/pkg/scheduler/api/v1/doc.go similarity index 83% rename from plugin/pkg/scheduler/api/v1/doc.go rename to pkg/scheduler/api/v1/doc.go index 38679d991c2..3386c4d8d21 100644 --- a/plugin/pkg/scheduler/api/v1/doc.go +++ b/pkg/scheduler/api/v1/doc.go @@ -16,5 +16,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package -// Package v1 contains scheduler plugin API objects. -package v1 // import "k8s.io/kubernetes/plugin/pkg/scheduler/api/v1" +// Package v1 contains scheduler API objects. +package v1 // import "k8s.io/kubernetes/pkg/scheduler/api/v1" diff --git a/plugin/pkg/scheduler/api/v1/register.go b/pkg/scheduler/api/v1/register.go similarity index 96% rename from plugin/pkg/scheduler/api/v1/register.go rename to pkg/scheduler/api/v1/register.go index 292245a0a61..0b45a6a2d0d 100644 --- a/plugin/pkg/scheduler/api/v1/register.go +++ b/pkg/scheduler/api/v1/register.go @@ -19,7 +19,7 @@ package v1 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" ) // SchemeGroupVersion is group version used to register these objects diff --git a/plugin/pkg/scheduler/api/v1/types.go b/pkg/scheduler/api/v1/types.go similarity index 100% rename from plugin/pkg/scheduler/api/v1/types.go rename to pkg/scheduler/api/v1/types.go diff --git a/plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go b/pkg/scheduler/api/v1/zz_generated.deepcopy.go similarity index 100% rename from plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go rename to pkg/scheduler/api/v1/zz_generated.deepcopy.go diff --git a/plugin/pkg/scheduler/api/validation/BUILD b/pkg/scheduler/api/validation/BUILD similarity index 100% rename from plugin/pkg/scheduler/api/validation/BUILD rename to pkg/scheduler/api/validation/BUILD diff --git a/plugin/pkg/scheduler/api/validation/validation.go b/pkg/scheduler/api/validation/validation.go similarity index 96% rename from plugin/pkg/scheduler/api/validation/validation.go rename to pkg/scheduler/api/validation/validation.go index cec33b1955b..d8eb954c5fc 100644 --- a/plugin/pkg/scheduler/api/validation/validation.go +++ b/pkg/scheduler/api/validation/validation.go @@ -20,7 +20,7 @@ import ( "fmt" utilerrors 
"k8s.io/apimachinery/pkg/util/errors" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" ) // ValidatePolicy checks for errors in the Config diff --git a/plugin/pkg/scheduler/api/validation/validation_test.go b/pkg/scheduler/api/validation/validation_test.go similarity index 98% rename from plugin/pkg/scheduler/api/validation/validation_test.go rename to pkg/scheduler/api/validation/validation_test.go index b0b01a8573b..482bf92f665 100644 --- a/plugin/pkg/scheduler/api/validation/validation_test.go +++ b/pkg/scheduler/api/validation/validation_test.go @@ -21,7 +21,7 @@ import ( "fmt" "testing" - "k8s.io/kubernetes/plugin/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/api" ) func TestValidatePolicy(t *testing.T) { diff --git a/plugin/pkg/scheduler/api/zz_generated.deepcopy.go b/pkg/scheduler/api/zz_generated.deepcopy.go similarity index 100% rename from plugin/pkg/scheduler/api/zz_generated.deepcopy.go rename to pkg/scheduler/api/zz_generated.deepcopy.go diff --git a/plugin/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD similarity index 100% rename from plugin/pkg/scheduler/core/BUILD rename to pkg/scheduler/core/BUILD diff --git a/plugin/pkg/scheduler/core/equivalence_cache.go b/pkg/scheduler/core/equivalence_cache.go similarity index 99% rename from plugin/pkg/scheduler/core/equivalence_cache.go rename to pkg/scheduler/core/equivalence_cache.go index ca27f40d57a..5d9bda7eafe 100644 --- a/plugin/pkg/scheduler/core/equivalence_cache.go +++ b/pkg/scheduler/core/equivalence_cache.go @@ -22,8 +22,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/scheduler/algorithm" hashutil "k8s.io/kubernetes/pkg/util/hash" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "github.com/golang/glog" "github.com/golang/groupcache/lru" diff --git a/plugin/pkg/scheduler/core/equivalence_cache_test.go b/pkg/scheduler/core/equivalence_cache_test.go similarity index 99% 
rename from plugin/pkg/scheduler/core/equivalence_cache_test.go rename to pkg/scheduler/core/equivalence_cache_test.go index 3b098c1d2b1..54b903e2fbd 100644 --- a/plugin/pkg/scheduler/core/equivalence_cache_test.go +++ b/pkg/scheduler/core/equivalence_cache_test.go @@ -23,8 +23,8 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" ) type predicateItemType struct { diff --git a/plugin/pkg/scheduler/core/extender.go b/pkg/scheduler/core/extender.go similarity index 97% rename from plugin/pkg/scheduler/core/extender.go rename to pkg/scheduler/core/extender.go index 898ef4f4525..0eb1e0def5e 100644 --- a/plugin/pkg/scheduler/core/extender.go +++ b/pkg/scheduler/core/extender.go @@ -27,9 +27,9 @@ import ( "k8s.io/api/core/v1" utilnet "k8s.io/apimachinery/pkg/util/net" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) const ( diff --git a/plugin/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go similarity index 97% rename from plugin/pkg/scheduler/core/extender_test.go rename to pkg/scheduler/core/extender_test.go index 3f389aff4dc..23551a2415c 100644 --- a/plugin/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -24,10 +24,10 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - schedulerapi 
"k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error) diff --git a/plugin/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go similarity index 99% rename from plugin/pkg/scheduler/core/generic_scheduler.go rename to pkg/scheduler/core/generic_scheduler.go index f97e1e2feb6..e1128c01cb8 100644 --- a/plugin/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -33,14 +33,14 @@ import ( utiltrace "k8s.io/apiserver/pkg/util/trace" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - "k8s.io/kubernetes/plugin/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/util" "github.com/golang/glog" - "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" + "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) type FailedPredicateMap map[string][]algorithm.PredicateFailureReason diff --git a/plugin/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go similarity index 98% rename from plugin/pkg/scheduler/core/generic_scheduler_test.go rename to pkg/scheduler/core/generic_scheduler_test.go index 4c09ba66956..cdfc6b20fe5 100644 --- 
a/plugin/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -32,14 +32,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - algorithmpredicates "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - algorithmpriorities "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + algorithmpredicates "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + algorithmpriorities "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) var ( @@ -601,7 +601,7 @@ func TestZeroRequest(t *testing.T) { const expectedPriority int = 25 for _, test := range tests { // This should match the configuration in defaultPriorities() in - // plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want + // pkg/scheduler/algorithmprovider/defaults/defaults.go if you want // to test what's actually in production. 
priorityConfigs := []algorithm.PriorityConfig{ {Map: algorithmpriorities.LeastRequestedPriorityMap, Weight: 1}, diff --git a/plugin/pkg/scheduler/core/scheduling_queue.go b/pkg/scheduler/core/scheduling_queue.go similarity index 99% rename from plugin/pkg/scheduler/core/scheduling_queue.go rename to pkg/scheduler/core/scheduling_queue.go index f258fd1ee76..79dfee0a1d7 100644 --- a/plugin/pkg/scheduler/core/scheduling_queue.go +++ b/pkg/scheduler/core/scheduling_queue.go @@ -35,9 +35,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" - "k8s.io/kubernetes/plugin/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + "k8s.io/kubernetes/pkg/scheduler/util" "github.com/golang/glog" "reflect" diff --git a/plugin/pkg/scheduler/core/scheduling_queue_test.go b/pkg/scheduler/core/scheduling_queue_test.go similarity index 99% rename from plugin/pkg/scheduler/core/scheduling_queue_test.go rename to pkg/scheduler/core/scheduling_queue_test.go index cd3ba05db9b..e69ae867cc2 100644 --- a/plugin/pkg/scheduler/core/scheduling_queue_test.go +++ b/pkg/scheduler/core/scheduling_queue_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/plugin/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/util" ) var mediumPriority = (lowPriority + highPriority) / 2 diff --git a/plugin/pkg/scheduler/factory/BUILD b/pkg/scheduler/factory/BUILD similarity index 100% rename from plugin/pkg/scheduler/factory/BUILD rename to pkg/scheduler/factory/BUILD diff --git a/plugin/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go similarity index 98% rename from 
plugin/pkg/scheduler/factory/factory.go rename to pkg/scheduler/factory/factory.go index 45494fc73bb..04b7a585913 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -15,7 +15,7 @@ limitations under the License. */ // Package factory can set up a scheduler. This code is here instead of -// plugin/cmd/scheduler for both testability and reuse. +// cmd/scheduler for both testability and reuse. package factory import ( @@ -54,15 +54,15 @@ import ( "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/plugin/pkg/scheduler" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation" - "k8s.io/kubernetes/plugin/pkg/scheduler/core" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - "k8s.io/kubernetes/plugin/pkg/scheduler/util" - "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" + "k8s.io/kubernetes/pkg/scheduler" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/api/validation" + "k8s.io/kubernetes/pkg/scheduler/core" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) const ( @@ -565,7 +565,7 @@ func (c *configFactory) addPodToCache(obj interface{}) { c.podQueue.AssignedPodAdded(pod) // NOTE: Updating equivalence cache of addPodToCache has been - // handled optimistically in: plugin/pkg/scheduler/scheduler.go#assume() + // handled optimistically in: pkg/scheduler/scheduler.go#assume() } func (c *configFactory) updatePodInCache(oldObj, newObj interface{}) { diff --git a/plugin/pkg/scheduler/factory/factory_test.go 
b/pkg/scheduler/factory/factory_test.go similarity index 97% rename from plugin/pkg/scheduler/factory/factory_test.go rename to pkg/scheduler/factory/factory_test.go index 92b0f271aa4..7ca0dc36cd1 100644 --- a/plugin/pkg/scheduler/factory/factory_test.go +++ b/pkg/scheduler/factory/factory_test.go @@ -33,13 +33,13 @@ import ( utiltesting "k8s.io/client-go/util/testing" "k8s.io/kubernetes/pkg/api/legacyscheme" apitesting "k8s.io/kubernetes/pkg/api/testing" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - latestschedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest" - "k8s.io/kubernetes/plugin/pkg/scheduler/core" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" - "k8s.io/kubernetes/plugin/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest" + "k8s.io/kubernetes/pkg/scheduler/core" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" + "k8s.io/kubernetes/pkg/scheduler/util" ) const enableEquivalenceCache = true diff --git a/plugin/pkg/scheduler/factory/plugins.go b/pkg/scheduler/factory/plugins.go similarity index 98% rename from plugin/pkg/scheduler/factory/plugins.go rename to pkg/scheduler/factory/plugins.go index a096030775e..b8733d2961d 100644 --- a/plugin/pkg/scheduler/factory/plugins.go +++ b/pkg/scheduler/factory/plugins.go @@ -24,13 +24,13 @@ import ( "sync" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + 
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "github.com/golang/glog" - "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" + "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) // PluginFactoryArgs are passed to all plugin factory functions. diff --git a/plugin/pkg/scheduler/factory/plugins_test.go b/pkg/scheduler/factory/plugins_test.go similarity index 95% rename from plugin/pkg/scheduler/factory/plugins_test.go rename to pkg/scheduler/factory/plugins_test.go index 0f78fd1789d..a3508c139d2 100644 --- a/plugin/pkg/scheduler/factory/plugins_test.go +++ b/pkg/scheduler/factory/plugins_test.go @@ -19,8 +19,8 @@ package factory import ( "testing" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/api" ) func TestAlgorithmNameValidation(t *testing.T) { diff --git a/plugin/pkg/scheduler/metrics/BUILD b/pkg/scheduler/metrics/BUILD similarity index 100% rename from plugin/pkg/scheduler/metrics/BUILD rename to pkg/scheduler/metrics/BUILD diff --git a/plugin/pkg/scheduler/metrics/metrics.go b/pkg/scheduler/metrics/metrics.go similarity index 100% rename from plugin/pkg/scheduler/metrics/metrics.go rename to pkg/scheduler/metrics/metrics.go diff --git a/plugin/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go similarity index 97% rename from plugin/pkg/scheduler/scheduler.go rename to pkg/scheduler/scheduler.go index b58123eca27..9fae7d117f7 100644 --- a/plugin/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -29,16 +29,16 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - schedulerapi 
"k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/core" - "k8s.io/kubernetes/plugin/pkg/scheduler/metrics" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - "k8s.io/kubernetes/plugin/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/core" + "k8s.io/kubernetes/pkg/scheduler/metrics" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/util" "github.com/golang/glog" - "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" + "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) // Binder knows how to write a binding. diff --git a/plugin/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go similarity index 98% rename from plugin/pkg/scheduler/scheduler_test.go rename to pkg/scheduler/scheduler_test.go index cc943f6c7af..73d4abcc280 100644 --- a/plugin/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -34,13 +34,13 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/plugin/pkg/scheduler/core" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" - "k8s.io/kubernetes/plugin/pkg/scheduler/util" - "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/core" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" + schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" + "k8s.io/kubernetes/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) type fakeBinder 
struct { diff --git a/plugin/pkg/scheduler/schedulercache/BUILD b/pkg/scheduler/schedulercache/BUILD similarity index 100% rename from plugin/pkg/scheduler/schedulercache/BUILD rename to pkg/scheduler/schedulercache/BUILD diff --git a/plugin/pkg/scheduler/schedulercache/cache.go b/pkg/scheduler/schedulercache/cache.go similarity index 100% rename from plugin/pkg/scheduler/schedulercache/cache.go rename to pkg/scheduler/schedulercache/cache.go diff --git a/plugin/pkg/scheduler/schedulercache/cache_test.go b/pkg/scheduler/schedulercache/cache_test.go similarity index 99% rename from plugin/pkg/scheduler/schedulercache/cache_test.go rename to pkg/scheduler/schedulercache/cache_test.go index 9a10324393e..b5e1243a474 100644 --- a/plugin/pkg/scheduler/schedulercache/cache_test.go +++ b/pkg/scheduler/schedulercache/cache_test.go @@ -29,8 +29,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" - schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *NodeInfo) { diff --git a/plugin/pkg/scheduler/schedulercache/interface.go b/pkg/scheduler/schedulercache/interface.go similarity index 100% rename from plugin/pkg/scheduler/schedulercache/interface.go rename to pkg/scheduler/schedulercache/interface.go diff --git a/plugin/pkg/scheduler/schedulercache/node_info.go b/pkg/scheduler/schedulercache/node_info.go similarity index 99% rename from plugin/pkg/scheduler/schedulercache/node_info.go rename to pkg/scheduler/schedulercache/node_info.go index 99fb77430c1..c59a2ebd686 100644 --- a/plugin/pkg/scheduler/schedulercache/node_info.go +++ b/pkg/scheduler/schedulercache/node_info.go @@ -25,8 +25,8 @@ import ( 
"k8s.io/apimachinery/pkg/api/resource" clientcache "k8s.io/client-go/tools/cache" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" - "k8s.io/kubernetes/plugin/pkg/scheduler/util" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + "k8s.io/kubernetes/pkg/scheduler/util" ) var emptyResource = Resource{} diff --git a/plugin/pkg/scheduler/schedulercache/util.go b/pkg/scheduler/schedulercache/util.go similarity index 100% rename from plugin/pkg/scheduler/schedulercache/util.go rename to pkg/scheduler/schedulercache/util.go diff --git a/plugin/pkg/scheduler/testing/BUILD b/pkg/scheduler/testing/BUILD similarity index 100% rename from plugin/pkg/scheduler/testing/BUILD rename to pkg/scheduler/testing/BUILD diff --git a/plugin/pkg/scheduler/testing/fake_cache.go b/pkg/scheduler/testing/fake_cache.go similarity index 97% rename from plugin/pkg/scheduler/testing/fake_cache.go rename to pkg/scheduler/testing/fake_cache.go index feeb048e52b..be77503dd4b 100644 --- a/plugin/pkg/scheduler/testing/fake_cache.go +++ b/pkg/scheduler/testing/fake_cache.go @@ -20,7 +20,7 @@ import ( "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // FakeCache is used for testing diff --git a/plugin/pkg/scheduler/testing/fake_lister.go b/pkg/scheduler/testing/fake_lister.go similarity index 98% rename from plugin/pkg/scheduler/testing/fake_lister.go rename to pkg/scheduler/testing/fake_lister.go index f01457a5bcf..fdf5431fa26 100644 --- a/plugin/pkg/scheduler/testing/fake_lister.go +++ b/pkg/scheduler/testing/fake_lister.go @@ -25,8 +25,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" corelisters "k8s.io/client-go/listers/core/v1" - . 
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + . "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) var _ NodeLister = &FakeNodeLister{} diff --git a/plugin/pkg/scheduler/testing/pods_to_cache.go b/pkg/scheduler/testing/pods_to_cache.go similarity index 96% rename from plugin/pkg/scheduler/testing/pods_to_cache.go rename to pkg/scheduler/testing/pods_to_cache.go index 94f630b53cf..2c5f6c6bf02 100644 --- a/plugin/pkg/scheduler/testing/pods_to_cache.go +++ b/pkg/scheduler/testing/pods_to_cache.go @@ -19,7 +19,7 @@ package testing import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) // PodsToCache is used for testing diff --git a/plugin/pkg/scheduler/testutil.go b/pkg/scheduler/testutil.go similarity index 94% rename from plugin/pkg/scheduler/testutil.go rename to pkg/scheduler/testutil.go index 7976353ed4e..249ced16cd8 100644 --- a/plugin/pkg/scheduler/testutil.go +++ b/pkg/scheduler/testutil.go @@ -23,10 +23,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/core" - "k8s.io/kubernetes/plugin/pkg/scheduler/util" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/core" + "k8s.io/kubernetes/pkg/scheduler/util" ) // FakeConfigurator is an implementation for test. 
diff --git a/plugin/pkg/scheduler/util/BUILD b/pkg/scheduler/util/BUILD similarity index 100% rename from plugin/pkg/scheduler/util/BUILD rename to pkg/scheduler/util/BUILD diff --git a/plugin/pkg/scheduler/util/backoff_utils.go b/pkg/scheduler/util/backoff_utils.go similarity index 100% rename from plugin/pkg/scheduler/util/backoff_utils.go rename to pkg/scheduler/util/backoff_utils.go diff --git a/plugin/pkg/scheduler/util/backoff_utils_test.go b/pkg/scheduler/util/backoff_utils_test.go similarity index 100% rename from plugin/pkg/scheduler/util/backoff_utils_test.go rename to pkg/scheduler/util/backoff_utils_test.go diff --git a/plugin/pkg/scheduler/util/testutil.go b/pkg/scheduler/util/testutil.go similarity index 100% rename from plugin/pkg/scheduler/util/testutil.go rename to pkg/scheduler/util/testutil.go diff --git a/plugin/pkg/scheduler/util/testutil_test.go b/pkg/scheduler/util/testutil_test.go similarity index 100% rename from plugin/pkg/scheduler/util/testutil_test.go rename to pkg/scheduler/util/testutil_test.go diff --git a/plugin/pkg/scheduler/util/utils.go b/pkg/scheduler/util/utils.go similarity index 100% rename from plugin/pkg/scheduler/util/utils.go rename to pkg/scheduler/util/utils.go diff --git a/plugin/pkg/scheduler/util/utils_test.go b/pkg/scheduler/util/utils_test.go similarity index 100% rename from plugin/pkg/scheduler/util/utils_test.go rename to pkg/scheduler/util/utils_test.go diff --git a/plugin/pkg/scheduler/volumebinder/BUILD b/pkg/scheduler/volumebinder/BUILD similarity index 100% rename from plugin/pkg/scheduler/volumebinder/BUILD rename to pkg/scheduler/volumebinder/BUILD diff --git a/plugin/pkg/scheduler/volumebinder/volume_binder.go b/pkg/scheduler/volumebinder/volume_binder.go similarity index 100% rename from plugin/pkg/scheduler/volumebinder/volume_binder.go rename to pkg/scheduler/volumebinder/volume_binder.go diff --git a/plugin/pkg/admission/defaulttolerationseconds/admission.go 
b/plugin/pkg/admission/defaulttolerationseconds/admission.go index 05e170a96b4..408ead36eb6 100644 --- a/plugin/pkg/admission/defaulttolerationseconds/admission.go +++ b/plugin/pkg/admission/defaulttolerationseconds/admission.go @@ -25,7 +25,7 @@ import ( "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm" ) var ( diff --git a/plugin/pkg/admission/defaulttolerationseconds/admission_test.go b/plugin/pkg/admission/defaulttolerationseconds/admission_test.go index 391d374882a..b94e41fe5b1 100644 --- a/plugin/pkg/admission/defaulttolerationseconds/admission_test.go +++ b/plugin/pkg/admission/defaulttolerationseconds/admission_test.go @@ -22,7 +22,7 @@ import ( "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithm" ) func TestForgivenessAdmission(t *testing.T) { diff --git a/plugin/pkg/admission/podtolerationrestriction/admission.go b/plugin/pkg/admission/podtolerationrestriction/admission.go index 88345a2fbd8..3318e221b2c 100644 --- a/plugin/pkg/admission/podtolerationrestriction/admission.go +++ b/plugin/pkg/admission/podtolerationrestriction/admission.go @@ -35,10 +35,10 @@ import ( corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" "k8s.io/kubernetes/pkg/kubeapiserver/admission/util" + "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/util/tolerations" pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" pluginapiv1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) // Register 
registers a plugin diff --git a/plugin/pkg/admission/podtolerationrestriction/admission_test.go b/plugin/pkg/admission/podtolerationrestriction/admission_test.go index 45b1cb9478a..1a60e5f2134 100644 --- a/plugin/pkg/admission/podtolerationrestriction/admission_test.go +++ b/plugin/pkg/admission/podtolerationrestriction/admission_test.go @@ -30,9 +30,9 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" + "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/util/tolerations" pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) // TestPodAdmission verifies various scenarios involving pod/namespace tolerations diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index cd97d8f9df8..a8090eaf2ad 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -39,7 +39,7 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/daemon" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 58c01bf61da..637a9eace0b 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -91,12 +91,12 @@ import ( "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/master/ports" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" sshutil "k8s.io/kubernetes/pkg/ssh" "k8s.io/kubernetes/pkg/util/system" taintutils "k8s.io/kubernetes/pkg/util/taints" utilversion "k8s.io/kubernetes/pkg/util/version" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" testutil "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 4d0e27670b4..1c34ea8998c 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" - priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" + priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" diff --git a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go index 29832a9da67..463ec7641f7 100644 --- a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go +++ b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go @@ -25,8 +25,8 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apis/core/helper" + 
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/test/integration/framework" ) diff --git a/test/integration/scheduler/extender_test.go b/test/integration/scheduler/extender_test.go index 780c3a0376c..74289507f04 100644 --- a/test/integration/scheduler/extender_test.go +++ b/test/integration/scheduler/extender_test.go @@ -38,10 +38,10 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/plugin/pkg/scheduler" - _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" + "k8s.io/kubernetes/pkg/scheduler" + _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/factory" e2e "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/integration/framework" ) diff --git a/test/integration/scheduler/local-pv-neg-affinity_test.go b/test/integration/scheduler/local-pv-neg-affinity_test.go index 589089f1a3e..f613bc9a3c8 100644 --- a/test/integration/scheduler/local-pv-neg-affinity_test.go +++ b/test/integration/scheduler/local-pv-neg-affinity_test.go @@ -42,8 +42,8 @@ import ( "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" - "k8s.io/kubernetes/plugin/pkg/scheduler" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" + "k8s.io/kubernetes/pkg/scheduler" + "k8s.io/kubernetes/pkg/scheduler/factory" "k8s.io/kubernetes/test/integration/framework" ) diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go index ca92dbae836..6ad507353a2 100644 --- a/test/integration/scheduler/preemption_test.go +++ 
b/test/integration/scheduler/preemption_test.go @@ -33,8 +33,8 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/features" - _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" - "k8s.io/kubernetes/plugin/pkg/scheduler/core" + _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" + "k8s.io/kubernetes/pkg/scheduler/core" testutils "k8s.io/kubernetes/test/utils" "github.com/golang/glog" diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index a6e851e5499..77770c59fd3 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -39,16 +39,16 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" + schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apis/componentconfig" - schedulerapp "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app" - "k8s.io/kubernetes/plugin/pkg/scheduler" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" - "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/pkg/scheduler" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/factory" + "k8s.io/kubernetes/pkg/scheduler/schedulercache" "k8s.io/kubernetes/test/integration/framework" ) diff --git a/test/integration/scheduler/taint_test.go b/test/integration/scheduler/taint_test.go index bb227ba4dd2..bbdb6a7bd74 100644 --- a/test/integration/scheduler/taint_test.go +++ b/test/integration/scheduler/taint_test.go @@ -39,12 +39,12 @@ 
import ( internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" "k8s.io/kubernetes/pkg/controller/nodelifecycle" kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" + "k8s.io/kubernetes/pkg/scheduler" + "k8s.io/kubernetes/pkg/scheduler/algorithm" + "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" + "k8s.io/kubernetes/pkg/scheduler/factory" "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction" pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" - "k8s.io/kubernetes/plugin/pkg/scheduler" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" "k8s.io/kubernetes/test/integration/framework" ) diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index 4e66e0855cf..1ccd34826ea 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -35,9 +35,9 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api/legacyscheme" podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/plugin/pkg/scheduler" - _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" + "k8s.io/kubernetes/pkg/scheduler" + _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" + "k8s.io/kubernetes/pkg/scheduler/factory" "k8s.io/kubernetes/test/integration/framework" "net/http/httptest" diff --git a/test/integration/scheduler_perf/scheduler_test.go b/test/integration/scheduler_perf/scheduler_test.go index e6073377d75..4d13e22441f 100644 --- a/test/integration/scheduler_perf/scheduler_test.go +++ b/test/integration/scheduler_perf/scheduler_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/plugin/pkg/scheduler" + 
"k8s.io/kubernetes/pkg/scheduler" testutils "k8s.io/kubernetes/test/utils" "math" "strconv" diff --git a/test/integration/scheduler_perf/util.go b/test/integration/scheduler_perf/util.go index 4e6a9025c9f..ba5de428d54 100644 --- a/test/integration/scheduler_perf/util.go +++ b/test/integration/scheduler_perf/util.go @@ -29,9 +29,9 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/plugin/pkg/scheduler" - _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" + "k8s.io/kubernetes/pkg/scheduler" + _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" + "k8s.io/kubernetes/pkg/scheduler/factory" "k8s.io/kubernetes/test/integration/framework" ) diff --git a/test/test_owners.csv b/test/test_owners.csv index 170579118d5..a16928b5cb5 100644 --- a/test/test_owners.csv +++ b/test/test_owners.csv @@ -515,11 +515,13 @@ Volumes NFS should be mountable,rrati,0,storage Volumes PD should be mountable,caesarxuchao,1,storage Volumes iSCSI should be mountable,jsafrane,1,storage Volumes vsphere should be mountable,jsafrane,0,storage +k8s.io/client-go/tools/leaderelection,xiang90,1, k8s.io/kubernetes/cmd/genutils,rmmh,1, k8s.io/kubernetes/cmd/hyperkube,jbeda,0, k8s.io/kubernetes/cmd/kube-apiserver/app/options,nikhiljindal,0, k8s.io/kubernetes/cmd/kube-controller-manager/app,dchen1107,1, k8s.io/kubernetes/cmd/kube-proxy/app,luxas,1, +k8s.io/kubernetes/cmd/kube-scheduler/app,deads2k,1, k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install,ixdy,1, k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation,caesarxuchao,1, k8s.io/kubernetes/cmd/kubeadm/app/cmd,caesarxuchao,1, @@ -575,7 +577,6 @@ k8s.io/kubernetes/pkg/apis/rbac/validation,erictune,0, k8s.io/kubernetes/pkg/apis/storage/validation,caesarxuchao,1, k8s.io/kubernetes/pkg/auth/authorizer/abac,liggitt,0, k8s.io/kubernetes/pkg/client/chaosclient,deads2k,1, 
-k8s.io/client-go/tools/leaderelection,xiang90,1, k8s.io/kubernetes/pkg/client/legacylisters,jsafrane,1, k8s.io/kubernetes/pkg/client/listers/batch/internalversion,mqliang,0, k8s.io/kubernetes/pkg/client/listers/extensions/internalversion,eparis,1, @@ -760,6 +761,16 @@ k8s.io/kubernetes/pkg/registry/rbac/reconciliation,roberthbailey,1, k8s.io/kubernetes/pkg/registry/rbac/validation,rkouj,0, k8s.io/kubernetes/pkg/registry/storage/storageclass,brendandburns,1, k8s.io/kubernetes/pkg/registry/storage/storageclass/storage,wojtek-t,1, +k8s.io/kubernetes/pkg/scheduler,fgrzadkowski,0, +k8s.io/kubernetes/pkg/scheduler/algorithm/predicates,fgrzadkowski,0, +k8s.io/kubernetes/pkg/scheduler/algorithm/priorities,fgrzadkowski,0, +k8s.io/kubernetes/pkg/scheduler/algorithmprovider,fgrzadkowski,0, +k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults,fgrzadkowski,0, +k8s.io/kubernetes/pkg/scheduler/api/validation,fgrzadkowski,0, +k8s.io/kubernetes/pkg/scheduler/core,madhusudancs,1, +k8s.io/kubernetes/pkg/scheduler/factory,fgrzadkowski,0, +k8s.io/kubernetes/pkg/scheduler/schedulercache,fgrzadkowski,0, +k8s.io/kubernetes/pkg/scheduler/util,wojtek-t,1, k8s.io/kubernetes/pkg/security/apparmor,bgrant0607,1, k8s.io/kubernetes/pkg/security/podsecuritypolicy,erictune,0, k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor,rrati,0, @@ -830,7 +841,6 @@ k8s.io/kubernetes/pkg/volume/util,saad-ali,0, k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations,freehan,1, k8s.io/kubernetes/pkg/volume/util/operationexecutor,rkouj,0, k8s.io/kubernetes/pkg/volume/vsphere_volume,deads2k,1, -k8s.io/kubernetes/plugin/cmd/kube-scheduler/app,deads2k,1, k8s.io/kubernetes/plugin/pkg/admission/admit,piosz,1, k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages,ncdc,1, k8s.io/kubernetes/plugin/pkg/admission/antiaffinity,timothysc,1, @@ -854,16 +864,6 @@ k8s.io/kubernetes/plugin/pkg/admission/serviceaccount,liggitt,0, k8s.io/kubernetes/plugin/pkg/admission/storageclass/default,pmorie,1, 
k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac,rrati,0, k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy,mml,1, -k8s.io/kubernetes/plugin/pkg/scheduler,fgrzadkowski,0, -k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates,fgrzadkowski,0, -k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities,fgrzadkowski,0, -k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider,fgrzadkowski,0, -k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults,fgrzadkowski,0, -k8s.io/kubernetes/plugin/pkg/scheduler/api/validation,fgrzadkowski,0, -k8s.io/kubernetes/plugin/pkg/scheduler/core,madhusudancs,1, -k8s.io/kubernetes/plugin/pkg/scheduler/factory,fgrzadkowski,0, -k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache,fgrzadkowski,0, -k8s.io/kubernetes/plugin/pkg/scheduler/util,wojtek-t,1, k8s.io/kubernetes/test/e2e,kevin-wangzefeng,1, k8s.io/kubernetes/test/e2e/chaosmonkey,pmorie,1, k8s.io/kubernetes/test/e2e_node,mml,1, From 85c586255290e867a6bf7e9e16684055618b5788 Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Wed, 3 Jan 2018 18:23:05 -0800 Subject: [PATCH 624/794] Fix scheduler refs in BUILD files. Update references to moved scheduler code. 
--- build/BUILD | 4 +- build/debs/BUILD | 2 +- build/visible_to/BUILD | 2 +- cmd/BUILD | 1 + cmd/genkubedocs/BUILD | 2 +- cmd/genman/BUILD | 2 +- cmd/hyperkube/BUILD | 2 +- cmd/kube-scheduler/BUILD | 8 +-- cmd/kube-scheduler/app/BUILD | 12 ++--- cmd/kubeadm/app/phases/addons/proxy/BUILD | 2 +- cmd/kubeadm/app/preflight/BUILD | 2 +- examples/BUILD | 4 +- pkg/BUILD | 1 + pkg/controller/cloud/BUILD | 4 +- pkg/controller/daemon/BUILD | 8 +-- pkg/controller/daemon/util/BUILD | 2 +- pkg/controller/nodelifecycle/BUILD | 4 +- pkg/kubelet/BUILD | 6 +-- pkg/kubelet/cm/BUILD | 2 +- pkg/kubelet/cm/deviceplugin/BUILD | 4 +- pkg/kubelet/eviction/BUILD | 2 +- pkg/kubelet/lifecycle/BUILD | 6 +-- pkg/kubelet/preemption/BUILD | 4 +- pkg/scheduler/BUILD | 54 +++++++++---------- pkg/scheduler/algorithm/BUILD | 14 ++--- pkg/scheduler/algorithm/predicates/BUILD | 22 ++++---- pkg/scheduler/algorithm/priorities/BUILD | 24 ++++----- pkg/scheduler/algorithm/priorities/util/BUILD | 4 +- pkg/scheduler/algorithmprovider/BUILD | 10 ++-- .../algorithmprovider/defaults/BUILD | 22 ++++---- pkg/scheduler/api/BUILD | 8 +-- pkg/scheduler/api/latest/BUILD | 6 +-- pkg/scheduler/api/v1/BUILD | 4 +- pkg/scheduler/api/validation/BUILD | 8 +-- pkg/scheduler/core/BUILD | 34 ++++++------ pkg/scheduler/factory/BUILD | 38 ++++++------- pkg/scheduler/metrics/BUILD | 2 +- pkg/scheduler/schedulercache/BUILD | 12 ++--- pkg/scheduler/testing/BUILD | 6 +-- pkg/scheduler/util/BUILD | 4 +- pkg/scheduler/volumebinder/BUILD | 2 +- plugin/BUILD | 2 - .../admission/defaulttolerationseconds/BUILD | 4 +- .../admission/podtolerationrestriction/BUILD | 4 +- test/e2e/apps/BUILD | 2 +- test/e2e/framework/BUILD | 4 +- test/e2e/scheduling/BUILD | 2 +- .../defaulttolerationseconds/BUILD | 2 +- test/integration/scheduler/BUILD | 22 ++++---- test/integration/scheduler_perf/BUILD | 8 +-- 50 files changed, 205 insertions(+), 205 deletions(-) diff --git a/build/BUILD b/build/BUILD index 5f531a1f663..7e7beb7d671 100644 --- a/build/BUILD 
+++ b/build/BUILD @@ -38,7 +38,7 @@ DOCKERIZED_BINARIES = { }, "kube-scheduler": { "base": "@official_busybox//image", - "target": "//plugin/cmd/kube-scheduler:kube-scheduler", + "target": "//cmd/kube-scheduler:kube-scheduler", }, "kube-proxy": { "base": "@debian-iptables-amd64//image", @@ -127,7 +127,7 @@ release_filegroup( "//cmd/hyperkube", "//cmd/kube-apiserver", "//cmd/kube-controller-manager", - "//plugin/cmd/kube-scheduler", + "//cmd/kube-scheduler", "//vendor/k8s.io/kube-aggregator", ], ) diff --git a/build/debs/BUILD b/build/debs/BUILD index 1ff31b739ec..d8a195dbc74 100644 --- a/build/debs/BUILD +++ b/build/debs/BUILD @@ -41,7 +41,7 @@ deb_data( name = "kube-scheduler", data = [ { - "files": ["//plugin/cmd/kube-scheduler"], + "files": ["//cmd/kube-scheduler"], "mode": "0755", "dir": "/usr/bin", }, diff --git a/build/visible_to/BUILD b/build/visible_to/BUILD index b9f62f1f234..08ca5450af9 100644 --- a/build/visible_to/BUILD +++ b/build/visible_to/BUILD @@ -210,7 +210,7 @@ package_group( "//cmd/clicheck", "//cmd/hyperkube", "//cmd/kube-proxy/app", - "//plugin/cmd/kube-scheduler/app", + "//cmd/kube-scheduler/app", ], ) diff --git a/cmd/BUILD b/cmd/BUILD index e8ed83675ae..1d4c2545ec8 100644 --- a/cmd/BUILD +++ b/cmd/BUILD @@ -26,6 +26,7 @@ filegroup( "//cmd/kube-apiserver:all-srcs", "//cmd/kube-controller-manager:all-srcs", "//cmd/kube-proxy:all-srcs", + "//cmd/kube-scheduler:all-srcs", "//cmd/kubeadm:all-srcs", "//cmd/kubectl:all-srcs", "//cmd/kubelet:all-srcs", diff --git a/cmd/genkubedocs/BUILD b/cmd/genkubedocs/BUILD index ef6c78a34e9..4041333ae0d 100644 --- a/cmd/genkubedocs/BUILD +++ b/cmd/genkubedocs/BUILD @@ -26,9 +26,9 @@ go_library( "//cmd/kube-apiserver/app:go_default_library", "//cmd/kube-controller-manager/app:go_default_library", "//cmd/kube-proxy/app:go_default_library", + "//cmd/kube-scheduler/app:go_default_library", "//cmd/kubeadm/app/cmd:go_default_library", "//cmd/kubelet/app:go_default_library", - 
"//plugin/cmd/kube-scheduler/app:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/cobra/doc:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/cmd/genman/BUILD b/cmd/genman/BUILD index e2e4f9df623..c3fcedb8074 100644 --- a/cmd/genman/BUILD +++ b/cmd/genman/BUILD @@ -22,11 +22,11 @@ go_library( "//cmd/kube-apiserver/app:go_default_library", "//cmd/kube-controller-manager/app:go_default_library", "//cmd/kube-proxy/app:go_default_library", + "//cmd/kube-scheduler/app:go_default_library", "//cmd/kubeadm/app/cmd:go_default_library", "//cmd/kubelet/app:go_default_library", "//pkg/kubectl/cmd:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", - "//plugin/cmd/kube-scheduler/app:go_default_library", "//vendor/github.com/cpuguy83/go-md2man/md2man:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/cmd/hyperkube/BUILD b/cmd/hyperkube/BUILD index e806fae7438..09ce16626f5 100644 --- a/cmd/hyperkube/BUILD +++ b/cmd/hyperkube/BUILD @@ -51,6 +51,7 @@ go_library( "//cmd/kube-controller-manager/app:go_default_library", "//cmd/kube-controller-manager/app/options:go_default_library", "//cmd/kube-proxy/app:go_default_library", + "//cmd/kube-scheduler/app:go_default_library", "//cmd/kubelet/app:go_default_library", "//cmd/kubelet/app/options:go_default_library", "//pkg/client/metrics/prometheus:go_default_library", @@ -59,7 +60,6 @@ go_library( "//pkg/util/template:go_default_library", "//pkg/version/prometheus:go_default_library", "//pkg/version/verflag:go_default_library", - "//plugin/cmd/kube-scheduler/app:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", diff --git a/cmd/kube-scheduler/BUILD b/cmd/kube-scheduler/BUILD index 
905919efa49..88e75f4e819 100644 --- a/cmd/kube-scheduler/BUILD +++ b/cmd/kube-scheduler/BUILD @@ -10,7 +10,7 @@ load("//pkg/version:def.bzl", "version_x_defs") go_binary( name = "kube-scheduler", embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/cmd/kube-scheduler", + importpath = "k8s.io/kubernetes/cmd/kube-scheduler", pure = "on", x_defs = version_x_defs(), ) @@ -18,11 +18,11 @@ go_binary( go_library( name = "go_default_library", srcs = ["scheduler.go"], - importpath = "k8s.io/kubernetes/plugin/cmd/kube-scheduler", + importpath = "k8s.io/kubernetes/cmd/kube-scheduler", deps = [ + "//cmd/kube-scheduler/app:go_default_library", "//pkg/client/metrics/prometheus:go_default_library", "//pkg/version/prometheus:go_default_library", - "//plugin/cmd/kube-scheduler/app:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", ], @@ -39,7 +39,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//plugin/cmd/kube-scheduler/app:all-srcs", + "//cmd/kube-scheduler/app:all-srcs", ], tags = ["automanaged"], ) diff --git a/cmd/kube-scheduler/app/BUILD b/cmd/kube-scheduler/app/BUILD index 9de3152c18c..990d72a0bcc 100644 --- a/cmd/kube-scheduler/app/BUILD +++ b/cmd/kube-scheduler/app/BUILD @@ -8,7 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["server.go"], - importpath = "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app", + importpath = "k8s.io/kubernetes/cmd/kube-scheduler/app", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/componentconfig:go_default_library", @@ -18,14 +18,14 @@ go_library( "//pkg/features:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/master/ports:go_default_library", + "//pkg/scheduler:go_default_library", + "//pkg/scheduler/algorithmprovider:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/api/latest:go_default_library", + 
"//pkg/scheduler/factory:go_default_library", "//pkg/util/configz:go_default_library", "//pkg/version:go_default_library", "//pkg/version/verflag:go_default_library", - "//plugin/pkg/scheduler:go_default_library", - "//plugin/pkg/scheduler/algorithmprovider:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/api/latest:go_default_library", - "//plugin/pkg/scheduler/factory:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", diff --git a/cmd/kubeadm/app/phases/addons/proxy/BUILD b/cmd/kubeadm/app/phases/addons/proxy/BUILD index 170b09364fd..c08b0dd6a48 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/BUILD +++ b/cmd/kubeadm/app/phases/addons/proxy/BUILD @@ -41,7 +41,7 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library", "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", diff --git a/cmd/kubeadm/app/preflight/BUILD b/cmd/kubeadm/app/preflight/BUILD index ec8e552e1e9..23f8484317a 100644 --- a/cmd/kubeadm/app/preflight/BUILD +++ b/cmd/kubeadm/app/preflight/BUILD @@ -51,6 +51,7 @@ go_library( deps = [ "//cmd/kube-apiserver/app/options:go_default_library", "//cmd/kube-controller-manager/app/options:go_default_library", + "//cmd/kube-scheduler/app:go_default_library", "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//pkg/apis/core/validation:go_default_library", @@ -59,7 +60,6 @@ go_library( "//pkg/util/initsystem:go_default_library", "//pkg/util/version:go_default_library", 
"//pkg/version:go_default_library", - "//plugin/cmd/kube-scheduler/app:go_default_library", "//test/e2e_node/system:go_default_library", "//vendor/github.com/PuerkitoBio/purell:go_default_library", "//vendor/github.com/blang/semver:go_default_library", diff --git a/examples/BUILD b/examples/BUILD index b2175aebcd2..8fa87a454e9 100644 --- a/examples/BUILD +++ b/examples/BUILD @@ -39,8 +39,8 @@ go_test( "//pkg/apis/extensions/validation:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/registry/batch/job:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/api/latest:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/api/latest:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/BUILD b/pkg/BUILD index 66f3fc06e3b..5e06162d6c4 100644 --- a/pkg/BUILD +++ b/pkg/BUILD @@ -95,6 +95,7 @@ filegroup( "//pkg/quota:all-srcs", "//pkg/registry:all-srcs", "//pkg/routes:all-srcs", + "//pkg/scheduler:all-srcs", "//pkg/security:all-srcs", "//pkg/securitycontext:all-srcs", "//pkg/serviceaccount:all-srcs", diff --git a/pkg/controller/cloud/BUILD b/pkg/controller/cloud/BUILD index a482039381d..dfd2739720b 100644 --- a/pkg/controller/cloud/BUILD +++ b/pkg/controller/cloud/BUILD @@ -18,8 +18,8 @@ go_library( "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubelet/apis:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//pkg/util/node:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -56,7 +56,7 @@ go_test( "//pkg/controller:go_default_library", 
"//pkg/controller/testutil:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD index 47a123cd929..761262f4108 100644 --- a/pkg/controller/daemon/BUILD +++ b/pkg/controller/daemon/BUILD @@ -21,11 +21,11 @@ go_library( "//pkg/controller/daemon/util:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/types:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//pkg/util/labels:go_default_library", "//pkg/util/metrics:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -74,9 +74,9 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubelet/types:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//pkg/securitycontext:go_default_library", "//pkg/util/labels:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/controller/daemon/util/BUILD b/pkg/controller/daemon/util/BUILD index d3e5dde3dc4..bb9f1f11cf4 100644 --- a/pkg/controller/daemon/util/BUILD +++ 
b/pkg/controller/daemon/util/BUILD @@ -15,8 +15,8 @@ go_library( "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/types:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//pkg/util/labels:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/nodelifecycle/BUILD b/pkg/controller/nodelifecycle/BUILD index 25d036e3cfb..55f577fe54f 100644 --- a/pkg/controller/nodelifecycle/BUILD +++ b/pkg/controller/nodelifecycle/BUILD @@ -14,12 +14,12 @@ go_library( "//pkg/controller:go_default_library", "//pkg/controller/nodelifecycle/scheduler:go_default_library", "//pkg/controller/util/node:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//pkg/util/metrics:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/system:go_default_library", "//pkg/util/taints:go_default_library", "//pkg/util/version:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -72,9 +72,9 @@ go_test( "//pkg/controller/testutil:go_default_library", "//pkg/controller/util/node:go_default_library", "//pkg/kubelet/apis:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/taints:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index 
5ab4a2d6b6c..1a55be0ba0f 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -87,6 +87,8 @@ go_library( "//pkg/kubelet/util/queue:go_default_library", "//pkg/kubelet/util/sliceutils:go_default_library", "//pkg/kubelet/volumemanager:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/security/apparmor:go_default_library", "//pkg/securitycontext:go_default_library", "//pkg/util/dbus:go_default_library", @@ -103,8 +105,6 @@ go_library( "//pkg/volume/util/types:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", "//pkg/volume/validation:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", "//third_party/forked/golang/expansion:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", @@ -199,13 +199,13 @@ go_test( "//pkg/kubelet/util/queue:go_default_library", "//pkg/kubelet/util/sliceutils:go_default_library", "//pkg/kubelet/volumemanager:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/host_path:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD index a331a8be770..32a81750635 100644 --- a/pkg/kubelet/cm/BUILD +++ b/pkg/kubelet/cm/BUILD @@ -91,7 +91,7 @@ go_library( "//pkg/kubelet/eviction/api:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", 
"//pkg/kubelet/status:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/kubelet/cm/deviceplugin/BUILD b/pkg/kubelet/cm/deviceplugin/BUILD index eb780952c41..11df63bea10 100644 --- a/pkg/kubelet/cm/deviceplugin/BUILD +++ b/pkg/kubelet/cm/deviceplugin/BUILD @@ -25,8 +25,8 @@ go_library( "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/util/store:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//pkg/util/filesystem:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", @@ -61,8 +61,8 @@ go_test( "//pkg/kubelet/apis/deviceplugin/v1alpha:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/util/store:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//pkg/util/filesystem:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/kubelet/eviction/BUILD b/pkg/kubelet/eviction/BUILD index dba5bf9540a..33c570f3b43 100644 --- a/pkg/kubelet/eviction/BUILD +++ b/pkg/kubelet/eviction/BUILD @@ -89,7 +89,7 @@ go_library( "//pkg/kubelet/server/stats:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/format:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", + "//pkg/scheduler/util:go_default_library", 
"//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/kubelet/lifecycle/BUILD b/pkg/kubelet/lifecycle/BUILD index f3f4255321f..a1a64a03e14 100644 --- a/pkg/kubelet/lifecycle/BUILD +++ b/pkg/kubelet/lifecycle/BUILD @@ -21,10 +21,10 @@ go_library( "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/format:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//pkg/security/apparmor:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/kubelet/preemption/BUILD b/pkg/kubelet/preemption/BUILD index 7c0b90843c3..58899d1cd0e 100644 --- a/pkg/kubelet/preemption/BUILD +++ b/pkg/kubelet/preemption/BUILD @@ -19,8 +19,8 @@ go_library( "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/format:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", diff --git a/pkg/scheduler/BUILD b/pkg/scheduler/BUILD index 9e8ae5b5902..d1d7be40435 100644 --- a/pkg/scheduler/BUILD +++ b/pkg/scheduler/BUILD @@ -10,17 +10,17 @@ 
go_test( name = "go_default_test", srcs = ["scheduler_test.go"], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler", + importpath = "k8s.io/kubernetes/pkg/scheduler", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/controller/volume/persistentvolume:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/core:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", - "//plugin/pkg/scheduler/testing:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", - "//plugin/pkg/scheduler/volumebinder:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/core:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/testing:go_default_library", + "//pkg/scheduler/util:go_default_library", + "//pkg/scheduler/volumebinder:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -39,17 +39,17 @@ go_library( "scheduler.go", "testutil.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler", + importpath = "k8s.io/kubernetes/pkg/scheduler", deps = [ "//pkg/features:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/core:go_default_library", - "//plugin/pkg/scheduler/metrics:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", - "//plugin/pkg/scheduler/volumebinder:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + 
"//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/core:go_default_library", + "//pkg/scheduler/metrics:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/util:go_default_library", + "//pkg/scheduler/volumebinder:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -73,16 +73,16 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//plugin/pkg/scheduler/algorithm:all-srcs", - "//plugin/pkg/scheduler/algorithmprovider:all-srcs", - "//plugin/pkg/scheduler/api:all-srcs", - "//plugin/pkg/scheduler/core:all-srcs", - "//plugin/pkg/scheduler/factory:all-srcs", - "//plugin/pkg/scheduler/metrics:all-srcs", - "//plugin/pkg/scheduler/schedulercache:all-srcs", - "//plugin/pkg/scheduler/testing:all-srcs", - "//plugin/pkg/scheduler/util:all-srcs", - "//plugin/pkg/scheduler/volumebinder:all-srcs", + "//pkg/scheduler/algorithm:all-srcs", + "//pkg/scheduler/algorithmprovider:all-srcs", + "//pkg/scheduler/api:all-srcs", + "//pkg/scheduler/core:all-srcs", + "//pkg/scheduler/factory:all-srcs", + "//pkg/scheduler/metrics:all-srcs", + "//pkg/scheduler/schedulercache:all-srcs", + "//pkg/scheduler/testing:all-srcs", + "//pkg/scheduler/util:all-srcs", + "//pkg/scheduler/volumebinder:all-srcs", ], tags = ["automanaged"], ) diff --git a/pkg/scheduler/algorithm/BUILD b/pkg/scheduler/algorithm/BUILD index 644d0e1c04a..30df762dd49 100644 --- a/pkg/scheduler/algorithm/BUILD +++ b/pkg/scheduler/algorithm/BUILD @@ -14,10 +14,10 @@ go_library( "types.go", "well_known_labels.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm", deps = [ - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", + 
"//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", @@ -32,9 +32,9 @@ go_test( "types_test.go", ], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm", deps = [ - "//plugin/pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", ], @@ -51,8 +51,8 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//plugin/pkg/scheduler/algorithm/predicates:all-srcs", - "//plugin/pkg/scheduler/algorithm/priorities:all-srcs", + "//pkg/scheduler/algorithm/predicates:all-srcs", + "//pkg/scheduler/algorithm/priorities:all-srcs", ], tags = ["automanaged"], ) diff --git a/pkg/scheduler/algorithm/predicates/BUILD b/pkg/scheduler/algorithm/predicates/BUILD index ed72a2878a3..6c091d1381d 100644 --- a/pkg/scheduler/algorithm/predicates/BUILD +++ b/pkg/scheduler/algorithm/predicates/BUILD @@ -15,18 +15,18 @@ go_library( "testing_helper.go", "utils.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper/qos:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/util:go_default_library", + "//pkg/scheduler/volumebinder:go_default_library", "//pkg/volume/util:go_default_library", - 
"//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", - "//plugin/pkg/scheduler/volumebinder:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", @@ -50,14 +50,14 @@ go_test( "utils_test.go", ], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", - "//plugin/pkg/scheduler/testing:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/testing:go_default_library", + "//pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/scheduler/algorithm/priorities/BUILD b/pkg/scheduler/algorithm/priorities/BUILD index 027d0112e27..19bb57476f0 100644 --- a/pkg/scheduler/algorithm/priorities/BUILD +++ b/pkg/scheduler/algorithm/priorities/BUILD @@ -25,15 +25,15 @@ go_library( "taint_toleration.go", "test_util.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities", deps = [ "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + 
"//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//pkg/util/node:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -61,13 +61,13 @@ go_test( "taint_toleration_test.go", ], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities", deps = [ "//pkg/kubelet/apis:go_default_library", - "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", - "//plugin/pkg/scheduler/testing:go_default_library", + "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/testing:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", @@ -87,7 +87,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//plugin/pkg/scheduler/algorithm/priorities/util:all-srcs", + "//pkg/scheduler/algorithm/priorities/util:all-srcs", ], tags = ["automanaged"], ) diff --git a/pkg/scheduler/algorithm/priorities/util/BUILD b/pkg/scheduler/algorithm/priorities/util/BUILD index 
d997e4d68b5..d58c0e65b21 100644 --- a/pkg/scheduler/algorithm/priorities/util/BUILD +++ b/pkg/scheduler/algorithm/priorities/util/BUILD @@ -14,7 +14,7 @@ go_test( "util_test.go", ], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -33,7 +33,7 @@ go_library( "topologies.go", "util.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/algorithmprovider/BUILD b/pkg/scheduler/algorithmprovider/BUILD index e9fe7f977f3..4e721f3c83f 100644 --- a/pkg/scheduler/algorithmprovider/BUILD +++ b/pkg/scheduler/algorithmprovider/BUILD @@ -9,17 +9,17 @@ load( go_library( name = "go_default_library", srcs = ["plugins.go"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider", - deps = ["//plugin/pkg/scheduler/algorithmprovider/defaults:go_default_library"], + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithmprovider", + deps = ["//pkg/scheduler/algorithmprovider/defaults:go_default_library"], ) go_test( name = "go_default_test", srcs = ["plugins_test.go"], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithmprovider", deps = [ - "//plugin/pkg/scheduler/factory:go_default_library", + "//pkg/scheduler/factory:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) @@ -35,7 +35,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - 
"//plugin/pkg/scheduler/algorithmprovider/defaults:all-srcs", + "//pkg/scheduler/algorithmprovider/defaults:all-srcs", ], tags = ["automanaged"], ) diff --git a/pkg/scheduler/algorithmprovider/defaults/BUILD b/pkg/scheduler/algorithmprovider/defaults/BUILD index 15cbd49b81d..f58bc3b9706 100644 --- a/pkg/scheduler/algorithmprovider/defaults/BUILD +++ b/pkg/scheduler/algorithmprovider/defaults/BUILD @@ -9,14 +9,14 @@ load( go_library( name = "go_default_library", srcs = ["defaults.go"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults", deps = [ "//pkg/features:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/algorithm/priorities:go_default_library", - "//plugin/pkg/scheduler/core:go_default_library", - "//plugin/pkg/scheduler/factory:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/core:go_default_library", + "//pkg/scheduler/factory:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", @@ -30,14 +30,14 @@ go_test( "defaults_test.go", ], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults", + importpath = "k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/install:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/api/latest:go_default_library", - 
"//plugin/pkg/scheduler/factory:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/api/latest:go_default_library", + "//pkg/scheduler/factory:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/pkg/scheduler/api/BUILD b/pkg/scheduler/api/BUILD index 593cff9295a..eeaef16a422 100644 --- a/pkg/scheduler/api/BUILD +++ b/pkg/scheduler/api/BUILD @@ -13,7 +13,7 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api", + importpath = "k8s.io/kubernetes/pkg/scheduler/api", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -35,9 +35,9 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//plugin/pkg/scheduler/api/latest:all-srcs", - "//plugin/pkg/scheduler/api/v1:all-srcs", - "//plugin/pkg/scheduler/api/validation:all-srcs", + "//pkg/scheduler/api/latest:all-srcs", + "//pkg/scheduler/api/v1:all-srcs", + "//pkg/scheduler/api/validation:all-srcs", ], tags = ["automanaged"], ) diff --git a/pkg/scheduler/api/latest/BUILD b/pkg/scheduler/api/latest/BUILD index ee0f0ffa1c3..439f952b52a 100644 --- a/pkg/scheduler/api/latest/BUILD +++ b/pkg/scheduler/api/latest/BUILD @@ -8,10 +8,10 @@ load( go_library( name = "go_default_library", srcs = ["latest.go"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest", + importpath = "k8s.io/kubernetes/pkg/scheduler/api/latest", deps = [ - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/api/v1:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/api/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", diff --git a/pkg/scheduler/api/v1/BUILD b/pkg/scheduler/api/v1/BUILD index 2516dc06b12..35e291a27d5 100644 --- a/pkg/scheduler/api/v1/BUILD +++ b/pkg/scheduler/api/v1/BUILD @@ -13,9 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api/v1", + importpath = "k8s.io/kubernetes/pkg/scheduler/api/v1", deps = [ - "//plugin/pkg/scheduler/api:go_default_library", + "//pkg/scheduler/api:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/scheduler/api/validation/BUILD b/pkg/scheduler/api/validation/BUILD index 3ec8db39c74..eaffc9a1a8b 100644 --- a/pkg/scheduler/api/validation/BUILD +++ b/pkg/scheduler/api/validation/BUILD @@ -9,9 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation", + importpath = "k8s.io/kubernetes/pkg/scheduler/api/validation", deps = [ - "//plugin/pkg/scheduler/api:go_default_library", + "//pkg/scheduler/api:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", ], ) @@ -20,8 +20,8 @@ go_test( name = "go_default_test", srcs = ["validation_test.go"], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation", - deps = ["//plugin/pkg/scheduler/api:go_default_library"], + importpath = "k8s.io/kubernetes/pkg/scheduler/api/validation", + deps = ["//pkg/scheduler/api:go_default_library"], ) filegroup( diff --git a/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD index ba8f7875106..6b652164e8c 100644 --- a/pkg/scheduler/core/BUILD +++ b/pkg/scheduler/core/BUILD @@ -15,16 +15,16 @@ go_test( 
"scheduling_queue_test.go", ], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/core", + importpath = "k8s.io/kubernetes/pkg/scheduler/core", deps = [ - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/algorithm/priorities:go_default_library", - "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", - "//plugin/pkg/scheduler/testing:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/testing:go_default_library", + "//pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", @@ -43,17 +43,17 @@ go_library( "generic_scheduler.go", "scheduling_queue.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/core", + importpath = "k8s.io/kubernetes/pkg/scheduler/core", deps = [ "//pkg/api/v1/pod:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/util:go_default_library", + "//pkg/scheduler/volumebinder:go_default_library", "//pkg/util/hash:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - 
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", - "//plugin/pkg/scheduler/volumebinder:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/scheduler/factory/BUILD b/pkg/scheduler/factory/BUILD index 41700276833..90645c5a22e 100644 --- a/pkg/scheduler/factory/BUILD +++ b/pkg/scheduler/factory/BUILD @@ -12,22 +12,22 @@ go_library( "factory.go", "plugins.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/factory", + importpath = "k8s.io/kubernetes/pkg/scheduler/factory", deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/apis/core/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//plugin/pkg/scheduler:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/algorithm/priorities:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/api/validation:go_default_library", - "//plugin/pkg/scheduler/core:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", - "//plugin/pkg/scheduler/volumebinder:go_default_library", + "//pkg/scheduler:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/api/validation:go_default_library", + 
"//pkg/scheduler/core:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/util:go_default_library", + "//pkg/scheduler/volumebinder:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", @@ -63,17 +63,17 @@ go_test( "plugins_test.go", ], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/factory", + importpath = "k8s.io/kubernetes/pkg/scheduler/factory", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testing:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/api/latest:go_default_library", - "//plugin/pkg/scheduler/core:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", - "//plugin/pkg/scheduler/testing:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/api/latest:go_default_library", + "//pkg/scheduler/core:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/testing:go_default_library", + "//pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/scheduler/metrics/BUILD b/pkg/scheduler/metrics/BUILD index 7d059ed2112..81d6d19d587 100644 --- a/pkg/scheduler/metrics/BUILD +++ b/pkg/scheduler/metrics/BUILD @@ -8,7 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["metrics.go"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/metrics", + importpath = "k8s.io/kubernetes/pkg/scheduler/metrics", deps = 
["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"], ) diff --git a/pkg/scheduler/schedulercache/BUILD b/pkg/scheduler/schedulercache/BUILD index b8d18e6c424..19d35198f16 100644 --- a/pkg/scheduler/schedulercache/BUILD +++ b/pkg/scheduler/schedulercache/BUILD @@ -8,12 +8,12 @@ go_library( "node_info.go", "util.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", + importpath = "k8s.io/kubernetes/pkg/scheduler/schedulercache", visibility = ["//visibility:public"], deps = [ "//pkg/apis/core/v1/helper:go_default_library", - "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", + "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", @@ -28,10 +28,10 @@ go_test( name = "go_default_test", srcs = ["cache_test.go"], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", + importpath = "k8s.io/kubernetes/pkg/scheduler/schedulercache", deps = [ - "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", - "//plugin/pkg/scheduler/util:go_default_library", + "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/scheduler/testing/BUILD b/pkg/scheduler/testing/BUILD index 1a3d2c07d73..3b4b65c08d5 100644 --- a/pkg/scheduler/testing/BUILD +++ b/pkg/scheduler/testing/BUILD @@ -12,10 +12,10 @@ go_library( "fake_lister.go", "pods_to_cache.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/testing", + importpath = 
"k8s.io/kubernetes/pkg/scheduler/testing", deps = [ - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/pkg/scheduler/util/BUILD b/pkg/scheduler/util/BUILD index 94174d181c7..f333e8cd99a 100644 --- a/pkg/scheduler/util/BUILD +++ b/pkg/scheduler/util/BUILD @@ -14,7 +14,7 @@ go_test( "utils_test.go", ], embed = [":go_default_library"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/util", + importpath = "k8s.io/kubernetes/pkg/scheduler/util", deps = [ "//pkg/apis/scheduling:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -31,7 +31,7 @@ go_library( "testutil.go", "utils.go", ], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/util", + importpath = "k8s.io/kubernetes/pkg/scheduler/util", deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", diff --git a/pkg/scheduler/volumebinder/BUILD b/pkg/scheduler/volumebinder/BUILD index f942bfecdb8..0656eeee274 100644 --- a/pkg/scheduler/volumebinder/BUILD +++ b/pkg/scheduler/volumebinder/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["volume_binder.go"], - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder", + importpath = "k8s.io/kubernetes/pkg/scheduler/volumebinder", visibility = ["//visibility:public"], deps = [ "//pkg/controller/volume/persistentvolume:go_default_library", diff --git a/plugin/BUILD b/plugin/BUILD index 03b74856108..2d7dd155c1c 100644 --- a/plugin/BUILD +++ b/plugin/BUILD @@ -11,7 +11,6 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//plugin/cmd/kube-scheduler:all-srcs", 
"//plugin/pkg/admission/admit:all-srcs", "//plugin/pkg/admission/alwayspullimages:all-srcs", "//plugin/pkg/admission/antiaffinity:all-srcs", @@ -40,7 +39,6 @@ filegroup( "//plugin/pkg/admission/serviceaccount:all-srcs", "//plugin/pkg/admission/storageclass/setdefault:all-srcs", "//plugin/pkg/auth:all-srcs", - "//plugin/pkg/scheduler:all-srcs", ], tags = ["automanaged"], ) diff --git a/plugin/pkg/admission/defaulttolerationseconds/BUILD b/plugin/pkg/admission/defaulttolerationseconds/BUILD index fa601d0f0bb..48ca06b3b83 100644 --- a/plugin/pkg/admission/defaulttolerationseconds/BUILD +++ b/plugin/pkg/admission/defaulttolerationseconds/BUILD @@ -14,7 +14,7 @@ go_test( deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", ], ) @@ -26,7 +26,7 @@ go_library( deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", ], diff --git a/plugin/pkg/admission/podtolerationrestriction/BUILD b/plugin/pkg/admission/podtolerationrestriction/BUILD index 2dd2627f147..9efce635d34 100644 --- a/plugin/pkg/admission/podtolerationrestriction/BUILD +++ b/plugin/pkg/admission/podtolerationrestriction/BUILD @@ -17,9 +17,9 @@ go_test( "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//pkg/util/tolerations:go_default_library", 
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", @@ -43,12 +43,12 @@ go_library( "//pkg/client/listers/core/internalversion:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", "//pkg/kubeapiserver/admission/util:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", "//pkg/util/tolerations:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/install:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index d7194290d61..6780918d0f5 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -38,8 +38,8 @@ go_library( "//pkg/controller/replication:go_default_library", "//pkg/kubectl:go_default_library", "//pkg/master/ports:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//pkg/util/pointer:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 9f7e222fed8..0ecad57686d 100644 --- 
a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -69,6 +69,8 @@ go_library( "//pkg/kubelet/util/format:go_default_library", "//pkg/kubemark:go_default_library", "//pkg/master/ports:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//pkg/security/podsecuritypolicy/seccomp:go_default_library", "//pkg/ssh:go_default_library", "//pkg/util/file:go_default_library", @@ -76,8 +78,6 @@ go_library( "//pkg/util/taints:go_default_library", "//pkg/util/version:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", - "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", "//test/e2e/framework/ginkgowrapper:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/manifest:go_default_library", diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 64f27ed2008..0a9d9bffa51 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -21,8 +21,8 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/quota/evaluator/core:go_default_library", + "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/util/version:go_default_library", - "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", "//test/utils:go_default_library", diff --git a/test/integration/defaulttolerationseconds/BUILD b/test/integration/defaulttolerationseconds/BUILD index febfcd318e1..85781840c39 100644 --- a/test/integration/defaulttolerationseconds/BUILD +++ b/test/integration/defaulttolerationseconds/BUILD @@ -20,8 +20,8 @@ go_test( deps = [ "//pkg/api/testapi:go_default_library", "//pkg/apis/core/helper:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", 
"//plugin/pkg/admission/defaulttolerationseconds:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", "//test/integration/framework:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index ec4b7bb01ae..8c627dfc7c6 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -24,6 +24,7 @@ go_test( importpath = "k8s.io/kubernetes/test/integration/scheduler", tags = ["integration"], deps = [ + "//cmd/kube-scheduler/app:go_default_library", "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/apis/componentconfig:go_default_library", @@ -34,16 +35,15 @@ go_test( "//pkg/controller/volume/persistentvolume:go_default_library", "//pkg/features:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", - "//plugin/cmd/kube-scheduler/app:go_default_library", + "//pkg/scheduler:go_default_library", + "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/algorithmprovider:go_default_library", + "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/core:go_default_library", + "//pkg/scheduler/factory:go_default_library", + "//pkg/scheduler/schedulercache:go_default_library", "//plugin/pkg/admission/podtolerationrestriction:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", - "//plugin/pkg/scheduler:go_default_library", - "//plugin/pkg/scheduler/algorithm:go_default_library", - "//plugin/pkg/scheduler/algorithmprovider:go_default_library", - "//plugin/pkg/scheduler/api:go_default_library", - "//plugin/pkg/scheduler/core:go_default_library", - "//plugin/pkg/scheduler/factory:go_default_library", - "//plugin/pkg/scheduler/schedulercache:go_default_library", "//test/e2e/framework:go_default_library", 
"//test/integration/framework:go_default_library", "//test/utils:go_default_library", @@ -89,9 +89,9 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/v1/pod:go_default_library", - "//plugin/pkg/scheduler:go_default_library", - "//plugin/pkg/scheduler/algorithmprovider:go_default_library", - "//plugin/pkg/scheduler/factory:go_default_library", + "//pkg/scheduler:go_default_library", + "//pkg/scheduler/algorithmprovider:go_default_library", + "//pkg/scheduler/factory:go_default_library", "//test/integration/framework:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/test/integration/scheduler_perf/BUILD b/test/integration/scheduler_perf/BUILD index 56b23a44f7b..1feb38464b6 100644 --- a/test/integration/scheduler_perf/BUILD +++ b/test/integration/scheduler_perf/BUILD @@ -16,9 +16,9 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", - "//plugin/pkg/scheduler:go_default_library", - "//plugin/pkg/scheduler/algorithmprovider:go_default_library", - "//plugin/pkg/scheduler/factory:go_default_library", + "//pkg/scheduler:go_default_library", + "//pkg/scheduler/algorithmprovider:go_default_library", + "//pkg/scheduler/factory:go_default_library", "//test/integration/framework:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -43,7 +43,7 @@ go_test( tags = ["integration"], deps = [ "//pkg/kubelet/apis:go_default_library", - "//plugin/pkg/scheduler:go_default_library", + "//pkg/scheduler:go_default_library", "//test/integration/framework:go_default_library", "//test/utils:go_default_library", "//vendor/github.com/golang/glog:go_default_library", From 6becf18d5b120c181be0ec7efe43694840ab4fc4 Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Fri, 5 Jan 2018 15:36:53 -0800 Subject: [PATCH 625/794] add folder named 
custom in gce --- cluster/gce/custom | 1 + 1 file changed, 1 insertion(+) create mode 120000 cluster/gce/custom diff --git a/cluster/gce/custom b/cluster/gce/custom new file mode 120000 index 00000000000..67a1dec2892 --- /dev/null +++ b/cluster/gce/custom @@ -0,0 +1 @@ +gci \ No newline at end of file From a69db7104dca2a440a837e4fea8cefd625c63447 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Sat, 6 Jan 2018 07:56:10 +0800 Subject: [PATCH 626/794] Do not set BaseURI again BaseURI has been set by NewAccountsClientWithBaseURI method. --- pkg/cloudprovider/providers/azure/azure.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 5435036e6b8..8f31f013508 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -328,14 +328,12 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient storageAccountClient := storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) - storageAccountClient.BaseURI = az.Environment.ResourceManagerEndpoint storageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) storageAccountClient.PollingDelay = 5 * time.Second configureUserAgent(&storageAccountClient.Client) az.StorageAccountClient = storageAccountClient disksClient := disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) - disksClient.BaseURI = az.Environment.ResourceManagerEndpoint disksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) disksClient.PollingDelay = 5 * time.Second configureUserAgent(&disksClient.Client) From c322f1d06504fa5b81e117ec79dd77fb4e769b4f Mon Sep 17 00:00:00 2001 From: Anish Ramasekar Date: Wed, 13 Dec 2017 01:46:06 +0530 Subject: [PATCH 627/794] fix typos in kubectl pkg --- pkg/kubectl/cmd/clusterinfo.go | 2 +- 
pkg/kubectl/cmd/completion.go | 2 +- pkg/kubectl/cmd/expose.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/kubectl/cmd/clusterinfo.go b/pkg/kubectl/cmd/clusterinfo.go index 4924691c68a..2e2b7a8b959 100644 --- a/pkg/kubectl/cmd/clusterinfo.go +++ b/pkg/kubectl/cmd/clusterinfo.go @@ -135,7 +135,7 @@ func printService(out io.Writer, name, link string) { ct.ChangeColor(ct.Green, false, ct.None, false) fmt.Fprint(out, name) ct.ResetColor() - fmt.Fprintf(out, " is running at ") + fmt.Fprint(out, " is running at ") ct.ChangeColor(ct.Yellow, false, ct.None, false) fmt.Fprint(out, link) ct.ResetColor() diff --git a/pkg/kubectl/cmd/completion.go b/pkg/kubectl/cmd/completion.go index d14ca66329b..4a2f2b10ffe 100644 --- a/pkg/kubectl/cmd/completion.go +++ b/pkg/kubectl/cmd/completion.go @@ -46,7 +46,7 @@ const defaultBoilerPlate = ` var ( completion_long = templates.LongDesc(i18n.T(` Output shell completion code for the specified shell (bash or zsh). - The shell code must be evalutated to provide interactive + The shell code must be evaluated to provide interactive completion of kubectl commands. This can be done by sourcing it from the .bash_profile. diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index ed84c1d76b7..b1347fe1436 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -99,7 +99,7 @@ func NewCmdExposeService(f cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().String("protocol", "", i18n.T("The network protocol for the service to be created. Default is 'TCP'.")) cmd.Flags().String("port", "", i18n.T("The port that the service should serve on. Copied from the resource being exposed, if unspecified")) cmd.Flags().String("type", "", i18n.T("Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. Default is 'ClusterIP'.")) - cmd.Flags().String("load-balancer-ip", "", i18n.T("IP to assign to the Load Balancer. 
If empty, an ephemeral IP will be created and used (cloud-provider specific).")) + cmd.Flags().String("load-balancer-ip", "", i18n.T("IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).")) cmd.Flags().String("selector", "", i18n.T("A label selector to use for this service. Only equality-based selector requirements are supported. If empty (the default) infer the selector from the replication controller or replica set.)")) cmd.Flags().StringP("labels", "l", "", "Labels to apply to the service created by this call.") cmd.Flags().String("container-port", "", i18n.T("Synonym for --target-port")) From 9aa4fc0d3b2936c78c8c7ea4aa75e751409aff01 Mon Sep 17 00:00:00 2001 From: Rye Terrell Date: Fri, 5 Jan 2018 21:20:31 -0600 Subject: [PATCH 628/794] Add proxy_read_timeout flag to kubeapi_load_balancer charm. --- cluster/juju/layers/kubeapi-load-balancer/config.yaml | 4 ++++ .../layers/kubeapi-load-balancer/reactive/load_balancer.py | 1 + .../juju/layers/kubeapi-load-balancer/templates/apilb.conf | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/cluster/juju/layers/kubeapi-load-balancer/config.yaml b/cluster/juju/layers/kubeapi-load-balancer/config.yaml index a4678ae02f3..245d7890633 100644 --- a/cluster/juju/layers/kubeapi-load-balancer/config.yaml +++ b/cluster/juju/layers/kubeapi-load-balancer/config.yaml @@ -9,3 +9,7 @@ options: description: | Space-separated list of extra SAN entries to add to the x509 certificate created for the load balancers. + proxy_read_timeout: + type: int + default: 90 + description: Timeout in seconds for reading a response from proxy server. 
diff --git a/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py b/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py index ddb3845a10a..37420c993da 100644 --- a/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py +++ b/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py @@ -151,6 +151,7 @@ def install_load_balancer(apiserver, tls): port=port, server_certificate=server_cert_path, server_key=server_key_path, + proxy_read_timeout=hookenv.config('proxy_read_timeout') ) maybe_write_apilb_logrotate_config() diff --git a/cluster/juju/layers/kubeapi-load-balancer/templates/apilb.conf b/cluster/juju/layers/kubeapi-load-balancer/templates/apilb.conf index 6d1b23e25bf..f771b6d9748 100644 --- a/cluster/juju/layers/kubeapi-load-balancer/templates/apilb.conf +++ b/cluster/juju/layers/kubeapi-load-balancer/templates/apilb.conf @@ -36,6 +36,6 @@ server { add_header X-Stream-Protocol-Version $upstream_http_x_stream_protocol_version; proxy_pass https://target_service; - proxy_read_timeout 90; + proxy_read_timeout {{ proxy_read_timeout }}; } } From 19fb0da059ef4357a7100759ef23257cb38b6e44 Mon Sep 17 00:00:00 2001 From: Yanqiang Miao Date: Fri, 5 Jan 2018 17:07:51 +0800 Subject: [PATCH 629/794] Make sure is not nil Signed-off-by: Yanqiang Miao --- test/e2e_node/cpu_manager_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 7054933aaae..4186d78cb48 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -143,6 +143,9 @@ func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.Ku oldCfg, err := getCurrentKubeletConfig() framework.ExpectNoError(err) newCfg := oldCfg.DeepCopy() + if newCfg.FeatureGates == nil { + newCfg.FeatureGates = make(map[string]bool) + } // Enable CPU Manager using feature gate. 
newCfg.FeatureGates[string(features.CPUManager)] = true From 335c5d959fbcb019d249f50e8642c608803be215 Mon Sep 17 00:00:00 2001 From: Serguei Bezverkhi Date: Thu, 30 Nov 2017 14:10:03 -0500 Subject: [PATCH 630/794] Adding support for Block Volume to rbd plugin --- pkg/volume/rbd/BUILD | 1 + pkg/volume/rbd/disk_manager.go | 4 + pkg/volume/rbd/rbd.go | 252 +++++++++++++++++++++++++++++++++ pkg/volume/rbd/rbd_test.go | 17 ++- pkg/volume/rbd/rbd_util.go | 70 ++++++++- 5 files changed, 337 insertions(+), 7 deletions(-) diff --git a/pkg/volume/rbd/BUILD b/pkg/volume/rbd/BUILD index ea058ed7c67..5a196a794e5 100644 --- a/pkg/volume/rbd/BUILD +++ b/pkg/volume/rbd/BUILD @@ -31,6 +31,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) diff --git a/pkg/volume/rbd/disk_manager.go b/pkg/volume/rbd/disk_manager.go index 9610ebc2b7a..70db99b3cff 100644 --- a/pkg/volume/rbd/disk_manager.go +++ b/pkg/volume/rbd/disk_manager.go @@ -37,12 +37,16 @@ import ( type diskManager interface { // MakeGlobalPDName creates global persistent disk path. MakeGlobalPDName(disk rbd) string + // MakeGlobalVDPDName creates global block disk path. + MakeGlobalVDPDName(disk rbd) string // Attaches the disk to the kubelet's host machine. // If it successfully attaches, the path to the device // is returned. Otherwise, an error will be returned. AttachDisk(disk rbdMounter) (string, error) // Detaches the disk from the kubelet's host machine. DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error + // Detaches the block disk from the kubelet's host machine. + DetachBlockDisk(disk rbdDiskUnmapper, mntPath string) error // Creates a rbd image. 
CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error) // Deletes a rbd image. diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index 6042e743395..0d4e8c21958 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -18,6 +18,8 @@ package rbd import ( "fmt" + "os" + "path/filepath" dstrings "strings" "github.com/golang/glog" @@ -55,6 +57,7 @@ var _ volume.DeletableVolumePlugin = &rbdPlugin{} var _ volume.ProvisionableVolumePlugin = &rbdPlugin{} var _ volume.AttachableVolumePlugin = &rbdPlugin{} var _ volume.ExpandableVolumePlugin = &rbdPlugin{} +var _ volume.BlockVolumePlugin = &rbdPlugin{} const ( rbdPluginName = "kubernetes.io/rbd" @@ -368,6 +371,127 @@ func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol return volume.NewSpecFromVolume(rbdVolume), nil } +func (plugin *rbdPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { + pluginDir := plugin.host.GetVolumeDevicePluginDir(rbdPluginName) + blkutil := volutil.NewBlockVolumePathHandler() + + globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) + if err != nil { + return nil, err + } + glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + globalMapPath := filepath.Dir(globalMapPathUUID) + if len(globalMapPath) == 1 { + return nil, fmt.Errorf("failed to retrieve volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) + } + return getVolumeSpecFromGlobalMapPath(globalMapPath) +} + +func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { + // Retrieve volume spec information from globalMapPath + // globalMapPath example: + // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath} + pool, image, err := getPoolAndImageFromMapPath(globalMapPath) + if err != nil { + return nil, err + } + block := v1.PersistentVolumeBlock + 
rbdVolume := &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + RBD: &v1.RBDPersistentVolumeSource{ + RBDImage: image, + RBDPool: pool, + }, + }, + VolumeMode: &block, + }, + } + + return volume.NewSpecFromPersistentVolume(rbdVolume, true), nil +} + +func (plugin *rbdPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) { + + var uid types.UID + if pod != nil { + uid = pod.UID + } + secret := "" + // var err error + if pod != nil { + secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace) + if err != nil { + return nil, err + } + if len(secretName) > 0 && len(secretNs) > 0 { + // if secret is provided, retrieve it + kubeClient := plugin.host.GetKubeClient() + if kubeClient == nil { + return nil, fmt.Errorf("Cannot get kube client") + } + secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + if err != nil { + err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) + return nil, err + } + for _, data := range secrets.Data { + secret = string(data) + } + } + } + + return plugin.newBlockVolumeMapperInternal(spec, uid, &RBDUtil{}, secret, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) +} + +func (plugin *rbdPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, secret string, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) { + mon, err := getVolumeSourceMonitors(spec) + if err != nil { + return nil, err + } + img, err := getVolumeSourceImage(spec) + if err != nil { + return nil, err + } + pool, err := getVolumeSourcePool(spec) + if err != nil { + return nil, err + } + id, err := getVolumeSourceUser(spec) + if err != nil { + return nil, err + } + keyring, err := getVolumeSourceKeyRing(spec) + if err != nil { + return nil, err + } + ro, err := 
getVolumeSourceReadOnly(spec) + if err != nil { + return nil, err + } + + return &rbdDiskMapper{ + rbd: newRBD(podUID, spec.Name(), img, pool, ro, plugin, manager), + mon: mon, + id: id, + keyring: keyring, + secret: secret, + }, nil +} + +func (plugin *rbdPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { + return plugin.newUnmapperInternal(volName, podUID, &RBDUtil{}) +} + +func (plugin *rbdPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager) (volume.BlockVolumeUnmapper, error) { + return &rbdDiskUnmapper{ + rbdDiskMapper: &rbdDiskMapper{ + rbd: newRBD(podUID, volName, "", "", false, plugin, manager), + mon: make([]string, 0), + }, + }, nil +} + func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil { return nil, fmt.Errorf("spec.PersistentVolumeSource.Spec.RBD is nil") @@ -661,6 +785,134 @@ func (c *rbdUnmounter) TearDownAt(dir string) error { return nil } +var _ volume.BlockVolumeMapper = &rbdDiskMapper{} + +type rbdDiskMapper struct { + *rbd + mon []string + id string + keyring string + secret string + adminSecret string + adminId string + imageFormat string + imageFeatures []string +} + +var _ volume.BlockVolumeUnmapper = &rbdDiskUnmapper{} + +// GetGlobalMapPath returns global map path and error +// path: plugins/kubernetes.io/{PluginName}/volumeDevices/{rbd pool}-image-{rbd image-name}/{podUid} +func (rbd *rbd) GetGlobalMapPath(spec *volume.Spec) (string, error) { + return rbd.rbdGlobalMapPath(spec) +} + +// GetPodDeviceMapPath returns pod device map path and volume name +// path: pods/{podUid}/volumeDevices/kubernetes.io~rbd +// volumeName: pv0001 +func (rbd *rbd) GetPodDeviceMapPath() (string, string) { + return rbd.rbdPodDeviceMapPath() +} + +func (rbd *rbdDiskMapper) SetUpDevice() (string, error) { + return "", nil +} + +func (rbd *rbd) rbdGlobalMapPath(spec *volume.Spec) 
(string, error) { + var err error + mon, err := getVolumeSourceMonitors(spec) + if err != nil { + return "", err + } + img, err := getVolumeSourceImage(spec) + if err != nil { + return "", err + } + pool, err := getVolumeSourcePool(spec) + if err != nil { + return "", err + } + ro, err := getVolumeSourceReadOnly(spec) + if err != nil { + return "", err + } + + mounter := &rbdMounter{ + rbd: newRBD("", spec.Name(), img, pool, ro, rbd.plugin, &RBDUtil{}), + Mon: mon, + } + return rbd.manager.MakeGlobalVDPDName(*mounter.rbd), nil +} + +func (rbd *rbd) rbdPodDeviceMapPath() (string, string) { + name := rbdPluginName + return rbd.plugin.host.GetPodVolumeDeviceDir(rbd.podUID, strings.EscapeQualifiedNameForDisk(name)), rbd.volName +} + +type rbdDiskUnmapper struct { + *rbdDiskMapper +} + +func getPoolAndImageFromMapPath(mapPath string) (string, string, error) { + + pathParts := dstrings.Split(mapPath, "/") + if len(pathParts) < 2 { + return "", "", fmt.Errorf("corrupted mapPath") + } + rbdParts := dstrings.Split(pathParts[len(pathParts)-1], "-image-") + + if len(rbdParts) < 2 { + return "", "", fmt.Errorf("corrupted mapPath") + } + return string(rbdParts[0]), string(rbdParts[1]), nil +} + +func getBlockVolumeDevice(mapPath string) (string, error) { + pool, image, err := getPoolAndImageFromMapPath(mapPath) + if err != nil { + return "", err + } + // Getting full device path + device, found := getDevFromImageAndPool(pool, image) + if !found { + return "", err + } + return device, nil +} + +func (rbd *rbdDiskUnmapper) TearDownDevice(mapPath, _ string) error { + + device, err := getBlockVolumeDevice(mapPath) + if err != nil { + return fmt.Errorf("rbd: failed to get loopback for device: %v, err: %v", device, err) + } + blkUtil := volutil.NewBlockVolumePathHandler() + loop, err := volutil.BlockVolumePathHandler.GetLoopDevice(blkUtil, device) + if err != nil { + return fmt.Errorf("rbd: failed to get loopback for device: %v, err: %v", device, err) + } + // Remove loop device 
before detaching volume since volume detach operation gets busy if volume is opened by loopback. + err = volutil.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) + if err != nil { + return fmt.Errorf("rbd: failed to remove loopback :%v, err: %v", loop, err) + } + glog.V(4).Infof("rbd: successfully removed loop device: %s", loop) + + err = rbd.manager.DetachBlockDisk(*rbd, mapPath) + if err != nil { + return fmt.Errorf("rbd: failed to detach disk: %s\nError: %v", mapPath, err) + } + glog.V(4).Infof("rbd: %q is unmapped, deleting the directory", mapPath) + + err = os.RemoveAll(mapPath) + if err != nil { + return fmt.Errorf("rbd: failed to delete the directory: %s\nError: %v", mapPath, err) + } + glog.V(4).Infof("rbd: successfully detached disk: %s", mapPath) + + return nil +} + func getVolumeSourceMonitors(spec *volume.Spec) ([]string, error) { if spec.Volume != nil && spec.Volume.RBD != nil { return spec.Volume.RBD.CephMonitors, nil diff --git a/pkg/volume/rbd/rbd_test.go b/pkg/volume/rbd/rbd_test.go index d16ce140992..23b9b969f96 100644 --- a/pkg/volume/rbd/rbd_test.go +++ b/pkg/volume/rbd/rbd_test.go @@ -81,10 +81,14 @@ func (fake *fakeDiskManager) MakeGlobalPDName(rbd rbd) string { return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image) } +func (fake *fakeDiskManager) MakeGlobalVDPDName(rbd rbd) string { + return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image) +} + func (fake *fakeDiskManager) AttachDisk(b rbdMounter) (string, error) { fake.mutex.Lock() defer fake.mutex.Unlock() - fake.rbdMapIndex += 1 + fake.rbdMapIndex++ devicePath := fmt.Sprintf("/dev/rbd%d", fake.rbdMapIndex) fake.rbdDevices[devicePath] = true return devicePath, nil @@ -101,6 +105,17 @@ func (fake *fakeDiskManager) DetachDisk(r *rbdPlugin, deviceMountPath string, de return nil } +func (fake *fakeDiskManager) DetachBlockDisk(r rbdDiskUnmapper, device string) error { + fake.mutex.Lock() + defer fake.mutex.Unlock() + ok := fake.rbdDevices[device] + if !ok { + return 
fmt.Errorf("rbd: failed to detach device %s, it does not exist", device) + } + delete(fake.rbdDevices, device) + return nil +} + func (fake *fakeDiskManager) CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error) { return nil, 0, fmt.Errorf("not implemented") } diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index 915e0e2e496..4f6d9e0121f 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -35,6 +35,7 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/wait" fileutil "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" @@ -46,6 +47,11 @@ const ( imageSizeStr = "size " sizeDivStr = " MB in" kubeLockMagic = "kubelet_lock_magic_" + // The following three values are used for 30 seconds timeout + // while waiting for RBD Watcher to expire. + rbdImageWatcherInitDelay = 1 * time.Second + rbdImageWatcherFactor = 1.4 + rbdImageWatcherSteps = 10 ) // search /sys/bus for rbd device that matches given pool and image @@ -109,6 +115,11 @@ func makePDNameInternal(host volume.VolumeHost, pool string, image string) strin return path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image) } +// make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/volumeDevices/pool-image-image +func makeVDPDNameInternal(host volume.VolumeHost, pool string, image string) string { + return path.Join(host.GetVolumeDevicePluginDir(rbdPluginName), pool+"-image-"+image) +} + // RBDUtil implements diskManager interface. 
type RBDUtil struct{} @@ -118,6 +129,10 @@ func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string { return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image) } +func (util *RBDUtil) MakeGlobalVDPDName(rbd rbd) string { + return makeVDPDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image) +} + func rbdErrors(runErr, resultErr error) error { if err, ok := runErr.(*exec.Error); ok { if err.Err == exec.ErrNotFound { @@ -217,13 +232,27 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) { // Currently, we don't acquire advisory lock on image, but for backward // compatibility, we need to check if the image is being used by nodes running old kubelet. - found, rbdOutput, err := util.rbdStatus(&b) - if err != nil { - return "", fmt.Errorf("error: %v, rbd output: %v", err, rbdOutput) + // osd_client_watch_timeout defaults to 30 seconds, if the watcher stays active longer than 30 seconds, + // rbd image does not get mounted and failure message gets generated. + backoff := wait.Backoff{ + Duration: rbdImageWatcherInitDelay, + Factor: rbdImageWatcherFactor, + Steps: rbdImageWatcherSteps, } - if found { - glog.Infof("rbd image %s/%s is still being used ", b.Pool, b.Image) - return "", fmt.Errorf("rbd image %s/%s is still being used. 
rbd output: %s", b.Pool, b.Image, rbdOutput) + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + used, rbdOutput, err := util.rbdStatus(&b) + if err != nil { + return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput) + } + return !used, nil + }) + // return error if rbd image has not become available for the specified timeout + if err == wait.ErrWaitTimeout { + return "", fmt.Errorf("rbd image %s/%s is still being used", b.Pool, b.Image) + } + // return error if any other errors were encountered while waiting for the image to become available + if err != nil { + return "", err } mon := util.kernelRBDMonitorsOpt(b.Mon) @@ -281,6 +310,35 @@ func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, devic return nil } +// DetachBlockDisk detaches the disk from the node. +func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error { + + if pathExists, pathErr := volutil.PathExists(mapPath); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath) + return nil + } + // If we arrive here, device is no longer used, see if need to logout the target + device, err := getBlockVolumeDevice(mapPath) + if err != nil { + return err + } + + if len(device) == 0 { + return fmt.Errorf("DetachDisk failed , device is empty") + } + // rbd unmap + exec := disk.plugin.host.GetExec(disk.plugin.GetPluginName()) + output, err := exec.Run("rbd", "unmap", device) + if err != nil { + return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %s", device, err, string(output))) + } + glog.V(3).Infof("rbd: successfully unmap device %s", device) + + return nil +} + // cleanOldRBDFile read rbd info from rbd.json file and removes lock if found. // At last, it removes rbd.json file. 
func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error { From a3af8e1236b5d57c6dd5fd8329c0304a533eaa97 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Thu, 4 Jan 2018 17:37:56 +0800 Subject: [PATCH 631/794] remove useless service watch in APIServiceRegistrationController --- .../pkg/apiserver/apiserver.go | 2 +- .../pkg/apiserver/apiservice_controller.go | 69 +------------------ 2 files changed, 3 insertions(+), 68 deletions(-) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go index f9188609ea8..2c805bc1f12 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go @@ -201,7 +201,7 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg s.GenericAPIServer.Handler.NonGoRestfulMux.Handle("/apis", apisHandler) s.GenericAPIServer.Handler.NonGoRestfulMux.UnlistedHandle("/apis/", apisHandler) - apiserviceRegistrationController := NewAPIServiceRegistrationController(informerFactory.Apiregistration().InternalVersion().APIServices(), c.GenericConfig.SharedInformerFactory.Core().V1().Services(), s) + apiserviceRegistrationController := NewAPIServiceRegistrationController(informerFactory.Apiregistration().InternalVersion().APIServices(), s) availableController := statuscontrollers.NewAvailableConditionController( informerFactory.Apiregistration().InternalVersion().APIServices(), c.GenericConfig.SharedInformerFactory.Core().V1().Services(), diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go index 2ed0a4b5294..25bfca9af6a 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go @@ -22,13 +22,9 @@ import ( "github.com/golang/glog" - 
"k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - v1informers "k8s.io/client-go/informers/core/v1" - v1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -49,23 +45,17 @@ type APIServiceRegistrationController struct { apiServiceLister listers.APIServiceLister apiServiceSynced cache.InformerSynced - // serviceLister is used to get the IP to create the transport for - serviceLister v1listers.ServiceLister - servicesSynced cache.InformerSynced - // To allow injection for testing. syncFn func(key string) error queue workqueue.RateLimitingInterface } -func NewAPIServiceRegistrationController(apiServiceInformer informers.APIServiceInformer, serviceInformer v1informers.ServiceInformer, apiHandlerManager APIHandlerManager) *APIServiceRegistrationController { +func NewAPIServiceRegistrationController(apiServiceInformer informers.APIServiceInformer, apiHandlerManager APIHandlerManager) *APIServiceRegistrationController { c := &APIServiceRegistrationController{ apiHandlerManager: apiHandlerManager, apiServiceLister: apiServiceInformer.Lister(), apiServiceSynced: apiServiceInformer.Informer().HasSynced, - serviceLister: serviceInformer.Lister(), - servicesSynced: serviceInformer.Informer().HasSynced, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "APIServiceRegistrationController"), } @@ -75,12 +65,6 @@ func NewAPIServiceRegistrationController(apiServiceInformer informers.APIService DeleteFunc: c.deleteAPIService, }) - serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: c.addService, - UpdateFunc: c.updateService, - DeleteFunc: c.deleteService, - }) - c.syncFn = c.sync return c @@ -112,7 +96,7 @@ func (c *APIServiceRegistrationController) Run(stopCh <-chan struct{}) { glog.Infof("Starting 
APIServiceRegistrationController") defer glog.Infof("Shutting down APIServiceRegistrationController") - if !controllers.WaitForCacheSync("APIServiceRegistrationController", stopCh, c.apiServiceSynced, c.servicesSynced) { + if !controllers.WaitForCacheSync("APIServiceRegistrationController", stopCh, c.apiServiceSynced) { return } @@ -187,52 +171,3 @@ func (c *APIServiceRegistrationController) deleteAPIService(obj interface{}) { glog.V(4).Infof("Deleting %q", castObj.Name) c.enqueue(castObj) } - -// there aren't very many apiservices, just check them all. -func (c *APIServiceRegistrationController) getAPIServicesFor(service *v1.Service) []*apiregistration.APIService { - var ret []*apiregistration.APIService - apiServiceList, _ := c.apiServiceLister.List(labels.Everything()) - for _, apiService := range apiServiceList { - if apiService.Spec.Service == nil { - continue - } - if apiService.Spec.Service.Namespace == service.Namespace && apiService.Spec.Service.Name == service.Name { - ret = append(ret, apiService) - } - } - - return ret -} - -// TODO, think of a way to avoid checking on every service manipulation - -func (c *APIServiceRegistrationController) addService(obj interface{}) { - for _, apiService := range c.getAPIServicesFor(obj.(*v1.Service)) { - c.enqueue(apiService) - } -} - -func (c *APIServiceRegistrationController) updateService(obj, _ interface{}) { - for _, apiService := range c.getAPIServicesFor(obj.(*v1.Service)) { - c.enqueue(apiService) - } -} - -func (c *APIServiceRegistrationController) deleteService(obj interface{}) { - castObj, ok := obj.(*v1.Service) - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - glog.Errorf("Couldn't get object from tombstone %#v", obj) - return - } - castObj, ok = tombstone.Obj.(*v1.Service) - if !ok { - glog.Errorf("Tombstone contained object that is not expected %#v", obj) - return - } - } - for _, apiService := range c.getAPIServicesFor(castObj) { - c.enqueue(apiService) - } -} From 
2c438a8f21aead73e65d4b84103676d4b61f2a20 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Fri, 5 Jan 2018 10:36:40 +0800 Subject: [PATCH 632/794] update bazel --- staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD | 2 -- 1 file changed, 2 deletions(-) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD index 41f11c57c3b..c79678ff446 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD @@ -41,7 +41,6 @@ go_library( importpath = "k8s.io/kube-aggregator/pkg/apiserver", deps = [ "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", @@ -64,7 +63,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/proxy:go_default_library", - "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", "//vendor/k8s.io/client-go/pkg/version:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", From b12c3b86604d6d87d221db14f9cc013d5753689c Mon Sep 17 00:00:00 2001 From: zhangxiaoyu-zidif Date: Sat, 6 Jan 2018 16:16:37 +0800 Subject: [PATCH 633/794] use sets.String to replace slice when sort []string --- pkg/controller/service/BUILD | 1 + pkg/controller/service/service_controller.go | 30 ++++---------------- 2 files changed, 7 insertions(+), 24 deletions(-) diff --git a/pkg/controller/service/BUILD b/pkg/controller/service/BUILD index 459108f05ac..3281122a1d8 100644 --- a/pkg/controller/service/BUILD +++ b/pkg/controller/service/BUILD @@ -23,6 +23,7 @@ 
go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", diff --git a/pkg/controller/service/service_controller.go b/pkg/controller/service/service_controller.go index 3496f0681ca..e07d9f44c0c 100644 --- a/pkg/controller/service/service_controller.go +++ b/pkg/controller/service/service_controller.go @@ -18,7 +18,6 @@ package service import ( "fmt" - "sort" "sync" "time" @@ -28,6 +27,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" coreinformers "k8s.io/client-go/informers/core/v1" @@ -567,10 +567,10 @@ func portEqualForLB(x, y *v1.ServicePort) bool { return true } -func nodeNames(nodes []*v1.Node) []string { - ret := make([]string, len(nodes)) - for i, node := range nodes { - ret[i] = node.Name +func nodeNames(nodes []*v1.Node) sets.String { + ret := sets.NewString() + for _, node := range nodes { + ret.Insert(node.Name) } return ret } @@ -579,25 +579,7 @@ func nodeSlicesEqualForLB(x, y []*v1.Node) bool { if len(x) != len(y) { return false } - return stringSlicesEqual(nodeNames(x), nodeNames(y)) -} - -func stringSlicesEqual(x, y []string) bool { - if len(x) != len(y) { - return false - } - if !sort.StringsAreSorted(x) { - sort.Strings(x) - } - if !sort.StringsAreSorted(y) { - sort.Strings(y) - } - for i := range x { - if x[i] != y[i] { - return false - } - } - return true + return nodeNames(x).Equal(nodeNames(y)) } func getNodeConditionPredicate() corelisters.NodeConditionPredicate { From 
5a165b03878d2712731b6f7b989583ca54d00414 Mon Sep 17 00:00:00 2001 From: mattjmcnaughton Date: Fri, 5 Jan 2018 09:05:54 -0500 Subject: [PATCH 634/794] Add test coverage for metrics/utilization.go Currently, there is no test coverage for this code. Since it does fairly important calculations, test coverage seems helpful. --- pkg/controller/podautoscaler/metrics/BUILD | 1 + .../podautoscaler/metrics/utilization_test.go | 149 ++++++++++++++++++ 2 files changed, 150 insertions(+) create mode 100644 pkg/controller/podautoscaler/metrics/utilization_test.go diff --git a/pkg/controller/podautoscaler/metrics/BUILD b/pkg/controller/podautoscaler/metrics/BUILD index 8592a565082..c9c11fe249f 100644 --- a/pkg/controller/podautoscaler/metrics/BUILD +++ b/pkg/controller/podautoscaler/metrics/BUILD @@ -37,6 +37,7 @@ go_test( srcs = [ "legacy_metrics_client_test.go", "rest_metrics_client_test.go", + "utilization_test.go", ], embed = [":go_default_library"], importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", diff --git a/pkg/controller/podautoscaler/metrics/utilization_test.go b/pkg/controller/podautoscaler/metrics/utilization_test.go new file mode 100644 index 00000000000..35e7df6eda4 --- /dev/null +++ b/pkg/controller/podautoscaler/metrics/utilization_test.go @@ -0,0 +1,149 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +type resourceUtilizationRatioTestCase struct { + metrics PodMetricsInfo + requests map[string]int64 + targetUtilization int32 + + expectedUtilizationRatio float64 + expectedCurrentUtilization int32 + expectedRawAverageValue int64 + expectedErr error +} + +func (tc *resourceUtilizationRatioTestCase) runTest(t *testing.T) { + actualUtilizationRatio, actualCurrentUtilization, actualRawAverageValue, actualErr := GetResourceUtilizationRatio(tc.metrics, tc.requests, tc.targetUtilization) + + if tc.expectedErr != nil { + assert.Error(t, actualErr, "there should be an error getting the utilization ratio") + assert.Contains(t, fmt.Sprintf("%v", actualErr), fmt.Sprintf("%v", tc.expectedErr), "the error message should be as expected") + return + } + + assert.NoError(t, actualErr, "there should be no error retrieving the utilization ratio") + assert.Equal(t, tc.expectedUtilizationRatio, actualUtilizationRatio, "the utilization ratios should be as expected") + assert.Equal(t, tc.expectedCurrentUtilization, actualCurrentUtilization, "the current utilization should be as expected") + assert.Equal(t, tc.expectedRawAverageValue, actualRawAverageValue, "the raw average value should be as expected") +} + +type metricUtilizationRatioTestCase struct { + metrics PodMetricsInfo + targetUtilization int64 + + expectedUtilizationRatio float64 + expectedCurrentUtilization int64 +} + +func (tc *metricUtilizationRatioTestCase) runTest(t *testing.T) { + actualUtilizationRatio, actualCurrentUtilization := GetMetricUtilizationRatio(tc.metrics, tc.targetUtilization) + + assert.Equal(t, tc.expectedUtilizationRatio, actualUtilizationRatio, "the utilization ratios should be as expected") + assert.Equal(t, tc.expectedCurrentUtilization, actualCurrentUtilization, "the current utilization should be as expected") +} + +func TestGetResourceUtilizationRatioBaseCase(t *testing.T) { + tc := 
resourceUtilizationRatioTestCase{ + metrics: PodMetricsInfo{ + "test-pod-0": 50, "test-pod-1": 76, + }, + requests: map[string]int64{ + "test-pod-0": 100, "test-pod-1": 100, + }, + targetUtilization: 50, + expectedUtilizationRatio: 1.26, + expectedCurrentUtilization: 63, + expectedRawAverageValue: 63, + expectedErr: nil, + } + + tc.runTest(t) +} + +func TestGetResourceUtilizationRatioIgnorePodsWithNoRequest(t *testing.T) { + tc := resourceUtilizationRatioTestCase{ + metrics: PodMetricsInfo{ + "test-pod-0": 50, "test-pod-1": 76, "test-pod-no-request": 100, + }, + requests: map[string]int64{ + "test-pod-0": 100, "test-pod-1": 100, + }, + targetUtilization: 50, + expectedUtilizationRatio: 1.26, + expectedCurrentUtilization: 63, + expectedRawAverageValue: 63, + expectedErr: nil, + } + + tc.runTest(t) +} + +func TestGetResourceUtilizationRatioExtraRequest(t *testing.T) { + tc := resourceUtilizationRatioTestCase{ + metrics: PodMetricsInfo{ + "test-pod-0": 50, "test-pod-1": 76, + }, + requests: map[string]int64{ + "test-pod-0": 100, "test-pod-1": 100, "test-pod-extra-request": 500, + }, + targetUtilization: 50, + expectedUtilizationRatio: 1.26, + expectedCurrentUtilization: 63, + expectedRawAverageValue: 63, + expectedErr: nil, + } + + tc.runTest(t) +} + +func TestGetResourceUtilizationRatioNoRequests(t *testing.T) { + tc := resourceUtilizationRatioTestCase{ + metrics: PodMetricsInfo{ + "test-pod-0": 50, "test-pod-1": 76, + }, + requests: map[string]int64{}, + targetUtilization: 50, + + expectedUtilizationRatio: 0, + expectedCurrentUtilization: 0, + expectedRawAverageValue: 0, + expectedErr: fmt.Errorf("no metrics returned matched known pods"), + } + + tc.runTest(t) +} + +func TestGetMetricUtilizationRatioBaseCase(t *testing.T) { + tc := metricUtilizationRatioTestCase{ + metrics: PodMetricsInfo{ + "test-pod-0": 5000, "test-pod-1": 10000, + }, + targetUtilization: 10000, + expectedUtilizationRatio: .75, + expectedCurrentUtilization: 7500, + } + + tc.runTest(t) +} From 
792a2299362ebadc9ca68c72347884330db23b2a Mon Sep 17 00:00:00 2001 From: Di Xu Date: Mon, 4 Dec 2017 14:39:05 +0800 Subject: [PATCH 635/794] forbid unnamed context --- pkg/kubectl/cmd/config/use_context.go | 2 +- .../pkg/util/webhook/webhook_test.go | 13 ++++++--- .../client-go/tools/clientcmd/validation.go | 4 +++ .../tools/clientcmd/validation_test.go | 29 +++++++++++++++++++ 4 files changed, 43 insertions(+), 5 deletions(-) diff --git a/pkg/kubectl/cmd/config/use_context.go b/pkg/kubectl/cmd/config/use_context.go index 38f77eea2c3..c90165aff44 100644 --- a/pkg/kubectl/cmd/config/use_context.go +++ b/pkg/kubectl/cmd/config/use_context.go @@ -88,7 +88,7 @@ func (o *useContextOptions) complete(cmd *cobra.Command) error { func (o useContextOptions) validate(config *clientcmdapi.Config) error { if len(o.contextName) == 0 { - return errors.New("you must specify a current-context") + return errors.New("empty context names are not allowed") } for name := range config.Contexts { diff --git a/staging/src/k8s.io/apiserver/pkg/util/webhook/webhook_test.go b/staging/src/k8s.io/apiserver/pkg/util/webhook/webhook_test.go index f47e088beeb..3bda51bb894 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/webhook/webhook_test.go +++ b/staging/src/k8s.io/apiserver/pkg/util/webhook/webhook_test.go @@ -114,15 +114,15 @@ func TestKubeConfigFile(t *testing.T) { errRegex: errNoConfiguration, }, { - test: "missing context (specified context is missing)", - cluster: &namedCluster, - currentContext: "missing-context", - errRegex: errNoConfiguration, + test: "missing context (specified context is missing)", + cluster: &namedCluster, + errRegex: errNoConfiguration, }, { test: "context without cluster", context: &v1.NamedContext{ Context: v1.Context{}, + Name: "testing-context", }, currentContext: "testing-context", errRegex: errNoConfiguration, @@ -134,6 +134,7 @@ func TestKubeConfigFile(t *testing.T) { Context: v1.Context{ Cluster: namedCluster.Name, }, + Name: "testing-context", }, 
currentContext: "testing-context", errRegex: "", // Not an error at parse time, only when using the webhook @@ -145,6 +146,7 @@ func TestKubeConfigFile(t *testing.T) { Context: v1.Context{ Cluster: "missing-cluster", }, + Name: "fake", }, errRegex: errNoConfiguration, }, @@ -156,6 +158,7 @@ func TestKubeConfigFile(t *testing.T) { Cluster: namedCluster.Name, AuthInfo: "missing-user", }, + Name: "testing-context", }, currentContext: "testing-context", errRegex: "", // Not an error at parse time, only when using the webhook @@ -267,6 +270,8 @@ func TestKubeConfigFile(t *testing.T) { kubeConfig.AuthInfos = []v1.NamedAuthInfo{*tt.user} } + kubeConfig.CurrentContext = tt.currentContext + kubeConfigFile, err := newKubeConfigFile(kubeConfig) if err == nil { diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/validation.go b/staging/src/k8s.io/client-go/tools/clientcmd/validation.go index 2bae0c395d2..4c7b15b78c2 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/validation.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/validation.go @@ -253,6 +253,10 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { validationErrors := make([]error, 0) + if len(contextName) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("empty context name for %#v is not allowed", context)) + } + if len(context.AuthInfo) == 0 { validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/validation_test.go b/staging/src/k8s.io/client-go/tools/clientcmd/validation_test.go index 6441f148376..fcf86ab0a7f 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/validation_test.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/validation_test.go @@ -62,6 
+62,7 @@ func TestConfirmUsableBadInfoButOkConfig(t *testing.T) { okTest.testConfirmUsable("clean", t) badValidation.testConfig(t) } + func TestConfirmUsableBadInfoConfig(t *testing.T) { config := clientcmdapi.NewConfig() config.Clusters["missing ca"] = &clientcmdapi.Cluster{ @@ -83,6 +84,7 @@ func TestConfirmUsableBadInfoConfig(t *testing.T) { test.testConfirmUsable("first", t) } + func TestConfirmUsableEmptyConfig(t *testing.T) { config := clientcmdapi.NewConfig() test := configValidationTest{ @@ -92,6 +94,7 @@ func TestConfirmUsableEmptyConfig(t *testing.T) { test.testConfirmUsable("", t) } + func TestConfirmUsableMissingConfig(t *testing.T) { config := clientcmdapi.NewConfig() test := configValidationTest{ @@ -101,6 +104,7 @@ func TestConfirmUsableMissingConfig(t *testing.T) { test.testConfirmUsable("not-here", t) } + func TestValidateEmptyConfig(t *testing.T) { config := clientcmdapi.NewConfig() test := configValidationTest{ @@ -110,6 +114,7 @@ func TestValidateEmptyConfig(t *testing.T) { test.testConfig(t) } + func TestValidateMissingCurrentContextConfig(t *testing.T) { config := clientcmdapi.NewConfig() config.CurrentContext = "anything" @@ -120,6 +125,7 @@ func TestValidateMissingCurrentContextConfig(t *testing.T) { test.testConfig(t) } + func TestIsContextNotFound(t *testing.T) { config := clientcmdapi.NewConfig() config.CurrentContext = "anything" @@ -172,6 +178,7 @@ func TestValidateMissingReferencesConfig(t *testing.T) { test.testContext("anything", t) test.testConfig(t) } + func TestValidateEmptyContext(t *testing.T) { config := clientcmdapi.NewConfig() config.CurrentContext = "anything" @@ -185,6 +192,19 @@ func TestValidateEmptyContext(t *testing.T) { test.testConfig(t) } +func TestValidateEmptyContextName(t *testing.T) { + config := clientcmdapi.NewConfig() + config.CurrentContext = "anything" + config.Contexts[""] = &clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"} + test := configValidationTest{ + config: config, + 
expectedErrorSubstring: []string{"empty context name", "is not allowed"}, + } + + test.testContext("", t) + test.testConfig(t) +} + func TestValidateEmptyClusterInfo(t *testing.T) { config := clientcmdapi.NewConfig() config.Clusters["empty"] = clientcmdapi.NewCluster() @@ -223,6 +243,7 @@ func TestValidateMissingCAFileClusterInfo(t *testing.T) { test.testCluster("missing ca", t) test.testConfig(t) } + func TestValidateCleanClusterInfo(t *testing.T) { config := clientcmdapi.NewConfig() config.Clusters["clean"] = &clientcmdapi.Cluster{ @@ -235,6 +256,7 @@ func TestValidateCleanClusterInfo(t *testing.T) { test.testCluster("clean", t) test.testConfig(t) } + func TestValidateCleanWithCAClusterInfo(t *testing.T) { tempFile, _ := ioutil.TempFile("", "") defer os.Remove(tempFile.Name()) @@ -262,6 +284,7 @@ func TestValidateEmptyAuthInfo(t *testing.T) { test.testAuthInfo("error", t) test.testConfig(t) } + func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) { config := clientcmdapi.NewConfig() config.AuthInfos["error"] = &clientcmdapi.AuthInfo{ @@ -276,6 +299,7 @@ func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) { test.testAuthInfo("error", t) test.testConfig(t) } + func TestValidateCertDataOverridesFiles(t *testing.T) { tempFile, _ := ioutil.TempFile("", "") defer os.Remove(tempFile.Name()) @@ -295,6 +319,7 @@ func TestValidateCertDataOverridesFiles(t *testing.T) { test.testAuthInfo("clean", t) test.testConfig(t) } + func TestValidateCleanCertFilesAuthInfo(t *testing.T) { tempFile, _ := ioutil.TempFile("", "") defer os.Remove(tempFile.Name()) @@ -311,6 +336,7 @@ func TestValidateCleanCertFilesAuthInfo(t *testing.T) { test.testAuthInfo("clean", t) test.testConfig(t) } + func TestValidateCleanTokenAuthInfo(t *testing.T) { config := clientcmdapi.NewConfig() config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ @@ -363,6 +389,7 @@ func (c configValidationTest) testContext(contextName string, t *testing.T) { } } } + func (c configValidationTest) 
testConfirmUsable(contextName string, t *testing.T) { err := ConfirmUsable(*c.config, contextName) @@ -382,6 +409,7 @@ func (c configValidationTest) testConfirmUsable(contextName string, t *testing.T } } } + func (c configValidationTest) testConfig(t *testing.T) { err := Validate(*c.config) @@ -404,6 +432,7 @@ func (c configValidationTest) testConfig(t *testing.T) { } } } + func (c configValidationTest) testCluster(clusterName string, t *testing.T) { errs := validateClusterInfo(clusterName, *c.config.Clusters[clusterName]) From 5e9704572fe28650d15a40de60859a931c0d4f7e Mon Sep 17 00:00:00 2001 From: "M.B.S. Sai Akhil" Date: Sun, 7 Jan 2018 08:45:11 +0530 Subject: [PATCH 636/794] Fix Typo in apiserver README --- staging/src/k8s.io/apiserver/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/README.md b/staging/src/k8s.io/apiserver/README.md index 96927ae703c..130ba87ded6 100644 --- a/staging/src/k8s.io/apiserver/README.md +++ b/staging/src/k8s.io/apiserver/README.md @@ -25,6 +25,6 @@ Code changes are made in that location, merged into `k8s.io/kubernetes` and late ## Things you should *NOT* do - 1. Directly modify any files under `pkg` in this repo. Those are driven from `k8s.io/kuberenetes/staging/src/k8s.io/apiserver`. + 1. Directly modify any files under `pkg` in this repo. Those are driven from `k8s.io/kubernetes/staging/src/k8s.io/apiserver`. 2. Expect compatibility. This repo is changing quickly in direct support of Kubernetes and the API isn't yet stable enough for API guarantees. 
From 0e6ac1df76f30375c0ef81b58c4f1e575efd7957 Mon Sep 17 00:00:00 2001 From: mlmhl Date: Mon, 1 Jan 2018 21:35:13 +0800 Subject: [PATCH 637/794] fix populateDesiredStateOfWorld bug for attach/detach controller --- pkg/controller/volume/attachdetach/attach_detach_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go index 3c01e6b05a4..e7576ea0c3a 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -335,7 +335,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error { } for _, pod := range pods { podToAdd := pod - adc.podAdd(&podToAdd) + adc.podAdd(podToAdd) for _, podVolume := range podToAdd.Spec.Volumes { // The volume specs present in the ActualStateOfWorld are nil, let's replace those // with the correct ones found on pods. 
The present in the ASW with no corresponding From ef852650256e698a0298554452349122add35bb5 Mon Sep 17 00:00:00 2001 From: weekface Date: Sun, 7 Jan 2018 17:49:21 +0800 Subject: [PATCH 638/794] tiny fix --- staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index ae1e12dc75e..919810c8cb2 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -56,7 +56,7 @@ func NewWardleServerOptions(out, errOut io.Writer) *WardleServerOptions { return o } -// NewCommandStartMaster provides a CLI handler for 'start master' command +// NewCommandStartWardleServer provides a CLI handler for 'start master' command func NewCommandStartWardleServer(out, errOut io.Writer, stopCh <-chan struct{}) *cobra.Command { o := NewWardleServerOptions(out, errOut) From e31bab4640265dbec63c38f312dade3476593156 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Tue, 12 Dec 2017 16:20:42 -0800 Subject: [PATCH 639/794] Version bump to etcd v3.2.13 --- Godeps/Godeps.json | 341 +- Godeps/LICENSES | 4148 ++++++++++++++++- vendor/BUILD | 10 +- vendor/github.com/boltdb/bolt/Makefile | 18 - vendor/github.com/cockroachdb/cmux/.gitignore | 24 + .../github.com/cockroachdb/cmux/.travis.yml | 22 + vendor/github.com/cockroachdb/cmux/BUILD | 31 + vendor/github.com/cockroachdb/cmux/LICENSE | 202 + vendor/github.com/cockroachdb/cmux/README.md | 65 + vendor/github.com/cockroachdb/cmux/buffer.go | 35 + vendor/github.com/cockroachdb/cmux/cmux.go | 210 + .../github.com/cockroachdb/cmux/matchers.go | 150 + .../github.com/cockroachdb/cmux/patricia.go | 173 + .../{boltdb/bolt => coreos/bbolt}/.gitignore | 1 + .../{boltdb/bolt => coreos/bbolt}/BUILD | 14 +- .../{boltdb/bolt => coreos/bbolt}/LICENSE | 0 vendor/github.com/coreos/bbolt/Makefile | 30 + 
.../{boltdb/bolt => coreos/bbolt}/README.md | 100 +- .../bolt => coreos/bbolt}/appveyor.yml | 0 .../{boltdb/bolt => coreos/bbolt}/bolt_386.go | 3 + .../bolt => coreos/bbolt}/bolt_amd64.go | 3 + vendor/github.com/coreos/bbolt/bolt_arm.go | 28 + .../bolt => coreos/bbolt}/bolt_arm64.go | 3 + .../bolt => coreos/bbolt}/bolt_linux.go | 0 .../github.com/coreos/bbolt/bolt_mips64x.go | 12 + .../bbolt/bolt_mipsx.go} | 7 +- .../bolt => coreos/bbolt}/bolt_openbsd.go | 0 .../{boltdb/bolt => coreos/bbolt}/bolt_ppc.go | 0 .../bolt => coreos/bbolt}/bolt_ppc64.go | 3 + .../bolt => coreos/bbolt}/bolt_ppc64le.go | 3 + .../bolt => coreos/bbolt}/bolt_s390x.go | 3 + .../bolt => coreos/bbolt}/bolt_unix.go | 33 +- .../bbolt}/bolt_unix_solaris.go | 39 +- .../bolt => coreos/bbolt}/bolt_windows.go | 33 +- .../bolt => coreos/bbolt}/boltsync_unix.go | 0 .../{boltdb/bolt => coreos/bbolt}/bucket.go | 49 +- .../{boltdb/bolt => coreos/bbolt}/cursor.go | 0 .../{boltdb/bolt => coreos/bbolt}/db.go | 211 +- .../{boltdb/bolt => coreos/bbolt}/doc.go | 0 .../{boltdb/bolt => coreos/bbolt}/errors.go | 0 .../{boltdb/bolt => coreos/bbolt}/freelist.go | 167 +- .../{boltdb/bolt => coreos/bbolt}/node.go | 2 +- .../{boltdb/bolt => coreos/bbolt}/page.go | 31 +- .../{boltdb/bolt => coreos/bbolt}/tx.go | 83 +- vendor/github.com/coreos/etcd/auth/BUILD | 5 + .../coreos/etcd/auth/authpb/auth.pb.go | 2 +- vendor/github.com/coreos/etcd/auth/jwt.go | 137 + .../coreos/etcd/auth/range_perm_cache.go | 180 +- .../coreos/etcd/auth/simple_token.go | 129 +- vendor/github.com/coreos/etcd/auth/store.go | 260 +- vendor/github.com/coreos/etcd/client/BUILD | 3 +- .../github.com/coreos/etcd/client/client.go | 52 +- .../github.com/coreos/etcd/client/discover.go | 19 + vendor/github.com/coreos/etcd/client/srv.go | 65 - vendor/github.com/coreos/etcd/clientv3/BUILD | 17 +- .../github.com/coreos/etcd/clientv3/README.md | 10 +- .../github.com/coreos/etcd/clientv3/auth.go | 63 +- .../coreos/etcd/clientv3/balancer.go | 239 - 
.../github.com/coreos/etcd/clientv3/client.go | 249 +- .../coreos/etcd/clientv3/cluster.go | 62 +- .../coreos/etcd/clientv3/compact_op.go | 6 +- .../coreos/etcd/clientv3/compare.go | 27 + .../coreos/etcd/clientv3/concurrency/BUILD | 35 + .../coreos/etcd/clientv3/concurrency/doc.go | 17 + .../etcd/clientv3/concurrency/election.go | 246 + .../coreos/etcd/clientv3/concurrency/key.go | 66 + .../coreos/etcd/clientv3/concurrency/mutex.go | 119 + .../etcd/clientv3/concurrency/session.go | 142 + .../coreos/etcd/clientv3/concurrency/stm.go | 388 ++ .../github.com/coreos/etcd/clientv3/config.go | 120 +- vendor/github.com/coreos/etcd/clientv3/doc.go | 2 +- .../coreos/etcd/clientv3/grpc_options.go | 46 + .../coreos/etcd/clientv3/health_balancer.go | 627 +++ vendor/github.com/coreos/etcd/clientv3/kv.go | 81 +- .../github.com/coreos/etcd/clientv3/lease.go | 261 +- .../github.com/coreos/etcd/clientv3/logger.go | 34 +- .../coreos/etcd/clientv3/maintenance.go | 69 +- .../coreos/etcd/clientv3/namespace/BUILD | 34 + .../coreos/etcd/clientv3/namespace/doc.go | 43 + .../coreos/etcd/clientv3/namespace/kv.go | 189 + .../coreos/etcd/clientv3/namespace/lease.go | 58 + .../coreos/etcd/clientv3/namespace/util.go | 42 + .../coreos/etcd/clientv3/namespace/watch.go | 84 + .../coreos/etcd/clientv3/naming/BUILD | 32 + .../coreos/etcd/clientv3/naming/doc.go | 56 + .../coreos/etcd/clientv3/naming/grpc.go | 132 + vendor/github.com/coreos/etcd/clientv3/op.go | 133 +- .../coreos/etcd/clientv3/ready_wait.go | 30 + .../github.com/coreos/etcd/clientv3/retry.go | 376 +- vendor/github.com/coreos/etcd/clientv3/txn.go | 31 +- .../github.com/coreos/etcd/clientv3/watch.go | 95 +- .../coreos/etcd/compactor/compactor.go | 20 +- vendor/github.com/coreos/etcd/discovery/BUILD | 5 +- .../github.com/coreos/etcd/discovery/srv.go | 104 - vendor/github.com/coreos/etcd/embed/BUILD | 60 + vendor/github.com/coreos/etcd/embed/config.go | 464 ++ vendor/github.com/coreos/etcd/embed/doc.go | 45 + 
vendor/github.com/coreos/etcd/embed/etcd.go | 509 ++ vendor/github.com/coreos/etcd/embed/serve.go | 244 + vendor/github.com/coreos/etcd/embed/util.go | 30 + vendor/github.com/coreos/etcd/error/error.go | 5 +- .../github.com/coreos/etcd/etcdserver/BUILD | 2 +- .../coreos/etcd/etcdserver/api/BUILD | 4 + .../coreos/etcd/etcdserver/api/capability.go | 8 +- .../coreos/etcd/etcdserver/api/etcdhttp/BUILD | 40 + .../etcd/etcdserver/api/etcdhttp/base.go | 186 + .../api/{v2http => etcdhttp}/peer.go | 4 +- .../coreos/etcd/etcdserver/api/v2http/BUILD | 6 +- .../etcd/etcdserver/api/v2http/client.go | 146 +- .../coreos/etcd/etcdserver/api/v2http/http.go | 30 +- .../coreos/etcd/etcdserver/api/v3client/BUILD | 32 + .../etcd/etcdserver/api/v3client/doc.go | 45 + .../etcd/etcdserver/api/v3client/v3client.go | 67 + .../etcd/etcdserver/api/v3election/BUILD | 34 + .../etcd/etcdserver/api/v3election/doc.go | 16 + .../etcdserver/api/v3election/election.go | 123 + .../api/v3election/v3electionpb/BUILD | 39 + .../api/v3election/v3electionpb/gw/BUILD | 33 + .../v3electionpb/gw/v3election.pb.gw.go | 313 ++ .../v3election/v3electionpb/v3election.pb.go | 2098 +++++++++ .../v3election/v3electionpb/v3election.proto | 119 + .../coreos/etcd/etcdserver/api/v3lock/BUILD | 34 + .../coreos/etcd/etcdserver/api/v3lock/doc.go | 16 + .../coreos/etcd/etcdserver/api/v3lock/lock.go | 56 + .../etcd/etcdserver/api/v3lock/v3lockpb/BUILD | 38 + .../etcdserver/api/v3lock/v3lockpb/gw/BUILD | 33 + .../api/v3lock/v3lockpb/gw/v3lock.pb.gw.go | 167 + .../api/v3lock/v3lockpb/v3lock.pb.go | 978 ++++ .../api/v3lock/v3lockpb/v3lock.proto | 65 + .../coreos/etcd/etcdserver/api/v3rpc/grpc.go | 32 +- .../etcd/etcdserver/api/v3rpc/interceptor.go | 4 +- .../coreos/etcd/etcdserver/api/v3rpc/key.go | 10 +- .../coreos/etcd/etcdserver/api/v3rpc/lease.go | 31 +- .../etcd/etcdserver/api/v3rpc/maintenance.go | 3 +- .../etcd/etcdserver/api/v3rpc/member.go | 36 +- .../etcd/etcdserver/api/v3rpc/rpctypes/BUILD | 1 + 
.../etcdserver/api/v3rpc/rpctypes/error.go | 47 +- .../coreos/etcd/etcdserver/api/v3rpc/util.go | 6 +- .../coreos/etcd/etcdserver/api/v3rpc/watch.go | 47 + .../coreos/etcd/etcdserver/apply.go | 292 +- .../coreos/etcd/etcdserver/apply_auth.go | 13 +- .../coreos/etcd/etcdserver/backend.go | 81 + .../coreos/etcd/etcdserver/cluster_util.go | 10 - .../coreos/etcd/etcdserver/config.go | 9 + .../coreos/etcd/etcdserver/errors.go | 1 + .../coreos/etcd/etcdserver/etcdserverpb/BUILD | 11 +- .../etcdserver/etcdserverpb/etcdserver.pb.go | 2 +- .../etcd/etcdserver/etcdserverpb/gw/BUILD | 33 + .../etcdserverpb/{ => gw}/rpc.pb.gw.go | 589 ++- .../etcdserverpb/raft_internal.pb.go | 2 +- .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 1517 +++++- .../etcd/etcdserver/etcdserverpb/rpc.proto | 32 +- .../coreos/etcd/etcdserver/membership/BUILD | 1 + .../etcd/etcdserver/membership/cluster.go | 2 +- .../coreos/etcd/etcdserver/membership/doc.go | 16 + .../etcd/etcdserver/membership/store.go | 6 +- .../coreos/etcd/etcdserver/metrics.go | 7 + .../coreos/etcd/etcdserver/quota.go | 17 +- .../github.com/coreos/etcd/etcdserver/raft.go | 131 +- .../coreos/etcd/etcdserver/server.go | 234 +- .../coreos/etcd/etcdserver/snapshot_merge.go | 7 +- .../coreos/etcd/etcdserver/stats/leader.go | 15 +- .../coreos/etcd/etcdserver/stats/server.go | 54 +- .../coreos/etcd/etcdserver/storage.go | 3 - .../github.com/coreos/etcd/etcdserver/util.go | 2 +- .../coreos/etcd/etcdserver/v3_server.go | 292 +- .../github.com/coreos/etcd/integration/BUILD | 9 +- .../coreos/etcd/integration/bridge.go | 67 +- .../coreos/etcd/integration/cluster.go | 187 +- .../coreos/etcd/integration/cluster_direct.go | 4 + .../coreos/etcd/integration/cluster_proxy.go | 46 +- .../coreos/etcd/lease/leasehttp/BUILD | 1 - .../coreos/etcd/lease/leasehttp/http.go | 54 +- .../coreos/etcd/lease/leasepb/lease.pb.go | 2 +- vendor/github.com/coreos/etcd/lease/lessor.go | 99 +- vendor/github.com/coreos/etcd/mvcc/BUILD | 4 + 
.../github.com/coreos/etcd/mvcc/backend/BUILD | 26 +- .../coreos/etcd/mvcc/backend/backend.go | 141 +- .../coreos/etcd/mvcc/backend/batch_tx.go | 174 +- ...oltoption_default.go => config_default.go} | 6 +- .../{boltoption_linux.go => config_linux.go} | 7 +- .../etcd/mvcc/backend/config_windows.go | 26 + .../coreos/etcd/mvcc/backend/metrics.go | 10 + .../coreos/etcd/mvcc/backend/read_tx.go | 92 + .../coreos/etcd/mvcc/backend/tx_buffer.go | 181 + vendor/github.com/coreos/etcd/mvcc/index.go | 21 +- .../github.com/coreos/etcd/mvcc/key_index.go | 1 - vendor/github.com/coreos/etcd/mvcc/kv.go | 82 +- vendor/github.com/coreos/etcd/mvcc/kv_view.go | 53 + vendor/github.com/coreos/etcd/mvcc/kvstore.go | 557 +-- .../coreos/etcd/mvcc/kvstore_txn.go | 253 + vendor/github.com/coreos/etcd/mvcc/metrics.go | 15 +- .../coreos/etcd/mvcc/metrics_txn.go | 67 + .../coreos/etcd/mvcc/mvccpb/kv.pb.go | 2 +- .../coreos/etcd/mvcc/watchable_store.go | 165 +- .../coreos/etcd/mvcc/watchable_store_txn.go | 53 + .../coreos/etcd/mvcc/watcher_group.go | 2 +- .../coreos/etcd/pkg/adt/interval_tree.go | 81 +- vendor/github.com/coreos/etcd/pkg/cors/BUILD | 22 + .../github.com/coreos/etcd/pkg/cors/cors.go | 90 + .../etcd/pkg/debugutil}/BUILD | 11 +- .../coreos/etcd/pkg/debugutil/doc.go | 16 + .../coreos/etcd/pkg/debugutil/pprof.go | 47 + .../coreos/etcd/pkg/fileutil/fileutil.go | 7 +- .../coreos/etcd/pkg/fileutil/lock_linux.go | 3 +- .../coreos/etcd/pkg/fileutil/preallocate.go | 15 +- .../coreos/etcd/pkg/httputil/httputil.go | 9 - .../github.com/coreos/etcd/pkg/idutil/id.go | 4 +- .../github.com/coreos/etcd/pkg/netutil/BUILD | 1 - .../coreos/etcd/pkg/netutil/netutil.go | 34 +- .../coreos/etcd/pkg/schedule/schedule.go | 2 - vendor/github.com/coreos/etcd/pkg/srv/BUILD | 23 + vendor/github.com/coreos/etcd/pkg/srv/srv.go | 140 + .../github.com/coreos/etcd/pkg/testutil/BUILD | 1 + .../coreos/etcd/pkg/testutil/assert.go | 62 + .../coreos/etcd/pkg/testutil/leak.go | 23 +- .../coreos/etcd/pkg/transport/BUILD | 6 
+- .../coreos/etcd/pkg/transport/listener.go | 66 +- .../coreos/etcd/pkg/transport/listener_tls.go | 217 + .../etcd/pkg/transport/timeout_listener.go | 5 +- .../etcd/pkg/transport/unix_listener.go | 4 +- .../github.com/coreos/etcd/pkg/wait/wait.go | 19 +- .../coreos/etcd/proxy/grpcproxy/BUILD | 14 +- .../coreos/etcd/proxy/grpcproxy/adapter/BUILD | 40 + .../chan_stream.go} | 125 +- .../adapter/cluster_client_adapter.go | 44 + .../etcd/proxy/grpcproxy/adapter/doc.go | 17 + .../adapter/election_client_adapter.go | 79 + .../{ => adapter}/kv_client_adapter.go | 2 +- .../grpcproxy/adapter/lease_client_adapter.go | 77 + .../grpcproxy/adapter/lock_client_adapter.go | 36 + .../adapter/maintenance_client_adapter.go | 79 + .../grpcproxy/adapter/watch_client_adapter.go | 66 + .../coreos/etcd/proxy/grpcproxy/cache/BUILD | 2 +- .../etcd/proxy/grpcproxy/cache/store.go | 42 +- .../coreos/etcd/proxy/grpcproxy/cluster.go | 151 +- .../coreos/etcd/proxy/grpcproxy/election.go | 65 + .../coreos/etcd/proxy/grpcproxy/kv.go | 30 +- .../coreos/etcd/proxy/grpcproxy/leader.go | 114 + .../coreos/etcd/proxy/grpcproxy/lease.go | 344 +- .../coreos/etcd/proxy/grpcproxy/lock.go | 38 + .../coreos/etcd/proxy/grpcproxy/logger.go | 19 + .../etcd/proxy/grpcproxy/maintenance.go | 5 + .../coreos/etcd/proxy/grpcproxy/metrics.go | 7 + .../coreos/etcd/proxy/grpcproxy/register.go | 94 + .../coreos/etcd/proxy/grpcproxy/watch.go | 77 +- .../etcd/proxy/grpcproxy/watch_broadcast.go | 33 +- .../coreos/etcd/proxy/grpcproxy/watcher.go | 11 +- vendor/github.com/coreos/etcd/raft/README.md | 91 +- .../coreos/etcd/raft/log_unstable.go | 20 + vendor/github.com/coreos/etcd/raft/node.go | 16 + vendor/github.com/coreos/etcd/raft/raft.go | 4 + .../coreos/etcd/raft/raftpb/raft.pb.go | 72 +- .../coreos/etcd/rafthttp/pipeline.go | 5 +- .../coreos/etcd/rafthttp/snapshot_sender.go | 6 +- .../github.com/coreos/etcd/rafthttp/stream.go | 21 +- .../github.com/coreos/etcd/rafthttp/util.go | 32 +- 
vendor/github.com/coreos/etcd/snap/db.go | 21 +- .../coreos/etcd/snap/snappb/snap.pb.go | 2 +- vendor/github.com/coreos/etcd/store/node.go | 1 - vendor/github.com/coreos/etcd/store/store.go | 3 + .../coreos/etcd/store/watcher_hub.go | 2 +- .../github.com/coreos/etcd/version/version.go | 2 +- vendor/github.com/coreos/etcd/wal/encoder.go | 2 +- vendor/github.com/coreos/etcd/wal/repair.go | 2 +- vendor/github.com/coreos/etcd/wal/wal.go | 23 +- .../coreos/etcd/wal/walpb/record.pb.go | 2 +- .../protobuf/protoc-gen-go/descriptor/BUILD | 29 + .../protoc-gen-go/descriptor/Makefile | 37 + .../protoc-gen-go/descriptor/descriptor.pb.go | 2215 +++++++++ .../protoc-gen-go/descriptor/descriptor.proto | 849 ++++ vendor/github.com/karlseguin/ccache/Makefile | 5 - vendor/github.com/karlseguin/ccache/bucket.go | 41 - vendor/github.com/karlseguin/ccache/cache.go | 227 - .../karlseguin/ccache/configuration.go | 94 - vendor/github.com/karlseguin/ccache/item.go | 103 - .../karlseguin/ccache/layeredbucket.go | 82 - .../karlseguin/ccache/layeredcache.go | 237 - .../github.com/karlseguin/ccache/license.txt | 19 - vendor/github.com/karlseguin/ccache/readme.md | 172 - .../karlseguin/ccache/secondarycache.go | 72 - .../genproto/googleapis/api/annotations/BUILD | 29 + .../api/annotations/annotations.pb.go | 64 + .../googleapis/api/annotations/http.pb.go | 566 +++ vendor/google.golang.org/grpc/BUILD | 1 + .../grpc/health/grpc_health_v1/BUILD | 33 + .../grpc/health/grpc_health_v1/health.pb.go | 176 + .../grpc/health/grpc_health_v1/health.proto | 20 + 288 files changed, 26269 insertions(+), 5269 deletions(-) delete mode 100644 vendor/github.com/boltdb/bolt/Makefile create mode 100644 vendor/github.com/cockroachdb/cmux/.gitignore create mode 100644 vendor/github.com/cockroachdb/cmux/.travis.yml create mode 100644 vendor/github.com/cockroachdb/cmux/BUILD create mode 100644 vendor/github.com/cockroachdb/cmux/LICENSE create mode 100644 vendor/github.com/cockroachdb/cmux/README.md create mode 100644 
vendor/github.com/cockroachdb/cmux/buffer.go create mode 100644 vendor/github.com/cockroachdb/cmux/cmux.go create mode 100644 vendor/github.com/cockroachdb/cmux/matchers.go create mode 100644 vendor/github.com/cockroachdb/cmux/patricia.go rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/.gitignore (65%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/BUILD (86%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/LICENSE (100%) create mode 100644 vendor/github.com/coreos/bbolt/Makefile rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/README.md (88%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/appveyor.yml (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_386.go (72%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_amd64.go (73%) create mode 100644 vendor/github.com/coreos/bbolt/bolt_arm.go rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_arm64.go (74%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_linux.go (100%) create mode 100644 vendor/github.com/coreos/bbolt/bolt_mips64x.go rename vendor/github.com/{boltdb/bolt/bolt_arm.go => coreos/bbolt/bolt_mipsx.go} (55%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_openbsd.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_ppc.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_ppc64.go (74%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_ppc64le.go (75%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_s390x.go (74%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_unix.go (80%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_unix_solaris.go (75%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_windows.go (88%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/boltsync_unix.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bucket.go (95%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/cursor.go 
(100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/db.go (85%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/doc.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/errors.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/freelist.go (56%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/node.go (99%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/page.go (88%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/tx.go (94%) create mode 100644 vendor/github.com/coreos/etcd/auth/jwt.go delete mode 100644 vendor/github.com/coreos/etcd/client/srv.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/balancer.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/election.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/key.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/session.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/grpc_options.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/health_balancer.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/BUILD create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/doc.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/kv.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/lease.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/util.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/namespace/watch.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/naming/BUILD create mode 100644 vendor/github.com/coreos/etcd/clientv3/naming/doc.go create mode 
100644 vendor/github.com/coreos/etcd/clientv3/naming/grpc.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/ready_wait.go delete mode 100644 vendor/github.com/coreos/etcd/discovery/srv.go create mode 100644 vendor/github.com/coreos/etcd/embed/BUILD create mode 100644 vendor/github.com/coreos/etcd/embed/config.go create mode 100644 vendor/github.com/coreos/etcd/embed/doc.go create mode 100644 vendor/github.com/coreos/etcd/embed/etcd.go create mode 100644 vendor/github.com/coreos/etcd/embed/serve.go create mode 100644 vendor/github.com/coreos/etcd/embed/util.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go rename vendor/github.com/coreos/etcd/etcdserver/api/{v2http => etcdhttp}/peer.go (97%) create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go create mode 
100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto create mode 100644 vendor/github.com/coreos/etcd/etcdserver/backend.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD rename vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/{ => gw}/rpc.pb.gw.go (69%) create mode 100644 vendor/github.com/coreos/etcd/etcdserver/membership/doc.go rename vendor/github.com/coreos/etcd/mvcc/backend/{boltoption_default.go => config_default.go} (82%) rename vendor/github.com/coreos/etcd/mvcc/backend/{boltoption_linux.go => config_linux.go} (88%) create mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/kv_view.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/metrics_txn.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go create mode 100644 vendor/github.com/coreos/etcd/pkg/cors/BUILD create mode 100644 vendor/github.com/coreos/etcd/pkg/cors/cors.go rename vendor/github.com/{karlseguin/ccache => coreos/etcd/pkg/debugutil}/BUILD (66%) create mode 100644 vendor/github.com/coreos/etcd/pkg/debugutil/doc.go create mode 100644 vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go create mode 100644 vendor/github.com/coreos/etcd/pkg/srv/BUILD create mode 100644 
vendor/github.com/coreos/etcd/pkg/srv/srv.go create mode 100644 vendor/github.com/coreos/etcd/pkg/testutil/assert.go create mode 100644 vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD rename vendor/github.com/coreos/etcd/proxy/grpcproxy/{watch_client_adapter.go => adapter/chan_stream.go} (65%) create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go rename vendor/github.com/coreos/etcd/proxy/grpcproxy/{ => adapter}/kv_client_adapter.go (98%) create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go create mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto delete mode 100644 vendor/github.com/karlseguin/ccache/Makefile delete mode 100644 
vendor/github.com/karlseguin/ccache/bucket.go delete mode 100644 vendor/github.com/karlseguin/ccache/cache.go delete mode 100644 vendor/github.com/karlseguin/ccache/configuration.go delete mode 100644 vendor/github.com/karlseguin/ccache/item.go delete mode 100644 vendor/github.com/karlseguin/ccache/layeredbucket.go delete mode 100644 vendor/github.com/karlseguin/ccache/layeredcache.go delete mode 100644 vendor/github.com/karlseguin/ccache/license.txt delete mode 100644 vendor/github.com/karlseguin/ccache/readme.md delete mode 100644 vendor/github.com/karlseguin/ccache/secondarycache.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 2ddf4896e85..989543a931a 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -333,11 +333,6 @@ "Comment": "v3.5.0", "Rev": "b38d23b8782a487059e8fc8773e9a5b228a77cb6" }, - { - "ImportPath": "github.com/boltdb/bolt", - "Comment": "v1.3.0", - "Rev": "583e8937c61f1af6513608ccc75c97b6abdf4ff9" - }, { "ImportPath": "github.com/chai2010/gettext-go/gettext", "Rev": "c6fed771bfd517099caf0f7a961671fa8ed08723" @@ -423,6 +418,10 @@ "ImportPath": "github.com/clusterhq/flocker-go", "Rev": "2b8b7259d3139c96c4a6871031355808ab3fd3b3" }, + { + "ImportPath": "github.com/cockroachdb/cmux", + "Rev": "112f0506e7743d64a6eb8fedbcff13d9979bbf92" + }, { "ImportPath": "github.com/codedellemc/goscaleio", "Rev": "20e2ce2cf8852dc78bd42b76698dcd8dcd77b7b1" @@ -515,285 +514,375 @@ "Comment": "v0.6.0", "Rev": 
"a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, + { + "ImportPath": "github.com/coreos/bbolt", + "Comment": "v1.3.1-coreos.6", + "Rev": "48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d" + }, { "ImportPath": "github.com/coreos/etcd/alarm", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/auth", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/client", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/concurrency", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/namespace", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/naming", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/compactor", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/discovery", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": 
"v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/embed", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/error", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/etcdhttp", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3client", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", + "Comment": 
"v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/auth", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb/gw", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/membership", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/stats", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, 
{ "ImportPath": "github.com/coreos/etcd/integration", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/lease", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/lease/leasehttp", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/lease/leasepb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc/backend", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/adt", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/contention", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/cors", + "Comment": "v3.2.13", + "Rev": 
"95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/cpuutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/crc", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/debugutil", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/httputil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/idutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/ioutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/logutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/monotime", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/netutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": 
"v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/pbutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/runtime", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/schedule", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/testutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/wait", - "Comment": "v3.1.10", - "Rev": 
"0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/adapter", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/cache", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/raft", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/raft/raftpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/rafthttp", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/snap", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/snap/snappb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/store", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/version", - "Comment": 
"v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/wal", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/wal/walpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/go-oidc/http", @@ -1352,6 +1441,10 @@ "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, + { + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + }, { "ImportPath": "github.com/golang/protobuf/ptypes", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -1921,11 +2014,6 @@ "ImportPath": "github.com/kardianos/osext", "Rev": "8fef92e41e22a70e700a96b29f066cda30ea24ef" }, - { - "ImportPath": "github.com/karlseguin/ccache", - "Comment": "v2.0.2-5-g3ba9789", - "Rev": "3ba9789cfd2cb7b4fb4657efc994cc1c599a648c" - }, { "ImportPath": "github.com/kr/fs", "Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b" @@ -2920,6 +3008,10 @@ "ImportPath": "google.golang.org/api/pubsub/v1", "Rev": "c0dae069ee96c9261a04c81efd9e0f1e55f565ac" }, + { + "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" + }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" @@ -2949,6 +3041,11 @@ "Comment": "v1.3.0", "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Comment": "v1.3.0", + "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + }, { "ImportPath": "google.golang.org/grpc/internal", "Comment": 
"v1.3.0", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 587fa18e32d..ab65bb6703b 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -11102,34 +11102,6 @@ THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/boltdb/bolt licensed under: = - -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -= vendor/github.com/boltdb/bolt/LICENSE 13b2a308eefa10d841e3bf2467dbe07a -================================================================================ - - ================================================================================ = vendor/github.com/chai2010/gettext-go/gettext licensed under: = @@ -11884,6 +11856,216 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================================================ +================================================================================ += vendor/github.com/cockroachdb/cmux licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/cockroachdb/cmux/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/codedellemc/goscaleio licensed under: = @@ -15593,6 +15775,34 @@ SOFTWARE. 
================================================================================ +================================================================================ += vendor/github.com/coreos/bbolt licensed under: = + +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + += vendor/github.com/coreos/bbolt/LICENSE 13b2a308eefa10d841e3bf2467dbe07a +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/alarm licensed under: = @@ -16643,6 +16853,636 @@ SOFTWARE. 
================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/clientv3/concurrency licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/clientv3/namespace licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/clientv3/naming licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/compactor licensed under: = @@ -17063,6 +17903,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/embed licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/error licensed under: = @@ -17693,6 +18743,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/etcdserver/api/v2http licensed under: = @@ -18113,6 +19373,1476 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3client licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3election licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3lock licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/etcdserver/api/v3rpc licensed under: = @@ -18953,6 +21683,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/etcdserver/membership licensed under: = @@ -21263,6 +24203,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/pkg/cors licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/pkg/cpuutil licensed under: = @@ -21683,6 +24833,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/pkg/debugutil licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/pkg/fileutil licensed under: = @@ -23993,6 +27353,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/pkg/srv licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/pkg/testutil licensed under: = @@ -25253,6 +28823,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/coreos/etcd/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/etcd/proxy/grpcproxy/cache licensed under: = @@ -44096,6 +47876,45 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/github.com/golang/protobuf/protoc-gen-go/descriptor licensed under: = + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. 
+https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + += vendor/github.com/golang/protobuf/LICENSE 14db3a56c3796a940ba32948a15f97d0 +================================================================================ + + ================================================================================ = vendor/github.com/golang/protobuf/ptypes licensed under: = @@ -66349,33 +70168,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================================================ -================================================================================ -= vendor/github.com/karlseguin/ccache licensed under: = - -Copyright (c) 2013 Karl Seguin. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -= vendor/github.com/karlseguin/ccache/license.txt fb40cd712dfcf5e0a8de4c13c3399db2 -================================================================================ - - ================================================================================ = vendor/github.com/kr/fs licensed under: = @@ -85795,6 +89587,216 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================================================ +================================================================================ += vendor/google.golang.org/genproto/googleapis/api/annotations licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/genproto/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/google.golang.org/genproto/googleapis/rpc/status licensed under: = @@ -86185,6 +90187,42 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/google.golang.org/grpc/health/grpc_health_v1 licensed under: = + +Copyright 2014, Google Inc. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ += vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 +================================================================================ + + ================================================================================ = vendor/google.golang.org/grpc/internal licensed under: = diff --git a/vendor/BUILD b/vendor/BUILD index c8313075d4a..20495c2248a 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -45,7 +45,6 @@ filegroup( "//vendor/github.com/aws/aws-sdk-go/service/sts:all-srcs", "//vendor/github.com/beorn7/perks/quantile:all-srcs", "//vendor/github.com/blang/semver:all-srcs", - "//vendor/github.com/boltdb/bolt:all-srcs", "//vendor/github.com/chai2010/gettext-go/gettext:all-srcs", "//vendor/github.com/cloudflare/cfssl/auth:all-srcs", "//vendor/github.com/cloudflare/cfssl/certdb:all-srcs", @@ -59,6 +58,7 @@ filegroup( "//vendor/github.com/cloudflare/cfssl/ocsp/config:all-srcs", "//vendor/github.com/cloudflare/cfssl/signer:all-srcs", "//vendor/github.com/clusterhq/flocker-go:all-srcs", + "//vendor/github.com/cockroachdb/cmux:all-srcs", "//vendor/github.com/codedellemc/goscaleio:all-srcs", "//vendor/github.com/codegangsta/negroni:all-srcs", "//vendor/github.com/container-storage-interface/spec/lib/go/csi:all-srcs", @@ -74,12 +74,14 @@ filegroup( "//vendor/github.com/containernetworking/cni/pkg/invoke:all-srcs", "//vendor/github.com/containernetworking/cni/pkg/types:all-srcs", "//vendor/github.com/containernetworking/cni/pkg/version:all-srcs", + "//vendor/github.com/coreos/bbolt:all-srcs", "//vendor/github.com/coreos/etcd/alarm:all-srcs", "//vendor/github.com/coreos/etcd/auth:all-srcs", "//vendor/github.com/coreos/etcd/client:all-srcs", "//vendor/github.com/coreos/etcd/clientv3:all-srcs", "//vendor/github.com/coreos/etcd/compactor:all-srcs", "//vendor/github.com/coreos/etcd/discovery:all-srcs", + "//vendor/github.com/coreos/etcd/embed:all-srcs", "//vendor/github.com/coreos/etcd/error:all-srcs", 
"//vendor/github.com/coreos/etcd/etcdserver:all-srcs", "//vendor/github.com/coreos/etcd/integration:all-srcs", @@ -87,8 +89,10 @@ filegroup( "//vendor/github.com/coreos/etcd/mvcc:all-srcs", "//vendor/github.com/coreos/etcd/pkg/adt:all-srcs", "//vendor/github.com/coreos/etcd/pkg/contention:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/cors:all-srcs", "//vendor/github.com/coreos/etcd/pkg/cpuutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/crc:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/debugutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/fileutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/httputil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/idutil:all-srcs", @@ -100,6 +104,7 @@ filegroup( "//vendor/github.com/coreos/etcd/pkg/pbutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/runtime:all-srcs", "//vendor/github.com/coreos/etcd/pkg/schedule:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/srv:all-srcs", "//vendor/github.com/coreos/etcd/pkg/testutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/tlsutil:all-srcs", "//vendor/github.com/coreos/etcd/pkg/transport:all-srcs", @@ -211,6 +216,7 @@ filegroup( "//vendor/github.com/golang/mock/gomock:all-srcs", "//vendor/github.com/golang/protobuf/jsonpb:all-srcs", "//vendor/github.com/golang/protobuf/proto:all-srcs", + "//vendor/github.com/golang/protobuf/protoc-gen-go/descriptor:all-srcs", "//vendor/github.com/golang/protobuf/ptypes:all-srcs", "//vendor/github.com/google/btree:all-srcs", "//vendor/github.com/google/cadvisor/accelerators:all-srcs", @@ -267,7 +273,6 @@ filegroup( "//vendor/github.com/jteeuwen/go-bindata:all-srcs", "//vendor/github.com/juju/ratelimit:all-srcs", "//vendor/github.com/kardianos/osext:all-srcs", - "//vendor/github.com/karlseguin/ccache:all-srcs", "//vendor/github.com/kr/fs:all-srcs", "//vendor/github.com/kr/pretty:all-srcs", "//vendor/github.com/kr/pty:all-srcs", @@ -393,6 +398,7 @@ filegroup( "//vendor/google.golang.org/api/logging/v2beta1:all-srcs", 
"//vendor/google.golang.org/api/monitoring/v3:all-srcs", "//vendor/google.golang.org/api/pubsub/v1:all-srcs", + "//vendor/google.golang.org/genproto/googleapis/api/annotations:all-srcs", "//vendor/google.golang.org/genproto/googleapis/rpc/status:all-srcs", "//vendor/google.golang.org/grpc:all-srcs", "//vendor/gopkg.in/gcfg.v1:all-srcs", diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile deleted file mode 100644 index e035e63adcd..00000000000 --- a/vendor/github.com/boltdb/bolt/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt - -test: - @go test -v -cover . - @go test -v ./cmd/bolt - -.PHONY: fmt test diff --git a/vendor/github.com/cockroachdb/cmux/.gitignore b/vendor/github.com/cockroachdb/cmux/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/cockroachdb/cmux/.travis.yml b/vendor/github.com/cockroachdb/cmux/.travis.yml new file mode 100644 index 00000000000..e73780f2eb0 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/.travis.yml @@ -0,0 +1,22 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + +gobuild_args: -race + +before_install: + - go get -u github.com/golang/lint/golint + - if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then go get -u github.com/kisielk/errcheck; fi + - 
go get -u golang.org/x/tools/cmd/vet + +before_script: + - '! gofmt -s -l . | read' + - golint ./... + - echo $TRAVIS_GO_VERSION + - if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then errcheck ./...; fi + - go vet . + - go tool vet --shadow . diff --git a/vendor/github.com/cockroachdb/cmux/BUILD b/vendor/github.com/cockroachdb/cmux/BUILD new file mode 100644 index 00000000000..b8a9413ba38 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "buffer.go", + "cmux.go", + "matchers.go", + "patricia.go", + ], + importpath = "github.com/cockroachdb/cmux", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/net/http2:go_default_library", + "//vendor/golang.org/x/net/http2/hpack:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/cockroachdb/cmux/LICENSE b/vendor/github.com/cockroachdb/cmux/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cockroachdb/cmux/README.md b/vendor/github.com/cockroachdb/cmux/README.md new file mode 100644 index 00000000000..b3713da5876 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/README.md @@ -0,0 +1,65 @@ +# cmux: Connection Mux [![Build Status](https://travis-ci.org/cockroachdb/cmux.svg?branch=master)](https://travis-ci.org/cockroachdb/cmux) [![GoDoc](https://godoc.org/github.com/cockroachdb/cmux?status.svg)](https://godoc.org/github.com/cockroachdb/cmux) + +cmux is a generic Go library to multiplex connections based on their payload. +Using cmux, you can serve gRPC, SSH, HTTPS, HTTP, Go RPC, and pretty much any +other protocol on the same TCP listener. + +## How-To +Simply create your main listener, create a cmux for that listener, +and then match connections: +```go +// Create the main listener. +l, err := net.Listen("tcp", ":23456") +if err != nil { + log.Fatal(err) +} + +// Create a cmux. +m := cmux.New(l) + +// Match connections in order: +// First grpc, then HTTP, and otherwise Go RPC/TCP. +grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc")) +httpL := m.Match(cmux.HTTP1Fast()) +trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched. + +// Create your protocol servers. 
+grpcS := grpc.NewServer() +grpchello.RegisterGreeterServer(grpcs, &server{}) + +httpS := &http.Server{ + Handler: &helloHTTP1Handler{}, +} + +trpcS := rpc.NewServer() +s.Register(&ExampleRPCRcvr{}) + +// Use the muxed listeners for your servers. +go grpcS.Serve(grpcL) +go httpS.Serve(httpL) +go trpcS.Accept(trpcL) + +// Start serving! +m.Serve() +``` + +There are [more examples on GoDoc](https://godoc.org/github.com/cockroachdb/cmux#pkg-examples). + +## Performance +Since we are only matching the very first bytes of a connection, the +performance overhead on long-lived connections (i.e., RPCs and pipelined HTTP +streams) is negligible. + +## Limitations +* *TLS*: `net/http` uses a [type assertion](https://github.com/golang/go/issues/14221) +to identify TLS connections; since cmux's lookahead-implementing connection +wraps the underlying TLS connection, this type assertion fails. This means you +can serve HTTPS using cmux but `http.Request.TLS` will not be set in your +handlers. If you are able to wrap TLS around cmux, you can work around this +limitation. See https://github.com/cockroachdb/cockroach/commit/83caba2 for an +example of this approach. + +* *Different Protocols on The Same Connection*: `cmux` matches the connection +when it's accepted. For example, one connection can be either gRPC or REST, but +not both. That is, we assume that a client connection is either used for gRPC +or REST. diff --git a/vendor/github.com/cockroachdb/cmux/buffer.go b/vendor/github.com/cockroachdb/cmux/buffer.go new file mode 100644 index 00000000000..5c178585363 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/buffer.go @@ -0,0 +1,35 @@ +package cmux + +import ( + "bytes" + "io" +) + +// bufferedReader is an optimized implementation of io.Reader that behaves like +// ``` +// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer)) +// ``` +// without allocating. 
+type bufferedReader struct { + source io.Reader + buffer *bytes.Buffer + bufferRead int + bufferSize int +} + +func (s *bufferedReader) Read(p []byte) (int, error) { + // Functionality of bytes.Reader. + bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize]) + s.bufferRead += bn + + p = p[bn:] + + // Funtionality of io.TeeReader. + sn, sErr := s.source.Read(p) + if sn > 0 { + if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil { + return bn + wn, wErr + } + } + return bn + sn, sErr +} diff --git a/vendor/github.com/cockroachdb/cmux/cmux.go b/vendor/github.com/cockroachdb/cmux/cmux.go new file mode 100644 index 00000000000..89cc910b024 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/cmux.go @@ -0,0 +1,210 @@ +package cmux + +import ( + "bytes" + "fmt" + "io" + "net" + "sync" +) + +// Matcher matches a connection based on its content. +type Matcher func(io.Reader) bool + +// ErrorHandler handles an error and returns whether +// the mux should continue serving the listener. +type ErrorHandler func(error) bool + +var _ net.Error = ErrNotMatched{} + +// ErrNotMatched is returned whenever a connection is not matched by any of +// the matchers registered in the multiplexer. +type ErrNotMatched struct { + c net.Conn +} + +func (e ErrNotMatched) Error() string { + return fmt.Sprintf("mux: connection %v not matched by an matcher", + e.c.RemoteAddr()) +} + +// Temporary implements the net.Error interface. +func (e ErrNotMatched) Temporary() bool { return true } + +// Timeout implements the net.Error interface. +func (e ErrNotMatched) Timeout() bool { return false } + +type errListenerClosed string + +func (e errListenerClosed) Error() string { return string(e) } +func (e errListenerClosed) Temporary() bool { return false } +func (e errListenerClosed) Timeout() bool { return false } + +// ErrListenerClosed is returned from muxListener.Accept when the underlying +// listener is closed. 
+var ErrListenerClosed = errListenerClosed("mux: listener closed") + +// New instantiates a new connection multiplexer. +func New(l net.Listener) CMux { + return &cMux{ + root: l, + bufLen: 1024, + errh: func(_ error) bool { return true }, + donec: make(chan struct{}), + } +} + +// CMux is a multiplexer for network connections. +type CMux interface { + // Match returns a net.Listener that sees (i.e., accepts) only + // the connections matched by at least one of the matcher. + // + // The order used to call Match determines the priority of matchers. + Match(...Matcher) net.Listener + // Serve starts multiplexing the listener. Serve blocks and perhaps + // should be invoked concurrently within a go routine. + Serve() error + // HandleError registers an error handler that handles listener errors. + HandleError(ErrorHandler) +} + +type matchersListener struct { + ss []Matcher + l muxListener +} + +type cMux struct { + root net.Listener + bufLen int + errh ErrorHandler + donec chan struct{} + sls []matchersListener +} + +func (m *cMux) Match(matchers ...Matcher) net.Listener { + ml := muxListener{ + Listener: m.root, + connc: make(chan net.Conn, m.bufLen), + } + m.sls = append(m.sls, matchersListener{ss: matchers, l: ml}) + return ml +} + +func (m *cMux) Serve() error { + var wg sync.WaitGroup + + defer func() { + close(m.donec) + wg.Wait() + + for _, sl := range m.sls { + close(sl.l.connc) + // Drain the connections enqueued for the listener. 
+ for c := range sl.l.connc { + _ = c.Close() + } + } + }() + + for { + c, err := m.root.Accept() + if err != nil { + if !m.handleErr(err) { + return err + } + continue + } + + wg.Add(1) + go m.serve(c, m.donec, &wg) + } +} + +func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + + muc := newMuxConn(c) + for _, sl := range m.sls { + for _, s := range sl.ss { + matched := s(muc.getSniffer()) + if matched { + select { + case sl.l.connc <- muc: + case <-donec: + _ = c.Close() + } + return + } + } + } + + _ = c.Close() + err := ErrNotMatched{c: c} + if !m.handleErr(err) { + _ = m.root.Close() + } +} + +func (m *cMux) HandleError(h ErrorHandler) { + m.errh = h +} + +func (m *cMux) handleErr(err error) bool { + if !m.errh(err) { + return false + } + + if ne, ok := err.(net.Error); ok { + return ne.Temporary() + } + + return false +} + +type muxListener struct { + net.Listener + connc chan net.Conn +} + +func (l muxListener) Accept() (net.Conn, error) { + c, ok := <-l.connc + if !ok { + return nil, ErrListenerClosed + } + return c, nil +} + +// MuxConn wraps a net.Conn and provides transparent sniffing of connection data. +type MuxConn struct { + net.Conn + buf bytes.Buffer + sniffer bufferedReader +} + +func newMuxConn(c net.Conn) *MuxConn { + return &MuxConn{ + Conn: c, + } +} + +// From the io.Reader documentation: +// +// When Read encounters an error or end-of-file condition after +// successfully reading n > 0 bytes, it returns the number of +// bytes read. It may return the (non-nil) error from the same call +// or return the error (and n == 0) from a subsequent call. +// An instance of this general case is that a Reader returning +// a non-zero number of bytes at the end of the input stream may +// return either err == EOF or err == nil. The next Read should +// return 0, EOF. 
+func (m *MuxConn) Read(p []byte) (int, error) { + if n, err := m.buf.Read(p); err != io.EOF { + return n, err + } + return m.Conn.Read(p) +} + +func (m *MuxConn) getSniffer() io.Reader { + m.sniffer = bufferedReader{source: m.Conn, buffer: &m.buf, bufferSize: m.buf.Len()} + return &m.sniffer +} diff --git a/vendor/github.com/cockroachdb/cmux/matchers.go b/vendor/github.com/cockroachdb/cmux/matchers.go new file mode 100644 index 00000000000..abc30f6e0ad --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/matchers.go @@ -0,0 +1,150 @@ +package cmux + +import ( + "bufio" + "io" + "io/ioutil" + "net/http" + "strings" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +// Any is a Matcher that matches any connection. +func Any() Matcher { + return func(r io.Reader) bool { return true } +} + +// PrefixMatcher returns a matcher that matches a connection if it +// starts with any of the strings in strs. +func PrefixMatcher(strs ...string) Matcher { + pt := newPatriciaTreeString(strs...) + return pt.matchPrefix +} + +var defaultHTTPMethods = []string{ + "OPTIONS", + "GET", + "HEAD", + "POST", + "PUT", + "DELETE", + "TRACE", + "CONNECT", +} + +// HTTP1Fast only matches the methods in the HTTP request. +// +// This matcher is very optimistic: if it returns true, it does not mean that +// the request is a valid HTTP response. If you want a correct but slower HTTP1 +// matcher, use HTTP1 instead. +func HTTP1Fast(extMethods ...string) Matcher { + return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...) +} + +const maxHTTPRead = 4096 + +// HTTP1 parses the first line or upto 4096 bytes of the request to see if +// the conection contains an HTTP request. 
+func HTTP1() Matcher { + return func(r io.Reader) bool { + br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead}) + l, part, err := br.ReadLine() + if err != nil || part { + return false + } + + _, _, proto, ok := parseRequestLine(string(l)) + if !ok { + return false + } + + v, _, ok := http.ParseHTTPVersion(proto) + return ok && v == 1 + } +} + +// grabbed from net/http. +func parseRequestLine(line string) (method, uri, proto string, ok bool) { + s1 := strings.Index(line, " ") + s2 := strings.Index(line[s1+1:], " ") + if s1 < 0 || s2 < 0 { + return + } + s2 += s1 + 1 + return line[:s1], line[s1+1 : s2], line[s2+1:], true +} + +// HTTP2 parses the frame header of the first frame to detect whether the +// connection is an HTTP2 connection. +func HTTP2() Matcher { + return hasHTTP2Preface +} + +// HTTP1HeaderField returns a matcher matching the header fields of the first +// request of an HTTP 1 connection. +func HTTP1HeaderField(name, value string) Matcher { + return func(r io.Reader) bool { + return matchHTTP1Field(r, name, value) + } +} + +// HTTP2HeaderField resturns a matcher matching the header fields of the first +// headers frame. 
+func HTTP2HeaderField(name, value string) Matcher { + return func(r io.Reader) bool { + return matchHTTP2Field(r, name, value) + } +} + +func hasHTTP2Preface(r io.Reader) bool { + var b [len(http2.ClientPreface)]byte + if _, err := io.ReadFull(r, b[:]); err != nil { + return false + } + + return string(b[:]) == http2.ClientPreface +} + +func matchHTTP1Field(r io.Reader, name, value string) (matched bool) { + req, err := http.ReadRequest(bufio.NewReader(r)) + if err != nil { + return false + } + + return req.Header.Get(name) == value +} + +func matchHTTP2Field(r io.Reader, name, value string) (matched bool) { + if !hasHTTP2Preface(r) { + return false + } + + framer := http2.NewFramer(ioutil.Discard, r) + hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) { + if hf.Name == name && hf.Value == value { + matched = true + } + }) + for { + f, err := framer.ReadFrame() + if err != nil { + return false + } + + switch f := f.(type) { + case *http2.HeadersFrame: + if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil { + return false + } + if matched { + return true + } + + if f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0 { + return false + } + } + } +} diff --git a/vendor/github.com/cockroachdb/cmux/patricia.go b/vendor/github.com/cockroachdb/cmux/patricia.go new file mode 100644 index 00000000000..56ec4e7b287 --- /dev/null +++ b/vendor/github.com/cockroachdb/cmux/patricia.go @@ -0,0 +1,173 @@ +package cmux + +import ( + "bytes" + "io" +) + +// patriciaTree is a simple patricia tree that handles []byte instead of string +// and cannot be changed after instantiation. 
+type patriciaTree struct { + root *ptNode +} + +func newPatriciaTree(b ...[]byte) *patriciaTree { + return &patriciaTree{ + root: newNode(b), + } +} + +func newPatriciaTreeString(strs ...string) *patriciaTree { + b := make([][]byte, len(strs)) + for i, s := range strs { + b[i] = []byte(s) + } + return &patriciaTree{ + root: newNode(b), + } +} + +func (t *patriciaTree) matchPrefix(r io.Reader) bool { + return t.root.match(r, true) +} + +func (t *patriciaTree) match(r io.Reader) bool { + return t.root.match(r, false) +} + +type ptNode struct { + prefix []byte + next map[byte]*ptNode + terminal bool +} + +func newNode(strs [][]byte) *ptNode { + if len(strs) == 0 { + return &ptNode{ + prefix: []byte{}, + terminal: true, + } + } + + if len(strs) == 1 { + return &ptNode{ + prefix: strs[0], + terminal: true, + } + } + + p, strs := splitPrefix(strs) + n := &ptNode{ + prefix: p, + } + + nexts := make(map[byte][][]byte) + for _, s := range strs { + if len(s) == 0 { + n.terminal = true + continue + } + nexts[s[0]] = append(nexts[s[0]], s[1:]) + } + + n.next = make(map[byte]*ptNode) + for first, rests := range nexts { + n.next[first] = newNode(rests) + } + + return n +} + +func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) { + if len(bss) == 0 || len(bss[0]) == 0 { + return prefix, bss + } + + if len(bss) == 1 { + return bss[0], [][]byte{{}} + } + + for i := 0; ; i++ { + var cur byte + eq := true + for j, b := range bss { + if len(b) <= i { + eq = false + break + } + + if j == 0 { + cur = b[i] + continue + } + + if cur != b[i] { + eq = false + break + } + } + + if !eq { + break + } + + prefix = append(prefix, cur) + } + + rest = make([][]byte, 0, len(bss)) + for _, b := range bss { + rest = append(rest, b[len(prefix):]) + } + + return prefix, rest +} + +func readBytes(r io.Reader, n int) (b []byte, err error) { + b = make([]byte, n) + o := 0 + for o < n { + nr, err := r.Read(b[o:]) + if err != nil && err != io.EOF { + return b, err + } + + o += nr + + if err == 
io.EOF { + break + } + } + return b[:o], nil +} + +func (n *ptNode) match(r io.Reader, prefix bool) bool { + if l := len(n.prefix); l > 0 { + b, err := readBytes(r, l) + if err != nil || len(b) != l || !bytes.Equal(b, n.prefix) { + return false + } + } + + if prefix && n.terminal { + return true + } + + b := make([]byte, 1) + for { + nr, err := r.Read(b) + if nr != 0 { + break + } + + if err == io.EOF { + return n.terminal + } + + if err != nil { + return false + } + } + + nextN, ok := n.next[b[0]] + return ok && nextN.match(r, prefix) +} diff --git a/vendor/github.com/boltdb/bolt/.gitignore b/vendor/github.com/coreos/bbolt/.gitignore similarity index 65% rename from vendor/github.com/boltdb/bolt/.gitignore rename to vendor/github.com/coreos/bbolt/.gitignore index c7bd2b7a5b8..c2a8cfa788c 100644 --- a/vendor/github.com/boltdb/bolt/.gitignore +++ b/vendor/github.com/coreos/bbolt/.gitignore @@ -2,3 +2,4 @@ *.test *.swp /bin/ +cmd/bolt/bolt diff --git a/vendor/github.com/boltdb/bolt/BUILD b/vendor/github.com/coreos/bbolt/BUILD similarity index 86% rename from vendor/github.com/boltdb/bolt/BUILD rename to vendor/github.com/coreos/bbolt/BUILD index a3885567cf2..cab44c170dd 100644 --- a/vendor/github.com/boltdb/bolt/BUILD +++ b/vendor/github.com/coreos/bbolt/BUILD @@ -66,6 +66,18 @@ go_library( "@io_bazel_rules_go//go/platform:arm64": [ "bolt_arm64.go", ], + "@io_bazel_rules_go//go/platform:mips": [ + "bolt_mipsx.go", + ], + "@io_bazel_rules_go//go/platform:mips64": [ + "bolt_mips64x.go", + ], + "@io_bazel_rules_go//go/platform:mips64le": [ + "bolt_mips64x.go", + ], + "@io_bazel_rules_go//go/platform:mipsle": [ + "bolt_mipsx.go", + ], "@io_bazel_rules_go//go/platform:ppc64": [ "bolt_ppc64.go", ], @@ -77,7 +89,7 @@ go_library( ], "//conditions:default": [], }), - importpath = "github.com/boltdb/bolt", + importpath = "github.com/coreos/bbolt", visibility = ["//visibility:public"], deps = select({ "@io_bazel_rules_go//go/platform:solaris": [ diff --git 
a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/coreos/bbolt/LICENSE similarity index 100% rename from vendor/github.com/boltdb/bolt/LICENSE rename to vendor/github.com/coreos/bbolt/LICENSE diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/github.com/coreos/bbolt/Makefile new file mode 100644 index 00000000000..43b94f3bdfe --- /dev/null +++ b/vendor/github.com/coreos/bbolt/Makefile @@ -0,0 +1,30 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +fmt: + !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + +# go get honnef.co/go/tools/simple +gosimple: + gosimple ./... + +# go get honnef.co/go/tools/unused +unused: + unused ./... + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/coreos/bbolt + +test: + go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + go test -v ./cmd/bolt + +.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/coreos/bbolt/README.md similarity index 88% rename from vendor/github.com/boltdb/bolt/README.md rename to vendor/github.com/coreos/bbolt/README.md index 8523e337734..015f0efbe84 100644 --- a/vendor/github.com/boltdb/bolt/README.md +++ b/vendor/github.com/coreos/bbolt/README.md @@ -1,6 +1,16 @@ -Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) +bbolt ==== +[![Go Report 
Card](https://goreportcard.com/badge/github.com/coreos/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/coreos/bbolt) +[![Coverage](https://codecov.io/gh/coreos/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/coreos/bbolt) +[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/bbolt) + +bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value +store. The purpose of this fork is to provide the Go community with an active +maintenance and development target for Bolt; the goal is improved reliability +and stability. bbolt includes bug fixes, performance enhancements, and features +not found in Bolt while preserving backwards compatibility with the Bolt API. + Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] [LMDB project][lmdb]. The goal of the project is to provide a simple, fast, and reliable database for projects that don't require a full database @@ -10,16 +20,18 @@ Since Bolt is meant to be used as such a low-level piece of functionality, simplicity is key. The API will be small and only focus on getting values and setting values. That's it. +[gh_ben]: https://github.com/benbjohnson +[bolt]: https://github.com/boltdb/bolt [hyc_symas]: https://twitter.com/hyc_symas [lmdb]: http://symas.com/mdb/ ## Project Status -Bolt is stable and the API is fixed. Full unit test coverage and randomized -black box testing are used to ensure database consistency and thread safety. -Bolt is currently in high-load production environments serving databases as -large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed -services every day. +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. 
Many companies such as +Shopify and Heroku use Bolt-backed services every day. ## Table of Contents @@ -59,7 +71,7 @@ services every day. To start using Bolt, install Go and run `go get`: ```sh -$ go get github.com/boltdb/bolt/... +$ go get github.com/coreos/bbolt/... ``` This will retrieve the library and install the `bolt` command line utility into @@ -79,7 +91,7 @@ package main import ( "log" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) func main() { @@ -209,7 +221,7 @@ and then safely close your transaction if an error is returned. This is the recommended way to use Bolt transactions. However, sometimes you may want to manually start and end your transactions. -You can use the `Tx.Begin()` function directly but **please** be sure to close +You can use the `DB.Begin()` function directly but **please** be sure to close the transaction. ```go @@ -395,7 +407,7 @@ db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("MyBucket")).Cursor() prefix := []byte("1234") - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { fmt.Printf("key=%s, value=%s\n", k, v) } @@ -448,6 +460,10 @@ db.View(func(tx *bolt.Tx) error { }) ``` +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. ### Nested buckets @@ -460,6 +476,55 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) func (*Bucket) DeleteBucket(key []byte) error ``` +Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. 
+ +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. + // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) + + // Setup the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. + if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + ### Database backups @@ -469,7 +534,7 @@ this from a read-only transaction, it will perform a hot backup and not block your other database reads and writes. By default, it will use a regular file handle which will utilize the operating -system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) +system's page cache. See the [`Tx`](https://godoc.org/github.com/coreos/bbolt#Tx) documentation for information about optimizing for larger-than-RAM datasets. One common use case is to backup over HTTP so you can use tools like `cURL` to @@ -715,6 +780,9 @@ Here are a few things to note when evaluating and using Bolt: can be reused by a new page or can be unmapped from virtual memory and you'll see an `unexpected fault address` panic when accessing it. +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + * Be careful when using `Bucket.FillPercent`. 
Setting a high fill percent for buckets that have random inserts will cause your database to have very poor page utilization. @@ -755,7 +823,7 @@ Here are a few things to note when evaluating and using Bolt: ## Reading the Source -Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, +Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, transactional key/value database so it can be a good starting point for people interested in how databases work. @@ -848,5 +916,13 @@ Below is a list of public, open source projects that use Bolt: * [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. +* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. +* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. 
+* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/coreos/bbolt/appveyor.yml similarity index 100% rename from vendor/github.com/boltdb/bolt/appveyor.yml rename to vendor/github.com/coreos/bbolt/appveyor.yml diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/coreos/bbolt/bolt_386.go similarity index 72% rename from vendor/github.com/boltdb/bolt/bolt_386.go rename to vendor/github.com/coreos/bbolt/bolt_386.go index e659bfb91f3..820d533c15f 100644 --- a/vendor/github.com/boltdb/bolt/bolt_386.go +++ b/vendor/github.com/coreos/bbolt/bolt_386.go @@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/coreos/bbolt/bolt_amd64.go similarity index 73% rename from vendor/github.com/boltdb/bolt/bolt_amd64.go rename to vendor/github.com/coreos/bbolt/bolt_amd64.go index cca6b7eb707..98fafdb47d8 100644 --- a/vendor/github.com/boltdb/bolt/bolt_amd64.go +++ b/vendor/github.com/coreos/bbolt/bolt_amd64.go @@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_arm.go new file mode 100644 index 00000000000..7e5cb4b9412 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/coreos/bbolt/bolt_arm64.go similarity index 74% rename from vendor/github.com/boltdb/bolt/bolt_arm64.go rename to vendor/github.com/coreos/bbolt/bolt_arm64.go index 6d2309352e0..b26d84f91ba 100644 --- a/vendor/github.com/boltdb/bolt/bolt_arm64.go +++ b/vendor/github.com/coreos/bbolt/bolt_arm64.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/coreos/bbolt/bolt_linux.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_linux.go rename to vendor/github.com/coreos/bbolt/bolt_linux.go diff --git a/vendor/github.com/coreos/bbolt/bolt_mips64x.go b/vendor/github.com/coreos/bbolt/bolt_mips64x.go new file mode 100644 index 00000000000..134b578bd44 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_mips64x.go @@ -0,0 +1,12 @@ +// +build mips64 mips64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x8000000000 // 512GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_mipsx.go similarity index 55% rename from vendor/github.com/boltdb/bolt/bolt_arm.go rename to vendor/github.com/coreos/bbolt/bolt_mipsx.go index e659bfb91f3..d5ecb0597e4 100644 --- a/vendor/github.com/boltdb/bolt/bolt_arm.go +++ b/vendor/github.com/coreos/bbolt/bolt_mipsx.go @@ -1,7 +1,12 @@ +// +build mips mipsle + package bolt // maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB +const maxMapSize = 0x40000000 // 1GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/coreos/bbolt/bolt_openbsd.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_openbsd.go rename to vendor/github.com/coreos/bbolt/bolt_openbsd.go diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/coreos/bbolt/bolt_ppc.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_ppc.go rename to vendor/github.com/coreos/bbolt/bolt_ppc.go diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/coreos/bbolt/bolt_ppc64.go similarity index 74% rename from vendor/github.com/boltdb/bolt/bolt_ppc64.go rename to vendor/github.com/coreos/bbolt/bolt_ppc64.go index 2dc6be02e3e..9331d9771eb 100644 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64.go +++ b/vendor/github.com/coreos/bbolt/bolt_ppc64.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go similarity index 75% rename from vendor/github.com/boltdb/bolt/bolt_ppc64le.go rename to vendor/github.com/coreos/bbolt/bolt_ppc64le.go index 8351e129f6a..8c143bc5d19 100644 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go +++ b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/coreos/bbolt/bolt_s390x.go similarity index 74% rename from vendor/github.com/boltdb/bolt/bolt_s390x.go rename to vendor/github.com/coreos/bbolt/bolt_s390x.go index f4dd26bbba7..d7c39af9253 100644 --- a/vendor/github.com/boltdb/bolt/bolt_s390x.go +++ b/vendor/github.com/coreos/bbolt/bolt_s390x.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/coreos/bbolt/bolt_unix.go similarity index 80% rename from vendor/github.com/boltdb/bolt/bolt_unix.go rename to vendor/github.com/coreos/bbolt/bolt_unix.go index cad62dda1e3..06592a08089 100644 --- a/vendor/github.com/boltdb/bolt/bolt_unix.go +++ b/vendor/github.com/coreos/bbolt/bolt_unix.go @@ -13,29 +13,32 @@ import ( // flock acquires an advisory lock on a file descriptor. func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + flag := syscall.LOCK_NB + if exclusive { + flag |= syscall.LOCK_EX + } else { + flag |= syscall.LOCK_SH + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + // Attempt to obtain an exclusive lock. + err := syscall.Flock(int(fd), flag) if err == nil { return nil } else if err != syscall.EWOULDBLOCK { return err } + // If we timed out then return an error. 
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go similarity index 75% rename from vendor/github.com/boltdb/bolt/bolt_unix_solaris.go rename to vendor/github.com/coreos/bbolt/bolt_unix_solaris.go index 307bf2b3ee9..fd8335ecc96 100644 --- a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go +++ b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go @@ -13,34 +13,33 @@ import ( // flock acquires an advisory lock on a file descriptor. func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Whence = 0 - lock.Pid = 0 - if exclusive { - lock.Type = syscall.F_WRLCK - } else { - lock.Type = syscall.F_RDLCK - } - err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) if err == nil { return nil } else if err != syscall.EAGAIN { return err } + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. 
- time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/coreos/bbolt/bolt_windows.go similarity index 88% rename from vendor/github.com/boltdb/bolt/bolt_windows.go rename to vendor/github.com/coreos/bbolt/bolt_windows.go index d538e6afd77..ca6f9a11c24 100644 --- a/vendor/github.com/boltdb/bolt/bolt_windows.go +++ b/vendor/github.com/coreos/bbolt/bolt_windows.go @@ -59,29 +59,30 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro db.lockfile = f var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := f.Fd() + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } - - err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + // Attempt to obtain an exclusive lock. + err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{}) if err == nil { return nil } else if err != errLockViolation { return err } + // If we timed oumercit then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. 
- time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } @@ -89,7 +90,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro func funlock(db *DB) error { err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) db.lockfile.Close() - os.Remove(db.path+lockExt) + os.Remove(db.path + lockExt) return err } diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/coreos/bbolt/boltsync_unix.go similarity index 100% rename from vendor/github.com/boltdb/bolt/boltsync_unix.go rename to vendor/github.com/coreos/bbolt/boltsync_unix.go diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/coreos/bbolt/bucket.go similarity index 95% rename from vendor/github.com/boltdb/bolt/bucket.go rename to vendor/github.com/coreos/bbolt/bucket.go index d2f8c524e42..44db88b8abd 100644 --- a/vendor/github.com/boltdb/bolt/bucket.go +++ b/vendor/github.com/coreos/bbolt/bucket.go @@ -14,13 +14,6 @@ const ( MaxValueSize = (1 << 31) - 2 ) -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) const ( @@ -130,9 +123,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket { func (b *Bucket) openBucket(value []byte) *Bucket { var child = newBucket(b.tx) + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. + unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. 
- if b.tx.writable { + if b.tx.writable && !unaligned { child.bucket = &bucket{} *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) } else { @@ -167,9 +168,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if bytes.Equal(key, k) { if (flags & bucketLeafFlag) != 0 { return nil, ErrBucketExists - } else { - return nil, ErrIncompatibleValue } + return nil, ErrIncompatibleValue } // Create empty, inline bucket. @@ -316,7 +316,12 @@ func (b *Bucket) Delete(key []byte) error { // Move cursor to correct position. c := b.Cursor() - _, _, flags := c.seek(key) + k, _, flags := c.seek(key) + + // Return nil if the key doesn't exist. + if !bytes.Equal(key, k) { + return nil + } // Return an error if there is already existing bucket value. if (flags & bucketLeafFlag) != 0 { @@ -329,6 +334,28 @@ func (b *Bucket) Delete(key []byte) error { return nil } +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + // NextSequence returns an autoincrementing integer for the bucket. 
func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/coreos/bbolt/cursor.go similarity index 100% rename from vendor/github.com/boltdb/bolt/cursor.go rename to vendor/github.com/coreos/bbolt/cursor.go diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/coreos/bbolt/db.go similarity index 85% rename from vendor/github.com/boltdb/bolt/db.go rename to vendor/github.com/coreos/bbolt/db.go index 1223493ca7b..4c8c156b23e 100644 --- a/vendor/github.com/boltdb/bolt/db.go +++ b/vendor/github.com/coreos/bbolt/db.go @@ -7,8 +7,7 @@ import ( "log" "os" "runtime" - "runtime/debug" - "strings" + "sort" "sync" "time" "unsafe" @@ -23,6 +22,8 @@ const version = 2 // Represents a marker value to indicate that a file is a Bolt DB. const magic uint32 = 0xED0CDAED +const pgidNoFreelist pgid = 0xffffffffffffffff + // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes @@ -39,6 +40,9 @@ const ( // default page size for db is set to the OS page size. var defaultPageSize = os.Getpagesize() +// The time elapsed between consecutive file locking attempts. +const flockRetryTimeout = 50 * time.Millisecond + // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. @@ -61,6 +65,11 @@ type DB struct { // THIS IS UNSAFE. PLEASE USE WITH CAUTION. NoSync bool + // When true, skips syncing freelist to disk. This improves the database + // write performance under normal operation, but requires a full database + // re-sync during recovery. + NoFreelistSync bool + // When true, skips the truncate call when growing the database. 
// Setting this to true is only safe on non-ext3/ext4 systems. // Skipping truncation avoids preallocation of hard drive space and @@ -107,9 +116,11 @@ type DB struct { opened bool rwtx *Tx txs []*Tx - freelist *freelist stats Stats + freelist *freelist + freelistLoad sync.Once + pagePool sync.Pool batchMu sync.Mutex @@ -148,14 +159,17 @@ func (db *DB) String() string { // If the file does not exist then it will be created automatically. // Passing in nil options will cause Bolt to open the database with the default options. func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - + db := &DB{ + opened: true, + } // Set default options if no options are provided. if options == nil { options = DefaultOptions } + db.NoSync = options.NoSync db.NoGrowSync = options.NoGrowSync db.MmapFlags = options.MmapFlags + db.NoFreelistSync = options.NoFreelistSync // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize @@ -184,6 +198,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + db.lockfile = nil // make 'unused' happy. TODO: rework locks _ = db.close() return nil, err } @@ -191,6 +206,11 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Default values for test hooks db.ops.writeAt = db.file.WriteAt + if db.pageSize = options.PageSize; db.pageSize == 0 { + // Set the default page size to the OS page size. + db.pageSize = defaultPageSize + } + // Initialize the database if it doesn't exist. if info, err := db.file.Stat(); err != nil { return nil, err @@ -202,20 +222,21 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // Read the first meta page to determine the page size. 
var buf [0x1000]byte - if _, err := db.file.ReadAt(buf[:], 0); err == nil { - m := db.pageInBuffer(buf[:], 0).meta() - if err := m.validate(); err != nil { - // If we can't read the page size, we can assume it's the same - // as the OS -- since that's how the page size was chosen in the - // first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - db.pageSize = os.Getpagesize() - } else { + // If we can't read the page size, but can read a page, assume + // it's the same as the OS or one given -- since that's how the + // page size was chosen in the first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + // + // TODO: scan for next page + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { db.pageSize = int(m.pageSize) } + } else { + return nil, ErrInvalid } } @@ -232,14 +253,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { return nil, err } - // Read in the freelist. - db.freelist = newFreelist() - db.freelist.read(db.page(db.meta().freelist)) + if db.readOnly { + return db, nil + } + + db.loadFreelist() + + // Flush freelist when transitioning from no sync to sync so + // NoFreelistSync unaware boltdb can open the db later. + if !db.NoFreelistSync && !db.hasSyncedFreelist() { + tx, err := db.Begin(true) + if tx != nil { + err = tx.Commit() + } + if err != nil { + _ = db.close() + return nil, err + } + } // Mark the database as opened and return. return db, nil } +// loadFreelist reads the freelist if it is synced, or reconstructs it +// by scanning the DB if it is not synced. It assumes there are no +// concurrent accesses being made to the freelist. 
+func (db *DB) loadFreelist() { + db.freelistLoad.Do(func() { + db.freelist = newFreelist() + if !db.hasSyncedFreelist() { + // Reconstruct free list by scanning the DB. + db.freelist.readIDs(db.freepages()) + } else { + // Read free list from freelist page. + db.freelist.read(db.page(db.meta().freelist)) + } + db.stats.FreePageN = len(db.freelist.ids) + }) +} + +func (db *DB) hasSyncedFreelist() bool { + return db.meta().freelist != pgidNoFreelist +} + // mmap opens the underlying memory-mapped file and initializes the meta references. // minsz is the minimum size that the new mmap can be. func (db *DB) mmap(minsz int) error { @@ -341,9 +398,6 @@ func (db *DB) mmapSize(size int) (int, error) { // init creates a new database file and initializes its meta pages. func (db *DB) init() error { - // Set the page size to the OS page size. - db.pageSize = os.Getpagesize() - // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { @@ -526,21 +580,36 @@ func (db *DB) beginRWTx() (*Tx, error) { t := &Tx{writable: true} t.init(db) db.rwtx = t + db.freePages() + return t, nil +} - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } +// freePages releases any pages associated with closed read-only transactions. +func (db *DB) freePages() { + // Free all pending pages prior to earliest open transaction. + sort.Sort(txsById(db.txs)) + minid := txid(0xFFFFFFFFFFFFFFFF) + if len(db.txs) > 0 { + minid = db.txs[0].meta.txid } if minid > 0 { db.freelist.release(minid - 1) } - - return t, nil + // Release unused txid extents. + for _, t := range db.txs { + db.freelist.releaseRange(minid, t.meta.txid-1) + minid = t.meta.txid + 1 + } + db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + // Any page both allocated and freed in an extent is safe to release. 
} +type txsById []*Tx + +func (t txsById) Len() int { return len(t) } +func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } + // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -552,7 +621,10 @@ func (db *DB) removeTx(tx *Tx) { // Remove the transaction. for i, t := range db.txs { if t == tx { - db.txs = append(db.txs[:i], db.txs[i+1:]...) + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] break } } @@ -630,11 +702,7 @@ func (db *DB) View(fn func(*Tx) error) error { return err } - if err := t.Rollback(); err != nil { - return err - } - - return nil + return t.Rollback() } // Batch calls fn as part of a batch. It behaves similar to Update, @@ -734,9 +802,7 @@ retry: // pass success, or bolt internal errors, to all callers for _, c := range b.calls { - if c.err != nil { - c.err <- err - } + c.err <- err } break retry } @@ -823,7 +889,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(count int) (*page, error) { +func (db *DB) allocate(txid txid, count int) (*page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -835,7 +901,7 @@ func (db *DB) allocate(count int) (*page, error) { p.overflow = uint32(count - 1) // Use pages from the freelist if they are available. 
- if p.id = db.freelist.allocate(count); p.id != 0 { + if p.id = db.freelist.allocate(txid, count); p.id != 0 { return p, nil } @@ -890,6 +956,38 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } +func (db *DB) freepages() []pgid { + tx, err := db.beginTx() + defer func() { + err = tx.Rollback() + if err != nil { + panic("freepages: failed to rollback tx") + } + }() + if err != nil { + panic("freepages: failed to open read only tx") + } + + reachable := make(map[pgid]*page) + nofreed := make(map[pgid]bool) + ech := make(chan error) + go func() { + for e := range ech { + panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) + } + }() + tx.checkBucket(&tx.root, reachable, nofreed, ech) + close(ech) + + var fids []pgid + for i := pgid(2); i < db.meta().pgid; i++ { + if _, ok := reachable[i]; !ok { + fids = append(fids, i) + } + } + return fids +} + // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. @@ -900,6 +998,10 @@ type Options struct { // Sets the DB.NoGrowSync flag before memory mapping the file. NoGrowSync bool + // Do not sync freelist to disk. This improves the database write performance + // under normal operation, but requires a full database re-sync during recovery. + NoFreelistSync bool + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool @@ -916,6 +1018,14 @@ type Options struct { // If initialMmapSize is smaller than the previous database size, // it takes no effect. InitialMmapSize int + + // PageSize overrides the default OS page size. + PageSize int + + // NoSync sets the initial value of DB.NoSync. Normally this can just be + // set directly on the DB itself when returned from Open(), but this option + // is useful in APIs which expose Options but not the underlying DB. 
+ NoSync bool } // DefaultOptions represent the options used if nil options are passed into Open(). @@ -952,15 +1062,11 @@ func (s *Stats) Sub(other *Stats) Stats { diff.PendingPageN = s.PendingPageN diff.FreeAlloc = s.FreeAlloc diff.FreelistInuse = s.FreelistInuse - diff.TxN = other.TxN - s.TxN + diff.TxN = s.TxN - other.TxN diff.TxStats = s.TxStats.Sub(&other.TxStats) return diff } -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - type Info struct { Data uintptr PageSize int @@ -999,7 +1105,8 @@ func (m *meta) copy(dest *meta) { func (m *meta) write(p *page) { if m.root.root >= m.pgid { panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { + } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) } @@ -1026,11 +1133,3 @@ func _assert(condition bool, msg string, v ...interface{}) { panic(fmt.Sprintf("assertion failed: "+msg, v...)) } } - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) 
} - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/coreos/bbolt/doc.go similarity index 100% rename from vendor/github.com/boltdb/bolt/doc.go rename to vendor/github.com/coreos/bbolt/doc.go diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/coreos/bbolt/errors.go similarity index 100% rename from vendor/github.com/boltdb/bolt/errors.go rename to vendor/github.com/coreos/bbolt/errors.go diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/coreos/bbolt/freelist.go similarity index 56% rename from vendor/github.com/boltdb/bolt/freelist.go rename to vendor/github.com/coreos/bbolt/freelist.go index 1b7ba91b2a5..266f1542945 100644 --- a/vendor/github.com/boltdb/bolt/freelist.go +++ b/vendor/github.com/coreos/bbolt/freelist.go @@ -6,25 +6,40 @@ import ( "unsafe" ) +// txPending holds a list of pgids and corresponding allocation txns +// that are pending to be freed. +type txPending struct { + ids []pgid + alloctx []txid // txids allocating the ids + lastReleaseBegin txid // beginning txid of last matching releaseRange +} + // freelist represents a list of all pages that are available for allocation. // It also tracks pages that have been freed but are still in use by open transactions. type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. + ids []pgid // all free and available free page ids. + allocs map[pgid]txid // mapping of txid that allocated a pgid. + pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. } // newFreelist returns an empty, initialized freelist. 
func newFreelist() *freelist { return &freelist{ - pending: make(map[txid][]pgid), + allocs: make(map[pgid]txid), + pending: make(map[txid]*txPending), cache: make(map[pgid]bool), } } // size returns the size of the page after serialization. func (f *freelist) size() int { - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) + n := f.count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) } // count returns count of pages on the freelist @@ -40,27 +55,26 @@ func (f *freelist) free_count() int { // pending_count returns count of pending pages func (f *freelist) pending_count() int { var count int - for _, list := range f.pending { - count += len(list) + for _, txp := range f.pending { + count += len(txp.ids) } return count } -// all returns a list of all free ids and all pending ids in one sorted list. -func (f *freelist) all() []pgid { - m := make(pgids, 0) - - for _, list := range f.pending { - m = append(m, list...) +// copyall copies into dst a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (f *freelist) copyall(dst []pgid) { + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) } - sort.Sort(m) - return pgids(f.ids).merge(m) + mergepgids(dst, f.ids, m) } // allocate returns the starting page id of a contiguous list of pages of a given size. // If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { +func (f *freelist) allocate(txid txid, n int) pgid { if len(f.ids) == 0 { return 0 } @@ -93,7 +107,7 @@ func (f *freelist) allocate(n int) pgid { for i := pgid(0); i < pgid(n); i++ { delete(f.cache, initial+i) } - + f.allocs[initial] = txid return initial } @@ -110,28 +124,73 @@ func (f *freelist) free(txid txid, p *page) { } // Free page and all its overflow pages. 
- var ids = f.pending[txid] + txp := f.pending[txid] + if txp == nil { + txp = &txPending{} + f.pending[txid] = txp + } + allocTxid, ok := f.allocs[p.id] + if ok { + delete(f.allocs, p.id) + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 + } + for id := p.id; id <= p.id+pgid(p.overflow); id++ { // Verify that page is not already free. if f.cache[id] { panic(fmt.Sprintf("page %d already freed", id)) } - // Add to the freelist and cache. - ids = append(ids, id) + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) f.cache[id] = true } - f.pending[txid] = ids } // release moves all page ids for a transaction id (or older) to the freelist. func (f *freelist) release(txid txid) { m := make(pgids, 0) - for tid, ids := range f.pending { + for tid, txp := range f.pending { if tid <= txid { // Move transaction's pending pages to the available freelist. // Don't remove from the cache since the page is still free. - m = append(m, ids...) + m = append(m, txp.ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. +func (f *freelist) releaseRange(begin, end txid) { + if begin > end { + return + } + var m pgids + for tid, txp := range f.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. 
+ if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { delete(f.pending, tid) } } @@ -142,12 +201,29 @@ func (f *freelist) release(txid txid) { // rollback removes the pages from a given pending tx. func (f *freelist) rollback(txid txid) { // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) + txp := f.pending[txid] + if txp == nil { + return } - - // Remove pages from pending list. + var m pgids + for i, pgid := range txp.ids { + delete(f.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + f.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. + m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. delete(f.pending, txid) + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) } // freed returns whether a given page is in the free list. @@ -157,6 +233,9 @@ func (f *freelist) freed(pgid pgid) bool { // read initializes the freelist from a freelist page. func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. 
idx, count := 0, int(p.count) @@ -169,7 +248,7 @@ func (f *freelist) read(p *page) { if count == 0 { f.ids = nil } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] f.ids = make([]pgid, len(ids)) copy(f.ids, ids) @@ -181,27 +260,33 @@ func (f *freelist) read(p *page) { f.reindex() } +// read initializes the freelist from a given list of ids. +func (f *freelist) readIDs(ids []pgid) { + f.ids = ids + f.reindex() +} + // write writes the page ids onto a freelist page. All free and pending ids are // saved to disk since in the event of a program crash, all pending ids will // become free. func (f *freelist) write(p *page) error { // Combine the old free pgids and pgids waiting on an open transaction. - ids := f.all() // Update the header flag. p.flags |= freelistPageFlag // The page.count can only hold up to 64k elements so if we overflow that // number then we handle it by putting the size in the first element. - if len(ids) == 0 { - p.count = uint16(len(ids)) - } else if len(ids) < 0xFFFF { - p.count = uint16(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + lenids := f.count() + if lenids == 0 { + p.count = uint16(lenids) + } else if lenids < 0xFFFF { + p.count = uint16(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) } else { p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) } return nil @@ -213,8 +298,8 @@ func (f *freelist) reload(p *page) { // Build a cache of only pending pages. 
pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { + for _, txp := range f.pending { + for _, pendingID := range txp.ids { pcache[pendingID] = true } } @@ -236,12 +321,12 @@ func (f *freelist) reload(p *page) { // reindex rebuilds the free cache based on available and pending free lists. func (f *freelist) reindex() { - f.cache = make(map[pgid]bool) + f.cache = make(map[pgid]bool, len(f.ids)) for _, id := range f.ids { f.cache[id] = true } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { + for _, txp := range f.pending { + for _, pendingID := range txp.ids { f.cache[pendingID] = true } } diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/coreos/bbolt/node.go similarity index 99% rename from vendor/github.com/boltdb/bolt/node.go rename to vendor/github.com/coreos/bbolt/node.go index 159318b229c..f4ce240eddd 100644 --- a/vendor/github.com/boltdb/bolt/node.go +++ b/vendor/github.com/coreos/bbolt/node.go @@ -365,7 +365,7 @@ func (n *node) spill() error { } // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) if err != nil { return err } diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/coreos/bbolt/page.go similarity index 88% rename from vendor/github.com/boltdb/bolt/page.go rename to vendor/github.com/coreos/bbolt/page.go index 7651a6bf7d9..cde403ae86d 100644 --- a/vendor/github.com/boltdb/bolt/page.go +++ b/vendor/github.com/coreos/bbolt/page.go @@ -145,12 +145,33 @@ func (a pgids) merge(b pgids) pgids { // Return the opposite slice if one is nil. if len(a) == 0 { return b - } else if len(b) == 0 { + } + if len(b) == 0 { return a } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} - // Create a list to hold all elements from both lists. 
- merged := make(pgids, 0, len(a)+len(b)) +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] // Assign lead to the slice with a lower starting value, follow to the higher value. lead, follow := a, b @@ -172,7 +193,5 @@ func (a pgids) merge(b pgids) pgids { } // Append what's left in follow. - merged = append(merged, follow...) - - return merged + _ = append(merged, follow...) } diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/coreos/bbolt/tx.go similarity index 94% rename from vendor/github.com/boltdb/bolt/tx.go rename to vendor/github.com/coreos/bbolt/tx.go index 1cfb4cde855..5c0290733f5 100644 --- a/vendor/github.com/boltdb/bolt/tx.go +++ b/vendor/github.com/coreos/bbolt/tx.go @@ -126,10 +126,7 @@ func (tx *Tx) DeleteBucket(name []byte) error { // the error is returned to the caller. func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil + return fn(k, tx.root.Bucket(k)) }) } @@ -169,28 +166,18 @@ func (tx *Tx) Commit() error { // Free the old root bucket. tx.meta.root.root = tx.root.root - opgid := tx.meta.pgid - - // Free the freelist and allocate new pages for it. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). 
- tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err + // Free the old freelist because commit writes out a fresh freelist. + if tx.meta.freelist != pgidNoFreelist { + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() + if !tx.db.NoFreelistSync { + err := tx.commitFreelist() + if err != nil { return err } + } else { + tx.meta.freelist = pgidNoFreelist } // Write dirty pages to disk. @@ -235,6 +222,31 @@ func (tx *Tx) Commit() error { return nil } +func (tx *Tx) commitFreelist() error { + // Allocate new pages for the new free list. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + opgid := tx.meta.pgid + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + return nil +} + // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. func (tx *Tx) Rollback() error { @@ -305,7 +317,11 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { if err != nil { return 0, err } - defer func() { _ = f.Close() }() + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() // Generate a meta page. 
We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) @@ -333,7 +349,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { return n, fmt.Errorf("seek: %s", err) } @@ -344,7 +360,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, err } - return n, f.Close() + return n, nil } // CopyFile copies the entire database to file at the given path. @@ -379,9 +395,14 @@ func (tx *Tx) Check() <-chan error { } func (tx *Tx) check(ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + // Check if any pages are double freed. freed := make(map[pgid]bool) - for _, id := range tx.db.freelist.all() { + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) } @@ -392,8 +413,10 @@ func (tx *Tx) check(ch chan error) { reachable := make(map[pgid]*page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } } // Recursively check buckets. @@ -451,7 +474,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo // allocate returns a contiguous block of memory starting at a given page. 
func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) + p, err := tx.db.allocate(tx.meta.txid, count) if err != nil { return nil, err } @@ -460,7 +483,7 @@ func (tx *Tx) allocate(count int) (*page, error) { tx.pages[p.id] = p // Update statistics. - tx.stats.PageCount++ + tx.stats.PageCount += count tx.stats.PageAlloc += count * tx.db.pageSize return p, nil diff --git a/vendor/github.com/coreos/etcd/auth/BUILD b/vendor/github.com/coreos/etcd/auth/BUILD index 7452a66ee29..892b00a396f 100644 --- a/vendor/github.com/coreos/etcd/auth/BUILD +++ b/vendor/github.com/coreos/etcd/auth/BUILD @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "doc.go", + "jwt.go", "range_perm_cache.go", "simple_token.go", "store.go", @@ -14,10 +15,14 @@ go_library( "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/adt:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/dgrijalva/jwt-go:go_default_library", "//vendor/golang.org/x/crypto/bcrypt:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/peer:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go index c6e2a12a7fa..009ebda70ca 100644 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go +++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go @@ -803,7 +803,7 @@ func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } var fileDescriptorAuth = []byte{ // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 
0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go new file mode 100644 index 00000000000..214ae48c83a --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/jwt.go @@ -0,0 +1,137 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package auth + +import ( + "crypto/rsa" + "io/ioutil" + + jwt "github.com/dgrijalva/jwt-go" + "golang.org/x/net/context" +) + +type tokenJWT struct { + signMethod string + signKey *rsa.PrivateKey + verifyKey *rsa.PublicKey +} + +func (t *tokenJWT) enable() {} +func (t *tokenJWT) disable() {} +func (t *tokenJWT) invalidateUser(string) {} +func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } + +func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { + // rev isn't used in JWT, it is only used in simple token + var ( + username string + revision uint64 + ) + + parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { + return t.verifyKey, nil + }) + + switch err.(type) { + case nil: + if !parsed.Valid { + plog.Warningf("invalid jwt token: %s", token) + return nil, false + } + + claims := parsed.Claims.(jwt.MapClaims) + + username = claims["username"].(string) + revision = uint64(claims["revision"].(float64)) + default: + plog.Warningf("failed to parse jwt token: %s", err) + return nil, false + } + + return &AuthInfo{Username: username, Revision: revision}, true +} + +func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { + // Future work: let a jwt token include permission information would be useful for + // permission checking in proxy side. 
+ tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), + jwt.MapClaims{ + "username": username, + "revision": revision, + }) + + token, err := tk.SignedString(t.signKey) + if err != nil { + plog.Debugf("failed to sign jwt token: %s", err) + return "", err + } + + plog.Debugf("jwt token: %s", token) + + return token, err +} + +func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) { + for k, v := range opts { + switch k { + case "sign-method": + jwtSignMethod = v + case "pub-key": + jwtPubKeyPath = v + case "priv-key": + jwtPrivKeyPath = v + default: + plog.Errorf("unknown token specific option: %s", k) + return "", "", "", ErrInvalidAuthOpts + } + } + + return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil +} + +func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) { + jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + t := &tokenJWT{} + + t.signMethod = jwtSignMethod + + verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) + if err != nil { + plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) + return nil, err + } + t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) + if err != nil { + plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) + return nil, err + } + + signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) + if err != nil { + plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) + return nil, err + } + t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) + if err != nil { + plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err) + return nil, err + } + + return t, nil +} diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go index 3cd1ad2a411..691b65ba38e 100644 --- a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go +++ 
b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go @@ -15,93 +15,11 @@ package auth import ( - "bytes" - "sort" - "github.com/coreos/etcd/auth/authpb" "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/adt" ) -// isSubset returns true if a is a subset of b. -// If a is a prefix of b, then a is a subset of b. -// Given intervals [a1,a2) and [b1,b2), is -// the a interval a subset of b? -func isSubset(a, b *rangePerm) bool { - switch { - case len(a.end) == 0 && len(b.end) == 0: - // a, b are both keys - return bytes.Equal(a.begin, b.begin) - case len(b.end) == 0: - // b is a key, a is a range - return false - case len(a.end) == 0: - // a is a key, b is a range. need b1 <= a1 and a1 < b2 - return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.begin, b.end) < 0 - default: - // both are ranges. need b1 <= a1 and a2 <= b2 - return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.end, b.end) <= 0 - } -} - -func isRangeEqual(a, b *rangePerm) bool { - return bytes.Equal(a.begin, b.begin) && bytes.Equal(a.end, b.end) -} - -// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms. -// If there are equal ranges, removeSubsetRangePerms only keeps one of them. -// It returns a sorted rangePerm slice. -func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) { - sort.Sort(RangePermSliceByBegin(perms)) - var prev *rangePerm - for i := range perms { - if i == 0 { - prev = perms[i] - newp = append(newp, perms[i]) - continue - } - if isRangeEqual(perms[i], prev) { - continue - } - if isSubset(perms[i], prev) { - continue - } - if isSubset(prev, perms[i]) { - prev = perms[i] - newp[len(newp)-1] = perms[i] - continue - } - prev = perms[i] - newp = append(newp, perms[i]) - } - return newp -} - -// mergeRangePerms merges adjacent rangePerms. 
-func mergeRangePerms(perms []*rangePerm) []*rangePerm { - var merged []*rangePerm - perms = removeSubsetRangePerms(perms) - - i := 0 - for i < len(perms) { - begin, next := i, i - for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) >= 0 { - next++ - } - // don't merge ["a", "b") with ["b", ""), because perms[next+1].end is empty. - if next != begin && len(perms[next].end) > 0 { - merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end}) - } else { - merged = append(merged, perms[begin]) - if next != begin { - merged = append(merged, perms[next]) - } - } - i = next + 1 - } - - return merged -} - func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { user := getUser(tx, userName) if user == nil { @@ -109,7 +27,8 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission return nil } - var readPerms, writePerms []*rangePerm + readPerms := &adt.IntervalTree{} + writePerms := &adt.IntervalTree{} for _, roleName := range user.Roles { role := getRole(tx, roleName) @@ -118,48 +37,66 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission } for _, perm := range role.KeyPermission { - rp := &rangePerm{begin: perm.Key, end: perm.RangeEnd} + var ivl adt.Interval + var rangeEnd []byte + + if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 { + rangeEnd = perm.RangeEnd + } + + if len(perm.RangeEnd) != 0 { + ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd) + } else { + ivl = adt.NewBytesAffinePoint(perm.Key) + } switch perm.PermType { case authpb.READWRITE: - readPerms = append(readPerms, rp) - writePerms = append(writePerms, rp) + readPerms.Insert(ivl, struct{}{}) + writePerms.Insert(ivl, struct{}{}) case authpb.READ: - readPerms = append(readPerms, rp) + readPerms.Insert(ivl, struct{}{}) case authpb.WRITE: - writePerms = append(writePerms, rp) + writePerms.Insert(ivl, struct{}{}) } } } return &unifiedRangePermissions{ - readPerms: 
mergeRangePerms(readPerms), - writePerms: mergeRangePerms(writePerms), + readPerms: readPerms, + writePerms: writePerms, } } -func checkKeyPerm(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - var tocheck []*rangePerm +func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { + if len(rangeEnd) == 1 && rangeEnd[0] == 0 { + rangeEnd = nil + } + ivl := adt.NewBytesAffineInterval(key, rangeEnd) switch permtyp { case authpb.READ: - tocheck = cachedPerms.readPerms + return cachedPerms.readPerms.Contains(ivl) case authpb.WRITE: - tocheck = cachedPerms.writePerms + return cachedPerms.writePerms.Contains(ivl) default: plog.Panicf("unknown auth type: %v", permtyp) } + return false +} - requiredPerm := &rangePerm{begin: key, end: rangeEnd} - - for _, perm := range tocheck { - if isSubset(requiredPerm, perm) { - return true - } +func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { + pt := adt.NewBytesAffinePoint(key) + switch permtyp { + case authpb.READ: + return cachedPerms.readPerms.Intersects(pt) + case authpb.WRITE: + return cachedPerms.writePerms.Intersects(pt) + default: + plog.Panicf("unknown auth type: %v", permtyp) } - return false } @@ -175,7 +112,11 @@ func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key as.rangePermCache[userName] = perms } - return checkKeyPerm(as.rangePermCache[userName], key, rangeEnd, permtyp) + if len(rangeEnd) == 0 { + return checkKeyPoint(as.rangePermCache[userName], key, permtyp) + } + + return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp) } func (as *authStore) clearCachedPerm() { @@ -187,35 +128,6 @@ func (as *authStore) invalidateCachedPerm(userName string) { } type unifiedRangePermissions struct { - // readPerms[i] and readPerms[j] (i != j) don't overlap - readPerms []*rangePerm - // writePerms[i] and writePerms[j] 
(i != j) don't overlap, too - writePerms []*rangePerm -} - -type rangePerm struct { - begin, end []byte -} - -type RangePermSliceByBegin []*rangePerm - -func (slice RangePermSliceByBegin) Len() int { - return len(slice) -} - -func (slice RangePermSliceByBegin) Less(i, j int) bool { - switch bytes.Compare(slice[i].begin, slice[j].begin) { - case 0: // begin(i) == begin(j) - return bytes.Compare(slice[i].end, slice[j].end) == -1 - - case -1: // begin(i) < begin(j) - return true - - default: - return false - } -} - -func (slice RangePermSliceByBegin) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] + readPerms *adt.IntervalTree + writePerms *adt.IntervalTree } diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go index a39f3927685..94d92a115e2 100644 --- a/vendor/github.com/coreos/etcd/auth/simple_token.go +++ b/vendor/github.com/coreos/etcd/auth/simple_token.go @@ -19,10 +19,14 @@ package auth import ( "crypto/rand" + "fmt" "math/big" + "strconv" "strings" "sync" "time" + + "golang.org/x/net/context" ) const ( @@ -90,24 +94,14 @@ func (tm *simpleTokenTTLKeeper) run() { } } -func (as *authStore) enable() { - delf := func(tk string) { - if username, ok := as.simpleTokens[tk]; ok { - plog.Infof("deleting token %s for user %s", tk, username) - delete(as.simpleTokens, tk) - } - } - as.simpleTokenKeeper = &simpleTokenTTLKeeper{ - tokens: make(map[string]time.Time), - donec: make(chan struct{}), - stopc: make(chan struct{}), - deleteTokenFunc: delf, - mu: &as.simpleTokensMu, - } - go as.simpleTokenKeeper.run() +type tokenSimple struct { + indexWaiter func(uint64) <-chan struct{} + simpleTokenKeeper *simpleTokenTTLKeeper + simpleTokensMu sync.Mutex + simpleTokens map[string]string // token -> username } -func (as *authStore) GenSimpleToken() (string, error) { +func (t *tokenSimple) genTokenPrefix() (string, error) { ret := make([]byte, defaultSimpleTokenLength) for i := 0; i < defaultSimpleTokenLength; 
i++ { @@ -122,28 +116,105 @@ func (as *authStore) GenSimpleToken() (string, error) { return string(ret), nil } -func (as *authStore) assignSimpleTokenToUser(username, token string) { - as.simpleTokensMu.Lock() - _, ok := as.simpleTokens[token] +func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { + t.simpleTokensMu.Lock() + _, ok := t.simpleTokens[token] if ok { plog.Panicf("token %s is alredy used", token) } - as.simpleTokens[token] = username - as.simpleTokenKeeper.addSimpleToken(token) - as.simpleTokensMu.Unlock() + t.simpleTokens[token] = username + t.simpleTokenKeeper.addSimpleToken(token) + t.simpleTokensMu.Unlock() } -func (as *authStore) invalidateUser(username string) { - if as.simpleTokenKeeper == nil { +func (t *tokenSimple) invalidateUser(username string) { + if t.simpleTokenKeeper == nil { return } - as.simpleTokensMu.Lock() - for token, name := range as.simpleTokens { + t.simpleTokensMu.Lock() + for token, name := range t.simpleTokens { if strings.Compare(name, username) == 0 { - delete(as.simpleTokens, token) - as.simpleTokenKeeper.deleteSimpleToken(token) + delete(t.simpleTokens, token) + t.simpleTokenKeeper.deleteSimpleToken(token) } } - as.simpleTokensMu.Unlock() + t.simpleTokensMu.Unlock() +} + +func (t *tokenSimple) enable() { + delf := func(tk string) { + if username, ok := t.simpleTokens[tk]; ok { + plog.Infof("deleting token %s for user %s", tk, username) + delete(t.simpleTokens, tk) + } + } + t.simpleTokenKeeper = &simpleTokenTTLKeeper{ + tokens: make(map[string]time.Time), + donec: make(chan struct{}), + stopc: make(chan struct{}), + deleteTokenFunc: delf, + mu: &t.simpleTokensMu, + } + go t.simpleTokenKeeper.run() +} + +func (t *tokenSimple) disable() { + t.simpleTokensMu.Lock() + tk := t.simpleTokenKeeper + t.simpleTokenKeeper = nil + t.simpleTokens = make(map[string]string) // invalidate all tokens + t.simpleTokensMu.Unlock() + if tk != nil { + tk.stop() + } +} + +func (t *tokenSimple) info(ctx context.Context, token 
string, revision uint64) (*AuthInfo, bool) { + if !t.isValidSimpleToken(ctx, token) { + return nil, false + } + t.simpleTokensMu.Lock() + username, ok := t.simpleTokens[token] + if ok && t.simpleTokenKeeper != nil { + t.simpleTokenKeeper.resetSimpleToken(token) + } + t.simpleTokensMu.Unlock() + return &AuthInfo{Username: username, Revision: revision}, ok +} + +func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { + // rev isn't used in simple token, it is only used in JWT + index := ctx.Value("index").(uint64) + simpleToken := ctx.Value("simpleToken").(string) + token := fmt.Sprintf("%s.%d", simpleToken, index) + t.assignSimpleTokenToUser(username, token) + + return token, nil +} + +func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool { + splitted := strings.Split(token, ".") + if len(splitted) != 2 { + return false + } + index, err := strconv.Atoi(splitted[1]) + if err != nil { + return false + } + + select { + case <-t.indexWaiter(uint64(index)): + return true + case <-ctx.Done(): + } + + return false +} + +func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple { + return &tokenSimple{ + simpleTokens: make(map[string]string), + indexWaiter: indexWaiter, + } } diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go index 236bb2c529d..3fac7f5a6fd 100644 --- a/vendor/github.com/coreos/etcd/auth/store.go +++ b/vendor/github.com/coreos/etcd/auth/store.go @@ -18,11 +18,10 @@ import ( "bytes" "encoding/binary" "errors" - "fmt" "sort" - "strconv" "strings" "sync" + "sync/atomic" "github.com/coreos/etcd/auth/authpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -30,7 +29,9 @@ import ( "github.com/coreos/pkg/capnslog" "golang.org/x/crypto/bcrypt" "golang.org/x/net/context" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) var ( @@ -60,6 +61,8 @@ var ( 
ErrAuthNotEnabled = errors.New("auth: authentication is not enabled") ErrAuthOldRevision = errors.New("auth: revision in header is old") ErrInvalidAuthToken = errors.New("auth: invalid auth token") + ErrInvalidAuthOpts = errors.New("auth: invalid auth options") + ErrInvalidAuthMgmt = errors.New("auth: invalid auth management") // BcryptCost is the algorithm cost / strength for hashing auth passwords BcryptCost = bcrypt.DefaultCost @@ -129,10 +132,6 @@ type AuthStore interface { // RoleList gets a list of all roles RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) - // AuthInfoFromToken gets a username from the given Token and current revision number - // (The revision number is used for preventing the TOCTOU problem) - AuthInfoFromToken(token string) (*AuthInfo, bool) - // IsPutPermitted checks put permission of the user IsPutPermitted(authInfo *AuthInfo, key []byte) error @@ -145,8 +144,9 @@ type AuthStore interface { // IsAdminPermitted checks admin permission of the user IsAdminPermitted(authInfo *AuthInfo) error - // GenSimpleToken produces a simple random string - GenSimpleToken() (string, error) + // GenTokenPrefix produces a random string in a case of simple token + // in a case of JWT, it produces an empty string + GenTokenPrefix() (string, error) // Revision gets current revision of authStore Revision() uint64 @@ -159,33 +159,32 @@ type AuthStore interface { // AuthInfoFromCtx gets AuthInfo from gRPC's context AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) + + // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context + AuthInfoFromTLS(ctx context.Context) *AuthInfo +} + +type TokenProvider interface { + info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) + assign(ctx context.Context, username string, revision uint64) (string, error) + enable() + disable() + + invalidateUser(string) + genTokenPrefix() (string, error) } type authStore struct { + // atomic operations; need 64-bit align, or 32-bit 
tests will crash + revision uint64 + be backend.Backend enabled bool enabledMu sync.RWMutex rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions - revision uint64 - - // tokenSimple in v3.2+ - indexWaiter func(uint64) <-chan struct{} - simpleTokenKeeper *simpleTokenTTLKeeper - simpleTokensMu sync.Mutex - simpleTokens map[string]string // token -> username -} - -func newDeleterFunc(as *authStore) func(string) { - return func(t string) { - as.simpleTokensMu.Lock() - defer as.simpleTokensMu.Unlock() - if username, ok := as.simpleTokens[t]; ok { - plog.Infof("deleting token %s for user %s", t, username) - delete(as.simpleTokens, t) - } - } + tokenProvider TokenProvider } func (as *authStore) AuthEnable() error { @@ -215,11 +214,11 @@ func (as *authStore) AuthEnable() error { tx.UnsafePut(authBucketName, enableFlagKey, authEnabled) as.enabled = true - as.enable() + as.tokenProvider.enable() as.rangePermCache = make(map[string]*unifiedRangePermissions) - as.revision = getRevision(tx) + as.setRevision(getRevision(tx)) plog.Noticef("Authentication enabled") @@ -241,15 +240,7 @@ func (as *authStore) AuthDisable() { b.ForceCommit() as.enabled = false - - as.simpleTokensMu.Lock() - tk := as.simpleTokenKeeper - as.simpleTokenKeeper = nil - as.simpleTokens = make(map[string]string) // invalidate all tokens - as.simpleTokensMu.Unlock() - if tk != nil { - tk.stop() - } + as.tokenProvider.disable() plog.Noticef("Authentication disabled") } @@ -260,10 +251,7 @@ func (as *authStore) Close() error { if !as.enabled { return nil } - if as.simpleTokenKeeper != nil { - as.simpleTokenKeeper.stop() - as.simpleTokenKeeper = nil - } + as.tokenProvider.disable() return nil } @@ -272,10 +260,6 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string return nil, ErrAuthNotEnabled } - // TODO(mitake): after adding jwt support, branching based on values of ctx is required - index := ctx.Value("index").(uint64) - simpleToken := 
ctx.Value("simpleToken").(string) - tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -285,14 +269,23 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string return nil, ErrAuthFailed } - token := fmt.Sprintf("%s.%d", simpleToken, index) - as.assignSimpleTokenToUser(username, token) + // Password checking is already performed in the API layer, so we don't need to check for now. + // Staleness of password can be detected with OCC in the API layer, too. - plog.Infof("authorized %s, token is %s", username, token) + token, err := as.tokenProvider.assign(ctx, username, as.Revision()) + if err != nil { + return nil, err + } + + plog.Debugf("authorized %s, token is %s", username, token) return &pb.AuthenticateResponse{Token: token}, nil } func (as *authStore) CheckPassword(username, password string) (uint64, error) { + if !as.isAuthEnabled() { + return 0, ErrAuthNotEnabled + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -322,7 +315,7 @@ func (as *authStore) Recover(be backend.Backend) { } } - as.revision = getRevision(tx) + as.setRevision(getRevision(tx)) tx.Unlock() @@ -366,6 +359,11 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, } func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + if as.enabled && strings.Compare(r.Name, rootUser) == 0 { + plog.Errorf("the user root must not be deleted") + return nil, ErrInvalidAuthMgmt + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -380,7 +378,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete as.commitRevision(tx) as.invalidateCachedPerm(r.Name) - as.invalidateUser(r.Name) + as.tokenProvider.invalidateUser(r.Name) plog.Noticef("deleted a user: %s", r.Name) @@ -416,7 +414,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p as.commitRevision(tx) as.invalidateCachedPerm(r.Name) - as.invalidateUser(r.Name) + 
as.tokenProvider.invalidateUser(r.Name) plog.Noticef("changed a password of a user: %s", r.Name) @@ -491,6 +489,11 @@ func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListRespon } func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 { + plog.Errorf("the role root must not be revoked from the user root") + return nil, ErrInvalidAuthMgmt + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -593,17 +596,10 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) } func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - // TODO(mitake): current scheme of role deletion allows existing users to have the deleted roles - // - // Assume a case like below: - // create a role r1 - // create a user u1 and grant r1 to u1 - // delete r1 - // - // After this sequence, u1 is still granted the role r1. So if admin create a new role with the name r1, - // the new r1 is automatically granted u1. - // In some cases, it would be confusing. So we need to provide an option for deleting the grant relation - // from all users. 
+ if as.enabled && strings.Compare(r.Role, rootRole) == 0 { + plog.Errorf("the role root must not be deleted") + return nil, ErrInvalidAuthMgmt + } tx := as.be.BatchTx() tx.Lock() @@ -616,6 +612,28 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete delRole(tx, r.Role) + users := getAllUsers(tx) + for _, user := range users { + updatedUser := &authpb.User{ + Name: user.Name, + Password: user.Password, + } + + for _, role := range user.Roles { + if strings.Compare(role, r.Role) != 0 { + updatedUser.Roles = append(updatedUser.Roles, role) + } + } + + if len(updatedUser.Roles) == len(user.Roles) { + continue + } + + putUser(tx, updatedUser) + + as.invalidateCachedPerm(string(user.Name)) + } + as.commitRevision(tx) plog.Noticef("deleted role %s", r.Role) @@ -645,15 +663,8 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, return &pb.AuthRoleAddResponse{}, nil } -func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) { - // same as '(t *tokenSimple) info' in v3.2+ - as.simpleTokensMu.Lock() - username, ok := as.simpleTokens[token] - if ok && as.simpleTokenKeeper != nil { - as.simpleTokenKeeper.resetSimpleToken(token) - } - as.simpleTokensMu.Unlock() - return &AuthInfo{Username: username, Revision: as.revision}, ok +func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) { + return as.tokenProvider.info(ctx, token, as.Revision()) } type permSlice []*authpb.Permission @@ -723,7 +734,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE return ErrUserEmpty } - if revision < as.revision { + if revision < as.Revision() { return ErrAuthOldRevision } @@ -886,7 +897,7 @@ func (as *authStore) isAuthEnabled() bool { return as.enabled } -func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore { +func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { tx := be.BatchTx() tx.Lock() @@ 
-904,18 +915,17 @@ func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) as := &authStore{ be: be, - simpleTokens: make(map[string]string), revision: getRevision(tx), - indexWaiter: indexWaiter, enabled: enabled, rangePermCache: make(map[string]*unifiedRangePermissions), + tokenProvider: tp, } if enabled { - as.enable() + as.tokenProvider.enable() } - if as.revision == 0 { + if as.Revision() == 0 { as.commitRevision(tx) } @@ -935,9 +945,9 @@ func hasRootRole(u *authpb.User) bool { } func (as *authStore) commitRevision(tx backend.BatchTx) { - as.revision++ + atomic.AddUint64(&as.revision, 1) revBytes := make([]byte, revBytesLen) - binary.BigEndian.PutUint64(revBytes, as.revision) + binary.BigEndian.PutUint64(revBytes, as.Revision()) tx.UnsafePut(authBucketName, revisionKey, revBytes) } @@ -951,31 +961,38 @@ func getRevision(tx backend.BatchTx) uint64 { return binary.BigEndian.Uint64(vs[0]) } -func (as *authStore) Revision() uint64 { - return as.revision +func (as *authStore) setRevision(rev uint64) { + atomic.StoreUint64(&as.revision, rev) } -func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool { - splitted := strings.Split(token, ".") - if len(splitted) != 2 { - return false - } - index, err := strconv.Atoi(splitted[1]) - if err != nil { - return false +func (as *authStore) Revision() uint64 { + return atomic.LoadUint64(&as.revision) +} + +func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { + peer, ok := peer.FromContext(ctx) + if !ok || peer == nil || peer.AuthInfo == nil { + return nil } - select { - case <-as.indexWaiter(uint64(index)): - return true - case <-ctx.Done(): + tlsInfo := peer.AuthInfo.(credentials.TLSInfo) + for _, chains := range tlsInfo.State.VerifiedChains { + for _, chain := range chains { + cn := chain.Subject.CommonName + plog.Debugf("found common name %s", cn) + + return &AuthInfo{ + Username: cn, + Revision: as.Revision(), + } + } } - return false + return nil } func (as 
*authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromIncomingContext(ctx) if !ok { return nil, nil } @@ -986,14 +1003,57 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { } token := ts[0] - if !as.isValidSimpleToken(token, ctx) { - return nil, ErrInvalidAuthToken - } - - authInfo, uok := as.AuthInfoFromToken(token) + authInfo, uok := as.authInfoFromToken(ctx, token) if !uok { plog.Warningf("invalid auth token: %s", token) return nil, ErrInvalidAuthToken } return authInfo, nil } + +func (as *authStore) GenTokenPrefix() (string, error) { + return as.tokenProvider.genTokenPrefix() +} + +func decomposeOpts(optstr string) (string, map[string]string, error) { + opts := strings.Split(optstr, ",") + tokenType := opts[0] + + typeSpecificOpts := make(map[string]string) + for i := 1; i < len(opts); i++ { + pair := strings.Split(opts[i], "=") + + if len(pair) != 2 { + plog.Errorf("invalid token specific option: %s", optstr) + return "", nil, ErrInvalidAuthOpts + } + + if _, ok := typeSpecificOpts[pair[0]]; ok { + plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) + return "", nil, ErrInvalidAuthOpts + } + + typeSpecificOpts[pair[0]] = pair[1] + } + + return tokenType, typeSpecificOpts, nil + +} + +func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { + tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + switch tokenType { + case "simple": + plog.Warningf("simple token is not cryptographically signed") + return newTokenProviderSimple(indexWaiter), nil + case "jwt": + return newTokenProviderJWT(typeSpecificOpts) + default: + plog.Errorf("unknown token type: %s", tokenType) + return nil, ErrInvalidAuthOpts + } +} diff --git a/vendor/github.com/coreos/etcd/client/BUILD 
b/vendor/github.com/coreos/etcd/client/BUILD index 16c78ceec37..00a5b08d87f 100644 --- a/vendor/github.com/coreos/etcd/client/BUILD +++ b/vendor/github.com/coreos/etcd/client/BUILD @@ -14,14 +14,15 @@ go_library( "keys.generated.go", "keys.go", "members.go", - "srv.go", "util.go", ], importpath = "github.com/coreos/etcd/client", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/coreos/etcd/pkg/pathutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/srv:go_default_library", "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/etcd/version:go_default_library", "//vendor/github.com/ugorji/go/codec:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", ], diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go index f9131b4725c..19ce2ec01da 100644 --- a/vendor/github.com/coreos/etcd/client/client.go +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -15,6 +15,7 @@ package client import ( + "encoding/json" "errors" "fmt" "io/ioutil" @@ -27,6 +28,8 @@ import ( "sync" "time" + "github.com/coreos/etcd/version" + "golang.org/x/net/context" ) @@ -201,6 +204,9 @@ type Client interface { // returned SetEndpoints(eps []string) error + // GetVersion retrieves the current etcd server and cluster version + GetVersion(ctx context.Context) (*version.Versions, error) + httpClient } @@ -366,12 +372,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo if err == context.Canceled || err == context.DeadlineExceeded { return nil, nil, err } - if isOneShot { - return nil, nil, err - } - continue - } - if resp.StatusCode/100 == 5 { + } else if resp.StatusCode/100 == 5 { switch resp.StatusCode { case http.StatusInternalServerError, http.StatusServiceUnavailable: // TODO: make sure this is a no leader response @@ -379,10 +380,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo 
default: cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) } - if isOneShot { - return nil, nil, cerr.Errors[0] + err = cerr.Errors[0] + } + if err != nil { + if !isOneShot { + continue } - continue + c.Lock() + c.pinned = (k + 1) % leps + c.Unlock() + return nil, nil, err } if k != pinned { c.Lock() @@ -477,6 +484,33 @@ func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration } } +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + type roundTripResponse struct { resp *http.Response err error diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go index bfd7aec93f5..442e35fe543 100644 --- a/vendor/github.com/coreos/etcd/client/discover.go +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -14,8 +14,27 @@ package client +import ( + "github.com/coreos/etcd/pkg/srv" +) + // Discoverer is an interface that wraps the Discover method. type Discoverer interface { // Discover looks up the etcd servers for the domain. Discover(domain string) ([]string, error) } + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
+func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/vendor/github.com/coreos/etcd/client/srv.go b/vendor/github.com/coreos/etcd/client/srv.go deleted file mode 100644 index fdfa3435921..00000000000 --- a/vendor/github.com/coreos/etcd/client/srv.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "fmt" - "net" - "net/url" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV -) - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. -func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -// Discover looks up the etcd servers for the domain. 
-func (d *srvDiscover) Discover(domain string) ([]string, error) { - var urls []*url.URL - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - return nil - } - - errHTTPS := updateURLs("etcd-client-ssl", "https") - errHTTP := updateURLs("etcd-client", "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return endpoints, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/BUILD b/vendor/github.com/coreos/etcd/clientv3/BUILD index c4cec492504..a78bb9f2064 100644 --- a/vendor/github.com/coreos/etcd/clientv3/BUILD +++ b/vendor/github.com/coreos/etcd/clientv3/BUILD @@ -4,18 +4,20 @@ go_library( name = "go_default_library", srcs = [ "auth.go", - "balancer.go", "client.go", "cluster.go", "compact_op.go", "compare.go", "config.go", "doc.go", + "grpc_options.go", + "health_balancer.go", "kv.go", "lease.go", "logger.go", "maintenance.go", "op.go", + "ready_wait.go", "retry.go", "sort.go", "txn.go", @@ -28,15 +30,15 @@ go_library( "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", 
"//vendor/google.golang.org/grpc/credentials:go_default_library", "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/health/grpc_health_v1:go_default_library", + "//vendor/google.golang.org/grpc/keepalive:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", ], ) @@ -49,7 +51,12 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/clientv3/concurrency:all-srcs", + "//vendor/github.com/coreos/etcd/clientv3/namespace:all-srcs", + "//vendor/github.com/coreos/etcd/clientv3/naming:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md index 87c32d1a88a..376bfba7614 100644 --- a/vendor/github.com/coreos/etcd/clientv3/README.md +++ b/vendor/github.com/coreos/etcd/clientv3/README.md @@ -1,6 +1,6 @@ # etcd/clientv3 -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) `etcd/clientv3` is the official Go etcd client for v3. @@ -32,7 +32,7 @@ pass `context.WithTimeout` to APIs: ```go ctx, cancel := context.WithTimeout(context.Background(), timeout) -resp, err := kvc.Put(ctx, "sample_key", "sample_value") +resp, err := cli.Put(ctx, "sample_key", "sample_value") cancel() if err != nil { // handle error! 
@@ -57,7 +57,7 @@ etcd client returns 2 types of errors: Here is the example code to handle client errors: ```go -resp, err := kvc.Put(ctx, "", "") +resp, err := cli.Put(ctx, "", "") if err != nil { switch err { case context.Canceled: @@ -76,6 +76,10 @@ if err != nil { The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go). +## Namespacing + +The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. + ## Examples More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3). diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go index b995bce8e3f..a64b8caca89 100644 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -20,6 +20,7 @@ import ( "github.com/coreos/etcd/auth/authpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -100,68 +101,65 @@ type Auth interface { } type auth struct { - c *Client - - conn *grpc.ClientConn // conn in-use - remote pb.AuthClient + remote pb.AuthClient + callOpts []grpc.CallOption } func NewAuth(c *Client) Auth { - conn := c.ActiveConnection() - return &auth{ - conn: c.ActiveConnection(), - remote: pb.NewAuthClient(conn), - c: c, + api := &auth{remote: RetryAuthClient(c)} + if c != nil { + api.callOpts = c.callOpts } + return api } func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) 
return (*AuthEnableResponse)(resp), toErr(ctx, err) } func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) return (*AuthDisableResponse)(resp), toErr(ctx, err) } func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}) + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthUserAddResponse)(resp), toErr(ctx, err) } func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { - resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}) + resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) } func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { - resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}) + resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) } func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { - resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}) + resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) 
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) } func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { - resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false)) + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) return (*AuthUserGetResponse)(resp), toErr(ctx, err) } func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { - resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) return (*AuthUserListResponse)(resp), toErr(ctx, err) } func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { - resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}) + resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { - resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}) + resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) return (*AuthRoleAddResponse)(resp), toErr(ctx, err) } @@ -171,27 +169,27 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran RangeEnd: []byte(rangeEnd), PermType: authpb.Permission_Type(permType), } - resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}) + resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) 
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { - resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false)) + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) return (*AuthRoleGetResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { - resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) return (*AuthRoleListResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}) + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...) return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { - resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}) + resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) 
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) } @@ -204,12 +202,13 @@ func StrToPermissionType(s string) (PermissionType, error) { } type authenticator struct { - conn *grpc.ClientConn // conn in-use - remote pb.AuthClient + conn *grpc.ClientConn // conn in-use + remote pb.AuthClient + callOpts []grpc.CallOption } func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { - resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false)) + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthenticateResponse)(resp), toErr(ctx, err) } @@ -217,14 +216,18 @@ func (auth *authenticator) close() { auth.conn.Close() } -func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) { +func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) { conn, err := grpc.Dial(endpoint, opts...) if err != nil { return nil, err } - return &authenticator{ + api := &authenticator{ conn: conn, remote: pb.NewAuthClient(conn), - }, nil + } + if c != nil { + api.callOpts = c.callOpts + } + return api, nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go deleted file mode 100644 index 0fef9c54934..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/balancer.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -// ErrNoAddrAvilable is returned by Get() when the balancer does not have -// any active connection to endpoints at the time. -// This error is returned only when opts.BlockingWait is true. -var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available") - -// simpleBalancer does the bare minimum to expose multiple eps -// to the grpc reconnection code path -type simpleBalancer struct { - // addrs are the client's endpoints for grpc - addrs []grpc.Address - // notifyCh notifies grpc of the set of addresses for connecting - notifyCh chan []grpc.Address - - // readyc closes once the first connection is up - readyc chan struct{} - readyOnce sync.Once - - // mu protects upEps, pinAddr, and connectingAddr - mu sync.RWMutex - // upEps holds the current endpoints that have an active connection - upEps map[string]struct{} - // upc closes when upEps transitions from empty to non-zero or the balancer closes. - upc chan struct{} - - // grpc issues TLS cert checks using the string passed into dial so - // that string must be the host. To recover the full scheme://host URL, - // have a map from hosts to the original endpoint. - host2ep map[string]string - - // pinAddr is the currently pinned address; set to the empty string on - // intialization and shutdown. 
- pinAddr string - - closed bool -} - -func newSimpleBalancer(eps []string) *simpleBalancer { - notifyCh := make(chan []grpc.Address, 1) - addrs := make([]grpc.Address, len(eps)) - for i := range eps { - addrs[i].Addr = getHost(eps[i]) - } - notifyCh <- addrs - sb := &simpleBalancer{ - addrs: addrs, - notifyCh: notifyCh, - readyc: make(chan struct{}), - upEps: make(map[string]struct{}), - upc: make(chan struct{}), - host2ep: getHost2ep(eps), - } - return sb -} - -func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } - -func (b *simpleBalancer) ConnectNotify() <-chan struct{} { - b.mu.Lock() - defer b.mu.Unlock() - return b.upc -} - -func (b *simpleBalancer) getEndpoint(host string) string { - b.mu.Lock() - defer b.mu.Unlock() - return b.host2ep[host] -} - -func getHost2ep(eps []string) map[string]string { - hm := make(map[string]string, len(eps)) - for i := range eps { - _, host, _ := parseEndpoint(eps[i]) - hm[host] = eps[i] - } - return hm -} - -func (b *simpleBalancer) updateAddrs(eps []string) { - np := getHost2ep(eps) - - b.mu.Lock() - defer b.mu.Unlock() - - match := len(np) == len(b.host2ep) - for k, v := range np { - if b.host2ep[k] != v { - match = false - break - } - } - if match { - // same endpoints, so no need to update address - return - } - - b.host2ep = np - - addrs := make([]grpc.Address, 0, len(eps)) - for i := range eps { - addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])}) - } - b.addrs = addrs - b.notifyCh <- addrs -} - -func (b *simpleBalancer) Up(addr grpc.Address) func(error) { - b.mu.Lock() - defer b.mu.Unlock() - - // gRPC might call Up after it called Close. We add this check - // to "fix" it up at application layer. Or our simplerBalancer - // might panic since b.upc is closed. 
- if b.closed { - return func(err error) {} - } - - if len(b.upEps) == 0 { - // notify waiting Get()s and pin first connected address - close(b.upc) - b.pinAddr = addr.Addr - } - b.upEps[addr.Addr] = struct{}{} - - // notify client that a connection is up - b.readyOnce.Do(func() { close(b.readyc) }) - - return func(err error) { - b.mu.Lock() - delete(b.upEps, addr.Addr) - if len(b.upEps) == 0 && b.pinAddr != "" { - b.upc = make(chan struct{}) - } else if b.pinAddr == addr.Addr { - // choose new random up endpoint - for k := range b.upEps { - b.pinAddr = k - break - } - } - b.mu.Unlock() - } -} - -func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { - var addr string - - // If opts.BlockingWait is false (for fail-fast RPCs), it should return - // an address it has notified via Notify immediately instead of blocking. - if !opts.BlockingWait { - b.mu.RLock() - closed := b.closed - addr = b.pinAddr - upEps := len(b.upEps) - b.mu.RUnlock() - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - - if upEps == 0 { - return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable - } - return grpc.Address{Addr: addr}, func() {}, nil - } - - for { - b.mu.RLock() - ch := b.upc - b.mu.RUnlock() - select { - case <-ch: - case <-ctx.Done(): - return grpc.Address{Addr: ""}, nil, ctx.Err() - } - b.mu.RLock() - addr = b.pinAddr - upEps := len(b.upEps) - b.mu.RUnlock() - if addr == "" { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if upEps > 0 { - break - } - } - return grpc.Address{Addr: addr}, func() {}, nil -} - -func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } - -func (b *simpleBalancer) Close() error { - b.mu.Lock() - defer b.mu.Unlock() - // In case gRPC calls close twice. TODO: remove the checking - // when we are sure that gRPC wont call close twice. 
- if b.closed { - return nil - } - b.closed = true - close(b.notifyCh) - // terminate all waiting Get()s - b.pinAddr = "" - if len(b.upEps) == 0 { - close(b.upc) - } - return nil -} - -func getHost(ep string) string { - url, uerr := url.Parse(ep) - if uerr != nil || !strings.Contains(ep, "://") { - return ep - } - return url.Host -} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go index 8263890bdff..2bdd928771f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -20,22 +20,25 @@ import ( "fmt" "net" "net/url" + "strconv" "strings" "sync" "time" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) var ( ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") + ErrOldCluster = errors.New("etcdclient: old cluster version") ) // Client provides and manages an etcd v3 client session. @@ -47,22 +50,25 @@ type Client struct { Auth Maintenance - conn *grpc.ClientConn - cfg Config - creds *credentials.TransportCredentials - balancer *simpleBalancer - retryWrapper retryRpcFunc - retryAuthWrapper retryRpcFunc + conn *grpc.ClientConn + dialerrc chan error + + cfg Config + creds *credentials.TransportCredentials + balancer *healthBalancer + mu *sync.Mutex ctx context.Context cancel context.CancelFunc - // Username is a username for authentication + // Username is a user name for authentication. Username string - // Password is a password for authentication + // Password is a password for authentication. 
Password string // tokenCred is an instance of WithPerRPCCredentials()'s argument tokenCred *authTokenCredential + + callOpts []grpc.CallOption } // New creates a new etcdv3 client from a given configuration. @@ -74,26 +80,28 @@ func New(cfg Config) (*Client, error) { return newClient(&cfg) } +// NewCtxClient creates a client with a context but no underlying grpc +// connection. This is useful for embedded cases that override the +// service interface implementations and do not need connection management. +func NewCtxClient(ctx context.Context) *Client { + cctx, cancel := context.WithCancel(ctx) + return &Client{ctx: cctx, cancel: cancel} +} + // NewFromURL creates a new etcdv3 client from a URL. func NewFromURL(url string) (*Client, error) { return New(Config{Endpoints: []string{url}}) } -// NewFromConfigFile creates a new etcdv3 client from a configuration file. -func NewFromConfigFile(path string) (*Client, error) { - cfg, err := configFromFile(path) - if err != nil { - return nil, err - } - return New(*cfg) -} - // Close shuts down the client's etcd connections. func (c *Client) Close() error { c.cancel() c.Watcher.Close() c.Lease.Close() - return toErr(c.ctx, c.conn.Close()) + if c.conn != nil { + return toErr(c.ctx, c.conn.Close()) + } + return c.ctx.Err() } // Ctx is a context for "out of band" messages (e.g., for sending @@ -111,8 +119,23 @@ func (c *Client) Endpoints() (eps []string) { // SetEndpoints updates client's endpoints. func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() c.cfg.Endpoints = eps - c.balancer.updateAddrs(eps) + c.mu.Unlock() + c.balancer.updateAddrs(eps...) + + // updating notifyCh can trigger new connections, + // need update addrs if all connections are down + // or addrs does not include pinAddr. 
+ c.balancer.mu.RLock() + update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr) + c.balancer.mu.RUnlock() + if update { + select { + case c.balancer.updateAddrsC <- notifyNext: + case <-c.balancer.stopc: + } + } } // Sync synchronizes client's endpoints with the known endpoints from the etcd membership. @@ -139,8 +162,10 @@ func (c *Client) autoSync() { case <-c.ctx.Done(): return case <-time.After(c.cfg.AutoSyncInterval): - ctx, _ := context.WithTimeout(c.ctx, 5*time.Second) - if err := c.Sync(ctx); err != nil && err != c.ctx.Err() { + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { logger.Println("Auto sync endpoints failed:", err) } } @@ -169,7 +194,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { host = endpoint url, uerr := url.Parse(endpoint) if uerr != nil || !strings.Contains(endpoint, "://") { - return + return proto, host, scheme } scheme = url.Scheme @@ -177,12 +202,13 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { host = url.Host switch url.Scheme { case "http", "https": - case "unix": + case "unix", "unixs": proto = "unix" + host = url.Host + url.Path default: proto, host = "", "" } - return + return proto, host, scheme } func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { @@ -191,7 +217,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden case "unix": case "http": creds = nil - case "https": + case "https", "unixs": if creds != nil { break } @@ -201,7 +227,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden default: creds = nil } - return + return creds } // dialSetupOpts gives the dial opts prior to any authentication @@ -209,10 +235,22 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts if c.cfg.DialTimeout > 0 { opts = 
[]grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} } + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } opts = append(opts, dopts...) f := func(host string, t time.Duration) (net.Conn, error) { - proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host)) + proto, host, _ := parseEndpoint(c.balancer.endpoint(host)) + if host == "" && endpoint != "" { + // dialing an endpoint not in the balancer; use + // endpoint passed into dial + proto, host, _ = parseEndpoint(endpoint) + } if proto == "" { return nil, fmt.Errorf("unknown scheme for %q", host) } @@ -222,7 +260,14 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts default: } dialer := &net.Dialer{Timeout: t} - return dialer.DialContext(c.ctx, proto, host) + conn, err := dialer.DialContext(c.ctx, proto, host) + if err != nil { + select { + case c.dialerrc <- err: + default: + } + } + return conn, err } opts = append(opts, grpc.WithDialer(f)) @@ -252,7 +297,7 @@ func (c *Client) getToken(ctx context.Context) error { endpoint := c.cfg.Endpoints[i] host := getHost(endpoint) // use dial options without dopts to avoid reusing the client balancer - auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint)) + auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c) if err != nil { continue } @@ -288,21 +333,23 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo defer cancel() ctx = cctx } - if err := c.getToken(ctx); err != nil { - if err == ctx.Err() && ctx.Err() != c.ctx.Err() { - err = grpc.ErrClientConnTimeout - } - return nil, err - } - opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) + err := c.getToken(ctx) + if err != nil { + if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { + if err == ctx.Err() && ctx.Err() != c.ctx.Err() { + err = context.DeadlineExceeded + } + 
return nil, err + } + } else { + opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) + } } - // add metrics options - opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor)) - opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor)) + opts = append(opts, c.cfg.DialOptions...) - conn, err := grpc.Dial(host, opts...) + conn, err := grpc.DialContext(c.ctx, host, opts...) if err != nil { return nil, err } @@ -313,7 +360,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo // when the cluster has a leader. func WithRequireLeader(ctx context.Context) context.Context { md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewContext(ctx, md) + return metadata.NewOutgoingContext(ctx, md) } func newClient(cfg *Config) (*Client, error) { @@ -327,20 +374,50 @@ func newClient(cfg *Config) (*Client, error) { } // use a temporary skeleton client to bootstrap first connection - ctx, cancel := context.WithCancel(context.TODO()) + baseCtx := context.TODO() + if cfg.Context != nil { + baseCtx = cfg.Context + } + + ctx, cancel := context.WithCancel(baseCtx) client := &Client{ - conn: nil, - cfg: *cfg, - creds: creds, - ctx: ctx, - cancel: cancel, + conn: nil, + dialerrc: make(chan error, 1), + cfg: *cfg, + creds: creds, + ctx: ctx, + cancel: cancel, + mu: new(sync.Mutex), + callOpts: defaultCallOpts, } if cfg.Username != "" && cfg.Password != "" { client.Username = cfg.Username client.Password = cfg.Password } + if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { + if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { + return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize) + } + callOpts := []grpc.CallOption{ + defaultFailFast, + defaultMaxCallSendMsgSize, + defaultMaxCallRecvMsgSize, + } + if 
cfg.MaxCallSendMsgSize > 0 { + callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize) + } + if cfg.MaxCallRecvMsgSize > 0 { + callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize) + } + client.callOpts = callOpts + } - client.balancer = newSimpleBalancer(cfg.Endpoints) + client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) { + return grpcHealthCheck(client, ep) + }) + + // use Endpoints[0] so that for https:// without any tls config given, then + // grpc will assume the certificate server name is the endpoint host. conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) if err != nil { client.cancel() @@ -348,24 +425,27 @@ func newClient(cfg *Config) (*Client, error) { return nil, err } client.conn = conn - client.retryWrapper = client.newRetryWrapper() - client.retryAuthWrapper = client.newAuthRetryWrapper() // wait for a connection if cfg.DialTimeout > 0 { hasConn := false waitc := time.After(cfg.DialTimeout) select { - case <-client.balancer.readyc: + case <-client.balancer.ready(): hasConn = true case <-ctx.Done(): case <-waitc: } if !hasConn { + err := context.DeadlineExceeded + select { + case err = <-client.dialerrc: + default: + } client.cancel() client.balancer.Close() conn.Close() - return nil, grpc.ErrClientConnTimeout + return nil, err } } @@ -376,10 +456,57 @@ func newClient(cfg *Config) (*Client, error) { client.Auth = NewAuth(client) client.Maintenance = NewMaintenance(client) + if cfg.RejectOldCluster { + if err := client.checkVersion(); err != nil { + client.Close() + return nil, err + } + } + go client.autoSync() return client, nil } +func (c *Client) checkVersion() (err error) { + var wg sync.WaitGroup + errc := make(chan error, len(c.cfg.Endpoints)) + ctx, cancel := context.WithCancel(c.ctx) + if c.cfg.DialTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) + } + wg.Add(len(c.cfg.Endpoints)) + for _, ep := range c.cfg.Endpoints { + // if 
cluster is current, any endpoint gives a recent version + go func(e string) { + defer wg.Done() + resp, rerr := c.Status(ctx, e) + if rerr != nil { + errc <- rerr + return + } + vs := strings.Split(resp.Version, ".") + maj, min := 0, 0 + if len(vs) >= 2 { + maj, _ = strconv.Atoi(vs[0]) + min, rerr = strconv.Atoi(vs[1]) + } + if maj < 3 || (maj == 3 && min < 2) { + rerr = ErrOldCluster + } + errc <- rerr + }(ep) + } + // wait for success + for i := 0; i < len(c.cfg.Endpoints); i++ { + if err = <-errc; err == nil { + break + } + } + cancel() + wg.Wait() + return err +} + // ActiveConnection returns the current in-use connection func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } @@ -392,14 +519,14 @@ func isHaltErr(ctx context.Context, err error) bool { if err == nil { return false } - code := grpc.Code(err) + ev, _ := status.FromError(err) // Unavailable codes mean the system will be right back. // (e.g., can't connect, lost leader) // Treat Internal codes as if something failed, leaving the // system in an inconsistent state, but retrying could make progress. // (e.g., failed in middle of send, corrupted frame) // TODO: are permanent Internal errors possible from grpc? 
- return code != codes.Unavailable && code != codes.Internal + return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal } func toErr(ctx context.Context, err error) error { @@ -410,7 +537,8 @@ func toErr(ctx context.Context, err error) error { if _, ok := err.(rpctypes.EtcdError); ok { return err } - code := grpc.Code(err) + ev, _ := status.FromError(err) + code := ev.Code() switch code { case codes.DeadlineExceeded: fallthrough @@ -419,9 +547,16 @@ func toErr(ctx context.Context, err error) error { err = ctx.Err() } case codes.Unavailable: - err = ErrNoAvailableEndpoints case codes.FailedPrecondition: err = grpc.ErrClientConnClosing } return err } + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go index b9bff626bd7..545d676e7bb 100644 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go @@ -16,6 +16,7 @@ package clientv3 import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -43,60 +44,59 @@ type Cluster interface { } type cluster struct { - remote pb.ClusterClient + remote pb.ClusterClient + callOpts []grpc.CallOption } func NewCluster(c *Client) Cluster { - return &cluster{remote: RetryClusterClient(c)} + api := &cluster{remote: RetryClusterClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { + api := &cluster{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api } func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { r := &pb.MemberAddRequest{PeerURLs: peerAddrs} - resp, err := 
c.remote.MemberAdd(ctx, r) - if err == nil { - return (*MemberAddResponse)(resp), nil - } - if isHaltErr(ctx, err) { + resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) + if err != nil { return nil, toErr(ctx, err) } - return nil, toErr(ctx, err) + return (*MemberAddResponse)(resp), nil } func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { r := &pb.MemberRemoveRequest{ID: id} - resp, err := c.remote.MemberRemove(ctx, r) - if err == nil { - return (*MemberRemoveResponse)(resp), nil - } - if isHaltErr(ctx, err) { + resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) + if err != nil { return nil, toErr(ctx, err) } - return nil, toErr(ctx, err) + return (*MemberRemoveResponse)(resp), nil } func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { // it is safe to retry on update. - for { - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false)) - if err == nil { - return (*MemberUpdateResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) + if err == nil { + return (*MemberUpdateResponse)(resp), nil } + return nil, toErr(ctx, err) } func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { // it is safe to retry on list. - for { - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false)) - if err == nil { - return (*MemberListResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...) 
+ if err == nil { + return (*MemberListResponse)(resp), nil } + return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go index 32d97eb0cc1..41e80c1da5d 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ b/vendor/github.com/coreos/etcd/clientv3/compact_op.go @@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest { return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} } -// WithCompactPhysical makes compact RPC call wait until -// the compaction is physically applied to the local database -// such that compacted entries are totally removed from the -// backend database. +// WithCompactPhysical makes Compact wait until all compacted entries are +// removed from the etcd server's storage. func WithCompactPhysical() CompactOption { return func(op *CompactOp) { op.physical = true } } diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go index f89ffb52c4a..68a25fd800f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ b/vendor/github.com/coreos/etcd/clientv3/compare.go @@ -82,6 +82,24 @@ func ModRevision(key string) Cmp { return Cmp{Key: []byte(key), Target: pb.Compare_MOD} } +// KeyBytes returns the byte slice holding with the comparison key. +func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } + +// WithKeyBytes sets the byte slice for the comparison key. +func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } + +// ValueBytes returns the byte slice holding the comparison value, if any. +func (cmp *Cmp) ValueBytes() []byte { + if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { + return tu.Value + } + return nil +} + +// WithValueBytes sets the byte slice for the comparison's value. +func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } + +// mustInt64 panics if val isn't an int or int64. 
It returns an int64 otherwise. func mustInt64(val interface{}) int64 { if v, ok := val.(int64); ok { return v @@ -91,3 +109,12 @@ func mustInt64(val interface{}) int64 { } panic("bad value") } + +// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an +// int64 otherwise. +func mustInt64orLeaseID(val interface{}) int64 { + if v, ok := val.(LeaseID); ok { + return int64(v) + } + return mustInt64(val) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD b/vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD new file mode 100644 index 00000000000..4ee0f5650b2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "election.go", + "key.go", + "mutex.go", + "session.go", + "stm.go", + ], + importpath = "github.com/coreos/etcd/clientv3/concurrency", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go new file mode 100644 index 00000000000..dcdbf511d1b --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go @@ -0,0 +1,17 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package concurrency implements concurrency operations on top of +// etcd such as distributed locks, barriers, and elections. +package concurrency diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go new file mode 100644 index 00000000000..c092bde0aeb --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go @@ -0,0 +1,246 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package concurrency + +import ( + "errors" + "fmt" + + v3 "github.com/coreos/etcd/clientv3" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc/mvccpb" + + "golang.org/x/net/context" +) + +var ( + ErrElectionNotLeader = errors.New("election: not leader") + ErrElectionNoLeader = errors.New("election: no leader") +) + +type Election struct { + session *Session + + keyPrefix string + + leaderKey string + leaderRev int64 + leaderSession *Session + hdr *pb.ResponseHeader +} + +// NewElection returns a new election on a given key prefix. +func NewElection(s *Session, pfx string) *Election { + return &Election{session: s, keyPrefix: pfx + "/"} +} + +// ResumeElection initializes an election with a known leader. +func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election { + return &Election{ + session: s, + leaderKey: leaderKey, + leaderRev: leaderRev, + leaderSession: s, + } +} + +// Campaign puts a value as eligible for the election. It blocks until +// it is elected, an error occurs, or the context is cancelled. 
+func (e *Election) Campaign(ctx context.Context, val string) error { + s := e.session + client := e.session.Client() + + k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease()) + txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0)) + txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease()))) + txn = txn.Else(v3.OpGet(k)) + resp, err := txn.Commit() + if err != nil { + return err + } + e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s + if !resp.Succeeded { + kv := resp.Responses[0].GetResponseRange().Kvs[0] + e.leaderRev = kv.CreateRevision + if string(kv.Value) != val { + if err = e.Proclaim(ctx, val); err != nil { + e.Resign(ctx) + return err + } + } + } + + _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) + if err != nil { + // clean up in case of context cancel + select { + case <-ctx.Done(): + e.Resign(client.Ctx()) + default: + e.leaderSession = nil + } + return err + } + e.hdr = resp.Header + + return nil +} + +// Proclaim lets the leader announce a new value without another election. +func (e *Election) Proclaim(ctx context.Context, val string) error { + if e.leaderSession == nil { + return ErrElectionNotLeader + } + client := e.session.Client() + cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) + txn := client.Txn(ctx).If(cmp) + txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease()))) + tresp, terr := txn.Commit() + if terr != nil { + return terr + } + if !tresp.Succeeded { + e.leaderKey = "" + return ErrElectionNotLeader + } + + e.hdr = tresp.Header + return nil +} + +// Resign lets a leader start a new election. 
+func (e *Election) Resign(ctx context.Context) (err error) { + if e.leaderSession == nil { + return nil + } + client := e.session.Client() + cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) + resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit() + if err == nil { + e.hdr = resp.Header + } + e.leaderKey = "" + e.leaderSession = nil + return err +} + +// Leader returns the leader value for the current election. +func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) { + client := e.session.Client() + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) + if err != nil { + return nil, err + } else if len(resp.Kvs) == 0 { + // no leader currently elected + return nil, ErrElectionNoLeader + } + return resp, nil +} + +// Observe returns a channel that reliably observes ordered leader proposals +// as GetResponse values on every current elected leader key. It will not +// necessarily fetch all historical leader updates, but will always post the +// most recent leader value. +// +// The channel closes when the context is canceled or the underlying watcher +// is otherwise disrupted. +func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse { + retc := make(chan v3.GetResponse) + go e.observe(ctx, retc) + return retc +} + +func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { + client := e.session.Client() + + defer close(ch) + for { + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) + if err != nil { + return + } + + var kv *mvccpb.KeyValue + var hdr *pb.ResponseHeader + + if len(resp.Kvs) == 0 { + cctx, cancel := context.WithCancel(ctx) + // wait for first key put on prefix + opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()} + wch := client.Watch(cctx, e.keyPrefix, opts...) 
+ for kv == nil { + wr, ok := <-wch + if !ok || wr.Err() != nil { + cancel() + return + } + // only accept puts; a delete will make observe() spin + for _, ev := range wr.Events { + if ev.Type == mvccpb.PUT { + hdr, kv = &wr.Header, ev.Kv + // may have multiple revs; hdr.rev = the last rev + // set to kv's rev in case batch has multiple Puts + hdr.Revision = kv.ModRevision + break + } + } + } + cancel() + } else { + hdr, kv = resp.Header, resp.Kvs[0] + } + + select { + case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}: + case <-ctx.Done(): + return + } + + cctx, cancel := context.WithCancel(ctx) + wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1)) + keyDeleted := false + for !keyDeleted { + wr, ok := <-wch + if !ok { + cancel() + return + } + for _, ev := range wr.Events { + if ev.Type == mvccpb.DELETE { + keyDeleted = true + break + } + resp.Header = &wr.Header + resp.Kvs = []*mvccpb.KeyValue{ev.Kv} + select { + case ch <- *resp: + case <-cctx.Done(): + cancel() + return + } + } + } + cancel() + } +} + +// Key returns the leader key if elected, empty string otherwise. +func (e *Election) Key() string { return e.leaderKey } + +// Rev returns the leader key's creation revision, if elected. +func (e *Election) Rev() int64 { return e.leaderRev } + +// Header is the response header from the last successful election proposal. +func (e *Election) Header() *pb.ResponseHeader { return e.hdr } diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go new file mode 100644 index 00000000000..9936737756c --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go @@ -0,0 +1,66 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"fmt"
+
+	v3 "github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+
+	"golang.org/x/net/context"
+)
+
+func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var wr v3.WatchResponse
+	wch := client.Watch(cctx, key, v3.WithRev(rev))
+	for wr = range wch {
+		for _, ev := range wr.Events {
+			if ev.Type == mvccpb.DELETE {
+				return nil
+			}
+		}
+	}
+	if err := wr.Err(); err != nil {
+		return err
+	}
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	return fmt.Errorf("lost watcher waiting for delete")
+}
+
+// waitDeletes efficiently waits until all keys matching the prefix and no greater
+// than the create revision are deleted.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
+	getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
+	for {
+		resp, err := client.Get(ctx, pfx, getOpts...)
+ if err != nil { + return nil, err + } + if len(resp.Kvs) == 0 { + return resp.Header, nil + } + lastKey := string(resp.Kvs[0].Key) + if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go new file mode 100644 index 00000000000..736a9d3d353 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go @@ -0,0 +1,119 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "fmt" + "sync" + + v3 "github.com/coreos/etcd/clientv3" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" +) + +// Mutex implements the sync Locker interface with etcd +type Mutex struct { + s *Session + + pfx string + myKey string + myRev int64 + hdr *pb.ResponseHeader +} + +func NewMutex(s *Session, pfx string) *Mutex { + return &Mutex{s, pfx + "/", "", -1, nil} +} + +// Lock locks the mutex with a cancelable context. If the context is canceled +// while trying to acquire the lock, the mutex tries to clean its stale lock entry. 
+func (m *Mutex) Lock(ctx context.Context) error { + s := m.s + client := m.s.Client() + + m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) + cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) + // put self in lock waiters via myKey; oldest waiter holds lock + put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) + // reuse key in case this session already holds the lock + get := v3.OpGet(m.myKey) + // fetch current holder to complete uncontended path with only one RPC + getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) + resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() + if err != nil { + return err + } + m.myRev = resp.Header.Revision + if !resp.Succeeded { + m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision + } + // if no key on prefix / the minimum rev is key, already hold the lock + ownerKey := resp.Responses[1].GetResponseRange().Kvs + if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { + m.hdr = resp.Header + return nil + } + + // wait for deletion revisions prior to myKey + hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) + // release lock key if cancelled + select { + case <-ctx.Done(): + m.Unlock(client.Ctx()) + default: + m.hdr = hdr + } + return werr +} + +func (m *Mutex) Unlock(ctx context.Context) error { + client := m.s.Client() + if _, err := client.Delete(ctx, m.myKey); err != nil { + return err + } + m.myKey = "\x00" + m.myRev = -1 + return nil +} + +func (m *Mutex) IsOwner() v3.Cmp { + return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev) +} + +func (m *Mutex) Key() string { return m.myKey } + +// Header is the response header received from etcd on acquiring the lock. 
+func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr } + +type lockerMutex struct{ *Mutex } + +func (lm *lockerMutex) Lock() { + client := lm.s.Client() + if err := lm.Mutex.Lock(client.Ctx()); err != nil { + panic(err) + } +} +func (lm *lockerMutex) Unlock() { + client := lm.s.Client() + if err := lm.Mutex.Unlock(client.Ctx()); err != nil { + panic(err) + } +} + +// NewLocker creates a sync.Locker backed by an etcd mutex. +func NewLocker(s *Session, pfx string) sync.Locker { + return &lockerMutex{NewMutex(s, pfx)} +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go new file mode 100644 index 00000000000..55cb553ea4a --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go @@ -0,0 +1,142 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "time" + + v3 "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +const defaultSessionTTL = 60 + +// Session represents a lease kept alive for the lifetime of a client. +// Fault-tolerant applications may use sessions to reason about liveness. +type Session struct { + client *v3.Client + opts *sessionOptions + id v3.LeaseID + + cancel context.CancelFunc + donec <-chan struct{} +} + +// NewSession gets the leased session for a client. 
+func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { + ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} + for _, opt := range opts { + opt(ops) + } + + id := ops.leaseID + if id == v3.NoLease { + resp, err := client.Grant(ops.ctx, int64(ops.ttl)) + if err != nil { + return nil, err + } + id = v3.LeaseID(resp.ID) + } + + ctx, cancel := context.WithCancel(ops.ctx) + keepAlive, err := client.KeepAlive(ctx, id) + if err != nil || keepAlive == nil { + cancel() + return nil, err + } + + donec := make(chan struct{}) + s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} + + // keep the lease alive until client error or cancelled context + go func() { + defer close(donec) + for range keepAlive { + // eat messages until keep alive channel closes + } + }() + + return s, nil +} + +// Client is the etcd client that is attached to the session. +func (s *Session) Client() *v3.Client { + return s.client +} + +// Lease is the lease ID for keys bound to the session. +func (s *Session) Lease() v3.LeaseID { return s.id } + +// Done returns a channel that closes when the lease is orphaned, expires, or +// is otherwise no longer being refreshed. +func (s *Session) Done() <-chan struct{} { return s.donec } + +// Orphan ends the refresh for the session lease. This is useful +// in case the state of the client connection is indeterminate (revoke +// would fail) or when transferring lease ownership. +func (s *Session) Orphan() { + s.cancel() + <-s.donec +} + +// Close orphans the session and revokes the session lease. +func (s *Session) Close() error { + s.Orphan() + // if revoke takes longer than the ttl, lease is expired anyway + ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second) + _, err := s.client.Revoke(ctx, s.id) + cancel() + return err +} + +type sessionOptions struct { + ttl int + leaseID v3.LeaseID + ctx context.Context +} + +// SessionOption configures Session. 
+type SessionOption func(*sessionOptions) + +// WithTTL configures the session's TTL in seconds. +// If TTL is <= 0, the default 60 seconds TTL will be used. +func WithTTL(ttl int) SessionOption { + return func(so *sessionOptions) { + if ttl > 0 { + so.ttl = ttl + } + } +} + +// WithLease specifies the existing leaseID to be used for the session. +// This is useful in process restart scenario, for example, to reclaim +// leadership from an election prior to restart. +func WithLease(leaseID v3.LeaseID) SessionOption { + return func(so *sessionOptions) { + so.leaseID = leaseID + } +} + +// WithContext assigns a context to the session instead of defaulting to +// using the client context. This is useful for canceling NewSession and +// Close operations immediately without having to close the client. If the +// context is canceled before Close() completes, the session's lease will be +// abandoned and left to expire instead of being revoked. +func WithContext(ctx context.Context) SessionOption { + return func(so *sessionOptions) { + so.ctx = ctx + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go new file mode 100644 index 00000000000..6bfd70ec428 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go @@ -0,0 +1,388 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package concurrency + +import ( + "math" + + v3 "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +// STM is an interface for software transactional memory. +type STM interface { + // Get returns the value for a key and inserts the key in the txn's read set. + // If Get fails, it aborts the transaction with an error, never returning. + Get(key ...string) string + // Put adds a value for a key to the write set. + Put(key, val string, opts ...v3.OpOption) + // Rev returns the revision of a key in the read set. + Rev(key string) int64 + // Del deletes a key. + Del(key string) + + // commit attempts to apply the txn's changes to the server. + commit() *v3.TxnResponse + reset() +} + +// Isolation is an enumeration of transactional isolation levels which +// describes how transactions should interfere and conflict. +type Isolation int + +const ( + // SerializableSnapshot provides serializable isolation and also checks + // for write conflicts. + SerializableSnapshot Isolation = iota + // Serializable reads within the same transaction attempt return data + // from the at the revision of the first read. + Serializable + // RepeatableReads reads within the same transaction attempt always + // return the same data. + RepeatableReads + // ReadCommitted reads keys from any committed revision. + ReadCommitted +) + +// stmError safely passes STM errors through panic to the STM error channel. +type stmError struct{ err error } + +type stmOptions struct { + iso Isolation + ctx context.Context + prefetch []string +} + +type stmOption func(*stmOptions) + +// WithIsolation specifies the transaction isolation level. +func WithIsolation(lvl Isolation) stmOption { + return func(so *stmOptions) { so.iso = lvl } +} + +// WithAbortContext specifies the context for permanently aborting the transaction. 
+func WithAbortContext(ctx context.Context) stmOption { + return func(so *stmOptions) { so.ctx = ctx } +} + +// WithPrefetch is a hint to prefetch a list of keys before trying to apply. +// If an STM transaction will unconditionally fetch a set of keys, prefetching +// those keys will save the round-trip cost from requesting each key one by one +// with Get(). +func WithPrefetch(keys ...string) stmOption { + return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } +} + +// NewSTM initiates a new STM instance, using serializable snapshot isolation by default. +func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { + opts := &stmOptions{ctx: c.Ctx()} + for _, f := range so { + f(opts) + } + if len(opts.prefetch) != 0 { + f := apply + apply = func(s STM) error { + s.Get(opts.prefetch...) + return f(s) + } + } + return runSTM(mkSTM(c, opts), apply) +} + +func mkSTM(c *v3.Client, opts *stmOptions) STM { + switch opts.iso { + case SerializableSnapshot: + s := &stmSerializable{ + stm: stm{client: c, ctx: opts.ctx}, + prefetch: make(map[string]*v3.GetResponse), + } + s.conflicts = func() []v3.Cmp { + return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...) 
+ } + return s + case Serializable: + s := &stmSerializable{ + stm: stm{client: c, ctx: opts.ctx}, + prefetch: make(map[string]*v3.GetResponse), + } + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case RepeatableReads: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case ReadCommitted: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return nil } + return s + default: + panic("unsupported stm") + } +} + +type stmResponse struct { + resp *v3.TxnResponse + err error +} + +func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) { + outc := make(chan stmResponse, 1) + go func() { + defer func() { + if r := recover(); r != nil { + e, ok := r.(stmError) + if !ok { + // client apply panicked + panic(r) + } + outc <- stmResponse{nil, e.err} + } + }() + var out stmResponse + for { + s.reset() + if out.err = apply(s); out.err != nil { + break + } + if out.resp = s.commit(); out.resp != nil { + break + } + } + outc <- out + }() + r := <-outc + return r.resp, r.err +} + +// stm implements repeatable-read software transactional memory over etcd +type stm struct { + client *v3.Client + ctx context.Context + // rset holds read key values and revisions + rset readSet + // wset holds overwritten keys and their values + wset writeSet + // getOpts are the opts used for gets + getOpts []v3.OpOption + // conflicts computes the current conflicts on the txn + conflicts func() []v3.Cmp +} + +type stmPut struct { + val string + op v3.Op +} + +type readSet map[string]*v3.GetResponse + +func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) { + for i, resp := range txnresp.Responses { + rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange()) + } +} + +// first returns the store revision from the first fetch +func (rs readSet) first() int64 { + ret := int64(math.MaxInt64 - 1) + 
for _, resp := range rs { + if rev := resp.Header.Revision; rev < ret { + ret = rev + } + } + return ret +} + +// cmps guards the txn from updates to read set +func (rs readSet) cmps() []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(rs)) + for k, rk := range rs { + cmps = append(cmps, isKeyCurrent(k, rk)) + } + return cmps +} + +type writeSet map[string]stmPut + +func (ws writeSet) get(keys ...string) *stmPut { + for _, key := range keys { + if wv, ok := ws[key]; ok { + return &wv + } + } + return nil +} + +// cmps returns a cmp list testing no writes have happened past rev +func (ws writeSet) cmps(rev int64) []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(ws)) + for key := range ws { + cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) + } + return cmps +} + +// puts is the list of ops for all pending writes +func (ws writeSet) puts() []v3.Op { + puts := make([]v3.Op, 0, len(ws)) + for _, v := range ws { + puts = append(puts, v.op) + } + return puts +} + +func (s *stm) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + return respToValue(s.fetch(keys...)) +} + +func (s *stm) Put(key, val string, opts ...v3.OpOption) { + s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)} +} + +func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} } + +func (s *stm) Rev(key string) int64 { + if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 { + return resp.Kvs[0].ModRevision + } + return 0 +} + +func (s *stm) commit() *v3.TxnResponse { + txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + return nil +} + +func (s *stm) fetch(keys ...string) *v3.GetResponse { + if len(keys) == 0 { + return nil + } + ops := make([]v3.Op, len(keys)) + for i, key := range keys { + if resp, ok := s.rset[key]; ok { + return resp + } + ops[i] = v3.OpGet(key, s.getOpts...) 
+ } + txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit() + if err != nil { + panic(stmError{err}) + } + s.rset.add(keys, txnresp) + return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange()) +} + +func (s *stm) reset() { + s.rset = make(map[string]*v3.GetResponse) + s.wset = make(map[string]stmPut) +} + +type stmSerializable struct { + stm + prefetch map[string]*v3.GetResponse +} + +func (s *stmSerializable) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + firstRead := len(s.rset) == 0 + for _, key := range keys { + if resp, ok := s.prefetch[key]; ok { + delete(s.prefetch, key) + s.rset[key] = resp + } + } + resp := s.stm.fetch(keys...) + if firstRead { + // txn's base revision is defined by the first read + s.getOpts = []v3.OpOption{ + v3.WithRev(resp.Header.Revision), + v3.WithSerializable(), + } + } + return respToValue(resp) +} + +func (s *stmSerializable) Rev(key string) int64 { + s.Get(key) + return s.stm.Rev(key) +} + +func (s *stmSerializable) gets() ([]string, []v3.Op) { + keys := make([]string, 0, len(s.rset)) + ops := make([]v3.Op, 0, len(s.rset)) + for k := range s.rset { + keys = append(keys, k) + ops = append(ops, v3.OpGet(k)) + } + return keys, ops +} + +func (s *stmSerializable) commit() *v3.TxnResponse { + keys, getops := s.gets() + txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...) 
+ // use Else to prefetch keys in case of conflict to save a round trip + txnresp, err := txn.Else(getops...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + // load prefetch with Else data + s.rset.add(keys, txnresp) + s.prefetch = s.rset + s.getOpts = nil + return nil +} + +func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp { + if len(r.Kvs) != 0 { + return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision) + } + return v3.Compare(v3.ModRevision(k), "=", 0) +} + +func respToValue(resp *v3.GetResponse) string { + if resp == nil || len(resp.Kvs) == 0 { + return "" + } + return string(resp.Kvs[0].Value) +} + +// NewSTMRepeatable is deprecated. +func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads)) +} + +// NewSTMSerializable is deprecated. +func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable)) +} + +// NewSTMReadCommitted is deprecated. +func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted)) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go index d1d5f40906a..fee12eaf60b 100644 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -16,98 +16,60 @@ package clientv3 import ( "crypto/tls" - "crypto/x509" - "io/ioutil" "time" - "github.com/coreos/etcd/pkg/tlsutil" - "github.com/ghodss/yaml" + "golang.org/x/net/context" + "google.golang.org/grpc" ) type Config struct { - // Endpoints is a list of URLs - Endpoints []string + // Endpoints is a list of URLs. 
+ Endpoints []string `json:"endpoints"` // AutoSyncInterval is the interval to update endpoints with its latest members. // 0 disables auto-sync. By default auto-sync is disabled. - AutoSyncInterval time.Duration + AutoSyncInterval time.Duration `json:"auto-sync-interval"` // DialTimeout is the timeout for failing to establish a connection. - DialTimeout time.Duration + DialTimeout time.Duration `json:"dial-timeout"` + + // DialKeepAliveTime is the time in seconds after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time in seconds that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + + // MaxCallSendMsgSize is the client-side request send limit in bytes. + // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). + // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallSendMsgSize int + + // MaxCallRecvMsgSize is the client-side response receive limit. + // If 0, it defaults to "math.MaxInt32", because range response can + // easily exceed request send limits. + // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallRecvMsgSize int // TLS holds the client secure credentials, if any. TLS *tls.Config - // Username is a username for authentication - Username string + // Username is a user name for authentication. 
+ Username string `json:"username"` - // Password is a password for authentication - Password string -} - -type yamlConfig struct { - Endpoints []string `json:"endpoints"` - AutoSyncInterval time.Duration `json:"auto-sync-interval"` - DialTimeout time.Duration `json:"dial-timeout"` - InsecureTransport bool `json:"insecure-transport"` - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"` - Certfile string `json:"cert-file"` - Keyfile string `json:"key-file"` - CAfile string `json:"ca-file"` -} - -func configFromFile(fpath string) (*Config, error) { - b, err := ioutil.ReadFile(fpath) - if err != nil { - return nil, err - } - - yc := &yamlConfig{} - - err = yaml.Unmarshal(b, yc) - if err != nil { - return nil, err - } - - cfg := &Config{ - Endpoints: yc.Endpoints, - AutoSyncInterval: yc.AutoSyncInterval, - DialTimeout: yc.DialTimeout, - } - - if yc.InsecureTransport { - cfg.TLS = nil - return cfg, nil - } - - var ( - cert *tls.Certificate - cp *x509.CertPool - ) - - if yc.Certfile != "" && yc.Keyfile != "" { - cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil) - if err != nil { - return nil, err - } - } - - if yc.CAfile != "" { - cp, err = tlsutil.NewCertPool([]string{yc.CAfile}) - if err != nil { - return nil, err - } - } - - tlscfg := &tls.Config{ - MinVersion: tls.VersionTLS10, - InsecureSkipVerify: yc.InsecureSkipTLSVerify, - RootCAs: cp, - } - if cert != nil { - tlscfg.Certificates = []tls.Certificate{*cert} - } - cfg.TLS = tlscfg - - return cfg, nil + // Password is a password for authentication. + Password string `json:"password"` + + // RejectOldCluster when set will refuse to create a client against an outdated cluster. + RejectOldCluster bool `json:"reject-old-cluster"` + + // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). + DialOptions []grpc.DialOption + + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. 
+ Context context.Context } diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go index 470ca4dc476..dacc5bb346f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ b/vendor/github.com/coreos/etcd/clientv3/doc.go @@ -28,7 +28,7 @@ // Make sure to close the client after using it. If the client is not closed, the // connection will have leaky goroutines. // -// To specify client request timeout, pass context.WithTimeout to APIs: +// To specify a client request timeout, wrap the context with context.WithTimeout: // // ctx, cancel := context.WithTimeout(context.Background(), timeout) // resp, err := kvc.Put(ctx, "sample_key", "sample_value") diff --git a/vendor/github.com/coreos/etcd/clientv3/grpc_options.go b/vendor/github.com/coreos/etcd/clientv3/grpc_options.go new file mode 100644 index 00000000000..592dd6993cf --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/grpc_options.go @@ -0,0 +1,46 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + "math" + + "google.golang.org/grpc" +) + +var ( + // Disable gRPC internal retrial logic + // TODO: enable when gRPC retry is stable (FailFast=false) + // Reference: + // - https://github.com/grpc/grpc-go/issues/1532 + // - https://github.com/grpc/proposal/blob/master/A6-client-retries.md + defaultFailFast = grpc.FailFast(true) + + // client-side request send limit, gRPC default is math.MaxInt32 + // Make sure that "client-side send limit < server-side default send/recv limit" + // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes + defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024) + + // client-side response receive limit, gRPC default is 4MB + // Make sure that "client-side receive limit >= server-side default send/recv limit" + // because range response can easily exceed request send limits + // Default to math.MaxInt32; writes exceeding server-side send limit fails anyway + defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32) +) + +// defaultCallOpts defines a list of default "gRPC.CallOption". +// Some options are exposed to "clientv3.Config". +// Defaults will be overridden by the settings in "clientv3.Config". +var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize} diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go new file mode 100644 index 00000000000..52bea90e66e --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go @@ -0,0 +1,627 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "errors" + "net/url" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +const ( + minHealthRetryDuration = 3 * time.Second + unknownService = "unknown service grpc.health.v1.Health" +) + +// ErrNoAddrAvilable is returned by Get() when the balancer does not have +// any active connection to endpoints at the time. +// This error is returned only when opts.BlockingWait is true. +var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available") + +type healthCheckFunc func(ep string) (bool, error) + +type notifyMsg int + +const ( + notifyReset notifyMsg = iota + notifyNext +) + +// healthBalancer does the bare minimum to expose multiple eps +// to the grpc reconnection code path +type healthBalancer struct { + // addrs are the client's endpoint addresses for grpc + addrs []grpc.Address + + // eps holds the raw endpoints from the client + eps []string + + // notifyCh notifies grpc of the set of addresses for connecting + notifyCh chan []grpc.Address + + // readyc closes once the first connection is up + readyc chan struct{} + readyOnce sync.Once + + // healthCheck checks an endpoint's health. + healthCheck healthCheckFunc + healthCheckTimeout time.Duration + + unhealthyMu sync.RWMutex + unhealthyHostPorts map[string]time.Time + + // mu protects all fields below. 
+ mu sync.RWMutex + + // upc closes when pinAddr transitions from empty to non-empty or the balancer closes. + upc chan struct{} + + // downc closes when grpc calls down() on pinAddr + downc chan struct{} + + // stopc is closed to signal updateNotifyLoop should stop. + stopc chan struct{} + stopOnce sync.Once + wg sync.WaitGroup + + // donec closes when all goroutines are exited + donec chan struct{} + + // updateAddrsC notifies updateNotifyLoop to update addrs. + updateAddrsC chan notifyMsg + + // grpc issues TLS cert checks using the string passed into dial so + // that string must be the host. To recover the full scheme://host URL, + // have a map from hosts to the original endpoint. + hostPort2ep map[string]string + + // pinAddr is the currently pinned address; set to the empty string on + // initialization and shutdown. + pinAddr string + + closed bool +} + +func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer { + notifyCh := make(chan []grpc.Address) + addrs := eps2addrs(eps) + hb := &healthBalancer{ + addrs: addrs, + eps: eps, + notifyCh: notifyCh, + readyc: make(chan struct{}), + healthCheck: hc, + unhealthyHostPorts: make(map[string]time.Time), + upc: make(chan struct{}), + stopc: make(chan struct{}), + downc: make(chan struct{}), + donec: make(chan struct{}), + updateAddrsC: make(chan notifyMsg), + hostPort2ep: getHostPort2ep(eps), + } + if timeout < minHealthRetryDuration { + timeout = minHealthRetryDuration + } + hb.healthCheckTimeout = timeout + + close(hb.downc) + go hb.updateNotifyLoop() + hb.wg.Add(1) + go func() { + defer hb.wg.Done() + hb.updateUnhealthy() + }() + return hb +} + +func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } + +func (b *healthBalancer) ConnectNotify() <-chan struct{} { + b.mu.Lock() + defer b.mu.Unlock() + return b.upc +} + +func (b *healthBalancer) ready() <-chan struct{} { return b.readyc } + +func (b *healthBalancer) endpoint(hostPort 
string) string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.hostPort2ep[hostPort] +} + +func (b *healthBalancer) pinned() string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.pinAddr +} + +func (b *healthBalancer) hostPortError(hostPort string, err error) { + if b.endpoint(hostPort) == "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error()) + } + return + } + + b.unhealthyMu.Lock() + b.unhealthyHostPorts[hostPort] = time.Now() + b.unhealthyMu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error()) + } +} + +func (b *healthBalancer) removeUnhealthy(hostPort, msg string) { + if b.endpoint(hostPort) == "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg) + } + return + } + + b.unhealthyMu.Lock() + delete(b.unhealthyHostPorts, hostPort) + b.unhealthyMu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg) + } +} + +func (b *healthBalancer) countUnhealthy() (count int) { + b.unhealthyMu.RLock() + count = len(b.unhealthyHostPorts) + b.unhealthyMu.RUnlock() + return count +} + +func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) { + b.unhealthyMu.RLock() + _, unhealthy = b.unhealthyHostPorts[hostPort] + b.unhealthyMu.RUnlock() + return unhealthy +} + +func (b *healthBalancer) cleanupUnhealthy() { + b.unhealthyMu.Lock() + for k, v := range b.unhealthyHostPorts { + if time.Since(v) > b.healthCheckTimeout { + delete(b.unhealthyHostPorts, k) + if logger.V(4) { + logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout) + } + } + } + b.unhealthyMu.Unlock() +} + +func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) { + unhealthyCnt := b.countUnhealthy() + + b.mu.RLock() + defer b.mu.RUnlock() + + hbAddrs := b.addrs + if len(b.addrs) == 1 || 
unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) { + liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep)) + for k := range b.hostPort2ep { + liveHostPorts[k] = struct{}{} + } + return hbAddrs, liveHostPorts + } + + addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt) + liveHostPorts := make(map[string]struct{}, len(addrs)) + for _, addr := range b.addrs { + if !b.isUnhealthy(addr.Addr) { + addrs = append(addrs, addr) + liveHostPorts[addr.Addr] = struct{}{} + } + } + return addrs, liveHostPorts +} + +func (b *healthBalancer) updateUnhealthy() { + for { + select { + case <-time.After(b.healthCheckTimeout): + b.cleanupUnhealthy() + pinned := b.pinned() + if pinned == "" || b.isUnhealthy(pinned) { + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + return + } + } + case <-b.stopc: + return + } + } +} + +func (b *healthBalancer) updateAddrs(eps ...string) { + np := getHostPort2ep(eps) + + b.mu.Lock() + defer b.mu.Unlock() + + match := len(np) == len(b.hostPort2ep) + if match { + for k, v := range np { + if b.hostPort2ep[k] != v { + match = false + break + } + } + } + if match { + // same endpoints, so no need to update address + return + } + + b.hostPort2ep = np + b.addrs, b.eps = eps2addrs(eps), eps + + b.unhealthyMu.Lock() + b.unhealthyHostPorts = make(map[string]time.Time) + b.unhealthyMu.Unlock() +} + +func (b *healthBalancer) next() { + b.mu.RLock() + downc := b.downc + b.mu.RUnlock() + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + } + // wait until disconnect so new RPCs are not issued on old connection + select { + case <-downc: + case <-b.stopc: + } +} + +func (b *healthBalancer) updateNotifyLoop() { + defer close(b.donec) + + for { + b.mu.RLock() + upc, downc, addr := b.upc, b.downc, b.pinAddr + b.mu.RUnlock() + // downc or upc should be closed + select { + case <-downc: + downc = nil + default: + } + select { + case <-upc: + upc = nil + default: + } + switch { + case downc == nil && upc == nil: + // stale + 
select { + case <-b.stopc: + return + default: + } + case downc == nil: + b.notifyAddrs(notifyReset) + select { + case <-upc: + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + return + } + case upc == nil: + select { + // close connections that are not the pinned address + case b.notifyCh <- []grpc.Address{{Addr: addr}}: + case <-downc: + case <-b.stopc: + return + } + select { + case <-downc: + b.notifyAddrs(notifyReset) + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + return + } + } + } +} + +func (b *healthBalancer) notifyAddrs(msg notifyMsg) { + if msg == notifyNext { + select { + case b.notifyCh <- []grpc.Address{}: + case <-b.stopc: + return + } + } + b.mu.RLock() + pinAddr := b.pinAddr + downc := b.downc + b.mu.RUnlock() + addrs, hostPorts := b.liveAddrs() + + var waitDown bool + if pinAddr != "" { + _, ok := hostPorts[pinAddr] + waitDown = !ok + } + + select { + case b.notifyCh <- addrs: + if waitDown { + select { + case <-downc: + case <-b.stopc: + } + } + case <-b.stopc: + } +} + +func (b *healthBalancer) Up(addr grpc.Address) func(error) { + if !b.mayPin(addr) { + return func(err error) {} + } + + b.mu.Lock() + defer b.mu.Unlock() + + // gRPC might call Up after it called Close. We add this check + // to "fix" it up at application layer. Otherwise, will panic + // if b.upc is already closed. + if b.closed { + return func(err error) {} + } + + // gRPC might call Up on a stale address. + // Prevent updating pinAddr with a stale address. 
+ if !hasAddr(b.addrs, addr.Addr) { + return func(err error) {} + } + + if b.pinAddr != "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr) + } + return func(err error) {} + } + + // notify waiting Get()s and pin first connected address + close(b.upc) + b.downc = make(chan struct{}) + b.pinAddr = addr.Addr + if logger.V(4) { + logger.Infof("clientv3/balancer: pin %q", addr.Addr) + } + + // notify client that a connection is up + b.readyOnce.Do(func() { close(b.readyc) }) + + return func(err error) { + // If connected to a black hole endpoint or a killed server, the gRPC ping + // timeout will induce a network I/O error, and retrying until success; + // finding healthy endpoint on retry could take several timeouts and redials. + // To avoid wasting retries, gray-list unhealthy endpoints. + b.hostPortError(addr.Addr, err) + + b.mu.Lock() + b.upc = make(chan struct{}) + close(b.downc) + b.pinAddr = "" + b.mu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) + } + } +} + +func (b *healthBalancer) mayPin(addr grpc.Address) bool { + if b.endpoint(addr.Addr) == "" { // stale host:port + return false + } + + b.unhealthyMu.RLock() + unhealthyCnt := len(b.unhealthyHostPorts) + failedTime, bad := b.unhealthyHostPorts[addr.Addr] + b.unhealthyMu.RUnlock() + + b.mu.RLock() + skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt + b.mu.RUnlock() + if skip || !bad { + return true + } + + // prevent isolated member's endpoint from being infinitely retried, as follows: + // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm + // 2. balancer 'Up' unpins with grpc: failed with network I/O error + // 3. 
grpc-healthcheck still SERVING, thus retry to pin + // instead, return before grpc-healthcheck if failed within healthcheck timeout + if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout) + } + return false + } + + if ok, _ := b.healthCheck(addr.Addr); ok { + b.removeUnhealthy(addr.Addr, "health check success") + return true + } + + b.hostPortError(addr.Addr, errors.New("health check failed")) + return false +} + +func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { + var ( + addr string + closed bool + ) + + // If opts.BlockingWait is false (for fail-fast RPCs), it should return + // an address it has notified via Notify immediately instead of blocking. + if !opts.BlockingWait { + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr == "" { + return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable + } + return grpc.Address{Addr: addr}, func() {}, nil + } + + for { + b.mu.RLock() + ch := b.upc + b.mu.RUnlock() + select { + case <-ch: + case <-b.donec: + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + case <-ctx.Done(): + return grpc.Address{Addr: ""}, nil, ctx.Err() + } + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed. + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr != "" { + break + } + } + return grpc.Address{Addr: addr}, func() {}, nil +} + +func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } + +func (b *healthBalancer) Close() error { + b.mu.Lock() + // In case gRPC calls close twice. 
TODO: remove the checking + // when we are sure that gRPC wont call close twice. + if b.closed { + b.mu.Unlock() + <-b.donec + return nil + } + b.closed = true + b.stopOnce.Do(func() { close(b.stopc) }) + b.pinAddr = "" + + // In the case of following scenario: + // 1. upc is not closed; no pinned address + // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks + // 3. client.conn.Close() calls balancer.Close(); closed = true + // 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled + // we must close upc so Get() exits from blocking on upc + select { + case <-b.upc: + default: + // terminate all waiting Get()s + close(b.upc) + } + + b.mu.Unlock() + b.wg.Wait() + + // wait for updateNotifyLoop to finish + <-b.donec + close(b.notifyCh) + + return nil +} + +func grpcHealthCheck(client *Client, ep string) (bool, error) { + conn, err := client.dial(ep) + if err != nil { + return false, err + } + defer conn.Close() + cli := healthpb.NewHealthClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) + cancel() + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable { + if s.Message() == unknownService { // etcd < v3.3.0 + return true, nil + } + } + return false, err + } + return resp.Status == healthpb.HealthCheckResponse_SERVING, nil +} + +func hasAddr(addrs []grpc.Address, targetAddr string) bool { + for _, addr := range addrs { + if targetAddr == addr.Addr { + return true + } + } + return false +} + +func getHost(ep string) string { + url, uerr := url.Parse(ep) + if uerr != nil || !strings.Contains(ep, "://") { + return ep + } + return url.Host +} + +func eps2addrs(eps []string) []grpc.Address { + addrs := make([]grpc.Address, len(eps)) + for i := range eps { + addrs[i].Addr = getHost(eps[i]) + } + return addrs +} + +func getHostPort2ep(eps []string) 
map[string]string { + hm := make(map[string]string, len(eps)) + for i := range eps { + _, host, _ := parseEndpoint(eps[i]) + hm[host] = eps[i] + } + return hm +} diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go index c8350f9268b..6289605c8e0 100644 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -16,6 +16,7 @@ package clientv3 import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -32,7 +33,7 @@ type KV interface { // Put puts a key-value pair into etcd. // Note that key,value can be plain bytes array and string is // an immutable representation of that bytes array. - // To get a string of bytes, do string([]byte(0x10, 0x20)). + // To get a string of bytes, do string([]byte{0x10, 0x20}). Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) // Get retrieves keys. @@ -51,11 +52,6 @@ type KV interface { // Compact compacts etcd KV history before the given rev. Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) - // Do applies a single Op on KV without a transaction. - // Do is useful when declaring operations to be issued at a later time - // whereas Get/Put/Delete are for better suited for when the operation - // should be immediately issued at time of declaration. - // Do applies a single Op on KV without a transaction. 
// Do is useful when creating arbitrary operations to be issued at a // later time; the user can range over the operations, calling Do to @@ -71,22 +67,46 @@ type OpResponse struct { put *PutResponse get *GetResponse del *DeleteResponse + txn *TxnResponse } func (op OpResponse) Put() *PutResponse { return op.put } func (op OpResponse) Get() *GetResponse { return op.get } func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} type kv struct { - remote pb.KVClient + remote pb.KVClient + callOpts []grpc.CallOption } func NewKV(c *Client) KV { - return &kv{remote: RetryKVClient(c)} + api := &kv{remote: RetryKVClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api } -func NewKVFromKVClient(remote pb.KVClient) KV { - return &kv{remote: remote} +func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { + api := &kv{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api } func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { @@ -105,7 +125,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete } func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { - resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest()) + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) 
if err != nil { return nil, toErr(ctx, err) } @@ -114,54 +134,43 @@ func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*C func (kv *kv) Txn(ctx context.Context) Txn { return &txn{ - kv: kv, - ctx: ctx, + kv: kv, + ctx: ctx, + callOpts: kv.callOpts, } } func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { - for { - resp, err := kv.do(ctx, op) - if err == nil { - return resp, nil - } - - if isHaltErr(ctx, err) { - return resp, toErr(ctx, err) - } - // do not retry on modifications - if op.isWrite() { - return resp, toErr(ctx, err) - } - } -} - -func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { - // TODO: handle other ops case tRange: var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) if err == nil { return OpResponse{get: (*GetResponse)(resp)}, nil } case tPut: var resp *pb.PutResponse - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} - resp, err = kv.remote.Put(ctx, r) + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + resp, err = kv.remote.Put(ctx, r, kv.callOpts...) if err == nil { return OpResponse{put: (*PutResponse)(resp)}, nil } case tDeleteRange: var resp *pb.DeleteRangeResponse r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - resp, err = kv.remote.DeleteRange(ctx, r) + resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) if err == nil { return OpResponse{del: (*DeleteResponse)(resp)}, nil } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) 
+ if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } default: panic("Unknown op") } - return OpResponse{}, err + return OpResponse{}, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go index 10d3dd0b27f..e74e1d6b549 100644 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -20,8 +20,10 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) type ( @@ -29,7 +31,7 @@ type ( LeaseID int64 ) -// LeaseGrantResponse is used to convert the protobuf grant response. +// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. type LeaseGrantResponse struct { *pb.ResponseHeader ID LeaseID @@ -37,14 +39,14 @@ type LeaseGrantResponse struct { Error string } -// LeaseKeepAliveResponse is used to convert the protobuf keepalive response. +// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. type LeaseKeepAliveResponse struct { *pb.ResponseHeader ID LeaseID TTL int64 } -// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response. +// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. type LeaseTimeToLiveResponse struct { *pb.ResponseHeader ID LeaseID `json:"id"` @@ -59,6 +61,12 @@ type LeaseTimeToLiveResponse struct { Keys [][]byte `json:"keys"` } +// LeaseStatus represents a lease status. +type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + const ( // defaultTTL is the assumed lease TTL used for the first keepalive // deadline before the actual TTL is known to the client. @@ -67,6 +75,9 @@ const ( leaseResponseChSize = 16 // NoLease is a lease ID for the absence of a lease. 
NoLease LeaseID = 0 + + // retryConnWait is how long to wait before retrying request due to an error + retryConnWait = 500 * time.Millisecond ) // ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. @@ -97,7 +108,7 @@ type Lease interface { // KeepAlive keeps the given lease alive forever. KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) - // KeepAliveOnce renews the lease once. In most of the cases, Keepalive + // KeepAliveOnce renews the lease once. In most of the cases, KeepAlive // should be used instead of KeepAliveOnce. KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) @@ -126,6 +137,11 @@ type lessor struct { // firstKeepAliveTimeout is the timeout for the first keepalive request // before the actual TTL is known to the lease client firstKeepAliveTimeout time.Duration + + // firstKeepAliveOnce ensures stream starts after first KeepAlive call. + firstKeepAliveOnce sync.Once + + callOpts []grpc.CallOption } // keepAlive multiplexes a keepalive for a lease over multiple channels @@ -141,85 +157,65 @@ type keepAlive struct { } func NewLease(c *Client) Lease { + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) +} + +func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { l := &lessor{ donec: make(chan struct{}), keepAlives: make(map[LeaseID]*keepAlive), - remote: RetryLeaseClient(c), - firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second, + remote: remote, + firstKeepAliveTimeout: keepAliveTimeout, } if l.firstKeepAliveTimeout == time.Second { l.firstKeepAliveTimeout = defaultTTL } - - l.stopCtx, l.stopCancel = context.WithCancel(context.Background()) - go l.recvKeepAliveLoop() - go l.deadlineLoop() + if c != nil { + l.callOpts = c.callOpts + } + reqLeaderCtx := WithRequireLeader(context.Background()) + l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) return l } func (l 
*lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(cctx, r) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, - } - return gresp, nil - } - if isHaltErr(cctx, err) { - return nil, toErr(cctx, err) + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, } + return gresp, nil } + return nil, toErr(ctx, err) } func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := &pb.LeaseRevokeRequest{ID: int64(id)} - resp, err := l.remote.LeaseRevoke(cctx, r) - - if err == nil { - return (*LeaseRevokeResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil } + return nil, toErr(ctx, err) } func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := toLeaseTimeToLiveRequest(id, opts...) 
- resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false)) - if err == nil { - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, - } - return gresp, nil - } - if isHaltErr(cctx, err) { - return nil, toErr(cctx, err) + r := toLeaseTimeToLiveRequest(id, opts...) + resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, } + return gresp, nil } + return nil, toErr(ctx, err) } func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { @@ -254,19 +250,19 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl l.mu.Unlock() go l.keepAliveCtxCloser(id, ctx, ka.donec) + l.firstKeepAliveOnce.Do(func() { + go l.recvKeepAliveLoop() + go l.deadlineLoop() + }) return ch, nil } func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - for { - resp, err := l.keepAliveOnce(cctx, id) + resp, err := l.keepAliveOnce(ctx, id) if err == nil { - if resp.TTL == 0 { + if resp.TTL <= 0 { err = rpctypes.ErrLeaseNotFound } return resp, err @@ -279,6 +275,8 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive func (l *lessor) Close() error { l.stopCancel() + // close for synchronous teardown if stream goroutines never launched + l.firstKeepAliveOnce.Do(func() { close(l.donec) }) <-l.donec return nil } @@ -315,11 +313,50 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha } } +// closeRequireLeader scans keepAlives for ctxs that have require leader +// and closes the associated channels. 
+func (l *lessor) closeRequireLeader() { + l.mu.Lock() + defer l.mu.Unlock() + for _, ka := range l.keepAlives { + reqIdxs := 0 + // find all required leader channels, close, mark as nil + for i, ctx := range ka.ctxs { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + continue + } + ks := md[rpctypes.MetadataRequireLeaderKey] + if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { + continue + } + close(ka.chs[i]) + ka.chs[i] = nil + reqIdxs++ + } + if reqIdxs == 0 { + continue + } + // remove all channels that required a leader from keepalive + newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) + newCtxs := make([]context.Context, len(newChs)) + newIdx := 0 + for i := range ka.chs { + if ka.chs[i] == nil { + continue + } + newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] + newIdx++ + } + ka.chs, ka.ctxs = newChs, newCtxs + } +} + func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { cctx, cancel := context.WithCancel(ctx) defer cancel() - stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false)) + stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) 
if err != nil { return nil, toErr(ctx, err) } @@ -348,32 +385,50 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { close(l.donec) l.loopErr = gerr for _, ka := range l.keepAlives { - ka.Close() + ka.close() } l.keepAlives = make(map[LeaseID]*keepAlive) l.mu.Unlock() }() - stream, serr := l.resetRecv() - for serr == nil { - resp, err := stream.Recv() + for { + stream, err := l.resetRecv() if err != nil { - if isHaltErr(l.stopCtx, err) { + if canceledByCaller(l.stopCtx, err) { return err } - stream, serr = l.resetRecv() - continue + } else { + for { + resp, err := stream.Recv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + + if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + l.closeRequireLeader() + } + break + } + + l.recvKeepAlive(resp) + } + } + + select { + case <-time.After(retryConnWait): + continue + case <-l.stopCtx.Done(): + return l.stopCtx.Err() } - l.recvKeepAlive(resp) } - return serr } -// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests +// resetRecv opens a new lease stream and starts sending keep alive requests. func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false)) - if err = toErr(sctx, err); err != nil { + stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...) 
+ if err != nil { cancel() return nil, err } @@ -381,7 +436,6 @@ func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { l.mu.Lock() defer l.mu.Unlock() if l.stream != nil && l.streamCancel != nil { - l.stream.CloseSend() l.streamCancel() } @@ -411,7 +465,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { if karesp.TTL <= 0 { // lease expired; close all keep alive channels delete(l.keepAlives, karesp.ID) - ka.Close() + ka.close() return } @@ -441,7 +495,7 @@ func (l *lessor) deadlineLoop() { for id, ka := range l.keepAlives { if ka.deadline.Before(now) { // waited too long for response; lease may be expired - ka.Close() + ka.close() delete(l.keepAlives, id) } } @@ -449,19 +503,9 @@ func (l *lessor) deadlineLoop() { } } -// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream +// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { for { - select { - case <-time.After(500 * time.Millisecond): - case <-stream.Context().Done(): - return - case <-l.donec: - return - case <-l.stopCtx.Done(): - return - } - var tosend []LeaseID now := time.Now() @@ -480,29 +524,22 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { return } } + + select { + case <-time.After(500 * time.Millisecond): + case <-stream.Context().Done(): + return + case <-l.donec: + return + case <-l.stopCtx.Done(): + return + } } } -func (ka *keepAlive) Close() { +func (ka *keepAlive) close() { close(ka.donec) for _, ch := range ka.chs { close(ch) } } - -// cancelWhenStop calls cancel when the given stopc fires. It returns a done chan. done -// should be closed when the work is finished. When done fires, cancelWhenStop will release -// its internal resource. 
-func cancelWhenStop(cancel context.CancelFunc, stopc <-chan struct{}) chan<- struct{} { - done := make(chan struct{}, 1) - - go func() { - select { - case <-stopc: - case <-done: - } - cancel() - }() - - return done -} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go index 519db45d8e3..012abdbce63 100644 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -16,36 +16,35 @@ package clientv3 import ( "io/ioutil" - "log" "sync" "google.golang.org/grpc/grpclog" ) // Logger is the logger used by client library. -// It implements grpclog.Logger interface. -type Logger grpclog.Logger +// It implements grpclog.LoggerV2 interface. +type Logger grpclog.LoggerV2 var ( logger settableLogger ) type settableLogger struct { - l grpclog.Logger + l grpclog.LoggerV2 mu sync.RWMutex } func init() { // disable client side logs by default logger.mu.Lock() - logger.l = log.New(ioutil.Discard, "", 0) + logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard) // logger has to override the grpclog at initialization so that // any changes to the grpclog go through logger with locking // instead of through SetLogger // // now updates only happen through settableLogger.set - grpclog.SetLogger(&logger) + grpclog.SetLoggerV2(&logger) logger.mu.Unlock() } @@ -62,6 +61,7 @@ func GetLogger() Logger { func (s *settableLogger) set(l Logger) { s.mu.Lock() logger.l = l + grpclog.SetLoggerV2(&logger) s.mu.Unlock() } @@ -72,11 +72,25 @@ func (s *settableLogger) get() Logger { return l } -// implement the grpclog.Logger interface +// implement the grpclog.LoggerV2 interface +func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) 
} +func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } +func (s *settableLogger) Warningf(format string, args ...interface{}) { + s.get().Warningf(format, args...) +} +func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } +func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } +func (s *settableLogger) Errorf(format string, args ...interface{}) { + s.get().Errorf(format, args...) +} +func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) V(l int) bool { return s.get().V(l) } diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go index 718356250be..67b928fcfb3 100644 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go @@ -18,6 +18,7 @@ import ( "io" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -36,7 +37,7 @@ type Maintenance interface { // AlarmDisarm disarms a given alarm. 
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) - // Defragment defragments storage backend of the etcd member with given endpoint. + // Defragment releases wasted space from internal fragmentation on a given etcd member. // Defragment is only needed when deleting a large number of keys and want to reclaim // the resources. // Defragment is an expensive operation. User should avoid defragmenting multiple members @@ -48,17 +49,45 @@ type Maintenance interface { // Status gets the status of the endpoint. Status(ctx context.Context, endpoint string) (*StatusResponse, error) - // Snapshot provides a reader for a snapshot of a backend. + // Snapshot provides a reader for a point-in-time snapshot of etcd. Snapshot(ctx context.Context) (io.ReadCloser, error) } type maintenance struct { - c *Client - remote pb.MaintenanceClient + dial func(endpoint string) (pb.MaintenanceClient, func(), error) + remote pb.MaintenanceClient + callOpts []grpc.CallOption } func NewMaintenance(c *Client) Maintenance { - return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)} + api := &maintenance{ + dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { + conn, err := c.dial(endpoint) + if err != nil { + return nil, nil, err + } + cancel := func() { conn.Close() } + return RetryMaintenanceClient(c, conn), cancel, nil + }, + remote: RetryMaintenanceClient(c, c.conn), + } + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { + api := &maintenance{ + dial: func(string) (pb.MaintenanceClient, func(), error) { + return remote, func() {}, nil + }, + remote: remote, + } + if c != nil { + api.callOpts = c.callOpts + } + return api } func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { @@ -67,15 +96,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { MemberID: 0, // all Alarm: 
pb.AlarmType_NONE, // all } - for { - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) - if err == nil { - return (*AlarmResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) + if err == nil { + return (*AlarmResponse)(resp), nil } + return nil, toErr(ctx, err) } func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { @@ -101,7 +126,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR return &ret, nil } - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) if err == nil { return (*AlarmResponse)(resp), nil } @@ -109,13 +134,12 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR } func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { - conn, err := m.c.Dial(endpoint) + remote, cancel, err := m.dial(endpoint) if err != nil { return nil, toErr(ctx, err) } - defer conn.Close() - remote := pb.NewMaintenanceClient(conn) - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false)) + defer cancel() + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) if err != nil { return nil, toErr(ctx, err) } @@ -123,13 +147,12 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm } func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { - conn, err := m.c.Dial(endpoint) + remote, cancel, err := m.dial(endpoint) if err != nil { return nil, toErr(ctx, err) } - defer conn.Close() - remote := pb.NewMaintenanceClient(conn) - resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false)) + defer cancel() + resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) 
if err != nil { return nil, toErr(ctx, err) } @@ -137,7 +160,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo } func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false)) + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...) if err != nil { return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/BUILD b/vendor/github.com/coreos/etcd/clientv3/namespace/BUILD new file mode 100644 index 00000000000..8c8b12d4cf4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "kv.go", + "lease.go", + "util.go", + "watch.go", + ], + importpath = "github.com/coreos/etcd/clientv3/namespace", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go b/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go new file mode 100644 index 00000000000..3f883320fcc --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go @@ -0,0 +1,43 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package namespace is a clientv3 wrapper that translates all keys to begin +// with a given prefix. +// +// First, create a client: +// +// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) +// if err != nil { +// // handle error! +// } +// +// Next, override the client interfaces: +// +// unprefixedKV := cli.KV +// cli.KV = namespace.NewKV(cli.KV, "my-prefix/") +// cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/") +// cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/") +// +// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/": +// +// cli.Put(context.TODO(), "abc", "123") +// resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc") +// fmt.Printf("%s\n", resp.Kvs[0].Value) +// // Output: 123 +// unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456") +// resp, _ = cli.Get("abc") +// fmt.Printf("%s\n", resp.Kvs[0].Value) +// // Output: 456 +// +package namespace diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go b/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go new file mode 100644 index 00000000000..2b759e0d394 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go @@ -0,0 +1,189 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package namespace + +import ( + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" +) + +type kvPrefix struct { + clientv3.KV + pfx string +} + +// NewKV wraps a KV instance so that all requests +// are prefixed with a given string. +func NewKV(kv clientv3.KV, prefix string) clientv3.KV { + return &kvPrefix{kv, prefix} +} + +func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) { + if len(key) == 0 { + return nil, rpctypes.ErrEmptyKey + } + op := kv.prefixOp(clientv3.OpPut(key, val, opts...)) + r, err := kv.KV.Do(ctx, op) + if err != nil { + return nil, err + } + put := r.Put() + kv.unprefixPutResponse(put) + return put, nil +} + +func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { + if len(key) == 0 { + return nil, rpctypes.ErrEmptyKey + } + r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...))) + if err != nil { + return nil, err + } + get := r.Get() + kv.unprefixGetResponse(get) + return get, nil +} + +func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) { + if len(key) == 0 { + return nil, rpctypes.ErrEmptyKey + } + r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...))) + if err != nil { + return nil, err + } + del := r.Del() + kv.unprefixDeleteResponse(del) + return del, nil +} + +func (kv 
*kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) { + if len(op.KeyBytes()) == 0 { + return clientv3.OpResponse{}, rpctypes.ErrEmptyKey + } + r, err := kv.KV.Do(ctx, kv.prefixOp(op)) + if err != nil { + return r, err + } + switch { + case r.Get() != nil: + kv.unprefixGetResponse(r.Get()) + case r.Put() != nil: + kv.unprefixPutResponse(r.Put()) + case r.Del() != nil: + kv.unprefixDeleteResponse(r.Del()) + } + return r, nil +} + +type txnPrefix struct { + clientv3.Txn + kv *kvPrefix +} + +func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn { + return &txnPrefix{kv.KV.Txn(ctx), kv} +} + +func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn { + newCmps := make([]clientv3.Cmp, len(cs)) + for i := range cs { + newCmps[i] = cs[i] + pfxKey, _ := txn.kv.prefixInterval(cs[i].KeyBytes(), nil) + newCmps[i].WithKeyBytes(pfxKey) + } + txn.Txn = txn.Txn.If(newCmps...) + return txn +} + +func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn { + newOps := make([]clientv3.Op, len(ops)) + for i := range ops { + newOps[i] = txn.kv.prefixOp(ops[i]) + } + txn.Txn = txn.Txn.Then(newOps...) + return txn +} + +func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn { + newOps := make([]clientv3.Op, len(ops)) + for i := range ops { + newOps[i] = txn.kv.prefixOp(ops[i]) + } + txn.Txn = txn.Txn.Else(newOps...) 
+ return txn +} + +func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) { + resp, err := txn.Txn.Commit() + if err != nil { + return nil, err + } + txn.kv.unprefixTxnResponse(resp) + return resp, nil +} + +func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op { + begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes()) + op.WithKeyBytes(begin) + op.WithRangeBytes(end) + return op +} + +func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) { + for i := range resp.Kvs { + resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):] + } +} + +func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) { + if resp.PrevKv != nil { + resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):] + } +} + +func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) { + for i := range resp.PrevKvs { + resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):] + } +} + +func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) { + for _, r := range resp.Responses { + switch tv := r.Response.(type) { + case *pb.ResponseOp_ResponseRange: + if tv.ResponseRange != nil { + kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange)) + } + case *pb.ResponseOp_ResponsePut: + if tv.ResponsePut != nil { + kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut)) + } + case *pb.ResponseOp_ResponseDeleteRange: + if tv.ResponseDeleteRange != nil { + kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange)) + } + default: + } + } +} + +func (p *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) { + return prefixInterval(p.pfx, key, end) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go b/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go new file mode 100644 index 00000000000..c3167fa5d87 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go @@ -0,0 +1,58 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package namespace + +import ( + "bytes" + + "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +type leasePrefix struct { + clientv3.Lease + pfx []byte +} + +// NewLease wraps a Lease interface to filter for only keys with a prefix +// and remove that prefix when fetching attached keys through TimeToLive. +func NewLease(l clientv3.Lease, prefix string) clientv3.Lease { + return &leasePrefix{l, []byte(prefix)} +} + +func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) { + resp, err := l.Lease.TimeToLive(ctx, id, opts...) 
+ if err != nil { + return nil, err + } + if len(resp.Keys) > 0 { + var outKeys [][]byte + for i := range resp.Keys { + if len(resp.Keys[i]) < len(l.pfx) { + // too short + continue + } + if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) { + // doesn't match prefix + continue + } + // strip prefix + outKeys = append(outKeys, resp.Keys[i][len(l.pfx):]) + } + resp.Keys = outKeys + } + return resp, nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/util.go b/vendor/github.com/coreos/etcd/clientv3/namespace/util.go new file mode 100644 index 00000000000..ecf04046c32 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/util.go @@ -0,0 +1,42 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package namespace + +func prefixInterval(pfx string, key, end []byte) (pfxKey []byte, pfxEnd []byte) { + pfxKey = make([]byte, len(pfx)+len(key)) + copy(pfxKey[copy(pfxKey, pfx):], key) + + if len(end) == 1 && end[0] == 0 { + // the edge of the keyspace + pfxEnd = make([]byte, len(pfx)) + copy(pfxEnd, pfx) + ok := false + for i := len(pfxEnd) - 1; i >= 0; i-- { + if pfxEnd[i]++; pfxEnd[i] != 0 { + ok = true + break + } + } + if !ok { + // 0xff..ff => 0x00 + pfxEnd = []byte{0} + } + } else if len(end) >= 1 { + pfxEnd = make([]byte, len(pfx)+len(end)) + copy(pfxEnd[copy(pfxEnd, pfx):], end) + } + + return pfxKey, pfxEnd +} diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go b/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go new file mode 100644 index 00000000000..9907211529a --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go @@ -0,0 +1,84 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package namespace + +import ( + "sync" + + "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +type watcherPrefix struct { + clientv3.Watcher + pfx string + + wg sync.WaitGroup + stopc chan struct{} + stopOnce sync.Once +} + +// NewWatcher wraps a Watcher instance so that all Watch requests +// are prefixed with a given string and all Watch responses have +// the prefix removed. 
+func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher { + return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})} +} + +func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { + // since OpOption is opaque, determine range for prefixing through an OpGet + op := clientv3.OpGet(key, opts...) + end := op.RangeBytes() + pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end) + if pfxEnd != nil { + opts = append(opts, clientv3.WithRange(string(pfxEnd))) + } + + wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...) + + // translate watch events from prefixed to unprefixed + pfxWch := make(chan clientv3.WatchResponse) + w.wg.Add(1) + go func() { + defer func() { + close(pfxWch) + w.wg.Done() + }() + for wr := range wch { + for i := range wr.Events { + wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):] + if wr.Events[i].PrevKv != nil { + wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key + } + } + select { + case pfxWch <- wr: + case <-ctx.Done(): + return + case <-w.stopc: + return + } + } + }() + return pfxWch +} + +func (w *watcherPrefix) Close() error { + err := w.Watcher.Close() + w.stopOnce.Do(func() { close(w.stopc) }) + w.wg.Wait() + return err +} diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/BUILD b/vendor/github.com/coreos/etcd/clientv3/naming/BUILD new file mode 100644 index 00000000000..9329d763bab --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/naming/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "grpc.go", + ], + importpath = "github.com/coreos/etcd/clientv3/naming", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + 
"//vendor/google.golang.org/grpc/naming:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/doc.go b/vendor/github.com/coreos/etcd/clientv3/naming/doc.go new file mode 100644 index 00000000000..71608cc738b --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/naming/doc.go @@ -0,0 +1,56 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package naming provides an etcd-backed gRPC resolver for discovering gRPC services. 
+// +// To use, first import the packages: +// +// import ( +// "github.com/coreos/etcd/clientv3" +// etcdnaming "github.com/coreos/etcd/clientv3/naming" +// +// "google.golang.org/grpc" +// "google.golang.org/grpc/naming" +// ) +// +// First, register new endpoint addresses for a service: +// +// func etcdAdd(c *clientv3.Client, service, addr string) error { +// r := &etcdnaming.GRPCResolver{Client: c} +// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}) +// } +// +// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer: +// +// func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) { +// r := &etcdnaming.GRPCResolver{Client: c} +// b := grpc.RoundRobin(r) +// return grpc.Dial(service, grpc.WithBalancer(b)) +// } +// +// Optionally, force delete an endpoint: +// +// func etcdDelete(c *clientv3, service, addr string) error { +// r := &etcdnaming.GRPCResolver{Client: c} +// return r.Update(c.Ctx(), "my-service", naming.Update{Op: naming.Delete, Addr: "1.2.3.4"}) +// } +// +// Or register an expiring endpoint with a lease: +// +// func etcdLeaseAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error { +// r := &etcdnaming.GRPCResolver{Client: c} +// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid)) +// } +// +package naming diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go b/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go new file mode 100644 index 00000000000..7fabc4f109a --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go @@ -0,0 +1,132 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package naming + +import ( + "encoding/json" + "fmt" + + etcd "github.com/coreos/etcd/clientv3" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/status" + + "golang.org/x/net/context" +) + +var ErrWatcherClosed = fmt.Errorf("naming: watch closed") + +// GRPCResolver creates a grpc.Watcher for a target to track its resolution changes. +type GRPCResolver struct { + // Client is an initialized etcd client. + Client *etcd.Client +} + +func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) { + switch nm.Op { + case naming.Add: + var v []byte + if v, err = json.Marshal(nm); err != nil { + return status.Error(codes.InvalidArgument, err.Error()) + } + _, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...) + case naming.Delete: + _, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...) + default: + return status.Error(codes.InvalidArgument, "naming: bad naming op") + } + return err +} + +func (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) { + ctx, cancel := context.WithCancel(context.Background()) + w := &gRPCWatcher{c: gr.Client, target: target + "/", ctx: ctx, cancel: cancel} + return w, nil +} + +type gRPCWatcher struct { + c *etcd.Client + target string + ctx context.Context + cancel context.CancelFunc + wch etcd.WatchChan + err error +} + +// Next gets the next set of updates from the etcd resolver. 
+// Calls to Next should be serialized; concurrent calls are not safe since +// there is no way to reconcile the update ordering. +func (gw *gRPCWatcher) Next() ([]*naming.Update, error) { + if gw.wch == nil { + // first Next() returns all addresses + return gw.firstNext() + } + if gw.err != nil { + return nil, gw.err + } + + // process new events on target/* + wr, ok := <-gw.wch + if !ok { + gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error()) + return nil, gw.err + } + if gw.err = wr.Err(); gw.err != nil { + return nil, gw.err + } + + updates := make([]*naming.Update, 0, len(wr.Events)) + for _, e := range wr.Events { + var jupdate naming.Update + var err error + switch e.Type { + case etcd.EventTypePut: + err = json.Unmarshal(e.Kv.Value, &jupdate) + jupdate.Op = naming.Add + case etcd.EventTypeDelete: + err = json.Unmarshal(e.PrevKv.Value, &jupdate) + jupdate.Op = naming.Delete + } + if err == nil { + updates = append(updates, &jupdate) + } + } + return updates, nil +} + +func (gw *gRPCWatcher) firstNext() ([]*naming.Update, error) { + // Use serialized request so resolution still works if the target etcd + // server is partitioned away from the quorum. + resp, err := gw.c.Get(gw.ctx, gw.target, etcd.WithPrefix(), etcd.WithSerializable()) + if gw.err = err; err != nil { + return nil, err + } + + updates := make([]*naming.Update, 0, len(resp.Kvs)) + for _, kv := range resp.Kvs { + var jupdate naming.Update + if err := json.Unmarshal(kv.Value, &jupdate); err != nil { + continue + } + updates = append(updates, &jupdate) + } + + opts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1), etcd.WithPrefix(), etcd.WithPrevKV()} + gw.wch = gw.c.Watch(gw.ctx, gw.target, opts...) 
+ return updates, nil +} + +func (gw *gRPCWatcher) Close() { gw.cancel() } diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go index 6e260076698..e18d28662c4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ b/vendor/github.com/coreos/etcd/clientv3/op.go @@ -23,6 +23,7 @@ const ( tRange opType = iota + 1 tPut tDeleteRange + tTxn ) var ( @@ -52,6 +53,10 @@ type Op struct { // for watch, put, delete prevKV bool + // for put + ignoreValue bool + ignoreLease bool + // progressNotify is for progress updates. progressNotify bool // createdNotify is for created event @@ -63,8 +68,69 @@ type Op struct { // for put val []byte leaseID LeaseID + + // txn + cmps []Cmp + thenOps []Op + elseOps []Op } +// accessors / mutators + +func (op Op) IsTxn() bool { return op.t == tTxn } +func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } + +// KeyBytes returns the byte slice holding the Op's key. +func (op Op) KeyBytes() []byte { return op.key } + +// WithKeyBytes sets the byte slice for the Op's key. +func (op *Op) WithKeyBytes(key []byte) { op.key = key } + +// RangeBytes returns the byte slice holding with the Op's range end, if any. +func (op Op) RangeBytes() []byte { return op.end } + +// Rev returns the requested revision, if any. +func (op Op) Rev() int64 { return op.rev } + +// IsPut returns true iff the operation is a Put. +func (op Op) IsPut() bool { return op.t == tPut } + +// IsGet returns true iff the operation is a Get. +func (op Op) IsGet() bool { return op.t == tRange } + +// IsDelete returns true iff the operation is a Delete. +func (op Op) IsDelete() bool { return op.t == tDeleteRange } + +// IsSerializable returns true if the serializable field is true. +func (op Op) IsSerializable() bool { return op.serializable == true } + +// IsKeysOnly returns whether keysOnly is set. 
+func (op Op) IsKeysOnly() bool { return op.keysOnly == true } + +// IsCountOnly returns whether countOnly is set. +func (op Op) IsCountOnly() bool { return op.countOnly == true } + +// MinModRev returns the operation's minimum modify revision. +func (op Op) MinModRev() int64 { return op.minModRev } + +// MaxModRev returns the operation's maximum modify revision. +func (op Op) MaxModRev() int64 { return op.maxModRev } + +// MinCreateRev returns the operation's minimum create revision. +func (op Op) MinCreateRev() int64 { return op.minCreateRev } + +// MaxCreateRev returns the operation's maximum create revision. +func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } + +// WithRangeBytes sets the byte slice for the Op's range end. +func (op *Op) WithRangeBytes(end []byte) { op.end = end } + +// ValueBytes returns the byte slice holding the Op's value, if any. +func (op Op) ValueBytes() []byte { return op.val } + +// WithValueBytes sets the byte slice for the Op's value. +func (op *Op) WithValueBytes(v []byte) { op.val = v } + func (op Op) toRangeRequest() *pb.RangeRequest { if op.t != tRange { panic("op.t != tRange") @@ -89,12 +155,28 @@ func (op Op) toRangeRequest() *pb.RangeRequest { return r } +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + func (op Op) toRequestOp() *pb.RequestOp { switch op.t { case tRange: return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} case tPut: - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} + r := 
&pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} case tDeleteRange: r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} @@ -105,6 +187,19 @@ func (op Op) toRequestOp() *pb.RequestOp { } func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } return op.t != tRange } @@ -170,6 +265,10 @@ func OpPut(key, val string, opts ...OpOption) Op { return ret } +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + func opWatch(key string, opts ...OpOption) Op { ret := Op{t: tRange, key: []byte(key)} ret.applyOpts(opts) @@ -207,6 +306,7 @@ func WithLease(leaseID LeaseID) OpOption { } // WithLimit limits the number of results to return from 'Get' request. +// If WithLimit is given a 0 limit, it is treated as no limit. func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } // WithRev specifies the store revision for 'Get' request. @@ -222,9 +322,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption { if target == SortByKey && order == SortAscend { // If order != SortNone, server fetches the entire key-space, // and then applies the sort and limit, if provided. - // Since current mvcc.Range implementation returns results - // sorted by keys in lexicographically ascending order, - // client should ignore SortOrder if the target is SortByKey. + // Since by default the server returns results sorted by keys + // in lexicographically ascending order, the client should ignore + // SortOrder if the target is SortByKey. 
order = SortNone } op.sort = &SortOption{target, order} @@ -257,6 +357,10 @@ func getPrefix(key []byte) []byte { // can return 'foo1', 'foo2', and so on. func WithPrefix() OpOption { return func(op *Op) { + if len(op.key) == 0 { + op.key, op.end = []byte{0}, []byte{0} + return + } op.end = getPrefix(op.key) } } @@ -360,6 +464,24 @@ func WithPrevKV() OpOption { } } +// WithIgnoreValue updates the key using its current value. +// This option can not be combined with non-empty values. +// Returns an error if the key does not exist. +func WithIgnoreValue() OpOption { + return func(op *Op) { + op.ignoreValue = true + } +} + +// WithIgnoreLease updates the key using its current lease. +// This option can not be combined with WithLease. +// Returns an error if the key does not exist. +func WithIgnoreLease() OpOption { + return func(op *Op) { + op.ignoreLease = true + } +} + // LeaseOp represents an Operation that lease can execute. type LeaseOp struct { id LeaseID @@ -377,8 +499,7 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) { } } -// WithAttachedKeys requests lease timetolive API to return -// attached keys of given lease ID. +// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID. func WithAttachedKeys() LeaseOption { return func(op *LeaseOp) { op.attachedKeys = true } } diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go new file mode 100644 index 00000000000..23eea9367ff --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go @@ -0,0 +1,30 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import "golang.org/x/net/context" + +// TODO: remove this when "FailFast=false" is fixed. +// See https://github.com/grpc/grpc-go/issues/1532. +func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { + select { + case <-ready: + return nil + case <-rpcCtx.Done(): + return rpcCtx.Err() + case <-clientCtx.Done(): + return clientCtx.Err() + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go index 78f31a8c4b0..c95b2cad7c4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -17,135 +17,183 @@ package clientv3 import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) type rpcFunc func(ctx context.Context) error -type retryRpcFunc func(context.Context, rpcFunc) error +type retryRPCFunc func(context.Context, rpcFunc) error +type retryStopErrFunc func(error) bool -func (c *Client) newRetryWrapper() retryRpcFunc { +func isRepeatableStopError(err error) bool { + eErr := rpctypes.Error(err) + // always stop retry on etcd errors + if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { + return true + } + // only retry if unavailable + ev, _ := status.FromError(err) + return ev.Code() != codes.Unavailable +} + +func isNonRepeatableStopError(err error) bool { + ev, _ := 
status.FromError(err) + if ev.Code() != codes.Unavailable { + return true + } + desc := rpctypes.ErrorDesc(err) + return desc != "there is no address available" && desc != "there is no connection available" +} + +func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { + if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { + return err + } + pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - - eErr := rpctypes.Error(err) - // always stop retry on etcd errors - if _, ok := eErr.(rpctypes.EtcdError); ok { - return err + if logger.V(4) { + logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) } - // only retry if unavailable - if grpc.Code(err) != codes.Unavailable { - return err + if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { + // mark this before endpoint switch is triggered + c.balancer.hostPortError(pinned, err) + c.balancer.next() + if logger.V(4) { + logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) + } } - select { - case <-c.balancer.ConnectNotify(): - case <-rpcCtx.Done(): - return rpcCtx.Err() - case <-c.ctx.Done(): - return c.ctx.Err() + if isStop(err) { + return err } } } } -func (c *Client) newAuthRetryWrapper() retryRpcFunc { +func (c *Client) newAuthRetryWrapper() retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { + pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - + if logger.V(4) { + logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) + } // always stop retry on etcd errors other than invalid auth token if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { gterr := c.getToken(rpcCtx) if gterr != nil { + if logger.V(4) { + logger.Infof("clientv3/auth-retry: cannot retry due 
to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) + } return err // return the original error for simplicity } continue } - return err } } } -// RetryKVClient implements a KVClient that uses the client's FailFast retry policy. +// RetryKVClient implements a KVClient. func RetryKVClient(c *Client) pb.KVClient { - retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper} - return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + conn := pb.NewKVClient(c.conn) + retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry} + retryAuthWrapper := c.newAuthRetryWrapper() + return &retryKVClient{ + &nonRepeatableKVClient{retryBasic, retryAuthWrapper}, + retryAuthWrapper} } type retryKVClient struct { - *retryWriteKVClient + *nonRepeatableKVClient + repeatableRetry retryRPCFunc } func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...) + err = rkv.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Range(rctx, in, opts...) return err }) return resp, err } -type retryWriteKVClient struct { - pb.KVClient - retryf retryRpcFunc +type nonRepeatableKVClient struct { + kc pb.KVClient + nonRepeatableRetry retryRPCFunc } -func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Put(rctx, in, opts...) 
+func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Put(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.DeleteRange(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Txn(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { + // TODO: repeatableRetry if read-only txn + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Txn(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Compact(rctx, in, opts...) 
+func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Compact(rctx, in, opts...) return err }) return resp, err } type retryLeaseClient struct { - pb.LeaseClient - retryf retryRpcFunc + lc pb.LeaseClient + repeatableRetry retryRPCFunc } -// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy. +// RetryLeaseClient implements a LeaseClient. func RetryLeaseClient(c *Client) pb.LeaseClient { - retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper} - return &retryLeaseClient{retry, c.retryAuthWrapper} + retry := &retryLeaseClient{ + pb.NewLeaseClient(c.conn), + c.newRetryWrapper(isRepeatableStopError), + } + return &retryLeaseClient{retry, c.newAuthRetryWrapper()} +} + +func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...) + return err + }) + return resp, err } func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...) + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseGrant(rctx, in, opts...) 
return err }) return resp, err @@ -153,140 +201,270 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe } func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...) + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...) return err }) return resp, err } +func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...) + return err + }) + return stream, err +} + type retryClusterClient struct { - pb.ClusterClient - retryf retryRpcFunc + *nonRepeatableClusterClient + repeatableRetry retryRPCFunc } -// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy. +// RetryClusterClient implements a ClusterClient. func RetryClusterClient(c *Client) pb.ClusterClient { - return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + cc := pb.NewClusterClient(c.conn) + return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry} } -func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...) 
+func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { + err = rcc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberList(rctx, in, opts...) return err }) return resp, err } -func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...) +type nonRepeatableClusterClient struct { + cc pb.ClusterClient + nonRepeatableRetry retryRPCFunc +} + +func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberAdd(rctx, in, opts...) return err }) return resp, err } -func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...) +func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberRemove(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberUpdate(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +// RetryMaintenanceClient implements a Maintenance. +func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + mc := pb.NewMaintenanceClient(conn) + return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry} +} + +type retryMaintenanceClient struct { + *nonRepeatableMaintenanceClient + repeatableRetry retryRPCFunc +} + +func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Alarm(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Status(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Hash(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rmc.mc.Snapshot(rctx, in, opts...) 
+ return err + }) + return stream, err +} + +type nonRepeatableMaintenanceClient struct { + mc pb.MaintenanceClient + nonRepeatableRetry retryRPCFunc +} + +func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { + err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Defragment(rctx, in, opts...) return err }) return resp, err } type retryAuthClient struct { - pb.AuthClient - retryf retryRpcFunc + *nonRepeatableAuthClient + repeatableRetry retryRPCFunc } -// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy. +// RetryAuthClient implements a AuthClient. func RetryAuthClient(c *Client) pb.AuthClient { - return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + ac := pb.NewAuthClient(c.conn) + return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry} } -func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...) +func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserList(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...) 
+func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGet(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserAdd(rctx, in, opts...) +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGet(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserDelete(rctx, in, opts...) +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleList(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...) 
+type nonRepeatableAuthClient struct { + ac pb.AuthClient + nonRepeatableRetry retryRPCFunc +} + +func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthEnable(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthDisable(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserAdd(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...) 
+func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserDelete(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserChangePassword(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGrantRole(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...) 
+func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserRevokeRole(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleDelete(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.Authenticate(rctx, in, opts...) return err }) return resp, err diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go index a61decd6406..1a80c8ebaab 100644 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ b/vendor/github.com/coreos/etcd/clientv3/txn.go @@ -18,13 +18,14 @@ import ( "sync" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) // Txn is the interface that wraps mini-transactions. // -// Tx.If( +// Txn(context.TODO()).If( // Compare(Value(k1), ">", v1), // Compare(Version(k1), "=", 2) // ).Then( @@ -49,8 +50,6 @@ type Txn interface { // Commit tries to commit the transaction. Commit() (*TxnResponse, error) - - // TODO: add a Do for shortcut the txn without any condition? } type txn struct { @@ -68,6 +67,8 @@ type txn struct { sus []*pb.RequestOp fas []*pb.RequestOp + + callOpts []grpc.CallOption } func (txn *txn) If(cs ...Cmp) Txn { @@ -137,30 +138,14 @@ func (txn *txn) Else(ops ...Op) Txn { func (txn *txn) Commit() (*TxnResponse, error) { txn.mu.Lock() defer txn.mu.Unlock() - for { - resp, err := txn.commit() - if err == nil { - return resp, err - } - if isHaltErr(txn.ctx, err) { - return nil, toErr(txn.ctx, err) - } - if txn.isWrite { - return nil, toErr(txn.ctx, err) - } - } -} -func (txn *txn) commit() (*TxnResponse, error) { r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - var opts []grpc.CallOption - if !txn.isWrite { - opts = []grpc.CallOption{grpc.FailFast(false)} - } - resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...) 
+ var resp *pb.TxnResponse + var err error + resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) if err != nil { - return nil, err + return nil, toErr(txn.ctx, err) } return (*TxnResponse)(resp), nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go index 9b083cc9462..16a91fdff40 100644 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -22,8 +22,12 @@ import ( v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) const ( @@ -39,10 +43,9 @@ type WatchChan <-chan WatchResponse type Watcher interface { // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. - // If the watch is slow or the required rev is compacted, the watch request - // might be canceled from the server-side and the chan will be closed. - // 'opts' can be: 'WithRev' and/or 'WithPrefix'. + // through the returned channel. If revisions waiting to be sent over the + // watch are compacted, then the watch will be canceled by the server, the + // client will post a compacted error watch response, and the channel will close. Watch(ctx context.Context, key string, opts ...OpOption) WatchChan // Close closes the watcher and cancels all watch requests. @@ -65,6 +68,9 @@ type WatchResponse struct { Created bool closeErr error + + // cancelReason is a reason of canceling watch + cancelReason string } // IsCreate returns true if the event tells that the key is newly created. 
@@ -85,6 +91,9 @@ func (wr *WatchResponse) Err() error { case wr.CompactRevision != 0: return v3rpc.ErrCompacted case wr.Canceled: + if len(wr.cancelReason) != 0 { + return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) + } return v3rpc.ErrFutureRev } return nil @@ -97,7 +106,8 @@ func (wr *WatchResponse) IsProgressNotify() bool { // watcher implements the Watcher interface type watcher struct { - remote pb.WatchClient + remote pb.WatchClient + callOpts []grpc.CallOption // mu protects the grpc streams map mu sync.RWMutex @@ -108,8 +118,9 @@ type watcher struct { // watchGrpcStream tracks all watch resources attached to a single grpc stream. type watchGrpcStream struct { - owner *watcher - remote pb.WatchClient + owner *watcher + remote pb.WatchClient + callOpts []grpc.CallOption // ctx controls internal remote.Watch requests ctx context.Context @@ -128,7 +139,7 @@ type watchGrpcStream struct { respc chan *pb.WatchResponse // donec closes to broadcast shutdown donec chan struct{} - // errc transmits errors from grpc Recv to the watch stream reconn logic + // errc transmits errors from grpc Recv to the watch stream reconnect logic errc chan error // closingc gets the watcherStream of closing watchers closingc chan *watcherStream @@ -180,14 +191,18 @@ type watcherStream struct { } func NewWatcher(c *Client) Watcher { - return NewWatchFromWatchClient(pb.NewWatchClient(c.conn)) + return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) } -func NewWatchFromWatchClient(wc pb.WatchClient) Watcher { - return &watcher{ +func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { + w := &watcher{ remote: wc, streams: make(map[string]*watchGrpcStream), } + if c != nil { + w.callOpts = c.callOpts + } + return w } // never closes @@ -206,17 +221,17 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { wgs := &watchGrpcStream{ owner: w, remote: w.remote, + callOpts: w.callOpts, ctx: ctx, - ctxKey: fmt.Sprintf("%v", 
inctx), + ctxKey: streamKeyFromCtx(inctx), cancel: cancel, substreams: make(map[int64]*watcherStream), - - respc: make(chan *pb.WatchResponse), - reqc: make(chan *watchRequest), - donec: make(chan struct{}), - errc: make(chan error, 1), - closingc: make(chan *watcherStream), - resumec: make(chan struct{}), + respc: make(chan *pb.WatchResponse), + reqc: make(chan *watchRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), } go wgs.run() return wgs @@ -247,7 +262,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch } ok := false - ctxKey := fmt.Sprintf("%v", ctx) + ctxKey := streamKeyFromCtx(ctx) // find or allocate appropriate grpc watch stream w.mu.Lock() @@ -310,14 +325,14 @@ func (w *watcher) Close() (err error) { w.streams = nil w.mu.Unlock() for _, wgs := range streams { - if werr := wgs.Close(); werr != nil { + if werr := wgs.close(); werr != nil { err = werr } } return err } -func (w *watchGrpcStream) Close() (err error) { +func (w *watchGrpcStream) close() (err error) { w.cancel() <-w.donec select { @@ -428,7 +443,7 @@ func (w *watchGrpcStream) run() { initReq: *wreq, id: -1, outc: outc, - // unbufffered so resumes won't cause repeat events + // unbuffered so resumes won't cause repeat events recvc: make(chan *WatchResponse), } @@ -480,7 +495,7 @@ func (w *watchGrpcStream) run() { req := &pb.WatchRequest{RequestUnion: cr} wc.Send(req) } - // watch client failed to recv; spawn another if possible + // watch client failed on Recv; spawn another if possible case err := <-w.errc: if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { closeErr = err @@ -520,10 +535,6 @@ func (w *watchGrpcStream) nextResume() *watcherStream { // dispatchEvent sends a WatchResponse to the appropriate watcher stream func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - ws, ok := w.substreams[pbresp.WatchId] - if !ok { - return 
false - } events := make([]*Event, len(pbresp.Events)) for i, ev := range pbresp.Events { events[i] = (*Event)(ev) @@ -534,6 +545,11 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { CompactRevision: pbresp.CompactRevision, Created: pbresp.Created, Canceled: pbresp.Canceled, + cancelReason: pbresp.CancelReason, + } + ws, ok := w.substreams[pbresp.WatchId] + if !ok { + return false } select { case ws.recvc <- wr: @@ -725,7 +741,11 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str ws.closing = true close(ws.outc) ws.outc = nil - go func() { w.closingc <- ws }() + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.closingc <- ws + }() case <-stopc: } }(w.resuming[i]) @@ -737,7 +757,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str return donec } -// joinSubstream waits for all substream goroutines to complete +// joinSubstreams waits for all substream goroutines to complete. func (w *watchGrpcStream) joinSubstreams() { for _, ws := range w.substreams { <-ws.donec @@ -749,7 +769,9 @@ func (w *watchGrpcStream) joinSubstreams() { } } -// openWatchClient retries opening a watchclient until retryConnection fails +// openWatchClient retries opening a watch client until success or halt. 
+// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { for { select { @@ -760,7 +782,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) return nil, err default: } - if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil { + if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { break } if isHaltErr(w.ctx, err) { @@ -770,7 +792,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) return ws, nil } -// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest) +// toPB converts an internal watch request structure to its protobuf WatchRequest structure. func (wr *watchRequest) toPB() *pb.WatchRequest { req := &pb.WatchCreateRequest{ StartRevision: wr.rev, @@ -783,3 +805,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest { cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} return &pb.WatchRequest{RequestUnion: cr} } + +func streamKeyFromCtx(ctx context.Context) string { + if md, ok := metadata.FromOutgoingContext(ctx); ok { + return fmt.Sprintf("%+v", md) + } + return "" +} diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/github.com/coreos/etcd/compactor/compactor.go index 322a0987011..5cf7b65094a 100644 --- a/vendor/github.com/coreos/etcd/compactor/compactor.go +++ b/vendor/github.com/coreos/etcd/compactor/compactor.go @@ -30,7 +30,8 @@ var ( ) const ( - checkCompactionInterval = 5 * time.Minute + checkCompactionInterval = 5 * time.Minute + executeCompactionInterval = time.Hour ) type Compactable interface { @@ -41,6 +42,8 @@ type RevGetter interface { Rev() int64 } +// Periodic compacts the log by purging revisions older than +// the configured retention time. Compaction happens hourly. 
type Periodic struct { clock clockwork.Clock periodInHour int @@ -85,11 +88,12 @@ func (t *Periodic) Run() { continue } } - if clock.Now().Sub(last) < time.Duration(t.periodInHour)*time.Hour { + + if clock.Now().Sub(last) < executeCompactionInterval { continue } - rev := t.getRev(t.periodInHour) + rev, remaining := t.getRev(t.periodInHour) if rev < 0 { continue } @@ -97,11 +101,11 @@ func (t *Periodic) Run() { plog.Noticef("Starting auto-compaction at revision %d", rev) _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) if err == nil || err == mvcc.ErrCompacted { - t.revs = make([]int64, 0) + t.revs = remaining last = clock.Now() plog.Noticef("Finished auto-compaction at revision %d", rev) } else { - plog.Noticef("Failed auto-compaction at revision %d (%v)", err, rev) + plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err) plog.Noticef("Retry after %v", checkCompactionInterval) } } @@ -124,10 +128,10 @@ func (t *Periodic) Resume() { t.paused = false } -func (t *Periodic) getRev(h int) int64 { +func (t *Periodic) getRev(h int) (int64, []int64) { i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval) if i < 0 { - return -1 + return -1, t.revs } - return t.revs[i] + return t.revs[i], t.revs[i+1:] } diff --git a/vendor/github.com/coreos/etcd/discovery/BUILD b/vendor/github.com/coreos/etcd/discovery/BUILD index 4a437e67af1..496402a6761 100644 --- a/vendor/github.com/coreos/etcd/discovery/BUILD +++ b/vendor/github.com/coreos/etcd/discovery/BUILD @@ -2,10 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "discovery.go", - "srv.go", - ], + srcs = ["discovery.go"], importpath = "github.com/coreos/etcd/discovery", visibility = ["//visibility:public"], deps = [ diff --git a/vendor/github.com/coreos/etcd/discovery/srv.go b/vendor/github.com/coreos/etcd/discovery/srv.go deleted file mode 100644 index c3d20ca9243..00000000000 --- 
a/vendor/github.com/coreos/etcd/discovery/srv.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package discovery - -import ( - "fmt" - "net" - "net/url" - "strings" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV - resolveTCPAddr = net.ResolveTCPAddr -) - -// SRVGetCluster gets the cluster information via DNS discovery. -// TODO(barakmich): Currently ignores priority and weight (as they don't make as much sense for a bootstrap) -// Also doesn't do any lookups for the token (though it could) -// Also sees each entry as a separate instance. 
-func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (string, string, error) { - tempName := int(0) - tcp2ap := make(map[string]url.URL) - - // First, resolve the apurls - for _, url := range apurls { - tcpAddr, err := resolveTCPAddr("tcp", url.Host) - if err != nil { - plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host) - return "", "", err - } - tcp2ap[tcpAddr.String()] = url - } - - stringParts := []string{} - updateNodeMap := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", dns) - if err != nil { - return err - } - for _, srv := range addrs { - port := fmt.Sprintf("%d", srv.Port) - host := net.JoinHostPort(srv.Target, port) - tcpAddr, err := resolveTCPAddr("tcp", host) - if err != nil { - plog.Warningf("couldn't resolve host %s during SRV discovery", host) - continue - } - n := "" - url, ok := tcp2ap[tcpAddr.String()] - if ok { - n = name - } - if n == "" { - n = fmt.Sprintf("%d", tempName) - tempName++ - } - // SRV records have a trailing dot but URL shouldn't. 
- shortHost := strings.TrimSuffix(srv.Target, ".") - urlHost := net.JoinHostPort(shortHost, port) - stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) - plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost) - if ok && url.Scheme != scheme { - plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) - } - } - return nil - } - - failCount := 0 - err := updateNodeMap("etcd-server-ssl", "https") - srvErr := make([]string, 2) - if err != nil { - srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err) - failCount++ - } - err = updateNodeMap("etcd-server", "http") - if err != nil { - srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err) - failCount++ - } - if failCount == 2 { - plog.Warningf(srvErr[0]) - plog.Warningf(srvErr[1]) - plog.Errorf("SRV discovery failed: too many errors querying DNS SRV records") - return "", "", err - } - return strings.Join(stringParts, ","), defaultToken, nil -} diff --git a/vendor/github.com/coreos/etcd/embed/BUILD b/vendor/github.com/coreos/etcd/embed/BUILD new file mode 100644 index 00000000000..286bdae6892 --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/BUILD @@ -0,0 +1,60 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "config.go", + "doc.go", + "etcd.go", + "serve.go", + "util.go", + ], + importpath = "github.com/coreos/etcd/embed", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/cockroachdb/cmux:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3client:go_default_library", + 
"//vendor/github.com/coreos/etcd/etcdserver/api/v3election:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/cors:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/debugutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/runtime:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/srv:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", + "//vendor/github.com/coreos/etcd/wal:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/golang.org/x/net/trace:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", + "//vendor/google.golang.org/grpc/keepalive:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = 
["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/embed/config.go b/vendor/github.com/coreos/etcd/embed/config.go new file mode 100644 index 00000000000..90efb3937d7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/config.go @@ -0,0 +1,464 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package embed + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/pkg/cors" + "github.com/coreos/etcd/pkg/netutil" + "github.com/coreos/etcd/pkg/srv" + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" + + "github.com/ghodss/yaml" + "google.golang.org/grpc" +) + +const ( + ClusterStateFlagNew = "new" + ClusterStateFlagExisting = "existing" + + DefaultName = "default" + DefaultMaxSnapshots = 5 + DefaultMaxWALs = 5 + DefaultMaxRequestBytes = 1.5 * 1024 * 1024 + DefaultGRPCKeepAliveMinTime = 5 * time.Second + DefaultGRPCKeepAliveInterval = 2 * time.Hour + DefaultGRPCKeepAliveTimeout = 20 * time.Second + + DefaultListenPeerURLs = "http://localhost:2380" + DefaultListenClientURLs = "http://localhost:2379" + + // maxElectionMs specifies the maximum value of election timeout. + // More details are listed in ../Documentation/tuning.md#time-parameters. 
+ maxElectionMs = 50000 +) + +var ( + ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " + + "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"") + ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly") + + DefaultInitialAdvertisePeerURLs = "http://localhost:2380" + DefaultAdvertiseClientURLs = "http://localhost:2379" + + defaultHostname string + defaultHostStatus error +) + +func init() { + defaultHostname, defaultHostStatus = netutil.GetDefaultHost() +} + +// Config holds the arguments for configuring an etcd server. +type Config struct { + // member + + CorsInfo *cors.CORSInfo + LPUrls, LCUrls []url.URL + Dir string `json:"data-dir"` + WalDir string `json:"wal-dir"` + MaxSnapFiles uint `json:"max-snapshots"` + MaxWalFiles uint `json:"max-wals"` + Name string `json:"name"` + SnapCount uint64 `json:"snapshot-count"` + AutoCompactionRetention int `json:"auto-compaction-retention"` + + // TickMs is the number of milliseconds between heartbeat ticks. + // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1). + // make ticks a cluster wide configuration. + TickMs uint `json:"heartbeat-interval"` + ElectionMs uint `json:"election-timeout"` + QuotaBackendBytes int64 `json:"quota-backend-bytes"` + MaxRequestBytes uint `json:"max-request-bytes"` + + // gRPC server options + + // GRPCKeepAliveMinTime is the minimum interval that a client should + // wait before pinging server. When client pings "too fast", server + // sends goaway and closes the connection (errors: too_many_pings, + // http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens. + // Server expects client pings only when there is any active streams + // (PermitWithoutStream is set false). 
+ GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"` + // GRPCKeepAliveInterval is the frequency of server-to-client ping + // to check if a connection is alive. Close a non-responsive connection + // after an additional duration of Timeout. 0 to disable. + GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"` + // GRPCKeepAliveTimeout is the additional duration of wait + // before closing a non-responsive connection. 0 to disable. + GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"` + + // clustering + + APUrls, ACUrls []url.URL + ClusterState string `json:"initial-cluster-state"` + DNSCluster string `json:"discovery-srv"` + Dproxy string `json:"discovery-proxy"` + Durl string `json:"discovery"` + InitialCluster string `json:"initial-cluster"` + InitialClusterToken string `json:"initial-cluster-token"` + StrictReconfigCheck bool `json:"strict-reconfig-check"` + EnableV2 bool `json:"enable-v2"` + + // security + + ClientTLSInfo transport.TLSInfo + ClientAutoTLS bool + PeerTLSInfo transport.TLSInfo + PeerAutoTLS bool + + // debug + + Debug bool `json:"debug"` + LogPkgLevels string `json:"log-package-levels"` + EnablePprof bool `json:"enable-pprof"` + Metrics string `json:"metrics"` + + // ForceNewCluster starts a new cluster even if previously started; unsafe. + ForceNewCluster bool `json:"force-new-cluster"` + + // UserHandlers is for registering users handlers and only used for + // embedding etcd into other applications. + // The map key is the route path for the handler, and + // you must ensure it can't be conflicted with etcd's. + UserHandlers map[string]http.Handler `json:"-"` + // ServiceRegister is for registering users' gRPC services. 
A simple usage example: + // cfg := embed.NewConfig() + // cfg.ServerRegister = func(s *grpc.Server) { + // pb.RegisterFooServer(s, &fooServer{}) + // pb.RegisterBarServer(s, &barServer{}) + // } + // embed.StartEtcd(cfg) + ServiceRegister func(*grpc.Server) `json:"-"` + + // auth + + AuthToken string `json:"auth-token"` +} + +// configYAML holds the config suitable for yaml parsing +type configYAML struct { + Config + configJSON +} + +// configJSON has file options that are translated into Config options +type configJSON struct { + LPUrlsJSON string `json:"listen-peer-urls"` + LCUrlsJSON string `json:"listen-client-urls"` + CorsJSON string `json:"cors"` + APUrlsJSON string `json:"initial-advertise-peer-urls"` + ACUrlsJSON string `json:"advertise-client-urls"` + ClientSecurityJSON securityConfig `json:"client-transport-security"` + PeerSecurityJSON securityConfig `json:"peer-transport-security"` +} + +type securityConfig struct { + CAFile string `json:"ca-file"` + CertFile string `json:"cert-file"` + KeyFile string `json:"key-file"` + CertAuth bool `json:"client-cert-auth"` + TrustedCAFile string `json:"trusted-ca-file"` + AutoTLS bool `json:"auto-tls"` +} + +// NewConfig creates a new Config populated with default values. 
+func NewConfig() *Config { + lpurl, _ := url.Parse(DefaultListenPeerURLs) + apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs) + lcurl, _ := url.Parse(DefaultListenClientURLs) + acurl, _ := url.Parse(DefaultAdvertiseClientURLs) + cfg := &Config{ + CorsInfo: &cors.CORSInfo{}, + MaxSnapFiles: DefaultMaxSnapshots, + MaxWalFiles: DefaultMaxWALs, + Name: DefaultName, + SnapCount: etcdserver.DefaultSnapCount, + MaxRequestBytes: DefaultMaxRequestBytes, + GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, + GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, + GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, + TickMs: 100, + ElectionMs: 1000, + LPUrls: []url.URL{*lpurl}, + LCUrls: []url.URL{*lcurl}, + APUrls: []url.URL{*apurl}, + ACUrls: []url.URL{*acurl}, + ClusterState: ClusterStateFlagNew, + InitialClusterToken: "etcd-cluster", + StrictReconfigCheck: true, + Metrics: "basic", + EnableV2: true, + AuthToken: "simple", + } + cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) + return cfg +} + +func ConfigFromFile(path string) (*Config, error) { + cfg := &configYAML{Config: *NewConfig()} + if err := cfg.configFromFile(path); err != nil { + return nil, err + } + return &cfg.Config, nil +} + +func (cfg *configYAML) configFromFile(path string) error { + b, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + defaultInitialCluster := cfg.InitialCluster + + err = yaml.Unmarshal(b, cfg) + if err != nil { + return err + } + + if cfg.LPUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ",")) + if err != nil { + plog.Fatalf("unexpected error setting up listen-peer-urls: %v", err) + } + cfg.LPUrls = []url.URL(u) + } + + if cfg.LCUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ",")) + if err != nil { + plog.Fatalf("unexpected error setting up listen-client-urls: %v", err) + } + cfg.LCUrls = []url.URL(u) + } + + if cfg.CorsJSON != "" { + if err := cfg.CorsInfo.Set(cfg.CorsJSON); err != nil { + 
plog.Panicf("unexpected error setting up cors: %v", err) + } + } + + if cfg.APUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ",")) + if err != nil { + plog.Fatalf("unexpected error setting up initial-advertise-peer-urls: %v", err) + } + cfg.APUrls = []url.URL(u) + } + + if cfg.ACUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ",")) + if err != nil { + plog.Fatalf("unexpected error setting up advertise-peer-urls: %v", err) + } + cfg.ACUrls = []url.URL(u) + } + + // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName + if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster { + cfg.InitialCluster = "" + } + if cfg.ClusterState == "" { + cfg.ClusterState = ClusterStateFlagNew + } + + copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) { + tls.CAFile = ysc.CAFile + tls.CertFile = ysc.CertFile + tls.KeyFile = ysc.KeyFile + tls.ClientCertAuth = ysc.CertAuth + tls.TrustedCAFile = ysc.TrustedCAFile + } + copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON) + copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON) + cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS + cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS + + return cfg.Validate() +} + +func (cfg *Config) Validate() error { + if err := checkBindURLs(cfg.LPUrls); err != nil { + return err + } + if err := checkBindURLs(cfg.LCUrls); err != nil { + return err + } + + // Check if conflicting flags are passed. 
+ nSet := 0 + for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} { + if v { + nSet++ + } + } + + if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting { + return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState) + } + + if nSet > 1 { + return ErrConflictBootstrapFlags + } + + if 5*cfg.TickMs > cfg.ElectionMs { + return fmt.Errorf("--election-timeout[%vms] should be at least as 5 times as --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs) + } + if cfg.ElectionMs > maxElectionMs { + return fmt.Errorf("--election-timeout[%vms] is too long, and should be set less than %vms", cfg.ElectionMs, maxElectionMs) + } + + // check this last since proxying in etcdmain may make this OK + if cfg.LCUrls != nil && cfg.ACUrls == nil { + return ErrUnsetAdvertiseClientURLsFlag + } + + return nil +} + +// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery. +func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) { + token = cfg.InitialClusterToken + switch { + case cfg.Durl != "": + urlsmap = types.URLsMap{} + // If using discovery, generate a temporary cluster based on + // self's advertised peer URLs + urlsmap[cfg.Name] = cfg.APUrls + token = cfg.Durl + case cfg.DNSCluster != "": + clusterStrs, cerr := srv.GetCluster("etcd-server", cfg.Name, cfg.DNSCluster, cfg.APUrls) + if cerr != nil { + plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr) + return nil, "", cerr + } + for _, s := range clusterStrs { + plog.Noticef("got bootstrap from DNS for etcd-server at %s", s) + } + clusterStr := strings.Join(clusterStrs, ",") + if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.CAFile == "" { + cfg.PeerTLSInfo.ServerName = cfg.DNSCluster + } + urlsmap, err = types.NewURLsMap(clusterStr) + // only etcd member must belong to the discovered cluster. 
+ // proxy does not need to belong to the discovered cluster. + if which == "etcd" { + if _, ok := urlsmap[cfg.Name]; !ok { + return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name) + } + } + default: + // We're statically configured, and cluster has appropriately been set. + urlsmap, err = types.NewURLsMap(cfg.InitialCluster) + } + return urlsmap, token, err +} + +func (cfg Config) InitialClusterFromName(name string) (ret string) { + if len(cfg.APUrls) == 0 { + return "" + } + n := name + if name == "" { + n = DefaultName + } + for i := range cfg.APUrls { + ret = ret + "," + n + "=" + cfg.APUrls[i].String() + } + return ret[1:] +} + +func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew } +func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) } + +func (cfg Config) defaultPeerHost() bool { + return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs +} + +func (cfg Config) defaultClientHost() bool { + return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs +} + +// UpdateDefaultClusterFromName updates cluster advertise URLs with, if available, default host, +// if advertise URLs are default values(localhost:2379,2380) AND if listen URL is 0.0.0.0. +// e.g. advertise peer URL localhost:2380 or listen peer URL 0.0.0.0:2380 +// then the advertise peer host would be updated with machine's default host, +// while keeping the listen URL's port. +// User can work around this by explicitly setting URL with 127.0.0.1. +// It returns the default hostname, if used, and the error, if any, from getting the machine's default host. +// TODO: check whether fields are set instead of whether fields have default value +func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) { + if defaultHostname == "" || defaultHostStatus != nil { + // update 'initial-cluster' when only the name is specified (e.g. 
'etcd --name=abc') + if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster { + cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) + } + return "", defaultHostStatus + } + + used := false + pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port() + if cfg.defaultPeerHost() && pip == "0.0.0.0" { + cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)} + used = true + } + // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc') + if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster { + cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) + } + + cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port() + if cfg.defaultClientHost() && cip == "0.0.0.0" { + cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)} + used = true + } + dhost := defaultHostname + if !used { + dhost = "" + } + return dhost, defaultHostStatus +} + +// checkBindURLs returns an error if any URL uses a domain name. +// TODO: return error in 3.2.0 +func checkBindURLs(urls []url.URL) error { + for _, url := range urls { + if url.Scheme == "unix" || url.Scheme == "unixs" { + continue + } + host, _, err := net.SplitHostPort(url.Host) + if err != nil { + return err + } + if host == "localhost" { + // special case for local address + // TODO: support /etc/hosts ? + continue + } + if net.ParseIP(host) == nil { + return fmt.Errorf("expected IP in URL for binding (%s)", url.String()) + } + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/embed/doc.go b/vendor/github.com/coreos/etcd/embed/doc.go new file mode 100644 index 00000000000..c555aa58eba --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/doc.go @@ -0,0 +1,45 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package embed provides bindings for embedding an etcd server in a program. + +Launch an embedded etcd server using the configuration defaults: + + import ( + "log" + "time" + + "github.com/coreos/etcd/embed" + ) + + func main() { + cfg := embed.NewConfig() + cfg.Dir = "default.etcd" + e, err := embed.StartEtcd(cfg) + if err != nil { + log.Fatal(err) + } + defer e.Close() + select { + case <-e.Server.ReadyNotify(): + log.Printf("Server is ready!") + case <-time.After(60 * time.Second): + e.Server.Stop() // trigger a shutdown + log.Printf("Server took too long to start!") + } + log.Fatal(<-e.Err()) + } +*/ +package embed diff --git a/vendor/github.com/coreos/etcd/embed/etcd.go b/vendor/github.com/coreos/etcd/embed/etcd.go new file mode 100644 index 00000000000..2f500f9f13b --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/etcd.go @@ -0,0 +1,509 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package embed + +import ( + "context" + "crypto/tls" + "fmt" + "io/ioutil" + defaultLog "log" + "net" + "net/http" + "path/filepath" + "sync" + "time" + + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" + "github.com/coreos/etcd/etcdserver/api/v2http" + "github.com/coreos/etcd/etcdserver/api/v3rpc" + "github.com/coreos/etcd/pkg/cors" + "github.com/coreos/etcd/pkg/debugutil" + runtimeutil "github.com/coreos/etcd/pkg/runtime" + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/rafthttp" + + "github.com/cockroachdb/cmux" + "github.com/coreos/pkg/capnslog" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed") + +const ( + // internal fd usage includes disk usage and transport usage. + // To read/write snapshot, snap pkg needs 1. In normal case, wal pkg needs + // at most 2 to read/lock/write WALs. One case that it needs to 2 is to + // read all logs after some snapshot index, which locates at the end of + // the second last and the head of the last. For purging, it needs to read + // directory, so it needs 1. For fd monitor, it needs 1. + // For transport, rafthttp builds two long-polling connections and at most + // four temporary connections with each member. There are at most 9 members + // in a cluster, so it should reserve 96. + // For the safety, we set the total reserved number to 150. + reservedInternalFDNum = 150 +) + +// Etcd contains a running etcd server and its listeners. +type Etcd struct { + Peers []*peerListener + Clients []net.Listener + // a map of contexts for the servers that serves client requests. 
+ sctxs map[string]*serveCtx + + Server *etcdserver.EtcdServer + + cfg Config + stopc chan struct{} + errc chan error + + closeOnce sync.Once +} + +type peerListener struct { + net.Listener + serve func() error + close func(context.Context) error +} + +// StartEtcd launches the etcd server and HTTP handlers for client/server communication. +// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait +// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use. +func StartEtcd(inCfg *Config) (e *Etcd, err error) { + if err = inCfg.Validate(); err != nil { + return nil, err + } + serving := false + e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})} + cfg := &e.cfg + defer func() { + if e == nil || err == nil { + return + } + if !serving { + // errored before starting gRPC server for serveCtx.serversC + for _, sctx := range e.sctxs { + close(sctx.serversC) + } + } + e.Close() + e = nil + }() + + if e.Peers, err = startPeerListeners(cfg); err != nil { + return e, err + } + if e.sctxs, err = startClientListeners(cfg); err != nil { + return e, err + } + for _, sctx := range e.sctxs { + e.Clients = append(e.Clients, sctx.l) + } + + var ( + urlsmap types.URLsMap + token string + ) + + if !isMemberInitialized(cfg) { + urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd") + if err != nil { + return e, fmt.Errorf("error setting up initial cluster: %v", err) + } + } + + srvcfg := &etcdserver.ServerConfig{ + Name: cfg.Name, + ClientURLs: cfg.ACUrls, + PeerURLs: cfg.APUrls, + DataDir: cfg.Dir, + DedicatedWALDir: cfg.WalDir, + SnapCount: cfg.SnapCount, + MaxSnapFiles: cfg.MaxSnapFiles, + MaxWALFiles: cfg.MaxWalFiles, + InitialPeerURLsMap: urlsmap, + InitialClusterToken: token, + DiscoveryURL: cfg.Durl, + DiscoveryProxy: cfg.Dproxy, + NewCluster: cfg.IsNewCluster(), + ForceNewCluster: cfg.ForceNewCluster, + PeerTLSInfo: cfg.PeerTLSInfo, + TickMs: cfg.TickMs, + ElectionTicks: cfg.ElectionTicks(), + AutoCompactionRetention: 
cfg.AutoCompactionRetention, + QuotaBackendBytes: cfg.QuotaBackendBytes, + MaxRequestBytes: cfg.MaxRequestBytes, + StrictReconfigCheck: cfg.StrictReconfigCheck, + ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth, + AuthToken: cfg.AuthToken, + Debug: cfg.Debug, + } + + if e.Server, err = etcdserver.NewServer(srvcfg); err != nil { + return e, err + } + + // buffer channel so goroutines on closed connections won't wait forever + e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs)) + + e.Server.Start() + + if err = e.servePeers(); err != nil { + return e, err + } + if err = e.serveClients(); err != nil { + return e, err + } + + serving = true + return e, nil +} + +// Config returns the current configuration. +func (e *Etcd) Config() Config { + return e.cfg +} + +// Close gracefully shuts down all servers/listeners. +// Client requests will be terminated with request timeout. +// After timeout, enforce remaning requests be closed immediately. +func (e *Etcd) Close() { + e.closeOnce.Do(func() { close(e.stopc) }) + + // close client requests with request timeout + timeout := 2 * time.Second + if e.Server != nil { + timeout = e.Server.Cfg.ReqTimeout() + } + for _, sctx := range e.sctxs { + for ss := range sctx.serversC { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + stopServers(ctx, ss) + cancel() + } + } + + for _, sctx := range e.sctxs { + sctx.cancel() + } + + for i := range e.Clients { + if e.Clients[i] != nil { + e.Clients[i].Close() + } + } + + // close rafthttp transports + if e.Server != nil { + e.Server.Stop() + } + + // close all idle connections in peer handler (wait up to 1-second) + for i := range e.Peers { + if e.Peers[i] != nil && e.Peers[i].close != nil { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + e.Peers[i].close(ctx) + cancel() + } + } +} + +func stopServers(ctx context.Context, ss *servers) { + shutdownNow := func() { + // first, close the http.Server + ss.http.Shutdown(ctx) + 
// then close grpc.Server; cancels all active RPCs + ss.grpc.Stop() + } + + // do not grpc.Server.GracefulStop with TLS enabled etcd server + // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531 + // and https://github.com/coreos/etcd/issues/8916 + if ss.secure { + shutdownNow() + return + } + + ch := make(chan struct{}) + go func() { + defer close(ch) + // close listeners to stop accepting new connections, + // will block on any existing transports + ss.grpc.GracefulStop() + }() + + // wait until all pending RPCs are finished + select { + case <-ch: + case <-ctx.Done(): + // took too long, manually close open transports + // e.g. watch streams + shutdownNow() + + // concurrent GracefulStop should be interrupted + <-ch + } +} + +func (e *Etcd) Err() <-chan error { return e.errc } + +func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { + if cfg.PeerAutoTLS && cfg.PeerTLSInfo.Empty() { + phosts := make([]string, len(cfg.LPUrls)) + for i, u := range cfg.LPUrls { + phosts[i] = u.Host + } + cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts) + if err != nil { + plog.Fatalf("could not get certs (%v)", err) + } + } else if cfg.PeerAutoTLS { + plog.Warningf("ignoring peer auto TLS since certs given") + } + + if !cfg.PeerTLSInfo.Empty() { + plog.Infof("peerTLS: %s", cfg.PeerTLSInfo) + } + + peers = make([]*peerListener, len(cfg.LPUrls)) + defer func() { + if err == nil { + return + } + for i := range peers { + if peers[i] != nil && peers[i].close != nil { + plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + peers[i].close(ctx) + cancel() + } + } + }() + + for i, u := range cfg.LPUrls { + if u.Scheme == "http" { + if !cfg.PeerTLSInfo.Empty() { + plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. 
Ignored peer key/cert files.", u.String()) + } + if cfg.PeerTLSInfo.ClientCertAuth { + plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String()) + } + } + peers[i] = &peerListener{close: func(context.Context) error { return nil }} + peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo) + if err != nil { + return nil, err + } + // once serve, overwrite with 'http.Server.Shutdown' + peers[i].close = func(context.Context) error { + return peers[i].Listener.Close() + } + plog.Info("listening for peers on ", u.String()) + } + return peers, nil +} + +// configure peer handlers after rafthttp.Transport started +func (e *Etcd) servePeers() (err error) { + ph := etcdhttp.NewPeerHandler(e.Server) + var peerTLScfg *tls.Config + if !e.cfg.PeerTLSInfo.Empty() { + if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil { + return err + } + } + + for _, p := range e.Peers { + gs := v3rpc.Server(e.Server, peerTLScfg) + m := cmux.New(p.Listener) + go gs.Serve(m.Match(cmux.HTTP2())) + srv := &http.Server{ + Handler: grpcHandlerFunc(gs, ph), + ReadTimeout: 5 * time.Minute, + ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error + } + go srv.Serve(m.Match(cmux.Any())) + p.serve = func() error { return m.Serve() } + p.close = func(ctx context.Context) error { + // gracefully shutdown http.Server + // close open listeners, idle connections + // until context cancel or time-out + stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv}) + return nil + } + } + + // start peer servers in a goroutine + for _, pl := range e.Peers { + go func(l *peerListener) { + e.errHandler(l.serve()) + }(pl) + } + return nil +} + +func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { + if cfg.ClientAutoTLS && cfg.ClientTLSInfo.Empty() { + chosts := make([]string, len(cfg.LCUrls)) + for i, u := range cfg.LCUrls { + 
chosts[i] = u.Host + } + cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts) + if err != nil { + plog.Fatalf("could not get certs (%v)", err) + } + } else if cfg.ClientAutoTLS { + plog.Warningf("ignoring client auto TLS since certs given") + } + + if cfg.EnablePprof { + plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf) + } + + sctxs = make(map[string]*serveCtx) + for _, u := range cfg.LCUrls { + sctx := newServeCtx() + + if u.Scheme == "http" || u.Scheme == "unix" { + if !cfg.ClientTLSInfo.Empty() { + plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String()) + } + if cfg.ClientTLSInfo.ClientCertAuth { + plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String()) + } + } + if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() { + return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String()) + } + + proto := "tcp" + addr := u.Host + if u.Scheme == "unix" || u.Scheme == "unixs" { + proto = "unix" + addr = u.Host + u.Path + } + + sctx.secure = u.Scheme == "https" || u.Scheme == "unixs" + sctx.insecure = !sctx.secure + if oldctx := sctxs[addr]; oldctx != nil { + oldctx.secure = oldctx.secure || sctx.secure + oldctx.insecure = oldctx.insecure || sctx.insecure + continue + } + + if sctx.l, err = net.Listen(proto, addr); err != nil { + return nil, err + } + // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking + // hosts that disable ipv6. So, use the address given by the user. 
+ sctx.addr = addr + + if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil { + if fdLimit <= reservedInternalFDNum { + plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum) + } + sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum)) + } + + if proto == "tcp" { + if sctx.l, err = transport.NewKeepAliveListener(sctx.l, "tcp", nil); err != nil { + return nil, err + } + } + + plog.Info("listening for client requests on ", u.Host) + defer func() { + if err != nil { + sctx.l.Close() + plog.Info("stopping listening for client requests on ", u.Host) + } + }() + for k := range cfg.UserHandlers { + sctx.userHandlers[k] = cfg.UserHandlers[k] + } + sctx.serviceRegister = cfg.ServiceRegister + if cfg.EnablePprof || cfg.Debug { + sctx.registerPprof() + } + if cfg.Debug { + sctx.registerTrace() + } + sctxs[addr] = sctx + } + return sctxs, nil +} + +func (e *Etcd) serveClients() (err error) { + var ctlscfg *tls.Config + if !e.cfg.ClientTLSInfo.Empty() { + plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo) + if ctlscfg, err = e.cfg.ClientTLSInfo.ServerConfig(); err != nil { + return err + } + } + + if e.cfg.CorsInfo.String() != "" { + plog.Infof("cors = %s", e.cfg.CorsInfo) + } + + // Start a client server goroutine for each listen address + var h http.Handler + if e.Config().EnableV2 { + h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout()) + } else { + mux := http.NewServeMux() + etcdhttp.HandleBasic(mux, e.Server) + h = mux + } + h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo}) + + gopts := []grpc.ServerOption{} + if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: e.cfg.GRPCKeepAliveMinTime, + PermitWithoutStream: false, + })) + } + if e.cfg.GRPCKeepAliveInterval > time.Duration(0) && + e.cfg.GRPCKeepAliveTimeout > 
time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: e.cfg.GRPCKeepAliveInterval, + Timeout: e.cfg.GRPCKeepAliveTimeout, + })) + } + + // start client servers in a goroutine + for _, sctx := range e.sctxs { + go func(s *serveCtx) { + e.errHandler(s.serve(e.Server, ctlscfg, h, e.errHandler, gopts...)) + }(sctx) + } + return nil +} + +func (e *Etcd) errHandler(err error) { + select { + case <-e.stopc: + return + default: + } + select { + case <-e.stopc: + case e.errc <- err: + } +} diff --git a/vendor/github.com/coreos/etcd/embed/serve.go b/vendor/github.com/coreos/etcd/embed/serve.go new file mode 100644 index 00000000000..b659bf8b7d6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/serve.go @@ -0,0 +1,244 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package embed + +import ( + "crypto/tls" + "io/ioutil" + defaultLog "log" + "net" + "net/http" + "strings" + + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3client" + "github.com/coreos/etcd/etcdserver/api/v3election" + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + v3electiongw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw" + "github.com/coreos/etcd/etcdserver/api/v3lock" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + v3lockgw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw" + "github.com/coreos/etcd/etcdserver/api/v3rpc" + etcdservergw "github.com/coreos/etcd/etcdserver/etcdserverpb/gw" + "github.com/coreos/etcd/pkg/debugutil" + + "github.com/cockroachdb/cmux" + gw "github.com/grpc-ecosystem/grpc-gateway/runtime" + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +type serveCtx struct { + l net.Listener + addr string + secure bool + insecure bool + + ctx context.Context + cancel context.CancelFunc + + userHandlers map[string]http.Handler + serviceRegister func(*grpc.Server) + serversC chan *servers +} + +type servers struct { + secure bool + grpc *grpc.Server + http *http.Server +} + +func newServeCtx() *serveCtx { + ctx, cancel := context.WithCancel(context.Background()) + return &serveCtx{ + ctx: ctx, + cancel: cancel, + userHandlers: make(map[string]http.Handler), + serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true + } +} + +// serve accepts incoming connections on the listener l, +// creating a new service goroutine for each. The service goroutines +// read requests and then call handler to reply to them. 
+func (sctx *serveCtx) serve( + s *etcdserver.EtcdServer, + tlscfg *tls.Config, + handler http.Handler, + errHandler func(error), + gopts ...grpc.ServerOption) error { + logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) + <-s.ReadyNotify() + plog.Info("ready to serve client requests") + + m := cmux.New(sctx.l) + v3c := v3client.New(s) + servElection := v3election.NewElectionServer(v3c) + servLock := v3lock.NewLockServer(v3c) + + if sctx.insecure { + gs := v3rpc.Server(s, nil, gopts...) + v3electionpb.RegisterElectionServer(gs, servElection) + v3lockpb.RegisterLockServer(gs, servLock) + if sctx.serviceRegister != nil { + sctx.serviceRegister(gs) + } + grpcl := m.Match(cmux.HTTP2()) + go func() { errHandler(gs.Serve(grpcl)) }() + + opts := []grpc.DialOption{grpc.WithInsecure()} + gwmux, err := sctx.registerGateway(opts) + if err != nil { + return err + } + + httpmux := sctx.createMux(gwmux, handler) + + srvhttp := &http.Server{ + Handler: httpmux, + ErrorLog: logger, // do not log user error + } + httpl := m.Match(cmux.HTTP1()) + go func() { errHandler(srvhttp.Serve(httpl)) }() + + sctx.serversC <- &servers{grpc: gs, http: srvhttp} + plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String()) + } + + if sctx.secure { + gs := v3rpc.Server(s, tlscfg, gopts...) 
+ v3electionpb.RegisterElectionServer(gs, servElection) + v3lockpb.RegisterLockServer(gs, servLock) + if sctx.serviceRegister != nil { + sctx.serviceRegister(gs) + } + handler = grpcHandlerFunc(gs, handler) + + dtls := tlscfg.Clone() + // trust local server + dtls.InsecureSkipVerify = true + creds := credentials.NewTLS(dtls) + opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)} + gwmux, err := sctx.registerGateway(opts) + if err != nil { + return err + } + + tlsl := tls.NewListener(m.Match(cmux.Any()), tlscfg) + // TODO: add debug flag; enable logging when debug flag is set + httpmux := sctx.createMux(gwmux, handler) + + srv := &http.Server{ + Handler: httpmux, + TLSConfig: tlscfg, + ErrorLog: logger, // do not log user error + } + go func() { errHandler(srv.Serve(tlsl)) }() + + sctx.serversC <- &servers{secure: true, grpc: gs, http: srv} + plog.Infof("serving client requests on %s", sctx.l.Addr().String()) + } + + close(sctx.serversC) + return m.Serve() +} + +// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC +// connections or otherHandler otherwise. Copied from cockroachdb. +func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler { + if otherHandler == nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + grpcServer.ServeHTTP(w, r) + }) + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { + grpcServer.ServeHTTP(w, r) + } else { + otherHandler.ServeHTTP(w, r) + } + }) +} + +type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error + +func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) { + ctx := sctx.ctx + conn, err := grpc.DialContext(ctx, sctx.addr, opts...) 
+ if err != nil { + return nil, err + } + gwmux := gw.NewServeMux() + + handlers := []registerHandlerFunc{ + etcdservergw.RegisterKVHandler, + etcdservergw.RegisterWatchHandler, + etcdservergw.RegisterLeaseHandler, + etcdservergw.RegisterClusterHandler, + etcdservergw.RegisterMaintenanceHandler, + etcdservergw.RegisterAuthHandler, + v3lockgw.RegisterLockHandler, + v3electiongw.RegisterElectionHandler, + } + for _, h := range handlers { + if err := h(ctx, gwmux, conn); err != nil { + return nil, err + } + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr) + } + }() + + return gwmux, nil +} + +func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux { + httpmux := http.NewServeMux() + for path, h := range sctx.userHandlers { + httpmux.Handle(path, h) + } + + httpmux.Handle("/v3alpha/", gwmux) + if handler != nil { + httpmux.Handle("/", handler) + } + return httpmux +} + +func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) { + if sctx.userHandlers[s] != nil { + plog.Warningf("path %s already registered by user handler", s) + return + } + sctx.userHandlers[s] = h +} + +func (sctx *serveCtx) registerPprof() { + for p, h := range debugutil.PProfHandlers() { + sctx.registerUserHandler(p, h) + } +} + +func (sctx *serveCtx) registerTrace() { + reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) } + sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf)) + evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) } + sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf)) +} diff --git a/vendor/github.com/coreos/etcd/embed/util.go b/vendor/github.com/coreos/etcd/embed/util.go new file mode 100644 index 00000000000..168e031389d --- /dev/null +++ b/vendor/github.com/coreos/etcd/embed/util.go @@ -0,0 +1,30 @@ +// Copyright 2016 The etcd Authors +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package embed + +import ( + "path/filepath" + + "github.com/coreos/etcd/wal" +) + +func isMemberInitialized(cfg *Config) bool { + waldir := cfg.WalDir + if waldir == "" { + waldir = filepath.Join(cfg.Dir, "member", "wal") + } + + return wal.Exist(waldir) +} diff --git a/vendor/github.com/coreos/etcd/error/error.go b/vendor/github.com/coreos/etcd/error/error.go index 8cf83cc716a..b541a628b87 100644 --- a/vendor/github.com/coreos/etcd/error/error.go +++ b/vendor/github.com/coreos/etcd/error/error.go @@ -154,9 +154,10 @@ func (e Error) StatusCode() int { return status } -func (e Error) WriteTo(w http.ResponseWriter) { +func (e Error) WriteTo(w http.ResponseWriter) error { w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) w.Header().Set("Content-Type", "application/json") w.WriteHeader(e.StatusCode()) - fmt.Fprintln(w, e.toJsonString()) + _, err := w.Write([]byte(e.toJsonString() + "\n")) + return err } diff --git a/vendor/github.com/coreos/etcd/etcdserver/BUILD b/vendor/github.com/coreos/etcd/etcdserver/BUILD index e05450a1bee..ebbd59bb139 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/BUILD @@ -6,6 +6,7 @@ go_library( "apply.go", "apply_auth.go", "apply_v2.go", + "backend.go", "cluster_util.go", "config.go", "consistent_index.go", @@ -40,7 +41,6 @@ go_library( "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", 
"//vendor/github.com/coreos/etcd/pkg/contention:go_default_library", "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/idutil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/BUILD index ab95a4f3277..5913cf0d91a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/api/BUILD @@ -29,7 +29,11 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:all-srcs", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3client:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:all-srcs", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go index ab8cee7cf89..5e2de58e9a1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go @@ -33,11 +33,10 @@ var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api") // capabilityMaps is a static map of version to capability map. - // the base capabilities is the set of capability 2.0 supports. 
capabilityMaps = map[string]map[Capability]bool{ - "2.3.0": {AuthCapability: true}, "3.0.0": {AuthCapability: true, V3rpcCapability: true}, "3.1.0": {AuthCapability: true, V3rpcCapability: true}, + "3.2.0": {AuthCapability: true, V3rpcCapability: true}, } enableMapMu sync.RWMutex @@ -48,7 +47,10 @@ var ( ) func init() { - enabledMap = make(map[Capability]bool) + enabledMap = map[Capability]bool{ + AuthCapability: true, + V3rpcCapability: true, + } } // UpdateCapability updates the enabledMap when the cluster version increases. diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD new file mode 100644 index 00000000000..323b6e08dc0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/BUILD @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "base.go", + "peer.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api/etcdhttp", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/error:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library", + "//vendor/github.com/coreos/etcd/raft:go_default_library", + "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", + "//vendor/github.com/coreos/etcd/version:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + 
name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go new file mode 100644 index 00000000000..283b32dbf95 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go @@ -0,0 +1,186 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdhttp + +import ( + "encoding/json" + "expvar" + "fmt" + "net/http" + "strings" + "time" + + etcdErr "github.com/coreos/etcd/error" + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" + "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/logutil" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/version" + "github.com/coreos/pkg/capnslog" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp") + mlog = logutil.NewMergeLogger(plog) +) + +const ( + configPath = "/config" + metricsPath = "/metrics" + healthPath = "/health" + varsPath = "/debug/vars" + versionPath = "/version" +) + +// HandleBasic adds handlers to a mux for serving JSON etcd client requests +// that do not access the v2 store. +func HandleBasic(mux *http.ServeMux, server *etcdserver.EtcdServer) { + mux.HandleFunc(varsPath, serveVars) + mux.HandleFunc(configPath+"/local/log", logHandleFunc) + mux.Handle(metricsPath, prometheus.Handler()) + mux.Handle(healthPath, healthHandler(server)) + mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) +} + +func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "GET") { + return + } + if uint64(server.Leader()) == raft.None { + http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) + return + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { + http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"health": "true"}`)) + } +} + +func versionHandler(c api.Cluster, fn 
func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + v := c.Version() + if v != nil { + fn(w, r, v.String()) + } else { + fn(w, r, "not_decided") + } + } +} + +func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { + if !allowMethod(w, r, "GET") { + return + } + vs := version.Versions{ + Server: version.Version, + Cluster: clusterV, + } + + w.Header().Set("Content-Type", "application/json") + b, err := json.Marshal(&vs) + if err != nil { + plog.Panicf("cannot marshal versions to json (%v)", err) + } + w.Write(b) +} + +func logHandleFunc(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "PUT") { + return + } + + in := struct{ Level string }{} + + d := json.NewDecoder(r.Body) + if err := d.Decode(&in); err != nil { + WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) + return + } + + logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) + if err != nil { + WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) + return + } + + plog.Noticef("globalLogLevel set to %q", logl.String()) + capnslog.SetGlobalLogLevel(logl) + w.WriteHeader(http.StatusNoContent) +} + +func serveVars(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "GET") { + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} + +func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool { + if m == r.Method { + return true + } + w.Header().Set("Allow", m) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return false +} + +// WriteError logs and writes the given Error to the ResponseWriter +// If Error is an etcdErr, it is rendered 
to the ResponseWriter +// Otherwise, it is assumed to be a StatusInternalServerError +func WriteError(w http.ResponseWriter, r *http.Request, err error) { + if err == nil { + return + } + switch e := err.(type) { + case *etcdErr.Error: + e.WriteTo(w) + case *httptypes.HTTPError: + if et := e.WriteTo(w); et != nil { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } + default: + switch err { + case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: + mlog.MergeError(err) + default: + mlog.MergeErrorf("got unexpected response error (%v)", err) + } + herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") + if et := herr.WriteTo(w); et != nil { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go similarity index 97% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go rename to vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go index a1abadba8e7..721bae3c600 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package v2http +package etcdhttp import ( "encoding/json" @@ -61,7 +61,7 @@ type peerMembersHandler struct { } func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { + if !allowMethod(w, r, "GET") { return } w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD index ab856d75cac..680ea8c0f36 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD @@ -9,7 +9,6 @@ go_library( "doc.go", "http.go", "metrics.go", - "peer.go", ], importpath = "github.com/coreos/etcd/etcdserver/api/v2http", visibility = ["//visibility:public"], @@ -17,18 +16,15 @@ go_library( "//vendor/github.com/coreos/etcd/error:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/auth:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/stats:go_default_library", - "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library", "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", "//vendor/github.com/coreos/etcd/store:go_default_library", - "//vendor/github.com/coreos/etcd/version:go_default_library", 
"//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/github.com/jonboulle/clockwork:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go index 038f5417e67..aa1e71ec329 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go @@ -17,7 +17,6 @@ package v2http import ( "encoding/json" "errors" - "expvar" "fmt" "io/ioutil" "net/http" @@ -30,38 +29,36 @@ import ( etcdErr "github.com/coreos/etcd/error" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" "github.com/coreos/etcd/etcdserver/auth" "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - "github.com/coreos/pkg/capnslog" "github.com/jonboulle/clockwork" - "github.com/prometheus/client_golang/prometheus" "golang.org/x/net/context" ) const ( - authPrefix = "/v2/auth" - keysPrefix = "/v2/keys" - deprecatedMachinesPrefix = "/v2/machines" - membersPrefix = "/v2/members" - statsPrefix = "/v2/stats" - varsPath = "/debug/vars" - metricsPath = "/metrics" - healthPath = "/health" - versionPath = "/version" - configPath = "/config" + authPrefix = "/v2/auth" + keysPrefix = "/v2/keys" + machinesPrefix = "/v2/machines" + membersPrefix = "/v2/members" + statsPrefix = "/v2/stats" ) // NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests. 
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler { - sec := auth.NewStore(server, timeout) + mux := http.NewServeMux() + etcdhttp.HandleBasic(mux, server) + handleV2(mux, server, timeout) + return requestLogger(mux) +} +func handleV2(mux *http.ServeMux, server *etcdserver.EtcdServer, timeout time.Duration) { + sec := auth.NewStore(server, timeout) kh := &keysHandler{ sec: sec, server: server, @@ -84,34 +81,23 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, } - dmh := &deprecatedMachinesHandler{ - cluster: server.Cluster(), - } + mah := &machinesHandler{cluster: server.Cluster()} sech := &authHandler{ sec: sec, cluster: server.Cluster(), clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, } - - mux := http.NewServeMux() mux.HandleFunc("/", http.NotFound) - mux.Handle(healthPath, healthHandler(server)) - mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) mux.Handle(keysPrefix, kh) mux.Handle(keysPrefix+"/", kh) mux.HandleFunc(statsPrefix+"/store", sh.serveStore) mux.HandleFunc(statsPrefix+"/self", sh.serveSelf) mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader) - mux.HandleFunc(varsPath, serveVars) - mux.HandleFunc(configPath+"/local/log", logHandleFunc) - mux.Handle(metricsPath, prometheus.Handler()) mux.Handle(membersPrefix, mh) mux.Handle(membersPrefix+"/", mh) - mux.Handle(deprecatedMachinesPrefix, dmh) + mux.Handle(machinesPrefix, mah) handleAuth(mux, sech) - - return requestLogger(mux) } type keysHandler struct { @@ -170,11 +156,11 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } -type deprecatedMachinesHandler struct { +type machinesHandler struct { cluster api.Cluster } -func (h *deprecatedMachinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r.Method, 
"GET", "HEAD") { return } @@ -234,7 +220,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } now := h.clock.Now() m := membership.NewMember("", req.PeerURLs, "", &now) - err := h.server.AddMember(ctx, *m) + _, err := h.server.AddMember(ctx, *m) switch { case err == membership.ErrIDExists || err == membership.ErrPeerURLexists: writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) @@ -255,7 +241,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !ok { return } - err := h.server.RemoveMember(ctx, uint64(id)) + _, err := h.server.RemoveMember(ctx, uint64(id)) switch { case err == membership.ErrIDRemoved: writeError(w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id))) @@ -280,7 +266,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ID: id, RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()}, } - err := h.server.UpdateMember(ctx, m) + _, err := h.server.UpdateMember(ctx, m) switch { case err == membership.ErrPeerURLexists: writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) @@ -321,103 +307,13 @@ func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) { } stats := h.stats.LeaderStats() if stats == nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) + etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) return } w.Header().Set("Content-Type", "application/json") w.Write(stats) } -func serveVars(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, 
"\n}\n") -} - -func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if uint64(server.Leader()) == raft.None { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"health": "true"}`)) - } -} - -func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - v := c.Version() - if v != nil { - fn(w, r, v.String()) - } else { - fn(w, r, "not_decided") - } - } -} - -func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { - if !allowMethod(w, r.Method, "GET") { - return - } - vs := version.Versions{ - Server: version.Version, - Cluster: clusterV, - } - - w.Header().Set("Content-Type", "application/json") - b, err := json.Marshal(&vs) - if err != nil { - plog.Panicf("cannot marshal versions to json (%v)", err) - } - w.Write(b) -} - -func logHandleFunc(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "PUT") { - return - } - - in := struct{ Level string }{} - - d := json.NewDecoder(r.Body) - if err := d.Decode(&in); err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) - return - } - - logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) - if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) - return - } - - plog.Noticef("globalLogLevel set to %q", logl.String()) - capnslog.SetGlobalLogLevel(logl) - w.WriteHeader(http.StatusNoContent) -} - // parseKeyRequest converts 
a received http.Request on keysPrefix to // a server Request, performing validation of supplied fields as appropriate. // If any validation fails, an empty Request and non-nil error is returned. diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go index 62c99e19d4a..589c172dbbb 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go @@ -20,12 +20,11 @@ import ( "strings" "time" - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/auth" "github.com/coreos/etcd/pkg/logutil" + "github.com/coreos/pkg/capnslog" ) @@ -39,37 +38,18 @@ var ( mlog = logutil.NewMergeLogger(plog) ) -// writeError logs and writes the given Error to the ResponseWriter -// If Error is an etcdErr, it is rendered to the ResponseWriter -// Otherwise, it is assumed to be a StatusInternalServerError func writeError(w http.ResponseWriter, r *http.Request, err error) { if err == nil { return } - switch e := err.(type) { - case *etcdErr.Error: - e.WriteTo(w) - case *httptypes.HTTPError: - if et := e.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } - case auth.Error: + if e, ok := err.(auth.Error); ok { herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error()) if et := herr.WriteTo(w); et != nil { plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) } - default: - switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: - mlog.MergeError(err) - default: - mlog.MergeErrorf("got unexpected response error (%v)", err) - } - herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") 
- if et := herr.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } + return } + etcdhttp.WriteError(w, r, err) } // allowMethod verifies that the given method is one of the allowed methods, diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD new file mode 100644 index 00000000000..5fc2b9dc11c --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "v3client.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api/v3client", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", + "//vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go new file mode 100644 index 00000000000..310715f5cd7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go @@ -0,0 +1,45 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3client provides clientv3 interfaces from an etcdserver. +// +// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New: +// +// import ( +// "context" +// +// "github.com/coreos/etcd/embed" +// "github.com/coreos/etcd/etcdserver/api/v3client" +// ) +// +// ... +// +// // create an embedded EtcdServer from the default configuration +// cfg := embed.NewConfig() +// cfg.Dir = "default.etcd" +// e, err := embed.StartEtcd(cfg) +// if err != nil { +// // handle error! +// } +// +// // wrap the EtcdServer with v3client +// cli := v3client.New(e.Server) +// +// // use like an ordinary clientv3 +// resp, err := cli.Put(context.TODO(), "some-key", "it works!") +// if err != nil { +// // handle error! +// } +// +package v3client diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go new file mode 100644 index 00000000000..c0c07c8d767 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go @@ -0,0 +1,67 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v3client + +import ( + "time" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc" + "github.com/coreos/etcd/proxy/grpcproxy/adapter" + + "golang.org/x/net/context" +) + +// New creates a clientv3 client that wraps an in-process EtcdServer. Instead +// of making gRPC calls through sockets, the client makes direct function calls +// to the etcd server through its api/v3rpc function interfaces. +func New(s *etcdserver.EtcdServer) *clientv3.Client { + c := clientv3.NewCtxClient(context.Background()) + + kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s)) + c.KV = clientv3.NewKVFromKVClient(kvc, c) + + lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s)) + c.Lease = clientv3.NewLeaseFromLeaseClient(lc, c, time.Second) + + wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s)) + c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc, c)} + + mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s)) + c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc, c) + + clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s)) + c.Cluster = clientv3.NewClusterFromClusterClient(clc, c) + + // TODO: implement clientv3.Auth interface? + + return c +} + +// BlankContext implements Stringer on a context so the ctx string doesn't +// depend on the context's WithValue data, which tends to be unsynchronized +// (e.g., x/net/trace), causing ctx.String() to throw data races. +type blankContext struct{ context.Context } + +func (*blankContext) String() string { return "(blankCtx)" } + +// watchWrapper wraps clientv3 watch calls to blank out the context +// to avoid races on trace data. 
+type watchWrapper struct{ clientv3.Watcher } + +func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { + return ww.Watcher.Watch(&blankContext{ctx}, key, opts...) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD new file mode 100644 index 00000000000..55965b382df --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "election.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api/v3election", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go new file mode 100644 index 00000000000..d6fefd74150 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3election provides a v3 election service from an etcdserver. +package v3election diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go new file mode 100644 index 00000000000..f9061c07926 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go @@ -0,0 +1,123 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3election + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/concurrency" + epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" +) + +type electionServer struct { + c *clientv3.Client +} + +func NewElectionServer(c *clientv3.Client) epb.ElectionServer { + return &electionServer{c} +} + +func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) { + s, err := es.session(ctx, req.Lease) + if err != nil { + return nil, err + } + e := concurrency.NewElection(s, string(req.Name)) + if err = e.Campaign(ctx, string(req.Value)); err != nil { + return nil, err + } + return &epb.CampaignResponse{ + Header: e.Header(), + Leader: &epb.LeaderKey{ + Name: req.Name, + Key: []byte(e.Key()), + Rev: e.Rev(), + Lease: int64(s.Lease()), + }, + }, nil +} + +func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) { + s, err := es.session(ctx, req.Leader.Lease) + if err != nil { + return nil, err + } + e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) + if err := e.Proclaim(ctx, string(req.Value)); err != nil { + return nil, err + } + return &epb.ProclaimResponse{Header: e.Header()}, nil +} + +func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error { + s, err := es.session(stream.Context(), -1) + if err != nil { + return err + } + e := concurrency.NewElection(s, string(req.Name)) + ch := e.Observe(stream.Context()) + for stream.Context().Err() == nil { + select { + case <-stream.Context().Done(): + case resp, ok := <-ch: + if !ok { + return nil + } + lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]} + if err := stream.Send(lresp); err != nil { + return err + } + } + } + return stream.Context().Err() +} + +func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) 
(*epb.LeaderResponse, error) { + s, err := es.session(ctx, -1) + if err != nil { + return nil, err + } + l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx) + if lerr != nil { + return nil, lerr + } + return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil +} + +func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) { + s, err := es.session(ctx, req.Leader.Lease) + if err != nil { + return nil, err + } + e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) + if err := e.Resign(ctx); err != nil { + return nil, err + } + return &epb.ResignResponse{Header: e.Header()}, nil +} + +func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) { + s, err := concurrency.NewSession( + es.c, + concurrency.WithLease(clientv3.LeaseID(lease)), + concurrency.WithContext(ctx), + ) + if err != nil { + return nil, err + } + s.Orphan() + return s, nil +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD new file mode 100644 index 00000000000..e46a6a322e4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/BUILD @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["v3election.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["v3election.pb.go"], + importpath = "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + 
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD new file mode 100644 index 00000000000..41b80256081 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["v3election.pb.gw.go"], + importpath = "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = 
["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go new file mode 100644 index 00000000000..ac00cbea983 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go @@ -0,0 +1,313 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: etcdserver/api/v3election/v3electionpb/v3election.proto + +/* +Package v3electionpb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package gw + +import ( + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.CampaignRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var 
protoReq v3electionpb.ProclaimRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.LeaderRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) { + var protoReq v3electionpb.LeaderRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.Observe(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + +func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.ResignRequest + var metadata runtime.ServerMetadata + + 
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterElectionHandlerFromEndpoint is same as RegisterElectionHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterElectionHandler(ctx, mux, conn) +} + +// RegisterElectionHandler registers the http handlers for service Election to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn)) +} + +// RegisterElectionHandler registers the http handlers for service Election to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ElectionClient" to call the correct interceptors. 
+func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error { + + mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "campaign"}, "")) + + pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "proclaim"}, "")) + + pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "leader"}, "")) + + pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "observe"}, "")) + + pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "resign"}, "")) +) + +var ( + forward_Election_Campaign_0 = runtime.ForwardResponseMessage + + forward_Election_Proclaim_0 = runtime.ForwardResponseMessage + + forward_Election_Leader_0 = runtime.ForwardResponseMessage + + forward_Election_Observe_0 = runtime.ForwardResponseStream + + forward_Election_Resign_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go new file mode 100644 index 00000000000..92acb1469e9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go @@ -0,0 +1,2098 @@ +// Code generated by protoc-gen-gogo. +// source: v3election.proto +// DO NOT EDIT! + +/* + Package v3electionpb is a generated protocol buffer package. 
+ + It is generated from these files: + v3election.proto + + It has these top-level messages: + CampaignRequest + CampaignResponse + LeaderKey + LeaderRequest + LeaderResponse + ResignRequest + ResignResponse + ProclaimRequest + ProclaimResponse +*/ +package v3electionpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + + _ "google.golang.org/genproto/googleapis/api/annotations" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type CampaignRequest struct { + // name is the election's identifier for the campaign. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // lease is the ID of the lease attached to leadership of the election. If the + // lease expires or is revoked before resigning leadership, then the + // leadership is transferred to the next campaigner, if any. + Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` + // value is the initial proclaimed value set when the campaigner wins the + // election. 
+ Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *CampaignRequest) Reset() { *m = CampaignRequest{} } +func (m *CampaignRequest) String() string { return proto.CompactTextString(m) } +func (*CampaignRequest) ProtoMessage() {} +func (*CampaignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{0} } + +func (m *CampaignRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *CampaignRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *CampaignRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type CampaignResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // leader describes the resources used for holding leadereship of the election. + Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"` +} + +func (m *CampaignResponse) Reset() { *m = CampaignResponse{} } +func (m *CampaignResponse) String() string { return proto.CompactTextString(m) } +func (*CampaignResponse) ProtoMessage() {} +func (*CampaignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{1} } + +func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *CampaignResponse) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +type LeaderKey struct { + // name is the election identifier that correponds to the leadership key. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // key is an opaque key representing the ownership of the election. If the key + // is deleted, then leadership is lost. + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // rev is the creation revision of the key. 
It can be used to test for ownership + // of an election during transactions by testing the key's creation revision + // matches rev. + Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"` + // lease is the lease ID of the election leader. + Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *LeaderKey) Reset() { *m = LeaderKey{} } +func (m *LeaderKey) String() string { return proto.CompactTextString(m) } +func (*LeaderKey) ProtoMessage() {} +func (*LeaderKey) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{2} } + +func (m *LeaderKey) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LeaderKey) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *LeaderKey) GetRev() int64 { + if m != nil { + return m.Rev + } + return 0 +} + +func (m *LeaderKey) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +type LeaderRequest struct { + // name is the election identifier for the leadership information. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *LeaderRequest) Reset() { *m = LeaderRequest{} } +func (m *LeaderRequest) String() string { return proto.CompactTextString(m) } +func (*LeaderRequest) ProtoMessage() {} +func (*LeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{3} } + +func (m *LeaderRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +type LeaderResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // kv is the key-value pair representing the latest leader update. 
+ Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` +} + +func (m *LeaderResponse) Reset() { *m = LeaderResponse{} } +func (m *LeaderResponse) String() string { return proto.CompactTextString(m) } +func (*LeaderResponse) ProtoMessage() {} +func (*LeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{4} } + +func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaderResponse) GetKv() *mvccpb.KeyValue { + if m != nil { + return m.Kv + } + return nil +} + +type ResignRequest struct { + // leader is the leadership to relinquish by resignation. + Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"` +} + +func (m *ResignRequest) Reset() { *m = ResignRequest{} } +func (m *ResignRequest) String() string { return proto.CompactTextString(m) } +func (*ResignRequest) ProtoMessage() {} +func (*ResignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{5} } + +func (m *ResignRequest) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +type ResignResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *ResignResponse) Reset() { *m = ResignResponse{} } +func (m *ResignResponse) String() string { return proto.CompactTextString(m) } +func (*ResignResponse) ProtoMessage() {} +func (*ResignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{6} } + +func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type ProclaimRequest struct { + // leader is the leadership hold on the election. + Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"` + // value is an update meant to overwrite the leader's current value. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} } +func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) } +func (*ProclaimRequest) ProtoMessage() {} +func (*ProclaimRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{7} } + +func (m *ProclaimRequest) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +func (m *ProclaimRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type ProclaimResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} } +func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) } +func (*ProclaimResponse) ProtoMessage() {} +func (*ProclaimResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{8} } + +func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*CampaignRequest)(nil), "v3electionpb.CampaignRequest") + proto.RegisterType((*CampaignResponse)(nil), "v3electionpb.CampaignResponse") + proto.RegisterType((*LeaderKey)(nil), "v3electionpb.LeaderKey") + proto.RegisterType((*LeaderRequest)(nil), "v3electionpb.LeaderRequest") + proto.RegisterType((*LeaderResponse)(nil), "v3electionpb.LeaderResponse") + proto.RegisterType((*ResignRequest)(nil), "v3electionpb.ResignRequest") + proto.RegisterType((*ResignResponse)(nil), "v3electionpb.ResignResponse") + proto.RegisterType((*ProclaimRequest)(nil), "v3electionpb.ProclaimRequest") + proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Election service + +type ElectionClient interface { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) + // Proclaim updates the leader's posted value with a new value. + Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) + // Leader returns the current election proclamation, if any. + Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. + Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) +} + +type electionClient struct { + cc *grpc.ClientConn +} + +func NewElectionClient(cc *grpc.ClientConn) ElectionClient { + return &electionClient{cc} +} + +func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) { + out := new(CampaignResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) { + out := new(ProclaimResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) { + out := new(LeaderResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Election_serviceDesc.Streams[0], c.cc, "/v3electionpb.Election/Observe", opts...) + if err != nil { + return nil, err + } + x := &electionObserveClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Election_ObserveClient interface { + Recv() (*LeaderResponse, error) + grpc.ClientStream +} + +type electionObserveClient struct { + grpc.ClientStream +} + +func (x *electionObserveClient) Recv() (*LeaderResponse, error) { + m := new(LeaderResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) { + out := new(ResignResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Election service + +type ElectionServer interface { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + Campaign(context.Context, *CampaignRequest) (*CampaignResponse, error) + // Proclaim updates the leader's posted value with a new value. + Proclaim(context.Context, *ProclaimRequest) (*ProclaimResponse, error) + // Leader returns the current election proclamation, if any. + Leader(context.Context, *LeaderRequest) (*LeaderResponse, error) + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + Observe(*LeaderRequest, Election_ObserveServer) error + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. 
+ Resign(context.Context, *ResignRequest) (*ResignResponse, error) +} + +func RegisterElectionServer(s *grpc.Server, srv ElectionServer) { + s.RegisterService(&_Election_serviceDesc, srv) +} + +func _Election_Campaign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CampaignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Campaign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Campaign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Campaign(ctx, req.(*CampaignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Proclaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProclaimRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Proclaim(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Proclaim", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Proclaim(ctx, req.(*ProclaimRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Leader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Leader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Leader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Leader(ctx, 
req.(*LeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Observe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(LeaderRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ElectionServer).Observe(m, &electionObserveServer{stream}) +} + +type Election_ObserveServer interface { + Send(*LeaderResponse) error + grpc.ServerStream +} + +type electionObserveServer struct { + grpc.ServerStream +} + +func (x *electionObserveServer) Send(m *LeaderResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Election_Resign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Resign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Resign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Resign(ctx, req.(*ResignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Election_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v3electionpb.Election", + HandlerType: (*ElectionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Campaign", + Handler: _Election_Campaign_Handler, + }, + { + MethodName: "Proclaim", + Handler: _Election_Proclaim_Handler, + }, + { + MethodName: "Leader", + Handler: _Election_Leader_Handler, + }, + { + MethodName: "Resign", + Handler: _Election_Resign_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Observe", + Handler: _Election_Observe_Handler, + ServerStreams: true, + }, + }, + Metadata: "v3election.proto", +} + +func (m *CampaignRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *CampaignRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Lease != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *CampaignResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CampaignResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Leader != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n2, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *LeaderKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaderKey) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.Rev != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Rev)) + } + if m.Lease != 0 { + dAtA[i] = 0x20 + i++ + 
i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) + } + return i, nil +} + +func (m *LeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaderRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *LeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaderResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n3, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Kv != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Kv.Size())) + n4, err := m.Kv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *ResignRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResignRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Leader != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n5, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *ResignResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *ResignResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n6, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProclaimRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Leader != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n7, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProclaimResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n8, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func encodeFixed64V3Election(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32V3Election(dAtA []byte, offset int, v uint32) int { + 
dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintV3Election(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CampaignRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovV3Election(uint64(m.Lease)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *CampaignResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *LeaderKey) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Rev != 0 { + n += 1 + sovV3Election(uint64(m.Rev)) + } + if m.Lease != 0 { + n += 1 + sovV3Election(uint64(m.Lease)) + } + return n +} + +func (m *LeaderRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *LeaderResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Kv != nil { + l = m.Kv.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *ResignRequest) Size() (n int) { + var l int + _ = l + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *ResignResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func 
(m *ProclaimRequest) Size() (n int) { + var l int + _ = l + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *ProclaimResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func sovV3Election(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozV3Election(x uint64) (n int) { + return sovV3Election(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CampaignRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CampaignRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CampaignRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CampaignResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CampaignResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CampaignResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &LeaderKey{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaderKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaderKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaderKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Rev", wireType) + } + m.Rev = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Rev |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kv == nil { + m.Kv = &mvccpb.KeyValue{} + } + if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResignRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResignRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResignRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader 
= &LeaderKey{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResignResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResignResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResignResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProclaimRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProclaimRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &LeaderKey{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProclaimResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProclaimResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProclaimResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipV3Election(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthV3Election + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipV3Election(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") 
+} + +var ( + ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("v3election.proto", fileDescriptorV3Election) } + +var fileDescriptorV3Election = []byte{ + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x65, 0x9d, 0x10, 0xca, 0x90, 0xb6, 0x96, 0x55, 0x89, 0x34, 0xa4, 0x26, 0xda, 0x02, 0xaa, + 0x72, 0xf0, 0xa2, 0x86, 0x53, 0x4e, 0x08, 0x04, 0xaa, 0x54, 0x24, 0xc0, 0x07, 0x04, 0xc7, 0x8d, + 0x3b, 0x4a, 0xa2, 0x38, 0xde, 0xc5, 0x4e, 0x2d, 0xe5, 0xca, 0x2f, 0x70, 0xe1, 0x33, 0xf8, 0x0c, + 0x8e, 0x48, 0xfc, 0x00, 0x0a, 0x7c, 0x08, 0xda, 0x5d, 0x1b, 0x3b, 0x6e, 0x88, 0x50, 0x73, 0xb1, + 0xc6, 0x33, 0xcf, 0xf3, 0xe6, 0xbd, 0x9d, 0x35, 0xd8, 0x69, 0x1f, 0x43, 0x0c, 0xe6, 0x13, 0x11, + 0x79, 0x32, 0x16, 0x73, 0xe1, 0x34, 0x8b, 0x8c, 0x1c, 0xb6, 0x0f, 0x46, 0x62, 0x24, 0x74, 0x81, + 0xa9, 0xc8, 0x60, 0xda, 0x8f, 0x70, 0x1e, 0x5c, 0x30, 0xf5, 0x48, 0x30, 0x4e, 0x31, 0x2e, 0x85, + 0x72, 0xc8, 0x62, 0x19, 0x64, 0xb8, 0x43, 0x8d, 0x9b, 0xa5, 0x41, 0xa0, 0x1f, 0x72, 0xc8, 0xa6, + 0x69, 0x56, 0xea, 0x8c, 0x84, 0x18, 0x85, 0xc8, 0xb8, 0x9c, 0x30, 0x1e, 0x45, 0x62, 0xce, 0x15, + 0x63, 0x62, 0xaa, 0xf4, 0x2d, 0xec, 0x3f, 0xe7, 0x33, 0xc9, 0x27, 0xa3, 0xc8, 0xc7, 0x8f, 0x97, + 0x98, 0xcc, 0x1d, 0x07, 0xea, 0x11, 0x9f, 0x61, 0x8b, 0x74, 0xc9, 0x49, 0xd3, 0xd7, 0xb1, 0x73, + 0x00, 0x37, 0x43, 0xe4, 0x09, 0xb6, 0xac, 0x2e, 0x39, 0xa9, 0xf9, 0xe6, 0x45, 0x65, 0x53, 0x1e, + 0x5e, 0x62, 0xab, 0xa6, 0xa1, 0xe6, 0x85, 0x2e, 0xc0, 0x2e, 0x5a, 0x26, 0x52, 0x44, 0x09, 0x3a, + 0x4f, 0xa0, 0x31, 0x46, 0x7e, 0x81, 0xb1, 0xee, 0x7a, 0xe7, 0xb4, 0xe3, 0x95, 0x85, 0x78, 0x39, + 0xee, 0x4c, 0x63, 0xfc, 0x0c, 0xeb, 0x30, 0x68, 0x84, 0xe6, 0x2b, 0x4b, 0x7f, 0x75, 0xd7, 0x2b, + 0x5b, 0xe6, 0xbd, 0xd2, 0xb5, 0x73, 0x5c, 0xf8, 0x19, 0x8c, 0x7e, 0x80, 0xdb, 
0x7f, 0x93, 0x6b, + 0x75, 0xd8, 0x50, 0x9b, 0xe2, 0x42, 0xb7, 0x6b, 0xfa, 0x2a, 0x54, 0x99, 0x18, 0x53, 0xad, 0xa0, + 0xe6, 0xab, 0xb0, 0xd0, 0x5a, 0x2f, 0x69, 0xa5, 0xc7, 0xb0, 0x6b, 0x5a, 0x6f, 0xb0, 0x89, 0x8e, + 0x61, 0x2f, 0x07, 0x6d, 0x25, 0xbc, 0x0b, 0xd6, 0x34, 0xcd, 0x44, 0xdb, 0x9e, 0x39, 0x51, 0xef, + 0x1c, 0x17, 0xef, 0x94, 0xc1, 0xbe, 0x35, 0x4d, 0xe9, 0x53, 0xd8, 0xf5, 0x31, 0x29, 0x9d, 0x5a, + 0xe1, 0x15, 0xf9, 0x3f, 0xaf, 0x5e, 0xc2, 0x5e, 0xde, 0x61, 0x9b, 0x59, 0xe9, 0x7b, 0xd8, 0x7f, + 0x13, 0x8b, 0x20, 0xe4, 0x93, 0xd9, 0x75, 0x67, 0x29, 0x16, 0xc9, 0x2a, 0x2f, 0xd2, 0x19, 0xd8, + 0x45, 0xe7, 0x6d, 0x66, 0x3c, 0xfd, 0x5a, 0x87, 0x9d, 0x17, 0xd9, 0x00, 0x8e, 0x84, 0x9d, 0x7c, + 0x3f, 0x9d, 0xa3, 0xd5, 0xc9, 0x2a, 0x57, 0xa1, 0xed, 0xfe, 0xab, 0x6c, 0x58, 0xe8, 0xc3, 0x4f, + 0x3f, 0x7e, 0x7f, 0xb6, 0xee, 0xd3, 0x36, 0x4b, 0xfb, 0x3c, 0x94, 0x63, 0xce, 0x72, 0x34, 0x0b, + 0x32, 0xec, 0x80, 0xf4, 0x14, 0x63, 0x2e, 0xa4, 0xca, 0x58, 0xb1, 0xae, 0xca, 0x58, 0xd5, 0xbf, + 0x89, 0x51, 0x66, 0x58, 0xc5, 0x38, 0x86, 0x86, 0x71, 0xd9, 0xb9, 0xb7, 0xce, 0xfb, 0x9c, 0xad, + 0xb3, 0xbe, 0x98, 0x71, 0x1d, 0x6b, 0xae, 0x23, 0xda, 0xba, 0xca, 0x65, 0xce, 0x4d, 0x31, 0x85, + 0x70, 0xeb, 0xf5, 0x50, 0xfb, 0xbf, 0x0d, 0xd5, 0x03, 0x4d, 0xe5, 0xd2, 0xc3, 0xab, 0x54, 0xc2, + 0x74, 0x1f, 0x90, 0xde, 0x63, 0xa2, 0x74, 0x99, 0xa5, 0xad, 0x92, 0xad, 0x5c, 0x86, 0x2a, 0xd9, + 0xea, 0x9e, 0x6f, 0xd2, 0x15, 0x6b, 0xe4, 0x80, 0xf4, 0x9e, 0xd9, 0xdf, 0x96, 0x2e, 0xf9, 0xbe, + 0x74, 0xc9, 0xcf, 0xa5, 0x4b, 0xbe, 0xfc, 0x72, 0x6f, 0x0c, 0x1b, 0xfa, 0x8f, 0xd9, 0xff, 0x13, + 0x00, 0x00, 0xff, 0xff, 0xfc, 0x4d, 0x5a, 0x40, 0xca, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto new file mode 100644 index 00000000000..ebf6c88f7fa --- /dev/null +++ 
b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto @@ -0,0 +1,119 @@ +syntax = "proto3"; +package v3electionpb; + +import "gogoproto/gogo.proto"; +import "etcd/etcdserver/etcdserverpb/rpc.proto"; +import "etcd/mvcc/mvccpb/kv.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// The election service exposes client-side election facilities as a gRPC interface. +service Election { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + rpc Campaign(CampaignRequest) returns (CampaignResponse) { + option (google.api.http) = { + post: "/v3alpha/election/campaign" + body: "*" + }; + } + // Proclaim updates the leader's posted value with a new value. + rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) { + option (google.api.http) = { + post: "/v3alpha/election/proclaim" + body: "*" + }; + } + // Leader returns the current election proclamation, if any. + rpc Leader(LeaderRequest) returns (LeaderResponse) { + option (google.api.http) = { + post: "/v3alpha/election/leader" + body: "*" + }; + } + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + rpc Observe(LeaderRequest) returns (stream LeaderResponse) { + option (google.api.http) = { + post: "/v3alpha/election/observe" + body: "*" + }; + } + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. + rpc Resign(ResignRequest) returns (ResignResponse) { + option (google.api.http) = { + post: "/v3alpha/election/resign" + body: "*" + }; + } +} + +message CampaignRequest { + // name is the election's identifier for the campaign. 
+ bytes name = 1; + // lease is the ID of the lease attached to leadership of the election. If the + // lease expires or is revoked before resigning leadership, then the + // leadership is transferred to the next campaigner, if any. + int64 lease = 2; + // value is the initial proclaimed value set when the campaigner wins the + // election. + bytes value = 3; +} + +message CampaignResponse { + etcdserverpb.ResponseHeader header = 1; + // leader describes the resources used for holding leadereship of the election. + LeaderKey leader = 2; +} + +message LeaderKey { + // name is the election identifier that correponds to the leadership key. + bytes name = 1; + // key is an opaque key representing the ownership of the election. If the key + // is deleted, then leadership is lost. + bytes key = 2; + // rev is the creation revision of the key. It can be used to test for ownership + // of an election during transactions by testing the key's creation revision + // matches rev. + int64 rev = 3; + // lease is the lease ID of the election leader. + int64 lease = 4; +} + +message LeaderRequest { + // name is the election identifier for the leadership information. + bytes name = 1; +} + +message LeaderResponse { + etcdserverpb.ResponseHeader header = 1; + // kv is the key-value pair representing the latest leader update. + mvccpb.KeyValue kv = 2; +} + +message ResignRequest { + // leader is the leadership to relinquish by resignation. + LeaderKey leader = 1; +} + +message ResignResponse { + etcdserverpb.ResponseHeader header = 1; +} + +message ProclaimRequest { + // leader is the leadership hold on the election. + LeaderKey leader = 1; + // value is an update meant to overwrite the leader's current value. 
+ bytes value = 2; +} + +message ProclaimResponse { + etcdserverpb.ResponseHeader header = 1; +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD new file mode 100644 index 00000000000..a528567d38f --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "lock.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api/v3lock", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go new file mode 100644 index 00000000000..e0a1008abc9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3lock provides a v3 locking service from an etcdserver. +package v3lock diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go new file mode 100644 index 00000000000..66465bf13f6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go @@ -0,0 +1,56 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3lock + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/concurrency" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" +) + +type lockServer struct { + c *clientv3.Client +} + +func NewLockServer(c *clientv3.Client) v3lockpb.LockServer { + return &lockServer{c} +} + +func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { + s, err := concurrency.NewSession( + ls.c, + concurrency.WithLease(clientv3.LeaseID(req.Lease)), + concurrency.WithContext(ctx), + ) + if err != nil { + return nil, err + } + s.Orphan() + m := concurrency.NewMutex(s, string(req.Name)) + if err = m.Lock(ctx); err != nil { + return nil, err + } + return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil +} + +func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { + resp, err := ls.c.Delete(ctx, string(req.Key)) + if err != nil { + return nil, err + } + return &v3lockpb.UnlockResponse{Header: resp.Header}, nil +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD new file mode 100644 index 00000000000..abe9cd04aeb --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["v3lock.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["v3lock.pb.go"], + importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + 
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD new file mode 100644 index 00000000000..9d54f77260e --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["v3lock.pb.gw.go"], + importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go new file mode 100644 index 00000000000..5aef4756dfe --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: etcdserver/api/v3lock/v3lockpb/v3lock.proto + +/* +Package v3lockpb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package gw + +import ( + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3lockpb.LockRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3lockpb.UnlockRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil 
{ + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterLockHandlerFromEndpoint is same as RegisterLockHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterLockHandler(ctx, mux, conn) +} + +// RegisterLockHandler registers the http handlers for service Lock to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn)) +} + +// RegisterLockHandler registers the http handlers for service Lock to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LockClient" to call the correct interceptors. 
+func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error { + + mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3alpha", "lock"}, "")) + + pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lock", "unlock"}, "")) +) + +var ( + forward_Lock_Lock_0 = runtime.ForwardResponseMessage + + forward_Lock_Unlock_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go new file mode 100644 index 00000000000..dcf2bad4019 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go @@ -0,0 +1,978 @@ +// Code generated by protoc-gen-gogo. +// source: v3lock.proto +// DO NOT EDIT! + +/* + Package v3lockpb is a generated protocol buffer package. + + It is generated from these files: + v3lock.proto + + It has these top-level messages: + LockRequest + LockResponse + UnlockRequest + UnlockResponse +*/ +package v3lockpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + _ "google.golang.org/genproto/googleapis/api/annotations" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LockRequest struct { + // name is the identifier for the distributed shared lock to be acquired. 
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // lease is the ID of the lease that will be attached to ownership of the + // lock. If the lease expires or is revoked and currently holds the lock, + // the lock is automatically released. Calls to Lock with the same lease will + // be treated as a single acquisition; locking twice with the same lease is a + // no-op. + Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *LockRequest) Reset() { *m = LockRequest{} } +func (m *LockRequest) String() string { return proto.CompactTextString(m) } +func (*LockRequest) ProtoMessage() {} +func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} } + +func (m *LockRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LockRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +type LockResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // key is a key that will exist on etcd for the duration that the Lock caller + // owns the lock. Users should not modify this key or the lock may exhibit + // undefined behavior. + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *LockResponse) Reset() { *m = LockResponse{} } +func (m *LockResponse) String() string { return proto.CompactTextString(m) } +func (*LockResponse) ProtoMessage() {} +func (*LockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{1} } + +func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LockResponse) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type UnlockRequest struct { + // key is the lock ownership key granted by Lock. 
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *UnlockRequest) Reset() { *m = UnlockRequest{} } +func (m *UnlockRequest) String() string { return proto.CompactTextString(m) } +func (*UnlockRequest) ProtoMessage() {} +func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} } + +func (m *UnlockRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type UnlockResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *UnlockResponse) Reset() { *m = UnlockResponse{} } +func (m *UnlockResponse) String() string { return proto.CompactTextString(m) } +func (*UnlockResponse) ProtoMessage() {} +func (*UnlockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{3} } + +func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest") + proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse") + proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest") + proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Lock service + +type LockClient interface { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership. 
The lock is held until Unlock is called on the key or the + // lease associated with the owner expires. + Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) + // Unlock takes a key returned by Lock and releases the hold on lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. + Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) +} + +type lockClient struct { + cc *grpc.ClientConn +} + +func NewLockClient(cc *grpc.ClientConn) LockClient { + return &lockClient{cc} +} + +func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) { + out := new(LockResponse) + err := grpc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) { + out := new(UnlockResponse) + err := grpc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Lock service + +type LockServer interface { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership. The lock is held until Unlock is called on the key or the + // lease associated with the owner expires. + Lock(context.Context, *LockRequest) (*LockResponse, error) + // Unlock takes a key returned by Lock and releases the hold on lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. 
+ Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error) +} + +func RegisterLockServer(s *grpc.Server, srv LockServer) { + s.RegisterService(&_Lock_serviceDesc, srv) +} + +func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LockServer).Lock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3lockpb.Lock/Lock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LockServer).Lock(ctx, req.(*LockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UnlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LockServer).Unlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3lockpb.Lock/Unlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Lock_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v3lockpb.Lock", + HandlerType: (*LockServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Lock", + Handler: _Lock_Lock_Handler, + }, + { + MethodName: "Unlock", + Handler: _Lock_Unlock_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "v3lock.proto", +} + +func (m *LockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int 
+ _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Lease != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease)) + } + return i, nil +} + +func (m *LockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + return i, nil +} + +func (m *UnlockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + return i, nil +} + +func (m *UnlockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeFixed64V3Lock(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = 
uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32V3Lock(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *LockRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Lock(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovV3Lock(uint64(m.Lease)) + } + return n +} + +func (m *LockResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Lock(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovV3Lock(uint64(l)) + } + return n +} + +func (m *UnlockRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovV3Lock(uint64(l)) + } + return n +} + +func (m *UnlockResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Lock(uint64(l)) + } + return n +} + +func sovV3Lock(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozV3Lock(x uint64) (n int) { + return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UnlockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnlockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UnlockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
+ if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipV3Lock(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthV3Lock + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipV3Lock(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow") +) + 
+func init() { proto.RegisterFile("v3lock.proto", fileDescriptorV3Lock) } + +var fileDescriptorV3Lock = []byte{ + // 336 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9, + 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44, + 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39, + 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, + 0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13, + 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc, + 0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, + 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b, + 0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6, + 0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a, + 0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f, + 0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41, + 0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a, + 0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x17, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42, + 0x21, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31, + 0x4d, 0x49, 0xb6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0xe2, 0x4a, 0x42, 0xfa, 0x65, 0xc6, 0x89, 0x39, + 0x05, 0x19, 0x89, 0xfa, 0x20, 0x55, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x86, 0x8b, 0x0d, 0xe2, + 0x4c, 0x21, 0x71, 0x84, 0x01, 0x28, 0x7e, 0x93, 0x92, 0xc0, 0x94, 0x80, 0x9a, 0x2d, 0x0f, 0x36, + 0x5b, 0x52, 0x49, 0x04, 0xd5, 0xec, 0xd2, 0x3c, 0xa8, 0xe9, 0x4e, 0x02, 
0x27, 0x1e, 0xc9, 0x31, + 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0, + 0x18, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xa0, 0x26, 0x28, 0x47, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto new file mode 100644 index 00000000000..3e92a6ec277 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; +package v3lockpb; + +import "gogoproto/gogo.proto"; +import "etcd/etcdserver/etcdserverpb/rpc.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// The lock service exposes client-side locking facilities as a gRPC interface. +service Lock { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership. The lock is held until Unlock is called on the key or the + // lease associate with the owner expires. + rpc Lock(LockRequest) returns (LockResponse) { + option (google.api.http) = { + post: "/v3alpha/lock/lock" + body: "*" + }; + } + + // Unlock takes a key returned by Lock and releases the hold on lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. + rpc Unlock(UnlockRequest) returns (UnlockResponse) { + option (google.api.http) = { + post: "/v3alpha/lock/unlock" + body: "*" + }; + } +} + +message LockRequest { + // name is the identifier for the distributed shared lock to be acquired. + bytes name = 1; + // lease is the ID of the lease that will be attached to ownership of the + // lock. 
If the lease expires or is revoked and currently holds the lock, + // the lock is automatically released. Calls to Lock with the same lease will + // be treated as a single acquisition; locking twice with the same lease is a + // no-op. + int64 lease = 2; +} + +message LockResponse { + etcdserverpb.ResponseHeader header = 1; + // key is a key that will exist on etcd for the duration that the Lock caller + // owns the lock. Users should not modify this key or the lock may exhibit + // undefined behavior. + bytes key = 2; +} + +message UnlockRequest { + // key is the lock ownership key granted by Lock. + bytes key = 1; +} + +message UnlockResponse { + etcdserverpb.ResponseHeader header = 1; +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go index 88174e3bac2..5333491a2e2 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go @@ -16,6 +16,10 @@ package v3rpc import ( "crypto/tls" + "io/ioutil" + "math" + "os" + "sync" "github.com/coreos/etcd/etcdserver" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -24,11 +28,16 @@ import ( "google.golang.org/grpc/grpclog" ) -func init() { - grpclog.SetLogger(plog) -} +const ( + grpcOverheadBytes = 512 * 1024 + maxStreams = math.MaxUint32 + maxSendBytes = math.MaxInt32 +) -func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { +// integration tests call this multiple times, which is racey in gRPC side +var grpclogOnce sync.Once + +func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server { var opts []grpc.ServerOption opts = append(opts, grpc.CustomCodec(&codec{})) if tls != nil { @@ -36,8 +45,11 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { } opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) + opts 
= append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) + opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes)) + opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) + grpcServer := grpc.NewServer(append(opts, gopts...)...) - grpcServer := grpc.NewServer(opts...) pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) @@ -45,5 +57,15 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) + grpclogOnce.Do(func() { + if s.Cfg.Debug { + grpc.EnableTracing = true + // enable info, warning, error + grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)) + } else { + // only discard info + grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr)) + } + }) return grpcServer } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go index 29aef2914a5..de9470a8905 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go @@ -45,7 +45,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { return nil, rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromIncomingContext(ctx) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { @@ -66,7 +66,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor return rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromContext(ss.Context()) + md, ok := metadata.FromIncomingContext(ss.Context()) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) 
> 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go index 6ea7bbacde0..d0220e03a26 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go @@ -134,6 +134,12 @@ func checkPutRequest(r *pb.PutRequest) error { if len(r.Key) == 0 { return rpctypes.ErrGRPCEmptyKey } + if r.IgnoreValue && len(r.Value) != 0 { + return rpctypes.ErrGRPCValueProvided + } + if r.IgnoreLease && r.Lease != 0 { + return rpctypes.ErrGRPCLeaseProvided + } return nil } @@ -246,8 +252,8 @@ func checkRequestOp(u *pb.RequestOp) error { return checkDeleteRequest(uv.RequestDeleteRange) } default: - // empty op - return nil + // empty op / nil entry + return rpctypes.ErrGRPCKeyNotFound } return nil } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go index be6e20b97fb..91618d115fc 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go @@ -18,6 +18,7 @@ import ( "io" "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/lease" "golang.org/x/net/context" @@ -53,20 +54,45 @@ func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeReques func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { resp, err := ls.le.LeaseTimeToLive(ctx, rr) - if err != nil { + if err != nil && err != lease.ErrLeaseNotFound { return nil, togRPCError(err) } + if err == lease.ErrLeaseNotFound { + resp = &pb.LeaseTimeToLiveResponse{ + Header: &pb.ResponseHeader{}, + ID: rr.ID, + TTL: -1, + } + } ls.hdr.fill(resp.Header) return 
resp, nil } -func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { +func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) { + errc := make(chan error, 1) + go func() { + errc <- ls.leaseKeepAlive(stream) + }() + select { + case err = <-errc: + case <-stream.Context().Done(): + // the only server-side cancellation is noleader for now. + err = stream.Context().Err() + if err == context.Canceled { + err = rpctypes.ErrGRPCNoLeader + } + } + return err +} + +func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { for { req, err := stream.Recv() if err == io.EOF { return nil } if err != nil { + plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) return err } @@ -92,6 +118,7 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro resp.TTL = ttl err = stream.Send(resp) if err != nil { + plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) return err } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go index af29ab3b71e..3657d036082 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go @@ -47,6 +47,7 @@ type RaftStatusGetter interface { } type AuthGetter interface { + AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) AuthStore() auth.AuthStore } @@ -152,7 +153,7 @@ type authMaintenanceServer struct { } func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error { - authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx) + authInfo, err := ams.ag.AuthInfoFromCtx(ctx) if err != nil { return err } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go index bcd5dac5183..91a59389b87 
100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go @@ -48,21 +48,24 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) now := time.Now() m := membership.NewMember("", urls, "", &now) - if err = cs.server.AddMember(ctx, *m); err != nil { - return nil, togRPCError(err) + membs, merr := cs.server.AddMember(ctx, *m) + if merr != nil { + return nil, togRPCError(merr) } return &pb.MemberAddResponse{ - Header: cs.header(), - Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, + Header: cs.header(), + Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, + Members: membersToProtoMembers(membs), }, nil } func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - if err := cs.server.RemoveMember(ctx, r.ID); err != nil { + membs, err := cs.server.RemoveMember(ctx, r.ID) + if err != nil { return nil, togRPCError(err) } - return &pb.MemberRemoveResponse{Header: cs.header()}, nil + return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil } func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { @@ -70,15 +73,23 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq ID: types.ID(r.ID), RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs}, } - if err := cs.server.UpdateMember(ctx, m); err != nil { + membs, err := cs.server.UpdateMember(ctx, m) + if err != nil { return nil, togRPCError(err) } - return &pb.MemberUpdateResponse{Header: cs.header()}, nil + return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil } func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - membs := cs.cluster.Members() + membs := 
membersToProtoMembers(cs.cluster.Members()) + return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil +} +func (cs *ClusterServer) header() *pb.ResponseHeader { + return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} +} + +func membersToProtoMembers(membs []*membership.Member) []*pb.Member { protoMembs := make([]*pb.Member, len(membs)) for i := range membs { protoMembs[i] = &pb.Member{ @@ -88,10 +99,5 @@ func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest ClientURLs: membs[i].ClientURLs, } } - - return &pb.MemberListResponse{Header: cs.header(), Members: protoMembs}, nil -} - -func (cs *ClusterServer) header() *pb.ResponseHeader { - return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} + return protoMembs } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD index 38cd71455d5..e1ada36c303 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD @@ -12,6 +12,7 @@ go_library( deps = [ "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go index 5a3cfc0a0db..bd17179e997 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -17,16 +17,20 @@ package rpctypes import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) var ( // server-side error - ErrGRPCEmptyKey = 
grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") - ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") - ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") - ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") - ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") - ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") + ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") + ErrGRPCKeyNotFound = grpc.Errorf(codes.InvalidArgument, "etcdserver: key not found") + ErrGRPCValueProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: value is provided") + ErrGRPCLeaseProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: lease is provided") + ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") + ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") + ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") + ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") + ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found") ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists") @@ -53,6 +57,7 @@ var ( ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role") ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled") ErrGRPCInvalidAuthToken = 
grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token") + ErrGRPCInvalidAuthMgmt = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management") ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") @@ -63,7 +68,11 @@ var ( ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster") errStringToError = map[string]error{ - grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + grpc.ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, + grpc.ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, + grpc.ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, + grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, @@ -95,6 +104,7 @@ var ( grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, + grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, @@ -106,12 +116,15 @@ var ( } // client-side error - ErrEmptyKey = Error(ErrGRPCEmptyKey) - ErrTooManyOps = Error(ErrGRPCTooManyOps) - ErrDuplicateKey = Error(ErrGRPCDuplicateKey) - ErrCompacted = Error(ErrGRPCCompacted) - ErrFutureRev = Error(ErrGRPCFutureRev) - ErrNoSpace = Error(ErrGRPCNoSpace) + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrKeyNotFound = Error(ErrGRPCKeyNotFound) + ErrValueProvided = Error(ErrGRPCValueProvided) + ErrLeaseProvided = Error(ErrGRPCLeaseProvided) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = 
Error(ErrGRPCNoSpace) ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) ErrLeaseExist = Error(ErrGRPCLeaseExist) @@ -138,6 +151,7 @@ var ( ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) + ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) ErrNoLeader = Error(ErrGRPCNoLeader) ErrNotCapable = Error(ErrGRPCNotCapable) @@ -175,3 +189,10 @@ func Error(err error) error { } return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)} } + +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go index 5a057ed040d..8d38d9bd18f 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go @@ -42,8 +42,6 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCCompacted case mvcc.ErrFutureRev: return rpctypes.ErrGRPCFutureRev - case lease.ErrLeaseNotFound: - return rpctypes.ErrGRPCLeaseNotFound case etcdserver.ErrRequestTooLarge: return rpctypes.ErrGRPCRequestTooLarge case etcdserver.ErrNoSpace: @@ -63,6 +61,8 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCTimeoutDueToConnectionLost case etcdserver.ErrUnhealthy: return rpctypes.ErrGRPCUnhealthy + case etcdserver.ErrKeyNotFound: + return rpctypes.ErrGRPCKeyNotFound case lease.ErrLeaseNotFound: return rpctypes.ErrGRPCLeaseNotFound @@ -95,6 +95,8 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCAuthNotEnabled case auth.ErrInvalidAuthToken: return rpctypes.ErrGRPCInvalidAuthToken + case auth.ErrInvalidAuthMgmt: + return rpctypes.ErrGRPCInvalidAuthMgmt default: return grpc.Errorf(codes.Unknown, err.Error()) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go 
b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go index f0215531dee..cd2adf98453 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/context" + "github.com/coreos/etcd/auth" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -33,6 +34,8 @@ type watchServer struct { memberID int64 raftTimer etcdserver.RaftTimer watchable mvcc.WatchableKV + + ag AuthGetter } func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { @@ -41,6 +44,7 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { memberID: int64(s.ID()), raftTimer: s, watchable: s.Watchable(), + ag: s, } } @@ -101,6 +105,8 @@ type serverWatchStream struct { // wg waits for the send loop to complete wg sync.WaitGroup + + ag AuthGetter } func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { @@ -118,6 +124,8 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { progress: make(map[mvcc.WatchID]bool), prevKV: make(map[mvcc.WatchID]bool), closec: make(chan struct{}), + + ag: ws.ag, } sws.wg.Add(1) @@ -133,6 +141,7 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { // deadlock when calling sws.close(). 
go func() { if rerr := sws.recvLoop(); rerr != nil { + plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) errc <- rerr } }() @@ -150,6 +159,19 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { return err } +func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool { + authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) + if err != nil { + return false + } + if authInfo == nil { + // if auth is enabled, IsRangePermitted() can cause an error + authInfo = &auth.AuthInfo{} + } + + return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil +} + func (sws *serverWatchStream) recvLoop() error { for { req, err := sws.gRPCStream.Recv() @@ -171,10 +193,32 @@ func (sws *serverWatchStream) recvLoop() error { // \x00 is the smallest key creq.Key = []byte{0} } + if len(creq.RangeEnd) == 0 { + // force nil since watchstream.Watch distinguishes + // between nil and []byte{} for single key / >= + creq.RangeEnd = nil + } if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 { // support >= key queries creq.RangeEnd = []byte{} } + + if !sws.isWatchPermitted(creq) { + wr := &pb.WatchResponse{ + Header: sws.newResponseHeader(sws.watchStream.Rev()), + WatchId: -1, + Canceled: true, + Created: true, + CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(), + } + + select { + case sws.ctrlStream <- wr: + case <-sws.closec: + } + return nil + } + filters := FiltersFromRequest(creq) wsrev := sws.watchStream.Rev() @@ -294,6 +338,7 @@ func (sws *serverWatchStream) sendLoop() { mvcc.ReportEventReceived(len(evs)) if err := sws.gRPCStream.Send(wr); err != nil { + plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error()) return } @@ -310,6 +355,7 @@ func (sws *serverWatchStream) sendLoop() { } if err := sws.gRPCStream.Send(c); err != nil { + plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error()) return } @@ -325,6 +371,7 @@ func (sws 
*serverWatchStream) sendLoop() { for _, v := range pending[wid] { mvcc.ReportEventReceived(len(v.Events)) if err := sws.gRPCStream.Send(v); err != nil { + plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error()) return } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go index e4bf35bc47e..0be93c52b6f 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply.go +++ b/vendor/github.com/coreos/etcd/etcdserver/apply.go @@ -16,7 +16,6 @@ package etcdserver import ( "bytes" - "fmt" "sort" "time" @@ -30,11 +29,6 @@ import ( ) const ( - // noTxn is an invalid txn ID. - // To apply with independent Range, Put, Delete, you can pass noTxn - // to apply functions instead of a valid txn ID. - noTxn = -1 - warnApplyDuration = 100 * time.Millisecond ) @@ -51,9 +45,9 @@ type applyResult struct { type applierV3 interface { Apply(r *pb.InternalRaftRequest) *applyResult - Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) - Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) - DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) + Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) + Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) + DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) @@ -99,11 +93,11 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls switch { case r.Range != nil: - ar.resp, ar.err = a.s.applyV3.Range(noTxn, r.Range) + ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range) case r.Put != nil: - ar.resp, ar.err = a.s.applyV3.Put(noTxn, r.Put) + ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put) case 
r.DeleteRange != nil: - ar.resp, ar.err = a.s.applyV3.DeleteRange(noTxn, r.DeleteRange) + ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) case r.Txn != nil: ar.resp, ar.err = a.s.applyV3.Txn(r.Txn) case r.Compaction != nil: @@ -152,106 +146,87 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { return ar } -func (a *applierV3backend) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { - resp := &pb.PutResponse{} +func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) { + resp = &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} - var ( - rev int64 - err error - ) - var rr *mvcc.RangeResult - if p.PrevKv { - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, p.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(p.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } - } - - if txnID != noTxn { - rev, err = a.s.KV().TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease)) - if err != nil { - return nil, err - } - } else { - leaseID := lease.LeaseID(p.Lease) + val, leaseID := p.Value, lease.LeaseID(p.Lease) + if txn == nil { if leaseID != lease.NoLease { if l := a.s.lessor.Lookup(leaseID); l == nil { return nil, lease.ErrLeaseNotFound } } - rev = a.s.KV().Put(p.Key, p.Value, leaseID) + txn = a.s.KV().Write() + defer txn.End() } - resp.Header.Revision = rev - if rr != nil && len(rr.KVs) != 0 { - resp.PrevKv = &rr.KVs[0] + + var rr *mvcc.RangeResult + if p.IgnoreValue || p.IgnoreLease || p.PrevKv { + rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } } + if p.IgnoreValue || p.IgnoreLease { + if rr == nil || len(rr.KVs) == 0 { + // ignore_{lease,value} flag expects previous key-value pair + return nil, ErrKeyNotFound + } + } + if p.IgnoreValue { + val = rr.KVs[0].Value + } + if p.IgnoreLease { + leaseID = lease.LeaseID(rr.KVs[0].Lease) + } + if p.PrevKv 
{ + if rr != nil && len(rr.KVs) != 0 { + resp.PrevKv = &rr.KVs[0] + } + } + + resp.Header.Revision = txn.Put(p.Key, val, leaseID) return resp, nil } -func (a *applierV3backend) DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { +func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { resp := &pb.DeleteRangeResponse{} resp.Header = &pb.ResponseHeader{} - var ( - n int64 - rev int64 - err error - ) + if txn == nil { + txn = a.s.kv.Write() + defer txn.End() + } if isGteRange(dr.RangeEnd) { dr.RangeEnd = []byte{} } - var rr *mvcc.RangeResult if dr.PrevKv { - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } - } - - if txnID != noTxn { - n, rev, err = a.s.KV().TxnDeleteRange(txnID, dr.Key, dr.RangeEnd) + rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) if err != nil { return nil, err } - } else { - n, rev = a.s.KV().DeleteRange(dr.Key, dr.RangeEnd) - } - - resp.Deleted = n - if rr != nil { - for i := range rr.KVs { - resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) + if rr != nil { + for i := range rr.KVs { + resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) + } } } - resp.Header.Revision = rev + + resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd) return resp, nil } -func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { resp := &pb.RangeResponse{} resp.Header = &pb.ResponseHeader{} - var ( - rr *mvcc.RangeResult - err error - ) + if txn == nil { + txn = a.s.kv.Read() + defer txn.End() + } if isGteRange(r.RangeEnd) { r.RangeEnd = []byte{} @@ -275,16 +250,9 @@ func (a *applierV3backend) 
Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp Count: r.CountOnly, } - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err - } + rr, err := txn.Range(r.Key, r.RangeEnd, ro) + if err != nil { + return nil, err } if r.MaxModRevision != 0 { @@ -350,61 +318,64 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp } func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - ok := true - for _, c := range rt.Compare { - if _, ok = a.applyCompare(c); !ok { - break + isWrite := !isTxnReadonly(rt) + txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read()) + + reqs, ok := a.compareToOps(txn, rt) + if isWrite { + if err := a.checkRequestPut(txn, reqs); err != nil { + txn.End() + return nil, err } } - - var reqs []*pb.RequestOp - if ok { - reqs = rt.Success - } else { - reqs = rt.Failure - } - - if err := a.checkRequestLeases(reqs); err != nil { + if err := checkRequestRange(txn, reqs); err != nil { + txn.End() return nil, err } - if err := a.checkRequestRange(reqs); err != nil { - return nil, err - } - - // When executing the operations of txn, we need to hold the txn lock. - // So the reader will not see any intermediate results. - txnID := a.s.KV().TxnBegin() resps := make([]*pb.ResponseOp, len(reqs)) + txnResp := &pb.TxnResponse{ + Responses: resps, + Succeeded: ok, + Header: &pb.ResponseHeader{}, + } + + // When executing mutable txn ops, etcd must hold the txn lock so + // readers do not see any intermediate results. Since writes are + // serialized on the raft loop, the revision in the read view will + // be the revision of the write txn. 
+ if isWrite { + txn.End() + txn = a.s.KV().Write() + } for i := range reqs { - resps[i] = a.applyUnion(txnID, reqs[i]) + resps[i] = a.applyUnion(txn, reqs[i]) } - - err := a.s.KV().TxnEnd(txnID) - if err != nil { - panic(fmt.Sprint("unexpected error when closing txn", txnID)) + rev := txn.Rev() + if len(txn.Changes()) != 0 { + rev++ } + txn.End() - txnResp := &pb.TxnResponse{} - txnResp.Header = &pb.ResponseHeader{} - txnResp.Header.Revision = a.s.KV().Rev() - txnResp.Responses = resps - txnResp.Succeeded = ok + txnResp.Header.Revision = rev return txnResp, nil } -// applyCompare applies the compare request. -// It returns the revision at which the comparison happens. If the comparison -// succeeds, the it returns true. Otherwise it returns false. -func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { - rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{}) - rev := rr.Rev - - if err != nil { - if err == mvcc.ErrTxnIDMismatch { - panic("unexpected txn ID mismatch error") +func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) { + for _, c := range rt.Compare { + if !applyCompare(rv, c) { + return rt.Failure, false } - return rev, false + } + return rt.Success, true +} + +// applyCompare applies the compare request. +// If the comparison succeeds, it returns true. Otherwise, returns false. 
+func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { + rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return false } var ckv mvccpb.KeyValue if len(rr.KVs) != 0 { @@ -416,7 +387,7 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { // We can treat non-existence as the empty set explicitly, such that // even a key with a value of length 0 bytes is still a real key // that was written that way - return rev, false + return false } } @@ -448,30 +419,22 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { switch c.Result { case pb.Compare_EQUAL: - if result != 0 { - return rev, false - } + return result == 0 case pb.Compare_NOT_EQUAL: - if result == 0 { - return rev, false - } + return result != 0 case pb.Compare_GREATER: - if result != 1 { - return rev, false - } + return result > 0 case pb.Compare_LESS: - if result != -1 { - return rev, false - } + return result < 0 } - return rev, true + return true } -func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.ResponseOp { +func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp { switch tv := union.Request.(type) { case *pb.RequestOp_RequestRange: if tv.RequestRange != nil { - resp, err := a.Range(txnID, tv.RequestRange) + resp, err := a.Range(txn, tv.RequestRange) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -479,7 +442,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp } case *pb.RequestOp_RequestPut: if tv.RequestPut != nil { - resp, err := a.Put(txnID, tv.RequestPut) + resp, err := a.Put(txn, tv.RequestPut) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -487,7 +450,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp } case *pb.RequestOp_RequestDeleteRange: if tv.RequestDeleteRange != nil { - resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange) + resp, err := 
a.DeleteRange(txn, tv.RequestDeleteRange) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -588,7 +551,7 @@ type applierV3Capped struct { // with Puts so that the number of keys in the store is capped. func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } -func (a *applierV3Capped) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { +func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { return nil, ErrNoSpace } @@ -617,7 +580,7 @@ func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { } func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { - ctx := context.WithValue(context.WithValue(context.Background(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) + ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) if resp != nil { resp.Header = newHeader(a.s) @@ -738,9 +701,9 @@ func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { return "aApplierV3{app, NewBackendQuota(s)} } -func (a *quotaApplierV3) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { +func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { ok := a.q.Available(p) - resp, err := a.applierV3.Put(txnID, p) + resp, err := a.applierV3.Put(txn, p) if err == nil && !ok { err = ErrNoSpace } @@ -804,14 +767,27 @@ func (s *kvSortByValue) Less(i, j int) bool { return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 } -func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error { +func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error { for _, requ := range reqs { tv, ok := requ.Request.(*pb.RequestOp_RequestPut) if !ok { continue } preq := tv.RequestPut 
- if preq == nil || lease.LeaseID(preq.Lease) == lease.NoLease { + if preq == nil { + continue + } + if preq.IgnoreValue || preq.IgnoreLease { + // expects previous key-value, error if not exist + rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return err + } + if rr == nil || len(rr.KVs) == 0 { + return ErrKeyNotFound + } + } + if lease.LeaseID(preq.Lease) == lease.NoLease { continue } if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil { @@ -821,7 +797,7 @@ func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error { return nil } -func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error { +func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error { for _, requ := range reqs { tv, ok := requ.Request.(*pb.RequestOp_RequestRange) if !ok { @@ -832,10 +808,10 @@ func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error { continue } - if greq.Revision > a.s.KV().Rev() { + if greq.Revision > rv.Rev() { return mvcc.ErrFutureRev } - if greq.Revision < a.s.KV().FirstRev() { + if greq.Revision < rv.FirstRev() { return mvcc.ErrCompacted } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go index 4868e855ca1..7da4ae45df5 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go +++ b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go @@ -19,6 +19,7 @@ import ( "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc" ) type authApplierV3 struct { @@ -58,7 +59,7 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { return ret } -func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) { +func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) { if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { return nil, err } @@ -68,17 +69,17 @@ func (aa 
*authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, er return nil, err } } - return aa.applierV3.Put(txnID, r) + return aa.applierV3.Put(txn, r) } -func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { return nil, err } - return aa.applierV3.Range(txnID, r) + return aa.applierV3.Range(txn, r) } -func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { +func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { return nil, err } @@ -89,7 +90,7 @@ func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb } } - return aa.applierV3.DeleteRange(txnID, r) + return aa.applierV3.DeleteRange(txn, r) } func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go new file mode 100644 index 00000000000..c5e2dabf3e7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/backend.go @@ -0,0 +1,81 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "fmt" + "os" + "time" + + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" +) + +func newBackend(cfg *ServerConfig) backend.Backend { + bcfg := backend.DefaultBackendConfig() + bcfg.Path = cfg.backendPath() + if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { + // permit 10% excess over quota for disarm + bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) + } + return backend.New(bcfg) +} + +// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. +func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { + snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) + if err != nil { + return nil, fmt.Errorf("database snapshot file path error: %v", err) + } + if err := os.Rename(snapPath, cfg.backendPath()); err != nil { + return nil, fmt.Errorf("rename snapshot file error: %v", err) + } + return openBackend(cfg), nil +} + +// openBackend returns a backend using the current etcd db. +func openBackend(cfg *ServerConfig) backend.Backend { + fn := cfg.backendPath() + beOpened := make(chan backend.Backend) + go func() { + beOpened <- newBackend(cfg) + }() + select { + case be := <-beOpened: + return be + case <-time.After(time.Second): + plog.Warningf("another etcd process is using %q and holds the file lock.", fn) + plog.Warningf("waiting for it to exit before starting...") + } + return <-beOpened +} + +// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes +// before updating the backend db after persisting raft snapshot to disk, +// violating the invariant snapshot.Metadata.Index < db.consistentIndex. 
In this +// case, replace the db with the snapshot db sent by the leader. +func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { + var cIndex consistentIndex + kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex) + defer kv.Close() + if snapshot.Metadata.Index <= kv.ConsistentIndex() { + return oldbe, nil + } + oldbe.Close() + return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go index fa84ffae630..f44862a4638 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go @@ -23,7 +23,6 @@ import ( "time" "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/httputil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" @@ -241,15 +240,6 @@ func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) continue } - // etcd 2.0 does not have version endpoint on peer url. - if resp.StatusCode == http.StatusNotFound { - httputil.GracefulClose(resp) - return &version.Versions{ - Server: "2.0.0", - Cluster: "2.0.0", - }, nil - } - var b []byte b, err = ioutil.ReadAll(resp.Body) resp.Body.Close() diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go index 9bcac0f076b..ae8a4d08e35 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/config.go +++ b/vendor/github.com/coreos/etcd/etcdserver/config.go @@ -55,10 +55,17 @@ type ServerConfig struct { AutoCompactionRetention int QuotaBackendBytes int64 + // MaxRequestBytes is the maximum request size to send over raft. 
+ MaxRequestBytes uint + StrictReconfigCheck bool // ClientCertAuthEnabled is true when cert has been signed by the client CA. ClientCertAuthEnabled bool + + AuthToken string + + Debug bool } // VerifyBootstrap sanity-checks the initial config for bootstrap case @@ -198,3 +205,5 @@ func (c *ServerConfig) bootstrapTimeout() time.Duration { } return time.Second } + +func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") } diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go index 5edc155624b..ed749dbe8d8 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/errors.go +++ b/vendor/github.com/coreos/etcd/etcdserver/errors.go @@ -33,6 +33,7 @@ var ( ErrNoSpace = errors.New("etcdserver: no space") ErrTooManyRequests = errors.New("etcdserver: too many requests") ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") + ErrKeyNotFound = errors.New("etcdserver: key not found") ) type DiscoveryError struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD index 0c1db78018e..4476d65ab94 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD @@ -16,7 +16,6 @@ go_library( "etcdserver.pb.go", "raft_internal.pb.go", "rpc.pb.go", - "rpc.pb.gw.go", ], importpath = "github.com/coreos/etcd/etcdserver/etcdserverpb", visibility = ["//visibility:public"], @@ -24,12 +23,9 @@ go_library( "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", + 
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", ], ) @@ -42,7 +38,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go index f34bedf3ed3..aabf90061f6 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -1018,7 +1018,7 @@ func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } var fileDescriptorEtcdserver = []byte{ // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD new file mode 100644 index 00000000000..0b8e37503da --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["rpc.pb.gw.go"], + importpath = 
"github.com/coreos/etcd/etcdserver/etcdserverpb/gw", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", + "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go similarity index 69% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go rename to vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go index 473ad582ef8..02a23b78c10 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go @@ -1,15 +1,15 @@ -// Code generated by protoc-gen-grpc-gateway +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: etcdserver/etcdserverpb/rpc.proto -// DO NOT EDIT! /* Package etcdserverpb is a reverse proxy. It translates gRPC into RESTful JSON APIs. 
*/ -package etcdserverpb +package gw import ( + "github.com/coreos/etcd/etcdserver/etcdserverpb" "io" "net/http" @@ -20,19 +20,21 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader +var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RangeRequest +func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.RangeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -40,12 +42,12 @@ func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client } -func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PutRequest +func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.PutRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, 
"%v", err) } msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -53,12 +55,12 @@ func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client K } -func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRangeRequest +func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.DeleteRangeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -66,12 +68,12 @@ func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq TxnRequest +func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.TxnRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ 
-79,12 +81,12 @@ func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client K } -func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CompactionRequest +func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.CompactionRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -92,7 +94,7 @@ func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, clie } -func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client WatchClient, req *http.Request, pathParams map[string]string) (Watch_WatchClient, runtime.ServerMetadata, error) { +func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata stream, err := client.Watch(ctx) if err != nil { @@ -101,7 +103,7 @@ func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, cli } dec := marshaler.NewDecoder(req.Body) handleSend := func() error { - var protoReq WatchRequest + var protoReq etcdserverpb.WatchRequest err = dec.Decode(&protoReq) if err == io.EOF { return err @@ -144,12 +146,12 @@ func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, cli return stream, metadata, nil } -func 
request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseGrantRequest +func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseGrantRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -157,12 +159,12 @@ func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseRevokeRequest +func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseRevokeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -170,7 +172,7 @@ func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshale } -func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler 
runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { +func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata stream, err := client.LeaseKeepAlive(ctx) if err != nil { @@ -179,7 +181,7 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh } dec := marshaler.NewDecoder(req.Body) handleSend := func() error { - var protoReq LeaseKeepAliveRequest + var protoReq etcdserverpb.LeaseKeepAliveRequest err = dec.Decode(&protoReq) if err == io.EOF { return err @@ -222,12 +224,12 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh return stream, metadata, nil } -func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseTimeToLiveRequest +func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseTimeToLiveRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -235,12 +237,12 @@ func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Mars } -func request_Cluster_MemberAdd_0(ctx context.Context, marshaler 
runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberAddRequest +func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -248,12 +250,12 @@ func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshale } -func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberRemoveRequest +func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberRemoveRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -261,12 +263,12 @@ func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marsh } -func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, 
req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberUpdateRequest +func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberUpdateRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -274,12 +276,12 @@ func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marsh } -func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberListRequest +func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -287,12 +289,12 @@ func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshal } -func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AlarmRequest +func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AlarmRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -300,12 +302,12 @@ func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshale } -func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest +func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.StatusRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -313,12 +315,12 @@ func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshal } -func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, 
error) { - var protoReq DefragmentRequest +func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.DefragmentRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -326,12 +328,12 @@ func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Mar } -func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HashRequest +func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.HashRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -339,12 +341,12 @@ func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (Maintenance_SnapshotClient, runtime.ServerMetadata, error) { - var protoReq 
SnapshotRequest +func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.SnapshotRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.Snapshot(ctx, &protoReq) @@ -360,12 +362,12 @@ func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marsh } -func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthEnableRequest +func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthEnableRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -373,12 +375,12 @@ func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthDisableRequest +func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, 
client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthDisableRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -386,12 +388,12 @@ func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthenticateRequest +func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthenticateRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -399,12 +401,12 @@ func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshale } -func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserAddRequest +func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -412,12 +414,12 @@ func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserGetRequest +func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserGetRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -425,12 +427,12 @@ func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserListRequest +func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq 
etcdserverpb.AuthUserListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -438,12 +440,12 @@ func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, c } -func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserDeleteRequest +func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserDeleteRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -451,12 +453,12 @@ func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserChangePasswordRequest +func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq 
etcdserverpb.AuthUserChangePasswordRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -464,12 +466,12 @@ func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Ma } -func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserGrantRoleRequest +func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserGrantRoleRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -477,12 +479,12 @@ func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshal } -func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserRevokeRoleRequest +func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq 
etcdserverpb.AuthUserRevokeRoleRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -490,12 +492,12 @@ func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marsha } -func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleAddRequest +func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -503,12 +505,12 @@ func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleGetRequest +func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleGetRequest var metadata runtime.ServerMetadata if err 
:= marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -516,12 +518,12 @@ func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleListRequest +func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -529,12 +531,12 @@ func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, c } -func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleDeleteRequest +func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleDeleteRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - 
return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -542,12 +544,12 @@ func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleGrantPermissionRequest +func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleGrantPermissionRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -555,12 +557,12 @@ func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.M } -func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleRevokePermissionRequest +func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleRevokePermissionRequest var metadata runtime.ServerMetadata if err := 
marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -596,7 +598,15 @@ func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, e // RegisterKVHandler registers the http handlers for service KV to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewKVClient(conn) + return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn)) +} + +// RegisterKVHandler registers the http handlers for service KV to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "KVClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "KVClient" to call the correct interceptors. 
+func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error { mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -611,18 +621,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Range_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -639,18 +650,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Put_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -667,18 +679,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_DeleteRange_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -695,18 +708,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Txn_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -723,18 +737,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Compact_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -793,7 +808,15 @@ func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux // RegisterWatchHandler registers the http handlers for service Watch to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewWatchClient(conn) + return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn)) +} + +// RegisterWatchHandler registers the http handlers for service Watch to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "WatchClient" to call the correct interceptors. +func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error { mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -808,18 +831,19 @@ func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Watch_Watch_0(ctx, outboundMarshaler, w, 
req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -862,7 +886,15 @@ func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux // RegisterLeaseHandler registers the http handlers for service Lease to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewLeaseClient(conn) + return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn)) +} + +// RegisterLeaseHandler registers the http handlers for service Lease to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LeaseClient" to call the correct interceptors. 
+func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error { mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -877,18 +909,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseGrant_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -905,18 +938,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseRevoke_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -933,18 +967,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseKeepAlive_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -961,18 +996,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseTimeToLive_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1027,7 +1063,15 @@ func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeM // RegisterClusterHandler registers the http handlers for service Cluster to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewClusterClient(conn) + return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn)) +} + +// RegisterClusterHandler registers the http handlers for service Cluster to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) 
then it will be up to the passed in +// "ClusterClient" to call the correct interceptors. +func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error { mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1042,18 +1086,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1070,18 +1115,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberRemove_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1098,18 +1144,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberUpdate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1126,18 +1173,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1192,7 +1240,15 @@ func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.Se // RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewMaintenanceClient(conn) + return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn)) +} + +// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MaintenanceClient" to call the correct interceptors. 
+func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error { mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1207,18 +1263,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Alarm_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1235,18 +1292,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Status_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1263,18 +1321,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Defragment_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1291,18 +1350,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Hash_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1319,18 +1379,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Snapshot_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1389,7 +1450,15 @@ func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, // RegisterAuthHandler registers the http handlers for service Auth to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewAuthClient(conn) + return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn)) +} + +// RegisterAuthHandler registers the http handlers for service Auth to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "AuthClient" to call the correct interceptors. +func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error { mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1404,18 +1473,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthEnable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1432,18 +1502,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthDisable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1460,18 +1531,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_Authenticate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1488,18 +1560,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1516,18 +1589,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1544,18 +1618,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1572,18 +1647,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1600,18 +1676,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserChangePassword_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1628,18 +1705,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserGrantRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1656,18 +1734,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserRevokeRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1684,18 +1763,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1712,18 +1792,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1740,18 +1821,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1768,18 +1850,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1796,18 +1879,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGrantPermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1824,18 +1908,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleRevokePermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go index 66890c93c44..44a3b6f69eb 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -2038,7 +2038,7 @@ func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftIntern var fileDescriptorRaftInternal = []byte{ // 837 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go index b28f2e50e3c..894c815f824 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -15,6 +15,8 @@ import ( authpb "github.com/coreos/etcd/auth/authpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + context "golang.org/x/net/context" grpc "google.golang.org/grpc" @@ -223,16 +225,45 @@ func (m *ResponseHeader) String() string { return proto.CompactTextSt func (*ResponseHeader) ProtoMessage() {} func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func 
(m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + type RangeRequest struct { // key is the first key for the range. If range_end is not given, the request only looks up key. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // range_end is the upper bound on the requested range [key, range_end). // If range_end is '\0', the range is all keys >= key. - // If the range_end is one bit larger than the given key, - // then the range requests get the all keys with the prefix (the given key). - // If both key and range_end are '\0', then range requests returns all keys. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // limit is a limit on the number of keys returned for the request. + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` // revision is the point-in-time of the key-value store to use for the range. // If revision is less or equal to zero, the range is over the newest key-value store. 
@@ -272,6 +303,97 @@ func (m *RangeRequest) String() string { return proto.CompactTextStri func (*RangeRequest) ProtoMessage() {} func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } +func (m *RangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m != nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + type RangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kvs is the list of key-value pairs matched by the range request. 
@@ -302,6 +424,20 @@ func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { return nil } +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + type PutRequest struct { // key is the key, in bytes, to put into the key-value store. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -313,6 +449,12 @@ type PutRequest struct { // If prev_kv is set, etcd gets the previous key-value pair before changing it. // The previous key-value pair will be returned in the put response. PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. 
+ IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` } func (m *PutRequest) Reset() { *m = PutRequest{} } @@ -320,6 +462,48 @@ func (m *PutRequest) String() string { return proto.CompactTextString func (*PutRequest) ProtoMessage() {} func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + type PutResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // if prev_kv is set in the request, the previous key-value pair will be returned. @@ -350,12 +534,12 @@ type DeleteRangeRequest struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // range_end is the key following the last key to delete for the range [key, range_end). // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all - // the all keys with the prefix (the given key). + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). // If range_end is '\0', the range is all keys greater than or equal to the key argument. 
RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delte response. + // The previous key-value pairs will be returned in the delete response. PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` } @@ -364,6 +548,27 @@ func (m *DeleteRangeRequest) String() string { return proto.CompactTe func (*DeleteRangeRequest) ProtoMessage() {} func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } +func (m *DeleteRangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + type DeleteRangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // deleted is the number of keys deleted by the delete range request. 
@@ -384,6 +589,13 @@ func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { return nil } +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { if m != nil { return m.PrevKvs @@ -754,6 +966,27 @@ func (m *Compare) GetTargetUnion() isCompare_TargetUnion { return nil } +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + func (m *Compare) GetVersion() int64 { if x, ok := m.GetTargetUnion().(*Compare_Version); ok { return x.Version @@ -950,6 +1183,13 @@ func (m *TxnResponse) GetHeader() *ResponseHeader { return nil } +func (m *TxnResponse) GetSucceeded() bool { + if m != nil { + return m.Succeeded + } + return false +} + func (m *TxnResponse) GetResponses() []*ResponseOp { if m != nil { return m.Responses @@ -973,6 +1213,20 @@ func (m *CompactionRequest) String() string { return proto.CompactTex func (*CompactionRequest) ProtoMessage() {} func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + type CompactionResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1015,6 +1269,13 @@ func (m *HashResponse) GetHeader() *ResponseHeader { return nil } +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + type SnapshotRequest struct { } @@ -1045,6 +1306,20 @@ func (m *SnapshotResponse) GetHeader() *ResponseHeader { return nil } +func (m 
*SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + type WatchRequest struct { // request_union is a request to either create a new watcher or cancel an existing watcher. // @@ -1198,6 +1473,48 @@ func (m *WatchCreateRequest) String() string { return proto.CompactTe func (*WatchCreateRequest) ProtoMessage() {} func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } +func (m *WatchCreateRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *WatchCreateRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *WatchCreateRequest) GetStartRevision() int64 { + if m != nil { + return m.StartRevision + } + return 0 +} + +func (m *WatchCreateRequest) GetProgressNotify() bool { + if m != nil { + return m.ProgressNotify + } + return false +} + +func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { + if m != nil { + return m.Filters + } + return nil +} + +func (m *WatchCreateRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + type WatchCancelRequest struct { // watch_id is the watcher id to cancel so that no more events are transmitted. WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` @@ -1208,6 +1525,13 @@ func (m *WatchCancelRequest) String() string { return proto.CompactTe func (*WatchCancelRequest) ProtoMessage() {} func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } +func (m *WatchCancelRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + type WatchResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // watch_id is the ID of the watcher that corresponds to the response. 
@@ -1228,8 +1552,10 @@ type WatchResponse struct { // // The client should treat the watcher as canceled and should not try to create any // watcher with the same start_revision again. - CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` + CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + // cancel_reason indicates the reason for canceling the watcher. + CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` } func (m *WatchResponse) Reset() { *m = WatchResponse{} } @@ -1244,6 +1570,41 @@ func (m *WatchResponse) GetHeader() *ResponseHeader { return nil } +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + func (m *WatchResponse) GetEvents() []*mvccpb.Event { if m != nil { return m.Events @@ -1263,6 +1624,20 @@ func (m *LeaseGrantRequest) String() string { return proto.CompactTex func (*LeaseGrantRequest) ProtoMessage() {} func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + 
return 0 +} + type LeaseGrantResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID for the granted lease. @@ -1284,6 +1659,27 @@ func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type LeaseRevokeRequest struct { // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1294,6 +1690,13 @@ func (m *LeaseRevokeRequest) String() string { return proto.CompactTe func (*LeaseRevokeRequest) ProtoMessage() {} func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseRevokeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1320,6 +1723,13 @@ func (m *LeaseKeepAliveRequest) String() string { return proto.Compac func (*LeaseKeepAliveRequest) ProtoMessage() {} func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseKeepAliveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. 
@@ -1340,6 +1750,20 @@ func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + type LeaseTimeToLiveRequest struct { // ID is the lease ID for the lease. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1352,6 +1776,20 @@ func (m *LeaseTimeToLiveRequest) String() string { return proto.Compa func (*LeaseTimeToLiveRequest) ProtoMessage() {} func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + type LeaseTimeToLiveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. @@ -1376,6 +1814,34 @@ func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + type Member struct { // ID is the member ID for this member. 
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1392,6 +1858,34 @@ func (m *Member) String() string { return proto.CompactTextString(m) func (*Member) ProtoMessage() {} func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + type MemberAddRequest struct { // peerURLs is the list of URLs the added member will use to communicate with the cluster. PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` @@ -1402,10 +1896,19 @@ func (m *MemberAddRequest) String() string { return proto.CompactText func (*MemberAddRequest) ProtoMessage() {} func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + type MemberAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // member is the member information for the added member. Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` + // members is a list of all members after adding the new member. + Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` } func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } @@ -1427,6 +1930,13 @@ func (m *MemberAddResponse) GetMember() *Member { return nil } +func (m *MemberAddResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + type MemberRemoveRequest struct { // ID is the member ID of the member to remove. 
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1437,8 +1947,17 @@ func (m *MemberRemoveRequest) String() string { return proto.CompactT func (*MemberRemoveRequest) ProtoMessage() {} func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + type MemberRemoveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after removing the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } @@ -1453,6 +1972,13 @@ func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { return nil } +func (m *MemberRemoveResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + type MemberUpdateRequest struct { // ID is the member ID of the member to update. ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1465,8 +1991,24 @@ func (m *MemberUpdateRequest) String() string { return proto.CompactT func (*MemberUpdateRequest) ProtoMessage() {} func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + type MemberUpdateResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after updating the member. 
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } @@ -1481,6 +2023,13 @@ func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { return nil } +func (m *MemberUpdateResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + type MemberListRequest struct { } @@ -1555,6 +2104,27 @@ func (m *AlarmRequest) String() string { return proto.CompactTextStri func (*AlarmRequest) ProtoMessage() {} func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + type AlarmMember struct { // memberID is the ID of the member associated with the raised alarm. MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` @@ -1567,6 +2137,20 @@ func (m *AlarmMember) String() string { return proto.CompactTextStrin func (*AlarmMember) ProtoMessage() {} func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + type AlarmResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // alarms is a list of alarms associated with the alarm request. 
@@ -1626,6 +2210,41 @@ func (m *StatusResponse) GetHeader() *ResponseHeader { return nil } +func (m *StatusResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *StatusResponse) GetDbSize() int64 { + if m != nil { + return m.DbSize + } + return 0 +} + +func (m *StatusResponse) GetLeader() uint64 { + if m != nil { + return m.Leader + } + return 0 +} + +func (m *StatusResponse) GetRaftIndex() uint64 { + if m != nil { + return m.RaftIndex + } + return 0 +} + +func (m *StatusResponse) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + type AuthEnableRequest struct { } @@ -1652,6 +2271,20 @@ func (m *AuthenticateRequest) String() string { return proto.CompactT func (*AuthenticateRequest) ProtoMessage() {} func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } +func (m *AuthenticateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthenticateRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserAddRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` @@ -1662,6 +2295,20 @@ func (m *AuthUserAddRequest) String() string { return proto.CompactTe func (*AuthUserAddRequest) ProtoMessage() {} func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } +func (m *AuthUserAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserAddRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserGetRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1671,6 +2318,13 @@ func (m *AuthUserGetRequest) String() string { return proto.CompactTe func (*AuthUserGetRequest) ProtoMessage() {} func 
(*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } +func (m *AuthUserGetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthUserDeleteRequest struct { // name is the name of the user to delete. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1681,6 +2335,13 @@ func (m *AuthUserDeleteRequest) String() string { return proto.Compac func (*AuthUserDeleteRequest) ProtoMessage() {} func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } +func (m *AuthUserDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthUserChangePasswordRequest struct { // name is the name of the user whose password is being changed. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1695,6 +2356,20 @@ func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } +func (m *AuthUserChangePasswordRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserGrantRoleRequest struct { // user is the name of the user which should be granted a given role. 
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` @@ -1707,6 +2382,20 @@ func (m *AuthUserGrantRoleRequest) String() string { return proto.Com func (*AuthUserGrantRoleRequest) ProtoMessage() {} func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } +func (m *AuthUserGrantRoleRequest) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AuthUserGrantRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthUserRevokeRoleRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` @@ -1717,6 +2406,20 @@ func (m *AuthUserRevokeRoleRequest) String() string { return proto.Co func (*AuthUserRevokeRoleRequest) ProtoMessage() {} func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } +func (m *AuthUserRevokeRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserRevokeRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthRoleAddRequest struct { // name is the name of the role to add to the authentication system. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1727,6 +2430,13 @@ func (m *AuthRoleAddRequest) String() string { return proto.CompactTe func (*AuthRoleAddRequest) ProtoMessage() {} func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } +func (m *AuthRoleAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthRoleGetRequest struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` } @@ -1736,6 +2446,13 @@ func (m *AuthRoleGetRequest) String() string { return proto.CompactTe func (*AuthRoleGetRequest) ProtoMessage() {} func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } +func (m *AuthRoleGetRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthUserListRequest struct { } @@ -1761,6 +2478,13 @@ func (m *AuthRoleDeleteRequest) String() string { return proto.Compac func (*AuthRoleDeleteRequest) ProtoMessage() {} func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } +func (m *AuthRoleDeleteRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthRoleGrantPermissionRequest struct { // name is the name of the role which will be granted the permission. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1775,6 +2499,13 @@ func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } +func (m *AuthRoleGrantPermissionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { if m != nil { return m.Perm @@ -1795,6 +2526,27 @@ func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } +func (m *AuthRoleRevokePermissionRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { + if m != nil { + return m.RangeEnd + } + return "" +} + type AuthEnableResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1845,6 +2597,13 @@ func (m *AuthenticateResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthenticateResponse) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + type AuthUserAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1878,6 +2637,13 @@ func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthUserGetResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + type AuthUserDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -2001,6 +2767,13 @@ func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthRoleListResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + type AuthUserListResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` 
Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` @@ -2018,6 +2791,13 @@ func (m *AuthUserListResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthUserListResponse) GetUsers() []string { + if m != nil { + return m.Users + } + return nil +} + type AuthRoleDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -3941,6 +4721,26 @@ func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.IgnoreValue { + dAtA[i] = 0x28 + i++ + if m.IgnoreValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreLease { + dAtA[i] = 0x30 + i++ + if m.IgnoreLease { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -4749,6 +5549,12 @@ func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) } + if len(m.CancelReason) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i += copy(dAtA[i:], m.CancelReason) + } if len(m.Events) > 0 { for _, msg := range m.Events { dAtA[i] = 0x5a @@ -5159,6 +5965,18 @@ func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { } i += n29 } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -5210,6 +6028,18 @@ func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { } i += n30 } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -5276,6 +6106,18 @@ func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { } i += n31 } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = 
encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -6610,6 +7452,12 @@ func (m *PutRequest) Size() (n int) { if m.PrevKv { n += 2 } + if m.IgnoreValue { + n += 2 + } + if m.IgnoreLease { + n += 2 + } return n } @@ -6973,6 +7821,10 @@ func (m *WatchResponse) Size() (n int) { if m.CompactRevision != 0 { n += 1 + sovRpc(uint64(m.CompactRevision)) } + l = len(m.CancelReason) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } if len(m.Events) > 0 { for _, e := range m.Events { l = e.Size() @@ -7143,6 +7995,12 @@ func (m *MemberAddResponse) Size() (n int) { l = m.Member.Size() n += 1 + l + sovRpc(uint64(l)) } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -7162,6 +8020,12 @@ func (m *MemberRemoveResponse) Size() (n int) { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -7187,6 +8051,12 @@ func (m *MemberUpdateResponse) Size() (n int) { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -8413,6 +9283,46 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } } m.PrevKv = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreValue = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) + } + var v int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreLease = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -10345,7 +11255,24 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.ProgressNotify = bool(v != 0) case 5: - if wireType == 2 { + if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -10386,23 +11313,6 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.Filters = append(m.Filters, v) } - } else if wireType == 0 { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) } else { return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) } @@ -10656,6 +11566,35 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) @@ -11876,6 +12815,37 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -12028,6 +12998,37 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ 
-12209,6 +13210,37 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -16041,218 +17073,221 @@ var ( func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } var fileDescriptorRpc = []byte{ - // 3401 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x5b, 0xcb, 0x73, 0x1b, 0xc7, - 0xd1, 0xe7, 0x02, 0x24, 0x40, 0x34, 0x1e, 0x84, 0x86, 0x94, 0x04, 0xae, 0x24, 0x8a, 0x1a, 0xbd, - 0x28, 0xc9, 0x26, 0x6d, 0xda, 0xdf, 0x77, 0xd0, 0xe7, 0x72, 0x7d, 0x14, 0x09, 0x8b, 0x0c, 0x29, - 0x52, 0x5e, 0x52, 0xb2, 0x53, 0xe5, 0x0a, 0x6a, 0x09, 0x8c, 0xc8, 0x2d, 0x02, 0xbb, 0xf0, 0xee, - 0x02, 0x22, 0x9d, 0xa4, 0x2a, 0xe5, 0xd8, 0x95, 0x4a, 0x8e, 0xf1, 0x21, 0xaf, 0x63, 0x2a, 0x87, - 0xfc, 0x01, 0xb9, 0xe5, 0x0f, 0x48, 0xe5, 0x92, 0x54, 0xe5, 0x1f, 0x48, 0x39, 0x39, 0xe4, 0x90, - 0x7b, 0x4e, 0xa9, 0xa4, 0xe6, 0xb5, 0x3b, 0xbb, 0xd8, 0x05, 0xe5, 0x6c, 0x7c, 0x11, 0x77, 0x66, - 0x7a, 0xfa, 0xd7, 0xdd, 0x33, 0xdd, 0xd3, 0xd3, 0x03, 0x41, 0xc9, 0xed, 0xb7, 0x97, 0xfb, 0xae, - 0xe3, 0x3b, 0xa8, 0x42, 0xfc, 0x76, 0xc7, 0x23, 0xee, 0x90, 0xb8, 0xfd, 0x43, 0x7d, 0xee, 0xc8, - 0x39, 0x72, 0xd8, 0xc0, 0x0a, 0xfd, 0xe2, 0x34, 0xfa, 0x3c, 0xa5, 0x59, 0xe9, 0x0d, 
0xdb, 0x6d, - 0xf6, 0x4f, 0xff, 0x70, 0xe5, 0x64, 0x28, 0x86, 0xae, 0xb0, 0x21, 0x73, 0xe0, 0x1f, 0xb3, 0x7f, - 0xfa, 0x87, 0xec, 0x8f, 0x18, 0xbc, 0x7a, 0xe4, 0x38, 0x47, 0x5d, 0xb2, 0x62, 0xf6, 0xad, 0x15, - 0xd3, 0xb6, 0x1d, 0xdf, 0xf4, 0x2d, 0xc7, 0xf6, 0xf8, 0x28, 0xfe, 0x5c, 0x83, 0x9a, 0x41, 0xbc, - 0xbe, 0x63, 0x7b, 0x64, 0x93, 0x98, 0x1d, 0xe2, 0xa2, 0x6b, 0x00, 0xed, 0xee, 0xc0, 0xf3, 0x89, - 0xdb, 0xb2, 0x3a, 0x0d, 0x6d, 0x51, 0x5b, 0x9a, 0x34, 0x4a, 0xa2, 0x67, 0xab, 0x83, 0xae, 0x40, - 0xa9, 0x47, 0x7a, 0x87, 0x7c, 0x34, 0xc7, 0x46, 0xa7, 0x79, 0xc7, 0x56, 0x07, 0xe9, 0x30, 0xed, - 0x92, 0xa1, 0xe5, 0x59, 0x8e, 0xdd, 0xc8, 0x2f, 0x6a, 0x4b, 0x79, 0x23, 0x68, 0xd3, 0x89, 0xae, - 0xf9, 0xc2, 0x6f, 0xf9, 0xc4, 0xed, 0x35, 0x26, 0xf9, 0x44, 0xda, 0x71, 0x40, 0xdc, 0x1e, 0xfe, - 0x6c, 0x0a, 0x2a, 0x86, 0x69, 0x1f, 0x11, 0x83, 0x7c, 0x3c, 0x20, 0x9e, 0x8f, 0xea, 0x90, 0x3f, - 0x21, 0x67, 0x0c, 0xbe, 0x62, 0xd0, 0x4f, 0x3e, 0xdf, 0x3e, 0x22, 0x2d, 0x62, 0x73, 0xe0, 0x0a, - 0x9d, 0x6f, 0x1f, 0x91, 0xa6, 0xdd, 0x41, 0x73, 0x30, 0xd5, 0xb5, 0x7a, 0x96, 0x2f, 0x50, 0x79, - 0x23, 0x22, 0xce, 0x64, 0x4c, 0x9c, 0x75, 0x00, 0xcf, 0x71, 0xfd, 0x96, 0xe3, 0x76, 0x88, 0xdb, - 0x98, 0x5a, 0xd4, 0x96, 0x6a, 0xab, 0xb7, 0x96, 0xd5, 0x85, 0x58, 0x56, 0x05, 0x5a, 0xde, 0x77, - 0x5c, 0x7f, 0x8f, 0xd2, 0x1a, 0x25, 0x4f, 0x7e, 0xa2, 0xf7, 0xa0, 0xcc, 0x98, 0xf8, 0xa6, 0x7b, - 0x44, 0xfc, 0x46, 0x81, 0x71, 0xb9, 0x7d, 0x0e, 0x97, 0x03, 0x46, 0x6c, 0x30, 0x78, 0xfe, 0x8d, - 0x30, 0x54, 0x3c, 0xe2, 0x5a, 0x66, 0xd7, 0xfa, 0xc4, 0x3c, 0xec, 0x92, 0x46, 0x71, 0x51, 0x5b, - 0x9a, 0x36, 0x22, 0x7d, 0x54, 0xff, 0x13, 0x72, 0xe6, 0xb5, 0x1c, 0xbb, 0x7b, 0xd6, 0x98, 0x66, - 0x04, 0xd3, 0xb4, 0x63, 0xcf, 0xee, 0x9e, 0xb1, 0x45, 0x73, 0x06, 0xb6, 0xcf, 0x47, 0x4b, 0x6c, - 0xb4, 0xc4, 0x7a, 0xd8, 0xf0, 0x12, 0xd4, 0x7b, 0x96, 0xdd, 0xea, 0x39, 0x9d, 0x56, 0x60, 0x10, - 0x60, 0x06, 0xa9, 0xf5, 0x2c, 0xfb, 0x89, 0xd3, 0x31, 0xa4, 0x59, 0x28, 0xa5, 0x79, 0x1a, 0xa5, - 0x2c, 0x0b, 0x4a, 0xf3, 
0x54, 0xa5, 0x5c, 0x86, 0x59, 0xca, 0xb3, 0xed, 0x12, 0xd3, 0x27, 0x21, - 0x71, 0x85, 0x11, 0x5f, 0xe8, 0x59, 0xf6, 0x3a, 0x1b, 0x89, 0xd0, 0x9b, 0xa7, 0x23, 0xf4, 0x55, - 0x41, 0x6f, 0x9e, 0x46, 0xe9, 0xf1, 0x32, 0x94, 0x02, 0x9b, 0xa3, 0x69, 0x98, 0xdc, 0xdd, 0xdb, - 0x6d, 0xd6, 0x27, 0x10, 0x40, 0x61, 0x6d, 0x7f, 0xbd, 0xb9, 0xbb, 0x51, 0xd7, 0x50, 0x19, 0x8a, - 0x1b, 0x4d, 0xde, 0xc8, 0xe1, 0x47, 0x00, 0xa1, 0x75, 0x51, 0x11, 0xf2, 0xdb, 0xcd, 0x6f, 0xd6, - 0x27, 0x28, 0xcd, 0xf3, 0xa6, 0xb1, 0xbf, 0xb5, 0xb7, 0x5b, 0xd7, 0xe8, 0xe4, 0x75, 0xa3, 0xb9, - 0x76, 0xd0, 0xac, 0xe7, 0x28, 0xc5, 0x93, 0xbd, 0x8d, 0x7a, 0x1e, 0x95, 0x60, 0xea, 0xf9, 0xda, - 0xce, 0xb3, 0x66, 0x7d, 0x12, 0x7f, 0xa1, 0x41, 0x55, 0xac, 0x17, 0xf7, 0x09, 0xf4, 0x36, 0x14, - 0x8e, 0x99, 0x5f, 0xb0, 0xad, 0x58, 0x5e, 0xbd, 0x1a, 0x5b, 0xdc, 0x88, 0xef, 0x18, 0x82, 0x16, - 0x61, 0xc8, 0x9f, 0x0c, 0xbd, 0x46, 0x6e, 0x31, 0xbf, 0x54, 0x5e, 0xad, 0x2f, 0x73, 0x87, 0x5d, - 0xde, 0x26, 0x67, 0xcf, 0xcd, 0xee, 0x80, 0x18, 0x74, 0x10, 0x21, 0x98, 0xec, 0x39, 0x2e, 0x61, - 0x3b, 0x76, 0xda, 0x60, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0x70, 0x1b, 0xe0, - 0xe9, 0xc0, 0x4f, 0xf7, 0x8c, 0x39, 0x98, 0x1a, 0x52, 0xbe, 0xc2, 0x2b, 0x78, 0x83, 0xb9, 0x04, - 0x31, 0x3d, 0x12, 0xb8, 0x04, 0x6d, 0xa0, 0xcb, 0x50, 0xec, 0xbb, 0x64, 0xd8, 0x3a, 0x19, 0x32, - 0x8c, 0x69, 0xa3, 0x40, 0x9b, 0xdb, 0x43, 0x6c, 0x43, 0x99, 0x81, 0x64, 0xd2, 0xfb, 0x5e, 0xc8, - 0x3d, 0xc7, 0xa6, 0x8d, 0xea, 0x2e, 0xf1, 0x3e, 0x02, 0xb4, 0x41, 0xba, 0xc4, 0x27, 0x59, 0xdc, - 0x5e, 0xd1, 0x26, 0x1f, 0xd1, 0xe6, 0xc7, 0x1a, 0xcc, 0x46, 0xd8, 0x67, 0x52, 0xab, 0x01, 0xc5, - 0x0e, 0x63, 0xc6, 0x25, 0xc8, 0x1b, 0xb2, 0x89, 0x1e, 0xc0, 0xb4, 0x10, 0xc0, 0x6b, 0xe4, 0x53, - 0x56, 0xbb, 0xc8, 0x65, 0xf2, 0xf0, 0xdf, 0x35, 0x28, 0x09, 0x45, 0xf7, 0xfa, 0x68, 0x0d, 0xaa, - 0x2e, 0x6f, 0xb4, 0x98, 0x3e, 0x42, 0x22, 0x3d, 0x3d, 0x7a, 0x6c, 0x4e, 0x18, 0x15, 0x31, 0x85, - 0x75, 0xa3, 0xff, 0x83, 0xb2, 0x64, 0xd1, 0x1f, 0xf8, 0xc2, 
0xe4, 0x8d, 0x28, 0x83, 0x70, 0xe7, - 0x6c, 0x4e, 0x18, 0x20, 0xc8, 0x9f, 0x0e, 0x7c, 0x74, 0x00, 0x73, 0x72, 0x32, 0xd7, 0x46, 0x88, - 0x91, 0x67, 0x5c, 0x16, 0xa3, 0x5c, 0x46, 0x97, 0x6a, 0x73, 0xc2, 0x40, 0x62, 0xbe, 0x32, 0xf8, - 0xa8, 0x04, 0x45, 0xd1, 0x8b, 0xff, 0xa1, 0x01, 0x48, 0x83, 0xee, 0xf5, 0xd1, 0x06, 0xd4, 0x5c, - 0xd1, 0x8a, 0x28, 0x7c, 0x25, 0x51, 0x61, 0xb1, 0x0e, 0x13, 0x46, 0x55, 0x4e, 0xe2, 0x2a, 0xbf, - 0x0b, 0x95, 0x80, 0x4b, 0xa8, 0xf3, 0x7c, 0x82, 0xce, 0x01, 0x87, 0xb2, 0x9c, 0x40, 0xb5, 0xfe, - 0x00, 0x2e, 0x06, 0xf3, 0x13, 0xd4, 0xbe, 0x31, 0x46, 0xed, 0x80, 0xe1, 0xac, 0xe4, 0xa0, 0x2a, - 0x0e, 0xf4, 0xac, 0xe1, 0xdd, 0xf8, 0xd7, 0x79, 0x28, 0xae, 0x3b, 0xbd, 0xbe, 0xe9, 0xd2, 0x35, - 0x2a, 0xb8, 0xc4, 0x1b, 0x74, 0x7d, 0xa6, 0x6e, 0x6d, 0xf5, 0x66, 0x14, 0x41, 0x90, 0xc9, 0xbf, - 0x06, 0x23, 0x35, 0xc4, 0x14, 0x3a, 0x59, 0x1c, 0x2d, 0xb9, 0x57, 0x98, 0x2c, 0x0e, 0x16, 0x31, - 0x45, 0xfa, 0x52, 0x3e, 0xf4, 0x25, 0x1d, 0x8a, 0x43, 0xe2, 0x86, 0xc7, 0xe1, 0xe6, 0x84, 0x21, - 0x3b, 0xd0, 0x3d, 0x98, 0x89, 0x87, 0xe6, 0x29, 0x41, 0x53, 0x6b, 0x47, 0x23, 0xf9, 0x4d, 0xa8, - 0x44, 0xce, 0x87, 0x82, 0xa0, 0x2b, 0xf7, 0x94, 0xe3, 0xe1, 0x92, 0x0c, 0x4a, 0xf4, 0x2c, 0xab, - 0x6c, 0x4e, 0x88, 0xb0, 0x84, 0xff, 0x1f, 0xaa, 0x11, 0x5d, 0x69, 0xf8, 0x6d, 0xbe, 0xff, 0x6c, - 0x6d, 0x87, 0xc7, 0xea, 0xc7, 0x2c, 0x3c, 0x1b, 0x75, 0x8d, 0x86, 0xfc, 0x9d, 0xe6, 0xfe, 0x7e, - 0x3d, 0x87, 0xaa, 0x50, 0xda, 0xdd, 0x3b, 0x68, 0x71, 0xaa, 0x3c, 0x7e, 0x27, 0xe0, 0x20, 0x62, - 0xbd, 0x12, 0xe2, 0x27, 0x94, 0x10, 0xaf, 0xc9, 0x10, 0x9f, 0x0b, 0x43, 0x7c, 0xfe, 0x51, 0x0d, - 0x2a, 0xdc, 0x3e, 0xad, 0x81, 0x4d, 0x8f, 0x99, 0x5f, 0x6a, 0x00, 0x07, 0xa7, 0xb6, 0x0c, 0x40, - 0x2b, 0x50, 0x6c, 0x73, 0xe6, 0x0d, 0x8d, 0xf9, 0xf3, 0xc5, 0x44, 0x93, 0x1b, 0x92, 0x0a, 0xbd, - 0x09, 0x45, 0x6f, 0xd0, 0x6e, 0x13, 0x4f, 0x86, 0xfb, 0xcb, 0xf1, 0x90, 0x22, 0x1c, 0xde, 0x90, - 0x74, 0x74, 0xca, 0x0b, 0xd3, 0xea, 0x0e, 0x58, 0xf0, 0x1f, 0x3f, 0x45, 0xd0, 0xe1, 0x9f, 0x69, - 
0x50, 0x66, 0x52, 0x66, 0x8a, 0x63, 0x57, 0xa1, 0xc4, 0x64, 0x20, 0x1d, 0x11, 0xc9, 0xa6, 0x8d, - 0xb0, 0x03, 0xfd, 0x2f, 0x94, 0xe4, 0x0e, 0x96, 0xc1, 0xac, 0x91, 0xcc, 0x76, 0xaf, 0x6f, 0x84, - 0xa4, 0x78, 0x1b, 0x2e, 0x30, 0xab, 0xb4, 0x69, 0x62, 0x29, 0xed, 0xa8, 0xa6, 0x5e, 0x5a, 0x2c, - 0xf5, 0xd2, 0x61, 0xba, 0x7f, 0x7c, 0xe6, 0x59, 0x6d, 0xb3, 0x2b, 0xa4, 0x08, 0xda, 0xf8, 0x1b, - 0x80, 0x54, 0x66, 0x59, 0xd4, 0xc5, 0x55, 0x28, 0x6f, 0x9a, 0xde, 0xb1, 0x10, 0x09, 0x7f, 0x08, - 0x15, 0xde, 0xcc, 0x64, 0x43, 0x04, 0x93, 0xc7, 0xa6, 0x77, 0xcc, 0x04, 0xaf, 0x1a, 0xec, 0x1b, - 0x5f, 0x80, 0x99, 0x7d, 0xdb, 0xec, 0x7b, 0xc7, 0x8e, 0x8c, 0xb5, 0x34, 0xb1, 0xae, 0x87, 0x7d, - 0x99, 0x10, 0xef, 0xc2, 0x8c, 0x4b, 0x7a, 0xa6, 0x65, 0x5b, 0xf6, 0x51, 0xeb, 0xf0, 0xcc, 0x27, - 0x9e, 0xc8, 0xbb, 0x6b, 0x41, 0xf7, 0x23, 0xda, 0x4b, 0x45, 0x3b, 0xec, 0x3a, 0x87, 0xc2, 0xe3, - 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0x95, 0x0f, 0x4c, 0xbf, 0x2d, 0xad, 0x80, 0xb6, 0xa0, 0x16, 0xf8, - 0x39, 0xeb, 0x11, 0xb2, 0xc4, 0x02, 0x3e, 0x9b, 0x23, 0x33, 0x32, 0x19, 0xf0, 0xab, 0x6d, 0xb5, - 0x83, 0xb1, 0x32, 0xed, 0x36, 0xe9, 0x06, 0xac, 0x72, 0xe9, 0xac, 0x18, 0xa1, 0xca, 0x4a, 0xed, - 0x78, 0x34, 0x13, 0x1e, 0x86, 0xdc, 0x2d, 0x7f, 0x9e, 0x03, 0x34, 0x2a, 0xc3, 0x57, 0xcd, 0x0f, - 0x6e, 0x43, 0xcd, 0xf3, 0x4d, 0xd7, 0x6f, 0xc5, 0x6e, 0x25, 0x55, 0xd6, 0x1b, 0xc4, 0xaa, 0xbb, - 0x30, 0xd3, 0x77, 0x9d, 0x23, 0x97, 0x78, 0x5e, 0xcb, 0x76, 0x7c, 0xeb, 0xc5, 0x99, 0x48, 0x8e, - 0x6a, 0xb2, 0x7b, 0x97, 0xf5, 0xa2, 0x26, 0x14, 0x5f, 0x58, 0x5d, 0x9f, 0xb8, 0x5e, 0x63, 0x6a, - 0x31, 0xbf, 0x54, 0x5b, 0x7d, 0x70, 0x9e, 0xd5, 0x96, 0xdf, 0x63, 0xf4, 0x07, 0x67, 0x7d, 0x62, - 0xc8, 0xb9, 0x6a, 0xda, 0x52, 0x88, 0xa4, 0x2d, 0xb7, 0x01, 0x42, 0x7a, 0x1a, 0xb5, 0x76, 0xf7, - 0x9e, 0x3e, 0x3b, 0xa8, 0x4f, 0xa0, 0x0a, 0x4c, 0xef, 0xee, 0x6d, 0x34, 0x77, 0x9a, 0x34, 0xae, - 0xe1, 0x15, 0x69, 0x1b, 0xd5, 0x86, 0x68, 0x1e, 0xa6, 0x5f, 0xd2, 0x5e, 0x79, 0x6d, 0xcb, 0x1b, - 0x45, 0xd6, 0xde, 0xea, 0xe0, 0xbf, 
0x69, 0x50, 0x15, 0xbb, 0x20, 0xd3, 0x56, 0x54, 0x21, 0x72, - 0x11, 0x08, 0x9a, 0x23, 0xf1, 0xdd, 0xd1, 0x11, 0xa9, 0x98, 0x6c, 0x52, 0x77, 0xe7, 0x8b, 0x4d, - 0x3a, 0xc2, 0xac, 0x41, 0x1b, 0xdd, 0x83, 0x7a, 0x9b, 0xbb, 0x7b, 0xec, 0xd8, 0x31, 0x66, 0x44, - 0x7f, 0xb0, 0x48, 0xb7, 0xa1, 0x40, 0x86, 0xc4, 0xf6, 0xbd, 0x46, 0x99, 0xc5, 0xa6, 0xaa, 0x4c, - 0xb4, 0x9a, 0xb4, 0xd7, 0x10, 0x83, 0xf8, 0x7f, 0xe0, 0xc2, 0x0e, 0xcd, 0x74, 0x1f, 0xbb, 0xa6, - 0xad, 0xe6, 0xcc, 0x07, 0x07, 0x3b, 0xc2, 0x2a, 0xf4, 0x13, 0xd5, 0x20, 0xb7, 0xb5, 0x21, 0x74, - 0xc8, 0x6d, 0x6d, 0xe0, 0x4f, 0x35, 0x40, 0xea, 0xbc, 0x4c, 0x66, 0x8a, 0x31, 0x97, 0xf0, 0xf9, - 0x10, 0x7e, 0x0e, 0xa6, 0x88, 0xeb, 0x3a, 0x2e, 0x33, 0x48, 0xc9, 0xe0, 0x0d, 0x7c, 0x4b, 0xc8, - 0x60, 0x90, 0xa1, 0x73, 0x12, 0xec, 0x79, 0xce, 0x4d, 0x0b, 0x44, 0xdd, 0x86, 0xd9, 0x08, 0x55, - 0xa6, 0x18, 0x79, 0x17, 0x2e, 0x32, 0x66, 0xdb, 0x84, 0xf4, 0xd7, 0xba, 0xd6, 0x30, 0x15, 0xb5, - 0x0f, 0x97, 0xe2, 0x84, 0x5f, 0xaf, 0x8d, 0xf0, 0x3b, 0x02, 0xf1, 0xc0, 0xea, 0x91, 0x03, 0x67, - 0x27, 0x5d, 0x36, 0x1a, 0xf8, 0xe8, 0x4d, 0x58, 0x1c, 0x26, 0xec, 0x1b, 0xff, 0x4a, 0x83, 0xcb, - 0x23, 0xd3, 0xbf, 0xe6, 0x55, 0x5d, 0x00, 0x38, 0xa2, 0xdb, 0x87, 0x74, 0xe8, 0x00, 0xbf, 0xc3, - 0x29, 0x3d, 0x81, 0x9c, 0x34, 0x76, 0x54, 0x84, 0x9c, 0xc7, 0x50, 0x78, 0xc2, 0xca, 0x27, 0x8a, - 0x56, 0x93, 0x52, 0x2b, 0xdb, 0xec, 0xf1, 0x5b, 0x5d, 0xc9, 0x60, 0xdf, 0xec, 0xe8, 0x24, 0xc4, - 0x7d, 0x66, 0xec, 0xf0, 0x23, 0xba, 0x64, 0x04, 0x6d, 0x8a, 0xde, 0xee, 0x5a, 0xc4, 0xf6, 0xd9, - 0xe8, 0x24, 0x1b, 0x55, 0x7a, 0xf0, 0x32, 0xd4, 0x39, 0xd2, 0x5a, 0xa7, 0xa3, 0x1c, 0xd3, 0x01, - 0x3f, 0x2d, 0xca, 0x0f, 0xbf, 0x84, 0x0b, 0x0a, 0x7d, 0x26, 0xd3, 0xbd, 0x06, 0x05, 0x5e, 0x23, - 0x12, 0x27, 0xc4, 0x5c, 0x74, 0x16, 0x87, 0x31, 0x04, 0x0d, 0xbe, 0x0d, 0xb3, 0xa2, 0x87, 0xf4, - 0x9c, 0xa4, 0x55, 0x67, 0xf6, 0xc1, 0x3b, 0x30, 0x17, 0x25, 0xcb, 0xe4, 0x08, 0x6b, 0x12, 0xf4, - 0x59, 0xbf, 0xa3, 0x1c, 0x38, 0xf1, 0x45, 0x51, 0x0d, 0x96, 0x8b, 0x19, 
0x2c, 0x10, 0x48, 0xb2, - 0xc8, 0x24, 0xd0, 0xac, 0x34, 0xff, 0x8e, 0xe5, 0x05, 0x69, 0xc5, 0x27, 0x80, 0xd4, 0xce, 0x4c, - 0x8b, 0xb2, 0x0c, 0x45, 0x6e, 0x70, 0x99, 0xb9, 0x26, 0xaf, 0x8a, 0x24, 0xa2, 0x02, 0x6d, 0x90, - 0x17, 0xae, 0x79, 0xd4, 0x23, 0x41, 0x64, 0xa5, 0xf9, 0x9a, 0xda, 0x99, 0x49, 0xe3, 0x3f, 0x68, - 0x50, 0x59, 0xeb, 0x9a, 0x6e, 0x4f, 0x1a, 0xff, 0x5d, 0x28, 0xf0, 0x44, 0x50, 0xdc, 0x9d, 0xee, - 0x44, 0xd9, 0xa8, 0xb4, 0xbc, 0xb1, 0xc6, 0xd3, 0x46, 0x31, 0x8b, 0x2e, 0x96, 0x28, 0x4d, 0x6e, - 0xc4, 0x4a, 0x95, 0x1b, 0xe8, 0x75, 0x98, 0x32, 0xe9, 0x14, 0xe6, 0xbf, 0xb5, 0x78, 0x0a, 0xce, - 0xb8, 0xb1, 0x43, 0x9b, 0x53, 0xe1, 0xb7, 0xa1, 0xac, 0x20, 0xd0, 0x9b, 0xc5, 0xe3, 0xa6, 0x38, - 0x98, 0xd7, 0xd6, 0x0f, 0xb6, 0x9e, 0xf3, 0x0b, 0x47, 0x0d, 0x60, 0xa3, 0x19, 0xb4, 0x73, 0xf8, - 0x43, 0x31, 0x4b, 0x78, 0xb8, 0x2a, 0x8f, 0x96, 0x26, 0x4f, 0xee, 0x95, 0xe4, 0x39, 0x85, 0xaa, - 0x50, 0x3f, 0xd3, 0x1e, 0x78, 0x13, 0x0a, 0x8c, 0x9f, 0xdc, 0x02, 0xf3, 0x09, 0xb0, 0xd2, 0x3b, - 0x39, 0x21, 0x9e, 0x81, 0xea, 0xbe, 0x6f, 0xfa, 0x03, 0x4f, 0x6e, 0x81, 0xdf, 0x6b, 0x50, 0x93, - 0x3d, 0x59, 0xcb, 0x2c, 0xf2, 0x7a, 0xca, 0x63, 0x5e, 0x70, 0x39, 0xbd, 0x04, 0x85, 0xce, 0xe1, - 0xbe, 0xf5, 0x89, 0x2c, 0x66, 0x89, 0x16, 0xed, 0xef, 0x72, 0x1c, 0x5e, 0x50, 0x16, 0x2d, 0x7a, - 0xd1, 0x71, 0xcd, 0x17, 0xfe, 0x96, 0xdd, 0x21, 0xa7, 0x2c, 0x9f, 0x98, 0x34, 0xc2, 0x0e, 0x76, - 0x37, 0x11, 0x85, 0x67, 0x96, 0x7f, 0xa9, 0x85, 0xe8, 0x59, 0xb8, 0xb0, 0x36, 0xf0, 0x8f, 0x9b, - 0xb6, 0x79, 0xd8, 0x95, 0x41, 0x00, 0xcf, 0x01, 0xa2, 0x9d, 0x1b, 0x96, 0xa7, 0xf6, 0x36, 0x61, - 0x96, 0xf6, 0x12, 0xdb, 0xb7, 0xda, 0x4a, 0xc4, 0x90, 0x61, 0x5b, 0x8b, 0x85, 0x6d, 0xd3, 0xf3, - 0x5e, 0x3a, 0x6e, 0x47, 0xa8, 0x16, 0xb4, 0xf1, 0x06, 0x67, 0xfe, 0xcc, 0x8b, 0x04, 0xe6, 0xaf, - 0xca, 0x65, 0x29, 0xe4, 0xf2, 0x98, 0xf8, 0x63, 0xb8, 0xe0, 0x07, 0x70, 0x51, 0x52, 0x8a, 0xfa, - 0xc5, 0x18, 0xe2, 0x3d, 0xb8, 0x26, 0x89, 0xd7, 0x8f, 0x69, 0x56, 0xfd, 0x54, 0x00, 0xfe, 0xa7, - 0x72, 0x3e, 
0x82, 0x46, 0x20, 0x27, 0xcb, 0xb4, 0x9c, 0xae, 0x2a, 0xc0, 0xc0, 0x13, 0x7b, 0xa6, - 0x64, 0xb0, 0x6f, 0xda, 0xe7, 0x3a, 0xdd, 0xe0, 0x10, 0xa4, 0xdf, 0x78, 0x1d, 0xe6, 0x25, 0x0f, - 0x91, 0x03, 0x45, 0x99, 0x8c, 0x08, 0x94, 0xc4, 0x44, 0x18, 0x8c, 0x4e, 0x1d, 0x6f, 0x76, 0x95, - 0x32, 0x6a, 0x5a, 0xc6, 0x53, 0x53, 0x78, 0x5e, 0xe4, 0x3b, 0x82, 0x0a, 0xa6, 0x06, 0x6d, 0xd1, - 0x4d, 0x19, 0xa8, 0xdd, 0x62, 0x21, 0x68, 0xf7, 0xc8, 0x42, 0x8c, 0xb0, 0xfe, 0x08, 0x16, 0x02, - 0x21, 0xa8, 0xdd, 0x9e, 0x12, 0xb7, 0x67, 0x79, 0x9e, 0x72, 0xe3, 0x4e, 0x52, 0xfc, 0x0e, 0x4c, - 0xf6, 0x89, 0x88, 0x29, 0xe5, 0x55, 0xb4, 0xcc, 0x9f, 0x87, 0x96, 0x95, 0xc9, 0x6c, 0x1c, 0x77, - 0xe0, 0xba, 0xe4, 0xce, 0x2d, 0x9a, 0xc8, 0x3e, 0x2e, 0x94, 0xbc, 0x8d, 0x71, 0xb3, 0x8e, 0xde, - 0xc6, 0xf2, 0x7c, 0xed, 0xe5, 0x6d, 0x8c, 0x9e, 0x15, 0xaa, 0x6f, 0x65, 0x3a, 0x2b, 0xb6, 0xb9, - 0x4d, 0x03, 0x97, 0xcc, 0xc4, 0xec, 0x10, 0xe6, 0xa2, 0x9e, 0x9c, 0x29, 0x8c, 0xcd, 0xc1, 0x94, - 0xef, 0x9c, 0x10, 0x19, 0xc4, 0x78, 0x43, 0x0a, 0x1c, 0xb8, 0x79, 0x26, 0x81, 0xcd, 0x90, 0x19, - 0xdb, 0x92, 0x59, 0xe5, 0xa5, 0xab, 0x29, 0xf3, 0x19, 0xde, 0xc0, 0xbb, 0x70, 0x29, 0x1e, 0x26, - 0x32, 0x89, 0xfc, 0x9c, 0x6f, 0xe0, 0xa4, 0x48, 0x92, 0x89, 0xef, 0xfb, 0x61, 0x30, 0x50, 0x02, - 0x4a, 0x26, 0x96, 0x06, 0xe8, 0x49, 0xf1, 0xe5, 0xbf, 0xb1, 0x5f, 0x83, 0x70, 0x93, 0x89, 0x99, - 0x17, 0x32, 0xcb, 0xbe, 0xfc, 0x61, 0x8c, 0xc8, 0x8f, 0x8d, 0x11, 0xc2, 0x49, 0xc2, 0x28, 0xf6, - 0x35, 0x6c, 0x3a, 0x81, 0x11, 0x06, 0xd0, 0xac, 0x18, 0xf4, 0x0c, 0x09, 0x30, 0x58, 0x43, 0x6e, - 0x6c, 0x35, 0xec, 0x66, 0x5a, 0x8c, 0x0f, 0xc2, 0xd8, 0x39, 0x12, 0x99, 0x33, 0x31, 0xfe, 0x10, - 0x16, 0xd3, 0x83, 0x72, 0x16, 0xce, 0xf7, 0x31, 0x94, 0x82, 0x84, 0x52, 0x79, 0x5a, 0x2d, 0x43, - 0x71, 0x77, 0x6f, 0xff, 0xe9, 0xda, 0x7a, 0xb3, 0xae, 0xad, 0xfe, 0x33, 0x0f, 0xb9, 0xed, 0xe7, - 0xe8, 0x5b, 0x30, 0xc5, 0x1f, 0x5e, 0xc6, 0xbc, 0x4b, 0xe9, 0xe3, 0x9e, 0x70, 0xf0, 0xd5, 0x4f, - 0xff, 0xf4, 0xd7, 0x2f, 0x72, 0x97, 0xf0, 0x85, 
0x95, 0xe1, 0x5b, 0x66, 0xb7, 0x7f, 0x6c, 0xae, - 0x9c, 0x0c, 0x57, 0xd8, 0x99, 0xf0, 0x50, 0xbb, 0x8f, 0x9e, 0x43, 0xfe, 0xe9, 0xc0, 0x47, 0xa9, - 0x8f, 0x56, 0x7a, 0xfa, 0xd3, 0x0e, 0xd6, 0x19, 0xe7, 0x39, 0x3c, 0xa3, 0x72, 0xee, 0x0f, 0x7c, - 0xca, 0x77, 0x08, 0x65, 0xe5, 0x75, 0x06, 0x9d, 0xfb, 0x9c, 0xa5, 0x9f, 0xff, 0xf2, 0x83, 0x31, - 0xc3, 0xbb, 0x8a, 0x2f, 0xab, 0x78, 0xfc, 0x11, 0x49, 0xd5, 0xe7, 0xe0, 0xd4, 0x8e, 0xeb, 0x13, - 0x3e, 0x30, 0xc4, 0xf5, 0x51, 0x8a, 0xfa, 0xc9, 0xfa, 0xf8, 0xa7, 0x36, 0xe5, 0xeb, 0x88, 0x17, - 0xa5, 0xb6, 0x8f, 0xae, 0x27, 0xbc, 0x48, 0xa8, 0xb5, 0x77, 0x7d, 0x31, 0x9d, 0x40, 0x20, 0xdd, - 0x60, 0x48, 0x57, 0xf0, 0x25, 0x15, 0xa9, 0x1d, 0xd0, 0x3d, 0xd4, 0xee, 0xaf, 0x1e, 0xc3, 0x14, - 0xab, 0x18, 0xa2, 0x96, 0xfc, 0xd0, 0x13, 0x6a, 0x9d, 0x29, 0x3b, 0x20, 0x52, 0x6b, 0xc4, 0xf3, - 0x0c, 0x6d, 0x16, 0xd7, 0x02, 0x34, 0x56, 0x34, 0x7c, 0xa8, 0xdd, 0x5f, 0xd2, 0xde, 0xd0, 0x56, - 0xbf, 0x3f, 0x09, 0x53, 0xac, 0x52, 0x83, 0xfa, 0x00, 0x61, 0x0d, 0x2e, 0xae, 0xe7, 0x48, 0x55, - 0x2f, 0xae, 0xe7, 0x68, 0xf9, 0x0e, 0x5f, 0x67, 0xc8, 0xf3, 0x78, 0x2e, 0x40, 0x66, 0xaf, 0xe0, - 0x2b, 0xac, 0x26, 0x43, 0xcd, 0xfa, 0x12, 0xca, 0x4a, 0x2d, 0x0d, 0x25, 0x71, 0x8c, 0x14, 0xe3, - 0xe2, 0xdb, 0x24, 0xa1, 0x10, 0x87, 0x6f, 0x32, 0xd0, 0x6b, 0xb8, 0xa1, 0x1a, 0x97, 0xe3, 0xba, - 0x8c, 0x92, 0x02, 0x7f, 0xa6, 0x41, 0x2d, 0x5a, 0x4f, 0x43, 0x37, 0x13, 0x58, 0xc7, 0xcb, 0x72, - 0xfa, 0xad, 0xf1, 0x44, 0xa9, 0x22, 0x70, 0xfc, 0x13, 0x42, 0xfa, 0x26, 0xa5, 0x14, 0xb6, 0x47, - 0x3f, 0xd0, 0x60, 0x26, 0x56, 0x25, 0x43, 0x49, 0x10, 0x23, 0x35, 0x38, 0xfd, 0xf6, 0x39, 0x54, - 0x42, 0x92, 0xbb, 0x4c, 0x92, 0x1b, 0xf8, 0xea, 0xa8, 0x31, 0x7c, 0xab, 0x47, 0x7c, 0x47, 0x48, - 0xb3, 0xfa, 0xaf, 0x3c, 0x14, 0xd7, 0xf9, 0xaf, 0x8c, 0x90, 0x0f, 0xa5, 0xa0, 0xf2, 0x84, 0x16, - 0x92, 0xaa, 0x12, 0x61, 0xca, 0xae, 0x5f, 0x4f, 0x1d, 0x17, 0x22, 0xdc, 0x61, 0x22, 0x2c, 0xe2, - 0x2b, 0x81, 0x08, 0xe2, 0xd7, 0x4c, 0x2b, 0xfc, 0xf2, 0xbd, 0x62, 0x76, 0x3a, 0x74, 
0x49, 0xbe, - 0xa7, 0x41, 0x45, 0x2d, 0x28, 0xa1, 0x1b, 0x89, 0xf5, 0x10, 0xb5, 0x26, 0xa5, 0xe3, 0x71, 0x24, - 0x02, 0xff, 0x1e, 0xc3, 0xbf, 0x89, 0x17, 0xd2, 0xf0, 0x5d, 0x46, 0x1f, 0x15, 0x81, 0x97, 0x90, - 0x92, 0x45, 0x88, 0x54, 0xa8, 0x92, 0x45, 0x88, 0x56, 0xa0, 0xce, 0x17, 0x61, 0xc0, 0xe8, 0xa9, - 0x08, 0xa7, 0x00, 0x61, 0x85, 0x09, 0x25, 0x1a, 0x57, 0xb9, 0xc4, 0xc4, 0x7d, 0x70, 0xb4, 0x38, - 0x95, 0xb0, 0x03, 0x62, 0xd8, 0x5d, 0xcb, 0xa3, 0xbe, 0xb8, 0xfa, 0xdb, 0x49, 0x28, 0x3f, 0x31, - 0x2d, 0xdb, 0x27, 0xb6, 0x69, 0xb7, 0x09, 0x3a, 0x82, 0x29, 0x76, 0x4a, 0xc5, 0x03, 0x8f, 0x5a, - 0xf6, 0x89, 0x07, 0x9e, 0x48, 0x4d, 0x04, 0xdf, 0x66, 0xd0, 0xd7, 0xb1, 0x1e, 0x40, 0xf7, 0x42, - 0xfe, 0x2b, 0xac, 0x9e, 0x41, 0x55, 0x3e, 0x81, 0x02, 0xaf, 0x5f, 0xa0, 0x18, 0xb7, 0x48, 0x9d, - 0x43, 0xbf, 0x9a, 0x3c, 0x98, 0xba, 0xcb, 0x54, 0x2c, 0x8f, 0x11, 0x53, 0xb0, 0x6f, 0x03, 0x84, - 0x05, 0xb3, 0xb8, 0x7d, 0x47, 0xea, 0x6b, 0xfa, 0x62, 0x3a, 0x81, 0x00, 0xbe, 0xcf, 0x80, 0x6f, - 0xe1, 0xeb, 0x89, 0xc0, 0x9d, 0x60, 0x02, 0x05, 0x6f, 0xc3, 0xe4, 0xa6, 0xe9, 0x1d, 0xa3, 0xd8, - 0x21, 0xa4, 0xbc, 0x92, 0xea, 0x7a, 0xd2, 0x90, 0x80, 0xba, 0xc5, 0xa0, 0x16, 0xf0, 0x7c, 0x22, - 0xd4, 0xb1, 0xe9, 0xd1, 0x98, 0x8e, 0x06, 0x30, 0x2d, 0x5f, 0x3e, 0xd1, 0xb5, 0x98, 0xcd, 0xa2, - 0xaf, 0xa4, 0xfa, 0x42, 0xda, 0xb0, 0x00, 0x5c, 0x62, 0x80, 0x18, 0x5f, 0x4b, 0x36, 0xaa, 0x20, - 0x7f, 0xa8, 0xdd, 0x7f, 0x43, 0x5b, 0xfd, 0x51, 0x1d, 0x26, 0x69, 0xbe, 0x44, 0x4f, 0x91, 0xf0, - 0x9a, 0x19, 0xb7, 0xf0, 0x48, 0x71, 0x27, 0x6e, 0xe1, 0xd1, 0x1b, 0x6a, 0xc2, 0x29, 0xc2, 0x7e, - 0x6b, 0x49, 0x18, 0x15, 0xd5, 0xd8, 0x87, 0xb2, 0x72, 0x19, 0x45, 0x09, 0x1c, 0xa3, 0xa5, 0xa3, - 0xf8, 0x29, 0x92, 0x70, 0x93, 0xc5, 0x8b, 0x0c, 0x54, 0xc7, 0x17, 0xa3, 0xa0, 0x1d, 0x4e, 0x46, - 0x51, 0xbf, 0x03, 0x15, 0xf5, 0xd6, 0x8a, 0x12, 0x98, 0xc6, 0x6a, 0x53, 0xf1, 0x58, 0x91, 0x74, - 0xe9, 0x4d, 0x70, 0x9a, 0xe0, 0x97, 0xa5, 0x92, 0x96, 0xa2, 0x7f, 0x0c, 0x45, 0x71, 0x97, 0x4d, - 0xd2, 0x37, 0x5a, 0xcd, 
0x4a, 0xd2, 0x37, 0x76, 0x11, 0x4e, 0x48, 0x49, 0x18, 0x2c, 0xcd, 0xd9, - 0x65, 0x80, 0x16, 0x90, 0x8f, 0x89, 0x9f, 0x06, 0x19, 0xd6, 0x67, 0xd2, 0x20, 0x95, 0xfb, 0xd2, - 0x58, 0xc8, 0x23, 0xe2, 0x8b, 0xbd, 0x2c, 0x2f, 0x23, 0x28, 0x85, 0xa3, 0x1a, 0x0d, 0xf1, 0x38, - 0x92, 0xd4, 0x2c, 0x32, 0x44, 0x15, 0xa1, 0x10, 0x7d, 0x17, 0x20, 0xbc, 0x78, 0xc7, 0x13, 0x83, - 0xc4, 0xea, 0x5d, 0x3c, 0x31, 0x48, 0xbe, 0xbb, 0x27, 0x78, 0x70, 0x08, 0xce, 0x33, 0x59, 0x0a, - 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x48, 0x86, 0x48, 0x2c, 0x0c, 0xea, 0xaf, 0xbd, - 0x1a, 0x71, 0x6a, 0xf4, 0x0c, 0xe5, 0x6a, 0xb3, 0x29, 0xfd, 0x97, 0x54, 0xb2, 0xcf, 0x35, 0xa8, - 0x46, 0xae, 0xfa, 0xe8, 0x4e, 0xca, 0x3a, 0xc7, 0x8a, 0x8b, 0xfa, 0xdd, 0x73, 0xe9, 0x52, 0x73, - 0x27, 0x65, 0x57, 0xc8, 0xbc, 0xf1, 0x87, 0x1a, 0xd4, 0xa2, 0xf5, 0x01, 0x94, 0x02, 0x30, 0x52, - 0xa1, 0xd4, 0x97, 0xce, 0x27, 0x7c, 0x85, 0xd5, 0x0a, 0x53, 0xc9, 0x8f, 0xa1, 0x28, 0xca, 0x0a, - 0x49, 0x6e, 0x11, 0x2d, 0x70, 0x26, 0xb9, 0x45, 0xac, 0x26, 0x91, 0xe6, 0x16, 0xf4, 0x86, 0xae, - 0x78, 0xa2, 0x28, 0x3e, 0xa4, 0x41, 0x8e, 0xf7, 0xc4, 0x58, 0xe5, 0x62, 0x2c, 0x64, 0xe8, 0x89, - 0xb2, 0xf4, 0x80, 0x52, 0x38, 0x9e, 0xe3, 0x89, 0xf1, 0xca, 0x45, 0x9a, 0x27, 0x32, 0x54, 0xc5, - 0x13, 0xc3, 0x4a, 0x41, 0x92, 0x27, 0x8e, 0x94, 0x6f, 0x93, 0x3c, 0x71, 0xb4, 0xd8, 0x90, 0xb6, - 0xb6, 0x0c, 0x3c, 0xe2, 0x89, 0xb3, 0x09, 0x95, 0x05, 0xf4, 0x5a, 0x8a, 0x4d, 0x13, 0x4b, 0xc3, - 0xfa, 0xeb, 0xaf, 0x48, 0x3d, 0xde, 0x03, 0xf8, 0x6a, 0x48, 0x0f, 0xf8, 0x85, 0x06, 0x73, 0x49, - 0xa5, 0x09, 0x94, 0x02, 0x96, 0x52, 0x57, 0xd6, 0x97, 0x5f, 0x95, 0xfc, 0x15, 0xec, 0x16, 0xf8, - 0xc4, 0xa3, 0xfa, 0xef, 0xbe, 0x5c, 0xd0, 0xfe, 0xf8, 0xe5, 0x82, 0xf6, 0xe7, 0x2f, 0x17, 0xb4, - 0x9f, 0xfe, 0x65, 0x61, 0xe2, 0xb0, 0xc0, 0xfe, 0xc3, 0xc3, 0x5b, 0xff, 0x0e, 0x00, 0x00, 0xff, - 0xff, 0x73, 0x7e, 0xb4, 0xb4, 0x77, 0x31, 0x00, 0x00, + // 3450 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 
0xff, 0xbc, 0x3b, 0x5b, 0x6f, 0x1b, 0xc7, + 0xb9, 0x5a, 0x5e, 0xc5, 0x8f, 0x17, 0xd1, 0x23, 0xd9, 0xa6, 0x68, 0x5b, 0x96, 0xc7, 0x37, 0xd9, + 0x4e, 0xa4, 0x44, 0xc9, 0x39, 0x0f, 0x3e, 0x41, 0x70, 0x64, 0x89, 0xb1, 0x74, 0x24, 0x4b, 0xce, + 0x4a, 0x76, 0x72, 0x80, 0xa0, 0xc4, 0x8a, 0x1c, 0x53, 0x0b, 0x91, 0xbb, 0xcc, 0xee, 0x92, 0x96, + 0xd2, 0x14, 0x28, 0xd2, 0x04, 0x45, 0x0b, 0xf4, 0xa5, 0x79, 0xe8, 0xed, 0xb1, 0x28, 0x8a, 0xfc, + 0x80, 0xbe, 0xf5, 0x07, 0x14, 0x7d, 0x69, 0x81, 0xfe, 0x81, 0x22, 0xed, 0x63, 0xdf, 0xfb, 0x54, + 0xb4, 0x98, 0xdb, 0xee, 0xec, 0x72, 0x97, 0x52, 0xca, 0x26, 0x2f, 0xd6, 0xce, 0x37, 0xdf, 0x7c, + 0xb7, 0x99, 0xef, 0x32, 0xdf, 0xd0, 0x50, 0x70, 0xfa, 0xad, 0xe5, 0xbe, 0x63, 0x7b, 0x36, 0x2a, + 0x11, 0xaf, 0xd5, 0x76, 0x89, 0x33, 0x24, 0x4e, 0xff, 0xb0, 0x3e, 0xd7, 0xb1, 0x3b, 0x36, 0x9b, + 0x58, 0xa1, 0x5f, 0x1c, 0xa7, 0x3e, 0x4f, 0x71, 0x56, 0x7a, 0xc3, 0x56, 0x8b, 0xfd, 0xd3, 0x3f, + 0x5c, 0x39, 0x1e, 0x8a, 0xa9, 0x2b, 0x6c, 0xca, 0x18, 0x78, 0x47, 0xec, 0x9f, 0xfe, 0x21, 0xfb, + 0x23, 0x26, 0xaf, 0x76, 0x6c, 0xbb, 0xd3, 0x25, 0x2b, 0x46, 0xdf, 0x5c, 0x31, 0x2c, 0xcb, 0xf6, + 0x0c, 0xcf, 0xb4, 0x2d, 0x97, 0xcf, 0xe2, 0xcf, 0x34, 0xa8, 0xe8, 0xc4, 0xed, 0xdb, 0x96, 0x4b, + 0x36, 0x89, 0xd1, 0x26, 0x0e, 0xba, 0x06, 0xd0, 0xea, 0x0e, 0x5c, 0x8f, 0x38, 0x4d, 0xb3, 0x5d, + 0xd3, 0x16, 0xb5, 0xa5, 0x8c, 0x5e, 0x10, 0x90, 0xad, 0x36, 0xba, 0x02, 0x85, 0x1e, 0xe9, 0x1d, + 0xf2, 0xd9, 0x14, 0x9b, 0x9d, 0xe6, 0x80, 0xad, 0x36, 0xaa, 0xc3, 0xb4, 0x43, 0x86, 0xa6, 0x6b, + 0xda, 0x56, 0x2d, 0xbd, 0xa8, 0x2d, 0xa5, 0x75, 0x7f, 0x4c, 0x17, 0x3a, 0xc6, 0x0b, 0xaf, 0xe9, + 0x11, 0xa7, 0x57, 0xcb, 0xf0, 0x85, 0x14, 0x70, 0x40, 0x9c, 0x1e, 0xfe, 0x34, 0x0b, 0x25, 0xdd, + 0xb0, 0x3a, 0x44, 0x27, 0x1f, 0x0e, 0x88, 0xeb, 0xa1, 0x2a, 0xa4, 0x8f, 0xc9, 0x29, 0x63, 0x5f, + 0xd2, 0xe9, 0x27, 0x5f, 0x6f, 0x75, 0x48, 0x93, 0x58, 0x9c, 0x71, 0x89, 0xae, 0xb7, 0x3a, 0xa4, + 0x61, 0xb5, 0xd1, 0x1c, 0x64, 0xbb, 0x66, 0xcf, 0xf4, 0x04, 0x57, 0x3e, 0x08, 0x89, 0x93, 
0x89, + 0x88, 0xb3, 0x0e, 0xe0, 0xda, 0x8e, 0xd7, 0xb4, 0x9d, 0x36, 0x71, 0x6a, 0xd9, 0x45, 0x6d, 0xa9, + 0xb2, 0x7a, 0x6b, 0x59, 0xdd, 0x88, 0x65, 0x55, 0xa0, 0xe5, 0x7d, 0xdb, 0xf1, 0xf6, 0x28, 0xae, + 0x5e, 0x70, 0xe5, 0x27, 0x7a, 0x07, 0x8a, 0x8c, 0x88, 0x67, 0x38, 0x1d, 0xe2, 0xd5, 0x72, 0x8c, + 0xca, 0xed, 0x33, 0xa8, 0x1c, 0x30, 0x64, 0x9d, 0xb1, 0xe7, 0xdf, 0x08, 0x43, 0xc9, 0x25, 0x8e, + 0x69, 0x74, 0xcd, 0x8f, 0x8c, 0xc3, 0x2e, 0xa9, 0xe5, 0x17, 0xb5, 0xa5, 0x69, 0x3d, 0x04, 0xa3, + 0xfa, 0x1f, 0x93, 0x53, 0xb7, 0x69, 0x5b, 0xdd, 0xd3, 0xda, 0x34, 0x43, 0x98, 0xa6, 0x80, 0x3d, + 0xab, 0x7b, 0xca, 0x36, 0xcd, 0x1e, 0x58, 0x1e, 0x9f, 0x2d, 0xb0, 0xd9, 0x02, 0x83, 0xb0, 0xe9, + 0x25, 0xa8, 0xf6, 0x4c, 0xab, 0xd9, 0xb3, 0xdb, 0x4d, 0xdf, 0x20, 0xc0, 0x0c, 0x52, 0xe9, 0x99, + 0xd6, 0x13, 0xbb, 0xad, 0x4b, 0xb3, 0x50, 0x4c, 0xe3, 0x24, 0x8c, 0x59, 0x14, 0x98, 0xc6, 0x89, + 0x8a, 0xb9, 0x0c, 0xb3, 0x94, 0x66, 0xcb, 0x21, 0x86, 0x47, 0x02, 0xe4, 0x12, 0x43, 0xbe, 0xd0, + 0x33, 0xad, 0x75, 0x36, 0x13, 0xc2, 0x37, 0x4e, 0x46, 0xf0, 0xcb, 0x02, 0xdf, 0x38, 0x09, 0xe3, + 0xe3, 0x65, 0x28, 0xf8, 0x36, 0x47, 0xd3, 0x90, 0xd9, 0xdd, 0xdb, 0x6d, 0x54, 0xa7, 0x10, 0x40, + 0x6e, 0x6d, 0x7f, 0xbd, 0xb1, 0xbb, 0x51, 0xd5, 0x50, 0x11, 0xf2, 0x1b, 0x0d, 0x3e, 0x48, 0xe1, + 0x47, 0x00, 0x81, 0x75, 0x51, 0x1e, 0xd2, 0xdb, 0x8d, 0xff, 0xaf, 0x4e, 0x51, 0x9c, 0xe7, 0x0d, + 0x7d, 0x7f, 0x6b, 0x6f, 0xb7, 0xaa, 0xd1, 0xc5, 0xeb, 0x7a, 0x63, 0xed, 0xa0, 0x51, 0x4d, 0x51, + 0x8c, 0x27, 0x7b, 0x1b, 0xd5, 0x34, 0x2a, 0x40, 0xf6, 0xf9, 0xda, 0xce, 0xb3, 0x46, 0x35, 0x83, + 0x3f, 0xd7, 0xa0, 0x2c, 0xf6, 0x8b, 0xfb, 0x04, 0x7a, 0x13, 0x72, 0x47, 0xcc, 0x2f, 0xd8, 0x51, + 0x2c, 0xae, 0x5e, 0x8d, 0x6c, 0x6e, 0xc8, 0x77, 0x74, 0x81, 0x8b, 0x30, 0xa4, 0x8f, 0x87, 0x6e, + 0x2d, 0xb5, 0x98, 0x5e, 0x2a, 0xae, 0x56, 0x97, 0xb9, 0xc3, 0x2e, 0x6f, 0x93, 0xd3, 0xe7, 0x46, + 0x77, 0x40, 0x74, 0x3a, 0x89, 0x10, 0x64, 0x7a, 0xb6, 0x43, 0xd8, 0x89, 0x9d, 0xd6, 0xd9, 0x37, + 0x3d, 0xc6, 0x6c, 0xd3, 0xc4, 
0x69, 0xe5, 0x03, 0xfc, 0x85, 0x06, 0xf0, 0x74, 0xe0, 0x25, 0xbb, + 0xc6, 0x1c, 0x64, 0x87, 0x94, 0xb0, 0x70, 0x0b, 0x3e, 0x60, 0x3e, 0x41, 0x0c, 0x97, 0xf8, 0x3e, + 0x41, 0x07, 0xe8, 0x32, 0xe4, 0xfb, 0x0e, 0x19, 0x36, 0x8f, 0x87, 0x8c, 0xc9, 0xb4, 0x9e, 0xa3, + 0xc3, 0xed, 0x21, 0xba, 0x01, 0x25, 0xb3, 0x63, 0xd9, 0x0e, 0x69, 0x72, 0x5a, 0x59, 0x36, 0x5b, + 0xe4, 0x30, 0x26, 0xb7, 0x82, 0xc2, 0x09, 0xe7, 0x54, 0x94, 0x1d, 0x0a, 0xc2, 0x16, 0x14, 0x99, + 0xa8, 0x13, 0x99, 0xef, 0x5e, 0x20, 0x63, 0x8a, 0x2d, 0x1b, 0x35, 0xa1, 0x90, 0x1a, 0x7f, 0x00, + 0x68, 0x83, 0x74, 0x89, 0x47, 0x26, 0x89, 0x1e, 0x8a, 0x4d, 0xd2, 0xaa, 0x4d, 0xf0, 0x8f, 0x35, + 0x98, 0x0d, 0x91, 0x9f, 0x48, 0xad, 0x1a, 0xe4, 0xdb, 0x8c, 0x18, 0x97, 0x20, 0xad, 0xcb, 0x21, + 0x7a, 0x00, 0xd3, 0x42, 0x00, 0xb7, 0x96, 0x4e, 0x38, 0x34, 0x79, 0x2e, 0x93, 0x8b, 0xff, 0xa6, + 0x41, 0x41, 0x28, 0xba, 0xd7, 0x47, 0x6b, 0x50, 0x76, 0xf8, 0xa0, 0xc9, 0xf4, 0x11, 0x12, 0xd5, + 0x93, 0x83, 0xd0, 0xe6, 0x94, 0x5e, 0x12, 0x4b, 0x18, 0x18, 0xfd, 0x0f, 0x14, 0x25, 0x89, 0xfe, + 0xc0, 0x13, 0x26, 0xaf, 0x85, 0x09, 0x04, 0xe7, 0x6f, 0x73, 0x4a, 0x07, 0x81, 0xfe, 0x74, 0xe0, + 0xa1, 0x03, 0x98, 0x93, 0x8b, 0xb9, 0x36, 0x42, 0x8c, 0x34, 0xa3, 0xb2, 0x18, 0xa6, 0x32, 0xba, + 0x55, 0x9b, 0x53, 0x3a, 0x12, 0xeb, 0x95, 0xc9, 0x47, 0x05, 0xc8, 0x0b, 0x28, 0xfe, 0xbb, 0x06, + 0x20, 0x0d, 0xba, 0xd7, 0x47, 0x1b, 0x50, 0x71, 0xc4, 0x28, 0xa4, 0xf0, 0x95, 0x58, 0x85, 0xc5, + 0x3e, 0x4c, 0xe9, 0x65, 0xb9, 0x88, 0xab, 0xfc, 0x36, 0x94, 0x7c, 0x2a, 0x81, 0xce, 0xf3, 0x31, + 0x3a, 0xfb, 0x14, 0x8a, 0x72, 0x01, 0xd5, 0xfa, 0x3d, 0xb8, 0xe8, 0xaf, 0x8f, 0x51, 0xfb, 0xc6, + 0x18, 0xb5, 0x7d, 0x82, 0xb3, 0x92, 0x82, 0xaa, 0x38, 0xd0, 0x94, 0xc5, 0xc1, 0xf8, 0x8b, 0x34, + 0xe4, 0xd7, 0xed, 0x5e, 0xdf, 0x70, 0xe8, 0x1e, 0xe5, 0x1c, 0xe2, 0x0e, 0xba, 0x1e, 0x53, 0xb7, + 0xb2, 0x7a, 0x33, 0xcc, 0x41, 0xa0, 0xc9, 0xbf, 0x3a, 0x43, 0xd5, 0xc5, 0x12, 0xba, 0x58, 0x64, + 0xa8, 0xd4, 0x39, 0x16, 0x8b, 0xfc, 0x24, 0x96, 0x48, 0x5f, 0x4a, 
0x07, 0xbe, 0x54, 0x87, 0xfc, + 0x90, 0x38, 0x41, 0x56, 0xdd, 0x9c, 0xd2, 0x25, 0x00, 0xdd, 0x83, 0x99, 0x68, 0x84, 0xcf, 0x0a, + 0x9c, 0x4a, 0x2b, 0x9c, 0x10, 0x6e, 0x42, 0x29, 0x94, 0x66, 0x72, 0x02, 0xaf, 0xd8, 0x53, 0xb2, + 0xcc, 0x25, 0x19, 0xda, 0x68, 0x4a, 0x2c, 0x6d, 0x4e, 0x89, 0xe0, 0x86, 0xff, 0x17, 0xca, 0x21, + 0x5d, 0x69, 0x14, 0x6f, 0xbc, 0xfb, 0x6c, 0x6d, 0x87, 0x87, 0xfc, 0xc7, 0x2c, 0xca, 0xeb, 0x55, + 0x8d, 0x66, 0x8e, 0x9d, 0xc6, 0xfe, 0x7e, 0x35, 0x85, 0xca, 0x50, 0xd8, 0xdd, 0x3b, 0x68, 0x72, + 0xac, 0x34, 0x7e, 0xcb, 0xa7, 0x20, 0x52, 0x86, 0x92, 0x29, 0xa6, 0x94, 0x4c, 0xa1, 0xc9, 0x4c, + 0x91, 0x0a, 0x32, 0x45, 0xfa, 0x51, 0x05, 0x4a, 0xdc, 0x3e, 0xcd, 0x81, 0x45, 0xb3, 0xd5, 0x2f, + 0x35, 0x80, 0x83, 0x13, 0x4b, 0x06, 0xa0, 0x15, 0xc8, 0xb7, 0x38, 0xf1, 0x9a, 0xc6, 0xfc, 0xf9, + 0x62, 0xac, 0xc9, 0x75, 0x89, 0x85, 0x5e, 0x87, 0xbc, 0x3b, 0x68, 0xb5, 0x88, 0x2b, 0xb3, 0xc6, + 0xe5, 0x68, 0x48, 0x11, 0x0e, 0xaf, 0x4b, 0x3c, 0xba, 0xe4, 0x85, 0x61, 0x76, 0x07, 0x2c, 0x87, + 0x8c, 0x5f, 0x22, 0xf0, 0xf0, 0xcf, 0x34, 0x28, 0x32, 0x29, 0x27, 0x8a, 0x63, 0x57, 0xa1, 0xc0, + 0x64, 0x20, 0x6d, 0x11, 0xc9, 0xa6, 0xf5, 0x00, 0x80, 0xfe, 0x1b, 0x0a, 0xf2, 0x04, 0xcb, 0x60, + 0x56, 0x8b, 0x27, 0xbb, 0xd7, 0xd7, 0x03, 0x54, 0xbc, 0x0d, 0x17, 0x98, 0x55, 0x5a, 0xb4, 0x3e, + 0x95, 0x76, 0x54, 0x2b, 0x38, 0x2d, 0x52, 0xc1, 0xd5, 0x61, 0xba, 0x7f, 0x74, 0xea, 0x9a, 0x2d, + 0xa3, 0x2b, 0xa4, 0xf0, 0xc7, 0xf8, 0xff, 0x00, 0xa9, 0xc4, 0x26, 0x51, 0x17, 0x97, 0xa1, 0xb8, + 0x69, 0xb8, 0x47, 0x42, 0x24, 0xfc, 0x3e, 0x94, 0xf8, 0x70, 0x22, 0x1b, 0x22, 0xc8, 0x1c, 0x19, + 0xee, 0x11, 0x13, 0xbc, 0xac, 0xb3, 0x6f, 0x7c, 0x01, 0x66, 0xf6, 0x2d, 0xa3, 0xef, 0x1e, 0xd9, + 0x32, 0xd6, 0xd2, 0xfa, 0xbc, 0x1a, 0xc0, 0x26, 0xe2, 0x78, 0x17, 0x66, 0x1c, 0xd2, 0x33, 0x4c, + 0xcb, 0xb4, 0x3a, 0xcd, 0xc3, 0x53, 0x8f, 0xb8, 0xa2, 0x7c, 0xaf, 0xf8, 0xe0, 0x47, 0x14, 0x4a, + 0x45, 0x3b, 0xec, 0xda, 0x87, 0xc2, 0xe3, 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0xa5, 0xf7, 0x0c, 0xaf, + 0x25, 
0xad, 0x80, 0xb6, 0xa0, 0xe2, 0xfb, 0x39, 0x83, 0x08, 0x59, 0x22, 0x01, 0x9f, 0xad, 0x91, + 0x85, 0x9d, 0x0c, 0xf8, 0xe5, 0x96, 0x0a, 0x60, 0xa4, 0x0c, 0xab, 0x45, 0xba, 0x3e, 0xa9, 0x54, + 0x32, 0x29, 0x86, 0xa8, 0x92, 0x52, 0x01, 0x8f, 0x66, 0x82, 0x64, 0xc8, 0xdd, 0xf2, 0xe7, 0x29, + 0x40, 0xa3, 0x32, 0x7c, 0xd5, 0xfa, 0xe0, 0x36, 0x54, 0x5c, 0xcf, 0x70, 0xbc, 0x66, 0xe4, 0x72, + 0x53, 0x66, 0x50, 0x3f, 0x56, 0xdd, 0x85, 0x99, 0xbe, 0x63, 0x77, 0x1c, 0xe2, 0xba, 0x4d, 0xcb, + 0xf6, 0xcc, 0x17, 0xa7, 0xa2, 0xc4, 0xaa, 0x48, 0xf0, 0x2e, 0x83, 0xa2, 0x06, 0xe4, 0x5f, 0x98, + 0x5d, 0x8f, 0x38, 0x6e, 0x2d, 0xbb, 0x98, 0x5e, 0xaa, 0xac, 0x3e, 0x38, 0xcb, 0x6a, 0xcb, 0xef, + 0x30, 0xfc, 0x83, 0xd3, 0x3e, 0xd1, 0xe5, 0x5a, 0xb5, 0x6c, 0xc9, 0x85, 0xca, 0x96, 0xdb, 0x00, + 0x01, 0x3e, 0x8d, 0x5a, 0xbb, 0x7b, 0x4f, 0x9f, 0x1d, 0x54, 0xa7, 0x50, 0x09, 0xa6, 0x77, 0xf7, + 0x36, 0x1a, 0x3b, 0x0d, 0x1a, 0xd7, 0xf0, 0x8a, 0xb4, 0x8d, 0x6a, 0x43, 0x34, 0x0f, 0xd3, 0x2f, + 0x29, 0x54, 0xde, 0xfe, 0xd2, 0x7a, 0x9e, 0x8d, 0xb7, 0xda, 0xf8, 0x47, 0x29, 0x28, 0x8b, 0x53, + 0x30, 0xd1, 0x51, 0x54, 0x59, 0xa4, 0x42, 0x2c, 0x68, 0x8d, 0xc4, 0x4f, 0x47, 0x5b, 0x94, 0x62, + 0x72, 0x48, 0xdd, 0x9d, 0x6f, 0x36, 0x69, 0x0b, 0xb3, 0xfa, 0x63, 0x74, 0x0f, 0xaa, 0x2d, 0xee, + 0xee, 0x91, 0xb4, 0xa3, 0xcf, 0x08, 0xb8, 0x92, 0x75, 0xca, 0xfe, 0x69, 0x33, 0x5c, 0x91, 0x76, + 0x0a, 0x7a, 0x49, 0x1e, 0x24, 0x0a, 0x43, 0xb7, 0x21, 0x47, 0x86, 0xc4, 0xf2, 0xdc, 0x5a, 0x91, + 0x05, 0xb0, 0xb2, 0xac, 0xc6, 0x1a, 0x14, 0xaa, 0x8b, 0x49, 0xfc, 0x5f, 0x70, 0x81, 0x55, 0xbd, + 0x8f, 0x1d, 0xc3, 0x52, 0xcb, 0xf3, 0x83, 0x83, 0x1d, 0x61, 0x3a, 0xfa, 0x89, 0x2a, 0x90, 0xda, + 0xda, 0x10, 0x8a, 0xa6, 0xb6, 0x36, 0xf0, 0x27, 0x1a, 0x20, 0x75, 0xdd, 0x44, 0xb6, 0x8c, 0x10, + 0x97, 0xec, 0xd3, 0x01, 0xfb, 0x39, 0xc8, 0x12, 0xc7, 0xb1, 0x1d, 0x66, 0xb5, 0x82, 0xce, 0x07, + 0xf8, 0x96, 0x90, 0x41, 0x27, 0x43, 0xfb, 0xd8, 0x77, 0x0c, 0x4e, 0x4d, 0xf3, 0x45, 0xdd, 0x86, + 0xd9, 0x10, 0xd6, 0x44, 0x81, 0xf4, 0x2e, 
0x5c, 0x64, 0xc4, 0xb6, 0x09, 0xe9, 0xaf, 0x75, 0xcd, + 0x61, 0x22, 0xd7, 0x3e, 0x5c, 0x8a, 0x22, 0x7e, 0xbd, 0x36, 0xc2, 0x6f, 0x09, 0x8e, 0x07, 0x66, + 0x8f, 0x1c, 0xd8, 0x3b, 0xc9, 0xb2, 0xd1, 0xe8, 0x48, 0x6f, 0xdd, 0x22, 0xe3, 0xb0, 0x6f, 0xfc, + 0x2b, 0x0d, 0x2e, 0x8f, 0x2c, 0xff, 0x9a, 0x77, 0x75, 0x01, 0xa0, 0x43, 0x8f, 0x0f, 0x69, 0xd3, + 0x09, 0x7e, 0x5f, 0x54, 0x20, 0xbe, 0x9c, 0x34, 0xc0, 0x94, 0x84, 0x9c, 0x47, 0x90, 0x7b, 0xc2, + 0x5a, 0x35, 0x8a, 0x56, 0x19, 0xa9, 0x95, 0x65, 0xf4, 0xf8, 0x05, 0xb2, 0xa0, 0xb3, 0x6f, 0x96, + 0x5f, 0x09, 0x71, 0x9e, 0xe9, 0x3b, 0x3c, 0x8f, 0x17, 0x74, 0x7f, 0x4c, 0xb9, 0xb7, 0xba, 0x26, + 0xb1, 0x3c, 0x36, 0x9b, 0x61, 0xb3, 0x0a, 0x04, 0x2f, 0x43, 0x95, 0x73, 0x5a, 0x6b, 0xb7, 0x95, + 0x5c, 0xee, 0xd3, 0xd3, 0xc2, 0xf4, 0xf0, 0xaf, 0x35, 0xb8, 0xa0, 0x2c, 0x98, 0xc8, 0x76, 0xaf, + 0x40, 0x8e, 0x37, 0xa4, 0x44, 0x1e, 0x99, 0x0b, 0xaf, 0xe2, 0x6c, 0x74, 0x81, 0x83, 0x96, 0x21, + 0xcf, 0xbf, 0x64, 0xb1, 0x12, 0x8f, 0x2e, 0x91, 0xf0, 0x6d, 0x98, 0x15, 0x20, 0xd2, 0xb3, 0xe3, + 0x8e, 0x09, 0x33, 0x28, 0xfe, 0x18, 0xe6, 0xc2, 0x68, 0x13, 0xa9, 0xa4, 0x08, 0x99, 0x3a, 0x8f, + 0x90, 0x6b, 0x52, 0xc8, 0x67, 0xfd, 0xb6, 0x92, 0xf6, 0xa2, 0xbb, 0xae, 0xee, 0x48, 0x2a, 0xb2, + 0x23, 0xbe, 0x02, 0x92, 0xc4, 0x37, 0xaa, 0xc0, 0xac, 0x3c, 0x0e, 0x3b, 0xa6, 0xeb, 0x17, 0x43, + 0x1f, 0x01, 0x52, 0x81, 0xdf, 0xb4, 0x40, 0x1b, 0xe4, 0x85, 0x63, 0x74, 0x7a, 0xc4, 0x0f, 0xf5, + 0xb4, 0xca, 0x54, 0x81, 0x13, 0x05, 0xc7, 0x3f, 0x68, 0x50, 0x5a, 0xeb, 0x1a, 0x4e, 0x4f, 0x6e, + 0xd6, 0xdb, 0x90, 0xe3, 0xe5, 0xab, 0xb8, 0xf1, 0xdd, 0x09, 0x93, 0x51, 0x71, 0xf9, 0x60, 0x8d, + 0x17, 0xbb, 0x62, 0x15, 0xdd, 0x5c, 0xd1, 0x97, 0xdd, 0x88, 0xf4, 0x69, 0x37, 0xd0, 0xab, 0x90, + 0x35, 0xe8, 0x12, 0x16, 0x50, 0x2a, 0xd1, 0x8b, 0x03, 0xa3, 0xc6, 0x4a, 0x0d, 0x8e, 0x85, 0xdf, + 0x84, 0xa2, 0xc2, 0x81, 0xde, 0x87, 0x1e, 0x37, 0x44, 0x39, 0xb1, 0xb6, 0x7e, 0xb0, 0xf5, 0x9c, + 0x5f, 0x93, 0x2a, 0x00, 0x1b, 0x0d, 0x7f, 0x9c, 0xc2, 0xef, 0x8b, 0x55, 0x22, 
0xe4, 0xa8, 0xf2, + 0x68, 0x49, 0xf2, 0xa4, 0xce, 0x25, 0xcf, 0x09, 0x94, 0x85, 0xfa, 0x13, 0x9d, 0x81, 0xd7, 0x21, + 0xc7, 0xe8, 0xc9, 0x23, 0x30, 0x1f, 0xc3, 0x56, 0x46, 0x0b, 0x8e, 0x88, 0x67, 0xa0, 0xbc, 0xef, + 0x19, 0xde, 0xc0, 0x95, 0x47, 0xe0, 0xf7, 0x1a, 0x54, 0x24, 0x64, 0xd2, 0xe6, 0x90, 0xbc, 0x54, + 0xf3, 0x20, 0xec, 0x5f, 0xa9, 0x2f, 0x41, 0xae, 0x7d, 0xb8, 0x6f, 0x7e, 0x24, 0x1b, 0x79, 0x62, + 0x44, 0xe1, 0x5d, 0xce, 0x87, 0x77, 0xd3, 0xc5, 0x88, 0x5e, 0xcf, 0x1c, 0xe3, 0x85, 0xb7, 0x65, + 0xb5, 0xc9, 0x09, 0xab, 0x82, 0x32, 0x7a, 0x00, 0x60, 0x37, 0x2a, 0xd1, 0x75, 0x67, 0xa5, 0x8f, + 0xda, 0x85, 0x9f, 0x85, 0x0b, 0x6b, 0x03, 0xef, 0xa8, 0x61, 0x19, 0x87, 0x5d, 0x19, 0x34, 0xf0, + 0x1c, 0x20, 0x0a, 0xdc, 0x30, 0x5d, 0x15, 0xda, 0x80, 0x59, 0x0a, 0x25, 0x96, 0x67, 0xb6, 0x94, + 0x08, 0x23, 0xf3, 0x88, 0x16, 0xc9, 0x23, 0x86, 0xeb, 0xbe, 0xb4, 0x9d, 0xb6, 0x50, 0xcd, 0x1f, + 0xe3, 0x0d, 0x4e, 0xfc, 0x99, 0x1b, 0xca, 0x14, 0x5f, 0x95, 0xca, 0x52, 0x40, 0xe5, 0x31, 0xf1, + 0xc6, 0x50, 0xc1, 0x0f, 0xe0, 0xa2, 0xc4, 0x14, 0x5d, 0x97, 0x31, 0xc8, 0x7b, 0x70, 0x4d, 0x22, + 0xaf, 0x1f, 0xd1, 0xbb, 0xc0, 0x53, 0xc1, 0xf0, 0xdf, 0x95, 0xf3, 0x11, 0xd4, 0x7c, 0x39, 0x59, + 0xe9, 0x67, 0x77, 0x55, 0x01, 0x06, 0xae, 0x38, 0x33, 0x05, 0x9d, 0x7d, 0x53, 0x98, 0x63, 0x77, + 0xfd, 0xac, 0x4c, 0xbf, 0xf1, 0x3a, 0xcc, 0x4b, 0x1a, 0xa2, 0x28, 0x0b, 0x13, 0x19, 0x11, 0x28, + 0x8e, 0x88, 0x30, 0x18, 0x5d, 0x3a, 0xde, 0xec, 0x2a, 0x66, 0xd8, 0xb4, 0x8c, 0xa6, 0xa6, 0xd0, + 0xbc, 0xc8, 0x4f, 0x04, 0x15, 0x4c, 0x0d, 0xda, 0x02, 0x4c, 0x09, 0xa8, 0x60, 0xb1, 0x11, 0x14, + 0x3c, 0xb2, 0x11, 0x23, 0xa4, 0x3f, 0x80, 0x05, 0x5f, 0x08, 0x6a, 0xb7, 0xa7, 0xc4, 0xe9, 0x99, + 0xae, 0xab, 0xf4, 0x09, 0xe2, 0x14, 0xbf, 0x03, 0x99, 0x3e, 0x11, 0x31, 0xa5, 0xb8, 0x8a, 0x96, + 0xf9, 0xdb, 0xd8, 0xb2, 0xb2, 0x98, 0xcd, 0xe3, 0x36, 0x5c, 0x97, 0xd4, 0xb9, 0x45, 0x63, 0xc9, + 0x47, 0x85, 0x92, 0x77, 0x48, 0x6e, 0xd6, 0xd1, 0x3b, 0x64, 0x9a, 0xef, 0xbd, 0xbc, 0x43, 0xd2, + 0x5c, 0xa1, 0xfa, 
0xd6, 0x44, 0xb9, 0x62, 0x9b, 0xdb, 0xd4, 0x77, 0xc9, 0x89, 0x88, 0x1d, 0xc2, + 0x5c, 0xd8, 0x93, 0x27, 0x0a, 0x63, 0x73, 0x90, 0xf5, 0xec, 0x63, 0x22, 0x83, 0x18, 0x1f, 0x48, + 0x81, 0x7d, 0x37, 0x9f, 0x48, 0x60, 0x23, 0x20, 0xc6, 0x8e, 0xe4, 0xa4, 0xf2, 0xd2, 0xdd, 0x94, + 0xf5, 0x0f, 0x1f, 0xe0, 0x5d, 0xb8, 0x14, 0x0d, 0x13, 0x13, 0x89, 0xfc, 0x9c, 0x1f, 0xe0, 0xb8, + 0x48, 0x32, 0x11, 0xdd, 0x77, 0x83, 0x60, 0xa0, 0x04, 0x94, 0x89, 0x48, 0xea, 0x50, 0x8f, 0x8b, + 0x2f, 0xff, 0x89, 0xf3, 0xea, 0x87, 0x9b, 0x89, 0x88, 0xb9, 0x01, 0xb1, 0xc9, 0xb7, 0x3f, 0x88, + 0x11, 0xe9, 0xb1, 0x31, 0x42, 0x38, 0x49, 0x10, 0xc5, 0xbe, 0x86, 0x43, 0x27, 0x78, 0x04, 0x01, + 0x74, 0x52, 0x1e, 0x34, 0x87, 0xf8, 0x3c, 0xd8, 0x40, 0x1e, 0x6c, 0x35, 0xec, 0x4e, 0xb4, 0x19, + 0xef, 0x05, 0xb1, 0x73, 0x24, 0x32, 0x4f, 0x44, 0xf8, 0x7d, 0x58, 0x4c, 0x0e, 0xca, 0x93, 0x50, + 0xbe, 0x8f, 0xa1, 0xe0, 0x17, 0x94, 0xca, 0xbb, 0x72, 0x11, 0xf2, 0xbb, 0x7b, 0xfb, 0x4f, 0xd7, + 0xd6, 0x1b, 0x55, 0x6d, 0xf5, 0x1f, 0x69, 0x48, 0x6d, 0x3f, 0x47, 0xdf, 0x82, 0x2c, 0x7f, 0x2e, + 0x1a, 0xf3, 0x9a, 0x56, 0x1f, 0xf7, 0xf0, 0x84, 0xaf, 0x7e, 0xf2, 0xa7, 0xbf, 0x7e, 0x9e, 0xba, + 0x84, 0x2f, 0xac, 0x0c, 0xdf, 0x30, 0xba, 0xfd, 0x23, 0x63, 0xe5, 0x78, 0xb8, 0xc2, 0x72, 0xc2, + 0x43, 0xed, 0x3e, 0x7a, 0x0e, 0xe9, 0xa7, 0x03, 0x0f, 0x25, 0x3e, 0xb5, 0xd5, 0x93, 0x1f, 0xa4, + 0x70, 0x9d, 0x51, 0x9e, 0xc3, 0x33, 0x2a, 0xe5, 0xfe, 0xc0, 0xa3, 0x74, 0x87, 0x50, 0x54, 0xde, + 0x94, 0xd0, 0x99, 0x8f, 0x70, 0xf5, 0xb3, 0xdf, 0xab, 0x30, 0x66, 0xfc, 0xae, 0xe2, 0xcb, 0x2a, + 0x3f, 0xfe, 0xf4, 0xa5, 0xea, 0x73, 0x70, 0x62, 0x45, 0xf5, 0x09, 0x9e, 0x45, 0xa2, 0xfa, 0x28, + 0x4f, 0x11, 0xf1, 0xfa, 0x78, 0x27, 0x16, 0xa5, 0x6b, 0x8b, 0x77, 0xb0, 0x96, 0x87, 0xae, 0xc7, + 0xbc, 0xa3, 0xa8, 0x2f, 0x06, 0xf5, 0xc5, 0x64, 0x04, 0xc1, 0xe9, 0x06, 0xe3, 0x74, 0x05, 0x5f, + 0x52, 0x39, 0xb5, 0x7c, 0xbc, 0x87, 0xda, 0xfd, 0xd5, 0x23, 0xc8, 0xb2, 0x3e, 0x27, 0x6a, 0xca, + 0x8f, 0x7a, 0x4c, 0x87, 0x36, 0xe1, 0x04, 0x84, 0x3a, 
0xa4, 0x78, 0x9e, 0x71, 0x9b, 0xc5, 0x15, + 0x9f, 0x1b, 0x6b, 0x75, 0x3e, 0xd4, 0xee, 0x2f, 0x69, 0xaf, 0x69, 0xab, 0xdf, 0xcb, 0x40, 0x96, + 0xb5, 0x8e, 0x50, 0x1f, 0x20, 0x68, 0x0a, 0x46, 0xf5, 0x1c, 0x69, 0x33, 0x46, 0xf5, 0x1c, 0xed, + 0x27, 0xe2, 0xeb, 0x8c, 0xf3, 0x3c, 0x9e, 0xf3, 0x39, 0xb3, 0x57, 0xfb, 0x15, 0xd6, 0x24, 0xa2, + 0x66, 0x7d, 0x09, 0x45, 0xa5, 0xb9, 0x87, 0xe2, 0x28, 0x86, 0xba, 0x83, 0xd1, 0x63, 0x12, 0xd3, + 0x19, 0xc4, 0x37, 0x19, 0xd3, 0x6b, 0xb8, 0xa6, 0x1a, 0x97, 0xf3, 0x75, 0x18, 0x26, 0x65, 0xfc, + 0xa9, 0x06, 0x95, 0x70, 0x83, 0x0f, 0xdd, 0x8c, 0x21, 0x1d, 0xed, 0x13, 0xd6, 0x6f, 0x8d, 0x47, + 0x4a, 0x14, 0x81, 0xf3, 0x3f, 0x26, 0xa4, 0x6f, 0x50, 0x4c, 0x61, 0x7b, 0xf4, 0x7d, 0x0d, 0x66, + 0x22, 0x6d, 0x3b, 0x14, 0xc7, 0x62, 0xa4, 0x29, 0x58, 0xbf, 0x7d, 0x06, 0x96, 0x90, 0xe4, 0x2e, + 0x93, 0xe4, 0x06, 0xbe, 0x3a, 0x6a, 0x0c, 0xcf, 0xec, 0x11, 0xcf, 0x16, 0xd2, 0xac, 0xfe, 0x33, + 0x0d, 0xf9, 0x75, 0xfe, 0x13, 0x2b, 0xe4, 0x41, 0xc1, 0xef, 0x84, 0xa1, 0x85, 0xb8, 0xae, 0x44, + 0x50, 0xb2, 0xd7, 0xaf, 0x27, 0xce, 0x0b, 0x11, 0xee, 0x30, 0x11, 0x16, 0xf1, 0x15, 0x5f, 0x04, + 0xf1, 0x53, 0xae, 0x15, 0x7e, 0xf9, 0x5e, 0x31, 0xda, 0x6d, 0xba, 0x25, 0xdf, 0xd5, 0xa0, 0xa4, + 0x36, 0xac, 0xd0, 0x8d, 0xd8, 0x7e, 0x88, 0xda, 0xf3, 0xaa, 0xe3, 0x71, 0x28, 0x82, 0xff, 0x3d, + 0xc6, 0xff, 0x26, 0x5e, 0x48, 0xe2, 0xef, 0x30, 0xfc, 0xb0, 0x08, 0xbc, 0xe5, 0x14, 0x2f, 0x42, + 0xa8, 0xa3, 0x15, 0x2f, 0x42, 0xb8, 0x63, 0x75, 0xb6, 0x08, 0x03, 0x86, 0x4f, 0x45, 0x38, 0x01, + 0x08, 0x3a, 0x4c, 0x28, 0xd6, 0xb8, 0xca, 0x25, 0x26, 0xea, 0x83, 0xa3, 0xcd, 0xa9, 0x98, 0x13, + 0x10, 0xe1, 0xdd, 0x35, 0x5d, 0xea, 0x8b, 0xab, 0xbf, 0xcd, 0x40, 0xf1, 0x89, 0x61, 0x5a, 0x1e, + 0xb1, 0x0c, 0xab, 0x45, 0x50, 0x07, 0xb2, 0x2c, 0x4b, 0x45, 0x03, 0x8f, 0xda, 0xf6, 0x89, 0x06, + 0x9e, 0x50, 0x4f, 0x04, 0xdf, 0x66, 0xac, 0xaf, 0xe3, 0xba, 0xcf, 0xba, 0x17, 0xd0, 0x5f, 0x61, + 0xfd, 0x0c, 0xaa, 0xf2, 0x31, 0xe4, 0x78, 0xff, 0x02, 0x45, 0xa8, 0x85, 0xfa, 0x1c, 0xf5, 
0xab, + 0xf1, 0x93, 0x89, 0xa7, 0x4c, 0xe5, 0xe5, 0x32, 0x64, 0xca, 0xec, 0xdb, 0x00, 0x41, 0xc3, 0x2c, + 0x6a, 0xdf, 0x91, 0xfe, 0x5a, 0x7d, 0x31, 0x19, 0x41, 0x30, 0xbe, 0xcf, 0x18, 0xdf, 0xc2, 0xd7, + 0x63, 0x19, 0xb7, 0xfd, 0x05, 0x94, 0x79, 0x0b, 0x32, 0x9b, 0x86, 0x7b, 0x84, 0x22, 0x49, 0x48, + 0x79, 0xdb, 0xad, 0xd7, 0xe3, 0xa6, 0x04, 0xab, 0x5b, 0x8c, 0xd5, 0x02, 0x9e, 0x8f, 0x65, 0x75, + 0x64, 0xb8, 0x34, 0xa6, 0xa3, 0x01, 0x4c, 0xcb, 0xf7, 0x5a, 0x74, 0x2d, 0x62, 0xb3, 0xf0, 0xdb, + 0x6e, 0x7d, 0x21, 0x69, 0x5a, 0x30, 0x5c, 0x62, 0x0c, 0x31, 0xbe, 0x16, 0x6f, 0x54, 0x81, 0xfe, + 0x50, 0xbb, 0xff, 0x9a, 0xb6, 0xfa, 0xc3, 0x2a, 0x64, 0x68, 0xbd, 0x44, 0xb3, 0x48, 0x70, 0xcd, + 0x8c, 0x5a, 0x78, 0xa4, 0xb9, 0x13, 0xb5, 0xf0, 0xe8, 0x0d, 0x35, 0x26, 0x8b, 0xb0, 0x1f, 0x9a, + 0x12, 0x86, 0x45, 0x35, 0xf6, 0xa0, 0xa8, 0x5c, 0x46, 0x51, 0x0c, 0xc5, 0x70, 0xeb, 0x28, 0x9a, + 0x45, 0x62, 0x6e, 0xb2, 0x78, 0x91, 0x31, 0xad, 0xe3, 0x8b, 0x61, 0xa6, 0x6d, 0x8e, 0x46, 0xb9, + 0x7e, 0x0c, 0x25, 0xf5, 0xd6, 0x8a, 0x62, 0x88, 0x46, 0x7a, 0x53, 0xd1, 0x58, 0x11, 0x77, 0xe9, + 0x8d, 0x71, 0x1a, 0xff, 0x67, 0xb5, 0x12, 0x97, 0x72, 0xff, 0x10, 0xf2, 0xe2, 0x2e, 0x1b, 0xa7, + 0x6f, 0xb8, 0x9b, 0x15, 0xa7, 0x6f, 0xe4, 0x22, 0x1c, 0x53, 0x92, 0x30, 0xb6, 0xb4, 0x66, 0x97, + 0x01, 0x5a, 0xb0, 0x7c, 0x4c, 0xbc, 0x24, 0x96, 0x41, 0x7f, 0x26, 0x89, 0xa5, 0x72, 0x5f, 0x1a, + 0xcb, 0xb2, 0x43, 0x3c, 0x71, 0x96, 0xe5, 0x65, 0x04, 0x25, 0x50, 0x54, 0xa3, 0x21, 0x1e, 0x87, + 0x92, 0x58, 0x45, 0x06, 0x5c, 0x45, 0x28, 0x44, 0xdf, 0x01, 0x08, 0x2e, 0xde, 0xd1, 0xc2, 0x20, + 0xb6, 0x7b, 0x17, 0x2d, 0x0c, 0xe2, 0xef, 0xee, 0x31, 0x1e, 0x1c, 0x30, 0xe7, 0x95, 0x2c, 0x65, + 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x88, 0x67, 0x11, 0xdb, 0x18, 0xac, 0xbf, 0x72, + 0x3e, 0xe4, 0xc4, 0xe8, 0x19, 0xc8, 0xd5, 0x62, 0x4b, 0xfa, 0x2f, 0xa9, 0x64, 0x9f, 0x69, 0x50, + 0x0e, 0x5d, 0xf5, 0xd1, 0x9d, 0x84, 0x7d, 0x8e, 0x34, 0x17, 0xeb, 0x77, 0xcf, 0xc4, 0x4b, 0xac, + 0x9d, 0x94, 0x53, 0x21, 0xeb, 
0xc6, 0x1f, 0x68, 0x50, 0x09, 0xf7, 0x07, 0x50, 0x02, 0x83, 0x91, + 0x0e, 0x65, 0x7d, 0xe9, 0x6c, 0xc4, 0x73, 0xec, 0x56, 0x50, 0x4a, 0x7e, 0x08, 0x79, 0xd1, 0x56, + 0x88, 0x73, 0x8b, 0x70, 0x83, 0x33, 0xce, 0x2d, 0x22, 0x3d, 0x89, 0x24, 0xb7, 0xa0, 0x37, 0x74, + 0xc5, 0x13, 0x45, 0xf3, 0x21, 0x89, 0xe5, 0x78, 0x4f, 0x8c, 0x74, 0x2e, 0xc6, 0xb2, 0x0c, 0x3c, + 0x51, 0xb6, 0x1e, 0x50, 0x02, 0xc5, 0x33, 0x3c, 0x31, 0xda, 0xb9, 0x48, 0xf2, 0x44, 0xc6, 0x55, + 0xf1, 0xc4, 0xa0, 0x53, 0x10, 0xe7, 0x89, 0x23, 0xed, 0xdb, 0x38, 0x4f, 0x1c, 0x6d, 0x36, 0x24, + 0xed, 0x2d, 0x63, 0x1e, 0xf2, 0xc4, 0xd9, 0x98, 0xce, 0x02, 0x7a, 0x25, 0xc1, 0xa6, 0xb1, 0xad, + 0xe1, 0xfa, 0xab, 0xe7, 0xc4, 0x1e, 0xef, 0x01, 0x7c, 0x37, 0xa4, 0x07, 0xfc, 0x42, 0x83, 0xb9, + 0xb8, 0xd6, 0x04, 0x4a, 0x60, 0x96, 0xd0, 0x57, 0xae, 0x2f, 0x9f, 0x17, 0xfd, 0x1c, 0x76, 0xf3, + 0x7d, 0xe2, 0x51, 0xf5, 0x77, 0x5f, 0x2e, 0x68, 0x7f, 0xfc, 0x72, 0x41, 0xfb, 0xf3, 0x97, 0x0b, + 0xda, 0x4f, 0xff, 0xb2, 0x30, 0x75, 0x98, 0x63, 0xff, 0xdb, 0xe3, 0x8d, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0x63, 0x1c, 0x78, 0x24, 0x74, 0x32, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto index ddf1ad23329..a6cd00ab7c3 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto @@ -352,11 +352,12 @@ message RangeRequest { bytes key = 1; // range_end is the upper bound on the requested range [key, range_end). // If range_end is '\0', the range is all keys >= key. - // If the range_end is one bit larger than the given key, - // then the range requests get the all keys with the prefix (the given key). - // If both key and range_end are '\0', then range requests returns all keys. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. 
+ // If both key and range_end are '\0', then the range request returns all keys. bytes range_end = 2; - // limit is a limit on the number of keys returned for the request. + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. int64 limit = 3; // revision is the point-in-time of the key-value store to use for the range. // If revision is less or equal to zero, the range is over the newest key-value store. @@ -423,6 +424,14 @@ message PutRequest { // If prev_kv is set, etcd gets the previous key-value pair before changing it. // The previous key-value pair will be returned in the put response. bool prev_kv = 4; + + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + bool ignore_value = 5; + + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + bool ignore_lease = 6; } message PutResponse { @@ -436,13 +445,13 @@ message DeleteRangeRequest { bytes key = 1; // range_end is the key following the last key to delete for the range [key, range_end). // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all - // the all keys with the prefix (the given key). + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). // If range_end is '\0', the range is all keys greater than or equal to the key argument. bytes range_end = 2; // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delte response. + // The previous key-value pairs will be returned in the delete response. bool prev_kv = 3; } @@ -645,6 +654,9 @@ message WatchResponse { // watcher with the same start_revision again. 
int64 compact_revision = 5; + // cancel_reason indicates the reason for canceling the watcher. + string cancel_reason = 6; + repeated mvccpb.Event events = 11; } @@ -725,6 +737,8 @@ message MemberAddResponse { ResponseHeader header = 1; // member is the member information for the added member. Member member = 2; + // members is a list of all members after adding the new member. + repeated Member members = 3; } message MemberRemoveRequest { @@ -734,6 +748,8 @@ message MemberRemoveRequest { message MemberRemoveResponse { ResponseHeader header = 1; + // members is a list of all members after removing the member. + repeated Member members = 2; } message MemberUpdateRequest { @@ -745,6 +761,8 @@ message MemberUpdateRequest { message MemberUpdateResponse{ ResponseHeader header = 1; + // members is a list of all members after updating the member. + repeated Member members = 2; } message MemberListRequest { diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD b/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD index d9478aa74fc..473575baffc 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "cluster.go", + "doc.go", "errors.go", "member.go", "store.go", diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go index 25c45dfce12..2330219f18a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go @@ -178,7 +178,7 @@ func (c *RaftCluster) String() string { fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " ")) var ids []string for id := range c.removed { - ids = append(ids, fmt.Sprintf("%s", id)) + ids = append(ids, id.String()) } fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " ")) return b.String() diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go new file mode 100644 index 00000000000..b07fb2d9285 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package membership describes individual etcd members and clusters of members. +package membership diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go index f2ea0120d74..d3f8f2474a4 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go @@ -36,7 +36,7 @@ const ( var ( membersBucketName = []byte("members") - membersRemovedBuckedName = []byte("members_removed") + membersRemovedBucketName = []byte("members_removed") clusterBucketName = []byte("cluster") StoreMembersPrefix = path.Join(storePrefix, "members") @@ -62,7 +62,7 @@ func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) { tx := be.BatchTx() tx.Lock() tx.UnsafeDelete(membersBucketName, mkey) - tx.UnsafePut(membersRemovedBuckedName, mkey, []byte("removed")) + tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed")) tx.Unlock() } @@ -164,7 +164,7 @@ func mustCreateBackendBuckets(be backend.Backend) { tx.Lock() defer tx.Unlock() 
tx.UnsafeCreateBucket(membersBucketName) - tx.UnsafeCreateBucket(membersRemovedBuckedName) + tx.UnsafeCreateBucket(membersRemovedBucketName) tx.UnsafeCreateBucket(clusterBucketName) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go index 2b549f738f7..90bbd3632a6 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go +++ b/vendor/github.com/coreos/etcd/etcdserver/metrics.go @@ -58,6 +58,12 @@ var ( Name: "proposals_failed_total", Help: "The total number of failed proposals seen.", }) + leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "server", + Name: "lease_expired_total", + Help: "The total number of expired leases.", + }) ) func init() { @@ -67,6 +73,7 @@ func init() { prometheus.MustRegister(proposalsApplied) prometheus.MustRegister(proposalsPending) prometheus.MustRegister(proposalsFailed) + prometheus.MustRegister(leaseExpired) } func monitorFileDescriptor(done <-chan struct{}) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go index 088a4696253..87126f1564c 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/quota.go +++ b/vendor/github.com/coreos/etcd/etcdserver/quota.go @@ -16,7 +16,15 @@ package etcdserver import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/backend" +) + +const ( + // DefaultQuotaBytes is the number of bytes the backend Size may + // consume before exceeding the space quota. + DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB + // MaxQuotaBytes is the maximum number of bytes suggested for a backend + // quota. A larger quota may lead to degraded performance. + MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB ) // Quota represents an arbitrary quota against arbitrary requests. 
Each request @@ -57,11 +65,10 @@ func NewBackendQuota(s *EtcdServer) Quota { } if s.Cfg.QuotaBackendBytes == 0 { // use default size if no quota size given - return &backendQuota{s, backend.DefaultQuotaBytes} + return &backendQuota{s, DefaultQuotaBytes} } - if s.Cfg.QuotaBackendBytes > backend.MaxQuotaBytes { - plog.Warningf("backend quota %v exceeds maximum quota %v; using maximum", s.Cfg.QuotaBackendBytes, backend.MaxQuotaBytes) - return &backendQuota{s, backend.MaxQuotaBytes} + if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { + plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) } return &backendQuota{s, s.Cfg.QuotaBackendBytes} } diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go index d7ec176eb3a..dcb894f82fb 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/raft.go +++ b/vendor/github.com/coreos/etcd/etcdserver/raft.go @@ -83,7 +83,8 @@ type RaftTimer interface { type apply struct { entries []raftpb.Entry snapshot raftpb.Snapshot - raftDone <-chan struct{} // rx {} after raft has persisted messages + // notifyc synchronizes etcd server applies with the raft node + notifyc chan struct{} } type raftNode struct { @@ -94,14 +95,7 @@ type raftNode struct { term uint64 lead uint64 - mu sync.Mutex - // last lead elected time - lt time.Time - - // to check if msg receiver is removed from cluster - isIDRemoved func(id uint64) bool - - raft.Node + raftNodeConfig // a chan to send/receive snapshot msgSnapC chan raftpb.Message @@ -113,28 +107,51 @@ type raftNode struct { readStateC chan raft.ReadState // utility - ticker <-chan time.Time + ticker *time.Ticker // contention detectors for raft heartbeat message - td *contention.TimeoutDetector - heartbeat time.Duration // for logging - raftStorage *raft.MemoryStorage - storage Storage - // transport specifies the transport to send and receive msgs to members. - // Sending messages MUST NOT block. 
It is okay to drop messages, since - // clients should timeout and reissue their messages. - // If transport is nil, server will panic. - transport rafthttp.Transporter + td *contention.TimeoutDetector stopped chan struct{} done chan struct{} } +type raftNodeConfig struct { + // to check if msg receiver is removed from cluster + isIDRemoved func(id uint64) bool + raft.Node + raftStorage *raft.MemoryStorage + storage Storage + heartbeat time.Duration // for logging + // transport specifies the transport to send and receive msgs to members. + // Sending messages MUST NOT block. It is okay to drop messages, since + // clients should timeout and reissue their messages. + // If transport is nil, server will panic. + transport rafthttp.Transporter +} + +func newRaftNode(cfg raftNodeConfig) *raftNode { + r := &raftNode{ + raftNodeConfig: cfg, + // set up contention detectors for raft heartbeat message. + // expect to send a heartbeat within 2 heartbeat intervals. + td: contention.NewTimeoutDetector(2 * cfg.heartbeat), + readStateC: make(chan raft.ReadState, 1), + msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), + applyc: make(chan apply), + stopped: make(chan struct{}), + done: make(chan struct{}), + } + if r.heartbeat == 0 { + r.ticker = &time.Ticker{} + } else { + r.ticker = time.NewTicker(r.heartbeat) + } + return r +} + // start prepares and starts raftNode in a new goroutine. It is no longer safe // to modify the fields after it has been started. 
func (r *raftNode) start(rh *raftReadyHandler) { - r.applyc = make(chan apply) - r.stopped = make(chan struct{}) - r.done = make(chan struct{}) internalTimeout := time.Second go func() { @@ -143,14 +160,12 @@ func (r *raftNode) start(rh *raftReadyHandler) { for { select { - case <-r.ticker: + case <-r.ticker.C: r.Tick() case rd := <-r.Ready(): if rd.SoftState != nil { - if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead { - r.mu.Lock() - r.lt = time.Now() - r.mu.Unlock() + newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead + if newLeader { leaderChanges.Inc() } @@ -162,7 +177,8 @@ func (r *raftNode) start(rh *raftReadyHandler) { atomic.StoreUint64(&r.lead, rd.SoftState.Lead) islead = rd.RaftState == raft.StateLeader - rh.updateLeadership() + rh.updateLeadership(newLeader) + r.td.Reset() } if len(rd.ReadStates) != 0 { @@ -175,11 +191,11 @@ func (r *raftNode) start(rh *raftReadyHandler) { } } - raftDone := make(chan struct{}, 1) + notifyc := make(chan struct{}, 1) ap := apply{ entries: rd.CommittedEntries, snapshot: rd.Snapshot, - raftDone: raftDone, + notifyc: notifyc, } updateCommittedIndex(&ap, rh) @@ -195,7 +211,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { // For more details, check raft thesis 10.2.1 if islead { // gofail: var raftBeforeLeaderSend struct{} - r.sendMessages(rd.Messages) + r.transport.Send(r.processMessages(rd.Messages)) } // gofail: var raftBeforeSave struct{} @@ -212,6 +228,9 @@ func (r *raftNode) start(rh *raftReadyHandler) { if err := r.storage.SaveSnap(rd.Snapshot); err != nil { plog.Fatalf("raft save snapshot error: %v", err) } + // etcdserver now claim the snapshot has been persisted onto the disk + notifyc <- struct{}{} + // gofail: var raftAfterSaveSnap struct{} r.raftStorage.ApplySnapshot(rd.Snapshot) plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) @@ -221,10 +240,44 @@ func (r *raftNode) start(rh 
*raftReadyHandler) { r.raftStorage.Append(rd.Entries) if !islead { + // finish processing incoming messages before we signal raftdone chan + msgs := r.processMessages(rd.Messages) + + // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots + notifyc <- struct{}{} + + // Candidate or follower needs to wait for all pending configuration + // changes to be applied before sending messages. + // Otherwise we might incorrectly count votes (e.g. votes from removed members). + // Also slow machine's follower raft-layer could proceed to become the leader + // on its own single-node cluster, before apply-layer applies the config change. + // We simply wait for ALL pending entries to be applied for now. + // We might improve this later on if it causes unnecessary long blocking issues. + waitApply := false + for _, ent := range rd.CommittedEntries { + if ent.Type == raftpb.EntryConfChange { + waitApply = true + break + } + } + if waitApply { + // blocks until 'applyAll' calls 'applyWait.Trigger' + // to be in sync with scheduled config-change job + // (assume notifyc has cap of 1) + select { + case notifyc <- struct{}{}: + case <-r.stopped: + return + } + } + // gofail: var raftBeforeFollowerSend struct{} - r.sendMessages(rd.Messages) + r.transport.Send(msgs) + } else { + // leader already processed 'MsgSnap' and signaled + notifyc <- struct{}{} } - raftDone <- struct{}{} + r.Advance() case <-r.stopped: return @@ -246,7 +299,7 @@ func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { } } -func (r *raftNode) sendMessages(ms []raftpb.Message) { +func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { sentAppResp := false for i := len(ms) - 1; i >= 0; i-- { if r.isIDRemoved(ms[i].To) { @@ -282,20 +335,13 @@ func (r *raftNode) sendMessages(ms []raftpb.Message) { } } } - - r.transport.Send(ms) + return ms } func (r *raftNode) apply() chan apply { return r.applyc } -func (r *raftNode) leadElectedTime() time.Time { - 
r.mu.Lock() - defer r.mu.Unlock() - return r.lt -} - func (r *raftNode) stop() { r.stopped <- struct{}{} <-r.done @@ -303,6 +349,7 @@ func (r *raftNode) stop() { func (r *raftNode) onStop() { r.Stop() + r.ticker.Stop() r.transport.Stop() if err := r.storage.Close(); err != nil { plog.Panicf("raft close storage error: %v", err) diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go index 98eb2cc7b29..271c5e77313 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/server.go @@ -23,7 +23,6 @@ import ( "net/http" "os" "path" - "path/filepath" "regexp" "sync" "sync/atomic" @@ -41,7 +40,6 @@ import ( "github.com/coreos/etcd/lease" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/contention" "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/idutil" "github.com/coreos/etcd/pkg/pbutil" @@ -62,7 +60,7 @@ import ( ) const ( - DefaultSnapCount = 10000 + DefaultSnapCount = 100000 StoreClusterPrefix = "/0" StoreKeysPrefix = "/1" @@ -77,7 +75,6 @@ const ( // (since it will timeout). monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second - databaseFilename = "db" // max number of in-flight snapshot messages etcdserver allows to have // This number is more than enough for most clusters with 5 machines. maxInFlightMsgSnap = 16 @@ -85,7 +82,8 @@ const ( releaseDelayAfterSnapshot = 30 * time.Second // maxPendingRevokes is the maximum number of outstanding expired lease revocations. - maxPendingRevokes = 16 + maxPendingRevokes = 16 + recommendedMaxRequestBytes = 10 * 1024 * 1024 ) var ( @@ -135,15 +133,15 @@ type Server interface { // AddMember attempts to add a member into the cluster. It will return // ErrIDRemoved if member ID is removed from the cluster, or return // ErrIDExists if member ID exists in the cluster. 
- AddMember(ctx context.Context, memb membership.Member) error + AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) // RemoveMember attempts to remove a member from the cluster. It will // return ErrIDRemoved if member ID is removed from the cluster, or return // ErrIDNotFound if member ID is not in the cluster. - RemoveMember(ctx context.Context, id uint64) error + RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) // UpdateMember attempts to update an existing member in the cluster. It will // return ErrIDNotFound if the member ID does not exist. - UpdateMember(ctx context.Context, updateMemb membership.Member) error + UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) // ClusterVersion is the cluster-wide minimum major.minor version. // Cluster version is set to the min version that an etcd member is @@ -201,7 +199,8 @@ type EtcdServer struct { cluster *membership.RaftCluster - store store.Store + store store.Store + snapshotter *snap.Snapshotter applyV2 ApplierV2 @@ -221,7 +220,7 @@ type EtcdServer struct { stats *stats.ServerStats lstats *stats.LeaderStats - SyncTicker <-chan time.Time + SyncTicker *time.Ticker // compactor is used to auto-compact the KV. compactor *compactor.Periodic @@ -238,6 +237,14 @@ type EtcdServer struct { // wg is used to wait for the go routines that depends on the server state // to exit when stopping the server. wg sync.WaitGroup + + // ctx is used for etcd-initiated requests that may need to be canceled + // on etcd server shutdown. + ctx context.Context + cancel context.CancelFunc + + leadTimeMu sync.RWMutex + leadElectedTime time.Time } // NewServer creates a new EtcdServer from the supplied configuration. 
The @@ -253,6 +260,10 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { cl *membership.RaftCluster ) + if cfg.MaxRequestBytes > recommendedMaxRequestBytes { + plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) + } + if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { return nil, fmt.Errorf("cannot access data directory: %v", terr) } @@ -264,23 +275,9 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { } ss := snap.New(cfg.SnapDir()) - bepath := filepath.Join(cfg.SnapDir(), databaseFilename) + bepath := cfg.backendPath() beExist := fileutil.Exist(bepath) - - var be backend.Backend - beOpened := make(chan struct{}) - go func() { - be = backend.NewDefaultBackend(bepath) - beOpened <- struct{}{} - }() - - select { - case <-beOpened: - case <-time.After(time.Second): - plog.Warningf("another etcd process is running with the same data dir and holding the file lock.") - plog.Warningf("waiting for it to exit before starting...") - <-beOpened - } + be := openBackend(cfg) defer func() { if err != nil { @@ -378,6 +375,9 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { plog.Panicf("recovered store from snapshot error: %v", err) } plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) + if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { + plog.Panicf("recovering backend from snapshot error: %v", err) + } } cfg.Print() if !cfg.ForceNewCluster { @@ -400,39 +400,32 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { return nil, fmt.Errorf("cannot access member directory: %v", terr) } - sstats := &stats.ServerStats{ - Name: cfg.Name, - ID: id.String(), - } - sstats.Initialize() + sstats := stats.NewServerStats(cfg.Name, id.String()) lstats := stats.NewLeaderStats(id.String()) heartbeat := time.Duration(cfg.TickMs) * time.Millisecond srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: 
cfg, - snapCount: cfg.SnapCount, - errorc: make(chan error, 1), - store: st, - r: raftNode{ - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - ticker: time.Tick(heartbeat), - // set up contention detectors for raft heartbeat message. - // expect to send a heartbeat within 2 heartbeat intervals. - td: contention.NewTimeoutDetector(2 * heartbeat), - heartbeat: heartbeat, - raftStorage: s, - storage: NewStorage(w, ss), - msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - readStateC: make(chan raft.ReadState, 1), - }, + readych: make(chan struct{}), + Cfg: cfg, + snapCount: cfg.SnapCount, + errorc: make(chan error, 1), + store: st, + snapshotter: ss, + r: *newRaftNode( + raftNodeConfig{ + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + heartbeat: heartbeat, + raftStorage: s, + storage: NewStorage(w, ss), + }, + ), id: id, attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, cluster: cl, stats: sstats, lstats: lstats, - SyncTicker: time.Tick(500 * time.Millisecond), + SyncTicker: time.NewTicker(500 * time.Millisecond), peerRt: prt, reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), forceVersionC: make(chan struct{}), @@ -458,12 +451,26 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) } } - srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) + newSrv := srv // since srv == nil in defer if srv is returned as nil + defer func() { + // closing backend without first closing kv can cause + // resumed compactions to fail with closed tx errors + if err != nil { + newSrv.kv.Close() + } + }() - srv.authStore = auth.NewAuthStore(srv.be, + srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) + tp, err := auth.NewTokenProvider(cfg.AuthToken, func(index uint64) <-chan struct{} { return srv.applyWait.Wait(index) - }) + }, + ) + 
if err != nil { + plog.Errorf("failed to create token provider: %s", err) + return nil, err + } + srv.authStore = auth.NewAuthStore(srv.be, tp) if h := cfg.AutoCompactionRetention; h != 0 { srv.compactor = compactor.NewPeriodic(h, srv.kv, srv) srv.compactor.Run() @@ -531,6 +538,7 @@ func (s *EtcdServer) start() { s.done = make(chan struct{}) s.stop = make(chan struct{}) s.stopping = make(chan struct{}) + s.ctx, s.cancel = context.WithCancel(context.Background()) s.readwaitc = make(chan struct{}, 1) s.readNotifier = newNotifier() if s.ClusterVersion() != nil { @@ -603,16 +611,19 @@ type etcdProgress struct { // and helps decouple state machine logic from Raft algorithms. // TODO: add a state machine interface to apply the commit entries and do snapshot/recover type raftReadyHandler struct { - updateLeadership func() + updateLeadership func(newLeader bool) updateCommittedIndex func(uint64) } func (s *EtcdServer) run() { - snap, err := s.r.raftStorage.Snapshot() + sn, err := s.r.raftStorage.Snapshot() if err != nil { plog.Panicf("get snapshot from raft storage error: %v", err) } + // asynchronously accept apply packets, dispatch progress in-order + sched := schedule.NewFIFOScheduler() + var ( smu sync.RWMutex syncC <-chan time.Time @@ -629,7 +640,7 @@ func (s *EtcdServer) run() { return } rh := &raftReadyHandler{ - updateLeadership: func() { + updateLeadership: func(newLeader bool) { if !s.isLeader() { if s.lessor != nil { s.lessor.Demote() @@ -639,7 +650,13 @@ func (s *EtcdServer) run() { } setSyncC(nil) } else { - setSyncC(s.SyncTicker) + if newLeader { + t := time.Now() + s.leadTimeMu.Lock() + s.leadElectedTime = t + s.leadTimeMu.Unlock() + } + setSyncC(s.SyncTicker.C) if s.compactor != nil { s.compactor.Resume() } @@ -650,9 +667,6 @@ func (s *EtcdServer) run() { if s.stats != nil { s.stats.BecomeLeader() } - if s.r.td != nil { - s.r.td.Reset() - } }, updateCommittedIndex: func(ci uint64) { cci := s.getCommittedIndex() @@ -663,25 +677,26 @@ func (s *EtcdServer) 
run() { } s.r.start(rh) - // asynchronously accept apply packets, dispatch progress in-order - sched := schedule.NewFIFOScheduler() ep := etcdProgress{ - confState: snap.Metadata.ConfState, - snapi: snap.Metadata.Index, - appliedt: snap.Metadata.Term, - appliedi: snap.Metadata.Index, + confState: sn.Metadata.ConfState, + snapi: sn.Metadata.Index, + appliedt: sn.Metadata.Term, + appliedi: sn.Metadata.Index, } defer func() { s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping close(s.stopping) s.wgMu.Unlock() + s.cancel() sched.Stop() // wait for gouroutines before closing raft so wal stays open s.wg.Wait() + s.SyncTicker.Stop() + // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines // by adding a peer after raft stops the transport s.r.stop() @@ -728,7 +743,8 @@ func (s *EtcdServer) run() { } lid := lease.ID s.goAttach(func() { - s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(lid)}) + s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) + leaseExpired.Inc() <-c }) } @@ -762,7 +778,7 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { // wait for the raft routine to finish the disk writes before triggering a // snapshot. or applied index might be greater than the last index in raft // storage, since the raft routine might be slower than apply routine. 
- <-apply.raftDone + <-apply.notifyc s.triggerSnapshot(ep) select { @@ -787,23 +803,19 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { apply.snapshot.Metadata.Index, ep.appliedi) } - snapfn, err := s.r.storage.DBFilePath(apply.snapshot.Metadata.Index) + // wait for raftNode to persist snapshot onto the disk + <-apply.notifyc + + newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) if err != nil { - plog.Panicf("get database snapshot file path error: %v", err) + plog.Panic(err) } - fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename) - if err := os.Rename(snapfn, fn); err != nil { - plog.Panicf("rename snapshot file error: %v", err) - } - - newbe := backend.NewDefaultBackend(fn) - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. if s.lessor != nil { plog.Info("recovering lessor...") - s.lessor.Recover(newbe, s.kv) + s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() }) plog.Info("finished recovering lessor") } @@ -955,7 +967,7 @@ func (s *EtcdServer) TransferLeadership() error { } tm := s.Cfg.ReqTimeout() - ctx, cancel := context.WithTimeout(context.TODO(), tm) + ctx, cancel := context.WithTimeout(s.ctx, tm) err := s.transferLeadership(ctx, s.Lead(), uint64(transferee)) cancel() return err @@ -1015,7 +1027,7 @@ func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { if s.authStore == nil { - // In the context of ordinal etcd process, s.authStore will never be nil. + // In the context of ordinary etcd process, s.authStore will never be nil. 
// This branch is for handling cases in server_test.go return nil } @@ -1026,7 +1038,7 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err // in the state machine layer // However, both of membership change and role management requires the root privilege. // So careful operation by admins can prevent the problem. - authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) + authInfo, err := s.AuthInfoFromCtx(ctx) if err != nil { return err } @@ -1034,27 +1046,27 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err return s.AuthStore().IsAdminPermitted(authInfo) } -func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) error { +func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { if err := s.checkMembershipOperationPermission(ctx); err != nil { - return err + return nil, err } if s.Cfg.StrictReconfigCheck { // by default StrictReconfigCheck is enabled; reject new members if unhealthy if !s.cluster.IsReadyToAddNewMember() { plog.Warningf("not enough started members, rejecting member add %+v", memb) - return ErrNotEnoughStartedMembers + return nil, ErrNotEnoughStartedMembers } if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) - return ErrUnhealthy + return nil, ErrUnhealthy } } // TODO: move Member to protobuf type b, err := json.Marshal(memb) if err != nil { - return err + return nil, err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeAddNode, @@ -1064,14 +1076,14 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) erro return s.configure(ctx, cc) } -func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error { +func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { if err := s.checkMembershipOperationPermission(ctx); err 
!= nil { - return err + return nil, err } // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss if err := s.mayRemoveMember(types.ID(id)); err != nil { - return err + return nil, err } cc := raftpb.ConfChange{ @@ -1107,14 +1119,14 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { return nil } -func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) error { +func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { b, merr := json.Marshal(memb) if merr != nil { - return merr + return nil, merr } if err := s.checkMembershipOperationPermission(ctx); err != nil { - return err + return nil, err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeUpdateNode, @@ -1137,31 +1149,34 @@ func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } +type confChangeResponse struct { + membs []*membership.Member + err error +} + // configure sends a configuration change through consensus and // then waits for it to be applied to the server. It // will block until the change is performed or there is an error. 
-func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error { +func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { cc.ID = s.reqIDGen.Next() ch := s.w.Register(cc.ID) start := time.Now() if err := s.r.ProposeConfChange(ctx, cc); err != nil { s.w.Trigger(cc.ID, nil) - return err + return nil, err } select { case x := <-ch: - if err, ok := x.(error); ok { - return err + if x == nil { + plog.Panicf("configure trigger value should never be nil") } - if x != nil { - plog.Panicf("return type should always be error") - } - return nil + resp := x.(*confChangeResponse) + return resp.membs, resp.err case <-ctx.Done(): s.w.Trigger(cc.ID, nil) // GC wait - return s.parseProposeCtxErr(ctx.Err(), start) + return nil, s.parseProposeCtxErr(ctx.Err(), start) case <-s.stopping: - return ErrStopped + return nil, ErrStopped } } @@ -1169,7 +1184,6 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error // This makes no guarantee that the request will be proposed or performed. // The request will be canceled after the given timeout. func (s *EtcdServer) sync(timeout time.Duration) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) req := pb.Request{ Method: "SYNC", ID: s.reqIDGen.Next(), @@ -1178,6 +1192,7 @@ func (s *EtcdServer) sync(timeout time.Duration) { data := pbutil.MustMarshal(&req) // There is no promise that node has leader when do SYNC request, // so it uses goroutine to propose. 
+ ctx, cancel := context.WithTimeout(s.ctx, timeout) s.goAttach(func() { s.r.Propose(ctx, data) cancel() @@ -1202,7 +1217,7 @@ func (s *EtcdServer) publish(timeout time.Duration) { } for { - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(s.ctx, timeout) _, err := s.Do(ctx, req) cancel() switch err { @@ -1262,7 +1277,7 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appl removedSelf, err := s.applyConfChange(cc, confState) s.setAppliedIndex(e.Index) shouldStop = shouldStop || removedSelf - s.w.Trigger(cc.ID, err) + s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) default: plog.Panicf("entry type should be either EntryNormal or EntryConfChange") } @@ -1347,8 +1362,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { Action: pb.AlarmRequest_ACTIVATE, Alarm: pb.AlarmType_NOSPACE, } - r := pb.InternalRaftRequest{Alarm: a} - s.processInternalRaftRequest(context.TODO(), r) + s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) s.w.Trigger(id, ar) }) } @@ -1544,7 +1558,7 @@ func (s *EtcdServer) updateClusterVersion(ver string) { Path: membership.StoreClusterVersionKey(), Val: ver, } - ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) + ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) _, err := s.Do(ctx, req) cancel() switch err { @@ -1563,7 +1577,9 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { case context.Canceled: return ErrCanceled case context.DeadlineExceeded: - curLeadElected := s.r.leadElectedTime() + s.leadTimeMu.RLock() + curLeadElected := s.leadElectedTime + s.leadTimeMu.RUnlock() prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) if start.After(prevLeadLost) && start.Before(curLeadElected) { return ErrTimeoutDueToLeaderFail diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go 
b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go index 9cfc852168b..928aa95b6b1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go +++ b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go @@ -60,9 +60,14 @@ func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { n, err := snapshot.WriteTo(pw) if err == nil { plog.Infof("wrote database snapshot out [total bytes: %d]", n) + } else { + plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) } pw.CloseWithError(err) - snapshot.Close() + err = snapshot.Close() + if err != nil { + plog.Panicf("failed to close database snapshot: %v", err) + } }() return pr } diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go index 1bed85474e3..8f6a54ff751 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go @@ -24,25 +24,30 @@ import ( // LeaderStats is used by the leader in an etcd cluster, and encapsulates // statistics about communication with its followers type LeaderStats struct { + leaderStats + sync.Mutex +} + +type leaderStats struct { // Leader is the ID of the leader in the etcd cluster. // TODO(jonboulle): clarify that these are IDs, not names Leader string `json:"leader"` Followers map[string]*FollowerStats `json:"followers"` - - sync.Mutex } // NewLeaderStats generates a new LeaderStats with the given id as leader func NewLeaderStats(id string) *LeaderStats { return &LeaderStats{ - Leader: id, - Followers: make(map[string]*FollowerStats), + leaderStats: leaderStats{ + Leader: id, + Followers: make(map[string]*FollowerStats), + }, } } func (ls *LeaderStats) JSON() []byte { ls.Lock() - stats := *ls + stats := ls.leaderStats ls.Unlock() b, err := json.Marshal(stats) // TODO(jonboulle): appropriate error handling? 
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go index cd450e2d199..0278e885cf9 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go @@ -26,6 +26,26 @@ import ( // ServerStats encapsulates various statistics about an EtcdServer and its // communication with other members of the cluster type ServerStats struct { + serverStats + sync.Mutex +} + +func NewServerStats(name, id string) *ServerStats { + ss := &ServerStats{ + serverStats: serverStats{ + Name: name, + ID: id, + }, + } + now := time.Now() + ss.StartTime = now + ss.LeaderInfo.StartTime = now + ss.sendRateQueue = &statsQueue{back: -1} + ss.recvRateQueue = &statsQueue{back: -1} + return ss +} + +type serverStats struct { Name string `json:"name"` // ID is the raft ID of the node. // TODO(jonboulle): use ID instead of name? @@ -49,17 +69,15 @@ type ServerStats struct { sendRateQueue *statsQueue recvRateQueue *statsQueue - - sync.Mutex } func (ss *ServerStats) JSON() []byte { ss.Lock() - stats := *ss + stats := ss.serverStats ss.Unlock() stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String() - stats.SendingPkgRate, stats.SendingBandwidthRate = stats.SendRates() - stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.RecvRates() + stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate() + stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate() b, err := json.Marshal(stats) // TODO(jonboulle): appropriate error handling? 
if err != nil { @@ -68,32 +86,6 @@ func (ss *ServerStats) JSON() []byte { return b } -// Initialize clears the statistics of ServerStats and resets its start time -func (ss *ServerStats) Initialize() { - if ss == nil { - return - } - now := time.Now() - ss.StartTime = now - ss.LeaderInfo.StartTime = now - ss.sendRateQueue = &statsQueue{ - back: -1, - } - ss.recvRateQueue = &statsQueue{ - back: -1, - } -} - -// RecvRates calculates and returns the rate of received append requests -func (ss *ServerStats) RecvRates() (float64, float64) { - return ss.recvRateQueue.Rate() -} - -// SendRates calculates and returns the rate of sent append requests -func (ss *ServerStats) SendRates() (float64, float64) { - return ss.sendRateQueue.Rate() -} - // RecvAppendReq updates the ServerStats in response to an AppendRequest // from the given leader being received func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go index 693618fbd51..aa8f87569db 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/storage.go +++ b/vendor/github.com/coreos/etcd/etcdserver/storage.go @@ -32,9 +32,6 @@ type Storage interface { Save(st raftpb.HardState, ents []raftpb.Entry) error // SaveSnap function saves snapshot to the underlying stable storage. SaveSnap(snap raftpb.Snapshot) error - // DBFilePath returns the file path of database snapshot saved with given - // id. - DBFilePath(id uint64) (string, error) // Close closes the Storage and performs finalization. 
Close() error } diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go index 66084ae1244..e3896ffc2d3 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/util.go @@ -87,7 +87,7 @@ type notifier struct { func newNotifier() *notifier { return ¬ifier{ - c: make(chan struct{}, 0), + c: make(chan struct{}), } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go index 60653cb6dff..ae449bbf22f 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go @@ -19,6 +19,8 @@ import ( "encoding/binary" "time" + "github.com/gogo/protobuf/proto" + "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" @@ -27,17 +29,10 @@ import ( "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/raft" - "github.com/coreos/go-semver/semver" "golang.org/x/net/context" ) const ( - // the max request size that raft accepts. - // TODO: make this a flag? But we probably do not want to - // accept large request which might block raft stream. User - // specify a large value might end up with shooting in the foot. - maxRequestBytes = 1.5 * 1024 * 1024 - // In the health case, there might be a small gap (10s of entries) between // the applied index and committed index. // However, if the committed entries are very heavy to apply, the gap might grow. 
@@ -45,10 +40,6 @@ const ( maxGapBetweenApplyAndCommitIndex = 5000 ) -var ( - newRangeClusterVersion = *semver.Must(semver.NewVersion("3.1.0")) -) - type RaftKV interface { Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) @@ -91,11 +82,6 @@ type Authenticator interface { } func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - // TODO: remove this checking when we release etcd 3.2 - if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) { - return s.legacyRange(ctx, r) - } - if !r.Serializable { err := s.linearizableReadNotify(ctx) if err != nil { @@ -107,65 +93,30 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe chk := func(ai *auth.AuthInfo) error { return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) } - get := func() { resp, err = s.applyV3Base.Range(noTxn, r) } + get := func() { resp, err = s.applyV3Base.Range(nil, r) } if serr := s.doSerialize(ctx, chk, get); serr != nil { return nil, serr } return resp, err } -// TODO: remove this func when we release etcd 3.2 -func (s *EtcdServer) legacyRange(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if r.Serializable { - var resp *pb.RangeResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) - } - get := func() { resp, err = s.applyV3Base.Range(noTxn, r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - } - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.RangeResponse), nil -} - func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - result, err := 
s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Put: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.PutResponse), nil + return resp.(*pb.PutResponse), nil } func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.DeleteRangeResponse), nil + return resp.(*pb.DeleteRangeResponse), nil } func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - // TODO: remove this checking when we release etcd 3.2 - if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) { - return s.legacyTxn(ctx, r) - } - if isTxnReadonly(r) { if !isTxnSerializable(r) { err := s.linearizableReadNotify(ctx) @@ -184,38 +135,11 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse } return resp, err } - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.TxnResponse), nil -} - -// TODO: remove this func when we release etcd 3.2 -func (s *EtcdServer) legacyTxn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if isTxnSerializable(r) { - var resp *pb.TxnResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return checkTxnAuth(s.authStore, ai, r) - } - get := func() { resp, err = s.applyV3Base.Txn(r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - 
} - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.TxnResponse), nil + return resp.(*pb.TxnResponse), nil } func isTxnSerializable(r *pb.TxnRequest) bool { @@ -280,25 +204,19 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (* // only use positive int64 id's r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) } - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.LeaseGrantResponse), nil + return resp.(*pb.LeaseGrantResponse), nil } func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.LeaseRevokeResponse), nil + return resp.(*pb.LeaseRevokeResponse), nil } func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { @@ -394,54 +312,45 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) } func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AlarmResponse), nil + return resp.(*pb.AlarmResponse), nil } func (s *EtcdServer) AuthEnable(ctx 
context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthEnableResponse), nil + return resp.(*pb.AuthEnableResponse), nil } func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthDisableResponse), nil + return resp.(*pb.AuthDisableResponse), nil } func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - var result *applyResult - - err := s.linearizableReadNotify(ctx) - if err != nil { + if err := s.linearizableReadNotify(ctx); err != nil { return nil, err } + var resp proto.Message for { checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) if err != nil { - plog.Errorf("invalid authentication request to user %s was issued", r.Name) + if err != auth.ErrAuthNotEnabled { + plog.Errorf("invalid authentication request to user %s was issued", r.Name) + } return nil, err } - st, err := s.AuthStore().GenSimpleToken() + st, err := s.AuthStore().GenTokenPrefix() if err != nil { return nil, err } @@ -452,172 +361,147 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest SimpleToken: st, } - result, err = s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) + resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) if err != nil { return nil, err } - if result.err 
!= nil { - return nil, result.err + if checkedRevision == s.AuthStore().Revision() { + break } - - if checkedRevision != s.AuthStore().Revision() { - plog.Infof("revision when password checked is obsolete, retrying") - continue - } - - break + plog.Infof("revision when password checked is obsolete, retrying") } - return result.resp.(*pb.AuthenticateResponse), nil + return resp.(*pb.AuthenticateResponse), nil } func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserAddResponse), nil + return resp.(*pb.AuthUserAddResponse), nil } func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserDeleteResponse), nil + return resp.(*pb.AuthUserDeleteResponse), nil } func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserChangePasswordResponse), nil + return resp.(*pb.AuthUserChangePasswordResponse), nil } func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) 
(*pb.AuthUserGrantRoleResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserGrantRoleResponse), nil + return resp.(*pb.AuthUserGrantRoleResponse), nil } func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserGetResponse), nil + return resp.(*pb.AuthUserGetResponse), nil } func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserListResponse), nil + return resp.(*pb.AuthUserListResponse), nil } func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserRevokeRoleResponse), nil + return resp.(*pb.AuthUserRevokeRoleResponse), nil } func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - result, err := 
s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleAddResponse), nil + return resp.(*pb.AuthRoleAddResponse), nil } func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleGrantPermissionResponse), nil + return resp.(*pb.AuthRoleGrantPermissionResponse), nil } func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleGetResponse), nil + return resp.(*pb.AuthRoleGetResponse), nil } func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleListResponse), nil + return resp.(*pb.AuthRoleListResponse), nil } func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - result, err := 
s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleRevokePermissionResponse), nil + return resp.(*pb.AuthRoleRevokePermissionResponse), nil } func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleDeleteResponse), nil +} + +func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + result, err := s.processInternalRaftRequestOnce(ctx, r) if err != nil { return nil, err } if result.err != nil { return nil, result.err } - return result.resp.(*pb.AuthRoleDeleteResponse), nil + return result.resp, nil +} + +func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + for { + resp, err := s.raftRequestOnce(ctx, r) + if err != auth.ErrAuthOldRevision { + return resp, err + } + } } // doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. 
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { for { - ai, err := s.AuthStore().AuthInfoFromCtx(ctx) + ai, err := s.AuthInfoFromCtx(ctx) if err != nil { return err } @@ -652,7 +536,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In ID: s.reqIDGen.Next(), } - authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) + authInfo, err := s.AuthInfoFromCtx(ctx) if err != nil { return nil, err } @@ -666,7 +550,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In return nil, err } - if len(data) > maxRequestBytes { + if len(data) > int(s.Cfg.MaxRequestBytes) { return nil, ErrRequestTooLarge } @@ -696,19 +580,6 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In } } -func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { - var result *applyResult - var err error - for { - result, err = s.processInternalRaftRequestOnce(ctx, r) - if err != auth.ErrAuthOldRevision { - break - } - } - - return result, err -} - // Watchable returns a watchable interface attached to the etcdserver. 
func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } @@ -802,3 +673,14 @@ func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { return ErrStopped } } + +func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) { + if s.Cfg.ClientCertAuthEnabled { + authInfo := s.AuthStore().AuthInfoFromTLS(ctx) + if authInfo != nil { + return authInfo, nil + } + } + + return s.AuthStore().AuthInfoFromCtx(ctx) +} diff --git a/vendor/github.com/coreos/etcd/integration/BUILD b/vendor/github.com/coreos/etcd/integration/BUILD index c6a3e2ae69b..cd730e4a52a 100644 --- a/vendor/github.com/coreos/etcd/integration/BUILD +++ b/vendor/github.com/coreos/etcd/integration/BUILD @@ -13,9 +13,15 @@ go_library( deps = [ "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/coreos/etcd/embed:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3client:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library", @@ -25,6 +31,7 @@ go_library( "//vendor/github.com/coreos/pkg/capnslog:go_default_library", 
"//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/keepalive:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/integration/bridge.go b/vendor/github.com/coreos/etcd/integration/bridge.go index b9e67318e52..59cebe1f0e0 100644 --- a/vendor/github.com/coreos/etcd/integration/bridge.go +++ b/vendor/github.com/coreos/etcd/integration/bridge.go @@ -17,6 +17,7 @@ package integration import ( "fmt" "io" + "io/ioutil" "net" "sync" @@ -31,9 +32,10 @@ type bridge struct { l net.Listener conns map[*bridgeConn]struct{} - stopc chan struct{} - pausec chan struct{} - wg sync.WaitGroup + stopc chan struct{} + pausec chan struct{} + blackholec chan struct{} + wg sync.WaitGroup mu sync.Mutex } @@ -41,11 +43,12 @@ type bridge struct { func newBridge(addr string) (*bridge, error) { b := &bridge{ // bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number - inaddr: addr + "0", - outaddr: addr, - conns: make(map[*bridgeConn]struct{}), - stopc: make(chan struct{}), - pausec: make(chan struct{}), + inaddr: addr + "0", + outaddr: addr, + conns: make(map[*bridgeConn]struct{}), + stopc: make(chan struct{}), + pausec: make(chan struct{}), + blackholec: make(chan struct{}), } close(b.pausec) @@ -152,12 +155,12 @@ func (b *bridge) serveConn(bc *bridgeConn) { var wg sync.WaitGroup wg.Add(2) go func() { - io.Copy(bc.out, bc.in) + b.ioCopy(bc, bc.out, bc.in) bc.close() wg.Done() }() go func() { - io.Copy(bc.in, bc.out) + b.ioCopy(bc, bc.in, bc.out) bc.close() wg.Done() }() @@ -179,3 +182,47 @@ func (bc *bridgeConn) close() { bc.in.Close() bc.out.Close() } + +func (b *bridge) Blackhole() { + b.mu.Lock() + close(b.blackholec) + b.mu.Unlock() +} + +func (b *bridge) Unblackhole() { + b.mu.Lock() + for bc := range b.conns { + bc.Close() + } + b.conns = make(map[*bridgeConn]struct{}) + b.blackholec = make(chan struct{}) + b.mu.Unlock() +} + +// ref. 
https://github.com/golang/go/blob/master/src/io/io.go copyBuffer +func (b *bridge) ioCopy(bc *bridgeConn, dst io.Writer, src io.Reader) (err error) { + buf := make([]byte, 32*1024) + for { + select { + case <-b.blackholec: + io.Copy(ioutil.Discard, src) + return nil + default: + } + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if ew != nil { + return ew + } + if nr != nw { + return io.ErrShortWrite + } + } + if er != nil { + err = er + break + } + } + return +} diff --git a/vendor/github.com/coreos/etcd/integration/cluster.go b/vendor/github.com/coreos/etcd/integration/cluster.go index 4989e1f62fa..2907e994de2 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster.go +++ b/vendor/github.com/coreos/etcd/integration/cluster.go @@ -31,21 +31,28 @@ import ( "testing" "time" - "golang.org/x/net/context" - "google.golang.org/grpc" - "github.com/coreos/etcd/client" "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/embed" "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http" + "github.com/coreos/etcd/etcdserver/api/v3client" + "github.com/coreos/etcd/etcdserver/api/v3election" + epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "github.com/coreos/etcd/etcdserver/api/v3lock" + lockpb "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" "github.com/coreos/etcd/etcdserver/api/v3rpc" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/testutil" "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/rafthttp" + "github.com/coreos/pkg/capnslog" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" ) const ( @@ -72,16 +79,32 @@ var ( ClientCertAuth: true, } + testTLSInfoExpired = transport.TLSInfo{ + KeyFile: "./fixtures-expired/server-key.pem", + CertFile: 
"./fixtures-expired/server.pem", + TrustedCAFile: "./fixtures-expired/etcd-root-ca.pem", + ClientCertAuth: true, + } + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "integration") ) type ClusterConfig struct { - Size int - PeerTLS *transport.TLSInfo - ClientTLS *transport.TLSInfo - DiscoveryURL string - UseGRPC bool - QuotaBackendBytes int64 + Size int + PeerTLS *transport.TLSInfo + ClientTLS *transport.TLSInfo + DiscoveryURL string + UseGRPC bool + QuotaBackendBytes int64 + MaxRequestBytes uint + GRPCKeepAliveMinTime time.Duration + GRPCKeepAliveInterval time.Duration + GRPCKeepAliveTimeout time.Duration + // SkipCreatingClient to skip creating clients for each member. + SkipCreatingClient bool + + ClientMaxCallSendMsgSize int + ClientMaxCallRecvMsgSize int } type cluster struct { @@ -89,11 +112,6 @@ type cluster struct { Members []*member } -func init() { - // manually enable v3 capability since we know the cluster members all support v3. - api.EnableCapability(api.V3rpcCapability) -} - func schemeFromTLSInfo(tls *transport.TLSInfo) string { if tls == nil { return UrlScheme @@ -175,8 +193,12 @@ func (c *cluster) URL(i int) string { // URLs returns a list of all active client URLs in the cluster func (c *cluster) URLs() []string { + return getMembersURLs(c.Members) +} + +func getMembersURLs(members []*member) []string { urls := make([]string, 0) - for _, m := range c.Members { + for _, m := range members { select { case <-m.s.StopNotify(): continue @@ -210,10 +232,16 @@ func (c *cluster) HTTPMembers() []client.Member { func (c *cluster) mustNewMember(t *testing.T) *member { m := mustNewMember(t, memberConfig{ - name: c.name(rand.Int()), - peerTLS: c.cfg.PeerTLS, - clientTLS: c.cfg.ClientTLS, - quotaBackendBytes: c.cfg.QuotaBackendBytes, + name: c.name(rand.Int()), + peerTLS: c.cfg.PeerTLS, + clientTLS: c.cfg.ClientTLS, + quotaBackendBytes: c.cfg.QuotaBackendBytes, + maxRequestBytes: c.cfg.MaxRequestBytes, + grpcKeepAliveMinTime: 
c.cfg.GRPCKeepAliveMinTime, + grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval, + grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout, + clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize, + clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize, }) m.DiscoveryURL = c.cfg.DiscoveryURL if c.cfg.UseGRPC { @@ -312,9 +340,15 @@ func (c *cluster) removeMember(t *testing.T, id uint64) error { } func (c *cluster) Terminate(t *testing.T) { + var wg sync.WaitGroup + wg.Add(len(c.Members)) for _, m := range c.Members { - m.Terminate(t) + go func(mm *member) { + defer wg.Done() + mm.Terminate(t) + }(m) } + wg.Wait() } func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) { @@ -331,7 +365,6 @@ func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) { time.Sleep(tickDuration) } } - return } func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) } @@ -343,6 +376,18 @@ func (c *cluster) waitLeader(t *testing.T, membs []*member) int { for _, m := range membs { possibleLead[uint64(m.s.ID())] = true } + cc := MustNewHTTPClient(t, getMembersURLs(membs), nil) + kapi := client.NewKeysAPI(cc) + + // ensure leader is up via linearizable get + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration+time.Second) + _, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true}) + cancel() + if err == nil || strings.Contains(err.Error(), "Key not found") { + break + } + } for lead == 0 || !possibleLead[lead] { lead = 0 @@ -446,20 +491,32 @@ type member struct { s *etcdserver.EtcdServer hss []*httptest.Server - grpcServer *grpc.Server - grpcAddr string - grpcBridge *bridge + grpcServerOpts []grpc.ServerOption + grpcServer *grpc.Server + grpcAddr string + grpcBridge *bridge - keepDataDirTerminate bool + // serverClient is a clientv3 that directly calls the etcdserver. 
+ serverClient *clientv3.Client + + keepDataDirTerminate bool + clientMaxCallSendMsgSize int + clientMaxCallRecvMsgSize int } func (m *member) GRPCAddr() string { return m.grpcAddr } type memberConfig struct { - name string - peerTLS *transport.TLSInfo - clientTLS *transport.TLSInfo - quotaBackendBytes int64 + name string + peerTLS *transport.TLSInfo + clientTLS *transport.TLSInfo + quotaBackendBytes int64 + maxRequestBytes uint + grpcKeepAliveMinTime time.Duration + grpcKeepAliveInterval time.Duration + grpcKeepAliveTimeout time.Duration + clientMaxCallSendMsgSize int + clientMaxCallRecvMsgSize int } // mustNewMember return an inited member with the given name. If peerTLS is @@ -507,6 +564,30 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member { m.ElectionTicks = electionTicks m.TickMs = uint(tickDuration / time.Millisecond) m.QuotaBackendBytes = mcfg.quotaBackendBytes + m.MaxRequestBytes = mcfg.maxRequestBytes + if m.MaxRequestBytes == 0 { + m.MaxRequestBytes = embed.DefaultMaxRequestBytes + } + m.AuthToken = "simple" // for the purpose of integration testing, simple token is enough + + m.grpcServerOpts = []grpc.ServerOption{} + if mcfg.grpcKeepAliveMinTime > time.Duration(0) { + m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: mcfg.grpcKeepAliveMinTime, + PermitWithoutStream: false, + })) + } + if mcfg.grpcKeepAliveInterval > time.Duration(0) && + mcfg.grpcKeepAliveTimeout > time.Duration(0) { + m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: mcfg.grpcKeepAliveInterval, + Timeout: mcfg.grpcKeepAliveTimeout, + })) + } + + m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize + m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize + return m } @@ -523,7 +604,7 @@ func (m *member) listenGRPC() error { l.Close() return err } - m.grpcAddr = m.grpcBridge.URL() + m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + 
m.grpcBridge.inaddr m.grpcListener = l return nil } @@ -535,6 +616,8 @@ func (m *member) electionTimeout() time.Duration { func (m *member) DropConnections() { m.grpcBridge.Reset() } func (m *member) PauseConnections() { m.grpcBridge.Pause() } func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() } +func (m *member) Blackhole() { m.grpcBridge.Blackhole() } +func (m *member) Unblackhole() { m.grpcBridge.Unblackhole() } // NewClientV3 creates a new grpc client connection to the member func NewClientV3(m *member) (*clientv3.Client, error) { @@ -543,8 +626,10 @@ func NewClientV3(m *member) (*clientv3.Client, error) { } cfg := clientv3.Config{ - Endpoints: []string{m.grpcAddr}, - DialTimeout: 5 * time.Second, + Endpoints: []string{m.grpcAddr}, + DialTimeout: 5 * time.Second, + MaxCallSendMsgSize: m.clientMaxCallSendMsgSize, + MaxCallRecvMsgSize: m.clientMaxCallRecvMsgSize, } if m.ClientTLSInfo != nil { @@ -597,10 +682,10 @@ func (m *member) Launch() error { if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.Tick(500 * time.Millisecond) + m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) m.s.Start() - m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)} + m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)} for _, ln := range m.PeerListeners { hs := &httptest.Server{ @@ -644,7 +729,10 @@ func (m *member) Launch() error { return err } } - m.grpcServer = v3rpc.Server(m.s, tlscfg) + m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...) 
+ m.serverClient = v3client.New(m.s) + lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient)) + epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient)) go m.grpcServer.Serve(m.grpcListener) } @@ -688,8 +776,12 @@ func (m *member) Close() { m.grpcBridge.Close() m.grpcBridge = nil } + if m.serverClient != nil { + m.serverClient.Close() + m.serverClient = nil + } if m.grpcServer != nil { - m.grpcServer.Stop() + m.grpcServer.GracefulStop() m.grpcServer = nil } m.s.HardStop() @@ -785,7 +877,7 @@ func (m *member) Metric(metricName string) (string, error) { } // InjectPartition drops connections from m to others, vice versa. -func (m *member) InjectPartition(t *testing.T, others []*member) { +func (m *member) InjectPartition(t *testing.T, others ...*member) { for _, other := range others { m.s.CutPeer(other.s.ID()) other.s.CutPeer(m.s.ID()) @@ -793,7 +885,7 @@ func (m *member) InjectPartition(t *testing.T, others []*member) { } // RecoverPartition recovers connections from m to others, vice versa. -func (m *member) RecoverPartition(t *testing.T, others []*member) { +func (m *member) RecoverPartition(t *testing.T, others ...*member) { for _, other := range others { m.s.MendPeer(other.s.ID()) other.s.MendPeer(m.s.ID()) @@ -845,12 +937,15 @@ func NewClusterV3(t *testing.T, cfg *ClusterConfig) *ClusterV3 { cluster: NewClusterByConfig(t, cfg), } clus.Launch(t) - for _, m := range clus.Members { - client, err := NewClientV3(m) - if err != nil { - t.Fatalf("cannot create client: %v", err) + + if !cfg.SkipCreatingClient { + for _, m := range clus.Members { + client, err := NewClientV3(m) + if err != nil { + t.Fatalf("cannot create client: %v", err) + } + clus.clients = append(clus.clients, client) } - clus.clients = append(clus.clients, client) } return clus @@ -897,4 +992,8 @@ type grpcAPI struct { Maintenance pb.MaintenanceClient // Auth is the authentication API for the client's connection. 
Auth pb.AuthClient + // Lock is the lock API for the client's connection. + Lock lockpb.LockClient + // Election is the election API for the client's connection. + Election epb.ElectionClient } diff --git a/vendor/github.com/coreos/etcd/integration/cluster_direct.go b/vendor/github.com/coreos/etcd/integration/cluster_direct.go index 84b2a796cc0..ff97e6146ed 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster_direct.go +++ b/vendor/github.com/coreos/etcd/integration/cluster_direct.go @@ -18,6 +18,8 @@ package integration import ( "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" ) @@ -29,6 +31,8 @@ func toGRPC(c *clientv3.Client) grpcAPI { pb.NewWatchClient(c.ActiveConnection()), pb.NewMaintenanceClient(c.ActiveConnection()), pb.NewAuthClient(c.ActiveConnection()), + v3lockpb.NewLockClient(c.ActiveConnection()), + v3electionpb.NewElectionClient(c.ActiveConnection()), } } diff --git a/vendor/github.com/coreos/etcd/integration/cluster_proxy.go b/vendor/github.com/coreos/etcd/integration/cluster_proxy.go index 75319218ec6..15094358e7b 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster_proxy.go +++ b/vendor/github.com/coreos/etcd/integration/cluster_proxy.go @@ -20,8 +20,10 @@ import ( "sync" "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/namespace" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/proxy/grpcproxy" + "github.com/coreos/etcd/proxy/grpcproxy/adapter" ) var ( @@ -29,10 +31,13 @@ var ( proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy) ) +const proxyNamespace = "proxy-namespace" + type grpcClientProxy struct { grpc grpcAPI wdonec <-chan struct{} kvdonec <-chan struct{} + lpdonec <-chan struct{} } func toGRPC(c *clientv3.Client) grpcAPI { @@ -43,17 +48,30 @@ func toGRPC(c *clientv3.Client) 
grpcAPI { return v.grpc } - wp, wpch := grpcproxy.NewWatchProxy(c) + // test namespacing proxy + c.KV = namespace.NewKV(c.KV, proxyNamespace) + c.Watcher = namespace.NewWatcher(c.Watcher, proxyNamespace) + c.Lease = namespace.NewLease(c.Lease, proxyNamespace) + // test coalescing/caching proxy kvp, kvpch := grpcproxy.NewKvProxy(c) + wp, wpch := grpcproxy.NewWatchProxy(c) + lp, lpch := grpcproxy.NewLeaseProxy(c) + mp := grpcproxy.NewMaintenanceProxy(c) + clp, _ := grpcproxy.NewClusterProxy(c, "", "") // without registering proxy URLs + lockp := grpcproxy.NewLockProxy(c) + electp := grpcproxy.NewElectionProxy(c) + grpc := grpcAPI{ - pb.NewClusterClient(c.ActiveConnection()), - grpcproxy.KvServerToKvClient(kvp), - pb.NewLeaseClient(c.ActiveConnection()), - grpcproxy.WatchServerToWatchClient(wp), - pb.NewMaintenanceClient(c.ActiveConnection()), + adapter.ClusterServerToClusterClient(clp), + adapter.KvServerToKvClient(kvp), + adapter.LeaseServerToLeaseClient(lp), + adapter.WatchServerToWatchClient(wp), + adapter.MaintenanceServerToMaintenanceClient(mp), pb.NewAuthClient(c.ActiveConnection()), + adapter.LockServerToLockClient(lockp), + adapter.ElectionServerToElectionClient(electp), } - proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch} + proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch, lpdonec: lpch} return grpc } @@ -61,13 +79,17 @@ type proxyCloser struct { clientv3.Watcher wdonec <-chan struct{} kvdonec <-chan struct{} + lclose func() + lpdonec <-chan struct{} } func (pc *proxyCloser) Close() error { - // client ctx is canceled before calling close, so kv will close out + // client ctx is canceled before calling close, so kv and lp will close out <-pc.kvdonec err := pc.Watcher.Close() <-pc.wdonec + pc.lclose() + <-pc.lpdonec return err } @@ -77,12 +99,16 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { return nil, err } rpc := toGRPC(c) - c.KV = clientv3.NewKVFromKVClient(rpc.KV) + c.KV = 
clientv3.NewKVFromKVClient(rpc.KV, c) pmu.Lock() + lc := c.Lease + c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, c, cfg.DialTimeout) c.Watcher = &proxyCloser{ - Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch), + Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch, c), wdonec: proxies[c].wdonec, kvdonec: proxies[c].kvdonec, + lclose: func() { lc.Close() }, + lpdonec: proxies[c].lpdonec, } pmu.Unlock() return c, nil diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD b/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD index 7be8ef45cc9..1385cb46bf8 100644 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD +++ b/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD @@ -13,7 +13,6 @@ go_library( "//vendor/github.com/coreos/etcd/lease:go_default_library", "//vendor/github.com/coreos/etcd/lease/leasepb:go_default_library", "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go index 256051efc8d..c3175cbbb0f 100644 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go +++ b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go @@ -16,6 +16,7 @@ package leasehttp import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -26,7 +27,6 @@ import ( "github.com/coreos/etcd/lease" "github.com/coreos/etcd/lease/leasepb" "github.com/coreos/etcd/pkg/httputil" - "golang.org/x/net/context" ) var ( @@ -202,45 +202,27 @@ func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string } req.Header.Set("Content-Type", "application/protobuf") - cancel := httputil.RequestCanceler(req) + req = req.WithContext(ctx) cc := &http.Client{Transport: rt} var b []byte // buffer errc channel so that errc don't block inside the go routinue - errc := make(chan error, 2) - go func() { - resp, err := cc.Do(req) - if err != nil { - errc <- err - return - 
} - b, err = readResponse(resp) - if err != nil { - errc <- err - return - } - if resp.StatusCode == http.StatusRequestTimeout { - errc <- ErrLeaseHTTPTimeout - return - } - if resp.StatusCode == http.StatusNotFound { - errc <- lease.ErrLeaseNotFound - return - } - if resp.StatusCode != http.StatusOK { - errc <- fmt.Errorf("lease: unknown error(%s)", string(b)) - return - } - errc <- nil - }() - select { - case derr := <-errc: - if derr != nil { - return nil, derr - } - case <-ctx.Done(): - cancel() - return nil, ctx.Err() + resp, err := cc.Do(req) + if err != nil { + return nil, err + } + b, err = readResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode == http.StatusRequestTimeout { + return nil, ErrLeaseHTTPTimeout + } + if resp.StatusCode == http.StatusNotFound { + return nil, lease.ErrLeaseNotFound + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("lease: unknown error(%s)", string(b)) } lresp := &leasepb.LeaseInternalResponse{} diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go index fb3a9bab0c3..ec8db732be5 100644 --- a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go +++ b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go @@ -590,7 +590,7 @@ func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) } var fileDescriptorLease = []byte{ // 233 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92, diff --git a/vendor/github.com/coreos/etcd/lease/lessor.go 
b/vendor/github.com/coreos/etcd/lease/lessor.go index 385bd76d73c..3418cf565ed 100644 --- a/vendor/github.com/coreos/etcd/lease/lessor.go +++ b/vendor/github.com/coreos/etcd/lease/lessor.go @@ -31,40 +31,39 @@ import ( const ( // NoLease is a special LeaseID representing the absence of a lease. NoLease = LeaseID(0) + + forever = monotime.Time(math.MaxInt64) ) var ( leaseBucketName = []byte("lease") - forever = monotime.Time(math.MaxInt64) + // maximum number of leases to revoke per second; configurable for tests + leaseRevokeRate = 1000 ErrNotPrimary = errors.New("not a primary lessor") ErrLeaseNotFound = errors.New("lease not found") ErrLeaseExists = errors.New("lease already exists") ) -type LeaseID int64 - -// RangeDeleter defines an interface with Txn and DeleteRange method. -// We define this interface only for lessor to limit the number -// of methods of mvcc.KV to what lessor actually needs. -// -// Having a minimum interface makes testing easy. -type RangeDeleter interface { - // TxnBegin see comments on mvcc.KV - TxnBegin() int64 - // TxnEnd see comments on mvcc.KV - TxnEnd(txnID int64) error - // TxnDeleteRange see comments on mvcc.KV - TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) +// TxnDelete is a TxnWrite that only permits deletes. Defined here +// to avoid circular dependency with mvcc. +type TxnDelete interface { + DeleteRange(key, end []byte) (n, rev int64) + End() } +// RangeDeleter is a TxnDelete constructor. +type RangeDeleter func() TxnDelete + +type LeaseID int64 + // Lessor owns leases. It can grant, revoke, renew and modify leases for lessee. type Lessor interface { - // SetRangeDeleter sets the RangeDeleter to the Lessor. - // Lessor deletes the items in the revoked or expired lease from the - // the set RangeDeleter. - SetRangeDeleter(dr RangeDeleter) + // SetRangeDeleter lets the lessor create TxnDeletes to the store. + // Lessor deletes the items in the revoked or expired lease by creating + // new TxnDeletes. 
+ SetRangeDeleter(rd RangeDeleter) // Grant grants a lease that expires at least after TTL seconds. Grant(id LeaseID, ttl int64) (*Lease, error) @@ -248,17 +247,14 @@ func (le *lessor) Revoke(id LeaseID) error { return nil } - tid := le.rd.TxnBegin() + txn := le.rd() // sort keys so deletes are in same order among all members, // otherwise the backened hashes will be different keys := l.Keys() sort.StringSlice(keys).Sort() for _, key := range keys { - _, _, err := le.rd.TxnDeleteRange(tid, []byte(key), nil) - if err != nil { - panic(err) - } + txn.DeleteRange([]byte(key), nil) } le.mu.Lock() @@ -269,11 +265,7 @@ func (le *lessor) Revoke(id LeaseID) error { // deleting the keys if etcdserver fails in between. le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID))) - err := le.rd.TxnEnd(tid) - if err != nil { - panic(err) - } - + txn.End() return nil } @@ -335,8 +327,53 @@ func (le *lessor) Promote(extend time.Duration) { for _, l := range le.leaseMap { l.refresh(extend) } + + if len(le.leaseMap) < leaseRevokeRate { + // no possibility of lease pile-up + return + } + + // adjust expiries in case of overlap + leases := make([]*Lease, 0, len(le.leaseMap)) + for _, l := range le.leaseMap { + leases = append(leases, l) + } + sort.Sort(leasesByExpiry(leases)) + + baseWindow := leases[0].Remaining() + nextWindow := baseWindow + time.Second + expires := 0 + // have fewer expires than the total revoke rate so piled up leases + // don't consume the entire revoke limit + targetExpiresPerSecond := (3 * leaseRevokeRate) / 4 + for _, l := range leases { + remaining := l.Remaining() + if remaining > nextWindow { + baseWindow = remaining + nextWindow = baseWindow + time.Second + expires = 1 + continue + } + expires++ + if expires <= targetExpiresPerSecond { + continue + } + rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond)) + // If leases are extended by n seconds, leases n seconds ahead of the + // base window should be 
extended by only one second. + rateDelay -= float64(remaining - baseWindow) + delay := time.Duration(rateDelay) + nextWindow = baseWindow + delay + l.refresh(delay + extend) + } } +type leasesByExpiry []*Lease + +func (le leasesByExpiry) Len() int { return len(le) } +func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() } +func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] } + func (le *lessor) Demote() { le.mu.Lock() defer le.mu.Unlock() @@ -433,6 +470,10 @@ func (le *lessor) runLoop() { le.mu.Unlock() if len(ls) != 0 { + // rate limit + if len(ls) > leaseRevokeRate/2 { + ls = ls[:leaseRevokeRate/2] + } select { case <-le.stopC: return diff --git a/vendor/github.com/coreos/etcd/mvcc/BUILD b/vendor/github.com/coreos/etcd/mvcc/BUILD index ab4fab1efde..21b837ec9ac 100644 --- a/vendor/github.com/coreos/etcd/mvcc/BUILD +++ b/vendor/github.com/coreos/etcd/mvcc/BUILD @@ -7,12 +7,16 @@ go_library( "index.go", "key_index.go", "kv.go", + "kv_view.go", "kvstore.go", "kvstore_compaction.go", + "kvstore_txn.go", "metrics.go", + "metrics_txn.go", "revision.go", "util.go", "watchable_store.go", + "watchable_store_txn.go", "watcher.go", "watcher_group.go", ], diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD index e8456f33cca..5cc8cced856 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD +++ b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD @@ -7,46 +7,48 @@ go_library( "batch_tx.go", "doc.go", "metrics.go", + "read_tx.go", + "tx_buffer.go", ] + select({ "@io_bazel_rules_go//go/platform:android": [ - "boltoption_default.go", + "config_default.go", ], "@io_bazel_rules_go//go/platform:darwin": [ - "boltoption_default.go", + "config_default.go", ], "@io_bazel_rules_go//go/platform:dragonfly": [ - "boltoption_default.go", + "config_default.go", ], "@io_bazel_rules_go//go/platform:freebsd": [ - "boltoption_default.go", + "config_default.go", ], 
"@io_bazel_rules_go//go/platform:linux": [ - "boltoption_linux.go", + "config_linux.go", ], "@io_bazel_rules_go//go/platform:nacl": [ - "boltoption_default.go", + "config_default.go", ], "@io_bazel_rules_go//go/platform:netbsd": [ - "boltoption_default.go", + "config_default.go", ], "@io_bazel_rules_go//go/platform:openbsd": [ - "boltoption_default.go", + "config_default.go", ], "@io_bazel_rules_go//go/platform:plan9": [ - "boltoption_default.go", + "config_default.go", ], "@io_bazel_rules_go//go/platform:solaris": [ - "boltoption_default.go", + "config_default.go", ], "@io_bazel_rules_go//go/platform:windows": [ - "boltoption_default.go", + "config_windows.go", ], "//conditions:default": [], }), importpath = "github.com/coreos/etcd/mvcc/backend", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/boltdb/bolt:go_default_library", + "//vendor/github.com/coreos/bbolt:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ], diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go index e5e0028f94b..87edd25f427 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go @@ -25,7 +25,7 @@ import ( "sync/atomic" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" "github.com/coreos/pkg/capnslog" ) @@ -35,25 +35,21 @@ var ( defragLimit = 10000 - // InitialMmapSize is the initial size of the mmapped region. Setting this larger than + // initialMmapSize is the initial size of the mmapped region. Setting this larger than // the potential max db size can prevent writer from blocking reader. // This only works for linux. 
- InitialMmapSize = int64(10 * 1024 * 1024 * 1024) + initialMmapSize = uint64(10 * 1024 * 1024 * 1024) plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend") -) -const ( - // DefaultQuotaBytes is the number of bytes the backend Size may - // consume before exceeding the space quota. - DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB - // MaxQuotaBytes is the maximum number of bytes suggested for a backend - // quota. A larger quota may lead to degraded performance. - MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB + // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning. + minSnapshotWarningTimeout = time.Duration(30 * time.Second) ) type Backend interface { + ReadTx() ReadTx BatchTx() BatchTx + Snapshot() Snapshot Hash(ignores map[IgnoreKey]struct{}) (uint32, error) // Size returns the current size of the backend. @@ -86,36 +82,71 @@ type backend struct { batchInterval time.Duration batchLimit int - batchTx *batchTx + batchTx *batchTxBuffered + + readTx *readTx stopc chan struct{} donec chan struct{} } -func New(path string, d time.Duration, limit int) Backend { - return newBackend(path, d, limit) +type BackendConfig struct { + // Path is the file path to the backend file. + Path string + // BatchInterval is the maximum time before flushing the BatchTx. + BatchInterval time.Duration + // BatchLimit is the maximum puts before flushing the BatchTx. + BatchLimit int + // MmapSize is the number of bytes to mmap for the backend. 
+ MmapSize uint64 +} + +func DefaultBackendConfig() BackendConfig { + return BackendConfig{ + BatchInterval: defaultBatchInterval, + BatchLimit: defaultBatchLimit, + MmapSize: initialMmapSize, + } +} + +func New(bcfg BackendConfig) Backend { + return newBackend(bcfg) } func NewDefaultBackend(path string) Backend { - return newBackend(path, defaultBatchInterval, defaultBatchLimit) + bcfg := DefaultBackendConfig() + bcfg.Path = path + return newBackend(bcfg) } -func newBackend(path string, d time.Duration, limit int) *backend { - db, err := bolt.Open(path, 0600, boltOpenOptions) +func newBackend(bcfg BackendConfig) *backend { + bopts := &bolt.Options{} + if boltOpenOptions != nil { + *bopts = *boltOpenOptions + } + bopts.InitialMmapSize = bcfg.mmapSize() + + db, err := bolt.Open(bcfg.Path, 0600, bopts) if err != nil { - plog.Panicf("cannot open database at %s (%v)", path, err) + plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err) } + // In future, may want to make buffering optional for low-concurrency systems + // or dynamically swap between buffered/non-buffered depending on workload. b := &backend{ db: db, - batchInterval: d, - batchLimit: limit, + batchInterval: bcfg.BatchInterval, + batchLimit: bcfg.BatchLimit, + + readTx: &readTx{buf: txReadBuffer{ + txBuffer: txBuffer{make(map[string]*bucketBuffer)}}, + }, stopc: make(chan struct{}), donec: make(chan struct{}), } - b.batchTx = newBatchTx(b) + b.batchTx = newBatchTxBuffered(b) go b.run() return b } @@ -127,6 +158,8 @@ func (b *backend) BatchTx() BatchTx { return b.batchTx } +func (b *backend) ReadTx() ReadTx { return b.readTx } + // ForceCommit forces the current batching tx to commit. 
func (b *backend) ForceCommit() { b.batchTx.Commit() @@ -141,7 +174,33 @@ func (b *backend) Snapshot() Snapshot { if err != nil { plog.Fatalf("cannot begin tx (%s)", err) } - return &snapshot{tx} + + stopc, donec := make(chan struct{}), make(chan struct{}) + dbBytes := tx.Size() + go func() { + defer close(donec) + // sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection + // assuming a min tcp throughput of 100MB/s. + var sendRateBytes int64 = 100 * 1024 * 1014 + warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second))) + if warningTimeout < minSnapshotWarningTimeout { + warningTimeout = minSnapshotWarningTimeout + } + start := time.Now() + ticker := time.NewTicker(warningTimeout) + defer ticker.Stop() + for { + select { + case <-ticker.C: + plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1014), start) + case <-stopc: + snapshotDurations.Observe(time.Since(start).Seconds()) + return + } + } + }() + + return &snapshot{tx, stopc, donec} } type IgnoreKey struct { @@ -235,7 +294,11 @@ func (b *backend) defrag() error { b.mu.Lock() defer b.mu.Unlock() - b.batchTx.commit(true) + // block concurrent read requests while resetting tx + b.readTx.mu.Lock() + defer b.readTx.mu.Unlock() + + b.batchTx.unsafeCommit(true) b.batchTx.tx = nil tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions) @@ -276,6 +339,10 @@ func (b *backend) defrag() error { plog.Fatalf("cannot begin tx (%s)", err) } + b.readTx.buf.reset() + b.readTx.tx = b.unsafeBegin(false) + atomic.StoreInt64(&b.size, b.readTx.tx.Size()) + return nil } @@ -331,6 +398,22 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error { return tmptx.Commit() } +func (b *backend) begin(write bool) *bolt.Tx { + b.mu.RLock() + tx := b.unsafeBegin(write) + b.mu.RUnlock() + atomic.StoreInt64(&b.size, tx.Size()) + return tx +} + 
+func (b *backend) unsafeBegin(write bool) *bolt.Tx { + tx, err := b.db.Begin(write) + if err != nil { + plog.Fatalf("cannot begin tx (%s)", err) + } + return tx +} + // NewTmpBackend creates a backend implementation for testing. func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) { dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test") @@ -338,7 +421,9 @@ func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, strin plog.Fatal(err) } tmpPath := filepath.Join(dir, "database") - return newBackend(tmpPath, batchInterval, batchLimit), tmpPath + bcfg := DefaultBackendConfig() + bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit + return newBackend(bcfg), tmpPath } func NewDefaultTmpBackend() (*backend, string) { @@ -347,6 +432,12 @@ func NewDefaultTmpBackend() (*backend, string) { type snapshot struct { *bolt.Tx + stopc chan struct{} + donec chan struct{} } -func (s *snapshot) Close() error { return s.Tx.Rollback() } +func (s *snapshot) Close() error { + close(s.stopc) + <-s.donec + return s.Tx.Rollback() +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go index 04fea1e9477..e5fb8474089 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go @@ -16,23 +16,24 @@ package backend import ( "bytes" + "fmt" + "math" "sync" "sync/atomic" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) type BatchTx interface { - Lock() - Unlock() + ReadTx UnsafeCreateBucket(name []byte) UnsafePut(bucketName []byte, key []byte, value []byte) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) - UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) UnsafeDelete(bucketName []byte, key []byte) - UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error + // Commit commits a previous 
tx and begins a new writable one. Commit() + // CommitAndStop commits the previous tx and does not create a new one. CommitAndStop() } @@ -40,13 +41,8 @@ type batchTx struct { sync.Mutex tx *bolt.Tx backend *backend - pending int -} -func newBatchTx(backend *backend) *batchTx { - tx := &batchTx{backend: backend} - tx.Commit() - return tx + pending int } func (t *batchTx) UnsafeCreateBucket(name []byte) { @@ -84,30 +80,37 @@ func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq boo } // UnsafeRange must be called holding the lock on the tx. -func (t *batchTx) UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) { - bucket := t.tx.Bucket(bucketName) +func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + k, v, err := unsafeRange(t.tx, bucketName, key, endKey, limit) + if err != nil { + plog.Fatal(err) + } + return k, v +} + +func unsafeRange(tx *bolt.Tx, bucketName, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte, err error) { + bucket := tx.Bucket(bucketName) if bucket == nil { - plog.Fatalf("bucket %s does not exist", bucketName) + return nil, nil, fmt.Errorf("bucket %s does not exist", bucketName) } - if len(endKey) == 0 { - if v := bucket.Get(key); v == nil { - return keys, vs - } else { - return append(keys, key), append(vs, v) + if v := bucket.Get(key); v != nil { + return append(keys, key), append(vs, v), nil } + return nil, nil, nil + } + if limit <= 0 { + limit = math.MaxInt64 } - c := bucket.Cursor() for ck, cv := c.Seek(key); ck != nil && bytes.Compare(ck, endKey) < 0; ck, cv = c.Next() { vs = append(vs, cv) keys = append(keys, ck) - if limit > 0 && limit == int64(len(keys)) { + if limit == int64(len(keys)) { break } } - - return keys, vs + return keys, vs, nil } // UnsafeDelete must be called holding the lock on the tx. 
@@ -125,12 +128,14 @@ func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) { // UnsafeForEach must be called holding the lock on the tx. func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { - b := t.tx.Bucket(bucketName) - if b == nil { - // bucket does not exist - return nil + return unsafeForEach(t.tx, bucketName, visitor) +} + +func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error { + if b := tx.Bucket(bucket); b != nil { + return b.ForEach(visitor) } - return b.ForEach(visitor) + return nil } // Commit commits a previous tx and begins a new writable one. @@ -140,7 +145,7 @@ func (t *batchTx) Commit() { t.commit(false) } -// CommitAndStop commits the previous tx and do not create a new one. +// CommitAndStop commits the previous tx and does not create a new one. func (t *batchTx) CommitAndStop() { t.Lock() defer t.Unlock() @@ -150,37 +155,28 @@ func (t *batchTx) CommitAndStop() { func (t *batchTx) Unlock() { if t.pending >= t.backend.batchLimit { t.commit(false) - t.pending = 0 } t.Mutex.Unlock() } func (t *batchTx) commit(stop bool) { - var err error // commit the last tx if t.tx != nil { if t.pending == 0 && !stop { t.backend.mu.RLock() defer t.backend.mu.RUnlock() - // batchTx.commit(true) calls *bolt.Tx.Commit, which - // initializes *bolt.Tx.db and *bolt.Tx.meta as nil, - // and subsequent *bolt.Tx.Size() call panics. - // - // This nil pointer reference panic happens when: - // 1. batchTx.commit(false) from newBatchTx - // 2. batchTx.commit(true) from stopping backend - // 3. batchTx.commit(false) from inflight mvcc Hash call - // - // Check if db is nil to prevent this panic - if t.tx.DB() != nil { - atomic.StoreInt64(&t.backend.size, t.tx.Size()) - } + // t.tx.DB()==nil if 'CommitAndStop' calls 'batchTx.commit(true)', + // which initializes *bolt.Tx.db and *bolt.Tx.meta as nil; panics t.tx.Size(). 
+ // Server must make sure 'batchTx.commit(false)' does not follow + // 'batchTx.commit(true)' (e.g. stopping backend, and inflight Hash call). + atomic.StoreInt64(&t.backend.size, t.tx.Size()) return } + start := time.Now() // gofail: var beforeCommit struct{} - err = t.tx.Commit() + err := t.tx.Commit() // gofail: var afterCommit struct{} commitDurations.Observe(time.Since(start).Seconds()) atomic.AddInt64(&t.backend.commits, 1) @@ -190,17 +186,81 @@ func (t *batchTx) commit(stop bool) { plog.Fatalf("cannot commit tx (%s)", err) } } - - if stop { - return + if !stop { + t.tx = t.backend.begin(true) } - - t.backend.mu.RLock() - defer t.backend.mu.RUnlock() - // begin a new tx - t.tx, err = t.backend.db.Begin(true) - if err != nil { - plog.Fatalf("cannot begin tx (%s)", err) - } - atomic.StoreInt64(&t.backend.size, t.tx.Size()) +} + +type batchTxBuffered struct { + batchTx + buf txWriteBuffer +} + +func newBatchTxBuffered(backend *backend) *batchTxBuffered { + tx := &batchTxBuffered{ + batchTx: batchTx{backend: backend}, + buf: txWriteBuffer{ + txBuffer: txBuffer{make(map[string]*bucketBuffer)}, + seq: true, + }, + } + tx.Commit() + return tx +} + +func (t *batchTxBuffered) Unlock() { + if t.pending != 0 { + t.backend.readTx.mu.Lock() + t.buf.writeback(&t.backend.readTx.buf) + t.backend.readTx.mu.Unlock() + if t.pending >= t.backend.batchLimit { + t.commit(false) + } + } + t.batchTx.Unlock() +} + +func (t *batchTxBuffered) Commit() { + t.Lock() + defer t.Unlock() + t.commit(false) +} + +func (t *batchTxBuffered) CommitAndStop() { + t.Lock() + defer t.Unlock() + t.commit(true) +} + +func (t *batchTxBuffered) commit(stop bool) { + // all read txs must be closed to acquire boltdb commit rwlock + t.backend.readTx.mu.Lock() + defer t.backend.readTx.mu.Unlock() + t.unsafeCommit(stop) +} + +func (t *batchTxBuffered) unsafeCommit(stop bool) { + if t.backend.readTx.tx != nil { + if err := t.backend.readTx.tx.Rollback(); err != nil { + plog.Fatalf("cannot rollback tx (%s)", 
err) + } + t.backend.readTx.buf.reset() + t.backend.readTx.tx = nil + } + + t.batchTx.commit(stop) + + if !stop { + t.backend.readTx.tx = t.backend.begin(false) + } +} + +func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) { + t.batchTx.UnsafePut(bucketName, key, value) + t.buf.put(bucketName, key, value) +} + +func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) { + t.batchTx.UnsafeSeqPut(bucketName, key, value) + t.buf.putSeq(bucketName, key, value) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go similarity index 82% rename from vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go rename to vendor/github.com/coreos/etcd/mvcc/backend/config_default.go index 92019c18415..edfed0025c6 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go @@ -12,10 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build !linux +// +build !linux,!windows package backend -import "github.com/boltdb/bolt" +import bolt "github.com/coreos/bbolt" var boltOpenOptions *bolt.Options = nil + +func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go similarity index 88% rename from vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go rename to vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go index 4ee9b05a77c..a8f6abeba63 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go @@ -17,7 +17,7 @@ package backend import ( "syscall" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) // syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead @@ -27,6 +27,7 @@ import ( // (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might // silently ignore this flag. Please update your kernel to prevent this. var boltOpenOptions = &bolt.Options{ - MmapFlags: syscall.MAP_POPULATE, - InitialMmapSize: int(InitialMmapSize), + MmapFlags: syscall.MAP_POPULATE, } + +func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go new file mode 100644 index 00000000000..71d02700bcd --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package backend + +import bolt "github.com/coreos/bbolt" + +var boltOpenOptions *bolt.Options = nil + +// setting mmap size != 0 on windows will allocate the entire +// mmap size for the file, instead of growing it. So, force 0. + +func (bcfg *BackendConfig) mmapSize() int { return 0 } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go index 34a56a91956..30a38801476 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go @@ -24,8 +24,18 @@ var ( Help: "The latency distributions of commit called by backend.", Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), }) + + snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "disk", + Name: "backend_snapshot_duration_seconds", + Help: "The latency distribution of backend snapshots.", + // 10 ms -> 655 seconds + Buckets: prometheus.ExponentialBuckets(.01, 2, 17), + }) ) func init() { prometheus.MustRegister(commitDurations) + prometheus.MustRegister(snapshotDurations) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go new file mode 100644 index 00000000000..9fc6b790620 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go @@ -0,0 +1,92 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "bytes" + "math" + "sync" + + bolt "github.com/coreos/bbolt" +) + +// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys; +// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket +// is known to never overwrite any key so range is safe. +var safeRangeBucket = []byte("key") + +type ReadTx interface { + Lock() + Unlock() + + UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) + UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error +} + +type readTx struct { + // mu protects accesses to the txReadBuffer + mu sync.RWMutex + buf txReadBuffer + + // txmu protects accesses to the Tx on Range requests + txmu sync.Mutex + tx *bolt.Tx +} + +func (rt *readTx) Lock() { rt.mu.RLock() } +func (rt *readTx) Unlock() { rt.mu.RUnlock() } + +func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if endKey == nil { + // forbid duplicates for single keys + limit = 1 + } + if limit <= 0 { + limit = math.MaxInt64 + } + if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) { + panic("do not use unsafeRange on non-keys bucket") + } + keys, vals := rt.buf.Range(bucketName, key, endKey, limit) + if int64(len(keys)) == limit { + return keys, vals + } + rt.txmu.Lock() + // ignore error since bucket may have been created in this batch + k2, v2, _ := unsafeRange(rt.tx, bucketName, key, endKey, limit-int64(len(keys))) + rt.txmu.Unlock() + return append(k2, keys...), append(v2, 
vals...) +} + +func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { + dups := make(map[string]struct{}) + f1 := func(k, v []byte) error { + dups[string(k)] = struct{}{} + return visitor(k, v) + } + f2 := func(k, v []byte) error { + if _, ok := dups[string(k)]; ok { + return nil + } + return visitor(k, v) + } + if err := rt.buf.ForEach(bucketName, f1); err != nil { + return err + } + rt.txmu.Lock() + err := unsafeForEach(rt.tx, bucketName, f2) + rt.txmu.Unlock() + return err +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go new file mode 100644 index 00000000000..56e885dbfbc --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go @@ -0,0 +1,181 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "bytes" + "sort" +) + +// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer. +type txBuffer struct { + buckets map[string]*bucketBuffer +} + +func (txb *txBuffer) reset() { + for k, v := range txb.buckets { + if v.used == 0 { + // demote + delete(txb.buckets, k) + } + v.used = 0 + } +} + +// txWriteBuffer buffers writes of pending updates that have not yet committed. 
+type txWriteBuffer struct { + txBuffer + seq bool +} + +func (txw *txWriteBuffer) put(bucket, k, v []byte) { + txw.seq = false + txw.putSeq(bucket, k, v) +} + +func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) { + b, ok := txw.buckets[string(bucket)] + if !ok { + b = newBucketBuffer() + txw.buckets[string(bucket)] = b + } + b.add(k, v) +} + +func (txw *txWriteBuffer) writeback(txr *txReadBuffer) { + for k, wb := range txw.buckets { + rb, ok := txr.buckets[k] + if !ok { + delete(txw.buckets, k) + txr.buckets[k] = wb + continue + } + if !txw.seq && wb.used > 1 { + // assume no duplicate keys + sort.Sort(wb) + } + rb.merge(wb) + } + txw.reset() +} + +// txReadBuffer accesses buffered updates. +type txReadBuffer struct{ txBuffer } + +func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if b := txr.buckets[string(bucketName)]; b != nil { + return b.Range(key, endKey, limit) + } + return nil, nil +} + +func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error { + if b := txr.buckets[string(bucketName)]; b != nil { + return b.ForEach(visitor) + } + return nil +} + +type kv struct { + key []byte + val []byte +} + +// bucketBuffer buffers key-value pairs that are pending commit. +type bucketBuffer struct { + buf []kv + // used tracks number of elements in use so buf can be reused without reallocation. 
+ used int +} + +func newBucketBuffer() *bucketBuffer { + return &bucketBuffer{buf: make([]kv, 512), used: 0} +} + +func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) { + f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 } + idx := sort.Search(bb.used, f) + if idx < 0 { + return nil, nil + } + if len(endKey) == 0 { + if bytes.Equal(key, bb.buf[idx].key) { + keys = append(keys, bb.buf[idx].key) + vals = append(vals, bb.buf[idx].val) + } + return keys, vals + } + if bytes.Compare(endKey, bb.buf[idx].key) <= 0 { + return nil, nil + } + for i := idx; i < bb.used && int64(len(keys)) < limit; i++ { + if bytes.Compare(endKey, bb.buf[i].key) <= 0 { + break + } + keys = append(keys, bb.buf[i].key) + vals = append(vals, bb.buf[i].val) + } + return keys, vals +} + +func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error { + for i := 0; i < bb.used; i++ { + if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil { + return err + } + } + return nil +} + +func (bb *bucketBuffer) add(k, v []byte) { + bb.buf[bb.used].key, bb.buf[bb.used].val = k, v + bb.used++ + if bb.used == len(bb.buf) { + buf := make([]kv, (3*len(bb.buf))/2) + copy(buf, bb.buf) + bb.buf = buf + } +} + +// merge merges data from bb into bbsrc. 
+func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) { + for i := 0; i < bbsrc.used; i++ { + bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val) + } + if bb.used == bbsrc.used { + return + } + if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 { + return + } + + sort.Stable(bb) + + // remove duplicates, using only newest update + widx := 0 + for ridx := 1; ridx < bb.used; ridx++ { + if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) { + widx++ + } + bb.buf[widx] = bb.buf[ridx] + } + bb.used = widx + 1 +} + +func (bb *bucketBuffer) Len() int { return bb.used } +func (bb *bucketBuffer) Less(i, j int) bool { + return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0 +} +func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] } diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go index 397098a7ba7..991289cdd5c 100644 --- a/vendor/github.com/coreos/etcd/mvcc/index.go +++ b/vendor/github.com/coreos/etcd/mvcc/index.go @@ -29,7 +29,9 @@ type index interface { RangeSince(key, end []byte, rev int64) []revision Compact(rev int64) map[revision]struct{} Equal(b index) bool + Insert(ki *keyIndex) + KeyIndex(ki *keyIndex) *keyIndex } type treeIndex struct { @@ -60,18 +62,27 @@ func (ti *treeIndex) Put(key []byte, rev revision) { func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { keyi := &keyIndex{key: key} - ti.RLock() defer ti.RUnlock() - item := ti.tree.Get(keyi) - if item == nil { + if keyi = ti.keyIndex(keyi); keyi == nil { return revision{}, revision{}, 0, ErrRevisionNotFound } - - keyi = item.(*keyIndex) return keyi.get(atRev) } +func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { + ti.RLock() + defer ti.RUnlock() + return ti.keyIndex(keyi) +} + +func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { + if item := ti.tree.Get(keyi); item != nil { + return item.(*keyIndex) + } + return nil +} + func (ti *treeIndex) Range(key, 
end []byte, atRev int64) (keys [][]byte, revs []revision) { if end == nil { rev, _, _, err := ti.Get(key, atRev) diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go index 983c64e2f6b..9104f9b2d36 100644 --- a/vendor/github.com/coreos/etcd/mvcc/key_index.go +++ b/vendor/github.com/coreos/etcd/mvcc/key_index.go @@ -222,7 +222,6 @@ func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { } // remove the previous generations. ki.generations = ki.generations[i:] - return } func (ki *keyIndex) isEmpty() bool { diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go index c851c8725e8..6636347aa43 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kv.go +++ b/vendor/github.com/coreos/etcd/mvcc/kv.go @@ -32,15 +32,15 @@ type RangeResult struct { Count int } -type KV interface { - // Rev returns the current revision of the KV. - Rev() int64 - - // FirstRev returns the first revision of the KV. +type ReadView interface { + // FirstRev returns the first KV revision at the time of opening the txn. // After a compaction, the first revision increases to the compaction // revision. FirstRev() int64 + // Rev returns the revision of the KV at the time of opening the txn. + Rev() int64 + // Range gets the keys in the range at rangeRev. // The returned rev is the current revision of the KV when the operation is executed. // If rangeRev <=0, range gets the keys at currentRev. @@ -50,14 +50,17 @@ type KV interface { // Limit limits the number of keys returned. // If the required rev is compacted, ErrCompacted will be returned. Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) +} - // Put puts the given key, value into the store. Put also takes additional argument lease to - // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease - // id. 
- // A put also increases the rev of the store, and generates one event in the event history. - // The returned rev is the current revision of the KV when the operation is executed. - Put(key, value []byte, lease lease.LeaseID) (rev int64) +// TxnRead represents a read-only transaction with operations that will not +// block other read transactions. +type TxnRead interface { + ReadView + // End marks the transaction is complete and ready to commit. + End() +} +type WriteView interface { // DeleteRange deletes the given range from the store. // A deleteRange increases the rev of the store if any key in the range exists. // The number of key deleted will be returned. @@ -67,26 +70,51 @@ type KV interface { // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end). DeleteRange(key, end []byte) (n, rev int64) - // TxnBegin begins a txn. Only Txn prefixed operation can be executed, others will be blocked - // until txn ends. Only one on-going txn is allowed. - // TxnBegin returns an int64 txn ID. - // All txn prefixed operations with same txn ID will be done with the same rev. - TxnBegin() int64 - // TxnEnd ends the on-going txn with txn ID. If the on-going txn ID is not matched, error is returned. - TxnEnd(txnID int64) error - // TxnRange returns the current revision of the KV when the operation is executed. - TxnRange(txnID int64, key, end []byte, ro RangeOptions) (r *RangeResult, err error) - TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) - TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) + // Put puts the given key, value into the store. Put also takes additional argument lease to + // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease + // id. + // A put also increases the rev of the store, and generates one event in the event history. + // The returned rev is the current revision of the KV when the operation is executed. 
+ Put(key, value []byte, lease lease.LeaseID) (rev int64) +} + +// TxnWrite represents a transaction that can modify the store. +type TxnWrite interface { + TxnRead + WriteView + // Changes gets the changes made since opening the write txn. + Changes() []mvccpb.KeyValue +} + +// txnReadWrite coerces a read txn to a write, panicking on any write operation. +type txnReadWrite struct{ TxnRead } + +func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") } +func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + panic("unexpected Put") +} +func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil } + +func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} } + +type KV interface { + ReadView + WriteView + + // Read creates a read transaction. + Read() TxnRead + + // Write creates a write transaction. + Write() TxnWrite + + // Hash retrieves the hash of KV state and revision. + // This method is designed for consistency checking purposes. + Hash() (hash uint32, revision int64, err error) // Compact frees all superseded keys with revisions less than rev. Compact(rev int64) (<-chan struct{}, error) - // Hash retrieves the hash of KV state and revision. - // This method is designed for consistency checking purpose. - Hash() (hash uint32, revision int64, err error) - - // Commit commits txns into the underlying backend. + // Commit commits outstanding txns into the underlying backend. Commit() // Restore restores the KV store from a backend. diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go new file mode 100644 index 00000000000..f40ba8edc22 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kv_view.go @@ -0,0 +1,53 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/coreos/etcd/lease" +) + +type readView struct{ kv KV } + +func (rv *readView) FirstRev() int64 { + tr := rv.kv.Read() + defer tr.End() + return tr.FirstRev() +} + +func (rv *readView) Rev() int64 { + tr := rv.kv.Read() + defer tr.End() + return tr.Rev() +} + +func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + tr := rv.kv.Read() + defer tr.End() + return tr.Range(key, end, ro) +} + +type writeView struct{ kv KV } + +func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) { + tw := wv.kv.Write() + defer tw.End() + return tw.DeleteRange(key, end) +} + +func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + tw := wv.kv.Write() + defer tw.End() + return tw.Put(key, value, lease) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go index 28a18a06597..28a508ccb95 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore.go +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore.go @@ -18,7 +18,6 @@ import ( "encoding/binary" "errors" "math" - "math/rand" "sync" "time" @@ -34,25 +33,29 @@ var ( keyBucketName = []byte("key") metaBucketName = []byte("meta") + consistentIndexKeyName = []byte("consistent_index") + scheduledCompactKeyName = []byte("scheduledCompactRev") + finishedCompactKeyName = []byte("finishedCompactRev") + + ErrCompacted = errors.New("mvcc: required revision has been compacted") + ErrFutureRev = errors.New("mvcc: required revision is a future revision") + ErrCanceled = 
errors.New("mvcc: watcher is canceled") + ErrClosed = errors.New("mvcc: closed") + + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") +) + +const ( // markedRevBytesLen is the byte length of marked revision. // The first `revBytesLen` bytes represents a normal revision. The last // one byte is the mark. markedRevBytesLen = revBytesLen + 1 markBytePosition = markedRevBytesLen - 1 markTombstone byte = 't' - - consistentIndexKeyName = []byte("consistent_index") - scheduledCompactKeyName = []byte("scheduledCompactRev") - finishedCompactKeyName = []byte("finishedCompactRev") - - ErrTxnIDMismatch = errors.New("mvcc: txn id mismatch") - ErrCompacted = errors.New("mvcc: required revision has been compacted") - ErrFutureRev = errors.New("mvcc: required revision is a future revision") - ErrCanceled = errors.New("mvcc: watcher is canceled") - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") ) +var restoreChunkKeys = 10000 // non-const for testing + // ConsistentIndexGetter is an interface that wraps the Get method. // Consistent index is the offset of an entry in a consistent replicated log. type ConsistentIndexGetter interface { @@ -61,7 +64,11 @@ type ConsistentIndexGetter interface { } type store struct { - mu sync.Mutex // guards the following + ReadView + WriteView + + // mu read locks for txns and write locks for non-txn store changes. + mu sync.RWMutex ig ConsistentIndexGetter @@ -70,19 +77,19 @@ type store struct { le lease.Lessor - currentRev revision - // the main revision of the last compaction + // revMuLock protects currentRev and compactMainRev. + // Locked at end of write txn and released after write txn unlock lock. + // Locked before locking read txn and released after locking. + revMu sync.RWMutex + // currentRev is the revision of the last completed transaction. + currentRev int64 + // compactMainRev is the main revision of the last compaction. 
compactMainRev int64 - tx backend.BatchTx - txnID int64 // tracks the current txnID to verify txn operations - txnModify bool - // bytesBuf8 is a byte slice of length 8 // to avoid a repetitive allocation in saveIndex. bytesBuf8 []byte - changes []mvccpb.KeyValue fifoSched schedule.Scheduler stopc chan struct{} @@ -98,17 +105,18 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto le: le, - currentRev: revision{main: 1}, + currentRev: 1, compactMainRev: -1, - bytesBuf8: make([]byte, 8, 8), + bytesBuf8: make([]byte, 8), fifoSched: schedule.NewFIFOScheduler(), stopc: make(chan struct{}), } - + s.ReadView = &readView{s} + s.WriteView = &writeView{s} if s.le != nil { - s.le.SetRangeDeleter(s) + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) } tx := s.b.BatchTx() @@ -126,140 +134,6 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto return s } -func (s *store) Rev() int64 { - s.mu.Lock() - defer s.mu.Unlock() - - return s.currentRev.main -} - -func (s *store) FirstRev() int64 { - s.mu.Lock() - defer s.mu.Unlock() - - return s.compactMainRev -} - -func (s *store) Put(key, value []byte, lease lease.LeaseID) int64 { - id := s.TxnBegin() - s.put(key, value, lease) - s.txnEnd(id) - - putCounter.Inc() - - return int64(s.currentRev.main) -} - -func (s *store) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - id := s.TxnBegin() - kvs, count, rev, err := s.rangeKeys(key, end, ro.Limit, ro.Rev, ro.Count) - s.txnEnd(id) - - rangeCounter.Inc() - - r = &RangeResult{ - KVs: kvs, - Count: count, - Rev: rev, - } - - return r, err -} - -func (s *store) DeleteRange(key, end []byte) (n, rev int64) { - id := s.TxnBegin() - n = s.deleteRange(key, end) - s.txnEnd(id) - - deleteCounter.Inc() - - return n, int64(s.currentRev.main) -} - -func (s *store) TxnBegin() int64 { - s.mu.Lock() - s.currentRev.sub = 0 - s.tx = s.b.BatchTx() - s.tx.Lock() - - s.txnID = rand.Int63() - return s.txnID -} - 
-func (s *store) TxnEnd(txnID int64) error { - err := s.txnEnd(txnID) - if err != nil { - return err - } - - txnCounter.Inc() - return nil -} - -// txnEnd is used for unlocking an internal txn. It does -// not increase the txnCounter. -func (s *store) txnEnd(txnID int64) error { - if txnID != s.txnID { - return ErrTxnIDMismatch - } - - // only update index if the txn modifies the mvcc state. - // read only txn might execute with one write txn concurrently, - // it should not write its index to mvcc. - if s.txnModify { - s.saveIndex() - } - s.txnModify = false - - s.tx.Unlock() - if s.currentRev.sub != 0 { - s.currentRev.main += 1 - } - s.currentRev.sub = 0 - - dbTotalSize.Set(float64(s.b.Size())) - s.mu.Unlock() - return nil -} - -func (s *store) TxnRange(txnID int64, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - if txnID != s.txnID { - return nil, ErrTxnIDMismatch - } - - kvs, count, rev, err := s.rangeKeys(key, end, ro.Limit, ro.Rev, ro.Count) - - r = &RangeResult{ - KVs: kvs, - Count: count, - Rev: rev, - } - return r, err -} - -func (s *store) TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) { - if txnID != s.txnID { - return 0, ErrTxnIDMismatch - } - - s.put(key, value, lease) - return int64(s.currentRev.main + 1), nil -} - -func (s *store) TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) { - if txnID != s.txnID { - return 0, 0, ErrTxnIDMismatch - } - - n = s.deleteRange(key, end) - if n != 0 || s.currentRev.sub != 0 { - rev = int64(s.currentRev.main + 1) - } else { - rev = int64(s.currentRev.main) - } - return n, rev, nil -} - func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { if ctx == nil || ctx.Err() != nil { s.mu.Lock() @@ -275,16 +149,25 @@ func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { close(ch) } +func (s *store) Hash() (hash uint32, revision int64, err error) { + s.b.ForceCommit() + h, err := s.b.Hash(DefaultIgnores) + return h, 
s.currentRev, err +} + func (s *store) Compact(rev int64) (<-chan struct{}, error) { s.mu.Lock() defer s.mu.Unlock() + s.revMu.Lock() + defer s.revMu.Unlock() + if rev <= s.compactMainRev { ch := make(chan struct{}) f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } s.fifoSched.Schedule(f) return ch, ErrCompacted } - if rev > s.currentRev.main { + if rev > s.currentRev { return nil, ErrFutureRev } @@ -333,24 +216,14 @@ func init() { } } -func (s *store) Hash() (uint32, int64, error) { - s.mu.Lock() - defer s.mu.Unlock() - s.b.ForceCommit() - - h, err := s.b.Hash(DefaultIgnores) - rev := s.currentRev.main - return h, rev, err -} - func (s *store) Commit() { s.mu.Lock() defer s.mu.Unlock() - s.tx = s.b.BatchTx() - s.tx.Lock() - s.saveIndex() - s.tx.Unlock() + tx := s.b.BatchTx() + tx.Lock() + s.saveIndex(tx) + tx.Unlock() s.b.ForceCommit() } @@ -363,10 +236,8 @@ func (s *store) Restore(b backend.Backend) error { s.b = b s.kvindex = newTreeIndex() - s.currentRev = revision{main: 1} + s.currentRev = 1 s.compactMainRev = -1 - s.tx = b.BatchTx() - s.txnID = -1 s.fifoSched = schedule.NewFIFOScheduler() s.stopc = make(chan struct{}) @@ -374,75 +245,63 @@ func (s *store) Restore(b backend.Backend) error { } func (s *store) restore() error { + reportDbTotalSizeInBytesMu.Lock() + b := s.b + reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } + reportDbTotalSizeInBytesMu.Unlock() + min, max := newRevBytes(), newRevBytes() revToBytes(revision{main: 1}, min) revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) keyToLease := make(map[string]lease.LeaseID) - // use an unordered map to hold the temp index data to speed up - // the initial key index recovery. - // we will convert this unordered map into the tree index later. 
- unordered := make(map[string]*keyIndex, 100000) - // restore index tx := s.b.BatchTx() tx.Lock() + _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) if len(finishedCompactBytes) != 0 { s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main plog.Printf("restore compact to %d", s.compactMainRev) } - - // TODO: limit N to reduce max memory usage - keys, vals := tx.UnsafeRange(keyBucketName, min, max, 0) - for i, key := range keys { - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vals[i]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - - rev := bytesToRev(key[:revBytesLen]) - - // restore index - switch { - case isTombstone(key): - if ki, ok := unordered[string(kv.Key)]; ok { - ki.tombstone(rev.main, rev.sub) - } - delete(keyToLease, string(kv.Key)) - - default: - ki, ok := unordered[string(kv.Key)] - if ok { - ki.put(rev.main, rev.sub) - } else { - ki = &keyIndex{key: kv.Key} - ki.restore(revision{kv.CreateRevision, 0}, rev, kv.Version) - unordered[string(kv.Key)] = ki - } - - if lid := lease.LeaseID(kv.Lease); lid != lease.NoLease { - keyToLease[string(kv.Key)] = lid - } else { - delete(keyToLease, string(kv.Key)) - } - } - - // update revision - s.currentRev = rev + _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) + scheduledCompact := int64(0) + if len(scheduledCompactBytes) != 0 { + scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main } - // restore the tree index from the unordered index. - for _, v := range unordered { - s.kvindex.Insert(v) + // index keys concurrently as they're loaded in from tx + keysGauge.Set(0) + rkvc, revc := restoreIntoIndex(s.kvindex) + for { + keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys)) + if len(keys) == 0 { + break + } + // rkvc blocks if the total pending keys exceeds the restore + // chunk size to keep keys from consuming too much memory. 
+ restoreChunk(rkvc, keys, vals, keyToLease) + if len(keys) < restoreChunkKeys { + // partial set implies final set + break + } + // next set begins after where this one ended + newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) + newMin.sub++ + revToBytes(newMin, min) } + close(rkvc) + s.currentRev = <-revc // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. // the correct revision should be set to compaction revision in the case, not the largest revision // we have seen. - if s.currentRev.main < s.compactMainRev { - s.currentRev.main = s.compactMainRev + if s.currentRev < s.compactMainRev { + s.currentRev = s.compactMainRev + } + if scheduledCompact <= s.compactMainRev { + scheduledCompact = 0 } for key, lid := range keyToLease { @@ -455,15 +314,6 @@ func (s *store) restore() error { } } - _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) - scheduledCompact := int64(0) - if len(scheduledCompactBytes) != 0 { - scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main - if scheduledCompact <= s.compactMainRev { - scheduledCompact = 0 - } - } - tx.Unlock() if scheduledCompact != 0 { @@ -474,6 +324,75 @@ func (s *store) restore() error { return nil } +type revKeyValue struct { + key []byte + kv mvccpb.KeyValue + kstr string +} + +func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { + rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) + go func() { + currentRev := int64(1) + defer func() { revc <- currentRev }() + // restore the tree index from streaming the unordered index. 
+ kiCache := make(map[string]*keyIndex, restoreChunkKeys) + for rkv := range rkvc { + ki, ok := kiCache[rkv.kstr] + // purge kiCache if many keys but still missing in the cache + if !ok && len(kiCache) >= restoreChunkKeys { + i := 10 + for k := range kiCache { + delete(kiCache, k) + if i--; i == 0 { + break + } + } + } + // cache miss, fetch from tree index if there + if !ok { + ki = &keyIndex{key: rkv.kv.Key} + if idxKey := idx.KeyIndex(ki); idxKey != nil { + kiCache[rkv.kstr], ki = idxKey, idxKey + ok = true + } + } + rev := bytesToRev(rkv.key) + currentRev = rev.main + if ok { + if isTombstone(rkv.key) { + ki.tombstone(rev.main, rev.sub) + continue + } + ki.put(rev.main, rev.sub) + } else if !isTombstone(rkv.key) { + ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) + idx.Insert(ki) + kiCache[rkv.kstr] = ki + } + } + }() + return rkvc, revc +} + +func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { + for i, key := range keys { + rkv := revKeyValue{key: key} + if err := rkv.kv.Unmarshal(vals[i]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + rkv.kstr = string(rkv.kv.Key) + if isTombstone(key) { + delete(keyToLease, rkv.kstr) + } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease { + keyToLease[rkv.kstr] = lid + } else { + delete(keyToLease, rkv.kstr) + } + kvc <- rkv + } +} + func (s *store) Close() error { close(s.stopc) s.fifoSched.Stop() @@ -490,180 +409,10 @@ func (a *store) Equal(b *store) bool { return a.kvindex.Equal(b.kvindex) } -// range is a keyword in Go, add Keys suffix. 
-func (s *store) rangeKeys(key, end []byte, limit, rangeRev int64, countOnly bool) (kvs []mvccpb.KeyValue, count int, curRev int64, err error) { - curRev = int64(s.currentRev.main) - if s.currentRev.sub > 0 { - curRev += 1 - } - - if rangeRev > curRev { - return nil, -1, s.currentRev.main, ErrFutureRev - } - var rev int64 - if rangeRev <= 0 { - rev = curRev - } else { - rev = rangeRev - } - if rev < s.compactMainRev { - return nil, -1, 0, ErrCompacted - } - - _, revpairs := s.kvindex.Range(key, end, int64(rev)) - if len(revpairs) == 0 { - return nil, 0, curRev, nil - } - if countOnly { - return nil, len(revpairs), curRev, nil - } - - for _, revpair := range revpairs { - start, end := revBytesRange(revpair) - - _, vs := s.tx.UnsafeRange(keyBucketName, start, end, 0) - if len(vs) != 1 { - plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) - } - - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vs[0]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - kvs = append(kvs, kv) - if limit > 0 && len(kvs) >= int(limit) { - break - } - } - return kvs, len(revpairs), curRev, nil -} - -func (s *store) put(key, value []byte, leaseID lease.LeaseID) { - s.txnModify = true - - rev := s.currentRev.main + 1 - c := rev - oldLease := lease.NoLease - - // if the key exists before, use its previous created and - // get its previous leaseID - _, created, ver, err := s.kvindex.Get(key, rev) - if err == nil { - c = created.main - oldLease = s.le.GetLease(lease.LeaseItem{Key: string(key)}) - } - - ibytes := newRevBytes() - revToBytes(revision{main: rev, sub: s.currentRev.sub}, ibytes) - - ver = ver + 1 - kv := mvccpb.KeyValue{ - Key: key, - Value: value, - CreateRevision: c, - ModRevision: rev, - Version: ver, - Lease: int64(leaseID), - } - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - s.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - s.kvindex.Put(key, revision{main: rev, sub: s.currentRev.sub}) - 
s.changes = append(s.changes, kv) - s.currentRev.sub += 1 - - if oldLease != lease.NoLease { - if s.le == nil { - panic("no lessor to detach lease") - } - - err = s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - plog.Errorf("unexpected error from lease detach: %v", err) - } - } - - if leaseID != lease.NoLease { - if s.le == nil { - panic("no lessor to attach lease") - } - - err = s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - panic("unexpected error from lease Attach") - } - } -} - -func (s *store) deleteRange(key, end []byte) int64 { - s.txnModify = true - - rrev := s.currentRev.main - if s.currentRev.sub > 0 { - rrev += 1 - } - keys, revs := s.kvindex.Range(key, end, rrev) - - if len(keys) == 0 { - return 0 - } - - for i, key := range keys { - s.delete(key, revs[i]) - } - return int64(len(keys)) -} - -func (s *store) delete(key []byte, rev revision) { - mainrev := s.currentRev.main + 1 - - ibytes := newRevBytes() - revToBytes(revision{main: mainrev, sub: s.currentRev.sub}, ibytes) - ibytes = appendMarkTombstone(ibytes) - - kv := mvccpb.KeyValue{ - Key: key, - } - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - s.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - err = s.kvindex.Tombstone(key, revision{main: mainrev, sub: s.currentRev.sub}) - if err != nil { - plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) - } - s.changes = append(s.changes, kv) - s.currentRev.sub += 1 - - item := lease.LeaseItem{Key: string(key)} - leaseID := s.le.GetLease(item) - - if leaseID != lease.NoLease { - err = s.le.Detach(leaseID, []lease.LeaseItem{item}) - if err != nil { - plog.Errorf("cannot detach %v", err) - } - } -} - -func (s *store) getChanges() []mvccpb.KeyValue { - changes := s.changes - s.changes = make([]mvccpb.KeyValue, 0, 4) - return changes -} - -func (s *store) saveIndex() { +func (s *store) saveIndex(tx backend.BatchTx) { if s.ig == nil 
{ return } - tx := s.tx bs := s.bytesBuf8 binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex()) // put the index into the underlying backend diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go new file mode 100644 index 00000000000..13d4d530d0a --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go @@ -0,0 +1,253 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +type storeTxnRead struct { + s *store + tx backend.ReadTx + + firstRev int64 + rev int64 +} + +func (s *store) Read() TxnRead { + s.mu.RLock() + tx := s.b.ReadTx() + s.revMu.RLock() + tx.Lock() + firstRev, rev := s.compactMainRev, s.currentRev + s.revMu.RUnlock() + return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev}) +} + +func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } +func (tr *storeTxnRead) Rev() int64 { return tr.rev } + +func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + return tr.rangeKeys(key, end, tr.Rev(), ro) +} + +func (tr *storeTxnRead) End() { + tr.tx.Unlock() + tr.s.mu.RUnlock() +} + +type storeTxnWrite struct { + *storeTxnRead + tx backend.BatchTx + // beginRev is the revision where the txn begins; it will write to the next revision. 
+ beginRev int64 + changes []mvccpb.KeyValue +} + +func (s *store) Write() TxnWrite { + s.mu.RLock() + tx := s.b.BatchTx() + tx.Lock() + tw := &storeTxnWrite{ + storeTxnRead: &storeTxnRead{s, tx, 0, 0}, + tx: tx, + beginRev: s.currentRev, + changes: make([]mvccpb.KeyValue, 0, 4), + } + return newMetricsTxnWrite(tw) +} + +func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev } + +func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + rev := tw.beginRev + if len(tw.changes) > 0 { + rev++ + } + return tw.rangeKeys(key, end, rev, ro) +} + +func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { + if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { + return n, int64(tw.beginRev + 1) + } + return 0, int64(tw.beginRev) +} + +func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { + tw.put(key, value, lease) + return int64(tw.beginRev + 1) +} + +func (tw *storeTxnWrite) End() { + // only update index if the txn modifies the mvcc state. + if len(tw.changes) != 0 { + tw.s.saveIndex(tw.tx) + // hold revMu lock to prevent new read txns from opening until writeback. 
+ tw.s.revMu.Lock() + tw.s.currentRev++ + } + tw.tx.Unlock() + if len(tw.changes) != 0 { + tw.s.revMu.Unlock() + } + tw.s.mu.RUnlock() +} + +func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { + rev := ro.Rev + if rev > curRev { + return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev + } + if rev <= 0 { + rev = curRev + } + if rev < tr.s.compactMainRev { + return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted + } + + _, revpairs := tr.s.kvindex.Range(key, end, int64(rev)) + if len(revpairs) == 0 { + return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil + } + if ro.Count { + return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil + } + + var kvs []mvccpb.KeyValue + for _, revpair := range revpairs { + start, end := revBytesRange(revpair) + _, vs := tr.tx.UnsafeRange(keyBucketName, start, end, 0) + if len(vs) != 1 { + plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) + } + + var kv mvccpb.KeyValue + if err := kv.Unmarshal(vs[0]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + kvs = append(kvs, kv) + if ro.Limit > 0 && len(kvs) >= int(ro.Limit) { + break + } + } + return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil +} + +func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { + rev := tw.beginRev + 1 + c := rev + oldLease := lease.NoLease + + // if the key exists before, use its previous created and + // get its previous leaseID + _, created, ver, err := tw.s.kvindex.Get(key, rev) + if err == nil { + c = created.main + oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) + } + + ibytes := newRevBytes() + idxRev := revision{main: rev, sub: int64(len(tw.changes))} + revToBytes(idxRev, ibytes) + + ver = ver + 1 + kv := mvccpb.KeyValue{ + Key: key, + Value: value, + CreateRevision: c, + ModRevision: rev, + Version: ver, + Lease: int64(leaseID), + } + + d, err := kv.Marshal() + if err != nil { 
+ plog.Fatalf("cannot marshal event: %v", err) + } + + tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) + tw.s.kvindex.Put(key, idxRev) + tw.changes = append(tw.changes, kv) + + if oldLease != lease.NoLease { + if tw.s.le == nil { + panic("no lessor to detach lease") + } + err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + plog.Errorf("unexpected error from lease detach: %v", err) + } + } + if leaseID != lease.NoLease { + if tw.s.le == nil { + panic("no lessor to attach lease") + } + err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + panic("unexpected error from lease Attach") + } + } +} + +func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { + rrev := tw.beginRev + if len(tw.changes) > 0 { + rrev += 1 + } + keys, revs := tw.s.kvindex.Range(key, end, rrev) + if len(keys) == 0 { + return 0 + } + for i, key := range keys { + tw.delete(key, revs[i]) + } + return int64(len(keys)) +} + +func (tw *storeTxnWrite) delete(key []byte, rev revision) { + ibytes := newRevBytes() + idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} + revToBytes(idxRev, ibytes) + ibytes = appendMarkTombstone(ibytes) + + kv := mvccpb.KeyValue{Key: key} + + d, err := kv.Marshal() + if err != nil { + plog.Fatalf("cannot marshal event: %v", err) + } + + tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) + err = tw.s.kvindex.Tombstone(key, idxRev) + if err != nil { + plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) + } + tw.changes = append(tw.changes, kv) + + item := lease.LeaseItem{Key: string(key)} + leaseID := tw.s.le.GetLease(item) + + if leaseID != lease.NoLease { + err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) + if err != nil { + plog.Errorf("cannot detach %v", err) + } + } +} + +func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes } diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go index 
aa8af6aa552..a65fe59b996 100644 --- a/vendor/github.com/coreos/etcd/mvcc/metrics.go +++ b/vendor/github.com/coreos/etcd/mvcc/metrics.go @@ -15,6 +15,8 @@ package mvcc import ( + "sync" + "github.com/prometheus/client_golang/prometheus" ) @@ -129,12 +131,21 @@ var ( Buckets: prometheus.ExponentialBuckets(100, 2, 14), }) - dbTotalSize = prometheus.NewGauge(prometheus.GaugeOpts{ + dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", Name: "db_total_size_in_bytes", Help: "Total size of the underlying database in bytes.", - }) + }, + func() float64 { + reportDbTotalSizeInBytesMu.RLock() + defer reportDbTotalSizeInBytesMu.RUnlock() + return reportDbTotalSizeInBytes() + }, + ) + // overridden by mvcc initialization + reportDbTotalSizeInBytesMu sync.RWMutex + reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 } ) func init() { diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go new file mode 100644 index 00000000000..fd2144279ae --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go @@ -0,0 +1,67 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import ( + "github.com/coreos/etcd/lease" +) + +type metricsTxnWrite struct { + TxnWrite + ranges uint + puts uint + deletes uint +} + +func newMetricsTxnRead(tr TxnRead) TxnRead { + return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0} +} + +func newMetricsTxnWrite(tw TxnWrite) TxnWrite { + return &metricsTxnWrite{tw, 0, 0, 0} +} + +func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) { + tw.ranges++ + return tw.TxnWrite.Range(key, end, ro) +} + +func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) { + tw.deletes++ + return tw.TxnWrite.DeleteRange(key, end) +} + +func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + tw.puts++ + return tw.TxnWrite.Put(key, value, lease) +} + +func (tw *metricsTxnWrite) End() { + defer tw.TxnWrite.End() + if sum := tw.ranges + tw.puts + tw.deletes; sum != 1 { + if sum > 1 { + txnCounter.Inc() + } + return + } + switch { + case tw.ranges == 1: + rangeCounter.Inc() + case tw.puts == 1: + putCounter.Inc() + case tw.deletes == 1: + deleteCounter.Inc() + } +} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go index aa053f4e66e..7033f132662 100644 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go @@ -713,7 +713,7 @@ func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } var fileDescriptorKv = []byte{ // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 
0x52, 0x2f, 0x62, 0xfa, diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go index dbb79bcb693..68d9ab71d27 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go @@ -41,10 +41,12 @@ type watchable interface { } type watchableStore struct { - mu sync.Mutex - *store + // mu protects watcher groups and batches. It should never be locked + // before locking store.mu to avoid deadlock. + mu sync.RWMutex + // victims are watcher batches that were blocked on the watch channel victims []watcherBatch victimc chan struct{} @@ -76,9 +78,11 @@ func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGet synced: newWatcherGroup(), stopc: make(chan struct{}), } + s.store.ReadView = &readView{s} + s.store.WriteView = &writeView{s} if s.le != nil { // use this store as the deleter so revokes trigger watch events - s.le.SetRangeDeleter(s) + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) } s.wg.Add(2) go s.syncWatchersLoop() @@ -86,89 +90,6 @@ func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGet return s } -func (s *watchableStore) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - s.mu.Lock() - defer s.mu.Unlock() - - rev = s.store.Put(key, value, lease) - changes := s.store.getChanges() - if len(changes) != 1 { - plog.Panicf("unexpected len(changes) != 1 after put") - } - - ev := mvccpb.Event{ - Type: mvccpb.PUT, - Kv: &changes[0], - } - s.notify(rev, []mvccpb.Event{ev}) - return rev -} - -func (s *watchableStore) DeleteRange(key, end []byte) (n, rev int64) { - s.mu.Lock() - defer s.mu.Unlock() - - n, rev = s.store.DeleteRange(key, end) - changes := s.store.getChanges() - - if len(changes) != int(n) { - plog.Panicf("unexpected len(changes) != n after deleteRange") - } - - if n == 0 { - return n, rev - } - - evs := make([]mvccpb.Event, n) - for i := range changes { - 
evs[i] = mvccpb.Event{ - Type: mvccpb.DELETE, - Kv: &changes[i]} - evs[i].Kv.ModRevision = rev - } - s.notify(rev, evs) - return n, rev -} - -func (s *watchableStore) TxnBegin() int64 { - s.mu.Lock() - return s.store.TxnBegin() -} - -func (s *watchableStore) TxnEnd(txnID int64) error { - err := s.store.TxnEnd(txnID) - if err != nil { - return err - } - - changes := s.getChanges() - if len(changes) == 0 { - s.mu.Unlock() - return nil - } - - rev := s.store.Rev() - evs := make([]mvccpb.Event, len(changes)) - for i, change := range changes { - switch change.CreateRevision { - case 0: - evs[i] = mvccpb.Event{ - Type: mvccpb.DELETE, - Kv: &changes[i]} - evs[i].Kv.ModRevision = rev - default: - evs[i] = mvccpb.Event{ - Type: mvccpb.PUT, - Kv: &changes[i]} - } - } - - s.notify(rev, evs) - s.mu.Unlock() - - return nil -} - func (s *watchableStore) Close() error { close(s.stopc) s.wg.Wait() @@ -186,9 +107,6 @@ func (s *watchableStore) NewWatchStream() WatchStream { } func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { - s.mu.Lock() - defer s.mu.Unlock() - wa := &watcher{ key: key, end: end, @@ -198,21 +116,24 @@ func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch c fcs: fcs, } - s.store.mu.Lock() - synced := startRev > s.store.currentRev.main || startRev == 0 + s.mu.Lock() + s.revMu.RLock() + synced := startRev > s.store.currentRev || startRev == 0 if synced { - wa.minRev = s.store.currentRev.main + 1 + wa.minRev = s.store.currentRev + 1 if startRev > wa.minRev { wa.minRev = startRev } } - s.store.mu.Unlock() if synced { s.synced.add(wa) } else { slowWatcherGauge.Inc() s.unsynced.add(wa) } + s.revMu.RUnlock() + s.mu.Unlock() + watcherGauge.Inc() return wa, func() { s.cancelWatcher(wa) } @@ -258,17 +179,35 @@ func (s *watchableStore) cancelWatcher(wa *watcher) { s.mu.Unlock() } +func (s *watchableStore) Restore(b backend.Backend) error { + s.mu.Lock() + 
defer s.mu.Unlock() + err := s.store.Restore(b) + if err != nil { + return err + } + + for wa := range s.synced.watchers { + s.unsynced.watchers.add(wa) + } + s.synced = newWatcherGroup() + return nil +} + // syncWatchersLoop syncs the watcher in the unsynced map every 100ms. func (s *watchableStore) syncWatchersLoop() { defer s.wg.Done() for { - s.mu.Lock() + s.mu.RLock() st := time.Now() lastUnsyncedWatchers := s.unsynced.size() - s.syncWatchers() - unsyncedWatchers := s.unsynced.size() - s.mu.Unlock() + s.mu.RUnlock() + + unsyncedWatchers := 0 + if lastUnsyncedWatchers > 0 { + unsyncedWatchers = s.syncWatchers() + } syncDuration := time.Since(st) waitDuration := 100 * time.Millisecond @@ -295,9 +234,9 @@ func (s *watchableStore) syncVictimsLoop() { for s.moveVictims() != 0 { // try to update all victim watchers } - s.mu.Lock() + s.mu.RLock() isEmpty := len(s.victims) == 0 - s.mu.Unlock() + s.mu.RUnlock() var tickc <-chan time.Time if !isEmpty { @@ -340,8 +279,8 @@ func (s *watchableStore) moveVictims() (moved int) { // assign completed victim watchers to unsync/sync s.mu.Lock() - s.store.mu.Lock() - curRev := s.store.currentRev.main + s.store.revMu.RLock() + curRev := s.store.currentRev for w, eb := range wb { if newVictim != nil && newVictim[w] != nil { // couldn't send watch response; stays victim @@ -358,7 +297,7 @@ func (s *watchableStore) moveVictims() (moved int) { s.synced.add(w) } } - s.store.mu.Unlock() + s.store.revMu.RUnlock() s.mu.Unlock() } @@ -376,19 +315,23 @@ func (s *watchableStore) moveVictims() (moved int) { // 2. iterate over the set to get the minimum revision and remove compacted watchers // 3. use minimum revision to get all key-value pairs and send those events to watchers // 4. 
remove synced watchers in set from unsynced group and move to synced group -func (s *watchableStore) syncWatchers() { +func (s *watchableStore) syncWatchers() int { + s.mu.Lock() + defer s.mu.Unlock() + if s.unsynced.size() == 0 { - return + return 0 } - s.store.mu.Lock() - defer s.store.mu.Unlock() + s.store.revMu.RLock() + defer s.store.revMu.RUnlock() // in order to find key-value pairs from unsynced watchers, we need to // find min revision index, and these revisions can be used to // query the backend store of key-value pairs - curRev := s.store.currentRev.main + curRev := s.store.currentRev compactionRev := s.store.compactMainRev + wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) minBytes, maxBytes := newRevBytes(), newRevBytes() revToBytes(revision{main: minRev}, minBytes) @@ -396,7 +339,7 @@ func (s *watchableStore) syncWatchers() { // UnsafeRange returns keys and values. And in boltdb, keys are revisions. // values are actual key-value pairs in backend. 
- tx := s.store.b.BatchTx() + tx := s.store.b.ReadTx() tx.Lock() revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) evs := kvsToEvents(wg, revs, vs) @@ -446,6 +389,8 @@ func (s *watchableStore) syncWatchers() { vsz += len(v) } slowWatcherGauge.Set(float64(s.unsynced.size() + vsz)) + + return s.unsynced.size() } // kvsToEvents gets all events for the watchers from all key-value pairs @@ -511,8 +456,8 @@ func (s *watchableStore) addVictim(victim watcherBatch) { func (s *watchableStore) rev() int64 { return s.store.Rev() } func (s *watchableStore) progress(w *watcher) { - s.mu.Lock() - defer s.mu.Unlock() + s.mu.RLock() + defer s.mu.RUnlock() if _, ok := s.synced.watchers[w]; ok { w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go new file mode 100644 index 00000000000..5c5bfda1341 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go @@ -0,0 +1,53 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import ( + "github.com/coreos/etcd/mvcc/mvccpb" +) + +func (tw *watchableStoreTxnWrite) End() { + changes := tw.Changes() + if len(changes) == 0 { + tw.TxnWrite.End() + return + } + + rev := tw.Rev() + 1 + evs := make([]mvccpb.Event, len(changes)) + for i, change := range changes { + evs[i].Kv = &changes[i] + if change.CreateRevision == 0 { + evs[i].Type = mvccpb.DELETE + evs[i].Kv.ModRevision = rev + } else { + evs[i].Type = mvccpb.PUT + } + } + + // end write txn under watchable store lock so the updates are visible + // when asynchronous event posting checks the current store revision + tw.s.mu.Lock() + tw.s.notify(rev, evs) + tw.TxnWrite.End() + tw.s.mu.Unlock() +} + +type watchableStoreTxnWrite struct { + TxnWrite + s *watchableStore +} + +func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} } diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go index 2710c1cc940..6ef1d0ce8bb 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go +++ b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go @@ -183,7 +183,7 @@ func (wg *watcherGroup) add(wa *watcher) { // contains is whether the given key has a watcher in the group. func (wg *watcherGroup) contains(key string) bool { _, ok := wg.keyWatchers[key] - return ok || wg.ranges.Contains(adt.NewStringAffinePoint(key)) + return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) } // size gives the number of unique watchers in the group. 
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go index 6edbe593fb4..9769771ea4f 100644 --- a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go +++ b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go @@ -15,6 +15,7 @@ package adt import ( + "bytes" "math" ) @@ -134,25 +135,29 @@ func (x *intervalNode) updateMax() { type nodeVisitor func(n *intervalNode) bool // visit will call a node visitor on each node that overlaps the given interval -func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) { +func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) bool { if x == nil { - return + return true } v := iv.Compare(&x.iv.Ivl) switch { case v < 0: - x.left.visit(iv, nv) + if !x.left.visit(iv, nv) { + return false + } case v > 0: maxiv := Interval{x.iv.Ivl.Begin, x.max} if maxiv.Compare(iv) == 0 { - x.left.visit(iv, nv) - x.right.visit(iv, nv) + if !x.left.visit(iv, nv) || !x.right.visit(iv, nv) { + return false + } } default: - nv(x) - x.left.visit(iv, nv) - x.right.visit(iv, nv) + if !x.left.visit(iv, nv) || !nv(x) || !x.right.visit(iv, nv) { + return false + } } + return true } type IntervalValue struct { @@ -402,10 +407,11 @@ func (ivt *IntervalTree) MaxHeight() int { return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5) } -// IntervalVisitor is used on tree searchs; return false to stop searching. +// IntervalVisitor is used on tree searches; return false to stop searching. type IntervalVisitor func(n *IntervalValue) bool // Visit calls a visitor function on every tree node intersecting the given interval. +// It will visit each interval [x, y) in ascending order sorted on x. 
func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) { ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) }) } @@ -432,8 +438,8 @@ func (ivt *IntervalTree) Find(ivl Interval) (ret *IntervalValue) { return &n.iv } -// Contains returns true if there is some tree node intersecting the given interval. -func (ivt *IntervalTree) Contains(iv Interval) bool { +// Intersects returns true if there is some tree node intersecting the given interval. +func (ivt *IntervalTree) Intersects(iv Interval) bool { x := ivt.root for x != nil && iv.Compare(&x.iv.Ivl) != 0 { if x.left != nil && x.left.max.Compare(iv.Begin) > 0 { @@ -445,6 +451,30 @@ func (ivt *IntervalTree) Contains(iv Interval) bool { return x != nil } +// Contains returns true if the interval tree's keys cover the entire given interval. +func (ivt *IntervalTree) Contains(ivl Interval) bool { + var maxEnd, minBegin Comparable + + isContiguous := true + ivt.Visit(ivl, func(n *IntervalValue) bool { + if minBegin == nil { + minBegin = n.Ivl.Begin + maxEnd = n.Ivl.End + return true + } + if maxEnd.Compare(n.Ivl.Begin) < 0 { + isContiguous = false + return false + } + if n.Ivl.End.Compare(maxEnd) > 0 { + maxEnd = n.Ivl.End + } + return true + }) + + return isContiguous && minBegin != nil && maxEnd.Compare(ivl.End) >= 0 && minBegin.Compare(ivl.Begin) <= 0 +} + // Stab returns a slice with all elements in the tree intersecting the interval. 
func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) { if ivt.count == 0 { @@ -529,3 +559,32 @@ func (v Int64Comparable) Compare(c Comparable) int { } return 0 } + +// BytesAffineComparable treats empty byte arrays as > all other byte arrays +type BytesAffineComparable []byte + +func (b BytesAffineComparable) Compare(c Comparable) int { + bc := c.(BytesAffineComparable) + + if len(b) == 0 { + if len(bc) == 0 { + return 0 + } + return 1 + } + if len(bc) == 0 { + return -1 + } + + return bytes.Compare(b, bc) +} + +func NewBytesAffineInterval(begin, end []byte) Interval { + return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)} +} +func NewBytesAffinePoint(b []byte) Interval { + be := make([]byte, len(b)+1) + copy(be, b) + be[len(b)] = 0 + return NewBytesAffineInterval(b, be) +} diff --git a/vendor/github.com/coreos/etcd/pkg/cors/BUILD b/vendor/github.com/coreos/etcd/pkg/cors/BUILD new file mode 100644 index 00000000000..e707c8a14d5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/cors/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["cors.go"], + importpath = "github.com/coreos/etcd/pkg/cors", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/cors/cors.go b/vendor/github.com/coreos/etcd/pkg/cors/cors.go new file mode 100644 index 00000000000..0c64f16a390 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/cors/cors.go @@ -0,0 +1,90 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cors handles cross-origin HTTP requests (CORS). +package cors + +import ( + "fmt" + "net/http" + "net/url" + "sort" + "strings" +) + +type CORSInfo map[string]bool + +// Set implements the flag.Value interface to allow users to define a list of CORS origins +func (ci *CORSInfo) Set(s string) error { + m := make(map[string]bool) + for _, v := range strings.Split(s, ",") { + v = strings.TrimSpace(v) + if v == "" { + continue + } + if v != "*" { + if _, err := url.Parse(v); err != nil { + return fmt.Errorf("Invalid CORS origin: %s", err) + } + } + m[v] = true + + } + *ci = CORSInfo(m) + return nil +} + +func (ci *CORSInfo) String() string { + o := make([]string, 0) + for k := range *ci { + o = append(o, k) + } + sort.StringSlice(o).Sort() + return strings.Join(o, ",") +} + +// OriginAllowed determines whether the server will allow a given CORS origin. +func (c CORSInfo) OriginAllowed(origin string) bool { + return c["*"] || c[origin] +} + +type CORSHandler struct { + Handler http.Handler + Info *CORSInfo +} + +// addHeader adds the correct cors headers given an origin +func (h *CORSHandler) addHeader(w http.ResponseWriter, origin string) { + w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE") + w.Header().Add("Access-Control-Allow-Origin", origin) + w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization") +} + +// ServeHTTP adds the correct CORS headers based on the origin and returns immediately +// with a 200 OK if the method is OPTIONS. 
+func (h *CORSHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Write CORS header. + if h.Info.OriginAllowed("*") { + h.addHeader(w, "*") + } else if origin := req.Header.Get("Origin"); h.Info.OriginAllowed(origin) { + h.addHeader(w, origin) + } + + if req.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + return + } + + h.Handler.ServeHTTP(w, req) +} diff --git a/vendor/github.com/karlseguin/ccache/BUILD b/vendor/github.com/coreos/etcd/pkg/debugutil/BUILD similarity index 66% rename from vendor/github.com/karlseguin/ccache/BUILD rename to vendor/github.com/coreos/etcd/pkg/debugutil/BUILD index 398b1fd68ad..d943ed2561a 100644 --- a/vendor/github.com/karlseguin/ccache/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/debugutil/BUILD @@ -3,15 +3,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "bucket.go", - "cache.go", - "configuration.go", - "item.go", - "layeredbucket.go", - "layeredcache.go", - "secondarycache.go", + "doc.go", + "pprof.go", ], - importpath = "github.com/karlseguin/ccache", + importpath = "github.com/coreos/etcd/pkg/debugutil", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go b/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go new file mode 100644 index 00000000000..74499eb2737 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package debugutil includes utility functions for debugging. +package debugutil diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go new file mode 100644 index 00000000000..8d5544a3dca --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go @@ -0,0 +1,47 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package debugutil + +import ( + "net/http" + "net/http/pprof" + "runtime" +) + +const HTTPPrefixPProf = "/debug/pprof" + +// PProfHandlers returns a map of pprof handlers keyed by the HTTP path. 
+func PProfHandlers() map[string]http.Handler { + // set only when there's no existing setting + if runtime.SetMutexProfileFraction(-1) == 0 { + // 1 out of 5 mutex events are reported, on average + runtime.SetMutexProfileFraction(5) + } + + m := make(map[string]http.Handler) + + m[HTTPPrefixPProf+"/"] = http.HandlerFunc(pprof.Index) + m[HTTPPrefixPProf+"/profile"] = http.HandlerFunc(pprof.Profile) + m[HTTPPrefixPProf+"/symbol"] = http.HandlerFunc(pprof.Symbol) + m[HTTPPrefixPProf+"/cmdline"] = http.HandlerFunc(pprof.Cmdline) + m[HTTPPrefixPProf+"/trace"] = http.HandlerFunc(pprof.Trace) + m[HTTPPrefixPProf+"/heap"] = pprof.Handler("heap") + m[HTTPPrefixPProf+"/goroutine"] = pprof.Handler("goroutine") + m[HTTPPrefixPProf+"/threadcreate"] = pprof.Handler("threadcreate") + m[HTTPPrefixPProf+"/block"] = pprof.Handler("block") + m[HTTPPrefixPProf+"/mutex"] = pprof.Handler("mutex") + + return m +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go index aad40b75904..fce5126c695 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go @@ -17,6 +17,7 @@ package fileutil import ( "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -101,11 +102,11 @@ func Exist(name string) bool { // shorten the length of the file. 
func ZeroToEnd(f *os.File) error { // TODO: support FALLOC_FL_ZERO_RANGE - off, err := f.Seek(0, os.SEEK_CUR) + off, err := f.Seek(0, io.SeekCurrent) if err != nil { return err } - lenf, lerr := f.Seek(0, os.SEEK_END) + lenf, lerr := f.Seek(0, io.SeekEnd) if lerr != nil { return lerr } @@ -116,6 +117,6 @@ func ZeroToEnd(f *os.File) error { if err = Preallocate(f, lenf, true); err != nil { return err } - _, err = f.Seek(off, os.SEEK_SET) + _, err = f.Seek(off, io.SeekStart) return err } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go index dec25a1af44..939fea62381 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go @@ -17,6 +17,7 @@ package fileutil import ( + "io" "os" "syscall" ) @@ -36,7 +37,7 @@ const ( var ( wrlck = syscall.Flock_t{ Type: syscall.F_WRLCK, - Whence: int16(os.SEEK_SET), + Whence: int16(io.SeekStart), Start: 0, Len: 0, } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go index bb7f0281239..c747b7cf81f 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go @@ -14,7 +14,10 @@ package fileutil -import "os" +import ( + "io" + "os" +) // Preallocate tries to allocate the space for given // file. This operation is only supported on linux by a @@ -22,6 +25,10 @@ import "os" // If the operation is unsupported, no error will be returned. // Otherwise, the error encountered will be returned. 
func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if sizeInBytes == 0 { + // fallocate will return EINVAL if length is 0; skip + return nil + } if extendFile { return preallocExtend(f, sizeInBytes) } @@ -29,15 +36,15 @@ func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { } func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { - curOff, err := f.Seek(0, os.SEEK_CUR) + curOff, err := f.Seek(0, io.SeekCurrent) if err != nil { return err } - size, err := f.Seek(sizeInBytes, os.SEEK_END) + size, err := f.Seek(sizeInBytes, io.SeekEnd) if err != nil { return err } - if _, err = f.Seek(curOff, os.SEEK_SET); err != nil { + if _, err = f.Seek(curOff, io.SeekStart); err != nil { return err } if sizeInBytes > size { diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go index 859fc9d49e1..09f44e7c71d 100644 --- a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go +++ b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go @@ -13,15 +13,6 @@ import ( "net/http" ) -func RequestCanceler(req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} - // GracefulClose drains http.Response.Body until it hits EOF // and closes it. This prevents TCP/TLS connections from closing, // therefore available for reuse. diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/github.com/coreos/etcd/pkg/idutil/id.go index 931beb2d058..2da21062657 100644 --- a/vendor/github.com/coreos/etcd/pkg/idutil/id.go +++ b/vendor/github.com/coreos/etcd/pkg/idutil/id.go @@ -32,8 +32,8 @@ const ( // a node member ID. // // The initial id is in this format: -// High order byte is memberID, next 5 bytes are from timestamp, -// and low order 2 bytes are 0s. +// High order 2 bytes are from memberID, next 5 bytes are from timestamp, +// and low order one byte is a counter. 
// | prefix | suffix | // | 2 bytes | 5 bytes | 1 byte | // | memberID | timestamp | cnt | diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD index 7c84c7d5abe..97fa436fcba 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD @@ -56,7 +56,6 @@ go_library( deps = [ "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/coreos/etcd/pkg/cpuutil:go_default_library", diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go index bb5f392b34c..5e38dc98dbf 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go +++ b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go @@ -16,14 +16,13 @@ package netutil import ( + "context" "net" "net/url" "reflect" "sort" "time" - "golang.org/x/net/context" - "github.com/coreos/etcd/pkg/types" "github.com/coreos/pkg/capnslog" ) @@ -32,11 +31,38 @@ var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil") // indirection for testing - resolveTCPAddr = net.ResolveTCPAddr + resolveTCPAddr = resolveTCPAddrDefault ) const retryInterval = time.Second +// taken from go's ResolveTCP code but uses configurable ctx +func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, error) { + host, port, serr := net.SplitHostPort(addr) + if serr != nil { + return nil, serr + } + portnum, perr := net.DefaultResolver.LookupPort(ctx, "tcp", port) + if perr != nil { + return nil, perr + } + + var ips []net.IPAddr + if ip := net.ParseIP(host); ip != nil { + ips = []net.IPAddr{{IP: ip}} + } else { + // Try as a DNS name. 
+ ipss, err := net.DefaultResolver.LookupIPAddr(ctx, host) + if err != nil { + return nil, err + } + ips = ipss + } + // randomize? + ip := ips[0] + return &net.TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone}, nil +} + // resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr. // resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames // are resolved. @@ -75,7 +101,7 @@ func resolveURL(ctx context.Context, u url.URL) (string, error) { if host == "localhost" || net.ParseIP(host) != nil { return "", nil } - tcpAddr, err := resolveTCPAddr("tcp", u.Host) + tcpAddr, err := resolveTCPAddr(ctx, u.Host) if err == nil { plog.Infof("resolving %s to %s", u.Host, tcpAddr.String()) return tcpAddr.String(), nil diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go index 79c59b01288..bf8528b753a 100644 --- a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go +++ b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go @@ -88,8 +88,6 @@ func (f *fifo) Schedule(j Job) { } } f.pendings = append(f.pendings, j) - - return } func (f *fifo) Pending() int { diff --git a/vendor/github.com/coreos/etcd/pkg/srv/BUILD b/vendor/github.com/coreos/etcd/pkg/srv/BUILD new file mode 100644 index 00000000000..3707eb3e968 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["srv.go"], + importpath = "github.com/coreos/etcd/pkg/srv", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/coreos/etcd/pkg/types:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go 
b/vendor/github.com/coreos/etcd/pkg/srv/srv.go new file mode 100644 index 00000000000..fefcbcb4b88 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/srv.go @@ -0,0 +1,140 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package srv looks up DNS SRV records. +package srv + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict + resolveTCPAddr = net.ResolveTCPAddr +) + +// GetCluster gets the cluster information via DNS discovery. +// Also sees each entry as a separate instance. 
+func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + return nil, err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, terr := resolveTCPAddr("tcp", host) + if terr != nil { + err = terr + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but URL shouldn't. + shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + if ok && url.Scheme != scheme { + err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } + } + if len(stringParts) == 0 { + return err + } + return nil + } + + failCount := 0 + err := updateNodeMap(service+"-ssl", "https") + srvErr := make([]string, 2) + if err != nil { + srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) + failCount++ + } + err = updateNodeMap(service, "http") + if err != nil { + srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) + failCount++ + } + if failCount == 2 { + return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) + } + return stringParts, nil +} + +type SRVClients struct { + Endpoints []string + SRVs []*net.SRV +} + +// GetClient looks up the client endpoints for a 
service and domain. +func GetClient(service, domain string) (*SRVClients, error) { + var urls []*url.URL + var srvs []*net.SRV + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + srvs = append(srvs, addrs...) + return nil + } + + errHTTPS := updateURLs(service+"-ssl", "https") + errHTTP := updateURLs(service, "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/BUILD b/vendor/github.com/coreos/etcd/pkg/testutil/BUILD index 054063068f4..d3b887f366d 100644 --- a/vendor/github.com/coreos/etcd/pkg/testutil/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/testutil/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "assert.go", "leak.go", "pauseable_handler.go", "recorder.go", diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/assert.go b/vendor/github.com/coreos/etcd/pkg/testutil/assert.go new file mode 100644 index 00000000000..9cf03457d52 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/testutil/assert.go @@ -0,0 +1,62 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "fmt" + "reflect" + "testing" +) + +func AssertEqual(t *testing.T, e, a interface{}, msg ...string) { + if (e == nil || a == nil) && (isNil(e) && isNil(a)) { + return + } + if reflect.DeepEqual(e, a) { + return + } + s := "" + if len(msg) > 1 { + s = msg[0] + ": " + } + s = fmt.Sprintf("%sexpected %+v, got %+v", s, e, a) + FatalStack(t, s) +} + +func AssertNil(t *testing.T, v interface{}) { + AssertEqual(t, nil, v) +} + +func AssertNotNil(t *testing.T, v interface{}) { + if v == nil { + t.Fatalf("expected non-nil, got %+v", v) + } +} + +func AssertTrue(t *testing.T, v bool, msg ...string) { + AssertEqual(t, true, v, msg...) +} + +func AssertFalse(t *testing.T, v bool, msg ...string) { + AssertEqual(t, false, v, msg...) +} + +func isNil(v interface{}) bool { + if v == nil { + return true + } + rv := reflect.ValueOf(v) + return rv.Kind() != reflect.Struct && rv.IsNil() +} diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/leak.go b/vendor/github.com/coreos/etcd/pkg/testutil/leak.go index 80bc0eebc8a..a29d06d9bd0 100644 --- a/vendor/github.com/coreos/etcd/pkg/testutil/leak.go +++ b/vendor/github.com/coreos/etcd/pkg/testutil/leak.go @@ -62,10 +62,11 @@ func CheckLeakedGoroutine() bool { return true } -func AfterTest(t *testing.T) { +// CheckAfterTest returns an error if AfterTest would fail with an error. 
+func CheckAfterTest(d time.Duration) error { http.DefaultTransport.(*http.Transport).CloseIdleConnections() if testing.Short() { - return + return nil } var bad string badSubstring := map[string]string{ @@ -75,10 +76,12 @@ func AfterTest(t *testing.T) { "net.(*netFD).connect(": "a timing out dial", ").noteClientGone(": "a closenotifier sender", ").readLoop(": "a Transport", + ".grpc": "a gRPC resource", } var stacks string - for i := 0; i < 6; i++ { + begin := time.Now() + for time.Since(begin) < d { bad = "" stacks = strings.Join(interestingGoroutines(), "\n\n") for substr, what := range badSubstring { @@ -87,13 +90,22 @@ func AfterTest(t *testing.T) { } } if bad == "" { - return + return nil } // Bad stuff found, but goroutines might just still be // shutting down, so give it some time. time.Sleep(50 * time.Millisecond) } - t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks) + return fmt.Errorf("appears to have leaked %s:\n%s", bad, stacks) +} + +// AfterTest is meant to run in a defer that executes after a test completes. +// It will detect common goroutine leaks, retrying in case there are goroutines +// not synchronously torn down, and fail the test if any goroutines are stuck. 
+func AfterTest(t *testing.T) { + if err := CheckAfterTest(300 * time.Millisecond); err != nil { + t.Errorf("Test %v", err) + } } func interestingGoroutines() (gs []string) { @@ -106,6 +118,7 @@ func interestingGoroutines() (gs []string) { } stack := strings.TrimSpace(sl[1]) if stack == "" || + strings.Contains(stack, "sync.(*WaitGroup).Done") || strings.Contains(stack, "created by os/signal.init") || strings.Contains(stack, "runtime/panic.go") || strings.Contains(stack, "created by testing.RunTests") || diff --git a/vendor/github.com/coreos/etcd/pkg/transport/BUILD b/vendor/github.com/coreos/etcd/pkg/transport/BUILD index 7074e74a002..3ae75dce137 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/BUILD +++ b/vendor/github.com/coreos/etcd/pkg/transport/BUILD @@ -7,6 +7,7 @@ go_library( "keepalive_listener.go", "limit_listen.go", "listener.go", + "listener_tls.go", "timeout_conn.go", "timeout_dialer.go", "timeout_listener.go", @@ -17,10 +18,7 @@ go_library( ], importpath = "github.com/coreos/etcd/pkg/transport", visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library", - ], + deps = ["//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library"], ) filegroup( diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go index 4fcdb5ad9a3..3b58b41543f 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go @@ -23,22 +23,21 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "log" "math/big" "net" "os" "path/filepath" + "strings" "time" - "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/tlsutil" ) -func NewListener(addr, scheme string, tlscfg *tls.Config) (l net.Listener, err error) { +func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { if l, 
err = newListener(addr, scheme); err != nil { return nil, err } - return wrapTLS(addr, scheme, tlscfg, l) + return wrapTLS(addr, scheme, tlsinfo, l) } func newListener(addr string, scheme string) (net.Listener, error) { @@ -49,15 +48,11 @@ func newListener(addr string, scheme string) (net.Listener, error) { return net.Listen("tcp", addr) } -func wrapTLS(addr, scheme string, tlscfg *tls.Config, l net.Listener) (net.Listener, error) { +func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { if scheme != "https" && scheme != "unixs" { return l, nil } - if tlscfg == nil { - l.Close() - return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr) - } - return tls.NewListener(l, tlscfg), nil + return newTLSListener(l, tlsinfo) } type TLSInfo struct { @@ -70,6 +65,10 @@ type TLSInfo struct { // ServerName ensures the cert matches the given host in case of discovery / virtual hosting ServerName string + // HandshakeFailure is optionally called when a connection fails to handshake. The + // connection will be closed immediately afterwards. + HandshakeFailure func(*tls.Conn, error) + selfCert bool // parseFunc exists to simplify testing. Typically, parseFunc @@ -86,7 +85,7 @@ func (info TLSInfo) Empty() bool { } func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { - if err = fileutil.TouchDirAll(dirpath); err != nil { + if err = os.MkdirAll(dirpath, 0700); err != nil { return } @@ -173,6 +172,14 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { MinVersion: tls.VersionTLS12, ServerName: info.ServerName, } + // this only reloads certs when there's a client request + // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } return cfg, nil } @@ -235,9 +242,6 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { if err != nil { return nil, err } - // if given a CA, trust any host with a cert signed by the CA - log.Println("warning: ignoring ServerName for user-provided CA for backwards compatibility is deprecated") - cfg.ServerName = "" } if info.selfCert { @@ -246,31 +250,11 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { return cfg, nil } -// ShallowCopyTLSConfig copies *tls.Config. This is only -// work-around for go-vet tests, which complains -// -// assignment copies lock value to p: crypto/tls.Config contains sync.Once contains sync.Mutex -// -// Keep up-to-date with 'go/src/crypto/tls/common.go' -func ShallowCopyTLSConfig(cfg *tls.Config) *tls.Config { - ncfg := tls.Config{ - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - SessionTicketKey: cfg.SessionTicketKey, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } - return &ncfg +// IsClosedConnError returns true if the error is from closing listener, cmux. 
+// copied from golang.org/x/net/http2/http2.go +func IsClosedConnError(err error) bool { + // 'use of closed network connection' (Go <=1.8) + // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) + // 'mux: listener closed' (cmux.ErrListenerClosed) + return err != nil && strings.Contains(err.Error(), "closed") } diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go new file mode 100644 index 00000000000..86511860335 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go @@ -0,0 +1,217 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "strings" + "sync" +) + +// tlsListener overrides a TLS listener so it will reject client +// certificates with insufficient SAN credentials. 
+type tlsListener struct { + net.Listener + connc chan net.Conn + donec chan struct{} + err error + handshakeFailure func(*tls.Conn, error) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + if tlsinfo == nil || tlsinfo.Empty() { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) + } + tlscfg, err := tlsinfo.ServerConfig() + if err != nil { + return nil, err + } + + hf := tlsinfo.HandshakeFailure + if hf == nil { + hf = func(*tls.Conn, error) {} + } + tlsl := &tlsListener{ + Listener: tls.NewListener(l, tlscfg), + connc: make(chan net.Conn), + donec: make(chan struct{}), + handshakeFailure: hf, + } + go tlsl.acceptLoop() + return tlsl, nil +} + +func (l *tlsListener) Accept() (net.Conn, error) { + select { + case conn := <-l.connc: + return conn, nil + case <-l.donec: + return nil, l.err + } +} + +// acceptLoop launches each TLS handshake in a separate goroutine +// to prevent a hanging TLS connection from blocking other connections. 
+func (l *tlsListener) acceptLoop() { + var wg sync.WaitGroup + var pendingMu sync.Mutex + + pending := make(map[net.Conn]struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + pendingMu.Lock() + for c := range pending { + c.Close() + } + pendingMu.Unlock() + wg.Wait() + close(l.donec) + }() + + for { + conn, err := l.Listener.Accept() + if err != nil { + l.err = err + return + } + + pendingMu.Lock() + pending[conn] = struct{}{} + pendingMu.Unlock() + + wg.Add(1) + go func() { + defer func() { + if conn != nil { + conn.Close() + } + wg.Done() + }() + + tlsConn := conn.(*tls.Conn) + herr := tlsConn.Handshake() + pendingMu.Lock() + delete(pending, conn) + pendingMu.Unlock() + if herr != nil { + l.handshakeFailure(tlsConn, herr) + return + } + + st := tlsConn.ConnectionState() + if len(st.PeerCertificates) > 0 { + cert := st.PeerCertificates[0] + addr := tlsConn.RemoteAddr().String() + if cerr := checkCert(ctx, cert, addr); cerr != nil { + l.handshakeFailure(tlsConn, cerr) + return + } + } + select { + case l.connc <- tlsConn: + conn = nil + case <-ctx.Done(): + } + }() + } +} + +func checkCert(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { + h, _, herr := net.SplitHostPort(remoteAddr) + if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { + return nil + } + if herr != nil { + return herr + } + if len(cert.IPAddresses) > 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { + return cerr + } + } + if len(cert.DNSNames) > 0 { + ok, err := isHostInDNS(ctx, h, cert.DNSNames) + if ok { + return nil + } + errStr := "" + if err != nil { + errStr = " (" + err.Error() + ")" + } + return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) + } + return nil +} + +func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { + // reverse lookup + wildcards, names := []string{}, []string{} + for _, 
dns := range dnsNames { + if strings.HasPrefix(dns, "*.") { + wildcards = append(wildcards, dns[1:]) + } else { + names = append(names, dns) + } + } + lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) + for _, name := range lnames { + // strip trailing '.' from PTR record + if name[len(name)-1] == '.' { + name = name[:len(name)-1] + } + for _, wc := range wildcards { + if strings.HasSuffix(name, wc) { + return true, nil + } + } + for _, n := range names { + if n == name { + return true, nil + } + } + } + err = lerr + + // forward lookup + for _, dns := range names { + addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) + if lerr != nil { + err = lerr + continue + } + for _, addr := range addrs { + if addr == host { + return true, nil + } + } + } + return false, err +} + +func (l *tlsListener) Close() error { + err := l.Listener.Close() + <-l.donec + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go index 0f4df5fbe3b..b35e04955bb 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go @@ -15,7 +15,6 @@ package transport import ( - "crypto/tls" "net" "time" ) @@ -23,7 +22,7 @@ import ( // NewTimeoutListener returns a listener that listens on the given address. // If read/write on the accepted connection blocks longer than its time limit, // it will return timeout error. 
-func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { +func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { ln, err := newListener(addr, scheme) if err != nil { return nil, err @@ -33,7 +32,7 @@ func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeou rdtimeoutd: rdtimeoutd, wtimeoutd: wtimeoutd, } - if ln, err = wrapTLS(addr, scheme, tlscfg, ln); err != nil { + if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil { return nil, err } return ln, nil diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go index c126b6f7fa0..123e2036f0f 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go @@ -22,7 +22,7 @@ import ( type unixListener struct{ net.Listener } func NewUnixListener(addr string) (net.Listener, error) { - if err := os.RemoveAll(addr); err != nil { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { return nil, err } l, err := net.Listen("unix", addr) @@ -33,7 +33,7 @@ func NewUnixListener(addr string) (net.Listener, error) { } func (ul *unixListener) Close() error { - if err := os.RemoveAll(ul.Addr().String()); err != nil { + if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { return err } return ul.Listener.Close() diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait.go b/vendor/github.com/coreos/etcd/pkg/wait/wait.go index 0f31eeb9790..34fa237e825 100644 --- a/vendor/github.com/coreos/etcd/pkg/wait/wait.go +++ b/vendor/github.com/coreos/etcd/pkg/wait/wait.go @@ -21,22 +21,29 @@ import ( "sync" ) +// Wait is an interface that provides the ability to wait and trigger events that +// are associated with IDs. 
type Wait interface { + // Register waits returns a chan that waits on the given ID. + // The chan will be triggered when Trigger is called with + // the same ID. Register(id uint64) <-chan interface{} + // Trigger triggers the waiting chans with the given ID. Trigger(id uint64, x interface{}) IsRegistered(id uint64) bool } -type List struct { +type list struct { l sync.Mutex m map[uint64]chan interface{} } -func New() *List { - return &List{m: make(map[uint64]chan interface{})} +// New creates a Wait. +func New() Wait { + return &list{m: make(map[uint64]chan interface{})} } -func (w *List) Register(id uint64) <-chan interface{} { +func (w *list) Register(id uint64) <-chan interface{} { w.l.Lock() defer w.l.Unlock() ch := w.m[id] @@ -49,7 +56,7 @@ func (w *List) Register(id uint64) <-chan interface{} { return ch } -func (w *List) Trigger(id uint64, x interface{}) { +func (w *list) Trigger(id uint64, x interface{}) { w.l.Lock() ch := w.m[id] delete(w.m, id) @@ -60,7 +67,7 @@ func (w *List) Trigger(id uint64, x interface{}) { } } -func (w *List) IsRegistered(id uint64) bool { +func (w *list) IsRegistered(id uint64) bool { w.l.Lock() defer w.l.Unlock() _, ok := w.m[id] diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD b/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD index e0aa0fddb02..881f0a54f10 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD @@ -6,15 +6,18 @@ go_library( "auth.go", "cluster.go", "doc.go", + "election.go", "kv.go", - "kv_client_adapter.go", + "leader.go", "lease.go", + "lock.go", + "logger.go", "maintenance.go", "metrics.go", + "register.go", "watch.go", "watch_broadcast.go", "watch_broadcasts.go", - "watch_client_adapter.go", "watch_ranges.go", "watcher.go", ], @@ -22,17 +25,23 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + 
"//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library", + "//vendor/github.com/coreos/etcd/clientv3/naming:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/mvcc:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", "//vendor/github.com/coreos/etcd/proxy/grpcproxy/cache:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/golang.org/x/time/rate:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/naming:go_default_library", ], ) @@ -47,6 +56,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter:all-srcs", "//vendor/github.com/coreos/etcd/proxy/grpcproxy/cache:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD new file mode 100644 index 00000000000..136c7198aa8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/BUILD @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "chan_stream.go", + "cluster_client_adapter.go", + "doc.go", + "election_client_adapter.go", + "kv_client_adapter.go", + "lease_client_adapter.go", + 
"lock_client_adapter.go", + "maintenance_client_adapter.go", + "watch_client_adapter.go", + ], + importpath = "github.com/coreos/etcd/proxy/grpcproxy/adapter", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/metadata:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go similarity index 65% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go rename to vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go index 283c2ed07fa..3aa01f2052b 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go @@ -1,4 +1,4 @@ -// Copyright 2016 The etcd Authors +// Copyright 2017 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,79 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package grpcproxy +package adapter import ( - "errors" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "golang.org/x/net/context" + "google.golang.org/grpc" "google.golang.org/grpc/metadata" ) -var errAlreadySentHeader = errors.New("grpcproxy: already send header") - -type ws2wc struct{ wserv pb.WatchServer } - -func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient { - return &ws2wc{wserv} -} - -func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) { - // ch1 is buffered so server can send error on close - ch1, ch2 := make(chan interface{}, 1), make(chan interface{}) - headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1) - - cctx, ccancel := context.WithCancel(ctx) - cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel} - wclient := &ws2wcClientStream{chanClientStream{headerc, trailerc, cli}} - - sctx, scancel := context.WithCancel(ctx) - srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel} - wserver := &ws2wcServerStream{chanServerStream{headerc, trailerc, srv, nil}} - go func() { - if err := s.wserv.Watch(wserver); err != nil { - select { - case srv.sendc <- err: - case <-sctx.Done(): - case <-cctx.Done(): - } - } - scancel() - ccancel() - }() - return wclient, nil -} - -// ws2wcClientStream implements Watch_WatchClient -type ws2wcClientStream struct{ chanClientStream } - -// ws2wcServerStream implements Watch_WatchServer -type ws2wcServerStream struct{ chanServerStream } - -func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error { - return s.SendMsg(wr) -} -func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*pb.WatchResponse), nil -} - -func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error { - return s.SendMsg(wr) -} -func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) { - var v interface{} - if err := s.RecvMsg(&v); 
err != nil { - return nil, err - } - return v.(*pb.WatchRequest), nil -} - // chanServerStream implements grpc.ServerStream with a chanStream type chanServerStream struct { headerc chan<- metadata.MD @@ -151,8 +87,8 @@ func (cs *chanClientStream) Trailer() metadata.MD { } } -func (s *chanClientStream) CloseSend() error { - close(s.chanStream.sendc) +func (cs *chanClientStream) CloseSend() error { + close(cs.chanStream.sendc) return nil } @@ -180,17 +116,50 @@ func (s *chanStream) SendMsg(m interface{}) error { func (s *chanStream) RecvMsg(m interface{}) error { v := m.(*interface{}) - select { - case msg, ok := <-s.recvc: - if !ok { - return grpc.ErrClientConnClosing + for { + select { + case msg, ok := <-s.recvc: + if !ok { + return grpc.ErrClientConnClosing + } + if err, ok := msg.(error); ok { + return err + } + *v = msg + return nil + case <-s.ctx.Done(): } - if err, ok := msg.(error); ok { - return err + if len(s.recvc) == 0 { + // prioritize any pending recv messages over canceled context + break } - *v = msg - return nil - case <-s.ctx.Done(): } return s.ctx.Err() } + +func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream { + // ch1 is buffered so server can send error on close + ch1, ch2 := make(chan interface{}, 1), make(chan interface{}) + headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1) + + cctx, ccancel := context.WithCancel(ctx) + cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel} + cs := chanClientStream{headerc, trailerc, cli} + + sctx, scancel := context.WithCancel(ctx) + srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel} + ss := chanServerStream{headerc, trailerc, srv, nil} + + go func() { + if err := ssHandler(ss); err != nil { + select { + case srv.sendc <- err: + case <-sctx.Done(): + case <-cctx.Done(): + } + } + scancel() + ccancel() + }() + return cs +} diff --git 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go new file mode 100644 index 00000000000..4ddf78e15ec --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go @@ -0,0 +1,44 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type cls2clc struct{ cls pb.ClusterServer } + +func ClusterServerToClusterClient(cls pb.ClusterServer) pb.ClusterClient { + return &cls2clc{cls} +} + +func (s *cls2clc) MemberList(ctx context.Context, r *pb.MemberListRequest, opts ...grpc.CallOption) (*pb.MemberListResponse, error) { + return s.cls.MemberList(ctx, r) +} + +func (s *cls2clc) MemberAdd(ctx context.Context, r *pb.MemberAddRequest, opts ...grpc.CallOption) (*pb.MemberAddResponse, error) { + return s.cls.MemberAdd(ctx, r) +} + +func (s *cls2clc) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest, opts ...grpc.CallOption) (*pb.MemberUpdateResponse, error) { + return s.cls.MemberUpdate(ctx, r) +} + +func (s *cls2clc) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest, opts ...grpc.CallOption) (*pb.MemberRemoveResponse, error) { + return s.cls.MemberRemove(ctx, r) +} diff --git 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go new file mode 100644 index 00000000000..7170be23304 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go @@ -0,0 +1,17 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package adapter provides gRPC adapters between client and server +// gRPC interfaces without needing to go through a gRPC connection. +package adapter diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go new file mode 100644 index 00000000000..383c1b9d8fb --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go @@ -0,0 +1,79 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adapter + +import ( + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type es2ec struct{ es v3electionpb.ElectionServer } + +func ElectionServerToElectionClient(es v3electionpb.ElectionServer) v3electionpb.ElectionClient { + return &es2ec{es} +} + +func (s *es2ec) Campaign(ctx context.Context, r *v3electionpb.CampaignRequest, opts ...grpc.CallOption) (*v3electionpb.CampaignResponse, error) { + return s.es.Campaign(ctx, r) +} + +func (s *es2ec) Proclaim(ctx context.Context, r *v3electionpb.ProclaimRequest, opts ...grpc.CallOption) (*v3electionpb.ProclaimResponse, error) { + return s.es.Proclaim(ctx, r) +} + +func (s *es2ec) Leader(ctx context.Context, r *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (*v3electionpb.LeaderResponse, error) { + return s.es.Leader(ctx, r) +} + +func (s *es2ec) Resign(ctx context.Context, r *v3electionpb.ResignRequest, opts ...grpc.CallOption) (*v3electionpb.ResignResponse, error) { + return s.es.Resign(ctx, r) +} + +func (s *es2ec) Observe(ctx context.Context, in *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (v3electionpb.Election_ObserveClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return s.es.Observe(in, &es2ecServerStream{ss}) + }) + return &es2ecClientStream{cs}, nil +} + +// es2ecClientStream implements Election_ObserveClient +type es2ecClientStream struct{ chanClientStream } + +// es2ecServerStream implements Election_ObserveServer +type es2ecServerStream struct{ chanServerStream } + +func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error { + return s.SendMsg(rr) +} +func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*v3electionpb.LeaderResponse), nil +} + +func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error { + return s.SendMsg(rr) +} +func 
(s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*v3electionpb.LeaderRequest), nil +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go similarity index 98% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go rename to vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go index 7880b18109d..fec401d9dd0 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package grpcproxy +package adapter import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go new file mode 100644 index 00000000000..d471fd9144b --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go @@ -0,0 +1,77 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adapter + +import ( + "golang.org/x/net/context" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "google.golang.org/grpc" +) + +type ls2lc struct { + leaseServer pb.LeaseServer +} + +func LeaseServerToLeaseClient(ls pb.LeaseServer) pb.LeaseClient { + return &ls2lc{ls} +} + +func (c *ls2lc) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (*pb.LeaseGrantResponse, error) { + return c.leaseServer.LeaseGrant(ctx, in) +} + +func (c *ls2lc) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (*pb.LeaseRevokeResponse, error) { + return c.leaseServer.LeaseRevoke(ctx, in) +} + +func (c *ls2lc) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (pb.Lease_LeaseKeepAliveClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return c.leaseServer.LeaseKeepAlive(&ls2lcServerStream{ss}) + }) + return &ls2lcClientStream{cs}, nil +} + +func (c *ls2lc) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*pb.LeaseTimeToLiveResponse, error) { + return c.leaseServer.LeaseTimeToLive(ctx, in) +} + +// ls2lcClientStream implements Lease_LeaseKeepAliveClient +type ls2lcClientStream struct{ chanClientStream } + +// ls2lcServerStream implements Lease_LeaseKeepAliveServer +type ls2lcServerStream struct{ chanServerStream } + +func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error { + return s.SendMsg(rr) +} +func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.LeaseKeepAliveResponse), nil +} + +func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error { + return s.SendMsg(rr) +} +func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.LeaseKeepAliveRequest), nil +} diff --git 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go new file mode 100644 index 00000000000..05e5cb020a1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go @@ -0,0 +1,36 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type ls2lsc struct{ ls v3lockpb.LockServer } + +func LockServerToLockClient(ls v3lockpb.LockServer) v3lockpb.LockClient { + return &ls2lsc{ls} +} + +func (s *ls2lsc) Lock(ctx context.Context, r *v3lockpb.LockRequest, opts ...grpc.CallOption) (*v3lockpb.LockResponse, error) { + return s.ls.Lock(ctx, r) +} + +func (s *ls2lsc) Unlock(ctx context.Context, r *v3lockpb.UnlockRequest, opts ...grpc.CallOption) (*v3lockpb.UnlockResponse, error) { + return s.ls.Unlock(ctx, r) +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go new file mode 100644 index 00000000000..9b21bf2576e --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go @@ -0,0 +1,79 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type mts2mtc struct{ mts pb.MaintenanceServer } + +func MaintenanceServerToMaintenanceClient(mts pb.MaintenanceServer) pb.MaintenanceClient { + return &mts2mtc{mts} +} + +func (s *mts2mtc) Alarm(ctx context.Context, r *pb.AlarmRequest, opts ...grpc.CallOption) (*pb.AlarmResponse, error) { + return s.mts.Alarm(ctx, r) +} + +func (s *mts2mtc) Status(ctx context.Context, r *pb.StatusRequest, opts ...grpc.CallOption) (*pb.StatusResponse, error) { + return s.mts.Status(ctx, r) +} + +func (s *mts2mtc) Defragment(ctx context.Context, dr *pb.DefragmentRequest, opts ...grpc.CallOption) (*pb.DefragmentResponse, error) { + return s.mts.Defragment(ctx, dr) +} + +func (s *mts2mtc) Hash(ctx context.Context, r *pb.HashRequest, opts ...grpc.CallOption) (*pb.HashResponse, error) { + return s.mts.Hash(ctx, r) +} + +func (s *mts2mtc) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (pb.Maintenance_SnapshotClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return s.mts.Snapshot(in, &ss2scServerStream{ss}) + }) + return &ss2scClientStream{cs}, nil +} + +// ss2scClientStream implements Maintenance_SnapshotClient +type ss2scClientStream struct{ chanClientStream } + +// ss2scServerStream implements Maintenance_SnapshotServer +type ss2scServerStream struct{ chanServerStream } + +func 
(s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error { + return s.SendMsg(rr) +} +func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.SnapshotResponse), nil +} + +func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error { + return s.SendMsg(rr) +} +func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.SnapshotRequest), nil +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go new file mode 100644 index 00000000000..af4a13c4152 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go @@ -0,0 +1,66 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adapter + +import ( + "errors" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +var errAlreadySentHeader = errors.New("adapter: already sent header") + +type ws2wc struct{ wserv pb.WatchServer } + +func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient { + return &ws2wc{wserv} +} + +func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return s.wserv.Watch(&ws2wcServerStream{ss}) + }) + return &ws2wcClientStream{cs}, nil +} + +// ws2wcClientStream implements Watch_WatchClient +type ws2wcClientStream struct{ chanClientStream } + +// ws2wcServerStream implements Watch_WatchServer +type ws2wcServerStream struct{ chanServerStream } + +func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error { + return s.SendMsg(wr) +} +func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.WatchResponse), nil +} + +func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error { + return s.SendMsg(wr) +} +func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.WatchRequest), nil +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD index ba6e5289a51..5d0c3e9e39c 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD @@ -9,7 +9,7 @@ go_library( "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", "//vendor/github.com/coreos/etcd/pkg/adt:go_default_library", - 
"//vendor/github.com/karlseguin/ccache:go_default_library", + "//vendor/github.com/golang/groupcache/lru:go_default_library", ], ) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go index 155bbf90022..e84a05229e0 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go @@ -12,18 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package cache exports functionality for efficiently caching and mapping +// `RangeRequest`s to corresponding `RangeResponse`s. package cache import ( "errors" "sync" - "time" - - "github.com/karlseguin/ccache" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/adt" + "github.com/golang/groupcache/lru" ) var ( @@ -31,14 +31,12 @@ var ( ErrCompacted = rpctypes.ErrGRPCCompacted ) -const defaultHistoricTTL = time.Hour -const defaultCurrentTTL = time.Minute - type Cache interface { Add(req *pb.RangeRequest, resp *pb.RangeResponse) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) Compact(revision int64) Invalidate(key []byte, endkey []byte) + Size() int Close() } @@ -54,17 +52,17 @@ func keyFunc(req *pb.RangeRequest) string { func NewCache(maxCacheEntries int) Cache { return &cache{ - lru: ccache.New(ccache.Configure().MaxSize(int64(maxCacheEntries))), + lru: lru.New(maxCacheEntries), compactedRev: -1, } } -func (c *cache) Close() { c.lru.Stop() } +func (c *cache) Close() {} // cache implements Cache type cache struct { mu sync.RWMutex - lru *ccache.Cache + lru *lru.Cache // a reverse index for cache invalidation cachedRanges adt.IntervalTree @@ -80,11 +78,7 @@ func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) { defer c.mu.Unlock() if req.Revision > c.compactedRev { - if req.Revision == 0 { - c.lru.Set(key, 
resp, defaultCurrentTTL) - } else { - c.lru.Set(key, resp, defaultHistoricTTL) - } + c.lru.Add(key, resp) } // we do not need to invalidate a request with a revision specified. // so we do not need to add it into the reverse index. @@ -116,16 +110,16 @@ func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) { func (c *cache) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) { key := keyFunc(req) - c.mu.RLock() - defer c.mu.RUnlock() + c.mu.Lock() + defer c.mu.Unlock() - if req.Revision < c.compactedRev { - c.lru.Delete(key) + if req.Revision > 0 && req.Revision < c.compactedRev { + c.lru.Remove(key) return nil, ErrCompacted } - if item := c.lru.Get(key); item != nil { - return item.Value().(*pb.RangeResponse), nil + if resp, ok := c.lru.Get(key); ok { + return resp.(*pb.RangeResponse), nil } return nil, errors.New("not exist") } @@ -149,7 +143,7 @@ func (c *cache) Invalidate(key, endkey []byte) { for _, iv := range ivs { keys := iv.Val.([]string) for _, key := range keys { - c.lru.Delete(key) + c.lru.Remove(key) } } // delete after removing all keys since it is destructive to 'ivs' @@ -166,3 +160,9 @@ func (c *cache) Compact(revision int64) { c.compactedRev = revision } } + +func (c *cache) Size() int { + c.mu.RLock() + defer c.mu.RUnlock() + return c.lru.Len() +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go index 8a2fa16c124..899fb9be65f 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go @@ -15,38 +15,163 @@ package grpcproxy import ( + "fmt" + "os" + "sync" + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/naming" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "golang.org/x/net/context" + "golang.org/x/time/rate" + "google.golang.org/grpc" + gnaming "google.golang.org/grpc/naming" ) +// allow maximum 1 retry per second +const resolveRetryRate = 1 + type 
clusterProxy struct { - client *clientv3.Client + clus clientv3.Cluster + ctx context.Context + gr *naming.GRPCResolver + + // advertise client URL + advaddr string + prefix string + + umu sync.RWMutex + umap map[string]gnaming.Update } -func NewClusterProxy(c *clientv3.Client) pb.ClusterServer { - return &clusterProxy{ - client: c, +// NewClusterProxy takes optional prefix to fetch grpc-proxy member endpoints. +// The returned channel is closed when there is grpc-proxy endpoint registered +// and the client's context is canceled so the 'register' loop returns. +func NewClusterProxy(c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{}) { + cp := &clusterProxy{ + clus: c.Cluster, + ctx: c.Ctx(), + gr: &naming.GRPCResolver{Client: c}, + + advaddr: advaddr, + prefix: prefix, + umap: make(map[string]gnaming.Update), + } + + donec := make(chan struct{}) + if advaddr != "" && prefix != "" { + go func() { + defer close(donec) + cp.resolve(prefix) + }() + return cp, donec + } + + close(donec) + return cp, donec +} + +func (cp *clusterProxy) resolve(prefix string) { + rm := rate.NewLimiter(rate.Limit(resolveRetryRate), resolveRetryRate) + for rm.Wait(cp.ctx) == nil { + wa, err := cp.gr.Resolve(prefix) + if err != nil { + plog.Warningf("failed to resolve %q (%v)", prefix, err) + continue + } + cp.monitor(wa) + } +} + +func (cp *clusterProxy) monitor(wa gnaming.Watcher) { + for cp.ctx.Err() == nil { + ups, err := wa.Next() + if err != nil { + plog.Warningf("clusterProxy watcher error (%v)", err) + if grpc.ErrorDesc(err) == naming.ErrWatcherClosed.Error() { + return + } + } + + cp.umu.Lock() + for i := range ups { + switch ups[i].Op { + case gnaming.Add: + cp.umap[ups[i].Addr] = *ups[i] + case gnaming.Delete: + delete(cp.umap, ups[i].Addr) + } + } + cp.umu.Unlock() } } func (cp *clusterProxy) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { - conn := cp.client.ActiveConnection() - return 
pb.NewClusterClient(conn).MemberAdd(ctx, r) + mresp, err := cp.clus.MemberAdd(ctx, r.PeerURLs) + if err != nil { + return nil, err + } + resp := (pb.MemberAddResponse)(*mresp) + return &resp, err } func (cp *clusterProxy) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - conn := cp.client.ActiveConnection() - return pb.NewClusterClient(conn).MemberRemove(ctx, r) + mresp, err := cp.clus.MemberRemove(ctx, r.ID) + if err != nil { + return nil, err + } + resp := (pb.MemberRemoveResponse)(*mresp) + return &resp, err } func (cp *clusterProxy) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { - conn := cp.client.ActiveConnection() - return pb.NewClusterClient(conn).MemberUpdate(ctx, r) + mresp, err := cp.clus.MemberUpdate(ctx, r.ID, r.PeerURLs) + if err != nil { + return nil, err + } + resp := (pb.MemberUpdateResponse)(*mresp) + return &resp, err } -func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - conn := cp.client.ActiveConnection() - return pb.NewClusterClient(conn).MemberList(ctx, r) +func (cp *clusterProxy) membersFromUpdates() ([]*pb.Member, error) { + cp.umu.RLock() + defer cp.umu.RUnlock() + mbs := make([]*pb.Member, 0, len(cp.umap)) + for addr, upt := range cp.umap { + m, err := decodeMeta(fmt.Sprint(upt.Metadata)) + if err != nil { + return nil, err + } + mbs = append(mbs, &pb.Member{Name: m.Name, ClientURLs: []string{addr}}) + } + return mbs, nil +} + +// MemberList wraps member list API with following rules: +// - If 'advaddr' is not empty and 'prefix' is not empty, return registered member lists via resolver +// - If 'advaddr' is not empty and 'prefix' is not empty and registered grpc-proxy members haven't been fetched, return the 'advaddr' +// - If 'advaddr' is not empty and 'prefix' is empty, return 'advaddr' without forcing it to 'register' +// - If 'advaddr' is empty, forward to member list API 
+func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { + if cp.advaddr != "" { + if cp.prefix != "" { + mbs, err := cp.membersFromUpdates() + if err != nil { + return nil, err + } + if len(mbs) > 0 { + return &pb.MemberListResponse{Members: mbs}, nil + } + } + // prefix is empty or no grpc-proxy members haven't been registered + hostname, _ := os.Hostname() + return &pb.MemberListResponse{Members: []*pb.Member{{Name: hostname, ClientURLs: []string{cp.advaddr}}}}, nil + } + mresp, err := cp.clus.MemberList(ctx) + if err != nil { + return nil, err + } + resp := (pb.MemberListResponse)(*mresp) + return &resp, err } diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go new file mode 100644 index 00000000000..27115a81d7d --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/election.go @@ -0,0 +1,65 @@ +// Copyright 2017 The etcd Lockors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package grpcproxy + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" +) + +type electionProxy struct { + client *clientv3.Client +} + +func NewElectionProxy(client *clientv3.Client) v3electionpb.ElectionServer { + return &electionProxy{client: client} +} + +func (ep *electionProxy) Campaign(ctx context.Context, req *v3electionpb.CampaignRequest) (*v3electionpb.CampaignResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Campaign(ctx, req) +} + +func (ep *electionProxy) Proclaim(ctx context.Context, req *v3electionpb.ProclaimRequest) (*v3electionpb.ProclaimResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Proclaim(ctx, req) +} + +func (ep *electionProxy) Leader(ctx context.Context, req *v3electionpb.LeaderRequest) (*v3electionpb.LeaderResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Leader(ctx, req) +} + +func (ep *electionProxy) Observe(req *v3electionpb.LeaderRequest, s v3electionpb.Election_ObserveServer) error { + conn := ep.client.ActiveConnection() + ctx, cancel := context.WithCancel(s.Context()) + defer cancel() + sc, err := v3electionpb.NewElectionClient(conn).Observe(ctx, req) + if err != nil { + return err + } + for { + rr, err := sc.Recv() + if err != nil { + return err + } + if err = s.Send(rr); err != nil { + return err + } + } +} + +func (ep *electionProxy) Resign(ctx context.Context, req *v3electionpb.ResignRequest) (*v3electionpb.ResignResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Resign(ctx, req) +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go index 36885135797..0654729a0ae 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go @@ -33,11 +33,7 @@ func 
NewKvProxy(c *clientv3.Client) (pb.KVServer, <-chan struct{}) { cache: cache.NewCache(cache.DefaultMaxEntries), } donec := make(chan struct{}) - go func() { - defer close(donec) - <-c.Ctx().Done() - kv.cache.Close() - }() + close(donec) return kv, donec } @@ -65,12 +61,14 @@ func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRespo req.Serializable = true gresp := (*pb.RangeResponse)(resp.Get()) p.cache.Add(&req, gresp) + cacheKeys.Set(float64(p.cache.Size())) return gresp, nil } func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { p.cache.Invalidate(r.Key, nil) + cacheKeys.Set(float64(p.cache.Size())) resp, err := p.kv.Do(ctx, PutRequestToOp(r)) return (*pb.PutResponse)(resp.Put()), err @@ -78,6 +76,7 @@ func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, e func (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { p.cache.Invalidate(r.Key, r.RangeEnd) + cacheKeys.Set(float64(p.cache.Size())) resp, err := p.kv.Do(ctx, DelRequestToOp(r)) return (*pb.DeleteRangeResponse)(resp.Del()), err @@ -133,6 +132,8 @@ func (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, e p.txnToCache(r.Failure, resp.Responses) } + cacheKeys.Set(float64(p.cache.Size())) + return (*pb.TxnResponse)(resp), nil } @@ -147,6 +148,8 @@ func (p *kvProxy) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.Com p.cache.Compact(r.Revision) } + cacheKeys.Set(float64(p.cache.Size())) + return (*pb.CompactionResponse)(resp), err } @@ -183,7 +186,12 @@ func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op { opts = append(opts, clientv3.WithMinCreateRev(r.MinCreateRevision)) opts = append(opts, clientv3.WithMaxModRev(r.MaxModRevision)) opts = append(opts, clientv3.WithMinModRev(r.MinModRevision)) - + if r.CountOnly { + opts = append(opts, clientv3.WithCountOnly()) + } + if r.KeysOnly { + opts = append(opts, clientv3.WithKeysOnly()) + } if 
r.Serializable { opts = append(opts, clientv3.WithSerializable()) } @@ -194,7 +202,15 @@ func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op { func PutRequestToOp(r *pb.PutRequest) clientv3.Op { opts := []clientv3.OpOption{} opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease))) - + if r.IgnoreValue { + opts = append(opts, clientv3.WithIgnoreValue()) + } + if r.IgnoreLease { + opts = append(opts, clientv3.WithIgnoreLease()) + } + if r.PrevKv { + opts = append(opts, clientv3.WithPrevKV()) + } return clientv3.OpPut(string(r.Key), string(r.Value), opts...) } diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go new file mode 100644 index 00000000000..86afdb7072b --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/leader.go @@ -0,0 +1,114 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "math" + "sync" + + "golang.org/x/net/context" + "golang.org/x/time/rate" + "google.golang.org/grpc" + + "github.com/coreos/etcd/clientv3" +) + +const ( + lostLeaderKey = "__lostleader" // watched to detect leader loss + retryPerSecond = 10 +) + +type leader struct { + ctx context.Context + w clientv3.Watcher + // mu protects leaderc updates. 
+ mu sync.RWMutex + leaderc chan struct{} + disconnc chan struct{} + donec chan struct{} +} + +func newLeader(ctx context.Context, w clientv3.Watcher) *leader { + l := &leader{ + ctx: clientv3.WithRequireLeader(ctx), + w: w, + leaderc: make(chan struct{}), + disconnc: make(chan struct{}), + donec: make(chan struct{}), + } + // begin assuming leader is lost + close(l.leaderc) + go l.recvLoop() + return l +} + +func (l *leader) recvLoop() { + defer close(l.donec) + + limiter := rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond) + rev := int64(math.MaxInt64 - 2) + for limiter.Wait(l.ctx) == nil { + wch := l.w.Watch(l.ctx, lostLeaderKey, clientv3.WithRev(rev), clientv3.WithCreatedNotify()) + cresp, ok := <-wch + if !ok { + l.loseLeader() + continue + } + if cresp.Err() != nil { + l.loseLeader() + if grpc.ErrorDesc(cresp.Err()) == grpc.ErrClientConnClosing.Error() { + close(l.disconnc) + return + } + continue + } + l.gotLeader() + <-wch + l.loseLeader() + } +} + +func (l *leader) loseLeader() { + l.mu.RLock() + defer l.mu.RUnlock() + select { + case <-l.leaderc: + default: + close(l.leaderc) + } +} + +// gotLeader will force update the leadership status to having a leader. +func (l *leader) gotLeader() { + l.mu.Lock() + defer l.mu.Unlock() + select { + case <-l.leaderc: + l.leaderc = make(chan struct{}) + default: + } +} + +func (l *leader) disconnectNotify() <-chan struct{} { return l.disconnc } + +func (l *leader) stopNotify() <-chan struct{} { return l.donec } + +// lostNotify returns a channel that is closed if there has been +// a leader loss not yet followed by a leader reacquire. 
+func (l *leader) lostNotify() <-chan struct{} { + l.mu.RLock() + defer l.mu.RUnlock() + return l.leaderc +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go index 4f870220b79..19c2249a7e2 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go @@ -15,73 +15,353 @@ package grpcproxy import ( + "io" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "golang.org/x/net/context" "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" ) type leaseProxy struct { - client *clientv3.Client + // leaseClient handles req from LeaseGrant() that requires a lease ID. + leaseClient pb.LeaseClient + + lessor clientv3.Lease + + ctx context.Context + + leader *leader + + // mu protects adding outstanding leaseProxyStream through wg. + mu sync.RWMutex + + // wg waits until all outstanding leaseProxyStream quit. 
+ wg sync.WaitGroup } -func NewLeaseProxy(c *clientv3.Client) pb.LeaseServer { - return &leaseProxy{ - client: c, +func NewLeaseProxy(c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) { + cctx, cancel := context.WithCancel(c.Ctx()) + lp := &leaseProxy{ + leaseClient: pb.NewLeaseClient(c.ActiveConnection()), + lessor: c.Lease, + ctx: cctx, + leader: newLeader(c.Ctx(), c.Watcher), } + ch := make(chan struct{}) + go func() { + defer close(ch) + <-lp.leader.stopNotify() + lp.mu.Lock() + select { + case <-lp.ctx.Done(): + case <-lp.leader.disconnectNotify(): + cancel() + } + <-lp.ctx.Done() + lp.mu.Unlock() + lp.wg.Wait() + }() + return lp, ch } func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - conn := lp.client.ActiveConnection() - return pb.NewLeaseClient(conn).LeaseGrant(ctx, cr) + rp, err := lp.leaseClient.LeaseGrant(ctx, cr) + if err != nil { + return nil, err + } + lp.leader.gotLeader() + return rp, nil } func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - conn := lp.client.ActiveConnection() - return pb.NewLeaseClient(conn).LeaseRevoke(ctx, rr) + r, err := lp.lessor.Revoke(ctx, clientv3.LeaseID(rr.ID)) + if err != nil { + return nil, err + } + lp.leader.gotLeader() + return (*pb.LeaseRevokeResponse)(r), nil } func (lp *leaseProxy) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - conn := lp.client.ActiveConnection() - return pb.NewLeaseClient(conn).LeaseTimeToLive(ctx, rr) + var ( + r *clientv3.LeaseTimeToLiveResponse + err error + ) + if rr.Keys { + r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID), clientv3.WithAttachedKeys()) + } else { + r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID)) + } + if err != nil { + return nil, err + } + rp := &pb.LeaseTimeToLiveResponse{ + Header: r.ResponseHeader, + ID: int64(r.ID), + TTL: r.TTL, + GrantedTTL: 
r.GrantedTTL, + Keys: r.Keys, + } + return rp, err } func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { - conn := lp.client.ActiveConnection() + lp.mu.Lock() + select { + case <-lp.ctx.Done(): + lp.mu.Unlock() + return lp.ctx.Err() + default: + lp.wg.Add(1) + } + lp.mu.Unlock() + ctx, cancel := context.WithCancel(stream.Context()) - lc, err := pb.NewLeaseClient(conn).LeaseKeepAlive(ctx) - if err != nil { - cancel() - return err + lps := leaseProxyStream{ + stream: stream, + lessor: lp.lessor, + keepAliveLeases: make(map[int64]*atomicCounter), + respc: make(chan *pb.LeaseKeepAliveResponse), + ctx: ctx, + cancel: cancel, } - go func() { - // Cancel the context attached to lc to unblock lc.Recv when - // this routine returns on error. - defer cancel() + errc := make(chan error, 2) - for { - // stream.Recv will be unblock when the loop in the parent routine - // returns on error. - rr, err := stream.Recv() - if err != nil { - return - } - err = lc.Send(rr) - if err != nil { - return + var lostLeaderC <-chan struct{} + if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { + v := md[rpctypes.MetadataRequireLeaderKey] + if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { + lostLeaderC = lp.leader.lostNotify() + // if leader is known to be lost at creation time, avoid + // letting events through at all + select { + case <-lostLeaderC: + lp.wg.Done() + return rpctypes.ErrNoLeader + default: } } + } + stopc := make(chan struct{}, 3) + go func() { + defer func() { stopc <- struct{}{} }() + if err := lps.recvLoop(); err != nil { + errc <- err + } }() - for { - rr, err := lc.Recv() + go func() { + defer func() { stopc <- struct{}{} }() + if err := lps.sendLoop(); err != nil { + errc <- err + } + }() + + // tears down LeaseKeepAlive stream if leader goes down or entire leaseProxy is terminated. 
+ go func() { + defer func() { stopc <- struct{}{} }() + select { + case <-lostLeaderC: + case <-ctx.Done(): + case <-lp.ctx.Done(): + } + }() + + var err error + select { + case <-stopc: + stopc <- struct{}{} + case err = <-errc: + } + cancel() + + // recv/send may only shutdown after function exits; + // this goroutine notifies lease proxy that the stream is through + go func() { + <-stopc + <-stopc + <-stopc + lps.close() + close(errc) + lp.wg.Done() + }() + + select { + case <-lostLeaderC: + return rpctypes.ErrNoLeader + case <-lp.leader.disconnectNotify(): + return grpc.ErrClientConnClosing + default: if err != nil { return err } - err = stream.Send(rr) + return ctx.Err() + } +} + +type leaseProxyStream struct { + stream pb.Lease_LeaseKeepAliveServer + + lessor clientv3.Lease + // wg tracks keepAliveLoop goroutines + wg sync.WaitGroup + // mu protects keepAliveLeases + mu sync.RWMutex + // keepAliveLeases tracks how many outstanding keepalive requests which need responses are on a lease. 
+ keepAliveLeases map[int64]*atomicCounter + // respc receives lease keepalive responses from etcd backend + respc chan *pb.LeaseKeepAliveResponse + + ctx context.Context + cancel context.CancelFunc +} + +func (lps *leaseProxyStream) recvLoop() error { + for { + rr, err := lps.stream.Recv() + if err == io.EOF { + return nil + } if err != nil { return err } + lps.mu.Lock() + neededResps, ok := lps.keepAliveLeases[rr.ID] + if !ok { + neededResps = &atomicCounter{} + lps.keepAliveLeases[rr.ID] = neededResps + lps.wg.Add(1) + go func() { + defer lps.wg.Done() + if err := lps.keepAliveLoop(rr.ID, neededResps); err != nil { + lps.cancel() + } + }() + } + neededResps.add(1) + lps.mu.Unlock() + } +} + +func (lps *leaseProxyStream) keepAliveLoop(leaseID int64, neededResps *atomicCounter) error { + cctx, ccancel := context.WithCancel(lps.ctx) + defer ccancel() + respc, err := lps.lessor.KeepAlive(cctx, clientv3.LeaseID(leaseID)) + if err != nil { + return err + } + // ticker expires when loop hasn't received keepalive within TTL + var ticker <-chan time.Time + for { + select { + case <-ticker: + lps.mu.Lock() + // if there are outstanding keepAlive reqs at the moment of ticker firing, + // don't close keepAliveLoop(), let it continuing to process the KeepAlive reqs. 
+ if neededResps.get() > 0 { + lps.mu.Unlock() + ticker = nil + continue + } + delete(lps.keepAliveLeases, leaseID) + lps.mu.Unlock() + return nil + case rp, ok := <-respc: + if !ok { + lps.mu.Lock() + delete(lps.keepAliveLeases, leaseID) + lps.mu.Unlock() + if neededResps.get() == 0 { + return nil + } + ttlResp, err := lps.lessor.TimeToLive(cctx, clientv3.LeaseID(leaseID)) + if err != nil { + return err + } + r := &pb.LeaseKeepAliveResponse{ + Header: ttlResp.ResponseHeader, + ID: int64(ttlResp.ID), + TTL: ttlResp.TTL, + } + for neededResps.get() > 0 { + select { + case lps.respc <- r: + neededResps.add(-1) + case <-lps.ctx.Done(): + return nil + } + } + return nil + } + if neededResps.get() == 0 { + continue + } + ticker = time.After(time.Duration(rp.TTL) * time.Second) + r := &pb.LeaseKeepAliveResponse{ + Header: rp.ResponseHeader, + ID: int64(rp.ID), + TTL: rp.TTL, + } + lps.replyToClient(r, neededResps) + } } } + +func (lps *leaseProxyStream) replyToClient(r *pb.LeaseKeepAliveResponse, neededResps *atomicCounter) { + timer := time.After(500 * time.Millisecond) + for neededResps.get() > 0 { + select { + case lps.respc <- r: + neededResps.add(-1) + case <-timer: + return + case <-lps.ctx.Done(): + return + } + } +} + +func (lps *leaseProxyStream) sendLoop() error { + for { + select { + case lrp, ok := <-lps.respc: + if !ok { + return nil + } + if err := lps.stream.Send(lrp); err != nil { + return err + } + case <-lps.ctx.Done(): + return lps.ctx.Err() + } + } +} + +func (lps *leaseProxyStream) close() { + lps.cancel() + lps.wg.Wait() + // only close respc channel if all the keepAliveLoop() goroutines have finished + // this ensures those goroutines don't send resp to a closed resp channel + close(lps.respc) +} + +type atomicCounter struct { + counter int64 +} + +func (ac *atomicCounter) add(delta int64) { + atomic.AddInt64(&ac.counter, delta) +} + +func (ac *atomicCounter) get() int64 { + return atomic.LoadInt64(&ac.counter) +} diff --git 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go new file mode 100644 index 00000000000..804aff64a96 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lock.go @@ -0,0 +1,38 @@ +// Copyright 2017 The etcd Lockors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" +) + +type lockProxy struct { + client *clientv3.Client +} + +func NewLockProxy(client *clientv3.Client) v3lockpb.LockServer { + return &lockProxy{client: client} +} + +func (lp *lockProxy) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { + return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Lock(ctx, req) +} + +func (lp *lockProxy) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { + return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Unlock(ctx, req) +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go new file mode 100644 index 00000000000..c2d81804395 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/logger.go @@ -0,0 +1,19 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import "github.com/coreos/pkg/capnslog" + +var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "grpcproxy") diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go index 209dc94a712..384d1520360 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go @@ -15,6 +15,8 @@ package grpcproxy import ( + "io" + "golang.org/x/net/context" "github.com/coreos/etcd/clientv3" @@ -49,6 +51,9 @@ func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenan for { rr, err := sc.Recv() if err != nil { + if err == io.EOF { + return nil + } return err } err = stream.Send(rr) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go index f4a1d4c8de4..864fa1609a0 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go @@ -29,6 +29,12 @@ var ( Name: "events_coalescing_total", Help: "Total number of events coalescing", }) + cacheKeys = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "grpc_proxy", + Name: "cache_keys_total", + Help: "Total number of keys/ranges cached", + }) cacheHits = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "etcd", Subsystem: "grpc_proxy", @@ -46,6 +52,7 @@ var ( func init() { prometheus.MustRegister(watchersCoalescing) 
prometheus.MustRegister(eventsCoalescing) + prometheus.MustRegister(cacheKeys) prometheus.MustRegister(cacheHits) prometheus.MustRegister(cachedMisses) } diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go new file mode 100644 index 00000000000..598c71f07ae --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/register.go @@ -0,0 +1,94 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "encoding/json" + "os" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/concurrency" + "github.com/coreos/etcd/clientv3/naming" + + "golang.org/x/time/rate" + gnaming "google.golang.org/grpc/naming" +) + +// allow maximum 1 retry per second +const registerRetryRate = 1 + +// Register registers itself as a grpc-proxy server by writing prefixed-key +// with session of specified TTL (in seconds). The returned channel is closed +// when the client's context is canceled. 
+func Register(c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{} { + rm := rate.NewLimiter(rate.Limit(registerRetryRate), registerRetryRate) + + donec := make(chan struct{}) + go func() { + defer close(donec) + + for rm.Wait(c.Ctx()) == nil { + ss, err := registerSession(c, prefix, addr, ttl) + if err != nil { + plog.Warningf("failed to create a session %v", err) + continue + } + select { + case <-c.Ctx().Done(): + ss.Close() + return + + case <-ss.Done(): + plog.Warning("session expired; possible network partition or server restart") + plog.Warning("creating a new session to rejoin") + continue + } + } + }() + + return donec +} + +func registerSession(c *clientv3.Client, prefix string, addr string, ttl int) (*concurrency.Session, error) { + ss, err := concurrency.NewSession(c, concurrency.WithTTL(ttl)) + if err != nil { + return nil, err + } + + gr := &naming.GRPCResolver{Client: c} + if err = gr.Update(c.Ctx(), prefix, gnaming.Update{Op: gnaming.Add, Addr: addr, Metadata: getMeta()}, clientv3.WithLease(ss.Lease())); err != nil { + return nil, err + } + + plog.Infof("registered %q with %d-second lease", addr, ttl) + return ss, nil +} + +// meta represents metadata of proxy register. 
+type meta struct { + Name string `json:"name"` +} + +func getMeta() string { + hostname, _ := os.Hostname() + bts, _ := json.Marshal(meta{Name: hostname}) + return string(bts) +} + +func decodeMeta(s string) (meta, error) { + m := meta{} + err := json.Unmarshal([]byte(s), &m) + return m, err +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go index 42d196ca2ca..b960c94769a 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go @@ -18,7 +18,7 @@ import ( "sync" "golang.org/x/net/context" - "golang.org/x/time/rate" + "google.golang.org/grpc" "google.golang.org/grpc/metadata" "github.com/coreos/etcd/clientv3" @@ -31,49 +31,35 @@ type watchProxy struct { cw clientv3.Watcher ctx context.Context + leader *leader + ranges *watchRanges - // retryLimiter controls the create watch retry rate on lost leaders. - retryLimiter *rate.Limiter - - // mu protects leaderc updates. - mu sync.RWMutex - leaderc chan struct{} + // mu protects adding outstanding watch servers through wg. + mu sync.Mutex // wg waits until all outstanding watch servers quit. 
wg sync.WaitGroup } -const ( - lostLeaderKey = "__lostleader" // watched to detect leader loss - retryPerSecond = 10 -) - func NewWatchProxy(c *clientv3.Client) (pb.WatchServer, <-chan struct{}) { + cctx, cancel := context.WithCancel(c.Ctx()) wp := &watchProxy{ - cw: c.Watcher, - ctx: clientv3.WithRequireLeader(c.Ctx()), - retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond), - leaderc: make(chan struct{}), + cw: c.Watcher, + ctx: cctx, + leader: newLeader(c.Ctx(), c.Watcher), } wp.ranges = newWatchRanges(wp) ch := make(chan struct{}) go func() { defer close(ch) - // a new streams without opening any watchers won't catch - // a lost leader event, so have a special watch to monitor it - rev := int64((uint64(1) << 63) - 2) - for wp.ctx.Err() == nil { - wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev)) - for range wch { - } - wp.mu.Lock() - close(wp.leaderc) - wp.leaderc = make(chan struct{}) - wp.mu.Unlock() - wp.retryLimiter.Wait(wp.ctx) - } + <-wp.leader.stopNotify() wp.mu.Lock() + select { + case <-wp.ctx.Done(): + case <-wp.leader.disconnectNotify(): + cancel() + } <-wp.ctx.Done() wp.mu.Unlock() wp.wg.Wait() @@ -87,7 +73,12 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { select { case <-wp.ctx.Done(): wp.mu.Unlock() - return + select { + case <-wp.leader.disconnectNotify(): + return grpc.ErrClientConnClosing + default: + return wp.ctx.Err() + } default: wp.wg.Add(1) } @@ -103,11 +94,19 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { cancel: cancel, } - var leaderc <-chan struct{} - if md, ok := metadata.FromContext(stream.Context()); ok { + var lostLeaderC <-chan struct{} + if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { v := md[rpctypes.MetadataRequireLeaderKey] if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { - leaderc = wp.lostLeaderNotify() + lostLeaderC = wp.leader.lostNotify() + // if leader is known to be lost at creation time, avoid + // letting 
events through at all + select { + case <-lostLeaderC: + wp.wg.Done() + return rpctypes.ErrNoLeader + default: + } } } @@ -126,7 +125,7 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { go func() { defer func() { stopc <- struct{}{} }() select { - case <-leaderc: + case <-lostLeaderC: case <-ctx.Done(): case <-wp.ctx.Done(): } @@ -145,19 +144,15 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { }() select { - case <-leaderc: + case <-lostLeaderC: return rpctypes.ErrNoLeader + case <-wp.leader.disconnectNotify(): + return grpc.ErrClientConnClosing default: return wps.ctx.Err() } } -func (wp *watchProxy) lostLeaderNotify() <-chan struct{} { - wp.mu.RLock() - defer wp.mu.RUnlock() - return wp.leaderc -} - // watchProxyStream forwards etcd watch events to a proxied client stream. type watchProxyStream struct { ranges *watchRanges diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go index 5529fb5a2bc..5e750bdb0d4 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go @@ -50,27 +50,20 @@ func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) wb.add(w) go func() { defer close(wb.donec) - // loop because leader loss will close channel - for cctx.Err() == nil { - opts := []clientv3.OpOption{ - clientv3.WithRange(w.wr.end), - clientv3.WithProgressNotify(), - clientv3.WithRev(wb.nextrev), - clientv3.WithPrevKV(), - } - // The create notification should be the first response; - // if the watch is recreated following leader loss, it - // shouldn't post a second create response to the client. - if wb.responses == 0 { - opts = append(opts, clientv3.WithCreatedNotify()) - } - wch := wp.cw.Watch(cctx, w.wr.key, opts...) 
- for wr := range wch { - wb.bcast(wr) - update(wb) - } - wp.retryLimiter.Wait(cctx) + opts := []clientv3.OpOption{ + clientv3.WithRange(w.wr.end), + clientv3.WithProgressNotify(), + clientv3.WithRev(wb.nextrev), + clientv3.WithPrevKV(), + clientv3.WithCreatedNotify(), + } + + wch := wp.cw.Watch(cctx, w.wr.key, opts...) + + for wr := range wch { + wb.bcast(wr) + update(wb) } }() return wb diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go index e860a69ce81..7387caf4dbd 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go @@ -102,16 +102,17 @@ func (w *watcher) send(wr clientv3.WatchResponse) { } // all events are filtered out? - if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 { + if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 && wr.CompactRevision == 0 { return } w.lastHeader = wr.Header w.post(&pb.WatchResponse{ - Header: &wr.Header, - Created: wr.Created, - WatchId: w.id, - Events: events, + Header: &wr.Header, + Created: wr.Created, + CompactRevision: wr.CompactRevision, + WatchId: w.id, + Events: events, }) } diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md index a724b958579..f485b839771 100644 --- a/vendor/github.com/coreos/etcd/raft/README.md +++ b/vendor/github.com/coreos/etcd/raft/README.md @@ -13,9 +13,7 @@ To keep the codebase small as well as provide flexibility, the library only impl In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. 
The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output. -A simple example application, _raftexample_, is also available to help illustrate -how to use this package in practice: -https://github.com/coreos/etcd/tree/master/contrib/raftexample +A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample # Features @@ -51,11 +49,11 @@ This raft implementation also includes a few optional enhancements: - [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store - [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft - [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale. +- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks ## Usage -The primary object in raft is a Node. You either start a Node from scratch -using raft.StartNode or start a Node from some initial state using raft.RestartNode. +The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode. To start a three-node cluster ```go @@ -73,7 +71,7 @@ To start a three-node cluster n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) ``` -You can start a single node cluster, like so: +Start a single node cluster, like so: ```go // Create storage and config as shown above. // Set peer list to itself, so this node can become the leader of this single-node cluster. 
@@ -81,7 +79,7 @@ You can start a single node cluster, like so: n := raft.StartNode(c, peers) ``` -To allow a new node to join this cluster, do not pass in any peers. First, you need add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, you can start the node with empty peer list, like so: +To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so: ```go // Create storage and config as shown above. n := raft.StartNode(c, nil) @@ -110,46 +108,21 @@ To restart a node from previous state: n := raft.RestartNode(c) ``` -Now that you are holding onto a Node you have a few responsibilities: +After creating a Node, the user has a few responsibilities: -First, you must read from the Node.Ready() channel and process the updates -it contains. These steps may be performed in parallel, except as noted in step -2. +First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2. -1. Write HardState, Entries, and Snapshot to persistent storage if they are -not empty. Note that when writing an Entry with Index i, any -previously-persisted entries with Index >= i must be discarded. +1. Write HardState, Entries, and Snapshot to persistent storage if they are not empty. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded. -2. Send all Messages to the nodes named in the To field. It is important that -no messages be sent until the latest HardState has been persisted to disk, -and all Entries written by any previous Ready batch (Messages may be sent while -entries from the same batch are being persisted). 
To reduce the I/O latency, an -optimization can be applied to make leader write to disk in parallel with its -followers (as explained at section 10.2.1 in Raft thesis). If any Message has type -MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be -large). Note: Marshalling messages is not thread-safe; it is important that you -make sure that no new entries are persisted while marshalling. -The easiest way to achieve this is to serialise the messages directly inside -your main raft loop. +2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop. -3. Apply Snapshot (if any) and CommittedEntries to the state machine. -If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() -to apply it to the node. The configuration change may be cancelled at this point -by setting the NodeID field to zero before calling ApplyConfChange -(but ApplyConfChange must be called one way or the other, and the decision to cancel -must be based solely on the state machine and not external information such as -the observed health of the node). +3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. 
The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node). -4. Call Node.Advance() to signal readiness for the next batch of updates. -This may be done at any time after step 1, although all updates must be processed -in the order they were returned by Ready. +4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready. -Second, all persisted log entries must be made available via an -implementation of the Storage interface. The provided MemoryStorage -type can be used for this (if you repopulate its state upon a -restart), or you can supply your own disk-backed implementation. +Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied. -Third, when you receive a message from another node, pass it to Node.Step: +Third, after receiving a message from another node, pass it to Node.Step: ```go func recvRaftRPC(ctx context.Context, m raftpb.Message) { @@ -157,10 +130,7 @@ Third, when you receive a message from another node, pass it to Node.Step: } ``` -Finally, you need to call `Node.Tick()` at regular intervals (probably -via a `time.Ticker`). Raft has two important timeouts: heartbeat and the -election timeout. However, internally to the raft package time is -represented by an abstract "tick". +Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. 
However, internally to the raft package time is represented by an abstract "tick". The total state machine handling loop will look something like this: @@ -190,16 +160,13 @@ The total state machine handling loop will look something like this: } ``` -To propose changes to the state machine from your node take your application -data, serialize it into a byte slice and call: +To propose changes to the state machine from the node to take application data, serialize it into a byte slice and call: ```go n.Propose(ctx, data) ``` -If the proposal is committed, data will appear in committed entries with type -raftpb.EntryNormal. There is no guarantee that a proposed command will be -committed; you may have to re-propose after a timeout. +If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. To add or remove node in a cluster, build ConfChange struct 'cc' and call: @@ -207,8 +174,7 @@ To add or remove node in a cluster, build ConfChange struct 'cc' and call: n.ProposeConfChange(ctx, cc) ``` -After config change is committed, some committed entry with type -raftpb.EntryConfChange will be returned. You must apply it to node through: +After config change is committed, some committed entry with type raftpb.EntryConfChange will be returned. This must be applied to node through: ```go var cc raftpb.ConfChange @@ -223,25 +189,8 @@ may be reused. Node IDs must be non-zero. ## Implementation notes -This implementation is up to date with the final Raft thesis -(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our -implementation of the membership change protocol differs somewhat from -that described in chapter 4. 
The key invariant that membership changes -happen one node at a time is preserved, but in our implementation the -membership change takes effect when its entry is applied, not when it -is added to the log (so the entry is committed under the old -membership instead of the new). This is equivalent in terms of safety, -since the old and new configurations are guaranteed to overlap. +This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap. -To ensure that we do not attempt to commit two membership changes at -once by matching log positions (which would be unsafe since they -should have different quorum requirements), we simply disallow any -proposed membership change while any uncommitted change appears in -the leader's log. +To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log. -This approach introduces a problem when you try to remove a member -from a two-member cluster: If one of the members dies before the -other one receives the commit of the confchange entry, then the member -cannot be removed any more since the cluster cannot make progress. -For this reason it is highly recommended to use three or more nodes in -every cluster. 
+This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster. diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go index 8ae301c3d8d..263af9ce405 100644 --- a/vendor/github.com/coreos/etcd/raft/log_unstable.go +++ b/vendor/github.com/coreos/etcd/raft/log_unstable.go @@ -85,6 +85,26 @@ func (u *unstable) stableTo(i, t uint64) { if gt == t && i >= u.offset { u.entries = u.entries[i+1-u.offset:] u.offset = i + 1 + u.shrinkEntriesArray() + } +} + +// shrinkEntriesArray discards the underlying array used by the entries slice +// if most of it isn't being used. This avoids holding references to a bunch of +// potentially large entries that aren't needed anymore. Simply clearing the +// entries wouldn't be safe because clients might still be using them. +func (u *unstable) shrinkEntriesArray() { + // We replace the array if we're using less than half of the space in + // it. This number is fairly arbitrary, chosen as an attempt to balance + // memory usage vs number of allocations. It could probably be improved + // with some focused tuning. 
+ const lenMultiple = 2 + if len(u.entries) == 0 { + u.entries = nil + } else if len(u.entries)*lenMultiple < cap(u.entries) { + newEntries := make([]pb.Entry, len(u.entries)) + copy(newEntries, u.entries) + u.entries = newEntries } } diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go index c8410fdc77f..5da1c1193b2 100644 --- a/vendor/github.com/coreos/etcd/raft/node.go +++ b/vendor/github.com/coreos/etcd/raft/node.go @@ -83,6 +83,10 @@ type Ready struct { // If it contains a MsgSnap message, the application MUST report back to raft // when the snapshot has been received or has failed by calling ReportSnapshot. Messages []pb.Message + + // MustSync indicates whether the HardState and Entries must be synchronously + // written to disk or if an asynchronous write is permissible. + MustSync bool } func isHardStateEqual(a, b pb.HardState) bool { @@ -517,5 +521,17 @@ func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { if len(r.readStates) != 0 { rd.ReadStates = r.readStates } + rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries)) return rd } + +// MustSync returns true if the hard state and count of Raft entries indicate +// that a synchronous write to persistent storage is required. +func MustSync(st, prevst pb.HardState, entsnum int) bool { + // Persistent state on all servers: + // (Updated on stable storage before responding to RPCs) + // currentTerm + // votedFor + // log entries[] + return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term +} diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go index 7be4407ee2b..29f20398203 100644 --- a/vendor/github.com/coreos/etcd/raft/raft.go +++ b/vendor/github.com/coreos/etcd/raft/raft.go @@ -1159,6 +1159,10 @@ func (r *raft) addNode(id uint64) { } r.setProgress(id, 0, r.raftLog.lastIndex()+1) + // When a node is first added, we should mark it as recently active. 
+ // Otherwise, CheckQuorum may cause us to step down if it is invoked + // before the added node has a chance to communicate with us. + r.prs[id].RecentActive = true } func (r *raft) removeNode(id uint64) { diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go index 86ad3120708..4c6e79d58a0 100644 --- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -1558,25 +1558,67 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for 
field Nodes", wireType) } - m.Nodes = append(m.Nodes, v) default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) @@ -1847,7 +1889,7 @@ func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } var fileDescriptorRaft = []byte{ // 790 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, 0x10, 0x16, 0x29, 0xea, 0x6f, 0x28, 0xcb, 0xab, 0xb5, 0x5a, 0x2c, 0x0c, 0x43, 0x55, 0x85, 0x1e, 0x04, 0x17, 0x76, 0x5b, 0x1d, 0x7a, 0xe8, 0xcd, 0x96, 0x0a, 0x58, 0x40, 0x65, 0xb8, 0xb2, 0xdc, 0x43, 0x83, 0x20, 0x58, 0x8b, 0x2b, 0x4a, 0x89, 0xc9, 0x25, 0x96, 0x2b, 0xc7, 0xbe, 0x04, 0x79, diff --git a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go index ccd9eb78698..d9f07c3479d 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go +++ b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go @@ -16,13 +16,13 @@ package rafthttp import ( "bytes" + "context" "errors" "io/ioutil" "sync" "time" "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/httputil" "github.com/coreos/etcd/pkg/pbutil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft" @@ -118,7 +118,8 @@ func (p *pipeline) post(data []byte) (err error) { req := createPostRequest(u, RaftPrefix, bytes.NewBuffer(data), "application/protobuf", p.tr.URLs, p.tr.ID, p.tr.ClusterID) done := make(chan struct{}, 1) - cancel := httputil.RequestCanceler(req) + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) go func() { select { case <-done: diff --git a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go index 105b330728e..52273c9d195 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go +++ 
b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go @@ -16,6 +16,7 @@ package rafthttp import ( "bytes" + "context" "io" "io/ioutil" "net/http" @@ -104,7 +105,9 @@ func (s *snapshotSender) send(merged snap.Message) { // post posts the given request. // It returns nil when request is sent out and processed successfully. func (s *snapshotSender) post(req *http.Request) (err error) { - cancel := httputil.RequestCanceler(req) + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) + defer cancel() type responseAndError struct { resp *http.Response @@ -130,7 +133,6 @@ func (s *snapshotSender) post(req *http.Request) (err error) { select { case <-s.stopc: - cancel() return errStopped case r := <-result: if r.err != nil { diff --git a/vendor/github.com/coreos/etcd/rafthttp/stream.go b/vendor/github.com/coreos/etcd/rafthttp/stream.go index e69a44ff65a..2a6c620f56d 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/stream.go +++ b/vendor/github.com/coreos/etcd/rafthttp/stream.go @@ -15,10 +15,10 @@ package rafthttp import ( + "context" "fmt" "io" "io/ioutil" - "net" "net/http" "path" "strings" @@ -27,6 +27,7 @@ import ( "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/httputil" + "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/version" @@ -51,6 +52,7 @@ var ( "2.3.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.0.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.1.0": {streamTypeMsgAppV2, streamTypeMessage}, + "3.2.0": {streamTypeMsgAppV2, streamTypeMessage}, } ) @@ -140,7 +142,8 @@ func (cw *streamWriter) run() { flusher http.Flusher batched int ) - tickc := time.Tick(ConnReadTimeout / 3) + tickc := time.NewTicker(ConnReadTimeout / 3) + defer tickc.Stop() unflushed := 0 plog.Infof("started streaming with peer %s (writer)", cw.peerID) @@ -212,7 +215,7 @@ func (cw *streamWriter) run() { plog.Warningf("closed an existing TCP 
streaming connection with peer %s (%s writer)", cw.peerID, t) } plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t) - heartbeatc, msgc = tickc, cw.msgc + heartbeatc, msgc = tickc.C, cw.msgc case <-cw.stopc: if cw.close() { plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) @@ -314,7 +317,7 @@ func (cr *streamReader) run() { // all data is read out case err == io.EOF: // connection is closed by the remote - case isClosedConnectionError(err): + case transport.IsClosedConnError(err): default: cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error()) } @@ -426,14 +429,17 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { setPeerURLsHeader(req, cr.tr.URLs) + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) + cr.mu.Lock() + cr.cancel = cancel select { case <-cr.stopc: cr.mu.Unlock() return nil, fmt.Errorf("stream reader is stopped") default: } - cr.cancel = httputil.RequestCanceler(req) cr.mu.Unlock() resp, err := cr.tr.streamRt.RoundTrip(req) @@ -508,11 +514,6 @@ func (cr *streamReader) resume() { cr.paused = false } -func isClosedConnectionError(err error) bool { - operr, ok := err.(*net.OpError) - return ok && operr.Err.Error() == "use of closed network connection" -} - // checkStreamSupport checks whether the stream type is supported in the // given version. 
func checkStreamSupport(v *semver.Version, t streamType) bool { diff --git a/vendor/github.com/coreos/etcd/rafthttp/util.go b/vendor/github.com/coreos/etcd/rafthttp/util.go index 61855c52a60..12e548c7717 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/util.go +++ b/vendor/github.com/coreos/etcd/rafthttp/util.go @@ -15,8 +15,6 @@ package rafthttp import ( - "crypto/tls" - "encoding/binary" "fmt" "io" "net" @@ -27,7 +25,6 @@ import ( "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" ) @@ -39,8 +36,8 @@ var ( // NewListener returns a listener for raft message transfer between peers. // It uses timeout listener to identify broken streams promptly. -func NewListener(u url.URL, tlscfg *tls.Config) (net.Listener, error) { - return transport.NewTimeoutListener(u.Host, u.Scheme, tlscfg, ConnReadTimeout, ConnWriteTimeout) +func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) { + return transport.NewTimeoutListener(u.Host, u.Scheme, tlsinfo, ConnReadTimeout, ConnWriteTimeout) } // NewRoundTripper returns a roundTripper used to send requests @@ -61,31 +58,6 @@ func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout) } -func writeEntryTo(w io.Writer, ent *raftpb.Entry) error { - size := ent.Size() - if err := binary.Write(w, binary.BigEndian, uint64(size)); err != nil { - return err - } - b, err := ent.Marshal() - if err != nil { - return err - } - _, err = w.Write(b) - return err -} - -func readEntryFrom(r io.Reader, ent *raftpb.Entry) error { - var l uint64 - if err := binary.Read(r, binary.BigEndian, &l); err != nil { - return err - } - buf := make([]byte, int(l)) - if _, err := io.ReadFull(r, buf); err != nil { - return err - } - return ent.Unmarshal(buf) -} - // createPostRequest creates a HTTP 
POST request that sends raft message. func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request { uu := u diff --git a/vendor/github.com/coreos/etcd/snap/db.go b/vendor/github.com/coreos/etcd/snap/db.go index ae3c743f80c..01d897ae861 100644 --- a/vendor/github.com/coreos/etcd/snap/db.go +++ b/vendor/github.com/coreos/etcd/snap/db.go @@ -15,6 +15,7 @@ package snap import ( + "errors" "fmt" "io" "io/ioutil" @@ -24,6 +25,8 @@ import ( "github.com/coreos/etcd/pkg/fileutil" ) +var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist") + // SaveDBFrom saves snapshot of the database from the given reader. It // guarantees the save operation is atomic. func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { @@ -41,7 +44,7 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { os.Remove(f.Name()) return n, err } - fn := filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) + fn := s.dbFilePath(id) if fileutil.Exist(fn) { os.Remove(f.Name()) return n, nil @@ -60,15 +63,15 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { // DBFilePath returns the file path for the snapshot of the database with // given id. If the snapshot does not exist, it returns error. 
func (s *Snapshotter) DBFilePath(id uint64) (string, error) { - fns, err := fileutil.ReadDir(s.dir) - if err != nil { + if _, err := fileutil.ReadDir(s.dir); err != nil { return "", err } - wfn := fmt.Sprintf("%016x.snap.db", id) - for _, fn := range fns { - if fn == wfn { - return filepath.Join(s.dir, fn), nil - } + if fn := s.dbFilePath(id); fileutil.Exist(fn) { + return fn, nil } - return "", fmt.Errorf("snap: snapshot file doesn't exist") + return "", ErrNoDBSnapshot +} + +func (s *Snapshotter) dbFilePath(id uint64) string { + return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) } diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go index 130e2277c84..05a77ff9d06 100644 --- a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go +++ b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go @@ -342,7 +342,7 @@ func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) } var fileDescriptorSnap = []byte{ // 126 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, diff --git a/vendor/github.com/coreos/etcd/store/node.go b/vendor/github.com/coreos/etcd/store/node.go index 731327b08ba..54159553500 100644 --- a/vendor/github.com/coreos/etcd/store/node.go +++ b/vendor/github.com/coreos/etcd/store/node.go @@ -332,7 +332,6 @@ func (n *node) UpdateTTL(expireTime time.Time) { n.ExpireTime = expireTime // push into ttl heap n.store.ttlKeyHeap.push(n) - return } // Compare function compares node index and value with provided ones. 
diff --git a/vendor/github.com/coreos/etcd/store/store.go b/vendor/github.com/coreos/etcd/store/store.go index 6c19ad4c970..edf7f21942b 100644 --- a/vendor/github.com/coreos/etcd/store/store.go +++ b/vendor/github.com/coreos/etcd/store/store.go @@ -682,6 +682,9 @@ func (s *store) DeleteExpiredKeys(cutoff time.Time) { e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex) e.EtcdIndex = s.CurrentIndex e.PrevNode = node.Repr(false, false, s.clock) + if node.IsDir() { + e.Node.Dir = true + } callback := func(path string) { // notify function // notify the watchers with deleted set true diff --git a/vendor/github.com/coreos/etcd/store/watcher_hub.go b/vendor/github.com/coreos/etcd/store/watcher_hub.go index 6dd63f3c541..13c23e391d9 100644 --- a/vendor/github.com/coreos/etcd/store/watcher_hub.go +++ b/vendor/github.com/coreos/etcd/store/watcher_hub.go @@ -116,7 +116,7 @@ func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeInde } func (wh *watcherHub) add(e *Event) { - e = wh.EventHistory.addEvent(e) + wh.EventHistory.addEvent(e) } // notify function accepts an event and notify to the watchers. diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go index 0173d6f11d3..b488499c686 100644 --- a/vendor/github.com/coreos/etcd/version/version.go +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
MinClusterVersion = "3.0.0" - Version = "3.1.10" + Version = "3.2.13" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/github.com/coreos/etcd/wal/encoder.go b/vendor/github.com/coreos/etcd/wal/encoder.go index efe58928cc8..aac1e197e59 100644 --- a/vendor/github.com/coreos/etcd/wal/encoder.go +++ b/vendor/github.com/coreos/etcd/wal/encoder.go @@ -52,7 +52,7 @@ func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder { // newFileEncoder creates a new encoder with current file offset for the page writer. func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) { - offset, err := f.Seek(0, os.SEEK_CUR) + offset, err := f.Seek(0, io.SeekCurrent) if err != nil { return nil, err } diff --git a/vendor/github.com/coreos/etcd/wal/repair.go b/vendor/github.com/coreos/etcd/wal/repair.go index ffb14161682..091036b57b9 100644 --- a/vendor/github.com/coreos/etcd/wal/repair.go +++ b/vendor/github.com/coreos/etcd/wal/repair.go @@ -62,7 +62,7 @@ func Repair(dirpath string) bool { } defer bf.Close() - if _, err = f.Seek(0, os.SEEK_SET); err != nil { + if _, err = f.Seek(0, io.SeekStart); err != nil { plog.Errorf("could not repair %v, failed to read file", f.Name()) return false } diff --git a/vendor/github.com/coreos/etcd/wal/wal.go b/vendor/github.com/coreos/etcd/wal/wal.go index b65f6448304..2cac25c1c90 100644 --- a/vendor/github.com/coreos/etcd/wal/wal.go +++ b/vendor/github.com/coreos/etcd/wal/wal.go @@ -112,7 +112,7 @@ func Create(dirpath string, metadata []byte) (*WAL, error) { if err != nil { return nil, err } - if _, err = f.Seek(0, os.SEEK_END); err != nil { + if _, err = f.Seek(0, io.SeekEnd); err != nil { return nil, err } if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil { @@ -322,7 +322,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. // not all, will cause CRC errors on WAL open. 
Since the records // were never fully synced to disk in the first place, it's safe // to zero them out to avoid any CRC errors from new writes. - if _, err = w.tail().Seek(w.decoder.lastOffset(), os.SEEK_SET); err != nil { + if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil { return nil, state, nil, err } if err = fileutil.ZeroToEnd(w.tail().File); err != nil { @@ -361,7 +361,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. // Then cut atomically rename temp wal file to a wal file. func (w *WAL) cut() error { // close old wal file; truncate to avoid wasting space if an early cut - off, serr := w.tail().Seek(0, os.SEEK_CUR) + off, serr := w.tail().Seek(0, io.SeekCurrent) if serr != nil { return serr } @@ -401,7 +401,7 @@ func (w *WAL) cut() error { return err } - off, err = w.tail().Seek(0, os.SEEK_CUR) + off, err = w.tail().Seek(0, io.SeekCurrent) if err != nil { return err } @@ -418,7 +418,7 @@ func (w *WAL) cut() error { if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil { return err } - if _, err = newTail.Seek(off, os.SEEK_SET); err != nil { + if _, err = newTail.Seek(off, io.SeekStart); err != nil { return err } @@ -552,7 +552,7 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { return nil } - mustSync := mustSync(st, w.state, len(ents)) + mustSync := raft.MustSync(st, w.state, len(ents)) // TODO(xiangli): no more reference operator for i := range ents { @@ -564,7 +564,7 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { return err } - curOff, err := w.tail().Seek(0, os.SEEK_CUR) + curOff, err := w.tail().Seek(0, io.SeekCurrent) if err != nil { return err } @@ -618,15 +618,6 @@ func (w *WAL) seq() uint64 { return seq } -func mustSync(st, prevst raftpb.HardState, entsnum int) bool { - // Persistent state on all servers: - // (Updated on stable storage before responding to RPCs) - // currentTerm - // votedFor - // 
log entries[] - return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term -} - func closeAll(rcs ...io.ReadCloser) error { for _, f := range rcs { if err := f.Close(); err != nil { diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go index e1a77d5e51a..664fae1305b 100644 --- a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go +++ b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go @@ -506,7 +506,7 @@ func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) } var fileDescriptorRecord = []byte{ // 186 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6, 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD new file mode 100644 index 00000000000..3f80de51569 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["descriptor.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["descriptor.pb.go"], + importpath = "github.com/golang/protobuf/protoc-gen-go/descriptor", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = 
["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile new file mode 100644 index 00000000000..f706871a6fa --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile @@ -0,0 +1,37 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/descriptor.proto +regenerate: + @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION + cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto . + protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 00000000000..c6a91bcab9c --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2215 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. 
+ +It is generated from these files: + google/protobuf/descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + ExtensionRangeOptions + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. 
+ FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 
17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. 
+type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } + +type FieldOptions_CType int32 + +const ( + // Default mode. 
+ FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} } + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. 
+type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). 
+ TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. 
+ JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + 
return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. 
+ // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. 
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. 
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +var extRange_FileOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) 
GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + 
return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. 
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +var extRange_MessageOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. 
This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. 
However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +var extRange_FieldOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + 
+type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +var extRange_EnumOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +var extRange_MethodOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. 
+type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) 
GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{18, 0} +} + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. 
This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. 
+ // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. 
Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. 
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), 
"google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", 
MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2519 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63, + 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec, + 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad, + 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50, + 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb, + 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 0x33, + 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d, + 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90, + 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43, + 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4, + 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61, + 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a, + 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76, + 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68, + 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3, + 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55, + 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 
0x14, 0x68, 0x0d, + 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6, + 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7, + 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa, + 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13, + 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2, + 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35, + 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e, + 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2, + 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec, + 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07, + 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94, + 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2, + 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e, + 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16, + 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91, + 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda, + 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79, + 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1, + 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9, + 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67, + 0x34, 0x61, 0x79, 
0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b, + 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65, + 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba, + 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e, + 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48, + 0x39, 0x25, 0x68, 0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c, + 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1, + 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91, + 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78, + 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c, + 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e, + 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61, + 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73, + 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76, + 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, + 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f, + 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc, + 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f, + 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54, + 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e, + 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 
0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d, + 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7, + 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda, + 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4, + 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f, + 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82, + 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4, + 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13, + 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3, + 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9, + 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78, + 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e, + 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10, + 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, + 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03, + 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, + 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37, + 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f, + 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e, + 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0, + 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 
0xf4, + 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80, + 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f, + 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96, + 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1, + 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa, + 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc, + 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59, + 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96, + 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50, + 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27, + 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58, + 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a, + 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67, + 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e, + 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27, + 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f, + 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0, + 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2, + 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4, + 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4, + 0x8a, 0x33, 0x76, 0xe2, 0xfd, 
0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c, + 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43, + 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76, + 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b, + 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28, + 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e, + 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55, + 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2, + 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd, + 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59, + 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27, + 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64, + 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d, + 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18, + 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1, + 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5, + 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 0x50, 0xf2, 0x28, 0x79, 0x24, + 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8, + 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94, + 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91, + 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 
0xa2, 0x5c, 0xd5, 0x83, 0x72, + 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c, + 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75, + 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4, + 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee, + 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e, + 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f, + 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20, + 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e, + 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8, + 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb, + 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16, + 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc, + 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1, + 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a, + 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd, + 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0, + 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25, + 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12, + 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e, + 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4, + 0x6e, 
0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09, + 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd, + 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43, + 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14, + 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7, + 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59, + 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e, + 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8, + 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a, + 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed, + 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f, + 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 0xe4, + 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f, + 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24, + 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66, + 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57, + 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c, + 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto new file mode 100644 index 00000000000..4d4fb378f50 --- /dev/null +++ 
b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -0,0 +1,849 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. 
+// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. 
+ // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. 
+ // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. 
If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. 
+// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. 
+ // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. 
When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. 
Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. 
Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. 
+ extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. 
+ repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. 
is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. 
For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. 
For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed=true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed=true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. 
+ // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. 
The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/karlseguin/ccache/Makefile b/vendor/github.com/karlseguin/ccache/Makefile deleted file mode 100644 index 5b3f26bafdc..00000000000 --- a/vendor/github.com/karlseguin/ccache/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -t: - go test ./... - -f: - go fmt ./... diff --git a/vendor/github.com/karlseguin/ccache/bucket.go b/vendor/github.com/karlseguin/ccache/bucket.go deleted file mode 100644 index d67535170c5..00000000000 --- a/vendor/github.com/karlseguin/ccache/bucket.go +++ /dev/null @@ -1,41 +0,0 @@ -package ccache - -import ( - "sync" - "time" -) - -type bucket struct { - sync.RWMutex - lookup map[string]*Item -} - -func (b *bucket) get(key string) *Item { - b.RLock() - defer b.RUnlock() - return b.lookup[key] -} - -func (b *bucket) set(key string, value interface{}, duration time.Duration) (*Item, *Item) { - expires := time.Now().Add(duration).UnixNano() - item := newItem(key, value, expires) - b.Lock() - defer b.Unlock() - existing := b.lookup[key] - b.lookup[key] = item - return item, existing -} - -func (b *bucket) delete(key string) *Item { - b.Lock() - defer b.Unlock() - item := b.lookup[key] - delete(b.lookup, key) - return item -} - -func (b *bucket) clear() { - b.Lock() - defer b.Unlock() - b.lookup = make(map[string]*Item) -} diff --git a/vendor/github.com/karlseguin/ccache/cache.go b/vendor/github.com/karlseguin/ccache/cache.go deleted file mode 100644 index a9e94f486f9..00000000000 --- a/vendor/github.com/karlseguin/ccache/cache.go +++ /dev/null @@ -1,227 +0,0 @@ -// An LRU cached aimed at high concurrency -package ccache - -import ( - "container/list" - "hash/fnv" - "sync/atomic" - "time" -) - -type Cache struct { - *Configuration - list *list.List - size int64 - buckets []*bucket - bucketMask uint32 - deletables chan *Item - promotables chan *Item - donec chan struct{} -} - -// Create a new 
cache with the specified configuration -// See ccache.Configure() for creating a configuration -func New(config *Configuration) *Cache { - c := &Cache{ - list: list.New(), - Configuration: config, - bucketMask: uint32(config.buckets) - 1, - buckets: make([]*bucket, config.buckets), - } - for i := 0; i < int(config.buckets); i++ { - c.buckets[i] = &bucket{ - lookup: make(map[string]*Item), - } - } - c.restart() - return c -} - -// Get an item from the cache. Returns nil if the item wasn't found. -// This can return an expired item. Use item.Expired() to see if the item -// is expired and item.TTL() to see how long until the item expires (which -// will be negative for an already expired item). -func (c *Cache) Get(key string) *Item { - item := c.bucket(key).get(key) - if item == nil { - return nil - } - if item.expires > time.Now().UnixNano() { - c.promote(item) - } - return item -} - -// Used when the cache was created with the Track() configuration option. -// Avoid otherwise -func (c *Cache) TrackingGet(key string) TrackedItem { - item := c.Get(key) - if item == nil { - return NilTracked - } - item.track() - return item -} - -// Set the value in the cache for the specified duration -func (c *Cache) Set(key string, value interface{}, duration time.Duration) { - c.set(key, value, duration) -} - -// Replace the value if it exists, does not set if it doesn't. -// Returns true if the item existed an was replaced, false otherwise. -// Replace does not reset item's TTL -func (c *Cache) Replace(key string, value interface{}) bool { - item := c.bucket(key).get(key) - if item == nil { - return false - } - c.Set(key, value, item.TTL()) - return true -} - -// Attempts to get the value from the cache and calles fetch on a miss (missing -// or stale item). If fetch returns an error, no value is cached and the error -// is returned back to the caller. 
-func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { - item := c.Get(key) - if item != nil && !item.Expired() { - return item, nil - } - value, err := fetch() - if err != nil { - return nil, err - } - return c.set(key, value, duration), nil -} - -// Remove the item from the cache, return true if the item was present, false otherwise. -func (c *Cache) Delete(key string) bool { - item := c.bucket(key).delete(key) - if item != nil { - c.deletables <- item - return true - } - return false -} - -//this isn't thread safe. It's meant to be called from non-concurrent tests -func (c *Cache) Clear() { - for _, bucket := range c.buckets { - bucket.clear() - } - c.size = 0 - c.list = list.New() -} - -// Stops the background worker. Operations performed on the cache after Stop -// is called are likely to panic -func (c *Cache) Stop() { - close(c.promotables) - <-c.donec -} - -func (c *Cache) restart() { - c.deletables = make(chan *Item, c.deleteBuffer) - c.promotables = make(chan *Item, c.promoteBuffer) - c.donec = make(chan struct{}) - go c.worker() -} - -func (c *Cache) deleteItem(bucket *bucket, item *Item) { - bucket.delete(item.key) //stop other GETs from getting it - c.deletables <- item -} - -func (c *Cache) set(key string, value interface{}, duration time.Duration) *Item { - item, existing := c.bucket(key).set(key, value, duration) - if existing != nil { - c.deletables <- existing - } - c.promote(item) - return item -} - -func (c *Cache) bucket(key string) *bucket { - h := fnv.New32a() - h.Write([]byte(key)) - return c.buckets[h.Sum32()&c.bucketMask] -} - -func (c *Cache) promote(item *Item) { - c.promotables <- item -} - -func (c *Cache) worker() { - defer close(c.donec) - - for { - select { - case item, ok := <-c.promotables: - if ok == false { - goto drain - } - if c.doPromote(item) && c.size > c.maxSize { - c.gc() - } - case item := <-c.deletables: - c.doDelete(item) - } - } - -drain: - for { - select { - 
case item := <-c.deletables: - c.doDelete(item) - default: - close(c.deletables) - return - } - } -} - -func (c *Cache) doDelete(item *Item) { - if item.element == nil { - item.promotions = -2 - } else { - c.size -= item.size - c.list.Remove(item.element) - } -} - -func (c *Cache) doPromote(item *Item) bool { - //already deleted - if item.promotions == -2 { - return false - } - if item.element != nil { //not a new item - if item.shouldPromote(c.getsPerPromote) { - c.list.MoveToFront(item.element) - item.promotions = 0 - } - return false - } - - c.size += item.size - item.element = c.list.PushFront(item) - return true -} - -func (c *Cache) gc() { - element := c.list.Back() - for i := 0; i < c.itemsToPrune; i++ { - if element == nil { - return - } - prev := element.Prev() - item := element.Value.(*Item) - if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 { - c.bucket(item.key).delete(item.key) - c.size -= item.size - c.list.Remove(element) - item.promotions = -2 - } - element = prev - } -} diff --git a/vendor/github.com/karlseguin/ccache/configuration.go b/vendor/github.com/karlseguin/ccache/configuration.go deleted file mode 100644 index daa8357767e..00000000000 --- a/vendor/github.com/karlseguin/ccache/configuration.go +++ /dev/null @@ -1,94 +0,0 @@ -package ccache - -type Configuration struct { - maxSize int64 - buckets int - itemsToPrune int - deleteBuffer int - promoteBuffer int - getsPerPromote int32 - tracking bool -} - -// Creates a configuration object with sensible defaults -// Use this as the start of the fluent configuration: -// e.g.: ccache.New(ccache.Configure().MaxSize(10000)) -func Configure() *Configuration { - return &Configuration{ - buckets: 16, - itemsToPrune: 500, - deleteBuffer: 1024, - getsPerPromote: 3, - promoteBuffer: 1024, - maxSize: 5000, - tracking: false, - } -} - -// The max size for the cache -// [5000] -func (c *Configuration) MaxSize(max int64) *Configuration { - c.maxSize = max - return c -} - -// Keys are hashed 
into % bucket count to provide greater concurrency (every set -// requires a write lock on the bucket). Must be a power of 2 (1, 2, 4, 8, 16, ...) -// [16] -func (c *Configuration) Buckets(count uint32) *Configuration { - if count == 0 || ((count&(^count+1)) == count) == false { - count = 16 - } - c.buckets = int(count) - return c -} - -// The number of items to prune when memory is low -// [500] -func (c *Configuration) ItemsToPrune(count uint32) *Configuration { - c.itemsToPrune = int(count) - return c -} - -// The size of the queue for items which should be promoted. If the queue fills -// up, promotions are skipped -// [1024] -func (c *Configuration) PromoteBuffer(size uint32) *Configuration { - c.promoteBuffer = int(size) - return c -} - -// The size of the queue for items which should be deleted. If the queue fills -// up, calls to Delete() will block -func (c *Configuration) DeleteBuffer(size uint32) *Configuration { - c.deleteBuffer = int(size) - return c -} - -// Give a large cache with a high read / write ratio, it's usually unecessary -// to promote an item on every Get. GetsPerPromote specifies the number of Gets -// a key must have before being promoted -// [3] -func (c *Configuration) GetsPerPromote(count int32) *Configuration { - c.getsPerPromote = count - return c -} - -// Typically, a cache is agnostic about how cached values are use. This is fine -// for a typical cache usage, where you fetch an item from the cache, do something -// (write it out) and nothing else. - -// However, if callers are going to keep a reference to a cached item for a long -// time, things get messy. Specifically, the cache can evict the item, while -// references still exist. Technically, this isn't an issue. However, if you reload -// the item back into the cache, you end up with 2 objects representing the same -// data. This is a waste of space and could lead to weird behavior (the type an -// identity map is meant to solve). 
- -// By turning tracking on and using the cache's TrackingGet, the cache -// won't evict items which you haven't called Release() on. It's a simple reference -// counter. -func (c *Configuration) Track() *Configuration { - c.tracking = true - return c -} diff --git a/vendor/github.com/karlseguin/ccache/item.go b/vendor/github.com/karlseguin/ccache/item.go deleted file mode 100644 index bb7c04fff9d..00000000000 --- a/vendor/github.com/karlseguin/ccache/item.go +++ /dev/null @@ -1,103 +0,0 @@ -package ccache - -import ( - "container/list" - "sync/atomic" - "time" -) - -type Sized interface { - Size() int64 -} - -type TrackedItem interface { - Value() interface{} - Release() - Expired() bool - TTL() time.Duration - Expires() time.Time - Extend(duration time.Duration) -} - -type nilItem struct{} - -func (n *nilItem) Value() interface{} { return nil } -func (n *nilItem) Release() {} - -func (i *nilItem) Expired() bool { - return true -} - -func (i *nilItem) TTL() time.Duration { - return time.Minute -} - -func (i *nilItem) Expires() time.Time { - return time.Time{} -} - -func (i *nilItem) Extend(duration time.Duration) { -} - -var NilTracked = new(nilItem) - -type Item struct { - key string - group string - promotions int32 - refCount int32 - expires int64 - size int64 - value interface{} - element *list.Element -} - -func newItem(key string, value interface{}, expires int64) *Item { - size := int64(1) - if sized, ok := value.(Sized); ok { - size = sized.Size() - } - return &Item{ - key: key, - value: value, - promotions: 0, - size: size, - expires: expires, - } -} - -func (i *Item) shouldPromote(getsPerPromote int32) bool { - i.promotions += 1 - return i.promotions == getsPerPromote -} - -func (i *Item) Value() interface{} { - return i.value -} - -func (i *Item) track() { - atomic.AddInt32(&i.refCount, 1) -} - -func (i *Item) Release() { - atomic.AddInt32(&i.refCount, -1) -} - -func (i *Item) Expired() bool { - expires := atomic.LoadInt64(&i.expires) - return expires 
< time.Now().UnixNano() -} - -func (i *Item) TTL() time.Duration { - expires := atomic.LoadInt64(&i.expires) - return time.Nanosecond * time.Duration(expires-time.Now().UnixNano()) -} - -func (i *Item) Expires() time.Time { - expires := atomic.LoadInt64(&i.expires) - return time.Unix(0, expires) -} - -func (i *Item) Extend(duration time.Duration) { - atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano()) -} diff --git a/vendor/github.com/karlseguin/ccache/layeredbucket.go b/vendor/github.com/karlseguin/ccache/layeredbucket.go deleted file mode 100644 index 88f3def4219..00000000000 --- a/vendor/github.com/karlseguin/ccache/layeredbucket.go +++ /dev/null @@ -1,82 +0,0 @@ -package ccache - -import ( - "sync" - "time" -) - -type layeredBucket struct { - sync.RWMutex - buckets map[string]*bucket -} - -func (b *layeredBucket) get(primary, secondary string) *Item { - bucket := b.getSecondaryBucket(primary) - if bucket == nil { - return nil - } - return bucket.get(secondary) -} - -func (b *layeredBucket) getSecondaryBucket(primary string) *bucket { - b.RLock() - bucket, exists := b.buckets[primary] - b.RUnlock() - if exists == false { - return nil - } - return bucket -} - -func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration) (*Item, *Item) { - b.Lock() - bkt, exists := b.buckets[primary] - if exists == false { - bkt = &bucket{lookup: make(map[string]*Item)} - b.buckets[primary] = bkt - } - b.Unlock() - item, existing := bkt.set(secondary, value, duration) - item.group = primary - return item, existing -} - -func (b *layeredBucket) delete(primary, secondary string) *Item { - b.RLock() - bucket, exists := b.buckets[primary] - b.RUnlock() - if exists == false { - return nil - } - return bucket.delete(secondary) -} - -func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool { - b.RLock() - bucket, exists := b.buckets[primary] - b.RUnlock() - if exists == false { - return false - } - - bucket.Lock() - 
defer bucket.Unlock() - - if l := len(bucket.lookup); l == 0 { - return false - } - for key, item := range bucket.lookup { - delete(bucket.lookup, key) - deletables <- item - } - return true -} - -func (b *layeredBucket) clear() { - b.Lock() - defer b.Unlock() - for _, bucket := range b.buckets { - bucket.clear() - } - b.buckets = make(map[string]*bucket) -} diff --git a/vendor/github.com/karlseguin/ccache/layeredcache.go b/vendor/github.com/karlseguin/ccache/layeredcache.go deleted file mode 100644 index 20b13f94d30..00000000000 --- a/vendor/github.com/karlseguin/ccache/layeredcache.go +++ /dev/null @@ -1,237 +0,0 @@ -// An LRU cached aimed at high concurrency -package ccache - -import ( - "container/list" - "hash/fnv" - "sync/atomic" - "time" -) - -type LayeredCache struct { - *Configuration - list *list.List - buckets []*layeredBucket - bucketMask uint32 - size int64 - deletables chan *Item - promotables chan *Item - donec chan struct{} -} - -// Create a new layered cache with the specified configuration. -// A layered cache used a two keys to identify a value: a primary key -// and a secondary key. Get, Set and Delete require both a primary and -// secondary key. However, DeleteAll requires only a primary key, deleting -// all values that share the same primary key. 
- -// Layered Cache is useful as an HTTP cache, where an HTTP purge might -// delete multiple variants of the same resource: -// primary key = "user/44" -// secondary key 1 = ".json" -// secondary key 2 = ".xml" - -// See ccache.Configure() for creating a configuration -func Layered(config *Configuration) *LayeredCache { - c := &LayeredCache{ - list: list.New(), - Configuration: config, - bucketMask: uint32(config.buckets) - 1, - buckets: make([]*layeredBucket, config.buckets), - deletables: make(chan *Item, config.deleteBuffer), - } - for i := 0; i < int(config.buckets); i++ { - c.buckets[i] = &layeredBucket{ - buckets: make(map[string]*bucket), - } - } - c.restart() - return c -} - -// Get an item from the cache. Returns nil if the item wasn't found. -// This can return an expired item. Use item.Expired() to see if the item -// is expired and item.TTL() to see how long until the item expires (which -// will be negative for an already expired item). -func (c *LayeredCache) Get(primary, secondary string) *Item { - item := c.bucket(primary).get(primary, secondary) - if item == nil { - return nil - } - if item.expires > time.Now().UnixNano() { - c.promote(item) - } - return item -} - -// Get the secondary cache for a given primary key. This operation will -// never return nil. In the case where the primary key does not exist, a -// new, underlying, empty bucket will be created and returned. -func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache { - primaryBkt := c.bucket(primary) - bkt := primaryBkt.getSecondaryBucket(primary) - primaryBkt.Lock() - if bkt == nil { - bkt = &bucket{lookup: make(map[string]*Item)} - primaryBkt.buckets[primary] = bkt - } - primaryBkt.Unlock() - return &SecondaryCache{ - bucket: bkt, - pCache: c, - } -} - -// Used when the cache was created with the Track() configuration option. 
-// Avoid otherwise -func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem { - item := c.Get(primary, secondary) - if item == nil { - return NilTracked - } - item.track() - return item -} - -// Set the value in the cache for the specified duration -func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) { - c.set(primary, secondary, value, duration) -} - -// Replace the value if it exists, does not set if it doesn't. -// Returns true if the item existed an was replaced, false otherwise. -// Replace does not reset item's TTL nor does it alter its position in the LRU -func (c *LayeredCache) Replace(primary, secondary string, value interface{}) bool { - item := c.bucket(primary).get(primary, secondary) - if item == nil { - return false - } - c.Set(primary, secondary, value, item.TTL()) - return true -} - -// Attempts to get the value from the cache and calles fetch on a miss. -// If fetch returns an error, no value is cached and the error is returned back -// to the caller. -func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { - item := c.Get(primary, secondary) - if item != nil { - return item, nil - } - value, err := fetch() - if err != nil { - return nil, err - } - return c.set(primary, secondary, value, duration), nil -} - -// Remove the item from the cache, return true if the item was present, false otherwise. -func (c *LayeredCache) Delete(primary, secondary string) bool { - item := c.bucket(primary).delete(primary, secondary) - if item != nil { - c.deletables <- item - return true - } - return false -} - -// Deletes all items that share the same primary key -func (c *LayeredCache) DeleteAll(primary string) bool { - return c.bucket(primary).deleteAll(primary, c.deletables) -} - -//this isn't thread safe. 
It's meant to be called from non-concurrent tests -func (c *LayeredCache) Clear() { - for _, bucket := range c.buckets { - bucket.clear() - } - c.size = 0 - c.list = list.New() -} - -func (c *LayeredCache) Stop() { - close(c.promotables) - <-c.donec -} - -func (c *LayeredCache) restart() { - c.promotables = make(chan *Item, c.promoteBuffer) - c.donec = make(chan struct{}) - go c.worker() -} - -func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration) *Item { - item, existing := c.bucket(primary).set(primary, secondary, value, duration) - if existing != nil { - c.deletables <- existing - } - c.promote(item) - return item -} - -func (c *LayeredCache) bucket(key string) *layeredBucket { - h := fnv.New32a() - h.Write([]byte(key)) - return c.buckets[h.Sum32()&c.bucketMask] -} - -func (c *LayeredCache) promote(item *Item) { - c.promotables <- item -} - -func (c *LayeredCache) worker() { - defer close(c.donec) - for { - select { - case item, ok := <-c.promotables: - if ok == false { - return - } - if c.doPromote(item) && c.size > c.maxSize { - c.gc() - } - case item := <-c.deletables: - if item.element == nil { - item.promotions = -2 - } else { - c.size -= item.size - c.list.Remove(item.element) - } - } - } -} - -func (c *LayeredCache) doPromote(item *Item) bool { - // deleted before it ever got promoted - if item.promotions == -2 { - return false - } - if item.element != nil { //not a new item - if item.shouldPromote(c.getsPerPromote) { - c.list.MoveToFront(item.element) - item.promotions = 0 - } - return false - } - c.size += item.size - item.element = c.list.PushFront(item) - return true -} - -func (c *LayeredCache) gc() { - element := c.list.Back() - for i := 0; i < c.itemsToPrune; i++ { - if element == nil { - return - } - prev := element.Prev() - item := element.Value.(*Item) - if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 { - c.bucket(item.group).delete(item.group, item.key) - c.size -= item.size - 
c.list.Remove(element) - item.promotions = -2 - } - element = prev - } -} diff --git a/vendor/github.com/karlseguin/ccache/license.txt b/vendor/github.com/karlseguin/ccache/license.txt deleted file mode 100644 index aebeebfa520..00000000000 --- a/vendor/github.com/karlseguin/ccache/license.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013 Karl Seguin. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/karlseguin/ccache/readme.md b/vendor/github.com/karlseguin/ccache/readme.md deleted file mode 100644 index 8a7efa2e2b4..00000000000 --- a/vendor/github.com/karlseguin/ccache/readme.md +++ /dev/null @@ -1,172 +0,0 @@ -# CCache -CCache is an LRU Cache, written in Go, focused on supporting high concurrency. 
- -Lock contention on the list is reduced by: - -* Introducing a window which limits the frequency that an item can get promoted -* Using a buffered channel to queue promotions for a single worker -* Garbage collecting within the same thread as the worker - -## Setup - -First, download the project: - - go get github.com/karlseguin/ccache - -## Configuration -Next, import and create a `Cache` instance: - - -```go -import ( - "github.com/karlseguin/ccache" -) - -var cache = ccache.New(ccache.Configure()) -``` - -`Configure` exposes a chainable API: - -```go -var cache = ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)) -``` - -The most likely configuration options to tweak are: - -* `MaxSize(int)` - the maximum number size to store in the cache (default: 5000) -* `GetsPerPromote(int)` - the number of times an item is fetched before we promote it. For large caches with long TTLs, it normally isn't necessary to promote an item after every fetch (default: 3) -* `ItemsToPrune(int)` - the number of items to prune when we hit `MaxSize`. Freeing up more than 1 slot at a time improved performance (default: 500) - -Configurations that change the internals of the cache, which aren't as likely to need tweaking: - -* `Buckets` - ccache shards its internal map to provide a greater amount of concurrency. Must be a power of 2 (default: 16). -* `PromoteBuffer(int)` - the size of the buffer to use to queue promotions (default: 1024) -* `DeleteBuffer(int)` the size of the buffer to use to queue deletions (default: 1024) - -## Usage - -Once the cache is setup, you can `Get`, `Set` and `Delete` items from it. 
A `Get` returns an `*Item`: - -### Get -```go -item := cache.Get("user:4") -if item == nil { - //handle -} else { - user := item.Value().(*User) -} -``` -The returned `*Item` exposes a number of methods: - -* `Value() interface{}` - the value cached -* `Expired() bool` - whether the item is expired or not -* `TTL() time.Duration` - the duration before the item expires (will be a negative value for expired items) -* `Expires() time.Time` - the time the item will expire - -By returning expired items, CCache lets you decide if you want to serve stale content or not. For example, you might decide to serve up slightly stale content (< 30 seconds old) while re-fetching newer data in the background. You might also decide to serve up infinitely stale content if you're unable to get new data from your source. - -### Set -`Set` expects the key, value and ttl: - -```go -cache.Set("user:4", user, time.Minute * 10) -``` - -### Fetch -There's also a `Fetch` which mixes a `Get` and a `Set`: - -```go -item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error) { - //code to fetch the data incase of a miss - //should return the data to cache and the error, if any -}) -``` - -### Delete -`Delete` expects the key to delete. It's ok to call `Delete` on a non-existant key: - -```go -cache.Delete("user:4") -``` - -### Extend -The life of an item can be changed via the `Extend` method. This will change the expiry of the item by the specified duration relative to the current time. - -### Replace -The value of an item can be updated to a new value without renewing the item's TTL or it's position in the LRU: - -```go -cache.Replace("user:4", user) -``` - -`Replace` returns true if the item existed (and thus was replaced). In the case where the key was not in the cache, the value *is not* inserted and false is returned. - -### Stop -The cache's background worker can be stopped by calling `Stop`. 
Once `Stop` is called -the cache should not be used (calls are likely to panic). Stop must be called in order to allow the garbage collector to reap the cache. - -## Tracking -CCache supports a special tracking mode which is meant to be used in conjunction with other pieces of your code that maintains a long-lived reference to data. - -When you configure your cache with `Track()`: - -```go -cache = ccache.New(ccache.Configure().Track()) -``` - -The items retrieved via `TrackingGet` will not be eligible for purge until `Release` is called on them: - -```go -item := cache.TrackingGet("user:4") -user := item.Value() //will be nil if "user:4" didn't exist in the cache -item.Release() //can be called even if item.Value() returned nil -``` - -In practive, `Release` wouldn't be called until later, at some other place in your code. - -There's a couple reason to use the tracking mode if other parts of your code also hold references to objects. First, if you're already going to hold a reference to these objects, there's really no reason not to have them in the cache - the memory is used up anyways. - -More important, it helps ensure that you're code returns consistent data. With tracking, "user:4" might be purged, and a subsequent `Fetch` would reload the data. This can result in different versions of "user:4" being returned by different parts of your system. - -## LayeredCache - -CCache's `LayeredCache` stores and retrieves values by both a primary and secondary key. Deletion can happen against either the primary and secondary key, or the primary key only (removing all values that share the same primary key). - -`LayeredCache` is useful for HTTP caching, when you want to purge all variations of a request. 
- -`LayeredCache` takes the same configuration object as the main cache, exposes the same optional tracking capabilities, but exposes a slightly different API: - -```go -cache := ccache.Layered(ccache.Configure()) - -cache.Set("/users/goku", "type:json", "{value_to_cache}", time.Minute * 5) -cache.Set("/users/goku", "type:xml", "", time.Minute * 5) - -json := cache.Get("/users/goku", "type:json") -xml := cache.Get("/users/goku", "type:xml") - -cache.Delete("/users/goku", "type:json") -cache.Delete("/users/goku", "type:xml") -// OR -cache.DeleteAll("/users/goku") -``` - -# SecondaryCache - -In some cases, when using a `LayeredCache`, it may be desirable to always be acting on the secondary portion of the cache entry. This could be the case where the primary key is used as a key elsewhere in your code. The `SecondaryCache` is retrieved with: - -```go -cache := ccache.Layered(ccache.Configure()) -sCache := cache.GetOrCreateSecondaryCache("/users/goku") -sCache.Set("type:json", "{value_to_cache}", time.Minute * 5) -``` - -The semantics for interacting with the `SecondaryCache` are exactly the same as for a regular `Cache`. However, one difference is that `Get` will not return nil, but will return an empty 'cache' for a non-existent primary key. - -## Size -By default, items added to a cache have a size of 1. This means that if you configure `MaxSize(10000)`, you'll be able to store 10000 items in the cache. - -However, if the values you set into the cache have a method `Size() int64`, this size will be used. Note that ccache has an overhead of ~350 bytes per entry, which isn't taken into account. In other words, given a filled up cache, with `MaxSize(4096000)` and items that return a `Size() int64` of 2048, we can expect to find 2000 items (4096000/2048) taking a total space of 4796000 bytes. - -## Want Something Simpler? 
-For a simpler cache, checkout out [rcache](https://github.com/karlseguin/rcache) diff --git a/vendor/github.com/karlseguin/ccache/secondarycache.go b/vendor/github.com/karlseguin/ccache/secondarycache.go deleted file mode 100644 index f901fde0c55..00000000000 --- a/vendor/github.com/karlseguin/ccache/secondarycache.go +++ /dev/null @@ -1,72 +0,0 @@ -package ccache - -import "time" - -type SecondaryCache struct { - bucket *bucket - pCache *LayeredCache -} - -// Get the secondary key. -// The semantics are the same as for LayeredCache.Get -func (s *SecondaryCache) Get(secondary string) *Item { - return s.bucket.get(secondary) -} - -// Set the secondary key to a value. -// The semantics are the same as for LayeredCache.Set -func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.Duration) *Item { - item, existing := s.bucket.set(secondary, value, duration) - if existing != nil { - s.pCache.deletables <- existing - } - s.pCache.promote(item) - return item -} - -// Fetch or set a secondary key. -// The semantics are the same as for LayeredCache.Fetch -func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { - item := s.Get(secondary) - if item != nil { - return item, nil - } - value, err := fetch() - if err != nil { - return nil, err - } - return s.Set(secondary, value, duration), nil -} - -// Delete a secondary key. -// The semantics are the same as for LayeredCache.Delete -func (s *SecondaryCache) Delete(secondary string) bool { - item := s.bucket.delete(secondary) - if item != nil { - s.pCache.deletables <- item - return true - } - return false -} - -// Replace a secondary key. -// The semantics are the same as for LayeredCache.Replace -func (s *SecondaryCache) Replace(secondary string, value interface{}) bool { - item := s.Get(secondary) - if item == nil { - return false - } - s.Set(secondary, value, item.TTL()) - return true -} - -// Track a secondary key. 
-// The semantics are the same as for LayeredCache.TrackingGet -func (c *SecondaryCache) TrackingGet(secondary string) TrackedItem { - item := c.Get(secondary) - if item == nil { - return NilTracked - } - item.track() - return item -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD b/vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD new file mode 100644 index 00000000000..bf4e2ecbebb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "annotations.pb.go", + "http.pb.go", + ], + importpath = "google.golang.org/genproto/googleapis/api/annotations", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/protoc-gen-go/descriptor:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 00000000000..53d57f67a53 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,64 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/annotations.proto + +/* +Package annotations is a generated protocol buffer package. 
+ +It is generated from these files: + google/api/annotations.proto + google/api/http.proto + +It has these top-level messages: + Http + HttpRule + CustomHttpPattern +*/ +package annotations + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +var E_Http = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", +} + +func init() { + proto.RegisterExtension(E_Http) +} + +func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, + 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, + 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, + 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, + 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, + 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, + 0x83, 
0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, + 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, + 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, + 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, + 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new file mode 100644 index 00000000000..f91c604620b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,566 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/http.proto + +package annotations + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Defines the HTTP configuration for a service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. 
+ Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *Http) Reset() { *m = Http{} } +func (m *Http) String() string { return proto.CompactTextString(m) } +func (*Http) ProtoMessage() {} +func (*Http) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Http) GetRules() []*HttpRule { + if m != nil { + return m.Rules + } + return nil +} + +// `HttpRule` defines the mapping of an RPC method to one or more HTTP +// REST APIs. The mapping determines what portions of the request +// message are populated from the path, query parameters, or body of +// the HTTP request. The mapping is typically specified as an +// `google.api.http` annotation, see "google/api/annotations.proto" +// for details. +// +// The mapping consists of a field specifying the path template and +// method kind. The path template can refer to fields in the request +// message, as in the example below which describes a REST GET +// operation on a resource collection of messages: +// +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// SubMessage sub = 2; // `sub.subfield` is url-mapped +// } +// message Message { +// string text = 1; // content of the resource +// } +// +// The same http annotation can alternatively be expressed inside the +// `GRPC API Configuration` YAML file. +// +// http: +// rules: +// - selector: .Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// This definition enables an automatic, bidrectional mapping of HTTP +// JSON to RPC. 
Example: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` +// +// In general, not only fields but also field paths can be referenced +// from a path pattern. Fields mapped to the path pattern cannot be +// repeated and must have a primitive (non-message) type. +// +// Any fields in the request message which are not bound by the path +// pattern automatically become (optional) HTTP query +// parameters. Assume the following definition of the request message: +// +// +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// int64 revision = 2; // becomes a parameter +// SubMessage sub = 3; // `sub.subfield` becomes a parameter +// } +// +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` +// +// Note that fields which are mapped to HTTP parameters must have a +// primitive type or a repeated primitive type. Message types are not +// allowed. In the case of a repeated type, the parameter can be +// repeated in the URL, as in `...?param=A¶m=B`. +// +// For HTTP method kinds which allow a request body, the `body` field +// specifies the mapping. 
Consider a REST update method on the +// message resource collection: +// +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice of +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. 
Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// +// This enables the following two alternative HTTP JSON to RPC +// mappings: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping +// +// The rules for mapping HTTP path, query parameters, and body fields +// to the request message are as follows: +// +// 1. The `body` field specifies either `*` or a field path, or is +// omitted. If omitted, it assumes there is no HTTP body. +// 2. Leaf fields (recursive expansion of nested messages in the +// request) can be classified into three types: +// (a) Matched in the URL template. +// (b) Covered by body (if body is `*`, everything except (a) fields; +// else everything under the body field) +// (c) All other fields. +// 3. URL query parameters found in the HTTP request are mapped to (c) fields. +// 4. Any body sent with an HTTP request can contain only (b) fields. +// +// The syntax of the path template is as follows: +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single path segment. It follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion. +// +// The syntax `**` matches zero or more path segments. 
It follows the semantics +// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved +// Expansion. NOTE: it must be the last segment in the path except the Verb. +// +// The syntax `LITERAL` matches literal text in the URL path. +// +// The syntax `Variable` matches the entire path as specified by its template; +// this nested template must not contain further variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// NOTE: the field paths in variables and in the `body` must not refer to +// repeated fields or map fields. +// +// Use CustomHttpPattern to specify any HTTP method that is not included in the +// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for +// a given URL path rule. The wild-card rule is useful for services that provide +// content to Web (HTML) clients. +type HttpRule struct { + // Selects methods to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + // + // Types that are valid to be assigned to Pattern: + // *HttpRule_Get + // *HttpRule_Put + // *HttpRule_Post + // *HttpRule_Delete + // *HttpRule_Patch + // *HttpRule_Custom + Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` + // The name of the request field whose value is mapped to the HTTP body, or + // `*` for mapping all fields not captured by the path pattern to the HTTP + // body. NOTE: the referred field must not be a repeated field and must be + // present at the top-level of request message type. 
+ Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"` + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"` +} + +func (m *HttpRule) Reset() { *m = HttpRule{} } +func (m *HttpRule) String() string { return proto.CompactTextString(m) } +func (*HttpRule) ProtoMessage() {} +func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + Get string `protobuf:"bytes,2,opt,name=get,oneof"` +} +type HttpRule_Put struct { + Put string `protobuf:"bytes,3,opt,name=put,oneof"` +} +type HttpRule_Post struct { + Post string `protobuf:"bytes,4,opt,name=post,oneof"` +} +type HttpRule_Delete struct { + Delete string `protobuf:"bytes,5,opt,name=delete,oneof"` +} +type HttpRule_Patch struct { + Patch string `protobuf:"bytes,6,opt,name=patch,oneof"` +} +type HttpRule_Custom struct { + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} +func (*HttpRule_Put) isHttpRule_Pattern() {} +func (*HttpRule_Post) isHttpRule_Pattern() {} +func (*HttpRule_Delete) isHttpRule_Pattern() {} +func (*HttpRule_Patch) isHttpRule_Pattern() {} +func (*HttpRule_Custom) isHttpRule_Pattern() {} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + +func (m *HttpRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *HttpRule) GetGet() string { + if x, ok := m.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (m *HttpRule) GetPut() string { + if x, ok := m.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return 
"" +} + +func (m *HttpRule) GetPost() string { + if x, ok := m.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (m *HttpRule) GetDelete() string { + if x, ok := m.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (m *HttpRule) GetPatch() string { + if x, ok := m.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (m *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := m.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (m *HttpRule) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func (m *HttpRule) GetAdditionalBindings() []*HttpRule { + if m != nil { + return m.AdditionalBindings + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } +} + +func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Get) + case *HttpRule_Put: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Put) + case *HttpRule_Post: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Post) + case *HttpRule_Delete: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Delete) + case *HttpRule_Patch: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Patch) + case *HttpRule_Custom: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := 
b.EncodeMessage(x.Custom); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) + } + return nil +} + +func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HttpRule) + switch tag { + case 2: // pattern.get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Get{x} + return true, err + case 3: // pattern.put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Put{x} + return true, err + case 4: // pattern.post + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Post{x} + return true, err + case 5: // pattern.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Delete{x} + return true, err + case 6: // pattern.patch + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Patch{x} + return true, err + case 8: // pattern.custom + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomHttpPattern) + err := b.DecodeMessage(msg) + m.Pattern = &HttpRule_Custom{msg} + return true, err + default: + return false, nil + } +} + +func _HttpRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Get))) + n += len(x.Get) + case *HttpRule_Put: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Put))) + n += len(x.Put) + case *HttpRule_Post: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(len(x.Post))) + n += len(x.Post) + case *HttpRule_Delete: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Delete))) + n += len(x.Delete) + case *HttpRule_Patch: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Patch))) + n += len(x.Patch) + case *HttpRule_Custom: + s := proto.Size(x.Custom) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A custom pattern is used for defining custom HTTP verb. +type CustomHttpPattern struct { + // The name of this custom HTTP verb. + Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` + // The path matched by this custom verb. + Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` +} + +func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } +func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } +func (*CustomHttpPattern) ProtoMessage() {} +func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *CustomHttpPattern) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *CustomHttpPattern) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func init() { + proto.RegisterType((*Http)(nil), "google.api.Http") + proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") + proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") +} + +func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 359 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6a, 0xe3, 0x30, + 0x10, 0xc6, 0xd7, 0x89, 0xe3, 0x24, 0x13, 0x58, 0x58, 0x6d, 0x76, 0x11, 0x85, 0x42, 0xc8, 0x29, + 0xf4, 0x60, 0x43, 0x7a, 
0xe8, 0x21, 0xa7, 0xb8, 0x94, 0xa6, 0xb7, 0xe0, 0x63, 0x2f, 0x45, 0xb1, + 0x85, 0xa2, 0xd6, 0x91, 0x84, 0x3d, 0x3e, 0xf4, 0x75, 0xfa, 0x0e, 0x7d, 0xb7, 0x1e, 0x8b, 0xfe, + 0xa4, 0x09, 0x14, 0x7a, 0x9b, 0xef, 0x37, 0x9f, 0x34, 0xa3, 0x19, 0xc1, 0x3f, 0xa1, 0xb5, 0xa8, + 0x79, 0xc6, 0x8c, 0xcc, 0xf6, 0x88, 0x26, 0x35, 0x8d, 0x46, 0x4d, 0xc0, 0xe3, 0x94, 0x19, 0x39, + 0x5f, 0x42, 0xbc, 0x41, 0x34, 0xe4, 0x0a, 0x06, 0x4d, 0x57, 0xf3, 0x96, 0x46, 0xb3, 0xfe, 0x62, + 0xb2, 0x9c, 0xa6, 0x27, 0x4f, 0x6a, 0x0d, 0x45, 0x57, 0xf3, 0xc2, 0x5b, 0xe6, 0xef, 0x3d, 0x18, + 0x1d, 0x19, 0xb9, 0x80, 0x51, 0xcb, 0x6b, 0x5e, 0xa2, 0x6e, 0x68, 0x34, 0x8b, 0x16, 0xe3, 0xe2, + 0x4b, 0x13, 0x02, 0x7d, 0xc1, 0x91, 0xf6, 0x2c, 0xde, 0xfc, 0x2a, 0xac, 0xb0, 0xcc, 0x74, 0x48, + 0xfb, 0x47, 0x66, 0x3a, 0x24, 0x53, 0x88, 0x8d, 0x6e, 0x91, 0xc6, 0x01, 0x3a, 0x45, 0x28, 0x24, + 0x15, 0xaf, 0x39, 0x72, 0x3a, 0x08, 0x3c, 0x68, 0xf2, 0x1f, 0x06, 0x86, 0x61, 0xb9, 0xa7, 0x49, + 0x48, 0x78, 0x49, 0x6e, 0x20, 0x29, 0xbb, 0x16, 0xf5, 0x81, 0x8e, 0x66, 0xd1, 0x62, 0xb2, 0xbc, + 0x3c, 0x7f, 0xc5, 0xad, 0xcb, 0xd8, 0xbe, 0xb7, 0x0c, 0x91, 0x37, 0xca, 0x5e, 0xe8, 0xed, 0x84, + 0x40, 0xbc, 0xd3, 0xd5, 0x2b, 0x1d, 0xba, 0x07, 0xb8, 0x98, 0xdc, 0xc1, 0x5f, 0x56, 0x55, 0x12, + 0xa5, 0x56, 0xac, 0x7e, 0xda, 0x49, 0x55, 0x49, 0x25, 0x5a, 0x3a, 0xf9, 0x61, 0x3e, 0xe4, 0x74, + 0x20, 0x0f, 0xfe, 0x7c, 0x0c, 0x43, 0xe3, 0xeb, 0xcd, 0x57, 0xf0, 0xe7, 0x5b, 0x13, 0xb6, 0xf4, + 0x8b, 0x54, 0x55, 0x98, 0x9d, 0x8b, 0x2d, 0x33, 0x0c, 0xf7, 0x7e, 0x70, 0x85, 0x8b, 0xf3, 0x67, + 0xf8, 0x5d, 0xea, 0xc3, 0x59, 0xd9, 0x7c, 0xec, 0xae, 0xb1, 0x1b, 0xdd, 0x46, 0x8f, 0xeb, 0x90, + 0x10, 0xba, 0x66, 0x4a, 0xa4, 0xba, 0x11, 0x99, 0xe0, 0xca, 0xed, 0x3b, 0xf3, 0x29, 0x66, 0x64, + 0xeb, 0x7e, 0x02, 0x53, 0x4a, 0x23, 0xb3, 0x6d, 0xb6, 0xab, 0xb3, 0xf8, 0x23, 0x8a, 0xde, 0x7a, + 0xf1, 0xfd, 0x7a, 0xfb, 0xb0, 0x4b, 0xdc, 0xb9, 0xeb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, + 0x15, 0x60, 0x5b, 0x40, 0x02, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/grpc/BUILD b/vendor/google.golang.org/grpc/BUILD index 9b550cb77b9..48803bf4539 100644 --- a/vendor/google.golang.org/grpc/BUILD +++ b/vendor/google.golang.org/grpc/BUILD @@ -57,6 +57,7 @@ filegroup( "//vendor/google.golang.org/grpc/credentials:all-srcs", "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1:all-srcs", "//vendor/google.golang.org/grpc/grpclog:all-srcs", + "//vendor/google.golang.org/grpc/health/grpc_health_v1:all-srcs", "//vendor/google.golang.org/grpc/internal:all-srcs", "//vendor/google.golang.org/grpc/keepalive:all-srcs", "//vendor/google.golang.org/grpc/metadata:all-srcs", diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD b/vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD new file mode 100644 index 00000000000..9a60f52bef1 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["health.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["health.pb.go"], + importpath = "google.golang.org/grpc/health/grpc_health_v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000000..89c4d459f0a --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go 
@@ -0,0 +1,176 @@ +// Code generated by protoc-gen-go. +// source: health.proto +// DO NOT EDIT! + +/* +Package grpc_health_v1 is a generated protocol buffer package. + +It is generated from these files: + health.proto + +It has these top-level messages: + HealthCheckRequest + HealthCheckResponse +*/ +package grpc_health_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return 
proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "health.proto", +} + +func init() { proto.RegisterFile("health.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 204 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc, + 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x83, + 0x0a, 0x95, 0x19, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x39, 0xce, 0x19, 0xa9, 0xc9, 0xd9, 0x41, + 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, + 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0xd2, 0x1c, 0x46, 0x2e, 0x61, 0x14, + 0x0d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x9e, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0xa5, + 0xc5, 0x60, 0x0d, 0x7c, 
0x46, 0x86, 0x7a, 0xa8, 0x16, 0xe9, 0x61, 0xd1, 0xa4, 0x17, 0x0c, 0x32, + 0x34, 0x2f, 0x3d, 0x18, 0xac, 0x31, 0x08, 0x6a, 0x80, 0x92, 0x15, 0x17, 0x2f, 0x8a, 0x84, 0x10, + 0x37, 0x17, 0x7b, 0xa8, 0x9f, 0xb7, 0x9f, 0x7f, 0xb8, 0x9f, 0x00, 0x03, 0x88, 0x13, 0xec, 0x1a, + 0x14, 0xe6, 0xe9, 0xe7, 0x2e, 0xc0, 0x28, 0xc4, 0xcf, 0xc5, 0xed, 0xe7, 0x1f, 0x12, 0x0f, 0x13, + 0x60, 0x32, 0x8a, 0xe2, 0x62, 0x83, 0x58, 0x24, 0x14, 0xc0, 0xc5, 0x0a, 0xb6, 0x4c, 0x48, 0x09, + 0xaf, 0x4b, 0xc0, 0xfe, 0x95, 0x52, 0x26, 0xc2, 0xb5, 0x49, 0x6c, 0xe0, 0x10, 0x34, 0x06, 0x04, + 0x00, 0x00, 0xff, 0xff, 0xac, 0x56, 0x2a, 0xcb, 0x51, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto new file mode 100644 index 00000000000..e2dc0889258 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package grpc.health.v1; + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} + +service Health{ + rpc Check(HealthCheckRequest) returns (HealthCheckResponse); +} From ac7690b0fe4586978d1d1aff51f04ee358aca77b Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Wed, 13 Dec 2017 12:24:59 -0800 Subject: [PATCH 640/794] Version bump to grpc v1.7.5 --- Godeps/Godeps.json | 80 +- Godeps/LICENSES | 4230 +++++++++++++++-- vendor/google.golang.org/grpc/.please-update | 0 vendor/google.golang.org/grpc/.travis.yml | 19 +- vendor/google.golang.org/grpc/AUTHORS | 1 + vendor/google.golang.org/grpc/BUILD | 15 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 58 +- vendor/google.golang.org/grpc/LICENSE | 224 +- vendor/google.golang.org/grpc/Makefile | 13 +- vendor/google.golang.org/grpc/PATENTS | 22 - vendor/google.golang.org/grpc/README.md | 12 +- vendor/google.golang.org/grpc/backoff.go | 18 + 
vendor/google.golang.org/grpc/balancer.go | 66 +- vendor/google.golang.org/grpc/balancer/BUILD | 28 + .../grpc/balancer/balancer.go | 206 + .../grpc/balancer_conn_wrappers.go | 252 + .../grpc/balancer_v1_wrapper.go | 367 ++ vendor/google.golang.org/grpc/call.go | 133 +- vendor/google.golang.org/grpc/clientconn.go | 988 ++-- vendor/google.golang.org/grpc/codec.go | 40 +- .../grpc/codes/code_string.go | 4 +- vendor/google.golang.org/grpc/codes/codes.go | 37 +- .../google.golang.org/grpc/connectivity/BUILD | 26 + .../grpc/connectivity/connectivity.go | 72 + vendor/google.golang.org/grpc/coverage.sh | 48 - .../grpc/credentials/credentials.go | 47 +- .../grpc/credentials/credentials_util_go17.go | 35 +- .../grpc/credentials/credentials_util_go18.go | 35 +- .../credentials/credentials_util_pre_go17.go | 35 +- vendor/google.golang.org/grpc/doc.go | 20 +- vendor/google.golang.org/grpc/go16.go | 56 - vendor/google.golang.org/grpc/go17.go | 55 - vendor/google.golang.org/grpc/grpclb.go | 241 +- .../grpc/grpclb/grpc_lb_v1/BUILD | 14 +- .../grpc/grpclb/grpc_lb_v1/doc.go | 21 + .../grpc/grpclb/grpc_lb_v1/messages/BUILD | 29 + .../{grpclb.pb.go => messages/messages.pb.go} | 118 +- .../{grpclb.proto => messages/messages.proto} | 50 +- vendor/google.golang.org/grpc/grpclog/BUILD | 6 +- .../google.golang.org/grpc/grpclog/grpclog.go | 123 + .../google.golang.org/grpc/grpclog/logger.go | 104 +- .../grpc/grpclog/loggerv2.go | 195 + .../grpc/health/grpc_health_v1/health.pb.go | 54 +- .../grpc/health/grpc_health_v1/health.proto | 14 + vendor/google.golang.org/grpc/interceptor.go | 41 +- .../grpc/internal/internal.go | 35 +- .../grpc/keepalive/keepalive.go | 39 +- .../grpc/metadata/metadata.go | 87 +- vendor/google.golang.org/grpc/naming/BUILD | 11 +- .../grpc/naming/dns_resolver.go | 290 ++ vendor/google.golang.org/grpc/naming/go17.go | 34 + vendor/google.golang.org/grpc/naming/go18.go | 28 + .../google.golang.org/grpc/naming/naming.go | 35 +- vendor/google.golang.org/grpc/peer/peer.go | 
38 +- .../google.golang.org/grpc/picker_wrapper.go | 141 + vendor/google.golang.org/grpc/pickfirst.go | 95 + vendor/google.golang.org/grpc/proxy.go | 38 +- vendor/google.golang.org/grpc/resolver/BUILD | 22 + .../grpc/resolver/resolver.go | 143 + .../grpc/resolver_conn_wrapper.go | 139 + vendor/google.golang.org/grpc/rpc_util.go | 355 +- vendor/google.golang.org/grpc/server.go | 560 ++- .../google.golang.org/grpc/stats/handlers.go | 42 +- vendor/google.golang.org/grpc/stats/stats.go | 147 +- vendor/google.golang.org/grpc/status/BUILD | 1 + .../google.golang.org/grpc/status/status.go | 73 +- vendor/google.golang.org/grpc/stream.go | 254 +- vendor/google.golang.org/grpc/tap/tap.go | 55 +- vendor/google.golang.org/grpc/trace.go | 50 +- vendor/google.golang.org/grpc/transport/BUILD | 4 +- .../grpc/transport/bdp_estimator.go | 143 + .../grpc/transport/control.go | 174 +- .../google.golang.org/grpc/transport/go16.go | 46 - .../google.golang.org/grpc/transport/go17.go | 46 - .../grpc/transport/handler_server.go | 107 +- .../grpc/transport/http2_client.go | 948 ++-- .../grpc/transport/http2_server.go | 900 ++-- .../grpc/transport/http_util.go | 290 +- .../google.golang.org/grpc/transport/log.go | 50 + .../grpc/transport/transport.go | 357 +- vendor/google.golang.org/grpc/vet.sh | 78 + 81 files changed, 10244 insertions(+), 3863 deletions(-) create mode 100644 vendor/google.golang.org/grpc/.please-update create mode 100644 vendor/google.golang.org/grpc/AUTHORS delete mode 100644 vendor/google.golang.org/grpc/PATENTS create mode 100644 vendor/google.golang.org/grpc/balancer/BUILD create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go create mode 100644 vendor/google.golang.org/grpc/connectivity/BUILD create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go delete mode 100755 
vendor/google.golang.org/grpc/coverage.sh delete mode 100644 vendor/google.golang.org/grpc/go16.go delete mode 100644 vendor/google.golang.org/grpc/go17.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD rename vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/{grpclb.pb.go => messages/messages.pb.go} (79%) rename vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/{grpclb.proto => messages/messages.proto} (71%) create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/naming/go17.go create mode 100644 vendor/google.golang.org/grpc/naming/go18.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/resolver/BUILD create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/transport/bdp_estimator.go delete mode 100644 vendor/google.golang.org/grpc/transport/go16.go delete mode 100644 vendor/google.golang.org/grpc/transport/go17.go create mode 100644 vendor/google.golang.org/grpc/transport/log.go create mode 100755 vendor/google.golang.org/grpc/vet.sh diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 989543a931a..675d49d51b5 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -3018,78 +3018,98 @@ }, { "ImportPath": "google.golang.org/grpc", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Comment": "v1.7.5", + "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/connectivity", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { 
"ImportPath": "google.golang.org/grpc/naming", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "gopkg.in/gcfg.v1", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index ab65bb6703b..8021a07e90e 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -90010,540 +90010,3990 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ = vendor/google.golang.org/grpc licensed under: = -Copyright 2014, Google Inc. -All rights reserved. 
-Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/balancer licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/codes licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/connectivity licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/credentials licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/grpclb/grpc_lb_v1 licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/grpclog licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/health/grpc_health_v1 licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/internal licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/keepalive licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/metadata licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/naming licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/peer licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + +================================================================================ += vendor/google.golang.org/grpc/resolver licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/stats licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/status licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/tap licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ ================================================================================ = vendor/google.golang.org/grpc/transport licensed under: = -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. -= vendor/google.golang.org/grpc/LICENSE a4bad33881612090c6035d8393175996 + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/google.golang.org/grpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 ================================================================================ diff --git a/vendor/google.golang.org/grpc/.please-update b/vendor/google.golang.org/grpc/.please-update new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index b3577c7ae20..22bf25004a3 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -1,19 +1,20 @@ language: go go: - - 1.6.3 - - 1.7 - - 1.8 + - 1.7.x + - 1.8.x + - 1.9.x + +matrix: + include: + - go: 1.9.x + env: ARCH=386 go_import_path: google.golang.org/grpc before_install: - - go get github.com/golang/lint/golint - - go get -u golang.org/x/tools/cmd/goimports github.com/axw/gocov/gocov github.com/mattn/goveralls golang.org/x/tools/cmd/cover + - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi script: - - '! gofmt -s -d -l . 2>&1 | read' - - '! goimports -l . | read' - - '! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"' - - '! go tool vet -all . 
2>&1 | grep -vE "constant [0-9]+ not a string in call to Errorf" | grep -vF .pb.go:' # https://github.com/golang/protobuf/issues/214 + - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh || exit 1; fi - make test testrace diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 00000000000..e491a9e7f78 --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/BUILD b/vendor/google.golang.org/grpc/BUILD index 48803bf4539..b266ac7de56 100644 --- a/vendor/google.golang.org/grpc/BUILD +++ b/vendor/google.golang.org/grpc/BUILD @@ -5,15 +5,18 @@ go_library( srcs = [ "backoff.go", "balancer.go", + "balancer_conn_wrappers.go", + "balancer_v1_wrapper.go", "call.go", "clientconn.go", "codec.go", "doc.go", - "go16.go", - "go17.go", "grpclb.go", "interceptor.go", + "picker_wrapper.go", + "pickfirst.go", "proxy.go", + "resolver_conn_wrapper.go", "rpc_util.go", "server.go", "stream.go", @@ -26,15 +29,18 @@ go_library( "//vendor/golang.org/x/net/context:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/golang.org/x/net/trace:go_default_library", + "//vendor/google.golang.org/grpc/balancer:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/connectivity:go_default_library", "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1:go_default_library", + "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages:go_default_library", "//vendor/google.golang.org/grpc/grpclog:go_default_library", "//vendor/google.golang.org/grpc/internal:go_default_library", "//vendor/google.golang.org/grpc/keepalive:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", "//vendor/google.golang.org/grpc/naming:go_default_library", 
"//vendor/google.golang.org/grpc/peer:go_default_library", + "//vendor/google.golang.org/grpc/resolver:go_default_library", "//vendor/google.golang.org/grpc/stats:go_default_library", "//vendor/google.golang.org/grpc/status:go_default_library", "//vendor/google.golang.org/grpc/tap:go_default_library", @@ -53,7 +59,9 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//vendor/google.golang.org/grpc/balancer:all-srcs", "//vendor/google.golang.org/grpc/codes:all-srcs", + "//vendor/google.golang.org/grpc/connectivity:all-srcs", "//vendor/google.golang.org/grpc/credentials:all-srcs", "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1:all-srcs", "//vendor/google.golang.org/grpc/grpclog:all-srcs", @@ -63,6 +71,7 @@ filegroup( "//vendor/google.golang.org/grpc/metadata:all-srcs", "//vendor/google.golang.org/grpc/naming:all-srcs", "//vendor/google.golang.org/grpc/peer:all-srcs", + "//vendor/google.golang.org/grpc/resolver:all-srcs", "//vendor/google.golang.org/grpc/stats:all-srcs", "//vendor/google.golang.org/grpc/status:all-srcs", "//vendor/google.golang.org/grpc/tap:all-srcs", diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 36cd6f7581b..a5c6e06e255 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,46 +1,32 @@ # How to contribute -We definitely welcome patches and contribution to grpc! Here are some guidelines -and information about how to do so. +We definitely welcome your patches and contributions to gRPC! -## Sending patches - -### Getting started - -1. Check out the code: - - $ go get google.golang.org/grpc - $ cd $GOPATH/src/google.golang.org/grpc - -1. Create a fork of the grpc-go repository. -1. Add your fork as a remote: - - $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git - -1. Make changes, commit them. -1. Run the test suite: - - $ make test - -1. Push your changes to your fork: - - $ git push fork ... 
- -1. Open a pull request. +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements In order to protect both you and ourselves, you will need to sign the [Contributor License Agreement](https://cla.developers.google.com/clas). -## Filing Issues -When filing an issue, make sure to answer these five questions: +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. + +- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). 
+ +- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -### Contributing code -Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. +- Exceptions to the rules can be made if there's a compelling reason for doing so. + diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE index f4988b45079..d6456956733 100644 --- a/vendor/google.golang.org/grpc/LICENSE +++ b/vendor/google.golang.org/grpc/LICENSE @@ -1,28 +1,202 @@ -Copyright 2014, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 03bb01f0b35..39606b564a6 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -20,24 +20,17 @@ proto: echo "error: protoc not installed" >&2; \ exit 1; \ fi - go get -u -v github.com/golang/protobuf/protoc-gen-go - # use $$dir as the root for all proto files in the same directory - for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \ - protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \ - done + go generate google.golang.org/grpc/... test: testdeps - go test -v -cpu 1,4 google.golang.org/grpc/... + go test -cpu 1,4 google.golang.org/grpc/... testrace: testdeps - go test -v -race -cpu 1,4 google.golang.org/grpc/... + go test -race -cpu 1,4 google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... -coverage: testdeps - ./coverage.sh --coveralls - .PHONY: \ all \ deps \ diff --git a/vendor/google.golang.org/grpc/PATENTS b/vendor/google.golang.org/grpc/PATENTS deleted file mode 100644 index 69b47959fab..00000000000 --- a/vendor/google.golang.org/grpc/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the gRPC project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of gRPC, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of gRPC. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of gRPC or any code incorporated within this -implementation of gRPC constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of gRPC -shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index ae0236f92f3..622a5dc3e85 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -2,7 +2,7 @@ [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) -The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. +The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. 
For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. Installation ------------ @@ -10,13 +10,13 @@ Installation To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` -$ go get google.golang.org/grpc +$ go get -u google.golang.org/grpc ``` Prerequisites ------------- -This requires Go 1.6 or later. +This requires Go 1.7 or later. Constraints ----------- @@ -26,9 +26,13 @@ Documentation ------------- See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). +Performance +----------- +See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). + Status ------ -GA +General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). FAQ --- diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index c99024ee302..090fbe87c52 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -1,3 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + package grpc import ( diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go index 9d943fbadae..ab65049ddc1 100644 --- a/vendor/google.golang.org/grpc/balancer.go +++ b/vendor/google.golang.org/grpc/balancer.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -35,6 +20,7 @@ package grpc import ( "fmt" + "net" "sync" "golang.org/x/net/context" @@ -60,6 +46,10 @@ type BalancerConfig struct { // use to dial to a remote load balancer server. The Balancer implementations // can ignore this if it does not need to talk to another party securely. DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) } // BalancerGetOptions configures a Get call. 
@@ -167,7 +157,7 @@ type roundRobin struct { func (rr *roundRobin) watchAddrUpdates() error { updates, err := rr.w.Next() if err != nil { - grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) return err } rr.mu.Lock() @@ -183,7 +173,7 @@ func (rr *roundRobin) watchAddrUpdates() error { for _, v := range rr.addrs { if addr == v.addr { exist = true - grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr) + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) break } } @@ -200,7 +190,7 @@ func (rr *roundRobin) watchAddrUpdates() error { } } default: - grpclog.Println("Unknown update.Op ", update.Op) + grpclog.Errorln("Unknown update.Op ", update.Op) } } // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. @@ -211,6 +201,10 @@ func (rr *roundRobin) watchAddrUpdates() error { if rr.done { return ErrClientConnClosing } + select { + case <-rr.addrCh: + default: + } rr.addrCh <- open return nil } @@ -233,7 +227,7 @@ func (rr *roundRobin) Start(target string, config BalancerConfig) error { return err } rr.w = w - rr.addrCh = make(chan []Address) + rr.addrCh = make(chan []Address, 1) go func() { for { if err := rr.watchAddrUpdates(); err != nil { @@ -385,6 +379,9 @@ func (rr *roundRobin) Notify() <-chan []Address { func (rr *roundRobin) Close() error { rr.mu.Lock() defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } rr.done = true if rr.w != nil { rr.w.Close() @@ -398,3 +395,14 @@ func (rr *roundRobin) Close() error { } return nil } + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). 
+type pickFirst struct { + *roundRobin +} + +func pickFirstBalancerV1(r naming.Resolver) Balancer { + return &pickFirst{&roundRobin{r: r}} +} diff --git a/vendor/google.golang.org/grpc/balancer/BUILD b/vendor/google.golang.org/grpc/balancer/BUILD new file mode 100644 index 00000000000..e422cbb250b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["balancer.go"], + importpath = "google.golang.org/grpc/balancer", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/connectivity:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", + "//vendor/google.golang.org/grpc/resolver:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 00000000000..84e10b630e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,206 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "errors" + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/resolver" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) + // defaultBuilder is the default balancer to use. + defaultBuilder Builder // TODO(bar) install pickfirst as default. +) + +// Register registers the balancer builder to the balancer map. +// b.Name will be used as the name registered with this builder. +func Register(b Builder) { + m[b.Name()] = b +} + +// Get returns the resolver builder registered with the given name. +// If no builder is register with the name, the default pickfirst will +// be used. +func Get(name string) Builder { + if b, ok := m[name]; ok { + return b + } + return defaultBuilder +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. +// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. 
+ // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct{} + +// ClientConn represents a gRPC ClientConn. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + + // UpdateBalancerState is called by balancer to nofity gRPC that some internal + // state in balancer has changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConn. + UpdateBalancerState(s connectivity.State, p Picker) + + // Target returns the dial target for this ClientConn. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. 
+ Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// PickOptions contains addition information for the Pick operation. +type PickOptions struct{} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot everytime its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type Picker interface { + // Pick returns the SubConn to be used to send the RPC. + // The returned SubConn must be one returned by NewSubConn(). + // + // This functions is expected to return: + // - a SubConn that is known to be READY; + // - ErrNoSubConnAvailable if no SubConn is available, but progress is being + // made (for example, some SubConn is in CONNECTING mode); + // - other errors if no active connecting is happening (for example, all SubConn + // are in TRANSIENT_FAILURE mode). + // + // If a SubConn is returned: + // - If it is READY, gRPC will send the RPC on it; + // - If it is not ready, or becomes not ready after it's returned, gRPC will block + // this call until a new picker is updated and will call pick on the new picker. 
+ // + // If the returned error is not nil: + // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() + // - If the error is ErrTransientFailure: + // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() + // is called to pick again; + // - Otherwise, RPC will fail with unavailable error. + // - Else (error is other non-nil error): + // - The RPC will fail with unavailable error. + // + // The returned done() function will be called once the rpc has finished, with the + // final status of that RPC. + // done may be nil if balancer doesn't care about the RPC status. + Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. + // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. + // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. 
The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 00000000000..f5dbc4ba201 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State +} + +// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. +// TODO make a general purpose buffer that uses interface{}. 
+type scStateUpdateBuffer struct { + c chan *scStateUpdate + mu sync.Mutex + backlog []*scStateUpdate +} + +func newSCStateUpdateBuffer() *scStateUpdateBuffer { + return &scStateUpdateBuffer{ + c: make(chan *scStateUpdate, 1), + } +} + +func (b *scStateUpdateBuffer) put(t *scStateUpdate) { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + return + default: + } + } + b.backlog = append(b.backlog, t) +} + +func (b *scStateUpdateBuffer) load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } +} + +// get returns the channel that receives an scStateUpdate in the buffer. +// +// Upon receiving, the caller should call load to send another +// scStateUpdate onto the channel if there is any. +func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { + return b.c +} + +// resolverUpdate contains the new resolved addresses or error if there's +// any. +type resolverUpdate struct { + addrs []resolver.Address + err error +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements balancer.ClientConn interface. +type ccBalancerWrapper struct { + cc *ClientConn + balancer balancer.Balancer + stateChangeQueue *scStateUpdateBuffer + resolverUpdateCh chan *resolverUpdate + done chan struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + stateChangeQueue: newSCStateUpdateBuffer(), + resolverUpdateCh: make(chan *resolverUpdate, 1), + done: make(chan struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher balancer functions sequentially, so the balancer can be implemented +// lock-free. 
+func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.stateChangeQueue.get(): + ccb.stateChangeQueue.load() + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + case t := <-ccb.resolverUpdateCh: + ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) + case <-ccb.done: + } + + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + } +} + +func (ccb *ccBalancerWrapper) close() { + close(ccb.done) +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. 
+ if sc == nil { + return + } + ccb.stateChangeQueue.put(&scStateUpdate{ + sc: sc, + state: s, + }) +} + +func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { + select { + case <-ccb.resolverUpdateCh: + default: + } + ccb.resolverUpdateCh <- &resolverUpdate{ + addrs: addrs, + err: err, + } +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs) + ac, err := ccb.cc.newAddrConn(addrs) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + grpclog.Infof("ccBalancerWrapper: removing subconn") + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p) + ccb.cc.csMgr.updateState(s) + ccb.cc.blockingpicker.updatePicker(p) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs) + acbw.mu.Lock() + defer acbw.mu.Unlock() + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. 
+ acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect(false) + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect(false) +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 00000000000..9d0616080a1 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,367 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. 
+} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bwb.b.Start(cc.Target(), BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &connectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateBalancerState(connectivity.Idle, bw) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. + pickfirst bool + + cc balancer.ClientConn + + // To aggregate the connectivity state. + csEvltr *connectivityStateEvaluator + state connectivity.State + + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't return, cc doesn't have access to balancer. + startCh chan struct{} +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. +func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst) + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly. 
+ a := resolver.Address{ + Addr: bw.cc.Target(), + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.cc.Target()}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + oldSC.UpdateAddresses(newAddrs) + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. 
+ del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. + } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s) + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.recordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateBalancerState(bw.state, bw) + if s == connectivity.Shutdown { + // Remove state for this sc. 
+ delete(bw.connSt, sc) + } + return +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. + return +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() + return +} + +// The picker is the balancerWrapper itself. +// Pick should never return ErrNoSubConnAvailable. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + failfast := true // Default failfast is true. + if ss, ok := rpcInfoFromContext(ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return nil, nil, err + } + var done func(balancer.DoneInfo) + if p != nil { + done = func(i balancer.DoneInfo) { p() } + } + var sc balancer.SubConn + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, sc = range bw.conns { + break + } + } else { + var ok bool + sc, ok = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + if !ok && failfast { + return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { + // If the returned sc is not ready and RPC is failfast, + // return error, and this RPC will fail. 
+ return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + } + + return sc, done, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + mu sync.Mutex + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + cse.mu.Lock() + defer cse.mu.Unlock() + + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. 
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index af34a71316f..1ef2507c35f 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -40,6 +25,7 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -73,7 +59,10 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran } } for { - if err = recv(p, dopts.codec, stream, dopts.dc, reply, dopts.maxMsgSize, inPayload); err != nil { + if c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil { if err == io.EOF { break } @@ -86,14 +75,11 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) } c.trailerMD = stream.Trailer() - if peer, ok := peer.FromContext(stream.Context()); ok { - c.peer = peer - } return nil } // sendRequest writes out various information of an RPC such as Context and Message. 
-func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { +func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { defer func() { if err != nil { // If err is connection error, t will be closed, no need to close stream here. @@ -114,11 +100,17 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, Client: true, } } - outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload) + hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload) if err != nil { - return Errorf(codes.Internal, "grpc: %v", err) + return err } - err = t.Write(stream, outBuf, opts) + if c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(data) > *c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(data), *c.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() dopts.copts.StatsHandler.HandleRPC(ctx, outPayload) @@ -144,25 +136,33 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { - c := defaultCallInfo - if mc, ok := cc.getMethodConfig(method); ok { - c.failFast = !mc.WaitForReady - if mc.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, mc.Timeout) - defer cancel() - } + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady } + + if mc.Timeout != nil && *mc.Timeout >= 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer cancel() + } + + opts = append(cc.dopts.callOptions, opts...) for _, o := range opts { - if err := o.before(&c); err != nil { + if err := o.before(c); err != nil { return toRPCErr(err) } } defer func() { for _, o := range opts { - o.after(&c) + o.after(c) } }() + + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if EnableTracing { c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) defer c.traceInfo.tr.Finish() @@ -179,27 +179,25 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } }() } - ctx = newContextWithRPCInfo(ctx) + ctx = newContextWithRPCInfo(ctx, c.failFast) sh := cc.dopts.copts.StatsHandler if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) begin := &stats.Begin{ Client: true, BeginTime: time.Now(), 
FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ Client: true, EndTime: time.Now(), Error: e, } sh.HandleRPC(ctx, end) - } - }() + }() + } topts := &transport.Options{ Last: true, Delay: false, @@ -209,9 +207,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli err error t transport.ClientTransport stream *transport.Stream - // Record the put handler from Balancer.Get(...). It is called once the + // Record the done handler from Balancer.Get(...). It is called once the // RPC has completed or failed. - put func() + done func(balancer.DoneInfo) ) // TODO(zhaoq): Need a formal spec of fail-fast. callHdr := &transport.CallHdr{ @@ -221,11 +219,11 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } - - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, + if c.creds != nil { + callHdr.Creds = c.creds } - t, put, err = cc.getTransport(ctx, gopts) + + t, done, err = cc.getTransport(ctx, c.failFast) if err != nil { // TODO(zhaoq): Probably revisit the error handling. if _, ok := status.FromError(err); ok { @@ -245,28 +243,31 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } stream, err = t.NewStream(ctx, callHdr) if err != nil { - if put != nil { + if done != nil { if _, ok := err.(transport.ConnectionError); ok { // If error is connection error, transport was sending data on wire, // and we are not sure if anything has been sent on wire. // If error is not connection error, we are sure nothing has been sent. 
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) } - put() + done(balancer.DoneInfo{Err: err}) } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue } return toRPCErr(err) } - err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, stream, t, args, topts) + if peer, ok := peer.FromContext(stream.Context()); ok { + c.peer = peer + } + err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts) if err != nil { - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put() + done(balancer.DoneInfo{Err: err}) } // Retry a non-failfast RPC when // i) there is a connection error; or @@ -276,14 +277,14 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } return toRPCErr(err) } - err = recvResponse(ctx, cc.dopts, t, &c, stream, reply) + err = recvResponse(ctx, cc.dopts, t, c, stream, reply) if err != nil { - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put() + done(balancer.DoneInfo{Err: err}) } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -294,12 +295,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) } t.CloseStream(stream, nil) - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put() + done(balancer.DoneInfo{Err: err}) } return stream.Status().Err() } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f542d8bd041..71de2e50d2b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ 
b/vendor/google.golang.org/grpc/clientconn.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -38,14 +23,19 @@ import ( "fmt" "math" "net" + "reflect" + "strings" "sync" "time" "golang.org/x/net/context" "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/transport" ) @@ -56,8 +46,7 @@ var ( ErrClientConnClosing = errors.New("grpc: the client connection is closing") // ErrClientConnTimeout indicates that the ClientConn cannot establish the // underlying connections within the specified timeout. - // DEPRECATED: Please use context.DeadlineExceeded instead. This error will be - // removed in Q1 2017. + // DEPRECATED: Please use context.DeadlineExceeded instead. ErrClientConnTimeout = errors.New("grpc: timed out when dialing") // errNoTransportSecurity indicates that there is no transport security @@ -79,6 +68,8 @@ var ( errConnClosing = errors.New("grpc: the connection is closing") // errConnUnavailable indicates that the connection is unavailable. errConnUnavailable = errors.New("grpc: the connection is unavailable") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second ) @@ -86,30 +77,71 @@ var ( // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. 
type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - codec Codec - cp Compressor - dc Decompressor - bs backoffStrategy - balancer Balancer - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - copts transport.ConnectOptions - maxMsgSize int + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + codec Codec + cp Compressor + dc Decompressor + bs backoffStrategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + copts transport.ConnectOptions + callOptions []CallOption + // This is to support v1 balancer. + balancerBuilder balancer.Builder } -const defaultClientMaxMsgSize = math.MaxInt32 +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 +) // DialOption configures how we set up the connection. type DialOption func(*dialOptions) -// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. -func WithMaxMsgSize(s int) DialOption { +// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WithWriteBufferSize(s int) DialOption { return func(o *dialOptions) { - o.maxMsgSize = s + o.copts.WriteBufferSize = s + } +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for each read syscall. +func WithReadBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.ReadBufferSize = s + } +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. 
+func WithInitialWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialWindowSize = s + } +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + } +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) } } @@ -136,10 +168,23 @@ func WithDecompressor(dc Decompressor) DialOption { } } -// WithBalancer returns a DialOption which sets a load balancer. +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// Deprecated: use the new balancer APIs in balancer package instead. func WithBalancer(b Balancer) DialOption { return func(o *dialOptions) { - o.balancer = b + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + } +} + +// WithBalancerBuilder is for testing only. Users using custom balancers should +// register their balancer and use service config to choose the balancer to use. +func WithBalancerBuilder(b balancer.Builder) DialOption { + // TODO(bar) remove this when switching balancer is done. 
+ return func(o *dialOptions) { + o.balancerBuilder = b } } @@ -204,7 +249,7 @@ func WithTransportCredentials(creds credentials.TransportCredentials) DialOption } // WithPerRPCCredentials returns a DialOption which sets -// credentials which will place auth state on each outbound RPC. +// credentials and places auth state on each outbound RPC. func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { return func(o *dialOptions) { o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) @@ -213,6 +258,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn // initially. This is valid if and only if WithBlock() is present. +// Deprecated: use DialContext and context.WithTimeout instead. func WithTimeout(d time.Duration) DialOption { return func(o *dialOptions) { o.timeout = d @@ -241,7 +287,7 @@ func WithStatsHandler(h stats.Handler) DialOption { } } -// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors. +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors. // If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network // address and won't try to reconnect. // The default value of FailOnNonTempDialError is false. @@ -259,7 +305,7 @@ func WithUserAgent(s string) DialOption { } } -// WithKeepaliveParams returns a DialOption that specifies keepalive paramaters for the client transport. +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { return func(o *dialOptions) { o.copts.KeepaliveParams = kp @@ -295,26 +341,44 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { } // DialContext creates a client connection to the given target. 
ctx can be used to -// cancel or expire the pending connecting. Once this function returns, the +// cancel or expire the pending connection. Once this function returns, the // cancellation and expiration of ctx will be noop. Users should call ClientConn.Close // to terminate all the pending operations after this function returns. -// This is the EXPERIMENTAL API. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - conns: make(map[Address]*addrConn), + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + + blockingpicker: newPickerWrapper(), } cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.dopts.maxMsgSize = defaultClientMaxMsgSize + for _, opt := range opts { opt(&cc.dopts) } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil { + return nil, errNoTransportSecurity + } + } else { + if cc.dopts.copts.TransportCredentials != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + cc.mkp = cc.dopts.copts.KeepaliveParams if cc.dopts.copts.Dialer == nil { cc.dopts.copts.Dialer = newProxyDialer( func(ctx context.Context, addr string) (net.Conn, error) { - return dialContext(ctx, "tcp", addr) + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) }, ) } @@ -343,15 +407,16 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() + scSet := false if cc.dopts.scChan != nil { - // Wait for the initial service config. + // Try to get an initial service config. select { case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = sc + scSet = true } - case <-ctx.Done(): - return nil, ctx.Err() + default: } } // Set defaults. 
@@ -369,89 +434,130 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } else { cc.authority = target } - waitC := make(chan error, 1) - go func() { - defer close(waitC) - if cc.dopts.balancer == nil && cc.sc.LB != nil { - cc.dopts.balancer = cc.sc.LB + + if cc.dopts.balancerBuilder != nil { + var credsClone credentials.TransportCredentials + if creds != nil { + credsClone = creds.Clone() } - if cc.dopts.balancer != nil { - var credsClone credentials.TransportCredentials - if creds != nil { - credsClone = creds.Clone() - } - config := BalancerConfig{ - DialCreds: credsClone, - } - if err := cc.dopts.balancer.Start(target, config); err != nil { + buildOpts := balancer.BuildOptions{ + DialCreds: credsClone, + Dialer: cc.dopts.copts.Dialer, + } + // Build should not take long time. So it's ok to not have a goroutine for it. + // TODO(bar) init balancer after first resolver result to support service config balancer. + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, buildOpts) + } else { + waitC := make(chan error, 1) + go func() { + defer close(waitC) + // No balancer, or no resolver within the balancer. Connect directly. + ac, err := cc.newAddrConn([]resolver.Address{{Addr: target}}) + if err != nil { waitC <- err return } - ch := cc.dopts.balancer.Notify() - if ch != nil { - if cc.dopts.block { - doneChan := make(chan struct{}) - go cc.lbWatcher(doneChan) - <-doneChan - } else { - go cc.lbWatcher(nil) - } + if err := ac.connect(cc.dopts.block); err != nil { + waitC <- err return } - } - // No balancer, or no resolver within the balancer. Connect directly. 
- if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil { - waitC <- err - return - } - }() - select { - case <-ctx.Done(): - return nil, ctx.Err() - case err := <-waitC: - if err != nil { - return nil, err + }() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-waitC: + if err != nil { + return nil, err + } + } + } + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + } + case <-ctx.Done(): + return nil, ctx.Err() } } - if cc.dopts.scChan != nil { go cc.scWatcher() } + // Build the resolver. + cc.resolverWrapper, err = newCCResolverWrapper(cc) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + + if cc.balancerWrapper != nil && cc.resolverWrapper == nil { + // TODO(bar) there should always be a resolver (DNS as the default). + // Unblock balancer initialization with a fake resolver update if there's no resolver. + // The balancer wrapper will not read the addresses, so an empty list works. + // TODO(bar) remove this after the real resolver is started. + cc.balancerWrapper.handleResolvedAddrs([]resolver.Address{}, nil) + } + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + return cc, nil } -// ConnectivityState indicates the state of a client connection. -type ConnectivityState int +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} +} -const ( - // Idle indicates the ClientConn is idle. 
- Idle ConnectivityState = iota - // Connecting indicates the ClienConn is connecting. - Connecting - // Ready indicates the ClientConn is ready for work. - Ready - // TransientFailure indicates the ClientConn has seen a failure but expects to recover. - TransientFailure - // Shutdown indicates the ClientConn has started shutting down. - Shutdown -) - -func (s ConnectivityState) String() string { - switch s { - case Idle: - return "IDLE" - case Connecting: - return "CONNECTING" - case Ready: - return "READY" - case TransientFailure: - return "TRANSIENT_FAILURE" - case Shutdown: - return "SHUTDOWN" - default: - panic(fmt.Sprintf("unknown connectivity state: %d", s)) +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return } + if csm.state == state { + return + } + csm.state = state + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan } // ClientConn represents a client connection to an RPC server. @@ -462,58 +568,40 @@ type ClientConn struct { target string authority string dopts dialOptions + csMgr *connectivityStateManager + + balancerWrapper *ccBalancerWrapper + resolverWrapper *ccResolverWrapper + + blockingpicker *pickerWrapper mu sync.RWMutex sc ServiceConfig - conns map[Address]*addrConn - // Keepalive parameter can be udated if a GoAway is received. 
+ conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. mkp keepalive.ClientParameters } -// lbWatcher watches the Notify channel of the balancer in cc and manages -// connections accordingly. If doneChan is not nil, it is closed after the -// first successfull connection is made. -func (cc *ClientConn) lbWatcher(doneChan chan struct{}) { - for addrs := range cc.dopts.balancer.Notify() { - var ( - add []Address // Addresses need to setup connections. - del []*addrConn // Connections need to tear down. - ) - cc.mu.Lock() - for _, a := range addrs { - if _, ok := cc.conns[a]; !ok { - add = append(add, a) - } - } - for k, c := range cc.conns { - var keep bool - for _, a := range addrs { - if k == a { - keep = true - break - } - } - if !keep { - del = append(del, c) - delete(cc.conns, c.addr) - } - } - cc.mu.Unlock() - for _, a := range add { - if doneChan != nil { - err := cc.resetAddrConn(a, true, nil) - if err == nil { - close(doneChan) - doneChan = nil - } - } else { - cc.resetAddrConn(a, false, nil) - } - } - for _, c := range del { - c.tearDown(errConnDrain) - } +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() } func (cc *ClientConn) scWatcher() { @@ -534,69 +622,64 @@ func (cc *ClientConn) scWatcher() { } } -// resetAddrConn creates an addrConn for addr and adds it to cc.conns. 
-// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. -// If tearDownErr is nil, errConnDrain will be used instead. -func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error { +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { ac := &addrConn{ cc: cc, - addr: addr, + addrs: addrs, dopts: cc.dopts, } - cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = cc.mkp - cc.mu.RUnlock() ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - ac.stateCV = sync.NewCond(&ac.mu) - if EnableTracing { - ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) - } - if !ac.dopts.insecure { - if ac.dopts.copts.TransportCredentials == nil { - return errNoTransportSecurity - } - } else { - if ac.dopts.copts.TransportCredentials != nil { - return errCredentialsConflict - } - for _, cd := range ac.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return errTransportCredentialsMissing - } - } - } // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() - return ErrClientConnClosing + return nil, ErrClientConnClosing } - stale := cc.conns[ac.addr] - cc.conns[ac.addr] = ac + cc.conns[ac] = struct{}{} cc.mu.Unlock() - if stale != nil { - // There is an addrConn alive on ac.addr already. This could be due to - // 1) a buggy Balancer notifies duplicated Addresses; - // 2) goaway was received, a new ac will replace the old ac. - // The old ac should be deleted from cc.conns, but the - // underlying transport should drain rather than close. - if tearDownErr == nil { - // tearDownErr is nil if resetAddrConn is called by - // 1) Dial - // 2) lbWatcher - // In both cases, the stale ac should drain, not close. 
- stale.tearDown(errConnDrain) - } else { - stale.tearDown(tearDownErr) - } + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +// connect starts to creating transport and also starts the transport monitor +// goroutine for this ac. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +// This was part of resetAddrConn, keep it here to make the diff look clean. +func (ac *addrConn) connect(block bool) error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + ac.state = connectivity.Connecting + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + ac.mu.Unlock() + if block { - if err := ac.resetTransport(false); err != nil { + if err := ac.resetTransport(); err != nil { if err != errConnClosing { - // Tear down ac and delete it from cc.conns. - cc.mu.Lock() - delete(cc.conns, ac.addr) - cc.mu.Unlock() ac.tearDown(err) } if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { @@ -609,8 +692,8 @@ func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) } else { // Start a goroutine connecting to the server asynchronously. go func() { - if err := ac.resetTransport(false); err != nil { - grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) + if err := ac.resetTransport(); err != nil { + grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) if err != errConnClosing { // Keep this ac in cc.conns, to get the reason it's torn down. 
ac.tearDown(err) @@ -623,66 +706,86 @@ func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) return nil } -// TODO: Avoid the locking here. -func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) { - cc.mu.RLock() - defer cc.mu.RUnlock() - m, ok = cc.sc.Methods[method] - return +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// It checks whether current connected address of ac is in the new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown { + ac.addrs = addrs + return true + } + + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound } -func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { - var ( - ac *addrConn - ok bool - put func() - ) - if cc.dopts.balancer == nil { +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e /service/). If there is a default MethodConfig for +// the serivce, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. 
+ cc.mu.RLock() + defer cc.mu.RUnlock() + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m, _ = cc.sc.Methods[method[:i+1]] + } + return m +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) { + if cc.balancerWrapper == nil { // If balancer is nil, there should be only one addrConn available. cc.mu.RLock() if cc.conns == nil { cc.mu.RUnlock() + // TODO this function returns toRPCErr and non-toRPCErr. Clean up + // the errors in ClientConn. return nil, nil, toRPCErr(ErrClientConnClosing) } - for _, ac = range cc.conns { + var ac *addrConn + for ac = range cc.conns { // Break after the first iteration to get the first addrConn. - ok = true break } cc.mu.RUnlock() - } else { - var ( - addr Address - err error - ) - addr, put, err = cc.dopts.balancer.Get(ctx, opts) + if ac == nil { + return nil, nil, errConnClosing + } + t, err := ac.wait(ctx, false /*hasBalancer*/, failfast) if err != nil { - return nil, nil, toRPCErr(err) + return nil, nil, err } - cc.mu.RLock() - if cc.conns == nil { - cc.mu.RUnlock() - return nil, nil, toRPCErr(ErrClientConnClosing) - } - ac, ok = cc.conns[addr] - cc.mu.RUnlock() + return t, nil, nil } - if !ok { - if put != nil { - updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) - put() - } - return nil, nil, errConnClosing - } - t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) + + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{}) if err != nil { - if put != nil { - updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) - put() - } - return nil, nil, err + return nil, nil, toRPCErr(err) } - return t, put, nil + return t, done, nil } // Close tears down the ClientConn and all underlying connections. 
@@ -696,11 +799,16 @@ func (cc *ClientConn) Close() error { } conns := cc.conns cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) cc.mu.Unlock() - if cc.dopts.balancer != nil { - cc.dopts.balancer.Close() + cc.blockingpicker.close() + if cc.resolverWrapper != nil { + cc.resolverWrapper.close() } - for _, ac := range conns { + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + for ac := range conns { ac.tearDown(ErrClientConnClosing) } return nil @@ -711,15 +819,15 @@ type addrConn struct { ctx context.Context cancel context.CancelFunc - cc *ClientConn - addr Address - dopts dialOptions - events trace.EventLog + cc *ClientConn + curAddr resolver.Address + addrs []resolver.Address + dopts dialOptions + events trace.EventLog + acbw balancer.SubConn - mu sync.Mutex - state ConnectivityState - stateCV *sync.Cond - down func(error) // the handler called when a connection is down. + mu sync.Mutex + state connectivity.State // ready is closed and becomes nil when a new transport is up or failed // due to timeout. ready chan struct{} @@ -759,125 +867,137 @@ func (ac *addrConn) errorf(format string, a ...interface{}) { } } -// getState returns the connectivity state of the Conn -func (ac *addrConn) getState() ConnectivityState { +// resetTransport recreates a transport to the address for ac. The old +// transport will close itself on error or when the clientconn is closed. +// +// TODO(bar) make sure all state transitions are valid. +func (ac *addrConn) resetTransport() error { ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -// waitForStateChange blocks until the state changes to something other than the sourceState. 
-func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - ac.mu.Lock() - defer ac.mu.Unlock() - if sourceState != ac.state { - return ac.state, nil + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing } - done := make(chan struct{}) - var err error - go func() { - select { - case <-ctx.Done(): - ac.mu.Lock() - err = ctx.Err() - ac.stateCV.Broadcast() - ac.mu.Unlock() - case <-done: - } - }() - defer close(done) - for sourceState == ac.state { - ac.stateCV.Wait() - if err != nil { - return ac.state, err - } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } - return ac.state, nil -} - -func (ac *addrConn) resetTransport(closeTransport bool) error { + ac.transport = nil + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() for retries := 0; ; retries++ { + sleepTime := ac.dopts.bs.backoff(retries) + timeout := minConnectTimeout ac.mu.Lock() - ac.printf("connecting") - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. 
+ if timeout < time.Duration(int(sleepTime)/len(ac.addrs)) { + timeout = time.Duration(int(sleepTime) / len(ac.addrs)) + } + connectTime := time.Now() + if ac.state == connectivity.Shutdown { ac.mu.Unlock() return errConnClosing } - if ac.down != nil { - ac.down(downErrorf(false, true, "%v", errNetworkIO)) - ac.down = nil - } - ac.state = Connecting - ac.stateCV.Broadcast() - t := ac.transport - ac.mu.Unlock() - if closeTransport && t != nil { - t.Close() - } - sleepTime := ac.dopts.bs.backoff(retries) - timeout := minConnectTimeout - if timeout < sleepTime { - timeout = sleepTime - } - ctx, cancel := context.WithTimeout(ac.ctx, timeout) - connectTime := time.Now() - sinfo := transport.TargetInfo{ - Addr: ac.addr.Addr, - Metadata: ac.addr.Metadata, - } - newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts) - // Don't call cancel in success path due to a race in Go 1.6: - // https://github.com/golang/go/issues/15078. - if err != nil { - cancel() - - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { - return err + ac.printf("connecting") + if ac.state != connectivity.Connecting { + ac.state = connectivity.Connecting + // TODO(bar) remove condition once we always have a balancer. + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) } - grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr) + } + // copy ac.addrs in case of race + addrsIter := make([]resolver.Address, len(ac.addrs)) + copy(addrsIter, ac.addrs) + copts := ac.dopts.copts + ac.mu.Unlock() + for _, addr := range addrsIter { ac.mu.Lock() - if ac.state == Shutdown { + if ac.state == connectivity.Shutdown { // ac.tearDown(...) has been invoked. 
ac.mu.Unlock() return errConnClosing } - ac.errorf("transient failure: %v", err) - ac.state = TransientFailure - ac.stateCV.Broadcast() + ac.mu.Unlock() + sinfo := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + } + newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout) + if err != nil { + if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { + ac.mu.Lock() + if ac.state != connectivity.Shutdown { + ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + } + ac.mu.Unlock() + return err + } + grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, addr) + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing + } + ac.mu.Unlock() + continue + } + ac.mu.Lock() + ac.printf("ready") + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + newTransport.Close() + return errConnClosing + } + ac.state = connectivity.Ready + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + t := ac.transport + ac.transport = newTransport + if t != nil { + t.Close() + } + ac.curAddr = addr if ac.ready != nil { close(ac.ready) ac.ready = nil } ac.mu.Unlock() - closeTransport = false - select { - case <-time.After(sleepTime - time.Since(connectTime)): - case <-ac.ctx.Done(): - return ac.ctx.Err() - } - continue + return nil } ac.mu.Lock() - ac.printf("ready") - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. 
- ac.mu.Unlock() - newTransport.Close() - return errConnClosing + ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) } - ac.state = Ready - ac.stateCV.Broadcast() - ac.transport = newTransport if ac.ready != nil { close(ac.ready) ac.ready = nil } - if ac.cc.dopts.balancer != nil { - ac.down = ac.cc.dopts.balancer.Up(ac.addr) - } ac.mu.Unlock() - return nil + timer := time.NewTimer(sleepTime - time.Since(connectTime)) + select { + case <-timer.C: + case <-ac.ctx.Done(): + timer.Stop() + return ac.ctx.Err() + } + timer.Stop() } } @@ -888,73 +1008,54 @@ func (ac *addrConn) transportMonitor() { ac.mu.Lock() t := ac.transport ac.mu.Unlock() + // Block until we receive a goaway or an error occurs. + select { + case <-t.GoAway(): + case <-t.Error(): + } + // If a GoAway happened, regardless of error, adjust our keepalive + // parameters as appropriate. select { - // This is needed to detect the teardown when - // the addrConn is idle (i.e., no RPC in flight). - case <-ac.ctx.Done(): - select { - case <-t.Error(): - t.Close() - default: - } - return case <-t.GoAway(): ac.adjustParams(t.GetGoAwayReason()) - // If GoAway happens without any network I/O error, ac is closed without shutting down the - // underlying transport (the transport will be closed when all the pending RPCs finished or - // failed.). - // If GoAway and some network I/O error happen concurrently, ac and its underlying transport - // are closed. - // In both cases, a new ac is created. - select { - case <-t.Error(): - ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) - default: - ac.cc.resetAddrConn(ac.addr, false, errConnDrain) + default: + } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + // Set connectivity state to TransientFailure before calling + // resetTransport. Transition READY->CONNECTING is not valid. 
+ ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + if err := ac.resetTransport(); err != nil { + ac.mu.Lock() + ac.printf("transport exiting: %v", err) + ac.mu.Unlock() + grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) } return - case <-t.Error(): - select { - case <-ac.ctx.Done(): - t.Close() - return - case <-t.GoAway(): - ac.adjustParams(t.GetGoAwayReason()) - ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) - return - default: - } - ac.mu.Lock() - if ac.state == Shutdown { - // ac has been shutdown. - ac.mu.Unlock() - return - } - ac.state = TransientFailure - ac.stateCV.Broadcast() - ac.mu.Unlock() - if err := ac.resetTransport(true); err != nil { - ac.mu.Lock() - ac.printf("transport exiting: %v", err) - ac.mu.Unlock() - grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return - } } } } // wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or -// iv) transport is in TransientFailure and there is a balancer/failfast is true. +// iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true. func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { for { ac.mu.Lock() switch { - case ac.state == Shutdown: + case ac.state == connectivity.Shutdown: if failfast || !hasBalancer { // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. 
err := ac.tearDownErr @@ -963,11 +1064,11 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans } ac.mu.Unlock() return nil, errConnClosing - case ac.state == Ready: + case ac.state == connectivity.Ready: ct := ac.transport ac.mu.Unlock() return ct, nil - case ac.state == TransientFailure: + case ac.state == connectivity.TransientFailure: if failfast || hasBalancer { ac.mu.Unlock() return nil, errConnUnavailable @@ -988,6 +1089,28 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans } } +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect(false) + } + return nil, false +} + // tearDown starts to tear down the addrConn. // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in // some edge cases (e.g., the caller opens and closes many addrConn's in a @@ -995,13 +1118,9 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans // tearDown doesn't remove ac from ac.cc.conns. func (ac *addrConn) tearDown(err error) { ac.cancel() - ac.mu.Lock() + ac.curAddr = resolver.Address{} defer ac.mu.Unlock() - if ac.down != nil { - ac.down(downErrorf(false, false, "%v", err)) - ac.down = nil - } if err == errConnDrain && ac.transport != nil { // GracefulClose(...) may be executed multiple times when // i) receiving multiple GoAway frames from the server; or @@ -1009,12 +1128,16 @@ func (ac *addrConn) tearDown(err error) { // address removal and GoAway. 
ac.transport.GracefulClose() } - if ac.state == Shutdown { + if ac.state == connectivity.Shutdown { return } - ac.state = Shutdown + ac.state = connectivity.Shutdown ac.tearDownErr = err - ac.stateCV.Broadcast() + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } if ac.events != nil { ac.events.Finish() ac.events = nil @@ -1023,8 +1146,11 @@ func (ac *addrConn) tearDown(err error) { close(ac.ready) ac.ready = nil } - if ac.transport != nil && err != errConnDrain { - ac.transport.Close() - } return } + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index bd76ebb7f17..905b048e2ac 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -1,35 +1,20 @@ /* -* - * Copyright 2014, Google Inc. - * All rights reserved. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Copyright 2014 gRPC authors. * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * http://www.apache.org/licenses/LICENSE-2.0 * -*/ + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ package grpc @@ -96,6 +81,7 @@ func (p protoCodec) Marshal(v interface{}) ([]byte, error) { func (p protoCodec) Unmarshal(data []byte, v interface{}) error { cb := protoBufferPool.Get().(*cachedProtoBuffer) cb.SetBuf(data) + v.(proto.Message).Reset() err := cb.Unmarshal(v.(proto.Message)) cb.SetBuf(nil) protoBufferPool.Put(cb) diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go index e6762d08455..259837060ab 100644 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=Code; DO NOT EDIT +// Code generated by "stringer -type=Code"; DO NOT EDIT. 
package codes @@ -9,7 +9,7 @@ const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlre var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} func (i Code) String() string { - if i+1 >= Code(len(_Code_index)) { + if i >= Code(len(_Code_index)-1) { return fmt.Sprintf("Code(%d)", i) } return _Code_name[_Code_index[i]:_Code_index[i+1]] diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 37c5b860bd6..81fe7bf85b3 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -44,7 +29,7 @@ const ( // OK is returned on success. OK Code = 0 - // Canceled indicates the operation was cancelled (typically by the caller). + // Canceled indicates the operation was canceled (typically by the caller). Canceled Code = 1 // Unknown error. 
An example of where this error may be returned is diff --git a/vendor/google.golang.org/grpc/connectivity/BUILD b/vendor/google.golang.org/grpc/connectivity/BUILD new file mode 100644 index 00000000000..d5555d4a28f --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["connectivity.go"], + importpath = "google.golang.org/grpc/connectivity", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 00000000000..568ef5dc68b --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. 
+// All APIs in this package are experimental. +package connectivity + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + grpclog.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClienConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// Reporter reports the connectivity states. +type Reporter interface { + // CurrentState returns the current state of the reporter. + CurrentState() State + // WaitForStateChange blocks until the reporter's state is different from the given state, + // and returns true. + // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). 
+ WaitForStateChange(context.Context, State) bool +} diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh deleted file mode 100755 index b85f9181dee..00000000000 --- a/vendor/google.golang.org/grpc/coverage.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - - -set -e - -workdir=.cover -profile="$workdir/cover.out" -mode=set -end2endtest="google.golang.org/grpc/test" - -generate_cover_data() { - rm -rf "$workdir" - mkdir "$workdir" - - for pkg in "$@"; do - if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] - then - f="$workdir/$(echo $pkg | tr / -)" - go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" - go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" - fi - done - - echo "mode: $mode" >"$profile" - grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" -} - -show_cover_report() { - go tool cover -${1}="$profile" -} - -push_to_coveralls() { - goveralls -coverprofile="$profile" -} - -generate_cover_data $(go list ./...) -show_cover_report func -case "$1" in -"") - ;; ---html) - show_cover_report html ;; ---coveralls) - push_to_coveralls ;; -*) - echo >&2 "error: invalid option: $1" ;; -esac -rm -rf "$workdir" diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index d1217344b67..0ce766a4dcf 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -106,10 +91,14 @@ type TransportCredentials interface { // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). // If the returned error is a wrapper error, implementations should make sure that // the error implements Temporary() to have the correct retry behaviors. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about // the connection. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) // Info provides the ProtocolInfo of this TransportCredentials. Info() ProtocolInfo @@ -196,14 +185,14 @@ func NewTLS(c *tls.Config) TransportCredentials { return tc } -// NewClientTLSFromCert constructs a TLS from the input certificate for client. +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } -// NewClientTLSFromFile constructs a TLS from the input certificate file for client. +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. 
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { @@ -218,12 +207,12 @@ func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredent return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil } -// NewServerTLSFromCert constructs a TLS from the input certificate for server. +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) } -// NewServerTLSFromFile constructs a TLS from the input certificate file and key +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key // file for server. func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go index 7597b09e358..60409aac0fb 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -3,34 +3,19 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go index 0ecf342da84..93f0e1d8de2 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go @@ -2,34 +2,19 @@ /* * - * Copyright 2017, Google Inc. 
- * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go index cfd40dfa34a..d6bbcc9fdd9 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -2,34 +2,19 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index b4c0e740e9c..e153b2c390c 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -1,6 +1,24 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + /* Package grpc implements an RPC system called gRPC. -See www.grpc.io for more information about gRPC. +See grpc.io for more information about gRPC. 
*/ package grpc diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go deleted file mode 100644 index b61c57e88de..00000000000 --- a/vendor/google.golang.org/grpc/go16.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build go1.6,!go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -package grpc - -import ( - "fmt" - "net" - "net/http" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) -} - -func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { - req.Cancel = ctx.Done() - if err := req.Write(conn); err != nil { - return fmt.Errorf("failed to write the HTTP request: %v", err) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go deleted file mode 100644 index 844f0e1899b..00000000000 --- a/vendor/google.golang.org/grpc/go17.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "net" - "net/http" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, network, address) -} - -func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { - req = req.WithContext(ctx) - if err := req.Write(conn); err != nil { - return err - } - return nil -} diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go index 524e429df3e..db56ff36217 100644 --- a/vendor/google.golang.org/grpc/grpclb.go +++ b/vendor/google.golang.org/grpc/grpclb.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -43,7 +28,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/codes" - lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1" + lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/naming" @@ -74,41 +59,21 @@ type balanceLoadClientStream struct { ClientStream } -func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { +func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error { return x.ClientStream.SendMsg(m) } -func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { - m := new(lbpb.LoadBalanceResponse) +func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) { + m := new(lbmpb.LoadBalanceResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the server is a backend server. - Backend AddressType = iota - // GRPCLB indicates the server is a grpclb load balancer. - GRPCLB -) - -// AddrMetadataGRPCLB contains the information the name resolution for grpclb should provide. The -// name resolver used by grpclb balancer is required to provide this type of metadata in -// its address updates. -type AddrMetadataGRPCLB struct { - // AddrType is the type of server (grpc load balancer or backend). - AddrType AddressType - // ServerName is the name of the grpc load balancer. Used for authentication. - ServerName string -} - // NewGRPCLBBalancer creates a grpclb load balancer. func NewGRPCLBBalancer(r naming.Resolver) Balancer { - return &balancer{ + return &grpclbBalancer{ r: r, } } @@ -131,27 +96,27 @@ type grpclbAddrInfo struct { dropForLoadBalancing bool } -type balancer struct { - r naming.Resolver - target string - mu sync.Mutex - seq int // a sequence number to make sure addrCh does not get stale addresses. 
- w naming.Watcher - addrCh chan []Address - rbs []remoteBalancerInfo - addrs []*grpclbAddrInfo - next int - waitCh chan struct{} - done bool - expTimer *time.Timer - rand *rand.Rand +type grpclbBalancer struct { + r naming.Resolver + target string + mu sync.Mutex + seq int // a sequence number to make sure addrCh does not get stale addresses. + w naming.Watcher + addrCh chan []Address + rbs []remoteBalancerInfo + addrs []*grpclbAddrInfo + next int + waitCh chan struct{} + done bool + rand *rand.Rand - clientStats lbpb.ClientStats + clientStats lbmpb.ClientStats } -func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { +func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { updates, err := w.Next() if err != nil { + grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err) return err } b.mu.Lock() @@ -173,24 +138,24 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerIn if exist { continue } - md, ok := update.Metadata.(*AddrMetadataGRPCLB) + md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB) if !ok { // TODO: Revisit the handling here and may introduce some fallback mechanism. - grpclog.Printf("The name resolution contains unexpected metadata %v", update.Metadata) + grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata) continue } switch md.AddrType { - case Backend: + case naming.Backend: // TODO: Revisit the handling here and may introduce some fallback mechanism. 
- grpclog.Printf("The name resolution does not give grpclb addresses") + grpclog.Errorf("The name resolution does not give grpclb addresses") continue - case GRPCLB: + case naming.GRPCLB: b.rbs = append(b.rbs, remoteBalancerInfo{ addr: update.Addr, name: md.ServerName, }) default: - grpclog.Printf("Received unknow address type %d", md.AddrType) + grpclog.Errorf("Received unknow address type %d", md.AddrType) continue } case naming.Delete: @@ -202,7 +167,7 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerIn } } default: - grpclog.Println("Unknown update.Op ", update.Op) + grpclog.Errorf("Unknown update.Op %v", update.Op) } } // TODO: Fall back to the basic round-robin load balancing if the resulting address is @@ -215,42 +180,33 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerIn return nil } -func (b *balancer) serverListExpire(seq int) { - b.mu.Lock() - defer b.mu.Unlock() - // TODO: gRPC interanls do not clear the connections when the server list is stale. - // This means RPCs will keep using the existing server list until b receives new - // server list even though the list is expired. Revisit this behavior later. - if b.done || seq < b.seq { - return - } - b.next = 0 - b.addrs = nil - // Ask grpc internals to close all the corresponding connections. 
- b.addrCh <- nil -} - -func convertDuration(d *lbpb.Duration) time.Duration { +func convertDuration(d *lbmpb.Duration) time.Duration { if d == nil { return 0 } return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond } -func (b *balancer) processServerList(l *lbpb.ServerList, seq int) { +func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) { if l == nil { return } servers := l.GetServers() - expiration := convertDuration(l.GetExpirationInterval()) var ( sl []*grpclbAddrInfo addrs []Address ) for _, s := range servers { md := metadata.Pairs("lb-token", s.LoadBalanceToken) + ip := net.IP(s.IpAddress) + ipStr := ip.String() + if ip.To4() == nil { + // Add square brackets to ipv6 addresses, otherwise net.Dial() and + // net.SplitHostPort() will return too many colons error. + ipStr = fmt.Sprintf("[%s]", ipStr) + } addr := Address{ - Addr: fmt.Sprintf("%s:%d", net.IP(s.IpAddress), s.Port), + Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), Metadata: &md, } sl = append(sl, &grpclbAddrInfo{ @@ -270,20 +226,11 @@ func (b *balancer) processServerList(l *lbpb.ServerList, seq int) { b.next = 0 b.addrs = sl b.addrCh <- addrs - if b.expTimer != nil { - b.expTimer.Stop() - b.expTimer = nil - } - if expiration > 0 { - b.expTimer = time.AfterFunc(expiration, func() { - b.serverListExpire(seq) - }) - } } return } -func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { +func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { ticker := time.NewTicker(interval) defer ticker.Stop() for { @@ -294,29 +241,30 @@ func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Dura } b.mu.Lock() stats := b.clientStats - b.clientStats = lbpb.ClientStats{} // Clear the stats. + b.clientStats = lbmpb.ClientStats{} // Clear the stats. 
b.mu.Unlock() t := time.Now() - stats.Timestamp = &lbpb.Timestamp{ + stats.Timestamp = &lbmpb.Timestamp{ Seconds: t.Unix(), Nanos: int32(t.Nanosecond()), } - if err := s.Send(&lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ + if err := s.Send(&lbmpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{ ClientStats: &stats, }, }); err != nil { + grpclog.Errorf("grpclb: failed to send load report: %v", err) return } } } -func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { +func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := lbc.BalanceLoad(ctx) if err != nil { - grpclog.Printf("Failed to perform RPC to the remote balancer %v", err) + grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) return } b.mu.Lock() @@ -325,37 +273,39 @@ func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry b return } b.mu.Unlock() - initReq := &lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ - InitialRequest: &lbpb.InitialLoadBalanceRequest{ + initReq := &lbmpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{ + InitialRequest: &lbmpb.InitialLoadBalanceRequest{ Name: b.target, }, }, } if err := stream.Send(initReq); err != nil { + grpclog.Errorf("grpclb: failed to send init request: %v", err) // TODO: backoff on retry? return true } reply, err := stream.Recv() if err != nil { + grpclog.Errorf("grpclb: failed to recv init response: %v", err) // TODO: backoff on retry? 
return true } initResp := reply.GetInitialResponse() if initResp == nil { - grpclog.Println("Failed to receive the initial response from the remote balancer.") + grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.") return } // TODO: Support delegation. if initResp.LoadBalancerDelegate != "" { // delegation - grpclog.Println("TODO: Delegation is not supported yet.") + grpclog.Errorf("TODO: Delegation is not supported yet.") return } streamDone := make(chan struct{}) defer close(streamDone) b.mu.Lock() - b.clientStats = lbpb.ClientStats{} // Clear client stats. + b.clientStats = lbmpb.ClientStats{} // Clear client stats. b.mu.Unlock() if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { go b.sendLoadReport(stream, d, streamDone) @@ -364,6 +314,7 @@ func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry b for { reply, err := stream.Recv() if err != nil { + grpclog.Errorf("grpclb: failed to recv server list: %v", err) break } b.mu.Lock() @@ -381,7 +332,7 @@ func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry b return true } -func (b *balancer) Start(target string, config BalancerConfig) error { +func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { b.rand = rand.New(rand.NewSource(time.Now().Unix())) // TODO: Fall back to the basic direct connection if there is no name resolver. 
if b.r == nil { @@ -397,6 +348,7 @@ func (b *balancer) Start(target string, config BalancerConfig) error { w, err := b.r.Resolve(target) if err != nil { b.mu.Unlock() + grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err) return err } b.w = w @@ -406,7 +358,7 @@ func (b *balancer) Start(target string, config BalancerConfig) error { go func() { for { if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil { - grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) + grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err) close(balancerAddrsCh) return } @@ -490,22 +442,32 @@ func (b *balancer) Start(target string, config BalancerConfig) error { cc.Close() } // Talk to the remote load balancer to get the server list. - var err error - creds := config.DialCreds - ccError = make(chan struct{}) - if creds == nil { - cc, err = Dial(rb.addr, WithInsecure()) - } else { + var ( + err error + dopts []DialOption + ) + if creds := config.DialCreds; creds != nil { if rb.name != "" { if err := creds.OverrideServerName(rb.name); err != nil { - grpclog.Printf("Failed to override the server name in the credentials: %v", err) + grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err) continue } } - cc, err = Dial(rb.addr, WithTransportCredentials(creds)) + dopts = append(dopts, WithTransportCredentials(creds)) + } else { + dopts = append(dopts, WithInsecure()) } + if dialer := config.Dialer; dialer != nil { + // WithDialer takes a different type of function, so we instead use a special DialOption here. + dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer }) + } + dopts = append(dopts, WithBlock()) + ccError = make(chan struct{}) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + cc, err = DialContext(ctx, rb.addr, dopts...) 
+ cancel() if err != nil { - grpclog.Printf("Failed to setup a connection to the remote balancer %v: %v", rb.addr, err) + grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err) close(ccError) continue } @@ -529,7 +491,7 @@ func (b *balancer) Start(target string, config BalancerConfig) error { return nil } -func (b *balancer) down(addr Address, err error) { +func (b *grpclbBalancer) down(addr Address, err error) { b.mu.Lock() defer b.mu.Unlock() for _, a := range b.addrs { @@ -540,7 +502,7 @@ func (b *balancer) down(addr Address, err error) { } } -func (b *balancer) Up(addr Address) func(error) { +func (b *grpclbBalancer) Up(addr Address) func(error) { b.mu.Lock() defer b.mu.Unlock() if b.done { @@ -568,7 +530,7 @@ func (b *balancer) Up(addr Address) func(error) { } } -func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { +func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { var ch chan struct{} b.mu.Lock() if b.done { @@ -638,17 +600,10 @@ func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Addre } } if !opts.BlockingWait { - if len(b.addrs) == 0 { - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithClientFailedToSend++ - b.mu.Unlock() - err = Errorf(codes.Unavailable, "there is no address available") - return - } - // Returns the next addr on b.addrs for a failfast RPC. - addr = b.addrs[b.next].addr - b.next++ + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithClientFailedToSend++ b.mu.Unlock() + err = Errorf(codes.Unavailable, "there is no address available") return } // Wait on b.waitCh for non-failfast RPCs. 
@@ -725,17 +680,17 @@ func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Addre } } -func (b *balancer) Notify() <-chan []Address { +func (b *grpclbBalancer) Notify() <-chan []Address { return b.addrCh } -func (b *balancer) Close() error { +func (b *grpclbBalancer) Close() error { b.mu.Lock() defer b.mu.Unlock() - b.done = true - if b.expTimer != nil { - b.expTimer.Stop() + if b.done { + return errBalancerClosed } + b.done = true if b.waitCh != nil { close(b.waitCh) } diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD index 79fd880777f..35716e585b4 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD @@ -1,17 +1,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") -filegroup( - name = "go_default_library_protos", - srcs = ["grpclb.proto"], - visibility = ["//visibility:public"], -) - go_library( name = "go_default_library", - srcs = ["grpclb.pb.go"], + srcs = ["doc.go"], importpath = "google.golang.org/grpc/grpclb/grpc_lb_v1", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], ) filegroup( @@ -23,7 +16,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go new file mode 100644 index 00000000000..aba962840c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go @@ -0,0 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpc_lb_v1 is the parent package of all gRPC loadbalancer +// message and service protobuf definitions. +package grpc_lb_v1 diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD new file mode 100644 index 00000000000..06ab31fa949 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["messages.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["messages.pb.go"], + importpath = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go similarity index 79% rename from vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go rename to vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go index f63941bd803..f4a27125a4f 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go 
+++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: grpclb.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_lb_v1/messages/messages.proto /* -Package grpc_lb_v1 is a generated protocol buffer package. +Package messages is a generated protocol buffer package. It is generated from these files: - grpclb.proto + grpc_lb_v1/messages/messages.proto It has these top-level messages: Duration @@ -19,7 +18,7 @@ It has these top-level messages: ServerList Server */ -package grpc_lb_v1 +package messages import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -473,11 +472,6 @@ type ServerList struct { // across more servers. The client should consume the server list in order // unless instructed otherwise via the client_config. Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"` - // Indicates the amount of time that the client should consider this server - // list as valid. It may be considered stale after waiting this interval of - // time after receiving the list. If the interval is not positive, the - // client can assume the list is valid until the next list is received. - ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"` } func (m *ServerList) Reset() { *m = ServerList{} } @@ -492,13 +486,6 @@ func (m *ServerList) GetServers() []*Server { return nil } -func (m *ServerList) GetExpirationInterval() *Duration { - if m != nil { - return m.ExpirationInterval - } - return nil -} - // Contains server information. When none of the [drop_for_*] fields are true, // use the other fields. When drop_for_rate_limiting is true, ignore all other // fields. 
Use drop_for_load_balancing only when it is true and @@ -576,54 +563,53 @@ func init() { proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") } -func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 733 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39, - 0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34, - 0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a, - 0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9, - 0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1, - 0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92, - 0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51, - 0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0, - 0x74, 0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51, - 0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7, - 0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4, - 0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13, - 0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67, - 0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed, - 0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93, - 0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f, - 0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 
0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2, - 0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4, - 0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd, - 0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2, - 0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd, - 0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71, - 0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a, - 0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c, - 0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c, - 0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0, - 0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84, - 0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37, - 0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5, - 0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f, - 0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07, - 0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71, - 0xcb, 0x01, 0x9e, 0xd3, 0x5b, 0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f, - 0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87, - 0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94, - 0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56, - 0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 
0x85, 0x7a, 0xa9, - 0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a, - 0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e, - 0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87, - 0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28, - 0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70, - 0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94, - 0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5, - 0xc6, 0x7f, 0x13, 0xf7, 0xfd, 0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff, - 0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00, + // 709 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b, + 0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69, + 0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55, + 0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28, + 0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f, + 0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb, + 0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56, + 0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3, + 0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a, + 0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18, + 0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 
0x0f, 0xb8, + 0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a, + 0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc, + 0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d, + 0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f, + 0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42, + 0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b, + 0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf, + 0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60, + 0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3, + 0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29, + 0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9, + 0x99, 0xd2, 0x4c, 0x73, 0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1, + 0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e, + 0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd, + 0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a, + 0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa, + 0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31, + 0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a, + 0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79, + 0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8, + 0x1a, 0xd4, 0x63, 0xd4, 
0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89, + 0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f, + 0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7, + 0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a, + 0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62, + 0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d, + 0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77, + 0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc, + 0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76, + 0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b, + 0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06, + 0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd, + 0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86, + 0xa6, 0x4a, 0x06, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto similarity index 71% rename from vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto rename to vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto index a2502fb284a..2ed04551fad 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto @@ -1,35 +1,21 @@ -// Copyright 2016, Google Inc. -// All rights reserved. +// Copyright 2016 gRPC authors. 
// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. syntax = "proto3"; package grpc.lb.v1; +option go_package = "messages"; message Duration { // Signed seconds of the span of time. Must be from -315,576,000,000 @@ -46,7 +32,6 @@ message Duration { } message Timestamp { - // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. @@ -59,12 +44,6 @@ message Timestamp { int32 nanos = 2; } -service LoadBalancer { - // Bidirectional rpc to get a list of servers. - rpc BalanceLoad(stream LoadBalanceRequest) - returns (stream LoadBalanceResponse); -} - message LoadBalanceRequest { oneof load_balance_request_type { // This message should be sent on the first request to the load balancer. @@ -142,11 +121,8 @@ message ServerList { // unless instructed otherwise via the client_config. repeated Server servers = 1; - // Indicates the amount of time that the client should consider this server - // list as valid. It may be considered stale after waiting this interval of - // time after receiving the list. If the interval is not positive, the - // client can assume the list is valid until the next list is received. - Duration expiration_interval = 3; + // Was google.protobuf.Duration expiration_interval. + reserved 3; } // Contains server information. 
When none of the [drop_for_*] fields are true, diff --git a/vendor/google.golang.org/grpc/grpclog/BUILD b/vendor/google.golang.org/grpc/grpclog/BUILD index 4595e517367..4b225761989 100644 --- a/vendor/google.golang.org/grpc/grpclog/BUILD +++ b/vendor/google.golang.org/grpc/grpclog/BUILD @@ -2,7 +2,11 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["logger.go"], + srcs = [ + "grpclog.go", + "logger.go", + "loggerv2.go", + ], importpath = "google.golang.org/grpc/grpclog", visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 00000000000..1d71e25de50 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport package only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog + +import "os" + +var logger = newLoggerV2() + +// V reports whether verbosity level l is at least the requested verbose level. 
+func V(l int) bool { + return logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calles os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calle os.Exit()) with exit code 1. 
+func Fatalln(args ...interface{}) { + logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// Deprecated: use Info. +func Print(args ...interface{}) { + logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// Deprecated: use Infoln. +func Println(args ...interface{}) { + logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 2cc09be4894..d03b2397bfa 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -1,52 +1,25 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ -/* -Package grpclog defines logging for grpc. -*/ package grpclog -import ( - "log" - "os" -) - -// Use golang's standard logger by default. -// Access is not mutex-protected: do not modify except in init() -// functions. -var logger Logger = log.New(os.Stderr, "", log.LstdFlags) - // Logger mimics golang's standard Logger as an interface. +// Deprecated: use LoggerV2. type Logger interface { Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) @@ -58,36 +31,53 @@ type Logger interface { // SetLogger sets the logger that is used in grpc. Call only from // init() functions. +// Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - logger = l + logger = &loggerWrapper{Logger: l} } -// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. 
-func Fatal(args ...interface{}) { - logger.Fatal(args...) +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger } -// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) } -// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) } -// Print prints to the logger. Arguments are handled in the manner of fmt.Print. -func Print(args ...interface{}) { - logger.Print(args...) +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) } -// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. -func Printf(format string, args ...interface{}) { - logger.Printf(format, args...) +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) } -// Println prints to the logger. Arguments are handled in the manner of fmt.Println. -func Println(args ...interface{}) { - logger.Println(args...) +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. 
+ return true } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 00000000000..d4932577695 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. 
+ Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. 
+// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) 
+} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 89c4d459f0a..fdcbb9e0b7d 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: health.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_health_v1/health.proto /* Package grpc_health_v1 is a generated protocol buffer package. 
It is generated from these files: - health.proto + grpc_health_v1/health.proto It has these top-level messages: HealthCheckRequest @@ -69,6 +68,13 @@ func (m *HealthCheckRequest) String() string { return proto.CompactTe func (*HealthCheckRequest) ProtoMessage() {} func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *HealthCheckRequest) GetService() string { + if m != nil { + return m.Service + } + return "" +} + type HealthCheckResponse struct { Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` } @@ -78,6 +84,13 @@ func (m *HealthCheckResponse) String() string { return proto.CompactT func (*HealthCheckResponse) ProtoMessage() {} func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if m != nil { + return m.Status + } + return HealthCheckResponse_UNKNOWN +} + func init() { proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") @@ -153,24 +166,25 @@ var _Health_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "health.proto", + Metadata: "grpc_health_v1/health.proto", } -func init() { proto.RegisterFile("health.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 204 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc, - 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x83, - 0x0a, 0x95, 0x19, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x39, 0xce, 0x19, 0xa9, 0xc9, 0xd9, 0x41, - 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 
0xa9, 0x45, 0x65, 0x99, 0xc9, - 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0xd2, 0x1c, 0x46, 0x2e, 0x61, 0x14, - 0x0d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x9e, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0xa5, - 0xc5, 0x60, 0x0d, 0x7c, 0x46, 0x86, 0x7a, 0xa8, 0x16, 0xe9, 0x61, 0xd1, 0xa4, 0x17, 0x0c, 0x32, - 0x34, 0x2f, 0x3d, 0x18, 0xac, 0x31, 0x08, 0x6a, 0x80, 0x92, 0x15, 0x17, 0x2f, 0x8a, 0x84, 0x10, - 0x37, 0x17, 0x7b, 0xa8, 0x9f, 0xb7, 0x9f, 0x7f, 0xb8, 0x9f, 0x00, 0x03, 0x88, 0x13, 0xec, 0x1a, - 0x14, 0xe6, 0xe9, 0xe7, 0x2e, 0xc0, 0x28, 0xc4, 0xcf, 0xc5, 0xed, 0xe7, 0x1f, 0x12, 0x0f, 0x13, - 0x60, 0x32, 0x8a, 0xe2, 0x62, 0x83, 0x58, 0x24, 0x14, 0xc0, 0xc5, 0x0a, 0xb6, 0x4c, 0x48, 0x09, - 0xaf, 0x4b, 0xc0, 0xfe, 0x95, 0x52, 0x26, 0xc2, 0xb5, 0x49, 0x6c, 0xe0, 0x10, 0x34, 0x06, 0x04, - 0x00, 0x00, 0xff, 0xff, 0xac, 0x56, 0x2a, 0xcb, 0x51, 0x01, 0x00, 0x00, + // 213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a, + 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, + 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, + 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, + 0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, + 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, + 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, + 0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, + 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, + 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 
0x6c, 0x10, 0x8b, + 0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, + 0xb8, 0x36, 0x89, 0x0d, 0x1c, 0x82, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x53, 0x2b, 0x65, + 0x20, 0x60, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto index e2dc0889258..6072fdc3b80 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + syntax = "proto3"; package grpc.health.v1; diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index a6921614572..06dc825b9fb 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -42,15 +27,15 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. 
invoker is the handler to complete the RPC // and it is the responsibility of the interceptor to call it. -// This is the EXPERIMENTAL API. +// This is an EXPERIMENTAL API. type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) // StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handlder to create a ClientStream and it is the responsibility of the interceptor to call it. -// This is the EXPERIMENTAL API. +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 5489143a85c..07083832c3c 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -1,32 +1,17 @@ /* - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index d492589c96b..f8adc7e6d4f 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -1,33 +1,18 @@ /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -39,8 +24,8 @@ import ( ) // ClientParameters is used to set keepalive parameters on the client-side. -// These configure how the client will actively probe to notice when a connection broken -// and to cause activity so intermediaries are aware the connection is still in use. +// These configure how the client will actively probe to notice when a connection is broken +// and send pings so intermediaries will be aware of the liveness of the connection. // Make sure these parameters are set in coordination with the keepalive policy on the server, // as incompatible settings can result in closing of connection. type ClientParameters struct { diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a4f2de026db..589161d57fa 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -1,38 +1,23 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ // Package metadata define the structure of the metadata supported by gRPC library. -// Please refer to http://www.grpc.io/docs/guides/wire.html for more information about custom-metadata. +// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata. package metadata import ( @@ -51,8 +36,17 @@ func DecodeKeyValue(k, v string) (string, string, error) { // two convenience functions New and Pairs to generate MD. type MD map[string][]string -// New creates a MD from given key-value map. -// Keys are automatically converted to lowercase. +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. func New(m map[string]string) MD { md := MD{} for k, val := range m { @@ -64,7 +58,16 @@ func New(m map[string]string) MD { // Pairs returns an MD formed by the mapping of key, value ... // Pairs panics if len(kv) is odd. -// Keys are automatically converted to lowercase. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. 
func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) @@ -91,9 +94,9 @@ func (md MD) Copy() MD { return Join(md) } -// Join joins any number of MDs into a single MD. +// Join joins any number of mds into a single MD. // The order of values for each key is determined by the order in which -// the MDs containing those values are presented to Join. +// the mds containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -107,11 +110,6 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated. -func NewContext(ctx context.Context, md MD) context.Context { - return NewOutgoingContext(ctx, md) -} - // NewIncomingContext creates a new context with incoming md attached. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) @@ -122,22 +120,17 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, md) } -// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated. -func FromContext(ctx context.Context) (md MD, ok bool) { - return FromIncomingContext(ctx) -} - -// FromIncomingContext returns the incoming MD in ctx if it exists. The -// returned md should be immutable, writing to it may cause races. -// Modification should be made to the copies of the returned md. +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. func FromIncomingContext(ctx context.Context) (md MD, ok bool) { md, ok = ctx.Value(mdIncomingKey{}).(MD) return } -// FromOutgoingContext returns the outgoing MD in ctx if it exists. 
The -// returned md should be immutable, writing to it may cause races. -// Modification should be made to the copies of the returned md. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to the copies of the returned MD. func FromOutgoingContext(ctx context.Context) (md MD, ok bool) { md, ok = ctx.Value(mdOutgoingKey{}).(MD) return diff --git a/vendor/google.golang.org/grpc/naming/BUILD b/vendor/google.golang.org/grpc/naming/BUILD index 2318033a3c4..ea07a9fb642 100644 --- a/vendor/google.golang.org/grpc/naming/BUILD +++ b/vendor/google.golang.org/grpc/naming/BUILD @@ -2,9 +2,18 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["naming.go"], + srcs = [ + "dns_resolver.go", + "go17.go", + "go18.go", + "naming.go", + ], importpath = "google.golang.org/grpc/naming", visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], ) filegroup( diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 00000000000..7e69a2ca0a6 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,290 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package naming
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	defaultPort = "443"
+	defaultFreq = time.Minute * 30
+)
+
+var (
+	errMissingAddr = errors.New("missing address")
+	errWatcherClose = errors.New("watcher has been closed")
+)
+
+// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
+// create watchers that poll the DNS server using the frequency set by freq.
+func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) {
+	return &dnsResolver{freq: freq}, nil
+}
+
+// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create
+// watchers that poll the DNS server using the default frequency defined by defaultFreq.
+func NewDNSResolver() (Resolver, error) {
+	return NewDNSResolverWithFreq(defaultFreq)
+}
+
+// dnsResolver handles name resolution for names following the DNS scheme
+type dnsResolver struct {
+	// frequency of polling the DNS server that the watchers created by this resolver will use.
+	freq time.Duration
+}
+
+// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
+// If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return "", false
+	}
+	if ip.To4() != nil {
+		return addr, true
+	}
+	return "[" + addr + "]", true
+}
+
+// parseTarget takes the user input target string, returns formatted host and port info.
+// If target doesn't specify a port, set the port to be the defaultPort.
+// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
+// are stripped when setting the host.
+// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err := net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v", target) +} + +// Resolve creates a watcher that watches the name resolution of the target. 
+func (r *dnsResolver) Resolve(target string) (Watcher, error) {
+	host, port, err := parseTarget(target)
+	if err != nil {
+		return nil, err
+	}
+
+	if net.ParseIP(host) != nil {
+		ipWatcher := &ipWatcher{
+			updateChan: make(chan *Update, 1),
+		}
+		host, _ = formatIP(host)
+		ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port}
+		return ipWatcher, nil
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	return &dnsWatcher{
+		r:      r,
+		host:   host,
+		port:   port,
+		ctx:    ctx,
+		cancel: cancel,
+		t:      time.NewTimer(0),
+	}, nil
+}
+
+// dnsWatcher watches for the name resolution update for a specific target
+type dnsWatcher struct {
+	r    *dnsResolver
+	host string
+	port string
+	// The latest resolved address set
+	curAddrs map[string]*Update
+	ctx      context.Context
+	cancel   context.CancelFunc
+	t        *time.Timer
+}
+
+// ipWatcher watches for the name resolution update for an IP address.
+type ipWatcher struct {
+	updateChan chan *Update
+}
+
+// Next returns the address resolution Update for the target. For IP address,
+// the resolution is itself, thus polling name server is unnecessary. Therefore,
+// Next() will return an Update the first time it is called, and will be blocked
+// for all following calls as no Update exists until watcher is closed.
+func (i *ipWatcher) Next() ([]*Update, error) {
+	u, ok := <-i.updateChan
+	if !ok {
+		return nil, errWatcherClose
+	}
+	return []*Update{u}, nil
+}
+
+// Close closes the ipWatcher.
+func (i *ipWatcher) Close() {
+	close(i.updateChan)
+}
+
+// AddressType indicates the address type returned by name resolution.
+type AddressType uint8
+
+const (
+	// Backend indicates the server is a backend server.
+	Backend AddressType = iota
+	// GRPCLB indicates the server is a grpclb load balancer.
+	GRPCLB
+)
+
+// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide.
The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. + ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + 
newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If failed to get any balancer address (either no corresponding SRV for the + // target, or caused by failure during resolution/parsing of the balancer target), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update(delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go new file mode 100644 index 00000000000..8bdf21e7998 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go17.go @@ -0,0 +1,34 @@ +// +build go1.7, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package naming + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } +) diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/naming/go18.go new file mode 100644 index 00000000000..b5a0f842748 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go18.go @@ -0,0 +1,28 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go index c2e0871e6f8..1af7e32f86d 100644 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index bfa6205ba9e..317b8b9d09a 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -42,7 +27,8 @@ import ( "google.golang.org/grpc/credentials" ) -// Peer contains the information of the peer for an RPC. +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. type Peer struct { // Addr is the peer address. Addr net.Addr diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 00000000000..9085dbc9c98 --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker +} + +func newPickerWrapper() *pickerWrapper { + bp := &pickerWrapper{blockingCh: make(chan struct{})} + return bp +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (bp *pickerWrapper) updatePicker(p balancer.Picker) { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return + } + bp.picker = p + // bp.blockingCh should never be nil. + close(bp.blockingCh) + bp.blockingCh = make(chan struct{}) + bp.mu.Unlock() +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ( + p balancer.Picker + ch chan struct{} + ) + + for { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if bp.picker == nil { + ch = bp.blockingCh + } + if ch == bp.blockingCh { + // This could happen when either: + // - bp.picker is nil (the previous if condition), or + // - has called pick on the current picker. 
+ bp.mu.Unlock() + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-ch: + } + continue + } + + ch = bp.blockingCh + p = bp.picker + bp.mu.Unlock() + + subConn, put, err := p.Pick(ctx, opts) + + if err != nil { + switch err { + case balancer.ErrNoSubConnAvailable: + continue + case balancer.ErrTransientFailure: + if !failfast { + continue + } + return nil, nil, status.Errorf(codes.Unavailable, "%v", err) + default: + // err is some other error. + return nil, nil, toRPCErr(err) + } + } + + acw, ok := subConn.(*acBalancerWrapper) + if !ok { + grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + return t, put, nil + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (bp *pickerWrapper) close() { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.done { + return + } + bp.done = true + close(bp.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 00000000000..7f993ef5a38 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return "pickfirst" +} + +type pickfirstBalancer struct { + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + return + } + if b.sc == nil { + b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + return + } + b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + } else { + b.sc.UpdateAddresses(addrs) + } +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if b.sc != sc || s == connectivity.Shutdown { + b.sc = nil + return + } + + switch s { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateBalancerState(s, &picker{sc: sc}) + case connectivity.Connecting: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + case connectivity.TransientFailure: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + err error + sc balancer.SubConn +} + +func (p *picker) Pick(ctx context.Context, opts 
balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go index 10188dc3433..3e17efec61b 100644 --- a/vendor/google.golang.org/grpc/proxy.go +++ b/vendor/google.golang.org/grpc/proxy.go @@ -1,33 +1,18 @@ /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -97,7 +82,8 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ Header: map[string][]string{"User-Agent": {grpcUA}}, }) - if err := sendHTTPRequest(ctx, req, conn); err != nil { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) } diff --git a/vendor/google.golang.org/grpc/resolver/BUILD b/vendor/google.golang.org/grpc/resolver/BUILD new file mode 100644 index 00000000000..51f8d6f28fe --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["resolver.go"], + importpath = "google.golang.org/grpc/resolver", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git 
a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 00000000000..49307e8fe9e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,143 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+var (
+	// m is a map from scheme to resolver builder.
+	m = make(map[string]Builder)
+	// defaultScheme is the default scheme to use.
+	defaultScheme string
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map.
+// b.Scheme will be used as the scheme registered with this builder.
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+// If no builder is registered with the scheme, the default scheme will
+// be used.
+// If the default scheme is not modified, "dns" will be the default
+// scheme, and the preinstalled dns resolver will be used.
+// If the default scheme is modified, and a resolver is registered with
+// the scheme, that resolver will be returned.
+// If the default scheme is modified, and no resolver is registered with
+// the scheme, nil will be returned.
+func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + if b, ok := m[defaultScheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. +// The default default scheme is "dns". +func SetDefaultScheme(scheme string) { + defaultScheme = scheme +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the address is for a backend server. + Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + GRPCLB +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Type is the type of this address. + Type AddressType + // ServerName is the name of this address. + // It's the name of the grpc load balancer, which will be used for authentication. + ServerName string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BuildOption includes additional information for the builder to create +// the resolver. +type BuildOption struct { +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +type ClientConn interface { + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + NewServiceConfig(serviceConfig string) +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. 
+type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOption includes additional information for ResolveNow. +type ResolveNowOption struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name again. + // It's just a hint, resolver can ignore this if it's not necessary. + ResolveNow(ResolveNowOption) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 00000000000..7d53964d094 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,139 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "strings" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConnection interface. +type ccResolverWrapper struct { + cc *ClientConn + resolver resolver.Resolver + addrCh chan []resolver.Address + scCh chan string + done chan struct{} +} + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns "", s instead. +func split2(s, sep string) (string, string) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", s + } + return spl[0], spl[1] +} + +// parseTarget splits target into a struct containing scheme, authority and +// endpoint. +func parseTarget(target string) (ret resolver.Target) { + ret.Scheme, ret.Endpoint = split2(target, "://") + ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/") + return ret +} + +// newCCResolverWrapper parses cc.target for scheme and gets the resolver +// builder for this scheme. It then builds the resolver and starts the +// monitoring goroutine for it. +// +// This function could return nil, nil, in tests for old behaviors. +// TODO(bar) never return nil, nil when DNS becomes the default resolver. 
+func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
+	target := parseTarget(cc.target)
+	grpclog.Infof("dialing to target with scheme: %q", target.Scheme)
+
+	rb := resolver.Get(target.Scheme)
+	if rb == nil {
+		// TODO(bar) return error when DNS becomes the default (implemented and
+		// registered by DNS package).
+		grpclog.Infof("could not get resolver for scheme: %q", target.Scheme)
+		return nil, nil
+	}
+
+	ccr := &ccResolverWrapper{
+		cc:     cc,
+		addrCh: make(chan []resolver.Address, 1),
+		scCh:   make(chan string, 1),
+		done:   make(chan struct{}),
+	}
+
+	var err error
+	ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{})
+	if err != nil {
+		return nil, err
+	}
+	go ccr.watcher()
+	return ccr, nil
+}
+
+// watcher processes address updates and service config updates sequentially.
+// Otherwise, we need to resolve possible races between address and service
+// config (e.g. they specify different balancer types).
+func (ccr *ccResolverWrapper) watcher() {
+	for {
+		select {
+		case <-ccr.done:
+			return
+		default:
+		}
+
+		select {
+		case addrs := <-ccr.addrCh:
+			grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs)
+			// TODO(bar switching) this should never be nil. Pickfirst should be default.
+			if ccr.cc.balancerWrapper != nil {
+				// TODO(bar switching) create balancer if it's nil?
+				ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
+			}
+		case sc := <-ccr.scCh:
+			grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
+		case <-ccr.done:
+			return
+		}
+	}
+}
+
+func (ccr *ccResolverWrapper) close() {
+	ccr.resolver.Close()
+	close(ccr.done)
+}
+
+// NewAddress is called by the resolver implementation to send addresses to gRPC.
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+	select {
+	case <-ccr.addrCh:
+	default:
+	}
+	ccr.addrCh <- addrs
+}
+
+// NewServiceConfig is called by the resolver implementation to send service
+// configs to gRPC.
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + select { + case <-ccr.scCh: + default: + } + ccr.scCh <- sc +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 34e1ad03b97..188a75fff94 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -36,15 +21,18 @@ package grpc import ( "bytes" "compress/gzip" + stdctx "context" "encoding/binary" "io" "io/ioutil" "math" "os" + "sync" "time" "golang.org/x/net/context" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -60,16 +48,25 @@ type Compressor interface { Type() string } -// NewGZIPCompressor creates a Compressor based on GZIP. -func NewGZIPCompressor() Compressor { - return &gzipCompressor{} +type gzipCompressor struct { + pool sync.Pool } -type gzipCompressor struct { +// NewGZIPCompressor creates a Compressor based on GZIP. 
+func NewGZIPCompressor() Compressor { + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(ioutil.Discard) + }, + }, + } } func (c *gzipCompressor) Do(w io.Writer, p []byte) error { - z := gzip.NewWriter(w) + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) if _, err := z.Write(p); err != nil { return err } @@ -89,6 +86,7 @@ type Decompressor interface { } type gzipDecompressor struct { + pool sync.Pool } // NewGZIPDecompressor creates a Decompressor based on GZIP. @@ -97,11 +95,26 @@ func NewGZIPDecompressor() Decompressor { } func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { - z, err := gzip.NewReader(r) - if err != nil { - return nil, err + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } } - defer z.Close() + + defer func() { + z.Close() + d.pool.Put(z) + }() return ioutil.ReadAll(z) } @@ -111,14 +124,19 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - peer *peer.Peer - traceInfo traceInfo // in trace.go + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + peer *peer.Peer + traceInfo traceInfo // in trace.go + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials } -var defaultCallInfo = callInfo{failFast: true} +func defaultCallInfo() *callInfo { + return &callInfo{failFast: true} +} // CallOption configures a Call before it starts or extracts information from // a Call after it completes. @@ -132,6 +150,14 @@ type CallOption interface { after(*callInfo) } +// EmptyCallOption does not alter the Call configuration. 
+// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + type beforeCall func(c *callInfo) error func (o beforeCall) before(c *callInfo) error { return o(c) } @@ -173,7 +199,8 @@ func Peer(peer *peer.Peer) CallOption { // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will retry // the call if it fails due to a transient error. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md. Note: failFast is default to true. +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// Note: failFast is default to true. func FailFast(failFast bool) CallOption { return beforeCall(func(c *callInfo) error { c.failFast = failFast @@ -181,6 +208,31 @@ func FailFast(failFast bool) CallOption { }) } +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. +func MaxCallRecvMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxReceiveMessageSize = &s + return nil + }) +} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. +func MaxCallSendMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxSendMessageSize = &s + return nil + }) +} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return beforeCall(func(c *callInfo) error { + c.creds = creds + return nil + }) +} + // The format of the payload: compressed or not? type payloadFormat uint8 @@ -197,7 +249,7 @@ type parser struct { r io.Reader // The header of a gRPC message. Find more detail - // at http://www.grpc.io/docs/guides/wire.html. 
+ // at https://grpc.io/docs/guides/wire.html. header [5]byte } @@ -214,8 +266,8 @@ type parser struct { // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. -func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := io.ReadFull(p.r, p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { return 0, nil, err } @@ -225,13 +277,13 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro if length == 0 { return pf, nil, nil } - if length > uint32(maxMsgSize) { - return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize) + if length > uint32(maxReceiveMessageSize) { + return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead // of making it for each message: msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.r, msg); err != nil { + if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } @@ -240,19 +292,20 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro return pf, msg, nil } -// encode serializes msg and prepends the message header. If msg is nil, it -// generates the message header of 0 message length. -func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, error) { - var ( - b []byte - length uint +// encode serializes msg and returns a buffer of message header and a buffer of msg. +// If msg is nil, it generates the message header and an empty msg buffer. 
+func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) { + var b []byte + const ( + payloadLen = 1 + sizeLen = 4 ) + if msg != nil { var err error - // TODO(zhaoq): optimize to reduce memory alloc and copying. b, err = c.Marshal(msg) if err != nil { - return nil, err + return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } if outPayload != nil { outPayload.Payload = msg @@ -262,39 +315,28 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl } if cp != nil { if err := cp.Do(cbuf, b); err != nil { - return nil, err + return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } b = cbuf.Bytes() } - length = uint(len(b)) - } - if length > math.MaxUint32 { - return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) } - const ( - payloadLen = 1 - sizeLen = 4 - ) + if uint(len(b)) > math.MaxUint32 { + return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } - var buf = make([]byte, payloadLen+sizeLen+len(b)) - - // Write payload format + bufHeader := make([]byte, payloadLen+sizeLen) if cp == nil { - buf[0] = byte(compressionNone) + bufHeader[0] = byte(compressionNone) } else { - buf[0] = byte(compressionMade) + bufHeader[0] = byte(compressionMade) } // Write length of b into buf - binary.BigEndian.PutUint32(buf[1:], uint32(length)) - // Copy encoded msg to buf - copy(buf[5:], b) - + binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b))) if outPayload != nil { - outPayload.WireLength = len(buf) + outPayload.WireLength = payloadLen + sizeLen + len(b) } - - return buf, nil + return bufHeader, b, nil } func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { @@ -310,8 +352,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er return nil } -func recv(p *parser, c 
Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error { - pf, d, err := p.recvMsg(maxMsgSize) +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error { + pf, d, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return err } @@ -327,10 +369,10 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } - if len(d) > maxMsgSize { + if len(d) > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize) + return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) } if err := c.Unmarshal(d, m); err != nil { return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) @@ -346,14 +388,15 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ } type rpcInfo struct { + failfast bool bytesSent bool bytesReceived bool } type rpcInfoContextKey struct{} -func newContextWithRPCInfo(ctx context.Context) context.Context { - return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{}) +func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) } func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { @@ -363,11 +406,63 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { func updateRPCInfoInContext(ctx context.Context, s rpcInfo) { if ss, ok := rpcInfoFromContext(ctx); ok { - *ss = s + ss.bytesReceived = s.bytesReceived + ss.bytesSent = s.bytesSent } return } +// toRPCErr converts an error into an 
error from the status package. +func toRPCErr(err error) error { + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded, stdctx.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled, stdctx.Canceled: + return status.Error(codes.Canceled, err.Error()) + case ErrClientConnClosing: + return status.Error(codes.FailedPrecondition, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// convertCode converts a standard Go error into its canonical code. Note that +// this is only used to translate the error returned by the server applications. +func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled, stdctx.Canceled: + return codes.Canceled + case context.DeadlineExceeded, stdctx.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + // Code returns the error code for err if it was produced by the rpc system. // Otherwise, it returns codes.Unknown. // @@ -398,57 +493,6 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { return status.Errorf(c, format, a...) } -// toRPCErr converts an error into an error from the status package. 
-func toRPCErr(err error) error { - if _, ok := status.FromError(err); ok { - return err - } - switch e := err.(type) { - case transport.StreamError: - return status.Error(e.Code, e.Desc) - case transport.ConnectionError: - return status.Error(codes.Internal, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - case ErrClientConnClosing: - return status.Error(codes.FailedPrecondition, err.Error()) - } - } - return status.Error(codes.Unknown, err.Error()) -} - -// convertCode converts a standard Go error into its canonical code. Note that -// this is only used to translate the error returned by the server applications. -func convertCode(err error) codes.Code { - switch err { - case nil: - return codes.OK - case io.EOF: - return codes.OutOfRange - case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: - return codes.FailedPrecondition - case os.ErrInvalid: - return codes.InvalidArgument - case context.Canceled: - return codes.Canceled - case context.DeadlineExceeded: - return codes.DeadlineExceeded - } - switch { - case os.IsExist(err): - return codes.AlreadyExists - case os.IsNotExist(err): - return codes.NotFound - case os.IsPermission(err): - return codes.PermissionDenied - } - return codes.Unknown -} - // MethodConfig defines the configuration recommended by the service providers for a // particular method. // This is EXPERIMENTAL and subject to change. @@ -456,24 +500,22 @@ type MethodConfig struct { // WaitForReady indicates whether RPCs sent to this method should wait until // the connection is ready by default (!failfast). The value specified via the // gRPC client API will override the value set here. - WaitForReady bool + WaitForReady *bool // Timeout is the default timeout for RPCs sent to this method. 
The actual // deadline used will be the minimum of the value specified here and the value // set by the application via the gRPC client API. If either one is not set, // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout time.Duration + Timeout *time.Duration // MaxReqSize is the maximum allowed payload size for an individual request in a // stream (client->server) in bytes. The size which is measured is the serialized // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minumum of the value specified here and the value set + // The actual value used is the minimum of the value specified here and the value set // by the application via the gRPC client API. If either one is not set, then the other // will be used. If neither is set, then the built-in default is used. - // TODO: support this. - MaxReqSize uint32 + MaxReqSize *int // MaxRespSize is the maximum allowed payload size for an individual response in a // stream (server->client) in bytes. - // TODO: support this. - MaxRespSize uint32 + MaxRespSize *int } // ServiceConfig is provided by the service provider and contains parameters for how @@ -484,9 +526,38 @@ type ServiceConfig struct { // via grpc.WithBalancer will override this. LB Balancer // Methods contains a map for the methods in this service. + // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. + // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. + // Otherwise, the method has no MethodConfig to use. 
Methods map[string]MethodConfig } +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +// SupportPackageIsVersion3 is referenced from generated protocol buffer files. +// The latest support package version is 4. +// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the +// next support package version update. +const SupportPackageIsVersion3 = true + // SupportPackageIsVersion4 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the grpc package. // @@ -496,6 +567,6 @@ type ServiceConfig struct { const SupportPackageIsVersion4 = true // Version is the current grpc version. -const Version = "1.3.0" +const Version = "1.7.5" const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index b15f71c6c18..787665dfeb3 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -38,6 +23,7 @@ import ( "errors" "fmt" "io" + "math" "net" "net/http" "reflect" @@ -61,6 +47,11 @@ import ( "google.golang.org/grpc/transport" ) +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. @@ -96,6 +87,7 @@ type Server struct { mu sync.Mutex // guards following lis map[net.Listener]bool conns map[io.Closer]bool + serve bool drain bool ctx context.Context cancel context.CancelFunc @@ -107,27 +99,69 @@ type Server struct { } type options struct { - creds credentials.TransportCredentials - codec Codec - cp Compressor - dc Decompressor - maxMsgSize int - unaryInt UnaryServerInterceptor - streamInt StreamServerInterceptor - inTapHandle tap.ServerInHandle - statsHandler stats.Handler - maxConcurrentStreams uint32 - useHandlerImpl bool // use http.Handler-based server - unknownStreamDesc *StreamDesc - keepaliveParams keepalive.ServerParameters - keepalivePolicy keepalive.EnforcementPolicy + creds credentials.TransportCredentials + codec Codec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + useHandlerImpl bool // use http.Handler-based server + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration } -var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit +var defaultServerOptions = options{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + 
maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, +} -// A ServerOption sets options. +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption func(*options) +// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WriteBufferSize(s int) ServerOption { + return func(o *options) { + o.writeBufferSize = s + } +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +func ReadBufferSize(s int) ServerOption { + return func(o *options) { + o.readBufferSize = s + } +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialWindowSize = s + } +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialConnWindowSize = s + } +} + // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { return func(o *options) { @@ -163,11 +197,25 @@ func RPCDecompressor(dc Decompressor) ServerOption { } } -// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages. -// If this is not set, gRPC uses the default 4MB. +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead. 
func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { return func(o *options) { - o.maxMsgSize = m + o.maxReceiveMessageSize = m + } +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default 4MB. +func MaxSendMsgSize(m int) ServerOption { + return func(o *options) { + o.maxSendMessageSize = m } } @@ -192,7 +240,7 @@ func Creds(c credentials.TransportCredentials) ServerOption { func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { return func(o *options) { if o.unaryInt != nil { - panic("The unary server interceptor has been set.") + panic("The unary server interceptor was already set and may not be reset.") } o.unaryInt = i } @@ -203,7 +251,7 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { func StreamInterceptor(i StreamServerInterceptor) ServerOption { return func(o *options) { if o.streamInt != nil { - panic("The stream server interceptor has been set.") + panic("The stream server interceptor was already set and may not be reset.") } o.streamInt = i } @@ -214,7 +262,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { func InTapHandle(h tap.ServerInHandle) ServerOption { return func(o *options) { if o.inTapHandle != nil { - panic("The tap handle has been set.") + panic("The tap handle was already set and may not be reset.") } o.inTapHandle = h } @@ -229,10 +277,10 @@ func StatsHandler(h stats.Handler) ServerOption { // UnknownServiceHandler returns a ServerOption that allows for adding a custom // unknown service handler. 
The provided method is a bidi-streaming RPC service -// handler that will be invoked instead of returning the the "unimplemented" gRPC +// handler that will be invoked instead of returning the "unimplemented" gRPC // error whenever a request is received for an unregistered service or method. // The handling function has full access to the Context of the request and the -// stream, and the invocation passes through interceptors. +// stream, and the invocation bypasses interceptors. func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { return func(o *options) { o.unknownStreamDesc = &StreamDesc{ @@ -245,11 +293,20 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { } } +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +func ConnectionTimeout(d time.Duration) ServerOption { + return func(o *options) { + o.connectionTimeout = d + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { - var opts options - opts.maxMsgSize = defaultMaxMsgSize + opts := defaultServerOptions for _, o := range opt { o(&opts) } @@ -288,8 +345,8 @@ func (s *Server) errorf(format string, a ...interface{}) { } } -// RegisterService register a service and its implementation to the gRPC -// server. Called from the IDL generated code. This must be called before +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before // invoking Serve. 
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { ht := reflect.TypeOf(sd.HandlerType).Elem() @@ -304,6 +361,9 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } if _, ok := s.m[sd.ServiceName]; ok { grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } @@ -334,7 +394,7 @@ type MethodInfo struct { IsServerStream bool } -// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service. +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. @@ -392,6 +452,7 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") + s.serve = true if s.lis == nil { s.mu.Unlock() lis.Close() @@ -427,10 +488,12 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("Accept error: %v; retrying in %v", err, tempDelay) s.mu.Unlock() + timer := time.NewTimer(tempDelay) select { - case <-time.After(tempDelay): + case <-timer.C: case <-s.ctx.Done(): } + timer.Stop() continue } s.mu.Lock() @@ -448,16 +511,18 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn is run in its own goroutine and handles a just-accepted // connection that has not had any I/O performed on it yet. 
func (s *Server) handleRawConn(rawConn net.Conn) { + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) conn, authInfo, err := s.useTransportAuthenticator(rawConn) if err != nil { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - // If serverHandShake returns ErrConnDispatched, keep rawConn open. + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandshake returns ErrConnDispatched, keep rawConn open. if err != credentials.ErrConnDispatched { rawConn.Close() } + rawConn.SetDeadline(time.Time{}) return } @@ -470,25 +535,32 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Unlock() if s.opts.useHandlerImpl { + rawConn.SetDeadline(time.Time{}) s.serveUsingHandler(conn) } else { - s.serveHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + rawConn.SetDeadline(time.Time{}) + s.serveStreams(st) } } -// serveHTTP2Transport sets up a http/2 transport (using the -// gRPC http2 server transport in transport/http2_server.go) and -// serves streams on it. -// This is run in its own goroutine (it does network I/O in -// transport.NewServerTransport). -func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). 
+func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { config := &transport.ServerConfig{ - MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, - InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, - KeepaliveParams: s.opts.keepaliveParams, - KeepalivePolicy: s.opts.keepalivePolicy, + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -496,14 +568,14 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) - return + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return nil } if !s.addConn(st) { st.Close() - return + return nil } - s.serveStreams(st) + return st } func (s *Server) serveStreams(st transport.ServerTransport) { @@ -554,6 +626,30 @@ func (s *Server) serveUsingHandler(conn net.Conn) { }) } +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. 
+// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r) if err != nil { @@ -618,18 +714,15 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str if s.opts.statsHandler != nil { outPayload = &stats.OutPayload{} } - p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) + hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) if err != nil { - // This typically indicates a fatal issue (e.g., memory - // corruption or hardware faults) the application program - // cannot handle. - // - // TODO(zhaoq): There exist other options also such as only closing the - // faulty stream locally and remotely (Other streams can keep going). Find - // the optimal option. - grpclog.Fatalf("grpc: Server failed to encode response %v", err) + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err } - err = t.Write(stream, p, opts) + if len(data) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(data), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() s.opts.statsHandler.HandleRPC(stream.Context(), outPayload) @@ -644,9 +737,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. BeginTime: time.Now(), } sh.HandleRPC(stream.Context(), begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ EndTime: time.Now(), } @@ -654,8 +745,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. end.Error = toRPCErr(err) } sh.HandleRPC(stream.Context(), end) - } - }() + }() + } if trInfo != nil { defer trInfo.tr.Finish() trInfo.firstLine.client = false @@ -672,139 +763,137 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. stream.SetSendCompress(s.opts.cp.Type()) } p := &parser{r: stream} - for { // TODO: delete - pf, req, err := p.recvMsg(s.opts.maxMsgSize) + pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if err == io.ErrUnexpectedEOF { + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) + } + } + return err + } + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + return err + } + if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + + // TODO checkRecvPayload always return RPC error. Add a return here if necessary. + } + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), + } + } + df := func(v interface{}) error { + if inPayload != nil { + inPayload.WireLength = len(req) + } + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + return Errorf(codes.Internal, err.Error()) + } + } + if len(req) > s.opts.maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", len(req), s.opts.maxReceiveMessageSize) + } + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if inPayload != nil { + inPayload.Payload = v + inPayload.Data = req + inPayload.Length = len(req) + sh.HandleRPC(stream.Context(), inPayload) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(convertCode(appErr), appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } - if err == io.ErrUnexpectedEOF { - err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) - } - if err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. - case transport.StreamError: - if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) - } - } - return err - } - - if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - return err - } - if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - - // TODO checkRecvPayload always return RPC error. Add a return here if necessary. - } - var inPayload *stats.InPayload - if sh != nil { - inPayload = &stats.InPayload{ - RecvTime: time.Now(), + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - df := func(v interface{}) error { - if inPayload != nil { - inPayload.WireLength = len(req) - } - if pf == compressionMade { - var err error - req, err = s.opts.dc.Do(bytes.NewReader(req)) - if err != nil { - return Errorf(codes.Internal, err.Error()) - } - } - if len(req) > s.opts.maxMsgSize { - // TODO: Revisit the error code. Currently keep it consistent with - // java implementation. 
- return status.Errorf(codes.Internal, "grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize) - } - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) - } - if inPayload != nil { - inPayload.Payload = v - inPayload.Data = req - inPayload.Length = len(req) - sh.HandleRPC(stream.Context(), inPayload) - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) - if appErr != nil { - appStatus, ok := status.FromError(appErr) - if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(convertCode(appErr), appErr.Error()) - appStatus, _ = status.FromError(appErr) - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer(appStatus.Message()), true) - trInfo.tr.SetError() - } - if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e) - } - return appErr - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) - } - opts := &transport.Options{ - Last: true, - Delay: false, - } - if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err - } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e) - } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) - } - } - return err - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) - } - // TODO: Should we be logging if writing status failed here, like above? - // Should the logging be in WriteStatus? Should we ignore the WriteStatus - // error or allow the stats handler to see it? - return t.WriteStatus(stream, status.New(codes.OK, "")) + return err } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + return t.WriteStatus(stream, status.New(codes.OK, "")) } func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { @@ -814,9 +903,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp BeginTime: time.Now(), } sh.HandleRPC(stream.Context(), begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ EndTime: time.Now(), } @@ -824,24 +911,22 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } sh.HandleRPC(stream.Context(), end) - } - }() + }() + } if s.opts.cp != nil { stream.SetSendCompress(s.opts.cp.Type()) } ss := &serverStream{ - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.opts.codec, - cp: s.opts.cp, - dc: s.opts.dc, - maxMsgSize: s.opts.maxMsgSize, - trInfo: trInfo, - statsHandler: sh, - } - if ss.cp != nil { - ss.cbuf = new(bytes.Buffer) + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + cp: 
s.opts.cp, + dc: s.opts.dc, + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -913,12 +998,12 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.InvalidArgument, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -943,7 +1028,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -973,7 +1058,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1011,8 +1096,9 @@ func (s *Server) Stop() { s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server to accept new -// connections and RPCs and blocks until all the pending RPCs are finished. +// GracefulStop stops the gRPC server gracefully. 
It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. func (s *Server) GracefulStop() { s.mu.Lock() defer s.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go index 26e1a8e2f08..05b384c6931 100644 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -45,19 +30,22 @@ type ConnTagInfo struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // TODO add QOS related fields. } // RPCTagInfo defines the relevant information needed by RPC context tagger. type RPCTagInfo struct { // FullMethodName is the RPC method in the format of /package.service/method. FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool } // Handler defines the interface for the related stats handling (e.g., RPCs, connections). type Handler interface { // TagRPC can attach some information to the given context. - // The returned context is used in the rest lifetime of the RPC. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. TagRPC(context.Context, *RPCTagInfo) context.Context // HandleRPC processes the RPC stats. 
HandleRPC(context.Context, RPCStats) diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index c2c9a9dfa23..e844541e9c0 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -1,36 +1,23 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + // Package stats is for collecting and reporting various network and RPC stats. // This package is for monitoring purpose only. All fields are read-only. // All APIs are experimental. @@ -39,6 +26,8 @@ package stats import ( "net" "time" + + "golang.org/x/net/context" ) // RPCStats contains stats information about RPCs. @@ -49,7 +38,7 @@ type RPCStats interface { } // Begin contains stats when an RPC begins. -// FailFast are only valid if Client is true. +// FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool @@ -59,7 +48,7 @@ type Begin struct { FailFast bool } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} @@ -80,19 +69,19 @@ type InPayload struct { RecvTime time.Time } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. 
func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} // InHeader contains stats when a header is received. -// FullMethod, addresses and Compression are only valid if Client is false. type InHeader struct { // Client is true if this InHeader is from client side. Client bool // WireLength is the wire length of header. WireLength int + // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. @@ -103,7 +92,7 @@ type InHeader struct { Compression string } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} @@ -116,7 +105,7 @@ type InTrailer struct { WireLength int } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} @@ -137,19 +126,17 @@ type OutPayload struct { SentTime time.Time } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} // OutHeader contains stats when a header is sent. -// FullMethod, addresses and Compression are only valid if Client is true. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool - // WireLength is the wire length of header. - WireLength int + // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. 
@@ -160,7 +147,7 @@ type OutHeader struct { Compression string } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} @@ -173,7 +160,7 @@ type OutTrailer struct { WireLength int } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} @@ -184,7 +171,9 @@ type End struct { Client bool // EndTime is the time when the RPC ends. EndTime time.Time - // Error is the error just happened. It implements status.Status if non-nil. + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. Error error } @@ -221,3 +210,85 @@ type ConnEnd struct { func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. 
New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. 
+func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/BUILD b/vendor/google.golang.org/grpc/status/BUILD index a92cd5f4e04..84cb8afc49f 100644 --- a/vendor/google.golang.org/grpc/status/BUILD +++ b/vendor/google.golang.org/grpc/status/BUILD @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes:go_default_library", "//vendor/google.golang.org/genproto/googleapis/rpc/status:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", ], diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 99a4cbe5112..871dc4b31c7 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -1,33 +1,18 @@ /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -43,9 +28,11 @@ package status import ( + "errors" "fmt" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" ) @@ -143,3 +130,39 @@ func FromError(err error) (s *Status, ok bool) { } return nil, false } + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 33f1c787b34..75eab40b109 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -42,8 +27,10 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/transport" @@ -73,11 +60,17 @@ type Stream interface { // side. On server side, it simply returns the error to the caller. // SendMsg is called by generated code. Also Users can call SendMsg // directly when it is really needed in their use cases. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call SendMsg on the same stream in different goroutines. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message or the stream is // done. On client side, it returns io.EOF when the stream is done. On // any other error, it aborts the stream and returns an RPC status. On // server side, it simply returns the error to the caller. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call RecvMsg on the same stream in different goroutines. RecvMsg(m interface{}) error } @@ -93,6 +86,11 @@ type ClientStream interface { // CloseSend closes the send direction of the stream. It closes the stream // when non-nil error is met. CloseSend() error + // Stream.SendMsg() may return a non-nil error when something wrong happens sending + // the request. The returned error indicates the status of this sending, not the final + // status of the RPC. + // Always call Stream.RecvMsg() to get the final status if you care about the status of + // the RPC. 
Stream } @@ -109,29 +107,48 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var ( t transport.ClientTransport s *transport.Stream - put func() + done func(balancer.DoneInfo) cancel context.CancelFunc ) - c := defaultCallInfo - if mc, ok := cc.getMethodConfig(method); ok { - c.failFast = !mc.WaitForReady - if mc.Timeout > 0 { - ctx, cancel = context.WithTimeout(ctx, mc.Timeout) - } + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady } + + if mc.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer func() { + if err != nil { + cancel() + } + }() + } + + opts = append(cc.dopts.callOptions, opts...) for _, o := range opts { - if err := o.before(&c); err != nil { + if err := o.before(c); err != nil { return nil, toRPCErr(err) } } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, - Flush: desc.ServerStreams && desc.ClientStreams, + // If it's not client streaming, we should already have the request to be sent, + // so we don't flush the header. + // If it's client streaming, the user may never send a request or send it any + // time soon, so we ask the transport to flush the header. 
+ Flush: desc.ClientStreams, } if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } + if c.creds != nil { + callHdr.Creds = c.creds + } var trInfo traceInfo if EnableTracing { trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) @@ -151,32 +168,29 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - ctx = newContextWithRPCInfo(ctx) + ctx = newContextWithRPCInfo(ctx, c.failFast) sh := cc.dopts.copts.StatsHandler if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) begin := &stats.Begin{ Client: true, BeginTime: time.Now(), FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - } - defer func() { - if err != nil && sh != nil { - // Only handle end stats if err != nil. - end := &stats.End{ - Client: true, - Error: err, + defer func() { + if err != nil { + // Only handle end stats if err != nil. + end := &stats.End{ + Client: true, + Error: err, + } + sh.HandleRPC(ctx, end) } - sh.HandleRPC(ctx, end) - } - }() - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, + }() } for { - t, put, err = cc.getTransport(ctx, gopts) + t, done, err = cc.getTransport(ctx, c.failFast) if err != nil { // TODO(zhaoq): Probably revisit the error handling. if _, ok := status.FromError(err); ok { @@ -194,15 +208,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth s, err = t.NewStream(ctx, callHdr) if err != nil { - if _, ok := err.(transport.ConnectionError); ok && put != nil { + if _, ok := err.(transport.ConnectionError); ok && done != nil { // If error is connection error, transport was sending data on wire, // and we are not sure if anything has been sent on wire. // If error is not connection error, we are sure nothing has been sent. 
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) } - if put != nil { - put() - put = nil + if done != nil { + done(balancer.DoneInfo{Err: err}) + done = nil } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -211,20 +225,23 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } break } + // Set callInfo.peer object from stream's context. + if peer, ok := peer.FromContext(s.Context()); ok { + c.peer = peer + } cs := &clientStream{ - opts: opts, - c: c, - desc: desc, - codec: cc.dopts.codec, - cp: cc.dopts.cp, - dc: cc.dopts.dc, - maxMsgSize: cc.dopts.maxMsgSize, - cancel: cancel, + opts: opts, + c: c, + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + cancel: cancel, - put: put, - t: t, - s: s, - p: &parser{r: s}, + done: done, + t: t, + s: s, + p: &parser{r: s}, tracing: EnableTracing, trInfo: trInfo, @@ -232,9 +249,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth statsCtx: ctx, statsHandler: cc.dopts.copts.StatsHandler, } - if cc.dopts.cp != nil { - cs.cbuf = new(bytes.Buffer) - } // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination // when there is no pending I/O operations on this stream. go func() { @@ -263,23 +277,21 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // clientStream implements a client side Stream. 
type clientStream struct { - opts []CallOption - c callInfo - t transport.ClientTransport - s *transport.Stream - p *parser - desc *StreamDesc - codec Codec - cp Compressor - cbuf *bytes.Buffer - dc Decompressor - maxMsgSize int - cancel context.CancelFunc + opts []CallOption + c *callInfo + t transport.ClientTransport + s *transport.Stream + p *parser + desc *StreamDesc + codec Codec + cp Compressor + dc Decompressor + cancel context.CancelFunc tracing bool // set to EnableTracing when the clientStream is created. mu sync.Mutex - put func() + done func(balancer.DoneInfo) closed bool finished bool // trInfo.tr is set when the clientStream is created (if EnableTracing is true), @@ -329,7 +341,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return } if err == io.EOF { - // Specialize the process for server streaming. SendMesg is only called + // Specialize the process for server streaming. SendMsg is only called // once when creating the stream object. io.EOF needs to be skipped when // the rpc is early finished (before the stream object is created.). // TODO: It is probably better to move this into the generated code. @@ -349,16 +361,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { Client: true, } } - out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload) - defer func() { - if cs.cbuf != nil { - cs.cbuf.Reset() - } - }() + hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload) if err != nil { - return Errorf(codes.Internal, "grpc: %v", err) + return err } - err = cs.t.Write(cs.s, out, &transport.Options{Last: false}) + if cs.c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(data) > *cs.c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(data), *cs.c.maxSendMessageSize) + } + err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false}) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() cs.statsHandler.HandleRPC(cs.statsCtx, outPayload) @@ -373,7 +386,10 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { Client: true, } } - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, inPayload) + if cs.c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload) defer func() { // err != nil indicates the termination of the stream. if err != nil { @@ -396,7 +412,10 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { } // Special handling for client streaming rpc. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, nil) + if cs.c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil) cs.closeTransportStream(err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) @@ -424,7 +443,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { } func (cs *clientStream) CloseSend() (err error) { - err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) + err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true}) defer func() { if err != nil { cs.finish(err) @@ -464,15 +483,15 @@ func (cs *clientStream) finish(err error) { } }() for _, o := range cs.opts { - o.after(&cs.c) + o.after(cs.c) } - if cs.put != nil { + if cs.done != nil { updateRPCInfoInContext(cs.s.Context(), rpcInfo{ bytesSent: cs.s.BytesSent(), bytesReceived: cs.s.BytesReceived(), }) - cs.put() - cs.put = nil + cs.done(balancer.DoneInfo{Err: err}) + 
cs.done = nil } if cs.statsHandler != nil { end := &stats.End{ @@ -521,15 +540,15 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { - t transport.ServerTransport - s *transport.Stream - p *parser - codec Codec - cp Compressor - dc Decompressor - cbuf *bytes.Buffer - maxMsgSize int - trInfo *traceInfo + t transport.ServerTransport + s *transport.Stream + p *parser + codec Codec + cp Compressor + dc Decompressor + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo statsHandler stats.Handler @@ -573,22 +592,23 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } ss.mu.Unlock() } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } }() var outPayload *stats.OutPayload if ss.statsHandler != nil { outPayload = &stats.OutPayload{} } - out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload) - defer func() { - if ss.cbuf != nil { - ss.cbuf.Reset() - } - }() + hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload) if err != nil { - err = Errorf(codes.Internal, "grpc: %v", err) return err } - if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil { + if len(data) > ss.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(data), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } if outPayload != nil { @@ -612,12 +632,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } ss.mu.Unlock() } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } }() var inPayload *stats.InPayload if ss.statsHandler != nil { inPayload = &stats.InPayload{} } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil { if err == io.EOF { return err } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index 0f366476742..22b8fb50dea 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -47,8 +32,20 @@ type Info struct { // TODO: More to be added. } -// ServerInHandle defines the function which runs when a new stream is created -// on the server side. Note that it is executed in the per-connection I/O goroutine(s) instead -// of per-RPC goroutine. Therefore, users should NOT have any blocking/time-consuming -// work in this handle. Otherwise all the RPCs would slow down. +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. +// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". 
+// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index f6747e1dfa4..c1c96dedcb7 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -1,33 +1,18 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -46,7 +31,7 @@ import ( // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. // This should only be set before any RPCs are sent or received by this program. -var EnableTracing = true +var EnableTracing bool // methodFamily returns the trace family for the given method. // It turns "/pkg.Service/GetFoo" into "pkg.Service". @@ -91,6 +76,15 @@ func (f *firstLine) String() string { return line.String() } +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + // payload represents an RPC request or response payload. 
type payload struct { sent bool // whether this is an outgoing payload @@ -100,9 +94,9 @@ type payload struct { func (p payload) String() string { if p.sent { - return fmt.Sprintf("sent: %v", p.msg) + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) } - return fmt.Sprintf("recv: %v", p.msg) + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) } type fmtStringer struct { diff --git a/vendor/google.golang.org/grpc/transport/BUILD b/vendor/google.golang.org/grpc/transport/BUILD index d6b3e9fd45f..838ad3079a6 100644 --- a/vendor/google.golang.org/grpc/transport/BUILD +++ b/vendor/google.golang.org/grpc/transport/BUILD @@ -3,13 +3,13 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "bdp_estimator.go", "control.go", - "go16.go", - "go17.go", "handler_server.go", "http2_client.go", "http2_server.go", "http_util.go", + "log.go", "transport.go", ], importpath = "google.golang.org/grpc/transport", diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go new file mode 100644 index 00000000000..8dd2ed42792 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/bdp_estimator.go @@ -0,0 +1,143 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows + // will be increased to. + bdpLimit = (1 << 20) * 4 + // alpha is a constant factor used to keep a moving average + // of RTTs. + alpha = 0.9 + // If the current bdp sample is greater than or equal to + // our beta * our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bbp estimate by a factor of gamma. + beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +var ( + // Adding arbitrary data to ping so that its ack can be + // identified. + // Easter-egg: what does the ping message say? + bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} +) + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. 
This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. 
+ if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go index 8d29aee53d4..dd1a8d42e7e 100644 --- a/vendor/google.golang.org/grpc/transport/control.go +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -37,17 +22,18 @@ import ( "fmt" "math" "sync" + "sync/atomic" "time" "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" ) const ( // The default value of flow control window size in HTTP2 spec. defaultWindowSize = 65535 // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - initialConnWindowSize = defaultWindowSize * 16 // for a connection + initialWindowSize = defaultWindowSize // for an RPC infinity = time.Duration(math.MaxInt64) defaultClientKeepaliveTime = infinity defaultClientKeepaliveTimeout = time.Duration(20 * time.Second) @@ -58,11 +44,43 @@ const ( defaultServerKeepaliveTime = time.Duration(2 * time.Hour) defaultServerKeepaliveTimeout = time.Duration(20 * time.Second) defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute) + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultLocalSendQuota sets is default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. 
+ defaultLocalSendQuota = 64 * 1024 ) // The following defines various control items which could flow through // the control buffer of transport. They represent different aspects of // control tasks, e.g., flow control, settings, streaming resetting, etc. + +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool +} + +func (*headerFrame) item() {} + +type continuationFrame struct { + streamID uint32 + endHeaders bool + headerBlockFragment []byte +} + +type dataFrame struct { + streamID uint32 + endStream bool + d []byte + f func() +} + +func (*dataFrame) item() {} + +func (*continuationFrame) item() {} + type windowUpdate struct { streamID uint32 increment uint32 @@ -87,6 +105,8 @@ func (*resetStream) item() {} type goAway struct { code http2.ErrCode debugData []byte + headsUp bool + closeConn bool } func (*goAway) item() {} @@ -108,8 +128,9 @@ func (*ping) item() {} type quotaPool struct { c chan int - mu sync.Mutex - quota int + mu sync.Mutex + version uint32 + quota int } // newQuotaPool creates a quotaPool which has quota q available to consume. @@ -130,6 +151,10 @@ func newQuotaPool(q int) *quotaPool { func (qb *quotaPool) add(v int) { qb.mu.Lock() defer qb.mu.Unlock() + qb.lockedAdd(v) +} + +func (qb *quotaPool) lockedAdd(v int) { select { case n := <-qb.c: qb.quota += n @@ -150,6 +175,35 @@ func (qb *quotaPool) add(v int) { } } +func (qb *quotaPool) addAndUpdate(v int) { + qb.mu.Lock() + defer qb.mu.Unlock() + qb.lockedAdd(v) + // Update the version only after having added to the quota + // so that if acquireWithVesrion sees the new vesrion it is + // guaranteed to have seen the updated quota. + // Also, still keep this inside of the lock, so that when + // compareAndExecute is processing, this function doesn't + // get executed partially (quota gets updated but the version + // doesn't). 
+ atomic.AddUint32(&(qb.version), 1) +} + +func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) { + return qb.c, atomic.LoadUint32(&(qb.version)) +} + +func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool { + qb.mu.Lock() + defer qb.mu.Unlock() + if version == atomic.LoadUint32(&(qb.version)) { + success() + return true + } + failure() + return false +} + // acquire returns the channel on which available quota amounts are sent. func (qb *quotaPool) acquire() <-chan int { return qb.c @@ -157,16 +211,59 @@ func (qb *quotaPool) acquire() <-chan int { // inFlow deals with inbound flow control type inFlow struct { + mu sync.Mutex // The inbound flow control limit for pending data. limit uint32 - - mu sync.Mutex // pendingData is the overall data which have been received but not been // consumed by applications. pendingData uint32 // The amount of data the application has consumed but grpc has not sent // window update for them. Used to reduce window update frequency. pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) uint32 { + f.mu.Lock() + defer f.mu.Unlock() + d := n - f.limit + f.limit = n + return d +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + defer f.mu.Unlock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. 
A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 } // onData is invoked when some data frame is received. It updates pendingData. 
@@ -174,7 +271,7 @@ func (f *inFlow) onData(n uint32) error { f.mu.Lock() defer f.mu.Unlock() f.pendingData += n - if f.pendingData+f.pendingUpdate > f.limit { + if f.pendingData+f.pendingUpdate > f.limit+f.delta { return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) } return nil @@ -189,6 +286,13 @@ func (f *inFlow) onRead(n uint32) uint32 { return 0 } f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { wu := f.pendingUpdate @@ -198,10 +302,10 @@ func (f *inFlow) onRead(n uint32) uint32 { return 0 } -func (f *inFlow) resetPendingData() uint32 { +func (f *inFlow) resetPendingUpdate() uint32 { f.mu.Lock() defer f.mu.Unlock() - n := f.pendingData - f.pendingData = 0 + n := f.pendingUpdate + f.pendingUpdate = 0 return n } diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go deleted file mode 100644 index ee1c46bad57..00000000000 --- a/vendor/google.golang.org/grpc/transport/go16.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build go1.6,!go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go deleted file mode 100644 index 356f13ff197..00000000000 --- a/vendor/google.golang.org/grpc/transport/go17.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go index 24f306babbb..7e0fdb35938 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -1,32 +1,18 @@ /* - * Copyright 2016, Google Inc. - * All rights reserved. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Copyright 2016 gRPC authors. * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -47,6 +33,7 @@ import ( "sync" "time" + "github.com/golang/protobuf/proto" "golang.org/x/net/context" "golang.org/x/net/http2" "google.golang.org/grpc/codes" @@ -102,15 +89,6 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr continue } for _, v := range vv { - if k == "user-agent" { - // user-agent is special. Copying logic of http_util.go. - if i := strings.LastIndex(v, " "); i == -1 { - // There is no application user agent string being set - continue - } else { - v = v[:i] - } - } v, err := decodeMetadataHeader(k, v) if err != nil { return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err) @@ -144,6 +122,10 @@ type serverHandlerTransport struct { // ServeHTTP (HandleStreams) goroutine. The channel is closed // when WriteStatus is called. writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex } func (ht *serverHandlerTransport) Close() error { @@ -179,15 +161,24 @@ func (a strAddr) String() string { return string(a) } // do runs fn in the ServeHTTP goroutine. func (ht *serverHandlerTransport) do(fn func()) error { + // Avoid a panic writing to closed channel. Imperfect but maybe good enough. 
select { - case ht.writes <- fn: - return nil case <-ht.closedCh: return ErrConnClosing + default: + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } } } func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + err := ht.do(func() { ht.writeCommonHeaders(s) @@ -202,7 +193,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } - // TODO: Support Grpc-Status-Details-Bin + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } if md := s.Trailer(); len(md) > 0 { for k, vv := range md { @@ -218,7 +217,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } }) - close(ht.writes) + + if err == nil { // transport has not been closed + ht.Close() + close(ht.writes) + } return err } @@ -241,16 +244,17 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers h.Add("Trailer", "Grpc-Status") h.Add("Trailer", "Grpc-Message") - // TODO: Support Grpc-Status-Details-Bin + h.Add("Trailer", "Grpc-Status-Details-Bin") if s.sendCompress != "" { h.Set("Grpc-Encoding", s.sendCompress) } } -func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { return ht.do(func() { ht.writeCommonHeaders(s) + ht.rw.Write(hdr) ht.rw.Write(data) if !opts.Delay { ht.rw.(http.Flusher).Flush() @@ -309,13 +313,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace req := ht.req s := &Stream{ - id: 0, // irrelevant - 
windowHandler: func(int) {}, // nothing - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), } pr := &peer.Peer{ Addr: ht.RemoteAddr(), @@ -326,7 +330,10 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ctx = metadata.NewIncomingContext(ctx, ht.headerMD) ctx = peer.NewContext(ctx, pr) s.ctx = newContextWithStream(ctx, s) - s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, recv: s.buf}, + windowHandler: func(int) {}, + } // readerDone is closed when the Body.Read-ing goroutine exits. readerDone := make(chan struct{}) @@ -338,11 +345,11 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace for buf := make([]byte, readSize); ; { n, err := req.Body.Read(buf) if n > 0 { - s.buf.put(&recvMsg{data: buf[:n:n]}) + s.buf.put(recvMsg{data: buf[:n:n]}) buf = buf[n:] } if err != nil { - s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } if len(buf) == 0 { diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 380fff665fb..1abb62e6df4 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -48,7 +33,6 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -59,6 +43,7 @@ import ( // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { ctx context.Context + cancel context.CancelFunc target string // server name/addr userAgent string md interface{} @@ -68,17 +53,6 @@ type http2Client struct { authInfo credentials.AuthInfo // auth info about the connection nextID uint32 // the next stream ID to be used - // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by sending a value on writableChan - // and releases it by receiving from writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - // TODO(zhaoq): Maybe have a channel context? - shutdownChan chan struct{} - // errorChan is closed to notify the I/O error to the caller. - errorChan chan struct{} // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. goAway chan struct{} @@ -91,7 +65,7 @@ type http2Client struct { // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer + controlBuf *controlBuffer fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool @@ -101,6 +75,8 @@ type http2Client struct { // The scheme used: https if TLS is on, http otherwise. scheme string + isSecure bool + creds []credentials.PerRPCCredentials // Boolean to keep track of reading activity on transport. 
@@ -110,6 +86,11 @@ type http2Client struct { statsHandler stats.Handler + initialWindowSize int32 + + bdpEst *bdpEstimator + outQuotaVersion uint32 + mu sync.Mutex // guard the following variables state transportState // the state of underlying connection activeStreams map[uint32]*Stream @@ -117,8 +98,6 @@ type http2Client struct { maxStreams int // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 - // goAwayID records the Last-Stream-ID in the GoAway frame from the server. - goAwayID uint32 // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -130,7 +109,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if fn != nil { return fn(ctx, addr) } - return dialContext(ctx, "tcp", addr) + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) } func isTemporary(err error) bool { @@ -164,14 +143,23 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
-func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) { +func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) { scheme := "http" - conn, err := dial(ctx, opts.Dialer, addr.Addr) + ctx, cancel := context.WithCancel(ctx) + connectCtx, connectCancel := context.WithTimeout(ctx, timeout) + defer func() { + connectCancel() + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) if err != nil { if opts.FailOnNonTempDialError { - return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err) + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } // Any further errors will close the underlying connection defer func(conn net.Conn) { @@ -179,16 +167,20 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( conn.Close() } }(conn) - var authInfo credentials.AuthInfo + var ( + isSecure bool + authInfo credentials.AuthInfo + ) if creds := opts.TransportCredentials; creds != nil { scheme = "https" - conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn) + conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Addr, conn) if err != nil { // Credentials handshake errors are typically considered permanent // to avoid retrying on e.g. bad certificates. temp := isTemporary(err) - return nil, connectionErrorf(temp, err, "transport: %v", err) + return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err) } + isSecure = true } kp := opts.KeepaliveParams // Validate keepalive parameters. 
@@ -198,9 +190,24 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( if kp.Timeout == 0 { kp.Timeout = defaultClientKeepaliveTimeout } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } var buf bytes.Buffer + writeBufSize := defaultWriteBufSize + if opts.WriteBufferSize > 0 { + writeBufSize = opts.WriteBufferSize + } + readBufSize := defaultReadBufSize + if opts.ReadBufferSize > 0 { + readBufSize = opts.ReadBufferSize + } t := &http2Client{ ctx: ctx, + cancel: cancel, target: addr.Addr, userAgent: opts.UserAgent, md: addr.Metadata, @@ -209,27 +216,36 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( localAddr: conn.LocalAddr(), authInfo: authInfo, // The client initiated stream id is odd starting from 1. - nextID: 1, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - errorChan: make(chan struct{}), - goAway: make(chan struct{}), - awakenKeepalive: make(chan struct{}, 1), - framer: newFramer(conn), - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - scheme: scheme, - state: reachable, - activeStreams: make(map[uint32]*Stream), - creds: opts.PerRPCCredentials, - maxStreams: defaultMaxStreamsClient, - streamsQuota: newQuotaPool(defaultMaxStreamsClient), - streamSendQuota: defaultWindowSize, - kp: kp, - statsHandler: opts.StatsHandler, + nextID: 1, + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + framer: newFramer(conn, writeBufSize, readBufSize), + controlBuf: newControlBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + scheme: scheme, + state: reachable, + activeStreams: make(map[uint32]*Stream), + isSecure: 
isSecure, + creds: opts.PerRPCCredentials, + maxStreams: defaultMaxStreamsClient, + streamsQuota: newQuotaPool(defaultMaxStreamsClient), + streamSendQuota: defaultWindowSize, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + } + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } } // Make sure awakenKeepalive can't be written upon. // keepalive routine will make it writable, if need be. @@ -252,65 +268,75 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( n, err := t.conn.Write(clientPreface) if err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) } if n != len(clientPreface) { t.Close() return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) } - if initialWindowSize != defaultWindowSize { - err = t.framer.writeSettings(true, http2.Setting{ + if t.initialWindowSize != defaultWindowSize { + err = t.framer.fr.WriteSettings(http2.Setting{ ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize), + Val: uint32(t.initialWindowSize), }) } else { - err = t.framer.writeSettings(true) + err = t.framer.fr.WriteSettings() } if err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) } // Adjust the connection flow control window if needed. 
- if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) } } - go t.controller() + t.framer.writer.Flush() + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() if t.kp.Time != infinity { go t.keepalive() } - t.writableChan <- 0 return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ - id: t.nextID, - done: make(chan struct{}), - goAway: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - fc: &inFlow{limit: initialWindowSize}, - sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), - headerChan: make(chan struct{}), + id: t.nextID, + done: make(chan struct{}), + goAway: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), + localSendQuota: newQuotaPool(defaultLocalSendQuota), + headerChan: make(chan struct{}), } t.nextID += 2 - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) } // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. 
s.ctx = ctx - s.dec = &recvBufferReader{ - ctx: s.ctx, - goAway: s.goAway, - recv: s.buf, + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, } + return s } @@ -324,31 +350,51 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if t.authInfo != nil { pr.AuthInfo = t.authInfo } - userCtx := ctx ctx = peer.NewContext(ctx, pr) - authData := make(map[string]string) - for _, c := range t.creds { + var ( + authData = make(map[string]string) + audience string + ) + // Create an audience string only if needed. + if len(t.creds) > 0 || callHdr.Creds != nil { // Construct URI required to get auth request metadata. - var port string - if pos := strings.LastIndex(t.target, ":"); pos != -1 { - // Omit port if it is the default one. - if t.target[pos+1:] != "443" { - port = ":" + t.target[pos+1:] - } - } + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") pos := strings.LastIndex(callHdr.Method, "/") if pos == -1 { - return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + pos = len(callHdr.Method) } - audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] + audience = "https://" + host + callHdr.Method[:pos] + } + for _, c := range t.creds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err) + return nil, streamErrorf(codes.Internal, "transport: %v", err) } for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) authData[k] = v } } + callAuthData := map[string]string{} + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. 
+ if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, streamErrorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } t.mu.Lock() if t.activeStreams == nil { t.mu.Unlock() @@ -363,7 +409,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, ErrConnClosing } t.mu.Unlock() - sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) + sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire()) if err != nil { return nil, err } @@ -371,79 +417,49 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if sq > 1 { t.streamsQuota.add(sq - 1) } - if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - // Return the quota back now because there is no stream returned to the caller. - if _, ok := err.(StreamError); ok { - t.streamsQuota.add(1) - } - return nil, err - } - t.mu.Lock() - if t.state == draining { - t.mu.Unlock() - t.streamsQuota.add(1) - // Need to make t writable again so that the rpc in flight can still proceed. - t.writableChan <- 0 - return nil, ErrStreamDrain - } - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing - } - s := t.newStream(ctx, callHdr) - s.clientStatsCtx = userCtx - t.activeStreams[s.id] = s - // If the number of active streams change from 0 to 1, then check if keepalive - // has gone dormant. If so, wake it up. - if len(t.activeStreams) == 1 { - select { - case t.awakenKeepalive <- struct{}{}: - t.framer.writePing(false, false, [8]byte{}) - default: - } - } - - t.mu.Unlock() - - // HPACK encodes various headers. 
Note that once WriteField(...) is - // called, the corresponding headers/continuation frame has to be sent - // because hpack.Encoder is stateful. - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) - t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. + hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) if callHdr.SendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: 
callHdr.SendCompress}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. timeout := dl.Sub(time.Now()) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } - for k, v := range authData { - // Capital header names are illegal in HTTP/2. - k = strings.ToLower(k) - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - var ( - hasMD bool - endHeaders bool - ) if md, ok := metadata.FromOutgoingContext(ctx); ok { - hasMD = true for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. 
if isReservedHeader(k) { continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } @@ -453,60 +469,56 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } - first := true - bufLen := t.hBuf.Len() - // Sends the headers in a single batch even when they span multiple frames. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - var flush bool - if endHeaders && (hasMD || callHdr.Flush) { - flush = true - } - if first { - // Sends a HeadersFrame to server to start a new stream. - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: t.hBuf.Next(size), - EndStream: false, - EndHeaders: endHeaders, - } - // Do a force flush for the buffered frames iff it is the last headers frame - // and there is header metadata to be sent. Otherwise, there is flushing until - // the corresponding data frame is written. - err = t.framer.writeHeaders(flush, p) - first = false - } else { - // Sends Continuation frames for the leftover headers. - err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) - } - if err != nil { - t.notifyError(err) - return nil, connectionErrorf(true, err, "transport: %v", err) + t.mu.Lock() + if t.state == draining { + t.mu.Unlock() + t.streamsQuota.add(1) + return nil, ErrStreamDrain + } + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + s := t.newStream(ctx, callHdr) + t.activeStreams[s.id] = s + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. 
If so, wake it up. + if len(t.activeStreams) == 1 { + select { + case t.awakenKeepalive <- struct{}{}: + t.controlBuf.put(&ping{data: [8]byte{}}) + // Fill the awakenKeepalive channel again as this channel must be + // kept non-writable except at the point that the keepalive() + // goroutine is waiting either to be awaken or shutdown. + t.awakenKeepalive <- struct{}{} + default: } } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) + t.mu.Unlock() + + s.mu.Lock() s.bytesSent = true + s.mu.Unlock() if t.statsHandler != nil { outHeader := &stats.OutHeader{ Client: true, - WireLength: bufLen, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: callHdr.SendCompress, } - t.statsHandler.HandleRPC(s.clientStatsCtx, outHeader) + t.statsHandler.HandleRPC(s.ctx, outHeader) } - t.writableChan <- 0 return s, nil } @@ -518,6 +530,10 @@ func (t *http2Client) CloseStream(s *Stream, err error) { t.mu.Unlock() return } + if err != nil { + // notify in-flight streams, before the deletion + s.write(recvMsg{err: err}) + } delete(t.activeStreams, s.id) if t.state == draining && len(t.activeStreams) == 0 { // The transport is draining and s is the last live stream on t. 
@@ -547,11 +563,6 @@ func (t *http2Client) CloseStream(s *Stream, err error) { s.mu.Lock() rstStream = s.rstStream rstError = s.rstError - if q := s.fc.resetPendingData(); q > 0 { - if n := t.fc.onRead(q); n > 0 { - t.controlBuf.put(&windowUpdate{0, n}) - } - } if s.state == streamDone { s.mu.Unlock() return @@ -577,12 +588,9 @@ func (t *http2Client) Close() (err error) { t.mu.Unlock() return } - if t.state == reachable || t.state == draining { - close(t.errorChan) - } t.state = closing t.mu.Unlock() - close(t.shutdownChan) + t.cancel() err = t.conn.Close() t.mu.Lock() streams := t.activeStreams @@ -604,41 +612,18 @@ func (t *http2Client) Close() (err error) { } t.statsHandler.HandleConn(t.ctx, connEnd) } - return + return err } +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. func (t *http2Client) GracefulClose() error { t.mu.Lock() switch t.state { - case unreachable: - // The server may close the connection concurrently. t is not available for - // any streams. Close it now. - t.mu.Unlock() - t.Close() - return nil - case closing: - t.mu.Unlock() - return nil - } - // Notify the streams which were initiated after the server sent GOAWAY. - select { - case <-t.goAway: - n := t.prevGoAwayID - if n == 0 && t.nextID > 1 { - n = t.nextID - 2 - } - m := t.goAwayID + 2 - if m == 2 { - m = 1 - } - for i := m; i <= n; i += 2 { - if s, ok := t.activeStreams[i]; ok { - close(s.goAway) - } - } - default: - } - if t.state == draining { + case closing, draining: t.mu.Unlock() return nil } @@ -653,21 +638,38 @@ func (t *http2Client) GracefulClose() error { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. 
-// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later -// if it improves the performance. -func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { - r := bytes.NewBuffer(data) - for { - var p []byte - if r.Len() > 0 { +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + + if hdr == nil && data == nil && opts.Last { + // stream.CloseSend uses this to send an empty frame with endStream=True + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}}) + return nil + } + // Add data to header frame so that we can equally distribute data across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + for idx, r := range [][]byte{hdr, data} { + for len(r) > 0 { size := http2MaxFrameLen // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire()) + quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() + sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan) if err != nil { return err } // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire()) if err != nil { return err } @@ -677,69 +679,51 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { if tq < size { size = tq } - p = r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.add(sq - ps) + if size > len(r) { + size = len(r) } + p := r[:size] + ps := len(p) if ps < tq { // Overbooked transport quota. Return it back. 
t.sendQuotaPool.add(tq - ps) } - } - var ( - endStream bool - forceFlush bool - ) - if opts.Last && r.Len() == 0 { - endStream = true - } - // Indicate there is a writer who is about to write a data frame. - t.framer.adjustNumWriters(1) - // Got some quota. Try to acquire writing privilege on the transport. - if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil { - if _, ok := err.(StreamError); ok || err == io.EOF { - // Return the connection quota back. - t.sendQuotaPool.add(len(p)) + // Acquire local send quota to be able to write to the controlBuf. + ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire()) + if err != nil { + if _, ok := err.(ConnectionError); !ok { + t.sendQuotaPool.add(ps) + } + return err } - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) + s.localSendQuota.add(ltq - ps) // It's ok if we make it negative. + var endStream bool + // See if this is the last frame to be written. + if opts.Last { + if len(r)-size == 0 { // No more data in r after this iteration. + if idx == 0 { // We're writing data header. + if len(data) == 0 { // There's no data to follow. + endStream = true + } + } else { // We're writing data. 
+ endStream = true + } + } } - return err - } - select { - case <-s.ctx.Done(): - t.sendQuotaPool.add(len(p)) - if t.framer.adjustNumWriters(-1) == 0 { - t.controlBuf.put(&flushIO{}) + success := func() { + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }}) + if ps < sq { + s.sendQuotaPool.lockedAdd(sq - ps) + } + r = r[ps:] + } + failure := func() { + s.sendQuotaPool.lockedAdd(sq) + } + if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { + t.sendQuotaPool.add(ps) + s.localSendQuota.add(ps) } - t.writableChan <- 0 - return ContextErr(s.ctx.Err()) - default: - } - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { - // Do a force flush iff this is last frame for the entire gRPC message - // and the caller is the only writer at this moment. - forceFlush = true - } - // If WriteData fails, all the pending streams will be handled - // by http2Client.Close(). No explicit CloseStream() needs to be - // invoked. - if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { - t.notifyError(err) - return connectionErrorf(true, err, "transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - if r.Len() == 0 { - break } } if !opts.Last { @@ -760,6 +744,24 @@ func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { return s, ok } +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + // Piggyback connection's window update along. 
+ if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. @@ -769,41 +771,76 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { if s.state == streamDone { return } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } if w := s.fc.onRead(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } t.controlBuf.put(&windowUpdate{s.id, w}) } } +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) + t.controlBuf.put(&settings{ + ack: false, + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: uint32(n), + }, + }, + }) +} + func (t *http2Client) handleData(f *http2.DataFrame) { size := f.Header().Length - if err := t.fc.onData(uint32(size)); err != nil { - t.notifyError(connectionErrorf(true, err, "%v", err)) - return + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(uint32(size)) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. 
+ // + // Furthermore, if a bdpPing is being sent out we can piggyback + // connection's window update for the bytes we just received. + if sendBDPPing { + if size != 0 { // Could've been an empty data frame. + t.controlBuf.put(&windowUpdate{0, uint32(size)}) + } + t.controlBuf.put(bdpPing) + } else { + if err := t.fc.onData(uint32(size)); err != nil { + t.Close() + return + } + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if size > 0 { - if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if err := s.fc.onData(uint32(size)); err != nil { @@ -859,10 +896,10 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { - grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) statusCode = codes.Unknown } - s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %d", f.ErrCode)) + s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -881,7 +918,11 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) { } func (t *http2Client) handlePing(f *http2.PingFrame) { - if f.IsAck() { // Do nothing. + if f.IsAck() { + // Maybe it's a BDP ping. 
+ if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } return } pingAck := &ping{ack: true} @@ -890,36 +931,56 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - grpclog.Printf("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") - } t.mu.Lock() - if t.state == reachable || t.state == draining { - if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { - t.mu.Unlock() - t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) - return - } - select { - case <-t.goAway: - id := t.goAwayID - // t.goAway has been closed (i.e.,multiple GoAways). - if id < f.LastStreamID { - t.mu.Unlock() - t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID)) - return - } - t.prevGoAwayID = id - t.goAwayID = f.LastStreamID - t.mu.Unlock() - return - default: - t.setGoAwayReason(f) - } - t.goAwayID = f.LastStreamID - close(t.goAway) + if t.state != reachable && t.state != draining { + t.mu.Unlock() + return } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387). + // The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay + // with the ID of the last stream the server will process. + // Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we + // close all streams created after the second GoAwayId. This way streams that were in-flight while the GoAway from server + // was being sent don't get killed. 
+ select { + case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close() + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + t.state = draining + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + close(stream.goAway) + } + } + t.prevGoAwayID = id + active := len(t.activeStreams) t.mu.Unlock() + if active == 0 { + t.Close() + } } // setGoAwayReason sets the value of t.goAwayReason based @@ -960,20 +1021,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if !ok { return } + s.mu.Lock() s.bytesReceived = true + s.mu.Unlock() var state decodeState - for _, hf := range frame.Fields { - if err := state.processHeaderField(hf); err != nil { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: err}) - // Something wrong. Stops reading even when there is remaining. - return + if err := state.decodeResponseHeader(frame); err != nil { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true } + s.mu.Unlock() + s.write(recvMsg{err: err}) + // Something wrong. Stops reading even when there is remaining. 
+ return } endStream := frame.StreamEnded() @@ -985,13 +1046,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Client: true, WireLength: int(frame.Header().Length), } - t.statsHandler.HandleRPC(s.clientStatsCtx, inHeader) + t.statsHandler.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), } - t.statsHandler.HandleRPC(s.clientStatsCtx, inTrailer) + t.statsHandler.HandleRPC(s.ctx, inTrailer) } } }() @@ -1039,22 +1100,22 @@ func handleMalformedHTTP2(s *Stream, err error) { // TODO(zhaoq): Check the validity of the incoming frame sequence. func (t *http2Client) reader() { // Check the validity of server preface. - frame, err := t.framer.readFrame() + frame, err := t.framer.fr.ReadFrame() if err != nil { - t.notifyError(err) + t.Close() return } atomic.CompareAndSwapUint32(&t.activity, 0, 1) sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.notifyError(err) + t.Close() return } t.handleSettings(sf) // loop to keep reading incoming messages on this transport. for { - frame, err := t.framer.readFrame() + frame, err := t.framer.fr.ReadFrame() atomic.CompareAndSwapUint32(&t.activity, 0, 1) if err != nil { // Abort an active stream if the http2.Framer returns a @@ -1066,12 +1127,12 @@ func (t *http2Client) reader() { t.mu.Unlock() if s != nil { // use error detail to provide better err message - handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) + handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail())) } continue } else { // Transport error. 
- t.notifyError(err) + t.Close() return } } @@ -1091,7 +1152,7 @@ func (t *http2Client) reader() { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: - grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) + errorf("transport: http2Client.reader got unhandled frame type %v.", frame) } } } @@ -1115,7 +1176,7 @@ func (t *http2Client) applySettings(ss []http2.Setting) { t.mu.Lock() for _, stream := range t.activeStreams { // Adjust the sending quota for each stream. - stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota)) + stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) } t.streamSendQuota = s.Val t.mu.Unlock() @@ -1123,49 +1184,78 @@ func (t *http2Client) applySettings(ss []http2.Setting) { } } -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. -func (t *http2Client) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() - select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - // If the server needs to be to intimated about stream closing, - // then we need to make sure the RST_STREAM frame is written to - // the wire before the headers of the next stream waiting on - // streamQuota. We ensure this by adding to the streamsQuota pool - // only after having acquired the writableChan to send RST_STREAM. 
- t.streamsQuota.add(1) - t.framer.writeRSTStream(true, i.streamID, i.code) - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: - return - } - case <-t.shutdownChan: - return +// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) +// is duplicated between the client and the server. +// The transport layer needs to be refactored to take care of this. +func (t *http2Client) itemHandler(i item) error { + var err error + switch i := i.(type) { + case *dataFrame: + err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d) + if err == nil { + i.f() } + case *headerFrame: + t.hBuf.Reset() + for _, f := range i.hf { + t.hEnc.WriteField(f) + } + endHeaders := false + first := true + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: i.streamID, + BlockFragment: t.hBuf.Next(size), + EndStream: i.endStream, + EndHeaders: endHeaders, + }) + } else { + err = t.framer.fr.WriteContinuation( + i.streamID, + endHeaders, + t.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + case *windowUpdate: + err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) + case *settings: + if i.ack { + t.applySettings(i.ss) + err = t.framer.fr.WriteSettingsAck() + } else { + err = t.framer.fr.WriteSettings(i.ss...) + } + case *resetStream: + // If the server needs to be to intimated about stream closing, + // then we need to make sure the RST_STREAM frame is written to + // the wire before the headers of the next stream waiting on + // streamQuota. We ensure this by adding to the streamsQuota pool + // only after having acquired the writableChan to send RST_STREAM. 
+ err = t.framer.fr.WriteRSTStream(i.streamID, i.code) + t.streamsQuota.add(1) + case *flushIO: + err = t.framer.writer.Flush() + case *ping: + if !i.ack { + t.bdpEst.timesnap(i.data) + } + err = t.framer.fr.WritePing(i.ack, i.data) + default: + errorf("transport: http2Client.controller got unexpected item type %v\n", i) } + return err } // keepalive running in a separate goroutune makes sure the connection is alive by sending pings. @@ -1189,7 +1279,7 @@ func (t *http2Client) keepalive() { case <-t.awakenKeepalive: // If the control gets here a ping has been sent // need to reset the timer with keepalive.Timeout. - case <-t.shutdownChan: + case <-t.ctx.Done(): return } } else { @@ -1208,13 +1298,13 @@ func (t *http2Client) keepalive() { } t.Close() return - case <-t.shutdownChan: + case <-t.ctx.Done(): if !timer.Stop() { <-timer.C } return } - case <-t.shutdownChan: + case <-t.ctx.Done(): if !timer.Stop() { <-timer.C } @@ -1224,25 +1314,9 @@ func (t *http2Client) keepalive() { } func (t *http2Client) Error() <-chan struct{} { - return t.errorChan + return t.ctx.Done() } func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } - -func (t *http2Client) notifyError(err error) { - t.mu.Lock() - // make sure t.errorChan is closed only once. - if t.state == draining { - t.mu.Unlock() - t.Close() - return - } - if t.state == reachable { - t.state = unreachable - close(t.errorChan) - grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) - } - t.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index 14cd19c64c6..00df8eed0fd 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -36,6 +21,7 @@ package transport import ( "bytes" "errors" + "fmt" "io" "math" "math/rand" @@ -51,7 +37,6 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -67,35 +52,25 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { ctx context.Context + cancel context.CancelFunc conn net.Conn remoteAddr net.Addr localAddr net.Addr maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle - // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by receiving a value on writableChan - // and releases it by sending on writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - shutdownChan chan struct{} - framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder - + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer + controlBuf *controlBuffer fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool - - stats stats.Handler - + stats stats.Handler // Flag to keep track of reading activity on transport. // 1 is true and 0 is false. 
activity uint32 // Accessed atomically. @@ -111,15 +86,25 @@ type http2Server struct { // Flag to signify that number of ping strikes should be reset to 0. // This is set whenever data or header frames are sent. // 1 means yes. - resetPingStrikes uint32 // Accessed atomically. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator - mu sync.Mutex // guard the following + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} state transportState activeStreams map[uint32]*Stream // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 // idle is the time instant when the connection went idle. - // This is either the begining of the connection or when the number of + // This is either the beginning of the connection or when the number of // RPCs go down to 0. // When the connection is busy, this value is set to 0. idle time.Time @@ -128,32 +113,51 @@ type http2Server struct { // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. 
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { - framer := newFramer(conn) + writeBufSize := defaultWriteBufSize + if config.WriteBufferSize > 0 { + writeBufSize = config.WriteBufferSize + } + readBufSize := defaultReadBufSize + if config.ReadBufferSize > 0 { + readBufSize = config.ReadBufferSize + } + framer := newFramer(conn, writeBufSize, readBufSize) // Send initial settings as connection preface to client. - var settings []http2.Setting + var isettings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. maxStreams := config.MaxStreams if maxStreams == 0 { maxStreams = math.MaxUint32 } else { - settings = append(settings, http2.Setting{ + isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, Val: maxStreams, }) } - if initialWindowSize != defaultWindowSize { - settings = append(settings, http2.Setting{ - ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize)}) + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false } - if err := framer.writeSettings(true, settings...); err != nil { - return nil, connectionErrorf(true, err, "transport: %v", err) + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) } // Adjust the connection flow control window if needed. 
- if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, connectionErrorf(true, err, "transport: %v", err) + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) } } kp := config.KeepaliveParams @@ -179,29 +183,36 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err kep.MinTime = defaultKeepalivePolicyMinTime } var buf bytes.Buffer + ctx, cancel := context.WithCancel(context.Background()) t := &http2Server{ - ctx: context.Background(), - conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, - framer: framer, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - maxStreams: maxStreams, - inTapHandle: config.InTapHandle, - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - state: reachable, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - activeStreams: make(map[uint32]*Stream), - streamSendQuota: defaultWindowSize, - stats: config.StatsHandler, - kp: kp, - idle: time.Now(), - kep: kep, + ctx: ctx, + cancel: cancel, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + controlBuf: newControlBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + state: reachable, + activeStreams: make(map[uint32]*Stream), + streamSendQuota: defaultWindowSize, + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: 
t.updateFlowControl, + } } if t.stats != nil { t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ @@ -211,37 +222,68 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } - go t.controller() + t.framer.writer.Flush() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreUint32(&t.activity, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() go t.keepalive() - t.writableChan <- 0 return t, nil } // operateHeader takes action on the decoded headers. 
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { - buf := newRecvBuffer() - s := &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: &inFlow{limit: initialWindowSize}, - } + streamID := frame.Header().StreamID var state decodeState for _, hf := range frame.Fields { if err := state.processHeaderField(hf); err != nil { if se, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) + t.controlBuf.put(&resetStream{streamID, statusCodeConvTab[se.Code]}) } return } } + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.encoding, + method: state.method, + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - s.recvCompress = state.encoding if state.timeoutSet { s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) } else { @@ -263,13 +305,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if len(state.mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) } - - s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, + if state.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) + } + if state.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) } - s.recvCompress = state.encoding - s.method = state.method if t.inTapHandle != nil { var err error info := &tap.Info{ @@ -277,7 +318,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { - // TODO: Log the real error. 
+ warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) return } @@ -289,24 +330,25 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) + t.controlBuf.put(&resetStream{streamID, http2.ErrCodeRefusedStream}) return } - if s.id%2 != 1 || s.id <= t.maxStreamID { + if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. - grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id) + errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) return true } - t.maxStreamID = s.id + t.maxStreamID = streamID s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) - t.activeStreams[s.id] = s + s.localSendQuota = newQuotaPool(defaultLocalSendQuota) + t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } t.mu.Unlock() - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) if t.stats != nil { @@ -320,6 +362,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } t.stats.HandleRPC(s.ctx, inHeader) } + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } handle(s) return } @@ -328,40 +379,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - // Check the validity of client preface. 
- preface := make([]byte, len(clientPreface)) - if _, err := io.ReadFull(t.conn, preface); err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) - t.Close() - return - } - if !bytes.Equal(preface, clientPreface) { - grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) - t.Close() - return - } - - frame, err := t.framer.readFrame() - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return - } - if err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) - t.Close() - return - } - atomic.StoreUint32(&t.activity, 1) - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) - t.Close() - return - } - t.handleSettings(sf) - for { - frame, err := t.framer.readFrame() + frame, err := t.framer.fr.ReadFrame() atomic.StoreUint32(&t.activity, 1) if err != nil { if se, ok := err.(http2.StreamError); ok { @@ -378,7 +397,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. t.Close() return } - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() return } @@ -401,7 +420,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. 
default: - grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } } } @@ -421,6 +440,23 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { return s, true } +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. @@ -430,42 +466,78 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) { if s.state == streamDone { return } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } if w := s.fc.onRead(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } t.controlBuf.put(&windowUpdate{s.id, w}) } } +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. 
+func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) + t.controlBuf.put(&settings{ + ack: false, + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: uint32(n), + }, + }, + }) + +} + func (t *http2Server) handleData(f *http2.DataFrame) { size := f.Header().Length - if err := t.fc.onData(uint32(size)); err != nil { - grpclog.Printf("transport: http2Server %v", err) - t.Close() - return + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(uint32(size)) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + // Furthermore, if a bdpPing is being sent out we can piggyback + // connection's window update for the bytes we just received. + if sendBDPPing { + if size != 0 { // Could be an empty frame. + t.controlBuf.put(&windowUpdate{0, uint32(size)}) + } + t.controlBuf.put(bdpPing) + } else { + if err := t.fc.onData(uint32(size)); err != nil { + errorf("transport: http2Server %v", err) + t.Close() + return + } + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } } // Select the right stream to dispatch. 
s, ok := t.getStream(f) if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if size > 0 { - if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if err := s.fc.onData(uint32(size)); err != nil { @@ -517,17 +589,38 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { ss = append(ss, s) return nil }) - // The settings will be applied once the ack is sent. t.controlBuf.put(&settings{ack: true, ss: ss}) } +func (t *http2Server) applySettings(ss []http2.Setting) { + for _, s := range ss { + if s.ID == http2.SettingInitialWindowSize { + t.mu.Lock() + for _, stream := range t.activeStreams { + stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() + } + + } +} + const ( maxPingStrikes = 2 defaultPingTimeout = 2 * time.Hour ) func (t *http2Server) handlePing(f *http2.PingFrame) { - if f.IsAck() { // Do nothing. + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } return } pingAck := &ping{ack: true} @@ -550,7 +643,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { t.mu.Unlock() if ns < 1 && !t.kep.PermitWithoutStream { // Keepalive shouldn't be active thus, this new ping should - // have come after atleast defaultPingTimeout. + // have come after at least defaultPingTimeout. 
if t.lastPingAt.Add(defaultPingTimeout).After(now) { t.pingStrikes++ } @@ -563,7 +656,8 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings")}) + errorf("transport: Got to too many pings from the client, closing the connection.") + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) } } @@ -579,47 +673,16 @@ func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { } } -func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { - first := true - endHeaders := false - var err error - defer func() { - if err == nil { - // Reset ping strikes when seding headers since that might cause the - // peer to send ping. - atomic.StoreUint32(&t.resetPingStrikes, 1) - } - }() - // Sends the headers in a single batch. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: b.Next(size), - EndStream: endStream, - EndHeaders: endHeaders, - } - err = t.framer.writeHeaders(endHeaders, p) - first = false - } else { - err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size)) - } - if err != nil { - t.Close() - return connectionErrorf(true, err, "transport: %v", err) - } - } - return nil -} - // WriteHeader sends the header metedata md back to the client. 
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + s.mu.Lock() if s.headerOk || s.state == streamDone { s.mu.Unlock() @@ -635,14 +698,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } md = s.header s.mu.Unlock() - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) if s.sendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } for k, vv := range md { if isReservedHeader(k) { @@ -650,20 +712,20 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } - bufLen := t.hBuf.Len() - if err := t.writeHeaders(s, t.hBuf, false); err != nil { - return err - } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) if t.stats != nil { outHeader := &stats.OutHeader{ - WireLength: bufLen, + //WireLength: // 
TODO(mmukhi): Revisit this later, if needed. } t.stats.HandleRPC(s.Context(), outHeader) } - t.writableChan <- 0 return nil } @@ -672,6 +734,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + select { + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + var headersSent, hasHeader bool s.mu.Lock() if s.state == streamDone { @@ -691,20 +759,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headersSent = true } - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. 
if !headersSent { - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) } - t.hEnc.WriteField( - hpack.HeaderField{ - Name: "grpc-status", - Value: strconv.Itoa(int(st.Code())), - }) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) @@ -713,7 +776,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { panic(err) } - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } // Attach the trailer metadata. 
@@ -723,29 +786,32 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } - bufLen := t.hBuf.Len() - if err := t.writeHeaders(s, t.hBuf, true); err != nil { - t.Close() - return err - } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + }) if t.stats != nil { - outTrailer := &stats.OutTrailer{ - WireLength: bufLen, - } - t.stats.HandleRPC(s.Context(), outTrailer) + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } t.closeStream(s) - t.writableChan <- 0 return nil } // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) { - // TODO(zhaoq): Support multi-writers for a single stream. +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + var writeHeaderFrame bool s.mu.Lock() if s.state == streamDone { @@ -759,107 +825,81 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) { if writeHeaderFrame { t.WriteHeader(s, nil) } - defer func() { - if err == nil { + // Add data to header frame so that we can equally distribute data across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + for _, r := range [][]byte{hdr, data} { + for len(r) > 0 { + size := http2MaxFrameLen + // Wait until the stream has some quota to send the data. 
+ quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() + sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan) + if err != nil { + return err + } + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire()) + if err != nil { + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + if size > len(r) { + size = len(r) + } + p := r[:size] + ps := len(p) + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + // Acquire local send quota to be able to write to the controlBuf. + ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire()) + if err != nil { + if _, ok := err.(ConnectionError); !ok { + t.sendQuotaPool.add(ps) + } + return err + } + s.localSendQuota.add(ltq - ps) // It's ok we make this negative. // Reset ping strikes when sending data since this might cause // the peer to send ping. atomic.StoreUint32(&t.resetPingStrikes, 1) - } - }() - r := bytes.NewBuffer(data) - for { - if r.Len() == 0 { - return nil - } - size := http2MaxFrameLen - // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire()) - if err != nil { - return err - } - // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire()) - if err != nil { - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - p := r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.add(sq - ps) - } - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - t.framer.adjustNumWriters(1) - // Got some quota. Try to acquire writing privilege on the - // transport. 
- if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - if _, ok := err.(StreamError); ok { - // Return the connection quota back. + success := func() { + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() { + s.localSendQuota.add(ps) + }}) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.lockedAdd(sq - ps) + } + r = r[ps:] + } + failure := func() { + s.sendQuotaPool.lockedAdd(sq) + } + if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { t.sendQuotaPool.add(ps) + s.localSendQuota.add(ps) } - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) - } - return err } - select { - case <-s.ctx.Done(): - t.sendQuotaPool.add(ps) - if t.framer.adjustNumWriters(-1) == 0 { - t.controlBuf.put(&flushIO{}) - } - t.writableChan <- 0 - return ContextErr(s.ctx.Err()) - default: - } - var forceFlush bool - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { - forceFlush = true - } - if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { - t.Close() - return connectionErrorf(true, err, "transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - } - -} - -func (t *http2Server) applySettings(ss []http2.Setting) { - for _, s := range ss { - if s.ID == http2.SettingInitialWindowSize { - t.mu.Lock() - defer t.mu.Unlock() - for _, stream := range t.activeStreams { - stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota)) - } - t.streamSendQuota = s.Val - } - } + return nil } // keepalive running in a separate goroutine does the following: // 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. // 2. 
Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. // 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. -// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-resposive connection +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection // after an additional duration of keepalive.Timeout. func (t *http2Server) keepalive() { p := &ping{} @@ -868,7 +908,7 @@ func (t *http2Server) keepalive() { maxAge := time.NewTimer(t.kp.MaxConnectionAge) keepalive := time.NewTimer(t.kp.Time) // NOTE: All exit paths of this function should reset their - // respecitve timers. A failure to do so will cause the + // respective timers. A failure to do so will cause the // following clean-up to deadlock and eventually leak. defer func() { if !maxIdle.Stop() { @@ -892,23 +932,18 @@ func (t *http2Server) keepalive() { continue } val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.state = draining - t.mu.Unlock() - t.Drain() + t.drain(http2.ErrCodeNo, []byte{}) // Reseting the timer so that the clean-up doesn't deadlock. maxIdle.Reset(infinity) return } - t.mu.Unlock() maxIdle.Reset(val) case <-maxAge.C: - t.mu.Lock() - t.state = draining - t.mu.Unlock() - t.Drain() + t.drain(http2.ErrCodeNo, []byte{}) maxAge.Reset(t.kp.MaxConnectionAgeGrace) select { case <-maxAge.C: @@ -916,7 +951,7 @@ func (t *http2Server) keepalive() { t.Close() // Reseting the timer so that the clean-up doesn't deadlock. 
maxAge.Reset(infinity) - case <-t.shutdownChan: + case <-t.ctx.Done(): } return case <-keepalive.C: @@ -934,69 +969,137 @@ func (t *http2Server) keepalive() { pingSent = true t.controlBuf.put(p) keepalive.Reset(t.kp.Timeout) - case <-t.shutdownChan: + case <-t.ctx.Done(): return } } } -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. -func (t *http2Server) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) +// is duplicated between the client and the server. +// The transport layer needs to be refactored to take care of this. +func (t *http2Server) itemHandler(i item) error { + switch i := i.(type) { + case *dataFrame: + if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil { + return err + } + i.f() + return nil + case *headerFrame: + t.hBuf.Reset() + for _, f := range i.hf { + t.hEnc.WriteField(f) + } + first := true + endHeaders := false + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + var err error + if first { + first = false + err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: i.streamID, + BlockFragment: t.hBuf.Next(size), + EndStream: i.endStream, + EndHeaders: endHeaders, + }) + } else { + err = t.framer.fr.WriteContinuation( + i.streamID, + endHeaders, + t.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + atomic.StoreUint32(&t.resetPingStrikes, 1) + return nil + case *windowUpdate: + return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) + case *settings: + if i.ack { + t.applySettings(i.ss) + return t.framer.fr.WriteSettingsAck() + } + return t.framer.fr.WriteSettings(i.ss...) 
+ case *resetStream: + return t.framer.fr.WriteRSTStream(i.streamID, i.code) + case *goAway: + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + // The transport is closing. + return fmt.Errorf("transport: Connection closing") + } + sid := t.maxStreamID + if !i.headsUp { + // Stop accepting more streams now. + t.state = draining + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil { + return err + } + if i.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return fmt.Errorf("transport: Connection closing") + } + return nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - t.framer.writeRSTStream(true, i.streamID, i.code) - case *goAway: - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - // The transport is closing. 
- return - } - sid := t.maxStreamID - t.state = draining - t.mu.Unlock() - t.framer.writeGoAway(true, sid, i.code, i.debugData) - if i.code == http2.ErrCodeEnhanceYourCalm { - t.Close() - } - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: + case <-t.drainChan: + case <-timer.C: + case <-t.ctx.Done(): return } - case <-t.shutdownChan: - return + t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData}) + }() + return nil + case *flushIO: + return t.framer.writer.Flush() + case *ping: + if !i.ack { + t.bdpEst.timesnap(i.data) } + return t.framer.fr.WritePing(i.ack, i.data) + default: + err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %t", i) + errorf("%v", err) + return err } } // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() (err error) { +func (t *http2Server) Close() error { t.mu.Lock() if t.state == closing { t.mu.Unlock() @@ -1006,8 +1109,8 @@ func (t *http2Server) Close() (err error) { streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() - close(t.shutdownChan) - err = t.conn.Close() + t.cancel() + err := t.conn.Close() // Cancel all active streams. for _, s := range streams { s.cancel() @@ -1016,7 +1119,7 @@ func (t *http2Server) Close() (err error) { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return + return err } // closeStream clears the footprint of a stream when the stream is not needed @@ -1036,11 +1139,6 @@ func (t *http2Server) closeStream(s *Stream) { // called to interrupt the potential blocking on other goroutines. 
s.cancel() s.mu.Lock() - if q := s.fc.resetPendingData(); q > 0 { - if w := t.fc.onRead(q); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } if s.state == streamDone { s.mu.Unlock() return @@ -1054,7 +1152,17 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.controlBuf.put(&goAway{code: http2.ErrCodeNo}) + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) } var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index 795d5d18a4f..39f878cfd5b 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -40,9 +25,9 @@ import ( "fmt" "io" "net" + "net/http" "strconv" "strings" - "sync/atomic" "time" "github.com/golang/protobuf/proto" @@ -50,7 +35,6 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) @@ -60,7 +44,8 @@ const ( // http://http2.github.io/http2-spec/#SettingValues http2InitHeaderTableSize = 4096 // http2IOBufSize specifies the buffer size for sending frames. 
- http2IOBufSize = 32 * 1024 + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 ) var ( @@ -88,6 +73,24 @@ var ( codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, codes.PermissionDenied: http2.ErrCodeInadequateSecurity, } + httpStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } ) // Records the states during HPACK decoding. Must be reset once the @@ -100,14 +103,17 @@ type decodeState struct { statusGen *status.Status // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not // intended for direct access outside of parsing. - rawStatusCode int32 + rawStatusCode *int rawStatusMsg string + httpStatus *int // Server side only fields. timeoutSet bool timeout time.Duration method string // key-value metadata map from the peer. - mdata map[string][]string + mdata map[string][]string + statsTags []byte + statsTrace []byte } // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -159,7 +165,7 @@ func validContentType(t string) bool { func (d *decodeState) status() *status.Status { if d.statusGen == nil { // No status-details were provided; generate status using code/msg. 
- d.statusGen = status.New(codes.Code(d.rawStatusCode), d.rawStatusMsg) + d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) } return d.statusGen } @@ -193,6 +199,51 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } +func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { + for _, hf := range frame.Fields { + if err := d.processHeaderField(hf); err != nil { + return err + } + } + + // If grpc status exists, no need to check further. + if d.rawStatusCode != nil || d.statusGen != nil { + return nil + } + + // If grpc status doesn't exist and http status doesn't exist, + // then it's a malformed header. + if d.httpStatus == nil { + return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") + } + + if *(d.httpStatus) != http.StatusOK { + code, ok := httpStatusConvTab[*(d.httpStatus)] + if !ok { + code = codes.Unknown + } + return streamErrorf(code, http.StatusText(*(d.httpStatus))) + } + + // gRPC status doesn't exist and http status is OK. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propogated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propogated. 
+ code := int(codes.Unknown) + d.rawStatusCode = &code + return nil + +} + +func (d *decodeState) addMetadata(k, v string) { + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + d.mdata[k] = append(d.mdata[k], v) +} + func (d *decodeState) processHeaderField(f hpack.HeaderField) error { switch f.Name { case "content-type": @@ -206,7 +257,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { if err != nil { return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) } - d.rawStatusCode = int32(code) + d.rawStatusCode = &code case "grpc-message": d.rawStatusMsg = decodeGrpcMessage(f.Value) case "grpc-status-details-bin": @@ -227,18 +278,36 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { } case ":path": d.method = f.Value - default: - if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { - if d.mdata == nil { - d.mdata = make(map[string][]string) - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) - return nil - } - d.mdata[f.Name] = append(d.mdata[f.Name], v) + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) } + d.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + } + d.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + } + d.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): 
%v", f.Name, f.Value, err) + return nil + } + d.addMetadata(f.Name, string(v)) } return nil } @@ -406,10 +475,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn) *framer { +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { f := &framer{ - reader: bufio.NewReaderSize(conn, http2IOBufSize), - writer: bufio.NewWriterSize(conn, http2IOBufSize), + reader: bufio.NewReaderSize(conn, readBufferSize), + writer: bufio.NewWriterSize(conn, writeBufferSize), } f.fr = http2.NewFramer(f.writer, f.reader) // Opt-in to Frame reuse API on framer to reduce garbage. @@ -418,132 +487,3 @@ func newFramer(conn net.Conn) *framer { f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } - -func (f *framer) adjustNumWriters(i int32) int32 { - return atomic.AddInt32(&f.numWriters, i) -} - -// The following writeXXX functions can only be called when the caller gets -// unblocked from writableChan channel (i.e., owns the privilege to write). 
- -func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { - if err := f.fr.WriteData(streamID, endStream, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { - if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { - if err := f.fr.WriteHeaders(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { - if err := f.fr.WritePing(ack, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { - if err := f.fr.WritePriority(streamID, p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { - if err := f.fr.WritePushPromise(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { - if err := f.fr.WriteRSTStream(streamID, code); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettings(forceFlush bool, settings 
...http2.Setting) error { - if err := f.fr.WriteSettings(settings...); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettingsAck(forceFlush bool) error { - if err := f.fr.WriteSettingsAck(); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { - if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) flushWrite() error { - return f.writer.Flush() -} - -func (f *framer) readFrame() (http2.Frame, error) { - return f.fr.ReadFrame() -} - -func (f *framer) errorDetail() error { - return f.fr.ErrorDetail() -} diff --git a/vendor/google.golang.org/grpc/transport/log.go b/vendor/google.golang.org/grpc/transport/log.go new file mode 100644 index 00000000000..ac8e358c5c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/log.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. +// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) 
+ } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} + +func fatalf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Fatalf(format, args...) + } +} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index 87dc27e5bba..ce5cb74d2ee 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -1,48 +1,32 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ -/* -Package transport defines and implements message oriented communication channel -to complete various transactions (e.g., an RPC). -*/ +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). package transport import ( - "bytes" + stdctx "context" "fmt" "io" "net" "sync" + "time" "golang.org/x/net/context" "golang.org/x/net/http2" @@ -65,57 +49,56 @@ type recvMsg struct { err error } -func (*recvMsg) item() {} - -// All items in an out of a recvBuffer should be the same type. -type item interface { - item() -} - -// recvBuffer is an unbounded channel of item. +// recvBuffer is an unbounded channel of recvMsg structs. +// Note recvBuffer differs from controlBuffer only in that recvBuffer +// holds a channel of only recvMsg structs instead of objects implementing "item" interface. 
+// recvBuffer is written to much more often than +// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" type recvBuffer struct { - c chan item + c chan recvMsg mu sync.Mutex - backlog []item + backlog []recvMsg } func newRecvBuffer() *recvBuffer { b := &recvBuffer{ - c: make(chan item, 1), + c: make(chan recvMsg, 1), } return b } -func (b *recvBuffer) put(r item) { +func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) == 0 { select { case b.c <- r: + b.mu.Unlock() return default: } } b.backlog = append(b.backlog, r) + b.mu.Unlock() } func (b *recvBuffer) load() { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} b.backlog = b.backlog[1:] default: } } + b.mu.Unlock() } -// get returns the channel that receives an item in the buffer. +// get returns the channel that receives a recvMsg in the buffer. // -// Upon receipt of an item, the caller should call load to send another -// item onto the channel if there is any. -func (b *recvBuffer) get() <-chan item { +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { return b.c } @@ -125,7 +108,7 @@ type recvBufferReader struct { ctx context.Context goAway chan struct{} recv *recvBuffer - last *bytes.Reader // Stores the remaining data in the previous calls. + last []byte // Stores the remaining data in the previous calls. err error } @@ -136,27 +119,87 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) { if r.err != nil { return 0, r.err } - defer func() { r.err = err }() - if r.last != nil && r.last.Len() > 0 { + n, r.err = r.read(p) + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + if r.last != nil && len(r.last) > 0 { // Read remaining data left in last call. 
- return r.last.Read(p) + copied := copy(p, r.last) + r.last = r.last[copied:] + return copied, nil } select { case <-r.ctx.Done(): return 0, ContextErr(r.ctx.Err()) case <-r.goAway: return 0, ErrStreamDrain - case i := <-r.recv.get(): + case m := <-r.recv.get(): r.recv.load() - m := i.(*recvMsg) if m.err != nil { return 0, m.err } - r.last = bytes.NewReader(m.data) - return r.last.Read(p) + copied := copy(p, m.data) + r.last = m.data[copied:] + return copied, nil } } +// All items in an out of a controlBuffer should be the same type. +type item interface { + item() +} + +// controlBuffer is an unbounded channel of item. +type controlBuffer struct { + c chan item + mu sync.Mutex + backlog []item +} + +func newControlBuffer() *controlBuffer { + b := &controlBuffer{ + c: make(chan item, 1), + } + return b +} + +func (b *controlBuffer) put(r item) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *controlBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives an item in the buffer. +// +// Upon receipt of an item, the caller should call load to send another +// item onto the channel if there is any. +func (b *controlBuffer) get() <-chan item { + return b.c +} + type streamState uint8 const ( @@ -171,11 +214,6 @@ type Stream struct { id uint32 // nil for client side Stream. st ServerTransport - // clientStatsCtx keeps the user context for stats handling. - // It's only valid on client side. Server side stats context is same as s.ctx. - // All client side stats collection should use the clientStatsCtx (instead of the stream context) - // so that all the generated stats for a particular RPC can be associated in the processing phase. 
- clientStatsCtx context.Context // ctx is the associated context of the stream. ctx context.Context // cancel is always nil for client side Stream. @@ -189,16 +227,20 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - dec io.Reader + trReader io.Reader fc *inFlow recvQuota uint32 + + // TODO: Remote this unused variable. // The accumulated inbound quota pending for window update. updateQuota uint32 - // The handler to control the window update procedure for both this - // particular stream and the associated transport. - windowHandler func(int) - sendQuotaPool *quotaPool + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if need be. + requestRead func(int) + + sendQuotaPool *quotaPool + localSendQuota *quotaPool // Close headerChan to indicate the end of reception of header metadata. headerChan chan struct{} // header caches the received header metadata. @@ -251,16 +293,24 @@ func (s *Stream) GoAway() <-chan struct{} { // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no -// header metadata or iii) the stream is cancelled/expired. +// header metadata or iii) the stream is canceled/expired. func (s *Stream) Header() (metadata.MD, error) { + var err error select { case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + err = ContextErr(s.ctx.Err()) case <-s.goAway: - return nil, ErrStreamDrain + err = ErrStreamDrain case <-s.headerChan: return s.header.Copy(), nil } + // Even if the stream is closed, header is returned if available. + select { + case <-s.headerChan: + return s.header.Copy(), nil + default: + } + return nil, err } // Trailer returns the cached trailer metedata. Note that if it is not called @@ -268,8 +318,9 @@ func (s *Stream) Header() (metadata.MD, error) { // side only. 
func (s *Stream) Trailer() metadata.MD { s.mu.RLock() - defer s.mu.RUnlock() - return s.trailer.Copy() + c := s.trailer.Copy() + s.mu.RUnlock() + return c } // ServerTransport returns the underlying ServerTransport for the stream. @@ -297,14 +348,16 @@ func (s *Stream) Status() *status.Status { // Server side only. func (s *Stream) SetHeader(md metadata.MD) error { s.mu.Lock() - defer s.mu.Unlock() if s.headerOk || s.state == streamDone { + s.mu.Unlock() return ErrIllegalHeaderWrite } if md.Len() == 0 { + s.mu.Unlock() return nil } s.header = metadata.Join(s.header, md) + s.mu.Unlock() return nil } @@ -315,25 +368,44 @@ func (s *Stream) SetTrailer(md metadata.MD) error { return nil } s.mu.Lock() - defer s.mu.Unlock() s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() return nil } func (s *Stream) write(m recvMsg) { - s.buf.put(&m) + s.buf.put(m) } -// Read reads all the data available for this Stream from the transport and +// Read reads all p bytes from the wire for this stream. +func (s *Stream) Read(p []byte) (n int, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er + } + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) +} + +// tranportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. -func (s *Stream) Read(p []byte) (n int, err error) { - n, err = s.dec.Read(p) +type transportReader struct { + reader io.Reader + // The handler to control the window update procedure for both this + // particular stream and the associated transport. 
+ windowHandler func(int) + er error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) if err != nil { + t.er = err return } - s.windowHandler(n) + t.windowHandler(n) return } @@ -348,15 +420,17 @@ func (s *Stream) finish(st *status.Status) { // BytesSent indicates whether any bytes have been sent on this stream. func (s *Stream) BytesSent() bool { s.mu.Lock() - defer s.mu.Unlock() - return s.bytesSent + bs := s.bytesSent + s.mu.Unlock() + return bs } // BytesReceived indicates whether any bytes have been received on this stream. func (s *Stream) BytesReceived() bool { s.mu.Lock() - defer s.mu.Unlock() - return s.bytesReceived + br := s.bytesReceived + s.mu.Unlock() + return br } // GoString is implemented by Stream so context.String() won't @@ -385,19 +459,22 @@ type transportState int const ( reachable transportState = iota - unreachable closing draining ) // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { - MaxStreams uint32 - AuthInfo credentials.AuthInfo - InTapHandle tap.ServerInHandle - StatsHandler stats.Handler - KeepaliveParams keepalive.ServerParameters - KeepalivePolicy keepalive.EnforcementPolicy + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int } // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -425,6 +502,14 @@ type ConnectOptions struct { KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. 
+ InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int } // TargetInfo contains the information of the target such as network address and metadata. @@ -435,8 +520,8 @@ type TargetInfo struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) { - return newHTTP2Client(ctx, target, opts) +func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) { + return newHTTP2Client(ctx, target, opts, timeout) } // Options provides additional hints and information for message @@ -448,7 +533,7 @@ type Options struct { // Delay is a hint to the transport implementation for whether // the data could be buffered for a batching write. The - // Transport implementation may ignore the hint. + // transport implementation may ignore the hint. Delay bool } @@ -468,10 +553,15 @@ type CallHdr struct { // outbound message. SendCompress string + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + // Flush indicates whether a new stream command should be sent // to the peer without waiting for the first data. This is - // only a hint. The transport may modify the flush decision + // only a hint. + // If it's true, the transport may modify the flush decision // for performance purposes. + // If it's false, new stream will never be flushed. Flush bool } @@ -489,7 +579,7 @@ type ClientTransport interface { // Write sends the data for the given stream. 
A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data []byte, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -507,7 +597,7 @@ type ClientTransport interface { // once the transport is initiated. Error() <-chan struct{} - // GoAway returns a channel that is closed when ClientTranspor + // GoAway returns a channel that is closed when ClientTransport // receives the draining signal from the server (e.g., GOAWAY frame in // HTTP/2). GoAway() <-chan struct{} @@ -531,7 +621,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data []byte, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -613,45 +703,33 @@ func (e StreamError) Error() string { return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } -// ContextErr converts the error from context package into a StreamError. -func ContextErr(err error) StreamError { - switch err { - case context.DeadlineExceeded: - return streamErrorf(codes.DeadlineExceeded, "%v", err) - case context.Canceled: - return streamErrorf(codes.Canceled, "%v", err) - } - panic(fmt.Sprintf("Unexpected error from context packet: %v", err)) -} - -// wait blocks until it can receive from ctx.Done, closing, or proceed. -// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. -// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise -// it return the StreamError for ctx.Err. -// If it receives from goAway, it returns 0, ErrStreamDrain. -// If it receives from closing, it returns 0, ErrConnClosing. 
-// If it receives from proceed, it returns the received integer, nil. -func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) { +// wait blocks until it can receive from one of the provided contexts or channels +func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) { select { case <-ctx.Done(): return 0, ContextErr(ctx.Err()) case <-done: - // User cancellation has precedence. - select { - case <-ctx.Done(): - return 0, ContextErr(ctx.Err()) - default: - } return 0, io.EOF case <-goAway: return 0, ErrStreamDrain - case <-closing: + case <-tctx.Done(): return 0, ErrConnClosing case i := <-proceed: return i, nil } } +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded, stdctx.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled, stdctx.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} + // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 @@ -661,6 +739,39 @@ const ( // NoReason is the default value when GoAway frame is received. NoReason GoAwayReason = 1 // TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm - // was recieved and that the debug data said "too_many_pings". + // was received and that the debug data said "too_many_pings". TooManyPings GoAwayReason = 2 ) + +// loopyWriter is run in a separate go routine. It is the single code path that will +// write data on wire. 
+func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) { + for { + select { + case i := <-cbuf.get(): + cbuf.load() + if err := handler(i); err != nil { + return + } + case <-ctx.Done(): + return + } + hasData: + for { + select { + case i := <-cbuf.get(): + cbuf.load() + if err := handler(i); err != nil { + return + } + case <-ctx.Done(): + return + default: + if err := handler(&flushIO{}); err != nil { + return + } + break hasData + } + } + } +} diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100755 index 00000000000..d006a426347 --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +die() { + echo "$@" >&2 + exit 1 +} + +# TODO: Remove this check and the mangling below once "context" is imported +# directly. +if git status --porcelain | read; then + die "Uncommitted or untracked files found; commit changes first" +fi + +PATH="$GOPATH/bin:$GOROOT/bin:$PATH" + +# Check proto in manual runs or cron runs. +if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then + check_proto="true" +fi + +if [ "$1" = "-install" ]; then + go get -d \ + google.golang.org/grpc/... + go get -u \ + github.com/golang/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/golang/protobuf/protoc-gen-go \ + golang.org/x/tools/cmd/stringer + if [[ "$check_proto" = "true" ]]; then + if [[ "$TRAVIS" = "true" ]]; then + PROTOBUF_VERSION=3.3.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif ! 
which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) +gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) +goimports -l . 2>&1 | tee /dev/stderr | (! read) +golint ./... 2>&1 | (grep -vE "(_mock|_string|grpc_lb_v1/doc|\.pb)\.go:" || true) | tee /dev/stderr | (! read) + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484). +# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711). +git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' +set +o pipefail +# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. +go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read) +set -o pipefail +git reset --hard HEAD + +if [[ "$check_proto" = "true" ]]; then + PATH="/home/travis/bin:$PATH" make proto && \ + git status --porcelain 2>&1 | (! read) || \ + (git status; git --no-pager diff; exit 1) +fi + +# TODO(menghanl): fix errors in transport_test. +staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./... 
From bbdac135dd187b757622a27627225d5644161232 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Wed, 13 Dec 2017 13:34:22 -0800 Subject: [PATCH 641/794] Version bump to grpc-gateway v1.3.0 --- Godeps/Godeps.json | 12 +- .../grpc-ecosystem/grpc-gateway/runtime/BUILD | 3 +- .../grpc-gateway/runtime/context.go | 64 +++++-- .../grpc-gateway/runtime/errors.go | 20 ++- .../grpc-gateway/runtime/handler.go | 35 ++-- .../runtime/internal/stream_chunk.pb.go | 33 +++- .../grpc-gateway/runtime/mux.go | 144 ++++++++++++++- .../grpc-gateway/runtime/pattern.go | 2 +- .../grpc-gateway/runtime/proto_errors.go | 61 +++++++ .../grpc-gateway/runtime/query.go | 165 ++++++++++++++++-- 10 files changed, 475 insertions(+), 64 deletions(-) create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 675d49d51b5..fa46a593410 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1871,18 +1871,18 @@ }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Comment": "v1.1.0-25-g84398b9", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Comment": "v1.3.0", + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Comment": "v1.1.0-25-g84398b9", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Comment": "v1.3.0", + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Comment": "v1.1.0-25-g84398b9", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Comment": "v1.3.0", + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/hashicorp/golang-lru", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD index 77e7c662829..4c47e798460 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD +++ 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD @@ -15,6 +15,7 @@ go_library( "mux.go", "pattern.go", "proto2_convert.go", + "proto_errors.go", "query.go", ], importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", @@ -25,10 +26,10 @@ go_library( "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal:go_default_library", "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", "//vendor/google.golang.org/grpc/codes:go_default_library", "//vendor/google.golang.org/grpc/grpclog:go_default_library", "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", ], ) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go index f248c738b23..6e0eb27e285 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go @@ -9,18 +9,23 @@ import ( "time" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) -// MetadataHeaderPrefix is prepended to HTTP headers in order to convert them to -// gRPC metadata for incoming requests processed by grpc-gateway +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is the prefix for grpc-gateway supplied custom metadata fields. 
+const MetadataPrefix = "grpcgateway-" + // MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to // HTTP headers in a response handled by grpc-gateway const MetadataTrailerPrefix = "Grpc-Trailer-" + const metadataGrpcTimeout = "Grpc-Timeout" const xForwardedFor = "X-Forwarded-For" @@ -39,25 +44,25 @@ At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", except that the forwarded destination is not another HTTP service but rather a gRPC service. */ -func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, error) { +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { var pairs []string timeout := DefaultContextTimeout if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { var err error timeout, err = timeoutDecode(tm) if err != nil { - return nil, grpc.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) } } for key, vals := range req.Header { for _, val := range vals { - if key == "Authorization" { + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if strings.ToLower(key) == "authorization" { pairs = append(pairs, "authorization", val) - continue } - if strings.HasPrefix(key, MetadataHeaderPrefix) { - pairs = append(pairs, key[len(MetadataHeaderPrefix):], val) + if h, ok := mux.incomingHeaderMatcher(key); ok { + pairs = append(pairs, h, val) } } } @@ -85,7 +90,11 @@ func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, e if len(pairs) == 0 { return ctx, nil } - return metadata.NewContext(ctx, metadata.Pairs(pairs...)), nil + md := metadata.Pairs(pairs...) + if mux.metadataAnnotator != nil { + md = metadata.Join(md, mux.metadataAnnotator(ctx, req)) + } + return metadata.NewOutgoingContext(ctx, md), nil } // ServerMetadata consists of metadata sent from gRPC server. 
@@ -141,3 +150,38 @@ func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { } return } + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permenant request headers maintained by IANA. +// http://www.iana.org/assignments/message-headers/message-headers.xml +func isPermanentHTTPHeader(hdr string) bool { + switch hdr { + case + "Accept", + "Accept-Charset", + "Accept-Language", + "Accept-Ranges", + "Authorization", + "Cache-Control", + "Content-Type", + "Cookie", + "Date", + "Expect", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Schedule-Tag-Match", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Pragma", + "Referer", + "User-Agent", + "Via", + "Warning": + return true + } + return false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go index 0d3cb3bf3ca..8eebdcf49f4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go @@ -6,9 +6,9 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) // HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. @@ -64,7 +64,7 @@ var ( type errorBody struct { Error string `protobuf:"bytes,1,name=error" json:"error"` - Code int `protobuf:"bytes,2,name=code" json:"code"` + Code int32 `protobuf:"varint,2,name=code" json:"code"` } //Make this also conform to proto.Message for builtin JSONPb Marshaler @@ -78,14 +78,20 @@ func (*errorBody) ProtoMessage() {} // // The response body returned by this function is a JSON object, // which contains a member whose key is "error" and whose value is err.Error(). 
-func DefaultHTTPError(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { +func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { const fallback = `{"error": "failed to marshal error message"}` w.Header().Del("Trailer") w.Header().Set("Content-Type", marshaler.ContentType()) + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + body := &errorBody{ - Error: grpc.ErrorDesc(err), - Code: int(grpc.Code(err)), + Error: s.Message(), + Code: int32(s.Code()), } buf, merr := marshaler.Marshal(body) @@ -103,9 +109,9 @@ func DefaultHTTPError(ctx context.Context, marshaler Marshaler, w http.ResponseW grpclog.Printf("Failed to extract ServerMetadata from context") } - handleForwardResponseServerMetadata(w, md) + handleForwardResponseServerMetadata(w, mux, md) handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(grpc.Code(err)) + st := HTTPStatusFromCode(s.Code()) w.WriteHeader(st) if _, err := w.Write(buf); err != nil { grpclog.Printf("Failed to write response: %v", err) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go index d7040851ae9..ae6a5d551cf 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go @@ -9,12 +9,13 @@ import ( "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime/internal" "golang.org/x/net/context" - "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) // ForwardResponseStream forwards the stream from gRPC server to REST client. 
-func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { +func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { f, ok := w.(http.Flusher) if !ok { grpclog.Printf("Flush not supported in %T", w) @@ -28,7 +29,7 @@ func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.Resp http.Error(w, "unexpected error", http.StatusInternalServerError) return } - handleForwardResponseServerMetadata(w, md) + handleForwardResponseServerMetadata(w, mux, md) w.Header().Set("Transfer-Encoding", "chunked") w.Header().Set("Content-Type", marshaler.ContentType()) @@ -57,7 +58,7 @@ func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.Resp grpclog.Printf("Failed to marshal response chunk: %v", err) return } - if _, err = fmt.Fprintf(w, "%s\n", buf); err != nil { + if _, err = w.Write(buf); err != nil { grpclog.Printf("Failed to send response chunk: %v", err) return } @@ -65,11 +66,12 @@ func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.Resp } } -func handleForwardResponseServerMetadata(w http.ResponseWriter, md ServerMetadata) { +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { for k, vs := range md.HeaderMD { - hKey := fmt.Sprintf("%s%s", MetadataHeaderPrefix, k) - for i := range vs { - w.Header().Add(hKey, vs[i]) + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } } } } @@ -84,31 +86,31 @@ func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { for k, vs := range md.TrailerMD { 
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) - for i := range vs { - w.Header().Add(tKey, vs[i]) + for _, v := range vs { + w.Header().Add(tKey, v) } } } // ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. -func ForwardResponseMessage(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { +func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) if !ok { grpclog.Printf("Failed to extract ServerMetadata from context") } - handleForwardResponseServerMetadata(w, md) + handleForwardResponseServerMetadata(w, mux, md) handleForwardResponseTrailerHeader(w, md) w.Header().Set("Content-Type", marshaler.ContentType()) if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - HTTPError(ctx, marshaler, w, req, err) + HTTPError(ctx, mux, marshaler, w, req, err) return } buf, err := marshaler.Marshal(resp) if err != nil { grpclog.Printf("Marshal error: %v", err) - HTTPError(ctx, marshaler, w, req, err) + HTTPError(ctx, mux, marshaler, w, req, err) return } @@ -146,7 +148,10 @@ func handleForwardResponseStreamError(marshaler Marshaler, w http.ResponseWriter func streamChunk(result proto.Message, err error) map[string]proto.Message { if err != nil { - grpcCode := grpc.Code(err) + grpcCode := codes.Unknown + if s, ok := status.FromError(err); ok { + grpcCode = s.Code() + } httpCode := HTTPStatusFromCode(grpcCode) return map[string]proto.Message{ "error": &internal.StreamError{ diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go index 6f837cfd5d9..44550f393b4 100644 --- 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: runtime/internal/stream_chunk.proto -// DO NOT EDIT! /* Package internal is a generated protocol buffer package. @@ -42,6 +41,34 @@ func (m *StreamError) String() string { return proto.CompactTextStrin func (*StreamError) ProtoMessage() {} func (*StreamError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *StreamError) GetGrpcCode() int32 { + if m != nil { + return m.GrpcCode + } + return 0 +} + +func (m *StreamError) GetHttpCode() int32 { + if m != nil { + return m.HttpCode + } + return 0 +} + +func (m *StreamError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *StreamError) GetHttpStatus() string { + if m != nil { + return m.HttpStatus + } + return "" +} + func init() { proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") } @@ -50,7 +77,7 @@ func init() { proto.RegisterFile("runtime/internal/stream_chunk.proto", fileDesc var fileDescriptor0 = []byte{ // 181 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30, 0x14, 0x85, 0xd3, 0xdf, 0x1f, 0x85, 0xcb, 0x46, 0x1c, 0x9a, 0x38, 0x48, 0x74, 0x61, 0x82, 0xc1, 0x37, 0xd0, 0xf8, 0x02, 0xb0, 0xb9, 0x90, 0x0a, 0x37, 0x40, 0x94, 0x96, 0xdc, 0x5e, 0x62, 0x5c, 0x7d, 0x72, 0xd3, 0x22, 0xe3, 0xf9, 0xbe, 0x73, 0x92, 0x03, 0x07, 0x9a, 0x34, 0xf7, 0x03, 0xe6, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go index 2e6c5621302..205bc430921 100644 --- 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -1,12 +1,16 @@ package runtime import ( + "fmt" "net/http" + "net/textproto" "strings" - "golang.org/x/net/context" - "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) // A HandlerFunc handles a specific pair of path pattern and HTTP method. @@ -19,6 +23,10 @@ type ServeMux struct { handlers map[string][]handler forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + metadataAnnotator func(context.Context, *http.Request) metadata.MD + protoErrorHandler ProtoErrorHandlerFunc } // ServeMuxOption is an option that can be given to a ServeMux on construction. @@ -36,6 +44,64 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http. } } +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. +func DefaultHeaderMatcher(key string) (string, bool) { + key = textproto.CanonicalMIMEHeaderKey(key) + if isPermanentHTTPHeader(key) { + return MetadataPrefix + key, true + } else if strings.HasPrefix(key, MetadataHeaderPrefix) { + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. 
+// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return modified header. +func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingHeaderMatcher = fn + } +} + +// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used by services that need to read from http.Request and modify gRPC context. A common use case +// is reading token from cookie and adding it in gRPC context. +func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.metadataAnnotator = annotator + } +} + +// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used to handle an error as general proto message defined by gRPC. +// The response including body and status is not backward compatible with the default error handler. +// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization. +func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.protoErrorHandler = fn + } +} + // NewServeMux returns a new ServeMux whose internal mapping is empty. 
func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ @@ -47,6 +113,29 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { for _, opt := range opts { opt(serveMux) } + + if serveMux.protoErrorHandler != nil { + HTTPError = serveMux.protoErrorHandler + // OtherErrorHandler is no longer used when protoErrorHandler is set. + // Overwritten by a special error handler to return Unknown. + OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { + ctx := context.Background() + _, outboundMarshaler := MarshalerForRequest(serveMux, r) + sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") + serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) + } + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true + } + } + return serveMux } @@ -57,9 +146,17 @@ func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { // ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + path := r.URL.Path if !strings.HasPrefix(path, "/") { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + } return } @@ -67,7 +164,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { l := len(components) var verb string if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } return } else if idx > 0 { c := components[l-1] @@ -77,7 +180,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && isPathLengthFallback(r) { r.Method = strings.ToUpper(override) if err := r.ParseForm(); err != nil { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } return } } @@ -104,17 +213,36 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { // X-HTTP-Method-Override is optional. 
Always allow fallback to POST. if isPathLengthFallback(r) { if err := r.ParseForm(); err != nil { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } return } h.h(w, r, pathParams) return } - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } return } } - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } } // GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go index 3947dbea023..8a9ec2cdae4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -21,7 +21,7 @@ type op struct { operand int } -// Pattern is a template pattern of http request paths defined in third_party/googleapis/google/api/http.proto. 
+// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. type Pattern struct { // ops is a list of operations ops []op diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go new file mode 100644 index 00000000000..b1b089273b6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go @@ -0,0 +1,61 @@ +package runtime + +import ( + "io" + "net/http" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. +type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler + +// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a Status message marshaled by a Marshaler. +// +// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + w.Header().Del("Trailer") + w.Header().Set("Content-Type", marshaler.ContentType()) + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + buf, merr := marshaler.Marshal(s.Proto()) + if merr != nil { + grpclog.Printf("Failed to marshal error message %q: %v", s.Proto(), merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Printf("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Printf("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Printf("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go index 56a919a52f1..c00e0b914e2 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go @@ -4,7 +4,9 @@ import ( "fmt" "net/url" "reflect" + "strconv" "strings" + "time" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/utilities" @@ -38,31 +40,39 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values [] if m.Kind() != reflect.Ptr { return fmt.Errorf("unexpected type %T: %v", msg, msg) } + var props *proto.Properties m = m.Elem() for i, fieldName := range fieldPath { isLast := i == len(fieldPath)-1 if !isLast && m.Kind() != 
reflect.Struct { return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) } - f := fieldByProtoName(m, fieldName) - if !f.IsValid() { + var f reflect.Value + var err error + f, props, err = fieldByProtoName(m, fieldName) + if err != nil { + return err + } else if !f.IsValid() { grpclog.Printf("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) return nil } switch f.Kind() { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } m = f case reflect.Slice: // TODO(yugui) Support []byte if !isLast { return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) } - return populateRepeatedField(f, values) + return populateRepeatedField(f, values, props) case reflect.Ptr: if f.IsNil() { m = reflect.New(f.Type().Elem()) - f.Set(m) + f.Set(m.Convert(f.Type())) } m = f.Elem() continue @@ -80,39 +90,127 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values [] default: grpclog.Printf("too many field values: %s", strings.Join(fieldPath, ".")) } - return populateField(m, values[0]) + return populateField(m, values[0], props) } // fieldByProtoName looks up a field whose corresponding protobuf field name is "name". // "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) reflect.Value { +func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { props := proto.GetProperties(m.Type()) + + // look up field name in oneof map + if op, ok := props.OneofTypes[name]; ok { + v := reflect.New(op.Type.Elem()) + field := m.Field(op.Field) + if !field.IsNil() { + return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) + } + field.Set(v) + return v.Elem().Field(0), op.Prop, nil + } + for _, p := range props.Prop { if p.OrigName == name { - return m.FieldByName(p.Name) + return m.FieldByName(p.Name), p, nil + } + if p.JSONName == name { + return m.FieldByName(p.Name), p, nil } } - return reflect.Value{} + return reflect.Value{}, nil, nil } -func populateRepeatedField(f reflect.Value, values []string) error { +func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { elemType := f.Type().Elem() + + // is the destination field a slice of an enumeration type? 
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnumRepeated(f, values, enumValMap) + } + conv, ok := convFromType[elemType.Kind()] if !ok { return fmt.Errorf("unsupported field type %s", elemType) } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values))) + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) for i, v := range values { result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) if err := result[1].Interface(); err != nil { return err.(error) } - f.Index(i).Set(result[0]) + f.Index(i).Set(result[0].Convert(f.Index(i).Type())) } return nil } -func populateField(f reflect.Value, value string) error { +func populateField(f reflect.Value, value string, props *proto.Properties) error { + // Handle well known type + type wkt interface { + XXX_WellKnownType() string + } + if wkt, ok := f.Addr().Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "Timestamp": + if value == "null" { + f.Field(0).SetInt(0) + f.Field(1).SetInt(0) + return nil + } + + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + f.Field(0).SetInt(int64(t.Unix())) + f.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "DoubleValue": + fallthrough + case "FloatValue": + float64Val, err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.Field(0).SetFloat(float64Val) + return nil + case "Int64Value": + fallthrough + case "Int32Value": + int64Val, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.Field(0).SetInt(int64Val) + return nil + case "UInt64Value": + fallthrough + case "UInt32Value": + uint64Val, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.Field(0).SetUint(uint64Val) + return nil + case "BoolValue": + if value == "true" { + 
f.Field(0).SetBool(true) + } else if value == "false" { + f.Field(0).SetBool(false) + } else { + return fmt.Errorf("bad BoolValue: %s", value) + } + return nil + case "StringValue": + f.Field(0).SetString(value) + return nil + } + } + + // is the destination field an enumeration type? + if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnum(f, value, enumValMap) + } + conv, ok := convFromType[f.Kind()] if !ok { return fmt.Errorf("unsupported field type %T", f) @@ -121,7 +219,48 @@ func populateField(f reflect.Value, value string) error { if err := result[1].Interface(); err != nil { return err.(error) } - f.Set(result[0]) + f.Set(result[0].Convert(f.Type())) + return nil +} + +func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { + // see if it's an enumeration string + if enumVal, ok := enumValMap[value]; ok { + return reflect.ValueOf(enumVal).Convert(t), nil + } + + // check for an integer that matches an enumeration value + eVal, err := strconv.Atoi(value) + if err != nil { + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) + } + for _, v := range enumValMap { + if v == int32(eVal) { + return reflect.ValueOf(eVal).Convert(t), nil + } + } + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) +} + +func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { + cval, err := convertEnum(value, f.Type(), enumValMap) + if err != nil { + return err + } + f.Set(cval) + return nil +} + +func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { + elemType := f.Type().Elem() + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result, err := convertEnum(v, elemType, enumValMap) + if err != nil { + return err + } + f.Index(i).Set(result) + } return nil } From 71c5f8ab12edd7209deb3ea951a2de9fb9c156bd Mon Sep 17 00:00:00 
2001 From: Joe Betz Date: Thu, 14 Dec 2017 23:15:58 -0800 Subject: [PATCH 642/794] Update staging deps for etcd 3.2.13 version bump --- .../Godeps/Godeps.json | 118 ++++---- .../src/k8s.io/apiserver/Godeps/Godeps.json | 260 ++++++++++++------ .../k8s.io/kube-aggregator/Godeps/Godeps.json | 118 ++++---- .../sample-apiserver/Godeps/Godeps.json | 118 ++++---- 4 files changed, 357 insertions(+), 257 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 08607c1ddcf..2aaf0acc53b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -32,60 +32,60 @@ }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": 
"95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/version", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/go-semver/semver", + "Rev": "568e959cd89871e61434c1143528d9162da89ef2" }, { "ImportPath": "github.com/coreos/go-systemd/daemon", "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" }, - { - "ImportPath": "github.com/coreos/go-systemd/journal", - "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" - }, - { - "ImportPath": "github.com/coreos/pkg/capnslog", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -167,11 +167,11 @@ "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, { - "ImportPath": "github.com/golang/protobuf/jsonpb", + "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { - "ImportPath": "github.com/golang/protobuf/proto", + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { @@ -186,10 +186,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/duration", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/golang/protobuf/ptypes/struct", - "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" - }, { "ImportPath": 
"github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -222,22 +218,6 @@ "ImportPath": "github.com/gregjones/httpcache/diskcache", "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, - { - "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", - "Rev": "2500245aa6110c562d17020fb31a2c133d737799" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -442,65 +422,85 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" + }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/connectivity", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Rev": 
"d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": 
"gopkg.in/inf.v0", diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 49ef5aaa071..a8011ef1d06 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -27,232 +27,304 @@ "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4" }, { - "ImportPath": "github.com/boltdb/bolt", - "Rev": "583e8937c61f1af6513608ccc75c97b6abdf4ff9" + "ImportPath": "github.com/cockroachdb/cmux", + "Rev": "112f0506e7743d64a6eb8fedbcff13d9979bbf92" + }, + { + "ImportPath": "github.com/coreos/bbolt", + "Rev": "48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d" }, { "ImportPath": "github.com/coreos/etcd/alarm", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/auth", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/concurrency", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/namespace", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/naming", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/compactor", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": 
"github.com/coreos/etcd/discovery", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/embed", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/error", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/etcdhttp", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3client", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", + "Rev": 
"95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/auth", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb/gw", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/membership", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/stats", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/integration", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/lease", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/lease/leasehttp", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/lease/leasepb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": 
"github.com/coreos/etcd/mvcc", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc/backend", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/adt", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/contention", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/cors", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/cpuutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/crc", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/debugutil", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/httputil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/idutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/ioutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": 
"github.com/coreos/etcd/pkg/logutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/monotime", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/netutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/pbutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/runtime", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/schedule", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/testutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/wait", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": 
"95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/adapter", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/cache", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/raft", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/raft/raftpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/rafthttp", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/snap", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/snap/snappb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/store", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/version", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/wal", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/wal/walpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/go-oidc/http", @@ -306,6 +378,10 @@ 
"ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, + { + "ImportPath": "github.com/dgrijalva/jwt-go", + "Rev": "01aeca54ebda6e0fbfafd0a524d234159c05ec20" + }, { "ImportPath": "github.com/elazarl/go-bindata-assetfs", "Rev": "3dcc96556217539f50599357fb481ac0dc7439b9" @@ -358,6 +434,10 @@ "ImportPath": "github.com/golang/glog", "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, + { + "ImportPath": "github.com/golang/groupcache/lru", + "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433" + }, { "ImportPath": "github.com/golang/protobuf/jsonpb", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -366,6 +446,10 @@ "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, + { + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + }, { "ImportPath": "github.com/golang/protobuf/ptypes", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -448,15 +532,15 @@ }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/hashicorp/golang-lru", @@ -486,10 +570,6 @@ "ImportPath": "github.com/juju/ratelimit", "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" }, - { - "ImportPath": "github.com/karlseguin/ccache", - "Rev": "3ba9789cfd2cb7b4fb4657efc994cc1c599a648c" - }, { "ImportPath": "github.com/mailru/easyjson/buffer", "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" @@ -690,65 +770,85 @@ "ImportPath": 
"golang.org/x/time/rate", "Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631" }, + { + "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" + }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/connectivity", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "gopkg.in/inf.v0", diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index ec78f699e49..bac5d82c33e 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -28,60 +28,60 @@ }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": 
"0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/version", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/go-semver/semver", + "Rev": "568e959cd89871e61434c1143528d9162da89ef2" }, { "ImportPath": "github.com/coreos/go-systemd/daemon", "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" }, - { - "ImportPath": "github.com/coreos/go-systemd/journal", - "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" - }, - { - "ImportPath": "github.com/coreos/pkg/capnslog", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -147,11 +147,11 @@ "Rev": 
"44145f04b68cf362d9c4df2182967c2275eaefed" }, { - "ImportPath": "github.com/golang/protobuf/jsonpb", + "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { - "ImportPath": "github.com/golang/protobuf/proto", + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { @@ -166,10 +166,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/duration", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/golang/protobuf/ptypes/struct", - "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" - }, { "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -202,22 +198,6 @@ "ImportPath": "github.com/gregjones/httpcache/diskcache", "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, - { - "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", - "Rev": "2500245aa6110c562d17020fb31a2c133d737799" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -418,65 +398,85 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" + }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/connectivity", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "gopkg.in/inf.v0", diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index 3281bf2fb31..27ea0ca6a47 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -28,60 +28,60 @@ }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/client", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" - }, - { - "ImportPath": 
"github.com/coreos/etcd/pkg/fileutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/version", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/go-semver/semver", + "Rev": "568e959cd89871e61434c1143528d9162da89ef2" }, { "ImportPath": "github.com/coreos/go-systemd/daemon", "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" }, - { - "ImportPath": "github.com/coreos/go-systemd/journal", - "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" - }, - { - "ImportPath": "github.com/coreos/pkg/capnslog", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -139,11 +139,11 @@ "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, { - "ImportPath": "github.com/golang/protobuf/jsonpb", + "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { - "ImportPath": "github.com/golang/protobuf/proto", + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { @@ -158,10 +158,6 
@@ "ImportPath": "github.com/golang/protobuf/ptypes/duration", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/golang/protobuf/ptypes/struct", - "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" - }, { "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -194,22 +190,6 @@ "ImportPath": "github.com/gregjones/httpcache/diskcache", "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, - { - "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", - "Rev": "2500245aa6110c562d17020fb31a2c133d737799" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -402,65 +382,85 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" + }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/grpc", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/connectivity", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" 
}, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "gopkg.in/inf.v0", From 6a0c69e971a8341149cf2ad45ce0c2d6cc55aa30 Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Wed, 13 Dec 2017 15:12:11 -0800 Subject: [PATCH 643/794] Fix build and test errors from etcd 3.2.13 upgrade --- staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD | 1 + .../src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go | 6 ++++-- test/e2e_node/services/etcd.go | 1 + test/integration/scale/BUILD | 1 + test/integration/scale/scale_test.go | 1 + 5 files changed, 8 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD index ac48442e50d..5d495404c6d 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD @@ -13,6 +13,7 @@ go_library( "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", "//vendor/github.com/coreos/etcd/integration:go_default_library", "//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go index 6ab310b601e..96d21b5812e 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go @@ -35,6 +35,7 @@ import ( etcd "github.com/coreos/etcd/client" "github.com/coreos/etcd/clientv3" 
"github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" @@ -154,6 +155,7 @@ func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer if err != nil { t.Fatal(err) } + m.AuthToken = "simple" } else { cln := newLocalListener(t) m.ClientListeners = []net.Listener{cln} @@ -189,9 +191,9 @@ func (m *EtcdTestServer) launch(t *testing.T) error { if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.Tick(500 * time.Millisecond) + m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) m.s.Start() - m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)} + m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)} for _, ln := range m.PeerListeners { hs := &httptest.Server{ Listener: ln, diff --git a/test/e2e_node/services/etcd.go b/test/e2e_node/services/etcd.go index 9176ff7a0d7..8c40dc7797e 100644 --- a/test/e2e_node/services/etcd.go +++ b/test/e2e_node/services/etcd.go @@ -77,6 +77,7 @@ func NewEtcd(dataDir string) *EtcdServer { MaxWALFiles: maxWALFiles, TickMs: tickMs, ElectionTicks: electionTicks, + AuthToken: "simple", } return &EtcdServer{ diff --git a/test/integration/scale/BUILD b/test/integration/scale/BUILD index 120e556ce4b..5fd7eb7c093 100644 --- a/test/integration/scale/BUILD +++ b/test/integration/scale/BUILD @@ -14,6 +14,7 @@ go_test( deps = [ "//cmd/kube-apiserver/app/testing:go_default_library", "//test/integration/framework:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/test/integration/scale/scale_test.go 
b/test/integration/scale/scale_test.go index a40093571c1..fe92420ec1e 100644 --- a/test/integration/scale/scale_test.go +++ b/test/integration/scale/scale_test.go @@ -22,6 +22,7 @@ import ( "strings" "testing" + _ "github.com/coreos/etcd/etcdserver/api/v3rpc" // Force package logger init. "github.com/coreos/pkg/capnslog" appsv1beta2 "k8s.io/api/apps/v1beta2" From bac270533e0176739f78777bb59f43647c474137 Mon Sep 17 00:00:00 2001 From: Jesse Haka Date: Wed, 6 Dec 2017 09:33:45 +0200 Subject: [PATCH 644/794] use danglingerror add getNodeNameByID and use volume.AttachedDevice as devicepath use uppercase functionname do not delete automatically nodes if node is shutdowned in openstack do not delete node fix gofmt fix cinder detach if instance is not in active state fix gofmt --- pkg/cloudprovider/providers/openstack/BUILD | 1 + .../providers/openstack/openstack.go | 29 +++++++++++++++---- .../openstack/openstack_instances.go | 6 ++-- .../openstack/openstack_loadbalancer.go | 2 +- .../providers/openstack/openstack_routes.go | 2 +- .../providers/openstack/openstack_volumes.go | 29 ++++++++++++++++++- 6 files changed, 58 insertions(+), 11 deletions(-) diff --git a/pkg/cloudprovider/providers/openstack/BUILD b/pkg/cloudprovider/providers/openstack/BUILD index 42a185e2f4c..7840fd00a6a 100644 --- a/pkg/cloudprovider/providers/openstack/BUILD +++ b/pkg/cloudprovider/providers/openstack/BUILD @@ -26,6 +26,7 @@ go_library( "//pkg/controller:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library", diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index 00e15d228b2..759412a9f01 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ 
b/pkg/cloudprovider/providers/openstack/openstack.go @@ -319,6 +319,22 @@ func mapNodeNameToServerName(nodeName types.NodeName) string { return string(nodeName) } +// getNodeNameByID maps instanceid to types.NodeName +func (os *OpenStack) GetNodeNameByID(instanceID string) (types.NodeName, error) { + client, err := os.NewComputeV2() + var nodeName types.NodeName + if err != nil { + return nodeName, err + } + + server, err := servers.Get(client, instanceID).Extract() + if err != nil { + return nodeName, err + } + nodeName = mapServerToNodeName(server) + return nodeName, nil +} + // mapServerToNodeName maps an OpenStack Server to a k8s NodeName func mapServerToNodeName(server *servers.Server) types.NodeName { // Node names are always lowercase, and (at least) @@ -346,11 +362,14 @@ func foreachServer(client *gophercloud.ServiceClient, opts servers.ListOptsBuild return err } -func getServerByName(client *gophercloud.ServiceClient, name types.NodeName) (*servers.Server, error) { +func getServerByName(client *gophercloud.ServiceClient, name types.NodeName, showOnlyActive bool) (*servers.Server, error) { opts := servers.ListOpts{ - Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))), - Status: "ACTIVE", + Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))), } + if showOnlyActive { + opts.Status = "ACTIVE" + } + pager := servers.List(client, opts) serverList := make([]servers.Server, 0, 1) @@ -432,7 +451,7 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) { } func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName) ([]v1.NodeAddress, error) { - srv, err := getServerByName(client, name) + srv, err := getServerByName(client, name, true) if err != nil { return nil, err } @@ -582,7 +601,7 @@ func (os *OpenStack) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Z return cloudprovider.Zone{}, err } - srv, err := getServerByName(compute, nodeName) + srv, err := 
getServerByName(compute, nodeName, true) if err != nil { if err == ErrNotFound { return cloudprovider.Zone{}, cloudprovider.InstanceNotFound diff --git a/pkg/cloudprovider/providers/openstack/openstack_instances.go b/pkg/cloudprovider/providers/openstack/openstack_instances.go index 3cf1733b322..981ff7b9f89 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_instances.go +++ b/pkg/cloudprovider/providers/openstack/openstack_instances.go @@ -103,7 +103,7 @@ func (i *Instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddre // ExternalID returns the cloud provider ID of the specified instance (deprecated). func (i *Instances) ExternalID(name types.NodeName) (string, error) { - srv, err := getServerByName(i.compute, name) + srv, err := getServerByName(i.compute, name, true) if err != nil { if err == ErrNotFound { return "", cloudprovider.InstanceNotFound @@ -151,7 +151,7 @@ func (os *OpenStack) InstanceID() (string, error) { // InstanceID returns the cloud provider ID of the specified instance. func (i *Instances) InstanceID(name types.NodeName) (string, error) { - srv, err := getServerByName(i.compute, name) + srv, err := getServerByName(i.compute, name, true) if err != nil { if err == ErrNotFound { return "", cloudprovider.InstanceNotFound @@ -184,7 +184,7 @@ func (i *Instances) InstanceTypeByProviderID(providerID string) (string, error) // InstanceType returns the type of the specified instance. 
func (i *Instances) InstanceType(name types.NodeName) (string, error) { - srv, err := getServerByName(i.compute, name) + srv, err := getServerByName(i.compute, name, true) if err != nil { return "", err diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 2089c14f4ce..036af670bc7 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -551,7 +551,7 @@ func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1 for _, node := range nodes { nodeName := types.NodeName(node.Name) - srv, err := getServerByName(compute, nodeName) + srv, err := getServerByName(compute, nodeName, true) if err != nil { return nodeSecurityGroupIDs.List(), err } diff --git a/pkg/cloudprovider/providers/openstack/openstack_routes.go b/pkg/cloudprovider/providers/openstack/openstack_routes.go index c5f0974dadd..c5a8ba6d212 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_routes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_routes.go @@ -288,7 +288,7 @@ func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) err } func getPortIDByIP(compute *gophercloud.ServiceClient, targetNode types.NodeName, ipAddress string) (string, error) { - srv, err := getServerByName(compute, targetNode) + srv, err := getServerByName(compute, targetNode, true) if err != nil { return "", err } diff --git a/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/pkg/cloudprovider/providers/openstack/openstack_volumes.go index eab5b7c9b5d..8a530592845 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -17,6 +17,7 @@ limitations under the License. 
package openstack import ( + "errors" "fmt" "io/ioutil" "path" @@ -26,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" k8s_volume "k8s.io/kubernetes/pkg/volume" + volumeutil "k8s.io/kubernetes/pkg/volume/util" "github.com/gophercloud/gophercloud" volumeexpand "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" @@ -317,8 +319,33 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { if instanceID == volume.AttachedServerId { glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) return volume.ID, nil + } else { + nodeName, err := os.GetNodeNameByID(volume.AttachedServerId) + attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerId) + if err != nil { + glog.Error(attachErr) + return "", errors.New(attachErr) + } + // using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128 + devicePath := volume.AttachedDevice + danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath) + glog.V(4).Infof("volume %s is already attached to node %s path %s", volumeID, nodeName, devicePath) + // check special case, if node is deleted from cluster but exist still in openstack + // we need to check can we detach the cinder, node is deleted from cluster if state is not ACTIVE + srv, err := getServerByName(cClient, nodeName, false) + if err != nil { + return "", err + } + if srv.Status != "ACTIVE" { + err = os.DetachDisk(volume.AttachedServerId, volumeID) + if err != nil { + glog.Error(err) + return "", err + } + glog.V(4).Infof("detached volume %s node state was %s", volumeID, srv.Status) + } + return "", danglingErr } - return "", fmt.Errorf("disk %s is attached to a different instance (%s)", volumeID, volume.AttachedServerId) } startTime := time.Now() From 1bfb5d0670bf7b5c4253464291c20bd03d90205f Mon Sep 17 00:00:00 2001 From: 
andyzhangx Date: Mon, 8 Jan 2018 06:01:42 +0000 Subject: [PATCH 645/794] add remount logic if original mount path is invalid --- pkg/volume/azure_dd/attacher.go | 13 +++++++++++++ pkg/volume/azure_dd/azure_mounter.go | 15 +++++++++++++-- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 839c96048ce..aeca4a176b7 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -232,6 +232,19 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str } } + if !notMnt { + // testing original mount point, make sure the mount link is valid + if _, err := (&osIOHandler{}).ReadDir(deviceMountPath); err != nil { + // mount link is invalid, now unmount and remount later + glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", deviceMountPath, err) + if err := mounter.Unmount(deviceMountPath); err != nil { + glog.Errorf("azureDisk - Unmount deviceMountPath %s failed with %v", deviceMountPath, err) + return err + } + notMnt = true + } + } + volumeSource, err := getVolumeSource(spec) if err != nil { return err diff --git a/pkg/volume/azure_dd/azure_mounter.go b/pkg/volume/azure_dd/azure_mounter.go index dfa9907e880..36203435ae2 100644 --- a/pkg/volume/azure_dd/azure_mounter.go +++ b/pkg/volume/azure_dd/azure_mounter.go @@ -86,8 +86,19 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { return err } if !mountPoint { - glog.V(4).Infof("azureDisk - already mounted to target %s", dir) - return nil + // testing original mount point, make sure the mount link is valid + _, err := (&osIOHandler{}).ReadDir(dir) + if err == nil { + glog.V(4).Infof("azureDisk - already mounted to target %s", dir) + return nil + } + // mount link is invalid, now unmount and remount later + glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", dir, err) + if err := mounter.Unmount(dir); err != nil { + 
glog.Errorf("azureDisk - Unmount directory %s failed with %v", dir, err) + return err + } + mountPoint = true } if runtime.GOOS != "windows" { From 74c9efa148ac6591a1dcf8f95c7fdac35aa603d0 Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Mon, 8 Jan 2018 14:44:48 +0530 Subject: [PATCH 646/794] Add CustomResourceValidation example in sample-controller - Mention the schema in the example CRD. - Update README and mention about feature gates. --- staging/src/k8s.io/sample-controller/README.md | 16 ++++++++++++++++ .../artifacts/examples/crd.yaml | 9 +++++++++ 2 files changed, 25 insertions(+) diff --git a/staging/src/k8s.io/sample-controller/README.md b/staging/src/k8s.io/sample-controller/README.md index d2aaf2b34e2..95ba305a7bf 100644 --- a/staging/src/k8s.io/sample-controller/README.md +++ b/staging/src/k8s.io/sample-controller/README.md @@ -73,6 +73,22 @@ type User struct { } ``` +## Validation + +To validate custom resources, use the [`CustomResourceValidation`](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) feature. + +This feature is beta and enabled by default in v1.9. If you are using v1.8, enable the feature using +the `CustomResourceValidation` feature gate on the [kube-apiserver](https://kubernetes.io/docs/admin/kube-apiserver): + +```sh +--feature-gates=CustomResourceValidation=true +``` + +### Example + +The schema in the [example CRD](./artifacts/examples/crd.yaml) applies the following validation on the custom resource: +`spec.replicas` must be an integer and must have a minimum value of 1 and a maximum value of 10. 
+ ## Cleanup You can clean up the created CustomResourceDefinition with: diff --git a/staging/src/k8s.io/sample-controller/artifacts/examples/crd.yaml b/staging/src/k8s.io/sample-controller/artifacts/examples/crd.yaml index 4a457068dcd..36469161c6a 100644 --- a/staging/src/k8s.io/sample-controller/artifacts/examples/crd.yaml +++ b/staging/src/k8s.io/sample-controller/artifacts/examples/crd.yaml @@ -9,3 +9,12 @@ spec: kind: Foo plural: foos scope: Namespaced + validation: + openAPIV3Schema: + properties: + spec: + properties: + replicas: + type: integer + minimum: 1 + maximum: 10 From ff380d67f469de8d8ad119dc36b490cedfa77d88 Mon Sep 17 00:00:00 2001 From: zouyee Date: Mon, 8 Jan 2018 18:42:01 +0800 Subject: [PATCH 647/794] remove deplicate func --- pkg/controller/deployment/util/deployment_util.go | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index d017e33709e..763b8debdb3 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -124,18 +124,6 @@ func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensi return nil } -// TODO: remove the duplicate -// GetDeploymentConditionInternal returns the condition with the provided type. -func GetDeploymentConditionInternal(status internalextensions.DeploymentStatus, condType internalextensions.DeploymentConditionType) *internalextensions.DeploymentCondition { - for i := range status.Conditions { - c := status.Conditions[i] - if c.Type == condType { - return &c - } - } - return nil -} - // SetDeploymentCondition updates the deployment to include the provided condition. If the condition that // we are about to add already exists and has the same status and reason then we are not going to update. 
func SetDeploymentCondition(status *extensions.DeploymentStatus, condition extensions.DeploymentCondition) { From 93602cd823fc9dc0da663f3580b8f409db56a3a0 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Mon, 11 Dec 2017 12:21:35 +0100 Subject: [PATCH 648/794] Containerized kubelet is no longer experimental --- cmd/kubelet/app/options/options.go | 2 +- cmd/kubelet/app/server.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 4e9e10e9df4..29e3cc1d8e3 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -329,11 +329,11 @@ func (f *KubeletFlags) AddFlags(fs *pflag.FlagSet) { fs.BoolVar(&f.RegisterNode, "register-node", f.RegisterNode, "Register the node with the apiserver. If --kubeconfig is not provided, this flag is irrelevant, as the Kubelet won't have an apiserver to register with. Default=true.") fs.Var(utiltaints.NewTaintsVar(&f.RegisterWithTaints), "register-with-taints", "Register the node with the given list of taints (comma separated \"=:\"). No-op if register-node is false.") + fs.BoolVar(&f.Containerized, "containerized", f.Containerized, "Running kubelet in a container.") // EXPERIMENTAL FLAGS fs.StringVar(&f.ExperimentalMounterPath, "experimental-mounter-path", f.ExperimentalMounterPath, "[Experimental] Path of mounter binary. Leave empty to use the default mount.") fs.StringSliceVar(&f.AllowedUnsafeSysctls, "experimental-allowed-unsafe-sysctls", f.AllowedUnsafeSysctls, "Comma-separated whitelist of unsafe sysctls or unsafe sysctl patterns (ending in *). Use these at your own risk.") - fs.BoolVar(&f.Containerized, "containerized", f.Containerized, "Experimental support for running kubelet in a container. 
Intended for testing.") fs.BoolVar(&f.ExperimentalKernelMemcgNotification, "experimental-kernel-memcg-notification", f.ExperimentalKernelMemcgNotification, "If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling.") fs.StringVar(&f.RemoteRuntimeEndpoint, "container-runtime-endpoint", f.RemoteRuntimeEndpoint, "[Experimental] The endpoint of remote runtime service. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'") fs.StringVar(&f.RemoteImageEndpoint, "image-service-endpoint", f.RemoteImageEndpoint, "[Experimental] The endpoint of remote image service. If not specified, it will be the same with container-runtime-endpoint by default. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'") diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 8100b44afae..c93996ac6ad 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -139,7 +139,7 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err mounter := mount.New(s.ExperimentalMounterPath) var writer kubeio.Writer = &kubeio.StdWriter{} if s.Containerized { - glog.V(2).Info("Running kubelet in containerized mode (experimental)") + glog.V(2).Info("Running kubelet in containerized mode") mounter = mount.NewNsenterMounter() writer = &kubeio.NsenterWriter{} } From 77c71170ebb780fbe36dc66cf2ec99c6b3ac4a75 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 2 Jan 2018 11:57:06 -0500 Subject: [PATCH 649/794] Add support for cloud-controller-manager in local-up-cluster.sh We need an easy way to test the new external cloud provider. So let's keep the existing CLOUD_PROVIDER and CLOUD_CONFIG as-is and add a flag EXTERNAL_CLOUD_PROVIDER to run a separate process. 
--- hack/local-up-cluster.sh | 61 ++++++++++++++++++++++++++++++++++------ 1 file changed, 53 insertions(+), 8 deletions(-) diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 98a0a6b958e..4c6b855331b 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -65,6 +65,7 @@ KUBECTL=${KUBECTL:-cluster/kubectl.sh} WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-20} ENABLE_DAEMON=${ENABLE_DAEMON:-false} HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE:-"127.0.0.1"} +EXTERNAL_CLOUD_PROVIDER=${EXTERNAL_CLOUD_PROVIDER:-false} CLOUD_PROVIDER=${CLOUD_PROVIDER:-""} CLOUD_CONFIG=${CLOUD_CONFIG:-""} FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=false"} @@ -532,9 +533,14 @@ function start_apiserver { kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator + cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" + if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then + cloud_config_arg="--cloud-provider=external" + fi APISERVER_LOG=${LOG_DIR}/kube-apiserver.log - ${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver ${swagger_arg} ${audit_arg} ${authorizer_arg} ${priv_arg} ${runtime_config}\ + ${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver ${swagger_arg} ${audit_arg} ${authorizer_arg} ${priv_arg} ${runtime_config} \ + ${cloud_config_arg} \ ${advertise_address} \ --v=${LOG_LEVEL} \ --vmodule="${LOG_SPEC}" \ @@ -556,8 +562,6 @@ function start_apiserver { --service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \ --feature-gates="${FEATURE_GATES}" \ --external-hostname="${EXTERNAL_HOSTNAME}" \ - --cloud-provider="${CLOUD_PROVIDER}" \ - --cloud-config="${CLOUD_CONFIG}" \ --requestheader-username-headers=X-Remote-User \ --requestheader-group-headers=X-Remote-Group \ 
--requestheader-extra-headers-prefix=X-Remote-Extra- \ @@ -601,6 +605,11 @@ function start_controller_manager { node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 " fi + cloud_config_arg=cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" + if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then + cloud_config_arg="--cloud-provider=external" + fi + CTLRMGR_LOG=${LOG_DIR}/kube-controller-manager.log ${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" controller-manager \ --v=${LOG_LEVEL} \ @@ -613,8 +622,7 @@ function start_controller_manager { ${node_cidr_args} \ --pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \ --feature-gates="${FEATURE_GATES}" \ - --cloud-provider="${CLOUD_PROVIDER}" \ - --cloud-config="${CLOUD_CONFIG}" \ + ${cloud_config_arg} \ --kubeconfig "$CERT_DIR"/controller.kubeconfig \ --use-service-account-credentials \ --controllers="${KUBE_CONTROLLERS}" \ @@ -622,6 +630,35 @@ function start_controller_manager { CTLRMGR_PID=$! } +function start_cloud_controller_manager { + if [ -z "${CLOUD_CONFIG}" ]; then + echo "CLOUD_CONFIG cannot be empty!" + exit 1 + fi + if [ ! -f "${CLOUD_CONFIG}" ]; then + echo "Cloud config ${CLOUD_CONFIG} doesn't exist" + exit 1 + fi + + node_cidr_args="" + if [[ "${NET_PLUGIN}" == "kubenet" ]]; then + node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 " + fi + + CLOUD_CTLRMGR_LOG=${LOG_DIR}/cloud-controller-manager.log + ${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" alpha cloud-controller-manager \ + --v=${LOG_LEVEL} \ + --vmodule="${LOG_SPEC}" \ + ${node_cidr_args} \ + --feature-gates="${FEATURE_GATES}" \ + --cloud-provider=${CLOUD_PROVIDER} \ + --cloud-config=${CLOUD_CONFIG} \ + --kubeconfig "$CERT_DIR"/controller.kubeconfig \ + --use-service-account-credentials \ + --master="https://${API_HOST}:${API_SECURE_PORT}" >"${CLOUD_CTLRMGR_LOG}" 2>&1 & + CLOUD_CTLRMGR_PID=$! 
+} + function start_kubelet { KUBELET_LOG=${LOG_DIR}/kubelet.log mkdir -p "${POD_MANIFEST_PATH}" &>/dev/null || sudo mkdir -p "${POD_MANIFEST_PATH}" @@ -631,6 +668,11 @@ function start_kubelet { priv_arg="--allow-privileged " fi + cloud_config_arg=cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" + if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then + cloud_config_arg="--cloud-provider=external" + fi + mkdir -p "/var/lib/kubelet" &>/dev/null || sudo mkdir -p "/var/lib/kubelet" if [[ -z "${DOCKERIZE_KUBELET}" ]]; then # Enable dns @@ -687,8 +729,7 @@ function start_kubelet { --rkt-path="${RKT_PATH}" \ --rkt-stage1-image="${RKT_STAGE1_IMAGE}" \ --hostname-override="${HOSTNAME_OVERRIDE}" \ - --cloud-provider="${CLOUD_PROVIDER}" \ - --cloud-config="${CLOUD_CONFIG}" \ + ${cloud_config_arg} \ --address="${KUBELET_HOST}" \ --kubeconfig "$CERT_DIR"/kubelet.kubeconfig \ --feature-gates="${FEATURE_GATES}" \ @@ -753,7 +794,7 @@ function start_kubelet { -i \ --cidfile=$KUBELET_CIDFILE \ gcr.io/google_containers/kubelet \ - /kubelet --v=${LOG_LEVEL} --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --pod-manifest-path="${POD_MANIFEST_PATH}" --hostname-override="${HOSTNAME_OVERRIDE}" --cloud-provider="${CLOUD_PROVIDER}" --cloud-config="${CLOUD_CONFIG}" \ --address="127.0.0.1" --kubeconfig "$CERT_DIR"/kubelet.kubeconfig --port="$KUBELET_PORT" --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" &> $KUBELET_LOG & + /kubelet --v=${LOG_LEVEL} --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --pod-manifest-path="${POD_MANIFEST_PATH}" --hostname-override="${HOSTNAME_OVERRIDE}" ${cloud_config_arg} \ --address="127.0.0.1" --kubeconfig "$CERT_DIR"/kubelet.kubeconfig --port="$KUBELET_PORT" --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" &> $KUBELET_LOG & fi } @@ -850,6 +891,7 @@ if [[ "${START_MODE}" != "kubeletonly" ]]; then Logs: ${APISERVER_LOG:-} ${CTLRMGR_LOG:-} + 
${CLOUD_CTLRMGR_LOG:-} ${PROXY_LOG:-} ${SCHEDULER_LOG:-} EOF @@ -936,6 +978,9 @@ if [[ "${START_MODE}" != "kubeletonly" ]]; then set_service_accounts start_apiserver start_controller_manager + if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then + start_cloud_controller_manager + fi start_kubeproxy start_kubedns start_kubedashboard From e3cafd83037a752c99ddb072a3c09c94b9dfdf43 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Thu, 14 Dec 2017 17:16:34 +0200 Subject: [PATCH 650/794] Enable support for etcd3 --- .../reactive/kubernetes_master.py | 37 ++++++++++++++----- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index d27caf59f57..a4211913002 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -24,7 +24,7 @@ import string import json import ipaddress -import charms.leadership +from charms.leadership import leader_get, leader_set from shutil import move @@ -112,6 +112,7 @@ def check_for_upgrade_needed(): # we take no risk and forcibly upgrade the snaps. # Forcibly means we do not prompt the user to call the upgrade action. set_upgrade_needed(forced=True) + upgrade_for_etcd() def snap_resources_changed(): @@ -136,6 +137,13 @@ def snap_resources_changed(): any_file_changed(paths) return 'unknown' +def upgrade_for_etcd(): + # we are upgrading the charm. + # If this is an old deployment etcd_version is not set + # so if we are the leader we need to set it to v2 + if not leader_get('etcd_version') and is_state('leadership.is_leader'): + leader_set(etcd_version='etcd2') + def add_rbac_roles(): '''Update the known_tokens file with proper groups.''' @@ -316,7 +324,7 @@ def setup_leader_authentication(): # path as a key. 
# eg: # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'} - charms.leadership.leader_set(leader_data) + leader_set(leader_data) remove_state('kubernetes-master.components.started') set_state('authentication.setup') @@ -364,7 +372,7 @@ def get_keys_from_leader(keys, overwrite_local=False): # If the path does not exist, assume we need it if not os.path.exists(k) or overwrite_local: # Fetch data from leadership broadcast - contents = charms.leadership.leader_get(k) + contents = leader_get(k) # Default to logging the warning and wait for leader data to be set if contents is None: msg = "Waiting on leaders crypto keys." @@ -423,6 +431,7 @@ def master_services_down(): @when('etcd.available', 'tls_client.server.certificate.saved', 'authentication.setup') +@when('leadership.set.etcd_version') @when_not('kubernetes-master.components.started') def start_master(etcd): '''Run the Kubernetes master components.''' @@ -440,7 +449,8 @@ def start_master(etcd): handle_etcd_relation(etcd) # Add CLI options to all components - configure_apiserver(etcd) + leader_etcd_version = leader_get('etcd_version') + configure_apiserver(etcd.get_connection_string(), leader_etcd_version) configure_controller_manager() configure_scheduler() set_state('kubernetes-master.components.started') @@ -462,6 +472,14 @@ def etcd_data_change(etcd): if data_changed('etcd-connect', connection_string): remove_state('kubernetes-master.components.started') + # We are the leader and the etcd_version is not set meaning + # this is the first time we connect to etcd. 
+ if is_state('leadership.is_leader') and not leader_get('etcd_version'): + if etcd.get_version().startswith('3.'): + leader_set(etcd_version='etcd3') + else: + leader_set(etcd_version='etcd2') + @when('kube-control.connected') @when('cdk-addons.configured') @@ -816,9 +834,11 @@ def on_config_allow_privileged_change(): @when('config.changed.api-extra-args') @when('kubernetes-master.components.started') +@when('leadership.set.etcd_version') @when('etcd.available') def on_config_api_extra_args_change(etcd): - configure_apiserver(etcd) + configure_apiserver(etcd.get_connection_string(), + leader_get('etcd_version')) @when('config.changed.controller-manager-extra-args') @@ -1045,7 +1065,7 @@ def configure_kubernetes_service(service, base_args, extra_args_key): db.set(prev_args_key, args) -def configure_apiserver(etcd): +def configure_apiserver(etcd_connection_string, leader_etcd_version): api_opts = {} # Get the tls paths from the layer data. @@ -1075,8 +1095,7 @@ def configure_apiserver(etcd): api_opts['logtostderr'] = 'true' api_opts['insecure-bind-address'] = '127.0.0.1' api_opts['insecure-port'] = '8080' - api_opts['storage-backend'] = 'etcd2' # FIXME: add etcd3 support - + api_opts['storage-backend'] = leader_etcd_version api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv' api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv' api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key' @@ -1089,7 +1108,7 @@ def configure_apiserver(etcd): api_opts['etcd-cafile'] = etcd_ca api_opts['etcd-keyfile'] = etcd_key api_opts['etcd-certfile'] = etcd_cert - api_opts['etcd-servers'] = etcd.get_connection_string() + api_opts['etcd-servers'] = etcd_connection_string admission_control = [ 'Initializers', From dca74f17fd4e1088145a9baa11e4674f0ca61b3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20K=C5=82obuszewski?= Date: Mon, 8 Jan 2018 14:54:26 +0100 Subject: [PATCH 651/794] Bump fluentd-gcp image used to 2.0.13 --- 
cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index fa039413d39..ac9fdcd0053 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -1,13 +1,13 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: - name: fluentd-gcp-v2.0.12 + name: fluentd-gcp-v2.0.13 namespace: kube-system labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v2.0.12 + version: v2.0.13 spec: updateStrategy: type: RollingUpdate @@ -16,7 +16,7 @@ spec: labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" - version: v2.0.12 + version: v2.0.13 # This annotation ensures that fluentd does not get evicted if the node # supports critical pod annotation based priority scheme. # Note that this does not guarantee admission on the nodes (#40573). 
@@ -27,7 +27,7 @@ spec: dnsPolicy: Default containers: - name: fluentd-gcp - image: gcr.io/google-containers/fluentd-gcp:2.0.12 + image: gcr.io/google-containers/fluentd-gcp:2.0.13 env: - name: FLUENTD_ARGS value: --no-supervisor -q From 9d7b74658dcd658fb070f009ad153423c20f11ef Mon Sep 17 00:00:00 2001 From: Sandeep Rajan Date: Wed, 3 Jan 2018 14:45:17 +0530 Subject: [PATCH 652/794] include kube-dns deployment check ignore 404 error --- cmd/kubeadm/app/phases/upgrade/postupgrade.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index f18c2e23f79..a7e98be66ac 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -21,6 +21,7 @@ import ( "os" "time" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" @@ -122,9 +123,13 @@ func removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg *kubeadmapi.MasterConfigurati return err } if coreDNSDeployment.Status.ReadyReplicas == 0 { - return fmt.Errorf("the CodeDNS deployment isn't ready yet") + return fmt.Errorf("the CoreDNS deployment isn't ready yet") } - return apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, kubeadmconstants.KubeDNS) + err = apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, kubeadmconstants.KubeDNS) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + return nil }, 10) } return nil From d4e17cb7b4b3a655d11d9a1739189548b47628a1 Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Fri, 1 Dec 2017 11:26:11 -0500 Subject: [PATCH 653/794] Allow oadm drain to continue w ds-managed pods w local storage --- pkg/kubectl/cmd/drain.go | 9 ++++- pkg/kubectl/cmd/drain_test.go | 76 ++++++++++++++++++++++++++++++----- 2 files changed, 74 insertions(+), 11 deletions(-) diff --git 
a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 910654f6f6a..5640c734623 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -466,7 +466,7 @@ func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev for _, pod := range podList.Items { podOk := true - for _, filt := range []podFilter{mirrorPodFilter, o.localStorageFilter, o.unreplicatedFilter, o.daemonsetFilter} { + for _, filt := range []podFilter{o.daemonsetFilter, mirrorPodFilter, o.localStorageFilter, o.unreplicatedFilter} { filterOk, w, f := filt(pod) podOk = podOk && filterOk @@ -476,6 +476,13 @@ func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev if f != nil { fs[f.string] = append(fs[f.string], pod.Name) } + + // short-circuit as soon as pod not ok + // at that point, there is no reason to run pod + // through any additional filters + if !podOk { + break + } } if podOk { pods = append(pods, pod) diff --git a/pkg/kubectl/cmd/drain_test.go b/pkg/kubectl/cmd/drain_test.go index 5200e95cd89..f9996314406 100644 --- a/pkg/kubectl/cmd/drain_test.go +++ b/pkg/kubectl/cmd/drain_test.go @@ -304,6 +304,34 @@ func TestDrain(t *testing.T) { }, } + ds_pod_with_emptyDir := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + Namespace: "default", + CreationTimestamp: metav1.Time{Time: time.Now()}, + Labels: labels, + SelfLink: testapi.Default.SelfLink("pods", "bar"), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "extensions/v1beta1", + Kind: "DaemonSet", + Name: "ds", + BlockOwnerDeletion: boolptr(true), + Controller: boolptr(true), + }, + }, + }, + Spec: corev1.PodSpec{ + NodeName: "node", + Volumes: []corev1.Volume{ + { + Name: "scratch", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""}}, + }, + }, + }, + } + orphaned_ds_pod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", @@ -414,15 +442,16 @@ func TestDrain(t *testing.T) { } tests := []struct { - description 
string - node *corev1.Node - expected *corev1.Node - pods []corev1.Pod - rcs []api.ReplicationController - replicaSets []extensions.ReplicaSet - args []string - expectFatal bool - expectDelete bool + description string + node *corev1.Node + expected *corev1.Node + pods []corev1.Pod + rcs []api.ReplicationController + replicaSets []extensions.ReplicaSet + args []string + expectWarning string + expectFatal bool + expectDelete bool }{ { description: "RC-managed pod", @@ -474,6 +503,17 @@ func TestDrain(t *testing.T) { expectFatal: false, expectDelete: false, }, + { + description: "DS-managed pod with emptyDir with --ignore-daemonsets", + node: node, + expected: cordoned_node, + pods: []corev1.Pod{ds_pod_with_emptyDir}, + rcs: []api.ReplicationController{rc}, + args: []string{"node", "--ignore-daemonsets"}, + expectWarning: "WARNING: Ignoring DaemonSet-managed pods: bar\n", + expectFatal: false, + expectDelete: false, + }, { description: "Job-managed pod", node: node, @@ -661,6 +701,7 @@ func TestDrain(t *testing.T) { cmd := NewCmdDrain(f, buf, errBuf) saw_fatal := false + fatal_msg := "" func() { defer func() { // Recover from the panic below. 
@@ -668,7 +709,7 @@ func TestDrain(t *testing.T) { // Restore cmdutil behavior cmdutil.DefaultBehaviorOnFatal() }() - cmdutil.BehaviorOnFatal(func(e string, code int) { saw_fatal = true; panic(e) }) + cmdutil.BehaviorOnFatal(func(e string, code int) { saw_fatal = true; fatal_msg = e; panic(e) }) cmd.SetArgs(test.args) cmd.Execute() }() @@ -676,6 +717,11 @@ func TestDrain(t *testing.T) { if !saw_fatal { t.Fatalf("%s: unexpected non-error when using %s", test.description, currMethod) } + } else { + if saw_fatal { + t.Fatalf("%s: unexpected error when using %s: %s", test.description, currMethod, fatal_msg) + + } } if test.expectDelete { @@ -693,6 +739,16 @@ func TestDrain(t *testing.T) { t.Fatalf("%s: unexpected delete when using %s", test.description, currMethod) } } + + if len(test.expectWarning) > 0 { + if len(errBuf.String()) == 0 { + t.Fatalf("%s: expected warning, but found no stderr output", test.description) + } + + if errBuf.String() != test.expectWarning { + t.Fatalf("%s: actual warning message did not match expected warning message.\n Expecting: %s\n Got: %s", test.description, test.expectWarning, errBuf.String()) + } + } } } } From ea085e0a32a6b723e5c565e60d8941b5a760bb68 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Fri, 15 Dec 2017 15:02:31 -0800 Subject: [PATCH 654/794] client-go: remove import of github.com/gregjones/httpcache --- pkg/kubectl/cmd/util/BUILD | 1 + pkg/kubectl/cmd/util/factory_client_access.go | 12 ++- pkg/kubectl/util/BUILD | 1 + pkg/kubectl/util/transport/BUILD | 34 +++++++ pkg/kubectl/util/transport/round_tripper.go | 51 ++++++++++ .../util/transport/round_tripper_test.go | 95 +++++++++++++++++++ staging/src/k8s.io/client-go/rest/config.go | 5 - .../src/k8s.io/client-go/rest/config_test.go | 1 - .../src/k8s.io/client-go/rest/transport.go | 1 - staging/src/k8s.io/client-go/transport/BUILD | 3 - .../src/k8s.io/client-go/transport/config.go | 4 - .../client-go/transport/round_trippers.go | 31 ------ 
.../transport/round_trippers_test.go | 61 ------------ 13 files changed, 193 insertions(+), 107 deletions(-) create mode 100644 pkg/kubectl/util/transport/BUILD create mode 100644 pkg/kubectl/util/transport/round_tripper.go create mode 100644 pkg/kubectl/util/transport/round_tripper_test.go diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index 282666a1be4..6541d3953e9 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -40,6 +40,7 @@ go_library( "//pkg/kubectl/plugins:go_default_library", "//pkg/kubectl/resource:go_default_library", "//pkg/kubectl/scheme:go_default_library", + "//pkg/kubectl/util/transport:go_default_library", "//pkg/kubectl/validation:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index d03ebdc165c..391aa33a384 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -23,6 +23,7 @@ import ( "flag" "fmt" "io" + "net/http" "os" "path/filepath" "regexp" @@ -59,6 +60,7 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/kubectl/util/transport" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" ) @@ -109,7 +111,15 @@ func (f *discoveryFactory) DiscoveryClient() (discovery.CachedDiscoveryInterface return nil, err } - cfg.CacheDir = f.cacheDir + if f.cacheDir != "" { + wt := cfg.WrapTransport + cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + if wt != nil { + rt = wt(rt) + } + return transport.NewCacheRoundTripper(f.cacheDir, rt) + } + } discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) if err != nil { diff --git a/pkg/kubectl/util/BUILD b/pkg/kubectl/util/BUILD index 
96457f550ef..c1c2e305d65 100644 --- a/pkg/kubectl/util/BUILD +++ b/pkg/kubectl/util/BUILD @@ -101,6 +101,7 @@ filegroup( "//pkg/kubectl/util/logs:all-srcs", "//pkg/kubectl/util/slice:all-srcs", "//pkg/kubectl/util/term:all-srcs", + "//pkg/kubectl/util/transport:all-srcs", ], tags = ["automanaged"], visibility = ["//build/visible_to:pkg_kubectl_util_CONSUMERS"], diff --git a/pkg/kubectl/util/transport/BUILD b/pkg/kubectl/util/transport/BUILD new file mode 100644 index 00000000000..ffdf4a2607e --- /dev/null +++ b/pkg/kubectl/util/transport/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["round_tripper.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/transport", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gregjones/httpcache:go_default_library", + "//vendor/github.com/gregjones/httpcache/diskcache:go_default_library", + "//vendor/github.com/peterbourgon/diskv:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["round_tripper_test.go"], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/transport", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/kubectl/util/transport/round_tripper.go b/pkg/kubectl/util/transport/round_tripper.go new file mode 100644 index 00000000000..82e3e502e5e --- /dev/null +++ b/pkg/kubectl/util/transport/round_tripper.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package transport provides a round tripper capable of caching HTTP responses. +package transport + +import ( + "net/http" + "path/filepath" + + "github.com/gregjones/httpcache" + "github.com/gregjones/httpcache/diskcache" + "github.com/peterbourgon/diskv" +) + +type cacheRoundTripper struct { + rt *httpcache.Transport +} + +// NewCacheRoundTripper creates a roundtripper that reads the ETag on +// response headers and send the If-None-Match header on subsequent +// corresponding requests. +func NewCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { + d := diskv.New(diskv.Options{ + BasePath: cacheDir, + TempDir: filepath.Join(cacheDir, ".diskv-temp"), + }) + t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) + t.Transport = rt + + return &cacheRoundTripper{rt: t} +} + +func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return rt.rt.RoundTrip(req) +} + +func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } diff --git a/pkg/kubectl/util/transport/round_tripper_test.go b/pkg/kubectl/util/transport/round_tripper_test.go new file mode 100644 index 00000000000..e68e8e37223 --- /dev/null +++ b/pkg/kubectl/util/transport/round_tripper_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/url" + "os" + "testing" +) + +// copied from k8s.io/client-go/transport/round_trippers_test.go +type testRoundTripper struct { + Request *http.Request + Response *http.Response + Err error +} + +func (rt *testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.Request = req + return rt.Response, rt.Err +} + +func TestCacheRoundTripper(t *testing.T) { + rt := &testRoundTripper{} + cacheDir, err := ioutil.TempDir("", "cache-rt") + defer os.RemoveAll(cacheDir) + if err != nil { + t.Fatal(err) + } + cache := NewCacheRoundTripper(cacheDir, rt) + + // First call, caches the response + req := &http.Request{ + Method: http.MethodGet, + URL: &url.URL{Host: "localhost"}, + } + rt.Response = &http.Response{ + Header: http.Header{"ETag": []string{`"123456"`}}, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Content"))), + StatusCode: http.StatusOK, + } + resp, err := cache.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "Content" { + t.Errorf(`Expected Body to be "Content", got %q`, string(content)) + } + + // Second call, returns cached response + req = &http.Request{ + Method: http.MethodGet, + URL: &url.URL{Host: "localhost"}, + } + rt.Response = &http.Response{ + StatusCode: http.StatusNotModified, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Other Content"))), + } + + resp, err = cache.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + + // Read body and make 
sure we have the initial content + content, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if string(content) != "Content" { + t.Errorf("Invalid content read from cache %q", string(content)) + } +} diff --git a/staging/src/k8s.io/client-go/rest/config.go b/staging/src/k8s.io/client-go/rest/config.go index 038fee94537..eb006ea101e 100644 --- a/staging/src/k8s.io/client-go/rest/config.go +++ b/staging/src/k8s.io/client-go/rest/config.go @@ -71,10 +71,6 @@ type Config struct { // TODO: demonstrate an OAuth2 compatible client. BearerToken string - // CacheDir is the directory where we'll store HTTP cached responses. - // If set to empty string, no caching mechanism will be used. - CacheDir string - // Impersonate is the configuration that RESTClient will use for impersonation. Impersonate ImpersonationConfig @@ -434,7 +430,6 @@ func CopyConfig(config *Config) *Config { Username: config.Username, Password: config.Password, BearerToken: config.BearerToken, - CacheDir: config.CacheDir, Impersonate: ImpersonationConfig{ Groups: config.Impersonate.Groups, Extra: config.Impersonate.Extra, diff --git a/staging/src/k8s.io/client-go/rest/config_test.go b/staging/src/k8s.io/client-go/rest/config_test.go index 0e86442dbd0..5d4d1a2fe3e 100644 --- a/staging/src/k8s.io/client-go/rest/config_test.go +++ b/staging/src/k8s.io/client-go/rest/config_test.go @@ -267,7 +267,6 @@ func TestAnonymousConfig(t *testing.T) { expected.BearerToken = "" expected.Username = "" expected.Password = "" - expected.CacheDir = "" expected.AuthProvider = nil expected.AuthConfigPersister = nil expected.TLSClientConfig.CertData = nil diff --git a/staging/src/k8s.io/client-go/rest/transport.go b/staging/src/k8s.io/client-go/rest/transport.go index f59f8dbe278..878c6abf164 100644 --- a/staging/src/k8s.io/client-go/rest/transport.go +++ b/staging/src/k8s.io/client-go/rest/transport.go @@ -89,7 +89,6 @@ func (c *Config) TransportConfig() (*transport.Config, error) { }, 
Username: c.Username, Password: c.Password, - CacheDir: c.CacheDir, BearerToken: c.BearerToken, Impersonate: transport.ImpersonationConfig{ UserName: c.Impersonate.UserName, diff --git a/staging/src/k8s.io/client-go/transport/BUILD b/staging/src/k8s.io/client-go/transport/BUILD index e5a5a570939..d9c8502faf5 100644 --- a/staging/src/k8s.io/client-go/transport/BUILD +++ b/staging/src/k8s.io/client-go/transport/BUILD @@ -28,9 +28,6 @@ go_library( importpath = "k8s.io/client-go/transport", deps = [ "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/gregjones/httpcache:go_default_library", - "//vendor/github.com/gregjones/httpcache/diskcache:go_default_library", - "//vendor/github.com/peterbourgon/diskv:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/transport/config.go b/staging/src/k8s.io/client-go/transport/config.go index 425f8f87a53..af347dafea8 100644 --- a/staging/src/k8s.io/client-go/transport/config.go +++ b/staging/src/k8s.io/client-go/transport/config.go @@ -37,10 +37,6 @@ type Config struct { // Bearer token for authentication BearerToken string - // CacheDir is the directory where we'll store HTTP cached responses. - // If set to empty string, no caching mechanism will be used. 
- CacheDir string - // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig diff --git a/staging/src/k8s.io/client-go/transport/round_trippers.go b/staging/src/k8s.io/client-go/transport/round_trippers.go index 2ee605d7be0..c728b18775f 100644 --- a/staging/src/k8s.io/client-go/transport/round_trippers.go +++ b/staging/src/k8s.io/client-go/transport/round_trippers.go @@ -19,14 +19,10 @@ package transport import ( "fmt" "net/http" - "path/filepath" "strings" "time" "github.com/golang/glog" - "github.com/gregjones/httpcache" - "github.com/gregjones/httpcache/diskcache" - "github.com/peterbourgon/diskv" utilnet "k8s.io/apimachinery/pkg/util/net" ) @@ -60,9 +56,6 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip len(config.Impersonate.Extra) > 0 { rt = NewImpersonatingRoundTripper(config.Impersonate, rt) } - if len(config.CacheDir) > 0 { - rt = NewCacheRoundTripper(config.CacheDir, rt) - } return rt, nil } @@ -86,30 +79,6 @@ type requestCanceler interface { CancelRequest(*http.Request) } -type cacheRoundTripper struct { - rt *httpcache.Transport -} - -// NewCacheRoundTripper creates a roundtripper that reads the ETag on -// response headers and send the If-None-Match header on subsequent -// corresponding requests. 
-func NewCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { - d := diskv.New(diskv.Options{ - BasePath: cacheDir, - TempDir: filepath.Join(cacheDir, ".diskv-temp"), - }) - t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) - t.Transport = rt - - return &cacheRoundTripper{rt: t} -} - -func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return rt.rt.RoundTrip(req) -} - -func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } - type authProxyRoundTripper struct { username string groups []string diff --git a/staging/src/k8s.io/client-go/transport/round_trippers_test.go b/staging/src/k8s.io/client-go/transport/round_trippers_test.go index c1e30c3f208..d5ffc6bde30 100644 --- a/staging/src/k8s.io/client-go/transport/round_trippers_test.go +++ b/staging/src/k8s.io/client-go/transport/round_trippers_test.go @@ -17,11 +17,7 @@ limitations under the License. package transport import ( - "bytes" - "io/ioutil" "net/http" - "net/url" - "os" "reflect" "strings" "testing" @@ -220,60 +216,3 @@ func TestAuthProxyRoundTripper(t *testing.T) { } } } - -func TestCacheRoundTripper(t *testing.T) { - rt := &testRoundTripper{} - cacheDir, err := ioutil.TempDir("", "cache-rt") - defer os.RemoveAll(cacheDir) - if err != nil { - t.Fatal(err) - } - cache := NewCacheRoundTripper(cacheDir, rt) - - // First call, caches the response - req := &http.Request{ - Method: http.MethodGet, - URL: &url.URL{Host: "localhost"}, - } - rt.Response = &http.Response{ - Header: http.Header{"ETag": []string{`"123456"`}}, - Body: ioutil.NopCloser(bytes.NewReader([]byte("Content"))), - StatusCode: http.StatusOK, - } - resp, err := cache.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - content, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - if string(content) != "Content" { - t.Errorf(`Expected Body to be "Content", got %q`, string(content)) - } - - // Second call, returns cached 
response - req = &http.Request{ - Method: http.MethodGet, - URL: &url.URL{Host: "localhost"}, - } - rt.Response = &http.Response{ - StatusCode: http.StatusNotModified, - Body: ioutil.NopCloser(bytes.NewReader([]byte("Other Content"))), - } - - resp, err = cache.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - - // Read body and make sure we have the initial content - content, err = ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - t.Fatal(err) - } - if string(content) != "Content" { - t.Errorf("Invalid content read from cache %q", string(content)) - } -} From dedeb99c97dcd0e5814ba49cf0f82aa7bf23f4ad Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Mon, 8 Jan 2018 10:00:13 -0800 Subject: [PATCH 655/794] generated: update staging godeps --- .../apiextensions-apiserver/Godeps/Godeps.json | 16 ---------------- staging/src/k8s.io/apiserver/Godeps/Godeps.json | 12 ------------ staging/src/k8s.io/client-go/Godeps/Godeps.json | 16 ---------------- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 16 ---------------- staging/src/k8s.io/metrics/Godeps/Godeps.json | 16 ---------------- .../k8s.io/sample-apiserver/Godeps/Godeps.json | 16 ---------------- .../k8s.io/sample-controller/Godeps/Godeps.json | 16 ---------------- 7 files changed, 108 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 2aaf0acc53b..770352cf7d1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -190,10 +190,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -210,14 +206,6 @@ "ImportPath": 
"github.com/googleapis/gnostic/extensions", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -270,10 +258,6 @@ "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index a8011ef1d06..b1fcb0a32f1 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -518,14 +518,6 @@ "ImportPath": "github.com/gophercloud/gophercloud/pagination", "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", "Rev": "2500245aa6110c562d17020fb31a2c133d737799" @@ -594,10 +586,6 @@ "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index c3d9ca80a46..4fae7ddb897 100644 --- 
a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -150,10 +150,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -198,14 +194,6 @@ "ImportPath": "github.com/gophercloud/gophercloud/pagination", "Rev": "8e59687aa4b27ab22a0bf3295f1e165ff7bd5f97" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -246,10 +234,6 @@ "ImportPath": "github.com/mailru/easyjson/jwriter", "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index bac5d82c33e..abddac483d5 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -170,10 +170,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -190,14 +186,6 @@ "ImportPath": "github.com/googleapis/gnostic/extensions", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, - { 
- "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -250,10 +238,6 @@ "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index e964a638073..ce359bb79d8 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -74,10 +74,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -94,14 +90,6 @@ "ImportPath": "github.com/googleapis/gnostic/extensions", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/json-iterator/go", "Rev": "13f86432b882000a51c6e610c620974462691a97" @@ -122,10 +110,6 @@ "ImportPath": "github.com/mailru/easyjson/jwriter", "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": 
"9ff6c6923cfffbcd502984b8e0c80539a94968b7" diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index 27ea0ca6a47..ce731cb79fd 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -162,10 +162,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -182,14 +178,6 @@ "ImportPath": "github.com/googleapis/gnostic/extensions", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -242,10 +230,6 @@ "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/prometheus/client_golang/prometheus", "Rev": "e7e903064f5e9eb5da98208bae10b475d4db0f8c" diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index 5c0249ded73..ce34ffc7dfd 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -82,10 +82,6 @@ "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, - { - "ImportPath": "github.com/google/btree", - "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" - }, { 
"ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" @@ -102,14 +98,6 @@ "ImportPath": "github.com/googleapis/gnostic/extensions", "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, - { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, - { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -146,10 +134,6 @@ "ImportPath": "github.com/mailru/easyjson/jwriter", "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, - { - "ImportPath": "github.com/peterbourgon/diskv", - "Rev": "5f041e8faa004a95c88a202771f4cc3e991971e6" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" From f6721480f425bdbfb10b4edc831187f69c63ba9f Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Mon, 8 Jan 2018 10:20:02 -0800 Subject: [PATCH 656/794] enable on-demand metrics for eviction --- pkg/kubelet/eviction/eviction_manager.go | 4 +++- pkg/kubelet/eviction/helpers.go | 3 ++- pkg/kubelet/eviction/helpers_test.go | 2 +- pkg/kubelet/server/server_test.go | 2 +- pkg/kubelet/server/stats/handler.go | 6 ++++-- pkg/kubelet/server/stats/summary.go | 11 ++++++----- pkg/kubelet/server/stats/summary_test.go | 10 +++++----- .../stats/testing/mock_stats_provider.go | 18 +++++++++--------- pkg/kubelet/stats/helper.go | 12 +++++++++--- pkg/kubelet/stats/stats_provider.go | 8 ++++---- pkg/kubelet/stats/stats_provider_test.go | 7 ++++--- 11 files changed, 48 insertions(+), 35 deletions(-) diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index 007c204f7fb..7811f3bea5f 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -455,7 +455,9 @@ func (m *managerImpl) reclaimNodeLevelResources(resourceToReclaim 
v1.ResourceNam // localStorageEviction checks the EmptyDir volume usage for each pod and determine whether it exceeds the specified limit and needs // to be evicted. It also checks every container in the pod, if the container overlay usage exceeds the limit, the pod will be evicted too. func (m *managerImpl) localStorageEviction(pods []*v1.Pod) []*v1.Pod { - summary, err := m.summaryProvider.Get() + // do not update node-level stats as local storage evictions do not utilize them. + forceStatsUpdate := false + summary, err := m.summaryProvider.Get(forceStatsUpdate) if err != nil { glog.Errorf("Could not get summary provider") return nil diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index 3ae9c0306a6..4376c842882 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -712,7 +712,8 @@ func (a byEvictionPriority) Less(i, j int) bool { // makeSignalObservations derives observations using the specified summary provider. func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvider CapacityProvider, pods []*v1.Pod) (signalObservations, statsFunc, error) { - summary, err := summaryProvider.Get() + updateStats := true + summary, err := summaryProvider.Get(updateStats) if err != nil { return nil, nil, err } diff --git a/pkg/kubelet/eviction/helpers_test.go b/pkg/kubelet/eviction/helpers_test.go index 5a794112051..727e0be14b9 100644 --- a/pkg/kubelet/eviction/helpers_test.go +++ b/pkg/kubelet/eviction/helpers_test.go @@ -920,7 +920,7 @@ type fakeSummaryProvider struct { result *statsapi.Summary } -func (f *fakeSummaryProvider) Get() (*statsapi.Summary, error) { +func (f *fakeSummaryProvider) Get(updateStats bool) (*statsapi.Summary, error) { return f.result, nil } diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index c1b068f2318..230b7df43a7 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -176,7 +176,7 @@ func (fk 
*fakeKubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Vo func (_ *fakeKubelet) RootFsStats() (*statsapi.FsStats, error) { return nil, nil } func (_ *fakeKubelet) ListPodStats() ([]statsapi.PodStats, error) { return nil, nil } func (_ *fakeKubelet) ImageFsStats() (*statsapi.FsStats, error) { return nil, nil } -func (_ *fakeKubelet) GetCgroupStats(cgroupName string) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { +func (_ *fakeKubelet) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { return nil, nil, nil } diff --git a/pkg/kubelet/server/stats/handler.go b/pkg/kubelet/server/stats/handler.go index 969b25f3de1..85d441e9f73 100644 --- a/pkg/kubelet/server/stats/handler.go +++ b/pkg/kubelet/server/stats/handler.go @@ -50,7 +50,7 @@ type StatsProvider interface { // // GetCgroupStats returns the stats and the networking usage of the cgroup // with the specified cgroupName. - GetCgroupStats(cgroupName string) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) + GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) // RootFsStats returns the stats of the node root filesystem. 
RootFsStats() (*statsapi.FsStats, error) @@ -183,7 +183,9 @@ func (h *handler) handleStats(request *restful.Request, response *restful.Respon // Handles stats summary requests to /stats/summary func (h *handler) handleSummary(request *restful.Request, response *restful.Response) { - summary, err := h.summaryProvider.Get() + // external calls to the summary API use cached stats + forceStatsUpdate := false + summary, err := h.summaryProvider.Get(forceStatsUpdate) if err != nil { handleError(response, "/stats/summary", err) } else { diff --git a/pkg/kubelet/server/stats/summary.go b/pkg/kubelet/server/stats/summary.go index 3e59d772475..a1ffc22ea49 100644 --- a/pkg/kubelet/server/stats/summary.go +++ b/pkg/kubelet/server/stats/summary.go @@ -25,7 +25,9 @@ import ( ) type SummaryProvider interface { - Get() (*statsapi.Summary, error) + // Get provides a new Summary with the stats from Kubelet, + // and will update some stats if updateStats is true + Get(updateStats bool) (*statsapi.Summary, error) } // summaryProviderImpl implements the SummaryProvider interface. @@ -41,8 +43,7 @@ func NewSummaryProvider(statsProvider StatsProvider) SummaryProvider { return &summaryProviderImpl{statsProvider} } -// Get provides a new Summary with the stats from Kubelet. -func (sp *summaryProviderImpl) Get() (*statsapi.Summary, error) { +func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) { // TODO(timstclair): Consider returning a best-effort response if any of // the following errors occur. 
node, err := sp.provider.GetNode() @@ -50,7 +51,7 @@ func (sp *summaryProviderImpl) Get() (*statsapi.Summary, error) { return nil, fmt.Errorf("failed to get node info: %v", err) } nodeConfig := sp.provider.GetNodeConfig() - rootStats, networkStats, err := sp.provider.GetCgroupStats("/") + rootStats, networkStats, err := sp.provider.GetCgroupStats("/", updateStats) if err != nil { return nil, fmt.Errorf("failed to get root cgroup stats: %v", err) } @@ -87,7 +88,7 @@ func (sp *summaryProviderImpl) Get() (*statsapi.Summary, error) { if name == "" { continue } - s, _, err := sp.provider.GetCgroupStats(name) + s, _, err := sp.provider.GetCgroupStats(name, false) if err != nil { glog.Errorf("Failed to get system container stats for %q: %v", name, err) continue diff --git a/pkg/kubelet/server/stats/summary_test.go b/pkg/kubelet/server/stats/summary_test.go index 422688446f9..49b4ebebba1 100644 --- a/pkg/kubelet/server/stats/summary_test.go +++ b/pkg/kubelet/server/stats/summary_test.go @@ -69,13 +69,13 @@ func TestSummaryProvider(t *testing.T) { On("ListPodStats").Return(podStats, nil). On("ImageFsStats").Return(imageFsStats, nil). On("RootFsStats").Return(rootFsStats, nil). - On("GetCgroupStats", "/").Return(cgroupStatsMap["/"].cs, cgroupStatsMap["/"].ns, nil). - On("GetCgroupStats", "/runtime").Return(cgroupStatsMap["/runtime"].cs, cgroupStatsMap["/runtime"].ns, nil). - On("GetCgroupStats", "/misc").Return(cgroupStatsMap["/misc"].cs, cgroupStatsMap["/misc"].ns, nil). - On("GetCgroupStats", "/kubelet").Return(cgroupStatsMap["/kubelet"].cs, cgroupStatsMap["/kubelet"].ns, nil) + On("GetCgroupStats", "/", true).Return(cgroupStatsMap["/"].cs, cgroupStatsMap["/"].ns, nil). + On("GetCgroupStats", "/runtime", false).Return(cgroupStatsMap["/runtime"].cs, cgroupStatsMap["/runtime"].ns, nil). + On("GetCgroupStats", "/misc", false).Return(cgroupStatsMap["/misc"].cs, cgroupStatsMap["/misc"].ns, nil). 
+ On("GetCgroupStats", "/kubelet", false).Return(cgroupStatsMap["/kubelet"].cs, cgroupStatsMap["/kubelet"].ns, nil) provider := NewSummaryProvider(mockStatsProvider) - summary, err := provider.Get() + summary, err := provider.Get(true) assert.NoError(err) assert.Equal(summary.Node.NodeName, "test-node") diff --git a/pkg/kubelet/server/stats/testing/mock_stats_provider.go b/pkg/kubelet/server/stats/testing/mock_stats_provider.go index befa19b0bf7..0da5d872f30 100644 --- a/pkg/kubelet/server/stats/testing/mock_stats_provider.go +++ b/pkg/kubelet/server/stats/testing/mock_stats_provider.go @@ -33,13 +33,13 @@ type StatsProvider struct { mock.Mock } -// GetCgroupStats provides a mock function with given fields: cgroupName -func (_m *StatsProvider) GetCgroupStats(cgroupName string) (*v1alpha1.ContainerStats, *v1alpha1.NetworkStats, error) { - ret := _m.Called(cgroupName) +// GetCgroupStats provides a mock function with given fields: cgroupName, updateStats +func (_m *StatsProvider) GetCgroupStats(cgroupName string, updateStats bool) (*v1alpha1.ContainerStats, *v1alpha1.NetworkStats, error) { + ret := _m.Called(cgroupName, updateStats) var r0 *v1alpha1.ContainerStats - if rf, ok := ret.Get(0).(func(string) *v1alpha1.ContainerStats); ok { - r0 = rf(cgroupName) + if rf, ok := ret.Get(0).(func(string, bool) *v1alpha1.ContainerStats); ok { + r0 = rf(cgroupName, updateStats) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*v1alpha1.ContainerStats) @@ -47,8 +47,8 @@ func (_m *StatsProvider) GetCgroupStats(cgroupName string) (*v1alpha1.ContainerS } var r1 *v1alpha1.NetworkStats - if rf, ok := ret.Get(1).(func(string) *v1alpha1.NetworkStats); ok { - r1 = rf(cgroupName) + if rf, ok := ret.Get(1).(func(string, bool) *v1alpha1.NetworkStats); ok { + r1 = rf(cgroupName, updateStats) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*v1alpha1.NetworkStats) @@ -56,8 +56,8 @@ func (_m *StatsProvider) GetCgroupStats(cgroupName string) (*v1alpha1.ContainerS } var r2 error - if rf, ok := 
ret.Get(2).(func(string) error); ok { - r2 = rf(cgroupName) + if rf, ok := ret.Get(2).(func(string, bool) error); ok { + r2 = rf(cgroupName, updateStats) } else { r2 = ret.Error(2) } diff --git a/pkg/kubelet/stats/helper.go b/pkg/kubelet/stats/helper.go index 6856a8c76da..cee923722a7 100644 --- a/pkg/kubelet/stats/helper.go +++ b/pkg/kubelet/stats/helper.go @@ -238,11 +238,17 @@ func isMemoryUnlimited(v uint64) bool { // getCgroupInfo returns the information of the container with the specified // containerName from cadvisor. -func getCgroupInfo(cadvisor cadvisor.Interface, containerName string) (*cadvisorapiv2.ContainerInfo, error) { +func getCgroupInfo(cadvisor cadvisor.Interface, containerName string, updateStats bool) (*cadvisorapiv2.ContainerInfo, error) { + var maxAge *time.Duration + if updateStats { + age := 0 * time.Second + maxAge = &age + } infoMap, err := cadvisor.ContainerInfoV2(containerName, cadvisorapiv2.RequestOptions{ IdType: cadvisorapiv2.TypeName, Count: 2, // 2 samples are needed to compute "instantaneous" CPU Recursive: false, + MaxAge: maxAge, }) if err != nil { return nil, fmt.Errorf("failed to get container info for %q: %v", containerName, err) @@ -256,8 +262,8 @@ func getCgroupInfo(cadvisor cadvisor.Interface, containerName string) (*cadvisor // getCgroupStats returns the latest stats of the container having the // specified containerName from cadvisor. 
-func getCgroupStats(cadvisor cadvisor.Interface, containerName string) (*cadvisorapiv2.ContainerStats, error) { - info, err := getCgroupInfo(cadvisor, containerName) +func getCgroupStats(cadvisor cadvisor.Interface, containerName string, updateStats bool) (*cadvisorapiv2.ContainerStats, error) { + info, err := getCgroupInfo(cadvisor, containerName, updateStats) if err != nil { return nil, err } diff --git a/pkg/kubelet/stats/stats_provider.go b/pkg/kubelet/stats/stats_provider.go index b61a4107940..235f1a46d2d 100644 --- a/pkg/kubelet/stats/stats_provider.go +++ b/pkg/kubelet/stats/stats_provider.go @@ -88,8 +88,8 @@ type containerStatsProvider interface { // GetCgroupStats returns the stats of the cgroup with the cgroupName. Note that // this function doesn't generate filesystem stats. -func (p *StatsProvider) GetCgroupStats(cgroupName string) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { - info, err := getCgroupInfo(p.cadvisor, cgroupName) +func (p *StatsProvider) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { + info, err := getCgroupInfo(p.cadvisor, cgroupName, updateStats) if err != nil { return nil, nil, fmt.Errorf("failed to get cgroup stats for %q: %v", cgroupName, err) } @@ -113,8 +113,8 @@ func (p *StatsProvider) RootFsStats() (*statsapi.FsStats, error) { } // Get the root container stats's timestamp, which will be used as the - // imageFs stats timestamp. - rootStats, err := getCgroupStats(p.cadvisor, "/") + // imageFs stats timestamp. Dont force a stats update, as we only want the timestamp. 
+ rootStats, err := getCgroupStats(p.cadvisor, "/", false) if err != nil { return nil, fmt.Errorf("failed to get root container stats: %v", err) } diff --git a/pkg/kubelet/stats/stats_provider_test.go b/pkg/kubelet/stats/stats_provider_test.go index 20616f39025..6f02704ed59 100644 --- a/pkg/kubelet/stats/stats_provider_test.go +++ b/pkg/kubelet/stats/stats_provider_test.go @@ -71,6 +71,7 @@ func TestGetCgroupStats(t *testing.T) { const ( cgroupName = "test-cgroup-name" containerInfoSeed = 1000 + updateStats = false ) var ( mockCadvisor = new(cadvisortest.Mock) @@ -87,7 +88,7 @@ func TestGetCgroupStats(t *testing.T) { mockCadvisor.On("ContainerInfoV2", cgroupName, options).Return(containerInfoMap, nil) provider := newStatsProvider(mockCadvisor, mockPodManager, mockRuntimeCache, fakeContainerStatsProvider{}) - cs, ns, err := provider.GetCgroupStats(cgroupName) + cs, ns, err := provider.GetCgroupStats(cgroupName, updateStats) assert.NoError(err) checkCPUStats(t, "", containerInfoSeed, cs.CPU) @@ -599,8 +600,8 @@ type fakeResourceAnalyzer struct { podVolumeStats serverstats.PodVolumeStats } -func (o *fakeResourceAnalyzer) Start() {} -func (o *fakeResourceAnalyzer) Get() (*statsapi.Summary, error) { return nil, nil } +func (o *fakeResourceAnalyzer) Start() {} +func (o *fakeResourceAnalyzer) Get(bool) (*statsapi.Summary, error) { return nil, nil } func (o *fakeResourceAnalyzer) GetPodVolumeStats(uid types.UID) (serverstats.PodVolumeStats, bool) { return o.podVolumeStats, true } From 25b1cd4958e0da5a99f7239a9aec65cb39b91984 Mon Sep 17 00:00:00 2001 From: prashima Date: Wed, 20 Dec 2017 15:23:21 -0800 Subject: [PATCH 657/794] Renews cached NodeInfo with new vSphere connection --- .../providers/vsphere/nodemanager.go | 67 ++++++++++++++----- .../providers/vsphere/vclib/virtualmachine.go | 8 +++ 2 files changed, 60 insertions(+), 15 deletions(-) diff --git a/pkg/cloudprovider/providers/vsphere/nodemanager.go b/pkg/cloudprovider/providers/vsphere/nodemanager.go index 
580dbae413d..a30b9135b01 100644 --- a/pkg/cloudprovider/providers/vsphere/nodemanager.go +++ b/pkg/cloudprovider/providers/vsphere/nodemanager.go @@ -251,6 +251,8 @@ func (nm *NodeManager) removeNode(node *v1.Node) { // GetNodeInfo returns a NodeInfo which datacenter, vm and vc server ip address. // This method returns an error if it is unable find node VCs and DCs listed in vSphere.conf // NodeInfo returned may not be updated to reflect current VM location. +// +// This method is a getter but it can cause side-effect of updating NodeInfo object. func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error) { getNodeInfo := func(nodeName k8stypes.NodeName) *NodeInfo { nm.nodeInfoLock.RLock() @@ -259,42 +261,57 @@ func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error) return nodeInfo } nodeInfo := getNodeInfo(nodeName) + var err error if nodeInfo == nil { - err := nm.RediscoverNode(nodeName) + // Rediscover node if no NodeInfo found. + glog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", convertToString(nodeName)) + err = nm.RediscoverNode(nodeName) if err != nil { - glog.V(4).Infof("error %q node info for node %q not found", err, convertToString(nodeName)) + glog.Errorf("Error %q node info for node %q not found", err, convertToString(nodeName)) return NodeInfo{}, err } nodeInfo = getNodeInfo(nodeName) + } else { + // Renew the found NodeInfo to avoid stale vSphere connection. + glog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, convertToString(nodeName)) + nodeInfo, err = nm.renewNodeInfo(nodeInfo, true) + if err != nil { + glog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, convertToString(nodeName)) + return NodeInfo{}, err + } + nm.addNodeInfo(convertToString(nodeName), nodeInfo) } return *nodeInfo, nil } +// GetNodeDetails returns NodeDetails for all the discovered nodes. +// +// This method is a getter but it can cause side-effect of updating NodeInfo objects. 
func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) { nm.nodeInfoLock.RLock() defer nm.nodeInfoLock.RUnlock() var nodeDetails []NodeDetails vsphereSessionRefreshMap := make(map[string]bool) - // Create context - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - for nodeName, nodeInfo := range nm.nodeInfoMap { - nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm}) + var n *NodeInfo + var err error if vsphereSessionRefreshMap[nodeInfo.vcServer] { - continue + // vSphere connection already refreshed. Just refresh VM and Datacenter. + glog.V(4).Infof("Renewing NodeInfo %+v for node %q. No new connection needed.", nodeInfo, nodeName) + n, err = nm.renewNodeInfo(nodeInfo, false) + } else { + // Refresh vSphere connection, VM and Datacenter. + glog.V(4).Infof("Renewing NodeInfo %+v for node %q with new vSphere connection.", nodeInfo, nodeName) + n, err = nm.renewNodeInfo(nodeInfo, true) + vsphereSessionRefreshMap[nodeInfo.vcServer] = true } - vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer] - if vsphereInstance == nil { - err := fmt.Errorf("vSphereInstance for vc server %q not found while looking for vm %q", nodeInfo.vcServer, nodeInfo.vm) - return nil, err - } - err := vsphereInstance.conn.Connect(ctx) if err != nil { return nil, err } - vsphereSessionRefreshMap[nodeInfo.vcServer] = true + nm.nodeInfoMap[nodeName] = n + glog.V(4).Infof("Updated NodeInfo %q for node %q.", nodeInfo, nodeName) + nodeDetails = append(nodeDetails, NodeDetails{nodeName, n.vm}) } return nodeDetails, nil } @@ -317,3 +334,23 @@ func (nm *NodeManager) GetVSphereInstance(nodeName k8stypes.NodeName) (VSphereIn } return *vsphereInstance, nil } + +// renewNodeInfo renews vSphere connection, VirtualMachine and Datacenter for NodeInfo instance. 
+func (nm *NodeManager) renewNodeInfo(nodeInfo *NodeInfo, reconnect bool) (*NodeInfo, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer] + if vsphereInstance == nil { + err := fmt.Errorf("vSphereInstance for vSphere %q not found while refershing NodeInfo for VM %q", nodeInfo.vcServer, nodeInfo.vm) + return nil, err + } + if reconnect { + err := vsphereInstance.conn.Connect(ctx) + if err != nil { + return nil, err + } + } + vm := nodeInfo.vm.RenewVM(vsphereInstance.conn.GoVmomiClient) + return &NodeInfo{vm: &vm, dataCenter: vm.Datacenter, vcServer: nodeInfo.vcServer}, nil +} diff --git a/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go b/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go index db45b8e1935..679d827adc3 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go @@ -23,6 +23,7 @@ import ( "time" "github.com/golang/glog" + "github.com/vmware/govmomi" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25/mo" @@ -400,3 +401,10 @@ func (vm *VirtualMachine) deleteController(ctx context.Context, controllerDevice } return nil } + +// RenewVM renews this virtual machine with new client connection. +func (vm *VirtualMachine) RenewVM(client *govmomi.Client) VirtualMachine { + dc := Datacenter{Datacenter: object.NewDatacenter(client.Client, vm.Datacenter.Reference())} + newVM := object.NewVirtualMachine(client.Client, vm.VirtualMachine.Reference()) + return VirtualMachine{VirtualMachine: newVM, Datacenter: &dc} +} From e21ecc0166fa73c7d2d5d57aa44529315fc24601 Mon Sep 17 00:00:00 2001 From: Cheng Xing Date: Wed, 20 Dec 2017 08:37:55 -0800 Subject: [PATCH 658/794] Updated Flexvolume setup mechanisms for COS instance image. 
- If REMOUNT_VOLUME_PLUGIN_DIR is set to true, VOLUME_PLUGIN_DIR is remounted with `exec` option during cluster startup. This allows any writable location to be used as the plugin directory. - New HostPath added to controller-manager deployment to enable access to volume plugin directory. - Improved how the default directory is passed to master and node setup. --- cluster/gce/gci/configure-helper.sh | 12 ++++++++++++ cluster/gce/gci/configure.sh | 12 ++++++++++++ .../kube-controller-manager.manifest | 3 +++ 3 files changed, 27 insertions(+) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index a10ffe2f312..5d22636a5b5 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1452,6 +1452,8 @@ function start-etcd-servers { # CLOUD_CONFIG_VOLUME # CLOUD_CONFIG_MOUNT # DOCKER_REGISTRY +# FLEXVOLUME_HOSTPATH_MOUNT +# FLEXVOLUME_HOSTPATH_VOLUME function compute-master-manifest-variables { CLOUD_CONFIG_OPT="" CLOUD_CONFIG_VOLUME="" @@ -1465,6 +1467,13 @@ function compute-master-manifest-variables { if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}" fi + + FLEXVOLUME_HOSTPATH_MOUNT="" + FLEXVOLUME_HOSTPATH_VOLUME="" + if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then + FLEXVOLUME_HOSTPATH_MOUNT="{ \"name\": \"flexvolumedir\", \"mountPath\": \"${VOLUME_PLUGIN_DIR}\", \"readOnly\": true}," + FLEXVOLUME_HOSTPATH_VOLUME="{ \"name\": \"flexvolumedir\", \"hostPath\": {\"path\": \"${VOLUME_PLUGIN_DIR}\"}}," + fi } # A helper function that bind mounts kubelet dirs for running mount in a chroot @@ -1867,6 +1876,9 @@ function start-kube-controller-manager { sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}" sed -i -e "s@{{pv_recycler_mount}}@${PV_RECYCLER_MOUNT}@g" "${src_file}" sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}" + sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}" + sed -i -e 
"s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}" + cp "${src_file}" /etc/kubernetes/manifests } diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index 317abe9afbb..fa5a26e676e 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -138,6 +138,13 @@ function split-commas { echo $1 | tr "," "\n" } +function remount-flexvolume-directory { + local -r flexvolume_plugin_dir=$1 + mkdir -p $flexvolume_plugin_dir + mount --bind $flexvolume_plugin_dir $flexvolume_plugin_dir + mount -o remount,exec $flexvolume_plugin_dir +} + function install-gci-mounter-tools { CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter" local -r mounter_tar_sha="${DEFAULT_MOUNTER_TAR_SHA}" @@ -336,6 +343,11 @@ function install-kube-binary-config { # Install gci mounter related artifacts to allow mounting storage volumes in GCI install-gci-mounter-tools + # Remount the Flexvolume directory with the "exec" option, if needed. + if [[ "${REMOUNT_VOLUME_PLUGIN_DIR:-}" == "true" && -n "${VOLUME_PLUGIN_DIR:-}" ]]; then + remount-flexvolume-directory "${VOLUME_PLUGIN_DIR}" + fi + # Clean up. 
rm -rf "${KUBE_HOME}/kubernetes" rm -f "${KUBE_HOME}/${server_binary_tar}" diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest index e037d880177..c287b29652e 100644 --- a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest +++ b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest @@ -37,6 +37,7 @@ {% set pv_recycler_mount = "" -%} {% set pv_recycler_volume = "" -%} {% set srv_kube_path = "/srv/kubernetes" -%} +{% flex_vol_plugin_dir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec" -%} {% if grains.cloud is defined -%} {% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%} @@ -137,6 +138,7 @@ { "name": "srvkube", "mountPath": "{{srv_kube_path}}", "readOnly": true}, + {{flexvolume_hostpath_mount}} { "name": "logfile", "mountPath": "/var/log/kube-controller-manager.log", "readOnly": false}, @@ -166,6 +168,7 @@ "hostPath": { "path": "{{srv_kube_path}}"} }, + {{flexvolume_hostpath}} { "name": "logfile", "hostPath": { "path": "/var/log/kube-controller-manager.log", From 68c2c79362a506939b6513108b9b8d689cf94b0f Mon Sep 17 00:00:00 2001 From: Yongkun Anfernee Gui Date: Thu, 16 Nov 2017 15:43:06 -0800 Subject: [PATCH 659/794] Refactor HostIP predicate algorithm - Remove string decode logic. 
It's not really helping to find the conflict ports, and it's expensive to do encoding/decoding - Not to parse the container ports information in predicate meta, use straight []*v1.ContainerPort - Use better data structure to search port conflict based on ip addresses - Collect scattered source code into common place --- pkg/scheduler/algorithm/predicates/BUILD | 1 - .../algorithm/predicates/metadata.go | 9 +- .../algorithm/predicates/metadata_test.go | 10 +- .../algorithm/predicates/predicates.go | 4 +- .../algorithm/predicates/predicates_test.go | 45 +--- pkg/scheduler/algorithm/predicates/utils.go | 63 +----- .../algorithm/predicates/utils_test.go | 193 ---------------- pkg/scheduler/schedulercache/cache_test.go | 58 +++-- pkg/scheduler/schedulercache/node_info.go | 37 +-- pkg/scheduler/util/utils.go | 138 ++++++++++-- pkg/scheduler/util/utils_test.go | 210 ++++++++++++++++++ 11 files changed, 398 insertions(+), 370 deletions(-) diff --git a/pkg/scheduler/algorithm/predicates/BUILD b/pkg/scheduler/algorithm/predicates/BUILD index 6c091d1381d..a028dac6259 100644 --- a/pkg/scheduler/algorithm/predicates/BUILD +++ b/pkg/scheduler/algorithm/predicates/BUILD @@ -57,7 +57,6 @@ go_test( "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/schedulercache:go_default_library", "//pkg/scheduler/testing:go_default_library", - "//pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/scheduler/algorithm/predicates/metadata.go b/pkg/scheduler/algorithm/predicates/metadata.go index af8c32e2c4c..b8b935335f9 100644 --- a/pkg/scheduler/algorithm/predicates/metadata.go +++ b/pkg/scheduler/algorithm/predicates/metadata.go @@ -46,7 +46,7 @@ type predicateMetadata struct { pod *v1.Pod podBestEffort bool podRequest *schedulercache.Resource - podPorts map[string]bool + podPorts []*v1.ContainerPort //key 
is a pod full name with the anti-affinity rules. matchingAntiAffinityTerms map[string][]matchingPodAntiAffinityTerm serviceAffinityInUse bool @@ -90,7 +90,7 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf pod: pod, podBestEffort: isPodBestEffort(pod), podRequest: GetResourceRequest(pod), - podPorts: schedutil.GetUsedPorts(pod), + podPorts: schedutil.GetContainerPorts(pod), matchingAntiAffinityTerms: matchingTerms, } for predicateName, precomputeFunc := range predicateMetadataProducers { @@ -172,10 +172,7 @@ func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata { podRequest: meta.podRequest, serviceAffinityInUse: meta.serviceAffinityInUse, } - newPredMeta.podPorts = map[string]bool{} - for k, v := range meta.podPorts { - newPredMeta.podPorts[k] = v - } + newPredMeta.podPorts = append([]*v1.ContainerPort(nil), meta.podPorts...) newPredMeta.matchingAntiAffinityTerms = map[string][]matchingPodAntiAffinityTerm{} for k, v := range meta.matchingAntiAffinityTerms { newPredMeta.matchingAntiAffinityTerms[k] = append([]matchingPodAntiAffinityTerm(nil), v...) 
diff --git a/pkg/scheduler/algorithm/predicates/metadata_test.go b/pkg/scheduler/algorithm/predicates/metadata_test.go index 31b88411015..026a979d980 100644 --- a/pkg/scheduler/algorithm/predicates/metadata_test.go +++ b/pkg/scheduler/algorithm/predicates/metadata_test.go @@ -373,7 +373,15 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) { Memory: 300, AllowedPodNumber: 4, }, - podPorts: map[string]bool{"1234": true, "456": false}, + podPorts: []*v1.ContainerPort{ + { + Name: "name", + HostPort: 10, + ContainerPort: 20, + Protocol: "TCP", + HostIP: "1.2.3.4", + }, + }, matchingAntiAffinityTerms: map[string][]matchingPodAntiAffinityTerm{ "term1": { { diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go index 48d7b509285..bd7d349d084 100644 --- a/pkg/scheduler/algorithm/predicates/predicates.go +++ b/pkg/scheduler/algorithm/predicates/predicates.go @@ -966,12 +966,12 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.Predi // PodFitsHostPorts checks if a node has free ports for the requested pod ports. func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { - var wantPorts map[string]bool + var wantPorts []*v1.ContainerPort if predicateMeta, ok := meta.(*predicateMetadata); ok { wantPorts = predicateMeta.podPorts } else { // We couldn't parse metadata - fallback to computing it. 
- wantPorts = schedutil.GetUsedPorts(pod) + wantPorts = schedutil.GetContainerPorts(pod) } if len(wantPorts) == 0 { return true, nil, nil diff --git a/pkg/scheduler/algorithm/predicates/predicates_test.go b/pkg/scheduler/algorithm/predicates/predicates_test.go index 1b05d9e4fde..674240477fc 100644 --- a/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -20,6 +20,7 @@ import ( "os" "reflect" "strconv" + "strings" "testing" "k8s.io/api/core/v1" @@ -32,7 +33,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/schedulercache" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" - schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) var ( @@ -518,13 +518,13 @@ func TestPodFitsHost(t *testing.T) { func newPod(host string, hostPortInfos ...string) *v1.Pod { networkPorts := []v1.ContainerPort{} for _, portInfo := range hostPortInfos { - hostPortInfo := decode(portInfo) - hostPort, _ := strconv.Atoi(hostPortInfo.hostPort) + splited := strings.Split(portInfo, "/") + hostPort, _ := strconv.Atoi(splited[2]) networkPorts = append(networkPorts, v1.ContainerPort{ - HostIP: hostPortInfo.hostIP, + HostIP: splited[1], HostPort: int32(hostPort), - Protocol: v1.Protocol(hostPortInfo.protocol), + Protocol: v1.Protocol(splited[0]), }) } return &v1.Pod{ @@ -653,41 +653,6 @@ func TestPodFitsHostPorts(t *testing.T) { } } -func TestGetUsedPorts(t *testing.T) { - tests := []struct { - pods []*v1.Pod - ports map[string]bool - }{ - { - []*v1.Pod{ - newPod("m1", "UDP/127.0.0.1/9090"), - }, - map[string]bool{"UDP/127.0.0.1/9090": true}, - }, - { - []*v1.Pod{ - newPod("m1", "UDP/127.0.0.1/9090"), - newPod("m1", "UDP/127.0.0.1/9091"), - }, - map[string]bool{"UDP/127.0.0.1/9090": true, "UDP/127.0.0.1/9091": true}, - }, - { - []*v1.Pod{ - newPod("m1", "TCP/0.0.0.0/9090"), - newPod("m2", "UDP/127.0.0.1/9091"), - }, - map[string]bool{"TCP/0.0.0.0/9090": true, "UDP/127.0.0.1/9091": true}, - }, 
- } - - for _, test := range tests { - ports := schedutil.GetUsedPorts(test.pods...) - if !reflect.DeepEqual(test.ports, ports) { - t.Errorf("%s: expected %v, got %v", "test get used ports", test.ports, ports) - } - } -} - func TestGCEDiskConflicts(t *testing.T) { volState := v1.PodSpec{ Volumes: []v1.Volume{ diff --git a/pkg/scheduler/algorithm/predicates/utils.go b/pkg/scheduler/algorithm/predicates/utils.go index 9a25c85d9ac..ce3e9d5888e 100644 --- a/pkg/scheduler/algorithm/predicates/utils.go +++ b/pkg/scheduler/algorithm/predicates/utils.go @@ -17,10 +17,7 @@ limitations under the License. package predicates import ( - "strings" - "github.com/golang/glog" - "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -135,65 +132,11 @@ type EquivalencePod struct { PVCSet sets.String } -type hostPortInfo struct { - protocol string - hostIP string - hostPort string -} - -// decode decodes string ("protocol/hostIP/hostPort") to *hostPortInfo object. -func decode(info string) *hostPortInfo { - hostPortInfoSlice := strings.Split(info, "/") - - protocol := hostPortInfoSlice[0] - hostIP := hostPortInfoSlice[1] - hostPort := hostPortInfoSlice[2] - - return &hostPortInfo{ - protocol: protocol, - hostIP: hostIP, - hostPort: hostPort, - } -} - -// specialPortConflictCheck detects whether specailHostPort(whose hostIP is 0.0.0.0) is conflict with otherHostPorts. -// return true if we have a conflict. 
-func specialPortConflictCheck(specialHostPort string, otherHostPorts map[string]bool) bool { - specialHostPortInfo := decode(specialHostPort) - - if specialHostPortInfo.hostIP == schedutil.DefaultBindAllHostIP { - // loop through all the otherHostPorts to see if there exists a conflict - for hostPortItem := range otherHostPorts { - hostPortInfo := decode(hostPortItem) - - // if there exists one hostPortItem which has the same hostPort and protocol with the specialHostPort, that will cause a conflict - if specialHostPortInfo.hostPort == hostPortInfo.hostPort && specialHostPortInfo.protocol == hostPortInfo.protocol { - return true - } - } - - } - - return false -} - // portsConflict check whether existingPorts and wantPorts conflict with each other // return true if we have a conflict -func portsConflict(existingPorts, wantPorts map[string]bool) bool { - - for existingPort := range existingPorts { - if specialPortConflictCheck(existingPort, wantPorts) { - return true - } - } - - for wantPort := range wantPorts { - if specialPortConflictCheck(wantPort, existingPorts) { - return true - } - - // general check hostPort conflict procedure for hostIP is not 0.0.0.0 - if existingPorts[wantPort] { +func portsConflict(existingPorts schedutil.HostPortInfo, wantPorts []*v1.ContainerPort) bool { + for _, cp := range wantPorts { + if existingPorts.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) { return true } } diff --git a/pkg/scheduler/algorithm/predicates/utils_test.go b/pkg/scheduler/algorithm/predicates/utils_test.go index 308bd8da519..305a27d1304 100644 --- a/pkg/scheduler/algorithm/predicates/utils_test.go +++ b/pkg/scheduler/algorithm/predicates/utils_test.go @@ -18,8 +18,6 @@ package predicates import ( "fmt" - "reflect" - "testing" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -70,194 +68,3 @@ func ExampleFindLabelsInSet() { // label1=value1,label2=value2,label3=will_see_this // pod1,pod2, } - -func Test_decode(t *testing.T) { - 
tests := []struct { - name string - args string - want *hostPortInfo - }{ - { - name: "test1", - args: "UDP/127.0.0.1/80", - want: &hostPortInfo{ - protocol: "UDP", - hostIP: "127.0.0.1", - hostPort: "80", - }, - }, - { - name: "test2", - args: "TCP/127.0.0.1/80", - want: &hostPortInfo{ - protocol: "TCP", - hostIP: "127.0.0.1", - hostPort: "80", - }, - }, - { - name: "test3", - args: "TCP/0.0.0.0/80", - want: &hostPortInfo{ - protocol: "TCP", - hostIP: "0.0.0.0", - hostPort: "80", - }, - }, - } - - for _, tt := range tests { - if got := decode(tt.args); !reflect.DeepEqual(got, tt.want) { - t.Errorf("test name = %v, decode() = %v, want %v", tt.name, got, tt.want) - } - - } -} - -func Test_specialPortConflictCheck(t *testing.T) { - type args struct { - specialHostPort string - otherHostPorts map[string]bool - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "test-1", - args: args{ - specialHostPort: "TCP/0.0.0.0/80", - otherHostPorts: map[string]bool{ - "TCP/127.0.0.2/8080": true, - "TCP/127.0.0.1/80": true, - "UDP/127.0.0.2/8080": true, - }, - }, - want: true, - }, - { - name: "test-2", - args: args{ - specialHostPort: "TCP/0.0.0.0/80", - otherHostPorts: map[string]bool{ - "TCP/127.0.0.2/8080": true, - "UDP/127.0.0.1/80": true, - "UDP/127.0.0.2/8080": true, - }, - }, - want: false, - }, - { - name: "test-3", - args: args{ - specialHostPort: "TCP/0.0.0.0/80", - otherHostPorts: map[string]bool{ - "TCP/127.0.0.2/8080": true, - "TCP/127.0.0.1/8090": true, - "UDP/127.0.0.2/8080": true, - }, - }, - want: false, - }, - { - name: "test-4", - args: args{ - specialHostPort: "TCP/0.0.0.0/80", - otherHostPorts: map[string]bool{ - "UDP/127.0.0.2/8080": true, - "UDP/127.0.0.1/8090": true, - "TCP/127.0.0.2/8080": true, - }, - }, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := specialPortConflictCheck(tt.args.specialHostPort, tt.args.otherHostPorts); got != tt.want { - 
t.Errorf("specialPortConflictCheck() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_portsConflict(t *testing.T) { - type args struct { - existingPorts map[string]bool - wantPorts map[string]bool - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "test1", - args: args{ - existingPorts: map[string]bool{ - "UDP/127.0.0.1/8080": true, - }, - wantPorts: map[string]bool{ - "UDP/127.0.0.1/8080": true, - }, - }, - want: true, - }, - { - name: "test2", - args: args{ - existingPorts: map[string]bool{ - "UDP/127.0.0.2/8080": true, - }, - wantPorts: map[string]bool{ - "UDP/127.0.0.1/8080": true, - }, - }, - want: false, - }, - { - name: "test3", - args: args{ - existingPorts: map[string]bool{ - "TCP/127.0.0.1/8080": true, - }, - wantPorts: map[string]bool{ - "UDP/127.0.0.1/8080": true, - }, - }, - want: false, - }, - { - name: "test4", - args: args{ - existingPorts: map[string]bool{ - "TCP/0.0.0.0/8080": true, - }, - wantPorts: map[string]bool{ - "TCP/127.0.0.1/8080": true, - }, - }, - want: true, - }, - { - name: "test5", - args: args{ - existingPorts: map[string]bool{ - "TCP/127.0.0.1/8080": true, - }, - wantPorts: map[string]bool{ - "TCP/0.0.0.0/8080": true, - }, - }, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := portsConflict(tt.args.existingPorts, tt.args.wantPorts); got != tt.want { - t.Errorf("portsConflict() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/scheduler/schedulercache/cache_test.go b/pkg/scheduler/schedulercache/cache_test.go index b5e1243a474..9ed5764be45 100644 --- a/pkg/scheduler/schedulercache/cache_test.go +++ b/pkg/scheduler/schedulercache/cache_test.go @@ -43,6 +43,32 @@ func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *No } } +type hostPortInfoParam struct { + protocol, ip string + port int32 +} + +type hostPortInfoBuilder struct { + inputs []hostPortInfoParam +} + +func newHostPortInfoBuilder() 
*hostPortInfoBuilder { + return &hostPortInfoBuilder{} +} + +func (b *hostPortInfoBuilder) add(protocol, ip string, port int32) *hostPortInfoBuilder { + b.inputs = append(b.inputs, hostPortInfoParam{protocol, ip, port}) + return b +} + +func (b *hostPortInfoBuilder) build() schedutil.HostPortInfo { + res := make(schedutil.HostPortInfo) + for _, param := range b.inputs { + res.Add(param.ip, param.protocol, param.port) + } + return res +} + // TestAssumePodScheduled tests that after a pod is assumed, its information is aggregated // on node level. func TestAssumePodScheduled(t *testing.T) { @@ -74,7 +100,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[0]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), }, }, { pods: []*v1.Pod{testPods[1], testPods[2]}, @@ -89,7 +115,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[1], testPods[2]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/80": true, "TCP/127.0.0.1/8080": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(), }, }, { // test non-zero request pods: []*v1.Pod{testPods[3]}, @@ -104,7 +130,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[3]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), }, }, { pods: []*v1.Pod{testPods[4]}, @@ -120,7 +146,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[4]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), }, }, { pods: []*v1.Pod{testPods[4], testPods[5]}, @@ -136,7 +162,7 @@ func TestAssumePodScheduled(t *testing.T) { }, 
allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[4], testPods[5]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/80": true, "TCP/127.0.0.1/8080": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(), }, }, { pods: []*v1.Pod{testPods[6]}, @@ -151,7 +177,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[6]}, - usedPorts: map[string]bool{}, + usedPorts: newHostPortInfoBuilder().build(), }, }, } @@ -227,7 +253,7 @@ func TestExpirePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[1]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/8080": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), }, }} @@ -276,7 +302,7 @@ func TestAddPodWillConfirm(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[0]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), }, }} @@ -331,7 +357,7 @@ func TestAddPodWillReplaceAssumed(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{updatedPod.DeepCopy()}, - usedPorts: map[string]bool{"TCP/0.0.0.0/90": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(), }, }, }} @@ -383,7 +409,7 @@ func TestAddPodAfterExpiration(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{basePod}, - usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), }, }} @@ -436,7 +462,7 @@ func TestUpdatePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[1]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/8080": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), }, { requestedResource: &Resource{ MilliCPU: 100, @@ -448,7 +474,7 @@ func TestUpdatePod(t *testing.T) { }, allocatableResource: 
&Resource{}, pods: []*v1.Pod{testPods[0]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), }}, }} @@ -503,7 +529,7 @@ func TestExpireAddUpdatePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[1]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/8080": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), }, { requestedResource: &Resource{ MilliCPU: 100, @@ -515,7 +541,7 @@ func TestExpireAddUpdatePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[0]}, - usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), }}, }} @@ -569,7 +595,7 @@ func TestRemovePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{basePod}, - usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, + usedPorts: newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), }, }} @@ -672,7 +698,7 @@ func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *NodeInfo { expected.pods = append(expected.pods, pod) expected.requestedResource.Add(getResourceRequest(pod)) expected.nonzeroRequest.Add(getResourceRequest(pod)) - expected.usedPorts = schedutil.GetUsedPorts(pod) + expected.updateUsedPorts(pod, true) expected.generation++ } diff --git a/pkg/scheduler/schedulercache/node_info.go b/pkg/scheduler/schedulercache/node_info.go index c59a2ebd686..2974206cc69 100644 --- a/pkg/scheduler/schedulercache/node_info.go +++ b/pkg/scheduler/schedulercache/node_info.go @@ -38,7 +38,7 @@ type NodeInfo struct { pods []*v1.Pod podsWithAffinity []*v1.Pod - usedPorts map[string]bool + usedPorts util.HostPortInfo // Total requested resource of all pods on this node. 
// It includes assumed pods which scheduler sends binding to apiserver but @@ -164,7 +164,7 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { nonzeroRequest: &Resource{}, allocatableResource: &Resource{}, generation: 0, - usedPorts: make(map[string]bool), + usedPorts: make(util.HostPortInfo), } for _, pod := range pods { ni.AddPod(pod) @@ -188,7 +188,7 @@ func (n *NodeInfo) Pods() []*v1.Pod { return n.pods } -func (n *NodeInfo) UsedPorts() map[string]bool { +func (n *NodeInfo) UsedPorts() util.HostPortInfo { if n == nil { return nil } @@ -269,7 +269,7 @@ func (n *NodeInfo) Clone() *NodeInfo { taintsErr: n.taintsErr, memoryPressureCondition: n.memoryPressureCondition, diskPressureCondition: n.diskPressureCondition, - usedPorts: make(map[string]bool), + usedPorts: make(util.HostPortInfo), generation: n.generation, } if len(n.pods) > 0 { @@ -400,34 +400,15 @@ func calculateResource(pod *v1.Pod) (res Resource, non0_cpu int64, non0_mem int6 return } -func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, used bool) { +func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) { for j := range pod.Spec.Containers { container := &pod.Spec.Containers[j] for k := range container.Ports { podPort := &container.Ports[k] - // "0" is explicitly ignored in PodFitsHostPorts, - // which is the only function that uses this value. 
- if podPort.HostPort != 0 { - // user does not explicitly set protocol, default is tcp - portProtocol := podPort.Protocol - if podPort.Protocol == "" { - portProtocol = v1.ProtocolTCP - } - - // user does not explicitly set hostIP, default is 0.0.0.0 - portHostIP := podPort.HostIP - if podPort.HostIP == "" { - portHostIP = util.DefaultBindAllHostIP - } - - str := fmt.Sprintf("%s/%s/%d", portProtocol, portHostIP, podPort.HostPort) - - if used { - n.usedPorts[str] = used - } else { - delete(n.usedPorts, str) - } - + if add { + n.usedPorts.Add(podPort.HostIP, string(podPort.Protocol), podPort.HostPort) + } else { + n.usedPorts.Remove(podPort.HostIP, string(podPort.Protocol), podPort.HostPort) } } } diff --git a/pkg/scheduler/util/utils.go b/pkg/scheduler/util/utils.go index 66a004e0013..6da6d5ec976 100644 --- a/pkg/scheduler/util/utils.go +++ b/pkg/scheduler/util/utils.go @@ -17,7 +17,6 @@ limitations under the License. package util import ( - "fmt" "sort" "k8s.io/api/core/v1" @@ -28,33 +27,126 @@ import ( const DefaultBindAllHostIP = "0.0.0.0" -// GetUsedPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair +// ProtocolPort represents a protocol port pair, e.g. tcp:80. +type ProtocolPort struct { + Protocol string + Port int32 +} + +// NewProtocolPort creates a ProtocolPort instance. 
+func NewProtocolPort(protocol string, port int32) *ProtocolPort { + pp := &ProtocolPort{ + Protocol: protocol, + Port: port, + } + + if len(pp.Protocol) == 0 { + pp.Protocol = string(v1.ProtocolTCP) + } + + return pp +} + +// HostPortInfo stores mapping from ip to a set of ProtocolPort +type HostPortInfo map[string]map[ProtocolPort]struct{} + +// Add adds (ip, protocol, port) to HostPortInfo +func (h HostPortInfo) Add(ip, protocol string, port int32) { + if port <= 0 { + return + } + + h.sanitize(&ip, &protocol) + + pp := NewProtocolPort(protocol, port) + if _, ok := h[ip]; !ok { + h[ip] = map[ProtocolPort]struct{}{ + *pp: {}, + } + return + } + + h[ip][*pp] = struct{}{} +} + +// Remove removes (ip, protocol, port) from HostPortInfo +func (h HostPortInfo) Remove(ip, protocol string, port int32) { + if port <= 0 { + return + } + + h.sanitize(&ip, &protocol) + + pp := NewProtocolPort(protocol, port) + if m, ok := h[ip]; ok { + delete(m, *pp) + if len(h[ip]) == 0 { + delete(h, ip) + } + } +} + +// Len returns the total number of (ip, protocol, port) tuple in HostPortInfo +func (h HostPortInfo) Len() int { + length := 0 + for _, m := range h { + length += len(m) + } + return length +} + +// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing +// ones in HostPortInfo. 
+func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool { + if port <= 0 { + return false + } + + h.sanitize(&ip, &protocol) + + pp := NewProtocolPort(protocol, port) + + // If ip is 0.0.0.0 check all IP's (protocol, port) pair + if ip == DefaultBindAllHostIP { + for _, m := range h { + if _, ok := m[*pp]; ok { + return true + } + } + return false + } + + // If ip isn't 0.0.0.0, only check IP and 0.0.0.0's (protocol, port) pair + for _, key := range []string{DefaultBindAllHostIP, ip} { + if m, ok := h[key]; ok { + if _, ok2 := m[*pp]; ok2 { + return true + } + } + } + + return false +} + +// sanitize the parameters +func (h HostPortInfo) sanitize(ip, protocol *string) { + if len(*ip) == 0 { + *ip = DefaultBindAllHostIP + } + if len(*protocol) == 0 { + *protocol = string(v1.ProtocolTCP) + } +} + +// GetContainerPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair // will be in the result; but it does not resolve port conflict. -func GetUsedPorts(pods ...*v1.Pod) map[string]bool { - ports := make(map[string]bool) +func GetContainerPorts(pods ...*v1.Pod) []*v1.ContainerPort { + var ports []*v1.ContainerPort for _, pod := range pods { for j := range pod.Spec.Containers { container := &pod.Spec.Containers[j] for k := range container.Ports { - podPort := &container.Ports[k] - // "0" is explicitly ignored in PodFitsHostPorts, - // which is the only function that uses this value. 
- if podPort.HostPort != 0 { - // user does not explicitly set protocol, default is tcp - portProtocol := podPort.Protocol - if podPort.Protocol == "" { - portProtocol = v1.ProtocolTCP - } - - // user does not explicitly set hostIP, default is 0.0.0.0 - portHostIP := podPort.HostIP - if podPort.HostIP == "" { - portHostIP = "0.0.0.0" - } - - str := fmt.Sprintf("%s/%s/%d", portProtocol, portHostIP, podPort.HostPort) - ports[str] = true - } + ports = append(ports, &container.Ports[k]) } } } diff --git a/pkg/scheduler/util/utils_test.go b/pkg/scheduler/util/utils_test.go index 653c3b9b0d6..a39651357cf 100644 --- a/pkg/scheduler/util/utils_test.go +++ b/pkg/scheduler/util/utils_test.go @@ -93,3 +93,213 @@ func TestSortableList(t *testing.T) { } } } + +type hostPortInfoParam struct { + protocol, ip string + port int32 +} + +func TestHostPortInfo_AddRemove(t *testing.T) { + tests := []struct { + desc string + added []hostPortInfoParam + removed []hostPortInfoParam + length int + }{ + { + desc: "normal add case", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + // this might not make sense in real case, but the struct doesn't forbid it. 
+ {"TCP", "0.0.0.0", 79}, + {"UDP", "0.0.0.0", 80}, + {"TCP", "0.0.0.0", 81}, + {"TCP", "0.0.0.0", 82}, + {"TCP", "0.0.0.0", 0}, + {"TCP", "0.0.0.0", -1}, + }, + length: 8, + }, + { + desc: "empty ip and protocol add should work", + added: []hostPortInfoParam{ + {"", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"", "127.0.0.1", 81}, + {"", "127.0.0.1", 82}, + {"", "", 79}, + {"UDP", "", 80}, + {"", "", 81}, + {"", "", 82}, + {"", "", 0}, + {"", "", -1}, + }, + length: 8, + }, + { + desc: "normal remove case", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + {"TCP", "0.0.0.0", 79}, + {"UDP", "0.0.0.0", 80}, + {"TCP", "0.0.0.0", 81}, + {"TCP", "0.0.0.0", 82}, + }, + removed: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + {"TCP", "0.0.0.0", 79}, + {"UDP", "0.0.0.0", 80}, + {"TCP", "0.0.0.0", 81}, + {"TCP", "0.0.0.0", 82}, + }, + length: 0, + }, + { + desc: "empty ip and protocol remove should work", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + {"TCP", "0.0.0.0", 79}, + {"UDP", "0.0.0.0", 80}, + {"TCP", "0.0.0.0", 81}, + {"TCP", "0.0.0.0", 82}, + }, + removed: []hostPortInfoParam{ + {"", "127.0.0.1", 79}, + {"", "127.0.0.1", 81}, + {"", "127.0.0.1", 82}, + {"UDP", "127.0.0.1", 80}, + {"", "", 79}, + {"", "", 81}, + {"", "", 82}, + {"UDP", "", 80}, + }, + length: 0, + }, + } + + for _, test := range tests { + hp := make(HostPortInfo) + for _, param := range test.added { + hp.Add(param.ip, param.protocol, param.port) + } + for _, param := range test.removed { + hp.Remove(param.ip, param.protocol, param.port) + } + if hp.Len() != test.length { + t.Errorf("%v failed: expect length %d; got %d", test.desc, test.length, hp.Len()) + t.Error(hp) + } + } +} + +func TestHostPortInfo_Check(t 
*testing.T) { + tests := []struct { + desc string + added []hostPortInfoParam + check hostPortInfoParam + expect bool + }{ + { + desc: "empty check should check 0.0.0.0 and TCP", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 80}, + }, + check: hostPortInfoParam{"", "", 81}, + expect: false, + }, + { + desc: "empty check should check 0.0.0.0 and TCP (conflicted)", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 80}, + }, + check: hostPortInfoParam{"", "", 80}, + expect: true, + }, + { + desc: "empty port check should pass", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 80}, + }, + check: hostPortInfoParam{"", "", 0}, + expect: false, + }, + { + desc: "0.0.0.0 should check all registered IPs", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 80}, + }, + check: hostPortInfoParam{"TCP", "0.0.0.0", 80}, + expect: true, + }, + { + desc: "0.0.0.0 with different protocol should be allowed", + added: []hostPortInfoParam{ + {"UDP", "127.0.0.1", 80}, + }, + check: hostPortInfoParam{"TCP", "0.0.0.0", 80}, + expect: false, + }, + { + desc: "0.0.0.0 with different port should be allowed", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + }, + check: hostPortInfoParam{"TCP", "0.0.0.0", 80}, + expect: false, + }, + { + desc: "normal ip should check all registered 0.0.0.0", + added: []hostPortInfoParam{ + {"TCP", "0.0.0.0", 80}, + }, + check: hostPortInfoParam{"TCP", "127.0.0.1", 80}, + expect: true, + }, + { + desc: "normal ip with different port/protocol should be allowed (0.0.0.0)", + added: []hostPortInfoParam{ + {"TCP", "0.0.0.0", 79}, + {"UDP", "0.0.0.0", 80}, + {"TCP", "0.0.0.0", 81}, + {"TCP", "0.0.0.0", 82}, + }, + check: hostPortInfoParam{"TCP", "127.0.0.1", 80}, + expect: false, + }, + { + desc: "normal ip with different port/protocol should be allowed", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"TCP", "127.0.0.1", 81}, + {"TCP", 
"127.0.0.1", 82}, + }, + check: hostPortInfoParam{"TCP", "127.0.0.1", 80}, + expect: false, + }, + } + + for _, test := range tests { + hp := make(HostPortInfo) + for _, param := range test.added { + hp.Add(param.ip, param.protocol, param.port) + } + if hp.CheckConflict(test.check.ip, test.check.protocol, test.check.port) != test.expect { + t.Errorf("%v failed, expected %t; got %t", test.desc, test.expect, !test.expect) + } + } +} From b0654ffeb8149fbd72b5d6057b8aae67703f8d81 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 9 Jan 2018 10:05:25 +0800 Subject: [PATCH 660/794] update apiserver key/crt with a long expire time --- .../certificates/apiserver.crt | 34 ++++++------- .../certificates/apiserver.key | 50 +++++++++---------- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiserver.local.config/certificates/apiserver.crt b/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiserver.local.config/certificates/apiserver.crt index 6bd8196e890..0b52619bf2f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiserver.local.config/certificates/apiserver.crt +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiserver.local.config/certificates/apiserver.crt @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDEzCCAfugAwIBAgIBATANBgkqhkiG9w0BAQsFADAfMR0wGwYDVQQDDBRsb2Nh -bGhvc3RAMTQ5MzY2NDQ4OTAeFw0xNzA1MDExODQ4MDlaFw0xODA1MDExODQ4MDla -MB8xHTAbBgNVBAMMFGxvY2FsaG9zdEAxNDkzNjY0NDg5MIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAy6tMZcDbG1J4vbX+YdiPswxqO+hX1r+i9+DFb1N/ -xyodBbprn1Mmd2k1lv3AkiZKm38v7dgzQ9/teA8Jm/1tyjjZSV/CxsZZWNuukPGU -ykEtn4mvkb5tOI1159ieTBiL4mKx5VNq8DkIpy9CT22Ud9dHkJaxJHcIF601hXHg -GIRla/6CRlkY/GFUItl1oij4sgzXRTS2pdv8lsmt2s7dXj737l10QCz9YDVuGSfu -rYoHGwY5ofYYFWzscD7Ds4O0tPdu4mSPIu753K7nB3ilfBi+tUWcSXpw9wE4+hIF -a1In8jnM+lw5/j/UoghrCtQ54BGWzpivPPXKv2dlNIOPiwIDAQABo1owWDAOBgNV 
-HQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB -/zAgBgNVHREEGTAXgglsb2NhbGhvc3SHBH8AAAGHBH8AAAEwDQYJKoZIhvcNAQEL -BQADggEBAJbxTi/0Joxx/oja4QDksbWroip0qVJKh1ic7ryai52aSBTcHMF9pWiL -047lL3sL0sN0YavXPUiow4PMTQm14W01ciwuZj5DCCaXnmnGtBy0fy8ifUdQoD/J -9pvLQMWAsx+GP2XzY+KxYFQairKS7BehEF/d24TgNHPskgc2p2XgK3Z7Ipp7hQrj -yZiTNromeULT12d5Zuwf+IeDp3aopGyhxCTOoc+RCz4MKLfKov40xjlaA4jVWazd -ccHWnagwM5lDlXnmCqZRVvyOWaUulJCEzRFfRTHFxKgj6DSPNt00wHXNmQUvjhN/ -YXFAkfKQQEs3qQRXoHAXKquplnLgjyA= +MIIDFTCCAf2gAwIBAgIBATANBgkqhkiG9w0BAQsFADAfMR0wGwYDVQQDDBRsb2Nh +bGhvc3RAMTUxNTQ2MjIwNjAgFw0xODAxMDkwMTQzMjZaGA8yMTE4MDEwOTAxNDMy +NlowHzEdMBsGA1UEAwwUbG9jYWxob3N0QDE1MTU0NjIyMDYwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC2hIORzonehlNadYyI30v1Jj8lhhABuiWiTSkl +KCLqZjwBfWfSC4w02zxi2SAH9ju20XCJrUauwPq1qXCp/CqXC/rVgZrzluDlpJpe +gF9AilQvGOxhrZhV4kqpOjGVE78uOmpfxiOyNermoJ0OVE8ugh3s/LLTNK/qmCAX +uEYTQccAvNEiPX3XPBCiaFlSCkUNS0zp12mJNP43+KF9y0CbtYs1gXKHmmJVSpjR +YmcuJJUfHxNrV2YR3ek6O4IIJFIlnLxgpjRBseBPkTenAT3S2YY9MyQkkBrRSPBa +vLM24al3KDvXYikYe3WpxeYNHGNcHIgR+hKlRTQ5VrWlfx9dAgMBAAGjWjBYMA4G +A1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTAD +AQH/MCAGA1UdEQQZMBeCCWxvY2FsaG9zdIcEfwAAAYcEfwAAATANBgkqhkiG9w0B +AQsFAAOCAQEAFhW8cVTraHPNsE+Jo0ZvcE2ic8lEzeOhWI2O/fpkrUJS5LptPKHS +nTK+CPxA0zhIS/vlJznIabeddXwtq7Xb5SwlJMHYMnHD6f5qwpD22D2dxJJa5sma +3yrK/4CutuEae08qqSeakfgCjcHLL9p7FZWxujkV9/5CEH5lFWYLGumyIoS46Svf +nSfDFKTrOj8P60ncCoWcSpMbdVQBDuKlIZuBMmz9CguC1CtuQWPDUmOGJuPs/+So +yusHbBfj+ATUWDYTg1lLjOIOSJpHGUQkvS+8Bo47SThD/b4w2i6VC72ldxtBuxGf +L7+jALMhMhiQD+Q4qsNuyvvNQLoYcTTFTw== -----END CERTIFICATE----- diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiserver.local.config/certificates/apiserver.key b/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiserver.local.config/certificates/apiserver.key index 152fc1b6f42..d4878784e98 100644 --- 
a/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiserver.local.config/certificates/apiserver.key +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiserver.local.config/certificates/apiserver.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAy6tMZcDbG1J4vbX+YdiPswxqO+hX1r+i9+DFb1N/xyodBbpr -n1Mmd2k1lv3AkiZKm38v7dgzQ9/teA8Jm/1tyjjZSV/CxsZZWNuukPGUykEtn4mv -kb5tOI1159ieTBiL4mKx5VNq8DkIpy9CT22Ud9dHkJaxJHcIF601hXHgGIRla/6C -RlkY/GFUItl1oij4sgzXRTS2pdv8lsmt2s7dXj737l10QCz9YDVuGSfurYoHGwY5 -ofYYFWzscD7Ds4O0tPdu4mSPIu753K7nB3ilfBi+tUWcSXpw9wE4+hIFa1In8jnM -+lw5/j/UoghrCtQ54BGWzpivPPXKv2dlNIOPiwIDAQABAoIBABy4qWtoCP4PYUuP -kLIHsiwTwh90on58Q+Uk43LRmaFihPk70tWDCleolJAYdMGneLn4869c383glEJs -DHTdBlCQN8QrJvKVIiBvymxSRSNIkcB/0CyDaC+jc08gsyIUDBX+yQuH+fqqcFfz -SCyfTWKhD0yKk6yKxK9iE7wf1PRf5uLtJD6x1vV0NBmsHH++feODjNVsHDRvnwy6 -3KXkgSvfCTQ7qnQPZ/MSsRxWRdMBCnhaQq9qRnJ8bv8XotrCsEG5laMybriyJYXX -wvr9Dt04ciUD/g3qwIPy1ygMAKE9ya8hivSRURptZxz9SKCenWWihsfIzk4uyOi+ -sVDkJVECgYEA2981ZhbkruG5JhOsWVXTxDlXOrjI0pUqfej0WEp8lwdBMOrTXFs1 -cB8kdSocVC5GnMTbg6bqJrfglbNDOA7sUA8E8APLFFUAAvPwVfnrdDWk+0jK7atu -2sixQGeIB97Y6ojWfSbjyA/0p3Z0zCTfP5SR6xt4hVWhInB11m5IpO0CgYEA7SKH -bfWPZ0xfkFdM2X2cWQjt4zYHIUQqAKLPs2OjPTBC8PioNiZlvZq9bNseYR+eyIg6 -P9Kwe4KV/hzOSScf7JYpsN+YQ1+4Y+E63BkhAHXyz7y/vD+DA/Z83oKbelQtJqwq -W+Zo1OGJrfZwEKK5JWN9HF+KI9Z1iMyZoyw8L1cCgYEAtaLDfj7TVBVs2qPN8U8R -zjyAbyZP4IcRv0o+8OE345w+oqabTOScVK+lcpUDKhfAhamqniu5q5qjkYextBG/ -7rM5pP29OmKty8KxfJUlia73SA9udMD2pw68PzRIEBhsofPBHUqPSarEtcMJ4ctk -EiYuFUdwXNXMc6Lr9eTNZlECgYEAsfFJIvAzjdY3l76KwmGJox4aNHdkXkgiJJwH -s5s+8Tl34g8VWpzxl5e4MSkz4LmzktL2stHM8MGLAEZpXWdog0YjPsBqJ5R6byih -3GtW4lufutbuIbqe+6hJB0eGmAL2ZqCmoJODcstTXyEf8rvIpw/C4DmpFT9mryKo -31LgTr0CgYEAuxusmnR2vzZP/RjpjzmZcvIHf4xORG+SXlg3BXsSEd6+g3Rqiy5t -Q0UkHHwYnYurBmJ2HL1LG9mZwU89D00F/4mJpJuWfwqtqvodIRZ7bimyGGbvKZ1t -BGLmUssF5MYn75v7E5opxcc51aieW8nUQbop/PPMvWsYLrL/mcJNBpA= +MIIEowIBAAKCAQEAtoSDkc6J3oZTWnWMiN9L9SY/JYYQAbolok0pJSgi6mY8AX1n 
+0guMNNs8YtkgB/Y7ttFwia1GrsD6talwqfwqlwv61YGa85bg5aSaXoBfQIpULxjs +Ya2YVeJKqToxlRO/LjpqX8YjsjXq5qCdDlRPLoId7Pyy0zSv6pggF7hGE0HHALzR +Ij191zwQomhZUgpFDUtM6ddpiTT+N/ihfctAm7WLNYFyh5piVUqY0WJnLiSVHx8T +a1dmEd3pOjuCCCRSJZy8YKY0QbHgT5E3pwE90tmGPTMkJJAa0UjwWryzNuGpdyg7 +12IpGHt1qcXmDRxjXByIEfoSpUU0OVa1pX8fXQIDAQABAoIBAERy2ezaqnXbpnLs +VrIWHCRqHZBzAJnFN8vwaBfZP47snGBqqX7qecBw3+qqRwr1W1uqnCvl4fYzxVJP +o0L8oPRYt89OddAYq2s0GfiK6C4KMpwfGrdfJRxAa4OfoWypJS+vFKmqY0S4V8n6 +Pixbjf6BKbvw4Re4UKkIODDtGMqrZFVKcFe8LCnd3D+7jvt0M/WjEhrepWxscJh3 +aHgDzsLzCv1DNjgZfoRZubkK3bdndMaL6NhaKNBz6S7CT9XmZsJaWkmBXs9zOoyr +0hKP0A11cm6a7LsmxX5h4uaQLh66KHUPbV4KjKgKiGkSS9cnZoXHFZLOplOfozje +1DKitAECgYEA2eWiRNByNIqqRPvBtD8ydavOLk6iLlLt+LkCpGupgELs53WS5fTT +TxbyVq+897qeW2Klir7jZFWG3Q+EaBATxMYON+jb7QnIz8gX9lh1PpUlo88BiQzO +hAIx2uV19KM0ftXYVTSAUh1N2cgoOWGUWLaeMPdxPOlJwvM25hSfp90CgYEA1m8W +vWBO8X5LXM9g+fO1TFSlTnUJW1gWrnOw4VmU2+DbqNmtefpVrqDa5Iw2+mU+EBgA +d3wdAHARXpc2MGcIRnRbHn+gXJVHA+gA7H9LSZ4Yi0qJZbNVAgRySs2iBYUcunsR +AXkS7sPGQinfnjKh6vhYVErh5jA+cvS8CXZtnYECgYBmh61hYAw9OPqB100AebRO +tncgRxP9ZDxiCvx5TcfGeLds+mATIK7FynBh5fOvRfr52WM39DafobcCEiklplsG +/oL2P/YshaweSXMtEdapihjaCbAZQxNx/m5jKBHm+VzcSdev0DKJcQyO66Yxyf65 +98RcGjMIjGWO/E7a2N1/aQKBgCPrY+HBGjg1saYQTuxPuJTasP4deL3GWbZLRtvY +x6i1V9ZG8Fo4ZtXjuAcEvcjf4K+NdbaOIcWLAD3aEoe1GpvCrejD9DbOAqFS4aS8 +Bf6E7xOWHsHccmbuG78QBw3pqFBMgSLABz3bqYA3x2+Wh6z2gMVN7d1DQ5K6EC19 +mwsBAoGBAKZBgqRHRq1Ch3SWb5Q+SgUvNyQ+PAIwCve0vA4mMIK6EGqU/8wbU01B +5/UkCfT+ovDeDuyeaZbTWzwUC4Mrg4C9rThrK5WLc43Dig6G1HhfjdLA+gdKFOjh +FpocOI2FEwbmj5Mka6n3TSFI8c55ubYdyXQu92DoFt4dTOJStUn2 -----END RSA PRIVATE KEY----- From 2ccf22a5d03dad3f46f76d587fe02a44cdb3a6f3 Mon Sep 17 00:00:00 2001 From: mlmhl Date: Thu, 4 Jan 2018 13:19:29 +0800 Subject: [PATCH 661/794] fix rbd ConstructVolumeSpec bug --- pkg/volume/rbd/rbd.go | 37 ++++++++++- pkg/volume/rbd/rbd_test.go | 128 ++++++++++++++++++++++++++++++++++++- pkg/volume/rbd/rbd_util.go | 14 +++- 3 files changed, 174 insertions(+), 5 deletions(-) diff --git 
a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index 0d4e8c21958..12c2898e3d1 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -357,7 +357,26 @@ func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol } s := dstrings.Split(sourceName, "-image-") if len(s) != 2 { - return nil, fmt.Errorf("sourceName %s wrong, should be pool+\"-image-\"+imageName", sourceName) + // The mountPath parameter is the volume mount path for a specific pod, its format + // is /var/lib/kubelet/pods/{podUID}/volumes/{volumePluginName}/{volumeName}. + // mounter.GetDeviceNameFromMount will find the device path(such as /dev/rbd0) by + // mountPath first, and then try to find the global device mount path from the mounted + // path list of this device. sourceName is extracted from this global device mount path. + // mounter.GetDeviceNameFromMount expects the global device mount path conforms to canonical + // format: /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/{pool}-image-{image}. + // If this assertion failed, it means that the global device mount path is created by + // the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/{pool}-image-{image}. + // So we will try to check whether this old style global device mount path exist or not. + // If existed, extract the sourceName from this old style path, otherwise return an error. 
+ glog.V(3).Infof("SourceName %s wrong, fallback to old format", sourceName) + sourceName, err = plugin.getDeviceNameFromOldMountPath(mounter, mountPath) + if err != nil { + return nil, err + } + s = dstrings.Split(sourceName, "-image-") + if len(s) != 2 { + return nil, fmt.Errorf("sourceName %s wrong, should be pool+\"-image-\"+imageName", sourceName) + } } rbdVolume := &v1.Volume{ Name: volumeName, @@ -492,6 +511,22 @@ func (plugin *rbdPlugin) newUnmapperInternal(volName string, podUID types.UID, m }, nil } +func (plugin *rbdPlugin) getDeviceNameFromOldMountPath(mounter mount.Interface, mountPath string) (string, error) { + refs, err := mount.GetMountRefsByDev(mounter, mountPath) + if err != nil { + return "", err + } + // baseMountPath is the prefix of deprecated device global mounted path, + // such as: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd + baseMountPath := filepath.Join(plugin.host.GetPluginDir(rbdPluginName), "rbd") + for _, ref := range refs { + if dstrings.HasPrefix(ref, baseMountPath) { + return filepath.Rel(baseMountPath, ref) + } + } + return "", fmt.Errorf("can't find source name from mounted path: %s", mountPath) +} + func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil { return nil, fmt.Errorf("spec.PersistentVolumeSource.Spec.RBD is nil") diff --git a/pkg/volume/rbd/rbd_test.go b/pkg/volume/rbd/rbd_test.go index 23b9b969f96..9107fe7f5be 100644 --- a/pkg/volume/rbd/rbd_test.go +++ b/pkg/volume/rbd/rbd_test.go @@ -325,7 +325,7 @@ func TestPlugin(t *testing.T) { }, }, expectedDevicePath: "/dev/rbd1", - expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/pool1-image-image1", tmpDir), + expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool1-image-image1", tmpDir), expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol1", tmpDir, podUID), }) cases = append(cases, &testcase{ @@ 
-353,7 +353,7 @@ func TestPlugin(t *testing.T) { }, }, expectedDevicePath: "/dev/rbd1", - expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/pool2-image-image2", tmpDir), + expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool2-image-image2", tmpDir), expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol2", tmpDir, podUID), }) @@ -450,3 +450,127 @@ func TestGetSecretNameAndNamespace(t *testing.T) { t.Errorf("getSecretNameAndNamespace returned incorrect values, expected %s and %s but got %s and %s", secretName, secretNamespace, foundSecretName, foundSecretNamespace) } } + +// https://github.com/kubernetes/kubernetes/issues/57744 +func TestGetDeviceMountPath(t *testing.T) { + tmpDir, err := utiltesting.MkTmpdir("rbd_test") + if err != nil { + t.Fatalf("error creating temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + fakeVolumeHost := volumetest.NewFakeVolumeHost(tmpDir, nil, nil) + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost) + plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd") + if err != nil { + t.Errorf("Can't find the plugin by name") + } + fdm := NewFakeDiskManager() + + // attacher + attacher, err := plug.(*rbdPlugin).newAttacherInternal(fdm) + if err != nil { + t.Errorf("Failed to make a new Attacher: %v", err) + } + + pool, image := "pool", "image" + spec := volume.NewSpecFromVolume(&v1.Volume{ + Name: "vol", + VolumeSource: v1.VolumeSource{ + RBD: &v1.RBDVolumeSource{ + CephMonitors: []string{"a", "b"}, + RBDPool: pool, + RBDImage: image, + FSType: "ext4", + }, + }, + }) + + deprecatedDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/%s-image-%s", tmpDir, pool, image) + canonicalDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/%s-image-%s", tmpDir, pool, image) + + type testCase struct { + deprecated bool + targetPath string + } + for _, c := range []testCase{ + {false, canonicalDir}, + {true, 
deprecatedDir}, + } { + if c.deprecated { + // This is a deprecated device mount path, we create it, + // and hope attacher.GetDeviceMountPath return c.targetPath. + if err := os.MkdirAll(c.targetPath, 0700); err != nil { + t.Fatalf("Create deprecated mount path failed: %v", err) + } + } + mountPath, err := attacher.GetDeviceMountPath(spec) + if err != nil { + t.Fatalf("GetDeviceMountPath failed: %v", err) + } + if mountPath != c.targetPath { + t.Errorf("Mismatch device mount path: wanted %s, got %s", c.targetPath, mountPath) + } + } +} + +// https://github.com/kubernetes/kubernetes/issues/57744 +func TestConstructVolumeSpec(t *testing.T) { + tmpDir, err := utiltesting.MkTmpdir("rbd_test") + if err != nil { + t.Fatalf("error creating temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + fakeVolumeHost := volumetest.NewFakeVolumeHost(tmpDir, nil, nil) + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost) + plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd") + if err != nil { + t.Errorf("Can't find the plugin by name") + } + fakeMounter := fakeVolumeHost.GetMounter(plug.GetPluginName()).(*mount.FakeMounter) + + pool, image, volumeName := "pool", "image", "vol" + podMountPath := fmt.Sprintf("%s/pods/pod123/volumes/kubernetes.io~rbd/%s", tmpDir, volumeName) + deprecatedDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/%s-image-%s", tmpDir, pool, image) + canonicalDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/%s-image-%s", tmpDir, pool, image) + + type testCase struct { + volumeName string + targetPath string + } + + for _, c := range []testCase{ + {"vol", canonicalDir}, + {"vol", deprecatedDir}, + } { + if err := os.MkdirAll(c.targetPath, 0700); err != nil { + t.Fatalf("Create mount path %s failed: %v", c.targetPath, err) + } + if err = fakeMounter.Mount("/dev/rbd0", c.targetPath, "fake", nil); err != nil { + t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err) + } + 
if err = fakeMounter.Mount(c.targetPath, podMountPath, "fake", []string{"bind"}); err != nil { + t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err) + } + spec, err := plug.ConstructVolumeSpec(c.volumeName, podMountPath) + if err != nil { + t.Errorf("ConstructVolumeSpec failed: %v", err) + } else { + if spec.Volume.RBD.RBDPool != pool { + t.Errorf("Mismatch rbd pool: wanted %s, got %s", pool, spec.Volume.RBD.RBDPool) + } + if spec.Volume.RBD.RBDImage != image { + t.Fatalf("Mismatch rbd image: wanted %s, got %s", image, spec.Volume.RBD.RBDImage) + } + } + if err = fakeMounter.Unmount(podMountPath); err != nil { + t.Fatalf("Unmount pod path %s failed: %v", podMountPath, err) + } + if err = fakeMounter.Unmount(c.targetPath); err != nil { + t.Fatalf("Unmount device path %s failed: %v", c.targetPath, err) + } + } +} diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index 4f6d9e0121f..bbb936b1290 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -37,6 +37,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/wait" fileutil "k8s.io/kubernetes/pkg/util/file" + "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" @@ -110,9 +111,18 @@ func waitForPath(pool, image string, maxRetries int) (string, bool) { return "", false } -// make a directory like /var/lib/kubelet/plugins/kubernetes.io/pod/rbd/pool-image-image +// make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/pool-image-image func makePDNameInternal(host volume.VolumeHost, pool string, image string) string { - return path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image) + // Backward compatibility for the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/pool-image-image + deprecatedDir := path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image) + info, err := 
os.Stat(deprecatedDir) + if err == nil && info.IsDir() { + // The device mount path has already been created with the deprecated format, return it. + glog.V(5).Infof("Deprecated format path %s found", deprecatedDir) + return deprecatedDir + } + // Return the canonical format path. + return path.Join(host.GetPluginDir(rbdPluginName), mount.MountsInGlobalPDPath, pool+"-image-"+image) } // make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/volumeDevices/pool-image-image From 2fefca4a0b82143068333c5cc5b4b8602de0d60a Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Tue, 2 Jan 2018 15:04:30 +0800 Subject: [PATCH 662/794] remove unnecessary function getBuggyHostportChain --- .../network/hostport/hostport_manager.go | 12 --- .../network/hostport/hostport_manager_test.go | 84 ------------------- 2 files changed, 96 deletions(-) diff --git a/pkg/kubelet/network/hostport/hostport_manager.go b/pkg/kubelet/network/hostport/hostport_manager.go index a31b4da0842..b355dbbb2e0 100644 --- a/pkg/kubelet/network/hostport/hostport_manager.go +++ b/pkg/kubelet/network/hostport/hostport_manager.go @@ -178,8 +178,6 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er chainsToRemove := []utiliptables.Chain{} for _, pm := range hostportMappings { chainsToRemove = append(chainsToRemove, getHostportChain(id, pm)) - // TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153 - chainsToRemove = append(chainsToRemove, getBuggyHostportChain(id, pm)) } // remove rules that consists of target chains @@ -255,16 +253,6 @@ func getHostportChain(id string, pm *PortMapping) utiliptables.Chain { return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) } -// This bugy func does bad conversion on HostPort from int32 to string. -// It may generates same chain names for different ports of the same pod, e.g. port 57119/55429/56833. -// `getHostportChain` fixed this bug. 
In order to cleanup the legacy chains/rules, it is temporarily left. -// TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153 -func getBuggyHostportChain(id string, pm *PortMapping) utiliptables.Chain { - hash := sha256.Sum256([]byte(id + string(pm.HostPort) + string(pm.Protocol))) - encoded := base32.StdEncoding.EncodeToString(hash[:]) - return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) -} - // gatherHostportMappings returns all the PortMappings which has hostport for a pod func gatherHostportMappings(podPortMapping *PodPortMapping) []*PortMapping { mappings := []*PortMapping{} diff --git a/pkg/kubelet/network/hostport/hostport_manager_test.go b/pkg/kubelet/network/hostport/hostport_manager_test.go index 1537d274940..289d3b3171a 100644 --- a/pkg/kubelet/network/hostport/hostport_manager_test.go +++ b/pkg/kubelet/network/hostport/hostport_manager_test.go @@ -27,14 +27,6 @@ import ( utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) -func NewFakeHostportManager() HostPortManager { - return &hostportManager{ - hostPortMap: make(map[hostport]closeable), - iptables: NewFakeIPTables(), - portOpener: NewFakeSocketManager().openFakeSocket, - } -} - func TestHostportManager(t *testing.T) { iptables := NewFakeIPTables() portOpener := NewFakeSocketManager() @@ -211,79 +203,3 @@ func TestGetHostportChain(t *testing.T) { t.Fatal(m) } } - -func TestHostPortManagerRemoveLegacyRules(t *testing.T) { - iptables := NewFakeIPTables() - legacyRules := [][]string{ - {"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp --dport 8443 -j KUBE-HP-5N7UH5JAXCVP5UJR"}, - {"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod1_ns1 hostport 8081\" -m udp -p udp --dport 8081 -j KUBE-HP-7THKRFSEH4GIIXK7"}, - {"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod1_ns1 hostport 8080\" -m tcp -p tcp --dport 8080 -j KUBE-HP-4YVONL46AKYWSKS3"}, - {"-A", "OUTPUT", "-m comment --comment \"kube hostport 
portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"}, - {"-A", "PREROUTING", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"}, - {"-A", "POSTROUTING", "-m comment --comment \"SNAT for localhost access to hostports\" -o cbr0 -s 127.0.0.0/8 -j MASQUERADE"}, - {"-A", "KUBE-HP-4YVONL46AKYWSKS3", "-m comment --comment \"pod1_ns1 hostport 8080\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ"}, - {"-A", "KUBE-HP-4YVONL46AKYWSKS3", "-m comment --comment \"pod1_ns1 hostport 8080\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.2:80"}, - {"-A", "KUBE-HP-7THKRFSEH4GIIXK7", "-m comment --comment \"pod1_ns1 hostport 8081\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ"}, - {"-A", "KUBE-HP-7THKRFSEH4GIIXK7", "-m comment --comment \"pod1_ns1 hostport 8081\" -m udp -p udp -j DNAT --to-destination 10.1.1.2:81"}, - {"-A", "KUBE-HP-5N7UH5JAXCVP5UJR", "-m comment --comment \"pod3_ns1 hostport 8443\" -s 10.1.1.4/32 -j KUBE-MARK-MASQ"}, - {"-A", "KUBE-HP-5N7UH5JAXCVP5UJR", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.4:443"}, - } - for _, rule := range legacyRules { - _, err := iptables.EnsureChain(utiliptables.TableNAT, utiliptables.Chain(rule[1])) - assert.NoError(t, err) - _, err = iptables.ensureRule(utiliptables.RulePosition(rule[0]), utiliptables.TableNAT, utiliptables.Chain(rule[1]), rule[2]) - assert.NoError(t, err) - } - portOpener := NewFakeSocketManager() - manager := &hostportManager{ - hostPortMap: make(map[hostport]closeable), - iptables: iptables, - portOpener: portOpener.openFakeSocket, - } - err := manager.Remove("id", &PodPortMapping{ - Name: "pod1", - Namespace: "ns1", - IP: net.ParseIP("10.1.1.2"), - HostNetwork: false, - PortMappings: []*PortMapping{ - { - HostPort: 8080, - ContainerPort: 80, - Protocol: v1.ProtocolTCP, - }, - { - HostPort: 8081, - ContainerPort: 81, - Protocol: v1.ProtocolUDP, - }, - }, - }) - assert.NoError(t, err) - - err = manager.Remove("id", &PodPortMapping{ - Name: 
"pod3", - Namespace: "ns1", - IP: net.ParseIP("10.1.1.4"), - HostNetwork: false, - PortMappings: []*PortMapping{ - { - HostPort: 8443, - ContainerPort: 443, - Protocol: v1.ProtocolTCP, - }, - }, - }) - assert.NoError(t, err) - - natTable, ok := iptables.tables[string(utiliptables.TableNAT)] - assert.True(t, ok) - // check KUBE-HOSTPORTS chain should be cleaned up - hostportChain, ok := natTable.chains["KUBE-HOSTPORTS"] - assert.True(t, ok, string(hostportChain.name)) - assert.Equal(t, 0, len(hostportChain.rules), "%v", hostportChain.rules) - // check KUBE-HP-* chains should be deleted - for _, name := range []string{"KUBE-HP-4YVONL46AKYWSKS3", "KUBE-HP-7THKRFSEH4GIIXK7", "KUBE-HP-5N7UH5JAXCVP5UJR"} { - _, ok := natTable.chains[name] - assert.False(t, ok) - } -} From 86ffa59d340c7b8d31d7ee3655d8a83053dcd95b Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Fri, 5 Jan 2018 16:13:51 +0800 Subject: [PATCH 663/794] refactor customresource handler --- .../pkg/apiserver/apiserver.go | 1 - .../pkg/apiserver/customresource_handler.go | 140 ++++++++++-------- 2 files changed, 75 insertions(+), 66 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go index de90cef9068..b8af7d55595 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go @@ -177,7 +177,6 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) versionDiscoveryHandler, groupDiscoveryHandler, s.GenericAPIServer.RequestContextMapper(), - s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions().Lister(), s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(), delegateHandler, c.ExtraConfig.CRDRESTOptionsGetter, diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go 
b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 773a0657726..3112b9353fd 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -44,12 +44,13 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/endpoints/handlers" + "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" apirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/client-go/discovery" - cache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/cache" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" apiservervalidation "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" @@ -67,6 +68,9 @@ type crdHandler struct { customStorageLock sync.Mutex // customStorage contains a crdStorageMap + // atomic.Value has a very good read performance compared to sync.RWMutex + // see https://gist.github.com/dim/152e6bf80e1384ea72e17ac717a5000a + // which is suited for most read and rarely write cases customStorage atomic.Value requestContextMapper apirequest.RequestContextMapper @@ -96,7 +100,6 @@ func NewCustomResourceDefinitionHandler( versionDiscoveryHandler *versionDiscoveryHandler, groupDiscoveryHandler *groupDiscoveryHandler, requestContextMapper apirequest.RequestContextMapper, - crdLister listers.CustomResourceDefinitionLister, crdInformer informers.CustomResourceDefinitionInformer, delegate http.Handler, restOptionsGetter generic.RESTOptionsGetter, @@ -106,7 +109,7 @@ func NewCustomResourceDefinitionHandler( groupDiscoveryHandler: groupDiscoveryHandler, customStorage: atomic.Value{}, requestContextMapper: requestContextMapper, - crdLister: crdLister, + crdLister: crdInformer.Lister(), delegate: delegate, 
restOptionsGetter: restOptionsGetter, admission: admission, @@ -120,19 +123,20 @@ func NewCustomResourceDefinitionHandler( }) ret.customStorage.Store(crdStorageMap{}) + return ret } func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { ctx, ok := r.requestContextMapper.Get(req) if !ok { - // programmer error - panic("missing context") + responsewriters.InternalError(w, req, fmt.Errorf("no context found for request")) + return } requestInfo, ok := apirequest.RequestInfoFrom(ctx) if !ok { - // programmer error - panic("missing requestInfo") + responsewriters.InternalError(w, req, fmt.Errorf("no RequestInfo found in the context")) + return } if !requestInfo.IsResourceRequest { pathParts := splitPath(requestInfo.Path) @@ -168,6 +172,7 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } if !apiextensions.IsCRDConditionTrue(crd, apiextensions.Established) { r.delegate.ServeHTTP(w, req) + return } if len(requestInfo.Subresource) > 0 { http.NotFound(w, req) @@ -176,7 +181,7 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { terminating := apiextensions.IsCRDConditionTrue(crd, apiextensions.Terminating) - crdInfo, err := r.getServingInfoFor(crd) + crdInfo, err := r.getOrCreateServingInfoFor(crd) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -242,19 +247,52 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } } +func (r *crdHandler) updateCustomResourceDefinition(oldObj, newObj interface{}) { + oldCRD := oldObj.(*apiextensions.CustomResourceDefinition) + newCRD := newObj.(*apiextensions.CustomResourceDefinition) + + r.customStorageLock.Lock() + defer r.customStorageLock.Unlock() + + storageMap := r.customStorage.Load().(crdStorageMap) + oldInfo, found := storageMap[newCRD.UID] + if !found { + return + } + if apiequality.Semantic.DeepEqual(&newCRD.Spec, oldInfo.spec) && apiequality.Semantic.DeepEqual(&newCRD.Status.AcceptedNames, 
oldInfo.acceptedNames) { + glog.V(6).Infof("Ignoring customresourcedefinition %s update because neither spec, nor accepted names changed", oldCRD.Name) + return + } + + glog.V(4).Infof("Updating customresourcedefinition %s", oldCRD.Name) + + // Copy because we cannot write to storageMap without a race + // as it is used without locking elsewhere. + storageMap2 := storageMap.clone() + if oldInfo, ok := storageMap2[types.UID(oldCRD.UID)]; ok { + oldInfo.storage.DestroyFunc() + delete(storageMap2, types.UID(oldCRD.UID)) + } + + r.customStorage.Store(storageMap2) +} + // removeDeadStorage removes REST storage that isn't being used func (r *crdHandler) removeDeadStorage() { - // these don't have to be live. A snapshot is fine - // if we wrongly delete, that's ok. The rest storage will be recreated on the next request - // if we wrongly miss one, that's ok. We'll get it next time - storageMap := r.customStorage.Load().(crdStorageMap) allCustomResourceDefinitions, err := r.crdLister.List(labels.Everything()) if err != nil { utilruntime.HandleError(err) return } - for uid, s := range storageMap { + r.customStorageLock.Lock() + defer r.customStorageLock.Unlock() + + storageMap := r.customStorage.Load().(crdStorageMap) + // Copy because we cannot write to storageMap without a race + // as it is used without locking elsewhere + storageMap2 := storageMap.clone() + for uid, s := range storageMap2 { found := false for _, crd := range allCustomResourceDefinitions { if crd.UID == uid { @@ -265,38 +303,33 @@ func (r *crdHandler) removeDeadStorage() { if !found { glog.V(4).Infof("Removing dead CRD storage for %v", s.requestScope.Resource) s.storage.DestroyFunc() - delete(storageMap, uid) + delete(storageMap2, uid) } } - - r.customStorageLock.Lock() - defer r.customStorageLock.Unlock() - - r.customStorage.Store(storageMap) + r.customStorage.Store(storageMap2) } // GetCustomResourceListerCollectionDeleter returns the ListerCollectionDeleter for // the given uid, or nil if one does not 
exist. func (r *crdHandler) GetCustomResourceListerCollectionDeleter(crd *apiextensions.CustomResourceDefinition) finalizer.ListerCollectionDeleter { - info, err := r.getServingInfoFor(crd) + info, err := r.getOrCreateServingInfoFor(crd) if err != nil { utilruntime.HandleError(err) } return info.storage } -func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefinition) (*crdInfo, error) { +func (r *crdHandler) getOrCreateServingInfoFor(crd *apiextensions.CustomResourceDefinition) (*crdInfo, error) { storageMap := r.customStorage.Load().(crdStorageMap) - ret, ok := storageMap[crd.UID] - if ok { + if ret, ok := storageMap[crd.UID]; ok { return ret, nil } r.customStorageLock.Lock() defer r.customStorageLock.Unlock() - ret, ok = storageMap[crd.UID] - if ok { + storageMap = r.customStorage.Load().(crdStorageMap) + if ret, ok := storageMap[crd.UID]; ok { return ret, nil } @@ -384,7 +417,7 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti MetaGroupVersion: metav1.SchemeGroupVersion, } - ret = &crdInfo{ + ret := &crdInfo{ spec: &crd.Spec, acceptedNames: &crd.Status.AcceptedNames, @@ -392,16 +425,13 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti requestScope: requestScope, } - storageMap2 := make(crdStorageMap, len(storageMap)) - // Copy because we cannot write to storageMap without a race - // as it is used without locking elsewhere - for k, v := range storageMap { - storageMap2[k] = v - } + // as it is used without locking elsewhere. 
+ storageMap2 := storageMap.clone() storageMap2[crd.UID] = ret r.customStorage.Store(storageMap2) + return ret, nil } @@ -423,39 +453,6 @@ func (c crdObjectConverter) ConvertFieldLabel(version, kind, label, value string } } -func (c *crdHandler) updateCustomResourceDefinition(oldObj, newObj interface{}) { - oldCRD := oldObj.(*apiextensions.CustomResourceDefinition) - newCRD := newObj.(*apiextensions.CustomResourceDefinition) - - c.customStorageLock.Lock() - defer c.customStorageLock.Unlock() - storageMap := c.customStorage.Load().(crdStorageMap) - - oldInfo, found := storageMap[newCRD.UID] - if !found { - return - } - if apiequality.Semantic.DeepEqual(&newCRD.Spec, oldInfo.spec) && apiequality.Semantic.DeepEqual(&newCRD.Status.AcceptedNames, oldInfo.acceptedNames) { - glog.V(6).Infof("Ignoring customresourcedefinition %s update because neither spec, nor accepted names changed", oldCRD.Name) - return - } - - glog.V(4).Infof("Updating customresourcedefinition %s", oldCRD.Name) - storageMap2 := make(crdStorageMap, len(storageMap)) - - // Copy because we cannot write to storageMap without a race - // as it is used without locking elsewhere - for k, v := range storageMap { - if k == oldCRD.UID { - v.storage.DestroyFunc() - continue - } - storageMap2[k] = v - } - - c.customStorage.Store(storageMap2) -} - type unstructuredNegotiatedSerializer struct { typer runtime.ObjectTyper creator runtime.ObjectCreater @@ -578,3 +575,16 @@ func (t CRDRESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (gen } return ret, nil } + +// clone returns a clone of the provided crdStorageMap. +// The clone is a shallow copy of the map. 
+func (in crdStorageMap) clone() crdStorageMap { + if in == nil { + return nil + } + out := make(crdStorageMap, len(in)) + for key, value := range in { + out[key] = value + } + return out +} From e7530405456daafd014cb8e7702d5ce177dbf9e7 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Fri, 5 Jan 2018 16:14:00 +0800 Subject: [PATCH 664/794] run update bazel and staging-godep --- staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json | 4 ++++ .../src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD | 1 + 2 files changed, 5 insertions(+) diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 08607c1ddcf..b00cf3779c8 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -1790,6 +1790,10 @@ "ImportPath": "k8s.io/apiserver/pkg/endpoints/handlers", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/request", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD index a3842388097..dbed4134690 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD @@ -54,6 +54,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/discovery:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/handlers:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", From 13a30bfcd957e4b9729fa2734c517b18e9475d8b Mon Sep 17 00:00:00 2001 From: NickrenREN Date: Tue, 9 Jan 2018 13:35:30 +0800 Subject: [PATCH 665/794] Update spec dependency to point to 0.1 tag --- Godeps/Godeps.json | 2 +- .../container-storage-interface/spec/lib/go/csi/csi.pb.go | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 691bd76f46e..db5c7aa66f0 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -438,7 +438,7 @@ }, { "ImportPath": "github.com/container-storage-interface/spec/lib/go/csi", - "Rev": "ec298903f94e1d6d954de121b28044a2e1fdbf48" + "Rev": "9e88e4bfabeca1b8e4810555815f112159292ada" }, { "ImportPath": "github.com/containerd/containerd/api/services/containers/v1", diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go index 6158a4870cc..7f53a1cb436 100644 --- a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go +++ b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go @@ -73,9 +73,11 @@ type VolumeCapability_AccessMode_Mode int32 const ( VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 - // Can be published as read/write at one node at a time. + // Can only be published once as read/write on a single node, at + // any given time. VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 - // Can be published as readonly at one node at a time. + // Can only be published once as readonly on a single node, at + // any given time. VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 // Can be published as readonly at multiple nodes simultaneously. 
VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 @@ -1344,6 +1346,8 @@ type NodePublishVolumeRequest struct { // The path to which the volume will be published. It MUST be an // absolute path in the root filesystem of the process serving this // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the path exists, and that the process + // serving the request has `read` and `write` permissions to the path. // This is a REQUIRED field. TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` // The capability of the volume the CO expects the volume to have. From e2b6b1d7eb75ddb467561eff87422a262e5a7af7 Mon Sep 17 00:00:00 2001 From: Dong Liu Date: Tue, 9 Jan 2018 15:16:45 +0800 Subject: [PATCH 666/794] Fix exists status for azure GetLoadBalancer --- pkg/cloudprovider/providers/azure/azure_loadbalancer.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index d6f4bdfac38..46e594ae9ce 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -78,10 +78,10 @@ func (az *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (statu if err != nil { return nil, false, err } - if exists == false { + if !exists { serviceName := getServiceName(service) - glog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s)- IP doesn't exist in any of the lbs", clusterName, serviceName) - return nil, false, fmt.Errorf("Service(%s) - Loadbalancer not found", serviceName) + glog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName) + return nil, false, nil } return status, true, nil } From 0eb19a004059f599ff4f109b29211ce6b489e0c4 Mon Sep 17 00:00:00 2001 From: Dong Liu Date: Tue, 9 Jan 2018 16:43:43 
+0800 Subject: [PATCH 667/794] Fix vm cache in concurrent case --- pkg/cloudprovider/providers/azure/azure_wrap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index 85b67c456b0..7b0c2f7f73c 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -112,7 +112,7 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM request.vm = &vm } - return vm, nil + return *request.vm, nil } glog.V(6).Infof("getVirtualMachine hits cache for(%s)", vmName) From 95f381bd6b8a8db32377ba8f50a075e56c9e63e5 Mon Sep 17 00:00:00 2001 From: Shyam Jeedigunta Date: Fri, 24 Nov 2017 18:16:26 +0100 Subject: [PATCH 668/794] Refactor retry logic away from updateCIDRAllocation() --- .../nodeipam/ipam/cidr_allocator.go | 2 +- .../nodeipam/ipam/cloud_cidr_allocator.go | 35 ++++++------ .../nodeipam/ipam/range_allocator.go | 57 ++++++++++--------- 3 files changed, 47 insertions(+), 47 deletions(-) diff --git a/pkg/controller/nodeipam/ipam/cidr_allocator.go b/pkg/controller/nodeipam/ipam/cidr_allocator.go index b9a97938ad3..4a5cee34d83 100644 --- a/pkg/controller/nodeipam/ipam/cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cidr_allocator.go @@ -68,7 +68,7 @@ const ( cidrUpdateQueueSize = 5000 // cidrUpdateRetries is the no. of times a NodeSpec update will be retried before dropping it. 
- cidrUpdateRetries = 10 + cidrUpdateRetries = 3 ) // CIDRAllocator is an interface implemented by things that know how diff --git a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go index 7a07409c7cd..8d6ef878dac 100644 --- a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go @@ -210,35 +210,34 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { } podCIDR := cidr.String() - for rep := 0; rep < cidrUpdateRetries; rep++ { - node, err = ca.nodeLister.Get(nodeName) - if err != nil { - glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", nodeName, err) - continue - } + node, err = ca.nodeLister.Get(nodeName) + if err != nil { + glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err) + return err + } + + if node.Spec.PodCIDR == podCIDR { + glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR) + // We don't return here, in order to set the NetworkUnavailable condition later below. + } else { if node.Spec.PodCIDR != "" { - if node.Spec.PodCIDR == podCIDR { - glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR) - // We don't return to set the NetworkUnavailable condition if needed. - break - } - glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", - node.Name, node.Spec.PodCIDR, podCIDR) + glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR) // We fall through and set the CIDR despite this error. This // implements the same logic as implemented in the // rangeAllocator. 
// // See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248 } - if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil { - glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) - break + for i := 0; i < cidrUpdateRetries; i++ { + if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil { + glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) + break + } } - glog.Errorf("Failed to update node %v PodCIDR to %v (%d retries left): %v", node.Name, podCIDR, cidrUpdateRetries-rep-1, err) } if err != nil { nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed") - glog.Errorf("CIDR assignment for node %v failed: %v.", nodeName, err) + glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err) return err } diff --git a/pkg/controller/nodeipam/ipam/range_allocator.go b/pkg/controller/nodeipam/ipam/range_allocator.go index 5de2195854b..241bc2f1b8e 100644 --- a/pkg/controller/nodeipam/ipam/range_allocator.go +++ b/pkg/controller/nodeipam/ipam/range_allocator.go @@ -286,39 +286,40 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { defer r.removeNodeFromProcessing(data.nodeName) podCIDR := data.cidr.String() - for rep := 0; rep < cidrUpdateRetries; rep++ { - node, err = r.nodeLister.Get(data.nodeName) - if err != nil { - glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err) - continue - } - if node.Spec.PodCIDR != "" { - glog.V(4).Infof("Node %v already has allocated CIDR %v. 
Releasing assigned one if different.", node.Name, node.Spec.PodCIDR) - if node.Spec.PodCIDR != podCIDR { - glog.Errorf("Node %q PodCIDR seems to have changed (original=%v, current=%v), releasing original and occupying new CIDR", - node.Name, node.Spec.PodCIDR, podCIDR) - if err := r.cidrs.Release(data.cidr); err != nil { - glog.Errorf("Error when releasing CIDR %v", podCIDR) - } - } - return nil + + node, err = r.nodeLister.Get(data.nodeName) + if err != nil { + glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err) + return err + } + + if node.Spec.PodCIDR == podCIDR { + glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR) + return nil + } + if node.Spec.PodCIDR != "" { + glog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR) + if err := r.cidrs.Release(data.cidr); err != nil { + glog.Errorf("Error when releasing CIDR %v", podCIDR) } + return nil + } + // If we reached here, it means that the node has no CIDR currently assigned. So we set it. + for i := 0; i < cidrUpdateRetries; i++ { if err = utilnode.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil { glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) - break + return nil } - glog.Errorf("Failed to update node %v PodCIDR to %v (%d retries left): %v", node.Name, podCIDR, cidrUpdateRetries-rep-1, err) } - if err != nil { - nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed") - // We accept the fact that we may leek CIDRs here. This is safer than releasing - // them in case when we don't know if request went through. - // NodeController restart will return all falsely allocated CIDRs to the pool. - if !apierrors.IsServerTimeout(err) { - glog.Errorf("CIDR assignment for node %v failed: %v. 
Releasing allocated CIDR", data.nodeName, err) - if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil { - glog.Errorf("Error releasing allocated CIDR for node %v: %v", data.nodeName, releaseErr) - } + glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err) + nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed") + // We accept the fact that we may leak CIDRs here. This is safer than releasing + // them in case when we don't know if request went through. + // NodeController restart will return all falsely allocated CIDRs to the pool. + if !apierrors.IsServerTimeout(err) { + glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err) + if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil { + glog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr) } } return err From d8924c1da89518b102f2c2260258a1740118aeb0 Mon Sep 17 00:00:00 2001 From: Antoine Cotten Date: Tue, 9 Jan 2018 13:54:50 +0100 Subject: [PATCH 669/794] Make code generators log to stderr by default --- staging/src/k8s.io/code-generator/cmd/client-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/go-to-protobuf/main.go | 1 + staging/src/k8s.io/code-generator/cmd/informer-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/lister-gen/main.go | 1 + staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go | 1 + 8 files changed, 8 insertions(+) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go index 5869d83a1d5..ca829c30b1b 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go @@ -39,6 +39,7 @@ func 
main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of client-gen + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go index 4cba1c6bdf2..afb060762b1 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -55,6 +55,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go index bb506cf7f9e..5eabfd1b6a3 100644 --- a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -63,6 +63,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go index 89d2b39bc6a..4f55680b4f8 100644 --- a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -62,6 +62,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/main.go b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/main.go index ce1756c11a6..847a6a5a02b 100644 --- 
a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/main.go +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/main.go @@ -29,6 +29,7 @@ var g = protobuf.New() func init() { g.BindFlags(flag.CommandLine) + goflag.Set("logtostderr", "true") flag.CommandLine.AddGoFlagSet(goflag.CommandLine) } diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go index fc6feeb30bc..e993e620eb5 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go @@ -41,6 +41,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go index a89817f4957..6c48240aca2 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go @@ -38,6 +38,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() diff --git a/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go b/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go index e3c6f6c6cd8..c324c10bab9 100644 --- a/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/openapi-gen/main.go @@ -40,6 +40,7 @@ func main() { genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() From e9cf3f1ac4c97bea716fbac03d2cd71c5189d5c4 Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Sat, 16 Dec 2017 01:38:46 -0500 Subject: [PATCH 670/794] Handle Unhealthy devices 
Update node capacity with sum of both healthy and unhealthy devices. Node allocatable reflect only healthy devices. --- pkg/kubelet/cm/container_manager.go | 5 +- pkg/kubelet/cm/container_manager_linux.go | 2 +- pkg/kubelet/cm/container_manager_stub.go | 4 +- pkg/kubelet/cm/deviceplugin/manager.go | 74 +++++++++++++----- pkg/kubelet/cm/deviceplugin/manager_stub.go | 4 +- pkg/kubelet/cm/deviceplugin/manager_test.go | 86 +++++++++++++-------- pkg/kubelet/cm/deviceplugin/types.go | 4 +- pkg/kubelet/kubelet_node_status.go | 13 +++- 8 files changed, 128 insertions(+), 64 deletions(-) diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index da7bf4a4642..4f16fa6f9d9 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -70,9 +70,10 @@ type ContainerManager interface { // GetCapacity returns the amount of compute resources tracked by container manager available on the node. GetCapacity() v1.ResourceList - // GetDevicePluginResourceCapacity returns the amount of device plugin resources available on the node + // GetDevicePluginResourceCapacity returns the node capacity (amount of total device plugin resources), + // node allocatable (amount of total healthy resources reported by device plugin), // and inactive device plugin resources previously registered on the node. 
- GetDevicePluginResourceCapacity() (v1.ResourceList, []string) + GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) // UpdateQOSCgroups performs housekeeping updates to ensure that the top // level QoS containers have their desired state in a thread-safe way diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 6c6c7068172..feb2f0219ae 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -887,6 +887,6 @@ func (cm *containerManagerImpl) GetCapacity() v1.ResourceList { return cm.capacity } -func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, []string) { +func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { return cm.devicePluginManager.GetCapacity() } diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index 27a86849582..0f7516e0515 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -70,8 +70,8 @@ func (cm *containerManagerStub) GetCapacity() v1.ResourceList { return nil } -func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceList, []string) { - return nil, []string{} +func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { + return nil, nil, []string{} } func (cm *containerManagerStub) NewPodContainerManager() PodContainerManager { diff --git a/pkg/kubelet/cm/deviceplugin/manager.go b/pkg/kubelet/cm/deviceplugin/manager.go index 5e1137c1149..1c55e898480 100644 --- a/pkg/kubelet/cm/deviceplugin/manager.go +++ b/pkg/kubelet/cm/deviceplugin/manager.go @@ -73,8 +73,11 @@ type ManagerImpl struct { // e.g. a new device is advertised, two old devices are deleted and a running device fails. 
callback monitorCallback - // allDevices contains all of registered resourceNames and their exported device IDs. - allDevices map[string]sets.String + // healthyDevices contains all of the registered healthy resourceNames and their exported device IDs. + healthyDevices map[string]sets.String + + // unhealthyDevices contains all of the unhealthy devices and their exported device IDs. + unhealthyDevices map[string]sets.String // allocatedDevices contains allocated deviceIds, keyed by resourceName. allocatedDevices map[string]sets.String @@ -106,7 +109,8 @@ func newManagerImpl(socketPath string) (*ManagerImpl, error) { endpoints: make(map[string]endpoint), socketname: file, socketdir: dir, - allDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.String), + unhealthyDevices: make(map[string]sets.String), allocatedDevices: make(map[string]sets.String), podDevices: make(podDevices), } @@ -128,20 +132,24 @@ func newManagerImpl(socketPath string) (*ManagerImpl, error) { func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, added, updated, deleted []pluginapi.Device) { kept := append(updated, added...) m.mutex.Lock() - if _, ok := m.allDevices[resourceName]; !ok { - m.allDevices[resourceName] = sets.NewString() + if _, ok := m.healthyDevices[resourceName]; !ok { + m.healthyDevices[resourceName] = sets.NewString() + } + if _, ok := m.unhealthyDevices[resourceName]; !ok { + m.unhealthyDevices[resourceName] = sets.NewString() } - // For now, Manager only keeps track of healthy devices. - // TODO: adds support to track unhealthy devices. 
for _, dev := range kept { if dev.Health == pluginapi.Healthy { - m.allDevices[resourceName].Insert(dev.ID) + m.healthyDevices[resourceName].Insert(dev.ID) + m.unhealthyDevices[resourceName].Delete(dev.ID) } else { - m.allDevices[resourceName].Delete(dev.ID) + m.unhealthyDevices[resourceName].Insert(dev.ID) + m.healthyDevices[resourceName].Delete(dev.ID) } } for _, dev := range deleted { - m.allDevices[resourceName].Delete(dev.ID) + m.healthyDevices[resourceName].Delete(dev.ID) + m.unhealthyDevices[resourceName].Delete(dev.ID) } m.mutex.Unlock() m.writeCheckpoint() @@ -371,7 +379,8 @@ func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) { // GetCapacity is expected to be called when Kubelet updates its node status. // The first returned variable contains the registered device plugin resource capacity. -// The second returned variable contains previously registered resources that are no longer active. +// The second returned variable contains the registered device plugin resource allocatable. +// The third returned variable contains previously registered resources that are no longer active. // Kubelet uses this information to update resource capacity/allocatable in its node status. // After the call, device plugin can remove the inactive resources from its internal list as the // change is already reflected in Kubelet node status. @@ -380,25 +389,47 @@ func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) { // cm.UpdatePluginResource() run during predicate Admit guarantees we adjust nodeinfo // capacity for already allocated pods so that they can continue to run. However, new pods // requiring device plugin resources will not be scheduled till device plugin re-registers. 
-func (m *ManagerImpl) GetCapacity() (v1.ResourceList, []string) { +func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) { needsUpdateCheckpoint := false var capacity = v1.ResourceList{} + var allocatable = v1.ResourceList{} var deletedResources []string m.mutex.Lock() - for resourceName, devices := range m.allDevices { + for resourceName, devices := range m.healthyDevices { if _, ok := m.endpoints[resourceName]; !ok { - delete(m.allDevices, resourceName) + delete(m.healthyDevices, resourceName) deletedResources = append(deletedResources, resourceName) needsUpdateCheckpoint = true } else { capacity[v1.ResourceName(resourceName)] = *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI) + allocatable[v1.ResourceName(resourceName)] = *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI) + } + } + for resourceName, devices := range m.unhealthyDevices { + if _, ok := m.endpoints[resourceName]; !ok { + delete(m.unhealthyDevices, resourceName) + alreadyDeleted := false + for _, name := range deletedResources { + if name == resourceName { + alreadyDeleted = true + } + } + if !alreadyDeleted { + deletedResources = append(deletedResources, resourceName) + } + needsUpdateCheckpoint = true + } else { + capacityCount := capacity[v1.ResourceName(resourceName)] + unhealthyCount := *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI) + capacityCount.Add(unhealthyCount) + capacity[v1.ResourceName(resourceName)] = capacityCount } } m.mutex.Unlock() if needsUpdateCheckpoint { m.writeCheckpoint() } - return capacity, deletedResources + return capacity, allocatable, deletedResources } // checkpointData struct is used to store pod to device allocation information @@ -416,7 +447,7 @@ func (m *ManagerImpl) writeCheckpoint() error { PodDeviceEntries: m.podDevices.toCheckpointData(), RegisteredDevices: make(map[string][]string), } - for resource, devices := range m.allDevices { + for resource, devices := range m.healthyDevices { 
data.RegisteredDevices[resource] = devices.UnsortedList() } m.mutex.Unlock() @@ -453,9 +484,10 @@ func (m *ManagerImpl) readCheckpoint() error { m.podDevices.fromCheckpointData(data.PodDeviceEntries) m.allocatedDevices = m.podDevices.devices() for resource, devices := range data.RegisteredDevices { - m.allDevices[resource] = sets.NewString() + // TODO: Support Checkpointing for unhealthy devices as well + m.healthyDevices[resource] = sets.NewString() for _, dev := range devices { - m.allDevices[resource].Insert(dev) + m.healthyDevices[resource].Insert(dev) } } return nil @@ -508,7 +540,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi } glog.V(3).Infof("Needs to allocate %v %v for pod %q container %q", needed, resource, podUID, contName) // Needs to allocate additional devices. - if _, ok := m.allDevices[resource]; !ok { + if _, ok := m.healthyDevices[resource]; !ok { return nil, fmt.Errorf("can't allocate unregistered device %v", resource) } devices = sets.NewString() @@ -527,7 +559,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi // Gets Devices in use. devicesInUse := m.allocatedDevices[resource] // Gets a list of available devices. - available := m.allDevices[resource].Difference(devicesInUse) + available := m.healthyDevices[resource].Difference(devicesInUse) if int(available.Len()) < needed { return nil, fmt.Errorf("requested number of devices unavailable for %s. Requested: %d, Available: %d", resource, needed, available.Len()) } @@ -557,7 +589,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont resource := string(k) needed := int(v.Value()) glog.V(3).Infof("needs %d %s", needed, resource) - _, registeredResource := m.allDevices[resource] + _, registeredResource := m.healthyDevices[resource] _, allocatedResource := m.allocatedDevices[resource] // Continues if this is neither an active device plugin resource nor // a resource we have previously allocated. 
diff --git a/pkg/kubelet/cm/deviceplugin/manager_stub.go b/pkg/kubelet/cm/deviceplugin/manager_stub.go index 903a0077a2c..c0f6e2d2f17 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_stub.go +++ b/pkg/kubelet/cm/deviceplugin/manager_stub.go @@ -58,6 +58,6 @@ func (h *ManagerStub) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co } // GetCapacity simply returns nil capacity and empty removed resource list. -func (h *ManagerStub) GetCapacity() (v1.ResourceList, []string) { - return nil, []string{} +func (h *ManagerStub) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) { + return nil, nil, []string{} } diff --git a/pkg/kubelet/cm/deviceplugin/manager_test.go b/pkg/kubelet/cm/deviceplugin/manager_test.go index d7a032694c5..33ad9e08a4c 100644 --- a/pkg/kubelet/cm/deviceplugin/manager_test.go +++ b/pkg/kubelet/cm/deviceplugin/manager_test.go @@ -149,7 +149,7 @@ func cleanup(t *testing.T, m Manager, p *Stub) { m.Stop() } -func TestUpdateCapacity(t *testing.T) { +func TestUpdateCapacityAllocatable(t *testing.T) { testManager, err := newManagerImpl(socketName) as := assert.New(t) as.NotNil(testManager) @@ -167,61 +167,81 @@ func TestUpdateCapacity(t *testing.T) { resourceName1 := "domain1.com/resource1" testManager.endpoints[resourceName1] = &endpointImpl{devices: make(map[string]pluginapi.Device)} callback(resourceName1, devs, []pluginapi.Device{}, []pluginapi.Device{}) - capacity, removedResources := testManager.GetCapacity() + capacity, allocatable, removedResources := testManager.GetCapacity() resource1Capacity, ok := capacity[v1.ResourceName(resourceName1)] as.True(ok) - as.Equal(int64(2), resource1Capacity.Value()) + resource1Allocatable, ok := allocatable[v1.ResourceName(resourceName1)] + as.True(ok) + as.Equal(int64(3), resource1Capacity.Value()) + as.Equal(int64(2), resource1Allocatable.Value()) as.Equal(0, len(removedResources)) - // Deletes an unhealthy device should NOT change capacity. 
+ // Deletes an unhealthy device should NOT change allocatable but change capacity. callback(resourceName1, []pluginapi.Device{}, []pluginapi.Device{}, []pluginapi.Device{devs[2]}) - capacity, removedResources = testManager.GetCapacity() + capacity, allocatable, removedResources = testManager.GetCapacity() resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)] as.True(ok) + resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)] + as.True(ok) as.Equal(int64(2), resource1Capacity.Value()) + as.Equal(int64(2), resource1Allocatable.Value()) as.Equal(0, len(removedResources)) - // Updates a healthy device to unhealthy should reduce capacity by 1. + // Updates a healthy device to unhealthy should reduce allocatable by 1. dev2 := devs[1] dev2.Health = pluginapi.Unhealthy callback(resourceName1, []pluginapi.Device{}, []pluginapi.Device{dev2}, []pluginapi.Device{}) - capacity, removedResources = testManager.GetCapacity() + capacity, allocatable, removedResources = testManager.GetCapacity() resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)] as.True(ok) - as.Equal(int64(1), resource1Capacity.Value()) + resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)] + as.True(ok) + as.Equal(int64(2), resource1Capacity.Value()) + as.Equal(int64(1), resource1Allocatable.Value()) as.Equal(0, len(removedResources)) - // Deletes a healthy device should reduce capacity by 1. + // Deletes a healthy device should reduce capacity and allocatable by 1. 
callback(resourceName1, []pluginapi.Device{}, []pluginapi.Device{}, []pluginapi.Device{devs[0]}) - capacity, removedResources = testManager.GetCapacity() + capacity, allocatable, removedResources = testManager.GetCapacity() resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)] as.True(ok) - as.Equal(int64(0), resource1Capacity.Value()) + resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)] + as.True(ok) + as.Equal(int64(0), resource1Allocatable.Value()) + as.Equal(int64(1), resource1Capacity.Value()) as.Equal(0, len(removedResources)) // Tests adding another resource. resourceName2 := "resource2" testManager.endpoints[resourceName2] = &endpointImpl{devices: make(map[string]pluginapi.Device)} callback(resourceName2, devs, []pluginapi.Device{}, []pluginapi.Device{}) - capacity, removedResources = testManager.GetCapacity() + capacity, allocatable, removedResources = testManager.GetCapacity() as.Equal(2, len(capacity)) resource2Capacity, ok := capacity[v1.ResourceName(resourceName2)] as.True(ok) - as.Equal(int64(2), resource2Capacity.Value()) + resource2Allocatable, ok := allocatable[v1.ResourceName(resourceName2)] + as.True(ok) + as.Equal(int64(3), resource2Capacity.Value()) + as.Equal(int64(2), resource2Allocatable.Value()) as.Equal(0, len(removedResources)) // Removes resourceName1 endpoint. Verifies testManager.GetCapacity() reports that resourceName1 - // is removed from capacity and it no longer exists in allDevices after the call. + // is removed from capacity and it no longer exists in healthyDevices after the call. 
delete(testManager.endpoints, resourceName1) - capacity, removed := testManager.GetCapacity() + capacity, allocatable, removed := testManager.GetCapacity() as.Equal([]string{resourceName1}, removed) _, ok = capacity[v1.ResourceName(resourceName1)] as.False(ok) val, ok := capacity[v1.ResourceName(resourceName2)] as.True(ok) - as.Equal(int64(2), val.Value()) - _, ok = testManager.allDevices[resourceName1] + as.Equal(int64(3), val.Value()) + _, ok = testManager.healthyDevices[resourceName1] as.False(ok) + _, ok = testManager.unhealthyDevices[resourceName1] + as.False(ok) + fmt.Println("removed: ", removed) + as.Equal(1, len(removed)) + } type stringPairType struct { @@ -270,7 +290,7 @@ func TestCheckpoint(t *testing.T) { defer os.RemoveAll(tmpDir) testManager := &ManagerImpl{ socketdir: tmpDir, - allDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.String), allocatedDevices: make(map[string]sets.String), podDevices: make(podDevices), } @@ -294,19 +314,19 @@ func TestCheckpoint(t *testing.T) { constructAllocResp(map[string]string{"/dev/r1dev4": "/dev/r1dev4"}, map[string]string{"/home/r1lib1": "/usr/r1lib1"}, map[string]string{})) - testManager.allDevices[resourceName1] = sets.NewString() - testManager.allDevices[resourceName1].Insert("dev1") - testManager.allDevices[resourceName1].Insert("dev2") - testManager.allDevices[resourceName1].Insert("dev3") - testManager.allDevices[resourceName1].Insert("dev4") - testManager.allDevices[resourceName1].Insert("dev5") - testManager.allDevices[resourceName2] = sets.NewString() - testManager.allDevices[resourceName2].Insert("dev1") - testManager.allDevices[resourceName2].Insert("dev2") + testManager.healthyDevices[resourceName1] = sets.NewString() + testManager.healthyDevices[resourceName1].Insert("dev1") + testManager.healthyDevices[resourceName1].Insert("dev2") + testManager.healthyDevices[resourceName1].Insert("dev3") + testManager.healthyDevices[resourceName1].Insert("dev4") + 
testManager.healthyDevices[resourceName1].Insert("dev5") + testManager.healthyDevices[resourceName2] = sets.NewString() + testManager.healthyDevices[resourceName2].Insert("dev1") + testManager.healthyDevices[resourceName2].Insert("dev2") expectedPodDevices := testManager.podDevices expectedAllocatedDevices := testManager.podDevices.devices() - expectedAllDevices := testManager.allDevices + expectedAllDevices := testManager.healthyDevices err = testManager.writeCheckpoint() @@ -331,7 +351,7 @@ func TestCheckpoint(t *testing.T) { } } as.True(reflect.DeepEqual(expectedAllocatedDevices, testManager.allocatedDevices)) - as.True(reflect.DeepEqual(expectedAllDevices, testManager.allDevices)) + as.True(reflect.DeepEqual(expectedAllDevices, testManager.healthyDevices)) } type activePodsStub struct { @@ -388,7 +408,7 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso testManager := &ManagerImpl{ socketdir: tmpDir, callback: monitorCallback, - allDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.String), allocatedDevices: make(map[string]sets.String), endpoints: make(map[string]endpoint), podDevices: make(podDevices), @@ -397,9 +417,9 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso } testManager.store, _ = utilstore.NewFileStore("/tmp/", utilfs.DefaultFs{}) for _, res := range testRes { - testManager.allDevices[res.resourceName] = sets.NewString() + testManager.healthyDevices[res.resourceName] = sets.NewString() for _, dev := range res.devs { - testManager.allDevices[res.resourceName].Insert(dev) + testManager.healthyDevices[res.resourceName].Insert(dev) } if res.resourceName == "domain1.com/resource1" { testManager.endpoints[res.resourceName] = &MockEndpoint{ @@ -682,7 +702,7 @@ func TestSanitizeNodeAllocatable(t *testing.T) { testManager := &ManagerImpl{ callback: monitorCallback, - allDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.String), 
allocatedDevices: make(map[string]sets.String), podDevices: make(podDevices), } diff --git a/pkg/kubelet/cm/deviceplugin/types.go b/pkg/kubelet/cm/deviceplugin/types.go index c4465a8be4c..d27b11e845e 100644 --- a/pkg/kubelet/cm/deviceplugin/types.go +++ b/pkg/kubelet/cm/deviceplugin/types.go @@ -53,9 +53,9 @@ type Manager interface { // for the found one. An empty struct is returned in case no cached state is found. GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Container) *DeviceRunContainerOptions - // GetCapacity returns the amount of available device plugin resource capacity + // GetCapacity returns the amount of available device plugin resource capacity, resource allocatable // and inactive device plugin resources previously registered on the node. - GetCapacity() (v1.ResourceList, []string) + GetCapacity() (v1.ResourceList, v1.ResourceList, []string) } // DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices. diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 006283e808f..696ac04a1b6 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -550,6 +550,10 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) { } } + var devicePluginAllocatable v1.ResourceList + var devicePluginCapacity v1.ResourceList + var removedDevicePlugins []string + // TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start // cAdvisor locally, e.g. for test-cmd.sh, and in integration test. 
info, err := kl.GetCachedMachineInfo() @@ -594,13 +598,14 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) { } } - devicePluginCapacity, removedDevicePlugins := kl.containerManager.GetDevicePluginResourceCapacity() + devicePluginCapacity, devicePluginAllocatable, removedDevicePlugins = kl.containerManager.GetDevicePluginResourceCapacity() if devicePluginCapacity != nil { for k, v := range devicePluginCapacity { glog.V(2).Infof("Update capacity for %s to %d", k, v.Value()) node.Status.Capacity[k] = v } } + for _, removedResource := range removedDevicePlugins { glog.V(2).Infof("Remove capacity for %s", removedResource) delete(node.Status.Capacity, v1.ResourceName(removedResource)) @@ -631,6 +636,12 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) { } node.Status.Allocatable[k] = value } + if devicePluginAllocatable != nil { + for k, v := range devicePluginAllocatable { + glog.V(2).Infof("Update allocatable for %s to %d", k, v.Value()) + node.Status.Allocatable[k] = v + } + } // for every huge page reservation, we need to remove it from allocatable memory for k, v := range node.Status.Capacity { if v1helper.IsHugePageResourceName(k) { From dc0b7da5b7472eba57977136083cd5033dd223ed Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Tue, 9 Jan 2018 11:06:10 +0530 Subject: [PATCH 671/794] Add volumemetrics for glusterfs plugin. 
Signed-off-by: Humble Chirammal --- pkg/volume/glusterfs/glusterfs.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index adab5d535c3..0183b36a09d 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -172,10 +172,11 @@ func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endp source, readOnly := plugin.getGlusterVolumeSource(spec) return &glusterfsMounter{ glusterfs: &glusterfs{ - volName: spec.Name(), - mounter: mounter, - pod: pod, - plugin: plugin, + volName: spec.Name(), + mounter: mounter, + pod: pod, + plugin: plugin, + MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(pod.UID, strings.EscapeQualifiedNameForDisk(glusterfsPluginName), spec.Name())), }, hosts: ep, path: source.Path, @@ -190,10 +191,11 @@ func (plugin *glusterfsPlugin) NewUnmounter(volName string, podUID types.UID) (v func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) { return &glusterfsUnmounter{&glusterfs{ - volName: volName, - mounter: mounter, - pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}}, - plugin: plugin, + volName: volName, + mounter: mounter, + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}}, + plugin: plugin, + MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(podUID, strings.EscapeQualifiedNameForDisk(glusterfsPluginName), volName)), }}, nil } @@ -211,7 +213,7 @@ type glusterfs struct { pod *v1.Pod mounter mount.Interface plugin *glusterfsPlugin - volume.MetricsNil + volume.MetricsProvider } type glusterfsMounter struct { From 8debdc1501147bb2980909fe854918946f2ad95b Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Mon, 8 Jan 2018 17:19:12 -0800 Subject: [PATCH 672/794] kubeadm: more random tokens The strategy of hex encoding a random byte array only uses the following 
characters: 0123456789abcdef Instead of the entire bootstrapping token character set: 0123456789abcdefghijklmnopqrstuvwxyz Update the token generation to use the entire character set. This increases the token secret from 48 bits of entropy to ~82 bits. 256^8 (1.8e+19) vs. 36^16 (7.9e+24). --- cmd/kubeadm/app/util/token/tokens.go | 38 ++++++++++++++++++----- cmd/kubeadm/app/util/token/tokens_test.go | 4 +-- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/cmd/kubeadm/app/util/token/tokens.go b/cmd/kubeadm/app/util/token/tokens.go index 18d886f533f..6be9260005e 100644 --- a/cmd/kubeadm/app/util/token/tokens.go +++ b/cmd/kubeadm/app/util/token/tokens.go @@ -17,8 +17,8 @@ limitations under the License. package token import ( + "bufio" "crypto/rand" - "encoding/hex" "fmt" "regexp" @@ -27,9 +27,9 @@ import ( const ( // TokenIDBytes defines a number of bytes used for a token id - TokenIDBytes = 3 + TokenIDBytes = 6 // TokenSecretBytes defines a number of bytes used for a secret - TokenSecretBytes = 8 + TokenSecretBytes = 16 ) var ( @@ -43,13 +43,35 @@ var ( TokenRegexp = regexp.MustCompile(TokenRegexpString) ) +const validBootstrapTokenChars = "0123456789abcdefghijklmnopqrstuvwxyz" + func randBytes(length int) (string, error) { - b := make([]byte, length) - _, err := rand.Read(b) - if err != nil { - return "", err + // len("0123456789abcdefghijklmnopqrstuvwxyz") = 36 which doesn't evenly divide + // the possible values of a byte: 256 mod 36 = 4. Discard any random bytes we + // read that are >= 252 so the bytes we evenly divide the character set. 
+ const maxByteValue = 252 + + var ( + b byte + err error + token = make([]byte, length) + ) + + reader := bufio.NewReaderSize(rand.Reader, length*2) + for i := range token { + for { + if b, err = reader.ReadByte(); err != nil { + return "", err + } + if b < maxByteValue { + break + } + } + + token[i] = validBootstrapTokenChars[int(b)%len(validBootstrapTokenChars)] } - return hex.EncodeToString(b), nil + + return string(token), nil } // GenerateToken generates a new token with a token ID that is valid as a diff --git a/cmd/kubeadm/app/util/token/tokens_test.go b/cmd/kubeadm/app/util/token/tokens_test.go index b9b617c20a7..4146a027036 100644 --- a/cmd/kubeadm/app/util/token/tokens_test.go +++ b/cmd/kubeadm/app/util/token/tokens_test.go @@ -147,8 +147,8 @@ func TestRandBytes(t *testing.T) { if err != nil { t.Errorf("failed randBytes: %v", err) } - if len(actual) != rt*2 { - t.Errorf("failed randBytes:\n\texpected: %d\n\t actual: %d\n", rt*2, len(actual)) + if len(actual) != rt { + t.Errorf("failed randBytes:\n\texpected: %d\n\t actual: %d\n", rt, len(actual)) } } } From 5caf26fa844cd92f26a20c1c0b134bbcf5a06475 Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Thu, 4 Jan 2018 15:17:52 -0800 Subject: [PATCH 673/794] Move some old security controls to KubeletFlags and mark them deprecated --- cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go | 1 - .../app/apis/kubeadm/v1alpha1/defaults.go | 3 -- cmd/kubelet/app/options/options.go | 33 ++++++++++++++++--- cmd/kubelet/app/server.go | 8 ++--- .../apis/kubeletconfig/fuzzer/fuzzer.go | 3 -- .../apis/kubeletconfig/helpers_test.go | 4 --- pkg/kubelet/apis/kubeletconfig/types.go | 13 -------- .../apis/kubeletconfig/v1alpha1/defaults.go | 9 ----- .../apis/kubeletconfig/v1alpha1/types.go | 13 -------- .../v1alpha1/zz_generated.conversion.go | 12 ------- .../v1alpha1/zz_generated.deepcopy.go | 24 -------------- .../kubeletconfig/zz_generated.deepcopy.go | 15 --------- test/e2e_node/services/kubelet.go | 4 +-- 13 files changed, 34 
insertions(+), 108 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go index c107fae10ed..7e667805e24 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go @@ -66,7 +66,6 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { obj.KubeletConfiguration = kubeadm.KubeletConfiguration{ BaseConfig: &kubeletconfigv1alpha1.KubeletConfiguration{ PodManifestPath: "foo", - AllowPrivileged: utilpointer.BoolPtr(true), ClusterDNS: []string{"foo"}, ClusterDomain: "foo", Authorization: kubeletconfigv1alpha1.KubeletAuthorization{Mode: "foo"}, diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go index ea28af88e5f..772a9612ee6 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go @@ -185,9 +185,6 @@ func SetDefaults_KubeletConfiguration(obj *MasterConfiguration) { if obj.KubeletConfiguration.BaseConfig.PodManifestPath == "" { obj.KubeletConfiguration.BaseConfig.PodManifestPath = DefaultManifestsDir } - if obj.KubeletConfiguration.BaseConfig.AllowPrivileged == nil { - obj.KubeletConfiguration.BaseConfig.AllowPrivileged = utilpointer.BoolPtr(true) - } if obj.KubeletConfiguration.BaseConfig.ClusterDNS == nil { dnsIP, err := constants.GetDNSIP(obj.Networking.ServiceSubnet) if err != nil { diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index a5ee14dc288..250da221f3b 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" kubeletconfigvalidation "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation" "k8s.io/kubernetes/pkg/kubelet/config" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" utiltaints "k8s.io/kubernetes/pkg/util/taints" ) @@ -187,6 +188,19 @@ type 
KubeletFlags struct { KeepTerminatedPodVolumes bool // enable gathering custom metrics. EnableCustomMetrics bool + // allowPrivileged enables containers to request privileged mode. + // Defaults to false. + AllowPrivileged bool + // hostNetworkSources is a comma-separated list of sources from which the + // Kubelet allows pods to use of host network. Defaults to "*". Valid + // options are "file", "http", "api", and "*" (all sources). + HostNetworkSources []string + // hostPIDSources is a comma-separated list of sources from which the + // Kubelet allows pods to use the host pid namespace. Defaults to "*". + HostPIDSources []string + // hostIPCSources is a comma-separated list of sources from which the + // Kubelet allows pods to use the host ipc namespace. Defaults to "*". + HostIPCSources []string } // NewKubeletFlags will create a new KubeletFlags with default values @@ -221,6 +235,9 @@ func NewKubeletFlags() *KubeletFlags { VolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/", RegisterNode: true, SeccompProfileRoot: filepath.Join(v1alpha1.DefaultRootDir, "seccomp"), + HostNetworkSources: []string{kubetypes.AllSource}, + HostPIDSources: []string{kubetypes.AllSource}, + HostIPCSources: []string{kubetypes.AllSource}, } } @@ -366,6 +383,18 @@ func (f *KubeletFlags) AddFlags(fs *pflag.FlagSet) { // TODO(#54161:v1.11.0): Remove --enable-custom-metrics flag, it is deprecated. 
fs.BoolVar(&f.EnableCustomMetrics, "enable-custom-metrics", f.EnableCustomMetrics, "Support for gathering custom metrics.") fs.MarkDeprecated("enable-custom-metrics", "will be removed in a future version") + // TODO(#58010:v1.12.0): Remove --allow-privileged, it is deprecated + fs.BoolVar(&f.AllowPrivileged, "allow-privileged", f.AllowPrivileged, "If true, allow containers to request privileged mode.") + fs.MarkDeprecated("allow-privileged", "will be removed in a future version") + // TODO(#58010:v1.12.0): Remove --host-network-sources, it is deprecated + fs.StringSliceVar(&f.HostNetworkSources, "host-network-sources", f.HostNetworkSources, "Comma-separated list of sources from which the Kubelet allows pods to use of host network.") + fs.MarkDeprecated("host-network-sources", "will be removed in a future version") + // TODO(#58010:v1.12.0): Remove --host-pid-sources, it is deprecated + fs.StringSliceVar(&f.HostPIDSources, "host-pid-sources", f.HostPIDSources, "Comma-separated list of sources from which the Kubelet allows pods to use the host pid namespace.") + fs.MarkDeprecated("host-pid-sources", "will be removed in a future version") + // TODO(#58010:v1.12.0): Remove --host-ipc-sources, it is deprecated + fs.StringSliceVar(&f.HostIPCSources, "host-ipc-sources", f.HostIPCSources, "Comma-separated list of sources from which the Kubelet allows pods to use the host ipc namespace.") + fs.MarkDeprecated("host-ipc-sources", "will be removed in a future version") } @@ -414,10 +443,6 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat "are generated for the public address and saved to the directory passed to --cert-dir.") fs.StringVar(&c.TLSPrivateKeyFile, "tls-private-key-file", c.TLSPrivateKeyFile, "File containing x509 private key matching --tls-cert-file.") - fs.BoolVar(&c.AllowPrivileged, "allow-privileged", c.AllowPrivileged, "If true, allow containers to request privileged mode.") - fs.StringSliceVar(&c.HostNetworkSources, 
"host-network-sources", c.HostNetworkSources, "Comma-separated list of sources from which the Kubelet allows pods to use of host network.") - fs.StringSliceVar(&c.HostPIDSources, "host-pid-sources", c.HostPIDSources, "Comma-separated list of sources from which the Kubelet allows pods to use the host pid namespace.") - fs.StringSliceVar(&c.HostIPCSources, "host-ipc-sources", c.HostIPCSources, "Comma-separated list of sources from which the Kubelet allows pods to use the host ipc namespace.") fs.Int32Var(&c.RegistryPullQPS, "registry-qps", c.RegistryPullQPS, "If > 0, limit registry pull QPS to this value. If 0, unlimited.") fs.Int32Var(&c.RegistryBurst, "registry-burst", c.RegistryBurst, "Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0") fs.Int32Var(&c.EventRecordQPS, "event-qps", c.EventRecordQPS, "If > 0, limit event creations per second to this value. If 0, unlimited.") diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 3d63ad272a0..a3eb633f980 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -662,17 +662,17 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal. // prefer this to be done as part of an independent validation step on the // KubeletConfiguration. But as far as I can tell, we don't have an explicit // place for validation of the KubeletConfiguration yet. 
- hostNetworkSources, err := kubetypes.GetValidatedSources(kubeCfg.HostNetworkSources) + hostNetworkSources, err := kubetypes.GetValidatedSources(kubeFlags.HostNetworkSources) if err != nil { return err } - hostPIDSources, err := kubetypes.GetValidatedSources(kubeCfg.HostPIDSources) + hostPIDSources, err := kubetypes.GetValidatedSources(kubeFlags.HostPIDSources) if err != nil { return err } - hostIPCSources, err := kubetypes.GetValidatedSources(kubeCfg.HostIPCSources) + hostIPCSources, err := kubetypes.GetValidatedSources(kubeFlags.HostIPCSources) if err != nil { return err } @@ -682,7 +682,7 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal. HostPIDSources: hostPIDSources, HostIPCSources: hostIPCSources, } - capabilities.Setup(kubeCfg.AllowPrivileged, privilegedSources, 0) + capabilities.Setup(kubeFlags.AllowPrivileged, privilegedSources, 0) credentialprovider.SetPreferredDockercfgPath(kubeFlags.RootDirectory) glog.V(2).Infof("Using root directory: %v", kubeFlags.RootDirectory) diff --git a/pkg/kubelet/apis/kubeletconfig/fuzzer/fuzzer.go b/pkg/kubelet/apis/kubeletconfig/fuzzer/fuzzer.go index 8fb0ca7ca5b..ef1ff425067 100644 --- a/pkg/kubelet/apis/kubeletconfig/fuzzer/fuzzer.go +++ b/pkg/kubelet/apis/kubeletconfig/fuzzer/fuzzer.go @@ -56,9 +56,6 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { obj.FileCheckFrequency = metav1.Duration{Duration: 20 * time.Second} obj.HealthzBindAddress = "127.0.0.1" obj.HealthzPort = 10248 - obj.HostNetworkSources = []string{kubetypes.AllSource} - obj.HostPIDSources = []string{kubetypes.AllSource} - obj.HostIPCSources = []string{kubetypes.AllSource} obj.HTTPCheckFrequency = metav1.Duration{Duration: 20 * time.Second} obj.ImageMinimumGCAge = metav1.Duration{Duration: 2 * time.Minute} obj.ImageGCHighThresholdPercent = 85 diff --git a/pkg/kubelet/apis/kubeletconfig/helpers_test.go b/pkg/kubelet/apis/kubeletconfig/helpers_test.go index 5bf10e67a3a..59c3b5d05b6 100644 --- 
a/pkg/kubelet/apis/kubeletconfig/helpers_test.go +++ b/pkg/kubelet/apis/kubeletconfig/helpers_test.go @@ -138,7 +138,6 @@ var ( // KubeletConfiguration fields that do not contain file paths. kubeletConfigurationNonPathFieldPaths = sets.NewString( "Address", - "AllowPrivileged", "Authentication.Anonymous.Enabled", "Authentication.Webhook.CacheTTL.Duration", "Authentication.Webhook.Enabled", @@ -176,9 +175,6 @@ var ( "HairpinMode", "HealthzBindAddress", "HealthzPort", - "HostIPCSources[*]", - "HostNetworkSources[*]", - "HostPIDSources[*]", "IPTablesDropBit", "IPTablesMasqueradeBit", "ImageGCHighThresholdPercent", diff --git a/pkg/kubelet/apis/kubeletconfig/types.go b/pkg/kubelet/apis/kubeletconfig/types.go index f4392f6ed6f..aee0077d47f 100644 --- a/pkg/kubelet/apis/kubeletconfig/types.go +++ b/pkg/kubelet/apis/kubeletconfig/types.go @@ -89,19 +89,6 @@ type KubeletConfiguration struct { Authentication KubeletAuthentication // authorization specifies how requests to the Kubelet's server are authorized Authorization KubeletAuthorization - // allowPrivileged enables containers to request privileged mode. - // Defaults to false. - AllowPrivileged bool - // hostNetworkSources is a comma-separated list of sources from which the - // Kubelet allows pods to use of host network. Defaults to "*". Valid - // options are "file", "http", "api", and "*" (all sources). - HostNetworkSources []string - // hostPIDSources is a comma-separated list of sources from which the - // Kubelet allows pods to use the host pid namespace. Defaults to "*". - HostPIDSources []string - // hostIPCSources is a comma-separated list of sources from which the - // Kubelet allows pods to use the host ipc namespace. Defaults to "*". - HostIPCSources []string // registryPullQPS is the limit of registry pulls per second. If 0, // unlimited. Set to 0 for no limit. Defaults to 5.0. 
RegistryPullQPS int32 diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go index 030892ea908..fe46fb6d8a3 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go @@ -108,15 +108,6 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { if obj.HealthzPort == nil { obj.HealthzPort = utilpointer.Int32Ptr(10248) } - if obj.HostNetworkSources == nil { - obj.HostNetworkSources = []string{kubetypes.AllSource} - } - if obj.HostPIDSources == nil { - obj.HostPIDSources = []string{kubetypes.AllSource} - } - if obj.HostIPCSources == nil { - obj.HostIPCSources = []string{kubetypes.AllSource} - } if obj.HTTPCheckFrequency == zeroDuration { obj.HTTPCheckFrequency = metav1.Duration{Duration: 20 * time.Second} } diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go index bc49f3361a2..8d4ffe4a615 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go @@ -89,19 +89,6 @@ type KubeletConfiguration struct { Authentication KubeletAuthentication `json:"authentication"` // authorization specifies how requests to the Kubelet's server are authorized Authorization KubeletAuthorization `json:"authorization"` - // allowPrivileged enables containers to request privileged mode. - // Defaults to false. - AllowPrivileged *bool `json:"allowPrivileged"` - // hostNetworkSources is a comma-separated list of sources from which the - // Kubelet allows pods to use of host network. Defaults to "*". Valid - // options are "file", "http", "api", and "*" (all sources). - HostNetworkSources []string `json:"hostNetworkSources"` - // hostPIDSources is a comma-separated list of sources from which the - // Kubelet allows pods to use the host pid namespace. Defaults to "*". 
- HostPIDSources []string `json:"hostPIDSources"` - // hostIPCSources is a comma-separated list of sources from which the - // Kubelet allows pods to use the host ipc namespace. Defaults to "*". - HostIPCSources []string `json:"hostIPCSources"` // registryPullQPS is the limit of registry pulls per second. If 0, // unlimited. Set to 0 for no limit. Defaults to 5.0. RegistryPullQPS *int32 `json:"registryPullQPS"` diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go index b8d27ea68e7..9368d7c0a22 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go @@ -164,12 +164,6 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura if err := Convert_v1alpha1_KubeletAuthorization_To_kubeletconfig_KubeletAuthorization(&in.Authorization, &out.Authorization, s); err != nil { return err } - if err := v1.Convert_Pointer_bool_To_bool(&in.AllowPrivileged, &out.AllowPrivileged, s); err != nil { - return err - } - out.HostNetworkSources = *(*[]string)(unsafe.Pointer(&in.HostNetworkSources)) - out.HostPIDSources = *(*[]string)(unsafe.Pointer(&in.HostPIDSources)) - out.HostIPCSources = *(*[]string)(unsafe.Pointer(&in.HostIPCSources)) if err := v1.Convert_Pointer_int32_To_int32(&in.RegistryPullQPS, &out.RegistryPullQPS, s); err != nil { return err } @@ -291,12 +285,6 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura if err := Convert_kubeletconfig_KubeletAuthorization_To_v1alpha1_KubeletAuthorization(&in.Authorization, &out.Authorization, s); err != nil { return err } - if err := v1.Convert_bool_To_Pointer_bool(&in.AllowPrivileged, &out.AllowPrivileged, s); err != nil { - return err - } - out.HostNetworkSources = *(*[]string)(unsafe.Pointer(&in.HostNetworkSources)) - out.HostPIDSources = *(*[]string)(unsafe.Pointer(&in.HostPIDSources)) - 
out.HostIPCSources = *(*[]string)(unsafe.Pointer(&in.HostIPCSources)) if err := v1.Convert_int32_To_Pointer_int32(&in.RegistryPullQPS, &out.RegistryPullQPS, s); err != nil { return err } diff --git a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go index 6165a73880b..700a997a931 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go @@ -134,30 +134,6 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { } in.Authentication.DeepCopyInto(&out.Authentication) out.Authorization = in.Authorization - if in.AllowPrivileged != nil { - in, out := &in.AllowPrivileged, &out.AllowPrivileged - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.HostNetworkSources != nil { - in, out := &in.HostNetworkSources, &out.HostNetworkSources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.HostPIDSources != nil { - in, out := &in.HostPIDSources, &out.HostPIDSources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.HostIPCSources != nil { - in, out := &in.HostIPCSources, &out.HostIPCSources - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.RegistryPullQPS != nil { in, out := &in.RegistryPullQPS, &out.RegistryPullQPS if *in == nil { diff --git a/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go b/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go index 8a6b1a2a515..eb788f3b4c2 100644 --- a/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go +++ b/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go @@ -107,21 +107,6 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { } out.Authentication = in.Authentication out.Authorization = in.Authorization - if in.HostNetworkSources != nil { - in, out := &in.HostNetworkSources, &out.HostNetworkSources - *out = make([]string, len(*in)) - 
copy(*out, *in) - } - if in.HostPIDSources != nil { - in, out := &in.HostPIDSources, &out.HostPIDSources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.HostIPCSources != nil { - in, out := &in.HostIPCSources, &out.HostIPCSources - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.ClusterDNS != nil { in, out := &in.ClusterDNS, &out.ClusterDNS *out = make([]string, len(*in)) diff --git a/test/e2e_node/services/kubelet.go b/test/e2e_node/services/kubelet.go index aa803859a39..59b147cb634 100644 --- a/test/e2e_node/services/kubelet.go +++ b/test/e2e_node/services/kubelet.go @@ -157,9 +157,6 @@ func (e *E2EServices) startKubelet() (*server, error) { kc.VolumeStatsAggPeriod = metav1.Duration{Duration: 10 * time.Second} // Aggregate volumes frequently so tests don't need to wait as long kubeletConfigFlags = append(kubeletConfigFlags, "volume-stats-agg-period") - kc.AllowPrivileged = true - kubeletConfigFlags = append(kubeletConfigFlags, "allow-privileged") - kc.SerializeImagePulls = false kubeletConfigFlags = append(kubeletConfigFlags, "serialize-image-pulls") @@ -264,6 +261,7 @@ func (e *E2EServices) startKubelet() (*server, error) { "--root-dir", KubeletRootDirectory, "--docker-disable-shared-pid=false", "--v", LOG_VERBOSITY_LEVEL, "--logtostderr", + "--allow-privileged", "true", ) // Apply test framework feature gates by default. 
This could also be overridden From f416e38a1e377c83d7defc49d4306c455de012de Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 9 Jan 2018 15:54:39 -0500 Subject: [PATCH 674/794] make controller port exposure optional --- cmd/kube-controller-manager/app/controllermanager.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 85dc51de5c8..25de45f5041 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -128,7 +128,9 @@ func Run(s *options.CMServer) error { return err } - go startHTTP(s) + if s.Port >= 0 { + go startHTTP(s) + } recorder := createRecorder(kubeClient) From 1834e682dc090690de266ecdec336c23944e1801 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 9 Jan 2018 13:45:53 -0800 Subject: [PATCH 675/794] removed deprecated windows install script from cluster see #49213 --- cluster/windows/kube-startup.ps1 | 91 -------------------------------- 1 file changed, 91 deletions(-) delete mode 100644 cluster/windows/kube-startup.ps1 diff --git a/cluster/windows/kube-startup.ps1 b/cluster/windows/kube-startup.ps1 deleted file mode 100644 index 4f531407bb9..00000000000 --- a/cluster/windows/kube-startup.ps1 +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# kube-startup.ps1 is used to run kubelet and kubeproxy as a process. It uses nssm (https://nssm.cc/) process manager to register kubelet and kube-proxy process, -# The processes can be viewed using TaskManager(Taskmgr.exe). -# Please note that this startup script does not start the API server. Kubernetes control plane currently runs on Linux -# and only Kubelet and Kube-Proxy can be run on Windows - -param ( - [Parameter(Mandatory=$true)][string]$ContainerNetwork, - [string]$InterfaceForServiceIP = "vEthernet (HNS Internal NIC)", - [string]$LogDirectory = "C:\temp", - [Parameter(Mandatory=$true)][string]$Hostname, - [Parameter(Mandatory=$true)][string]$APIServer, - [string]$InfraContainerImage = "apprenda/pause", - [string]$ClusterDNS = "10.0.0.10", - [string]$KubeletExePath = ".\kubelet.exe", - [string]$KubeProxyExePath = ".\kube-proxy.exe" -) - -$kubeletDirectory = (Get-Item $KubeletExePath).Directory.FullName -$kubeproxyDirectory = (Get-Item $KubeProxyExePath).Directory.FullName - -# Assemble the Kubelet executable arguments -$kubeletArgs = @("--hostname-override=$Hostname","--pod-infra-container-image=$InfraContainerImage","--resolv-conf=""""","--api-servers=$APIServer","--cluster-dns=$ClusterDNS") -# Assemble the kube-proxy executable arguments -$kubeproxyArgs = @("--hostname-override=$Hostname","--proxy-mode=userspace","--bind-address=$Hostname","--master=$APIServer") - -# Setup kubelet service -nssm install kubelet "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe" -nssm set kubelet Application "$KubeletExePath" -nssm set kubelet AppDirectory "$kubeletDirectory" -nssm set kubelet AppParameters $kubeletArgs -nssm set kubelet DisplayName kubelet -nssm set kubelet Description kubelet -nssm set kubelet Start SERVICE_AUTO_START -nssm set kubelet ObjectName LocalSystem -nssm set kubelet Type SERVICE_WIN32_OWN_PROCESS -# Delay restart if application runs for less than 1500 ms -nssm set kubelet AppThrottle 1500 -nssm set kubelet AppStdout 
"$LogDirectory\kubelet.log" -nssm set kubelet AppStderr "$LogDirectory\kubelet.err.log" -nssm set kubelet AppStdoutCreationDisposition 4 -nssm set kubelet AppStderrCreationDisposition 4 -nssm set kubelet AppRotateFiles 1 -nssm set kubelet AppRotateOnline 1 -# Rotate Logs Every 24 hours or 1 gb -nssm set kubelet AppRotateSeconds 86400 -nssm set kubelet AppRotateBytes 1073741824 -nssm set kubelet AppEnvironmentExtra CONTAINER_NETWORK=$ContainerNetwork - - -# Setup kube-proxy service -nssm install kube-proxy "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe" -nssm set kube-proxy Application "$KubeProxyExePath" -nssm set kube-proxy AppDirectory "$kubeproxyDirectory" -nssm set kube-proxy AppParameters $kubeproxyArgs -nssm set kube-proxy DisplayName kube-proxy -nssm set kube-proxy Description kube-proxy -nssm set kube-proxy Start SERVICE_AUTO_START -nssm set kube-proxy ObjectName LocalSystem -nssm set kube-proxy Type SERVICE_WIN32_OWN_PROCESS -# Delay restart if application runs for less than 1500 ms -nssm set kube-proxy AppThrottle 1500 -nssm set kube-proxy AppStdout "$LogDirectory\kube-proxy.log" -nssm set kube-proxy AppStderr "$LogDirectory\kube-proxy.err.log" -nssm set kube-proxy AppStdoutCreationDisposition 4 -nssm set kube-proxy AppStderrCreationDisposition 4 -nssm set kube-proxy AppRotateFiles 1 -nssm set kube-proxy AppRotateOnline 1 -# Rotate Logs Every 24 hours or 1 gb -nssm set kube-proxy AppRotateSeconds 86400 -nssm set kube-proxy AppRotateBytes 1073741824 -nssm set kube-proxy AppEnvironmentExtra INTERFACE_TO_ADD_SERVICE_IP=$InterfaceForServiceIP - -# Start kubelet and kube-proxy Services -echo "Starting kubelet" -Start-Service kubelet -echo "Starting kube-proxy" -Start-Service kube-proxy From 29fa89d9d2e43be1be894bcb6c3e185796fe4443 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 9 Jan 2018 13:48:23 -0800 Subject: [PATCH 676/794] remove deprecated openstack heat this directory hasn't been touched in a year. 
see #49213 --- cluster/openstack-heat/config-default.sh | 81 --- cluster/openstack-heat/config-image.sh | 35 -- cluster/openstack-heat/config-test.sh | 19 - .../fragments/configure-proxy.sh | 70 --- .../fragments/configure-salt.yaml | 68 --- .../deploy-kube-auth-files-master.yaml | 44 -- .../deploy-kube-auth-files-node.yaml | 45 -- .../fragments/hostname-hack.sh | 23 - .../fragments/hostname-hack.yaml | 9 - .../kubernetes-heat/fragments/kube-user.yaml | 10 - .../fragments/provision-network-master.sh | 23 - .../fragments/provision-network-node.sh | 25 - .../kubernetes-heat/fragments/run-salt.sh | 53 -- .../fragments/write-heat-params.yaml | 11 - .../kubernetes-heat/kubecluster.yaml | 465 ------------------ .../kubernetes-heat/kubeminion.yaml | 314 ------------ cluster/openstack-heat/openrc-default.sh | 26 - cluster/openstack-heat/openrc-swift.sh | 27 - cluster/openstack-heat/util.sh | 293 ----------- 19 files changed, 1641 deletions(-) delete mode 100644 cluster/openstack-heat/config-default.sh delete mode 100644 cluster/openstack-heat/config-image.sh delete mode 100644 cluster/openstack-heat/config-test.sh delete mode 100644 cluster/openstack-heat/kubernetes-heat/fragments/configure-proxy.sh delete mode 100644 cluster/openstack-heat/kubernetes-heat/fragments/configure-salt.yaml delete mode 100644 cluster/openstack-heat/kubernetes-heat/fragments/deploy-kube-auth-files-master.yaml delete mode 100644 cluster/openstack-heat/kubernetes-heat/fragments/deploy-kube-auth-files-node.yaml delete mode 100644 cluster/openstack-heat/kubernetes-heat/fragments/hostname-hack.sh delete mode 100644 cluster/openstack-heat/kubernetes-heat/fragments/hostname-hack.yaml delete mode 100644 cluster/openstack-heat/kubernetes-heat/fragments/kube-user.yaml delete mode 100644 cluster/openstack-heat/kubernetes-heat/fragments/provision-network-master.sh delete mode 100644 cluster/openstack-heat/kubernetes-heat/fragments/provision-network-node.sh delete mode 100644 
cluster/openstack-heat/kubernetes-heat/fragments/run-salt.sh delete mode 100644 cluster/openstack-heat/kubernetes-heat/fragments/write-heat-params.yaml delete mode 100644 cluster/openstack-heat/kubernetes-heat/kubecluster.yaml delete mode 100644 cluster/openstack-heat/kubernetes-heat/kubeminion.yaml delete mode 100644 cluster/openstack-heat/openrc-default.sh delete mode 100644 cluster/openstack-heat/openrc-swift.sh delete mode 100644 cluster/openstack-heat/util.sh diff --git a/cluster/openstack-heat/config-default.sh b/cluster/openstack-heat/config-default.sh deleted file mode 100644 index bc6fb3aa726..00000000000 --- a/cluster/openstack-heat/config-default.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -## Contains configuration values for the Openstack cluster - -# Stack name -STACK_NAME=${STACK_NAME:-kube-stack} - -# Keypair for kubernetes stack -KUBERNETES_KEYPAIR_NAME=${KUBERNETES_KEYPAIR_NAME:-kubernetes_keypair} - -# Kubernetes release tar file -KUBERNETES_RELEASE_TAR=${KUBERNETES_RELEASE_TAR:-kubernetes-server-linux-amd64.tar.gz} - -NUMBER_OF_MINIONS=${NUMBER_OF_MINIONS-3} - -MAX_NUMBER_OF_MINIONS=${MAX_NUMBER_OF_MINIONS:-3} - -MASTER_FLAVOR=${MASTER_FLAVOR:-m1.medium} - -MINION_FLAVOR=${MINION_FLAVOR:-m1.medium} - -EXTERNAL_NETWORK=${EXTERNAL_NETWORK:-public} - -LBAAS_VERSION=${LBAAS_VERSION:-} - -FIXED_NETWORK_CIDR=${FIXED_NETWORK_CIDR:-10.0.0.0/24} -SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16} -CLUSTER_IP_RANGE=${CLUSTER_IP_RANGE:-10.244.0.0/16} - -SWIFT_SERVER_URL=${SWIFT_SERVER_URL:-} - -# The name of the object store container to use -SWIFT_OBJECT_STORE=${SWIFT_OBJECT_STORE:-kubernetes} - -# Flag indicates if new image must be created. If 'false' then image with IMAGE_ID will be used. -# If 'true' then new image will be created from file config-image.sh -CREATE_IMAGE=${CREATE_IMAGE:-true} # use "true" for devstack - -# Flag indicates if image should be downloaded -DOWNLOAD_IMAGE=${DOWNLOAD_IMAGE:-true} - -# Image id which will be used for kubernetes stack -IMAGE_ID=${IMAGE_ID:-f0f394b1-5546-4b68-b2bc-8abe8a7e6b8b} - -# DNS server address -DNS_SERVER=${DNS_SERVER:-8.8.8.8} - -# Public RSA key path -CLIENT_PUBLIC_KEY_PATH=${CLIENT_PUBLIC_KEY_PATH:-~/.ssh/id_rsa.pub} - -# Max time period for stack provisioning. Time in minutes. -STACK_CREATE_TIMEOUT=${STACK_CREATE_TIMEOUT:-60} - -# Enable Proxy, if true kube-up will apply your current proxy settings(defined by *_PROXY environment variables) to the deployment. -ENABLE_PROXY=${ENABLE_PROXY:-false} - -# Per-protocol proxy settings. 
-FTP_PROXY=${FTP_PROXY:-} -HTTP_PROXY=${HTTP_PROXY:-} -HTTPS_PROXY=${HTTPS_PROXY:-} -SOCKS_PROXY=${SOCKS_PROXY:-} - -# IPs and Domains that bypass the proxy. -NO_PROXY=${NO_PROXY:-} - -# Whether to assign floating IPs to minions -ASSIGN_FLOATING_IP=${ASSIGN_FLOATING_IP:-true} diff --git a/cluster/openstack-heat/config-image.sh b/cluster/openstack-heat/config-image.sh deleted file mode 100644 index 90ced8019c4..00000000000 --- a/cluster/openstack-heat/config-image.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## Contains configuration values for new image. It is skip when CREATE_IMAGE=false - -# Image name which will be displayed in OpenStack -OPENSTACK_IMAGE_NAME=${OPENSTACK_IMAGE_NAME:-CentOS-7-x86_64-GenericCloud-1604} - -# Downloaded image name for Openstack project -IMAGE_FILE=${IMAGE_FILE:-CentOS-7-x86_64-GenericCloud-1604.qcow2} - -# Absolute path where image file is stored. -IMAGE_PATH=${IMAGE_PATH:-~/Downloads/openstack} - -# The URL basepath for downloading the image -IMAGE_URL_PATH=${IMAGE_URL_PATH:-http://cloud.centos.org/centos/7/images} - -# The disk format of the image. Acceptable formats are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso. -IMAGE_FORMAT=${IMAGE_FORMAT:-qcow2} - -# The container format of the image. Acceptable formats are ami, ari, aki, bare, docker, and ovf. 
-CONTAINER_FORMAT=${CONTAINER_FORMAT:-bare} diff --git a/cluster/openstack-heat/config-test.sh b/cluster/openstack-heat/config-test.sh deleted file mode 100644 index afeec36dfb6..00000000000 --- a/cluster/openstack-heat/config-test.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## Contains configuration values for interacting with the Ubuntu cluster in test mode -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. -source "${KUBE_ROOT}/cluster/openstack-heat/config-default.sh" diff --git a/cluster/openstack-heat/kubernetes-heat/fragments/configure-proxy.sh b/cluster/openstack-heat/kubernetes-heat/fragments/configure-proxy.sh deleted file mode 100644 index 2aa4df7cf9e..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/configure-proxy.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -o errexit -set -o nounset -set -o pipefail - -# The contents of these variables swapped in by heat via environments presented to kube-up.sh - -export ETC_ENVIRONMENT='FTP_PROXY=$FTP_PROXY -HTTP_PROXY=$HTTP_PROXY -HTTPS_PROXY=$HTTPS_PROXY -SOCKS_PROXY=$SOCKS_PROXY -NO_PROXY=$NO_PROXY -ftp_proxy=$FTP_PROXY -http_proxy=$HTTP_PROXY -https_proxy=$HTTPS_PROXY -socks_proxy=$SOCKS_PROXY -no_proxy=$NO_PROXY -' - -export ETC_PROFILE_D='export FTP_PROXY=$FTP_PROXY -export HTTP_PROXY=$HTTP_PROXY -export HTTPS_PROXY=$HTTPS_PROXY -export SOCKS_PROXY=$SOCKS_PROXY -export NO_PROXY=$NO_PROXY -export ftp_proxy=$FTP_PROXY -export http_proxy=$HTTP_PROXY -export https_proxy=$HTTPS_PROXY -export socks_proxy=$SOCKS_PROXY -export no_proxy=$NO_PROXY -' - -export DOCKER_PROXY='[Service] - Environment="HTTP_PROXY=$HTTP_PROXY" - Environment="HTTPS_PROXY=$HTTPS_PROXY" - Environment="SOCKS_PROXY=$SOCKS_PROXY" - Environment="NO_PROXY=$NO_PROXY" - Environment="ftp_proxy=$FTP_PROXY" - Environment="http_proxy=$HTTP_PROXY" - Environment="https_proxy=$HTTPS_PROXY" - Environment="socks_proxy=$SOCKS_PROXY" - Environment="no_proxy=$NO_PROXY" -' - -# This again is set by heat -ENABLE_PROXY='$ENABLE_PROXY' - -# Heat itself doesn't have conditionals, so this is how we set up our proxy without breaking non-proxy setups. 
-if [[ "${ENABLE_PROXY}" == "true" ]]; then - mkdir -p /etc/systemd/system/docker.service.d/ - - echo "${ETC_ENVIRONMENT}" >> /etc/environment - echo "${ETC_PROFILE_D}" > /etc/profile.d/proxy_config.sh - echo "${DOCKER_PROXY}" > etc/systemd/system/docker.service.d/http-proxy.conf - echo "proxy=$HTTP_PROXY" >> /etc/yum.conf -fi diff --git a/cluster/openstack-heat/kubernetes-heat/fragments/configure-salt.yaml b/cluster/openstack-heat/kubernetes-heat/fragments/configure-salt.yaml deleted file mode 100644 index 8b2e5662101..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/configure-salt.yaml +++ /dev/null @@ -1,68 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -bootcmd: - - mkdir -p /etc/salt/minion.d - - mkdir -p /srv/salt-overlay/pillar -write_files: - - path: /etc/salt/minion.d/log-level-debug.conf - content: | - log_level: warning - log_level_logfile: warning - - path: /etc/salt/minion.d/grains.conf - content: | - grains: - node_ip: $MASTER_IP - cbr-cidr: $MASTER_IP_RANGE - publicAddressOverride: $MASTER_IP - network_mode: openvswitch - networkInterfaceName: eth0 - api_servers: $MASTER_IP - kubelet_kubeconfig: /srv/salt-overlay/salt/kubelet/kubeconfig - cloud: openstack - cloud_config: /srv/kubernetes/openstack.conf - roles: - - $role - runtime_config: "" - docker_opts: "--bridge=cbr0 --iptables=false --ip-masq=false" - master_extra_sans: "DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local,DNS:kubernetes-master" - keep_host_etcd: true - kube_user: $KUBE_USER - - path: /srv/kubernetes/openstack.conf - content: | - [Global] - auth-url=$OS_AUTH_URL - username=$OS_USERNAME - password=$OS_PASSWORD - region=$OS_REGION_NAME - tenant-name=$OS_TENANT_NAME - domain-name=$OS_USER_DOMAIN_NAME - [LoadBalancer] - lb-version=$LBAAS_VERSION - subnet-id=$SUBNET_ID - floating-network-id=$FLOATING_NETWORK_ID - [Route] - router-id=$router_id - - path: 
/srv/salt-overlay/pillar/cluster-params.sls - content: | - allocate_node_cidrs: "true" - service_cluster_ip_range: 10.246.0.0/16 - cert_ip: 10.246.0.1 - enable_cluster_monitoring: influxdb - enable_cluster_logging: "true" - enable_cluster_ui: "true" - enable_node_logging: "true" - logging_destination: elasticsearch - elasticsearch_replicas: "1" - enable_cluster_dns: "true" - dns_server: 10.246.0.10 - dns_domain: cluster.local - enable_dns_horizontal_autoscaler: "false" - instance_prefix: kubernetes - admission_control: Initializers,NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota - enable_cpu_cfs_quota: "true" - network_provider: none - cluster_cidr: "$cluster_cidr" - opencontrail_tag: R2.20 - opencontrail_kubernetes_tag: master - opencontrail_public_subnet: 10.1.0.0/16 - e2e_storage_test_environment: "false" diff --git a/cluster/openstack-heat/kubernetes-heat/fragments/deploy-kube-auth-files-master.yaml b/cluster/openstack-heat/kubernetes-heat/fragments/deploy-kube-auth-files-master.yaml deleted file mode 100644 index 2e8d230192e..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/deploy-kube-auth-files-master.yaml +++ /dev/null @@ -1,44 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -bootcmd: - - mkdir -p /srv/salt-overlay/salt/kube-apiserver - - mkdir -p /srv/salt-overlay/salt/kubelet -write_files: - - path: /srv/salt-overlay/salt/kube-apiserver/basic_auth.csv - permissions: "0600" - content: | - $apiserver_password,$apiserver_user,admin - - path: /srv/salt-overlay/salt/kube-apiserver/known_tokens.csv - permissions: "0600" - content: | - $token_kubelet,kubelet,kubelet - $token_kube_proxy,kube_proxy,kube_proxy - TokenSystemScheduler,system:scheduler,system:scheduler - TokenSystemControllerManager,system:controller_manager,system:controller_manager - TokenSystemLogging,system:logging,system:logging - 
TokenSystemMonitoring,system:monitoring,system:monitoring - TokenSystemDns,system:dns,system:dns - - path: /srv/salt-overlay/salt/kubelet/kubernetes_auth - permissions: "0600" - content: | - {"BearerToken": "$token_kubelet", "Insecure": true } - - path: /srv/salt-overlay/salt/kubelet/kubeconfig - permissions: "0600" - content: | - apiVersion: v1 - kind: Config - users: - - name: kubelet - user: - token: $token_kubelet - clusters: - - name: local - cluster: - server: https://$MASTER_IP - insecure-skip-tls-verify: true - contexts: - - context: - cluster: local - user: kubelet - name: service-account-context - current-context: service-account-context diff --git a/cluster/openstack-heat/kubernetes-heat/fragments/deploy-kube-auth-files-node.yaml b/cluster/openstack-heat/kubernetes-heat/fragments/deploy-kube-auth-files-node.yaml deleted file mode 100644 index 21d4586bae8..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/deploy-kube-auth-files-node.yaml +++ /dev/null @@ -1,45 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -bootcmd: - - mkdir -p /srv/salt-overlay/salt/kubelet - - mkdir -p /srv/salt-overlay/salt/kube-proxy -write_files: - - path: /srv/salt-overlay/salt/kubelet/kubeconfig - permissions: "0600" - content: | - apiVersion: v1 - kind: Config - users: - - name: kubelet - user: - token: $token_kubelet - clusters: - - name: local - cluster: - server: https://$MASTER_IP - insecure-skip-tls-verify: true - contexts: - - context: - cluster: local - user: kubelet - name: service-account-context - current-context: service-account-context - - path: /srv/salt-overlay/salt/kube-proxy/kubeconfig - permissions: "0600" - content: | - apiVersion: v1 - kind: Config - users: - - name: kube-proxy - user: - token: $token_kube_proxy - clusters: - - name: local - cluster: - insecure-skip-tls-verify: true - contexts: - - context: - cluster: local - user: kube-proxy - name: service-account-context - current-context: service-account-context diff 
--git a/cluster/openstack-heat/kubernetes-heat/fragments/hostname-hack.sh b/cluster/openstack-heat/kubernetes-heat/fragments/hostname-hack.sh deleted file mode 100644 index 67034749686..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/hostname-hack.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# Workaround for this, which has been fixed but not widely distributed: https://bugs.launchpad.net/cloud-init/+bug/1246485 -# See also http://blog.oddbit.com/2014/12/10/cloudinit-and-the-case-of-the-changing-hostname/ -hostname > /etc/hostname diff --git a/cluster/openstack-heat/kubernetes-heat/fragments/hostname-hack.yaml b/cluster/openstack-heat/kubernetes-heat/fragments/hostname-hack.yaml deleted file mode 100644 index 8e3b2ac0158..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/hostname-hack.yaml +++ /dev/null @@ -1,9 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) - -write_files: - - path: /etc/cloud/cloud.cfg.d/99_hostname.cfg - owner: "root:root" - permissions: "0644" - content: | - preserve_hostname: true diff --git a/cluster/openstack-heat/kubernetes-heat/fragments/kube-user.yaml b/cluster/openstack-heat/kubernetes-heat/fragments/kube-user.yaml deleted file mode 100644 index 4e7477d6401..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/kube-user.yaml 
+++ /dev/null @@ -1,10 +0,0 @@ -#cloud-config -system_info: - default_user: - name: minion - lock_passwd: true - gecos: Kubernetes Interactive User - groups: [wheel, adm, systemd-journal] - sudo: ["ALL=(ALL) NOPASSWD:ALL"] - shell: /bin/bash - diff --git a/cluster/openstack-heat/kubernetes-heat/fragments/provision-network-master.sh b/cluster/openstack-heat/kubernetes-heat/fragments/provision-network-master.sh deleted file mode 100644 index ad75cc2a774..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/provision-network-master.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -. /etc/sysconfig/heat-params - -# nothing to do diff --git a/cluster/openstack-heat/kubernetes-heat/fragments/provision-network-node.sh b/cluster/openstack-heat/kubernetes-heat/fragments/provision-network-node.sh deleted file mode 100644 index 1b30d29b092..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/provision-network-node.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# Kubernetes node shoud be able to resolve its hostname. -# In some cloud providers, myhostname is not enabled by default. -grep '^hosts:.*myhostname' /etc/nsswitch.conf || ( - sed -e 's/^hosts:\(.*\)/hosts:\1 myhostname/' -i /etc/nsswitch.conf -) diff --git a/cluster/openstack-heat/kubernetes-heat/fragments/run-salt.sh b/cluster/openstack-heat/kubernetes-heat/fragments/run-salt.sh deleted file mode 100644 index 72ce6d6a48a..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/run-salt.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -. /etc/sysconfig/heat-params - -#Reads in profile, need to relax restrictions for some OSes. -set +o nounset -. 
/etc/profile -set -o nounset - -rm -rf /kube-install -mkdir -p /kube-install -cd /kube-install - -curl "${KUBERNETES_SERVER_URL}" -o kubernetes-server.tar.gz -curl "${KUBERNETES_SALT_URL}" -o kubernetes-salt.tar.gz - -tar xzf kubernetes-salt.tar.gz -./kubernetes/saltbase/install.sh kubernetes-server.tar.gz - -if ! which salt-call >/dev/null 2>&1; then - echo "+++ Install salt binaries from https://bootstrap.saltstack.com" - # Install salt binaries but do not start daemon after installation - curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- "-X" -fi - -# Salt server runs at locahost -echo "127.0.0.1 salt" >> /etc/hosts - -echo "+++ run salt-call and finalize installation" -# Run salt-call -# salt-call wants to start docker daemon but is unable to. -# See . -# Run salt-call in background and make cloud-final finished. -# Salt-call might be unstable in some environments, execute it twice. -salt-call --local state.highstate && salt-call --local state.highstate && $$wc_notify --data-binary '{"status": "SUCCESS"}' || $$wc_notify --data-binary '{"status": "FAILURE"}' & diff --git a/cluster/openstack-heat/kubernetes-heat/fragments/write-heat-params.yaml b/cluster/openstack-heat/kubernetes-heat/fragments/write-heat-params.yaml deleted file mode 100644 index d8ee9da0f9f..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/fragments/write-heat-params.yaml +++ /dev/null @@ -1,11 +0,0 @@ -#cloud-config -merge_how: dict(recurse_array)+list(append) -write_files: - - path: /etc/sysconfig/heat-params - owner: "root:root" - permissions: "0644" - content: | - KUBERNETES_SERVER_URL="$KUBERNETES_SERVER_URL" - KUBERNETES_SALT_URL="$KUBERNETES_SALT_URL" - MASTER_IP=$MASTER_IP - CONTAINER_SUBNET=10.246.0.0/16 diff --git a/cluster/openstack-heat/kubernetes-heat/kubecluster.yaml b/cluster/openstack-heat/kubernetes-heat/kubecluster.yaml deleted file mode 100644 index 3cd5e5d885f..00000000000 --- 
a/cluster/openstack-heat/kubernetes-heat/kubecluster.yaml +++ /dev/null @@ -1,465 +0,0 @@ -heat_template_version: 2016-10-14 - -description: > - Kubernetes cluster with one master and one or more worker nodes - (as specified by the number_of_minions parameter, which defaults to 3). - -parameters: - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - default: public - - lbaas_version: - type: string - description: version of OpenStack LBaaS service. not specifying means auto detect - - server_image: - type: string - description: glance image used to boot the server - - master_flavor: - type: string - default: m1.small - description: flavor to use when booting the server - - minion_flavor: - type: string - default: m1.small - description: flavor to use when booting the server - - dns_nameserver: - type: string - description: address of a dns nameserver reachable in your environment - default: 8.8.8.8 - - number_of_minions: - type: number - description: how many kubernetes minions to spawn initially - default: 3 - - max_number_of_minions: - type: number - description: maximum number of kubernetes minions to spawn - default: 10 - - fixed_network_cidr: - type: string - description: network range for fixed ip network - default: 10.0.0.0/24 - - cluster_cidr: - type: string - description: network range for pod IPs - default: 10.244.0.0/16 - - service_cluster_cidr: - type: string - description: network range for service IPs - default: 10.10.0.0/16 - - master_pod_cidr: - type: string - description: >- - network range for master pod IPs (ignored, but must not conflict - with other subnets) - default: 10.245.1.0/24 - - kubernetes_server_url: - type: string - description: URL of kubernetes server binary. Must be tar.gz. - - kubernetes_salt_url: - type: string - description: URL of kubernetes salt scripts. Must be tar.gz. 
- - apiserver_user: - type: string - description: User name used for api-server - default: user - - apiserver_password: - type: string - description: Password used for api-server - default: password - - token_kubelet: - type: string - description: Token used by kubelet - default: TokenKubelet - - token_kube_proxy: - type: string - description: Token used by kube-proxy - default: TokenKubeproxy - - wait_condition_timeout: - type: number - description : > - timeout for the Wait Conditions - default: 6000 - - os_auth_url: - type: string - description: OpenStack Auth URL - default: false - - os_username: - type: string - description: OpenStack Username - default: false - - os_password: - type: string - description: OpenStack Password - default: false - - os_region_name: - type: string - description: OpenStack Region Name - default: false - - os_tenant_name: - type: string - description: OpenStack Tenant Name - default: false - - os_user_domain_name: - type: string - description: OpenStack User Domain Name (Domain-level authorization scope for keystone v3) - - enable_proxy: - type: string - description: Whether or not to enable proxy settings - default: false - - ftp_proxy: - type: string - description: FTP Proxy URL - default: localhost - - http_proxy: - type: string - description: HTTP Proxy URL - default: localhost - - https_proxy: - type: string - description: HTTPS Proxy URL - default: localhost - - socks_proxy: - type: string - description: SOCKS Proxy URL - default: localhost - - no_proxy: - type: string - description: Comma seperated list of domains/addresses that bypass proxying. 
- default: localhost - - assign_floating_ip: - type: boolean - description: Indicates whether floating IPs will be generated for minions - default: true - -resources: - - master_wait_handle: - type: OS::Heat::WaitConditionHandle - - master_wait_condition: - type: OS::Heat::WaitCondition - depends_on: kube_master - properties: - handle: {get_resource: master_wait_handle} - timeout: {get_param: wait_condition_timeout} - - ###################################################################### - # - # network resources. allocate a network and router for our server. - # - - fixed_network: - type: OS::Neutron::Net - - fixed_subnet: - type: OS::Neutron::Subnet - properties: - cidr: {get_param: fixed_network_cidr} - network: {get_resource: fixed_network} - dns_nameservers: - - {get_param: dns_nameserver} - - extrouter: - type: OS::Neutron::Router - properties: - external_gateway_info: - network: {get_param: external_network} - - extrouter_inside: - type: OS::Neutron::RouterInterface - properties: - router_id: {get_resource: extrouter} - subnet: {get_resource: fixed_subnet} - - ###################################################################### - # - # security groups. we need to permit network traffic of various - # sorts. - # - - secgroup_base: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - remote_mode: remote_group_id - - secgroup_master: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: tcp # api-server - port_range_min: 443 - port_range_max: 443 - - secgroup_node: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - - protocol: udp - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive. 
- # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/write-heat-params.yaml} - params: - "$KUBERNETES_SERVER_URL": {get_param: kubernetes_server_url} - "$KUBERNETES_SALT_URL": {get_param: kubernetes_salt_url} - "$MASTER_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - - proxy_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/configure-proxy.sh} - params: - "$ENABLE_PROXY": {get_param: enable_proxy } - "$FTP_PROXY": {get_param: ftp_proxy } - "$HTTP_PROXY": {get_param: http_proxy } - "$HTTPS_PROXY": {get_param: https_proxy } - "$SOCKS_PROXY": {get_param: socks_proxy } - "$NO_PROXY": {get_param: no_proxy } - - hostname_hack: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/hostname-hack.yaml} - - hostname_hack_script: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/hostname-hack.sh} - - kube_user: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/kube-user.yaml} - - provision_network_master: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/provision-network-master.sh} - - deploy_kube_auth_files_master: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/deploy-kube-auth-files-master.yaml} - params: - "$apiserver_user": {get_param: apiserver_user} - "$apiserver_password": {get_param: apiserver_password} - "$token_kubelet": {get_param: token_kubelet} - "$token_kube_proxy": {get_param: token_kube_proxy} - - configure_salt_master: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/configure-salt.yaml} - params: - "$MASTER_IP": {get_attr: 
[kube_master_eth0, fixed_ips, 0, ip_address]} - "$OS_AUTH_URL": {get_param: os_auth_url} - "$OS_USERNAME": {get_param: os_username} - "$OS_PASSWORD": {get_param: os_password} - "$OS_REGION_NAME": {get_param: os_region_name} - "$OS_TENANT_NAME": {get_param: os_tenant_name} - "$OS_USER_DOMAIN_NAME": {get_param: os_user_domain_name} - "$LBAAS_VERSION": {get_param: lbaas_version} - "$SUBNET_ID": {get_resource: fixed_subnet} - "$FLOATING_NETWORK_ID": {get_attr: [kube_master_floating, floating_network_id]} - "$role": "kubernetes-master" - "$router_id": {get_resource: extrouter} - "$cluster_cidr": {get_param: cluster_cidr} - "$MASTER_IP_RANGE": {get_param: master_pod_cidr} - - run_salt: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/run-salt.sh} - params: - "$$wc_notify": {get_attr: [master_wait_handle, curl_cli]} - - kube_master_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: write_heat_params} - - config: {get_resource: proxy_config} - - config: {get_resource: hostname_hack} - - config: {get_resource: hostname_hack_script} - - config: {get_resource: kube_user} - - config: {get_resource: provision_network_master} - - config: {get_resource: deploy_kube_auth_files_master} - - config: {get_resource: configure_salt_master} - - config: {get_resource: run_salt} - - ###################################################################### - # - # kubernetes master server. 
- # - - kube_master: - type: OS::Nova::Server - depends_on: - - extrouter_inside - properties: - image: {get_param: server_image} - flavor: {get_param: master_flavor} - key_name: {get_param: ssh_key_name} - user_data_format: RAW - user_data: {get_resource: kube_master_init} - networks: - - port: {get_resource: kube_master_eth0} - name: - list_join: [-, [{get_param: "OS::stack_name"}, master]] - - kube_master_eth0: - type: OS::Neutron::Port - properties: - network: {get_resource: fixed_network} - security_groups: - - {get_resource: secgroup_base} - - {get_resource: secgroup_master} - fixed_ips: - - subnet: {get_resource: fixed_subnet} - allowed_address_pairs: - - ip_address: 10.246.0.0/16 - replacement_policy: AUTO - - kube_master_floating: - type: OS::Neutron::FloatingIP - properties: - floating_network: {get_param: external_network} - port_id: {get_resource: kube_master_eth0} - - ###################################################################### - # - # kubernetes minions. This is an autoscaling group that will initially - # create minions, and will scale up to - # based on CPU utilization. 
- # - - kube_minions: - type: OS::Heat::AutoScalingGroup - depends_on: - - extrouter_inside - - master_wait_condition - properties: - resource: - type: kubeminion.yaml - properties: - kubernetes_server_url: {get_param: kubernetes_server_url} - kubernetes_salt_url: {get_param: kubernetes_salt_url} - ssh_key_name: {get_param: ssh_key_name} - server_image: {get_param: server_image} - minion_flavor: {get_param: minion_flavor} - token_kubelet: {get_param: token_kubelet} - token_kube_proxy: {get_param: token_kube_proxy} - fixed_network: {get_resource: fixed_network} - fixed_subnet: {get_resource: fixed_subnet} - cluster_cidr: {get_param: cluster_cidr} - kube_master_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} - external_network: {get_param: external_network} - wait_condition_timeout: {get_param: wait_condition_timeout} - metadata: {"metering.stack": {get_param: "OS::stack_id"}} - cluster_name: {get_param: "OS::stack_name"} - secgroup_base: {get_resource: secgroup_base} - secgroup_node: {get_resource: secgroup_node} - os_auth_url: {get_param: os_auth_url} - os_username: {get_param: os_username} - os_password: {get_param: os_password} - os_region_name: {get_param: os_region_name} - os_tenant_name: {get_param: os_tenant_name} - os_user_domain_name: {get_param: os_user_domain_name} - enable_proxy: {get_param: enable_proxy } - ftp_proxy: {get_param: ftp_proxy } - http_proxy: {get_param: http_proxy } - https_proxy: {get_param: https_proxy } - socks_proxy: {get_param: socks_proxy } - no_proxy: {get_param: no_proxy } - assign_floating_ip: {get_param: assign_floating_ip } - min_size: {get_param: number_of_minions} - desired_capacity: {get_param: number_of_minions} - max_size: {get_param: max_number_of_minions} - -outputs: - - kube_master: - value: {get_attr: [kube_master_floating, floating_ip_address]} - description: > - This is the "public" IP address of the Kubernetes master node. 
Use this IP address - to log in to the Kubernetes master via ssh or to access the Kubernetes API - from outside the cluster. - - kube_minions: - value: {get_attr: [kube_minions, outputs_list, kube_minion_ip]} - description: > - Here is the list of the "private" addresses of all Kubernetes worker nodes. - - kube_minions_external: - value: {get_attr: [kube_minions, outputs_list, kube_minion_external_ip]} - description: > - Here is the list of the "public" addresses of all Kubernetes worker nodes. diff --git a/cluster/openstack-heat/kubernetes-heat/kubeminion.yaml b/cluster/openstack-heat/kubernetes-heat/kubeminion.yaml deleted file mode 100644 index 409a1b0e588..00000000000 --- a/cluster/openstack-heat/kubernetes-heat/kubeminion.yaml +++ /dev/null @@ -1,314 +0,0 @@ -heat_template_version: 2016-10-14 - -description: > - This is a nested stack that defines a single Kubernetes minion, This stack is - included by an AutoScalingGroup resource in the parent template - (kubecluster.yaml). - -parameters: - - server_image: - type: string - description: glance image used to boot the server - - minion_flavor: - type: string - default: m1.small - description: flavor to use when booting the server - - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - default: lars - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - kubernetes_server_url: - type: string - description: URL of kubernetes server binary. Must be tar.gz. - - kubernetes_salt_url: - type: string - description: URL of kubernetes salt scripts. Must be tar.gz. 
- - token_kubelet: - type: string - description: Token used by kubelet - - token_kube_proxy: - type: string - description: Token used by kube-proxy - - os_auth_url: - type: string - description: OpenStack Auth URL - default: false - - os_username: - type: string - description: OpenStack Username - default: false - - os_password: - type: string - description: OpenStack Password - default: false - - os_region_name: - type: string - description: OpenStack Region Name - default: false - - os_tenant_name: - type: string - description: OpenStack Tenant Name - default: false - - os_user_domain_name: - type: string - description: OpenStack User Domain Name (Domain-level authorization scope for keystone v3) - - enable_proxy: - type: string - description: Whether or not to enable proxy settings - default: false - - ftp_proxy: - type: string - description: FTP Proxy URL - default: localhost - - http_proxy: - type: string - description: HTTP Proxy URL - default: localhost - - https_proxy: - type: string - description: HTTPS Proxy URL - default: localhost - - socks_proxy: - type: string - description: SOCKS Proxy URL - default: localhost - - no_proxy: - type: string - description: Comma seperated list of domains/addresses that bypass proxying. - default: localhost - - assign_floating_ip: - type: boolean - description: Indicates whether floating IPs will be generated for minions - default: true - - # The following are all generated in the parent template. - kube_master_ip: - type: string - description: IP address of the Kubernetes master server. - fixed_network: - type: string - description: Network from which to allocate fixed addresses. - fixed_subnet: - type: string - description: Subnet from which to allocate fixed addresses. - cluster_cidr: - type: string - description: Subnet from which to allocate pod subnets. 
- wait_condition_timeout: - type: number - description : > - timeout for the Wait Conditions - metadata: - type: json - description: metadata for ceilometer query - cluster_name: - type: string - secgroup_base: - type: string - secgroup_node: - type: string - -conditions: - assign_floating_ip: {equals : [{get_param: assign_floating_ip}, true]} - -resources: - - minion_wait_handle: - type: OS::Heat::WaitConditionHandle - - minion_wait_condition: - type: OS::Heat::WaitCondition - depends_on: kube_minion - properties: - handle: {get_resource: minion_wait_handle} - timeout: {get_param: wait_condition_timeout} - - ###################################################################### - # - # software configs. these are components that are combined into - # a multipart MIME user-data archive. - # - - write_heat_params: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/write-heat-params.yaml} - params: - "$KUBERNETES_SERVER_URL": {get_param: kubernetes_server_url} - "$KUBERNETES_SALT_URL": {get_param: kubernetes_salt_url} - "$MASTER_IP": {get_param: kube_master_ip} - - proxy_config: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/configure-proxy.sh} - params: - "$ENABLE_PROXY": {get_param: enable_proxy } - "$FTP_PROXY": {get_param: ftp_proxy } - "$HTTP_PROXY": {get_param: http_proxy } - "$HTTPS_PROXY": {get_param: https_proxy } - "$SOCKS_PROXY": {get_param: socks_proxy } - "$NO_PROXY": {get_param: no_proxy } - - hostname_hack: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/hostname-hack.yaml} - - hostname_hack_script: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/hostname-hack.sh} - - kube_user: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/kube-user.yaml} - - 
provision_network_node: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/provision-network-node.sh} - - deploy_kube_auth_files_node: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/deploy-kube-auth-files-node.yaml} - params: - "$token_kubelet": {get_param: token_kubelet} - "$token_kube_proxy": {get_param: token_kube_proxy} - - configure_salt_node: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/configure-salt.yaml} - params: - "$MASTER_IP": {get_param: kube_master_ip} - "$OS_AUTH_URL": {get_param: os_auth_url} - "$OS_USERNAME": {get_param: os_username} - "$OS_PASSWORD": {get_param: os_password} - "$OS_REGION_NAME": {get_param: os_region_name} - "$OS_TENANT_NAME": {get_param: os_tenant_name} - "$OS_USER_DOMAIN_NAME": {get_param: os_user_domain_name} - "$role": "kubernetes-pool" - "$cluster_cidr": {get_param: cluster_cidr} - - run_salt: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: {get_file: fragments/run-salt.sh} - params: - "$$wc_notify": {get_attr: [minion_wait_handle, curl_cli]} - - kube_minion_init: - type: OS::Heat::MultipartMime - properties: - parts: - - config: {get_resource: write_heat_params} - - config: {get_resource: proxy_config} - - config: {get_resource: hostname_hack} - - config: {get_resource: hostname_hack_script} - - config: {get_resource: kube_user} - - config: {get_resource: provision_network_node} - - config: {get_resource: deploy_kube_auth_files_node} - - config: {get_resource: configure_salt_node} - - config: {get_resource: run_salt} - - ###################################################################### - # - # a single kubernetes minion. 
- # - server_name_post_fix: - type: OS::Heat::RandomString - properties: - character_classes: [{'class': 'lowercase', 'min': 1}] - length: 8 - character_classes: - - class: lowercase - - class: digits - - kube_minion: - type: OS::Nova::Server - properties: - image: {get_param: server_image} - flavor: {get_param: minion_flavor} - key_name: {get_param: ssh_key_name} - metadata: {get_param: metadata} - user_data_format: RAW - user_data: {get_resource: kube_minion_init} - networks: - - port: {get_resource: kube_minion_eth0} - name: - list_join: [-, [{get_param: cluster_name}, node, {get_resource: server_name_post_fix}]] - - kube_minion_eth0: - type: OS::Neutron::Port - properties: - network: {get_param: fixed_network} - security_groups: - - {get_param: secgroup_base} - - {get_param: secgroup_node} - fixed_ips: - - subnet: {get_param: fixed_subnet} - allowed_address_pairs: - - ip_address: 10.246.0.0/16 - replacement_policy: AUTO - - kube_minion_floating: - type: OS::Neutron::FloatingIP - properties: - floating_network: {get_param: external_network} - port_id: {get_resource: kube_minion_eth0} - condition: assign_floating_ip - -outputs: - - kube_minion_ip: - value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} - - kube_minion_external_ip: - value: {get_attr: [kube_minion_floating, floating_ip_address]} - condition: assign_floating_ip diff --git a/cluster/openstack-heat/openrc-default.sh b/cluster/openstack-heat/openrc-default.sh deleted file mode 100644 index 8f1dea1a9e1..00000000000 --- a/cluster/openstack-heat/openrc-default.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## Enviroment variables for the OpenStack command-line client -## Values set via an openrc will override these defaults. - -export OS_IDENTITY_API_VERSION=${OS_IDENTITY_API_VERSION:-2.0} -export OS_USERNAME=${OS_USERNAME:-admin} -export OS_PASSWORD=${OS_PASSWORD:-secretsecret} -export OS_AUTH_URL=${OS_AUTH_URL:-http://192.168.123.100:5000/v2.0} -export OS_TENANT_NAME=${OS_TENANT_NAME:-admin} -export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME:-} -export OS_REGION_NAME=${OS_REGION_NAME:-RegionOne} diff --git a/cluster/openstack-heat/openrc-swift.sh b/cluster/openstack-heat/openrc-swift.sh deleted file mode 100644 index 4da9863c514..00000000000 --- a/cluster/openstack-heat/openrc-swift.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## Enviroment variables for the OpenStack Swift command-line client. This is required for CityCloud -## provider where Swift has different credentials. When Swift is part of your OpenStack do not -## modify these settings. 
- -export OS_IDENTITY_API_VERSION=${OS_IDENTITY_API_VERSION:-2.0} -export OS_USERNAME=${OS_USERNAME:-admin} -export OS_PASSWORD=${OS_PASSWORD:-secretsecret} -export OS_AUTH_URL=${OS_AUTH_URL:-http://192.168.123.100:5000/v2.0} -export OS_TENANT_NAME=${OS_TENANT_NAME:-admin} -export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME:-} -export OS_REGION_NAME=${OS_REGION_NAME:-RegionOne} diff --git a/cluster/openstack-heat/util.sh b/cluster/openstack-heat/util.sh deleted file mode 100644 index c81b16cc201..00000000000 --- a/cluster/openstack-heat/util.sh +++ /dev/null @@ -1,293 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts. - -# exit on any error -set -e - -# Use the config file specified in $KUBE_CONFIG_FILE, or default to -# config-default.sh. -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. 
-readonly ROOT=$(dirname "${BASH_SOURCE}") -source "${ROOT}/${KUBE_CONFIG_FILE:-"config-default.sh"}" -source "${KUBE_ROOT}/cluster/common.sh" -if [ $CREATE_IMAGE = true ]; then -source "${ROOT}/config-image.sh" -fi - -# Verify prereqs on host machine -function verify-prereqs() { - # Check the OpenStack command-line clients - for client in swift glance nova heat openstack; - do - if which $client >/dev/null 2>&1; then - echo "${client} client installed" - else - echo "${client} client does not exist" - echo "Please install ${client} client, and retry." - echo "Documentation for installing ${client} can be found at" - echo "http://docs.openstack.org/user-guide/common/cli-install-openstack-command-line-clients.html" - exit 1 - fi - done -} - -# Instantiate a kubernetes cluster -# -# Assumed vars: -# KUBERNETES_PROVIDER -function kube-up() { - echo "kube-up for provider ${KUBERNETES_PROVIDER}" - create-stack -} - -# Periodically checks if cluster is created -# -# Assumed vars: -# STACK_CREATE_TIMEOUT -# STACK_NAME -function validate-cluster() { - - while (( --$STACK_CREATE_TIMEOUT >= 0)) ;do - local status=$(openstack stack show "${STACK_NAME}" | awk '$2=="stack_status" {print $4}') - if [[ $status ]]; then - echo "Cluster status ${status}" - if [ $status = "CREATE_COMPLETE" ]; then - configure-kubectl - break - elif [ $status = "CREATE_FAILED" ]; then - echo "Cluster not created. Please check stack logs to find the problem" - break - fi - else - echo "Cluster not created. 
Please verify if process started correctly" - break - fi - sleep 60 - done -} - -# Create stack -# -# Assumed vars: -# OPENSTACK -# OPENSTACK_TEMP -# DNS_SERVER -# OPENSTACK_IP -# OPENRC_FILE -function create-stack() { - echo "[INFO] Execute commands to create Kubernetes cluster" - # It is required for some cloud provider like CityCloud where swift client has different credentials - source "${ROOT}/openrc-swift.sh" - upload-resources - source "${ROOT}/openrc-default.sh" - - create-glance-image - - add-keypair - run-heat-script -} - -# Upload kubernetes release tars and heat templates. -# -# Assumed vars: -# ROOT -# KUBERNETES_RELEASE_TAR -function upload-resources() { - swift post ${SWIFT_OBJECT_STORE} --read-acl '.r:*,.rlistings' - - locations=( - "${ROOT}/../../_output/release-tars/${KUBERNETES_RELEASE_TAR}" - "${ROOT}/../../server/${KUBERNETES_RELEASE_TAR}" - ) - - RELEASE_TAR_LOCATION=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 ) - RELEASE_TAR_PATH=$(dirname ${RELEASE_TAR_LOCATION}) - - echo "[INFO] Uploading ${KUBERNETES_RELEASE_TAR}" - swift upload ${SWIFT_OBJECT_STORE} ${RELEASE_TAR_PATH}/${KUBERNETES_RELEASE_TAR} \ - --object-name kubernetes-server.tar.gz - - echo "[INFO] Uploading kubernetes-salt.tar.gz" - swift upload ${SWIFT_OBJECT_STORE} ${RELEASE_TAR_PATH}/kubernetes-salt.tar.gz \ - --object-name kubernetes-salt.tar.gz -} - -# Create a new key pair for use with servers. -# -# Assumed vars: -# KUBERNETES_KEYPAIR_NAME -# CLIENT_PUBLIC_KEY_PATH -function add-keypair() { - local status=$(nova keypair-show ${KUBERNETES_KEYPAIR_NAME}) - if [[ ! $status ]]; then - nova keypair-add ${KUBERNETES_KEYPAIR_NAME} --pub-key ${CLIENT_PUBLIC_KEY_PATH} - echo "[INFO] Key pair created" - else - echo "[INFO] Key pair already exists" - fi -} - -# Create a new glance image. 
-# -# Assumed vars: -# IMAGE_FILE -# IMAGE_PATH -# OPENSTACK_IMAGE_NAME -function create-glance-image() { - if [[ ${CREATE_IMAGE} == "true" ]]; then - local image_status=$(openstack image show ${OPENSTACK_IMAGE_NAME} | awk '$2=="id" {print $4}') - - if [[ ! $image_status ]]; then - if [[ "${DOWNLOAD_IMAGE}" == "true" ]]; then - mkdir -p ${IMAGE_PATH} - curl -L ${IMAGE_URL_PATH}/${IMAGE_FILE} -o ${IMAGE_PATH}/${IMAGE_FILE} -z ${IMAGE_PATH}/${IMAGE_FILE} - fi - echo "[INFO] Create image ${OPENSTACK_IMAGE_NAME}" - glance image-create --name ${OPENSTACK_IMAGE_NAME} --disk-format ${IMAGE_FORMAT} \ - --container-format ${CONTAINER_FORMAT} --file ${IMAGE_PATH}/${IMAGE_FILE} - else - echo "[INFO] Image ${OPENSTACK_IMAGE_NAME} already exists" - fi - fi -} - -# Create a new kubernetes stack. -# -# Assumed vars: -# STACK_NAME -# KUBERNETES_KEYPAIR_NAME -# DNS_SERVER -# SWIFT_SERVER_URL -# OPENSTACK_IMAGE_NAME -# EXTERNAL_NETWORK -# IMAGE_ID -# MASTER_FLAVOR -# MINION_FLAVOR -# NUMBER_OF_MINIONS -# MAX_NUMBER_OF_MINIONS -# DNS_SERVER -# STACK_NAME -function run-heat-script() { - - local stack_status=$(openstack stack show ${STACK_NAME}) - - # Automatically detect swift url if it wasn't specified - if [[ -z $SWIFT_SERVER_URL ]]; then - local rgx="" - if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then - rgx="public: (.+)$" - else - rgx="publicURL: (.+)$" - fi - SWIFT_SERVER_URL=$(openstack catalog show object-store --format value | egrep -o "$rgx" | cut -d" " -f2 | head -n 1) - fi - local swift_repo_url="${SWIFT_SERVER_URL}/${SWIFT_OBJECT_STORE}" - - if [ $CREATE_IMAGE = true ]; then - echo "[INFO] Retrieve new image ID" - IMAGE_ID=$(openstack image show ${OPENSTACK_IMAGE_NAME} | awk '$2=="id" {print $4}') - echo "[INFO] Image Id ${IMAGE_ID}" - fi - - if [[ ! 
$stack_status ]]; then - echo "[INFO] Create stack ${STACK_NAME}" - ( - cd ${ROOT}/kubernetes-heat - openstack stack create --timeout 60 \ - --parameter external_network=${EXTERNAL_NETWORK} \ - --parameter lbaas_version=${LBAAS_VERSION} \ - --parameter fixed_network_cidr=${FIXED_NETWORK_CIDR} \ - --parameter ssh_key_name=${KUBERNETES_KEYPAIR_NAME} \ - --parameter server_image=${IMAGE_ID} \ - --parameter master_flavor=${MASTER_FLAVOR} \ - --parameter minion_flavor=${MINION_FLAVOR} \ - --parameter number_of_minions=${NUMBER_OF_MINIONS} \ - --parameter max_number_of_minions=${MAX_NUMBER_OF_MINIONS} \ - --parameter dns_nameserver=${DNS_SERVER} \ - --parameter kubernetes_salt_url=${swift_repo_url}/kubernetes-salt.tar.gz \ - --parameter kubernetes_server_url=${swift_repo_url}/kubernetes-server.tar.gz \ - --parameter os_auth_url=${OS_AUTH_URL} \ - --parameter os_username=${OS_USERNAME} \ - --parameter os_password=${OS_PASSWORD} \ - --parameter os_region_name=${OS_REGION_NAME} \ - --parameter os_tenant_name=${OS_TENANT_NAME} \ - --parameter os_user_domain_name=${OS_USER_DOMAIN_NAME} \ - --parameter enable_proxy=${ENABLE_PROXY} \ - --parameter ftp_proxy="${FTP_PROXY}" \ - --parameter http_proxy="${HTTP_PROXY}" \ - --parameter https_proxy="${HTTPS_PROXY}" \ - --parameter socks_proxy="${SOCKS_PROXY}" \ - --parameter no_proxy="${NO_PROXY}" \ - --parameter assign_floating_ip="${ASSIGN_FLOATING_IP}" \ - --template kubecluster.yaml \ - ${STACK_NAME} - ) - else - echo "[INFO] Stack ${STACK_NAME} already exists" - openstack stack show ${STACK_NAME} - fi -} - -# Configure kubectl. 
-# -# Assumed vars: -# STACK_NAME -function configure-kubectl() { - - export KUBE_MASTER_IP=$(nova show "${STACK_NAME}"-master | awk '$3=="network" {print $6}') - export CONTEXT="openstack-${STACK_NAME}" - export KUBE_BEARER_TOKEN="TokenKubelet" - - if [[ "${ENABLE_PROXY:-}" == "true" ]]; then - echo 'export NO_PROXY=$NO_PROXY,'"${KUBE_MASTER_IP}" > /tmp/kube-proxy-env - echo 'export no_proxy=$NO_PROXY,'"${KUBE_MASTER_IP}" >> /tmp/kube-proxy-env - . /tmp/kube-proxy-env - fi - - create-kubeconfig -} - - -# Delete a kubernetes cluster -# -# Assumed vars: -# STACK_NAME -function kube-down { - source "${ROOT}/openrc-default.sh" - openstack stack delete ${STACK_NAME} -} - -# Perform preparations required to run e2e tests -function prepare-e2e { - echo "TODO: prepare-e2e" 1>&2 -} - -function test-build-release { - echo "test-build-release() " 1>&2 -} - -# Must ensure that the following ENV vars are set -function detect-master { - - source "${ROOT}/${KUBE_CONFIG_FILE:-"config-default.sh"}" - source "${ROOT}/openrc-default.sh" - - export KUBE_MASTER_IP=$(nova show "${STACK_NAME}"-master | awk '$3=="network" {print $6}') - - echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2 -} From e77373199285f97b39953ec61a52242a8136f03a Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 9 Jan 2018 13:55:03 -0800 Subject: [PATCH 677/794] removed deprecated libvirt-coreos kube-up/ from cluster see #49213 --- cluster/libvirt-coreos/.gitignore | 2 - cluster/libvirt-coreos/README.md | 31 -- cluster/libvirt-coreos/config-default.sh | 72 --- cluster/libvirt-coreos/config-test.sh | 19 - cluster/libvirt-coreos/coreos.xml | 71 --- .../libvirt-coreos/forEmptyDirRegistry.sed | 2 - cluster/libvirt-coreos/forShellEval.sed | 3 - cluster/libvirt-coreos/namespace.yaml | 4 - .../network_kubernetes_global.xml | 11 - .../network_kubernetes_pods.xml | 6 - cluster/libvirt-coreos/node-openssl.cnf | 10 - cluster/libvirt-coreos/openssl.cnf | 15 - cluster/libvirt-coreos/user_data.yml | 116 ----- 
cluster/libvirt-coreos/user_data_master.yml | 93 ---- cluster/libvirt-coreos/user_data_minion.yml | 49 -- cluster/libvirt-coreos/util.sh | 459 ------------------ 16 files changed, 963 deletions(-) delete mode 100644 cluster/libvirt-coreos/.gitignore delete mode 100644 cluster/libvirt-coreos/README.md delete mode 100644 cluster/libvirt-coreos/config-default.sh delete mode 100644 cluster/libvirt-coreos/config-test.sh delete mode 100644 cluster/libvirt-coreos/coreos.xml delete mode 100644 cluster/libvirt-coreos/forEmptyDirRegistry.sed delete mode 100644 cluster/libvirt-coreos/forShellEval.sed delete mode 100644 cluster/libvirt-coreos/namespace.yaml delete mode 100644 cluster/libvirt-coreos/network_kubernetes_global.xml delete mode 100644 cluster/libvirt-coreos/network_kubernetes_pods.xml delete mode 100644 cluster/libvirt-coreos/node-openssl.cnf delete mode 100644 cluster/libvirt-coreos/openssl.cnf delete mode 100644 cluster/libvirt-coreos/user_data.yml delete mode 100644 cluster/libvirt-coreos/user_data_master.yml delete mode 100644 cluster/libvirt-coreos/user_data_minion.yml delete mode 100644 cluster/libvirt-coreos/util.sh diff --git a/cluster/libvirt-coreos/.gitignore b/cluster/libvirt-coreos/.gitignore deleted file mode 100644 index f26d8e4020a..00000000000 --- a/cluster/libvirt-coreos/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/libvirt_storage_pool/ -/coreos_production_qemu_image.img.bz2 diff --git a/cluster/libvirt-coreos/README.md b/cluster/libvirt-coreos/README.md deleted file mode 100644 index b1b0a3e490c..00000000000 --- a/cluster/libvirt-coreos/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Kubernetes CoreOS cluster - -With this tutorial one creates a Kubernetes CoreOS cluster containing of one -master and three nodes (workers) running on `192.168.10.1`-`192.168.10.4`. 
- -For working correctly you need to create the directory addressed as `POOL_PATH` in -`util.sh`: -``` -$ sudo mkdir /var/lib/libvirt/images/kubernetes -$ sudo chown -R $USER:$USER /var/lib/libvirt/images/kubernetes/ -``` - -Then we follow the instructions in the main `kubernetes` directory. - -For debugging set `export UTIL_SH_DEBUG=1`. -``` -$ export KUBERNETES_PROVIDER=libvirt-coreos -$ make release-skip-tests -$ ./cluster/kube-up.sh -``` - -To bring the cluster down again, execute: -``` -$ ./cluster/kube-down.sh -``` - -Have fun! - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/libvirt-coreos/README.md?pixel)]() diff --git a/cluster/libvirt-coreos/config-default.sh b/cluster/libvirt-coreos/config-default.sh deleted file mode 100644 index 44ba5ce6aac..00000000000 --- a/cluster/libvirt-coreos/config-default.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## Contains configuration values for interacting with the libvirt CoreOS cluster - -# Number of minions in the cluster -NUM_NODES=${NUM_NODES:-3} -export NUM_NODES - -# The IP of the master -export MASTER_IP="192.168.10.1" - -export INSTANCE_PREFIX=kubernetes -export MASTER_NAME="${INSTANCE_PREFIX}-master" - -# Map out the IPs, names and container subnets of each node -export NODE_IP_BASE="192.168.10." 
-NODE_CONTAINER_SUBNET_BASE="10.10" -MASTER_CONTAINER_NETMASK="255.255.255.0" -MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1" -MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24" -CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16" -if [[ "$NUM_NODES" -gt 253 ]]; then - echo "ERROR: Because of how IPs are allocated in ${BASH_SOURCE}, you cannot create more than 253 nodes" - exit 1 -fi -for ((i=0; i < NUM_NODES; i++)) do - NODE_IPS[$i]="${NODE_IP_BASE}$((i+2))" - NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))" - NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" - NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1" - NODE_CONTAINER_NETMASKS[$i]="255.255.255.0" -done -NODE_CONTAINER_SUBNETS[$NUM_NODES]=$MASTER_CONTAINER_SUBNET - -SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.11.0.0/16}" # formerly PORTAL_NET - - -# Optional: Enable node logging. -ENABLE_NODE_LOGGING=false -LOGGING_DESTINATION=elasticsearch - -# Optional: Install cluster DNS. 
-ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" -DNS_SERVER_IP="${SERVICE_CLUSTER_IP_RANGE%.*}.254" -DNS_DOMAIN="cluster.local" - -# Optional: Install cluster registry -ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-true}" - -# Optional: Enable DNS horizontal autoscaler -ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}" - -#Generate dns files -sed -f "${KUBE_ROOT}/cluster/addons/dns/transforms2sed.sed" < "${KUBE_ROOT}/cluster/addons/dns/kube-dns.yaml.base" | sed -f "${KUBE_ROOT}/cluster/libvirt-coreos/forShellEval.sed" > "${KUBE_ROOT}/cluster/libvirt-coreos/kube-dns.yaml" - - -#Generate registry files -sed -f "${KUBE_ROOT}/cluster/libvirt-coreos/forEmptyDirRegistry.sed" < "${KUBE_ROOT}/cluster/addons/registry/registry-rc.yaml" > "${KUBE_ROOT}/cluster/libvirt-coreos/registry-rc.yaml" diff --git a/cluster/libvirt-coreos/config-test.sh b/cluster/libvirt-coreos/config-test.sh deleted file mode 100644 index af3633c1be7..00000000000 --- a/cluster/libvirt-coreos/config-test.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## Contains configuration values for interacting with the Vagrant cluster in test mode -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. 
-source "${KUBE_ROOT}/cluster/libvirt-coreos/config-default.sh" diff --git a/cluster/libvirt-coreos/coreos.xml b/cluster/libvirt-coreos/coreos.xml deleted file mode 100644 index 041f3a01510..00000000000 --- a/cluster/libvirt-coreos/coreos.xml +++ /dev/null @@ -1,71 +0,0 @@ - - ${name} - 512 - 512 - 2 - - hvm - - - - - - - - - destroy - restart - restart - - $(which qemu-system-$(uname -m)) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cluster/libvirt-coreos/forEmptyDirRegistry.sed b/cluster/libvirt-coreos/forEmptyDirRegistry.sed deleted file mode 100644 index a00392576b3..00000000000 --- a/cluster/libvirt-coreos/forEmptyDirRegistry.sed +++ /dev/null @@ -1,2 +0,0 @@ -s/persistentVolumeClaim:/emptyDir: {}/g -s/claimName: kube-registry-pvc//g diff --git a/cluster/libvirt-coreos/forShellEval.sed b/cluster/libvirt-coreos/forShellEval.sed deleted file mode 100644 index c1efc974831..00000000000 --- a/cluster/libvirt-coreos/forShellEval.sed +++ /dev/null @@ -1,3 +0,0 @@ -s/\"/\\"/g -s/DNS_SERVER_IP/{DNS_SERVER_IP}/g -s/DNS_DOMAIN/{DNS_DOMAIN}/g diff --git a/cluster/libvirt-coreos/namespace.yaml b/cluster/libvirt-coreos/namespace.yaml deleted file mode 100644 index 986f4b48221..00000000000 --- a/cluster/libvirt-coreos/namespace.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: kube-system diff --git a/cluster/libvirt-coreos/network_kubernetes_global.xml b/cluster/libvirt-coreos/network_kubernetes_global.xml deleted file mode 100644 index b22cb262fc0..00000000000 --- a/cluster/libvirt-coreos/network_kubernetes_global.xml +++ /dev/null @@ -1,11 +0,0 @@ - - kubernetes_global - - - - - - - - - diff --git a/cluster/libvirt-coreos/network_kubernetes_pods.xml b/cluster/libvirt-coreos/network_kubernetes_pods.xml deleted file mode 100644 index 13bb1d566a5..00000000000 --- a/cluster/libvirt-coreos/network_kubernetes_pods.xml +++ /dev/null @@ -1,6 +0,0 @@ - - kubernetes_pods - - - - 
diff --git a/cluster/libvirt-coreos/node-openssl.cnf b/cluster/libvirt-coreos/node-openssl.cnf deleted file mode 100644 index acf03fcfbcf..00000000000 --- a/cluster/libvirt-coreos/node-openssl.cnf +++ /dev/null @@ -1,10 +0,0 @@ -[req] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[req_distinguished_name] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, digitalSignature, keyEncipherment -subjectAltName = @alt_names -[alt_names] -IP.1 = $ENV::WORKER_IP diff --git a/cluster/libvirt-coreos/openssl.cnf b/cluster/libvirt-coreos/openssl.cnf deleted file mode 100644 index 5b1c8e894c8..00000000000 --- a/cluster/libvirt-coreos/openssl.cnf +++ /dev/null @@ -1,15 +0,0 @@ -[req] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[req_distinguished_name] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, digitalSignature, keyEncipherment -subjectAltName = @alt_names -[alt_names] -DNS.1 = kubernetes -DNS.2 = kubernetes.default -DNS.3 = kubernetes.default.svc -DNS.4 = kubernetes.default.svc.cluster.local -IP.1 = $ENV::KUBERNETES_SVC -IP.2 = $ENV::MASTER_IP diff --git a/cluster/libvirt-coreos/user_data.yml b/cluster/libvirt-coreos/user_data.yml deleted file mode 100644 index 3288c44bb4f..00000000000 --- a/cluster/libvirt-coreos/user_data.yml +++ /dev/null @@ -1,116 +0,0 @@ -#cloud-config - -hostname: ${name} - -ssh_authorized_keys: -${ssh_keys} - -write_files: - - path: /etc/systemd/journald.conf - permissions: 0644 - content: | - [Journal] - SystemMaxUse=50M - RuntimeMaxUse=50M - -coreos: - etcd2: - advertise-client-urls: http://${public_ip}:2379 - initial-advertise-peer-urls: http://${public_ip}:2380 - listen-client-urls: http://0.0.0.0:2379 - listen-peer-urls: http://${public_ip}:2380 - initial-cluster-state: new - initial-cluster: ${etcd2_initial_cluster} - units: - - name: static.network - command: start - content: | - [Match] - # Name=eth0 - MACAddress=52:54:00:00:00:${i} - - [Network] - 
Address=${public_ip}/24 - DNS=192.168.10.254 - Gateway=192.168.10.254 - - name: cbr0.netdev - command: start - content: | - [NetDev] - Kind=bridge - Name=cbr0 - - name: cbr0.network - command: start - content: | - [Match] - Name=cbr0 - - [Network] - Address=${NODE_CONTAINER_SUBNETS[$i]} - - [Route] - Destination=${CONTAINER_SUBNET} - - name: cbr0-interface.network - command: start - content: | - [Match] - # Name=eth1 - MACAddress=52:54:00:00:01:${i} - - [Network] - Bridge=cbr0 - - name: nat.service - command: start - content: | - [Unit] - Description=NAT non container traffic - - [Service] - ExecStart=/usr/sbin/iptables -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE ! -d ${CONTAINER_SUBNET} - RemainAfterExit=yes - Type=oneshot - - name: etcd2.service - command: start - drop-ins: - - name: 10-override-name.conf - content: | - [Service] - Environment=ETCD_NAME=%H - - name: docker.service - command: start - drop-ins: - - name: 50-opts.conf - content: | - [Service] - Environment='DOCKER_OPTS=--bridge=cbr0 --iptables=false' - - name: docker-tcp.socket - command: start - enable: yes - content: | - [Unit] - Description=Docker Socket for the API - - [Socket] - ListenStream=2375 - BindIPv6Only=both - Service=docker.service - - [Install] - WantedBy=sockets.target - - name: opt-kubernetes.mount - command: start - content: | - [Unit] - ConditionVirtualization=|vm - - [Mount] - What=kubernetes - Where=/opt/kubernetes - Options=ro,trans=virtio,version=9p2000.L - Type=9p - update: - group: ${COREOS_CHANNEL:-alpha} - reboot-strategy: off - -$( [[ ${type} =~ "master" ]] && render-template "$ROOT/user_data_master.yml" ) -$( [[ ${type} =~ "node" ]] && render-template "$ROOT/user_data_minion.yml" ) diff --git a/cluster/libvirt-coreos/user_data_master.yml b/cluster/libvirt-coreos/user_data_master.yml deleted file mode 100644 index 4f277db6c99..00000000000 --- a/cluster/libvirt-coreos/user_data_master.yml +++ /dev/null @@ -1,93 +0,0 @@ -#cloud-config - -coreos: - units: - - name: 
kube-apiserver.service - command: start - content: | - [Unit] - After=opt-kubernetes.mount etcd2.service - ConditionFileIsExecutable=/opt/kubernetes/bin/kube-apiserver - Description=Kubernetes API Server - Documentation=https://github.com/kubernetes/kubernetes - Requires=opt-kubernetes.mount etcd2.service - - [Service] - ExecStart=/opt/kubernetes/bin/kube-apiserver \ - --tls-cert-file=./opt/kubernetes/certs/apiserver.pem \ - --tls-private-key-file=/opt/kubernetes/certs/apiserver-key.pem \ - --client-ca-file=/opt/kubernetes/certs/ca.pem \ - --service-account-key-file=/opt/kubernetes/certs/apiserver-key.pem \ - --service-account-lookup=${SERVICE_ACCOUNT_LOOKUP} \ - --admission-control=${ADMISSION_CONTROL} \ - --insecure-bind-address=0.0.0.0 \ - --insecure-port=8080 \ - --etcd-servers=http://127.0.0.1:2379 \ - --kubelet-port=10250 \ - --v=4 \ - --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE} - Restart=always - RestartSec=2 - - [Install] - WantedBy=multi-user.target - - name: kube-controller-manager.service - command: start - content: | - [Unit] - After=opt-kubernetes.mount kube-apiserver.service - ConditionFileIsExecutable=/opt/kubernetes/bin/kube-controller-manager - Description=Kubernetes Controller Manager - Documentation=https://github.com/kubernetes/kubernetes - Requires=opt-kubernetes.mount kube-apiserver.service - - [Service] - ExecStart=/opt/kubernetes/bin/kube-controller-manager \ - --master=127.0.0.1:8080 \ - --service-account-private-key-file=/opt/kubernetes/certs/apiserver-key.pem \ - --root-ca-file=/opt/kubernetes/certs/ca.pem \ - --v=4 - Restart=always - RestartSec=2 - - [Install] - WantedBy=multi-user.target - - name: kube-scheduler.service - command: start - content: | - [Unit] - After=opt-kubernetes.mount kube-apiserver.service - ConditionFileIsExecutable=/opt/kubernetes/bin/kube-scheduler - Description=Kubernetes Scheduler - Documentation=https://github.com/kubernetes/kubernetes - Requires=opt-kubernetes.mount kube-apiserver.service - - 
[Service] - ExecStart=/opt/kubernetes/bin/kube-scheduler \ - --master=127.0.0.1:8080 - Restart=always - RestartSec=2 - - [Install] - WantedBy=multi-user.target - - name: kube-addons.service - command: start - content: | - [Unit] - After=opt-kubernetes.mount kube-apiserver.service - ConditionPathIsDirectory=/opt/kubernetes/addons - Description=Kubernetes addons - Documentation=https://github.com/kubernetes/kubernetes - Requires=opt-kubernetes.mount - Wants=kube-apiserver.service - - [Service] - Type=oneshot - ExecStartPre=/bin/bash -c 'while [[ \"\$(curl -s http://127.0.0.1:8080/healthz)\" != \"ok\" ]]; do sleep 1; done' - ExecStartPre=/bin/sleep 10 - ExecStart=/opt/kubernetes/bin/kubectl create -f /opt/kubernetes/addons - ExecStop=/opt/kubernetes/bin/kubectl delete -f /opt/kubernetes/addons - RemainAfterExit=yes - - [Install] - WantedBy=multi-user.target diff --git a/cluster/libvirt-coreos/user_data_minion.yml b/cluster/libvirt-coreos/user_data_minion.yml deleted file mode 100644 index a28bdded378..00000000000 --- a/cluster/libvirt-coreos/user_data_minion.yml +++ /dev/null @@ -1,49 +0,0 @@ -#cloud-config - -coreos: - units: - - name: kubelet.service - command: start - content: | - [Unit] - After=opt-kubernetes.mount docker.socket - ConditionFileIsExecutable=/opt/kubernetes/bin/kubelet - Description=Kubernetes Kubelet - Documentation=https://github.com/kubernetes/kubernetes - Requires=opt-kubernetes.mount docker.socket - - [Service] - ExecStart=/opt/kubernetes/bin/kubelet \ - --address=0.0.0.0 \ - --hostname-override=${NODE_IPS[$i]} \ - --cluster-domain=cluster.local \ - --kubeconfig=/opt/kubernetes/kubeconfig/kubelet.kubeconfig \ - --tls-cert-file=/opt/kubernetes/certs/${NODE_NAMES[$i]}-node.pem \ \ - --tls-private-key-file=/opt/kubernetes/certs/${NODE_NAMES[$i]}-node-key.pem \ - $( [[ "$ENABLE_CLUSTER_DNS" == "true" ]] && echo "--cluster-dns=${DNS_SERVER_IP}" ) \ - $( [[ "$ENABLE_CLUSTER_DNS" == "true" ]] && echo "--cluster-domain=${DNS_DOMAIN}" ) \ - 
--pod-manifest-path=/opt/kubernetes/manifests - Restart=always - RestartSec=2 - - [Install] - WantedBy=multi-user.target - - name: kube-proxy.service - command: start - content: | - [Unit] - After=opt-kubernetes.mount - ConditionFileIsExecutable=/opt/kubernetes/bin/kube-proxy - Description=Kubernetes Proxy - Documentation=https://github.com/kubernetes/kubernetes - Requires=opt-kubernetes.mount - - [Service] - ExecStart=/opt/kubernetes/bin/kube-proxy \ - --master=http://${MASTER_IP}:8080 \ - --hostname-override=${NODE_IPS[$i]} - Restart=always - RestartSec=2 - - [Install] - WantedBy=multi-user.target diff --git a/cluster/libvirt-coreos/util.sh b/cluster/libvirt-coreos/util.sh deleted file mode 100644 index 545d9850453..00000000000 --- a/cluster/libvirt-coreos/util.sh +++ /dev/null @@ -1,459 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts. - -[ ! -z ${UTIL_SH_DEBUG+x} ] && set -x - -command -v kubectl >/dev/null 2>&1 || { echo >&2 "kubectl not found in path. Aborting."; exit 1; } - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. 
-readonly ROOT=$(dirname "${BASH_SOURCE}") -source "$ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}" -source "$KUBE_ROOT/cluster/common.sh" - -export LIBVIRT_DEFAULT_URI=qemu:///system -export SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-true} -export ADMISSION_CONTROL=${ADMISSION_CONTROL:-Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,PVCProtection,ResourceQuota} -readonly POOL=kubernetes -readonly POOL_PATH=/var/lib/libvirt/images/kubernetes - -[ ! -d "${POOL_PATH}" ] && (echo "$POOL_PATH" does not exist ; exit 1 ) - -# Creates a kubeconfig file for the kubelet. -# Args: address (e.g. "http://localhost:8080"), destination file path -function create-kubelet-kubeconfig() { - local apiserver_address="${1}" - local destination="${2}" - if [[ -z "${apiserver_address}" ]]; then - echo "Must provide API server address to create Kubelet kubeconfig file!" - exit 1 - fi - if [[ -z "${destination}" ]]; then - echo "Must provide destination path to create Kubelet kubeconfig file!" 
- exit 1 - fi - echo "Creating Kubelet kubeconfig file" - local dest_dir="$(dirname "${destination}")" - mkdir -p "${dest_dir}" &>/dev/null || sudo mkdir -p "${dest_dir}" - sudo=$(test -w "${dest_dir}" || echo "sudo -E") - cat < /dev/null -apiVersion: v1 -kind: Config -clusters: - - cluster: - server: ${apiserver_address} - name: local -contexts: - - context: - cluster: local - name: local -current-context: local -EOF -} - -# join -# Concatenates the list elements with the delimiter passed as first parameter -# -# Ex: join , a b c -# -> a,b,c -function join { - local IFS="$1" - shift - echo "$*" -} - -# Must ensure that the following ENV vars are set -function detect-master { - KUBE_MASTER_IP=$MASTER_IP - KUBE_MASTER=$MASTER_NAME - export KUBERNETES_MASTER=http://$KUBE_MASTER_IP:8080 - echo "KUBE_MASTER_IP: $KUBE_MASTER_IP" - echo "KUBE_MASTER: $KUBE_MASTER" -} - -# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[] -function detect-nodes { - KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}") -} - -function generate_certs { - node_names=("${@}") - #Root-CA - tempdir=$(mktemp -d) - CA_KEY=${CA_KEY:-"$tempdir/ca-key.pem"} - CA_CERT=${CA_CERT:-"$tempdir/ca.pem"} - openssl genrsa -out "${CA_KEY}" 2048 2>/dev/null - openssl req -x509 -new -nodes -key "${CA_KEY}" -days 10000 -out "${CA_CERT}" -subj "/CN=kube-ca" 2>/dev/null - - #API server key pair - KUBE_KEY=${KUBE_KEY:-"$tempdir/apiserver-key.pem"} - API_SERVER_CERT_REQ=${API_SERVER_CERT_REQ:-"$tempdir/apiserver.csr"} - openssl genrsa -out "${KUBE_KEY}" 2048 2>/dev/null - KUBERNETES_SVC=${SERVICE_CLUSTER_IP_RANGE%.*}.1 openssl req -new -key "${KUBE_KEY}" -out "${API_SERVER_CERT_REQ}" -subj "/CN=kube-apiserver" -config cluster/libvirt-coreos/openssl.cnf 2>/dev/null - KUBE_CERT=${KUBE_CERT:-"$tempdir/apiserver.pem"} - KUBERNETES_SVC=${SERVICE_CLUSTER_IP_RANGE%.*}.1 openssl x509 -req -in "${API_SERVER_CERT_REQ}" -CA "${CA_CERT}" -CAkey "${CA_KEY}" -CAcreateserial -out "${KUBE_CERT}" -days 365 -extensions v3_req -extfile 
cluster/libvirt-coreos/openssl.cnf 2>/dev/null - - #Copy apiserver and controller tsl assets - mkdir -p "$POOL_PATH/kubernetes/certs" - cp "${KUBE_CERT}" "$POOL_PATH/kubernetes/certs" - cp "${KUBE_KEY}" "$POOL_PATH/kubernetes/certs" - cp "${CA_CERT}" "$POOL_PATH/kubernetes/certs" - - #Generate nodes certificate - for (( i = 0 ; i < $NUM_NODES ; i++ )); do - openssl genrsa -out $tempdir/${node_names[$i]}-node-key.pem 2048 2>/dev/null - cp "$tempdir/${node_names[$i]}-node-key.pem" "$POOL_PATH/kubernetes/certs" - WORKER_IP=${NODE_IPS[$i]} openssl req -new -key $tempdir/${node_names[$i]}-node-key.pem -out $tempdir/${node_names[$i]}-node.csr -subj "/CN=${node_names[$i]}" -config cluster/libvirt-coreos/node-openssl.cnf 2>/dev/null - WORKER_IP=${NODE_IPS[$i]} openssl x509 -req -in $tempdir/${node_names[$i]}-node.csr -CA "${CA_CERT}" -CAkey "${CA_KEY}" -CAcreateserial -out $tempdir/${node_names[$i]}-node.pem -days 365 -extensions v3_req -extfile cluster/libvirt-coreos/node-openssl.cnf 2>/dev/null - cp "$tempdir/${node_names[$i]}-node.pem" "$POOL_PATH/kubernetes/certs" - done - echo "TLS assets generated..." -} - -#Setup registry proxy -function setup_registry_proxy { - if [[ "$ENABLE_CLUSTER_REGISTRY" == "true" ]]; then - cp "./cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml" "$POOL_PATH/kubernetes/manifests" - fi -} - -# Verify prereqs on host machine -function verify-prereqs { - if ! which virsh >/dev/null; then - echo "Can't find virsh in PATH, please fix and retry." >&2 - exit 1 - fi - if ! virsh nodeinfo >/dev/null; then - exit 1 - fi - if [[ "$(
&2 - echo "Enabling it would reduce the memory footprint of large clusters" >&2 - if [[ -t 0 ]]; then - read -t 5 -n 1 -p "Do you want to enable KSM (requires root password) (y/n)? " answer - echo "" - if [[ "$answer" == 'y' ]]; then - su -c 'echo 1 > /sys/kernel/mm/ksm/run' - fi - else - echo "You can enable it with (as root):" >&2 - echo "" >&2 - echo " echo 1 > /sys/kernel/mm/ksm/run" >&2 - echo "" >&2 - fi - fi -} - -# Destroy the libvirt storage pool and all the images inside -# -# If 'keep_base_image' is passed as first parameter, -# the base image is kept, as well as the storage pool. -# All the other images are deleted. -function destroy-pool { - virsh pool-info $POOL >/dev/null 2>&1 || return - - rm -rf "$POOL_PATH"/kubernetes/* - rm -rf "$POOL_PATH"/kubernetes_config*/* - local vol - virsh vol-list $POOL | awk 'NR>2 && !/^$/ && $1 ~ /^kubernetes/ {print $1}' | \ - while read vol; do - virsh vol-delete $vol --pool $POOL - done - - [[ "$1" == 'keep_base_image' ]] && return - - set +e - virsh vol-delete coreos_base.img --pool $POOL - virsh pool-destroy $POOL - rmdir "$POOL_PATH" - set -e -} - -# Creates the libvirt storage pool and populate it with -# - the CoreOS base image -# - the kubernetes binaries -function initialize-pool { - mkdir -p "$POOL_PATH" - if ! virsh pool-info $POOL >/dev/null 2>&1; then - virsh pool-create-as $POOL dir --target "$POOL_PATH" - fi - - wget -N -P "$ROOT" https://${COREOS_CHANNEL:-alpha}.release.core-os.net/amd64-usr/current/coreos_production_qemu_image.img.bz2 - if [[ "$ROOT/coreos_production_qemu_image.img.bz2" -nt "$POOL_PATH/coreos_base.img" ]]; then - bunzip2 -f -k "$ROOT/coreos_production_qemu_image.img.bz2" - virsh vol-delete coreos_base.img --pool $POOL 2> /dev/null || true - fi - if ! 
virsh vol-list $POOL | grep -q coreos_base.img; then - virsh vol-create-as $POOL coreos_base.img 10G --format qcow2 - virsh vol-upload coreos_base.img "$ROOT/coreos_production_qemu_image.img" --pool $POOL - fi - - mkdir -p "$POOL_PATH/kubernetes" - kube-push-internal - - mkdir -p "$POOL_PATH/kubernetes/manifests" - if [[ "$ENABLE_NODE_LOGGING" == "true" ]]; then - if [[ "$LOGGING_DESTINATION" == "elasticsearch" ]]; then - cp "$KUBE_ROOT/cluster/saltbase/salt/fluentd-es/fluentd-es.manifest" "$POOL_PATH/kubernetes/manifests" - elif [[ "$LOGGING_DESTINATION" == "gcp" ]]; then - cp "$KUBE_ROOT/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.manifest" "$POOL_PATH/kubernetes/manifests" - fi - fi - - mkdir -p "$POOL_PATH/kubernetes/addons" - if [[ "$ENABLE_CLUSTER_DNS" == "true" ]]; then - render-template "$ROOT/namespace.yaml" > "$POOL_PATH/kubernetes/addons/namespace.yaml" - render-template "$ROOT/kube-dns.yaml" > "$POOL_PATH/kubernetes/addons/kube-dns.yaml" - fi - - virsh pool-refresh $POOL -} - -function destroy-network { - set +e - virsh net-destroy kubernetes_global - virsh net-destroy kubernetes_pods - set -e -} - -function initialize-network { - virsh net-create "$ROOT/network_kubernetes_global.xml" - virsh net-create "$ROOT/network_kubernetes_pods.xml" -} - -function render-template { - eval "echo \"$(cat $1)\"" -} - -function wait-cluster-readiness { - echo "Wait for cluster readiness" - - local timeout=120 - while [[ $timeout -ne 0 ]]; do - nb_ready_nodes=$(kubectl get nodes -o go-template="{{range.items}}{{range.status.conditions}}{{.type}}{{end}}:{{end}}" 2>/dev/null | tr ':' '\n' | grep -c Ready || true) - echo "Nb ready nodes: $nb_ready_nodes / $NUM_NODES" - if [[ "$nb_ready_nodes" -eq "$NUM_NODES" ]]; then - return 0 - fi - - timeout=$(($timeout-1)) - sleep .5 - done - - return 1 -} - -# Instantiate a kubernetes cluster -function kube-up { - detect-master - detect-nodes - initialize-pool keep_base_image - generate_certs "${NODE_NAMES[@]}" - 
setup_registry_proxy - initialize-network - - readonly ssh_keys="$(cat ~/.ssh/*.pub | sed 's/^/ - /')" - readonly kubernetes_dir="$POOL_PATH/kubernetes" - - local i - for (( i = 0 ; i <= $NUM_NODES ; i++ )); do - if [[ $i -eq $NUM_NODES ]]; then - etcd2_initial_cluster[$i]="${MASTER_NAME}=http://${MASTER_IP}:2380" - else - etcd2_initial_cluster[$i]="${NODE_NAMES[$i]}=http://${NODE_IPS[$i]}:2380" - fi - done - etcd2_initial_cluster=$(join , "${etcd2_initial_cluster[@]}") - readonly machines=$(join , "${KUBE_NODE_IP_ADDRESSES[@]}") - - for (( i = 0 ; i <= $NUM_NODES ; i++ )); do - if [[ $i -eq $NUM_NODES ]]; then - type=master - name=$MASTER_NAME - public_ip=$MASTER_IP - else - type=node-$(printf "%02d" $i) - name=${NODE_NAMES[$i]} - public_ip=${NODE_IPS[$i]} - fi - image=$name.img - config=kubernetes_config_$type - - virsh vol-create-as $POOL $image 10G --format qcow2 --backing-vol coreos_base.img --backing-vol-format qcow2 - - mkdir -p "$POOL_PATH/$config/openstack/latest" - render-template "$ROOT/user_data.yml" > "$POOL_PATH/$config/openstack/latest/user_data" - virsh pool-refresh $POOL - - domain_xml=$(mktemp) - render-template $ROOT/coreos.xml > $domain_xml - virsh create $domain_xml - rm $domain_xml - done - - export KUBE_SERVER="http://192.168.10.1:8080" - export CONTEXT="libvirt-coreos" - create-kubeconfig - create-kubelet-kubeconfig "http://${MASTER_IP}:8080" "${POOL_PATH}/kubernetes/kubeconfig/kubelet.kubeconfig" - - wait-cluster-readiness - - echo "Kubernetes cluster is running. 
The master is running at:" - echo - echo " http://${KUBE_MASTER_IP}:8080" - echo - echo "You can control the Kubernetes cluster with: 'kubectl'" - echo "You can connect on the master with: 'ssh core@${KUBE_MASTER_IP}'" - - wait-registry-readiness - -} - -function create_registry_rc() { - echo " Create registry replication controller" - kubectl create -f $ROOT/registry-rc.yaml - local timeout=120 - while [[ $timeout -ne 0 ]]; do - phase=$(kubectl get pods -n kube-system -lk8s-app=kube-registry --output='jsonpath={.items..status.phase}') - if [ "$phase" = "Running" ]; then - return 0 - fi - timeout=$(($timeout-1)) - sleep .5 - done -} - - -function create_registry_svc() { - echo " Create registry service" - kubectl create -f "${KUBE_ROOT}/cluster/addons/registry/registry-svc.yaml" -} - -function wait-registry-readiness() { - if [[ "$ENABLE_CLUSTER_REGISTRY" != "true" ]]; then - return 0 - fi - echo "Wait for registry readiness..." - local timeout=120 - while [[ $timeout -ne 0 ]]; do - phase=$(kubectl get namespaces --output=jsonpath='{.items[?(@.metadata.name=="kube-system")].status.phase}') - if [ "$phase" = "Active" ]; then - create_registry_rc - create_registry_svc - return 0 - fi - echo "waiting for namespace kube-system" - timeout=$(($timeout-1)) - sleep .5 - done -} - -# Delete a kubernetes cluster -function kube-down { - virsh list | awk 'NR>2 && !/^$/ && $2 ~ /^kubernetes/ {print $2}' | \ - while read dom; do - virsh destroy $dom - done - destroy-pool keep_base_image - destroy-network -} - -# The kubernetes binaries are pushed to a host directory which is exposed to the VM -function upload-server-tars { - tar -x -C "$POOL_PATH/kubernetes" -f "$SERVER_BINARY_TAR" kubernetes - rm -rf "$POOL_PATH/kubernetes/bin" - mv "$POOL_PATH/kubernetes/kubernetes/server/bin" "$POOL_PATH/kubernetes/bin" - chmod -R 755 "$POOL_PATH/kubernetes/bin" - rm -fr "$POOL_PATH/kubernetes/kubernetes" -} - -# Update a kubernetes cluster with latest source -function kube-push { - 
kube-push-internal - ssh-to-node "$MASTER_NAME" "sudo systemctl restart kube-apiserver kube-controller-manager kube-scheduler" - for ((i=0; i < NUM_NODES; i++)); do - ssh-to-node "${NODE_NAMES[$i]}" "sudo systemctl restart kubelet kube-proxy" - done - wait-cluster-readiness -} - -function kube-push-internal { - case "${KUBE_PUSH:-release}" in - release) - kube-push-release;; - local) - kube-push-local;; - *) - echo "The only known push methods are \"release\" to use the release tarball or \"local\" to use the binaries built by make. KUBE_PUSH is set \"$KUBE_PUSH\"" >&2 - return 1;; - esac -} - -function kube-push-release { - find-release-tars - upload-server-tars -} - -function kube-push-local { - rm -rf "$POOL_PATH/kubernetes/bin/*" - mkdir -p "$POOL_PATH/kubernetes/bin" - cp "${KUBE_ROOT}/_output/local/go/bin"/* "$POOL_PATH/kubernetes/bin" -} - -# Execute prior to running tests to build a release if required for env -function test-build-release { - echo "TODO" -} - -# Execute prior to running tests to initialize required structure -function test-setup { - "${KUBE_ROOT}/cluster/kube-up.sh" -} - -# Execute after running tests to perform any required clean-up -function test-teardown { - kube-down -} - -# SSH to a node by name or IP ($1) and run a command ($2). 
-function ssh-to-node { - local node="$1" - local cmd="$2" - local machine - - if [[ "$node" == "$MASTER_IP" ]] || [[ "$node" =~ ^"$NODE_IP_BASE" ]]; then - machine="$node" - elif [[ "$node" == "$MASTER_NAME" ]]; then - machine="$MASTER_IP" - else - for ((i=0; i < NUM_NODES; i++)); do - if [[ "$node" == "${NODE_NAMES[$i]}" ]]; then - machine="${NODE_IPS[$i]}" - break - fi - done - fi - if [[ -z "$machine" ]]; then - echo "$node is an unknown machine to ssh to" >&2 - fi - ssh -o ConnectTimeout=30 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ControlMaster=no "core@$machine" "$cmd" -} - -# Perform preparations required to run e2e tests -function prepare-e2e() { - echo "libvirt-coreos doesn't need special preparations for e2e tests" 1>&2 -} From f64c508e2e85393c5f59d26abd630e7c11791b81 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Mon, 8 Jan 2018 18:46:51 +0000 Subject: [PATCH 678/794] Add getCRIClient and set default values for CRI related flags --- hack/make-rules/test-e2e-node.sh | 20 ++++++++++---------- test/e2e/framework/test_context.go | 22 +++++++++++++--------- test/e2e_node/image_list.go | 16 +--------------- test/e2e_node/services/kubelet.go | 12 ++++++++++++ test/e2e_node/util.go | 24 ++++++++++++++++++++++++ 5 files changed, 60 insertions(+), 34 deletions(-) diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh index 1cff4c85d8e..5733f1eb442 100755 --- a/hack/make-rules/test-e2e-node.sh +++ b/hack/make-rules/test-e2e-node.sh @@ -60,6 +60,16 @@ if [ ! -d "${artifacts}" ]; then fi echo "Test artifacts will be written to ${artifacts}" +if [[ $runtime == "remote" ]] ; then + if [[ ! -z $container_runtime_endpoint ]] ; then + test_args="--container-runtime-endpoint=${container_runtime_endpoint} $test_args" + fi + if [[ ! 
-z $image_service_endpoint ]] ; then + test_args="--image-service-endpoint=$image_service_endpoint $test_args" + fi +fi + + if [ $remote = true ] ; then # The following options are only valid in remote run. images=${IMAGES:-""} @@ -153,22 +163,12 @@ else # Runtime flags test_args='--kubelet-flags="--container-runtime='$runtime'" '$test_args - if [[ $runtime == "remote" ]] ; then - if [[ ! -z $container_runtime_endpoint ]] ; then - test_args='--kubelet-flags="--container-runtime-endpoint='$container_runtime_endpoint'" '$test_args - fi - if [[ ! -z $image_service_endpoint ]] ; then - test_args='--kubelet-flags="--image-service-endpoint='$image_service_endpoint'" '$test_args - fi - fi # Test using the host the script was run on # Provided for backwards compatibility go run test/e2e_node/runner/local/run_local.go \ --system-spec-name="$system_spec_name" --ginkgo-flags="$ginkgoflags" \ --test-flags="--container-runtime=${runtime} \ - --container-runtime-endpoint=${container_runtime_endpoint} \ - --image-service-endpoint=${image_service_endpoint} \ --alsologtostderr --v 4 --report-dir=${artifacts} --node-name $(hostname) \ $test_args" --build-dependencies=true 2>&1 | tee -i "${artifacts}/build-log.txt" exit $? 
diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index 54e261abff3..31b2dde1855 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -57,14 +57,16 @@ type TestContextType struct { Prefix string MinStartupPods int // Timeout for waiting for system pods to be running - SystemPodsStartupTimeout time.Duration - UpgradeTarget string - EtcdUpgradeStorage string - EtcdUpgradeVersion string - UpgradeImage string - GCEUpgradeScript string - ContainerRuntime string - ContainerRuntimeEndpoint string + SystemPodsStartupTimeout time.Duration + UpgradeTarget string + EtcdUpgradeStorage string + EtcdUpgradeVersion string + UpgradeImage string + GCEUpgradeScript string + ContainerRuntime string + ContainerRuntimeEndpoint string + ContainerRuntimeProcessName string + ContainerRuntimePidFile string // SystemdServices are comma separated list of systemd services the test framework // will dump logs for. SystemdServices string @@ -203,7 +205,9 @@ func RegisterCommonFlags() { flag.StringVar(&TestContext.FeatureGates, "feature-gates", "", "A set of key=value pairs that describe feature gates for alpha/experimental features.") flag.StringVar(&TestContext.Viper, "viper-config", "e2e", "The name of the viper config i.e. 'e2e' will read values from 'e2e.json' locally. 
All e2e parameters are meant to be configurable by viper.") flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/rkt/remote).") - flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "", "The container runtime endpoint of cluster VM instances.") + flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.") + flag.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.") + flag.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.") flag.StringVar(&TestContext.SystemdServices, "systemd-services", "docker", "The comma separated list of systemd services the framework will dump logs for.") flag.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.") flag.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.") diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index 01360451fcb..0c37c88752d 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -17,7 +17,6 @@ limitations under the License. 
package e2e_node import ( - "errors" "fmt" "os/exec" "os/user" @@ -28,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" - "k8s.io/kubernetes/pkg/kubelet/remote" commontest "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" @@ -39,8 +37,6 @@ const ( maxImagePullRetries = 5 // Sleep duration between image pull retry attempts. imagePullRetryDelay = time.Second - // connection timeout for gRPC image service connection - imageServiceConnectionTimeout = 15 * time.Minute ) // NodeImageWhiteList is a list of images used in node e2e test. These images will be prepulled @@ -107,17 +103,7 @@ func getPuller() (puller, error) { case "docker": return &dockerPuller{}, nil case "remote": - endpoint := framework.TestContext.ContainerRuntimeEndpoint - if framework.TestContext.ImageServiceEndpoint != "" { - //ImageServiceEndpoint is the same as ContainerRuntimeEndpoint if not - //explicitly specified - //https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kubelet.go#L517 - endpoint = framework.TestContext.ImageServiceEndpoint - } - if endpoint == "" { - return nil, errors.New("can't prepull images, no remote endpoint provided") - } - is, err := remote.NewRemoteImageService(endpoint, imageServiceConnectionTimeout) + _, is, err := getCRIClient() if err != nil { return nil, err } diff --git a/test/e2e_node/services/kubelet.go b/test/e2e_node/services/kubelet.go index aa803859a39..1fee35acfd0 100644 --- a/test/e2e_node/services/kubelet.go +++ b/test/e2e_node/services/kubelet.go @@ -303,6 +303,18 @@ func (e *E2EServices) startKubelet() (*server, error) { cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName) } + if framework.TestContext.ContainerRuntime != "" { + cmdArgs = append(cmdArgs, "--container-runtime", framework.TestContext.ContainerRuntime) + } 
+ + if framework.TestContext.ContainerRuntimeEndpoint != "" { + cmdArgs = append(cmdArgs, "--container-runtime-endpoint", framework.TestContext.ContainerRuntimeEndpoint) + } + + if framework.TestContext.ImageServiceEndpoint != "" { + cmdArgs = append(cmdArgs, "--image-service-endpoint", framework.TestContext.ImageServiceEndpoint) + } + // Write config file or flags, depending on whether --generate-kubelet-config-file was provided if genKubeletConfigFile { if err := writeKubeletConfigFile(kc, kubeletConfigPath); err != nil { diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index 0d00f88944e..bf1914c5e71 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -35,11 +35,13 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes/scheme" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" + "k8s.io/kubernetes/pkg/kubelet/remote" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/metrics" @@ -365,3 +367,25 @@ func runCommand(cmd ...string) (string, error) { } return string(output), nil } + +// getCRIClient connects CRI and returns CRI runtime service clients and image service client. 
+func getCRIClient() (internalapi.RuntimeService, internalapi.ImageManagerService, error) { + // connection timeout for CRI service connection + const connectionTimeout = 2 * time.Minute + runtimeEndpoint := framework.TestContext.ContainerRuntimeEndpoint + r, err := remote.NewRemoteRuntimeService(runtimeEndpoint, connectionTimeout) + if err != nil { + return nil, nil, err + } + imageManagerEndpoint := runtimeEndpoint + if framework.TestContext.ImageServiceEndpoint != "" { + //ImageServiceEndpoint is the same as ContainerRuntimeEndpoint if not + //explicitly specified + imageManagerEndpoint = framework.TestContext.ImageServiceEndpoint + } + i, err := remote.NewRemoteImageService(imageManagerEndpoint, connectionTimeout) + if err != nil { + return nil, nil, err + } + return r, i, nil +} From e05a5b9f7a9723c12a652a7e1ff7fb069d60f623 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Mon, 8 Jan 2018 18:47:54 +0000 Subject: [PATCH 679/794] Remove unnecessary docker specific logic in node e2e test. 
--- test/e2e_node/BUILD | 3 +- test/e2e_node/container_manager_test.go | 27 +++++--- test/e2e_node/cpu_manager_test.go | 42 +++++++----- test/e2e_node/dockershim_checkpoint_test.go | 4 ++ test/e2e_node/garbage_collector_test.go | 71 ++++++++++----------- test/e2e_node/restart_test.go | 40 ++++++++---- 6 files changed, 108 insertions(+), 79 deletions(-) diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index e6431d05dee..e70755c5365 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -120,13 +120,14 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet:go_default_library", + "//pkg/kubelet/apis/cri:go_default_library", + "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/cm/cpumanager:go_default_library", "//pkg/kubelet/cm/cpuset:go_default_library", "//pkg/kubelet/container:go_default_library", - "//pkg/kubelet/dockershim/libdocker:go_default_library", "//pkg/kubelet/images:go_default_library", "//pkg/kubelet/kubeletconfig:go_default_library", "//pkg/kubelet/kubeletconfig/status:go_default_library", diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go index 54187d73a53..fa123c4d542 100644 --- a/test/e2e_node/container_manager_test.go +++ b/test/e2e_node/container_manager_test.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" @@ -76,10 +77,10 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() { f := framework.NewDefaultFramework("kubelet-container-manager") Describe("Validate OOM score adjustments", func() { Context("once the node is setup", func() { - It("docker daemon's oom-score-adj should be -999", func() { - dockerPids, err := getPidsForProcess(dockerProcessName, dockerPidFile) - Expect(err).To(BeNil(), "failed to get list of docker daemon pids") - for _, pid := range dockerPids { + It("container runtime's oom-score-adj should be -999", func() { + runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile) + Expect(err).To(BeNil(), "failed to get list of container runtime pids") + for _, pid := range runtimePids { Eventually(func() error { return validateOOMScoreAdjSetting(pid, -999) }, 5*time.Minute, 30*time.Second).Should(BeNil()) @@ -148,14 +149,22 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() { return validateOOMScoreAdjSetting(shPids[0], 1000) }, 2*time.Minute, time.Second*4).Should(BeNil()) }) - // Log the running containers here to help debugging. Use `docker ps` - // directly for now because the test is already docker specific. + // Log the running containers here to help debugging. 
AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - By("Dump all running docker containers") - output, err := exec.Command("docker", "ps").CombinedOutput() + By("Dump all running containers") + runtime, _, err := getCRIClient() Expect(err).NotTo(HaveOccurred()) - framework.Logf("Running docker containers:\n%s", string(output)) + containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{ + State: &runtimeapi.ContainerStateValue{ + State: runtimeapi.ContainerState_CONTAINER_RUNNING, + }, + }) + Expect(err).NotTo(HaveOccurred()) + framework.Logf("Running containers:\n") + for _, c := range containers { + framework.Logf("%+v\n", c) + } } }) }) diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 4186d78cb48..9ac0bfca4a6 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -27,9 +27,11 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/features" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" + "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" @@ -101,14 +103,21 @@ func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVa return cpuCap.Value(), (cpuCap.Value() - cpuRes.Value()), cpuRes.Value() } -// TODO(balajismaniam): Make this func generic to all container runtimes. 
-func waitForContainerRemoval(ctnPartName string) { +func waitForContainerRemoval(containerName, podName, podNS string) { + rs, _, err := getCRIClient() + Expect(err).NotTo(HaveOccurred()) Eventually(func() bool { - err := exec.Command("/bin/sh", "-c", fmt.Sprintf("if [ -n \"$(docker ps -a | grep -i %s)\" ]; then exit 1; fi", ctnPartName)).Run() + containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{ + LabelSelector: map[string]string{ + types.KubernetesPodNameLabel: podName, + types.KubernetesPodNamespaceLabel: podNS, + types.KubernetesContainerNameLabel: containerName, + }, + }) if err != nil { return false } - return true + return len(containers) == 0 }, 2*time.Minute, 1*time.Second).Should(BeTrue()) } @@ -135,9 +144,8 @@ func setOldKubeletConfig(f *framework.Framework, oldCfg *kubeletconfig.KubeletCo } func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.KubeletConfiguration) { - // Run only if the container runtime is Docker. - // TODO(balajismaniam): Make this test generic to all container runtimes. - framework.RunIfContainerRuntimeIs("docker") + // Run only if the container runtime is not docker or remote (not rkt). + framework.RunIfContainerRuntimeIs("docker", "remote") // Enable CPU Manager in Kubelet with static policy. 
oldCfg, err := getCurrentKubeletConfig() @@ -219,7 +227,7 @@ func runCPUManagerTests(f *framework.Framework) { By("by deleting the pods and waiting for container removal") deletePods(f, []string{pod.Name}) - waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name)) + waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace) By("running a Gu pod") ctnAttrs = []ctnAttribute{ @@ -245,7 +253,7 @@ func runCPUManagerTests(f *framework.Framework) { By("by deleting the pods and waiting for container removal") deletePods(f, []string{pod.Name}) - waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name)) + waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace) By("running multiple Gu and non-Gu pods") ctnAttrs = []ctnAttribute{ @@ -291,8 +299,8 @@ func runCPUManagerTests(f *framework.Framework) { By("by deleting the pods and waiting for container removal") deletePods(f, []string{pod1.Name, pod2.Name}) - waitForContainerRemoval(fmt.Sprintf("%s_%s", pod1.Spec.Containers[0].Name, pod1.Name)) - waitForContainerRemoval(fmt.Sprintf("%s_%s", pod2.Spec.Containers[0].Name, pod2.Name)) + waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace) + waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace) // Skip rest of the tests if CPU capacity < 3. 
if cpuCap < 3 { @@ -327,7 +335,7 @@ func runCPUManagerTests(f *framework.Framework) { By("by deleting the pods and waiting for container removal") deletePods(f, []string{pod.Name}) - waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name)) + waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace) By("running a Gu pod with multiple containers requesting integer CPUs") ctnAttrs = []ctnAttribute{ @@ -365,8 +373,8 @@ func runCPUManagerTests(f *framework.Framework) { By("by deleting the pods and waiting for container removal") deletePods(f, []string{pod.Name}) - waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name)) - waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[1].Name, pod.Name)) + waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace) + waitForContainerRemoval(pod.Spec.Containers[1].Name, pod.Name, pod.Namespace) By("running multiple Gu pods") ctnAttrs = []ctnAttribute{ @@ -410,15 +418,15 @@ func runCPUManagerTests(f *framework.Framework) { By("by deleting the pods and waiting for container removal") deletePods(f, []string{pod1.Name, pod2.Name}) - waitForContainerRemoval(fmt.Sprintf("%s_%s", pod1.Spec.Containers[0].Name, pod1.Name)) - waitForContainerRemoval(fmt.Sprintf("%s_%s", pod2.Spec.Containers[0].Name, pod2.Name)) + waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace) + waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace) setOldKubeletConfig(f, oldCfg) }) } // Serial because the test updates kubelet configuration. 
-var _ = SIGDescribe("CPU Manager [Feature:CPUManager]", func() { +var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager]", func() { f := framework.NewDefaultFramework("cpu-manager-test") Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() { diff --git a/test/e2e_node/dockershim_checkpoint_test.go b/test/e2e_node/dockershim_checkpoint_test.go index 0dfdfbfc11d..018213d1b22 100644 --- a/test/e2e_node/dockershim_checkpoint_test.go +++ b/test/e2e_node/dockershim_checkpoint_test.go @@ -45,6 +45,10 @@ const ( var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker]", func() { f := framework.NewDefaultFramework("dockerhism-checkpoint-test") + BeforeEach(func() { + framework.RunIfContainerRuntimeIs("docker") + }) + It("should clean up pod sandbox checkpoint after pod deletion", func() { podName := "pod-checkpoint-no-disrupt" runPodCheckpointTest(f, podName, func() { diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index dd4578ef031..04010cd6acd 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -19,12 +19,13 @@ package e2e_node import ( "fmt" "strconv" - "strings" "time" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" + "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" @@ -130,8 +131,7 @@ var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() { }, } for _, test := range tests { - // TODO (dashpole): Once the Container Runtime Interface (CRI) is complete, generalize run on other runtimes (other than docker) - dockerContainerGCTest(f, test) + containerGCTest(f, test) } }) @@ -142,6 +142,32 @@ var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() { // while containers are running, if not constrained by maxPerPodContainer or maxTotalContainers, keep an extra copy of each container // once pods are killed, all containers are eventually cleaned up func containerGCTest(f *framework.Framework, test testRun) { + var runtime internalapi.RuntimeService + BeforeEach(func() { + var err error + runtime, _, err = getCRIClient() + Expect(err).NotTo(HaveOccurred()) + }) + for _, pod := range test.testPods { + // Initialize the getContainerNames function to use CRI runtime client. + pod.getContainerNames = func() ([]string, error) { + relevantContainers := []string{} + containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{ + LabelSelector: map[string]string{ + types.KubernetesPodNameLabel: pod.podName, + types.KubernetesPodNamespaceLabel: f.Namespace.Name, + }, + }) + if err != nil { + return relevantContainers, err + } + for _, container := range containers { + relevantContainers = append(relevantContainers, container.Labels[types.KubernetesContainerNameLabel]) + } + return relevantContainers, nil + } + } + Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() { BeforeEach(func() { realPods := getPods(test.testPods) @@ -175,7 +201,7 @@ func containerGCTest(f *framework.Framework, test testRun) { for i := 0; i < pod.numContainers; i++ { containerCount := 0 for _, containerName := range containerNames { - if strings.Contains(containerName, pod.getContainerName(i)) { + if containerName == pod.getContainerName(i) { containerCount += 1 } } @@ -203,7 +229,7 @@ func 
containerGCTest(f *framework.Framework, test testRun) { for i := 0; i < pod.numContainers; i++ { containerCount := 0 for _, containerName := range containerNames { - if strings.Contains(containerName, pod.getContainerName(i)) { + if containerName == pod.getContainerName(i) { containerCount += 1 } } @@ -245,39 +271,6 @@ func containerGCTest(f *framework.Framework, test testRun) { }) } -// Runs containerGCTest using the docker runtime. -func dockerContainerGCTest(f *framework.Framework, test testRun) { - var runtime libdocker.Interface - BeforeEach(func() { - runtime = libdocker.ConnectToDockerOrDie( - defaultDockerEndpoint, - defaultRuntimeRequestTimeoutDuration, - defaultImagePullProgressDeadline, - false, - false, - ) - }) - for _, pod := range test.testPods { - // Initialize the getContainerNames function to use the libdocker api - thisPrefix := pod.containerPrefix - pod.getContainerNames = func() ([]string, error) { - relevantContainers := []string{} - dockerContainers, err := libdocker.GetKubeletDockerContainers(runtime, true) - if err != nil { - return relevantContainers, err - } - for _, container := range dockerContainers { - // only look for containers from this testspec - if strings.Contains(container.Names[0], thisPrefix) { - relevantContainers = append(relevantContainers, container.Names[0]) - } - } - return relevantContainers, nil - } - } - containerGCTest(f, test) -} - func getPods(specs []*testPodSpec) (pods []*v1.Pod) { for _, spec := range specs { By(fmt.Sprintf("Creating %v containers with restartCount: %v", spec.numContainers, spec.restartCount)) diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go index 747d0b97993..ac37cf606fe 100644 --- a/test/e2e_node/restart_test.go +++ b/test/e2e_node/restart_test.go @@ -28,6 +28,7 @@ import ( "os/exec" . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" "k8s.io/api/core/v1" testutils "k8s.io/kubernetes/test/utils" ) @@ -75,11 +76,11 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() { ) f := framework.NewDefaultFramework("restart-test") - Context("Docker Daemon", func() { + Context("Container Runtime", func() { Context("Network", func() { It("should recover from ip leak", func() { - pods := newTestPods(podCount, false, framework.GetPauseImageNameForHostArch(), "restart-docker-test") + pods := newTestPods(podCount, false, framework.GetPauseImageNameForHostArch(), "restart-container-runtime-test") By(fmt.Sprintf("Trying to create %d pods on node", len(pods))) createBatchPodWithRateControl(f, pods, podCreationInterval) defer deletePodsSync(f, pods) @@ -88,34 +89,47 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() { // startTimeout fit on the node and the node is now saturated. runningPods := waitForPods(f, podCount, startTimeout) if len(runningPods) < minPods { - framework.Failf("Failed to start %d pods, cannot test that restarting docker doesn't leak IPs", minPods) + framework.Failf("Failed to start %d pods, cannot test that restarting container runtime doesn't leak IPs", minPods) } for i := 0; i < restartCount; i += 1 { - By(fmt.Sprintf("Restarting Docker Daemon iteration %d", i)) - - // TODO: Find a uniform way to deal with systemctl/initctl/service operations. 
#34494 - if stdout, err := exec.Command("sudo", "systemctl", "restart", "docker").CombinedOutput(); err != nil { - framework.Logf("Failed to trigger docker restart with systemd/systemctl: %v, stdout: %q", err, string(stdout)) - if stdout, err = exec.Command("sudo", "service", "docker", "restart").CombinedOutput(); err != nil { - framework.Failf("Failed to trigger docker restart with upstart/service: %v, stdout: %q", err, string(stdout)) + By(fmt.Sprintf("Killing container runtime iteration %d", i)) + // Wait for container runtime to be running + var pid int + Eventually(func() error { + runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile) + if err != nil { + return err } + if len(runtimePids) != 1 { + return fmt.Errorf("unexpected container runtime pid list: %+v", runtimePids) + } + // Make sure the container runtime is running, pid got from pid file may not be running. + pid = runtimePids[0] + if _, err := exec.Command("sudo", "ps", "-p", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil { + return err + } + return nil + }, 1*time.Minute, 2*time.Second).Should(BeNil()) + if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil { + framework.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout)) } + // Assume that container runtime will be restarted by systemd/supervisord etc. 
time.Sleep(20 * time.Second) } By("Checking currently Running/Ready pods") postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout) if len(postRestartRunningPods) == 0 { - framework.Failf("Failed to start *any* pods after docker restart, this might indicate an IP leak") + framework.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak") } By("Confirm no containers have terminated") for _, pod := range postRestartRunningPods { if c := testutils.TerminatedContainers(pod); len(c) != 0 { - framework.Failf("Pod %q has failed containers %+v after docker restart, this might indicate an IP leak", pod.Name, c) + framework.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c) } } - By(fmt.Sprintf("Docker restart test passed with %d pods", len(postRestartRunningPods))) + By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods))) }) }) }) From d9b5773101e930431f24fe178d988271c1becc35 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Fri, 22 Dec 2017 17:09:51 -0500 Subject: [PATCH 680/794] Treat staging repos as authoritative for all files Move files from kubernetes/foo root back to kubernetes/kubernetes/staging/src/k8s.io/foo root Then: - add CONTRIBUTING.md for all staging repos - add .PULL_REQUEST_TEMPLATE to all staging repos - ignore .github while diffing generated protobuf --- hack/verify-generated-protobuf.sh | 2 +- .../api/.github/PULL_REQUEST_TEMPLATE.md | 2 ++ staging/src/k8s.io/api/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../apiextensions-apiserver/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../src/k8s.io/apimachinery/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ staging/src/k8s.io/client-go/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ 
.../src/k8s.io/code-generator/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../k8s.io/kube-aggregator/CONTRIBUTING.md | 7 ++++++ .../metrics/.github/PULL_REQUEST_TEMPLATE.md | 2 ++ staging/src/k8s.io/metrics/CONTRIBUTING.md | 25 +++---------------- .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../k8s.io/sample-apiserver/CONTRIBUTING.md | 7 ++++++ .../.github/PULL_REQUEST_TEMPLATE.md | 2 ++ .../k8s.io/sample-controller/CONTRIBUTING.md | 7 ++++++ 20 files changed, 80 insertions(+), 23 deletions(-) create mode 100644 staging/src/k8s.io/api/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/api/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/apiextensions-apiserver/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/apiextensions-apiserver/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/apimachinery/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/apimachinery/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/apiserver/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/client-go/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/code-generator/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/kube-aggregator/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/kube-aggregator/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/metrics/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/sample-apiserver/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/sample-apiserver/CONTRIBUTING.md create mode 100644 staging/src/k8s.io/sample-controller/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 staging/src/k8s.io/sample-controller/CONTRIBUTING.md diff --git a/hack/verify-generated-protobuf.sh 
b/hack/verify-generated-protobuf.sh index 3c8518c4af3..a57f0fe4d77 100755 --- a/hack/verify-generated-protobuf.sh +++ b/hack/verify-generated-protobuf.sh @@ -43,7 +43,7 @@ for APIROOT in ${APIROOTS}; do TMP_APIROOT="${_tmp}/${APIROOT}" echo "diffing ${APIROOT} against freshly generated protobuf" ret=0 - diff -Naupr -I 'Auto generated by' -x 'zz_generated.*' "${KUBE_ROOT}/${APIROOT}" "${TMP_APIROOT}" || ret=$? + diff -Naupr -I 'Auto generated by' -x 'zz_generated.*' -x '.github' "${KUBE_ROOT}/${APIROOT}" "${TMP_APIROOT}" || ret=$? cp -a "${TMP_APIROOT}"/* "${KUBE_ROOT}/${APIROOT}/" if [[ $ret -eq 0 ]]; then echo "${APIROOT} up to date." diff --git a/staging/src/k8s.io/api/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/api/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/api/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/api/CONTRIBUTING.md b/staging/src/k8s.io/api/CONTRIBUTING.md new file mode 100644 index 00000000000..d9e171333cc --- /dev/null +++ b/staging/src/k8s.io/api/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/api](https://git.k8s.io/kubernetes/staging/src/k8s.io/api) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). 
+ +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/apiextensions-apiserver/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/apiextensions-apiserver/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/apiextensions-apiserver/CONTRIBUTING.md b/staging/src/k8s.io/apiextensions-apiserver/CONTRIBUTING.md new file mode 100644 index 00000000000..cea7c91b260 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/apiextensions-apiserver](https://git.k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). 
+ +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/apimachinery/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/apimachinery/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/apimachinery/CONTRIBUTING.md b/staging/src/k8s.io/apimachinery/CONTRIBUTING.md new file mode 100644 index 00000000000..41eb4c62bc6 --- /dev/null +++ b/staging/src/k8s.io/apimachinery/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/apimachinery](https://git.k8s.io/kubernetes/staging/src/k8s.io/apimachinery) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/apiserver/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/apiserver/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/apiserver/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. 
Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/client-go/CONTRIBUTING.md b/staging/src/k8s.io/client-go/CONTRIBUTING.md new file mode 100644 index 00000000000..df408d019e6 --- /dev/null +++ b/staging/src/k8s.io/client-go/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/client-go](https://git.k8s.io/kubernetes/staging/src/k8s.io/client-go) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. 
diff --git a/staging/src/k8s.io/code-generator/CONTRIBUTING.md b/staging/src/k8s.io/code-generator/CONTRIBUTING.md new file mode 100644 index 00000000000..da7836aa972 --- /dev/null +++ b/staging/src/k8s.io/code-generator/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/code-generator](https://git.k8s.io/kubernetes/staging/src/k8s.io/code-generator) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/kube-aggregator/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/kube-aggregator/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/kube-aggregator/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/kube-aggregator/CONTRIBUTING.md b/staging/src/k8s.io/kube-aggregator/CONTRIBUTING.md new file mode 100644 index 00000000000..483b3cbc435 --- /dev/null +++ b/staging/src/k8s.io/kube-aggregator/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). 
Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/kube-aggregator](https://git.k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/metrics/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/metrics/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/metrics/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/metrics/CONTRIBUTING.md b/staging/src/k8s.io/metrics/CONTRIBUTING.md index 18eca2f0726..e35f90ad56e 100644 --- a/staging/src/k8s.io/metrics/CONTRIBUTING.md +++ b/staging/src/k8s.io/metrics/CONTRIBUTING.md @@ -1,26 +1,7 @@ # Contributing guidelines -## How to become a contributor and submit your own code +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. -### Contributor License Agreements +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/metrics](https://git.k8s.io/kubernetes/staging/src/k8s.io/metrics) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). -We'd love to accept your patches! 
Before we can take them, we have to jump a couple of legal hurdles. - -Please fill out either the individual or corporate Contributor License Agreement (CLA). - - * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](https://identity.linuxfoundation.org/node/285/node/285/individual-signup). - * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](https://identity.linuxfoundation.org/node/285/organization-signup). - -Follow either of the two links above to access the appropriate CLA and instructions for how to sign and return it. Once we receive it, we'll be able to accept your pull requests. - -### Contributing A Patch - -1. Submit an issue describing your proposed change to the repo in question. -1. The [repo owners](OWNERS) will respond to your issue promptly. -1. If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above). -1. Fork the desired repo, develop and test your code changes. -1. Submit a pull request. - -### Adding dependencies - -If your patch depends on new packages, add that package with [`godep`](https://github.com/tools/godep). Follow the [instructions to add a dependency](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/development.md#godep-and-dependency-management). +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/sample-apiserver/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/sample-apiserver/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/sample-apiserver/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. 
Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/sample-apiserver/CONTRIBUTING.md b/staging/src/k8s.io/sample-apiserver/CONTRIBUTING.md new file mode 100644 index 00000000000..8379e606490 --- /dev/null +++ b/staging/src/k8s.io/sample-apiserver/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/sample-apiserver](https://git.k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/staging/src/k8s.io/sample-controller/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/sample-controller/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..e559c074bb5 --- /dev/null +++ b/staging/src/k8s.io/sample-controller/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,2 @@ +Sorry, we do not accept changes directly against this repository. Please see +CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/staging/src/k8s.io/sample-controller/CONTRIBUTING.md b/staging/src/k8s.io/sample-controller/CONTRIBUTING.md new file mode 100644 index 00000000000..3598f4aa55a --- /dev/null +++ b/staging/src/k8s.io/sample-controller/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. 
Instead, please open pull requests against [kuberentes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/sample-controller](https://git.k8s.io/kubernetes/staging/src/k8s.io/sample-controller) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information From 5029bb56c434c0099fd1d2e78de7531c69430753 Mon Sep 17 00:00:00 2001 From: Chao Xu Date: Tue, 9 Jan 2018 15:58:18 -0800 Subject: [PATCH 681/794] Let mutating webhook defaults the object after applying the patch sent back by the webhook --- .../plugin/webhook/mutating/admission.go | 8 +- test/e2e/apimachinery/webhook.go | 78 +++++++++++++++++++ test/images/webhook/Makefile | 4 +- test/images/webhook/main.go | 33 ++++++++ test/utils/image/manifest.go | 2 +- 5 files changed, 120 insertions(+), 5 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go index f944152770d..ec0ae942b69 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go @@ -112,6 +112,7 @@ type MutatingWebhook struct { namespaceMatcher namespace.Matcher clientManager config.ClientManager convertor versioned.Convertor + defaulter runtime.ObjectDefaulter jsonSerializer runtime.Serializer } @@ -137,6 +138,7 @@ func (a *MutatingWebhook) SetScheme(scheme *runtime.Scheme) { Serializer: serializer.NewCodecFactory(scheme).LegacyCodec(admissionv1beta1.SchemeGroupVersion), })) a.convertor.Scheme = scheme + a.defaulter = scheme 
a.jsonSerializer = json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false) } } @@ -171,6 +173,9 @@ func (a *MutatingWebhook) ValidateInitialization() error { if err := a.convertor.Validate(); err != nil { return fmt.Errorf("MutatingWebhook.convertor is not properly setup: %v", err) } + if a.defaulter == nil { + return fmt.Errorf("MutatingWebhook.defaulter is not properly setup: %v") + } go a.hookSource.Run(wait.NeverStop) return nil } @@ -312,10 +317,9 @@ func (a *MutatingWebhook) callAttrMutatingHook(ctx context.Context, h *v1beta1.W if err != nil { return apierrors.NewInternalError(err) } - // TODO: if we have multiple mutating webhooks, we can remember the json - // instead of encoding and decoding for each one. if _, _, err := a.jsonSerializer.Decode(patchedJS, nil, attr.Object); err != nil { return apierrors.NewInternalError(err) } + a.defaulter.Default(attr.Object) return nil } diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 755055c9696..63d69315f1f 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -133,6 +133,12 @@ var _ = SIGDescribe("AdmissionWebhook", func() { testMutatingConfigMapWebhook(f) }) + It("Should mutate pod and apply defaults after mutation", func() { + registerMutatingWebhookForPod(f, context) + defer client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(mutatingWebhookConfigName, nil) + testMutatingPodWebhook(f) + }) + It("Should mutate crd", func() { crdCleanup, dynamicClient := createCRD(f) defer crdCleanup() @@ -423,6 +429,7 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certCo // The webhook configuration is honored in 1s. 
time.Sleep(10 * time.Second) } + func testMutatingConfigMapWebhook(f *framework.Framework) { By("create a configmap that should be updated by the webhook") client := f.ClientSet @@ -439,6 +446,77 @@ func testMutatingConfigMapWebhook(f *framework.Framework) { } } +func registerMutatingWebhookForPod(f *framework.Framework, context *certContext) { + client := f.ClientSet + By("Registering the mutating pod webhook via the AdmissionRegistration API") + + namespace := f.Namespace.Name + + _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: mutatingWebhookConfigName, + }, + Webhooks: []v1beta1.Webhook{ + { + Name: "adding-init-container.k8s.io", + Rules: []v1beta1.RuleWithOperations{{ + Operations: []v1beta1.OperationType{v1beta1.Create}, + Rule: v1beta1.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"pods"}, + }, + }}, + ClientConfig: v1beta1.WebhookClientConfig{ + Service: &v1beta1.ServiceReference{ + Namespace: namespace, + Name: serviceName, + Path: strPtr("/mutating-pods"), + }, + CABundle: context.signingCert, + }, + }, + }, + }) + framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", mutatingWebhookConfigName, namespace) + + // The webhook configuration is honored in 1s. 
+ time.Sleep(10 * time.Second) +} + +func testMutatingPodWebhook(f *framework.Framework) { + By("create a pod that should be updated by the webhook") + client := f.ClientSet + configMap := toBeMutatedPod(f) + mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(configMap) + Expect(err).To(BeNil()) + if len(mutatedPod.Spec.InitContainers) != 1 { + framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers) + } + if got, expected := mutatedPod.Spec.InitContainers[0].Name, "webhook-added-init-container"; got != expected { + framework.Failf("expect the init container name to be %q, got %q", expected, got) + } + if got, expected := mutatedPod.Spec.InitContainers[0].TerminationMessagePolicy, v1.TerminationMessageReadFile; got != expected { + framework.Failf("expect the init terminationMessagePolicy to be default to %q, got %q", expected, got) + } +} + +func toBeMutatedPod(f *framework.Framework) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "webhook-to-be-mutated", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "example", + Image: framework.GetPauseImageName(f.ClientSet), + }, + }, + }, + } +} + func testWebhook(f *framework.Framework) { By("create a pod that should be denied by the webhook") client := f.ClientSet diff --git a/test/images/webhook/Makefile b/test/images/webhook/Makefile index a201dd5b233..d9ce02940ef 100644 --- a/test/images/webhook/Makefile +++ b/test/images/webhook/Makefile @@ -14,7 +14,7 @@ build: CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o webhook . - docker build --no-cache -t gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v7 . + docker build --no-cache -t gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.9v1 . 
rm -rf webhook push: - gcloud docker -- push gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v7 + gcloud docker -- push gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.9v1 diff --git a/test/images/webhook/main.go b/test/images/webhook/main.go index da2e4e9d3fb..bdf68ba9c72 100644 --- a/test/images/webhook/main.go +++ b/test/images/webhook/main.go @@ -40,6 +40,9 @@ const ( patch2 string = `[ { "op": "add", "path": "/data/mutation-stage-2", "value": "yes" } ]` + addInitContainerPatch string = `[ + {"op":"add","path":"/spec/initContainers","value":[{"image":"webhook-added-image","name":"webhook-added-init-container","resources":{}}]} + ]` ) // Config contains the server (the webhook) cert and key. @@ -108,6 +111,31 @@ func admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { return &reviewResponse } +func mutatePods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { + glog.V(2).Info("mutating pods") + podResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} + if ar.Request.Resource != podResource { + glog.Errorf("expect resource to be %s", podResource) + return nil + } + + raw := ar.Request.Object.Raw + pod := corev1.Pod{} + deserializer := codecs.UniversalDeserializer() + if _, _, err := deserializer.Decode(raw, nil, &pod); err != nil { + glog.Error(err) + return toAdmissionResponse(err) + } + reviewResponse := v1beta1.AdmissionResponse{} + reviewResponse.Allowed = true + if pod.Name == "webhook-to-be-mutated" { + reviewResponse.Patch = []byte(addInitContainerPatch) + pt := v1beta1.PatchTypeJSONPatch + reviewResponse.PatchType = &pt + } + return &reviewResponse +} + // deny configmaps with specific key-value pair. 
func admitConfigMaps(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { glog.V(2).Info("admitting configmaps") @@ -271,6 +299,10 @@ func servePods(w http.ResponseWriter, r *http.Request) { serve(w, r, admitPods) } +func serveMutatePods(w http.ResponseWriter, r *http.Request) { + serve(w, r, mutatePods) +} + func serveConfigmaps(w http.ResponseWriter, r *http.Request) { serve(w, r, admitConfigMaps) } @@ -293,6 +325,7 @@ func main() { flag.Parse() http.HandleFunc("/pods", servePods) + http.HandleFunc("/mutating-pods", serveMutatePods) http.HandleFunc("/configmaps", serveConfigmaps) http.HandleFunc("/mutating-configmaps", serveMutateConfigmaps) http.HandleFunc("/crd", serveCRD) diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index 7b19ed01861..bac17165482 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -48,7 +48,7 @@ func (i *ImageConfig) SetVersion(version string) { } var ( - AdmissionWebhook = ImageConfig{e2eRegistry, "k8s-sample-admission-webhook", "1.8v7", true} + AdmissionWebhook = ImageConfig{e2eRegistry, "k8s-sample-admission-webhook", "1.9v1", true} APIServer = ImageConfig{e2eRegistry, "k8s-aggregator-sample-apiserver", "1.7v2", true} AppArmorLoader = ImageConfig{gcRegistry, "apparmor-loader", "0.1", false} BusyBox = ImageConfig{gcRegistry, "busybox", "1.24", false} From 3d986e7cfe60ec0e1a75b59fe093af0d059c0669 Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Tue, 9 Jan 2018 16:11:40 -0800 Subject: [PATCH 682/794] Remove the empty vsphere directory from cluster/ --- cluster/vsphere/README.md | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 cluster/vsphere/README.md diff --git a/cluster/vsphere/README.md b/cluster/vsphere/README.md deleted file mode 100644 index d031bca0df1..00000000000 --- a/cluster/vsphere/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Please use [Kubernetes-anywhere](https://github.com/kubernetes/kubernetes-anywhere) to get started on vSphere. 
- - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/vsphere/README.md?pixel)]() From 7a75a7b43b742868eda50a36a2e9341d65aec5f5 Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Tue, 9 Jan 2018 16:14:15 -0800 Subject: [PATCH 683/794] Remove aws from the cluster/ directory. --- cluster/aws/util.sh | 26 -------------------------- 1 file changed, 26 deletions(-) delete mode 100755 cluster/aws/util.sh diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh deleted file mode 100755 index 39830b9be85..00000000000 --- a/cluster/aws/util.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. -source "${KUBE_ROOT}/hack/lib/util.sh" - -echo -e "${color_red}WARNING${color_norm}: The bash deployment for AWS is obsolete. The" >&2 -echo -e "v1.5.x releases are the last to support cluster/kube-up.sh with AWS." 
>&2 -echo "For a list of viable alternatives, see:" >&2 -echo >&2 -echo " http://kubernetes.io/docs/getting-started-guides/aws/" >&2 -echo >&2 -exit 1 From 6894e3d32bc9990c82b0681aff280e0f9b844db3 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:21:53 -0800 Subject: [PATCH 684/794] Support utilities --- .../providers/gce/cloud/utils.go | 167 +++++++++++++++ .../providers/gce/cloud/utils_test.go | 197 ++++++++++++++++++ 2 files changed, 364 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/utils.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/utils_test.go diff --git a/pkg/cloudprovider/providers/gce/cloud/utils.go b/pkg/cloudprovider/providers/gce/cloud/utils.go new file mode 100644 index 00000000000..dd4a07cfd05 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/utils.go @@ -0,0 +1,167 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +const ( + gaPrefix = "https://www.googleapis.com/compute/v1/" + alphaPrefix = "https://www.googleapis.com/compute/alpha/" + betaPrefix = "https://www.googleapis.com/compute/beta/" +) + +var ( + allPrefixes = []string{gaPrefix, alphaPrefix, betaPrefix} +) + +// ResourceID identifies a GCE resource as parsed from compute resource URL. 
+type ResourceID struct { + ProjectID string + Resource string + Key *meta.Key +} + +// Equal returns true if two resource IDs are equal. +func (r *ResourceID) Equal(other *ResourceID) bool { + if r.ProjectID != other.ProjectID || r.Resource != other.Resource { + return false + } + if r.Key != nil && other.Key != nil { + return *r.Key == *other.Key + } + if r.Key == nil && other.Key == nil { + return true + } + return false +} + +// ParseResourceURL parses resource URLs of the following formats: +// +// projects//global// +// projects//regions/// +// projects//zones/// +// [https://www.googleapis.com/compute/]/projects//global// +// [https://www.googleapis.com/compute/]/projects//regions/// +// [https://www.googleapis.com/compute/]/projects//zones/// +func ParseResourceURL(url string) (*ResourceID, error) { + errNotValid := fmt.Errorf("%q is not a valid resource URL", url) + + // Remove the "https://..." prefix if present + for _, prefix := range allPrefixes { + if strings.HasPrefix(url, prefix) { + if len(url) < len(prefix) { + return nil, errNotValid + } + url = url[len(prefix):] + break + } + } + + parts := strings.Split(url, "/") + if len(parts) < 2 || parts[0] != "projects" { + return nil, errNotValid + } + + ret := &ResourceID{ProjectID: parts[1]} + if len(parts) == 2 { + ret.Resource = "projects" + return ret, nil + } + + if len(parts) < 4 { + return nil, errNotValid + } + + if len(parts) == 4 { + switch parts[2] { + case "regions": + ret.Resource = "regions" + ret.Key = meta.GlobalKey(parts[3]) + return ret, nil + case "zones": + ret.Resource = "zones" + ret.Key = meta.GlobalKey(parts[3]) + return ret, nil + default: + return nil, errNotValid + } + } + + switch parts[2] { + case "global": + if len(parts) != 5 { + return nil, errNotValid + } + ret.Resource = parts[3] + ret.Key = meta.GlobalKey(parts[4]) + return ret, nil + case "regions": + if len(parts) != 6 { + return nil, errNotValid + } + ret.Resource = parts[4] + ret.Key = meta.RegionalKey(parts[5], 
parts[3]) + return ret, nil + case "zones": + if len(parts) != 6 { + return nil, errNotValid + } + ret.Resource = parts[4] + ret.Key = meta.ZonalKey(parts[5], parts[3]) + return ret, nil + } + return nil, errNotValid +} + +func copyViaJSON(dest, src interface{}) error { + bytes, err := json.Marshal(src) + if err != nil { + return err + } + return json.Unmarshal(bytes, dest) +} + +// SelfLink returns the self link URL for the given object. +func SelfLink(ver meta.Version, project, resource string, key meta.Key) string { + var prefix string + switch ver { + case meta.VersionAlpha: + prefix = alphaPrefix + case meta.VersionBeta: + prefix = betaPrefix + case meta.VersionGA: + prefix = gaPrefix + default: + prefix = "invalid-prefix" + } + + switch key.Type() { + case meta.Zonal: + return fmt.Sprintf("%sprojects/%s/zones/%s/%s/%s", prefix, project, key.Zone, resource, key.Name) + case meta.Regional: + return fmt.Sprintf("%sprojects/%s/regions/%s/%s/%s", prefix, project, key.Region, resource, key.Name) + case meta.Global: + return fmt.Sprintf("%sprojects/%s/%s/%s", prefix, project, resource, key.Name) + } + return "invalid-self-link" +} diff --git a/pkg/cloudprovider/providers/gce/cloud/utils_test.go b/pkg/cloudprovider/providers/gce/cloud/utils_test.go new file mode 100644 index 00000000000..823c8e73c88 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/utils_test.go @@ -0,0 +1,197 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cloud + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +func TestParseResourceURL(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + in string + r *ResourceID + }{ + { + "https://www.googleapis.com/compute/v1/projects/some-gce-project", + &ResourceID{"some-gce-project", "projects", nil}, + }, + { + "https://www.googleapis.com/compute/v1/projects/some-gce-project/regions/us-central1", + &ResourceID{"some-gce-project", "regions", meta.GlobalKey("us-central1")}, + }, + { + "https://www.googleapis.com/compute/v1/projects/some-gce-project/zones/us-central1-b", + &ResourceID{"some-gce-project", "zones", meta.GlobalKey("us-central1-b")}, + }, + { + "https://www.googleapis.com/compute/v1/projects/some-gce-project/global/operations/operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf", + &ResourceID{"some-gce-project", "operations", meta.GlobalKey("operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf")}, + }, + { + "https://www.googleapis.com/compute/alpha/projects/some-gce-project/regions/us-central1/addresses/my-address", + &ResourceID{"some-gce-project", "addresses", meta.RegionalKey("my-address", "us-central1")}, + }, + { + "https://www.googleapis.com/compute/v1/projects/some-gce-project/zones/us-central1-c/instances/instance-1", + &ResourceID{"some-gce-project", "instances", meta.ZonalKey("instance-1", "us-central1-c")}, + }, + { + "projects/some-gce-project", + &ResourceID{"some-gce-project", "projects", nil}, + }, + { + "projects/some-gce-project/regions/us-central1", + &ResourceID{"some-gce-project", "regions", meta.GlobalKey("us-central1")}, + }, + { + "projects/some-gce-project/zones/us-central1-b", + &ResourceID{"some-gce-project", "zones", meta.GlobalKey("us-central1-b")}, + }, + { + "projects/some-gce-project/global/operations/operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf", + &ResourceID{"some-gce-project", "operations", 
meta.GlobalKey("operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf")}, + }, + { + "projects/some-gce-project/regions/us-central1/addresses/my-address", + &ResourceID{"some-gce-project", "addresses", meta.RegionalKey("my-address", "us-central1")}, + }, + { + "projects/some-gce-project/zones/us-central1-c/instances/instance-1", + &ResourceID{"some-gce-project", "instances", meta.ZonalKey("instance-1", "us-central1-c")}, + }, + } { + r, err := ParseResourceURL(tc.in) + if err != nil { + t.Errorf("ParseResourceURL(%q) = %+v, %v; want _, nil", tc.in, r, err) + continue + } + if !r.Equal(tc.r) { + t.Errorf("ParseResourceURL(%q) = %+v, nil; want %+v, nil", tc.in, r, tc.r) + } + } + // Malformed URLs. + for _, tc := range []string{ + "", + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/b/c/d", + "/a/b/c/d/e", + "/a/b/c/d/e/f", + "https://www.googleapis.com/compute/v1/projects/some-gce-project/global", + "projects/some-gce-project/global", + "projects/some-gce-project/global/foo/bar/baz", + "projects/some-gce-project/zones/us-central1-c/res", + "projects/some-gce-project/zones/us-central1-c/res/name/extra", + "https://www.googleapis.com/compute/gamma/projects/some-gce-project/global/addresses/name", + } { + r, err := ParseResourceURL(tc) + if err == nil { + t.Errorf("ParseResourceURL(%q) = %+v, %v, want _, error", tc, r, err) + } + } +} + +type A struct { + A, B, C string +} + +type B struct { + A, B, D string +} + +type E struct{} + +func (*E) MarshalJSON() ([]byte, error) { + return nil, errors.New("injected error") +} + +func TestCopyVisJSON(t *testing.T) { + t.Parallel() + + var b B + srcA := &A{"aa", "bb", "cc"} + err := copyViaJSON(&b, srcA) + if err != nil { + t.Errorf(`copyViaJSON(&b, %+v) = %v, want nil`, srcA, err) + } else { + expectedB := B{"aa", "bb", ""} + if b != expectedB { + t.Errorf("b == %+v, want %+v", b, expectedB) + } + } + + var a A + srcB := &B{"aaa", "bbb", "ccc"} + err = copyViaJSON(&a, srcB) + if err != nil { + t.Errorf(`copyViaJSON(&a, %+v) = %v, 
want nil`, srcB, err) + } else { + expectedA := A{"aaa", "bbb", ""} + if a != expectedA { + t.Errorf("a == %+v, want %+v", a, expectedA) + } + } + + if err := copyViaJSON(&a, &E{}); err == nil { + t.Errorf("copyViaJSON(&a, &E{}) = nil, want error") + } +} + +func TestSelfLink(t *testing.T) { + t.Parallel() + + for _, tc := range []struct{ + ver meta.Version + project string + resource string + key meta.Key + want string + }{ + { + meta.VersionAlpha, + "proj1", + "addresses", + *meta.RegionalKey("key1", "us-central1"), + "https://www.googleapis.com/compute/alpha/projects/proj1/regions/us-central1/addresses/key1", + }, + { + meta.VersionBeta, + "proj3", + "disks", + *meta.ZonalKey("key2", "us-central1-b"), + "https://www.googleapis.com/compute/beta/projects/proj3/zones/us-central1-b/disks/key2", + }, + { + meta.VersionGA, + "proj4", + "urlMaps", + *meta.GlobalKey("key3"), + "https://www.googleapis.com/compute/v1/projects/proj4/urlMaps/key3", + }, + }{ + if link := SelfLink(tc.ver, tc.project, tc.resource, tc.key); link != tc.want { + t.Errorf("SelfLink(%v, %q, %q, %v) = %v, want %q", tc.ver, tc.project, tc.resource, tc.key, link, tc.want) + } + } +} From b19149406eda9730a8f233ab374e3521fdc08016 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:34:06 -0800 Subject: [PATCH 685/794] "meta" type descriptions used for code generation --- .../providers/gce/cloud/meta/doc.go | 19 + .../providers/gce/cloud/meta/key.go | 96 +++++ .../providers/gce/cloud/meta/key_test.go | 75 ++++ .../providers/gce/cloud/meta/meta.go | 372 ++++++++++++++++++ .../providers/gce/cloud/meta/method.go | 241 ++++++++++++ .../providers/gce/cloud/meta/service.go | 273 +++++++++++++ 6 files changed, 1076 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/doc.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/key.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/key_test.go create mode 100644 
pkg/cloudprovider/providers/gce/cloud/meta/meta.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/method.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/service.go diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/doc.go b/pkg/cloudprovider/providers/gce/cloud/meta/doc.go new file mode 100644 index 00000000000..7aa24e06379 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package meta contains the meta description of the GCE cloud types to +// generate code for. +package meta diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/key.go b/pkg/cloudprovider/providers/gce/cloud/meta/key.go new file mode 100644 index 00000000000..fff2543c7b0 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/key.go @@ -0,0 +1,96 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package meta + +import ( + "fmt" +) + +// Key for a GCP resource. +type Key struct { + Name string + Zone string + Region string +} + +// KeyType is the type of the key. +type KeyType string + +const ( + // Zonal key type. + Zonal = "zonal" + // Regional key type. + Regional = "regional" + // Global key type. + Global = "global" +) + +// ZonalKey returns the key for a zonal resource. +func ZonalKey(name, zone string) *Key { + return &Key{name, zone, ""} +} + +// RegionalKey returns the key for a regional resource. +func RegionalKey(name, region string) *Key { + return &Key{name, "", region} +} + +// GlobalKey returns the key for a global resource. +func GlobalKey(name string) *Key { + return &Key{name, "", ""} +} + +// Type returns the type of the key. +func (k *Key) Type() KeyType { + switch { + case k.Zone != "": + return Zonal + case k.Region != "": + return Regional + default: + return Global + } +} + +// String returns a string representation of the key. +func (k Key) String() string { + switch k.Type() { + case Zonal: + return fmt.Sprintf("Key{%q, zone: %q}", k.Name, k.Zone) + case Regional: + return fmt.Sprintf("Key{%q, region: %q}", k.Name, k.Region) + default: + return fmt.Sprintf("Key{%q}", k.Name) + } +} + +// Valid is true if the key is valid. +func (k *Key) Valid(typeName string) bool { + if k.Zone != "" && k.Region != "" { + return false + } + return true +} + +// KeysToMap creates a map[Key]bool from a list of keys. +func KeysToMap(keys ...Key) map[Key]bool { + ret := map[Key]bool{} + for _, k := range keys { + ret[k] = true + } + return ret +} diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/key_test.go b/pkg/cloudprovider/providers/gce/cloud/meta/key_test.go new file mode 100644 index 00000000000..0f1a6df8b4c --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/key_test.go @@ -0,0 +1,75 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "testing" +) + +func TestKeyType(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + key *Key + want KeyType + }{ + {GlobalKey("abc"), Global}, + {ZonalKey("abc", "us-central1-b"), Zonal}, + {RegionalKey("abc", "us-central1"), Regional}, + } { + if tc.key.Type() != tc.want { + t.Errorf("key.Type() == %v, want %v", tc.key.Type(), tc.want) + } + } +} + +func TestKeyString(t *testing.T) { + t.Parallel() + + for _, k := range []*Key{ + GlobalKey("abc"), + RegionalKey("abc", "us-central1"), + ZonalKey("abc", "us-central1-b"), + } { + if k.String() == "" { + t.Errorf(`k.String() = "", want non-empty`) + } + } +} + +func TestKeyValid(t *testing.T) { + t.Parallel() + + region := "us-central1" + zone := "us-central1-b" + + for _, tc := range []struct { + key *Key + typeName string + want bool + }{ + // Note: these test cases need to be synchronized with the + // actual settings for each type. 
+ {GlobalKey("abc"), "UrlMap", true}, + {&Key{"abc", zone, region}, "UrlMap", false}, + } { + valid := tc.key.Valid(tc.typeName) + if valid != tc.want { + t.Errorf("key %+v, type %v; key.Valid() = %v, want %v", tc.key, tc.typeName, valid, tc.want) + } + } +} diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/meta.go b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go new file mode 100644 index 00000000000..3f60c00f412 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go @@ -0,0 +1,372 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "reflect" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" +) + +// Version of the API (ga, alpha, beta). +type Version string + +const ( + // NoGet prevents the Get() method from being generated. + NoGet = 1 << iota + // NoList prevents the List() method from being generated. + NoList = 1 << iota + // NoDelete prevents the Delete() method from being generated. + NoDelete = 1 << iota + // NoInsert prevents the Insert() method from being generated. + NoInsert = 1 << iota + // CustomOps specifies that an empty interface xxxOps will be generated to + // enable custom method calls to be attached to the generated service + // interface. + CustomOps = 1 << iota + // AggregatedList will generated a method for AggregatedList(). 
+ AggregatedList = 1 << iota + + // ReadOnly specifies that the given resource is read-only and should not + // have insert() or delete() methods generated for the wrapper. + ReadOnly = NoDelete | NoInsert + + // VersionGA is the API version in compute.v1. + VersionGA Version = "ga" + // VersionAlpha is the API version in computer.v0.alpha. + VersionAlpha Version = "alpha" + // VersionBeta is the API version in computer.v0.beta. + VersionBeta Version = "beta" +) + +// AllVersions is a list of all versions of the GCE API. +var AllVersions = []Version{ + VersionGA, + VersionAlpha, + VersionBeta, +} + +// AllServices are a list of all the services to generate code for. Keep +// this list in lexiographical order by object type. +var AllServices = []*ServiceInfo{ + &ServiceInfo{ + Object: "Address", + Service: "Addresses", + Resource: "addresses", + keyType: Regional, + serviceType: reflect.TypeOf(&ga.AddressesService{}), + }, + &ServiceInfo{ + Object: "Address", + Service: "Addresses", + Resource: "addresses", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.AddressesService{}), + }, + &ServiceInfo{ + Object: "Address", + Service: "Addresses", + Resource: "addresses", + version: VersionBeta, + keyType: Regional, + serviceType: reflect.TypeOf(&beta.AddressesService{}), + }, + &ServiceInfo{ + Object: "Address", + Service: "GlobalAddresses", + Resource: "addresses", + keyType: Global, + serviceType: reflect.TypeOf(&ga.GlobalAddressesService{}), + }, + &ServiceInfo{ + Object: "BackendService", + Service: "BackendServices", + Resource: "backendServices", + keyType: Global, + serviceType: reflect.TypeOf(&ga.BackendServicesService{}), + additionalMethods: []string{ + "GetHealth", + "Update", + }, + }, + &ServiceInfo{ + Object: "BackendService", + Service: "BackendServices", + Resource: "backendServices", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.BackendServicesService{}), + additionalMethods: 
[]string{"Update"}, + }, + &ServiceInfo{ + Object: "BackendService", + Service: "RegionBackendServices", + Resource: "backendServices", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.RegionBackendServicesService{}), + additionalMethods: []string{ + "GetHealth", + "Update", + }, + }, + &ServiceInfo{ + Object: "Disk", + Service: "Disks", + Resource: "disks", + keyType: Zonal, + serviceType: reflect.TypeOf(&ga.DisksService{}), + }, + &ServiceInfo{ + Object: "Disk", + Service: "Disks", + Resource: "disks", + version: VersionAlpha, + keyType: Zonal, + serviceType: reflect.TypeOf(&alpha.DisksService{}), + }, + &ServiceInfo{ + Object: "Disk", + Service: "RegionDisks", + Resource: "disks", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.DisksService{}), + }, + &ServiceInfo{ + Object: "Firewall", + Service: "Firewalls", + Resource: "firewalls", + keyType: Global, + serviceType: reflect.TypeOf(&ga.FirewallsService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "ForwardingRule", + Service: "ForwardingRules", + Resource: "forwardingRules", + keyType: Regional, + serviceType: reflect.TypeOf(&ga.ForwardingRulesService{}), + }, + &ServiceInfo{ + Object: "ForwardingRule", + Service: "ForwardingRules", + Resource: "forwardingRules", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.ForwardingRulesService{}), + }, + &ServiceInfo{ + Object: "ForwardingRule", + Service: "GlobalForwardingRules", + Resource: "forwardingRules", + keyType: Global, + serviceType: reflect.TypeOf(&ga.GlobalForwardingRulesService{}), + additionalMethods: []string{ + "SetTarget", + }, + }, + &ServiceInfo{ + Object: "HealthCheck", + Service: "HealthChecks", + Resource: "healthChecks", + keyType: Global, + serviceType: reflect.TypeOf(&ga.HealthChecksService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "HealthCheck", + Service: 
"HealthChecks", + Resource: "healthChecks", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.HealthChecksService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "HttpHealthCheck", + Service: "HttpHealthChecks", + Resource: "httpHealthChecks", + keyType: Global, + serviceType: reflect.TypeOf(&ga.HttpHealthChecksService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "HttpsHealthCheck", + Service: "HttpsHealthChecks", + Resource: "httpsHealthChecks", + keyType: Global, + serviceType: reflect.TypeOf(&ga.HttpsHealthChecksService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "InstanceGroup", + Service: "InstanceGroups", + Resource: "instanceGroups", + keyType: Zonal, + serviceType: reflect.TypeOf(&ga.InstanceGroupsService{}), + additionalMethods: []string{ + "AddInstances", + "ListInstances", + "RemoveInstances", + "SetNamedPorts", + }, + }, + &ServiceInfo{ + Object: "Instance", + Service: "Instances", + Resource: "instances", + keyType: Zonal, + serviceType: reflect.TypeOf(&ga.InstancesService{}), + additionalMethods: []string{ + "AttachDisk", + "DetachDisk", + }, + }, + &ServiceInfo{ + Object: "Instance", + Service: "Instances", + Resource: "instances", + version: VersionBeta, + keyType: Zonal, + serviceType: reflect.TypeOf(&beta.InstancesService{}), + additionalMethods: []string{ + "AttachDisk", + "DetachDisk", + }, + }, + &ServiceInfo{ + Object: "Instance", + Service: "Instances", + Resource: "instances", + version: VersionAlpha, + keyType: Zonal, + serviceType: reflect.TypeOf(&alpha.InstancesService{}), + additionalMethods: []string{ + "AttachDisk", + "DetachDisk", + "UpdateNetworkInterface", + }, + }, + &ServiceInfo{ + Object: "NetworkEndpointGroup", + Service: "NetworkEndpointGroups", + Resource: "networkEndpointGroups", + version: VersionAlpha, + keyType: Zonal, + serviceType: 
reflect.TypeOf(&alpha.NetworkEndpointGroupsService{}), + additionalMethods: []string{ + "AttachNetworkEndpoints", + "DetachNetworkEndpoints", + }, + options: AggregatedList, + }, + &ServiceInfo{ + Object: "Project", + Service: "Projects", + Resource: "projects", + keyType: Global, + // Generate only the stub with no methods. + options: NoGet | NoList | NoInsert | NoDelete | CustomOps, + serviceType: reflect.TypeOf(&ga.ProjectsService{}), + }, + &ServiceInfo{ + Object: "Region", + Service: "Regions", + Resource: "regions", + keyType: Global, + options: ReadOnly, + serviceType: reflect.TypeOf(&ga.RegionsService{}), + }, + &ServiceInfo{ + Object: "Route", + Service: "Routes", + Resource: "routes", + keyType: Global, + serviceType: reflect.TypeOf(&ga.RoutesService{}), + }, + &ServiceInfo{ + Object: "SslCertificate", + Service: "SslCertificates", + Resource: "sslCertificates", + keyType: Global, + serviceType: reflect.TypeOf(&ga.SslCertificatesService{}), + }, + &ServiceInfo{ + Object: "TargetHttpProxy", + Service: "TargetHttpProxies", + Resource: "targetHttpProxies", + keyType: Global, + serviceType: reflect.TypeOf(&ga.TargetHttpProxiesService{}), + additionalMethods: []string{ + "SetUrlMap", + }, + }, + &ServiceInfo{ + Object: "TargetHttpsProxy", + Service: "TargetHttpsProxies", + Resource: "targetHttpsProxies", + keyType: Global, + serviceType: reflect.TypeOf(&ga.TargetHttpsProxiesService{}), + additionalMethods: []string{ + "SetSslCertificates", + "SetUrlMap", + }, + }, + &ServiceInfo{ + Object: "TargetPool", + Service: "TargetPools", + Resource: "targetPools", + keyType: Regional, + serviceType: reflect.TypeOf(&ga.TargetPoolsService{}), + additionalMethods: []string{ + "AddInstance", + "RemoveInstance", + }, + }, + &ServiceInfo{ + Object: "UrlMap", + Service: "UrlMaps", + Resource: "urlMaps", + keyType: Global, + serviceType: reflect.TypeOf(&ga.UrlMapsService{}), + additionalMethods: []string{ + "Update", + }, + }, + &ServiceInfo{ + Object: "Zone", + Service: 
"Zones", + Resource: "zones", + keyType: Global, + options: ReadOnly, + serviceType: reflect.TypeOf(&ga.ZonesService{}), + }, +} diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/method.go b/pkg/cloudprovider/providers/gce/cloud/meta/method.go new file mode 100644 index 00000000000..5adf065fae4 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/method.go @@ -0,0 +1,241 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/glog" +) + +func newArg(t reflect.Type) *arg { + ret := &arg{} + + // Dereference the pointer types to get at the underlying concrete type. +Loop: + for { + switch t.Kind() { + case reflect.Ptr: + ret.numPtr++ + t = t.Elem() + default: + ret.pkg = t.PkgPath() + ret.typeName += t.Name() + break Loop + } + } + return ret +} + +type arg struct { + pkg, typeName string + numPtr int +} + +func (a *arg) normalizedPkg() string { + if a.pkg == "" { + return "" + } + + // Strip the repo.../vendor/ prefix from the package path if present. + parts := strings.Split(a.pkg, "/") + // Remove vendor prefix. + for i := 0; i < len(parts); i++ { + if parts[i] == "vendor" { + parts = parts[i+1:] + break + } + } + switch strings.Join(parts, "/") { + case "google.golang.org/api/compute/v1": + return "ga." + case "google.golang.org/api/compute/v0.alpha": + return "alpha." + case "google.golang.org/api/compute/v0.beta": + return "beta." 
+ default: + panic(fmt.Errorf("unhandled package %q", a.pkg)) + } +} + +func (a *arg) String() string { + var ret string + for i := 0; i < a.numPtr; i++ { + ret += "*" + } + ret += a.normalizedPkg() + ret += a.typeName + return ret +} + +// newMethod returns a newly initialized method. +func newMethod(s *ServiceInfo, m reflect.Method) *Method { + ret := &Method{s, m, ""} + ret.init() + return ret +} + +// Method is used to generate the calling code non-standard methods. +type Method struct { + *ServiceInfo + m reflect.Method + + ReturnType string +} + +// argsSkip is the number of arguments to skip when generating the +// synthesized method. +func (mr *Method) argsSkip() int { + switch mr.keyType { + case Zonal: + return 4 + case Regional: + return 4 + case Global: + return 3 + } + panic(fmt.Errorf("invalid KeyType %v", mr.keyType)) +} + +// args return a list of arguments to the method, skipping the first skip +// elements. If nameArgs is true, then the arguments will include a generated +// parameter name (arg). prefix will be added to the parameters. +func (mr *Method) args(skip int, nameArgs bool, prefix []string) []string { + var args []*arg + fType := mr.m.Func.Type() + for i := 0; i < fType.NumIn(); i++ { + t := fType.In(i) + args = append(args, newArg(t)) + } + + var a []string + for i := skip; i < fType.NumIn(); i++ { + if nameArgs { + a = append(a, fmt.Sprintf("arg%d %s", i-skip, args[i])) + } else { + a = append(a, args[i].String()) + } + } + return append(prefix, a...) +} + +func (mr *Method) init() { + fType := mr.m.Func.Type() + if fType.NumIn() < mr.argsSkip() { + err := fmt.Errorf("method %q.%q, arity = %d which is less than required (< %d)", + mr.Service, mr.Name(), fType.NumIn(), mr.argsSkip()) + panic(err) + } + // Skipped args should all be string (they will be projectID, zone, region etc). 
+ for i := 1; i < mr.argsSkip(); i++ { + if fType.In(i).Kind() != reflect.String { + panic(fmt.Errorf("method %q.%q: skipped args can only be strings", mr.Service, mr.Name())) + } + } + // Return of the method must return a single value of type *xxxCall. + if fType.NumOut() != 1 || fType.Out(0).Kind() != reflect.Ptr || !strings.HasSuffix(fType.Out(0).Elem().Name(), "Call") { + panic(fmt.Errorf("method %q.%q: generator only supports methods returning an *xxxCall object", + mr.Service, mr.Name())) + } + returnType := fType.Out(0) + returnTypeName := fType.Out(0).Elem().Name() + // xxxCall must have a Do() method. + doMethod, ok := returnType.MethodByName("Do") + if !ok { + panic(fmt.Errorf("method %q.%q: return type %q does not have a Do() method", + mr.Service, mr.Name(), returnTypeName)) + } + // Do() method must return (*T, error). + switch doMethod.Func.Type().NumOut() { + case 2: + glog.Infof("Method %q.%q: return type %q of Do() = %v, %v", + mr.Service, mr.Name(), returnTypeName, doMethod.Func.Type().Out(0), doMethod.Func.Type().Out(1)) + out0 := doMethod.Func.Type().Out(0) + if out0.Kind() != reflect.Ptr { + panic(fmt.Errorf("method %q.%q: return type %q of Do() = S, _; S must be pointer type (%v)", + mr.Service, mr.Name(), returnTypeName, out0)) + } + mr.ReturnType = out0.Elem().Name() + if out0.Elem().Name() == "Operation" { + glog.Infof("Method %q.%q is an *Operation", mr.Service, mr.Name()) + } else { + glog.Infof("Method %q.%q returns %v", mr.Service, mr.Name(), out0) + } + // Second argument must be "error". 
+ if doMethod.Func.Type().Out(1).Name() != "error" { + panic(fmt.Errorf("method %q.%q: return type %q of Do() = S, T; T must be 'error'", + mr.Service, mr.Name(), returnTypeName)) + } + break + default: + panic(fmt.Errorf("method %q.%q: %q Do() return type is not handled by the generator", + mr.Service, mr.Name(), returnTypeName)) + } +} + +func (mr *Method) Name() string { + return mr.m.Name +} + +func (mr *Method) CallArgs() string { + var args []string + for i := mr.argsSkip(); i < mr.m.Func.Type().NumIn(); i++ { + args = append(args, fmt.Sprintf("arg%d", i-mr.argsSkip())) + } + if len(args) == 0 { + return "" + } + return fmt.Sprintf(", %s", strings.Join(args, ", ")) +} + +func (mr *Method) MockHookName() string { + return mr.m.Name + "Hook" +} + +func (mr *Method) MockHook() string { + args := mr.args(mr.argsSkip(), false, []string{ + fmt.Sprintf("*%s", mr.MockWrapType()), + "context.Context", + "meta.Key", + }) + if mr.ReturnType == "Operation" { + return fmt.Sprintf("%v func(%v) error", mr.MockHookName(), strings.Join(args, ", ")) + } + return fmt.Sprintf("%v func(%v) (*%v.%v, error)", mr.MockHookName(), strings.Join(args, ", "), mr.Version(), mr.ReturnType) +} + +func (mr *Method) FcnArgs() string { + args := mr.args(mr.argsSkip(), true, []string{ + "ctx context.Context", + "key meta.Key", + }) + + if mr.ReturnType == "Operation" { + return fmt.Sprintf("%v(%v) error", mr.m.Name, strings.Join(args, ", ")) + } + return fmt.Sprintf("%v(%v) (*%v.%v, error)", mr.m.Name, strings.Join(args, ", "), mr.Version(), mr.ReturnType) +} + +func (mr *Method) InterfaceFunc() string { + args := mr.args(mr.argsSkip(), false, []string{"context.Context", "meta.Key"}) + if mr.ReturnType == "Operation" { + return fmt.Sprintf("%v(%v) error", mr.m.Name, strings.Join(args, ", ")) + } + return fmt.Sprintf("%v(%v) (*%v.%v, error)", mr.m.Name, strings.Join(args, ", "), mr.Version(), mr.ReturnType) +} diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/service.go 
b/pkg/cloudprovider/providers/gce/cloud/meta/service.go new file mode 100644 index 00000000000..ffa3385075b --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/service.go @@ -0,0 +1,273 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "errors" + "fmt" + "reflect" +) + +// ServiceInfo defines the entry for a Service that code will be generated for. +type ServiceInfo struct { + // Object is the Go name of the object type that the service deals + // with. Example: "ForwardingRule". + Object string + // Service is the Go name of the service struct i.e. where the methods + // are defined. Examples: "GlobalForwardingRules". + Service string + // Resource is the plural noun of the resource in the compute API URL (e.g. + // "forwardingRules"). + Resource string + // version if unspecified will be assumed to be VersionGA. + version Version + keyType KeyType + serviceType reflect.Type + + additionalMethods []string + options int + aggregatedListField string +} + +// Version returns the version of the Service, defaulting to GA if APIVersion +// is empty. +func (i *ServiceInfo) Version() Version { + if i.version == "" { + return VersionGA + } + return i.version +} + +// VersionTitle returns the capitalized golang CamelCase name for the version. 
+func (i *ServiceInfo) VersionTitle() string { + switch i.Version() { + case VersionGA: + return "GA" + case VersionAlpha: + return "Alpha" + case VersionBeta: + return "Beta" + } + panic(fmt.Errorf("invalid version %q", i.Version())) +} + +// WrapType is the name of the wrapper service type. +func (i *ServiceInfo) WrapType() string { + switch i.Version() { + case VersionGA: + return i.Service + case VersionAlpha: + return "Alpha" + i.Service + case VersionBeta: + return "Beta" + i.Service + } + return "Invalid" +} + +// WrapTypeOps is the name of the additional operations type. +func (i *ServiceInfo) WrapTypeOps() string { + return i.WrapType() + "Ops" +} + +// FQObjectType is fully qualified name of the object (e.g. compute.Instance). +func (i *ServiceInfo) FQObjectType() string { + return fmt.Sprintf("%v.%v", i.Version(), i.Object) +} + +// ObjectListType is the compute List type for the object (contains Items field). +func (i *ServiceInfo) ObjectListType() string { + return fmt.Sprintf("%v.%vList", i.Version(), i.Object) +} + +// ObjectAggregatedListType is the compute List type for the object (contains Items field). +func (i *ServiceInfo) ObjectAggregatedListType() string { + return fmt.Sprintf("%v.%vAggregatedList", i.Version(), i.Object) +} + +// MockWrapType is the name of the concrete mock for this type. +func (i *ServiceInfo) MockWrapType() string { + return "Mock" + i.WrapType() +} + +// MockField is the name of the field in the mock struct. +func (i *ServiceInfo) MockField() string { + return "Mock" + i.WrapType() +} + +// GCEWrapType is the name of the GCE wrapper type. +func (i *ServiceInfo) GCEWrapType() string { + return "GCE" + i.WrapType() +} + +// Field is the name of the GCE struct. +func (i *ServiceInfo) Field() string { + return "gce" + i.WrapType() +} + +// Methods returns a list of additional methods to generate code for. 
+func (i *ServiceInfo) Methods() []*Method { + methods := map[string]bool{} + for _, m := range i.additionalMethods { + methods[m] = true + } + + var ret []*Method + for j := 0; j < i.serviceType.NumMethod(); j++ { + m := i.serviceType.Method(j) + if _, ok := methods[m.Name]; !ok { + continue + } + ret = append(ret, newMethod(i, m)) + methods[m.Name] = false + } + + for k, b := range methods { + if b { + panic(fmt.Errorf("method %q was not found in service %q", k, i.Service)) + } + } + + return ret +} + +// KeyIsGlobal is true if the key is global. +func (i *ServiceInfo) KeyIsGlobal() bool { + return i.keyType == Global +} + +// KeyIsRegional is true if the key is regional. +func (i *ServiceInfo) KeyIsRegional() bool { + return i.keyType == Regional +} + +// KeyIsZonal is true if the key is zonal. +func (i *ServiceInfo) KeyIsZonal() bool { + return i.keyType == Zonal +} + +// MakeKey returns the call used to create the appropriate key type. +func (i *ServiceInfo) MakeKey(name, location string) string { + switch i.keyType { + case Global: + return fmt.Sprintf("GlobalKey(%q)", name) + case Regional: + return fmt.Sprintf("RegionalKey(%q, %q)", name, location) + case Zonal: + return fmt.Sprintf("ZonalKey(%q, %q)", name, location) + } + return "Invalid" +} + +// GenerateGet is true if the method is to be generated. +func (i *ServiceInfo) GenerateGet() bool { + return i.options&NoGet == 0 +} + +// GenerateList is true if the method is to be generated. +func (i *ServiceInfo) GenerateList() bool { + return i.options&NoList == 0 +} + +// GenerateDelete is true if the method is to be generated. +func (i *ServiceInfo) GenerateDelete() bool { + return i.options&NoDelete == 0 +} + +// GenerateInsert is true if the method is to be generated. +func (i *ServiceInfo) GenerateInsert() bool { + return i.options&NoInsert == 0 +} + +// GenerateCustomOps is true if we should generated a xxxOps interface for +// adding additional methods to the generated interface. 
+func (i *ServiceInfo) GenerateCustomOps() bool { + return i.options&CustomOps != 0 +} + +// AggregatedList is true if the method is to be generated. +func (i *ServiceInfo) AggregatedList() bool { + return i.options&AggregatedList != 0 +} + +// AggregatedListField is the name of the field used for the aggregated list +// call. This is typically the same as the name of the service, but can be +// customized by setting the aggregatedListField field. +func (i *ServiceInfo) AggregatedListField() string { + if i.aggregatedListField == "" { + return i.Service + } + return i.aggregatedListField +} + +// ServiceGroup is a grouping of the same service but at different API versions. +type ServiceGroup struct { + Alpha *ServiceInfo + Beta *ServiceInfo + GA *ServiceInfo +} + +func (sg *ServiceGroup) Service() string { + switch { + case sg.GA != nil: + return sg.GA.Service + case sg.Alpha != nil: + return sg.Alpha.Service + case sg.Beta != nil: + return sg.Beta.Service + default: + panic(errors.New("service group is empty")) + } +} + +func (sg *ServiceGroup) HasGA() bool { + return sg.GA != nil +} + +func (sg *ServiceGroup) HasAlpha() bool { + return sg.Alpha != nil +} + +func (sg *ServiceGroup) HasBeta() bool { + return sg.Beta != nil +} + +// groupServices together by version. +func groupServices(services []*ServiceInfo) map[string]*ServiceGroup { + ret := map[string]*ServiceGroup{} + for _, si := range services { + if _, ok := ret[si.Service]; !ok { + ret[si.Service] = &ServiceGroup{} + } + group := ret[si.Service] + switch si.Version() { + case VersionAlpha: + group.Alpha = si + case VersionBeta: + group.Beta = si + case VersionGA: + group.GA = si + } + } + return ret +} + +// AllServicesByGroup is a map of service name to ServicesGroup. 
+var AllServicesByGroup map[string]*ServiceGroup + +func init() { + AllServicesByGroup = groupServices(AllServices) +} From 94ddfd17e769bc4b4b407adc864233252d62eb38 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:34:30 -0800 Subject: [PATCH 686/794] Implementation of the compute "filter" handling for List() --- .../providers/gce/cloud/filter/filter.go | 303 ++++++++++++++++++ .../providers/gce/cloud/filter/filter_test.go | 176 ++++++++++ 2 files changed, 479 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/filter/filter.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/filter/filter_test.go diff --git a/pkg/cloudprovider/providers/gce/cloud/filter/filter.go b/pkg/cloudprovider/providers/gce/cloud/filter/filter.go new file mode 100644 index 00000000000..c08005726c8 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/filter/filter.go @@ -0,0 +1,303 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package filter encapsulates the filter argument to compute API calls. +// +// // List all global addresses (no filter). +// c.GlobalAddresses().List(ctx, filter.None) +// +// // List global addresses filtering for name matching "abc.*". +// c.GlobalAddresses().List(ctx, filter.Regexp("name", "abc.*")) +// +// // List on multiple conditions. 
+// f := filter.Regexp("name", "homer.*").AndNotRegexp("name", "homers") +// c.GlobalAddresses().List(ctx, f) +package filter + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/golang/glog" +) + +var ( + // None indicates that the List result set should not be filtered (i.e. + // return all values). + None *F ) + +// Regexp returns a filter for fieldName matching regexp v. +func Regexp(fieldName, v string) *F { + return (&F{}).AndRegexp(fieldName, v) +} + +// NotRegexp returns a filter for fieldName not matching regexp v. +func NotRegexp(fieldName, v string) *F { + return (&F{}).AndNotRegexp(fieldName, v) +} + +// EqualInt returns a filter for fieldName == v. +func EqualInt(fieldName string, v int) *F { + return (&F{}).AndEqualInt(fieldName, v) +} + +// NotEqualInt returns a filter for fieldName != v. +func NotEqualInt(fieldName string, v int) *F { + return (&F{}).AndNotEqualInt(fieldName, v) +} + +// EqualBool returns a filter for fieldName == v. +func EqualBool(fieldName string, v bool) *F { + return (&F{}).AndEqualBool(fieldName, v) +} + +// NotEqualBool returns a filter for fieldName != v. +func NotEqualBool(fieldName string, v bool) *F { + return (&F{}).AndNotEqualBool(fieldName, v) +} + +// F is a filter to be used with List() operations. +// +// From the compute API description: +// +// Sets a filter {expression} for filtering listed resources. Your {expression} +// must be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only atomic field +// types are supported (string, number, boolean). The comparison_string must be +// either eq (equals) or ne (not equals). The literal_string is the string value +// to filter to. The literal value must be valid for the type of field you are +// filtering by (string, number, boolean). For string fields, the literal value is +// interpreted as a regular expression using RE2 syntax. 
The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on instances +// that have set the scheduling.automaticRestart field to true. Use filtering on +// nested fields to take advantage of labels to organize and search for results +// based on label values. +// +// To filter on multiple expressions, provide each separate expression within +// parentheses. For example, (scheduling.automaticRestart eq true) +// (zone eq us-central1-f). Multiple expressions are treated as AND expressions, +// meaning that resources must match all expressions to pass the filters. +type F struct { + predicates []filterPredicate +} + +// And joins two filters together. +func (fl *F) And(rest *F) *F { + fl.predicates = append(fl.predicates, rest.predicates...) + return fl +} + +// AndRegexp adds a field match string predicate. +func (fl *F) AndRegexp(fieldName, v string) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, s: &v}) + return fl +} + +// AndNotRegexp adds a field not match string predicate. +func (fl *F) AndNotRegexp(fieldName, v string) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: notEquals, s: &v}) + return fl +} + +// AndEqualInt adds a field == int predicate. +func (fl *F) AndEqualInt(fieldName string, v int) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, i: &v}) + return fl +} + +// AndNotEqualInt adds a field != int predicate. +func (fl *F) AndNotEqualInt(fieldName string, v int) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: notEquals, i: &v}) + return fl +} + +// AndEqualBool adds a field == bool predicate. 
+func (fl *F) AndEqualBool(fieldName string, v bool) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, b: &v}) + return fl +} + +// AndNotEqualBool adds a field != bool predicate. +func (fl *F) AndNotEqualBool(fieldName string, v bool) *F { + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: notEquals, b: &v}) + return fl +} + +func (fl *F) String() string { + if len(fl.predicates) == 1 { + return fl.predicates[0].String() + } + + var pl []string + for _, p := range fl.predicates { + pl = append(pl, "("+p.String()+")") + } + return strings.Join(pl, " ") +} + +// Match returns true if the F as specified matches the given object. This +// is used by the Mock implementations to perform filtering and SHOULD NOT be +// used in production code as it is not well-tested to be equivalent to the +// actual compute API. +func (fl *F) Match(obj interface{}) bool { + if fl == nil { + return true + } + for _, p := range fl.predicates { + if !p.match(obj) { + return false + } + } + return true +} + +type filterOp int + +const ( + equals filterOp = iota + notEquals filterOp = iota +) + +// filterPredicate is an individual predicate for a fieldName and value. +type filterPredicate struct { + fieldName string + + op filterOp + s *string + i *int + b *bool +} + +func (fp *filterPredicate) String() string { + var op string + switch fp.op { + case equals: + op = "eq" + case notEquals: + op = "ne" + default: + op = "invalidOp" + } + + var value string + switch { + case fp.s != nil: + // There does not seem to be any sort of escaping as specified in the + // document. This means it's possible to create malformed expressions. 
+ value = *fp.s + case fp.i != nil: + value = fmt.Sprintf("%d", *fp.i) + case fp.b != nil: + value = fmt.Sprintf("%t", *fp.b) + default: + value = "invalidValue" + } + + return fmt.Sprintf("%s %s %s", fp.fieldName, op, value) +} + +func (fp *filterPredicate) match(o interface{}) bool { + v, err := extractValue(fp.fieldName, o) + glog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err) + if err != nil { + return false + } + + var match bool + switch x := v.(type) { + case string: + if fp.s == nil { + return false + } + re, err := regexp.Compile(*fp.s) + if err != nil { + glog.Errorf("Match regexp %q is invalid: %v", *fp.s, err) + return false + } + match = re.Match([]byte(x)) + case int: + if fp.i == nil { + return false + } + match = x == *fp.i + case bool: + if fp.b == nil { + return false + } + match = x == *fp.b + } + + switch fp.op { + case equals: + return match + case notEquals: + return !match + } + + return false +} + +// snakeToCamelCase converts from "names_like_this" to "NamesLikeThis" to +// interoperate between proto and Golang naming conventions. +func snakeToCamelCase(s string) string { + parts := strings.Split(s, "_") + var ret string + for _, x := range parts { + ret += strings.Title(x) + } + return ret +} + +// extractValue returns the value of the field named by path in object o if it exists. +func extractValue(path string, o interface{}) (interface{}, error) { + parts := strings.Split(path, ".") + for _, f := range parts { + v := reflect.ValueOf(o) + // Dereference Ptr to handle *struct. 
+ if v.Kind() == reflect.Ptr { + if v.IsNil() { + return nil, errors.New("field is nil") + } + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("cannot get field from non-struct (%T)", o) + } + v = v.FieldByName(snakeToCamelCase(f)) + if !v.IsValid() { + return nil, fmt.Errorf("cannot get field %q as it is not a valid field in %T", f, o) + } + if !v.CanInterface() { + return nil, fmt.Errorf("cannot get field %q in obj of type %T", f, o) + } + o = v.Interface() + } + switch o.(type) { + case string, int, bool: + return o, nil + } + return nil, fmt.Errorf("unhandled object of type %T", o) +} diff --git a/pkg/cloudprovider/providers/gce/cloud/filter/filter_test.go b/pkg/cloudprovider/providers/gce/cloud/filter/filter_test.go new file mode 100644 index 00000000000..46b3c279a47 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/filter/filter_test.go @@ -0,0 +1,176 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filter + +import ( + "reflect" + "testing" +) + +func TestFilterToString(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + f *F + want string + }{ + {Regexp("field1", "abc"), `field1 eq abc`}, + {NotRegexp("field1", "abc"), `field1 ne abc`}, + {EqualInt("field1", 13), "field1 eq 13"}, + {NotEqualInt("field1", 13), "field1 ne 13"}, + {EqualBool("field1", true), "field1 eq true"}, + {NotEqualBool("field1", true), "field1 ne true"}, + {Regexp("field1", "abc").AndRegexp("field2", "def"), `(field1 eq abc) (field2 eq def)`}, + {Regexp("field1", "abc").AndNotEqualInt("field2", 17), `(field1 eq abc) (field2 ne 17)`}, + {Regexp("field1", "abc").And(EqualInt("field2", 17)), `(field1 eq abc) (field2 eq 17)`}, + } { + if tc.f.String() != tc.want { + t.Errorf("filter %#v String() = %q, want %q", tc.f, tc.f.String(), tc.want) + } + } +} + +func TestFilterMatch(t *testing.T) { + t.Parallel() + + type inner struct { + X string + } + type S struct { + S string + I int + B bool + Unhandled struct{} + NestedField *inner + } + + for _, tc := range []struct { + f *F + o interface{} + want bool + }{ + {f: None, o: &S{}, want: true}, + {f: Regexp("s", "abc"), o: &S{}}, + {f: EqualInt("i", 10), o: &S{}}, + {f: EqualBool("b", true), o: &S{}}, + {f: NotRegexp("s", "abc"), o: &S{}, want: true}, + {f: NotEqualInt("i", 10), o: &S{}, want: true}, + {f: NotEqualBool("b", true), o: &S{}, want: true}, + {f: Regexp("s", "abc").AndEqualBool("b", true), o: &S{}}, + {f: Regexp("s", "abc"), o: &S{S: "abc"}, want: true}, + {f: Regexp("s", "a.*"), o: &S{S: "abc"}, want: true}, + {f: Regexp("s", "a((("), o: &S{S: "abc"}}, + {f: NotRegexp("s", "abc"), o: &S{S: "abc"}}, + {f: EqualInt("i", 10), o: &S{I: 11}}, + {f: EqualInt("i", 10), o: &S{I: 10}, want: true}, + {f: Regexp("s", "abc").AndEqualBool("b", true), o: &S{S: "abc"}}, + {f: Regexp("s", "abcd").AndEqualBool("b", true), o: &S{S: "abc"}}, + {f: Regexp("s", "abc").AndEqualBool("b", true), o: &S{S: "abc", B: true}, want: 
true}, + {f: Regexp("s", "abc").And(EqualBool("b", true)), o: &S{S: "abc", B: true}, want: true}, + {f: Regexp("unhandled", "xyz"), o: &S{}}, + {f: Regexp("nested_field.x", "xyz"), o: &S{}}, + {f: Regexp("nested_field.x", "xyz"), o: &S{NestedField: &inner{"xyz"}}, want: true}, + {f: NotRegexp("nested_field.x", "xyz"), o: &S{NestedField: &inner{"xyz"}}}, + {f: Regexp("nested_field.y", "xyz"), o: &S{NestedField: &inner{"xyz"}}}, + {f: Regexp("nested_field", "xyz"), o: &S{NestedField: &inner{"xyz"}}}, + } { + got := tc.f.Match(tc.o) + if got != tc.want { + t.Errorf("%v: Match(%+v) = %v, want %v", tc.f, tc.o, got, tc.want) + } + } +} + +func TestFilterSnakeToCamelCase(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + s string + want string + }{ + {"", ""}, + {"abc", "Abc"}, + {"_foo", "Foo"}, + {"a_b_c", "ABC"}, + {"a_BC_def", "ABCDef"}, + {"a_Bc_def", "ABcDef"}, + } { + got := snakeToCamelCase(tc.s) + if got != tc.want { + t.Errorf("snakeToCamelCase(%q) = %q, want %q", tc.s, got, tc.want) + } + } +} + +func TestFilterExtractValue(t *testing.T) { + t.Parallel() + + type nest2 struct { + Y string + } + type nest struct { + X string + Nest2 nest2 + } + st := &struct { + S string + I int + F bool + Nest nest + NestPtr *nest + + Unhandled float64 + }{ + "abc", + 13, + true, + nest{"xyz", nest2{"zzz"}}, + &nest{"yyy", nest2{}}, + 0.0, + } + + for _, tc := range []struct { + path string + o interface{} + want interface{} + wantErr bool + }{ + {path: "s", o: st, want: "abc"}, + {path: "i", o: st, want: 13}, + {path: "f", o: st, want: true}, + {path: "nest.x", o: st, want: "xyz"}, + {path: "nest_ptr.x", o: st, want: "yyy"}, + // Error cases. 
+ {path: "", o: st, wantErr: true}, + {path: "no_such_field", o: st, wantErr: true}, + {path: "s.invalid_type", o: st, wantErr: true}, + {path: "unhandled", o: st, wantErr: true}, + {path: "nest.x", o: &struct{ Nest *nest }{}, wantErr: true}, + } { + o, err := extractValue(tc.path, tc.o) + gotErr := err != nil + if gotErr != tc.wantErr { + t.Errorf("extractValue(%v, %+v) = %v, %v; gotErr = %v, tc.wantErr = %v", tc.path, tc.o, o, err, gotErr, tc.wantErr) + } + if err != nil { + continue + } + if !reflect.DeepEqual(o, tc.want) { + t.Errorf("extractValue(%v, %+v) = %v, nil; want %v, nil", tc.path, tc.o, o, tc.want) + } + } +} From 8250950d15521f9ffa1f966a5b358bda65197f49 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:34:37 -0800 Subject: [PATCH 687/794] documentation --- pkg/cloudprovider/providers/gce/cloud/doc.go | 111 +++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/doc.go diff --git a/pkg/cloudprovider/providers/gce/cloud/doc.go b/pkg/cloudprovider/providers/gce/cloud/doc.go new file mode 100644 index 00000000000..d0d7a6cfb19 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/doc.go @@ -0,0 +1,111 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cloud implements a more golang friendly interface to the GCE compute +// API. The code in this package is generated automatically via the generator +// implemented in "gen/main.go". 
The code generator creates the basic CRUD +// actions for the given resource: "Insert", "Get", "List" and "Delete". +// Additional methods by customizing the ServiceInfo object (see below). +// Generated code includes a full mock of the GCE compute API. +// +// Usage +// +// The root of the GCE compute API is the interface "Cloud". Code written using +// Cloud can be used against the actual implementation "GCE" or "MockGCE". +// +// func foo(cloud Cloud) { +// igs, err := cloud.InstanceGroups().List(ctx, "us-central1-b", filter.None) +// ... +// } +// // Run foo against the actual cloud. +// foo(NewGCE(&Service{...})) +// // Run foo with a mock. +// foo(NewMockGCE()) +// +// Rate limiting and routing +// +// The generated code allows for custom policies for operation rate limiting +// and GCE project routing. See RateLimiter and ProjectRouter for more details. +// +// Mocks +// +// Mocks are automatically generated for each type implementing basic logic for +// resource manipulation. This eliminates the boilerplate required to mock GCE +// functionality. Each method will also have a corresponding "xxxHook" +// function generated in the mock structure where unit test code can hook the +// execution of the method. +// +// Mocks for different versions of the same service will share the same set of +// objects, i.e. an alpha object will be visible with beta and GA methods. +// Note that translation is done with JSON serialization between the API versions. +// +// Changing service code generation +// +// The list of services to generate is contained in "meta/meta.go". To add a +// service, add an entry to the list "meta.AllServices". An example entry: +// +// &ServiceInfo{ +// Object: "InstanceGroup", // Name of the object type. +// Service: "InstanceGroups", // Name of the service. +// version: meta.VersionAlpha, // API version (one entry per version is needed). +// keyType: Zonal, // What kind of resource this is. 
+// serviceType: reflect.TypeOf(&alpha.InstanceGroupsService{}), // Associated golang type. +// additionalMethods: []string{ // Additional methods to generate code for. +// "SetNamedPorts", +// }, +// options: // Or'd ("|") together. +// } +// +// Read-only objects +// +// Services such as Regions and Zones do not allow for mutations. Specify +// "ReadOnly" in ServiceInfo.options to omit the mutation methods. +// +// Adding custom methods +// +// Some methods that may not be properly handled by the generated code. To enable +// addition of custom code to the generated mocks, set the "CustomOps" option +// in "meta.ServiceInfo" entry. This will make the generated service interface +// embed a "Ops" interface. This interface MUST be written by hand +// and contain the custom method logic. Corresponding methods must be added to +// the corresponding Mockxxx and GCExxx struct types. +// +// // In "meta/meta.go": +// &ServiceInfo{ +// Object: "InstanceGroup", +// ... +// options: CustomOps, +// } +// +// // In the generated code "gen.go": +// type InstanceGroups interface { +// InstanceGroupsOps // Added by CustomOps option. +// ... +// } +// +// // In hand written file: +// type InstanceGroupsOps interface { +// MyMethod() +// } +// +// func (mock *MockInstanceGroups) MyMethod() { +// // Custom mock implementation. +// } +// +// func (gce *GCEInstanceGroups) MyMethod() { +// // Custom implementation. 
+// } +package cloud From 75bff35884a52275825741c3cbf1da46ca363d5f Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:34:58 -0800 Subject: [PATCH 688/794] long running operation support --- pkg/cloudprovider/providers/gce/cloud/op.go | 142 ++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/op.go diff --git a/pkg/cloudprovider/providers/gce/cloud/op.go b/pkg/cloudprovider/providers/gce/cloud/op.go new file mode 100644 index 00000000000..92ee6f3f6f3 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/op.go @@ -0,0 +1,142 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +// operation is a GCE operation that can be waited on. +type operation interface { + // isDone queries GCE for the done status. This call can block. + isDone(ctx context.Context) (bool, error) + // rateLimitKey returns the rate limit key to use for the given operation. + // This rate limit will govern how fast the server will be polled for + // operation completion status.
+ rateLimitKey() *RateLimitKey +} + +type gaOperation struct { + s *Service + op *ga.Operation + projectID string +} + +func (o *gaOperation) isDone(ctx context.Context) (bool, error) { + var ( + op *ga.Operation + err error + ) + + switch { + case o.op.Region != "": + op, err = o.s.GA.RegionOperations.Get(o.projectID, o.op.Region, o.op.Name).Context(ctx).Do() + case o.op.Zone != "": + op, err = o.s.GA.ZoneOperations.Get(o.projectID, o.op.Zone, o.op.Name).Context(ctx).Do() + default: + op, err = o.s.GA.GlobalOperations.Get(o.projectID, o.op.Name).Context(ctx).Do() + } + if err != nil { + return false, err + } + return op != nil && op.Status == "DONE", nil +} + +func (o *gaOperation) rateLimitKey() *RateLimitKey { + return &RateLimitKey{ + ProjectID: o.projectID, + Operation: "Get", + Service: "Operations", + Version: meta.VersionGA, + } +} + +type alphaOperation struct { + s *Service + op *alpha.Operation + projectID string +} + +func (o *alphaOperation) isDone(ctx context.Context) (bool, error) { + var ( + op *alpha.Operation + err error + ) + + switch { + case o.op.Region != "": + op, err = o.s.Alpha.RegionOperations.Get(o.projectID, o.op.Region, o.op.Name).Context(ctx).Do() + case o.op.Zone != "": + op, err = o.s.Alpha.ZoneOperations.Get(o.projectID, o.op.Zone, o.op.Name).Context(ctx).Do() + default: + op, err = o.s.Alpha.GlobalOperations.Get(o.projectID, o.op.Name).Context(ctx).Do() + } + if err != nil { + return false, err + } + return op != nil && op.Status == "DONE", nil +} + +func (o *alphaOperation) rateLimitKey() *RateLimitKey { + return &RateLimitKey{ + ProjectID: o.projectID, + Operation: "Get", + Service: "Operations", + Version: meta.VersionAlpha, + } +} + +type betaOperation struct { + s *Service + op *beta.Operation + projectID string +} + +func (o *betaOperation) isDone(ctx context.Context) (bool, error) { + var ( + op *beta.Operation + err error + ) + + switch { + case o.op.Region != "": + op, err = o.s.Beta.RegionOperations.Get(o.projectID, 
o.op.Region, o.op.Name).Context(ctx).Do() + case o.op.Zone != "": + op, err = o.s.Beta.ZoneOperations.Get(o.projectID, o.op.Zone, o.op.Name).Context(ctx).Do() + default: + op, err = o.s.Beta.GlobalOperations.Get(o.projectID, o.op.Name).Context(ctx).Do() + } + if err != nil { + return false, err + } + return op != nil && op.Status == "DONE", nil +} + +func (o *betaOperation) rateLimitKey() *RateLimitKey { + return &RateLimitKey{ + ProjectID: o.projectID, + Operation: "Get", + Service: "Operations", + Version: meta.VersionBeta, + } +} From 968cce929c500cd8d5e172a9e1e70924e7b427e0 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:35:09 -0800 Subject: [PATCH 689/794] code generation --- .../providers/gce/cloud/gen/main.go | 1140 +++++++++++++++++ 1 file changed, 1140 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/gen/main.go diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go new file mode 100644 index 00000000000..f8dcd730b9f --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go @@ -0,0 +1,1140 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generator for GCE compute wrapper code. 
You must regenerate the code after +// modifying this file: +// +// $ go run gen/main.go > gen.go +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "os" + "os/exec" + "text/template" + "time" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" + "github.com/golang/glog" +) + +const ( + gofmt = "gofmt" + packageRoot = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" + + // readOnly specifies that the given resource is read-only and should not + // have insert() or delete() methods generated for the wrapper. + readOnly = iota +) + +var flags = struct { + gofmt bool + mode string +}{} + +func init() { + flag.BoolVar(&flags.gofmt, "gofmt", true, "run output through gofmt") + flag.StringVar(&flags.mode, "mode", "src", "content to generate: src, test, dummy") +} + +// gofmtContent runs "gofmt" on the given contents. +func gofmtContent(r io.Reader) string { + cmd := exec.Command(gofmt, "-s") + out := &bytes.Buffer{} + cmd.Stdin = r + cmd.Stdout = out + cmdErr := &bytes.Buffer{} + cmd.Stderr = cmdErr + + if err := cmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, cmdErr.String()) + panic(err) + } + return out.String() +} + +// genHeader generates the header for the file. +func genHeader(wr io.Writer) { + const text = `/* +Copyright {{.Year}} The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was generated by "go run gen/main.go > gen.go". Do not edit +// directly.
+ +package cloud + +import ( + "context" + "fmt" + "net/http" + "sync" + + "google.golang.org/api/googleapi" + "github.com/golang/glog" + + "{{.PackageRoot}}/filter" + "{{.PackageRoot}}/meta" + +` + tmpl := template.Must(template.New("header").Parse(text)) + values := map[string]string{ + "Year": fmt.Sprintf("%v", time.Now().Year()), + "PackageRoot": packageRoot, + } + if err := tmpl.Execute(wr, values); err != nil { + panic(err) + } + + var hasGA, hasAlpha, hasBeta bool + for _, s := range meta.AllServices { + switch s.Version() { + case meta.VersionGA: + hasGA = true + case meta.VersionAlpha: + hasAlpha = true + case meta.VersionBeta: + hasBeta = true + } + } + if hasAlpha { + fmt.Fprintln(wr, ` alpha "google.golang.org/api/compute/v0.alpha"`) + } + if hasBeta { + fmt.Fprintln(wr, ` beta "google.golang.org/api/compute/v0.beta"`) + } + if hasGA { + fmt.Fprintln(wr, ` ga "google.golang.org/api/compute/v1"`) + } + fmt.Fprintf(wr, ")\n\n") +} + +// genStubs generates the interface and wrapper stubs. +func genStubs(wr io.Writer) { + const text = `// Cloud is an interface for the GCE compute API. +type Cloud interface { +{{- range .All}} + {{.WrapType}}() {{.WrapType}} +{{- end}} +} + +// NewGCE returns a GCE. +func NewGCE(s *Service) *GCE { + g := &GCE{ + {{- range .All}} + {{.Field}}: &{{.GCEWrapType}}{s}, + {{- end}} + } + return g +} + +// GCE implements Cloud. +var _ Cloud = (*GCE)(nil) + +// GCE is the golang adapter for the compute APIs. +type GCE struct { +{{- range .All}} + {{.Field}} *{{.GCEWrapType}} +{{- end}} +} + +{{range .All}} +func (gce *GCE) {{.WrapType}}() {{.WrapType}} { + return gce.{{.Field}} +} +{{- end}} + +// NewMockGCE returns a new mock for GCE. +func NewMockGCE() *MockGCE { + {{- range .Groups}} + mock{{.Service}}Objs := map[meta.Key]*Mock{{.Service}}Obj{} + {{- end}} + + mock := &MockGCE{ + {{- range .All}} + {{.MockField}}: New{{.MockWrapType}}(mock{{.Service}}Objs), + {{- end}} + } + return mock +} + +// MockGCE implements Cloud. 
+var _ Cloud = (*MockGCE)(nil) + +// MockGCE is the mock for the compute API. +type MockGCE struct { +{{- range .All}} + {{.MockField}} *{{.MockWrapType}} +{{- end}} +} +{{range .All}} +func (mock *MockGCE) {{.WrapType}}() {{.WrapType}} { + return mock.{{.MockField}} +} +{{end}} + +{{range .Groups}} +// Mock{{.Service}}Obj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type Mock{{.Service}}Obj struct { + Obj interface{} +} +{{- if .HasAlpha}} +// ToAlpha retrieves the given version of the object. +func (m *Mock{{.Service}}Obj) ToAlpha() *{{.Alpha.FQObjectType}} { + if ret, ok := m.Obj.(*{{.Alpha.FQObjectType}}); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &{{.Alpha.FQObjectType}}{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *{{.Alpha.FQObjectType}} via JSON: %v", m.Obj, err) + } + return ret +} +{{- end}} +{{- if .HasBeta}} +// ToBeta retrieves the given version of the object. +func (m *Mock{{.Service}}Obj) ToBeta() *{{.Beta.FQObjectType}} { + if ret, ok := m.Obj.(*{{.Beta.FQObjectType}}); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &{{.Beta.FQObjectType}}{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *{{.Beta.FQObjectType}} via JSON: %v", m.Obj, err) + } + return ret +} +{{- end}} +{{- if .HasGA}} +// ToGA retrieves the given version of the object. +func (m *Mock{{.Service}}Obj) ToGA() *{{.GA.FQObjectType}} { + if ret, ok := m.Obj.(*{{.GA.FQObjectType}}); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. 
+ ret := &{{.GA.FQObjectType}}{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *{{.GA.FQObjectType}} via JSON: %v", m.Obj, err) + } + return ret +} +{{- end}} +{{- end}} +` + data := struct { + All []*meta.ServiceInfo + Groups map[string]*meta.ServiceGroup + }{meta.AllServices, meta.AllServicesByGroup} + + tmpl := template.Must(template.New("interface").Parse(text)) + if err := tmpl.Execute(wr, data); err != nil { + panic(err) + } +} + +// genTypes generates the type wrappers. +func genTypes(wr io.Writer) { + const text = `// {{.WrapType}} is an interface that allows for mocking of {{.Service}}. +type {{.WrapType}} interface { +{{- if .GenerateCustomOps}} + // {{.WrapTypeOps}} is an interface with additional non-CRUD type methods. + // This interface is expected to be implemented by hand (non-autogenerated). + {{.WrapTypeOps}} +{{- end}} +{{- if .GenerateGet}} + Get(ctx context.Context, key meta.Key) (*{{.FQObjectType}}, error) +{{- end -}} +{{- if .GenerateList}} +{{- if .KeyIsGlobal}} + List(ctx context.Context, fl *filter.F) ([]*{{.FQObjectType}}, error) +{{- end -}} +{{- if .KeyIsRegional}} + List(ctx context.Context, region string, fl *filter.F) ([]*{{.FQObjectType}}, error) +{{- end -}} +{{- if .KeyIsZonal}} + List(ctx context.Context, zone string, fl *filter.F) ([]*{{.FQObjectType}}, error) +{{- end -}} +{{- end -}} +{{- if .GenerateInsert}} + Insert(ctx context.Context, key meta.Key, obj *{{.FQObjectType}}) error +{{- end -}} +{{- if .GenerateDelete}} + Delete(ctx context.Context, key meta.Key) error +{{- end -}} +{{- if .AggregatedList}} + AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*{{.FQObjectType}}, error) +{{- end}} +{{- with .Methods -}} +{{- range .}} + {{.InterfaceFunc}} +{{- end -}} +{{- end}} +} + +// New{{.MockWrapType}} returns a new mock for {{.Service}}. 
+func New{{.MockWrapType}}(objs map[meta.Key]*Mock{{.Service}}Obj) *{{.MockWrapType}} { + mock := &{{.MockWrapType}}{ + Objects: objs, + {{- if .GenerateGet}} + GetError: map[meta.Key]error{}, + {{- end -}} + {{- if .GenerateInsert}} + InsertError: map[meta.Key]error{}, + {{- end -}} + {{- if .GenerateDelete}} + DeleteError: map[meta.Key]error{}, + {{- end}} + } + return mock +} + +// {{.MockWrapType}} is the mock for {{.Service}}. +type {{.MockWrapType}} struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*Mock{{.Service}}Obj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + {{- if .GenerateGet}} + GetError map[meta.Key]error + {{- end -}} + {{- if .GenerateList}} + ListError *error + {{- end -}} + {{- if .GenerateInsert}} + InsertError map[meta.Key]error + {{- end -}} + {{- if .GenerateDelete}} + DeleteError map[meta.Key]error + {{- end -}} + {{- if .AggregatedList}} + AggregatedListError *error + {{- end}} + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior after the hook function executes.
+ {{- if .GenerateGet}} + GetHook func(m *{{.MockWrapType}}, ctx context.Context, key meta.Key) (bool, *{{.FQObjectType}}, error) + {{- end -}} + {{- if .GenerateList}} + {{- if .KeyIsGlobal}} + ListHook func(m *{{.MockWrapType}}, ctx context.Context, fl *filter.F) (bool, []*{{.FQObjectType}}, error) + {{- end -}} + {{- if .KeyIsRegional}} + ListHook func(m *{{.MockWrapType}}, ctx context.Context, region string, fl *filter.F) (bool, []*{{.FQObjectType}}, error) + {{- end -}} + {{- if .KeyIsZonal}} + ListHook func(m *{{.MockWrapType}}, ctx context.Context, zone string, fl *filter.F) (bool, []*{{.FQObjectType}}, error) + {{- end}} + {{- end -}} + {{- if .GenerateInsert}} + InsertHook func(m *{{.MockWrapType}}, ctx context.Context, key meta.Key, obj *{{.FQObjectType}}) (bool, error) + {{- end -}} + {{- if .GenerateDelete}} + DeleteHook func(m *{{.MockWrapType}}, ctx context.Context, key meta.Key) (bool, error) + {{- end -}} + {{- if .AggregatedList}} + AggregatedListHook func(m *{{.MockWrapType}}, ctx context.Context, fl *filter.F) (bool, map[string][]*{{.FQObjectType}}, error) + {{- end}} + +{{- with .Methods -}} +{{- range .}} + {{.MockHook}} +{{- end -}} +{{- end}} + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +{{- if .GenerateGet}} +// Get returns the object from the mock. 
+func (m *{{.MockWrapType}}) Get(ctx context.Context, key meta.Key) (*{{.FQObjectType}}, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, %v", ctx, key, obj ,err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.To{{.VersionTitle}}() + glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("{{.MockWrapType}} %v not found", key), + } + glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} +{{- end}} + +{{- if .GenerateList}} +{{if .KeyIsGlobal -}} +// List all of the objects in the mock. +func (m *{{.MockWrapType}}) List(ctx context.Context, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end -}} +{{- if .KeyIsRegional -}} +// List all of the objects in the mock in the given region. +func (m *{{.MockWrapType}}) List(ctx context.Context, region string, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end -}} +{{- if .KeyIsZonal -}} +// List all of the objects in the mock in the given zone. 
+func (m *{{.MockWrapType}}) List(ctx context.Context, zone string, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end}} + if m.ListHook != nil { + {{if .KeyIsGlobal -}} + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + {{- end -}} + {{- if .KeyIsRegional -}} + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + {{- end -}} + {{- if .KeyIsZonal -}} + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + {{- end}} + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + {{if .KeyIsGlobal -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = nil, %v", ctx, fl, err) + {{- end -}} + {{- if .KeyIsRegional -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + {{- end -}} + {{- if .KeyIsZonal -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + {{- end}} + + return nil, *m.ListError + } + + var objs []*{{.FQObjectType}} +{{- if .KeyIsGlobal}} + for _, obj := range m.Objects { +{{- else}} + for key, obj := range m.Objects { +{{- end -}} +{{- if .KeyIsRegional}} + if key.Region != region { + continue + } +{{- end -}} +{{- if .KeyIsZonal}} + if key.Zone != zone { + continue + } +{{- end}} + if ! 
fl.Match(obj.To{{.VersionTitle}}()) { + continue + } + objs = append(objs, obj.To{{.VersionTitle}}()) + } + + {{if .KeyIsGlobal -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + {{- end -}} + {{- if .KeyIsRegional -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + {{- end -}} + {{- if .KeyIsZonal -}} + glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + {{- end}} + return objs, nil +} +{{- end}} + +{{- if .GenerateInsert}} +// Insert is a mock for inserting/creating a new object. +func (m *{{.MockWrapType}}) Insert(ctx context.Context, key meta.Key, obj *{{.FQObjectType}}) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("{{.MockWrapType}} %v exists", key), + } + glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.Version{{.VersionTitle}}, "mock-project", "{{.Resource}}", key) + } + + m.Objects[key] = &Mock{{.Service}}Obj{obj} + glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} +{{- end}} + +{{- if .GenerateDelete}} +// Delete is a mock for deleting the object. 
+func (m *{{.MockWrapType}}) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("{{.MockWrapType}} %v not found", key), + } + glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = nil", ctx, key) + return nil +} +{{- end}} + +{{- if .AggregatedList}} +// AggregatedList is a mock for AggregatedList. +func (m *{{.MockWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*{{.FQObjectType}}, error) { + if m.AggregatedListHook != nil { + if intercept, objs, err := m.AggregatedListHook(m, ctx, fl); intercept { + glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.AggregatedListError != nil { + err := *m.AggregatedListError + glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + + objs := map[string][]*{{.FQObjectType}}{} + for _, obj := range m.Objects { + res, err := ParseResourceURL(obj.To{{.VersionTitle}}().SelfLink) + {{- if .KeyIsRegional}} + location := res.Key.Region + {{- end -}} + {{- if .KeyIsZonal}} + location := res.Key.Zone + {{- end}} + if err != nil { + glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + if ! 
fl.Match(obj.To{{.VersionTitle}}()) { + continue + } + objs[location] = append(objs[location], obj.To{{.VersionTitle}}()) + } + glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} +{{- end}} + +// Obj wraps the object for use in the mock. +func (m *{{.MockWrapType}}) Obj(o *{{.FQObjectType}}) *Mock{{.Service}}Obj { + return &Mock{{.Service}}Obj{o} +} + +{{with .Methods -}} +{{- range .}} +// {{.Name}} is a mock for the corresponding method. +func (m *{{.MockWrapType}}) {{.FcnArgs}} { +{{- if eq .ReturnType "Operation"}} + if m.{{.MockHookName}} != nil { + return m.{{.MockHookName}}(m, ctx, key {{.CallArgs}}) + } + return nil +{{- else}} + if m.{{.MockHookName}} != nil { + return m.{{.MockHookName}}(m, ctx, key {{.CallArgs}}) + } + return nil, fmt.Errorf("{{.MockHookName}} must be set") +{{- end}} +} +{{end -}} +{{- end}} +// {{.GCEWrapType}} is a simplifying adapter for the GCE {{.Service}}. +type {{.GCEWrapType}} struct { + s *Service +} + +{{- if .GenerateGet}} +// Get the {{.Object}} named by key. +func (g *{{.GCEWrapType}}) Get(ctx context.Context, key meta.Key) (*{{.FQObjectType}}, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } +{{- if .KeyIsGlobal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Get(projectID, key.Name) +{{- end -}} +{{- if .KeyIsRegional}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Get(projectID, key.Region, key.Name) +{{- end -}} +{{- if .KeyIsZonal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Get(projectID, key.Zone, key.Name) +{{- end}} + call.Context(ctx) + return call.Do() +} +{{- end}} + +{{- if .GenerateList}} +// List all {{.Object}} objects. 
+{{- if .KeyIsGlobal}} +func (g *{{.GCEWrapType}}) List(ctx context.Context, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end -}} +{{- if .KeyIsRegional}} +func (g *{{.GCEWrapType}}) List(ctx context.Context, region string, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end -}} +{{- if .KeyIsZonal}} +func (g *{{.GCEWrapType}}) List(ctx context.Context, zone string, fl *filter.F) ([]*{{.FQObjectType}}, error) { +{{- end}} +projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") +rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } +{{- if .KeyIsGlobal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID) +{{- end -}} +{{- if .KeyIsRegional}} + call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID, region) +{{- end -}} +{{- if .KeyIsZonal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID, zone) +{{- end}} + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*{{.FQObjectType}} + f := func(l *{{.ObjectListType}}) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} +{{- end}} + +{{- if .GenerateInsert}} +// Insert {{.Object}} with key of value obj. 
+func (g *{{.GCEWrapType}}) Insert(ctx context.Context, key meta.Key, obj *{{.FQObjectType}}) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name +{{- if .KeyIsGlobal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Insert(projectID, obj) +{{- end -}} +{{- if .KeyIsRegional}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Insert(projectID, key.Region, obj) +{{- end -}} +{{- if .KeyIsZonal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Insert(projectID, key.Zone, obj) +{{- end}} + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} +{{- end}} + +{{- if .GenerateDelete}} +// Delete the {{.Object}} referenced by key. +func (g *{{.GCEWrapType}}) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } +{{- if .KeyIsGlobal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Delete(projectID, key.Name) +{{end -}} +{{- if .KeyIsRegional}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Delete(projectID, key.Region, key.Name) +{{- end -}} +{{- if .KeyIsZonal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.Delete(projectID, key.Zone, key.Name) +{{- end}} + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} +{{end -}} + +{{- if .AggregatedList}} +// AggregatedList lists all resources of the given type across all locations. 
+func (g *{{.GCEWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*{{.FQObjectType}}, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AggregatedList", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + + call := g.s.{{.VersionTitle}}.{{.Service}}.AggregatedList(projectID) + call.Context(ctx) + if fl != filter.None { + call.Filter(fl.String()) + } + + all := map[string][]*{{.FQObjectType}}{} + f := func(l *{{.ObjectAggregatedListType}}) error { + for k, v := range l.Items { + all[k] = append(all[k], v.{{.AggregatedListField}}...) + } + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} +{{- end}} + +{{- with .Methods -}} +{{- range .}} +// {{.Name}} is a method on {{.GCEWrapType}}. +func (g *{{.GCEWrapType}}) {{.FcnArgs}} { + projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "{{.Name}}", + Version: meta.Version("{{.Version}}"), + Service: "{{.Service}}", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + {{- if eq .ReturnType "Operation"}} + return err + {{- else}} + return nil, err + {{- end}} + } +{{- if .KeyIsGlobal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.{{.Name}}(projectID, key.Name {{.CallArgs}}) +{{- end -}} +{{- if .KeyIsRegional}} + call := g.s.{{.VersionTitle}}.{{.Service}}.{{.Name}}(projectID, key.Region, key.Name {{.CallArgs}}) +{{- end -}} +{{- if .KeyIsZonal}} + call := g.s.{{.VersionTitle}}.{{.Service}}.{{.Name}}(projectID, key.Zone, key.Name {{.CallArgs}}) +{{- end}} + call.Context(ctx) +{{- if eq .ReturnType "Operation"}} + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +{{- else}} + return call.Do() +{{- end}} +} 
+{{end -}} +{{- end}} +` + tmpl := template.Must(template.New("interface").Parse(text)) + for _, s := range meta.AllServices { + if err := tmpl.Execute(wr, s); err != nil { + panic(err) + } + } +} + +func genUnitTestHeader(wr io.Writer) { + const text = `/* +Copyright {{.Year}} The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was generated by "go run gen/main.go -mode test > gen_test.go". Do not edit +// directly. + +package cloud + +import ( + "context" + "reflect" + "testing" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" + + "{{.PackageRoot}}/filter" + "{{.PackageRoot}}/meta" +) + +const location = "location" +` + tmpl := template.Must(template.New("header").Parse(text)) + values := map[string]string{ + "Year": fmt.Sprintf("%v", time.Now().Year()), + "PackageRoot": packageRoot, + } + if err := tmpl.Execute(wr, values); err != nil { + panic(err) + } +} + +func genUnitTestServices(wr io.Writer) { + const text = ` +func Test{{.Service}}Group(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key +{{- if .HasAlpha}} + keyAlpha := meta.{{.Alpha.MakeKey "key-alpha" "location"}} + key = keyAlpha +{{- end}} +{{- if .HasBeta}} + keyBeta := meta.{{.Beta.MakeKey "key-beta" "location"}} + key = keyBeta +{{- end}} +{{- if .HasGA}} + keyGA := meta.{{.GA.MakeKey "key-ga" "location"}} + key = keyGA +{{- end}} + // 
Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. +{{- if .HasAlpha}}{{- if .Alpha.GenerateGet}} + if _, err := mock.Alpha{{.Service}}().Get(ctx, *key); err == nil { + t.Errorf("Alpha{{.Service}}().Get(%v, %v) = _, nil; want error", ctx, key) + } +{{- end}}{{- end}} +{{- if .HasBeta}}{{- if .Beta.GenerateGet}} + if _, err := mock.Beta{{.Service}}().Get(ctx, *key); err == nil { + t.Errorf("Beta{{.Service}}().Get(%v, %v) = _, nil; want error", ctx, key) + } +{{- end}}{{- end}} +{{- if .HasGA}}{{- if .GA.GenerateGet}} + if _, err := mock.{{.Service}}().Get(ctx, *key); err == nil { + t.Errorf("{{.Service}}().Get(%v, %v) = _, nil; want error", ctx, key) + } +{{- end}}{{- end}} + + // Insert. +{{- if .HasAlpha}}{{- if .Alpha.GenerateInsert}} + { + obj := &alpha.{{.Alpha.Object}}{} + if err := mock.Alpha{{.Service}}().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("Alpha{{.Service}}().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } +{{- end}}{{- end}} +{{- if .HasBeta}}{{- if .Beta.GenerateInsert}} + { + obj := &beta.{{.Beta.Object}}{} + if err := mock.Beta{{.Service}}().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("Beta{{.Service}}().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } +{{- end}}{{- end}} +{{- if .HasGA}}{{- if .GA.GenerateInsert}} + { + obj := &ga.{{.GA.Object}}{} + if err := mock.{{.Service}}().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("{{.Service}}().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } +{{- end}}{{- end}} + + // Get across versions. 
+{{- if .HasAlpha}}{{- if .Alpha.GenerateInsert}} + if obj, err := mock.Alpha{{.Service}}().Get(ctx, *key); err != nil { + t.Errorf("Alpha{{.Service}}().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } +{{- end}}{{- end}} +{{- if .HasBeta}}{{- if .Beta.GenerateInsert}} + if obj, err := mock.Beta{{.Service}}().Get(ctx, *key); err != nil { + t.Errorf("Beta{{.Service}}().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } +{{- end}}{{- end}} +{{- if .HasGA}}{{- if .GA.GenerateInsert}} + if obj, err := mock.{{.Service}}().Get(ctx, *key); err != nil { + t.Errorf("{{.Service}}().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } +{{- end}}{{- end}} + + // List. +{{- if .HasAlpha}} + mock.MockAlpha{{.Service}}.Objects[*keyAlpha] = mock.MockAlpha{{.Service}}.Obj(&alpha.{{.Alpha.Object}}{Name: keyAlpha.Name}) +{{- end}} +{{- if .HasBeta}} + mock.MockBeta{{.Service}}.Objects[*keyBeta] = mock.MockBeta{{.Service}}.Obj(&beta.{{.Beta.Object}}{Name: keyBeta.Name}) +{{- end}} +{{- if .HasGA}} + mock.Mock{{.Service}}.Objects[*keyGA] = mock.Mock{{.Service}}.Obj(&ga.{{.GA.Object}}{Name: keyGA.Name}) +{{- end}} + want := map[string]bool{ +{{- if .HasAlpha}} + "key-alpha": true, +{{- end}} +{{- if .HasBeta}} + "key-beta": true, +{{- end}} +{{- if .HasGA}} + "key-ga": true, +{{- end}} + } + _ = want // ignore unused variables. 
+
+{{- if .HasAlpha}}{{- if .Alpha.GenerateList}}
+	{
+		{{- if .Alpha.KeyIsGlobal }}
+		objs, err := mock.Alpha{{.Service}}().List(ctx, filter.None)
+		{{- else}}
+		objs, err := mock.Alpha{{.Service}}().List(ctx, location, filter.None)
+		{{- end}}
+		if err != nil {
+			t.Errorf("Alpha{{.Service}}().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
+		} else {
+			got := map[string]bool{}
+			for _, obj := range objs {
+				got[obj.Name] = true
+			}
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("Alpha{{.Service}}().List(); got %+v, want %+v", got, want)
+			}
+		}
+	}
+{{- end}}{{- end}}
+{{- if .HasBeta}}{{- if .Beta.GenerateList}}
+	{
+		{{- if .Beta.KeyIsGlobal }}
+		objs, err := mock.Beta{{.Service}}().List(ctx, filter.None)
+		{{- else}}
+		objs, err := mock.Beta{{.Service}}().List(ctx, location, filter.None)
+		{{- end}}
+		if err != nil {
+			t.Errorf("Beta{{.Service}}().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
+		} else {
+			got := map[string]bool{}
+			for _, obj := range objs {
+				got[obj.Name] = true
+			}
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("Beta{{.Service}}().List(); got %+v, want %+v", got, want)
+			}
+		}
+	}
+{{- end}}{{- end}}
+{{- if .HasGA}}{{- if .GA.GenerateList}}
+	{
+		{{- if .GA.KeyIsGlobal }}
+		objs, err := mock.{{.Service}}().List(ctx, filter.None)
+		{{- else}}
+		objs, err := mock.{{.Service}}().List(ctx, location, filter.None)
+		{{- end}}
+		if err != nil {
+			t.Errorf("{{.Service}}().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
+		} else {
+			got := map[string]bool{}
+			for _, obj := range objs {
+				got[obj.Name] = true
+			}
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("{{.Service}}().List(); got %+v, want %+v", got, want)
+			}
+		}
+	}
+{{- end}}{{- end}}
+
+	// Delete across versions.
+{{- if .HasAlpha}}{{- if .Alpha.GenerateDelete}} + if err := mock.Alpha{{.Service}}().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("Alpha{{.Service}}().Delete(%v, %v) = %v; want nil", ctx, key, err) + } +{{- end}}{{- end}} +{{- if .HasBeta}}{{- if .Beta.GenerateDelete}} + if err := mock.Beta{{.Service}}().Delete(ctx, *keyBeta); err != nil { + t.Errorf("Beta{{.Service}}().Delete(%v, %v) = %v; want nil", ctx, key, err) + } +{{- end}}{{- end}} +{{- if .HasGA}}{{- if .GA.GenerateDelete}} + if err := mock.{{.Service}}().Delete(ctx, *keyGA); err != nil { + t.Errorf("{{.Service}}().Delete(%v, %v) = %v; want nil", ctx, key, err) + } +{{- end}}{{- end}} + + // Delete not found. +{{- if .HasAlpha}}{{- if .Alpha.GenerateDelete}} + if err := mock.Alpha{{.Service}}().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("Alpha{{.Service}}().Delete(%v, %v) = nil; want error", ctx, key) + } +{{- end}}{{- end}} +{{- if .HasBeta}}{{- if .Beta.GenerateDelete}} + if err := mock.Beta{{.Service}}().Delete(ctx, *keyBeta); err == nil { + t.Errorf("Beta{{.Service}}().Delete(%v, %v) = nil; want error", ctx, key) + } +{{- end}}{{- end}} +{{- if .HasGA}}{{- if .GA.GenerateDelete}} + if err := mock.{{.Service}}().Delete(ctx, *keyGA); err == nil { + t.Errorf("{{.Service}}().Delete(%v, %v) = nil; want error", ctx, key) + } +{{- end}}{{- end}} +} +` + tmpl := template.Must(template.New("unittest").Parse(text)) + for _, s := range meta.AllServicesByGroup { + if err := tmpl.Execute(wr, s); err != nil { + panic(err) + } + } +} + +func main() { + flag.Parse() + + out := &bytes.Buffer{} + + switch flags.mode { + case "src": + genHeader(out) + genStubs(out) + genTypes(out) + case "test": + genUnitTestHeader(out) + genUnitTestServices(out) + default: + glog.Fatalf("Invalid -mode: %q", flags.mode) + } + + if flags.gofmt { + fmt.Print(gofmtContent(out)) + } else { + fmt.Print(out.String()) + } +} From 329e0b1cb57178196f8854bc67573bc4d2813454 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 
23:35:41 -0800 Subject: [PATCH 690/794] support interfaces for the generated code --- .../providers/gce/cloud/project.go | 44 +++++++++++ .../providers/gce/cloud/ratelimit.go | 67 ++++++++++++++++ .../providers/gce/cloud/service.go | 79 +++++++++++++++++++ 3 files changed, 190 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/project.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/ratelimit.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/service.go diff --git a/pkg/cloudprovider/providers/gce/cloud/project.go b/pkg/cloudprovider/providers/gce/cloud/project.go new file mode 100644 index 00000000000..74299e4a23e --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/project.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +// ProjectRouter routes service calls to the appropriate GCE project. +type ProjectRouter interface { + // ProjectID returns the project ID (non-numeric) to be used for a call + // to an API (version,service). Example tuples: ("ga", "ForwardingRules"), + // ("alpha", "GlobalAddresses"). + // + // This allows for plumbing different service calls to the appropriate + // project, for instance, networking services to a separate project + // than instance management. 
+ ProjectID(ctx context.Context, version meta.Version, service string) string +} + +// SingleProjectRouter routes all service calls to the same project ID. +type SingleProjectRouter struct { + ID string +} + +func (r *SingleProjectRouter) ProjectID(ctx context.Context, version meta.Version, service string) string { + return r.ID +} diff --git a/pkg/cloudprovider/providers/gce/cloud/ratelimit.go b/pkg/cloudprovider/providers/gce/cloud/ratelimit.go new file mode 100644 index 00000000000..948f1d36d89 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/ratelimit.go @@ -0,0 +1,67 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + "time" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +// RateLimitKey is a key identifying the operation to be rate limited. The rate limit +// queue will be determined based on the contents of RateKey. +type RateLimitKey struct { + // ProjectID is the non-numeric ID of the project. + ProjectID string + // Operation is the specific method being invoked (e.g. "Get", "List"). + Operation string + // Version is the API version of the call. + Version meta.Version + // Service is the service being invoked (e.g. "Firewalls", "BackendServices") + Service string +} + +// RateLimiter is the interface for a rate limiting policy. +type RateLimiter interface { + // Accept uses the RateLimitKey to derive a sleep time for the calling + // goroutine. 
This call will block until the operation is ready for + // execution. + // + // Accept returns an error if the given context ctx was canceled + // while waiting for acceptance into the queue. + Accept(ctx context.Context, key *RateLimitKey) error +} + +// NopRateLimiter is a rate limiter that performs no rate limiting. +type NopRateLimiter struct { +} + +func (*NopRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error { + // Rate limit polling of the Operation status to avoid hammering GCE + // for the status of an operation. + const pollTime = time.Duration(1) * time.Second + if key.Operation == "Get" && key.Service == "Operations" { + select { + case <-time.NewTimer(pollTime).C: + break + case <-ctx.Done(): + return ctx.Err() + } + } + return nil +} diff --git a/pkg/cloudprovider/providers/gce/cloud/service.go b/pkg/cloudprovider/providers/gce/cloud/service.go new file mode 100644 index 00000000000..8a6c0a6cf95 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/service.go @@ -0,0 +1,79 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + "fmt" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" +) + +// Service is the top-level adapter for all of the different compute API +// versions. 
+type Service struct { + GA *ga.Service + Alpha *alpha.Service + Beta *beta.Service + ProjectRouter ProjectRouter + RateLimiter RateLimiter +} + +// wrapOperation wraps a GCE anyOP in a version generic operation type. +func (g *Service) wrapOperation(anyOp interface{}) (operation, error) { + switch o := anyOp.(type) { + case *ga.Operation: + r, err := ParseResourceURL(o.SelfLink) + if err != nil { + return nil, err + } + return &gaOperation{g, o, r.ProjectID}, nil + case *alpha.Operation: + r, err := ParseResourceURL(o.SelfLink) + if err != nil { + return nil, err + } + return &alphaOperation{g, o, r.ProjectID}, nil + case *beta.Operation: + r, err := ParseResourceURL(o.SelfLink) + if err != nil { + return nil, err + } + return &betaOperation{g, o, r.ProjectID}, nil + default: + return nil, fmt.Errorf("invalid type %T", anyOp) + } +} + +// WaitForCompletion of a long running operation. This will poll the state of +// GCE for the completion status of the given operation. genericOp can be one +// of alpha, beta, ga Operation types. 
+func (g *Service) WaitForCompletion(ctx context.Context, genericOp interface{}) error { + op, err := g.wrapOperation(genericOp) + if err != nil { + return err + } + for done, err := op.isDone(ctx); !done; done, err = op.isDone(ctx) { + if err != nil { + return err + } + g.RateLimiter.Accept(ctx, op.rateLimitKey()) + } + return nil +} From e230bd967b3cab2a264cbf59b03660dcd24a130c Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:35:58 -0800 Subject: [PATCH 691/794] Generated code (see gen/main.go for the source) --- pkg/cloudprovider/providers/gce/cloud/gen.go | 10351 ++++++++++++++++ .../providers/gce/cloud/gen_test.go | 1749 +++ 2 files changed, 12100 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/gen.go create mode 100644 pkg/cloudprovider/providers/gce/cloud/gen_test.go diff --git a/pkg/cloudprovider/providers/gce/cloud/gen.go b/pkg/cloudprovider/providers/gce/cloud/gen.go new file mode 100644 index 00000000000..ef7a2c62eaf --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/gen.go @@ -0,0 +1,10351 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was generated by "go run gen/main.go > gen.go". Do not edit +// directly. 
+ +package cloud + +import ( + "context" + "fmt" + "net/http" + "sync" + + "github.com/golang/glog" + "google.golang.org/api/googleapi" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" +) + +// Cloud is an interface for the GCE compute API. +type Cloud interface { + Addresses() Addresses + AlphaAddresses() AlphaAddresses + BetaAddresses() BetaAddresses + GlobalAddresses() GlobalAddresses + BackendServices() BackendServices + AlphaBackendServices() AlphaBackendServices + AlphaRegionBackendServices() AlphaRegionBackendServices + Disks() Disks + AlphaDisks() AlphaDisks + AlphaRegionDisks() AlphaRegionDisks + Firewalls() Firewalls + ForwardingRules() ForwardingRules + AlphaForwardingRules() AlphaForwardingRules + GlobalForwardingRules() GlobalForwardingRules + HealthChecks() HealthChecks + AlphaHealthChecks() AlphaHealthChecks + HttpHealthChecks() HttpHealthChecks + HttpsHealthChecks() HttpsHealthChecks + InstanceGroups() InstanceGroups + Instances() Instances + BetaInstances() BetaInstances + AlphaInstances() AlphaInstances + AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups + Projects() Projects + Regions() Regions + Routes() Routes + SslCertificates() SslCertificates + TargetHttpProxies() TargetHttpProxies + TargetHttpsProxies() TargetHttpsProxies + TargetPools() TargetPools + UrlMaps() UrlMaps + Zones() Zones +} + +// NewGCE returns a GCE. 
+func NewGCE(s *Service) *GCE { + g := &GCE{ + gceAddresses: &GCEAddresses{s}, + gceAlphaAddresses: &GCEAlphaAddresses{s}, + gceBetaAddresses: &GCEBetaAddresses{s}, + gceGlobalAddresses: &GCEGlobalAddresses{s}, + gceBackendServices: &GCEBackendServices{s}, + gceAlphaBackendServices: &GCEAlphaBackendServices{s}, + gceAlphaRegionBackendServices: &GCEAlphaRegionBackendServices{s}, + gceDisks: &GCEDisks{s}, + gceAlphaDisks: &GCEAlphaDisks{s}, + gceAlphaRegionDisks: &GCEAlphaRegionDisks{s}, + gceFirewalls: &GCEFirewalls{s}, + gceForwardingRules: &GCEForwardingRules{s}, + gceAlphaForwardingRules: &GCEAlphaForwardingRules{s}, + gceGlobalForwardingRules: &GCEGlobalForwardingRules{s}, + gceHealthChecks: &GCEHealthChecks{s}, + gceAlphaHealthChecks: &GCEAlphaHealthChecks{s}, + gceHttpHealthChecks: &GCEHttpHealthChecks{s}, + gceHttpsHealthChecks: &GCEHttpsHealthChecks{s}, + gceInstanceGroups: &GCEInstanceGroups{s}, + gceInstances: &GCEInstances{s}, + gceBetaInstances: &GCEBetaInstances{s}, + gceAlphaInstances: &GCEAlphaInstances{s}, + gceAlphaNetworkEndpointGroups: &GCEAlphaNetworkEndpointGroups{s}, + gceProjects: &GCEProjects{s}, + gceRegions: &GCERegions{s}, + gceRoutes: &GCERoutes{s}, + gceSslCertificates: &GCESslCertificates{s}, + gceTargetHttpProxies: &GCETargetHttpProxies{s}, + gceTargetHttpsProxies: &GCETargetHttpsProxies{s}, + gceTargetPools: &GCETargetPools{s}, + gceUrlMaps: &GCEUrlMaps{s}, + gceZones: &GCEZones{s}, + } + return g +} + +// GCE implements Cloud. +var _ Cloud = (*GCE)(nil) + +// GCE is the golang adapter for the compute APIs. 
+type GCE struct { + gceAddresses *GCEAddresses + gceAlphaAddresses *GCEAlphaAddresses + gceBetaAddresses *GCEBetaAddresses + gceGlobalAddresses *GCEGlobalAddresses + gceBackendServices *GCEBackendServices + gceAlphaBackendServices *GCEAlphaBackendServices + gceAlphaRegionBackendServices *GCEAlphaRegionBackendServices + gceDisks *GCEDisks + gceAlphaDisks *GCEAlphaDisks + gceAlphaRegionDisks *GCEAlphaRegionDisks + gceFirewalls *GCEFirewalls + gceForwardingRules *GCEForwardingRules + gceAlphaForwardingRules *GCEAlphaForwardingRules + gceGlobalForwardingRules *GCEGlobalForwardingRules + gceHealthChecks *GCEHealthChecks + gceAlphaHealthChecks *GCEAlphaHealthChecks + gceHttpHealthChecks *GCEHttpHealthChecks + gceHttpsHealthChecks *GCEHttpsHealthChecks + gceInstanceGroups *GCEInstanceGroups + gceInstances *GCEInstances + gceBetaInstances *GCEBetaInstances + gceAlphaInstances *GCEAlphaInstances + gceAlphaNetworkEndpointGroups *GCEAlphaNetworkEndpointGroups + gceProjects *GCEProjects + gceRegions *GCERegions + gceRoutes *GCERoutes + gceSslCertificates *GCESslCertificates + gceTargetHttpProxies *GCETargetHttpProxies + gceTargetHttpsProxies *GCETargetHttpsProxies + gceTargetPools *GCETargetPools + gceUrlMaps *GCEUrlMaps + gceZones *GCEZones +} + +func (gce *GCE) Addresses() Addresses { + return gce.gceAddresses +} +func (gce *GCE) AlphaAddresses() AlphaAddresses { + return gce.gceAlphaAddresses +} +func (gce *GCE) BetaAddresses() BetaAddresses { + return gce.gceBetaAddresses +} +func (gce *GCE) GlobalAddresses() GlobalAddresses { + return gce.gceGlobalAddresses +} +func (gce *GCE) BackendServices() BackendServices { + return gce.gceBackendServices +} +func (gce *GCE) AlphaBackendServices() AlphaBackendServices { + return gce.gceAlphaBackendServices +} +func (gce *GCE) AlphaRegionBackendServices() AlphaRegionBackendServices { + return gce.gceAlphaRegionBackendServices +} +func (gce *GCE) Disks() Disks { + return gce.gceDisks +} +func (gce *GCE) AlphaDisks() AlphaDisks { + 
return gce.gceAlphaDisks +} +func (gce *GCE) AlphaRegionDisks() AlphaRegionDisks { + return gce.gceAlphaRegionDisks +} +func (gce *GCE) Firewalls() Firewalls { + return gce.gceFirewalls +} +func (gce *GCE) ForwardingRules() ForwardingRules { + return gce.gceForwardingRules +} +func (gce *GCE) AlphaForwardingRules() AlphaForwardingRules { + return gce.gceAlphaForwardingRules +} +func (gce *GCE) GlobalForwardingRules() GlobalForwardingRules { + return gce.gceGlobalForwardingRules +} +func (gce *GCE) HealthChecks() HealthChecks { + return gce.gceHealthChecks +} +func (gce *GCE) AlphaHealthChecks() AlphaHealthChecks { + return gce.gceAlphaHealthChecks +} +func (gce *GCE) HttpHealthChecks() HttpHealthChecks { + return gce.gceHttpHealthChecks +} +func (gce *GCE) HttpsHealthChecks() HttpsHealthChecks { + return gce.gceHttpsHealthChecks +} +func (gce *GCE) InstanceGroups() InstanceGroups { + return gce.gceInstanceGroups +} +func (gce *GCE) Instances() Instances { + return gce.gceInstances +} +func (gce *GCE) BetaInstances() BetaInstances { + return gce.gceBetaInstances +} +func (gce *GCE) AlphaInstances() AlphaInstances { + return gce.gceAlphaInstances +} +func (gce *GCE) AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups { + return gce.gceAlphaNetworkEndpointGroups +} +func (gce *GCE) Projects() Projects { + return gce.gceProjects +} +func (gce *GCE) Regions() Regions { + return gce.gceRegions +} +func (gce *GCE) Routes() Routes { + return gce.gceRoutes +} +func (gce *GCE) SslCertificates() SslCertificates { + return gce.gceSslCertificates +} +func (gce *GCE) TargetHttpProxies() TargetHttpProxies { + return gce.gceTargetHttpProxies +} +func (gce *GCE) TargetHttpsProxies() TargetHttpsProxies { + return gce.gceTargetHttpsProxies +} +func (gce *GCE) TargetPools() TargetPools { + return gce.gceTargetPools +} +func (gce *GCE) UrlMaps() UrlMaps { + return gce.gceUrlMaps +} +func (gce *GCE) Zones() Zones { + return gce.gceZones +} + +// NewMockGCE returns a new mock for 
GCE. +func NewMockGCE() *MockGCE { + mockAddressesObjs := map[meta.Key]*MockAddressesObj{} + mockBackendServicesObjs := map[meta.Key]*MockBackendServicesObj{} + mockDisksObjs := map[meta.Key]*MockDisksObj{} + mockFirewallsObjs := map[meta.Key]*MockFirewallsObj{} + mockForwardingRulesObjs := map[meta.Key]*MockForwardingRulesObj{} + mockGlobalAddressesObjs := map[meta.Key]*MockGlobalAddressesObj{} + mockGlobalForwardingRulesObjs := map[meta.Key]*MockGlobalForwardingRulesObj{} + mockHealthChecksObjs := map[meta.Key]*MockHealthChecksObj{} + mockHttpHealthChecksObjs := map[meta.Key]*MockHttpHealthChecksObj{} + mockHttpsHealthChecksObjs := map[meta.Key]*MockHttpsHealthChecksObj{} + mockInstanceGroupsObjs := map[meta.Key]*MockInstanceGroupsObj{} + mockInstancesObjs := map[meta.Key]*MockInstancesObj{} + mockNetworkEndpointGroupsObjs := map[meta.Key]*MockNetworkEndpointGroupsObj{} + mockProjectsObjs := map[meta.Key]*MockProjectsObj{} + mockRegionBackendServicesObjs := map[meta.Key]*MockRegionBackendServicesObj{} + mockRegionDisksObjs := map[meta.Key]*MockRegionDisksObj{} + mockRegionsObjs := map[meta.Key]*MockRegionsObj{} + mockRoutesObjs := map[meta.Key]*MockRoutesObj{} + mockSslCertificatesObjs := map[meta.Key]*MockSslCertificatesObj{} + mockTargetHttpProxiesObjs := map[meta.Key]*MockTargetHttpProxiesObj{} + mockTargetHttpsProxiesObjs := map[meta.Key]*MockTargetHttpsProxiesObj{} + mockTargetPoolsObjs := map[meta.Key]*MockTargetPoolsObj{} + mockUrlMapsObjs := map[meta.Key]*MockUrlMapsObj{} + mockZonesObjs := map[meta.Key]*MockZonesObj{} + + mock := &MockGCE{ + MockAddresses: NewMockAddresses(mockAddressesObjs), + MockAlphaAddresses: NewMockAlphaAddresses(mockAddressesObjs), + MockBetaAddresses: NewMockBetaAddresses(mockAddressesObjs), + MockGlobalAddresses: NewMockGlobalAddresses(mockGlobalAddressesObjs), + MockBackendServices: NewMockBackendServices(mockBackendServicesObjs), + MockAlphaBackendServices: NewMockAlphaBackendServices(mockBackendServicesObjs), + 
MockAlphaRegionBackendServices: NewMockAlphaRegionBackendServices(mockRegionBackendServicesObjs), + MockDisks: NewMockDisks(mockDisksObjs), + MockAlphaDisks: NewMockAlphaDisks(mockDisksObjs), + MockAlphaRegionDisks: NewMockAlphaRegionDisks(mockRegionDisksObjs), + MockFirewalls: NewMockFirewalls(mockFirewallsObjs), + MockForwardingRules: NewMockForwardingRules(mockForwardingRulesObjs), + MockAlphaForwardingRules: NewMockAlphaForwardingRules(mockForwardingRulesObjs), + MockGlobalForwardingRules: NewMockGlobalForwardingRules(mockGlobalForwardingRulesObjs), + MockHealthChecks: NewMockHealthChecks(mockHealthChecksObjs), + MockAlphaHealthChecks: NewMockAlphaHealthChecks(mockHealthChecksObjs), + MockHttpHealthChecks: NewMockHttpHealthChecks(mockHttpHealthChecksObjs), + MockHttpsHealthChecks: NewMockHttpsHealthChecks(mockHttpsHealthChecksObjs), + MockInstanceGroups: NewMockInstanceGroups(mockInstanceGroupsObjs), + MockInstances: NewMockInstances(mockInstancesObjs), + MockBetaInstances: NewMockBetaInstances(mockInstancesObjs), + MockAlphaInstances: NewMockAlphaInstances(mockInstancesObjs), + MockAlphaNetworkEndpointGroups: NewMockAlphaNetworkEndpointGroups(mockNetworkEndpointGroupsObjs), + MockProjects: NewMockProjects(mockProjectsObjs), + MockRegions: NewMockRegions(mockRegionsObjs), + MockRoutes: NewMockRoutes(mockRoutesObjs), + MockSslCertificates: NewMockSslCertificates(mockSslCertificatesObjs), + MockTargetHttpProxies: NewMockTargetHttpProxies(mockTargetHttpProxiesObjs), + MockTargetHttpsProxies: NewMockTargetHttpsProxies(mockTargetHttpsProxiesObjs), + MockTargetPools: NewMockTargetPools(mockTargetPoolsObjs), + MockUrlMaps: NewMockUrlMaps(mockUrlMapsObjs), + MockZones: NewMockZones(mockZonesObjs), + } + return mock +} + +// MockGCE implements Cloud. +var _ Cloud = (*MockGCE)(nil) + +// MockGCE is the mock for the compute API. 
+type MockGCE struct { + MockAddresses *MockAddresses + MockAlphaAddresses *MockAlphaAddresses + MockBetaAddresses *MockBetaAddresses + MockGlobalAddresses *MockGlobalAddresses + MockBackendServices *MockBackendServices + MockAlphaBackendServices *MockAlphaBackendServices + MockAlphaRegionBackendServices *MockAlphaRegionBackendServices + MockDisks *MockDisks + MockAlphaDisks *MockAlphaDisks + MockAlphaRegionDisks *MockAlphaRegionDisks + MockFirewalls *MockFirewalls + MockForwardingRules *MockForwardingRules + MockAlphaForwardingRules *MockAlphaForwardingRules + MockGlobalForwardingRules *MockGlobalForwardingRules + MockHealthChecks *MockHealthChecks + MockAlphaHealthChecks *MockAlphaHealthChecks + MockHttpHealthChecks *MockHttpHealthChecks + MockHttpsHealthChecks *MockHttpsHealthChecks + MockInstanceGroups *MockInstanceGroups + MockInstances *MockInstances + MockBetaInstances *MockBetaInstances + MockAlphaInstances *MockAlphaInstances + MockAlphaNetworkEndpointGroups *MockAlphaNetworkEndpointGroups + MockProjects *MockProjects + MockRegions *MockRegions + MockRoutes *MockRoutes + MockSslCertificates *MockSslCertificates + MockTargetHttpProxies *MockTargetHttpProxies + MockTargetHttpsProxies *MockTargetHttpsProxies + MockTargetPools *MockTargetPools + MockUrlMaps *MockUrlMaps + MockZones *MockZones +} + +func (mock *MockGCE) Addresses() Addresses { + return mock.MockAddresses +} + +func (mock *MockGCE) AlphaAddresses() AlphaAddresses { + return mock.MockAlphaAddresses +} + +func (mock *MockGCE) BetaAddresses() BetaAddresses { + return mock.MockBetaAddresses +} + +func (mock *MockGCE) GlobalAddresses() GlobalAddresses { + return mock.MockGlobalAddresses +} + +func (mock *MockGCE) BackendServices() BackendServices { + return mock.MockBackendServices +} + +func (mock *MockGCE) AlphaBackendServices() AlphaBackendServices { + return mock.MockAlphaBackendServices +} + +func (mock *MockGCE) AlphaRegionBackendServices() AlphaRegionBackendServices { + return 
mock.MockAlphaRegionBackendServices +} + +func (mock *MockGCE) Disks() Disks { + return mock.MockDisks +} + +func (mock *MockGCE) AlphaDisks() AlphaDisks { + return mock.MockAlphaDisks +} + +func (mock *MockGCE) AlphaRegionDisks() AlphaRegionDisks { + return mock.MockAlphaRegionDisks +} + +func (mock *MockGCE) Firewalls() Firewalls { + return mock.MockFirewalls +} + +func (mock *MockGCE) ForwardingRules() ForwardingRules { + return mock.MockForwardingRules +} + +func (mock *MockGCE) AlphaForwardingRules() AlphaForwardingRules { + return mock.MockAlphaForwardingRules +} + +func (mock *MockGCE) GlobalForwardingRules() GlobalForwardingRules { + return mock.MockGlobalForwardingRules +} + +func (mock *MockGCE) HealthChecks() HealthChecks { + return mock.MockHealthChecks +} + +func (mock *MockGCE) AlphaHealthChecks() AlphaHealthChecks { + return mock.MockAlphaHealthChecks +} + +func (mock *MockGCE) HttpHealthChecks() HttpHealthChecks { + return mock.MockHttpHealthChecks +} + +func (mock *MockGCE) HttpsHealthChecks() HttpsHealthChecks { + return mock.MockHttpsHealthChecks +} + +func (mock *MockGCE) InstanceGroups() InstanceGroups { + return mock.MockInstanceGroups +} + +func (mock *MockGCE) Instances() Instances { + return mock.MockInstances +} + +func (mock *MockGCE) BetaInstances() BetaInstances { + return mock.MockBetaInstances +} + +func (mock *MockGCE) AlphaInstances() AlphaInstances { + return mock.MockAlphaInstances +} + +func (mock *MockGCE) AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups { + return mock.MockAlphaNetworkEndpointGroups +} + +func (mock *MockGCE) Projects() Projects { + return mock.MockProjects +} + +func (mock *MockGCE) Regions() Regions { + return mock.MockRegions +} + +func (mock *MockGCE) Routes() Routes { + return mock.MockRoutes +} + +func (mock *MockGCE) SslCertificates() SslCertificates { + return mock.MockSslCertificates +} + +func (mock *MockGCE) TargetHttpProxies() TargetHttpProxies { + return mock.MockTargetHttpProxies +} + +func 
(mock *MockGCE) TargetHttpsProxies() TargetHttpsProxies { + return mock.MockTargetHttpsProxies +} + +func (mock *MockGCE) TargetPools() TargetPools { + return mock.MockTargetPools +} + +func (mock *MockGCE) UrlMaps() UrlMaps { + return mock.MockUrlMaps +} + +func (mock *MockGCE) Zones() Zones { + return mock.MockZones +} + +// MockAddressesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockAddressesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockAddressesObj) ToAlpha() *alpha.Address { + if ret, ok := m.Obj.(*alpha.Address); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Address{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.Address via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockAddressesObj) ToBeta() *beta.Address { + if ret, ok := m.Obj.(*beta.Address); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.Address{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *beta.Address via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockAddressesObj) ToGA() *ga.Address { + if ret, ok := m.Obj.(*ga.Address); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Address{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) + } + return ret +} + +// MockBackendServicesObj is used to store the various object versions in the shared +// map of mocked objects. 
This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockBackendServicesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockBackendServicesObj) ToAlpha() *alpha.BackendService { + if ret, ok := m.Obj.(*alpha.BackendService); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.BackendService{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockBackendServicesObj) ToGA() *ga.BackendService { + if ret, ok := m.Obj.(*ga.BackendService); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.BackendService{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) + } + return ret +} + +// MockDisksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockDisksObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockDisksObj) ToAlpha() *alpha.Disk { + if ret, ok := m.Obj.(*alpha.Disk); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Disk{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.Disk via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockDisksObj) ToGA() *ga.Disk { + if ret, ok := m.Obj.(*ga.Disk); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. 
+ ret := &ga.Disk{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) + } + return ret +} + +// MockFirewallsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockFirewallsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockFirewallsObj) ToGA() *ga.Firewall { + if ret, ok := m.Obj.(*ga.Firewall); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Firewall{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Firewall via JSON: %v", m.Obj, err) + } + return ret +} + +// MockForwardingRulesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockForwardingRulesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockForwardingRulesObj) ToAlpha() *alpha.ForwardingRule { + if ret, ok := m.Obj.(*alpha.ForwardingRule); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.ForwardingRule{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.ForwardingRule via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockForwardingRulesObj) ToGA() *ga.ForwardingRule { + if ret, ok := m.Obj.(*ga.ForwardingRule); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. 
+ ret := &ga.ForwardingRule{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) + } + return ret +} + +// MockGlobalAddressesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockGlobalAddressesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockGlobalAddressesObj) ToGA() *ga.Address { + if ret, ok := m.Obj.(*ga.Address); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Address{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) + } + return ret +} + +// MockGlobalForwardingRulesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockGlobalForwardingRulesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockGlobalForwardingRulesObj) ToGA() *ga.ForwardingRule { + if ret, ok := m.Obj.(*ga.ForwardingRule); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.ForwardingRule{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) + } + return ret +} + +// MockHealthChecksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockHealthChecksObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. 
+func (m *MockHealthChecksObj) ToAlpha() *alpha.HealthCheck { + if ret, ok := m.Obj.(*alpha.HealthCheck); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.HealthCheck{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.HealthCheck via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockHealthChecksObj) ToGA() *ga.HealthCheck { + if ret, ok := m.Obj.(*ga.HealthCheck); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.HealthCheck{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.HealthCheck via JSON: %v", m.Obj, err) + } + return ret +} + +// MockHttpHealthChecksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockHttpHealthChecksObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockHttpHealthChecksObj) ToGA() *ga.HttpHealthCheck { + if ret, ok := m.Obj.(*ga.HttpHealthCheck); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.HttpHealthCheck{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.HttpHealthCheck via JSON: %v", m.Obj, err) + } + return ret +} + +// MockHttpsHealthChecksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockHttpsHealthChecksObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. 
+func (m *MockHttpsHealthChecksObj) ToGA() *ga.HttpsHealthCheck { + if ret, ok := m.Obj.(*ga.HttpsHealthCheck); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.HttpsHealthCheck{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.HttpsHealthCheck via JSON: %v", m.Obj, err) + } + return ret +} + +// MockInstanceGroupsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockInstanceGroupsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockInstanceGroupsObj) ToGA() *ga.InstanceGroup { + if ret, ok := m.Obj.(*ga.InstanceGroup); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.InstanceGroup{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.InstanceGroup via JSON: %v", m.Obj, err) + } + return ret +} + +// MockInstancesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockInstancesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockInstancesObj) ToAlpha() *alpha.Instance { + if ret, ok := m.Obj.(*alpha.Instance); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Instance{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.Instance via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. 
+func (m *MockInstancesObj) ToBeta() *beta.Instance { + if ret, ok := m.Obj.(*beta.Instance); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.Instance{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *beta.Instance via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. +func (m *MockInstancesObj) ToGA() *ga.Instance { + if ret, ok := m.Obj.(*ga.Instance); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Instance{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Instance via JSON: %v", m.Obj, err) + } + return ret +} + +// MockNetworkEndpointGroupsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockNetworkEndpointGroupsObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockNetworkEndpointGroupsObj) ToAlpha() *alpha.NetworkEndpointGroup { + if ret, ok := m.Obj.(*alpha.NetworkEndpointGroup); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.NetworkEndpointGroup{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.NetworkEndpointGroup via JSON: %v", m.Obj, err) + } + return ret +} + +// MockProjectsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockProjectsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. 
+func (m *MockProjectsObj) ToGA() *ga.Project { + if ret, ok := m.Obj.(*ga.Project); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Project{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Project via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRegionBackendServicesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRegionBackendServicesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockRegionBackendServicesObj) ToAlpha() *alpha.BackendService { + if ret, ok := m.Obj.(*alpha.BackendService); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.BackendService{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRegionDisksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRegionDisksObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockRegionDisksObj) ToAlpha() *alpha.Disk { + if ret, ok := m.Obj.(*alpha.Disk); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Disk{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *alpha.Disk via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRegionsObj is used to store the various object versions in the shared +// map of mocked objects. 
This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRegionsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockRegionsObj) ToGA() *ga.Region { + if ret, ok := m.Obj.(*ga.Region); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Region{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Region via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRoutesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRoutesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockRoutesObj) ToGA() *ga.Route { + if ret, ok := m.Obj.(*ga.Route); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Route{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Route via JSON: %v", m.Obj, err) + } + return ret +} + +// MockSslCertificatesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockSslCertificatesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockSslCertificatesObj) ToGA() *ga.SslCertificate { + if ret, ok := m.Obj.(*ga.SslCertificate); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. 
+ ret := &ga.SslCertificate{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.SslCertificate via JSON: %v", m.Obj, err) + } + return ret +} + +// MockTargetHttpProxiesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockTargetHttpProxiesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockTargetHttpProxiesObj) ToGA() *ga.TargetHttpProxy { + if ret, ok := m.Obj.(*ga.TargetHttpProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.TargetHttpProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.TargetHttpProxy via JSON: %v", m.Obj, err) + } + return ret +} + +// MockTargetHttpsProxiesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockTargetHttpsProxiesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockTargetHttpsProxiesObj) ToGA() *ga.TargetHttpsProxy { + if ret, ok := m.Obj.(*ga.TargetHttpsProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.TargetHttpsProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.TargetHttpsProxy via JSON: %v", m.Obj, err) + } + return ret +} + +// MockTargetPoolsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockTargetPoolsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. 
+func (m *MockTargetPoolsObj) ToGA() *ga.TargetPool { + if ret, ok := m.Obj.(*ga.TargetPool); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.TargetPool{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.TargetPool via JSON: %v", m.Obj, err) + } + return ret +} + +// MockUrlMapsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockUrlMapsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockUrlMapsObj) ToGA() *ga.UrlMap { + if ret, ok := m.Obj.(*ga.UrlMap); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.UrlMap{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.UrlMap via JSON: %v", m.Obj, err) + } + return ret +} + +// MockZonesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockZonesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockZonesObj) ToGA() *ga.Zone { + if ret, ok := m.Obj.(*ga.Zone); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Zone{} + if err := copyViaJSON(ret, m.Obj); err != nil { + glog.Errorf("Could not convert %T to *ga.Zone via JSON: %v", m.Obj, err) + } + return ret +} + +// Addresses is an interface that allows for mocking of Addresses. 
+type Addresses interface { + Get(ctx context.Context, key meta.Key) (*ga.Address, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) + Insert(ctx context.Context, key meta.Key, obj *ga.Address) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockAddresses returns a new mock for Addresses. +func NewMockAddresses(objs map[meta.Key]*MockAddressesObj) *MockAddresses { + mock := &MockAddresses{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAddresses is the mock for Addresses. +type MockAddresses struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockAddressesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAddresses, ctx context.Context, key meta.Key) (bool, *ga.Address, error) + ListHook func(m *MockAddresses, ctx context.Context, region string, fl *filter.F) (bool, []*ga.Address, error) + InsertHook func(m *MockAddresses, ctx context.Context, key meta.Key, obj *ga.Address) (bool, error) + DeleteHook func(m *MockAddresses, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockAddresses) Get(ctx context.Context, key meta.Key) (*ga.Address, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAddresses %v not found", key), + } + glog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Address + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAddresses) Insert(ctx context.Context, key meta.Key, obj *ga.Address) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAddresses %v exists", key), + } + glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "addresses", key) + } + + m.Objects[key] = &MockAddressesObj{obj} + glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAddresses) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAddresses %v not found", key), + } + glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAddresses.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockAddresses) Obj(o *ga.Address) *MockAddressesObj { + return &MockAddressesObj{o} +} + +// GCEAddresses is a simplifying adapter for the GCE Addresses. +type GCEAddresses struct { + s *Service +} + +// Get the Address named by key. +func (g *GCEAddresses) Get(ctx context.Context, key meta.Key) (*ga.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Addresses.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Address objects. +func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Addresses.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Address + f := func(l *ga.AddressList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Address with key of value obj. 
+func (g *GCEAddresses) Insert(ctx context.Context, key meta.Key, obj *ga.Address) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.Addresses.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Address referenced by key. +func (g *GCEAddresses) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Addresses.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaAddresses is an interface that allows for mocking of Addresses. +type AlphaAddresses interface { + Get(ctx context.Context, key meta.Key) (*alpha.Address, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.Address) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockAlphaAddresses returns a new mock for Addresses. +func NewMockAlphaAddresses(objs map[meta.Key]*MockAddressesObj) *MockAlphaAddresses { + mock := &MockAlphaAddresses{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaAddresses is the mock for Addresses. +type MockAlphaAddresses struct { + Lock sync.Mutex + + // Objects maintained by the mock. 
+ Objects map[meta.Key]*MockAddressesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaAddresses, ctx context.Context, key meta.Key) (bool, *alpha.Address, error) + ListHook func(m *MockAlphaAddresses, ctx context.Context, region string, fl *filter.F) (bool, []*alpha.Address, error) + InsertHook func(m *MockAlphaAddresses, ctx context.Context, key meta.Key, obj *alpha.Address) (bool, error) + DeleteHook func(m *MockAlphaAddresses, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockAlphaAddresses) Get(ctx context.Context, key meta.Key) (*alpha.Address, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaAddresses %v not found", key), + } + glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.Address + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaAddresses) Insert(ctx context.Context, key meta.Key, obj *alpha.Address) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaAddresses %v exists", key), + } + glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "addresses", key) + } + + m.Objects[key] = &MockAddressesObj{obj} + glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaAddresses) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaAddresses %v not found", key), + } + glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockAlphaAddresses) Obj(o *alpha.Address) *MockAddressesObj { + return &MockAddressesObj{o} +} + +// GCEAlphaAddresses is a simplifying adapter for the GCE Addresses. +type GCEAlphaAddresses struct { + s *Service +} + +// Get the Address named by key. +func (g *GCEAlphaAddresses) Get(ctx context.Context, key meta.Key) (*alpha.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.Addresses.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Address objects. +func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.Addresses.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.Address + f := func(l *alpha.AddressList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Address with key of value obj. 
+func (g *GCEAlphaAddresses) Insert(ctx context.Context, key meta.Key, obj *alpha.Address) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.Addresses.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Address referenced by key. +func (g *GCEAlphaAddresses) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.Addresses.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// BetaAddresses is an interface that allows for mocking of Addresses. +type BetaAddresses interface { + Get(ctx context.Context, key meta.Key) (*beta.Address, error) + List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) + Insert(ctx context.Context, key meta.Key, obj *beta.Address) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockBetaAddresses returns a new mock for Addresses. +func NewMockBetaAddresses(objs map[meta.Key]*MockAddressesObj) *MockBetaAddresses { + mock := &MockBetaAddresses{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaAddresses is the mock for Addresses. 
+type MockBetaAddresses struct {
+	Lock sync.Mutex
+
+	// Objects maintained by the mock.
+	Objects map[meta.Key]*MockAddressesObj
+
+	// If an entry exists for the given key and operation, then the error
+	// will be returned instead of the operation.
+	GetError map[meta.Key]error
+	ListError *error
+	InsertError map[meta.Key]error
+	DeleteError map[meta.Key]error
+
+	// xxxHook allow you to intercept the standard processing of the mock in
+	// order to add your own logic. Return (true, _, _) to prevent the normal
+	// execution flow of the mock. Return (false, nil, nil) to continue with
+	// normal mock behavior after the hook function executes.
+	GetHook func(m *MockBetaAddresses, ctx context.Context, key meta.Key) (bool, *beta.Address, error)
+	ListHook func(m *MockBetaAddresses, ctx context.Context, region string, fl *filter.F) (bool, []*beta.Address, error)
+	InsertHook func(m *MockBetaAddresses, ctx context.Context, key meta.Key, obj *beta.Address) (bool, error)
+	DeleteHook func(m *MockBetaAddresses, ctx context.Context, key meta.Key) (bool, error)
+
+	// X is extra state that can be used as part of the mock. Generated code
+	// will not use this field.
+	X interface{}
+}
+
+// Get returns the object from the mock.
+func (m *MockBetaAddresses) Get(ctx context.Context, key meta.Key) (*beta.Address, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToBeta() + glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaAddresses %v not found", key), + } + glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.Address + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockBetaAddresses) Insert(ctx context.Context, key meta.Key, obj *beta.Address) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaAddresses %v exists", key), + } + glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionBeta, "mock-project", "addresses", key) + } + + m.Objects[key] = &MockAddressesObj{obj} + glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBetaAddresses) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaAddresses %v not found", key), + } + glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockBetaAddresses) Obj(o *beta.Address) *MockAddressesObj { + return &MockAddressesObj{o} +} + +// GCEBetaAddresses is a simplifying adapter for the GCE Addresses. +type GCEBetaAddresses struct { + s *Service +} + +// Get the Address named by key. +func (g *GCEBetaAddresses) Get(ctx context.Context, key meta.Key) (*beta.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Beta.Addresses.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Address objects. +func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Beta.Addresses.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.Address + f := func(l *beta.AddressList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Address with key of value obj. 
+func (g *GCEBetaAddresses) Insert(ctx context.Context, key meta.Key, obj *beta.Address) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Beta.Addresses.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Address referenced by key. +func (g *GCEBetaAddresses) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "Addresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Beta.Addresses.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// GlobalAddresses is an interface that allows for mocking of GlobalAddresses. +type GlobalAddresses interface { + Get(ctx context.Context, key meta.Key) (*ga.Address, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) + Insert(ctx context.Context, key meta.Key, obj *ga.Address) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockGlobalAddresses returns a new mock for GlobalAddresses. +func NewMockGlobalAddresses(objs map[meta.Key]*MockGlobalAddressesObj) *MockGlobalAddresses { + mock := &MockGlobalAddresses{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockGlobalAddresses is the mock for GlobalAddresses. 
+type MockGlobalAddresses struct {
+	Lock sync.Mutex
+
+	// Objects maintained by the mock.
+	Objects map[meta.Key]*MockGlobalAddressesObj
+
+	// If an entry exists for the given key and operation, then the error
+	// will be returned instead of the operation.
+	GetError map[meta.Key]error
+	ListError *error
+	InsertError map[meta.Key]error
+	DeleteError map[meta.Key]error
+
+	// xxxHook allow you to intercept the standard processing of the mock in
+	// order to add your own logic. Return (true, _, _) to prevent the normal
+	// execution flow of the mock. Return (false, nil, nil) to continue with
+	// normal mock behavior after the hook function executes.
+	GetHook func(m *MockGlobalAddresses, ctx context.Context, key meta.Key) (bool, *ga.Address, error)
+	ListHook func(m *MockGlobalAddresses, ctx context.Context, fl *filter.F) (bool, []*ga.Address, error)
+	InsertHook func(m *MockGlobalAddresses, ctx context.Context, key meta.Key, obj *ga.Address) (bool, error)
+	DeleteHook func(m *MockGlobalAddresses, ctx context.Context, key meta.Key) (bool, error)
+
+	// X is extra state that can be used as part of the mock. Generated code
+	// will not use this field.
+	X interface{}
+}
+
+// Get returns the object from the mock.
+func (m *MockGlobalAddresses) Get(ctx context.Context, key meta.Key) (*ga.Address, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), + } + glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Address + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockGlobalAddresses) Insert(ctx context.Context, key meta.Key, obj *ga.Address) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockGlobalAddresses %v exists", key), + } + glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "addresses", key) + } + + m.Objects[key] = &MockGlobalAddressesObj{obj} + glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockGlobalAddresses) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), + } + glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockGlobalAddresses) Obj(o *ga.Address) *MockGlobalAddressesObj { + return &MockGlobalAddressesObj{o} +} + +// GCEGlobalAddresses is a simplifying adapter for the GCE GlobalAddresses. +type GCEGlobalAddresses struct { + s *Service +} + +// Get the Address named by key. +func (g *GCEGlobalAddresses) Get(ctx context.Context, key meta.Key) (*ga.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "GlobalAddresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.GlobalAddresses.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Address objects. +func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "GlobalAddresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.GlobalAddresses.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Address + f := func(l *ga.AddressList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Address with key of value obj. 
+func (g *GCEGlobalAddresses) Insert(ctx context.Context, key meta.Key, obj *ga.Address) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "GlobalAddresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.GlobalAddresses.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Address referenced by key. +func (g *GCEGlobalAddresses) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "GlobalAddresses", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.GlobalAddresses.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// BackendServices is an interface that allows for mocking of BackendServices. +type BackendServices interface { + Get(ctx context.Context, key meta.Key) (*ga.BackendService, error) + List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) + Insert(ctx context.Context, key meta.Key, obj *ga.BackendService) error + Delete(ctx context.Context, key meta.Key) error + GetHealth(context.Context, meta.Key, *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) + Update(context.Context, meta.Key, *ga.BackendService) error +} + +// NewMockBackendServices returns a new mock for BackendServices. 
+func NewMockBackendServices(objs map[meta.Key]*MockBackendServicesObj) *MockBackendServices {
+	mock := &MockBackendServices{
+		Objects: objs,
+		GetError: map[meta.Key]error{},
+		InsertError: map[meta.Key]error{},
+		DeleteError: map[meta.Key]error{},
+	}
+	return mock
+}
+
+// MockBackendServices is the mock for BackendServices.
+type MockBackendServices struct {
+	Lock sync.Mutex
+
+	// Objects maintained by the mock.
+	Objects map[meta.Key]*MockBackendServicesObj
+
+	// If an entry exists for the given key and operation, then the error
+	// will be returned instead of the operation.
+	GetError map[meta.Key]error
+	ListError *error
+	InsertError map[meta.Key]error
+	DeleteError map[meta.Key]error
+
+	// xxxHook allow you to intercept the standard processing of the mock in
+	// order to add your own logic. Return (true, _, _) to prevent the normal
+	// execution flow of the mock. Return (false, nil, nil) to continue with
+	// normal mock behavior after the hook function executes.
+	GetHook func(m *MockBackendServices, ctx context.Context, key meta.Key) (bool, *ga.BackendService, error)
+	ListHook func(m *MockBackendServices, ctx context.Context, fl *filter.F) (bool, []*ga.BackendService, error)
+	InsertHook func(m *MockBackendServices, ctx context.Context, key meta.Key, obj *ga.BackendService) (bool, error)
+	DeleteHook func(m *MockBackendServices, ctx context.Context, key meta.Key) (bool, error)
+	GetHealthHook func(*MockBackendServices, context.Context, meta.Key, *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error)
+	UpdateHook func(*MockBackendServices, context.Context, meta.Key, *ga.BackendService) error
+
+	// X is extra state that can be used as part of the mock. Generated code
+	// will not use this field.
+	X interface{}
+}
+
+// Get returns the object from the mock.
+func (m *MockBackendServices) Get(ctx context.Context, key meta.Key) (*ga.BackendService, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBackendServices %v not found", key), + } + glog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.BackendService + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockBackendServices) Insert(ctx context.Context, key meta.Key, obj *ga.BackendService) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBackendServices %v exists", key), + } + glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "backendServices", key) + } + + m.Objects[key] = &MockBackendServicesObj{obj} + glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBackendServices) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBackendServices %v not found", key), + } + glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockBackendServices) Obj(o *ga.BackendService) *MockBackendServicesObj { + return &MockBackendServicesObj{o} +} + +// GetHealth is a mock for the corresponding method. +func (m *MockBackendServices) GetHealth(ctx context.Context, key meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { + if m.GetHealthHook != nil { + return m.GetHealthHook(m, ctx, key, arg0) + } + return nil, fmt.Errorf("GetHealthHook must be set") +} + +// Update is a mock for the corresponding method. +func (m *MockBackendServices) Update(ctx context.Context, key meta.Key, arg0 *ga.BackendService) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEBackendServices is a simplifying adapter for the GCE BackendServices. +type GCEBackendServices struct { + s *Service +} + +// Get the BackendService named by key. +func (g *GCEBackendServices) Get(ctx context.Context, key meta.Key) (*ga.BackendService, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.BackendServices.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all BackendService objects. 
+func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.BackendServices.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.BackendService + f := func(l *ga.BackendServiceList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert BackendService with key of value obj. +func (g *GCEBackendServices) Insert(ctx context.Context, key meta.Key, obj *ga.BackendService) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.BackendServices.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the BackendService referenced by key. +func (g *GCEBackendServices) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.BackendServices.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// GetHealth is a method on GCEBackendServices. 
+func (g *GCEBackendServices) GetHealth(ctx context.Context, key meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "GetHealth", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.BackendServices.GetHealth(projectID, key.Name, arg0) + call.Context(ctx) + return call.Do() +} + +// Update is a method on GCEBackendServices. +func (g *GCEBackendServices) Update(ctx context.Context, key meta.Key, arg0 *ga.BackendService) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.BackendServices.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaBackendServices is an interface that allows for mocking of BackendServices. +type AlphaBackendServices interface { + Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error + Delete(ctx context.Context, key meta.Key) error + Update(context.Context, meta.Key, *alpha.BackendService) error +} + +// NewMockAlphaBackendServices returns a new mock for BackendServices. 
+func NewMockAlphaBackendServices(objs map[meta.Key]*MockBackendServicesObj) *MockAlphaBackendServices {
+	mock := &MockAlphaBackendServices{
+		Objects: objs,
+		GetError: map[meta.Key]error{},
+		InsertError: map[meta.Key]error{},
+		DeleteError: map[meta.Key]error{},
+	}
+	return mock
+}
+
+// MockAlphaBackendServices is the mock for BackendServices.
+type MockAlphaBackendServices struct {
+	Lock sync.Mutex
+
+	// Objects maintained by the mock.
+	Objects map[meta.Key]*MockBackendServicesObj
+
+	// If an entry exists for the given key and operation, then the error
+	// will be returned instead of the operation.
+	GetError map[meta.Key]error
+	ListError *error
+	InsertError map[meta.Key]error
+	DeleteError map[meta.Key]error
+
+	// xxxHook allow you to intercept the standard processing of the mock in
+	// order to add your own logic. Return (true, _, _) to prevent the normal
+	// execution flow of the mock. Return (false, nil, nil) to continue with
+	// normal mock behavior after the hook function executes.
+	GetHook func(m *MockAlphaBackendServices, ctx context.Context, key meta.Key) (bool, *alpha.BackendService, error)
+	ListHook func(m *MockAlphaBackendServices, ctx context.Context, fl *filter.F) (bool, []*alpha.BackendService, error)
+	InsertHook func(m *MockAlphaBackendServices, ctx context.Context, key meta.Key, obj *alpha.BackendService) (bool, error)
+	DeleteHook func(m *MockAlphaBackendServices, ctx context.Context, key meta.Key) (bool, error)
+	UpdateHook func(*MockAlphaBackendServices, context.Context, meta.Key, *alpha.BackendService) error
+
+	// X is extra state that can be used as part of the mock. Generated code
+	// will not use this field.
+	X interface{}
+}
+
+// Get returns the object from the mock.
+func (m *MockAlphaBackendServices) Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), + } + glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.BackendService + for _, obj := range m.Objects { + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaBackendServices) Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaBackendServices %v exists", key), + } + glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "backendServices", key) + } + + m.Objects[key] = &MockBackendServicesObj{obj} + glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockAlphaBackendServices) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), + } + glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaBackendServices) Obj(o *alpha.BackendService) *MockBackendServicesObj { + return &MockBackendServicesObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockAlphaBackendServices) Update(ctx context.Context, key meta.Key, arg0 *alpha.BackendService) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEAlphaBackendServices is a simplifying adapter for the GCE BackendServices. +type GCEAlphaBackendServices struct { + s *Service +} + +// Get the BackendService named by key. +func (g *GCEAlphaBackendServices) Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "BackendServices", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.BackendServices.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all BackendService objects. 
func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "List",
		Version:   meta.Version("alpha"),
		Service:   "BackendServices",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.Alpha.BackendServices.List(projectID)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	// Accumulate every page of results before returning.
	var all []*alpha.BackendService
	f := func(l *alpha.BackendServiceList) error {
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}

// Insert BackendService with key of value obj.
func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Insert",
		Version:   meta.Version("alpha"),
		Service:   "BackendServices",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	// The object's name is always taken from the key.
	obj.Name = key.Name
	call := g.s.Alpha.BackendServices.Insert(projectID, obj)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Block until the returned GCE operation finishes.
	return g.s.WaitForCompletion(ctx, op)
}

// Delete the BackendService referenced by key.
func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key meta.Key) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Delete",
		Version:   meta.Version("alpha"),
		Service:   "BackendServices",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.Alpha.BackendServices.Delete(projectID, key.Name)

	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// Update is a method on GCEAlphaBackendServices.
func (g *GCEAlphaBackendServices) Update(ctx context.Context, key meta.Key, arg0 *alpha.BackendService) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Update",
		Version:   meta.Version("alpha"),
		Service:   "BackendServices",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.Alpha.BackendServices.Update(projectID, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// AlphaRegionBackendServices is an interface that allows for mocking of RegionBackendServices.
type AlphaRegionBackendServices interface {
	Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error)
	List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error)
	Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error
	Delete(ctx context.Context, key meta.Key) error
	GetHealth(context.Context, meta.Key, *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error)
	Update(context.Context, meta.Key, *alpha.BackendService) error
}

// NewMockAlphaRegionBackendServices returns a new mock for RegionBackendServices.
func NewMockAlphaRegionBackendServices(objs map[meta.Key]*MockRegionBackendServicesObj) *MockAlphaRegionBackendServices {
	mock := &MockAlphaRegionBackendServices{
		Objects:     objs,
		GetError:    map[meta.Key]error{},
		InsertError: map[meta.Key]error{},
		DeleteError: map[meta.Key]error{},
	}
	return mock
}

// MockAlphaRegionBackendServices is the mock for RegionBackendServices.
type MockAlphaRegionBackendServices struct {
	// Lock guards Objects and the *Error maps; every mock method holds it
	// for the duration of the call. Hooks are invoked BEFORE the lock is
	// taken.
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockRegionBackendServicesObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError map[meta.Key]error
	// ListError is a single shared error for all List calls.
	ListError   *error
	InsertError map[meta.Key]error
	DeleteError map[meta.Key]error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior/ after the hook function executes.
	GetHook       func(m *MockAlphaRegionBackendServices, ctx context.Context, key meta.Key) (bool, *alpha.BackendService, error)
	ListHook      func(m *MockAlphaRegionBackendServices, ctx context.Context, region string, fl *filter.F) (bool, []*alpha.BackendService, error)
	InsertHook    func(m *MockAlphaRegionBackendServices, ctx context.Context, key meta.Key, obj *alpha.BackendService) (bool, error)
	DeleteHook    func(m *MockAlphaRegionBackendServices, ctx context.Context, key meta.Key) (bool, error)
	GetHealthHook func(*MockAlphaRegionBackendServices, context.Context, meta.Key, *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error)
	UpdateHook    func(*MockAlphaRegionBackendServices, context.Context, meta.Key, *alpha.BackendService) error

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) {
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.GetError[key]; ok {
		glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[key]; ok {
		typedObj := obj.ToAlpha()
		glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

	// Mimic the real API: a missing object surfaces as a googleapi 404.
	err := &googleapi.Error{
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key),
	}
	glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

// List all of the objects in the mock in the given region.
func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) {
	if m.ListHook != nil {
		if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept {
			glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
			return objs, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if m.ListError != nil {
		err := *m.ListError
		glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)

		// NOTE(review): second deref duplicates err above — generator cleanup.
		return nil, *m.ListError
	}

	// Only objects whose key matches the requested region are considered.
	var objs []*alpha.BackendService
	for key, obj := range m.Objects {
		if key.Region != region {
			continue
		}
		if !fl.Match(obj.ToAlpha()) {
			continue
		}
		objs = append(objs, obj.ToAlpha())
	}

	glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
	return objs, nil
}

// Insert is a mock for inserting/creating a new object.
func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error {
	if m.InsertHook != nil {
		if intercept, err := m.InsertHook(m, ctx, key, obj); intercept {
			glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.InsertError[key]; ok {
		glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}
	if _, ok := m.Objects[key]; ok {
		// Duplicate insert surfaces as a googleapi 409.
		err := &googleapi.Error{
			Code:    http.StatusConflict,
			Message: fmt.Sprintf("MockAlphaRegionBackendServices %v exists", key),
		}
		glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}

	// Name comes from the key; synthesize a SelfLink when absent.
	obj.Name = key.Name
	if obj.SelfLink == "" {
		obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "backendServices", key)
	}

	m.Objects[key] = &MockRegionBackendServicesObj{obj}
	glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj)
	return nil
}

// Delete is a mock for deleting the object.
func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[key]; ok {
		glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	if _, ok := m.Objects[key]; !ok {
		err := &googleapi.Error{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key),
		}
		glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, key)
	glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = nil", ctx, key)
	return nil
}

// Obj wraps the object for use in the mock.
func (m *MockAlphaRegionBackendServices) Obj(o *alpha.BackendService) *MockRegionBackendServicesObj {
	return &MockRegionBackendServicesObj{o}
}

// GetHealth is a mock for the corresponding method. There is no default
// behavior: an error is returned unless GetHealthHook is set.
func (m *MockAlphaRegionBackendServices) GetHealth(ctx context.Context, key meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) {
	if m.GetHealthHook != nil {
		return m.GetHealthHook(m, ctx, key, arg0)
	}
	return nil, fmt.Errorf("GetHealthHook must be set")
}

// Update is a mock for the corresponding method. With no UpdateHook set it is
// a silent no-op that returns nil.
func (m *MockAlphaRegionBackendServices) Update(ctx context.Context, key meta.Key, arg0 *alpha.BackendService) error {
	if m.UpdateHook != nil {
		return m.UpdateHook(m, ctx, key, arg0)
	}
	return nil
}

// GCEAlphaRegionBackendServices is a simplifying adapter for the GCE RegionBackendServices.
type GCEAlphaRegionBackendServices struct {
	s *Service
}

// Get the BackendService named by key.
func (g *GCEAlphaRegionBackendServices) Get(ctx context.Context, key meta.Key) (*alpha.BackendService, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Get",
		Version:   meta.Version("alpha"),
		Service:   "RegionBackendServices",
	}
	// Gate every API call on the service-wide rate limiter.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.Alpha.RegionBackendServices.Get(projectID, key.Region, key.Name)
	call.Context(ctx)
	return call.Do()
}

// List all BackendService objects.
func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "List",
		Version:   meta.Version("alpha"),
		Service:   "RegionBackendServices",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.Alpha.RegionBackendServices.List(projectID, region)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	// Accumulate every page of results before returning.
	var all []*alpha.BackendService
	f := func(l *alpha.BackendServiceList) error {
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}

// Insert BackendService with key of value obj.
func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key meta.Key, obj *alpha.BackendService) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Insert",
		Version:   meta.Version("alpha"),
		Service:   "RegionBackendServices",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	// The object's name is always taken from the key.
	obj.Name = key.Name
	call := g.s.Alpha.RegionBackendServices.Insert(projectID, key.Region, obj)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Block until the returned GCE operation finishes.
	return g.s.WaitForCompletion(ctx, op)
}

// Delete the BackendService referenced by key.
func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key meta.Key) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Delete",
		Version:   meta.Version("alpha"),
		Service:   "RegionBackendServices",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.Alpha.RegionBackendServices.Delete(projectID, key.Region, key.Name)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// GetHealth is a method on GCEAlphaRegionBackendServices.
func (g *GCEAlphaRegionBackendServices) GetHealth(ctx context.Context, key meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "GetHealth",
		Version:   meta.Version("alpha"),
		Service:   "RegionBackendServices",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.Alpha.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0)
	call.Context(ctx)
	return call.Do()
}

// Update is a method on GCEAlphaRegionBackendServices.
func (g *GCEAlphaRegionBackendServices) Update(ctx context.Context, key meta.Key, arg0 *alpha.BackendService) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Update",
		Version:   meta.Version("alpha"),
		Service:   "RegionBackendServices",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.Alpha.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// Disks is an interface that allows for mocking of Disks.
type Disks interface {
	Get(ctx context.Context, key meta.Key) (*ga.Disk, error)
	List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error)
	Insert(ctx context.Context, key meta.Key, obj *ga.Disk) error
	Delete(ctx context.Context, key meta.Key) error
}

// NewMockDisks returns a new mock for Disks.
func NewMockDisks(objs map[meta.Key]*MockDisksObj) *MockDisks {
	mock := &MockDisks{
		Objects:     objs,
		GetError:    map[meta.Key]error{},
		InsertError: map[meta.Key]error{},
		DeleteError: map[meta.Key]error{},
	}
	return mock
}

// MockDisks is the mock for Disks.
type MockDisks struct {
	// Lock guards Objects and the *Error maps; every mock method holds it
	// for the duration of the call. Hooks are invoked BEFORE the lock is
	// taken.
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockDisksObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError map[meta.Key]error
	// ListError is a single shared error for all List calls.
	ListError   *error
	InsertError map[meta.Key]error
	DeleteError map[meta.Key]error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior/ after the hook function executes.
	GetHook    func(m *MockDisks, ctx context.Context, key meta.Key) (bool, *ga.Disk, error)
	ListHook   func(m *MockDisks, ctx context.Context, zone string, fl *filter.F) (bool, []*ga.Disk, error)
	InsertHook func(m *MockDisks, ctx context.Context, key meta.Key, obj *ga.Disk) (bool, error)
	DeleteHook func(m *MockDisks, ctx context.Context, key meta.Key) (bool, error)

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
func (m *MockDisks) Get(ctx context.Context, key meta.Key) (*ga.Disk, error) {
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.GetError[key]; ok {
		glog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[key]; ok {
		typedObj := obj.ToGA()
		glog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

	// Mimic the real API: a missing object surfaces as a googleapi 404.
	err := &googleapi.Error{
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockDisks %v not found", key),
	}
	glog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

// List all of the objects in the mock in the given zone.
func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) {
	if m.ListHook != nil {
		if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept {
			glog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err)
			return objs, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if m.ListError != nil {
		err := *m.ListError
		glog.V(5).Infof("MockDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err)

		// NOTE(review): second deref duplicates err above — generator cleanup.
		return nil, *m.ListError
	}

	// Only objects whose key matches the requested zone are considered.
	var objs []*ga.Disk
	for key, obj := range m.Objects {
		if key.Zone != zone {
			continue
		}
		if !fl.Match(obj.ToGA()) {
			continue
		}
		objs = append(objs, obj.ToGA())
	}

	glog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs))
	return objs, nil
}

// Insert is a mock for inserting/creating a new object.
func (m *MockDisks) Insert(ctx context.Context, key meta.Key, obj *ga.Disk) error {
	if m.InsertHook != nil {
		if intercept, err := m.InsertHook(m, ctx, key, obj); intercept {
			glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.InsertError[key]; ok {
		glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}
	if _, ok := m.Objects[key]; ok {
		// Duplicate insert surfaces as a googleapi 409.
		err := &googleapi.Error{
			Code:    http.StatusConflict,
			Message: fmt.Sprintf("MockDisks %v exists", key),
		}
		glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}

	// Name comes from the key; synthesize a SelfLink when absent.
	obj.Name = key.Name
	if obj.SelfLink == "" {
		obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "disks", key)
	}

	m.Objects[key] = &MockDisksObj{obj}
	glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
	return nil
}

// Delete is a mock for deleting the object.
func (m *MockDisks) Delete(ctx context.Context, key meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[key]; ok {
		glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	if _, ok := m.Objects[key]; !ok {
		err := &googleapi.Error{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockDisks %v not found", key),
		}
		glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, key)
	glog.V(5).Infof("MockDisks.Delete(%v, %v) = nil", ctx, key)
	return nil
}

// Obj wraps the object for use in the mock.
func (m *MockDisks) Obj(o *ga.Disk) *MockDisksObj {
	return &MockDisksObj{o}
}

// GCEDisks is a simplifying adapter for the GCE Disks.
type GCEDisks struct {
	s *Service
}

// Get the Disk named by key.
func (g *GCEDisks) Get(ctx context.Context, key meta.Key) (*ga.Disk, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Get",
		Version:   meta.Version("ga"),
		Service:   "Disks",
	}
	// Gate every API call on the service-wide rate limiter.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.Disks.Get(projectID, key.Zone, key.Name)
	call.Context(ctx)
	return call.Do()
}

// List all Disk objects.
func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "List",
		Version:   meta.Version("ga"),
		Service:   "Disks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.Disks.List(projectID, zone)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	// Accumulate every page of results before returning.
	var all []*ga.Disk
	f := func(l *ga.DiskList) error {
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}

// Insert Disk with key of value obj.
func (g *GCEDisks) Insert(ctx context.Context, key meta.Key, obj *ga.Disk) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Insert",
		Version:   meta.Version("ga"),
		Service:   "Disks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	// The object's name is always taken from the key.
	obj.Name = key.Name
	call := g.s.GA.Disks.Insert(projectID, key.Zone, obj)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Block until the returned GCE operation finishes.
	return g.s.WaitForCompletion(ctx, op)
}

// Delete the Disk referenced by key.
func (g *GCEDisks) Delete(ctx context.Context, key meta.Key) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Delete",
		Version:   meta.Version("ga"),
		Service:   "Disks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.Disks.Delete(projectID, key.Zone, key.Name)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// AlphaDisks is an interface that allows for mocking of Disks.
type AlphaDisks interface {
	Get(ctx context.Context, key meta.Key) (*alpha.Disk, error)
	List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Disk, error)
	Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error
	Delete(ctx context.Context, key meta.Key) error
}

// NewMockAlphaDisks returns a new mock for Disks.
func NewMockAlphaDisks(objs map[meta.Key]*MockDisksObj) *MockAlphaDisks {
	mock := &MockAlphaDisks{
		Objects:     objs,
		GetError:    map[meta.Key]error{},
		InsertError: map[meta.Key]error{},
		DeleteError: map[meta.Key]error{},
	}
	return mock
}

// MockAlphaDisks is the mock for Disks.
type MockAlphaDisks struct {
	// Lock guards Objects and the *Error maps; every mock method holds it
	// for the duration of the call. Hooks are invoked BEFORE the lock is
	// taken.
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockDisksObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError map[meta.Key]error
	// ListError is a single shared error for all List calls.
	ListError   *error
	InsertError map[meta.Key]error
	DeleteError map[meta.Key]error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior/ after the hook function executes.
	GetHook    func(m *MockAlphaDisks, ctx context.Context, key meta.Key) (bool, *alpha.Disk, error)
	ListHook   func(m *MockAlphaDisks, ctx context.Context, zone string, fl *filter.F) (bool, []*alpha.Disk, error)
	InsertHook func(m *MockAlphaDisks, ctx context.Context, key meta.Key, obj *alpha.Disk) (bool, error)
	DeleteHook func(m *MockAlphaDisks, ctx context.Context, key meta.Key) (bool, error)

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
func (m *MockAlphaDisks) Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) {
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.GetError[key]; ok {
		glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[key]; ok {
		typedObj := obj.ToAlpha()
		glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

	// Mimic the real API: a missing object surfaces as a googleapi 404.
	err := &googleapi.Error{
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockAlphaDisks %v not found", key),
	}
	glog.V(5).Infof("MockAlphaDisks.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

// List all of the objects in the mock in the given zone.
func (m *MockAlphaDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Disk, error) {
	if m.ListHook != nil {
		if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept {
			glog.V(5).Infof("MockAlphaDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err)
			return objs, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if m.ListError != nil {
		err := *m.ListError
		glog.V(5).Infof("MockAlphaDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err)

		// NOTE(review): second deref duplicates err above — generator cleanup.
		return nil, *m.ListError
	}

	// Only objects whose key matches the requested zone are considered.
	var objs []*alpha.Disk
	for key, obj := range m.Objects {
		if key.Zone != zone {
			continue
		}
		if !fl.Match(obj.ToAlpha()) {
			continue
		}
		objs = append(objs, obj.ToAlpha())
	}

	glog.V(5).Infof("MockAlphaDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs))
	return objs, nil
}

// Insert is a mock for inserting/creating a new object.
func (m *MockAlphaDisks) Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error {
	if m.InsertHook != nil {
		if intercept, err := m.InsertHook(m, ctx, key, obj); intercept {
			glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.InsertError[key]; ok {
		glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}
	if _, ok := m.Objects[key]; ok {
		// Duplicate insert surfaces as a googleapi 409.
		err := &googleapi.Error{
			Code:    http.StatusConflict,
			Message: fmt.Sprintf("MockAlphaDisks %v exists", key),
		}
		glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}

	// Name comes from the key; synthesize a SelfLink when absent.
	obj.Name = key.Name
	if obj.SelfLink == "" {
		obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "disks", key)
	}

	m.Objects[key] = &MockDisksObj{obj}
	glog.V(5).Infof("MockAlphaDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
	return nil
}

// Delete is a mock for deleting the object.
func (m *MockAlphaDisks) Delete(ctx context.Context, key meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[key]; ok {
		glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	if _, ok := m.Objects[key]; !ok {
		err := &googleapi.Error{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockAlphaDisks %v not found", key),
		}
		glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, key)
	glog.V(5).Infof("MockAlphaDisks.Delete(%v, %v) = nil", ctx, key)
	return nil
}

// Obj wraps the object for use in the mock.
func (m *MockAlphaDisks) Obj(o *alpha.Disk) *MockDisksObj {
	return &MockDisksObj{o}
}

// GCEAlphaDisks is a simplifying adapter for the GCE Disks.
type GCEAlphaDisks struct {
	s *Service
}

// Get the Disk named by key.
func (g *GCEAlphaDisks) Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Get",
		Version:   meta.Version("alpha"),
		Service:   "Disks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.Alpha.Disks.Get(projectID, key.Zone, key.Name)
	call.Context(ctx)
	return call.Do()
}

// List all Disk objects.
func (g *GCEAlphaDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Disk, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "List",
		Version:   meta.Version("alpha"),
		Service:   "Disks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.Alpha.Disks.List(projectID, zone)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	// Accumulate every page of results before returning.
	var all []*alpha.Disk
	f := func(l *alpha.DiskList) error {
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}

// Insert Disk with key of value obj.
func (g *GCEAlphaDisks) Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Insert",
		Version:   meta.Version("alpha"),
		Service:   "Disks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	// The object's name is always taken from the key.
	obj.Name = key.Name
	call := g.s.Alpha.Disks.Insert(projectID, key.Zone, obj)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Block until the returned GCE operation finishes.
	return g.s.WaitForCompletion(ctx, op)
}

// Delete the Disk referenced by key.
func (g *GCEAlphaDisks) Delete(ctx context.Context, key meta.Key) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Disks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Delete",
		Version:   meta.Version("alpha"),
		Service:   "Disks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.Alpha.Disks.Delete(projectID, key.Zone, key.Name)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// AlphaRegionDisks is an interface that allows for mocking of RegionDisks.
+type AlphaRegionDisks interface { + Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Disk, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockAlphaRegionDisks returns a new mock for RegionDisks. +func NewMockAlphaRegionDisks(objs map[meta.Key]*MockRegionDisksObj) *MockAlphaRegionDisks { + mock := &MockAlphaRegionDisks{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaRegionDisks is the mock for RegionDisks. +type MockAlphaRegionDisks struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRegionDisksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaRegionDisks, ctx context.Context, key meta.Key) (bool, *alpha.Disk, error) + ListHook func(m *MockAlphaRegionDisks, ctx context.Context, region string, fl *filter.F) (bool, []*alpha.Disk, error) + InsertHook func(m *MockAlphaRegionDisks, ctx context.Context, key meta.Key, obj *alpha.Disk) (bool, error) + DeleteHook func(m *MockAlphaRegionDisks, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockAlphaRegionDisks) Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionDisks %v not found", key), + } + glog.V(5).Infof("MockAlphaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockAlphaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Disk, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockAlphaRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.Disk + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaRegionDisks) Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaRegionDisks %v exists", key), + } + glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "disks", key) + } + + m.Objects[key] = &MockRegionDisksObj{obj} + glog.V(5).Infof("MockAlphaRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaRegionDisks) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionDisks %v not found", key), + } + glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaRegionDisks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockAlphaRegionDisks) Obj(o *alpha.Disk) *MockRegionDisksObj { + return &MockRegionDisksObj{o} +} + +// GCEAlphaRegionDisks is a simplifying adapter for the GCE RegionDisks. +type GCEAlphaRegionDisks struct { + s *Service +} + +// Get the Disk named by key. +func (g *GCEAlphaRegionDisks) Get(ctx context.Context, key meta.Key) (*alpha.Disk, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "RegionDisks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.RegionDisks.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Disk objects. +func (g *GCEAlphaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Disk, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "RegionDisks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.RegionDisks.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.Disk + f := func(l *alpha.DiskList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Disk with key of value obj. 
+func (g *GCEAlphaRegionDisks) Insert(ctx context.Context, key meta.Key, obj *alpha.Disk) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "RegionDisks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.RegionDisks.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Disk referenced by key. +func (g *GCEAlphaRegionDisks) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionDisks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "RegionDisks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.RegionDisks.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Firewalls is an interface that allows for mocking of Firewalls. +type Firewalls interface { + Get(ctx context.Context, key meta.Key) (*ga.Firewall, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) + Insert(ctx context.Context, key meta.Key, obj *ga.Firewall) error + Delete(ctx context.Context, key meta.Key) error + Update(context.Context, meta.Key, *ga.Firewall) error +} + +// NewMockFirewalls returns a new mock for Firewalls. +func NewMockFirewalls(objs map[meta.Key]*MockFirewallsObj) *MockFirewalls { + mock := &MockFirewalls{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockFirewalls is the mock for Firewalls. 
+type MockFirewalls struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockFirewallsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockFirewalls, ctx context.Context, key meta.Key) (bool, *ga.Firewall, error) + ListHook func(m *MockFirewalls, ctx context.Context, fl *filter.F) (bool, []*ga.Firewall, error) + InsertHook func(m *MockFirewalls, ctx context.Context, key meta.Key, obj *ga.Firewall) (bool, error) + DeleteHook func(m *MockFirewalls, ctx context.Context, key meta.Key) (bool, error) + UpdateHook func(*MockFirewalls, context.Context, meta.Key, *ga.Firewall) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockFirewalls) Get(ctx context.Context, key meta.Key) (*ga.Firewall, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockFirewalls %v not found", key), + } + glog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockFirewalls.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Firewall + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockFirewalls) Insert(ctx context.Context, key meta.Key, obj *ga.Firewall) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockFirewalls %v exists", key), + } + glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "firewalls", key) + } + + m.Objects[key] = &MockFirewallsObj{obj} + glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockFirewalls) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockFirewalls %v not found", key), + } + glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockFirewalls) Obj(o *ga.Firewall) *MockFirewallsObj { + return &MockFirewallsObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockFirewalls) Update(ctx context.Context, key meta.Key, arg0 *ga.Firewall) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEFirewalls is a simplifying adapter for the GCE Firewalls. +type GCEFirewalls struct { + s *Service +} + +// Get the Firewall named by key. +func (g *GCEFirewalls) Get(ctx context.Context, key meta.Key) (*ga.Firewall, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Firewalls", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Firewalls.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Firewall objects. +func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Firewalls", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Firewalls.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Firewall + f := func(l *ga.FirewallList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Firewall with key of value obj. 
+func (g *GCEFirewalls) Insert(ctx context.Context, key meta.Key, obj *ga.Firewall) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Firewalls", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.Firewalls.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Firewall referenced by key. +func (g *GCEFirewalls) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Firewalls", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Firewalls.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Update is a method on GCEFirewalls. +func (g *GCEFirewalls) Update(ctx context.Context, key meta.Key, arg0 *ga.Firewall) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "Firewalls", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Firewalls.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// ForwardingRules is an interface that allows for mocking of ForwardingRules. 
+type ForwardingRules interface { + Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) + Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockForwardingRules returns a new mock for ForwardingRules. +func NewMockForwardingRules(objs map[meta.Key]*MockForwardingRulesObj) *MockForwardingRules { + mock := &MockForwardingRules{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockForwardingRules is the mock for ForwardingRules. +type MockForwardingRules struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockForwardingRulesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockForwardingRules, ctx context.Context, key meta.Key) (bool, *ga.ForwardingRule, error) + ListHook func(m *MockForwardingRules, ctx context.Context, region string, fl *filter.F) (bool, []*ga.ForwardingRule, error) + InsertHook func(m *MockForwardingRules, ctx context.Context, key meta.Key, obj *ga.ForwardingRule) (bool, error) + DeleteHook func(m *MockForwardingRules, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockForwardingRules) Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockForwardingRules %v not found", key), + } + glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.ForwardingRule + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockForwardingRules) Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockForwardingRules %v exists", key), + } + glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "forwardingRules", key) + } + + m.Objects[key] = &MockForwardingRulesObj{obj} + glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockForwardingRules) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockForwardingRules %v not found", key), + } + glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockForwardingRules) Obj(o *ga.ForwardingRule) *MockForwardingRulesObj { + return &MockForwardingRulesObj{o} +} + +// GCEForwardingRules is a simplifying adapter for the GCE ForwardingRules. +type GCEForwardingRules struct { + s *Service +} + +// Get the ForwardingRule named by key. +func (g *GCEForwardingRules) Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.ForwardingRules.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all ForwardingRule objects. +func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.ForwardingRules.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.ForwardingRule + f := func(l *ga.ForwardingRuleList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert ForwardingRule with key of value obj. 
+func (g *GCEForwardingRules) Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.ForwardingRules.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the ForwardingRule referenced by key. +func (g *GCEForwardingRules) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.ForwardingRules.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaForwardingRules is an interface that allows for mocking of ForwardingRules. +type AlphaForwardingRules interface { + Get(ctx context.Context, key meta.Key) (*alpha.ForwardingRule, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.ForwardingRule) error + Delete(ctx context.Context, key meta.Key) error +} + +// NewMockAlphaForwardingRules returns a new mock for ForwardingRules. 
+func NewMockAlphaForwardingRules(objs map[meta.Key]*MockForwardingRulesObj) *MockAlphaForwardingRules { + mock := &MockAlphaForwardingRules{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaForwardingRules is the mock for ForwardingRules. +type MockAlphaForwardingRules struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockForwardingRulesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockAlphaForwardingRules, ctx context.Context, key meta.Key) (bool, *alpha.ForwardingRule, error) + ListHook func(m *MockAlphaForwardingRules, ctx context.Context, region string, fl *filter.F) (bool, []*alpha.ForwardingRule, error) + InsertHook func(m *MockAlphaForwardingRules, ctx context.Context, key meta.Key, obj *alpha.ForwardingRule) (bool, error) + DeleteHook func(m *MockAlphaForwardingRules, ctx context.Context, key meta.Key) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockAlphaForwardingRules) Get(ctx context.Context, key meta.Key) (*alpha.ForwardingRule, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), + } + glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.ForwardingRule + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key meta.Key, obj *alpha.ForwardingRule) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaForwardingRules %v exists", key), + } + glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "forwardingRules", key) + } + + m.Objects[key] = &MockForwardingRulesObj{obj} + glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), + } + glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaForwardingRules) Obj(o *alpha.ForwardingRule) *MockForwardingRulesObj { + return &MockForwardingRulesObj{o} +} + +// GCEAlphaForwardingRules is a simplifying adapter for the GCE ForwardingRules. +type GCEAlphaForwardingRules struct { + s *Service +} + +// Get the ForwardingRule named by key. +func (g *GCEAlphaForwardingRules) Get(ctx context.Context, key meta.Key) (*alpha.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.ForwardingRules.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all ForwardingRule objects. 
+func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.ForwardingRules.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.ForwardingRule + f := func(l *alpha.ForwardingRuleList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert ForwardingRule with key of value obj. +func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key meta.Key, obj *alpha.ForwardingRule) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.ForwardingRules.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the ForwardingRule referenced by key. 
+func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "ForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.ForwardingRules.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// GlobalForwardingRules is an interface that allows for mocking of GlobalForwardingRules. +type GlobalForwardingRules interface { + Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) + List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) + Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error + Delete(ctx context.Context, key meta.Key) error + SetTarget(context.Context, meta.Key, *ga.TargetReference) error +} + +// NewMockGlobalForwardingRules returns a new mock for GlobalForwardingRules. +func NewMockGlobalForwardingRules(objs map[meta.Key]*MockGlobalForwardingRulesObj) *MockGlobalForwardingRules { + mock := &MockGlobalForwardingRules{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockGlobalForwardingRules is the mock for GlobalForwardingRules. +type MockGlobalForwardingRules struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockGlobalForwardingRulesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. 
Return (true, _, _) to prevent the normal
+	// execution flow of the mock. Return (false, nil, nil) to continue with
+	// normal mock behavior after the hook function executes.
+	GetHook       func(m *MockGlobalForwardingRules, ctx context.Context, key meta.Key) (bool, *ga.ForwardingRule, error)
+	ListHook      func(m *MockGlobalForwardingRules, ctx context.Context, fl *filter.F) (bool, []*ga.ForwardingRule, error)
+	InsertHook    func(m *MockGlobalForwardingRules, ctx context.Context, key meta.Key, obj *ga.ForwardingRule) (bool, error)
+	DeleteHook    func(m *MockGlobalForwardingRules, ctx context.Context, key meta.Key) (bool, error)
+	SetTargetHook func(*MockGlobalForwardingRules, context.Context, meta.Key, *ga.TargetReference) error
+
+	// X is extra state that can be used as part of the mock. Generated code
+	// will not use this field.
+	X interface{}
+}
+
+// Get returns the object from the mock.
+func (m *MockGlobalForwardingRules) Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) {
+	if m.GetHook != nil {
+		if intercept, obj, err := m.GetHook(m, ctx, key); intercept {
+			glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			return obj, err
+		}
+	}
+
+	m.Lock.Lock()
+	defer m.Lock.Unlock()
+
+	if err, ok := m.GetError[key]; ok {
+		glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err)
+		return nil, err
+	}
+	if obj, ok := m.Objects[key]; ok {
+		typedObj := obj.ToGA()
+		glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		return typedObj, nil
+	}
+
+	err := &googleapi.Error{
+		Code:    http.StatusNotFound,
+		Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key),
+	}
+	glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err)
+	return nil, err
+}
+
+// List all of the objects in the mock.
+func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.ForwardingRule + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockGlobalForwardingRules %v exists", key), + } + glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "forwardingRules", key) + } + + m.Objects[key] = &MockGlobalForwardingRulesObj{obj} + glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), + } + glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockGlobalForwardingRules) Obj(o *ga.ForwardingRule) *MockGlobalForwardingRulesObj { + return &MockGlobalForwardingRulesObj{o} +} + +// SetTarget is a mock for the corresponding method. +func (m *MockGlobalForwardingRules) SetTarget(ctx context.Context, key meta.Key, arg0 *ga.TargetReference) error { + if m.SetTargetHook != nil { + return m.SetTargetHook(m, ctx, key, arg0) + } + return nil +} + +// GCEGlobalForwardingRules is a simplifying adapter for the GCE GlobalForwardingRules. +type GCEGlobalForwardingRules struct { + s *Service +} + +// Get the ForwardingRule named by key. 
+func (g *GCEGlobalForwardingRules) Get(ctx context.Context, key meta.Key) (*ga.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "GlobalForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.GlobalForwardingRules.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all ForwardingRule objects. +func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "GlobalForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.GlobalForwardingRules.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.ForwardingRule + f := func(l *ga.ForwardingRuleList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert ForwardingRule with key of value obj. +func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key meta.Key, obj *ga.ForwardingRule) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "GlobalForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.GlobalForwardingRules.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the ForwardingRule referenced by key. 
+func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "GlobalForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.GlobalForwardingRules.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// SetTarget is a method on GCEGlobalForwardingRules. +func (g *GCEGlobalForwardingRules) SetTarget(ctx context.Context, key meta.Key, arg0 *ga.TargetReference) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetTarget", + Version: meta.Version("ga"), + Service: "GlobalForwardingRules", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.GlobalForwardingRules.SetTarget(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// HealthChecks is an interface that allows for mocking of HealthChecks. +type HealthChecks interface { + Get(ctx context.Context, key meta.Key) (*ga.HealthCheck, error) + List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) + Insert(ctx context.Context, key meta.Key, obj *ga.HealthCheck) error + Delete(ctx context.Context, key meta.Key) error + Update(context.Context, meta.Key, *ga.HealthCheck) error +} + +// NewMockHealthChecks returns a new mock for HealthChecks. 
+func NewMockHealthChecks(objs map[meta.Key]*MockHealthChecksObj) *MockHealthChecks {
+	mock := &MockHealthChecks{
+		Objects:     objs,
+		GetError:    map[meta.Key]error{},
+		InsertError: map[meta.Key]error{},
+		DeleteError: map[meta.Key]error{},
+	}
+	return mock
+}
+
+// MockHealthChecks is the mock for HealthChecks.
+type MockHealthChecks struct {
+	Lock sync.Mutex
+
+	// Objects maintained by the mock.
+	Objects map[meta.Key]*MockHealthChecksObj
+
+	// If an entry exists for the given key and operation, then the error
+	// will be returned instead of the operation.
+	GetError    map[meta.Key]error
+	ListError   *error
+	InsertError map[meta.Key]error
+	DeleteError map[meta.Key]error
+
+	// xxxHook allow you to intercept the standard processing of the mock in
+	// order to add your own logic. Return (true, _, _) to prevent the normal
+	// execution flow of the mock. Return (false, nil, nil) to continue with
+	// normal mock behavior after the hook function executes.
+	GetHook    func(m *MockHealthChecks, ctx context.Context, key meta.Key) (bool, *ga.HealthCheck, error)
+	ListHook   func(m *MockHealthChecks, ctx context.Context, fl *filter.F) (bool, []*ga.HealthCheck, error)
+	InsertHook func(m *MockHealthChecks, ctx context.Context, key meta.Key, obj *ga.HealthCheck) (bool, error)
+	DeleteHook func(m *MockHealthChecks, ctx context.Context, key meta.Key) (bool, error)
+	UpdateHook func(*MockHealthChecks, context.Context, meta.Key, *ga.HealthCheck) error
+
+	// X is extra state that can be used as part of the mock. Generated code
+	// will not use this field.
+	X interface{}
+}
+
+// Get returns the object from the mock.
+func (m *MockHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HealthCheck, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHealthChecks %v not found", key), + } + glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.HealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HealthCheck) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockHealthChecks %v exists", key), + } + glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "healthChecks", key) + } + + m.Objects[key] = &MockHealthChecksObj{obj} + glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockHealthChecks) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHealthChecks %v not found", key), + } + glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockHealthChecks) Obj(o *ga.HealthCheck) *MockHealthChecksObj { + return &MockHealthChecksObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEHealthChecks is a simplifying adapter for the GCE HealthChecks. +type GCEHealthChecks struct { + s *Service +} + +// Get the HealthCheck named by key. +func (g *GCEHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.HealthChecks.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all HealthCheck objects. +func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.HealthChecks.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.HealthCheck + f := func(l *ga.HealthCheckList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert HealthCheck with key of value obj. 
+func (g *GCEHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.HealthChecks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the HealthCheck referenced by key. +func (g *GCEHealthChecks) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.HealthChecks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Update is a method on GCEHealthChecks. +func (g *GCEHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.HealthChecks.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaHealthChecks is an interface that allows for mocking of HealthChecks. 
+type AlphaHealthChecks interface {
+	Get(ctx context.Context, key meta.Key) (*alpha.HealthCheck, error)
+	List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error)
+	Insert(ctx context.Context, key meta.Key, obj *alpha.HealthCheck) error
+	Delete(ctx context.Context, key meta.Key) error
+	Update(context.Context, meta.Key, *alpha.HealthCheck) error
+}
+
+// NewMockAlphaHealthChecks returns a new mock for HealthChecks.
+func NewMockAlphaHealthChecks(objs map[meta.Key]*MockHealthChecksObj) *MockAlphaHealthChecks {
+	mock := &MockAlphaHealthChecks{
+		Objects:     objs,
+		GetError:    map[meta.Key]error{},
+		InsertError: map[meta.Key]error{},
+		DeleteError: map[meta.Key]error{},
+	}
+	return mock
+}
+
+// MockAlphaHealthChecks is the mock for HealthChecks.
+type MockAlphaHealthChecks struct {
+	Lock sync.Mutex
+
+	// Objects maintained by the mock.
+	Objects map[meta.Key]*MockHealthChecksObj
+
+	// If an entry exists for the given key and operation, then the error
+	// will be returned instead of the operation.
+	GetError    map[meta.Key]error
+	ListError   *error
+	InsertError map[meta.Key]error
+	DeleteError map[meta.Key]error
+
+	// xxxHook allow you to intercept the standard processing of the mock in
+	// order to add your own logic. Return (true, _, _) to prevent the normal
+	// execution flow of the mock. Return (false, nil, nil) to continue with
+	// normal mock behavior after the hook function executes.
+ GetHook func(m *MockAlphaHealthChecks, ctx context.Context, key meta.Key) (bool, *alpha.HealthCheck, error) + ListHook func(m *MockAlphaHealthChecks, ctx context.Context, fl *filter.F) (bool, []*alpha.HealthCheck, error) + InsertHook func(m *MockAlphaHealthChecks, ctx context.Context, key meta.Key, obj *alpha.HealthCheck) (bool, error) + DeleteHook func(m *MockAlphaHealthChecks, ctx context.Context, key meta.Key) (bool, error) + UpdateHook func(*MockAlphaHealthChecks, context.Context, meta.Key, *alpha.HealthCheck) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaHealthChecks) Get(ctx context.Context, key meta.Key) (*alpha.HealthCheck, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), + } + glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. 
+func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.HealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key meta.Key, obj *alpha.HealthCheck) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaHealthChecks %v exists", key), + } + glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "healthChecks", key) + } + + m.Objects[key] = &MockHealthChecksObj{obj} + glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), + } + glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaHealthChecks) Obj(o *alpha.HealthCheck) *MockHealthChecksObj { + return &MockHealthChecksObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockAlphaHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *alpha.HealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(m, ctx, key, arg0) + } + return nil +} + +// GCEAlphaHealthChecks is a simplifying adapter for the GCE HealthChecks. +type GCEAlphaHealthChecks struct { + s *Service +} + +// Get the HealthCheck named by key. +func (g *GCEAlphaHealthChecks) Get(ctx context.Context, key meta.Key) (*alpha.HealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.HealthChecks.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all HealthCheck objects. 
+func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.HealthChecks.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.HealthCheck + f := func(l *alpha.HealthCheckList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert HealthCheck with key of value obj. +func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key meta.Key, obj *alpha.HealthCheck) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.HealthChecks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the HealthCheck referenced by key. +func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "HealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.HealthChecks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Update is a method on GCEAlphaHealthChecks. 
func (g *GCEAlphaHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *alpha.HealthCheck) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Update",
		Version:   meta.Version("alpha"),
		Service:   "HealthChecks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.Alpha.HealthChecks.Update(projectID, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// HttpHealthChecks is an interface that allows for mocking of HttpHealthChecks.
type HttpHealthChecks interface {
	Get(ctx context.Context, key meta.Key) (*ga.HttpHealthCheck, error)
	List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error)
	Insert(ctx context.Context, key meta.Key, obj *ga.HttpHealthCheck) error
	Delete(ctx context.Context, key meta.Key) error
	Update(context.Context, meta.Key, *ga.HttpHealthCheck) error
}

// NewMockHttpHealthChecks returns a new mock for HttpHealthChecks.
func NewMockHttpHealthChecks(objs map[meta.Key]*MockHttpHealthChecksObj) *MockHttpHealthChecks {
	mock := &MockHttpHealthChecks{
		Objects:     objs,
		GetError:    map[meta.Key]error{},
		InsertError: map[meta.Key]error{},
		DeleteError: map[meta.Key]error{},
	}
	return mock
}

// MockHttpHealthChecks is the mock for HttpHealthChecks.
type MockHttpHealthChecks struct {
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockHttpHealthChecksObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError    map[meta.Key]error
	ListError   *error
	InsertError map[meta.Key]error
	DeleteError map[meta.Key]error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior after the hook function executes.
	GetHook    func(m *MockHttpHealthChecks, ctx context.Context, key meta.Key) (bool, *ga.HttpHealthCheck, error)
	ListHook   func(m *MockHttpHealthChecks, ctx context.Context, fl *filter.F) (bool, []*ga.HttpHealthCheck, error)
	InsertHook func(m *MockHttpHealthChecks, ctx context.Context, key meta.Key, obj *ga.HttpHealthCheck) (bool, error)
	DeleteHook func(m *MockHttpHealthChecks, ctx context.Context, key meta.Key) (bool, error)
	UpdateHook func(*MockHttpHealthChecks, context.Context, meta.Key, *ga.HttpHealthCheck) error

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
func (m *MockHttpHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HttpHealthCheck, error) {
	// The hook, if set, may fully intercept this call.
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.GetError[key]; ok {
		glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[key]; ok {
		typedObj := obj.ToGA()
		glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

	// Missing keys surface as a 404, matching the real API.
	err := &googleapi.Error{
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key),
	}
	glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

// List all of the objects in the mock.
+func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.HttpHealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockHttpHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HttpHealthCheck) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockHttpHealthChecks %v exists", key), + } + glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "httpHealthChecks", key) + } + + m.Objects[key] = &MockHttpHealthChecksObj{obj} + glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
func (m *MockHttpHealthChecks) Delete(ctx context.Context, key meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[key]; ok {
		glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	// Deleting a key that is not present surfaces as a 404, matching the
	// real API.
	if _, ok := m.Objects[key]; !ok {
		err := &googleapi.Error{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key),
		}
		glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, key)
	glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = nil", ctx, key)
	return nil
}

// Obj wraps the object for use in the mock.
func (m *MockHttpHealthChecks) Obj(o *ga.HttpHealthCheck) *MockHttpHealthChecksObj {
	return &MockHttpHealthChecksObj{o}
}

// Update is a mock for the corresponding method.
// NOTE: without an UpdateHook installed this is a no-op; it does not modify
// m.Objects.
func (m *MockHttpHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HttpHealthCheck) error {
	if m.UpdateHook != nil {
		return m.UpdateHook(m, ctx, key, arg0)
	}
	return nil
}

// GCEHttpHealthChecks is a simplifying adapter for the GCE HttpHealthChecks.
type GCEHttpHealthChecks struct {
	s *Service
}

// Get the HttpHealthCheck named by key.
func (g *GCEHttpHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HttpHealthCheck, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Get",
		Version:   meta.Version("ga"),
		Service:   "HttpHealthChecks",
	}
	// Honor the service-wide rate limiter before issuing the API call.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.HttpHealthChecks.Get(projectID, key.Name)
	call.Context(ctx)
	return call.Do()
}

// List all HttpHealthCheck objects.
func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "List",
		Version:   meta.Version("ga"),
		Service:   "HttpHealthChecks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.HttpHealthChecks.List(projectID)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	// Accumulate every page of results into a single slice.
	var all []*ga.HttpHealthCheck
	f := func(l *ga.HttpHealthCheckList) error {
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}

// Insert HttpHealthCheck with key of value obj.
func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HttpHealthCheck) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Insert",
		Version:   meta.Version("ga"),
		Service:   "HttpHealthChecks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	// The object's name is taken from the key, overriding whatever was in obj.
	obj.Name = key.Name
	call := g.s.GA.HttpHealthChecks.Insert(projectID, obj)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Block until the returned long-running operation completes.
	return g.s.WaitForCompletion(ctx, op)
}

// Delete the HttpHealthCheck referenced by key.
func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key meta.Key) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Delete",
		Version:   meta.Version("ga"),
		Service:   "HttpHealthChecks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.HttpHealthChecks.Delete(projectID, key.Name)

	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// Update is a method on GCEHttpHealthChecks.
func (g *GCEHttpHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HttpHealthCheck) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Update",
		Version:   meta.Version("ga"),
		Service:   "HttpHealthChecks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.HttpHealthChecks.Update(projectID, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// HttpsHealthChecks is an interface that allows for mocking of HttpsHealthChecks.
type HttpsHealthChecks interface {
	Get(ctx context.Context, key meta.Key) (*ga.HttpsHealthCheck, error)
	List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error)
	Insert(ctx context.Context, key meta.Key, obj *ga.HttpsHealthCheck) error
	Delete(ctx context.Context, key meta.Key) error
	Update(context.Context, meta.Key, *ga.HttpsHealthCheck) error
}

// NewMockHttpsHealthChecks returns a new mock for HttpsHealthChecks.
func NewMockHttpsHealthChecks(objs map[meta.Key]*MockHttpsHealthChecksObj) *MockHttpsHealthChecks {
	mock := &MockHttpsHealthChecks{
		Objects:     objs,
		GetError:    map[meta.Key]error{},
		InsertError: map[meta.Key]error{},
		DeleteError: map[meta.Key]error{},
	}
	return mock
}

// MockHttpsHealthChecks is the mock for HttpsHealthChecks.
type MockHttpsHealthChecks struct {
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockHttpsHealthChecksObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError    map[meta.Key]error
	ListError   *error
	InsertError map[meta.Key]error
	DeleteError map[meta.Key]error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior after the hook function executes.
	GetHook    func(m *MockHttpsHealthChecks, ctx context.Context, key meta.Key) (bool, *ga.HttpsHealthCheck, error)
	ListHook   func(m *MockHttpsHealthChecks, ctx context.Context, fl *filter.F) (bool, []*ga.HttpsHealthCheck, error)
	InsertHook func(m *MockHttpsHealthChecks, ctx context.Context, key meta.Key, obj *ga.HttpsHealthCheck) (bool, error)
	DeleteHook func(m *MockHttpsHealthChecks, ctx context.Context, key meta.Key) (bool, error)
	UpdateHook func(*MockHttpsHealthChecks, context.Context, meta.Key, *ga.HttpsHealthCheck) error

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
+func (m *MockHttpsHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HttpsHealthCheck, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), + } + glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.HttpsHealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HttpsHealthCheck) error {
	if m.InsertHook != nil {
		if intercept, err := m.InsertHook(m, ctx, key, obj); intercept {
			glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.InsertError[key]; ok {
		glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}
	// Inserting over an existing key surfaces as a 409, matching the real API.
	if _, ok := m.Objects[key]; ok {
		err := &googleapi.Error{
			Code:    http.StatusConflict,
			Message: fmt.Sprintf("MockHttpsHealthChecks %v exists", key),
		}
		glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}

	// The stored name comes from the key; a SelfLink is synthesized when the
	// caller did not provide one.
	obj.Name = key.Name
	if obj.SelfLink == "" {
		obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "httpsHealthChecks", key)
	}

	m.Objects[key] = &MockHttpsHealthChecksObj{obj}
	glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
	return nil
}

// Delete is a mock for deleting the object.
func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[key]; ok {
		glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	if _, ok := m.Objects[key]; !ok {
		err := &googleapi.Error{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key),
		}
		glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, key)
	glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = nil", ctx, key)
	return nil
}

// Obj wraps the object for use in the mock.
func (m *MockHttpsHealthChecks) Obj(o *ga.HttpsHealthCheck) *MockHttpsHealthChecksObj {
	return &MockHttpsHealthChecksObj{o}
}

// Update is a mock for the corresponding method.
// NOTE: without an UpdateHook installed this is a no-op; it does not modify
// m.Objects.
func (m *MockHttpsHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HttpsHealthCheck) error {
	if m.UpdateHook != nil {
		return m.UpdateHook(m, ctx, key, arg0)
	}
	return nil
}

// GCEHttpsHealthChecks is a simplifying adapter for the GCE HttpsHealthChecks.
type GCEHttpsHealthChecks struct {
	s *Service
}

// Get the HttpsHealthCheck named by key.
func (g *GCEHttpsHealthChecks) Get(ctx context.Context, key meta.Key) (*ga.HttpsHealthCheck, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Get",
		Version:   meta.Version("ga"),
		Service:   "HttpsHealthChecks",
	}
	// Honor the service-wide rate limiter before issuing the API call.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.HttpsHealthChecks.Get(projectID, key.Name)
	call.Context(ctx)
	return call.Do()
}

// List all HttpsHealthCheck objects.
func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "List",
		Version:   meta.Version("ga"),
		Service:   "HttpsHealthChecks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.HttpsHealthChecks.List(projectID)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	// Accumulate every page of results into a single slice.
	var all []*ga.HttpsHealthCheck
	f := func(l *ga.HttpsHealthCheckList) error {
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}

// Insert HttpsHealthCheck with key of value obj.
func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key meta.Key, obj *ga.HttpsHealthCheck) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Insert",
		Version:   meta.Version("ga"),
		Service:   "HttpsHealthChecks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	// The object's name is taken from the key, overriding whatever was in obj.
	obj.Name = key.Name
	call := g.s.GA.HttpsHealthChecks.Insert(projectID, obj)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Block until the returned long-running operation completes.
	return g.s.WaitForCompletion(ctx, op)
}

// Delete the HttpsHealthCheck referenced by key.
func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key meta.Key) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Delete",
		Version:   meta.Version("ga"),
		Service:   "HttpsHealthChecks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.HttpsHealthChecks.Delete(projectID, key.Name)

	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// Update is a method on GCEHttpsHealthChecks.
func (g *GCEHttpsHealthChecks) Update(ctx context.Context, key meta.Key, arg0 *ga.HttpsHealthCheck) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Update",
		Version:   meta.Version("ga"),
		Service:   "HttpsHealthChecks",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.HttpsHealthChecks.Update(projectID, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// InstanceGroups is an interface that allows for mocking of InstanceGroups.
type InstanceGroups interface {
	Get(ctx context.Context, key meta.Key) (*ga.InstanceGroup, error)
	List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error)
	Insert(ctx context.Context, key meta.Key, obj *ga.InstanceGroup) error
	Delete(ctx context.Context, key meta.Key) error
	AddInstances(context.Context, meta.Key, *ga.InstanceGroupsAddInstancesRequest) error
	ListInstances(context.Context, meta.Key, *ga.InstanceGroupsListInstancesRequest) (*ga.InstanceGroupsListInstances, error)
	RemoveInstances(context.Context, meta.Key, *ga.InstanceGroupsRemoveInstancesRequest) error
	SetNamedPorts(context.Context, meta.Key, *ga.InstanceGroupsSetNamedPortsRequest) error
}

// NewMockInstanceGroups returns a new mock for InstanceGroups.
func NewMockInstanceGroups(objs map[meta.Key]*MockInstanceGroupsObj) *MockInstanceGroups {
	mock := &MockInstanceGroups{
		Objects:     objs,
		GetError:    map[meta.Key]error{},
		InsertError: map[meta.Key]error{},
		DeleteError: map[meta.Key]error{},
	}
	return mock
}

// MockInstanceGroups is the mock for InstanceGroups.
type MockInstanceGroups struct {
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockInstanceGroupsObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError    map[meta.Key]error
	ListError   *error
	InsertError map[meta.Key]error
	DeleteError map[meta.Key]error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior after the hook function executes.
	GetHook             func(m *MockInstanceGroups, ctx context.Context, key meta.Key) (bool, *ga.InstanceGroup, error)
	ListHook            func(m *MockInstanceGroups, ctx context.Context, zone string, fl *filter.F) (bool, []*ga.InstanceGroup, error)
	InsertHook          func(m *MockInstanceGroups, ctx context.Context, key meta.Key, obj *ga.InstanceGroup) (bool, error)
	DeleteHook          func(m *MockInstanceGroups, ctx context.Context, key meta.Key) (bool, error)
	AddInstancesHook    func(*MockInstanceGroups, context.Context, meta.Key, *ga.InstanceGroupsAddInstancesRequest) error
	ListInstancesHook   func(*MockInstanceGroups, context.Context, meta.Key, *ga.InstanceGroupsListInstancesRequest) (*ga.InstanceGroupsListInstances, error)
	RemoveInstancesHook func(*MockInstanceGroups, context.Context, meta.Key, *ga.InstanceGroupsRemoveInstancesRequest) error
	SetNamedPortsHook   func(*MockInstanceGroups, context.Context, meta.Key, *ga.InstanceGroupsSetNamedPortsRequest) error

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
+func (m *MockInstanceGroups) Get(ctx context.Context, key meta.Key) (*ga.InstanceGroup, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstanceGroups %v not found", key), + } + glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.InstanceGroup + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
func (m *MockInstanceGroups) Insert(ctx context.Context, key meta.Key, obj *ga.InstanceGroup) error {
	if m.InsertHook != nil {
		if intercept, err := m.InsertHook(m, ctx, key, obj); intercept {
			glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.InsertError[key]; ok {
		glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}
	// Inserting over an existing key surfaces as a 409, matching the real API.
	if _, ok := m.Objects[key]; ok {
		err := &googleapi.Error{
			Code:    http.StatusConflict,
			Message: fmt.Sprintf("MockInstanceGroups %v exists", key),
		}
		glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}

	// The stored name comes from the key; a SelfLink is synthesized when the
	// caller did not provide one.
	obj.Name = key.Name
	if obj.SelfLink == "" {
		obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "instanceGroups", key)
	}

	m.Objects[key] = &MockInstanceGroupsObj{obj}
	glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj)
	return nil
}

// Delete is a mock for deleting the object.
func (m *MockInstanceGroups) Delete(ctx context.Context, key meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[key]; ok {
		glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	if _, ok := m.Objects[key]; !ok {
		err := &googleapi.Error{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockInstanceGroups %v not found", key),
		}
		glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, key)
	glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = nil", ctx, key)
	return nil
}

// Obj wraps the object for use in the mock.
func (m *MockInstanceGroups) Obj(o *ga.InstanceGroup) *MockInstanceGroupsObj {
	return &MockInstanceGroupsObj{o}
}

// AddInstances is a mock for the corresponding method.
// NOTE: without an AddInstancesHook installed this is a no-op.
func (m *MockInstanceGroups) AddInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error {
	if m.AddInstancesHook != nil {
		return m.AddInstancesHook(m, ctx, key, arg0)
	}
	return nil
}

// ListInstances is a mock for the corresponding method.
// Unlike the other hooks, a ListInstancesHook is required: there is no
// default behavior, so the call errors when no hook is installed.
func (m *MockInstanceGroups) ListInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest) (*ga.InstanceGroupsListInstances, error) {
	if m.ListInstancesHook != nil {
		return m.ListInstancesHook(m, ctx, key, arg0)
	}
	return nil, fmt.Errorf("ListInstancesHook must be set")
}

// RemoveInstances is a mock for the corresponding method.
// NOTE: without a RemoveInstancesHook installed this is a no-op.
func (m *MockInstanceGroups) RemoveInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error {
	if m.RemoveInstancesHook != nil {
		return m.RemoveInstancesHook(m, ctx, key, arg0)
	}
	return nil
}

// SetNamedPorts is a mock for the corresponding method.
// NOTE: without a SetNamedPortsHook installed this is a no-op.
func (m *MockInstanceGroups) SetNamedPorts(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error {
	if m.SetNamedPortsHook != nil {
		return m.SetNamedPortsHook(m, ctx, key, arg0)
	}
	return nil
}

// GCEInstanceGroups is a simplifying adapter for the GCE InstanceGroups.
type GCEInstanceGroups struct {
	s *Service
}

// Get the InstanceGroup named by key.
func (g *GCEInstanceGroups) Get(ctx context.Context, key meta.Key) (*ga.InstanceGroup, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Get",
		Version:   meta.Version("ga"),
		Service:   "InstanceGroups",
	}
	// Honor the service-wide rate limiter before issuing the API call.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.InstanceGroups.Get(projectID, key.Zone, key.Name)
	call.Context(ctx)
	return call.Do()
}

// List all InstanceGroup objects.
func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "List",
		Version:   meta.Version("ga"),
		Service:   "InstanceGroups",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.InstanceGroups.List(projectID, zone)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	// Accumulate every page of results into a single slice.
	var all []*ga.InstanceGroup
	f := func(l *ga.InstanceGroupList) error {
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}

// Insert InstanceGroup with key of value obj.
func (g *GCEInstanceGroups) Insert(ctx context.Context, key meta.Key, obj *ga.InstanceGroup) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Insert",
		Version:   meta.Version("ga"),
		Service:   "InstanceGroups",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	// The object's name is taken from the key, overriding whatever was in obj.
	obj.Name = key.Name
	call := g.s.GA.InstanceGroups.Insert(projectID, key.Zone, obj)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Block until the returned long-running operation completes.
	return g.s.WaitForCompletion(ctx, op)
}

// Delete the InstanceGroup referenced by key.
func (g *GCEInstanceGroups) Delete(ctx context.Context, key meta.Key) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Delete",
		Version:   meta.Version("ga"),
		Service:   "InstanceGroups",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.InstanceGroups.Delete(projectID, key.Zone, key.Name)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// AddInstances is a method on GCEInstanceGroups.
func (g *GCEInstanceGroups) AddInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "AddInstances",
		Version:   meta.Version("ga"),
		Service:   "InstanceGroups",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.InstanceGroups.AddInstances(projectID, key.Zone, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// ListInstances is a method on GCEInstanceGroups.
func (g *GCEInstanceGroups) ListInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest) (*ga.InstanceGroupsListInstances, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "ListInstances",
		Version:   meta.Version("ga"),
		Service:   "InstanceGroups",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.InstanceGroups.ListInstances(projectID, key.Zone, key.Name, arg0)
	call.Context(ctx)
	return call.Do()
}

// RemoveInstances is a method on GCEInstanceGroups.
func (g *GCEInstanceGroups) RemoveInstances(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "RemoveInstances",
		Version:   meta.Version("ga"),
		Service:   "InstanceGroups",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.InstanceGroups.RemoveInstances(projectID, key.Zone, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// SetNamedPorts is a method on GCEInstanceGroups.
func (g *GCEInstanceGroups) SetNamedPorts(ctx context.Context, key meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "SetNamedPorts",
		Version:   meta.Version("ga"),
		Service:   "InstanceGroups",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.InstanceGroups.SetNamedPorts(projectID, key.Zone, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// Instances is an interface that allows for mocking of Instances.
type Instances interface {
	Get(ctx context.Context, key meta.Key) (*ga.Instance, error)
	List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error)
	Insert(ctx context.Context, key meta.Key, obj *ga.Instance) error
	Delete(ctx context.Context, key meta.Key) error
	AttachDisk(context.Context, meta.Key, *ga.AttachedDisk) error
	DetachDisk(context.Context, meta.Key, string) error
}

// NewMockInstances returns a new mock for Instances.
+func NewMockInstances(objs map[meta.Key]*MockInstancesObj) *MockInstances { + mock := &MockInstances{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockInstances is the mock for Instances. +type MockInstances struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstancesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior after the hook function executes. + GetHook func(m *MockInstances, ctx context.Context, key meta.Key) (bool, *ga.Instance, error) + ListHook func(m *MockInstances, ctx context.Context, zone string, fl *filter.F) (bool, []*ga.Instance, error) + InsertHook func(m *MockInstances, ctx context.Context, key meta.Key, obj *ga.Instance) (bool, error) + DeleteHook func(m *MockInstances, ctx context.Context, key meta.Key) (bool, error) + AttachDiskHook func(*MockInstances, context.Context, meta.Key, *ga.AttachedDisk) error + DetachDiskHook func(*MockInstances, context.Context, meta.Key, string) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockInstances) Get(ctx context.Context, key meta.Key) (*ga.Instance, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstances %v not found", key), + } + glog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Instance + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockInstances) Insert(ctx context.Context, key meta.Key, obj *ga.Instance) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockInstances %v exists", key), + } + glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "instances", key) + } + + m.Objects[key] = &MockInstancesObj{obj} + glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockInstances) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstances %v not found", key), + } + glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockInstances.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockInstances) Obj(o *ga.Instance) *MockInstancesObj { + return &MockInstancesObj{o} +} + +// AttachDisk is a mock for the corresponding method. +func (m *MockInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *ga.AttachedDisk) error { + if m.AttachDiskHook != nil { + return m.AttachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// DetachDisk is a mock for the corresponding method. +func (m *MockInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + if m.DetachDiskHook != nil { + return m.DetachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// GCEInstances is a simplifying adapter for the GCE Instances. +type GCEInstances struct { + s *Service +} + +// Get the Instance named by key. +func (g *GCEInstances) Get(ctx context.Context, key meta.Key) (*ga.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Instances.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Instance objects. +func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Instances.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Instance + f := func(l *ga.InstanceList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Instance with key of value obj. 
+func (g *GCEInstances) Insert(ctx context.Context, key meta.Key, obj *ga.Instance) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.Instances.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Instance referenced by key. +func (g *GCEInstances) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Instances.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AttachDisk is a method on GCEInstances. +func (g *GCEInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *ga.AttachedDisk) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachDisk", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// DetachDisk is a method on GCEInstances. 
+func (g *GCEInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachDisk", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// BetaInstances is an interface that allows for mocking of Instances. +type BetaInstances interface { + Get(ctx context.Context, key meta.Key) (*beta.Instance, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) + Insert(ctx context.Context, key meta.Key, obj *beta.Instance) error + Delete(ctx context.Context, key meta.Key) error + AttachDisk(context.Context, meta.Key, *beta.AttachedDisk) error + DetachDisk(context.Context, meta.Key, string) error +} + +// NewMockBetaInstances returns a new mock for Instances. +func NewMockBetaInstances(objs map[meta.Key]*MockInstancesObj) *MockBetaInstances { + mock := &MockBetaInstances{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaInstances is the mock for Instances. +type MockBetaInstances struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstancesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. 
Return (false, nil, nil) to continue with + // normal mock behavior after the hook function executes. + GetHook func(m *MockBetaInstances, ctx context.Context, key meta.Key) (bool, *beta.Instance, error) + ListHook func(m *MockBetaInstances, ctx context.Context, zone string, fl *filter.F) (bool, []*beta.Instance, error) + InsertHook func(m *MockBetaInstances, ctx context.Context, key meta.Key, obj *beta.Instance) (bool, error) + DeleteHook func(m *MockBetaInstances, ctx context.Context, key meta.Key) (bool, error) + AttachDiskHook func(*MockBetaInstances, context.Context, meta.Key, *beta.AttachedDisk) error + DetachDiskHook func(*MockBetaInstances, context.Context, meta.Key, string) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaInstances) Get(ctx context.Context, key meta.Key) (*beta.Instance, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToBeta() + glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaInstances %v not found", key), + } + glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. 
+func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.Instance + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockBetaInstances) Insert(ctx context.Context, key meta.Key, obj *beta.Instance) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaInstances %v exists", key), + } + glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionBeta, "mock-project", "instances", key) + } + + m.Objects[key] = &MockInstancesObj{obj} + glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockBetaInstances) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaInstances %v not found", key), + } + glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaInstances) Obj(o *beta.Instance) *MockInstancesObj { + return &MockInstancesObj{o} +} + +// AttachDisk is a mock for the corresponding method. +func (m *MockBetaInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *beta.AttachedDisk) error { + if m.AttachDiskHook != nil { + return m.AttachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// DetachDisk is a mock for the corresponding method. +func (m *MockBetaInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + if m.DetachDiskHook != nil { + return m.DetachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// GCEBetaInstances is a simplifying adapter for the GCE Instances. +type GCEBetaInstances struct { + s *Service +} + +// Get the Instance named by key. 
+func (g *GCEBetaInstances) Get(ctx context.Context, key meta.Key) (*beta.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Beta.Instances.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Instance objects. +func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Beta.Instances.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.Instance + f := func(l *beta.InstanceList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Instance with key of value obj. +func (g *GCEBetaInstances) Insert(ctx context.Context, key meta.Key, obj *beta.Instance) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Beta.Instances.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Instance referenced by key. 
+func (g *GCEBetaInstances) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Beta.Instances.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AttachDisk is a method on GCEBetaInstances. +func (g *GCEBetaInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *beta.AttachedDisk) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachDisk", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Beta.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// DetachDisk is a method on GCEBetaInstances. +func (g *GCEBetaInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachDisk", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Beta.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AlphaInstances is an interface that allows for mocking of Instances. 
+type AlphaInstances interface { + Get(ctx context.Context, key meta.Key) (*alpha.Instance, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) + Insert(ctx context.Context, key meta.Key, obj *alpha.Instance) error + Delete(ctx context.Context, key meta.Key) error + AttachDisk(context.Context, meta.Key, *alpha.AttachedDisk) error + DetachDisk(context.Context, meta.Key, string) error + UpdateNetworkInterface(context.Context, meta.Key, string, *alpha.NetworkInterface) error +} + +// NewMockAlphaInstances returns a new mock for Instances. +func NewMockAlphaInstances(objs map[meta.Key]*MockInstancesObj) *MockAlphaInstances { + mock := &MockAlphaInstances{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaInstances is the mock for Instances. +type MockAlphaInstances struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstancesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior after the hook function executes. 
+ GetHook func(m *MockAlphaInstances, ctx context.Context, key meta.Key) (bool, *alpha.Instance, error) + ListHook func(m *MockAlphaInstances, ctx context.Context, zone string, fl *filter.F) (bool, []*alpha.Instance, error) + InsertHook func(m *MockAlphaInstances, ctx context.Context, key meta.Key, obj *alpha.Instance) (bool, error) + DeleteHook func(m *MockAlphaInstances, ctx context.Context, key meta.Key) (bool, error) + AttachDiskHook func(*MockAlphaInstances, context.Context, meta.Key, *alpha.AttachedDisk) error + DetachDiskHook func(*MockAlphaInstances, context.Context, meta.Key, string) error + UpdateNetworkInterfaceHook func(*MockAlphaInstances, context.Context, meta.Key, string, *alpha.NetworkInterface) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaInstances) Get(ctx context.Context, key meta.Key) (*alpha.Instance, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaInstances %v not found", key), + } + glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. 
+func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.Instance + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaInstances) Insert(ctx context.Context, key meta.Key, obj *alpha.Instance) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaInstances %v exists", key), + } + glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "instances", key) + } + + m.Objects[key] = &MockInstancesObj{obj} + glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockAlphaInstances) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaInstances %v not found", key), + } + glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaInstances) Obj(o *alpha.Instance) *MockInstancesObj { + return &MockInstancesObj{o} +} + +// AttachDisk is a mock for the corresponding method. +func (m *MockAlphaInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *alpha.AttachedDisk) error { + if m.AttachDiskHook != nil { + return m.AttachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// DetachDisk is a mock for the corresponding method. +func (m *MockAlphaInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + if m.DetachDiskHook != nil { + return m.DetachDiskHook(m, ctx, key, arg0) + } + return nil +} + +// UpdateNetworkInterface is a mock for the corresponding method. +func (m *MockAlphaInstances) UpdateNetworkInterface(ctx context.Context, key meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error { + if m.UpdateNetworkInterfaceHook != nil { + return m.UpdateNetworkInterfaceHook(m, ctx, key, arg0, arg1) + } + return nil +} + +// GCEAlphaInstances is a simplifying adapter for the GCE Instances. +type GCEAlphaInstances struct { + s *Service +} + +// Get the Instance named by key. 
+func (g *GCEAlphaInstances) Get(ctx context.Context, key meta.Key) (*alpha.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.Instances.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Instance objects. +func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.Instances.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.Instance + f := func(l *alpha.InstanceList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert Instance with key of value obj. +func (g *GCEAlphaInstances) Insert(ctx context.Context, key meta.Key, obj *alpha.Instance) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.Instances.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the Instance referenced by key. 
+func (g *GCEAlphaInstances) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.Instances.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AttachDisk is a method on GCEAlphaInstances. +func (g *GCEAlphaInstances) AttachDisk(ctx context.Context, key meta.Key, arg0 *alpha.AttachedDisk) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachDisk", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// DetachDisk is a method on GCEAlphaInstances. +func (g *GCEAlphaInstances) DetachDisk(ctx context.Context, key meta.Key, arg0 string) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachDisk", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// UpdateNetworkInterface is a method on GCEAlphaInstances. 
func (g *GCEAlphaInstances) UpdateNetworkInterface(ctx context.Context, key meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "UpdateNetworkInterface",
		Version:   meta.Version("alpha"),
		Service:   "Instances",
	}
	// Honor the project rate limiter before issuing the API call.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.Alpha.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	// Block until the returned GCE operation finishes (or ctx is done).
	return g.s.WaitForCompletion(ctx, op)
}

// AlphaNetworkEndpointGroups is an interface that allows for mocking of NetworkEndpointGroups.
type AlphaNetworkEndpointGroups interface {
	Get(ctx context.Context, key meta.Key) (*alpha.NetworkEndpointGroup, error)
	List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error)
	Insert(ctx context.Context, key meta.Key, obj *alpha.NetworkEndpointGroup) error
	Delete(ctx context.Context, key meta.Key) error
	AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error)
	AttachNetworkEndpoints(context.Context, meta.Key, *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error
	DetachNetworkEndpoints(context.Context, meta.Key, *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error
}

// NewMockAlphaNetworkEndpointGroups returns a new mock for NetworkEndpointGroups.
func NewMockAlphaNetworkEndpointGroups(objs map[meta.Key]*MockNetworkEndpointGroupsObj) *MockAlphaNetworkEndpointGroups {
	mock := &MockAlphaNetworkEndpointGroups{
		Objects:     objs,
		GetError:    map[meta.Key]error{},
		InsertError: map[meta.Key]error{},
		DeleteError: map[meta.Key]error{},
	}
	return mock
}

// MockAlphaNetworkEndpointGroups is the mock for NetworkEndpointGroups.
type MockAlphaNetworkEndpointGroups struct {
	// Lock guards all of the fields below; every mock method takes it.
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockNetworkEndpointGroupsObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError            map[meta.Key]error
	ListError           *error
	InsertError         map[meta.Key]error
	DeleteError         map[meta.Key]error
	AggregatedListError *error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior after the hook function executes.
	GetHook                     func(m *MockAlphaNetworkEndpointGroups, ctx context.Context, key meta.Key) (bool, *alpha.NetworkEndpointGroup, error)
	ListHook                    func(m *MockAlphaNetworkEndpointGroups, ctx context.Context, zone string, fl *filter.F) (bool, []*alpha.NetworkEndpointGroup, error)
	InsertHook                  func(m *MockAlphaNetworkEndpointGroups, ctx context.Context, key meta.Key, obj *alpha.NetworkEndpointGroup) (bool, error)
	DeleteHook                  func(m *MockAlphaNetworkEndpointGroups, ctx context.Context, key meta.Key) (bool, error)
	AggregatedListHook          func(m *MockAlphaNetworkEndpointGroups, ctx context.Context, fl *filter.F) (bool, map[string][]*alpha.NetworkEndpointGroup, error)
	AttachNetworkEndpointsHook  func(*MockAlphaNetworkEndpointGroups, context.Context, meta.Key, *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error
	DetachNetworkEndpointsHook  func(*MockAlphaNetworkEndpointGroups, context.Context, meta.Key, *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
+func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key meta.Key) (*alpha.NetworkEndpointGroup, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToAlpha() + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), + } + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. 
+func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, zone, fl); intercept { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.NetworkEndpointGroup + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key meta.Key, obj *alpha.NetworkEndpointGroup) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v exists", key), + } + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionAlpha, "mock-project", "networkEndpointGroups", key) + } + + m.Objects[key] = &MockNetworkEndpointGroupsObj{obj} + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), + } + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// AggregatedList is a mock for AggregatedList. +func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { + if m.AggregatedListHook != nil { + if intercept, objs, err := m.AggregatedListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.AggregatedListError != nil { + err := *m.AggregatedListError + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + + objs := map[string][]*alpha.NetworkEndpointGroup{} + for _, obj := range m.Objects { + res, err := ParseResourceURL(obj.ToAlpha().SelfLink) + location := res.Key.Zone + if err != nil { + glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs[location] = append(objs[location], obj.ToAlpha()) + } + 
glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaNetworkEndpointGroups) Obj(o *alpha.NetworkEndpointGroup) *MockNetworkEndpointGroupsObj { + return &MockNetworkEndpointGroupsObj{o} +} + +// AttachNetworkEndpoints is a mock for the corresponding method. +func (m *MockAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error { + if m.AttachNetworkEndpointsHook != nil { + return m.AttachNetworkEndpointsHook(m, ctx, key, arg0) + } + return nil +} + +// DetachNetworkEndpoints is a mock for the corresponding method. +func (m *MockAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error { + if m.DetachNetworkEndpointsHook != nil { + return m.DetachNetworkEndpointsHook(m, ctx, key, arg0) + } + return nil +} + +// GCEAlphaNetworkEndpointGroups is a simplifying adapter for the GCE NetworkEndpointGroups. +type GCEAlphaNetworkEndpointGroups struct { + s *Service +} + +// Get the NetworkEndpointGroup named by key. +func (g *GCEAlphaNetworkEndpointGroups) Get(ctx context.Context, key meta.Key) (*alpha.NetworkEndpointGroup, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all NetworkEndpointGroup objects. 
+func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.Alpha.NetworkEndpointGroups.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.NetworkEndpointGroup + f := func(l *alpha.NetworkEndpointGroupList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert NetworkEndpointGroup with key of value obj. +func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key meta.Key, obj *alpha.NetworkEndpointGroup) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.Alpha.NetworkEndpointGroups.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the NetworkEndpointGroup referenced by key. 
+func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// AggregatedList lists all resources of the given type across all locations. +func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AggregatedList", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + + call := g.s.Alpha.NetworkEndpointGroups.AggregatedList(projectID) + call.Context(ctx) + if fl != filter.None { + call.Filter(fl.String()) + } + + all := map[string][]*alpha.NetworkEndpointGroup{} + f := func(l *alpha.NetworkEndpointGroupAggregatedList) error { + for k, v := range l.Items { + all[k] = append(all[k], v.NetworkEndpointGroups...) + } + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// AttachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
+func (g *GCEAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachNetworkEndpoints", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// DetachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. +func (g *GCEAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachNetworkEndpoints", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.Alpha.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Projects is an interface that allows for mocking of Projects. +type Projects interface { + // ProjectsOps is an interface with additional non-CRUD type methods. + // This interface is expected to be implemented by hand (non-autogenerated). + ProjectsOps +} + +// NewMockProjects returns a new mock for Projects. +func NewMockProjects(objs map[meta.Key]*MockProjectsObj) *MockProjects { + mock := &MockProjects{ + Objects: objs, + } + return mock +} + +// MockProjects is the mock for Projects. 
type MockProjects struct {
	// Lock guards all of the fields below.
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockProjectsObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	// (Projects has no generated CRUD methods, so no error fields follow.)

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior after the hook function executes.
	// (Projects has no generated CRUD methods, so no hook fields follow.)

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Obj wraps the object for use in the mock.
func (m *MockProjects) Obj(o *ga.Project) *MockProjectsObj {
	return &MockProjectsObj{o}
}

// GCEProjects is a simplifying adapter for the GCE Projects.
type GCEProjects struct {
	s *Service
}

// Regions is an interface that allows for mocking of Regions.
type Regions interface {
	Get(ctx context.Context, key meta.Key) (*ga.Region, error)
	List(ctx context.Context, fl *filter.F) ([]*ga.Region, error)
}

// NewMockRegions returns a new mock for Regions.
func NewMockRegions(objs map[meta.Key]*MockRegionsObj) *MockRegions {
	mock := &MockRegions{
		Objects:  objs,
		GetError: map[meta.Key]error{},
	}
	return mock
}

// MockRegions is the mock for Regions.
type MockRegions struct {
	// Lock guards all of the fields below.
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockRegionsObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError  map[meta.Key]error
	ListError *error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior after the hook function executes.
	GetHook  func(m *MockRegions, ctx context.Context, key meta.Key) (bool, *ga.Region, error)
	ListHook func(m *MockRegions, ctx context.Context, fl *filter.F) (bool, []*ga.Region, error)

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
func (m *MockRegions) Get(ctx context.Context, key meta.Key) (*ga.Region, error) {
	// Give a user-installed hook the first chance to answer.
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	// A forced error for this key takes precedence over stored objects.
	if err, ok := m.GetError[key]; ok {
		glog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[key]; ok {
		typedObj := obj.ToGA()
		glog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

	// Mirror the real API: unknown keys yield an HTTP 404 error.
	err := &googleapi.Error{
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockRegions %v not found", key),
	}
	glog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

// List all of the objects in the mock.
+func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockRegions.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Region + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Obj wraps the object for use in the mock. +func (m *MockRegions) Obj(o *ga.Region) *MockRegionsObj { + return &MockRegionsObj{o} +} + +// GCERegions is a simplifying adapter for the GCE Regions. +type GCERegions struct { + s *Service +} + +// Get the Region named by key. +func (g *GCERegions) Get(ctx context.Context, key meta.Key) (*ga.Region, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Regions", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Regions.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Region objects. 
func (g *GCERegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "List",
		Version:   meta.Version("ga"),
		Service:   "Regions",
	}
	// Honor the project rate limiter before issuing the API call.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.Regions.List(projectID)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	// Walk every page of results, accumulating the items.
	var all []*ga.Region
	f := func(l *ga.RegionList) error {
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}

// Routes is an interface that allows for mocking of Routes.
type Routes interface {
	Get(ctx context.Context, key meta.Key) (*ga.Route, error)
	List(ctx context.Context, fl *filter.F) ([]*ga.Route, error)
	Insert(ctx context.Context, key meta.Key, obj *ga.Route) error
	Delete(ctx context.Context, key meta.Key) error
}

// NewMockRoutes returns a new mock for Routes.
func NewMockRoutes(objs map[meta.Key]*MockRoutesObj) *MockRoutes {
	mock := &MockRoutes{
		Objects:     objs,
		GetError:    map[meta.Key]error{},
		InsertError: map[meta.Key]error{},
		DeleteError: map[meta.Key]error{},
	}
	return mock
}

// MockRoutes is the mock for Routes.
type MockRoutes struct {
	// Lock guards all of the fields below.
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockRoutesObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError    map[meta.Key]error
	ListError   *error
	InsertError map[meta.Key]error
	DeleteError map[meta.Key]error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior after the hook function executes.
	GetHook    func(m *MockRoutes, ctx context.Context, key meta.Key) (bool, *ga.Route, error)
	ListHook   func(m *MockRoutes, ctx context.Context, fl *filter.F) (bool, []*ga.Route, error)
	InsertHook func(m *MockRoutes, ctx context.Context, key meta.Key, obj *ga.Route) (bool, error)
	DeleteHook func(m *MockRoutes, ctx context.Context, key meta.Key) (bool, error)

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
func (m *MockRoutes) Get(ctx context.Context, key meta.Key) (*ga.Route, error) {
	// Give a user-installed hook the first chance to answer.
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	// A forced error for this key takes precedence over stored objects.
	if err, ok := m.GetError[key]; ok {
		glog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[key]; ok {
		typedObj := obj.ToGA()
		glog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

	// Mirror the real API: unknown keys yield an HTTP 404 error.
	err := &googleapi.Error{
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockRoutes %v not found", key),
	}
	glog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

// List all of the objects in the mock.
+func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockRoutes.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Route + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockRoutes) Insert(ctx context.Context, key meta.Key, obj *ga.Route) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockRoutes %v exists", key), + } + glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "routes", key) + } + + m.Objects[key] = &MockRoutesObj{obj} + glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockRoutes) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockRoutes %v not found", key), + } + glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockRoutes.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockRoutes) Obj(o *ga.Route) *MockRoutesObj { + return &MockRoutesObj{o} +} + +// GCERoutes is a simplifying adapter for the GCE Routes. +type GCERoutes struct { + s *Service +} + +// Get the Route named by key. +func (g *GCERoutes) Get(ctx context.Context, key meta.Key) (*ga.Route, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Routes", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Routes.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Route objects. 
func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "List",
		Version:   meta.Version("ga"),
		Service:   "Routes",
	}
	// Honor the project rate limiter before issuing the API call.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.Routes.List(projectID)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	// Walk every page of results, accumulating the items.
	var all []*ga.Route
	f := func(l *ga.RouteList) error {
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}

// Insert Route with key of value obj.
func (g *GCERoutes) Insert(ctx context.Context, key meta.Key, obj *ga.Route) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Insert",
		Version:   meta.Version("ga"),
		Service:   "Routes",
	}
	// Honor the project rate limiter before issuing the API call.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	// The object's name is always taken from the key.
	obj.Name = key.Name
	call := g.s.GA.Routes.Insert(projectID, obj)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Block until the returned GCE operation finishes (or ctx is done).
	return g.s.WaitForCompletion(ctx, op)
}

// Delete the Route referenced by key.
func (g *GCERoutes) Delete(ctx context.Context, key meta.Key) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Delete",
		Version:   meta.Version("ga"),
		Service:   "Routes",
	}
	// Honor the project rate limiter before issuing the API call.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.Routes.Delete(projectID, key.Name)

	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Block until the returned GCE operation finishes (or ctx is done).
	return g.s.WaitForCompletion(ctx, op)
}

// SslCertificates is an interface that allows for mocking of SslCertificates.
type SslCertificates interface {
	Get(ctx context.Context, key meta.Key) (*ga.SslCertificate, error)
	List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error)
	Insert(ctx context.Context, key meta.Key, obj *ga.SslCertificate) error
	Delete(ctx context.Context, key meta.Key) error
}

// NewMockSslCertificates returns a new mock for SslCertificates.
func NewMockSslCertificates(objs map[meta.Key]*MockSslCertificatesObj) *MockSslCertificates {
	mock := &MockSslCertificates{
		Objects:     objs,
		GetError:    map[meta.Key]error{},
		InsertError: map[meta.Key]error{},
		DeleteError: map[meta.Key]error{},
	}
	return mock
}

// MockSslCertificates is the mock for SslCertificates.
type MockSslCertificates struct {
	// Lock guards all of the fields below.
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockSslCertificatesObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError    map[meta.Key]error
	ListError   *error
	InsertError map[meta.Key]error
	DeleteError map[meta.Key]error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior after the hook function executes.
	GetHook    func(m *MockSslCertificates, ctx context.Context, key meta.Key) (bool, *ga.SslCertificate, error)
	ListHook   func(m *MockSslCertificates, ctx context.Context, fl *filter.F) (bool, []*ga.SslCertificate, error)
	InsertHook func(m *MockSslCertificates, ctx context.Context, key meta.Key, obj *ga.SslCertificate) (bool, error)
	DeleteHook func(m *MockSslCertificates, ctx context.Context, key meta.Key) (bool, error)

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
+func (m *MockSslCertificates) Get(ctx context.Context, key meta.Key) (*ga.SslCertificate, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockSslCertificates %v not found", key), + } + glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.SslCertificate + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockSslCertificates) Insert(ctx context.Context, key meta.Key, obj *ga.SslCertificate) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockSslCertificates %v exists", key), + } + glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "sslCertificates", key) + } + + m.Objects[key] = &MockSslCertificatesObj{obj} + glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockSslCertificates) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockSslCertificates %v not found", key), + } + glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockSslCertificates) Obj(o *ga.SslCertificate) *MockSslCertificatesObj { + return &MockSslCertificatesObj{o} +} + +// GCESslCertificates is a simplifying adapter for the GCE SslCertificates. +type GCESslCertificates struct { + s *Service +} + +// Get the SslCertificate named by key. +func (g *GCESslCertificates) Get(ctx context.Context, key meta.Key) (*ga.SslCertificate, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.SslCertificates.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all SslCertificate objects. +func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.SslCertificates.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.SslCertificate + f := func(l *ga.SslCertificateList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert SslCertificate with key of value obj. 
+func (g *GCESslCertificates) Insert(ctx context.Context, key meta.Key, obj *ga.SslCertificate) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.SslCertificates.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the SslCertificate referenced by key. +func (g *GCESslCertificates) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.SslCertificates.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// TargetHttpProxies is an interface that allows for mocking of TargetHttpProxies. +type TargetHttpProxies interface { + Get(ctx context.Context, key meta.Key) (*ga.TargetHttpProxy, error) + List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) + Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpProxy) error + Delete(ctx context.Context, key meta.Key) error + SetUrlMap(context.Context, meta.Key, *ga.UrlMapReference) error +} + +// NewMockTargetHttpProxies returns a new mock for TargetHttpProxies. 
+func NewMockTargetHttpProxies(objs map[meta.Key]*MockTargetHttpProxiesObj) *MockTargetHttpProxies { + mock := &MockTargetHttpProxies{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockTargetHttpProxies is the mock for TargetHttpProxies. +type MockTargetHttpProxies struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetHttpProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockTargetHttpProxies, ctx context.Context, key meta.Key) (bool, *ga.TargetHttpProxy, error) + ListHook func(m *MockTargetHttpProxies, ctx context.Context, fl *filter.F) (bool, []*ga.TargetHttpProxy, error) + InsertHook func(m *MockTargetHttpProxies, ctx context.Context, key meta.Key, obj *ga.TargetHttpProxy) (bool, error) + DeleteHook func(m *MockTargetHttpProxies, ctx context.Context, key meta.Key) (bool, error) + SetUrlMapHook func(*MockTargetHttpProxies, context.Context, meta.Key, *ga.UrlMapReference) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockTargetHttpProxies) Get(ctx context.Context, key meta.Key) (*ga.TargetHttpProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), + } + glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.TargetHttpProxy + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockTargetHttpProxies) Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockTargetHttpProxies %v exists", key), + } + glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "targetHttpProxies", key) + } + + m.Objects[key] = &MockTargetHttpProxiesObj{obj} + glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockTargetHttpProxies) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), + } + glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. 
+func (m *MockTargetHttpProxies) Obj(o *ga.TargetHttpProxy) *MockTargetHttpProxiesObj { + return &MockTargetHttpProxiesObj{o} +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockTargetHttpProxies) SetUrlMap(ctx context.Context, key meta.Key, arg0 *ga.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(m, ctx, key, arg0) + } + return nil +} + +// GCETargetHttpProxies is a simplifying adapter for the GCE TargetHttpProxies. +type GCETargetHttpProxies struct { + s *Service +} + +// Get the TargetHttpProxy named by key. +func (g *GCETargetHttpProxies) Get(ctx context.Context, key meta.Key) (*ga.TargetHttpProxy, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetHttpProxies.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all TargetHttpProxy objects. +func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetHttpProxies.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.TargetHttpProxy + f := func(l *ga.TargetHttpProxyList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert TargetHttpProxy with key of value obj. 
+func (g *GCETargetHttpProxies) Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpProxy) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.TargetHttpProxies.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the TargetHttpProxy referenced by key. +func (g *GCETargetHttpProxies) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetHttpProxies.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// SetUrlMap is a method on GCETargetHttpProxies. +func (g *GCETargetHttpProxies) SetUrlMap(ctx context.Context, key meta.Key, arg0 *ga.UrlMapReference) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetHttpProxies.SetUrlMap(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// TargetHttpsProxies is an interface that allows for mocking of TargetHttpsProxies. 
+type TargetHttpsProxies interface { + Get(ctx context.Context, key meta.Key) (*ga.TargetHttpsProxy, error) + List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) + Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpsProxy) error + Delete(ctx context.Context, key meta.Key) error + SetSslCertificates(context.Context, meta.Key, *ga.TargetHttpsProxiesSetSslCertificatesRequest) error + SetUrlMap(context.Context, meta.Key, *ga.UrlMapReference) error +} + +// NewMockTargetHttpsProxies returns a new mock for TargetHttpsProxies. +func NewMockTargetHttpsProxies(objs map[meta.Key]*MockTargetHttpsProxiesObj) *MockTargetHttpsProxies { + mock := &MockTargetHttpsProxies{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockTargetHttpsProxies is the mock for TargetHttpsProxies. +type MockTargetHttpsProxies struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetHttpsProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ GetHook func(m *MockTargetHttpsProxies, ctx context.Context, key meta.Key) (bool, *ga.TargetHttpsProxy, error) + ListHook func(m *MockTargetHttpsProxies, ctx context.Context, fl *filter.F) (bool, []*ga.TargetHttpsProxy, error) + InsertHook func(m *MockTargetHttpsProxies, ctx context.Context, key meta.Key, obj *ga.TargetHttpsProxy) (bool, error) + DeleteHook func(m *MockTargetHttpsProxies, ctx context.Context, key meta.Key) (bool, error) + SetSslCertificatesHook func(*MockTargetHttpsProxies, context.Context, meta.Key, *ga.TargetHttpsProxiesSetSslCertificatesRequest) error + SetUrlMapHook func(*MockTargetHttpsProxies, context.Context, meta.Key, *ga.UrlMapReference) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockTargetHttpsProxies) Get(ctx context.Context, key meta.Key) (*ga.TargetHttpsProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), + } + glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. 
+func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.TargetHttpsProxy + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpsProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockTargetHttpsProxies %v exists", key), + } + glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "targetHttpsProxies", key) + } + + m.Objects[key] = &MockTargetHttpsProxiesObj{obj} + glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), + } + glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockTargetHttpsProxies) Obj(o *ga.TargetHttpsProxy) *MockTargetHttpsProxiesObj { + return &MockTargetHttpsProxiesObj{o} +} + +// SetSslCertificates is a mock for the corresponding method. +func (m *MockTargetHttpsProxies) SetSslCertificates(ctx context.Context, key meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { + if m.SetSslCertificatesHook != nil { + return m.SetSslCertificatesHook(m, ctx, key, arg0) + } + return nil +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockTargetHttpsProxies) SetUrlMap(ctx context.Context, key meta.Key, arg0 *ga.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(m, ctx, key, arg0) + } + return nil +} + +// GCETargetHttpsProxies is a simplifying adapter for the GCE TargetHttpsProxies. +type GCETargetHttpsProxies struct { + s *Service +} + +// Get the TargetHttpsProxy named by key. 
+func (g *GCETargetHttpsProxies) Get(ctx context.Context, key meta.Key) (*ga.TargetHttpsProxy, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetHttpsProxies.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all TargetHttpsProxy objects. +func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetHttpsProxies.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.TargetHttpsProxy + f := func(l *ga.TargetHttpsProxyList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert TargetHttpsProxy with key of value obj. +func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key meta.Key, obj *ga.TargetHttpsProxy) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.TargetHttpsProxies.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the TargetHttpsProxy referenced by key. 
+func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key meta.Key) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetHttpsProxies.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// SetSslCertificates is a method on GCETargetHttpsProxies. +func (g *GCETargetHttpsProxies) SetSslCertificates(ctx context.Context, key meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetSslCertificates", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetHttpsProxies.SetSslCertificates(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// SetUrlMap is a method on GCETargetHttpsProxies. 
+func (g *GCETargetHttpsProxies) SetUrlMap(ctx context.Context, key meta.Key, arg0 *ga.UrlMapReference) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.TargetHttpsProxies.SetUrlMap(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// TargetPools is an interface that allows for mocking of TargetPools. +type TargetPools interface { + Get(ctx context.Context, key meta.Key) (*ga.TargetPool, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) + Insert(ctx context.Context, key meta.Key, obj *ga.TargetPool) error + Delete(ctx context.Context, key meta.Key) error + AddInstance(context.Context, meta.Key, *ga.TargetPoolsAddInstanceRequest) error + RemoveInstance(context.Context, meta.Key, *ga.TargetPoolsRemoveInstanceRequest) error +} + +// NewMockTargetPools returns a new mock for TargetPools. +func NewMockTargetPools(objs map[meta.Key]*MockTargetPoolsObj) *MockTargetPools { + mock := &MockTargetPools{ + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockTargetPools is the mock for TargetPools. +type MockTargetPools struct { + Lock sync.Mutex + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetPoolsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. 
Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(m *MockTargetPools, ctx context.Context, key meta.Key) (bool, *ga.TargetPool, error) + ListHook func(m *MockTargetPools, ctx context.Context, region string, fl *filter.F) (bool, []*ga.TargetPool, error) + InsertHook func(m *MockTargetPools, ctx context.Context, key meta.Key, obj *ga.TargetPool) (bool, error) + DeleteHook func(m *MockTargetPools, ctx context.Context, key meta.Key) (bool, error) + AddInstanceHook func(*MockTargetPools, context.Context, meta.Key, *ga.TargetPoolsAddInstanceRequest) error + RemoveInstanceHook func(*MockTargetPools, context.Context, meta.Key, *ga.TargetPoolsRemoveInstanceRequest) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockTargetPools) Get(ctx context.Context, key meta.Key) (*ga.TargetPool, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[key]; ok { + glog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[key]; ok { + typedObj := obj.ToGA() + glog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetPools %v not found", key), + } + glog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. 
+func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, region, fl); intercept { + glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.TargetPool + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockTargetPools) Insert(ctx context.Context, key meta.Key, obj *ga.TargetPool) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(m, ctx, key, obj); intercept { + glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[key]; ok { + glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockTargetPools %v exists", key), + } + glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + if obj.SelfLink == "" { + obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "targetPools", key) + } + + m.Objects[key] = &MockTargetPoolsObj{obj} + glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockTargetPools) Delete(ctx context.Context, key meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(m, ctx, key); intercept { + glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[key]; ok { + glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetPools %v not found", key), + } + glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, key) + glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockTargetPools) Obj(o *ga.TargetPool) *MockTargetPoolsObj { + return &MockTargetPoolsObj{o} +} + +// AddInstance is a mock for the corresponding method. +func (m *MockTargetPools) AddInstance(ctx context.Context, key meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error { + if m.AddInstanceHook != nil { + return m.AddInstanceHook(m, ctx, key, arg0) + } + return nil +} + +// RemoveInstance is a mock for the corresponding method. +func (m *MockTargetPools) RemoveInstance(ctx context.Context, key meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error { + if m.RemoveInstanceHook != nil { + return m.RemoveInstanceHook(m, ctx, key, arg0) + } + return nil +} + +// GCETargetPools is a simplifying adapter for the GCE TargetPools. +type GCETargetPools struct { + s *Service +} + +// Get the TargetPool named by key. 
+func (g *GCETargetPools) Get(ctx context.Context, key meta.Key) (*ga.TargetPool, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "TargetPools", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetPools.Get(projectID, key.Region, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all TargetPool objects. +func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "TargetPools", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.TargetPools.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.TargetPool + f := func(l *ga.TargetPoolList) error { + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} + +// Insert TargetPool with key of value obj. +func (g *GCETargetPools) Insert(ctx context.Context, key meta.Key, obj *ga.TargetPool) error { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "TargetPools", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + obj.Name = key.Name + call := g.s.GA.TargetPools.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} + +// Delete the TargetPool referenced by key. 
func (g *GCETargetPools) Delete(ctx context.Context, key meta.Key) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Delete",
		Version:   meta.Version("ga"),
		Service:   "TargetPools",
	}
	// Block until the rate limiter admits the call (or ctx is done).
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.TargetPools.Delete(projectID, key.Region, key.Name)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Delete is asynchronous; block until the returned operation completes.
	return g.s.WaitForCompletion(ctx, op)
}

// AddInstance is a method on GCETargetPools.
func (g *GCETargetPools) AddInstance(ctx context.Context, key meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "AddInstance",
		Version:   meta.Version("ga"),
		Service:   "TargetPools",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.TargetPools.AddInstance(projectID, key.Region, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// RemoveInstance is a method on GCETargetPools.
func (g *GCETargetPools) RemoveInstance(ctx context.Context, key meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "RemoveInstance",
		Version:   meta.Version("ga"),
		Service:   "TargetPools",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.TargetPools.RemoveInstance(projectID, key.Region, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// UrlMaps is an interface that allows for mocking of UrlMaps.
type UrlMaps interface {
	Get(ctx context.Context, key meta.Key) (*ga.UrlMap, error)
	List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error)
	Insert(ctx context.Context, key meta.Key, obj *ga.UrlMap) error
	Delete(ctx context.Context, key meta.Key) error
	Update(context.Context, meta.Key, *ga.UrlMap) error
}

// NewMockUrlMaps returns a new mock for UrlMaps.
func NewMockUrlMaps(objs map[meta.Key]*MockUrlMapsObj) *MockUrlMaps {
	mock := &MockUrlMaps{
		Objects:     objs,
		GetError:    map[meta.Key]error{},
		InsertError: map[meta.Key]error{},
		DeleteError: map[meta.Key]error{},
	}
	return mock
}

// MockUrlMaps is the mock for UrlMaps.
type MockUrlMaps struct {
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockUrlMapsObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError    map[meta.Key]error
	ListError   *error
	InsertError map[meta.Key]error
	DeleteError map[meta.Key]error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior after the hook function executes.
	GetHook    func(m *MockUrlMaps, ctx context.Context, key meta.Key) (bool, *ga.UrlMap, error)
	ListHook   func(m *MockUrlMaps, ctx context.Context, fl *filter.F) (bool, []*ga.UrlMap, error)
	InsertHook func(m *MockUrlMaps, ctx context.Context, key meta.Key, obj *ga.UrlMap) (bool, error)
	DeleteHook func(m *MockUrlMaps, ctx context.Context, key meta.Key) (bool, error)
	UpdateHook func(*MockUrlMaps, context.Context, meta.Key, *ga.UrlMap) error

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
func (m *MockUrlMaps) Get(ctx context.Context, key meta.Key) (*ga.UrlMap, error) {
	// Allow a registered hook to intercept and fully handle the call.
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	// Injected per-key errors take precedence over the stored objects.
	if err, ok := m.GetError[key]; ok {
		glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[key]; ok {
		typedObj := obj.ToGA()
		glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

	// Mirror the live API: a missing resource yields a 404.
	err := &googleapi.Error{
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockUrlMaps %v not found", key),
	}
	glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

// List all of the objects in the mock.
func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) {
	if m.ListHook != nil {
		if intercept, objs, err := m.ListHook(m, ctx, fl); intercept {
			glog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
			return objs, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	// A non-nil ListError fails every List call.
	if m.ListError != nil {
		err := *m.ListError
		glog.V(5).Infof("MockUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err)

		return nil, *m.ListError
	}

	// Return only the stored objects that match the filter.
	var objs []*ga.UrlMap
	for _, obj := range m.Objects {
		if !fl.Match(obj.ToGA()) {
			continue
		}
		objs = append(objs, obj.ToGA())
	}

	glog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
	return objs, nil
}

// Insert is a mock for inserting/creating a new object.
func (m *MockUrlMaps) Insert(ctx context.Context, key meta.Key, obj *ga.UrlMap) error {
	// Allow a registered hook to intercept and fully handle the call.
	if m.InsertHook != nil {
		if intercept, err := m.InsertHook(m, ctx, key, obj); intercept {
			glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	// Injected per-key errors take precedence over the stored objects.
	if err, ok := m.InsertError[key]; ok {
		glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}
	if _, ok := m.Objects[key]; ok {
		// Mirror the live API: inserting an existing resource is a 409.
		err := &googleapi.Error{
			Code:    http.StatusConflict,
			Message: fmt.Sprintf("MockUrlMaps %v exists", key),
		}
		glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}

	// Populate the fields the real API would fill in on creation.
	obj.Name = key.Name
	if obj.SelfLink == "" {
		obj.SelfLink = SelfLink(meta.VersionGA, "mock-project", "urlMaps", key)
	}

	m.Objects[key] = &MockUrlMapsObj{obj}
	glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj)
	return nil
}

// Delete is a mock for deleting the object.
func (m *MockUrlMaps) Delete(ctx context.Context, key meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[key]; ok {
		glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	if _, ok := m.Objects[key]; !ok {
		// Mirror the live API: deleting a missing resource yields a 404.
		err := &googleapi.Error{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockUrlMaps %v not found", key),
		}
		glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, key)
	glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = nil", ctx, key)
	return nil
}

// Obj wraps the object for use in the mock.
func (m *MockUrlMaps) Obj(o *ga.UrlMap) *MockUrlMapsObj {
	return &MockUrlMapsObj{o}
}

// Update is a mock for the corresponding method.
// Without an UpdateHook, Update is a no-op that reports success.
func (m *MockUrlMaps) Update(ctx context.Context, key meta.Key, arg0 *ga.UrlMap) error {
	if m.UpdateHook != nil {
		return m.UpdateHook(m, ctx, key, arg0)
	}
	return nil
}

// GCEUrlMaps is a simplifying adapter for the GCE UrlMaps.
type GCEUrlMaps struct {
	s *Service
}

// Get the UrlMap named by key.
func (g *GCEUrlMaps) Get(ctx context.Context, key meta.Key) (*ga.UrlMap, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Get",
		Version:   meta.Version("ga"),
		Service:   "UrlMaps",
	}
	// Block until the rate limiter admits the call (or ctx is done).
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.UrlMaps.Get(projectID, key.Name)
	call.Context(ctx)
	return call.Do()
}

// List all UrlMap objects.
func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "List",
		Version:   meta.Version("ga"),
		Service:   "UrlMaps",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.UrlMaps.List(projectID)
	// filter.None is a sentinel meaning "no server-side filtering".
	if fl != filter.None {
		call.Filter(fl.String())
	}
	var all []*ga.UrlMap
	// Accumulate every page of results into a single slice.
	f := func(l *ga.UrlMapList) error {
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}

// Insert UrlMap with key of value obj.
func (g *GCEUrlMaps) Insert(ctx context.Context, key meta.Key, obj *ga.UrlMap) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Insert",
		Version:   meta.Version("ga"),
		Service:   "UrlMaps",
	}
	// Block until the rate limiter admits the call (or ctx is done).
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	// The resource name is always taken from the key, overriding obj.Name.
	obj.Name = key.Name
	call := g.s.GA.UrlMaps.Insert(projectID, obj)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	// Insert is asynchronous; block until the returned operation completes.
	return g.s.WaitForCompletion(ctx, op)
}

// Delete the UrlMap referenced by key.
func (g *GCEUrlMaps) Delete(ctx context.Context, key meta.Key) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Delete",
		Version:   meta.Version("ga"),
		Service:   "UrlMaps",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.UrlMaps.Delete(projectID, key.Name)

	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// Update is a method on GCEUrlMaps.
func (g *GCEUrlMaps) Update(ctx context.Context, key meta.Key, arg0 *ga.UrlMap) error {
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps")
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Update",
		Version:   meta.Version("ga"),
		Service:   "UrlMaps",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.UrlMaps.Update(projectID, key.Name, arg0)
	call.Context(ctx)
	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}

// Zones is an interface that allows for mocking of Zones.
// Zones is read-only in the GCE API, so only Get and List are exposed.
type Zones interface {
	Get(ctx context.Context, key meta.Key) (*ga.Zone, error)
	List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error)
}

// NewMockZones returns a new mock for Zones.
func NewMockZones(objs map[meta.Key]*MockZonesObj) *MockZones {
	mock := &MockZones{
		Objects:  objs,
		GetError: map[meta.Key]error{},
	}
	return mock
}

// MockZones is the mock for Zones.
type MockZones struct {
	Lock sync.Mutex

	// Objects maintained by the mock.
	Objects map[meta.Key]*MockZonesObj

	// If an entry exists for the given key and operation, then the error
	// will be returned instead of the operation.
	GetError  map[meta.Key]error
	ListError *error

	// xxxHook allow you to intercept the standard processing of the mock in
	// order to add your own logic. Return (true, _, _) to prevent the normal
	// execution flow of the mock. Return (false, nil, nil) to continue with
	// normal mock behavior after the hook function executes.
	GetHook  func(m *MockZones, ctx context.Context, key meta.Key) (bool, *ga.Zone, error)
	ListHook func(m *MockZones, ctx context.Context, fl *filter.F) (bool, []*ga.Zone, error)

	// X is extra state that can be used as part of the mock. Generated code
	// will not use this field.
	X interface{}
}

// Get returns the object from the mock.
func (m *MockZones) Get(ctx context.Context, key meta.Key) (*ga.Zone, error) {
	// Allow a registered hook to intercept and fully handle the call.
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(m, ctx, key); intercept {
			glog.V(5).Infof("MockZones.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}

	m.Lock.Lock()
	defer m.Lock.Unlock()

	// Injected per-key errors take precedence over the stored objects.
	if err, ok := m.GetError[key]; ok {
		glog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[key]; ok {
		typedObj := obj.ToGA()
		glog.V(5).Infof("MockZones.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

	// Mirror the live API: a missing resource yields a 404.
	err := &googleapi.Error{
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockZones %v not found", key),
	}
	glog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

// List all of the objects in the mock.
+func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(m, ctx, fl); intercept { + glog.V(5).Infof("MockZones.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + glog.V(5).Infof("MockZones.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Zone + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + glog.V(5).Infof("MockZones.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Obj wraps the object for use in the mock. +func (m *MockZones) Obj(o *ga.Zone) *MockZonesObj { + return &MockZonesObj{o} +} + +// GCEZones is a simplifying adapter for the GCE Zones. +type GCEZones struct { + s *Service +} + +// Get the Zone named by key. +func (g *GCEZones) Get(ctx context.Context, key meta.Key) (*ga.Zone, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Zones") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Zones", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Zones.Get(projectID, key.Name) + call.Context(ctx) + return call.Do() +} + +// List all Zone objects. +func (g *GCEZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Zones") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Zones", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Zones.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Zone + f := func(l *ga.ZoneList) error { + all = append(all, l.Items...) 
+ return nil + } + if err := call.Pages(ctx, f); err != nil { + return nil, err + } + return all, nil +} diff --git a/pkg/cloudprovider/providers/gce/cloud/gen_test.go b/pkg/cloudprovider/providers/gce/cloud/gen_test.go new file mode 100644 index 00000000000..cbe5d9938d3 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/gen_test.go @@ -0,0 +1,1749 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was generated by "go run gen/main.go -mode test > gen_test.go". Do not edit +// directly. + +package cloud + +import ( + "context" + "reflect" + "testing" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +const location = "location" + +func TestDisksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.ZonalKey("key-alpha", "location") + key = keyAlpha + keyGA := meta.ZonalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. 
+ if _, err := mock.AlphaDisks().Get(ctx, *key); err == nil { + t.Errorf("AlphaDisks().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Disks().Get(ctx, *key); err == nil { + t.Errorf("Disks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.Disk{} + if err := mock.AlphaDisks().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaDisks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Disk{} + if err := mock.Disks().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Disks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaDisks().Get(ctx, *key); err != nil { + t.Errorf("AlphaDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Disks().Get(ctx, *key); err != nil { + t.Errorf("Disks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaDisks.Objects[*keyAlpha] = mock.MockAlphaDisks.Obj(&alpha.Disk{Name: keyAlpha.Name}) + mock.MockDisks.Objects[*keyGA] = mock.MockDisks.Obj(&ga.Disk{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaDisks().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaDisks().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Disks().List(ctx, location, filter.None) + if err != nil { + t.Errorf("Disks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaDisks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaDisks().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaDisks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Disks().Delete(ctx, *keyGA); err != nil { + t.Errorf("Disks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaDisks().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaDisks().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Disks().Delete(ctx, *keyGA); err == nil { + t.Errorf("Disks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestFirewallsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Firewalls().Get(ctx, *key); err == nil { + t.Errorf("Firewalls().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. 
+ { + obj := &ga.Firewall{} + if err := mock.Firewalls().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Firewalls().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.Firewalls().Get(ctx, *key); err != nil { + t.Errorf("Firewalls().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockFirewalls.Objects[*keyGA] = mock.MockFirewalls.Obj(&ga.Firewall{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Firewalls().List(ctx, filter.None) + if err != nil { + t.Errorf("Firewalls().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaFirewalls().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.Firewalls().Delete(ctx, *keyGA); err != nil { + t.Errorf("Firewalls().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.Firewalls().Delete(ctx, *keyGA); err == nil { + t.Errorf("Firewalls().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestInstancesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.ZonalKey("key-alpha", "location") + key = keyAlpha + keyBeta := meta.ZonalKey("key-beta", "location") + key = keyBeta + keyGA := meta.ZonalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. 
+ if _, err := mock.AlphaInstances().Get(ctx, *key); err == nil { + t.Errorf("AlphaInstances().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BetaInstances().Get(ctx, *key); err == nil { + t.Errorf("BetaInstances().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Instances().Get(ctx, *key); err == nil { + t.Errorf("Instances().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.Instance{} + if err := mock.AlphaInstances().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &beta.Instance{} + if err := mock.BetaInstances().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("BetaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Instance{} + if err := mock.Instances().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Instances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaInstances().Get(ctx, *key); err != nil { + t.Errorf("AlphaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BetaInstances().Get(ctx, *key); err != nil { + t.Errorf("BetaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Instances().Get(ctx, *key); err != nil { + t.Errorf("Instances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaInstances.Objects[*keyAlpha] = mock.MockAlphaInstances.Obj(&alpha.Instance{Name: keyAlpha.Name}) + mock.MockBetaInstances.Objects[*keyBeta] = mock.MockBetaInstances.Obj(&beta.Instance{Name: keyBeta.Name}) + mock.MockInstances.Objects[*keyGA] = mock.MockInstances.Obj(&ga.Instance{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-beta": true, + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaInstances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BetaInstances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("BetaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Instances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("Instances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BetaInstances().Delete(ctx, *keyBeta); err != nil { + t.Errorf("BetaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Instances().Delete(ctx, *keyGA); err != nil { + t.Errorf("Instances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaInstances().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BetaInstances().Delete(ctx, *keyBeta); err == nil { + t.Errorf("BetaInstances().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Instances().Delete(ctx, *keyGA); err == nil { + t.Errorf("Instances().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestProjectsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + + // Insert. + + // Get across versions. + + // List. + mock.MockProjects.Objects[*keyGA] = mock.MockProjects.Obj(&ga.Project{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + + // Delete across versions. + + // Delete not found. +} + +func TestRoutesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Routes().Get(ctx, *key); err == nil { + t.Errorf("Routes().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.Route{} + if err := mock.Routes().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Routes().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.Routes().Get(ctx, *key); err != nil { + t.Errorf("Routes().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockRoutes.Objects[*keyGA] = mock.MockRoutes.Obj(&ga.Route{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.Routes().List(ctx, filter.None) + if err != nil { + t.Errorf("Routes().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRoutes().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.Routes().Delete(ctx, *keyGA); err != nil { + t.Errorf("Routes().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.Routes().Delete(ctx, *keyGA); err == nil { + t.Errorf("Routes().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestTargetPoolsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.RegionalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.TargetPools().Get(ctx, *key); err == nil { + t.Errorf("TargetPools().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.TargetPool{} + if err := mock.TargetPools().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("TargetPools().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.TargetPools().Get(ctx, *key); err != nil { + t.Errorf("TargetPools().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockTargetPools.Objects[*keyGA] = mock.MockTargetPools.Obj(&ga.TargetPool{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.TargetPools().List(ctx, location, filter.None) + if err != nil { + t.Errorf("TargetPools().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaTargetPools().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.TargetPools().Delete(ctx, *keyGA); err != nil { + t.Errorf("TargetPools().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.TargetPools().Delete(ctx, *keyGA); err == nil { + t.Errorf("TargetPools().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestGlobalForwardingRulesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.GlobalForwardingRules().Get(ctx, *key); err == nil { + t.Errorf("GlobalForwardingRules().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.ForwardingRule{} + if err := mock.GlobalForwardingRules().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("GlobalForwardingRules().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.GlobalForwardingRules().Get(ctx, *key); err != nil { + t.Errorf("GlobalForwardingRules().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockGlobalForwardingRules.Objects[*keyGA] = mock.MockGlobalForwardingRules.Obj(&ga.ForwardingRule{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.GlobalForwardingRules().List(ctx, filter.None) + if err != nil { + t.Errorf("GlobalForwardingRules().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaGlobalForwardingRules().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err != nil { + t.Errorf("GlobalForwardingRules().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err == nil { + t.Errorf("GlobalForwardingRules().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestNetworkEndpointGroupsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.ZonalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err == nil { + t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.NetworkEndpointGroup{} + if err := mock.AlphaNetworkEndpointGroups().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaNetworkEndpointGroups.Objects[*keyAlpha] = mock.MockAlphaNetworkEndpointGroups.Obj(&alpha.NetworkEndpointGroup{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaNetworkEndpointGroups().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaNetworkEndpointGroups().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaNetworkEndpointGroups().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestTargetHttpProxiesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.TargetHttpProxies().Get(ctx, *key); err == nil { + t.Errorf("TargetHttpProxies().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.TargetHttpProxy{} + if err := mock.TargetHttpProxies().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("TargetHttpProxies().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.TargetHttpProxies().Get(ctx, *key); err != nil { + t.Errorf("TargetHttpProxies().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockTargetHttpProxies.Objects[*keyGA] = mock.MockTargetHttpProxies.Obj(&ga.TargetHttpProxy{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.TargetHttpProxies().List(ctx, filter.None) + if err != nil { + t.Errorf("TargetHttpProxies().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaTargetHttpProxies().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err != nil { + t.Errorf("TargetHttpProxies().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err == nil { + t.Errorf("TargetHttpProxies().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestUrlMapsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.UrlMaps().Get(ctx, *key); err == nil { + t.Errorf("UrlMaps().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.UrlMap{} + if err := mock.UrlMaps().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("UrlMaps().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.UrlMaps().Get(ctx, *key); err != nil { + t.Errorf("UrlMaps().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockUrlMaps.Objects[*keyGA] = mock.MockUrlMaps.Obj(&ga.UrlMap{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.UrlMaps().List(ctx, filter.None) + if err != nil { + t.Errorf("UrlMaps().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaUrlMaps().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.UrlMaps().Delete(ctx, *keyGA); err != nil { + t.Errorf("UrlMaps().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.UrlMaps().Delete(ctx, *keyGA); err == nil { + t.Errorf("UrlMaps().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestZonesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Zones().Get(ctx, *key); err == nil { + t.Errorf("Zones().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + + // Get across versions. + + // List. + mock.MockZones.Objects[*keyGA] = mock.MockZones.Obj(&ga.Zone{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Zones().List(ctx, filter.None) + if err != nil { + t.Errorf("Zones().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaZones().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + + // Delete not found. +} + +func TestGlobalAddressesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. 
+ _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.GlobalAddresses().Get(ctx, *key); err == nil { + t.Errorf("GlobalAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.Address{} + if err := mock.GlobalAddresses().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("GlobalAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.GlobalAddresses().Get(ctx, *key); err != nil { + t.Errorf("GlobalAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockGlobalAddresses.Objects[*keyGA] = mock.MockGlobalAddresses.Obj(&ga.Address{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.GlobalAddresses().List(ctx, filter.None) + if err != nil { + t.Errorf("GlobalAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaGlobalAddresses().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err != nil { + t.Errorf("GlobalAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err == nil { + t.Errorf("GlobalAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestBackendServicesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.GlobalKey("key-alpha") + key = keyAlpha + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. 
+ if _, err := mock.AlphaBackendServices().Get(ctx, *key); err == nil { + t.Errorf("AlphaBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BackendServices().Get(ctx, *key); err == nil { + t.Errorf("BackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.BackendService{} + if err := mock.AlphaBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.BackendService{} + if err := mock.BackendServices().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("BackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaBackendServices().Get(ctx, *key); err != nil { + t.Errorf("AlphaBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BackendServices().Get(ctx, *key); err != nil { + t.Errorf("BackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaBackendServices.Objects[*keyAlpha] = mock.MockAlphaBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) + mock.MockBackendServices.Objects[*keyGA] = mock.MockBackendServices.Obj(&ga.BackendService{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaBackendServices().List(ctx, filter.None) + if err != nil { + t.Errorf("AlphaBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BackendServices().List(ctx, filter.None) + if err != nil { + t.Errorf("BackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BackendServices().Delete(ctx, *keyGA); err != nil { + t.Errorf("BackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaBackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BackendServices().Delete(ctx, *keyGA); err == nil { + t.Errorf("BackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestForwardingRulesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + keyGA := meta.RegionalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. 
+ if _, err := mock.AlphaForwardingRules().Get(ctx, *key); err == nil { + t.Errorf("AlphaForwardingRules().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.ForwardingRules().Get(ctx, *key); err == nil { + t.Errorf("ForwardingRules().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.ForwardingRule{} + if err := mock.AlphaForwardingRules().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaForwardingRules().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.ForwardingRule{} + if err := mock.ForwardingRules().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("ForwardingRules().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaForwardingRules().Get(ctx, *key); err != nil { + t.Errorf("AlphaForwardingRules().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.ForwardingRules().Get(ctx, *key); err != nil { + t.Errorf("ForwardingRules().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaForwardingRules.Objects[*keyAlpha] = mock.MockAlphaForwardingRules.Obj(&alpha.ForwardingRule{Name: keyAlpha.Name}) + mock.MockForwardingRules.Objects[*keyGA] = mock.MockForwardingRules.Obj(&ga.ForwardingRule{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaForwardingRules().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaForwardingRules().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaForwardingRules().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.ForwardingRules().List(ctx, location, filter.None) + if err != nil { + t.Errorf("ForwardingRules().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaForwardingRules().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaForwardingRules().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaForwardingRules().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.ForwardingRules().Delete(ctx, *keyGA); err != nil { + t.Errorf("ForwardingRules().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaForwardingRules().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaForwardingRules().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.ForwardingRules().Delete(ctx, *keyGA); err == nil { + t.Errorf("ForwardingRules().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestRegionsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Regions().Get(ctx, *key); err == nil { + t.Errorf("Regions().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + + // Get across versions. + + // List. 
+ mock.MockRegions.Objects[*keyGA] = mock.MockRegions.Obj(&ga.Region{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Regions().List(ctx, filter.None) + if err != nil { + t.Errorf("Regions().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegions().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + + // Delete not found. +} + +func TestAddressesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + keyBeta := meta.RegionalKey("key-beta", "location") + key = keyBeta + keyGA := meta.RegionalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaAddresses().Get(ctx, *key); err == nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BetaAddresses().Get(ctx, *key); err == nil { + t.Errorf("BetaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Addresses().Get(ctx, *key); err == nil { + t.Errorf("Addresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. 
+ { + obj := &alpha.Address{} + if err := mock.AlphaAddresses().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &beta.Address{} + if err := mock.BetaAddresses().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("BetaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Address{} + if err := mock.Addresses().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Addresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaAddresses().Get(ctx, *key); err != nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BetaAddresses().Get(ctx, *key); err != nil { + t.Errorf("BetaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Addresses().Get(ctx, *key); err != nil { + t.Errorf("Addresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaAddresses.Objects[*keyAlpha] = mock.MockAlphaAddresses.Obj(&alpha.Address{Name: keyAlpha.Name}) + mock.MockBetaAddresses.Objects[*keyBeta] = mock.MockBetaAddresses.Obj(&beta.Address{Name: keyBeta.Name}) + mock.MockAddresses.Objects[*keyGA] = mock.MockAddresses.Obj(&ga.Address{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-beta": true, + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaAddresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BetaAddresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("BetaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Addresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("Addresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err != nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err != nil { + t.Errorf("Addresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err == nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err == nil { + t.Errorf("Addresses().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestRegionBackendServicesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err == nil { + t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.BackendService{} + if err := mock.AlphaRegionBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaRegionBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err != nil { + t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaRegionBackendServices.Objects[*keyAlpha] = mock.MockAlphaRegionBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaRegionBackendServices().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaRegionBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegionBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestRegionDisksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaRegionDisks().Get(ctx, *key); err == nil { + t.Errorf("AlphaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.Disk{} + if err := mock.AlphaRegionDisks().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaRegionDisks().Get(ctx, *key); err != nil { + t.Errorf("AlphaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaRegionDisks.Objects[*keyAlpha] = mock.MockAlphaRegionDisks.Obj(&alpha.Disk{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaRegionDisks().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegionDisks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaRegionDisks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestHealthChecksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.GlobalKey("key-alpha") + key = keyAlpha + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaHealthChecks().Get(ctx, *key); err == nil { + t.Errorf("AlphaHealthChecks().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.HealthChecks().Get(ctx, *key); err == nil { + t.Errorf("HealthChecks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.HealthCheck{} + if err := mock.AlphaHealthChecks().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaHealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.HealthCheck{} + if err := mock.HealthChecks().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("HealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. 
+ if obj, err := mock.AlphaHealthChecks().Get(ctx, *key); err != nil { + t.Errorf("AlphaHealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.HealthChecks().Get(ctx, *key); err != nil { + t.Errorf("HealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaHealthChecks.Objects[*keyAlpha] = mock.MockAlphaHealthChecks.Obj(&alpha.HealthCheck{Name: keyAlpha.Name}) + mock.MockHealthChecks.Objects[*keyGA] = mock.MockHealthChecks.Obj(&ga.HealthCheck{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaHealthChecks().List(ctx, filter.None) + if err != nil { + t.Errorf("AlphaHealthChecks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaHealthChecks().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.HealthChecks().List(ctx, filter.None) + if err != nil { + t.Errorf("HealthChecks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaHealthChecks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaHealthChecks().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaHealthChecks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.HealthChecks().Delete(ctx, *keyGA); err != nil { + t.Errorf("HealthChecks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.AlphaHealthChecks().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaHealthChecks().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.HealthChecks().Delete(ctx, *keyGA); err == nil { + t.Errorf("HealthChecks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestHttpHealthChecksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.HttpHealthChecks().Get(ctx, *key); err == nil { + t.Errorf("HttpHealthChecks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.HttpHealthCheck{} + if err := mock.HttpHealthChecks().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("HttpHealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.HttpHealthChecks().Get(ctx, *key); err != nil { + t.Errorf("HttpHealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockHttpHealthChecks.Objects[*keyGA] = mock.MockHttpHealthChecks.Obj(&ga.HttpHealthCheck{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.HttpHealthChecks().List(ctx, filter.None) + if err != nil { + t.Errorf("HttpHealthChecks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaHttpHealthChecks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.HttpHealthChecks().Delete(ctx, *keyGA); err != nil { + t.Errorf("HttpHealthChecks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.HttpHealthChecks().Delete(ctx, *keyGA); err == nil { + t.Errorf("HttpHealthChecks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestHttpsHealthChecksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.HttpsHealthChecks().Get(ctx, *key); err == nil { + t.Errorf("HttpsHealthChecks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.HttpsHealthCheck{} + if err := mock.HttpsHealthChecks().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("HttpsHealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.HttpsHealthChecks().Get(ctx, *key); err != nil { + t.Errorf("HttpsHealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockHttpsHealthChecks.Objects[*keyGA] = mock.MockHttpsHealthChecks.Obj(&ga.HttpsHealthCheck{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.HttpsHealthChecks().List(ctx, filter.None) + if err != nil { + t.Errorf("HttpsHealthChecks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaHttpsHealthChecks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.HttpsHealthChecks().Delete(ctx, *keyGA); err != nil { + t.Errorf("HttpsHealthChecks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.HttpsHealthChecks().Delete(ctx, *keyGA); err == nil { + t.Errorf("HttpsHealthChecks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestInstanceGroupsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.ZonalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.InstanceGroups().Get(ctx, *key); err == nil { + t.Errorf("InstanceGroups().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.InstanceGroup{} + if err := mock.InstanceGroups().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("InstanceGroups().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.InstanceGroups().Get(ctx, *key); err != nil { + t.Errorf("InstanceGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockInstanceGroups.Objects[*keyGA] = mock.MockInstanceGroups.Obj(&ga.InstanceGroup{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.InstanceGroups().List(ctx, location, filter.None) + if err != nil { + t.Errorf("InstanceGroups().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstanceGroups().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.InstanceGroups().Delete(ctx, *keyGA); err != nil { + t.Errorf("InstanceGroups().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.InstanceGroups().Delete(ctx, *keyGA); err == nil { + t.Errorf("InstanceGroups().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestSslCertificatesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.SslCertificates().Get(ctx, *key); err == nil { + t.Errorf("SslCertificates().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.SslCertificate{} + if err := mock.SslCertificates().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("SslCertificates().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.SslCertificates().Get(ctx, *key); err != nil { + t.Errorf("SslCertificates().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockSslCertificates.Objects[*keyGA] = mock.MockSslCertificates.Obj(&ga.SslCertificate{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.SslCertificates().List(ctx, filter.None) + if err != nil { + t.Errorf("SslCertificates().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaSslCertificates().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.SslCertificates().Delete(ctx, *keyGA); err != nil { + t.Errorf("SslCertificates().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.SslCertificates().Delete(ctx, *keyGA); err == nil { + t.Errorf("SslCertificates().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestTargetHttpsProxiesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.TargetHttpsProxies().Get(ctx, *key); err == nil { + t.Errorf("TargetHttpsProxies().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.TargetHttpsProxy{} + if err := mock.TargetHttpsProxies().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("TargetHttpsProxies().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.TargetHttpsProxies().Get(ctx, *key); err != nil { + t.Errorf("TargetHttpsProxies().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockTargetHttpsProxies.Objects[*keyGA] = mock.MockTargetHttpsProxies.Obj(&ga.TargetHttpsProxy{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.TargetHttpsProxies().List(ctx, filter.None) + if err != nil { + t.Errorf("TargetHttpsProxies().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaTargetHttpsProxies().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.TargetHttpsProxies().Delete(ctx, *keyGA); err != nil { + t.Errorf("TargetHttpsProxies().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.TargetHttpsProxies().Delete(ctx, *keyGA); err == nil { + t.Errorf("TargetHttpsProxies().Delete(%v, %v) = nil; want error", ctx, key) + } +} From a0adc1bb19c706ac5d93519c1fa68689c7782720 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:36:12 -0800 Subject: [PATCH 692/794] Special custom code for handling the Projects resource --- .../providers/gce/cloud/gce_projects.go | 95 +++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/cloud/gce_projects.go diff --git a/pkg/cloudprovider/providers/gce/cloud/gce_projects.go b/pkg/cloudprovider/providers/gce/cloud/gce_projects.go new file mode 100644 index 00000000000..adc60927afc --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/gce_projects.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + "fmt" + "net/http" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" + compute "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +// ProjectsOps is the manually implemented methods for the Projects service. +type ProjectsOps interface { + Get(ctx context.Context, projectID string) (*compute.Project, error) + SetCommonInstanceMetadata(ctx context.Context, projectID string, m *compute.Metadata) error +} + +// MockProjectOpsState is stored in the mock.X field. 
+type MockProjectOpsState struct { + metadata map[string]*compute.Metadata +} + +func (m *MockProjects) Get(ctx context.Context, projectID string) (*compute.Project, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + if p, ok := m.Objects[*meta.GlobalKey(projectID)]; ok { + return p.ToGA(), nil + } + return nil, &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockProjects %v not found", projectID), + } +} + +func (g *GCEProjects) Get(ctx context.Context, projectID string) (*compute.Project, error) { + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Projects", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + call := g.s.GA.Projects.Get(projectID) + call.Context(ctx) + return call.Do() +} + +func (m *MockProjects) SetCommonInstanceMetadata(ctx context.Context, projectID string, meta *compute.Metadata) error { + if m.X == nil { + m.X = &MockProjectOpsState{metadata: map[string]*compute.Metadata{}} + } + state := m.X.(*MockProjectOpsState) + state.metadata[projectID] = meta + return nil +} + +func (g *GCEProjects) SetCommonInstanceMetadata(ctx context.Context, projectID string, m *compute.Metadata) error { + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetCommonInstanceMetadata", + Version: meta.Version("ga"), + Service: "Projects", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return err + } + call := g.s.GA.Projects.SetCommonInstanceMetadata(projectID, m) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + return err + } + return g.s.WaitForCompletion(ctx, op) +} From f076f4fa0b0418ee0c99102a9021f1e4b8978b34 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:36:28 -0800 Subject: [PATCH 693/794] Hand written unit test for exercising the mock --- .../providers/gce/cloud/mock_test.go | 150 ++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 
pkg/cloudprovider/providers/gce/cloud/mock_test.go diff --git a/pkg/cloudprovider/providers/gce/cloud/mock_test.go b/pkg/cloudprovider/providers/gce/cloud/mock_test.go new file mode 100644 index 00000000000..3d0fb160cc0 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/mock_test.go @@ -0,0 +1,150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + "reflect" + "testing" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +func TestMocks(t *testing.T) { + t.Parallel() + + // This test uses Addresses, but the logic that is generated is the same for + // other basic objects. + const region = "us-central1" + + ctx := context.Background() + mock := NewMockGCE() + + keyAlpha := meta.RegionalKey("key-alpha", region) + keyBeta := meta.RegionalKey("key-beta", region) + keyGA := meta.RegionalKey("key-ga", region) + key := keyAlpha + + // Get not found. 
+ if _, err := mock.AlphaAddresses().Get(ctx, *key); err == nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BetaAddresses().Get(ctx, *key); err == nil { + t.Errorf("BetaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Addresses().Get(ctx, *key); err == nil { + t.Errorf("Addresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + // Insert. + { + obj := &alpha.Address{} + if err := mock.AlphaAddresses().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &beta.Address{} + if err := mock.BetaAddresses().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("BetaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Address{} + if err := mock.Addresses().Insert(ctx, *keyGA, &ga.Address{Name: "ga"}); err != nil { + t.Errorf("Addresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + // Get across versions. + if obj, err := mock.AlphaAddresses().Get(ctx, *key); err != nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BetaAddresses().Get(ctx, *key); err != nil { + t.Errorf("BetaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Addresses().Get(ctx, *key); err != nil { + t.Errorf("Addresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + // List across versions. 
+ want := map[string]bool{"key-alpha": true, "key-beta": true, "key-ga": true} + { + objs, err := mock.AlphaAddresses().List(ctx, region, filter.None) + if err != nil { + t.Errorf("AlphaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, region, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BetaAddresses().List(ctx, region, filter.None) + if err != nil { + t.Errorf("BetaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, region, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Addresses().List(ctx, region, filter.None) + if err != nil { + t.Errorf("Addresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, region, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + // Delete across versions. + if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err != nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err != nil { + t.Errorf("Addresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + // Delete not found. 
+ if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err == nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err == nil { + t.Errorf("Addresses().Delete(%v, %v) = nil; want error", ctx, key) + } +} From 9a7088555904a3f3a9f61d94292fd8f308563602 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Thu, 4 Jan 2018 23:30:56 -0800 Subject: [PATCH 694/794] BUILD --- pkg/cloudprovider/providers/gce/BUILD | 5 +- pkg/cloudprovider/providers/gce/cloud/BUILD | 63 +++++++++++++++++++ .../providers/gce/cloud/filter/BUILD | 30 +++++++++ .../providers/gce/cloud/gen/BUILD | 33 ++++++++++ .../providers/gce/cloud/meta/BUILD | 41 ++++++++++++ 5 files changed, 171 insertions(+), 1 deletion(-) create mode 100644 pkg/cloudprovider/providers/gce/cloud/BUILD create mode 100644 pkg/cloudprovider/providers/gce/cloud/filter/BUILD create mode 100644 pkg/cloudprovider/providers/gce/cloud/gen/BUILD create mode 100644 pkg/cloudprovider/providers/gce/cloud/meta/BUILD diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index 18205b9fcad..b112c95912c 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -125,6 +125,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/cloudprovider/providers/gce/cloud:all-srcs", + ], tags = ["automanaged"], ) diff --git a/pkg/cloudprovider/providers/gce/cloud/BUILD b/pkg/cloudprovider/providers/gce/cloud/BUILD new file mode 100644 index 00000000000..3df8f7a5e97 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/BUILD @@ -0,0 +1,63 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "gce_projects.go", + 
"gen.go", + "op.go", + "project.go", + "ratelimit.go", + "service.go", + "utils.go", + ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", + "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", + "//vendor/google.golang.org/api/compute/v1:go_default_library", + "//vendor/google.golang.org/api/googleapi:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "gen_test.go", + "mock_test.go", + "utils_test.go", + ], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud", + deps = [ + "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", + "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", + "//vendor/google.golang.org/api/compute/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/cloudprovider/providers/gce/cloud/filter:all-srcs", + "//pkg/cloudprovider/providers/gce/cloud/gen:all-srcs", + "//pkg/cloudprovider/providers/gce/cloud/meta:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/cloudprovider/providers/gce/cloud/filter/BUILD b/pkg/cloudprovider/providers/gce/cloud/filter/BUILD new file mode 100644 index 00000000000..c0176ded894 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/filter/BUILD @@ -0,0 +1,30 @@ 
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["filter.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/glog:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = ["filter_test.go"], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/BUILD b/pkg/cloudprovider/providers/gce/cloud/gen/BUILD new file mode 100644 index 00000000000..e196daf2ac8 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/gen/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "go_default_library", + srcs = ["main.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + ], +) + +go_binary( + name = "gen", + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/BUILD b/pkg/cloudprovider/providers/gce/cloud/meta/BUILD new file mode 
100644 index 00000000000..4bcf3b5f5ba --- /dev/null +++ b/pkg/cloudprovider/providers/gce/cloud/meta/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "key.go", + "meta.go", + "method.go", + "service.go", + ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", + "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", + "//vendor/google.golang.org/api/compute/v1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["key_test.go"], + embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) From 2aaf8b47b2bea38aa9f0c6082a087fd80960a272 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Fri, 5 Jan 2018 11:54:42 -0800 Subject: [PATCH 695/794] Clean up documentation. 
--- pkg/cloudprovider/providers/gce/cloud/doc.go | 1 + .../providers/gce/cloud/gce_projects.go | 6 +- pkg/cloudprovider/providers/gce/cloud/gen.go | 95 +++++++++++++++++++ .../providers/gce/cloud/gen/main.go | 4 +- .../providers/gce/cloud/meta/method.go | 11 ++- .../providers/gce/cloud/meta/service.go | 4 + .../providers/gce/cloud/project.go | 1 + .../providers/gce/cloud/ratelimit.go | 1 + 8 files changed, 120 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/cloud/doc.go b/pkg/cloudprovider/providers/gce/cloud/doc.go index d0d7a6cfb19..a6b121457cd 100644 --- a/pkg/cloudprovider/providers/gce/cloud/doc.go +++ b/pkg/cloudprovider/providers/gce/cloud/doc.go @@ -60,6 +60,7 @@ limitations under the License. // &ServiceInfo{ // Object: "InstanceGroup", // Name of the object type. // Service: "InstanceGroups", // Name of the service. +// Resource: "instanceGroups", // Lowercase resource name (as appears in the URL). // version: meta.VersionAlpha, // API version (one entry per version is needed). // keyType: Zonal, // What kind of resource this is. // serviceType: reflect.TypeOf(&alpha.InstanceGroupsService{}), // Associated golang type. diff --git a/pkg/cloudprovider/providers/gce/cloud/gce_projects.go b/pkg/cloudprovider/providers/gce/cloud/gce_projects.go index adc60927afc..c531881a94a 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gce_projects.go +++ b/pkg/cloudprovider/providers/gce/cloud/gce_projects.go @@ -21,9 +21,9 @@ import ( "fmt" "net/http" - "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" ) // ProjectsOps is the manually implemented methods for the Projects service. @@ -37,6 +37,7 @@ type MockProjectOpsState struct { metadata map[string]*compute.Metadata } +// Get a project by projectID. 
func (m *MockProjects) Get(ctx context.Context, projectID string) (*compute.Project, error) { m.Lock.Lock() defer m.Lock.Unlock() @@ -50,6 +51,7 @@ func (m *MockProjects) Get(ctx context.Context, projectID string) (*compute.Proj } } +// Get a project by projectID. func (g *GCEProjects) Get(ctx context.Context, projectID string) (*compute.Project, error) { rk := &RateLimitKey{ ProjectID: projectID, @@ -65,6 +67,7 @@ func (g *GCEProjects) Get(ctx context.Context, projectID string) (*compute.Proje return call.Do() } +// SetCommonInstanceMetadata for a given project. func (m *MockProjects) SetCommonInstanceMetadata(ctx context.Context, projectID string, meta *compute.Metadata) error { if m.X == nil { m.X = &MockProjectOpsState{metadata: map[string]*compute.Metadata{}} @@ -74,6 +77,7 @@ func (m *MockProjects) SetCommonInstanceMetadata(ctx context.Context, projectID return nil } +// SetCommonInstanceMetadata for a given project. func (g *GCEProjects) SetCommonInstanceMetadata(ctx context.Context, projectID string, m *compute.Metadata) error { rk := &RateLimitKey{ ProjectID: projectID, diff --git a/pkg/cloudprovider/providers/gce/cloud/gen.go b/pkg/cloudprovider/providers/gce/cloud/gen.go index ef7a2c62eaf..33a2b7ba619 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen.go @@ -150,99 +150,162 @@ type GCE struct { gceZones *GCEZones } +// Addresses returns the interface for the ga Addresses. func (gce *GCE) Addresses() Addresses { return gce.gceAddresses } + +// AlphaAddresses returns the interface for the alpha Addresses. func (gce *GCE) AlphaAddresses() AlphaAddresses { return gce.gceAlphaAddresses } + +// BetaAddresses returns the interface for the beta Addresses. func (gce *GCE) BetaAddresses() BetaAddresses { return gce.gceBetaAddresses } + +// GlobalAddresses returns the interface for the ga GlobalAddresses. 
func (gce *GCE) GlobalAddresses() GlobalAddresses { return gce.gceGlobalAddresses } + +// BackendServices returns the interface for the ga BackendServices. func (gce *GCE) BackendServices() BackendServices { return gce.gceBackendServices } + +// AlphaBackendServices returns the interface for the alpha BackendServices. func (gce *GCE) AlphaBackendServices() AlphaBackendServices { return gce.gceAlphaBackendServices } + +// AlphaRegionBackendServices returns the interface for the alpha RegionBackendServices. func (gce *GCE) AlphaRegionBackendServices() AlphaRegionBackendServices { return gce.gceAlphaRegionBackendServices } + +// Disks returns the interface for the ga Disks. func (gce *GCE) Disks() Disks { return gce.gceDisks } + +// AlphaDisks returns the interface for the alpha Disks. func (gce *GCE) AlphaDisks() AlphaDisks { return gce.gceAlphaDisks } + +// AlphaRegionDisks returns the interface for the alpha RegionDisks. func (gce *GCE) AlphaRegionDisks() AlphaRegionDisks { return gce.gceAlphaRegionDisks } + +// Firewalls returns the interface for the ga Firewalls. func (gce *GCE) Firewalls() Firewalls { return gce.gceFirewalls } + +// ForwardingRules returns the interface for the ga ForwardingRules. func (gce *GCE) ForwardingRules() ForwardingRules { return gce.gceForwardingRules } + +// AlphaForwardingRules returns the interface for the alpha ForwardingRules. func (gce *GCE) AlphaForwardingRules() AlphaForwardingRules { return gce.gceAlphaForwardingRules } + +// GlobalForwardingRules returns the interface for the ga GlobalForwardingRules. func (gce *GCE) GlobalForwardingRules() GlobalForwardingRules { return gce.gceGlobalForwardingRules } + +// HealthChecks returns the interface for the ga HealthChecks. func (gce *GCE) HealthChecks() HealthChecks { return gce.gceHealthChecks } + +// AlphaHealthChecks returns the interface for the alpha HealthChecks. 
func (gce *GCE) AlphaHealthChecks() AlphaHealthChecks { return gce.gceAlphaHealthChecks } + +// HttpHealthChecks returns the interface for the ga HttpHealthChecks. func (gce *GCE) HttpHealthChecks() HttpHealthChecks { return gce.gceHttpHealthChecks } + +// HttpsHealthChecks returns the interface for the ga HttpsHealthChecks. func (gce *GCE) HttpsHealthChecks() HttpsHealthChecks { return gce.gceHttpsHealthChecks } + +// InstanceGroups returns the interface for the ga InstanceGroups. func (gce *GCE) InstanceGroups() InstanceGroups { return gce.gceInstanceGroups } + +// Instances returns the interface for the ga Instances. func (gce *GCE) Instances() Instances { return gce.gceInstances } + +// BetaInstances returns the interface for the beta Instances. func (gce *GCE) BetaInstances() BetaInstances { return gce.gceBetaInstances } + +// AlphaInstances returns the interface for the alpha Instances. func (gce *GCE) AlphaInstances() AlphaInstances { return gce.gceAlphaInstances } + +// AlphaNetworkEndpointGroups returns the interface for the alpha NetworkEndpointGroups. func (gce *GCE) AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups { return gce.gceAlphaNetworkEndpointGroups } + +// Projects returns the interface for the ga Projects. func (gce *GCE) Projects() Projects { return gce.gceProjects } + +// Regions returns the interface for the ga Regions. func (gce *GCE) Regions() Regions { return gce.gceRegions } + +// Routes returns the interface for the ga Routes. func (gce *GCE) Routes() Routes { return gce.gceRoutes } + +// SslCertificates returns the interface for the ga SslCertificates. func (gce *GCE) SslCertificates() SslCertificates { return gce.gceSslCertificates } + +// TargetHttpProxies returns the interface for the ga TargetHttpProxies. func (gce *GCE) TargetHttpProxies() TargetHttpProxies { return gce.gceTargetHttpProxies } + +// TargetHttpsProxies returns the interface for the ga TargetHttpsProxies. 
func (gce *GCE) TargetHttpsProxies() TargetHttpsProxies { return gce.gceTargetHttpsProxies } + +// TargetPools returns the interface for the ga TargetPools. func (gce *GCE) TargetPools() TargetPools { return gce.gceTargetPools } + +// UrlMaps returns the interface for the ga UrlMaps. func (gce *GCE) UrlMaps() UrlMaps { return gce.gceUrlMaps } + +// Zones returns the interface for the ga Zones. func (gce *GCE) Zones() Zones { return gce.gceZones } @@ -350,130 +413,162 @@ type MockGCE struct { MockZones *MockZones } +// Addresses returns the interface for the ga Addresses. func (mock *MockGCE) Addresses() Addresses { return mock.MockAddresses } +// AlphaAddresses returns the interface for the alpha Addresses. func (mock *MockGCE) AlphaAddresses() AlphaAddresses { return mock.MockAlphaAddresses } +// BetaAddresses returns the interface for the beta Addresses. func (mock *MockGCE) BetaAddresses() BetaAddresses { return mock.MockBetaAddresses } +// GlobalAddresses returns the interface for the ga GlobalAddresses. func (mock *MockGCE) GlobalAddresses() GlobalAddresses { return mock.MockGlobalAddresses } +// BackendServices returns the interface for the ga BackendServices. func (mock *MockGCE) BackendServices() BackendServices { return mock.MockBackendServices } +// AlphaBackendServices returns the interface for the alpha BackendServices. func (mock *MockGCE) AlphaBackendServices() AlphaBackendServices { return mock.MockAlphaBackendServices } +// AlphaRegionBackendServices returns the interface for the alpha RegionBackendServices. func (mock *MockGCE) AlphaRegionBackendServices() AlphaRegionBackendServices { return mock.MockAlphaRegionBackendServices } +// Disks returns the interface for the ga Disks. func (mock *MockGCE) Disks() Disks { return mock.MockDisks } +// AlphaDisks returns the interface for the alpha Disks. func (mock *MockGCE) AlphaDisks() AlphaDisks { return mock.MockAlphaDisks } +// AlphaRegionDisks returns the interface for the alpha RegionDisks. 
func (mock *MockGCE) AlphaRegionDisks() AlphaRegionDisks { return mock.MockAlphaRegionDisks } +// Firewalls returns the interface for the ga Firewalls. func (mock *MockGCE) Firewalls() Firewalls { return mock.MockFirewalls } +// ForwardingRules returns the interface for the ga ForwardingRules. func (mock *MockGCE) ForwardingRules() ForwardingRules { return mock.MockForwardingRules } +// AlphaForwardingRules returns the interface for the alpha ForwardingRules. func (mock *MockGCE) AlphaForwardingRules() AlphaForwardingRules { return mock.MockAlphaForwardingRules } +// GlobalForwardingRules returns the interface for the ga GlobalForwardingRules. func (mock *MockGCE) GlobalForwardingRules() GlobalForwardingRules { return mock.MockGlobalForwardingRules } +// HealthChecks returns the interface for the ga HealthChecks. func (mock *MockGCE) HealthChecks() HealthChecks { return mock.MockHealthChecks } +// AlphaHealthChecks returns the interface for the alpha HealthChecks. func (mock *MockGCE) AlphaHealthChecks() AlphaHealthChecks { return mock.MockAlphaHealthChecks } +// HttpHealthChecks returns the interface for the ga HttpHealthChecks. func (mock *MockGCE) HttpHealthChecks() HttpHealthChecks { return mock.MockHttpHealthChecks } +// HttpsHealthChecks returns the interface for the ga HttpsHealthChecks. func (mock *MockGCE) HttpsHealthChecks() HttpsHealthChecks { return mock.MockHttpsHealthChecks } +// InstanceGroups returns the interface for the ga InstanceGroups. func (mock *MockGCE) InstanceGroups() InstanceGroups { return mock.MockInstanceGroups } +// Instances returns the interface for the ga Instances. func (mock *MockGCE) Instances() Instances { return mock.MockInstances } +// BetaInstances returns the interface for the beta Instances. func (mock *MockGCE) BetaInstances() BetaInstances { return mock.MockBetaInstances } +// AlphaInstances returns the interface for the alpha Instances. 
func (mock *MockGCE) AlphaInstances() AlphaInstances { return mock.MockAlphaInstances } +// AlphaNetworkEndpointGroups returns the interface for the alpha NetworkEndpointGroups. func (mock *MockGCE) AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups { return mock.MockAlphaNetworkEndpointGroups } +// Projects returns the interface for the ga Projects. func (mock *MockGCE) Projects() Projects { return mock.MockProjects } +// Regions returns the interface for the ga Regions. func (mock *MockGCE) Regions() Regions { return mock.MockRegions } +// Routes returns the interface for the ga Routes. func (mock *MockGCE) Routes() Routes { return mock.MockRoutes } +// SslCertificates returns the interface for the ga SslCertificates. func (mock *MockGCE) SslCertificates() SslCertificates { return mock.MockSslCertificates } +// TargetHttpProxies returns the interface for the ga TargetHttpProxies. func (mock *MockGCE) TargetHttpProxies() TargetHttpProxies { return mock.MockTargetHttpProxies } +// TargetHttpsProxies returns the interface for the ga TargetHttpsProxies. func (mock *MockGCE) TargetHttpsProxies() TargetHttpsProxies { return mock.MockTargetHttpsProxies } +// TargetPools returns the interface for the ga TargetPools. func (mock *MockGCE) TargetPools() TargetPools { return mock.MockTargetPools } +// UrlMaps returns the interface for the ga UrlMaps. func (mock *MockGCE) UrlMaps() UrlMaps { return mock.MockUrlMaps } +// Zones returns the interface for the ga Zones. 
func (mock *MockGCE) Zones() Zones { return mock.MockZones } diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go index f8dcd730b9f..7217d35c5d6 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen/main.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go @@ -30,8 +30,8 @@ import ( "text/template" "time" - "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" ) const ( @@ -167,6 +167,7 @@ type GCE struct { } {{range .All}} +// {{.WrapType}} returns the interface for the {{.Version}} {{.Service}}. func (gce *GCE) {{.WrapType}}() {{.WrapType}} { return gce.{{.Field}} } @@ -196,6 +197,7 @@ type MockGCE struct { {{- end}} } {{range .All}} +// {{.WrapType}} returns the interface for the {{.Version}} {{.Service}}. func (mock *MockGCE) {{.WrapType}}() {{.WrapType}} { return mock.{{.MockField}} } diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/method.go b/pkg/cloudprovider/providers/gce/cloud/meta/method.go index 5adf065fae4..c3a33d801d3 100644 --- a/pkg/cloudprovider/providers/gce/cloud/meta/method.go +++ b/pkg/cloudprovider/providers/gce/cloud/meta/method.go @@ -91,7 +91,7 @@ func newMethod(s *ServiceInfo, m reflect.Method) *Method { return ret } -// Method is used to generate the calling code non-standard methods. +// Method is used to generate the calling code for non-standard methods. type Method struct { *ServiceInfo m reflect.Method @@ -135,6 +135,7 @@ func (mr *Method) args(skip int, nameArgs bool, prefix []string) []string { return append(prefix, a...) } +// init the method, preforming some rudimentary static checking. func (mr *Method) init() { fType := mr.m.Func.Type() if fType.NumIn() < mr.argsSkip() { @@ -189,10 +190,14 @@ func (mr *Method) init() { } } +// Name is the name of the method. 
func (mr *Method) Name() string { return mr.m.Name } +// CallArgs is a list of comma separated "argN" used for calling the method. +// For example, if the method has two additional arguments, this will return +// "arg0, arg1". func (mr *Method) CallArgs() string { var args []string for i := mr.argsSkip(); i < mr.m.Func.Type().NumIn(); i++ { @@ -204,10 +209,12 @@ func (mr *Method) CallArgs() string { return fmt.Sprintf(", %s", strings.Join(args, ", ")) } +// MockHookName is the name of the hook function in the mock. func (mr *Method) MockHookName() string { return mr.m.Name + "Hook" } +// MockHook is the definition of the hook function. func (mr *Method) MockHook() string { args := mr.args(mr.argsSkip(), false, []string{ fmt.Sprintf("*%s", mr.MockWrapType()), @@ -220,6 +227,7 @@ func (mr *Method) MockHook() string { return fmt.Sprintf("%v func(%v) (*%v.%v, error)", mr.MockHookName(), strings.Join(args, ", "), mr.Version(), mr.ReturnType) } +// FcnArgs is the function signature for the definition of the method. func (mr *Method) FcnArgs() string { args := mr.args(mr.argsSkip(), true, []string{ "ctx context.Context", @@ -232,6 +240,7 @@ func (mr *Method) FcnArgs() string { return fmt.Sprintf("%v(%v) (*%v.%v, error)", mr.m.Name, strings.Join(args, ", "), mr.Version(), mr.ReturnType) } +// InterfaceFunc is the function declaration of the method in the interface. func (mr *Method) InterfaceFunc() string { args := mr.args(mr.argsSkip(), false, []string{"context.Context", "meta.Key"}) if mr.ReturnType == "Operation" { diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/service.go b/pkg/cloudprovider/providers/gce/cloud/meta/service.go index ffa3385075b..b2ba91c8ec5 100644 --- a/pkg/cloudprovider/providers/gce/cloud/meta/service.go +++ b/pkg/cloudprovider/providers/gce/cloud/meta/service.go @@ -220,6 +220,7 @@ type ServiceGroup struct { GA *ServiceInfo } +// Service returns any ServiceInfo object belonging to the ServiceGroup. 
func (sg *ServiceGroup) Service() string { switch { case sg.GA != nil: @@ -233,14 +234,17 @@ func (sg *ServiceGroup) Service() string { } } +// HasGA returns true if this object has a GA representation. func (sg *ServiceGroup) HasGA() bool { return sg.GA != nil } +// HasAlpha returns true if this object has a Alpha representation. func (sg *ServiceGroup) HasAlpha() bool { return sg.Alpha != nil } +// HasBeta returns true if this object has a Beta representation. func (sg *ServiceGroup) HasBeta() bool { return sg.Beta != nil } diff --git a/pkg/cloudprovider/providers/gce/cloud/project.go b/pkg/cloudprovider/providers/gce/cloud/project.go index 74299e4a23e..231e7cf916a 100644 --- a/pkg/cloudprovider/providers/gce/cloud/project.go +++ b/pkg/cloudprovider/providers/gce/cloud/project.go @@ -39,6 +39,7 @@ type SingleProjectRouter struct { ID string } +// ProjectID returns the project ID to be used for a call to the API. func (r *SingleProjectRouter) ProjectID(ctx context.Context, version meta.Version, service string) string { return r.ID } diff --git a/pkg/cloudprovider/providers/gce/cloud/ratelimit.go b/pkg/cloudprovider/providers/gce/cloud/ratelimit.go index 948f1d36d89..e38b8f7de3c 100644 --- a/pkg/cloudprovider/providers/gce/cloud/ratelimit.go +++ b/pkg/cloudprovider/providers/gce/cloud/ratelimit.go @@ -51,6 +51,7 @@ type RateLimiter interface { type NopRateLimiter struct { } +// Accept the operation to be rate limited. func (*NopRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error { // Rate limit polling of the Operation status to avoid hammering GCE // for the status of an operation. 
From adaaed102835e02957d1f3c951ca654cea1a432d Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Fri, 5 Jan 2018 11:55:58 -0800 Subject: [PATCH 696/794] Ignore golint failures for bad compute API names --- hack/.golint_failures | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/.golint_failures b/hack/.golint_failures index c7cd6939add..92f25955ec8 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -87,6 +87,7 @@ pkg/cloudprovider pkg/cloudprovider/providers/aws pkg/cloudprovider/providers/fake pkg/cloudprovider/providers/gce +pkg/cloudprovider/providers/gce/cloud pkg/cloudprovider/providers/openstack pkg/cloudprovider/providers/ovirt pkg/cloudprovider/providers/photon From c3e23b1b145455b521fb2d15c5ccf06aac520fb6 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Fri, 5 Jan 2018 13:36:21 -0800 Subject: [PATCH 697/794] Fix gofmt --- .../providers/gce/cloud/meta/meta.go | 72 +++++++++---------- .../providers/gce/cloud/utils_test.go | 4 +- 2 files changed, 38 insertions(+), 38 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/meta.go b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go index 3f60c00f412..e1f36904d01 100644 --- a/pkg/cloudprovider/providers/gce/cloud/meta/meta.go +++ b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go @@ -65,14 +65,14 @@ var AllVersions = []Version{ // AllServices are a list of all the services to generate code for. Keep // this list in lexiographical order by object type. 
var AllServices = []*ServiceInfo{ - &ServiceInfo{ + { Object: "Address", Service: "Addresses", Resource: "addresses", keyType: Regional, serviceType: reflect.TypeOf(&ga.AddressesService{}), }, - &ServiceInfo{ + { Object: "Address", Service: "Addresses", Resource: "addresses", @@ -80,7 +80,7 @@ var AllServices = []*ServiceInfo{ keyType: Regional, serviceType: reflect.TypeOf(&alpha.AddressesService{}), }, - &ServiceInfo{ + { Object: "Address", Service: "Addresses", Resource: "addresses", @@ -88,14 +88,14 @@ var AllServices = []*ServiceInfo{ keyType: Regional, serviceType: reflect.TypeOf(&beta.AddressesService{}), }, - &ServiceInfo{ + { Object: "Address", Service: "GlobalAddresses", Resource: "addresses", keyType: Global, serviceType: reflect.TypeOf(&ga.GlobalAddressesService{}), }, - &ServiceInfo{ + { Object: "BackendService", Service: "BackendServices", Resource: "backendServices", @@ -106,16 +106,16 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "BackendService", Service: "BackendServices", - Resource: "backendServices", + Resource: "backendServices", version: VersionAlpha, keyType: Global, serviceType: reflect.TypeOf(&alpha.BackendServicesService{}), additionalMethods: []string{"Update"}, }, - &ServiceInfo{ + { Object: "BackendService", Service: "RegionBackendServices", Resource: "backendServices", @@ -127,14 +127,14 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "Disk", Service: "Disks", Resource: "disks", keyType: Zonal, serviceType: reflect.TypeOf(&ga.DisksService{}), }, - &ServiceInfo{ + { Object: "Disk", Service: "Disks", Resource: "disks", @@ -142,7 +142,7 @@ var AllServices = []*ServiceInfo{ keyType: Zonal, serviceType: reflect.TypeOf(&alpha.DisksService{}), }, - &ServiceInfo{ + { Object: "Disk", Service: "RegionDisks", Resource: "disks", @@ -150,7 +150,7 @@ var AllServices = []*ServiceInfo{ keyType: Regional, serviceType: reflect.TypeOf(&alpha.DisksService{}), }, - &ServiceInfo{ + { 
Object: "Firewall", Service: "Firewalls", Resource: "firewalls", @@ -160,14 +160,14 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "ForwardingRule", Service: "ForwardingRules", Resource: "forwardingRules", keyType: Regional, serviceType: reflect.TypeOf(&ga.ForwardingRulesService{}), }, - &ServiceInfo{ + { Object: "ForwardingRule", Service: "ForwardingRules", Resource: "forwardingRules", @@ -175,7 +175,7 @@ var AllServices = []*ServiceInfo{ keyType: Regional, serviceType: reflect.TypeOf(&alpha.ForwardingRulesService{}), }, - &ServiceInfo{ + { Object: "ForwardingRule", Service: "GlobalForwardingRules", Resource: "forwardingRules", @@ -185,7 +185,7 @@ var AllServices = []*ServiceInfo{ "SetTarget", }, }, - &ServiceInfo{ + { Object: "HealthCheck", Service: "HealthChecks", Resource: "healthChecks", @@ -195,7 +195,7 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "HealthCheck", Service: "HealthChecks", Resource: "healthChecks", @@ -206,7 +206,7 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "HttpHealthCheck", Service: "HttpHealthChecks", Resource: "httpHealthChecks", @@ -216,7 +216,7 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "HttpsHealthCheck", Service: "HttpsHealthChecks", Resource: "httpsHealthChecks", @@ -226,7 +226,7 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "InstanceGroup", Service: "InstanceGroups", Resource: "instanceGroups", @@ -239,7 +239,7 @@ var AllServices = []*ServiceInfo{ "SetNamedPorts", }, }, - &ServiceInfo{ + { Object: "Instance", Service: "Instances", Resource: "instances", @@ -250,7 +250,7 @@ var AllServices = []*ServiceInfo{ "DetachDisk", }, }, - &ServiceInfo{ + { Object: "Instance", Service: "Instances", Resource: "instances", @@ -262,7 +262,7 @@ var AllServices = []*ServiceInfo{ "DetachDisk", }, }, - &ServiceInfo{ + { Object: "Instance", Service: "Instances", Resource: 
"instances", @@ -275,7 +275,7 @@ var AllServices = []*ServiceInfo{ "UpdateNetworkInterface", }, }, - &ServiceInfo{ + { Object: "NetworkEndpointGroup", Service: "NetworkEndpointGroups", Resource: "networkEndpointGroups", @@ -288,16 +288,16 @@ var AllServices = []*ServiceInfo{ }, options: AggregatedList, }, - &ServiceInfo{ - Object: "Project", - Service: "Projects", + { + Object: "Project", + Service: "Projects", Resource: "projects", - keyType: Global, + keyType: Global, // Generate only the stub with no methods. options: NoGet | NoList | NoInsert | NoDelete | CustomOps, serviceType: reflect.TypeOf(&ga.ProjectsService{}), }, - &ServiceInfo{ + { Object: "Region", Service: "Regions", Resource: "regions", @@ -305,21 +305,21 @@ var AllServices = []*ServiceInfo{ options: ReadOnly, serviceType: reflect.TypeOf(&ga.RegionsService{}), }, - &ServiceInfo{ + { Object: "Route", Service: "Routes", Resource: "routes", keyType: Global, serviceType: reflect.TypeOf(&ga.RoutesService{}), }, - &ServiceInfo{ + { Object: "SslCertificate", Service: "SslCertificates", Resource: "sslCertificates", keyType: Global, serviceType: reflect.TypeOf(&ga.SslCertificatesService{}), }, - &ServiceInfo{ + { Object: "TargetHttpProxy", Service: "TargetHttpProxies", Resource: "targetHttpProxies", @@ -329,7 +329,7 @@ var AllServices = []*ServiceInfo{ "SetUrlMap", }, }, - &ServiceInfo{ + { Object: "TargetHttpsProxy", Service: "TargetHttpsProxies", Resource: "targetHttpsProxies", @@ -340,7 +340,7 @@ var AllServices = []*ServiceInfo{ "SetUrlMap", }, }, - &ServiceInfo{ + { Object: "TargetPool", Service: "TargetPools", Resource: "targetPools", @@ -351,7 +351,7 @@ var AllServices = []*ServiceInfo{ "RemoveInstance", }, }, - &ServiceInfo{ + { Object: "UrlMap", Service: "UrlMaps", Resource: "urlMaps", @@ -361,7 +361,7 @@ var AllServices = []*ServiceInfo{ "Update", }, }, - &ServiceInfo{ + { Object: "Zone", Service: "Zones", Resource: "zones", diff --git a/pkg/cloudprovider/providers/gce/cloud/utils_test.go 
b/pkg/cloudprovider/providers/gce/cloud/utils_test.go index 823c8e73c88..562d0f35ba7 100644 --- a/pkg/cloudprovider/providers/gce/cloud/utils_test.go +++ b/pkg/cloudprovider/providers/gce/cloud/utils_test.go @@ -161,7 +161,7 @@ func TestCopyVisJSON(t *testing.T) { func TestSelfLink(t *testing.T) { t.Parallel() - for _, tc := range []struct{ + for _, tc := range []struct { ver meta.Version project string resource string @@ -189,7 +189,7 @@ func TestSelfLink(t *testing.T) { *meta.GlobalKey("key3"), "https://www.googleapis.com/compute/v1/projects/proj4/urlMaps/key3", }, - }{ + } { if link := SelfLink(tc.ver, tc.project, tc.resource, tc.key); link != tc.want { t.Errorf("SelfLink(%v, %q, %q, %v) = %v, want %q", tc.ver, tc.project, tc.resource, tc.key, link, tc.want) } From 5abf80718e3d5591204905c2b2b6d178f9b12104 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Tue, 9 Jan 2018 16:57:15 -0800 Subject: [PATCH 698/794] Remove glog dependency in the generator --- pkg/cloudprovider/providers/gce/cloud/gen/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go index 7217d35c5d6..ee48b374362 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen/main.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go @@ -25,12 +25,12 @@ import ( "flag" "fmt" "io" + "log" "os" "os/exec" "text/template" "time" - "github.com/golang/glog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" ) @@ -1131,7 +1131,7 @@ func main() { genUnitTestHeader(out) genUnitTestServices(out) default: - glog.Fatalf("Invalid -mode: %q", flags.mode) + log.Fatalf("Invalid -mode: %q", flags.mode) } if flags.gofmt { From ee0351320edf5c4fe7fa1e4d00e69676e2454401 Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Tue, 9 Jan 2018 17:00:35 -0800 Subject: [PATCH 699/794] cmd/kubectl: fix broken error formatting for run This patch adds missing value to a format string (%s) in --restart 
flag validation for "kubectl run". "kubectl run --restart=foo" was giving error: error: invalid restart policy: %!s(MISSING) Now it says: error: invalid restart policy: foo Signed-off-by: Ahmet Alp Balkan --- pkg/kubectl/cmd/run.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index 087e8d40b85..7b4128f57cb 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -501,7 +501,7 @@ func getRestartPolicy(cmd *cobra.Command, interactive bool) (api.RestartPolicy, case api.RestartPolicyNever: return api.RestartPolicyNever, nil } - return "", cmdutil.UsageErrorf(cmd, "invalid restart policy: %s") + return "", cmdutil.UsageErrorf(cmd, "invalid restart policy: %s", restart) } func verifyImagePullPolicy(cmd *cobra.Command) error { From e609cda0d2ee6e69ed31740349aaad85edf89ec9 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Tue, 9 Jan 2018 17:07:10 -0800 Subject: [PATCH 700/794] hack/ scripts to keep the generated code in sync --- hack/update-cloudprovider-gce.sh | 36 +++++++++++++++++++++++ hack/verify-cloudprovider-gce.sh | 50 ++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100755 hack/update-cloudprovider-gce.sh create mode 100755 hack/verify-cloudprovider-gce.sh diff --git a/hack/update-cloudprovider-gce.sh b/hack/update-cloudprovider-gce.sh new file mode 100755 index 00000000000..b7d606c95b4 --- /dev/null +++ b/hack/update-cloudprovider-gce.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" +GENERATOR="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen/main.go" + +GEN_GO="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen.go" +GEN_TEST_GO="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen_test.go" + +kube::golang::setup_env + +TMPFILE=$(mktemp verify-cloudprovider-gce-XXXX) +trap "{ rm -f ${TMPFILE}; }" EXIT + +go run "${GENERATOR}" > ${TMPFILE} +mv "${TMPFILE}" "${GEN_GO}" +go run "${GENERATOR}" -mode test > ${TMPFILE} +mv "${TMPFILE}" "${GEN_TEST_GO}" + +exit 0 diff --git a/hack/verify-cloudprovider-gce.sh b/hack/verify-cloudprovider-gce.sh new file mode 100755 index 00000000000..c7615d36592 --- /dev/null +++ b/hack/verify-cloudprovider-gce.sh @@ -0,0 +1,50 @@ +#!/bin/bash +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. 
+source "${KUBE_ROOT}/hack/lib/init.sh" +GENERATOR="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen/main.go" + +GEN_GO="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen.go" +GEN_TEST_GO="${KUBE_ROOT}/pkg/cloudprovider/providers/gce/cloud/gen_test.go" + +kube::golang::setup_env + +TMPFILE=$(mktemp verify-cloudprovider-gce-XXXX) +trap "{ rm -f ${TMPFILE}; }" EXIT + +go run "${GENERATOR}" > ${TMPFILE} +if ! diff "${TMPFILE}" "${GEN_GO}"; then + echo "Generated file ${GEN_GO} needs to be updated (run hack/update-cloudprovider-gce.sh)" + echo + diff -u "${TMPFILE}" "${GEN_GO}" || true + exit 1 +fi + +go run "${GENERATOR}" -mode test > ${TMPFILE} +if ! diff "${TMPFILE}" "${GEN_TEST_GO}"; then + echo "Generated file ${GEN_TEST_GO} needs to be updated (run hack/update-cloudprovider-gce.sh)" + echo + diff -u "${TMPFILE}" "${GEN_TEST_GO}" || true + exit 1 +fi + +exit 0 From 8cdfe362671dc6a7b93f8bfec0e813ee66f0604b Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Tue, 9 Jan 2018 17:07:22 -0800 Subject: [PATCH 701/794] Update generated code to stable order --- .../providers/gce/cloud/gen/main.go | 10 +- .../providers/gce/cloud/gen_test.go | 1928 ++++++++--------- 2 files changed, 973 insertions(+), 965 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go index ee48b374362..ba0dd9cc2f0 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen/main.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go @@ -28,6 +28,7 @@ import ( "log" "os" "os/exec" + "sort" "text/template" "time" @@ -1110,7 +1111,14 @@ func Test{{.Service}}Group(t *testing.T) { } ` tmpl := template.Must(template.New("unittest").Parse(text)) - for _, s := range meta.AllServicesByGroup { + // Sort keys so the output will be stable. 
+ var keys []string + for k, _ := range meta.AllServicesByGroup { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + s := meta.AllServicesByGroup[k] if err := tmpl.Execute(wr, s); err != nil { panic(err) } diff --git a/pkg/cloudprovider/providers/gce/cloud/gen_test.go b/pkg/cloudprovider/providers/gce/cloud/gen_test.go index cbe5d9938d3..ee7cb103753 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen_test.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen_test.go @@ -34,6 +34,238 @@ import ( const location = "location" +func TestAddressesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + keyBeta := meta.RegionalKey("key-beta", "location") + key = keyBeta + keyGA := meta.RegionalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaAddresses().Get(ctx, *key); err == nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BetaAddresses().Get(ctx, *key); err == nil { + t.Errorf("BetaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Addresses().Get(ctx, *key); err == nil { + t.Errorf("Addresses().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. 
+ { + obj := &alpha.Address{} + if err := mock.AlphaAddresses().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &beta.Address{} + if err := mock.BetaAddresses().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("BetaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Address{} + if err := mock.Addresses().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Addresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaAddresses().Get(ctx, *key); err != nil { + t.Errorf("AlphaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BetaAddresses().Get(ctx, *key); err != nil { + t.Errorf("BetaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Addresses().Get(ctx, *key); err != nil { + t.Errorf("Addresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaAddresses.Objects[*keyAlpha] = mock.MockAlphaAddresses.Obj(&alpha.Address{Name: keyAlpha.Name}) + mock.MockBetaAddresses.Objects[*keyBeta] = mock.MockBetaAddresses.Obj(&beta.Address{Name: keyBeta.Name}) + mock.MockAddresses.Objects[*keyGA] = mock.MockAddresses.Obj(&ga.Address{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-beta": true, + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaAddresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BetaAddresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("BetaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Addresses().List(ctx, location, filter.None) + if err != nil { + t.Errorf("Addresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err != nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err != nil { + t.Errorf("Addresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err == nil { + t.Errorf("BetaAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Addresses().Delete(ctx, *keyGA); err == nil { + t.Errorf("Addresses().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestBackendServicesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.GlobalKey("key-alpha") + key = keyAlpha + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaBackendServices().Get(ctx, *key); err == nil { + t.Errorf("AlphaBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BackendServices().Get(ctx, *key); err == nil { + t.Errorf("BackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.BackendService{} + if err := mock.AlphaBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.BackendService{} + if err := mock.BackendServices().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("BackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaBackendServices().Get(ctx, *key); err != nil { + t.Errorf("AlphaBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BackendServices().Get(ctx, *key); err != nil { + t.Errorf("BackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. 
+ mock.MockAlphaBackendServices.Objects[*keyAlpha] = mock.MockAlphaBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) + mock.MockBackendServices.Objects[*keyGA] = mock.MockBackendServices.Obj(&ga.BackendService{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaBackendServices().List(ctx, filter.None) + if err != nil { + t.Errorf("AlphaBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BackendServices().List(ctx, filter.None) + if err != nil { + t.Errorf("BackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BackendServices().Delete(ctx, *keyGA); err != nil { + t.Errorf("BackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaBackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BackendServices().Delete(ctx, *keyGA); err == nil { + t.Errorf("BackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } +} + func TestDisksGroup(t *testing.T) { t.Parallel() @@ -194,749 +426,6 @@ func TestFirewallsGroup(t *testing.T) { } } -func TestInstancesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyAlpha := meta.ZonalKey("key-alpha", "location") - key = keyAlpha - keyBeta := meta.ZonalKey("key-beta", "location") - key = keyBeta - keyGA := meta.ZonalKey("key-ga", "location") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.AlphaInstances().Get(ctx, *key); err == nil { - t.Errorf("AlphaInstances().Get(%v, %v) = _, nil; want error", ctx, key) - } - if _, err := mock.BetaInstances().Get(ctx, *key); err == nil { - t.Errorf("BetaInstances().Get(%v, %v) = _, nil; want error", ctx, key) - } - if _, err := mock.Instances().Get(ctx, *key); err == nil { - t.Errorf("Instances().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &alpha.Instance{} - if err := mock.AlphaInstances().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - { - obj := &beta.Instance{} - if err := mock.BetaInstances().Insert(ctx, *keyBeta, obj); err != nil { - t.Errorf("BetaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - { - obj := &ga.Instance{} - if err := mock.Instances().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("Instances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. 
- if obj, err := mock.AlphaInstances().Get(ctx, *key); err != nil { - t.Errorf("AlphaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - if obj, err := mock.BetaInstances().Get(ctx, *key); err != nil { - t.Errorf("BetaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - if obj, err := mock.Instances().Get(ctx, *key); err != nil { - t.Errorf("Instances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockAlphaInstances.Objects[*keyAlpha] = mock.MockAlphaInstances.Obj(&alpha.Instance{Name: keyAlpha.Name}) - mock.MockBetaInstances.Objects[*keyBeta] = mock.MockBetaInstances.Obj(&beta.Instance{Name: keyBeta.Name}) - mock.MockInstances.Objects[*keyGA] = mock.MockInstances.Obj(&ga.Instance{Name: keyGA.Name}) - want := map[string]bool{ - "key-alpha": true, - "key-beta": true, - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.AlphaInstances().List(ctx, location, filter.None) - if err != nil { - t.Errorf("AlphaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) - } - } - } - { - objs, err := mock.BetaInstances().List(ctx, location, filter.None) - if err != nil { - t.Errorf("BetaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) - } - } - } - { - objs, err := mock.Instances().List(ctx, location, filter.None) - if err != nil { - t.Errorf("Instances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - 
got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - if err := mock.BetaInstances().Delete(ctx, *keyBeta); err != nil { - t.Errorf("BetaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - if err := mock.Instances().Delete(ctx, *keyGA); err != nil { - t.Errorf("Instances().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaInstances().Delete(%v, %v) = nil; want error", ctx, key) - } - if err := mock.BetaInstances().Delete(ctx, *keyBeta); err == nil { - t.Errorf("BetaInstances().Delete(%v, %v) = nil; want error", ctx, key) - } - if err := mock.Instances().Delete(ctx, *keyGA); err == nil { - t.Errorf("Instances().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestProjectsGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - - // Insert. - - // Get across versions. - - // List. - mock.MockProjects.Objects[*keyGA] = mock.MockProjects.Obj(&ga.Project{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - - // Delete across versions. - - // Delete not found. -} - -func TestRoutesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. 
- if _, err := mock.Routes().Get(ctx, *key); err == nil { - t.Errorf("Routes().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &ga.Route{} - if err := mock.Routes().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("Routes().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.Routes().Get(ctx, *key); err != nil { - t.Errorf("Routes().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockRoutes.Objects[*keyGA] = mock.MockRoutes.Obj(&ga.Route{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.Routes().List(ctx, filter.None) - if err != nil { - t.Errorf("Routes().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaRoutes().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.Routes().Delete(ctx, *keyGA); err != nil { - t.Errorf("Routes().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.Routes().Delete(ctx, *keyGA); err == nil { - t.Errorf("Routes().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestTargetPoolsGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.RegionalKey("key-ga", "location") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.TargetPools().Get(ctx, *key); err == nil { - t.Errorf("TargetPools().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. 
- { - obj := &ga.TargetPool{} - if err := mock.TargetPools().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("TargetPools().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.TargetPools().Get(ctx, *key); err != nil { - t.Errorf("TargetPools().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockTargetPools.Objects[*keyGA] = mock.MockTargetPools.Obj(&ga.TargetPool{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.TargetPools().List(ctx, location, filter.None) - if err != nil { - t.Errorf("TargetPools().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaTargetPools().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.TargetPools().Delete(ctx, *keyGA); err != nil { - t.Errorf("TargetPools().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.TargetPools().Delete(ctx, *keyGA); err == nil { - t.Errorf("TargetPools().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestGlobalForwardingRulesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.GlobalForwardingRules().Get(ctx, *key); err == nil { - t.Errorf("GlobalForwardingRules().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. 
- { - obj := &ga.ForwardingRule{} - if err := mock.GlobalForwardingRules().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("GlobalForwardingRules().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.GlobalForwardingRules().Get(ctx, *key); err != nil { - t.Errorf("GlobalForwardingRules().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockGlobalForwardingRules.Objects[*keyGA] = mock.MockGlobalForwardingRules.Obj(&ga.ForwardingRule{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.GlobalForwardingRules().List(ctx, filter.None) - if err != nil { - t.Errorf("GlobalForwardingRules().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaGlobalForwardingRules().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err != nil { - t.Errorf("GlobalForwardingRules().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err == nil { - t.Errorf("GlobalForwardingRules().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestNetworkEndpointGroupsGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyAlpha := meta.ZonalKey("key-alpha", "location") - key = keyAlpha - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err == nil { - t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. 
- { - obj := &alpha.NetworkEndpointGroup{} - if err := mock.AlphaNetworkEndpointGroups().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaNetworkEndpointGroups().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err != nil { - t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockAlphaNetworkEndpointGroups.Objects[*keyAlpha] = mock.MockAlphaNetworkEndpointGroups.Obj(&alpha.NetworkEndpointGroup{Name: keyAlpha.Name}) - want := map[string]bool{ - "key-alpha": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.AlphaNetworkEndpointGroups().List(ctx, location, filter.None) - if err != nil { - t.Errorf("AlphaNetworkEndpointGroups().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaNetworkEndpointGroups().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestTargetHttpProxiesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.TargetHttpProxies().Get(ctx, *key); err == nil { - t.Errorf("TargetHttpProxies().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. 
- { - obj := &ga.TargetHttpProxy{} - if err := mock.TargetHttpProxies().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("TargetHttpProxies().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.TargetHttpProxies().Get(ctx, *key); err != nil { - t.Errorf("TargetHttpProxies().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockTargetHttpProxies.Objects[*keyGA] = mock.MockTargetHttpProxies.Obj(&ga.TargetHttpProxy{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.TargetHttpProxies().List(ctx, filter.None) - if err != nil { - t.Errorf("TargetHttpProxies().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaTargetHttpProxies().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err != nil { - t.Errorf("TargetHttpProxies().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err == nil { - t.Errorf("TargetHttpProxies().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestUrlMapsGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.UrlMaps().Get(ctx, *key); err == nil { - t.Errorf("UrlMaps().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. 
- { - obj := &ga.UrlMap{} - if err := mock.UrlMaps().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("UrlMaps().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.UrlMaps().Get(ctx, *key); err != nil { - t.Errorf("UrlMaps().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockUrlMaps.Objects[*keyGA] = mock.MockUrlMaps.Obj(&ga.UrlMap{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.UrlMaps().List(ctx, filter.None) - if err != nil { - t.Errorf("UrlMaps().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaUrlMaps().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.UrlMaps().Delete(ctx, *keyGA); err != nil { - t.Errorf("UrlMaps().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.UrlMaps().Delete(ctx, *keyGA); err == nil { - t.Errorf("UrlMaps().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestZonesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.Zones().Get(ctx, *key); err == nil { - t.Errorf("Zones().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - - // Get across versions. - - // List. - mock.MockZones.Objects[*keyGA] = mock.MockZones.Obj(&ga.Zone{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. 
- { - objs, err := mock.Zones().List(ctx, filter.None) - if err != nil { - t.Errorf("Zones().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaZones().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - - // Delete not found. -} - -func TestGlobalAddressesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.GlobalAddresses().Get(ctx, *key); err == nil { - t.Errorf("GlobalAddresses().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &ga.Address{} - if err := mock.GlobalAddresses().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("GlobalAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.GlobalAddresses().Get(ctx, *key); err != nil { - t.Errorf("GlobalAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. - mock.MockGlobalAddresses.Objects[*keyGA] = mock.MockGlobalAddresses.Obj(&ga.Address{Name: keyGA.Name}) - want := map[string]bool{ - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.GlobalAddresses().List(ctx, filter.None) - if err != nil { - t.Errorf("GlobalAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaGlobalAddresses().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. 
- if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err != nil { - t.Errorf("GlobalAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err == nil { - t.Errorf("GlobalAddresses().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestBackendServicesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyAlpha := meta.GlobalKey("key-alpha") - key = keyAlpha - keyGA := meta.GlobalKey("key-ga") - key = keyGA - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.AlphaBackendServices().Get(ctx, *key); err == nil { - t.Errorf("AlphaBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) - } - if _, err := mock.BackendServices().Get(ctx, *key); err == nil { - t.Errorf("BackendServices().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &alpha.BackendService{} - if err := mock.AlphaBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - { - obj := &ga.BackendService{} - if err := mock.BackendServices().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("BackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.AlphaBackendServices().Get(ctx, *key); err != nil { - t.Errorf("AlphaBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - if obj, err := mock.BackendServices().Get(ctx, *key); err != nil { - t.Errorf("BackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. 
- mock.MockAlphaBackendServices.Objects[*keyAlpha] = mock.MockAlphaBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) - mock.MockBackendServices.Objects[*keyGA] = mock.MockBackendServices.Obj(&ga.BackendService{Name: keyGA.Name}) - want := map[string]bool{ - "key-alpha": true, - "key-ga": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.AlphaBackendServices().List(ctx, filter.None) - if err != nil { - t.Errorf("AlphaBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) - } - } - } - { - objs, err := mock.BackendServices().List(ctx, filter.None) - if err != nil { - t.Errorf("BackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - if err := mock.BackendServices().Delete(ctx, *keyGA); err != nil { - t.Errorf("BackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. 
- if err := mock.AlphaBackendServices().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaBackendServices().Delete(%v, %v) = nil; want error", ctx, key) - } - if err := mock.BackendServices().Delete(ctx, *keyGA); err == nil { - t.Errorf("BackendServices().Delete(%v, %v) = nil; want error", ctx, key) - } -} - func TestForwardingRulesGroup(t *testing.T) { t.Parallel() @@ -1035,7 +524,7 @@ func TestForwardingRulesGroup(t *testing.T) { } } -func TestRegionsGroup(t *testing.T) { +func TestGlobalAddressesGroup(t *testing.T) { t.Parallel() ctx := context.Background() @@ -1048,295 +537,114 @@ func TestRegionsGroup(t *testing.T) { _, _, _ = ctx, mock, key // Get not found. - if _, err := mock.Regions().Get(ctx, *key); err == nil { - t.Errorf("Regions().Get(%v, %v) = _, nil; want error", ctx, key) + if _, err := mock.GlobalAddresses().Get(ctx, *key); err == nil { + t.Errorf("GlobalAddresses().Get(%v, %v) = _, nil; want error", ctx, key) } // Insert. + { + obj := &ga.Address{} + if err := mock.GlobalAddresses().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("GlobalAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } // Get across versions. + if obj, err := mock.GlobalAddresses().Get(ctx, *key); err != nil { + t.Errorf("GlobalAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } // List. - mock.MockRegions.Objects[*keyGA] = mock.MockRegions.Obj(&ga.Region{Name: keyGA.Name}) + mock.MockGlobalAddresses.Objects[*keyGA] = mock.MockGlobalAddresses.Obj(&ga.Address{Name: keyGA.Name}) want := map[string]bool{ "key-ga": true, } _ = want // ignore unused variables. 
{ - objs, err := mock.Regions().List(ctx, filter.None) + objs, err := mock.GlobalAddresses().List(ctx, filter.None) if err != nil { - t.Errorf("Regions().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + t.Errorf("GlobalAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) } else { got := map[string]bool{} for _, obj := range objs { got[obj.Name] = true } if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaRegions().List(); got %+v, want %+v", got, want) + t.Errorf("AlphaGlobalAddresses().List(); got %+v, want %+v", got, want) } } } // Delete across versions. + if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err != nil { + t.Errorf("GlobalAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + } // Delete not found. + if err := mock.GlobalAddresses().Delete(ctx, *keyGA); err == nil { + t.Errorf("GlobalAddresses().Delete(%v, %v) = nil; want error", ctx, key) + } } -func TestAddressesGroup(t *testing.T) { +func TestGlobalForwardingRulesGroup(t *testing.T) { t.Parallel() ctx := context.Background() mock := NewMockGCE() var key *meta.Key - keyAlpha := meta.RegionalKey("key-alpha", "location") - key = keyAlpha - keyBeta := meta.RegionalKey("key-beta", "location") - key = keyBeta - keyGA := meta.RegionalKey("key-ga", "location") + keyGA := meta.GlobalKey("key-ga") key = keyGA // Ignore unused variables. _, _, _ = ctx, mock, key // Get not found. 
- if _, err := mock.AlphaAddresses().Get(ctx, *key); err == nil { - t.Errorf("AlphaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) - } - if _, err := mock.BetaAddresses().Get(ctx, *key); err == nil { - t.Errorf("BetaAddresses().Get(%v, %v) = _, nil; want error", ctx, key) - } - if _, err := mock.Addresses().Get(ctx, *key); err == nil { - t.Errorf("Addresses().Get(%v, %v) = _, nil; want error", ctx, key) + if _, err := mock.GlobalForwardingRules().Get(ctx, *key); err == nil { + t.Errorf("GlobalForwardingRules().Get(%v, %v) = _, nil; want error", ctx, key) } // Insert. { - obj := &alpha.Address{} - if err := mock.AlphaAddresses().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - { - obj := &beta.Address{} - if err := mock.BetaAddresses().Insert(ctx, *keyBeta, obj); err != nil { - t.Errorf("BetaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - { - obj := &ga.Address{} - if err := mock.Addresses().Insert(ctx, *keyGA, obj); err != nil { - t.Errorf("Addresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + obj := &ga.ForwardingRule{} + if err := mock.GlobalForwardingRules().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("GlobalForwardingRules().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) } } // Get across versions. 
- if obj, err := mock.AlphaAddresses().Get(ctx, *key); err != nil { - t.Errorf("AlphaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - if obj, err := mock.BetaAddresses().Get(ctx, *key); err != nil { - t.Errorf("BetaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - if obj, err := mock.Addresses().Get(ctx, *key); err != nil { - t.Errorf("Addresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + if obj, err := mock.GlobalForwardingRules().Get(ctx, *key); err != nil { + t.Errorf("GlobalForwardingRules().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) } // List. - mock.MockAlphaAddresses.Objects[*keyAlpha] = mock.MockAlphaAddresses.Obj(&alpha.Address{Name: keyAlpha.Name}) - mock.MockBetaAddresses.Objects[*keyBeta] = mock.MockBetaAddresses.Obj(&beta.Address{Name: keyBeta.Name}) - mock.MockAddresses.Objects[*keyGA] = mock.MockAddresses.Obj(&ga.Address{Name: keyGA.Name}) + mock.MockGlobalForwardingRules.Objects[*keyGA] = mock.MockGlobalForwardingRules.Obj(&ga.ForwardingRule{Name: keyGA.Name}) want := map[string]bool{ - "key-alpha": true, - "key-beta": true, - "key-ga": true, + "key-ga": true, } _ = want // ignore unused variables. 
{ - objs, err := mock.AlphaAddresses().List(ctx, location, filter.None) + objs, err := mock.GlobalForwardingRules().List(ctx, filter.None) if err != nil { - t.Errorf("AlphaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + t.Errorf("GlobalForwardingRules().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) } else { got := map[string]bool{} for _, obj := range objs { got[obj.Name] = true } if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) - } - } - } - { - objs, err := mock.BetaAddresses().List(ctx, location, filter.None) - if err != nil { - t.Errorf("BetaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) - } - } - } - { - objs, err := mock.Addresses().List(ctx, location, filter.None) - if err != nil { - t.Errorf("Addresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want) + t.Errorf("AlphaGlobalForwardingRules().List(); got %+v, want %+v", got, want) } } } // Delete across versions. 
- if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err != nil { - t.Errorf("BetaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - if err := mock.Addresses().Delete(ctx, *keyGA); err != nil { - t.Errorf("Addresses().Delete(%v, %v) = %v; want nil", ctx, key, err) + if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err != nil { + t.Errorf("GlobalForwardingRules().Delete(%v, %v) = %v; want nil", ctx, key, err) } // Delete not found. - if err := mock.AlphaAddresses().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaAddresses().Delete(%v, %v) = nil; want error", ctx, key) - } - if err := mock.BetaAddresses().Delete(ctx, *keyBeta); err == nil { - t.Errorf("BetaAddresses().Delete(%v, %v) = nil; want error", ctx, key) - } - if err := mock.Addresses().Delete(ctx, *keyGA); err == nil { - t.Errorf("Addresses().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestRegionBackendServicesGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyAlpha := meta.RegionalKey("key-alpha", "location") - key = keyAlpha - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err == nil { - t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &alpha.BackendService{} - if err := mock.AlphaRegionBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaRegionBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err != nil { - t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. 
- mock.MockAlphaRegionBackendServices.Objects[*keyAlpha] = mock.MockAlphaRegionBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) - want := map[string]bool{ - "key-alpha": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.AlphaRegionBackendServices().List(ctx, location, filter.None) - if err != nil { - t.Errorf("AlphaRegionBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaRegionBackendServices().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = nil; want error", ctx, key) - } -} - -func TestRegionDisksGroup(t *testing.T) { - t.Parallel() - - ctx := context.Background() - mock := NewMockGCE() - - var key *meta.Key - keyAlpha := meta.RegionalKey("key-alpha", "location") - key = keyAlpha - // Ignore unused variables. - _, _, _ = ctx, mock, key - - // Get not found. - if _, err := mock.AlphaRegionDisks().Get(ctx, *key); err == nil { - t.Errorf("AlphaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key) - } - - // Insert. - { - obj := &alpha.Disk{} - if err := mock.AlphaRegionDisks().Insert(ctx, *keyAlpha, obj); err != nil { - t.Errorf("AlphaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) - } - } - - // Get across versions. - if obj, err := mock.AlphaRegionDisks().Get(ctx, *key); err != nil { - t.Errorf("AlphaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) - } - - // List. 
- mock.MockAlphaRegionDisks.Objects[*keyAlpha] = mock.MockAlphaRegionDisks.Obj(&alpha.Disk{Name: keyAlpha.Name}) - want := map[string]bool{ - "key-alpha": true, - } - _ = want // ignore unused variables. - { - objs, err := mock.AlphaRegionDisks().List(ctx, location, filter.None) - if err != nil { - t.Errorf("AlphaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) - } else { - got := map[string]bool{} - for _, obj := range objs { - got[obj.Name] = true - } - if !reflect.DeepEqual(got, want) { - t.Errorf("AlphaRegionDisks().List(); got %+v, want %+v", got, want) - } - } - } - - // Delete across versions. - if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err != nil { - t.Errorf("AlphaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, key, err) - } - - // Delete not found. - if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err == nil { - t.Errorf("AlphaRegionDisks().Delete(%v, %v) = nil; want error", ctx, key) + if err := mock.GlobalForwardingRules().Delete(ctx, *keyGA); err == nil { + t.Errorf("GlobalForwardingRules().Delete(%v, %v) = nil; want error", ctx, key) } } @@ -1624,6 +932,465 @@ func TestInstanceGroupsGroup(t *testing.T) { } } +func TestInstancesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.ZonalKey("key-alpha", "location") + key = keyAlpha + keyBeta := meta.ZonalKey("key-beta", "location") + key = keyBeta + keyGA := meta.ZonalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. 
+ if _, err := mock.AlphaInstances().Get(ctx, *key); err == nil { + t.Errorf("AlphaInstances().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.BetaInstances().Get(ctx, *key); err == nil { + t.Errorf("BetaInstances().Get(%v, %v) = _, nil; want error", ctx, key) + } + if _, err := mock.Instances().Get(ctx, *key); err == nil { + t.Errorf("Instances().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.Instance{} + if err := mock.AlphaInstances().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &beta.Instance{} + if err := mock.BetaInstances().Insert(ctx, *keyBeta, obj); err != nil { + t.Errorf("BetaInstances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + { + obj := &ga.Instance{} + if err := mock.Instances().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Instances().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaInstances().Get(ctx, *key); err != nil { + t.Errorf("AlphaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.BetaInstances().Get(ctx, *key); err != nil { + t.Errorf("BetaInstances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + if obj, err := mock.Instances().Get(ctx, *key); err != nil { + t.Errorf("Instances().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaInstances.Objects[*keyAlpha] = mock.MockAlphaInstances.Obj(&alpha.Instance{Name: keyAlpha.Name}) + mock.MockBetaInstances.Objects[*keyBeta] = mock.MockBetaInstances.Obj(&beta.Instance{Name: keyBeta.Name}) + mock.MockInstances.Objects[*keyGA] = mock.MockInstances.Obj(&ga.Instance{Name: keyGA.Name}) + want := map[string]bool{ + "key-alpha": true, + "key-beta": true, + "key-ga": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaInstances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.BetaInstances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("BetaInstances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + { + objs, err := mock.Instances().List(ctx, location, filter.None) + if err != nil { + t.Errorf("Instances().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.BetaInstances().Delete(ctx, *keyBeta); err != nil { + t.Errorf("BetaInstances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + if err := mock.Instances().Delete(ctx, *keyGA); err != nil { + t.Errorf("Instances().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.AlphaInstances().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaInstances().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.BetaInstances().Delete(ctx, *keyBeta); err == nil { + t.Errorf("BetaInstances().Delete(%v, %v) = nil; want error", ctx, key) + } + if err := mock.Instances().Delete(ctx, *keyGA); err == nil { + t.Errorf("Instances().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestNetworkEndpointGroupsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.ZonalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err == nil { + t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &alpha.NetworkEndpointGroup{} + if err := mock.AlphaNetworkEndpointGroups().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaNetworkEndpointGroups().Get(ctx, *key); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaNetworkEndpointGroups.Objects[*keyAlpha] = mock.MockAlphaNetworkEndpointGroups.Obj(&alpha.NetworkEndpointGroup{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. 
+ { + objs, err := mock.AlphaNetworkEndpointGroups().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaNetworkEndpointGroups().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaNetworkEndpointGroups().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestProjectsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + + // Insert. + + // Get across versions. + + // List. + mock.MockProjects.Objects[*keyGA] = mock.MockProjects.Obj(&ga.Project{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + + // Delete across versions. + + // Delete not found. +} + +func TestRegionBackendServicesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err == nil { + t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. 
+ { + obj := &alpha.BackendService{} + if err := mock.AlphaRegionBackendServices().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaRegionBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaRegionBackendServices().Get(ctx, *key); err != nil { + t.Errorf("AlphaRegionBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaRegionBackendServices.Objects[*keyAlpha] = mock.MockAlphaRegionBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaRegionBackendServices().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaRegionBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegionBackendServices().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaRegionBackendServices().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaRegionBackendServices().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestRegionDisksGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyAlpha := meta.RegionalKey("key-alpha", "location") + key = keyAlpha + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.AlphaRegionDisks().Get(ctx, *key); err == nil { + t.Errorf("AlphaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. 
+ { + obj := &alpha.Disk{} + if err := mock.AlphaRegionDisks().Insert(ctx, *keyAlpha, obj); err != nil { + t.Errorf("AlphaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.AlphaRegionDisks().Get(ctx, *key); err != nil { + t.Errorf("AlphaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockAlphaRegionDisks.Objects[*keyAlpha] = mock.MockAlphaRegionDisks.Obj(&alpha.Disk{Name: keyAlpha.Name}) + want := map[string]bool{ + "key-alpha": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.AlphaRegionDisks().List(ctx, location, filter.None) + if err != nil { + t.Errorf("AlphaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegionDisks().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err != nil { + t.Errorf("AlphaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.AlphaRegionDisks().Delete(ctx, *keyAlpha); err == nil { + t.Errorf("AlphaRegionDisks().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestRegionsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Regions().Get(ctx, *key); err == nil { + t.Errorf("Regions().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + + // Get across versions. + + // List. 
+ mock.MockRegions.Objects[*keyGA] = mock.MockRegions.Obj(&ga.Region{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Regions().List(ctx, filter.None) + if err != nil { + t.Errorf("Regions().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRegions().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + + // Delete not found. +} + +func TestRoutesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Routes().Get(ctx, *key); err == nil { + t.Errorf("Routes().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.Route{} + if err := mock.Routes().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("Routes().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.Routes().Get(ctx, *key); err != nil { + t.Errorf("Routes().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockRoutes.Objects[*keyGA] = mock.MockRoutes.Obj(&ga.Route{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Routes().List(ctx, filter.None) + if err != nil { + t.Errorf("Routes().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaRoutes().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. 
+ if err := mock.Routes().Delete(ctx, *keyGA); err != nil { + t.Errorf("Routes().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.Routes().Delete(ctx, *keyGA); err == nil { + t.Errorf("Routes().Delete(%v, %v) = nil; want error", ctx, key) + } +} + func TestSslCertificatesGroup(t *testing.T) { t.Parallel() @@ -1686,6 +1453,68 @@ func TestSslCertificatesGroup(t *testing.T) { } } +func TestTargetHttpProxiesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.TargetHttpProxies().Get(ctx, *key); err == nil { + t.Errorf("TargetHttpProxies().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.TargetHttpProxy{} + if err := mock.TargetHttpProxies().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("TargetHttpProxies().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.TargetHttpProxies().Get(ctx, *key); err != nil { + t.Errorf("TargetHttpProxies().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockTargetHttpProxies.Objects[*keyGA] = mock.MockTargetHttpProxies.Obj(&ga.TargetHttpProxy{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.TargetHttpProxies().List(ctx, filter.None) + if err != nil { + t.Errorf("TargetHttpProxies().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaTargetHttpProxies().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. 
+ if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err != nil { + t.Errorf("TargetHttpProxies().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.TargetHttpProxies().Delete(ctx, *keyGA); err == nil { + t.Errorf("TargetHttpProxies().Delete(%v, %v) = nil; want error", ctx, key) + } +} + func TestTargetHttpsProxiesGroup(t *testing.T) { t.Parallel() @@ -1747,3 +1576,174 @@ func TestTargetHttpsProxiesGroup(t *testing.T) { t.Errorf("TargetHttpsProxies().Delete(%v, %v) = nil; want error", ctx, key) } } + +func TestTargetPoolsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.RegionalKey("key-ga", "location") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.TargetPools().Get(ctx, *key); err == nil { + t.Errorf("TargetPools().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.TargetPool{} + if err := mock.TargetPools().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("TargetPools().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.TargetPools().Get(ctx, *key); err != nil { + t.Errorf("TargetPools().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockTargetPools.Objects[*keyGA] = mock.MockTargetPools.Obj(&ga.TargetPool{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.TargetPools().List(ctx, location, filter.None) + if err != nil { + t.Errorf("TargetPools().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaTargetPools().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. 
+ if err := mock.TargetPools().Delete(ctx, *keyGA); err != nil { + t.Errorf("TargetPools().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. + if err := mock.TargetPools().Delete(ctx, *keyGA); err == nil { + t.Errorf("TargetPools().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestUrlMapsGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.UrlMaps().Get(ctx, *key); err == nil { + t.Errorf("UrlMaps().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + { + obj := &ga.UrlMap{} + if err := mock.UrlMaps().Insert(ctx, *keyGA, obj); err != nil { + t.Errorf("UrlMaps().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err) + } + } + + // Get across versions. + if obj, err := mock.UrlMaps().Get(ctx, *key); err != nil { + t.Errorf("UrlMaps().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err) + } + + // List. + mock.MockUrlMaps.Objects[*keyGA] = mock.MockUrlMaps.Obj(&ga.UrlMap{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.UrlMaps().List(ctx, filter.None) + if err != nil { + t.Errorf("UrlMaps().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaUrlMaps().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + if err := mock.UrlMaps().Delete(ctx, *keyGA); err != nil { + t.Errorf("UrlMaps().Delete(%v, %v) = %v; want nil", ctx, key, err) + } + + // Delete not found. 
+ if err := mock.UrlMaps().Delete(ctx, *keyGA); err == nil { + t.Errorf("UrlMaps().Delete(%v, %v) = nil; want error", ctx, key) + } +} + +func TestZonesGroup(t *testing.T) { + t.Parallel() + + ctx := context.Background() + mock := NewMockGCE() + + var key *meta.Key + keyGA := meta.GlobalKey("key-ga") + key = keyGA + // Ignore unused variables. + _, _, _ = ctx, mock, key + + // Get not found. + if _, err := mock.Zones().Get(ctx, *key); err == nil { + t.Errorf("Zones().Get(%v, %v) = _, nil; want error", ctx, key) + } + + // Insert. + + // Get across versions. + + // List. + mock.MockZones.Objects[*keyGA] = mock.MockZones.Obj(&ga.Zone{Name: keyGA.Name}) + want := map[string]bool{ + "key-ga": true, + } + _ = want // ignore unused variables. + { + objs, err := mock.Zones().List(ctx, filter.None) + if err != nil { + t.Errorf("Zones().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err) + } else { + got := map[string]bool{} + for _, obj := range objs { + got[obj.Name] = true + } + if !reflect.DeepEqual(got, want) { + t.Errorf("AlphaZones().List(); got %+v, want %+v", got, want) + } + } + } + + // Delete across versions. + + // Delete not found. +} From b2613f151487f12362917ed8db6065c7bb648faf Mon Sep 17 00:00:00 2001 From: TigerXu Date: Wed, 10 Jan 2018 09:30:23 +0800 Subject: [PATCH 702/794] Revert "no need delete endpoint explicitly in endpoint controller" --- pkg/controller/endpoint/endpoints_controller.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index b7d46c3e07e..8aa41da8a83 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -394,7 +394,15 @@ func (e *EndpointController) syncService(key string) error { } service, err := e.serviceLister.Services(namespace).Get(name) if err != nil { - // Service has been deleted. 
So no need to do any more operations. + // Delete the corresponding endpoint, as the service has been deleted. + // TODO: Please note that this will delete an endpoint when a + // service is deleted. However, if we're down at the time when + // the service is deleted, we will miss that deletion, so this + // doesn't completely solve the problem. See #6877. + err = e.client.CoreV1().Endpoints(namespace).Delete(name, nil) + if err != nil && !errors.IsNotFound(err) { + return err + } return nil } From 8ec1958667e66fb3da2a1f1428998f59f8b027f2 Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Sun, 24 Dec 2017 19:19:46 -0600 Subject: [PATCH 703/794] All Kubelet flags should be explicitly registered This explicitly registers Kubelet flags from libraries that were registering flags globally, and stops parsing the global flag set. In general, we should always be explicit about flags we register and parse, so that we maintain control over our command-line API. --- cmd/kubelet/BUILD | 1 + cmd/kubelet/app/options/BUILD | 13 ++ cmd/kubelet/app/options/globalflags.go | 167 ++++++++++++++++++ cmd/kubelet/kubelet.go | 33 +++- .../azure/azure_credentials.go | 2 +- pkg/credentialprovider/gcp/jwt.go | 6 +- pkg/version/verflag/verflag.go | 8 +- .../k8s.io/apiserver/pkg/util/logs/logs.go | 10 +- test/e2e/BUILD | 2 +- test/e2e/e2e.go | 2 +- 10 files changed, 231 insertions(+), 13 deletions(-) create mode 100644 cmd/kubelet/app/options/globalflags.go diff --git a/cmd/kubelet/BUILD b/cmd/kubelet/BUILD index a5b72a1e9ec..b077cfa4b8f 100644 --- a/cmd/kubelet/BUILD +++ b/cmd/kubelet/BUILD @@ -24,6 +24,7 @@ go_library( "//pkg/client/metrics/prometheus:go_default_library", "//pkg/version/prometheus:go_default_library", "//pkg/version/verflag:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", diff --git a/cmd/kubelet/app/options/BUILD b/cmd/kubelet/app/options/BUILD index aca4a34882f..0d8a49e1e96 100644 --- a/cmd/kubelet/app/options/BUILD +++ b/cmd/kubelet/app/options/BUILD @@ -10,12 +10,15 @@ go_library( name = "go_default_library", srcs = [ "container_runtime.go", + "globalflags.go", "options.go", ], importpath = "k8s.io/kubernetes/cmd/kubelet/app/options", deps = [ "//pkg/apis/componentconfig:go_default_library", "//pkg/apis/core:go_default_library", + "//pkg/credentialprovider/azure:go_default_library", + "//pkg/credentialprovider/gcp:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", @@ -24,10 +27,20 @@ go_library( "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/util/taints:go_default_library", + "//pkg/version/verflag:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/google/cadvisor/container/common:go_default_library", + "//vendor/github.com/google/cadvisor/container/containerd:go_default_library", + "//vendor/github.com/google/cadvisor/container/docker:go_default_library", + "//vendor/github.com/google/cadvisor/container/raw:go_default_library", + "//vendor/github.com/google/cadvisor/machine:go_default_library", + "//vendor/github.com/google/cadvisor/manager:go_default_library", + "//vendor/github.com/google/cadvisor/storage:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", ], ) diff --git a/cmd/kubelet/app/options/globalflags.go b/cmd/kubelet/app/options/globalflags.go new file mode 100644 
index 00000000000..85829930c0b --- /dev/null +++ b/cmd/kubelet/app/options/globalflags.go @@ -0,0 +1,167 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "flag" + "fmt" + "os" + "strings" + + "github.com/spf13/pflag" + + // libs that provide registration functions + "k8s.io/apiserver/pkg/util/logs" + "k8s.io/kubernetes/pkg/version/verflag" + + // ensure libs have a chance to globally register their flags + _ "github.com/golang/glog" + _ "github.com/google/cadvisor/container/common" + _ "github.com/google/cadvisor/container/containerd" + _ "github.com/google/cadvisor/container/docker" + _ "github.com/google/cadvisor/container/raw" + _ "github.com/google/cadvisor/machine" + _ "github.com/google/cadvisor/manager" + _ "github.com/google/cadvisor/storage" + _ "k8s.io/kubernetes/pkg/credentialprovider/azure" + _ "k8s.io/kubernetes/pkg/credentialprovider/gcp" +) + +// AddGlobalFlags explicitly registers flags that libraries (glog, verflag, etc.) register +// against the global flagsets from "flag" and "github.com/spf13/pflag". +// We do this in order to prevent unwanted flags from leaking into the Kubelet's flagset. 
+func AddGlobalFlags(fs *pflag.FlagSet) { + addGlogFlags(fs) + addCadvisorFlags(fs) + verflag.AddFlags(fs) + logs.AddFlags(fs) +} + +// normalize replaces underscores with hyphens +// we should always use hyphens instead of underscores when registering kubelet flags +func normalize(s string) string { + return strings.Replace(s, "_", "-", -1) +} + +// register adds a flag to local that targets the Value associated with the Flag named globalName in global +func register(global *flag.FlagSet, local *pflag.FlagSet, globalName string) { + if f := global.Lookup(globalName); f != nil { + f.Name = normalize(f.Name) + local.AddFlag(pflag.PFlagFromGoFlag(f)) + } else { + panic(fmt.Sprintf("failed to find flag in global flagset (flag): %s", globalName)) + } +} + +// pflagRegister adds a flag to local that targets the Value associated with the Flag named globalName in global +func pflagRegister(global, local *pflag.FlagSet, globalName string) { + if f := global.Lookup(globalName); f != nil { + f.Name = normalize(f.Name) + local.AddFlag(f) + } else { + panic(fmt.Sprintf("failed to find flag in global flagset (pflag): %s", globalName)) + } +} + +// registerDeprecated registers the flag with register, and then marks it deprecated +func registerDeprecated(global *flag.FlagSet, local *pflag.FlagSet, globalName, deprecated string) { + register(global, local, globalName) + local.Lookup(normalize(globalName)).Deprecated = deprecated +} + +// pflagRegisterDeprecated registers the flag with pflagRegister, and then marks it deprecated +func pflagRegisterDeprecated(global, local *pflag.FlagSet, globalName, deprecated string) { + pflagRegister(global, local, globalName) + local.Lookup(normalize(globalName)).Deprecated = deprecated +} + +// addCredentialProviderFlags adds flags from k8s.io/kubernetes/pkg/credentialprovider +func addCredentialProviderFlags(fs *pflag.FlagSet) { + // lookup flags in global flag set and re-register the values with our flagset + global := pflag.CommandLine + 
local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) + + // Note this is deprecated in the library that provides it, so we just allow that deprecation + // notice to pass through our registration here. + pflagRegister(global, local, "google-json-key") + // TODO(#58034): This is not a static file, so it's not quite as straightforward as --google-json-key. + // We need to figure out how ACR users can dynamically provide pull credentials before we can deprecate this. + pflagRegister(global, local, "azure-container-registry-config") + + fs.AddFlagSet(local) +} + +// addGlogFlags adds flags from github.com/golang/glog +func addGlogFlags(fs *pflag.FlagSet) { + // lookup flags in global flag set and re-register the values with our flagset + global := flag.CommandLine + local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) + + register(global, local, "logtostderr") + register(global, local, "alsologtostderr") + register(global, local, "v") + register(global, local, "stderrthreshold") + register(global, local, "vmodule") + register(global, local, "log_backtrace_at") + register(global, local, "log_dir") + + fs.AddFlagSet(local) +} + +// addCadvisorFlags adds flags from cadvisor +func addCadvisorFlags(fs *pflag.FlagSet) { + // lookup flags in global flag set and re-register the values with our flagset + global := flag.CommandLine + local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) + + // These flags were also implicit from cadvisor, but are actually used by something in the core repo: + // TODO(mtaufen): This one is stil used by our salt, but for heaven's sake it's even deprecated in cadvisor + register(global, local, "docker_root") + // e2e node tests rely on this + register(global, local, "housekeeping_interval") + + // These flags were implicit from cadvisor, and are mistakes that should be registered deprecated: + const deprecated = "This is a cadvisor flag that was mistakenly registered with the Kubelet. 
Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed." + + registerDeprecated(global, local, "application_metrics_count_limit", deprecated) + registerDeprecated(global, local, "boot_id_file", deprecated) + registerDeprecated(global, local, "container_hints", deprecated) + registerDeprecated(global, local, "containerd", deprecated) + registerDeprecated(global, local, "docker", deprecated) + registerDeprecated(global, local, "docker_env_metadata_whitelist", deprecated) + registerDeprecated(global, local, "docker_only", deprecated) + registerDeprecated(global, local, "docker-tls", deprecated) + registerDeprecated(global, local, "docker-tls-ca", deprecated) + registerDeprecated(global, local, "docker-tls-cert", deprecated) + registerDeprecated(global, local, "docker-tls-key", deprecated) + registerDeprecated(global, local, "enable_load_reader", deprecated) + registerDeprecated(global, local, "event_storage_age_limit", deprecated) + registerDeprecated(global, local, "event_storage_event_limit", deprecated) + registerDeprecated(global, local, "global_housekeeping_interval", deprecated) + registerDeprecated(global, local, "log_cadvisor_usage", deprecated) + registerDeprecated(global, local, "machine_id_file", deprecated) + registerDeprecated(global, local, "storage_driver_user", deprecated) + registerDeprecated(global, local, "storage_driver_password", deprecated) + registerDeprecated(global, local, "storage_driver_host", deprecated) + registerDeprecated(global, local, "storage_driver_db", deprecated) + registerDeprecated(global, local, "storage_driver_table", deprecated) + registerDeprecated(global, local, "storage_driver_secure", deprecated) + registerDeprecated(global, local, "storage_driver_buffer_duration", deprecated) + + // finally, add cadvisor flags to the provided flagset + fs.AddFlagSet(local) +} diff --git a/cmd/kubelet/kubelet.go b/cmd/kubelet/kubelet.go index 891aace197c..76b86233dc6 100644 --- 
a/cmd/kubelet/kubelet.go +++ b/cmd/kubelet/kubelet.go @@ -24,6 +24,7 @@ import ( "fmt" "os" + "github.com/golang/glog" "github.com/spf13/pflag" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -36,25 +37,43 @@ import ( "k8s.io/kubernetes/pkg/version/verflag" ) +func parseFlagSet(fs *pflag.FlagSet, args []string) error { + if err := fs.Parse(args); err != nil { + return err + } + fs.VisitAll(func(flag *pflag.Flag) { + glog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + }) + return nil +} + func die(err error) { fmt.Fprintf(os.Stderr, "error: %v\n", err) os.Exit(1) } func main() { - // construct KubeletFlags object and register command line flags mapping - kubeletFlags := options.NewKubeletFlags() - kubeletFlags.AddFlags(pflag.CommandLine) + fs := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) + // set the normalize func, similar to k8s.io/apiserver/pkg/util/flag/flags.go:InitFlags + fs.SetNormalizeFunc(flag.WordSepNormalizeFunc) + // explicitly add flags from libs that register global flags + options.AddGlobalFlags(fs) - // construct KubeletConfiguration object and register command line flags mapping + // register kubelet flags + kubeletFlags := options.NewKubeletFlags() + kubeletFlags.AddFlags(fs) + + // register kubelet config flags defaultConfig, err := options.NewKubeletConfiguration() if err != nil { die(err) } - options.AddKubeletConfigFlags(pflag.CommandLine, defaultConfig) + options.AddKubeletConfigFlags(fs, defaultConfig) - // parse the command line flags into the respective objects - flag.InitFlags() + // parse flags + if err := parseFlagSet(fs, os.Args[1:]); err != nil { + die(err) + } // initialize logging and defer flush logs.InitLogs() diff --git a/pkg/credentialprovider/azure/azure_credentials.go b/pkg/credentialprovider/azure/azure_credentials.go index e48d8133f55..6edf6fe30ad 100644 --- a/pkg/credentialprovider/azure/azure_credentials.go +++ b/pkg/credentialprovider/azure/azure_credentials.go @@ -34,7 +34,7 @@ import ( ) var flagConfigFile 
= pflag.String("azure-container-registry-config", "", - "Path to the file container Azure container registry configuration information.") + "Path to the file containing Azure container registry configuration information.") const dummyRegistryEmail = "name@contoso.com" diff --git a/pkg/credentialprovider/gcp/jwt.go b/pkg/credentialprovider/gcp/jwt.go index b34c0fcaaf0..d187560a351 100644 --- a/pkg/credentialprovider/gcp/jwt.go +++ b/pkg/credentialprovider/gcp/jwt.go @@ -31,10 +31,11 @@ import ( const ( storageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" + jwtFileFlagName = "google-json-key" ) var ( - flagJwtFile = pflag.String("google-json-key", "", + flagJwtFile = pflag.String(jwtFileFlagName, "", "The Google Cloud Platform Service Account JSON Key to use for authentication.") ) @@ -49,6 +50,9 @@ type jwtProvider struct { // init registers the various means by which credentials may // be resolved on GCP. func init() { + pflag.CommandLine.MarkDeprecated(jwtFileFlagName, "Will be removed in a future version. 
"+ + "To maintain node-level authentication, credentials should instead be included in a docker "+ + "config.json file, located inside the Kubelet's --root-dir.") credentialprovider.RegisterCredentialProvider("google-jwt-key", &credentialprovider.CachingDockerConfigProvider{ Provider: &jwtProvider{ diff --git a/pkg/version/verflag/verflag.go b/pkg/version/verflag/verflag.go index 5809c3aa8f4..0c078052774 100644 --- a/pkg/version/verflag/verflag.go +++ b/pkg/version/verflag/verflag.go @@ -85,10 +85,16 @@ func Version(name string, value versionValue, usage string) *versionValue { return p } +const versionFlagName = "version" + var ( - versionFlag = Version("version", VersionFalse, "Print version information and quit") + versionFlag = Version(versionFlagName, VersionFalse, "Print version information and quit") ) +func AddFlags(fs *flag.FlagSet) { + fs.AddFlag(flag.Lookup(versionFlagName)) +} + // PrintAndExitIfRequested will check if the -version flag was passed // and, if so, print the version and exit. func PrintAndExitIfRequested() { diff --git a/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go b/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go index a3909583a7c..c5ba084a59e 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go +++ b/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go @@ -26,13 +26,21 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ) -var logFlushFreq = pflag.Duration("log-flush-frequency", 5*time.Second, "Maximum number of seconds between log flushes") +const logFlushFreqFlagName = "log-flush-frequency" + +var logFlushFreq = pflag.Duration(logFlushFreqFlagName, 5*time.Second, "Maximum number of seconds between log flushes") // TODO(thockin): This is temporary until we agree on log dirs and put those into each cmd. func init() { flag.Set("logtostderr", "true") } +// AddFlags registers this package's flags on arbitrary FlagSets, such that they point to the +// same value as the global flags. 
+func AddFlags(fs *pflag.FlagSet) { + fs.AddFlag(pflag.Lookup(logFlushFreqFlagName)) +} + // GlogWriter serves as a bridge between the standard log package and the glog package. type GlogWriter struct{} diff --git a/test/e2e/BUILD b/test/e2e/BUILD index a619f53166f..88af77029ab 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -48,7 +48,6 @@ go_library( "//pkg/api/v1/pod:go_default_library", "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/cloudprovider/providers/gce:go_default_library", - "//pkg/kubectl/util/logs:go_default_library", "//pkg/version:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", @@ -71,6 +70,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index d456bee4153..63211b6892e 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -32,10 +32,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeutils "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/util/logs" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" - "k8s.io/kubernetes/pkg/kubectl/util/logs" "k8s.io/kubernetes/pkg/version" commontest "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" From 386c077dc6e241ad94c19e3e6a3fcc56b4ba325a Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Fri, 29 Dec 2017 18:43:38 +0800 Subject: [PATCH 704/794] Move common functions together --- test/e2e_node/BUILD | 3 + test/e2e_node/device_plugin.go | 102 +++++++++++++++++++++++++++++ test/e2e_node/gpu_device_plugin.go | 97 
+++++---------------------- test/e2e_node/util.go | 13 ++++ 4 files changed, 134 insertions(+), 81 deletions(-) create mode 100644 test/e2e_node/device_plugin.go diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index e70755c5365..9ee141f0096 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -10,6 +10,7 @@ go_library( name = "go_default_library", srcs = [ "container.go", + "device_plugin.go", "doc.go", "docker_util.go", "framework.go", @@ -32,10 +33,12 @@ go_library( "//pkg/features:go_default_library", "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", + "//pkg/kubelet/apis/deviceplugin/v1alpha:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", + "//pkg/kubelet/cm/deviceplugin:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/remote:go_default_library", "//test/e2e/common:go_default_library", diff --git a/test/e2e_node/device_plugin.go b/test/e2e_node/device_plugin.go new file mode 100644 index 00000000000..9748d31d68a --- /dev/null +++ b/test/e2e_node/device_plugin.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e_node + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "regexp" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/uuid" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" + "k8s.io/kubernetes/test/e2e/framework" + + pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" + dp "k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +// makeBusyboxPod returns a simple Pod spec with a pause container +// that requests resourceName and runs the specified command. +func makeBusyboxPod(resourceName, cmd string) *v1.Pod { + podName := "device-plugin-test-" + string(uuid.NewUUID()) + rl := v1.ResourceList{v1.ResourceName(resourceName): *resource.NewQuantity(1, resource.DecimalSI)} + + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName}, + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyAlways, + Containers: []v1.Container{{ + Image: busyboxImage, + Name: podName, + // Runs the specified command in the test pod. + Command: []string{"sh", "-c", cmd}, + Resources: v1.ResourceRequirements{ + Limits: rl, + Requests: rl, + }, + }}, + }, + } +} + +// parseLogFromNRuns returns restart count of the specified container +// after it has been restarted at least restartCount times, +// and the matching string for the specified regular expression parsed from the container logs. +func parseLogFromNRuns(f *framework.Framework, podName string, contName string, restartCount int32, re string) (int32, string) { + var count int32 + // Wait till pod has been restarted at least restartCount times. 
+ Eventually(func() bool { + p, err := f.PodClient().Get(podName, metav1.GetOptions{}) + if err != nil || len(p.Status.ContainerStatuses) < 1 { + return false + } + count = p.Status.ContainerStatuses[0].RestartCount + return count >= restartCount + }, 5*time.Minute, framework.Poll).Should(BeTrue()) + logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName) + if err != nil { + framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) + } + framework.Logf("got pod logs: %v", logs) + regex := regexp.MustCompile(re) + matches := regex.FindStringSubmatch(logs) + if len(matches) < 2 { + return count, "" + } + return count, matches[1] +} + +// numberOfDevices returns the number of devices of resourceName advertised by a node +func numberOfDevices(node *v1.Node, resourceName string) int64 { + val, ok := node.Status.Capacity[v1.ResourceName(resourceName)] + if !ok { + return 0 + } + + return val.Value() +} diff --git a/test/e2e_node/gpu_device_plugin.go b/test/e2e_node/gpu_device_plugin.go index d2a52c3749e..256a8935c5c 100644 --- a/test/e2e_node/gpu_device_plugin.go +++ b/test/e2e_node/gpu_device_plugin.go @@ -17,15 +17,11 @@ limitations under the License. 
package e2e_node import ( - "os/exec" - "regexp" "strconv" "time" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" @@ -89,24 +85,28 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() { By("Creating one GPU pod on a node with at least two GPUs") - p1 := f.PodClient().CreateSync(makeCudaPauseImage()) - count1, devId1 := getDeviceId(f, p1.Name, p1.Name, 1) + podRECMD := "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs" + p1 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD)) + + deviceIDRE := "gpu devices: (nvidia[0-9]+)" + count1, devId1 := parseLogFromNRuns(f, p1.Name, p1.Name, 1, deviceIDRE) p1, err := f.PodClient().Get(p1.Name, metav1.GetOptions{}) framework.ExpectNoError(err) By("Restarting Kubelet and waiting for the current running pod to restart") - restartKubelet(f) + restartKubelet() By("Confirming that after a kubelet and pod restart, GPU assignement is kept") - count1, devIdRestart1 := getDeviceId(f, p1.Name, p1.Name, count1+1) + count1, devIdRestart1 := parseLogFromNRuns(f, p1.Name, p1.Name, count1+1, deviceIDRE) Expect(devIdRestart1).To(Equal(devId1)) By("Restarting Kubelet and creating another pod") - restartKubelet(f) - p2 := f.PodClient().CreateSync(makeCudaPauseImage()) + restartKubelet() + p2 := f.PodClient().CreateSync(makeBusyboxPod(framework.NVIDIAGPUResourceName, podRECMD)) By("Checking that pods got a different GPU") - count2, devId2 := getDeviceId(f, p2.Name, p2.Name, 1) + count2, devId2 := parseLogFromNRuns(f, p2.Name, p2.Name, 1, deviceIDRE) + Expect(devId1).To(Not(Equal(devId2))) By("Deleting device plugin.") @@ -118,16 +118,16 @@ 
var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi return framework.NumberOfNVIDIAGPUs(node) <= 0 }, 10*time.Minute, framework.Poll).Should(BeTrue()) By("Checking that scheduled pods can continue to run even after we delete device plugin.") - count1, devIdRestart1 = getDeviceId(f, p1.Name, p1.Name, count1+1) + count1, devIdRestart1 = parseLogFromNRuns(f, p1.Name, p1.Name, count1+1, deviceIDRE) Expect(devIdRestart1).To(Equal(devId1)) - count2, devIdRestart2 := getDeviceId(f, p2.Name, p2.Name, count2+1) + count2, devIdRestart2 := parseLogFromNRuns(f, p2.Name, p2.Name, count2+1, deviceIDRE) Expect(devIdRestart2).To(Equal(devId2)) By("Restarting Kubelet.") - restartKubelet(f) + restartKubelet() By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.") - count1, devIdRestart1 = getDeviceId(f, p1.Name, p1.Name, count1+2) + count1, devIdRestart1 = parseLogFromNRuns(f, p1.Name, p1.Name, count1+2, deviceIDRE) Expect(devIdRestart1).To(Equal(devId1)) - count2, devIdRestart2 = getDeviceId(f, p2.Name, p2.Name, count2+2) + count2, devIdRestart2 = parseLogFromNRuns(f, p2.Name, p2.Name, count2+2, deviceIDRE) Expect(devIdRestart2).To(Equal(devId2)) logDevicePluginMetrics() @@ -165,68 +165,3 @@ func logDevicePluginMetrics() { } } } - -func makeCudaPauseImage() *v1.Pod { - podName := testPodNamePrefix + string(uuid.NewUUID()) - - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: podName}, - Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyAlways, - Containers: []v1.Container{{ - Image: busyboxImage, - Name: podName, - // Retrieves the gpu devices created in the user pod. - // Note the nvidia device plugin implementation doesn't do device id remapping currently. - // Will probably need to use nvidia-smi if that changes. 
- Command: []string{"sh", "-c", "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"}, - - Resources: v1.ResourceRequirements{ - Limits: newDecimalResourceList(framework.NVIDIAGPUResourceName, 1), - Requests: newDecimalResourceList(framework.NVIDIAGPUResourceName, 1), - }, - }}, - }, - } -} - -func newDecimalResourceList(name v1.ResourceName, quantity int64) v1.ResourceList { - return v1.ResourceList{name: *resource.NewQuantity(quantity, resource.DecimalSI)} -} - -// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494 -func restartKubelet(f *framework.Framework) { - stdout, err := exec.Command("sudo", "systemctl", "list-units", "kubelet*", "--state=running").CombinedOutput() - framework.ExpectNoError(err) - regex := regexp.MustCompile("(kubelet-[0-9]+)") - matches := regex.FindStringSubmatch(string(stdout)) - Expect(len(matches)).NotTo(BeZero()) - kube := matches[0] - framework.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube) - stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput() - framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout) -} - -func getDeviceId(f *framework.Framework, podName string, contName string, restartCount int32) (int32, string) { - var count int32 - // Wait till pod has been restarted at least restartCount times. 
- Eventually(func() bool { - p, err := f.PodClient().Get(podName, metav1.GetOptions{}) - if err != nil || len(p.Status.ContainerStatuses) < 1 { - return false - } - count = p.Status.ContainerStatuses[0].RestartCount - return count >= restartCount - }, 5*time.Minute, framework.Poll).Should(BeTrue()) - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName) - if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) - } - framework.Logf("got pod logs: %v", logs) - regex := regexp.MustCompile("gpu devices: (nvidia[0-9]+)") - matches := regex.FindStringSubmatch(logs) - if len(matches) < 2 { - return count, "" - } - return count, matches[1] -} diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index bf1914c5e71..f81ab6f5d8b 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -389,3 +389,16 @@ func getCRIClient() (internalapi.RuntimeService, internalapi.ImageManagerService } return r, i, nil } + +// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494 +func restartKubelet() { + stdout, err := exec.Command("sudo", "systemctl", "list-units", "kubelet*", "--state=running").CombinedOutput() + framework.ExpectNoError(err) + regex := regexp.MustCompile("(kubelet-[0-9]+)") + matches := regex.FindStringSubmatch(string(stdout)) + Expect(len(matches)).NotTo(BeZero()) + kube := matches[0] + framework.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube) + stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput() + framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout) +} From 40c0cb468fc151df8accd3a6228cbe0b5ca6b183 Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Tue, 9 Jan 2018 21:53:22 -0800 Subject: [PATCH 705/794] Remove options.md, which is outdated and doesn't contain any useful information. 
--- cluster/options.md | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 cluster/options.md diff --git a/cluster/options.md b/cluster/options.md deleted file mode 100644 index f48d0ebc264..00000000000 --- a/cluster/options.md +++ /dev/null @@ -1,15 +0,0 @@ -# Configuration options - -These options can be set as environment variables, to customize how your cluster is created. - -These options apply across providers. There are additional documents for options specific to providers: - -* [AWS](aws/options.md) - -This is a work-in-progress; not all options are documented yet! - -**NUM_NODES** - -The number of node instances to create. Most providers default this to 4. - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/options.md?pixel)]() From d2d48cddf8cd3856b66e3bd385f8fab4b6ccf57c Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 10 Jan 2018 14:01:47 +0800 Subject: [PATCH 706/794] Add wrappers for azure clients --- pkg/cloudprovider/providers/azure/azure.go | 256 +----- .../providers/azure/azure_client.go | 811 ++++++++++++++++++ 2 files changed, 854 insertions(+), 213 deletions(-) create mode 100644 pkg/cloudprovider/providers/azure/azure_client.go diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 8f31f013508..9d61124f194 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -30,10 +30,6 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/version" - "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/Azure/azure-sdk-for-go/arm/disk" - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/ghodss/yaml" @@ -110,117 +106,22 @@ type Config struct { MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount"` } -// 
VirtualMachinesClient defines needed functions for azure compute.VirtualMachinesClient -type VirtualMachinesClient interface { - CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) - Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) - List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) - ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) -} - -// InterfacesClient defines needed functions for azure network.InterfacesClient -type InterfacesClient interface { - CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) - Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) - GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) -} - -// LoadBalancersClient defines needed functions for azure network.LoadBalancersClient -type LoadBalancersClient interface { - CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) - Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) - Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) - List(resourceGroupName string) (result network.LoadBalancerListResult, err error) - ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) -} - -// PublicIPAddressesClient 
defines needed functions for azure network.PublicIPAddressesClient -type PublicIPAddressesClient interface { - CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) - Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) - Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) - List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) - ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) -} - -// SubnetsClient defines needed functions for azure network.SubnetsClient -type SubnetsClient interface { - CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) - Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) - Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) - List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) -} - -// SecurityGroupsClient defines needed functions for azure network.SecurityGroupsClient -type SecurityGroupsClient interface { - CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) - Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) - Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) - 
List(resourceGroupName string) (result network.SecurityGroupListResult, err error) -} - -// VirtualMachineScaleSetsClient defines needed functions for azure compute.VirtualMachineScaleSetsClient -type VirtualMachineScaleSetsClient interface { - CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) - Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) - List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) - ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) - UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) -} - -// VirtualMachineScaleSetVMsClient defines needed functions for azure compute.VirtualMachineScaleSetVMsClient -type VirtualMachineScaleSetVMsClient interface { - Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) - GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) - List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) - ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) -} - -// RoutesClient defines needed functions for azure network.RoutesClient -type RoutesClient interface { - CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan 
network.Route, <-chan error) - Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) -} - -// RouteTablesClient defines needed functions for azure network.RouteTablesClient -type RouteTablesClient interface { - CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) - Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) -} - -// StorageAccountClient defines needed functions for azure storage.AccountsClient -type StorageAccountClient interface { - Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) - Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) - ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) - ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) - GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) -} - -// DisksClient defines needed functions for azure disk.DisksClient -type DisksClient interface { - CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) - Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) - Get(resourceGroupName string, diskName string) (result disk.Model, err error) -} - // Cloud holds the config and clients type Cloud struct { Config - Environment azure.Environment - RoutesClient RoutesClient - SubnetsClient SubnetsClient - InterfacesClient InterfacesClient - RouteTablesClient RouteTablesClient - LoadBalancerClient LoadBalancersClient - 
PublicIPAddressesClient PublicIPAddressesClient - SecurityGroupsClient SecurityGroupsClient - VirtualMachinesClient VirtualMachinesClient - StorageAccountClient StorageAccountClient - DisksClient DisksClient - operationPollRateLimiter flowcontrol.RateLimiter - resourceRequestBackoff wait.Backoff - vmSet VMSet + Environment azure.Environment + RoutesClient RoutesClient + SubnetsClient SubnetsClient + InterfacesClient InterfacesClient + RouteTablesClient RouteTablesClient + LoadBalancerClient LoadBalancersClient + PublicIPAddressesClient PublicIPAddressesClient + SecurityGroupsClient SecurityGroupsClient + VirtualMachinesClient VirtualMachinesClient + StorageAccountClient StorageAccountClient + DisksClient DisksClient + resourceRequestBackoff wait.Backoff + vmSet VMSet // Clients for vmss. VirtualMachineScaleSetsClient VirtualMachineScaleSetsClient @@ -247,116 +148,45 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { return nil, err } - az := Cloud{ - Config: *config, - Environment: *env, - } - servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env) if err != nil { return nil, err } - subnetsClient := network.NewSubnetsClient(az.SubscriptionID) - subnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint - subnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - subnetsClient.PollingDelay = 5 * time.Second - configureUserAgent(&subnetsClient.Client) - az.SubnetsClient = subnetsClient - - routeTablesClient := network.NewRouteTablesClient(az.SubscriptionID) - routeTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint - routeTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - routeTablesClient.PollingDelay = 5 * time.Second - configureUserAgent(&routeTablesClient.Client) - az.RouteTablesClient = routeTablesClient - - routesClient := network.NewRoutesClient(az.SubscriptionID) - routesClient.BaseURI = az.Environment.ResourceManagerEndpoint - 
routesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - routesClient.PollingDelay = 5 * time.Second - configureUserAgent(&routesClient.Client) - az.RoutesClient = routesClient - - interfacesClient := network.NewInterfacesClient(az.SubscriptionID) - interfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint - interfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - interfacesClient.PollingDelay = 5 * time.Second - configureUserAgent(&interfacesClient.Client) - az.InterfacesClient = interfacesClient - - loadBalancerClient := network.NewLoadBalancersClient(az.SubscriptionID) - loadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint - loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - loadBalancerClient.PollingDelay = 5 * time.Second - configureUserAgent(&loadBalancerClient.Client) - az.LoadBalancerClient = loadBalancerClient - - virtualMachinesClient := compute.NewVirtualMachinesClient(az.SubscriptionID) - virtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint - virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - virtualMachinesClient.PollingDelay = 5 * time.Second - configureUserAgent(&virtualMachinesClient.Client) - az.VirtualMachinesClient = virtualMachinesClient - - publicIPAddressClient := network.NewPublicIPAddressesClient(az.SubscriptionID) - publicIPAddressClient.BaseURI = az.Environment.ResourceManagerEndpoint - publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - publicIPAddressClient.PollingDelay = 5 * time.Second - configureUserAgent(&publicIPAddressClient.Client) - az.PublicIPAddressesClient = publicIPAddressClient - - securityGroupsClient := network.NewSecurityGroupsClient(az.SubscriptionID) - securityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint - securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - 
securityGroupsClient.PollingDelay = 5 * time.Second - configureUserAgent(&securityGroupsClient.Client) - az.SecurityGroupsClient = securityGroupsClient - - virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(az.SubscriptionID) - virtualMachineScaleSetVMsClient.BaseURI = az.Environment.ResourceManagerEndpoint - virtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - virtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second - configureUserAgent(&virtualMachineScaleSetVMsClient.Client) - az.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient - - virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(az.SubscriptionID) - virtualMachineScaleSetsClient.BaseURI = az.Environment.ResourceManagerEndpoint - virtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - virtualMachineScaleSetsClient.PollingDelay = 5 * time.Second - configureUserAgent(&virtualMachineScaleSetsClient.Client) - az.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient - - storageAccountClient := storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) - storageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - storageAccountClient.PollingDelay = 5 * time.Second - configureUserAgent(&storageAccountClient.Client) - az.StorageAccountClient = storageAccountClient - - disksClient := disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) - disksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - disksClient.PollingDelay = 5 * time.Second - configureUserAgent(&disksClient.Client) - az.DisksClient = disksClient - - // Conditionally configure rate limits - if az.CloudProviderRateLimit { + // operationPollRateLimiter.Accept() is a no-op if rate limits are configured off. 
+ operationPollRateLimiter := flowcontrol.NewFakeAlwaysRateLimiter() + if config.CloudProviderRateLimit { // Assign rate limit defaults if no configuration was passed in - if az.CloudProviderRateLimitQPS == 0 { - az.CloudProviderRateLimitQPS = rateLimitQPSDefault + if config.CloudProviderRateLimitQPS == 0 { + config.CloudProviderRateLimitQPS = rateLimitQPSDefault } - if az.CloudProviderRateLimitBucket == 0 { - az.CloudProviderRateLimitBucket = rateLimitBucketDefault + if config.CloudProviderRateLimitBucket == 0 { + config.CloudProviderRateLimitBucket = rateLimitBucketDefault } - az.operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter( - az.CloudProviderRateLimitQPS, - az.CloudProviderRateLimitBucket) + operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter( + config.CloudProviderRateLimitQPS, + config.CloudProviderRateLimitBucket) glog.V(2).Infof("Azure cloudprovider using rate limit config: QPS=%g, bucket=%d", - az.CloudProviderRateLimitQPS, - az.CloudProviderRateLimitBucket) - } else { - // if rate limits are configured off, az.operationPollRateLimiter.Accept() is a no-op - az.operationPollRateLimiter = flowcontrol.NewFakeAlwaysRateLimiter() + config.CloudProviderRateLimitQPS, + config.CloudProviderRateLimitBucket) + } + + az := Cloud{ + Config: *config, + Environment: *env, + + DisksClient: newAzDisksClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + RoutesClient: newAzRoutesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + SubnetsClient: newAzSubnetsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + InterfacesClient: newAzInterfacesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + RouteTablesClient: newAzRouteTablesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, 
operationPollRateLimiter), + LoadBalancerClient: newAzLoadBalancersClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + SecurityGroupsClient: newAzSecurityGroupsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + StorageAccountClient: newAzStorageAccountClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + VirtualMachinesClient: newAzVirtualMachinesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + PublicIPAddressesClient: newAzPublicIPAddressesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), } // Conditionally configure resource request backoff diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go new file mode 100644 index 00000000000..3a359dd6893 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -0,0 +1,811 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/azure-sdk-for-go/arm/disk" + "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/golang/glog" + + "k8s.io/client-go/util/flowcontrol" +) + +// VirtualMachinesClient defines needed functions for azure compute.VirtualMachinesClient +type VirtualMachinesClient interface { + CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) + Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) + List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) + ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) +} + +// InterfacesClient defines needed functions for azure network.InterfacesClient +type InterfacesClient interface { + CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) + Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) + GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) +} + +// LoadBalancersClient defines needed functions for azure network.LoadBalancersClient +type LoadBalancersClient interface { + CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) 
(<-chan network.LoadBalancer, <-chan error) + Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) + List(resourceGroupName string) (result network.LoadBalancerListResult, err error) + ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) +} + +// PublicIPAddressesClient defines needed functions for azure network.PublicIPAddressesClient +type PublicIPAddressesClient interface { + CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) + Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) + List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) + ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) +} + +// SubnetsClient defines needed functions for azure network.SubnetsClient +type SubnetsClient interface { + CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) + Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) + List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) +} + +// SecurityGroupsClient defines needed functions for azure 
network.SecurityGroupsClient +type SecurityGroupsClient interface { + CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) + Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) + List(resourceGroupName string) (result network.SecurityGroupListResult, err error) +} + +// VirtualMachineScaleSetsClient defines needed functions for azure compute.VirtualMachineScaleSetsClient +type VirtualMachineScaleSetsClient interface { + CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) + Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) + List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) + ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) + UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) +} + +// VirtualMachineScaleSetVMsClient defines needed functions for azure compute.VirtualMachineScaleSetVMsClient +type VirtualMachineScaleSetVMsClient interface { + Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) + GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) + List(resourceGroupName string, virtualMachineScaleSetName string, filter 
string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) + ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) +} + +// RoutesClient defines needed functions for azure network.RoutesClient +type RoutesClient interface { + CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) + Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) +} + +// RouteTablesClient defines needed functions for azure network.RouteTablesClient +type RouteTablesClient interface { + CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) + Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) +} + +// StorageAccountClient defines needed functions for azure storage.AccountsClient +type StorageAccountClient interface { + Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) + Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) + ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) + ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) + GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) +} + +// DisksClient defines needed functions for azure disk.DisksClient +type DisksClient interface { + CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) + 
Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) + Get(resourceGroupName string, diskName string) (result disk.Model, err error) +} + +// azVirtualMachinesClient implements VirtualMachinesClient. +type azVirtualMachinesClient struct { + client compute.VirtualMachinesClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzVirtualMachinesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachinesClient { + virtualMachinesClient := compute.NewVirtualMachinesClient(subscriptionID) + virtualMachinesClient.BaseURI = endpoint + virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + virtualMachinesClient.PollingDelay = 5 * time.Second + configureUserAgent(&virtualMachinesClient.Client) + + return &azVirtualMachinesClient{ + rateLimiter: rateLimiter, + client: virtualMachinesClient, + } +} + +func (az *azVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): start", resourceGroupName, VMName) + defer func() { + glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, VMName, parameters, cancel) +} + +func (az *azVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): start", resourceGroupName, VMName) + defer func() { + glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName) + }() + + return az.client.Get(resourceGroupName, VMName, expand) +} + 
+func (az *azVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachinesClient.List(%q): start", resourceGroupName) + defer func() { + glog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName) + }() + + return az.client.List(resourceGroupName) +} + +func (az *azVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachinesClient.ListNextResults(%q): start", lastResults) + defer func() { + glog.V(10).Infof("azVirtualMachinesClient.ListNextResults(%q): end", lastResults) + }() + + return az.client.ListNextResults(lastResults) +} + +// azInterfacesClient implements InterfacesClient. +type azInterfacesClient struct { + client network.InterfacesClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzInterfacesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azInterfacesClient { + interfacesClient := network.NewInterfacesClient(subscriptionID) + interfacesClient.BaseURI = endpoint + interfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + interfacesClient.PollingDelay = 5 * time.Second + configureUserAgent(&interfacesClient.Client) + + return &azInterfacesClient{ + rateLimiter: rateLimiter, + client: interfacesClient, + } +} + +func (az *azInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkInterfaceName) + defer func() { + glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName) + }() + 
+ return az.client.CreateOrUpdate(resourceGroupName, networkInterfaceName, parameters, cancel) +} + +func (az *azInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azInterfacesClient.Get(%q,%q): start", resourceGroupName, networkInterfaceName) + defer func() { + glog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName) + }() + + return az.client.Get(resourceGroupName, networkInterfaceName, expand) +} + +func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) + defer func() { + glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) + }() + + return az.client.GetVirtualMachineScaleSetNetworkInterface(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) +} + +// azLoadBalancersClient implements LoadBalancersClient. 
+type azLoadBalancersClient struct { + client network.LoadBalancersClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzLoadBalancersClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azLoadBalancersClient { + loadBalancerClient := network.NewLoadBalancersClient(subscriptionID) + loadBalancerClient.BaseURI = endpoint + loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + loadBalancerClient.PollingDelay = 5 * time.Second + configureUserAgent(&loadBalancerClient.Client) + + return &azLoadBalancersClient{ + rateLimiter: rateLimiter, + client: loadBalancerClient, + } +} + +func (az *azLoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): start", resourceGroupName, loadBalancerName) + defer func() { + glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, loadBalancerName, parameters, cancel) +} + +func (az *azLoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): start", resourceGroupName, loadBalancerName) + defer func() { + glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName) + }() + + return az.client.Delete(resourceGroupName, loadBalancerName, cancel) +} + +func (az *azLoadBalancersClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): start", 
resourceGroupName, loadBalancerName) + defer func() { + glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName) + }() + + return az.client.Get(resourceGroupName, loadBalancerName, expand) +} + +func (az *azLoadBalancersClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azLoadBalancersClient.List(%q): start", resourceGroupName) + defer func() { + glog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName) + }() + + return az.client.List(resourceGroupName) +} + +func (az *azLoadBalancersClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azLoadBalancersClient.ListNextResults(%q): start", lastResult) + defer func() { + glog.V(10).Infof("azLoadBalancersClient.ListNextResults(%q): end", lastResult) + }() + + return az.client.ListNextResults(lastResult) +} + +// azPublicIPAddressesClient implements PublicIPAddressesClient. 
+type azPublicIPAddressesClient struct { + client network.PublicIPAddressesClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzPublicIPAddressesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azPublicIPAddressesClient { + publicIPAddressClient := network.NewPublicIPAddressesClient(subscriptionID) + publicIPAddressClient.BaseURI = endpoint + publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + publicIPAddressClient.PollingDelay = 5 * time.Second + configureUserAgent(&publicIPAddressClient.Client) + + return &azPublicIPAddressesClient{ + rateLimiter: rateLimiter, + client: publicIPAddressClient, + } +} + +func (az *azPublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, publicIPAddressName) + defer func() { + glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, publicIPAddressName, parameters, cancel) +} + +func (az *azPublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): start", resourceGroupName, publicIPAddressName) + defer func() { + glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName) + }() + + return az.client.Delete(resourceGroupName, publicIPAddressName, cancel) +} + +func (az *azPublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, 
err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): start", resourceGroupName, publicIPAddressName) + defer func() { + glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName) + }() + + return az.client.Get(resourceGroupName, publicIPAddressName, expand) +} + +func (az *azPublicIPAddressesClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName) + defer func() { + glog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName) + }() + + return az.client.List(resourceGroupName) +} + +func (az *azPublicIPAddressesClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azPublicIPAddressesClient.ListNextResults(%q): start", lastResults) + defer func() { + glog.V(10).Infof("azPublicIPAddressesClient.ListNextResults(%q): end", lastResults) + }() + + return az.client.ListNextResults(lastResults) +} + +// azSubnetsClient implements SubnetsClient. 
+type azSubnetsClient struct { + client network.SubnetsClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzSubnetsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azSubnetsClient { + subnetsClient := network.NewSubnetsClient(subscriptionID) + subnetsClient.BaseURI = endpoint + subnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + subnetsClient.PollingDelay = 5 * time.Second + configureUserAgent(&subnetsClient.Client) + + return &azSubnetsClient{ + client: subnetsClient, + rateLimiter: rateLimiter, + } +} + +func (az *azSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + defer func() { + glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel) +} + +func (az *azSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + defer func() { + glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + }() + + return az.client.Delete(resourceGroupName, virtualNetworkName, subnetName, cancel) +} + +func (az *azSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { + az.rateLimiter.Accept() + 
glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + defer func() { + glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + }() + + return az.client.Get(resourceGroupName, virtualNetworkName, subnetName, expand) +} + +func (az *azSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName) + defer func() { + glog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName) + }() + + return az.client.List(resourceGroupName, virtualNetworkName) +} + +// azSecurityGroupsClient implements SecurityGroupsClient. +type azSecurityGroupsClient struct { + client network.SecurityGroupsClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzSecurityGroupsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azSecurityGroupsClient { + securityGroupsClient := network.NewSecurityGroupsClient(subscriptionID) + securityGroupsClient.BaseURI = endpoint + securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + securityGroupsClient.PollingDelay = 5 * time.Second + configureUserAgent(&securityGroupsClient.Client) + + return &azSecurityGroupsClient{ + rateLimiter: rateLimiter, + client: securityGroupsClient, + } +} + +func (az *azSecurityGroupsClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkSecurityGroupName) + defer func() { + glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", 
resourceGroupName, networkSecurityGroupName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, networkSecurityGroupName, parameters, cancel) +} + +func (az *azSecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): start", resourceGroupName, networkSecurityGroupName) + defer func() { + glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName) + }() + + return az.client.Delete(resourceGroupName, networkSecurityGroupName, cancel) +} + +func (az *azSecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): start", resourceGroupName, networkSecurityGroupName) + defer func() { + glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName) + }() + + return az.client.Get(resourceGroupName, networkSecurityGroupName, expand) +} + +func (az *azSecurityGroupsClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName) + defer func() { + glog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName) + }() + + return az.client.List(resourceGroupName) +} + +// azVirtualMachineScaleSetsClient implements VirtualMachineScaleSetsClient. 
+type azVirtualMachineScaleSetsClient struct { + client compute.VirtualMachineScaleSetsClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzVirtualMachineScaleSetsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachineScaleSetsClient { + virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(subscriptionID) + virtualMachineScaleSetsClient.BaseURI = endpoint + virtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + virtualMachineScaleSetsClient.PollingDelay = 5 * time.Second + configureUserAgent(&virtualMachineScaleSetsClient.Client) + + return &azVirtualMachineScaleSetsClient{ + client: virtualMachineScaleSetsClient, + rateLimiter: rateLimiter, + } +} + +func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName) + defer func() { + glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, VMScaleSetName, parameters, cancel) +} + +func (az *azVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName) + defer func() { + glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) + }() + + return az.client.Get(resourceGroupName, VMScaleSetName) +} + +func (az *azVirtualMachineScaleSetsClient) List(resourceGroupName string) (result 
compute.VirtualMachineScaleSetListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName) + defer func() { + glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName) + }() + + return az.client.List(resourceGroupName) +} + +func (az *azVirtualMachineScaleSetsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachineScaleSetsClient.ListNextResults(%q): start", lastResults) + defer func() { + glog.V(10).Infof("azVirtualMachineScaleSetsClient.ListNextResults(%q): end", lastResults) + }() + + return az.client.ListNextResults(lastResults) +} + +func (az *azVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): start", resourceGroupName, VMScaleSetName, VMInstanceIDs) + defer func() { + glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): end", resourceGroupName, VMScaleSetName, VMInstanceIDs) + }() + + return az.client.UpdateInstances(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) +} + +// azVirtualMachineScaleSetVMsClient implements VirtualMachineScaleSetVMsClient. 
+type azVirtualMachineScaleSetVMsClient struct { + client compute.VirtualMachineScaleSetVMsClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzVirtualMachineScaleSetVMsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachineScaleSetVMsClient { + virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID) + virtualMachineScaleSetVMsClient.BaseURI = endpoint + virtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + virtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second + configureUserAgent(&virtualMachineScaleSetVMsClient.Client) + + return &azVirtualMachineScaleSetVMsClient{ + client: virtualMachineScaleSetVMsClient, + rateLimiter: rateLimiter, + } +} + +func (az *azVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) + defer func() { + glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) + }() + + return az.client.Get(resourceGroupName, VMScaleSetName, instanceID) +} + +func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) + defer func() { + glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) + }() + + return az.client.GetInstanceView(resourceGroupName, VMScaleSetName, instanceID) +} 
+ +func (az *azVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter) + defer func() { + glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) + }() + + return az.client.List(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) +} + +func (az *azVirtualMachineScaleSetVMsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.ListNextResults(%q): start", lastResults) + defer func() { + glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.ListNextResults(%q): end", lastResults) + }() + + return az.client.ListNextResults(lastResults) +} + +// azRoutesClient implements RoutesClient. 
+type azRoutesClient struct { + client network.RoutesClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzRoutesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azRoutesClient { + routesClient := network.NewRoutesClient(subscriptionID) + routesClient.BaseURI = endpoint + routesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + routesClient.PollingDelay = 5 * time.Second + configureUserAgent(&routesClient.Client) + + return &azRoutesClient{ + client: routesClient, + rateLimiter: rateLimiter, + } +} + +func (az *azRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) + defer func() { + glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, routeTableName, routeName, routeParameters, cancel) +} + +func (az *azRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) + defer func() { + glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) + }() + + return az.client.Delete(resourceGroupName, routeTableName, routeName, cancel) +} + +// azRouteTablesClient implements RouteTablesClient. 
+type azRouteTablesClient struct { + client network.RouteTablesClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzRouteTablesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azRouteTablesClient { + routeTablesClient := network.NewRouteTablesClient(subscriptionID) + routeTablesClient.BaseURI = endpoint + routeTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + routeTablesClient.PollingDelay = 5 * time.Second + configureUserAgent(&routeTablesClient.Client) + + return &azRouteTablesClient{ + client: routeTablesClient, + rateLimiter: rateLimiter, + } +} + +func (az *azRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, routeTableName) + defer func() { + glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, routeTableName, parameters, cancel) +} + +func (az *azRouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): start", resourceGroupName, routeTableName) + defer func() { + glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName) + }() + + return az.client.Get(resourceGroupName, routeTableName, expand) +} + +// azStorageAccountClient implements StorageAccountClient. 
+type azStorageAccountClient struct { + client storage.AccountsClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzStorageAccountClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azStorageAccountClient { + storageAccountClient := storage.NewAccountsClientWithBaseURI(endpoint, subscriptionID) + storageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + storageAccountClient.PollingDelay = 5 * time.Second + configureUserAgent(&storageAccountClient.Client) + + return &azStorageAccountClient{ + client: storageAccountClient, + rateLimiter: rateLimiter, + } +} + +func (az *azStorageAccountClient) Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): start", resourceGroupName, accountName) + defer func() { + glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName) + }() + + return az.client.Create(resourceGroupName, accountName, parameters, cancel) +} + +func (az *azStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): start", resourceGroupName, accountName) + defer func() { + glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName) + }() + + return az.client.Delete(resourceGroupName, accountName) +} + +func (az *azStorageAccountClient) ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): start", resourceGroupName, accountName) + defer func() { + glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", 
resourceGroupName, accountName) + }() + + return az.client.ListKeys(resourceGroupName, accountName) +} + +func (az *azStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): start", resourceGroupName) + defer func() { + glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName) + }() + + return az.client.ListByResourceGroup(resourceGroupName) +} + +func (az *azStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): start", resourceGroupName, accountName) + defer func() { + glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName) + }() + + return az.client.GetProperties(resourceGroupName, accountName) +} + +// azDisksClient implements DisksClient. 
+type azDisksClient struct { + client disk.DisksClient + rateLimiter flowcontrol.RateLimiter +} + +func newAzDisksClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azDisksClient { + disksClient := disk.NewDisksClientWithBaseURI(endpoint, subscriptionID) + disksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + disksClient.PollingDelay = 5 * time.Second + configureUserAgent(&disksClient.Client) + + return &azDisksClient{ + client: disksClient, + rateLimiter: rateLimiter, + } +} + +func (az *azDisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): start", resourceGroupName, diskName) + defer func() { + glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName) + }() + + return az.client.CreateOrUpdate(resourceGroupName, diskName, diskParameter, cancel) +} + +func (az *azDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName) + defer func() { + glog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) + }() + + return az.client.Delete(resourceGroupName, diskName, cancel) +} + +func (az *azDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) { + az.rateLimiter.Accept() + glog.V(10).Infof("azDisksClient.Get(%q,%q): start", resourceGroupName, diskName) + defer func() { + glog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName) + }() + + return az.client.Get(resourceGroupName, diskName) +} From 2423e7c52b48e0a3c2fb85cc0090a883ec20df0a Mon Sep 17 00:00:00 2001 From: Pengfei Ni 
Date: Wed, 10 Jan 2018 14:02:08 +0800 Subject: [PATCH 707/794] Clean up azure rateLimiter and verbose logs --- pkg/cloudprovider/providers/azure/BUILD | 3 +- .../providers/azure/azure_backoff.go | 41 ------------------- .../providers/azure/azure_controllerCommon.go | 2 - .../providers/azure/azure_loadbalancer.go | 14 +------ .../providers/azure/azure_routes.go | 6 --- .../providers/azure/azure_storageaccount.go | 8 ---- .../providers/azure/azure_test.go | 2 - .../providers/azure/azure_util.go | 7 ---- .../providers/azure/azure_util_vmss.go | 33 --------------- .../providers/azure/azure_wrap.go | 28 +------------ 10 files changed, 5 insertions(+), 139 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index fe131367e0c..d796860b662 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -12,6 +12,7 @@ go_library( "azure.go", "azure_backoff.go", "azure_blobDiskController.go", + "azure_client.go", "azure_controllerCommon.go", "azure_fakes.go", "azure_file.go", @@ -42,6 +43,7 @@ go_library( "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", @@ -80,7 +82,6 @@ go_test( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go 
index 099fea81fe1..ff0e16bfd7d 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -67,10 +67,7 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, var result compute.VirtualMachineListResult err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.List(%v): start", az.ResourceGroup) result, retryErr = az.VirtualMachinesClient.List(az.ResourceGroup) - glog.V(10).Infof("VirtualMachinesClient.List(%v): end", az.ResourceGroup) if retryErr != nil { glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, @@ -92,10 +89,7 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): start", az.ResourceGroup) result, retryErr = az.VirtualMachinesClient.ListNextResults(result) - glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): end", az.ResourceGroup) if retryErr != nil { glog.Errorf("VirtualMachinesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, retryErr) @@ -133,8 +127,6 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) { // CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): start", *sg.Name) respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) resp 
:= <-respChan err := <-errChan @@ -146,8 +138,6 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { // CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): start", *lb.Name) respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) resp := <-respChan err := <-errChan @@ -163,10 +153,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.List(%v): start", az.ResourceGroup) result, retryErr = az.LoadBalancerClient.List(az.ResourceGroup) - glog.V(10).Infof("LoadBalancerClient.List(%v): end", az.ResourceGroup) if retryErr != nil { glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, @@ -189,10 +176,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): start", az.ResourceGroup) result, retryErr = az.LoadBalancerClient.ListNextResults(result) - glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): end", az.ResourceGroup) if retryErr != nil { glog.Errorf("LoadBalancerClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, @@ -218,10 +202,7 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd var result network.PublicIPAddressListResult err := 
wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.List(%v): start", pipResourceGroup) result, retryErr = az.PublicIPAddressesClient.List(pipResourceGroup) - glog.V(10).Infof("PublicIPAddressesClient.List(%v): end", pipResourceGroup) if retryErr != nil { glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v", pipResourceGroup, @@ -244,10 +225,7 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): start", pipResourceGroup) result, retryErr = az.PublicIPAddressesClient.ListNextResults(result) - glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): end", pipResourceGroup) if retryErr != nil { glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", pipResourceGroup, @@ -270,8 +248,6 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd // CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): start", pipResourceGroup, *pip.Name) respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(pipResourceGroup, *pip.Name, pip, nil) resp := <-respChan err := <-errChan @@ -283,8 +259,6 @@ func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network // CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with 
exponential backoff retry func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): start", *nic.Name) respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) resp := <-respChan err := <-errChan @@ -296,12 +270,9 @@ func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { // DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.Delete(%s, %s): start", pipResourceGroup, pipName) respChan, errChan := az.PublicIPAddressesClient.Delete(pipResourceGroup, pipName, nil) resp := <-respChan err := <-errChan - glog.V(10).Infof("PublicIPAddressesClient.Delete(%s, %s): end", pipResourceGroup, pipName) return processRetryResponse(resp, err) }) } @@ -309,12 +280,9 @@ func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string // DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry func (az *Cloud) DeleteLBWithRetry(lbName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.Delete(%s): start", lbName) respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) resp := <-respChan err := <-errChan - glog.V(10).Infof("LoadBalancerClient.Delete(%s): end", lbName) return processRetryResponse(resp, err) }) } @@ -322,12 +290,9 @@ func (az *Cloud) DeleteLBWithRetry(lbName string) error { // CreateOrUpdateRouteTableWithRetry invokes 
az.RouteTablesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): start", *routeTable.Name) respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) resp := <-respChan err := <-errChan - glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): end", *routeTable.Name) return processRetryResponse(resp.Response, err) }) } @@ -335,8 +300,6 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable // CreateOrUpdateRouteWithRetry invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): start", *route.Name) respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) resp := <-respChan err := <-errChan @@ -348,8 +311,6 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { // DeleteRouteWithRetry invokes az.RoutesClient.Delete with exponential backoff retry func (az *Cloud) DeleteRouteWithRetry(routeName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RoutesClient.Delete(%s): start", az.RouteTableName) respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) resp := <-respChan err := <-errChan @@ -361,8 +322,6 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error { // CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential 
backoff retry func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): start", vmName) respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) resp := <-respChan err := <-errChan diff --git a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go index ea32f3f477f..ad40f3c5b85 100644 --- a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go +++ b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go @@ -110,7 +110,6 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri } vmName := mapNodeNameToVMName(nodeName) glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", c.resourceGroup, vmName) - c.cloud.operationPollRateLimiter.Accept() respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil) resp := <-respChan err = <-errChan @@ -176,7 +175,6 @@ func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName t } vmName := mapNodeNameToVMName(nodeName) glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", c.resourceGroup, vmName) - c.cloud.operationPollRateLimiter.Accept() respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil) resp := <-respChan err = <-errChan diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index d6f4bdfac38..cc64f80826b 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -415,7 +415,6 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } pip.Tags = 
&map[string]*string{"service": &serviceName} glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name) - az.operationPollRateLimiter.Accept() glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name) err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip) if err != nil { @@ -424,10 +423,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %q): start", pipResourceGroup, *pip.Name) pip, err = az.PublicIPAddressesClient.Get(pipResourceGroup, *pip.Name, "") - glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %q): end", pipResourceGroup, *pip.Name) if err != nil { return nil, err } @@ -762,14 +758,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName) // Remove the LB. 
- az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.Delete(%q): start", lbName) + glog.V(10).Infof("az.DeleteLBWithRetry(%q): start", lbName) err = az.DeleteLBWithRetry(lbName) if err != nil { glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) return nil, err } - glog.V(10).Infof("LoadBalancerClient.Delete(%q): end", lbName) + glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName) } else { glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName) err := az.CreateOrUpdateLBWithRetry(*lb) @@ -808,10 +803,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, ports = []v1.ServicePort{} } - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SecurityGroupsClient.Get(%q): start", az.SecurityGroupName) sg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "") - glog.V(10).Infof("SecurityGroupsClient.Get(%q): end", az.SecurityGroupName) if err != nil { return nil, err } @@ -980,7 +972,6 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, if dirtySg { sg.SecurityRules = &updatedRules glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name) - az.operationPollRateLimiter.Accept() glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name) err := az.CreateOrUpdateSGWithRetry(sg) if err != nil { @@ -1169,7 +1160,6 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want // Public ip resource with match service tag } else { glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName) - az.operationPollRateLimiter.Accept() glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName) err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName) if err != nil { diff --git a/pkg/cloudprovider/providers/azure/azure_routes.go b/pkg/cloudprovider/providers/azure/azure_routes.go index 60c5049b052..eef61003ff8 
100644 --- a/pkg/cloudprovider/providers/azure/azure_routes.go +++ b/pkg/cloudprovider/providers/azure/azure_routes.go @@ -77,8 +77,6 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo } glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): start", az.RouteTableName) respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) resp := <-respChan err := <-errChan @@ -119,8 +117,6 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo } glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): start", az.RouteTableName) respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) resp := <-respChan err = <-errChan @@ -147,8 +143,6 @@ func (az *Cloud) DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) glog.V(2).Infof("delete: deleting route. 
clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) routeName := mapNodeNameToRouteName(kubeRoute.TargetNode) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RoutesClient.Delete(%q): start", az.RouteTableName) respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) resp := <-respChan err := <-errChan diff --git a/pkg/cloudprovider/providers/azure/azure_storageaccount.go b/pkg/cloudprovider/providers/azure/azure_storageaccount.go index ad69c0a532f..4d33fb21e66 100644 --- a/pkg/cloudprovider/providers/azure/azure_storageaccount.go +++ b/pkg/cloudprovider/providers/azure/azure_storageaccount.go @@ -19,8 +19,6 @@ package azure import ( "fmt" "strings" - - "github.com/golang/glog" ) type accountWithLocation struct { @@ -29,10 +27,7 @@ type accountWithLocation struct { // getStorageAccounts gets the storage accounts' name, type, location in a resource group func (az *Cloud) getStorageAccounts() ([]accountWithLocation, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("StorageAccountClient.ListByResourceGroup(%v): start", az.ResourceGroup) result, err := az.StorageAccountClient.ListByResourceGroup(az.ResourceGroup) - glog.V(10).Infof("StorageAccountClient.ListByResourceGroup(%v): end", az.ResourceGroup) if err != nil { return nil, err } @@ -61,10 +56,7 @@ func (az *Cloud) getStorageAccounts() ([]accountWithLocation, error) { // getStorageAccesskey gets the storage account access key func (az *Cloud) getStorageAccesskey(account string) (string, error) { - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("StorageAccountClient.ListKeys(%q): start", account) result, err := az.StorageAccountClient.ListKeys(az.ResourceGroup, account) - glog.V(10).Infof("StorageAccountClient.ListKeys(%q): end", account) if err != nil { return "", err } diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index 
47181bb0d3f..40a1153680b 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -25,7 +25,6 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/flowcontrol" serviceapi "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -862,7 +861,6 @@ func getTestCloud() (az *Cloud) { MaximumLoadBalancerRuleCount: 250, }, } - az.operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(100, 100) az.LoadBalancerClient = newFakeAzureLBClient() az.PublicIPAddressesClient = newFakeAzurePIPClient(az.Config.SubscriptionID) az.SubnetsClient = newFakeAzureSubnetsClient() diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_util.go index c7fcef10ebd..7a42ab52cd6 100644 --- a/pkg/cloudprovider/providers/azure/azure_util.go +++ b/pkg/cloudprovider/providers/azure/azure_util.go @@ -389,7 +389,6 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) var machine compute.VirtualMachine var err error - as.operationPollRateLimiter.Accept() machine, err = as.getVirtualMachine(types.NodeName(name)) if err != nil { if as.CloudProviderBackoff { @@ -563,7 +562,6 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) { var machine compute.VirtualMachine - as.operationPollRateLimiter.Accept() machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName)) if err != nil { glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) @@ -589,10 +587,7 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (netw } } - as.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.Get(%q): 
start", nicName) nic, err := as.InterfacesClient.Get(as.ResourceGroup, nicName, "") - glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) if err != nil { return network.Interface{}, err } @@ -642,8 +637,6 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N nicName := *nic.Name glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) - as.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): start", *nic.Name) respChan, errChan := as.InterfacesClient.CreateOrUpdate(as.ResourceGroup, *nic.Name, nic, nil) resp := <-respChan err := <-errChan diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index c9b827c308e..2116e4f0dc7 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -401,10 +401,7 @@ func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) { allScaleSets := make([]string, 0) backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.List start for %v", ss.ResourceGroup) result, err = ss.VirtualMachineScaleSetsClient.List(ss.ResourceGroup) - glog.V(10).Infof("VirtualMachineScaleSetsClient.List end for %v", ss.ResourceGroup) if err != nil { glog.Errorf("VirtualMachineScaleSetsClient.List for %v failed: %v", ss.ResourceGroup, err) return false, err @@ -425,10 +422,7 @@ func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) { if result.NextLink != nil { backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.ListNextResults start for %v", ss.ResourceGroup) result, err = ss.VirtualMachineScaleSetsClient.ListNextResults(result) - glog.V(10).Infof("VirtualMachineScaleSetsClient.ListNextResults 
end for %v", ss.ResourceGroup) if err != nil { glog.Errorf("VirtualMachineScaleSetsClient.ListNextResults for %v failed: %v", ss.ResourceGroup, err) return false, err @@ -455,10 +449,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir allVMs := make([]compute.VirtualMachineScaleSetVM, 0) backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.List start for %v", scaleSetName) result, err = ss.VirtualMachineScaleSetVMsClient.List(ss.ResourceGroup, scaleSetName, "", "", string(compute.InstanceView)) - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.List end for %v", scaleSetName) if err != nil { glog.Errorf("VirtualMachineScaleSetVMsClient.List for %v failed: %v", scaleSetName, err) return false, err @@ -477,10 +468,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir if result.NextLink != nil { backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.ListNextResults start for %v", scaleSetName) result, err = ss.VirtualMachineScaleSetVMsClient.ListNextResults(result) - glog.V(10).Infof("VirtualMachineScaleSetVMsClient.ListNextResults end for %v", ss.ResourceGroup) if err != nil { glog.Errorf("VirtualMachineScaleSetVMsClient.ListNextResults for %v failed: %v", scaleSetName, err) return false, err @@ -622,10 +610,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int return network.Interface{}, err } - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName) nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, vm.ScaleSetName, vm.InstanceID, nicName, "") - glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) if err != nil { glog.Errorf("error: 
ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, vm.ScaleSetName, nicName, err) return network.Interface{}, err @@ -642,11 +627,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int // getScaleSet gets a scale set by name. func (ss *scaleSet) getScaleSet(name string) (compute.VirtualMachineScaleSet, bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.Get(%s): start", name) result, err := ss.VirtualMachineScaleSetsClient.Get(ss.ResourceGroup, name) - glog.V(10).Infof("VirtualMachineScaleSetsClient.Get(%s): end", name) - exists, realErr := checkResourceExistsFromError(err) if realErr != nil { return result, false, realErr @@ -714,8 +695,6 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachine // createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry. func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error { return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): start", *virtualMachineScaleSet.Name) respChan, errChan := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet, nil) resp := <-respChan err := <-errChan @@ -727,8 +706,6 @@ func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.V // updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry. 
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error { return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): start", scaleSetName) respChan, errChan := ss.VirtualMachineScaleSetsClient.UpdateInstances(ss.ResourceGroup, scaleSetName, vmInstanceIDs, nil) resp := <-respChan err := <-errChan @@ -784,8 +761,6 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName) - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): start", vmSetName) respChan, errChan := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, vmSetName, virtualMachineScaleSet, nil) resp := <-respChan err := <-errChan @@ -829,8 +804,6 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{ InstanceIds: &instanceIDs, } - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): start", vmSetName) respChan, errChan := ss.VirtualMachineScaleSetsClient.UpdateInstances(ss.ResourceGroup, vmSetName, vmInstanceIDs, nil) resp := <-respChan err = <-errChan @@ -898,8 +871,6 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error { // Update scale set with backoff. 
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", vmSetName) - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): start", vmSetName) respChan, errChan := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, vmSetName, virtualMachineScaleSet, nil) resp := <-respChan err = <-errChan @@ -921,8 +892,6 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error { vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{ InstanceIds: &instanceIDs, } - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): start", vmSetName) updateRespChan, errChan := ss.VirtualMachineScaleSetsClient.UpdateInstances(ss.ResourceGroup, vmSetName, vmInstanceIDs, nil) updateResp := <-updateRespChan err = <-errChan @@ -943,8 +912,6 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error { // TODO: remove this workaround when figuring out the root cause. 
if len(newBackendPools) == 0 { glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", vmSetName) - ss.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): start", vmSetName) respChan, errChan = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ss.ResourceGroup, vmSetName, virtualMachineScaleSet, nil) resp = <-respChan err = <-errChan diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index 85b67c456b0..9db1a457b5a 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -96,11 +96,7 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM // case we do get instance view every time to fulfill the azure_zones requirement without hitting // throttling. // Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.Get(%s): start", vmName) vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, compute.InstanceView) - glog.V(10).Infof("VirtualMachinesClient.Get(%s): end", vmName) - exists, realErr := checkResourceExistsFromError(err) if realErr != nil { return vm, realErr @@ -122,11 +118,7 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) { var realErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RouteTablesClient.Get(%s): start", az.RouteTableName) routeTable, err = az.RouteTablesClient.Get(az.ResourceGroup, az.RouteTableName, "") - glog.V(10).Infof("RouteTablesClient.Get(%s): end", az.RouteTableName) - exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return routeTable, false, realErr @@ -142,11 +134,7 @@ func (az *Cloud) 
getRouteTable() (routeTable network.RouteTable, exists bool, er func (az *Cloud) getSecurityGroup() (sg network.SecurityGroup, exists bool, err error) { var realErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SecurityGroupsClient.Get(%s): start", az.SecurityGroupName) sg, err = az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "") - glog.V(10).Infof("SecurityGroupsClient.Get(%s): end", az.SecurityGroupName) - exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return sg, false, realErr @@ -161,11 +149,8 @@ func (az *Cloud) getSecurityGroup() (sg network.SecurityGroup, exists bool, err func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) { var realErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.Get(%s): start", name) - lb, err = az.LoadBalancerClient.Get(az.ResourceGroup, name, "") - glog.V(10).Infof("LoadBalancerClient.Get(%s): end", name) + lb, err = az.LoadBalancerClient.Get(az.ResourceGroup, name, "") exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return lb, false, realErr @@ -181,10 +166,7 @@ func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exi func (az *Cloud) listLoadBalancers() (lbListResult network.LoadBalancerListResult, exists bool, err error) { var realErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.List(%s): start", az.ResourceGroup) lbListResult, err = az.LoadBalancerClient.List(az.ResourceGroup) - glog.V(10).Infof("LoadBalancerClient.List(%s): end", az.ResourceGroup) exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return lbListResult, false, realErr @@ -204,11 +186,7 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi } var realErr error - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %s): start", resourceGroup, 
pipName) pip, err = az.PublicIPAddressesClient.Get(resourceGroup, pipName, "") - glog.V(10).Infof("PublicIPAddressesClient.Get(%s, %s): end", resourceGroup, pipName) - exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return pip, false, realErr @@ -231,11 +209,7 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet rg = az.ResourceGroup } - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SubnetsClient.Get(%s): start", subnetName) subnet, err = az.SubnetsClient.Get(rg, virtualNetworkName, subnetName, "") - glog.V(10).Infof("SubnetsClient.Get(%s): end", subnetName) - exists, realErr = checkResourceExistsFromError(err) if realErr != nil { return subnet, false, realErr From 3cde2613fff06d1b35739831a1cee62640d80d0e Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Tue, 9 Jan 2018 23:09:08 -0800 Subject: [PATCH 708/794] Fix lint and bazel --- hack/update-cloudprovider-gce.sh | 2 ++ hack/verify-cloudprovider-gce.sh | 1 - pkg/cloudprovider/providers/gce/cloud/gen/BUILD | 5 +---- pkg/cloudprovider/providers/gce/cloud/gen/main.go | 2 +- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/hack/update-cloudprovider-gce.sh b/hack/update-cloudprovider-gce.sh index b7d606c95b4..90b8659c563 100755 --- a/hack/update-cloudprovider-gce.sh +++ b/hack/update-cloudprovider-gce.sh @@ -1,4 +1,5 @@ #!/bin/bash + # Copyright 2018 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ set -o errexit set -o nounset set -o pipefail diff --git a/hack/verify-cloudprovider-gce.sh b/hack/verify-cloudprovider-gce.sh index c7615d36592..1aae5aae42a 100755 --- a/hack/verify-cloudprovider-gce.sh +++ b/hack/verify-cloudprovider-gce.sh @@ -1,5 +1,4 @@ #!/bin/bash -#!/bin/bash # Copyright 2018 The Kubernetes Authors. # diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/BUILD b/pkg/cloudprovider/providers/gce/cloud/gen/BUILD index e196daf2ac8..a3591435fe4 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen/BUILD +++ b/pkg/cloudprovider/providers/gce/cloud/gen/BUILD @@ -5,10 +5,7 @@ go_library( srcs = ["main.go"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen", visibility = ["//visibility:private"], - deps = [ - "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], + deps = ["//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library"], ) go_binary( diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go index ba0dd9cc2f0..d6e16f16ec4 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen/main.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go @@ -1113,7 +1113,7 @@ func Test{{.Service}}Group(t *testing.T) { tmpl := template.Must(template.New("unittest").Parse(text)) // Sort keys so the output will be stable. 
var keys []string - for k, _ := range meta.AllServicesByGroup { + for k := range meta.AllServicesByGroup { keys = append(keys, k) } sort.Strings(keys) From c3a885353b0443ffb8ef8a906116b0f9be752247 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 10 Jan 2018 17:19:35 +0800 Subject: [PATCH 709/794] Add azClientConfig to pass all essential information to create clients --- pkg/cloudprovider/providers/azure/azure.go | 30 +++-- .../providers/azure/azure_client.go | 124 ++++++++++-------- 2 files changed, 84 insertions(+), 70 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 9d61124f194..9070be894d4 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -171,22 +171,28 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { config.CloudProviderRateLimitBucket) } + azClientConfig := &azClientConfig{ + subscriptionID: config.SubscriptionID, + resourceManagerEndpoint: env.ResourceManagerEndpoint, + servicePrincipalToken: servicePrincipalToken, + rateLimiter: operationPollRateLimiter, + } az := Cloud{ Config: *config, Environment: *env, - DisksClient: newAzDisksClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - RoutesClient: newAzRoutesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - SubnetsClient: newAzSubnetsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - InterfacesClient: newAzInterfacesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - RouteTablesClient: newAzRouteTablesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - LoadBalancerClient: newAzLoadBalancersClient(config.SubscriptionID, env.ResourceManagerEndpoint, 
servicePrincipalToken, operationPollRateLimiter), - SecurityGroupsClient: newAzSecurityGroupsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - StorageAccountClient: newAzStorageAccountClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - VirtualMachinesClient: newAzVirtualMachinesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - PublicIPAddressesClient: newAzPublicIPAddressesClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), - VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(config.SubscriptionID, env.ResourceManagerEndpoint, servicePrincipalToken, operationPollRateLimiter), + DisksClient: newAzDisksClient(azClientConfig), + RoutesClient: newAzRoutesClient(azClientConfig), + SubnetsClient: newAzSubnetsClient(azClientConfig), + InterfacesClient: newAzInterfacesClient(azClientConfig), + RouteTablesClient: newAzRouteTablesClient(azClientConfig), + LoadBalancerClient: newAzLoadBalancersClient(azClientConfig), + SecurityGroupsClient: newAzSecurityGroupsClient(azClientConfig), + StorageAccountClient: newAzStorageAccountClient(azClientConfig), + VirtualMachinesClient: newAzVirtualMachinesClient(azClientConfig), + PublicIPAddressesClient: newAzPublicIPAddressesClient(azClientConfig), + VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(azClientConfig), + VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(azClientConfig), } // Conditionally configure resource request backoff diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index 3a359dd6893..7cf65fe13e9 
100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -124,21 +124,29 @@ type DisksClient interface { Get(resourceGroupName string, diskName string) (result disk.Model, err error) } +// azClientConfig contains all essential information to create an Azure client. +type azClientConfig struct { + subscriptionID string + resourceManagerEndpoint string + servicePrincipalToken *adal.ServicePrincipalToken + rateLimiter flowcontrol.RateLimiter +} + // azVirtualMachinesClient implements VirtualMachinesClient. type azVirtualMachinesClient struct { client compute.VirtualMachinesClient rateLimiter flowcontrol.RateLimiter } -func newAzVirtualMachinesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachinesClient { - virtualMachinesClient := compute.NewVirtualMachinesClient(subscriptionID) - virtualMachinesClient.BaseURI = endpoint - virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzVirtualMachinesClient(config *azClientConfig) *azVirtualMachinesClient { + virtualMachinesClient := compute.NewVirtualMachinesClient(config.subscriptionID) + virtualMachinesClient.BaseURI = config.resourceManagerEndpoint + virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) virtualMachinesClient.PollingDelay = 5 * time.Second configureUserAgent(&virtualMachinesClient.Client) return &azVirtualMachinesClient{ - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, client: virtualMachinesClient, } } @@ -189,15 +197,15 @@ type azInterfacesClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzInterfacesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azInterfacesClient { - interfacesClient := network.NewInterfacesClient(subscriptionID) - 
interfacesClient.BaseURI = endpoint - interfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzInterfacesClient(config *azClientConfig) *azInterfacesClient { + interfacesClient := network.NewInterfacesClient(config.subscriptionID) + interfacesClient.BaseURI = config.resourceManagerEndpoint + interfacesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) interfacesClient.PollingDelay = 5 * time.Second configureUserAgent(&interfacesClient.Client) return &azInterfacesClient{ - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, client: interfacesClient, } } @@ -238,15 +246,15 @@ type azLoadBalancersClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzLoadBalancersClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azLoadBalancersClient { - loadBalancerClient := network.NewLoadBalancersClient(subscriptionID) - loadBalancerClient.BaseURI = endpoint - loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzLoadBalancersClient(config *azClientConfig) *azLoadBalancersClient { + loadBalancerClient := network.NewLoadBalancersClient(config.subscriptionID) + loadBalancerClient.BaseURI = config.resourceManagerEndpoint + loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) loadBalancerClient.PollingDelay = 5 * time.Second configureUserAgent(&loadBalancerClient.Client) return &azLoadBalancersClient{ - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, client: loadBalancerClient, } } @@ -307,15 +315,15 @@ type azPublicIPAddressesClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzPublicIPAddressesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azPublicIPAddressesClient { - publicIPAddressClient := 
network.NewPublicIPAddressesClient(subscriptionID) - publicIPAddressClient.BaseURI = endpoint - publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzPublicIPAddressesClient(config *azClientConfig) *azPublicIPAddressesClient { + publicIPAddressClient := network.NewPublicIPAddressesClient(config.subscriptionID) + publicIPAddressClient.BaseURI = config.resourceManagerEndpoint + publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) publicIPAddressClient.PollingDelay = 5 * time.Second configureUserAgent(&publicIPAddressClient.Client) return &azPublicIPAddressesClient{ - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, client: publicIPAddressClient, } } @@ -376,16 +384,16 @@ type azSubnetsClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzSubnetsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azSubnetsClient { - subnetsClient := network.NewSubnetsClient(subscriptionID) - subnetsClient.BaseURI = endpoint - subnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzSubnetsClient(config *azClientConfig) *azSubnetsClient { + subnetsClient := network.NewSubnetsClient(config.subscriptionID) + subnetsClient.BaseURI = config.resourceManagerEndpoint + subnetsClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) subnetsClient.PollingDelay = 5 * time.Second configureUserAgent(&subnetsClient.Client) return &azSubnetsClient{ client: subnetsClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -435,15 +443,15 @@ type azSecurityGroupsClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzSecurityGroupsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azSecurityGroupsClient { - securityGroupsClient := 
network.NewSecurityGroupsClient(subscriptionID) - securityGroupsClient.BaseURI = endpoint - securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzSecurityGroupsClient(config *azClientConfig) *azSecurityGroupsClient { + securityGroupsClient := network.NewSecurityGroupsClient(config.subscriptionID) + securityGroupsClient.BaseURI = config.resourceManagerEndpoint + securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) securityGroupsClient.PollingDelay = 5 * time.Second configureUserAgent(&securityGroupsClient.Client) return &azSecurityGroupsClient{ - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, client: securityGroupsClient, } } @@ -494,16 +502,16 @@ type azVirtualMachineScaleSetsClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzVirtualMachineScaleSetsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachineScaleSetsClient { - virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(subscriptionID) - virtualMachineScaleSetsClient.BaseURI = endpoint - virtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzVirtualMachineScaleSetsClient(config *azClientConfig) *azVirtualMachineScaleSetsClient { + virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(config.subscriptionID) + virtualMachineScaleSetsClient.BaseURI = config.resourceManagerEndpoint + virtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) virtualMachineScaleSetsClient.PollingDelay = 5 * time.Second configureUserAgent(&virtualMachineScaleSetsClient.Client) return &azVirtualMachineScaleSetsClient{ client: virtualMachineScaleSetsClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -563,16 +571,16 @@ type azVirtualMachineScaleSetVMsClient struct { 
rateLimiter flowcontrol.RateLimiter } -func newAzVirtualMachineScaleSetVMsClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azVirtualMachineScaleSetVMsClient { - virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID) - virtualMachineScaleSetVMsClient.BaseURI = endpoint - virtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzVirtualMachineScaleSetVMsClient(config *azClientConfig) *azVirtualMachineScaleSetVMsClient { + virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(config.subscriptionID) + virtualMachineScaleSetVMsClient.BaseURI = config.resourceManagerEndpoint + virtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) virtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second configureUserAgent(&virtualMachineScaleSetVMsClient.Client) return &azVirtualMachineScaleSetVMsClient{ client: virtualMachineScaleSetVMsClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -622,16 +630,16 @@ type azRoutesClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzRoutesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azRoutesClient { - routesClient := network.NewRoutesClient(subscriptionID) - routesClient.BaseURI = endpoint - routesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzRoutesClient(config *azClientConfig) *azRoutesClient { + routesClient := network.NewRoutesClient(config.subscriptionID) + routesClient.BaseURI = config.resourceManagerEndpoint + routesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) routesClient.PollingDelay = 5 * time.Second configureUserAgent(&routesClient.Client) return &azRoutesClient{ client: routesClient, - 
rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -661,16 +669,16 @@ type azRouteTablesClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzRouteTablesClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azRouteTablesClient { - routeTablesClient := network.NewRouteTablesClient(subscriptionID) - routeTablesClient.BaseURI = endpoint - routeTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzRouteTablesClient(config *azClientConfig) *azRouteTablesClient { + routeTablesClient := network.NewRouteTablesClient(config.subscriptionID) + routeTablesClient.BaseURI = config.resourceManagerEndpoint + routeTablesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) routeTablesClient.PollingDelay = 5 * time.Second configureUserAgent(&routeTablesClient.Client) return &azRouteTablesClient{ client: routeTablesClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } @@ -700,15 +708,15 @@ type azStorageAccountClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzStorageAccountClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azStorageAccountClient { - storageAccountClient := storage.NewAccountsClientWithBaseURI(endpoint, subscriptionID) - storageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzStorageAccountClient(config *azClientConfig) *azStorageAccountClient { + storageAccountClient := storage.NewAccountsClientWithBaseURI(config.resourceManagerEndpoint, config.subscriptionID) + storageAccountClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) storageAccountClient.PollingDelay = 5 * time.Second configureUserAgent(&storageAccountClient.Client) return &azStorageAccountClient{ client: storageAccountClient, - rateLimiter: rateLimiter, 
+ rateLimiter: config.rateLimiter, } } @@ -768,15 +776,15 @@ type azDisksClient struct { rateLimiter flowcontrol.RateLimiter } -func newAzDisksClient(subscriptionID string, endpoint string, servicePrincipalToken *adal.ServicePrincipalToken, rateLimiter flowcontrol.RateLimiter) *azDisksClient { - disksClient := disk.NewDisksClientWithBaseURI(endpoint, subscriptionID) - disksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) +func newAzDisksClient(config *azClientConfig) *azDisksClient { + disksClient := disk.NewDisksClientWithBaseURI(config.resourceManagerEndpoint, config.subscriptionID) + disksClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken) disksClient.PollingDelay = 5 * time.Second configureUserAgent(&disksClient.Client) return &azDisksClient{ client: disksClient, - rateLimiter: rateLimiter, + rateLimiter: config.rateLimiter, } } From fc6443ce2c3a2b36b9f7dbf2388055751b612fdd Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Fri, 8 Dec 2017 07:11:48 +0530 Subject: [PATCH 710/794] Add volID based delete() and resize() if volID is available in pv spec. 
Signed-off-by: Humble Chirammal --- pkg/volume/glusterfs/glusterfs.go | 33 ++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index c42e3cdf794..144724ecfef 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -596,8 +596,13 @@ func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) { func (d *glusterfsVolumeDeleter) Delete() error { glog.V(2).Infof("delete volume: %s ", d.glusterfsMounter.path) + volumeName := d.glusterfsMounter.path - volumeID := dstrings.TrimPrefix(volumeName, volPrefix) + volumeID, err := getVolumeID(d.spec, volumeName) + if err != nil { + return fmt.Errorf("failed to get volumeID, err: %v", err) + } + class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec) if err != nil { return err @@ -1048,13 +1053,35 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa return &cfg, nil } +// getVolumeID returns volumeID from the PV or volumename. +func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) { + volumeID := "" + + // Get volID from pvspec if available, else fill it from volumename. + if pv != nil { + if pv.Annotations["VolID"] != "" { + volumeID = pv.Annotations["VolID"] + } else { + volumeID = dstrings.TrimPrefix(volumeName, volPrefix) + } + } else { + return volumeID, fmt.Errorf("provided PV spec is nil") + } + if volumeID == "" { + return volumeID, fmt.Errorf("volume ID is empty") + } + return volumeID, nil +} + func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { pvSpec := spec.PersistentVolume.Spec glog.V(2).Infof("Request to expand volume: %s ", pvSpec.Glusterfs.Path) volumeName := pvSpec.Glusterfs.Path + volumeID, err := getVolumeID(spec.PersistentVolume, volumeName) - // Fetch the volume for expansion. 
- volumeID := dstrings.TrimPrefix(volumeName, volPrefix) + if err != nil { + return oldSize, fmt.Errorf("failed to get volumeID, err: %v", err) + } //Get details of SC. class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume) From 19003486bfa241d539b13b030979f31f7713450e Mon Sep 17 00:00:00 2001 From: linyouchong Date: Mon, 25 Dec 2017 17:36:59 +0800 Subject: [PATCH 711/794] Fix bug:Kubelet failure to umount mount points --- pkg/volume/util/BUILD | 9 ++--- pkg/volume/util/util.go | 71 +++++++++++++++++++++++++++--------- pkg/volume/util/util_test.go | 41 +++++++++++++++++++++ 3 files changed, 97 insertions(+), 24 deletions(-) diff --git a/pkg/volume/util/BUILD b/pkg/volume/util/BUILD index 22c8fd82cd4..7a447ddb976 100644 --- a/pkg/volume/util/BUILD +++ b/pkg/volume/util/BUILD @@ -147,15 +147,12 @@ go_test( deps = [ "//pkg/apis/core/install:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/util/mount:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/k8s.io/client-go/util/testing:go_default_library", - ], - "//conditions:default": [], - }), + "//vendor/k8s.io/client-go/util/testing:go_default_library", + ], ) filegroup( diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index 106036dfb57..6b6528f8ab6 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -23,6 +23,7 @@ import ( "path" "path/filepath" "strings" + "syscall" "github.com/golang/glog" "k8s.io/api/core/v1" @@ -96,29 +97,42 @@ func UnmountPath(mountPath string, mounter mount.Interface) error { // IsNotMountPoint will be called instead of IsLikelyNotMountPoint. // IsNotMountPoint is more expensive but properly handles bind mounts. 
func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error { - if pathExists, pathErr := PathExists(mountPath); pathErr != nil { - return fmt.Errorf("Error checking if path exists: %v", pathErr) - } else if !pathExists { + pathExists, pathErr := PathExists(mountPath) + if !pathExists { glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath) return nil } - - var notMnt bool - var err error - - if extensiveMountPointCheck { - notMnt, err = mount.IsNotMountPoint(mounter, mountPath) - } else { - notMnt, err = mounter.IsLikelyNotMountPoint(mountPath) + corruptedMnt := isCorruptedMnt(pathErr) + if pathErr != nil && !corruptedMnt { + return fmt.Errorf("Error checking path: %v", pathErr) } + return doUnmountMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt) +} - if err != nil { - return err - } +// doUnmountMountPoint is a common unmount routine that unmounts the given path and +// deletes the remaining directory if successful. +// if extensiveMountPointCheck is true +// IsNotMountPoint will be called instead of IsLikelyNotMountPoint. +// IsNotMountPoint is more expensive but properly handles bind mounts. 
+// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, Take it as an argument for convenience of testing +func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool, corruptedMnt bool) error { + if !corruptedMnt { + var notMnt bool + var err error + if extensiveMountPointCheck { + notMnt, err = mount.IsNotMountPoint(mounter, mountPath) + } else { + notMnt, err = mounter.IsLikelyNotMountPoint(mountPath) + } - if notMnt { - glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) - return os.Remove(mountPath) + if err != nil { + return err + } + + if notMnt { + glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) + return os.Remove(mountPath) + } } // Unmount the mount path @@ -128,7 +142,7 @@ func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMount } notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath) if mntErr != nil { - return err + return mntErr } if notMnt { glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) @@ -144,11 +158,32 @@ func PathExists(path string) (bool, error) { return true, nil } else if os.IsNotExist(err) { return false, nil + } else if isCorruptedMnt(err) { + return true, err } else { return false, err } } +// isCorruptedMnt return true if err is about corrupted mount point +func isCorruptedMnt(err error) bool { + if err == nil { + return false + } + var underlyingError error + switch pe := err.(type) { + case nil: + return false + case *os.PathError: + underlyingError = pe.Err + case *os.LinkError: + underlyingError = pe.Err + case *os.SyscallError: + underlyingError = pe.Err + } + return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE +} + // GetSecretForPod locates secret by name in the pod's namespace and returns secret map func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) { secret := make(map[string]string) diff 
--git a/pkg/volume/util/util_test.go b/pkg/volume/util/util_test.go index b11be33eeb2..5fd11f0861e 100644 --- a/pkg/volume/util/util_test.go +++ b/pkg/volume/util/util_test.go @@ -24,10 +24,12 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + utiltesting "k8s.io/client-go/util/testing" // util.go uses api.Codecs.LegacyCodec so import this package to do some // resource initialization. _ "k8s.io/kubernetes/pkg/apis/core/install" "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/util/mount" ) var nodeLabels map[string]string = map[string]string{ @@ -263,3 +265,42 @@ func TestZonesToSet(t *testing.T) { } } } + +func TestDoUnmountMountPoint(t *testing.T) { + + tmpDir1, err1 := utiltesting.MkTmpdir("umount_test1") + if err1 != nil { + t.Fatalf("error creating temp dir: %v", err1) + } + defer os.RemoveAll(tmpDir1) + + tmpDir2, err2 := utiltesting.MkTmpdir("umount_test2") + if err2 != nil { + t.Fatalf("error creating temp dir: %v", err2) + } + defer os.RemoveAll(tmpDir2) + + // Second part: want no error + tests := []struct { + mountPath string + corruptedMnt bool + }{ + { + mountPath: tmpDir1, + corruptedMnt: true, + }, + { + mountPath: tmpDir2, + corruptedMnt: false, + }, + } + + fake := &mount.FakeMounter{} + + for _, tt := range tests { + err := doUnmountMountPoint(tt.mountPath, fake, false, tt.corruptedMnt) + if err != nil { + t.Errorf("err Expected nil, but got: %v", err) + } + } +} From 713e28874afab96b91000d187b0d3d6ce01abf2a Mon Sep 17 00:00:00 2001 From: lcfang Date: Wed, 10 Jan 2018 22:05:00 +0800 Subject: [PATCH 712/794] fixed some bad url --- api/swagger-spec/apps_v1alpha1.json | 2 +- .../src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go | 2 +- .../src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto | 2 +- staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git 
a/api/swagger-spec/apps_v1alpha1.json b/api/swagger-spec/apps_v1alpha1.json index aa3fbdcc2d8..6f546623de3 100644 --- a/api/swagger-spec/apps_v1alpha1.json +++ b/api/swagger-spec/apps_v1alpha1.json @@ -1311,7 +1311,7 @@ }, "serviceAccountName": { "type": "string", - "description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md" + "description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://git.k8s.io/community/contributors/design-proposals/auth/service_accounts.md" }, "serviceAccount": { "type": "string", diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go index cd505e007d0..7e880ab33f3 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/types.go @@ -136,7 +136,7 @@ type CarpSpec struct { NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` // ServiceAccountName is the name of the ServiceAccount to use to run this carp. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: https://git.k8s.io/community/contributors/design-proposals/auth/service_accounts.md // +optional ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto index de59bc3ddaa..dc78ca40e7f 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto @@ -122,7 +122,7 @@ message PodSpec { map nodeSelector = 7; // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: https://git.k8s.io/community/contributors/design-proposals/auth/service_accounts.md // +optional optional string serviceAccountName = 8; diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go index 7be2a0c6ff4..06c3f9f8873 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go @@ -136,7 +136,7 @@ type PodSpec struct { NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: https://git.k8s.io/community/contributors/design-proposals/auth/service_accounts.md // +optional ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. 
From 8b5f293b3df27c7436ba8ce8130f5d5b447ff920 Mon Sep 17 00:00:00 2001 From: Scott Creeley Date: Wed, 10 Jan 2018 10:21:44 -0500 Subject: [PATCH 713/794] fix for local-up-cluster.sh bad cloud_config_arg --- hack/local-up-cluster.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 4c6b855331b..46a24fb37b0 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -605,7 +605,7 @@ function start_controller_manager { node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 " fi - cloud_config_arg=cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" + cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then cloud_config_arg="--cloud-provider=external" fi @@ -668,7 +668,7 @@ function start_kubelet { priv_arg="--allow-privileged " fi - cloud_config_arg=cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" + cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then cloud_config_arg="--cloud-provider=external" fi From a6d979dd88141f362e48c49e7407d8187c3047da Mon Sep 17 00:00:00 2001 From: Mitsuhiro Tanino Date: Sat, 28 Oct 2017 15:28:52 -0400 Subject: [PATCH 714/794] Block volumes Support: iSCSI plugin update This patch adds block volume support to iSCSI volume plugin. 
--- pkg/volume/iscsi/attacher.go | 118 +++------ pkg/volume/iscsi/disk_manager.go | 6 +- pkg/volume/iscsi/iscsi.go | 405 +++++++++++++++++++++++++------ pkg/volume/iscsi/iscsi_test.go | 129 +++++++++- pkg/volume/iscsi/iscsi_util.go | 201 ++++++++++++--- 5 files changed, 658 insertions(+), 201 deletions(-) diff --git a/pkg/volume/iscsi/attacher.go b/pkg/volume/iscsi/attacher.go index b86b2f2499e..2aab2ecc9d5 100644 --- a/pkg/volume/iscsi/attacher.go +++ b/pkg/volume/iscsi/attacher.go @@ -19,16 +19,17 @@ package iscsi import ( "fmt" "os" - "strconv" "time" "github.com/golang/glog" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) type iscsiAttacher struct { @@ -66,7 +67,7 @@ func (attacher *iscsiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName } func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) { - mounter, err := attacher.volumeSpecToMounter(spec, attacher.host, pod) + mounter, err := volumeSpecToMounter(spec, attacher.host, pod) if err != nil { glog.Warningf("failed to get iscsi mounter: %v", err) return "", err @@ -76,7 +77,7 @@ func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath strin func (attacher *iscsiAttacher) GetDeviceMountPath( spec *volume.Spec) (string, error) { - mounter, err := attacher.volumeSpecToMounter(spec, attacher.host, nil) + mounter, err := volumeSpecToMounter(spec, attacher.host, nil) if err != nil { glog.Warningf("failed to get iscsi mounter: %v", err) return "", err @@ -143,7 +144,7 @@ func (detacher *iscsiDetacher) Detach(volumeName string, nodeName types.NodeName } func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath 
string) error { - unMounter := detacher.volumeSpecToUnmounter(detacher.mounter) + unMounter := volumeSpecToUnmounter(detacher.mounter, detacher.host) err := detacher.manager.DetachDisk(*unMounter, deviceMountPath) if err != nil { return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", deviceMountPath, err) @@ -157,94 +158,49 @@ func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error { return nil } -func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) (*iscsiDiskMounter, error) { +func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) (*iscsiDiskMounter, error) { var secret map[string]string - var bkportal []string readOnly, fsType, err := getISCSIVolumeInfo(spec) if err != nil { return nil, err } + var podUID types.UID if pod != nil { - chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) + secret, err = createSecretMap(spec, &iscsiPlugin{host: host}, pod.Namespace) if err != nil { return nil, err } - chapSession, err := getISCSISessionCHAPInfo(spec) + podUID = pod.UID + } + iscsiDisk, err := createISCSIDisk(spec, + podUID, + &iscsiPlugin{host: host}, + &ISCSIUtil{}, + secret, + ) + if err != nil { + return nil, err + } + exec := host.GetExec(iscsiPluginName) + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + volumeMode, err := volumehelper.GetVolumeMode(spec) if err != nil { return nil, err } - if chapDiscovery || chapSession { - secretName, secretNamespace, err := getISCSISecretNameAndNamespace(spec, pod.Namespace) - if err != nil { - return nil, err - } - if len(secretNamespace) == 0 || len(secretName) == 0 { - return nil, fmt.Errorf("CHAP enabled but secret name or namespace is empty") - } - // if secret is provided, retrieve it - kubeClient := host.GetKubeClient() - if kubeClient == nil { - return nil, fmt.Errorf("Cannot get kube client") - } - secretObj, err := 
kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) - if err != nil { - err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) - return nil, err - } - secret = make(map[string]string) - for name, data := range secretObj.Data { - glog.V(6).Infof("retrieving CHAP secret name: %s", name) - secret[name] = string(data) - } - } - + glog.V(5).Infof("iscsi: VolumeSpecToMounter volumeMode %s", volumeMode) + return &iscsiDiskMounter{ + iscsiDisk: iscsiDisk, + fsType: fsType, + volumeMode: volumeMode, + readOnly: readOnly, + mounter: &mount.SafeFormatAndMount{Interface: host.GetMounter(iscsiPluginName), Exec: exec}, + exec: exec, + deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), + }, nil } - tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec) - if err != nil { - return nil, err - } - - lun := strconv.Itoa(int(lunStr)) - portal := portalMounter(tp) - bkportal = append(bkportal, portal) - for _, p := range portals { - bkportal = append(bkportal, portalMounter(string(p))) - } - - iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec) - if err != nil { - return nil, err - } - - var initiatorName string - if initiatorNamePtr != nil { - initiatorName = *initiatorNamePtr - } - chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) - if err != nil { - return nil, err - } - chapSession, err := getISCSISessionCHAPInfo(spec) - if err != nil { - return nil, err - } - exec := attacher.host.GetExec(iscsiPluginName) - return &iscsiDiskMounter{ - iscsiDisk: &iscsiDisk{ - plugin: &iscsiPlugin{ - host: host, - }, - VolName: spec.Name(), - Portals: bkportal, - Iqn: iqn, - lun: lun, - Iface: iface, - chap_discovery: chapDiscovery, - chap_session: chapSession, - secret: secret, - InitiatorName: initiatorName, - manager: &ISCSIUtil{}}, + iscsiDisk: iscsiDisk, fsType: fsType, readOnly: readOnly, mounter: &mount.SafeFormatAndMount{Interface: host.GetMounter(iscsiPluginName), Exec: exec}, @@ -253,8 +209,8 
@@ func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volum }, nil } -func (detacher *iscsiDetacher) volumeSpecToUnmounter(mounter mount.Interface) *iscsiDiskUnmounter { - exec := detacher.host.GetExec(iscsiPluginName) +func volumeSpecToUnmounter(mounter mount.Interface, host volume.VolumeHost) *iscsiDiskUnmounter { + exec := host.GetExec(iscsiPluginName) return &iscsiDiskUnmounter{ iscsiDisk: &iscsiDisk{ plugin: &iscsiPlugin{}, diff --git a/pkg/volume/iscsi/disk_manager.go b/pkg/volume/iscsi/disk_manager.go index ea00c7ebcbe..4d5e9f9fe7d 100644 --- a/pkg/volume/iscsi/disk_manager.go +++ b/pkg/volume/iscsi/disk_manager.go @@ -27,15 +27,19 @@ import ( // Abstract interface to disk operations. type diskManager interface { MakeGlobalPDName(disk iscsiDisk) string + MakeGlobalVDPDName(disk iscsiDisk) string // Attaches the disk to the kubelet's host machine. AttachDisk(b iscsiDiskMounter) (string, error) // Detaches the disk from the kubelet's host machine. DetachDisk(disk iscsiDiskUnmounter, mntPath string) error + // Detaches the block disk from the kubelet's host machine. + DetachBlockISCSIDisk(disk iscsiDiskUnmapper, mntPath string) error } // utility to mount a disk based filesystem +// globalPDPath: global mount path like, /var/lib/kubelet/plugins/kubernetes.io/iscsi/{ifaceName}/{portal-some_iqn-lun-lun_id} +// volPath: pod volume dir path like, /var/lib/kubelet/pods/{podUID}/volumes/kubernetes.io~iscsi/{volumeName} func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error { - // TODO: handle failed mounts here. 
notMnt, err := mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { glog.Errorf("cannot validate mountpoint: %s", volPath) diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index 4ea6e792ef8..e9611e208c3 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -18,6 +18,8 @@ package iscsi import ( "fmt" + "os" + "path/filepath" "strconv" "strings" @@ -42,6 +44,7 @@ type iscsiPlugin struct { var _ volume.VolumePlugin = &iscsiPlugin{} var _ volume.PersistentVolumePlugin = &iscsiPlugin{} +var _ volume.BlockVolumePlugin = &iscsiPlugin{} const ( iscsiPluginName = "kubernetes.io/iscsi" @@ -93,98 +96,27 @@ func (plugin *iscsiPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode { } func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - // Inject real implementations here, test through the internal function. - var secret map[string]string if pod == nil { return nil, fmt.Errorf("nil pod") } - chapDiscover, err := getISCSIDiscoveryCHAPInfo(spec) + secret, err := createSecretMap(spec, plugin, pod.Namespace) if err != nil { return nil, err } - chapSession, err := getISCSISessionCHAPInfo(spec) - if err != nil { - return nil, err - } - if chapDiscover || chapSession { - secretName, secretNamespace, err := getISCSISecretNameAndNamespace(spec, pod.Namespace) - if err != nil { - return nil, err - } - - if len(secretName) > 0 && len(secretNamespace) > 0 { - // if secret is provideded, retrieve it - kubeClient := plugin.host.GetKubeClient() - if kubeClient == nil { - return nil, fmt.Errorf("Cannot get kube client") - } - secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) - if err != nil { - err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) - return nil, err - } - secret = make(map[string]string) - for name, data := range secretObj.Data { - glog.V(4).Infof("retrieving 
CHAP secret name: %s", name) - secret[name] = string(data) - } - } - } return plugin.newMounterInternal(spec, pod.UID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret) } func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret map[string]string) (volume.Mounter, error) { - // iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author. - // iscsi volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV readOnly, fsType, err := getISCSIVolumeInfo(spec) if err != nil { return nil, err } - tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec) + iscsiDisk, err := createISCSIDisk(spec, podUID, plugin, manager, secret) if err != nil { return nil, err } - - lun := strconv.Itoa(int(lunStr)) - portal := portalMounter(tp) - var bkportal []string - bkportal = append(bkportal, portal) - for _, p := range portals { - bkportal = append(bkportal, portalMounter(string(p))) - } - - iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec) - if err != nil { - return nil, err - } - - var initiatorName string - if initiatorNamePtr != nil { - initiatorName = *initiatorNamePtr - } - chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) - if err != nil { - return nil, err - } - chapSession, err := getISCSISessionCHAPInfo(spec) - if err != nil { - return nil, err - } - return &iscsiDiskMounter{ - iscsiDisk: &iscsiDisk{ - podUID: podUID, - VolName: spec.Name(), - Portals: bkportal, - Iqn: iqn, - lun: lun, - Iface: iface, - chap_discovery: chapDiscovery, - chap_session: chapSession, - secret: secret, - InitiatorName: initiatorName, - manager: manager, - plugin: plugin}, + iscsiDisk: iscsiDisk, fsType: fsType, readOnly: readOnly, mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, @@ -194,8 +126,41 @@ func (plugin 
*iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI }, nil } +// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification. +func (plugin *iscsiPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) { + // If this is called via GenerateUnmapDeviceFunc(), pod is nil. + // Pass empty string as dummy uid since uid isn't used in the case. + var uid types.UID + var secret map[string]string + var err error + if pod != nil { + uid = pod.UID + secret, err = createSecretMap(spec, plugin, pod.Namespace) + if err != nil { + return nil, err + } + } + return plugin.newBlockVolumeMapperInternal(spec, uid, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret) +} + +func (plugin *iscsiPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret map[string]string) (volume.BlockVolumeMapper, error) { + readOnly, _, err := getISCSIVolumeInfo(spec) + if err != nil { + return nil, err + } + iscsiDisk, err := createISCSIDisk(spec, podUID, plugin, manager, secret) + if err != nil { + return nil, err + } + return &iscsiDiskMapper{ + iscsiDisk: iscsiDisk, + readOnly: readOnly, + exec: exec, + deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()), + }, nil +} + func (plugin *iscsiPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { - // Inject real implementations here, test through the internal function. return plugin.newUnmounterInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) } @@ -212,25 +177,88 @@ func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID }, nil } +// NewBlockVolumeUnmapper creates a new volume.BlockVolumeUnmapper from recoverable state. 
+func (plugin *iscsiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { + return plugin.newUnmapperInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetExec(plugin.GetPluginName())) +} + +func (plugin *iscsiPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager, exec mount.Exec) (volume.BlockVolumeUnmapper, error) { + return &iscsiDiskUnmapper{ + iscsiDisk: &iscsiDisk{ + podUID: podUID, + VolName: volName, + manager: manager, + plugin: plugin, + }, + exec: exec, + deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()), + }, nil +} + func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { + // Find globalPDPath from pod volume directory(mountPath) + var globalPDPath string + mounter := plugin.host.GetMounter(plugin.GetPluginName()) + paths, err := mount.GetMountRefs(mounter, mountPath) + if err != nil { + return nil, err + } + for _, path := range paths { + if strings.Contains(path, plugin.host.GetPluginDir(iscsiPluginName)) { + globalPDPath = path + break + } + } + // Couldn't fetch globalPDPath + if len(globalPDPath) == 0 { + return nil, fmt.Errorf("couldn't fetch globalPDPath. 
failed to obtain volume spec") + } + + // Obtain iscsi disk configurations from globalPDPath + device, _, err := extractDeviceAndPrefix(globalPDPath) + if err != nil { + return nil, err + } + bkpPortal, iqn, err := extractPortalAndIqn(device) + if err != nil { + return nil, err + } + iface, _ := extractIface(globalPDPath) iscsiVolume := &v1.Volume{ Name: volumeName, VolumeSource: v1.VolumeSource{ ISCSI: &v1.ISCSIVolumeSource{ - TargetPortal: volumeName, - IQN: volumeName, + TargetPortal: bkpPortal, + IQN: iqn, + ISCSIInterface: iface, }, }, } return volume.NewSpecFromVolume(iscsiVolume), nil } +func (plugin *iscsiPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { + pluginDir := plugin.host.GetVolumeDevicePluginDir(iscsiPluginName) + blkutil := ioutil.NewBlockVolumePathHandler() + globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) + if err != nil { + return nil, err + } + glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + // Retreive volume information from globalMapPathUUID + // globalMapPathUUID example: + // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} + // plugins/kubernetes.io/iscsi/volumeDevices/iface-default/192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0/{pod uuid} + globalMapPath := filepath.Dir(globalMapPathUUID) + return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath) +} + type iscsiDisk struct { VolName string podUID types.UID Portals []string Iqn string - lun string + Lun string Iface string chap_discovery bool chap_session bool @@ -248,10 +276,25 @@ func (iscsi *iscsiDisk) GetPath() string { return iscsi.plugin.host.GetPodVolumeDir(iscsi.podUID, utilstrings.EscapeQualifiedNameForDisk(name), iscsi.VolName) } +func (iscsi *iscsiDisk) iscsiGlobalMapPath(spec *volume.Spec) (string, error) { + mounter, err := volumeSpecToMounter(spec, iscsi.plugin.host, nil /* 
pod */) + if err != nil { + glog.Warningf("failed to get iscsi mounter: %v", err) + return "", err + } + return iscsi.manager.MakeGlobalVDPDName(*mounter.iscsiDisk), nil +} + +func (iscsi *iscsiDisk) iscsiPodDeviceMapPath() (string, string) { + name := iscsiPluginName + return iscsi.plugin.host.GetPodVolumeDeviceDir(iscsi.podUID, utilstrings.EscapeQualifiedNameForDisk(name)), iscsi.VolName +} + type iscsiDiskMounter struct { *iscsiDisk readOnly bool fsType string + volumeMode v1.PersistentVolumeMode mounter *mount.SafeFormatAndMount exec mount.Exec deviceUtil ioutil.DeviceUtil @@ -306,6 +349,58 @@ func (c *iscsiDiskUnmounter) TearDownAt(dir string) error { return ioutil.UnmountPath(dir, c.mounter) } +// Block Volumes Support +type iscsiDiskMapper struct { + *iscsiDisk + readOnly bool + exec mount.Exec + deviceUtil ioutil.DeviceUtil +} + +var _ volume.BlockVolumeMapper = &iscsiDiskMapper{} + +func (b *iscsiDiskMapper) SetUpDevice() (string, error) { + return "", nil +} + +type iscsiDiskUnmapper struct { + *iscsiDisk + exec mount.Exec + deviceUtil ioutil.DeviceUtil +} + +var _ volume.BlockVolumeUnmapper = &iscsiDiskUnmapper{} + +// Even though iSCSI plugin has attacher/detacher implementation, iSCSI plugin +// needs volume detach operation during TearDownDevice(). This method is only +// chance that operations are done on kubelet node during volume teardown sequences. 
+func (c *iscsiDiskUnmapper) TearDownDevice(mapPath, _ string) error { + err := c.manager.DetachBlockISCSIDisk(*c, mapPath) + if err != nil { + return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", mapPath, err) + } + glog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", mapPath) + err = os.RemoveAll(mapPath) + if err != nil { + return fmt.Errorf("iscsi: failed to delete the directory: %s\nError: %v", mapPath, err) + } + glog.V(4).Infof("iscsi: successfully detached disk: %s", mapPath) + return nil +} + +// GetGlobalMapPath returns global map path and error +// path: plugins/kubernetes.io/{PluginName}/volumeDevices/{ifaceName}/{portal-some_iqn-lun-lun_id} +func (iscsi *iscsiDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) { + return iscsi.iscsiGlobalMapPath(spec) +} + +// GetPodDeviceMapPath returns pod device map path and volume name +// path: pods/{podUid}/volumeDevices/kubernetes.io~iscsi +// volumeName: pv0001 +func (iscsi *iscsiDisk) GetPodDeviceMapPath() (string, string) { + return iscsi.iscsiPodDeviceMapPath() +} + func portalMounter(portal string) string { if !strings.Contains(portal, ":") { portal = portal + ":3260" @@ -316,7 +411,7 @@ func portalMounter(portal string) string { // get iSCSI volume info: readOnly and fstype func getISCSIVolumeInfo(spec *volume.Spec) (bool, string, error) { // for volume source, readonly is in volume spec - // for PV, readonly is in PV spec + // for PV, readonly is in PV spec. 
PV gets the ReadOnly flag indirectly through the PVC source if spec.Volume != nil && spec.Volume.ISCSI != nil { return spec.Volume.ISCSI.ReadOnly, spec.Volume.ISCSI.FSType, nil } else if spec.PersistentVolume != nil && @@ -397,3 +492,155 @@ func getISCSISecretNameAndNamespace(spec *volume.Spec, defaultSecretNamespace st return "", "", fmt.Errorf("Spec does not reference an ISCSI volume type") } + +func createISCSIDisk(spec *volume.Spec, podUID types.UID, plugin *iscsiPlugin, manager diskManager, secret map[string]string) (*iscsiDisk, error) { + tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec) + if err != nil { + return nil, err + } + + lun := strconv.Itoa(int(lunStr)) + portal := portalMounter(tp) + var bkportal []string + bkportal = append(bkportal, portal) + for _, p := range portals { + bkportal = append(bkportal, portalMounter(string(p))) + } + + iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec) + if err != nil { + return nil, err + } + + var initiatorName string + if initiatorNamePtr != nil { + initiatorName = *initiatorNamePtr + } + chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) + if err != nil { + return nil, err + } + chapSession, err := getISCSISessionCHAPInfo(spec) + if err != nil { + return nil, err + } + + return &iscsiDisk{ + podUID: podUID, + VolName: spec.Name(), + Portals: bkportal, + Iqn: iqn, + Lun: lun, + Iface: iface, + chap_discovery: chapDiscovery, + chap_session: chapSession, + secret: secret, + InitiatorName: initiatorName, + manager: manager, + plugin: plugin}, nil +} + +func createSecretMap(spec *volume.Spec, plugin *iscsiPlugin, namespace string) (map[string]string, error) { + var secret map[string]string + chapDiscover, err := getISCSIDiscoveryCHAPInfo(spec) + if err != nil { + return nil, err + } + chapSession, err := getISCSISessionCHAPInfo(spec) + if err != nil { + return nil, err + } + if chapDiscover || chapSession { + secretName, secretNamespace, err := getISCSISecretNameAndNamespace(spec, namespace) + if 
err != nil { + return nil, err + } + + if len(secretName) > 0 && len(secretNamespace) > 0 { + // if secret is provideded, retrieve it + kubeClient := plugin.host.GetKubeClient() + if kubeClient == nil { + return nil, fmt.Errorf("Cannot get kube client") + } + secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + if err != nil { + err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) + return nil, err + } + secret = make(map[string]string) + for name, data := range secretObj.Data { + glog.V(4).Infof("retrieving CHAP secret name: %s", name) + secret[name] = string(data) + } + } + } + return secret, err +} + +func createVolumeFromISCSIVolumeSource(volumeName string, iscsi v1.ISCSIVolumeSource) *v1.Volume { + return &v1.Volume{ + Name: volumeName, + VolumeSource: v1.VolumeSource{ + ISCSI: &iscsi, + }, + } +} + +func createPersistentVolumeFromISCSIPVSource(volumeName string, iscsi v1.ISCSIPersistentVolumeSource) *v1.PersistentVolume { + block := v1.PersistentVolumeBlock + return &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + ISCSI: &iscsi, + }, + VolumeMode: &block, + }, + } +} + +func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.Spec, error) { + // Retreive volume spec information from globalMapPath + // globalMapPath example: + // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath} + // plugins/kubernetes.io/iscsi/volumeDevices/iface-default/192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0 + + // device: 192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0 + device, _, err := extractDeviceAndPrefix(globalMapPath) + if err != nil { + return nil, err + } + bkpPortal, iqn, err := extractPortalAndIqn(device) + if err != nil { + return nil, err + } + arr := strings.Split(device, "-lun-") + if 
len(arr) < 2 { + return nil, fmt.Errorf("failed to retreive lun from globalMapPath: %v", globalMapPath) + } + lun, err := strconv.Atoi(arr[1]) + if err != nil { + return nil, err + } + iface, found := extractIface(globalMapPath) + if !found { + return nil, fmt.Errorf("failed to retreive iface from globalMapPath: %v", globalMapPath) + } + iscsiPV := createPersistentVolumeFromISCSIPVSource(volumeName, + v1.ISCSIPersistentVolumeSource{ + TargetPortal: bkpPortal, + IQN: iqn, + Lun: int32(lun), + ISCSIInterface: iface, + }, + ) + glog.V(5).Infof("ConstructBlockVolumeSpec: TargetPortal: %v, IQN: %v, Lun: %v, ISCSIInterface: %v", + iscsiPV.Spec.PersistentVolumeSource.ISCSI.TargetPortal, + iscsiPV.Spec.PersistentVolumeSource.ISCSI.IQN, + iscsiPV.Spec.PersistentVolumeSource.ISCSI.Lun, + iscsiPV.Spec.PersistentVolumeSource.ISCSI.ISCSIInterface, + ) + return volume.NewSpecFromPersistentVolume(iscsiPV, false), nil +} diff --git a/pkg/volume/iscsi/iscsi_test.go b/pkg/volume/iscsi/iscsi_test.go index 831cd564395..eeb6fa6c015 100644 --- a/pkg/volume/iscsi/iscsi_test.go +++ b/pkg/volume/iscsi/iscsi_test.go @@ -19,6 +19,7 @@ package iscsi import ( "fmt" "os" + "strings" "testing" "k8s.io/api/core/v1" @@ -80,7 +81,7 @@ type fakeDiskManager struct { func NewFakeDiskManager() *fakeDiskManager { return &fakeDiskManager{ - tmpDir: utiltesting.MkTmpdirOrDie("fc_test"), + tmpDir: utiltesting.MkTmpdirOrDie("iscsi_test"), } } @@ -91,6 +92,11 @@ func (fake *fakeDiskManager) Cleanup() { func (fake *fakeDiskManager) MakeGlobalPDName(disk iscsiDisk) string { return fake.tmpDir } + +func (fake *fakeDiskManager) MakeGlobalVDPDName(disk iscsiDisk) string { + return fake.tmpDir +} + func (fake *fakeDiskManager) AttachDisk(b iscsiDiskMounter) (string, error) { globalPath := b.manager.MakeGlobalPDName(*b.iscsiDisk) err := os.MkdirAll(globalPath, 0750) @@ -113,6 +119,15 @@ func (fake *fakeDiskManager) DetachDisk(c iscsiDiskUnmounter, mntPath string) er return nil } +func (fake *fakeDiskManager) 
DetachBlockISCSIDisk(c iscsiDiskUnmapper, mntPath string) error { + globalPath := c.manager.MakeGlobalVDPDName(*c.iscsiDisk) + err := os.RemoveAll(globalPath) + if err != nil { + return err + } + return nil +} + func doTestPlugin(t *testing.T, spec *volume.Spec) { tmpDir, err := utiltesting.MkTmpdir("iscsi_test") if err != nil { @@ -289,10 +304,12 @@ type testcase struct { defaultNs string spec *volume.Spec // Expected return of the test - expectedName string - expectedNs string - expectedIface string - expectedError error + expectedName string + expectedNs string + expectedIface string + expectedError error + expectedDiscoveryCHAP bool + expectedSessionCHAP bool } func TestGetSecretNameAndNamespaceForPV(t *testing.T) { @@ -424,5 +441,105 @@ func TestGetISCSIInitiatorInfo(t *testing.T) { err, resultIface) } } - +} + +func TestGetISCSICHAP(t *testing.T) { + tests := []testcase{ + { + name: "persistent volume source", + spec: &volume.Spec{ + PersistentVolume: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + ISCSI: &v1.ISCSIPersistentVolumeSource{ + DiscoveryCHAPAuth: true, + SessionCHAPAuth: true, + }, + }, + }, + }, + }, + expectedDiscoveryCHAP: true, + expectedSessionCHAP: true, + expectedError: nil, + }, + { + name: "pod volume source", + spec: &volume.Spec{ + Volume: &v1.Volume{ + VolumeSource: v1.VolumeSource{ + ISCSI: &v1.ISCSIVolumeSource{ + DiscoveryCHAPAuth: true, + SessionCHAPAuth: true, + }, + }, + }, + }, + expectedDiscoveryCHAP: true, + expectedSessionCHAP: true, + expectedError: nil, + }, + { + name: "no volume", + spec: &volume.Spec{}, + expectedDiscoveryCHAP: false, + expectedSessionCHAP: false, + expectedError: fmt.Errorf("Spec does not reference an ISCSI volume type"), + }, + } + for _, testcase := range tests { + resultDiscoveryCHAP, err := getISCSIDiscoveryCHAPInfo(testcase.spec) + resultSessionCHAP, err := getISCSISessionCHAPInfo(testcase.spec) + switch testcase.name { + case "no 
volume": + if err.Error() != testcase.expectedError.Error() || resultDiscoveryCHAP != testcase.expectedDiscoveryCHAP || resultSessionCHAP != testcase.expectedSessionCHAP { + t.Errorf("%s failed: expected err=%v DiscoveryCHAP=%v SessionCHAP=%v, got %v/%v/%v", + testcase.name, testcase.expectedError, testcase.expectedDiscoveryCHAP, testcase.expectedSessionCHAP, + err, resultDiscoveryCHAP, resultSessionCHAP) + } + default: + if err != testcase.expectedError || resultDiscoveryCHAP != testcase.expectedDiscoveryCHAP || resultSessionCHAP != testcase.expectedSessionCHAP { + t.Errorf("%s failed: expected err=%v DiscoveryCHAP=%v SessionCHAP=%v, got %v/%v/%v", testcase.name, testcase.expectedError, testcase.expectedDiscoveryCHAP, testcase.expectedSessionCHAP, + err, resultDiscoveryCHAP, resultSessionCHAP) + } + } + } +} + +func TestGetVolumeSpec(t *testing.T) { + path := "plugins/kubernetes.io/iscsi/volumeDevices/iface-default/127.0.0.1:3260-iqn.2014-12.server:storage.target01-lun-0" + spec, _ := getVolumeSpecFromGlobalMapPath("test", path) + + portal := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.TargetPortal + if portal != "127.0.0.1:3260" { + t.Errorf("wrong portal: %v", portal) + } + iqn := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.IQN + if iqn != "iqn.2014-12.server:storage.target01" { + t.Errorf("wrong iqn: %v", iqn) + } + lun := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.Lun + if lun != 0 { + t.Errorf("wrong lun: %v", lun) + } + iface := spec.PersistentVolume.Spec.PersistentVolumeSource.ISCSI.ISCSIInterface + if iface != "default" { + t.Errorf("wrong ISCSIInterface: %v", iface) + } +} + +func TestGetVolumeSpec_no_lun(t *testing.T) { + path := "plugins/kubernetes.io/iscsi/volumeDevices/iface-default/127.0.0.1:3260-iqn.2014-12.server:storage.target01" + _, err := getVolumeSpecFromGlobalMapPath("test", path) + if !strings.Contains(err.Error(), "malformatted mnt path") { + t.Errorf("should get error: malformatted mnt path") + } +} + 
+func TestGetVolumeSpec_no_iface(t *testing.T) { + path := "plugins/kubernetes.io/iscsi/volumeDevices/default/127.0.0.1:3260-iqn.2014-12.server:storage.target01-lun-0" + _, err := getVolumeSpecFromGlobalMapPath("test", path) + if !strings.Contains(err.Error(), "failed to retreive iface") { + t.Errorf("should get error: failed to retreive iface") + } } diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go index 12e8430d85e..b42ca1e5b4c 100644 --- a/pkg/volume/iscsi/iscsi_util.go +++ b/pkg/volume/iscsi/iscsi_util.go @@ -27,6 +27,9 @@ import ( "time" "github.com/golang/glog" + "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -163,10 +166,21 @@ func makePDNameInternal(host volume.VolumeHost, portal string, iqn string, lun s return path.Join(host.GetPluginDir(iscsiPluginName), "iface-"+iface, portal+"-"+iqn+"-lun-"+lun) } +// make a directory like /var/lib/kubelet/plugins/kubernetes.io/iscsi/volumeDevices/iface_name/portal-some_iqn-lun-lun_id +func makeVDPDNameInternal(host volume.VolumeHost, portal string, iqn string, lun string, iface string) string { + return path.Join(host.GetVolumeDevicePluginDir(iscsiPluginName), "iface-"+iface, portal+"-"+iqn+"-lun-"+lun) +} + type ISCSIUtil struct{} +// MakeGlobalPDName returns path of global plugin dir func (util *ISCSIUtil) MakeGlobalPDName(iscsi iscsiDisk) string { - return makePDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.lun, iscsi.Iface) + return makePDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.Lun, iscsi.Iface) +} + +// MakeGlobalVDPDName returns path of global volume device plugin dir +func (util *ISCSIUtil) MakeGlobalVDPDName(iscsi iscsiDisk) string { + return makeVDPDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.Lun, iscsi.Iface) } func (util *ISCSIUtil) 
persistISCSI(conf iscsiDisk, mnt string) error { @@ -184,7 +198,6 @@ func (util *ISCSIUtil) persistISCSI(conf iscsiDisk, mnt string) error { } func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error { - // NOTE: The iscsi config json is not deleted after logging out from target portals. file := path.Join(mnt, "iscsi.json") fp, err := os.Open(file) if err != nil { @@ -198,6 +211,7 @@ func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error { return nil } +// AttachDisk returns devicePath of volume if attach succeeded otherwise returns error func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { var devicePath string var devicePaths []string @@ -240,9 +254,9 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { return "", fmt.Errorf("Could not parse iface file for %s", b.Iface) } if iscsiTransport == "tcp" { - devicePath = strings.Join([]string{"/dev/disk/by-path/ip", tp, "iscsi", b.Iqn, "lun", b.lun}, "-") + devicePath = strings.Join([]string{"/dev/disk/by-path/ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-") } else { - devicePath = strings.Join([]string{"/dev/disk/by-path/pci", "*", "ip", tp, "iscsi", b.Iqn, "lun", b.lun}, "-") + devicePath = strings.Join([]string{"/dev/disk/by-path/pci", "*", "ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-") } if exist := waitForPathToExist(&devicePath, 1, iscsiTransport); exist { @@ -307,26 +321,6 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { //Make sure we use a valid devicepath to find mpio device. 
devicePath = devicePaths[0] - - // mount it - globalPDPath := b.manager.MakeGlobalPDName(*b.iscsiDisk) - notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath) - if err != nil && !os.IsNotExist(err) { - return "", fmt.Errorf("Heuristic determination of mount point failed:%v", err) - } - if !notMnt { - glog.Infof("iscsi: %s already mounted", globalPDPath) - return "", nil - } - - if err := os.MkdirAll(globalPDPath, 0750); err != nil { - glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) - return "", err - } - - // Persist iscsi disk config to json file for DetachDisk path - util.persistISCSI(*(b.iscsiDisk), globalPDPath) - for _, path := range devicePaths { // There shouldnt be any empty device paths. However adding this check // for safer side to avoid the possibility of an empty entry. @@ -339,14 +333,67 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { break } } - err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil) - if err != nil { - glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) - } - - return devicePath, err + glog.V(5).Infof("iscsi: AttachDisk devicePath: %s", devicePath) + // run global mount path related operations based on volumeMode + return globalPDPathOperation(b)(b, devicePath, util) } +// globalPDPathOperation returns global mount path related operations based on volumeMode. +// If the volumeMode is 'Filesystem' or not defined, plugin needs to create a dir, persist +// iscsi configrations, and then format/mount the volume. +// If the volumeMode is 'Block', plugin creates a dir and persists iscsi configrations. +// Since volume type is block, plugin doesn't need to format/mount the volume. 
+func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *ISCSIUtil) (string, error) { + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + glog.V(5).Infof("iscsi: AttachDisk volumeMode: %s", b.volumeMode) + if b.volumeMode == v1.PersistentVolumeBlock { + // If the volumeMode is 'Block', plugin don't need to format the volume. + return func(b iscsiDiskMounter, devicePath string, util *ISCSIUtil) (string, error) { + globalPDPath := b.manager.MakeGlobalVDPDName(*b.iscsiDisk) + // Create dir like /var/lib/kubelet/plugins/kubernetes.io/iscsi/volumeDevices/{ifaceName}/{portal-some_iqn-lun-lun_id} + if err := os.MkdirAll(globalPDPath, 0750); err != nil { + glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) + return "", err + } + // Persist iscsi disk config to json file for DetachDisk path + util.persistISCSI(*(b.iscsiDisk), globalPDPath) + + return devicePath, nil + } + } + } + // If the volumeMode is 'Filesystem', plugin needs to format the volume + // and mount it to globalPDPath. 
+ return func(b iscsiDiskMounter, devicePath string, util *ISCSIUtil) (string, error) { + globalPDPath := b.manager.MakeGlobalPDName(*b.iscsiDisk) + notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath) + if err != nil && !os.IsNotExist(err) { + return "", fmt.Errorf("Heuristic determination of mount point failed:%v", err) + } + // Return confirmed devicePath to caller + if !notMnt { + glog.Infof("iscsi: %s already mounted", globalPDPath) + return devicePath, nil + } + // Create dir like /var/lib/kubelet/plugins/kubernetes.io/iscsi/{ifaceName}/{portal-some_iqn-lun-lun_id} + if err := os.MkdirAll(globalPDPath, 0750); err != nil { + glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) + return "", err + } + // Persist iscsi disk config to json file for DetachDisk path + util.persistISCSI(*(b.iscsiDisk), globalPDPath) + + err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil) + if err != nil { + glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) + } + + return devicePath, nil + } +} + +// DetachDisk unmounts and detaches a volume from node func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { _, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath) if err != nil { @@ -401,9 +448,91 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { } portals := removeDuplicate(bkpPortal) if len(portals) == 0 { - return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. Couldn't get connected portals from configurations.") + return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. 
Couldn't get connected portals from configurations") } + err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found) + if err != nil { + return fmt.Errorf("failed to finish detachISCSIDisk, err: %v", err) + } + return nil +} + +// DetachBlockISCSIDisk removes loopback device for a volume and detaches a volume from node +func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string) error { + if pathExists, pathErr := volumeutil.PathExists(mapPath); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath) + return nil + } + // If we arrive here, device is no longer used, see if need to logout the target + // device: 192.168.0.10:3260-iqn.2017-05.com.example:test-lun-0 + device, _, err := extractDeviceAndPrefix(mapPath) + if err != nil { + return err + } + var bkpPortal []string + var volName, iqn, lun, iface, initiatorName string + found := true + // load iscsi disk config from json file + if err := util.loadISCSI(c.iscsiDisk, mapPath); err == nil { + bkpPortal, iqn, lun, iface, volName = c.iscsiDisk.Portals, c.iscsiDisk.Iqn, c.iscsiDisk.Lun, c.iscsiDisk.Iface, c.iscsiDisk.VolName + initiatorName = c.iscsiDisk.InitiatorName + } else { + // If the iscsi disk config is not found, fall back to the original behavior. + // This portal/iqn/iface is no longer referenced, log out. + // Extract the portal and iqn from device path. + bkpPortal = make([]string, 1) + bkpPortal[0], iqn, err = extractPortalAndIqn(device) + if err != nil { + return err + } + arr := strings.Split(device, "-lun-") + if len(arr) < 2 { + return fmt.Errorf("failed to retreive lun from mapPath: %v", mapPath) + } + lun = arr[1] + // Extract the iface from the mountPath and use it to log out. If the iface + // is not found, maintain the previous behavior to facilitate kubelet upgrade. 
+ // Logout may fail as no session may exist for the portal/IQN on the specified interface. + iface, found = extractIface(mapPath) + } + portals := removeDuplicate(bkpPortal) + if len(portals) == 0 { + return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. Couldn't get connected portals from configurations") + } + + devicePath := getDevByPath(portals[0], iqn, lun) + glog.V(5).Infof("iscsi: devicePath: %s", devicePath) + if _, err = os.Stat(devicePath); err != nil { + return fmt.Errorf("failed to validate devicePath: %s", devicePath) + } + // check if the dev is using mpio and if so mount it via the dm-XX device + if mappedDevicePath := c.deviceUtil.FindMultipathDeviceForDevice(devicePath); mappedDevicePath != "" { + devicePath = mappedDevicePath + } + // Get loopback device which takes fd lock for devicePath before + // detaching a volume from node. + blkUtil := volumeutil.NewBlockVolumePathHandler() + loop, err := volumeutil.BlockVolumePathHandler.GetLoopDevice(blkUtil, devicePath) + if err != nil { + return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err) + } + // Detach a volume from kubelet node + err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found) + if err != nil { + return fmt.Errorf("failed to finish detachISCSIDisk, err: %v", err) + } + // The volume was successfully detached from node. We can safely remove the loopback. 
+ err = volumeutil.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) + if err != nil { + return fmt.Errorf("failed to remove loopback :%v, err: %v", loop, err) + } + return nil +} + +func (util *ISCSIUtil) detachISCSIDisk(exec mount.Exec, portals []string, iqn, iface, volName, initiatorName string, found bool) error { for _, portal := range portals { logoutArgs := []string{"-m", "node", "-p", portal, "-T", iqn, "--logout"} deleteArgs := []string{"-m", "node", "-p", portal, "-T", iqn, "-o", "delete"} @@ -412,13 +541,13 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { deleteArgs = append(deleteArgs, []string{"-I", iface}...) } glog.Infof("iscsi: log out target %s iqn %s iface %s", portal, iqn, iface) - out, err := c.exec.Run("iscsiadm", logoutArgs...) + out, err := exec.Run("iscsiadm", logoutArgs...) if err != nil { glog.Errorf("iscsi: failed to detach disk Error: %s", string(out)) } // Delete the node record glog.Infof("iscsi: delete node record target %s iqn %s", portal, iqn) - out, err = c.exec.Run("iscsiadm", deleteArgs...) + out, err = exec.Run("iscsiadm", deleteArgs...) if err != nil { glog.Errorf("iscsi: failed to delete node record Error: %s", string(out)) } @@ -427,7 +556,7 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { // If the iface is not created via iscsi plugin, skip to delete if initiatorName != "" && found && iface == (portals[0]+":"+volName) { deleteArgs := []string{"-m", "iface", "-I", iface, "-o", "delete"} - out, err := c.exec.Run("iscsiadm", deleteArgs...) + out, err := exec.Run("iscsiadm", deleteArgs...) 
if err != nil { glog.Errorf("iscsi: failed to delete iface Error: %s", string(out)) } @@ -436,6 +565,10 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { return nil } +func getDevByPath(portal, iqn, lun string) string { + return "/dev/disk/by-path/ip-" + portal + "-iscsi-" + iqn + "-lun-" + lun +} + func extractTransportname(ifaceOutput string) (iscsiTransport string) { rexOutput := ifaceTransportNameRe.FindStringSubmatch(ifaceOutput) if rexOutput == nil { From 96509d4f5b91d8ace521dc04a2ed150d4c057090 Mon Sep 17 00:00:00 2001 From: mtanino Date: Mon, 27 Nov 2017 16:12:33 -0500 Subject: [PATCH 715/794] generated code for iSCSI plugin change --- pkg/volume/iscsi/BUILD | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/volume/iscsi/BUILD b/pkg/volume/iscsi/BUILD index e056ff00889..6da935b9552 100644 --- a/pkg/volume/iscsi/BUILD +++ b/pkg/volume/iscsi/BUILD @@ -17,14 +17,17 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/iscsi", deps = [ + "//pkg/features:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", + "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) From 4e1b5c6a3299327ff856d05443af8fa20ee760e7 Mon Sep 17 00:00:00 2001 From: Jesse Haka Date: Sun, 7 Jan 2018 11:05:17 +0200 Subject: [PATCH 716/794] move detach out of os volumes attach add test add test fix bazel fix tests change loglevel, remove else statement --- .../providers/openstack/openstack_volumes.go | 90 +++++++++++++----- pkg/volume/cinder/attacher.go | 31 +----- pkg/volume/cinder/attacher_test.go | 94 ++++++++++++++++--- 
pkg/volume/cinder/cinder.go | 3 +- 4 files changed, 151 insertions(+), 67 deletions(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/pkg/cloudprovider/providers/openstack/openstack_volumes.go index 8a530592845..4a441e4c347 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -26,6 +26,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" k8s_volume "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -319,33 +320,18 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { if instanceID == volume.AttachedServerId { glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) return volume.ID, nil - } else { - nodeName, err := os.GetNodeNameByID(volume.AttachedServerId) - attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerId) - if err != nil { - glog.Error(attachErr) - return "", errors.New(attachErr) - } - // using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128 - devicePath := volume.AttachedDevice - danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath) - glog.V(4).Infof("volume %s is already attached to node %s path %s", volumeID, nodeName, devicePath) - // check special case, if node is deleted from cluster but exist still in openstack - // we need to check can we detach the cinder, node is deleted from cluster if state is not ACTIVE - srv, err := getServerByName(cClient, nodeName, false) - if err != nil { - return "", err - } - if srv.Status != "ACTIVE" { - err = os.DetachDisk(volume.AttachedServerId, volumeID) - if err != nil { - glog.Error(err) - return "", err - } - glog.V(4).Infof("detached volume %s node state was %s", volumeID, srv.Status) - } - return "", 
danglingErr } + nodeName, err := os.GetNodeNameByID(volume.AttachedServerId) + attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerId) + if err != nil { + glog.Error(attachErr) + return "", errors.New(attachErr) + } + // using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128 + devicePath := volume.AttachedDevice + danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath) + glog.V(2).Infof("Found dangling volume %s attached to node %s", volumeID, nodeName) + return "", danglingErr } startTime := time.Now() @@ -605,6 +591,9 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, // DiskIsAttached queries if a volume is attached to a compute instance func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) { + if instanceID == "" { + glog.Warningf("calling DiskIsAttached with empty instanceid: %s %s", instanceID, volumeID) + } volume, err := os.getVolume(volumeID) if err != nil { return false, err @@ -613,6 +602,29 @@ func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) { return instanceID == volume.AttachedServerId, nil } +// DiskIsAttachedByName queries if a volume is attached to a compute instance by name +func (os *OpenStack) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) { + cClient, err := os.NewComputeV2() + if err != nil { + return false, "", err + } + srv, err := getServerByName(cClient, nodeName, false) + if err != nil { + if err == ErrNotFound { + // instance not found anymore in cloudprovider, assume that cinder is detached + return false, "", nil + } else { + return false, "", err + } + } + instanceID := "/" + srv.ID + if ind := strings.LastIndex(instanceID, "/"); ind >= 0 { + instanceID = instanceID[(ind + 1):] + } + attached, err := os.DiskIsAttached(instanceID, volumeID) + 
return attached, instanceID, err +} + // DisksAreAttached queries if a list of volumes are attached to a compute instance func (os *OpenStack) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) { attached := make(map[string]bool) @@ -627,6 +639,32 @@ func (os *OpenStack) DisksAreAttached(instanceID string, volumeIDs []string) (ma return attached, nil } +// DisksAreAttachedByName queries if a list of volumes are attached to a compute instance by name +func (os *OpenStack) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) { + attached := make(map[string]bool) + cClient, err := os.NewComputeV2() + if err != nil { + return attached, err + } + srv, err := getServerByName(cClient, nodeName, false) + if err != nil { + if err == ErrNotFound { + // instance not found anymore, mark all volumes as detached + for _, volumeID := range volumeIDs { + attached[volumeID] = false + } + return attached, nil + } else { + return attached, err + } + } + instanceID := "/" + srv.ID + if ind := strings.LastIndex(instanceID, "/"); ind >= 0 { + instanceID = instanceID[(ind + 1):] + } + return os.DisksAreAttached(instanceID, volumeIDs) +} + // diskIsUsed returns true a disk is attached to any node. 
func (os *OpenStack) diskIsUsed(volumeID string) (bool, error) { volume, err := os.getVolume(volumeID) diff --git a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go index 87b58dae01b..65b24640cbb 100644 --- a/pkg/volume/cinder/attacher.go +++ b/pkg/volume/cinder/attacher.go @@ -27,7 +27,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -187,23 +186,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod volumeSpecMap[volumeSource.VolumeID] = spec } - instanceID, err := attacher.nodeInstanceID(nodeName) - if err != nil { - if err == cloudprovider.InstanceNotFound { - // If node doesn't exist, OpenStack Nova will assume the volumes are not attached to it. - // Mark the volumes as detached and return false without error. - glog.Warningf("VolumesAreAttached: node %q does not exist.", nodeName) - for spec := range volumesAttachedCheck { - volumesAttachedCheck[spec] = false - } - - return volumesAttachedCheck, nil - } - - return volumesAttachedCheck, err - } - - attachedResult, err := attacher.cinderProvider.DisksAreAttached(instanceID, volumeIDList) + attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList) if err != nil { // Log error and continue with attach glog.Errorf( @@ -381,20 +364,10 @@ func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error { volumeID := path.Base(volumeName) - instances, res := detacher.cinderProvider.Instances() - if !res { - return fmt.Errorf("failed to list openstack instances") - } - instanceID, err := instances.InstanceID(nodeName) - if ind := strings.LastIndex(instanceID, "/"); ind >= 0 { - instanceID = instanceID[(ind + 1):] - } - if err 
:= detacher.waitOperationFinished(volumeID); err != nil { return err } - - attached, err := detacher.cinderProvider.DiskIsAttached(instanceID, volumeID) + attached, instanceID, err := detacher.cinderProvider.DiskIsAttachedByName(nodeName, volumeID) if err != nil { // Log error and continue with detach glog.Errorf( diff --git a/pkg/volume/cinder/attacher_test.go b/pkg/volume/cinder/attacher_test.go index f868db675bf..ddc307cd5fe 100644 --- a/pkg/volume/cinder/attacher_test.go +++ b/pkg/volume/cinder/attacher_test.go @@ -132,7 +132,7 @@ func TestAttachDetach(t *testing.T) { name: "Attach_Positive", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil}, attach: attachCall{instanceID, volumeID, "", nil}, diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil}, test: func(testcase *testcase) (string, error) { @@ -147,7 +147,7 @@ func TestAttachDetach(t *testing.T) { name: "Attach_Positive_AlreadyAttached", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, true, nil}, diskPath: diskPathCall{instanceID, volumeID, "/dev/sda", nil}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) @@ -173,7 +173,7 @@ func TestAttachDetach(t *testing.T) { name: "Attach_Negative", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError}, attach: attachCall{instanceID, volumeID, "/dev/sda", attachError}, test: func(testcase *testcase) (string, error) { attacher := 
newAttacher(testcase) @@ -187,7 +187,7 @@ func TestAttachDetach(t *testing.T) { name: "Attach_Negative_DiskPatchFails", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil}, attach: attachCall{instanceID, volumeID, "", nil}, diskPath: diskPathCall{instanceID, volumeID, "", diskPathError}, test: func(testcase *testcase) (string, error) { @@ -201,7 +201,7 @@ func TestAttachDetach(t *testing.T) { { name: "VolumesAreAttached_Positive", instanceID: instanceID, - disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: true}, nil}, + disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, map[string]bool{volumeID: true}, nil}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName) @@ -214,7 +214,7 @@ func TestAttachDetach(t *testing.T) { { name: "VolumesAreAttached_Negative", instanceID: instanceID, - disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, map[string]bool{volumeID: false}, nil}, + disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, map[string]bool{volumeID: false}, nil}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) attachments, err := attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName) @@ -227,7 +227,7 @@ func TestAttachDetach(t *testing.T) { { name: "VolumesAreAttached_CinderFailed", instanceID: instanceID, - disksAreAttached: disksAreAttachedCall{instanceID, []string{volumeID}, nil, disksCheckError}, + disksAreAttached: disksAreAttachedCall{instanceID, nodeName, []string{volumeID}, nil, disksCheckError}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) attachments, err := 
attacher.VolumesAreAttached([]*volume.Spec{spec}, nodeName) @@ -242,7 +242,7 @@ func TestAttachDetach(t *testing.T) { name: "Detach_Positive", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, true, nil}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, true, nil}, detach: detachCall{instanceID, volumeID, nil}, test: func(testcase *testcase) (string, error) { detacher := newDetacher(testcase) @@ -255,7 +255,7 @@ func TestAttachDetach(t *testing.T) { name: "Detach_Positive_AlreadyDetached", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, nil}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, nil}, test: func(testcase *testcase) (string, error) { detacher := newDetacher(testcase) return "", detacher.Detach(volumeID, nodeName) @@ -267,7 +267,7 @@ func TestAttachDetach(t *testing.T) { name: "Detach_Positive_CheckFails", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError}, detach: detachCall{instanceID, volumeID, nil}, test: func(testcase *testcase) (string, error) { detacher := newDetacher(testcase) @@ -280,7 +280,7 @@ func TestAttachDetach(t *testing.T) { name: "Detach_Negative", instanceID: instanceID, operationPending: operationPendingCall{volumeID, false, done, nil}, - diskIsAttached: diskIsAttachedCall{instanceID, volumeID, false, diskCheckError}, + diskIsAttached: diskIsAttachedCall{instanceID, nodeName, volumeID, false, diskCheckError}, detach: detachCall{instanceID, volumeID, detachError}, test: func(testcase *testcase) (string, error) { detacher := newDetacher(testcase) @@ -426,6 +426,7 @@ type 
operationPendingCall struct { type diskIsAttachedCall struct { instanceID string + nodeName types.NodeName volumeID string isAttached bool ret error @@ -440,6 +441,7 @@ type diskPathCall struct { type disksAreAttachedCall struct { instanceID string + nodeName types.NodeName volumeIDs []string areAttached map[string]bool ret error @@ -572,6 +574,46 @@ func (testcase *testcase) ShouldTrustDevicePath() bool { return true } +func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) { + expected := &testcase.diskIsAttached + instanceID := expected.instanceID + // If testcase call DetachDisk*, return false + if *testcase.attachOrDetach == detachStatus { + return false, instanceID, nil + } + + // If testcase call AttachDisk*, return true + if *testcase.attachOrDetach == attachStatus { + return true, instanceID, nil + } + + if expected.nodeName != nodeName { + testcase.t.Errorf("Unexpected DiskIsAttachedByName call: expected nodename %s, got %s", expected.nodeName, nodeName) + return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call: wrong nodename") + } + + if expected.volumeID == "" && expected.instanceID == "" { + // testcase.diskIsAttached looks uninitialized, test did not expect to + // call DiskIsAttached + testcase.t.Errorf("Unexpected DiskIsAttachedByName call!") + return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call!") + } + + if expected.volumeID != volumeID { + testcase.t.Errorf("Unexpected DiskIsAttachedByName call: expected volumeID %s, got %s", expected.volumeID, volumeID) + return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call: wrong volumeID") + } + + if expected.instanceID != instanceID { + testcase.t.Errorf("Unexpected DiskIsAttachedByName call: expected instanceID %s, got %s", expected.instanceID, instanceID) + return false, instanceID, errors.New("Unexpected DiskIsAttachedByName call: wrong instanceID") + } + + 
glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret) + + return expected.isAttached, expected.instanceID, expected.ret +} + func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) { return "", "", false, errors.New("Not implemented") } @@ -626,6 +668,36 @@ func (testcase *testcase) DisksAreAttached(instanceID string, volumeIDs []string return expected.areAttached, expected.ret } +func (testcase *testcase) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) { + expected := &testcase.disksAreAttached + areAttached := make(map[string]bool) + + instanceID := expected.instanceID + if expected.nodeName != nodeName { + testcase.t.Errorf("Unexpected DisksAreAttachedByName call: expected nodeName %s, got %s", expected.nodeName, nodeName) + return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong nodename") + } + if len(expected.volumeIDs) == 0 && expected.instanceID == "" { + // testcase.volumeIDs looks uninitialized, test did not expect to call DisksAreAttached + testcase.t.Errorf("Unexpected DisksAreAttachedByName call!") + return areAttached, errors.New("Unexpected DisksAreAttachedByName call") + } + + if !reflect.DeepEqual(expected.volumeIDs, volumeIDs) { + testcase.t.Errorf("Unexpected DisksAreAttachedByName call: expected volumeIDs %v, got %v", expected.volumeIDs, volumeIDs) + return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong volumeID") + } + + if expected.instanceID != instanceID { + testcase.t.Errorf("Unexpected DisksAreAttachedByName call: expected instanceID %s, got %s", expected.instanceID, instanceID) + return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong instanceID") + } + + glog.V(4).Infof("DisksAreAttachedByName call: %v, %s, returning %v, %v", volumeIDs, nodeName, 
expected.areAttached, expected.ret) + + return expected.areAttached, expected.ret +} + // Implementation of fake cloudprovider.Instances type instances struct { instanceID string diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go index c5b785cd0ab..07fa459a98d 100644 --- a/pkg/volume/cinder/cinder.go +++ b/pkg/volume/cinder/cinder.go @@ -52,7 +52,8 @@ type CinderProvider interface { GetAttachmentDiskPath(instanceID, volumeID string) (string, error) OperationPending(diskName string) (bool, string, error) DiskIsAttached(instanceID, volumeID string) (bool, error) - DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) + DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) + DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) ShouldTrustDevicePath() bool Instances() (cloudprovider.Instances, bool) ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) From 877143e547921747d9fd14e2af776b34663d37dc Mon Sep 17 00:00:00 2001 From: David Eads Date: Wed, 10 Jan 2018 12:11:17 -0500 Subject: [PATCH 717/794] manuallly handle encoding and decoding in the scale client --- staging/src/k8s.io/client-go/scale/client.go | 47 +++++++++++++++----- staging/src/k8s.io/client-go/scale/util.go | 19 ++++++++ 2 files changed, 54 insertions(+), 12 deletions(-) diff --git a/staging/src/k8s.io/client-go/scale/client.go b/staging/src/k8s.io/client-go/scale/client.go index 3f85197a0b6..07c6098620b 100644 --- a/staging/src/k8s.io/client-go/scale/client.go +++ b/staging/src/k8s.io/client-go/scale/client.go @@ -21,6 +21,7 @@ import ( autoscaling "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/dynamic" @@ -129,21 +130,29 @@ func (c *namespacedScaleClient) 
Get(resource schema.GroupResource, name string) return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err) } - rawObj, err := c.client.clientBase.Get(). + result := c.client.clientBase.Get(). AbsPath(path). Namespace(c.namespace). Resource(gvr.Resource). Name(name). SubResource("scale"). - Do(). - Get() + Do() + if err := result.Error(); err != nil { + return nil, fmt.Errorf("could not fetch the scale for %s %s: %v", resource.String(), name, err) + } + scaleBytes, err := result.Raw() + if err != nil { + return nil, err + } + decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...) + rawScaleObj, err := runtime.Decode(decoder, scaleBytes) if err != nil { return nil, err } // convert whatever this is to autoscaling/v1.Scale - scaleObj, err := scaleConverter.ConvertToVersion(rawObj, autoscaling.SchemeGroupVersion) + scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion) if err != nil { return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) } @@ -158,7 +167,7 @@ func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *aut } // Currently, a /scale endpoint can receive and return different scale types. - // Until we hvae support for the alternative API representations proposal, + // Until we have support for the alternative API representations proposal, // we need to deal with sending and accepting differnet API versions. 
// figure out what scale we actually need here @@ -170,25 +179,39 @@ func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *aut // convert this to whatever this endpoint wants scaleUpdate, err := scaleConverter.ConvertToVersion(scale, desiredGVK.GroupVersion()) if err != nil { - return nil, fmt.Errorf("could not convert scale update to internal Scale: %v", err) + return nil, fmt.Errorf("could not convert scale update to external Scale: %v", err) + } + encoder := scaleConverter.codecs.LegacyCodec(desiredGVK.GroupVersion()) + scaleUpdateBytes, err := runtime.Encode(encoder, scaleUpdate) + if err != nil { + return nil, fmt.Errorf("could not encode scale update to external Scale: %v", err) } - rawObj, err := c.client.clientBase.Put(). + result := c.client.clientBase.Put(). AbsPath(path). Namespace(c.namespace). Resource(gvr.Resource). Name(scale.Name). SubResource("scale"). - Body(scaleUpdate). - Do(). - Get() + Body(scaleUpdateBytes). + Do() + if err := result.Error(); err != nil { + panic(err) + return nil, fmt.Errorf("could not update the scale for %s %s: %v", resource.String(), scale.Name, err) + } + scaleBytes, err := result.Raw() if err != nil { - return nil, fmt.Errorf("could not fetch the scale for %s %s: %v", resource.String(), scale.Name, err) + return nil, err + } + decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...) 
+ rawScaleObj, err := runtime.Decode(decoder, scaleBytes) + if err != nil { + return nil, err } // convert whatever this is back to autoscaling/v1.Scale - scaleObj, err := scaleConverter.ConvertToVersion(rawObj, autoscaling.SchemeGroupVersion) + scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion) if err != nil { return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) } diff --git a/staging/src/k8s.io/client-go/scale/util.go b/staging/src/k8s.io/client-go/scale/util.go index 9eb10853605..46b5c4d1e2d 100644 --- a/staging/src/k8s.io/client-go/scale/util.go +++ b/staging/src/k8s.io/client-go/scale/util.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/discovery" scalescheme "k8s.io/client-go/scale/scheme" scaleappsint "k8s.io/client-go/scale/scheme/appsint" @@ -124,6 +125,7 @@ func NewDiscoveryScaleKindResolver(client discovery.ServerResourcesInterface) Sc // ScaleConverter knows how to convert between external scale versions. 
type ScaleConverter struct { scheme *runtime.Scheme + codecs serializer.CodecFactory internalVersioner runtime.GroupVersioner } @@ -141,6 +143,7 @@ func NewScaleConverter() *ScaleConverter { return &ScaleConverter{ scheme: scheme, + codecs: serializer.NewCodecFactory(scheme), internalVersioner: runtime.NewMultiGroupVersioner( scalescheme.SchemeGroupVersion, schema.GroupKind{Group: scaleext.GroupName, Kind: "Scale"}, @@ -156,6 +159,22 @@ func (c *ScaleConverter) Scheme() *runtime.Scheme { return c.scheme } +func (c *ScaleConverter) Codecs() serializer.CodecFactory { + return c.codecs +} + +func (c *ScaleConverter) ScaleVersions() []schema.GroupVersion { + return []schema.GroupVersion{ + scaleautoscaling.SchemeGroupVersion, + scalescheme.SchemeGroupVersion, + scaleext.SchemeGroupVersion, + scaleextint.SchemeGroupVersion, + scaleappsint.SchemeGroupVersion, + scaleappsv1beta1.SchemeGroupVersion, + scaleappsv1beta2.SchemeGroupVersion, + } +} + // ConvertToVersion converts the given *external* input object to the given output *external* output group-version. func (c *ScaleConverter) ConvertToVersion(in runtime.Object, outVersion schema.GroupVersion) (runtime.Object, error) { scaleInt, err := c.scheme.ConvertToVersion(in, c.internalVersioner) From bf60b7aa5538280c509128ed9312650e1ba26521 Mon Sep 17 00:00:00 2001 From: Lee Verberne Date: Wed, 10 Jan 2018 17:47:13 +0100 Subject: [PATCH 718/794] Mark kubelet PID namespace flag as deprecated The `--docker-disable-shared-pid` flag will be removed once per-pod configurable process namespace sharing becomes available. Mark it deprecated to notify cluster admins. 
--- pkg/kubelet/config/flags.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/kubelet/config/flags.go b/pkg/kubelet/config/flags.go index 705b8babfd2..f951ee92257 100644 --- a/pkg/kubelet/config/flags.go +++ b/pkg/kubelet/config/flags.go @@ -89,7 +89,8 @@ func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) { fs.MarkHidden("experimental-dockershim") fs.StringVar(&s.DockershimRootDirectory, "experimental-dockershim-root-directory", s.DockershimRootDirectory, "Path to the dockershim root directory.") fs.MarkHidden("experimental-dockershim-root-directory") - fs.BoolVar(&s.DockerDisableSharedPID, "docker-disable-shared-pid", s.DockerDisableSharedPID, "The Container Runtime Interface (CRI) defaults to using a shared PID namespace for containers in a pod when running with Docker 1.13.1 or higher. Setting this flag reverts to the previous behavior of isolated PID namespaces. This ability will be removed in a future Kubernetes release.") + fs.BoolVar(&s.DockerDisableSharedPID, "docker-disable-shared-pid", s.DockerDisableSharedPID, "Setting this to false causes Kubernetes to create pods using a shared process namespace for containers in a pod when running with Docker 1.13.1 or higher. A future Kubernetes release will make this configurable instead in the API.") + fs.MarkDeprecated("docker-disable-shared-pid", "will be removed in a future release. This option will be replaced by PID namespace sharing that is configurable per-pod using the API. 
See https://features.k8s.io/495") fs.StringVar(&s.PodSandboxImage, "pod-infra-container-image", s.PodSandboxImage, "The image whose network/ipc namespaces containers in each pod will use.") fs.StringVar(&s.DockerEndpoint, "docker-endpoint", s.DockerEndpoint, "Use this for the docker endpoint to communicate with") fs.DurationVar(&s.ImagePullProgressDeadline.Duration, "image-pull-progress-deadline", s.ImagePullProgressDeadline.Duration, "If no pulling progress is made before this deadline, the image pulling will be cancelled.") From 316abc7fe08f276c52f35ea7d882216fdb631243 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Bauer?= Date: Wed, 10 Jan 2018 10:39:28 +0100 Subject: [PATCH 719/794] added fluent-plugin-detect-exceptions plugin to fluentd-es-image added configmap changes raised fluentd-es-configmap version fixed missing version match raised image version --- .../fluentd-es-configmap.yaml | 14 ++++++++++++-- .../fluentd-elasticsearch/fluentd-es-ds.yaml | 12 ++++++------ .../fluentd-elasticsearch/fluentd-es-image/Gemfile | 1 + .../fluentd-es-image/Makefile | 2 +- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml index 09fbad0ebf4..28ffb1c03b1 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml @@ -105,7 +105,7 @@ data: path /var/log/containers/*.log pos_file /var/log/es-containers.log.pos time_format %Y-%m-%dT%H:%M:%S.%NZ - tag kubernetes.* + tag raw.kubernetes.* read_from_head true format multi_format @@ -118,6 +118,16 @@ data: time_format %Y-%m-%dT%H:%M:%S.%N%:z + # Detect exceptions in the log output and forward them as one log entry. 
+ + @type detect_exceptions + remove_tag_prefix raw + message log + stream stream + multiline_flush_interval 5 + max_bytes 500000 + max_lines 1000 + system.input.conf: |- # Example: # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081 @@ -367,7 +377,7 @@ data: num_threads 2 metadata: - name: fluentd-es-config-v0.1.1 + name: fluentd-es-config-v0.1.2 namespace: kube-system labels: addonmanager.kubernetes.io/mode: Reconcile diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml index 74242adce74..405bfdd4c24 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml @@ -48,24 +48,24 @@ roleRef: apiVersion: apps/v1beta2 kind: DaemonSet metadata: - name: fluentd-es-v2.0.2 + name: fluentd-es-v2.0.3 namespace: kube-system labels: k8s-app: fluentd-es - version: v2.0.2 + version: v2.0.3 kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: selector: matchLabels: k8s-app: fluentd-es - version: v2.0.2 + version: v2.0.3 template: metadata: labels: k8s-app: fluentd-es kubernetes.io/cluster-service: "true" - version: v2.0.2 + version: v2.0.3 # This annotation ensures that fluentd does not get evicted if the node # supports critical pod annotation based priority scheme. # Note that this does not guarantee admission on the nodes (#40573). 
@@ -75,7 +75,7 @@ spec: serviceAccountName: fluentd-es containers: - name: fluentd-es - image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.2 + image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.3 env: - name: FLUENTD_ARGS value: --no-supervisor -q @@ -112,4 +112,4 @@ spec: path: /usr/lib64 - name: config-volume configMap: - name: fluentd-es-config-v0.1.1 + name: fluentd-es-config-v0.1.2 diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile index c936b40f3c4..1fab8f51f78 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Gemfile @@ -5,6 +5,7 @@ gem 'activesupport', '~>4.2.6' gem 'fluent-plugin-kubernetes_metadata_filter', '~>0.27.0' gem 'fluent-plugin-elasticsearch', '~>1.9.5' gem 'fluent-plugin-systemd', '~>0.0.8' +gem 'fluent-plugin-detect-exceptions', '~>0.0.8' gem 'fluent-plugin-prometheus', '~>0.3.0' gem 'fluent-plugin-multi-format-parser', '~>0.1.1' gem 'oj', '~>2.18.1' diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile index 0b5fa8a487c..9d161fa6693 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile @@ -16,7 +16,7 @@ PREFIX = gcr.io/google-containers IMAGE = fluentd-elasticsearch -TAG = v2.0.2 +TAG = v2.0.3 build: docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) . 
From 553a3f049b639b2838f55815a4c62d64b32dde21 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Wed, 10 Jan 2018 10:00:00 -0800 Subject: [PATCH 720/794] remove deprecated photon controller --- cluster/photon-controller/config-common.sh | 72 -- cluster/photon-controller/config-default.sh | 94 -- cluster/photon-controller/config-test.sh | 20 - cluster/photon-controller/setup-prereq.sh | 239 ---- cluster/photon-controller/templates/README | 4 - .../templates/create-dynamic-salt-files.sh | 127 -- .../photon-controller/templates/hostname.sh | 22 - .../templates/install-release.sh | 26 - .../templates/salt-master.sh | 59 - .../templates/salt-minion.sh | 51 - cluster/photon-controller/util.sh | 1110 ----------------- 11 files changed, 1824 deletions(-) delete mode 100644 cluster/photon-controller/config-common.sh delete mode 100755 cluster/photon-controller/config-default.sh delete mode 100755 cluster/photon-controller/config-test.sh delete mode 100755 cluster/photon-controller/setup-prereq.sh delete mode 100644 cluster/photon-controller/templates/README delete mode 100755 cluster/photon-controller/templates/create-dynamic-salt-files.sh delete mode 100755 cluster/photon-controller/templates/hostname.sh delete mode 100755 cluster/photon-controller/templates/install-release.sh delete mode 100755 cluster/photon-controller/templates/salt-master.sh delete mode 100755 cluster/photon-controller/templates/salt-minion.sh delete mode 100755 cluster/photon-controller/util.sh diff --git a/cluster/photon-controller/config-common.sh b/cluster/photon-controller/config-common.sh deleted file mode 100644 index 412eb26ba2f..00000000000 --- a/cluster/photon-controller/config-common.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -########################################################## -# -# These parameters describe objects we are using from -# Photon Controller. They are all assumed to be pre-existing. -# -# Note: if you want help in creating them, you can use -# the setup-prereq.sh script, which will create any of these -# that do not already exist. -# -########################################################## - -# Pre-created tenant for Kubernetes to use -PHOTON_TENANT=kube-tenant - -# Pre-created project in PHOTON_TENANT for Kubernetes to use -PHOTON_PROJECT=kube-project - -# Pre-created VM flavor for Kubernetes master to use -# Can be same as master -# We recommend at least 1GB of memory -PHOTON_MASTER_FLAVOR=kube-vm - -# Pre-created VM flavor for Kubernetes node to use -# Can be same as master -# We recommend at least 2GB of memory -PHOTON_NODE_FLAVOR=kube-vm - -# Pre-created disk flavor for Kubernetes to use -PHOTON_DISK_FLAVOR=kube-disk - -# Pre-created Debian 8 image with kube user uploaded to Photon Controller -# Note: While Photon Controller allows multiple images to have the same -# name, we assume that there is exactly one image with this name. -PHOTON_IMAGE=kube - -########################################################## -# -# Parameters just for the setup-prereq.sh script: not used -# elsewhere. If you create the above objects by hand, you -# do not need to edit these. -# -# Note that setup-prereq.sh also creates the objects -# above. 
-# -########################################################## - -# The specifications for the master and node flavors -SETUP_MASTER_FLAVOR_SPEC="vm 1 COUNT, vm.cpu 1 COUNT, vm.memory 2 GB" -SETUP_NODE_FLAVOR_SPEC=${SETUP_MASTER_FLAVOR_SPEC} - -# The specification for the ephemeral disk flavor. -SETUP_DISK_FLAVOR_SPEC="ephemeral-disk 1 COUNT" - -# The specification for the tenant resource ticket and the project resources -SETUP_TICKET_SPEC="vm.memory 1000 GB, vm 1000 COUNT" -SETUP_PROJECT_SPEC="${SETUP_TICKET_SPEC}" diff --git a/cluster/photon-controller/config-default.sh b/cluster/photon-controller/config-default.sh deleted file mode 100755 index b2f71dd8c51..00000000000 --- a/cluster/photon-controller/config-default.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -########################################################## -# -# Common parameters for Kubernetes -# -########################################################## - -# Default number of nodes to make. 
You can change this as needed -NUM_NODES=3 - -# Range of IPs assigned to pods -NODE_IP_RANGES="10.244.0.0/16" - -# IPs used by Kubernetes master -MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" - -# Range of IPs assigned by Kubernetes to services -SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" - -########################################################## -# -# Advanced parameters for Kubernetes -# -########################################################## - -# The instance prefix is the beginning of the name given to each VM we create -# If this is changed, you can have multiple kubernetes clusters per project -# Note that even if you don't change it, each tenant/project can have its own -# Kubernetes cluster -INSTANCE_PREFIX=kubernetes - -# Name of the user used to configure the VM -# We use cloud-init to create the user -VM_USER=kube - -# SSH options for how we connect to the Kubernetes VMs -# We set the user known hosts file to /dev/null because we are connecting to new VMs. -# When working in an environment where there is a lot of VM churn, VM IP addresses -# will be reused, and the ssh keys will be different. This prevents us from seeing error -# due to this, and it will not save the SSH key to the known_hosts file, so users will -# still have standard ssh security checks. -SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR -C" - -# Optional: Enable node logging. 
-# Note: currently untested -ENABLE_NODE_LOGGING=false -LOGGING_DESTINATION=elasticsearch - -# Optional: When set to true, Elasticsearch and Kibana will be setup -# Note: currently untested -ENABLE_CLUSTER_LOGGING=false -ELASTICSEARCH_LOGGING_REPLICAS=1 - -# Optional: Cluster monitoring to setup as part of the cluster bring up: -# none - No cluster monitoring setup -# influxdb - Heapster, InfluxDB, and Grafana -# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging -# Note: currently untested -ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}" - -# Optional: Install cluster DNS. -ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" -DNS_SERVER_IP="10.244.240.240" -DNS_DOMAIN="cluster.local" - -# Optional: Enable DNS horizontal autoscaler -ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}" - -# Optional: Install Kubernetes UI -ENABLE_CLUSTER_UI=true - -# We need to configure subject alternate names (SANs) for the master's certificate -# we generate. While users will connect via the external IP, pods (like the UI) -# will connect via the cluster IP, from the SERVICE_CLUSTER_IP_RANGE. -# In addition to the extra SANS here, we'll also add one for for the service IP. -MASTER_EXTRA_SANS="DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN}" - -# Optional: if set to true, kube-up will configure the cluster to run e2e tests. -E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false} diff --git a/cluster/photon-controller/config-test.sh b/cluster/photon-controller/config-test.sh deleted file mode 100755 index 87e68d72f79..00000000000 --- a/cluster/photon-controller/config-test.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -NUM_NODES=2 -NODE_IP_RANGES="10.244.0.0/16" -MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" -SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" diff --git a/cluster/photon-controller/setup-prereq.sh b/cluster/photon-controller/setup-prereq.sh deleted file mode 100755 index 7212081327b..00000000000 --- a/cluster/photon-controller/setup-prereq.sh +++ /dev/null @@ -1,239 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This sets up a Photon Controller with the tenant, project, flavors -# and image that are needed to deploy Kubernetes with kube-up. -# -# This is not meant to be used in production: it creates resource tickets -# (quotas) that are arbitrary and not likely to work in your environment. -# However, it may be a quick way to get your environment set up to try out -# a Kubernetes installation. 
-# -# It uses the names for the tenant, project, and flavors as specified in the -# config-common.sh file -# -# If you want to do this by hand, this script is equivalent to the following -# Photon Controller commands (assuming you haven't edited config-common.sh -# to change the names) -# -# photon target set https://192.0.2.2 -# photon tenant create kube-tenant -# photon tenant set kube-tenant -# photon resource-ticket create --tenant kube-tenant --name kube-resources --limits "vm.memory 1000 GB, vm 1000 COUNT" -# photon project create --tenant kube-tenant --resource-ticket kube-resources --name kube-project --limits "vm.memory 1000 GB, vm 1000 COUNT" -# photon project set kube-project -# photon -n flavor create --name "kube-vm" --kind "vm" --cost "vm 1 COUNT, vm.cpu 1 COUNT, vm.memory 2 GB" -# photon -n flavor create --name "kube-disk" --kind "ephemeral-disk" --cost "ephemeral-disk 1 COUNT" -# photon image create kube.vmdk -n kube-image -i EAGER -# -# Note that the kube.vmdk can be downloaded as specified in the documentation. - -set -o errexit -set -o nounset -set -o pipefail - -KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. 
-# shellcheck source=./util.sh -source "${KUBE_ROOT}/cluster/photon-controller/util.sh" - -function main { - verify-cmd-in-path photon - set-target - create-tenant - create-project - create-vm-flavor "${PHOTON_MASTER_FLAVOR}" "${SETUP_MASTER_FLAVOR_SPEC}" - if [ "${PHOTON_MASTER_FLAVOR}" != "${PHOTON_NODE_FLAVOR}" ]; then - create-vm-flavor "${PHOTON_NODE_FLAVOR}" "${SETUP_NODE_FLAVOR_SPEC}" - fi - create-disk-flavor - create-image -} - -function parse-cmd-line { - PHOTON_TARGET=${1:-""} - PHOTON_VMDK=${2:-""} - - if [[ "${PHOTON_TARGET}" = "" || "${PHOTON_VMDK}" = "" ]]; then - echo "Usage: setup-prereq " - echo "Target should be a URL like https://192.0.2.1" - echo "" - echo "This will create the following, based on the configuration in config-common.sh" - echo " * A tenant named ${PHOTON_TENANT}" - echo " * A project named ${PHOTON_PROJECT}" - echo " * A VM flavor named ${PHOTON_MASTER_FLAVOR}" - echo " * A disk flavor named ${PHOTON_DISK_FLAVOR}" - echo "It will also upload the Kube VMDK" - echo "" - echo "It creates the tenant with a resource ticket (quota) that may" - echo "be inappropriate for your environment. For a production" - echo "environment, you should configure these to match your" - echo "environment." - exit 1 - fi - - echo "Photon Target: ${PHOTON_TARGET}" - echo "Photon VMDK: ${PHOTON_VMDK}" -} - -function set-target { - ${PHOTON} target set "${PHOTON_TARGET}" > /dev/null 2>&1 -} - -function create-tenant { - local rc=0 - local output - - ${PHOTON} tenant list | grep -q "\t${PHOTON_TENANT}$" > /dev/null 2>&1 || rc=$? 
- if [[ ${rc} -eq 0 ]]; then - echo "Tenant ${PHOTON_TENANT} already made, skipping" - else - echo "Making tenant ${PHOTON_TENANT}" - rc=0 - output=$(${PHOTON} tenant create "${PHOTON_TENANT}" 2>&1) || { - echo "ERROR: Could not create tenant \"${PHOTON_TENANT}\", exiting" - echo "Output from tenant creation:" - echo "${output}" - exit 1 - } - fi - ${PHOTON} tenant set "${PHOTON_TENANT}" > /dev/null 2>&1 -} - -function create-project { - local rc=0 - local output - - ${PHOTON} project list | grep -q "\t${PHOTON_PROJECT}\t" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -eq 0 ]]; then - echo "Project ${PHOTON_PROJECT} already made, skipping" - else - echo "Making project ${PHOTON_PROJECT}" - rc=0 - output=$(${PHOTON} resource-ticket create --tenant "${PHOTON_TENANT}" --name "${PHOTON_TENANT}-resources" --limits "${SETUP_TICKET_SPEC}" 2>&1) || { - echo "ERROR: Could not create resource ticket, exiting" - echo "Output from resource ticket creation:" - echo "${output}" - exit 1 - } - - rc=0 - output=$(${PHOTON} project create --tenant "${PHOTON_TENANT}" --resource-ticket "${PHOTON_TENANT}-resources" --name "${PHOTON_PROJECT}" --limits "${SETUP_PROJECT_SPEC}" 2>&1) || { - echo "ERROR: Could not create project \"${PHOTON_PROJECT}\", exiting" - echo "Output from project creation:" - echo "${output}" - exit 1 - } - fi - ${PHOTON} project set "${PHOTON_PROJECT}" -} - -function create-vm-flavor { - local flavor_name=${1} - local flavor_spec=${2} - local rc=0 - local output - - ${PHOTON} flavor list | grep -q "\t${flavor_name}\t" > /dev/null 2>&1 || rc=$? 
- if [[ ${rc} -eq 0 ]]; then - check-flavor-ready "${flavor_name}" - echo "Flavor ${flavor_name} already made, skipping" - else - echo "Making VM flavor ${flavor_name}" - rc=0 - output=$(${PHOTON} -n flavor create --name "${flavor_name}" --kind "vm" --cost "${flavor_spec}" 2>&1) || { - echo "ERROR: Could not create vm flavor \"${flavor_name}\", exiting" - echo "Output from flavor creation:" - echo "${output}" - exit 1 - } - fi -} - -function create-disk-flavor { - local rc=0 - local output - - ${PHOTON} flavor list | grep -q "\t${PHOTON_DISK_FLAVOR}\t" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -eq 0 ]]; then - check-flavor-ready "${PHOTON_DISK_FLAVOR}" - echo "Flavor ${PHOTON_DISK_FLAVOR} already made, skipping" - else - echo "Making disk flavor ${PHOTON_DISK_FLAVOR}" - rc=0 - output=$(${PHOTON} -n flavor create --name "${PHOTON_DISK_FLAVOR}" --kind "ephemeral-disk" --cost "${SETUP_DISK_FLAVOR_SPEC}" 2>&1) || { - echo "ERROR: Could not create disk flavor \"${PHOTON_DISK_FLAVOR}\", exiting" - echo "Output from flavor creation:" - echo "${output}" - exit 1 - } - fi -} - -function check-flavor-ready { - local flavor_name=${1} - local rc=0 - - local flavor_id - flavor_id=$(${PHOTON} flavor list | grep "\t${flavor_name}\t" | awk '{print $1}') || { - echo "ERROR: Found ${flavor_name} but cannot find it's id" - exit 1 - } - - ${PHOTON} flavor show "${flavor_id}" | grep "\tREADY\$" > /dev/null 2>&1 || { - echo "ERROR: Flavor \"${flavor_name}\" already exists but is not READY. Please delete or fix it." - exit 1 - } -} - -function create-image { - local rc=0 - local num_images - local output - - ${PHOTON} image list | grep "\t${PHOTON_IMAGE}\t" | grep -q ERROR > /dev/null 2>&1 || rc=$? - if [[ ${rc} -eq 0 ]]; then - echo "Warning: You have at least one ${PHOTON_IMAGE} image in the ERROR state. You may want to investigate." - echo "Images in the ERROR state will be ignored." 
- fi - - rc=0 - # We don't use grep -c because it exists non-zero when there are no matches, tell shellcheck - # shellcheck disable=SC2126 - num_images=$(${PHOTON} image list | grep "\t${PHOTON_IMAGE}\t" | grep READY | wc -l) - if [[ "${num_images}" -gt 1 ]]; then - echo "Warning: You have more than one good ${PHOTON_IMAGE} image. You may want to remove duplicates." - fi - - ${PHOTON} image list | grep "\t${PHOTON_IMAGE}\t" | grep -q READY > /dev/null 2>&1 || rc=$? - if [[ ${rc} -eq 0 ]]; then - echo "Image ${PHOTON_VMDK} already uploaded, skipping" - else - echo "Uploading image ${PHOTON_VMDK}" - rc=0 - output=$(${PHOTON} image create "${PHOTON_VMDK}" -n "${PHOTON_IMAGE}" -i EAGER 2>&1) || { - echo "ERROR: Could not upload image, exiting" - echo "Output from image create:" - echo "${output}" - exit 1 - } - fi -} - -# We don't want silent pipeline failure: we check for failure -set +o pipefail - -parse-cmd-line "$@" -main diff --git a/cluster/photon-controller/templates/README b/cluster/photon-controller/templates/README deleted file mode 100644 index b91d629fa0c..00000000000 --- a/cluster/photon-controller/templates/README +++ /dev/null @@ -1,4 +0,0 @@ -The scripts in this directory are not meant to be invoked -directly. Instead they are partial scripts that are combined into full -scripts by util.sh and are run on the Kubernetes nodes are part of the -setup. diff --git a/cluster/photon-controller/templates/create-dynamic-salt-files.sh b/cluster/photon-controller/templates/create-dynamic-salt-files.sh deleted file mode 100755 index 369fdb7095f..00000000000 --- a/cluster/photon-controller/templates/create-dynamic-salt-files.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#generate token files - -KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) -KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) -known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" -if [[ ! -f "${known_tokens_file}" ]]; then - - mkdir -p /srv/salt-overlay/salt/kube-apiserver - known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" - (umask u=rw,go= ; - echo "$KUBELET_TOKEN,kubelet,kubelet" > $known_tokens_file; - echo "$KUBE_PROXY_TOKEN,kube_proxy,kube_proxy" >> $known_tokens_file) - - mkdir -p /srv/salt-overlay/salt/kubelet - kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth" - (umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file) - kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig" - - mkdir -p /srv/salt-overlay/salt/kubelet - (umask 077; - cat > "${kubelet_kubeconfig_file}" << EOF -apiVersion: v1 -kind: Config -clusters: -- cluster: - server: https://${KUBE_MASTER_IP} - insecure-skip-tls-verify: true - name: local -contexts: -- context: - cluster: local - user: kubelet - name: service-account-context -current-context: service-account-context -users: -- name: kubelet - user: - token: ${KUBELET_TOKEN} -EOF -) - - - mkdir -p /srv/salt-overlay/salt/kube-proxy - kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig" - # Make a kubeconfig file with the token. 
- # TODO(etune): put apiserver certs into secret too, and reference from authfile, - # so that "Insecure" is not needed. - (umask 077; - cat > "${kube_proxy_kubeconfig_file}" << EOF -apiVersion: v1 -kind: Config -clusters: -- cluster: - insecure-skip-tls-verify: true - name: local -contexts: -- context: - cluster: local - user: kube-proxy - name: service-account-context -current-context: service-account-context -users: -- name: kube-proxy - user: - token: ${KUBE_PROXY_TOKEN} -EOF -) - - # Generate tokens for other "service accounts". Append to known_tokens. - # - # NB: If this list ever changes, this script actually has to - # change to detect the existence of this file, kill any deleted - # old tokens and add any new tokens (to handle the upgrade case). - service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns") - for account in "${service_accounts[@]}"; do - token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - echo "${token},${account},${account}" >> "${known_tokens_file}" - done -fi - -readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv" -if [[ ! -e "${BASIC_AUTH_FILE}" ]]; then - mkdir -p /srv/salt-overlay/salt/kube-apiserver - (umask 077; - echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}") -fi - - -# Create the overlay files for the salt tree. We create these in a separate -# place so that we can blow away the rest of the salt configs on a kube-push and -# re-apply these. 
- -mkdir -p /srv/salt-overlay/pillar -cat </srv/salt-overlay/pillar/cluster-params.sls -instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' -node_instance_prefix: $NODE_INSTANCE_PREFIX -service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE -enable_cluster_monitoring: "${ENABLE_CLUSTER_MONITORING:-none}" -enable_cluster_logging: "${ENABLE_CLUSTER_LOGGING:false}" -enable_cluster_ui: "${ENABLE_CLUSTER_UI:true}" -enable_node_logging: "${ENABLE_NODE_LOGGING:false}" -logging_destination: $LOGGING_DESTINATION -elasticsearch_replicas: $ELASTICSEARCH_LOGGING_REPLICAS -enable_cluster_dns: "${ENABLE_CLUSTER_DNS:-false}" -dns_server: $DNS_SERVER_IP -dns_domain: $DNS_DOMAIN -e2e_storage_test_environment: "${E2E_STORAGE_TEST_ENVIRONMENT:-false}" -cluster_cidr: "$NODE_IP_RANGES" -allocate_node_cidrs: "${ALLOCATE_NODE_CIDRS:-true}" -admission_control: Initializers,NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota -EOF diff --git a/cluster/photon-controller/templates/hostname.sh b/cluster/photon-controller/templates/hostname.sh deleted file mode 100755 index ae7f4d0f4ef..00000000000 --- a/cluster/photon-controller/templates/hostname.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Remove kube.vm from /etc/hosts -sed -i -e 's/\b\w\+.vm\b//' /etc/hosts - -# Update hostname in /etc/hosts and /etc/hostname -sed -i -e "s/\\bkube\\b/${MY_NAME}/g" /etc/host{s,name} -hostname ${MY_NAME} diff --git a/cluster/photon-controller/templates/install-release.sh b/cluster/photon-controller/templates/install-release.sh deleted file mode 100755 index 34206a35aab..00000000000 --- a/cluster/photon-controller/templates/install-release.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script assumes that the environment variable SERVER_BINARY_TAR contains -# the release tar to download and unpack. It is meant to be pushed to the -# master and run. - -echo "Unpacking Salt tree" -rm -rf kubernetes -tar xzf "${SALT_TAR}" - -echo "Running release install script" -sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR}" diff --git a/cluster/photon-controller/templates/salt-master.sh b/cluster/photon-controller/templates/salt-master.sh deleted file mode 100755 index 19281d008f1..00000000000 --- a/cluster/photon-controller/templates/salt-master.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Use other Debian mirror -sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list - -# Prepopulate the name of the Master -mkdir -p /etc/salt/minion.d -echo "master: ${MASTER_NAME}" > /etc/salt/minion.d/master.conf - -cat </etc/salt/minion.d/grains.conf -grains: - roles: - - kubernetes-master - cbr-cidr: $MASTER_IP_RANGE - cloud: photon-controller - master_extra_sans: $MASTER_EXTRA_SANS - api_servers: $MASTER_NAME - kubelet_kubeconfig: /srv/salt-overlay/salt/kubelet/kubeconfig - kube_user: $KUBE_USER -EOF - -# Auto accept all keys from minions that try to join -mkdir -p /etc/salt/master.d -cat </etc/salt/master.d/auto-accept.conf -auto_accept: True -EOF - -cat </etc/salt/master.d/reactor.conf -# React to new minions starting by running highstate on them. -reactor: - - 'salt/minion/*/start': - - /srv/reactor/highstate-new.sls - - /srv/reactor/highstate-masters.sls - - /srv/reactor/highstate-minions.sls -EOF - -# Install Salt -# -# We specify -X to avoid a race condition that can cause minion failure to -# install. See https://github.com/saltstack/salt-bootstrap/issues/270 -# -# -M installs the master -set +x -curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -X -set -x diff --git a/cluster/photon-controller/templates/salt-minion.sh b/cluster/photon-controller/templates/salt-minion.sh deleted file mode 100755 index 314e5e726d5..00000000000 --- a/cluster/photon-controller/templates/salt-minion.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Use other Debian mirror -sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list - -# Resolve hostname of master -if ! grep -q $KUBE_MASTER /etc/hosts; then - echo "Adding host entry for $KUBE_MASTER" - echo "${KUBE_MASTER_IP} ${KUBE_MASTER}" >> /etc/hosts -fi - -# Prepopulate the name of the Master -mkdir -p /etc/salt/minion.d -echo "master: ${KUBE_MASTER}" > /etc/salt/minion.d/master.conf - -# Turn on debugging for salt-minion -# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion - -# Our minions will have a pool role to distinguish them from the master. -# -# Setting the "minion_ip" here causes the kubelet to use its IP for -# identification instead of its hostname. -# -cat </etc/salt/minion.d/grains.conf -grains: - hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}') - roles: - - kubernetes-pool - - kubernetes-pool-photon-controller - cloud: photon-controller -EOF - -# Install Salt -# -# We specify -X to avoid a race condition that can cause minion failure to -# install. 
See https://github.com/saltstack/salt-bootstrap/issues/270 -curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -X diff --git a/cluster/photon-controller/util.sh b/cluster/photon-controller/util.sh deleted file mode 100755 index 55ec52ff9cc..00000000000 --- a/cluster/photon-controller/util.sh +++ /dev/null @@ -1,1110 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts. - -KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. -# shellcheck source=./config-common.sh -source "${KUBE_ROOT}/cluster/photon-controller/config-common.sh" -# shellcheck source=./config-default.sh -source "${KUBE_ROOT}/cluster/photon-controller/${KUBE_CONFIG_FILE-"config-default.sh"}" -# shellcheck source=../common.sh -source "${KUBE_ROOT}/cluster/common.sh" - -readonly PHOTON="photon -n" - -# Naming scheme for VMs (masters & nodes) -readonly MASTER_NAME="${INSTANCE_PREFIX}-master" - -# shell check claims this doesn't work because you can't use a variable in a brace -# range. It does work because we're calling eval. 
-# shellcheck disable=SC2051 -readonly NODE_NAMES=($(eval echo "${INSTANCE_PREFIX}"-node-{1.."${NUM_NODES}"})) - -##################################################################### -# -# Public API -# -##################################################################### - -# -# detect-master will query Photon Controller for the Kubernetes master. -# It assumes that the VM name for the master is unique. -# It will set KUBE_MASTER_ID to be the VM ID of the master -# It will set KUBE_MASTER_IP to be the IP address of the master -# If the silent parameter is passed, it will not print when the master -# is found: this is used internally just to find the MASTER -# -function detect-master { - local silent=${1:-""} - local tenant_args="--tenant ${PHOTON_TENANT} --project ${PHOTON_PROJECT}" - - KUBE_MASTER=${MASTER_NAME} - KUBE_MASTER_ID=${KUBE_MASTER_ID:-""} - KUBE_MASTER_IP=${KUBE_MASTER_IP:-""} - - # We don't want silent failure: we check for failure - set +o pipefail - if [[ -z ${KUBE_MASTER_ID} ]]; then - KUBE_MASTER_ID=$(${PHOTON} vm list ${tenant_args} | grep $'\t'"kubernetes-master"$'\t' | awk '{print $1}') - fi - if [[ -z ${KUBE_MASTER_ID} ]]; then - kube::log::error "Could not find Kubernetes master node ID. Make sure you've launched a cluster with kube-up.sh" - exit 1 - fi - - if [[ -z "${KUBE_MASTER_IP-}" ]]; then - # Pick out the NICs that have a MAC address owned VMware (with OUI 00:0C:29) - # Make sure to ignore lines that have a network interface but no address - KUBE_MASTER_IP=$(${PHOTON} vm networks "${KUBE_MASTER_ID}" | grep -i $'\t'"00:0C:29" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}') - fi - if [[ -z "${KUBE_MASTER_IP-}" ]]; then - kube::log::error "Could not find Kubernetes master node IP. 
Make sure you've launched a cluster with 'kube-up.sh'" >&2 - exit 1 - fi - if [[ -z ${silent} ]]; then - kube::log::status "Master: $KUBE_MASTER ($KUBE_MASTER_IP)" - fi - # Reset default set in common.sh - set -o pipefail -} - -# -# detect-nodes will query Photon Controller for the Kubernetes nodes -# It assumes that the VM name for the nodes are unique. -# It assumes that NODE_NAMES has been set -# It will set KUBE_NODE_IP_ADDRESSES to be the VM IPs of the nodes -# It will set the KUBE_NODE_IDS to be the VM IDs of the nodes -# If the silent parameter is passed, it will not print when the nodes -# are found: this is used internally just to find the MASTER -# -function detect-nodes { - local silent=${1:-""} - local failure=0 - local tenant_args="--tenant ${PHOTON_TENANT} --project ${PHOTON_PROJECT}" - - KUBE_NODE_IP_ADDRESSES=() - KUBE_NODE_IDS=() - # We don't want silent failure: we check for failure - set +o pipefail - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - - local node_id - node_id=$(${PHOTON} vm list ${tenant_args} | grep $'\t'"${NODE_NAMES[${i}]}"$'\t' | awk '{print $1}') - if [[ -z ${node_id} ]]; then - kube::log::error "Could not find ${NODE_NAMES[${i}]}" - failure=1 - fi - KUBE_NODE_IDS+=("${node_id}") - - # Pick out the NICs that have a MAC address owned VMware (with OUI 00:0C:29) - # Make sure to ignore lines that have a network interface but no address - node_ip=$(${PHOTON} vm networks "${node_id}" | grep -i $'\t'"00:0C:29" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}') - KUBE_NODE_IP_ADDRESSES+=("${node_ip}") - - if [[ -z ${silent} ]]; then - kube::log::status "Node: ${NODE_NAMES[${i}]} (${KUBE_NODE_IP_ADDRESSES[${i}]})" - fi - done - - if [[ ${failure} -ne 0 ]]; then - exit 1 - fi - # Reset default set in common.sh - set -o pipefail -} - -# Get node names if they are not static. 
-function detect-node-names { - echo "TODO: detect-node-names" 1>&2 -} - -# -# Verifies that this computer has sufficient software installed -# so that it can run the rest of the script. -# -function verify-prereqs { - verify-cmd-in-path photon - verify-cmd-in-path ssh - verify-cmd-in-path scp - verify-cmd-in-path ssh-add - verify-cmd-in-path openssl - verify-cmd-in-path mkisofs -} - -# -# The entry point for bringing up a Kubernetes cluster -# -function kube-up { - verify-prereqs - verify-ssh-prereqs - verify-photon-config - kube::util::ensure-temp-dir - - find-release-tars - find-image-id - - load-or-gen-kube-basicauth - gen-cloud-init-iso - gen-master-start - create-master-vm - install-salt-on-master - - gen-node-start - install-salt-on-nodes - - detect-nodes -s - - install-kubernetes-on-master - install-kubernetes-on-nodes - - wait-master-api - wait-node-apis - - setup-pod-routes - - copy-kube-certs - kube::log::status "Creating kubeconfig..." - create-kubeconfig -} - -# Delete a kubernetes cluster -function kube-down { - detect-master - detect-nodes - - pc-delete-vm "${KUBE_MASTER}" "${KUBE_MASTER_ID}" - for (( node=0; node<${#KUBE_NODE_IDS[@]}; node++)); do - pc-delete-vm "${NODE_NAMES[${node}]}" "${KUBE_NODE_IDS[${node}]}" - done -} - -# Update a kubernetes cluster -function kube-push { - echo "TODO: kube-push" 1>&2 -} - -# Prepare update a kubernetes component -function prepare-push { - echo "TODO: prepare-push" 1>&2 -} - -# Update a kubernetes master -function push-master { - echo "TODO: push-master" 1>&2 -} - -# Update a kubernetes node -function push-node { - echo "TODO: push-node" 1>&2 -} - -# Execute prior to running tests to build a release if required for env -function test-build-release { - echo "TODO: test-build-release" 1>&2 -} - -# Execute prior to running tests to initialize required structure -function test-setup { - echo "TODO: test-setup" 1>&2 -} - -# Execute after running tests to perform any required clean-up -function test-teardown { - 
echo "TODO: test-teardown" 1>&2 -} - -##################################################################### -# -# Internal functions -# -##################################################################### - -# -# Uses Photon Controller to make a VM -# Takes two parameters: -# - The name of the VM (Assumed to be unique) -# - The name of the flavor to create the VM (Assumed to be unique) -# -# It assumes that the variables in config-common.sh (PHOTON_TENANT, etc) -# are set correctly. -# -# It also assumes the cloud-init ISO has been generated -# -# When it completes, it sets two environment variables for use by the -# caller: _VM_ID (the ID of the created VM) and _VM_IP (the IP address -# of the created VM) -# -function pc-create-vm { - local vm_name="${1}" - local vm_flavor="${2}" - local rc=0 - local i=0 - - # Create the VM - local tenant_args="--tenant ${PHOTON_TENANT} --project ${PHOTON_PROJECT}" - local vm_args="--name ${vm_name} --image ${PHOTON_IMAGE_ID} --flavor ${vm_flavor}" - local disk_args="disk-1 ${PHOTON_DISK_FLAVOR} boot=true" - - rc=0 - _VM_ID=$(${PHOTON} vm create ${tenant_args} ${vm_args} --disks "${disk_args}" 2>&1) || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "Failed to create VM. Error output:" - echo "${_VM_ID}" - exit 1 - fi - kube::log::status "Created VM ${vm_name}: ${_VM_ID}" - - # Start the VM - # Note that the VM has cloud-init in it, and we attach an ISO that - # contains a user-data.txt file for cloud-init. When the VM starts, - # cloud-init will temporarily mount the ISO and configure the VM - # Our user-data will configure the 'kube' user and set up the ssh - # authorized keys to allow us to ssh to the VM and do further work. - run-cmd "${PHOTON} vm attach-iso -p ${KUBE_TEMP}/cloud-init.iso ${_VM_ID}" - run-cmd "${PHOTON} vm start ${_VM_ID}" - kube::log::status "Started VM ${vm_name}, waiting for network address..." 
- - # Wait for the VM to be started and connected to the network - have_network=0 - for i in {1..120}; do - # photon -n vm networks print several fields: - # NETWORK MAC IP GATEWAY CONNECTED? - # We wait until CONNECTED is True - rc=0 - networks=$(${PHOTON} vm networks "${_VM_ID}") || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "'${PHOTON} vm networks ${_VM_ID}' failed. Error output: " - echo "${networks}" - fi - networks=$(echo "${networks}" | grep True) || rc=$? - if [[ ${rc} -eq 0 ]]; then - have_network=1 - break; - fi - sleep 1 - done - - # Fail if the VM didn't come up - if [[ ${have_network} -eq 0 ]]; then - kube::log::error "VM ${vm_name} failed to start up: no IP was found" - exit 1 - fi - - # Find the IP address of the VM - _VM_IP=$(${PHOTON} vm networks "${_VM_ID}" | head -1 | awk -F'\t' '{print $3}') - kube::log::status "VM ${vm_name} has IP: ${_VM_IP}" -} - -# -# Delete one of our VMs -# If it is STARTED, it will be stopped first. -# -function pc-delete-vm { - local vm_name="${1}" - local vm_id="${2}" - local rc=0 - - kube::log::status "Deleting VM ${vm_name}" - # In some cases, head exits before photon, so the pipline exits with - # SIGPIPE. We disable the pipefile option to hide that failure. - set +o pipefail - ${PHOTON} vm show "${vm_id}" | head -1 | grep STARTED > /dev/null 2>&1 || rc=$? - set +o pipefail - if [[ ${rc} -eq 0 ]]; then - ${PHOTON} vm stop "${vm_id}" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "Error: could not stop ${vm_name} ($vm_id)" - kube::log::error "Please investigate and stop manually" - return - fi - fi - - rc=0 - ${PHOTON} vm delete "${vm_id}" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "Error: could not delete ${vm_name} ($vm_id)" - kube::log::error "Please investigate and delete manually" - fi -} - -# -# Looks for the image named PHOTON_IMAGE -# Sets PHOTON_IMAGE_ID to be the id of that image. 
-# We currently assume there is exactly one image with name -# -function find-image-id { - local rc=0 - PHOTON_IMAGE_ID=$(${PHOTON} image list | grep $'\t'"${PHOTON_IMAGE}"$'\t' | head -1 | grep READY | awk -F'\t' '{print $1}') - if [[ ${rc} -ne 0 ]]; then - kube::log::error "Cannot find image \"${PHOTON_IMAGE}\"" - fail=1 - fi -} - -# -# Generate an ISO with a single file called user-data.txt -# This ISO will be used to configure cloud-init (which is already -# on the VM). We will tell cloud-init to create the kube user/group -# and give ourselves the ability to ssh to the VM with ssh. We also -# allow people to ssh with the same password that was randomly -# generated for access to Kubernetes as a backup method. -# -# Assumes environment variables: -# - VM_USER -# - KUBE_PASSWORD (randomly generated password) -# -function gen-cloud-init-iso { - local password_hash - password_hash=$(openssl passwd -1 "${KUBE_PASSWORD}") - - local ssh_key - ssh_key=$(ssh-add -L | head -1) - - # Make the user-data file that will be used by cloud-init - ( - echo "#cloud-config" - echo "" - echo "groups:" - echo " - ${VM_USER}" - echo "" - echo "users:" - echo " - name: ${VM_USER}" - echo " gecos: Kubernetes" - echo " primary-group: ${VM_USER}" - echo " lock-passwd: false" - echo " passwd: ${password_hash}" - echo " ssh-authorized-keys: " - echo " - ${ssh_key}" - echo " sudo: ALL=(ALL) NOPASSWD:ALL" - echo " shell: /bin/bash" - echo "" - echo "hostname:" - echo " - hostname: kube" - ) > "${KUBE_TEMP}/user-data.txt" - - # Make the ISO that will contain the user-data - # The -rock option means that we'll generate real filenames (long and with case) - run-cmd "mkisofs -rock -o ${KUBE_TEMP}/cloud-init.iso ${KUBE_TEMP}/user-data.txt" -} - -# -# Generate a script used to install salt on the master -# It is placed into $KUBE_TEMP/master-start.sh -# -function gen-master-start { - python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \ - -b -c "${KUBE_TEMP}/htpasswd" "${KUBE_USER}" 
"${KUBE_PASSWORD}" - local htpasswd - htpasswd=$(cat "${KUBE_TEMP}/htpasswd") - - # This calculation of the service IP should work, but if you choose an - # alternate subnet, there's a small chance you'd need to modify the - # service_ip, below. We'll choose an IP like 10.244.240.1 by taking - # the first three octets of the SERVICE_CLUSTER_IP_RANGE and tacking - # on a .1 - local octets - local service_ip - octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g')) - ((octets[3]+=1)) - service_ip=$(echo "${octets[*]}" | sed 's/ /./g') - MASTER_EXTRA_SANS="IP:${service_ip},DNS:${MASTER_NAME},${MASTER_EXTRA_SANS}" - - ( - echo "#! /bin/bash" - echo "readonly MY_NAME=${MASTER_NAME}" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/hostname.sh" - echo "cd /home/kube/cache/kubernetes-install" - echo "readonly KUBE_MASTER_IP='{$KUBE_MASTER_IP}'" - echo "readonly MASTER_NAME='${MASTER_NAME}'" - echo "readonly MASTER_IP_RANGE='${MASTER_IP_RANGE}'" - echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'" - echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-node'" - echo "readonly NODE_IP_RANGES='${NODE_IP_RANGES}'" - echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" - echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'" - echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'" - echo "readonly ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'" - echo "readonly ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI:-false}'" - echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'" - echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'" - echo "readonly KUBE_USER='${KUBE_USER:-}'" - echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD:-}'" - echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'" - echo "readonly SALT_TAR='${SALT_TAR##*/}'" - echo "readonly MASTER_HTPASSWD='${htpasswd}'" - echo "readonly E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'" - echo "readonly 
MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/create-dynamic-salt-files.sh" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/install-release.sh" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/salt-master.sh" - ) > "${KUBE_TEMP}/master-start.sh" -} - -# -# Generate the scripts for each node to install salt -# -function gen-node-start { - local i - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - ( - echo "#! /bin/bash" - echo "readonly MY_NAME=${NODE_NAMES[${i}]}" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/hostname.sh" - echo "KUBE_MASTER=${KUBE_MASTER}" - echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}" - echo "NODE_IP_RANGE=$NODE_IP_RANGES" - grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/salt-minion.sh" - ) > "${KUBE_TEMP}/node-start-${i}.sh" - done -} - -# -# Create a script that will run on the Kubernetes master and will run salt -# to configure the master. We make it a script instead of just running a -# single ssh command so that we can get logging. -# -function gen-master-salt { - gen-salt "kubernetes-master" -} - -# -# Create scripts that will be run on the Kubernetes master. Each of these -# will invoke salt to configure one of the nodes -# -function gen-node-salt { - local i - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - gen-salt "${NODE_NAMES[${i}]}" - done -} - -# -# Shared implementation for gen-master-salt and gen-node-salt -# Writes a script that installs Kubernetes with salt -# The core of the script is simple (run 'salt ... state.highstate') -# We also do a bit of logging so we can debug problems -# -# There is also a funky workaround for an issue with docker 1.9 -# (elsewhere we peg ourselves to docker 1.9). It's fixed in 1.10, -# so we should be able to remove it in the future -# https://github.com/docker/docker/issues/18113 -# The problem is that sometimes the install (with apt-get) of -# docker fails. 
Deleting a file and retrying fixes it. -# -# Tell shellcheck to ignore our variables within single quotes: -# We're writing a script, not executing it, so this is normal -# shellcheck disable=SC2016 -function gen-salt { - node_name=${1} - ( - echo '#!/bin/bash' - echo '' - echo "node=${node_name}" - echo 'out=/tmp/${node}-salt.out' - echo 'log=/tmp/${node}-salt.log' - echo '' - echo 'echo $(date) >> $log' - echo 'salt ${node} state.highstate -t 30 --no-color > ${out}' - echo 'grep -E "Failed:[[:space:]]+0" ${out}' - echo 'success=$?' - echo 'cat ${out} >> ${log}' - echo '' - echo 'if [[ ${success} -ne 0 ]]; then' - echo ' # Did we try to install docker-engine?' - echo ' attempted=$(grep docker-engine ${out} | wc -l)' - echo ' # Is docker-engine installed?' - echo ' installed=$(salt --output=txt ${node} pkg.version docker-engine | wc -l)' - echo ' if [[ ${attempted} -ne 0 && ${installed} -eq 0 ]]; then' - echo ' echo "Unwedging docker-engine install" >> ${log}' - echo ' salt ${node} cmd.run "rm -f /var/lib/docker/network/files/local-kv.db"' - echo ' fi' - echo 'fi' - echo 'exit ${success}' - ) > "${KUBE_TEMP}/${node_name}-salt.sh" -} - -# -# Generate a script to add a route to a host (master or node) -# The script will do two things: -# 1. Add the route immediately with the route command -# 2. Persist the route by saving it in /etc/network/interfaces -# This was done with a script because it was easier to get the quoting right -# and make it clear. 
-# -function gen-add-route { - route=${1} - gateway=${2} - ( - echo '#!/bin/bash' - echo '' - echo '# Immediately add route' - echo "sudo route add -net ${route} gw ${gateway}" - echo '' - echo '# Persist route so it lasts over restarts' - echo 'sed -in "s|^iface eth0.*|&\n post-up route add -net' "${route} gw ${gateway}|"'" /etc/network/interfaces' - ) > "${KUBE_TEMP}/add-route.sh" -} - -# -# Create the Kubernetes master VM -# Sets global variables: -# - KUBE_MASTER (Name) -# - KUBE_MASTER_ID (Photon VM ID) -# - KUBE_MASTER_IP (IP address) -# -function create-master-vm { - kube::log::status "Starting master VM..." - pc-create-vm "${MASTER_NAME}" "${PHOTON_MASTER_FLAVOR}" - KUBE_MASTER=${MASTER_NAME} - KUBE_MASTER_ID=${_VM_ID} - KUBE_MASTER_IP=${_VM_IP} -} - -# -# Install salt on the Kubernetes master -# Relies on the master-start.sh script created in gen-master-start -# -function install-salt-on-master { - kube::log::status "Installing salt on master..." - upload-server-tars "${MASTER_NAME}" "${KUBE_MASTER_IP}" - run-script-remotely "${KUBE_MASTER_IP}" "${KUBE_TEMP}/master-start.sh" -} - -# -# Installs salt on Kubernetes nodes in parallel -# Relies on the node-start script created in gen-node-start -# -function install-salt-on-nodes { - kube::log::status "Creating nodes and installing salt on them..." - - # Start each of the VMs in parallel - # In the future, we'll batch this because it doesn't scale well - # past 10 or 20 nodes - local node - for (( node=0; node<${#NODE_NAMES[@]}; node++)); do - ( - pc-create-vm "${NODE_NAMES[${node}]}" "${PHOTON_NODE_FLAVOR}" - run-script-remotely "${_VM_IP}" "${KUBE_TEMP}/node-start-${node}.sh" - ) & - done - - # Wait for the node VM startups to complete - local fail=0 - local job - for job in $(jobs -p); do - wait "${job}" || fail=$((fail + 1)) - done - if (( fail != 0 )); then - kube::log::error "Failed to start ${fail}/${NUM_NODES} nodes" - exit 1 - fi -} - -# -# Install Kubernetes on the master. 
-# This uses the kubernetes-master-salt.sh script created by gen-master-salt -# That script uses salt to install Kubernetes -# -function install-kubernetes-on-master { - # Wait until salt-master is running: it may take a bit - try-until-success-ssh "${KUBE_MASTER_IP}" \ - "Waiting for salt-master to start on ${KUBE_MASTER}" \ - "pgrep salt-master" - gen-master-salt - copy-file-to-vm "${_VM_IP}" "${KUBE_TEMP}/kubernetes-master-salt.sh" "/tmp/kubernetes-master-salt.sh" - try-until-success-ssh "${KUBE_MASTER_IP}" \ - "Installing Kubernetes on ${KUBE_MASTER} via salt" \ - "sudo /bin/bash /tmp/kubernetes-master-salt.sh" -} - -# -# Install Kubernetes on the nodes in parallel -# This uses the kubernetes-master-salt.sh script created by gen-node-salt -# That script uses salt to install Kubernetes -# -function install-kubernetes-on-nodes { - gen-node-salt - - # Run in parallel to bring up the cluster faster - # TODO: Batch this so that we run up to N in parallel, so - # we don't overload this machine or the salt master - local node - for (( node=0; node<${#NODE_NAMES[@]}; node++)); do - ( - copy-file-to-vm "${_VM_IP}" "${KUBE_TEMP}/${NODE_NAMES[${node}]}-salt.sh" "/tmp/${NODE_NAMES[${node}]}-salt.sh" - try-until-success-ssh "${KUBE_NODE_IP_ADDRESSES[${node}]}" \ - "Waiting for salt-master to start on ${NODE_NAMES[${node}]}" \ - "pgrep salt-minion" - try-until-success-ssh "${KUBE_MASTER_IP}" \ - "Installing Kubernetes on ${NODE_NAMES[${node}]} via salt" \ - "sudo /bin/bash /tmp/${NODE_NAMES[${node}]}-salt.sh" - ) & - done - - # Wait for the Kubernetes installations to complete - local fail=0 - local job - for job in $(jobs -p); do - wait "${job}" || fail=$((fail + 1)) - done - if (( fail != 0 )); then - kube::log::error "Failed to start install Kubernetes on ${fail} out of ${NUM_NODES} nodess" - exit 1 - fi -} - -# -# Upload the Kubernetes tarballs to the master -# -function upload-server-tars { - vm_name=${1} - vm_ip=${2} - - run-ssh-cmd "${vm_ip}" "mkdir -p 
/home/kube/cache/kubernetes-install" - - local tar - for tar in "${SERVER_BINARY_TAR}" "${SALT_TAR}"; do - local base_tar - base_tar=$(basename "${tar}") - kube::log::status "Uploading ${base_tar} to ${vm_name}..." - copy-file-to-vm "${vm_ip}" "${tar}" "/home/kube/cache/kubernetes-install/${tar##*/}" - done -} - -# -# Wait for the Kubernets healthz API to be responsive on the master -# -function wait-master-api { - local curl_creds="--insecure --user ${KUBE_USER}:${KUBE_PASSWORD}" - local curl_output="--fail --output /dev/null --silent" - local curl_net="--max-time 1" - - try-until-success "Waiting for Kubernetes API on ${KUBE_MASTER}" \ - "curl ${curl_creds} ${curl_output} ${curl_net} https://${KUBE_MASTER_IP}/healthz" -} - -# -# Wait for the Kubernetes healthz API to be responsive on each node -# -function wait-node-apis { - local curl_output="--fail --output /dev/null --silent" - local curl_net="--max-time 1" - - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - try-until-success "Waiting for Kubernetes API on ${NODE_NAMES[${i}]}..." \ - "curl ${curl_output} ${curl_net} http://${KUBE_NODE_IP_ADDRESSES[${i}]}:10250/healthz" - done -} - -# -# Configure the nodes so the pods can communicate -# Each node will have a bridge named cbr0 for the NODE_IP_RANGES -# defined in config-default.sh. This finds the IP subnet (assigned -# by Kubernetes) to nodes and configures routes so they can communicate -# -# Also configure the master to be able to talk to the nodes. This is -# useful so that you can get to the UI from the master. -# -function setup-pod-routes { - local node - - KUBE_NODE_BRIDGE_NETWORK=() - for (( node=0; node<${#NODE_NAMES[@]}; node++)); do - - # This happens in two steps (wait for an address, wait for a non 172.x.x.x address) - # because it's both simpler and more clear what's happening. 
- try-until-success-ssh "${KUBE_NODE_IP_ADDRESSES[${node}]}" \ - "Waiting for cbr0 bridge on ${NODE_NAMES[${node}]} to have an address" \ - 'sudo ifconfig cbr0 | grep -oP "inet addr:\K\S+"' - - try-until-success-ssh "${KUBE_NODE_IP_ADDRESSES[${node}]}" \ - "Waiting for cbr0 bridge on ${NODE_NAMES[${node}]} to have correct address" \ - 'sudo ifconfig cbr0 | grep -oP "inet addr:\K\S+" | grep -v "^172."' - - run-ssh-cmd "${KUBE_NODE_IP_ADDRESSES[${node}]}" 'sudo ip route show | grep -E "dev cbr0" | cut -d " " -f1' - KUBE_NODE_BRIDGE_NETWORK+=(${_OUTPUT}) - kube::log::status "cbr0 on ${NODE_NAMES[${node}]} is ${_OUTPUT}" - done - - local i - local j - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - kube::log::status "Configuring pod routes on ${NODE_NAMES[${i}]}..." - gen-add-route "${KUBE_NODE_BRIDGE_NETWORK[${i}]}" "${KUBE_NODE_IP_ADDRESSES[${i}]}" - run-script-remotely "${KUBE_MASTER_IP}" "${KUBE_TEMP}/add-route.sh" - - for (( j=0; j<${#NODE_NAMES[@]}; j++)); do - if [[ "${i}" != "${j}" ]]; then - gen-add-route "${KUBE_NODE_BRIDGE_NETWORK[${j}]}" "${KUBE_NODE_IP_ADDRESSES[${j}]}" - run-script-remotely "${KUBE_NODE_IP_ADDRESSES[${i}]}" "${KUBE_TEMP}/add-route.sh" - fi - done - done -} - -# -# Copy the certificate/key from the Kubernetes master -# These are used to create the kubeconfig file, which allows -# users to use kubectl easily -# -# We also set KUBE_CERT, KUBE_KEY, CA_CERT, and CONTEXT because they -# are needed by create-kubeconfig from common.sh to generate -# the kube config file. 
-# -function copy-kube-certs { - local cert="kubecfg.crt" - local key="kubecfg.key" - local ca="ca.crt" - local cert_dir="/srv/kubernetes" - - kube::log::status "Copying credentials from ${KUBE_MASTER}" - - # Set global environment variables: needed by create-kubeconfig - # in common.sh - export KUBE_CERT="${KUBE_TEMP}/${cert}" - export KUBE_KEY="${KUBE_TEMP}/${key}" - export CA_CERT="${KUBE_TEMP}/${ca}" - export CONTEXT="photon-${INSTANCE_PREFIX}" - - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 644 ${cert_dir}/${cert}" - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 644 ${cert_dir}/${key}" - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 644 ${cert_dir}/${ca}" - - copy-file-from-vm "${KUBE_MASTER_IP}" "${cert_dir}/${cert}" "${KUBE_CERT}" - copy-file-from-vm "${KUBE_MASTER_IP}" "${cert_dir}/${key}" "${KUBE_KEY}" - copy-file-from-vm "${KUBE_MASTER_IP}" "${cert_dir}/${ca}" "${CA_CERT}" - - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 600 ${cert_dir}/${cert}" - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 600 ${cert_dir}/${key}" - run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 600 ${cert_dir}/${ca}" -} - -# -# Copies a script to a VM and runs it -# Parameters: -# - IP of VM -# - Path to local file -# -function run-script-remotely { - local vm_ip=${1} - local local_file="${2}" - local base_file - local remote_file - - base_file=$(basename "${local_file}") - remote_file="/tmp/${base_file}" - - copy-file-to-vm "${vm_ip}" "${local_file}" "${remote_file}" - run-ssh-cmd "${vm_ip}" "chmod 700 ${remote_file}" - run-ssh-cmd "${vm_ip}" "nohup sudo ${remote_file} < /dev/null 1> ${remote_file}.out 2>&1 &" -} - -# -# Runs an command on a VM using ssh -# Parameters: -# - (optional) -i to ignore failure -# - IP address of the VM -# - Command to run -# Assumes environment variables: -# - VM_USER -# - SSH_OPTS -# -function run-ssh-cmd { - local ignore_failure="" - if [[ "${1}" = "-i" ]]; then - ignore_failure="-i" - shift - fi - - local vm_ip=${1} - shift - local cmd=${1} - - - run-cmd 
${ignore_failure} "ssh ${SSH_OPTS} $VM_USER@${vm_ip} $1" -} - -# -# Uses scp to copy file to VM -# Parameters: -# - IP address of the VM -# - Path to local file -# - Path to remote file -# Assumes environment variables: -# - VM_USER -# - SSH_OPTS -# -function copy-file-to-vm { - local vm_ip=${1} - local local_file=${2} - local remote_file=${3} - - run-cmd "scp ${SSH_OPTS} ${local_file} ${VM_USER}@${vm_ip}:${remote_file}" -} - -function copy-file-from-vm { - local vm_ip=${1} - local remote_file=${2} - local local_file=${3} - - run-cmd "scp ${SSH_OPTS} ${VM_USER}@${vm_ip}:${remote_file} ${local_file}" -} - -# -# Run a command, print nice error output -# Used by copy-file-to-vm and run-ssh-cmd -# -function run-cmd { - local rc=0 - local ignore_failure="" - if [[ "${1}" = "-i" ]]; then - ignore_failure=${1} - shift - fi - - local cmd=$1 - local output - output=$(${cmd} 2>&1) || rc=$? - if [[ ${rc} -ne 0 ]]; then - if [[ -z "${ignore_failure}" ]]; then - kube::log::error "Failed to run command: ${cmd} Output:" - echo "${output}" - exit 1 - fi - fi - _OUTPUT=${output} - return ${rc} -} - -# -# After the initial VM setup, we use SSH with keys to access the VMs -# This requires an SSH agent, so we verify that it's running -# -function verify-ssh-prereqs { - kube::log::status "Validating SSH configuration..." - local rc - - rc=0 - ssh-add -L 1> /dev/null 2> /dev/null || rc=$? - # "Could not open a connection to your authentication agent." - if [[ "${rc}" -eq 2 ]]; then - # ssh agent wasn't running, so start it and ensure we stop it - eval "$(ssh-agent)" > /dev/null - trap-add "kill ${SSH_AGENT_PID}" EXIT - fi - - rc=0 - ssh-add -L 1> /dev/null 2> /dev/null || rc=$? - # "The agent has no identities." - if [[ "${rc}" -eq 1 ]]; then - # Try adding one of the default identities, with or without passphrase. - ssh-add || true - fi - - # Expect at least one identity to be available. - if ! 
ssh-add -L 1> /dev/null 2> /dev/null; then - kube::log::error "Could not find or add an SSH identity." - kube::log::error "Please start ssh-agent, add your identity, and retry." - exit 1 - fi -} - -# -# Verify that Photon Controller has been configured in the way we expect. Specifically -# - Have the flavors been created? -# - Has the image been uploaded? -# TODO: Check the tenant and project as well. -function verify-photon-config { - kube::log::status "Validating Photon configuration..." - - # We don't want silent failure: we check for failure - set +o pipefail - - verify-photon-flavors - verify-photon-image - verify-photon-tenant - - # Reset default set in common.sh - set -o pipefail -} - -# -# Verify that the VM and disk flavors have been created -# -function verify-photon-flavors { - local rc=0 - - ${PHOTON} flavor list | awk -F'\t' '{print $2}' | grep -q "^${PHOTON_MASTER_FLAVOR}$" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "ERROR: Cannot find VM flavor named ${PHOTON_MASTER_FLAVOR}" - exit 1 - fi - - if [[ "${PHOTON_MASTER_FLAVOR}" != "${PHOTON_NODE_FLAVOR}" ]]; then - rc=0 - ${PHOTON} flavor list | awk -F'\t' '{print $2}' | grep -q "^${PHOTON_NODE_FLAVOR}$" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "ERROR: Cannot find VM flavor named ${PHOTON_NODE_FLAVOR}" - exit 1 - fi - fi - - ${PHOTON} flavor list | awk -F'\t' '{print $2}' | grep -q "^${PHOTON_DISK_FLAVOR}$" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - kube::log::error "ERROR: Cannot find disk flavor named ${PHOTON_DISK_FLAVOR}" - exit 1 - fi -} - -# -# Verify that we have the image we need, and it's not in error state or -# multiple copies -# -function verify-photon-image { - local rc - - rc=0 - ${PHOTON} image list | grep -q $'\t'"${PHOTON_IMAGE}"$'\t' > /dev/null 2>&1 || rc=$? 
- if [[ ${rc} -ne 0 ]]; then - kube::log::error "ERROR: Cannot find image \"${PHOTON_IMAGE}\"" - exit 1 - fi - - rc=0 - ${PHOTON} image list | grep $'\t'"${PHOTON_IMAGE}"$'\t' | grep ERROR > /dev/null 2>&1 || rc=$? - if [[ ${rc} -eq 0 ]]; then - echo "Warning: You have at least one ${PHOTON_IMAGE} image in the ERROR state. You may want to investigate." - echo "Images in the ERROR state will be ignored." - fi - - rc=0 - num_images=$(${PHOTON} image list | grep $'\t'"${PHOTON_IMAGE}"$'\t' | grep -c READY) - if [[ "${num_images}" -gt 1 ]]; then - echo "ERROR: You have more than one READY ${PHOTON_IMAGE} image. Ensure there is only one" - exit 1 - fi -} - -function verify-photon-tenant { - local rc - - rc=0 - ${PHOTON} tenant list | grep -q $'\t'"${PHOTON_TENANT}" > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - echo "ERROR: Cannot find tenant \"${PHOTON_TENANT}\"" - exit 1 - fi - - ${PHOTON} project list --tenant "${PHOTON_TENANT}" | grep -q $'\t'"${PHOTON_PROJECT}"$'\t' > /dev/null 2>&1 || rc=$? - if [[ ${rc} -ne 0 ]]; then - echo "ERROR: Cannot find project \"${PHOTON_PROJECT}\"" - exit 1 - fi -} - -# -# Verifies that a given command is in the PATH -# -function verify-cmd-in-path { - cmd=${1} - which "${cmd}" >/dev/null || { - kube::log::error "Can't find ${cmd} in PATH, please install and retry." - exit 1 - } -} - -# -# Repeatedly try a command over ssh until it succeeds or until five minutes have passed -# The timeout isn't exact, since we assume the command runs instantaneously, and -# it doesn't. -# -function try-until-success-ssh { - local vm_ip=${1} - local cmd_description=${2} - local cmd=${3} - local timeout=600 - local sleep_time=5 - local max_attempts - - ((max_attempts=timeout/sleep_time)) - - kube::log::status "${cmd_description} for up to 10 minutes..." 
- local attempt=0 - while true; do - local rc=0 - run-ssh-cmd -i "${vm_ip}" "${cmd}" || rc=1 - if [[ ${rc} != 0 ]]; then - if (( attempt == max_attempts )); then - kube::log::error "Failed, cannot proceed: you may need to retry to log into the VM to debug" - exit 1 - fi - else - break - fi - attempt=$((attempt+1)) - sleep ${sleep_time} - done -} - -function try-until-success { - local cmd_description=${1} - local cmd=${2} - local timeout=600 - local sleep_time=5 - local max_attempts - - ((max_attempts=timeout/sleep_time)) - - kube::log::status "${cmd_description} for up to 10 minutes..." - local attempt=0 - while true; do - local rc=0 - run-cmd -i "${cmd}" || rc=1 - if [[ ${rc} != 0 ]]; then - if (( attempt == max_attempts )); then - kube::log::error "Failed, cannot proceed" - exit 1 - fi - else - break - fi - attempt=$((attempt+1)) - sleep ${sleep_time} - done -} - -# -# Sets up a trap handler -# -function trap-add { - local handler="${1}" - local signal="${2-EXIT}" - local cur - - cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")" - if [[ -n "${cur}" ]]; then - handler="${cur}; ${handler}" - fi - - # We want ${handler} to expand now, so tell shellcheck - # shellcheck disable=SC2064 - trap "${handler}" ${signal} -} From f03cdd3851bc923afe0da36547a8bb6dd12d18f3 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Wed, 10 Jan 2018 12:47:54 -0500 Subject: [PATCH 721/794] Fix cadvisor flag registration for cross build --- cmd/kubelet/app/options/BUILD | 57 +++++++++++--- cmd/kubelet/app/options/globalflags.go | 51 ------------- cmd/kubelet/app/options/globalflags_linux.go | 79 ++++++++++++++++++++ cmd/kubelet/app/options/globalflags_other.go | 26 +++++++ 4 files changed, 153 insertions(+), 60 deletions(-) create mode 100644 cmd/kubelet/app/options/globalflags_linux.go create mode 100644 cmd/kubelet/app/options/globalflags_other.go diff --git a/cmd/kubelet/app/options/BUILD b/cmd/kubelet/app/options/BUILD index 0d8a49e1e96..d08afb55bab 100644 --- 
a/cmd/kubelet/app/options/BUILD +++ b/cmd/kubelet/app/options/BUILD @@ -12,7 +12,42 @@ go_library( "container_runtime.go", "globalflags.go", "options.go", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "globalflags_linux.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "globalflags_other.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "globalflags_other.go", + ], + "//conditions:default": [], + }), importpath = "k8s.io/kubernetes/cmd/kubelet/app/options", deps = [ "//pkg/apis/componentconfig:go_default_library", @@ -29,19 +64,23 @@ go_library( "//pkg/util/taints:go_default_library", "//pkg/version/verflag:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/google/cadvisor/container/common:go_default_library", - "//vendor/github.com/google/cadvisor/container/containerd:go_default_library", - "//vendor/github.com/google/cadvisor/container/docker:go_default_library", - "//vendor/github.com/google/cadvisor/container/raw:go_default_library", - "//vendor/github.com/google/cadvisor/machine:go_default_library", - "//vendor/github.com/google/cadvisor/manager:go_default_library", - "//vendor/github.com/google/cadvisor/storage:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/google/cadvisor/container/common:go_default_library", + "//vendor/github.com/google/cadvisor/container/containerd:go_default_library", + "//vendor/github.com/google/cadvisor/container/docker:go_default_library", + "//vendor/github.com/google/cadvisor/container/raw:go_default_library", + "//vendor/github.com/google/cadvisor/machine:go_default_library", + "//vendor/github.com/google/cadvisor/manager:go_default_library", + "//vendor/github.com/google/cadvisor/storage:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/cmd/kubelet/app/options/globalflags.go b/cmd/kubelet/app/options/globalflags.go index 85829930c0b..ad70a30374f 100644 --- a/cmd/kubelet/app/options/globalflags.go +++ b/cmd/kubelet/app/options/globalflags.go @@ -30,13 +30,6 @@ import ( // ensure libs have a chance to globally register their flags _ "github.com/golang/glog" - _ "github.com/google/cadvisor/container/common" - _ "github.com/google/cadvisor/container/containerd" - _ "github.com/google/cadvisor/container/docker" - _ "github.com/google/cadvisor/container/raw" - _ "github.com/google/cadvisor/machine" - _ "github.com/google/cadvisor/manager" - _ "github.com/google/cadvisor/storage" _ "k8s.io/kubernetes/pkg/credentialprovider/azure" _ "k8s.io/kubernetes/pkg/credentialprovider/gcp" ) @@ -121,47 +114,3 @@ func addGlogFlags(fs *pflag.FlagSet) { fs.AddFlagSet(local) } - -// addCadvisorFlags adds flags from cadvisor -func addCadvisorFlags(fs *pflag.FlagSet) { - // lookup flags in global flag set and re-register the values with our flagset - global := flag.CommandLine - local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) - - // These flags were also implicit from cadvisor, but are actually 
used by something in the core repo: - // TODO(mtaufen): This one is stil used by our salt, but for heaven's sake it's even deprecated in cadvisor - register(global, local, "docker_root") - // e2e node tests rely on this - register(global, local, "housekeeping_interval") - - // These flags were implicit from cadvisor, and are mistakes that should be registered deprecated: - const deprecated = "This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed." - - registerDeprecated(global, local, "application_metrics_count_limit", deprecated) - registerDeprecated(global, local, "boot_id_file", deprecated) - registerDeprecated(global, local, "container_hints", deprecated) - registerDeprecated(global, local, "containerd", deprecated) - registerDeprecated(global, local, "docker", deprecated) - registerDeprecated(global, local, "docker_env_metadata_whitelist", deprecated) - registerDeprecated(global, local, "docker_only", deprecated) - registerDeprecated(global, local, "docker-tls", deprecated) - registerDeprecated(global, local, "docker-tls-ca", deprecated) - registerDeprecated(global, local, "docker-tls-cert", deprecated) - registerDeprecated(global, local, "docker-tls-key", deprecated) - registerDeprecated(global, local, "enable_load_reader", deprecated) - registerDeprecated(global, local, "event_storage_age_limit", deprecated) - registerDeprecated(global, local, "event_storage_event_limit", deprecated) - registerDeprecated(global, local, "global_housekeeping_interval", deprecated) - registerDeprecated(global, local, "log_cadvisor_usage", deprecated) - registerDeprecated(global, local, "machine_id_file", deprecated) - registerDeprecated(global, local, "storage_driver_user", deprecated) - registerDeprecated(global, local, "storage_driver_password", deprecated) - registerDeprecated(global, local, "storage_driver_host", deprecated) - registerDeprecated(global, 
local, "storage_driver_db", deprecated) - registerDeprecated(global, local, "storage_driver_table", deprecated) - registerDeprecated(global, local, "storage_driver_secure", deprecated) - registerDeprecated(global, local, "storage_driver_buffer_duration", deprecated) - - // finally, add cadvisor flags to the provided flagset - fs.AddFlagSet(local) -} diff --git a/cmd/kubelet/app/options/globalflags_linux.go b/cmd/kubelet/app/options/globalflags_linux.go new file mode 100644 index 00000000000..99911921522 --- /dev/null +++ b/cmd/kubelet/app/options/globalflags_linux.go @@ -0,0 +1,79 @@ +// +build linux + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "flag" + "os" + + "github.com/spf13/pflag" + + // ensure libs have a chance to globally register their flags + _ "github.com/google/cadvisor/container/common" + _ "github.com/google/cadvisor/container/containerd" + _ "github.com/google/cadvisor/container/docker" + _ "github.com/google/cadvisor/container/raw" + _ "github.com/google/cadvisor/machine" + _ "github.com/google/cadvisor/manager" + _ "github.com/google/cadvisor/storage" +) + +// addCadvisorFlags adds flags from cadvisor +func addCadvisorFlags(fs *pflag.FlagSet) { + // lookup flags in global flag set and re-register the values with our flagset + global := flag.CommandLine + local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) + + // These flags were also implicit from cadvisor, but are actually used by something in the core repo: + // TODO(mtaufen): This one is stil used by our salt, but for heaven's sake it's even deprecated in cadvisor + register(global, local, "docker_root") + // e2e node tests rely on this + register(global, local, "housekeeping_interval") + + // These flags were implicit from cadvisor, and are mistakes that should be registered deprecated: + const deprecated = "This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed." 
+ + registerDeprecated(global, local, "application_metrics_count_limit", deprecated) + registerDeprecated(global, local, "boot_id_file", deprecated) + registerDeprecated(global, local, "container_hints", deprecated) + registerDeprecated(global, local, "containerd", deprecated) + registerDeprecated(global, local, "docker", deprecated) + registerDeprecated(global, local, "docker_env_metadata_whitelist", deprecated) + registerDeprecated(global, local, "docker_only", deprecated) + registerDeprecated(global, local, "docker-tls", deprecated) + registerDeprecated(global, local, "docker-tls-ca", deprecated) + registerDeprecated(global, local, "docker-tls-cert", deprecated) + registerDeprecated(global, local, "docker-tls-key", deprecated) + registerDeprecated(global, local, "enable_load_reader", deprecated) + registerDeprecated(global, local, "event_storage_age_limit", deprecated) + registerDeprecated(global, local, "event_storage_event_limit", deprecated) + registerDeprecated(global, local, "global_housekeeping_interval", deprecated) + registerDeprecated(global, local, "log_cadvisor_usage", deprecated) + registerDeprecated(global, local, "machine_id_file", deprecated) + registerDeprecated(global, local, "storage_driver_user", deprecated) + registerDeprecated(global, local, "storage_driver_password", deprecated) + registerDeprecated(global, local, "storage_driver_host", deprecated) + registerDeprecated(global, local, "storage_driver_db", deprecated) + registerDeprecated(global, local, "storage_driver_table", deprecated) + registerDeprecated(global, local, "storage_driver_secure", deprecated) + registerDeprecated(global, local, "storage_driver_buffer_duration", deprecated) + + // finally, add cadvisor flags to the provided flagset + fs.AddFlagSet(local) +} diff --git a/cmd/kubelet/app/options/globalflags_other.go b/cmd/kubelet/app/options/globalflags_other.go new file mode 100644 index 00000000000..b4a04f9f40f --- /dev/null +++ b/cmd/kubelet/app/options/globalflags_other.go 
@@ -0,0 +1,26 @@ +// +build !linux + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "github.com/spf13/pflag" +) + +func addCadvisorFlags(fs *pflag.FlagSet) { +} From dca369dc8451178d80ca879ceda954f7ccb39ba5 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Wed, 10 Jan 2018 10:03:22 -0800 Subject: [PATCH 722/794] remove support for container-linux in gce kube-up --- build/lib/release.sh | 1 - cluster/common.sh | 22 +- cluster/gce/BUILD | 2 - cluster/gce/config-default.sh | 6 - cluster/gce/config-test.sh | 7 - cluster/gce/container-linux/OWNERS | 8 - cluster/gce/container-linux/README.md | 8 - .../gce/container-linux/configure-helper.sh | 1606 ----------------- cluster/gce/container-linux/configure.sh | 182 -- cluster/gce/container-linux/health-monitor.sh | 83 - cluster/gce/container-linux/helper.sh | 19 - cluster/gce/container-linux/master-helper.sh | 139 -- cluster/gce/container-linux/master.yaml | 57 - cluster/gce/container-linux/node-helper.sh | 35 - cluster/gce/container-linux/node.yaml | 57 - cluster/gce/util.sh | 4 +- 16 files changed, 7 insertions(+), 2229 deletions(-) delete mode 100644 cluster/gce/container-linux/OWNERS delete mode 100644 cluster/gce/container-linux/README.md delete mode 100755 cluster/gce/container-linux/configure-helper.sh delete mode 100755 cluster/gce/container-linux/configure.sh delete mode 100644 cluster/gce/container-linux/health-monitor.sh delete mode 100755 
cluster/gce/container-linux/helper.sh delete mode 100755 cluster/gce/container-linux/master-helper.sh delete mode 100644 cluster/gce/container-linux/master.yaml delete mode 100755 cluster/gce/container-linux/node-helper.sh delete mode 100644 cluster/gce/container-linux/node.yaml diff --git a/build/lib/release.sh b/build/lib/release.sh index 870451601f6..c9932ea29d2 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -414,7 +414,6 @@ function kube::release::package_kube_manifests_tarball() { cp "${salt_dir}/e2e-image-puller/e2e-image-puller.manifest" "${gci_dst_dir}/" cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${gci_dst_dir}/gci-configure-helper.sh" cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${gci_dst_dir}/health-monitor.sh" - cp "${KUBE_ROOT}/cluster/gce/container-linux/configure-helper.sh" "${gci_dst_dir}/container-linux-configure-helper.sh" cp -r "${salt_dir}/kube-admission-controls/limit-range" "${gci_dst_dir}" local objects objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo) diff --git a/cluster/common.sh b/cluster/common.sh index a2b947f1748..2aa73622a64 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -436,8 +436,8 @@ function find-release-tars() { # This tarball is used by GCI, Ubuntu Trusty, and Container Linux. 
KUBE_MANIFESTS_TAR= - if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "container-linux" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \ - [[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "container-linux" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" ]] ; then + if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \ + [[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" ]] ; then KUBE_MANIFESTS_TAR=$(find-tar kubernetes-manifests.tar.gz) fi } @@ -576,9 +576,7 @@ function build-kube-env { local server_binary_tar_url=$SERVER_BINARY_TAR_URL local salt_tar_url=$SALT_TAR_URL local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}" - if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "container-linux" ]] || \ - [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "container-linux" ]] || \ - [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \ + if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \ [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then # TODO: Support fallback .tar.gz settings on Container Linux server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}") @@ -696,8 +694,8 @@ EOF TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD}) EOF fi - if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "container-linux") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \ - [[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "container-linux") || "${NODE_OS_DISTRIBUTION}" = 
"ubuntu" ]] ; then + if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \ + [[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then cat >>$file <>$file <>$file </dev/null; then - useradd -s /sbin/nologin -d /var/etcd etcd - fi - chown -R etcd "${mount_point}/var/etcd" - chgrp -R etcd "${mount_point}/var/etcd" -} - -# replace_prefixed_line ensures: -# 1. the specified file exists -# 2. existing lines with the specified ${prefix} are removed -# 3. a new line with the specified ${prefix}${suffix} is appended -function replace_prefixed_line { - local -r file="${1:-}" - local -r prefix="${2:-}" - local -r suffix="${3:-}" - - touch "${file}" - awk "substr(\$0,0,length(\"${prefix}\")) != \"${prefix}\" { print }" "${file}" > "${file}.filtered" && mv "${file}.filtered" "${file}" - echo "${prefix}${suffix}" >> "${file}" -} - -# After the first boot and on upgrade, these files exist on the master-pd -# and should never be touched again (except perhaps an additional service -# account, see NB below.) -function create-master-auth { - echo "Creating master auth files" - local -r auth_dir="/etc/srv/kubernetes" - if [[ ! -e "${auth_dir}/ca.crt" && ! -z "${CA_CERT:-}" && ! -z "${MASTER_CERT:-}" && ! 
-z "${MASTER_KEY:-}" ]]; then - echo "${CA_CERT}" | base64 --decode > "${auth_dir}/ca.crt" - echo "${MASTER_CERT}" | base64 --decode > "${auth_dir}/server.cert" - echo "${MASTER_KEY}" | base64 --decode > "${auth_dir}/server.key" - fi - local -r basic_auth_csv="${auth_dir}/basic_auth.csv" - if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then - replace_prefixed_line "${basic_auth_csv}" "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters" - fi - local -r known_tokens_csv="${auth_dir}/known_tokens.csv" - if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then - replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN}," "admin,admin,system:masters" - fi - if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then - replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager" - fi - if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then - replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler" - fi - if [[ -n "${KUBELET_TOKEN:-}" ]]; then - replace_prefixed_line "${known_tokens_csv}" "${KUBELET_TOKEN}," "kubelet,uid:kubelet,system:nodes" - fi - if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then - replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy" - fi - local use_cloud_config="false" - cat </etc/gce.conf -[global] -EOF - if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then - cat <>/etc/gce.conf -api-endpoint = ${GCE_API_ENDPOINT} -EOF - fi - if [[ -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -token-url = ${TOKEN_URL} -token-body = ${TOKEN_BODY} -EOF - fi - if [[ -n "${PROJECT_ID:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -project-id = ${PROJECT_ID} -EOF - fi - if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -network-project-id = ${NETWORK_PROJECT_ID} -EOF - fi - if [[ -n 
"${NODE_NETWORK:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -network-name = ${NODE_NETWORK} -EOF - fi - if [[ -n "${NODE_SUBNETWORK:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -subnetwork-name = ${NODE_SUBNETWORK} -EOF - fi - if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then - use_cloud_config="true" - if [[ -n "${NODE_TAGS:-}" ]]; then - # split NODE_TAGS into an array by comma. - IFS=',' read -r -a node_tags <<< ${NODE_TAGS} - else - local -r node_tags="${NODE_INSTANCE_PREFIX}" - fi - cat <>/etc/gce.conf -node-instance-prefix = ${NODE_INSTANCE_PREFIX} -EOF - for tag in ${node_tags[@]}; do - cat <>/etc/gce.conf -node-tags = ${tag} -EOF - done - fi - if [[ -n "${MULTIZONE:-}" ]]; then - use_cloud_config="true" - cat <>/etc/gce.conf -multizone = ${MULTIZONE} -EOF - fi - if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then - use_cloud_config="true" - # split GCE_ALPHA_FEATURES into an array by comma. - IFS=',' read -r -a alpha_features <<< ${GCE_ALPHA_FEATURES} - for feature in ${alpha_features[@]}; do - cat <>/etc/gce.conf -alpha-features = ${feature} -EOF - done - fi - if [[ -n "${SECONDARY_RANGE_NAME:-}" ]]; then - use_cloud_config="true" - cat <> /etc/gce.conf -secondary-range-name = ${SECONDARY_RANGE_NAME} -EOF - fi - if [[ "${use_cloud_config}" != "true" ]]; then - rm -f /etc/gce.conf - fi - - if [[ -n "${GCP_AUTHN_URL:-}" ]]; then - cat </etc/gcp_authn.config -clusters: - - name: gcp-authentication-server - cluster: - server: ${GCP_AUTHN_URL} -users: - - name: kube-apiserver - user: - auth-provider: - name: gcp -current-context: webhook -contexts: -- context: - cluster: gcp-authentication-server - user: kube-apiserver - name: webhook -EOF - fi - - if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then - cat </etc/gcp_authz.config -clusters: - - name: gcp-authorization-server - cluster: - server: ${GCP_AUTHZ_URL} -users: - - name: kube-apiserver - user: - auth-provider: - name: gcp -current-context: webhook -contexts: -- context: - cluster: 
gcp-authorization-server - user: kube-apiserver - name: webhook -EOF - fi - -if [[ -n "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then - # This is the config file for the image review webhook. - cat </etc/gcp_image_review.config -clusters: - - name: gcp-image-review-server - cluster: - server: ${GCP_IMAGE_VERIFICATION_URL} -users: - - name: kube-apiserver - user: - auth-provider: - name: gcp -current-context: webhook -contexts: -- context: - cluster: gcp-image-review-server - user: kube-apiserver - name: webhook -EOF - # This is the config for the image review admission controller. - cat </etc/admission_controller.config -imagePolicy: - kubeConfigFile: /etc/gcp_image_review.config - allowTTL: 30 - denyTTL: 30 - retryBackoff: 500 - defaultAllow: true -EOF - fi -} - -# Arg 1: the address of the API server -function create-kubelet-kubeconfig() { - local apiserver_address="${1}" - if [[ -z "${apiserver_address}" ]]; then - echo "Must provide API server address to create Kubelet kubeconfig file!" - exit 1 - fi - echo "Creating kubelet kubeconfig file" - if [[ -z "${KUBELET_CA_CERT:-}" ]]; then - KUBELET_CA_CERT="${CA_CERT}" - fi - cat </var/lib/kubelet/kubeconfig -apiVersion: v1 -kind: Config -users: -- name: kubelet - user: - client-certificate-data: ${KUBELET_CERT} - client-key-data: ${KUBELET_KEY} -clusters: -- name: local - cluster: - server: ${apiserver_address} - certificate-authority-data: ${KUBELET_CA_CERT} -contexts: -- context: - cluster: local - user: kubelet - name: service-account-context -current-context: service-account-context -EOF -} - -# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY -# to generate a kubeconfig file for the kubelet to securely connect to the apiserver. -# Set REGISTER_MASTER_KUBELET to true if kubelet on the master node -# should register to the apiserver. -function create-master-kubelet-auth { - # Only configure the kubelet on the master if the required variables are - # set in the environment. 
- if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then - REGISTER_MASTER_KUBELET="true" - create-kubelet-kubeconfig "https://${KUBELET_APISERVER}" - fi -} - -function create-kubeproxy-user-kubeconfig { - echo "Creating kube-proxy user kubeconfig file" - cat </var/lib/kube-proxy/kubeconfig -apiVersion: v1 -kind: Config -users: -- name: kube-proxy - user: - token: ${KUBE_PROXY_TOKEN} -clusters: -- name: local - cluster: - certificate-authority-data: ${CA_CERT} -contexts: -- context: - cluster: local - user: kube-proxy - name: service-account-context -current-context: service-account-context -EOF -} - -function create-kubecontrollermanager-kubeconfig { - echo "Creating kube-controller-manager kubeconfig file" - mkdir -p /etc/srv/kubernetes/kube-controller-manager - cat </etc/srv/kubernetes/kube-controller-manager/kubeconfig -apiVersion: v1 -kind: Config -users: -- name: kube-controller-manager - user: - token: ${KUBE_CONTROLLER_MANAGER_TOKEN} -clusters: -- name: local - cluster: - insecure-skip-tls-verify: true - server: https://localhost:443 -contexts: -- context: - cluster: local - user: kube-controller-manager - name: service-account-context -current-context: service-account-context -EOF -} - -function create-kubescheduler-kubeconfig { - echo "Creating kube-scheduler kubeconfig file" - mkdir -p /etc/srv/kubernetes/kube-scheduler - cat </etc/srv/kubernetes/kube-scheduler/kubeconfig -apiVersion: v1 -kind: Config -users: -- name: kube-scheduler - user: - token: ${KUBE_SCHEDULER_TOKEN} -clusters: -- name: local - cluster: - insecure-skip-tls-verify: true - server: https://localhost:443 -contexts: -- context: - cluster: local - user: kube-scheduler - name: kube-scheduler -current-context: kube-scheduler -EOF -} - -function create-master-etcd-auth { - if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then - local -r auth_dir="/etc/srv/kubernetes" - echo "${ETCD_CA_CERT}" | base64 --decode | 
gunzip > "${auth_dir}/etcd-ca.crt" - echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key" - echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt" - fi -} - -function configure-docker-daemon { - echo "Configuring the Docker daemon" - local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false" - if [[ "${TEST_CLUSTER:-}" == "true" ]]; then - docker_opts+=" --log-level=debug" - else - docker_opts+=" --log-level=warn" - fi - local use_net_plugin="true" - if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then - # set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range - docker_opts+=" --bip=169.254.123.1/24" - else - use_net_plugin="false" - docker_opts+=" --bridge=cbr0" - fi - - # Decide whether to enable a docker registry mirror. This is taken from - # the "kube-env" metadata value. - if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then - echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}" - docker_opts+=" --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}" - fi - - mkdir -p /etc/systemd/system/docker.service.d/ - local kubernetes_conf_dropin="/etc/systemd/system/docker.service.d/00_kubelet.conf" - cat > "${kubernetes_conf_dropin}" < "${kubelet_env_file}" - - # Write the systemd service file for kubelet. - cat </etc/systemd/system/kubelet.service -[Unit] -Description=Kubernetes kubelet -Requires=network-online.target -After=network-online.target - -[Service] -Restart=always -RestartSec=10 -EnvironmentFile=${kubelet_env_file} -ExecStart=${kubelet_bin} \$KUBELET_OPTS - -[Install] -WantedBy=multi-user.target -EOF - - # Flush iptables nat table - iptables -t nat -F || true - - systemctl start kubelet.service -} - -# Create the log file and set its properties. -# -# $1 is the file to create. -function prepare-log-file { - touch $1 - chmod 644 $1 - chown root:root $1 -} - -# Prepares parameters for kube-proxy manifest. 
-# $1 source path of kube-proxy manifest. -function prepare-kube-proxy-manifest-variables { - local -r src_file=$1; - - remove-salt-config-comments "${src_file}" - - local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig" - local kube_docker_registry="gcr.io/google_containers" - if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then - kube_docker_registry=${KUBE_DOCKER_REGISTRY} - fi - local -r kube_proxy_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-proxy.docker_tag) - local api_servers="--master=https://${KUBERNETES_MASTER_NAME}" - local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}" - if [[ -n "${FEATURE_GATES:-}" ]]; then - params+=" --feature-gates=${FEATURE_GATES}" - fi - params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s" - if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then - params+=" ${KUBEPROXY_TEST_ARGS}" - fi - local container_env="" - local kube_cache_mutation_detector_env_name="" - local kube_cache_mutation_detector_env_value="" - if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then - container_env="env:" - kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR" - kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\"" - fi - local pod_priority="" - if [[ "${ENABLE_POD_PRIORITY:-}" == "true" ]]; then - pod_priority="priorityClassName: system-node-critical" - fi - sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file} - sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file} - sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file} - sed -i -e "s@{{params}}@${params}@g" ${src_file} - sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file} - sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file} - sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" 
${src_file} - sed -i -e "s@{{pod_priority}}@${pod_priority}@g" ${src_file} - sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file} - sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file} - sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file} - if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then - sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file} - fi - if [[ "${CONTAINER_RUNTIME:-}" == "rkt" ]]; then - # Work arounds for https://github.com/coreos/rkt/issues/3245 and https://github.com/coreos/rkt/issues/3264 - # This is an incredibly hacky workaround. It's fragile too. If the kube-proxy command changes too much, this breaks - # TODO, this could be done much better in many other places, such as an - # init script within the container, or even within kube-proxy's code. - local extra_workaround_cmd="ln -sf /proc/self/mounts /etc/mtab; \ - mount -o remount,rw /proc; \ - mount -o remount,rw /proc/sys; \ - mount -o remount,rw /sys; " - sed -i -e "s@-\\s\\+kube-proxy@- ${extra_workaround_cmd} kube-proxy@g" "${src_file}" - fi -} - -# Starts kube-proxy static pod. 
-function start-kube-proxy { - echo "Start kube-proxy static pod" - prepare-log-file /var/log/kube-proxy.log - local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest" - prepare-kube-proxy-manifest-variables "$src_file" - - cp "${src_file}" /etc/kubernetes/manifests -} - -# Replaces the variables in the etcd manifest file with the real values, and then -# copy the file to the manifest dir -# $1: value for variable 'suffix' -# $2: value for variable 'port' -# $3: value for variable 'server_port' -# $4: value for variable 'cpulimit' -# $5: pod name, which should be either etcd or etcd-events -function prepare-etcd-manifest { - local host_name=${ETCD_HOSTNAME:-$(hostname -s)} - local etcd_cluster="" - local cluster_state="new" - local etcd_protocol="http" - local etcd_creds="" - - if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then - etcd_creds=" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth " - etcd_protocol="https" - fi - - for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n"); do - etcd_host="etcd-${host}=${etcd_protocol}://${host}:$3" - if [[ -n "${etcd_cluster}" ]]; then - etcd_cluster+="," - cluster_state="existing" - fi - etcd_cluster+="${etcd_host}" - done - - local -r temp_file="/tmp/$5" - cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}" - remove-salt-config-comments "${temp_file}" - sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}" - sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}" - sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}" - sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}" - sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}" - sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}" - sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" 
"${temp_file}" - sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}" - # Get default storage backend from manifest file. - local -r default_storage_backend=$(cat "${temp_file}" | \ - grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \ - sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g") - if [[ -n "${STORAGE_BACKEND:-}" ]]; then - sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}" - else - sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}" - fi - if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then - sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=4294967296@g" "${temp_file}" - else - sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}" - fi - sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" "${temp_file}" - if [[ -n "${ETCD_IMAGE:-}" ]]; then - sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" "${temp_file}" - else - sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" "${temp_file}" - fi - if [[ -n "${ETCD_DOCKER_REPOSITORY:-}" ]]; then - sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@${ETCD_DOCKER_REPOSITORY}@g" "${temp_file}" - else - sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@\1@g" "${temp_file}" - fi - - sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}" - sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}" - if [[ -n "${ETCD_VERSION:-}" ]]; then - sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}" - else - sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" "${temp_file}" - fi - # Replace the volume host path. 
- sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}" - mv "${temp_file}" /etc/kubernetes/manifests -} - -function start-etcd-empty-dir-cleanup-pod { - cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml" "/etc/kubernetes/manifests" -} - -# Starts etcd server pod (and etcd-events pod if needed). -# More specifically, it prepares dirs and files, sets the variable value -# in the manifests, and copies them to /etc/kubernetes/manifests. -function start-etcd-servers { - echo "Start etcd pods" - if [[ -d /etc/etcd ]]; then - rm -rf /etc/etcd - fi - if [[ -e /etc/default/etcd ]]; then - rm -f /etc/default/etcd - fi - if [[ -e /etc/systemd/system/etcd.service ]]; then - rm -f /etc/systemd/system/etcd.service - fi - if [[ -e /etc/init.d/etcd ]]; then - rm -f /etc/init.d/etcd - fi - prepare-log-file /var/log/etcd.log - prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest" - - prepare-log-file /var/log/etcd-events.log - prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest" -} - -# Calculates the following variables based on env variables, which will be used -# by the manifests of several kube-master components. -# CLOUD_CONFIG_OPT -# CLOUD_CONFIG_VOLUME -# CLOUD_CONFIG_MOUNT -# DOCKER_REGISTRY -function compute-master-manifest-variables { - CLOUD_CONFIG_OPT="" - CLOUD_CONFIG_VOLUME="" - CLOUD_CONFIG_MOUNT="" - if [[ -f /etc/gce.conf ]]; then - CLOUD_CONFIG_OPT="--cloud-config=/etc/gce.conf" - CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}}," - CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}," - fi - DOCKER_REGISTRY="gcr.io/google_containers" - if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then - DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}" - fi -} - -# A helper function for removing salt configuration and comments from a file. 
-# This is mainly for preparing a manifest file. -# -# $1: Full path of the file to manipulate -function remove-salt-config-comments { - # Remove salt configuration. - sed -i "/^[ |\t]*{[#|%]/d" $1 - # Remove comments. - sed -i "/^[ |\t]*#/d" $1 -} - -# Starts kubernetes apiserver. -# It prepares the log file, loads the docker image, calculates variables, sets them -# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. -# -# Assumed vars (which are calculated in function compute-master-manifest-variables) -# CLOUD_CONFIG_OPT -# CLOUD_CONFIG_VOLUME -# CLOUD_CONFIG_MOUNT -# DOCKER_REGISTRY -function start-kube-apiserver { - echo "Start kubernetes api-server" - prepare-log-file /var/log/kube-apiserver.log - prepare-log-file /var/log/kube-apiserver-audit.log - - # Calculate variables and assemble the command line. - local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}" - params+=" --address=127.0.0.1" - params+=" --allow-privileged=true" - params+=" --cloud-provider=gce" - params+=" --client-ca-file=/etc/srv/kubernetes/ca.crt" - params+=" --etcd-servers=http://127.0.0.1:2379" - params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002" - params+=" --secure-port=443" - params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert" - params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key" - params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv" - params+=" --enable-aggregator-routing=true" - if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then - params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv" - fi - if [[ -n "${STORAGE_BACKEND:-}" ]]; then - params+=" --storage-backend=${STORAGE_BACKEND}" - fi - if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then - params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}" - fi - if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]]; then - params+=" --request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT_SEC}s" - fi - if [[ -n 
"${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then - params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}" - fi - if [[ -n "${NUM_NODES:-}" ]]; then - # If the cluster is large, increase max-requests-inflight limit in apiserver. - if [[ "${NUM_NODES}" -ge 1000 ]]; then - params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500" - fi - # Set amount of memory available for apiserver based on number of nodes. - # TODO: Once we start setting proper requests and limits for apiserver - # we should reuse the same logic here instead of current heuristic. - params+=" --target-ram-mb=$((${NUM_NODES} * 60))" - fi - if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then - params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" - fi - if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then - params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}" - fi - - if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then - # We currently only support enabling with a fixed path and with built-in log - # rotation "disabled" (large value) so it behaves like kube-apiserver.log. - # External log rotation should be set up the same as for kube-apiserver.log. - params+=" --audit-log-path=/var/log/kube-apiserver-audit.log" - params+=" --audit-log-maxage=0" - params+=" --audit-log-maxbackup=0" - # Lumberjack doesn't offer any way to disable size-based rotation. It also - # has an in-memory counter that doesn't notice if you truncate the file. - # 2000000000 (in MiB) is a large number that fits in 31 bits. If the log - # grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver - # never restarts. Please manually restart apiserver before this time. 
- params+=" --audit-log-maxsize=2000000000" - fi - - if [[ "${ENABLE_APISERVER_LOGS_HANDLER:-}" == "false" ]]; then - params+=" --enable-logs-handler=false" - fi - - local admission_controller_config_mount="" - local admission_controller_config_volume="" - local image_policy_webhook_config_mount="" - local image_policy_webhook_config_volume="" - if [[ -n "${ADMISSION_CONTROL:-}" ]]; then - params+=" --admission-control=${ADMISSION_CONTROL}" - if [[ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]]; then - params+=" --admission-control-config-file=/etc/admission_controller.config" - # Mount the file to configure admission controllers if ImagePolicyWebhook is set. - admission_controller_config_mount="{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"/etc/admission_controller.config\", \"readOnly\": false}," - admission_controller_config_volume="{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"/etc/admission_controller.config\", \"type\": \"FileOrCreate\"}}," - # Mount the file to configure the ImagePolicyWebhook's webhook. 
- image_policy_webhook_config_mount="{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false}," - image_policy_webhook_config_volume="{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\", \"type\": \"FileOrCreate\"}}," - fi - fi - - if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then - params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}" - fi - if [[ -n "${RUNTIME_CONFIG:-}" ]]; then - params+=" --runtime-config=${RUNTIME_CONFIG}" - fi - if [[ -n "${FEATURE_GATES:-}" ]]; then - params+=" --feature-gates=${FEATURE_GATES}" - fi - if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then - local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") - if [[ -n "${PROXY_SSH_USER:-}" ]]; then - params+=" --advertise-address=${vm_external_ip}" - params+=" --ssh-user=${PROXY_SSH_USER}" - params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile" - else - params+=" --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname", - fi - elif [ -n "${MASTER_ADVERTISE_ADDRESS:-}" ]; then - params="${params} --advertise-address=${MASTER_ADVERTISE_ADDRESS}" - fi - - local webhook_authn_config_mount="" - local webhook_authn_config_volume="" - if [[ -n "${GCP_AUTHN_URL:-}" ]]; then - params+=" --authentication-token-webhook-config-file=/etc/gcp_authn.config" - webhook_authn_config_mount="{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"/etc/gcp_authn.config\", \"readOnly\": false}," - webhook_authn_config_volume="{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authn.config\", \"type\": \"FileOrCreate\"}}," - fi - - local authorization_mode="RBAC" - local -r 
src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty" - - # Enable ABAC mode unless the user explicitly opts out with ENABLE_LEGACY_ABAC=false - if [[ "${ENABLE_LEGACY_ABAC:-}" != "false" ]]; then - echo "Warning: Enabling legacy ABAC policy. All service accounts will have superuser API access. Set ENABLE_LEGACY_ABAC=false to disable this." - # Create the ABAC file if it doesn't exist yet, or if we have a KUBE_USER set (to ensure the right user is given permissions) - if [[ -n "${KUBE_USER:-}" || ! -e /etc/srv/kubernetes/abac-authz-policy.jsonl ]]; then - local -r abac_policy_json="${src_dir}/abac-authz-policy.jsonl" - remove-salt-config-comments "${abac_policy_json}" - if [[ -n "${KUBE_USER:-}" ]]; then - sed -i -e "s/{{kube_user}}/${KUBE_USER}/g" "${abac_policy_json}" - else - sed -i -e "/{{kube_user}}/d" "${abac_policy_json}" - fi - cp "${abac_policy_json}" /etc/srv/kubernetes/ - fi - - params+=" --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl" - authorization_mode+=",ABAC" - fi - - local webhook_config_mount="" - local webhook_config_volume="" - if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then - authorization_mode+=",Webhook" - params+=" --authorization-webhook-config-file=/etc/gcp_authz.config" - webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false}," - webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\", \"type\": \"FileOrCreate\"}}," - fi - params+=" --authorization-mode=${authorization_mode}" - - local container_env="" - if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then - container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"" - fi - if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then - if [[ -n "${container_env}" ]]; then - container_env="${container_env}, " - fi - container_env="\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": 
\"${ENABLE_PATCH_CONVERSION_DETECTOR}\"" - fi - if [[ -n "${container_env}" ]]; then - container_env="\"env\":[{${container_env}}]," - fi - - src_file="${src_dir}/kube-apiserver.manifest" - remove-salt-config-comments "${src_file}" - # Evaluate variables. - local -r kube_apiserver_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-apiserver.docker_tag) - sed -i -e "s@{{params}}@${params}@g" "${src_file}" - sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}" - sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}" - sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}" - sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}" - sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}" - sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" - sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" "${src_file}" - sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}" - sed -i -e "s@{{liveness_probe_initial_delay}}@${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${src_file}" - sed -i -e "s@{{secure_port}}@443@g" "${src_file}" - sed -i -e "s@{{secure_port}}@8080@g" "${src_file}" - sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}" - sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}" - sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}" - sed -i -e "s@{{webhook_authn_config_volume}}@${webhook_authn_config_volume}@g" "${src_file}" - sed -i -e "s@{{webhook_config_mount}}@${webhook_config_mount}@g" "${src_file}" - sed -i -e "s@{{webhook_config_volume}}@${webhook_config_volume}@g" "${src_file}" - sed -i -e "s@{{admission_controller_config_mount}}@${admission_controller_config_mount}@g" "${src_file}" - sed -i -e "s@{{admission_controller_config_volume}}@${admission_controller_config_volume}@g" "${src_file}" - sed -i -e 
"s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" "${src_file}" - sed -i -e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" "${src_file}" - cp "${src_file}" /etc/kubernetes/manifests -} - -# Starts kubernetes controller manager. -# It prepares the log file, loads the docker image, calculates variables, sets them -# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. -# -# Assumed vars (which are calculated in function compute-master-manifest-variables) -# CLOUD_CONFIG_OPT -# CLOUD_CONFIG_VOLUME -# CLOUD_CONFIG_MOUNT -# DOCKER_REGISTRY -function start-kube-controller-manager { - echo "Start kubernetes controller-manager" - create-kubecontrollermanager-kubeconfig - prepare-log-file /var/log/kube-controller-manager.log - # Calculate variables and assemble the command line. - local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}" - params+=" --use-service-account-credentials" - params+=" --cloud-provider=gce" - params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig" - params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt" - params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key" - if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then - params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}" - fi - if [[ -n "${INSTANCE_PREFIX:-}" ]]; then - params+=" --cluster-name=${INSTANCE_PREFIX}" - fi - if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then - params+=" --cluster-cidr=${CLUSTER_IP_RANGE}" - fi - if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then - params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" - fi - if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then - params+=" --concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}" - fi - if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then - params+=" --allocate-node-cidrs=true" - elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; 
then - params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}" - fi - if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then - params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}" - fi - if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then - params+=" --cidr-allocator-type=CloudAllocator" - params+=" --configure-cloud-routes=false" - fi - if [[ -n "${FEATURE_GATES:-}" ]]; then - params+=" --feature-gates=${FEATURE_GATES}" - fi - local -r kube_rc_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-controller-manager.docker_tag) - local container_env="" - if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then - container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}]," - fi - - local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest" - remove-salt-config-comments "${src_file}" - # Evaluate variables. - sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}" - sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" - sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}" - sed -i -e "s@{{params}}@${params}@g" "${src_file}" - sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}" - sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}" - sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}" - sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}" - sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}" - cp "${src_file}" /etc/kubernetes/manifests -} - -# Starts kubernetes scheduler. -# It prepares the log file, loads the docker image, calculates variables, sets them -# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. 
-# -# Assumed vars (which are calculated in compute-master-manifest-variables) -# DOCKER_REGISTRY -function start-kube-scheduler { - echo "Start kubernetes scheduler" - create-kubescheduler-kubeconfig - prepare-log-file /var/log/kube-scheduler.log - - # Calculate variables and set them in the manifest. - params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}" - params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig" - if [[ -n "${FEATURE_GATES:-}" ]]; then - params+=" --feature-gates=${FEATURE_GATES}" - fi - if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then - params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}" - fi - local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag") - - # Remove salt comments and replace variables with values. - local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest" - remove-salt-config-comments "${src_file}" - - sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}" - sed -i -e "s@{{params}}@${params}@g" "${src_file}" - sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" - sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}" - cp "${src_file}" /etc/kubernetes/manifests -} - -# Starts cluster autoscaler. 
-# Assumed vars (which are calculated in function compute-master-manifest-variables) -# CLOUD_CONFIG_OPT -# CLOUD_CONFIG_VOLUME -# CLOUD_CONFIG_MOUNT -function start-cluster-autoscaler { - if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then - echo "Start kubernetes cluster autoscaler" - prepare-log-file /var/log/cluster-autoscaler.log - - # Remove salt comments and replace variables with values - local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest" - remove-salt-config-comments "${src_file}" - - local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}" - sed -i -e "s@{{params}}@${params}@g" "${src_file}" - sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}" - sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}" - sed -i -e "s@{%.*%}@@g" "${src_file}" - - cp "${src_file}" /etc/kubernetes/manifests - fi -} - -# A helper function for copying addon manifests and set dir/files -# permissions. -# -# $1: addon category under /etc/kubernetes -# $2: manifest source dir -function setup-addon-manifests { - local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/$2" - local -r dst_dir="/etc/kubernetes/$1/$2" - if [[ ! -d "${dst_dir}" ]]; then - mkdir -p "${dst_dir}" - fi - local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml") - if [[ -n "${files}" ]]; then - cp "${src_dir}/"*.yaml "${dst_dir}" - fi - files=$(find "${src_dir}" -maxdepth 1 -name "*.json") - if [[ -n "${files}" ]]; then - cp "${src_dir}/"*.json "${dst_dir}" - fi - files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml.in") - if [[ -n "${files}" ]]; then - cp "${src_dir}/"*.yaml.in "${dst_dir}" - fi - chown -R root:root "${dst_dir}" - chmod 755 "${dst_dir}" - chmod 644 "${dst_dir}"/* -} - -# Updates parameters in yaml file for prometheus-to-sd configuration, or -# removes component if it is disabled. 
-function update-prometheus-to-sd-parameters { - if [[ "${ENABLE_PROMETHEUS_TO_SD:-}" == "true" ]]; then - sed -i -e "s@{{ *prometheus_to_sd_prefix *}}@${PROMETHEUS_TO_SD_PREFIX}@g" "$1" - sed -i -e "s@{{ *prometheus_to_sd_endpoint *}}@${PROMETHEUS_TO_SD_ENDPOINT}@g" "$1" - else - # Removes all lines between two patterns (throws away prometheus-to-sd) - sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1" - fi -} - -# Sets up the manifests of coreDNS for k8s addons. -function setup-coredns-manifest { - local -r coredns_file="${dst_dir}/dns/coredns.yaml" - mv "${dst_dir}/dns/coredns.yaml.in" "${coredns_file}" - # Replace the salt configurations with variable values. - sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${coredns_file}" - sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${coredns_file}" - sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}" -} - -# Sets up the manifests of kube-dns for k8s addons. -function setup-kube-dns-manifest { - local -r kubedns_file="${dst_dir}/dns/kube-dns.yaml" - mv "${dst_dir}/dns/kube-dns.yaml.in" "${kubedns_file}" - if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then - # Replace with custom GKE kube-dns deployment. 
- cat > "${kubedns_file}" < "$src_dir/kube-proxy/kube-proxy-ds.yaml" < /etc/systemd/system/rkt-api.service </dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) -KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - -# KUBERNETES_CONTAINER_RUNTIME is set by the `kube-env` file, but it's a bit of a mouthful -if [[ "${CONTAINER_RUNTIME:-}" == "" ]]; then - CONTAINER_RUNTIME="${KUBERNETES_CONTAINER_RUNTIME:-docker}" -fi - -create-dirs -ensure-local-ssds -if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then - mount-master-pd - create-master-auth - create-master-kubelet-auth - create-master-etcd-auth -else - create-kubelet-kubeconfig "https://${KUBERNETES_MASTER_NAME}" - if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then - create-kubeproxy-user-kubeconfig - fi -fi - -if [[ "${KUBERNETES_CONTAINER_RUNTIME:-}" == "rkt" ]]; then - systemctl stop docker - systemctl disable docker - setup-rkt - install-docker2aci - create-kube-controller-manager-dirs -else - configure-docker-daemon -fi - -load-docker-images -start-kubelet - -if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then - compute-master-manifest-variables - start-etcd-servers - start-etcd-empty-dir-cleanup-pod - start-kube-apiserver - start-kube-controller-manager - start-kube-scheduler - start-kube-addons - start-cluster-autoscaler - start-lb-controller - start-rescheduler -else - if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then - start-kube-proxy - fi - # Kube-registry-proxy. 
- if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then - start-kube-registry-proxy - fi - if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then - start-image-puller - fi -fi -echo "Done for the configuration for kubernetes" diff --git a/cluster/gce/container-linux/configure.sh b/cluster/gce/container-linux/configure.sh deleted file mode 100755 index 16dcf27a044..00000000000 --- a/cluster/gce/container-linux/configure.sh +++ /dev/null @@ -1,182 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# Use --retry-connrefused opt only if it's supported by curl. -CURL_RETRY_CONNREFUSED="" -if curl --help | grep -q -- '--retry-connrefused'; then - CURL_RETRY_CONNREFUSED='--retry-connrefused' -fi - -function download-kube-env { - # Fetch kube-env from GCE metadata server. - local -r tmp_kube_env="/tmp/kube-env.yaml" - curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \ - -H "X-Google-Metadata-Request: True" \ - -o "${tmp_kube_env}" \ - http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env - # Convert the yaml format file into a shell-style file. 
- sed 's/: /=/' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env" - rm -f "${tmp_kube_env}" -} - -function validate-hash { - local -r file="$1" - local -r expected="$2" - - actual=$(sha1sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} ==" - return 1 - fi -} - - -# Retry a download until we get it. Takes a hash and a set of URLs. -# -# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown. -# $2+ are the URLs to download. -function download-or-bust { - local -r hash="$1" - shift 1 - - local -r urls=( $* ) - while true; do - for url in "${urls[@]}"; do - local file="${url##*/}" - rm -f "${file}" - if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 ${CURL_RETRY_CONNREFUSED} "${url}"; then - echo "== Failed to download ${url}. Retrying. ==" - elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - else - if [[ -n "${hash}" ]]; then - echo "== Downloaded ${url} (SHA1 = ${hash}) ==" - else - echo "== Downloaded ${url} ==" - fi - return - fi - done - done -} - -function split-commas { - echo $1 | tr "," "\n" -} - -# Downloads kubernetes binaries and kube-system manifest tarball, unpacks them, -# and places them into suitable directories. Files are placed in /opt/kubernetes. 
-function install-kube-binary-config { - cd "${KUBE_HOME}" - local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") ) - local -r server_binary_tar="${server_binary_tar_urls[0]##*/}" - if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then - local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}" - else - echo "Downloading binary release sha1 (not found in env)" - download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}" - local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1") - fi - echo "Downloading binary release tar" - download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}" - tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite - # Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files. - src_dir="${KUBE_HOME}/kubernetes/server/bin" - dst_dir="${KUBE_HOME}/kube-docker-files" - mkdir -p "${dst_dir}" - cp "${src_dir}/"*.docker_tag "${dst_dir}" - if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then - cp "${src_dir}/kube-proxy.tar" "${dst_dir}" - else - cp "${src_dir}/kube-apiserver.tar" "${dst_dir}" - cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}" - cp "${src_dir}/kube-scheduler.tar" "${dst_dir}" - cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}" - fi - local -r kube_bin="${KUBE_HOME}/bin" - mv "${src_dir}/kubelet" "${kube_bin}" - mv "${src_dir}/kubectl" "${kube_bin}" - - if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] || \ - [[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then - local -r cni_version="v0.6.0" - local -r cni_tar="cni-plugins-amd64-${cni_version}.tgz" - local -r cni_sha1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f" - download-or-bust "${cni_sha1}" "https://storage.googleapis.com/kubernetes-release/network-plugins/${cni_tar}" - local -r cni_dir="${KUBE_HOME}/cni" - mkdir -p "${cni_dir}/bin" - tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}/bin" --overwrite - mv "${cni_dir}/bin"/* "${kube_bin}" - rmdir "${cni_dir}/bin" - rm -f "${KUBE_HOME}/${cni_tar}" - fi 
- - mv "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}" - mv "${KUBE_HOME}/kubernetes/kubernetes-src.tar.gz" "${KUBE_HOME}" - - # Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/. - dst_dir="${KUBE_HOME}/kube-manifests" - mkdir -p "${dst_dir}" - local -r manifests_tar_urls=( $(split-commas "${KUBE_MANIFESTS_TAR_URL}") ) - local -r manifests_tar="${manifests_tar_urls[0]##*/}" - if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then - local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}" - else - echo "Downloading k8s manifests sha1 (not found in env)" - download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha1}" - local -r manifests_tar_hash=$(cat "${manifests_tar}.sha1") - fi - echo "Downloading k8s manifests tar" - download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}" - tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite - local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}" - if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then - find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \ - xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@" - find "${dst_dir}" -name \*.manifest -or -name \*.json | \ - xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@" - fi - cp "${dst_dir}/kubernetes/gci-trusty/container-linux-configure-helper.sh" "${KUBE_HOME}/bin/configure-helper.sh" - chmod -R 755 "${kube_bin}" - - # Clean up. 
- rm -rf "${KUBE_HOME}/kubernetes" - rm -f "${KUBE_HOME}/${server_binary_tar}" - rm -f "${KUBE_HOME}/${server_binary_tar}.sha1" - rm -f "${KUBE_HOME}/${manifests_tar}" - rm -f "${KUBE_HOME}/${manifests_tar}.sha1" -} - -######### Main Function ########## -echo "Start to install kubernetes files" -KUBE_HOME="/opt/kubernetes" -mkdir -p "${KUBE_HOME}" -download-kube-env -source "${KUBE_HOME}/kube-env" -install-kube-binary-config -echo "Done for installing kubernetes files" - -# On Container Linux, the hosts is in /usr/share/baselayout/hosts -# So we need to manually populdate the hosts file here on gce. -echo "127.0.0.1 localhost" >> /etc/hosts -echo "::1 localhost" >> /etc/hosts - -echo "Configuring hostname" -hostnamectl set-hostname $(hostname | cut -f1 -d.) diff --git a/cluster/gce/container-linux/health-monitor.sh b/cluster/gce/container-linux/health-monitor.sh deleted file mode 100644 index 6e8f1b03b12..00000000000 --- a/cluster/gce/container-linux/health-monitor.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script is for master and node instance health monitoring, which is -# packed in kube-manifest tarball. It is executed through a systemd service -# in cluster/gce/gci/.yaml. The env variables come from an env -# file provided by the systemd service. - -set -o nounset -set -o pipefail - -# We simply kill the process when there is a failure. 
Another systemd service will -# automatically restart the process. -function docker_monitoring { - while [ 1 ]; do - if ! timeout 60 docker ps > /dev/null; then - echo "Docker daemon failed!" - pkill docker - # Wait for a while, as we don't want to kill it again before it is really up. - sleep 30 - else - sleep "${SLEEP_SECONDS}" - fi - done -} - -function kubelet_monitoring { - echo "Wait for 2 minutes for kubelet to be fuctional" - # TODO(andyzheng0831): replace it with a more reliable method if possible. - sleep 120 - local -r max_seconds=10 - local output="" - while [ 1 ]; do - if ! output=$(curl --insecure -m "${max_seconds}" -f -s -S https://127.0.0.1:${KUBELET_PORT:-10250}/healthz 2>&1); then - # Print the response and/or errors. - echo $output - echo "Kubelet is unhealthy!" - pkill kubelet - # Wait for a while, as we don't want to kill it again before it is really up. - sleep 60 - else - sleep "${SLEEP_SECONDS}" - fi - done -} - - -############## Main Function ################ -if [[ "$#" -ne 1 ]]; then - echo "Usage: health-monitor.sh " - exit 1 -fi - -KUBE_ENV="/home/kubernetes/kube-env" -if [[ ! -e "${KUBE_ENV}" ]]; then - echo "The ${KUBE_ENV} file does not exist!! Terminate health monitoring" - exit 1 -fi - -SLEEP_SECONDS=10 -component=$1 -echo "Start kubernetes health monitoring for ${component}" -source "${KUBE_ENV}" -if [[ "${component}" == "docker" ]]; then - docker_monitoring -elif [[ "${component}" == "kubelet" ]]; then - kubelet_monitoring -else - echo "Health monitoring for component "${component}" is not supported!" -fi diff --git a/cluster/gce/container-linux/helper.sh b/cluster/gce/container-linux/helper.sh deleted file mode 100755 index ddd45791266..00000000000 --- a/cluster/gce/container-linux/helper.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions and constants for the Container Linux distro. - -# This file intentionally left blank diff --git a/cluster/gce/container-linux/master-helper.sh b/cluster/gce/container-linux/master-helper.sh deleted file mode 100755 index 3cd3ee3a3e5..00000000000 --- a/cluster/gce/container-linux/master-helper.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions and constant for the Container Linux distro. -source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh" - -# create-master-instance creates the master instance. If called with -# an argument, the argument is used as the name to a reserved IP -# address for the master. (In the case of upgrade/repair, we re-use -# the same IP.) -# -# It requires a whole slew of assumed variables, partially due to to -# the call to write-master-env. Listing them would be rather -# futile. 
Instead, we list the required calls to ensure any additional -# -# variables are set: -# ensure-temp-dir -# detect-project -# get-bearer-token -function create-master-instance { - local address="" - [[ -n ${1:-} ]] && address="${1}" - - write-master-env - create-master-instance-internal "${MASTER_NAME}" "${address}" -} - -function replicate-master-instance() { - local existing_master_zone="${1}" - local existing_master_name="${2}" - local existing_master_replicas="${3}" - - local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)" - # Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering. - kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")" - kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")" - ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")" - ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")" - - create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}" - - kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")" - kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")" - kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")" - kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")" - - echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml - get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt" - - create-master-instance-internal "${REPLICA_NAME}" -} - - -function create-master-instance-internal() { - local gcloud="gcloud" - local retries=5 - if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then - gcloud="gcloud beta" - fi - - local -r master_name="${1}" - local -r address="${2:-}" - - local preemptible_master="" - if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then - preemptible_master="--preemptible --maintenance-policy TERMINATE" - fi - - local 
network=$(make-gcloud-network-argument \ - "${NETWORK_PROJECT}" "${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \ - "${address:-}" "${ENABLE_IP_ALIASES:-}" "${IP_ALIAS_SIZE:-}") - - local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml" - metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/container-linux/master.yaml" - metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh" - metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt" - - local disk="name=${master_name}-pd" - disk="${disk},device-name=master-pd" - disk="${disk},mode=rw" - disk="${disk},boot=no" - disk="${disk},auto-delete=no" - - for attempt in $(seq 1 ${retries}); do - if result=$(${gcloud} compute instances create "${master_name}" \ - --project "${PROJECT}" \ - --zone "${ZONE}" \ - --machine-type "${MASTER_SIZE}" \ - --image-project="${MASTER_IMAGE_PROJECT}" \ - --image "${MASTER_IMAGE}" \ - --tags "${MASTER_TAG}" \ - --scopes "storage-ro,compute-rw,monitoring,logging-write" \ - --metadata-from-file "${metadata}" \ - --disk "${disk}" \ - --boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \ - ${MASTER_MIN_CPU_ARCHITECTURE:+"--min-cpu-platform=${MASTER_MIN_CPU_ARCHITECTURE}"} \ - ${preemptible_master} \ - ${network} 2>&1); then - echo "${result}" >&2 - return 0 - else - echo "${result}" >&2 - if [[ ! 
"${result}" =~ "try again later" ]]; then - echo "Failed to create master instance due to non-retryable error" >&2 - return 1 - fi - sleep 10 - fi - done - - echo "Failed to create master instance despite ${retries} attempts" >&2 - return 1 -} - -function get-metadata() { - local zone="${1}" - local name="${2}" - local key="${3}" - - local metadata_url="http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}" - - gcloud compute ssh "${name}" \ - --project "${PROJECT}" \ - --zone "${zone}" \ - --command "curl '${metadata_url}' -H 'Metadata-Flavor: Google'" 2>/dev/null -} diff --git a/cluster/gce/container-linux/master.yaml b/cluster/gce/container-linux/master.yaml deleted file mode 100644 index 444d3042739..00000000000 --- a/cluster/gce/container-linux/master.yaml +++ /dev/null @@ -1,57 +0,0 @@ -#cloud-config - -coreos: - update: - reboot-strategy: off - units: - - name: locksmithd.service - mask: true - - name: kube-master-installation.service - command: start - content: | - [Unit] - Description=Download and install k8s binaries and configurations - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin - # Use --retry-connrefused opt only if it's supported by curl. 
- ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh' - ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh - ExecStart=/opt/kubernetes/bin/configure.sh - - [Install] - WantedBy=kubernetes.target - - name: kube-master-configuration.service - command: start - content: | - [Unit] - Description=Configure kubernetes master - After=kube-master-installation.service - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh - ExecStart=/opt/kubernetes/bin/configure-helper.sh - - [Install] - WantedBy=kubernetes.target - - name: kubernetes.target - enable: true - command: start - content: | - [Unit] - Description=Kubernetes - - [Install] - WantedBy=multi-user.target - - name: docker.service - drop-ins: - - name: "use-cgroupfs-driver.conf" - # This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl - content: | - [Service] - Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver=" diff --git a/cluster/gce/container-linux/node-helper.sh b/cluster/gce/container-linux/node-helper.sh deleted file mode 100755 index c2432c5b0ea..00000000000 --- a/cluster/gce/container-linux/node-helper.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions and constant for the Container Linux distro. -source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh" - -function get-node-instance-metadata { - local metadata="" - metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml," - metadata+="user-data=${KUBE_ROOT}/cluster/gce/container-linux/node.yaml," - metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh," - metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt" - echo "${metadata}" -} - -# $1: template name (required). -function create-node-instance-template { - local template_name="$1" - - create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)" - # TODO(euank): We should include update-strategy here. We should also switch to ignition -} diff --git a/cluster/gce/container-linux/node.yaml b/cluster/gce/container-linux/node.yaml deleted file mode 100644 index 9886679cd78..00000000000 --- a/cluster/gce/container-linux/node.yaml +++ /dev/null @@ -1,57 +0,0 @@ -#cloud-config - -coreos: - update: - reboot-strategy: off - units: - - name: locksmithd.service - mask: true - - name: kube-node-installation.service - command: start - content: | - [Unit] - Description=Download and install k8s binaries and configurations - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin - # Use --retry-connrefused opt only if it's supported by curl. 
- ExecStartPre=/bin/bash -c 'OPT=""; if curl --help | grep -q -- "--retry-connrefused"; then OPT="--retry-connrefused"; fi; /usr/bin/curl --fail --retry 5 --retry-delay 3 $OPT --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh' - ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh - ExecStart=/opt/kubernetes/bin/configure.sh - - [Install] - WantedBy=kubernetes.target - - name: kube-node-configuration.service - command: start - content: | - [Unit] - Description=Configure kubernetes master - After=kube-node-installation.service - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh - ExecStart=/opt/kubernetes/bin/configure-helper.sh - - [Install] - WantedBy=kubernetes.target - - name: kubernetes.target - enable: true - command: start - content: | - [Unit] - Description=Kubernetes - - [Install] - WantedBy=multi-user.target - - name: docker.service - drop-ins: - - name: "use-cgroupfs-driver.conf" - # This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl - content: | - [Service] - Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver=" diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 46178dba693..5c4f48d12cd 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -25,14 +25,14 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}" source "${KUBE_ROOT}/cluster/common.sh" source "${KUBE_ROOT}/hack/lib/util.sh" -if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "container-linux" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then +if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then 
source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh" else echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2 exit 1 fi -if [[ "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then +if [[ "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then source "${KUBE_ROOT}/cluster/gce/${MASTER_OS_DISTRIBUTION}/master-helper.sh" else echo "Cannot operate on cluster using master os distro: ${MASTER_OS_DISTRIBUTION}" >&2 From 1ddd5efaa0f50bf021958e62700966cdf2703729 Mon Sep 17 00:00:00 2001 From: Lee Verberne Date: Wed, 10 Jan 2018 18:55:50 +0100 Subject: [PATCH 723/794] Create a feature flag for sharing PID namespace This feature is described in https://features.k8s.io/495. --- pkg/features/kube_features.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index a80f86a756d..c6cbc6cd586 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -118,11 +118,17 @@ const ( ExpandPersistentVolumes utilfeature.Feature = "ExpandPersistentVolumes" // owner: @verb - // alpha: v1.8 + // alpha: v1.10 // // Allows running a "debug container" in a pod namespaces to troubleshoot a running pod. DebugContainers utilfeature.Feature = "DebugContainers" + // owner: @verb + // alpha: v1.10 + // + // Allows all containers in a pod to share a process namespace. 
+ PodShareProcessNamespace utilfeature.Feature = "PodShareProcessNamespace" + // owner: @bsalamat // alpha: v1.8 // @@ -239,6 +245,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS LocalStorageCapacityIsolation: {Default: false, PreRelease: utilfeature.Alpha}, HugePages: {Default: true, PreRelease: utilfeature.Beta}, DebugContainers: {Default: false, PreRelease: utilfeature.Alpha}, + PodShareProcessNamespace: {Default: false, PreRelease: utilfeature.Alpha}, PodPriority: {Default: false, PreRelease: utilfeature.Alpha}, EnableEquivalenceClassCache: {Default: false, PreRelease: utilfeature.Alpha}, TaintNodesByCondition: {Default: false, PreRelease: utilfeature.Alpha}, From 010a127314a935d8d038f8dd4559fc5b249813e4 Mon Sep 17 00:00:00 2001 From: Dan Mace Date: Wed, 10 Jan 2018 16:36:01 -0500 Subject: [PATCH 724/794] Fix quota controller worker deadlock The resource quota controller worker pool can deadlock when: * Worker goroutines are idle waiting for work from queues * The Sync() method detects discovery updates to apply The problem is workers acquire a read lock while idle, making write lock acquisition dependent upon the presence of work in the queues. The Sync() method blocks on a pending write lock acquisition and won't unblock until every existing worker processes one item from their queue and releases their read lock. While the Sync() method's lock is pending, all new read lock acquisitions will block; if a worker does process work and release its lock, it will then become blocked on a read lock acquisition; they become blocked on Sync(). This can easily deadlock all the workers processing from one queue while any workers on the other queue remain blocked waiting for work. Fix the deadlock by refactoring workers to acquire a read lock *after* work is popped from the queue. This allows writers to get locks while workers are idle, while preserving the worker pause semantics necessary to allow safe sync. 
--- pkg/controller/resourcequota/resource_quota_controller.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index b2ae6d1f6e2..e341e1cde55 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -237,15 +237,13 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) { // worker runs a worker thread that just dequeues items, processes them, and marks them done. func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface) func() { workFunc := func() bool { - - rq.workerLock.RLock() - defer rq.workerLock.RUnlock() - key, quit := queue.Get() if quit { return true } defer queue.Done(key) + rq.workerLock.RLock() + defer rq.workerLock.RUnlock() err := rq.syncHandler(key.(string)) if err == nil { queue.Forget(key) From ac48b1b075efa31f2109f818159bcaff95947609 Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Wed, 10 Jan 2018 12:15:44 -0800 Subject: [PATCH 725/794] Add `cloud` for the generated GCE interfaces, support structs Note: this does not wire the generated code. 
--- pkg/cloudprovider/providers/gce/BUILD | 3 + pkg/cloudprovider/providers/gce/gce.go | 35 ++++++++---- pkg/cloudprovider/providers/gce/support.go | 66 ++++++++++++++++++++++ 3 files changed, 92 insertions(+), 12 deletions(-) create mode 100644 pkg/cloudprovider/providers/gce/support.go diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index b112c95912c..c6583798dc8 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -41,12 +41,15 @@ go_library( "gce_util.go", "gce_zones.go", "metrics.go", + "support.go", "token_source.go", ], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", deps = [ "//pkg/api/v1/service:go_default_library", "//pkg/cloudprovider:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/master/ports:go_default_library", diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index b515c8ff67a..b734a9b8d8e 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -30,6 +30,13 @@ import ( gcfg "gopkg.in/gcfg.v1" "cloud.google.com/go/compute/metadata" + "github.com/golang/glog" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + computealpha "google.golang.org/api/compute/v0.alpha" + computebeta "google.golang.org/api/compute/v0.beta" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -41,18 +48,12 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/controller" kubeletapis 
"k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/version" - - "github.com/golang/glog" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - computealpha "google.golang.org/api/compute/v0.alpha" - computebeta "google.golang.org/api/compute/v0.beta" - compute "google.golang.org/api/compute/v1" - container "google.golang.org/api/container/v1" ) const ( @@ -147,6 +148,9 @@ type GCECloud struct { // the corresponding api is enabled. // If not enabled, it should return error. AlphaFeatureGate *AlphaFeatureGate + + // New code generated interface to the GCE compute library. + c cloud.Cloud } // TODO: replace gcfg with json @@ -243,7 +247,6 @@ func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) { return nil, err } return CreateGCECloud(cloudConfig) - } func readConfig(reader io.Reader) (*ConfigFile, error) { @@ -363,11 +366,12 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err // If no tokenSource is specified, uses oauth2.DefaultTokenSource. // If managedZones is nil / empty all zones in the region will be managed. func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { - // Remove any pre-release version and build metadata from the semver, leaving only the MAJOR.MINOR.PATCH portion. - // See http://semver.org/. + // Remove any pre-release version and build metadata from the semver, + // leaving only the MAJOR.MINOR.PATCH portion. See http://semver.org/. version := strings.TrimLeft(strings.Split(strings.Split(version.Get().GitVersion, "-")[0], "+")[0], "v") - // Create a user-agent header append string to supply to the Google API clients, to identify Kubernetes as the origin of the GCP API calls. + // Create a user-agent header append string to supply to the Google API + // clients, to identify Kubernetes as the origin of the GCP API calls. userAgent := fmt.Sprintf("Kubernetes/%s (%s %s)", version, runtime.GOOS, runtime.GOARCH) // Use ProjectID for NetworkProjectID, if it wasn't explicitly set. 
@@ -506,6 +510,13 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { } gce.manager = &gceServiceManager{gce} + gce.c = cloud.NewGCE(&cloud.Service{ + GA: service, + Alpha: serviceAlpha, + Beta: serviceBeta, + ProjectRouter: &gceProjectRouter{gce}, + RateLimiter: &gceRateLimiter{gce}, + }) return gce, nil } diff --git a/pkg/cloudprovider/providers/gce/support.go b/pkg/cloudprovider/providers/gce/support.go new file mode 100644 index 00000000000..42903af4579 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/support.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gce + +import ( + "context" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +// gceProjectRouter sends requests to the appropriate project ID. +type gceProjectRouter struct { + gce *GCECloud +} + +// ProjectID returns the project ID to be used for the given operation. +func (r *gceProjectRouter) ProjectID(ctx context.Context, version meta.Version, service string) string { + switch service { + case "Firewalls", "Routes": + return r.gce.NetworkProjectID() + default: + return r.gce.projectID + } +} + +// gceRateLimiter implements cloud.RateLimiter. +type gceRateLimiter struct { + gce *GCECloud +} + +// Accept blocks until the operation can be performed. 
+// +// TODO: the current cloud provider policy doesn't seem to be correct as it +// only rate limits the polling operations, but not the /submission/ of +// operations. +func (l *gceRateLimiter) Accept(ctx context.Context, key *cloud.RateLimitKey) error { + if key.Operation == "Get" && key.Service == "Operations" { + ch := make(chan struct{}) + go func() { + l.gce.operationPollRateLimiter.Accept() + close(ch) + }() + select { + case <-ch: + break + case <-ctx.Done(): + return ctx.Err() + } + } + return nil +} From ce0a8303d6f20f20de774bb5e32dcb6f972ffce0 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Tue, 9 Jan 2018 13:44:55 -0800 Subject: [PATCH 726/794] integration: add retries to node authorizer tests --- test/integration/auth/BUILD | 1 + test/integration/auth/node_test.go | 267 +++++++++++++++++------------ 2 files changed, 158 insertions(+), 110 deletions(-) diff --git a/test/integration/auth/BUILD b/test/integration/auth/BUILD index ccd4ec402c4..897557a21a9 100644 --- a/test/integration/auth/BUILD +++ b/test/integration/auth/BUILD @@ -58,6 +58,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/group:go_default_library", diff --git a/test/integration/auth/node_test.go b/test/integration/auth/node_test.go index 32a699b12c4..1199ad9cbb1 100644 --- a/test/integration/auth/node_test.go +++ b/test/integration/auth/node_test.go @@ -20,8 +20,6 @@ import ( "fmt" "net/http" "net/http/httptest" - "path/filepath" - "runtime" "testing" "time" @@ -29,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/request/bearertoken" "k8s.io/apiserver/pkg/authentication/token/tokenfile" "k8s.io/apiserver/pkg/authentication/user" @@ -149,125 +148,159 @@ func TestNodeAuthorizer(t *testing.T) { t.Fatal(err) } - getSecret := func(client clientset.Interface) error { - _, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{}) - return err + getSecret := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{}) + return err + } } - getPVSecret := func(client clientset.Interface) error { - _, err := client.Core().Secrets("ns").Get("mypvsecret", metav1.GetOptions{}) - return err + getPVSecret := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Secrets("ns").Get("mypvsecret", metav1.GetOptions{}) + return err + } } - getConfigMap := func(client clientset.Interface) error { - _, err := client.Core().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{}) - return err + getConfigMap := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{}) + return err + } } - getPVC := func(client clientset.Interface) error { - _, err := client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{}) - return err + getPVC := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{}) + return err + } } - getPV := func(client clientset.Interface) error { - _, err := client.Core().PersistentVolumes().Get("mypv", metav1.GetOptions{}) - return err + getPV := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().PersistentVolumes().Get("mypv", metav1.GetOptions{}) + return err + } } - 
createNode2NormalPod := func(client clientset.Interface) error { - _, err := client.Core().Pods("ns").Create(&api.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, - Spec: api.PodSpec{ - NodeName: "node2", - Containers: []api.Container{{Name: "image", Image: "busybox"}}, - Volumes: []api.Volume{ - {Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "mysecret"}}}, - {Name: "cm", VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: "myconfigmap"}}}}, - {Name: "pvc", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}}, + createNode2NormalPod := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Pods("ns").Create(&api.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, + Spec: api.PodSpec{ + NodeName: "node2", + Containers: []api.Container{{Name: "image", Image: "busybox"}}, + Volumes: []api.Volume{ + {Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "mysecret"}}}, + {Name: "cm", VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: "myconfigmap"}}}}, + {Name: "pvc", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}}, + }, }, - }, - }) - return err + }) + return err + } } - updateNode2NormalPodStatus := func(client clientset.Interface) error { - startTime := metav1.NewTime(time.Now()) - _, err := client.Core().Pods("ns").UpdateStatus(&api.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, - Status: api.PodStatus{StartTime: &startTime}, - }) - return err + updateNode2NormalPodStatus := func(client clientset.Interface) func() error { + return func() error { + startTime := metav1.NewTime(time.Now()) + _, err := 
client.Core().Pods("ns").UpdateStatus(&api.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, + Status: api.PodStatus{StartTime: &startTime}, + }) + return err + } } - deleteNode2NormalPod := func(client clientset.Interface) error { - zero := int64(0) - return client.Core().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + deleteNode2NormalPod := func(client clientset.Interface) func() error { + return func() error { + zero := int64(0) + return client.Core().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + } } - createNode2MirrorPod := func(client clientset.Interface) error { - _, err := client.Core().Pods("ns").Create(&api.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node2mirrorpod", - Annotations: map[string]string{api.MirrorPodAnnotationKey: "true"}, - }, - Spec: api.PodSpec{ - NodeName: "node2", - Containers: []api.Container{{Name: "image", Image: "busybox"}}, - }, - }) - return err + createNode2MirrorPod := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Pods("ns").Create(&api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node2mirrorpod", + Annotations: map[string]string{api.MirrorPodAnnotationKey: "true"}, + }, + Spec: api.PodSpec{ + NodeName: "node2", + Containers: []api.Container{{Name: "image", Image: "busybox"}}, + }, + }) + return err + } } - deleteNode2MirrorPod := func(client clientset.Interface) error { - zero := int64(0) - return client.Core().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + deleteNode2MirrorPod := func(client clientset.Interface) func() error { + return func() error { + zero := int64(0) + return client.Core().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + } } - createNode2 := func(client clientset.Interface) error { - _, err := client.Core().Nodes().Create(&api.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}}) - 
return err + createNode2 := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Nodes().Create(&api.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}}) + return err + } } - updateNode2Status := func(client clientset.Interface) error { - _, err := client.Core().Nodes().UpdateStatus(&api.Node{ - ObjectMeta: metav1.ObjectMeta{Name: "node2"}, - Status: api.NodeStatus{}, - }) - return err + updateNode2Status := func(client clientset.Interface) func() error { + return func() error { + _, err := client.Core().Nodes().UpdateStatus(&api.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "node2"}, + Status: api.NodeStatus{}, + }) + return err + } } - deleteNode2 := func(client clientset.Interface) error { - return client.Core().Nodes().Delete("node2", nil) + deleteNode2 := func(client clientset.Interface) func() error { + return func() error { + return client.Core().Nodes().Delete("node2", nil) + } } - createNode2NormalPodEviction := func(client clientset.Interface) error { - return client.Policy().Evictions("ns").Evict(&policy.Eviction{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "policy/v1beta1", - Kind: "Eviction", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "node2normalpod", - Namespace: "ns", - }, - }) + createNode2NormalPodEviction := func(client clientset.Interface) func() error { + return func() error { + return client.Policy().Evictions("ns").Evict(&policy.Eviction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy/v1beta1", + Kind: "Eviction", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "node2normalpod", + Namespace: "ns", + }, + }) + } } - createNode2MirrorPodEviction := func(client clientset.Interface) error { - return client.Policy().Evictions("ns").Evict(&policy.Eviction{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "policy/v1beta1", - Kind: "Eviction", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "node2mirrorpod", - Namespace: "ns", - }, - }) + createNode2MirrorPodEviction := func(client clientset.Interface) func() 
error { + return func() error { + return client.Policy().Evictions("ns").Evict(&policy.Eviction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy/v1beta1", + Kind: "Eviction", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "node2mirrorpod", + Namespace: "ns", + }, + }) + } } capacity := 50 - updatePVCCapacity := func(client clientset.Interface) error { - capacity++ - statusString := fmt.Sprintf("{\"status\": {\"capacity\": {\"storage\": \"%dG\"}}}", capacity) - patchBytes := []byte(statusString) - _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status") - return err + updatePVCCapacity := func(client clientset.Interface) func() error { + return func() error { + capacity++ + statusString := fmt.Sprintf("{\"status\": {\"capacity\": {\"storage\": \"%dG\"}}}", capacity) + patchBytes := []byte(statusString) + _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status") + return err + } } - updatePVCPhase := func(client clientset.Interface) error { - patchBytes := []byte(`{"status":{"phase": "Bound"}}`) - _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status") - return err + updatePVCPhase := func(client clientset.Interface) func() error { + return func() error { + patchBytes := []byte(`{"status":{"phase": "Bound"}}`) + _, err := client.Core().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status") + return err + } } nodeanonClient := clientsetForToken(tokenNodeUnknown, clientConfig) @@ -386,23 +419,37 @@ func TestNodeAuthorizer(t *testing.T) { expectForbidden(t, updatePVCPhase(node2Client)) } -func expectForbidden(t *testing.T, err error) { - if !errors.IsForbidden(err) { - _, file, line, _ := runtime.Caller(1) - t.Errorf("%s:%d: Expected forbidden error, got %v", filepath.Base(file), line, err) +// expect executes a function a set number of 
times until it either returns the +// expected error or executes too many times. It returns if the retries timed +// out and the last error returned by the method. +func expect(f func() error, wantErr func(error) bool) (timeout bool, lastErr error) { + err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) { + lastErr = f() + if wantErr(lastErr) { + return true, nil + } + return false, nil + }) + return err == nil, lastErr +} + +func expectForbidden(t *testing.T, f func() error) { + t.Helper() + if ok, err := expect(f, errors.IsForbidden); !ok { + t.Errorf("Expected forbidden error, got %v", err) } } -func expectNotFound(t *testing.T, err error) { - if !errors.IsNotFound(err) { - _, file, line, _ := runtime.Caller(1) - t.Errorf("%s:%d: Expected notfound error, got %v", filepath.Base(file), line, err) +func expectNotFound(t *testing.T, f func() error) { + t.Helper() + if ok, err := expect(f, errors.IsNotFound); !ok { + t.Errorf("Expected notfound error, got %v", err) } } -func expectAllowed(t *testing.T, err error) { - if err != nil { - _, file, line, _ := runtime.Caller(1) - t.Errorf("%s:%d: Expected no error, got %v", filepath.Base(file), line, err) +func expectAllowed(t *testing.T, f func() error) { + t.Helper() + if ok, err := expect(f, func(e error) bool { return e == nil }); !ok { + t.Errorf("Expected no error, got %v", err) } } From dc5384a139cc05ac31f76758c2c0923ce5cb88ff Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Fri, 17 Nov 2017 13:10:25 +0800 Subject: [PATCH 727/794] Don't rewrite device health --- pkg/kubelet/cm/deviceplugin/device_plugin_stub.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go index 01f08c15987..9969e99989b 100644 --- a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go +++ b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go @@ -115,16 +115,8 @@ func (m *Stub) 
Register(kubeletEndpoint, resourceName string) error { // ListAndWatch lists devices and update that list according to the Update call func (m *Stub) ListAndWatch(e *pluginapi.Empty, s pluginapi.DevicePlugin_ListAndWatchServer) error { log.Println("ListAndWatch") - var devs []*pluginapi.Device - for _, d := range m.devs { - devs = append(devs, &pluginapi.Device{ - ID: d.ID, - Health: pluginapi.Healthy, - }) - } - - s.Send(&pluginapi.ListAndWatchResponse{Devices: devs}) + s.Send(&pluginapi.ListAndWatchResponse{Devices: m.devs}) for { select { From 1c73497c7e35f6f7596b127b75688879dd245f23 Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Tue, 9 Jan 2018 23:17:12 -0800 Subject: [PATCH 728/794] Add zouyee as a reviewer for the cluster/centos directory. --- cluster/centos/OWNERS | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 cluster/centos/OWNERS diff --git a/cluster/centos/OWNERS b/cluster/centos/OWNERS new file mode 100644 index 00000000000..0edb92d5f9c --- /dev/null +++ b/cluster/centos/OWNERS @@ -0,0 +1,2 @@ +reviewers: + - zouyee From 671c4eb2b79941983d89ef5b07b25b0d546504ad Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Thu, 11 Jan 2018 14:41:45 +0800 Subject: [PATCH 729/794] Add e2e test logic for device plugin --- .../cm/deviceplugin/device_plugin_stub.go | 28 ++- test/e2e_node/device_plugin.go | 162 +++++++++++++++++- test/e2e_node/util.go | 1 + 3 files changed, 188 insertions(+), 3 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go index 01f08c15987..a04389cc192 100644 --- a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go +++ b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go @@ -38,6 +38,18 @@ type Stub struct { update chan []*pluginapi.Device server *grpc.Server + + // allocFunc is used for handling allocation request + allocFunc stubAllocFunc +} + +// stubAllocFunc is the function called when receive an allocation request from Kubelet +type stubAllocFunc func(r 
*pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) + +func defaultAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) { + var response pluginapi.AllocateResponse + + return &response, nil } // NewDevicePluginStub returns an initialized DevicePlugin Stub. @@ -48,9 +60,16 @@ func NewDevicePluginStub(devs []*pluginapi.Device, socket string) *Stub { stop: make(chan interface{}), update: make(chan []*pluginapi.Device), + + allocFunc: defaultAllocFunc, } } +// SetAllocFunc sets allocFunc of the device plugin +func (m *Stub) SetAllocFunc(f stubAllocFunc) { + m.allocFunc = f +} + // Start starts the gRPC server of the device plugin func (m *Stub) Start() error { err := m.cleanup() @@ -145,8 +164,13 @@ func (m *Stub) Update(devs []*pluginapi.Device) { func (m *Stub) Allocate(ctx context.Context, r *pluginapi.AllocateRequest) (*pluginapi.AllocateResponse, error) { log.Printf("Allocate, %+v", r) - var response pluginapi.AllocateResponse - return &response, nil + devs := make(map[string]pluginapi.Device) + + for _, dev := range m.devs { + devs[dev.ID] = *dev + } + + return m.allocFunc(r, devs) } func (m *Stub) cleanup() error { diff --git a/test/e2e_node/device_plugin.go b/test/e2e_node/device_plugin.go index 9748d31d68a..826d3b66989 100644 --- a/test/e2e_node/device_plugin.go +++ b/test/e2e_node/device_plugin.go @@ -40,7 +40,130 @@ import ( . 
"github.com/onsi/gomega" ) -// makeBusyboxPod returns a simple Pod spec with a pause container +const ( + // fake resource name + resourceName = "fake.com/resource" +) + +// Serial because the test restarts Kubelet +var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin] [Serial] [Disruptive]", func() { + f := framework.NewDefaultFramework("device-plugin-errors") + + Context("DevicePlugin", func() { + By("Enabling support for Device Plugin") + tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + initialConfig.FeatureGates[string(features.DevicePlugins)] = true + }) + + It("Verifies the Kubelet device plugin functionality.", func() { + + By("Wait for node is ready") + framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout) + + By("Start stub device plugin") + // fake devices for e2e test + devs := []*pluginapi.Device{ + {ID: "Dev-1", Health: pluginapi.Healthy}, + {ID: "Dev-2", Health: pluginapi.Healthy}, + } + + socketPath := pluginapi.DevicePluginPath + "dp." 
+ fmt.Sprintf("%d", time.Now().Unix()) + + dp1 := dp.NewDevicePluginStub(devs, socketPath) + dp1.SetAllocFunc(stubAllocFunc) + err := dp1.Start() + framework.ExpectNoError(err) + + By("Register resources") + err = dp1.Register(pluginapi.KubeletSocket, resourceName) + framework.ExpectNoError(err) + + By("Waiting for the resource exported by the stub device plugin to become available on the local node") + devsLen := int64(len(devs)) + Eventually(func() int64 { + node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + return numberOfDevices(node, resourceName) + }, 30*time.Second, framework.Poll).Should(Equal(devsLen)) + + By("Creating one pod on node with at least one fake-device") + podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs" + pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD)) + deviceIDRE := "stub devices: (Dev-[0-9]+)" + count1, devId1 := parseLogFromNRuns(f, pod1.Name, pod1.Name, 0, deviceIDRE) + Expect(devId1).To(Not(Equal(""))) + + pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + + By("Restarting Kubelet and waiting for the current running pod to restart") + restartKubelet() + + By("Confirming that after a kubelet and pod restart, fake-device assignement is kept") + count1, devIdRestart1 := parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+1, deviceIDRE) + Expect(devIdRestart1).To(Equal(devId1)) + + By("Wait for node is ready") + framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout) + + By("Re-Register resources") + dp1 = dp.NewDevicePluginStub(devs, socketPath) + dp1.SetAllocFunc(stubAllocFunc) + err = dp1.Start() + framework.ExpectNoError(err) + + err = dp1.Register(pluginapi.KubeletSocket, resourceName) + framework.ExpectNoError(err) + + By("Waiting for resource to become available on the local node after re-registration") + 
Eventually(func() int64 { + node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + return numberOfDevices(node, resourceName) + }, 30*time.Second, framework.Poll).Should(Equal(devsLen)) + + By("Creating another pod") + pod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD)) + + By("Checking that pods got a different GPU") + count2, devId2 := parseLogFromNRuns(f, pod2.Name, pod2.Name, 1, deviceIDRE) + + Expect(devId1).To(Not(Equal(devId2))) + + By("Deleting device plugin.") + err = dp1.Stop() + framework.ExpectNoError(err) + + By("Waiting for stub device plugin to become unavailable on the local node") + Eventually(func() bool { + node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + return numberOfDevices(node, resourceName) <= 0 + }, 10*time.Minute, framework.Poll).Should(BeTrue()) + + By("Checking that scheduled pods can continue to run even after we delete device plugin.") + count1, devIdRestart1 = parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+1, deviceIDRE) + Expect(devIdRestart1).To(Equal(devId1)) + count2, devIdRestart2 := parseLogFromNRuns(f, pod2.Name, pod2.Name, count2+1, deviceIDRE) + Expect(devIdRestart2).To(Equal(devId2)) + + By("Restarting Kubelet.") + restartKubelet() + + By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.") + count1, devIdRestart1 = parseLogFromNRuns(f, pod1.Name, pod1.Name, count1+2, deviceIDRE) + Expect(devIdRestart1).To(Equal(devId1)) + count2, devIdRestart2 = parseLogFromNRuns(f, pod2.Name, pod2.Name, count2+2, deviceIDRE) + Expect(devIdRestart2).To(Equal(devId2)) + + // Cleanup + f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) + f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) + }) + }) +}) + +// 
makeBusyboxPod returns a simple Pod spec with a busybox container // that requests resourceName and runs the specified command. func makeBusyboxPod(resourceName, cmd string) *v1.Pod { podName := "device-plugin-test-" + string(uuid.NewUUID()) @@ -78,16 +201,19 @@ func parseLogFromNRuns(f *framework.Framework, podName string, contName string, count = p.Status.ContainerStatuses[0].RestartCount return count >= restartCount }, 5*time.Minute, framework.Poll).Should(BeTrue()) + logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) } + framework.Logf("got pod logs: %v", logs) regex := regexp.MustCompile(re) matches := regex.FindStringSubmatch(logs) if len(matches) < 2 { return count, "" } + return count, matches[1] } @@ -100,3 +226,37 @@ func numberOfDevices(node *v1.Node, resourceName string) int64 { return val.Value() } + +// stubAllocFunc will pass to stub device plugin +func stubAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) { + var response pluginapi.AllocateResponse + for _, requestID := range r.DevicesIDs { + dev, ok := devs[requestID] + if !ok { + return nil, fmt.Errorf("invalid allocation request with non-existing device %s", requestID) + } + + if dev.Health != pluginapi.Healthy { + return nil, fmt.Errorf("invalid allocation request with unhealthy device: %s", requestID) + } + + // create fake device file + fpath := filepath.Join("/tmp", dev.ID) + + // clean first + os.RemoveAll(fpath) + f, err := os.Create(fpath) + if err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("failed to create fake device file: %s", err) + } + + f.Close() + + response.Mounts = append(response.Mounts, &pluginapi.Mount{ + ContainerPath: fpath, + HostPath: fpath, + }) + } + + return &response, nil +} diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index f81ab6f5d8b..9a9e39b91eb 100644 --- 
a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -24,6 +24,7 @@ import ( "net/http" "os/exec" "reflect" + "regexp" "strings" "time" From 8d44e0b38a122809b86c3d24e1ee9d0a9f289e46 Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Wed, 10 Jan 2018 16:24:46 -0800 Subject: [PATCH 730/794] Remove the deprecated vagrant kube-up implementation. --- Vagrantfile | 325 ---------------- build/lib/release.sh | 1 - build/release-tars/BUILD | 1 - cluster/saltbase/README.md | 9 +- cluster/vagrant/OWNERS | 36 -- cluster/vagrant/config-default.sh | 122 ------ cluster/vagrant/config-test.sh | 29 -- cluster/vagrant/pod-ip-test.sh | 105 ------ cluster/vagrant/provision-master.sh | 122 ------ cluster/vagrant/provision-network-master.sh | 91 ----- cluster/vagrant/provision-network-node.sh | 51 --- cluster/vagrant/provision-node.sh | 88 ----- cluster/vagrant/provision-utils.sh | 222 ----------- cluster/vagrant/util.sh | 389 -------------------- test/e2e/framework/test_context.go | 2 +- test/e2e/framework/util.go | 6 - 16 files changed, 5 insertions(+), 1594 deletions(-) delete mode 100644 Vagrantfile delete mode 100644 cluster/vagrant/OWNERS delete mode 100755 cluster/vagrant/config-default.sh delete mode 100644 cluster/vagrant/config-test.sh delete mode 100755 cluster/vagrant/pod-ip-test.sh delete mode 100755 cluster/vagrant/provision-master.sh delete mode 100644 cluster/vagrant/provision-network-master.sh delete mode 100644 cluster/vagrant/provision-network-node.sh delete mode 100755 cluster/vagrant/provision-node.sh delete mode 100755 cluster/vagrant/provision-utils.sh delete mode 100755 cluster/vagrant/util.sh diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index 8743a6f34af..00000000000 --- a/Vagrantfile +++ /dev/null @@ -1,325 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
-VAGRANTFILE_API_VERSION = "2" - -# Require a recent version of vagrant otherwise some have reported errors setting host names on boxes -Vagrant.require_version ">= 1.7.4" - -if ARGV.first == "up" && ENV['USING_KUBE_SCRIPTS'] != 'true' - raise Vagrant::Errors::VagrantError.new, < { - 'fedora' => { - # :box_url and :box_version are optional (and mutually exclusive); - # if :box_url is omitted the box will be retrieved by :box_name (and - # :box_version if provided) from - # http://atlas.hashicorp.com/boxes/search (formerly - # http://vagrantcloud.com/); this allows you override :box_name with - # your own value so long as you provide :box_url; for example, the - # "official" name of this box is "rickard-von-essen/ - # opscode_fedora-20", but by providing the URL and our own name, we - # make it appear as yet another provider under the "kube-fedora22" - # box - :box_name => 'kube-fedora23', - :box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/parallels/opscode_fedora-23_chef-provisionerless.box' - } - }, - :virtualbox => { - 'fedora' => { - :box_name => 'kube-fedora23', - :box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-23_chef-provisionerless.box' - } - }, - :libvirt => { - 'fedora' => { - :box_name => 'kube-fedora23', - :box_url => 'https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-23-20151030.x86_64.vagrant-libvirt.box' - } - }, - :vmware_desktop => { - 'fedora' => { - :box_name => 'kube-fedora23', - :box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/vmware/opscode_fedora-23_chef-provisionerless.box' - } - }, - :vsphere => { - 'fedora' => { - :box_name => 'vsphere-dummy', - :box_url => 'https://github.com/deromka/vagrant-vsphere/blob/master/vsphere-dummy.box?raw=true' - } - } -} - -# Give access to all physical cpu cores -# Previously cargo-culted from here: -# http://www.stefanwrobel.com/how-to-make-vagrant-performance-not-suck -# 
Rewritten to actually determine the number of hardware cores instead of assuming -# that the host has hyperthreading enabled. -host = RbConfig::CONFIG['host_os'] -if host =~ /darwin/ - $vm_cpus = `sysctl -n hw.physicalcpu`.to_i -elsif host =~ /linux/ - #This should work on most processors, however it will fail on ones without the core id field. - #So far i have only seen this on a raspberry pi. which you probably don't want to run vagrant on anyhow... - #But just in case we'll default to the result of nproc if we get 0 just to be safe. - $vm_cpus = `cat /proc/cpuinfo | grep 'core id' | sort -u | wc -l`.to_i - if $vm_cpus < 1 - $vm_cpus = `nproc`.to_i - end -else # sorry Windows folks, I can't help you - $vm_cpus = 2 -end - -# Give VM 1024MB of RAM by default -# In Fedora VM, tmpfs device is mapped to /tmp. tmpfs is given 50% of RAM allocation. -# When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens. -# This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.) -$vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i -$vm_node_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 2048).to_i - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - if Vagrant.has_plugin?("vagrant-proxyconf") - $http_proxy = ENV['KUBERNETES_HTTP_PROXY'] || "" - $https_proxy = ENV['KUBERNETES_HTTPS_PROXY'] || "" - $no_proxy = ENV['KUBERNETES_NO_PROXY'] || "127.0.0.1" - config.proxy.http = $http_proxy - config.proxy.https = $https_proxy - config.proxy.no_proxy = $no_proxy - end - - # this corrects a bug in 1.8.5 where an invalid SSH key is inserted. 
- if Vagrant::VERSION == "1.8.5" - config.ssh.insert_key = false - end - - def setvmboxandurl(config, provider) - if ENV['KUBERNETES_BOX_NAME'] then - config.vm.box = ENV['KUBERNETES_BOX_NAME'] - - if ENV['KUBERNETES_BOX_URL'] then - config.vm.box_url = ENV['KUBERNETES_BOX_URL'] - end - - if ENV['KUBERNETES_BOX_VERSION'] then - config.vm.box_version = ENV['KUBERNETES_BOX_VERSION'] - end - else - config.vm.box = $kube_provider_boxes[provider][$kube_os][:box_name] - - if $kube_provider_boxes[provider][$kube_os][:box_url] then - config.vm.box_url = $kube_provider_boxes[provider][$kube_os][:box_url] - end - - if $kube_provider_boxes[provider][$kube_os][:box_version] then - config.vm.box_version = $kube_provider_boxes[provider][$kube_os][:box_version] - end - end - end - - def customize_vm(config, vm_mem) - - if $use_nfs then - config.vm.synced_folder ".", "/vagrant", nfs: true - elsif $use_rsync then - opts = {} - if ENV['KUBERNETES_VAGRANT_RSYNC_ARGS'] then - opts[:rsync__args] = ENV['KUBERNETES_VAGRANT_RSYNC_ARGS'].split(" ") - end - if ENV['KUBERNETES_VAGRANT_RSYNC_EXCLUDE'] then - opts[:rsync__exclude] = ENV['KUBERNETES_VAGRANT_RSYNC_EXCLUDE'].split(" ") - end - config.vm.synced_folder ".", "/vagrant", opts - end - - # Try VMWare Fusion first (see - # https://docs.vagrantup.com/v2/providers/basic_usage.html) - config.vm.provider :vmware_fusion do |v, override| - setvmboxandurl(override, :vmware_desktop) - v.vmx['memsize'] = vm_mem - v.vmx['numvcpus'] = $vm_cpus - end - - # configure libvirt provider - config.vm.provider :libvirt do |v, override| - setvmboxandurl(override, :libvirt) - v.memory = vm_mem - v.cpus = $vm_cpus - v.nested = true - v.volume_cache = 'none' - end - - # Then try VMWare Workstation - config.vm.provider :vmware_workstation do |v, override| - setvmboxandurl(override, :vmware_desktop) - v.vmx['memsize'] = vm_mem - v.vmx['numvcpus'] = $vm_cpus - end - - # Then try Parallels - config.vm.provider :parallels do |v, override| - 
setvmboxandurl(override, :parallels) - v.memory = vm_mem # v.customize ['set', :id, '--memsize', vm_mem] - v.cpus = $vm_cpus # v.customize ['set', :id, '--cpus', $vm_cpus] - - # Don't attempt to update the Parallels tools on the image (this can - # be done manually if necessary) - v.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off'] - - # Set up Parallels folder sharing to behave like VirtualBox (i.e., - # mount the current directory as /vagrant and that's it) - v.customize ['set', :id, '--shf-guest', 'off'] - v.customize ['set', :id, '--shf-guest-automount', 'off'] - v.customize ['set', :id, '--shf-host', 'on'] - - # Synchronize VM clocks to host clock (Avoid certificate invalid issue) - v.customize ['set', :id, '--time-sync', 'on'] - - # Remove all auto-mounted "shared folders"; the result seems to - # persist between runs (i.e., vagrant halt && vagrant up) - override.vm.provision :shell, :inline => (%q{ - set -ex - if [ -d /media/psf ]; then - for i in /media/psf/*; do - if [ -d "${i}" ]; then - umount "${i}" || true - rmdir -v "${i}" - fi - done - rmdir -v /media/psf - fi - exit - }).strip - end - - # Then try vsphere - config.vm.provider :vsphere do |vsphere, override| - setvmboxandurl(override, :vsphere) - - #config.vm.hostname = ENV['MASTER_NAME'] - - config.ssh.username = ENV['MASTER_USER'] - config.ssh.password = ENV['MASTER_PASSWD'] - - config.ssh.pty = true - config.ssh.insert_key = true - #config.ssh.private_key_path = '~/.ssh/id_rsa_vsphere' - - # Don't attempt to update the tools on the image (this can - # be done manually if necessary) - # vsphere.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off'] - - # The vSphere host we're going to connect to - vsphere.host = ENV['VAGRANT_VSPHERE_URL'] - - # The ESX host for the new VM - vsphere.compute_resource_name = ENV['VAGRANT_VSPHERE_RESOURCE_POOL'] - - # The resource pool for the new VM - #vsphere.resource_pool_name = 'Comp' - - # path to 
folder where new VM should be created, if not specified template's parent folder will be used - vsphere.vm_base_path = ENV['VAGRANT_VSPHERE_BASE_PATH'] - - # The template we're going to clone - vsphere.template_name = ENV['VAGRANT_VSPHERE_TEMPLATE_NAME'] - - # The name of the new machine - #vsphere.name = ENV['MASTER_NAME'] - - # vSphere login - vsphere.user = ENV['VAGRANT_VSPHERE_USERNAME'] - - # vSphere password - vsphere.password = ENV['VAGRANT_VSPHERE_PASSWORD'] - - # cpu count - vsphere.cpu_count = $vm_cpus - - # memory in MB - vsphere.memory_mb = vm_mem - - # If you don't have SSL configured correctly, set this to 'true' - vsphere.insecure = ENV['VAGRANT_VSPHERE_INSECURE'] - end - - - # Don't attempt to update Virtualbox Guest Additions (requires gcc) - if Vagrant.has_plugin?("vagrant-vbguest") then - config.vbguest.auto_update = false - end - # Finally, fall back to VirtualBox - config.vm.provider :virtualbox do |v, override| - setvmboxandurl(override, :virtualbox) - v.memory = vm_mem # v.customize ["modifyvm", :id, "--memory", vm_mem] - v.cpus = $vm_cpus # v.customize ["modifyvm", :id, "--cpus", $vm_cpus] - - # Use faster paravirtualized networking - v.customize ["modifyvm", :id, "--nictype1", "virtio"] - v.customize ["modifyvm", :id, "--nictype2", "virtio"] - end - end - - # Kubernetes master - config.vm.define "master" do |c| - customize_vm c, $vm_master_mem - if ENV['KUBE_TEMP'] then - script = "#{ENV['KUBE_TEMP']}/master-start.sh" - c.vm.provision "shell", run: "always", path: script - end - c.vm.network "private_network", ip: "#{$master_ip}" - end - - # Kubernetes node - $num_node.times do |n| - node_vm_name = "node-#{n+1}" - - config.vm.define node_vm_name do |node| - customize_vm node, $vm_node_mem - - node_ip = $node_ips[n] - if ENV['KUBE_TEMP'] then - script = "#{ENV['KUBE_TEMP']}/node-start-#{n}.sh" - node.vm.provision "shell", run: "always", path: script - end - node.vm.network "private_network", ip: "#{node_ip}" - end - end -end diff --git 
a/build/lib/release.sh b/build/lib/release.sh index 870451601f6..a34f4162a0e 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -517,7 +517,6 @@ EOF cp -R "${KUBE_ROOT}/docs" "${release_stage}/" cp "${KUBE_ROOT}/README.md" "${release_stage}/" cp "${KUBE_ROOT}/Godeps/LICENSES" "${release_stage}/" - cp "${KUBE_ROOT}/Vagrantfile" "${release_stage}/" echo "${KUBE_GIT_VERSION}" > "${release_stage}/version" diff --git a/build/release-tars/BUILD b/build/release-tars/BUILD index 39f588e9518..27773468028 100644 --- a/build/release-tars/BUILD +++ b/build/release-tars/BUILD @@ -193,7 +193,6 @@ pkg_tar( files = [ "//:Godeps/LICENSES", "//:README.md", - "//:Vagrantfile", "//:version", "//cluster:all-srcs", "//docs:all-srcs", diff --git a/cluster/saltbase/README.md b/cluster/saltbase/README.md index 765d801ff7d..d3d53792838 100644 --- a/cluster/saltbase/README.md +++ b/cluster/saltbase/README.md @@ -4,11 +4,10 @@ This is the root of the SaltStack configuration for Kubernetes. A high level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](https://kubernetes.io/docs/admin/salt/) This SaltStack configuration currently applies to default -configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and -Ubuntu-on-Azure. (That doesn't mean it can't be made to apply to an -arbitrary configuration, but those are only the in-tree OS/IaaS -combinations supported today.) As you peruse the configuration, these -are shorthanded as `gce`, `vagrant`, `aws`, `azure-legacy` in `grains.cloud`; +configurations for Debian-on-GCE. (That doesn't mean it can't +be made to apply to an arbitrary configuration, but those are +only the in-tree OS/IaaS combinations supported today.) As you +peruse the configuration, this is shorthanded as `gce`, in `grains.cloud`; the documentation in this tree uses this same shorthand for convenience. 
See more: diff --git a/cluster/vagrant/OWNERS b/cluster/vagrant/OWNERS deleted file mode 100644 index 3be25134ef5..00000000000 --- a/cluster/vagrant/OWNERS +++ /dev/null @@ -1,36 +0,0 @@ -approvers: -- derekwaynecarr -reviewers: -- ArtfulCoder -- thockin -- lavalamp -- smarterclayton -- derekwaynecarr -- caesarxuchao -- vishh -- mikedanese -- liggitt -- nikhiljindal -- erictune -- dchen1107 -- zmerlynn -- justinsb -- roberthbailey -- eparis -- jlowdermilk -- piosz -- jsafrane -- jbeda -- madhusudancs -- jayunit100 -- cjcullen -- david-mcmahon -- mfojtik -- pweil- -- dcbw -- ivan4th -- filbranden -- dshulyak -- k82cn -- caseydavenport -- johscheuer diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh deleted file mode 100755 index 63b49146db2..00000000000 --- a/cluster/vagrant/config-default.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -## Contains configuration values for interacting with the Vagrant cluster - -# Number of nodes in the cluster -NUM_NODES=${NUM_NODES-"1"} -export NUM_NODES - -# The IP of the master -export MASTER_IP=${MASTER_IP-"10.245.1.2"} -export KUBE_MASTER_IP=${MASTER_IP} - -export INSTANCE_PREFIX="kubernetes" -export MASTER_NAME="${INSTANCE_PREFIX}-master" - -# Should the master serve as a node -REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} - -# Map out the IPs, names and container subnets of each node -export NODE_IP_BASE=${NODE_IP_BASE-"10.245.1."} -NODE_CONTAINER_SUBNET_BASE="10.246" -MASTER_CONTAINER_NETMASK="255.255.255.0" -MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1" -MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24" -CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16" -for ((i=0; i < NUM_NODES; i++)) do - NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))" - NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))" - NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24" - NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1" - NODE_CONTAINER_NETMASKS[$i]="255.255.255.0" - VAGRANT_NODE_NAMES[$i]="node-$((i+1))" -done - -CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.246.0.0/16}" - -SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET - -# Since this isn't exposed on the network, default to a simple user/passwd -MASTER_USER="${MASTER_USER:-vagrant}" -MASTER_PASSWD="${MASTER_PASSWD:-vagrant}" - -# Admission Controllers to invoke prior to persisting objects in cluster -# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely. -ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,PVCProtection,ResourceQuota - -# Optional: Enable node logging. 
-ENABLE_NODE_LOGGING=false -LOGGING_DESTINATION=elasticsearch - -# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up. -ENABLE_CLUSTER_LOGGING=false -ELASTICSEARCH_LOGGING_REPLICAS=1 - -# Optional: Cluster monitoring to setup as part of the cluster bring up: -# none - No cluster monitoring setup -# influxdb - Heapster, InfluxDB, and Grafana -# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging -ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}" - -# Extra options to set on the Docker command line. This is useful for setting -# --insecure-registry for local registries, or globally configuring selinux options -# TODO Enable selinux when Fedora 21 repositories get an updated docker package -# see https://bugzilla.redhat.com/show_bug.cgi?id=1216151 -#EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-} -b=cbr0 --selinux-enabled --insecure-registry 10.0.0.0/8" -EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-} --insecure-registry 10.0.0.0/8 -s overlay" - -# Flag to tell the kubelet to enable CFS quota support -ENABLE_CPU_CFS_QUOTA="${KUBE_ENABLE_CPU_CFS_QUOTA:-true}" - -# Optional: Install cluster DNS. 
-ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" -DNS_SERVER_IP="10.247.0.10" -DNS_DOMAIN="cluster.local" - -# Optional: Enable DNS horizontal autoscaler -ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}" - -# Optional: Install Kubernetes UI -ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" - -# Optional: Enable setting flags for kube-apiserver to turn on behavior in active-dev -RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" - -# Determine extra certificate names for master -octets=($(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e 's|/.*||' -e 's/\./ /g')) -((octets[3]+=1)) -service_ip=$(echo "${octets[*]}" | sed 's/ /./g') -MASTER_EXTRA_SANS="IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}" - -NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, kubenet, etc -if [ "${NETWORK_PROVIDER}" == "kubenet" ]; then - CLUSTER_IP_RANGE="${CONTAINER_SUBNET}" -fi - -# If enabled kube-controller-manager will be started with the --enable-hostpath-provisioner flag -ENABLE_HOSTPATH_PROVISIONER="${ENABLE_HOSTPATH_PROVISIONER:-true}" - -# OpenContrail networking plugin specific settings -OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}" -OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}" -OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}" - -# Optional: if set to true, kube-up will configure the cluster to run e2e tests. -E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false} - -# Default fallback NETWORK_IF_NAME, will be used in case when no 'VAGRANT-BEGIN' comments were defined in network-script -export DEFAULT_NETWORK_IF_NAME="eth0" diff --git a/cluster/vagrant/config-test.sh b/cluster/vagrant/config-test.sh deleted file mode 100644 index d5458c2861b..00000000000 --- a/cluster/vagrant/config-test.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -## Contains configuration values for interacting with the Vagrant cluster in test mode -#Set NUM_NODES to minimum required for testing. -NUM_NODES=2 - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. -source "${KUBE_ROOT}/cluster/vagrant/config-default.sh" - -# Do not register the master kubelet during testing -REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} - -# Optional: if set to true, kube-up will configure the cluster to run e2e tests. -E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false} - diff --git a/cluster/vagrant/pod-ip-test.sh b/cluster/vagrant/pod-ip-test.sh deleted file mode 100755 index 83ed59b3c86..00000000000 --- a/cluster/vagrant/pod-ip-test.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -echoOK() { - TC='\e[' - RegB="${TC}0m" - if [ "$1" -eq "0" ]; then - Green="${TC}32m" - echo -e "[${Green}OK${RegB}]" - else - Red="${TC}31m" - echo -e "[${Red}FAIL${RegB}]" - echo "Check log file." - exit 1 - fi -} - -usage() { - echo "Usage options: [--logfile ]" -} - -logfile=/dev/null -while [[ $# > 0 ]]; do - key="$1" - shift - case $key in - -l|--logfile) - logfile="$1" - if [ "$logfile" == "" ]; then - usage - exit 1 - fi - shift - ;; - *) - # unknown option - usage - exit 1 - ;; - esac -done - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. -cd "${KUBE_ROOT}" - -echo All verbose output will be redirected to $logfile, use --logfile option to change. - -printf "Start the cluster with 2 nodes .. " -export NUM_NODES=2 -export KUBERNETES_PROVIDER=vagrant - -(cluster/kube-up.sh >>"$logfile" 2>&1) || true -echoOK $? - -printf "Check if node-1 can reach kubernetes master .. " -vagrant ssh node-1 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1 -echoOK $? -printf "Check if node-2 can reach kubernetes master .. " -vagrant ssh node-2 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1 -echoOK $? - -printf "Pull an image that runs a web server on node-1 .. " -vagrant ssh node-1 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1 -echoOK $? -printf "Pull an image that runs a web server on node-2 .. " -vagrant ssh node-2 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1 -echoOK $? - -printf "Run the server on node-1 .. " -vagrant ssh node-1 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1 -echoOK $? -printf "Run the server on node-2 .. " -vagrant ssh node-2 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1 -echoOK $? - -printf "Run ping from node-1 to docker bridges and to the containers on both nodes .. " -vagrant ssh node-1 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1 -echoOK $? -printf "Same pinch from node-2 .. 
" -vagrant ssh node-2 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1 -echoOK $? - -printf "tcp check, curl to both the running webservers from node-1 .. " -vagrant ssh node-1 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1 -echoOK $? -printf "tcp check, curl to both the running webservers from node-2 .. " -vagrant ssh node-2 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1 -echoOK $? - -printf "All good, destroy the cluster .. " -vagrant destroy -f >>"$logfile" 2>&1 -echoOK $? diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh deleted file mode 100755 index eeff6ed8a91..00000000000 --- a/cluster/vagrant/provision-master.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -o errexit -set -o nounset -set -o pipefail - -# Set the host name explicitly -# See: https://github.com/mitchellh/vagrant/issues/2430 -hostnamectl set-hostname ${MASTER_NAME} -# Set the variable to empty value explicitly -if_to_edit="" - -if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=23 ]]; then - # Disable network interface being managed by Network Manager (needed for Fedora 21+) - NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/ - if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN ) - for if_conf in ${if_to_edit}; do - grep -q ^NM_CONTROLLED= ${if_conf} || echo 'NM_CONTROLLED=no' >> ${if_conf} - sed -i 's/#^NM_CONTROLLED=.*/NM_CONTROLLED=no/' ${if_conf} - done; - systemctl restart network -fi - -# needed for vsphere support -# handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts -# set the NETWORK_IF_NAME to have a default value in such case -NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'` -if [[ -z "$NETWORK_IF_NAME" ]]; then - NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME} -fi - -# Setup hosts file to support ping by hostname to each node in the cluster from apiserver -for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - node=${NODE_NAMES[$i]} - ip=${NODE_IPS[$i]} - if [ ! "$(cat /etc/hosts | grep $node)" ]; then - echo "Adding $node to hosts file" - echo "$ip $node" >> /etc/hosts - fi -done -echo "127.0.0.1 localhost" >> /etc/hosts # enables cmds like 'kubectl get pods' on master. -echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts - -enable-accounting -prepare-package-manager - -# Configure the master network -if [ "${NETWORK_PROVIDER}" != "kubenet" ]; then - provision-network-master -fi - -write-salt-config kubernetes-master - -# Generate and distribute a shared secret (bearer token) to -# apiserver and kubelet so that kubelet can authenticate to -# apiserver to send events. -known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" -if [[ ! 
-f "${known_tokens_file}" ]]; then - - mkdir -p /srv/salt-overlay/salt/kube-apiserver - known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" - (umask u=rw,go= ; - echo "$KUBELET_TOKEN,kubelet,kubelet" > $known_tokens_file; - echo "$KUBE_PROXY_TOKEN,kube_proxy,kube_proxy" >> $known_tokens_file; - echo "$KUBE_BEARER_TOKEN,admin,admin" >> $known_tokens_file) - - mkdir -p /srv/salt-overlay/salt/kubelet - kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth" - (umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file) - - create-salt-kubelet-auth - create-salt-kubeproxy-auth - # Generate tokens for other "service accounts". Append to known_tokens. - # - # NB: If this list ever changes, this script actually has to - # change to detect the existence of this file, kill any deleted - # old tokens and add any new tokens (to handle the upgrade case). - service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns") - for account in "${service_accounts[@]}"; do - token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - echo "${token},${account},${account}" >> "${known_tokens_file}" - done -fi - - -readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv" -if [ ! -e "${BASIC_AUTH_FILE}" ]; then - mkdir -p /srv/salt-overlay/salt/kube-apiserver - (umask 077; - echo "${MASTER_PASSWD},${MASTER_USER},admin" > "${BASIC_AUTH_FILE}") -fi - -# Enable Fedora Cockpit on host to support Kubernetes administration -# Access it by going to :9090 and login as vagrant/vagrant -if ! 
which /usr/libexec/cockpit-ws &>/dev/null; then - - pushd /etc/yum.repos.d - curl -OL https://copr.fedorainfracloud.org/coprs/g/cockpit/cockpit-preview/repo/fedora-23/msuchy-cockpit-preview-fedora-23.repo - dnf install -y cockpit cockpit-kubernetes docker socat ethtool - popd - - systemctl enable cockpit.socket - systemctl start cockpit.socket -fi - -install-salt - -run-salt diff --git a/cluster/vagrant/provision-network-master.sh b/cluster/vagrant/provision-network-master.sh deleted file mode 100644 index 14280cba072..00000000000 --- a/cluster/vagrant/provision-network-master.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# provision-network-master configures flannel on the master -function provision-network-master { - - echo "Provisioning network on master" - - FLANNEL_ETCD_URL="http://${MASTER_IP}:4379" - - # Install etcd for flannel data - if ! 
which etcd >/dev/null 2>&1; then - - dnf install -y etcd - - # Modify etcd configuration for flannel data - cat </etc/etcd/etcd.conf -ETCD_NAME=flannel -ETCD_DATA_DIR="/var/lib/etcd/flannel.etcd" -ETCD_LISTEN_PEER_URLS="http://${MASTER_IP}:4380" -ETCD_LISTEN_CLIENT_URLS="http://${MASTER_IP}:4379" -ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${MASTER_IP}:4380" -ETCD_INITIAL_CLUSTER="flannel=http://${MASTER_IP}:4380" -ETCD_ADVERTISE_CLIENT_URLS="${FLANNEL_ETCD_URL}" -EOF - - # fix the etcd boot failure issue - sed -i '/^Restart/a RestartSec=10' /usr/lib/systemd/system/etcd.service - systemctl daemon-reload - - # Enable and start etcd - systemctl enable etcd - systemctl start etcd - - fi - - # Install flannel for overlay - if ! which flanneld >/dev/null 2>&1; then - - dnf install -y flannel - - cat </etc/flannel-config.json -{ - "Network": "${CONTAINER_SUBNET}", - "SubnetLen": 24, - "Backend": { - "Type": "udp", - "Port": 8285 - } -} -EOF - - # Import default configuration into etcd for master setup - etcdctl -C ${FLANNEL_ETCD_URL} set /coreos.com/network/config < /etc/flannel-config.json - - # Configure local daemon to speak to master - NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/ - if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN ) - NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'` - # needed for vsphere support - # handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts - # set the NETWORK_IF_NAME to have a default value in such case - if [[ -z "$NETWORK_IF_NAME" ]]; then - NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME} - fi - cat </etc/sysconfig/flanneld -FLANNEL_ETCD="${FLANNEL_ETCD_URL}" -FLANNEL_ETCD_KEY="/coreos.com/network" -FLANNEL_OPTIONS="-iface=${NETWORK_IF_NAME} --ip-masq" -EOF - - # Start flannel - systemctl enable flanneld - systemctl start flanneld - fi - - echo "Network configuration verified" -} diff --git a/cluster/vagrant/provision-network-node.sh 
b/cluster/vagrant/provision-network-node.sh deleted file mode 100644 index c8fd42252ef..00000000000 --- a/cluster/vagrant/provision-network-node.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# provision-network-node configures flannel on the node -function provision-network-node { - - echo "Provisioning network on node" - - FLANNEL_ETCD_URL="http://${MASTER_IP}:4379" - - # Install flannel for overlay - if ! 
which flanneld >/dev/null 2>&1; then - - dnf install -y flannel - - # Configure local daemon to speak to master - NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/ - if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN ) - NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'` - # needed for vsphere support - # handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts - # set the NETWORK_IF_NAME to have a default value in such case - if [[ -z "$NETWORK_IF_NAME" ]]; then - NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME} - fi - cat </etc/sysconfig/flanneld -FLANNEL_ETCD="${FLANNEL_ETCD_URL}" -FLANNEL_ETCD_KEY="/coreos.com/network" -FLANNEL_OPTIONS="-iface=${NETWORK_IF_NAME} --ip-masq" -EOF - - # Start flannel - systemctl enable flanneld - systemctl start flanneld - fi - - echo "Network configuration verified" -} diff --git a/cluster/vagrant/provision-node.sh b/cluster/vagrant/provision-node.sh deleted file mode 100755 index 8d43a63cada..00000000000 --- a/cluster/vagrant/provision-node.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -o errexit -set -o nounset -set -o pipefail - -# Set the host name explicitly -# See: https://github.com/mitchellh/vagrant/issues/2430 -hostnamectl set-hostname ${NODE_NAME} -if_to_edit="" - -if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=23 ]]; then - # Disable network interface being managed by Network Manager (needed for Fedora 21+) - NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/ - if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN ) - for if_conf in ${if_to_edit}; do - grep -q ^NM_CONTROLLED= ${if_conf} || echo 'NM_CONTROLLED=no' >> ${if_conf} - sed -i 's/#^NM_CONTROLLED=.*/NM_CONTROLLED=no/' ${if_conf} - done; - systemctl restart network -fi - -# needed for vsphere support -# handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts -# set the NETWORK_IF_NAME to have a default value in such case -NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'` -if [[ -z "$NETWORK_IF_NAME" ]]; then - NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME} -fi - -# Setup hosts file to support ping by hostname to master -if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then - echo "Adding $MASTER_NAME to hosts file" - echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts -fi -echo "$NODE_IP $NODE_NAME" >> /etc/hosts - -# Setup hosts file to support ping by hostname to each node in the cluster -for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - node=${NODE_NAMES[$i]} - ip=${NODE_IPS[$i]} - if [ ! "$(cat /etc/hosts | grep $node)" ]; then - echo "Adding $node to hosts file" - echo "$ip $node" >> /etc/hosts - fi -done - -enable-accounting -prepare-package-manager - -# Configure network -if [ "${NETWORK_PROVIDER}" != "kubenet" ]; then - provision-network-node -fi - -write-salt-config kubernetes-pool - -# Generate kubelet and kube-proxy auth file(kubeconfig) if there is not an existing one -known_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig" -if [[ ! 
-f "${known_kubeconfig_file}" ]]; then - create-salt-kubelet-auth - create-salt-kubeproxy-auth -else - # stop kubelet, let salt start it later - systemctl stop kubelet -fi - -install-salt -add-volume-support - -run-salt - -dnf install -y socat ethtool -dnf update -y docker diff --git a/cluster/vagrant/provision-utils.sh b/cluster/vagrant/provision-utils.sh deleted file mode 100755 index e719a830c8f..00000000000 --- a/cluster/vagrant/provision-utils.sh +++ /dev/null @@ -1,222 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -function enable-accounting() { - mkdir -p /etc/systemd/system.conf.d/ - cat </etc/systemd/system.conf.d/kubernetes-accounting.conf -[Manager] -DefaultCPUAccounting=yes -DefaultMemoryAccounting=yes -EOF - systemctl daemon-reload -} - -function prepare-package-manager() { - echo "Prepare package manager" - - # Useful if a mirror is broken or slow - if [ -z "$CUSTOM_FEDORA_REPOSITORY_URL" ]; then - echo "fastestmirror=True" >> /etc/dnf/dnf.conf - else - # remove trailing slash from URL if it's present - CUSTOM_FEDORA_REPOSITORY_URL="${CUSTOM_FEDORA_REPOSITORY_URL%/}" - sed -i -e "/^metalink=/d" /etc/yum.repos.d/*.repo - sed -i -e "s@^#baseurl=http://download.fedoraproject.org/pub/fedora@baseurl=$CUSTOM_FEDORA_REPOSITORY_URL@" /etc/yum.repos.d/*.repo - fi -} - - -function add-volume-support() { - echo "Adding nfs volume support" - - # we need nfs-utils to support volumes - dnf install -y nfs-utils -} - -function write-salt-config() { - local role="$1" - - # Update salt configuration - mkdir -p /etc/salt/minion.d - - mkdir -p /srv/salt-overlay/pillar - cat </srv/salt-overlay/pillar/cluster-params.sls -service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' -cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' -enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' -enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")' -enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")' -enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")' -logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")' -elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")' -enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")' -dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")' -dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")' -instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' 
-admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")' -enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")' -network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")' -cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' -opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")' -opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")' -opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")' -e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")' -enable_hostpath_provisioner: '$(echo "$ENABLE_HOSTPATH_PROVISIONER" | sed -e "s/'/''/g")' -EOF - -if [ -n "${EVICTION_HARD:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")' -EOF -fi - - cat </etc/salt/minion.d/log-level-debug.conf -log_level: warning -log_level_logfile: warning -EOF - - cat </etc/salt/minion.d/grains.conf -grains: - node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' - publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' - network_mode: openvswitch - networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")' - api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' - kubelet_kubeconfig: /srv/salt-overlay/salt/kubelet/kubeconfig - cloud: vagrant - roles: - - $role - runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")' - docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")' - master_extra_sans: '$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")' - keep_host_etcd: true - kube_user: '$(echo "$KUBE_USER" | sed -e "s/'/''/g")' -EOF -} - -function release_not_found() { - echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2 - echo "are running from a clone of the git repo, please run 'make quick-release'." >&2 - echo "Note that this requires having Docker installed. 
If you are running " >&2 - echo "from a release tarball, something is wrong. Look at " >&2 - echo "http://kubernetes.io/ for information on how to contact the development team for help." >&2 - exit 1 -} - -function install-salt() { - server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz" - if [[ ! -f "$server_binary_tar" ]]; then - server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz" - fi - if [[ ! -f "$server_binary_tar" ]]; then - release_not_found - fi - - salt_tar="/vagrant/server/kubernetes-salt.tar.gz" - if [[ ! -f "$salt_tar" ]]; then - salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz" - fi - if [[ ! -f "$salt_tar" ]]; then - release_not_found - fi - - echo "Running release install script" - rm -rf /kube-install - mkdir -p /kube-install - pushd /kube-install - tar xzf "$salt_tar" - cp "$server_binary_tar" . - ./kubernetes/saltbase/install.sh "${server_binary_tar##*/}" - popd - - if ! which salt-call >/dev/null 2>&1; then - # Install salt from official repositories. - # Need to enable testing-repos to get version of salt with fix for dnf-core-plugins - dnf config-manager --set-enabled updates-testing - dnf install -y salt-minion - - # Fedora >= 23 includes salt packages but the bootstrap is - # creating configuration for a (non-existent) salt repo anyway. - # Remove the invalid repo to prevent dnf from warning about it on - # every update. Assume this problem is specific to Fedora 23 and - # will fixed by the time another version of Fedora lands. - local fedora_version=$(grep 'VERSION_ID' /etc/os-release | sed 's+VERSION_ID=++') - if [[ "${fedora_version}" = '23' ]]; then - local repo_file='/etc/yum.repos.d/saltstack-salt-fedora-23.repo' - if [[ -f "${repo_file}" ]]; then - rm "${repo_file}" - fi - fi - - fi -} - -function run-salt() { - echo " Now waiting for the Salt provisioning process to complete on this machine." 
- echo " This can take some time based on your network, disk, and cpu speed." - salt-call --local state.highstate -} - -function create-salt-kubelet-auth() { - local -r kubelet_kubeconfig_folder="/srv/salt-overlay/salt/kubelet" - mkdir -p "${kubelet_kubeconfig_folder}" - (umask 077; - cat > "${kubelet_kubeconfig_folder}/kubeconfig" << EOF -apiVersion: v1 -kind: Config -clusters: -- cluster: - server: "https://${MASTER_IP}" - insecure-skip-tls-verify: true - name: local -contexts: -- context: - cluster: local - user: kubelet - name: service-account-context -current-context: service-account-context -users: -- name: kubelet - user: - token: ${KUBELET_TOKEN} -EOF - ) -} - -function create-salt-kubeproxy-auth() { - kube_proxy_kubeconfig_folder="/srv/salt-overlay/salt/kube-proxy" - mkdir -p "${kube_proxy_kubeconfig_folder}" - (umask 077; - cat > "${kube_proxy_kubeconfig_folder}/kubeconfig" << EOF -apiVersion: v1 -kind: Config -clusters: -- cluster: - insecure-skip-tls-verify: true - name: local -contexts: -- context: - cluster: local - user: kube-proxy - name: service-account-context -current-context: service-account-context -users: -- name: kube-proxy - user: - token: ${KUBE_PROXY_TOKEN} -EOF - ) -} diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh deleted file mode 100755 index 3d022576d00..00000000000 --- a/cluster/vagrant/util.sh +++ /dev/null @@ -1,389 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts. - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. -source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}" -source "${KUBE_ROOT}/cluster/common.sh" - -function detect-master () { - KUBE_MASTER_IP=$MASTER_IP - echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2 -} - -# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[] -function detect-nodes { - echo "Nodes already detected" 1>&2 - KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}") -} - -# Verify prereqs on host machine Also sets exports USING_KUBE_SCRIPTS=true so -# that our Vagrantfile doesn't error out. -function verify-prereqs { - for x in vagrant; do - if ! which "$x" >/dev/null; then - echo "Can't find $x in PATH, please fix and retry." - exit 1 - fi - done - - local vagrant_plugins=$(vagrant plugin list | sed '-es% .*$%%' '-es% *% %g' | tr ' ' $'\n') - local providers=( - # Format is: - # provider_ctl_executable vagrant_provider_name vagrant_provider_plugin_re - # either provider_ctl_executable or vagrant_provider_plugin_re can - # be blank (i.e., '') if none is needed by Vagrant (see, e.g., - # virtualbox entry) - '' vmware_fusion vagrant-vmware-fusion - '' vmware_workstation vagrant-vmware-workstation - prlctl parallels vagrant-parallels - VBoxManage virtualbox '' - virsh libvirt vagrant-libvirt - '' vsphere vagrant-vsphere - ) - local provider_found='' - local provider_bin - local provider_name - local provider_plugin_re - - while [ "${#providers[@]}" -gt 0 ]; do - provider_bin=${providers[0]} - provider_name=${providers[1]} - provider_plugin_re=${providers[2]} - providers=("${providers[@]:3}") - - # If the provider is explicitly set, look only for that provider - if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ] \ - && [ "${VAGRANT_DEFAULT_PROVIDER}" != "${provider_name}" ]; 
then - continue - fi - - if ([ -z "${provider_bin}" ] \ - || which "${provider_bin}" >/dev/null 2>&1) \ - && ([ -z "${provider_plugin_re}" ] \ - || [ -n "$(echo "${vagrant_plugins}" | grep -E "^${provider_plugin_re}$")" ]); then - provider_found="${provider_name}" - # Stop after finding the first viable provider - break - fi - done - - if [ -z "${provider_found}" ]; then - if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ]; then - echo "Can't find the necessary components for the ${VAGRANT_DEFAULT_PROVIDER} vagrant provider." - echo "Possible reasons could be: " - echo -e "\t- vmrun utility is not in your path" - echo -e "\t- Vagrant plugin was not found." - echo -e "\t- VAGRANT_DEFAULT_PROVIDER is set, but not found." - echo "Please fix and retry." - else - echo "Can't find the necessary components for any viable vagrant providers (e.g., virtualbox), please fix and retry." - fi - - exit 1 - fi - - # Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no - # matter what directory the tools are called from. - export VAGRANT_CWD="${KUBE_ROOT}" - - export USING_KUBE_SCRIPTS=true -} - -# Create a set of provision scripts for the master and each of the nodes -function create-provision-scripts { - kube::util::ensure-temp-dir - - ( - echo "#! /bin/bash" - echo-kube-env - echo "NODE_IP='${MASTER_IP}'" - echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'" - echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-master.sh" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh" - ) > "${KUBE_TEMP}/master-start.sh" - - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - ( - echo "#! 
/bin/bash" - echo-kube-env - echo "NODE_NAME=(${NODE_NAMES[$i]})" - echo "NODE_IP='${NODE_IPS[$i]}'" - echo "NODE_ID='$i'" - echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'" - echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-node.sh" - awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-node.sh" - ) > "${KUBE_TEMP}/node-start-${i}.sh" - done -} - -function echo-kube-env() { - echo "KUBE_ROOT=/vagrant" - echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'" - echo "MASTER_NAME='${INSTANCE_PREFIX}-master'" - echo "MASTER_IP='${MASTER_IP}'" - echo "NODE_NAMES=(${NODE_NAMES[@]})" - echo "NODE_IPS=(${NODE_IPS[@]})" - echo "DEFAULT_NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}" - echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'" - echo "CLUSTER_IP_RANGE='${CLUSTER_IP_RANGE}'" - echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'" - echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'" - echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})" - echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" - echo "MASTER_USER='${MASTER_USER}'" - echo "MASTER_PASSWD='${MASTER_PASSWD}'" - echo "KUBE_USER='${KUBE_USER}'" - echo "KUBE_PASSWORD='${KUBE_PASSWORD}'" - echo "KUBE_BEARER_TOKEN='${KUBE_BEARER_TOKEN}'" - echo "ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING}'" - echo "ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'" - echo "ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-1}'" - echo "ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'" - echo "ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI}'" - echo "ENABLE_HOSTPATH_PROVISIONER='${ENABLE_HOSTPATH_PROVISIONER:-false}'" - echo "LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'" - echo "ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'" - echo "DNS_SERVER_IP='${DNS_SERVER_IP:-}'" - echo "DNS_DOMAIN='${DNS_DOMAIN:-}'" - echo 
"RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'" - echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'" - echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'" - echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'" - echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'" - echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'" - echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'" - echo "ENABLE_CPU_CFS_QUOTA='${ENABLE_CPU_CFS_QUOTA}'" - echo "NETWORK_PROVIDER='${NETWORK_PROVIDER:-}'" - echo "OPENCONTRAIL_TAG='${OPENCONTRAIL_TAG:-}'" - echo "OPENCONTRAIL_KUBERNETES_TAG='${OPENCONTRAIL_KUBERNETES_TAG:-}'" - echo "OPENCONTRAIL_PUBLIC_SUBNET='${OPENCONTRAIL_PUBLIC_SUBNET:-}'" - echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'" - echo "CUSTOM_FEDORA_REPOSITORY_URL='${CUSTOM_FEDORA_REPOSITORY_URL:-}'" - echo "EVICTION_HARD='${EVICTION_HARD:-}'" -} - -function verify-cluster { - # TODO: How does the user know the difference between "tak[ing] some - # time" and "loop[ing] forever"? Can we give more specific feedback on - # whether "an error" has occurred? - echo "Each machine instance has been created/updated." - echo " Now waiting for the Salt provisioning process to complete on each machine." - echo " This can take some time based on your network, disk, and cpu speed." - echo " It is possible for an error to occur during Salt provision of cluster and this could loop forever." - - # verify master has all required daemons - echo "Validating master" - local machine="master" - local -a required_processes=("kube-apiserver" "kube-scheduler" "kube-controller-manager" "kubelet" "docker") - local validated="1" - until [[ "$validated" == "0" ]]; do - validated="0" - for process in "${required_processes[@]}"; do - vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || { - printf "." 
- validated="1" - sleep 2 - } - done - done - - # verify each node has all required daemons - local i - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - echo "Validating ${VAGRANT_NODE_NAMES[$i]}" - local machine=${VAGRANT_NODE_NAMES[$i]} - local -a required_processes=("kube-proxy" "kubelet" "docker") - local validated="1" - until [[ "${validated}" == "0" ]]; do - validated="0" - for process in "${required_processes[@]}"; do - vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || { - printf "." - validated="1" - sleep 2 - } - done - done - done - - echo - echo "Waiting for each node to be registered with cloud provider" - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - local validated="0" - start="$(date +%s)" - until [[ "$validated" == "1" ]]; do - now="$(date +%s)" - # Timeout set to 3 minutes - if [ $((now - start)) -gt 180 ]; then - echo "Timeout while waiting for echo node to be registered with cloud provider" - exit 2 - fi - local nodes=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o name) - validated=$(echo $nodes | grep -c "${NODE_NAMES[i]}") || { - printf "." - sleep 2 - validated="0" - } - done - done - - # By this time, all kube api calls should work, so no need to loop and retry. - echo "Validating we can run kubectl commands." - vagrant ssh master --command "kubectl get pods" || { - echo "WARNING: kubectl to localhost failed. This could mean localhost is not bound to an IP" - } - - ( - # ensures KUBECONFIG is set - get-kubeconfig-basicauth - get-kubeconfig-bearertoken - echo - echo "Kubernetes cluster is running." 
- echo - echo "The master is running at:" - echo - echo " https://${MASTER_IP}" - echo - echo "Administer and visualize its resources using Cockpit:" - echo - echo " https://${MASTER_IP}:9090" - echo - echo "For more information on Cockpit, visit http://cockpit-project.org" - echo - echo "The user name and password to use is located in ${KUBECONFIG}" - echo - ) -} - -# Instantiate a kubernetes cluster -function kube-up { - load-or-gen-kube-basicauth - load-or-gen-kube-bearertoken - get-tokens - create-provision-scripts - - vagrant up --no-parallel - - export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt" - export KUBE_KEY="/tmp/$RANDOM-kubecfg.key" - export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt" - export CONTEXT="vagrant" - - ( - umask 077 - vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null - vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null - vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null - - # Update the user's kubeconfig to include credentials for this apiserver. - create-kubeconfig - ) - - verify-cluster -} - -# Delete a kubernetes cluster -function kube-down { - vagrant destroy -f -} - -# Update a kubernetes cluster with latest source -function kube-push { - get-kubeconfig-basicauth - get-kubeconfig-bearertoken - create-provision-scripts - vagrant provision -} - -# Execute prior to running tests to build a release if required for env -function test-build-release { - # Make a release - "${KUBE_ROOT}/build/release.sh" -} - -# Execute prior to running tests to initialize required structure -function test-setup { - "${KUBE_ROOT}/cluster/kube-up.sh" - echo "Vagrant test setup complete" 1>&2 -} - -# Execute after running tests to perform any required clean-up -function test-teardown { - kube-down -} - -# Find the node name based on the IP address -function find-vagrant-name-by-ip { - local ip="$1" - local ip_pattern="${NODE_IP_BASE}(.*)" - - # This is subtle. 
We map 10.245.2.2 -> node-1. We do this by matching a - # regexp and using the capture to construct the name. - [[ $ip =~ $ip_pattern ]] || { - return 1 - } - - echo "node-$((${BASH_REMATCH[1]} - 1))" -} - -# Find the vagrant machine name based on the host name of the node -function find-vagrant-name-by-node-name { - local ip="$1" - if [[ "$ip" == "${INSTANCE_PREFIX}-master" ]]; then - echo "master" - return $? - fi - local ip_pattern="${INSTANCE_PREFIX}-node-(.*)" - - [[ $ip =~ $ip_pattern ]] || { - return 1 - } - - echo "node-${BASH_REMATCH[1]}" -} - - -# SSH to a node by name or IP ($1) and run a command ($2). -function ssh-to-node { - local node="$1" - local cmd="$2" - local machine - - machine=$(find-vagrant-name-by-ip $node) || true - [[ -n ${machine-} ]] || machine=$(find-vagrant-name-by-node-name $node) || true - [[ -n ${machine-} ]] || { - echo "Cannot find machine to ssh to: $1" - return 1 - } - - vagrant ssh "${machine}" -c "${cmd}" -} - -# Perform preparations required to run e2e tests -function prepare-e2e() { - echo "Vagrant doesn't need special preparations for e2e tests" 1>&2 -} - -function get-tokens() { - KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) -} diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index 31b2dde1855..b6e87ef444a 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -225,7 +225,7 @@ func RegisterClusterFlags() { flag.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.") flag.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. 
Default is empty, which doesn't use certs.") flag.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.") - flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, vagrant, etc.)") + flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, etc.)") flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.") flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.") flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.") diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 637a9eace0b..62fa4c9abb3 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -3454,12 +3454,6 @@ func GetSigner(provider string) (ssh.Signer, error) { } // Otherwise revert to home dir keyfile = "kube_aws_rsa" - case "vagrant": - keyfile = os.Getenv("VAGRANT_SSH_KEY") - if len(keyfile) != 0 { - return sshutil.MakePrivateKeySignerFromFile(keyfile) - } - return nil, fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided") case "local", "vsphere": keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe? if len(keyfile) == 0 { From e826a77919785e651d3c5bad3deb65af8243b319 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Thu, 11 Jan 2018 13:51:03 +0530 Subject: [PATCH 731/794] Add custom volumename option to GlusterFS dynamic PVs. At present glusterfs dynamic PVs are created with random names. However an admin would like to have some handle on the volume names created dynamically for various purposes. One example would be having a filter for sorting out PVs created for a particular storage class. 
This patch enables the functionality by having a custom volume name as a prefix to dynamic PVs. This is an optional parameter in SC and if set, the dynamic volumes are created in below format where `_` is the field seperator/delimiter: customvolumeprefix_PVCname_randomUUID Signed-off-by: Humble Chirammal --- pkg/volume/glusterfs/glusterfs.go | 45 ++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 9afadd1f050..8f2618c765d 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -36,6 +36,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/util/mount" @@ -406,17 +407,18 @@ func (plugin *glusterfsPlugin) newProvisionerInternal(options volume.VolumeOptio } type provisionerConfig struct { - url string - user string - userKey string - secretNamespace string - secretName string - secretValue string - clusterID string - gidMin int - gidMax int - volumeType gapi.VolumeDurabilityInfo - volumeOptions []string + url string + user string + userKey string + secretNamespace string + secretName string + secretValue string + clusterID string + gidMin int + gidMax int + volumeType gapi.VolumeDurabilityInfo + volumeOptions []string + volumeNamePrefix string } type glusterfsVolumeProvisioner struct { @@ -743,6 +745,7 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, volID string, err error) { var clusterIDs []string + customVolumeName := "" capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // Glusterfs creates volumes in units of GiB, but heketi 
documentation incorrectly reports GBs sz := int(volume.RoundUpToGiB(capacity)) @@ -760,8 +763,13 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum clusterIDs = dstrings.Split(p.clusterID, ",") glog.V(4).Infof("provided clusterIDs: %v", clusterIDs) } + + if p.provisionerConfig.volumeNamePrefix != "" { + customVolumeName = fmt.Sprintf("%s_%s_%s", p.provisionerConfig.volumeNamePrefix, p.options.PVC.Name, uuid.NewUUID()) + } + gid64 := int64(gid) - volumeReq := &gapi.VolumeCreateRequest{Size: sz, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions} + volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions} volume, err := cli.VolumeCreate(volumeReq) if err != nil { glog.Errorf("error creating volume %v ", err) @@ -927,6 +935,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa authEnabled := true parseVolumeType := "" parseVolumeOptions := "" + parseVolumeNamePrefix := "" for k, v := range params { switch dstrings.ToLower(k) { @@ -977,7 +986,10 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa if len(v) != 0 { parseVolumeOptions = v } - + case "volumenameprefix": + if len(v) != 0 { + parseVolumeNamePrefix = v + } default: return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName) } @@ -1057,6 +1069,13 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa cfg.volumeOptions = volOptions } + + if len(parseVolumeNamePrefix) != 0 { + if dstrings.Contains(parseVolumeNamePrefix, "_") { + return nil, fmt.Errorf("Storageclass parameter 'volumenameprefix' should not contain '_' in its value") + } + cfg.volumeNamePrefix = parseVolumeNamePrefix + } return &cfg, nil } From 66c7fdb8920cca5b095a29f078950747fb29798a Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: 
Thu, 11 Jan 2018 14:14:29 +0530 Subject: [PATCH 732/794] Update bazel. Signed-off-by: Humble Chirammal --- pkg/volume/glusterfs/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/volume/glusterfs/BUILD b/pkg/volume/glusterfs/BUILD index a57114c412e..19638dd3d16 100644 --- a/pkg/volume/glusterfs/BUILD +++ b/pkg/volume/glusterfs/BUILD @@ -32,6 +32,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) From 68eaf536711b9ef8d0ad12fa5c89684750d17c4b Mon Sep 17 00:00:00 2001 From: zoues Date: Thu, 11 Jan 2018 21:52:47 +0800 Subject: [PATCH 733/794] remove provides which has been deleted --- cluster/kube-up.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/cluster/kube-up.sh b/cluster/kube-up.sh index a2813f99e2f..81a33d3f3fa 100755 --- a/cluster/kube-up.sh +++ b/cluster/kube-up.sh @@ -34,13 +34,7 @@ source "${KUBE_ROOT}/cluster/kube-util.sh" DEPRECATED_PROVIDERS=( "centos" - "libvirt-coreos" "local" - "openstack-heat" - "photon-controller" - "vagrant" - "vsphere" - "windows" ) for provider in "${DEPRECATED_PROVIDERS[@]}"; do From 41cb533ad683fbf5ddc61a293e5d1da026cae7f6 Mon Sep 17 00:00:00 2001 From: mtanino Date: Wed, 10 Jan 2018 10:16:43 -0500 Subject: [PATCH 734/794] [FC Plugin] Create proper volumeSpec during ConstructVolumeSpec Currently, FC plugin returns volume name and empty FCVolumeSource during ConstrutVolumeSpec during filesystem volume's reconstruction. In this fix, ConstructVolumeSpec retrieves global mount path, analyzes volume parameters such as WWN, LUN, WWID from the path. 
Fixes #58085 --- pkg/volume/fc/fc.go | 62 +++++++++++++++++++++++++++++---- pkg/volume/fc/fc_test.go | 74 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 129 insertions(+), 7 deletions(-) diff --git a/pkg/volume/fc/fc.go b/pkg/volume/fc/fc.go index 8772ce91558..5aee9ba92b3 100644 --- a/pkg/volume/fc/fc.go +++ b/pkg/volume/fc/fc.go @@ -235,11 +235,59 @@ func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, ma } func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { - fcVolume := &v1.Volume{ - Name: volumeName, - VolumeSource: v1.VolumeSource{ - FC: &v1.FCVolumeSource{}, - }, + // Find globalPDPath from pod volume directory(mountPath) + // examples: + // mountPath: pods/{podUid}/volumes/kubernetes.io~fc/{volumeName} + // globalPDPath : plugins/kubernetes.io/fc/50060e801049cfd1-lun-0 + var globalPDPath string + mounter := plugin.host.GetMounter(plugin.GetPluginName()) + paths, err := mount.GetMountRefs(mounter, mountPath) + if err != nil { + return nil, err + } + for _, path := range paths { + if strings.Contains(path, plugin.host.GetPluginDir(fcPluginName)) { + globalPDPath = path + break + } + } + // Couldn't fetch globalPDPath + if len(globalPDPath) == 0 { + return nil, fmt.Errorf("couldn't fetch globalPDPath. failed to obtain volume spec") + } + arr := strings.Split(globalPDPath, "/") + if len(arr) < 1 { + return nil, fmt.Errorf("failed to retrieve volume plugin information from globalPDPath: %v", globalPDPath) + } + volumeInfo := arr[len(arr)-1] + // Create volume from wwn+lun or wwid + var fcVolume *v1.Volume + if strings.Contains(volumeInfo, "-lun-") { + wwnLun := strings.Split(volumeInfo, "-lun-") + if len(wwnLun) < 2 { + return nil, fmt.Errorf("failed to retrieve TargetWWN and Lun. 
volumeInfo is invalid: %v", volumeInfo) + } + lun, err := strconv.Atoi(wwnLun[1]) + if err != nil { + return nil, err + } + lun32 := int32(lun) + fcVolume = &v1.Volume{ + Name: volumeName, + VolumeSource: v1.VolumeSource{ + FC: &v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32}, + }, + } + glog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v", + fcVolume.VolumeSource.FC.TargetWWNs, *fcVolume.VolumeSource.FC.Lun) + } else { + fcVolume = &v1.Volume{ + Name: volumeName, + VolumeSource: v1.VolumeSource{ + FC: &v1.FCVolumeSource{WWIDs: []string{volumeInfo}}, + }, + } + glog.V(5).Infof("ConstructVolumeSpec: WWIDs: %v", fcVolume.VolumeSource.FC.WWIDs) } return volume.NewSpecFromVolume(fcVolume), nil } @@ -249,7 +297,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu // - If a file is found, then retreives volumePluginDependentPath from globalMapPathUUID. // - Once volumePluginDependentPath is obtained, store volume information to VolumeSource // examples: -// mapPath: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} +// mapPath: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} // globalMapPathUUID : plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { pluginDir := plugin.host.GetVolumeDevicePluginDir(fcPluginName) @@ -284,7 +332,7 @@ func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, m v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32}) glog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v", fcPV.Spec.PersistentVolumeSource.FC.TargetWWNs, - fcPV.Spec.PersistentVolumeSource.FC.Lun) + *fcPV.Spec.PersistentVolumeSource.FC.Lun) } else { fcPV = createPersistentVolumeFromFCVolumeSource(volumeName, 
v1.FCVolumeSource{WWIDs: []string{volumeInfo}}) diff --git a/pkg/volume/fc/fc_test.go b/pkg/volume/fc/fc_test.go index 42a530bc4a5..0f12042432c 100644 --- a/pkg/volume/fc/fc_test.go +++ b/pkg/volume/fc/fc_test.go @@ -19,6 +19,8 @@ package fc import ( "fmt" "os" + "strconv" + "strings" "testing" "k8s.io/api/core/v1" @@ -412,3 +414,75 @@ func Test_getWwnsLunWwidsError(t *testing.T) { t.Errorf("unexpected fc disk found") } } + +func Test_ConstructVolumeSpec(t *testing.T) { + fm := &mount.FakeMounter{ + MountPoints: []mount.MountPoint{ + {Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"}, + {Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50060e801049cfd1-lun-0"}, + {Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2"}, + {Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000"}, + }, + } + mountPaths := []string{ + "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1", + "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2", + } + for _, path := range mountPaths { + refs, _ := mount.GetMountRefs(fm, path) + var globalPDPath string + for _, ref := range refs { + if strings.Contains(ref, "kubernetes.io/fc") { + globalPDPath = ref + break + } + } + if len(globalPDPath) == 0 { + t.Errorf("couldn't fetch mountrefs") + } + arr := strings.Split(globalPDPath, "/") + if len(arr) < 1 { + t.Errorf("failed to retrieve volume plugin information from globalPDPath: %v", globalPDPath) + } + volumeInfo := arr[len(arr)-1] + if strings.Contains(volumeInfo, "-lun-") { + wwnLun := strings.Split(volumeInfo, "-lun-") + if len(wwnLun) < 2 { + t.Errorf("failed to retrieve TargetWWN and Lun. 
volumeInfo is invalid: %v", volumeInfo) + } + lun, _ := strconv.Atoi(wwnLun[1]) + lun32 := int32(lun) + if wwnLun[0] != "50060e801049cfd1" || lun32 != 0 { + t.Errorf("failed to retrieve TargetWWN and Lun") + } + } else { + if volumeInfo != "3600508b400105e210000900000490000" { + t.Errorf("failed to retrieve WWIDs") + } + } + } +} + +func Test_ConstructVolumeSpecNoRefs(t *testing.T) { + fm := &mount.FakeMounter{ + MountPoints: []mount.MountPoint{ + {Device: "/dev/sdd", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"}, + }, + } + mountPaths := []string{ + "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1", + } + for _, path := range mountPaths { + refs, _ := mount.GetMountRefs(fm, path) + var globalPDPath string + for _, ref := range refs { + if strings.Contains(ref, "kubernetes.io/fc") { + globalPDPath = ref + break + } + } + if len(globalPDPath) != 0 { + t.Errorf("invalid globalPDPath") + } + } +} From eb0ac60175d50340a11fdb731b2c3320f92d4993 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Wed, 10 Jan 2018 15:15:50 -0800 Subject: [PATCH 735/794] remove OpenAPI import from types --- pkg/api/unversioned/BUILD | 6 +---- pkg/api/unversioned/time.go | 22 ++++++++----------- .../apimachinery/pkg/api/resource/BUILD | 2 -- .../apimachinery/pkg/api/resource/quantity.go | 22 ++++++++----------- .../apimachinery/pkg/apis/meta/v1/BUILD | 2 -- .../pkg/apis/meta/v1/micro_time.go | 22 ++++++++----------- .../apimachinery/pkg/apis/meta/v1/time.go | 22 ++++++++----------- .../k8s.io/apimachinery/pkg/util/intstr/BUILD | 2 -- .../apimachinery/pkg/util/intstr/intstr.go | 22 ++++++++----------- 9 files changed, 46 insertions(+), 76 deletions(-) diff --git a/pkg/api/unversioned/BUILD b/pkg/api/unversioned/BUILD index 874384b90fb..c0d661e8f8e 100644 --- a/pkg/api/unversioned/BUILD +++ b/pkg/api/unversioned/BUILD @@ -13,11 +13,7 @@ go_library( "types.go", ], importpath = "k8s.io/kubernetes/pkg/api/unversioned", - deps = [ - 
"//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/google/gofuzz:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", - ], + deps = ["//vendor/github.com/google/gofuzz:go_default_library"], ) filegroup( diff --git a/pkg/api/unversioned/time.go b/pkg/api/unversioned/time.go index 32f9edb7453..34da5ba3985 100644 --- a/pkg/api/unversioned/time.go +++ b/pkg/api/unversioned/time.go @@ -20,9 +20,6 @@ import ( "encoding/json" "time" - openapi "k8s.io/kube-openapi/pkg/common" - - "github.com/go-openapi/spec" "github.com/google/gofuzz" ) @@ -141,16 +138,15 @@ func (t Time) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(time.RFC3339)) } -func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "date-time", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+func (_ Time) OpenAPISchemaFormat() string { return "date-time" } // MarshalQueryParameter converts to a URL query parameter value func (t Time) MarshalQueryParameter() (string, error) { diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD b/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD index fab98203507..2ae7638537e 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/BUILD @@ -38,11 +38,9 @@ go_library( ], importpath = "k8s.io/apimachinery/pkg/api/resource", deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/gopkg.in/inf.v0:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go index 682ee9aa646..6a8bb997218 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -27,9 +27,7 @@ import ( flag "github.com/spf13/pflag" - "github.com/go-openapi/spec" inf "gopkg.in/inf.v0" - openapi "k8s.io/kube-openapi/pkg/common" ) // Quantity is a fixed-point representation of a number. @@ -399,17 +397,15 @@ func (q Quantity) DeepCopy() Quantity { return q } -// OpenAPIDefinition returns openAPI definition for this type. -func (_ Quantity) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Quantity) OpenAPISchemaFormat() string { return "" } // CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). // diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD index c851816d782..1c49035bbc2 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -53,7 +53,6 @@ go_library( ], importpath = "k8s.io/apimachinery/pkg/apis/meta/v1", deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", @@ -67,7 +66,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index a09d79571c3..7e5bc2d4e7f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -20,9 +20,6 @@ import ( "encoding/json" "time" - openapi "k8s.io/kube-openapi/pkg/common" - - "github.com/go-openapi/spec" "github.com/google/gofuzz" ) @@ -149,16 +146,15 @@ func (t MicroTime) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(RFC3339Micro)) } -func (_ MicroTime) OpenAPIDefinition() 
openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "date-time", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ MicroTime) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ MicroTime) OpenAPISchemaFormat() string { return "date-time" } // MarshalQueryParameter converts to a URL query parameter value func (t MicroTime) MarshalQueryParameter() (string, error) { diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 0a9f2a37756..5041954f763 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -20,9 +20,6 @@ import ( "encoding/json" "time" - openapi "k8s.io/kube-openapi/pkg/common" - - "github.com/go-openapi/spec" "github.com/google/gofuzz" ) @@ -151,16 +148,15 @@ func (t Time) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(time.RFC3339)) } -func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "date-time", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+func (_ Time) OpenAPISchemaFormat() string { return "date-time" } // MarshalQueryParameter converts to a URL query parameter value func (t Time) MarshalQueryParameter() (string, error) { diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD index 8c66be54fc8..b4fe3922fff 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD @@ -22,11 +22,9 @@ go_library( ], importpath = "k8s.io/apimachinery/pkg/util/intstr", deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 04a77bb6b4b..231498ca032 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -24,9 +24,6 @@ import ( "strconv" "strings" - openapi "k8s.io/kube-openapi/pkg/common" - - "github.com/go-openapi/spec" "github.com/golang/glog" "github.com/google/gofuzz" ) @@ -120,16 +117,15 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } -func (_ IntOrString) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "int-or-string", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } func (intstr *IntOrString) Fuzz(c fuzz.Continue) { if intstr == nil { From e1dda7e3be5cded77116cc48370372cb10992c17 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Wed, 10 Jan 2018 15:17:37 -0800 Subject: [PATCH 736/794] bump(k8s.io/kube-openapi): a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3 --- Godeps/Godeps.json | 16 ++-- staging/src/k8s.io/api/Godeps/Godeps.json | 80 ----------------- .../Godeps/Godeps.json | 10 +-- .../k8s.io/apimachinery/Godeps/Godeps.json | 78 +--------------- .../src/k8s.io/apiserver/Godeps/Godeps.json | 10 +-- .../src/k8s.io/client-go/Godeps/Godeps.json | 78 +--------------- .../k8s.io/code-generator/Godeps/Godeps.json | 4 +- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 12 +-- staging/src/k8s.io/metrics/Godeps/Godeps.json | 76 ---------------- .../sample-apiserver/Godeps/Godeps.json | 10 +-- .../sample-controller/Godeps/Godeps.json | 78 +--------------- .../k8s.io/kube-openapi/pkg/generators/README | 31 +++++++ .../kube-openapi/pkg/generators/openapi.go | 90 +++++++++++-------- .../kube-openapi/pkg/util/proto/document.go | 14 ++- .../kube-openapi/pkg/util/proto/openapi.go | 25 ++++++ .../pkg/util/proto/validation/types.go | 13 ++- 16 files changed, 165 insertions(+), 460 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 45b60e8306b..71f3a14418e 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -3212,35 +3212,35 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/aggregator", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": 
"39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/generators", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto/validation", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/utils/clock", diff --git a/staging/src/k8s.io/api/Godeps/Godeps.json b/staging/src/k8s.io/api/Godeps/Godeps.json index ffa35e2d980..2a066b367ce 100644 --- a/staging/src/k8s.io/api/Godeps/Godeps.json +++ b/staging/src/k8s.io/api/Godeps/Godeps.json @@ -6,38 +6,6 @@ "./..." 
], "Deps": [ - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" @@ -54,18 +22,6 @@ "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" @@ -86,34 +42,10 @@ "ImportPath": "golang.org/x/net/lex/httplex", "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - 
"ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -126,18 +58,10 @@ "ImportPath": "golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "gopkg.in/inf.v0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77" - }, { "ImportPath": "k8s.io/apimachinery/pkg/api/resource", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -218,10 +142,6 @@ "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" - }, { "ImportPath": "k8s.io/apimachinery/pkg/api/resource", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 770352cf7d1..c4bd7aa4cc2 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -1632,23 +1632,23 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" 
+ "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/equality", diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index a264f944b4e..bf8fe584f6f 100644 --- a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -6,14 +6,6 @@ "./..." ], "Deps": [ - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" @@ -30,14 +22,6 @@ "ImportPath": "github.com/elazarl/goproxy", "Rev": "c4fc26588b6ef8af07a191fcb6476387bdd46711" }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, { "ImportPath": "github.com/evanphx/json-patch", "Rev": "944e07253867aacae43c04b2e6a239005443f33a" @@ -46,22 +30,6 @@ "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { 
- "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" @@ -126,18 +94,6 @@ "ImportPath": "github.com/json-iterator/go", "Rev": "13f86432b882000a51c6e610c620974462691a97" }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, { "ImportPath": "github.com/mxk/go-flowrate/flowrate", "Rev": "cca7078d478f8520f85629ad7c68962d31ed7682" @@ -190,34 +146,10 @@ "ImportPath": "golang.org/x/net/websocket", "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -230,10 +162,6 @@ "ImportPath": "golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": 
"golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "gopkg.in/inf.v0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" @@ -242,13 +170,9 @@ "ImportPath": "gopkg.in/yaml.v2", "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77" }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" - }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index b1fcb0a32f1..68ec6fbdf25 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -1764,23 +1764,23 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/client-go/discovery", diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index 4fae7ddb897..efc5a5dacba 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -30,14 +30,6 @@ "ImportPath": 
"github.com/Azure/go-autorest/autorest/date", "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" }, - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, { "ImportPath": "github.com/coreos/go-oidc/http", "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" @@ -86,34 +78,10 @@ "ImportPath": "github.com/docker/spdystream/spdy", "Rev": "449fdfce4d962303d702fec724ef0ad181c92528" }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, { "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" @@ -222,18 +190,6 @@ "ImportPath": "github.com/juju/ratelimit", "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, { "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" @@ -302,34 +258,10 @@ 
"ImportPath": "golang.org/x/sys/windows", "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -342,10 +274,6 @@ "ImportPath": "golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "gopkg.in/inf.v0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" @@ -682,13 +610,9 @@ "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" - }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/staging/src/k8s.io/code-generator/Godeps/Godeps.json b/staging/src/k8s.io/code-generator/Godeps/Godeps.json index 506e4b88556..c30a706ba89 100644 --- a/staging/src/k8s.io/code-generator/Godeps/Godeps.json +++ b/staging/src/k8s.io/code-generator/Godeps/Godeps.json @@ -260,11 +260,11 @@ }, { "ImportPath": 
"k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/generators", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index abddac483d5..c28bf8966ae 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -1620,27 +1620,27 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/aggregator", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index ce359bb79d8..60300dfceee 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -6,42 +6,10 @@ "./..." 
], "Deps": [ - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, { "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" @@ -98,18 +66,6 @@ "ImportPath": "github.com/juju/ratelimit", "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" @@ -130,34 +86,10 @@ "ImportPath": "golang.org/x/net/lex/httplex", "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": 
"golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -170,10 +102,6 @@ "ImportPath": "golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "gopkg.in/inf.v0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" @@ -497,10 +425,6 @@ { "ImportPath": "k8s.io/client-go/util/integer", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" - }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" } ] } diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index ce731cb79fd..c7366214eff 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -1608,23 +1608,23 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - 
"Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index ce34ffc7dfd..a6be9f90028 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -6,46 +6,14 @@ "./..." ], "Deps": [ - { - "ImportPath": "github.com/PuerkitoBio/purell", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" - }, - { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" - }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" - }, { "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" - }, - { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" - }, - { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" - }, - { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" - }, { "ImportPath": "github.com/gogo/protobuf/proto", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" @@ -122,18 +90,6 @@ "ImportPath": "github.com/juju/ratelimit", "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" }, - { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": 
"2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, - { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" @@ -170,34 +126,10 @@ "ImportPath": "golang.org/x/sys/windows", "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, - { - "ImportPath": "golang.org/x/text/cases", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/language", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, - { - "ImportPath": "golang.org/x/text/runes", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/secure/bidirule", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "golang.org/x/text/transform", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -210,10 +142,6 @@ "ImportPath": "golang.org/x/text/unicode/norm", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, - { - "ImportPath": "golang.org/x/text/width", - "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" - }, { "ImportPath": "gopkg.in/inf.v0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" @@ -966,13 +894,9 @@ "ImportPath": "k8s.io/client-go/util/workqueue", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, - { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" - }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": 
"a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" } ] } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/README b/vendor/k8s.io/kube-openapi/pkg/generators/README index 35660a40da7..feb19b401a9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/README +++ b/vendor/k8s.io/kube-openapi/pkg/generators/README @@ -11,5 +11,36 @@ escape or quote the value string. Extensions can be used to pass more informatio documentation generators. For example a type might have a friendly name to be displayed in documentation or being used in a client's fluent interface. +# Custom OpenAPI type definitions + +Custom types which otherwise don't map directly to OpenAPI can override their +OpenAPI definition by implementing a function named "OpenAPIDefinition" with +the following signature: + + import openapi "k8s.io/kube-openapi/pkg/common" + + // ... + + type Time struct { + time.Time + } + + func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "date-time", + }, + }, + } + } + +Alternatively, the type can avoid the "openapi" import by defining the following +methods. The following example produces the same OpenAPI definition as the +example above: + + func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + func (_ Time) OpenAPISchemaFormat() string { return "date-time" } TODO(mehdy): Make k8s:openapi-gen a parameter to the generator now that OpenAPI has its own repo. diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index 5efb3f45c6f..d9b0980abb4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -118,35 +118,13 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat `)...) 
- outputPath := arguments.OutputPackagePath - - if err := context.AddDir(outputPath); err != nil { - glog.Fatalf("Failed to load output package: %v", err) - } - - // Compute the canonical output path to allow retrieval of the - // package for a vendored output path. - const vendorPath = "/vendor/" - canonicalOutputPath := outputPath - if strings.Contains(outputPath, vendorPath) { - canonicalOutputPath = outputPath[strings.Index(outputPath, vendorPath)+len(vendorPath):] - } - - // The package for outputPath is mapped to the canonical path - pkg := context.Universe[canonicalOutputPath] - if pkg == nil { - glog.Fatalf("Got nil output package: %v", err) - } return generator.Packages{ &generator.DefaultPackage{ - PackageName: strings.Split(filepath.Base(pkg.Path), ".")[0], - // Use the supplied output path rather than the canonical - // one to allow generation into the path of a - // vendored package. - PackagePath: outputPath, + PackageName: filepath.Base(arguments.OutputPackagePath), + PackagePath: arguments.OutputPackagePath, HeaderText: header, GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{NewOpenAPIGen(arguments.OutputFileBaseName, pkg, context)} + return []generator.Generator{NewOpenAPIGen(arguments.OutputFileBaseName, arguments.OutputPackagePath, context)} }, FilterFunc: func(c *generator.Context, t *types.Type) bool { // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen @@ -175,12 +153,12 @@ const ( type openAPIGen struct { generator.DefaultGen // TargetPackage is the package that will get GetOpenAPIDefinitions function returns all open API definitions. 
- targetPackage *types.Package + targetPackage string imports namer.ImportTracker context *generator.Context } -func NewOpenAPIGen(sanitizedName string, targetPackage *types.Package, context *generator.Context) generator.Generator { +func NewOpenAPIGen(sanitizedName string, targetPackage string, context *generator.Context) generator.Generator { return &openAPIGen{ DefaultGen: generator.DefaultGen{ OptionalName: sanitizedName, @@ -194,7 +172,7 @@ func NewOpenAPIGen(sanitizedName string, targetPackage *types.Package, context * func (g *openAPIGen) Namers(c *generator.Context) namer.NameSystems { // Have the raw namer for this file track what it imports. return namer.NameSystems{ - "raw": namer.NewRawNamer(g.targetPackage.Path, g.imports), + "raw": namer.NewRawNamer(g.targetPackage, g.imports), } } @@ -207,10 +185,10 @@ func (g *openAPIGen) Filter(c *generator.Context, t *types.Type) bool { } func (g *openAPIGen) isOtherPackage(pkg string) bool { - if pkg == g.targetPackage.Path { + if pkg == g.targetPackage { return false } - if strings.HasSuffix(pkg, "\""+g.targetPackage.Path+"\"") { + if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") { return false } return true @@ -300,23 +278,37 @@ func newOpenAPITypeWriter(sw *generator.SnippetWriter) openAPITypeWriter { } } +func methodReturnsValue(mt *types.Type, pkg, name string) bool { + if len(mt.Signature.Parameters) != 0 || len(mt.Signature.Results) != 1 { + return false + } + r := mt.Signature.Results[0] + return r.Name.Name == name && r.Name.Package == pkg +} + func hasOpenAPIDefinitionMethod(t *types.Type) bool { for mn, mt := range t.Methods { if mn != "OpenAPIDefinition" { continue } - if len(mt.Signature.Parameters) != 0 || len(mt.Signature.Results) != 1 { - return false - } - r := mt.Signature.Results[0] - if r.Name.Name != "OpenAPIDefinition" || r.Name.Package != openAPICommonPackagePath { - return false - } - return true + return methodReturnsValue(mt, openAPICommonPackagePath, "OpenAPIDefinition") } return 
false } +func hasOpenAPIDefinitionMethods(t *types.Type) bool { + var hasSchemaTypeMethod, hasOpenAPISchemaFormat bool + for mn, mt := range t.Methods { + switch mn { + case "OpenAPISchemaType": + hasSchemaTypeMethod = methodReturnsValue(mt, "", "[]string") + case "OpenAPISchemaFormat": + hasOpenAPISchemaFormat = methodReturnsValue(mt, "", "string") + } + } + return hasSchemaTypeMethod && hasOpenAPISchemaFormat +} + // typeShortName returns short package name (e.g. the name x appears in package x definition) dot type name. func typeShortName(t *types.Type) string { return filepath.Base(t.Name.Package) + "." + t.Name.Name @@ -360,6 +352,28 @@ func (g openAPITypeWriter) generate(t *types.Type) error { g.Do("$.type|raw${}.OpenAPIDefinition(),\n", args) return nil } + if hasOpenAPIDefinitionMethods(t) { + // Since this generated snippet is part of a map: + // + // map[string]common.OpenAPIDefinition: { + // "TYPE_NAME": { + // Schema: spec.Schema{ ... }, + // }, + // } + // + // For compliance with gofmt -s it's important we elide the + // struct type. The type is implied by the map and will be + // removed otherwise. 
+ g.Do("{\n"+ + "Schema: spec.Schema{\n"+ + "SchemaProps: spec.SchemaProps{\n"+ + "Type:$.type|raw${}.OpenAPISchemaType(),\n"+ + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "},\n", args) + return nil + } g.Do("{\nSchema: spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) g.generateDescription(t.CommentLines) g.Do("Properties: map[string]$.SpecSchemaType|raw${\n", args) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 5f607c76701..61dbf4fc0e4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -210,11 +210,18 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error }, nil } +func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + return &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + }, nil +} + // ParseSchema creates a walkable Schema from an openapi schema. While // this function is public, it doesn't leak through the interface. 
func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { - if len(s.GetType().GetValue()) == 1 { - t := s.GetType().GetValue()[0] + objectTypes := s.GetType().GetValue() + if len(objectTypes) == 1 { + t := objectTypes[0] switch t { case object: return d.parseMap(s, path) @@ -229,6 +236,9 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err if s.GetProperties() != nil { return d.parseKind(s, path) } + if len(objectTypes) == 0 || (len(objectTypes) == 1 && objectTypes[0] == "") { + return d.parseArbitrary(s, path) + } return d.parsePrimitive(s, path) } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go index 02ab06d6d53..b48e62c3bf9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -58,6 +58,14 @@ type SchemaVisitor interface { VisitReference(Reference) } +// SchemaVisitorArbitrary is an additional visitor interface which handles +// arbitrary types. For backwards compatability, it's a separate interface +// which is checked for at runtime. +type SchemaVisitorArbitrary interface { + SchemaVisitor + VisitArbitrary(*Arbitrary) +} + // Schema is the base definition of an openapi type. type Schema interface { // Giving a visitor here will let you visit the actual type. @@ -242,6 +250,23 @@ func (p *Primitive) GetName() string { return fmt.Sprintf("%s (%s)", p.Type, p.Format) } +// Arbitrary is a value of any type (primitive, object or array) +type Arbitrary struct { + BaseSchema +} + +var _ Schema = &Arbitrary{} + +func (a *Arbitrary) Accept(v SchemaVisitor) { + if visitor, ok := v.(SchemaVisitorArbitrary); ok { + visitor.VisitArbitrary(a) + } +} + +func (a *Arbitrary) GetName() string { + return "Arbitrary value (primitive, object or array)" +} + // Reference implementation depends on the type of document. 
type Reference interface { Schema diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go index 0be7a5302f1..bbbdd4f61c9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go @@ -127,6 +127,9 @@ func (item *mapItem) VisitKind(schema *proto.Kind) { } } +func (item *mapItem) VisitArbitrary(schema *proto.Arbitrary) { +} + func (item *mapItem) VisitReference(schema proto.Reference) { // passthrough schema.SubSchema().Accept(item) @@ -163,11 +166,14 @@ func (item *arrayItem) VisitArray(schema *proto.Array) { } func (item *arrayItem) VisitMap(schema *proto.Map) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) } func (item *arrayItem) VisitKind(schema *proto.Kind) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) +} + +func (item *arrayItem) VisitArbitrary(schema *proto.Arbitrary) { } func (item *arrayItem) VisitReference(schema proto.Reference) { @@ -226,6 +232,9 @@ func (item *primitiveItem) VisitKind(schema *proto.Kind) { item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) } +func (item *primitiveItem) VisitArbitrary(schema *proto.Arbitrary) { +} + func (item *primitiveItem) VisitReference(schema proto.Reference) { // passthrough schema.SubSchema().Accept(item) From 3dd6e98ea011ff891a08668f7916f090f04e5652 Mon Sep 17 00:00:00 2001 From: abhi Date: Mon, 11 Dec 2017 13:20:23 -0800 Subject: [PATCH 737/794] Fixing logs for cri stats Signed-off-by: abhi --- pkg/kubelet/stats/cri_stats_provider.go | 8 
++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/kubelet/stats/cri_stats_provider.go b/pkg/kubelet/stats/cri_stats_provider.go index 5934c2962bf..152344c3c31 100644 --- a/pkg/kubelet/stats/cri_stats_provider.go +++ b/pkg/kubelet/stats/cri_stats_provider.go @@ -123,14 +123,14 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { containerID := stats.Attributes.Id container, found := containerMap[containerID] if !found { - glog.Errorf("Unknown id %q in container map.", containerID) + glog.Errorf("Unable to find container id %q in container stats list", containerID) continue } podSandboxID := container.PodSandboxId podSandbox, found := podSandboxMap[podSandboxID] if !found { - glog.Errorf("Unknown id %q in pod sandbox map.", podSandboxID) + glog.Errorf("Unable to find pod sandbox id %q in pod stats list", podSandboxID) continue } @@ -142,7 +142,7 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // Fill stats from cadvisor is available for full set of required pod stats caPodSandbox, found := caInfos[podSandboxID] if !found { - glog.V(4).Info("Unable to find cadvisor stats for sandbox %q", podSandboxID) + glog.V(4).Infof("Unable to find cadvisor stats for sandbox %q", podSandboxID) } else { p.addCadvisorPodStats(ps, &caPodSandbox) } @@ -153,7 +153,7 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // container stats caStats, caFound := caInfos[containerID] if !caFound { - glog.V(4).Info("Unable to find cadvisor stats for %q", containerID) + glog.V(4).Infof("Unable to find cadvisor stats for %q", containerID) } else { p.addCadvisorContainerStats(cs, &caStats) } From 28465d8b39f55ae1fb926905e493caaa39d8e05a Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Thu, 11 Jan 2018 10:13:10 -0800 Subject: [PATCH 738/794] Fix golint errors on test/e2e/e2e.go When running golint on test/e2e/e2e.go, the following erros were faced: $ golint e2e.go e2e.go:329:2: var metricsJson 
should be metricsJSON e2e.go:342:1: comment on exported function RunE2ETests should be of the form "RunE2ETests ..." This PR fixes them. --- test/e2e/e2e.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 63211b6892e..bb57e7e6b19 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -288,20 +288,20 @@ func gatherTestSuiteMetrics() error { } metricsForE2E := (*framework.MetricsForE2E)(&received) - metricsJson := metricsForE2E.PrintJSON() + metricsJSON := metricsForE2E.PrintJSON() if framework.TestContext.ReportDir != "" { filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json") - if err := ioutil.WriteFile(filePath, []byte(metricsJson), 0644); err != nil { + if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil { return fmt.Errorf("error writing to %q: %v", filePath, err) } } else { - framework.Logf("\n\nTest Suite Metrics:\n%s\n\n", metricsJson) + framework.Logf("\n\nTest Suite Metrics:\n%s\n\n", metricsJSON) } return nil } -// TestE2E checks configuration parameters (specified through flags) and then runs +// RunE2ETests checks configuration parameters (specified through flags) and then runs // E2E tests using the Ginkgo runner. // If a "report directory" is specified, one or more JUnit test reports will be // generated in this directory, and cluster logs will also be saved. 
From fb56f679aa50c783bf21bb2d102bbeebe2e4dc3d Mon Sep 17 00:00:00 2001 From: mbohlool Date: Thu, 11 Jan 2018 13:40:26 -0800 Subject: [PATCH 739/794] Fix CHANGELOG urls for release 1.9.1 --- CHANGELOG-1.9.md | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/CHANGELOG-1.9.md b/CHANGELOG-1.9.md index 0094d62b279..ee713833ff5 100644 --- a/CHANGELOG-1.9.md +++ b/CHANGELOG-1.9.md @@ -149,44 +149,44 @@ filename | sha256 hash -------- | ----------- -[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes.tar.gz) | `0eece0e6c1f68535ea71b58b87e239019bb57fdd61118f3d7defa6bbf4fad5ee` -[kubernetes-src.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-src.tar.gz) | `625ebb79412bd12feccf12e8b6a15d9c71ea681b571f34deaa59fe6c9ba55935` +[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes.tar.gz) | `0eece0e6c1f68535ea71b58b87e239019bb57fdd61118f3d7defa6bbf4fad5ee` +[kubernetes-src.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-src.tar.gz) | `625ebb79412bd12feccf12e8b6a15d9c71ea681b571f34deaa59fe6c9ba55935` ### Client Binaries filename | sha256 hash -------- | ----------- -[kubernetes-client-darwin-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-darwin-386.tar.gz) | `909556ed9b8445703d0124f2d8c1901b00afaba63a9123a4296be8663c3a2b2d` -[kubernetes-client-darwin-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-darwin-amd64.tar.gz) | `71e191d99d3ac1426e23e087b8d0875e793e5615d3aa7ac1e175b250f9707c48` -[kubernetes-client-linux-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-386.tar.gz) | `1c4e60c0c056a3300c7fcc9faccd1b1ea2b337e1360c20c5b1c25fdc47923cf0` 
-[kubernetes-client-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-amd64.tar.gz) | `fe8fe40148df404b33069931ea30937699758ed4611ef6baddb4c21b7b19db5e` -[kubernetes-client-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-arm64.tar.gz) | `921f5711b97f0b4de69784d9c79f95e80f75a550f28fc1f26597aa0ef6faa471` -[kubernetes-client-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-arm.tar.gz) | `77b010cadef98dc832a2f560afe15e57a675ed9fbc59ffad5e19878510997874` -[kubernetes-client-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-ppc64le.tar.gz) | `02aa71ddcbe8b711814af7287aac79de5d99c1c143c0d3af5e14b1ff195b8bdc` -[kubernetes-client-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-linux-s390x.tar.gz) | `7e315024267306a620045d003785ecc8d7f2e763a6108ae806d5d384aa7552cc` -[kubernetes-client-windows-386.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-windows-386.tar.gz) | `99b2a81b7876498e119db4cb34c434b3790bc41cd882384037c1c1b18cba9f99` -[kubernetes-client-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-client-windows-amd64.tar.gz) | `d89d303cbbf9e57e5a540277158e4d83ad18ca7402b5b54665f1378bb4528599` +[kubernetes-client-darwin-386.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-darwin-386.tar.gz) | `909556ed9b8445703d0124f2d8c1901b00afaba63a9123a4296be8663c3a2b2d` +[kubernetes-client-darwin-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-darwin-amd64.tar.gz) | `71e191d99d3ac1426e23e087b8d0875e793e5615d3aa7ac1e175b250f9707c48` 
+[kubernetes-client-linux-386.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-386.tar.gz) | `1c4e60c0c056a3300c7fcc9faccd1b1ea2b337e1360c20c5b1c25fdc47923cf0` +[kubernetes-client-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-amd64.tar.gz) | `fe8fe40148df404b33069931ea30937699758ed4611ef6baddb4c21b7b19db5e` +[kubernetes-client-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-arm64.tar.gz) | `921f5711b97f0b4de69784d9c79f95e80f75a550f28fc1f26597aa0ef6faa471` +[kubernetes-client-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-arm.tar.gz) | `77b010cadef98dc832a2f560afe15e57a675ed9fbc59ffad5e19878510997874` +[kubernetes-client-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-ppc64le.tar.gz) | `02aa71ddcbe8b711814af7287aac79de5d99c1c143c0d3af5e14b1ff195b8bdc` +[kubernetes-client-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-linux-s390x.tar.gz) | `7e315024267306a620045d003785ecc8d7f2e763a6108ae806d5d384aa7552cc` +[kubernetes-client-windows-386.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-windows-386.tar.gz) | `99b2a81b7876498e119db4cb34c434b3790bc41cd882384037c1c1b18cba9f99` +[kubernetes-client-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-client-windows-amd64.tar.gz) | `d89d303cbbf9e57e5a540277158e4d83ad18ca7402b5b54665f1378bb4528599` ### Server Binaries filename | sha256 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-amd64.tar.gz) | `5acf2527461419ba883ac352f7c36c3fa0b86a618dbede187054ad90fa233b0e` 
-[kubernetes-server-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-arm64.tar.gz) | `e1f61b4dc6e0c9986e95ec25f876f9a89966215ee8cc7f4a3539ec391b217587` -[kubernetes-server-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-arm.tar.gz) | `441c45e16e63e9bdf99887a896a99b3a376af778cb778cc1d0e6afc505237200` -[kubernetes-server-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-ppc64le.tar.gz) | `c0175f02180d9c88028ee5ad4e3ea04af8a6741a97f4900b02615f7f83c4d1c5` -[kubernetes-server-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-server-linux-s390x.tar.gz) | `2178150d31197ad7f59d44ffea37d682c2675b3a4ea2fc3fa1eaa0e768b993f7` +[kubernetes-server-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-amd64.tar.gz) | `5acf2527461419ba883ac352f7c36c3fa0b86a618dbede187054ad90fa233b0e` +[kubernetes-server-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-arm64.tar.gz) | `e1f61b4dc6e0c9986e95ec25f876f9a89966215ee8cc7f4a3539ec391b217587` +[kubernetes-server-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-arm.tar.gz) | `441c45e16e63e9bdf99887a896a99b3a376af778cb778cc1d0e6afc505237200` +[kubernetes-server-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-ppc64le.tar.gz) | `c0175f02180d9c88028ee5ad4e3ea04af8a6741a97f4900b02615f7f83c4d1c5` +[kubernetes-server-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-server-linux-s390x.tar.gz) | `2178150d31197ad7f59d44ffea37d682c2675b3a4ea2fc3fa1eaa0e768b993f7` ### Node Binaries filename | sha256 hash -------- | ----------- 
-[kubernetes-node-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-amd64.tar.gz) | `b8ff0ae693ecca4d55669c66786d6c585f8c77b41a270d65f8175eba8729663a` -[kubernetes-node-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-arm64.tar.gz) | `f0f63baaace463dc663c98cbc9a41e52233d1ef33410571ce3f3e78bd485787e` -[kubernetes-node-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-arm.tar.gz) | `554bdd11deaf390de85830c7c888dfd4d75d9de8ac147799df12993f27bde905` -[kubernetes-node-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-ppc64le.tar.gz) | `913af8ca8b258930e76fd3368acc83608e36e7e270638fa01a6e3be4f682d8bd` -[kubernetes-node-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-linux-s390x.tar.gz) | `8192c1c80563230d727fab71514105571afa52cde8520b3d90af58e6daf0e19c` -[kubernetes-node-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.1/kubernetes-node-windows-amd64.tar.gz) | `4408e6d741c6008044584c0d7235e608c596e836d51346ee773589d9b4589fdc` +[kubernetes-node-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-amd64.tar.gz) | `b8ff0ae693ecca4d55669c66786d6c585f8c77b41a270d65f8175eba8729663a` +[kubernetes-node-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-arm64.tar.gz) | `f0f63baaace463dc663c98cbc9a41e52233d1ef33410571ce3f3e78bd485787e` +[kubernetes-node-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-arm.tar.gz) | `554bdd11deaf390de85830c7c888dfd4d75d9de8ac147799df12993f27bde905` 
+[kubernetes-node-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-ppc64le.tar.gz) | `913af8ca8b258930e76fd3368acc83608e36e7e270638fa01a6e3be4f682d8bd` +[kubernetes-node-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-linux-s390x.tar.gz) | `8192c1c80563230d727fab71514105571afa52cde8520b3d90af58e6daf0e19c` +[kubernetes-node-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release/release/v1.9.1/kubernetes-node-windows-amd64.tar.gz) | `4408e6d741c6008044584c0d7235e608c596e836d51346ee773589d9b4589fdc` ## Changelog since v1.9.0 From c7988bae61f123c6d35ddaaf2dd54def42e02612 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 11 Jan 2018 17:50:07 -0800 Subject: [PATCH 740/794] Get the node before attempting to get its Alias IP ranges This allows us to fail fast if the node doesn't exist, and to record node status changes if we fail to 'allocate' a CIDR. --- .../nodeipam/ipam/cloud_cidr_allocator.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go index 8d6ef878dac..6f4e4cfcc7a 100644 --- a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go @@ -191,10 +191,14 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { // updateCIDRAllocation assigns CIDR to Node and sends an update to the API server. 
func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { - var err error - var node *v1.Node defer ca.removeNodeFromProcessing(nodeName) + node, err := ca.nodeLister.Get(nodeName) + if err != nil { + glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err) + return err + } + cidrs, err := ca.cloud.AliasRanges(types.NodeName(nodeName)) if err != nil { nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable") @@ -210,12 +214,6 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { } podCIDR := cidr.String() - node, err = ca.nodeLister.Get(nodeName) - if err != nil { - glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err) - return err - } - if node.Spec.PodCIDR == podCIDR { glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR) // We don't return here, in order to set the NetworkUnavailable condition later below. 
From 9d1b687914226514992d9f47c639847930d315b2 Mon Sep 17 00:00:00 2001 From: Chao Xu Date: Thu, 11 Jan 2018 18:24:24 -0800 Subject: [PATCH 741/794] fix a typo --- .../pkg/admission/plugin/webhook/mutating/admission.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go index ec0ae942b69..6d62a36f629 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go @@ -174,7 +174,7 @@ func (a *MutatingWebhook) ValidateInitialization() error { return fmt.Errorf("MutatingWebhook.convertor is not properly setup: %v", err) } if a.defaulter == nil { - return fmt.Errorf("MutatingWebhook.defaulter is not properly setup: %v") + return fmt.Errorf("MutatingWebhook.defaulter is not properly setup") } go a.hookSource.Run(wait.NeverStop) return nil From 0ae647bf333f5123bb470c3866084b699ecaafb0 Mon Sep 17 00:00:00 2001 From: wackxu Date: Tue, 14 Nov 2017 21:43:20 +0800 Subject: [PATCH 742/794] use shared informers for BootstrapSigner controller --- cmd/kube-controller-manager/app/bootstrap.go | 2 + pkg/controller/bootstrap/BUILD | 7 + pkg/controller/bootstrap/bootstrapsigner.go | 148 ++++++++++-------- .../bootstrap/bootstrapsigner_test.go | 38 +++-- 4 files changed, 114 insertions(+), 81 deletions(-) diff --git a/cmd/kube-controller-manager/app/bootstrap.go b/cmd/kube-controller-manager/app/bootstrap.go index 046070ecb27..38e066523fd 100644 --- a/cmd/kube-controller-manager/app/bootstrap.go +++ b/cmd/kube-controller-manager/app/bootstrap.go @@ -25,6 +25,8 @@ import ( func startBootstrapSignerController(ctx ControllerContext) (bool, error) { bsc, err := bootstrap.NewBootstrapSigner( ctx.ClientBuilder.ClientGoClientOrDie("bootstrap-signer"), + ctx.InformerFactory.Core().V1().Secrets(), + 
ctx.InformerFactory.Core().V1().ConfigMaps(), bootstrap.DefaultBootstrapSignerOptions(), ) if err != nil { diff --git a/pkg/controller/bootstrap/BUILD b/pkg/controller/bootstrap/BUILD index 6dfcd204c9d..dccc9637a58 100644 --- a/pkg/controller/bootstrap/BUILD +++ b/pkg/controller/bootstrap/BUILD @@ -21,11 +21,14 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", "//pkg/bootstrap/api:go_default_library", + "//pkg/controller:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", ], @@ -44,6 +47,7 @@ go_library( deps = [ "//pkg/apis/core:go_default_library", "//pkg/bootstrap/api:go_default_library", + "//pkg/controller:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/square/go-jose:go_default_library", @@ -51,11 +55,14 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", 
"//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", ], diff --git a/pkg/controller/bootstrap/bootstrapsigner.go b/pkg/controller/bootstrap/bootstrapsigner.go index 5bb53a44837..5db870aa49d 100644 --- a/pkg/controller/bootstrap/bootstrapsigner.go +++ b/pkg/controller/bootstrap/bootstrapsigner.go @@ -22,25 +22,25 @@ import ( "github.com/golang/glog" + "fmt" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" + informers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" api "k8s.io/kubernetes/pkg/apis/core" bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" ) // BootstrapSignerOptions contains options for the BootstrapSigner type BootstrapSignerOptions struct { - // ConfigMapNamespace is the namespace of the ConfigMap ConfigMapNamespace string @@ -71,88 +71,101 @@ func DefaultBootstrapSignerOptions() BootstrapSignerOptions { // BootstrapSigner is a controller that signs a ConfigMap with a set of tokens. type BootstrapSigner struct { - client clientset.Interface - configMapKey string - secretNamespace string - - configMaps cache.Store - secrets cache.Store + client clientset.Interface + configMapKey string + configMapName string + configMapNamespace string + secretNamespace string // syncQueue handles synchronizing updates to the ConfigMap. 
We'll only ever // have one item (Named ) in this queue. We are using it // serializes and collapses updates as they can come from both the ConfigMap // and Secrets controllers. - syncQueue workqueue.Interface + syncQueue workqueue.RateLimitingInterface - // Since we join two objects, we'll watch both of them with controllers. - configMapsController cache.Controller - secretsController cache.Controller + secretLister corelisters.SecretLister + secretSynced cache.InformerSynced + + configMapLister corelisters.ConfigMapLister + configMapSynced cache.InformerSynced } // NewBootstrapSigner returns a new *BootstrapSigner. -// -// TODO: Switch to shared informers -func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions) (*BootstrapSigner, error) { +func NewBootstrapSigner(cl clientset.Interface, secrets informers.SecretInformer, configMaps informers.ConfigMapInformer, options BootstrapSignerOptions) (*BootstrapSigner, error) { e := &BootstrapSigner{ - client: cl, - configMapKey: options.ConfigMapNamespace + "/" + options.ConfigMapName, - secretNamespace: options.TokenSecretNamespace, - syncQueue: workqueue.NewNamed("bootstrap_signer_queue"), + client: cl, + configMapKey: options.ConfigMapNamespace + "/" + options.ConfigMapName, + configMapName: options.ConfigMapName, + configMapNamespace: options.ConfigMapNamespace, + secretNamespace: options.TokenSecretNamespace, + secretLister: secrets.Lister(), + secretSynced: secrets.Informer().HasSynced, + configMapLister: configMaps.Lister(), + configMapSynced: configMaps.Informer().HasSynced, + syncQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "bootstrap_signer_queue"), } if cl.CoreV1().RESTClient().GetRateLimiter() != nil { if err := metrics.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil { return nil, err } } - configMapSelector := fields.SelectorFromSet(map[string]string{api.ObjectNameField: 
options.ConfigMapName}) - e.configMaps, e.configMapsController = cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { - lo.FieldSelector = configMapSelector.String() - return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).List(lo) + + configMaps.Informer().AddEventHandlerWithResyncPeriod( + cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + switch t := obj.(type) { + case *v1.ConfigMap: + return t.Name == options.ConfigMapName && t.Namespace == options.ConfigMapNamespace + default: + utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj)) + return false + } }, - WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { - lo.FieldSelector = configMapSelector.String() - return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).Watch(lo) + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { e.pokeConfigMapSync() }, + UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() }, }, }, - &v1.ConfigMap{}, options.ConfigMapResync, - cache.ResourceEventHandlerFuncs{ - AddFunc: func(_ interface{}) { e.pokeConfigMapSync() }, - UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() }, - }, ) - secretSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(bootstrapapi.SecretTypeBootstrapToken)}) - e.secrets, e.secretsController = cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { - lo.FieldSelector = secretSelector.String() - return e.client.CoreV1().Secrets(e.secretNamespace).List(lo) + secrets.Informer().AddEventHandlerWithResyncPeriod( + cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + switch t := obj.(type) { + case *v1.Secret: + return t.Type == bootstrapapi.SecretTypeBootstrapToken && t.Namespace == e.secretNamespace + default: + utilruntime.HandleError(fmt.Errorf("object passed to %T that is 
not expected: %T", e, obj)) + return false + } }, - WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { - lo.FieldSelector = secretSelector.String() - return e.client.CoreV1().Secrets(e.secretNamespace).Watch(lo) + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(_ interface{}) { e.pokeConfigMapSync() }, + UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() }, + DeleteFunc: func(_ interface{}) { e.pokeConfigMapSync() }, }, }, - &v1.Secret{}, options.SecretResync, - cache.ResourceEventHandlerFuncs{ - AddFunc: func(_ interface{}) { e.pokeConfigMapSync() }, - UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() }, - DeleteFunc: func(_ interface{}) { e.pokeConfigMapSync() }, - }, ) + return e, nil } // Run runs controller loops and returns when they are done func (e *BootstrapSigner) Run(stopCh <-chan struct{}) { - go e.configMapsController.Run(stopCh) - go e.secretsController.Run(stopCh) + // Shut down queues + defer utilruntime.HandleCrash() + defer e.syncQueue.ShutDown() + + if !controller.WaitForCacheSync("bootstrap_signer", stopCh, e.configMapSynced, e.secretSynced) { + return + } + + glog.V(5).Infof("Starting workers") go wait.Until(e.serviceConfigMapQueue, 0, stopCh) <-stopCh + glog.V(1).Infof("Shutting down") } func (e *BootstrapSigner) pokeConfigMapSync() { @@ -237,27 +250,32 @@ func (e *BootstrapSigner) updateConfigMap(cm *v1.ConfigMap) { // getConfigMap gets the ConfigMap we are interested in func (e *BootstrapSigner) getConfigMap() *v1.ConfigMap { - configMap, exists, err := e.configMaps.GetByKey(e.configMapKey) + configMap, err := e.configMapLister.ConfigMaps(e.configMapNamespace).Get(e.configMapName) // If we can't get the configmap just return nil. The resync will eventually // sync things up. 
+ if err != nil { + if !apierrors.IsNotFound(err) { + utilruntime.HandleError(err) + } + return nil + } + + return configMap +} + +func (e *BootstrapSigner) listSecrets() []*v1.Secret { + secrets, err := e.secretLister.Secrets(e.secretNamespace).List(labels.Everything()) if err != nil { utilruntime.HandleError(err) return nil } - if exists { - return configMap.(*v1.ConfigMap) - } - return nil -} - -func (e *BootstrapSigner) listSecrets() []*v1.Secret { - secrets := e.secrets.List() - items := []*v1.Secret{} - for _, obj := range secrets { - items = append(items, obj.(*v1.Secret)) + for _, secret := range secrets { + if secret.Type == bootstrapapi.SecretTypeBootstrapToken { + items = append(items, secret) + } } return items } diff --git a/pkg/controller/bootstrap/bootstrapsigner_test.go b/pkg/controller/bootstrap/bootstrapsigner_test.go index ba92382300d..15fab2e26b0 100644 --- a/pkg/controller/bootstrap/bootstrapsigner_test.go +++ b/pkg/controller/bootstrap/bootstrapsigner_test.go @@ -24,10 +24,13 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" api "k8s.io/kubernetes/pkg/apis/core" bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" + "k8s.io/kubernetes/pkg/controller" ) func init() { @@ -36,14 +39,17 @@ func init() { const testTokenID = "abc123" -func newBootstrapSigner() (*BootstrapSigner, *fake.Clientset, error) { +func newBootstrapSigner() (*BootstrapSigner, *fake.Clientset, coreinformers.SecretInformer, coreinformers.ConfigMapInformer, error) { options := DefaultBootstrapSignerOptions() cl := fake.NewSimpleClientset() - bsc, err := NewBootstrapSigner(cl, options) + informers := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc()) + secrets := informers.Core().V1().Secrets() + configMaps := 
informers.Core().V1().ConfigMaps() + bsc, err := NewBootstrapSigner(cl, secrets, configMaps, options) if err != nil { - return nil, nil, err + return nil, nil, nil, nil, err } - return bsc, cl, nil + return bsc, cl, secrets, configMaps, nil } func newConfigMap(tokenID, signature string) *v1.ConfigMap { @@ -64,7 +70,7 @@ func newConfigMap(tokenID, signature string) *v1.ConfigMap { } func TestNoConfigMap(t *testing.T) { - signer, cl, err := newBootstrapSigner() + signer, cl, _, _, err := newBootstrapSigner() if err != nil { t.Fatalf("error creating BootstrapSigner: %v", err) } @@ -73,17 +79,17 @@ func TestNoConfigMap(t *testing.T) { } func TestSimpleSign(t *testing.T) { - signer, cl, err := newBootstrapSigner() + signer, cl, secrets, configMaps, err := newBootstrapSigner() if err != nil { t.Fatalf("error creating BootstrapSigner: %v", err) } cm := newConfigMap("", "") - signer.configMaps.Add(cm) + configMaps.Informer().GetIndexer().Add(cm) secret := newTokenSecret(testTokenID, "tokenSecret") addSecretSigningUsage(secret, "true") - signer.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) signer.signConfigMap() @@ -97,17 +103,17 @@ func TestSimpleSign(t *testing.T) { } func TestNoSignNeeded(t *testing.T) { - signer, cl, err := newBootstrapSigner() + signer, cl, secrets, configMaps, err := newBootstrapSigner() if err != nil { t.Fatalf("error creating BootstrapSigner: %v", err) } cm := newConfigMap(testTokenID, "eyJhbGciOiJIUzI1NiIsImtpZCI6ImFiYzEyMyJ9..QSxpUG7Q542CirTI2ECPSZjvBOJURUW5a7XqFpNI958") - signer.configMaps.Add(cm) + configMaps.Informer().GetIndexer().Add(cm) secret := newTokenSecret(testTokenID, "tokenSecret") addSecretSigningUsage(secret, "true") - signer.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) signer.signConfigMap() @@ -115,17 +121,17 @@ func TestNoSignNeeded(t *testing.T) { } func TestUpdateSignature(t *testing.T) { - signer, cl, err := newBootstrapSigner() + signer, cl, secrets, configMaps, err := 
newBootstrapSigner() if err != nil { t.Fatalf("error creating BootstrapSigner: %v", err) } cm := newConfigMap(testTokenID, "old signature") - signer.configMaps.Add(cm) + configMaps.Informer().GetIndexer().Add(cm) secret := newTokenSecret(testTokenID, "tokenSecret") addSecretSigningUsage(secret, "true") - signer.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) signer.signConfigMap() @@ -139,13 +145,13 @@ func TestUpdateSignature(t *testing.T) { } func TestRemoveSignature(t *testing.T) { - signer, cl, err := newBootstrapSigner() + signer, cl, _, configMaps, err := newBootstrapSigner() if err != nil { t.Fatalf("error creating BootstrapSigner: %v", err) } cm := newConfigMap(testTokenID, "old signature") - signer.configMaps.Add(cm) + configMaps.Informer().GetIndexer().Add(cm) signer.signConfigMap() From 252ff1e5a606a3517d6d1e2743a2c023cd40267a Mon Sep 17 00:00:00 2001 From: ravisantoshgudimetla Date: Tue, 9 Jan 2018 10:42:07 +0530 Subject: [PATCH 743/794] Metrics for predicate and priority evaluation --- pkg/scheduler/core/generic_scheduler.go | 7 ++++++- pkg/scheduler/metrics/metrics.go | 21 +++++++++++++++++++-- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index e1128c01cb8..f147d534958 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + "k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/schedulercache" "k8s.io/kubernetes/pkg/scheduler/util" @@ -131,6 +132,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister } trace.Step("Computing predicates") + startPredicateEvalTime := time.Now() filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.cachedNodeInfoMap, nodes, 
g.predicates, g.extenders, g.predicateMetaProducer, g.equivalenceCache, g.schedulingQueue) if err != nil { return "", err @@ -143,11 +145,13 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister FailedPredicates: failedPredicateMap, } } + metrics.SchedulingAlgorithmPredicateEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPredicateEvalTime)) trace.Step("Prioritizing") - + startPriorityEvalTime := time.Now() // When only one node after predicate, just use it. if len(filteredNodes) == 1 { + metrics.SchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime)) return filteredNodes[0].Name, nil } @@ -156,6 +160,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister if err != nil { return "", err } + metrics.SchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime)) trace.Step("Selecting host") return g.selectHost(priorityList) diff --git a/pkg/scheduler/metrics/metrics.go b/pkg/scheduler/metrics/metrics.go index cd50ceddc9a..c0a87f319ae 100644 --- a/pkg/scheduler/metrics/metrics.go +++ b/pkg/scheduler/metrics/metrics.go @@ -25,8 +25,7 @@ import ( const schedulerSubsystem = "scheduler" -var BindingSaturationReportInterval = 1 * time.Second - +// All the histogram based metrics have 1ms as size for the smallest bucket. 
var ( E2eSchedulingLatency = prometheus.NewHistogram( prometheus.HistogramOpts{ @@ -44,6 +43,22 @@ var ( Buckets: prometheus.ExponentialBuckets(1000, 2, 15), }, ) + SchedulingAlgorithmPredicateEvaluationDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Subsystem: schedulerSubsystem, + Name: "scheduling_algorithm_predicate_evaluation", + Help: "Scheduling algorithm predicate evaluation duration", + Buckets: prometheus.ExponentialBuckets(1000, 2, 15), + }, + ) + SchedulingAlgorithmPriorityEvaluationDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Subsystem: schedulerSubsystem, + Name: "scheduling_algorithm_priority_evaluation", + Help: "Scheduling algorithm priority evaluation duration", + Buckets: prometheus.ExponentialBuckets(1000, 2, 15), + }, + ) BindingLatency = prometheus.NewHistogram( prometheus.HistogramOpts{ Subsystem: schedulerSubsystem, @@ -63,6 +78,8 @@ func Register() { prometheus.MustRegister(E2eSchedulingLatency) prometheus.MustRegister(SchedulingAlgorithmLatency) prometheus.MustRegister(BindingLatency) + prometheus.MustRegister(SchedulingAlgorithmPredicateEvaluationDuration) + prometheus.MustRegister(SchedulingAlgorithmPriorityEvaluationDuration) }) } From b3c57a880ce42222ede01878df2cb595f0aad571 Mon Sep 17 00:00:00 2001 From: ravisantoshgudimetla Date: Tue, 9 Jan 2018 10:50:50 +0530 Subject: [PATCH 744/794] Build files generated --- pkg/scheduler/core/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD index 6b652164e8c..c04be5f4821 100644 --- a/pkg/scheduler/core/BUILD +++ b/pkg/scheduler/core/BUILD @@ -50,6 +50,7 @@ go_library( "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", + "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/schedulercache:go_default_library", "//pkg/scheduler/util:go_default_library", 
"//pkg/scheduler/volumebinder:go_default_library", From 41c75969748162118708c56ff56627f1980efe09 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Thu, 11 Jan 2018 01:56:16 +0000 Subject: [PATCH 745/794] Use linux commands instead of docker commands. --- test/e2e/framework/util.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 637a9eace0b..6f311e5c73e 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -3900,9 +3900,7 @@ func sshRestartMaster() error { } var command string if ProviderIs("gce") { - // `kube-apiserver_kube-apiserver` matches the name of the apiserver - // container. - command = "sudo docker ps | grep kube-apiserver_kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill" + command = "pidof kube-apiserver | xargs sudo kill" } else { command = "sudo /etc/init.d/kube-apiserver restart" } @@ -3933,9 +3931,9 @@ func RestartControllerManager() error { if ProviderIs("gce") && !MasterOSDistroIs("gci") { return fmt.Errorf("unsupported master OS distro: %s", TestContext.MasterOSDistro) } - cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1 | xargs sudo docker kill" + cmd := "pidof kube-controller-manager | xargs sudo kill" Logf("Restarting controller-manager via ssh, running: %v", cmd) - result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider) + result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) return fmt.Errorf("couldn't restart controller-manager: %v", err) @@ -3946,7 +3944,7 @@ func RestartControllerManager() error { func WaitForControllerManagerUp() error { cmd := "curl http://localhost:" + strconv.Itoa(ports.ControllerManagerPort) + "/healthz" for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { - result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider) + 
result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) } @@ -3960,9 +3958,9 @@ func WaitForControllerManagerUp() error { // CheckForControllerManagerHealthy checks that the controller manager does not crash within "duration" func CheckForControllerManagerHealthy(duration time.Duration) error { var PID string - cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1" + cmd := "pidof kube-controller-manager" for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) { - result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider) + result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil { // We don't necessarily know that it crashed, pipe could just be broken LogSSHResult(result) From 4d6817dd71fb6b7703aaa0b5e74bcdfdaa44b1a6 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Fri, 12 Jan 2018 06:33:18 +0000 Subject: [PATCH 746/794] Use GinkgoRecover to avoid panic. --- test/e2e_node/resource_collector.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go index 53028aebfad..ec256e32f1f 100644 --- a/test/e2e_node/resource_collector.go +++ b/test/e2e_node/resource_collector.go @@ -45,6 +45,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e_node/perftype" + . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) @@ -373,6 +374,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { for _, pod := range pods { wg.Add(1) go func(pod *v1.Pod) { + defer GinkgoRecover() defer wg.Done() err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30)) From dd9de90b0ad1aa78c2a8dd7d5238d8f769ffe771 Mon Sep 17 00:00:00 2001 From: p0lyn0mial Date: Thu, 4 Jan 2018 14:52:25 +0100 Subject: [PATCH 747/794] the changes introduced in this commit plumbs in the generic scaler into kubectl. note that we don't change the behaviour of kubectl. For example it won't scale new resources. That's the end goal. The first step is to retrofit existing code to use the generic scaler. --- pkg/kubectl/cmd/util/BUILD | 1 + .../cmd/util/factory_object_mapping.go | 19 ++++++- pkg/kubectl/scale.go | 12 ++-- staging/src/k8s.io/client-go/scale/client.go | 1 - test/e2e/apps/daemon_restart.go | 6 +- test/e2e/examples.go | 4 +- test/e2e/framework/BUILD | 3 + test/e2e/framework/deployment_util.go | 7 ++- test/e2e/framework/framework.go | 25 +++++++++ test/e2e/framework/rc_util.go | 13 +++-- test/e2e/framework/util.go | 12 +++- test/e2e/network/service.go | 2 +- test/e2e/scalability/BUILD | 15 +++-- test/e2e/scalability/density.go | 5 +- test/e2e/scalability/load.go | 55 ++++++++++++++++--- .../equivalence_cache_predicates.go | 2 +- test/e2e/scheduling/priorities.go | 2 +- test/e2e/scheduling/rescheduler.go | 6 +- test/integration/framework/BUILD | 3 - test/integration/framework/util.go | 54 ------------------ test/utils/BUILD | 1 + test/utils/runners.go | 29 ++++++++++ 22 files changed, 177 insertions(+), 100 deletions(-) diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index 6541d3953e9..ed3d59475c3 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -78,6 +78,7 @@ go_library( "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", 
"//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/util/homedir:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/pkg/kubectl/cmd/util/factory_object_mapping.go b/pkg/kubectl/cmd/util/factory_object_mapping.go index dfd82d406c5..5c9f01f7d50 100644 --- a/pkg/kubectl/cmd/util/factory_object_mapping.go +++ b/pkg/kubectl/cmd/util/factory_object_mapping.go @@ -37,6 +37,7 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" restclient "k8s.io/client-go/rest" + scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" @@ -286,7 +287,23 @@ func (f *ring1Factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) if err != nil { return nil, err } - return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset) + + // create scales getter + // TODO(p0lyn0mial): put scalesGetter to a factory + discoClient, err := f.clientAccessFactory.DiscoveryClient() + if err != nil { + return nil, err + } + restClient, err := f.clientAccessFactory.RESTClient() + if err != nil { + return nil, err + } + mapper, _ := f.Object() + resolver := scaleclient.NewDiscoveryScaleKindResolver(discoClient) + scalesGetter := scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver) + gvk := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource) + + return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset, scalesGetter, gvk.GroupResource()) } func (f *ring1Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) { diff --git a/pkg/kubectl/scale.go b/pkg/kubectl/scale.go index 1d4165f9626..511514df6b8 100644 --- a/pkg/kubectl/scale.go +++ b/pkg/kubectl/scale.go @@ -53,7 +53,10 @@ type Scaler interface { ScaleSimple(namespace, name string, 
preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error) } -func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, error) { +// ScalerFor gets a scaler for a given resource +// TODO(p0lyn0mial): remove kind and internalclientset +// TODO(p0lyn0mial): once we have only one scaler, there is no need to return an error anymore. +func ScalerFor(kind schema.GroupKind, c internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, gr schema.GroupResource) (Scaler, error) { switch kind { case api.Kind("ReplicationController"): return &ReplicationControllerScaler{c.Core()}, nil @@ -63,10 +66,9 @@ func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, er return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface. case apps.Kind("StatefulSet"): return &StatefulSetScaler{c.Apps()}, nil - case extensions.Kind("Deployment"), apps.Kind("Deployment"): - return &DeploymentScaler{c.Extensions()}, nil + default: + return &GenericScaler{scalesGetter, gr}, nil } - return nil, fmt.Errorf("no scaler has been implemented for %q", kind) } // ScalePrecondition describes a condition that must be true for the scale to take place @@ -533,7 +535,7 @@ func (precondition *ScalePrecondition) validateGeneric(scale *autoscalingapi.Sca } // GenericScaler can update scales for resources in a particular namespace -// TODO(o0lyn0mial): when the work on GenericScaler is done, don't +// TODO(po0lyn0mial): when the work on GenericScaler is done, don't // export the GenericScaler. 
Instead use ScalerFor method for getting the Scaler // also update the UTs type GenericScaler struct { diff --git a/staging/src/k8s.io/client-go/scale/client.go b/staging/src/k8s.io/client-go/scale/client.go index 07c6098620b..a8c903d9eab 100644 --- a/staging/src/k8s.io/client-go/scale/client.go +++ b/staging/src/k8s.io/client-go/scale/client.go @@ -196,7 +196,6 @@ func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *aut Body(scaleUpdateBytes). Do() if err := result.Error(); err != nil { - panic(err) return nil, fmt.Errorf("could not update the scale for %s %s: %v", resource.String(), scale.Name, err) } diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index 85266680ee7..2319dfe5731 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -257,7 +257,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC // to the same size achieves this, because the scale operation advances the RC's sequence number // and awaits it to be observed and reported back in the RC's status. - framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods, true) + framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods, true) // Only check the keys, the pods can be different if the kubelet updated it. // TODO: Can it really? @@ -288,9 +288,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { restarter.kill() // This is best effort to try and create pods while the scheduler is down, // since we don't know exactly when it is restarted after the kill signal. 
- framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, false)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, false)) restarter.waitUp() - framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, true)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, true)) }) It("Kubelet should not restart containers across restart", func() { diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 7e377e203e4..6fa937c89f6 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -521,7 +521,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { Expect(err).NotTo(HaveOccurred()) By("scaling rethinkdb") - framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "rethinkdb-rc", 2, true) + framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "rethinkdb-rc", 2, true) checkDbInstances() By("starting admin") @@ -564,7 +564,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { Expect(err).NotTo(HaveOccurred()) By("scaling hazelcast") - framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "hazelcast", 2, true) + framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "hazelcast", 2, true) forEachPod("name", "hazelcast", func(pod v1.Pod) { _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 0ecad57686d..6a773a3957b 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -110,6 +110,7 @@ go_library( "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", @@ -132,6 +133,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/discovery/cached:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", @@ -139,6 +141,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", diff --git a/test/e2e/framework/deployment_util.go b/test/e2e/framework/deployment_util.go index 23feda770d4..d5544e1998e 100644 --- a/test/e2e/framework/deployment_util.go +++ b/test/e2e/framework/deployment_util.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" @@ -178,8 +179,10 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er return err } -func ScaleDeployment(clientset 
clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error { - return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment")) +//TODO(p0lyn0mial): remove internalClientset and kind. +//TODO(p0lyn0mial): update the callers. +func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { + return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments")) } func RunDeployment(config testutils.DeploymentConfig) error { diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index e628accaa28..f5341d79c00 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -28,14 +28,19 @@ import ( "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + cacheddiscovery "k8s.io/client-go/discovery/cached" "k8s.io/client-go/dynamic" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/tools/clientcmd" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/api/legacyscheme" @@ -67,6 +72,8 @@ type Framework struct { AggregatorClient *aggregatorclient.Clientset ClientPool dynamic.ClientPool + ScalesGetter scaleclient.ScalesGetter + SkipNamespaceCreation bool // Whether to skip creating a namespace Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped 
namespacesToDelete []*v1.Namespace // Some tests have more than one. @@ -161,6 +168,24 @@ func (f *Framework) BeforeEach() { f.AggregatorClient, err = aggregatorclient.NewForConfig(config) Expect(err).NotTo(HaveOccurred()) f.ClientPool = dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc) + + // create scales getter, set GroupVersion and NegotiatedSerializer to default values + // as they are required when creating a REST client. + if config.GroupVersion == nil { + config.GroupVersion = &schema.GroupVersion{} + } + if config.NegotiatedSerializer == nil { + config.NegotiatedSerializer = legacyscheme.Codecs + } + restClient, err := rest.RESTClientFor(config) + Expect(err).NotTo(HaveOccurred()) + discoClient, err := discovery.NewDiscoveryClientForConfig(config) + Expect(err).NotTo(HaveOccurred()) + cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient) + restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured) + resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient) + f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) + if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil { externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig) externalConfig.QPS = f.Options.ClientQPS diff --git a/test/e2e/framework/rc_util.go b/test/e2e/framework/rc_util.go index d0d1982b535..8bbdb6f4a9b 100644 --- a/test/e2e/framework/rc_util.go +++ b/test/e2e/framework/rc_util.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/api/testapi" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" @@ -84,7 +85,9 
@@ func RcByNameContainer(name string, replicas int32, image string, labels map[str // ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till // none are running, otherwise it does what a synchronous scale operation would do. -func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error { +//TODO(p0lyn0mial): remove internalClientset. +//TODO(p0lyn0mial): update the callers. +func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error { listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()} rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts) if err != nil { @@ -96,7 +99,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas) for _, labelRC := range rcs.Items { name := labelRC.Name - if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil { + if err := ScaleRC(clientset, internalClientset, scalesGetter, ns, name, replicas, false); err != nil { return err } rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) @@ -156,8 +159,10 @@ func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalcl return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name) } -func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error { - return ScaleResource(clientset, internalClientset, ns, name, size, wait, api.Kind("ReplicationController")) +//TODO(p0lyn0mial): remove internalClientset. +//TODO(p0lyn0mial): update the callers. 
+func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { + return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers")) } func RunRC(config testutils.RCConfig) error { diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 637a9eace0b..8384c774af0 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -74,6 +74,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -2682,20 +2683,25 @@ func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) { ExpectNoError(err) } -func getScalerForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Scaler, error) { - return kubectl.ScalerFor(kind, internalClientset) +//TODO(p0lyn0mial): remove internalClientset and kind +func getScalerForKind(internalClientset internalclientset.Interface, kind schema.GroupKind, scalesGetter scaleclient.ScalesGetter, gr schema.GroupResource) (kubectl.Scaler, error) { + return kubectl.ScalerFor(kind, internalClientset, scalesGetter, gr) } +//TODO(p0lyn0mial): remove internalClientset and kind. +//TODO(p0lyn0mial): update the callers. 
func ScaleResource( clientset clientset.Interface, internalClientset internalclientset.Interface, + scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool, kind schema.GroupKind, + gr schema.GroupResource, ) error { By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size)) - scaler, err := getScalerForKind(internalClientset, kind) + scaler, err := getScalerForKind(internalClientset, kind, scalesGetter, gr) if err != nil { return err } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 759599a9937..44712d0d4cf 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -1265,7 +1265,7 @@ var _ = SIGDescribe("Services", func() { } By("Scaling down replication controller to zero") - framework.ScaleRC(f.ClientSet, f.InternalClientset, t.Namespace, rcSpec.Name, 0, false) + framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false) By("Update service to not tolerate unready services") _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { diff --git a/test/e2e/scalability/BUILD b/test/e2e/scalability/BUILD index 34dff1f866a..fc6e3cee361 100644 --- a/test/e2e/scalability/BUILD +++ b/test/e2e/scalability/BUILD @@ -1,9 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -14,7 +9,9 @@ go_library( "load.go", ], importpath = "k8s.io/kubernetes/test/e2e/scalability", + visibility = ["//visibility:public"], deps = [ + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", @@ -26,6 +23,7 @@ go_library( "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", @@ -38,8 +36,12 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/discovery/cached:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/transport:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", @@ -57,4 +59,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index f10671e10b6..6e49d068599 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -528,7 +528,7 @@ var _ = SIGDescribe("Density", func() { podThroughput := 20 timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute // createClients is defined in load.go - clients, internalClients, err := createClients(numberOfCollections) + clients, internalClients, scalesClients, err := createClients(numberOfCollections) for i := 0; i < numberOfCollections; i++ { nsName := namespaces[i].Name secretNames := []string{} @@ -559,6 +559,7 @@ var _ = SIGDescribe("Density", func() { baseConfig := &testutils.RCConfig{ Client: clients[i], InternalClient: 
internalClients[i], + ScalesGetter: scalesClients[i], Image: framework.GetPauseImageName(f.ClientSet), Name: name, Namespace: nsName, @@ -590,7 +591,7 @@ var _ = SIGDescribe("Density", func() { } // Single client is running out of http2 connections in delete phase, hence we need more. - clients, internalClients, err = createClients(2) + clients, internalClients, _, err = createClients(2) dConfig := DensityTestConfig{ ClientSets: clients, diff --git a/test/e2e/scalability/load.go b/test/e2e/scalability/load.go index c696de42724..6e15dbc8803 100644 --- a/test/e2e/scalability/load.go +++ b/test/e2e/scalability/load.go @@ -28,14 +28,18 @@ import ( "time" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + cacheddiscovery "k8s.io/client-go/discovery/cached" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/transport" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/apis/batch" @@ -48,6 +52,8 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "k8s.io/client-go/dynamic" + "k8s.io/kubernetes/pkg/api/legacyscheme" ) const ( @@ -309,9 +315,11 @@ var _ = SIGDescribe("Load capacity", func() { } }) -func createClients(numberOfClients int) ([]clientset.Interface, []internalclientset.Interface, error) { +func createClients(numberOfClients int) ([]clientset.Interface, []internalclientset.Interface, []scaleclient.ScalesGetter, error) { clients := make([]clientset.Interface, numberOfClients) internalClients := make([]internalclientset.Interface, numberOfClients) + scalesClients := make([]scaleclient.ScalesGetter, numberOfClients) + for i := 0; i < numberOfClients; i++ { config, err := framework.LoadConfig() Expect(err).NotTo(HaveOccurred()) @@ -327,11 +335,11 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient // each client here. transportConfig, err := config.TransportConfig() if err != nil { - return nil, nil, err + return nil, nil, nil, err } tlsConfig, err := transport.TLSConfigFor(transportConfig) if err != nil { - return nil, nil, err + return nil, nil, nil, err } config.Transport = utilnet.SetTransportDefaults(&http.Transport{ Proxy: http.ProxyFromEnvironment, @@ -349,16 +357,37 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient c, err := clientset.NewForConfig(config) if err != nil { - return nil, nil, err + return nil, nil, nil, err } clients[i] = c internalClient, err := internalclientset.NewForConfig(config) if err != nil { - return nil, nil, err + return nil, nil, nil, err } internalClients[i] = internalClient + + // create scale client, if GroupVersion or NegotiatedSerializer are not set + // assign default values - these fields are mandatory (required by RESTClientFor). 
+ if config.GroupVersion == nil { + config.GroupVersion = &schema.GroupVersion{} + } + if config.NegotiatedSerializer == nil { + config.NegotiatedSerializer = legacyscheme.Codecs + } + restClient, err := restclient.RESTClientFor(config) + if err != nil { + return nil, nil, nil, err + } + discoClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, nil, nil, err + } + cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient) + restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured) + resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient) + scalesClients[i] = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) } - return clients, internalClients, nil + return clients, internalClients, scalesClients, nil } func computePodCounts(total int) (int, int, int) { @@ -405,12 +434,13 @@ func generateConfigs( // Create a number of clients to better simulate real usecase // where not everyone is using exactly the same client. 
rcsPerClient := 20 - clients, internalClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient) + clients, internalClients, scalesClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient) framework.ExpectNoError(err) for i := 0; i < len(configs); i++ { configs[i].SetClient(clients[i%len(clients)]) configs[i].SetInternalClient(internalClients[i%len(internalClients)]) + configs[i].SetScalesClient(scalesClients[i%len(clients)]) } for i := 0; i < len(secretConfigs); i++ { secretConfigs[i].Client = clients[i%len(clients)] @@ -590,7 +620,16 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling sleepUpTo(scalingTime) newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2) framework.ExpectNoError(framework.ScaleResource( - config.GetClient(), config.GetInternalClient(), config.GetNamespace(), config.GetName(), newSize, true, config.GetKind()), + config.GetClient(), + config.GetInternalClient(), + config.GetScalesGetter(), + config.GetNamespace(), + config.GetName(), + newSize, + true, + config.GetKind(), + config.GetGroupResource(), + ), fmt.Sprintf("scaling %v %v", config.GetKind(), config.GetName())) selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.GetName()})) diff --git a/test/e2e/scheduling/equivalence_cache_predicates.go b/test/e2e/scheduling/equivalence_cache_predicates.go index 79aaf5d9d8b..3d551476399 100644 --- a/test/e2e/scheduling/equivalence_cache_predicates.go +++ b/test/e2e/scheduling/equivalence_cache_predicates.go @@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { By("Trying to schedule another equivalent Pod should fail due to node label has been removed.") // use scale to create another equivalent pod and wait for failure event WaitForSchedulerAfterAction(f, func() error { - err := framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, affinityRCName, uint(replica+1), false) + err := 
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false) return err }, affinityRCName, false) // and this new pod should be rejected since node label has been updated diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 1c34ea8998c..f3643b6cb13 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -196,7 +196,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1)) - framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rc.Name, uint(len(nodeList.Items)-1), true) + framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true) testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{ LabelSelector: "name=scheduler-priority-avoid-pod", }) diff --git a/test/e2e/scheduling/rescheduler.go b/test/e2e/scheduling/rescheduler.go index 512e8b3c6f0..0d1107ccbe1 100644 --- a/test/e2e/scheduling/rescheduler.go +++ b/test/e2e/scheduling/rescheduler.go @@ -68,8 +68,8 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() { deployment := deployments.Items[0] replicas := uint(*(deployment.Spec.Replicas)) - err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, metav1.NamespaceSystem, deployment.Name, replicas+1, true) - defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, metav1.NamespaceSystem, deployment.Name, replicas, true)) + err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true) + defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true)) framework.ExpectNoError(err) }) @@ -80,7 +80,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores 
int) error { replicas := millicores / 100 reserveCpu(f, id, 1, 100) - framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.Namespace.Name, id, uint(replicas), false)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false)) for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels) diff --git a/test/integration/framework/BUILD b/test/integration/framework/BUILD index 07da3bc1141..27a3fb8e412 100644 --- a/test/integration/framework/BUILD +++ b/test/integration/framework/BUILD @@ -22,13 +22,10 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/apis/batch:go_default_library", - "//pkg/apis/core:go_default_library", "//pkg/apis/policy/v1beta1:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/replication:go_default_library", "//pkg/generated/openapi:go_default_library", - "//pkg/kubectl:go_default_library", "//pkg/kubelet/client:go_default_library", "//pkg/master:go_default_library", "//pkg/util/env:go_default_library", diff --git a/test/integration/framework/util.go b/test/integration/framework/util.go index afb1d68961e..c9d42a99c48 100644 --- a/test/integration/framework/util.go +++ b/test/integration/framework/util.go @@ -19,22 +19,13 @@ limitations under the License. 
package framework import ( - "io/ioutil" "net/http/httptest" "strings" "testing" - "time" - - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api/testapi" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/kubectl" ) const ( @@ -80,48 +71,3 @@ func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *test func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) { // TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace. } - -// RCFromManifest reads a .json file and returns the rc in it. -func RCFromManifest(fileName string) *v1.ReplicationController { - data, err := ioutil.ReadFile(fileName) - if err != nil { - glog.Fatalf("Unexpected error reading rc manifest %v", err) - } - var controller v1.ReplicationController - if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil { - glog.Fatalf("Unexpected error reading rc manifest %v", err) - } - return &controller -} - -// StopRC stops the rc via kubectl's stop library -func StopRC(rc *v1.ReplicationController, clientset internalclientset.Interface) error { - reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset) - if err != nil || reaper == nil { - return err - } - err = reaper.Stop(rc.Namespace, rc.Name, 0, nil) - if err != nil { - return err - } - return nil -} - -// ScaleRC scales the given rc to the given replicas. 
-func ScaleRC(name, ns string, replicas int32, clientset internalclientset.Interface) (*api.ReplicationController, error) { - scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset) - if err != nil { - return nil, err - } - retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout} - waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout} - err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas) - if err != nil { - return nil, err - } - scaled, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return scaled, nil -} diff --git a/test/utils/BUILD b/test/utils/BUILD index a2e6045933f..da7eeadab95 100644 --- a/test/utils/BUILD +++ b/test/utils/BUILD @@ -44,6 +44,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", ], diff --git a/test/utils/runners.go b/test/utils/runners.go index 2eaf28e48e2..1d71a3eeb62 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -38,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/util/workqueue" batchinternal "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" @@ -105,16 +106,20 @@ type RunObjectConfig interface { GetKind() schema.GroupKind GetClient() clientset.Interface GetInternalClient() internalclientset.Interface + GetScalesGetter() scaleclient.ScalesGetter SetClient(clientset.Interface) SetInternalClient(internalclientset.Interface) + 
SetScalesClient(scaleclient.ScalesGetter) GetReplicas() int GetLabelValue(string) (string, bool) + GetGroupResource() schema.GroupResource } type RCConfig struct { Affinity *v1.Affinity Client clientset.Interface InternalClient internalclientset.Interface + ScalesGetter scaleclient.ScalesGetter Image string Command []string Name string @@ -277,6 +282,10 @@ func (config *DeploymentConfig) GetKind() schema.GroupKind { return extensionsinternal.Kind("Deployment") } +func (config *DeploymentConfig) GetGroupResource() schema.GroupResource { + return extensionsinternal.Resource("deployments") +} + func (config *DeploymentConfig) create() error { deployment := &extensions.Deployment{ ObjectMeta: metav1.ObjectMeta{ @@ -344,6 +353,10 @@ func (config *ReplicaSetConfig) GetKind() schema.GroupKind { return extensionsinternal.Kind("ReplicaSet") } +func (config *ReplicaSetConfig) GetGroupResource() schema.GroupResource { + return extensionsinternal.Resource("replicasets") +} + func (config *ReplicaSetConfig) create() error { rs := &extensions.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ @@ -411,6 +424,10 @@ func (config *JobConfig) GetKind() schema.GroupKind { return batchinternal.Kind("Job") } +func (config *JobConfig) GetGroupResource() schema.GroupResource { + return batchinternal.Resource("jobs") +} + func (config *JobConfig) create() error { job := &batch.Job{ ObjectMeta: metav1.ObjectMeta{ @@ -482,6 +499,10 @@ func (config *RCConfig) GetKind() schema.GroupKind { return api.Kind("ReplicationController") } +func (config *RCConfig) GetGroupResource() schema.GroupResource { + return api.Resource("replicationcontrollers") +} + func (config *RCConfig) GetClient() clientset.Interface { return config.Client } @@ -490,6 +511,10 @@ func (config *RCConfig) GetInternalClient() internalclientset.Interface { return config.InternalClient } +func (config *RCConfig) GetScalesGetter() scaleclient.ScalesGetter { + return config.ScalesGetter +} + func (config *RCConfig) SetClient(c 
clientset.Interface) { config.Client = c } @@ -498,6 +523,10 @@ func (config *RCConfig) SetInternalClient(c internalclientset.Interface) { config.InternalClient = c } +func (config *RCConfig) SetScalesClient(getter scaleclient.ScalesGetter) { + config.ScalesGetter = getter +} + func (config *RCConfig) GetReplicas() int { return config.Replicas } From 50444800b14c9841997619a54dc8eeae3a3ebef9 Mon Sep 17 00:00:00 2001 From: Cosmin Cojocar Date: Fri, 12 Jan 2018 11:10:40 +0100 Subject: [PATCH 748/794] Instrument the Azure API calls for Prometheus monitoring --- pkg/cloudprovider/providers/azure/BUILD | 3 + .../providers/azure/azure_client.go | 259 +++++++++++++++--- .../providers/azure/azure_metrics.go | 82 ++++++ .../providers/azure/azure_metrics_test.go | 39 +++ 4 files changed, 342 insertions(+), 41 deletions(-) create mode 100644 pkg/cloudprovider/providers/azure/azure_metrics.go create mode 100644 pkg/cloudprovider/providers/azure/azure_metrics_test.go diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index d796860b662..8272b20219d 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -19,6 +19,7 @@ go_library( "azure_instances.go", "azure_loadbalancer.go", "azure_managedDiskController.go", + "azure_metrics.go", "azure_routes.go", "azure_storage.go", "azure_storageaccount.go", @@ -48,6 +49,7 @@ go_library( "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", @@ -63,6 +65,7 @@ go_test( name = "go_default_test", srcs = [ "azure_loadbalancer_test.go", + "azure_metrics_test.go", "azure_test.go", 
"azure_util_cache_test.go", "azure_util_test.go", diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index 7cf65fe13e9..e0e2697aef8 100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -158,7 +158,13 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMNa glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName) }() - return az.client.CreateOrUpdate(resourceGroupName, VMName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("vm", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, VMName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { @@ -168,7 +174,10 @@ func (az *azVirtualMachinesClient) Get(resourceGroupName string, VMName string, glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName) }() - return az.client.Get(resourceGroupName, VMName, expand) + mc := newMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, VMName, expand) + mc.Observe(err) + return } func (az *azVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) { @@ -178,7 +187,10 @@ func (az *azVirtualMachinesClient) List(resourceGroupName string) (result comput glog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName) }() - return az.client.List(resourceGroupName) + mc := newMetricContext("vm", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName) + 
mc.Observe(err) + return } func (az *azVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { @@ -217,7 +229,13 @@ func (az *azInterfacesClient) CreateOrUpdate(resourceGroupName string, networkIn glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName) }() - return az.client.CreateOrUpdate(resourceGroupName, networkInterfaceName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, networkInterfaceName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { @@ -227,7 +245,10 @@ func (az *azInterfacesClient) Get(resourceGroupName string, networkInterfaceName glog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName) }() - return az.client.Get(resourceGroupName, networkInterfaceName, expand) + mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, networkInterfaceName, expand) + mc.Observe(err) + return } func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { @@ -237,7 +258,10 @@ func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resource glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) }() - return 
az.client.GetVirtualMachineScaleSetNetworkInterface(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) + mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.GetVirtualMachineScaleSetNetworkInterface(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) + mc.Observe(err) + return } // azLoadBalancersClient implements LoadBalancersClient. @@ -266,7 +290,13 @@ func (az *azLoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBa glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName) }() - return az.client.CreateOrUpdate(resourceGroupName, loadBalancerName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, loadBalancerName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azLoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { @@ -276,7 +306,13 @@ func (az *azLoadBalancersClient) Delete(resourceGroupName string, loadBalancerNa glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName) }() - return az.client.Delete(resourceGroupName, loadBalancerName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, loadBalancerName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azLoadBalancersClient) Get(resourceGroupName string, loadBalancerName string, expand string) 
(result network.LoadBalancer, err error) { @@ -286,7 +322,10 @@ func (az *azLoadBalancersClient) Get(resourceGroupName string, loadBalancerName glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName) }() - return az.client.Get(resourceGroupName, loadBalancerName, expand) + mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, loadBalancerName, expand) + mc.Observe(err) + return } func (az *azLoadBalancersClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) { @@ -296,7 +335,10 @@ func (az *azLoadBalancersClient) List(resourceGroupName string) (result network. glog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName) }() - return az.client.List(resourceGroupName) + mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName) + mc.Observe(err) + return } func (az *azLoadBalancersClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { @@ -335,7 +377,13 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, pu glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName) }() - return az.client.CreateOrUpdate(resourceGroupName, publicIPAddressName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, publicIPAddressName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azPublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { 
@@ -345,7 +393,13 @@ func (az *azPublicIPAddressesClient) Delete(resourceGroupName string, publicIPAd glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName) }() - return az.client.Delete(resourceGroupName, publicIPAddressName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, publicIPAddressName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azPublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { @@ -355,7 +409,10 @@ func (az *azPublicIPAddressesClient) Get(resourceGroupName string, publicIPAddre glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName) }() - return az.client.Get(resourceGroupName, publicIPAddressName, expand) + mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, publicIPAddressName, expand) + mc.Observe(err) + return } func (az *azPublicIPAddressesClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) { @@ -365,7 +422,10 @@ func (az *azPublicIPAddressesClient) List(resourceGroupName string) (result netw glog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName) }() - return az.client.List(resourceGroupName) + mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName) + mc.Observe(err) + return } func (az *azPublicIPAddressesClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { @@ -404,7 +464,13 @@ func (az *azSubnetsClient) 
CreateOrUpdate(resourceGroupName string, virtualNetwo glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - return az.client.CreateOrUpdate(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { @@ -414,7 +480,13 @@ func (az *azSubnetsClient) Delete(resourceGroupName string, virtualNetworkName s glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - return az.client.Delete(resourceGroupName, virtualNetworkName, subnetName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, virtualNetworkName, subnetName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { @@ -424,7 +496,10 @@ func (az *azSubnetsClient) Get(resourceGroupName string, virtualNetworkName stri glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() - return az.client.Get(resourceGroupName, virtualNetworkName, subnetName, expand) + mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID) + result, err = 
az.client.Get(resourceGroupName, virtualNetworkName, subnetName, expand) + mc.Observe(err) + return } func (az *azSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) { @@ -434,7 +509,10 @@ func (az *azSubnetsClient) List(resourceGroupName string, virtualNetworkName str glog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName) }() - return az.client.List(resourceGroupName, virtualNetworkName) + mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName, virtualNetworkName) + mc.Observe(err) + return } // azSecurityGroupsClient implements SecurityGroupsClient. @@ -463,7 +541,13 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(resourceGroupName string, netwo glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - return az.client.CreateOrUpdate(resourceGroupName, networkSecurityGroupName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, networkSecurityGroupName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azSecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { @@ -473,7 +557,13 @@ func (az *azSecurityGroupsClient) Delete(resourceGroupName string, networkSecuri glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - return az.client.Delete(resourceGroupName, networkSecurityGroupName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("security_groups", "delete", resourceGroupName, 
az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, networkSecurityGroupName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azSecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { @@ -483,7 +573,10 @@ func (az *azSecurityGroupsClient) Get(resourceGroupName string, networkSecurityG glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() - return az.client.Get(resourceGroupName, networkSecurityGroupName, expand) + mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, networkSecurityGroupName, expand) + mc.Observe(err) + return } func (az *azSecurityGroupsClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) { @@ -493,7 +586,10 @@ func (az *azSecurityGroupsClient) List(resourceGroupName string) (result network glog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName) }() - return az.client.List(resourceGroupName) + mc := newMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName) + mc.Observe(err) + return } // azVirtualMachineScaleSetsClient implements VirtualMachineScaleSetsClient. 
@@ -522,7 +618,13 @@ func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName stri glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName) }() - return az.client.CreateOrUpdate(resourceGroupName, VMScaleSetName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("vmss", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, VMScaleSetName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { @@ -532,7 +634,10 @@ func (az *azVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScale glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) }() - return az.client.Get(resourceGroupName, VMScaleSetName) + mc := newMetricContext("vmss", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, VMScaleSetName) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetsClient) List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) { @@ -542,7 +647,10 @@ func (az *azVirtualMachineScaleSetsClient) List(resourceGroupName string) (resul glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): end", resourceGroupName) }() - return az.client.List(resourceGroupName) + mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { @@ -562,7 +670,13 @@ func (az 
*azVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName str glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): end", resourceGroupName, VMScaleSetName, VMInstanceIDs) }() - return az.client.UpdateInstances(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("vmss", "update_instances", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.UpdateInstances(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } // azVirtualMachineScaleSetVMsClient implements VirtualMachineScaleSetVMsClient. @@ -591,7 +705,10 @@ func (az *azVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMSca glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() - return az.client.Get(resourceGroupName, VMScaleSetName, instanceID) + mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, VMScaleSetName, instanceID) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) { @@ -601,7 +718,10 @@ func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName s glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() - return az.client.GetInstanceView(resourceGroupName, VMScaleSetName, instanceID) + mc := newMetricContext("vmssvm", "get_instance_view", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.GetInstanceView(resourceGroupName, VMScaleSetName, instanceID) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetVMsClient) 
List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) { @@ -611,7 +731,10 @@ func (az *azVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virt glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) }() - return az.client.List(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) + mc := newMetricContext("vmssvm", "list", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.List(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetVMsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { @@ -650,7 +773,13 @@ func (az *azRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableNam glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) }() - return az.client.CreateOrUpdate(resourceGroupName, routeTableName, routeName, routeParameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, routeTableName, routeName, routeParameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { @@ -660,7 +789,13 @@ func (az *azRoutesClient) Delete(resourceGroupName string, routeTableName string glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) }() - return 
az.client.Delete(resourceGroupName, routeTableName, routeName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, routeTableName, routeName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } // azRouteTablesClient implements RouteTablesClient. @@ -689,7 +824,13 @@ func (az *azRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTab glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName) }() - return az.client.CreateOrUpdate(resourceGroupName, routeTableName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, routeTableName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azRouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { @@ -699,7 +840,10 @@ func (az *azRouteTablesClient) Get(resourceGroupName string, routeTableName stri glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName) }() - return az.client.Get(resourceGroupName, routeTableName, expand) + mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, routeTableName, expand) + mc.Observe(err) + return } // azStorageAccountClient implements StorageAccountClient. 
@@ -727,7 +871,13 @@ func (az *azStorageAccountClient) Create(resourceGroupName string, accountName s glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName) }() - return az.client.Create(resourceGroupName, accountName, parameters, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Create(resourceGroupName, accountName, parameters, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { @@ -737,7 +887,10 @@ func (az *azStorageAccountClient) Delete(resourceGroupName string, accountName s glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName) }() - return az.client.Delete(resourceGroupName, accountName) + mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Delete(resourceGroupName, accountName) + mc.Observe(err) + return } func (az *azStorageAccountClient) ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) { @@ -747,7 +900,10 @@ func (az *azStorageAccountClient) ListKeys(resourceGroupName string, accountName glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName) }() - return az.client.ListKeys(resourceGroupName, accountName) + mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListKeys(resourceGroupName, accountName) + mc.Observe(err) + return } func (az *azStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) { @@ -757,7 +913,10 @@ func (az *azStorageAccountClient) ListByResourceGroup(resourceGroupName 
string) glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName) }() - return az.client.ListByResourceGroup(resourceGroupName) + mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListByResourceGroup(resourceGroupName) + mc.Observe(err) + return } func (az *azStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) { @@ -767,7 +926,10 @@ func (az *azStorageAccountClient) GetProperties(resourceGroupName string, accoun glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName) }() - return az.client.GetProperties(resourceGroupName, accountName) + mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.GetProperties(resourceGroupName, accountName) + mc.Observe(err) + return } // azDisksClient implements DisksClient. 
@@ -795,7 +957,13 @@ func (az *azDisksClient) CreateOrUpdate(resourceGroupName string, diskName strin glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName) }() - return az.client.CreateOrUpdate(resourceGroupName, diskName, diskParameter, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.CreateOrUpdate(resourceGroupName, diskName, diskParameter, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) { @@ -805,7 +973,13 @@ func (az *azDisksClient) Delete(resourceGroupName string, diskName string, cance glog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) }() - return az.client.Delete(resourceGroupName, diskName, cancel) + errChan := make(chan error, 1) + mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID) + resultChan, proxyErrChan := az.client.Delete(resourceGroupName, diskName, cancel) + err := <-proxyErrChan + mc.Observe(err) + errChan <- err + return resultChan, errChan } func (az *azDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) { @@ -815,5 +989,8 @@ func (az *azDisksClient) Get(resourceGroupName string, diskName string) (result glog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName) }() - return az.client.Get(resourceGroupName, diskName) + mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.Get(resourceGroupName, diskName) + mc.Observe(err) + return } diff --git a/pkg/cloudprovider/providers/azure/azure_metrics.go b/pkg/cloudprovider/providers/azure/azure_metrics.go new file mode 100644 index 00000000000..2ef21bb5a5c --- 
/dev/null +++ b/pkg/cloudprovider/providers/azure/azure_metrics.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type apiCallMetrics struct { + latency *prometheus.HistogramVec + errors *prometheus.CounterVec +} + +var ( + metricLabels = []string{ + "request", // API function that is being invoked + "resource_group", // Resource group of the resource being monitored + "subscription_id", // Subscription ID of the resource being monitored + } + + apiMetrics = registerAPIMetrics(metricLabels...) 
+) + +type metricContext struct { + start time.Time + attributes []string +} + +func newMetricContext(prefix, request, resouceGroup, subscriptionID string) *metricContext { + return &metricContext{ + start: time.Now(), + attributes: []string{prefix + "_" + request, resouceGroup, subscriptionID}, + } +} + +func (mc *metricContext) Observe(err error) { + apiMetrics.latency.WithLabelValues(mc.attributes...).Observe( + time.Since(mc.start).Seconds()) + if err != nil { + apiMetrics.errors.WithLabelValues(mc.attributes...).Inc() + } +} + +func registerAPIMetrics(attributes ...string) *apiCallMetrics { + metrics := &apiCallMetrics{ + latency: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "cloudprovider_azure_api_request_duration_seconds", + Help: "Latency of an Azure API call", + }, + attributes, + ), + errors: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cloudprovider_azure_api_request_errors", + Help: "Number of errors for an Azure API call", + }, + attributes, + ), + } + + prometheus.MustRegister(metrics.latency) + prometheus.MustRegister(metrics.errors) + + return metrics +} diff --git a/pkg/cloudprovider/providers/azure/azure_metrics_test.go b/pkg/cloudprovider/providers/azure/azure_metrics_test.go new file mode 100644 index 00000000000..978c6b50540 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_metrics_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAzureMetricLabelCardinality(t *testing.T) { + mc := newMetricContext("test", "create", "resource_group", "subscription_id") + assert.Len(t, mc.attributes, len(metricLabels), "cardinalities of labels and values must match") +} + +func TestAzureMetricLabelPrefix(t *testing.T) { + mc := newMetricContext("prefix", "request", "resource_group", "subscription_id") + found := false + for _, attribute := range mc.attributes { + if attribute == "prefix_request" { + found = true + } + } + assert.True(t, found, "request label must be prefixed") +} From 0601916d78fa0e5dac790ddf32041040f7c10916 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Fri, 12 Jan 2018 18:12:32 +0800 Subject: [PATCH 749/794] add KUBE_ROOT in directory --- build/common.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/common.sh b/build/common.sh index 8f8254ca228..f5716709b91 100755 --- a/build/common.sh +++ b/build/common.sh @@ -451,8 +451,8 @@ function kube::build::build_image() { cp /etc/localtime "${LOCAL_OUTPUT_BUILD_CONTEXT}/" - cp build/build-image/Dockerfile "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" - cp build/build-image/rsyncd.sh "${LOCAL_OUTPUT_BUILD_CONTEXT}/" + cp ${KUBE_ROOT}/build/build-image/Dockerfile "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" + cp ${KUBE_ROOT}/build/build-image/rsyncd.sh "${LOCAL_OUTPUT_BUILD_CONTEXT}/" dd if=/dev/urandom bs=512 count=1 2>/dev/null | LC_ALL=C tr -dc 'A-Za-z0-9' | dd bs=32 count=1 2>/dev/null > "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password" chmod go= "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password" From 9958389eb9771bb964f6e01ce04c27129c6c4417 Mon Sep 17 00:00:00 2001 From: FengyunPan Date: Fri, 12 Jan 2018 18:57:46 +0800 Subject: [PATCH 750/794] The lbaas.opts.SubnetId should be set by subnet id. Fix #58145 The getSubnetIDForLB() should return subnet id rather than net id. 
--- pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 036af670bc7..c605dd19e64 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -537,7 +537,7 @@ func getSubnetIDForLB(compute *gophercloud.ServiceClient, node v1.Node) (string, for _, intf := range interfaces { for _, fixedIP := range intf.FixedIPs { if fixedIP.IPAddress == ipAddress { - return intf.NetID, nil + return fixedIP.SubnetID, nil } } } From 90bc1265cf9fd9a5c98f91274be2a0c3f4d3f528 Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Fri, 12 Jan 2018 20:09:07 +0800 Subject: [PATCH 751/794] Fix endpoint not work issue --- pkg/kubelet/cm/deviceplugin/endpoint_test.go | 55 ++++++++++++++++---- 1 file changed, 44 insertions(+), 11 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/endpoint_test.go b/pkg/kubelet/cm/deviceplugin/endpoint_test.go index 226148a6b06..f4634db85f4 100644 --- a/pkg/kubelet/cm/deviceplugin/endpoint_test.go +++ b/pkg/kubelet/cm/deviceplugin/endpoint_test.go @@ -19,7 +19,6 @@ package deviceplugin import ( "path" "testing" - "time" "github.com/stretchr/testify/require" @@ -54,22 +53,56 @@ func TestRun(t *testing.T) { {ID: "AThirdDeviceId", Health: pluginapi.Healthy}, } - p, e := esetup(t, devs, socket, "mock", func(n string, a, u, r []pluginapi.Device) { - require.Len(t, a, 1) - require.Len(t, u, 1) - require.Len(t, r, 1) + callbackCount := 0 + callbackChan := make(chan int) + callback := func(n string, a, u, r []pluginapi.Device) { + // Should be called twice: + // one for plugin registration, one for plugin update. 
+ if callbackCount > 2 { + t.FailNow() + } - require.Equal(t, a[0].ID, updated[1].ID) + // Check plugin registration + if callbackCount == 0 { + require.Len(t, a, 2) + require.Len(t, u, 0) + require.Len(t, r, 0) + } - require.Equal(t, u[0].ID, updated[0].ID) - require.Equal(t, u[0].Health, updated[0].Health) + // Check plugin update + if callbackCount == 1 { + require.Len(t, a, 1) + require.Len(t, u, 1) + require.Len(t, r, 1) - require.Equal(t, r[0].ID, devs[1].ID) - }) + require.Equal(t, a[0].ID, updated[1].ID) + require.Equal(t, u[0].ID, updated[0].ID) + require.Equal(t, u[0].Health, updated[0].Health) + require.Equal(t, r[0].ID, devs[1].ID) + } + + callbackCount++ + callbackChan <- callbackCount + } + + p, e := esetup(t, devs, socket, "mock", callback) defer ecleanup(t, p, e) go e.run() + // Wait for the first callback to be issued. + select { + case <-callbackChan: + break + } + p.Update(updated) + + // Wait for the second callback to be issued. + select { + case <-callbackChan: + break + } + time.Sleep(time.Second) e.mutex.Lock() @@ -102,7 +135,7 @@ func esetup(t *testing.T, devs []*pluginapi.Device, socket, resourceName string, err := p.Start() require.NoError(t, err) - e, err := newEndpointImpl(socket, "mock", make(map[string]pluginapi.Device), func(n string, a, u, r []pluginapi.Device) {}) + e, err := newEndpointImpl(socket, resourceName, make(map[string]pluginapi.Device), callback) require.NoError(t, err) return p, e From 296ae178d91de909d89005b8d495203f8bf97abd Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Fri, 12 Jan 2018 16:29:29 +0100 Subject: [PATCH 752/794] hack/generate-bindata.sh: make output cleanly by suppressing pushd/popd output. --- hack/generate-bindata.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/generate-bindata.sh b/hack/generate-bindata.sh index 4c843c5cdc8..40605fb419d 100755 --- a/hack/generate-bindata.sh +++ b/hack/generate-bindata.sh @@ -39,7 +39,7 @@ if ! 
which go-bindata &>/dev/null ; then fi # run the generation from the root directory for stable output -pushd "${KUBE_ROOT}" +pushd "${KUBE_ROOT}" >/dev/null # These are files for e2e tests. BINDATA_OUTPUT="test/e2e/generated/bindata.go" @@ -84,4 +84,4 @@ fi rm -f "${BINDATA_OUTPUT}.tmp" -popd +popd >/dev/null From 6831581f1c86003d584a6dd52591bf59035f9f7c Mon Sep 17 00:00:00 2001 From: Ross Light Date: Fri, 12 Jan 2018 10:16:13 -0800 Subject: [PATCH 753/794] Bump fluentd-gcp version --- cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index ac9fdcd0053..130e84aaccd 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -1,13 +1,13 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: - name: fluentd-gcp-v2.0.13 + name: fluentd-gcp-v2.0.14 namespace: kube-system labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v2.0.13 + version: v2.0.14 spec: updateStrategy: type: RollingUpdate @@ -16,7 +16,7 @@ spec: labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" - version: v2.0.13 + version: v2.0.14 # This annotation ensures that fluentd does not get evicted if the node # supports critical pod annotation based priority scheme. # Note that this does not guarantee admission on the nodes (#40573). 
@@ -27,7 +27,7 @@ spec: dnsPolicy: Default containers: - name: fluentd-gcp - image: gcr.io/google-containers/fluentd-gcp:2.0.13 + image: gcr.io/google-containers/fluentd-gcp:2.0.14 env: - name: FLUENTD_ARGS value: --no-supervisor -q From 4711bccd057d3934ffb265c4ca140d98426918e2 Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Wed, 10 Jan 2018 19:20:21 -0500 Subject: [PATCH 754/794] Bump runc to d5b4a3e This fixes a race condition in runc/systemd at container creation time opencontainers/runc#1683 Signed-off-by: vikaschoudhary16 --- Godeps/Godeps.json | 274 ++--- Godeps/LICENSES | 1041 ++++++++++------- vendor/BUILD | 3 +- .../github.com/containerd/console/.travis.yml | 17 + vendor/github.com/containerd/console/BUILD | 71 ++ .../console/LICENSE} | 14 +- .../github.com/containerd/console/README.md | 17 + .../github.com/containerd/console/console.go | 62 + .../containerd/console/console_linux.go | 255 ++++ .../containerd/console/console_unix.go | 142 +++ .../containerd/console/console_windows.go | 200 ++++ .../containerd/console/tc_darwin.go | 37 + .../containerd/console/tc_freebsd.go | 29 + .../github.com/containerd/console/tc_linux.go | 37 + .../containerd/console/tc_solaris_cgo.go | 35 + .../containerd/console/tc_solaris_nocgo.go | 31 + .../github.com/containerd/console/tc_unix.go | 75 ++ .../cyphar/filepath-securejoin/.travis.yml | 19 + .../cyphar/filepath-securejoin/BUILD | 26 + .../filepath-securejoin/LICENSE} | 3 +- .../cyphar/filepath-securejoin/README.md | 65 + .../cyphar/filepath-securejoin/VERSION | 1 + .../cyphar/filepath-securejoin/join.go | 135 +++ .../cyphar/filepath-securejoin/vendor.conf | 1 + .../cyphar/filepath-securejoin/vfs.go | 41 + .../docker/docker/pkg/symlink/BUILD | 68 -- .../docker/docker/pkg/symlink/README.md | 6 - .../docker/docker/pkg/symlink/fs.go | 144 --- .../docker/docker/pkg/symlink/fs_unix.go | 15 - .../docker/docker/pkg/symlink/fs_windows.go | 169 --- .../cadvisor/container/docker/handler.go | 1 + 
vendor/github.com/google/cadvisor/fs/BUILD | 1 + vendor/github.com/google/cadvisor/fs/fs.go | 13 +- .../google/cadvisor/pages/static/assets.go | 4 +- .../google/cadvisor/pages/templates.go | 2 +- .../opencontainers/runc/libcontainer/BUILD | 27 +- .../opencontainers/runc/libcontainer/SPEC.md | 86 +- .../runc/libcontainer/apparmor/BUILD | 1 - .../runc/libcontainer/apparmor/apparmor.go | 37 +- .../runc/libcontainer/cgroups/BUILD | 1 - .../runc/libcontainer/cgroups/fs/apply_raw.go | 13 + .../runc/libcontainer/cgroups/fs/freezer.go | 13 +- .../libcontainer/cgroups/rootless/rootless.go | 128 -- .../cgroups/systemd/apply_nosystemd.go | 4 +- .../cgroups/systemd/apply_systemd.go | 14 +- .../runc/libcontainer/compat_1.5_linux.go | 10 - .../runc/libcontainer/configs/BUILD | 10 +- .../configs/cgroup_unsupported.go | 6 - .../runc/libcontainer/configs/config.go | 4 + .../libcontainer/configs/device_defaults.go | 2 +- .../runc/libcontainer/configs/intelrdt.go | 7 + .../runc/libcontainer/configs/validate/BUILD | 1 + .../libcontainer/configs/validate/rootless.go | 68 +- .../configs/validate/validator.go | 17 + .../runc/libcontainer/console.go | 17 - .../runc/libcontainer/console_freebsd.go | 13 - .../runc/libcontainer/console_linux.go | 129 +- .../runc/libcontainer/console_solaris.go | 11 - .../runc/libcontainer/console_windows.go | 30 - .../runc/libcontainer/container_linux.go | 149 ++- .../runc/libcontainer/container_solaris.go | 20 - .../runc/libcontainer/container_windows.go | 20 - .../runc/libcontainer/criu_opts_linux.go | 4 +- .../runc/libcontainer/criu_opts_windows.go | 6 - .../runc/libcontainer/factory_linux.go | 77 +- .../runc/libcontainer/init_linux.go | 103 +- .../{cgroups/rootless => intelrdt}/BUILD | 8 +- .../runc/libcontainer/intelrdt/intelrdt.go | 553 +++++++++ .../runc/libcontainer/intelrdt/stats.go | 24 + .../runc/libcontainer/keys/keyctl.go | 2 +- .../runc/libcontainer/message_linux.go | 2 + .../runc/libcontainer/mount/BUILD | 30 + 
.../runc/libcontainer/mount/mount.go | 23 + .../runc/libcontainer/mount/mount_linux.go | 82 ++ .../runc/libcontainer/mount/mountinfo.go | 40 + .../runc/libcontainer/process.go | 4 + .../runc/libcontainer/process_linux.go | 80 +- .../runc/libcontainer/rootfs_linux.go | 38 +- .../libcontainer/seccomp/seccomp_linux.go | 47 +- .../runc/libcontainer/setgroups_linux.go | 11 - .../runc/libcontainer/setns_init_linux.go | 13 +- .../runc/libcontainer/standard_init_linux.go | 42 +- .../runc/libcontainer/state_linux.go | 5 + .../runc/libcontainer/stats_freebsd.go | 5 - .../runc/libcontainer/stats_linux.go | 6 +- .../runc/libcontainer/stats_solaris.go | 7 - .../runc/libcontainer/stats_windows.go | 5 - .../runc/libcontainer/system/BUILD | 17 +- .../runc/libcontainer/system/linux.go | 11 + ...scall_linux_arm.go => syscall_linux_32.go} | 3 +- .../libcontainer/system/syscall_linux_386.go | 25 - .../libcontainer/system/syscall_linux_64.go | 3 +- .../runc/libcontainer/system/sysconfig.go | 2 +- .../runc/libcontainer/user/BUILD | 12 - .../libcontainer/user/lookup_unsupported.go | 38 - .../runc/libcontainer/utils/cmsg.go | 10 +- .../k8s.io/kube-openapi/pkg/generators/README | 31 + .../kube-openapi/pkg/generators/openapi.go | 90 +- .../kube-openapi/pkg/util/proto/document.go | 14 +- .../kube-openapi/pkg/util/proto/openapi.go | 25 + .../pkg/util/proto/validation/types.go | 13 +- 101 files changed, 3694 insertions(+), 1709 deletions(-) create mode 100644 vendor/github.com/containerd/console/.travis.yml create mode 100644 vendor/github.com/containerd/console/BUILD rename vendor/github.com/{docker/docker/pkg/symlink/LICENSE.APACHE => containerd/console/LICENSE} (94%) create mode 100644 vendor/github.com/containerd/console/README.md create mode 100644 vendor/github.com/containerd/console/console.go create mode 100644 vendor/github.com/containerd/console/console_linux.go create mode 100644 vendor/github.com/containerd/console/console_unix.go create mode 100644 
vendor/github.com/containerd/console/console_windows.go create mode 100644 vendor/github.com/containerd/console/tc_darwin.go create mode 100644 vendor/github.com/containerd/console/tc_freebsd.go create mode 100644 vendor/github.com/containerd/console/tc_linux.go create mode 100644 vendor/github.com/containerd/console/tc_solaris_cgo.go create mode 100644 vendor/github.com/containerd/console/tc_solaris_nocgo.go create mode 100644 vendor/github.com/containerd/console/tc_unix.go create mode 100644 vendor/github.com/cyphar/filepath-securejoin/.travis.yml create mode 100644 vendor/github.com/cyphar/filepath-securejoin/BUILD rename vendor/github.com/{docker/docker/pkg/symlink/LICENSE.BSD => cyphar/filepath-securejoin/LICENSE} (92%) create mode 100644 vendor/github.com/cyphar/filepath-securejoin/README.md create mode 100644 vendor/github.com/cyphar/filepath-securejoin/VERSION create mode 100644 vendor/github.com/cyphar/filepath-securejoin/join.go create mode 100644 vendor/github.com/cyphar/filepath-securejoin/vendor.conf create mode 100644 vendor/github.com/cyphar/filepath-securejoin/vfs.go delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/BUILD delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/README.md delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs.go delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_windows.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console.go delete mode 100644 
vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_windows.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_windows.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go rename vendor/github.com/opencontainers/runc/libcontainer/{cgroups/rootless => intelrdt}/BUILD (65%) create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/intelrdt/intelrdt.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/intelrdt/stats.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/BUILD create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/mount.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/mount_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/mountinfo.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go rename vendor/github.com/opencontainers/runc/libcontainer/system/{syscall_linux_arm.go => syscall_linux_32.go} (93%) delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 45b60e8306b..44d80a60754 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -437,8 +437,13 @@ }, { "ImportPath": 
"github.com/container-storage-interface/spec/lib/go/csi", + "Comment": "v0.1.0", "Rev": "9e88e4bfabeca1b8e4810555815f112159292ada" }, + { + "ImportPath": "github.com/containerd/console", + "Rev": "84eeaae905fa414d03e07bcd6c8d3f19e7cf180e" + }, { "ImportPath": "github.com/containerd/containerd/api/services/containers/v1", "Comment": "v1.0.0-beta.2-159-g27d450a", @@ -968,6 +973,11 @@ "Comment": "v1.0.4", "Rev": "71acacd42f85e5e82f70a55327789582a5200a90" }, + { + "ImportPath": "github.com/cyphar/filepath-securejoin", + "Comment": "v0.2.1-1-gae69057", + "Rev": "ae69057f2299fb9e5ba2df738607e6a505b74ab6" + }, { "ImportPath": "github.com/d2g/dhcp4", "Rev": "a1d1b6c41b1ce8a71a5121a9cee31809c4707d9c" @@ -1119,11 +1129,6 @@ "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, - { - "ImportPath": "github.com/docker/docker/pkg/symlink", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", - "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" - }, { "ImportPath": "github.com/docker/docker/pkg/system", "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616f", @@ -1475,218 +1480,218 @@ }, { "ImportPath": "github.com/google/cadvisor/accelerators", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/api", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/client/v2", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": 
"v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/common", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/containerd", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/crio", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/docker", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/libcontainer", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/raw", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": 
"github.com/google/cadvisor/container/rkt", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/container/systemd", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/devicemapper", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/healthz", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/http", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/http/mux", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": 
"v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/machine", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/raw", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/rkt", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/pages", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": 
"github.com/google/cadvisor/pages/static", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/cloudinfo", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/docker", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/oomparser", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + 
"Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/sysfs", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/utils/sysinfo", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/validate", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/cadvisor/zfs", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.24.0-alpha1-322-g13d955d", + "Rev": "13d955d6a9faa2f70387354ff17df3d614a6c37b" }, { "ImportPath": "github.com/google/certificate-transparency/go", @@ -2339,78 +2344,83 @@ }, { "ImportPath": "github.com/opencontainers/runc/libcontainer", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": 
"4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" - }, - { - "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/rootless", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" + }, + { + "ImportPath": "github.com/opencontainers/runc/libcontainer/intelrdt", + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/keys", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + 
"Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" + }, + { + "ImportPath": "github.com/opencontainers/runc/libcontainer/mount", + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/system", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/user", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/utils", - "Comment": "v1.0.0-rc4-50-g4d6e672", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-197-gd5b4a3e", + "Rev": "d5b4a3eddbe4c890843da971b64f45a0f023f4db" }, { "ImportPath": "github.com/opencontainers/runtime-spec/specs-go", @@ -3212,35 +3222,35 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/aggregator", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": 
"39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/generators", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto/validation", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" }, { "ImportPath": "k8s.io/utils/clock", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 8021a07e90e..3f809458a4d 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -12724,6 +12724,215 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/containerd/console licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/containerd/console/LICENSE 86d3f3a95c324c9479bd8986968f4327 +================================================================================ + + ================================================================================ = vendor/github.com/containerd/containerd/api/services/containers/v1 licensed under: = @@ -34676,6 +34885,42 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/cyphar/filepath-securejoin licensed under: = + +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + += vendor/github.com/cyphar/filepath-securejoin/LICENSE 8d322afab99e1998dbfcc712f94e824d +================================================================================ + + ================================================================================ = vendor/github.com/d2g/dhcp4 licensed under: = @@ -40204,205 +40449,6 @@ Apache License ================================================================================ -================================================================================ -= vendor/github.com/docker/docker/pkg/symlink licensed under: = - - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2017 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - -= vendor/github.com/docker/docker/LICENSE 9740d093a080530b5c5c6573df9af45a -================================================================================ - - ================================================================================ = vendor/github.com/docker/docker/pkg/system licensed under: = @@ -75650,205 +75696,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ -================================================================================ -= vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless licensed under: = - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- -= vendor/github.com/opencontainers/runc/LICENSE 435b266b3899aa8a959f17d41c56def8 -================================================================================ - - ================================================================================ = vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd licensed under: = @@ -76645,6 +76492,205 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/opencontainers/runc/libcontainer/intelrdt licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/opencontainers/runc/LICENSE 435b266b3899aa8a959f17d41c56def8 +================================================================================ + + ================================================================================ = vendor/github.com/opencontainers/runc/libcontainer/keys licensed under: = @@ -76844,6 +76890,205 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/opencontainers/runc/libcontainer/mount licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/opencontainers/runc/LICENSE 435b266b3899aa8a959f17d41c56def8 +================================================================================ + + ================================================================================ = vendor/github.com/opencontainers/runc/libcontainer/seccomp licensed under: = diff --git a/vendor/BUILD b/vendor/BUILD index 20495c2248a..f48108ef5a6 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -62,6 +62,7 @@ filegroup( "//vendor/github.com/codedellemc/goscaleio:all-srcs", "//vendor/github.com/codegangsta/negroni:all-srcs", "//vendor/github.com/container-storage-interface/spec/lib/go/csi:all-srcs", + "//vendor/github.com/containerd/console:all-srcs", "//vendor/github.com/containerd/containerd/api/services/containers/v1:all-srcs", "//vendor/github.com/containerd/containerd/api/services/tasks/v1:all-srcs", "//vendor/github.com/containerd/containerd/api/services/version/v1:all-srcs", @@ -135,6 +136,7 @@ filegroup( "//vendor/github.com/coreos/pkg/timeutil:all-srcs", "//vendor/github.com/coreos/rkt/api/v1alpha:all-srcs", "//vendor/github.com/cpuguy83/go-md2man/md2man:all-srcs", + "//vendor/github.com/cyphar/filepath-securejoin:all-srcs", "//vendor/github.com/d2g/dhcp4:all-srcs", "//vendor/github.com/d2g/dhcp4client:all-srcs", "//vendor/github.com/davecgh/go-spew/spew:all-srcs", @@ -151,7 +153,6 @@ filegroup( "//vendor/github.com/docker/docker/pkg/longpath:all-srcs", "//vendor/github.com/docker/docker/pkg/mount:all-srcs", "//vendor/github.com/docker/docker/pkg/stdcopy:all-srcs", - "//vendor/github.com/docker/docker/pkg/symlink:all-srcs", "//vendor/github.com/docker/docker/pkg/system:all-srcs", "//vendor/github.com/docker/docker/pkg/term:all-srcs", "//vendor/github.com/docker/docker/pkg/tlsconfig:all-srcs", diff --git a/vendor/github.com/containerd/console/.travis.yml b/vendor/github.com/containerd/console/.travis.yml new file mode 100644 index 00000000000..ba93012c767 --- /dev/null +++ 
b/vendor/github.com/containerd/console/.travis.yml @@ -0,0 +1,17 @@ +language: go +go: + - 1.9.x + - tip + +go_import_path: github.com/containerd/console + +install: + - go get -d + - GOOS=windows go get -d + - GOOS=solaris go get -d + +script: + - go test -race + - GOOS=windows go test + - GOOS=solaris go build + - GOOS=solaris go test -c diff --git a/vendor/github.com/containerd/console/BUILD b/vendor/github.com/containerd/console/BUILD new file mode 100644 index 00000000000..8b43101917a --- /dev/null +++ b/vendor/github.com/containerd/console/BUILD @@ -0,0 +1,71 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "console.go", + ] + select({ + "@io_bazel_rules_go//go/platform:darwin": [ + "console_unix.go", + "tc_darwin.go", + "tc_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "console_unix.go", + "tc_freebsd.go", + "tc_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "console_linux.go", + "console_unix.go", + "tc_linux.go", + "tc_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "console_unix.go", + "tc_solaris_cgo.go", + "tc_solaris_nocgo.go", + "tc_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "console_windows.go", + ], + "//conditions:default": [], + }), + cgo = True, + importpath = "github.com/containerd/console", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/pkg/errors:go_default_library", + 
"//vendor/golang.org/x/sys/windows:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/containerd/console/LICENSE similarity index 94% rename from vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE rename to vendor/github.com/containerd/console/LICENSE index b9fbf3c98fb..261eeb9e9f8 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE +++ b/vendor/github.com/containerd/console/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -176,7 +175,18 @@ END OF TERMS AND CONDITIONS - Copyright 2014-2017 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md new file mode 100644 index 00000000000..4c56d9d134a --- /dev/null +++ b/vendor/github.com/containerd/console/README.md @@ -0,0 +1,17 @@ +# console + +[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console) + +Golang package for dealing with consoles. Light on deps and a simple API. + +## Modifying the current process + +```go +current := console.Current() +defer current.Reset() + +if err := current.SetRaw(); err != nil { +} +ws, err := current.Size() +current.Resize(ws) +``` diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go new file mode 100644 index 00000000000..bf2798fda37 --- /dev/null +++ b/vendor/github.com/containerd/console/console.go @@ -0,0 +1,62 @@ +package console + +import ( + "errors" + "io" + "os" +) + +var ErrNotAConsole = errors.New("provided file is not a console") + +type Console interface { + io.Reader + io.Writer + io.Closer + + // Resize resizes the console to the provided window size + Resize(WinSize) error + // ResizeFrom resizes the calling console to the size of the + // provided console + ResizeFrom(Console) error + // SetRaw sets the console in raw mode + SetRaw() error + // DisableEcho disables echo on the console + DisableEcho() error + // Reset restores the console to its orignal state + Reset() error + // Size returns the window size of the console + Size() (WinSize, error) + // Fd returns the console's file descriptor + Fd() uintptr + // Name returns the console's file name + Name() string +} + +// WinSize specifies the window size of the console +type WinSize struct { + // Height of the console + Height uint16 + // Width of the console + Width uint16 + x uint16 + y uint16 +} + +// Current returns the current processes console +func Current() Console { + c, err := ConsoleFromFile(os.Stdin) + if err != 
nil { + // stdin should always be a console for the design + // of this function + panic(err) + } + return c +} + +// ConsoleFromFile returns a console using the provided file +func ConsoleFromFile(f *os.File) (Console, error) { + if err := checkConsole(f); err != nil { + return nil, err + } + return newMaster(f) +} diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go new file mode 100644 index 00000000000..c963729296d --- /dev/null +++ b/vendor/github.com/containerd/console/console_linux.go @@ -0,0 +1,255 @@ +// +build linux + +package console + +import ( + "io" + "os" + "sync" + + "golang.org/x/sys/unix" +) + +const ( + maxEvents = 128 +) + +// Epoller manages multiple epoll consoles using edge-triggered epoll api so we +// dont have to deal with repeated wake-up of EPOLLER or EPOLLHUP. +// For more details, see: +// - https://github.com/systemd/systemd/pull/4262 +// - https://github.com/moby/moby/issues/27202 +// +// Example usage of Epoller and EpollConsole can be as follow: +// +// epoller, _ := NewEpoller() +// epollConsole, _ := epoller.Add(console) +// go epoller.Wait() +// var ( +// b bytes.Buffer +// wg sync.WaitGroup +// ) +// wg.Add(1) +// go func() { +// io.Copy(&b, epollConsole) +// wg.Done() +// }() +// // perform I/O on the console +// epollConsole.Shutdown(epoller.CloseConsole) +// wg.Wait() +// epollConsole.Close() +type Epoller struct { + efd int + mu sync.Mutex + fdMapping map[int]*EpollConsole +} + +// NewEpoller returns an instance of epoller with a valid epoll fd. +func NewEpoller() (*Epoller, error) { + efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if err != nil { + return nil, err + } + return &Epoller{ + efd: efd, + fdMapping: make(map[int]*EpollConsole), + }, nil +} + +// Add creates a epoll console based on the provided console. The console will +// be registered with EPOLLET (i.e. 
using edge-triggered notification) and its +// file descriptor will be set to non-blocking mode. After this, user should use +// the return console to perform I/O. +func (e *Epoller) Add(console Console) (*EpollConsole, error) { + sysfd := int(console.Fd()) + // Set sysfd to non-blocking mode + if err := unix.SetNonblock(sysfd, true); err != nil { + return nil, err + } + + ev := unix.EpollEvent{ + Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET, + Fd: int32(sysfd), + } + if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil { + return nil, err + } + ef := &EpollConsole{ + Console: console, + sysfd: sysfd, + readc: sync.NewCond(&sync.Mutex{}), + writec: sync.NewCond(&sync.Mutex{}), + } + e.mu.Lock() + e.fdMapping[sysfd] = ef + e.mu.Unlock() + return ef, nil +} + +// Wait starts the loop to wait for its consoles' notifications and signal +// appropriate console that it can perform I/O. +func (e *Epoller) Wait() error { + events := make([]unix.EpollEvent, maxEvents) + for { + n, err := unix.EpollWait(e.efd, events, -1) + if err != nil { + // EINTR: The call was interrupted by a signal handler before either + // any of the requested events occurred or the timeout expired + if err == unix.EINTR { + continue + } + return err + } + for i := 0; i < n; i++ { + ev := &events[i] + // the console is ready to be read from + if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 { + if epfile := e.getConsole(int(ev.Fd)); epfile != nil { + epfile.signalRead() + } + } + // the console is ready to be written to + if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 { + if epfile := e.getConsole(int(ev.Fd)); epfile != nil { + epfile.signalWrite() + } + } + } + } +} + +// Close unregister the console's file descriptor from epoll interface +func (e *Epoller) CloseConsole(fd int) error { + e.mu.Lock() + defer e.mu.Unlock() + delete(e.fdMapping, fd) + return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{}) +} + 
+func (e *Epoller) getConsole(sysfd int) *EpollConsole { + e.mu.Lock() + f := e.fdMapping[sysfd] + e.mu.Unlock() + return f +} + +// Close the epoll fd +func (e *Epoller) Close() error { + return unix.Close(e.efd) +} + +// EpollConsole acts like a console but register its file descriptor with a +// epoll fd and uses epoll API to perform I/O. +type EpollConsole struct { + Console + readc *sync.Cond + writec *sync.Cond + sysfd int + closed bool +} + +// Read reads up to len(p) bytes into p. It returns the number of bytes read +// (0 <= n <= len(p)) and any error encountered. +// +// If the console's read returns EAGAIN or EIO, we assumes that its a +// temporary error because the other side went away and wait for the signal +// generated by epoll event to continue. +func (ec *EpollConsole) Read(p []byte) (n int, err error) { + var read int + ec.readc.L.Lock() + defer ec.readc.L.Unlock() + for { + read, err = ec.Console.Read(p[n:]) + n += read + if err != nil { + var hangup bool + if perr, ok := err.(*os.PathError); ok { + hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) + } else { + hangup = (err == unix.EAGAIN || err == unix.EIO) + } + // if the other end disappear, assume this is temporary and wait for the + // signal to continue again. Unless we didnt read anything and the + // console is already marked as closed then we should exit + if hangup && !(n == 0 && len(p) > 0 && ec.closed) { + ec.readc.Wait() + continue + } + } + break + } + // if we didnt read anything then return io.EOF to end gracefully + if n == 0 && len(p) > 0 && err == nil { + err = io.EOF + } + // signal for others that we finished the read + ec.readc.Signal() + return n, err +} + +// Writes len(p) bytes from p to the console. It returns the number of bytes +// written from p (0 <= n <= len(p)) and any error encountered that caused +// the write to stop early. 
+// +// If writes to the console returns EAGAIN or EIO, we assumes that its a +// temporary error because the other side went away and wait for the signal +// generated by epoll event to continue. +func (ec *EpollConsole) Write(p []byte) (n int, err error) { + var written int + ec.writec.L.Lock() + defer ec.writec.L.Unlock() + for { + written, err = ec.Console.Write(p[n:]) + n += written + if err != nil { + var hangup bool + if perr, ok := err.(*os.PathError); ok { + hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) + } else { + hangup = (err == unix.EAGAIN || err == unix.EIO) + } + // if the other end disappear, assume this is temporary and wait for the + // signal to continue again. + if hangup { + ec.writec.Wait() + continue + } + } + // unrecoverable error, break the loop and return the error + break + } + if n < len(p) && err == nil { + err = io.ErrShortWrite + } + // signal for others that we finished the write + ec.writec.Signal() + return n, err +} + +// Close closed the file descriptor and signal call waiters for this fd. +// It accepts a callback which will be called with the console's fd. The +// callback typically will be used to do further cleanup such as unregister the +// console's fd from the epoll interface. +// User should call Shutdown and wait for all I/O operation to be finished +// before closing the console. +func (ec *EpollConsole) Shutdown(close func(int) error) error { + ec.readc.L.Lock() + defer ec.readc.L.Unlock() + ec.writec.L.Lock() + defer ec.writec.L.Unlock() + + ec.readc.Broadcast() + ec.writec.Broadcast() + ec.closed = true + return close(ec.sysfd) +} + +// signalRead signals that the console is readable. +func (ec *EpollConsole) signalRead() { + ec.readc.Signal() +} + +// signalWrite signals that the console is writable. 
+func (ec *EpollConsole) signalWrite() { + ec.writec.Signal() +} diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go new file mode 100644 index 00000000000..118c8c3abfd --- /dev/null +++ b/vendor/github.com/containerd/console/console_unix.go @@ -0,0 +1,142 @@ +// +build darwin freebsd linux solaris + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// NewPty creates a new pty pair +// The master is returned as the first console and a string +// with the path to the pty slave is returned as the second +func NewPty() (Console, string, error) { + f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + slave, err := ptsname(f) + if err != nil { + return nil, "", err + } + if err := unlockpt(f); err != nil { + return nil, "", err + } + m, err := newMaster(f) + if err != nil { + return nil, "", err + } + return m, slave, nil +} + +type master struct { + f *os.File + original *unix.Termios +} + +func (m *master) Read(b []byte) (int, error) { + return m.f.Read(b) +} + +func (m *master) Write(b []byte) (int, error) { + return m.f.Write(b) +} + +func (m *master) Close() error { + return m.f.Close() +} + +func (m *master) Resize(ws WinSize) error { + return tcswinsz(m.f.Fd(), ws) +} + +func (m *master) ResizeFrom(c Console) error { + ws, err := c.Size() + if err != nil { + return err + } + return m.Resize(ws) +} + +func (m *master) Reset() error { + if m.original == nil { + return nil + } + return tcset(m.f.Fd(), m.original) +} + +func (m *master) getCurrent() (unix.Termios, error) { + var termios unix.Termios + if err := tcget(m.f.Fd(), &termios); err != nil { + return unix.Termios{}, err + } + return termios, nil +} + +func (m *master) SetRaw() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState = cfmakeraw(rawState) + rawState.Oflag = rawState.Oflag | unix.OPOST + return 
tcset(m.f.Fd(), &rawState) +} + +func (m *master) DisableEcho() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState.Lflag = rawState.Lflag &^ unix.ECHO + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) Size() (WinSize, error) { + return tcgwinsz(m.f.Fd()) +} + +func (m *master) Fd() uintptr { + return m.f.Fd() +} + +func (m *master) Name() string { + return m.f.Name() +} + +// checkConsole checks if the provided file is a console +func checkConsole(f *os.File) error { + var termios unix.Termios + if tcget(f.Fd(), &termios) != nil { + return ErrNotAConsole + } + return nil +} + +func newMaster(f *os.File) (Console, error) { + m := &master{ + f: f, + } + t, err := m.getCurrent() + if err != nil { + return nil, err + } + m.original = &t + return m, nil +} + +// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts normally. In particular, a not-very-well-known default of +// Linux unix98 ptys is that they have +onlcr by default. While this isn't a +// problem for terminal emulators, because we relay data from the terminal we +// also relay that funky line discipline. +func ClearONLCR(fd uintptr) error { + return setONLCR(fd, false) +} + +// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts as intended for a terminal emulator. 
+func SetONLCR(fd uintptr) error { + return setONLCR(fd, true) +} diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go new file mode 100644 index 00000000000..d78a0b8419b --- /dev/null +++ b/vendor/github.com/containerd/console/console_windows.go @@ -0,0 +1,200 @@ +package console + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +var ( + vtInputSupported bool + ErrNotImplemented = errors.New("not implemented") +) + +func (m *master) initStdios() { + m.in = windows.Handle(os.Stdin.Fd()) + if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { + // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. 
+ windows.SetConsoleMode(m.in, m.inMode) + } else { + fmt.Printf("failed to get console mode for stdin: %v\n", err) + } + + m.out = windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil { + if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.out, m.outMode) + } + } else { + fmt.Printf("failed to get console mode for stdout: %v\n", err) + } + + m.err = windows.Handle(os.Stderr.Fd()) + if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil { + if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.err, m.errMode) + } + } else { + fmt.Printf("failed to get console mode for stderr: %v\n", err) + } +} + +type master struct { + in windows.Handle + inMode uint32 + + out windows.Handle + outMode uint32 + + err windows.Handle + errMode uint32 +} + +func (m *master) SetRaw() error { + if err := makeInputRaw(m.in, m.inMode); err != nil { + return err + } + + // Set StdOut and StdErr to raw mode, we ignore failures since + // windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of + // Windows. 
+ + windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN) + + windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN) + + return nil +} + +func (m *master) Reset() error { + for _, s := range []struct { + fd windows.Handle + mode uint32 + }{ + {m.in, m.inMode}, + {m.out, m.outMode}, + {m.err, m.errMode}, + } { + if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { + return errors.Wrap(err, "unable to restore console mode") + } + } + + return nil +} + +func (m *master) Size() (WinSize, error) { + var info windows.ConsoleScreenBufferInfo + err := windows.GetConsoleScreenBufferInfo(m.out, &info) + if err != nil { + return WinSize{}, errors.Wrap(err, "unable to get console info") + } + + winsize := WinSize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +func (m *master) Resize(ws WinSize) error { + return ErrNotImplemented +} + +func (m *master) ResizeFrom(c Console) error { + return ErrNotImplemented +} + +func (m *master) DisableEcho() error { + mode := m.inMode &^ windows.ENABLE_ECHO_INPUT + mode |= windows.ENABLE_PROCESSED_INPUT + mode |= windows.ENABLE_LINE_INPUT + + if err := windows.SetConsoleMode(m.in, mode); err != nil { + return errors.Wrap(err, "unable to set console to disable echo") + } + + return nil +} + +func (m *master) Close() error { + return nil +} + +func (m *master) Read(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Write(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Fd() uintptr { + return uintptr(m.in) +} + +// on windows, console can only be made from os.Std{in,out,err}, hence there +// isnt a single name here we can use. Return a dummy "console" value in this +// case should be sufficient. 
+func (m *master) Name() string { + return "console" +} + +// makeInputRaw puts the terminal (Windows Console) connected to the given +// file descriptor into raw mode +func makeInputRaw(fd windows.Handle, mode uint32) error { + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= windows.ENABLE_QUICK_EDIT_MODE + + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + if err := windows.SetConsoleMode(fd, mode); err != nil { + return errors.Wrap(err, "unable to set console to raw mode") + } + + return nil +} + +func checkConsole(f *os.File) error { + var mode uint32 + if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil { + return err + } + return nil +} + +func newMaster(f *os.File) (Console, error) { + if f != os.Stdin && f != os.Stdout && f != os.Stderr { + return nil, errors.New("creating a console from a file is not supported on windows") + } + m := &master{} + m.initStdios() + return m, nil +} diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go new file mode 100644 index 00000000000..b102bad743a --- /dev/null +++ b/vendor/github.com/containerd/console/tc_darwin.go @@ -0,0 +1,37 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt 
unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_freebsd.go b/vendor/github.com/containerd/console/tc_freebsd.go new file mode 100644 index 00000000000..e2a10e4413c --- /dev/null +++ b/vendor/github.com/containerd/console/tc_freebsd.go @@ -0,0 +1,29 @@ +package console + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +// This does not exist on FreeBSD, it does not allocate controlling terminals on open +func unlockpt(f *os.File) error { + return nil +} + +// ptsname retrieves the name of the first available pts for the given master. 
+func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go new file mode 100644 index 00000000000..80ef2f6fb39 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_linux.go @@ -0,0 +1,37 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go new file mode 100644 index 00000000000..f8066d8e398 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_solaris_cgo.go @@ -0,0 +1,35 @@ +// +build solaris,cgo + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +//#include +import "C" + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +// ptsname retrieves the name of the first available pts for the given master. 
+func ptsname(f *os.File) (string, error) { + ptspath, err := C.ptsname(C.int(f.Fd())) + if err != nil { + return "", err + } + return C.GoString(ptspath), nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + if _, err := C.grantpt(C.int(f.Fd())); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go new file mode 100644 index 00000000000..0aefa0d2bb1 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_solaris_nocgo.go @@ -0,0 +1,31 @@ +// +build solaris,!cgo + +// +// Implementing the functions below requires cgo support. Non-cgo stubs +// versions are defined below to enable cross-compilation of source code +// that depends on these functions, but the resultant cross-compiled +// binaries cannot actually be used. If the stub function(s) below are +// actually invoked they will display an error message and cause the +// calling process to exit. 
+// + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +func ptsname(f *os.File) (string, error) { + panic("ptsname() support requires cgo.") +} + +func unlockpt(f *os.File) error { + panic("unlockpt() support requires cgo.") +} diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go new file mode 100644 index 00000000000..df7dcb93342 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_unix.go @@ -0,0 +1,75 @@ +// +build darwin freebsd linux solaris + +package console + +import ( + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *unix.Termios) error { + termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet) + if err != nil { + return err + } + *p = *termios + return nil +} + +func tcset(fd uintptr, p *unix.Termios) error { + return unix.IoctlSetTermios(int(fd), cmdTcSet, p) +} + +func tcgwinsz(fd uintptr) (WinSize, error) { + var ws WinSize + + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + if err != nil { + return ws, err + } + + // Translate from unix.Winsize to console.WinSize + ws.Height = uws.Row + ws.Width = uws.Col + ws.x = uws.Xpixel + ws.y = uws.Ypixel + return ws, nil +} + +func tcswinsz(fd uintptr, ws WinSize) error { + // Translate from console.WinSize to unix.Winsize + + var uws unix.Winsize + uws.Row = ws.Height + uws.Col = ws.Width + uws.Xpixel = ws.x + uws.Ypixel = ws.y + + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws) +} + +func setONLCR(fd uintptr, enable bool) error { + var termios unix.Termios + if err := tcget(fd, &termios); err != nil { + return err + } + if enable { + // Set +onlcr so we can act like a real terminal + termios.Oflag |= unix.ONLCR + } else { + // Set -onlcr so we don't have to deal with \r. 
+ termios.Oflag &^= unix.ONLCR + } + return tcset(fd, &termios) +} + +func cfmakeraw(t unix.Termios) unix.Termios { + t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + t.Oflag &^= unix.OPOST + t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + t.Cflag &^= (unix.CSIZE | unix.PARENB) + t.Cflag &^= unix.CS8 + t.Cc[unix.VMIN] = 1 + t.Cc[unix.VTIME] = 0 + + return t +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml new file mode 100644 index 00000000000..3938f383494 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml @@ -0,0 +1,19 @@ +# Copyright (C) 2017 SUSE LLC. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +language: go +go: + - 1.7.x + - 1.8.x + - tip + +os: + - linux + - osx + +script: + - go test -cover -v ./... 
+ +notifications: + email: false diff --git a/vendor/github.com/cyphar/filepath-securejoin/BUILD b/vendor/github.com/cyphar/filepath-securejoin/BUILD new file mode 100644 index 00000000000..aa508dc1f1c --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "join.go", + "vfs.go", + ], + importpath = "github.com/cyphar/filepath-securejoin", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/pkg/errors:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/cyphar/filepath-securejoin/LICENSE similarity index 92% rename from vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD rename to vendor/github.com/cyphar/filepath-securejoin/LICENSE index 4c056c5ed27..bec842f294f 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -1,4 +1,5 @@ -Copyright (c) 2014-2017 The Docker & Go Authors. All rights reserved. +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md new file mode 100644 index 00000000000..49b2baa9f35 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -0,0 +1,65 @@ +## `filepath-securejoin` ## + +[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin) + +An implementation of `SecureJoin`, a [candidate for inclusion in the Go +standard library][go#20126]. The purpose of this function is to be a "secure" +alternative to `filepath.Join`, and in particular it provides certain +guarantees that are not provided by `filepath.Join`. + +This is the function prototype: + +```go +func SecureJoin(root, unsafePath string) (string, error) +``` + +This library **guarantees** the following: + +* If no error is set, the resulting string **must** be a child path of + `SecureJoin` and will not contain any symlink path components (they will all + be expanded). + +* When expanding symlinks, all symlink path components **must** be resolved + relative to the provided root. In particular, this can be considered a + userspace implementation of how `chroot(2)` operates on file paths. Note that + these symlinks will **not** be expanded lexically (`filepath.Clean` is not + called on the input before processing). + +* Non-existant path components are unaffected by `SecureJoin` (similar to + `filepath.EvalSymlinks`'s semantics). + +* The returned path will always be `filepath.Clean`ed and thus not contain any + `..` components. 
+ +A (trivial) implementation of this function on GNU/Linux systems could be done +with the following (note that this requires root privileges and is far more +opaque than the implementation in this library, and also requires that +`readlink` is inside the `root` path): + +```go +package securejoin + +import ( + "os/exec" + "path/filepath" +) + +func SecureJoin(root, unsafePath string) (string, error) { + unsafePath = string(filepath.Separator) + unsafePath + cmd := exec.Command("chroot", root, + "readlink", "--canonicalize-missing", "--no-newline", unsafePath) + output, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + expanded := string(output) + return filepath.Join(root, expanded), nil +} +``` + +[go#20126]: https://github.com/golang/go/issues/20126 + +### License ### + +The license of this project is the same as Go, which is a BSD 3-clause license +available in the `LICENSE` file. diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION new file mode 100644 index 00000000000..1f5f83047d3 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -0,0 +1 @@ +0.2.1+dev diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go new file mode 100644 index 00000000000..f20985479d4 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -0,0 +1,135 @@ +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package securejoin is an implementation of the hopefully-soon-to-be-included +// SecureJoin helper that is meant to be part of the "path/filepath" package. 
+// The purpose of this project is to provide a PoC implementation to make the +// SecureJoin proposal (https://github.com/golang/go/issues/20126) more +// tangible. +package securejoin + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/pkg/errors" +) + +// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been +// evaluated in attempting to securely join the two given paths. +var ErrSymlinkLoop = fmt.Errorf("SecureJoin: too many links") + +// IsNotExist tells you if err is an error that implies that either the path +// accessed does not exist (or path components don't exist). This is +// effectively a more broad version of os.IsNotExist. +func IsNotExist(err error) bool { + // If it's a bone-fide ENOENT just bail. + if os.IsNotExist(errors.Cause(err)) { + return true + } + + // Check that it's not actually an ENOTDIR, which in some cases is a more + // convoluted case of ENOENT (usually involving weird paths). + var errno error + switch err := errors.Cause(err).(type) { + case *os.PathError: + errno = err.Err + case *os.LinkError: + errno = err.Err + case *os.SyscallError: + errno = err.Err + } + return errno == syscall.ENOTDIR || errno == syscall.ENOENT +} + +// SecureJoinVFS joins the two given path components (similar to Join) except +// that the returned path is guaranteed to be scoped inside the provided root +// path (when evaluated). Any symbolic links in the path are evaluated with the +// given root treated as the root of the filesystem, similar to a chroot. The +// filesystem state is evaluated through the given VFS interface (if nil, the +// standard os.* family of functions are used). +// +// Note that the guarantees provided by this function only apply if the path +// components in the returned string are not modified (in other words are not +// replaced with symlinks on the filesystem) after this function has returned. +// Such a symlink race is necessarily out-of-scope of SecureJoin. 
+func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { + // Use the os.* VFS implementation if none was specified. + if vfs == nil { + vfs = osVFS{} + } + + var path bytes.Buffer + n := 0 + for unsafePath != "" { + if n > 255 { + return "", ErrSymlinkLoop + } + + // Next path component, p. + i := strings.IndexRune(unsafePath, filepath.Separator) + var p string + if i == -1 { + p, unsafePath = unsafePath, "" + } else { + p, unsafePath = unsafePath[:i], unsafePath[i+1:] + } + + // Create a cleaned path, using the lexical semantics of /../a, to + // create a "scoped" path component which can safely be joined to fullP + // for evaluation. At this point, path.String() doesn't contain any + // symlink components. + cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) + if cleanP == string(filepath.Separator) { + path.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + // Figure out whether the path is a symlink. + fi, err := vfs.Lstat(fullP) + if err != nil && !IsNotExist(err) { + return "", err + } + // Treat non-existent path components the same as non-symlinks (we + // can't do any better here). + if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { + path.WriteString(p) + path.WriteRune(filepath.Separator) + continue + } + + // Only increment when we actually dereference a link. + n++ + + // It's a symlink, expand it by prepending it to the yet-unparsed path. + dest, err := vfs.Readlink(fullP) + if err != nil { + return "", err + } + // Absolute symlinks reset any work we've already done. + if filepath.IsAbs(dest) { + path.Reset() + } + unsafePath = dest + string(filepath.Separator) + unsafePath + } + + // We have to clean path.String() here because it may contain '..' + // components that are entirely lexical, but would be misleading otherwise. + // And finally do a final clean to ensure that root is also lexically + // clean. 
+ fullP := filepath.Clean(string(filepath.Separator) + path.String()) + return filepath.Clean(root + fullP), nil +} + +// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library +// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. +func SecureJoin(root, unsafePath string) (string, error) { + return SecureJoinVFS(root, unsafePath, nil) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf new file mode 100644 index 00000000000..66bb574b955 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf @@ -0,0 +1 @@ +github.com/pkg/errors v0.8.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go new file mode 100644 index 00000000000..a82a5eae11e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -0,0 +1,41 @@ +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import "os" + +// In future this should be moved into a separate package, because now there +// are several projects (umoci and go-mtree) that are using this sort of +// interface. + +// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is +// equivalent to using the standard os.* family of functions. This is mainly +// used for the purposes of mock testing, but also can be used to otherwise use +// SecureJoin with VFS-like system. +type VFS interface { + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. These semantics are identical to + // os.Lstat. + Lstat(name string) (os.FileInfo, error) + + // Readlink returns the destination of the named symbolic link. 
These + // semantics are identical to os.Readlink. + Readlink(name string) (string, error) +} + +// osVFS is the "nil" VFS, in that it just passes everything through to the os +// module. +type osVFS struct{} + +// Lstat returns a FileInfo describing the named file. If the file is a +// symbolic link, the returned FileInfo describes the symbolic link. Lstat +// makes no attempt to follow the link. These semantics are identical to +// os.Lstat. +func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +// Readlink returns the destination of the named symbolic link. These +// semantics are identical to os.Readlink. +func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/vendor/github.com/docker/docker/pkg/symlink/BUILD b/vendor/github.com/docker/docker/pkg/symlink/BUILD deleted file mode 100644 index f15ddab2d8d..00000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/BUILD +++ /dev/null @@ -1,68 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "fs.go", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "fs_unix.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "fs_windows.go", - ], - "//conditions:default": [], - }), - importpath = "github.com/docker/docker/pkg/symlink", - visibility = ["//visibility:public"], 
- deps = [ - "//vendor/github.com/docker/docker/pkg/system:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/docker/docker/pkg/longpath:go_default_library", - "//vendor/golang.org/x/sys/windows:go_default_library", - ], - "//conditions:default": [], - }), -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/docker/docker/pkg/symlink/README.md b/vendor/github.com/docker/docker/pkg/symlink/README.md deleted file mode 100644 index 8dba54fd089..00000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks, -as well as a Windows long-path aware version of filepath.EvalSymlinks -from the [Go standard library](https://golang.org/pkg/path/filepath). - -The code from filepath.EvalSymlinks has been adapted in fs.go. -Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go deleted file mode 100644 index 52fb9a691b3..00000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.BSD file. - -// This code is a modified version of path/filepath/symlink.go from the Go standard library. - -package symlink - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an -// absolute path. 
This function handles paths in a platform-agnostic manner. -func FollowSymlinkInScope(path, root string) (string, error) { - path, err := filepath.Abs(filepath.FromSlash(path)) - if err != nil { - return "", err - } - root, err = filepath.Abs(filepath.FromSlash(root)) - if err != nil { - return "", err - } - return evalSymlinksInScope(path, root) -} - -// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return -// a result guaranteed to be contained within the scope `root`, at the time of the call. -// Symlinks in `root` are not evaluated and left as-is. -// Errors encountered while attempting to evaluate symlinks in path will be returned. -// Non-existing paths are valid and do not constitute an error. -// `path` has to contain `root` as a prefix, or else an error will be returned. -// Trying to break out from `root` does not constitute an error. -// -// Example: -// If /foo/bar -> /outside, -// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside" -// -// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks -// are created and not to create subsequently, additional symlinks that could potentially make a -// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") -// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should -// no longer be considered safely contained in "/foo". 
-func evalSymlinksInScope(path, root string) (string, error) { - root = filepath.Clean(root) - if path == root { - return path, nil - } - if !strings.HasPrefix(path, root) { - return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) - } - const maxIter = 255 - originalPath := path - // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" - path = path[len(root):] - if root == string(filepath.Separator) { - path = string(filepath.Separator) + path - } - if !strings.HasPrefix(path, string(filepath.Separator)) { - return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) - } - path = filepath.Clean(path) - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - // b here will always be considered to be the "current absolute path inside - // root" when we append paths to it, we also append a slash and use - // filepath.Clean after the loop to trim the trailing slash - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) - } - - // find next path component, p - i := strings.IndexRune(path, filepath.Separator) - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - continue - } - - // this takes a b.String() like "b/../" and a p like "c" and turns it - // into "/b/../c" which then gets filepath.Cleaned into "/c" and then - // root gets prepended and we Clean again (to remove any trailing slash - // if the first Clean gave us just "/") - cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) - if isDriveOrRoot(cleanP) { - // never Lstat "/" itself, or drive letters on Windows - b.Reset() - continue - } - fullP := filepath.Clean(root + cleanP) - - fi, err := os.Lstat(fullP) - if os.IsNotExist(err) { - // if p does not exist, accept it - b.WriteString(p) - 
b.WriteRune(filepath.Separator) - continue - } - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(fullP) - if err != nil { - return "", err - } - if system.IsAbs(dest) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - - // see note above on "fullP := ..." for why this is double-cleaned and - // what's happening here - return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil -} - -// EvalSymlinks returns the path name after the evaluation of any symbolic -// links. -// If path is relative the result will be relative to the current directory, -// unless one of the components is an absolute symbolic link. -// This version has been updated to support long paths prepended with `\\?\`. -func EvalSymlinks(path string) (string, error) { - return evalSymlinks(path) -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go deleted file mode 100644 index 22708273d60..00000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package symlink - -import ( - "path/filepath" -) - -func evalSymlinks(path string) (string, error) { - return filepath.EvalSymlinks(path) -} - -func isDriveOrRoot(p string) bool { - return p == string(filepath.Separator) -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go deleted file mode 100644 index 31523ade923..00000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go +++ /dev/null @@ -1,169 +0,0 @@ -package symlink - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/longpath" - "golang.org/x/sys/windows" -) - -func toShort(path string) 
(string, error) { - p, err := windows.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetShortPathName says we can reuse buffer - n, err := windows.GetShortPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - if _, err = windows.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { - return "", err - } - } - return windows.UTF16ToString(b), nil -} - -func toLong(path string) (string, error) { - p, err := windows.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetLongPathName says we can reuse buffer - n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - n, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - } - b = b[:n] - return windows.UTF16ToString(b), nil -} - -func evalSymlinks(path string) (string, error) { - path, err := walkSymlinks(path) - if err != nil { - return "", err - } - - p, err := toShort(path) - if err != nil { - return "", err - } - p, err = toLong(p) - if err != nil { - return "", err - } - // windows.GetLongPathName does not change the case of the drive letter, - // but the result of EvalSymlinks must be unique, so we have - // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). - // Make drive letter upper case. 
- if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { - p = string(p[0]+'A'-'a') + p[1:] - } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { - p = p[:3] + string(p[4]+'A'-'a') + p[5:] - } - return filepath.Clean(p), nil -} - -const utf8RuneSelf = 0x80 - -func walkSymlinks(path string) (string, error) { - const maxIter = 255 - originalPath := path - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("EvalSymlinks: too many links in " + originalPath) - } - - // A path beginning with `\\?\` represents the root, so automatically - // skip that part and begin processing the next segment. - if strings.HasPrefix(path, longpath.Prefix) { - b.WriteString(longpath.Prefix) - path = path[4:] - continue - } - - // find next path component, p - var i = -1 - for j, c := range path { - if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { - i = j - break - } - } - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - if b.Len() == 0 { - // must be absolute path - b.WriteRune(filepath.Separator) - } - continue - } - - // If this is the first segment after the long path prefix, accept the - // current segment as a volume root or UNC share and move on to the next. 
- if b.String() == longpath.Prefix { - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - - fi, err := os.Lstat(b.String() + p) - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p) - if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { - b.WriteRune(filepath.Separator) - } - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(b.String() + p) - if err != nil { - return "", err - } - if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - return filepath.Clean(b.String()), nil -} - -func isDriveOrRoot(p string) bool { - if p == string(filepath.Separator) { - return true - } - - length := len(p) - if length >= 2 { - if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { - return true - } - } - return false -} diff --git a/vendor/github.com/google/cadvisor/container/docker/handler.go b/vendor/github.com/google/cadvisor/container/docker/handler.go index 541df67c99d..c5c46ae4bf2 100644 --- a/vendor/github.com/google/cadvisor/container/docker/handler.go +++ b/vendor/github.com/google/cadvisor/container/docker/handler.go @@ -391,6 +391,7 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) { } spec.Envs = self.envs spec.Image = self.image + spec.CreationTime = self.creationTime return spec, err } diff --git a/vendor/github.com/google/cadvisor/fs/BUILD b/vendor/github.com/google/cadvisor/fs/BUILD index a5d4bdae7b9..077db1a4866 100644 --- a/vendor/github.com/google/cadvisor/fs/BUILD +++ b/vendor/github.com/google/cadvisor/fs/BUILD @@ -17,6 +17,7 @@ go_library( "//vendor/github.com/docker/docker/pkg/mount:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/devicemapper:go_default_library", + 
"//vendor/github.com/google/cadvisor/utils:go_default_library", "//vendor/github.com/google/cadvisor/utils/docker:go_default_library", "//vendor/github.com/mistifyio/go-zfs:go_default_library", ], diff --git a/vendor/github.com/google/cadvisor/fs/fs.go b/vendor/github.com/google/cadvisor/fs/fs.go index 271b01e3562..ae11b576802 100644 --- a/vendor/github.com/google/cadvisor/fs/fs.go +++ b/vendor/github.com/google/cadvisor/fs/fs.go @@ -35,6 +35,7 @@ import ( "github.com/docker/docker/pkg/mount" "github.com/golang/glog" "github.com/google/cadvisor/devicemapper" + "github.com/google/cadvisor/utils" dockerutil "github.com/google/cadvisor/utils/docker" zfs "github.com/mistifyio/go-zfs" ) @@ -409,10 +410,14 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er fs.Type = ZFS default: var inodes, inodesFree uint64 - fs.Capacity, fs.Free, fs.Available, inodes, inodesFree, err = getVfsStats(partition.mountpoint) - fs.Inodes = &inodes - fs.InodesFree = &inodesFree - fs.Type = VFS + if utils.FileExists(partition.mountpoint) { + fs.Capacity, fs.Free, fs.Available, inodes, inodesFree, err = getVfsStats(partition.mountpoint) + fs.Inodes = &inodes + fs.InodesFree = &inodesFree + fs.Type = VFS + } else { + glog.V(4).Infof("unable to determine file system type, partition mountpoint does not exist: %v", partition.mountpoint) + } } if err != nil { glog.Errorf("Stat fs failed. Error: %v", err) diff --git a/vendor/github.com/google/cadvisor/pages/static/assets.go b/vendor/github.com/google/cadvisor/pages/static/assets.go index 156737f2bd4..ec854945f3a 100644 --- a/vendor/github.com/google/cadvisor/pages/static/assets.go +++ b/vendor/github.com/google/cadvisor/pages/static/assets.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -251,7 +251,7 @@ func pagesAssetsStylesBootstrapTheme311MinCss() (*asset, error) { return a, nil } -var _pagesAssetsStylesContainersCss = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\xb9\xd7\x8e\xe3\x68\x9a\x28\x78\xdf\x4f\x91\x3b\x83\x05\xce\x39\xec\x6c\x7a\x57\x85\xbd\xa0\x11\x45\x27\x7a\x7f\xb3\xa0\x27\x25\x7a\x27\x92\x85\x79\xf7\x85\x22\x22\x23\xb3\x4c\x57\x75\xcf\x6c\x20\x95\x11\xfc\xf9\x7f\xde\x7f\xfa\xdb\x3f\xca\xbe\x2f\x9b\xfc\xeb\x56\xcf\x6b\xdc\xd4\x67\xbc\xd4\x7d\xf7\x75\xe9\xfb\x26\x89\xa7\x5f\x8a\xbe\x5b\xbe\xce\xf5\x99\xff\x04\x43\xd0\xff\xfd\x5f\x7f\x7a\xf9\xcb\x9f\xbe\xfd\x9a\xef\x43\x3f\x2d\x5f\xeb\xf7\x4b\x7f\xff\xff\x01\x55\x16\x2f\xf1\xff\x08\x4f\xb5\xb4\xcd\xd7\xb4\xcf\xf2\x5f\xda\x78\x2a\xeb\xee\xeb\x54\x97\xd5\xf2\xd3\x3f\xe0\xbc\xfd\x73\x51\xbf\x43\xbe\x78\x69\xe2\xee\xed\xed\xbb\xb6\x9e\xf9\x1b\x92\xa4\x6f\xb2\xbf\x40\xd2\x3f\xbe\x26\xeb\xb2\xf4\xdd\x2f\x43\x9c\x65\x75\x57\xfe\x84\x0c\xfb\x5f\xc0\x2c\x53\x1d\x77\x65\x93\xff\x32\xf4\x73\xfd\x7a\xf3\x53\x9c\xcc\x7d\xb3\x2e\xf9\xcf\xef\xcc\x43\x3f\x2f\xfd\xf0\x13\xf4\x17\x68\xd2\x78\x78\x7f\x8e\x93\x26\xff\xe5\x59\x67\x4b\xf5\x66\xe1\x9f\xbf\x71\x02\xfd\xfc\xae\x92\x9f\xa0\x9f\x93\x7e\xca\xf2\xe9\xf3\x8f\xaf\x69\xdf\x34\xf1\x30\xe7\x3f\x7d\xfb\xe3\x2f\x68\xcd\x6d\xdc\x34\x5f\xb3\x3a\x6e\xfa\xf2\x83\x14\x0e\x41\x7f\x29\x69\x52\x97\xbf\x06\xa2\xfe\x05\xa0\x1f\x69\xfd\xb9\x6b\xfc\x88\xff\xf7\xba\x4c\xe2\xf4\x51\x4e\xfd\xda\x65\x2f\x71\xfb\xe9\xa7\xff\x4c\xe1\x8c\x2e\x8a\x6f\xca\x80\x87\xfd\xcb\xdc\x37\x75\xf6\xe5\x3f\xd1\x18\x27\x49\xec\x53\x71\xd4\xbf\xc5\xe3\xd7\xe4\x5f\x67\xf3\x6b\x52\xfe\xf2\x7b\xbe\xb2\x2c\xfb\xf9\xf7\xfc\xbf\xf9\xc0\xcf\x4d\x5e\x2c\x7f\xe9\x0a\xbf\x62\x67\xa9\x97\xbf\x8a\xcd\x1f\x39\x7a\xbb\xfe\x07\x4c\xe5\x50\x9e\x15\xf9\xcf\x1f\x4f\x10\x04\xfd\x9c\xae\xd3\xdc\x4f\x3f\x0d\x7d\xdd\x2d\xf9\xf4\xa3\xb6\xbe\x73\x3f\xe5\x4d\xbc\xd4\x5b\xfe\xf3\x0f\x59\x07\x19\x96\x9f\x7f\x1b\x56\x3f\x6f\xf9\xb4\xd4\x69\xdc\x7c\x8d\x9b\xba\xec\x7e\x6a\xeb\x2c\x6b\xfe\x1d\x2f\xfc\x9a\xf
6\xdd\x92\x77\xcb\xbf\x2e\xe8\x07\xc0\x1f\x88\x5a\x14\xc5\xa7\x34\xd8\xb0\xff\x8a\xd9\xae\x9f\xda\xb8\xf9\xb9\xdf\xf2\xa9\x68\xfa\xe7\x4f\xf1\xba\xf4\xff\xb6\x35\xbe\xa6\x4d\x3f\xff\xbb\x36\x79\x07\xfa\x81\xdd\x9f\x96\x29\xee\xe6\x21\x9e\xf2\x6e\xf9\xb2\x4e\xcd\xff\x7a\xbb\xf0\xff\x26\xfd\xfe\x8f\xb2\x2e\xfe\xf7\x97\xae\xff\x3a\xe5\x43\x1e\x2f\x5f\xe6\x74\xea\x9b\xe6\x4b\x9a\xbf\x19\xaa\x7a\x97\x04\xc6\x7f\x34\xd4\x6f\x52\x0e\x0c\x0d\xfb\x9b\xc7\xbd\xac\xf9\x91\x4b\xf0\x7f\x2f\x0c\x3e\xf4\xfb\xa5\x2e\xa6\xb8\xfd\x37\x84\xfd\x35\xdc\x8f\xd9\xe5\x1b\xe7\xe4\xdb\xc3\xef\xc2\x36\x69\xe2\xf4\xf1\x5f\xff\x48\xab\x78\x5a\xe6\xaf\x75\xd7\xd4\x5d\xfe\x35\x69\xfa\xf4\xf1\xcb\xef\xfd\x31\xab\xe7\xa1\x89\x8f\x9f\xbe\xb6\xfd\xf9\x79\xb7\xdf\x3f\xcf\x7f\x04\xff\xaf\xff\xf3\xe5\x55\x1c\xbe\xfc\x11\xea\xbf\xff\x9f\x9f\x8a\x7a\x9a\x97\xaf\x69\x55\x37\x19\xf0\x4f\xef\xfd\xf2\x6b\xc4\x9f\x6c\xb6\x79\xb7\xfe\x68\xd3\x37\xe7\xfb\x9e\x95\xdf\xd2\x54\x9a\x7e\xf9\x4f\x82\x20\x3e\xfe\x4b\xd3\xf4\xdb\x85\x79\x39\x9a\xfc\xa7\x37\xe9\xbf\x1d\x7d\xd8\x6a\xd8\xbf\x85\x67\x96\x17\xf1\xda\xbc\x47\xdc\x87\xf7\x7e\x81\xd1\x61\xff\xc2\x4c\x75\xdc\xfc\x7d\x8e\xbb\xf9\xeb\x9c\x4f\x75\xf1\xbd\x3a\xf4\xeb\xf2\xe2\xf1\xa7\xae\xef\xf2\x1f\x03\xe1\x0b\xf4\x07\xfe\xf2\xd2\x5e\x96\xef\x3f\x21\x10\x04\x41\xbf\x12\xeb\x5b\x25\xfc\x51\xba\x2c\xcb\xde\x5c\x15\x04\xe7\xb9\xf9\x47\x39\x2f\xf1\x52\xa7\xff\x48\xfb\x16\xcc\xb3\x7a\xe9\x27\xf0\x1d\xe6\x6b\x52\xfe\x63\xe8\xca\xff\xfd\xe5\xdd\x81\xbf\xee\x5f\x96\x7e\xf8\xf2\xca\x7d\xdf\x4b\xd7\x3f\xcf\x44\x4d\x3d\x2f\x1f\xba\x79\x13\xe1\x43\x30\x64\xd8\xff\x58\x34\xe8\xe7\x25\xdf\x97\xaf\x59\x9e\xf6\xd3\x9b\x5b\xbe\xbf\xfe\x27\x29\xe9\xf7\x12\x7e\xed\xd7\x25\x9f\x5e\xee\xf3\xf7\x3f\x7a\x5b\x77\xdd\xfb\xdb\x5f\xfe\xb9\xd9\x3e\x84\x89\xe3\xf8\xb7\x64\x97\x7e\xf8\x73\x9a\xbf\xfc\xa6\xaa\x7f\x77\x81\x97\xbd\xbe\x89\xf8\x87\x38\xbe\x73\xf6\x0d\xc7\x97\xaf\xf0\x67\x68\x7d\x20\x82\xbe\xbc\x8e\xbe\x21\x7a\x79\x0e\x36\xec\xbf\x0d\x89\x3f\xc6\xfa\x56\xac\x5e\x18\xff\xec\xfa\xb4\xf
c\xf1\xf9\x77\x01\x3f\xd1\x7c\x24\xa7\xb7\x9c\xfb\xdf\xc0\xf8\x9d\xb1\x1f\xf1\xfc\xf3\xf8\xfd\x4b\xa1\xfe\x35\xd0\xbf\x66\xe7\x0d\xe5\x6f\xe4\xfb\xe9\xa7\xa9\xef\x97\x3f\x02\xfc\xe5\x2d\xab\x7c\xe4\x42\xe8\x4f\x2e\xfe\xa8\xc2\x7f\x15\xe4\x07\xa6\xfe\x55\x90\x8f\xc6\xf3\x57\x00\xef\x89\xe6\xcf\xa0\xb2\xa9\x1f\xb2\xfe\xf9\x87\x60\x7f\x78\xbf\x9e\x5f\x8d\x6d\xf6\x63\xbd\xae\xdb\xb8\x7c\x8f\xe5\xff\xab\x6e\x5f\xf3\x43\xdc\x2d\x3f\xf7\x43\x9c\xd6\xcb\xf1\xd3\x3f\xd0\x9f\xdf\x92\xfb\x0f\xcf\x45\xdd\x2c\xf9\xf4\x53\xdc\x0c\x55\xfc\xbf\x3e\xce\xff\x1f\x14\xfa\xdf\x7f\x4a\xf0\xcf\x35\xfb\x87\x11\xff\xa7\xa0\x9f\x1a\xfe\xf7\x41\x3f\x34\xfd\xef\x03\x7e\x2a\xfb\x23\xcf\xa0\x28\xfa\x83\xc6\x7e\x9d\x83\x68\x9a\xfe\xfe\xee\xcf\xc2\xec\x1b\xc5\x3f\x2b\x81\x7f\x68\xc1\xef\xf9\xf8\x95\x5a\x7e\x60\xe4\x33\x5b\xfd\xfa\xfc\x0f\xcd\x53\xbd\x5a\xb0\xff\x86\x6d\xfe\x39\xdc\xef\xf2\xf4\x37\x85\xa4\xc5\x97\xff\x24\xe8\xfc\xe3\x3f\x32\x2e\xfe\x82\xb5\x38\x7d\xb5\x18\x7f\x48\xbd\x1f\xf2\xee\x0f\x3a\xce\x24\x49\x7e\x9c\x4f\x3e\xab\x6c\xd2\x2f\x4b\xdf\xbe\x15\xbe\x3f\xa4\x54\xf4\xe9\x3a\xff\xb7\x5c\xf4\xcf\x20\xff\x89\x22\xfa\x29\xee\xca\x3f\x2e\x80\xdf\x52\xc0\x77\x03\xbe\xb5\x0b\x5f\xa0\x7f\xb5\x9a\x7d\x3a\xe8\x8f\xcd\xe9\x7b\xfd\x21\x5f\x05\xe9\x7b\x0b\xf1\xa7\xdd\xc3\xfb\xaf\x8f\xa6\xf2\xbd\x83\xf8\xde\x05\x7f\x45\x29\xea\xad\x28\xfe\x8b\x3c\x7d\x9b\x85\xdf\x57\x08\x7f\xa8\xc5\x5f\x5f\xf9\x6f\x98\xe1\x5f\x40\xf0\xdb\x22\xfd\xb1\xd1\xf8\xe3\x8a\xfe\x89\xef\xe5\x32\x7f\x4e\xf1\x75\xe3\x2f\xaa\xc6\x07\xc1\x6f\x33\xe7\x7f\x0b\xdb\xef\xd8\x7f\xc7\xf6\x2d\xe9\x7c\x2b\x7d\xdf\x46\xef\xa2\x28\xfe\x9a\xd0\x1f\x5e\xa8\xf2\xf4\xf1\xaf\x3a\xf4\x6f\xa9\x66\x59\xf6\x2b\xaa\xf5\x92\xb7\xbf\xfc\xd0\x60\xfe\x75\xf3\xfc\x4f\x7a\x4e\xe8\x57\xfd\x33\x99\xb7\x6f\x81\x81\xbc\x4d\x55\x55\xbd\xe4\x5f\xe7\x21\x4e\x5f\x20\xcf\x29\x1e\x7e\xc7\xc2\x6f\x9f\x5f\xad\xc4\xb7\x18\x7b\x97\x81\xcc\xdb\x6f\x14\x3e\xfc\x02\x79\xdb\x56\xfc\xa8\x84\xae\x7f\xd3\x4d\xd2\xef\x5f\x7e\x8b\xf0\xef\xbf\xb9\x58\xa7\x7d\xf7\xbb\x4b\xbf\xa
6\x08\x23\xbf\xc7\x1f\xa7\x69\xde\xfc\x73\xb8\x0f\xc6\xa0\xdf\x00\xbe\x49\xf4\x6d\x0a\xff\x77\x94\xfd\x7b\x2c\x7f\x58\xfc\xde\xde\xbc\xb1\xf6\xf7\x7f\x03\xe0\x37\x0c\xa5\x69\xfa\x4f\xb2\xfd\x5f\xe0\x79\xa9\xf2\x97\xff\x69\x2f\xf2\x86\xa9\xaa\xcb\xaa\xf9\x5d\x0a\x7a\x7f\xf5\x2a\x66\x7f\xb4\x41\x22\x72\xba\xa0\x7e\x53\xd8\x7f\x7d\xf8\xee\xab\x59\xbf\x2c\xf9\xef\x87\xc7\xef\x93\xc3\xd7\xf7\x12\xf4\xea\xfb\x3f\x8f\x96\x7e\x78\x3d\xff\x81\x31\x3f\xfc\xec\xf7\x8c\xbe\x69\xe3\x07\x3e\xdf\x53\xf2\x4f\x9f\xc9\xf9\x73\x29\x41\x0c\xfb\xfb\x8e\x8b\xf8\x93\xed\xc4\xab\x41\xfe\xe3\xf1\xec\xdb\xae\x82\xf8\x23\xf6\x7e\xdb\x88\xff\x05\xcf\x7f\x78\xfd\x4d\x90\x37\x06\xdf\x98\x78\xe7\xe7\x47\x6a\xfd\xfb\x16\x76\xce\x9b\x3c\x5d\xfe\xd0\xc1\x7e\x4b\xf0\x2f\x21\x7e\xa3\xbc\xff\x49\x09\xc4\x5f\xe1\xfb\x05\xfa\xbd\x6e\xde\xe2\xe4\x97\xef\x2d\xe0\xcf\x59\x3d\xe5\xe9\x9b\xf2\x9b\x65\xfa\xf9\xbb\xc4\xdf\x4b\xfc\x9f\x59\xe8\x63\xa4\x7e\x37\xcd\xdb\xd1\xbf\x68\x8e\x77\x3e\x3e\x6a\xc4\x0f\xd6\xfe\x01\xdd\xef\xba\xa1\x37\xc0\xb6\xcb\xdb\xbe\xab\xd3\xaf\x55\xdd\x2d\xbf\xfc\x76\xa0\x5f\xbb\x2c\x9f\x7e\xb7\x78\xf9\x35\xe0\x9c\x0f\xf1\x14\x2f\xfd\xf4\xa3\x1a\x7e\xb5\xb7\xfc\x1e\x02\x6f\x0c\xbe\xe6\xe0\xbf\xfd\xed\x1f\x2f\x1b\x7c\x6d\xe2\x24\x6f\xbe\xfc\xf2\xb7\x2f\x5f\xbe\x7c\xf9\xdd\x72\xf3\x6f\xff\xf5\xb7\x7f\xac\x5d\xfd\xeb\x5b\x1f\x44\xa8\xb7\x9f\x9f\xbf\x03\xbe\x07\x66\xbd\xc4\x4d\x9d\xbe\x41\xbe\xb7\x93\x5f\xd3\x61\xfd\xa7\xf8\x7f\xc0\x07\xbd\xfd\xbc\x01\xd6\xdd\xef\x40\x7f\x43\xf4\xbf\xfe\xf6\x8f\x29\x7e\x7e\x7d\x49\x30\xff\x88\xbc\x88\xdb\xba\x39\x7e\xfa\xf2\x1f\x5c\xbf\x4e\x75\x3e\x7d\xd1\xf2\xe7\x7f\xbc\x93\xf9\xb1\x6c\x7d\x19\xa6\xfc\xeb\xab\x72\xbd\x93\x9b\xfb\xe6\x63\xab\x57\x2f\x4d\xfe\x6b\x8a\xc2\xdb\xcf\xdb\xbd\x21\x2e\x5f\x03\x5f\x9c\xe5\xd3\x97\x0a\xfe\xb8\xf6\xec\xa7\xec\x0d\xd5\x4f\x5f\x92\x29\x8f\x1f\x5f\x5f\x07\x6f\xd7\xdf\xbe\xcf\xf8\x3a\xf5\xcf\x3f\x64\x30\x7d\x67\xf0\x3f\xfe\xfe\xe5\x3f\xda\xbe\xeb\xdf\xf8\xfa\x8f\x1f\xb5\xf9\x32\xdd\x97\xb7\x76\xf2\xed\xf0\x07\x3f\xfa\xf2\xe
6\x5e\xef\xc7\xbf\x49\x26\x5f\x96\x7e\x78\x7f\xf1\xb1\x6e\xfa\xf2\x89\xe0\xc7\x2e\xe6\x0b\xfa\x9b\xd3\x77\x87\xfd\xdd\xf1\x2b\x61\xfe\xee\xf0\x23\xb1\xbe\x9f\xff\xd7\xdf\xfe\x31\xaf\xc9\xab\xf6\xc4\xf5\xab\x57\xf9\xd8\x16\x7e\xad\xbb\x61\x5d\x3e\x04\xff\x15\x65\xec\x1b\xb6\xf7\x84\xf7\x05\x83\xde\xd1\xfc\x67\xd3\x97\xfd\x07\xc0\x47\x4e\xfd\x82\xbc\xed\x4b\x7f\xc7\x10\xf2\x79\xfa\xfb\xc4\xfc\x3d\x67\xfc\xee\xc6\xbb\x46\x3f\x58\xfd\xdd\xdb\xcf\x84\xf0\x6d\xdb\xfc\xdb\x0b\xef\x33\xfb\x2b\x85\xfd\x47\x16\x2f\xf1\x4f\x6f\xcf\xe0\xd0\x95\x3f\x27\xf1\x9c\x13\xd8\xdf\x6b\x8f\xd5\xad\x27\xa4\x5c\xcb\x9e\x61\x18\x46\xb3\xdd\xea\xe2\x96\x0c\xc3\x92\xee\xeb\xb1\xe6\x18\x89\x61\x18\x8e\x45\xcc\x01\x65\x18\x46\xb1\xfd\x86\x37\x61\xf6\x74\x1f\x19\x27\x5d\x1a\xc1\x3c\xc3\x5d\x77\x42\x44\x75\xa4\xf5\x66\x32\x4c\xae\x8d\x99\x8b\x66\xe1\x03\x4c\x7d\x63\x01\xd1\xc8\xa4\x7d\x73\xb9\x3f\xe7\x28\x9b\x4b\x93\xa9\x25\xe2\x29\x1d\x2c\x1b\xf5\x15\x67\x33\xd7\xeb\xe5\xc2\xca\xcc\x1e\xfa\x12\xd7\x84\xae\x60\x5d\x53\x5b\x08\x2e\xe5\xe2\x2a\xb6\x8c\x48\xd8\xc8\xf4\xeb\x35\x63\xeb\xbe\x8a\x6b\xd8\x4b\x75\x1d\x2b\x50\x25\x87\x8b\x60\xda\xc8\x1c\x25\x61\x70\xa5\x70\xfc\xec\xc0\x1b\x8e\x9f\x4f\x80\xb9\x58\x52\xfb\xc8\xe3\x3a\x2e\x19\xdc\xed\x5d\x4a\x5f\xca\xe2\x0e\xd1\xd2\x7e\x6b\xe9\x92\x91\x3c\x5b\x67\x2e\x12\xcb\xb7\x07\x25\x77\x6c\xea\x32\x8c\xc1\xf0\x79\x50\x01\x99\xca\x80\x20\x73\xdd\x92\x27\xc3\x31\x22\xef\xa9\xb5\x69\xef\x80\x08\x96\xe4\x60\xca\x4d\xca\x48\xf2\x85\xc1\x38\xc9\xc6\x9f\x33\xfb\x10\x24\xe6\xe0\x70\xf7\x60\x4d\xe6\x60\x4b\x66\x16\xf4\x33\x62\x7a\xa6\x61\x18\x26\x11\x1b\xca\xac\x25\x66\x2c\x21\x86\x34\x2c\xd9\x28\x5d\x06\x51\x5a\x07\x65\x53\x06\x51\xa4\x74\x34\x4b\xe6\x6e\x5e\x98\xd6\xae\x44\x99\x91\x99\xf5\xc9\x96\x5e\x28\xd8\x2a\xa3\x32\xcf\x92\x2b\xa7\x0b\x53\xab\xa5\xc3\x4c\xe5\x95\xf1\x93\xf0\xb0\x9e\x0a\xb3\x99\x2e\x93\xf5\x78\xf8\x60\x68\x26\x64\x38\xa6\x75\xa5\xf9\x64\x98\x52\x67\x24\xc6\xdc\x31\xe7\x34\x6f\xa5\xc3\xe8\xe5\x4d\x53\x40\x4c\x69\x8a\x54\xf2\x31\x9b\xb9\x30\xc
f\xdb\xea\x9d\x51\xa6\x42\xf7\x9b\xbb\xca\x81\x79\xed\x8a\x83\xd2\xf9\x5a\xca\x55\x2e\x51\x05\x2e\xb4\xd2\xe1\x72\x0d\x65\x9c\x53\xd2\x06\x9f\xef\x17\xe8\xde\x31\xd0\xa9\xf3\x0c\xc3\x5c\xb7\xce\x7a\x16\x18\x66\xe0\x7c\x87\xaf\x4e\x85\xb7\x49\x97\xe8\x20\xb4\xc7\x2a\x10\x83\xec\x26\xdd\xb1\x8a\xca\x40\x60\x93\x9e\x1c\xcb\x30\xa6\x33\x83\x1b\xd2\x80\x0d\x9e\x85\x7c\x7a\x4b\xd9\x59\xf4\xc1\x98\x68\x12\x46\x1b\x7c\xe6\x7a\x2f\x00\x0f\x3a\x91\x67\x5b\xfa\xe5\x64\xe6\x5b\xab\xe6\xce\x9d\x32\x59\x22\xef\x3d\x5e\x32\x58\x28\xe5\xca\x59\x25\x6d\xaf\xee\x13\xe8\x89\x61\x33\x00\x9e\x46\x95\x00\x9c\x5a\x6c\x09\x65\xb2\x24\x80\xa0\xd3\x93\x11\xaf\xb1\x29\xb7\xcb\x53\xd1\x79\x30\x10\xaa\xa4\x8b\x57\xb7\x27\xbb\xe3\x69\x71\x37\x9f\x44\xf1\xc3\x00\x76\xcc\x03\xc1\x10\x1b\xa0\x5c\x82\x66\xef\x39\x0b\xe1\xb4\xfb\x52\x8b\xb9\x9c\xac\xed\x68\x8b\xbb\x83\x70\x91\xd2\xd6\xc3\x2a\x75\x28\x4e\x75\x17\xf1\x84\xe6\x22\x0d\xe5\x4d\x6e\xae\x1e\x20\xe3\x4c\xa1\xb8\xa4\xcb\xfc\x14\xc9\x95\x31\x15\x95\x6f\xee\x76\x56\xb2\x4c\xb1\xdd\x96\x3b\xdb\xab\x0f\x93\x31\xa3\xd3\x3c\xf2\x22\x65\x18\x59\x07\x81\x90\x66\x94\xe7\x85\x39\xa3\xfc\x81\x3d\x19\x66\xd3\xcb\x30\x50\x6a\x57\x80\xce\x2b\xc7\x30\x8c\xa5\x70\xac\x32\xb1\x2c\xfb\x3c\x59\x41\x39\x19\x7e\x38\x59\x31\x39\x19\xe1\x29\x57\xec\xc3\x62\x78\xce\x61\x0c\x96\xe5\xd8\xd5\x66\x44\x66\xac\xae\x72\xc4\x5e\x3d\x87\xd1\x97\xf2\xb2\x24\x0c\x7f\x1d\x19\xab\x5d\x2f\xe6\x72\xbb\xec\x3a\x83\x76\x18\x2b\xa7\x65\xbe\x3c\x85\x67\x54\x5e\x65\x92\xbb\xaa\xc7\x95\x33\x2d\xf6\x60\x4a\x27\xb4\x2f\x44\xcd\x0a\xf5\x7d\x29\xeb\xb3\x2c\x83\x36\xc4\x2e\x52\x2b\x48\x9a\xac\x30\x03\xc7\x0f\x35\x6b\x59\xd2\x1c\xbc\x9c\x90\x1b\x5d\x79\x7c\x78\xb0\x24\x81\xd2\x92\x56\x26\x7e\xf7\x76\x06\x18\xf8\xe4\xe0\x98\xa3\xde\x0e\x65\xbb\xa4\x7b\x23\x5d\x0f\xd7\x34\x5c\xa6\xf3\x98\x35\x2e\x8b\x6b\x3f\x5d\x7b\xd1\xb0\x32\xde\x34\x82\x90\xf3\xfb\x76\xb3\xae\x93\x99\x1b\x21\x87\xf4\x63\x61\xd9\x40\x05\x67\x0c\x4b\xd7\x3d\x7e\x2f\xb5\xcb\xce\xb7\xf1\x0d\xf1\x85\xf4\x2e\x65\x42\x28\x35\xe1\xb
5\x4e\x44\xa1\x35\x1a\x9f\xf7\xfc\xfb\xe9\xdd\x45\xaf\x44\xb3\xca\x4d\x9e\x32\x5e\x91\xcf\xf2\xc1\xf4\x2b\x73\x01\x9e\x52\xd0\x9b\xe6\xb3\x3d\xb8\x87\xcc\x8a\xfe\x65\x0f\x4d\xa2\x6c\x48\x4e\x87\x2e\x53\x2f\x79\xcf\x96\xaf\x78\x91\x93\x0f\xc5\x19\x2f\x00\x24\x07\xbd\x0d\xa4\x16\x16\xde\xc7\xfd\x21\x54\xd7\x89\x4b\x56\xc9\xc2\xaa\xae\xf6\x72\xf9\x89\x39\x01\xae\x49\x32\x43\xca\x8f\xd5\xd2\xb9\xfa\xd2\x3c\x78\x59\xe6\xa0\x66\xb2\xb5\x45\x52\x63\xcb\x19\x78\xd2\xbe\x5c\xdb\xd4\xe1\x92\xe9\x71\x45\x65\x74\x35\xa5\xb9\xe7\x66\x49\xce\xad\x05\x67\x8c\xc7\x05\xb0\x01\xaa\xe3\x71\x35\x75\xf5\xd9\x97\xb0\xc7\x53\xee\x6b\xcb\x1e\x64\xd3\xb6\xef\x5e\x63\x00\xee\x65\x88\xa8\xc3\x1c\xda\x72\x6c\x44\x6d\x8c\x48\x97\x53\x46\x49\x1f\xe8\xd8\xf6\x91\x32\x6b\x98\xcd\xc5\xf2\xe1\x72\x5a\x70\x5b\x07\x5a\xbb\x47\xfc\x41\x98\x4b\x4f\x07\x66\xdb\x36\x5d\x5c\x64\xbd\x0a\x34\xd0\x5a\x76\x59\xb9\xe3\x42\x43\x73\x01\xd4\x6e\x3d\x0b\x74\x1d\x11\x1a\x50\xf6\x7c\x6a\x55\x89\xf0\x3b\x54\xd5\x0a\xdb\x4c\xd7\x60\x8e\x86\xbd\x46\xaf\x36\x10\x39\x4a\x28\x40\x78\x1e\x1d\x82\xf9\x84\x4f\x29\x69\xfd\x3c\xe4\xf1\x6a\xf3\xbc\x72\xa4\xeb\x81\x12\x22\x51\xb9\x0c\x2e\xd1\x78\xb1\x57\x0c\x41\x37\x36\x96\x97\xc2\x2e\x08\x07\xed\xc8\xc1\xf6\xe4\x99\xc0\x14\x34\x11\x3d\x3c\x22\x6f\x86\xf9\xfb\x68\x76\xae\x30\x6e\xde\x68\xc1\xde\x4a\x3c\x90\x69\xc6\xed\x0c\x56\x0c\xf7\x46\x5b\x01\xa1\x03\x01\x88\x70\x40\x7b\xd3\xe6\x3e\x16\xac\x91\xbb\xda\x8a\xe6\x86\x64\x71\xbd\x86\x50\x25\xcc\xe1\x11\x15\x42\x68\x2e\xd1\xe1\x95\xc6\xa4\x5e\xd7\xd0\xf1\xe5\x64\xde\xba\xd9\x11\x43\x01\xd8\x17\x44\x45\x97\x9b\x0b\x69\xe7\xd8\xc6\x73\x34\x08\x6d\x97\x47\xfb\x69\x5e\xf4\x7d\x12\x4b\x8a\xef\xa2\xee\x32\xe9\x52\x8a\x69\x67\x37\x2f\xcc\x01\x81\x6a\x22\xfa\xf1\x08\xb7\x53\x40\x2c\x1a\x3d\x10\xc8\x44\xc6\xe1\x0a\x1f\xa4\x95\x2c\x04\x3d\x6d\x59\xc0\x65\xa6\x0c\xcd\x5d\xe1\x12\x89\x33\xa1\x99\xd9\x12\xda\x34\x58\x6b\xef\x13\x08\x1a\x18\xc4\x9d\x74\x42\x60\xc1\x09\xb3\x83\x8f\xcd\xd5\x89\x0c\xb5\x2c\xd0\x5b\x68\xc5\x80\x05\x60\xda\xe0\x15\xb
4\xc4\x33\x7d\x6a\x4c\x89\x3c\xd8\x66\xbe\x39\xd5\x9d\xba\x1d\x66\x08\x54\x53\x7b\x6d\x31\xa8\xe0\x2e\x58\x5c\x5a\x07\x52\x13\x8f\xd4\xea\x7b\x5a\x90\xfb\x23\xc7\xe4\x95\x10\x23\xe0\x06\xe7\x6c\x0a\x44\xe3\xbe\x89\x57\x65\xd6\x34\xbc\x41\xaf\x33\x15\xfa\xf2\x70\x5b\xd6\xc7\xae\x38\x02\xae\x62\xc3\x19\x59\x53\xdb\xc6\x04\x94\x38\x4e\x76\x3b\x6f\x07\x77\xa6\x93\x56\xcf\x47\x7e\xe8\x7a\xe6\xa7\x31\x8e\x57\xdb\xd2\x52\x08\x78\x68\x55\xec\x09\xdd\xcd\x53\xea\x33\x56\x77\x3c\x89\x9b\xee\x54\xd9\x28\xd2\x9a\x2d\x9f\x5b\x21\x04\xbc\xae\xf1\x69\x2f\x09\xa6\x4b\xe7\x07\xfa\x3c\x13\x08\xea\x7b\xc8\x6e\x5e\xe1\x3c\xe1\xe5\x99\x68\xa9\x15\x19\x15\x24\x99\x52\x0b\x69\x11\xb4\x10\x74\xc9\xbd\x1f\x9d\xad\x14\x9e\x93\x2c\xfd\x4e\x1a\xe7\xcc\xad\x47\x4c\x32\xc9\xa2\x17\x51\x45\xd7\x01\x92\x14\x89\x21\xa6\xcf\xf4\x52\xa1\xbe\x8e\x9a\x64\x21\xca\x50\x96\xee\x48\x22\x72\x14\x41\x38\x95\x76\xd3\x87\x5d\xe9\x78\xc2\xcb\xba\x2b\x92\x57\x29\x8e\x9e\x9a\x95\xb7\x36\xce\xab\x97\x21\x43\x9a\x3d\x73\x92\x38\x57\x06\x84\x28\xf2\x79\xf5\x70\x6b\x5a\xd7\x45\x81\x21\xd0\xbf\xb7\xe4\x9a\xa0\x0a\x1a\x25\x99\x93\x9d\x3c\x2a\x14\xf1\x46\x3a\x36\x91\x5c\xa9\x26\x7b\x9e\xe8\x66\x16\xce\x93\x86\xef\x75\x9a\x9e\xf7\x2e\xce\x93\x81\x52\xcf\x87\x9e\x6e\x41\x17\xe5\xbe\x47\x92\x9b\x85\x1b\x33\xfe\x6c\xbb\x25\x7c\x26\xe9\x3d\xd9\x85\xad\xdc\xd2\x85\x0c\xf9\x1e\x30\xfc\x80\x38\xd7\x07\x6f\xcc\x21\xc0\x0c\x34\xd1\x61\x39\x30\x4c\x58\x8d\x9e\x08\x20\xa1\xa4\x5f\x0c\x0d\xc0\x82\x47\x0e\x9c\x1b\x71\x80\x88\x01\xa8\x60\x95\x17\x92\x4f\xb9\x98\x10\x32\x22\xcb\xe4\x2e\xbb\x85\x97\xb8\x4c\x6e\x7c\xfc\xbc\x15\xac\xfc\xb8\x0c\x66\x0a\xb3\x21\x24\xac\xcf\x70\xe3\x79\xcc\x33\xcb\xa7\xcc\x7b\x8f\x22\x1d\xb4\xed\xc9\x16\xc7\x15\x6c\xee\x37\x7a\xef\x54\x79\xb2\x84\xb4\x97\x3a\xcb\x8b\xaf\x60\xff\x3c\xb9\xe8\x26\x2e\xa1\x75\xef\x13\x83\xd3\x28\xb3\x68\x81\x16\x70\xcf\xe1\x54\x43\xa9\xd6\x59\x31\x91\x6a\x4c\x44\x1e\x51\x6b\x15\x78\x61\x99\x83\x72\x57\x46\x75\x1d\xac\x45\xd0\xa0\x34\x70\x50\x55\x8b\x67\x5c\xe8\xc8\x88\x44\x74\x2
a\x04\x86\xbb\xfa\x20\x11\x64\x44\xd2\x2e\xea\xa6\xeb\x30\xbb\xed\x7e\x27\xe5\x15\x2b\xd5\x7a\x5c\x25\x01\xab\xc0\x4a\xa0\xa2\xd4\xce\xd8\xdb\x83\xcb\x11\x19\xbf\x39\xe1\xce\x23\x32\x86\xde\x38\x1c\x39\xe2\x1b\x7e\x3e\xda\xc6\xf6\xb3\xab\xbe\xe2\xd9\x51\xaf\x56\xd1\x12\x29\xa9\x76\x5d\x4a\xf9\x8e\x17\x3d\xc4\x27\x56\x45\xa5\xdd\x90\x12\x88\x87\xb7\xd2\x6c\x98\xed\x7a\x01\x9b\x01\xc1\x35\xd1\x32\x8e\x4a\x5e\x05\x07\xf2\x81\xbe\x47\x6a\x37\xb9\xa2\xeb\xa5\xa7\x1e\x6b\x9c\x79\x43\x80\xe8\x1a\x44\x16\xae\x41\xc4\x52\x64\x58\x5d\x70\xbd\x3d\x1b\xc3\xad\x17\xfc\x61\xb5\xf6\x6a\x1b\xa7\xe3\x45\x17\x78\x21\xe6\x93\x6e\x76\xc4\x86\xf0\x46\x3e\xdc\x4e\xd1\x6c\xde\x6f\xfd\x85\xdb\x82\x9b\x9a\xf6\x0b\x44\xb7\xca\xb1\x3a\x7c\x6e\xe1\x78\x8b\x2a\x57\x5c\x17\x8f\xad\x78\x48\x60\x73\x52\xf4\xec\x25\x92\x77\x83\x62\x1d\x72\x47\xdc\xe4\xab\xe8\xa6\x12\xe5\x94\x0b\x73\xe8\xa3\x35\x66\xab\xb0\xe4\x2f\xe6\x89\x22\x09\x40\x6c\x8f\x3b\x87\x49\x7e\xa1\x22\xe6\x2c\x70\xd8\xac\x1a\xe1\x2a\x56\x11\x95\x33\xf5\x06\xf9\xa4\x50\xeb\xaa\xbb\x67\xa8\x7a\x17\x32\x77\x1a\x1e\xf9\xb5\x7b\x42\x1d\xaa\xc6\xb7\xf5\x68\x36\xd8\xc4\x9a\xca\xf6\x0a\x79\xcc\xb3\xf3\xb8\x37\x0a\x0f\x8d\x7e\x63\x1e\x2b\xe7\x9d\xb6\x37\x46\x8e\x7c\x20\xc4\x4a\x0b\x23\x1e\xee\x5e\xd5\xf8\x96\x47\x22\x63\x00\x7b\x8b\x5f\x1c\x7d\xd0\x78\xc0\x9c\x2a\x47\x3f\x6d\xf5\xd3\x9a\xee\x2d\xd6\x64\x7a\x40\x7b\x14\x1d\x34\x0b\xb2\x4c\xb9\x3d\x15\x0a\x99\x2f\x12\x41\x38\x53\x32\x9e\xa8\x9f\x00\x9b\x3d\xc1\xa3\x99\x05\x66\xb5\x31\xda\x64\x6d\xfb\x3d\x77\x93\x00\x81\x15\xb4\xb3\xab\xbc\x9d\xe0\xfb\x8c\x40\xaf\xec\xc0\x9c\x5e\xa5\xe5\x70\x72\xaa\xf1\x8c\x80\x4a\xa5\x8d\x06\x92\x0f\x30\x18\xa4\x8b\x0f\xc6\xc8\xb4\x90\x64\x24\x2f\x93\xbf\xd1\x30\x9a\x83\x37\xbd\xdc\x21\x4f\xa7\xc9\xad\xd1\x0f\x6a\xb1\x17\xcc\x43\x56\xda\x0a\x50\x38\x59\xd4\xb5\x5c\x10\xe3\xda\x4a\xc9\x3a\x81\x6d\xb1\x20\x98\x85\x66\xc6\x1e\x11\x46\x4e\x78\x24\x0b\x46\x02\x09\x2c\x63\x76\xa5\x67\x54\x01\xc0\x8d\x94\xe4\xc4\x93\x90\xbb\x85\x4b\x7a\xc7\x37\x2d\xc6\x6e\xc8\xb0\x2d\x6
d\x97\xc3\xa9\xd0\x8f\xfc\x2c\xb3\x78\x7a\xe9\x50\x22\xa2\x72\x40\x33\x09\x45\xd4\x9f\xf4\xc9\x9f\x19\x10\x15\x64\x01\x73\xfa\x22\xe5\xf6\x66\xeb\x13\x0d\x4f\x1c\x72\x4f\x62\x86\xee\x13\x74\x23\xa4\xe3\xde\x59\x70\x2e\xba\x39\xb0\x80\x34\x40\x9e\xe2\x3d\x44\x03\x51\x0b\xe9\xa0\x1b\xb9\x4c\xe1\xd1\x1d\xc8\x6d\xee\x01\xe8\x5d\xa4\xe4\x79\xe7\x62\xa0\x91\xd9\x11\x2a\x76\xa8\x63\x1c\x51\x0c\xc3\x56\x90\xc0\x9d\x49\x01\xb9\xb1\x67\x10\xf9\x38\xd0\x00\x2c\x64\x8a\x26\xbb\x3b\x8c\x3e\x37\x91\x87\xa4\x2d\xab\x91\xc3\x47\x6d\x02\xa2\xa7\x3b\x2c\x16\x1d\x4f\x63\x6e\xbd\xf9\x08\x38\x45\x45\xa1\x92\x5e\x4a\x4d\x9d\x03\x39\xf9\x4c\x42\x39\xe8\x14\xe0\x30\x3e\x35\xef\x84\xc1\x7b\xa0\x36\x48\x91\x27\xe8\xb6\xaf\xcf\x64\x41\xb0\x16\xb5\x0e\x4c\x4e\x26\x18\x10\x13\x84\xb4\x32\x94\x5b\xf1\x1b\x2a\xc0\xc4\x1d\xad\x37\x50\x25\x63\x30\xef\x45\xa0\x04\xc7\x82\x74\x80\xd9\x0f\x98\x24\xe7\xb1\xf0\xc6\x96\x27\xa5\x45\x3c\x40\xe9\x5c\x98\xe7\xe0\xe0\x80\x10\x56\x54\xd0\xa3\xb8\x59\xb4\x7b\xb7\x6a\x5d\xbb\xe3\x50\x21\xee\xb3\x76\x9d\xd0\x23\xba\x11\x0e\x16\xe7\xc7\xb0\x1b\xc6\xd1\xe1\x3e\x88\x76\x53\x4a\x6c\x1d\x0a\xa2\x35\x00\x6d\x86\x6f\x9c\x21\xc5\x6c\x78\x4d\x01\xe0\xd4\xb5\x03\x85\xaa\xdb\x48\xb4\x9b\x3e\xa7\xf7\x7d\x07\xa8\xb3\x7b\xd0\x54\x9c\x50\x53\x4a\x26\x45\x0e\x60\x24\x39\x15\x3b\x09\x15\xad\x7d\xa7\x4c\x03\x37\x29\x5a\x24\x42\x2a\xef\xa0\x0b\x88\x1a\x6b\x46\x81\xdb\x73\xa3\x31\xe7\x04\x20\x50\x96\x22\xea\x35\xe7\x5d\xc3\xc7\x22\x5a\x0c\x0f\x32\x06\xc5\x80\x04\x47\x0f\xf8\xed\x75\x2e\xa7\x97\x2a\x4a\x9f\x0c\xa3\xd6\x37\x86\xe1\xb0\x3b\x13\xd4\x8d\x81\x86\xaf\x77\x62\x76\x69\x34\xcb\x2c\x72\xb6\x34\x6d\xe9\x6e\xdc\x99\xf5\x35\x29\x32\x25\x73\x69\x2e\xa6\x67\x61\xc8\x7a\x66\x19\xc2\x26\x30\xda\xa1\xa9\x2f\x59\xd2\xe5\x62\xcb\xea\x1d\x1d\x6a\x4b\x4f\xc2\x26\x5e\xea\x20\x6a\x56\x73\xc9\xd3\xfe\x3e\x47\xc8\xc3\x4f\x11\x1d\x3a\xa2\x05\x23\xad\x0d\x42\xad\x87\x3e\xb7\xc9\xd0\x36\x57\x8c\x84\x1b\x9f\xd2\x62\x78\x62\xda\x0a\xb2\x14\x91\x93\x1a\xf2\x0e\xb1\x8f\x92\x65\xef\xd2\x86\xf2\x84\x0
7\x88\x26\x2d\x48\xbd\x40\xf2\x18\xbf\xe1\x4f\x00\x7d\xd8\x6d\x8a\x9f\x64\x47\xdd\x54\x0a\x2b\xd8\xb4\xc9\x8a\x89\x37\xdf\x64\x62\x24\xae\xc4\x44\x86\xe0\x9f\x2e\x53\x30\x92\x39\xaf\x34\x94\x5f\xad\x46\x16\x31\x6e\x1a\x94\x50\xf8\x76\xc9\xf5\x60\x07\xa0\xe2\x24\x81\x1f\x9d\xdc\x96\x42\x29\xf0\x0e\x44\x1b\x64\x05\xd2\xf8\x3a\xe9\x53\x0b\x87\x8c\x07\x81\x02\xd8\x5d\xde\x40\x4c\x8d\x0b\xdb\xa8\xc3\xef\x3b\x48\xef\xba\x77\xa8\x1e\x72\xe4\x1a\x42\x2c\x8e\x68\x41\x85\x79\x31\x35\x7e\x22\x72\x88\xce\x0d\xd6\xb4\x91\x65\xb2\x1f\xd3\x21\x9c\x20\xa8\x5b\x3d\x17\xf0\xe8\x91\x9a\x2f\x2c\x2c\xe3\x6a\x0d\x88\xe9\x5c\xf5\x84\x0a\xee\x39\x2b\x90\xbf\xa2\x47\xe4\x6e\xc1\xe4\x41\x8e\x60\x79\x82\xff\x64\x2e\x43\x41\x02\x60\x79\x13\x79\x18\xee\xe3\xe4\x91\x52\x19\x8c\x92\x27\xdc\x0c\xb6\x0f\xb1\x49\x45\xe0\xfa\xbb\xac\xa5\xc2\xd6\x0b\xbd\x81\x60\x98\x82\xd9\xd4\xa4\x0d\x08\x74\xce\x98\x93\x9d\x2c\x25\x77\x37\x8e\x99\x83\xed\x79\xe6\x79\x13\xbb\xb3\x73\x05\xed\xa4\xd0\xad\xea\xee\x8f\x95\x1c\x97\x02\x88\x64\x65\x88\x25\xfc\xa9\xb9\x77\xf1\x99\xbe\xe1\x63\xe5\x19\x45\xe9\x01\x27\x3a\xc7\x03\x40\xf0\x91\x3e\x46\x20\x9b\xe1\x91\x1e\x24\x9e\x13\xcd\x37\x54\xe8\xd9\x51\x60\xeb\x50\xf3\xb2\xac\x9b\x1f\x08\x54\x1b\x40\x2c\x59\xb6\x9f\x3c\x5d\x99\x53\xbb\x93\xc0\x03\xa7\x00\xd9\xed\x6c\x16\x24\xd2\x86\xdd\x19\xd3\x58\xb1\x88\x7f\xce\x2a\xf7\x3c\x6e\x9b\x22\x27\xe8\xae\x8d\x74\x60\xcb\x39\x17\x3f\x24\x8a\x7f\xb7\x8b\x6a\xde\x28\x0a\x80\x69\x8b\xc2\x6c\x03\x14\xec\x07\x5e\x4a\xec\x59\xf9\x0e\x35\xaf\x9b\x3f\x25\xf0\x8e\x3d\xc3\x98\xf5\x9e\x13\xf5\xcd\x2e\x2c\x3b\xc6\xc8\x06\x92\x39\x08\xa6\x92\xbb\x41\x7a\x03\x1d\x78\xb8\xfa\xc9\xd2\x66\x81\x05\x69\x3a\xc4\x49\xca\x09\x80\xbd\x80\xf2\x0d\x34\x5c\x4d\x4a\xe5\x68\xad\xd7\x04\x10\x24\x86\xec\x1e\xb6\x72\x2f\xa0\xdc\x02\xc8\x90\xfa\x86\xf0\xc2\x98\xc7\xbc\xa9\x8e\x8b\xeb\xd7\xea\x99\x02\x53\xb9\xa5\x6a\x93\xee\x10\x90\x4e\xca\xb8\xfa\x89\x8c\x41\xd9\x08\xc7\xd0\xf9\xb0\x4b\xe6\x02\xd1\x5b\x42\x53\x44\xf5\xdc\xb3\xd8\xdf\xc8\x9a\x5e\x92\x1a\x2b\xfd\x0
b\x46\xf1\xf3\x2c\x52\x1f\x8a\xe5\x52\x67\x40\xd6\x7c\xd3\x97\xce\x2b\xce\xc6\x1b\x97\x6c\xbb\xd6\x95\xbf\x05\x53\xdf\xaa\xf7\xd8\xdb\x18\xd6\x09\xe0\x2c\xa0\xcf\x51\xe4\x27\x2b\xdd\x82\x35\x08\x0a\x60\xdc\x2d\x54\x3b\xe6\xf5\x91\xdc\x98\xed\x6d\xee\x64\x18\x8e\xc1\x74\x0b\xd2\x37\x10\x46\x51\x0a\xbb\x9d\xda\xd9\x78\xc0\x06\xf9\xe5\xa8\xcc\xe3\x16\x90\x85\x9f\xf0\x89\x63\x98\x9c\x12\x5f\xc1\x4d\x17\x1d\xec\x50\x7c\x64\x19\x37\x50\x81\x1a\xd9\xf6\x40\x02\x5b\xb3\x41\x74\x18\xb2\x53\x28\xee\x8d\x3f\xa6\xd2\xb1\x5c\xac\x84\xdc\x6b\x06\x62\x1d\x27\x50\x9d\x0c\x0b\x37\x92\x1c\x89\x66\xa4\x98\x5e\xfa\x98\x26\x7a\x0d\x5c\x99\x32\x78\xe8\x22\xc9\x8b\xda\xd0\x53\xae\x07\x07\xd6\x13\xc6\xe9\x6f\x60\x4d\xb5\x9e\x4a\x58\xd8\x33\x06\x9c\xfb\xb7\x18\x5b\x7c\x67\xc7\xc6\xcd\xd8\x0f\xc0\x91\xf5\x6e\x46\x92\x89\x00\x3a\x43\xe0\xf6\x28\x5d\x10\x7c\x89\xdb\x5e\x49\x4c\xa7\xf4\x20\x54\x13\x30\x2c\xd5\xb3\x29\x83\x4e\x5d\xdd\x88\xcd\x8b\xab\xcc\x6f\x34\x8d\x0f\xb5\x44\xda\x9f\x1f\xb6\xb6\x67\xda\xb0\x21\x5c\x0f\x9a\x67\x6a\xb0\x31\x48\x48\x0f\x3b\x4f\x93\x6b\xd5\xc7\x57\x6c\x56\x38\x02\x20\x93\x16\xf6\xa3\x3a\xd1\x58\x9d\x4d\x82\x11\x44\x51\xa3\xbd\x3e\xa1\x5c\x3f\x57\x29\xa7\x23\xb2\x66\x8b\x9b\xc6\x96\x86\xc6\x5e\x63\xad\x60\x9f\xef\x26\xbf\x3d\x7d\x15\x1e\xb1\x9c\x02\x93\x19\x5c\xc9\x17\x9c\xf9\xd8\xd4\x2a\x8a\x7d\x54\x3e\x52\x82\x8e\x17\xd5\xbe\x4b\x03\x4e\x31\xf8\x83\xb0\x07\x5e\xeb\x08\x0c\xcb\x8b\x20\x7b\x64\xa2\xa3\x2f\x64\x25\xd0\xac\x1e\x72\xa0\xbc\x7f\xf8\xd0\xd3\x87\x2b\x6b\xeb\x8a\xa2\xe8\x41\xf1\x86\xa7\x41\x9c\x83\x7d\xac\x83\xbd\x7d\xa1\x8d\x15\x21\xb3\x75\x95\x04\x0a\x78\x8c\xa5\xd6\xec\x2b\x8d\x7a\x5a\x12\x6c\x5b\xb7\xdd\xc2\x29\xd0\xc1\xcc\xbb\xc9\x8d\x06\x14\x15\x9e\xb1\x79\xcc\x7d\xe7\xf3\xb8\x59\xad\x66\x85\xbe\xe0\xa4\xa0\x91\x06\xc4\x88\x2e\x00\x18\xe1\x38\x6e\xd8\x8f\x21\x5e\x91\x08\xb6\x82\x36\xf2\x47\xa4\xdd\x4c\x6e\x7d\xcc\x46\xa5\x8b\x3e\x41\xcc\x8e\xec\xc2\x9a\xe3\x92\x38\x71\xba\xae\xaa\xc0\xe6\x28\x24\x90\x80\x3f\xd4\x86\x0e\xa6\x81\x29\xdf\x43\xb7\xe6\x5
d\x38\x40\xef\x2d\x92\x67\x14\x80\x8f\xbb\xbc\x18\x23\x1c\xaf\xbe\xda\xb4\x33\x04\x74\xfd\x4d\x62\xa6\x56\x3e\x52\x43\x24\x6b\x5c\x6f\x47\x62\x68\xa3\xab\x05\x00\xd3\x65\x6c\x80\x75\x0c\x40\x7a\x98\xb5\x45\xab\x42\x8e\x0c\x8d\x27\xf6\x86\x94\x13\xd5\xb3\x2e\x47\xcd\x1a\x0d\x2d\xda\x8c\x2a\x86\xc7\x19\x1e\x10\x32\x9b\xe8\x93\x30\x38\x05\x4f\xe7\xd1\x23\x08\xe9\x2e\x1d\x8f\xf3\xc9\x00\xe1\xd5\x7a\xe0\xb7\x9a\xdc\x81\x69\x3a\xc9\x2e\x4b\x06\x00\xcc\x85\xa8\x95\xd7\x0b\x76\x13\x8b\xf9\x33\x57\xa9\x14\x8a\x76\xe7\x94\x09\x3e\xdc\xc1\x18\x65\xf0\x3b\x75\xe4\xb2\x30\xc2\xf9\x56\xdf\xfd\x28\xe6\x7d\xe6\x95\x6c\xc8\x81\xc8\x7b\x28\x37\x34\x0f\xc0\xa4\x4b\x6b\x77\x58\x8c\xd1\x51\xb8\x28\x70\x3a\x0f\x74\xc0\x88\xaa\x00\x8a\xef\x41\x60\x82\xb7\x41\xb1\x41\x24\xb8\x9f\x6b\x76\x56\xb7\xce\xc1\x91\x75\xac\xb1\x45\xf5\xda\x33\x6e\xbc\x31\x2e\x7b\x8d\x85\x64\x9f\xc2\x7c\x3f\xa4\x72\x7d\x41\x33\x08\x10\x5d\x77\xf1\x86\xcc\x6f\xe4\xdb\x82\x5d\x71\x69\xa7\x3e\xd0\xd9\x60\x27\x9f\x33\xa1\xdf\xfb\x23\x6d\x64\xf0\x89\x56\x54\x70\x1f\x89\x75\x9a\x88\x75\x3a\x29\x50\xf5\x27\x01\xf7\x7c\xa1\x62\x10\x4c\x34\xb1\x20\x26\x4f\x3a\xf1\xc4\x73\x41\x0a\xb1\x7a\x58\x32\x4e\xfa\x8d\xe5\x0a\x57\x48\xc4\x7b\x9c\xfa\xe4\x51\x30\x47\xcf\x6f\x64\x2a\x6c\x9d\x01\x31\x43\x02\x96\x13\x84\x78\xc5\xb7\xfd\x18\x78\x7f\xf2\x60\xdf\x6d\x90\x07\x74\xdd\x7a\x83\xe5\xaf\x24\x1a\x90\x07\xac\x37\xce\x40\x2c\x14\x80\x73\xbc\x72\xcc\x93\x1a\x07\x67\x79\x05\x01\x0e\x13\xcf\x6f\xb1\xd5\xc7\xdc\xb4\x82\x5b\x00\x82\x41\x71\x2b\xa9\x58\xb0\x7c\x38\x9d\x83\x43\x48\xd7\xc0\x72\xad\xcb\x8d\xf1\x98\x07\xfb\x88\x44\x07\x06\xc0\x9b\x1d\xcd\xa8\xbb\x36\xd0\xa6\xa7\x8e\x1b\x9f\x19\x45\x51\x26\xba\x2a\x3d\x6b\x7c\xcf\x79\x8c\x3d\xdf\xa5\x33\x84\xa0\xb6\xa6\xdd\x11\x27\x58\xc7\x85\x0d\x92\xdf\x9f\x54\xba\x39\x99\x65\xce\x93\xd2\xe4\xab\x7d\xa9\xa4\x90\x53\x18\x49\xe3\x7d\xb8\x23\xa9\xfd\xd6\xfa\x70\x93\x80\x46\x40\x9e\xdc\xa6\x1e\x69\xd9\x89\x8c\x47\x55\xdf\xec\x5e\x49\xa3\xfd\x58\xe5\xfb\x0e\x81\x92\x41\x90\x0e\xe1\xec\xf0\xba\x0e\x9
0\x17\x78\x95\x99\x5e\xc7\x63\x70\x05\xb6\xe8\x6e\xac\xc5\x49\xe8\x15\x47\x83\x02\xc1\xb1\x23\xd3\xf9\x12\xd3\x95\xa6\xba\xc6\xee\x43\x4e\x9f\x22\xc6\x6d\x8f\x0f\x07\x65\xdd\x53\xa6\x40\x07\x17\x28\x90\x57\x36\x98\xde\xde\xca\x37\x32\x8c\xe1\x43\x65\x0f\x3c\x9c\x84\x7a\x4f\xc6\x8c\xef\x5b\xd6\x2c\xa5\x0b\xae\x3b\x03\x44\x64\xd3\x38\xae\x73\xb6\x2d\x47\x13\xd7\xde\xb8\x75\xe7\xb1\xd4\x56\xc9\x16\x92\xf5\x61\x2a\xc9\x56\x48\xe5\x98\x6b\xa9\x96\x88\xbc\x19\xa2\x74\x0e\xc5\xaa\xc1\x29\x02\xc9\x80\x27\x95\x06\xea\xf0\xa0\xc2\xde\x3b\x10\x4d\x93\xc7\xf0\x7a\x11\x22\xc3\x01\xc0\xfd\x71\xbc\xe5\x33\x5a\xec\x29\x89\xf6\xed\xc7\xb0\xaf\x9a\xb5\x86\x02\xc5\x06\x9f\x3d\xc4\x28\x70\x13\xec\x7a\x42\x15\x78\x30\x6d\x4b\xee\xed\x84\xe8\xa2\x70\xb2\x97\x4f\x76\x78\x09\x4e\xc8\x5d\xa0\xd6\x00\x07\x82\xe6\x76\xe1\x18\x7d\xa1\x17\x9a\xec\x4e\x9e\xc8\x34\x0f\x75\x20\xda\xac\x5c\xa4\x00\x31\x2a\x2a\x8b\xa7\x54\x7c\xe6\x2c\xa9\x75\xe8\xad\xab\x79\x1c\xb8\xfa\xb6\xcc\xbb\x88\xe6\xa4\x88\x66\x17\x97\xfd\xa0\x93\xa5\x73\xf6\xbe\x77\xa9\xac\xa4\x4d\x78\xb9\x63\xb9\xe0\x79\x99\x8b\x2e\xd7\xd4\x37\xdc\xad\xc5\xda\xbd\x3a\x59\xa3\x34\x01\xe7\x3d\x99\x48\x92\xe9\x0a\x56\xd4\x6b\xec\xc0\x9e\x8f\x06\xdb\x70\xd1\x82\x32\x98\x44\x17\x1c\xc4\xae\x6c\xe4\xe7\xad\x12\xa5\x9b\x0d\xd1\x7a\xa8\x5e\x58\x3d\x18\x01\x14\x70\x65\xd5\xf3\x02\x78\x77\xd7\x40\xbe\x3c\xec\x19\xaa\xce\x40\xd8\x6d\xb7\x56\x1c\xb6\xf8\x11\xb7\xd3\xd3\xc5\x98\x76\xfa\xfe\xaa\x43\x17\xf5\x06\x36\xe9\x12\xf8\xdd\xd8\x2e\x46\xa4\xe7\xdd\x14\x51\x48\xa2\x1e\x73\xa3\xe4\x98\xd9\x61\x2d\x05\xb7\x23\x41\x0c\x53\x3c\xae\xb3\x57\xce\xe5\x50\x28\x98\x3d\x32\x27\xd1\x52\x41\x4d\x67\xef\xa1\xc5\x5d\xee\xaf\x98\x07\xd0\x74\x71\xe6\xd2\xbe\xbb\x21\xaf\x7b\xcd\xe0\xce\xbe\x8a\x84\xa4\x9e\xb3\xe7\x7b\x5d\x58\x26\xc7\x85\x17\x1b\x59\xa7\x52\xa5\x1d\x6a\xb8\xe4\x60\x6e\xde\x81\xe0\x41\x8d\x4f\xfe\x29\x17\xb1\xc1\x3c\xdf\x8b\xbe\x0d\x89\xa5\xc1\x74\x0e\x16\x70\xe9\x34\xf6\x40\xe0\xf9\x59\x3b\x8c\x47\x64\x88\x75\x65\x05\x0d\xdb\xeb\xec\xd5\x74\xda\x58\xd
c\xa9\x4d\x49\x10\x47\x47\xa2\x79\xbf\x39\x97\xa7\xe0\x2e\x0f\xa2\x3b\x28\x52\x09\xb7\x9e\x41\x0f\xe9\xa3\x04\x70\x97\x66\x76\x2e\xbb\x7e\xef\x89\x5b\x27\x1f\x26\x37\xae\x09\x0d\x62\x0a\x92\xad\x34\x99\xaf\x04\x80\xe5\x37\xd1\xde\xc3\xe5\x4c\x51\x3d\xb4\x00\x36\xce\x9d\x23\xaf\xe6\xb8\x6d\x87\x29\x5e\xb6\x04\x31\xf9\xdd\x94\x9c\x79\xb2\x89\xb9\x39\x4a\x8d\xba\x5f\xa9\xf1\xdd\xfe\xbc\xa4\xa4\x77\x43\x47\x75\x44\x5d\x79\xe5\xd5\x4f\x66\xc1\xab\x25\x09\x60\xaa\x10\xf7\x7a\x8f\x7c\x4f\xb6\x83\x3d\x0d\x84\x5b\xb4\x1d\x14\x3a\xd2\xdd\x06\x36\x74\x30\xbe\xfe\x89\x8f\xe9\x04\x6d\xd0\x05\x88\x79\x03\x3d\x65\x86\xaf\x33\xda\x14\xc4\xe0\x2a\x55\xdf\x6f\x00\xb9\x76\x93\x06\x67\x45\xd0\x9d\x0f\xda\x70\x9e\xc5\xf3\x1e\xcf\x65\x47\x3c\x2e\x85\x44\x7e\xe6\x20\x29\xcf\xe6\xa0\x40\xd5\x89\x7c\xc6\x14\x0f\xda\x23\x6c\x45\x6d\xf4\xf0\xde\xfa\x12\xdb\xb5\x2e\x96\x70\x23\xe8\xe0\xcc\xa7\x97\x17\x1b\x08\xe1\x77\xe2\x72\x33\xaa\xd5\x78\x1a\x15\x65\x54\xad\xdc\x46\xa9\x33\x66\x37\x88\xe8\x20\xa0\x81\xce\x7b\xd5\x75\xbc\xb6\x36\xf4\x36\xa4\x53\x93\xaa\x53\x91\x90\x41\x4c\x06\x36\x19\xe0\xee\x4e\xc1\x0f\xad\x70\xe6\x16\xf6\x47\xd8\x1f\x69\x2f\x1b\x96\x60\xcc\xd4\xe9\x49\x19\xec\x13\x8c\x40\xa0\x7f\xcd\x15\x05\x35\x61\x0f\x10\xf4\x76\x2a\xba\xa6\xdd\xa0\x2f\xd9\x98\x5c\xcf\x39\x11\x50\xfa\xd5\xcf\xcc\xc9\x75\x97\xd4\x3a\x55\xd1\x73\x9d\x89\x76\x50\xdc\xe7\x84\x09\xdb\x67\x9d\xaa\xe4\x83\x03\xf9\xc3\x53\x56\x16\xbd\x8b\x2d\x8d\xce\xc4\x3a\xbd\x3e\xaf\x1a\x58\xa5\x9e\xd0\x44\x6e\x73\x03\x0a\x8f\x72\x76\x0e\x11\xaa\xd0\x26\x82\x18\x91\x8f\xb4\x1d\x8e\x78\x1e\xda\x16\x1f\xe3\x76\x78\x7d\x8e\x78\x7e\xc0\x1a\x92\xa3\x8e\x36\x6d\x59\x32\xa5\xa4\xc8\xe0\xf2\xa3\xb2\x07\x27\x92\x1e\x76\x24\xa7\xea\x92\xce\xe0\x88\xc7\x53\x48\x50\x2a\x27\xd5\xb7\xbb\x54\xbf\x5c\x45\xa9\x27\x25\xe6\xcd\xf8\x9e\x26\xfd\xa8\xdc\xfb\x56\xaf\x43\xe2\x5a\x6b\xd6\x7c\xe3\x5a\x99\xc8\x8d\x2e\x07\xb0\xfc\x8c\xd6\x91\x5c\xc9\x91\xc3\x15\xc5\xb0\x6c\xbe\xd4\x01\xc6\xd8\xda\xf2\xbd\xe7\x7c\xb0\x37\x4f\x88\x7c\xb7\x79\x25\x52\xd6\x4
c\x1b\xc0\xd8\x73\x79\x8c\x5b\x7c\x8d\xdb\x27\x74\x35\x51\x71\x2f\x27\xc5\x7e\x6c\x27\x65\xa8\x5e\x2c\x66\x87\x43\x88\x31\xa9\x7a\xf1\x14\x65\x7b\x3d\x7b\xe3\xe2\x8d\x9b\x10\x0e\x6d\xda\xc9\x63\xd8\xc9\x47\xd8\x8a\x0c\x26\x5e\x04\xfc\x88\xa4\x47\x10\x92\xe4\x04\x32\xf0\x23\xba\x5a\x37\xe7\xb2\x0f\x43\x1d\xb6\xf2\xbd\x47\xb4\x2a\x7c\x50\x0f\x37\x3e\x55\x61\x9f\xfd\x38\x6c\x15\x0e\x6c\xeb\xa0\x8d\xda\x68\x1f\x93\x96\x02\xd2\x44\x9d\x7c\x78\xcd\xd0\x55\x52\x57\x62\x53\xf7\x34\x59\x10\xc6\xc0\xb9\xcf\x38\x74\x03\x3c\x0f\x16\xfe\x81\x01\xf4\xae\xb3\x07\xbc\xf8\xd9\x18\x53\xf4\xc5\x7a\x84\xab\xcf\x73\xe0\xa2\x98\x98\x7a\xa6\x28\xc7\x87\xae\x03\xc1\xda\x3d\x3c\xf4\x17\xe5\x3b\x06\x18\x77\x0c\x31\xee\x18\xa0\xbd\x7e\x23\xc8\x13\x39\xfb\x93\xbe\xa9\x2f\xfd\x17\x09\xfb\x0c\xaf\xc6\xc6\x77\xf1\xe4\x12\x72\x9d\x5c\x45\x36\x0d\x22\x0c\x3b\x77\x2a\x10\xf6\xd4\x8f\xce\x40\xdc\xa3\xee\x66\x76\xb7\xe7\x69\x38\x68\x86\x8a\x67\x55\x5c\xb4\xbb\x7b\x4b\x6a\xf2\xe8\xa4\x03\xd1\x70\x06\x24\x19\x20\xff\x98\x9d\x84\xb7\xa4\x0b\x91\xe2\x6d\x63\x0d\xb8\xb9\x96\xc7\xb5\x1f\x55\x0e\xdb\xba\x93\x7b\xce\x2a\x2d\x8e\x34\xc0\xf8\x24\x9a\x93\x15\x16\x5d\x2b\x2c\x7f\xe5\xd4\xeb\xdb\xe7\x91\xbd\xfd\x1d\xb5\x56\xe3\x4f\xf2\x9b\x6c\xed\x7e\x2b\x44\x99\x32\xbb\xfd\x96\x76\x32\x67\x3e\x64\x3b\xb8\xb6\x14\xc4\x52\xe9\x2c\x7a\xe3\x0a\x8f\x45\xc3\x85\x88\xe6\x44\x70\x1e\xec\xc0\x4d\x06\x86\x8e\x20\x86\x02\xd4\xef\x18\xd0\x55\xae\x0a\xb0\x27\xf9\xd9\x1f\xd8\x65\xa7\x9f\xb3\x74\xaf\x79\xb0\xde\x9f\xba\x85\xa2\xf4\x5a\x4c\x44\xba\x8f\x9a\x5c\xa7\x64\x4e\x13\x54\xdf\x35\x42\x95\x2b\x8f\x84\x25\x8c\x08\xa9\xfa\x5c\x64\xfb\x58\x64\x9f\x94\x28\xcd\xc9\x24\x84\x0d\xdf\x47\x06\xef\x44\x64\x11\xae\x2b\x88\xd2\x38\x6a\x4d\x9a\x3c\x61\x80\x51\x3f\x5e\x69\x6f\x97\x9c\x07\xae\xdb\x6e\xa3\x5b\xd8\x0c\xb7\xe6\x0c\xb7\x54\xd0\xd2\x86\x03\x00\x42\x33\x24\x2d\x77\x40\x05\x5a\xdf\x8a\x5d\x51\xea\xbe\xbc\x8b\x8c\x43\x57\xe0\xf9\x51\x13\x2e\x0e\x4d\x2b\x4d\xc1\x33\x94\x51\x01\x9c\xf0\xb2\x81\x60\x25\x0f\x1c\x28\x3a\x1c\x28\x9a\x6
b\xf9\xbc\xea\x61\x8e\x20\xfe\x49\x83\xa6\x46\x83\x05\x4a\x33\x4f\x9d\x2f\xa1\x57\x84\xe8\xd5\x6c\xdc\xd0\xd7\xec\xb8\xae\xf6\x63\xd0\x6d\x17\x97\x9d\x07\xae\x3a\x6f\x6d\x73\x63\xf0\xcf\x94\x7f\xa8\xb3\x50\xce\xc2\x01\xc0\x41\xb2\x63\x84\x27\x27\x08\xe8\x40\x19\x53\xe9\xe6\xab\x1f\x2b\x50\x14\x4d\x17\x9b\x35\x0e\xf6\xb3\x5f\x57\xe4\x1c\x05\x85\x6b\xe6\xc2\x8b\x22\x66\xcc\xe4\xbd\xdc\xbe\xe8\xf0\xd6\xea\x22\xa4\xe8\xa2\x6b\x1a\xdd\xdd\xb8\xe0\xcf\x0b\xac\x65\x81\x83\xd0\x45\x82\xa0\x33\x2e\x0c\x40\xd8\x5e\xf9\x8a\x4d\xaf\xc7\x2d\xda\xb4\xd8\x6f\x87\x38\x6e\x07\xc2\x6f\x86\xd7\xdf\xe0\xbd\x26\x22\x64\x9f\x27\x45\x3f\x47\x42\x3f\x67\x12\xa1\x43\x74\xa7\x8c\x9b\x45\x6e\x6d\x70\x0e\x8d\xef\x29\x70\x43\x96\x3c\x60\xde\x9f\xef\xbe\x65\x36\x96\x46\x46\x5d\x10\x2c\x34\x2b\xd2\x98\x23\xec\xf9\x2b\x86\xac\x47\xf4\xea\x4e\xc6\xf8\x31\x48\xd4\x4a\xa0\x8d\x3a\xe4\xd3\x42\xe6\xdb\x40\xe6\xe8\x40\xe4\xe8\x40\x16\x41\x38\x89\x26\xa2\x73\xad\x3b\x76\x35\x06\xdc\xce\xdb\x22\x9d\xd0\x72\x3b\xa1\xc5\x70\xa0\x46\xee\x32\xf4\xc0\x77\x7f\x5a\xc8\x9d\x0a\xaf\x6c\x8d\x6d\xa2\x3c\x6b\xd5\x4a\x17\x29\xdd\x19\xa0\x31\xdc\xa5\x1d\x02\x4a\x1e\x63\x80\xfc\x33\x26\x3d\x21\x6a\xd9\x07\x8c\x6d\xa0\xb8\xbf\xea\x0b\xf5\xf2\x25\xbc\x00\x07\xaa\x79\xd8\x8f\xe1\x76\x97\x28\xf4\x55\xb7\x5f\xb1\x2f\x5a\xae\x27\x5a\xda\x72\x97\x4e\x97\x08\x9c\xad\x73\x93\x85\x0e\xa6\x25\xdb\xb6\x07\xae\x3b\x25\x24\xb2\x68\x86\x66\x9d\x07\x16\x26\xc4\xb2\xe2\xd0\x9b\xee\x44\xa7\xe4\xed\x2e\x1d\xb7\xbb\x7c\xd0\xed\x9e\xf9\x42\x1d\x3a\x63\xa6\x3a\xd0\xa6\x9e\xb7\x53\x3a\x6f\xfb\xcd\x7e\x8c\xb7\x5b\x35\x15\x20\x74\x55\xe5\xcb\xe3\xbc\x9d\x37\x64\x45\xe1\xf1\x95\x7b\xd3\x4e\x4d\xc3\xea\x11\x7d\x44\x58\x2c\x96\xa4\x56\xc2\xfa\x5a\x35\xf1\xab\xfe\x6b\xbd\x09\x30\x1b\x49\x0a\x31\xf3\x51\xbb\x78\xb3\x91\xeb\x30\xe6\x99\x58\x7c\x42\x97\xc9\x18\x5e\x99\xcf\x8b\xa7\x18\x0a\x0c\x40\x2e\x82\x90\x94\x64\xb7\x3b\x72\xfe\xa4\xc2\xfb\x94\x9f\x80\x77\x64\x81\x85\x64\x9d\x85\x6c\x5b\xda\x11\x1b\xd5\xd8\x91\x9a\x90\x71\x24\xc0\x47\xbf\xa8\x9e\x7
f\x87\x32\x46\x7c\x5e\x40\x77\x79\x6f\x50\x98\x8b\xaa\xda\x10\x2d\xf7\xcf\xfa\xc0\x02\xee\xad\xc6\x21\xeb\xb3\x13\x29\x2b\xcb\xee\xb3\xbb\xa9\x7b\x4d\x3c\x7d\x48\xb9\x30\x28\xae\xbf\x83\x94\x99\xe2\x37\x43\x26\x5d\x18\x4e\xba\x8b\xcf\x82\xaa\xbb\x6f\x03\x14\x47\x70\x32\xc6\x70\x1c\xa1\x21\xbb\x53\x3a\x9f\x73\x3b\x6f\x36\xe9\x04\xa4\x26\xc3\xbd\x6a\x68\x1d\x3e\x64\x9a\x11\x9f\xe2\xe7\x6c\xcb\xf7\xf5\x3d\x4f\x9e\x94\xc1\x13\xe6\x8d\xb9\xd9\x6e\x27\xc7\x8d\xab\x6c\x3d\x03\x30\xe0\x27\xdd\x81\x73\x3b\x99\xbe\x5c\x18\x49\xcd\x7b\x41\xb1\x63\x53\x2d\xbd\xef\xbb\x01\xb3\x19\xf6\x1b\xc8\x58\x8c\xcb\x75\x69\xcf\x30\xe0\xe7\xdc\x24\xd9\x10\xce\x39\xc8\xfd\x79\x33\x3d\x41\xb6\xa5\x07\xc4\x80\xd2\xf0\x39\xff\x40\x95\xe4\xe4\x46\x98\x31\x57\xdc\x34\xa5\xb2\xfe\xac\xb3\xaa\x7d\xb1\x74\xa9\x63\x38\xd5\x26\x40\x77\xbf\x39\x22\xc4\x14\xcc\xf7\x3d\xd1\xa5\x56\xb5\xa8\x81\xf4\x0a\xa6\x19\x8e\x8b\x2b\xc9\xc2\xda\x92\xc7\xae\xe0\xb7\x39\x80\x77\x5f\xbd\x0d\x64\x9a\x4c\x7f\x17\xce\xf8\xa9\x52\x77\xf4\x9b\xae\xf8\x91\x53\x5e\x2a\xac\x0d\xb2\x64\xa2\xcf\x7d\xd5\x45\xc9\x10\xf7\x29\xb1\x2e\xcf\xf8\xb8\x1e\xc9\x25\x0f\x58\x77\xec\x43\x05\x15\x2c\x0f\xba\x2c\x97\x8c\xe9\xd8\x07\x2a\xcf\x3c\x78\xdc\x52\xf6\xed\x95\x22\xea\x02\xab\xf1\x97\x27\x27\xc9\x7d\x94\x5c\x00\x16\xfc\xdc\x9f\x09\x4c\x10\x1e\xca\x85\xa9\x99\xf4\x42\xa6\xed\xd3\x00\xa5\xef\x39\x18\x23\xf4\xa1\x6f\x19\x96\x53\x9c\xb1\x70\x08\x30\x78\xc9\x88\x7e\xca\xa8\x94\xa7\xf5\x48\x6c\x96\x61\x6b\x34\xca\x6f\xd6\xd0\x33\x87\xc4\x7e\xea\x8e\x98\x55\xfe\x89\xba\x84\x18\x40\x8c\xc4\x10\xf7\x47\x0a\x2e\x0c\xdf\x7e\xe6\x88\x0b\x1b\x46\x17\xf7\xe2\x30\x4c\x05\xb9\x80\x8a\xfa\x25\xfb\x5d\x56\x96\xbf\xea\x09\xc5\xec\x6a\x3f\x54\x81\xe6\x30\xab\xc5\x18\xe5\xbb\xea\x18\xe3\x81\x31\x37\x61\xb5\xcb\x68\xb5\x7b\x26\xfd\x94\x93\x29\x55\x18\x2b\x1c\x96\xe1\x6b\x48\xca\x4e\xf7\xa9\x3e\x4c\xfe\x43\x3f\x17\x2e\xb4\x46\xbd\xe2\x4c\x0e\x53\xcc\x6b\x55\x7d\xf3\x4b\x89\x79\xde\xa3\xbe\xe7\x7a\x56\xd2\x8e\xe0\x48\xdd\x97\x6a\xdf\xf9\x63\x24\x6b\xd0\x8c\x83\xe
1\x2f\x4c\x19\xdf\x5d\x54\x76\x39\x9f\x3c\xbd\xf8\xa3\x7f\x67\x64\xb1\x19\x98\x92\xdd\x73\x15\x4d\x1b\x86\xc1\xbf\xf3\xce\xb4\xb6\x25\x6b\x0c\xc7\xe8\x77\x2a\xe3\x2f\x4f\xe6\x47\xb9\xda\x47\x58\xd7\x0c\x53\x5f\x78\x00\x62\xf2\xe8\x5b\xdf\xc0\x98\xf8\xb0\xef\x50\x69\x32\xd8\x50\x47\x6d\x7c\x91\x4b\xe6\xfc\xb4\x05\x27\x1a\x37\xfd\x5a\x72\x8c\x2c\xac\xf6\xea\xdc\x4b\xf6\x87\x77\xf2\x9a\xcb\x15\x25\x4a\x2c\x23\x6a\x87\xc7\x08\xeb\x37\xbf\x67\x4d\x1c\xf4\x88\xc4\x09\x93\xc2\x64\xca\x7b\xdf\xc9\x76\xf4\x1b\x1b\xbd\x5a\xbd\xfb\x33\xef\x66\x4a\x36\x39\xc6\xb9\xec\x37\x88\x89\x18\xf6\x73\xf6\xd1\xac\x48\x15\xe2\x57\xda\xe9\x21\xda\x70\x0c\x45\x72\x1e\x8c\xf8\x19\x77\x30\x03\x19\x3c\x7d\xbb\x30\xba\x9a\xb5\x8f\xa8\x67\x56\x76\xfb\xc1\xb7\x50\x8b\xc7\xf4\x52\x98\x9d\x8b\xac\x7e\xf7\xab\x98\xe8\x16\x3c\x6d\x19\x96\xd5\xd4\xdf\xf8\x9c\xe4\x22\xf6\xa0\x91\xb9\xb1\xf5\x8c\xc9\xdb\x59\x90\x41\xbf\xf6\xb9\xcb\x7d\x22\x57\x7c\x5f\xbd\x13\x62\x42\xb6\xaf\xfa\xf8\x4a\x8a\x21\x59\x98\xfc\x0f\xb9\x86\xb7\xdb\x9a\x4e\x7b\xe6\xc1\xb0\x9c\x25\xbb\x8f\x45\x55\xd0\xdf\xc6\xf5\x98\xb4\xed\x83\x63\xf8\x0b\xac\xf1\x89\xc5\xde\xb8\xed\x7b\xbc\x5c\xa7\x2d\x81\xef\x99\x68\xbb\x46\xc9\x98\x2e\xb4\x4c\x04\xe0\x3c\x64\xf1\x85\x23\xfb\xc4\xb1\x3e\x88\x5c\x7f\xb8\x0e\xc3\xdc\x55\x0c\xbc\xa6\xd3\x38\xb6\x3c\xc6\x83\xdf\xe3\x55\x76\x0b\x90\x62\x2a\x62\x66\x2e\xda\x21\x3f\x88\xbc\x1d\x88\x62\x60\x00\xa6\xfb\xd4\x11\xcb\xc1\x11\x40\xe7\x13\x9d\xa9\xe2\x93\x93\x4e\x5d\x78\x6e\xa2\xd1\xdf\xde\x68\x59\x9f\xb4\xea\x71\x98\xe2\x9a\xce\xe2\xf1\x66\x30\x37\x0b\xa5\x16\x67\x38\x67\x6c\x7d\xab\x73\xdf\x73\xca\x6b\xda\x81\xc8\xe5\x80\xdb\x13\x63\x38\xa6\xb6\x65\x1e\x05\xb1\xc7\xe6\xd7\x66\x13\x33\x45\x9d\xd6\x0d\xd5\x30\xdf\x66\x3b\x35\x46\xe8\x35\xcb\x73\x59\x3c\x0b\xe6\x72\x69\xb6\x3b\x06\x48\x69\x5e\x70\xcc\x2e\x85\x82\xcf\xf0\xf8\xe5\x5b\xcc\x71\xdc\x55\xbc\xef\x4f\x3a\x35\x98\xcc\x8e\xe4\x9e\x0d\x39\xe6\xb2\x63\x79\xb1\xe4\x59\x86\xdb\x5e\xb3\x30\xca\x53\x28\xbe\xdb\x31\x15\x87\x4c\x25\x21\x6a\x5c\xb4\x33\x45\x2
0\xe6\xc1\x32\x3e\x89\x8c\xe9\x62\x68\x1a\xd1\xb8\x4a\xd3\xb3\x27\xfb\xbd\x6e\x00\x12\x72\xbb\xdf\xc9\x93\x34\x0c\xfe\x49\xe9\x10\xf3\x60\xcc\xe3\xb6\xa9\x5b\x01\xe6\xf9\x21\xdd\x25\xe4\x21\xdc\x2e\xc5\xe7\x4e\x5a\xd2\x32\xaa\xcf\x83\xc6\x83\x71\x58\x67\x0f\x88\x79\xf0\x66\x2d\xad\x1a\x5a\x90\x0c\x18\xb7\x83\x92\x98\x5c\xcb\x6c\x9f\x3b\x8c\x1b\xbf\x11\xe4\x24\xd0\xc0\x93\x7e\x62\x7c\xcd\x60\xec\x2d\x17\x27\x7c\x21\x70\xa8\xd9\x53\x1f\x62\x86\x12\xfe\xf4\x31\x91\x3f\x72\xfe\xd8\x36\x50\x6c\x5b\x47\xeb\x62\xd3\x67\x42\xc5\xb3\x47\x27\x9e\x37\x40\x24\x08\x3a\xb1\xb8\x96\xf3\x51\xc8\xf9\xbe\x23\xbf\xea\x28\x82\xd3\x38\xb9\x75\xfa\x59\xe3\x31\x63\xf3\xbd\x17\xd9\x41\x92\xa0\x93\xd3\xc3\x57\xf3\x58\x99\x18\x72\x01\xfe\x5b\x58\xb5\xda\x3d\x44\x41\x0c\x3b\xdf\xfa\x41\x88\x79\x70\xd2\x71\xeb\xf4\xa2\xe3\x49\xd0\xba\x58\x97\xc4\xe4\x56\x2e\xf9\xc4\xdf\x6b\xaa\x87\x16\x28\x74\xa3\xc0\x88\x0c\xa2\x98\xb1\xb9\x87\xcc\x85\x0f\xc3\x50\x25\x13\xbd\xee\xf3\xeb\xbe\x38\x7d\xe7\x47\x13\x43\xd8\x70\xec\x83\x00\x88\x80\x7b\xf1\xc3\x5c\x2c\xd7\x13\x8c\xe2\x34\xe9\xcd\x86\x34\xa1\x67\x03\xee\xfb\x8e\xe7\xa1\xd4\xe3\xd4\x75\xe7\x14\x21\xb6\xac\x5d\x14\x86\x13\xd4\x89\xce\xb6\x02\xe0\x00\x50\x7d\x52\x7e\x29\xa4\xfc\xf7\xdd\xcd\xf3\x6d\xcf\x6f\x60\x3c\x0b\x19\xcb\x38\x1b\x7c\xe6\xc6\xf7\x9b\x2d\xd5\xd2\x2c\x97\x1c\x43\x19\x30\xd0\x40\xfa\x00\xeb\x83\xa7\x0c\x70\x3e\x3e\x67\x9e\xdb\x60\x60\x83\xf3\x06\x7a\xcd\xc7\x4b\x76\x1f\x33\xe8\x49\x88\x21\x3d\x54\x1d\x99\x24\x64\x76\x9b\xaf\x8c\x59\x37\x6e\xef\x5d\xad\x47\x74\x0d\x1a\xfc\xb0\xe6\x75\x42\x6b\xcd\x1a\x35\x7b\xf4\xcc\xd1\xf3\xaa\xb0\xe5\x2c\x59\x7c\x34\xc7\x0a\x3f\xd7\x85\xde\xaa\x94\x2f\xd3\xfb\xda\x0d\x71\xb4\x69\x64\x17\x93\x26\x31\x58\xfe\x08\x23\x08\x1d\x93\xc7\x63\x50\x7d\xde\x60\x02\x92\xd8\x3f\x63\x92\x51\xb3\x69\x81\xc9\xfb\x40\x61\xd7\xd4\x0c\x46\x2d\x17\x5f\xa3\xa1\x3c\x3b\xe2\xab\x87\x6f\x73\xa3\x76\xb8\xc8\x49\x94\xe7\xa0\xd4\x83\x32\xd0\x86\x86\x2d\xee\xc3\x4e\xd4\x28\xd1\x62\x99\x8b\x37\x41\x6f\xb1\x48\xdc\xe7\xe4\xf5\xb9\xe
e\x7d\xde\x0d\x47\x1a\xd6\x30\x7b\x11\x85\x7d\x0e\x1f\x32\xe7\xc7\x27\xe2\x1f\x7a\x7d\x06\x42\xdb\xca\xda\x6b\xb6\x3c\x5e\xcd\xe1\xd9\x61\x4b\x34\x46\x6d\xd4\xa6\xed\x24\xc7\x13\x1b\x4f\x78\xa1\x70\xe2\x0e\x65\xd7\x1d\x8b\xb8\x5c\x4d\x03\x05\xf6\x51\x74\x52\x9b\x35\x50\x9b\x49\xf1\x4b\x6e\xfe\xfe\x7d\xd9\x4b\x87\xe0\x46\x5c\x11\xd0\x87\x35\xf3\x8a\x04\x9e\x60\x3d\xa2\x47\xd4\xc6\xef\x0d\xfb\xeb\x33\x26\x97\xfd\xe6\xdc\xd4\x2a\xa6\xe4\x57\xbf\xfc\x9a\x39\x40\x94\x66\x40\x94\x06\x33\x88\x36\x62\xfd\x1e\x1e\xcf\xe7\xc1\x85\xb8\x6e\x3f\xa6\xdb\x79\x6b\x64\xdb\x5d\x34\xdb\x1d\x14\xfb\x41\x19\xda\x59\x48\xd0\x68\x6b\x7c\x08\x18\x0b\x03\x42\x5a\x51\xa0\x74\x11\xc0\x34\xea\x10\xa8\x49\x94\x44\xdb\x28\x43\x30\xe2\xc3\xb2\x06\xaa\x46\x08\xf1\xac\xae\x9a\x8c\xfe\xb0\xdf\x64\xec\x48\x85\xa8\x71\xdd\xe0\x2c\x2f\x38\x8c\x54\xeb\x10\xd1\xce\x74\xd3\x8e\x74\x52\x8e\x79\x52\x1c\xa9\x36\x9a\xe3\x62\x6e\xa4\x32\x8c\x71\xbb\x4e\x2b\xb1\x92\x2b\xf9\x1a\x2e\x72\xb4\x21\xfd\x2b\xba\xab\x3d\x2d\x3c\x22\x3f\x6a\x23\x24\x6b\x22\xc4\x6b\xa2\x36\xe9\xe9\x60\x12\xc8\x23\xe5\x4b\x4c\xcf\x3a\x0f\xce\xd0\x0c\x5d\xc8\x1c\x42\xda\x13\xc2\x80\xe9\x04\xaa\x75\x0b\x50\x94\xa1\x89\x27\x8a\xe9\xfa\x67\xef\xc7\xc8\x97\xce\xa6\x0e\x80\x82\xb5\xbb\x2b\x68\x6c\xd0\x67\x42\x9f\x4e\xca\xc9\x95\xb3\x5a\x63\xbd\x72\xd9\x6f\x76\xa5\xa0\x4e\x34\x69\xd9\x26\xd6\xc3\x90\xf8\x30\x9d\x2b\x0d\xe1\xb7\xba\x4a\x15\x73\xeb\x1e\x6d\x3f\x2b\xdc\xb3\x57\xb8\xd7\xbc\xbf\x3b\xc6\x7e\x07\x0c\x38\xdf\x36\x02\x80\x97\xd8\x6f\x96\xd8\x47\x17\x12\x78\x4a\xb4\xe1\xad\x64\xb4\x05\x9a\x46\xa3\x12\x52\xde\x80\xcb\xa7\x5e\xf8\x27\x2b\xf6\xd8\xb4\x78\x23\xbe\xbb\x5e\x33\xc4\xae\xc0\xba\xde\xd1\x71\x7a\x75\xa5\x48\x94\x46\xb3\x22\x21\xaa\x39\xbe\xee\x8f\xc2\x50\xd2\x22\xa8\xcf\x3c\xb1\xa1\x41\xb5\xa1\x41\x71\x71\xdd\x61\xc6\x65\x5f\x97\x33\x45\xb4\x88\x02\x0b\x92\xa0\x7b\x48\xb7\x99\xbb\x77\x95\x0e\x49\x34\x4a\xf2\x33\x97\x3e\x19\x6b\xd5\x15\xdc\x1f\x81\xd4\x13\xa2\xd8\x6b\x66\x22\xf1\x76\x2b\x9c\x8b\x5a\xcc\x13\x34\xbf\xf7\xa4\x4e\x3d\x2
1\x0d\xa7\x88\xa2\xc3\x71\x64\xe6\xce\xfd\xe1\x3e\x5b\x17\xf2\x70\xdf\x6f\x06\xdb\x1f\x14\x7b\x64\x8b\x9a\x82\xdb\x18\xa9\x66\xbd\x2b\x80\xe2\x92\x89\xd1\xc1\xfb\x1b\x42\x2e\xed\xb0\xac\xe8\xad\x8e\x59\x9e\x60\x3e\x67\xcf\x4b\x4d\x05\x2b\x78\xaa\xe4\x19\x42\x6c\x55\x62\x49\x89\xab\x2d\x29\x3f\x66\x5f\x15\x0e\xe4\x56\xf7\xbd\x32\x57\xcd\xbe\xd2\x88\x76\x77\x21\x78\x49\xbc\x85\xb4\x90\xd9\x81\x8f\xf6\x63\x9f\x1d\xcf\x5d\xb5\x9b\xee\x15\xf0\x47\x48\x9f\xc7\x39\xb9\x5a\xc8\x32\xc6\x0f\x3a\x25\x13\x3d\x9a\x6e\x82\xb7\x06\xc1\xe4\x2d\x24\x46\x85\xda\x74\x01\x4b\x30\xbd\x8b\x8c\xf9\xee\x74\xb8\xe2\x05\xc1\x88\x4c\xe3\x13\x08\x3b\x72\xf0\xc6\x32\x38\x71\x02\x0e\xb7\xfb\x13\xd5\xa0\x62\xc7\x11\x94\x40\xa8\x7b\x1f\x8b\x1e\xec\x04\x9e\x46\xa6\xe7\x8d\x67\x66\x38\xba\x78\xa2\x45\xd0\xe9\xec\x3c\x7b\xe3\x39\x11\x8c\x04\x8a\x10\x7a\x56\xcf\xbe\x75\x86\xb3\x27\xd7\xa7\xd2\x93\x12\x94\xc0\x0d\x0d\xa2\x4e\xef\x2b\x1b\x0f\xa2\x44\x27\x7f\xab\x73\x65\xae\xc2\x1a\x4d\xcc\xd3\x88\xc9\xb5\x20\xc3\xfa\x82\xd6\x6f\xdf\x35\xc0\xc5\xb5\xa5\xa0\x4a\x73\xd3\xf3\x76\x1a\x9e\x96\xb8\x9d\x33\x10\x87\x3f\x22\x33\x22\x73\xf6\x63\x38\xa8\xbc\xf1\x1a\xdf\x6b\x07\x6b\xbc\xc9\xa3\xf9\x72\xa6\x99\xcc\xb6\xf3\x02\x84\xf1\xd5\xe4\xdb\xac\xb8\x6d\xad\x13\xcf\xa3\x47\xca\x75\x99\x6e\x9f\xb5\xb5\xf4\xc5\x80\xbc\xa3\xa2\x83\x3f\xdd\x8d\x22\x24\xee\x66\x8c\x5e\x14\xa8\xcd\x81\xa8\xb5\x54\x4a\x48\xda\xda\xb2\xde\x50\x26\xa5\x5b\x12\x40\xe9\x7c\x89\xec\x99\xa4\xe5\xc9\x83\xd4\xd7\xc9\x9b\xa8\xcd\x38\xcb\x8d\x08\x64\x85\x64\xc2\xe9\x92\x1a\xe8\xf5\x18\xa7\xe9\x4a\x06\xa1\x1a\x8c\x83\x32\x24\xd3\x9d\x5e\x56\x5f\x6d\xba\x1b\x54\x06\xdc\xf7\xfe\x5a\xac\x0e\x4d\x40\x69\x80\x4c\x54\xb4\xb6\xf2\x09\x06\xe8\x78\x52\xbc\xc8\x9d\xe8\xa0\xa7\xdd\x3d\xf4\x85\x35\x81\xf3\x19\xc8\x74\x10\xd4\xfb\xf4\x76\xef\x09\x0d\x92\x8e\xaa\x89\xda\x68\x0e\x54\x0a\x50\x0f\x20\xf0\x9a\x27\x72\x73\x84\x1d\xb8\xdd\xad\x76\x19\xe2\x3b\x94\xda\xab\x4b\xc0\x7e\x87\x82\x45\x91\xfa\x2a\x7e\x37\x1a\x46\xfc\xa8\x65\x0c\x03\x65\x81\xdf\x75\xcd\xb2\xe
f\x73\x0d\x3e\xb9\x09\x02\x1e\xa3\x47\xe0\xe1\x8c\x0e\x9e\xa2\x53\x11\x78\xd6\xb6\xac\x05\x41\x77\x6e\xcf\xb3\xbe\xaa\xbb\x2b\xa1\xe5\x23\x8a\xe3\x75\xbc\xd2\x4e\xea\x0a\x56\x60\xd7\x25\x95\x2c\x70\xa9\x8b\xce\x40\xe1\xed\x91\x61\x62\xbe\x38\xe7\x79\xb8\x17\x1c\xd1\x36\x5d\xb8\x5c\xd2\x6f\xb5\xd9\xf4\x37\x5d\x68\xda\xbc\xc8\x61\xc2\xde\x35\xe4\xe6\x7b\x42\x9e\x2d\xba\x76\x45\xb4\x3a\x1c\xea\x59\x41\x39\x12\x4d\x73\xbd\x3b\xab\x83\x16\x90\x0e\x82\x1e\x28\xd3\x0c\x50\xa6\x39\x57\xde\x9a\x7d\x35\x7e\x4a\x78\xa6\xb9\xd3\x9c\x30\x98\xce\xaf\x86\x0f\xa0\x4b\xe2\xaa\x3e\x21\xb2\xa1\x71\x87\x30\x8c\x6a\xe5\x23\x3d\x7d\xb3\x6a\x3e\x67\x0d\x51\x84\xe9\x22\x00\xf1\x29\x07\x2f\x67\x64\x9d\x6e\x65\xa0\xa4\xdc\xb8\x48\x74\x65\xef\x1a\xdc\xbc\x72\xfb\x96\x2c\x06\xcc\x92\x78\x7d\x2c\xdc\xc5\x8a\x27\x3f\xf2\xa2\xdc\x83\x09\x3a\xbe\x56\x58\xe1\xdc\xe3\xcd\x85\x35\x9b\x8a\x00\x30\x9f\x13\x61\xbf\xa5\xf3\xd9\xba\x47\x01\xa6\x47\xd7\x8f\xfa\xbd\xb5\xdb\x8b\x6a\x7c\xd4\x70\x89\x2b\x27\x21\x99\x26\x94\xa4\x5b\xaa\x93\x6d\x53\x98\x6a\xac\x7c\xa2\xe2\xe9\x38\xe1\x1c\xa8\xbe\xa9\x92\xf6\x09\xd1\x5b\x0c\x6c\xab\xa1\x95\xa9\xc0\xfa\x95\xc0\x70\xe5\x74\x24\x24\x08\x37\x53\x8d\xe9\xc2\xb0\x4f\xd9\xbe\xd2\xe4\x26\xde\x2b\x88\x28\x34\xe7\x4e\x99\x55\x18\xb4\xc3\x48\x8c\x69\x51\xe0\x8e\x61\xb0\xdc\xf1\xe9\x3f\x9c\x12\xb7\x51\x87\xd3\x3e\x08\xe4\x28\xfb\xb4\x6f\x7c\x6c\x3f\x56\x25\xa1\xdb\x43\xb7\xfb\xb1\x6e\xdb\x58\x0b\x41\x91\x02\x88\x1d\xc3\x52\x5d\x44\x63\xd2\x08\x2c\x3e\x95\xaf\xd6\x25\x5a\xb6\xad\xeb\xaa\x67\x39\xc4\x37\xab\x78\xf5\x88\xdb\x06\x0a\xad\x07\xe7\x68\xa9\x06\x9a\x72\x0c\x04\x71\xbb\x88\x80\x6a\x1a\x7d\xd8\x7c\xeb\x9b\x4b\x51\x01\x03\x80\xca\xbb\x0e\x81\x87\xc4\xf2\x97\xf9\xd5\x6e\x69\xcb\xb6\xf9\x01\x00\x8a\xdd\xed\xe1\x02\xb7\xbb\x44\xae\x88\x21\xb4\x16\x92\x76\x93\xc5\xfa\xb9\xd7\x0d\xcd\x8c\x24\xf7\x4b\x78\x9f\x0a\x5e\xee\x72\x18\xf2\x8a\x20\x2f\x8a\x59\x30\x78\x6d\x7c\xf9\xfc\x19\x2d\x6b\xd4\x63\x66\x39\x7f\xcb\x41\x3c\x87\x14\x06\x82\x76\xb9\x21\x5a\xf9\x32\x0e\x6a\xcd\x06\x4
5\x5a\x34\xcb\xc5\x62\x4c\x07\xbf\x68\xf9\x04\x16\x59\x48\xa5\x86\xc8\x9f\x3d\xe5\x56\x90\x79\x94\xd7\x12\x24\x11\x1f\xa1\x16\xe5\x58\x1b\x21\xc1\x60\xed\x4e\x02\xd0\xbc\x6e\xaf\x3e\xbe\x6e\x92\x10\x9e\xb6\x35\x50\xf7\x5e\xb2\xf4\x8b\x7b\xb9\x7d\xfa\xe8\x81\xb7\x67\x16\x03\x14\x30\x0c\xa6\xfc\x6a\xaa\x6a\x24\xd1\x0e\x38\x9b\x87\x7b\x44\x45\x3a\xb0\x4d\x44\x10\x74\x38\x1c\x89\xb6\xeb\x95\x65\xad\x9f\xdb\xd4\xb5\x70\xe2\xd8\xa1\x33\x15\x8c\x68\x47\x92\xbe\x75\x46\xff\x8a\xe7\x2a\x9d\x9a\x45\x98\x96\x10\xa3\x01\xa9\x9e\xcf\xc7\xb3\xe1\x6c\xee\xf2\x2d\xb5\x4b\x83\x0a\x80\x20\x9d\x83\x40\x8c\xaa\xb8\xa5\x74\x83\xe6\xb8\x8d\xb8\x6d\xe8\x10\x9b\x8f\x1b\x3f\x03\x72\xe0\x10\xd9\x81\xc7\x68\xb1\xb4\x89\x0e\x99\xcd\x63\x6c\xfc\x89\xe6\xb3\x7c\xf3\xc9\xe5\x3a\x79\x89\xc3\x27\xe0\x48\xea\x03\x41\xc5\x5e\x20\xd7\x61\xbb\x73\x8f\x41\xbf\x4f\x04\x87\x83\xa4\xb2\x13\x6b\xa9\x71\x04\x11\x5d\xbf\xcd\xc2\x97\x9b\x03\xd1\x60\xba\x81\x00\x0e\xde\x85\x7e\xc6\x64\x1b\x8e\x7d\x51\x4c\xa6\xa4\xe8\x33\x7e\x68\xad\x1d\x7b\xd5\x90\x8d\xcc\x4e\xc1\x3c\x57\xef\x52\x3d\xa2\x3c\xa7\x68\x04\x54\x4f\xaa\xad\x05\xe3\xfa\x44\x10\x6c\xde\x36\x5a\xf5\xe1\x1a\xd7\x6d\xe7\x81\x23\x21\x4d\x53\x95\x46\xe9\xbd\x6a\x9a\xce\xc7\x5e\x80\xe5\x05\x11\x24\xb1\xec\xba\x4d\x96\x7b\x55\x20\xbf\x84\xfc\x33\xda\xfc\x09\x9f\x95\xe2\x56\xcf\x24\x01\x6c\x6d\x5e\x14\xb4\x30\x2e\x99\xa4\xb4\xb1\xdf\x0d\x27\xfc\x44\x49\x24\x1d\x6d\xe9\xb4\x23\x15\x11\xcf\x0a\xa3\x35\xf8\x8e\x0d\x36\x34\x76\x6a\xf3\xf2\x9d\x96\x1d\x2f\xdc\x67\xac\x31\x26\x5f\x41\xc0\xbc\x81\x8c\xe9\x5e\x95\x3a\x24\x74\x74\x23\x99\xc2\xea\x06\x22\x3d\x54\x69\xdd\x11\xe0\xc5\x6b\xb0\xcb\xc3\x05\x81\x4d\x1f\x94\xb1\xc7\xab\x0d\x94\x85\x91\x0e\x6c\xee\xe1\x42\xda\x3d\xa6\xa9\x65\x98\x9c\x31\xed\x2d\x68\x59\x5e\x7d\x39\x56\xba\x6a\xcb\xe6\x17\x66\xfb\x46\x87\x3f\x5e\xee\x5d\x80\x1b\x69\x40\xb5\x2d\x5f\xa4\x5d\x49\x3b\x99\x3f\x01\xc8\xb0\xc3\xb6\xea\x1a\xeb\xc1\x59\x72\x42\x9e\xb8\xf7\x40\x97\xab\xd7\x57\x25\xbe\xac\xa8\x5a\x21\xa7\x3f\x11\xbd\x72\x00\xe0\x46\x74\x1
d\xda\x20\x69\x27\x2b\xd5\x55\xaa\xa5\x5a\x01\x28\xb5\x05\xeb\x1e\xd1\x22\x0f\x33\xe9\xdb\x37\x7f\x67\x94\x99\xde\x16\x03\x04\x9b\xe9\x69\xe1\xb6\x00\x57\x66\x81\xd3\x34\x4d\xdc\x61\x60\x8d\x84\xfd\x92\x1a\x9d\xec\x76\x1a\x35\x72\x97\x21\x9e\xfc\x24\x09\x82\xdc\x6b\x86\xc4\xe4\x2a\x21\x9c\xb2\x04\xcc\x8b\x31\xea\x63\xa1\xba\x84\x93\x7c\xde\xee\x18\xad\xa4\x39\x76\xb0\x07\xcb\xa3\xda\x03\xa2\x00\xdb\x7a\xef\x7b\x19\x66\x20\xc0\xde\x0f\x5e\x4e\x6d\x29\xd5\x83\x88\x54\x81\xa6\x0f\xfa\x1e\x67\x7b\x5d\xdf\x3a\xfd\x9c\x2b\xa9\x20\xb9\xca\x8a\x11\x1a\x6c\xf8\xe8\xea\x5c\xa4\x27\x05\x7a\x42\x15\x9a\xdc\x7c\x09\x1d\x26\x14\x9a\xd8\xa5\xc1\x04\xbe\x87\x80\xa1\xc0\xe4\x6d\xbf\xd9\x17\x04\x80\xd6\xe3\x46\x77\x41\xc5\xa7\x00\x53\x4a\xef\x52\x71\x0f\x08\x0e\x3c\x01\x1e\xe2\x17\xad\x33\x1f\xbc\x8c\xde\x81\x2b\x3e\x37\x96\xa7\xe5\xde\x96\x94\xf4\xb6\xc0\xfa\x4c\x0a\xf4\x74\xed\x47\xe5\x2e\x8a\xe8\x0c\x29\xa7\xee\x78\x65\x25\x4c\x74\x0a\x82\x3a\x8f\x60\x65\x70\x5a\xf6\xbd\x45\x8c\x8b\x23\x5a\x48\xe6\x30\x23\x77\x05\x40\xe2\x5d\x1e\x5e\xb8\x4b\x84\x4e\x76\x0d\x39\x3d\x6b\x6f\x52\xc1\x0a\x18\x96\x6d\x1d\x2e\xd1\xc5\xd3\x2a\x33\x15\x79\xfe\x84\x42\xb1\xc2\x75\x05\x3d\x85\x88\x97\x69\x62\x13\xc4\xc7\x2e\x39\x71\xe8\x33\x1a\x3b\x69\x45\x51\xe4\x2e\x2c\xe4\x43\x57\x36\x8f\xda\xa6\x41\x7d\x78\xf5\xf2\xe7\x1b\x1d\xe3\xa3\xe7\xbc\x30\x51\x30\x82\xe8\xe0\x65\x79\xe6\x8c\x59\xdd\xe4\x47\x9e\x69\x9a\x06\xef\xa0\xe8\xdc\xea\x79\x39\xd2\xee\x92\x82\x80\x5b\x5d\x2a\x49\xf0\x4f\x01\xa6\x36\xa3\x73\xee\x27\x15\x27\x67\xe2\x70\xed\x75\x80\xf3\x61\xf1\x01\x3a\x5e\x5b\x7b\xc7\x06\x2f\x54\xe5\x18\xcd\x56\xbc\xb6\x2a\xfe\x86\x22\x04\xf4\x2e\x0f\x57\xd6\x8a\xb1\x89\xeb\x36\x10\x9e\x4f\xdd\x39\x24\xd1\x08\x64\x9c\xae\x26\xb1\xcb\xb9\xd5\x6d\x64\x2f\xef\xd8\x7e\x15\x2b\x88\x18\x8e\xf0\x0c\xae\x86\x01\x0e\xf2\x83\x6a\x2e\xca\xd3\xee\xdc\xb1\x59\x37\x00\xf5\x23\x77\x0d\x14\x81\x94\xdb\x44\x83\x4f\x2b\x8b\x5b\xf6\x72\x51\xc0\xcf\x18\xe2\xaa\x3e\x17\x79\x12\x3f\x6d\xe3\x72\x24\xcb\x81\x6d\x9d\xda\x70\x63\x78\x7
7\x15\xbb\x1d\xee\xe4\x01\x49\x46\x85\x39\x3b\x93\x45\xd1\x6d\x18\x2a\xd1\x00\x05\x29\x13\x4d\x68\x32\x4b\xe9\x35\xb3\x2c\xcd\xb2\x8d\x71\xd9\x7b\xe4\x61\x8b\x3e\x40\x11\x01\x83\xbd\x68\xc8\xc5\x67\xfc\xa4\x9b\x40\x6e\x0a\x01\xd0\xaf\x59\xb2\xb1\xe1\x3b\x9a\xf5\xaf\xfa\xba\x46\x4e\xcf\xd9\x48\xd4\xe2\xfd\xcd\x30\x14\xde\x8b\xdb\xbb\xb0\x38\x52\x7c\x8f\x40\x6a\xe7\xf4\xd7\x24\x08\x49\x3d\xd7\x7b\x92\x94\x1e\x67\xf9\x1a\xc1\x19\xdb\xb5\x58\x4f\x19\x97\x8c\xc6\xad\x40\xdd\x19\xc4\x91\x5a\xf9\xc8\x50\x72\x99\xb4\x8c\x63\xfa\x47\xf8\x31\x93\xea\x16\x42\x2d\xd6\xb0\xbb\xb4\xef\x27\x02\x85\x84\x50\xa2\x9d\x4c\x50\xda\xe5\xa6\x82\x05\x90\x03\xb4\x37\xc2\x3e\x84\xad\x7d\xc4\x4b\xd1\x4b\xae\xa7\xd9\x3d\xa9\xc9\x2c\xb1\x78\x0a\x88\x57\x5e\xe7\x40\xf5\x6c\x38\xf4\x7e\x6a\x2d\x61\x23\xf0\xe2\x30\x39\x47\x09\xeb\x4d\x78\xaf\x17\x0c\xce\x9b\xbb\x9e\x74\xe0\xf3\x22\x34\x31\x5c\x3f\x43\x03\x5b\xfc\x33\x3c\x05\x4c\x98\xd0\xe9\xa8\x11\x84\xa0\x2b\x07\x4f\xe5\x10\xbe\x6e\x05\x0a\x00\xb8\xe2\x3c\x90\x46\x28\x35\x79\x5c\x8a\xa2\xc8\x10\x5a\xa9\x67\xe9\x04\x6e\x71\xa2\x9f\x50\xce\xd3\x3a\x2b\x7d\xd6\x23\xce\x84\x53\x22\x5b\x91\xa8\x8d\x2a\x73\x96\xb8\x1b\x0d\x46\xf8\xe2\xc3\xee\x98\x55\x94\xb8\x8c\x9e\xda\x8c\x7b\xb2\x62\x21\x10\x48\x08\x49\x4e\xcf\xbb\x76\xe6\xa2\x51\x9a\xd0\xe6\x91\x20\x25\x19\x49\x88\x28\xae\xc1\x2f\x40\x26\x3a\x10\xa4\x5f\xee\x27\xe3\x73\xfc\x77\x3f\x06\xa6\x18\x0a\xae\xfb\x98\x08\xdc\xb3\xd4\xee\x09\xf2\xcc\x51\x94\x40\x4d\x0a\xe3\x2f\x61\xa2\x1d\xcf\x6d\x3c\xa0\x8a\xc7\x53\x24\x4c\x60\x18\xce\xdd\x86\xd6\x45\x89\x65\x9c\x0b\x68\x3c\xcd\x27\x38\xd2\xf9\x45\x81\x3b\x1e\x42\x7d\xed\x1e\x22\xc6\x8d\x0d\x25\xf9\xdb\x8e\xd3\xe4\x62\x5b\xe6\x3a\xb2\xe2\x73\xda\xd8\x0e\xab\x22\x23\xd1\x72\xdd\x5d\xa0\x04\x76\xf0\xe4\x65\xcb\x37\xfd\x2e\xed\x90\xbf\x09\xbb\x32\x4f\xea\x34\x81\xab\xbe\xf5\xa6\x26\x28\xf6\x03\x00\x68\x1c\xc1\x04\xee\xd5\x13\x3a\x0b\x4a\x22\x97\xfa\x72\xf9\xbe\x9b\xbd\xdd\x1b\x00\x5b\x1d\x89\x50\x6a\x8e\xf5\x0f\x9a\xa2\xc8\xcb\x7e\x31\x5b\x69\xa8\xfa\x25\xeb\x4
5\x72\xda\x82\x58\x3a\x1e\x82\x05\xf8\x8f\x41\xa6\x76\x00\x41\xa1\x30\x2d\x5d\xd8\x0a\xd7\xab\x48\xf3\x66\xef\xa7\xba\xc0\xd9\xd0\x32\x29\xbb\xce\xf6\x97\xcb\xa7\xce\x4b\x0a\x6e\x73\xa0\x30\x43\xd3\x55\x7d\x75\x69\xb3\x40\xdc\x99\x3a\xac\x81\x9b\x73\xd9\xb5\x1d\x3d\x4f\x78\xd4\xcc\x42\x01\xf1\x56\xe2\x6e\x37\x9e\xa6\x41\xd1\x75\x65\x96\x48\x06\x38\x06\x0a\x4d\x81\x95\x67\x29\x20\x24\x8a\x87\x8f\x3a\x6c\x98\x94\xfb\x36\xce\xf1\x12\x96\xf2\x6c\xb6\x5b\x94\x7a\xcd\x80\x60\x25\xaf\x7c\xbe\x46\x95\xcf\x79\x2d\xbe\x9c\x10\x06\xa1\x50\x5f\xa0\x2b\xba\x25\x5e\xa7\xa5\xa2\x08\x49\x76\x3d\x7a\xc4\x4a\x2c\x19\xe5\x27\xa7\xdb\x7b\x37\x5e\x3a\xde\x16\xa2\xfd\xaf\xf0\x62\x68\x7a\x97\xec\x0b\xba\x5c\x63\xef\x7a\xc7\xfa\x03\xed\x0c\x54\x9b\xfc\x58\xc4\xae\x3c\x92\x0f\x5e\x53\x18\xdd\x49\xdc\x8c\x86\x80\x93\x95\x88\xfb\x59\x66\x00\x96\xbf\xf2\xcf\x80\x07\x8b\x22\xeb\x35\x76\x6c\xb8\xee\xde\x41\xd7\x6b\x07\x51\xcd\x45\x62\xee\xcf\x6f\xfb\x11\x46\xa5\xc9\x1c\xa0\x81\x3b\x95\xdf\x61\xbd\x4a\x48\xf2\x74\x62\xde\x3c\x38\x48\xdc\xa6\xfb\xb2\xe2\xc0\x75\x0a\x7d\xa1\x22\xfc\x66\x20\x10\x00\xeb\xee\xd8\x65\x8b\x6c\xb7\x61\x67\x54\xd9\xa2\xe9\xe6\xc4\x09\x59\x70\x56\x4f\x3d\xb9\xb2\xe4\x3f\xe6\x69\x9e\xd9\xc8\x81\xcc\x87\x31\x6e\x71\x37\x2e\x72\x5f\xc1\x43\x5f\x60\xd3\x9d\x53\xa4\xfa\x76\xd7\x76\x02\xde\xa1\x0a\xed\x9d\x67\xaa\x41\x74\x86\xd0\x5e\x2a\xcc\x3b\xae\xda\xe3\x7c\x97\x36\x15\xce\x07\xc8\x05\x52\xe3\xea\x9f\x48\x54\x0a\x2c\xdf\x2f\xea\x7b\xfd\x64\x00\xcd\x67\x4b\x80\xea\xb5\xc0\xb2\xdd\x55\xd5\xb4\x67\xb0\xcc\x99\xa5\x0f\xe4\xdb\x4e\x10\x1d\xc8\x7c\x5b\xd0\xec\x99\x8a\x6c\x1d\xce\x72\x3d\x67\x75\x3b\x0c\xcf\x75\x40\x8d\xab\x12\xb4\xb4\x3e\x42\x48\xa0\x00\x46\xcd\x73\xe1\xdb\xce\xea\xba\x73\xc5\x45\x26\xac\x6e\x88\xf3\x6e\x40\xb2\x29\x91\x23\x66\x52\xba\xca\xf4\x05\xc1\x5e\x03\x51\xac\x71\xa5\x35\xc1\x20\x49\xb6\x6b\xe3\x14\xe2\x4e\xb9\x02\x11\xef\x80\x36\x24\x08\xb8\xc5\x3a\xd8\x93\x00\x95\x88\x6c\x99\x5e\xf7\x15\xf6\x95\xa7\xe3\x8d\x9b\x43\x44\xfb\xae\x2f\xd0\xda\x40\xf1\x1d\xca\x1
b\x08\xcb\xc5\xea\x16\xc7\x61\xcb\x56\x6f\x7b\x32\xd9\x7e\x8d\x1d\x10\x6c\x38\x10\xad\x9e\x14\xf8\xe2\x1e\x10\xc7\xda\x1d\x5b\x04\x7d\x8d\x65\x28\x88\x8f\x40\x37\xc4\xae\xb7\x9a\x6e\xae\x6f\xd3\x82\x43\x14\x42\x9d\xf6\xb5\xfc\xcc\x81\xc6\xd1\xcf\xc4\x36\x61\xb3\xa7\xd6\xe5\xb1\x78\xe3\x8a\x24\x09\xac\x8e\x4f\x24\xef\xd0\x67\x63\x27\x93\x59\xe8\x42\x5a\x14\x08\x9e\x02\x59\xfc\x32\x61\x8e\x0e\x6d\xe1\x0f\x5a\xd8\xca\x75\xf1\x10\x58\xdb\x6f\xe4\xdc\xf5\x84\xd7\xcb\xe8\x6a\x3d\x20\xd1\x19\x86\x6a\x46\x97\x29\xca\x45\xf6\x91\xf1\xa9\xb0\x2d\x84\xca\x95\xa3\xa7\x0a\x65\xe1\x08\x45\x00\xd1\x9a\x9d\xe9\xc1\xb6\xa2\x6a\x77\xad\xfa\x58\xbf\x97\x44\x70\x9c\xc1\xd4\x40\x83\xe2\x0c\x18\x58\xc0\x38\x9e\xbb\x5e\x64\x05\xcd\x9e\x06\xa2\x89\xb2\x4d\x1d\xb6\xf2\xba\xf9\xc1\x69\x1f\x00\x38\xce\xf7\x7e\x20\xd6\x11\x26\xf0\xe8\x4c\x9a\x2b\x77\x2d\xc7\x89\xce\x54\xe1\x63\x76\x2d\x36\x85\x44\x41\xd0\x18\x92\x10\xb1\xc2\x28\x7f\xcd\xac\xe9\xf5\x99\x37\x87\x35\xe4\xd7\xba\xdf\x36\x32\xf5\x11\x80\x72\x3d\xcd\xc1\x40\xdd\x41\x55\x2f\x4e\xbc\x06\xcf\x73\x7f\x3c\x26\x6f\x82\x81\xe4\x81\x97\x11\xbb\xd3\x3d\xa9\x77\x24\xb9\x9e\xc0\x05\xd9\xb2\x38\x57\xef\xab\x0d\x83\x94\xd9\xf1\x54\x34\xf3\x0f\xfc\x6a\x75\x16\x55\x54\x26\x56\xa0\x64\x36\x93\x3a\xcf\xf7\x14\x9d\x1b\xb5\x54\x85\x73\xe7\x0c\xc7\x3c\x29\x2f\x9f\x37\xee\x35\x82\x80\xe0\x4e\xf8\x59\xec\xd7\xd4\x78\xeb\x9c\xe6\xa8\x7c\x34\xd1\xba\x5b\xbb\x43\x9c\xf0\xed\xbb\x84\x1b\x5b\xe4\xdd\x40\x16\xa0\x5f\x80\x44\x20\x41\xd7\xe8\x44\xd9\x14\x04\xcf\x36\x73\xaa\xec\xa1\x26\x5e\x3c\x15\x1b\x4a\x76\xdb\x34\x25\x79\xdc\xe2\x13\x45\x15\x23\x9e\x07\x8f\x44\x6b\xcf\x70\xbb\x7a\xaa\xdf\x0d\xb1\xc9\x67\xe3\xbe\x2f\x9b\xb7\x14\x6e\xb5\xd8\x28\x72\xc0\x89\x36\xc2\xe9\x6c\xab\xd8\x34\xae\xc3\xec\x77\xe8\xd4\xc0\x64\xb6\x02\x79\x27\x9f\x10\x10\xae\x13\x9c\x0f\x42\xfd\xff\x51\x75\x1d\x6b\xae\x22\xcd\xf2\x81\x58\x14\x1e\x6a\x29\x09\x09\x6f\x84\x87\x1d\xde\x7b\xcf\xd3\xdf\xaf\xcf\x3f\xa7\x67\xee\xba\xf5\xa9\x1b\xaa\x32\x33\x22\x33\x32\xfa\x0c\xd7\x5d\xef\xf9\x0e\xa
7\x69\xd9\x96\xbf\x3d\x22\xcd\xab\x3b\x02\xb8\x6b\xb9\x3e\x62\x3a\x12\x24\x3a\x77\x00\x70\xae\x19\xf2\x6e\x2e\x50\x90\xd2\x77\x20\xfa\xeb\xb2\xf2\x7f\xf4\xc5\xdc\x43\x80\x4c\xb4\x79\x2b\x19\xbe\x2d\x73\xc8\x10\xc0\x40\xfa\x1d\x70\xd7\x07\xf7\x1e\xc7\x47\x4a\x1c\x82\xe8\xfd\xfe\xbb\xf8\x4a\x79\x24\x82\x5e\xef\x0c\xc8\x57\x1a\xdb\xf9\x13\x5f\xe7\x08\x83\x34\x9b\xf8\x34\x52\x4f\x9c\x68\x89\xbd\x84\x66\xad\x53\xb8\xd5\x40\xc8\xd7\xc2\x02\x0f\xb3\x1a\xeb\x73\x0d\x95\x78\x19\xb0\x5e\xd7\xed\x05\x39\x69\x9b\x95\xf7\xa9\xda\xb2\x1b\x6d\x38\x11\xa3\xab\x5c\x4f\xd3\x18\xb9\xd8\xb9\x09\x07\x19\xac\xcc\x9e\x32\x08\xd8\xa9\x9a\x61\x18\x06\x1d\x96\x1d\xee\x31\x2e\x55\x4e\x2f\x5d\xdf\xe6\xa9\x57\xe9\x37\x02\xfe\x7a\x4d\x67\x99\xde\x35\xeb\x15\x28\x5f\x38\xf9\xfb\x98\xff\x8c\x3d\xfe\x77\x26\x8f\xfd\x07\x03\xa5\x08\x13\x0d\x6c\xfd\xfd\x98\x7e\x3b\x61\xa9\xfb\xec\x53\x06\x85\x8a\x1d\x33\x3d\x86\xed\x37\x5b\xf4\x46\x7f\xaf\xed\xa2\x8d\x08\xc8\x3b\xb8\x33\xa4\x00\x11\x22\xdf\x89\x09\xf3\xa6\x63\x90\x5f\x85\xb2\x2d\x7c\xb4\xba\xdd\x26\xab\xb6\x83\x6a\x43\xfc\x82\xd4\x28\x8f\xfe\x4c\x0e\x19\x61\x7e\x7d\x94\x5f\x5f\x24\xbb\xcb\x72\x45\xee\x71\x1c\x53\x48\xd2\x2b\xf7\x97\xd0\xef\x1b\x23\x32\x86\xb0\x50\x5f\x99\x6f\x04\xef\x56\x2c\x44\x76\x35\xec\x35\x3e\x6c\xe2\xc7\x6a\x15\xd0\xc8\xb0\x97\xc5\xee\x0d\x25\xd9\xec\xf7\x60\x75\x2e\xc2\x92\xc5\xbd\x6a\x34\xe7\xf8\x12\x7d\xfe\xce\x93\x58\x71\x85\xc8\x01\x50\x98\xfb\x8a\x81\x53\xe4\xa6\xd3\x25\xf3\x05\x03\xcc\xc7\x6f\x24\x48\x5b\xbc\x91\x69\x0a\xef\x0c\x83\xb9\x4f\x8d\x90\x23\xaa\xa4\x97\x50\x36\x31\x80\xd0\x75\x38\x18\x52\x65\xaa\x86\x49\x79\x5c\x33\x62\x35\xa3\x12\xe1\xeb\xb6\xba\xdd\xfe\x55\x48\xa3\xfd\xdf\x3e\xc6\x31\x7c\x9e\x11\x4b\x74\xcf\x60\x21\xe2\xf5\x18\x1c\xc5\xa6\x50\x56\xb7\x51\xc8\x04\x6c\xf9\xc3\x25\x88\x95\xbe\x29\x68\x49\xcd\xd5\x8d\x53\xd6\x8f\xf4\x57\x70\x04\xc9\x95\xdb\xf0\x72\x3c\x61\xef\x66\xe3\xe4\x36\x88\x6d\x03\xe4\x03\x3e\x78\xe4\x72\xf1\x2c\xff\xed\xd7\x20\x07\x8e\xe0\xfb\x3e\x53\x5c\x53\xd9\x54\xc9\x30\x25\xb2\x6d\x7
2\x35\x8c\xaf\x11\x7a\x34\x6b\x70\x18\x0f\xa3\xd8\x96\x23\x5b\x42\x19\x3a\x0a\xca\x11\x01\x96\x44\x5e\x29\xce\xa4\x43\x28\x9c\x48\xde\x53\x6c\xfe\x55\x31\xc9\x76\xba\x71\x9a\x48\x89\x29\xdc\x42\xd5\x2b\xc2\x20\x9e\x80\x7d\xe2\x58\x7e\xb8\x23\x9d\x75\xb1\xd6\x21\xdb\x1f\xe5\x5f\x39\x5f\xe8\xf9\xe4\xbe\xa8\xc6\xa4\x1b\xbe\xcd\xf4\x96\x6b\x04\x44\xfc\xcf\x99\x0c\x82\xcd\x87\x9c\xac\xd5\x22\x43\xcf\x17\xe6\xab\xdc\x40\x6f\xbd\xd2\x56\xd3\x83\x25\xbf\xe9\xf3\x57\x5b\xa0\x3f\x99\x93\xa4\xf3\x4d\xa8\xcf\x66\xbf\xeb\x1f\xe0\xfa\xbf\x1a\xec\x46\x82\x37\x68\xca\xd5\x8c\x57\xb2\x1b\x3a\x71\x0f\x8b\xc1\x22\x0d\xf3\xd4\xf0\x94\xa4\x90\x37\xe3\xd2\xa9\xf1\x91\x2c\xb7\x5b\xee\xa6\xc0\x4b\xf1\x07\xaa\x9d\x6a\x2a\xe4\xc6\xc9\xbd\x1d\x02\xd4\xe7\x80\x96\x99\x61\xde\x21\x6f\x37\xb6\x9d\xe7\x84\x36\x5b\xf8\x4a\xa4\x4c\x0c\x4d\xa4\x1f\x99\x94\xb9\x71\x64\x2e\xdb\x5c\x17\x5e\x65\x20\x5c\x2f\xc7\xe4\xb3\x7e\x74\xd3\xd8\x67\x7b\x14\xd3\xf5\xf3\x1c\xa3\xa8\x93\x0a\xbf\xa5\x75\x54\x4c\xfe\xe2\xb3\x43\x4d\x1a\xcb\x9f\xc7\xd6\x4d\xfb\xb0\x8b\x1b\x01\xcc\x38\x8e\x53\x92\xed\x34\xd2\x1d\xc7\x3d\xb5\xc5\x1d\x85\x24\x39\x2e\x80\xc7\x4c\xab\x2a\x57\xd0\xc8\xe2\xe9\xc2\x85\x78\xd3\x24\x17\x62\xa5\x6b\x9e\x5a\x2b\xd6\x44\x4f\xb3\x61\x92\xa3\xfc\x4a\x20\xd1\xdd\x59\xb6\x66\xa9\x98\x57\x4f\xe7\x53\x06\xdd\x27\x9a\x89\xf8\x79\x7e\x11\x83\x2a\x57\x26\xf3\x77\x44\x23\x77\x97\xe6\xcf\x25\x82\xc8\x9d\x35\x94\xe7\x76\xa1\xe7\x4a\x3e\x15\x73\x8f\x88\xc3\x11\x62\xa4\xf5\xcb\xc9\xf1\x3a\x6e\x79\xc2\x1c\x06\x8f\x7d\x5c\x4a\xf1\x3b\x67\x12\xae\x65\xca\xdb\x57\x1a\xa0\xaf\x12\x1b\x1a\xe9\xca\xd3\xad\x73\x15\xd9\x40\x5e\x39\x4a\xe9\x75\xdd\x67\xb9\x7e\xa7\x01\x41\xa6\x66\x28\x37\x0b\x9e\xeb\x42\x8d\x5d\xa1\xce\x57\x66\x26\xe6\x86\xf9\xce\x84\x8d\x42\xf5\x7a\x63\xb6\x41\xba\xce\x0e\x84\x4b\xd5\x60\x9a\xed\x36\x1f\xe6\x8c\x66\x97\x22\x80\xef\xf5\x36\x45\x46\xbc\xd9\x9f\x34\xfa\xc0\x3b\x9c\xd8\x71\x62\x15\xca\x26\xf3\x14\x97\xdd\xc0\xac\x4d\x28\x26\xd9\x13\x3d\xce\x04\x4f\x13\xb7\xae\x45\xb3\x16\xc9\xec\xac\x88\xd7\xb
b\xce\x9e\xcf\x4f\x91\x9e\xd2\x3f\x7a\x06\xee\x98\xa3\x79\x8a\x1a\x9f\x27\x7c\x8e\xe3\xc2\x13\xc9\x60\xee\xcd\x71\x9e\x64\xd3\xa2\x3e\x30\x9c\xcd\x0c\xfe\xb4\x89\xfb\x1a\xcd\x34\xff\x28\x6c\x1f\x61\xd1\x8e\x6e\x3b\x57\xe0\xc8\xf6\x93\xf8\xae\x66\x54\xbe\xc4\xfd\x34\xd1\x90\xcf\x77\x56\x00\xa7\xe8\x7e\x2c\x9f\xf3\xb8\x35\xcb\x01\x6a\x73\x37\x64\x97\xd8\x75\x59\x89\x0c\x38\x31\xc0\xa6\x6b\x6f\x05\x98\xcf\xf0\x1b\xc3\x24\x67\x45\xeb\xaf\x6e\xe1\xf1\xcc\x97\xb6\x58\xda\x8b\xc6\x2e\x1b\x6e\x5e\x3b\x4f\xe0\x07\x16\x0a\x30\xf0\xb4\x82\xe8\xf7\xb2\x1b\xab\x43\xf8\xfe\x69\xad\xdf\x83\x41\x30\x24\x47\x00\x02\x92\x53\x32\xfa\x06\x26\x56\x62\xe5\xb6\xae\xf7\x5c\xe1\x7b\x66\x40\x89\xac\x0f\xd6\xcb\x1c\xce\x59\x0d\x0b\xec\x01\x43\x50\x61\x3f\xab\xb4\x7e\x37\x2c\x00\x53\x47\x40\xe4\x30\xf7\x57\x32\xbb\xf4\x25\xd6\xe2\xf5\xcd\xe1\x71\x87\xc8\xc2\x0a\x05\x9c\xdf\x03\xff\xc1\xaf\x7f\xee\xa1\x94\xeb\xbd\x8c\xdb\x00\x7e\x8e\xc0\x3b\xb2\xfc\x44\xb8\x23\xe7\xde\x2b\x45\x7f\xe3\xb8\x84\x19\x48\x11\x64\x16\x84\xf2\x88\x04\x53\x67\xa4\x69\x99\xdc\xe7\xa0\x96\x1d\x1f\x50\xba\xdd\x8d\xa3\x52\x05\x98\xcf\x84\x48\x81\xc6\xee\x84\x65\x52\x02\x03\x6e\xc8\xeb\xcb\x07\x24\xb3\x40\x84\x6d\x75\x93\xa7\x15\x9f\x86\xd9\xe2\x57\x04\xd8\xb8\x28\x9e\xf3\xf6\xf5\x3a\x16\xab\x08\xdc\x7f\x7a\x9a\x5c\x61\xe1\xf9\x4c\x6f\x4e\xae\x71\x0d\x35\xc5\xaf\xa3\xd0\x23\x10\x22\xdd\x15\x9a\x0b\x1e\x8f\xe6\xba\x69\xb1\x87\x35\x2c\x84\x6d\xa9\x54\x4c\x45\x75\xea\xa7\xf0\x14\x6a\x89\xec\xa7\xc6\xc6\x03\x16\x81\xf2\x41\x1b\x38\x02\x07\x6c\xbf\x84\xe6\x42\x04\xc3\x30\xf6\x85\xb0\x89\x53\xa1\x34\x24\xf9\xf9\xec\xe4\xd2\xbb\x8b\x3c\x56\x92\x3f\xd2\xe9\xe4\x9f\xa5\xff\x37\x7f\x3c\x6e\x22\x45\xe1\x40\xeb\x35\x3b\x20\x4c\xb8\x3a\xd3\xb2\x88\x46\x66\x9c\x76\xcd\x2e\x14\xf1\x67\x7c\x9f\x03\x7a\xd6\xfa\x68\x61\x45\x9c\x7a\xb9\x32\x15\xce\x9a\x34\x17\xdd\xe7\xb2\xe5\x28\x6a\x4a\x95\xa8\xf3\x2b\x80\x86\x4f\xd8\xed\x82\x5d\x75\x5f\x93\xa8\xfc\xde\xf3\x9c\x57\xb4\x71\xd5\x6e\x31\xfe\xa9\x03\xad\x45\xe5\xa2\x81\x53\x4a\x84\xec\x28\xe4\xb
e\xca\xbb\xe0\x7f\x9b\x92\x24\xa7\xf5\xf4\x69\x07\xba\x59\xa4\x82\x8b\x79\x6e\x33\xdf\xc6\xf5\x20\x14\x1c\xa4\x69\xb6\x37\x82\xf6\xe9\x16\x82\x58\x65\x8c\xa5\xd2\x5c\x6e\xba\x07\x2e\xd5\x03\xb9\x29\x17\xab\xd4\x4b\xb7\xfa\x4a\x73\x45\x8a\x84\x08\xc4\xdc\x34\x4e\xda\x08\x33\x8f\xcc\x3b\xad\xf4\x60\x67\x4b\xcd\x99\xe8\x7a\x0c\x87\x54\x10\x85\x1c\xa3\x0c\x89\xba\xa9\x73\x0d\xea\x6f\x51\xfc\xe6\x7f\x0d\x1c\xd9\x8c\x6a\xb5\x43\x5c\x35\xb6\x51\x49\xe1\x86\xa4\x93\xd1\x4c\x1e\x30\xe1\x93\x31\x49\xff\xc1\xb2\x01\xb6\xbb\x23\xbe\xbd\x67\xb5\x9c\xb0\xe4\x73\x60\x2a\x0d\xfd\x19\xa2\x95\xa7\x78\x29\x99\xb7\x63\xe7\xae\x5e\x75\x12\x12\x36\xc6\x1d\x83\xb0\x2d\x04\x56\x72\xa6\x14\xf7\x3e\xd5\xc8\x6b\x5f\xfd\x89\x67\x12\xc5\x0a\x0f\xfe\xf3\xfb\xdc\xcf\x2d\xed\x5d\xcc\x76\x30\xf5\xe2\xe8\xd9\x8e\x92\x46\x36\x58\x46\xbe\x46\xef\x5c\xe2\xf7\xc5\x21\x23\xaa\xcf\x00\xbb\x3d\xe0\x61\x30\x51\x70\x89\x6b\x46\xd9\x66\x81\x72\x3e\x7a\x18\x7f\xaa\x33\x98\x92\x89\x42\xbf\x5a\xb1\x30\xe7\x7b\x6e\x6f\x87\xd0\x08\x80\x5d\x18\x3c\xf5\xe7\xf1\xd2\x3a\xdb\xbd\x4a\xff\xe2\xe0\x6c\xe2\x56\xbf\x2c\xb6\xf1\x28\xa9\xbf\x7d\xd9\x27\x4b\x12\x17\xf7\xe8\x17\xde\x22\x27\x45\xbc\xc4\x2a\x32\x28\x01\xed\x26\x4c\xbe\x16\x17\xe7\xde\xd0\xb0\x09\x78\x47\x10\xee\xb1\x7e\x3b\xec\x2b\x7b\xaa\x7c\x2f\xfb\x47\x91\x3e\xbc\xd9\xa5\xa1\x19\x6a\xe7\x0d\x73\x3f\x47\xd2\x75\xb7\x28\xb1\xf1\x1a\xc3\x42\xd7\x3f\x0c\xa2\x11\x5a\xcb\x1a\xd5\xa8\x0b\xdb\xa0\x7e\x14\xfe\x2f\xff\xd2\xa8\x29\x52\xd8\x02\xc8\x9f\xbe\xf4\x91\xda\x40\x27\x4f\xae\xb9\xe5\x54\x73\x1e\x9b\x89\x13\xcb\x85\x33\x59\xc5\x60\xe9\x38\x90\x47\xe2\xa1\x12\x8f\x5e\xbb\x96\x53\xb5\x9a\xc1\x9a\xad\x89\x21\x2c\x4a\x94\xb2\x90\x4c\x70\x3a\x53\x67\xb6\x4a\x8c\x3b\x58\x4c\x80\x20\x04\x22\x66\x4f\x2d\x18\x96\xa7\xad\x97\x9b\xab\xf3\x8b\xd2\x3d\xff\xee\xce\xbe\x1e\xdc\x71\xa8\xdc\x67\x6c\x73\xb4\x4c\x83\x46\x7a\xa5\x7d\x84\x47\x32\x4a\xba\x04\xd4\x7b\x5e\x92\x5b\x61\xbd\x22\x17\xee\x0c\x64\x59\x36\xb0\x05\xf2\xfc\x4c\x69\xfd\x8d\xee\x44\x96\xab\x85\x83\x66\xbb\x54\x44\x9
1\xbe\x0a\x52\xe7\x9e\x83\xf6\xec\x85\xc1\xd1\x7d\x81\x59\xda\x34\xcf\x32\xdf\x75\x31\xd2\x8c\x03\x71\xba\x96\x15\xd5\x47\x42\x2d\x98\x90\x48\x95\x32\x51\x7e\x20\xab\xc9\xfd\xf6\x10\x8c\xfe\x55\x8e\x8e\x8f\x7c\xbf\xbe\xfa\x6a\xd8\xc4\xc0\xf9\xd3\x53\xad\x51\xba\xd4\xfa\xcb\xf2\x42\x0b\x63\x1f\xcb\x41\x73\xde\xac\x17\x07\xe4\x0b\x12\x89\x0e\x85\x11\x7a\x5e\x27\x39\x2f\x87\xea\x13\xba\xcf\xf7\xe1\x5e\xdd\xa9\xc8\x5f\x4d\x2f\xa1\xf9\x13\xf5\xc1\x35\x9c\x2c\xd1\x73\xec\x8e\xcf\xe2\xd8\xfa\x44\xb8\x69\xc4\x56\x51\x79\x94\xf1\xe9\xfb\xfb\x1f\xce\x5e\xbc\x14\x2a\x53\xda\xab\xa3\x55\x80\xee\xdc\x17\x35\x32\x04\x81\x84\x7a\x3f\xce\xd3\x5a\xa5\x69\xb5\xba\xab\xbf\x19\x36\xaf\x10\x80\x68\xf2\x13\xe4\xdb\xa7\x9b\xa2\x77\x29\x5a\xe8\xaa\x99\x0f\xf0\x22\x67\xbf\x7e\xe6\x60\x20\x1e\x2e\x88\x6b\x01\x10\x22\xe3\x07\x8a\xeb\xca\xfc\xb0\x22\x0f\x65\xcc\x67\x9c\x00\x0c\x93\xf4\x92\x15\x7d\x68\xa7\xfd\x64\xd2\x8e\x14\x8a\x33\xb1\xf2\xf7\x75\xff\x6a\xf8\xa4\x6a\xee\xc2\x2e\x24\x56\x12\xe9\xbe\xb9\x76\x25\xb4\x5e\x0f\xa4\x37\x15\x20\xcf\x80\xc4\xb8\xd3\x82\xe9\x55\xc8\x4f\xf2\x54\x0d\xa7\xb7\x51\x18\x00\xd4\x37\xe5\x0a\x54\x7b\x41\xc7\x42\xfd\xb4\xf4\x27\xd7\x3a\x96\x81\xcd\xf4\x5c\xa1\x27\x1a\x7e\x4e\xd4\xba\x49\x0d\xee\x91\xc0\x1d\x4f\x08\xc0\xde\x3e\xe0\x95\x10\x52\x93\x83\x00\x38\xfb\x22\xa0\xd9\xe3\x57\x2e\xa7\x9e\xae\x89\x67\x3d\x50\x53\xb5\xa2\x02\xd7\xd8\x77\x55\xbd\x7d\x17\xf3\x3e\x65\x60\xbe\xcd\xb7\x23\xba\xe1\x70\x3a\x88\xc0\x3b\x0d\xa5\xdb\x00\xd4\x4d\x83\x2a\xd8\x28\x7f\x1b\xd6\x50\xce\xea\x51\x72\x7b\x23\x19\xfd\x7e\x8a\x24\x62\xd4\x04\x60\x01\x38\x0f\xef\xd3\x45\xaa\xf6\x98\xdd\xe9\x5d\x3c\xfb\x19\xd2\xac\xca\x3d\x89\x78\xc5\xab\x54\xb5\x57\x77\x4c\x9d\xf6\x58\x58\xfa\x59\x93\x88\xf6\x12\xec\xe0\x93\xeb\x07\xf1\x7a\xfd\xd3\x7a\x55\x4f\xd3\xc3\x36\xe6\x7e\xe9\x93\x2b\x4f\x16\x59\xf8\x55\x2a\xbf\x88\x15\x64\x00\xec\x86\xcf\x40\xa7\xc3\xf6\xaf\xe8\xbc\xa1\x66\x27\xb8\x8e\x11\x99\x88\x52\x32\x89\x6b\x95\x0a\x0d\xfb\x81\x54\xe4\xec\xcf\xcf\xc7\x11\x19\x8d\x81\xdc\x82\x01\xee\xb
2\x87\x92\x41\xa6\x11\xf3\x7e\xf8\x10\xaa\x09\xe0\x4d\x36\xff\xd0\xc9\x3a\x3a\x23\x2b\x55\x36\xf0\x0e\xf5\x78\xfd\xab\xad\xa5\xa2\x6e\xdf\x1b\xa1\x7f\x33\xfa\xb5\x4c\x72\x31\x24\xf6\x7c\x14\x08\x00\x15\x6b\x94\x9b\x71\x03\x1c\xc1\xf0\xc1\x0b\x85\x46\x18\x11\x36\x35\xed\x9f\xda\x41\xce\x56\xbb\x2a\x5f\x72\x51\x18\x85\xd2\x00\xc8\x0e\x5f\xa8\x4f\x22\x5e\x89\x3c\xef\xcf\x28\xb0\xe3\xc7\x27\x66\x74\xc9\x11\x63\x37\xda\x09\x33\x07\xe7\xb7\x57\xe2\x86\xe2\xbf\x71\xed\x2b\x3b\xe4\xc1\x4c\x3f\x0a\xae\xfc\x9d\xdb\xbe\x82\x37\x9d\xdb\xba\x5d\x90\xb2\xa1\x17\x2c\x39\xc8\x67\xd6\x3f\x8a\x9c\x60\x26\x7a\x9b\x88\x7d\xcb\x87\x1a\x8f\x62\x74\x52\xb3\x2a\xe8\x25\x02\x9c\xd8\xc7\xa8\x56\xd9\x1b\x16\xee\xf5\xd8\x7f\x62\xb0\xc3\xb6\x78\x20\x8b\x33\xae\x53\xa6\x04\x80\x7d\xe7\x20\x27\x3f\x5a\x15\xfb\x85\x74\x65\xfd\x53\x6e\x1b\xc0\x50\x70\x8d\x3d\xac\x28\xad\xda\x99\x1a\xf9\x75\xac\x8d\x30\x5b\xf3\x44\x6d\x84\x25\x56\xc6\xdf\x3c\xad\xbe\x92\x1f\x7a\x67\x49\x6a\xc3\x46\xf2\xfb\x20\xb3\x58\xfd\x41\x2b\x39\x08\xfb\xc5\x83\x46\x4a\xb2\xe4\x82\x18\x67\x66\xcc\x0c\x72\x38\x63\xb8\x80\xcd\xf8\xa2\x1a\x17\x88\x4e\x43\x55\x01\xff\x5c\xe7\xd7\x68\xef\xcb\x7a\xff\xd1\x04\xc9\x68\x71\xde\x37\x20\xfc\x37\x34\x6a\xaa\x50\x8c\xea\x58\x66\x39\x15\x86\xf7\x8d\x4b\x37\xa0\x97\x34\x9f\x3f\x91\x15\x88\x6b\x7f\xb3\x31\x7f\xaa\xe1\x12\xe0\xa0\x7b\xca\xd7\x6f\x1c\xab\x56\x9c\x2f\xb4\x5b\xaa\xf6\xfb\xbc\x9c\x51\x2c\x5a\x00\x5e\xec\xc5\xaa\xdc\xe3\x62\x01\x8a\xd0\x7a\xbd\x7f\xba\x10\x47\x88\x96\xfe\xe1\xe9\xa2\xea\xd2\x3b\xa4\xbb\x1c\x7c\xac\x84\x2a\xbf\xde\xe7\x61\xcb\x29\xd5\xb3\x57\x16\x0b\x5d\x58\x05\x88\x41\xec\xf9\x0e\xfa\x94\x7c\x6e\x89\x25\x15\x68\x4c\x3e\x3d\x6c\x8d\xd2\x0d\x75\x28\x87\x4f\x9f\x3f\x0c\xa7\x04\x3a\xb1\xfa\xda\xa7\x97\xc5\xfa\xaf\x3e\x93\x7b\xf4\x86\x8d\x43\x75\x60\x07\xbb\x0d\x87\x15\x6e\x79\x9e\xcf\x23\x07\xce\xf2\xcb\x6a\x54\xe2\x85\x42\x6f\x10\x51\x0b\x7d\x17\x0b\x51\x3a\x73\xe5\x6f\x50\x28\x43\xf5\x74\xcf\x61\x3f\xfb\x9e\x60\x22\x6d\x77\xfb\x27\x55\x90\x82\x48\x29\xed\x45\x4b\xab\x1
e\xc4\x65\x38\x31\x04\x60\x88\x9c\x58\x58\xa1\x7c\x4f\x55\x50\x89\x97\x5a\x7d\x11\x86\x47\xd9\x19\x3e\x1f\x5a\x26\xff\x7b\x3e\x39\xcc\x19\x8a\xf1\x8a\x98\x42\xfc\xee\xe5\x1b\x8b\x72\xdf\x70\x5f\xdd\x01\xb0\x62\x86\x69\xb1\x5f\xc5\x72\xb6\x4e\x11\xe9\xc5\x89\xc0\x62\x04\x40\xbc\xf1\x29\x27\xbd\x76\x63\xfc\xe4\x22\x1a\xac\x12\x9f\x3f\xc3\x59\x0b\xcb\xf7\xb4\xb8\xdb\xaa\x28\x0c\x83\xbe\x93\x6d\xb1\x46\xb3\x0d\x67\x37\xfa\x79\x17\xa5\x34\x21\xfb\xca\x64\xbd\x87\x40\xc9\x29\xce\x9f\x77\xf2\xcd\x59\xfe\xf1\x1f\x4f\x81\x43\x3f\x35\x1b\x85\xb1\xf2\x8c\xc2\x27\x4b\xe8\x57\xa9\x94\x78\x97\x76\x61\x4b\xb1\x60\x70\x0c\xc0\xf4\x9f\xa0\x6b\x97\x0f\x2f\x0a\x13\x9b\xd7\xba\xdf\x57\x23\x5f\xce\xe3\xc4\x97\xdf\xf0\x25\x7c\xb6\x19\x43\x36\xbb\x09\x0d\x1e\x5c\x87\x19\xb2\xfc\xa7\x0c\x22\x84\x85\x8a\x92\xc6\x3b\x38\xfa\x93\x05\x79\x73\x7d\xff\xd4\x8e\x4b\x28\x38\x8e\xb9\x21\xa7\xbf\x5c\x49\x76\x5b\xab\xbf\x2b\x83\x80\x94\xae\x7e\x85\xbf\x7d\x06\x9e\xc3\x29\x8d\x8d\xf9\xe7\x12\xf3\x27\x9a\x2a\x89\x8c\x31\x70\xa2\x58\x44\x45\x78\x58\xdd\x16\x59\xd8\x25\xbe\x4d\x13\x9a\xf2\x66\x61\x26\xda\x02\x58\xd6\x6b\x9f\x72\xf9\xee\xb3\x99\x7e\x29\x1c\x2f\xa7\xdc\x94\x2a\x0c\x40\xd1\x26\x47\x26\x7a\xb6\xdd\xc4\xe0\x8e\x4b\x74\xe4\xa4\x22\x88\x22\x45\x53\x3f\xc8\x0c\x5f\x7a\x97\xef\xe1\x52\x6f\xa9\x31\x06\x89\x4d\x7a\xd9\x32\x09\xf3\x95\xfc\x6a\xb7\xb8\x90\x3f\x27\x4b\x3f\x6d\xa5\xed\x09\x40\xb2\x39\x75\xa9\x44\xdd\x22\x18\xcc\x68\xc0\x91\xee\x8b\x3d\xea\x14\xdf\x5f\x42\xb7\xd4\xed\x09\x84\xca\xe0\xdf\x60\xbd\x4e\xa9\xd3\x83\x36\xa7\x14\x9a\x80\x24\xfd\x93\x68\x9b\x1d\x48\xf4\x1e\xef\x19\x3e\xb5\x64\xf9\x55\xb9\x07\x9d\x1a\xfd\x19\xde\xef\x16\x90\x88\xb6\x7d\x7a\xc3\xa4\xf3\x60\x5e\xf6\x5b\x3d\x25\x35\xec\x25\x96\x13\x7e\xe5\xb7\xea\x95\x01\x6d\xb3\x9f\x69\xab\x78\x58\xea\x63\x50\x30\x59\x5f\x00\x69\x27\x55\xc1\x92\xce\xf3\xb5\xc6\x81\x03\x09\xc8\xbe\xb9\x27\x6b\x26\x5a\x4b\xef\x2e\xc5\x2d\xfe\x7e\x43\xf2\x88\x38\xd1\x6a\x4e\x27\x30\xe5\x74\xbb\x83\x24\x70\x01\xc2\x2a\xf7\xcd\x02\xf5\x7e\x95\x31\x69\x6
b\x00\xc8\x2a\x69\x02\x08\xbe\x3c\xa6\xd9\x0e\x34\xea\xa3\x72\x2f\x03\x9e\x17\x5b\x8b\xb2\x8e\xb8\xef\x67\xff\x0f\x16\x7a\x16\x70\xfb\x1a\x46\x2d\xde\x68\xe3\x91\x2c\x97\x66\x04\x21\xb4\x0a\x51\x77\x74\x94\xb9\x84\xa3\x20\x1b\x04\x88\xbd\x56\x8b\xfa\x3a\x96\x58\xcb\xf5\xde\xa4\x0d\x13\xbc\xaf\xa0\x93\xaa\x27\xd7\x6d\x14\xa2\x35\xa1\x6c\x2b\xf9\x8b\xe3\x4d\x7c\xdc\xa3\x7b\xf5\x7d\x02\xf9\x28\x67\x91\x0b\x04\x58\x31\x37\x71\x67\xe1\x49\x46\xef\x3a\xf0\xa9\x42\x7c\x1d\xfb\x9d\xcd\xd8\x5e\xb5\x9f\x2c\x6c\x22\x9e\xf8\x18\x1c\xe8\x79\xb1\xfe\xcb\x07\xb8\x27\x23\xd2\xfa\x45\x6d\x6a\x54\xdb\xc3\x28\x3b\xfd\x39\x11\x01\xdd\x07\x79\x96\x65\xb9\xec\xdf\x2a\x6b\x18\x37\x43\x00\x4c\x9f\xbe\xb3\xd1\xdf\x1b\x11\x21\xa9\xde\xb8\xb9\x4e\xeb\xe5\x80\x65\xcd\xf4\xb6\x46\xc9\x42\x51\x73\x73\xf9\x51\x76\x64\x07\x82\x38\x8e\x01\xf8\xcc\xa0\xd7\x72\x80\x75\xb1\x60\xaa\xd3\x77\x04\xec\xa6\xe7\xba\x84\xe9\x4d\x94\x5e\x0c\xe6\xa3\x8e\x48\xba\x52\xbe\xbd\x5a\xd0\xae\x8f\xb9\xf4\xfe\xd5\x9d\x77\x96\xc4\xb7\x55\xd0\x28\xfa\x3b\x54\xad\xe6\xfe\x46\x75\x26\xfa\xf9\x7e\x07\x49\xbc\xa7\xa0\xcc\x00\x23\x35\xc0\x58\x89\x0c\xab\x42\x90\x59\xb4\x4f\x43\x80\xc0\x14\xbc\xfd\x9b\x15\xe0\xc6\xda\x43\x70\x3b\xd8\xc7\x32\x37\xea\x3b\x99\xfd\x05\x72\x08\xf3\xbe\xba\x63\x4c\xb3\x8c\x1c\x7c\x6b\x33\x19\x98\x24\xbf\xc8\x45\x4e\xa4\xdd\x4b\x8b\x40\xe7\xd8\xcd\xd7\xe5\x96\xe2\xbf\x8d\xcc\xb9\xd1\xe6\x24\xa8\x2d\x6d\x64\x86\x5a\x62\xa5\xfe\xe6\x60\x74\xb1\xdf\xe5\xcb\x94\x92\x81\x0e\x5a\xe9\x68\x8e\xc1\xcd\x43\x53\xe9\xd3\x79\x07\x4c\xbd\x02\x09\x20\xe5\xad\x40\x92\xde\xb2\x55\xb1\x88\x6a\xa6\x00\x57\x1e\xb4\x92\xa8\xa6\x9c\xb4\xb2\x75\x7d\x50\x6b\x88\x0f\xe5\x73\x31\x28\xdc\xcd\xec\x76\xf3\x8d\x60\xf2\xfd\xb6\xf3\xc6\x68\xdc\x26\x13\x6a\x52\xa0\x00\xeb\x37\x5c\xa2\x1e\xac\xf8\xd6\xe5\x19\xcb\xe6\x77\xf9\xf8\x52\x26\x63\x06\x73\x7c\xa6\xdf\xee\x5f\xdd\xb2\x17\x8a\x00\x9a\x5c\xf9\x0d\x3c\x4b\x92\x50\x3a\xcb\xc5\x69\x67\xea\x15\x9d\x5c\x80\x64\xf9\x7e\x11\x8d\x8e\x18\x14\x44\x30\x8a\x04\x7b\xf7\xf0\x38\xc4\x6e\x28\xb
5\x8a\xe5\x54\xb3\xdf\x83\xb3\x36\xee\xb4\xda\x03\x62\xb4\xcc\xa9\x36\x64\x62\x10\x0c\xe6\x83\x9b\xcb\x01\x62\x50\x53\xd0\xe1\xde\x78\xeb\x23\x06\x00\x42\xec\xde\xeb\x7a\x2e\x8e\x41\x75\xf5\x3c\x9e\xe7\x36\xee\xfc\x9e\xd1\xbe\xa9\xdb\x28\xb8\xeb\xa2\x3d\x19\xc6\xfa\x07\xd4\xfc\xa4\x44\xc3\x7b\x85\xad\xd0\x0a\x7d\x4c\xa2\xe9\xa4\x6b\xf0\xb0\xa3\x5c\xde\x15\xac\xbf\x59\x08\xe0\xf8\x65\xd7\x10\x45\xf2\x7c\x64\x08\x02\xbd\x0a\x29\xfa\xbe\x84\x4f\xb9\x6b\xcd\x72\x14\xea\x61\x50\x22\xa5\x34\xf4\xfb\x41\xba\xba\x50\xc3\x13\xba\x2c\xab\xde\xc6\x8d\x59\x10\x8c\xf1\x36\x5b\x64\xfc\x32\x9e\x8c\x91\x02\x80\x08\xc8\x3c\xd1\x85\xd4\x1e\x7e\x8a\xa7\x46\x4c\x40\xab\xcd\xcc\x41\x59\x34\x4c\x9e\x19\x9e\xf5\xfe\xeb\x59\x73\x6b\xb2\x73\x5b\xf1\x5d\xe9\x21\xb9\x20\x92\xef\xfb\x3e\x0d\x15\x1f\x69\xbd\x2c\xcf\xb2\xc0\x37\x00\x42\xed\x44\x66\xf6\xf7\x48\xb1\xed\x9e\x23\x72\x1c\x33\x5f\xba\x90\x7c\x33\x2a\xde\x29\x1a\xcd\x6e\xd4\x5c\x2d\x7f\x1b\xb4\xb8\xe8\xaf\x7e\xa4\x93\x98\x2f\xa7\x8d\x8e\x60\xa0\x48\xc1\x9e\x6f\xbc\x22\x98\x68\x9a\x57\x84\x4a\xf9\x12\x64\x98\x3d\x07\xc1\x6a\x5c\xa5\xab\xb9\xa1\x7f\x45\x37\x93\x29\x46\x46\x2f\x4b\x8f\x84\xc5\x27\x31\xfe\xce\xae\xd5\xc7\x62\x3b\x98\x76\xd9\x57\xa5\x5b\x94\xce\x46\xdb\xd6\x43\xb7\xc3\xa2\x1c\x50\x24\x00\xe0\x0d\xc0\x89\xb8\x17\x12\x75\x93\xf1\x83\xb2\x10\x6d\xf6\x89\x84\x14\x44\x1e\x2b\x3f\x63\xec\x6d\x5f\xd0\x03\xb4\xc6\x46\x3f\x54\xf8\x10\x98\x9f\xfa\xe0\xb2\xdc\xb8\x11\x89\x6b\x69\xe1\x79\x24\x10\x41\x6a\x03\x83\xb9\x8f\x6c\xa0\x65\x59\x7c\xcd\x09\x26\x25\x7c\xac\x2a\xa6\x08\x99\x5d\x82\x4a\xaf\x9c\xa6\xae\xd3\xa6\xf4\x87\xc9\xff\xed\xf3\x76\x2f\x2f\x26\x2c\xa6\x4f\xde\x61\x3d\xbe\xe9\xde\x01\x91\xb5\xbb\xcc\x8e\xe4\x46\x1f\x82\x94\x48\x19\x06\xc3\xba\x24\x03\x98\xb9\xc7\x34\xdc\x2d\x6a\x0a\xbb\xbc\xf7\x05\x70\x17\x19\x84\x50\x9d\x9f\x78\xdd\x82\x2e\x9d\xb7\x3d\xf5\x90\xcb\x60\xad\xc8\xe6\x15\x5c\xba\x03\xdc\xdd\xdf\x51\x38\x12\xbd\x37\x11\xde\x0d\x80\x1b\x52\x80\x22\xa1\x8a\x00\x16\x97\xa8\x29\xea\xec\xfb\x66\x2f\xb9\xb5\xc7\x43\x39\xd
7\xb6\xd9\x0f\xbe\x39\xd8\x0a\x92\x70\x80\x5c\xf8\x5b\x63\x9e\x61\x37\xc7\x0a\xb2\x14\x98\x43\xde\x97\xe8\x85\xbe\x0b\xd3\xa4\x18\x99\x8e\xda\x22\x61\xbd\x01\xc9\x70\x86\x91\x71\x47\xd6\x43\xd3\x4e\xe3\x79\x6a\x82\x24\xcb\x77\xc2\xc5\x0e\x89\xf8\xd8\x0e\xa1\x55\x6d\x56\xa0\xbd\x93\x7f\xae\x77\x81\x21\x51\x87\x27\xe0\x6b\xd2\x06\x85\xa4\x86\x91\xfb\x7e\x0f\xe0\xcb\x24\x36\xc6\x26\xc7\x53\x7f\x5e\x00\xd9\x9c\xdd\x00\x04\x39\x82\x91\x05\x76\x67\xc7\xc2\x48\x67\xe5\xd7\xa8\x2e\xb5\x16\x19\x37\xf2\x12\x04\x5b\xd9\x9d\x09\xac\xf1\xb0\xff\xd5\xb8\xde\x9a\xae\x3b\xd0\xb3\xd4\x4e\x2a\x87\xd3\x02\x54\xd0\xed\xb3\xf0\x81\x65\xd6\xcf\xf4\x14\x02\x84\xd1\x85\x95\x43\x98\x7c\xc5\x10\x6b\xfc\x74\xe0\x1e\xfc\x8e\xea\x20\x64\xc9\xe6\x4f\x6f\xc5\x0f\xf1\xf4\x11\xe3\xb2\xd2\x52\x8b\x1b\x1f\xfc\x3b\x8c\xbd\xf3\xfc\x7a\x22\xfb\x8a\xf6\xbb\x16\x5c\xba\x1a\x27\xbf\x1d\x10\xd2\x00\x60\x02\x00\x5b\x41\x6e\x20\x8b\x9f\xd8\x53\x3a\x33\x00\xc7\x7b\x82\x20\x8c\x4d\x20\x00\x5b\xbe\x05\xe8\x92\x35\x96\xcd\x20\xaf\xce\x60\x97\xb1\xe9\x69\x4d\x9e\x5c\x85\x2e\x16\xd9\x53\x66\xa7\xe8\x21\x5a\xe8\xa2\xfc\xa3\x0f\xb9\x49\xea\x32\xb5\x5c\x16\xab\x61\xb4\x80\x24\xee\x7e\x7c\x37\x0d\x4c\xc7\x18\x07\x00\x41\x20\x4b\xc1\x0c\x5c\xf3\xbd\xd6\xeb\x42\xc4\xe9\xe4\x74\x92\x89\xb6\x3e\x30\x49\xff\x91\x18\xbd\x39\x50\xd8\xc2\x84\x80\x0f\x1c\xee\x93\xc2\xcf\x47\xa1\x18\x0b\xad\x3f\x9f\x4a\xab\x72\x39\xfa\x58\xf8\x43\xd9\x35\xad\x7e\x1c\xaa\x91\xfb\xac\x5f\xb0\x33\xc3\x5c\xd7\xc0\xf8\x3e\x44\x8e\x24\xd3\x3d\x8f\x09\x16\x3f\x0e\xd5\x81\xe9\x23\x90\xec\xa9\xef\x62\x4c\xba\x41\x2a\x60\x33\xbd\x09\x79\x93\xfc\xd4\x77\x7b\x51\xd9\x2c\x57\x83\xa3\x04\xfc\x00\xdf\xa4\x56\x93\x1b\x4e\x3c\x2a\x83\x0f\x1e\x08\x57\xff\x37\x67\x52\x6c\xfd\x98\x7a\xd3\xe3\xcb\xc0\xc5\x52\x22\x67\xe0\xea\x22\x48\xae\xe5\x40\x83\x39\x10\x14\xa4\x28\x5c\xa5\xbd\xc8\x55\xbe\x80\x71\x27\x98\x64\x3b\x3b\xf3\x15\xbe\xa4\x5e\xcf\x11\x9e\x0a\x26\xa1\x99\x89\xef\x62\xd8\xb8\x87\xea\xc1\xe1\x6f\x96\x1d\xe9\x5c\x9b\x2d\xcf\xa2\x57\x59\xd6\x83\xdb\xfd\xb0\xb
7\x9f\x13\x3f\x68\x00\x0a\x2f\xe3\x64\x0c\x82\x89\x73\x62\xdf\x31\xdc\xff\xdc\x09\xda\xf9\x4a\x5b\x9d\xa9\xd7\x9a\x4e\x2b\xe1\xe9\x27\xef\xfb\x16\xe9\xff\xc7\x27\xd6\x39\xc2\x17\x3a\xd1\x5d\x0d\x6e\xcd\x7c\x12\x69\xf4\x22\xd0\xb0\x93\xd0\x39\xd3\x6c\xf6\x3e\x00\x75\x56\xe2\xfa\x57\x23\x79\x58\x38\x99\x0b\x91\x04\x5b\x4b\xac\xc4\xd2\x02\x94\x41\x23\xc6\x88\x80\x71\xcd\xe7\xd8\x1b\x97\x98\x1f\x17\xa0\xa9\x06\xdd\x8e\xab\xfb\xb1\x1c\x35\x99\x06\xb7\xdb\x97\x74\x5c\xd7\xab\x0c\x9b\x22\x2f\x04\xfb\x0d\x0d\xef\x3a\xbc\xd0\x84\x6f\x39\x51\x2c\xa4\x7e\x69\xe6\x44\xf8\x34\xe5\x66\xac\x41\x20\x99\xd6\x9f\x6c\xfe\x81\x19\xc8\x37\x9c\xd9\x09\x76\x67\xd1\xce\x39\xf9\x01\xfb\xec\xaa\x52\xa6\xca\xf8\x73\x7e\xef\xf3\x9d\x3e\xfa\xe9\x55\xbb\x6e\x28\xf8\xbb\x9a\xd8\xf7\x53\x97\xbf\xff\xe1\x78\x92\xd3\x67\xb9\x76\x16\xea\xab\x44\x2e\x5f\xa5\xbe\x46\x33\x6d\x29\xb6\x55\xac\x5b\x2d\x6e\xf9\x0a\x2f\x01\x83\xc0\x99\x7b\x91\x55\x16\xc5\x7c\xfb\xf9\x13\xaa\xc3\x59\xad\xee\xb4\xba\xdd\x22\x2a\x7e\x19\x6e\xd5\xec\xd0\xbb\xa2\x92\x6e\xa3\x14\x0a\x38\xf3\xf7\xff\x72\xd5\xf1\xba\x25\x41\x68\x02\x4e\x98\x3f\xca\xdd\x35\x41\xb7\x87\xb5\x88\x18\xb2\xbb\x3d\xc8\x75\x43\xcb\x08\x9b\x72\xfe\x4a\xa0\xd9\x39\x7e\xe8\x50\x02\x95\xb0\x3f\xb8\xf9\x77\x66\xca\xbf\x3c\x49\xe8\xef\x24\xc4\xf4\x11\xb9\x31\xbb\x39\x55\x4b\xbf\x5e\xf0\x61\x7c\xe0\x41\xcd\xab\x3b\xb9\x56\xe5\x46\xde\x69\xe6\xec\x4b\x52\xb9\xef\x82\xe8\x9f\xc8\xa7\x74\x5f\x7f\xf9\xbc\xf9\x77\x2f\x32\xdc\x97\x7a\x74\x67\x4f\xb8\x26\xf7\x66\x73\xe9\xb5\xa3\xf5\x53\x37\x5e\xdf\x57\x3b\x06\x9d\xfc\x82\x59\x7b\xdb\x53\x6a\xb3\x41\x22\xd3\x82\x75\x09\x73\xb7\xb8\xe3\xe7\xf1\xdd\x38\xd0\xc9\xef\xcf\x5f\xec\x8c\x7a\x05\x9a\x14\x74\xdf\xfa\x19\xc9\x51\xeb\xa9\x8f\x2d\x9a\x13\x9c\xae\xbf\xb5\x55\xfd\xa6\xb2\x65\xab\x37\xb7\x46\x7d\x0e\x63\xd6\x7e\x57\xab\x32\x7b\xfc\xf3\xa9\x54\x41\xef\xfc\xcf\xa7\x8b\x0f\x25\x11\x01\x48\x12\xba\x11\x19\xc2\xd2\xcf\xec\x1a\x6c\x1f\x54\x4e\x61\xb0\xba\x13\xc9\x62\xee\x1b\x17\x94\x39\x99\xdb\xb0\x07\x54\xc8\x5e\x74\xfb\x61\xa
e\x65\x42\xb7\x1a\xab\xb9\xf1\xfd\x8c\x7f\x71\xeb\x5a\x43\xeb\x9a\x0f\xce\xc9\x1a\x7b\x58\xb1\x6d\x59\xe4\x22\x8e\xae\x07\x45\x22\xca\x5b\xeb\x6e\x6a\xd0\xb1\xc5\x16\xf7\xec\x41\x30\xe7\x25\x5c\x39\x7b\x7f\xff\xce\x71\x19\xb5\x46\x4a\x2e\xdb\x97\x35\x9b\x31\x6f\x0a\xe1\x0f\x39\xfe\xe4\x23\x96\x3b\xe6\x0b\xa3\xac\xbd\xfa\x3e\x6b\x0a\x2b\xef\x6f\x5e\x15\x65\x7e\xe4\x02\x28\x8c\xec\xdf\xfc\xae\x96\x35\x9a\x14\x4c\xdf\x5a\x66\x3b\x63\xa9\x72\xea\x6a\xae\x4f\xc4\x55\x9b\x4e\x59\x1b\x54\x61\xb9\x94\xc4\x9a\xf3\xeb\xc0\xfa\x1b\xb4\xfd\x6d\xad\x50\x0d\x3f\xcc\xf1\x1c\xf4\xa7\x15\x64\xc7\xaf\x7e\x59\xd1\x1a\x54\x5d\xa2\x2c\x2e\xa0\x30\x79\xbb\xcb\x22\x0d\x42\x1a\xd9\x20\x9c\xb5\xe1\x4c\xfb\x5c\x20\xb6\xf1\x52\x1a\x92\x3e\x29\xe3\x76\x70\x43\xfe\x7a\xbb\x44\xc7\xd2\xf1\xff\xea\x61\x08\x49\x84\x69\x7d\xca\x09\x04\x53\xe5\x24\x63\xe3\xee\xf7\x4b\xd3\xe2\x35\x1d\x7a\xef\x33\xe6\xca\x99\x7e\xdf\x52\x88\xe9\x9f\xf3\x6d\xbe\x5d\x79\xae\x00\x51\xff\x97\x03\xb4\x9d\xa5\x67\x4f\xcb\x78\xe1\x5a\x15\xac\xe6\x94\x0f\x0c\x6e\x84\x86\x34\xf2\xc7\x53\x18\xa0\x70\x18\x9a\x6b\xaa\xb5\x60\xc7\xef\x0e\xeb\xf2\x43\xf1\x6b\xca\xa5\xf2\x3f\xeb\x91\x2e\xcb\x31\x7a\xf1\xcf\xf7\x4c\x72\x35\x6c\xdf\xe4\xae\x85\xda\x31\x42\xda\xb5\x26\xf7\xa5\x7e\x3c\x56\x6c\xee\x8e\x72\xd6\x43\xff\x1c\x2f\xad\xf0\x4e\xf6\x52\xd9\x56\xb5\x3f\x6c\x4e\x73\xea\xb4\xdd\x4b\xc5\x6c\x93\x5c\x0f\x54\x49\xbc\xfe\x9b\x47\x6e\xc1\x44\x7d\x51\x97\x50\x59\xec\x42\x39\xba\xe6\x9b\xd9\xa7\x65\x94\x93\xc1\x13\xf9\x02\x61\xc6\xd2\xdb\xc5\xd0\xf9\xcc\x07\xd8\xc6\xc9\x93\x17\x6e\x6a\x7a\x5f\x33\xd1\xf4\xa3\x22\xec\xf7\xb3\xff\x1b\x47\xc3\xe6\x6b\x27\x82\xe1\xe4\x22\x16\x21\xf7\x08\xee\xf7\xbc\x0b\x14\x92\xf5\x65\x74\xa8\x27\x6d\x50\xc2\x97\xf6\x72\xee\x29\xdd\x0e\x66\xe7\xef\x88\x31\xe4\x2d\xcb\x23\xe3\x8b\xff\x07\xdb\x06\x93\xde\xd8\x4e\xbe\xc2\x76\xa9\x5a\x78\x85\x5d\xc0\xb3\x08\x15\xf3\x66\x2f\xaf\x84\x29\x17\xb2\xd3\x86\xdd\x3b\xec\xc2\x66\x71\x6c\x88\xba\x79\x83\xc5\xd4\x66\xc5\x78\x3b\x6a\xcf\xa9\x46\x93\x8a\xfd\x62\xf5\x7f\xbf\x4
f\xa9\x5d\x6f\x74\x15\x51\xfb\xf3\x8c\x2e\xfc\x1e\xca\x21\xf8\xe8\xd6\x8f\x85\xad\x9d\x38\x66\x3b\xec\xe0\xbd\x9f\x8e\xdb\x86\x56\x72\xbf\x66\x8c\x0d\x67\xe3\x83\x73\x1a\x8e\x98\xb1\x57\x45\xb6\x95\x50\xc4\x6b\x7b\x46\xbf\xba\x17\x3e\x94\x26\xe8\xcb\x59\x62\xcb\xa7\x6a\xbd\xc9\x31\x4d\xbe\x1c\xb3\x45\xa0\xf9\x20\xc1\xec\x8b\xaa\x62\x21\x5b\xfd\x92\xaa\x34\x64\xe9\x93\xea\x81\x46\xef\xa1\x7a\x29\x15\x7e\x77\x51\xc6\x7e\xbb\x7f\xff\x3e\x3c\x54\x6f\x44\x79\x46\xed\x97\x3d\x17\x42\xbb\xc4\x4a\xac\x3a\x21\xc9\xbb\x1a\x82\x22\x13\xb1\x97\xb4\x19\x15\xc3\x75\xd4\x85\x04\xf7\x35\x63\x43\x6c\xa1\xab\x54\x05\x3c\xef\xce\x9d\x44\x67\xe9\xdc\x27\x4b\x81\xfa\x33\xf8\x72\xcf\xe1\x31\xff\xee\xc8\xab\x57\xd6\xbf\xa1\xb7\x6a\xed\x12\xcf\x69\x8c\x67\xab\x3b\x41\xa7\x1d\x23\x5f\xdd\x68\x38\x68\xd2\x6c\xa9\x9b\xcf\x44\xf1\x27\x3a\xb3\xcb\x38\x3d\xc9\xdc\xc5\xf8\x62\x7b\xd1\x9b\x2a\xc0\x78\xee\xdb\x8c\x8a\x3d\xb3\x39\x52\x5f\x7e\x35\xde\xcf\x4e\xb9\x9a\xd3\x49\x98\xf2\x5c\x62\xe1\x13\xc3\x86\x6c\xc4\x5c\xae\x8a\xe1\xcd\xbf\x01\x58\xeb\xa0\x93\x6a\xf8\xf6\x84\x67\x4b\x79\xee\x94\xfa\x6f\x93\xa1\x99\xc2\xfd\x0e\xf9\xa1\x86\x3c\xe5\x62\x87\x95\x74\x52\xcd\xed\xef\x67\x43\xfc\x9d\x15\x30\x2f\xa7\x97\xd8\x86\xea\xd5\xeb\xbc\x9b\x34\x5a\xfb\x23\x64\x11\x72\xb6\x0e\x77\x18\x97\xbc\x5e\x82\x89\xce\xb1\xeb\xfd\x34\xe1\xa8\xbd\xa8\x60\x11\x5f\x18\x3d\xab\xf1\xc5\x29\x2d\x06\xee\x60\x93\xef\x59\x1e\x5b\x61\x7f\x4b\xcb\x2f\x9f\xd4\x3a\x5b\xcf\x8b\x80\x37\x31\x13\x48\x8a\x72\x2d\x9b\xe6\xb8\x1f\x75\x77\xa0\xe3\xb6\xc1\xe4\x2f\xc6\x80\x4b\x45\x93\x25\xfe\x72\xc3\x9a\xec\x5e\xa6\x74\x35\xc0\x9e\x5a\x6f\x71\x43\x20\xfe\xd9\xfd\x53\x4f\x67\x17\xef\x32\xc8\xdb\x6d\x7c\xbe\xcd\xb7\x15\xb5\x37\xb6\x08\xf3\xf0\x6b\xe0\x76\x64\x3f\xf1\x13\xf2\x66\x13\x8b\x9a\x76\x17\xe8\x6c\x14\x6d\x74\x5b\xd1\xc7\xda\x3c\x15\xb8\x77\x78\x7f\xa3\xcf\x45\x45\xa3\x50\x84\xc7\x21\x2e\xd3\x00\x6a\xd6\xa9\xea\x81\xd6\xe5\x96\xf9\x4c\xd0\x9b\xbc\x09\x65\x45\xb6\xe9\x39\x54\x79\xb2\x11\xc6\x74\xc4\xfd\x7e\x85\xc3\xe7\xf7\x77\x48\x7
4\x33\xca\xb1\xac\x69\xf6\x97\x94\x54\xb1\x25\x98\x27\xf2\x00\x2a\x92\xe2\xbb\x7f\x9a\x44\xae\x16\x8f\x87\xf8\x78\x4a\xe6\xfb\xe3\xcc\xa9\x36\x68\xe2\xe4\xab\x76\x81\x4a\x82\xec\x97\x85\x25\x32\x67\xe2\x28\x68\xa9\xd9\x9e\xff\xb9\x57\x82\xe6\xb2\xc5\x9d\x51\xde\x92\xb8\x9e\xa8\xd9\x9d\x7a\x16\x82\xca\xfd\xc6\xf1\x53\x57\x6b\xc5\x0a\xcd\x75\x8e\x84\x5a\x24\xc1\x7b\xc8\x97\x76\xc6\x26\xf2\xe4\x1f\xa7\x0a\xcc\x73\xc5\x95\xd2\x2c\x39\x85\x7e\x1e\xe1\x9b\x19\xc5\x82\x8a\x88\xfc\xe2\xd3\x57\x42\x79\xdf\xe6\xc5\xbd\x14\xf4\x52\xeb\x01\xa7\xfc\x17\x13\x96\xcb\xae\x5e\x3c\x37\x89\x76\xfd\xcf\x8c\x86\x7b\xf4\xbc\xd9\x5d\x65\x0a\x36\x0d\xcf\x1a\x53\x68\xeb\xeb\x41\xf0\x8f\x73\xc6\x68\xac\xa6\xb7\xfc\xf5\x79\x6e\xcc\xd2\xcb\xed\x4b\xf2\x51\x08\x32\x85\x78\x95\xcf\x6b\xde\x47\x6a\x5c\xc3\x55\xd0\x50\x12\x45\x89\x32\x7b\xc9\xcd\xae\x6a\x8f\xb1\x48\x7f\x7b\x72\xd2\x2d\xee\x1c\x7e\x6c\x74\x2a\x94\xf2\x86\x90\x2c\xc9\x74\xc6\x44\xe1\xca\xd9\x2e\x58\x34\xbb\xf1\xa5\x5e\x49\x2b\x4d\x84\xfb\xa2\xde\x31\x06\x8d\x90\xde\x93\x01\x18\xb3\x35\x4c\x7c\x8d\x00\xa3\x5b\x70\xa6\x23\xfd\xe0\x8a\xcd\x06\x1a\x33\x7e\xd6\x7c\x40\xf0\x9b\x52\x90\x63\x49\xbf\xfe\x13\x5b\xd2\x4f\x9d\xee\x58\xfc\xe2\xd2\xdc\xc5\x2c\xe2\xe5\x5a\x30\xdc\xea\xef\xad\x52\x46\xd8\x9f\xb8\x6b\xf5\xbe\xd0\xfb\x99\xec\x88\x4d\x71\x21\x8d\xcc\x18\xf1\xf8\xfe\xbe\x33\xd4\x6c\xbc\x4f\x1f\x0b\xbe\x9f\x5d\x29\x9f\xad\xc3\x24\x56\xd2\x95\x0a\xe5\x7b\xa0\x37\x66\x6c\xf8\x6b\x53\xee\x4c\x15\x9f\xff\xee\x42\x52\xd7\x42\x68\x8d\x87\x2e\xc9\x57\xd1\x36\x04\x81\x25\xdd\xe5\xbd\xe7\x2b\x2d\xe9\x16\x6e\x5d\x09\x9a\x05\xdf\x4d\xb8\xe0\x31\x49\x4b\x95\x27\x60\xf7\x42\x42\x08\xd1\xf9\x5b\xb6\xfa\x48\x68\x8f\xb3\x34\xd6\x84\xd0\xee\xce\x9f\xc4\x5f\x4d\x09\xf7\xf4\x11\x5b\xe6\x45\x59\xab\x03\x4c\xb3\x9d\x4b\x4a\x60\x86\x11\xc4\xf3\x9d\x0a\x09\xb1\xdc\xac\x82\x3f\x4f\xd6\x19\x9f\xca\x09\x49\x44\xf8\xc1\xcc\x55\xcb\x37\x87\xf7\x22\x23\xb8\x7a\xee\xc6\x2e\xa3\xec\xb5\x44\xfc\x44\xf6\x6f\xe5\x42\xc3\x46\xad\x51\x62\x4f\xe0\x37\x4d\xc0\xd6\x6f\xe9\xa
f\x47\x02\xd7\xc9\x15\x34\xb0\xf2\xd5\x58\x54\x45\x26\xda\x02\xd3\xdb\x60\x30\x7a\x3a\x26\xbe\xfe\x58\x15\x9f\xdd\x7a\x8b\xea\xfb\xda\x12\xef\x85\x41\x0e\x3f\x97\x93\xa1\x83\xc4\xbe\xef\x0e\xb2\x63\x8c\xce\xcf\xd0\xe1\x38\x9c\xf2\x27\xd2\x13\x49\x65\xa2\x29\x77\x55\x7e\xf0\xbf\x6c\xf5\xd2\xcd\x31\x6f\xf3\xfe\x77\xd6\xf4\x53\x1e\x4f\xd5\x7c\xe3\x41\xc4\x60\x5a\xd0\x5d\xa2\xbb\xe7\x80\x7b\x4e\x72\x31\x7c\x96\x07\x86\xe4\x66\x6f\x54\x9a\x28\x65\x68\x2a\x84\x62\x53\xa5\xc8\x89\xc6\xfc\x99\x04\xeb\xa7\x7c\xcf\x2a\x9b\x45\x2b\xce\x44\x67\x39\x41\xc9\xe9\xe5\x4b\x94\xf3\xb5\x24\xbd\x8f\x1f\x1f\x03\xc1\xe2\xf6\xcf\xd9\x7c\xfe\x9e\xcd\xe3\x91\x77\x9f\xd4\x91\xb5\x35\x50\x22\xd4\x7f\xc6\xfd\x44\xeb\xf5\xf0\x64\xc2\x3d\x04\xd1\x4e\x0c\xca\x9f\x49\x0d\x56\x8c\x96\x2d\x2f\xc7\x0b\x69\xf6\xad\x2f\xcd\xf6\x23\xe6\x48\xf6\xed\x7f\x72\xbd\xa7\xb6\x3e\xc3\x08\x82\xb0\x1a\x63\xfa\x92\x1c\xb2\x7f\x90\x5b\xbf\x37\x08\xe2\xc5\x62\xd9\xe7\x1f\xf1\x45\xfd\xce\xb2\x8c\xfa\xb1\x0b\xed\x67\x19\xb0\x76\x71\x2b\xde\x44\x83\xf7\x96\x73\x2b\x77\xa3\x03\x05\x19\xea\x6b\x64\xad\x42\x40\xa9\xe7\xa5\x48\x39\x42\xd5\xa8\x18\x3d\x44\xe5\x73\x89\xdf\x86\x01\x44\x51\x06\xcd\x44\x65\x71\x47\xf4\x33\x13\x07\xa2\x93\xd0\xdd\x6a\xc7\x31\x77\x42\x77\x82\xf6\x7d\x25\xdf\xaf\xf8\x37\x96\x8a\x07\x6c\x42\xdc\x97\xb6\x68\x75\x7a\x09\x07\xcf\xc1\x28\x13\x0c\x90\xc6\x63\x2f\xa2\xe9\xce\x17\x5e\x28\x87\xc6\x7b\x7d\x38\x95\xd6\xea\x00\xd7\x4a\x72\x98\xbd\x75\x8e\x40\x4a\x1a\xa2\xe8\xb0\x08\xc7\x71\xb4\xd7\x8c\xca\x2b\x9b\xd9\xd0\xdf\xf7\xd0\xad\x9d\xa0\xb6\xb8\xcf\x45\xd4\xb1\x8d\x42\x59\x35\x2b\x55\xf9\x49\x38\xbf\x73\x72\x71\xc9\x15\xe8\x4f\xc7\x20\xb1\x78\x10\xce\x91\x87\xba\x14\x1c\x88\x18\xb6\x96\xdb\xfe\x5c\x84\xea\xaa\xc4\x4a\xdc\xcf\x70\x64\x89\x52\xb1\xf0\xe5\x91\x26\x60\x4c\x85\x10\x4d\x7f\xde\xb1\x50\x92\x88\x94\x21\x6c\xe9\x79\xf0\x83\xa0\xcc\x3c\xcf\xed\x04\x03\xd1\x29\x4e\x7f\x82\xfe\xbd\xc2\x57\xc7\x41\x20\x14\xcb\xe3\x91\xab\xb9\xfa\x57\xd4\xab\x3f\x6c\xc9\xf2\x3f\xcb\x40\xb8\xc4\x4a\x95\x2b\xdc\x3f\x5
b\x1e\xee\x39\x92\xc7\x45\x79\x9c\x8a\xfc\xc3\xff\xcf\xbe\x4c\x47\x94\xd0\xb8\x2f\xff\x29\xb4\x8f\x3c\x17\x3c\x8c\xf3\x45\xa7\xe9\x34\xba\x27\xf9\x7e\xf2\xa6\x4f\xd4\x95\x87\xc8\x0e\x0a\x89\x14\xe4\x0d\x25\x9b\xcb\x71\x3a\xbe\x82\x3f\x34\xf1\xf3\xfa\xf5\x50\xc9\x79\x2d\x73\xdc\x0f\x99\x29\x05\x1b\xa0\x9d\x85\x69\x5c\x20\xa9\x34\x46\x21\x76\x97\xef\x8a\x69\x62\x0d\x27\xd0\x19\xba\x1a\x76\x86\x20\xac\x3d\x52\xb4\x54\x05\xbd\x3e\x4f\xcc\x83\xee\x3e\x5c\x40\xa9\x8d\x95\x29\x0e\x4b\xa2\x7d\x3f\x54\x72\xd5\x49\x6b\x4e\xdd\x46\x8e\x08\xf7\xb7\xc6\x43\x52\xbb\xa9\xe7\xda\xf3\x56\xd3\x4a\x76\x73\x8a\xc1\x03\x2d\x9c\xdf\x78\x7c\xb2\x5f\x7a\x77\xaf\xbd\x61\x60\x96\xc4\xfc\x60\xd1\xe0\xcd\x22\xb0\x2b\xc7\xc8\xea\x15\x52\x55\x2b\xf1\xd2\x01\x42\x82\xdc\xa1\x5b\x87\x1c\x4d\xd4\x58\x1f\x78\x4d\x66\x9f\x32\xed\x57\x38\xc7\x78\xda\x0b\x93\xe5\x9c\x2a\xd0\x23\xa2\xef\x9e\x54\x4e\xdb\xb2\x1b\xba\xf1\x8a\xa3\xf6\x78\xd2\x3b\x06\xf7\xaf\xac\x21\x91\x82\x68\x7a\x25\x3e\x5f\xbf\xfa\x41\xa9\xbb\xd0\x52\x09\xcf\x6e\xdc\x19\x86\x0b\xe3\x87\x02\x0b\x19\xf3\x72\x04\xc9\xc1\xd0\x0a\x68\x4f\x04\x18\xc3\x20\x30\xb8\x54\x4e\xfe\xc8\x7a\x8f\x20\xa2\xf9\x60\x64\x77\x0e\x35\x88\xb1\x2c\x1d\x6b\x1d\xd6\x97\x02\xfe\xc0\x3b\x00\x36\x01\x50\xcf\x49\x12\x69\xbd\x5e\x7a\x89\x4e\x1b\x9e\xa6\x9c\xb3\x4e\x5e\xc7\x23\xdd\x09\xe6\xf7\x6c\x67\x33\xf0\x3e\x3f\xf9\x82\x1d\xee\x16\xfa\xf3\x7c\x53\xa8\x4f\xef\x0a\x10\x88\x9d\x10\x4c\xe0\xfb\x59\x0e\xf2\x85\x4e\xd4\x19\x1b\x30\xde\x9b\x2f\x4a\x49\xbe\xd4\xfd\x96\xad\x6f\x69\x37\xec\xf0\x08\xaf\xf7\xb2\x1f\xdb\x22\x64\xfe\x32\xfa\xe9\x45\xc5\x02\x53\x93\xdb\x8e\x60\x4b\x91\xc2\xac\x9d\xa7\xef\xfb\x2d\xbd\xf4\xf1\xf1\x46\xc7\x83\xc4\x2c\x6f\x5b\x2c\x8c\xf0\x18\xc5\xdd\x6e\x0b\xf6\x60\x13\x8e\x67\x2d\x24\xb6\x15\xa1\x61\xbf\x77\x7e\x03\x21\xf8\x14\xb1\x65\x68\x48\x36\xdf\x86\xaf\x23\xb4\x80\x23\xec\xdd\x68\xd6\x39\x23\xd4\x06\x10\x77\xaa\x34\xb4\x71\x09\x1c\xc1\x09\x02\xd0\x4f\x78\x2c\xd8\x28\x77\xe3\x94\xd3\xcf\x87\xad\x7a\x2c\xf2\x2a\xbf\x48\x8b\x7a\x53\x51\x82\xc4\xc
d\x81\xe7\xce\xd1\xb5\xf7\x88\xd9\x5d\x8a\x01\xa0\x01\x28\x4e\x7c\x3e\xbe\x51\x6b\x33\x70\x43\x09\xe6\x53\x69\x16\x95\x7a\xaf\x3d\xae\x1f\x67\xfa\xcb\x6d\xe4\x67\x19\xb6\x6b\xb6\x68\x1b\x6d\xc3\x78\xc5\x19\x33\x5a\x1f\x6b\xba\xe2\xc6\x4b\x22\x40\xd3\x84\xac\x9c\xcc\x13\xbd\xed\xe0\x9e\xf7\x53\x5a\x5d\x65\xc5\x99\xfb\x33\x1a\x9e\x7e\x22\x6f\xde\xc4\x37\x2a\x56\x9e\x91\x70\x00\x7e\x07\x20\xdc\x95\x72\x32\x01\x9c\xfb\x1d\x54\x9f\x34\x4c\x13\x71\xb5\xbe\xcd\xd2\x35\x4d\x9c\x58\x0a\xc3\x90\x46\x78\xf0\xea\x33\xff\xf5\xb0\x10\x23\xf4\x56\x9f\x47\x28\x98\x4b\xfc\xe6\xd9\x9a\x7d\xe6\x80\x7c\x20\xdb\xae\x0f\xd3\x3d\xd8\x13\xbd\x8d\x3f\x77\x11\xa8\xfa\x65\xce\xb9\xee\xdc\x5e\x48\x30\xb0\x9d\x89\x69\xde\x70\xc5\x55\xd6\xd7\x1b\xd3\xec\x1c\x02\x1b\xb0\xbb\x75\x2b\xbd\xa1\x13\x84\x30\xcf\x0c\x71\x4f\x0e\x2d\x6b\xd1\x8e\x3c\xfb\xee\x7b\x45\xc9\x75\xba\x1f\x07\x63\xcb\x60\x74\x9e\xdc\x2f\xfe\xfe\x4e\xd3\x3e\xd3\xc8\x3c\xdd\xfe\x92\x70\xfb\x07\x47\xa5\x6d\x75\x67\x40\xbb\x00\xd9\xe2\x8d\xd9\xa8\xd3\x78\xd0\xb9\x8d\x98\x30\x07\xd8\xa8\xc5\x01\x36\x3c\xee\xf5\xa2\xf8\x82\x14\xb8\x70\x0f\xf2\x9d\xb6\x1b\x4c\xb7\x87\x01\x9f\xaa\x6d\xdf\xb6\x51\xc9\x01\xd3\x87\x3e\x33\x6e\x44\x9d\x9e\x3e\xb8\x53\x82\x9d\x49\xf5\x89\xf3\x7d\x69\x84\x89\x7f\xc5\xe3\xae\x9d\xf5\x20\x26\xd7\xaf\x34\xca\xc1\x65\x87\x5f\xcb\x3c\xc8\x91\xe7\x43\x1c\xdf\xdf\xf9\x2b\xe1\x1b\x40\xc1\x0e\x18\xec\x5c\x62\x9e\xad\x8f\xa7\xf6\x26\x98\x96\x06\x48\x84\xa7\xfd\x48\x3e\x6b\x9c\x69\x99\x15\xf6\x8a\x8c\x3c\xa0\x18\xec\x1f\x76\xe5\x35\x01\x10\x6a\x78\xc3\x78\x2d\x33\x78\x4f\x8c\x3e\x03\x86\xd9\xaf\x4d\x16\xd7\xf9\x61\x2e\x39\xff\x2c\x02\x9e\xb3\xf7\x44\x0a\xb3\x25\x38\xbc\x37\xfd\xaf\x97\x91\xcc\x70\xa8\x3f\xdb\xee\xf2\xc2\xdf\x57\x97\x4e\xd0\x64\x4d\x1c\x1d\x04\xf9\xe0\x32\x04\x26\x38\xbd\xcd\xe3\xd5\x11\xd8\xc7\xc9\x99\xb2\x07\xcc\xb9\xec\xfe\xfc\x34\xef\xf8\xd4\xed\x06\x8d\x0c\xe1\xfe\x30\xa5\x1f\xef\xc4\xf8\x11\xc7\xc7\x37\xc7\x11\x0a\x00\x42\x43\x73\x80\x15\xa1\xbf\x7f\x22\xaf\x9d\x11\xfa\xdc\xf3\xe4\xb1\x87\xa5\x40\xc
c\x58\x87\xf3\x42\xb4\xe1\xeb\x14\x61\x63\xe0\xb8\x6f\x39\xe4\x7e\xef\x25\xf7\xd4\x50\x48\xdd\xa8\x3b\x73\xdc\x9c\xb6\x34\x13\x2e\x5a\xb5\x16\x82\x37\xce\x11\x03\x28\xc6\x63\x09\x8d\xb3\x9c\x1b\x5a\xf3\xb4\xcf\x04\x00\x85\x81\x61\x27\x33\x87\x35\x87\x33\x52\x1b\x1d\x8d\xa3\x9b\x91\x52\xf9\xad\x71\xf1\xf2\x09\xf4\xce\xdf\x7b\xdb\x27\xfc\xcc\xc7\x32\x6f\x5b\x23\x62\xdd\x19\x00\x95\x18\xe4\x0a\xc2\x05\xc7\x26\x1c\x1e\xd8\x71\xde\x8f\x48\x18\xa6\xe9\xb9\x8b\x82\xfa\xeb\xa9\xfa\x70\x88\x7d\xc6\x5a\xb4\xb3\x42\xf5\x3a\xc3\x2e\xec\xb2\x83\xcc\x3e\x8a\xaa\x73\xce\x12\xed\xbb\x0e\x99\x3b\xc0\x34\x2a\xe7\x46\x6d\x5f\xe1\x04\x10\x84\x81\xe9\x8a\xf3\x5c\xa7\xea\x8a\x39\x69\x0c\x9c\x29\x97\x9f\xfc\x6c\x69\xfc\x6f\x86\x8a\x5e\x85\xb8\x4a\x0e\xa8\xbe\x07\xcc\x12\xb2\x73\xf3\x5d\xf3\x94\xc8\x89\x95\x21\xe8\x7e\x99\x25\xeb\xd6\x3f\x6b\x4e\x68\x82\x3e\xa2\x27\xa3\x63\x3b\xc5\x63\xe6\x20\xfe\xab\x2f\x2a\xdd\x34\x75\x53\xdf\x24\x3a\x26\x20\x53\x8a\xfc\xc2\x13\x1e\xa9\x83\x69\x29\x60\xc0\x8e\x2c\xfe\xac\x77\x2b\x9f\x27\x14\xc8\x01\x46\x40\x40\x92\xc3\x3c\x9f\x26\x5e\x6a\x7b\x4e\x48\x1f\x7d\x44\xb7\x1a\x6f\x0d\x1b\x25\x13\x5d\x38\x43\x3c\x93\x00\x03\xf4\x0c\x40\x3e\x05\xe1\xec\x79\x99\x5e\xa1\xf9\x4e\xd0\x31\xc1\xf4\xf8\x4d\xe7\x36\x7b\x97\xa3\x2b\x8f\x8c\x07\x36\x55\x3f\x8f\x96\xbe\x18\xef\xe3\xba\x58\x4d\x07\xbf\x5a\x77\xfe\x69\xfa\x02\x81\xb9\x41\x90\x46\xc4\x73\xa0\x75\x5b\xf2\xbb\x8f\x42\xe6\x06\x05\xf6\x35\x26\xe0\x79\x80\x20\xb4\xfa\x6c\xe7\xc7\x29\xde\x09\xa0\x73\xda\x57\xc9\x1f\x68\xd1\x35\x70\xc3\x77\xac\x95\x35\x1f\xac\xab\x9d\xa2\xbe\x99\xe2\x99\x90\x42\x46\xa7\x00\x88\xb3\x1d\xdf\x08\xdc\xa9\x89\x9e\x00\xe0\xc9\x81\xbb\x07\xa0\xa3\x51\xa4\x71\xd2\xe1\xcb\xa4\x08\xae\x9f\x9e\xc2\x0b\xef\xcd\xa3\xac\x6d\xdf\xa4\xd3\xe5\xae\xf0\xf3\xf7\x2e\x79\xcf\x1f\x32\x83\x29\xed\x75\x9c\x8c\xf0\xd8\xf7\xd9\x0c\x29\x7e\x1c\xba\x3d\x34\x00\x10\xf7\x1e\xec\x2f\x0a\x77\x43\x65\xe6\xa5\x7d\x97\xa7\x09\x61\x28\x32\xfe\xf9\x3c\xe9\x66\x21\x17\xaf\x1d\x62\x98\x93\xde\xba\x46\xfc\x1c\xa2\x77\x6d\x9
2\x88\x6c\xa8\xf2\x3c\x33\x7d\x87\x80\x3c\x13\x41\x25\x2d\xdf\x97\x3c\xec\x07\xf0\x01\x73\x11\xe0\x5e\x09\x72\x66\xfd\x86\x6e\x9d\x09\x41\xe0\x3c\x96\x25\xb6\x31\xe1\xec\xfb\x34\xee\xbb\x05\xda\xfd\xf6\x0d\x3f\xc3\x66\x7a\x6c\x69\x40\x90\xe4\x3a\x11\x26\xac\x28\xb1\x48\xad\xe7\x48\x3a\x02\x60\xc2\x72\x3b\xfd\xde\x6a\x79\x87\x00\x20\xa1\x00\x73\xd3\x18\xbc\x37\x88\x56\xf5\xad\x66\x35\x03\x90\x73\x8c\xad\x8c\x40\x20\xa2\x49\x5d\x13\x67\x5f\xef\xba\x3c\xb8\xc7\x78\x46\x40\x94\xb8\x7d\xdf\x6f\xd5\x4b\x8b\x90\x75\x02\x84\x97\x32\xcc\x8d\xda\x0b\xf1\xaa\x19\xfd\xe7\x1c\x5f\x62\xce\x9d\xe5\xa9\x34\x93\x35\x13\xc4\xe6\x3e\xed\xc7\xfa\xcb\xa5\x78\x29\x5c\xfc\x93\x4a\x7d\x0f\x0b\x82\x3c\x86\x9a\xa4\x14\x4b\xf6\xf9\x0c\x7b\xbb\x2c\xcb\xb6\x01\xf0\x7c\xd2\x34\x7b\xed\xe6\x89\x8e\x72\x6b\x31\xcc\xbd\x67\x10\xa1\xfa\x04\xc8\x80\xfa\xae\xf8\x9d\x7e\x86\xfd\x63\x21\x00\x6a\xb6\x78\x65\x3b\x57\x88\xc9\x5b\xa0\xfd\x88\xd9\xc1\xbc\x03\xc0\xb4\x3b\xb8\x94\xf7\x56\x61\xcb\x26\xd0\x34\x02\x01\x29\x0a\x6e\x84\x05\x8a\xb1\x50\x6f\x71\x17\x22\x1a\x67\x83\x4c\x22\x76\x6d\x17\x69\xee\xfc\xfe\x6a\xcb\x03\xe5\x23\x36\xb3\x5d\xe6\x69\x12\xa5\x73\xf5\x67\xc5\xb4\x24\x11\xd1\xa7\x22\xb9\x1a\x66\x00\x8c\xd9\x07\x7b\xbc\x83\x85\x6e\x84\x6f\xb7\xb8\xdb\xce\xeb\x77\x9e\x01\xf6\xc1\xc9\x58\xb6\xbe\x7b\x04\x45\xbd\x12\xbd\x60\xb8\x8e\xf9\xe0\x65\x84\x70\x53\xdd\x81\xcb\x80\x24\x0d\xa0\x98\x35\xa4\x08\x00\x50\xdf\xf7\x2d\xd3\xca\xf6\x05\x82\x1d\x10\x00\xc0\x1d\x80\xd1\x07\x4c\x48\x02\x5e\x34\xe0\xb4\xe2\xe4\x3d\x4b\x95\x29\xa1\x43\x3c\x13\x4b\x5a\x3d\x31\xf5\x77\xe6\xa8\xc8\x8a\x3b\x61\xb9\x8b\x69\xdf\x20\x9d\xeb\xde\xef\x8b\xb2\x61\xbb\x60\xa8\x65\x22\x47\x38\x68\x10\x3d\x49\x9a\xeb\xb6\x6b\x9f\xe4\x0d\xe7\x8c\xd8\xe9\x94\x21\x88\xfd\x66\x00\x1b\x06\x39\x17\x04\xc3\x73\x67\x6c\x8a\x72\xbe\x95\x13\xcd\xfb\x0c\x84\xb3\x0a\x71\x26\x1d\xd7\x6a\x7a\x13\x9a\x9d\x02\x84\xe8\x73\xc0\x4e\x00\x5c\xed\x45\x94\x96\x29\x6d\x3b\x01\x19\x02\x90\xa8\x93\x6e\x5b\xee\x78\xce\x71\x90\x9f\xc7\x10\x6e\x7e\xcb\x52\xc1\xdc\xa
e\xb1\x87\xcd\xc2\xf7\xd0\xf2\x66\xf6\x41\x23\xbb\xff\x40\x24\xe5\x49\xfb\x28\xd1\x15\x14\xfd\x38\x4f\x72\xfe\x04\x7d\xc4\xcc\x51\x77\xa2\xa6\xe5\x26\x76\x91\xf2\x34\x93\x27\x7a\xd7\xb6\x27\xc9\x64\xdd\x79\x25\xad\x72\xa0\xf9\x0c\x41\x0d\xe8\xe6\x06\x08\xcd\x00\xaa\x63\x26\x5a\x9f\xf5\x24\xc3\xb5\x3a\xb8\x21\xc3\x36\x7d\xd9\x21\x4c\x13\x7a\xcf\x82\xd5\x24\x05\xab\x3a\x39\x18\x66\x33\xad\x60\x0e\xa6\x37\xc3\x0c\xb5\x10\x34\x99\x2e\x3c\x79\xc0\x43\x45\x6e\xad\x1d\x20\xd1\x0e\xb6\x5d\xd0\x6b\x3b\xbe\x35\x31\xa7\x4e\xdb\x30\xb8\x82\xd4\xf9\x5e\x5d\x26\xe8\xd2\x18\x85\xc6\x84\x47\xfa\xd7\x79\xaa\xbf\x5a\x06\xe5\x45\x46\xdd\x38\x71\x25\x8a\x04\x6b\xcd\x91\xfb\x20\x3c\xd7\x34\x05\x4f\x4a\x43\xd9\xd3\xea\x99\x89\xde\xea\x05\x61\x37\xde\x6b\xd7\x08\x21\x56\x3a\xec\xcc\xe6\x69\x22\x25\xbe\xdd\x00\x76\x09\x60\x81\x0d\x20\x49\x4d\x58\x74\xb9\x92\xba\xf2\x14\x44\xea\xda\x46\x4f\xcd\x0e\x73\x1c\xb9\x36\xd7\xea\x9c\xab\xf3\x9c\x54\xf0\x82\x70\x51\xb1\x6e\x03\xf2\x34\xd1\x34\x60\x83\xe8\xa7\x14\x20\x7b\x4d\x9e\x0e\x50\x3c\x6c\x0d\x76\x80\xb8\x39\x50\x3b\x7f\xef\xec\xe6\xc4\xc0\xa5\xdd\x05\x0a\x0d\xdb\xde\x6f\x7b\x49\xf8\xdd\x9b\x15\x66\x41\x9b\x35\x20\xf1\x27\xde\xb4\x4c\x94\x62\xef\x7f\x7c\x5a\x44\x32\xc9\x94\x66\xcf\x73\x78\x29\x1c\xe7\xc7\xca\x46\x16\x10\xb2\x0f\xbd\x9b\x28\x79\xa3\xaf\x9f\x04\x69\x3f\x80\xcd\x22\xb9\x51\x1d\xe2\x5b\x95\x9f\x0f\x31\x66\x98\xfb\xbe\xe7\x7d\x4f\xe6\x3d\x1d\xa7\xbd\x27\x2d\x4d\x10\xfa\xfa\x18\x7c\x89\xcc\xb1\x6e\xc5\x6a\x40\x17\x17\x74\x5c\xd7\x0b\xe3\xed\xdb\xdb\x14\xc3\xa2\x5f\xb0\x67\x84\x60\x8f\xfd\x97\x56\x92\xea\xbb\xcd\x1b\x63\x80\x6e\x24\x98\x1e\x29\xcb\x5d\x4d\xad\x75\x60\x0a\xe5\x02\xfe\xbe\x19\x3e\x4e\x13\x54\xcd\xc9\xc1\x7c\x8d\x04\x60\x4b\x82\x06\xfd\xe1\xe8\x61\xf3\xeb\xbd\x72\x1c\x3d\x0a\x90\x9d\x3f\x49\x36\xd1\x74\x78\xef\x00\x22\x79\x6c\x9b\x52\x4b\x79\x04\xce\xb8\x37\x85\xe8\x86\x91\xc7\xd1\x44\xb3\xae\x6a\x9b\xc9\xe6\x2b\x6d\x79\x53\xec\x79\xe1\x04\x31\x4d\xd3\x54\xb6\x73\xf7\x3e\xba\x1d\x08\x5a\xcc\x12\xea\x2d\xc5\x12\x42\x6f\xd
8\x98\x39\x77\x0f\xa8\x7c\x1c\x56\x94\x22\x2f\xad\xae\x09\x9c\x96\x75\x90\x19\xf3\x1f\xb7\xeb\x60\xc0\x9e\x4e\x68\xcd\x33\xd3\xd2\x24\x85\x11\xfe\x7c\x6e\xbd\xdb\xed\x3c\xf3\x53\x5a\xee\xde\x3e\x47\x7c\x33\xd2\x2c\xcd\x9c\x55\x83\xe0\x8d\x0e\x81\xf5\xab\x4b\xe5\xb8\xd7\xa5\xd6\xc3\xca\x30\x37\xe1\x1d\x78\xc4\xde\x65\x34\x4d\xf3\x0f\x6e\xbd\xd9\xee\x8a\x35\x5a\xbe\x93\x5b\x35\x8c\x5d\x55\xa9\x2d\xcb\x11\xa1\x38\x02\x9d\x07\xb9\x94\xb9\x58\x4a\x80\x2f\xfa\x83\x33\x99\x38\x23\x56\x26\x0a\x10\x82\xa2\x28\x38\x37\xab\xc7\x64\x2e\xe6\xee\x4b\x5d\x40\x18\x68\xc4\xe7\x43\xad\x04\x90\x5a\x95\x5a\x5d\xdf\xef\x7b\xbc\xf3\x32\x3c\x86\xeb\x46\xbd\x34\x57\x67\x16\x3a\x66\xee\x71\x5c\xd7\x6d\xbb\xb1\x2f\x6f\x3f\x77\x98\x28\x25\x2b\x40\x48\x52\xfb\x99\x69\x57\x70\xdc\x4d\x52\xd7\x00\xa0\x23\x08\xee\x54\xf7\x84\xb8\x78\xfc\x72\xf4\xf7\x07\x81\x9b\x69\xeb\x44\xbe\xa9\xdb\x79\x60\x0c\xb1\xe3\x05\xe1\xef\x11\xfb\xa6\xeb\x80\x35\x8c\x5b\xbe\x2e\xb8\x13\xb1\xd4\x32\x14\x0d\x2f\xcf\xba\x58\x8d\x7f\x48\x82\x8b\xa6\x9f\x93\xc2\x42\xd7\xdf\xf3\x7c\xc2\xbc\xd5\x6b\x51\xee\xf3\x2c\x02\x61\x07\x09\x30\xae\x8a\x38\x55\x6d\x5b\x01\x90\x04\x80\x3f\x79\x7b\x2f\xfb\x97\x1a\xd5\x31\x73\x22\xdb\xba\xef\x06\xc3\xaf\x1d\x11\x87\x65\xd6\x8b\x0c\x52\xc9\xfc\xee\xcf\x33\xc1\x5e\xd5\x9e\xa5\x10\xc3\x2c\x2a\xd9\x94\x40\x08\x73\xa4\x32\x4d\x62\x53\xb3\xe2\xba\x4e\x4f\xa6\x40\x1c\xc6\x33\x53\xb2\x46\xb0\x44\x56\x61\xfc\x6a\xc5\x45\xed\x33\x40\x7d\x07\x30\xc0\x02\x85\x8f\x55\xd2\x33\x43\xb0\x89\xbe\xd1\xa6\xb6\xef\xa2\xd7\x33\xb8\x05\x12\xec\x38\x93\xee\x00\x9b\x19\x88\xfc\xa9\x95\x03\xda\xe3\x81\xc3\x0d\x59\xbd\xc7\x18\xa4\xa2\xd1\x8d\x60\xb4\x79\xfe\x6e\x9a\xa1\xe7\xb6\x96\x6b\x8f\xce\xd1\xb1\x08\xb1\xe7\x1d\x44\x53\xf9\x50\x09\x5c\x9c\xdd\xeb\x06\xb8\x86\x11\x6d\x72\x19\x03\xb2\xd3\x4d\xa2\x0b\x10\x61\xd2\x8d\x00\x39\x18\xd4\x3f\x5e\x6b\x03\xe8\x1a\xef\x45\x13\x1b\xbd\xcd\x09\x4b\x44\x6b\xb4\xfa\x33\xc3\x0c\xc3\xb0\x25\x4a\x9b\x4b\x1f\x7d\xfe\x92\x89\x6a\x80\xcd\x2c\x8f\x83\xf2\x3d\x6c\x67\x4a\x0c\xb0\xfb\xb
b\x8b\x91\xef\x73\xff\xed\x47\x71\x9c\x0e\xeb\x94\x30\x0c\x1f\x55\x8e\x83\xf1\xdf\x1a\x04\x19\x00\xc4\x86\x44\xdb\xb2\xcb\xcf\xe4\x56\x6f\xe9\x60\x13\x4f\x3a\x12\xe1\xd9\x64\xbc\xb9\xc4\xdc\x37\xe0\x54\x46\xbd\x45\x9b\x6b\xdc\xef\xd5\x39\x45\x07\xbd\xd3\x73\x5d\x57\x43\x91\x43\x2f\x59\xd0\xb3\x60\x87\xae\xeb\xba\x7f\x2c\xde\xbe\x04\x76\x5b\xe8\x6a\x51\xc1\x22\xa7\xee\xcd\x88\xae\xa6\x30\x0c\x8d\x6f\x6b\x8b\x9c\x11\x93\x20\x9a\x80\x4d\x67\xe6\x87\x22\xf0\xea\xaf\xa5\xfb\x42\x15\x7e\x60\xce\xb0\xf5\x71\x69\xdc\x17\x55\xed\xf7\xa9\xdf\xea\xcd\x1f\x47\xf7\x3e\x9a\x66\x69\x1b\x79\xf4\xac\xd0\x6d\x5d\x37\xc1\xbb\x2c\x46\x91\x1e\xe6\x33\x0b\xf6\x99\x47\xe6\x99\x46\x26\x5f\xab\xa8\x35\xc2\x91\x7d\xa2\x97\xf9\x8a\xa5\xb7\x38\x0a\x00\xac\x11\x71\x20\xbc\x18\x38\x39\x4b\x24\x10\xd9\x66\xaf\x2d\x33\x47\x95\x02\xed\x9f\xe6\xd5\xb3\x10\x3e\x84\xbf\x83\x7e\x65\x48\x5a\xad\x9f\x25\x7e\xca\x7b\x9e\x41\x57\xd3\x20\xf5\xa0\x92\xf5\x4d\xd0\xad\x0d\xe1\xc9\xf8\x77\xdb\x0b\x76\x14\x33\x5f\x04\x41\x8c\x46\xde\x29\xc4\x1e\x9c\x6b\x9b\x1e\x80\xa5\xe4\x1f\xfc\x45\x46\xe9\x62\xff\xd4\xd7\x34\xcf\x52\x5e\x1e\xa0\x15\x4c\xce\xff\x82\xe2\xc1\x71\x96\x24\xa1\xe7\x91\x80\x40\x71\x22\x08\xf7\x21\xa2\x11\x80\xa6\x7b\x6c\x22\xbe\xf2\xb5\x8f\x2c\x47\xee\x53\x62\x47\x56\xc4\x1f\x55\xe3\xd7\x0b\x6b\x25\x6a\x76\x3e\xb6\xaa\xdb\x85\x7b\xc4\xf8\x72\xd3\xf0\x0d\x10\xb8\x87\x23\x25\xf3\x2a\xce\xdf\xfd\x91\xb7\xba\xfa\xf1\xd8\x00\xcf\xf3\xf9\xbe\x7f\x9f\x14\x43\x4e\xde\x47\xbb\xb1\xcf\xdb\xbe\x19\x0c\xea\xe7\xd2\x70\x53\x62\xac\x58\x7e\x97\x58\x77\x2d\x39\x89\x18\xd5\x7d\xdf\xd8\x4e\x3d\x3f\xfb\xbe\xf5\xfc\xbb\x2f\x1e\x8a\xf0\xd0\x7f\x63\x58\xfb\x6c\xbb\xfe\xa1\x7b\x16\xf6\xb8\xce\xbf\x8e\x8c\xab\x6d\x70\x39\x60\x92\x5b\x26\x41\x53\x1d\x04\x39\x2d\xbf\xf5\xe4\x8e\x00\x30\x14\xa9\x61\x08\x67\x08\xbe\x6d\xc3\x76\x82\xd0\x93\x68\xeb\x0c\x71\xba\x58\x1e\xb5\x85\x8a\x82\xb5\x02\xe5\x7e\xe8\xdf\xd9\xe7\x3b\xb6\x35\x1d\xe5\x18\x06\xa4\xd7\x4b\xcd\x74\x7f\x5e\x28\x37\xcd\xc6\x5c\x6e\xa7\x1e\x16\xa4\x94\x1b\xc6\x7b\x4
9\xef\xa5\xec\xa0\x93\x64\xd9\x75\x04\x21\x97\xc2\x28\x95\x30\x10\x25\x00\x01\x7d\x80\x28\x65\x69\x62\x5f\x87\x0a\x48\x23\x0f\x38\x79\xb8\xac\x5f\xec\x5a\x4d\x2f\x66\x02\x2a\xfc\xd0\x28\xc6\x0e\xc5\x1e\xfb\x0c\x45\x6b\x9f\xda\x55\x18\xb4\x54\x8d\xb2\xbf\x6f\xd4\x7a\x85\xdc\x89\x10\xf9\x7b\x68\xdf\x02\x71\x0f\xa4\xff\xa5\x6d\xab\x95\x6f\x2a\x5a\xbd\x03\xa0\x02\x67\x5f\x95\x96\xee\x9b\x4c\x4f\xd3\x30\xff\xff\xef\xdf\x0c\x1c\xc7\x19\x2c\x8b\x31\x5e\xd8\xf9\x53\xa2\xe8\xa0\x1b\xa7\x8a\x9d\x9c\xa6\x61\x1b\x16\xa8\x65\x3d\xb8\x2f\xd1\x99\xef\x94\xd4\xb9\xaf\x42\x30\x14\x27\x38\x83\x6a\xa5\x4c\x46\x41\x10\x84\xba\x69\xe2\xe7\x14\xf5\xf7\xee\xba\xa9\x95\x85\xdf\xe9\x5f\xbf\x50\x21\x60\xf2\x80\x61\xf6\xaa\x90\xde\x1d\x2f\xd8\x6f\xaf\x82\x00\x90\x47\x4a\xb9\x5d\x32\x66\xd9\x3c\xfa\xc0\x4a\xf5\xc6\x49\x89\xa5\xd7\xfd\x78\x23\x62\x0d\x93\x05\x67\xe6\x4f\xd5\x7e\x7f\x0f\x16\x34\x0b\x1a\xc1\xfb\xfb\xb1\x53\x15\x41\xb6\x37\x53\x2b\xe7\xf7\x71\x04\xbf\x18\xf0\xa3\xe2\x57\x5d\xd0\x9c\x00\x0a\x5c\xb5\x7b\x8a\x7c\xab\x46\x78\x8b\x61\xd5\xb2\x2f\xbf\xef\xb1\xd5\xc5\x19\x88\xbd\x84\x17\xb1\x77\x78\x96\x18\x42\x3d\xbe\x05\x67\x7c\x48\x4f\x99\x02\x6c\x62\x9c\x9d\x74\x8a\x07\xa4\x51\x12\x25\xfe\xfc\x63\x85\xfe\x72\x54\xe3\xd7\xcf\xf3\x24\xfc\x78\xfe\x63\x3f\xef\x04\xd9\x12\x81\x1d\x7b\x43\x00\x26\x6b\xa3\x4f\x3b\xc7\x8c\x78\x37\x18\x6e\x3c\xc3\x6c\x38\x88\x83\x85\x7b\x61\x3f\x48\x9d\xc3\x54\xc1\x71\xa6\xde\x79\x1c\x2c\xa2\x71\x0e\x8c\x33\xf3\xb9\x31\x1b\xad\xb4\x10\xf0\x89\x2b\x44\x96\xfa\xcb\x79\x1b\x5a\x1c\x52\x88\x50\x8e\xeb\xba\xe2\x01\x68\xac\xdd\x99\xea\x13\x6a\x73\x9c\x11\x0c\xd9\xe5\x20\x38\xe8\xfb\xa3\x4e\x6d\xce\x9c\x8f\x1b\x09\x47\xa3\x43\x22\xcf\xbb\xf1\x24\xc9\x72\x5a\x02\x81\xca\x89\x05\x31\x41\x62\x80\x2c\xf2\x56\x1d\xe1\xf2\x7e\xb5\xf6\x0e\x6f\x61\xab\xed\x10\x04\x61\xd5\xc7\x37\x85\x30\xf6\x7d\x9f\xd8\x61\x1a\x8c\xbb\xb9\xa3\xdc\xe3\x86\x08\xfb\xca\x47\xed\x8d\x5d\x49\xf9\x92\x4c\x9d\x81\xa4\x8f\xdf\x0b\xa3\x3b\x47\x6f\xa2\x7e\x0e\xc6\x9b\xa0\x10\x56\x7b\x96\xe2\x01\xa7\xd
5\x1d\x88\x7c\xb3\x39\x7b\xca\x6c\x64\x32\x9a\xdf\x59\xad\x38\xef\xec\xee\xf9\xb3\x7b\x9d\x84\xfe\xe2\x8c\x1d\xbc\xaa\x1f\x28\x85\xf6\xe7\xed\xb9\x64\xa8\x9b\x02\xbc\x91\xaf\xfd\x0a\x46\x33\xa2\xba\x90\x34\x08\x60\xbc\x10\xe7\x55\xbe\x65\xe7\x7c\x63\xc0\xeb\xfb\x9d\xf9\x10\x73\x8e\xf3\xfe\x9d\x44\x34\x42\x6d\x13\xbe\xb6\xf9\x54\x0b\xbf\x3d\xd5\xd7\x9b\x95\xde\x06\x81\x7d\x5a\xe4\x63\x14\xf9\x2e\x28\x21\x11\xef\xe4\x97\x37\xc6\xfa\x8d\x18\x3b\xc3\x21\x08\x6d\x9c\x88\xee\x04\x1d\xdf\x8d\x53\x08\x32\x0d\x40\xd4\xab\x1c\x67\xac\x3f\x09\x8b\x80\x85\x37\x7a\x65\x50\x0d\x7c\x15\xb2\x7d\xe3\x69\x88\x7d\xd8\xf6\xd1\xff\xee\x35\xbd\xf9\x03\xc7\xaf\x04\xc9\xe7\xac\x8b\xbe\x41\x0e\xe9\x04\x64\x50\x21\x91\x47\xa3\xa8\x9c\x78\xb1\x83\xc6\xe4\x68\x85\x77\x74\x76\xb2\xf9\x3b\x27\x8c\x9d\x4b\xb8\x52\x95\xbf\x56\x4f\x83\xe1\xbe\x6f\x86\x38\xc7\x68\x7d\x96\xc0\x67\x80\x91\xe6\x69\xca\x5b\xa1\x1c\x3c\xc6\xaf\xf4\x9b\x8b\x5e\xe5\x14\x75\x03\x02\xd5\xb7\xc1\x0b\x20\x91\x28\x48\x67\xcb\xee\xcd\xa9\x99\x87\x0f\x9d\x2b\x40\xce\xb2\x0a\x25\x96\xc2\x8b\x7d\xa8\x31\x7f\xce\x10\xdc\x0c\x81\x8a\x65\x16\xce\xdb\x6a\x18\xbb\x8f\xb6\x13\x11\x73\x1c\xbd\x0d\x55\x91\x23\xb0\x4c\xb9\x21\xe3\x64\x87\xa3\x1a\x05\x5c\x6d\x04\xbf\xcf\x7f\x9e\x47\xd4\xc7\x29\x3a\x75\xc9\x14\x00\x28\x54\x41\x30\x7c\x86\x2d\x5f\xa6\x34\x5b\xe8\x6a\x8b\xfc\x03\x69\xe8\x74\xdb\xa0\x60\xa0\x64\xf3\x1e\xad\xca\xe1\xe5\xd3\x53\xc1\xf3\x8b\x61\x69\xef\xd0\x96\x63\x7c\x9a\x05\x36\x7d\xbe\x4f\xb7\xe1\xef\xb9\xba\x33\xdf\x94\xb8\x8f\xc7\x9f\x9f\x1f\xcf\xab\x98\xff\x6a\x2f\x1e\xca\x93\x58\x99\x1c\xe5\x1e\x98\x66\xa3\x47\x90\xaf\x4c\xb6\x2b\xd4\x47\x4c\x15\x8b\x40\xb1\xf2\x21\x8b\x89\x2e\x00\x46\xfa\x1e\x48\xef\x54\xad\xec\xb8\x95\x64\xea\xd2\xe3\x66\x05\x7c\x6d\x23\xe3\x44\x55\xec\x94\x3e\xf2\x68\x52\x08\xa5\xf1\x59\x41\x66\xaa\x01\x2c\xd7\xc5\x4e\xe4\xb6\x5d\x5f\x3d\x9c\x41\xfb\xc1\xe8\xd8\xf7\x1f\xcf\xbc\x97\x93\x71\x37\xd2\x67\x00\xb9\x59\xbb\x19\x0d\x5e\xf0\x67\x9e\xcd\x10\x4d\x30\x68\x79\x92\x67\xb5\x28\x3a\x22\x5e\x01\xb8\xba\x2
c\x17\x95\x32\x32\xc6\xa9\x25\xe2\xf4\x16\xdb\xee\xf3\xfc\xc9\x9d\xc8\x09\xc9\x84\x2f\x07\xdc\x28\xb9\x95\xc9\x88\x3c\x1f\x41\x1e\x4c\x4c\x71\x95\xdc\x6f\x3d\x7a\x7e\x34\xfb\x7d\xb2\x90\x80\xc8\x5b\xdc\x3f\x35\x07\x50\xb4\x72\x95\x7c\xcb\x6c\x81\xc8\x28\x5f\x3d\x5f\xef\x04\x85\xbd\xc0\xcd\xeb\x86\x3b\xc1\xfc\x91\x9a\xdd\x46\x70\x1c\x81\x8c\x26\x29\xb2\xe3\x9e\x75\x80\x1b\x04\x00\xf5\x13\x0f\xbb\x6a\x7d\xf8\xfb\x34\x5d\xec\xae\xe7\xc8\x8d\x33\x44\x42\x2e\xc3\xf3\xda\x8b\xec\x77\xde\x75\xc2\x3c\xc6\x88\x3c\xab\x05\x3a\xb7\xa3\x4c\x35\x7c\x57\x28\x01\x62\x0b\x06\x16\xc6\x1e\xbb\x2c\x9f\xb7\xf3\xb7\xa6\xea\xc2\xdd\xca\xf2\xf7\xbc\xcf\xcc\x9f\x63\xa6\xa7\xa4\x69\x77\xad\xaa\x93\xc9\xb5\x3e\x49\x92\x65\x91\xb6\x60\xfd\xe1\xdb\x21\x17\xed\xd3\x84\x0e\x20\xb3\x3d\xbf\xe2\x33\xfc\x0a\xcd\xaf\x67\x7f\xc5\xef\xbb\x07\x96\x2c\x47\xe4\xde\xcc\x7b\xd7\x75\x7d\x97\x23\xc0\x50\xd0\xab\x3f\xe3\xa2\xc7\x71\xc1\x4f\x7c\x15\xdf\x03\x1a\xf6\xfb\xb4\x5e\x83\x8e\xad\x9c\x9d\xff\xe0\x82\x15\xae\x9a\x1c\x5c\x0d\x0d\x86\x79\x66\x18\x1c\xdd\x22\xbf\x22\xbc\x12\x41\xae\x1e\xd0\x80\x3f\x27\x57\x7d\xaa\x83\xf7\xeb\x29\xc4\x17\xfb\x0e\x94\x69\x2c\xd3\x5c\x5e\x63\xf3\x07\xab\x82\x04\xfd\x49\x51\x8a\x5a\x6a\x65\x2d\xd1\x59\x81\x02\x96\xd1\x73\x29\x54\xc4\x62\x46\xa2\x4b\xf2\xa8\x64\x9f\x11\x87\xae\x55\x37\x89\x21\x3d\x0b\xfb\xe9\xaf\xa0\x66\xfd\x9b\x0d\x88\x04\x53\x2a\x67\xd5\x9c\xe2\xe7\x9d\xb0\x55\xc8\x66\x7a\x7f\x35\xf2\x3b\x23\xbc\x09\x23\x08\xc6\x3d\xa9\x60\x70\xed\x87\x2d\xf3\x7f\x7b\x68\xef\x27\x6a\x10\xe0\x9a\x4c\x94\x49\x4f\x7f\xa6\x55\x4c\xb3\xfd\x74\x03\x62\x6d\x50\xb8\x5a\x7d\x5a\xa7\x6b\x88\x1e\x29\x1d\x2c\x5e\x3b\x72\x66\xd4\x15\xa1\x77\xc5\xdf\xba\x49\xb0\xe8\x50\x36\x60\x7b\x86\x09\xf8\x92\x43\xec\x2f\xd5\x9d\x27\xbb\xa6\xd1\x17\xc2\xb4\xef\x04\x53\xe6\xb8\x6c\x42\x5e\x10\x88\x16\x4d\x7a\x79\xf2\x1e\x3a\xf9\x8b\xc9\x4d\xfd\xa9\xfe\xcb\xe9\xbe\x7d\xc4\xcc\xdc\x0d\x2e\x5a\xe4\xb3\x82\x75\xa2\x42\xc3\x36\x00\x1f\x02\xc1\xc0\x89\x72\xe6\x2e\x18\x79\xb5\x0e\x58\x80\xac\x75\x2d\x5e\x7a\xf
d\xf4\x41\xcb\x37\xe0\xe0\x68\xc8\xdc\xd5\xe4\x7e\xf7\xf1\xcc\xa6\x79\xca\x68\xb8\xaa\x7a\x18\xf4\xc9\x42\x1b\xfd\x2d\x94\x48\x85\x2d\x7d\xed\xaf\x5c\x86\xc7\x1a\x81\x92\xc3\x16\xda\x4a\x18\x04\x44\x9d\xaf\x69\x9e\x24\x06\xca\x53\x85\xfc\xdd\x81\xec\xce\xd8\xe3\xaf\x2e\x4e\x94\xde\x6d\xb4\xcf\x17\xb3\x13\xb0\xc8\xfa\x89\x96\x1f\x9a\x38\x42\x15\x1a\xdf\x3f\xfe\x02\x11\xde\x0e\xae\xe9\x7d\xf8\x57\x53\xfb\xd0\x4e\x09\x46\xb8\xc9\xc6\xc9\x5e\x47\xfa\xac\x76\x81\x6c\xa2\x5c\xa5\x83\x66\x67\x51\x3a\xd8\x5b\xd5\xbf\x2e\x95\x9c\xc3\x4e\xd3\x83\x83\x24\x74\xdb\xc3\x20\x8d\x41\x2d\xf5\xdd\x36\xf4\xdc\x5e\xf6\x3e\x9f\xef\xc1\xe7\x40\x39\xc4\x7b\x25\x98\x6a\xae\xc6\xd5\x57\x94\x99\x68\xb0\x76\xdd\xc2\x8a\x7c\xbc\xc0\xfe\x8f\xc6\xf7\xd9\x35\xce\xd1\x31\x4c\x4f\xec\x83\xf0\x3c\x2c\x59\x1f\x22\xe5\x95\xb3\x89\x60\xc0\xda\xc3\x96\x11\xfd\xa8\xf6\xc2\xe8\xb7\xd1\x13\x33\x8d\x2b\x42\x5d\x11\x83\xb3\x5e\x04\x12\xde\x39\xeb\xb4\x86\x35\x4e\x94\x7b\x43\xce\xea\x91\x8b\x44\xf7\xba\xf6\x67\x65\x23\xc7\x75\xdf\xd6\xcf\xb6\xfe\x1f\x55\xd7\xb5\xeb\x36\xcf\x6c\x1f\x48\x17\xea\xed\x52\xbd\xf7\xae\x3b\x75\xc9\xea\xcd\x2a\x4f\x7f\x90\x7c\xc9\xce\x7f\x0c\x18\x89\x01\x6f\x52\x34\x87\x53\xd6\x0c\xd7\xa0\x16\x21\xee\x7e\xf5\x8d\x40\x9a\x3a\x86\x29\x21\x45\x4c\x28\xaa\x2b\x3b\x63\xed\x04\xc0\xcc\xb3\xa9\x22\x61\xc8\x7f\xf7\x6e\x25\xed\x46\x18\xf0\xfc\x46\x74\x9f\x6a\x1e\x76\x91\x1f\xcc\x76\xeb\xf2\x97\x1c\xfc\xd2\xc5\x52\xd8\x5f\x71\x59\xa1\x79\x4c\xbf\x10\xb6\x7f\xc5\x6a\x71\x27\xb6\x42\xe6\xec\x98\x1c\xb4\xb2\xab\xe2\x0c\x9f\x70\x03\xbf\x78\x79\x8e\xc9\x15\x2c\xf4\xa9\xde\x80\x09\x80\x00\x82\x6e\x61\x07\x4c\xb6\x81\xc8\xb4\xed\x43\x10\x05\xd0\xb5\x7f\x0b\x4f\xa5\x5b\xd8\x7b\x6e\x2b\x6e\xd7\xf6\x7d\xae\x74\xac\x7d\x18\xf6\xfd\xf1\x41\xa5\xc4\xfb\x04\xd9\x2f\xcb\x61\xdf\x54\x2e\x75\x9f\xdd\x38\xe8\x03\x06\x08\x04\xdd\x86\x45\x74\x41\x4d\x94\x58\x28\x07\xc0\xc9\x9e\x80\xb9\x96\x78\x7e\xef\xd4\xb5\x00\x0f\xc5\xfc\x82\x7e\x82\x3d\xd9\x81\x98\x99\xff\xa0\xc4\x0d\xbe\xa8\x28\x17\x96\x86\x97\xe8\xf7\x0b\x4
2\xc8\x87\xaa\x79\x2e\x7d\xb6\x9c\xcf\xc8\xbc\xfe\x22\xbe\x47\x9f\xfe\x1a\x7a\xb7\x0b\xab\x38\xa9\x0e\x13\xf8\x0a\x37\xe2\xa6\xe9\xa0\x31\x06\xf5\xc3\x81\x38\x0a\x17\x21\x7f\x41\x80\xaf\x9e\x62\x54\xb1\x1d\xe8\xb7\x3c\x1a\x8e\xe3\xfc\x66\x11\x72\x2c\xa9\x3a\x66\x65\xa2\xdb\xb9\x87\x2f\x75\x8d\xea\xc2\x67\x7c\x03\xe2\xb3\x75\x3d\x50\x39\xa5\x08\x40\x50\x42\xcb\x9d\x56\x87\xdb\x88\x10\xb6\xba\x45\xdd\x07\xf2\xfd\x43\x3a\xbf\x67\xbd\x69\xda\x43\x4c\x1c\x08\x52\x93\xff\xa6\x00\x75\x4c\x53\x0b\x1d\x74\x07\xd1\x1d\x15\x7b\xe8\x91\x23\x6b\x09\xe2\xac\x6c\xc8\x93\xf2\x2f\x2e\xb2\x57\xd1\x21\x54\x9c\x24\xef\xb3\x1e\xb9\x0f\x40\xd2\xd1\x2f\xc7\x73\x20\x08\xb2\x2e\x54\xd4\x27\x22\xb7\xd2\x3b\x2e\x7f\x2b\x32\x39\xab\x73\x78\xd3\xf3\x03\x85\x9d\x9b\xf6\x1b\x38\x03\x67\xb5\x81\xb2\x31\xae\x38\xf4\xde\x74\x41\x01\xeb\x9d\x4f\xb2\x14\xc0\x72\xfb\x05\xa2\x71\x84\xb3\x7d\x36\x51\xed\xc4\xbb\x6c\x9a\xc8\xf3\xfc\xed\x67\xce\xfb\x35\xf2\xbf\xce\x0c\x48\x17\x83\xbd\x6a\x9b\xf6\x65\xf8\x69\x6b\xc8\x1f\xfe\x5e\xe3\x5a\xe5\x0b\x04\xdf\x06\xdd\x27\xb6\x6c\xc5\x7c\x13\x13\x52\xce\x49\x33\xac\x40\x1a\xd4\xe1\xe1\x16\xc0\x6f\x8a\x9c\x04\x54\x59\xf2\xaf\x73\xdc\x1f\x22\x0a\xe9\x79\xe8\x7f\xa9\xb5\x38\x6a\x03\xae\xe1\x74\xb4\xad\x0d\x01\x08\x33\x85\xc2\xad\x13\xa5\x0c\xfa\xea\xbc\x5f\xee\x99\xb4\xec\x38\x73\x83\x27\xd4\xf4\xd4\xb9\x6c\x1b\x09\xe0\x09\x54\x10\x93\x2b\xb6\xb6\xb1\x13\xd6\x87\xc4\xbb\x04\xb8\x8b\xfb\xc2\xf4\x7f\x5c\x82\xcb\x52\xda\x4b\xfc\x92\x24\xa8\xe3\x34\x00\xa8\x77\x90\x2e\xda\x49\x23\x67\x46\x10\x00\x70\xa8\x3a\xbc\x88\xfc\x14\xcb\x10\xa1\x0b\xb1\x89\x50\x95\xd4\xce\x51\xca\x3f\xf5\xb7\xd6\x87\xe3\x9c\x6f\xe1\x8e\xaf\xbd\x44\x37\x1a\x4c\x50\xee\x3d\xa9\x77\x0c\x4a\x18\x01\xb0\x6d\x95\xda\x26\x34\x69\xbf\xbb\xa2\x0a\x44\x4c\xbd\x3c\x91\x1c\xa2\xc6\x00\x53\x8c\xa7\xb2\xef\x78\xaa\x4b\xe0\x21\x48\x5c\x52\x47\xf6\x6d\xbe\xff\xfc\xa5\xe1\x40\x73\xe2\xf3\x45\xa9\xa6\xa7\xc6\xc2\x96\xfb\x4e\x9c\x4f\xd5\x78\x39\xfb\x46\xed\xc3\xac\x00\x1a\xe0\xd1\xaf\x56\x55\x34\xae\xaf\xa7\xfc\x1e\x0
0\xa8\x86\x71\xfc\xf9\x96\xe8\x03\x06\x17\xbb\x51\x10\x0c\x60\xe9\x62\xf4\xa3\x67\x18\x52\x4a\x1c\x80\x99\xeb\x26\x36\x83\xf0\x19\xeb\x27\x01\xd0\xc0\x06\xeb\x43\x77\x2d\xf8\x73\x78\x26\xbb\x95\xc7\x17\x35\xf5\xf0\x44\x55\x57\x51\x8b\x20\x1f\xe8\x7a\xcd\x46\x7c\x1f\xbc\x7c\xa1\xc0\x2c\x34\x3f\xc1\x91\x5a\x0a\x87\xfe\xd4\x2a\xa9\x1f\xfb\x29\x09\x60\xca\xbf\xc8\x6b\x43\xf8\x48\xa1\x08\xfd\x4d\x25\x65\x94\x5a\xf7\x6c\x21\xab\x23\x68\x5c\xd5\x75\x18\x06\xc7\x72\xfb\xea\xd9\xb8\x79\x44\xed\xe6\x00\x98\x7e\x91\x7c\x81\x73\xe9\x2e\x5e\x56\xae\xce\x88\x24\x91\x35\x9a\x7c\x51\x36\x2a\xb2\x2c\x2a\x44\x2f\x4c\x81\x06\xf3\x08\x3f\xab\x13\x08\x43\x34\x85\xcb\x60\xf5\x42\x31\x9f\x40\xcb\xef\x71\x35\x3b\x10\x30\xdf\xd1\xf4\xd0\x96\x24\xc8\x22\x21\x14\x67\x5b\x8e\xf9\x98\x0d\xc2\x1c\x5d\xda\x06\x3a\x45\x8c\x56\x19\x03\xff\xc9\xd3\x18\xea\x70\x59\xf6\x4b\x2d\x3b\xba\xd0\x12\xd0\x69\x22\x1a\xfb\x38\xba\x72\x57\xc6\xcf\x0d\x1c\x0e\x67\x04\x67\x70\xf6\x8d\x51\x3a\x75\x50\x98\x7e\x0a\x5a\xd7\x58\x3f\x3b\xe2\xaf\x4d\x7f\x4a\xb4\x35\xcc\xb6\x2f\xea\xa5\x84\x7c\x90\x56\xd2\x49\x94\x0e\x89\x92\xe7\x4a\xce\x50\x10\xca\xb3\xcd\x8f\x8b\x2c\x14\xc3\x30\x9e\x64\x1c\x8f\xbe\xf1\xa6\xe7\x69\xae\x85\xf4\x75\x9e\x44\xbd\x69\x68\x49\x9f\x08\x99\x54\xb6\xcc\x86\x8f\xb2\x6e\x36\x11\x1f\x7e\x18\x0e\x38\x1a\xc7\xf1\x60\x56\x34\x87\x30\xd5\x3f\xbd\x96\x75\x04\xb8\xea\x23\x0c\xa4\xe3\xb2\x00\x00\x49\xd9\xbc\xed\xda\x3e\x04\xe8\x56\xf5\x12\xc0\xb6\xe8\xf0\x0a\x6d\x4f\xc9\x5c\x23\x05\x55\x36\x8a\x77\x30\x40\x96\x2d\x46\x15\x6f\x94\xbd\x4b\xfb\x79\x5f\xef\x8b\xc6\x25\x7c\x47\x56\xb7\x12\x87\x0a\xe6\x24\xbe\xe8\x96\xf3\x12\xe1\xd8\xae\xe7\xb6\x86\xde\x37\xde\xf2\x13\x53\x59\xb0\x91\x9c\x55\x2c\x4f\x30\x3d\xd1\x2f\x3a\x75\x83\xab\xbf\x2d\x5c\xc6\x01\x6c\x56\x35\x8e\x1b\xd2\x3a\x14\x47\xac\x5b\xf4\x28\x3b\xb2\xf5\xd3\x5f\x88\xe7\x7c\xe6\x45\x64\xbe\xc5\x89\x1b\x71\xdf\x4a\x6e\xb1\x5a\x7c\x8b\xe5\x01\xd6\x75\x5b\x1e\x3c\x99\xe1\x15\xce\xe0\x23\x8f\xb5\x7d\x7c\x91\xd3\x6d\x2a\xc9\x1f\xae\x59\xaa\x7f\xf9\x5c\x02\xa
a\x0f\x60\x12\x27\x30\x1d\x15\x28\x68\x68\x8b\x9a\x04\x25\x96\xe1\x7e\xee\x83\x65\x05\xf5\xea\x57\x12\xb7\x6c\x9d\xbf\xd1\x76\x20\x70\x31\x87\xa2\x1b\x84\xe9\x97\x4e\xfb\x7d\xea\x7b\xcf\x94\xe9\x9b\xc2\x0a\xcb\x9f\x8d\x48\xfc\xb2\xc1\xe8\x1d\x92\xaf\xf6\x3d\xd7\x50\xdb\x0b\x6e\x65\xb8\xa2\x31\x5f\x9a\x8e\xfc\xbe\x57\xe8\x3c\x72\x61\xfe\x70\x07\x71\xd7\xb8\x10\x09\x92\xa3\x18\x5c\x51\x72\x9d\x72\xc5\xa8\xce\x99\xdc\x5e\x3e\x98\x97\x75\xbb\x6b\x74\xf2\x9d\xde\xf0\x59\xa2\x63\xf3\x90\x91\xa3\x16\xd9\x85\x0a\x9b\x66\x2f\x0a\x32\x8d\xd7\xf0\xc5\x05\x9d\xec\xa5\xdb\xd2\xcf\x41\x80\x95\x02\xe4\x49\xdf\x4e\x03\x81\x1f\x27\x47\xc5\xc7\x52\xfb\x63\x9c\x6b\x7d\x52\xc6\xda\x90\x1d\xd1\xf6\xe5\x49\xb0\x4a\xb2\xca\x5b\x43\x6b\xda\x20\xc4\xc6\x78\x16\xc3\x2a\xeb\x49\xb3\x16\x14\xdf\x48\x43\x23\xfc\x4c\xc7\x0f\x55\x92\x4d\x06\xde\xa7\x09\xa1\x68\xde\xb5\xac\x45\xda\x4c\x12\xfe\xd4\xb7\x18\x4a\xb9\x9b\xdf\x2f\xc5\x3d\x94\xe8\x10\x22\x0a\x16\x5b\x8a\x49\x2f\x48\x94\xd6\x48\xd8\x61\x7b\xd0\xdf\x0c\xf9\xcd\xc2\x32\x13\x03\x1a\x62\xdf\x10\x6f\x6f\x7c\x30\xd4\x1e\xa8\x48\x1e\x26\x48\xb3\xbb\x31\xea\x97\x3f\x93\xea\x98\xec\x79\x5b\xd9\x83\x48\xcd\xee\x17\x17\x76\x2b\xe8\x9b\x2e\x9c\x01\x95\xdc\x0e\x6b\x4e\xb8\x19\xb2\xae\x03\x1a\x42\x25\x87\xe7\xf0\xf1\x16\xaa\xac\x6a\xd6\x8b\xdd\x45\xf5\x26\x54\x65\xcb\x1f\xb0\x46\x26\x7f\x23\x33\xa7\xe6\x82\xd6\x1b\x8e\x3c\x36\xb1\x98\xa3\x08\xb8\x44\xf4\xf3\xc4\x32\x0b\x5e\xe1\xd6\x6d\x7e\xea\x3e\x0d\xa6\x40\x8f\x05\xc4\x8c\xce\x4d\x40\x9b\xc2\xc6\x06\x42\x8e\x3a\xce\xb0\x25\x20\xb0\x37\x1b\x97\x2d\x82\x97\xfc\xbf\x36\x93\x7d\xe2\xee\x5f\x55\x50\x0e\xb9\xb6\x51\x8e\xf8\x82\x60\x40\x94\xd6\xe4\x2f\xa8\x95\xf2\xca\x50\xf6\xd9\x91\xb5\x1f\x0c\xd1\xc2\xdc\x65\x8f\xd0\x92\x64\x10\x69\x4e\x54\xf5\x4d\xed\xc9\x81\x8f\x19\x20\xb9\x05\xc8\x5a\x5e\xb3\xe8\xef\x52\x33\x61\x10\x9c\xd0\xcb\x47\x7c\xcf\x88\x73\x0f\x97\xa2\x01\xf7\x95\xdc\x7a\xd8\xfc\xdd\x47\x97\x3c\xc1\xaf\x8d\xca\xcf\xa2\x4f\x27\x43\x4f\x07\xbd\x64\x51\xd8\xa3\x3e\xfe\xb5\xc5\xbe\x73\xcb\x12\x40\xc
3\x30\x8c\x42\x9d\x3b\x99\x1f\xce\x1b\x89\xd1\x28\x60\xdc\x8b\x19\x0f\xb4\x23\xce\xe1\xfb\xb4\xef\xe2\xc5\x28\x70\xa1\xd0\x5f\x6b\xe8\x02\x93\x0f\xce\xc8\x34\xfd\x10\x3e\xb6\xc3\xdd\xfc\xcc\xd3\x6a\x5f\xb8\x0d\xbf\x81\x6c\xde\x81\x4c\x1e\x74\x7e\x45\xd6\xd6\x47\x75\xa9\xac\xb6\x88\x4c\xbc\xf7\xed\xb6\x66\x8f\x02\x2d\x14\x3e\xd1\x39\x7b\x00\xd4\x24\x9b\xfd\xb9\x20\x93\x0e\x10\xf3\xf3\x4b\x29\x39\x90\x49\x27\x22\x73\x49\x2b\x2c\x4d\x7c\x51\xf0\x0d\x65\xf3\xd7\x63\xf2\x17\x65\xf1\x2d\x76\x6e\x14\x28\xcc\x63\xb6\xc7\x73\xf5\x21\x2b\xfc\xf4\xc7\xa9\x9b\xa6\x82\x5c\x8e\xf3\x1c\x30\x64\x31\x3a\x06\xfb\xe1\x6b\xe6\xba\x11\xa5\xa7\x21\x2f\x41\x0c\x98\xfc\x05\x33\x59\x08\x2c\x78\x2a\x9e\x69\x38\x0c\x7f\xfb\xaa\x4d\xc0\x35\x46\xcb\x5f\xc0\x76\x01\x1b\x5d\x2f\xa5\xb0\x7a\x87\xce\xc4\x3e\xe7\x78\x3b\x25\x36\x05\x9f\x60\xc6\xbb\xbf\xd6\xef\x22\x95\x1e\xb7\x3f\xc2\x6d\xfa\x51\xf8\xa6\x69\xec\x1d\x4c\x31\xda\x1d\xbd\xc2\x94\x97\xbe\xef\x47\xa9\x28\xd4\x3c\x80\x02\xce\x82\xa7\xa6\xbe\xfa\x3e\xdc\xbf\x66\x29\x43\x37\x50\x58\x6e\xde\x34\x10\x02\x50\xfa\xc2\x37\x62\x5b\x9b\x95\x6a\xfb\xa0\xed\xcd\xb6\x2b\xa6\x14\x29\x7f\xb3\xb1\xa9\xbf\x1c\xaa\xa6\x90\x6e\xc0\x1e\x6b\x1b\x40\xca\x71\x5d\xd7\x39\xff\x65\x87\x29\x05\x86\x22\x47\x1e\xff\xf5\x59\x72\x51\xcc\xe2\xda\xe6\x2a\x6c\x1d\xfc\xe4\x2e\x94\x49\xbf\xfc\x43\x6f\xa5\xf6\x70\xfd\x86\x64\x9c\xe1\x13\x47\xe4\x68\x95\x5d\x08\x02\x21\x23\x91\x2e\xdd\xaa\x26\x5b\x99\xe1\x53\xe6\x11\xdb\x79\x12\x53\xbf\x0e\xc1\x3a\x04\xca\x22\x2a\x2a\xab\xdf\x15\xa7\x64\x1d\x96\x72\x58\xe8\xac\x77\xdc\xc3\xf8\x6d\xc5\xdf\x5c\xea\xea\x81\x73\xae\xf2\x01\xc1\x1c\x96\xdd\x76\x4d\x06\xb9\xec\x0e\xfa\x5b\xe8\x5b\x5d\x26\x71\x9d\x90\x7e\x46\xc6\xa1\x1d\x9e\xb1\x59\xf5\xab\x87\x57\x50\x9c\xe6\x7c\x96\xbf\x5e\xe6\x3f\x91\xb3\x0e\xc1\x3c\x26\xa0\x88\xe9\x6c\xa6\xbb\x9c\xe6\x7a\xba\x0f\x09\x1f\x56\xed\x97\x64\x8f\xdc\x59\x61\xd9\x9c\x22\xfd\x72\xaf\x44\x5a\x6c\x53\xf0\xe5\xc6\xc8\xfa\x21\x62\xd7\x5d\x47\xb3\x7d\x8a\x7b\xe8\x81\x9a\xbe\x09\x7e\xbe\xbb\x2f\xd3\xe0\xf7\x1
9\x2b\x84\x19\xf9\x30\x5f\xb8\x9a\x7c\x5b\xab\x1c\x32\x50\xb6\x3e\x25\xa1\x8f\x2c\x02\x50\x9f\x7b\x62\x6b\xad\x5b\x91\xe3\x2d\xb0\xc9\x21\x28\x74\x74\xb9\x0d\x04\xdb\xf6\x3c\x7b\xa2\x19\x9f\xf6\x38\xda\x3f\x3c\xc1\xce\x71\xe2\x15\x42\x8d\x24\xfc\xe6\x38\x68\xa2\x25\xf7\xbc\x48\xf4\x06\x02\x50\x25\x1f\xb9\x15\x08\xbb\x3a\xa2\xe7\xb5\x69\xed\x1e\xac\x0f\x53\xfb\x6b\xb9\x21\xb9\x6e\xd9\x86\x79\x77\x54\xf0\xd8\xbe\xf9\x92\x66\x7b\x13\x73\xff\xfc\xe4\x2e\x58\x36\xce\x69\x60\xa1\x6a\x49\x6d\xb6\x27\x40\x23\xf2\x4b\x88\x9f\x91\x8a\xbb\x45\xdb\xc9\x1b\x5a\x84\xb0\xcb\xdd\x0c\xe5\xe5\x5d\x42\x9a\x6c\x5c\x87\x46\xff\x72\xa7\xd6\xed\x1b\x42\x7f\x73\x31\x08\x96\x47\xdb\x4b\x82\xef\x03\x33\xa6\xfc\xb2\x63\xea\x7f\x38\xba\x30\x04\xc4\x40\x03\xe8\xae\x01\x8a\xf6\xe8\x7a\x85\x1c\x47\x1e\xc2\x1e\x8a\xed\x69\x32\x7b\xa5\xd4\x93\x69\xf4\x41\x83\xa0\x70\xf8\x65\xab\x2b\xd9\x91\xba\x84\x13\x91\x0c\x8e\x96\x33\x1b\x97\x62\x8f\xc2\x8d\xd1\xe0\x86\x98\x4a\xe2\x2d\x6f\x00\x76\xc3\x11\xdc\x9f\xe6\x0f\xee\xcd\x1a\xc0\x05\x42\x2a\xab\xa9\xcd\x46\xef\x7e\x00\x1f\x19\x72\xe4\x73\x9c\xd6\xf3\xc7\xdd\xad\x52\xc0\x25\xd4\x88\x08\xe0\x93\xfb\x1d\x91\x77\xfe\x9a\xb2\xe1\xc3\xa0\x11\x5e\x1f\xeb\xeb\x93\x0f\x48\x61\xaf\x23\xb3\x58\x29\xb9\x54\x35\x1b\xff\x8b\x11\x08\xce\x6b\x90\x16\xad\x82\x7b\x44\x14\x71\x99\x4f\xe5\x94\xe2\x75\x4e\x66\xb9\x35\xaa\xb6\xf5\x2c\xbb\xb5\x79\x9b\x36\x47\xa6\x4a\x56\xd1\x47\xc3\x1b\x2e\x16\x58\xd1\x8d\x58\x77\x22\xb8\x2b\x05\x28\x20\x2e\xab\xc5\xa0\x66\xff\xdf\x98\x1e\x49\x93\x44\x18\x17\x0f\xe5\x0e\x52\x70\x6c\x29\x52\xad\x68\xf3\xd1\x51\x5a\x2f\xac\xb7\x3e\x2d\xfb\x04\xa7\xb9\x9b\x5a\x6d\xa9\x31\x4c\x87\xc7\x48\xba\xcf\x5c\x6a\xf7\x7c\x2b\x53\x91\xb7\x25\x6e\x2e\x59\x57\x3e\x3c\xb5\xe8\x7f\xfa\x55\x39\xef\x3a\xf9\xa0\x6b\xf8\xfd\x95\x14\xab\x96\x8d\xc7\x86\xd0\x71\x69\x40\x59\x43\x96\xc9\x34\x22\x29\x81\xb2\x2c\x8e\x68\xa7\x5e\xa3\xe5\xc0\x52\x93\x5a\xfb\x44\xf8\x20\x9b\x6e\x75\xd7\xdc\xc8\xc7\x2f\x6b\xc4\x39\x95\x0f\xb3\xef\xc0\xc9\x3f\x58\x64\xc4\xeb\x29\x69\x63\xbc\x9
d\x24\xe5\x7b\x0e\x90\x1e\xaa\x71\x8c\x7f\xac\xae\x96\xb3\x04\x31\x61\xb7\x25\xef\x36\xb0\x36\x03\x9e\x38\x29\xfd\x5d\x5f\xf2\xcb\x7e\x20\x74\x43\xf2\xe4\x41\x0c\xc1\xda\xf3\xb6\x53\x2d\xb6\x31\x3b\x03\xf2\x9f\x48\x34\x62\x67\xc9\x1f\x28\xfa\x14\x99\x61\x6b\xca\xfe\x40\xb0\xf9\x06\xb0\xe9\xe7\xca\x1e\x80\xd4\x43\x23\xaf\x8d\xac\xf7\x19\x18\x67\xd9\x54\x13\x03\xee\xa2\x94\xcd\xf8\x9b\xa9\x5b\x04\x1f\x69\x15\xc2\xe5\xb7\xb0\xab\xe9\xbb\xe1\xfe\x4e\x9a\x2c\x31\xd3\x8f\xf3\x4f\x96\x15\x65\x11\xb0\xda\x72\x93\xc0\x21\x0f\x1a\x2e\xa1\x72\xca\x70\xd4\xc5\xc7\x39\x7d\x4c\x76\x82\x49\x60\x38\x0d\xab\x4c\x5e\xdb\x2d\xdd\x42\xbd\x3e\x02\xd3\xfe\xfe\x4e\xcc\x91\x1f\x1e\x47\x43\x3a\xaf\x11\x39\x62\xb4\x40\x22\x7f\x6a\xb0\x98\xcf\xe4\x14\x4f\x17\x7a\x6d\xbc\x11\xda\xba\x1e\x5b\x04\x9b\xbe\xb3\x47\xa5\x2b\x63\x81\x78\x8d\x17\x71\x78\x63\x10\x81\xa9\x21\xb0\x58\x15\x4f\xea\xba\xcf\xc7\x8c\x39\xb0\x14\x0c\x4b\x1e\x2d\xc1\x9e\x77\xe8\x87\xfe\xaf\x33\x6f\x5f\x26\x0f\xee\x84\xc3\xe7\xfb\x57\xf7\x73\x4a\xcc\x7d\x73\x98\x76\x5d\x5a\x0a\xc3\x30\xdc\xeb\xf4\xeb\xcc\x9f\xd7\xb0\x70\x0c\x43\x3f\xf1\xdc\x62\xdd\x0a\x7d\x6d\x19\xbe\xf9\x35\x96\x24\x14\x00\xbf\xc4\xb9\x3d\xd5\x06\x8b\xf2\x35\xe4\x8f\x28\x9e\xff\xfa\xe3\x29\x18\x24\x1b\xd7\x0a\xec\xdd\x47\xf0\xfd\xc3\xce\x11\xfe\x93\x79\x03\x01\xc7\x1f\xb4\x94\x85\x2d\x17\x2b\x10\x2a\xeb\x0a\xf2\x13\x7f\xc3\x2f\x49\xbb\x23\xd6\x15\xb3\x08\x5f\xb2\x68\x59\x87\x31\x14\x5b\x5b\x36\xde\x9d\xa8\xec\x03\xb5\xbf\x1a\x4b\xed\xc6\xf5\x07\x63\x77\x2e\xe3\x35\x48\x83\x6f\x7a\x58\xe5\x47\x73\xd1\x48\x34\xc2\xc7\x85\xbe\x7a\x88\x5a\xf5\x16\x2e\x0e\x68\xde\x27\xe8\x5c\x74\x17\x4e\xbd\x54\x6c\xf6\x61\x5e\xb5\x6e\x25\xce\x7c\x01\x63\x95\xf6\x66\xbe\xbd\xa8\x4d\x39\xee\xf5\x56\x2e\xe6\x1f\x87\xb8\xf2\xeb\x1c\x73\xea\x47\xf9\x6f\x9f\xf3\x0a\x31\xbb\x5e\x63\x96\x37\x14\x53\x64\x1a\xe7\x9c\x7f\x68\x6d\xc7\x8e\xf8\xf3\xa0\x27\x3e\x79\x49\xb3\xc3\xe3\x48\x9e\xc8\xa3\x2b\x3b\x72\x7c\x38\x2c\x66\xbf\xe4\x33\x8d\x9c\x14\x71\x3f\xb9\x07\xf9\xf2\x05\xdc\xe6\x15\xc2\xe6\xf
8\x91\x3e\x3f\x48\xd1\x08\x12\x37\x3e\x7d\x19\x0d\x38\xf9\xd5\xe2\x47\xa7\xcc\x31\x49\xa8\x53\xf5\xf7\x25\x7f\x3b\x57\x5d\xd7\x71\x11\xeb\xe7\xd5\x7c\x44\xe6\xc7\xea\x8d\xbc\x44\xc7\x93\xbf\x85\xf9\xac\x53\x33\x25\xc1\x37\x3d\xe0\x0c\x63\x34\x3c\xf8\x91\x4b\xc7\x76\xd0\xd3\xfe\x8a\x55\x6a\xf1\xbe\x07\xf1\x65\x1a\x97\x9f\xb8\xb6\xe5\x6b\x1d\x5c\x69\xd9\xd2\xe1\x01\xc2\x47\x0f\xbf\x51\x14\x96\xb1\x0a\x8d\xde\x63\x59\xc6\x1d\x29\x93\xff\x4d\x9e\xf8\x69\xe4\x9f\xbb\x4c\x9c\x96\xff\x67\x57\x29\xf4\x0e\xf9\xff\x74\x0e\x52\xd2\xb5\x8f\xda\xa4\x77\xf9\xef\x8d\x27\x11\xc5\x8d\xf6\x63\xb2\x20\x4e\xfa\x5b\xe8\xc1\xb6\x36\xae\xc8\xe1\x07\xb0\x49\x56\x49\x5e\xe5\xd8\x4d\xe6\x98\x67\xcb\x88\xa0\x59\xc5\xce\x3e\x7f\x74\xc4\xbc\x2f\x38\xdf\x42\x10\x14\xf5\xa3\xf7\x4b\x82\xcf\x6d\x25\xc0\x18\x03\x0b\xb8\x7e\xd8\x31\xe1\x80\xab\xca\xb9\x6f\xb8\xb6\x01\x9a\x27\x99\xa6\xbb\x43\x18\x96\x61\x19\x1f\x34\x5d\x27\x99\xda\xde\x88\x76\xf7\x24\x4c\xfb\x2c\xad\xc9\x7f\x7d\x49\x8d\x4b\xd4\xae\x9a\xdc\xbe\x96\x7d\xbe\xdc\xc3\x30\xca\xc7\x65\xcb\x11\xfa\x7b\xf0\x4d\xe5\x47\xe6\x52\x59\x7a\x43\x4c\x1e\xcc\xd4\x93\x4a\x6d\x16\x89\x55\x1c\x0a\xeb\x7c\x51\x7d\x88\xea\xdd\x49\xab\x9d\x0b\xc9\x90\xdf\x19\xaf\x2d\x77\x0e\xcf\xaa\xe2\xfb\xf9\xa7\xfe\x6b\x56\x3f\xc6\x11\x4f\x94\xcc\x11\xf1\x89\x9a\x29\x4e\x6b\x83\x77\x87\x62\xe2\x2d\x3a\x1c\xfb\xc1\x91\xc3\x68\x94\x62\xdf\xf9\xfc\x7a\x3b\xf6\xbd\x0e\x01\x50\x5e\x29\x7b\x8f\x3c\x43\x8e\x2d\x43\xc6\xba\x6a\xd9\x52\xfc\xe5\xd0\xa0\xa2\x68\xa9\x50\x9f\xfc\xd5\x99\xf3\xb8\x8c\x29\x92\x4a\x6e\x90\xdb\xa0\xa8\x88\x4c\x1a\xd7\x28\xb5\xac\x99\xb4\x0b\xf5\xa9\x14\x62\x05\x4e\x0a\x35\xb4\xde\x03\x14\x21\xfd\xf5\xd0\x0f\xe3\x2e\x8f\x68\x9c\xa9\x38\x9c\x99\xf4\x99\x49\xeb\x7d\x2a\xfd\x1e\x41\xb8\xb7\xe4\x77\xd2\xd8\x49\x0b\xcc\xef\x4f\x5f\x5c\x57\xe6\xf1\xe7\x10\x5c\xe0\x24\xd3\x8c\x00\xb6\xd5\x2f\xcb\x5a\xb2\x8d\xc0\x43\xfa\xaf\x0c\x23\x37\x26\xd1\x76\x5a\xb9\x66\x0b\xa7\xee\x3d\x29\x8d\x17\xaa\x70\x19\x87\x28\x0d\x6a\xeb\x7a\x6e\xde\x29\x09\xd1\xa7\x77\x24\xa0\xa9\x9
2\x28\x75\xae\x7f\xf9\x4c\x51\x50\x16\x89\x6d\x0a\xd1\x03\xe6\x1e\xf2\xa3\x63\xcd\x86\x15\xd9\x4c\xfb\x28\x3f\x16\xa8\x46\x69\x9e\x53\x25\x04\xeb\xd1\x45\xa2\xe8\xc7\x15\x8b\xcb\x5f\x40\x02\x3a\x6b\x34\x44\x5d\xb8\x2c\xeb\xd8\xa4\xba\x27\x16\x66\x10\x06\x84\xcf\x8d\xb7\xc5\xc0\xf3\x3f\x75\xda\x85\x7b\xe1\xd5\x76\xf5\x13\xf8\x8e\x05\xff\xc9\x90\x63\xc9\x23\x7a\x2f\x6c\x7d\x7d\x5b\xc9\x7b\xe0\xf3\xd7\x7f\x3d\xb6\xe2\xdc\xb6\x02\xe0\xc4\xbc\x82\x3d\x8f\x08\x34\xa5\xc7\xe7\x5d\xb3\x51\xed\x51\xdf\x3b\x81\x69\xc4\x89\xd4\xa8\x4c\x7f\x50\x44\x5c\x69\xff\x1e\x1f\x97\x55\x16\x79\x26\xbd\xef\x70\x93\x38\x6e\xab\xf8\xf0\xbc\x6b\x3e\xe2\x5f\x13\x40\x1a\x60\x92\x6c\x30\xb1\x6c\xa6\xe4\xf8\x98\x44\xb7\xe5\x31\x16\x61\xdc\x2b\x74\xe8\x9e\xf8\xa1\xe3\x0a\x3d\xc8\x28\x3c\x75\xf4\xa2\x5b\xee\xd3\xaa\xff\x6a\xdb\x15\xc5\x57\xd1\xfd\x1d\x83\x57\x05\xf1\x06\x98\x48\x60\x5b\x89\xf3\xde\x73\xa1\x2f\xf1\x28\x9a\xa3\x92\x84\xcf\x1c\xa2\x5d\xb3\x7d\x31\x02\x28\xcf\x25\xea\xdb\xcb\x72\x91\x63\x8b\xd0\x83\x6c\xa9\x23\x04\xbe\x48\xe0\xee\xb5\x94\x8e\x15\xef\xf3\x8b\xf6\xd3\x1b\xce\x60\xe7\x28\x84\xc0\x22\xd3\xd0\x5a\x26\xab\x6e\x5e\x98\x10\xae\x51\x68\x0d\x75\xaf\x98\xcc\x6d\x04\x1e\xe0\x60\x95\x0b\x7c\x40\x70\x1b\xf9\x8c\xd4\x08\xd7\x84\xb5\x8f\x61\xb7\x99\x34\x2c\x51\x9e\xed\x10\x60\xdf\x83\x41\xdb\x75\xed\x63\x45\x96\x54\xea\x4f\x0f\x24\xb6\x39\x6c\xd2\x67\x48\x11\xfb\x6a\x37\x90\x0a\xa3\xc7\xcc\xb2\xb6\xad\xe4\x79\xef\xa9\x34\x52\xa8\x64\x45\x6c\xf5\x82\xd1\x3e\x45\x75\x8f\xfa\x65\x5a\x47\x2d\xc6\xb9\xc1\xa6\x06\xc4\xb4\x13\xd1\x50\x96\xe3\xe3\xaf\x60\x1c\x22\x18\x08\x00\x93\xcf\x60\x43\xdf\x4f\x06\xb3\x24\xe5\xbf\x39\x74\xbd\x6c\x9f\x56\x85\x8e\xbb\xa2\xf6\x5c\x52\xb7\xb3\xfc\x9e\xde\x68\x1a\x40\xa4\xc5\xdb\xf9\x86\x4d\x63\x2c\xb5\x0e\x97\x5b\x06\x1f\xd2\xfd\x5a\x5f\x78\x18\x0b\xd1\x58\x06\x23\x8a\xd1\x93\x05\x22\xfe\xaf\x4e\x4d\xb4\x4e\xed\x8a\x69\x21\xd2\xfe\xd1\x21\x55\x74\x83\x70\x4a\xe1\x52\xf7\x88\xaf\x4f\x54\x5e\x60\xa8\x28\x6e\xcb\xfa\x7b\x29\x4a\x85\x37\x3d\x5b\xe5\x68\x45\x7f\x3
3\xeb\xd7\x7b\x3f\xd7\xb1\x18\xb0\xc9\x5f\xde\xa0\x4c\x61\x9f\xe9\x22\xe9\xdf\xb8\x9f\x8f\x0a\xe1\x5e\xcb\x0f\xd5\x46\x56\x9f\x04\x1e\xc8\x0a\xab\x65\x8c\x08\x32\xb5\x55\x55\x44\xc4\x48\x78\x42\x58\x9d\x6a\xfb\xad\xa0\xea\x58\xb0\x5a\x5f\x40\xbc\x85\xb7\xd1\x3d\xbc\x4e\xf1\x8a\xee\x88\x7b\xee\x4a\xbe\x9b\x42\x69\x88\xd9\xf0\x7e\xee\xe1\x7c\x54\x53\x51\x99\x4c\x0d\x51\x22\x7f\x80\x70\xcd\x7f\x6d\xba\xf5\xec\x2b\xd3\x2a\xc2\x4e\x3f\xa6\x52\xa0\xfe\xe4\x88\x18\x86\xa3\xc0\x08\xa0\x74\x9c\x1a\x2e\x34\x04\x6f\xb0\x46\x78\xf5\x4b\x94\xc8\x13\xaf\x5d\xb6\xfa\x1c\xd9\xa6\xf1\xd0\x4f\x6c\xa3\xbc\x3e\x82\xee\x6b\x4f\xc1\x5e\x4b\x56\x2f\x72\x23\xe6\x27\x41\xf5\x81\x00\xe6\x35\x1b\xc3\x34\x89\x38\x8e\x23\xc2\xde\x2f\xca\x68\x8a\x2b\xf0\x59\x23\x68\xc0\x80\x73\x56\xbc\x31\x95\xc6\xac\x8e\x61\xbe\x38\xe3\x8d\xe8\xbe\x39\xcc\x86\x43\xa2\x51\x68\xf9\xc1\x2a\x9e\x09\xe6\x06\x9b\xd5\x9f\x7c\x1c\xff\x99\xac\x76\x20\x01\x49\xd4\x94\xc5\x0b\xe1\x0a\xad\xed\xe9\x51\x83\x33\xda\x27\x31\x2c\xe4\x2f\x11\x41\x19\xff\xdd\x6a\x10\x78\x0e\x79\xa0\x2a\x4f\xfa\x06\xb7\x88\xff\xda\x53\x3f\x40\x4d\x84\xfe\xea\x2b\x81\x8c\xf3\xa0\x6f\xdf\x26\xad\xaf\x7e\x40\xa5\x40\x34\xc9\x3f\x28\x30\xc7\xf3\x0a\x60\xb8\x9d\xea\x12\x5f\xfe\xfc\x12\x47\xbe\xa5\x51\x38\xa4\x48\x19\x47\xf0\x41\xfa\x44\xfd\x21\x92\x2d\x39\x54\x98\x28\xc7\xea\xa8\x6a\x0a\x43\xfd\xe0\xa8\xe3\xde\x98\x97\xcf\xfc\x2d\x33\x85\x65\x93\x63\x7d\xde\x6f\x8e\xf2\x24\x58\xfa\xbd\xdd\x63\x70\x5c\x10\x3d\x35\x38\x2c\xee\x18\xf6\xf6\x87\xfb\x80\x67\x95\x1c\xa1\x6a\xb7\xe8\x54\x04\x80\x91\x93\x46\x96\x50\x56\x9f\x62\x52\xd7\xac\x5b\x4b\x7f\x30\xfc\xd9\xbd\xf2\xf5\xdd\xef\xf8\x38\x2a\x3b\xde\xc6\x18\xc6\xb0\x83\x5f\xfd\xd4\xb7\x3a\x2e\xea\xb2\x70\x18\x52\xd1\x69\x84\x54\x1c\x47\xe9\x33\xa8\xcb\xa0\xf0\x0c\x64\x9b\x6f\x82\x04\xcc\x1a\xb1\xde\x9b\xfc\x87\x2f\x31\x9d\xea\x4a\xea\x5c\x29\x0e\xa1\x2b\xdc\x9a\x4d\x21\x4c\xff\xfa\xbd\x91\x76\xcd\xa6\x8c\x44\x5d\x57\xd0\x0c\x10\xda\xcc\x18\x8e\x11\xf4\x92\xf1\x4b\x25\xd9\x63\x9e\xbb\xa4\xeb\x1f\x5f\xc5\x41\x0
b\x3e\xb6\xdc\x3c\x42\xa5\x95\x45\x20\xe9\x51\x98\x16\x19\x61\x68\x40\xf4\x6f\x9e\xf4\x43\xab\xe8\xfa\x82\x6a\xff\x7e\x6e\xa5\x3f\x7d\x14\x00\x61\xa0\x7e\x34\x77\x54\xd7\x6c\x52\x89\x2a\xfd\x9c\x38\x27\xa4\x23\x18\x7f\xc9\x91\x06\x7d\x58\xaa\xee\xc8\x24\xab\xbc\x89\x3b\x88\x77\x0e\x81\x69\x04\x66\x89\x34\xa7\x5d\xa3\x0d\x1b\xd5\x27\x66\x30\x6c\xca\x48\x8d\x09\x35\x46\x2c\xfe\x34\xec\x65\xdc\xea\x18\xbe\x11\xc7\x20\x23\x81\x00\xdf\x55\x3a\xb7\x0f\x96\xf0\x02\x6e\xfb\x58\xc8\x29\x21\x1a\x6b\xc1\xee\xe6\xfe\x4a\x55\x47\xe4\x81\xad\xfd\x39\x72\xc8\x87\x43\x97\x37\xc5\x6c\x83\xab\xfa\x0d\x43\x47\x00\xac\x5b\x64\xaa\x91\x40\x8f\x6f\x7c\xa0\x5b\xe8\xf5\x2b\x2b\xf4\xcc\x1f\x9b\xc8\x26\xa5\x6a\xf9\x01\x25\x36\x1d\x3f\x07\x90\xf8\xe4\x9c\x0a\xda\x03\x44\x4c\x0b\x51\xfd\x7e\x63\x5f\x01\x4b\xbd\x51\x43\xd2\xcf\x87\xae\xcc\xb2\x14\x13\xf7\x53\xda\x0b\xd7\x38\x2b\x68\x63\xda\x8a\xc7\xc8\x35\xc7\x71\x09\x6c\xd0\x3a\x04\x5b\x15\x27\x97\x37\x0a\x34\xee\x25\xef\x7f\x58\x0d\x77\x93\xe5\x4a\xd3\x94\xaf\xc6\xe7\x7a\xad\x92\x76\x23\xc6\x6b\xd0\xc6\x6b\xf8\xdc\x1a\xce\xed\x2c\xf5\xcf\x15\x61\x6d\xc8\x22\xd2\x85\x7c\x69\xba\x0e\x5c\xd9\x31\x15\x78\xd5\xb5\x65\xdb\x36\x62\xdd\xb6\x4c\xb2\x76\x7e\x47\x78\xea\x3f\x9f\x5f\x6c\x8b\x28\x44\xbf\xf2\xdd\xf5\xfc\xfc\x35\xb1\x98\xc1\x64\x85\x24\x80\x39\xbd\x2c\xde\xa1\x6d\xdf\x20\x2d\xc8\xe6\x4b\xca\x70\x3e\x6d\x55\xce\xdd\x6f\xfb\xd0\xcd\xbb\xbe\x94\xa2\x62\x26\xf8\x45\xa0\x01\x05\x7c\x06\x0d\xab\x06\xe6\x3f\x6e\x0d\x83\xeb\xa4\x16\xab\x3c\x2f\xf3\x61\x43\xb8\xf2\xad\x96\xc9\x3a\xd9\xe3\x7c\x86\x24\xe6\xb5\x5e\xe3\x11\x9c\x5e\xe2\x8a\xc5\x59\xa3\x30\xa5\xec\x05\xac\x49\xd4\x22\xed\xd0\xe7\x13\xc5\xe2\x1d\x7c\xd4\x75\x5c\x1d\xc6\x50\x67\xe0\x7f\x18\x8f\x4e\xd0\xfe\x52\xcf\x4b\xf3\xe5\x0d\xf3\xd9\xec\x34\xe7\x19\xcc\x7a\x8d\x57\xb8\x08\x99\x2b\x16\x57\x1a\xdd\x94\x9e\x56\xba\xde\xc0\x9a\xac\x53\x45\xe8\x91\xab\x7b\xac\xd2\x6f\x28\x83\xff\x04\xa8\x19\x2a\xda\xc8\xfe\xd4\x3b\x34\x42\x16\x7a\x6b\x25\xb7\x42\xb5\x38\xee\xef\x7d\x06\xbe\x94\xbd\x6f\xfb\x3
1\xec\x1a\xeb\x15\x53\x0a\x99\x6d\xa0\xa5\xbe\x92\x6b\x97\xec\x04\x6e\x2d\xe3\xb5\x48\x7c\x5f\x00\x46\x04\xec\xd4\x31\xd3\x5f\xea\xaf\x7a\x8f\x36\xc9\x39\x4d\xff\x9f\x44\x2b\xc7\x54\xc8\x5c\xa2\x89\x89\x13\xae\xdf\x10\x07\x6d\xb2\x96\xf1\x12\xa6\xec\x16\xb2\xee\xdd\x60\xbb\xc2\xcf\x6a\x3f\x64\x32\x0e\x7a\x7d\x93\x00\x63\xbc\xe6\x89\xf8\x81\x02\xb8\xe5\x15\xc4\x5c\x70\x43\x9d\xfa\x40\xeb\x1f\xf3\xaf\x6e\xdf\x95\x0f\xc1\x5e\xc9\xdc\x8c\x4d\xef\xd5\x2a\xf6\x65\x09\x1b\x46\xd4\x54\x9a\x61\xa9\xa1\xc4\xc6\x60\x3d\xe3\x46\xd4\x80\xed\xb5\x72\x09\x88\x09\x02\xbe\x70\xb5\x41\xc0\x48\xc1\x92\x53\x78\xc1\x1b\x25\x32\x3b\xff\xce\xfd\xce\x6c\xf1\xe8\x3f\xe3\x8a\x9d\xb7\x96\xfe\x8c\xf0\x2d\x2c\xa8\x93\x4f\xc4\x0e\x31\xc5\x21\x5c\x16\x43\x6f\xb4\x8e\xd6\x06\x2a\x8b\x2c\xde\xa7\x63\x5d\xb1\xc8\xbf\xa5\x3c\x83\x31\x01\xc6\x8a\xb1\x49\x21\x57\x3b\x36\xaf\x72\xf6\x45\x3f\xb1\x1c\xcc\xff\xad\x5d\x6c\xfa\xe6\x32\x79\x55\x83\xc7\x39\x1a\xb2\xdf\x2a\x30\x8a\x86\xed\x3c\x5d\x4f\x72\x4e\x0d\x03\x65\x0c\x76\xa4\x0e\x42\x3c\x55\xa9\x79\xa2\x16\x31\x9b\x25\x6c\x08\x18\x7b\xc5\x4a\x1e\xfb\x43\xb8\x1e\xe7\x27\xd8\x05\xbe\x3f\x3e\x0f\x77\x45\xe3\x7a\x26\xbd\xe7\xaa\xf9\x56\xe7\x64\x9c\xc7\x30\x5d\x4d\xc1\x3a\x5d\x1b\xe6\x7a\xab\xeb\x8b\x8e\x23\xa7\x39\x9f\xa1\x08\x60\x23\xa7\x09\x9d\xc7\xf3\xea\xa2\xe1\xf5\xfb\x36\x03\xbf\x37\xfe\x48\x35\x2d\x62\xbd\x9f\x38\x41\x88\x5c\x9f\xcb\xe5\xbb\xab\xe6\x86\x18\x97\x75\x58\xdc\x20\x14\xdd\x20\x92\xee\x88\xd5\xd4\x5b\xbc\x3d\x79\x1e\x85\x2a\x9b\x27\xb1\xcf\x7d\x53\x2d\x37\xf2\x4b\xe8\x11\x1c\x86\xdc\xa0\x64\x6c\x93\x48\x4f\x51\xb6\xf1\xbf\x5e\xf4\xa2\xe1\x3b\x94\x33\x59\x61\x30\x6a\x2f\x12\x5d\x48\x84\x84\xc3\x52\x84\x9d\xe7\x39\x5e\x1f\xfc\xe6\xf2\x14\x94\x51\x46\x86\xca\x2c\xdf\xfb\x89\x9f\x5f\xba\x1b\xbe\x4e\xe3\x2b\x7b\x26\x8f\xdd\x63\xf0\x0c\x26\x97\xc4\x78\xef\xfc\xe8\x13\x51\x31\xf9\xeb\xf6\xda\xd0\x80\x07\x3c\x11\x99\xff\xc6\xcb\x7c\x28\x6a\x7a\x61\x1e\xa4\x19\x17\x31\x5b\x23\x37\xeb\x1b\x2c\x05\xae\xf4\x56\x0a\x5b\x0b\x6d\xc0\x34\x0a\x68\xce\x77\x0
9\xb0\x4d\x4f\x49\x93\x77\x0a\xe1\x78\x05\x22\x62\xa4\x58\x12\xff\xe3\x9d\x61\x82\xb8\xcb\x7c\xc9\x85\xd2\xc5\x21\x07\xc7\x34\x20\xbf\xb4\x29\xb0\x59\xf5\x3b\xd5\xcd\x74\xb5\x5d\xcf\xf6\xe3\xcc\x30\xbd\x70\xad\x58\x08\x18\x20\x4b\xbd\x5f\x55\x18\x3b\x75\x0c\x87\x5e\xbe\x8a\xe5\x1f\x7e\x32\x7f\x55\xd9\xdd\x63\x67\x74\x6e\x54\xfb\x94\xe1\x5a\xe6\x3b\x9d\x6f\xe5\x44\xae\xc0\x12\x72\x73\xf6\x29\xd2\x0e\x8a\x1a\x48\x9a\xbf\xa2\xb1\x5c\x9e\x1a\xb8\x61\x98\xd4\xbc\xe1\x56\x8a\x75\x5f\x96\x39\x8f\xf1\x59\xca\xee\x1e\x5c\xe3\x85\x9a\x51\x12\xa4\xc5\xf7\x8f\x81\x68\xfd\x52\x28\xee\x30\xd1\xb5\x75\x05\xa6\x0b\x98\x28\x0a\xa5\xe9\x32\xd0\xda\xd1\xbc\x2b\xed\x8e\x09\xf1\x3e\xf3\x61\x1e\xcf\xf4\xb2\x58\xc8\x3a\x29\x93\x55\x14\xfc\x5d\x4a\x7b\x81\x4a\xe9\x6e\xe7\x94\xf0\x1a\xd9\xc7\xc2\x59\xfa\x4f\x9e\x3a\x2a\x49\xd7\x0d\x93\xe0\x20\xac\xad\x65\xdc\x41\x72\x25\x1b\xaa\x87\x26\x39\xf9\x8a\x98\xad\xe2\xca\xcd\x5a\xaa\xcb\x5f\xd5\x72\x03\xba\xa7\x5a\x03\x72\xc2\xc8\x48\xc1\xe7\x41\x7f\xf7\x72\xae\xcb\x7c\x2b\xb3\x2d\x75\x44\xf1\x65\x20\xa3\x57\x00\xfe\x63\xfc\xd4\xe1\x88\xe6\x27\xa0\x65\x87\x92\xd7\xc2\x3a\x79\x36\x0d\x8f\xfb\xd7\xdf\x1d\xf6\xd7\xf8\xae\xa1\xb3\x8e\xc1\x33\x42\xc8\x20\xf6\x81\xd5\x06\x5c\x93\x7c\x64\xa1\xd3\x73\x02\xbf\x91\x81\xf8\x86\x04\x89\xa6\xa4\x99\xb6\xa1\x78\x94\xc3\x9e\x33\xb4\xbc\xcd\x99\x7c\x7a\x90\x16\x00\x6a\xf7\xe7\xd8\xce\x86\xa8\x78\xba\xf2\xac\xbf\x5e\x51\xf7\x44\xe4\x4a\x5a\xa7\x53\x50\xcb\x61\x2f\x05\xdf\x14\xdc\x5c\xf3\x73\xc8\x60\x42\x33\xca\x17\x20\x5e\x16\x9b\xe9\x44\xbe\x58\xa7\x9b\x33\xd9\x16\x10\x53\x40\xa0\x08\x00\xdf\x20\x9e\xe1\xdf\x4a\x66\x13\xa9\xbd\x32\xa5\x59\x93\x48\xec\xf6\xf2\x63\x90\xda\x0b\xc2\x20\x12\x30\xba\xd1\xfc\xcf\xba\x2e\x8a\x69\xc5\x4b\xd1\xd5\xfd\x73\xc5\x9f\x61\xdf\xc5\x3e\x74\x41\x99\x00\x51\x80\x02\x81\xeb\x80\xe2\x84\x16\x94\x45\x9c\x4d\x56\x53\x5d\xab\x14\x94\x43\x4c\x74\x36\xd3\xa1\x4c\x1c\x3e\xeb\xba\x16\x18\x68\x96\xfc\x58\xfa\x4b\xad\xe3\xb1\x6e\x82\x85\xd8\x1e\x38\xb0\x90\xe5\x79\xa7\xf7\x8d\xc7\xe0\x2d\xff\x3
a\x57\xd6\xf0\x62\x7f\xfc\x2a\x71\x10\x9a\x7e\x19\xcb\x71\x65\x8c\x88\x14\x61\xff\x57\xfc\xfd\x8e\x93\x51\xf4\xc9\x3a\x24\x8b\x34\xd3\xbf\xe7\x30\xef\xd6\x52\x2b\xfe\xa9\x46\xe9\xb8\xe4\x19\x94\x31\x50\x0c\xb4\x21\xc8\x24\x8a\x1a\x7e\xf7\x37\xe7\xb3\xfc\x8d\x24\x14\x01\x6e\xe4\xa4\xe0\x71\xa7\x07\x80\x2e\x6f\xa3\xa5\xec\xa5\xe4\xb0\x9d\x08\xd3\x30\xe9\xf4\x3e\x8d\x5b\x2c\x95\x07\x54\x45\x23\xe6\x71\x93\xd2\xfb\x83\x1d\x6b\x1a\xe7\x42\xbe\x48\xc4\x0f\x53\x03\x99\x07\xad\xeb\x46\xd4\x3c\x59\x5b\xc0\xdd\xc0\x71\x40\x8f\xc1\x35\x0a\xf3\x24\xa0\x36\xad\x83\xfd\x4c\xfc\x06\xdc\xf7\x21\xd0\x16\x58\x5b\x4a\xf5\x05\x71\xd1\x5a\x68\x2b\xf5\x8d\x2c\xe9\xe1\xd1\x53\x85\xd0\x15\xc5\x21\x0a\xc5\x5f\xff\x86\xa1\x19\xfe\x7e\xb9\x70\xde\x83\x19\xbe\x8c\xc0\x1b\x9a\xd0\xeb\x9a\x11\x76\xb7\xbb\x4c\x15\x55\xba\x66\x63\xbf\x0e\xc1\x64\x96\x7d\xc3\xbd\x98\xa1\xfc\xd1\x8f\x5d\x0f\x95\xc2\xdd\xcd\x2c\xa5\x19\x4b\xd7\x46\xc7\x74\x0c\xa8\x0d\xc2\xfb\xf3\xeb\x99\x7a\x68\x98\x8c\x65\x80\xac\x69\x07\x80\xe1\xe1\xb6\xf3\x78\x6b\xfc\x43\x8e\x8f\xab\xf6\x7b\xd0\x79\xa9\x2a\xf6\xeb\x03\x82\x3d\xb8\x21\x54\xa7\xcd\xdd\xaa\xe9\xc6\xf1\x92\x39\xa0\xfb\x8b\xd2\x47\x69\xf2\x8e\xce\x35\x29\x6e\x32\xae\xc0\xf1\x49\x00\x53\x1f\x3e\xae\xc3\x2d\xed\x02\x65\xc2\x5f\xff\x8a\x53\x5a\x5a\x9e\x06\xcf\x2d\x3f\xb2\xbe\x77\xaf\xbe\x0f\x09\x70\x87\x45\x48\x4d\x72\x9f\x20\xee\xef\x46\xef\xf8\xa8\x0d\x7f\x0a\xff\x18\x67\xe9\x4d\x76\xb4\x6f\x4b\x0e\x19\xbb\xf5\x88\x84\xad\x6e\x80\x59\x4b\x23\xe5\x62\xa8\x14\xef\xae\x61\xdf\x1f\x8e\x5a\xd6\x31\x46\xaa\x72\x6d\x3c\x52\x8e\xf9\x23\xe2\x40\x22\x50\xad\x61\xb5\x8f\xed\x6a\x72\xc4\x74\xff\x73\x37\x37\x9d\x3c\x63\x4a\x6c\x45\xbc\xc5\x23\x48\xbe\xf6\xeb\x4c\xfd\xd7\x65\xaf\xf9\xf6\x46\xe7\x6f\xbe\x97\x6b\x0c\x34\xd2\xd2\x51\xed\x7a\x7e\x6f\x7c\x21\x82\xee\x8e\x02\xc7\xba\x72\x8f\x5a\x7e\x38\x65\x67\x85\x1f\x3c\x50\x09\x6e\xe3\x45\x7c\x6f\x8d\x66\x06\xd4\xda\x47\x57\x7a\xaf\xf4\xda\xc8\xec\xa8\x0f\xf8\x98\x7d\x02\xf2\xe1\x3f\x1f\x45\x74\xbc\x8c\x03\xb9\x61\x97\x3b\x6a\x35\x4f\xe
a\x53\x93\xbf\xe4\xf3\x51\x03\xa6\x4a\xd8\xbf\xb5\x6b\x1a\x73\xdd\x7b\xb9\xd8\x33\x4e\xf5\x05\x15\x0d\x84\x32\x93\x26\x2d\x33\x69\xc6\x1b\x11\x11\x5f\x1f\xff\xdf\xfd\x55\x5f\xb4\x28\x54\xee\x3f\x35\x56\xb9\x59\x31\xfb\x8c\xfc\x98\x7e\xca\xda\x0d\xe9\xff\xf0\x5d\xb0\x3e\xf3\x5a\xd7\xfe\x16\x95\x6b\x63\x28\x8d\x41\x17\xfb\x4a\x00\x57\xb3\xef\xf8\xd3\x0b\x96\xeb\x98\x27\xe9\x97\x6e\xe6\xa9\x35\x5b\x24\x21\x99\x29\xd6\xdd\x70\xcb\x56\xc2\x2e\xf0\x3f\x94\xdf\xc2\x0d\x37\xfe\xc4\x7c\xdc\xc8\x76\xfd\x99\xac\x1a\xc3\x13\x9f\xd7\x10\x35\x35\x14\x9b\xf4\x1c\x0d\xe6\xdf\x9d\x74\x26\xe5\xec\xf9\xe1\x67\xfb\x21\x19\x51\xe5\xec\xf9\xe6\x74\xa6\x07\xfa\x46\xf8\x37\x0e\xd5\x46\xf6\xfd\x58\xae\x26\x87\xcc\xd4\xba\xe9\x31\x2c\xaa\x36\xaf\x06\x78\x1a\x33\x27\x11\xd9\xd5\xdd\x49\xf9\xb7\x67\x5c\xa5\xbe\x34\xd7\x28\xd6\x40\xe9\x86\xf3\xed\xb1\xa7\x13\xc6\x74\x4a\x73\x3e\xd5\x65\x8f\x01\x7e\x30\x1c\xfd\x97\xd1\x4e\x16\xe6\xab\xcb\xd7\x3b\x88\x33\xd7\x68\x27\xd8\xb9\x5f\xe4\x42\xac\x17\x7a\x9d\x1b\x6a\xec\x15\x47\xff\xd8\x66\xca\x0f\x10\xff\x43\x18\x8c\x69\x67\x76\x87\x09\x07\xce\x7d\x0c\x68\xe7\x8e\x00\x6f\xd8\xbf\x58\x99\x22\x82\x3e\x84\x5c\x9e\xcb\x88\x33\xc9\x52\x71\xca\x69\x58\x90\x88\xdc\x28\xa9\x4b\xc3\xfe\xef\x7a\x14\xad\x0d\xb5\x45\x94\x43\x06\x9c\xdb\xc1\x4d\x7b\x25\x1f\xbc\x46\x30\x1b\x90\x1f\xfe\xe9\x4b\x5c\x8a\x3c\x96\x71\xc4\x79\xdc\x84\x7d\x56\x0d\x96\x2f\xd6\x80\xe1\xdb\x42\xed\xc8\xe5\x66\xea\x9f\x79\x39\x8a\x6a\x1c\x48\x4c\xf2\x30\x13\xc3\x71\xe4\x3b\xb5\x01\xdf\x46\x73\x8e\x1a\x26\xd2\x21\x25\x1a\xde\x0d\x6f\x5a\xfb\x13\xc7\x2a\xf7\xf5\xde\xec\xb5\xb2\x78\xb7\x19\x6c\xb0\xdc\x4d\xc5\x48\x5a\xef\xca\x7f\xb1\x9c\x87\xf9\xe2\x16\xb3\x70\x33\x39\x6a\x86\xe8\x86\x83\xe7\xa9\xd6\xcc\x88\xcc\x19\xfd\xf4\xb3\xdf\x65\xce\xf5\x54\x1f\xd9\xbb\x46\xa1\x26\x69\x64\x88\x48\xae\xd9\xb7\xfb\xa9\x85\xe1\xae\x62\xc2\x61\x79\x76\x79\xc5\x68\xa1\xd9\x2e\x44\x47\x53\xbf\x66\xd0\x54\xca\xbf\x8b\x9b\xec\x05\x91\xf6\xce\x07\x6e\x2b\x18\x8d\x66\xb6\x08\xa4\x08\x05\x9b\xea\x76\x9a\xf0\xac\x20\x7
b\xcc\xf3\x73\x47\xd6\x50\xc4\x14\x33\x02\xc6\x09\x5b\xe5\xa6\x27\x6b\xe0\xb4\x74\xe8\x47\x85\xcf\x7f\xb8\x8e\xae\x1d\x92\x38\x1f\x77\x18\x65\xde\x23\x02\xee\x45\x46\x6d\xad\x97\x4e\xd4\x81\x19\x23\x79\xff\xe3\x0d\x9c\xbf\x7c\xd9\x0f\xe6\xb4\xb0\x63\x63\x90\x84\x8c\x7b\xc2\x9b\x9c\x3d\x63\x1c\x97\x6a\xb6\x83\xbc\x3f\x7c\x05\xac\xb3\x4a\xce\x30\x94\xcb\x45\x2a\x13\x84\xbc\x81\xe3\xb5\x8f\xae\xed\x1a\xb3\xc0\x8e\x4d\x26\xf6\x45\xb2\x3f\xb5\xef\x6c\xa6\x6c\x9b\x32\x8b\xbf\xe4\xe3\x6d\x96\x9c\x13\x65\x25\xf0\x3e\xff\xf9\x34\x50\xf3\xd3\x57\xdc\x60\x38\x36\x31\xda\x6e\xe6\x38\x4c\x1a\x1d\xa5\xc7\x34\xf3\x81\x1d\x4e\x5f\x4f\x08\x51\xc4\x7f\x5c\xc8\xcc\x1e\x70\x4d\xb1\x34\xcd\xcc\x19\xb0\x54\xdf\x71\x5b\x98\x06\xc7\x86\xfb\xee\x74\x57\x24\xb0\xf5\x4f\x0c\xd2\x24\x0d\x15\xb0\x6c\xb4\xc2\xd1\x00\xe7\x21\x66\xaa\x1b\x37\xc3\x9c\xce\xfa\xa5\xaf\xd4\x4a\xe6\x32\x7f\xe2\x0a\x66\xf7\xd5\xaf\xca\x28\x25\x2f\x37\xe8\xc0\x76\x1e\x15\xa1\xc2\x23\x5e\xff\x7a\x16\xeb\x42\x02\x4b\xc6\x7b\x3f\x7c\x12\x36\x09\x0e\x3c\x3d\xeb\x8a\x7d\x14\xa5\x56\xc7\x82\x97\xcb\xd8\x2b\xc6\x61\xff\xfa\xdd\x1f\x6c\x71\x07\x41\x37\xa3\x23\x06\xa8\x16\x1f\x7e\x95\xc0\xe1\xdb\x42\x71\x3a\x47\xfc\xc7\xbb\xd6\x7a\xfc\xd7\xd1\x04\xe7\x6a\x66\xca\x2c\x1f\xc2\x61\xa4\x1e\x02\x06\xcc\x16\xb0\x52\xe1\xfe\xed\x9f\x33\x3b\x1f\xdc\x32\x66\x5e\x50\x93\x22\xab\x24\xf8\x53\xcc\xbd\xd0\xf4\x5e\x2b\x34\xea\x5f\x5c\xd7\x60\x39\xd7\xf8\xc8\x81\xd3\x3d\x3a\x77\x6a\x4e\x8b\x0a\xb7\x06\xe9\x22\xb3\xa2\x21\x27\xec\x3f\xf2\x22\x28\x4e\xb9\x32\x8d\x27\xf1\xe2\xd4\xc2\x19\x4a\x30\xd7\xa9\x05\x5a\xf2\xaf\x87\x76\xd4\x34\xa0\x3e\x3f\xbc\x72\x43\xb1\x44\x4e\x58\xc0\x6d\x65\xa0\x78\xef\x8f\x6e\x7a\x17\x6d\x09\x7f\xc5\xc4\x0d\xa9\x4c\x38\x4c\xc5\x36\xa3\xee\x1f\x93\xd5\x18\xf6\x5c\x03\x35\xf9\x89\x05\xd4\x5b\xf4\xfc\xc5\x51\xf8\xbc\x0b\x49\x39\xcd\xe3\x4c\x63\xc4\xc4\xfc\x5f\x4c\xf7\x5d\xb5\x5b\x12\x42\x06\xec\x5c\x35\xf9\x02\x18\x26\x75\x4c\x83\x69\xca\x5d\x19\x01\x13\x29\x3f\xbe\x1f\xce\x5d\x91\xea\xd2\x32\x93\x75\xab\xd6\xf7\x5e\xe9\x3
2\x29\x18\x86\x8e\x60\x0e\xbb\xd9\xe3\x02\xdf\xfd\xe3\xc1\x2c\xda\xcc\xa4\x82\x01\x17\x19\x3e\x5d\xc1\x06\xf2\x73\x33\xeb\x12\xda\xe9\x94\x88\x97\x88\xdd\x11\xda\xe2\x4f\x6e\x87\xb7\x3a\x7e\x70\x04\xdb\x43\xcc\x26\xca\xfd\xcc\x50\x34\x83\xe7\x9c\xc8\x38\x39\xe5\x2f\x0e\xca\x39\x06\xa2\x58\x22\xe3\x93\x37\x04\x3d\x7e\xf9\x4a\x0e\x23\x3b\x23\x73\x74\x3f\x71\x94\xe5\x19\xe6\xa6\xcc\x9c\xa2\xc6\x05\x74\xf9\x08\x78\xbb\x1c\xff\x74\x02\xf2\xff\x9e\xbf\xf2\x61\x46\xc0\x85\x21\x0c\xc3\x31\x5a\x13\x1b\x3b\x15\x84\x65\x3a\xa8\xc0\x2d\xc3\xf9\xe1\x2f\xe0\x18\xf4\x97\x6f\x31\x04\x8b\xcb\x02\x9e\x8a\x77\xcb\x98\xe4\xe6\xd2\x77\xee\xef\xcf\xbf\x9f\x1f\xfc\xf7\xdd\x94\xbd\x66\x4e\x5f\x90\x51\x09\x7a\xcf\x4f\x72\xf3\xb5\x1c\x90\xde\x58\x70\x92\xfe\x4d\xae\x32\xb9\xd1\x60\x01\x2b\x42\xa4\xcf\x87\x29\x6e\x99\x0d\x19\xb0\xcb\xbf\xe7\x77\xc6\x69\xe4\x41\x8b\xd9\x18\xcc\x63\x6a\x93\xa2\x7b\xc4\x85\x02\x26\x1e\x34\x55\x35\x88\x30\x61\xff\xc9\xb6\x3e\xa8\xbd\x42\x7d\xf2\xf7\x29\x20\xf8\x4d\x48\x05\xa4\x88\x7c\xad\x7c\x2e\x5b\x18\xc8\xe5\xff\xe9\x4a\x88\x65\x41\x7d\x52\x24\x0a\x16\xf3\x6f\x99\x6e\x81\xa7\x74\xf2\x17\xf7\x6c\x96\x6a\xe4\xb6\x90\x7a\xfe\xef\xdc\x4e\xc1\xb1\x5f\x73\x58\xb6\x51\x41\x6c\x4d\xf1\x81\xca\x46\xb4\xc6\x15\xfe\xff\x58\x5a\xfc\x82\xe4\x5d\xf2\x4b\x49\xdb\xe2\xb4\xc3\x43\x09\x43\x07\xf9\x32\x51\x61\x8c\x8c\x45\xe6\x7d\xa3\xfc\xdc\x4b\x62\x4f\xe1\xf4\x3f\x8f\x42\xf5\x5e\x3d\x41\x3b\x85\x8f\x3b\x14\xda\x48\xa1\xb9\x1d\xe7\xaa\x8b\x8f\x8e\x05\xc9\x0f\xf2\x73\xd5\x9a\x30\xfd\x9c\x47\x54\x67\x41\x59\x9e\x3c\x93\x43\xea\xf4\xc8\xb2\xa5\x89\xf8\x39\xb8\xc6\x88\x78\x6f\x1f\xba\x2d\xab\x85\x2c\x08\x43\xd3\xbe\xb2\x92\x17\x97\xa7\xe3\x32\x7e\x85\x08\xa9\x2f\x0a\x42\xa7\xa4\x3f\xf2\x32\xc2\x9e\xd6\x30\x26\x5e\x54\x1e\x2d\xd0\xd3\x97\xbe\x20\x93\xbf\xcb\x81\xd2\x44\x27\x32\xc0\xb5\x5b\xb5\x24\x2b\xde\x76\x57\xc9\x10\x2e\x5f\xb9\xe6\x53\x33\x8c\xc2\x14\x3f\x2f\xf1\xb9\x4a\xed\xa7\x7f\x31\xc7\xa0\x2c\xfb\x2a\x01\x88\x91\x7d\x18\x86\x47\x34\x65\x88\xd9\x61\x97\x67\xda\xaa\x6a\x7d\x1
2\xc4\x0a\x07\x47\xf2\x3f\xf1\x00\x35\x91\x71\x02\x86\xc7\xe2\x89\xd1\x39\xfc\x3f\x2e\x4b\x54\x73\xbd\x2c\x59\x99\x9a\xff\xec\x6c\xd2\xa6\x2b\x31\xaf\x29\x92\x4e\x0d\x25\x39\x49\x61\x5b\xe9\x66\xf0\x0c\x66\x85\x4d\x32\x8b\x26\x56\xdc\x18\xde\x82\x19\xa0\xce\x4e\x82\x51\x03\x46\x41\x80\xdf\x09\x3f\x39\x6c\xa3\x35\xad\x1e\x02\x3e\x73\xe4\xa7\xd9\xd7\x5c\xce\x18\xb4\x86\x6c\x3b\xb2\xf5\x29\xa6\xb6\x57\x25\x32\x28\x1e\x4a\x6e\xb1\x64\x17\xfb\xe3\x77\x29\xde\xa7\x1e\x76\x92\xa5\xe5\x69\xeb\x68\xae\xf9\x9f\x33\xba\xb0\xda\x82\x86\xe5\x72\xe1\x89\xa1\x9f\x74\xc2\x0e\x9b\x9b\x84\xf5\x70\x46\x6c\x53\x88\x86\xba\xd9\xc9\x75\x63\xf5\x74\x75\xb2\xf2\xc6\x21\xec\x25\x45\x01\x47\x6d\x7c\xb0\xed\x01\x9d\xde\xcd\x8f\x06\x1b\x7c\xfd\xf8\x8a\xbb\x1f\x5f\xd6\x72\x9a\xb7\xc2\x40\xdc\x28\xd3\x93\x47\x95\xbe\xf3\xc3\x50\xac\xa6\x60\x1d\xf1\xb8\xd4\x74\xf5\x6e\x34\x3f\xf8\x6f\x1f\x3f\x54\x79\x6c\x1f\x81\x94\x19\xc2\xbe\x61\x6b\x86\x49\x37\xdb\xfc\x2c\xff\xc4\x46\xc2\xe1\x79\x87\x41\x85\xc3\xff\x3b\x67\xf2\xe8\xbc\x0b\xfb\xd5\xbb\x92\xcd\x1e\x17\xab\x63\x02\x15\x30\x52\x15\x95\x5f\xa1\x75\xae\x0a\xd2\x24\x34\x7d\x3b\x97\xb2\xce\xa6\xba\x1f\x20\x30\x51\xa4\x45\x44\x30\x37\x88\xe4\x44\xa6\x4b\x9c\xad\xf9\xbe\x94\xcc\x0c\xf4\xe3\x4f\xb3\x4f\x6f\x10\x4e\x3e\xf2\xad\x2a\xf4\x6b\xc5\x80\xdb\x78\xce\x59\x47\x05\x8f\xe9\x07\xb4\x75\xef\x3b\xa0\x70\x2f\x52\xbd\x0c\x26\x32\xc8\x08\x86\x84\xcd\x04\xc5\xe9\x4e\xb2\xad\x14\x11\x6e\x34\x72\x09\x37\xed\x3f\x99\x67\x2c\xe3\x7c\x01\xe7\x26\x77\x73\xf8\xb2\x12\xdc\x90\x93\x43\xd8\x08\x82\xdc\x91\x90\x4a\x2e\x60\xdf\x95\x24\xd6\xb5\xc4\x5e\x9a\x3a\x18\xde\xa2\x06\x71\xc9\x99\x88\x94\x14\x14\x3e\xba\xc5\xbd\xf2\xa2\xe3\x0d\xed\x3f\x9d\xc1\xd3\x33\xf5\x91\x31\x52\xf4\xc2\x72\xcb\xd6\x79\xf0\xd2\x2d\x37\x61\x94\xc1\x61\xc2\xfa\x24\xb0\xac\xb4\x92\x7e\x87\x7a\x4b\x23\xe7\xfa\x2c\xcd\x21\xce\x67\xa7\x59\x73\xf7\xe5\xee\xc4\x49\xd8\xff\xb7\x2f\x0e\xb9\xb2\x36\xdf\x94\xef\x1e\x62\x51\x80\xfa\xa0\x4c\xd4\x77\xa4\xaa\x4b\xa8\xf0\x0d\x16\xe2\xe4\x18\x5c\x92\xba\x16\xe
3\xa6\x26\xba\xae\xec\x21\x1c\x86\xc6\x99\xd7\x3e\xa7\xe6\x8f\x21\x68\x4d\xab\x72\xff\xf4\x81\xbc\xb0\x5f\xed\xae\x4c\x71\x2d\xc3\x28\x84\x7f\x59\x78\xee\x75\x33\xd9\xed\x4d\x13\x9c\xe2\x2e\xf1\xba\x4a\x72\xfb\xbc\x07\x43\x38\x91\x52\xb1\x47\x90\xb4\xe1\xbb\x37\x1c\x1f\x42\x2f\x71\x81\xc1\x21\x39\xa0\x57\xe0\x1b\x30\x81\xf2\xb3\x64\x5c\xe0\xed\xb4\xeb\x39\x71\x4f\xc7\xfd\x81\xab\x8c\xfb\xea\xc5\x18\x0e\x89\x45\xda\x10\x6d\xff\xbe\x40\xc1\xa1\xc4\xe2\x73\xb9\x6f\xbf\x5c\xf2\x46\xc9\xaf\x35\xaf\xeb\x2c\x69\xdc\xd7\x32\xad\x90\xbb\x2b\xdb\x9e\x4b\x55\xb3\x1b\x28\x10\x96\x9f\x7b\xc7\x9c\xd3\x20\x4f\xb4\xc5\x74\x32\x1b\xf1\xf9\xc0\x42\xca\x69\x9d\x66\x7c\xeb\x09\x4b\x25\xd1\x06\xcb\xa7\x90\x9a\x79\x08\x36\x13\xcc\x47\xe2\x5c\xb7\x86\xbf\x3d\xfa\x52\xdb\x4f\xda\x76\xef\xb2\x4e\xda\x46\x15\x06\xae\x13\x4a\xaf\xd0\xbc\xfe\xc3\xcb\xc8\x14\xa9\x1c\x75\xbc\xf7\x3a\xc9\xb7\x1e\x7a\xce\x5b\xce\x6d\x59\xc9\x1d\x20\xb6\x86\x32\x78\x8e\x6c\x92\x84\xfb\xa5\x27\x5c\x4f\xf6\xcf\x17\xcd\x48\xb4\xaa\x61\xe9\x33\x8d\x97\x13\x59\x55\xf9\x2a\x46\xf8\x4d\x9c\x19\xa7\xc4\x56\x7e\x0a\xa7\xfb\xc9\x79\xf5\x8c\xd5\xf1\xde\x93\x30\x05\x0d\xd3\xf6\x40\x95\xcb\x71\x90\x3b\xb0\x94\xf2\x72\x25\x3c\x37\xe5\x38\x65\xb2\x6b\x35\x2e\x3b\x48\x1d\x79\x36\x22\x33\xf7\x88\x92\x6e\x04\x57\x66\x8f\x41\x09\x25\xa7\xa4\x73\x36\x2a\x2b\xee\xe7\xc7\xd6\xf3\xfb\x47\xde\x7f\x19\x8a\x76\xf2\x07\xcd\x1e\x37\x85\xc1\xa4\xf9\x34\x3f\xca\x6b\x51\xf7\xf7\x43\x15\x7e\x1a\x03\x37\x72\x5e\x68\xae\x86\x08\x66\xe4\xa7\x21\xe5\x96\x0e\x56\x66\x23\x04\xd2\xf2\x13\x93\x59\x1e\x37\xc5\x74\x22\xa8\xb5\x9e\x95\x1e\x1b\x73\x44\x4c\x50\x77\xbd\x20\xe5\x98\x8e\x79\x05\xe6\x63\x3c\x06\xf7\x98\xc8\xfc\xe5\xcb\xca\xe4\x85\xaa\x59\xe6\x11\x7b\x7b\x53\x30\x4a\xbf\xc6\x64\xdb\xf2\x3d\x9c\x6e\xde\x95\xf5\xfa\x08\x3f\xb1\xab\xd1\xb0\x97\x93\x44\xf7\xde\x94\x37\x19\xc0\x70\x54\x80\xb3\xac\xf6\x25\xef\x44\x15\x5e\x4f\x71\x83\x19\xe0\x99\xe3\x43\x9b\xe9\x77\x55\xbf\x48\xf4\x64\x7d\x1d\xe0\x28\xde\x7e\x94\x19\x48\xf5\xca\x6d\x84\x40\xfc\xdf\xe7\x1
4\x0a\xc6\xd0\x93\xca\xf1\xe5\x08\x42\x46\xb0\xda\x53\xd8\x5f\xcf\x0f\x76\x59\x6a\x6b\x2d\x34\xb8\x10\x50\x29\xdf\x84\xee\x12\xea\x08\x46\x2b\x1a\x11\xeb\xa2\x35\xc0\xf1\x7e\x3f\x82\xdd\xf9\xf7\xb8\x60\x07\x25\x6b\x57\xab\x31\xf1\x8f\x4e\x8f\x59\xf6\x50\xe4\x84\xc0\x11\xea\xd7\x47\xc6\x0b\x5c\x36\x94\x4b\xe4\x84\x90\x28\xf5\x2c\x3b\x12\xcd\x32\xcd\x63\x40\xfd\xba\xca\x42\x1b\x7c\x03\xd9\x04\xe0\xce\xe4\x6b\x60\xd2\x42\x64\xd3\x0b\x12\xf1\xd3\x73\x8d\x66\xc0\xfc\xcb\x87\x23\x3d\x03\x7c\xe8\x78\x43\x13\xff\x3b\xfb\x4d\x03\xed\x8d\x69\x2f\xc6\xb7\x53\xba\x55\x11\xb4\x6e\xb3\xc0\xa5\x4c\x74\x3e\x79\xac\x29\xb7\x06\x2a\xfe\x50\xbf\xce\x58\x6c\x81\x46\xc8\xba\x03\xb7\xa6\x19\xf5\x1f\x6e\xf6\x6c\xbd\x29\xb6\x52\xf2\xd3\x17\x88\x61\x93\xd8\xc2\x3b\xda\xd2\xe6\x8d\xbf\x51\x0f\x8c\x8d\xd7\x78\xa1\xd7\x78\xd2\x2f\x85\xd1\x28\xa8\x15\x7c\xb9\x85\x61\x0e\xc0\x5d\xba\xdf\x31\xca\x66\x71\xfa\xe6\x8c\xf1\xfc\xd4\x30\xf6\x8a\xf5\x4e\x1d\xa3\xbc\xb6\x55\xf4\xe9\x2b\x12\x46\xd8\x1e\xe1\xba\x69\x3b\x7d\x3c\x3e\x44\x58\x6d\x4e\xd3\xb1\xf1\x65\xaf\x4c\x0e\xd6\xa9\xce\xbe\xec\xb1\xd3\x13\x55\x0f\x94\x9b\x21\xd1\x69\xe4\xc7\x2f\x9b\x26\xc6\x2c\xb3\x6f\x9c\xe2\x70\xff\xa3\xdb\x85\x46\xc0\x8b\xd3\x85\x2c\x65\x81\x91\xc1\x0d\x00\x3e\x34\x2d\xf0\xa8\x4b\x11\xaa\x65\xf6\xd6\x16\xda\xf8\xcc\x84\x39\xd9\x34\xfd\xf8\x3d\x0c\x97\xa5\xac\xa7\x99\x72\x5b\x38\x74\xea\xcd\x47\xa9\x5c\xe9\x2e\x76\x9c\xf9\x77\x2e\xd9\xa2\x9e\x91\x00\x10\x02\xef\x8c\x85\xf0\x70\xbe\x32\x02\xd8\x9f\x92\xdf\x4a\x18\x2d\xd7\x6a\xda\x46\x54\x16\x88\x29\x22\x50\x81\x81\xf9\x6c\xdc\x46\x0b\x49\xa7\xa0\x28\xf6\x07\xce\x48\xb4\x48\xd2\x4f\x7b\xc5\x42\xad\xde\x9c\x9c\x5a\x41\xa3\xfc\xe7\xbb\xdb\xcf\x1d\xbb\x1e\xae\x8d\x18\x40\x7e\x25\x6c\x72\x48\xd9\xaa\x81\xc4\x5b\xeb\x1a\x5c\x02\xd8\xf4\x23\x2e\x34\xde\xcf\x74\x02\x49\x86\xed\x74\xda\x16\xc7\xa3\xca\xaf\xd2\x2b\x19\x97\x8b\x97\xfb\x67\x9c\x62\x67\x65\x0c\x17\x7c\x37\xcd\xf4\xef\x1e\x61\xcb\x37\x2f\x1d\xc4\xab\xe1\xe7\x1b\x84\x63\xf7\x7e\xd4\x81\x92\xf9\xfe\xab\xac\x57\x2b\x30\xe
f\x9f\x78\x8a\x69\x58\xe0\xaa\x5d\x08\x2f\x98\xc9\xd9\x8b\xf2\x73\xc4\x2b\x38\x5d\x63\x09\x1f\x0e\x57\xde\xa8\x51\x17\x70\xec\xde\xc0\x00\x59\x92\x9b\xe2\x9f\x57\x06\xc5\xf1\x58\x54\xc1\x21\xd5\xa7\xff\x8b\x4d\x01\x9e\xa1\x3b\x10\x59\x24\xe7\xf5\xdd\xd6\xf1\xfc\xee\xdc\x31\x05\xac\xd8\x66\xf3\x3a\x2f\xe1\x29\x8f\x66\xe4\xba\xdd\xf4\x00\x53\x6f\x1b\x35\xd5\x27\x34\x7f\xca\x96\x27\xfc\xc1\x0a\xf1\x9e\x1a\x91\x9d\xa3\xbc\x54\xe9\x3d\x9b\x32\xc3\x30\xd8\xb4\xef\x3b\xd9\x44\x38\x9c\xd9\xba\x06\x3b\xbf\x8e\x51\x6c\x34\x9f\x37\x10\xb5\x34\x75\xf5\x8f\xca\xed\xf2\x65\x23\x7f\xe2\x5c\xbe\x6f\x90\x9d\xa7\x90\x0a\x46\x8a\x80\x48\xd6\x7d\xd3\xa8\x8b\x18\xd7\x75\xf1\x10\x06\x39\x96\xb6\x79\xf5\xa2\x73\xaf\x6b\xe3\xe4\x30\x53\x5a\xdc\xf0\xfe\x3b\x13\xbc\xbe\xe1\x72\x8f\xe1\x0d\x7a\x40\x1f\x75\xf5\x5e\x7d\x48\x23\xb6\x00\x71\x17\x22\x1d\xed\x0b\x83\x09\xb3\x91\xfc\x28\x46\xed\xbd\x5e\x73\x77\xc0\x27\x8e\x5c\xb2\x76\x7d\x8c\x6a\x58\xbc\xbf\x9c\xf4\x20\x37\xf6\xd2\xaf\x31\xe8\xfd\x80\xe5\x4f\x85\x6e\x16\x79\x6e\x8b\x46\x09\xb5\x35\x8d\xa0\x01\x67\x9a\x39\xba\xc9\x36\x72\x05\xd0\x38\xce\xbf\x38\x5a\xd6\x71\x33\xd4\x47\x11\x9a\x8c\x8e\x50\xe6\x72\x3b\xcc\x95\x13\xd5\x7e\x70\xfd\x38\x8e\xc7\xed\x22\xe4\x0b\xf8\xde\x9c\x88\x4b\x01\x76\xa7\x07\xbb\xb7\xbf\x79\xf4\x05\x9f\x92\xdf\x2c\xf8\x13\xe8\x5e\x35\xb1\x76\x01\xe3\x1b\x2d\x54\x2b\xaf\xe9\x6f\x74\x8a\x2f\xec\xa0\xfa\x01\x82\x68\xde\x3c\x83\xe9\x5c\x97\xdb\xc4\x0e\x17\x7f\x5f\x53\xcf\x32\x3f\xb1\x1c\xfb\x38\xf9\xc8\x55\xad\x09\xfb\xb1\x78\xf8\x79\x2e\x26\xff\xe5\x97\x84\xcb\x7b\x45\xd0\x8e\x32\x74\x97\xac\xf1\x76\xb6\xb1\x79\xcf\x5d\x60\xf9\x7f\x72\x43\x2d\xdf\x03\x52\xa4\x77\x3f\xd6\x23\x5f\x0a\xff\x23\xcb\xf8\x2f\xe7\xc4\xc5\x51\x3f\x8e\xe0\xae\x9a\x17\x70\x69\x97\x3d\xd1\xf1\xcd\xe3\x7b\xec\xe6\xbf\x87\xe6\xfc\xc1\x3f\x79\xbc\x27\xe0\x68\xa5\xa3\xec\x8d\x92\x85\xf5\x48\x06\x8f\x3d\x0d\x34\x5f\x5d\x75\x20\xb3\xf9\x00\x11\x84\xba\x9f\x72\x3b\x8a\x98\x9d\xae\xed\x68\x40\xbe\x91\xee\xfa\xfe\xa3\x37\xd8\x23\xdb\x2c\xb6\xac\xfc\x9b\x23\x9
f\x04\x89\xfd\xe6\x04\x97\x66\xf0\x62\x05\x9f\xd2\x17\x4f\xec\x91\x21\xfe\xc7\x27\xbb\xae\x94\x89\xd5\xa6\xf7\x62\x3f\xd7\x04\x8b\x43\x6f\x8f\x15\x8c\x5d\x96\xf3\xb9\x91\xbf\xff\xe2\x0b\x53\x40\x3e\x9c\xde\x30\x2d\x65\xb4\xcf\xab\xc2\x1a\x5f\x9e\x07\x7d\x1a\x12\x68\xb6\xa4\xad\xb2\x76\x43\xb6\xff\x7a\x3a\xab\xbc\x3d\x63\x7a\x72\xd3\x77\x95\xaa\x03\x52\x89\x57\xc0\x54\x57\x6d\xb0\xc5\xff\xc4\x55\x2d\xf3\x30\xe7\x2b\x98\xef\x90\x6b\x65\x35\x12\x26\x26\x22\x11\x0f\xaa\x37\xa7\x94\x3f\x7e\x84\x74\x29\x56\xac\xf0\x65\xbe\xd6\x62\x61\x4e\x4f\xc5\x5f\x01\xe3\x96\x99\xbd\x35\x98\xc3\xbf\xff\x3f\x96\x61\xeb\x46\xf5\x84\x23\x21\xd6\x0e\x92\x14\x51\x84\x2f\xe0\xd4\x21\x0d\xfb\x9f\xf5\xce\x48\x8f\x74\x33\x8b\x62\xdf\xc8\xfb\xe0\x35\xca\x15\x8c\x29\x0c\x77\x03\xe4\x9c\xfb\x81\x0c\xec\x1f\x97\xf9\x9b\x90\x95\xff\xa9\xb4\xf2\x6d\x80\x64\xc5\x13\x2c\xc2\xf5\x8f\xa3\x16\xba\xf4\x2b\x9e\xfb\x9f\xb5\x7e\x14\x80\x67\xac\x87\xc9\xdb\x6c\x5d\xf9\xed\x2c\xad\x93\x3f\x52\x71\x88\x44\x25\x20\xfe\x5f\x6c\xf2\xeb\x19\x2d\x50\xc0\xc1\xd0\x63\xe5\xcf\xbc\x3b\x76\xc4\x2f\x8d\xfc\x83\xdf\xf1\x25\xe5\xb0\xe4\x64\xbf\xe2\xbc\x35\x9a\x7e\x47\xda\x5d\xe9\x83\xd9\x7c\xed\x76\x87\x8a\xff\x17\x07\x9b\xcb\x61\xf6\x9d\xbf\x36\xb2\x75\x55\xee\xfd\x86\x42\x78\x5b\xed\x1b\x30\x55\x75\x41\x68\x23\x3b\xdd\x5f\x1e\x45\xbe\x77\x17\xa5\xf7\x4b\x6b\x66\x28\xb2\x5d\xb2\x5d\xaa\x27\xe3\x29\x74\x43\xc0\x09\xe5\xb6\x8b\xff\x89\x3b\xd9\x31\x77\x50\xf9\xd4\x2a\x11\xa2\x2c\x87\x9e\x2c\x19\xae\xbe\x5d\xc0\x15\x64\x5b\xa8\x9c\xed\x40\xff\xd6\x32\xb1\xe2\x8b\xb1\xef\x2e\x52\x79\x9a\x8a\x03\x62\x2d\x68\xc0\xe4\x00\x5d\xc8\xec\x47\xf4\xfe\x07\x07\x50\x9e\x84\x41\x2a\x5f\x35\xa6\x6d\xce\x7b\xad\x9e\xbf\xf6\xfd\x40\xfe\x2e\xf3\x0c\xfb\x0f\x6f\xc2\x67\x92\xd2\xde\x33\x45\x4e\xc4\x4d\x97\x06\xc3\x46\xaa\x3e\x8e\x61\x53\xa7\x98\x47\xae\x5d\x90\x3f\x0f\xd3\x6d\xc3\x5f\x9c\xe5\xb9\x79\x97\x9d\x28\x5a\x2c\xd8\x88\xfe\x1c\xe1\x4c\xcb\x9f\x12\x66\x98\x5d\x76\x75\x3a\x09\x9c\xff\xf4\xbe\xa3\x11\x85\x3f\x2a\x16\xa8\xcc\xfb\x83\x60\xa8\xe
1\x7d\xac\xe6\xde\x27\x4b\x30\x43\x51\x0c\xc3\xd5\x31\x04\xf7\x2f\x37\x95\xd1\xa5\x0f\x15\xf9\x34\xc8\x6c\xf2\x4c\xcb\xbf\x7f\x5b\xdb\x2c\x35\xf1\xd1\xe6\x81\x91\x23\xee\x8a\x16\xea\x10\x05\x8f\x19\x7f\xb0\x63\x83\x28\xfc\xae\x72\x35\x2b\xc7\x2a\xe7\xfa\xa0\xed\x60\x13\x85\x53\x98\x68\x19\x2a\xde\xf6\x0f\xf3\xe9\x5c\x75\x5e\x8c\x5d\x67\x33\x7d\x19\xa7\x6f\xc9\xa5\xfd\x0e\xf7\xae\xbb\x7c\xd4\x54\x58\x48\xae\xf1\xbe\x3b\x0d\x4e\x18\x02\x71\x1d\x15\x77\x2a\x0b\xc8\xeb\xff\x60\x71\x4e\xb2\x87\x11\x47\x24\xf5\xb0\x1e\xf0\xc0\x60\xb1\xf9\xb6\x6a\x6a\xab\x1f\xf6\x73\x53\x95\xe2\x7e\x0d\x22\x5b\x3e\x7d\x51\x1e\xd8\x52\xf0\x43\x6a\x74\xfc\x1a\xa6\x8e\xf7\x47\x07\x46\x8d\xce\xf0\x7d\xe7\x96\xa7\x7c\x4a\xba\x71\xb6\x81\x29\x75\x08\x72\x21\xd2\xf5\x48\x07\xaf\x62\xaa\xc6\x37\x65\x9d\xdd\xad\x7b\x45\x1d\xf6\x05\xb0\x9b\x09\x7f\x78\xff\x38\xb9\xc8\x3f\xa5\xa8\xc9\xc6\x39\x80\x36\xe7\xc3\x34\x64\xb5\x4f\x5b\x70\x9d\xeb\xb1\xe9\x50\x09\x36\x7f\xa1\x38\x18\x79\xde\xdb\x1c\x75\x8a\x83\x61\xb6\x39\xae\xa3\xb3\x53\x8f\xdd\x4c\x30\x3c\xff\xc9\xc1\x25\xa8\x18\xc7\x7f\xb5\xe9\x00\xa7\x9a\xae\xbe\x6b\xb6\xee\x32\xdb\x14\xa6\x8e\xcb\xf4\xf1\xc1\x4e\x2d\x5d\x44\x8c\xfc\x7a\xd8\xa4\x94\x79\x9a\x6d\x2e\x66\x1f\xb4\xad\x8d\x9d\xab\xa2\xa2\x26\xee\xff\xf3\x7b\x60\x5e\x7e\x69\x23\xa0\xcd\xfb\x35\xd1\xa6\x03\xbd\x55\xf8\xfc\xe6\xac\xf3\x47\x24\xe1\x1a\xc8\x7a\xcd\x3c\x0c\x87\xd4\x3e\x8c\xb6\x52\xd1\xa9\x78\x76\x51\x2d\xde\x76\x37\x0e\xaa\x6d\xb8\x0e\x3b\xfe\xe8\x5a\xee\x11\x5a\x0a\x61\xf0\x0a\xe5\xa6\xa2\xb6\xc3\x01\x0e\xc5\x04\x15\xef\x3d\x97\x3f\xee\x90\x28\x8b\x90\xc8\x2d\xb1\xac\xeb\xbc\x1f\x38\x56\x29\x47\x9d\x12\xb1\x3b\x8f\x7b\x46\xb6\xdf\x80\xfb\xa7\xf3\xd9\x46\x71\x5d\x71\x2f\xbf\x07\xac\x89\xfb\x1b\xd3\xf2\xce\xfd\x67\x2f\x9f\x62\x5c\x12\xae\x83\x10\xcd\xd6\x52\x5e\xa7\x19\xcc\xd2\xe6\xc1\x0d\x42\xd5\xcf\x3f\x4c\x5b\xe1\xaf\xbe\xb0\x09\xf8\x62\x5f\x53\xfa\xdf\xde\x25\x4d\xd9\x78\x91\x9e\x16\xfe\x5a\x8d\x6b\x6d\x83\xc0\xbe\x30\x8e\x18\x05\x58\x25\xb9\x2e\xa7\xdd\x2b\x63\xe4\x32\xbb\x58\x4
b\xa8\xc0\x70\x18\x0f\x96\x47\x75\x30\x3d\x7d\x29\x68\xf4\x6e\xf9\x94\x6f\x47\x71\xf8\x1f\x8e\x50\x8e\xfd\x8a\x98\xed\x7a\xad\x2d\xba\x11\x7e\xa4\x56\xa4\x6a\x23\x71\x76\x6b\xfe\xd6\x89\xcd\x4b\xc9\x33\x06\x41\x06\x97\x9f\xa4\xe2\x1c\xaf\xf0\x9b\x42\xee\x81\x74\x5a\xfa\x0e\x86\xc7\x28\xcd\x00\x59\x83\xf2\x54\x6f\xd3\xaa\x6b\xd8\xf6\x7f\xb1\xee\x5a\xbf\x28\x2d\x59\x9f\x86\xe1\x33\x1c\xb4\xd3\x3c\x4f\x03\xed\x13\x60\xa6\x7e\x25\xf3\x35\x4b\x0d\x6e\xfa\xc1\x9d\xe9\x4b\x9e\x8d\xe7\x72\x4f\x2e\xe0\x10\xc0\x73\x60\x5e\x57\xe9\xac\xc7\x14\x3f\x38\x2d\xa3\xdd\x6a\xce\x50\x7a\xde\xb9\xa5\x00\x9b\x47\x11\x76\x5d\xe0\x74\x09\x62\x88\xf5\x9c\xf6\xb6\x8a\x25\x72\x3b\xc2\x11\x77\x57\x1f\xaa\x6e\x0b\xd5\xc7\x59\x66\xc8\x53\xa2\xe6\x93\xdc\x4e\x2b\xa6\x0a\x25\x38\x74\x27\x29\x06\xb7\xd9\x57\xfe\x61\xd3\xdf\x3e\x81\x8e\x06\x48\x85\x7e\x2d\x34\xf0\x12\x51\x46\xff\x9d\x33\x56\x53\x5d\xcd\xd5\x35\x9b\x52\xcc\x06\x6d\x74\x82\xe4\x63\x90\x6a\xf0\xb2\xc0\x35\x47\xa6\x3c\x9c\xaa\x77\xf6\xf7\x9e\xf5\x3c\x41\x4c\xb9\x87\x5a\xba\x0d\xcc\x73\x27\x3c\x9b\xd7\x54\x22\x4d\x75\xbf\x73\xfd\x30\xd0\x37\xcf\x9d\x59\xb0\xca\x59\x8a\x6e\x66\x48\x53\xd5\x12\x54\xc9\x73\x5b\x8b\x31\xfa\xa3\x53\x5b\xe4\xd5\x3f\x23\xaa\x3a\x94\x45\x40\x20\x0a\x04\xbc\xa4\xc9\xfe\x89\x94\xaa\x6a\x41\xdf\x2f\x1c\x12\xbe\xb2\xd0\xf6\x0b\x54\x4d\x6f\xd7\x15\x8e\x03\xbd\x93\xe1\x25\x36\xad\x34\x7a\x16\x7a\x6b\x08\xa3\x07\x9f\x77\x65\x73\x24\x6d\xb1\xe7\xdd\xf2\x2f\x89\x09\xd7\xea\xb1\xef\xf3\xf7\xde\x38\xc3\x32\x9f\x36\x50\xda\x9e\xea\x9d\x96\x78\x3a\x28\x6a\x92\x76\x9e\xf0\x17\x80\x90\x8c\x70\xd5\x04\xb6\x7c\xeb\x66\x9c\x05\x2b\xfb\x6f\xb6\xb7\x22\x33\xbb\xac\x24\x40\x92\xe4\xec\xdd\x2b\x79\xc5\xf2\xc2\x56\x0b\x9b\xca\x3d\x86\x71\xc8\x16\xac\xe7\x58\xed\xf3\x27\x91\xc6\x70\x8a\xec\x2a\x35\x52\xa4\xbd\x69\xe4\x2f\x8e\x6a\xb8\xad\x6b\x89\xc9\x5e\x16\x4c\x1e\x02\xc1\xc9\x4b\xf9\xa9\xc9\x9a\x27\x38\x45\xea\xb2\x63\xf8\x18\x12\xc7\xa9\x37\x3e\x76\x9a\x0e\x57\x9f\x4b\xd6\xb4\x69\xdd\xe5\xab\x5c\x1c\x14\x2b\xfe\x0c\xca\x24\x6d\xf9\x6
a\xd1\xa2\xb8\xd7\x8a\xc7\x5a\xa4\x1d\x18\x71\xbd\xd1\xda\x6a\xca\xdc\x74\x22\x33\x87\x16\xbd\x76\x52\xe9\xc7\x23\x2d\x47\xe8\x41\xe2\x26\x3d\xee\xd8\x38\x12\xfe\x07\xd5\xdd\xcb\xf2\x1e\xd9\xa2\xb2\xea\x5d\xdc\x7b\x9a\x69\x21\x89\xca\xdd\x8a\x02\xcd\xfe\x5c\xa3\x63\x9e\xfe\x5f\x5d\xc1\x28\x72\x22\xe9\x4c\x6c\xaa\x7b\x8e\x89\x33\x2d\x1b\xcc\xeb\x9f\xd7\xa7\xc4\x41\x0b\x47\x0b\xe4\xce\xbd\xc2\xe1\x78\x0f\x47\x53\x11\xdc\x9f\x7b\x13\x46\xbb\xd6\x94\x6d\x25\xad\x45\xcb\x53\x8e\xb7\x7d\x88\x92\x29\xb2\xb1\x8c\x23\x31\x07\xa5\x92\x73\x7d\x38\x15\x99\xb2\x84\x26\xa7\x4e\xe3\xef\x72\x9c\x13\x6f\x73\x33\xd0\xb3\x39\x1a\xb2\xed\xae\x26\x7f\xd6\xd3\xdc\x9a\x8f\xe4\x20\x23\xf4\x24\x61\x7e\x3b\x65\xd5\x00\x5f\x3a\x23\x24\xf4\x8c\xab\x70\x3d\xc4\x8d\x8b\x55\x53\xf8\x15\x21\x25\xbb\xeb\x62\x56\xfb\xee\xa4\x35\x06\x8f\xb9\xd9\x9f\x5c\xd0\xe2\x77\x7d\xda\x90\xdb\xdc\xa5\x51\x7a\xfe\x3b\xdc\x7f\x27\xeb\x54\xd8\xda\x08\xd3\xd1\x3a\x81\x94\x18\x48\xda\x63\xdd\x11\x85\x30\x7d\x4d\x30\xd8\x25\xfc\x39\x2f\x8b\xbd\x74\x0e\x81\xcc\x7d\xeb\x3b\x46\xf9\x25\x1b\xc9\x2c\x7e\x86\x50\x4f\x91\x32\x48\x79\x3b\xeb\x55\xa3\x31\x3f\x09\xd4\xc1\xdd\xe3\xe5\xea\x1b\x66\x5b\x58\xe8\x7b\x49\x6a\xed\xd9\xb0\x6d\xa9\xc8\xa0\xfe\xb7\xae\x92\x11\xf8\x33\x9b\x3f\x6e\xc6\x57\x9a\x43\x05\x9d\x67\x2a\x2b\x81\x1f\xe5\x6c\xda\xde\xa9\x35\x88\x7c\x81\xd1\xd9\xae\x42\x8c\x46\xf8\x73\x3e\x16\xad\x74\xf6\x6e\xae\x27\xa6\xd3\x69\x67\x1e\x15\x19\x1b\xe3\xd7\x2d\xc9\x02\xea\x34\xff\x89\x97\xb8\x96\x4a\x64\x12\xa0\x51\xa1\xf1\x08\x71\x18\xf8\x14\x78\xa2\xff\x8b\x21\x30\x1c\x73\x6a\xbc\xe0\x7d\x72\x87\x88\xfd\x0e\x1f\x21\xb5\xd9\x58\x11\x0f\x4f\x19\xed\x6d\x8f\x00\x23\xe4\x40\x0f\x22\x47\x66\x48\x5a\x42\x4d\xad\x08\xeb\x89\x9c\xe7\x08\xb7\xa1\x5b\x81\x09\x8f\x59\xb8\x94\x53\xc2\x2e\xc9\x19\x2f\x89\x54\x8e\x4a\x9a\x19\x93\xc0\xb3\x05\x9e\xd9\x42\xc8\xb1\x35\xe7\xaf\x6f\xc2\x28\x1c\xd6\x99\x4a\x2c\x78\xbe\x35\x29\x7c\xf8\x1d\x56\xd3\x5d\x43\x8f\x3e\x48\x08\x88\x1f\xba\xf6\x18\x19\x6b\x28\x41\x5a\x4e\xa0\x72\x6e\xb5\xff\x64\x9
b\xe9\x4a\x5f\x18\x59\x6f\xc0\x7e\x01\x7c\xc9\x22\x5c\x71\x50\x89\xb9\x10\xf3\x93\xa6\xba\x94\x69\xf3\xa6\xf4\xbf\xec\x3d\x76\x38\x99\xdd\x31\xcc\xc2\xf0\x78\xf1\xf7\x28\x29\x8d\x4e\x74\x6e\xff\x35\xdc\xb0\x23\xf3\x34\x37\x05\x14\xa1\x77\x3a\x5e\x69\x74\x66\xca\xd7\xb6\x80\xb5\xbc\x46\x0e\x92\x4f\x3b\x5e\x81\xf8\xb9\x6d\x0f\x98\x81\x31\xcb\xed\x14\x36\xb7\x0c\x3e\xe4\x74\xac\x3e\x17\xf5\xb9\xeb\x10\xa0\x3b\xc9\xc8\x92\x5d\xb8\x3b\xf0\x6d\x49\x95\x65\xa7\x99\x96\x9c\xce\x36\x58\x85\x85\x9f\xd3\x7f\x09\x94\x79\x18\xd3\x12\x2e\xf6\xef\x46\xca\xc2\x2d\xd9\xee\x0c\xa5\x53\xfa\x72\xa9\xdf\x6c\xd9\x1e\x72\x2d\x64\x8d\x28\x64\x27\xb0\x99\x7f\x29\xc6\xf6\xdc\x92\xce\x30\xe7\x06\xe3\x6a\xe1\x3e\x5a\xf6\x11\x26\x5a\x8c\x5e\xde\xb4\x13\x84\x75\x0c\xb6\x35\xb2\x78\x28\xf9\xe4\x69\x02\xef\x60\x39\x40\x91\x3b\x94\xf3\x58\xcb\x34\xa0\xad\xac\x37\xca\x26\xb5\x84\x6f\x49\x6e\xec\x59\x20\x16\x7e\x74\x08\x07\xa1\xbe\xd5\x22\x5e\xcc\x0e\xfa\x1c\x72\x33\x9c\x6d\x42\x5e\xd2\xe3\x91\xaa\x2e\x0e\x17\x82\x13\xf3\xe1\x49\xb6\x80\xed\x79\xf9\x78\x5c\xc0\xf7\x05\x09\x79\xb3\xd5\xc1\x64\x38\xb8\x73\xd5\x68\x45\xa2\x82\x6f\x8b\x10\xa0\xb2\xaa\x07\x75\x2c\x5a\xce\xfc\x9c\x4e\x72\x48\x96\xa1\x9c\x05\x9a\x1c\xc3\x74\x79\x4d\xb5\xb5\xd4\x1d\x39\xd3\x07\xf3\x3b\x0c\x67\xac\xff\x95\xab\x8a\x1d\x4c\x4f\x51\x7b\x5d\xb7\xd3\xc1\xe4\x8f\xfd\x79\xf3\x05\x33\xeb\xce\x32\x0b\x3a\x23\xe7\xea\xbd\xaa\x9c\xfc\xca\x02\x0a\xd9\xd7\xcb\xaa\x91\xba\xae\x30\xb0\x87\xc5\xd4\xcf\xa2\xac\xc0\x26\x97\x76\x00\x4d\x58\x9f\x23\x43\xcc\x55\xad\xc1\xd4\xde\x3b\xdf\xd6\xf7\x61\x3d\xc4\x13\x78\x7c\x25\xfe\x35\x67\xf4\xff\xe6\x14\x07\x23\x6d\x9c\x56\xa1\x86\xde\x18\xd0\x0e\xc2\x8b\x68\xc7\xc7\x91\xa8\x5a\xb2\x96\x71\x10\xa2\x6b\x5a\xa9\x61\xa0\x06\x69\xa0\x1f\x32\x5d\xf5\xc7\x61\x45\x20\x44\x83\x04\x07\x2d\xd1\xf2\x2d\xa0\x8e\x06\x66\x83\x55\x34\x17\x26\x3e\x7e\xb6\x84\x9a\xeb\xeb\xd5\x82\x6b\x46\x41\xa8\xf6\x25\x70\x6b\xcf\x79\xea\x95\x44\x50\x63\x5f\xd8\xdf\xad\xbe\x8d\x63\x77\x5c\xa5\x77\xa0\x3a\x1a\x94\xc7\xfd\xf8\x3e\x4f\xc
a\xe6\x74\x52\xf5\x87\x8a\x1b\xfc\xb3\x1e\xa8\x19\xf8\x68\xd7\x39\x8a\xd3\x11\xb1\x74\xbd\x04\x2e\xb8\xcc\x16\x66\x1b\x1c\xad\x9c\x22\x6e\x39\xb2\x5d\xa5\x1c\xa9\xc7\x70\xd6\x78\xb5\x14\x9b\xcc\x10\xbe\x2a\x8d\x70\x94\xda\x6f\xb4\x3a\x69\x8a\x85\xc5\x76\x88\x33\x25\x92\xba\xcd\xae\x66\xd4\x65\xbe\xe2\x60\x4d\x5d\x74\x7f\x73\xab\x8c\xa3\xdf\x50\xd4\xa1\x8f\xa2\x8e\x72\x5a\x8e\xfd\x11\x50\x90\x94\x40\xfb\x03\x66\xf8\x4e\x4d\xc7\xb6\x22\x1b\x4e\xa8\x84\x1b\x24\xfd\xa1\x86\x43\x8d\xe6\x79\xfd\x1c\xa8\xf9\x46\x38\x73\x89\x77\x35\xc5\xd1\x79\x2f\xdc\xec\xb2\xac\x20\x64\xbc\x53\xe6\xdf\x36\x2a\xf3\x89\x83\xc7\x58\xe1\xbc\xc6\xe7\xf6\x9e\x23\xfe\x9d\x69\x86\x71\x31\x7e\x23\x78\xf6\xf5\x55\x3b\xb5\x54\x61\xf2\xd2\x2d\x86\xce\xd0\x70\x5a\x89\xbb\xf2\x17\xcf\x96\x50\x5d\x4c\x6b\xe9\xef\x5e\x38\x07\xa8\x20\x20\x25\xe7\xb1\x78\x8d\x39\x1d\x22\xd0\x0a\x1f\x27\x8a\x96\x05\xc6\x35\xa1\x3b\xd5\x83\xc9\x52\xa0\xf9\x79\x83\x65\x4a\xe4\x8d\x63\xf8\x2b\x62\x1c\x8f\x01\xff\xd4\x2b\x32\x0c\xa7\x5d\xa3\xca\x68\x8f\xd7\x18\xd7\xe3\x1d\x6d\x3f\x0a\xfe\x5a\xfa\x9a\x81\x97\xed\x71\x56\x80\x98\x45\x60\x39\xee\xe0\x83\xf2\xb2\xb3\x8a\xc4\x00\x99\x54\xf4\x6a\x04\x00\xcf\x19\x02\xd8\xa0\x79\xec\xe6\x37\x58\x38\x95\x80\x8b\x22\xcd\xb0\xeb\xa5\x02\x83\x4b\x9c\x65\x61\xea\xf6\xc7\xc0\x0b\x89\x9e\x34\x62\xaa\xa6\x0c\xbe\x09\x77\x65\xd1\xa4\x55\xad\xb7\x86\xc6\x84\xeb\x68\x55\xfd\x7f\x5c\xbd\xc5\xb2\xec\xca\xce\x35\xfa\x40\x6e\x98\xa9\x69\x66\x66\xf7\x5c\x66\x66\x7c\xfa\x1b\x6b\xed\xfd\x9d\x73\xfe\x1b\xb3\x37\x23\x5c\x95\x95\x29\x69\x8c\xa1\x94\x25\xad\xbb\x7d\xce\x78\xa3\xd6\x00\xcd\x6b\xa2\xc8\xa9\xf8\x7e\xe2\xa1\x3e\x8c\x42\xdb\x3f\x26\x15\x47\xa4\xc0\x4f\x65\x87\xd6\x21\x58\xc7\xa0\x34\xe9\x81\x57\x07\x63\x39\xcc\x45\x70\x0a\x85\xa3\xfe\x7b\x5e\x35\x53\xef\x6b\x58\xab\xdc\x3d\x71\x86\xf2\xb9\x99\xd0\x01\x14\xb1\x04\x6b\x2f\x66\x66\xaa\xf4\x59\x09\xc7\xd2\xeb\x2e\x9f\x94\xf8\xf7\xc7\xb9\x15\x59\xc2\x07\x7a\xd2\x37\x40\x86\x6f\x65\x52\x7f\xf8\x85\xcc\xe0\xf2\x6f\x58\x8f\xe1\xf4\xaa\x3a\x65\xdf\x49\x8
3\x22\x8e\xc6\x09\x29\xb7\x34\x83\xc8\x1c\x7b\xf9\x4f\xf9\x08\xc3\xf0\x22\x10\x0f\x5a\xf0\x46\xae\x20\xf3\x2a\xef\x35\x99\xed\x82\x0e\x72\x03\xdf\xb5\xe9\xd0\xd0\x7f\x13\x3f\x03\x87\x68\xea\x53\x51\x07\x6c\x1d\x89\xd7\x4c\xe6\x83\x2a\x28\x2e\x37\x17\xfc\x56\x70\x7d\x9e\xd4\x93\x14\x65\xa1\xb7\x86\xde\xec\x2c\x58\x13\x4c\xf5\xca\x21\x7c\xd2\xa0\xe5\xff\x05\x20\x86\x11\x3c\xb6\x11\x9a\xd1\x33\xe3\x96\x72\xda\xd4\x97\x7e\xce\x44\xc4\x67\x59\xa9\xbf\xa8\x4a\x27\x93\x5d\x7a\x63\x98\x7a\x14\x10\xef\xd3\x24\x43\xc3\x76\xcb\x29\x80\xcf\x82\x70\xa6\x94\x72\x70\x0d\x7f\xfa\x14\x7f\x1f\x82\x6f\x3a\x44\x7b\x22\x96\x91\x3d\xb6\xfa\x2f\x46\x2a\x0c\xf6\x87\x0f\xa4\x77\xf4\x58\x48\xa9\x3f\x9c\x71\xc5\x9f\xcf\xdb\xa7\x69\xb6\xe4\x44\x4f\x5c\x9b\xe2\xa1\x88\x93\x84\x0e\x11\x57\x88\x5f\x25\x79\x4e\x34\x91\x4a\x33\x2e\x73\xce\xd8\xab\x4f\x33\xe8\x2f\x6f\xf1\x3e\x97\xea\xf9\x54\x54\xb5\x5c\xff\x8f\x3b\xf3\xad\xa1\x5e\xa6\xa9\x44\xd5\x58\x8e\x66\x1a\x8e\xdd\x9d\xf2\x59\x15\xb1\x33\x32\xde\xaf\x5b\x50\x5d\x4f\x84\xc0\xf6\x6a\x90\xd4\xc3\x90\x04\x8d\xd5\x47\x57\xbf\x1f\x6a\xd4\x60\xcf\xd5\x7b\x39\xfe\x1e\x9c\xe9\xb5\x54\x1a\x8e\x42\x33\x0c\x88\xf8\x52\x87\xd1\xb7\xda\x62\x0e\x45\xb9\xf4\xff\xd8\x32\x63\x8d\xab\x6a\xa4\x8d\xdd\xf6\x66\xb2\x8b\x93\xb9\x8a\x18\x2d\xbb\x7c\xc5\xab\x4c\xc7\x65\x5b\xa8\x25\x19\xd5\xfe\xf8\xf4\x47\x1b\x28\x52\xcc\x60\x4c\x3c\xe8\x7c\xbb\x9f\x7d\x02\x4b\x51\xb7\x3c\x84\xae\x76\x4c\x00\xe1\x3b\x06\x6e\xd9\xde\x7e\xb7\x89\xce\xca\x02\x7d\xc1\x8d\x13\xdb\x9d\xca\x1f\x33\xfa\x1f\x5f\xe5\x5d\xdc\x6c\x0e\x4e\xd0\x09\x69\xad\xc7\xa2\xd2\x5c\xd5\x1c\x7e\xdf\x40\x2c\xa1\xb5\x42\xd1\x39\x08\x5e\x36\x38\x84\xec\x4e\xa4\x40\x77\x3d\xbd\x83\x8e\x46\x5b\x21\x09\x6d\x8f\x2c\xd1\xe6\x3b\x6a\xfd\xe0\xa9\xbf\x68\x92\xa4\xe0\x52\x20\x1c\x2a\xe4\xdb\x73\xc2\xff\xb5\x2f\x53\xa8\xf5\x54\x8f\x36\x36\x51\x71\x2f\x30\xcb\x0d\xce\xf5\xb6\xb2\x74\xbb\x3f\x84\x5f\xf2\xc1\x70\xaf\xd7\x27\xb7\x16\xa8\x4c\x97\x53\x80\x98\xbf\x81\xae\x26\x10\xb2\xa3\x9f\x2f\x95\x27\x2c\x94\xe5\xd7\xef\xd7\x60\x2
d\x90\x45\x52\x39\xb4\x84\xce\x0a\xe0\xcf\x1f\x60\xc7\xf0\xff\x68\x0c\x96\x31\xb9\xac\x0b\xbc\x3f\xf1\x8f\x5c\x56\x1b\x78\x09\xd5\xfb\x90\xec\xc9\xb6\x30\x1b\x42\x61\x3e\xc2\xf6\xd3\xa5\x19\x9e\x60\xab\xb7\xf2\xf4\xdd\xd0\x26\xa4\x3b\xaa\xea\x7e\x55\x81\xcd\x27\xdd\x48\x46\xda\x1b\xfd\x72\x13\x5c\x90\xa4\xf8\x15\x2a\x0b\xa2\x59\x1d\xb3\x9f\xca\xac\x40\x35\xf9\x5f\xae\xcf\x31\xe3\x79\x23\x61\x4a\x5b\x1e\xab\x16\x4b\x46\xfb\x2b\xca\xbe\xa4\x69\x98\x83\x91\x1e\xcb\x38\x13\xc0\x03\x54\xa8\x89\xa2\x45\x43\xdb\x17\x0d\x56\x17\x82\xfc\xd1\x4d\x8d\x6a\x6e\x5c\x98\xa8\xef\x00\x71\x79\x94\x5c\xb7\xe2\xc8\x58\x0e\xd5\xe8\xff\xf3\xd9\xaa\x50\x79\x7d\x90\x75\xe1\x3a\x04\xc2\xf1\x3e\x76\x37\xcc\x5e\x89\x24\x67\x48\xa1\xc8\x94\xd9\x0f\x60\x3f\x79\xf3\xc4\x24\xb2\x2a\xe4\xfe\xd7\x0f\x73\xbd\xcb\x13\x5a\xd5\x0a\x86\xd5\x8c\x9d\x0a\xdb\xe1\xfc\x2d\x2c\x5b\x0b\x5e\xaa\x60\xae\xb3\x2c\xec\x9d\x67\xff\x71\x43\xd1\x3c\xfd\x1a\xd3\x14\xc2\x89\xfb\xd6\x3d\xc2\xd5\x83\x9a\x1d\xee\x4d\x5d\xd9\x11\xa0\xb2\xd4\xad\x24\x8f\x95\x28\x5b\x60\x53\xa5\x44\x50\x78\xba\x48\x3f\x88\x63\x1e\x76\x34\x9b\xbe\xc6\x4b\x8c\x81\xea\xeb\xbf\x38\xd7\x0a\xe3\x12\x5a\x1d\x4c\x68\x48\xec\x5f\x2e\x21\x87\x6a\x66\x78\xac\x0a\x8d\xbf\x03\xa1\xc5\x40\x50\xdc\xda\x30\xbb\xc9\xdf\x42\x6d\x29\xa4\x59\xf8\xc9\x8d\xb1\x1c\x06\x98\xd3\x4c\xad\xfa\x03\x5e\x26\x35\xa7\xb2\xc8\x7f\x31\x9b\x61\x7a\x4f\x14\xfb\x3e\x99\xdb\x4d\x6d\xb3\xd8\xda\x0b\x7e\x2d\x9c\x55\xc0\x70\xca\xdc\xcd\xc6\x29\xda\x3a\x54\xfb\x23\x05\xd2\x97\x65\xde\x49\xfd\x72\x9b\x93\x4d\xfb\xce\x5d\xe6\xff\x1c\x89\x65\x62\xa1\x75\x17\x56\xfa\x90\xdc\x55\x8e\x3c\x45\xa3\xae\x99\x8e\x28\xa2\xfd\xb5\x70\x12\x79\x00\x3a\xdb\x21\xa6\x50\x5b\x1c\xf6\xf3\xea\x5e\x6b\x0c\x76\x73\x43\x37\xf6\xf8\xff\xe1\xdd\x0a\xaf\x7d\x48\xe2\x12\x72\x34\xb2\x69\x38\x7e\x9c\x17\x95\x18\xa1\x7a\xab\xef\x64\x5f\xc0\x83\x1e\xae\xf4\xc3\x59\xc9\x6f\x42\x86\x34\xd2\xb7\x26\x55\x5a\x8a\xc0\x10\xce\xf4\x5f\x9d\xf5\x28\x85\xe0\x85\xeb\x8d\x64\xcf\xbc\x34\x33\x08\x50\x6c\x63\xaa\xa3\xa1\x1b\x7b\xe
d\xda\xa2\xa3\x2d\x21\x21\x87\x73\xb6\x28\x12\x75\x89\xb7\xc0\x14\x8a\xfb\x3f\xba\xa6\xb6\xe7\xbc\xe3\x21\xc9\xb1\xf4\x3f\xfb\x67\xae\x42\x52\x61\x7f\xf8\x76\x0d\x93\x15\x88\x12\xd9\xd2\xc2\xa3\x01\x28\xbe\x8d\x5f\xa6\x37\x88\x4a\x12\xf4\xd9\xfd\x31\x78\xb1\x7e\x50\x43\x7a\x42\x7d\xef\xff\xe1\x80\xfa\x5b\xc9\x5f\xc7\x26\x66\x7b\x14\xaa\x30\x65\x7b\x9e\x56\x36\x9b\x6d\x4b\x4c\x71\x30\xa2\x3c\x0a\xb1\x6f\xdc\x9c\x36\xb9\x96\xb2\xc4\x5e\x50\x27\x06\x4b\xea\x6d\x2a\xfe\xd7\x18\xff\xc1\x27\xbb\x99\xdb\x5d\xcd\x9c\xb6\xff\xa4\xfa\x79\x47\xe7\x79\x04\xe6\xe0\x21\x14\x5a\xcf\xfc\xe8\x8d\xd5\x60\x2f\x67\x11\x93\x4f\x49\xd3\xb4\x7d\x84\x39\xdd\x9a\x96\xa5\x15\xff\xdf\x77\x94\xfe\x4e\x47\x16\xa1\x36\x72\x1e\x5e\xb2\xe0\xa8\x61\xe4\x3c\x9d\xcc\xe5\x6c\x36\x75\x7d\x22\x0e\x59\xc9\x16\xab\x7b\x2f\x7d\xde\xfe\x7d\xb5\xf9\x52\xdf\xfc\x61\x64\xcf\xcc\xd5\xee\x3f\x44\x9a\x65\x94\xd3\x1f\xf0\x95\xc3\x1a\xde\x4f\x7f\x7e\x62\x74\x65\xa3\xf4\xe7\x4d\x13\x04\x1b\x68\x83\xfc\x8b\x63\x77\x3d\xc2\x75\x72\x60\x6f\x78\xa5\x0c\xde\xb9\x57\xb7\x04\x2e\xbb\xf7\xbc\xf9\xbf\x4f\x51\x19\x5f\xdf\xa7\x6c\xa7\x71\xa7\x3c\xc2\x55\x54\x25\x8b\x93\x6c\x97\x38\xc3\xc7\x12\x14\xbf\x48\xfb\x00\x12\x6a\x69\x2a\x56\x5f\x1d\x53\xc0\x56\x04\x9e\xb8\xbb\x7f\xdf\xdd\xfb\xe3\x4b\x6b\xc0\x78\xae\xa5\x2a\x41\xff\x3e\x18\x81\x4c\xcc\xd0\xef\xf0\x08\x47\x2b\xec\xad\x81\x33\x0b\xb5\x34\xd4\x9e\xa7\xcc\x6d\x23\xdb\xeb\xb2\x38\xfe\x66\x27\x89\xf3\x5f\x48\xdc\xfd\xe6\xd5\xc2\x45\x9a\x24\x87\x9f\xaf\xb4\xe3\x32\x99\xcb\x42\x77\x45\xcd\x2f\x67\x92\xb6\x0e\x1e\x69\x86\xb4\xc6\x61\xdf\xfe\x46\x88\xef\x1d\xd8\x9c\x2d\x94\xf4\xbf\xf6\xc1\x39\x1a\xf3\xf1\x38\xc7\xc6\x1a\x3b\xbc\x67\xfe\xec\x66\x2b\x58\x6d\x92\x7e\xce\x3c\x46\x23\x6e\xf1\xe8\x5a\x79\x58\xec\xb7\x56\x2b\x2a\xbf\xff\xe1\x19\x0e\xa3\x84\x38\xbf\x3b\xac\x32\x29\x4e\x98\xb8\xc6\xac\x72\x8a\x9f\xa7\x5d\xd2\xf2\xa2\xc9\x40\x53\x49\x33\xb9\x1b\xfe\xff\x9e\xd1\x07\xbc\x8c\x5e\x5e\x31\xb0\x45\xfd\xdc\x44\xe4\x1a\x8d\x4f\x7b\x6e\xbd\xc2\x7b\xd5\xc6\xe6\xf6\x7b\xfc\x4c\x6e\xc
b\x50\xf9\xb1\x37\x81\xff\x38\x34\xab\x29\xcb\x39\x50\x3a\x27\xda\x66\x9a\x0e\x3f\x3f\xd9\xb0\x9a\x49\xeb\xd7\x57\x2f\xf3\xce\x1f\x4e\x1e\xda\x7a\xcd\x39\x46\xeb\x2d\xe0\x7f\x72\x07\xba\xa0\x71\xb8\xc8\x78\x3f\x39\x97\xf9\x43\x54\x1c\x83\x91\x82\x68\x08\x9a\xde\x64\x77\xa7\x69\x4a\xf7\x2d\x3d\xd2\x0a\x7a\x9e\xb8\x89\xfc\x3f\x7e\xab\xaa\x3f\x33\x1b\xa2\x25\x54\x1a\x7e\x6d\x3a\xa8\xdc\xc2\xac\xf3\x1b\x86\x6d\xf0\xdf\x4f\x4c\x83\x28\x18\x1b\x66\xcc\x81\xa8\x59\x18\xb6\x67\xff\x77\x9d\xa7\x60\x56\xf5\x72\x77\xba\x31\x65\x24\x1f\x4b\x4c\x27\xb8\x6e\x28\x61\x16\xdc\x02\x13\xb9\x73\x56\x86\x19\xb5\x81\x61\x3d\xc7\xfc\x87\xda\xf4\xd1\x20\x36\x4c\xa2\x39\xc2\xd6\xe3\x67\xd6\xee\x4c\x33\x18\x5b\x98\xa4\x5d\xf0\x13\x94\x6a\x80\xac\x15\x52\xfb\xa4\x34\x6a\x83\x56\xfe\xef\x21\x8f\x6f\x8f\x70\xb4\xe0\x88\xe5\x64\x95\x37\xb2\x2e\x9a\x61\xed\x38\x98\x66\xb3\x7f\x1b\xaf\xa5\xbc\x2c\x2a\xfb\xa2\xb7\x4a\xed\xb0\x46\x6d\xff\x17\x23\x54\xc6\xf0\xb4\xa4\x1f\x83\x7b\xdc\xe9\x73\x6f\x19\xb3\xb7\x55\xa5\xa7\x40\xf5\x0d\x9c\x9a\x1c\xa3\x56\xbb\x87\x90\xed\x79\xe9\x7f\x74\x36\xdb\xbf\x72\x4e\xa4\x56\xdb\x0f\xef\xdd\x7b\xfe\xea\x18\x89\xb7\xcf\xa1\x33\xeb\x52\xf2\x61\x0b\x03\x95\x09\x46\x6d\xe6\x28\x77\x8a\xfc\x3f\x8e\xea\xf0\x7e\x28\x88\x84\xa9\xa5\xba\x98\xe9\x6e\x26\x3a\x0a\x43\x38\xb6\x77\x47\x3b\xac\x2d\x2e\xfb\x7a\x33\xe1\x61\x82\x20\x31\xff\xef\x73\xae\xea\x05\xbd\x5b\x2f\x8c\xa0\x2d\xb1\xf6\xe3\xb3\xa1\xe2\x73\xac\xeb\x2f\xdc\x9e\x7d\x3e\x09\x04\xc7\x98\x4e\x4c\x28\x8b\xb2\xdb\x9e\x7c\xec\xb3\x51\x03\x04\x28\xdb\xe2\x22\xed\x8d\x5c\x91\xec\x7a\xfd\xcf\xfe\xb2\x6a\x72\xf0\x5b\xe0\x18\x0d\x14\xb5\x59\x71\xfa\x83\x37\x8a\x76\xa9\x62\x36\x7d\x79\x7d\xd1\x00\xc4\xb9\xed\xdf\xb5\xd7\x06\x76\xbc\x7a\x1a\x8e\x24\x9a\x10\xbc\xa3\x69\xe1\xd0\x24\x1d\xa7\xb2\xea\xff\xea\x41\x9c\x1b\xf8\xa4\x8d\x59\x6a\x52\x3f\x49\xc9\xdc\x7b\x21\x15\x3c\xd6\x2e\xf8\xd9\xad\x06\xb6\x60\xe6\x85\x8e\x85\x0e\xd6\x30\xcf\x6c\xcb\x52\x72\x3f\x03\x7d\xa9\x72\x09\x99\xc9\xbd\x86\xf8\x2d\x58\xab\x04\x21\x26\x19\x77\xfb\x7
f\x0b\xe4\x1c\xe3\xb7\x12\xe7\x45\xd2\x60\x5d\xaa\xd5\x92\xe1\x28\x89\xbd\xf6\x47\x55\x50\x82\x9b\x57\x67\x56\xf9\x8f\x79\x05\xa6\xae\x45\x65\xa8\x8d\xa6\x87\xa5\x19\x86\xe8\xbd\xac\x4d\xb2\xce\xa4\x05\xc5\xbb\xd3\x01\x8f\xa2\xda\x16\xaf\xd5\x96\x73\xd8\x25\xb9\x12\xac\x70\xb1\xba\x03\xaa\x38\xc6\xe3\x1d\xf7\x76\x84\x55\x66\xc6\x04\xb4\x26\x95\x6d\xfe\xf3\x7b\x14\xde\x80\x8a\xc9\xa5\xaa\x12\x04\x3d\xd4\xfd\x3e\x95\xf8\xd8\xae\x7f\x10\x53\xea\x23\x57\xcd\x4b\xb9\x51\x60\xaa\x11\x35\xd5\xd7\x1a\x07\x59\xe1\xd0\x3b\x5f\xcf\xdf\xbd\x4f\x3f\x36\x2f\x3f\x86\xf9\x78\xe3\xa7\xac\x5e\xf6\x3e\xb7\xe8\x58\xf4\x7a\xf6\xb0\xa4\xcc\x13\x5c\xea\x57\xa7\xd3\x01\x3a\xf0\x03\x48\x83\x69\x77\xda\x75\xdd\x13\xfe\xd8\xe2\x89\xfc\xb8\xe0\x3f\x66\xcc\xbc\xa1\x9b\x05\xfd\x33\x94\xfd\x34\xe3\x9a\xb9\x20\xa7\xac\xf4\x08\x77\xfc\xb2\x4b\x44\x2d\x7c\x21\x81\x6e\xf7\xcd\xf9\xcb\xbb\xef\x5a\xa7\x22\xd5\xa5\xe4\x1d\x89\x24\xc5\x9a\xc7\xd4\x97\x6a\x40\x4c\x91\x09\xa6\x67\xec\xa9\x61\x36\x39\xc6\xea\xa5\xff\xb5\x5b\xae\x0f\xda\x35\xec\x06\x27\xa8\xf7\x66\x2f\x7f\x23\xae\x1b\x87\x96\x1b\xc5\x3c\x76\xd9\xef\x53\x10\xd3\x0f\xc6\xad\x00\xa4\xb5\xa0\x05\x63\xa6\xab\x65\x1f\x67\x4e\x31\x70\x9a\xb0\x43\xa2\xbe\xa5\x34\x56\x25\x53\xe3\x68\x51\xe0\xf8\x4f\xe1\xff\x6b\xa2\xaf\x9f\xbe\x2e\x3b\x72\x0a\xa7\xe9\x48\xf6\xb9\x2b\x17\x91\xe5\x9b\xf9\x7d\xf6\x5b\xca\xa5\xd4\xf1\xc7\x2c\x49\x42\x5b\xde\x12\xfd\x00\x22\x8c\xd2\x3b\x7d\xfc\xb9\xcf\x4f\x08\x69\x02\x8b\xc7\x14\x36\x0b\xbd\x25\xb2\x03\xfc\x74\xda\xf4\xbf\x7c\xb7\x96\x9c\x4f\x4a\xa0\xa8\x7d\x86\x47\x50\xc2\x6c\x0b\xf0\x17\x2e\xa7\x46\xe9\x27\x2f\xdb\x64\x88\x56\xdc\x9c\x77\x20\xb5\xf2\x34\xfb\xa1\xaf\x93\x8a\x6a\xe8\x2f\x49\x4d\x82\x8f\x89\xe5\x53\xc6\x59\xca\x85\x89\x88\x96\x1a\xb3\x6e\xc4\x61\xf1\x5c\x2d\x81\x84\xf1\x7f\x39\x45\x30\xf2\x7d\x2b\x2b\x82\x12\x63\xfb\xfb\x6d\x6f\xbe\x46\xe0\x84\xae\xb4\xbf\x8a\x4e\x21\x17\xa8\xf9\x95\xa8\x6a\xd5\x0b\x28\xe3\x60\x4f\x1f\x66\x3f\xba\x88\xde\xe0\x76\xb2\xdc\x7c\xe6\xba\xd3\x30\x06\x45\x19\x4f\x0a\x31\xcf\xde\x9
b\x6a\x9e\xb2\x3b\xac\xd1\xe8\xff\x1b\x63\xb8\x3f\x7e\xd9\x05\x19\x6f\x19\x3e\xdc\xe3\x44\x3b\xd9\xa3\x55\x88\x7d\x29\x41\xe3\x74\x6c\x19\x5c\xc4\x9e\x82\x42\x96\x87\x91\x0d\x11\x62\x46\xcd\xc9\x2b\xed\xb7\x44\x9c\x82\xd6\xee\xc9\x6f\x1e\x97\x7d\x98\x92\x61\xa6\x56\x82\xae\x9b\x2a\xe7\xc5\x82\x7b\x7f\x9c\xc2\x20\x4e\xff\x49\xf7\x1f\xc1\x06\x41\x34\x59\xc6\x03\x5c\xc4\x21\x1c\xa2\xc5\x2f\x23\x2e\x1b\x07\x65\xbc\x52\x58\x56\x01\x42\xec\x38\x2f\x42\xca\x4b\x5b\x4e\x62\x87\x61\x24\x36\x71\x67\x68\x46\xc9\xe3\xb2\x01\xc5\xd9\xe9\x37\x9f\x78\x5f\x63\xb3\x2b\xd4\x66\x58\x42\x8b\x38\x44\x8b\x2e\x50\xd5\x4a\x77\xcd\xe7\x73\x5c\x7e\x8d\x1d\x28\x7f\x60\x21\x48\x7f\x61\x5f\xbe\x09\x64\xed\xd8\x7b\x06\x2f\x6b\xb1\x96\x80\x44\x8e\xc6\x2a\x4a\x21\xbb\x10\x96\x19\x9e\x34\xb7\x0d\x84\xcf\x8e\xd1\xfc\xe5\xb6\x2c\x53\xae\xed\xb1\x86\x13\x00\x55\xda\x11\x91\x55\x04\x1f\xae\x92\xdb\xc5\xf8\xa9\xaf\xfa\x4f\xf2\xe1\x89\x83\x50\x66\x67\xcf\x94\x41\x1b\x63\x9c\x46\xe0\x5b\x7a\xdc\x21\x64\xf2\x33\x9c\x3e\xc2\xf1\x40\x8f\x97\xca\xf9\xa3\x0a\x4f\x6c\xd5\xb9\x3d\xcc\x80\x82\xec\x7f\xd7\x8f\x04\x1d\x97\x1d\x85\x47\xd2\x65\xc9\xec\x26\x83\xc6\xae\x17\xbe\xcc\x37\x0f\x04\xc0\xb6\x31\xae\xf3\x83\x4f\x26\x76\x85\x5b\x42\x4d\xb5\x04\x6e\x74\x26\x28\xfc\x73\x2c\x6b\x74\x9c\xbf\x89\x6c\xa9\xbc\xb8\x2e\x9b\x90\x0a\x9a\xc6\x5d\x7c\x53\x73\x87\x61\x6a\xb6\x01\xfb\x50\x0f\x5e\x59\x0d\x94\x04\x10\xc3\xf5\x30\x1f\x5c\x71\xf3\x6d\xc8\x37\x3a\x77\x59\x56\xd0\xda\x22\xe5\xf7\x03\xed\xa8\xb8\xa6\x58\x41\xb8\x79\xc9\x40\x46\x9f\x41\x05\xab\xfc\x94\xbe\x7f\x6d\x1a\x64\xb1\xcb\x55\x79\xa6\x58\xfb\xe6\x86\x85\xba\x58\x5f\xab\x51\xbb\xbf\x8c\x89\x55\x90\x8c\x28\x2c\x04\x20\x81\x34\x85\x33\x3a\x57\xeb\x99\x9d\xff\xec\x15\xde\xf6\x8b\x9e\xfe\xf4\x54\xe9\xf8\x9d\xab\x83\x76\xf3\xe7\x37\x1e\xa2\x70\x3e\xc5\xb9\x6d\x39\x45\x5f\xc9\xaa\x24\x47\xbc\x6b\x51\xfc\xb2\x49\xb4\x24\xc7\x8d\x2c\xf6\x6e\xbc\xd3\x3f\xdf\x9d\x34\xf5\x68\xd3\x00\xeb\xf9\x6a\xde\x4d\xb7\x5c\xf6\x21\x93\xee\x26\xfb\x87\x07\x38\xfa\x0c\xa0\xbf\x0d\xc6\x9
c\xbc\x72\xd6\x41\x60\x18\x5e\x49\x98\xc7\x1c\x08\x6b\x27\x63\xee\xd6\x2d\xdb\x28\x70\xa6\x1f\x33\xac\xfb\x3a\x66\xec\x3f\xf9\x25\xc9\x0c\x29\x46\x2a\x47\x51\x03\xeb\x26\x0b\x97\x5a\x75\xe3\x2c\xf7\x1d\x73\xb1\x6e\x65\x10\xb5\x14\x5c\x2f\x93\x8e\xc3\x62\x9e\xe2\x5e\xa3\x97\xcf\x22\x08\xbc\x3a\xf8\x13\x5f\x88\xea\x42\x3c\x9f\xf0\x54\xa3\x34\xa4\xb9\xc7\x1c\x29\xe5\xff\xe6\x81\x04\xae\x5d\xd7\x75\xad\x2e\x34\xc1\x08\xd1\xe1\x34\xe1\x6f\xfc\x0d\x07\x3c\xd2\x46\x33\x7d\x24\x51\xb9\x00\x61\x79\x97\xb1\x9a\x00\x6c\x38\x76\x68\x0c\x00\x5a\xcf\xa4\xe5\x7d\xe8\x74\x89\x59\x4e\x28\x97\x72\x33\x71\xf8\xd8\xe1\x45\x44\xfc\xb2\xc4\x31\x83\xb6\xb3\xd1\xf4\xe1\x02\x3d\xde\x5a\x8d\xb9\x49\xb6\x92\x08\x8a\x7a\x48\xba\x94\xb5\x7f\xf3\xcb\x6c\x0b\x7f\x65\xf2\xae\xff\xe0\xec\x2f\xde\x38\x8a\x22\x38\x71\xb7\xb6\x5e\xce\x13\x7f\x49\x08\x9b\xd6\x52\x9c\x52\x97\xf2\x3e\x96\x94\x7b\xaa\x6e\x14\x80\xaf\xe2\x95\x0a\x55\xaf\x10\xb8\x61\xbf\xe3\x7b\x0b\xde\x80\x78\x5f\xd9\xc9\xe2\xe0\xd4\xfc\x56\xef\x79\x0b\xf4\x87\x84\xd1\x10\xcc\x43\x08\x42\xc2\x39\xa0\x27\x8d\x4e\x0c\x36\x69\x5b\x7d\xe8\x5e\x1e\xae\x47\x20\xb9\xe0\x04\xf3\xda\x1f\x5e\xda\x09\x49\x19\xcc\xee\xf3\x3d\x33\x56\xfc\xe1\x52\xcf\xbf\xfc\xce\x51\xb6\x68\x0b\x61\x92\x34\xeb\xe7\xf9\xb3\x1c\x5d\x17\x5a\x56\x1c\x8c\xc1\x69\x6c\xbc\xcc\x65\xfd\xc8\xb6\x9c\x30\x60\x44\x81\x24\x79\xdb\xbe\xb9\xe0\xe6\x78\x8b\x29\x79\x51\x8e\xad\x44\x4b\x12\x2d\xd1\xe5\x40\x46\xbc\xd3\x6b\x07\x26\x6a\xe2\x6c\x0c\x2f\x6c\xa8\x78\xa5\x45\x21\x76\x4d\x15\x32\xf3\x00\x3b\x4e\x1a\x19\xe1\x4c\x1b\xe9\xae\xc7\xa8\x49\x31\xcb\x0e\x6d\x19\xb6\xb7\xff\xcd\x71\x2f\xd1\xb6\xf6\x4c\xff\xf7\x4c\xf6\xdb\xd7\x0d\x8f\xbb\x1b\x19\xf7\xc3\x6c\x1f\x6f\x57\x0d\x0e\xed\x98\x56\x0a\x1c\x08\xe4\x03\xca\x9e\x02\xa7\xe2\x2b\x51\xda\x24\x03\x02\x75\xb4\x26\xd0\x05\x39\xb5\xd5\x48\x4d\x87\x08\x5e\x2c\xf5\xeb\x6b\xb0\xe7\xeb\x42\x3a\x7e\x3f\x5a\xbe\x3b\x85\xfe\xde\x6e\x7a\xa9\xc1\x69\xf5\x22\xb6\x3d\xb1\xa6\xf8\xa3\xe7\xff\xf9\x6e\x5e\x25\xca\x6f\xb7\xe5\xee\x56\xac\x3f\xb8\xc8\xa6\xc
b\xef\xa5\x42\x05\x1b\x70\xc6\xb8\xff\x60\xc1\xfe\x4d\x7c\xbc\xb8\xd2\x8c\x4d\x96\xbf\x65\xc5\x19\xba\xb8\xc2\x49\x56\xfe\x05\x10\xad\x67\x97\x2a\xfc\xc1\x86\x48\xb9\x46\x3a\x6a\xc9\xa9\x80\x68\xd2\x45\xd2\x11\xca\x64\x86\x22\x89\x05\x8d\xbc\x67\x84\xc2\x43\xf6\xba\xc9\x5a\x83\xc8\x5b\x78\xbb\xea\x1d\x41\x6c\x1f\xa5\x47\x71\x69\x82\x09\xf9\xb5\x24\x47\xfc\x37\x1f\xd2\xe3\x74\xfb\x24\xef\xb8\x97\x75\xff\x27\x96\x7b\xbc\x61\x19\x52\x17\x78\x79\xd9\xd8\x0b\x6d\xa7\xe4\x2f\x33\x2a\x69\xa0\x89\x29\x80\xa6\xf5\x94\x17\x8e\x51\xe8\x2a\xae\x68\x65\xee\x39\x43\xed\x4d\x5d\x51\x33\x0b\xd7\x20\xd2\x4f\x53\x09\xdf\x7f\xcc\x2c\xd5\x54\xaf\x36\xd9\x55\xba\xbe\x69\xd4\x87\xe6\xf6\x33\xfc\xbd\x83\x10\x84\x25\x6f\xa4\xb4\x50\x01\xd2\xeb\x5a\x11\xb8\x4b\x86\xc3\xee\x63\xcc\x29\xf7\x1f\x12\xcf\x7a\x18\x82\xbf\xca\x5d\x33\x8c\xca\x89\xef\x7e\x68\x81\x45\xd4\x3c\x06\x4b\x18\xd5\xe9\x5f\xe1\xc6\x0b\xcc\xcc\x85\x28\xa4\x7f\x38\x21\x31\x20\xa7\x13\x2f\x8a\xd9\x25\xb0\xeb\x15\x48\x96\x6e\x27\xe9\x53\x8b\xb5\xd8\xaa\x67\x9a\xd0\x95\x5f\x22\x75\x0f\xd5\x51\xf0\x01\x6e\xff\x70\xf0\x97\x09\x61\x55\x46\xf9\xf2\xf8\xdd\x28\x25\x77\xad\x4c\xf7\xc7\x60\x48\xef\x6c\x7a\xbb\x08\x55\xfc\x53\xe4\xbd\xb5\x85\x69\x26\x8d\xfa\x0f\xe4\xb0\xaa\x2b\x88\xc1\x1f\x3d\xe3\x09\xd8\x0c\x2b\x7f\xfe\xcb\x32\xe6\x85\x33\x4b\xd6\x7d\x81\x85\x21\x6d\xbf\x0e\xef\x54\xf5\x69\x95\x6e\x87\xd8\x1f\x90\x1b\x8a\x4e\x2b\x9d\xd7\x81\xc3\x70\x5f\x22\xc7\x2f\x24\x2f\x17\x75\x69\xb8\x33\xd2\x79\xd1\x42\x71\xe5\x55\x61\xdc\x9e\x42\xf7\x05\x54\x81\x4d\xce\x81\x98\x86\xb0\x0f\x1a\xed\xa0\xa8\x0b\x88\xde\x2b\x65\x55\xdd\x27\x2d\x62\xd2\xa2\xe7\x16\xd8\x16\x8e\xb5\x5b\xfe\xac\x77\x8f\x4c\x49\x71\x08\xc7\x0d\x29\x50\x60\xfe\xd8\x0e\xb7\x04\xde\xfa\x14\x0c\xc6\x30\xba\x1a\x04\x91\xc4\xda\x9b\x45\x04\xc6\xef\xa9\x02\x7e\xf9\xe4\x52\x89\x67\x38\xcb\x86\x36\x32\xdd\xc9\x26\x84\x6e\xa1\xc0\xf3\x87\xca\xc8\xb5\x54\x36\x5e\x19\xd0\xa1\x17\x32\xfa\x4a\xde\x0e\x45\x8d\x42\xe8\xe4\xf2\x97\x14\x1c\x72\x81\x16\x68\x8e\xaa\x5a\xbd\x80\x3c\x09\x06\xf
0\x69\x42\xa3\x67\xae\xfb\xc1\xc6\xe2\xd9\xf7\x5e\xbe\x04\xcc\x39\x3c\x01\x46\xe1\x42\xdb\xbd\xf5\xa5\x53\xc8\x1d\xaf\x37\x53\x43\xff\xfa\xb9\xac\xbf\xc6\x84\x0c\xd2\xdf\x5e\xc5\xbc\x34\xf9\x54\xe4\x84\xcc\x2a\x32\xb1\x05\x1a\x4f\x79\x98\x97\x9e\x74\x32\xfa\x06\x27\x72\xa0\x3b\x35\x8d\x34\xe1\x29\xe9\x5f\x5f\x37\x49\xa8\x55\x88\x27\xca\x1e\xd5\x92\x8f\x76\x0f\x67\xa5\x8e\x8e\x2b\xb3\xba\x4d\x20\xe9\x58\xcd\xc3\xaf\x2f\xca\x09\xb6\xb6\x21\x1f\x66\x05\x2b\x09\xc7\x4b\xdf\xc1\xd1\x58\xdf\x02\xa5\x07\x87\x6e\x98\xd3\x97\xd2\x96\xbb\x9e\x63\x11\x59\xd1\xf6\x7f\x34\xa9\x67\x10\x8a\xe0\x99\xbd\xd1\x4c\xed\x9f\x20\x80\x78\x3d\xfa\xe0\xc4\x42\x7a\xad\xec\x69\xa2\x63\xa9\xa8\x45\x1a\x93\x1f\xb8\xee\x60\x54\xf9\xf1\xa3\xd1\xbe\xb8\x2e\x9a\x89\x34\x26\x39\x06\x6d\x09\xb5\x09\xf6\xd2\x06\xa5\xdd\x75\x08\xb2\x4e\xad\xdb\x76\x0f\xa6\x3e\x5e\x10\x4e\x31\x3d\x52\xea\x5a\x6b\x7e\x3b\x2d\xdb\xc4\xbe\x4c\xef\xc1\x59\x87\xc0\x52\xd1\x74\x6a\x95\xca\x42\x92\x60\xdd\x44\xfe\xd8\x25\x83\x85\x6a\x64\x3a\x4b\x5b\xe6\x07\xaf\x28\x75\x61\x4d\xad\x69\x5c\xd6\x0c\xe4\x89\xea\x03\xa8\xea\x92\x0f\x8c\x3e\xd8\xd9\xe4\x40\x9a\x8e\xa2\x7c\x77\x4a\xe7\x8d\x7c\x7f\xe9\x41\xb0\xb0\x8e\xa8\xf9\x73\xf6\xba\xee\x3f\xb6\x3c\x6d\xf0\x09\x6a\xf1\x37\x5a\x47\xb2\xcf\x21\x33\x73\xcb\x87\x9b\x2a\x5c\xc4\xd8\x68\xba\xbd\xf9\x2b\xd1\x7d\x16\xad\x95\x56\x71\xed\xd4\x3d\x6c\x2d\xf8\x12\x65\x19\x95\x16\x68\x4b\xcd\x90\x38\x9d\x28\x78\x25\x4b\x12\x69\x72\xbb\x39\xa7\x25\xce\x43\xa9\x89\xb8\x00\x1b\x8f\xe7\xbc\x68\xda\x16\x14\x5f\x4d\x85\x27\xb3\x06\x5d\xcb\xd3\x44\x19\x42\x7d\xf9\x0c\xe0\x5a\x09\xca\x6e\x28\x3b\x2d\xc0\xba\xa1\xe5\xa5\xc8\xa2\x5e\xb5\x67\xa3\x46\x5e\x06\xe4\x17\x86\x62\xc1\xdf\x15\xc3\x8b\xbb\x97\xa8\xff\xfb\x63\x4d\x49\x27\x39\x7a\x4a\x9f\x23\x1a\x3d\x3f\xc3\x61\x31\x56\x84\x5f\x9a\xfb\x8b\x95\xba\x16\xf9\x6b\xbe\x69\x5e\x0f\x04\x61\x54\xfc\x3c\x3a\x5f\x43\x26\x10\xf9\xf6\xcb\x75\x48\xf1\x52\x05\x59\xc9\xd0\x56\xa1\xd7\xc2\x20\xa2\x5c\x4f\x01\xdb\x76\x80\x3f\xc2\xd5\xf4\xd6\xc1\x40\x36\x52\xc5\x63\x8
d\x8e\xcf\xea\x29\x05\xab\xe9\xb5\xb3\x99\x73\x7e\xe9\x64\xe0\x77\x25\xa1\x29\x29\xc9\x33\x42\xaf\xb2\x13\xde\x1c\x1a\x33\x97\xa2\xf8\x11\xfb\xd9\xf8\x72\xa5\x21\x43\xb9\xc7\x88\xbd\xdc\x7e\x0e\xdb\xaf\xd2\x33\x97\xe2\x44\xd3\x1f\x89\x3d\x60\x71\xc4\x3f\x33\x6a\x7d\x99\x85\xcb\x27\x65\xc6\x3f\xe8\xe9\x3c\xfa\xdf\x76\xcb\x65\xce\xc8\x7f\x6d\x28\x2a\x4f\x6a\xcf\xac\x6f\x2b\xd6\x24\x58\x27\xe3\x3b\x5d\xac\x11\x88\x29\xf4\x36\xf2\xa8\xfd\x7d\x22\x3d\x33\x95\x5b\xff\x92\xe8\xa8\x7d\x83\x57\x3f\xd0\xc2\x9e\x8a\x2f\xc7\x9f\x1f\x05\x4e\xf4\x6f\xaa\x3e\x18\xde\x11\x15\x9d\x84\xc5\x7d\xa6\xf5\x07\xa5\x3b\x01\x17\xcb\x6b\x5c\x30\x42\xea\x68\x2f\xc9\x3f\xe5\x47\x59\x02\xcf\x91\x45\x0b\x60\x60\xfa\xfb\xd1\x27\x9d\x3d\x05\x67\xb8\x1e\x59\x00\xf0\x7b\x86\xaf\xc2\xdc\x2c\x2b\x28\x20\x0d\x4c\x12\x7d\x27\xcc\x5f\x4e\x5a\x2c\xba\x9a\x4a\xb6\xdc\x19\x6b\x09\x13\x87\xb7\x3a\xeb\x7b\x5d\xdc\xf3\xc5\xdb\x79\x5e\x86\x54\x7f\x24\xae\x42\x58\xd2\x74\x52\xda\xa9\x22\x78\x90\x11\x5c\x7e\x3b\x91\x1c\x58\xf0\xa0\xd2\xb9\xa1\x47\xfd\x87\x00\x3b\xef\x0a\x90\x42\x8f\x7c\x8b\xd6\x86\x5c\xad\x4d\xe2\x18\x28\x6b\xbe\x64\xd7\x8b\x51\xa0\xec\x32\x74\xaf\x02\xb1\xc4\x3c\xf6\x02\x11\x74\x2a\xcb\x13\x81\xad\xc4\xae\xcf\x6e\xdb\xe6\x7c\x4b\x6d\x92\xb2\x60\x35\xcb\x08\x82\x6b\xf9\x3b\x79\xc5\xe0\x4b\xbb\xab\xc9\x6a\x7f\x25\xe9\x19\x0e\x69\x90\xf1\xf6\xb2\x97\x5a\x90\x75\xce\x4f\xcd\x13\xa5\x0e\xb9\x75\x08\xbb\x2b\x31\x71\xa0\x3f\x7e\xfa\xfe\x21\xf7\x4b\x87\x89\xfa\x43\xfe\xf2\x23\x56\x1b\xf4\xfd\x44\xcb\xa7\x58\xe7\x28\x13\x76\xfc\xcf\xda\x02\xa3\xd5\xb9\xfa\x4a\x48\xa9\x89\xac\x58\xed\xcb\xcc\x09\xf5\x80\x89\xfe\x68\x21\x45\x91\xd7\x6c\x3c\xab\xd2\xfe\x62\xd0\xa3\xc3\x9a\x92\x8e\x48\xc7\x72\xc7\xb1\x14\x4b\x06\xf3\xd3\xac\x00\xaa\x2a\xb9\xba\xd6\x04\xa6\x66\x98\xbd\x61\x55\x41\x35\x26\xbf\xe5\x55\x9e\x03\xe4\x95\xb6\x97\x3f\x9a\xa6\x93\x81\xd4\x41\xbc\x2f\xaa\x73\xb5\x29\x86\xdd\x3b\x42\x22\x9e\xd7\x56\x67\xd3\x0f\x37\xc1\x26\xd7\xbb\x12\xce\xf9\x0f\x94\x19\x14\x3a\xba\x9e\xbe\x2e\x70\x84\x9b\x78\x0d\x9d\x2
5\x66\x92\x06\xf1\x1b\xbd\x0b\xee\x51\x7b\xef\xfd\xb5\xdb\x81\x96\x56\xb8\xbc\xfc\x39\x70\x9b\xf3\x15\x37\xa5\xbb\x8d\x1e\xb2\x69\x92\x3e\x06\xc5\x45\x95\x44\x9c\x41\x79\x47\x65\x9e\xdd\x1d\x56\xd1\x34\xe8\xb0\x7d\xb3\xa0\x09\x80\xac\x7e\x3f\x3c\x56\xf0\xb9\x16\x18\xc6\x9c\xa4\x1f\x9f\x84\xe2\x13\x8e\x14\xb3\x43\x52\x82\x58\x97\x78\x1b\x05\x07\xa4\x3e\xff\x73\x5c\xc7\xdc\xf3\xe1\x48\xe9\x31\x87\x24\xcb\xad\x70\x68\xe0\xe7\x90\xa3\x57\x44\x4c\x2a\x9a\x2c\xd1\x93\x84\xcb\xe9\xb8\x96\xed\xe7\xec\xb8\xdf\xe9\x7c\xb0\xbf\x4f\x12\x89\x6d\x1d\xb4\xf5\x03\x74\xa9\x31\x76\x9a\xb1\xa4\x4c\xdd\x94\x88\x8c\x30\xaf\xaf\xd5\x8e\x21\xdb\x07\x48\x5a\xa5\xe2\x48\xa8\xbf\xb0\x4a\x3b\x06\xeb\x74\x02\xc5\x48\x6b\x70\x76\x54\x16\xd7\xb0\x44\x61\x4c\xe9\x96\xa2\x14\xc6\x32\x0c\x4f\x48\x72\x05\x44\xf2\x28\xe9\x07\x01\x45\x2d\x8c\x9c\x37\xcc\xa3\x7c\xbe\xc7\x5f\xc5\x93\x2c\x70\x0e\x1a\xf0\x1e\x50\x30\x1c\x31\x04\xc4\x08\xf0\xfb\x62\xfb\x05\xaa\xef\xc4\x97\x5f\x39\xa2\x6b\xa1\x9f\x71\x45\x68\x4e\x9e\x12\x43\x93\xe9\xeb\x7d\x6b\x43\xb4\x7e\xfa\x0b\xfb\xd3\x9b\x4f\xe6\x97\x7f\xbc\xd6\x9b\xf1\x95\x27\x0e\xa3\x08\x62\xb4\xc1\xaf\xaa\x18\xfe\xde\xde\x66\x15\x8a\x61\x38\xa4\x59\xa4\x34\x0e\x73\xc5\x2e\x01\x26\x10\x8a\xc6\x7e\xe7\xfb\xb9\x5a\x1f\xbc\x03\xc9\xfc\x8a\x1b\x93\xbf\x40\x20\x59\xc5\x44\x85\x3e\xb9\xc1\x4a\x76\x92\x48\xc7\x46\x50\x73\xa8\x3d\x6c\xc2\x30\x9c\x63\xa9\x83\x61\x8e\x64\x5e\xc6\x17\x42\xee\x67\x2f\x42\x91\x97\x4a\x84\x15\x67\x60\x9c\xa0\x1f\x39\x23\x53\xf6\xe3\x33\xf8\x50\x5f\xeb\x1a\x8c\x66\x80\xf6\x26\x66\xff\xa9\x53\x01\xe2\x17\x78\x0d\x04\xa1\xa7\x4c\xc4\x8e\x9b\x14\x4f\x05\x0b\x29\x13\x2c\x51\x55\x72\x0f\x7a\x3d\x3c\xde\x22\x92\xa8\xd5\xa4\x02\xa8\x46\xb3\x0b\xbe\xaf\x88\xbc\x48\x49\xd3\x56\x15\xa8\xb4\xab\x38\x59\x57\x7c\xca\xea\xb9\xf6\x8f\xdf\x4b\x82\xa5\xba\x7b\xdc\xa9\x1f\x0a\x3e\x9c\xa5\x30\xb9\x99\x56\x30\x43\xfc\xed\xfb\xf2\xe8\x63\x81\x51\xa5\xf5\x4f\x1e\x9d\x5d\xe6\x5f\x3b\xef\x7a\x86\xe8\xd3\x86\xff\xc2\xb5\xcb\x8a\xd5\x1d\xf0\x72\xc9\x60\x51\x4c\xfc\x2c\xb5\x2
d\xa4\x0c\x44\xd7\x57\x86\x43\x5b\x71\x51\xd8\xe9\x73\x57\x21\x30\xa4\x4d\x7c\xff\xc9\xba\x8e\x57\x2a\x2b\xce\x86\x5e\x0f\x6c\x45\x62\xcb\xb9\x4b\xce\x2b\x0d\x63\xe4\xa6\x9f\x4f\x05\x07\x16\x0f\xd8\xa9\x21\xa6\x3d\x21\xa4\x39\xb2\x05\x8c\x4e\x43\x17\x72\xf3\xc0\x36\xe5\x2b\x5b\xeb\xc6\x70\x9e\xf0\x01\xc5\x96\x8d\x0b\x27\x5b\xda\x83\x9f\x96\x1d\xfc\xa0\xe2\x9f\x94\x98\x36\xfa\x2a\xa9\x18\xea\x78\xdf\xdc\xe1\xbd\x4f\xa7\x52\xb9\x0c\x47\x91\x74\x84\xeb\x15\x66\x5b\x28\xd0\x3d\xd2\x04\x56\x03\x9d\x07\xf1\xab\x5b\x3e\x38\xe1\x97\xa7\xad\x65\xff\x49\xb3\xc6\x32\x1a\x6d\x22\x63\x46\x9e\xe8\xd7\xfa\xed\xa4\x10\x97\xd8\x2f\xe5\xaf\x7f\xfa\x21\x33\x9e\x31\x40\xfb\x54\x3f\x66\xc9\x91\xb1\x5c\xe1\xe5\x0c\x39\x66\xa7\xe1\x1b\xb8\x40\xcb\x0b\x88\x6b\x13\xfb\xf7\x1a\x88\x15\xa4\x19\x2e\xb7\xef\x99\x1f\x8b\x86\xcb\x26\x68\x57\xbd\xbd\xe7\xef\x48\x5f\xe2\xb4\x5d\xe2\x0a\x94\x53\xbd\x5b\x4e\x15\x46\xf2\x61\x13\x18\xa1\x88\x1d\x62\x21\x2d\xaf\x62\x78\xab\x45\x40\xd2\x53\xf2\x8a\xca\x00\x23\xd9\xe4\x53\xbd\x40\x6a\x45\xcd\xca\x71\x6f\x5a\x6e\xf1\x9b\xde\x6c\x39\x6c\x22\x2c\x98\xc0\x8c\x63\x4d\x55\xee\x91\xf9\x5b\x97\xc6\x71\xbc\xae\x52\x8c\x92\x9d\xc9\x25\xb6\xec\xbf\x69\x42\xa5\xcf\x0c\x8d\xb7\xd2\xce\x1a\x49\x10\x5d\xe9\x6b\xe5\x99\xb1\xc4\xf0\x23\xf2\x34\xd8\xde\x1c\x02\x75\xdf\x33\x7c\x3c\x8b\xca\xde\xcd\x18\xb0\x67\xf0\x21\xa0\x82\xcd\xdf\x54\x56\x0d\x38\x50\xd7\x99\x45\x06\x36\x73\x6f\xf4\x87\xdb\xb1\xcc\xc4\x5b\x8c\xb7\x06\xab\xde\x2e\x06\x81\xb5\xa6\xdc\xc1\xda\x42\x87\xad\xc6\x08\xca\x1f\x1d\x20\xf4\xde\x06\x79\x8c\x42\xf9\x07\x00\xca\xdd\x30\xc0\x54\xfb\x7f\x3d\xb2\xf3\x5c\x0a\xde\x72\x4c\xe2\x37\x55\xa1\x91\xf8\xd2\xe0\x31\x9b\xd5\x75\x57\x3a\xcc\xba\x20\xd1\x36\xef\x23\x77\xb8\x33\xaa\x2c\x6d\x4b\xf2\x92\x31\x50\x46\xd0\xfa\x15\x9d\x2b\x3a\x7f\xe8\xf2\xba\x43\x7b\x84\xf5\x64\x31\xde\x24\x47\xf9\x92\x0c\x2d\xb4\xea\x26\xd7\xe3\x08\x0d\xce\xb4\x70\x97\x87\xa3\xf0\x9c\xca\xb0\xb6\xde\x31\xcc\xd0\x18\xcb\x11\x6a\x8b\x13\x88\x86\xe4\x3d\xc1\xfe\x7f\x03\xe3\x14\xc8\x1a\x21\x1c\xa
3\x8a\x1e\x26\x49\xdf\xb4\x20\x08\x1f\x2b\xb8\xbd\xaa\x80\xea\x9a\x57\x35\x47\x34\x1e\xb5\xb9\xfd\xc6\x6b\x8c\x85\x91\x7e\xce\xeb\xa3\x19\x5e\xe4\x82\x58\x1b\x02\x6f\xca\xf0\x6d\xf4\x3b\x5d\x16\x02\xd7\x9f\x3e\x39\x4c\xf5\x71\x81\xb3\x59\x1a\x08\xe4\x70\x16\x4b\x84\x64\xca\x80\x49\xc9\x13\x18\x4f\xe5\xbf\x07\x47\x6c\xc5\x70\x2f\x0c\x4b\x4c\x39\xc4\xae\xff\xb3\x11\xc1\x33\xa5\x41\x8b\x5d\x67\x28\x45\x67\x10\xf3\x50\xeb\x24\x68\x19\x12\xcf\x1f\xc5\xf4\x73\xb2\x48\xcd\xcc\xab\x2d\x3d\xf1\xcc\xb1\x6d\x95\x1e\xcd\x65\x14\x7a\x1a\x29\x3f\xd7\xc3\x2b\x15\xd6\x2e\xa9\xc4\x64\xfa\x8d\x16\xad\xcf\xd0\xf2\xfb\x1e\x8c\x74\x89\x90\x4d\xd9\x92\xf9\xbd\x0a\xc5\xab\xc6\x3a\x0c\x30\x22\x29\x86\x77\x2d\x44\x31\x3d\xc9\x1c\xb6\xcc\x3f\xf7\x37\xb4\x3a\x23\x45\xac\x55\x97\xe6\x40\x7f\xdc\x6f\xde\x35\xee\x30\x9b\xb0\x65\x44\xb5\xef\xd2\x0d\xc5\x1d\xf4\x41\x85\xc9\xa7\x43\xa8\x90\x7d\xe8\x0c\x81\x0d\xce\xda\xc0\xab\x9d\x23\x40\x7a\x68\x3c\xb5\xd6\x99\x43\x4e\xa9\xe0\xe2\x08\xe7\x43\x81\xb2\x20\xc9\x6b\x66\x6e\x85\x3f\x9c\x6d\xce\x09\x80\x82\x46\xe7\x55\x1c\xf2\x89\xe0\xec\x11\xff\x6d\xf1\x53\x8b\xd8\x3a\x44\x1b\x57\x0a\x43\xb5\x09\x83\x1b\x8a\x78\xf9\x1b\x09\x4b\xce\x8f\xd7\xc0\x0f\x6b\x81\x83\x15\x20\x52\xc1\x4a\xc8\x2d\xa7\xe8\xd4\x00\x46\x27\xf1\xf4\xe1\x65\x50\x8f\x20\xc1\x2c\x7d\xcb\xc9\x4f\x11\xe3\x76\xf4\x19\x20\x46\x53\x60\x98\xbe\x15\xb4\x06\x30\xa6\xa2\x78\x0d\x8b\xca\xce\x6d\xa5\x4f\x38\x7b\xaa\xf6\xdf\x1c\x39\xdd\xfe\xf9\x71\xf8\x87\x43\xc7\x8d\x0e\xc1\x92\xc6\xa2\x37\xc3\x40\x7f\xfb\xe6\xc4\xa4\x5b\x54\x50\x9f\x20\xd5\x30\x36\xf6\xaf\x18\x49\x09\xd5\xbf\xc5\x9d\x97\x30\x5e\x6e\xa3\xa7\x9f\x01\x90\xda\xac\x54\x40\x35\x4f\x38\xe8\x1f\xec\x97\xce\xb3\xca\xb2\x33\xac\x2c\xd7\xfd\xda\x10\x13\xce\xe9\x9f\x5a\xff\x7b\x69\x47\x5f\x45\xb6\x20\x5b\xa7\xe3\x0f\x42\xbc\xbf\x6a\x8a\x17\xb1\x3b\x10\x89\xd9\x67\x57\x5b\x0a\x77\x35\x15\x83\xaa\x35\x6d\x3e\xac\xab\xe2\x78\xa1\xf3\xfe\x70\x8b\x62\xca\xb5\x97\x08\x13\xe1\x2a\xfb\x53\x69\x53\x12\xe3\x93\xdb\xbd\x87\xda\x80\x9d\x9a\x73\x02\x9d\x0
6\xa8\x7d\x4c\x77\x24\x69\xf8\x49\xce\xae\x11\xfe\x79\xd0\xae\xff\x9f\x1d\x5d\x6e\x2b\x8d\x3e\x4e\x6b\x7d\xa8\xe2\x61\x79\xc4\x1c\x01\x12\x86\xbe\xf4\x6f\x66\x2c\xc8\x94\x61\x79\xc3\xe7\x69\xbc\x27\x3f\x33\x65\x9f\x61\xdb\xe9\x31\x70\x17\xe8\xa7\xb8\x58\xe0\xba\x01\x46\x11\x70\xd4\xc2\xeb\x71\x9d\xaf\xd7\x42\xa5\x29\x73\x52\x51\x63\xac\xb5\xe0\x10\x96\xb8\x9f\xc4\x59\xfc\xfe\x83\x09\x81\xe1\x78\x17\xa5\xd7\xaa\x04\x64\x1c\x04\x27\x96\x06\xa5\x50\x63\xb1\xfc\x41\x11\xa8\x60\xa7\x7f\xf7\x9b\x77\x7d\xe1\xd7\x01\x91\x2d\x7b\x92\x68\x53\x93\xfa\x6d\xcb\xf3\x8d\x9e\x71\x1d\x60\x5e\xb6\x54\x98\x87\x56\x27\x6c\x18\xbb\x2b\xaa\xad\xf2\xcc\x8f\x8e\x2a\x40\x4f\xdd\xfe\x3b\x07\x75\xf8\x7c\x67\xd6\x3d\x34\x22\x7e\x9e\x3b\xbe\xae\xda\x75\x07\x48\x72\x41\xfc\xe1\x3e\x97\x7a\x18\xe4\xc7\xf4\x86\xd4\x01\x26\x33\x82\xc1\xda\xc3\x53\x1b\x1e\xf8\x79\xdd\xea\xa2\x55\x41\x97\x38\x09\x92\x1b\xd5\xb6\x7b\x1f\x61\xa9\x01\x74\x78\x76\xcc\xdf\x54\xe6\xff\xba\xbd\xa6\x99\xb2\x38\x96\x03\x7e\xfa\x77\x03\xd7\x51\xfc\xd3\x70\xa5\xff\xf4\x6b\x8d\x62\x8e\xbc\x28\x98\xca\xba\x2c\xdb\x0f\xb1\x73\x49\x94\x54\x6c\x31\xd3\xf1\xec\x8d\x8d\xa1\x3e\xc4\xe1\x91\x7a\x06\x82\xa4\x2b\x4c\x51\xe0\x8e\x5a\x74\xb2\x26\xe3\xd0\xda\xf9\xf0\x4e\xf4\xf4\xd5\xf9\x8b\x09\x5a\x70\x99\x88\x65\xfe\xfa\x9e\x30\x3c\x5e\x3e\xf9\x17\x7a\x8c\x70\xba\xda\x13\xd8\x99\x3d\x5c\x14\x55\x15\xcf\x46\x64\x07\xd0\x7a\x46\x9a\x14\x1c\xba\x87\x54\xbf\x03\xd1\xfe\x6f\x76\xa6\x4c\x1b\xe7\xa8\x6a\xab\x98\xce\x78\x71\x5c\xb9\x76\xa7\x7a\x75\x85\x69\x50\xaf\x69\xbb\xa6\xcb\x4e\x4e\x97\x35\x49\x62\xac\x91\x31\xf1\x48\x79\x27\x95\xcf\xa8\x9c\x3a\x57\x17\xd9\x94\xa3\x0e\xad\xfa\xaa\x80\x54\xe6\xc5\x35\x61\xe7\x4b\x7a\x7b\xff\xc1\x9d\xfd\xf1\x19\x8e\xbf\x45\x35\xaa\x35\xdf\x23\xc0\xb8\x3a\xcb\x75\x25\xe1\x07\x0e\x4b\x77\x0c\xe1\x32\x46\x01\xca\x66\x19\x9d\x4d\x44\xc6\x69\x4c\xff\x44\x00\xcb\xb6\x87\xf4\x23\x16\x37\x89\xde\x7d\xd5\xae\xcd\xed\x00\x9a\xa0\xd5\x12\x14\x26\xc1\x1e\xde\xa7\x34\xae\xd3\xf3\x3e\x4e\xff\xf7\x7a\x2b\x7b\x49\x25\x8e\xbb\xe
6\xc0\x88\xe4\x78\x7f\x5f\x1e\x70\x99\xdf\xb9\x34\x1a\x4f\xb2\x7b\xfe\x4c\x4b\x14\x82\x50\xec\xda\x5f\x04\x61\xa8\x49\x96\x3d\xc8\xef\xa0\xaa\x90\x69\x6a\x6b\x50\x09\x29\x39\x8c\x9e\xf6\x17\xa5\x62\xc4\x2b\x3f\x37\x14\x2c\x97\xc0\x1b\x4e\x2a\xd8\xfc\x7c\x11\x0d\x28\x72\x7a\x8f\xe2\xa1\x88\xc3\x05\x57\x71\x99\x67\x32\xfe\x0f\xa7\xce\x59\x85\x71\x6e\xeb\x8f\x66\x08\x4d\x75\x36\xd5\x85\x2d\x71\x68\xf2\xe3\x30\x3c\x92\xee\x2f\xff\xc7\x77\xa4\xaa\x80\x61\x18\x30\x28\xb1\x2a\x3d\x09\x61\x40\x3d\x72\x84\x2e\xc1\x5f\x00\x17\x01\x3e\x05\xa4\xf1\xf7\x52\x8b\xad\x6f\x11\x61\x5e\xbb\x42\xc8\xf4\xd1\x69\x9d\x33\xe5\x2e\xf0\x66\x35\x8f\x6a\x18\x02\x32\x4b\x26\x6d\x74\x42\xd5\xd2\x5f\xa6\x6b\xb4\xdb\xc6\x16\xb4\xc7\x67\xb4\x85\x6e\x77\x71\x09\xb5\x28\xac\x26\x6c\xd5\xdf\x05\x67\x55\x4f\xd5\xa0\xa8\x85\xcc\x2e\x40\xdc\x11\x59\x61\x69\xc8\x0d\xf5\x16\xf9\x42\xdb\x38\x17\x1e\x1b\x3b\xb8\x57\x50\x72\xd5\xda\x78\x6c\x64\xb0\x75\xbc\xd2\xb9\x7a\xfa\xf0\x9e\xd9\x67\xc3\xb9\xbf\xb2\xd4\x84\xbb\xd6\x27\x74\xa7\xec\x42\x76\x21\x55\x12\xec\xd4\xd7\xd3\xa6\x00\x32\x75\x81\xc6\xa7\x66\x3c\x7b\x51\x18\x86\xe5\x85\x7d\x0d\x42\x6c\xc2\x8c\x0a\x28\x57\xf2\x61\x84\x3d\xe6\xdd\x7b\x2c\x2b\x20\x85\x93\xf1\x09\x84\x39\xd2\xd5\xfe\xba\xce\xeb\x01\xbb\x7f\xe6\xc1\x9a\xae\xc6\x7a\x71\x8c\x0a\x22\xb6\x70\xe2\x9f\xf8\x88\xd0\x5b\xb6\x12\x8b\xad\x5e\x6e\x0a\x4b\x5b\xb8\x28\xbd\x9f\x4f\xf6\x6e\x54\xef\xe1\xae\xd0\xa9\xbf\xe7\xb2\x66\x28\x0a\xc4\x77\xb2\x61\x6f\x0f\x03\xd7\x0f\x39\x66\xfe\xf6\x58\xb5\x9f\x92\x5d\xee\xa9\xbb\xb9\xf8\x31\x3f\xc1\x19\x88\xa0\x9e\xca\x21\x7c\x79\xf9\x25\xd3\xa2\xeb\xb7\x1a\x97\xf0\xa8\xb9\x4f\xb7\x35\xfc\xed\x79\xe9\x3a\x30\x69\xe9\xaf\xb8\x99\xf8\xd7\x44\xb1\xdc\xa1\x04\x9c\x4a\xdd\xf3\xa8\xc4\xeb\x1c\x6a\x7f\x18\xe7\xa8\xa9\xd1\xfa\xb1\x21\x63\x60\xaa\x8a\xff\x91\x51\xb5\xf4\x77\x06\xa9\x9b\x3e\x35\x5e\xe4\xe5\x3a\x58\xdb\x9b\x2b\xc9\x49\x84\x21\xb6\x92\x8e\x4c\xc2\xef\x10\xd0\xb6\x0f\x25\xc6\x3f\xef\x9f\xce\x3f\x5a\x17\xa8\x30\xdb\xd4\x40\xf8\x2d\xc8\x40\x7c\x6a\xe3\x86\x14\x4
1\x2b\xdb\xaf\x2f\x19\x9f\x37\xb4\x60\x70\xdd\x4c\x97\x7c\xe8\x67\xc0\x51\xbf\xcc\x4b\xb9\xe0\x56\xa7\x23\x7b\xab\x25\x1e\xb1\x2f\xb8\xcc\xdb\xdf\x48\x58\x0e\xe6\xb8\xec\x7b\xa7\xb1\x7c\x85\x49\x04\x6c\x03\xb7\x0a\xa8\x13\xc0\x46\x30\xa6\x65\x8c\xa1\x0e\x91\xf4\xad\x05\xa2\xb9\xae\xf2\xc3\x48\x6b\xf5\x86\xc3\x6f\x09\x99\x24\x87\x51\x58\x25\x51\x20\xcb\xb5\xdc\xf6\xf7\x0e\xd0\xf0\x24\xba\x3c\xc9\x73\x5b\x04\xd4\xe2\x29\xb6\xb2\x41\x80\x80\xc5\x39\xa9\x77\x95\x2d\x75\x18\xb8\x8a\x5d\xc6\xe8\xa6\xfe\xe7\x42\x0c\x68\x4d\x95\x5a\xd5\xcb\x0f\x0e\x21\x48\xae\x1c\x55\x5f\x3c\x84\x1f\x04\x5d\x3a\xf7\x60\xdb\x90\x9b\xd4\x8f\x54\x02\xd1\x9f\x53\xf5\xae\xe2\x3c\x3b\xa5\x7b\xaa\x00\xbb\x07\x77\x50\x05\x8b\x49\x22\xe9\x44\x29\xdc\x06\x99\xeb\x3c\xf9\x85\xd9\x4a\x0b\x3d\xd9\xdd\xa6\xc0\xf8\xac\x60\x12\x2d\x64\x5a\x3b\xc4\x6f\x0e\xf3\xf8\xcd\xb8\xe9\x8e\x0e\xc6\xcd\x94\x29\xe0\x11\xde\xa7\xf6\xf6\x8b\xb9\x1c\x46\xd2\xdf\x21\x9c\xbc\xfa\xd5\xc5\xdd\x7d\xf6\x59\x57\xb5\xf2\xa6\xe9\x5c\xbd\x08\xdd\xdb\x89\x21\xa3\x24\x0a\x10\x0b\x8c\xa2\x26\xa7\xf0\xb3\x30\x6f\x14\x5d\x28\xf1\x54\x01\x3d\x2e\x7d\x68\x21\xcf\xfc\x42\xc3\x8d\xf4\x01\xe1\xff\xb9\xbc\xb5\xb8\x3d\xe5\xf5\xb4\x53\xce\x71\x07\xb4\x61\x9a\x02\x82\xcc\xee\x54\xa4\x08\x8a\x10\xaf\x16\xdf\x38\x28\xea\x61\x3c\x0a\xfa\x97\x80\xed\xed\xde\x0e\x99\x36\x9f\x0a\xba\x2c\xa1\xab\x4e\xbb\xae\xc0\xe0\x96\x00\x38\x7f\x80\x80\xfe\x1e\xe7\xb6\x11\x57\xf9\xcd\x77\x0e\xf9\x7d\x6e\x79\x03\x6e\xb9\xb2\xd8\x47\x49\x89\x20\x0e\x2e\xd7\x82\x56\x39\x41\x63\xe3\x35\x37\x52\x01\x15\xf7\x26\x3b\xc1\x69\x96\x0c\xde\xec\xb2\xcf\x88\x0e\xf8\x1f\x2c\x3f\xe3\x2f\x07\xca\xf2\xd1\xb9\x2b\x32\x77\x68\x84\x70\x5f\x15\x3e\xd1\x66\x94\xe4\x6f\x6c\xf9\xc9\x15\x05\xd8\xb2\xf6\xf6\xb3\x6b\x7e\x0d\x08\x5e\xdf\x3c\x42\xa1\xe8\xa6\xc6\xe8\x3f\xea\x03\xe0\x51\x38\xe4\xa5\x2d\xe7\x36\xf3\xb7\x86\xc4\x60\x53\x22\x8e\x88\xac\xb0\xc6\x60\x12\x2e\x28\xe4\x15\xfc\x8f\xf6\x5d\x68\xf2\x89\x02\xbc\xcd\x06\xf7\xe6\x59\xc3\x8b\xa5\x06\x8d\x52\xb3\xd0\xa0\x70\xbc\x98\x30\x97\xa9\x9
9\xe7\xa1\x4b\x04\x36\x5b\x68\x71\xa0\x84\x26\x3b\x79\xbf\xe6\xce\x6f\x97\xc3\xb6\x49\x55\x5d\x69\x35\xfd\xe7\xc4\x2a\x04\xd1\x99\x8e\x56\xe7\xe6\xe2\x51\x40\x25\x82\x59\xeb\x28\xf0\xc3\xa7\xcb\x13\xbc\xcb\xc3\xd6\xd6\x9d\xa1\x1d\xa4\x88\x30\xc4\xbd\x48\x34\x4a\xb0\x4a\x43\xad\x2c\x24\x11\xa2\x25\x5f\x40\x58\xec\x07\x59\x7f\x30\x5d\xd7\xb3\x23\xd1\x4e\x3d\x2a\xbb\xe3\x73\x87\x13\xcb\x54\x09\x03\xc8\x5b\xf0\xa8\x7f\xe6\xfb\x24\x12\x8b\x9f\x7e\xef\xab\xf3\xab\x03\x39\x95\x36\x4d\x4f\xa6\x97\x8b\x5e\x9d\x85\x6e\xba\xd3\x43\xbf\xc1\x1a\x29\x38\x0b\x86\xbb\xef\xa9\xde\x3b\x64\x24\x85\x44\x3d\xa3\xf0\xb6\x44\x3f\x59\x49\x00\x8f\x5b\x5f\x1e\xb2\x85\x51\x30\x62\x93\x01\xd6\x2f\xdf\x65\x92\x69\x2e\x0b\x1f\xe0\x98\x6b\xe3\xb6\x0c\xb2\xd0\x1b\x96\x32\x94\x18\x44\x6a\x74\xec\x3c\xc3\x1b\x28\x0b\x4d\x38\x63\xe3\x7c\x29\x1a\x7d\xe4\x2f\xe9\x7f\xf2\xbe\xb1\x7d\xac\x35\xde\x5c\x39\xd4\x6d\x1b\xad\xcd\x20\x1e\x9c\xf0\x1c\x56\xec\x50\x26\xf8\x05\xf6\x66\x51\xec\xf2\xcb\xe7\x6e\x07\x46\x8f\xaf\xee\xe6\x37\x57\x49\x57\xbc\x59\xd3\x49\xee\xbf\xfb\xdf\xb3\x61\xde\x7e\xd1\x93\xb6\x05\x3d\xda\x86\x5a\x5b\x7b\x57\x12\xa3\x53\xe0\x4d\x02\xe7\x31\x55\xa4\x88\x43\x6f\x19\xca\x70\x5c\xde\xa8\x0e\xaa\x7c\x7f\xe7\x2c\xe8\x69\xc5\xa5\x38\x21\x96\xc2\x52\xec\xc4\x09\xc3\x97\x1a\x56\x64\xe1\x84\xb2\x5d\xad\xf8\xf3\xb0\x3d\x4d\x96\xa5\x6a\xee\xfc\xaa\xe9\x4a\x7e\xf5\x54\x0b\x46\x22\x64\x50\xf3\x52\xb7\x12\x3b\x6a\xe0\xa8\x3e\x27\x99\x64\xd3\xb4\x28\x65\xc3\x1c\xb4\xcf\x15\xe3\xaa\x3d\x32\x7f\x2d\xf8\xcb\x10\x12\xbd\x75\xe4\xbb\x52\xb2\xf3\x8a\x36\x36\x42\x0d\xa0\xa4\x0c\xec\xdc\x68\x9b\x34\x4e\xf9\x57\x16\x9e\x02\xb4\x34\x78\x81\x68\x07\x67\xf5\x5d\xfd\xc3\xb7\x9e\x16\xd4\xbb\x6f\x32\x48\x6a\xae\x30\x64\x59\x3b\xaf\x3f\xf5\x2b\xe6\x87\xe9\xbe\xba\x0f\xaf\xd4\x7e\x23\x89\x5e\x33\xf5\xe6\x65\x23\x34\xf3\x0a\xbe\x60\x23\x19\x5c\xf2\x30\x9a\x7f\x58\xc9\x1e\x95\x10\xee\x96\xe0\xb1\x0b\x6d\x3f\x67\x72\xd5\xb1\xb5\xd7\x42\xf1\x90\x68\x07\xc8\xc0\x10\xde\xa7\xfe\x42\xcb\xb1\x46\x2d\x9c\x6d\x6a\xa6\xde\x30\x4
6\xa9\x7a\x4a\x51\x59\xb9\x9a\x71\xdf\x7a\x93\xaf\x0e\xfd\x0e\xf7\x8f\xe7\xb0\x2d\xe0\xba\x12\xc7\x70\x2f\x52\x68\x43\xd2\xa4\xb1\xe6\xdd\xeb\xd8\x75\x0b\xa9\xc5\x12\x4e\x21\x95\x8f\x9d\x04\xa3\x15\x1c\xac\x12\xff\xf0\x45\x95\x73\x97\x1e\x29\x2a\x03\x13\x04\x08\xf5\xd3\x28\x18\x50\x70\x42\x26\x63\xf4\xd5\xb6\x0d\xdb\xc6\x3f\xb7\x80\x98\x74\x8f\xba\xe0\x72\x84\xa8\x08\xd0\x54\xc5\xe3\x20\x65\x7f\xe7\x58\x48\x10\x47\x66\x53\x40\x41\x13\x83\x60\x06\x69\xe2\x62\xca\x47\x9a\x3b\x05\xda\x2f\xa5\x72\xad\xb9\x64\x5c\x81\x46\x08\x1d\x52\x2f\x5a\x6c\x8f\x4d\x74\x2b\xd5\xf5\x05\xc3\x01\x40\x4b\x8b\xe5\x3a\x26\x86\x02\x2a\xe0\xfd\x7b\x7f\x93\xd6\x0d\xf3\x53\x99\x8a\xa5\x9a\xf2\x21\x31\xa2\xb8\x3a\x47\x71\x05\xd4\x92\xc0\xf2\x52\x64\xe0\x45\xb2\xef\x4b\x6b\xdc\xa1\xfc\x69\x5d\x89\xd5\x9a\xfe\xa8\xda\xc4\xfd\xe7\x1c\x7e\xe4\x13\x1c\xae\xf1\x82\xc4\x70\x88\x18\xbd\xeb\xac\x6e\x9c\x96\x2c\x0e\x00\xfc\xab\x26\x7f\x99\xef\x78\x0d\x03\x6f\x94\x64\x29\x9d\x4f\x5d\x66\x88\xc9\x21\xa4\x36\x3a\x9e\x36\x81\xab\x8d\xc5\x47\xc9\x59\x39\x85\xf9\x1a\x3d\xad\xbf\x9e\x77\x7a\x25\x6a\x05\xaf\x3d\x41\xd4\x20\xc1\xe7\x71\xcb\xed\x61\x73\x1c\xbc\xfd\xd4\x7d\xc3\xc7\xb6\xb0\xea\x7e\x87\x57\xc7\x73\xb5\xdd\x4f\x7d\xff\xf4\x2f\x15\x95\x09\x8e\x7e\x70\xac\x3f\x9b\xb2\x9f\x17\xea\x21\xca\x4b\xa3\xb3\x08\x65\x46\xa6\xee\xb0\x56\xbb\xa3\x28\x30\x7b\x0c\x62\x4a\xed\x30\x51\x74\xa5\x41\x27\xc5\x89\x71\xf4\xfb\x7a\x9e\xd7\xb8\x3a\xec\xfb\xe4\x85\x8d\x44\x5f\xa4\x02\x58\x48\x10\xa9\x31\x0a\x9c\x6b\xdc\xdd\xbf\xa3\x22\x11\x85\x2e\x9b\xa5\xff\x63\x03\xe6\xf8\x81\x07\x69\x7d\xfb\x15\x5a\x4b\xa0\x7a\xf1\x90\x66\x91\xb8\x19\xbb\x46\x89\xb4\x79\x76\xb0\xb4\xc5\x12\x5c\xf0\x09\x54\x08\xee\x8d\x53\x5b\x13\xcd\x3f\x1a\x11\x8c\x8a\xa8\x95\x09\xd1\x54\xb8\x61\x9b\x67\xf3\x58\xd7\xb4\x64\x9b\xba\x06\x74\xd4\x44\x88\x16\x51\x60\x1d\xbd\xf4\xda\x08\x7e\xdc\x56\xa4\x00\xf5\x46\xcd\x5d\x5b\x97\xc1\xa4\x09\x32\xdf\x7e\x89\xf0\xda\xb4\xda\xc5\x99\x7e\xf5\xd3\xd4\xf8\xd4\x3d\xd8\x7b\xe3\x21\xcd\x94\x57\xcb\xc2\x92\xa1\x6f\x27\xa
d\x0d\x23\x8a\x55\x1b\x0f\x0c\x1d\x01\x9a\x04\xb4\xf6\xde\xa7\x8e\xdf\x2b\xe3\x9e\x6b\x79\xa8\x41\x7e\xe1\x06\x50\x26\x41\xa4\x0b\x37\xb7\xf8\xea\xc8\x3f\xfe\xc4\x8c\x1a\xa9\x43\x6b\xc1\xeb\xb8\x92\xd0\xec\x34\xa6\x8e\x8a\x98\xf9\x6f\xcc\x90\xd8\x3b\xdf\x26\x0a\x1b\xfa\x78\x0d\x5d\xad\x6e\x59\x43\x3d\x2d\x88\x23\xd2\xbc\x28\xa2\x70\x86\x32\xb3\x97\xd2\xd7\x56\xd4\x86\x30\x34\x0e\x64\xd4\x09\x9c\x98\xad\x5f\x2c\x01\xb3\xd8\xb2\x25\x44\x29\xc7\xdc\x11\xd0\x2e\x8b\x49\x92\xad\x01\x81\x5a\xb1\xdd\x76\x02\xfe\x70\x0b\x5e\xcb\xe5\xb0\x21\xf4\xb1\x85\x64\xc3\x80\x8a\x09\xdc\x10\x97\xcb\xf1\xcf\xce\x00\x2b\x0e\x18\x07\xbd\x45\x91\x0b\x87\x27\x1f\xb9\x33\x4d\x4b\x73\x5a\x8a\x70\x5d\xa0\xb7\x2c\xcd\x08\xc5\x31\xac\x30\xe0\xb5\x09\x58\x46\xe8\x4b\x50\x19\x3f\x15\x17\xc6\x24\x70\xf4\xb7\x3a\x9d\x9a\x75\xe4\x34\x3b\xd0\xf4\x28\xd0\x12\x6d\xd9\x9d\xca\x90\xca\xb2\x9b\x64\x8f\xed\x06\xac\xc0\xd5\x8f\x0c\x2a\xe1\xe6\x30\xaa\x74\x9f\xa2\xbb\xeb\xba\xa5\xfa\x6f\xff\xfd\xdb\x62\x03\xef\x8c\xf7\xc8\xb7\x38\xdb\x87\xc7\xfe\x1d\xcd\x62\x38\x2f\x18\xe9\x21\x62\x4f\xc6\x83\xaa\x3a\xd1\x71\x1b\x4d\xa6\x5b\x5e\xa2\xbd\x66\x3d\xda\xaa\x4f\x87\x53\x5b\x8b\xb1\x43\x13\x06\x73\xd7\x2a\x6e\x27\x65\x7a\x35\xa5\x56\x1a\xe8\x78\xcc\xa9\x04\x1a\xbd\xbe\x7d\x6e\xbf\x1f\x89\x53\xbe\x8b\x22\x35\xdd\x25\x7b\xa6\xd8\xee\x69\xd3\x4a\x17\x22\x9e\xee\xfe\x29\x06\x9d\xc6\x7b\xa0\x6e\x93\x00\x9a\x9c\x05\x4b\xd5\x60\x6c\xb6\x57\x11\xfe\xa2\xf4\x5b\x98\xe9\x5f\xb4\xba\xf9\x98\x50\x63\xaf\x34\xf2\x77\x8c\xb0\xd5\xb9\xca\xb3\x67\xd2\x65\xbf\x79\x3f\x18\x5a\xc2\x61\x49\x05\x61\x8e\x9a\x66\xda\xb8\x80\x2f\x24\x10\x39\x94\x91\xc4\xb2\x6e\x0a\x52\xe2\x6e\xbc\x87\xf3\x95\xc4\x7f\xf9\xb0\xc2\x18\xcb\x21\xce\x50\xbb\xe3\x72\x2f\xfb\x85\x8e\x75\x1b\xd9\xce\x0e\x47\xc4\x7c\x29\xc4\xd5\x0c\x14\x00\x9a\x53\x3e\x91\x3b\x44\xfd\x23\x74\x3c\xbc\xe7\xce\x87\x08\x52\x2b\x5d\xe2\xc9\x1b\x3f\xa3\xfc\x07\x02\x7e\x16\x03\x16\x89\x0f\x58\x9b\x3e\xc9\x03\x8c\x99\x6f\x92\xf0\x4a\x68\xe4\x56\xf5\xfe\x70\xf2\x4b\xb2\xcf\x09\x1a\x95\x70\xd
a\x31\xb7\x4e\x8b\x76\x94\xf6\xbe\x8d\x8b\xdb\xb0\x16\x58\xc3\x6c\x79\xf7\x43\x23\xf9\x74\x3b\x5b\x5f\xbe\xd0\xf1\xeb\x11\x0f\x50\x30\xaa\xed\x5d\x4c\x16\x2c\xc3\x6c\x5c\x86\x49\x35\xa9\x73\x31\xaa\xb4\xe3\x25\x78\x18\xc8\x82\x96\xfe\x1b\x7b\x62\x3a\x81\xef\x7d\xa1\x90\x42\xeb\x2c\x26\x48\xa3\xb0\x3e\xee\x1f\x8f\x55\xf9\x14\x96\x16\xf8\x0f\x96\x7d\x51\x36\xac\x86\xec\x25\x5e\x0a\x30\xaf\x05\x3d\x95\x29\x6c\x26\xef\x81\x10\xe1\x29\xd6\x47\x55\xd0\x42\xca\x43\x24\x76\xa0\x0f\xc6\xd0\xef\x16\x95\x14\xf8\xdd\xcb\xf5\xf0\x3c\x6b\x95\xf1\x4d\xe2\x22\xf7\xc1\x4c\xe9\x39\x67\xf1\x64\x32\x38\x38\xd7\x89\x4e\xfb\xac\xb8\x1d\xc8\xef\xad\xe0\xf6\x28\xa6\x08\x82\x63\x0a\x10\x49\x4d\xaa\xee\xcd\x5b\x9f\x77\x93\xc4\x7a\xfc\x3f\xb9\x66\x05\x15\x62\xff\x01\xbe\x9d\xc8\x21\x37\x39\xa3\xd6\x3f\x33\x78\x7c\x4b\xa6\x97\xfe\x2d\x6c\x7e\xe5\x7e\xf4\x8a\xe5\x84\x37\xed\x09\x56\x5a\xca\x41\x47\x10\x7e\x80\x14\x3f\x60\xbf\xeb\xa6\x5c\xe2\x0f\xa1\x71\x6c\x78\xeb\x74\x8b\x81\xf7\x93\x41\xdf\xb7\x73\x36\x6c\xea\xee\x27\x1a\x86\x58\x5f\xcf\x2b\xa6\x1d\x0a\x22\x05\x53\xf9\xf7\x5e\x34\x99\x38\x65\x9e\x7e\xd3\x2e\xc2\x69\xfa\xd1\x70\x23\x05\x49\x35\xd4\xb9\xb8\x97\xfa\x42\x62\x70\x35\xe3\xfc\xd1\xeb\x29\x27\x55\x1f\x59\x00\xa8\x3f\xbb\x26\xda\x59\xdf\xf2\xb9\xcf\x7c\x17\x06\xba\x41\x19\x2b\x55\xe0\xcf\x28\x2c\xfa\x39\x4d\x2c\xf9\x5b\x0f\xc5\xe8\x1a\x1b\x73\xc9\xa6\xb5\xf3\xac\xcb\xbc\x96\xca\xc8\xaf\x05\x36\xfe\x04\x67\xd4\x66\xef\x33\x38\xeb\x86\xf3\xb1\x35\xcd\x99\xb0\x59\x48\x9f\xfa\x9d\x12\xa5\x15\x54\x75\xb6\x17\xce\xf0\x26\x7f\x0b\x4a\xdd\x71\xd2\x71\xbf\x68\x4e\x3c\xf9\x6a\x80\x1d\xd8\xfc\xf0\xe3\x65\x3d\xae\xdf\x2f\xd2\x40\x6d\xb3\x50\xa0\xce\x15\xd8\x5a\xa3\x40\x92\x61\xea\x97\xf8\xe8\x52\x41\x6d\xfc\x31\x9f\x1a\xb2\x97\x8f\x1d\x8e\x3a\x6b\xf4\x57\x1e\xcb\x18\xce\x58\xed\xfc\xa7\x78\xb9\x17\x42\xc8\x3a\x11\x85\xea\xb4\x1f\xec\xaa\xc5\x6f\x8f\x85\xed\x09\x16\xbf\x62\x4d\x31\x39\x28\xb2\x0d\x4d\xc0\x60\x1c\x10\x66\x46\x93\xed\xf8\xf1\x13\x08\x24\x60\x9a\x53\xd6\x31\x1a\x5b\x78\x1f\x2f\x85\xa
9\xed\x6a\x70\xec\x9a\xf0\xb3\x04\x9a\xfe\x3e\x73\x4d\x08\xda\x49\x81\xf5\x95\xa8\xfa\xe5\x14\x83\x1b\x3c\x9d\x38\xee\xde\x66\x81\xba\x17\x84\x5a\xab\x6d\xbb\xe6\x15\xe3\xd4\x91\x83\xbb\x67\x9d\x7d\xb4\x94\xc2\xe7\x1c\xf8\x59\x50\xb4\xc2\xca\x49\xf2\xd4\x19\xeb\x65\x4a\xca\xef\xdf\x7a\x03\x96\xa1\x5b\x56\x85\x46\x02\x1d\xd2\x32\x24\xe2\x80\xa8\x5b\xb6\x77\x36\x83\xa8\xc4\x2e\x77\x15\x29\x8c\x89\xdf\x74\xdc\xe8\xa6\xbf\x68\xee\xd5\x8f\xd9\xf4\x14\x55\xef\x60\x17\xb7\xe0\x55\xe6\xbc\x16\x46\x1d\xc7\xd2\x7e\x73\xa3\x0b\x50\xbe\x24\x0a\xac\x5d\x1d\x3b\xe4\x29\xdf\x9f\x6b\x15\x4c\x65\xf4\xde\xa4\x4c\xf7\x9f\xa8\x64\xe2\x21\x31\x71\x2e\x6d\xb7\x76\xbe\x41\xdb\x2e\x3a\x77\xcf\x54\x3e\x88\x30\x1c\xab\xcb\x3c\x0e\x01\x66\x13\xc7\xd8\x88\xdd\xf1\xf4\xe1\x37\x8a\xfa\x80\xfb\x02\x61\x18\xc7\x2f\xda\xda\x9b\x57\xda\x1b\x8a\x0b\xf8\x07\xfe\xad\xb5\x54\xb8\x5d\xc1\xad\x87\x75\x62\x5c\xf5\x42\x31\x9c\xd6\x38\x32\xa3\x54\x81\x83\x3d\x34\xb4\x47\xb6\x48\x3a\x89\xc1\x50\x04\xd6\xd0\xb9\x62\x47\x86\x28\x6b\xa7\x67\x69\x61\x53\x95\x12\xa2\x26\xa5\x50\x8a\xb8\xf0\x21\xe8\x5b\xce\x00\xc5\xed\x9d\xf6\x12\x66\xda\x30\x05\xe1\x5f\xfa\xab\x57\xbc\x9d\x34\x8e\xc5\x48\xfa\x16\x65\xa2\x0f\x49\xd5\x45\x7f\x86\x3a\x33\x10\x73\xe1\x79\xc2\xef\xfb\xf8\x2e\x84\x0a\xe1\x09\x35\x40\x3a\xdc\x62\x35\xe7\xaa\x42\x55\xf6\x49\x3d\x12\xc5\xbf\x7b\x3b\x0c\xda\x16\x72\xe9\xdb\xbd\xed\x4f\xa8\xeb\xd1\x37\xea\x59\x39\x1d\x03\x4c\x82\x3a\xd9\x06\x29\x78\x2a\xa0\xb2\x19\xd9\x10\x3c\x79\x08\xe9\x48\x9c\x36\xe8\x52\xf1\xdf\x57\x5f\xa5\xa1\x6f\x53\x73\xc7\x39\x48\x8c\x82\xf0\x34\x32\x91\x5f\x5d\xdc\x00\x14\x5a\x63\xdd\xc1\x56\x28\x68\x26\x01\x7a\x74\xe0\xf5\xb8\xdc\xcf\x7f\xec\x5f\xa9\xc7\x42\xe2\x62\xf6\x2b\xf2\x64\xe2\x83\xda\x7d\xcf\x8c\xe0\xd4\xf6\x4d\x38\xee\x6f\x25\x4f\xba\xbc\xa2\x17\x6c\xd9\xb1\xa8\x8c\xe1\xbc\x3a\x20\xc8\xab\x5e\x6c\xb0\xd2\x59\x02\xc8\xcf\x7b\x14\xa9\x48\xe2\x5c\xd6\x5c\x70\xfe\xd6\xfb\x0b\x0c\xc2\x6d\x81\x03\xb6\x56\xfb\x53\xfa\x09\x76\x5c\x36\xb2\xfb\xc1\x58\x85\xe7\x68\x91\xf9\xc6\x03\x1
e\x74\x0b\xb5\x91\x60\xea\xc9\xdb\x72\x02\xd2\x9b\x7c\x37\xfe\x81\xda\xa6\x79\x19\xc5\x1d\xbb\x98\xc2\x81\x88\xee\xde\xec\x26\x69\x3d\x69\xeb\xc1\xe7\xa0\xdc\xbc\xe4\xad\xe9\x34\x55\xd6\xc4\xd8\x2f\x05\xb3\x2a\x89\x90\x2b\x71\x6c\x5a\xa0\xde\x62\x2d\x30\xdc\x1c\x0f\x67\x5c\xb6\x90\x11\xcb\x70\x22\x86\xb3\x7c\x99\xf9\xe9\xca\x38\x53\x20\x7d\x5d\xb4\x32\xe2\x37\x7a\xdd\xa2\x51\x81\xc7\xdd\x07\xba\x5b\xdc\xbe\xe3\x32\xab\x38\x1a\xaa\xf5\x4f\x0e\xb8\x5f\x12\xfc\x68\x38\xd6\xca\xdd\xc8\x32\x75\x53\x58\xf1\xb0\x92\x2f\x23\x1a\x0c\x5d\xdd\x69\xd2\x16\x16\xc2\x83\x34\xf0\x40\xe5\x33\x10\xf8\xee\x07\x62\xe5\x81\xea\x0a\x45\x76\x81\xb1\x8c\xfe\x90\x5b\x94\x7d\x06\x9c\x86\x31\x1d\xed\xbb\x55\x59\x52\x89\x20\x98\x5b\x8b\x32\x02\x0f\x22\xa3\xf2\xb1\xf1\x41\xc7\x4e\xf5\xa8\x35\xe7\x30\xfe\x8b\xdd\x6c\x9a\x5a\xbd\x67\x24\xe7\x26\xb2\x4e\xe4\x2f\x33\xa5\x95\x09\x38\xa1\xf1\xf3\xfc\x7d\xb3\xdf\xa7\xa3\x1f\xbc\x86\x3e\x93\x3c\x5c\xfd\xb7\x77\x87\x63\xfe\x16\xa4\xbd\xa2\x61\x5a\x94\xec\x26\xd1\x78\x74\x87\x02\xb9\xe7\x3b\xcf\xbd\x2d\x46\xcf\x81\x39\xcd\xc5\x47\xc0\x60\x8e\xed\xa0\x90\xb9\x47\xa3\xd0\xa6\x10\x13\x4f\x59\x03\x67\x0b\x56\x5b\x7b\x36\xd1\x50\x2d\xb2\xa7\xb4\xb9\x24\x7e\x6d\x20\x0b\x27\xec\xe9\x2a\xdf\xb0\x0b\xa7\x93\xac\x20\xd5\x57\x3d\x33\x2e\x8b\xd4\x0c\xc7\x90\xa1\x98\x46\x53\x57\x37\xe9\x20\x81\xd2\x1f\x6d\xb6\x9a\x4b\x79\x90\x14\xe0\xe9\xa7\x8f\x14\x13\x4e\xae\x30\xf1\xf1\xff\x04\x2f\x95\x85\x1f\x6d\x0f\xee\x54\x20\x18\x90\x5e\x26\xf6\x74\x39\x0f\xa6\x7f\x4d\x26\x4f\xa3\x6e\x5f\xb2\x51\x5b\xc5\xf3\x33\x4d\xc9\x54\xd2\x20\x53\x48\x1a\x3e\x65\x9a\xe3\x1c\x61\xa2\xdf\x52\xc3\x6c\x9c\xec\xef\x4e\xb8\x9e\xe3\x11\xc7\x1e\x77\x00\xa6\xaf\xab\xec\x66\x04\xc6\x61\x05\xe1\xe6\x04\x91\xa3\xfc\xaf\x2a\xaf\x05\xca\xa1\x53\x87\x81\xeb\x20\xce\x6f\x27\xcf\x3f\x7f\xdb\x49\x9c\xb3\xc6\x32\x4a\xa3\xef\x9e\xe8\x74\x9f\xdd\x40\x4a\x33\xd5\xd4\xc7\x1b\xc6\x6d\x0a\xa8\xcf\x1b\xe7\xcb\x6b\x57\xb4\x8a\xc1\x3f\xc3\x90\xdb\x25\xb4\x36\xd4\x4a\x79\x27\xf3\xbd\xec\x43\xf2\xf7\xd5\x35\x45\xdb\xc
3\x21\xda\x43\x5f\xd8\xf2\x38\x1f\x7d\x3b\x2b\x2a\x7b\xba\x2a\x99\x20\x9e\x4f\x6e\x7f\x61\x2c\x06\x67\x14\xcb\xf6\x61\xba\xcd\xd6\x2a\xa8\x8c\x21\x03\x62\x92\x25\x32\x5d\x29\x76\x89\x84\xbd\x10\xe7\xf6\x02\x15\x0a\xc4\x28\x30\x2d\x58\xfd\x80\x02\xf5\xc1\x46\xf0\xfe\x94\xfd\x26\xd9\xf7\x0c\xee\xb1\x27\x8d\x7d\xa5\xb3\x99\x77\x32\x1e\x8a\x9c\x57\x4a\x10\xb7\x7a\xeb\x80\xeb\xff\xe9\xff\xd3\x5f\xe7\xb7\xe3\x86\x02\x2b\x2b\x6c\xc4\x23\xa6\x55\x44\xaf\x78\x16\x44\xa3\xa9\xb7\x30\x03\x18\x94\x0d\xba\xc9\x2c\xd1\xca\xb4\xce\xc1\x50\x54\x2d\xd1\xef\xaa\x05\x5e\x7b\xa7\x2c\xe7\xbe\x29\x61\xfd\x46\x6a\x2e\xd0\x71\x4d\x7a\x84\x5b\xd2\x71\xa6\x16\x76\x76\x93\x06\xbe\x3c\x36\xc4\xb5\xdd\xef\xa8\xa5\xe0\x31\x20\x86\x40\x92\x22\x66\x53\x62\xea\x94\xda\x6c\x5c\x96\x35\x43\xb6\x8d\x00\xae\x95\x00\xae\x8d\x00\xd6\xdf\xb8\x2c\xd6\x41\x7a\xd0\x41\xe6\x7a\xdd\xf2\xed\x8e\xb4\x70\xb4\x5c\x05\x3e\xf1\xbd\xf5\xbc\xd6\x64\xde\x79\x43\x99\x6e\x24\x7a\xc1\x84\xd3\x76\x3f\x36\x3d\x33\xdb\x0b\x79\x92\xe5\x2e\x62\xde\xe4\x8f\x5d\xe4\xbd\xd1\xf8\x33\xc7\xea\xa7\x2e\x3d\xdc\x90\x45\xe9\xdf\x58\x66\xca\xdd\x7c\x67\xb6\x3b\x77\xfc\x59\x50\x2e\xec\x15\x2b\xbf\x3c\xd8\xd4\x99\x6b\xba\x11\x7f\x27\x10\x5e\xeb\xf8\xec\x59\x39\x2d\x24\x76\x20\xf7\x3b\x56\x5a\xe1\xaf\xf9\x87\xe2\x52\x35\xe1\x40\x15\xc3\x8f\x17\x0c\xaa\xe7\x1c\x06\x94\xfc\xd3\x0f\xab\xfd\x63\x13\x9b\xf6\x77\x7c\xe5\xb2\x65\xc0\xd6\x83\x41\x7e\xd9\x26\x07\xfd\xb2\x5d\xe8\x9d\xd9\xea\x45\x79\x73\x5d\xa4\xb9\xb0\xbd\x7b\xd7\xd7\xc8\x9c\x2b\xb4\x3f\x22\xbd\x16\xa2\xce\x39\x3b\xd6\x86\xba\xfd\x65\x09\x40\x86\xd9\xe9\x06\x65\x36\x66\x8c\xce\xba\x82\x36\x32\x75\xaf\xab\x18\x71\xd1\x42\x09\x1c\xd7\x07\x60\x20\x78\x50\x17\xd1\xaf\x03\x34\xae\x1f\x52\xc2\x48\x8a\x49\xdf\x2d\x4e\xb5\xce\xdd\xb3\xd6\x6e\xda\x97\x23\x83\x66\xba\x05\x63\x34\xd4\xab\x80\x72\x6b\x55\x38\x46\x95\x96\xec\xd3\x08\x68\x77\x4f\x42\x95\x80\x35\xfd\x28\xc0\x96\x4f\x72\x74\x73\xe6\xf8\xa5\x48\x31\xa5\x48\x38\xc0\x73\xbb\x11\xfe\x1c\x72\x41\xe8\x06\xa1\xb8\x44\xd1\xa0\x66\xd6\xf8\x3
3\x46\x53\xec\x9e\x91\x61\x15\xd0\x7b\xca\x23\x5e\x1d\x8e\xa9\xb1\xfa\xd7\x11\xf9\x9d\xfe\x4a\xa9\x6d\x3c\x7d\x7c\xee\x91\x5b\xc5\xf6\xc5\x4d\x59\x96\xa9\x52\x71\x40\x80\xa7\x17\x99\x6f\xde\x13\xdc\xf8\x87\x02\x4c\xae\x71\xb0\xfc\x17\x73\xf8\xe3\xc3\x24\x92\x8d\x7f\xb6\xeb\xd8\x2c\xb0\x13\x45\x12\x45\x5f\x2b\x7e\xfb\x43\xf7\x82\xcc\x94\x3a\xb7\xc6\xa9\xed\xfb\x9e\x25\xb3\x25\x45\xfe\x5b\xd5\x5d\x5b\xd2\xba\x65\xe3\xf2\x27\x72\x63\xbb\xff\x08\x54\x61\xcb\x3c\x8d\x6c\x3a\x42\x5f\x55\x09\x8a\xfb\xad\x09\x9e\x3e\x3c\x54\xac\xd9\x76\x7a\x3e\xc6\x7b\xc1\xc9\x0f\xe5\xf9\xc3\x56\x7e\xde\x76\xe3\x61\x01\xdb\x7d\x52\x60\x83\xd3\xac\x57\x07\x78\x59\xf9\x92\x30\xdc\x4f\x50\xeb\x73\x8d\x88\xb8\x83\x27\x58\x28\x91\xe0\xf6\x39\xc2\x94\x50\x6d\x00\x78\xac\xd9\x78\xac\xe9\xf8\x01\x64\x01\x15\x60\xff\x52\x9b\xee\xce\x58\x66\xc6\x7a\xff\xaa\x52\xe7\x2e\x51\x3c\x20\x76\xc3\x51\xdc\x7d\xe8\x1e\x34\x69\x37\x50\x56\x6a\x7f\x14\xd7\xf1\x9b\x12\x74\x42\xf0\xc5\x8a\x51\xe2\xe0\x29\x87\x63\x12\xae\x0e\x38\x06\x47\xc7\xd4\xd7\x52\x47\x53\x5d\x6d\x09\xd6\xe3\x38\xae\xc3\xf2\x7b\xbc\xd8\x76\xfa\x03\x84\xc4\x88\x55\x21\x08\x45\x17\xba\x12\x1f\xa8\xc4\x86\xb2\x2f\x3a\x7e\x48\x8a\xda\x39\x16\xac\x2e\x94\xf4\xe9\x17\xd0\x39\x0c\xc9\x53\x8c\x65\x34\x76\x86\x4a\x5b\xee\x60\xc2\x94\x35\x7f\x39\xc7\x16\x5b\xb5\x27\xa1\x0a\x6b\xe4\x9b\xe7\x5d\x9c\xc5\xdc\xf6\x22\xb9\x0b\x5d\x7b\xfd\xe0\x4f\x6c\x19\xb4\xc1\xe7\xb0\x53\xcb\x7a\xa2\x10\xc6\xab\x82\x57\xb8\xa0\x49\x9b\x24\xe9\x62\x36\xd9\x06\x12\x18\xbf\x21\x54\xa1\x3b\xd1\xdf\x89\x6c\x30\x49\x03\x22\xdf\xc0\x2a\x0c\x31\x98\x54\x43\xb9\x25\x7f\xf8\x0b\xc8\x7c\xbf\x0c\xc4\x35\x7d\x2e\x4e\x9d\xce\xd2\x13\xb9\x5d\x48\xd3\xfe\xb7\x2d\x80\x30\x08\x4e\xe8\x1a\x53\xba\x19\x1f\xba\xff\xad\xc7\xd7\x09\xd1\x59\x6d\x5d\xd7\xc1\xea\x40\xe0\x0a\x4f\xcb\xa8\x79\xf7\xd5\xea\x66\x82\x95\x97\x61\x13\x57\x44\xda\x7b\xe6\x56\x39\x59\x7b\x87\xd7\x53\x98\x3a\xcd\xfd\x55\xae\xde\x12\xb9\xe8\x5c\x63\xc9\x59\xc1\x8d\x72\x70\x3a\xd9\xd0\x6e\x9e\x80\x14\xc7\xce\x76\xb3\xa2\x34\x37\xb
9\x68\x51\x18\x41\x96\x38\x62\xde\xfa\x81\x9a\xac\xd5\x54\xb1\x76\x1f\x84\x7e\xf5\x2c\x5a\xb6\xd4\xe1\x14\x94\x15\x5a\xff\xa6\xb6\x3c\x75\x79\x74\xc6\x1a\x03\x76\x33\x69\xdd\x54\x69\x8b\x0d\x4e\x19\x3b\x0f\x80\x79\x65\xa3\x04\xb5\x12\xd7\x36\x59\xc0\x9b\xf0\xfc\x4c\xc7\xb3\x5a\xaf\xed\x69\x8d\x06\xbc\x0b\x59\x27\xf9\x21\x41\x9b\xed\x93\x10\x05\x98\x81\x2b\x46\x95\x86\x0f\xcc\x38\x5f\x5d\x5f\xbe\xae\xeb\x5a\x8c\xdf\xef\x9a\xec\x29\x85\xa9\x1c\x10\x74\xc1\xd9\x47\xf5\x06\x4d\x0a\x4e\xc6\x75\x6d\xcc\xf0\xfd\xe8\xcc\x6f\x38\x01\x02\xc6\xdc\x98\xe2\xf5\xb0\x26\x7f\xe9\x91\x6a\x1b\x9c\xd5\xc8\xd8\x94\x30\x4e\x9e\x05\xf7\x3c\x2d\x55\x08\xfd\x7d\x9b\x6b\x7e\x19\xc8\xc4\x1f\x08\x7a\xb2\x8d\xad\xf0\x11\x30\xf5\x2e\x6f\xe1\xda\x1c\xe7\xc5\xca\xdd\xb4\x05\x5c\x9d\xb3\x93\x85\xa6\x3b\x00\xfa\x3e\x59\x9c\x48\xf9\xdd\xe5\x5a\x22\x25\x0c\x5c\x74\x6a\x5a\xe3\x2b\x05\x7e\xb8\x1e\x30\xbe\x47\x5f\xba\x03\x40\x6a\x6b\xc1\x5b\x35\xc4\x3f\xe8\xe9\xb2\x32\x4f\x03\x95\x55\x5a\x1f\xfa\x3b\x41\xed\x7b\x5f\x6a\xa9\x0d\xbe\x0d\x6c\xaa\x7f\x5a\x34\x0d\x65\x2e\xc6\x5c\x73\xb4\x1c\x53\x11\x9c\xd2\x13\xc1\x5e\x50\x0d\x0f\x77\x02\x25\xb7\x05\xb0\x79\x55\x76\xe0\xf5\x87\x02\x05\xf6\x02\xe7\x94\xda\x05\xe4\x00\x2d\x15\xaf\x1f\xb5\x2e\xb9\x6b\xba\xd1\xee\x64\x1d\x89\x47\xb7\x01\x21\x0b\x00\x6a\x39\x41\x6b\x02\xf1\xbe\x7f\xc5\x22\x1a\x2f\x41\x64\x46\x62\xb4\x54\xc4\x79\x8e\x9e\xea\xec\x6d\x85\x28\x9e\x02\xc5\x68\xc9\xc5\x89\xfe\xfc\x03\x00\xf5\x26\x81\x91\x05\x2d\xe2\xf8\x40\x7f\xe6\x44\xe2\x18\xfd\x23\x51\x7c\xe9\xb3\x6e\xf4\x53\x1a\x99\xdf\x74\x7f\x09\x9c\x56\x55\x01\x36\x1e\xa6\x9a\x1e\x06\xf8\x63\x63\x24\x0b\xdc\x64\x6a\xdb\x68\x44\xa2\xd1\x44\x3b\xe7\x9f\xc0\xbe\xad\xc4\x92\x46\xa1\xce\xec\x16\x3a\xbd\xb8\x72\x8c\x9f\x5a\x3f\x19\x92\x9e\xbf\x91\xf4\x8a\x96\x49\x78\x63\x1b\xf2\xfd\x9d\x45\x56\x45\xa9\x5f\x63\xa8\xdb\x45\x48\x9d\x48\x26\x22\x4e\x7f\x7e\x74\x81\x57\x10\x0a\x45\x85\x84\xed\x53\xd6\xd7\x83\x89\xf5\x2c\xd5\xf4\x89\xfe\x8a\x13\x4b\x72\x1b\xf9\x15\xe7\x90\xa3\xa8\x8d\x89\x4b\x16\xb5\x73\xa
4\x13\x43\x72\x09\xde\x05\x53\x40\x6e\x46\x17\xa2\xfe\x2c\xb0\x28\x25\xd1\x53\x9d\xa9\xf3\x18\xa5\x0e\x98\x5a\x66\x88\x89\x84\xf6\xda\xf3\x35\xbe\x84\x7a\xb1\xa6\x24\x28\x52\xf3\x1b\xb1\x8b\xbd\xd9\xc5\x78\x56\xee\xf4\xfb\x99\xde\x4a\x07\x49\xcc\x32\xe2\x07\xda\x24\x0d\xa0\x3f\xd0\x98\xc6\x2d\x3c\x11\x93\xcf\x51\x1e\xac\xec\xce\x5d\xd8\x7c\x05\x94\x40\x3a\x05\x87\xde\x6e\x74\x69\xd1\x99\x65\x57\x2f\x50\x19\x2e\xd6\x96\x4a\x53\x5c\xca\xc3\x88\x20\x89\x6c\xf6\x6c\x5a\xc8\xc7\x23\xe7\xd4\xfb\x07\xe8\x31\x6f\xde\x65\x66\x96\x8e\x88\x4c\x77\xf4\xa7\x89\xaa\x31\xba\x0a\x54\x94\xff\x1f\x49\x67\xad\x24\xbd\x0e\x05\xe1\x07\x72\x20\x33\x84\x63\x98\x31\x33\x67\x66\x66\xf6\xd3\xdf\xda\xff\x46\x5b\x9b\xb8\x64\xa9\x75\xfa\x6b\xd5\x19\xcb\x14\x4f\x74\xe4\xce\xd1\xa8\x2f\xe3\x95\x23\x18\x3f\x4b\x6a\x2e\xef\xd4\x52\xbe\x96\x63\x3e\x98\x94\x81\xd7\xc0\xca\x03\x53\xd9\x03\x44\xd4\x8e\x3e\x9a\x7a\xb1\xc6\xb8\x90\xfb\x8a\xe3\x89\xfe\x7b\xf3\xad\x34\x7e\x04\xf1\x90\x0c\xa5\x2a\xeb\x4a\x21\xc3\x35\x59\xf2\x54\x53\x3d\x2e\x3c\x7e\x1f\x7b\x29\x82\x82\x68\x13\x09\xc6\x81\x77\xb5\x5e\x51\xd8\xe3\x1a\x38\x83\x4a\x63\x74\x77\x0c\x1c\xe2\xf7\x4b\xd3\x14\x45\x5d\x44\x06\xe5\x85\x79\x64\xca\xc4\x08\x13\x54\x6b\xc8\xc1\x20\xa9\x3e\x80\x78\x0b\x74\x31\xf7\x7d\x3f\x40\xf6\x54\x61\x1d\xe7\x81\xfc\xdf\x44\xfd\xe0\x55\x74\x17\xe4\xb3\x0f\x61\x7b\xa9\xe3\x3d\xca\x88\x3e\xc6\x47\x28\xf2\x76\x2d\x60\x85\xf9\xdd\x04\x77\x7e\xed\x5d\x9f\x74\xd1\x1c\xde\xa9\xcf\xa8\x86\x53\x3c\x32\x37\xa8\x72\xa7\x47\xa4\xf2\x23\x81\x27\xf9\x91\xb8\x33\x3d\x54\x7a\x7b\xc3\x52\xbe\x1d\x67\xff\x70\x88\xd2\x27\x7d\x92\x88\xb6\x50\xfd\x72\x8b\x69\x43\x06\xba\x9e\xa3\xb9\xe7\x79\x51\x33\xd2\x3a\xb9\x33\x0d\x81\x7c\x30\x5e\x0a\x27\x8b\xd5\xf9\x2b\x0d\x7f\x60\x40\x2d\xcf\x16\xf1\x5c\x32\xf1\x22\xfc\x74\x8e\x33\xfb\xdc\x82\x04\x0b\xfa\x4e\x0a\x07\xef\xaa\x4d\xdb\xe6\x00\x93\xb9\xb1\xfb\xc3\x4e\x9e\xd1\x5c\x69\x6a\x3d\x32\x10\xc1\xf7\x51\x61\x88\x2f\x53\x84\x7e\xe8\xc3\xb9\x58\x33\x60\x27\xc6\x0d\xfa\x34\x5e\xe9\xee\x5f\x65\xd6\xd9\x4
a\x93\x93\xa0\x9f\x93\xa0\x27\x08\x04\x60\xb3\x33\x7b\xaf\x46\x25\x36\xae\xb1\x67\x79\xa0\xbe\x49\xc4\xc1\xbf\x7b\xbe\x10\x3f\x0e\x45\x9c\x1e\x22\xb8\x78\xf9\x98\x57\xf6\x70\xdd\x7e\x36\x52\x48\x30\x2e\xf0\x2c\x13\x43\xe7\x2f\x75\xe7\x0e\x2b\x0c\x8e\x20\x61\x72\x15\x20\xb7\x52\xeb\xaa\x22\x14\x44\x75\x6a\xdd\xed\x32\x03\xfc\x18\x4b\x18\xd9\xef\x58\xe7\x1f\x66\xbe\x1d\xc7\xbe\xb5\x22\xfe\xb1\x59\xa0\x48\x8e\x1b\x1d\xa8\x19\x2c\xee\x77\x22\x65\x5a\xb8\x2d\x41\xf2\xd5\x8b\x59\xfd\x30\xbc\x61\x26\xde\xde\xec\x08\x8d\x73\xe0\x4f\x31\xe5\xe3\x16\xed\x6a\x25\x40\xcf\xf5\x9d\x51\x4a\x5a\xe9\xef\x9f\x1f\xfd\x71\x54\x6f\x12\x67\x95\xd3\x10\x4d\x5f\x7e\x46\x92\x16\xb1\x52\x3b\x84\xbd\x25\xda\xfa\xab\x3e\x82\xc7\x49\x54\x17\x3f\x31\xa2\x54\x29\x29\x85\x4a\x67\xdb\x25\x8a\x5a\xdf\xb7\x69\xfb\xd7\xfe\x0d\x8d\x83\xa6\x1f\x15\xd6\xed\x5c\x6c\x98\x05\x29\x76\x30\xb4\xf1\x36\x6b\xa3\x8b\xa6\xe6\x42\xc6\x73\xb1\xad\x9e\x92\xe7\x59\x01\xc1\x7d\x8f\xe0\xcb\xd3\x0f\xf3\xdf\xe4\x21\x00\xdc\xef\x1d\xaa\xfd\x05\x3d\xb6\xd4\x34\x8b\x3c\xbc\xf1\x4e\xbc\x14\x85\x19\x55\x25\x6b\x27\xe7\xe2\xcc\x95\x95\xc0\x14\xbf\x26\xde\xe0\x57\x9c\x97\x65\x10\x3e\x84\x36\x36\xd4\xe1\xda\x13\x5c\x80\x02\x6c\xb9\x34\x50\xd7\xb6\x6d\x67\x09\xa5\x31\x86\x8a\xb0\x86\xec\xa1\xcf\x14\x4b\xef\x1c\xa1\xef\xbc\xfa\x63\x98\xe8\x61\x8a\xed\x4a\xec\x18\x66\xfb\x2f\xb1\xd5\xdd\xb6\x59\xcc\x03\xbe\xed\x8d\x92\x80\x71\x93\x7b\x3f\x13\xe0\x02\x06\x7d\xf7\x87\x10\xaa\x77\x40\x1a\x24\x35\xb1\x16\x80\x12\xa3\x98\x21\x34\x73\x02\x97\x85\x27\xcd\xb2\xfd\xd8\xb7\x87\x84\x02\x73\xa1\x82\x7e\x77\x69\x08\xa7\x28\xf2\x4e\x8f\x87\xc8\xd5\x73\xb3\x24\xf7\xa8\xbc\xef\x0d\xdd\x76\x80\x61\x58\x2f\x46\xc1\xb7\xfe\xdb\x63\xe4\xb2\x2e\x39\x43\x40\x05\x63\xfe\x5a\xcf\xd8\x38\x59\x3c\x3a\x16\x12\x0b\xb5\xcd\xa9\xda\x2b\x55\x1f\x42\xb2\xc1\x6f\x23\xd4\xa4\x03\xec\x04\x8e\xcf\xdc\x6b\x55\xab\xd2\x27\x60\x25\xc1\x4f\xd6\x03\x69\x28\xec\x27\x84\xd3\x8a\x2d\x9c\x4c\x1d\xf3\x96\xd5\x79\xd7\xb0\x49\x50\xdf\x88\x5b\xaf\x90\x6a\x5a\x2c\x27\x24\x4a\xae\xf
e\x90\x55\x3e\xd5\x6a\xe5\x28\xec\x3a\xc7\x57\xfe\xf6\x29\x9c\xf1\x82\x96\x31\x3f\xed\x67\x6d\x81\x3a\x57\x68\xe9\x4e\x83\x4b\xaf\x3b\xaa\x52\xed\x19\x02\x86\x04\x06\x53\x30\xe6\xb1\xe3\x1f\x81\x86\xb4\x3c\x3c\x80\xf2\xff\xfc\x3d\x7f\xd0\xe8\x3f\x1c\x4e\xd0\xa7\x7e\x1e\x00\x43\x13\x12\xa2\xf4\x8d\xdc\xe4\x3d\x31\x85\x2b\x33\xc4\x6f\x4b\x8f\x17\x84\xa5\x3a\x9e\x71\x53\xf0\x50\x69\xf8\x9a\x2a\xb3\x12\x24\x91\x1b\x58\x11\xbf\x45\xb2\xae\x54\x3f\x60\xe7\x4d\x1c\x61\x4f\x68\x01\xd8\x61\x12\x49\x47\x6c\x7d\x74\x28\xcf\xce\xbb\x33\x6a\x2a\xdf\xb0\x55\x6e\x21\x9c\xa4\x28\x0a\x1b\xe7\x79\x3f\xef\xea\xd6\xf6\x9f\xe7\x3b\x70\x98\x05\xdd\x97\xad\x8a\xff\xfb\x45\xec\x09\x4e\xf4\x20\xdd\x50\xcd\x10\xf9\x69\x40\x2e\x07\x4d\x75\xaa\x40\xf6\x79\xbe\xe0\x96\xe0\x91\x3c\xf5\x4a\x93\x87\xa0\x1b\x3a\xc1\xde\x9e\x0c\x5a\x2e\xc9\xfb\xe2\xb7\x9d\x89\x27\x68\x1b\x22\x85\xe8\x5c\x33\x74\xfd\x21\xe9\x72\xfe\x06\x0b\x1d\xe0\x37\x11\x33\x90\x06\xbb\x2e\x44\x95\xfb\x43\xab\x27\xb3\x65\x86\xc8\x56\xf8\xe6\xc8\x27\x46\x3d\x8d\xfb\xcc\xe1\x6a\x7e\x69\x70\x97\x21\xb6\x12\xd8\xcd\xd0\x9d\xfc\xed\xf5\x2b\xb2\xb9\x8f\x1d\xac\x7d\xf3\xaf\xcf\x35\xa5\x56\x82\x98\x41\x06\x72\x53\x53\x94\xce\xe3\xa6\x10\xa2\x8f\x0c\xa8\xa0\xbf\x32\x2f\x39\x84\x2b\xb3\x10\x90\xee\x28\x91\xd3\x87\xd8\x86\xef\x0c\x37\x67\xcf\x4c\xce\xfc\xee\x9d\xa2\xfa\x5a\x23\xe8\xc0\x48\xf1\x9d\x96\xa4\x70\xb6\xc3\xce\xba\x6d\x7b\x92\x42\x87\xed\xe6\x46\x97\x3e\x7d\x5b\x27\x16\x14\xc4\x38\x06\xbb\xb9\x13\x0e\xf7\xe5\x8a\x99\xbf\xa1\x29\x43\x3e\x45\x68\x14\xd7\xfb\x05\xe0\x7c\x57\x62\x03\x2e\x19\x3a\x91\x45\x8f\x21\xbc\xab\xcf\x49\x31\x44\xb2\x81\x03\xb3\x9a\x2f\x67\x40\xb8\x04\x67\x50\x61\x42\xbc\x6b\x6f\x18\x73\xba\xb0\xf3\x5b\xc5\xd0\x0c\x91\xf7\xc5\x28\xca\x3c\x01\xb4\x67\x8c\x09\xce\x78\x95\xf6\xf1\x2e\x49\xb9\xa3\x7c\x5e\xb9\x5a\x4e\x26\x36\x14\x14\x67\x9e\x63\x1b\x25\x04\xc5\x8b\xec\x69\xb0\x33\x04\x4d\x13\x28\x86\x11\xf8\x9d\x41\xd1\xbe\x33\x70\x62\xdd\xbf\x32\x67\x08\x12\x22\xe9\xf3\x7c\x5f\x18\xc6\x7e\xb5\x67\xd4\x66\x09\xea\xd6\x8
2\x4d\xbe\xba\x68\xa6\x36\xab\xf1\x9d\x04\x9e\xa5\xed\x3c\x18\xcf\x1c\x88\x9d\x8c\xee\xc1\xb7\xb9\x13\x65\x60\xc1\x68\xfc\x28\xc2\xa6\x0f\x00\xbd\xa0\xbc\xee\x57\xd2\xbe\x74\x61\x9a\x6d\x30\xa0\x60\x47\x55\x9c\x59\x83\xfd\xc7\x23\x48\x1a\x30\xc7\xba\x52\xef\xc3\xf4\xfb\x19\xba\xf9\x89\x6d\x28\x86\xfd\x9e\x17\x7d\x05\xb1\x5e\xc8\x67\x3b\x95\x1a\xba\x19\x09\xd4\x3e\x80\xd7\x2f\x00\x00\xa7\xd2\xf5\x64\x18\x8c\xc1\xb3\x4c\x15\x1a\x5f\x4d\x77\x88\x3a\xa8\xa9\x3c\x4a\x88\xff\x54\x96\x1e\x1b\xef\x94\xdd\x14\x46\xbd\xed\x2e\xb6\xf7\x4d\xe0\x16\x7d\xce\xe3\x49\x31\x12\x69\xb4\x55\x79\x1c\x17\x28\x97\x8f\xf0\xfb\x5e\xd7\xef\xcf\x9b\xe8\x82\x36\x41\x8f\xa4\xde\x63\x92\xf7\xd8\xdd\x67\x1d\x21\x7b\x2c\x37\x4c\x39\x76\x8e\xa2\x91\xaa\x26\xdd\x95\x23\x71\x03\xdf\x39\x52\xd4\x78\xc6\x2d\x49\x75\x63\x93\xb6\xbc\xa2\xf1\x4f\x5b\x07\xfb\x2c\x67\x31\xc0\x3c\x6b\xd0\x03\xdc\x54\x99\x52\x56\x55\x0a\x83\x5e\x8c\x29\x0a\x06\x41\x74\x86\x21\x1d\x98\x30\xf8\x23\x76\xe0\xfa\x02\xf9\x71\x8f\x0c\xf3\xa6\xd8\x4e\x1a\x9e\xe6\xce\xdb\x0d\x53\xe0\x0c\x8f\x13\xa4\x07\xea\x98\xfd\x98\x3c\xfb\x1a\x62\x1d\x1e\x65\xa3\x1b\x53\xf4\x25\x69\x66\x2f\xd1\x91\x90\x6b\xa3\xdb\xa7\x90\xf4\x1d\x9e\xc2\x14\x39\xc8\xda\x0a\x50\x04\x27\xfd\xf5\xa0\xa0\xad\x32\x6c\x9d\xfb\x07\x67\x00\xa4\x68\x65\x79\xd2\xe2\xf0\x02\x22\x36\x6a\x70\xfc\x71\xf0\x9c\x97\x45\x33\xfc\x12\x24\x84\x68\xfa\x07\x30\x0c\x1b\x45\xcf\x82\xb2\xcc\xce\x4d\x8d\x7c\x56\xb5\xc9\x8d\x44\xcf\xb5\xa0\x30\x8e\x5f\xa8\xf7\x0f\x92\x37\xe7\x6c\x0f\x85\xfb\x9e\xe1\x79\x42\xc5\x98\x9b\xe0\x85\x78\x2a\x66\xa0\x61\x08\x64\x2d\xa7\xa0\xf0\x81\x18\x86\xa1\x52\xfa\x47\x43\x19\x0d\x18\xe4\x4e\x93\x67\x1b\x2e\x84\xcb\x0c\x51\x6c\xce\xf1\x3c\xde\x22\xed\x0a\xc1\x0e\x7d\x26\xe3\x19\x06\x01\x8c\x51\xdb\x4e\x81\xf8\x0a\x6b\x93\xb4\x49\x51\xdb\xdb\x11\xd8\x70\x9e\xd8\x0a\x83\x12\x6a\xeb\xfd\x4b\xbc\xa2\x75\xfd\x28\xea\xc4\x62\x50\xff\x78\x97\xc7\x09\x50\xa7\x29\x85\x13\x14\x31\xaf\x21\xd6\x1e\x90\x67\x75\x7f\xf9\x99\x64\xa3\xb6\xcd\xdf\xf9\x8b\xa2\xf3\xde\x71\x17\xcb\x8
9\x1c\x83\x71\x0e\xce\xb3\x40\x96\xa4\xf1\x0e\x7a\xd1\x95\x7a\xfb\x7e\xaf\x06\xb9\x5f\xe8\xa6\x67\x9c\xbc\x3f\xe8\x28\x50\xbf\xaf\x35\x43\x27\x00\xa5\x88\x3d\x57\x62\x92\xdc\x4c\x50\xf4\x9b\xf4\xee\xfc\x72\x79\xb7\x70\xf7\x54\x88\xc5\x01\x31\x6c\xd6\xde\xc6\xf0\xc6\xc7\x69\x86\xc3\x70\x43\x38\x85\x91\xb2\x4c\xe7\x14\x55\x9e\xf4\x1f\xb9\x40\x39\x16\x86\x63\x87\xd4\xc7\xee\x6e\xcf\x06\xc4\x8a\x14\x7d\xec\x81\x4b\x5d\xa4\x59\x87\xa6\x19\xec\xe1\x1f\x3c\x20\xcc\xf9\x73\xf2\x37\x45\x13\x8e\x01\x8e\xbf\x67\xfd\xd5\xcc\xf1\x35\xd8\xb7\xa0\x30\x07\x82\x68\xf3\x88\x46\xe8\xf4\x4f\x8a\x70\x7c\x3f\xf8\x0e\x5c\xce\xd9\x70\x29\xfe\xed\xd9\x26\x6d\x59\x34\xce\x33\x69\xc0\x15\x83\x98\xff\x38\x7b\x19\x96\xfd\x8b\xfd\x58\xd7\x63\xfe\x38\x7a\xbb\x78\x57\xfd\x7c\x11\x86\xa6\x63\x83\x2a\xc7\x17\x73\x31\x32\x51\x67\xa0\x70\x99\x7c\x87\x5a\xd8\xc8\xd6\x39\xbc\x21\xd8\x7b\xbf\xa0\x0e\x6a\xbb\x35\x2b\xb2\xf9\x4f\x54\x82\xcc\x3b\x85\x24\x2c\x46\xd4\x01\xc5\x61\x85\xe6\xcc\x98\xc6\x7a\xc2\xfd\x0b\x00\x14\x6d\x7c\x88\xb2\x93\x74\xa6\x07\x5a\x1a\xe6\x09\x60\xa6\x2c\x4b\xc0\x53\x0c\xa0\xb3\xb6\xbe\xee\x5a\xac\x24\x01\x6a\x19\x82\x20\x21\x84\x40\x90\x97\x7a\xb6\x1d\xb8\x41\x93\xdc\xfb\xe6\xa7\x38\x79\xae\x73\x07\x27\x9e\x9f\x06\x9e\xe4\x46\x33\x5a\xce\x2d\xa7\xfe\xad\x8d\xfe\x55\xd3\xd9\x7f\xdc\xcd\x9c\x5e\x66\xac\x19\x4a\x36\xba\x89\xd8\x37\xa6\xb1\xcf\x76\x14\x32\x06\x88\x0b\x92\x96\x45\x6e\xea\xe7\x88\x2d\xcb\x96\x08\x6c\xf7\xcc\x37\x2b\x7c\x71\x4a\x17\x45\x53\x14\x60\x33\x4a\x77\x94\x2a\xcb\x2c\x3f\xf7\x16\x50\x8f\x8f\x78\x23\xdc\xdc\xe5\x8d\x89\xd9\x8f\x84\x40\xb9\x15\xfb\x7e\x61\x18\x87\x4e\x41\x99\x13\x5b\xa0\x02\x68\x3e\xf6\xb3\x30\xd2\x79\x26\x8e\x64\x68\xf9\xb0\x26\xd5\xc5\xd6\xbd\xb3\x3c\x29\x89\x81\x48\x3c\x83\xf2\x71\xbc\xb0\x67\x53\x01\xa0\x0a\x70\x13\xdc\x07\x6f\xc7\xb3\x9f\xe3\xcc\xa6\x3c\x3b\x3a\x43\xc5\x3f\x3e\xe8\x43\x34\x68\x34\x32\x9f\x7c\x5c\x99\xea\xe3\x0d\x4a\x61\x29\x20\xea\xba\xef\xdc\x7d\x93\xff\xf3\x92\x89\x99\x18\x1f\xf2\x18\x19\xd5\x65\xd0\xb2\xb7\x24\xb1\x3a\x81\x8
3\xd2\xfb\xd2\x50\x02\x00\x09\x30\xcf\xc8\x31\xe9\x07\x31\x50\x12\xc3\xc6\x16\x4b\x56\xa0\xb7\xf8\x7d\x9c\xe7\x43\xda\xa3\xdb\x43\x94\xde\x36\x00\x7b\x24\x2b\x1a\xd1\x77\xdf\xcf\x19\x1a\xc8\x36\x33\x44\xbe\x01\x80\xe4\x0c\x4d\x12\x58\x80\x87\x2c\x9e\x64\xa1\x53\x97\xe6\x70\x82\xd7\x71\x6e\x38\x17\xa3\x1e\x3a\x8e\x14\x2d\x44\x10\x96\xa0\x92\x13\x26\x51\xfb\x1a\x1a\x59\x51\x5c\xa6\xc9\xb5\x55\x18\x7e\xac\xc5\x9e\xda\x18\xc3\x37\xbe\x01\xd0\xe7\x4e\x2c\x87\x93\x08\x06\x72\x6b\xd4\xc8\x8f\x28\x37\x66\xe7\x53\xdf\xa7\x49\x2f\x73\xb0\xa6\x7d\x0d\x83\x02\x2a\xcb\x4b\x08\xeb\x26\x95\x9e\xc3\x5b\x6d\xe5\xcd\x87\x01\x36\x28\x1d\x80\xe9\x06\x34\xad\x88\x94\xf8\xb6\x28\xe2\x97\x33\xf4\x2c\xcb\x42\xee\x00\x32\xe1\xf2\x34\x47\xf2\x47\x9b\x38\xa1\x2d\x3f\xd8\x45\x48\xc6\x68\xcc\xf2\xe4\xf9\xfb\x66\x18\x5e\x85\x82\x08\xe3\xbb\x47\x16\xa9\x12\x4b\xcb\x33\x2c\x4b\x0f\x41\x7c\xac\x64\xc0\xe9\x53\xe7\x4d\xee\xe1\x0d\x3a\x0f\xa5\xf2\x73\x25\x8f\xd5\x22\xeb\xef\x8e\xd9\xf3\x6f\x74\x87\xd6\x88\x61\xa3\x81\xa2\x7c\xfa\x49\x3f\xe6\x25\xd0\xa0\xd0\xdb\x08\x8d\xb8\x62\x94\x48\xe0\x41\x80\xb9\xdf\x9d\x61\x1e\x12\xa7\x76\xf4\xfe\xe9\x1b\xed\x8a\x38\x18\x80\x09\xb0\x99\xe9\xf7\xc3\x4c\x59\x60\x92\xaa\x5d\x33\xe0\xaf\x0c\x50\xe2\xf3\x31\x4a\xe7\x85\x5e\x3c\x73\x02\x9f\x07\x44\xbd\x9f\x71\x7f\xc8\xb3\x68\x36\x86\x41\x31\x0c\x43\x98\xf7\x8b\x00\x1a\x87\x31\x4f\x9b\xb4\x49\x05\xa6\x09\x00\xdd\xf6\xfb\x59\x40\x54\x1c\xc3\x50\x69\xce\x68\x16\x93\xf7\xde\x36\x67\x5d\x28\x8b\x6c\xf4\x55\x1f\x2d\x4c\x05\xe7\x09\xe5\x88\xbc\x9e\xed\xe9\x22\x25\x48\x93\x3a\x53\xf6\x3d\x0a\xbe\x22\x23\xa9\xf5\x55\x7c\x9b\x97\x03\xd5\xd4\xdf\x3e\x85\xb9\x8d\xa8\x81\xd4\x6e\x1e\xd7\x60\x10\xad\xe9\x5f\xee\x4e\x20\x9a\x49\xd3\xc0\x94\x9a\x63\x2c\xcb\xd2\xce\xd9\x46\xf8\xaa\x0e\xbd\xb9\xed\x0b\x20\xfc\x06\x79\x5e\x72\xaa\xab\xb6\x56\x07\x19\xb8\x8f\x30\x04\x91\x93\x14\x85\xf6\x6b\x72\x9f\x0a\xfe\x6a\xb4\xc9\x50\x05\x76\x92\xd4\x4c\x00\x68\x7c\x0f\x30\xdc\x43\x3b\xc0\x32\x95\x1f\x10\x04\xf2\x38\x1d\x31\x70\x1c\x2e\x89\x84\x7c\xb
e\x63\xac\xcd\x07\x31\x0d\xb1\xc2\x37\x3a\x51\x3c\xaf\x9a\xe7\x00\xe5\x89\x22\x51\x9d\xa0\xcd\x7d\xff\x4d\xbd\xea\x3f\xb0\xa3\x39\x7b\x9d\x99\xf5\x6c\xfb\x91\x35\x13\x90\x32\x47\xdb\x11\x52\xec\x35\xad\x5f\x07\x92\xc0\xf9\x73\x50\xf2\x58\x97\x91\x79\x11\x9c\xa1\x21\xc6\xb6\x0e\x71\x7a\xd0\x3c\xdb\x02\xff\x9b\xce\x2f\x04\x30\x9e\x3c\x5e\x47\x52\xf1\x52\x25\xeb\xfe\x47\xa9\xd6\x71\x06\xab\x77\x50\xb6\xa8\x8b\xf6\x95\x1b\x91\xa5\xc2\x70\x9e\x26\xd0\xde\x9c\xe5\x18\x62\x2d\xc6\x3c\x7f\x4c\x17\xf3\xd4\x49\x97\x0b\x0e\x8f\xd7\xe9\x67\xf5\x66\xef\x82\x61\x05\x5f\x6e\x97\xaf\xd6\x45\xa7\x7b\x71\x9c\xa6\xa6\x2f\x37\x41\x8c\xad\xf8\x66\xdf\xe7\xe7\x09\x96\xcd\x82\xc0\x91\x28\xd4\x6d\x37\x44\x8e\x2a\x41\x3e\xb1\x19\x54\x1c\x1b\x03\x93\x01\xb2\xaa\x32\x08\x7d\xe1\x58\x4a\x87\xdd\x32\xfe\xe5\x1f\x26\xa9\x1e\x73\xbb\x32\xfb\xc5\x88\x63\x3f\x31\x95\xbd\xe0\x74\x41\x86\x38\x49\x50\x4a\xa7\x18\x08\xc5\xb0\x36\xf1\xfc\x20\x88\x47\x96\x29\xe9\x5c\x7c\x7e\x77\x74\x04\x27\x65\x67\xcd\xed\x0a\x47\x99\x72\x19\x13\xaa\x5f\xf2\xf9\xf3\x97\x96\x88\x76\x92\x42\x5e\x8a\xa0\x72\xbc\x7b\xe3\x30\xdd\xec\x28\xfc\xc9\xc3\x9d\x0f\x41\x99\x2c\x0b\xc9\x18\xd7\x40\x99\x03\x3f\xba\xe9\x5a\xa8\xc5\x7d\x7f\xda\x36\xd0\x45\x9e\x41\x29\x71\xc7\xca\xb3\x24\x00\xc8\x33\x61\x21\x84\xb7\x7a\x38\x49\xb9\x93\xa3\x58\x7e\x75\x74\x34\x2d\x7d\xc7\xe9\x01\x15\xa7\xd5\x72\x1f\x9d\x36\x3c\x04\xb0\x80\xe6\xa1\xcf\x5b\x0d\x23\x11\x86\x38\xdc\x3f\x7a\x87\x6a\x70\xfe\x98\xb0\xe5\x48\xfb\xa8\x7e\x9b\x79\x3f\x0e\x51\x6c\xeb\x09\x93\x4b\x68\xe6\xe5\xa7\x3b\x90\x3a\x98\x82\x8b\xb2\xd0\x0b\x7f\xd8\x09\xc1\x6f\x3f\x14\x12\x72\x9a\xf1\x79\xee\x9b\xaf\xf1\x51\x59\xfe\x7e\x2a\xae\x74\x04\xff\x93\xf8\x52\xc5\x71\x52\x24\xd5\x80\x1a\x50\x05\x0f\x83\x09\x00\x7c\x3e\x52\xd9\x17\xac\xe5\x41\xf7\x33\xf9\xe3\x36\x9c\x30\x7e\xa5\x0c\xa1\x4a\x47\x3f\x68\x46\xe7\x23\xa7\x29\x17\xa5\x37\x10\x0e\x45\x69\x3a\xbe\x48\xef\x97\x74\x39\x32\xbf\x73\xe3\x03\x98\x2c\xff\x71\x46\xf3\x02\x9a\xfe\x5e\x65\x38\x0f\xd3\x55\x04\x24\x41\xe6\x98\x09\x4c\x18\x2
6\xd0\x7c\x8c\xdb\x15\x3f\xd6\x8d\x12\x5f\x74\xc8\x01\x43\x08\x39\x86\xe4\xae\x27\x7c\xec\x1b\x92\x3f\xb8\x18\x0f\x86\xc8\x0f\x1c\x2a\xad\x84\xc6\x9c\x2b\x56\xbe\x72\xfc\xef\x0c\x43\xdc\x50\x2b\xe1\x25\xab\xa7\xdc\xfe\xe7\x1d\x9f\xd3\x47\x99\x33\xd5\x51\x2c\xab\xb9\xae\x2e\x93\xaf\x45\xaa\x7e\x18\x9e\x58\x31\xe6\xcc\x77\xa4\xb2\x28\x17\x6d\xd8\x34\x4f\x8d\xba\x49\x1a\x5c\x82\xa7\x39\x96\x34\xad\x53\x62\xc6\xb4\xd8\x74\x37\xf2\x9c\x65\x4d\x68\xfd\xc2\x6b\xce\xe7\x4a\x3a\x68\xf0\x71\x6a\x14\x53\x66\xa8\x3e\x0e\x24\x88\x1b\xe3\x52\x34\x2e\xe8\x26\x80\x98\xd3\xc3\x02\x0e\xbb\xe9\xee\x24\x1f\x23\xce\x7d\xd4\x7e\x4b\xe2\x3a\xf7\xa7\x6a\x38\x58\xd9\xfa\xfc\x6a\x14\x4b\x9b\x28\x2b\xc2\x62\x99\x15\xe8\xa9\x80\x9b\x34\xf8\x6e\x14\xe2\xd4\x86\xbc\x2c\x98\xd7\xed\x88\x04\x42\xc1\xbc\x43\x7e\xe9\xcd\x07\xa9\xe7\xc0\xab\xc5\xe2\x18\x42\xd5\xcd\xcb\xb2\xe2\xbc\x93\x0e\xf6\xcd\x9a\x42\x7f\x2a\x65\xf2\xf4\x68\x1a\x9c\x0d\xcf\x30\x50\x49\xdb\x81\x69\x44\x14\x05\x4a\xed\xfc\xf7\x3e\x57\x34\xc7\x84\xc3\x42\xa5\x60\x62\xe3\x09\xfa\x79\x3f\xb0\x75\x1f\x79\x28\x26\xf1\xe3\x2d\x4f\x00\xd0\x00\xfd\x93\xf8\x0e\x04\xf2\x8c\x13\xe7\x63\x25\x62\x42\x09\x7e\x73\xaf\xdf\xe7\x9e\x96\x62\xb4\x69\x4b\x7f\x3b\xe6\x8a\x69\x62\x42\x68\x34\xbd\x5f\xb6\x09\x8f\x3e\xd8\xc2\xea\x9e\x27\x57\x10\x5d\x06\xa5\x34\x2a\x4d\x47\xd0\x32\x79\x71\x7f\x79\xa1\x30\xa8\xf8\x3c\xa1\x60\x18\x48\xdf\xb2\x1e\xf3\x8b\x01\x8d\x0e\xbb\xe7\xb7\xa1\x53\x19\x1c\x03\x63\x8b\xb4\x7a\xc3\x1f\xed\x36\xf4\xd9\xa5\xb4\xb8\x0f\x70\x8a\x97\xb9\x29\x84\x53\x8a\xd9\x11\xf4\x21\xb1\xd4\x3c\x61\x78\x4f\x8f\xbb\xec\xe8\x0d\x23\xa0\xb2\xf0\x72\x08\xed\x36\x9b\xe5\x04\x97\xcc\x77\xac\x86\x21\xd8\xbf\xad\x8e\x7e\x7f\x94\xac\xf9\x62\x6b\xcb\xab\x87\x0b\x85\x37\x5b\x0d\x61\x52\x95\x0e\x9f\xc9\x8e\x01\xd9\x34\x45\xd2\xfb\x9a\x5d\x40\x41\x15\x73\x3f\x7e\x68\x96\xe1\xba\x90\x47\x4c\x00\x02\x73\x61\x70\xa6\xc7\x3d\x59\xe2\xab\x06\xc9\x1e\x48\x11\xd0\x8d\x33\x58\xd7\x73\x86\xea\xed\x59\x49\x5e\x91\x78\x22\xd9\x8f\x17\x1a\x19\x6c\xa4\x09\x8c\xa9\x9e\xa
5\xa5\x0f\x7d\x4f\x03\x0c\xc3\xc2\xf3\x1c\x01\x85\xdf\xf0\xa7\xaa\xd4\xa1\x48\x78\x98\x31\x4b\x40\xdf\x2f\x23\xac\x72\x18\x57\x8b\x11\x35\x3f\x40\xcd\x81\x52\xe4\x63\x44\x8e\xb1\x31\xba\xc2\x98\xbc\xf3\xa9\x13\xd3\x3e\xbe\x77\x7c\x57\x7f\xd9\x9c\xaf\xe3\xd6\x0e\x54\xf8\x0c\x0f\x14\x02\xf4\x8b\x15\xc0\xc7\xe1\x5b\x98\x4c\x11\x07\x63\x2f\x51\x0c\x4e\x6e\x63\xc9\x9b\xce\x3c\x5e\x88\xd4\x6d\xb6\x66\x70\xdb\xdc\xcb\x5b\xcb\x45\xaf\x71\x7d\x4c\xa1\xa6\x44\xfe\xf3\x1b\xc7\xbf\xcc\x9b\x19\x20\x67\x5e\x0b\xd3\xc9\x9c\x76\x99\x74\x8f\xd2\x45\x2c\x82\x90\x0e\xd0\x33\x52\xad\x12\x1a\x1e\xa5\xb3\x85\x49\x63\xc5\x59\x30\x68\x8a\x7a\x3c\x59\xd3\x84\x71\x9e\xbb\x44\xdd\x9d\xa1\xf5\x8f\x26\x2f\x5e\xb1\x2d\x34\x29\x34\x7e\x06\x94\x96\x4f\x56\x14\xcd\x49\x3c\x24\x0c\xef\xb7\x61\x9e\xd9\x59\x07\x6e\x7b\x8a\xa5\x39\xee\xfb\x79\x06\x58\x8e\xae\x41\x94\xb5\x5f\x83\x57\x1e\xdc\x98\x8b\xa0\x9f\x13\x51\xc4\xde\xa9\xc9\xb2\x32\x45\x81\x79\xac\x9e\x51\x60\xe9\x09\xce\x9f\x49\xb5\xeb\xea\xe7\x45\xdc\xdd\xa0\x84\xf2\x7e\xdc\x19\x84\x7e\x48\x32\x3f\x5e\x3e\x84\x11\x2b\x6c\x8e\x9f\x44\x98\x06\x53\xa0\xfb\x9a\xc0\x61\x16\x89\x12\x99\xc1\x91\xbb\xc3\xdb\x68\xd9\xac\xfa\x6d\x00\x7b\xcb\x9f\xaa\xa5\xf3\x17\xa3\x30\xec\x3c\x9d\xf0\xc4\x6d\x4e\xf2\x6c\xde\x66\xa1\x33\x3c\xb1\xdc\x80\x70\xba\x28\xe5\x2e\x5f\x09\xba\x62\x7e\x92\x11\xb2\x41\x31\x62\xcb\x8a\xc8\xa6\xbb\x0c\x87\xae\x84\x2e\x49\xf1\xfc\xe7\x37\x08\x04\xe8\xc7\xf2\x3c\x61\x11\x01\x74\x05\xdb\x8f\xe6\x23\x10\xbd\x99\xfa\x45\x56\xe1\x16\xe0\x26\x0d\x4d\x4f\x51\x00\x5d\xe0\x00\x67\x4e\x16\x6e\x2d\x74\x90\x96\xbd\x57\x71\x2f\xab\xcb\x86\x27\xae\xfd\xe4\x90\xf1\xe3\x1a\x3f\x0c\x7f\x88\x13\x3e\xa4\x1a\xa2\x21\x74\x8c\x8a\xc8\x20\x18\xdb\x8f\xf1\x4c\xd3\x4d\x1f\x11\xd2\x82\x41\xf3\x3e\xc1\x8b\xce\x65\x6a\x2c\x0f\x3d\x86\x2e\x9b\xb3\x04\xa7\xe7\x11\x7d\xc2\xbf\xcb\x8d\x61\xed\x9d\x86\xba\xa5\xbc\x44\x52\x9e\x9a\xae\x93\x29\x3d\x2c\x99\x67\x9c\xa9\xff\x07\x4c\xb4\x90\xed\xaa\x83\x30\xd4\x06\x09\x40\xac\x70\x51\xa4\xe6\x17\x63\x08\x5c\xa5\x28\xf4\xb1\x2
f\xdc\xe5\x05\x30\x0a\x6b\xd2\xa2\x56\xaa\x26\x70\xb4\x0c\x52\x80\xea\x31\x73\xcc\xeb\xda\x8d\x8f\x43\x54\x70\xe1\xcb\xb6\x17\xff\xa4\x1f\xf2\xbe\x74\x56\xe0\xab\x60\x2f\xcc\x01\xd2\xfc\xd9\x8d\xa4\x89\xb5\xd4\xc3\x74\x86\x02\xd4\x69\x61\x2b\x82\x96\x75\xf2\xd0\x87\x92\x45\x82\xa1\x1c\x0d\xbe\xdb\xe2\xbc\x58\xde\x13\xe1\xec\xd7\x76\x06\xd4\x5b\x65\x7c\x50\x34\x79\x70\xce\x88\xe7\x71\x5a\xd2\xcc\xd5\x6f\xa0\xf0\x2b\x53\x4c\x91\x8a\xdb\x96\x06\x52\x10\x32\xfa\x88\x19\xc0\x88\x11\x3b\xb4\x0f\xd8\xa6\x2d\xc9\x47\xf1\x89\x50\x82\x06\x29\x9d\xc3\x3f\xe8\xa4\xc6\x2d\x8d\xe8\xb5\xb8\x95\x92\xd1\xec\x8c\x86\x19\x61\xea\xb3\x9f\x23\xeb\xcc\xe9\x5a\x3b\x6e\xef\x10\x8b\x86\x51\x38\x32\x73\xd4\x30\x11\x10\x4d\x79\x3b\xf2\x01\xa5\x40\x21\x84\x71\x3e\x13\xe5\x8f\xa6\xc2\x89\xd2\x4b\x25\xf7\xfe\x12\xad\xb0\x6c\x3e\xf1\x9d\x16\x24\x39\x8e\xc3\x0c\x50\x9c\x60\x72\xeb\x7d\x33\xb1\x2d\x5d\x7c\xbd\x02\x88\xdf\x4c\x96\x32\xa5\x0b\x00\x20\x0f\xe3\x08\x51\x0b\xb9\xad\x8f\xa7\xbb\x3e\x92\x63\x0c\x4e\x40\xf0\xb8\x0e\x8a\x69\x60\x2f\xfe\xc7\xda\x65\x09\x28\x95\xf8\x77\x5e\x78\x0b\x52\xd9\xb1\x52\x64\xa5\x82\x51\xf8\x1b\x9a\xfe\xed\x63\x78\x5c\x5f\x7d\x10\x0f\xe1\x94\x0e\x6a\x21\x91\xf1\xa5\xb5\x24\x11\x73\xee\xfd\xe3\x3b\x8a\xa2\x70\x71\x2f\x04\xe7\x39\xca\xd7\x2c\xa9\x29\x02\x00\xbb\x5f\x70\x9b\x32\xe3\xab\xd8\xab\xf4\x4a\x17\x81\x54\x17\xe3\x63\xf1\x7a\x03\xf0\x87\x29\xb6\x41\x9c\x7e\x03\xd3\x19\xbd\xa5\xff\xb5\x37\x99\x87\xe7\x46\x17\xa5\xd4\xf5\xbc\xc1\x51\xba\x9b\x63\x21\x66\xba\x18\x45\xa2\x11\x9e\x7b\xfb\x71\x5b\x1f\x40\x3c\x4c\xe6\x81\x83\xaf\x4e\x6a\x90\x72\xa6\xb8\x5b\x80\xe4\x70\x52\x92\x19\x76\x86\x00\x2d\xcf\xd5\x30\x9a\x67\xdd\x40\xb9\x14\x22\x55\x11\xac\xd3\xf9\x5f\x5b\xf0\x59\x9e\xbb\x37\xd6\x15\x3e\x4a\x78\x86\xc1\x68\xe8\x7b\x7a\x50\x7d\x2a\xcf\x77\x41\x7d\x6e\xe1\xe3\xd9\x2c\xcb\x5a\xd5\x97\x1d\x2a\xe1\xdf\x07\xb2\xda\x97\x09\x42\xbb\xac\x5f\xed\x61\x34\xa8\x64\x4e\x33\x8d\x43\x3b\x02\x39\x44\x8d\xe3\xda\xdf\xd4\x39\x14\x30\xe3\x9c\x1b\x5c\x0b\x98\x08\xb3\xc8\xce\xc5\x3e\x5
d\xd0\xa0\x6c\xdb\xf8\x79\x3e\xdd\x5b\xc1\xbf\x09\xe3\xf1\xa0\x5e\x92\xb9\x8a\x7e\x54\x70\x6d\x69\x72\xef\xad\xf5\x68\xe3\x4b\xe9\x2d\x66\xbd\x10\xf3\x8d\x0e\x65\x27\xae\x67\xe2\x93\x6e\x12\xc2\xa3\x41\x43\x5f\x0f\x44\xd4\x1e\xcf\xe3\x24\x9d\xcb\xb6\xfc\x9b\xbc\x4d\xb9\x9c\xde\x7a\x9b\x0a\x85\x03\xf9\xe3\x4b\xf1\x13\x1b\x54\xc5\x13\xcd\xb2\x9b\xd9\xb4\xaf\x89\xeb\xf4\xce\x46\x19\xd8\xd9\x40\xa6\x38\xbe\x04\x26\x3e\xe6\xf3\x85\x08\xcd\x10\x73\x4e\xa0\xa8\xfe\x85\xa8\x34\x5c\xf4\x1f\x96\x32\x08\xa7\x50\xd4\x05\x4e\x8c\x22\x7d\x63\x44\x0e\x46\x71\x0e\x8c\x2e\xbe\x73\x32\xac\xd0\x89\x91\x86\x18\x88\xa4\x24\x89\xb4\x9b\x43\xd1\x96\x2a\x9b\xe2\x78\xd4\xb0\x6a\xd0\xc7\xb2\x34\xff\xcb\x5a\x1f\x36\xd1\x9a\xe9\xfd\xd0\x8f\xc4\x7e\xa4\x6f\xf3\x16\x67\x30\xf0\xf6\x71\x6b\xf1\xdd\x19\xa2\x48\x43\xa6\x33\x7b\x50\xb0\xd8\x0b\x9b\x32\x28\xa5\x3c\xdb\x8c\x93\x2a\xc4\x3e\x50\x88\x52\xbb\x2e\x92\x35\xc3\x5b\x9e\xc0\xe6\x9b\x22\xbc\x06\x9f\x2c\x77\xc1\x5f\x09\x1f\xc4\x4c\xea\xc1\xa3\x0c\xe7\x9a\xdd\xd6\x74\x95\x9a\x2c\x71\x12\xa7\x15\x3b\x5e\x2f\x13\xe6\x8d\x2f\x05\x1d\xc3\x54\x34\x7f\xe5\x12\x18\x1f\x13\x9f\xa2\x0a\x4d\xee\x1c\xf3\x6f\xf7\x4f\xde\x1d\x30\x94\x41\xae\xb0\xb6\xbb\x1b\x24\xe3\x73\x8c\x21\x14\xe9\x83\x9d\xd8\x4b\x6c\x4c\x02\x91\x9b\x09\xd2\x03\x1b\x43\x02\xdf\x02\x6d\x87\xc6\xa8\xf3\x18\x4d\x63\x2b\xed\x1b\xf8\x7d\xe8\xc3\xb9\xf6\x9a\x6e\xdf\xa0\xdb\x9a\xc0\x5b\xfb\xb9\x74\x44\x0f\xfd\x80\xbb\x06\xcb\xfe\x8c\x2d\x01\x45\x5a\x18\x26\xfd\x4b\x7d\xfb\x1b\xe2\xcf\xc2\x1e\x34\x38\x3c\x39\xa8\x15\xd7\xd6\x2e\x34\x6e\x6c\x4f\x12\x4a\x45\xfc\x9c\x75\x02\xbf\x5f\x06\x19\x4d\xaf\x1e\xfc\xde\x75\x15\x71\x35\x82\xf4\xbb\xa1\x49\xa4\xff\xa0\x60\x27\x94\xd0\x0e\xf4\x3d\x1a\x88\xbd\xe5\xca\x11\x78\x08\x92\x9f\x61\x8a\x0c\xeb\xf0\x12\x28\xaf\xb6\x01\x83\x0f\x80\x40\x9f\x44\xfd\x86\xf0\x8e\x28\xf1\x55\x9e\x58\xf8\x12\x93\xfb\xc9\xc2\x81\x38\xc2\xda\x12\x0a\x6b\xd5\xf2\x9f\xf5\xfa\x5e\x67\xca\xc3\x6b\x76\xb3\xe2\x8c\x18\xc5\x58\x9a\x57\xca\x46\x02\x9d\x6a\xef\xe0\x97\x45\xfa\x72\x1c\xb8\x7
d\x8b\x9e\xd6\xab\xa5\xdf\xa9\x76\x6c\xa2\x3b\xd9\xce\x3a\x38\xbc\xbd\x69\xf0\x6c\x23\x94\xad\x7d\xf6\x22\x42\x4b\xa7\x35\xa8\xc0\x26\xf1\x88\xef\x7b\xa3\xa3\x32\xa7\x89\x5d\xa4\xda\x52\xb7\xe6\x62\x83\xce\x2c\x66\x2b\x7f\xe8\xc4\xbc\x68\x43\x7c\x58\x9f\x36\x47\x60\xac\x7a\x2f\xab\x08\x69\x41\x9e\x01\x95\x5a\x16\xed\xbe\x7b\xa3\xac\x1f\x78\xc5\x58\xca\xc8\x01\x55\x42\xad\xcd\x24\x4d\xdd\xe7\x37\x9b\xc6\x16\x20\x11\xa3\x9e\x3a\x18\xf5\xa7\x7b\x25\xf6\xe3\x14\xa8\x84\x27\xdd\x5c\x15\x98\x78\x12\x39\x34\xbf\x84\x6a\x8e\x04\x3d\xc8\x92\x53\x1a\x6b\x8f\x22\xc9\x3c\x7b\x4f\x72\x86\xab\x48\x27\x42\x2b\xad\xf1\x86\x6d\xad\xf4\x6e\x79\xd3\xc0\x39\x3f\xe7\x9c\xd7\xdb\x01\xb2\xab\xb6\xde\x4a\xe8\xb6\x3e\x48\xb4\xa7\xd9\xab\xb9\xa6\x0f\xe7\xbe\xe3\xf5\xc5\x01\xfe\xf5\xe8\xf8\x18\x89\x10\xf2\x1e\x50\x39\xb4\xbb\x53\xdb\x12\x64\x60\x5d\x25\xc9\x7f\x6c\x56\x10\x4a\x63\x73\x35\xa0\x77\xb1\xa5\x70\x92\x13\xb6\x66\x47\x10\xec\xb7\x96\x4a\xa0\x8f\xfd\xe3\x78\x9d\x6c\x0f\x0c\x4f\x9b\x21\x66\xdf\x26\x74\x36\x52\x27\x87\x83\x49\x47\x8b\xde\x78\xc2\x5a\x6e\x6d\x7e\x60\x25\x6f\x75\x06\x00\xbc\xe1\x4e\xe7\x66\x95\xc7\x0a\x23\x0d\x92\xd6\x82\x54\xcb\x2e\xb9\x37\xd3\xa2\xcc\x73\x12\x6c\xab\xe2\x74\xb3\xf2\xfc\x2d\xd7\xb4\x48\x4d\x4d\xef\xb5\xe6\x77\x9a\x3c\x99\xdf\x58\x17\xe7\x5c\xf9\x5a\x0a\x6b\xc9\x50\x78\x43\x61\x00\xe7\xed\xbf\xfb\x38\x6a\xdf\xf7\xc3\x3e\x0e\x82\x3e\x0e\xfc\x7e\x4e\xc2\x9e\x28\x42\x7c\xa9\xd9\x99\x93\xe8\xd2\xcc\x56\xf9\xb1\x04\xaf\x37\xef\xee\x3d\x17\x8d\x66\x2f\xb6\xb8\x49\xb3\x9d\xee\x0d\x4b\x27\xe4\x9e\x06\x58\xd1\x5a\x73\x7c\x7b\xc6\xee\xe2\x1f\x7b\x76\x40\x99\x4d\x19\xd1\xd3\x00\xd9\x79\xcf\xff\xea\xbe\xef\xfb\xae\xef\xc7\x41\xbc\x8a\x91\xca\xa6\xab\x83\x40\xd2\xb1\x42\xf3\x9a\x34\x93\xc2\x41\xe7\xc2\x55\xa7\x0a\x28\xfa\x48\xb9\xaa\xd2\x6e\xfe\xab\x93\x4c\xf1\x66\x87\x24\xf2\x2f\xd2\xc0\x7b\xb0\xd5\x5f\x05\xa6\x41\x09\x80\xda\x3f\xd4\x58\xcd\xb8\xa9\x44\x68\x71\x9e\x94\xe0\xc4\xba\x44\x0f\xbf\x6d\x55\x60\xc6\x9e\x1e\x89\xfb\x00\x11\x07\x8a\xdf\x25\xba\xe8\xa5\xae\xf
5\x64\x94\x91\x9e\x9c\x25\x05\xf6\x7a\x45\x57\x85\x0f\xe6\x4e\xb6\x9e\xdc\xde\x90\x0b\x30\x23\xf0\x99\x2f\x6a\x8f\x92\x71\x2b\x43\xf2\x6b\xfe\x86\x54\x29\x09\x3b\x29\xce\x31\x29\xcd\x7e\xb1\x15\x55\x67\xea\x5c\xca\x51\x2b\x44\x3c\x97\xf0\x8e\xfd\x11\x42\xfa\x13\x72\x9e\xc7\x55\x09\x37\xec\x4b\x92\xe5\xc8\xf2\x45\xe9\xba\xae\x7e\x65\x3b\xd8\x3d\xc1\xb0\x14\xaa\x7d\xfa\xf5\x9c\x38\xbc\x03\x0d\xef\x4a\xfe\x47\xd2\x6a\x9a\x79\xa8\xb7\xbc\x4d\xe9\x67\x3d\xbf\x2d\x1a\xe4\x26\x19\xe6\x2d\xfd\xdd\x53\xfc\xbb\xbb\x54\xb8\x9d\x1c\x17\xba\x0b\x4e\xda\x2c\x6e\xb4\xb4\xcb\xd4\xa9\x54\xf1\x50\x22\xb8\xfe\xfa\xf2\x53\xc9\x8c\x18\xb4\xac\xca\xbd\x51\xc7\x1b\x37\x19\xed\x9e\x18\xda\x1d\x1b\x9a\x10\x88\xc7\xff\xbb\x93\x4c\x10\xac\x3d\x7c\x93\xae\x33\x32\x0b\x63\x82\x0d\xa3\x6f\x3d\x0e\xaf\xe1\xfc\xea\xf3\x6d\xa1\x03\x89\xee\x13\x69\x0c\xf3\x92\xfc\x49\xa6\x99\x96\x67\x5b\x35\xb5\xde\x54\xe6\x67\x20\xe0\xf3\xbd\x2a\x01\xa8\x0b\x39\xfc\x5b\x99\xf5\xa1\x4f\x45\xa1\x4f\xf5\x81\x77\xd5\x71\xf1\xe0\xeb\xb3\x83\xd0\x9f\x15\xad\xe5\x1f\x10\x19\x27\x9a\x66\xe8\xa4\xaf\xac\x40\x9f\x56\x09\x9b\x8c\x96\x1a\xcf\x3b\xc3\xa0\xc7\xa4\xc2\xc2\x0d\x8e\x87\x2e\x50\x2d\xb4\x53\x66\x50\xfb\xb9\x0a\x9c\x29\x5a\x7b\x45\xa6\xc7\x81\xca\x6f\x6e\x60\x51\xc1\x5b\x5b\x8b\x5b\xb9\xeb\xf5\xd2\xa3\xbd\xd9\xab\xb7\xd1\xad\xb9\xc2\xad\xb5\x11\xaa\xf3\xaa\x87\xec\x7f\x1a\x4a\x87\x8c\xaf\xf5\xce\xf6\x24\x56\x0b\xbf\xb7\xe7\x7f\xed\xc8\xfb\xb2\x7f\x32\xb5\x69\x50\x6a\xaf\xf1\x6e\x94\xf2\x6a\xaf\xfa\xd0\x06\x6d\x94\x8a\x39\xdc\xe1\xe2\x42\x29\x2d\x4f\xa5\x33\x77\x70\x7a\x19\xc1\xea\x93\x4c\x36\x96\xa2\x28\x62\x6d\xa9\xb2\x61\x11\xa3\xbe\x3e\xbc\xd3\x3e\x12\x56\xff\x0a\xe9\xef\xfe\x53\xff\x91\x0e\xc4\x90\x0e\xf1\x92\x0c\xf3\x3a\x93\x45\x3f\x27\xc1\x84\x59\x7f\xbb\x0a\x2a\xdb\x3b\x42\xe2\x21\xfe\xb7\x4b\xfe\xfe\xce\x49\x71\xa6\x14\x03\x39\xf1\xfa\xe5\x61\x32\x37\xbc\x26\xd6\x83\xd5\xc7\x5a\xca\xb8\x36\x1a\x88\x3f\x22\x54\xc3\xf5\xbb\x51\x45\xa6\x0a\xe4\x31\x2f\xc0\x8c\x66\xa5\x59\x8e\x20\x2d\x82\x07\x0a\x1c\xe2\xd1\xf5\xf
1\xed\x5a\xd9\x09\x09\x24\xdb\xc3\x42\xb4\xa4\x6a\xa8\xd7\x18\x94\x29\x45\x31\x2e\x03\x4c\x73\x6c\xb7\xd1\x74\xe0\x5d\x76\xbc\x2f\xff\x78\x0d\x8d\x3b\xdf\x5a\x26\xd5\xe6\x31\x6b\x16\xd1\x2d\x8f\xff\xdc\xba\xeb\xbd\x09\x1a\x6f\xd1\xa0\x54\x29\xca\x56\x89\xc2\xfd\xc5\xb4\x7e\x4d\xd0\x8e\x91\xce\x1e\x33\x68\x48\xc8\xc2\xf1\x77\x0a\x6c\xb3\xed\xf3\x41\x6a\x7f\x5a\xb9\x2a\x3f\x19\x4d\xf8\x1e\x74\xc5\x96\x77\xe1\x53\x3d\xda\xaa\xbd\xda\xaa\x01\x73\x72\xbf\xb6\xe7\xff\xad\x86\xf5\x68\xad\xe9\x22\x8c\x19\x22\xcc\xe7\xd2\xf8\xdc\x82\xf5\xc2\x3f\x60\x5b\x31\x6d\xf4\x77\x05\x35\x8d\xd4\x19\x53\x65\xfc\x7c\xaa\x78\xa8\x10\x22\xbf\x32\x08\x4e\xe9\x35\xf5\xd0\x83\xab\x0f\xc8\x59\xa2\xca\x9d\x15\xda\x48\xa7\xe1\x62\x66\x84\x90\x44\xbe\xeb\x03\x91\xc8\x30\x23\x7a\x75\xf9\xed\xd4\x52\xc4\x77\x02\xe0\xf4\x01\x86\xea\xb2\x11\xcd\x15\x10\xcd\xf5\x50\xbd\x8d\x10\x24\x17\xbd\xd4\x74\x9d\x75\x56\x9c\x6e\x53\x3e\x95\xfc\x94\x23\x28\x4f\xec\x70\x3b\x82\x62\x88\x7e\x5e\x11\xe2\x82\xcc\xdb\x11\xeb\x8b\xc6\xe4\x57\xbe\x1c\x23\xa0\x98\xbb\xc2\x30\x8a\x19\x6b\x03\xb4\xa3\xea\x93\x78\x44\xdc\x8c\x15\x7c\xab\x69\xa4\xf6\xd2\xff\x3a\x70\x6e\x7e\x4b\xce\xea\x64\x9f\xef\x57\xa7\xfc\xcd\x47\x9a\xa0\xf9\x56\xa0\x69\x7c\x60\xe9\xe2\x47\x65\x7f\x55\x02\x77\xa7\xdb\xe9\x2a\x44\x76\x76\x85\xa5\x60\x69\x80\xa5\xfb\x82\xe6\x7b\x84\x22\x7b\x84\xea\x5d\xfc\xb3\x0d\x35\xe5\x22\x9e\x85\x73\xd1\x86\x73\xd1\x81\x96\xf2\x6c\x69\xf0\x27\x92\x61\x5e\x11\x68\x3c\x3b\x42\x2f\x4f\x6f\xd1\xdd\xc0\x57\x06\x1a\x12\xc2\xaf\x15\x7e\x2f\xcd\x40\x6f\x36\x98\x35\x82\x84\x34\x28\xaa\xec\xaa\x5c\x9f\x61\x3e\xd2\x0c\x4d\xab\x1b\xeb\xf1\x48\xb5\xa5\x81\xca\xd0\x94\x2c\xc1\x1a\x74\xd5\x2f\xe0\x9e\x60\x4d\x90\x1d\xba\x70\x23\xcd\x17\x38\x09\xfc\x75\x21\x93\x81\x6d\x94\xf5\xff\x4e\x24\xa2\x00\xb0\x9d\x87\x3d\x59\x74\xf3\xfb\xd0\x43\xe8\xe0\x70\x61\x86\x4f\xd4\x3b\x29\x32\xc7\xa3\x8f\x30\xe9\x1b\x67\x77\x74\x06\xb9\x0c\x2c\x11\x8a\xdb\x77\x4a\x7f\xc4\xae\x07\x6f\x6a\x0e\xae\x9f\xbe\xbd\xd9\x85\x1c\x81\xe1\x50\xac\xff\x6b\x3c\x35\x67\x2
f\xe5\x63\x8a\xa4\xe2\x1d\x26\x63\xc3\x0d\xd1\x5c\x39\x8a\xf1\x26\x92\x3d\x18\xb0\xe2\x98\x42\x47\x83\x0f\x22\x34\x48\x74\x74\xf7\xc1\x9d\x0f\x43\x09\x65\x61\xef\x7a\xcd\xd5\x1f\x2c\x7f\x18\xd9\x9c\x1e\xa7\x9b\xe5\x9d\x64\x12\x74\x0d\xd0\x7d\xf5\x84\xd2\xda\x03\x52\x6f\x70\x70\x1e\x02\x3e\x53\x8f\x62\xd7\x1f\x61\xb4\x48\x17\xe7\x5b\x30\xc9\xdf\xf1\x2e\xd5\x87\x9e\xdd\x8e\x90\xdd\x8e\x30\x67\xac\x0c\x57\x12\x6a\xf6\x60\xcd\x5d\x1b\x49\xfe\x35\x5e\xfd\x0d\x6f\x5d\x93\x63\x5f\x48\x53\x71\x4e\x17\x38\xb5\x6b\x05\xce\x85\x0e\x34\xd2\x6c\x4c\xbf\x7d\xa7\x93\x31\x89\x15\xb7\x0f\x16\x71\xb6\x65\x1b\x42\x40\x07\xe4\xd9\x90\xf5\x9c\x10\x1c\x1a\x8c\xc8\xe1\xa6\xc3\x49\x7c\x16\xa5\x21\x4f\x1a\xc4\xc5\x9d\xfb\xad\xc6\x4c\x7e\x51\xb2\xc3\xfe\x0c\x30\x92\x09\x82\x66\x69\x3f\x1a\x3a\xf7\x97\x0f\x90\x3e\x2d\x01\x2a\x3b\x9c\x4b\x76\x43\x2f\x17\xb3\x08\x53\xfa\xe8\xb6\x6a\x16\x13\x5c\x4f\x28\xce\xf6\x47\x58\x08\x7a\x48\x7d\x09\xfc\x37\xa3\xb5\x97\x86\x81\x85\xe4\x61\xbe\x87\xa5\xc9\xe8\xd0\x48\x1c\xe5\xf8\x30\x07\x84\xe1\x07\xd8\x31\x95\xc8\x4e\x89\x2e\xf3\x0c\x48\xfe\x9e\x04\xf9\x98\xa0\xfd\x1c\x05\xf5\x14\xff\xec\x2e\xf9\x25\xcb\xbc\x38\xdd\xa2\xb4\x52\xc4\xb3\xa4\xc9\x50\x03\x6e\x70\xf0\x6e\x96\x21\x42\x5e\x99\x49\x2d\x59\x37\xc3\xed\xa3\xb2\x08\xd0\x01\x91\xdb\x55\xa2\x9b\x3a\x78\xc1\x4b\x8a\xf4\xd9\xa9\x00\xb9\x2a\x26\xfc\xad\x2e\x97\xbc\x6e\x7e\xa0\x6e\x47\x18\xcc\x81\xed\x01\x15\x2f\x76\x4c\xd4\xce\xe8\x23\xb9\x33\xca\x4a\x06\x95\x26\x76\xe2\xb8\xf6\x53\x6f\xb6\xf0\x06\x84\x7c\xa2\xdc\xfc\xa5\x3b\x4a\xe1\xd8\x40\xc3\x91\x25\xb0\x10\xc8\x4f\x2b\xb7\x32\xbd\x28\x30\x1b\x26\xf3\xd9\x26\xfd\x0f\x9d\x68\xea\xc7\x5b\x37\x28\x59\x92\x71\x26\xb3\x4a\x6a\xd9\x58\xee\xe6\x25\x31\x4e\x0c\x5b\x92\xa0\x9f\x03\x07\x77\x8a\x35\x79\xb3\xf0\x7b\x67\x01\x54\x86\xa7\x8c\x30\x6a\x80\xe8\x16\x5d\x80\x15\xfe\xd7\xe6\x86\x82\x52\x71\xaa\xa9\x53\x24\x33\xe7\x67\x06\x4c\x12\xa8\x2f\x3c\xb5\x6b\x56\x40\xe7\x0d\x8b\x28\x8c\x3d\x6a\xf2\x0c\x54\xcf\xc9\x87\xfb\x03\xd3\xa3\xf2\x9c\xf3\x0a\x17\x12\xb6\x84\x4
a\x52\xab\x01\xba\xf7\x24\x1a\x75\xfe\xdb\x40\x69\x07\xc4\x0a\xd1\xdd\x20\xd5\x51\x98\xfc\x61\xc7\xcf\x3a\x8b\xa6\xd2\xd9\x69\xe7\xbe\x7c\x4d\x93\x85\x81\x97\x7a\xbc\x9d\x1f\x6a\x77\x3d\xc4\x78\x8b\xdd\xd9\x1e\x53\x50\x8e\xbe\xfd\xab\xfa\x4b\x71\x62\x96\xad\x02\xf7\xb1\x04\xff\x7b\xfb\xd5\x49\x5d\x74\x69\x76\xed\xad\x51\x7c\x9c\x9a\x27\xca\xd4\x30\x95\x17\x6b\xb4\xc4\x06\xb2\xa7\x7e\x94\x2c\xca\xb7\xf7\x9c\x86\x7b\x44\xfa\x4e\xe5\x78\x8c\xc5\x78\x97\x21\x67\x6c\xa6\xcf\x87\xfb\x7c\x1c\xcf\x66\x7d\x51\x90\xdb\x5b\x73\xc4\x16\x42\x7d\x15\x4f\x70\x3d\xf5\xba\x82\xfd\xce\xa9\x53\x73\x77\x48\x7c\xb6\x2a\xe7\x3f\x93\x9c\xee\xc3\x82\xb8\xcc\xcb\xe1\x0a\x79\xb4\x13\x74\x92\xe4\xd1\xf6\x40\x04\x40\xc6\xca\x1e\xce\xc3\x87\xaf\x68\xd0\x71\x74\xff\x44\xa4\x68\x91\x15\xfd\xad\xb4\xba\xdb\x88\xb9\x04\x9a\x3f\x6c\x00\xd0\x62\x56\x58\xcd\xd8\x32\x94\x7e\x1e\x66\xf0\x67\x55\x2e\x20\x44\x0b\xf6\xbd\xdc\x59\x9c\xc9\x1b\x54\x59\x05\x9e\x60\xa5\xe7\xd1\xa6\x8c\x85\xbe\x36\x93\xe3\xcc\x50\x47\xc2\x66\x2e\x92\x22\x2c\x87\x15\xd8\x73\x12\xf4\xee\xad\x14\xb7\x0e\x31\xd9\x03\x20\xb1\xeb\x9f\x76\xb4\xc8\x32\x23\xe9\xa3\xc0\x0b\x71\xac\x21\xd0\x76\xbd\xec\x78\x6b\xdf\xa0\xe0\x7b\x33\x26\x9c\x41\x20\x3b\xc3\xc5\x1c\x31\x4a\x37\x5f\x8c\xb5\x54\x96\xea\x51\xca\xad\x2f\xca\x5d\x16\x2a\x41\xb7\xf4\x47\x97\x46\x3b\x91\x46\x3b\xa6\x3b\xda\x06\x97\xa2\x38\xf8\x62\xb4\x1b\x9a\x9a\x76\xe7\x0b\xce\x40\x0a\xbe\xc6\x91\x96\x76\x8e\x37\xba\xaf\x49\x11\x9a\x83\xb9\xd6\x20\x44\xa6\xd2\x12\xbe\xec\xe4\x9f\x58\xba\x23\x14\x23\xf2\x1b\x66\x0e\x61\x4f\xce\xf2\x68\xac\x0b\x59\x95\x18\xc5\x40\x50\xee\x9e\x10\xb5\xa3\xcf\xd6\xca\x8a\xf8\x53\xb5\xf6\xcd\x6c\x06\x15\xf9\x4f\x56\xfa\xc3\xdc\x44\xb5\x90\x7f\xbb\xe1\x07\x2e\x73\x9b\xa9\xfc\x61\x16\xd4\x18\xdd\xba\xba\xba\x43\x44\x48\xa6\x74\x4b\x82\x5e\xb0\xd4\xb0\xe6\x25\x1e\xcc\xb7\xc0\xca\x13\xcb\x1f\x5d\x27\x21\xcc\x57\x9e\x25\x50\xfb\x87\xc8\x4f\xae\xd9\x7f\x85\x04\xff\x06\x2c\x18\xf9\xa1\x90\x5e\xa4\x37\x5b\xc6\x10\x24\x5b\x92\xd5\xf9\x02\xe7\x2b\xe3\x4f\x59\x9c\x6
2\xd7\x9a\x08\xcd\x24\xf7\x91\x7a\x89\x2e\xf2\x76\x85\x77\xe6\xa3\x3e\x3d\x34\xce\x92\x25\xd4\xcc\x43\x94\xfe\x0e\x15\x8d\xf6\x0c\xde\x3b\xae\x3d\xc6\x7c\xc0\x03\x9c\x3c\x95\x3e\x6c\x7f\x6a\xb0\xcc\x5b\x03\x91\x9d\x08\xe1\x7a\x8f\x66\xd6\xb7\xfa\xe2\x40\xe5\x87\x3d\x58\x67\xd7\xcf\xc3\x6f\x1f\x1f\xe1\x61\x8a\x3b\x4a\xe6\x98\xb9\x61\xdf\xce\x49\x41\xf1\xf6\x35\x57\x96\x4b\xe0\xd9\xfa\x50\x6c\x1f\x7c\x48\x44\x30\x1a\xb0\x4b\x4c\xa9\x93\xe5\x1e\x46\x73\x52\xd9\xa0\x52\xd9\x54\x5f\x3a\x2b\x67\xba\x04\x6f\xc7\x4d\x3e\xff\x21\x80\xf6\xfd\x54\xdf\xf2\x2f\x76\xd7\x82\x52\xe3\x06\x00\x9c\xe5\x71\x95\x57\x4d\xc2\xb4\x08\xd6\xf2\xb5\x64\x56\x92\x01\x20\x29\x87\xb3\x1c\x21\xb2\x25\x99\x53\xa1\x1e\xfd\xd8\xa0\x80\x8a\x05\x50\x00\x86\x2a\xf8\x37\x21\xc6\x0c\x00\xa3\xdf\x4b\x6e\x2f\xa5\x4b\x5a\xef\xaf\xa7\xc0\x14\x02\x8a\x84\x1b\x28\x7d\x02\xeb\xfa\x49\x77\x49\x45\xa7\x63\xb3\x38\x54\xf0\x2c\xa0\xd6\xe7\xb7\xb3\x49\x6c\xa8\x15\x9a\xfd\xf8\xf6\x4b\xd2\xa0\xd0\x82\xe8\x28\x35\x51\xf5\x57\x1a\xfb\x4d\x23\x9f\xa0\x3d\x00\xef\x60\x67\x50\xa9\xc9\xf8\xb8\xa6\x26\xfb\x10\xd9\xb9\x22\x10\x95\x9f\xe1\xf3\x2d\xc6\xe0\xc0\x1e\x79\x66\xa0\xd1\x2d\xbd\xd8\xb8\x63\x13\x2e\xe9\x3c\x05\xe5\x29\x5e\x30\x05\x35\x56\x5d\x85\x12\xf2\x4b\x21\x4f\x04\x90\xd5\x53\xf9\x75\x65\x2a\xc9\xa1\x4d\xd7\xc0\xb0\x3a\x30\x24\x03\x01\xf7\x80\x99\xf3\x03\xab\x9f\xf9\xc5\x7e\x22\x5f\xbf\xda\x43\x6f\x2e\x43\x96\x3b\xac\x08\xbc\x5e\xaa\xad\xc9\x12\x27\xb2\x5d\x60\x6c\x09\x42\x3a\x7b\x5d\xbb\x1b\x4d\xed\x08\x23\x3d\xb1\x3c\x54\x67\x24\x41\xa5\x34\x4b\xba\x4a\xf8\xb4\x07\x46\xed\x18\x9d\xa9\xdf\x03\x2b\x8f\x4e\x35\xbe\x48\x9f\x5d\x26\x43\x09\x01\xd1\xc5\x30\x19\xf1\x5c\x47\xe6\xfa\x86\x89\xfd\xed\x86\xe8\xfe\x66\xa1\xc0\x60\x3c\xcd\x7f\x75\xf7\xcf\x35\x8a\x23\xc4\xb3\x5b\xba\x84\x47\x7b\xb5\xd1\x8d\xeb\x48\x59\xbc\x9b\xc4\x2c\xf2\x8c\x76\xf7\x8b\x65\x66\xfb\x10\x86\x5b\xcb\xd0\xfe\xab\x2a\x6b\x11\xff\x9d\xbb\x78\xb6\x60\x33\x10\xc8\x7b\x7a\x6a\x30\x72\x74\xe7\xb6\x47\xf3\xd9\x25\x80\xd8\x32\x95\x78\x5f\x14\x46\x24\xe8\x7
c\x60\x64\xf0\x47\x51\x4c\xe9\xfb\x5e\xef\x20\x34\x66\x7b\xfe\xd7\x43\x74\x97\x83\x37\x55\x79\xce\xf5\xed\x88\xfc\xd5\x5e\x6d\x31\x43\xe2\x81\x67\x15\x87\xcc\xf3\x0c\x57\x82\x3e\xb0\x74\x87\xe8\x47\xdb\x56\xe5\x19\xec\x96\x28\xcf\x10\x26\x7e\xbf\x7a\x8a\x09\x1a\x4a\xf5\x81\x38\xb0\x6d\xf5\x91\x6c\x94\x1d\x36\x58\x57\x5b\xc0\x17\xe5\xd9\xca\x7f\x20\x22\x3b\x5e\x57\x52\x28\x95\x1f\xd8\x9d\x85\xdf\xad\x9f\xbd\xf6\xec\x6f\xc3\x11\x6e\x2d\x14\x9f\x5a\xea\x9c\x72\x98\x07\x8d\x6d\xf2\x03\xb3\x0a\xf3\xe7\x48\x19\x53\x1c\xc2\x80\xb1\xbe\xac\xeb\xf2\x87\x1e\xea\x0f\xa6\x16\x98\xaa\x73\xf1\xd1\xb2\x5f\xb3\x05\x18\x78\x20\x47\xb1\xb5\x64\x9b\x51\x12\x3a\xa4\x28\x3a\x3c\x6e\x59\x3f\x23\x49\x40\xe4\x49\x64\xe5\xd8\x31\x0b\x92\x36\x77\x76\x24\x03\xaf\xfa\x55\x60\x28\xe2\xfd\xed\xd3\x38\x91\x28\x3a\x9c\xec\x3c\x2e\x50\x05\x7a\x13\xe2\x32\xe4\xa7\x58\x17\xb2\x65\x89\x03\xc3\xa5\xc8\xde\x4b\xe8\x09\x05\x33\x6c\x5e\x36\x66\x75\x3f\x9e\x9d\xee\x9f\x6f\x85\xfa\x43\x2c\x48\x71\x38\x7c\xa9\x6b\x23\xc5\xd5\xa6\xe0\xca\x28\x0f\xeb\x93\xc3\x01\xf3\xc5\x0f\x20\xcc\x4a\x36\x9e\xda\x4e\xfc\x25\x85\xc0\xff\x3a\x96\x79\x63\x09\x6d\xbe\x38\xe6\xee\x98\xdf\xdc\xa9\x3b\xa5\x4b\xc0\x7a\x7e\x5f\xe2\x3c\x54\xd1\xb9\xb9\xbf\xdc\x55\xc9\x82\xc7\x7d\xac\x91\x9f\x09\x7a\xe3\x20\x10\xa8\xcc\x3a\xf7\x4e\xd4\xe2\xe3\x48\x43\xa5\x8e\x16\x07\x21\x9b\x6d\xb0\xaf\x89\x85\x67\xba\xe8\xc6\xdb\xa0\x90\x85\x38\xfa\xe6\xc7\xba\x7e\x13\x79\xa8\x0e\x92\x85\x3d\xbe\x28\xcd\xa8\xdf\x59\x2a\xda\xda\x5f\x28\x0f\xeb\xa1\xe7\xb1\x8f\x8c\x69\x86\xc8\x3f\x92\xac\xee\x43\xfc\x73\xbd\x27\x17\xec\xcc\xc0\xa4\x31\x3f\x30\x13\xc3\xc8\x59\x41\xf7\x36\x10\x5a\xbf\x25\x68\x88\xe7\xf7\x9d\x81\xfa\xae\xbe\x43\xaf\x97\x12\x6d\x78\x27\x4e\x60\x4c\x2d\x43\x85\x4a\x92\x8d\xf9\xdd\xa8\xbf\xe0\x65\x40\x65\x4f\x4c\x89\x58\xd3\xf9\xef\x86\x2b\x3c\xa2\xf6\x95\xa0\x49\x78\xe7\x5d\xa2\xf1\xba\x57\xa3\x4c\x32\x08\x0c\xd6\xfa\xb2\xbe\xef\xfb\x79\x98\x63\x97\x69\xbc\x48\xc2\xe4\x68\xaa\x6f\xc6\xef\xd2\x29\x1e\x7a\xc3\x84\x96\x77\x76\x46\x7e\x1e\xc2\x7
a\x40\x57\xd0\x96\x91\x45\x1b\xf1\xf5\xb5\x84\x3e\xe0\xf4\x28\x1c\xe8\xf8\xa9\xad\xca\xd3\x44\x68\x83\x6f\xb8\x68\xe1\x10\x1c\x62\xd4\x5b\x63\xfa\xef\x7c\xc9\x2a\xf3\x9b\xbf\x9c\xd6\x05\x13\xbe\x0c\xb6\x71\xb5\x48\x1e\x86\x07\x79\x8c\x14\xe3\xfc\x09\x3b\xf0\x7b\xd9\x1b\x7f\x84\xfc\xa8\xcd\x7b\xe0\xe0\x62\x8b\x79\xfe\xfd\x36\x74\x64\xd8\x6e\x1c\x3c\xd9\xf1\xfb\x28\xd3\x7e\xef\xbc\x30\xbf\x8a\x59\xdc\x47\xc8\xba\x4b\x02\x51\x2f\x60\x0b\x05\x40\x07\x7f\x39\x5a\x5d\x1e\x1e\x72\xf0\xcb\x66\xb7\x0f\x9a\x1c\x26\x26\x05\xcf\x7b\x11\x7c\xeb\x08\x3f\x2a\x3f\xd5\x7e\xba\x32\xe5\xb7\x42\xc1\xb2\x2e\xdf\xdd\xcc\x90\xd8\x4c\x7e\x36\x99\xcc\x63\x52\x42\x78\xfa\x0a\xb7\x66\x8f\x07\xee\x12\x13\xac\x70\x00\xbb\x21\xf5\x94\x46\x4e\xa0\x9d\x31\x53\x54\x92\x2a\x01\xe8\x06\x49\xbd\xc6\x0f\x39\xe5\x09\x2f\x4b\x54\xbc\xa3\xcc\x89\x51\x14\x23\x52\xfa\x18\xba\x35\x30\x5b\x89\xc6\x77\x94\xca\x11\x95\xf8\x49\x99\x21\xd6\x96\xc4\xca\xc6\xe0\x6a\x51\x04\x3a\x06\x66\x4a\xe7\x57\x50\x95\x29\x33\x0f\x00\xcb\xbf\x5e\xf1\x65\xa9\x66\x9a\x4b\x4c\x0a\xfe\xfd\x9c\xd5\xd4\xac\xc6\xb3\x44\xe6\x80\x4d\xbe\x5a\x9c\x62\x5d\xcb\x6b\x97\x9b\x56\xce\xc3\xbf\xc8\x14\xc9\x9b\xa8\xd5\x95\xa6\xa2\xcc\xf9\xbd\x8b\xf0\x9b\xed\xa1\xac\x22\xb8\x94\x44\xa6\x4b\x9a\xf6\x23\xe3\xe5\xf7\x5a\xbe\xcd\x1d\x6d\xa5\xe9\x5e\x93\x63\x61\xc9\xbe\x6c\x43\x02\x95\x29\xa0\xd9\x0f\xd9\x42\xbb\x39\xea\x63\x9e\x4c\x70\xa2\x8b\xad\x4d\x31\xb4\xf6\xe3\x37\x9d\xbb\x6a\xad\x30\xe9\xf2\x9a\xb4\xdb\xf2\xbf\x39\xca\x64\x57\x6e\x6d\xdf\x13\xa3\xea\x29\xf9\x1d\x6e\x7c\xd9\xbf\x42\xba\xf8\x17\xc7\x2e\xc8\x7c\xa3\x21\xde\x5f\x9c\xb6\x2b\x02\x5f\x8c\x6d\x17\xdd\x1f\xf2\x1b\x1c\xf1\x2e\x8e\x79\xc1\xa1\xec\x70\xa5\x2e\xd1\xc5\x25\x69\x26\x4f\xab\x0e\xeb\x31\xda\x9f\xab\x8f\x6c\x65\xfd\xfe\x6a\x33\xb9\xbb\x72\x07\x85\x3d\x61\xab\x29\xa3\x04\x2a\xdd\x9f\xc4\xf3\xfb\x5c\xfe\xd7\xf6\xca\xa6\xf7\xec\x09\x4f\xf2\x42\x8d\xb2\x8c\xff\x7c\xab\xe5\x8b\x90\x4c\x01\xa1\x26\xaa\xf2\xf7\x5c\x7e\xa0\xf0\x41\x91\x26\x98\x47\x93\x40\x20\x28\xff\x8c\xa8\x87\x85\x1b\xa
2\x0d\x77\x86\x66\xb4\xb1\xd2\x4a\x4d\x6b\xf5\xfb\xcc\x9a\x1e\x77\x07\xd6\x95\x9f\x23\x54\x89\x66\x73\x0b\x39\x39\xe6\x25\xc2\x6e\xba\x9e\x88\x62\x76\x9a\x69\x91\x60\x86\xe9\x2b\xae\x17\x91\xae\x69\x08\x28\x37\x4d\xb2\x7b\xa2\xf4\xf1\xa2\x67\x07\x50\xa8\xfd\x9c\x49\xff\x49\xf7\x11\x4f\x4f\x33\xcf\x97\x5d\x46\x3f\x31\x70\x08\xe8\xc7\xb3\xf8\x16\xaa\xe7\xea\xab\xc6\x0f\x91\xe9\x0b\x12\xe3\x8a\xa1\x44\x9a\xd8\x91\xda\x65\x00\x20\xe8\x38\xd9\x83\x73\x24\xa1\x32\x9d\xd0\x52\x2b\xc5\xe2\x3b\xa9\x82\x1a\xf6\x0c\x8f\xc8\xc5\x04\x09\xeb\xad\x55\xf5\xc7\x11\x79\x83\x4b\xbd\x9a\x23\x09\xc7\xeb\x85\x18\x2f\x26\xea\x62\xd4\x9b\x22\x68\xca\x86\x55\x7e\x7c\x09\x46\x05\xad\x26\x88\xee\xf8\xef\x5e\x90\xcf\xa5\x5d\xdd\x48\xff\xfb\xc5\x9e\x12\x33\xe7\x40\xbf\xe3\x4b\x00\x10\xac\xd5\x24\x39\x5f\xf6\xa8\xa1\x36\x83\x4a\x23\x6c\x71\x68\xb4\xdf\x2f\xaa\xa5\xc1\xad\xbb\xf0\xa5\xfc\xf8\xfa\xa2\x0b\x63\x54\xf1\x8d\xaf\x0b\x13\xad\x97\x44\xfe\x30\x26\x45\x90\x44\x36\xa7\x3a\xca\x28\xa9\x68\x0e\xc4\x91\xb6\xf2\xf4\x9e\x48\xba\x61\x50\xb9\x23\x7a\xbd\x8a\xef\x0c\xe7\x06\x96\x43\x43\x6c\xc3\x91\x49\xb3\x5c\xca\xe0\x96\xe6\x0a\x68\x5e\xbd\x9b\x41\xd3\x35\xb5\x99\x32\xe3\x19\xc6\x58\xee\xcb\x02\x9d\xfc\xdb\xc4\x92\x1b\x91\x72\xf3\xb9\x3a\xf9\x62\x74\xd4\xf9\xfc\x31\xa3\x91\x9a\xe3\x29\x02\x10\xce\x52\xea\xfb\xac\x45\x9b\xe9\x4d\xdb\xd4\x3a\x91\x5c\x65\x7d\xc5\x7f\xdf\x4e\xbe\x61\x28\xd3\xb4\x34\x55\xde\x6a\x52\xcc\xd7\x4b\x62\x19\xbb\x65\xd9\x1c\x84\x67\xd8\x71\x60\xee\x0b\x92\x7e\x5c\xbf\x8d\x50\x89\xbb\xab\x60\x59\xc8\xa3\x3d\x0b\x36\x33\xc4\xac\x38\xf9\x62\x2a\xf1\x0f\xf8\x4d\x5b\x45\x4f\xba\x24\xf3\x0b\x5e\x52\xb4\xa0\x6f\x6e\x9f\xad\x08\x7d\x8a\xd8\xaf\x6d\x7c\x71\x9e\xe2\x9f\xae\x33\x46\xcc\xc1\x75\x33\x52\x5e\x36\xa3\xb3\x5d\xd7\x86\x1c\x3a\x74\x09\x6e\x91\xf4\x26\xe1\x26\xdc\x2a\x76\xfd\xe3\xba\x4a\x63\x6b\x8e\x65\x0b\xeb\x0c\x6e\x6f\xc0\x0c\x2f\x84\x89\x83\x1e\x50\x1c\x22\x4f\xaf\xfb\x74\x4f\xb5\x80\xef\x9a\xc2\xee\x21\xf3\x32\xb1\xbb\x6e\x8a\xac\x89\x2b\x77\x80\x68\x71\x2b\x71\x08\xa6\x2
2\x7d\x93\xbb\x1b\x68\x65\x6a\xcd\x15\x40\xb9\xa3\xe5\xb9\x91\xbe\x26\xa7\x18\x56\x62\x2b\x92\xa9\x63\x01\x23\x77\xa3\xc5\x60\x24\xe9\x96\x00\xe7\xd8\xb2\x57\x07\x95\x66\x37\xee\x49\xf7\x6b\x8d\xb0\xdc\x8d\x91\xe2\x20\x36\xda\x03\x78\x1e\x32\x48\x1b\xdd\x95\xdd\xd3\x74\xdf\x9e\x5c\x75\xe0\x5e\x8e\xb2\xd0\x50\x89\xc5\x36\x78\x08\x4b\xb6\x52\xcf\x30\x7e\x94\x53\x61\x57\x1d\xf4\x2c\x46\x60\x98\xa3\xfb\xda\x07\x4c\x97\x3f\x8f\x34\x4a\xd2\x47\x86\x37\xfa\xb3\xfa\xec\x0c\xa0\xb3\x8f\x3d\x7f\xec\x93\xc9\x1d\x6e\x8d\x57\xb8\x64\x4b\xbf\x77\x8d\xe3\xb9\x93\x06\xbc\xce\xce\xfb\xef\xd7\x2a\xe7\xc2\x55\x1f\x35\x6d\xc4\x65\x34\x9b\x21\x54\xc3\xc7\x1a\x9c\xe1\x7e\xff\x72\x21\x6a\xf2\x75\x97\x7f\x6d\xfc\xc9\x8d\xe9\x47\x3c\x80\xdb\x15\xa7\x5b\x80\x4b\xc0\x89\x58\x7b\x44\xa6\x7f\xaa\x22\xad\x22\x2f\x22\x6a\x6a\xd3\xeb\x8c\xe9\x68\xf1\x00\x1d\x6d\xeb\x93\xca\x57\x4d\xe7\x54\xda\xb1\xea\x0a\xc9\x24\xbb\xe8\xce\xb0\xf5\xa3\x6d\x5b\x24\xf1\x0c\x1c\x06\xab\x53\x58\x0e\x01\x1b\x84\xb6\x00\x69\xb6\xdc\xc1\xb5\x77\x77\x75\x2b\x04\xf1\xca\x25\x3b\x5f\x8b\xb3\x59\x2d\x3d\x27\x7c\x6c\x36\xbe\x4d\x85\xb2\xda\xb6\xf0\xdc\xd5\x01\x50\x22\x9a\x10\xd7\x0c\x66\xbb\x9f\xe3\x22\xba\xd8\x23\x57\x47\xeb\x6e\x4a\x27\x73\xa5\x09\x06\x3d\x98\xe8\xed\x79\x96\x79\xca\x40\xfe\x14\x29\x88\xcc\x62\x67\x8b\x55\xd3\x87\x37\xca\xf9\x6a\x72\x12\x2a\x98\x7d\x95\x7b\x37\xe6\xa0\x4a\x06\xa0\xa3\xbb\x27\xfb\xaf\xaa\x71\x0f\x39\x31\x2a\xdd\xdd\xdf\xb5\xd5\xab\x4b\x67\xc6\x3b\xc1\x49\xef\x96\xb3\xde\x64\xfb\x43\x2c\x98\x6e\x39\x26\x37\xa8\xf1\xd6\x77\xf7\x0b\x9c\x3a\x1b\x67\x88\xbb\xbc\xcf\xae\x72\x48\x1b\xf3\x35\x4c\x76\x7c\x06\xca\x26\x42\xbf\xeb\xdd\x13\x1f\xa6\xd6\xd8\xba\x3d\x55\xb8\x99\x16\x33\x33\xdd\xb7\x5b\x63\xca\xbb\x32\xa1\x66\xf4\xd6\x2e\x42\x60\xcc\x48\x74\x7e\x16\x0b\xfa\xe3\x47\x08\xf0\xf5\x87\xe4\xab\xcd\x10\x79\x27\x6d\x52\x0b\x8f\x8c\x3a\x46\xf4\x2f\xdb\xd9\x55\x26\xb4\xfc\xa4\x98\xec\x17\x65\xb2\xce\x6f\x06\x74\xef\x13\x6c\xeb\x0c\xa3\x35\x6b\x6b\x83\xf7\xd4\xee\x1e\x2f\xca\xc5\x18\x05\x7d\x83\x13\x9
9\xd1\x54\x73\xe2\xf5\x6c\x1d\xcc\xb8\xa8\x10\x13\x9d\x7d\x10\x28\x16\x6a\x29\x0f\xfd\x3e\x69\x76\x8a\xcf\x0e\x91\x6a\x4b\x3f\x0a\x56\x25\x39\xcf\x97\x4e\x9a\x87\x7d\xee\x62\x70\x65\x3a\x58\x56\x32\x39\xce\x81\xe2\xa1\xd3\x4b\x75\x42\x86\xb1\x39\x58\x04\xf4\x77\x0f\x14\x4b\x39\xaa\x0d\xec\xbd\x02\x12\xfa\x18\x02\xb6\x42\xac\x60\x22\x27\x48\x96\xe2\xd1\xd0\x3b\xd9\x7a\x7f\xb2\x2f\xdb\xae\xdf\xee\x08\xf9\xfa\x82\x5e\x40\xb4\xfe\x2d\x1f\x16\x62\x16\xda\x1d\x15\x38\xf8\x24\x66\x54\x52\x82\x86\x1c\x30\xee\x3d\x18\xb5\x23\x1d\xfa\x6d\xc5\x77\x46\xf9\x0b\x96\x42\x15\xf4\xaa\x83\xcb\xa6\xc2\x29\x76\xa2\x19\x24\x13\xb1\xb9\x63\x8b\xce\x75\x98\xf7\xb7\x41\xa2\x4d\x34\x29\xfe\xe4\x0d\xd2\xc2\x84\x67\x9b\x17\x86\xc1\x99\x81\xca\x8f\x3e\x64\x07\xc0\x6d\x3f\xb6\x8a\x44\xbe\x46\x1e\xca\xb8\x24\x39\x69\x96\x65\x5f\xfd\x3b\x0a\x3a\xb2\xf8\x8c\x2e\x41\x6c\x81\x9a\x17\x9f\x7a\x5f\xe0\x44\xfd\x76\x31\x5f\xfd\x79\xd8\xd3\xf1\x7e\x54\x18\x15\x1c\xb4\x87\x58\xe3\xe3\x05\x83\x50\x24\x0d\xc1\xfb\x54\xf1\xb8\xdd\xb1\x08\x23\xfa\x6b\xe3\x4c\x71\x49\x8a\x39\xae\xb8\xc2\x65\x23\x0b\x69\x62\xbf\xee\x18\x55\x3f\xd6\xc3\xcb\x5d\xd5\x7c\xbc\xaf\x1d\x8e\xef\xcc\x60\x32\x5f\x4b\x41\x9f\xf7\x5c\xce\x0d\x69\x7e\x58\x13\xfa\x03\x6e\x09\xe7\xf9\x26\xc9\x70\x67\xd1\x36\x8e\x78\x7e\x1f\xfd\xbe\xca\x33\x2d\x9f\x0a\xe4\xf6\xa2\xd7\x8e\xb7\xc9\x1f\x3b\xd8\xd4\xe1\x5b\x07\x17\x78\x39\x4d\xa2\x2b\x84\x38\xd2\xde\x1c\xdb\xcb\x1e\x31\x52\xe4\xeb\xbf\x00\x5d\x19\x05\x46\xaf\x7b\x93\xff\xf8\x4e\x80\x0b\x01\x4d\x97\xed\xc0\xac\x77\x4b\x2f\x25\x71\x47\xe5\x31\x51\xb3\xb7\xa6\xde\xf6\x7c\xed\x49\xb4\xd4\xc0\x0f\xb6\xad\x84\xe2\xf3\xcd\x80\x50\xa7\x03\xc2\x6b\xa3\x0f\xef\x6b\xca\xbb\x6d\x1f\x4b\x6d\x3f\xd1\x06\x7f\x2d\x42\x75\x8a\x28\x24\xb7\x69\xd8\xd2\x60\x50\xbf\xcd\xe3\xa1\x5a\x25\xd8\xdc\xda\x11\x86\x25\x40\x55\x9e\x1d\xd8\xac\x2b\xfd\x19\x22\xba\x9d\x6d\xbf\xba\x26\xd5\x5f\xeb\xf5\x8c\xb3\x24\x4e\xb7\xff\xc2\x74\xdf\xbe\x7c\x3d\x71\x53\x51\xf6\xf8\xc6\xf4\x9b\xd6\x93\xc2\xb6\x43\x20\x7c\xd9\xf3\x5c\xdb\xb2\xe6\x3
f\x51\xfb\xc7\xee\xf5\x25\x73\x69\x72\x6f\x99\x78\x0e\x72\x12\x20\xfb\x8a\x90\x44\x72\x86\xd2\xef\x4b\xe8\x4e\xb3\xa9\xfc\x15\x8e\x17\x3d\x71\x7c\x2c\x8f\x66\x70\xa3\x86\xc0\x92\x5c\xe2\x70\x8e\xd2\x4e\x2f\x27\x7c\xd4\xce\x42\xa8\x82\x5e\x42\x55\xe6\x8d\xe5\xa0\x50\xfe\xa2\x04\xf7\x7e\x91\x5f\x2b\xb3\x84\x7f\x77\x5a\xbd\x40\x65\x08\xd7\xd1\x62\x58\x61\xe2\x48\xe0\xb0\x37\xbe\xa5\x50\x57\x86\x29\x0f\xd1\x9d\x6a\x47\x3e\x74\x71\x56\x93\xec\x78\x5f\x7f\x37\xc3\xf7\x2e\x74\xdb\xe6\x08\xfc\x58\xfd\xe5\x51\xec\xe3\xb3\x24\xc0\xd2\xb8\xfc\xa0\xe2\x35\x41\x77\x35\x32\x7f\x70\xdc\x08\xd4\x42\xef\xbb\xba\xc2\x2e\x7f\x1e\xe2\x18\x07\x9d\x53\x71\x55\xc0\x43\x98\x3a\x9e\xd8\x8a\xf2\xc2\xfa\x12\x2d\xdf\xef\x03\x2c\xdb\xa4\x47\xbb\xd8\x6b\x25\x70\x70\xf5\x57\x1a\x58\x22\x41\xfb\x5f\x3b\x18\x5f\xa2\xbe\x69\x93\xaf\x89\x3a\x72\xd0\xe3\x54\x9e\x6d\x8d\x35\x40\x7f\x44\x2e\x31\x3b\x69\xc5\x21\xb3\xfa\x84\x14\xa4\xed\x28\xdd\x9f\x68\x47\x8e\x05\x0e\x5d\x8d\x46\x2b\xa3\x3a\x6d\x3f\xa1\x2b\xb2\xf0\x1b\xf1\x9f\x00\xa4\x3e\x7e\x2d\x8e\xec\xe6\xd6\xad\xb5\x29\xca\x9c\x42\xcd\x49\x32\x0c\x95\xbf\x64\x98\x27\x40\xd0\xf4\xdf\xc3\x5d\x12\x49\x61\xa6\xf4\x31\x8a\xd9\xa8\xf2\x90\x04\x51\x67\xc3\x11\x23\x8e\x74\xcb\x0d\xf1\xc7\xac\x31\x9e\x07\x9e\x14\xbc\x31\x6a\xd3\xda\x90\x33\xe8\x3e\xa6\x17\x43\x44\xb7\x16\x7e\xf5\x00\xb7\xf8\x52\x62\x4a\x9e\x00\x78\xa4\x7c\x89\x22\xec\xf1\x1d\xb7\x3a\x7a\xc8\xcc\xd1\xfc\x8d\x23\x42\xf1\xd6\xa5\x13\x59\x5e\xfc\xe5\x51\xfe\xca\x44\xa8\x1c\x29\xfc\x0d\x54\x59\x7a\x6f\xe6\x90\x9f\x23\xe2\x38\xd9\xf1\x7a\xd8\x17\xf1\xbb\x22\x35\xd8\x60\xab\x40\xf8\xf2\x16\x6d\x40\xa0\x5c\xea\xcd\xaa\x1a\x1f\x79\xa8\xa2\xf7\xb5\xfc\x55\xd4\x1e\x5c\x08\x94\x66\xf3\xd6\xdf\xdc\x02\xed\xea\xc6\xce\x59\xa6\x68\xc4\xa6\x9e\xd8\xed\xfc\x65\xf0\xfa\xe9\x06\x97\x39\x46\xdf\x14\x26\x2e\xb4\x1f\x67\xd0\xa8\x3b\x50\x64\x12\xbc\xb7\x0b\xd1\x2a\xec\x58\x78\x77\x61\x70\x3d\xf1\x73\xcc\xef\xc8\x19\xef\xd5\x44\xe5\xad\x5d\x85\x0a\xdb\x71\x5b\xa7\x72\xd5\x3a\xbc\xf3\x9c\x07\xc8\xa1\x66\x0e\xd1\x8a\x7
c\x4f\x17\x06\xb9\xa1\xb6\x8c\xe3\x27\x59\x10\x1b\x9a\x52\xef\x7e\x45\x3a\x45\x43\xde\xac\x38\x83\xf3\xc6\xf9\x8b\xd3\xd6\x16\xaa\xa8\xf4\x0c\x11\x46\xe1\x7f\xb9\x6f\x38\xc3\xb5\x3c\x9d\x87\xc8\x61\x97\x99\x52\x04\x65\xf0\xe1\xef\x55\x1e\x21\x37\x30\x4f\x31\x30\x72\x0b\xd3\x1d\x85\x3e\x21\x75\x65\xa8\x36\xee\x24\xf4\x01\x3b\x03\x4a\xe4\x52\xeb\x97\x10\xa2\x5e\xd8\xdc\x9f\x25\xbd\xb5\x64\xc8\xe3\xfb\x43\x59\xf5\xf9\xf3\xf8\x85\x28\x8e\x68\x61\x59\x4d\x3d\xc6\x74\x77\xba\xd9\xfc\x9e\xb6\xc7\x7f\x70\x93\xa9\x2f\x82\x2c\xc2\x53\x9d\x32\x4d\x63\x07\x53\xa1\x28\x66\xc7\xd2\x5f\xbe\x73\x1b\x4d\x7c\x66\x24\x2f\x83\xfa\xc2\x53\x36\xa9\x71\xd5\x78\x96\x5d\x9c\xa4\xc7\x3e\x95\x95\x90\x1c\x81\xfd\xd6\xba\xc8\x40\x37\x24\x46\x8a\x55\x38\x4a\x37\x2b\x8e\x4e\xf6\xe7\xf5\xf3\x63\x55\x4f\x7e\x6e\xbc\xfd\xf8\x38\x45\xbe\xdc\x97\xef\x5d\x36\x46\x14\xbe\x44\xd8\xf9\x37\x28\x60\x54\x9a\xfa\xc0\xd4\x4b\xde\x64\x6d\xa3\x47\xd3\xb8\xb5\xa4\x68\x6c\x51\x95\xb4\xd2\x71\xf6\x45\x11\x14\xca\x74\xb6\xf0\x1a\xc8\x17\x83\x42\xc7\x6e\x38\xf6\x8f\xd8\x5a\xdb\x68\x09\x21\x4d\x47\x4e\x82\xd9\xec\xd8\x88\x8a\xfe\x45\xba\x38\x3e\xd8\x89\xfd\x69\x47\x93\x55\x8e\x40\x48\xa6\x5c\x11\x88\xe2\x3b\x3d\x8f\x97\x66\x59\xe6\x09\xe4\x10\xc9\x96\xe5\xd6\x6f\x48\x42\x99\xc3\x85\x16\x90\xda\x39\x81\x28\xb6\x54\x80\x24\xf0\x9e\x94\x78\xa4\x89\xe9\x7c\xd3\x64\xfc\x23\x0c\xe2\x6f\xfc\x8b\x1e\x1c\x5b\x9a\x3c\x00\x70\x07\x41\x9d\x61\xfb\xce\xa4\x6e\x60\x7b\xb0\xca\x5b\x64\x04\x6b\xc7\xcc\x30\xb5\x49\xa7\xe4\x92\xbc\x4b\xa6\x80\x01\xe7\xb9\xec\xc1\x2e\xeb\x82\x94\x43\xb6\x88\x51\x3b\xa6\x02\x50\xd9\x1e\xfb\xa1\x31\x4b\x37\xc0\x25\xf0\x91\xa6\x3d\x4a\x2b\x39\x1f\x72\x9c\x4d\x71\xc5\x49\xc3\x91\x05\x48\x2d\x9a\xd9\x78\x33\xa6\xbd\x23\x2a\x41\xf7\x53\xde\xf6\xc4\x68\xa3\xbf\xff\x49\x24\xdd\xe9\x64\xea\xde\xb5\x88\x30\xa4\xaa\xaf\x28\xfc\xd6\x89\x02\xdd\xb1\xe4\x56\xcd\x97\x6a\x9f\xe3\xcf\x9f\x21\xba\xdb\x58\x41\x84\x9e\x5c\x1b\x5d\x9a\x4b\x97\xf7\x13\xb5\x86\xb7\xfa\x6f\x45\x3c\x70\x51\xef\x5a\xf4\xb9\x4e\x1e\x1b\x5f\xe2\xd
d\xaa\xb1\x3d\x22\x37\x1e\x1a\x3a\xe6\x9c\xc4\x3d\x02\xd1\x9d\xdb\x78\x47\x51\x75\x6b\xe2\x33\x54\x9c\x21\xd5\x87\xcb\x23\x9f\xe8\xa1\x8f\x26\x75\x31\x05\x0d\xc5\xcf\xbe\xa7\xb5\x8a\xf2\xf4\x18\xf4\x74\xdd\x1a\x99\xab\x7e\x61\xf8\x24\x91\x1c\x25\xb5\xb1\x5d\x4f\x94\x6c\x02\x36\x8d\x51\xb5\xa1\xf5\x63\x5a\x65\xd6\xef\xe3\x60\x92\x01\x58\xe3\x03\x93\x55\x30\xfb\x6b\x82\x46\x59\x61\xb2\x11\x9e\xca\x94\x50\x40\x13\x3a\x34\x84\xe9\x4e\x8e\x21\xbe\xd9\xab\x95\x87\x60\x84\xc8\x4e\xdd\x24\x99\x33\x0e\x50\x64\x6d\x31\x63\x34\x1e\x9a\xd3\xd7\xdc\xa4\x74\x63\x1a\x8a\x35\xf4\x34\x44\xe5\xfa\x24\x55\x73\x9c\xfe\x65\x8a\x3c\xd9\xd9\x34\x11\x93\x55\xcd\x0a\x1f\x8b\x2d\xe8\x6f\xab\xcd\x03\x5f\xcf\x3c\x03\xf9\x8c\xed\xe5\xbf\xfd\xbc\x8f\x2a\xd2\x15\xf5\xfb\xb6\x9f\x59\x1a\xbc\x3e\x0e\xf6\x40\x8d\xb5\xf2\x03\x93\x27\xf4\x79\xf3\xe3\x1d\x56\x7f\xaf\xea\x47\x76\xbe\x3f\x64\xa7\x52\x58\x64\xd6\x13\xa1\x70\x26\x70\xd8\x3c\x0f\x73\x98\xfd\x5c\x25\x97\x4c\xc9\xb7\xee\x06\x48\xe9\x5c\x54\x6c\xbe\x07\x76\xc9\x50\x59\x57\x4c\x5c\xbd\xc6\x23\x78\x56\xe1\x98\x77\xe1\x28\x38\x31\xbb\x65\x39\xeb\x5a\x38\x5d\xdb\xcf\xbc\x0d\xfd\xbb\x1b\x9a\x48\xc0\x34\x99\x06\x7d\x50\xc5\xee\x51\x7d\x11\x93\x8e\x6b\x5a\x8f\x37\x14\xdf\xd7\x59\x73\x29\x0c\xa1\x92\x62\x28\x69\x28\x51\x0f\xa0\xfd\x5a\x2f\x52\xb9\xcf\x9b\x1d\x83\x41\x1d\x53\x34\x58\x99\x69\xd0\x27\xf3\x71\xbc\x7e\x7e\xd3\x05\x39\x61\x28\x57\xa6\x9f\x80\x13\xa9\x3e\x20\x10\x53\x4c\x41\xe1\x11\x93\xc8\x40\x90\x4e\x5f\x40\x7a\xb5\xc7\xa3\xc5\x24\x76\x8d\x44\x68\x6c\xcd\x14\xdd\x31\x5b\xbf\xdd\xbb\x8c\x75\x8f\xad\x37\xf1\xe8\xbf\x0b\x0e\xcc\x1f\xf8\x15\xe7\xd7\x49\x5c\xd2\xfd\x36\xb7\xd4\xbb\x32\x84\xa5\x3b\x09\x40\x46\xc9\x5d\x33\xf9\x1c\x1e\xcd\xfd\x83\x75\xf4\x78\x36\x06\xb5\x4a\xfe\xf3\x22\x62\x9b\x59\x67\xa0\x19\xf7\xae\x5a\xf4\x57\x74\x85\x6b\x97\xb8\xd5\xf9\xd4\x6c\x39\x12\x7f\xfc\xf9\xb9\xdd\x0d\x5b\x34\x4a\x2e\x08\x11\x4b\x44\x11\xef\xfc\x96\x0b\x47\x17\x55\xef\x1b\x27\x73\x73\xa4\x9d\xf1\x45\x95\xbb\x6f\x88\xd2\x1c\x43\x52\xff\xbd\xa4\xaf\x43\x6
e\xec\x1c\x01\x78\x09\x44\x19\xa7\x26\x0c\x5f\x06\x04\x5c\x99\x86\xf2\x9b\x2d\x44\x76\x4e\xef\x4f\x9f\x82\xab\xfb\xf2\x51\x7e\x5d\x8b\xd2\x4c\xc5\xd8\x98\x56\xf0\x71\x6c\x9d\x51\xdd\xaa\xd8\xc6\xbf\xe2\x46\x1e\x1a\xe5\x1f\x04\x8d\x98\x51\x17\xf4\x74\xd3\x1a\x99\x4d\x2e\xa4\x90\xb5\x3b\xf7\x46\xde\x35\x08\x0f\xdc\x34\xc2\xc6\xdf\x6c\xb6\x40\x0c\x08\xbd\x5c\x8c\x99\x41\x21\xc2\x9a\x92\x5e\x22\x58\x91\x15\x11\x42\x97\xa1\xcb\x6c\x52\x9a\xe5\x08\x53\xe6\x80\x67\x57\x2e\xd8\x1b\x5c\x7d\x05\x30\x14\xa6\xf6\xe5\x98\x97\xa8\x47\x14\xcc\x47\xf2\x90\xde\x83\xfe\xf6\xfd\x28\xf8\x79\x44\xfe\x66\x45\xf6\x37\xb6\xee\xd4\xc5\xe1\x5b\xc7\x62\x35\x99\xa2\x6b\xd7\xe6\x91\xcf\x0d\x3f\x65\x6f\xc6\x44\xa5\x0c\x93\x25\x7d\xa6\x37\x94\x92\xc1\xa8\xfd\x47\xd3\x59\x2c\x59\xcb\x34\x5b\xf8\x82\x18\xe0\x36\xc4\x36\xee\xce\x0c\x77\x77\xae\xfe\x44\xbf\xdf\xf9\x07\x44\x74\x74\xec\x01\x55\x64\xad\x5c\x4f\x42\x65\x5d\x43\x31\x29\xd0\xf4\xc7\x49\xcb\x88\xed\xa8\x35\x7d\xb0\x11\x04\x15\x3a\xfe\xf0\xba\x47\x62\xb4\x9e\x55\xb6\xdf\x33\x34\x8d\x37\x2b\x21\xbe\x64\xef\x95\x12\xce\x54\x7e\x26\xcc\x6e\x47\x33\x10\xec\x9e\xfe\x8b\x36\x54\xf8\x61\x60\x8f\xf5\xdf\x2e\xc6\x3f\x86\xe1\x6a\x2c\xb3\x9a\xdb\x3d\x5f\x6c\x49\x83\x21\xfc\x4e\xf2\x87\x0d\xb5\xe3\x41\x91\x06\xbf\xb9\x39\x41\x44\xd1\x18\xab\xa4\x15\x17\x9a\xe1\x10\x95\x68\x18\xad\x84\xfe\x60\xa4\x67\x44\x83\xca\xee\x18\x29\x98\xeb\x59\xfa\xc0\xc5\x14\xf8\x5b\x30\x6a\x5e\x3a\x0e\x4d\x22\x7a\xfe\x09\xd1\x17\xa2\xd2\x8c\xfb\x63\x6f\xeb\xcb\x51\x5a\x4b\x9e\x9f\xe9\x4d\x11\xf9\xdc\x4c\x90\x7c\x8d\x9a\x4f\x51\xcb\xd8\x0e\x83\x2b\xec\x83\xc8\x4b\xfa\xee\x97\xc4\x3f\x72\x37\xc2\xfd\x3a\xb0\x84\xd5\x7b\xe5\x85\x19\xa0\x14\x49\xdf\x82\x92\xf4\x79\xa0\x44\xa9\xfc\x78\x41\xb9\xbf\x3d\x2d\x85\xa8\xd4\xd8\x06\x2a\x26\x17\xcd\x8e\x61\x49\x42\xc2\xca\x40\x68\xfa\x43\x3f\x53\xf2\x7a\x2a\xa7\x32\x1d\x55\xee\x9c\x0a\xa1\x41\xfb\x0a\x94\xa2\x52\xa3\x7a\xe1\xa2\x9d\xe3\x8e\x8f\x7f\xe6\x82\x1e\x78\x09\x01\xa9\xde\x8c\x48\x97\xba\x82\xee\x09\xcf\xe8\x25\x1d\xb0\xd9\x09\x44\x9
c\xdb\x7a\xef\x2a\xb7\xae\xdb\x4a\x0e\xf9\xbf\x17\xf0\xeb\xba\x12\x3b\xf5\x81\xab\x83\x89\x38\x8f\xe0\x3b\xe2\xd8\x58\xf5\x2b\xfc\x60\xd4\x87\xf2\x08\x63\x4b\xa1\xfc\xb4\xfb\xfb\x59\x4c\x97\x67\x70\x69\x05\x85\x17\x3b\x27\x47\x9c\x4e\x4a\x4c\xff\xf5\x64\xb1\x2a\x3a\xd7\x28\x88\x3b\x16\x41\xe0\x04\x98\xd5\x07\xe6\xaa\xbe\x3b\xaa\xb1\x66\x27\x1b\x95\xc1\xfb\x70\x01\x80\x59\x4b\x4d\xcf\x04\xf0\xe1\x05\x82\x63\x16\xec\x4f\xe9\x16\x3f\xd0\xba\xf9\x58\x3f\x3b\xbd\x93\xdf\xec\x40\xa8\x36\x8f\x91\xee\xb4\xa4\x6e\x37\x01\xed\xcf\xf3\xa4\x96\xd8\x09\x49\x27\xf8\xb6\x49\xa2\xac\xea\x42\x33\xc3\x66\x30\x75\xf1\x21\x7c\x18\x7c\xcc\x5e\x5a\x06\x1b\x20\x98\xa3\x3e\x45\xad\x57\x8f\x9b\xce\x8e\x66\xca\x6f\x53\x8e\x39\x1b\x7f\x8f\x69\xd6\xb2\xb2\xe7\x59\x37\x8f\x0b\x51\x8a\xb5\x4c\xa6\xc7\x93\x1c\x8b\xe9\x14\x28\x79\x6c\xc1\x0a\xd4\x08\xb5\x80\x33\x71\x6e\x27\xaa\x19\x24\x71\x39\xcb\x20\x10\x26\x0c\x15\xac\x9d\xfe\x92\x1d\x89\x8f\x70\xb3\x6f\xfd\x33\xbc\x00\xae\xc0\x0f\x55\x30\xa2\xac\xa4\xc6\xd7\x9a\xf7\xf4\xb7\xe7\xe7\x01\xcf\xc8\xdc\x7f\xeb\x19\x5d\x61\xd2\xc0\x3e\x78\x59\xb9\x7e\xd1\x9c\x0e\x86\xf0\xfd\x10\xe4\x12\x3b\xc1\x52\x73\xeb\x99\x61\xed\x1e\x44\x3c\x13\xb8\xbc\x15\x75\x4e\xe4\xe7\x3f\x3e\xab\x25\x71\x6a\x7f\x24\x0d\x60\x41\x84\x94\x81\x06\x23\x48\x48\x3a\x73\xed\x91\xa0\x24\x14\xd2\x43\x40\xe4\xd1\x15\x6d\xd8\xe5\x1c\xcd\x75\xb1\x64\xdd\xa5\xfc\xd5\xb3\x28\xc3\x07\x2b\x86\xc3\x92\x09\x8f\x9e\x71\xa9\x8d\xc5\x46\x10\x14\x91\x83\x50\x8e\x63\x60\xb3\xc6\xdd\x42\x20\x9a\x1f\xe2\xea\x94\x55\x48\x41\xb3\x9a\xd3\x42\x97\x53\x06\x83\x10\xe3\x70\x8e\xab\x39\xd4\x1b\xa8\x0f\x7d\x0c\x33\x4c\x90\xc5\x9d\x9b\x52\x21\xfd\x08\xc4\x05\xda\x6d\x25\x80\x84\x06\x00\xad\x27\xdb\xad\x4c\xf2\x0b\x4d\x0a\x3f\x10\x9c\x73\x46\x29\x56\x29\x4e\x34\xc6\x4a\x0b\xc8\x6a\x8c\xe3\x2d\xa5\xaf\x41\xeb\xc1\x12\xa1\x9a\xc6\xcf\xd9\x9f\x36\x07\x29\x0c\xa3\x80\xaa\x3a\xea\x8d\xcf\x8b\x24\x8c\x86\xad\x38\x91\x34\x3c\xb6\xf9\x09\x7a\x99\x8b\x09\x71\xe0\x0c\x61\xd1\xf1\x0b\x9c\x98\x1a\xcb\x4d\xc6\x71\x74\x0f\xa
2\xc1\xbe\xaa\x39\x0c\x58\x23\x57\x80\x8d\xce\xf1\xb2\xf2\x0e\x8a\xa8\x40\x68\x16\x4c\xbc\x94\x6d\xe5\x42\x13\xbf\x9d\x3e\x81\xfc\xce\x4d\x0d\xf7\x43\xb0\x16\xd8\x5d\x96\x97\x37\xef\x2d\xe9\xcf\x02\xa2\x40\x00\x11\x85\xf9\xa7\x4c\x37\x8c\x6f\x95\xf4\x50\xd0\xaf\x16\x7a\xdf\x54\xed\x1f\x08\x16\x00\xe9\xe2\xd5\x3e\x2e\x54\x67\x89\xb6\x42\xac\x18\x2d\x34\x42\xde\x2b\x6e\xa8\x78\x34\xf0\xec\x80\x85\x44\xde\xf9\x44\xaa\xf1\xe4\x7c\x9b\x7b\xc1\x8e\xd0\xa8\x03\xff\x0a\x6a\x7c\xf7\x4d\x3d\xc6\x2f\x6d\x7e\x71\x07\x3e\x29\x9b\x03\x99\x81\x66\x0d\xde\x13\x6b\x2c\x02\xaa\x27\xbd\xd3\xf4\x7c\x34\x08\x4e\x4f\x8d\x7e\xd8\xd3\xc6\x54\x6a\x39\xb3\x94\xd7\x6a\xbb\x12\x25\x9a\x9d\xba\x38\x06\xc3\x6a\x07\xe8\x4b\x81\xd6\x85\x92\xbb\xa5\x0a\x6d\xf0\x26\x1f\x2c\x96\xe4\xc9\x77\xfb\x2e\x77\x72\xcb\xf4\xd6\x36\xd0\x2d\x95\x3b\xc6\xce\x82\xef\x45\x98\xdd\x4c\xf2\xc8\x8d\x52\x9e\xc0\x0a\xf0\x69\xaa\x68\x76\x18\xe3\x93\x3e\x39\x30\xf4\xed\x6f\x58\xe1\xb4\x88\x14\xa8\x31\x7c\xdd\x9a\x6e\xa1\xab\xa0\xa1\xe4\x9f\x12\x30\x27\x6f\x57\x36\x1a\x35\xde\x9b\xaf\x71\x16\x67\xad\x78\x40\x20\x53\x72\x3e\x1b\x1a\x80\x61\xca\x20\x5a\x3b\x00\x76\xde\xb8\x3b\x37\x80\xd2\x2a\x35\x8f\xa4\x53\x1a\x00\xd2\x23\xfc\xfa\x9c\x1f\xb6\x22\x65\x6b\xfd\x59\x16\x38\xbd\x73\x5d\xe2\x38\xc0\x0b\x5e\xb2\x6c\x51\x80\x1c\x10\x48\xbc\x7a\x91\x83\x4e\x6d\x56\x18\x3b\x32\x3d\x88\x75\xa7\xe9\x41\x8b\x13\x85\xf1\x32\x6a\x31\x13\x4f\xb1\x53\xeb\x8c\x09\x8c\x05\x49\x54\xb2\x70\x0b\x0e\x6b\xf2\x30\xa3\x4a\x98\x73\xf7\x84\xc7\x5e\x05\xdb\x57\x98\xb5\xff\xa6\x2d\x46\xd8\x1a\xd2\xc3\x85\x23\xf8\xba\x2b\xaf\xc8\x85\x0e\x4d\xcf\xe6\xf4\x55\x22\x2d\xb8\x59\x69\x31\x12\x29\x08\x02\x18\xa1\xb0\xc2\xe9\x13\x31\xb5\xd4\x0b\xda\x57\x04\x8e\x13\x43\xa4\x87\xdd\x18\xe6\xa6\xb4\x65\xae\xf6\x2f\x79\xfa\x60\x72\x68\xdb\x72\xc3\xdb\x5d\x68\xf4\x80\x8f\x76\x73\xa1\xad\xa5\xba\x70\x20\xe5\x5e\xee\xe5\x72\xd8\xf0\x42\x26\xdd\x91\xcc\x80\x0a\xad\x7a\xfc\xb4\x2b\xb2\x92\x29\xa8\xed\xd6\xd8\x01\x02\x32\xf5\xed\xe7\xa7\x8d\xaa\x13\x1a\x23\xe3\xdc\xa2\x58\x7
b\xb0\x1e\xbf\xe8\xf4\xdc\x1c\x54\xfa\x04\xc4\xa2\x5b\xac\xaa\x4d\xeb\x81\xcc\xad\x4f\x01\xac\x24\x6b\xa9\x89\x6d\xe8\xa0\x38\x0d\xb6\xc0\x13\x24\x42\x8c\x2c\xca\x2b\x82\x29\x4c\xc3\xd3\xa7\x54\x33\x63\x6c\xe1\xea\xb4\x15\x24\x61\xb3\xb7\x09\xc4\xce\x81\x8d\xb9\x57\xad\x5d\x76\x6c\xf5\xa3\x3c\x99\x44\x3c\xb3\xc2\x3f\xc3\xc1\x6e\x26\x50\xcf\x63\xf3\x60\x12\xaf\x6f\x31\x64\x4c\x1a\xd9\xe0\x6f\xf9\x5a\xff\xe7\xd8\xfe\x31\x7e\x59\x3a\x8a\xec\x7c\x63\x69\xeb\x9b\x12\xdf\x1e\xe1\x16\xda\xb7\x28\xb6\xbf\x46\xfb\x5d\x28\x49\x1c\x93\x83\xec\xc9\xc5\x39\x32\xef\x22\xb5\x5d\x0b\xf5\x3f\x3d\xfd\xad\xc5\x09\x3a\xf7\xe3\x64\x16\x27\xca\x92\x42\x07\x8c\x11\xf9\xb0\xc6\x98\x67\x27\xc5\x87\xc6\xb0\x81\x68\xa9\xc4\xb3\xcd\x00\x58\x9d\x68\xd6\x00\x91\xff\xb5\xa4\x03\xe5\xf0\x08\xe6\xb5\x3e\x3c\xaa\x58\x43\xc8\xa5\x06\x2a\xfc\x12\x6d\x78\x60\xe8\x5e\x5a\xd2\x4d\x55\x96\x6c\x03\xf2\xed\xb0\x2a\xd7\x2f\x5b\xb0\x7e\xe5\x41\x54\x15\xa2\x3c\x41\x84\xf4\xa3\x03\x56\x3b\x72\xa2\x2e\x67\xbb\x0f\x2e\x95\xbb\xd8\x77\x5a\xa7\x47\x12\x4a\x9d\x91\xcc\x6f\x0b\xe1\xfd\x50\xef\x40\x63\x2e\x6f\xfd\x71\x7f\x13\x14\xc9\x27\xe5\xd6\x79\x1b\x62\xd9\xd4\xec\xe6\x4e\x62\xc4\x5e\x11\x4a\xab\x93\xba\x07\x6c\x36\x28\x81\x80\x7f\xf7\x70\xa2\x69\xac\x9b\x06\xe6\x97\x56\x74\x99\x80\xa8\x8f\x40\xcb\xe2\xb7\x76\xda\x16\xd9\x80\xd6\xe7\xc3\xbf\x18\x3e\x38\xb3\xb2\xa6\xb8\x2c\x42\x1f\xb2\x9b\x1f\xc0\x58\xcf\xaa\xc9\x92\xac\x0d\xc4\xce\xac\x5c\xec\x73\x85\xe4\x3a\x35\x9f\xa5\x3d\xed\xf8\x81\x60\x5a\x8e\xad\x36\xde\x89\x92\x0f\xe7\x95\xf2\x1e\x6a\x5e\x72\xa1\xb1\xb2\x30\x41\x0b\x18\xbf\x65\x81\x09\xa2\x18\x2d\xc7\x9a\xbe\x79\xe8\xce\x1c\x6b\x7e\x4b\x8c\xd0\xad\x20\x31\x90\x47\x20\xf2\xaf\x6d\x55\x18\x94\x3a\x0e\xa3\x4a\xeb\xe1\x0d\x20\x3d\x7e\x55\x85\x8f\xc9\x58\x8a\xcd\xcf\xa1\x45\xa7\x4f\x4b\x99\x14\x9f\x13\x75\xe5\x76\xbe\xb9\x40\xd4\xdf\xd1\xd6\x25\x9e\x50\xe7\x9f\x67\xa7\x59\x52\x93\x89\x58\xc6\xa8\xf2\xe6\xcd\x4d\x95\xb1\xfc\xd1\x93\xbe\xa9\xef\xc2\xd7\x82\x24\xcc\xe0\x97\xc1\x34\x42\x16\x27\x21\xf1\x5e\x58\xf5\xe
b\x4f\x28\x67\x75\xdf\x7a\x39\xab\x71\xf3\xe9\x33\xdb\x48\x08\x37\x76\xdb\x8c\x13\x2e\x59\x14\x9a\x38\xe3\xb8\x6e\x41\x8a\xf1\x73\xe6\xda\xff\x57\x9f\x6c\x31\x99\xfb\xc1\xdc\x71\xaa\x75\xac\x85\x90\xa0\xac\xe9\x58\x8b\x6b\x3a\x2e\xcb\x4b\x88\xbf\xbd\x10\x1e\xd1\x99\x63\x6c\xb0\x22\x98\xf6\x8a\x0b\x21\xd6\x7a\x96\x39\x96\xab\x09\xed\x29\x56\x2c\x89\xf4\x8f\x0a\x5d\x28\x1d\x97\x95\x3e\xbf\xb4\x71\x33\x4d\x07\xb0\xce\x62\x30\x49\xf2\x04\xc4\xe8\xe3\xed\xea\x8b\x15\xa8\xa4\xe2\x44\x53\xcd\x46\xb3\x79\x17\xc5\xf3\x0a\xbe\xe7\x94\xe7\x2c\x42\x29\xf0\x62\x1f\x75\xaa\x69\xbc\xcc\xd8\x6b\xc8\x16\x98\x84\xc8\x65\xa4\xfc\xc6\xc6\x2a\x4e\xab\x5b\xb5\x93\x3b\x41\xb7\x76\x5c\xc1\xb0\x42\x7b\xa1\x85\x49\xf2\xc2\x14\x5b\xfb\xa3\xdb\x27\xe5\x28\x7f\x07\x2a\x93\x28\xb8\x47\x06\xe7\x3a\xce\x59\xed\x15\xc1\xe2\x52\xc6\x64\x8c\x27\x11\x5c\xee\x0c\x26\x00\xf0\xa3\x56\x0e\xae\xbc\x8f\x87\xd8\x4e\xf0\xdd\xd2\x0f\x92\x90\x1d\x3c\xad\xb4\xf6\xf0\x6f\xdc\xcb\x9a\x8e\xbe\x09\xbb\x47\xe6\x42\xb8\xc5\x3b\x47\xba\xae\xc4\xc9\xb1\x0c\xa2\x11\xf5\xf8\x15\x57\x0f\x6b\xee\x7d\x76\x81\xf0\xce\xab\xda\xec\x68\x96\x04\x36\x64\xf0\x92\xa9\x44\xda\x40\x2b\xae\x8f\x8b\x3d\x10\xda\x17\x7a\xae\x9f\x02\x5a\xfd\x51\x00\x3e\x08\x28\x85\xa5\x75\x34\xa0\x5d\x6d\xa9\x93\x69\x98\x8c\xc6\x78\x99\x19\xff\xce\x42\xcf\x06\x41\x14\xd5\xb3\x20\xcd\x35\xe6\x2a\x5d\x3c\xfd\xfe\xf4\xdf\x0f\x8a\x88\x04\x7a\x15\xf0\xb0\x1d\x3c\xf1\x82\x12\x22\x62\xef\x9d\x21\x29\xc3\xa5\x7f\x2b\x36\xf9\x13\xc4\xb2\xa2\x4d\x07\x3c\x9a\xec\x85\x20\xa0\xdf\x02\x02\x74\xa1\xe2\xc7\xaa\xaf\xab\xa4\x2f\x1d\x21\x66\xb0\xd9\xab\x70\x1b\x7c\x1d\x77\x15\x84\xcf\x05\xb3\x85\xc3\x8a\x66\x07\x5a\xce\x1e\x94\xb6\x2f\xff\x76\x1a\xc1\x32\x29\x9f\xfe\xc5\x26\x64\x28\xe2\x6d\xfe\xee\xf9\xb7\xee\x37\x47\xde\x73\xbd\x49\xdd\x83\x6d\xda\x9c\x54\x27\xc4\x4d\x10\x5e\x5e\xe1\x38\x9a\x3e\x52\xfd\x3a\x55\x74\xfa\xa4\x16\x27\x6c\x11\xe4\x4e\x0a\xd6\x56\xe9\x9f\x63\x4b\xcd\xae\xde\x7f\xd2\xb9\x82\x7a\x85\x7b\xee\x92\x59\xce\x1c\xb2\x2b\xa9\x24\xbc\x1a\x6d\xc1\xcb\xf2\x1
9\xd3\x3f\x27\xd7\xd8\x34\x23\xb8\xae\x6c\x73\x3a\x51\x5a\xf6\x3e\xa7\xa2\xb3\xa3\xfa\xe8\x50\xab\x11\xc6\xd5\x21\x73\x8c\xcb\x8d\x62\xb7\xdb\x17\x71\x35\x0a\x45\x44\x01\x5e\xf2\xeb\xb9\x2c\xaa\x0b\x15\x17\x44\x58\x52\xc8\xcf\x84\xd8\xec\x91\x06\xab\xcb\x1b\x2b\xe2\x8f\x02\x2b\xb8\x61\x5d\xba\x3a\xaf\xaf\xb9\xef\x1c\xa2\x0f\x1a\x03\xcd\xc6\x71\xfb\x59\xdd\xd4\x04\x94\xd9\xd4\x92\xef\xa6\xb4\x3f\x9f\x41\x60\x1f\x74\xa3\x9a\x33\x3f\x58\xae\x26\xfe\x44\x9a\x40\x78\x08\x91\xaf\xc9\x27\x51\x88\xb4\xfd\xcd\x72\xcd\xd4\x32\x3f\x25\x48\xa1\x7b\xf5\x50\x4b\x28\x49\x0f\xe6\xe4\x39\xa6\x94\xef\x5a\xf7\xbc\x80\xd7\x1f\x6c\x03\x84\x52\x3d\x0b\xa2\x54\xcf\x24\xc3\x4f\xa1\xed\xa6\xae\x2d\x41\x34\x30\x54\x1f\xf4\x3a\xc6\x1d\x86\x54\x25\x21\x77\xa8\xfa\xc3\x9e\x89\x4f\xbd\xbc\x7a\x6b\x01\x15\xb2\x05\xc7\xef\x56\x74\xbc\xa1\x56\x46\x0b\xd1\x03\x19\x96\x34\xac\x65\x75\x9a\x47\xe5\x67\xd7\x82\x28\x82\x95\x0f\x1b\x5e\x0e\x6a\x02\x17\xf1\x09\x12\x59\xd3\x6d\xa5\x91\xa3\x91\xee\x77\x60\x80\xcc\x33\x76\x95\x30\x3d\x58\x7c\xc4\x1d\x60\x09\x10\x3d\x40\x28\xd4\xb2\x78\x1d\x57\x2d\xd3\x7c\x2a\xe2\x0d\x54\x44\x3f\x36\x86\x7f\x82\x6e\xc7\xda\x0c\x32\x38\x68\x5a\x92\x87\xa8\x43\x17\x73\xbd\xde\x43\x06\xbb\x1e\x01\x00\x96\xd4\x98\x14\x8e\x54\x63\xe5\x4f\xd0\x6c\x87\x2b\x4c\xb4\x2c\x75\xb1\x6f\x9d\x35\x13\x44\x13\x82\x9d\xfe\x73\x4a\xc0\xbb\x85\xcc\x89\xa4\x91\xfb\x90\x4a\x8f\x34\xab\xd9\x08\x47\x8b\x8f\xcd\x7d\xdb\xc6\x7b\x31\x80\xed\x06\xa6\xe5\x74\x5e\xff\xc4\x8f\x7e\x98\x06\x37\xb9\xc2\x7c\xfc\x37\x87\xec\x50\x63\xe0\xce\xff\xc6\xfc\x61\xeb\xbb\xd2\x9a\xce\xbc\x95\xd2\xad\xac\x09\x21\x8a\x4a\x0f\x9f\xe3\x42\x27\xf9\xd4\x54\x4c\x3a\x2d\x83\x38\x4a\x30\x67\x99\xbb\x6c\x0f\x40\xe3\xad\x8f\x61\xd0\xe7\xd1\x84\x3a\xe5\xb1\x52\xd7\x50\xa1\x07\xc9\xf1\xc9\xff\xbc\x67\x79\xed\xb4\x9a\x4f\x0a\x17\x53\xd5\x8d\x6c\x04\x50\x9d\xcd\x6e\xb7\x9c\xc0\x78\xce\x3a\xf8\x9b\xad\x45\x32\xc5\x74\xb7\x6b\x4f\xef\xbf\xe7\xd7\xa4\xe1\xaf\x87\x7c\xa2\xdc\xda\xb8\xe6\x53\x5d\xae\x28\xd8\xf0\xfc\x89\xbf\xcd\x88\x76\xdb\x3
9\x15\x9f\xc4\xea\x5f\x5e\xd8\x95\x1d\xe1\x1d\xbb\x83\x68\x2b\xe2\x93\x12\x94\x9e\x0d\x06\x40\x67\x3a\x06\xf1\x83\x69\xb0\x82\x24\xfe\x7e\x50\x09\xc3\x43\x83\x65\x15\x9c\xaa\x3e\x0a\x34\x2e\x34\xbb\x15\xad\x3b\x50\xb0\xfb\x76\xdd\xa5\xf1\x70\x4b\x56\x2e\x19\xc2\x9b\xc7\x18\x10\xc2\xcb\xec\x24\x3b\x07\x67\x13\x46\x5d\x2c\x7c\x7d\x2d\x04\xe3\x01\xb7\x5f\xd4\x5a\x56\x9c\xde\x6b\x07\xea\x2e\x19\x14\x29\xb7\x95\xd8\xaf\x50\xb3\xed\xce\xf1\x37\x9e\x49\x3a\x22\xde\x86\x90\xa3\x6a\xd7\x8e\x2d\x31\x55\x8f\xcd\x86\xe6\xf0\x77\x64\x2d\x16\x4a\xd6\xed\x52\xa5\xf1\xd9\xaf\xda\x58\x28\x89\xcf\x9a\x2c\xd7\xac\x20\x42\x47\xb8\x6b\xe5\x9b\x54\xe7\x68\x2f\x13\x45\x4b\x7f\x3e\x58\xcd\x40\x1a\x5e\x6c\xf0\x1b\xed\x57\x81\x90\x44\x13\x81\x71\x8a\x47\x05\xac\x3e\x5c\x27\xbb\xfd\xde\x43\x2d\x31\x33\xbc\xf7\xe5\x59\x7d\xf7\x2b\xcc\x08\xdd\x9b\x0b\x0b\x95\xc3\x5f\xa3\xaa\xb4\xde\xdf\x93\x43\x55\xf6\x8c\x08\x87\x5d\x80\xf0\xdb\xbd\xee\x37\x03\x55\xed\x60\xb7\xcb\xb2\xc2\x44\xe2\xf4\x1f\x94\xde\xa2\xbe\x94\x21\x93\x89\xe2\x38\xa7\x04\x37\x80\x50\xa2\x7b\xcc\x63\xe2\xd7\x8d\xfe\x58\xdb\x47\xad\x0d\x65\x1f\x87\xea\x17\x8e\x31\x3c\xb2\xca\x39\x10\x40\xaf\x0b\x25\x11\x01\x5e\x20\xa2\x52\xd6\xf4\x21\x85\x86\xfe\x74\xd4\x78\xc0\x8d\x98\x6b\xca\xad\xa9\xa0\x2d\xab\x01\x63\x60\x8a\x9b\x57\x4d\x73\xfb\xcb\x41\xc0\xa5\x4f\xc2\xa8\xb8\xea\x90\x82\xc7\x6a\x47\x34\xa9\x2d\x2d\x5c\x10\x64\x57\x45\xb4\x60\x3b\xa5\x68\x60\x6f\x86\x91\x19\x56\x71\x84\x9f\xcf\xbe\x67\xdc\x2f\x2f\x55\x5c\x7e\x6a\x58\x49\xf4\x17\x07\xd0\xe2\x91\xa0\x25\x78\xca\xce\x22\xf1\x6a\x0b\x58\x29\x35\x98\xb2\xf2\x6c\x4d\x23\x44\x11\x1b\x0c\x2c\xaf\x26\x5f\x63\x1d\xdb\x2d\x9c\x8b\x93\x04\x90\x19\xf7\xae\x70\xc7\xaf\x42\x66\xe3\x37\x33\xb1\xc8\x22\x68\x75\xfa\x3f\xc3\xb3\xbf\x02\xe0\xe2\x1e\x60\xeb\xf6\x77\x6f\xc6\xf4\xed\xec\x9a\x6f\x53\x4e\x78\xc6\x84\xc9\xa2\xcf\xed\x69\x0d\x44\x3f\x43\x8b\xee\x02\x0c\x75\x81\xe9\xa5\x5c\x4c\x3e\x1d\xf0\x8c\x3a\x24\x5e\xd9\x51\xb5\x59\x55\x3e\x17\xb0\xa9\x56\xc6\xf8\x6b\xa7\x4f\x28\x73\xf7\x4f\x8a\x48\xb
9\x83\x01\xe6\x22\x44\x47\xa8\xe6\x5a\x2f\x6e\xda\x12\xf9\xcb\xf9\x23\xf5\x9e\x50\x13\x6a\x0e\x0b\xbb\xd7\xb8\x38\xa0\xb3\xf4\x1d\x36\x0f\x9e\x39\x85\xf0\x05\xc6\xc3\x51\x1a\x79\xfb\x44\xc4\x4f\x6f\xdc\xf1\x6d\xc4\x4e\x0e\xc9\x0c\xc2\x44\x2f\x6a\xe5\x1c\xfb\x3a\x10\xb2\xb8\x1e\x96\xca\xf5\xc9\x95\x25\x19\xcf\xab\xcb\xbb\x85\xa8\x6d\x3a\x2c\x15\xd9\x35\xc9\x17\xfd\xf4\x51\x94\xf4\x20\xba\x94\xe0\xd2\xfb\x80\xe2\xe7\xf8\x65\x7b\x74\xaa\xd8\xce\x9b\xf2\x50\x44\xa2\xf8\x6f\xe6\xa5\xbc\x03\x61\x70\xe9\xa4\x12\x49\x45\xd3\x43\xd9\xbe\xc0\x6e\x66\x36\xeb\xa1\x01\x15\x92\x77\xdb\xc2\xfc\xb3\x3c\x88\x5e\xc5\x88\xe4\x99\x98\x1f\x90\x8b\x16\xc9\xe2\x44\xdb\xfb\xd4\xea\xd8\x1f\x64\x54\x99\xac\x60\xc9\x0e\x29\x47\x0f\xac\xa4\x6b\x71\x5c\xd6\x58\x8c\x01\x4a\x93\x3e\x9c\x3a\x25\xf0\xb2\x4e\xf7\xd7\x40\x44\x45\x03\x05\x65\xfc\x92\x76\x37\xa6\x3d\xcc\x8c\x69\xb6\xff\xe2\xe3\x1a\xba\x66\xfe\xc4\x21\xfe\x40\xc3\x5b\xd6\x75\x5e\x69\x2b\xd6\xd8\x6a\xc2\x7f\x2f\x9d\xd7\x86\x9f\x63\x9f\x2f\x9a\xa0\xdf\x70\x3f\xb5\x92\x56\x8c\xbe\xbd\xc0\x9a\xbe\x17\xce\xaf\x5e\x95\x98\x08\x74\x51\x06\x9b\x44\x88\x3c\x6a\x2a\x31\x4f\x37\xca\x3a\x9f\x67\x6a\x3d\xd2\x06\x6c\x6f\xd0\xe4\xd7\xaa\x3f\xac\x32\xd3\xdd\x82\x9d\x20\xf0\x04\x7f\x89\x5b\xae\xde\xb5\xbc\x8c\x2c\x36\x09\xcd\x2e\x06\x59\x48\x11\x76\x28\xa3\x31\x4a\xe5\x6d\xc1\xa1\x20\x54\x9c\xa7\xf1\xa1\x00\x0e\xde\x19\x97\x55\xc5\x71\xeb\xc8\x90\xc7\x4b\xc3\x7d\x8c\x2e\x46\xcc\x39\xfb\x14\xe4\xb5\x20\x74\x6e\xe5\x06\x05\x2e\x15\x1a\x3d\x13\xd8\x5a\x7e\x58\xb8\x68\x48\x82\xe2\x8a\xe4\xdf\xdf\x1c\xe0\x14\x29\xde\x45\xb2\xd0\x0d\x75\xdf\xfa\xa6\x72\x33\x12\x28\x3f\xb8\x88\xd8\x25\x2a\x31\x1f\xea\x16\xb1\x73\x76\xc9\x74\xdc\xfa\xd2\x30\x4a\xef\x20\xb6\xbb\x50\x9e\x42\x24\x29\x36\x36\x6c\x55\xdb\xed\x94\x9a\xff\xf4\x81\x2e\xa7\xe0\x24\x8b\x38\x5b\xaf\x16\x5b\x13\x13\x28\x51\x6f\x81\xc0\x39\xd7\xad\x26\xe7\x6b\xed\xf7\xd2\xe5\x15\x25\x86\x4d\x2f\xe4\x31\x7e\x41\x7a\x02\x2c\x18\x69\x71\xb1\xdc\x75\x2b\xab\x5c\x7d\xb5\x64\x81\x08\x30\x59\xa2\x07\x40\x8e\x78\xe
c\x70\x2e\x16\x66\xe5\x5b\xf4\x49\xb0\x99\xf3\x2a\xd4\x39\x74\x0b\xce\x5a\xf0\x1e\x5c\x05\x77\xce\xb2\x7e\xdd\x63\x40\x1f\x3c\xc0\xa5\xae\x2b\xb1\x4b\x34\x40\x72\x69\x8c\xa2\xd2\xdd\xe1\xa0\xbc\xd1\x6a\x79\xe7\x92\xc0\xa2\x19\xbc\xa2\xce\xc7\x02\xee\x94\xa0\xb4\xc2\x09\xac\xff\xe3\xc2\x7f\xef\x29\xc9\x30\x52\x81\x07\x70\xfc\xac\x1e\x74\x02\x34\x23\xf0\x94\x54\x26\x1d\x29\xa4\x34\xa5\xde\x3d\xdb\x05\x5f\x7a\xa2\x6a\xce\x6b\x57\xe4\xf4\xdd\x37\x8d\xe4\x4c\x96\x22\xb0\xa9\x7b\xd0\x5f\xc1\x41\x82\x9d\xa4\xe1\xf0\xc7\x99\xbb\x32\x77\xce\x34\xaf\x6a\xbb\xb6\xc7\xdd\x72\xb0\x5e\xb9\xd0\xa6\xc2\xa5\x82\xdb\x53\x07\x93\x78\x67\x89\x04\xb8\xc2\x29\xd2\xbd\x1a\x2d\x6e\x05\x00\x7a\xbd\xf3\xdb\xe0\x15\xdd\x70\x20\x43\x40\xb7\xdb\xf1\x1d\xc9\xba\x67\xa6\xcb\x03\x60\x41\x5f\x68\xee\x28\x41\x8a\x4b\x59\xa0\x32\xd0\x06\x84\xd7\xea\xb0\x87\xc4\x29\x09\x65\x1b\xf4\x92\xfe\x99\x74\xb6\xbd\xc7\x26\xbf\x22\xc1\xe9\x56\x9b\x28\xbc\x85\xa2\x92\x7c\x4b\xe9\x72\xf7\x7f\xac\x67\x77\x69\xc6\x0c\x46\x1b\x92\xc7\x0b\xf0\xd5\x91\x2f\xc4\xba\x72\xc7\x01\x89\x62\x2f\x44\xc9\x5b\xd8\x46\x72\x17\xe5\xe7\x0b\x39\x4a\xc9\x99\xe4\xe8\xae\xea\x70\x3f\xf6\x26\x2c\xb6\xd1\xc7\xde\xaf\x29\x21\xd6\x27\x67\xce\x7f\x8c\x3d\x88\xfc\xbb\xc2\xe5\xb2\xa6\x3d\x62\x74\xc9\x2b\xcd\x3f\x6b\xfa\x3e\xe3\xa5\x76\xfe\x27\x79\x28\x65\xf0\x1e\x44\x99\xd7\x71\xba\xcf\x91\x05\xf0\x9f\xce\x55\xaa\xdd\x50\x23\xa0\x53\x95\xac\x5a\x68\x98\x3e\x80\xc1\x72\x4e\x1c\xfd\x70\x40\x69\x2b\x3f\x37\x02\xcd\xec\x3c\x98\x26\x26\x07\x2e\xbc\xb0\x96\x95\x4a\x93\x3c\x78\xe4\x85\x78\xaf\x5b\xbe\xc2\xbe\x61\xc5\xf5\x33\xd2\xa8\xb0\x89\x7e\x8a\xbb\x5f\xbc\xc5\x16\x30\x59\x58\x74\x47\x8f\x0f\x4d\x68\x19\xa7\xd7\xaf\xe1\x4c\xd6\x81\x97\x5b\xfa\xe2\x7e\xdd\x5a\xda\x4b\x2d\xaa\x2b\x0c\x38\x7c\x24\x6b\x3a\x6e\xab\xc8\xae\xf8\x4f\xa9\xbb\x56\x98\xdc\x73\x47\x00\xb0\x6a\xe1\xec\xf0\x95\xce\x8d\x55\xfc\xac\x46\x7b\xf6\x8d\x66\xc4\xb2\xea\x42\xcb\x6b\x53\x6f\x1d\x00\x0a\xfb\xf7\xf8\xc1\x50\xfa\x5b\xf0\xcd\xb8\x49\x08\x81\xe7\x17\x2f\x5d\x5a\xd1\x56\xab\xa
0\xec\x50\x78\xda\xb8\x80\xb4\x08\x84\xe4\x1d\x48\xf0\x33\xa2\x8c\x50\xe5\x7e\x91\x5d\xba\xef\x08\x39\xba\x01\x10\x7e\x72\x49\xe5\xea\x39\x11\x9d\x99\x30\xbb\xcd\x98\x61\x54\xd7\x0b\x7a\x86\x89\x84\x71\x10\xfd\xe5\xce\x68\xeb\xf5\x55\x4f\xda\x3d\xfb\x24\x60\xe5\x36\x68\x97\xaf\x9b\xa5\x8d\x9e\x6d\xff\x90\x07\x84\xb9\x17\x96\x9d\x9b\x2e\x07\xcf\x44\x20\xef\x2d\xb5\xef\xfe\x57\x51\x8e\x73\x53\xbc\x63\xd2\x7b\x02\x56\x50\xc6\xe6\xcf\x6f\xa1\x74\xbe\x30\xa8\xb6\xe1\x2b\xea\x4c\xad\x85\x92\x34\x1c\x85\xc1\xa8\x64\x17\xf5\x6f\x27\xa3\xd2\xc6\x63\x70\x64\xe1\x39\x31\xdf\xee\x36\xc6\x73\x9c\xfa\xa1\xd9\x90\x2d\x08\x62\xfd\xfd\x1a\xa8\xb7\xa1\x61\xb0\x85\x8e\x39\x05\x29\x29\xdc\x00\xce\xa5\x69\x1d\x78\x45\x4f\xa3\xe6\x34\x63\x49\x2e\x38\xfb\x1a\x1e\xca\x8c\x57\x89\xc1\xa7\xc5\x78\x23\x5f\x11\x58\x63\xd9\xe9\x79\x1f\x02\xd0\xfc\xcb\x72\xb7\xc9\x52\x28\x35\x35\x57\x94\xa1\x1f\x00\xb2\x3d\xc4\x7d\xac\xdf\x9c\xa8\x0b\xd1\xd6\x86\xd5\x0e\x9e\x65\xcd\x4b\x64\x6a\x6e\x11\x98\xdc\xe4\x72\x06\x23\x14\xc3\x5d\xc9\x97\x53\xa6\xf2\x0e\x9a\x00\x1f\x71\xb8\xb6\x7d\x08\x35\x2c\xf5\xcc\xf8\xa0\xb7\x42\x44\x9c\x01\x08\x4a\x1d\x80\x4a\x03\x7e\x74\xf6\xa8\x6e\x2b\x74\xb0\x8e\xdd\xeb\x43\x5f\x5e\x07\x16\x13\x77\xcb\x59\xca\xb5\x60\x2d\x93\xc7\xbf\x16\xb5\x5d\xeb\xb9\x77\x22\x1f\x3d\xac\x57\x05\x89\x3e\xa9\x4b\x75\x07\xf6\xc3\x7f\x62\x0e\x4e\xae\x12\x74\x2c\x3a\xa1\x44\xcc\xd1\xdd\xbe\x8a\xdb\x7a\xef\x80\x0b\x7c\x2a\x9e\x1f\xb3\xc6\x46\x6c\x73\x5a\xb5\xcb\xd5\xe0\x74\x31\xd8\x9c\xa5\xc8\x6f\xe0\x6e\x8f\x06\x80\x75\xe8\x76\x1b\xd7\xa6\x8a\x06\x1e\xcf\x1f\xb0\x34\x5c\x56\xe0\xda\xc8\x39\x86\x40\xaf\x76\x04\x06\x02\x47\x89\x7f\xfa\x56\xbc\x49\x63\x97\xbd\xfd\x2c\xdc\x22\x8a\x60\x20\x6b\x3e\xaa\x79\xd9\x0f\xc7\x80\x26\x82\xf9\x9a\x8a\x3a\xa8\xa3\xbb\xee\x41\x34\xa5\xef\x4e\xd1\x4f\xac\x0d\x04\x18\x26\xe3\xd2\x39\x97\xd8\x5d\x40\x50\x55\x76\x18\x99\x6f\x00\x92\x6d\xf7\xe4\xa2\x58\x06\x75\x1f\xec\x31\x39\xd9\x02\xcd\xdd\x94\x78\x3e\x46\xf4\x47\x43\x82\x7d\x53\x5f\x19\xcb\x4d\xfe\xe9\xb1\xd4\x10\x39\xb
6\x92\x1e\x2a\x63\x3f\xc8\x11\x70\x2c\x27\x0a\x04\x63\xd7\x82\xdf\x8a\x3f\xcf\x1b\xb9\x7e\x73\x5d\x38\xb0\x9b\x7e\xa4\x3d\xb2\xf7\x52\x97\x06\x05\xb5\x6a\xa7\x95\xa7\x80\x80\xc8\xe7\xd5\x2f\x56\x9f\xde\xb8\xac\xfb\xb6\x8d\xd8\x2e\x5f\xd0\x75\x30\xff\xc7\xba\xe1\x16\x7c\xce\x29\xe9\x50\x4d\xfd\xe2\x4b\x74\x95\x76\x9e\x55\xe4\xaa\x69\xa4\x04\x2b\x0b\x45\xdf\xdf\x8f\xb7\x97\x60\x47\xc7\xe4\xd3\xc3\x04\x34\xa3\x6c\x8e\xeb\x0c\x67\xbb\x78\xf5\x89\x77\x2c\x0e\x84\x66\xd1\xb1\xca\x0e\x84\xbe\xb2\x91\x30\x23\x04\x5d\xb0\xd3\xdb\x13\x91\xad\x6b\x1e\x97\xa9\xa6\x17\x8f\x93\xf6\x8a\x29\x43\x60\xf7\x88\xf0\xc2\xd2\x29\xa0\x32\x7f\x8f\xed\xdf\x3c\x5e\x74\x56\xf4\x33\xa0\x58\xae\xaf\x4f\x82\xa8\xad\x57\x03\xbd\xdf\x09\x34\x45\x8e\xe5\x8f\x51\x8d\xe9\x9a\xa4\x5a\xc2\x4f\x15\xcf\xaf\x0b\xdd\x06\xb7\x0f\x7e\xac\x83\x71\x0b\xf7\xa3\x40\xab\x7b\xb2\x6a\xc9\x6c\x41\x92\xbe\x05\x7d\xae\x05\xc9\x45\xb6\x8e\x4f\xc9\xdb\xd8\xb6\x93\x71\x50\xaf\x50\x09\x60\x9e\x41\xfe\x25\x61\x34\x74\xc9\x8e\x02\x76\x55\x13\xef\x81\x29\x8a\x5d\x55\x37\x97\x4a\x08\x98\xec\xad\x6f\x7e\xae\x1e\xf0\x35\x66\x56\x2f\x5c\x50\xeb\xbe\xe4\xdd\x5a\x58\x27\x68\x01\x45\xbd\x13\x05\x20\x77\x33\x21\x6a\x8e\x9c\xe3\x1d\x9e\x4b\xd3\x9d\xf1\xdb\x9a\x91\x34\x41\x97\x60\x06\xad\xc5\x2d\x62\x05\x42\x16\xd0\x40\xa6\x0a\x2b\xea\xa8\x17\xeb\xbc\xc3\xc6\x4b\x1b\x8f\x5a\x87\x10\x75\xb8\x89\xb6\x8c\xb6\x4a\x93\x36\x72\x28\xde\xf5\x86\x14\x26\xbc\x20\xeb\x9d\x38\xed\xc8\xc9\x3c\x30\x65\x24\x00\x4c\xae\x4d\x5e\x44\xd9\x2f\xa6\x93\xa3\xda\xf4\xe3\x9e\x12\xa7\xcb\x9e\x8c\x8b\x0f\x43\xb3\x03\x79\xf2\x4a\x8a\x31\xfa\xc5\xb9\x4a\xa8\x69\x02\x12\x6e\xaf\x31\x1d\x6d\x13\xf2\x49\x6b\x15\xb1\xbc\x19\x9a\x2e\x00\xa2\x5e\x61\x07\xdf\xbb\xda\x2f\x74\x17\x4f\xe1\x12\xb2\x9b\x47\x96\x3b\xa0\xec\x71\xd3\x1b\x71\x9d\x57\x8d\x06\xc4\x18\x86\xca\x47\xb7\x9b\xf0\x2b\x43\x2f\x94\xa4\xd9\x95\x72\x7f\x83\xe1\x85\x68\x87\xf7\x48\xbc\x5c\x3d\x4a\x9c\xdb\xfa\xcd\x3f\x84\x5e\xf8\xe3\x5f\x9d\xd9\x53\x7a\xce\x7c\xe5\x69\xed\x64\xd5\xea\xf5\xfb\x2a\x92\x30\xe6\xa
6\xac\xf1\x83\x21\x21\xce\xed\xea\x02\xa5\x36\xf9\xa6\x7e\x47\x08\x98\xe6\x9a\xf2\x6e\x0e\xe4\x26\x28\x6c\x99\xfa\x9c\x16\x22\x2a\x4e\x14\x8f\x51\x11\xa1\xf3\x0b\xc4\x00\xf5\x3e\x42\x88\xae\x2a\x09\xd0\x3d\x3b\xe1\x44\xdd\x04\x2d\x20\x50\x93\x61\xf7\x26\x0a\x37\xca\x8b\xa9\x35\x2e\x81\x68\x0a\xa3\x80\x6d\x43\x28\x67\xa6\x93\x33\x7a\x70\x40\x9d\x3c\xed\x51\x2a\x4a\xaa\x29\x8b\x23\xe5\x00\x20\xcb\x3a\x7f\x2d\xf1\xd9\xb3\xf8\x11\xb2\x31\xfa\xdd\xb9\xcc\x1d\xe4\x44\xdf\x20\x49\xe6\xc7\x09\x74\x82\x02\xe5\x52\x0c\x08\xac\x72\x26\x63\x2a\x04\x71\x2f\xbb\x1e\xd5\x59\xb6\x74\x08\xba\x32\xe9\xd7\x85\x12\x58\x22\x74\x72\x88\xa6\x47\x0d\x91\xd5\xfe\x59\x28\x4c\x3b\x7e\x25\x4c\x17\x95\x8c\x3b\x96\x1b\x12\x8a\xbd\x20\xef\x02\x74\xb5\xe0\xf2\xca\xf8\x85\xe0\x5a\x99\x00\x8e\x9b\x0c\xac\xe3\xb0\x5e\xe8\x06\xb1\x6c\x08\x96\x8c\x42\xb8\x6e\xe3\x97\x16\x8a\xe9\xab\x91\x76\x09\x04\xc6\x71\x79\xc8\x33\x2d\x85\xf0\x96\x5c\x19\x46\x01\x2e\xf3\x68\x2e\x75\x9e\x5a\x4b\xdc\xe3\x2e\xbd\x01\x24\x97\x62\x73\x73\x2a\xdd\xe2\x39\x52\xe4\x9d\x5c\xd7\x2e\xa3\x3f\x46\x97\x86\xa1\x52\x06\x15\xa4\xcf\x3f\x31\xa5\xaa\x68\x3f\xc9\xe3\x1b\xbd\x64\x1f\xbb\x06\xac\x56\xa8\xb0\x3c\xb4\xb4\xca\xc8\x1a\xc9\x03\xa1\x71\xf0\xc5\xf3\x77\x52\xde\x20\x08\x82\xa8\xed\x71\xd6\x6c\x7c\xa5\xc3\x40\x7e\xac\x65\x23\xee\xfa\x85\x62\x99\x3b\xe7\xd6\x25\x0c\x87\x35\x9d\x96\x54\xdc\x12\x31\xd5\x07\xba\x44\xa4\xe2\x7c\x8c\xec\x17\xc7\xfe\x52\x8b\x50\x7f\xf5\x37\x1f\x48\xfc\x8c\xad\xea\xb9\x61\xb8\xd0\x29\xc4\xba\xae\x83\x01\x80\x14\x9a\x3d\xe0\x75\xa2\xd9\x6e\x91\x07\x6d\x79\x11\xec\x45\x3f\xc7\xf7\x03\x89\xc5\x22\x16\x2a\x24\x7b\xdf\xd2\x9b\x28\xce\x6d\x46\xd6\x3d\x92\x76\x09\x54\xf8\xd9\x2d\x3b\x79\xa2\x95\x67\xd3\x4f\xfd\xb9\x4f\x8b\x03\x2a\xc3\x4e\xa7\x00\x2e\xc2\xa2\x8d\x41\xb4\xdf\xa8\xef\x86\x72\x43\xaa\xc5\x95\x06\xc0\x8c\x25\xbf\x00\x27\x0c\x64\x22\x71\x53\x50\x80\x96\x7f\x2c\xc6\xe2\x65\xee\x00\x24\x12\xa6\x86\x88\xc3\xa8\xbc\x30\xa7\xd4\x17\x6e\xbd\xb2\xd0\xd5\xef\x17\x99\x5b\x81\x3e\xc2\x64\xc0\x69\x5d\x56\xbe\x9
f\xa9\x23\x93\xd0\xa6\xf8\x0e\xc8\xba\x47\xb1\x66\x9a\x91\xf5\xee\xdf\xf6\x8d\x6f\xc3\x69\x82\xce\x22\x89\x58\xb7\xf5\xd9\xff\x75\x22\x18\xf1\x36\x1e\xd9\x3a\x15\x9d\x39\x15\xcf\x6d\x21\xca\x69\x26\x76\xc5\xa4\x20\x4b\x7e\x96\xcb\x0f\x74\x0e\x95\x3f\x3b\xfd\x52\x18\xac\x50\x37\xc0\x2d\xe7\x23\xc8\x02\xfd\x1e\x92\x09\x33\xd2\x9a\xde\x22\x0d\x83\xb3\xdb\x5d\x57\xf8\xc2\x0c\x3c\x23\x86\x25\xd9\xdd\xc1\xe5\x42\xc6\xfa\x0d\xa7\xea\xa0\xaf\xb6\x87\xec\x1e\x1c\x43\x84\xd6\x5c\x97\x2d\xad\x65\x81\xa7\xec\x2b\x4f\x79\xf9\xd3\x71\x8a\x2e\xef\x78\x26\x0c\xf5\x18\x89\x34\x1c\xae\x3b\x46\xb4\xc5\x10\x91\x3d\x1b\xbf\x72\x59\x03\x8e\xd3\x03\xc9\xb9\x26\x1c\xd8\x59\xc8\x7d\x75\xfa\x3b\xf9\xd9\x37\xbc\x10\xee\x26\xd5\x7b\x55\x5d\xd8\x36\x38\x37\x08\xbc\xe2\xd1\x46\xeb\xaa\x3b\x0f\xe1\x2b\xe3\x1b\x88\x54\x66\xee\x7c\xed\x1a\x10\x5a\x1b\x04\x2a\xc1\x26\x4c\xed\x31\x46\x34\x43\xb6\xe2\xea\x70\x51\x91\xf5\xfa\xda\x43\x2f\xd1\x17\xea\x89\xe6\xf0\xd7\xa4\x1e\xdd\x70\xfe\xa0\xb8\xfe\xa0\xa4\x74\x79\xad\x1b\x68\x3f\xab\xb1\xac\x62\x33\xff\xb7\x17\x79\x5a\xfe\x1d\x04\x61\x6e\x33\xf1\x37\x59\xe7\xb4\x12\xe6\xb1\xa5\x48\x47\x81\xe6\xef\xba\x6b\x38\x3b\xd6\xca\x76\x52\xeb\x0f\x1c\x91\xbd\x93\xb9\x1f\x34\x98\xd9\xdf\xb4\x66\x07\x52\x0b\xe6\x3b\x6d\x00\xa9\xf4\x53\xf1\x3b\xbe\xc5\x1c\xe6\x34\x3c\x2f\x94\xe4\x80\x22\xd9\x91\x54\xf6\x05\x41\x3a\x80\x4b\x53\x5f\xbd\xd3\x3f\x01\x5a\x0d\x67\xb5\xd8\xef\x5c\x7e\x02\x2b\xbb\xc2\xe3\x1b\xea\xd7\xe8\xe0\x19\xb7\xe5\xfc\xdd\xa1\x76\x10\x7c\xe2\xa5\xcd\x4d\x55\xe4\x81\x90\x38\xf0\xed\x1a\x4f\x89\xc9\x18\x0c\x4c\x94\x61\x44\xe9\xcd\x2d\x83\x5c\x6e\x1c\xa6\xe3\xb0\x6e\xc1\x0a\x97\xd7\xf4\x2a\x41\xdf\xea\xb5\x24\xb2\xbb\x35\x3d\x8d\xec\x0a\x03\x34\x28\xd4\xc4\x91\x93\xa2\xe6\xbc\xb8\x17\xb6\xc3\x03\x21\xff\x9a\xb2\xd0\x76\xdb\x82\x65\x07\xf2\x96\xfa\xf4\x73\x8b\xc8\x87\x1b\x8f\x40\x5c\xce\x5e\xb5\x16\x3b\xb5\x16\x07\xc0\x6a\x83\x69\x90\x0e\xc2\x60\x48\x02\xb8\x88\x42\xa8\x90\x02\xb8\x8a\x6e\x9d\x65\xfe\xbf\x95\x40\xe5\xfd\x5d\x10\x65\x15\xab\x6c\xc9\x1d\x5e\xa
b\xc4\x79\x4b\x8b\xb0\x16\x26\x58\x4a\x1e\x03\xf2\xc7\xd9\xb2\xd8\xaf\x05\xe6\x6f\x19\xd9\x37\x12\x45\xcd\xea\x88\xbc\x57\x6e\x23\x5c\x7f\x43\x12\xfe\xc5\xa4\xc9\x36\x36\xe2\xc6\xc8\xb1\xa4\xb7\x81\xd5\xac\x20\xc4\x62\xb3\xa9\x6f\xe8\x99\xc0\xe5\x7c\x39\x7d\xe2\x95\x5f\xfc\x1c\x3b\xf4\x63\xc9\x11\x96\xdf\x37\x3c\x30\xd5\x5f\x35\xeb\x13\x80\x2a\x08\x28\x2e\x3b\x4f\x3e\x29\xd4\x6b\xb9\x8f\x2c\xb1\x58\x8e\x64\xc5\xdc\x2b\xbe\x78\xae\xcc\x0e\x08\x9b\xb3\x10\x4f\x9a\x1c\x37\x41\x95\xc6\xe7\x42\x8d\x3c\xcd\x1c\x23\x43\x5d\x93\x1e\x62\xfb\xc4\x47\xa4\x5a\x40\x65\xfc\xee\x5a\xd8\xb7\x9f\xf4\x73\xfc\xe0\xef\x12\x9d\x3e\x41\xc1\x8f\x72\xe9\x10\x9f\x36\x02\xe8\x93\xbf\xff\x88\xce\x9c\x48\xcd\x80\x82\x36\x44\x57\xd2\x5c\x97\x6f\x5f\x7d\xeb\x2f\xe5\xd2\xfd\x3e\x4a\x79\x1d\xd9\x1c\xae\xa6\x2c\x38\x51\x7b\x1d\x68\xe0\x72\x97\x59\x65\x99\x00\xce\xa3\xce\xf5\x83\x82\x9f\x79\x2f\x61\xe2\x78\x07\xac\xbf\xc8\x61\x88\x05\x26\x64\x7b\xa3\x59\x21\xc1\xff\xb8\x8b\x08\x87\x25\xe6\x38\x1b\x70\x46\x7d\x2c\xe8\xf3\xf7\x9d\x7f\x6b\xca\x5b\x28\x82\x77\x03\xec\x2f\x47\x77\x0f\x05\x12\xe7\xb5\xbd\xd8\xc8\xaa\xee\xa4\xd4\xe2\x5a\x98\xdf\xd5\x06\xf8\xb1\x9b\x52\xb6\x54\x16\xcf\x30\x95\xc3\xf6\x57\x2d\xb1\xfb\x07\xc4\x4f\x5a\xc7\xd2\x43\x55\xbf\x25\x1d\x97\x30\x18\xec\x37\x1b\x96\x30\x08\x32\x00\xd6\xea\x16\xa0\xba\xc2\x62\x7a\xc1\xb6\x7f\x2f\x51\x54\x47\x47\xaf\x9f\x21\x7e\xa5\xe2\x70\x72\x82\xee\xc8\x15\x94\xf9\xf2\x56\x00\xa6\x20\xf6\x3b\xfe\xdb\xc3\x1f\x8f\xca\x8e\xa4\x86\xa7\x3f\x86\x27\x7c\xfa\xa7\xc3\x46\x11\x05\x10\x65\x31\x3d\x9b\x0c\x4b\x1a\x32\x56\x44\xe9\xbc\x8d\x1a\x5f\x8e\x3a\x81\xbf\xd4\xf1\xb8\xac\xf1\xa8\xba\x53\x02\x19\x4a\xd2\x86\xc3\x9c\xe1\xb9\xef\x96\x42\x74\x00\xe0\x0f\x15\x4d\x88\x83\xf9\x06\xba\x2d\x8c\x94\x4d\xe3\xe7\xfc\xab\x0f\xdf\x77\x22\x2f\xb4\xc9\x5f\xd5\xcd\xe8\xaf\xc5\x61\x11\x37\xaa\xdd\xfc\x66\xb4\xd1\x3b\x16\xea\x4f\x00\xc4\x6c\xa5\x73\x20\xd8\xa8\x4c\x5a\x00\x2e\xd8\x7a\x88\x5f\xd2\xef\xd7\x40\x4d\x35\x75\x08\x4a\x6b\x4a\xd5\x91\x8e\xf0\x01\x96\x99\x21\xbe\x0d\xd
b\xfb\xb5\x2f\x34\x2e\x95\xee\xf0\x2a\x7a\xf1\xa1\x71\xb6\x7f\x0c\x22\xd0\x67\xbf\x87\xfc\x08\x5a\x77\x66\x87\x15\x7b\x55\xfe\x6e\xda\x71\x95\xf6\x7d\xe3\xc4\xc3\x25\x50\xe0\x89\x6a\xc2\x4f\xaf\xc7\x4f\x8f\x02\x78\x00\x4c\x29\x10\x97\x54\x7c\xd9\x91\x74\x4c\xce\x2c\x84\x97\x34\x1c\x96\x24\x1c\x9a\x38\xd6\x93\x3c\x5e\xdd\x61\xb1\xc5\xb5\x28\xbb\x98\x71\x91\x41\x60\xb0\xc2\xac\xf9\xf1\xa1\x2d\x0f\x3a\x9d\xf5\xf6\x5c\xae\x92\xad\x98\x02\xac\x6a\xa3\x83\xd5\x90\x6c\x02\x4d\x89\x42\x72\xb0\xcf\x91\x13\x7c\xc0\x45\x07\xe0\x6f\xc7\xd7\xb8\xad\xc5\xae\x7c\x69\xb3\xbf\xb9\xcc\xcc\xaf\xa6\xaa\x4a\x16\xdc\x8b\xd5\x33\xf3\x15\x5c\x47\x4f\x78\x63\xa8\xb0\xd6\x76\xdb\x82\x18\x2f\x2c\x70\x53\xe4\x8a\x7e\x1b\x98\x2a\xb2\xc7\xc6\x9a\xb3\x23\x8e\x8a\xc4\x6e\xf4\x4e\x60\xfd\x03\x38\x3a\xdd\x59\xd6\xa6\xac\x15\x4e\xe9\xe9\x52\xf0\x31\x1e\xf7\xb5\x8f\x6c\xff\xc7\x3a\xf9\x15\xc9\x7c\x40\xb3\x3d\xf9\xeb\x76\x5e\x66\x3a\xb7\xe0\x1f\xa9\x6c\xe4\x16\x4a\x32\x63\x04\xfc\xdb\xa9\x30\xda\x62\xc9\xcb\x0b\xb7\x75\xa4\x3d\xdf\x05\x41\xb2\xcd\xd4\x77\x9f\xc3\xc1\x89\xf7\x50\x0b\x4e\x28\x33\xc6\x37\xf0\xf7\x4f\xff\x0c\xc8\xf5\x3f\x06\xdf\xab\x0c\x29\xce\x6e\x71\xdc\x76\x8a\x10\x72\x14\xdb\x1b\x0a\xa3\x25\x2d\x87\x59\x75\x35\xcb\x55\x59\x40\xe2\x6e\x10\x5d\x9f\x44\x7f\xfd\x41\xf1\x9e\xb1\x65\x6e\x4f\xb2\x21\x6a\x8c\x4c\x0c\x14\xd0\x71\x93\x41\x89\x63\x40\x06\x94\x9a\x27\xaf\x44\x83\xa0\x0b\x92\x06\xfa\x8e\x22\xc9\x01\x3f\x63\x6e\x68\x42\x34\x9b\x2d\x3c\x4f\x34\xf7\x1c\xa1\xec\x93\x69\x4d\x34\x8a\xf2\xf2\xdf\xcc\x88\xce\x6a\xb0\xfd\x41\x65\x6d\xd5\xc6\x14\xfe\xd7\x82\xb8\xfb\x66\x87\x39\x91\x90\xa8\x80\x16\xfb\x78\x3e\x7c\xc4\xa5\x21\xd5\xf6\xd3\x8a\x9c\xcc\xb1\x58\xd7\xac\xcb\x9c\x8e\x4f\x7e\x8d\xea\xb8\xcc\x69\x61\xea\xd9\x1f\x9f\x0a\x0b\x35\x55\x2b\x4a\xfa\x81\xe2\xee\x5b\xfa\xa4\xc5\x89\x64\xde\x45\x16\x23\x7a\x90\x16\xd7\x48\x0b\xa7\x1f\x08\xc5\xc2\xc5\x0a\xa7\x58\x19\xfa\x75\x17\xd3\xcf\x70\xd9\xa1\xae\xd8\x6e\x2b\x95\x96\xd5\x4d\x1f\x0d\xb2\xcb\x6b\xc1\xd4\x8a\xfc\x1c\xec\x76\xe4\x74\x59\xd3\x1
1\xcf\x19\xfc\xd5\xd7\x72\x5c\xd6\xb4\xe9\x83\xdf\x99\xad\x64\x1e\x8e\xcb\x92\xa4\xe1\xbe\x31\x0f\xf6\x9b\x57\xbd\x55\x5c\x09\x94\x9a\x30\x69\xd3\x7a\xdc\x42\xc3\x88\x08\xf2\x8d\x47\xf8\xb0\x91\x54\x16\x7f\xb3\xa1\xac\xd5\x45\x83\x2a\xee\xee\x8c\xe1\xd2\xe5\xc9\xb3\x7f\xcc\x4c\x8f\xc1\xa5\x44\x43\x92\x8a\xcd\xee\x31\xc4\x8e\x3f\xc7\xa9\x3d\xa1\xd2\x48\x9c\x2c\x33\x7d\x74\x59\x13\xa4\xf3\x0c\x3a\x7d\x09\xee\x05\x3f\x57\xc8\xae\x57\x78\xf7\x4d\xfd\xd7\xeb\x79\xba\x08\x83\x73\xe3\xd9\xb0\x96\x61\x48\x42\xfa\xca\x90\x29\x3b\x20\xf2\xe0\x1e\x19\x27\xc2\xf1\xee\xea\x1d\x27\xd2\x71\x59\x9b\xde\xf9\xa5\x6e\x2b\x41\x74\x15\x05\x70\x11\x68\x6e\xbf\x30\xb0\xa5\xf5\xa6\x08\x30\xb2\xec\x62\x14\x60\xe1\xd2\x47\x71\xeb\x31\x42\xa9\x2c\x48\xbc\xf2\x6b\xb6\x01\xa1\xc3\x01\x2f\xf9\xb6\x89\xa3\xdf\xb7\x13\x25\xc1\x3e\xb5\xf6\x92\xe6\xe4\x15\x00\x58\xbe\xd5\xe7\x8f\xed\x8b\x61\x43\x9c\x76\x45\x4a\x95\x91\xa9\x4b\x92\xd2\xc6\xa1\xc8\x2a\x7d\x5d\x1e\x20\x49\xf3\x00\x06\x52\xac\x74\xb2\xf3\x64\xe1\x0f\x65\xf0\xc9\x4e\x17\x18\xa1\x8b\x51\x44\xc0\xe6\xb4\x12\xe3\xc2\xd8\xff\x3c\x64\x6e\x5d\x5c\xdc\x72\x8c\xaa\x6f\x29\x7c\xf8\x42\x77\x47\xd8\x1e\xdd\x60\x22\x0f\xa2\x28\xd8\xa7\x18\x25\xed\x3b\x79\x0b\xd9\xe7\xdb\x58\x48\x2c\x52\x9c\xdf\x86\x16\xd2\xbf\x73\x72\x7e\xf9\xb3\xb0\xd1\x88\x95\x83\xe2\x0e\x83\x1b\x11\xae\xd7\xff\x79\x43\x57\xea\x28\xb1\x2f\xb5\xa7\x54\x28\xb0\xa2\x33\x1b\x12\xd8\x48\xa8\x06\xe1\x62\xa1\xd8\xc2\xb4\xf1\xa1\x50\x3d\x57\xe4\x57\xde\x0f\xc1\x81\xc2\x8e\x73\x6c\xcc\x53\x44\x47\xdf\x00\x4b\x02\x65\xc0\x16\xfa\x3f\xc4\x9c\x55\xae\xa9\x11\x85\xf3\x92\x31\x33\x46\xe1\x1e\x81\x96\x6d\x44\xe6\x15\x3b\x80\x8e\x43\x1d\xf9\x12\x0a\xf3\x7f\x4f\xa5\xf5\xfd\x4e\x49\x2d\x48\xd1\xe2\x10\x7b\x39\xe0\xc8\xe0\x0f\x6a\x38\x4e\x36\x0d\x3e\x80\xe8\x93\xac\x50\x9a\x89\x39\xce\x34\x90\x4e\x40\x2a\x12\xd9\x82\x63\x0b\x08\x2a\x21\x8c\x04\x04\x8f\x07\x3f\x10\xb2\x6c\x44\x64\x53\x2a\x5d\x66\x65\x8d\xbb\x7b\x79\x36\xfc\x43\x53\x49\xd7\xcf\x87\x26\xb1\x1d\x0c\xbe\xf5\x82\x61\x55\x21\x1f\xf4\xd5\x1
a\xf1\xab\x61\x7c\xa3\x1f\xb2\xe2\x42\xdf\x94\x2e\x65\x01\xe8\x15\xf0\xc3\x29\x52\xab\x94\x4a\xe1\xd1\xa1\x2e\xdc\x7a\x5c\xda\x38\xd6\x09\xf7\x77\x87\x4c\x2c\xb2\x6d\xcd\xe3\xca\xf4\x1a\x5c\x13\x1e\x5b\xba\x5b\xe3\x4f\xa0\x4a\xf3\xc5\x95\xdf\x9f\xef\xfb\x53\xcc\x5a\x96\x55\xae\x5e\x3b\x67\x06\x39\x44\xd0\x6d\xe7\x77\x5b\x70\xe4\x11\x0b\x37\x7a\xf4\x43\xc6\x1a\x68\x6e\xbc\x1e\x08\x51\x80\x16\x5b\xb2\xa3\x38\x05\x14\x06\xc1\x55\x08\x96\xc1\x04\x29\x58\x79\xad\x2f\xaf\x4e\x49\x95\x6a\xd4\x68\xf3\x76\x03\xba\x31\xc9\xde\x03\x95\xde\xf0\xee\x23\xe1\x57\xe2\x6a\x81\x07\x81\xd6\x03\xde\xb7\x9f\x44\xc1\x93\x5b\x22\xa7\x6e\x8f\xc8\x3f\xa6\x10\xce\xab\xda\x62\x85\x25\xb6\x99\x49\x75\x98\x3a\x64\xd1\x03\x7a\x1b\xe1\x88\x12\x68\x75\xd0\xcc\x1b\xc9\x0c\x3e\x62\x6d\x6e\xa2\xe3\x27\x5c\x71\x36\x2e\x36\xff\xcd\x57\xf7\x60\x24\x9b\xd6\x58\x66\x0d\x4f\xe0\xd9\x70\xc7\x43\x82\xbd\x87\xca\x94\x3c\x05\x02\x1a\xad\xd0\x3a\x56\x92\xf1\x29\x01\x79\x9d\xfa\xee\x6b\x2b\xb7\x2a\xed\x60\x40\xa2\x8f\xd3\x4b\x87\x57\xc4\x32\x32\x5b\x5e\xb2\x67\x4d\x28\xc1\xaa\xcd\x98\xca\x9d\xf1\x10\xa3\x34\xa7\x42\x33\xb3\x5b\x9d\xd7\xb6\x30\x3b\x5f\xfb\xc5\x40\x54\x71\x04\xdb\x2f\x2a\x7d\x07\xba\x4c\x2d\x61\x3a\x2e\xb3\x86\x53\x40\xc7\xc3\x76\xa3\xf6\x9a\x18\x12\x37\x63\x8c\x9b\x0d\xd3\x95\x54\x73\xc2\xc7\xd6\x09\x56\xab\x2e\xff\x60\xb5\xb3\xcb\x70\xa8\xbe\xfb\xe6\xe1\x14\xa5\x38\xd7\x38\xf1\x34\xfa\x6f\x7f\xde\x8f\xb3\x9f\x50\xc1\x81\xa8\xe9\x60\x4e\x90\xe5\x5a\x8c\x3d\x33\xc5\xb8\xd2\xb0\x9a\x04\x3c\x71\x20\x15\xc9\x20\x4d\xef\xcd\x9c\xbc\xa7\x02\xce\x90\xc7\x7a\x55\x6c\x64\xf7\xf7\xb7\x0e\xe6\x97\x01\x58\x71\xff\x30\x1a\x41\x1a\x39\x49\x0c\x9f\x01\xe3\x3e\x5d\xef\x15\xcc\xed\xe6\xb7\xca\x34\xbe\x14\x41\x9d\x21\x64\xa1\x79\x0f\x78\x63\xf2\x66\xe4\x67\xf4\xe2\xf3\x2a\x73\x55\xca\xa8\x1b\x0c\x80\xde\x44\x04\x9d\x56\xa5\xbc\xff\x8b\x78\xba\xa2\x7b\x76\x2e\x24\xef\x61\xa0\xfe\xc7\xda\xa1\xd4\xab\x26\x8b\x48\x44\x61\x4d\xb1\x5c\xa4\x5c\x7f\xdc\xbd\xc2\xc7\x76\x44\xf2\x40\x7b\xcd\x84\xc9\xfb\x71\x5e\xb7\xba\x7d\xe
b\xbc\x93\x5f\x93\xf2\x73\x7f\xed\xdc\x93\xfa\xd7\xf9\x09\x6f\x3f\x8b\x61\xfb\x12\x3b\x0d\x08\xd9\xdc\x54\x52\x9e\xde\x8d\xe5\x9b\xea\x76\x5e\x2f\x22\xdc\x37\x16\x0d\xd2\x61\xdb\x4c\x94\x3d\x3b\x8d\xf6\x3f\xdf\xa1\x03\x9a\xd9\x61\xfa\x41\x17\x51\xf0\x54\xff\xee\xf9\x6f\xad\x36\x5c\x61\xb7\xee\x94\x4e\x85\x92\x06\x43\x2d\x9f\x49\x10\xf1\x4d\x9d\xf5\x4e\x0f\x15\x3c\xce\xfb\x22\x1f\x8c\x87\xaa\xb6\xf2\xf9\xf0\x7b\xfd\xe6\xe3\x48\x96\xa7\x25\x00\xea\x7d\xd8\xbd\xfe\xab\x3b\x2b\xfa\x6e\xb4\xe1\x05\xc1\x7b\xc3\xdf\x90\x65\xf4\x88\x18\xe5\x87\xed\x15\xc6\xb3\x27\xcb\x90\x5c\x52\x62\x0c\xb9\x23\x1f\xbd\x53\x0a\xd3\x35\x37\xa8\x81\xec\x5b\x47\xa8\x2f\xba\xfd\x92\x84\xce\xd8\x0d\xf2\x7c\x8f\xfe\x3d\x28\x6e\x78\xb7\xdc\xc6\x82\x7c\xdd\x5e\x58\x4d\x5a\x30\x26\xd2\xf5\x6b\xb9\x4e\x22\x38\xfd\xa0\xb9\xe8\x05\x53\x4d\x44\xd8\x1c\x72\x46\x5b\xdf\xea\x04\x0c\x5c\xb7\x88\x18\x7d\x11\xfc\x7c\xb0\x58\x98\xf4\xda\x02\x45\x70\xfc\xe8\x87\xe6\x3a\x1f\x5c\x16\xb6\x97\x26\x7c\x64\xa1\x7f\xb4\x7e\xaf\xb2\x4d\xb8\x0b\x95\x35\xb5\xf1\x68\x72\x75\x6c\x3b\xdc\x12\x87\xc3\x72\x46\x1e\xbe\xaf\x6d\x4c\xc8\x3c\x63\xff\x3e\x0d\x6a\x3c\xf5\xa5\x76\x8b\x34\xae\x8b\x28\xf1\x07\xab\xee\x5f\x61\xea\x5a\x92\x06\x35\xe0\xa4\x39\xd4\xff\x0a\x46\xa6\x8a\x98\x89\xad\xff\xe5\xbd\x5f\xc4\xe3\xf2\x13\x04\x3f\xef\x49\x5b\xe1\xe5\x9e\xd2\xf0\x1b\xbd\xea\xa6\xf8\xd4\x7a\x6a\x28\xd6\xdb\x3b\x28\xb5\x5d\x97\x79\x73\xec\x31\xf1\x03\x8b\x61\x5c\x47\xbf\xb2\x97\x32\x9b\x5b\x9f\x43\xcb\x5b\x20\xba\xff\x12\xa2\xd9\x5c\x5d\xeb\x9e\xb7\x5f\xd4\x98\xe3\x0c\x76\x33\x3e\x9a\xaa\xe4\x1b\x66\x05\x75\xec\x56\x22\x47\xd3\x7c\xeb\x11\xad\xfd\x46\xb2\x38\x8a\x12\x0d\x5a\xea\x1a\xe6\xb6\x34\x66\x82\xab\x63\x6b\x72\x97\x58\x02\x18\x17\xf0\xfa\x83\x31\x6e\xa5\x6a\x43\xde\x60\x23\x9b\xb3\xa2\x10\x3a\xb6\x9d\x66\x70\xb6\x7a\xf0\x3d\x67\x3f\xba\x5c\x01\xe6\xb5\x5e\x4e\xa6\xa4\x13\x7c\x1f\xff\x9a\x99\x27\x54\xba\x79\x0d\xbf\xa4\x16\xc8\x80\xd2\x47\xf0\xe0\x6f\x5d\x9a\x72\x60\x6c\xd7\xd4\x87\x4b\xbd\xb6\x43\x1d\xe9\xe6\x50\x64\xb4\x44\xee\x3
d\xa1\xb9\x39\xf5\xa9\xf1\x17\x2f\x36\x11\xd9\xf5\x42\xb6\x59\x4e\xd2\xf6\xaf\x2c\xc8\xf3\xdb\x2f\x31\x2b\x90\xfe\x88\x36\xe2\x29\x2e\x82\x04\x6b\x46\xbc\x77\x7c\x1b\x1a\xc0\x89\xb6\x37\x35\x27\x92\x85\xa9\x27\xc6\x33\xf2\xbf\x7a\xb7\x92\x86\xf5\x2c\xb3\x4f\x53\x55\x5f\x11\x05\x14\xcd\x9a\x0e\x8a\xec\x50\x90\x93\x91\x24\xbd\xa2\x83\xb8\xce\x8b\xe7\x27\x16\x3a\x66\xbb\xa8\xc1\x58\xf5\xbd\xec\x4b\x08\x3d\xef\xd6\x38\x0c\xdd\x1e\xd4\xb1\x3a\xba\xe2\x93\x2b\x62\x0f\x6b\xe2\x83\xe6\x7b\xa6\x5d\x16\xff\x50\x40\xa4\x2d\x58\x1f\x2f\xe1\x25\xe0\x4a\x59\x7d\xb3\x90\xbe\x63\x84\x31\xe0\xd3\xb4\x9c\xcc\xb9\xc2\x0d\x48\x7f\x14\x58\x9c\xd3\x97\x3c\xee\x19\xf1\x0f\x84\x5b\x92\xf4\x2a\xd6\x85\x90\x95\x78\x12\x49\x6e\x73\x73\xf8\x97\x3f\x21\xb1\x34\x83\x51\xa9\xe7\xe3\xcd\x7c\x78\x96\x02\x19\x90\x69\x39\x89\x2e\x9c\xfa\x99\x52\xf4\x62\x3c\x4b\x70\x3e\x43\x3e\xf8\x5e\xe1\x15\x8f\x29\x17\xa8\xcb\x76\x83\xf7\x87\x9d\x24\xde\xdc\xea\xbd\x2e\x2c\x93\x2d\x03\x7f\xea\x32\x6d\x8b\xc2\xd7\x78\xdb\x77\x1b\x01\x5c\x1b\x8d\x1a\x4f\x6d\xe4\xe2\xb6\x73\x3e\x0f\xe8\x25\x92\x22\x55\xb4\x0d\x10\xd9\xf1\x82\x60\xfb\x1d\x6d\x08\xc4\x74\x20\xa4\xa7\x4c\x7c\x7e\x6a\x28\x49\xc3\x08\x5e\x8d\x28\x8a\x76\x88\xb7\x40\xdf\x42\x9c\x6a\x3b\xa7\x22\x8b\x77\xae\x40\x9a\xdf\xae\x3a\xfa\x44\x16\xeb\xe3\xbf\xb5\xff\x9e\xd2\xc9\x20\xf1\x17\xfa\x2f\x95\xa6\xf9\x9a\x69\xb5\xb4\x75\x9b\xde\xbd\xd6\x0b\x8a\xf5\x8e\x5e\x3a\x1c\x29\xc4\xef\x0d\xbd\x1a\xf2\x95\xbe\x33\x3c\x1f\x56\xbc\x3e\xb1\x5c\xf6\x3c\x5e\xa0\x48\xcf\x79\x15\x1a\xcd\x85\xce\xb0\xa8\x37\x84\x88\x15\x73\x41\xcc\xbc\xb7\xd6\x67\x92\xa5\x28\x38\xa7\x0e\x07\x68\x14\xab\x1e\xfc\x56\xe9\x32\x18\xb3\xf1\x2f\x27\x44\xd2\x05\xcf\x0b\xa3\x90\xa4\xf9\xef\xdb\x6f\x2e\xbd\x96\x16\x47\x25\xc1\x76\x9b\xdf\xef\x3e\x61\xe2\xc7\xb3\x58\x6e\x4a\x19\x49\xa5\x08\x7e\x66\xfd\xf2\xe6\xc7\x9a\x1d\x7c\x71\xa1\xdb\x83\x53\x44\x36\x43\x6f\x09\x11\x06\x78\x46\x31\x20\x36\xae\x3f\x29\x04\x5d\x9e\x50\x56\xa2\xed\x2e\x37\xae\x4a\x29\x90\x33\xe7\x0c\x9d\x33\xdb\x40\x7d\x98\x5d\x47\x0f\xd
b\x55\xcd\x8b\x80\xae\x3f\x86\x0e\x9f\xc8\x77\xaa\x3d\x6f\x6f\x4e\x14\x60\x75\x71\xaf\x48\x7c\x61\x2e\x2b\xa6\xce\x4c\xf4\x0b\x65\x83\xc9\x2f\x86\xe9\x6d\xc9\x4b\x76\x98\x38\xb7\xa5\x3c\xd1\x39\xf0\x79\x1d\x53\x95\x26\x05\x2a\xf1\x86\x78\x3e\x1e\x50\x23\x0c\x86\xcc\xfc\x76\x2c\x8e\xec\x96\xf3\xd9\xbc\x93\x5f\xbd\x93\x09\xab\xac\xaa\x85\x02\x8a\xca\x9a\x3e\x60\xba\xfc\x81\xa4\x5f\xac\x53\xdf\x6b\x8b\xf2\x94\x2c\x9e\x3e\x51\x5c\x7f\x20\x0c\x25\x9a\x3a\x1c\xc7\xce\xfc\x2b\x76\x84\x38\x52\x34\x3b\x14\xb1\xba\xec\x94\xca\xa8\x82\xb4\xb7\xb7\x52\x48\xde\xe0\x5b\xad\x6a\x79\x89\x10\xed\x5b\xec\xfc\xd8\x6b\x43\xb6\x4c\x2b\x0d\x37\xb9\x9a\xe9\xdd\x71\x5d\x89\xfd\x8a\x28\x4d\x99\x14\x7b\x32\xba\x4d\xb4\x5e\x96\x19\x06\xb2\xd6\x9d\xd6\x86\xef\x68\x8f\xfd\xe2\xa5\x18\xce\x61\x2d\x90\xa9\xcd\x2b\xda\x7a\xa9\xd0\x1b\x41\xa0\xdc\x18\xa6\x03\x92\xb2\xdc\x62\x52\xa0\x6d\xaf\xfa\x1f\x7b\xe7\xbe\xf6\x43\x18\x78\x8f\xea\xdc\x76\x98\xc4\x86\x0c\x7e\xc6\x8d\x6e\x81\x08\x73\xca\x48\x60\x51\xdf\x5d\xb3\x8c\x22\xe9\x71\xb3\x18\xf0\x33\x9a\x12\xa4\xf8\xfb\x7b\xc4\xcd\xad\xbb\xa7\xcf\xde\x55\x6e\x4d\xf4\x71\x7b\xb0\x37\x00\xeb\x2e\x3f\x64\x0e\xdf\x0d\x18\x6e\xbb\xa3\x49\xc1\x24\xaa\xc1\x0e\x81\x11\x41\x1a\xc4\x4b\x04\xa4\x3f\xef\x07\x6b\xfb\x5e\x29\x80\xf2\xb2\x6d\xa4\x0d\xc4\xbe\xa9\x8f\x20\x11\x8d\x59\x4d\x38\x00\x1a\x08\xa9\x1d\x08\x81\x45\x28\xd2\x41\x60\x01\x88\xab\x87\x9a\x1c\x3c\xb8\x0e\xa5\x46\x29\x50\x82\xf9\x39\x8b\x99\xdd\xbb\x86\xff\xc6\x40\x65\x4a\x71\x6e\x34\xae\xd1\xc5\x30\x49\x03\xa3\x91\xc7\x7e\xdd\x8a\x6f\xb7\xae\x5b\x0a\xaf\xf8\x11\x92\x27\x42\x47\x74\x1c\x06\x63\x32\xe0\x67\x36\xe1\x20\x48\x95\x96\x04\x72\x34\x08\xe0\x4d\xd5\x2b\x20\x48\x14\xfa\x54\xba\x35\xb6\x12\xe7\x16\x92\x49\x9f\x86\xbf\x66\xce\xc1\x78\xaf\x64\x9b\x81\x5e\xec\x39\x4c\xb8\x14\x68\x39\x0c\xe7\x86\xa0\xf2\xe7\x2f\xa7\x6a\x8f\xa7\x76\x88\xe9\xb2\x89\xb0\xcc\xab\xd9\x42\x8c\x97\xc8\xbc\xdc\xfa\x4b\xf6\x62\xd0\xa2\xba\x33\xbb\xfe\xab\xb5\x70\xbf\x9e\xad\x29\xbb\xac\x18\x8b\x97\x79\xa3\x7b\x4b\xba\xb4\x24\xe
8\x2d\x2c\x84\x3f\x61\xe5\x3a\x8d\x21\x09\xfd\x07\xca\xf9\xe2\xd9\x33\x02\x28\x94\xd9\x60\xee\x95\x15\xa4\x5e\xf8\x57\xd7\x3b\xb6\xf4\x3d\x36\x02\xa1\x01\xad\x51\x95\x76\x3e\x35\xee\xde\x35\xeb\xf1\xc3\x5f\x93\x00\xea\xb5\x41\x6b\xc6\xdd\x94\x61\x4e\xff\xfa\x25\x34\x7e\xf8\x93\x48\x00\x64\x87\x9b\x4f\x0c\x48\x88\x07\xe1\x44\xb3\x1d\xff\x45\xdf\xd3\x03\xb9\x6c\x99\x92\x48\xf2\xe9\x41\xa8\x94\xa4\x51\x40\xa0\x21\x55\xf3\x1d\x88\xc6\xc7\x19\xe8\x7b\xfd\xef\x92\xc6\x07\xee\xf7\x60\x0a\x60\x23\x6e\x7c\xc5\x23\x79\xf9\xd3\x04\x02\x77\x7d\xd8\x28\x17\x42\x36\x76\xca\xec\x9a\x9b\xfc\xf2\x48\xd0\x3b\x99\x9e\x62\xc9\x8f\x6c\x50\x6a\x16\xd7\x1d\x44\x4e\xe9\x17\xf5\x3d\x78\x01\x6a\x38\xe2\x4b\x0e\x89\x9f\x31\x4b\x18\x30\x66\xad\x13\x89\x75\x04\xf0\x7a\x30\x40\x9d\x3c\xdf\x2a\xd5\x88\xe6\x1d\xc8\xe9\xb1\x94\x65\xc5\x9f\x2c\xd7\xae\x04\xff\x71\xde\x7f\x07\x7b\x36\x99\x33\xef\x11\x0a\xe2\x91\x85\x3f\x79\xf8\x33\xed\x08\xb4\xd0\x68\xa1\x14\xab\x8c\x06\xdc\x2c\xa6\xe4\xcc\x46\xfc\x4f\xa2\x02\xe3\xab\xec\x3b\xb7\xa0\xb3\x85\xf2\xd2\x92\x5e\x30\x69\x82\x20\x09\x9b\x45\xe3\x8a\x2b\xf3\x97\xcc\xf6\x49\x59\xd0\x8e\x41\x56\xed\xb4\xfd\xfd\xe5\xec\x88\xc3\x22\xe2\x18\xc9\x8d\x7d\x2b\xcc\x91\x12\xd9\x5e\xb5\x07\xf1\xe5\x39\x60\x62\xc9\x89\x68\x80\x74\x97\x7d\x13\xc8\x3f\x36\xae\xc5\x65\x48\x92\xba\x1a\xf0\x8a\x4c\x53\x6f\x61\x19\x5b\xbc\xcf\x4c\x92\x3e\x98\x1a\x15\x35\x47\x35\x25\x50\x69\xed\xd4\x67\x50\x32\x51\x94\x30\xd4\x77\x1c\xc8\x1c\x25\x95\xa7\x61\x05\xc1\x46\x7c\x4a\xd5\x95\x5e\x6c\xe6\x58\x6c\xe6\xac\x02\x89\x92\x03\xad\x6f\x9f\x94\x37\xff\xd3\x68\x9a\x9e\x2e\x74\x0c\xfb\x85\x88\x26\x1c\x28\xbc\x78\xcd\x4d\xb2\x03\xea\x4d\xbd\xa9\xf4\x72\x66\xd1\xc3\xef\xf4\x5c\xd6\x9c\xaf\x8e\x51\x04\x59\x00\x1d\x38\xcf\x87\x8c\xbf\xe7\x45\xc3\xf2\x1c\x4c\xfe\xdf\x70\xb9\xba\x56\x63\x0a\xf5\x97\x69\xa5\x1c\x31\x15\xd7\xf5\xdc\xd4\x25\xbf\xe6\xfa\x7d\x1e\x7a\x7f\x13\x37\xd0\xc7\x17\x4a\xc4\x66\x87\x47\xf5\xed\x17\xdb\x8b\x24\xed\xc5\xce\xe7\x62\x6a\x9d\x0a\x7e\xa5\xcc\xca\x8e\x5c\x9a\x52\x08\x67\x0
8\x89\x13\x80\xbd\xd8\x3a\xd0\xc8\x90\x9c\x18\x22\x34\x92\xef\x27\x39\x50\x1a\xd6\x6c\x78\xac\xe9\xb8\xf1\x56\x71\x3f\x43\xc3\x08\x76\xca\xdb\x70\x61\x79\x70\x61\x7c\xb9\xc4\x63\xc0\x62\xfd\x7b\xb5\x79\x68\x1e\x74\x58\xae\xde\xc9\x9f\x0f\x1b\x36\xba\xa6\x63\x4f\x86\x2f\x50\x95\x3b\x52\x45\x28\x4d\x76\x30\xf1\x9a\x9b\xc3\xe4\xcf\x48\x5d\x13\xf5\xe1\x14\x98\x22\xcd\x1c\xeb\x8f\xd7\xe2\xaa\x2b\xec\xcc\x25\x9b\x99\x24\x44\x83\x78\x19\x7d\x1e\x04\xe1\x9d\xd1\xd5\xf4\xe1\xe0\x93\x72\xe2\xfb\x9f\x76\xf1\x51\xdf\x1a\xfd\xc9\x01\x78\x71\x05\xab\xf5\xe5\x28\x52\xd4\x05\x6f\x43\xc6\x6a\x7d\x58\x73\x53\x95\xce\x7f\xf2\xa7\x97\x96\x44\x0b\x82\x7f\x0a\x95\xc0\xb3\x28\xdd\x3c\xcc\xed\x9d\x68\x86\x94\xd4\x17\x36\x27\x94\xbc\x82\xf9\xee\x36\x27\x60\xb9\x79\x13\x1b\xe1\x0d\xaf\xa2\x54\xda\xe3\xb3\x9c\xc0\xd8\x8e\xf4\xce\xab\x7a\x65\x07\x42\x9f\x28\x48\x1e\x60\x80\xc0\x07\x59\x9e\x3e\xfc\xd2\x95\xb2\xc1\x04\x50\x29\x03\xd4\x1f\x6e\x90\xe4\x5a\x93\xab\x43\xf5\x42\xb8\xd0\x3c\xfa\x77\x1e\x35\x84\x49\x46\x72\x66\x03\x1e\xfe\x80\x49\xd2\x7a\xbc\x4c\x58\x67\xb1\x09\x7c\xcf\x1d\x20\xf6\xcf\x60\x0a\x82\x20\x63\x61\xe0\x9a\x30\x5e\x06\x65\xae\xfb\x39\x50\xd8\x72\x24\x2b\x04\xa2\xdb\x7f\x00\xa1\x76\xeb\x7d\xa0\xc4\x4e\x4a\x08\xc7\x7c\x12\x28\xfc\xd8\x7d\xb8\x89\x22\x33\xa0\x43\x76\xfd\x1f\xd3\x0b\xb3\x2f\xb0\xf8\xef\x4b\x5e\x20\xb7\x6e\x20\x73\x86\x9f\x1f\xb0\x37\x5e\x36\x8e\x28\xc4\x72\xdd\x72\x4e\xb3\xd3\xc3\x41\x16\x0b\x51\x46\xe7\x15\x6d\x3b\x62\x9c\xf8\x9e\x19\x5f\xf2\xf8\x17\x40\x1c\xe7\xb5\x91\x2b\x61\x36\x3d\x20\xcd\xa0\x04\xa0\x56\x69\xa9\x9d\xca\x93\x6a\x33\xa7\xbd\xb2\x5a\x71\xa3\xdd\x60\x37\x46\x0a\x05\x30\x2c\x0e\xf3\x9e\x4f\xd9\xbe\x22\x6f\x80\x6c\x7e\x94\x49\x5f\xb1\x92\xd2\xb7\x8d\x94\x7c\xb0\xc2\xff\xf2\x36\x2b\x21\xae\xf5\x98\xa5\x92\xd1\x00\x30\xc4\x30\xad\x7b\xbc\xdd\xe0\x9a\xfa\x52\xe8\xef\x6f\x1c\x65\x55\xd7\x0e\x4f\x52\x4c\xad\xd4\x6c\x4a\x97\x67\xb2\x68\xef\x48\xb7\xa1\x58\xc7\x9b\xc9\xdb\x58\x6e\xf8\x41\xe0\x3f\x0d\xfb\x17\x63\xae\x3c\x2d\x64\x39\xeb\x22\x1f\x10\x4f\x5
a\x54\x92\x8a\x17\x5c\xe7\xa0\x40\x65\x1e\x51\x89\x2e\xb8\xb9\x40\xd7\x23\x93\x30\x08\xa2\xe4\x40\x35\xfe\x4b\x64\x29\x70\x75\x44\x1d\x5e\x19\x99\x31\x4f\xfd\x2e\x62\xc1\x47\x93\x4f\x33\x28\x04\x05\x8a\x90\x35\xbf\xab\x7a\x82\x43\x0a\xa1\x79\x2d\xb5\x16\xdb\xfe\xc6\x9f\x6a\x4d\x9e\xeb\xb3\xe7\xde\x4f\x69\xe8\xca\x61\xc5\x08\xdb\x14\x99\x67\xe3\xa6\x37\x32\x60\xf1\xc7\xbc\xda\xf4\x2d\xd4\xc4\x90\x71\x2c\xb2\x0d\xe7\xfe\x25\x97\x79\x11\xe4\x66\x35\x9a\xfd\x27\xda\x7f\xf4\x3d\x31\xa1\x70\x4f\x0b\x9a\x56\x68\x1d\xa1\x24\xa3\xd0\x08\xcb\x28\xac\x2b\xf1\xf7\xdd\x87\x9a\xab\x85\x99\x81\x80\xf9\xab\x6f\x3a\xcb\x3a\xfd\x1e\x74\xa5\xd5\x61\xb1\x75\xb7\x9c\x6c\x7a\x2c\xb2\x67\x29\xb2\x27\xc4\xa6\xd0\x57\x1e\x48\xce\x97\xec\xe8\x06\x03\x24\x78\x7e\x8b\x8f\xfd\x18\x59\x09\x35\xaf\x77\x30\x80\x69\x2c\xef\x19\xbe\x50\x18\x94\x3f\x85\xe6\xc3\x99\x8a\xd1\x85\x10\xeb\xf8\xc0\xfd\xc6\x72\xef\xc2\xd7\x3f\xfb\xb5\xfb\xdd\x39\xfd\x90\x02\x22\x81\xb6\x3c\x68\x56\xd3\x9f\x3d\x2a\x61\xc0\x66\xd7\x2c\x0a\x42\xef\x89\xc1\x80\x27\x91\xd6\x2b\x3a\x1e\x45\x27\xbe\x0d\x2d\xcc\xcd\xf0\x61\xc8\x7b\xa8\x69\x0f\x42\xbb\xab\x8f\xec\xbe\xf6\x23\x12\x73\x22\xd2\x7a\x16\x58\x38\x84\x1e\xdd\x4b\xb9\xbe\xe4\x9f\x32\x1b\xb1\xf3\xda\xfd\x46\x41\x40\x0f\x25\x64\x45\x91\x1d\x71\x06\x27\x02\x48\x21\xd1\xca\x8a\xca\x94\x0a\x00\x94\x6d\xe5\x9c\xb8\xa3\xc7\x17\xd3\x95\x18\x79\x33\x39\xa6\xb6\xff\x18\xcd\x7f\xaa\x3f\x1d\xf1\x5b\x79\x59\xd2\xf0\xc4\x4a\xe9\xc4\x2a\x69\xc0\x7b\x3a\x0f\xab\xce\x5f\x54\x4f\xa9\xee\x84\x3e\xbf\x94\x2c\x5e\x78\xc5\x44\xb6\x8e\x1f\x3c\x45\xd8\x9a\x51\xfc\xca\x43\x4f\x1e\x88\xa4\xb5\x4f\x30\xb1\x38\xbc\xb9\xe0\xb7\xbd\xfd\x57\x67\xc8\xda\xa8\xac\x05\x00\x40\xd5\x52\x39\x59\xf6\x7e\x1b\x83\xad\xcc\x09\xa8\x9e\x52\xe6\x38\x1b\xcb\x2d\x07\x2e\xb3\xfa\xda\x65\x1e\xe2\xe0\x77\xdf\xd4\xf6\xc9\xcf\x77\x66\x8e\x64\x4c\xc6\x5a\x5c\x2e\x88\xd6\x1f\x68\xaf\x15\xe9\x0f\xcd\x46\xde\xb6\x79\xd4\x36\x89\xf6\xcf\x23\x76\xb1\x5f\x29\x3b\xa6\x2b\xdd\x03\xf2\x49\x1f\xc7\xbc\x9e\xc4\x16\x0c\xe8\x12\xe3\xb0\x8
2\xa2\x76\xdb\x4a\x9e\x7a\xa8\x08\xbe\xd0\xdd\x7b\x62\xe9\x3e\x35\xab\x0c\xc3\xd4\x8c\x30\x08\x76\x60\x4f\xa8\x50\x60\x2a\x37\xaf\xee\xf8\x79\x41\x10\x04\x29\x5c\x5e\xbd\x07\x6b\x8c\x7d\x00\x39\xaa\xbc\xeb\xba\xae\xba\x30\x2f\xac\x0c\x4a\x0c\xfc\x3c\xab\x6a\x6e\x2f\xc2\xdc\xd0\x2f\xf8\x2f\x76\x87\xdb\x32\x79\xc0\x1c\x42\x4c\x3e\xe2\xdf\xb1\x8d\xca\x9a\x9e\x5b\x8d\xc9\xdc\x69\xd4\xbe\xc4\xde\x95\x97\xec\xd0\xbf\x33\x2b\xe4\xd1\x63\x7e\x87\x31\x7e\x36\x43\xce\xdb\x43\x01\x8e\xdc\x75\xc9\x57\xcf\xae\x70\xf0\x4c\xc6\xb3\xe4\x12\xb8\xe2\xa3\x7b\xc2\xa0\x70\xfe\x80\x08\xda\x83\xa1\x5a\x10\x69\xf0\x3b\xd5\x2d\x23\xdb\xad\xb0\x6b\xdc\x37\x7e\x09\x0d\xd2\xd7\x8b\xaf\x85\x39\xb9\xe1\x83\x43\x84\x29\xf1\x14\x50\x99\x2f\xdd\x79\xcb\x9a\x8c\xd0\x4f\xe1\xe2\x88\x5b\x54\xbb\xe7\x05\x6c\x45\xf3\x7b\x52\x6c\x01\xd1\x78\xf9\x4b\xb1\x34\x94\x2f\x55\xff\xcb\xbf\x6c\x1d\x8b\xb5\x30\x86\xf0\xdb\x2f\xa5\xce\xd8\xee\x00\xd0\x50\x2f\x6e\x46\x9c\x9b\x12\x96\x9e\x46\x03\xa3\x05\x5a\x30\x47\xe6\x42\x87\x52\xf9\x17\x01\xfb\xc1\xcf\x16\x34\x21\x50\xdc\xe9\x65\x63\x3f\x31\xfb\x81\xfc\xbb\xff\xcb\xc6\x62\x03\x2e\x3c\x9f\x65\xbd\x9e\x8a\xea\xa9\x6b\xa0\xe9\x26\x08\xda\xeb\x07\x1c\x34\x6b\x67\xe6\x38\xc3\xb9\x2c\x98\xa6\xcc\xd8\x76\x18\x61\x76\x1e\xd5\xed\x67\x20\xa6\x0a\x71\xc2\xc6\xe2\x42\x38\x36\x61\x67\x92\x34\xce\x6f\x21\xd2\xd1\x59\x62\xea\xd0\x5c\x88\x19\x1c\x12\x3c\xef\xa4\x69\xc3\x9f\x2b\xbd\xa5\x6b\x2c\xb1\xdd\xf0\xaa\x5c\x90\x94\xa9\x54\x1a\x9b\x7d\x66\x9c\xfa\x99\x3d\x8e\xed\x4c\xc8\x0c\x1c\xd4\x3b\xbc\xb3\x6c\x4d\xf3\xc1\xd2\x19\x37\xc5\x31\xff\xf8\x5e\x64\x7b\x47\xb5\x4d\xe4\x10\x9b\x86\x79\xc3\xa0\x5f\x64\xce\x64\xec\xa0\x76\x65\xd0\x22\x81\x7a\x66\x58\x5a\xe6\xa0\x9c\x1f\x4b\x7d\x78\xca\x3d\x19\x1d\xc1\x11\x83\x61\x29\x1c\x94\xcf\x1e\xec\x92\x4f\xaf\xc7\xfd\xc0\x3a\xfe\xf5\x2b\xb1\x7f\x76\xca\xeb\xc5\x43\x97\x82\x03\xc1\x96\x9b\xe8\xb0\xe3\x07\xba\x4b\x6c\xb1\xca\xd5\x83\x3e\x79\xd4\xea\x96\x70\x16\xce\xd6\xf4\x2c\x78\x5d\x2d\x8f\x66\x4d\x1d\xf6\x0b\x34\x4b\x38\xdf\x41\xb8\x56\x51\x2
b\x6f\xcb\xd0\x92\x85\x77\x83\xc0\x0f\xb4\x87\xa4\x6f\xc2\x12\x7d\xe5\xf7\xe7\x77\xb6\x01\xa1\xd9\xb6\x8b\x67\xa4\xb0\x8a\x28\x80\x0d\x2d\x5b\x56\xb5\x9d\x69\x93\xf4\x44\x98\xbf\x81\x2d\xd8\x12\x9f\xe5\xf3\xd5\x95\xbd\x78\x0f\xfd\x9f\x0c\x1b\x9e\x7f\xed\xb2\x9b\x57\x3a\x13\x52\x9c\xd6\xde\xba\xf1\xb1\xe2\x9b\xce\xaa\xda\xca\xd7\xe7\xa7\xe5\x74\xc2\xd6\xb9\x6f\xbf\x1d\xcd\x28\x65\x05\xea\x08\x8b\x49\xbb\xfe\x1b\x8f\xe4\xa6\x1e\x51\x46\x87\xe4\x60\x01\x12\x81\x0c\xa3\xfd\x4e\x95\xb7\xb1\x3f\xf6\xf0\xc4\x84\xd2\x3f\xe1\xf1\x03\xd6\xae\x7f\x8a\xe6\xf6\xeb\xba\xaa\x1d\x93\xaa\xb6\xf0\xb7\x66\xda\x78\xac\xe9\xd2\x08\x87\xb3\x5e\xe5\x1d\x90\x2d\x8f\x3e\xb3\x03\xcb\x7d\x06\xad\x92\x57\xf6\xf1\x5c\xc6\x31\x7d\xcc\x6f\x92\xc3\xb9\xe6\xe3\xed\xe0\xdc\xff\xdb\x1f\x00\x93\xc3\xb9\x96\x87\x38\x79\xec\xc0\x1f\xe0\x06\x93\xa2\x98\xf4\xcc\x4d\xa9\x40\xf0\xf3\x03\xcb\xce\x0d\x47\x09\x29\xc0\x55\x04\x92\x89\x66\x98\x1a\x34\xde\xd1\x55\xd6\xe6\xb8\x2d\xd9\x60\x8a\x34\xbf\x9a\xe8\x58\x5f\xb0\x1d\x3e\xb1\x03\xb8\xa8\xce\xe9\x4b\xe0\x13\xcd\x0e\xcd\x85\x9f\xcc\x55\xda\x79\x55\x57\xbd\x91\x43\x4f\x33\x90\x3d\x4e\xdd\x79\xb3\xfe\x35\x0f\x78\xc0\xae\x67\x65\xaa\xb2\xb8\x7a\x5b\x31\xd3\x9d\x51\x1e\x4b\xda\x6c\x24\xcc\xad\xe5\x1f\x7e\xa9\x96\xa7\x6e\xf9\x13\xa5\x4a\xcf\xf0\x15\x25\x56\xce\x6d\x54\xda\xd8\x94\xbc\xb9\x56\x71\x0e\xca\x16\x43\xa4\x5b\x18\xc8\x38\x3e\x54\x03\x4e\x85\x8f\x4c\x59\x74\xa5\x38\x37\x31\xe0\x3e\x45\x60\xd6\x5f\xf7\xe0\x2d\xe8\xd5\x3f\x91\x87\x89\x7f\x35\xbc\x95\xad\x5d\x7f\x54\x7e\x02\x6d\xb3\xb6\xff\xfb\xef\xfb\xe9\x48\xc0\x26\xe1\xdf\x00\x66\xec\xf4\x20\x50\xf4\x94\x6f\xc6\xcd\x79\x51\xac\x4f\xa6\x98\x89\x5b\xe7\xc2\x7d\x52\x3a\x37\x7d\x13\x3d\xa7\x62\x8f\xeb\xc4\xab\xc9\x05\x21\x7b\x81\x06\xb7\x60\x66\xad\x0b\x6a\x38\xd6\x77\x4a\x94\xa0\x9c\xf8\x0c\x6b\x71\xb1\x84\x46\xe9\x09\x7f\x15\x5f\xa6\xbf\x84\x81\xee\x13\xd1\x66\xb8\x53\xe9\xbd\x89\xcf\x2d\xef\x66\xe0\x4c\xfd\xf6\x87\x02\x2a\xa1\x59\xec\xaf\xce\xbb\x1a\xce\x39\x50\x83\x68\xcb\xcb\xd1\xdf\xad\xeb\xa0\x76\xd3\xf
a\xe8\x51\x97\xf2\xd2\xb2\x44\xad\xea\xba\xf0\x91\x5e\xf1\xeb\x81\x93\x66\xe3\xa4\x31\x24\x98\x92\xb4\x9b\x22\xd7\x08\x90\x60\xc3\x5d\x8c\x44\xc9\x9b\xeb\x02\x66\xb3\xc2\xaf\xe7\x3f\x1f\x3e\x4a\x88\x2c\xd0\x0e\x2f\xe5\x83\xbf\xb4\x3f\x3e\xda\x82\xf5\xd3\xba\x5f\xf8\x00\xa0\x4e\x1e\xe6\x27\x23\xf7\x60\x8f\xff\x04\x13\x81\x63\x98\x22\x4b\x69\x80\x7f\x38\xfb\x3d\x34\x5c\x2c\xd9\x4a\xec\x1b\xf3\x0e\xc1\x30\x24\x9c\x77\x72\x8c\x20\xf4\x8c\xd5\xf4\x5f\x2f\xf2\x2c\x61\x6e\xbb\xe5\x6d\xe4\x9d\x64\x0b\x3b\xfc\x40\x77\x2c\x24\xef\x40\x98\xc1\x25\xf3\xab\x7f\x9b\x66\xc0\x4f\xf9\x95\xea\x6f\xc1\x00\xb0\xbd\x91\xf0\x8b\x17\x81\xcb\x0d\x69\x61\x86\x24\x0c\x1e\xdf\xe6\x07\x82\x24\x81\xea\x68\x1a\x8e\x91\xeb\xbd\x5e\x0d\x76\x39\x80\xf9\x4e\x76\x20\xd3\x97\xe3\x37\x53\x0c\x3d\xf7\x81\xf0\xc6\xb8\x5b\x4c\x0e\x52\xf0\x4b\x16\xaf\xe9\xba\x32\x0b\xe5\xd7\x33\xfb\xd1\x14\x56\x20\x02\xf9\x14\x27\x72\x87\x0f\xd4\x92\x58\x4d\x5b\x10\xd1\x41\xc1\x96\x20\xb5\xac\x46\x1b\xfc\xfe\x6c\x2a\xb3\x5b\x49\x7d\xf7\xf5\x8c\x30\x33\xff\x8c\x91\x76\x6e\x20\xff\x98\xd1\x9c\x57\xa5\xfd\xf3\xca\xb6\x2e\x61\x27\x9a\xff\x31\xe5\x42\xfa\x83\xe4\x50\xef\xff\xb1\x74\xd6\x5a\xd3\x32\x4b\x14\xbe\x20\x02\xdc\x42\x9c\xc1\xdd\x32\xdc\xdd\xb9\xfa\xb3\xde\xef\x3f\x93\x4c\x32\xb0\x58\x3d\x5d\xb5\x9f\xdd\x54\x57\x5b\xef\x1b\x73\x75\xc9\x8f\xb4\x73\xcd\x0c\x1c\xd9\xd0\xf1\x4b\x0d\xc6\xc3\xa6\xb0\x99\x5d\xae\xd9\x05\x90\x93\x04\x99\x5c\x0b\x39\x80\x8d\x94\xab\xd7\x74\x5c\x52\x82\x2e\x25\x4e\xf8\x03\xcd\xf7\xcb\x1b\x67\x46\x32\x6b\xb2\xf5\x73\xc4\xdb\x38\x14\x1d\x5f\x61\xf7\x50\x75\xf7\x2d\x84\x17\xce\x1b\x52\x2a\xb7\x30\x8e\x6d\x48\xa5\x0e\xe1\xf8\x41\xec\x17\x21\x9d\xa3\x25\xb0\x36\xaf\xf1\x82\x39\x4e\x66\xaa\x0c\x04\xa3\x62\x23\xd3\x24\xce\xff\x75\x1f\x4e\x91\xec\x36\xf0\xfc\x9b\x3c\x2d\x33\x25\xaa\x82\x83\x56\x87\x21\xe6\x9c\xd9\x4d\x56\x9c\x08\x9b\xbe\x4b\x85\x5f\x80\xd1\xf9\xb0\x98\x9b\x3a\x6f\xfc\x5b\xaf\xda\x37\xe6\xa1\x67\x96\xed\xaa\x08\x6e\x4c\x48\x1b\xf1\x32\x29\xaf\xc9\x51\xfa\xed\x62\x30\x50\x34\xa7\x0f\x33\x5
6\x60\xea\x4c\x38\xce\xc5\xd8\xff\xd5\x2d\x3b\x24\x61\x88\x66\x07\x46\x9d\x16\xc8\xac\x8d\xbf\x0d\xef\x53\x5e\x32\x79\x9c\x17\x4c\xd2\x30\x42\xe5\xaa\x18\x07\x87\xba\x0c\x92\x29\x2c\x48\xc1\xc4\x1c\xb7\x2f\x03\x33\xff\xfb\xcd\xfe\xc4\xac\x71\x1c\xa4\x29\xf1\xec\x03\xbe\x54\xc7\xdf\xae\x70\x34\x71\x28\x53\x68\xd4\x30\xb7\x97\x1c\x48\x5b\x2f\x6a\x2a\x3d\xad\x72\x77\x6d\xf5\x60\x21\xcd\x05\xd2\xea\x0f\x8a\x4d\x95\x96\xce\x74\x0d\xcc\x88\x88\x30\xcc\x4a\x1b\x8f\xb2\xb7\x20\x8e\x58\x89\x75\x16\x1a\x47\x08\x6f\xaf\xee\xff\xdb\x57\x3d\xc8\x44\xf2\x20\xea\x13\xeb\x79\xfb\x16\x3f\x22\xe8\x67\xcf\xab\x50\x7c\xaf\x54\xa9\x8e\x45\x16\x45\x71\x3a\xf6\x02\xbe\x86\xf4\xbe\x6c\x47\xbd\x6b\xb1\x43\xb5\x05\x1f\x36\xa2\x40\xf6\xd8\xfb\xa7\xc7\x52\xdd\x72\xb7\x25\x83\x40\x65\x92\xbb\x69\x5b\xc4\x99\x23\x95\x09\x80\xf7\xec\x1b\x4f\x42\xd1\xe5\x35\xd9\xb8\xd9\xce\x2b\xa1\xb7\x4e\xe2\x9f\x21\x7f\xde\xe0\xcf\x60\x40\xcc\x8a\x8f\x49\xc4\x66\xd6\x4d\x8c\xb1\x78\x71\xf7\x29\x62\xaf\xa8\xe1\xeb\x50\x87\x15\x3a\x22\x9b\x0f\x1b\x9f\xad\x1d\x79\x16\x1d\x07\xfe\xe3\x65\xa3\x49\xa8\x1e\xab\xa9\xd8\xaa\x07\xc6\x77\x4b\x4f\x26\x4b\xf4\x30\x9d\xdb\x9c\x39\x3e\x58\xbf\x9f\x63\x66\x9b\x52\x5d\xf1\xae\x7f\xba\xe7\xcb\xfc\xac\x73\x77\x29\xbe\xe6\xed\x7f\xb3\xc6\xdd\xd7\x85\xaa\x19\x22\xe0\x8e\x66\x3c\x05\x73\xe4\x67\xa4\x0d\x41\x10\x84\x52\xec\x26\x55\x8e\x1a\x9f\xdc\x3d\x44\xc3\xe3\x1e\x0f\xd3\x54\xb5\x6b\xfe\x6c\x3b\x4c\x22\x39\xbd\x4b\xbc\xd8\x2a\x30\x8d\x29\xf3\xbb\xb9\x02\xd3\x40\xda\x70\xe6\x63\x54\xdd\x2a\x8f\x17\x06\x13\x0e\x13\x9a\xd8\xbf\x85\x7e\xc9\x16\x49\x6f\xd4\x30\xf5\x76\xeb\x57\xea\x16\x80\xd6\xf8\x31\xd3\x4d\x51\xa0\xf9\xf5\x99\xa7\xe8\xaf\x9b\xf8\x57\x00\x1b\x36\xc3\x06\xd4\xa8\x14\x51\x08\xef\xab\x64\xa6\x68\x76\x88\x02\x4f\xe6\x2c\x4e\xef\x77\x22\xcf\x80\xec\xfb\x05\xf7\xaf\x05\x07\x6b\xb4\xdf\x00\x3d\x16\x86\xd5\xc2\x3e\x5a\x3b\x3d\xec\x25\xcd\x36\xa7\x55\xb5\x18\x31\x95\x08\x5b\x3a\x02\xc2\x6f\x86\xd8\xef\xf0\xce\xb3\x35\xcd\x06\x54\x21\x91\xde\x43\x05\xa2\x13\x30\xb7\xdb\xaa\x01\xf
4\x92\xa7\x56\x33\xc9\xca\xad\xeb\xba\x99\x8f\xee\xb4\xa3\x30\x46\xfe\x5b\x58\x6d\x6f\xb7\xf4\x4f\x7c\x47\x65\x4c\xe8\xb2\x4b\x04\x38\xfe\x08\x8b\x71\xde\xe8\xc3\x38\xa0\xb2\x70\xa0\x9a\x98\x7b\x5d\xd7\x74\xb7\xf2\x07\x30\xfd\x58\xa8\x39\xd6\xc1\xcf\x6c\x02\x62\x14\xca\xac\x76\x1f\x14\xaf\xc7\x6f\xa3\xba\xe8\x15\xce\x52\x05\xea\xa1\xc2\xf3\x27\x4b\x23\x33\x8c\xe3\xfe\xb4\xff\x4d\xe9\xf2\x8d\x63\xdc\x60\x37\x83\xcb\x3c\xd1\x1d\xfe\xc2\xc3\x16\x86\x85\xf3\xfa\xcc\x98\xbe\xe6\xdf\x71\x21\xaa\xd4\xcc\xc9\x98\x9c\xd9\x4c\x0a\x3f\x7b\xa9\xf4\x64\x64\x22\x1b\x94\xff\xf8\x6b\x8e\xd4\xd2\xea\x15\xe4\x85\x8e\x5f\x06\xbf\x12\x89\x4c\xd1\x86\x0e\x30\x6f\xdf\xa6\x21\x3a\x54\xce\xd7\x05\x72\x57\x64\xe7\x60\xd4\x1c\x05\x44\xbf\xae\xb1\x84\x50\xe0\xf4\x58\x72\x67\x0a\x2c\xd7\x2e\x6e\x38\x9c\xbf\x8d\x3b\xb7\x56\x7e\x18\x06\x41\x48\x92\x2e\xaf\x88\x7f\x23\x77\x37\xf1\x39\x65\xdb\x1d\xd3\x1b\xe7\x07\x02\xd7\x55\xed\x7a\xfb\x2a\x09\x76\xfb\x80\x7e\x3f\x4d\xcc\xb4\x1c\xd3\xaa\xee\x7c\xf7\xbe\x98\xdf\x8e\xf0\xb6\x9c\xe0\x30\x62\x27\x5d\xa6\x59\x96\xfb\x49\xc4\xb0\x02\xdd\xf3\xfc\xe3\x4e\xd3\xd5\xd7\xf5\xc7\xa9\x35\xd7\x38\xac\x6b\x5b\xbc\x2c\xd7\xd5\xc9\xe6\x24\x8c\xa3\x85\x13\x6c\xe5\xb6\x90\xe0\x46\x59\xf8\x37\x59\x66\xd0\x15\x60\x90\x18\x05\x1a\xc0\x41\x06\xad\x6a\xbb\xcc\x42\xcd\x32\xeb\x9e\x03\x45\xa5\xfa\x13\x80\xdc\x43\xe2\xfa\x9a\xb8\xde\xbd\x00\x65\x05\xcf\x73\x3f\x3e\x47\x7f\x88\xd1\xd5\x75\x24\x3d\x7b\xfa\x2a\x5d\x4b\x4f\xd1\xe9\x69\x15\x0b\xdd\xdc\xf8\xa6\x99\x57\xc3\x50\xdf\x0b\xa7\x92\xe1\x56\xb4\x76\xe1\x37\x31\xec\x6d\x16\xa5\xd5\xc9\xf6\x70\x89\x4d\x02\x81\xb5\x16\x02\x37\x6f\x44\x36\x01\x7a\x72\xa6\xc8\x9d\xf2\xf9\x3b\xdb\x11\x2c\xc7\xfd\x60\x0e\x3e\xb2\x40\x07\xf1\x46\x3e\xd6\xb3\x98\x12\xd8\x30\x8e\x9e\x00\x15\x8f\xd9\x81\xea\x26\x24\xfb\x54\x91\x7d\xc3\x86\xf3\x92\xbb\xe3\x0d\x8c\x75\xaa\xaa\xca\x94\x33\x8d\xec\xd4\x71\x5d\x6b\xac\xd2\x6d\xed\x60\x6c\xf6\x17\x66\x06\xf1\xe5\x05\xb1\xb8\xaa\xef\x72\xee\x16\x10\xb8\xea\xf5\xb8\xc4\x73\x13\x8f\x81\x02\x65\x79\x90\x22\xe
e\x6d\x0a\x94\x7a\xce\xef\xe5\x62\xdf\xf9\x31\x0f\xe7\x8e\x9b\xd1\x10\x07\x71\x8b\xc6\xae\xdc\x84\xc1\x90\x24\xc2\xb1\xc3\x20\x08\xd6\xf5\xce\xfc\xca\x97\x8a\x04\x4f\x78\xcc\xd7\xfd\x52\xb5\x9d\xe7\xf3\xb7\xeb\x31\x24\x1e\x3f\x4e\xef\xc7\x74\x6c\x66\x44\xc5\xe9\x12\x72\x02\x64\x7e\x8d\xae\x44\x14\xb2\x2e\x60\xd3\x7d\x73\xc8\xdb\x33\xfa\xec\x92\x1e\xb8\xa9\xcf\x35\xe5\x0e\x61\xd4\xd3\xbf\x7d\xf1\x29\x2b\x13\xdd\xc3\x3c\x64\x20\x0b\xa5\xc1\xbd\x0e\x81\xd2\x12\x5a\xa5\x4d\xa4\xf5\x2c\x28\xec\xaa\x95\xbd\x0e\x89\x12\x6d\x6d\xd4\xae\x35\xc6\xf4\x92\x19\x6b\xe4\x0b\xff\x1d\xcb\xf6\x52\x95\x85\xa6\x85\x5d\x77\x43\xb5\x41\x04\xb4\xdf\x07\x3c\x48\x4e\x9b\x6c\xbf\xd5\xac\x67\x83\xcf\x90\x64\xd8\x47\xbf\xb4\x64\x0e\x5a\x54\x5f\xda\x66\x6c\x7a\xa9\x4b\xe5\x81\x1b\xb0\xb5\xc2\xe2\xee\x5d\xcb\x51\xf6\x7d\xd8\x92\xb5\x8d\x8e\x34\xb1\x88\xfb\xa1\xc4\x63\xb1\xb6\x53\x5f\x36\x68\xc3\x97\x31\x7d\x7f\x8e\xbb\xae\xc3\x56\x7f\xf3\x81\x93\xb2\x25\x33\xe7\xd5\xec\xc6\x2e\xb9\xf4\x2e\x4e\xd3\x71\x58\x51\xf2\x80\x14\xf1\xaa\x1b\x1d\xb7\x16\x84\xfc\x7d\xb0\x21\x74\x0e\xc8\x49\x37\xea\x1e\xa4\xb8\xc2\xd9\x61\x4a\x99\x32\xb8\x93\xff\x2b\x65\x27\x10\x8b\x71\x2d\xfb\x45\x75\xe1\xb7\xb9\x0e\xc6\x66\x7e\x66\x84\xed\x01\x40\xb3\x54\x0a\x56\x13\x0a\xa2\x13\x2c\xf4\xfb\x7a\xae\xbe\x95\x1d\xab\xa3\x02\x60\xd7\x30\xf8\xc9\x41\x04\x0d\x5a\xe4\x4f\x14\x1b\x3f\x94\xbb\x81\x92\x54\xc4\x54\x7a\xa2\xe0\xf5\xce\xf8\xdc\xa8\x48\x3a\x16\x97\x4d\x16\x5c\xd9\xbd\x22\xa1\x80\xb0\x88\xcf\x9a\x3e\x4a\x9e\x3a\x44\x56\x6b\x87\xf8\x6d\x7b\xdc\xe5\xe0\x74\xde\x58\xda\xaa\x8d\x3b\x43\x95\x25\x8f\x1e\x5a\xda\xf4\x58\xca\x6a\x56\xa7\x5e\x86\x6e\x42\xe7\x50\x9b\xfa\x1e\xbc\xbb\x2a\x7f\xfc\x02\x43\x72\x3f\x86\x88\xce\x47\x28\x46\xef\x75\x4c\x75\xbf\x47\x77\x85\xd5\x81\x8e\xbd\x50\x32\x5c\xc0\x6f\x3d\x10\x1d\x28\x97\x3f\x9b\x61\x5a\xbd\xd3\xbf\xdf\x6b\xfc\x32\x9e\xa6\x75\xdf\xd6\xa4\x73\x1c\x04\x23\x63\x61\x45\x1c\x92\x32\x12\x1e\xfa\x1e\x54\x21\xd4\x82\x33\x1d\x93\xd1\xd1\x2a\x8b\x3a\x73\x4e\xa7\x65\x17\x32\x97\x49\x7b\xf3\x5
e\x31\x85\x1e\x9b\xed\xb3\xfa\x00\xf0\xd2\x6b\xee\x61\xd3\x86\x47\xab\x16\x6b\xe3\x51\x39\xed\xa8\x69\x9c\x7d\xea\x1a\x2a\x6a\x7d\x77\x76\x52\x2e\xbb\x77\x8d\x3b\xd1\xea\x14\xb8\xf7\x0e\xd1\xa2\xc7\x57\xdb\xa4\x52\xca\xd9\x60\x82\x2e\x97\xd3\x7f\x8c\x97\x39\x24\x34\xda\xc6\xc9\x5b\x6a\x5b\xfb\xf4\x4b\x33\x51\xaf\xfe\x89\x73\x3a\x44\xc9\x89\x52\xf2\x5c\x45\x64\xcd\x41\x85\xec\xd0\xc7\xf8\xe5\x6d\x58\x56\xdf\xac\x87\x63\x9e\xee\x6a\x66\xc0\xa2\x35\x3d\xbf\xdd\x17\x44\xf6\x0e\x2b\x3d\xd5\x6c\x96\xe1\x98\x01\x3b\x45\xfe\x77\x06\x82\x15\x21\x6a\x92\xc3\xfe\xbe\xa5\x88\x32\x34\x69\x35\x60\xc0\x88\xfd\xbe\xc2\xaa\xef\xbd\x0e\xc2\x20\x08\x0e\x54\x63\xd8\xcd\x27\xc7\x63\x24\x4b\x7b\x7a\x39\xbf\xca\x4b\xcb\xe3\x0f\xc6\x64\x34\x79\xf1\xe5\x1c\xf5\x07\x8b\x19\x1c\x85\xfd\x15\x80\xac\x83\x69\x23\xcf\x4a\x77\xef\x9b\xba\xae\xed\xc8\xb1\x35\x8d\x16\x42\xe0\xaf\x65\xd4\x60\x94\x5c\xf7\x19\x46\xb3\x22\xa6\xc6\xb2\xc3\xb5\x25\x85\xdf\xa6\xf2\x7c\x09\x05\x00\x15\x98\x97\x5a\x0c\xa5\xdd\xab\x77\x24\xfd\x02\x9d\xd7\xde\x4d\x03\x2e\x6a\xe9\xa1\xe4\x81\x0d\x23\xa4\x7e\xfc\x25\x2b\x18\x09\x13\x34\xb2\xb9\x0b\xd3\xa7\x0c\xc3\xd9\x42\x93\x67\x2e\xe1\xe6\xeb\x3a\xaf\x84\x8b\x0a\x75\x44\xb5\xc0\x7b\xcf\x2a\x52\x05\xf3\x9e\x01\xcc\x1d\xff\x1a\x5f\x7d\x13\x8f\xd0\xaa\x26\x2e\xc5\x39\x95\x14\xc8\x40\x49\xfa\xc5\x2b\x80\x2a\xa9\x50\x95\x3b\xa7\x35\x11\x46\xa1\xec\x72\xc2\xce\xdf\xb9\xfd\x5b\x07\x3e\xfa\xb7\xc6\x4c\x4e\x68\x74\xcf\x4f\x64\x3d\x08\x96\x54\x6d\x52\x2d\x8c\x12\x67\x80\x7e\x67\x7f\xde\xdc\xaf\xd3\x27\xdc\x99\x33\x49\x73\xd0\x82\x73\x62\x4c\x0b\xb7\x00\x3a\x34\x17\xef\x33\xc1\x28\x58\x86\x1a\x46\x22\x7d\xcf\xed\x20\xde\xd4\x31\x52\xa9\xf6\x57\xb5\x5d\x91\xac\x18\x55\x53\x63\x15\x18\x21\x8b\xef\xd7\x8c\xfe\xba\x2e\xab\x2b\x00\x8d\x77\xd3\x5a\x89\x96\x68\xb6\x0b\x1c\xe0\xbc\x67\x6a\xd0\x55\x20\x93\x8c\xc0\xd8\xb7\x0f\xc2\xa3\xd3\x27\x2d\x57\x0c\xd2\xd8\x6c\xc2\xfb\x1e\x9d\x88\x5c\x73\xad\xb2\xf1\x8f\xdb\x38\x50\x6c\x41\xba\xc3\x4b\x08\x37\xf9\x63\x2d\x3d\xd6\x8e\xc4\x10\x2d\xae\x5f\x80\xe
d\x41\x12\x06\x53\x82\x14\x75\x7d\xdd\x6c\xe9\x77\x33\xbd\xa5\xd2\x86\x10\xc6\xc8\x67\x4a\xef\xb6\xa1\xf7\x3d\x33\x02\x1f\x59\x38\xbb\x77\xa6\x7c\x3b\x06\x54\xe7\x17\x2f\x96\xe0\x9d\x53\xb5\x20\xeb\x41\x76\x38\x59\x9c\x53\x75\xc0\xf2\x84\xf5\xaa\x94\x19\xe3\x4b\x15\x60\x71\x7a\x3e\x64\xcc\x04\x75\xfe\x31\x5b\x8d\xd4\x11\x12\x22\xb7\x44\xcf\x3a\xed\xbd\x52\xa9\xf0\x7b\x99\x94\x01\x18\x45\x03\x37\x99\xdd\x9c\x12\x27\xd9\xad\x36\xc5\xa8\xb5\xf8\xd2\xe5\xc4\x1f\xa4\xd8\xc2\x06\x72\x65\x27\xba\x98\xec\x80\x66\x21\x55\x45\x9f\x12\x5c\xff\x7a\xf5\x58\x28\x4a\xb9\x1a\xcf\xf4\x6f\x61\xf6\x13\x92\x16\xc0\xe6\xbf\xb9\xc6\x31\x83\x6f\x31\x8c\xf3\x07\x05\x8e\xda\x2f\x7e\xc7\x43\xe0\x9a\x9a\xfb\x26\xbc\xf7\x61\x69\x6f\xbe\x64\x62\x02\x78\xbf\x3e\xba\x81\x09\x79\x93\x27\x8f\xc4\x15\x2e\x4f\xfb\x38\xbd\x36\x0a\x36\x18\x1a\xba\x01\x7c\x9d\x68\x21\x7e\x5a\xce\x57\x94\x5b\x03\xd3\x71\xd4\xd4\x71\x08\x97\x4c\xc2\xc0\xe0\x30\x8c\xb9\xba\x42\xa4\xbe\x3b\x9e\x5f\x91\xb7\xf4\xbb\x75\x02\x56\x74\x74\xf1\xa8\x02\x85\x90\xaf\xea\x7b\x79\x10\x6d\xf0\xf5\xac\x47\x9f\xb2\x83\x99\xde\xd9\xbb\xc8\x48\x1a\xc0\xa8\x9c\x9b\x26\xf3\x5c\x59\xf8\xa3\x57\x03\x66\x57\x50\x36\x68\x22\x0c\x17\x51\xfc\x13\xd9\x3a\x17\xf5\x6f\xae\xa7\x03\x2f\x13\xab\xcc\xb1\x88\xc1\x00\x30\xdb\x9d\xfc\x57\xad\xd1\xf6\x80\xd6\x84\xdf\x02\x2f\xd4\x11\x4c\x98\xdf\x9e\x49\xcd\xec\x6a\x1a\x47\x9c\xd7\xd6\xb7\x8e\xd9\xbb\xd7\x45\x01\x95\x15\x5c\x20\xd6\x96\xd4\xd0\xb9\x32\xe5\x32\x0c\x83\x07\x70\x10\x38\x11\xf3\x45\x60\x6a\xec\x85\x92\x14\x91\x0f\xdf\x85\x82\x59\x4c\x18\x3e\x79\x59\xbb\x89\x0e\x54\xd5\x6e\x6b\x5f\x4c\xa8\xf5\xda\x69\x87\xad\x5d\x91\xab\x90\x3f\x95\x32\x7b\x48\xe7\xab\x0b\x26\x31\xf0\x66\xae\x07\xec\x6e\x53\xed\xe8\x4a\x9a\x63\x24\x18\x9c\xe8\x89\x6b\x90\x3a\x3d\x6a\x50\xec\x28\x70\xc3\x12\x3e\xb2\x48\xee\x1e\x28\xb3\x26\x2f\xa6\xc3\x2b\xba\x68\x60\x02\x12\x2e\x39\x43\xd2\x18\xeb\xe8\xc3\x97\xd0\x1f\x78\xfe\xd4\xeb\x5d\xe5\xf6\x1a\xb8\xb5\x05\x21\x88\xc4\x28\x98\xbe\x01\x65\xb6\x1a\xcb\x1c\x29\x79\x9e\x65\x0e\x00\x2
c\x9f\xdc\x13\x28\x2b\xaf\x79\x67\x81\x66\x0f\x63\x56\xfe\xa5\x2e\x6d\x78\xf1\xd4\xab\xf2\x91\xd5\x20\xbb\x85\xd2\x39\x50\xf1\x11\xf8\xd5\x25\x50\xc4\x7a\x3b\xad\xfd\x6b\x71\x6f\x0e\x9b\x6f\x0f\xdd\xd2\xf4\x72\x59\x93\x1a\xc8\xe8\xe5\x47\xb8\x05\xfc\xf5\x17\x2f\x48\xfc\xa6\xaf\x4e\x2b\x84\x7d\x44\xd6\x3e\xea\xe3\x3e\xaf\xef\x79\x6b\x7d\x0a\x54\x32\x28\xe2\x9a\x73\x9b\x09\x0d\x94\x57\x26\x09\xc5\x89\xc8\x32\x28\x23\x64\x71\x1d\x21\x59\x28\x33\x60\x3f\xd7\x68\xf6\x90\xff\x63\x46\x0b\x91\x9c\x0a\x6a\x1c\x0c\x2a\xbd\xec\xd3\xdf\x7d\x53\xc5\xfc\x0c\x7b\x14\x4a\x18\x4c\x55\xdb\x79\x1d\xbd\x64\x24\x32\x0a\x2c\x65\x9c\xce\xf2\x12\x3c\xaf\x48\x04\xc0\x3d\x10\x9d\xfc\x25\xa0\x8b\xb7\x75\x4d\xe5\xab\xbc\xd3\x55\xdf\xe4\xd9\xed\x74\xa2\xe4\x8e\x85\xdd\xdb\x35\x05\x01\xc0\x0b\xcf\xb5\xa5\xd8\xc7\x10\x6c\x31\x14\xac\x76\xc7\x34\x3c\x67\x09\xb7\xa0\x51\x72\xd3\xc7\xa0\x1f\xa2\xad\xef\x95\x18\x2e\x11\x0c\x49\x20\xe0\xb3\x1f\x88\x4f\x51\xf1\x4c\xdf\x94\xa4\xd8\x55\x7a\x59\x3f\xb5\xc4\x6c\xec\xaa\x34\x12\xbb\x34\x71\x28\x72\xdc\x30\xbc\x12\x96\x2b\x30\xe3\xec\x1c\x2d\x38\x7b\x26\x5d\xd1\x86\x2a\x03\xbe\x17\x51\x04\xf7\x73\xa8\x51\x97\xf5\x42\x4c\xbd\xfc\x58\x55\x11\x77\xbc\xfa\xfd\x10\x58\xfd\x57\x8f\x4e\x9c\xdb\x5c\xdf\xb7\x58\xf4\x3a\x4f\x96\x08\x39\xa2\x75\x2c\xb0\x54\xa8\x7a\x42\x63\x0e\x7d\xe6\x33\xae\xe8\x18\xfd\x0a\x5a\xc4\x5f\xd0\x2b\x20\xdd\x10\xe3\xb2\x46\xc6\xf8\xa5\xc4\xbe\x49\x9e\xfd\x04\x04\xe5\xf2\xbf\x6f\x81\x24\x4f\x39\xa1\xa2\x4f\x06\x2f\x69\x66\x38\x3d\xdc\x25\x7f\x4b\x8e\x2d\x08\xa0\x48\x2e\x67\x83\x80\x70\x58\x92\xe2\x44\xfd\x21\x3a\x3f\xfc\xd6\xf9\xec\x40\x88\xe6\x4a\x84\x20\xb7\xb4\xfe\xbe\x73\x4b\x60\xf2\x10\x03\x2d\x1c\xed\x88\x8e\xe3\x41\xae\xee\x0d\x00\xcc\x2c\x08\xc8\x59\xa6\x1e\xf6\x30\xe7\xeb\x3c\x7d\x8e\x6d\xb9\xb1\xc2\x37\x75\xc6\x96\xca\xca\x9a\x60\x4e\xe0\xba\x8f\x38\xfc\xad\x5e\x83\x85\x20\xd9\x6f\x2d\x49\x8c\x2b\x4d\x85\x84\x34\x13\x8b\xd8\x37\x47\xf5\x48\x0b\xde\x1f\x9c\x80\x6e\x30\x1c\xd9\x89\x68\x09\x55\x03\xc9\x58\xfc\xe6\xca\x7c\xb1\xdd\x9e\x5
5\xae\xae\xdf\xff\x8f\x83\x89\x81\x12\xe5\x4c\x83\x36\x03\xce\xbf\x7d\xed\x67\x74\x8b\x83\x3f\x6f\x82\x4d\x55\xde\x32\xdf\x73\x81\x17\xcf\x92\x5f\xd1\x83\x91\xe5\x62\x2f\x22\x78\x41\x6f\x60\xbe\xdf\xc7\xf6\xf8\x62\xe3\xdd\x2b\xf2\x9c\x92\x84\x41\xe0\x07\x45\x3b\xfa\x90\x2d\x24\x06\x22\x71\x86\x91\xb5\x5b\x80\xd0\xf8\x89\x66\x07\xd2\x5c\x0b\xff\x9a\x9d\xf1\xa2\xc9\x0b\xb6\xb7\xbc\xf2\x68\x01\x65\x86\xd4\x30\xbb\xce\xbd\xfb\xf6\xfb\x12\x8a\xa4\xdc\xba\x0f\x44\x27\x64\xf1\xbb\xb9\x4a\x0f\x94\xe0\xa7\x80\x79\x9b\x81\xe8\x17\xc3\x25\x5f\xaa\xb7\x05\xfc\x54\x45\xa9\xd3\x18\x4a\x8d\x50\x19\x86\xdf\xab\x77\xf9\x74\x8a\xd5\x20\x66\xc2\xb7\x8f\x43\xb8\x41\x36\x31\xa4\xd7\x15\x66\x31\xd5\xfe\x56\xc7\xd9\xb7\x51\x04\x72\x96\xad\x1f\xb6\xa9\x65\xc0\xfd\x92\x84\xcf\x66\x5d\x26\x80\x69\x73\x09\xab\xf7\x9c\x1d\xe9\x2d\xa4\xe4\x35\xb0\x45\x8d\xf1\xa3\x80\xca\x14\xb0\x4b\xbc\x60\xed\x47\xc4\xac\x87\x14\x53\xd2\x69\x5d\xd9\x4b\x9c\xcb\x39\x1d\x5e\x59\x38\x93\x98\xa9\x6f\xf8\xf9\xa4\x60\xfa\xe8\xf9\x35\x22\x1c\x5a\xc6\x0f\xbc\x0e\x1d\xea\x7b\x7c\x16\x0e\x27\x4b\xbb\x47\xdd\x49\xf2\x43\xbc\xf9\x83\x90\x8e\xdb\x0a\xed\x81\xa6\x08\x6e\xd4\xd9\x3a\xad\xba\xfb\xd6\x81\xde\xab\x88\xd7\xd8\xe4\xfc\xb7\xa4\xe1\x40\x57\xe4\x24\x42\x06\xd7\xce\xed\xd9\x77\xc2\x6b\x9c\xa4\xb3\xbf\xd5\x2a\xd5\xec\x4c\x5e\xd6\xf4\x41\x0f\x1e\x87\x12\x31\x99\xcf\x5e\x0f\x47\x9d\x22\xc3\xf2\x77\x9f\xfd\x0b\x42\xec\x2d\x4c\x81\x59\x45\xa8\xed\x70\xe4\x08\x37\x00\xac\xe8\xb6\x88\x67\x25\x2d\xac\xc9\x9b\x2b\x89\x6b\x87\x7b\xad\xac\x51\x10\xa9\x1b\xba\x99\x62\xe1\x0d\x40\x6b\xdb\xf9\xd4\xfe\xe6\xce\xe0\x47\x8a\xe8\xe4\x11\x65\x46\xe6\x73\x0b\xe8\x98\x8e\xcb\xfa\xc7\x45\xfb\x68\x65\xf4\x40\x55\xf3\xdb\x5f\x1a\x09\xfe\x1a\x4e\x16\x41\xed\xa5\x58\xb0\xba\x84\xf7\xbd\x5d\xba\x3c\x91\x6e\x26\x52\x61\x7f\x71\x08\x87\x38\xb0\xfd\xd7\x53\xd0\x9a\x36\x81\xf8\xb1\xfa\xc8\x3b\x8a\xb4\xee\xf0\x4e\x9a\x2f\xbe\x13\x49\x0b\x3a\xe1\x85\xce\xdd\xbb\x03\x95\x15\xe9\xa1\x80\xeb\x99\x0d\x42\xba\xec\xa5\x3a\xa9\x9e\x97\xcb\xd9\xea\x39\x71\x4
d\x75\xd1\xe1\x6f\x91\x72\xbe\x6b\x44\xba\x56\x68\x67\x06\xa5\xf0\xb9\x19\x9d\xd1\x3c\x13\x6d\x74\x21\xb2\x9b\x56\xe2\xcf\x3c\xd0\x3f\xfd\xc9\x64\xb7\x3f\xb4\x16\xad\x11\xf2\x05\x4c\xb9\x1b\x5e\xeb\x62\x28\xa9\x75\x1e\x98\xc0\x4b\xb4\x42\x65\xad\xed\x03\xf2\x70\x9b\x9a\x30\x3d\x01\xfb\xe9\x94\x4a\xf7\xc3\xa6\x85\x4f\x11\x05\x1e\xbf\x58\x33\xf8\x51\xa0\xd1\x7f\x49\x13\x47\x22\x61\x20\x49\x1a\x62\xab\xd4\x21\x9d\xd2\x47\x5f\x63\x90\xd1\x41\x80\x05\x06\x02\xc1\x03\xa5\x52\x3f\x63\x3a\x91\xcb\xfa\x64\xb9\x77\xcf\xa1\x76\xbc\xaa\xed\xe8\x92\x74\x7c\x9b\xad\x2a\x35\x58\xc2\x35\x36\x11\x38\xf9\xf5\x6d\xf0\x7d\x8d\x5f\x5e\x09\x38\x3a\x6f\x9a\x8b\x14\x66\x84\x4a\xcf\x4d\x55\x02\xb3\xe1\x73\x9a\x8e\xc6\xf8\x61\xb9\xa6\x0c\x53\x07\xd3\xcb\x84\x57\xeb\x4a\x02\xb4\x53\x25\x44\x62\x10\xe5\x34\xf3\x11\x93\xd6\x43\x2d\x31\x42\x4a\xd2\x85\x13\xb0\x3d\x9f\x3a\x2e\xeb\xe8\x86\x81\xd0\x52\x7a\xc9\xcf\xd7\xe7\xdd\x52\xa2\x07\x2e\x3d\x7b\x62\xab\x28\x89\xc3\x04\x10\xdd\x39\xef\x68\xf5\x49\x96\x67\x66\x8c\x70\xb8\x72\x11\xc6\x03\xde\x20\x3a\x79\x91\xe6\x88\x5d\xbf\xb7\x56\xfd\x32\xf9\xcd\x5f\x20\x30\xc7\xee\x49\xdc\x39\x9c\x16\xd5\xd3\xc7\x4e\xe2\x33\x79\x9e\x01\x41\xbc\xb0\xd3\x90\x3b\x50\xc1\x41\xab\x7e\x6e\x4f\xab\x94\x0c\xe8\xc0\xbb\xb4\xa0\xdf\x19\x71\x4a\x32\xc6\x12\x5f\x13\x66\xfb\xc3\x7b\x5b\x0e\x87\x25\x45\xa3\x0b\xfa\x89\x4d\x8d\xab\x86\xec\x89\x00\x6b\xa2\x19\x65\x32\x90\xc1\x87\xa3\x0f\xf2\xf5\x9b\x9f\xd1\x2b\x86\x0d\x4c\x07\x6c\x12\x7a\x03\x0a\x9f\x6f\xff\x08\xae\x60\x1f\x84\x4e\x5b\x3f\x9f\x63\x6c\x81\xa3\x3e\xeb\x23\x69\x7f\x83\x3f\x4f\xc1\x80\xea\x67\x3d\xa1\x45\xcd\x19\x90\xea\x4c\xcd\x0d\x6d\x3c\x6a\xa5\xd2\xfa\x0d\x3f\x28\x30\x6e\xb0\x4d\xef\x37\xc2\xae\xf3\xa3\x96\xac\x9f\x89\xfc\x69\xd6\xac\x89\x2d\xe2\x43\xfd\xba\x23\xc7\x96\xfe\x69\x38\x9a\x3d\x0f\xf3\x22\x1e\x2c\x5c\xed\xde\x06\xda\x92\x54\x64\xf1\x7b\xaf\x8d\x94\x7d\x6b\x89\x99\x9d\x61\xad\x66\xaf\x4e\x64\x45\xb6\x19\x37\x21\xa7\x66\xab\x0b\x9d\xe9\x84\xce\xf6\x77\x5f\x54\x98\x97\x02\x7b\x56\x78\x65\x26\xaa\x5e\x0
9\x03\x27\xfb\xfa\xcb\x78\x72\x73\x00\x31\x8c\xea\x7e\xdd\x43\x81\x42\xfb\xdc\x02\xaf\x7c\xd0\x3c\xc9\x02\x03\x70\x8b\xdf\xf7\xfd\x7c\xb1\x9f\x41\x88\x91\x60\xf0\x31\xfe\x40\xf4\xde\xb5\x39\x5f\xa3\xdb\x40\x57\x27\x3a\x1b\xe1\x01\x5b\x6b\xfa\xee\x13\x4f\x4f\xe4\xbc\x47\xda\xc3\xa8\x66\xea\x2e\xab\x37\xc5\x53\xc5\x12\x33\xac\xb3\x0b\x8a\x23\x4a\x5e\xd2\x27\x5a\x78\x3e\xac\x2b\xd3\x6c\x86\x43\x93\x3d\xc4\xfd\x63\x7a\x85\x4c\x91\x63\xe6\x81\x9b\xaa\xf9\x43\xf5\x06\xa5\x11\x2a\xd1\x1a\xe1\x15\xce\xf6\x0f\x3f\xf9\x1a\x33\x19\xf8\xc9\x17\xd1\xc5\x1c\x4e\x49\xf6\xf0\xc3\x23\xb8\x02\xec\xeb\xa2\x1c\x1b\x3c\x51\x98\xba\x33\x14\x0f\x82\x24\xf4\xdf\x2e\x7f\xf4\x4a\x86\x69\xc8\xcc\x99\x8c\xe0\x15\xfa\xe2\x3f\x1c\x02\x23\x68\x50\x5c\x7f\x15\x2d\x14\x44\x32\x2b\xd3\xc3\x32\x1a\xf0\xb4\x19\xb9\xb9\x13\xb1\xc1\xdf\x82\x91\x4c\x77\x41\x54\xd1\xd7\x56\xdb\x32\x23\x7f\x2d\x21\x10\x32\x33\x4d\x14\x55\x35\xaa\xe7\x16\x86\x4a\x78\xce\xee\x09\x8f\xc9\xb1\x71\x2a\x35\x9d\x42\x8b\xb8\xd4\xdc\x58\x92\x84\xc1\xa1\xad\x15\x7f\xef\x1a\xf3\xf7\xc8\x11\x7c\xa4\x2d\x59\x21\xbf\x3f\x0e\x29\x23\x6b\xec\x45\x04\x66\xa8\xaf\x35\xf2\x35\xcb\x24\x6a\x35\xdf\x6b\x73\xaf\x6e\x98\xc0\x79\x7a\x30\xaa\x50\xd0\xc3\xb9\xa1\xd2\x8c\x65\xd9\x34\x18\xb6\x5e\xec\x0a\x25\x71\xc3\x70\x6e\xa7\x0a\x44\xc7\xdf\xc4\x16\xe9\xcc\xcf\x0f\x3b\x98\x06\xf3\x58\x62\xaf\x9f\x7d\x39\xfd\x97\x7c\xce\x0c\x09\xac\xda\xf8\x7b\xdf\x77\xe1\x16\x1c\xc1\x85\x3b\x93\x7f\x46\x53\xf7\xac\x3f\x91\x23\x7c\xf4\x4c\x2c\x0d\x77\xf1\xfc\x12\xea\x54\x4a\x6d\x06\x11\x3c\x09\x03\x0c\x63\xfc\x32\x58\xee\x02\xb5\x3c\x03\xd9\x81\x02\x63\xfa\xd6\xe3\xd0\x6c\xdf\x70\xe1\x40\xf7\x84\x67\xfe\x5e\x1a\xdc\x96\xd7\x89\x56\xd9\x17\x75\x0f\xba\xb2\x37\x47\x8d\xef\xf7\x60\x80\xf2\x17\x1b\x2f\x5e\xf4\xca\xe8\x65\xc7\xab\x78\xad\x6c\xbd\x14\x42\x17\x60\x7e\xee\x0f\x55\x5a\x06\x49\x35\xf6\x81\x59\x52\x4f\x3b\x1c\x21\x37\x04\x9d\x5f\xb5\x42\xf6\x15\x35\xed\x41\x4d\x95\x54\xa6\xff\x8b\x09\xd6\x7b\x37\x51\xea\x9c\xdd\x9d\x9a\x05\xc2\xaa\xeb\x93\x2f\x58\xa0\x5b\x8c\x3
0\x2b\x10\x49\x09\xc1\xac\x40\x8d\x5c\x19\x86\xf1\x43\x8d\x46\x22\x97\xa0\x07\x12\x7a\x61\x33\x35\x24\xd8\x70\xfd\xbd\x0d\x91\x34\xd0\xc4\x6f\x54\xa6\x39\x1d\xf1\xf5\x37\xb4\x44\x10\x55\x3d\x6c\x7c\x79\x06\x4b\x10\x96\x5b\x18\x21\x85\x35\xd5\xd8\xb9\xb0\x0d\x2d\x12\xa1\xdb\x7f\x7d\x32\x1e\xbd\x75\xe8\x49\xbd\x03\x46\xab\x45\x50\xec\xab\x3a\x05\xc0\x09\x93\xbc\x64\xf9\x6d\xe0\x8b\x21\x46\xbd\xf4\x28\xa9\x7d\xd4\xc1\x27\xfe\xc1\xff\x31\x19\xd9\xd1\x88\x97\x60\x58\x5b\x7e\xe7\x6f\x00\x9b\x7c\x9b\x88\xae\x0a\x16\x09\x73\x6f\x17\xb3\xbf\x9c\x9c\x3b\xfd\xad\xe8\x53\x6b\x89\x75\x6d\x67\x81\xbb\x6f\x42\xff\x89\xcb\x5e\xf6\x4e\xbc\x87\x64\x83\x95\x50\x7b\x81\x7f\xba\x46\x01\x99\xbe\x05\x40\x69\xf3\x83\xe3\x8b\xcc\x5d\x96\x6a\x4f\x98\xcd\xdc\x30\x8a\xd4\x3d\x83\x51\x0c\x8b\xed\xf4\x99\xd4\x95\x3b\x78\xae\xfa\x4c\xe5\x53\xf7\x77\x3d\x71\x6e\xeb\x2a\xc6\x4c\x6c\xaa\x5d\x59\x12\x3b\x12\x74\xac\x67\xa4\x4b\xfd\xe3\x35\x8b\x1c\x57\xc2\x2f\x20\x82\xc6\x8d\x6a\xd4\xe2\x23\xd8\xd1\x35\x24\xeb\x30\x38\xb2\x13\xad\xd0\x69\x87\x32\x03\x9a\x97\xe3\x75\x32\xb2\x18\xe9\x92\xb6\xa4\x2e\x50\x0b\x61\x74\xce\xd8\xc3\x07\x64\x70\x05\xe9\x64\xc0\x37\xc2\xa9\x48\x1c\x92\xb2\x1a\x55\xbc\x50\xb3\x46\xef\x9a\x9c\xfd\xb9\xc2\xf5\xd0\x60\xa0\x95\x17\x54\xd0\xfd\x13\x81\xe6\x41\x46\x09\xea\x31\x6f\x62\xb9\x55\xbd\xa6\xa3\xd2\xfc\x54\xa7\xaf\xfb\x14\x63\x90\x22\xe8\xd9\x3a\x96\xba\x1e\x37\x53\x11\x86\x7e\x8f\xad\x96\xdf\x4e\x0a\xef\x77\x03\x99\x35\xa5\xe1\xb0\x78\x0c\xa8\xc1\xe6\xe4\x49\x5d\x71\x6a\x1a\xfa\xca\x50\x20\x80\x7c\xf2\xf8\x83\xe8\x08\xc1\x56\xff\x79\x4b\x4a\x92\x39\x44\x16\x14\xce\xee\x7f\x6c\xc4\x60\xf2\x4f\x1f\xbd\xbc\x2b\xe7\xaf\xce\xbf\x79\xe8\x1e\x94\x98\x07\x30\x1a\xbf\x42\x9f\x14\x31\xbf\x55\xae\x5f\xca\x68\x21\x67\x1f\x63\x6b\x55\x70\x85\x46\x0d\xb5\x81\xe0\x7c\x5f\x71\xb7\x01\x20\x8f\xeb\xb4\xa2\xdc\xd5\xe0\x42\x76\xd2\xd2\xaa\x7f\x62\xe9\x4c\x7c\x40\x1e\xab\xd4\xcc\x81\x6d\x09\xce\xed\x5b\x12\x68\xca\x9a\x18\x77\x72\x46\x4e\xef\xbc\xaa\x4b\x7c\x46\xfd\x8f\x3a\xc0\x1e\x04\x60\x77\x8
3\x09\x3c\x08\x44\x47\xb8\x19\x19\x10\x30\xca\x54\x10\xb9\x3a\x2f\xff\xf7\xe1\xa2\x6b\xf9\xce\xdf\x70\x63\x7b\x69\x96\xb1\x7c\x84\x76\xbf\x68\xee\x57\x5c\x48\xe7\x99\xe8\x47\xe1\x66\x32\x72\x37\x1a\x5d\x48\x6c\x22\x73\xdc\x50\xa2\xd0\x23\x32\xd0\xd7\x06\xe2\x8d\x0b\x37\xbe\x21\xf1\xf9\xf0\x11\x4b\xd1\xdd\xb4\x65\x7f\x0b\xcc\x2e\xb7\x8b\x7f\x62\x91\x4a\x42\x7c\x23\x83\x40\xe7\x03\xb9\x85\x7b\x99\xf4\xed\x21\x0f\x82\xdd\xef\xd6\x1b\x46\x05\x24\xd3\x9a\x5c\x34\x45\xec\x48\x79\xa9\x99\xde\x17\xf9\xe4\x68\xf5\x2c\x08\x88\x8d\xe4\xf0\x7e\xd9\x9a\xd5\xe9\x8b\x67\x6b\x22\xd4\x06\x84\xcb\x4e\x91\x46\x71\x30\x3b\x60\xf7\x88\x5c\x57\x9f\x3e\x9b\x89\xc4\xdb\x17\xd9\xac\x57\x97\x9e\xa9\x43\x2d\x40\xba\xcc\x95\xdd\x56\x2f\x9d\xab\xb5\x28\x1f\x9e\xc0\xce\xae\xd7\x6a\x83\x88\xc2\xc2\xf6\xd2\x38\xd1\xad\x6e\x48\xdb\x30\x46\x2f\x0b\x50\xe3\xce\x19\xbb\xc2\x04\xef\x4d\x27\xbd\xea\xd5\xc0\xd0\x3d\x1f\x19\xca\xb9\x75\xfb\x45\x4b\x74\x24\x2f\x40\x52\x56\x7c\x8b\x29\x60\x82\x2e\xe4\x0f\x87\x42\x8b\x13\x7f\xd2\x3a\x3a\x15\x8f\x83\x3f\x81\x67\xe9\x1f\x22\xb5\x32\x7b\xc7\xf6\x05\xd7\x48\x62\x86\xe3\x5a\x56\xd6\x98\xf0\x86\x1b\x6d\x03\x35\xda\x61\xd6\x90\xce\x86\xa2\x64\x4c\x49\xa2\x6b\xe5\xae\xfc\xe9\xcf\x03\x8e\x96\x42\x5e\x9a\x95\x4a\x52\x13\x07\x06\x38\xc2\x8f\xed\xff\x58\xfb\x36\x80\x64\x5c\x16\x84\xf4\x13\x6c\xad\xda\x67\x86\x4a\x10\xbc\xf3\x84\x28\x07\x94\xa4\xef\xa6\x14\x6c\x91\x8d\x03\xc7\xea\x88\xd9\xb3\xd9\xfa\x3c\x4e\x34\x6b\x91\xfc\xba\xf9\xec\xb1\xe0\x34\x00\x27\xf5\x9f\xce\x66\xc2\xa5\xe3\x95\x26\xbb\x6e\x44\xc2\x9f\x31\x7e\x65\xa5\x87\xe7\x78\xfc\x00\x77\x5c\xb6\x95\x04\x4e\x39\xd2\xa2\x4a\xb8\x22\x68\x2d\xc1\x59\xf7\x47\xa5\x2d\x7e\x80\x2f\xd1\x13\xfe\x08\x9d\xf3\x6a\x35\xc3\x03\x3f\xed\xb4\xba\x7f\xef\xfe\xe9\xf2\xf4\x83\x24\x1c\xce\xc5\x51\x58\x4d\xb9\x3a\x98\x14\xa9\x37\xeb\xb0\xb7\xa0\x2b\x7d\xb4\x2e\x02\x6d\x13\x33\xd4\x60\x24\x75\x10\x39\x1d\x33\x92\x06\xfa\x05\x6e\xc0\xa5\xea\x13\xd3\xa3\xde\xba\xad\x47\x8e\x9d\xbc\x05\x7d\x56\x1b\x37\x2f\xbd\x36\xe7\x20\x57\x58\xa5\xb
8\xd0\xb4\x60\x43\x29\x31\x27\x84\x18\x93\x31\x09\x18\xda\x09\x87\x99\xc8\x28\x32\x67\xc0\x0b\x75\x11\x9d\x13\x62\xc3\xff\xb7\x6e\xb8\x39\x0b\x9a\xcc\x70\x10\x04\x01\xc0\xa3\x9c\x05\x4e\x04\x80\x0b\x07\x65\x85\x1b\x0c\x8b\xd6\x84\x92\x13\x0f\x11\xa5\x29\xc8\x48\x8c\x0e\x57\x82\x54\xa9\x75\xd9\x2c\x23\x93\x6a\x0d\x5e\xa8\x82\xdd\xd9\x80\x9b\x5e\x0d\xf3\xbe\x4d\xf1\x4a\xff\x97\xd8\x12\xc5\x85\x76\xc5\x9c\xbc\x19\x36\x3c\xd5\xfd\x28\xc6\x94\xbd\x74\x8f\x40\x10\x80\x3a\x5c\x75\xfd\x49\x01\xb5\xe9\xab\x0e\xe0\xf1\x89\x65\x51\x9e\x71\x5a\xd6\x6c\xbc\x2d\x92\xd6\x89\xca\x52\xb9\x79\x1f\xac\xf4\xf9\x38\x54\x1f\x5e\xbc\x6a\x96\x9a\x07\xf9\xba\xac\x7c\x52\x84\xf6\x65\xfd\xb6\x17\x17\xd9\x6f\x46\x0f\x84\x44\x5b\x9d\x9e\x3e\x9a\xec\xce\x0d\xcc\x1d\x0a\xac\xc4\x96\x28\x7f\x23\xb8\xd1\x80\x86\xe3\x9e\xe4\xb9\xb5\x0f\x76\xc9\xc0\x8d\x28\x2f\x4c\xf1\x32\x98\x1f\xe7\x86\xde\xb8\x68\x23\x00\x56\xeb\xb2\xa6\xbd\xa3\xf1\x0c\xa6\xfe\x6a\x31\x0e\x44\xd7\x1f\xe6\x01\xb4\x81\xdc\xf0\x7c\xd8\xb9\xb1\x62\xea\x16\x7c\x3f\xbe\x31\x04\x49\x0c\xcb\x0d\x0c\x24\x85\x1d\x51\x62\x79\xd1\x2b\xe9\xd9\x53\xa5\x60\x52\x8d\x84\xee\x06\x04\x3f\x9c\x82\xe9\x08\x6c\x15\x4c\xe3\x88\xc8\xa4\x17\x94\x37\x67\x87\x11\xeb\x4e\x77\x65\x27\xc3\xbe\x6b\x47\x2a\x4b\xf8\xcd\x95\x82\x8b\x3f\x78\xa5\xb3\x03\x3c\xad\xa8\x50\x33\xd1\x13\xd7\x4f\x6a\xf0\xee\xe2\x5a\x25\xde\x0f\x8d\x7b\xfb\x2a\xf1\xfe\x78\xe8\x5b\x20\xc0\xa2\x2b\x4b\x73\x05\x5a\x5e\x81\xe4\x36\xc5\x21\x09\x13\x7d\xf4\xf8\xe7\x25\x78\x46\x34\x5f\x4a\x69\x72\xc5\x08\xc9\x74\x5e\xd5\xba\x26\x19\x06\xec\xfa\x72\xdb\x9f\xa9\xc7\xcf\xac\x25\x7a\xcd\xb8\xc2\x69\xc5\xf5\x91\x04\xe9\x26\x08\x83\x51\x71\x2e\x96\xfe\x57\x27\x4d\x56\x97\xc4\x0f\xd4\xa3\x43\x99\x41\xb4\xd5\xe9\x0d\x34\x3e\xbf\x12\xef\x5c\xae\xfe\xa9\xf0\x91\x85\xcd\x9c\x0a\x02\x87\x7c\xf9\xa9\x89\x5e\x73\xe3\x73\x3e\xd9\xa3\xc0\x74\x04\x7d\xd2\xf2\xae\x33\x28\x55\x09\x4a\x46\x13\xf7\x8f\x57\xc3\x61\xc9\xfc\xca\x64\x29\xf1\x82\xf7\x41\xa9\x50\x30\xba\x50\x5c\x45\x8f\x28\x1b\xa8\xb2\xb2\x67\x19\xf1\xba\xc
6\x02\x7b\x93\x4e\x2b\xe0\x07\x9e\x49\xf0\xe5\x17\x88\x57\x0b\x04\x14\xbf\xed\xf4\xe7\xc0\x9a\xbe\xa3\x1e\x84\x9b\x8f\xe9\x4e\x0d\xc6\xef\x38\xa8\xc5\xfe\xe3\xc5\xce\x04\x7f\x55\x9f\x9c\x99\x89\x4c\xdc\x63\x70\xda\x40\x38\x68\xcb\xc7\x26\x7f\x65\x08\x7d\xfd\xf2\x44\x0a\x06\xdc\xfd\xf3\xe6\xc3\x96\xc2\xc6\xbd\xe9\xae\xe0\xb0\xe0\xe5\xff\x79\xc6\x44\xfb\x8c\x94\x69\x6e\x57\xea\x2c\x0d\x6d\xe3\x51\x5f\x4c\x36\xad\x3e\x18\x20\xab\x05\x81\x72\x52\xe0\x76\x06\xb0\xb2\x36\x87\x2a\x40\xe4\xb8\x0e\x1f\xf3\xae\xcd\xc7\x65\x4d\x6b\x45\x9f\x2e\xbf\x20\xac\xf9\x34\x9d\xee\xc2\x02\x78\x48\xc2\x60\x47\x33\x50\xbc\x25\xf4\x43\x3e\x76\x4c\x2a\xa1\xb9\xc9\xc3\x94\xbf\xbd\x5f\xff\xf4\x2b\xe1\xf4\x6f\x01\xbf\x47\xe2\x55\x21\xfb\xc9\xfb\x28\x68\xe6\x5c\x31\x7c\x82\xe2\x00\x78\xbd\x38\x05\xa7\x47\x28\x0e\xae\xdf\xfe\x2c\x2f\x25\xee\x5d\xe6\xd9\xcb\x1a\xb7\x80\x20\xdc\x1e\x38\xc5\x08\x71\xae\x0c\x02\xa9\xa3\xa2\xab\x05\x67\x22\xf1\x29\x2a\x6e\x1b\x7e\x74\xcf\x01\xd5\x46\xcc\xb6\xcd\xd6\x13\xbe\xa6\xe3\x62\xc9\x1e\x76\x27\x78\x65\x1e\x1f\x06\xdb\xca\xfb\x6b\xef\xbd\xd2\x42\x62\x71\xb0\x49\xc5\x21\x22\xb1\x6c\xa2\xce\x0c\x84\xa0\x8a\xea\x40\xd2\x33\x8e\x06\xbc\x8c\x40\x52\xfe\x68\x50\x06\xe2\x3f\x7e\x3a\xed\x1b\x66\x29\xb0\x52\x3a\x02\x01\x1c\x43\xfa\x12\x1b\x5e\xcc\xea\x40\x18\x20\x39\x20\x16\x66\xac\xe3\xd9\xcb\x60\x48\x42\x94\x3c\x1a\xf3\xcb\x0b\x9d\x9f\x4b\xaf\x4f\x84\xa7\xa8\x54\xdf\x48\x8f\xf0\x05\x10\x0e\x8c\x80\xa0\x5f\xd4\x37\x39\xc2\x75\x2e\xb1\x52\xa4\x88\x6a\x80\x83\x50\x2a\x8e\xf1\x0b\x5b\x20\x0e\x92\x9f\x60\xca\xfa\xf4\xd3\xa5\x18\x7d\x73\xc6\xbd\x77\x4d\x5c\x91\x4c\x18\xef\xdb\xbc\xa2\x6d\x33\x46\x54\xb6\xd8\xb9\x0f\x91\x63\xcb\x7c\xd9\x3c\x2f\x15\x2e\x24\xbe\xb9\xcc\xae\x46\xf3\xeb\xd5\x29\x4d\xfa\xfa\xdd\x94\x51\xf2\x20\x45\xef\xa6\x01\xd9\x12\x8b\x97\xa5\x45\x4b\x5e\xb3\xc1\xf2\x02\x10\xad\x01\x5d\x64\x76\xb1\xd1\x29\xb7\xd9\xe5\xe6\x49\x04\x92\xc0\x18\x46\x73\xdd\x49\xb9\xa9\xb8\xf8\xb0\x3f\x0f\x25\x36\x7e\xf0\x73\x96\x4a\x75\xa9\x0b\x25\x17\xfc\xa5\x9f\x64\x27\xc2\xa2\x3
a\x5e\xbc\x23\x8d\x63\x47\xe2\x9f\xf9\xab\xf8\xb4\x48\x29\x24\x9d\xe1\x15\xed\xa9\xb6\x4f\x85\xe8\x4f\xf5\x4b\x6b\xb8\xaa\xd3\x8a\x42\x9b\xe4\x51\x27\xf4\x03\xf1\x89\x2d\x1a\xf8\x1e\xa1\x9b\xdf\x31\xef\xbd\x49\x82\xe9\xe2\xa0\x24\x06\xca\x30\x96\xc0\x0c\xf9\x9e\x2c\x0f\xa6\xb3\x62\x47\xa1\x2c\x49\xfa\xf1\x2f\xae\xa2\x9b\x32\xe5\xb2\xbe\xf1\xe2\x74\xe7\x49\xca\xe3\x01\xaa\xfb\x02\x21\xab\xbf\x7c\xd3\x1e\xe5\xaf\x4f\x2a\x5e\xa3\xf6\x7b\x8c\xd3\xa7\xcd\x27\x64\xf2\xce\xfe\x4a\xd0\x67\xa3\x3a\xe5\x9b\x09\xbd\xe3\x9c\x38\x56\x2b\x73\x4a\xb8\xa4\x7f\xa7\xbc\xf5\xb1\x46\x7b\xa9\xeb\x42\x35\x5c\xef\xf6\x76\x26\xdc\xfe\x90\xa6\x4d\xdc\x97\x34\x1f\xf1\x93\xbc\xd4\xc1\x25\x0a\x73\x5a\xd4\x72\xd8\x33\xf5\x00\x5a\x05\xa3\xca\x2b\xca\x90\xd1\xb7\x66\xa5\xfa\xb1\xc3\xc4\x2f\xd0\xae\x49\xcf\xaf\x38\x3c\x73\xca\x20\x44\x2c\xaf\xea\x27\x4f\x4c\x3d\x82\x50\x28\xbf\x26\xc3\xdd\x51\x08\x1f\x59\xd9\x0e\x3e\x03\xa0\x77\x9c\x5b\xf2\xc4\x95\xd6\xcd\x30\x0c\xc7\x30\xae\xef\xb0\x01\x37\x6f\x2a\x64\x30\x44\xf7\xd0\xd4\xe1\x65\x10\x70\x71\x40\x65\xb9\xe8\xac\xf0\x2a\xeb\x99\x23\xef\x6b\x4c\x05\xb4\x21\x58\x9d\x10\x52\x99\xb3\xf9\xdc\x70\x11\x05\x8c\x5b\x58\x7c\x43\xbd\xb4\xbe\x0d\x2f\xf1\x55\xd9\x19\x49\xb4\x73\xf1\xcd\x8d\x91\xf9\x59\xdf\x83\x6f\xe8\x55\x75\xc1\x60\x88\xaa\x09\xef\xec\xb5\xb5\x35\x50\x65\x42\x63\x57\x5c\xd7\xf4\x0d\x5c\x4b\x44\x2e\xdd\x3b\x4f\xa8\x3d\x13\x78\x21\x7c\xa5\xc7\xf9\x29\x53\x65\xa6\x66\xa9\xa7\x70\xde\x94\x21\x58\x6c\xca\x28\x53\xe2\x5a\xb0\xde\x3a\x3f\xaf\xee\x96\x5a\x2b\x91\x18\x23\x40\x96\x27\x51\xb0\xf5\x9d\x40\xdd\xd1\x0e\x2d\x5e\xfd\xc8\x56\xc0\x10\x16\x49\x4c\x67\xec\x0e\xd4\xb8\x78\x1b\x32\xea\x3a\x7c\xd5\xd2\x6c\xb7\x95\x3c\x97\x16\xc9\xd6\xfc\x96\x84\x07\x0a\x70\x2f\x72\x73\xe0\x1a\xb5\x42\x2d\x73\xc1\x4d\x50\xe3\xcb\xff\xe6\x59\xd7\x4c\x83\xf0\xec\x5e\x8b\x53\x19\xcc\x80\x8f\x5a\x3a\x68\x76\x80\x00\x80\x95\x73\xfb\x0a\x8d\xe3\xbb\x20\x6a\x74\x31\xc2\xd7\x84\xb9\x6a\x2d\x23\x30\xe5\xa0\xa4\xa1\xdc\x3d\xf7\xad\x1b\x47\xae\xf3\x4a\x4f\x84\xc3\x52\xfd\xf2\x7
6\x7e\xf5\xce\x1a\x3e\xfc\x6f\xd0\xae\x80\x4f\xda\x58\x6c\xc0\xe3\x78\x47\x47\x77\xad\x4d\x75\x05\xda\x02\x51\xb1\x53\x4b\x72\xde\x06\x02\x7f\x8f\xcb\x23\x40\x82\x06\x88\x5b\x4f\x4b\xb7\x73\x10\x83\xaa\x94\xf8\x08\x7d\x05\xd1\xe8\x0c\x84\xd5\x60\x83\x01\x7f\x0b\x08\x45\x61\x68\x3b\xeb\xc8\x64\x82\x37\x23\xda\x90\xfe\x5b\x06\x83\x82\x94\x37\x9f\x54\x24\x3b\xee\x3b\x21\x39\x92\xaf\xfc\x70\x0e\x44\xc5\xcd\xcf\xa9\x7d\x0a\x6a\xf4\xa6\x5f\x66\x9a\x5d\x5d\xdb\x3a\x6e\x7a\xfd\x6b\xbd\x7e\x26\xe1\x3b\xdf\xdc\x7f\xd9\xcd\xfd\x7d\x93\xb4\xad\x69\xaf\xfc\xf0\x35\xf1\x61\xba\x92\xd1\x53\xd0\xe3\x3f\x0e\x54\xb5\xf5\xbd\x58\x66\x30\x3c\x1f\xa7\xc0\x92\x22\x52\x40\x73\x66\x24\xd2\xf0\x77\x09\xfd\xad\xc8\xfa\x9c\xef\x02\xb8\xf0\xa3\x6f\xa1\x2a\xe2\xc4\x9f\xdc\xa2\xd8\xde\xef\x04\xd4\x60\x02\xf1\x89\xe8\x33\xaa\x2a\xfd\x47\x35\x73\x52\x70\x7d\xee\x6b\x55\x1f\xa1\x72\x87\xea\x9f\xfe\xfd\xac\xab\x35\xe5\xef\x28\x69\x9a\x1a\x84\x7d\x5a\x7c\xa2\x52\xa6\x8e\x19\xbb\xdb\x64\x2e\xae\x9f\x55\xa3\xa3\x9f\x17\x43\x61\x4c\x24\x8b\x13\x34\xbf\x1c\x09\x0e\xea\x02\x29\x53\xe6\x91\x23\xd8\xac\x5f\x69\xcd\xdc\xb7\x94\xc7\x89\x64\x71\xae\xcb\xf1\x2a\xd1\x10\xe5\x29\x3d\x96\xeb\xd1\xaf\x7f\xec\x82\x06\x01\xb2\x00\xb5\xe1\x7b\xc0\x0d\xdf\xc3\xd1\xb7\xb8\x5f\x1b\xef\x2a\x46\x65\xc6\xf8\x09\x8f\xee\xaa\x1e\x06\x99\x50\x21\x17\xc5\x85\xaa\x5c\x3d\xff\x78\x84\x17\xd9\xab\xe2\xd7\x6a\x52\x4a\xf4\x20\x1b\xbf\x32\xbb\xda\x8e\x57\x2b\xfc\x0a\xb5\x9a\x78\xb0\xce\xe5\x93\xec\x4d\x89\x2f\xa2\x00\x0e\x83\x64\x8b\x0f\x61\xee\x73\x47\x88\x8a\x13\x45\x7b\x5f\x70\x25\xef\x51\xae\x0a\x5a\x71\x1a\x80\xe0\xcc\x58\xcf\x75\xdd\xb7\x95\xe5\xe6\x55\xdd\x37\xf5\xdd\x37\x01\xbb\x54\x95\xdb\x09\x54\xa1\x65\xf9\xd9\x53\x17\x4c\xb2\x9a\x30\x5d\xdf\xed\x9c\xa2\x8d\x57\xb5\xed\xd9\x20\x70\x19\x1b\x92\x47\x4b\x1a\x37\xff\x2f\xfd\x0f\x4b\x1a\xb6\x79\x30\xe0\xa5\x0f\xe3\x4c\x9c\x87\xf4\x08\x9b\x93\x99\xe2\x16\x6f\x23\x9e\x31\x49\x9d\x67\x03\x3b\x56\x51\xac\xed\x07\x62\x10\xfc\xab\xa5\x66\x7e\xec\x10\x1e\x1b\xb1\x16\xd3\x86\x93\x5e\x4
2\x91\xf1\x19\x41\xf2\x60\xe7\x87\xe6\x42\x74\x65\x76\xff\xae\xb5\x7e\xb1\x49\x55\x1a\xb2\x5c\x7d\x3a\xa8\x60\x8f\x97\x61\xc1\xd5\xf5\x8f\xb3\xd7\xb1\xd4\x42\xf8\xdc\x66\xe2\x76\x29\xd5\x2c\x7a\x5c\x2d\xc1\x0a\x26\xf2\x6b\x5e\xdb\x79\x5d\xe7\x55\x60\x9a\xbe\xb4\xe8\x69\xa2\x25\x20\x86\xff\x6e\x2f\xdd\xb7\x74\x44\xdb\x8d\x43\x12\xe1\x2e\xb3\x23\x24\x92\x3f\x20\xee\xe2\x3e\xad\x3f\x5a\x53\x07\x3b\x49\x7c\xd7\xe2\x7c\x69\x7f\x63\x11\x4f\x02\x1c\xc3\x45\x86\x4f\xd8\x68\x34\xcc\xd2\xfb\xdd\x5a\xd0\x66\xc7\xba\x78\xe4\x04\x43\x32\xab\xff\x1d\x2f\xbe\xa8\xae\x70\xb4\xd0\xf6\xaf\x47\x16\x06\x54\xa6\x6c\xad\x85\x52\x9e\x91\x5d\x92\x3c\x9a\x40\xf9\xa8\xd1\x4a\x06\xe8\xed\x77\xd0\xc4\xe5\xd1\x25\x58\xce\x13\xb3\xfe\x39\x5d\x3c\xbe\xfc\xf5\xd0\x8a\x6b\xf4\x82\xa9\x18\xc6\xc4\xb4\x1c\x57\xd5\x23\xb9\xaa\xd0\x95\xae\x5e\x3c\xc7\x60\xc3\x13\x52\x43\x3a\x42\xb4\xfa\x00\x09\x9d\xf7\x5e\xd8\x47\x83\xa0\x73\xde\x5b\x1e\xdd\xeb\x89\xf0\xb5\xa7\x18\xa1\x8f\x0b\xc9\x92\xc0\x0f\x04\x47\x4b\x06\x3a\x9f\xd4\x57\x2a\x3e\x2f\x16\x67\xab\x11\x2e\xb0\x4b\x68\x00\x25\xa7\x8f\x37\x87\xc5\xed\xc0\x07\xda\xc9\xc9\x44\xa3\x59\x97\x79\xfb\xd6\xf1\xe2\xec\x78\x3a\x87\x9f\x31\x39\x33\x1e\xb8\xa9\xd2\xd2\xea\x4a\x7a\xf4\xed\x26\x51\xf1\xa2\x22\x11\x0e\x23\xa7\x6f\x03\xca\x5a\xa3\x2a\xfb\xad\x5a\xdb\xfe\xba\x1f\x62\x94\x8f\xf6\x90\xf4\x4b\x0e\x60\x74\x48\xdf\xf7\xd1\x01\xa4\x36\x90\xfe\xe5\xff\x22\x49\x27\xcb\x6b\x34\xa7\x6e\x59\xd3\x71\x7d\xc0\xc6\xf0\x42\x78\xcf\xc4\xc7\xf7\xe9\x32\x59\xfd\x41\xf1\x5e\xf0\x42\xe5\xee\xa1\x13\xa3\x06\xa2\x0f\xc8\x33\xc9\xf9\x73\x1b\x57\x86\x18\x35\x56\x94\x4f\x1d\x87\x22\x8b\xc0\x3a\x7d\x5a\x20\x83\xf9\xa6\x9c\xc8\xca\xfc\x37\xce\xf8\x47\x9a\x36\x96\x1b\x33\x4a\xa9\xcb\x90\x5f\x11\x8e\x59\x96\x0c\x92\x70\x8b\xae\x07\x18\xbf\x06\xef\xe6\x7a\x78\x8d\x17\x3f\x7c\x84\xa3\x1b\xa2\xe3\x07\xc2\xa3\x7b\xfe\x67\x7e\x78\x8b\xa4\x05\xd0\xc5\xbb\x3f\x7a\xca\x59\x87\x30\x7c\x18\x5e\x0e\xdb\xdd\x2b\x54\x16\xfa\xd6\x20\xd1\x9f\x51\x46\xea\x45\xc9\x32\x01\xdb\xd3\x4b\xea\xf6\x0e\x8f\x1
3\xa2\x45\x11\x56\x1b\x0d\x55\xb4\xa4\x67\xf3\x9c\x76\x5b\x5b\x87\x16\xc2\x5e\x0e\xeb\x9e\x8f\x1a\x48\xd7\x42\x79\xe4\xbc\xfe\x60\x78\x3d\x56\x6e\x5a\x8b\xad\xc4\x61\x65\x63\x88\x4f\x49\xc8\xd6\x98\xa9\xad\xc8\x5a\x5c\x13\x91\x48\xce\x79\x6b\x80\x16\x6e\x30\xf4\xbb\xa2\x68\x23\xe9\x16\x0d\xec\x07\xcf\xc9\x35\x3e\x46\xf2\x40\x08\x2c\xae\x00\xb0\xaa\xe6\x22\x3d\xa3\x49\xc7\x2d\xf1\x09\x15\x82\x6b\xe6\x52\x6e\x89\x73\x3b\xb1\x10\xef\x33\xfd\xf5\x67\x19\x25\x00\x4b\x6c\x5f\x3d\xbc\x21\x83\xa7\x40\xf9\x49\x26\xdf\xda\xe0\x15\x41\xc9\xe2\x65\x55\x37\xa9\xd0\x10\x2d\x17\xc2\xed\x97\x34\x0e\xc5\x26\x0d\x07\x3c\xd4\x9e\x52\x5d\xd3\x7e\x21\xb5\x57\x64\xef\xf4\xab\xe2\x84\xf2\xa7\x38\xfe\xb3\x2c\xc6\x97\x7f\x46\xb8\x15\x27\x59\x76\xbd\x62\x9c\xe1\x27\x05\xc2\x11\xa2\xe5\xc9\x0b\x98\x2a\xb6\x08\x4e\x73\xb5\x1d\x1b\xe5\x15\x5d\x28\x61\x84\x9f\x97\xec\x08\xad\x00\x14\x50\xd1\x27\x9f\x47\x22\x14\x88\x8e\xef\x03\xdd\xd4\xe1\xb4\xbb\x23\x48\x76\x85\xf5\xaa\xd6\x58\x4c\x67\xfa\x9c\x9b\x92\x1f\x4d\x76\x67\x79\x90\x7d\xe7\xd6\x1b\x3a\xba\x3b\x64\x58\x25\x13\x28\x79\xa0\x52\xd7\x1c\x57\x32\x92\xd2\x5b\x9f\xfb\xaa\xd6\x3f\x4b\xbe\xce\x7b\xf6\x35\xd1\x04\x61\x65\x38\x42\xf8\x88\x81\x19\xca\x49\x1a\x95\x46\x1c\x98\x0e\x44\xcb\x68\xd4\x18\x3f\xbd\xbb\xd9\xf4\xb9\x36\xc6\xc5\xce\x3f\x8d\x87\x68\xcd\x9d\x87\xa0\xe7\xea\x5d\xdb\x60\x84\x2e\xbb\x7e\xb7\xb8\x7e\x71\xdd\x78\x8a\xa5\xa7\xdd\xc7\xaa\x42\x67\x8d\xaa\xfe\xf8\x3e\x73\x0e\x74\x45\x77\x52\xc8\x70\x40\xf6\x0e\xb8\x09\xe8\x07\xdd\x90\x6f\x36\x78\x63\x44\x63\xdb\x32\x3d\xd8\xb0\x22\x9e\xc4\xe2\xba\x89\x58\x43\xf6\x48\xf8\x44\x3c\x05\x22\xa8\x36\xa6\x65\xd5\x74\xd5\x7d\x53\xde\x8d\x26\x68\x50\x2f\x0d\xfd\xb3\xa1\xc4\x2a\x03\x68\x32\x28\x90\xb9\xb2\x64\x47\x09\xda\x9c\xbc\x2d\xda\x51\x06\x4b\x40\xb5\xc5\x56\x75\xda\x30\x4c\x97\x0d\xe9\x2b\xbb\xa9\x23\x02\xd8\xbe\x0f\x0b\x05\xc1\x36\xa3\x8d\x7b\x05\x49\x07\xf9\x92\x83\x9f\xe6\xbb\xf6\x93\xa9\xe6\xa2\x17\x41\x93\x75\x08\x41\xf0\x82\xff\x04\xf9\xb0\x8b\x72\x89\xf2\x40\x74\xb2\xe2\x44\x33\x1c\xc
7\xc0\x55\xc3\x5f\xfa\x0f\xe8\xfb\x92\x4b\x4c\xaf\x27\xb2\x63\x24\xd9\xfe\x12\x88\x3c\x13\x9f\xe0\xc5\xab\xaa\xb9\x93\xd1\x0d\xac\xf9\x16\xf7\x5f\xc3\xfc\x26\xeb\x42\xa8\xca\xfa\x52\xe4\xa9\x7f\xca\x45\xf2\xb0\x0e\xa4\x54\x43\xf5\xa6\xaf\x65\x21\x7a\x64\x07\x42\x9a\x68\x70\xa7\xc7\x79\xca\x1e\x32\x05\x80\x37\xe1\x24\xa0\xb7\xd8\x69\x74\x31\x8c\x0b\x89\x19\xf7\x02\xff\xbd\x99\x41\x00\xd7\x9c\x19\x23\xbe\xc3\xcb\x4b\x98\x09\xf1\xc7\xa7\x32\xff\x60\x44\x71\xd9\x33\xef\x56\x8e\x0f\xb1\xac\x9d\xcb\x1a\x82\x5e\x2e\x6e\xd0\xb1\xd1\xd4\x8e\xf0\x98\x5f\x7e\x0c\xef\x19\x41\x21\x72\x5c\xd9\x06\x13\xa4\x33\x14\x56\x87\x65\xd3\x31\xa1\x1b\xbc\xb3\x5f\x9d\xfd\x68\xf6\x23\x2a\x12\xe8\x7a\x44\x83\xc9\x8e\xc2\x8b\x93\x6a\x10\x14\x0f\xb9\x7a\x56\x65\xfa\x01\xf3\x2b\xba\xa6\xb7\xd4\x27\x7d\xbb\xc5\x31\x59\x53\xf2\x18\xd1\x00\x3e\x64\x1c\x81\x5d\x61\x41\xe5\x07\x04\x23\xba\x04\xed\x38\x0f\xd7\x21\xff\x52\xfd\x97\x8f\xeb\x7a\x6e\xa8\x5d\xe2\x55\x45\x52\x63\x8e\x4d\x66\x74\x9d\x93\x29\x74\xce\xca\xbd\x29\xe7\xa3\x06\x5a\x0d\x54\x13\xfd\x65\x8d\x5f\x53\x7c\x2d\x05\x4e\x00\x46\x65\x04\x50\xed\x5f\x2a\x66\xf6\x47\x68\x38\x60\xd5\x91\xb3\x67\xd2\xe4\x2d\x70\x7e\x6e\xf1\x5d\x90\xe5\xd4\x8e\x5d\xcc\xaa\x0a\x6e\x85\x1b\x4c\xcb\x8e\x9f\x08\x4e\x07\x13\x60\xd5\xa2\x19\x93\xbb\xe6\xe4\x2d\x07\x4a\xf6\xbc\x05\xcc\x95\x42\x5f\x03\x80\x02\x20\x47\x06\x07\x42\x26\x1e\x4e\x11\xc5\x89\xba\x21\x1c\xf2\x49\xf0\xe1\x34\x70\x79\x37\x38\x9b\xbf\xe6\x06\xba\x99\xb0\x16\x5d\xd1\xec\x19\xa9\xac\xa3\xbc\xa2\x38\x8e\xd9\x86\x32\x14\x4f\xf8\xf2\x3d\x0a\x32\x4f\xb8\x75\x7b\x17\xb8\x76\x5e\x55\xec\x4d\x75\xf9\x83\x47\xde\xf2\x69\x81\x23\xf8\xe5\x42\x2f\x84\xb4\x50\x40\x13\xbb\xc3\x9c\x31\x63\x04\x9d\x4b\x80\xb2\xe6\x49\x8f\x70\x4b\x76\x24\x50\xfe\xd0\x34\x4c\x60\xc5\xf5\xaf\x1f\xa2\xfe\x11\x96\x3b\x35\x9e\xaf\xe4\x11\xfb\x1c\x38\x36\x81\x3e\x90\x9f\x51\xf2\xa7\xa1\xd9\x70\xe9\xc4\xba\xae\xf3\xd0\x3e\x89\x37\xd4\xa5\x74\x1c\x2b\x92\x65\xc7\x00\x7c\x29\x6b\xeb\x9c\xf9\xde\xbd\xc5\xd1\x27\x56\xcd\x06\x55\x5d\x3c\x66\x0
f\x47\x56\x82\x97\x29\x7b\x29\x72\x74\x36\x75\x00\x68\x76\xd4\x61\x69\xf9\xa5\xac\x03\xf2\x53\xea\x70\x76\xc8\x2c\x54\x48\x5f\x76\x0d\xa8\x36\xb4\xc8\x42\x04\xac\xaa\x28\x6a\xa4\xc1\x14\xd0\x62\x0b\x00\x5c\x9e\xf2\x92\x6d\xb4\x4f\xb0\x86\xcb\x0c\x26\x36\x71\x78\x82\xd5\xb1\x65\xd7\xa3\x8f\x0a\x35\xe9\x56\x43\x98\xd7\x46\xd2\x2b\xb9\x20\x93\xb7\x9c\xf5\x0b\xae\xef\xb7\x50\xe0\x8e\x0a\x72\x95\xdb\xc2\xa1\xa5\xb0\x86\x85\x52\x4c\x43\xf6\xf6\xe6\x93\xb2\x13\x99\x35\x5b\xc3\x24\xa6\xac\x74\x00\x64\x01\x23\x70\xec\x81\xf2\xd8\xe0\x4e\xf3\x5e\x1d\x86\xd2\x14\xad\xb9\xf7\x4e\x9a\xdf\x41\x96\xdf\x91\x49\xf0\x91\x7a\x4a\x0b\x08\xc1\x99\x8d\x24\x37\x7e\xd5\x43\x55\xf2\xb3\xc1\xa4\xfc\xe4\xa1\xf2\x84\x3f\x62\x52\x42\xa3\xcb\x11\x80\x01\x8f\x84\xa2\x16\x7e\xcc\x75\x99\x7f\x20\xa2\x54\x18\xe0\x07\x85\xe3\xb2\x66\x07\xb2\x15\x1b\x9a\x26\x86\x74\xe7\x72\x7b\xe5\x9a\xe6\xce\x6b\x79\x91\x3b\x12\x5f\xd3\x47\x98\xdd\x7c\x80\x5e\xea\x1c\xb3\x08\x57\x91\x92\x9f\x21\x35\xd5\x94\x61\xe4\x1a\x4e\x81\x7f\xd6\x8c\xff\xe6\x9c\x43\x84\x7c\x9c\x50\x00\xfb\x8c\x39\xc4\x81\xae\xf9\x81\x17\x08\x2a\xa6\xa4\x50\xd9\x90\xcf\x88\xde\xa5\xc8\xb1\xe5\xf7\xa1\xfd\x57\x41\xaa\x7f\x7f\xfe\x31\x8d\xa0\x33\x35\xef\x9d\x67\x68\xcd\x83\x68\xcb\x85\x3a\xcb\xd5\x61\xa3\x0c\x0e\xe8\x38\xac\xe9\xc3\x9e\x6c\xda\xbb\x3f\x99\x1e\x33\x63\xe4\x23\x3b\x73\x2c\xf1\x19\xde\x27\x2d\xcc\x3f\x6f\xf2\x9c\x26\xfb\x98\x25\x54\xc9\x53\x04\x52\xdf\xac\x4c\xc4\x09\x67\x07\x1e\x9f\x51\xab\x7d\xba\x9f\x13\x90\xe1\xf9\x96\xcc\x63\x3d\x2f\x1c\x34\xf0\x65\x17\x00\x5a\x33\x54\x9a\x90\xb9\xfe\x6b\x0e\x99\x29\x3b\xbc\x23\xf0\x98\x22\x46\x97\x22\xc3\x80\x64\xce\xe2\xc3\x0a\xcf\xf0\x3e\x6c\x24\xfd\x1f\x8f\x29\x6e\x25\x3f\x54\x08\xb6\xe8\xeb\x52\xa5\x6f\x3c\xf0\x8e\x5e\xa9\x31\xbe\x78\x16\x17\x08\x09\xec\x01\x12\xa9\x00\xdf\xca\x37\x2d\x47\xf0\x47\xbf\xa4\xda\x22\x5d\x8d\x03\x84\x07\x0c\x97\xf6\xe5\x59\xc9\xd5\xf3\xae\x70\xe5\x42\x96\x8b\x41\x03\x14\x18\xa1\xf6\x17\x39\x0f\x51\x5a\x32\x55\x11\x40\x7e\xdc\x32\x1c\xd6\x23\x4c\xd0\x25\x81\x74\xa
0\x97\x18\x74\x6e\x62\x54\x25\x5d\x30\xe7\x27\xfb\xc6\x41\x3a\x9f\xa3\xc6\x67\x43\x86\xd4\xb9\x8f\xce\x33\xb5\xf6\xb3\xc0\x6f\x4a\x10\xa7\x4f\xfe\x08\x45\x0a\xc7\x25\x2d\xc1\xf2\x8a\xce\x68\x12\x1f\xdb\x17\x38\x54\xe0\xfd\xc7\xe0\x1f\x0c\x50\x38\xe4\xe8\x12\xe0\xb5\x82\xdb\xf4\x1e\xea\x26\x37\xb8\x60\x8f\x0f\x97\x55\xdc\x8b\x11\xfd\x6f\x1e\x6d\x21\xb2\x6f\x21\xbc\x7b\x02\x6c\xb4\x45\x71\x45\x03\x5e\xa6\xfd\xa2\xc6\xa3\xd2\xc6\xbd\xd2\xda\xbd\xc2\x95\xc3\x9c\xf2\x7e\x91\xfe\x69\xca\xe4\xb1\xcf\x6f\x98\x97\x86\xd2\x15\x53\x16\x5f\x9a\xc0\x8b\x96\xd1\x37\xba\x44\xa3\x82\x0e\x53\x23\xf1\xca\xd8\xa9\xd1\xad\xdf\x2c\x13\xf4\x41\xd5\x9d\x57\xa3\x8b\xff\x1d\x80\x96\x19\xee\xdc\xfb\x38\x2f\xff\xf9\x6e\x07\x2a\x64\x1b\x36\x3c\xef\xdf\x56\x0d\xb8\x48\x45\x7b\xb5\x1a\xb0\x2a\x87\x29\xf8\xd7\xa2\xea\xef\x53\x44\x2d\x15\xfd\xa9\x54\x8b\x9d\x4a\xdb\xfb\x75\x2f\x2c\x51\x8f\x97\xd9\x88\x4b\xde\x94\x98\x95\xff\x97\x23\xe2\x68\xe1\x0f\xd2\x44\x43\x52\x7c\x95\x2a\x24\x09\xdb\x97\xd9\x3f\x4e\x60\x7a\x74\x6c\xa9\xd1\x53\xb6\xd9\x42\xe3\x7d\xa7\xc1\xde\x42\xa1\x14\x2f\x87\x20\x1c\x56\x15\x59\x9f\x2e\x0e\xc5\x87\x34\x6d\xa5\xec\x49\xd8\x9c\x2e\xe2\x33\x29\xb2\xa0\x35\x64\xce\xad\x89\xfa\x23\x99\x64\x4f\x25\x2f\x38\x37\x15\xf8\xd7\x80\x77\xa1\x81\xba\x4e\x52\xa9\x59\x53\x69\xd9\xd3\xc8\x44\xb3\x82\x26\xdc\x74\x5c\x56\xa2\x24\xc3\x99\x6c\xb0\x43\x68\x7c\x5f\x34\x9d\x1e\x07\x1c\xea\xdb\x27\x62\x50\xc2\x70\x58\x56\x67\x1a\xc4\x24\xf4\x27\x3e\x05\xbe\xf0\x2a\xae\xf5\x08\x2f\xd0\x1a\xca\xc8\x2e\xad\x4e\x66\x48\xf9\xcd\x87\x05\x27\x30\x90\xe2\x2d\x30\xcc\xdc\x22\x24\xb8\x71\x99\xe2\xc5\x12\xee\x59\x54\x55\x33\x55\x55\xe4\x98\x52\x0a\xa4\x9b\x68\x83\xb1\x46\xb4\x58\x34\x07\x2a\x6b\xfe\x1a\x4d\xe0\xc3\xd8\xea\x48\xc0\xfb\xee\xd2\x83\xff\x8c\x17\x5d\x45\x44\xd9\x74\xff\x0e\x05\x48\x91\x7f\xdf\x73\x3a\x36\x3d\x57\xaf\x6d\xe2\xfb\xb0\xe1\xf9\x8f\xff\x0b\x18\x9b\xb3\x16\xe0\x9a\x55\xd6\xa6\x71\xba\x02\x57\x58\xf8\xad\x03\x29\xbd\xd0\xa5\xbd\xd4\xa5\x11\xd4\xbc\xbd\xf5\xa2\x3a\x52\xd0\x0a\x8b\xf0\x5b\xc
4\x5f\x20\x36\x76\x28\xfe\xab\xfa\xb2\x27\xfc\xcf\xf1\x84\xc1\x90\x00\x95\x07\xea\x86\xc5\xf7\x79\xb7\x10\x00\xb8\x80\x24\x48\x78\x99\x26\x1f\x63\x1b\x6d\x44\x61\x48\x5c\xe3\xd9\x55\xc5\x32\x6a\xe3\x23\x46\xc7\xdc\x06\xef\x5c\x8b\x31\x6e\xf1\x2e\xc9\x7c\x33\x23\xd0\xa1\xd9\x90\xc1\x47\xfe\xdf\xe3\x37\xfe\x6f\x48\xc2\x40\x74\x72\xff\xf2\x32\xe4\xec\x83\x22\x0c\x7e\x8d\xea\x9e\x1b\x41\x09\x81\xe8\x24\xf4\x67\x9d\xc5\xdf\xf8\x2a\xdf\x35\xdd\x98\x6f\x10\x61\xb3\x19\xe8\x7b\xe1\x95\x45\xb4\x4a\x7c\x14\x91\x88\x3a\x56\xd7\xe7\xdf\x6c\x91\x34\x50\x59\x53\x5c\xe8\x23\xf7\x22\xa5\x39\xba\x76\x88\x9f\x03\x8b\x98\x2f\xe5\x49\xcf\xaf\x26\x69\x9f\x86\x93\x70\xd9\x52\x20\xea\x83\x17\xb2\x39\x2e\xd1\x27\xbe\x73\x5f\xdc\x22\xae\xd8\x11\x7e\x8b\x24\x23\x08\xe7\xb8\x3f\x97\xfc\xd8\xea\xcf\xa3\x4e\x35\x66\x40\x45\x14\x26\xe5\x3d\x68\xe2\x0b\xe0\x65\xa9\x4b\xeb\xa5\x5d\xc9\x8e\x24\x56\xa8\xf5\xae\xbc\x96\x08\x86\x6e\x06\x9d\xee\x61\x1f\x59\x7b\xa9\x57\xe5\xe8\xbc\x67\x70\x9f\x03\x21\x69\x62\x1c\x1b\xf2\x87\x93\xe4\xf2\x5e\xe4\x86\xea\x52\x03\x4e\x39\xdf\x66\x07\x82\x8b\xe7\x4e\x54\x41\xb0\xa4\xc4\x19\x13\xfd\x2f\xa5\x95\x85\x7a\x66\xa2\xd0\xa7\x78\x56\xe7\x19\xd4\xb0\x52\xdc\x82\x15\x96\x64\x93\xd9\xfe\xed\xc1\x49\xf2\xf3\x8f\x25\x76\xe0\x25\x10\x3e\x07\xa2\x9a\x52\xb9\x7a\x85\x8f\x2c\xda\x43\x2c\x5c\x56\xe2\x24\x49\x8a\x07\x94\xfa\xb1\x32\x7f\x96\x49\x9a\xa0\xc7\x0f\xfe\x82\x2e\xfb\xf4\xef\x7b\x4a\xfe\x0e\x0f\xd9\x6c\x7c\xad\x69\x20\x2a\x41\xc8\xe2\xec\x92\xe8\x87\xc9\x9a\x18\x5b\x86\x57\x30\xef\x55\x22\x02\xec\xb9\xef\xf5\x07\x9f\x43\xe9\x50\xc9\x99\x05\xdc\x09\x66\x07\xfd\x50\xc4\xaa\x96\xd7\x87\x61\xeb\x46\x8c\x4b\x9f\x58\x2c\x95\x13\x85\x89\x53\xb4\x0c\x74\x0b\xe9\x86\x5e\xc0\x03\xcd\xf0\xe2\x4d\x6e\xdd\xdb\xbf\xfe\xbc\x14\x51\x25\x00\x89\xe3\x88\x33\xa1\x24\x21\xa2\x91\x0a\x14\xd7\xe6\xa7\xc5\x56\xfa\xcb\x47\x9a\x26\x98\x9e\x50\xe9\xdd\x8b\xcd\x27\xbc\x0c\xc9\x89\xa6\xe1\x3e\x51\x58\x23\x6a\x5e\x5a\xbf\xbb\x9a\x81\xa8\x27\xd3\x57\xa7\x0e\xba\x10\x6c\x64\x79\x67\xcf\x91\x4d\xaa\x4
8\x30\x24\x3e\x6d\x79\xb4\x6d\x6e\x05\xd3\xfc\x9c\xcb\xee\x85\xba\xe7\x6a\xca\xb0\x9f\x27\x76\x5d\xd5\xd9\x25\xfa\xbe\x64\xb9\x7b\xa6\x0b\x40\x7f\x15\x9b\xbb\x9a\xfe\xe5\x88\x35\x7d\x9f\xf3\xec\x12\x4b\x38\x15\x57\x4f\xc9\x99\x8c\x38\x40\xeb\xce\xc3\xff\x70\x30\x2f\x49\x3d\xb4\x5f\x73\x87\x71\x27\x0e\x45\x4c\x92\x7d\x5a\x50\x93\xd3\x0a\x1e\x74\x41\x93\x99\xc0\x98\xf0\xbe\x6b\x82\x77\xce\x13\x25\x9b\x86\xc0\xc8\xa3\x9c\x16\x62\xef\x56\x79\xa4\xf3\xe7\xc5\x9b\x23\xce\x86\x0f\xbf\x09\x44\xae\x80\x89\x54\xdf\x1d\x31\x9a\x18\xf9\x4b\x2a\x09\x5a\x40\xa1\x21\xff\x69\xbc\x2f\x5a\xbe\xc0\xac\xe2\x48\x26\xbb\x54\x46\xdd\x8d\x91\x5a\x1b\x23\x8a\x53\x6c\x4c\x79\xf1\x10\x3d\x28\x8e\x9f\xca\x81\x9a\xf5\x7a\xa1\x07\x27\xb5\xb9\xee\xb4\xa4\xe1\xa4\xbc\xba\xa9\x73\xe0\x39\x45\x9f\x32\xff\x98\x4e\x62\xc7\x6c\x22\xef\x57\x12\x7a\x9a\x4b\x7e\x2e\x74\x6c\x30\x4e\x16\x87\x88\xe4\x6a\x5b\x10\xf4\xb9\xc0\x80\x59\x3b\x86\xf9\x82\x27\xea\x3d\xbf\xf9\x57\xf3\x08\x59\xce\x2a\x26\xdf\x27\xef\x57\xe2\x92\x86\xdb\x85\x90\xec\x34\x60\x94\xc9\x71\x91\xb0\x3f\x67\xf5\x35\xf7\x9f\xe8\x76\x4d\xa5\xd3\xe5\x15\x3a\x98\x74\x46\x03\xfe\x27\x16\x69\x6b\x56\xe0\x86\x51\xa1\x61\x7a\x35\xce\x7b\x1f\x86\x28\x4d\x0c\x03\x2a\xdd\x4e\x1b\x50\x53\x40\x9f\x18\xd2\xf0\x02\x46\xe7\xa0\x3b\x69\xae\xdc\xb0\xe8\x9f\x0f\x5b\x91\xb6\xab\xfa\x8e\x28\xbf\xd1\x68\x7d\xd9\x80\x57\x4b\xc3\xf9\x04\xd3\x55\x94\x60\x3b\x7c\x07\xc2\x8c\x9f\x07\x2a\x94\xd7\xe9\xf6\x42\xb0\xbc\x24\xf9\x44\xc7\xae\xba\xc7\x00\xe8\xa6\x0d\x73\xcf\x2b\xdc\xaa\xbc\xb2\x0c\xee\x8e\x7d\x53\xa8\xdb\x5b\x64\xe3\x20\xc9\x91\xf4\x8a\x34\x58\xfd\xb1\x4c\x2d\x1d\x0e\x3a\xa6\x89\x39\x92\x09\x80\xdd\xb1\x00\x02\xb4\xeb\x0f\xea\x3b\x6a\xe0\xc1\x83\x8f\xcd\xbf\x36\x70\xad\xeb\xda\x2e\x4b\xa7\x67\x47\x76\x9b\xd6\xb3\xf4\x44\x75\x8c\x24\xcf\xb6\x0e\x3a\xaf\x25\x92\x15\x67\xc9\x31\x6d\xac\x23\xd4\x99\x38\xbb\x54\xf9\xc7\x98\xb4\xf1\x28\x4d\xb4\xe6\xf4\xb1\x64\x53\x14\xd5\x93\xe2\xa4\x5d\x10\xe1\xa5\x15\x2e\xb9\xf3\xa6\x12\x74\x71\xfd\x59\x17\x6d\xa8\xf2\xc6\xe6\xf3\x0
c\x26\xe8\x6f\x79\x41\xa5\x75\x76\x4f\xd6\xb6\x76\xa7\x2d\x07\x09\xc2\x1b\x2c\xad\xd5\x9a\x29\xd4\xda\xc8\xf2\xd5\x27\x69\x84\x4a\x94\xed\xb1\x45\x50\xd1\xd7\x75\x5c\x1e\xa7\x81\xdd\x7d\xdb\x3f\x2e\xee\x35\x63\x9f\xa7\xed\xfe\x49\x24\x0d\xc0\x03\x0a\x65\x38\x6d\x48\x57\x05\x52\x1e\xe3\x96\x47\x96\xde\x0e\xb7\xcf\xb1\xc4\x76\x54\x19\xf2\xe0\x6a\x48\xb1\x7f\x71\x70\x01\x77\x8f\xd2\x95\xeb\x4a\xec\x60\x65\x35\xcc\x97\xc9\x31\x65\x8c\x54\x42\x00\x25\x13\x16\x26\x42\x5f\xd9\x0c\x4c\xb6\x03\x58\x52\xe7\x15\x3b\xb2\x35\x24\x0a\x63\x5f\xfe\xe9\xc4\x24\x50\x4a\xd1\x90\xa0\x31\x88\x8f\x1e\x58\x96\x6c\x44\xc4\x9f\xbf\xfe\x7e\x9d\xbe\xb3\x32\xff\x87\x1a\xb0\xee\xd9\x82\x18\x48\xc3\x9f\x56\xcf\x6f\xa2\xd3\xc5\x11\x9b\x85\x2e\x80\xdf\x4d\x40\x45\x14\x90\x25\xf8\xde\x0c\x31\x31\x63\xf4\x3e\x68\x67\xde\x31\x6c\x77\xd5\xac\xc6\x12\x85\x46\x67\xf0\x97\xec\x87\x24\x70\xf6\xf7\xad\x67\x95\xb9\x8b\x6a\x85\x96\xfc\x8c\x54\xb7\x9e\x7f\xec\xe2\xc2\x85\xf7\x23\x2c\xde\x99\x71\xbc\x8c\x86\x8e\xf5\x6e\x79\xb2\x26\xd8\x9c\x3a\x32\x98\xc7\x40\x6b\x4c\xb5\xda\x2d\x1d\x03\xd6\xce\xbd\x15\x65\x48\xaa\x8d\x00\xae\xdd\x24\x15\xea\x4e\x5d\xb5\xfa\xa0\x6a\xeb\x5d\xd8\xd0\x52\x74\xd1\x91\x5c\x60\x07\x74\x86\x32\x7d\xda\xb2\xc5\x58\x95\xe5\x8a\x4b\xe2\x62\x8f\x48\xad\xbb\x15\xde\x33\xdf\x30\x65\x44\x0d\xba\x1d\xd4\x15\x24\x39\xab\xd1\x89\xa1\x9c\xbe\x32\x38\x46\xc8\x3b\xa0\x54\xee\x5a\x3e\x09\xb8\x26\x17\xcb\xa2\x6c\x41\x8e\xc8\xff\x98\x2f\xb0\x33\x5a\x56\x27\x3c\xa1\x2c\x98\xd4\xee\x9d\x8c\x62\x28\x35\x7c\xcd\xd0\x17\x04\x39\xae\xb8\x3a\xd1\xd2\xb8\x17\xbc\x10\x35\x7c\xf8\x94\x1a\xce\xa9\xea\x32\xd1\x00\x2e\x1c\xc1\x50\x1d\x41\x6d\xf2\x23\xda\x86\xf6\xcb\x56\x54\x0f\x00\x81\x87\x54\xd3\x72\x07\xc1\x6a\x78\x34\xdb\x26\x09\x8f\x2c\xe0\x89\x6f\x2a\x37\x47\xe7\x8c\x63\x4c\x52\x58\x9d\xab\x73\x16\x44\x8d\xae\x6e\x5f\xb1\x44\x0f\x1c\x42\x3e\x9d\x05\x68\xee\xd1\xbb\x9a\xe0\x13\x39\x23\x9d\xd3\x89\x03\x2f\x34\x31\x91\xd9\xa5\x4e\xc1\x46\x88\x3c\x4e\x1d\xa6\x8b\xb2\xaa\x5f\x43\xea\xba\x07\xc1\xb6\x76\x18\x9c\x3
0\x1b\xbf\x02\x00\xac\x28\x58\x84\xd1\x43\x77\x82\x4e\x22\x2c\x9d\x3f\x11\x97\x19\xca\x8c\x39\x36\x54\x0e\x4f\x3c\xbe\x75\xdc\xc9\x12\xb5\x50\x61\xbd\x55\xf1\x08\x31\x97\x8b\x7b\xac\x30\x97\x5b\xb1\xcc\x1c\x29\x79\x66\xec\x1b\x2b\x3a\xfd\x33\xd2\x31\x79\x62\xb7\x16\xfb\xb1\x5e\x35\xa7\xe1\xd0\xb8\xfc\xcf\x77\x80\x60\xad\x85\xed\x58\xfd\xe1\xdf\x3b\xa9\x78\x64\x6a\x66\xf4\x94\x51\x91\x4d\x77\x5d\x56\xb5\xc1\xca\xa9\x17\x1b\xdb\xb1\x46\xbe\x75\xa6\x86\xc2\xa5\x67\x8f\x63\xbe\x24\x2a\x22\x9c\x57\xbd\xcb\x8b\xdb\xd0\xdc\xf7\xf2\x31\x0d\x78\x8f\x31\x2b\xbf\x99\x2b\xcc\x29\x07\xca\x8f\xb4\xd1\x5c\xe7\xc1\xf2\x23\xa0\xc2\x8a\x50\x32\x98\x68\xe2\xc6\x77\x78\x02\x01\xcd\x7c\xef\x3d\x8d\x68\xaf\x92\x60\x08\x40\x2c\x52\xfa\x8b\x69\x24\x35\x04\x4e\xf4\x45\x5e\x5c\xe2\x90\x63\x14\xaa\x25\xd2\x50\x6c\xaf\xa9\xdd\x92\x15\x8e\x97\x9f\x99\xad\xc4\xb9\xc7\xa3\x32\xd6\x53\x60\x6a\x1f\x90\xd1\xc6\x98\x7d\xd7\x2d\x93\x7a\xbd\xfc\x82\x3f\x8f\xe2\xf8\x7b\x48\xd2\xeb\x87\xdc\x8c\xe2\xe4\x5d\x91\x06\xe5\x11\x66\x30\x0c\x7c\x99\x2e\x55\x8b\x95\x9c\x69\x2c\x72\xfb\x75\x70\xdd\x5e\x67\xcc\x9d\xfd\x33\x06\x1a\x88\x68\xc2\xe4\xe1\xf4\x8a\x66\x33\x2c\x2d\x26\xc4\xd9\x2e\xe7\x7b\x18\xf0\x4b\xa5\x28\xd9\xdf\xd4\xad\x0c\x5c\xf1\xb1\xc2\x5c\x39\x00\xdc\x47\xa0\x08\xaf\x91\xdc\xd0\x19\xd6\x05\xc5\xa8\xea\xeb\x40\xfd\xf2\xc3\xa5\xa4\x0f\x44\xcf\x87\xcf\x3b\x35\xa5\x8a\xbd\xe9\x38\x1c\xd6\x56\x9e\xc0\x55\x8c\x24\xbf\xa3\x00\x9c\xc2\xc4\x03\x21\x83\x0e\xcc\xc3\x75\x25\x4e\x74\x1f\x4f\xd1\xbd\x00\x11\xbe\xc7\x6f\x3b\x10\xe2\xbd\x18\xc1\xb9\x2e\x62\x6e\xb2\x79\x8d\x11\x9e\x01\x93\xb6\x19\x41\xe4\x54\xee\x3c\x8c\x86\x42\xf6\x16\x58\x11\x15\x93\xc6\x31\xda\x5b\x28\x5a\x47\xd1\x91\x17\x3e\x79\x2e\x97\x24\x99\x48\x0c\x31\x84\xe7\xda\x88\x38\xe0\xc4\x2f\x2b\x4e\xd8\x7c\x0c\xec\x9b\xc0\x3c\xe4\x1f\xb6\xba\x64\xf8\x0b\xd1\x1b\xd3\x19\x0d\x07\x2f\x84\xc0\x8a\x19\x64\x6a\x8d\xbb\xcf\xbd\xc4\x75\x4f\xb6\x7b\x7a\x8d\xc0\x79\xa0\xba\xf0\xc7\xce\x99\x04\x50\xe4\x2e\x83\x07\x30\x45\xd8\xd4\xf6\xc3\xb8\xac\x69\x75\x91\x8
0\x48\x19\xdc\x02\x11\x15\xbc\xb2\x2c\x03\xae\x15\x25\x31\x56\x41\x7b\xa9\x45\x0e\x64\xc9\xf3\xd8\x96\x26\x9d\x43\xdd\x09\x90\x14\xb2\xab\x97\x66\xa7\x90\x7d\x12\x16\x53\xf1\x7d\x35\x6e\xbe\x78\xfe\xe7\xa1\x44\x2e\x03\x1f\xe8\x2c\xc9\xdd\x63\x6c\xd1\x5b\x20\xda\xf3\x03\xc9\x69\x80\x03\xce\xa3\x40\x9c\x17\xb1\xc2\xcd\x6b\x52\x88\x83\x47\xe8\x2d\x3d\xa6\x62\xef\xde\xb5\x56\xe0\x20\x80\x13\x82\x2e\x5b\xc1\xa4\x54\x4a\xe2\x9b\x4d\x78\xcf\xf3\xdd\x35\xee\x3e\x5a\xe9\x56\xd6\x61\x83\x09\x4c\xe5\xea\x7f\x05\xbf\xec\x2f\x17\xf9\xe6\x46\xf9\x1c\x5a\x88\x3f\xaf\xb9\xf4\x6f\x51\x5e\x8a\xcf\xb0\x56\xe4\x6e\x6a\xb0\x6f\xda\xdb\xfc\x7a\xef\xc3\x6f\x12\x21\x83\xcc\x80\xdc\x4e\x1e\xb6\x04\x71\x20\x3a\x8f\x3e\xa7\xdf\xc3\x3f\x3f\xf1\x7b\x66\x2a\xc3\x7e\xf3\x91\x9a\x1e\x17\xcb\xd5\xca\xae\x0f\x46\x32\xf8\x90\x21\xd5\x94\x3f\x95\xe9\xe1\x3d\x11\xb8\xb5\x3f\x8f\x92\x13\x8f\x4e\x6f\xc8\x60\x53\x1f\xbc\x86\x39\x06\x38\x52\x6e\x3d\x92\x2d\x5f\x43\xd6\xdf\xdf\x67\x57\x11\x18\xed\x43\xb8\x4d\x1b\x4c\xe8\x57\x45\x89\x8e\xed\x3f\x33\x96\x35\xe8\x46\xa9\x6d\xec\x29\x3d\xdc\x02\x43\x74\x97\xda\xd7\xfc\xa0\x58\x88\xec\xef\xa1\x2d\xb5\x0d\xbf\x1f\x0f\x1c\xec\x82\xc8\xfc\x42\x8d\x11\xc9\x3f\x18\xa4\xe3\x65\x44\x06\x3a\x6c\xf0\x9e\x04\x58\x63\x4b\x4c\x80\x93\x65\xc6\x48\x3a\xf0\xa0\x3f\xbe\x0f\xfb\xc5\x07\x9e\x96\x84\x2d\xb1\x06\xd2\x9e\x74\xc7\xb2\x48\xe0\x96\x04\x5a\x6a\x2b\x36\x7b\x36\xbe\xe0\x03\x15\xbc\x3f\xf1\x07\x88\x2e\xc3\x97\xfc\xcd\x0b\xaa\xea\xa8\x82\xc5\x5e\x73\x4b\xb7\xe2\x4c\x72\xa4\xa0\x9f\xf9\x09\x63\xc9\x44\x42\x35\xfe\x1f\x7b\xdf\xad\x73\x31\x8f\x6d\xd7\xdf\xc7\x70\x67\xa8\x50\x4e\x30\x5c\x48\xe7\x28\xe7\x1c\x3a\xe5\x9c\xb3\x00\xbf\xbb\x71\xbe\x01\x8c\x7f\xdc\xdd\x7e\x08\xa8\x10\x41\x70\x8b\xe4\x02\xf7\x5e\x14\xc9\xf5\x26\xba\xdc\x51\x74\xb4\x9f\x00\x48\x5c\xd4\xa4\x08\xc8\x17\x58\xb4\x44\x17\x08\x3a\x3b\x8b\x95\x99\x22\x13\x43\x7b\xcd\x0c\x77\xb2\x10\x8c\xc0\xad\xe9\x0d\xe8\xbd\x43\xe4\xfd\xe4\x08\xb5\xf1\xa8\x3e\xd3\xa4\xe8\x6d\x8c\x5c\x50\x42\x6a\x69\x9d\xc9\x8a\xd3\x2d\xcb\xb
a\xd0\xa2\x44\x4c\xa5\x8a\xa7\x31\x9a\xcb\xd7\x9d\xd0\x69\x06\x94\xfa\x36\xbc\x7e\x8b\x6d\x8f\xe4\x8f\x31\x82\x25\x7c\xfd\x63\xa3\x0b\x07\x19\x35\x61\x74\xb3\x6a\xef\x00\x18\x09\x48\x05\x45\xa5\x1d\x20\x6f\x70\x97\xa7\xd8\x02\x44\xa1\x8d\xbe\x50\xe5\xe1\x0b\x80\xe7\x89\x12\xdb\xa1\x27\xbd\x1c\xf8\xa3\x3b\x61\x74\x01\x96\x27\x61\x73\xb4\x49\xe6\x00\xbd\x26\xc1\x6e\x9f\x62\xc2\x5b\xc4\x97\x46\x48\x7d\x85\x09\x7c\x82\x13\x30\x51\xed\x22\x1c\xf1\xc8\xe3\xd8\xd7\xec\xaf\xa3\x34\x46\x84\x1e\x53\x2c\x13\x28\xe3\x84\xe8\xd2\x3b\x6b\x2c\x77\x4b\x9e\x71\x39\xdc\xc0\x29\x20\x6b\x01\xd1\x19\x96\x64\x94\x71\xd0\x68\xc7\x2e\xaa\x79\xf3\xc3\xff\x9a\xb5\xcf\x10\x19\x7c\x9f\x15\xca\x5d\x4d\xfd\x60\x07\xb9\x19\xe2\x17\x0c\x49\x1c\xbd\x80\x92\x46\x04\x3a\x9c\x89\xa2\x07\xe9\x5e\x5c\x21\x52\x17\xd5\x23\xcc\x7f\x84\x6e\x49\x9a\x89\xd0\x54\xe0\x28\xa2\x73\x48\xe8\x64\xa7\xee\x8e\x16\xda\x33\x82\xe4\xa7\x9e\x12\x91\xe4\x18\xaa\x8d\x00\x1d\x7d\xb2\x9e\xb7\x3d\x4f\x7a\xd4\x72\x22\x8c\x5a\x1b\x5b\xb9\x07\x92\xc5\xf7\xeb\x2a\x5e\x1f\x80\xb5\x94\xaf\x6f\x19\x27\x0b\x5a\x99\xc4\x46\x67\x59\xa4\xe5\x8e\x72\x7c\x5f\x75\xc6\x11\x7e\x91\xeb\x73\x4d\x8a\x3b\x63\x18\x17\x5e\xf5\xa2\x3e\x94\xa8\xa0\xfb\x4c\x13\x61\xc8\x24\x44\x51\x5d\x71\x34\xd4\xd3\x63\xda\x6b\x61\x47\xb9\x19\x9e\xc3\x2a\x2d\xca\xa7\x1d\x4e\x88\x5c\x8b\x52\x2d\x0f\xd4\xef\x2f\xd5\xf3\x79\xbb\x30\xe9\x9b\x48\xc6\x19\x30\x46\x64\x85\xc5\xf6\x42\x52\x7d\x95\x1f\x85\x4a\xbc\x5e\xb6\xbf\xec\x45\x16\x47\x88\x90\xf3\x12\x0b\xe3\xb2\x47\x8e\xd7\xcf\x8f\xfe\xf9\xd3\x27\x9b\xa5\x68\x41\x9b\xa8\x07\xc2\xc6\x8c\x36\x07\x06\xb0\xab\x30\x83\x30\xf1\xcd\x57\x73\xe2\xee\x4a\xc7\x3b\x4b\x3f\x8f\x6f\xc3\x0f\x90\x6d\x1e\xc3\x13\x0f\x17\x47\x8c\xff\xe0\x71\x79\xa0\x93\x9f\x45\x1f\x82\x3c\x94\x61\xf5\x37\x62\x0b\x51\x3a\xf6\xf1\x02\x6d\x2b\x10\x84\x80\x92\x68\xcd\x38\xda\x08\xaa\xc8\x0c\xe1\x0b\x9e\x86\x48\x38\xe3\x1c\x43\x78\x59\xa4\x0a\x9e\xed\x18\x40\x54\x7f\x24\x32\x44\x69\xd0\x6e\x68\xac\xf7\x28\xa7\x71\x2c\x4a\x35\x5e\xec\xc7\x13\x09\xf0\xbc\x9b\x0
4\x82\xf6\xd4\xa2\x4e\xf7\x16\x8b\x71\x55\x9e\x3c\x91\xa0\x35\xa6\x09\x3a\x65\x32\x4c\x90\x18\xf0\x98\x16\xa5\xdd\x40\x43\x47\x0b\x64\x71\x9d\x51\xed\x29\x49\x47\xca\x91\x04\xea\xd2\x1c\xab\x87\xe2\x78\x3b\x0a\xa6\xbd\xa7\x59\x0c\x14\x13\xfa\x38\xc6\xe8\x58\x1d\x0c\x28\xcc\x38\xf0\x7a\xbe\x7e\x43\x9b\xb3\xb9\x6c\x0b\xd4\x5b\x50\xef\x50\x4e\x37\x68\x56\x9c\xee\x08\xd4\xc0\x77\xe9\x09\xa4\x40\xf3\xc2\x37\x24\x01\x9b\x08\x08\xf1\x1b\xd8\x08\xea\xc3\x36\x78\xb4\xc5\x67\xf1\x81\x48\x07\xda\x59\x53\x6c\x7b\x84\x2c\x3a\xc8\xf0\x2f\x65\x5e\x94\x4a\xcb\xea\xb3\x23\x0b\xf3\x85\x1d\x07\x0e\x29\xc7\xa3\x42\xde\x4c\x09\x2a\xd6\x90\xb2\x39\xbc\xd7\x9a\xb0\x5a\xdc\x9e\x4e\x5b\x52\x02\x28\xf7\x2f\xcf\x29\x8f\x14\x63\x7f\x7b\xc4\xf6\xe1\x8d\x78\xf6\xf2\x5a\x5e\x94\x0d\x85\x75\x8e\x70\x3d\x82\x67\xbd\x23\x6d\x76\xec\xce\xe5\xe9\x67\x77\x4a\xa1\x51\x47\x2a\xac\x5f\x7c\x83\x72\x6d\x74\xa7\x16\xbc\x6f\xcb\x13\x59\x20\xfb\x32\x9d\xf2\xad\xaa\x49\xfa\xe8\x0a\x54\xc2\x8a\xcc\x7f\x60\x1a\x3c\x29\x3c\x3f\x1a\xc6\x7b\x79\xdf\xf5\x50\xf3\xbd\x37\xd8\x65\x9c\xcc\xe5\x6e\xc9\xfe\x86\x30\x5d\x7a\xf4\xb9\x52\xe2\x41\x6e\x75\xef\xec\x69\x61\x86\xbb\x21\xb6\x00\xdb\xbb\xf3\x36\x2d\xc9\xf0\xe3\x01\xc5\x92\x19\xa2\x53\x01\x6a\x9f\x1f\x03\xcc\xf3\x76\x16\xf8\x0f\x5c\x1a\xa3\x3b\xb8\xc3\xbc\x28\xf4\x99\xf9\xda\x6b\xbc\x60\xa5\x09\x34\x7b\x5a\x27\x2c\xb4\xf6\xf1\x6e\xac\x52\x8c\x29\xbd\x5c\x9a\x3b\x11\x41\x85\xad\x3b\x1e\xe4\x68\x26\xf3\x0d\xdd\xa0\x51\xca\x73\x52\xdb\x43\x98\xf1\x9d\x3d\xa2\xec\x32\x71\x31\xda\x89\x34\x7a\x61\x74\xd9\xb5\xd2\x3a\xd9\x5c\xdf\x8c\x34\xcc\xb1\x44\x52\xa3\xe5\x9d\xc7\xeb\xe5\x52\xbe\xce\x7c\x4d\x90\xce\x10\xbf\xf7\x46\xa4\x72\x05\x71\x73\xea\x94\xe8\x9d\x85\x30\x0d\xd2\xfb\x85\x3f\x66\xd5\x7d\x13\x5d\x04\x1c\xab\xf7\x4e\x07\x00\x66\xda\x55\x25\xfe\x48\x77\x02\xa3\xb2\xd2\x40\x51\xfd\xc9\x6a\x46\x01\x0a\xb7\xc3\x55\xbb\x82\x32\x55\x60\xab\xf4\xc9\xa8\x81\xaf\x23\x7f\xf5\x17\x44\x29\xd8\xf6\x96\x50\x7a\x8b\x05\x07\x8a\xcd\xee\x31\xc8\xa3\x44\x40\x59\x95\x66\x31\x3b\x42\x88\x9
4\xbf\x2d\x06\xa4\xe6\x28\x68\x2b\xce\x77\x3e\x7f\xff\xfc\x63\x05\x95\x46\x22\xe3\x14\x90\x48\xec\xb0\x51\x65\xa9\x42\xaf\x31\xba\xee\x8d\x81\xfd\x06\xf8\x93\x9a\x20\x7b\x63\xd0\xa6\x8c\x92\x34\xc7\x27\x4c\x33\xc8\x85\x59\x5f\x0f\x91\xea\xbf\xe8\xff\xc3\xf8\x84\xa4\x02\x9f\x06\x29\xdf\x0d\x29\x4d\xab\xa1\x8b\x63\x3c\xc7\x6f\x9f\xd0\x43\x3c\xc4\x74\xf1\xfa\x0b\x54\x9c\xe3\x91\x0e\xf8\x28\x3b\xb4\xc3\xb3\x57\x44\x35\xd2\x61\xd7\xea\x3a\x28\xcd\x34\xbb\xf5\x45\x1e\xd6\x7b\x65\x71\xec\x04\xad\xbd\x94\x26\x4a\x19\xdf\x6a\x21\xb2\x02\x26\xe2\x21\x1a\x08\xe0\xf0\x1e\x20\x4c\x10\x7d\x21\x0e\x75\xc1\xbe\xf5\x35\xd5\xd7\x09\x39\xdd\xae\xae\x98\x5d\x7e\xd7\xf9\xaa\xb2\xb0\x34\x14\x78\x5f\xbf\x88\xf0\x65\xc7\x0a\x6b\x14\xba\x15\x07\x48\xb4\xe9\xd0\xe3\xd9\x09\x4a\x15\x7e\x01\x4e\xa6\xac\x4c\x42\x42\x84\xaa\xd7\x1f\x20\x0f\xcf\xfb\x9a\x3c\x4f\xe4\xea\x34\x78\xb7\x8e\x66\xe5\xc1\x79\x79\x03\xf5\x1d\xe7\x41\xf8\x9a\x2a\xc5\x3b\x5b\x09\xb4\x2f\x2f\xeb\x33\x05\xb9\x31\xe6\x88\x08\xc8\x75\x8a\x50\xe7\x78\x0b\x58\x0e\x96\x07\x94\xe9\x22\x57\x4b\xce\x79\x7a\x22\x70\xae\x04\xba\xe8\x42\x61\x5d\x39\xd5\x6c\x6a\xd4\x4a\x8f\xd6\x10\x62\xed\x03\x64\xb1\xfa\xc0\x40\x2e\x57\x1f\x0b\x37\x65\x7d\x42\x26\x32\x33\x54\x75\xe0\xdd\xe6\x74\xff\x5c\x21\xaa\x30\xf9\x4e\xda\x7f\xf3\x35\x0e\x94\xe3\x93\xa9\xd0\xa7\x4a\x58\x13\x25\x7f\x7d\x73\x6f\xa9\x70\x5b\x00\x2d\x07\xbe\x1f\xc2\x30\xee\xad\x51\x9c\x7f\x42\x92\x06\x5a\x0f\x53\x39\x62\x58\x17\xb4\x9c\x91\x2d\xcc\x2f\x42\xef\x17\x38\x79\x8c\xaa\xd0\x5a\x90\x36\x93\x60\x9c\xf7\x64\x0f\x7f\xfd\x54\x9e\x28\xb1\xfb\x2f\x5f\x6b\x65\x8b\xb8\xc9\x4b\xe6\x0f\xee\x60\x74\x61\x7a\xb0\xe6\x1a\xee\xeb\xd6\x53\x29\x82\xd4\x61\x94\x7d\x4e\xad\xa7\x39\xbe\x80\x7a\x17\xca\xa3\xb5\xa2\x9b\x13\x75\xe8\xf5\x72\xe0\xc1\xba\xeb\xfb\xa1\x0f\xe5\xdc\xbd\xbf\x1f\x74\x57\x95\x67\x4f\xfd\x1e\x7a\x22\x1c\x7c\xba\x57\x7b\xb5\x59\x2b\x45\xfa\x00\xff\xf4\x60\x7b\x69\x16\xb0\x55\x79\xa8\x95\xbb\x1d\x09\x11\x0f\xde\x19\x2f\x9e\xb7\x7d\x3f\x0f\xb8\xa4\x53\x6a\x48\x77\xbd\x57\x53\x9d\x6
b\x8d\x63\xed\x86\x72\x31\x7f\xe8\xf0\xbd\x41\x71\x9c\x66\x5a\x23\x77\x84\xb8\x65\xf5\xc3\x79\x9f\xf3\x84\xc9\x17\x94\xde\x0c\x55\x90\x6e\x4d\x6e\x68\x56\x1d\x42\x1f\x6a\xd3\x25\x79\x73\x50\x98\xfe\xe7\xed\x03\x76\x5f\x13\xe3\x07\xb5\x21\x1d\xf0\xd4\xd5\x53\xdc\xf8\x34\x97\x74\xcf\x10\x2a\xbf\x19\xe1\xd3\xbd\x26\x7c\xd1\xa8\xd0\x05\x09\x9f\x34\x38\x67\x2d\x8f\xa7\x4a\xf1\x7d\x10\x32\x9b\x49\x58\xe7\xa6\x41\xb9\x0f\xee\xda\xd4\x2e\x14\x0a\x22\x6d\xb3\xb5\x55\x94\x6d\xfa\x54\xc9\x92\x0c\xcb\x02\x94\x83\xf6\xe3\xd7\xaa\x03\xfd\x86\xe4\xdc\x2a\xca\xad\x33\x5d\x75\xa1\x46\x6a\xa5\xdb\x2c\x64\xd6\xed\xfe\xee\x04\x6e\xa6\x59\x41\xf6\x35\x81\xf7\x24\x58\xe3\x03\x22\x12\xdd\xba\x8f\x0f\x8b\x2d\x72\x93\x8e\x08\x4c\x87\xe9\x8e\x50\xe1\x53\x0a\xc8\x48\xaf\x09\xb2\xc3\x24\x3d\xb2\x77\x9e\x05\x5c\xe6\xf6\x76\xe8\xc3\x34\x0e\x26\x79\x02\x17\xa7\x39\x11\x5a\x6c\x04\x6d\xad\x9d\xfd\x3e\xbc\xfe\xd2\x83\x60\x39\xa2\x24\x9d\x68\xcf\xee\xb0\x14\x91\x3a\x54\x79\x7a\x0b\x1e\xc2\x4a\xd0\xcf\x89\xe5\x09\x40\xc0\xdf\x81\x5c\xc7\xa1\xef\x67\xb5\x76\xba\x5b\xe5\x5a\xb7\xfe\xb5\x5e\x8d\x34\xae\x4d\xfd\x3c\xfb\x9c\xe2\x4c\x75\x68\x2d\x4a\xed\xf2\x7b\x92\x3f\x46\xb3\x23\x64\x51\x9a\xe1\xc2\x47\xc7\x77\x22\x84\x1a\xca\x49\x7b\xf3\xcd\xf1\xad\x21\x32\x0f\xec\xdb\x19\x27\xa6\x63\xf5\x66\x2b\xdb\xe0\xf5\x37\x74\xed\x69\x90\xaf\xa1\xbd\x73\xbf\x8f\x85\x35\xcd\x34\xa8\x3f\x7a\x96\x01\xa0\x3b\xc3\xe9\x16\xc6\xfb\xf4\x61\x98\xaf\xd4\xfc\x69\xed\x36\xd8\x21\x95\xa8\x48\x3f\x60\x0e\xd1\x00\xb7\x91\x2b\x51\xf4\x30\x49\x0b\x6d\x81\x81\x07\xfd\x77\xce\x4a\x41\x4d\xf3\x19\xb1\x45\x69\x02\x32\x3f\x42\x92\x46\x54\x37\x1a\xea\x1a\x72\xcb\x06\xda\xd5\xf2\x84\xc8\xe2\xfc\x79\x62\xe0\x6c\xc9\x85\xc7\xd8\x5b\xfb\x92\x80\xeb\x76\xb8\x61\x73\xa6\x74\xe3\xa2\xdd\x12\x6a\x3b\xa4\xa2\xba\xe6\x9f\x29\xff\x00\x79\xb1\x21\xdf\x01\x4d\xf6\x01\x16\xaf\xd4\xe6\x18\xe6\xbb\xfc\xe2\x68\x54\x85\x5f\x1a\x24\xe8\x82\x02\x9a\xe9\xd9\x56\x05\xe8\xc1\xab\x52\xd8\x49\x30\xc6\x9f\x6f\xcc\x8b\x33\x9b\xad\x73\x18\x18\x86\x61\xb4\xe6\xa5\x5
5\x07\x92\x31\xa0\xb4\x39\x9f\x37\x43\x10\x69\x45\x18\xf7\x7b\xdb\xf3\x8d\x77\x27\xf3\x8d\x34\xc2\x74\x82\x34\xf1\x6f\x4d\xe1\xeb\xd3\x04\x89\x0b\xa6\xd8\x22\x4a\x61\x5a\x98\x36\xcb\xf7\x1b\x93\x26\x4e\xff\x1d\xde\xed\x13\xbc\x38\xc2\xf4\x54\xfc\x0c\x13\x18\x33\x65\x01\xb3\x06\x3b\xa8\xea\xb2\x54\xb8\x21\x5f\xb4\x7f\xc8\x5a\x61\x80\x56\x9a\x6d\x8d\xa8\x25\xa6\xd0\x8a\xe2\x6e\xc3\x8d\x37\x24\xd5\x11\xd0\x9e\xba\x50\xfd\x5b\xca\x3f\x2b\xec\x7b\xb1\xcf\xb4\x3e\x14\x68\x0c\xae\x0c\x01\xa5\x29\xa2\x0f\x20\xf2\x82\x5d\x25\xfb\xdb\xc1\xac\x95\x89\x3f\x18\xb2\x57\x26\xd4\xab\xa7\xa5\xec\x55\x6a\x7c\x95\x31\x8c\xf4\x01\x17\x62\x98\x9b\x08\x30\x5b\xec\xd0\xcb\x12\x5d\xe0\xec\xc0\x7a\xea\xd6\xfb\xe5\x98\xe7\xc0\x1f\x63\xba\x54\xd6\xf9\x4f\x7a\x48\x5f\x1d\xa4\x7c\xa1\xb2\xbf\xd6\x5c\x92\x65\x35\x1a\xf4\x76\x7c\x71\x82\x2e\x4c\x91\x82\xb3\x3d\xdc\x35\xc8\xbc\x01\xf3\x7e\x35\x54\x0f\xc8\x7c\x46\xd3\x59\x1f\xc0\xc4\x68\xe7\xb6\x62\x36\xc6\x10\x58\x10\x24\xfc\x13\x41\xc8\xfc\x0c\xa9\x54\xb8\xb3\x52\x94\x1f\xaf\x9f\x97\x60\x80\x82\xd5\x54\x59\xcf\xe7\x7d\xed\x5f\x07\xe7\xa1\x6d\x75\xae\x5d\x75\xa0\x5d\x75\xe8\x67\x63\xac\xcf\xb3\xad\xf2\xcf\x94\x4d\x01\x2a\xff\x90\x0d\x06\xeb\xb6\x37\x38\x6f\xed\xf3\xba\xbb\xfd\x62\x1a\x01\x0d\x56\x81\x44\xbd\x85\xb7\x1a\x71\x62\x19\xe6\xc3\x88\xb8\xe0\x8e\x25\x4a\xd0\xc5\x81\xa6\x34\x35\xc8\x4e\xd4\x9f\x28\x92\x8e\x31\xac\xbf\x64\x8e\xa9\x73\xb1\x2e\xc4\xbe\x26\x43\x3c\x24\xc8\xbe\xc6\x01\xdc\x47\x88\xf1\x15\x52\xdf\x32\x61\xdf\xf7\x13\xba\x38\xa7\x47\x6d\xa0\x5d\xf9\x05\x1e\x8e\xf9\x62\x58\xa4\x6f\x08\xf9\x0c\xfd\x9c\xec\x6d\x84\xc0\xad\x87\xa8\x7d\x83\x6e\x01\x99\x0b\x43\x7d\x31\xf4\x26\x62\x80\x02\x16\x7b\x0e\xd0\x05\x5e\x52\xf4\x00\x27\x41\x3f\x0f\xf6\x10\x0f\x7f\xbf\xff\x9a\x08\xe6\xa8\xeb\x37\x8b\xc2\x89\xa0\xfb\x6c\xc5\x30\x2c\x87\xcb\x36\xc7\x7b\x79\x18\xc2\xb4\xe9\x24\x42\xdd\xc1\x7a\x30\x2f\x89\xde\x7a\x8f\x5e\x49\x02\x0c\xb1\x8c\x78\x97\x45\xd8\xe3\x56\xa7\x7e\x10\xca\xe9\x70\xe3\x07\x7a\x07\x4d\x92\x7e\x86\xfa\x56\xfa\x91\x00\x44\x6f\x9
9\x49\xe8\x5d\xeb\xd6\x5c\x0f\xf9\xa1\x68\x4b\x93\x47\x6f\xa3\xcb\x2a\x7f\x30\x2e\x7b\x58\x76\xbc\x7e\x71\xba\x59\xb0\x87\x58\xb0\xc7\x99\x08\xc6\xf9\xf7\x5d\xbf\xda\x58\xcb\x12\xfc\x15\x8c\x29\xb3\xa7\x43\x9f\x2c\x6e\xab\x30\x85\xd6\x82\x12\x1d\xc1\x37\x44\x77\x2d\x44\x73\x39\x44\xaf\x23\x89\xfd\x54\xd5\xf4\xc1\xb0\x43\x6d\x66\xcd\xe5\x72\x00\x2c\xfe\x94\xd8\x92\xa2\x8f\x09\x67\xc4\x01\xb7\x73\x14\x90\x5d\x88\x63\x15\xee\x2e\xfd\x53\xf0\xbb\xb7\x74\xc0\x9b\x24\xe0\xf7\x14\x83\x36\x93\x31\xf6\xba\x94\x1f\x0a\x54\x1f\x6a\xd4\x80\x32\x7b\x69\x10\x3c\x2f\x5c\x7f\xa9\x53\x7d\x33\xd4\x7f\x2c\xa7\x2f\x71\xf3\x4f\x3a\xd2\xf3\xf1\xa2\x0c\xcb\xec\x5b\x65\xe2\x04\x8c\x93\xf2\xa9\xfe\x84\xab\x95\x56\xc2\x41\xdd\x67\x19\xd9\xaf\xbc\x4f\xf5\xb7\x6f\x4f\x69\x96\x25\xe8\x97\xa4\x4d\xad\x74\x67\x99\x05\x13\x86\xbe\x4f\xbe\x6c\xbf\xe8\xb9\xf8\xeb\x5f\xd7\xa2\x2e\x86\x91\x38\xa9\xe3\x06\x7f\x88\xc5\x97\xa0\x3f\xdd\x68\x3c\x19\x29\xbf\x1b\xa8\x92\xc0\xb4\xa9\x9f\x6b\xe3\x59\xcf\xeb\xa5\x81\x9f\x0b\xbf\xff\x73\x78\x5b\xd7\xcd\x4b\x22\x34\x44\xb8\x10\x4e\xe7\x8b\x45\xfb\xaf\x05\x50\xd9\x81\xf6\xaf\x07\xe9\xff\x82\x2b\xb4\xbb\xbf\xc7\x83\xf5\x34\xd0\x82\xba\x81\x74\xd7\xf2\xf8\xdc\xfc\x62\x1c\xff\x1b\x83\xa7\xb7\x7d\xbf\x08\x3b\x5c\xb0\xfb\xf8\x37\xa0\xb5\xe7\xff\x02\x38\x38\xf4\xc7\x4f\xb7\x73\xd2\xc8\xc7\x41\xdc\x47\xa1\x59\x94\xbf\xea\x73\x1b\x49\x8f\x15\xd7\xd1\x14\xd8\x8b\x7b\xf7\x3d\xfe\x5b\x51\xe0\xa8\x28\x63\xe0\xf9\xbe\xed\xfd\xda\xfe\x77\xb7\x8a\xcf\xe6\xa1\x0f\xe7\xcb\x07\xdf\x20\x85\x65\x2f\x28\xd1\x73\xd1\x86\x84\xd6\x5f\x7e\x8e\xa8\x4d\x20\xd2\x8a\x52\xe5\xa1\x8e\x77\x5b\xd5\xc5\xee\xe1\x27\x5b\xe5\xd7\x04\xb2\x80\xb7\x03\xc5\xfe\x78\xbd\xd4\x8b\x39\xc5\xed\x7d\x9c\xf8\x9c\xb4\x0c\xff\xda\xa0\xb8\xac\xeb\x7d\x9a\x38\xf8\xa1\xbf\xea\xef\xb5\x53\x61\xa3\x93\xe1\x3c\x42\x84\xaf\xbd\x21\xd0\x80\x7a\x1b\x2f\xb4\x76\x05\xa5\x44\x31\xae\x49\x30\x2f\xd2\x26\x31\x54\x95\xb6\xd8\xc5\xfc\xbf\x74\x71\x0a\xbc\xa7\x21\x36\x32\x1a\x83\x62\xe5\x9d\x59\xcc\x3f\xd2\xc7\xb2\xdf\x87\x63\xfe\x2
d\x7d\x3b\x4e\x4b\x45\xe6\x9f\xe5\x38\xa6\x88\xc2\x2b\xfb\xb7\x62\x86\x65\xdf\x5b\xc6\xfe\x33\xcb\x62\xf1\xba\x64\xaf\x7f\x2b\x86\xb3\xa2\x70\x5f\xff\x34\xc1\x32\xe2\xc0\x53\xe2\xff\x6f\x33\xfb\x8f\xcd\xff\xd8\xfc\x6f\xd8\x3c\xa1\x72\x05\xea\x3e\x34\x76\x71\x40\xba\xbf\x5c\xc7\xf3\x0d\x5b\xc1\x3f\x91\x24\xfd\xef\xff\xf1\x3f\xff\xd7\x7f\xfd\x9f\xff\xfa\xbf\x01\x00\x00\xff\xff\xbb\x94\x2f\x4d\xe9\x06\x02\x00") +var _pagesAssetsStylesContainersCss = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\xb9\xd7\x8e\xe3\x68\x9a\x28\x78\xdf\x4f\x91\x3b\x83\x05\xce\x39\xec\x6c\x7a\x57\x85\xbd\xa0\x11\x45\x27\x7a\x7f\xb3\xa0\x27\x25\x7a\x27\x92\x85\x79\xf7\x85\x22\x22\x23\xb3\x4c\x57\x75\xcf\x6c\x20\x95\x11\xfc\xf9\x7f\xde\x7f\xfa\xdb\x3f\xca\xbe\x2f\x9b\xfc\xeb\x56\xcf\x6b\xdc\xd4\x67\xbc\xd4\x7d\xf7\x75\xe9\xfb\x26\x89\xa7\x5f\x8a\xbe\x5b\xbe\xce\xf5\x99\xff\x04\x43\xd0\xff\xfd\x5f\x7f\x7a\xf9\xcb\x9f\xbe\xfd\x9a\xef\x43\x3f\x2d\x5f\xeb\xf7\x4b\x7f\xff\xff\x01\x55\x16\x2f\xf1\xff\x08\x4f\xb5\xb4\xcd\xd7\xb4\xcf\xf2\x5f\xda\x78\x2a\xeb\xee\xeb\x54\x97\xd5\xf2\xd3\x3f\xe0\xbc\xfd\x73\x51\xbf\x43\xbe\x78\x69\xe2\xee\xed\xed\xbb\xb6\x9e\xf9\x1b\x92\xa4\x6f\xb2\xbf\x40\xd2\x3f\xbe\x26\xeb\xb2\xf4\xdd\x2f\x43\x9c\x65\x75\x57\xfe\x84\x0c\xfb\x5f\xc0\x2c\x53\x1d\x77\x65\x93\xff\x32\xf4\x73\xfd\x7a\xf3\x53\x9c\xcc\x7d\xb3\x2e\xf9\xcf\xef\xcc\x43\x3f\x2f\xfd\xf0\x13\xf4\x17\x68\xd2\x78\x78\x7f\x8e\x93\x26\xff\xe5\x59\x67\x4b\xf5\x66\xe1\x9f\xbf\x71\x02\xfd\xfc\xae\x92\x9f\xa0\x9f\x93\x7e\xca\xf2\xe9\xf3\x8f\xaf\x69\xdf\x34\xf1\x30\xe7\x3f\x7d\xfb\xe3\x2f\x68\xcd\x6d\xdc\x34\x5f\xb3\x3a\x6e\xfa\xf2\x83\x14\x0e\x41\x7f\x29\x69\x52\x97\xbf\x06\xa2\xfe\x05\xa0\x1f\x69\xfd\xb9\x6b\xfc\x88\xff\xf7\xba\x4c\xe2\xf4\x51\x4e\xfd\xda\x65\x2f\x71\xfb\xe9\xa7\xff\x4c\xe1\x8c\x2e\x8a\x6f\xca\x80\x87\xfd\xcb\xdc\x37\x75\xf6\xe5\x3f\xd1\x18\x27\x49\xec\x53\x71\xd4\xbf\xc5\xe3\xd7\xe4\x5f\x67\xf3\x6b\x52\xfe\xf2\x7b\xbe\xb2\x2c\xfb\xf9\xf7\xfc\xbf\xf9\xc0\xcf\x4d\x5e\x2c\x7f\xe9\x0a\xbf\x62\x67\xa9\
x97\xbf\x8a\xcd\x1f\x39\x7a\xbb\xfe\x07\x4c\xe5\x50\x9e\x15\xf9\xcf\x1f\x4f\x10\x04\xfd\x9c\xae\xd3\xdc\x4f\x3f\x0d\x7d\xdd\x2d\xf9\xf4\xa3\xb6\xbe\x73\x3f\xe5\x4d\xbc\xd4\x5b\xfe\xf3\x0f\x59\x07\x19\x96\x9f\x7f\x1b\x56\x3f\x6f\xf9\xb4\xd4\x69\xdc\x7c\x8d\x9b\xba\xec\x7e\x6a\xeb\x2c\x6b\xfe\x1d\x2f\xfc\x9a\xf6\xdd\x92\x77\xcb\xbf\x2e\xe8\x07\xc0\x1f\x88\x5a\x14\xc5\xa7\x34\xd8\xb0\xff\x8a\xd9\xae\x9f\xda\xb8\xf9\xb9\xdf\xf2\xa9\x68\xfa\xe7\x4f\xf1\xba\xf4\xff\xb6\x35\xbe\xa6\x4d\x3f\xff\xbb\x36\x79\x07\xfa\x81\xdd\x9f\x96\x29\xee\xe6\x21\x9e\xf2\x6e\xf9\xb2\x4e\xcd\xff\x7a\xbb\xf0\xff\x26\xfd\xfe\x8f\xb2\x2e\xfe\xf7\x97\xae\xff\x3a\xe5\x43\x1e\x2f\x5f\xe6\x74\xea\x9b\xe6\x4b\x9a\xbf\x19\xaa\x7a\x97\x04\xc6\x7f\x34\xd4\x6f\x52\x0e\x0c\x0d\xfb\x9b\xc7\xbd\xac\xf9\x91\x4b\xf0\x7f\x2f\x0c\x3e\xf4\xfb\xa5\x2e\xa6\xb8\xfd\x37\x84\xfd\x35\xdc\x8f\xd9\xe5\x1b\xe7\xe4\xdb\xc3\xef\xc2\x36\x69\xe2\xf4\xf1\x5f\xff\x48\xab\x78\x5a\xe6\xaf\x75\xd7\xd4\x5d\xfe\x35\x69\xfa\xf4\xf1\xcb\xef\xfd\x31\xab\xe7\xa1\x89\x8f\x9f\xbe\xb6\xfd\xf9\x79\xb7\xdf\x3f\xcf\x7f\x04\xff\xaf\xff\xf3\xe5\x55\x1c\xbe\xfc\x11\xea\xbf\xff\x9f\x9f\x8a\x7a\x9a\x97\xaf\x69\x55\x37\x19\xf0\x4f\xef\xfd\xf2\x6b\xc4\x9f\x6c\xb6\x79\xb7\xfe\x68\xd3\x37\xe7\xfb\x9e\x95\xdf\xd2\x54\x9a\x7e\xf9\x4f\x82\x20\x3e\xfe\x4b\xd3\xf4\xdb\x85\x79\x39\x9a\xfc\xa7\x37\xe9\xbf\x1d\x7d\xd8\x6a\xd8\xbf\x85\x67\x96\x17\xf1\xda\xbc\x47\xdc\x87\xf7\x7e\x81\xd1\x61\xff\xc2\x4c\x75\xdc\xfc\x7d\x8e\xbb\xf9\xeb\x9c\x4f\x75\xf1\xbd\x3a\xf4\xeb\xf2\xe2\xf1\xa7\xae\xef\xf2\x1f\x03\xe1\x0b\xf4\x07\xfe\xf2\xd2\x5e\x96\xef\x3f\x21\x10\x04\x41\xbf\x12\xeb\x5b\x25\xfc\x51\xba\x2c\xcb\xde\x5c\x15\x04\xe7\xb9\xf9\x47\x39\x2f\xf1\x52\xa7\xff\x48\xfb\x16\xcc\xb3\x7a\xe9\x27\xf0\x1d\xe6\x6b\x52\xfe\x63\xe8\xca\xff\xfd\xe5\xdd\x81\xbf\xee\x5f\x96\x7e\xf8\xf2\xca\x7d\xdf\x4b\xd7\x3f\xcf\x44\x4d\x3d\x2f\x1f\xba\x79\x13\xe1\x43\x30\x64\xd8\xff\x58\x34\xe8\xe7\x25\xdf\x97\xaf\x59\x9e\xf6\xd3\x9b\x5b\xbe\xbf\xfe\x27\x29\xe9\xf7\x12\x7e\xed\xd7\x25\x9f\x5e\
xee\xf3\xf7\x3f\x7a\x5b\x77\xdd\xfb\xdb\x5f\xfe\xb9\xd9\x3e\x84\x89\xe3\xf8\xb7\x64\x97\x7e\xf8\x73\x9a\xbf\xfc\xa6\xaa\x7f\x77\x81\x97\xbd\xbe\x89\xf8\x87\x38\xbe\x73\xf6\x0d\xc7\x97\xaf\xf0\x67\x68\x7d\x20\x82\xbe\xbc\x8e\xbe\x21\x7a\x79\x0e\x36\xec\xbf\x0d\x89\x3f\xc6\xfa\x56\xac\x5e\x18\xff\xec\xfa\xb4\xfc\xf1\xf9\x77\x01\x3f\xd1\x7c\x24\xa7\xb7\x9c\xfb\xdf\xc0\xf8\x9d\xb1\x1f\xf1\xfc\xf3\xf8\xfd\x4b\xa1\xfe\x35\xd0\xbf\x66\xe7\x0d\xe5\x6f\xe4\xfb\xe9\xa7\xa9\xef\x97\x3f\x02\xfc\xe5\x2d\xab\x7c\xe4\x42\xe8\x4f\x2e\xfe\xa8\xc2\x7f\x15\xe4\x07\xa6\xfe\x55\x90\x8f\xc6\xf3\x57\x00\xef\x89\xe6\xcf\xa0\xb2\xa9\x1f\xb2\xfe\xf9\x87\x60\x7f\x78\xbf\x9e\x5f\x8d\x6d\xf6\x63\xbd\xae\xdb\xb8\x7c\x8f\xe5\xff\xab\x6e\x5f\xf3\x43\xdc\x2d\x3f\xf7\x43\x9c\xd6\xcb\xf1\xd3\x3f\xd0\x9f\xdf\x92\xfb\x0f\xcf\x45\xdd\x2c\xf9\xf4\x53\xdc\x0c\x55\xfc\xbf\x3e\xce\xff\x1f\x14\xfa\xdf\x7f\x4a\xf0\xcf\x35\xfb\x87\x11\xff\xa7\xa0\x9f\x1a\xfe\xf7\x41\x3f\x34\xfd\xef\x03\x7e\x2a\xfb\x23\xcf\xa0\x28\xfa\x83\xc6\x7e\x9d\x83\x68\x9a\xfe\xfe\xee\xcf\xc2\xec\x1b\xc5\x3f\x2b\x81\x7f\x68\xc1\xef\xf9\xf8\x95\x5a\x7e\x60\xe4\x33\x5b\xfd\xfa\xfc\x0f\xcd\x53\xbd\x5a\xb0\xff\x86\x6d\xfe\x39\xdc\xef\xf2\xf4\x37\x85\xa4\xc5\x97\xff\x24\xe8\xfc\xe3\x3f\x32\x2e\xfe\x82\xb5\x38\x7d\xb5\x18\x7f\x48\xbd\x1f\xf2\xee\x0f\x3a\xce\x24\x49\x7e\x9c\x4f\x3e\xab\x6c\xd2\x2f\x4b\xdf\xbe\x15\xbe\x3f\xa4\x54\xf4\xe9\x3a\xff\xb7\x5c\xf4\xcf\x20\xff\x89\x22\xfa\x29\xee\xca\x3f\x2e\x80\xdf\x52\xc0\x77\x03\xbe\xb5\x0b\x5f\xa0\x7f\xb5\x9a\x7d\x3a\xe8\x8f\xcd\xe9\x7b\xfd\x21\x5f\x05\xe9\x7b\x0b\xf1\xa7\xdd\xc3\xfb\xaf\x8f\xa6\xf2\xbd\x83\xf8\xde\x05\x7f\x45\x29\xea\xad\x28\xfe\x8b\x3c\x7d\x9b\x85\xdf\x57\x08\x7f\xa8\xc5\x5f\x5f\xf9\x6f\x98\xe1\x5f\x40\xf0\xdb\x22\xfd\xb1\xd1\xf8\xe3\x8a\xfe\x89\xef\xe5\x32\x7f\x4e\xf1\x75\xe3\x2f\xaa\xc6\x07\xc1\x6f\x33\xe7\x7f\x0b\xdb\xef\xd8\x7f\xc7\xf6\x2d\xe9\x7c\x2b\x7d\xdf\x46\xef\xa2\x28\xfe\x9a\xd0\x1f\x5e\xa8\xf2\xf4\xf1\xaf\x3a\xf4\x6f\xa9\x66\x59\xf6\x2b\xaa\xf5\x92\xb7\xbf\xfc\xd0\x60\
xfe\x75\xf3\xfc\x4f\x7a\x4e\xe8\x57\xfd\x33\x99\xb7\x6f\x81\x81\xbc\x4d\x55\x55\xbd\xe4\x5f\xe7\x21\x4e\x5f\x20\xcf\x29\x1e\x7e\xc7\xc2\x6f\x9f\x5f\xad\xc4\xb7\x18\x7b\x97\x81\xcc\xdb\x6f\x14\x3e\xfc\x02\x79\xdb\x56\xfc\xa8\x84\xae\x7f\xd3\x4d\xd2\xef\x5f\x7e\x8b\xf0\xef\xbf\xb9\x58\xa7\x7d\xf7\xbb\x4b\xbf\xa6\x08\x23\xbf\xc7\x1f\xa7\x69\xde\xfc\x73\xb8\x0f\xc6\xa0\xdf\x00\xbe\x49\xf4\x6d\x0a\xff\x77\x94\xfd\x7b\x2c\x7f\x58\xfc\xde\xde\xbc\xb1\xf6\xf7\x7f\x03\xe0\x37\x0c\xa5\x69\xfa\x4f\xb2\xfd\x5f\xe0\x79\xa9\xf2\x97\xff\x69\x2f\xf2\x86\xa9\xaa\xcb\xaa\xf9\x5d\x0a\x7a\x7f\xf5\x2a\x66\x7f\xb4\x41\x22\x72\xba\xa0\x7e\x53\xd8\x7f\x7d\xf8\xee\xab\x59\xbf\x2c\xf9\xef\x87\xc7\xef\x93\xc3\xd7\xf7\x12\xf4\xea\xfb\x3f\x8f\x96\x7e\x78\x3d\xff\x81\x31\x3f\xfc\xec\xf7\x8c\xbe\x69\xe3\x07\x3e\xdf\x53\xf2\x4f\x9f\xc9\xf9\x73\x29\x41\x0c\xfb\xfb\x8e\x8b\xf8\x93\xed\xc4\xab\x41\xfe\xe3\xf1\xec\xdb\xae\x82\xf8\x23\xf6\x7e\xdb\x88\xff\x05\xcf\x7f\x78\xfd\x4d\x90\x37\x06\xdf\x98\x78\xe7\xe7\x47\x6a\xfd\xfb\x16\x76\xce\x9b\x3c\x5d\xfe\xd0\xc1\x7e\x4b\xf0\x2f\x21\x7e\xa3\xbc\xff\x49\x09\xc4\x5f\xe1\xfb\x05\xfa\xbd\x6e\xde\xe2\xe4\x97\xef\x2d\xe0\xcf\x59\x3d\xe5\xe9\x9b\xf2\x9b\x65\xfa\xf9\xbb\xc4\xdf\x4b\xfc\x9f\x59\xe8\x63\xa4\x7e\x37\xcd\xdb\xd1\xbf\x68\x8e\x77\x3e\x3e\x6a\xc4\x0f\xd6\xfe\x01\xdd\xef\xba\xa1\x37\xc0\xb6\xcb\xdb\xbe\xab\xd3\xaf\x55\xdd\x2d\xbf\xfc\x76\xa0\x5f\xbb\x2c\x9f\x7e\xb7\x78\xf9\x35\xe0\x9c\x0f\xf1\x14\x2f\xfd\xf4\xa3\x1a\x7e\xb5\xb7\xfc\x1e\x02\x6f\x0c\xbe\xe6\xe0\xbf\xfd\xed\x1f\x2f\x1b\x7c\x6d\xe2\x24\x6f\xbe\xfc\xf2\xb7\x2f\x5f\xbe\x7c\xf9\xdd\x72\xf3\x6f\xff\xf5\xb7\x7f\xac\x5d\xfd\xeb\x5b\x1f\x44\xa8\xb7\x9f\x9f\xbf\x03\xbe\x07\x66\xbd\xc4\x4d\x9d\xbe\x41\xbe\xb7\x93\x5f\xd3\x61\xfd\xa7\xf8\x7f\xc0\x07\xbd\xfd\xbc\x01\xd6\xdd\xef\x40\x7f\x43\xf4\xbf\xfe\xf6\x8f\x29\x7e\x7e\x7d\x49\x30\xff\x88\xbc\x88\xdb\xba\x39\x7e\xfa\xf2\x1f\x5c\xbf\x4e\x75\x3e\x7d\xd1\xf2\xe7\x7f\xbc\x93\xf9\xb1\x6c\x7d\x19\xa6\xfc\xeb\xab\x72\xbd\x93\x9b\xfb\xe6\x63\xab\x57\x2f\x4d\
xfe\x6b\x8a\xc2\xdb\xcf\xdb\xbd\x21\x2e\x5f\x03\x5f\x9c\xe5\xd3\x97\x0a\xfe\xb8\xf6\xec\xa7\xec\x0d\xd5\x4f\x5f\x92\x29\x8f\x1f\x5f\x5f\x07\x6f\xd7\xdf\xbe\xcf\xf8\x3a\xf5\xcf\x3f\x64\x30\x7d\x67\xf0\x3f\xfe\xfe\xe5\x3f\xda\xbe\xeb\xdf\xf8\xfa\x8f\x1f\xb5\xf9\x32\xdd\x97\xb7\x76\xf2\xed\xf0\x07\x3f\xfa\xf2\xe6\x5e\xef\xc7\xbf\x49\x26\x5f\x96\x7e\x78\x7f\xf1\xb1\x6e\xfa\xf2\x89\xe0\xc7\x2e\xe6\x0b\xfa\x9b\xd3\x77\x87\xfd\xdd\xf1\x2b\x61\xfe\xee\xf0\x23\xb1\xbe\x9f\xff\xd7\xdf\xfe\x31\xaf\xc9\xab\xf6\xc4\xf5\xab\x57\xf9\xd8\x16\x7e\xad\xbb\x61\x5d\x3e\x04\xff\x15\x65\xec\x1b\xb6\xf7\x84\xf7\x05\x83\xde\xd1\xfc\x67\xd3\x97\xfd\x37\x4d\x35\x7d\xbc\xbc\x85\xcb\xfb\xd5\x8f\x1c\xfb\x05\x79\xdb\x9f\xfe\x8e\x41\xe4\xf3\xf4\xf7\x89\xfa\x7b\x0e\xf9\xdd\x8d\x77\x0d\x7f\xb0\xfe\xbb\xb7\x9f\x09\xe2\xdb\xf6\xf9\xb7\x17\xde\x67\xf8\x57\x4a\xfb\x8f\x2c\x5e\xe2\x9f\xde\x9e\xc1\xa1\x2b\x7f\x4e\xe2\x39\x27\xb0\xbf\xd7\x1e\xab\x5b\x4f\x48\xb9\x96\x3d\xc3\x30\x8c\x66\xbb\xd5\xc5\x2d\x19\x86\x25\xdd\xd7\x63\xcd\x31\x12\xc3\x30\x1c\x8b\x98\x03\xca\x30\x8c\x62\xfb\x0d\x6f\xc2\xec\xe9\x3e\x32\x4e\xba\x34\x82\x79\x86\xbb\xee\x84\x88\xea\x48\xeb\xcd\x64\x98\x5c\x1b\x33\x17\xcd\xc2\x07\x98\xfa\xc6\x02\xa2\x91\x49\xfb\xe6\x72\x7f\xce\x51\x36\x97\x26\x53\x4b\xc4\x53\x3a\x58\x36\xea\x2b\xce\x66\xae\xd7\xcb\x85\x95\x99\x3d\xf4\x25\xae\x09\x5d\xc1\xba\xa6\xb6\x10\x5c\xca\xc5\x55\x6c\x19\x91\xb0\x91\xe9\xd7\x6b\xc6\xd6\x7d\x15\xd7\xb0\x97\xea\x3a\x56\xa0\x4a\x0e\x17\xc1\xb4\x91\x39\x4a\xc2\xe0\x4a\xe1\xf8\xd9\x81\x37\x1c\x3f\x9f\x00\x73\xb1\xa4\xf6\x91\xc7\x75\x5c\x32\xb8\xdb\xbb\x94\xbe\x94\xc5\x1d\xa2\xa5\xfd\xd6\xd2\x25\x23\x79\xb6\xce\x5c\x24\x96\x6f\x0f\x4a\xee\xd8\xd4\x65\x18\x83\xe1\xf3\xa0\x02\x32\x95\x01\x41\xe6\xba\x25\x4f\x86\x63\x44\xde\x53\x6b\xd3\xde\x01\x11\x2c\xc9\xc1\x94\x9b\x94\x91\xe4\x0b\x83\x71\x92\x8d\x3f\x67\xf6\x21\x48\xcc\xc1\xe1\xee\xc1\x9a\xcc\xc1\x96\xcc\x2c\xe8\x67\xc4\xf4\x4c\xc3\x30\x4c\x22\x36\x94\x59\x4b\xcc\x58\x42\x0c\x69\x58\xb2\x51\xba\x0c\xa2\xb4\x0e\xca\xa6\x0c\
xa2\x48\xe9\x68\x96\xcc\xdd\xbc\x30\xad\x5d\x89\x32\x23\x33\xeb\x93\x2d\xbd\x50\xb0\x55\x46\x65\x9e\x25\x57\x4e\x17\xa6\x56\x4b\x87\x99\xca\x2b\xe3\x27\xe1\x61\x3d\x15\x66\x33\x5d\x26\xeb\xf1\xf0\xc1\xd0\x4c\xc8\x70\x4c\xeb\x4a\xf3\xc9\x30\xa5\xce\x48\x8c\xb9\x63\xce\x69\xde\x4a\x87\xd1\xcb\x9b\xa6\x80\x98\xd2\x14\xa9\xe4\x63\x36\x73\x61\x9e\xb7\xd5\x3b\xa3\x4c\x85\xee\x37\x77\x95\x03\xf3\xda\x15\x07\xa5\xf3\xb5\x94\xab\x5c\xa2\x0a\x5c\x68\xa5\xc3\xe5\x1a\xca\x38\xa7\xa4\x0d\x3e\xdf\x2f\xd0\xbd\x63\xa0\x53\xe7\x19\x86\xb9\x6e\x9d\xf5\x2c\x30\xcc\xc0\xf9\x0e\x5f\x9d\x0a\x6f\x93\x2e\xd1\x41\x68\x8f\x55\x20\x06\xd9\x4d\xba\x63\x15\x95\x81\xc0\x26\x3d\x39\x96\x61\x4c\x67\x06\x37\xa4\x01\x1b\x3c\x0b\xf9\xf4\x96\xb2\xb3\xe8\x83\x31\xd1\x24\x8c\x36\xf8\xcc\xf5\x5e\x00\x1e\x74\x22\xcf\xb6\xf4\xcb\xc9\xcc\xb7\x56\xcd\x9d\x3b\x65\xb2\x44\xde\x7b\xbc\x64\xb0\x50\xca\x95\xb3\x4a\xda\x5e\xdd\x27\xd0\x13\xc3\x66\x00\x3c\x8d\x2a\x01\x38\xb5\xd8\x12\xca\x64\x49\x00\x41\xa7\x27\x23\x5e\x63\x53\x6e\x97\xa7\xa2\xf3\x60\x20\x54\x49\x17\xaf\x6e\x4f\x76\xc7\xd3\xe2\x6e\x3e\x89\xe2\x87\x01\xec\x98\x07\x82\x21\x36\x40\xb9\x04\xcd\xde\x73\x16\xc2\x69\xf7\xa5\x16\x73\x39\x59\xdb\xd1\x16\x77\x07\xe1\x22\xa5\xad\x87\x55\xea\x50\x9c\xea\x2e\xe2\x09\xcd\x45\x1a\xca\x9b\xdc\x5c\x3d\x40\xc6\x99\x42\x71\x49\x97\xf9\x29\x92\x2b\x63\x2a\x2a\xdf\xdc\xed\xac\x64\x99\x62\xbb\x2d\x77\xb6\x57\x1f\x26\x63\x46\xa7\x79\xe4\x45\xca\x30\xb2\x0e\x02\x21\xcd\x28\xcf\x0b\x73\x46\xf9\x03\x7b\x32\xcc\xa6\x97\x61\xa0\xd4\xae\x00\x9d\x57\x8e\x61\x18\x4b\xe1\x58\x65\x62\x59\xf6\x79\xb2\x82\x72\x32\xfc\x70\xb2\x62\x72\x32\xc2\x53\xae\xd8\x87\xc5\xf0\x9c\xc3\x18\x2c\xcb\xb1\xab\xcd\x88\xcc\x58\x5d\xe5\x88\xbd\x7a\x0e\xa3\x2f\xe5\x65\x49\x18\xfe\x3a\x32\x56\xbb\x5e\xcc\xe5\x76\xd9\x75\x06\xed\x30\x56\x4e\xcb\x7c\x79\x0a\xcf\xa8\xbc\xca\x24\x77\x55\x8f\x2b\x67\x5a\xec\xc1\x94\x4e\x68\x5f\x88\x9a\x15\xea\xfb\x52\xd6\x67\x59\x06\x6d\x88\x5d\xa4\x56\x90\x34\x59\x61\x06\x8e\x1f\x6a\xd6\xb2\xa4\x39\x78\x39\x21\x37\xba\xf2\xf8\
xf0\x60\x49\x02\xa5\x25\xad\x4c\xfc\xee\xed\x0c\x30\xf0\xc9\xc1\x31\x47\xbd\x1d\xca\x76\x49\xf7\x46\xba\x1e\xae\x69\xb8\x4c\xe7\x31\x6b\x5c\x16\xd7\x7e\xba\xf6\xa2\x61\x65\xbc\x69\x04\x21\xe7\xf7\xed\x66\x5d\x27\x33\x37\x42\x0e\xe9\xc7\xc2\xb2\x81\x0a\xce\x18\x96\xae\x7b\xfc\x5e\x6a\x97\x9d\x6f\xe3\x1b\xe2\x0b\xe9\x5d\xca\x84\x50\x6a\xc2\x6b\x9d\x88\x42\x6b\x34\x3e\xef\xf9\xf7\xd3\xbb\x8b\x5e\x89\x66\x95\x9b\x3c\x65\xbc\x22\x9f\xe5\x83\xe9\x57\xe6\x02\x3c\xa5\xa0\x37\xcd\x67\x7b\x70\x0f\x99\x15\xfd\xcb\x1e\x9a\x44\xd9\x90\x9c\x0e\x5d\xa6\x5e\xf2\x9e\x2d\x5f\xf1\x22\x27\x1f\x8a\x33\x5e\x00\x48\x0e\x7a\x1b\x48\x2d\x2c\xbc\x8f\xfb\x43\xa8\xae\x13\x97\xac\x92\x85\x55\x5d\xed\xe5\xf2\x13\x73\x02\x5c\x93\x64\x86\x94\x1f\xab\xa5\x73\xf5\xa5\x79\xf0\xb2\xcc\x41\xcd\x64\x6b\x8b\xa4\xc6\x96\x33\xf0\xa4\x7d\xb9\xb6\xa9\xc3\x25\xd3\xe3\x8a\xca\xe8\x6a\x4a\x73\xcf\xcd\x92\x9c\x5b\x0b\xce\x18\x8f\x0b\x60\x03\x54\xc7\xe3\x6a\xea\xea\xb3\x2f\x61\x8f\xa7\xdc\xd7\x96\x3d\xc8\xa6\x6d\xdf\xbd\xc6\x00\xdc\xcb\x10\x51\x87\x39\xb4\xe5\xd8\x88\xda\x18\x91\x2e\xa7\x8c\x92\x3e\xd0\xb1\xed\x23\x65\xd6\x30\x9b\x8b\xe5\xc3\xe5\xb4\xe0\xb6\x0e\xb4\x76\x8f\xf8\x83\x30\x97\x9e\x0e\xcc\xb6\x6d\xba\xb8\xc8\x7a\x15\x68\xa0\xb5\xec\xb2\x72\xc7\x85\x86\xe6\x02\xa8\xdd\x7a\x16\xe8\x3a\x22\x34\xa0\xec\xf9\xd4\xaa\x12\xe1\x77\xa8\xaa\x15\xb6\x99\xae\xc1\x1c\x0d\x7b\x8d\x5e\x6d\x20\x72\x94\x50\x80\xf0\x3c\x3a\x04\xf3\x09\x9f\x52\xd2\xfa\x79\xc8\xe3\xd5\xe6\x79\xe5\x48\xd7\x03\x25\x44\xa2\x72\x19\x5c\xa2\xf1\x62\xaf\x18\x82\x6e\x6c\x2c\x2f\x85\x5d\x10\x0e\xda\x91\x83\xed\xc9\x33\x81\x29\x68\x22\x7a\x78\x44\xde\x0c\xf3\xf7\xd1\xec\x5c\x61\xdc\xbc\xd1\x82\xbd\x95\x78\x20\xd3\x8c\xdb\x19\xac\x18\xee\x8d\xb6\x02\x42\x07\x02\x10\xe1\x80\xf6\xa6\xcd\x7d\x2c\x58\x23\x77\xb5\x15\xcd\x0d\xc9\xe2\x7a\x0d\xa1\x4a\x98\xc3\x23\x2a\x84\xd0\x5c\xa2\xc3\x2b\x8d\x49\xbd\xae\xa1\xe3\xcb\xc9\xbc\x75\xb3\x23\x86\x02\xb0\x2f\x88\x8a\x2e\x37\x17\xd2\xce\xb1\x8d\xe7\x68\x10\xda\x2e\x8f\xf6\xd3\xbc\xe8\xfb\x24\x96\x14\xdf\x45\xdd\
x65\xd2\xa5\x14\xd3\xce\x6e\x5e\x98\x03\x02\xd5\x44\xf4\xe3\x11\x6e\xa7\x80\x58\x34\x7a\x20\x90\x89\x8c\xc3\x15\x3e\x48\x2b\x59\x08\x7a\xda\xb2\x80\xcb\x4c\x19\x9a\xbb\xc2\x25\x12\x67\x42\x33\xb3\x25\xb4\x69\xb0\xd6\xde\x27\x10\x34\x30\x88\x3b\xe9\x84\xc0\x82\x13\x66\x07\x1f\x9b\xab\x13\x19\x6a\x59\xa0\xb7\xd0\x8a\x01\x0b\xc0\xb4\xc1\x2b\x68\x89\x67\xfa\xd4\x98\x12\x79\xb0\xcd\x7c\x73\xaa\x3b\x75\x3b\xcc\x10\xa8\xa6\xf6\xda\x62\x50\xc1\x5d\xb0\xb8\xb4\x0e\xa4\x26\x1e\xa9\xd5\xf7\xb4\x20\xf7\x47\x8e\xc9\x2b\x21\x46\xc0\x0d\xce\xd9\x14\x88\xc6\x7d\x13\xaf\xca\xac\x69\x78\x83\x5e\x67\x2a\xf4\xe5\xe1\xb6\xac\x8f\x5d\x71\x04\x5c\xc5\x86\x33\xb2\xa6\xb6\x8d\x09\x28\x71\x9c\xec\x76\xde\x0e\xee\x4c\x27\xad\x9e\x8f\xfc\xd0\xf5\xcc\x4f\x63\x1c\xaf\xb6\xa5\xa5\x10\xf0\xd0\xaa\xd8\x13\xba\x9b\xa7\xd4\x67\xac\xee\x78\x12\x37\xdd\xa9\xb2\x51\xa4\x35\x5b\x3e\xb7\x42\x08\x78\x5d\xe3\xd3\x5e\x12\x4c\x97\xce\x0f\xf4\x79\x26\x10\xd4\xf7\x90\xdd\xbc\xc2\x79\xc2\xcb\x33\xd1\x52\x2b\x32\x2a\x48\x32\xa5\x16\xd2\x22\x68\x21\xe8\x92\x7b\x3f\x3a\x5b\x29\x3c\x27\x59\xfa\x9d\x34\xce\x99\x5b\x8f\x98\x64\x92\x45\x2f\xa2\x8a\xae\x03\x24\x29\x12\x43\x4c\x9f\xe9\xa5\x42\x7d\x1d\x35\xc9\x42\x94\xa1\x2c\xdd\x91\x44\xe4\x28\x82\x70\x2a\xed\xa6\x0f\xbb\xd2\xf1\x84\x97\x75\x57\x24\xaf\x52\x1c\x3d\x35\x2b\x6f\x6d\x9c\x57\x2f\x43\x86\x34\x7b\xe6\x24\x71\xae\x0c\x08\x51\xe4\xf3\xea\xe1\xd6\xb4\xae\x8b\x02\x43\xa0\x7f\x6f\xc9\x35\x41\x15\x34\x4a\x32\x27\x3b\x79\x54\x28\xe2\x8d\x74\x6c\x22\xb9\x52\x4d\xf6\x3c\xd1\xcd\x2c\x9c\x27\x0d\xdf\xeb\x34\x3d\xef\x5d\x9c\x27\x03\xa5\x9e\x0f\x3d\xdd\x82\x2e\xca\x7d\x8f\x24\x37\x0b\x37\x66\xfc\xd9\x76\x4b\xf8\x4c\xd2\x7b\xb2\x0b\x5b\xb9\xa5\x0b\x19\xf2\x3d\x60\xf8\x01\x71\xae\x0f\xde\x98\x43\x80\x19\x68\xa2\xc3\x72\x60\x98\xb0\x1a\x3d\x11\x40\x42\x49\xbf\x18\x1a\x80\x05\x8f\x1c\x38\x37\xe2\x00\x11\x03\x50\xc1\x2a\x2f\x24\x9f\x72\x31\x21\x64\x44\x96\xc9\x5d\x76\x0b\x2f\x71\x99\xdc\xf8\xf8\x79\x2b\x58\xf9\x71\x19\xcc\x14\x66\x43\x48\x58\x9f\xe1\xc6\xf3\x98\x67\x96\x4f\x99\
xf7\x1e\x45\x3a\x68\xdb\x93\x2d\x8e\x2b\xd8\xdc\x6f\xf4\xde\xa9\xf2\x64\x09\x69\x2f\x75\x96\x17\x5f\xc1\xfe\x79\x72\xd1\x4d\x5c\x42\xeb\xde\x27\x06\xa7\x51\x66\xd1\x02\x2d\xe0\x9e\xc3\xa9\x86\x52\xad\xb3\x62\x22\xd5\x98\x88\x3c\xa2\xd6\x2a\xf0\xc2\x32\x07\xe5\xae\x8c\xea\x3a\x58\x8b\xa0\x41\x69\xe0\xa0\xaa\x16\xcf\xb8\xd0\x91\x11\x89\xe8\x54\x08\x0c\x77\xf5\x41\x22\xc8\x88\xa4\x5d\xd4\x4d\xd7\x61\x76\xdb\xfd\x4e\xca\x2b\x56\xaa\xf5\xb8\x4a\x02\x56\x81\x95\x40\x45\xa9\x9d\xb1\xb7\x07\x97\x23\x32\x7e\x73\xc2\x9d\x47\x64\x0c\xbd\x71\x38\x72\xc4\x37\xfc\x7c\xb4\x8d\xed\x67\x57\x7d\xc5\xb3\xa3\x5e\xad\xa2\x25\x52\x52\xed\xba\x94\xf2\x1d\x2f\x7a\x88\x4f\xac\x8a\x4a\xbb\x21\x25\x10\x0f\x6f\xa5\xd9\x30\xdb\xf5\x02\x36\x03\x82\x6b\xa2\x65\x1c\x95\xbc\x0a\x0e\xe4\x03\x7d\x8f\xd4\x6e\x72\x45\xd7\x4b\x4f\x3d\xd6\x38\xf3\x86\x00\xd1\x35\x88\x2c\x5c\x83\x88\xa5\xc8\xb0\xba\xe0\x7a\x7b\x36\x86\x5b\x2f\xf8\xc3\x6a\xed\xd5\x36\x4e\xc7\x8b\x2e\xf0\x42\xcc\x27\xdd\xec\x88\x0d\xe1\x8d\x7c\xb8\x9d\xa2\xd9\xbc\xdf\xfa\x0b\xb7\x05\x37\x35\xed\x17\x88\x6e\x95\x63\x75\xf8\xdc\xc2\xf1\x16\x55\xae\xb8\x2e\x1e\x5b\xf1\x90\xc0\xe6\xa4\xe8\xd9\x4b\x24\xef\x06\xc5\x3a\xe4\x8e\xb8\xc9\x57\xd1\x4d\x25\xca\x29\x17\xe6\xd0\x47\x6b\xcc\x56\x61\xc9\x5f\xcc\x13\x45\x12\x80\xd8\x1e\x77\x0e\x93\xfc\x42\x45\xcc\x59\xe0\xb0\x59\x35\xc2\x55\xac\x22\x2a\x67\xea\x0d\xf2\x49\xa1\xd6\x55\x77\xcf\x50\xf5\x2e\x64\xee\x34\x3c\xf2\x6b\xf7\x84\x3a\x54\x8d\x6f\xeb\xd1\x6c\xb0\x89\x35\x95\xed\x15\xf2\x98\x67\xe7\x71\x6f\x14\x1e\x1a\xfd\xc6\x3c\x56\xce\x3b\x6d\x6f\x8c\x1c\xf9\x40\x88\x95\x16\x46\x3c\xdc\xbd\xaa\xf1\x2d\x8f\x44\xc6\x00\xf6\x16\xbf\x38\xfa\xa0\xf1\x80\x39\x55\x8e\x7e\xda\xea\xa7\x35\xdd\x5b\xac\xc9\xf4\x80\xf6\x28\x3a\x68\x16\x64\x99\x72\x7b\x2a\x14\x32\x5f\x24\x82\x70\xa6\x64\x3c\x51\x3f\x01\x36\x7b\x82\x47\x33\x0b\xcc\x6a\x63\xb4\xc9\xda\xf6\x7b\xee\x26\x01\x02\x2b\x68\x67\x57\x79\x3b\xc1\xf7\x19\x81\x5e\xd9\x81\x39\xbd\x4a\xcb\xe1\xe4\x54\xe3\x19\x01\x95\x4a\x1b\x0d\x24\x1f\x60\x30\x48\x17\x1f\x8c\x91\
x69\x21\xc9\x48\x5e\x26\x7f\xa3\x61\x34\x07\x6f\x7a\xb9\x43\x9e\x4e\x93\x5b\xa3\x1f\xd4\x62\x2f\x98\x87\xac\xb4\x15\xa0\x70\xb2\xa8\x6b\xb9\x20\xc6\xb5\x95\x92\x75\x02\xdb\x62\x41\x30\x0b\xcd\x8c\x3d\x22\x8c\x9c\xf0\x48\x16\x8c\x04\x12\x58\xc6\xec\x4a\xcf\xa8\x02\x80\x1b\x29\xc9\x89\x27\x21\x77\x0b\x97\xf4\x8e\x6f\x5a\x8c\xdd\x90\x61\x5b\xda\x2e\x87\x53\xa1\x1f\xf9\x59\x66\xf1\xf4\xd2\xa1\x44\x44\xe5\x80\x66\x12\x8a\xa8\x3f\xe9\x93\x3f\x33\x20\x2a\xc8\x02\xe6\xf4\x45\xca\xed\xcd\xd6\x27\x1a\x9e\x38\xe4\x9e\xc4\x0c\xdd\x27\xe8\x46\x48\xc7\xbd\xb3\xe0\x5c\x74\x73\x60\x01\x69\x80\x3c\xc5\x7b\x88\x06\xa2\x16\xd2\x41\x37\x72\x99\xc2\xa3\x3b\x90\xdb\xdc\x03\xd0\xbb\x48\xc9\xf3\xce\xc5\x40\x23\xb3\x23\x54\xec\x50\xc7\x38\xa2\x18\x86\xad\x20\x81\x3b\x93\x02\x72\x63\xcf\x20\xf2\x71\xa0\x01\x58\xc8\x14\x4d\x76\x77\x18\x7d\x6e\x22\x0f\x49\x5b\x56\x23\x87\x8f\xda\x04\x44\x4f\x77\x58\x2c\x3a\x9e\xc6\xdc\x7a\xf3\x11\x70\x8a\x8a\x42\x25\xbd\x94\x9a\x3a\x07\x72\xf2\x99\x84\x72\xd0\x29\xc0\x61\x7c\x6a\xde\x09\x83\xf7\x40\x6d\x90\x22\x4f\xd0\x6d\x5f\x9f\xc9\x82\x60\x2d\x6a\x1d\x98\x9c\x4c\x30\x20\x26\x08\x69\x65\x28\xb7\xe2\x37\x54\x80\x89\x3b\x5a\x6f\xa0\x4a\xc6\x60\xde\x8b\x40\x09\x8e\x05\xe9\x00\xb3\x1f\x30\x49\xce\x63\xe1\x8d\x2d\x4f\x4a\x8b\x78\x80\xd2\xb9\x30\xcf\xc1\xc1\x01\x21\xac\xa8\xa0\x47\x71\xb3\x68\xf7\x6e\xd5\xba\x76\xc7\xa1\x42\xdc\x67\xed\x3a\xa1\x47\x74\x23\x1c\x2c\xce\x8f\x61\x37\x8c\xa3\xc3\x7d\x10\xed\xa6\x94\xd8\x3a\x14\x44\x6b\x00\xda\x0c\xdf\x38\x43\x8a\xd9\xf0\x9a\x02\xc0\xa9\x6b\x07\x0a\x55\xb7\x91\x68\x37\x7d\x4e\xef\xfb\x0e\x50\x67\xf7\xa0\xa9\x38\xa1\xa6\x94\x4c\x8a\x1c\xc0\x48\x72\x2a\x76\x12\x2a\x5a\xfb\x4e\x99\x06\x6e\x52\xb4\x48\x84\x54\xde\x41\x17\x10\x35\xd6\x8c\x02\xb7\xe7\x46\x63\xce\x09\x40\xa0\x2c\x45\xd4\x6b\xce\xbb\x86\x8f\x45\xb4\x18\x1e\x64\x0c\x8a\x01\x09\x8e\x1e\xf0\xdb\xeb\x5c\x4e\x2f\x55\x94\x3e\x19\x46\xad\x6f\x0c\xc3\x61\x77\x26\xa8\x1b\x03\x0d\x5f\xef\xc4\xec\xd2\x68\x96\x59\xe4\x6c\x69\xda\xd2\xdd\xb8\x33\xeb\x6b\x52\x64\x4a\xe6\xd2\
x5c\x4c\xcf\xc2\x90\xf5\xcc\x32\x84\x4d\x60\xb4\x43\x53\x5f\xb2\xa4\xcb\xc5\x96\xd5\x3b\x3a\xd4\x96\x9e\x84\x4d\xbc\xd4\x41\xd4\xac\xe6\x92\xa7\xfd\x7d\x8e\x90\x87\x9f\x22\x3a\x74\x44\x0b\x46\x5a\x1b\x84\x5a\x0f\x7d\x6e\x93\xa1\x6d\xae\x18\x09\x37\x3e\xa5\xc5\xf0\xc4\xb4\x15\x64\x29\x22\x27\x35\xe4\x1d\x62\x1f\x25\xcb\xde\xa5\x0d\xe5\x09\x0f\x10\x4d\x5a\x90\x7a\x81\xe4\x31\x7e\xc3\x9f\x00\xfa\xb0\xdb\x14\x3f\xc9\x8e\xba\xa9\x14\x56\xb0\x69\x93\x15\x13\x6f\xbe\xc9\xc4\x48\x5c\x89\x89\x0c\xc1\x3f\x5d\xa6\x60\x24\x73\x5e\x69\x28\xbf\x5a\x8d\x2c\x62\xdc\x34\x28\xa1\xf0\xed\x92\xeb\xc1\x0e\x40\xc5\x49\x02\x3f\x3a\xb9\x2d\x85\x52\xe0\x1d\x88\x36\xc8\x0a\xa4\xf1\x75\xd2\xa7\x16\x0e\x19\x0f\x02\x05\xb0\xbb\xbc\x81\x98\x1a\x17\xb6\x51\x87\xdf\x77\x90\xde\x75\xef\x50\x3d\xe4\xc8\x35\x84\x58\x1c\xd1\x82\x0a\xf3\x62\x6a\xfc\x44\xe4\x10\x9d\x1b\xac\x69\x23\xcb\x64\x3f\xa6\x43\x38\x41\x50\xb7\x7a\x2e\xe0\xd1\x23\x35\x5f\x58\x58\xc6\xd5\x1a\x10\xd3\xb9\xea\x09\x15\xdc\x73\x56\x20\x7f\x45\x8f\xc8\xdd\x82\xc9\x83\x1c\xc1\xf2\x04\xff\xc9\x5c\x86\x82\x04\xc0\xf2\x26\xf2\x30\xdc\xc7\xc9\x23\xa5\x32\x18\x25\x4f\xb8\x19\x6c\x1f\x62\x93\x8a\xc0\xf5\x77\x59\x4b\x85\xad\x17\x7a\x03\xc1\x30\x05\xb3\xa9\x49\x1b\x10\xe8\x9c\x31\x27\x3b\x59\x4a\xee\x6e\x1c\x33\x07\xdb\xf3\xcc\xf3\x26\x76\x67\xe7\x0a\xda\x49\xa1\x5b\xd5\xdd\x1f\x2b\x39\x2e\x05\x10\xc9\xca\x10\x4b\xf8\x53\x73\xef\xe2\x33\x7d\xc3\xc7\xca\x33\x8a\xd2\x03\x4e\x74\x8e\x07\x80\xe0\x23\x7d\x8c\x40\x36\xc3\x23\x3d\x48\x3c\x27\x9a\x6f\xa8\xd0\xb3\xa3\xc0\xd6\xa1\xe6\x65\x59\x37\x3f\x10\xa8\x36\x80\x58\xb2\x6c\x3f\x79\xba\x32\xa7\x76\x27\x81\x07\x4e\x01\xb2\xdb\xd9\x2c\x48\xa4\x0d\xbb\x33\xa6\xb1\x62\x11\xff\x9c\x55\xee\x79\xdc\x36\x45\x4e\xd0\x5d\x1b\xe9\xc0\x96\x73\x2e\x7e\x48\x14\xff\x6e\x17\xd5\xbc\x51\x14\x00\xd3\x16\x85\xd9\x06\x28\xd8\x0f\xbc\x94\xd8\xb3\xf2\x1d\x6a\x5e\x37\x7f\x4a\xe0\x1d\x7b\x86\x31\xeb\x3d\x27\xea\x9b\x5d\x58\x76\x8c\x91\x0d\x24\x73\x10\x4c\x25\x77\x83\xf4\x06\x3a\xf0\x70\xf5\x93\xa5\xcd\x02\x0b\xd2\x74\x88\x93\
x94\x13\x00\x7b\x01\xe5\x1b\x68\xb8\x9a\x94\xca\xd1\x5a\xaf\x09\x20\x48\x0c\xd9\x3d\x6c\xe5\x5e\x40\xb9\x05\x90\x21\xf5\x0d\xe1\x85\x31\x8f\x79\x53\x1d\x17\xd7\xaf\xd5\x33\x05\xa6\x72\x4b\xd5\x26\xdd\x21\x20\x9d\x94\x71\xf5\x13\x19\x83\xb2\x11\x8e\xa1\xf3\x61\x97\xcc\x05\xa2\xb7\x84\xa6\x88\xea\xb9\x67\xb1\xbf\x91\x35\xbd\x24\x35\x56\xfa\x17\x8c\xe2\xe7\x59\xa4\x3e\x14\xcb\xa5\xce\x80\xac\xf9\xa6\x2f\x9d\x57\x9c\x8d\x37\x2e\xd9\x76\xad\x2b\x7f\x0b\xa6\xbe\x55\xef\xb1\xb7\x31\xac\x13\xc0\x59\x40\x9f\xa3\xc8\x4f\x56\xba\x05\x6b\x10\x14\xc0\xb8\x5b\xa8\x76\xcc\xeb\x23\xb9\x31\xdb\xdb\xdc\xc9\x30\x1c\x83\xe9\x16\xa4\x6f\x20\x8c\xa2\x14\x76\x3b\xb5\xb3\xf1\x80\x0d\xf2\xcb\x51\x99\xc7\x2d\x20\x0b\x3f\xe1\x13\xc7\x30\x39\x25\xbe\x82\x9b\x2e\x3a\xd8\xa1\xf8\xc8\x32\x6e\xa0\x02\x35\xb2\xed\x81\x04\xb6\x66\x83\xe8\x30\x64\xa7\x50\xdc\x1b\x7f\x4c\xa5\x63\xb9\x58\x09\xb9\xd7\x0c\xc4\x3a\x4e\xa0\x3a\x19\x16\x6e\x24\x39\x12\xcd\x48\x31\xbd\xf4\x31\x4d\xf4\x1a\xb8\x32\x65\xf0\xd0\x45\x92\x17\xb5\xa1\xa7\x5c\x0f\x0e\xac\x27\x8c\xd3\xdf\xc0\x9a\x6a\x3d\x95\xb0\xb0\x67\x0c\x38\xf7\x6f\x31\xb6\xf8\xce\x8e\x8d\x9b\xb1\x1f\x80\x23\xeb\xdd\x8c\x24\x13\x01\x74\x86\xc0\xed\x51\xba\x20\xf8\x12\xb7\xbd\x92\x98\x4e\xe9\x41\xa8\x26\x60\x58\xaa\x67\x53\x06\x9d\xba\xba\x11\x9b\x17\x57\x99\xdf\x68\x1a\x1f\x6a\x89\xb4\x3f\x3f\x6c\x6d\xcf\xb4\x61\x43\xb8\x1e\x34\xcf\xd4\x60\x63\x90\x90\x1e\x76\x9e\x26\xd7\xaa\x8f\xaf\xd8\xac\x70\x04\x40\x26\x2d\xec\x47\x75\xa2\xb1\x3a\x9b\x04\x23\x88\xa2\x46\x7b\x7d\x42\xb9\x7e\xae\x52\x4e\x47\x64\xcd\x16\x37\x8d\x2d\x0d\x8d\xbd\xc6\x5a\xc1\x3e\xdf\x4d\x7e\x7b\xfa\x2a\x3c\x62\x39\x05\x26\x33\xb8\x92\x2f\x38\xf3\xb1\xa9\x55\x14\xfb\xa8\x7c\xa4\x04\x1d\x2f\xaa\x7d\x97\x06\x9c\x62\xf0\x07\x61\x0f\xbc\xd6\x11\x18\x96\x17\x41\xf6\xc8\x44\x47\x5f\xc8\x4a\xa0\x59\x3d\xe4\x40\x79\xff\xf0\xa1\xa7\x0f\x57\xd6\xd6\x15\x45\xd1\x83\xe2\x0d\x4f\x83\x38\x07\xfb\x58\x07\x7b\xfb\x42\x1b\x2b\x42\x66\xeb\x2a\x09\x14\xf0\x18\x4b\xad\xd9\x57\x1a\xf5\xb4\x24\xd8\xb6\x6e\xbb\x85\x53\xa0\
x83\x99\x77\x93\x1b\x0d\x28\x2a\x3c\x63\xf3\x98\xfb\xce\xe7\x71\xb3\x5a\xcd\x0a\x7d\xc1\x49\x41\x23\x0d\x88\x11\x5d\x00\x30\xc2\x71\xdc\xb0\x1f\x43\xbc\x22\x11\x6c\x05\x6d\xe4\x8f\x48\xbb\x99\xdc\xfa\x98\x8d\x4a\x17\x7d\x82\x98\x1d\xd9\x85\x35\xc7\x25\x71\xe2\x74\x5d\x55\x81\xcd\x51\x48\x20\x01\x7f\xa8\x0d\x1d\x4c\x03\x53\xbe\x87\x6e\xcd\xbb\x70\x80\xde\x5b\x24\xcf\x28\x00\x1f\x77\x79\x31\x46\x38\x5e\x7d\xb5\x69\x67\x08\xe8\xfa\x9b\xc4\x4c\xad\x7c\xa4\x86\x48\xd6\xb8\xde\x8e\xc4\xd0\x46\x57\x0b\x00\xa6\xcb\xd8\x00\xeb\x18\x80\xf4\x30\x6b\x8b\x56\x85\x1c\x19\x1a\x4f\xec\x0d\x29\x27\xaa\x67\x5d\x8e\x9a\x35\x1a\x5a\xb4\x19\x55\x0c\x8f\x33\x3c\x20\x64\x36\xd1\x27\x61\x70\x0a\x9e\xce\xa3\x47\x10\xd2\x5d\x3a\x1e\xe7\x93\x01\xc2\xab\xf5\xc0\x6f\x35\xb9\x03\xd3\x74\x92\x5d\x96\x0c\x00\x98\x0b\x51\x2b\xaf\x17\xec\x26\x16\xf3\x67\xae\x52\x29\x14\xed\xce\x29\x13\x7c\xb8\x83\x31\xca\xe0\x77\xea\xc8\x65\x61\x84\xf3\xad\xbe\xfb\x51\xcc\xfb\xcc\x2b\xd9\x90\x03\x91\xf7\x50\x6e\x68\x1e\x80\x49\x97\xd6\xee\xb0\x18\xa3\xa3\x70\x51\xe0\x74\x1e\xe8\x80\x11\x55\x01\x14\xdf\x83\xc0\x04\x6f\x83\x62\x83\x48\x70\x3f\xd7\xec\xac\x6e\x9d\x83\x23\xeb\x58\x63\x8b\xea\xb5\x67\xdc\x78\x63\x5c\xf6\x1a\x0b\xc9\x3e\x85\xf9\x7e\x48\xe5\xfa\x82\x66\x10\x20\xba\xee\xe2\x0d\x99\xdf\xc8\xb7\x05\xbb\xe2\xd2\x4e\x7d\xa0\xb3\xc1\x4e\x3e\x67\x42\xbf\xf7\x47\xda\xc8\xe0\x13\xad\xa8\xe0\x3e\x12\xeb\x34\x11\xeb\x74\x52\xa0\xea\x4f\x02\xee\xf9\x42\xc5\x20\x98\x68\x62\x41\x4c\x9e\x74\xe2\x89\xe7\x82\x14\x62\xf5\xb0\x64\x9c\xf4\x1b\xcb\x15\xae\x90\x88\xf7\x38\xf5\xc9\xa3\x60\x8e\x9e\xdf\xc8\x54\xd8\x3a\x03\x62\x86\x04\x2c\x27\x08\xf1\x8a\x6f\xfb\x31\xf0\xfe\xe4\xc1\xbe\xdb\x20\x0f\xe8\xba\xf5\x06\xcb\x5f\x49\x34\x20\x0f\x58\x6f\x9c\x81\x58\x28\x00\xe7\x78\xe5\x98\x27\x35\x0e\xce\xf2\x0a\x02\x1c\x26\x9e\xdf\x62\xab\x8f\xb9\x69\x05\xb7\x00\x04\x83\xe2\x56\x52\xb1\x60\xf9\x70\x3a\x07\x87\x90\xae\x81\xe5\x5a\x97\x1b\xe3\x31\x0f\xf6\x11\x89\x0e\x0c\x80\x37\x3b\x9a\x51\x77\x6d\xa0\x4d\x4f\x1d\x37\x3e\x33\x8a\xa2\x4c\x74\
x55\x7a\xd6\xf8\x9e\xf3\x18\x7b\xbe\x4b\x67\x08\x41\x6d\x4d\xbb\x23\x4e\xb0\x8e\x0b\x1b\x24\xbf\x3f\xa9\x74\x73\x32\xcb\x9c\x27\xa5\xc9\x57\xfb\x52\x49\x21\xa7\x30\x92\xc6\xfb\x70\x47\x52\xfb\xad\xf5\xe1\x26\x01\x8d\x80\x3c\xb9\x4d\x3d\xd2\xb2\x13\x19\x8f\xaa\xbe\xd9\xbd\x92\x46\xfb\xb1\xca\xf7\x1d\x02\x25\x83\x20\x1d\xc2\xd9\xe1\x75\x1d\x20\x2f\xf0\x2a\x33\xbd\x8e\xc7\xe0\x0a\x6c\xd1\xdd\x58\x8b\x93\xd0\x2b\x8e\x06\x05\x82\x63\x47\xa6\xf3\x25\xa6\x2b\x4d\x75\x8d\xdd\x87\x9c\x3e\x45\x8c\xdb\x1e\x1f\x0e\xca\xba\xa7\x4c\x81\x0e\x2e\x50\x20\xaf\x6c\x30\xbd\xbd\x95\x6f\x64\x18\xc3\x87\xca\x1e\x78\x38\x09\xf5\x9e\x8c\x19\xdf\xb7\xac\x59\x4a\x17\x5c\x77\x06\x88\xc8\xa6\x71\x5c\xe7\x6c\x5b\x8e\x26\xae\xbd\x71\xeb\xce\x63\xa9\xad\x92\x2d\x24\xeb\xc3\x54\x92\xad\x90\xca\x31\xd7\x52\x2d\x11\x79\x33\x44\xe9\x1c\x8a\x55\x83\x53\x04\x92\x01\x4f\x2a\x0d\xd4\xe1\x41\x85\xbd\x77\x20\x9a\x26\x8f\xe1\xf5\x22\x44\x86\x03\x80\xfb\xe3\x78\xcb\x67\xb4\xd8\x53\x12\xed\xdb\x8f\x61\x5f\x35\x6b\x0d\x05\x8a\x0d\x3e\x7b\x88\x51\xe0\x26\xd8\xf5\x84\x2a\xf0\x60\xda\x96\xdc\xdb\x09\xd1\x45\xe1\x64\x2f\x9f\xec\xf0\x12\x9c\x90\xbb\x40\xad\x01\x0e\x04\xcd\xed\xc2\x31\xfa\x42\x2f\x34\xd9\x9d\x3c\x91\x69\x1e\xea\x40\xb4\x59\xb9\x48\x01\x62\x54\x54\x16\x4f\xa9\xf8\xcc\x59\x52\xeb\xd0\x5b\x57\xf3\x38\x70\xf5\x6d\x99\x77\x11\xcd\x49\x11\xcd\x2e\x2e\xfb\x41\x27\x4b\xe7\xec\x7d\xef\x52\x59\x49\x9b\xf0\x72\xc7\x72\xc1\xf3\x32\x17\x5d\xae\xa9\x6f\xb8\x5b\x8b\xb5\x7b\x75\xb2\x46\x69\x02\xce\x7b\x32\x91\x24\xd3\x15\xac\xa8\xd7\xd8\x81\x3d\x1f\x0d\xb6\xe1\xa2\x05\x65\x30\x89\x2e\x38\x88\x5d\xd9\xc8\xcf\x5b\x25\x4a\x37\x1b\xa2\xf5\x50\xbd\xb0\x7a\x30\x02\x28\xe0\xca\xaa\xe7\x05\xf0\xee\xae\x81\x7c\x79\xd8\x33\x54\x9d\x81\xb0\xdb\x6e\xad\x38\x6c\xf1\x23\x6e\xa7\xa7\x8b\x31\xed\xf4\xfd\x55\x87\x2e\xea\x0d\x6c\xd2\x25\xf0\xbb\xb1\x5d\x8c\x48\xcf\xbb\x29\xa2\x90\x44\x3d\xe6\x46\xc9\x31\xb3\xc3\x5a\x0a\x6e\x47\x82\x18\xa6\x78\x5c\x67\xaf\x9c\xcb\xa1\x50\x30\x7b\x64\x4e\xa2\xa5\x82\x9a\xce\xde\x43\x8b\xbb\xdc\x5f\x31\
x0f\xa0\xe9\xe2\xcc\xa5\x7d\x77\x43\x5e\xf7\x9a\xc1\x9d\x7d\x15\x09\x49\x3d\x67\xcf\xf7\xba\xb0\x4c\x8e\x0b\x2f\x36\xb2\x4e\xa5\x4a\x3b\xd4\x70\xc9\xc1\xdc\xbc\x03\xc1\x83\x1a\x9f\xfc\x53\x2e\x62\x83\x79\xbe\x17\x7d\x1b\x12\x4b\x83\xe9\x1c\x2c\xe0\xd2\x69\xec\x81\xc0\xf3\xb3\x76\x18\x8f\xc8\x10\xeb\xca\x0a\x1a\xb6\xd7\xd9\xab\xe9\xb4\xb1\xb8\x53\x9b\x92\x20\x8e\x8e\x44\xf3\x7e\x73\x2e\x4f\xc1\x5d\x1e\x44\x77\x50\xa4\x12\x6e\x3d\x83\x1e\xd2\x47\x09\xe0\x2e\xcd\xec\x5c\x76\xfd\xde\x13\xb7\x4e\x3e\x4c\x6e\x5c\x13\x1a\xc4\x14\x24\x5b\x69\x32\x5f\x09\x00\xcb\x6f\xa2\xbd\x87\xcb\x99\xa2\x7a\x68\x01\x6c\x9c\x3b\x47\x5e\xcd\x71\xdb\x0e\x53\xbc\x6c\x09\x62\xf2\xbb\x29\x39\xf3\x64\x13\x73\x73\x94\x1a\x75\xbf\x52\xe3\xbb\xfd\x79\x49\x49\xef\x86\x8e\xea\x88\xba\xf2\xca\xab\x9f\xcc\x82\x57\x4b\x12\xc0\x54\x21\xee\xf5\x1e\xf9\x9e\x6c\x07\x7b\x1a\x08\xb7\x68\x3b\x28\x74\xa4\xbb\x0d\x6c\xe8\x60\x7c\xfd\x13\x1f\xd3\x09\xda\xa0\x0b\x10\xf3\x06\x7a\xca\x0c\x5f\x67\xb4\x29\x88\xc1\x55\xaa\xbe\xdf\x00\x72\xed\x26\x0d\xce\x8a\xa0\x3b\x1f\xb4\xe1\x3c\x8b\xe7\x3d\x9e\xcb\x8e\x78\x5c\x0a\x89\xfc\xcc\x41\x52\x9e\xcd\x41\x81\xaa\x13\xf9\x8c\x29\x1e\xb4\x47\xd8\x8a\xda\xe8\xe1\xbd\xf5\x25\xb6\x6b\x5d\x2c\xe1\x46\xd0\xc1\x99\x4f\x2f\x2f\x36\x10\xc2\xef\xc4\xe5\x66\x54\xab\xf1\x34\x2a\xca\xa8\x5a\xb9\x8d\x52\x67\xcc\x6e\x10\xd1\x41\x40\x03\x9d\xf7\xaa\xeb\x78\x6d\x6d\xe8\x6d\x48\xa7\x26\x55\xa7\x22\x21\x83\x98\x0c\x6c\x32\xc0\xdd\x9d\x82\x1f\x5a\xe1\xcc\x2d\xec\x8f\xb0\x3f\xd2\x5e\x36\x2c\xc1\x98\xa9\xd3\x93\x32\xd8\x27\x18\x81\x40\xff\x9a\x2b\x0a\x6a\xc2\x1e\x20\xe8\xed\x54\x74\x4d\xbb\x41\x5f\xb2\x31\xb9\x9e\x73\x22\xa0\xf4\xab\x9f\x99\x93\xeb\x2e\xa9\x75\xaa\xa2\xe7\x3a\x13\xed\xa0\xb8\xcf\x09\x13\xb6\xcf\x3a\x55\xc9\x07\x07\xf2\x87\xa7\xac\x2c\x7a\x17\x5b\x1a\x9d\x89\x75\x7a\x7d\x5e\x35\xb0\x4a\x3d\xa1\x89\xdc\xe6\x06\x14\x1e\xe5\xec\x1c\x22\x54\xa1\x4d\x04\x31\x22\x1f\x69\x3b\x1c\xf1\x3c\xb4\x2d\x3e\xc6\xed\xf0\xfa\x1c\xf1\xfc\x80\x35\x24\x47\x1d\x6d\xda\xb2\x64\x4a\x49\x91\xc1\xe5\x47\x65\
x0f\x4e\x24\x3d\xec\x48\x4e\xd5\x25\x9d\xc1\x11\x8f\xa7\x90\xa0\x54\x4e\xaa\x6f\x77\xa9\x7e\xb9\x8a\x52\x4f\x4a\xcc\x9b\xf1\x3d\x4d\xfa\x51\xb9\xf7\xad\x5e\x87\xc4\xb5\xd6\xac\xf9\xc6\xb5\x32\x91\x1b\x5d\x0e\x60\xf9\x19\xad\x23\xb9\x92\x23\x87\x2b\x8a\x61\xd9\x7c\xa9\x03\x8c\xb1\xb5\xe5\x7b\xcf\xf9\x60\x6f\x9e\x10\xf9\x6e\xf3\x4a\xa4\xac\x99\x36\x80\xb1\xe7\xf2\x18\xb7\xf8\x1a\xb7\x4f\xe8\x6a\xa2\xe2\x5e\x4e\x8a\xfd\xd8\x4e\xca\x50\xbd\x58\xcc\x0e\x87\x10\x63\x52\xf5\xe2\x29\xca\xf6\x7a\xf6\xc6\xc5\x1b\x37\x21\x1c\xda\xb4\x93\xc7\xb0\x93\x8f\xb0\x15\x19\x4c\xbc\x08\xf8\x11\x49\x8f\x20\x24\xc9\x09\x64\xe0\x47\x74\xb5\x6e\xce\x65\x1f\x86\x3a\x6c\xe5\x7b\x8f\x68\x55\xf8\xa0\x1e\x6e\x7c\xaa\xc2\x3e\xfb\x71\xd8\x2a\x1c\xd8\xd6\x41\x1b\xb5\xd1\x3e\x26\x2d\x05\xa4\x89\x3a\xf9\xf0\x9a\xa1\xab\xa4\xae\xc4\xa6\xee\x69\xb2\x20\x8c\x81\x73\x9f\x71\xe8\x06\x78\x1e\x2c\xfc\x03\x03\xe8\x5d\x67\x0f\x78\xf1\xb3\x31\xa6\xe8\x8b\xf5\x08\x57\x9f\xe7\xc0\x45\x31\x31\xf5\x4c\x51\x8e\x0f\x5d\x07\x82\xb5\x7b\x78\xe8\x2f\xca\x77\x0c\x30\xee\x18\x62\xdc\x31\x40\x7b\xfd\x46\x90\x27\x72\xf6\x27\x7d\x53\x5f\xfa\x2f\x12\xf6\x19\x5e\x8d\x8d\xef\xe2\xc9\x25\xe4\x3a\xb9\x8a\x6c\x1a\x44\x18\x76\xee\x54\x20\xec\xa9\x1f\x9d\x81\xb8\x47\xdd\xcd\xec\x6e\xcf\xd3\x70\xd0\x0c\x15\xcf\xaa\xb8\x68\x77\xf7\x96\xd4\xe4\xd1\x49\x07\xa2\xe1\x0c\x48\x32\x40\xfe\x31\x3b\x09\x6f\x49\x17\x22\xc5\xdb\xc6\x1a\x70\x73\x2d\x8f\x6b\x3f\xaa\x1c\xb6\x75\x27\xf7\x9c\x55\x5a\x1c\x69\x80\xf1\x49\x34\x27\x2b\x2c\xba\x56\x58\xfe\xca\xa9\xd7\xb7\xcf\x23\x7b\xfb\x3b\x6a\xad\xc6\x9f\xe4\x37\xd9\xda\xfd\x56\x88\x32\x65\x76\xfb\x2d\xed\x64\xce\x7c\xc8\x76\x70\x6d\x29\x88\xa5\xd2\x59\xf4\xc6\x15\x1e\x8b\x86\x0b\x11\xcd\x89\xe0\x3c\xd8\x81\x9b\x0c\x0c\x1d\x41\x0c\x05\xa8\xdf\x31\xa0\xab\x5c\x15\x60\x4f\xf2\xb3\x3f\xb0\xcb\x4e\x3f\x67\xe9\x5e\xf3\x60\xbd\x3f\x75\x0b\x45\xe9\xb5\x98\x88\x74\x1f\x35\xb9\x4e\xc9\x9c\x26\xa8\xbe\x6b\x84\x2a\x57\x1e\x09\x4b\x18\x11\x52\xf5\xb9\xc8\xf6\xb1\xc8\x3e\x29\x51\x9a\x93\x49\x08\x1b\xbe\x8f\x0c\xde\
x89\xc8\x22\x5c\x57\x10\xa5\x71\xd4\x9a\x34\x79\xc2\x00\xa3\x7e\xbc\xd2\xde\x2e\x39\x0f\x5c\xb7\xdd\x46\xb7\xb0\x19\x6e\xcd\x19\x6e\xa9\xa0\xa5\x0d\x07\x00\x84\x66\x48\x5a\xee\x80\x0a\xb4\xbe\x15\xbb\xa2\xd4\x7d\x79\x17\x19\x87\xae\xc0\xf3\xa3\x26\x5c\x1c\x9a\x56\x9a\x82\x67\x28\xa3\x02\x38\xe1\x65\x03\xc1\x4a\x1e\x38\x50\x74\x38\x50\x34\xd7\xf2\x79\xd5\xc3\x1c\x41\xfc\x93\x06\x4d\x8d\x06\x0b\x94\x66\x9e\x3a\x5f\x42\xaf\x08\xd1\xab\xd9\xb8\xa1\xaf\xd9\x71\x5d\xed\xc7\xa0\xdb\x2e\x2e\x3b\x0f\x5c\x75\xde\xda\xe6\xc6\xe0\x9f\x29\xff\x50\x67\xa1\x9c\x85\x03\x80\x83\x64\xc7\x08\x4f\x4e\x10\xd0\x81\x32\xa6\xd2\xcd\x57\x3f\x56\xa0\x28\x9a\x2e\x36\x6b\x1c\xec\x67\xbf\xae\xc8\x39\x0a\x0a\xd7\xcc\x85\x17\x45\xcc\x98\xc9\x7b\xb9\x7d\xd1\xe1\xad\xd5\x45\x48\xd1\x45\xd7\x34\xba\xbb\x71\xc1\x9f\x17\x58\xcb\x02\x07\xa1\x8b\x04\x41\x67\x5c\x18\x80\xb0\xbd\xf2\x15\x9b\x5e\x8f\x5b\xb4\x69\xb1\xdf\x0e\x71\xdc\x0e\x84\xdf\x0c\xaf\xbf\xc1\x7b\x4d\x44\xc8\x3e\x4f\x8a\x7e\x8e\x84\x7e\xce\x24\x42\x87\xe8\x4e\x19\x37\x8b\xdc\xda\xe0\x1c\x1a\xdf\x53\xe0\x86\x2c\x79\xc0\xbc\x3f\xdf\x7d\xcb\x6c\x2c\x8d\x8c\xba\x20\x58\x68\x56\xa4\x31\x47\xd8\xf3\x57\x0c\x59\x8f\xe8\xd5\x9d\x8c\xf1\x63\x90\xa8\x95\x40\x1b\x75\xc8\xa7\x85\xcc\xb7\x81\xcc\xd1\x81\xc8\xd1\x81\x2c\x82\x70\x12\x4d\x44\xe7\x5a\x77\xec\x6a\x0c\xb8\x9d\xb7\x45\x3a\xa1\xe5\x76\x42\x8b\xe1\x40\x8d\xdc\x65\xe8\x81\xef\xfe\xb4\x90\x3b\x15\x5e\xd9\x1a\xdb\x44\x79\xd6\xaa\x95\x2e\x52\xba\x33\x40\x63\xb8\x4b\x3b\x04\x94\x3c\xc6\x00\xf9\x67\x4c\x7a\x42\xd4\xb2\x0f\x18\xdb\x40\x71\x7f\xd5\x17\xea\xe5\x4b\x78\x01\x0e\x54\xf3\xb0\x1f\xc3\xed\x2e\x51\xe8\xab\x6e\xbf\x62\x5f\xb4\x5c\x4f\xb4\xb4\xe5\x2e\x9d\x2e\x11\x38\x5b\xe7\x26\x0b\x1d\x4c\x4b\xb6\x6d\x0f\x5c\x77\x4a\x48\x64\xd1\x0c\xcd\x3a\x0f\x2c\x4c\x88\x65\xc5\xa1\x37\xdd\x89\x4e\xc9\xdb\x5d\x3a\x6e\x77\xf9\xa0\xdb\x3d\xf3\x85\x3a\x74\xc6\x4c\x75\xa0\x4d\x3d\x6f\xa7\x74\xde\xf6\x9b\xfd\x18\x6f\xb7\x6a\x2a\x40\xe8\xaa\xca\x97\xc7\x79\x3b\x6f\xc8\x8a\xc2\xe3\x2b\xf7\xa6\x9d\x9a\x86\xd5\x23\xfa\
x88\xb0\x58\x2c\x49\xad\x84\xf5\xb5\x6a\xe2\x57\xfd\xd7\x7a\x13\x60\x36\x92\x14\x62\xe6\xa3\x76\xf1\x66\x23\xd7\x61\xcc\x33\xb1\xf8\x84\x2e\x93\x31\xbc\x32\x9f\x17\x4f\x31\x14\x18\x80\x5c\x04\x21\x29\xc9\x6e\x77\xe4\xfc\x49\x85\xf7\x29\x3f\x01\xef\xc8\x02\x0b\xc9\x3a\x0b\xd9\xb6\xb4\x23\x36\xaa\xb1\x23\x35\x21\xe3\x48\x80\x8f\x7e\x51\x3d\xff\x0e\x65\x8c\xf8\xbc\x80\xee\xf2\xde\xa0\x30\x17\x55\xb5\x21\x5a\xee\x9f\xf5\x81\x05\xdc\x5b\x8d\x43\xd6\x67\x27\x52\x56\x96\xdd\x67\x77\x53\xf7\x9a\x78\xfa\x90\x72\x61\x50\x5c\x7f\x07\x29\x33\xc5\x6f\x86\x4c\xba\x30\x9c\x74\x17\x9f\x05\x55\x77\xdf\x06\x28\x8e\xe0\x64\x8c\xe1\x38\x42\x43\x76\xa7\x74\x3e\xe7\x76\xde\x6c\xd2\x09\x48\x4d\x86\x7b\xd5\xd0\x3a\x7c\xc8\x34\x23\x3e\xc5\xcf\xd9\x96\xef\xeb\x7b\x9e\x3c\x29\x83\x27\xcc\x1b\x73\xb3\xdd\x4e\x8e\x1b\x57\xd9\x7a\x06\x60\xc0\x4f\xba\x03\xe7\x76\x32\x7d\xb9\x30\x92\x9a\xf7\x82\x62\xc7\xa6\x5a\x7a\xdf\x77\x03\x66\x33\xec\x37\x90\xb1\x18\x97\xeb\xd2\x9e\x61\xc0\xcf\xb9\x49\xb2\x21\x9c\x73\x90\xfb\xf3\x66\x7a\x82\x6c\x4b\x0f\x88\x01\xa5\xe1\x73\xfe\x81\x2a\xc9\xc9\x8d\x30\x63\xae\xb8\x69\x4a\x65\xfd\x59\x67\x55\xfb\x62\xe9\x52\xc7\x70\xaa\x4d\x80\xee\x7e\x73\x44\x88\x29\x98\xef\x7b\xa2\x4b\xad\x6a\x51\x03\xe9\x15\x4c\x33\x1c\x17\x57\x92\x85\xb5\x25\x8f\x5d\xc1\x6f\x73\x00\xef\xbe\x7a\x1b\xc8\x34\x99\xfe\x2e\x9c\xf1\x53\xa5\xee\xe8\x37\x5d\xf1\x23\xa7\xbc\x54\x58\x1b\x64\xc9\x44\x9f\xfb\xaa\x8b\x92\x21\xee\x53\x62\x5d\x9e\xf1\x71\x3d\x92\x4b\x1e\xb0\xee\xd8\x87\x0a\x2a\x58\x1e\x74\x59\x2e\x19\xd3\xb1\x0f\x54\x9e\x79\xf0\xb8\xa5\xec\xdb\x2b\x45\xd4\x05\x56\xe3\x2f\x4f\x4e\x92\xfb\x28\xb9\x00\x2c\xf8\xb9\x3f\x13\x98\x20\x3c\x94\x0b\x53\x33\xe9\x85\x4c\xdb\xa7\x01\x4a\xdf\x73\x30\x46\xe8\x43\xdf\x32\x2c\xa7\x38\x63\xe1\x10\x60\xf0\x92\x11\xfd\x94\x51\x29\x4f\xeb\x91\xd8\x2c\xc3\xd6\x68\x94\xdf\xac\xa1\x67\x0e\x89\xfd\xd4\x1d\x31\xab\xfc\x13\x75\x09\x31\x80\x18\x89\x21\xee\x8f\x14\x5c\x18\xbe\xfd\xcc\x11\x17\x36\x8c\x2e\xee\xc5\x61\x98\x0a\x72\x01\x15\xf5\x4b\xf6\xbb\xac\x2c\x7f\xd5\x13\
x8a\xd9\xd5\x7e\xa8\x02\xcd\x61\x56\x8b\x31\xca\x77\xd5\x31\xc6\x03\x63\x6e\xc2\x6a\x97\xd1\x6a\xf7\x4c\xfa\x29\x27\x53\xaa\x30\x56\x38\x2c\xc3\xd7\x90\x94\x9d\xee\x53\x7d\x98\xfc\x87\x7e\x2e\x5c\x68\x8d\x7a\xc5\x99\x1c\xa6\x98\xd7\xaa\xfa\xe6\x97\x12\xf3\xbc\x47\x7d\xcf\xf5\xac\xa4\x1d\xc1\x91\xba\x2f\xd5\xbe\xf3\xc7\x48\xd6\xa0\x19\x07\xc3\x5f\x98\x32\xbe\xbb\xa8\xec\x72\x3e\x79\x7a\xf1\x47\xff\xce\xc8\x62\x33\x30\x25\xbb\xe7\x2a\x9a\x36\x0c\x83\x7f\xe7\x9d\x69\x6d\x4b\xd6\x18\x8e\xd1\xef\x54\xc6\x5f\x9e\xcc\x8f\x72\xb5\x8f\xb0\xae\x19\xa6\xbe\xf0\x00\xc4\xe4\xd1\xb7\xbe\x81\x31\xf1\x61\xdf\xa1\xd2\x64\xb0\xa1\x8e\xda\xf8\x22\x97\xcc\xf9\x69\x0b\x4e\x34\x6e\xfa\xb5\xe4\x18\x59\x58\xed\xd5\xb9\x97\xec\x0f\xef\xe4\x35\x97\x2b\x4a\x94\x58\x46\xd4\x0e\x8f\x11\xd6\x6f\x7e\xcf\x9a\x38\xe8\x11\x89\x13\x26\x85\xc9\x94\xf7\xbe\x93\xed\xe8\x37\x36\x7a\xb5\x7a\xf7\x67\xde\xcd\x94\x6c\x72\x8c\x73\xd9\x6f\x10\x13\x31\xec\xe7\xec\xa3\x59\x91\x2a\xc4\xaf\xb4\xd3\x43\xb4\xe1\x18\x8a\xe4\x3c\x18\xf1\x33\xee\x60\x06\x32\x78\xfa\x76\x61\x74\x35\x6b\x1f\x51\xcf\xac\xec\xf6\x83\x6f\xa1\x16\x8f\xe9\xa5\x30\x3b\x17\x59\xfd\xee\x57\x31\xd1\x2d\x78\xda\x32\x2c\xab\xa9\xbf\xf1\x39\xc9\x45\xec\x41\x23\x73\x63\xeb\x19\x93\xb7\xb3\x20\x83\x7e\xed\x73\x97\xfb\x44\xae\xf8\xbe\x7a\x27\xc4\x84\x6c\x5f\xf5\xf1\x95\x14\x43\xb2\x30\xf9\x1f\x72\x0d\x6f\xb7\x35\x9d\xf6\xcc\x83\x61\x39\x4b\x76\x1f\x8b\xaa\xa0\xbf\x8d\xeb\x31\x69\xdb\x07\xc7\xf0\x17\x58\xe3\x13\x8b\xbd\x71\xdb\xf7\x78\xb9\x4e\x5b\x02\xdf\x33\xd1\x76\x8d\x92\x31\x5d\x68\x99\x08\xc0\x79\xc8\xe2\x0b\x47\xf6\x89\x63\x7d\x10\xb9\xfe\x70\x1d\x86\xb9\xab\x18\x78\x4d\xa7\x71\x6c\x79\x8c\x07\xbf\xc7\xab\xec\x16\x20\xc5\x54\xc4\xcc\x5c\xb4\x43\x7e\x10\x79\x3b\x10\xc5\xc0\x00\x4c\xf7\xa9\x23\x96\x83\x23\x80\xce\x27\x3a\x53\xc5\x27\x27\x9d\xba\xf0\xdc\x44\xa3\xbf\xbd\xd1\xb2\x3e\x69\xd5\xe3\x30\xc5\x35\x9d\xc5\xe3\xcd\x60\x6e\x16\x4a\x2d\xce\x70\xce\xd8\xfa\x56\xe7\xbe\xe7\x94\xd7\xb4\x03\x91\xcb\x01\xb7\x27\xc6\x70\x4c\x6d\xcb\x3c\x0a\x62\x8f\xcd\
xaf\xcd\x26\x66\x8a\x3a\xad\x1b\xaa\x61\xbe\xcd\x76\x6a\x8c\xd0\x6b\x96\xe7\xb2\x78\x16\xcc\xe5\xd2\x6c\x77\x0c\x90\xd2\xbc\xe0\x98\x5d\x0a\x05\x9f\xe1\xf1\xcb\xb7\x98\xe3\xb8\xab\x78\xdf\x9f\x74\x6a\x30\x99\x1d\xc9\x3d\x1b\x72\xcc\x65\xc7\xf2\x62\xc9\xb3\x0c\xb7\xbd\x66\x61\x94\xa7\x50\x7c\xb7\x63\x2a\x0e\x99\x4a\x42\xd4\xb8\x68\x67\x8a\x40\xcc\x83\x65\x7c\x12\x19\xd3\xc5\xd0\x34\xa2\x71\x95\xa6\x67\x4f\xf6\x7b\xdd\x00\x24\xe4\x76\xbf\x93\x27\x69\x18\xfc\x93\xd2\x21\xe6\xc1\x98\xc7\x6d\x53\xb7\x02\xcc\xf3\x43\xba\x4b\xc8\x43\xb8\x5d\x8a\xcf\x9d\xb4\xa4\x65\x54\x9f\x07\x8d\x07\xe3\xb0\xce\x1e\x10\xf3\xe0\xcd\x5a\x5a\x35\xb4\x20\x19\x30\x6e\x07\x25\x31\xb9\x96\xd9\x3e\x77\x18\x37\x7e\x23\xc8\x49\xa0\x81\x27\xfd\xc4\xf8\x9a\xc1\xd8\x5b\x2e\x4e\xf8\x42\xe0\x50\xb3\xa7\x3e\xc4\x0c\x25\xfc\xe9\x63\x22\x7f\xe4\xfc\xb1\x6d\xa0\xd8\xb6\x8e\xd6\xc5\xa6\xcf\x84\x8a\x67\x8f\x4e\x3c\x6f\x80\x48\x10\x74\x62\x71\x2d\xe7\xa3\x90\xf3\x7d\x47\x7e\xd5\x51\x04\xa7\x71\x72\xeb\xf4\xb3\xc6\x63\xc6\xe6\x7b\x2f\xb2\x83\x24\x41\x27\xa7\x87\xaf\xe6\xb1\x32\x31\xe4\x02\xfc\xb7\xb0\x6a\xb5\x7b\x88\x82\x18\x76\xbe\xf5\x83\x10\xf3\xe0\xa4\xe3\xd6\xe9\x45\xc7\x93\xa0\x75\xb1\x2e\x89\xc9\xad\x5c\xf2\x89\xbf\xd7\x54\x0f\x2d\x50\xe8\x46\x81\x11\x19\x44\x31\x63\x73\x0f\x99\x0b\x1f\x86\xa1\x4a\x26\x7a\xdd\xe7\xd7\x7d\x71\xfa\xce\x8f\x26\x86\xb0\xe1\xd8\x07\x01\x10\x01\xf7\xe2\x87\xb9\x58\xae\x27\x18\xc5\x69\xd2\x9b\x0d\x69\x42\xcf\x06\xdc\xf7\x1d\xcf\x43\xa9\xc7\xa9\xeb\xce\x29\x42\x6c\x59\xbb\x28\x0c\x27\xa8\x13\x9d\x6d\x05\xc0\x01\xa0\xfa\xa4\xfc\x52\x48\xf9\xef\xbb\x9b\xe7\xdb\x9e\xdf\xc0\x78\x16\x32\x96\x71\x36\xf8\xcc\x8d\xef\x37\x5b\xaa\xa5\x59\x2e\x39\x86\x32\x60\xa0\x81\xf4\x01\xd6\x07\x4f\x19\xe0\x7c\x7c\xce\x3c\xb7\xc1\xc0\x06\xe7\x0d\xf4\x9a\x8f\x97\xec\x3e\x66\xd0\x93\x10\x43\x7a\xa8\x3a\x32\x49\xc8\xec\x36\x5f\x19\xb3\x6e\xdc\xde\xbb\x5a\x8f\xe8\x1a\x34\xf8\x61\xcd\xeb\x84\xd6\x9a\x35\x6a\xf6\xe8\x99\xa3\xe7\x55\x61\xcb\x59\xb2\xf8\x68\x8e\x15\x7e\xae\x0b\xbd\x55\x29\x5f\xa6\xf7\xb5\x1b\
xe2\x68\xd3\xc8\x2e\x26\x4d\x62\xb0\xfc\x11\x46\x10\x3a\x26\x8f\xc7\xa0\xfa\xbc\xc1\x04\x24\xb1\x7f\xc6\x24\xa3\x66\xd3\x02\x93\xf7\x81\xc2\xae\xa9\x19\x8c\x5a\x2e\xbe\x46\x43\x79\x76\xc4\x57\x0f\xdf\xe6\x46\xed\x70\x91\x93\x28\xcf\x41\xa9\x07\x65\xa0\x0d\x0d\x5b\xdc\x87\x9d\xa8\x51\xa2\xc5\x32\x17\x6f\x82\xde\x62\x91\xb8\xcf\xc9\xeb\x73\xdd\xfb\xbc\x1b\x8e\x34\xac\x61\xf6\x22\x0a\xfb\x1c\x3e\x64\xce\x8f\x4f\xc4\x3f\xf4\xfa\x0c\x84\xb6\x95\xb5\xd7\x6c\x79\xbc\x9a\xc3\xb3\xc3\x96\x68\x8c\xda\xa8\x4d\xdb\x49\x8e\x27\x36\x9e\xf0\x42\xe1\xc4\x1d\xca\xae\x3b\x16\x71\xb9\x9a\x06\x0a\xec\xa3\xe8\xa4\x36\x6b\xa0\x36\x93\xe2\x97\xdc\xfc\xfd\xfb\xb2\x97\x0e\xc1\x8d\xb8\x22\xa0\x0f\x6b\xe6\x15\x09\x3c\xc1\x7a\x44\x8f\xa8\x8d\xdf\x1b\xf6\xd7\x67\x4c\x2e\xfb\xcd\xb9\xa9\x55\x4c\xc9\xaf\x7e\xf9\x35\x73\x80\x28\xcd\x80\x28\x0d\x66\x10\x6d\xc4\xfa\x3d\x3c\x9e\xcf\x83\x0b\x71\xdd\x7e\x4c\xb7\xf3\xd6\xc8\xb6\xbb\x68\xb6\x3b\x28\xf6\x83\x32\xb4\xb3\x90\xa0\xd1\xd6\xf8\x10\x30\x16\x06\x84\xb4\xa2\x40\xe9\x22\x80\x69\xd4\x21\x50\x93\x28\x89\xb6\x51\x86\x60\xc4\x87\x65\x0d\x54\x8d\x10\xe2\x59\x5d\x35\x19\xfd\x61\xbf\xc9\xd8\x91\x0a\x51\xe3\xba\xc1\x59\x5e\x70\x18\xa9\xd6\x21\xa2\x9d\xe9\xa6\x1d\xe9\xa4\x1c\xf3\xa4\x38\x52\x6d\x34\xc7\xc5\xdc\x48\x65\x18\xe3\x76\x9d\x56\x62\x25\x57\xf2\x35\x5c\xe4\x68\x43\xfa\x57\x74\x57\x7b\x5a\x78\x44\x7e\xd4\x46\x48\xd6\x44\x88\xd7\x44\x6d\xd2\xd3\xc1\x24\x90\x47\xca\x97\x98\x9e\x75\x1e\x9c\xa1\x19\xba\x90\x39\x84\xb4\x27\x84\x01\xd3\x09\x54\xeb\x16\xa0\x28\x43\x13\x4f\x14\xd3\xf5\xcf\xde\x8f\x91\x2f\x9d\x4d\x1d\x00\x05\x6b\x77\x57\xd0\xd8\xa0\xcf\x84\x3e\x9d\x94\x93\x2b\x67\xb5\xc6\x7a\xe5\xb2\xdf\xec\x4a\x41\x9d\x68\xd2\xb2\x4d\xac\x87\x21\xf1\x61\x3a\x57\x1a\xc2\x6f\x75\x95\x2a\xe6\xd6\x3d\xda\x7e\x56\xb8\x67\xaf\x70\xaf\x79\x7f\x77\x8c\xfd\x0e\x18\x70\xbe\x6d\x04\x00\x2f\xb1\xdf\x2c\xb1\x8f\x2e\x24\xf0\x94\x68\xc3\x5b\xc9\x68\x0b\x34\x8d\x46\x25\xa4\xbc\x01\x97\x4f\xbd\xf0\x4f\x56\xec\xb1\x69\xf1\x46\x7c\x77\xbd\x66\x88\x5d\x81\x75\xbd\xa3\xe3\xf4\
xea\x4a\x91\x28\x8d\x66\x45\x42\x54\x73\x7c\xdd\x1f\x85\xa1\xa4\x45\x50\x9f\x79\x62\x43\x83\x6a\x43\x83\xe2\xe2\xba\xc3\x8c\xcb\xbe\x2e\x67\x8a\x68\x11\x05\x16\x24\x41\xf7\x90\x6e\x33\x77\xef\x2a\x1d\x92\x68\x94\xe4\x67\x2e\x7d\x32\xd6\xaa\x2b\xb8\x3f\x02\xa9\x27\x44\xb1\xd7\xcc\x44\xe2\xed\x56\x38\x17\xb5\x98\x27\x68\x7e\xef\x49\x9d\x7a\x42\x1a\x4e\x11\x45\x87\xe3\xc8\xcc\x9d\xfb\xc3\x7d\xb6\x2e\xe4\xe1\xbe\xdf\x0c\xb6\x3f\x28\xf6\xc8\x16\x35\x05\xb7\x31\x52\xcd\x7a\x57\x00\xc5\x25\x13\xa3\x83\xf7\x37\x84\x5c\xda\x61\x59\xd1\x5b\x1d\xb3\x3c\xc1\x7c\xce\x9e\x97\x9a\x0a\x56\xf0\x54\xc9\x33\x84\xd8\xaa\xc4\x92\x12\x57\x5b\x52\x7e\xcc\xbe\x2a\x1c\xc8\xad\xee\x7b\x65\xae\x9a\x7d\xa5\x11\xed\xee\x42\xf0\x92\x78\x0b\x69\x21\xb3\x03\x1f\xed\xc7\x3e\x3b\x9e\xbb\x6a\x37\xdd\x2b\xe0\x8f\x90\x3e\x8f\x73\x72\xb5\x90\x65\x8c\x1f\x74\x4a\x26\x7a\x34\xdd\x04\x6f\x0d\x82\xc9\x5b\x48\x8c\x0a\xb5\xe9\x02\x96\x60\x7a\x17\x19\xf3\xdd\xe9\x70\xc5\x0b\x82\x11\x99\xc6\x27\x10\x76\xe4\xe0\x8d\x65\x70\xe2\x04\x1c\x6e\xf7\x27\xaa\x41\xc5\x8e\x23\x28\x81\x50\xf7\x3e\x16\x3d\xd8\x09\x3c\x8d\x4c\xcf\x1b\xcf\xcc\x70\x74\xf1\x44\x8b\xa0\xd3\xd9\x79\xf6\xc6\x73\x22\x18\x09\x14\x21\xf4\xac\x9e\x7d\xeb\x0c\x67\x4f\xae\x4f\xa5\x27\x25\x28\x81\x1b\x1a\x44\x9d\xde\x57\x36\x1e\x44\x89\x4e\xfe\x56\xe7\xca\x5c\x85\x35\x9a\x98\xa7\x11\x93\x6b\x41\x86\xf5\x05\xad\xdf\xbe\x6b\x80\x8b\x6b\x4b\x41\x95\xe6\xa6\xe7\xed\x34\x3c\x2d\x71\x3b\x67\x20\x0e\x7f\x44\x66\x44\xe6\xec\xc7\x70\x50\x79\xe3\x35\xbe\xd7\x0e\xd6\x78\x93\x47\xf3\xe5\x4c\x33\x99\x6d\xe7\x05\x08\xe3\xab\xc9\xb7\x59\x71\xdb\x5a\x27\x9e\x47\x8f\x94\xeb\x32\xdd\x3e\x6b\x6b\xe9\x8b\x01\x79\x47\x45\x07\x7f\xba\x1b\x45\x48\xdc\xcd\x18\xbd\x28\x50\x9b\x03\x51\x6b\xa9\x94\x90\xb4\xb5\x65\xbd\xa1\x4c\x4a\xb7\x24\x80\xd2\xf9\x12\xd9\x33\x49\xcb\x93\x07\xa9\xaf\x93\x37\x51\x9b\x71\x96\x1b\x11\xc8\x0a\xc9\x84\xd3\x25\x35\xd0\xeb\x31\x4e\xd3\x95\x0c\x42\x35\x18\x07\x65\x48\xa6\x3b\xbd\xac\xbe\xda\x74\x37\xa8\x0c\xb8\xef\xfd\xb5\x58\x1d\x9a\x80\xd2\x00\x99\xa8\
x68\x6d\xe5\x13\x0c\xd0\xf1\xa4\x78\x91\x3b\xd1\x41\x4f\xbb\x7b\xe8\x0b\x6b\x02\xe7\x33\x90\xe9\x20\xa8\xf7\xe9\xed\xde\x13\x1a\x24\x1d\x55\x13\xb5\xd1\x1c\xa8\x14\xa0\x1e\x40\xe0\x35\x4f\xe4\xe6\x08\x3b\x70\xbb\x5b\xed\x32\xc4\x77\x28\xb5\x57\x97\x80\xfd\x0e\x05\x8b\x22\xf5\x55\xfc\x6e\x34\x8c\xf8\x51\xcb\x18\x06\xca\x02\xbf\xeb\x9a\x65\xdf\xe7\x1a\x7c\x72\x13\x04\x3c\x46\x8f\xc0\xc3\x19\x1d\x3c\x45\xa7\x22\xf0\xac\x6d\x59\x0b\x82\xee\xdc\x9e\x67\x7d\x55\x77\x57\x42\xcb\x47\x14\xc7\xeb\x78\xa5\x9d\xd4\x15\xac\xc0\xae\x4b\x2a\x59\xe0\x52\x17\x9d\x81\xc2\xdb\x23\xc3\xc4\x7c\x71\xce\xf3\x70\x2f\x38\xa2\x6d\xba\x70\xb9\xa4\xdf\x6a\xb3\xe9\x6f\xba\xd0\xb4\x79\x91\xc3\x84\xbd\x6b\xc8\xcd\xf7\x84\x3c\x5b\x74\xed\x8a\x68\x75\x38\xd4\xb3\x82\x72\x24\x9a\xe6\x7a\x77\x56\x07\x2d\x20\x1d\x04\x3d\x50\xa6\x19\xa0\x4c\x73\xae\xbc\x35\xfb\x6a\xfc\x94\xf0\x4c\x73\xa7\x39\x61\x30\x9d\x5f\x0d\x1f\x40\x97\xc4\x55\x7d\x42\x64\x43\xe3\x0e\x61\x18\xd5\xca\x47\x7a\xfa\x66\xd5\x7c\xce\x1a\xa2\x08\xd3\x45\x00\xe2\x53\x0e\x5e\xce\xc8\x3a\xdd\xca\x40\x49\xb9\x71\x91\xe8\xca\xde\x35\xb8\x79\xe5\xf6\x2d\x59\x0c\x98\x25\xf1\xfa\x58\xb8\x8b\x15\x4f\x7e\xe4\x45\xb9\x07\x13\x74\x7c\xad\xb0\xc2\xb9\xc7\x9b\x0b\x6b\x36\x15\x01\x60\x3e\x27\xc2\x7e\x4b\xe7\xb3\x75\x8f\x02\x4c\x8f\xae\x1f\xf5\x7b\x6b\xb7\x17\xd5\xf8\xa8\xe1\x12\x57\x4e\x42\x32\x4d\x28\x49\xb7\x54\x27\xdb\xa6\x30\xd5\x58\xf9\x44\xc5\xd3\x71\xc2\x39\x50\x7d\x53\x25\xed\x13\xa2\xb7\x18\xd8\x56\x43\x2b\x53\x81\xf5\x2b\x81\xe1\xca\xe9\x48\x48\x10\x6e\xa6\x1a\xd3\x85\x61\x9f\xb2\x7d\xa5\xc9\x4d\xbc\x57\x10\x51\x68\xce\x9d\x32\xab\x30\x68\x87\x91\x18\xd3\xa2\xc0\x1d\xc3\x60\xb9\xe3\xd3\x7f\x38\x25\x6e\xa3\x0e\xa7\x7d\x10\xc8\x51\xf6\x69\xdf\xf8\xd8\x7e\xac\x4a\x42\xb7\x87\x6e\xf7\x63\xdd\xb6\xb1\x16\x82\x22\x05\x10\x3b\x86\xa5\xba\x88\xc6\xa4\x11\x58\x7c\x2a\x5f\xad\x4b\xb4\x6c\x5b\xd7\x55\xcf\x72\x88\x6f\x56\xf1\xea\x11\xb7\x0d\x14\x5a\x0f\xce\xd1\x52\x0d\x34\xe5\x18\x08\xe2\x76\x11\x01\xd5\x34\xfa\xb0\xf9\xd6\x37\x97\xa2\x02\x06\x00\x95\x77\
x1d\x02\x0f\x89\xe5\x2f\xf3\xab\xdd\xd2\x96\x6d\xf3\x03\x00\x14\xbb\xdb\xc3\x05\x6e\x77\x89\x5c\x11\x43\x68\x2d\x24\xed\x26\x8b\xf5\x73\xaf\x1b\x9a\x19\x49\xee\x97\xf0\x3e\x15\xbc\xdc\xe5\x30\xe4\x15\x41\x5e\x14\xb3\x60\xf0\xda\xf8\xf2\xf9\x33\x5a\xd6\xa8\xc7\xcc\x72\xfe\x96\x83\x78\x0e\x29\x0c\x04\xed\x72\x43\xb4\xf2\x65\x1c\xd4\x9a\x0d\x8a\xb4\x68\x96\x8b\xc5\x98\x0e\x7e\xd1\xf2\x09\x2c\xb2\x90\x4a\x0d\x91\x3f\x7b\xca\xad\x20\xf3\x28\xaf\x25\x48\x22\x3e\x42\x2d\xca\xb1\x36\x42\x82\xc1\xda\x9d\x04\xa0\x79\xdd\x5e\x7d\x7c\xdd\x24\x21\x3c\x6d\x6b\xa0\xee\xbd\x64\xe9\x17\xf7\x72\xfb\xf4\xd1\x03\x6f\xcf\x2c\x06\x28\x60\x18\x4c\xf9\xd5\x54\xd5\x48\xa2\x1d\x70\x36\x0f\xf7\x88\x8a\x74\x60\x9b\x88\x20\xe8\x70\x38\x12\x6d\xd7\x2b\xcb\x5a\x3f\xb7\xa9\x6b\xe1\xc4\xb1\x43\x67\x2a\x18\xd1\x8e\x24\x7d\xeb\x8c\xfe\x15\xcf\x55\x3a\x35\x8b\x30\x2d\x21\x46\x03\x52\x3d\x9f\x8f\x67\xc3\xd9\xdc\xe5\x5b\x6a\x97\x06\x15\x00\x41\x3a\x07\x81\x18\x55\x71\x4b\xe9\x06\xcd\x71\x1b\x71\xdb\xd0\x21\x36\x1f\x37\x7e\x06\xe4\xc0\x21\xb2\x03\x8f\xd1\x62\x69\x13\x1d\x32\x9b\xc7\xd8\xf8\x13\xcd\x67\xf9\xe6\x93\xcb\x75\xf2\x12\x87\x4f\xc0\x91\xd4\x07\x82\x8a\xbd\x40\xae\xc3\x76\xe7\x1e\x83\x7e\x9f\x08\x0e\x07\x49\x65\x27\xd6\x52\xe3\x08\x22\xba\x7e\x9b\x85\x2f\x37\x07\xa2\xc1\x74\x03\x01\x1c\xbc\x0b\xfd\x8c\xc9\x36\x1c\xfb\xa2\x98\x4c\x49\xd1\x67\xfc\xd0\x5a\x3b\xf6\xaa\x21\x1b\x99\x9d\x82\x79\xae\xde\xa5\x7a\x44\x79\x4e\xd1\x08\xa8\x9e\x54\x5b\x0b\xc6\xf5\x89\x20\xd8\xbc\x6d\xb4\xea\xc3\x35\xae\xdb\xce\x03\x47\x42\x9a\xa6\x2a\x8d\xd2\x7b\xd5\x34\x9d\x8f\xbd\x00\xcb\x0b\x22\x48\x62\xd9\x75\x9b\x2c\xf7\xaa\x40\x7e\x09\xf9\x67\xb4\xf9\x13\x3e\x2b\xc5\xad\x9e\x49\x02\xd8\xda\xbc\x28\x68\x61\x5c\x32\x49\x69\x63\xbf\x1b\x4e\xf8\x89\x92\x48\x3a\xda\xd2\x69\x47\x2a\x22\x9e\x15\x46\x6b\xf0\x1d\x1b\x6c\x68\xec\xd4\xe6\xe5\x3b\x2d\x3b\x5e\xb8\xcf\x58\x63\x4c\xbe\x82\x80\x79\x03\x19\xd3\xbd\x2a\x75\x48\xe8\xe8\x46\x32\x85\xd5\x0d\x44\x7a\xa8\xd2\xba\x23\xc0\x8b\xd7\x60\x97\x87\x0b\x02\x9b\x3e\x28\x63\x8f\x57\x1b\
x28\x0b\x23\x1d\xd8\xdc\xc3\x85\xb4\x7b\x4c\x53\xcb\x30\x39\x63\xda\x5b\xd0\xb2\xbc\xfa\x72\xac\x74\xd5\x96\xcd\x2f\xcc\xf6\x8d\x0e\x7f\xbc\xdc\xbb\x00\x37\xd2\x80\x6a\x5b\xbe\x48\xbb\x92\x76\x32\x7f\x02\x90\x61\x87\x6d\xd5\x35\xd6\x83\xb3\xe4\x84\x3c\x71\xef\x81\x2e\x57\xaf\xaf\x4a\x7c\x59\x51\xb5\x42\x4e\x7f\x22\x7a\xe5\x00\xc0\x8d\xe8\x3a\xb4\x41\xd2\x4e\x56\xaa\xab\x54\x4b\xb5\x02\x50\x6a\x0b\xd6\x3d\xa2\x45\x1e\x66\xd2\xb7\x6f\xfe\xce\x28\x33\xbd\x2d\x06\x08\x36\xd3\xd3\xc2\x6d\x01\xae\xcc\x02\xa7\x69\x9a\xb8\xc3\xc0\x1a\x09\xfb\x25\x35\x3a\xd9\xed\x34\x6a\xe4\x2e\x43\x3c\xf9\x49\x12\x04\xb9\xd7\x0c\x89\xc9\x55\x42\x38\x65\x09\x98\x17\x63\xd4\xc7\x42\x75\x09\x27\xf9\xbc\xdd\x31\x5a\x49\x73\xec\x60\x0f\x96\x47\xb5\x07\x44\x01\xb6\xf5\xde\xf7\x32\xcc\x40\x80\xbd\x1f\xbc\x9c\xda\x52\xaa\x07\x11\xa9\x02\x4d\x1f\xf4\x3d\xce\xf6\xba\xbe\x75\xfa\x39\x57\x52\x41\x72\x95\x15\x23\x34\xd8\xf0\xd1\xd5\xb9\x48\x4f\x0a\xf4\x84\x2a\x34\xb9\xf9\x12\x3a\x4c\x28\x34\xb1\x4b\x83\x09\x7c\x0f\x01\x43\x81\xc9\xdb\x7e\xb3\x2f\x08\x00\xad\xc7\x8d\xee\x82\x8a\x4f\x01\xa6\x94\xde\xa5\xe2\x1e\x10\x1c\x78\x02\x3c\xc4\x2f\x5a\x67\x3e\x78\x19\xbd\x03\x57\x7c\x6e\x2c\x4f\xcb\xbd\x2d\x29\xe9\x6d\x81\xf5\x99\x14\xe8\xe9\xda\x8f\xca\x5d\x14\xd1\x19\x52\x4e\xdd\xf1\xca\x4a\x98\xe8\x14\x04\x75\x1e\xc1\xca\xe0\xb4\xec\x7b\x8b\x18\x17\x47\xb4\x90\xcc\x61\x46\xee\x0a\x80\xc4\xbb\x3c\xbc\x70\x97\x08\x9d\xec\x1a\x72\x7a\xd6\xde\xa4\x82\x15\x30\x2c\xdb\x3a\x5c\xa2\x8b\xa7\x55\x66\x2a\xf2\xfc\x09\x85\x62\x85\xeb\x0a\x7a\x0a\x11\x2f\xd3\xc4\x26\x88\x8f\x5d\x72\xe2\xd0\x67\x34\x76\xd2\x8a\xa2\xc8\x5d\x58\xc8\x87\xae\x6c\x1e\xb5\x4d\x83\xfa\xf0\xea\xe5\xcf\x37\x3a\xc6\x47\xcf\x79\x61\xa2\x60\x04\xd1\xc1\xcb\xf2\xcc\x19\xb3\xba\xc9\x8f\x3c\xd3\x34\x0d\xde\x41\xd1\xb9\xd5\xf3\x72\xa4\xdd\x25\x05\x01\xb7\xba\x54\x92\xe0\x9f\x02\x4c\x6d\x46\xe7\xdc\x4f\x2a\x4e\xce\xc4\xe1\xda\xeb\x00\xe7\xc3\xe2\x03\x74\xbc\xb6\xf6\x8e\x0d\x5e\xa8\xca\x31\x9a\xad\x78\x6d\x55\xfc\x0d\x45\x08\xe8\x5d\x1e\xae\xac\x15\x63\x13\xd7\
x6d\x20\x3c\x9f\xba\x73\x48\xa2\x11\xc8\x38\x5d\x4d\x62\x97\x73\xab\xdb\xc8\x5e\xde\xb1\xfd\x2a\x56\x10\x31\x1c\xe1\x19\x5c\x0d\x03\x1c\xe4\x07\xd5\x5c\x94\xa7\xdd\xb9\x63\xb3\x6e\x00\xea\x47\xee\x1a\x28\x02\x29\xb7\x89\x06\x9f\x56\x16\xb7\xec\xe5\xa2\x80\x9f\x31\xc4\x55\x7d\x2e\xf2\x24\x7e\xda\xc6\xe5\x48\x96\x03\xdb\x3a\xb5\xe1\xc6\xf0\xee\x2a\x76\x3b\xdc\xc9\x03\x92\x8c\x0a\x73\x76\x26\x8b\xa2\xdb\x30\x54\xa2\x01\x0a\x52\x26\x9a\xd0\x64\x96\xd2\x6b\x66\x59\x9a\x65\x1b\xe3\xb2\xf7\xc8\xc3\x16\x7d\x80\x22\x02\x06\x7b\xd1\x90\x8b\xcf\xf8\x49\x37\x81\xdc\x14\x02\xa0\x5f\xb3\x64\x63\xc3\x77\x34\xeb\x5f\xf5\x75\x8d\x9c\x9e\xb3\x91\xa8\xc5\xfb\x9b\x61\x28\xbc\x17\xb7\x77\x61\x71\xa4\xf8\x1e\x81\xd4\xce\xe9\xaf\x49\x10\x92\x7a\xae\xf7\x24\x29\x3d\xce\xf2\x35\x82\x33\xb6\x6b\xb1\x9e\x32\x2e\x19\x8d\x5b\x81\xba\x33\x88\x23\xb5\xf2\x91\xa1\xe4\x32\x69\x19\xc7\xf4\x8f\xf0\x63\x26\xd5\x2d\x84\x5a\xac\x61\x77\x69\xdf\x4f\x04\x0a\x09\xa1\x44\x3b\x99\xa0\xb4\xcb\x4d\x05\x0b\x20\x07\x68\x6f\x84\x7d\x08\x5b\xfb\x88\x97\xa2\x97\x5c\x4f\xb3\x7b\x52\x93\x59\x62\xf1\x14\x10\xaf\xbc\xce\x81\xea\xd9\x70\xe8\xfd\xd4\x5a\xc2\x46\xe0\xc5\x61\x72\x8e\x12\xd6\x9b\xf0\x5e\x2f\x18\x9c\x37\x77\x3d\xe9\xc0\xe7\x45\x68\x62\xb8\x7e\x86\x06\xb6\xf8\x67\x78\x0a\x98\x30\xa1\xd3\x51\x23\x08\x41\x57\x0e\x9e\xca\x21\x7c\xdd\x0a\x14\x00\x70\xc5\x79\x20\x8d\x50\x6a\xf2\xb8\x14\x45\x91\x21\xb4\x52\xcf\xd2\x09\xdc\xe2\x44\x3f\xa1\x9c\xa7\x75\x56\xfa\xac\x47\x9c\x09\xa7\x44\xb6\x22\x51\x1b\x55\xe6\x2c\x71\x37\x1a\x8c\xf0\xc5\x87\xdd\x31\xab\x28\x71\x19\x3d\xb5\x19\xf7\x64\xc5\x42\x20\x90\x10\x92\x9c\x9e\x77\xed\xcc\x45\xa3\x34\xa1\xcd\x23\x41\x4a\x32\x92\x10\x51\x5c\x83\x5f\x80\x4c\x74\x20\x48\xbf\xdc\x4f\xc6\xe7\xf8\xef\x7e\x0c\x4c\x31\x14\x5c\xf7\x31\x11\xb8\x67\xa9\xdd\x13\xe4\x99\xa3\x28\x81\x9a\x14\xc6\x5f\xc2\x44\x3b\x9e\xdb\x78\x40\x15\x8f\xa7\x48\x98\xc0\x30\x9c\xbb\x0d\xad\x8b\x12\xcb\x38\x17\xd0\x78\x9a\x4f\x70\xa4\xf3\x8b\x02\x77\x3c\x84\xfa\xda\x3d\x44\x8c\x1b\x1b\x4a\xf2\xb7\x1d\xa7\xc9\xc5\xb6\
xcc\x75\x64\xc5\xe7\xb4\xb1\x1d\x56\x45\x46\xa2\xe5\xba\xbb\x40\x09\xec\xe0\xc9\xcb\x96\x6f\xfa\x5d\xda\x21\x7f\x13\x76\x65\x9e\xd4\x69\x02\x57\x7d\xeb\x4d\x4d\x50\xec\x07\x00\xd0\x38\x82\x09\xdc\xab\x27\x74\x16\x94\x44\x2e\xf5\xe5\xf2\x7d\x37\x7b\xbb\x37\x00\xb6\x3a\x12\xa1\xd4\x1c\xeb\x1f\x34\x45\x91\x97\xfd\x62\xb6\xd2\x50\xf5\x4b\xd6\x8b\xe4\xb4\x05\xb1\x74\x3c\x04\x0b\xf0\x1f\x83\x4c\xed\x00\x82\x42\x61\x5a\xba\xb0\x15\xae\x57\x91\xe6\xcd\xde\x4f\x75\x81\xb3\xa1\x65\x52\x76\x9d\xed\x2f\x97\x4f\x9d\x97\x14\xdc\xe6\x40\x61\x86\xa6\xab\xfa\xea\xd2\x66\x81\xb8\x33\x75\x58\x03\x37\xe7\xb2\x6b\x3b\x7a\x9e\xf0\xa8\x99\x85\x02\xe2\xad\xc4\xdd\x6e\x3c\x4d\x83\xa2\xeb\xca\x2c\x91\x0c\x70\x0c\x14\x9a\x02\x2b\xcf\x52\x40\x48\x14\x0f\x1f\x75\xd8\x30\x29\xf7\x6d\x9c\xe3\x25\x2c\xe5\xd9\x6c\xb7\x28\xf5\x9a\x01\xc1\x4a\x5e\xf9\x7c\x8d\x2a\x9f\xf3\x5a\x7c\x39\x21\x0c\x42\xa1\xbe\x40\x57\x74\x4b\xbc\x4e\x4b\x45\x11\x92\xec\x7a\xf4\x88\x95\x58\x32\xca\x4f\x4e\xb7\xf7\x6e\xbc\x74\xbc\x2d\x44\xfb\x5f\xe1\xc5\xd0\xf4\x2e\xd9\x17\x74\xb9\xc6\xde\xf5\x8e\xf5\x07\xda\x19\xa8\x36\xf9\xb1\x88\x5d\x79\x24\x1f\xbc\xa6\x30\xba\x93\xb8\x19\x0d\x01\x27\x2b\x11\xf7\xb3\xcc\x00\x2c\x7f\xe5\x9f\x01\x0f\x16\x45\xd6\x6b\xec\xd8\x70\xdd\xbd\x83\xae\xd7\x0e\xa2\x9a\x8b\xc4\xdc\x9f\xdf\xf6\x23\x8c\x4a\x93\x39\x40\x03\x77\x2a\xbf\xc3\x7a\x95\x90\xe4\xe9\xc4\xbc\x79\x70\x90\xb8\x4d\xf7\x65\xc5\x81\xeb\x14\xfa\x42\x45\xf8\xcd\x40\x20\x00\xd6\xdd\xb1\xcb\x16\xd9\x6e\xc3\xce\xa8\xb2\x45\xd3\xcd\x89\x13\xb2\xe0\xac\x9e\x7a\x72\x65\xc9\x7f\xcc\xd3\x3c\xb3\x91\x03\x99\x0f\x63\xdc\xe2\x6e\x5c\xe4\xbe\x82\x87\xbe\xc0\xa6\x3b\xa7\x48\xf5\xed\xae\xed\x04\xbc\x43\x15\xda\x3b\xcf\x54\x83\xe8\x0c\xa1\xbd\x54\x98\x77\x5c\xb5\xc7\xf9\x2e\x6d\x2a\x9c\x0f\x90\x0b\xa4\xc6\xd5\x3f\x91\xa8\x14\x58\xbe\x5f\xd4\xf7\xfa\xc9\x00\x9a\xcf\x96\x00\xd5\x6b\x81\x65\xbb\xab\xaa\x69\xcf\x60\x99\x33\x4b\x1f\xc8\xb7\x9d\x20\x3a\x90\xf9\xb6\xa0\xd9\x33\x15\xd9\x3a\x9c\xe5\x7a\xce\xea\x76\x18\x9e\xeb\x80\x1a\x57\x25\x68\x69\x7d\x84\x90\
x40\x01\x8c\x9a\xe7\xc2\xb7\x9d\xd5\x75\xe7\x8a\x8b\x4c\x58\xdd\x10\xe7\xdd\x80\x64\x53\x22\x47\xcc\xa4\x74\x95\xe9\x0b\x82\xbd\x06\xa2\x58\xe3\x4a\x6b\x82\x41\x92\x6c\xd7\xc6\x29\xc4\x9d\x72\x05\x22\xde\x01\x6d\x48\x10\x70\x8b\x75\xb0\x27\x01\x2a\x11\xd9\x32\xbd\xee\x2b\xec\x2b\x4f\xc7\x1b\x37\x87\x88\xf6\x5d\x5f\xa0\xb5\x81\xe2\x3b\x94\x37\x10\x96\x8b\xd5\x2d\x8e\xc3\x96\xad\xde\xf6\x64\xb2\xfd\x1a\x3b\x20\xd8\x70\x20\x5a\x3d\x29\xf0\xc5\x3d\x20\x8e\xb5\x3b\xb6\x08\xfa\x1a\xcb\x50\x10\x1f\x81\x6e\x88\x5d\x6f\x35\xdd\x5c\xdf\xa6\x05\x87\x28\x84\x3a\xed\x6b\xf9\x99\x03\x8d\xa3\x9f\x89\x6d\xc2\x66\x4f\xad\xcb\x63\xf1\xc6\x15\x49\x12\x58\x1d\x9f\x48\xde\xa1\xcf\xc6\x4e\x26\xb3\xd0\x85\xb4\x28\x10\x3c\x05\xb2\xf8\x65\xc2\x1c\x1d\xda\xc2\x1f\xb4\xb0\x95\xeb\xe2\x21\xb0\xb6\xdf\xc8\xb9\xeb\x09\xaf\x97\xd1\xd5\x7a\x40\xa2\x33\x0c\xd5\x8c\x2e\x53\x94\x8b\xec\x23\xe3\x53\x61\x5b\x08\x95\x2b\x47\x4f\x15\xca\xc2\x11\x8a\x00\xa2\x35\x3b\xd3\x83\x6d\x45\xd5\xee\x5a\xf5\xb1\x7e\x2f\x89\xe0\x38\x83\xa9\x81\x06\xc5\x19\x30\xb0\x80\x71\x3c\x77\xbd\xc8\x0a\x9a\x3d\x0d\x44\x13\x65\x9b\x3a\x6c\xe5\x75\xf3\x83\xd3\x3e\x00\x70\x9c\xef\xfd\x40\xac\x23\x4c\xe0\xd1\x99\x34\x57\xee\x5a\x8e\x13\x9d\xa9\xc2\xc7\xec\x5a\x6c\x0a\x89\x82\xa0\x31\x24\x21\x62\x85\x51\xfe\x9a\x59\xd3\xeb\x33\x6f\x0e\x6b\xc8\xaf\x75\xbf\x6d\x64\xea\x23\x00\xe5\x7a\x9a\x83\x81\xba\x83\xaa\x5e\x9c\x78\x0d\x9e\xe7\xfe\x78\x4c\xde\x04\x03\xc9\x03\x2f\x23\x76\xa7\x7b\x52\xef\x48\x72\x3d\x81\x0b\xb2\x65\x71\xae\xde\x57\x1b\x06\x29\xb3\xe3\xa9\x68\xe6\x1f\xf8\xd5\xea\x2c\xaa\xa8\x4c\xac\x40\xc9\x6c\x26\x75\x9e\xef\x29\x3a\x37\x6a\xa9\x0a\xe7\xce\x19\x8e\x79\x52\x5e\x3e\x6f\xdc\x6b\x04\x01\xc1\x9d\xf0\xb3\xd8\xaf\xa9\xf1\xd6\x39\xcd\x51\xf9\x68\xa2\x75\xb7\x76\x87\x38\xe1\xdb\x77\x09\x37\xb6\xc8\xbb\x81\x2c\x40\xbf\x00\x89\x40\x82\xae\xd1\x89\xb2\x29\x08\x9e\x6d\xe6\x54\xd9\x43\x4d\xbc\x78\x2a\x36\x94\xec\xb6\x69\x4a\xf2\xb8\xc5\x27\x8a\x2a\x46\x3c\x0f\x1e\x89\xd6\x9e\xe1\x76\xf5\x54\xbf\x1b\x62\x93\xcf\xc6\x7d\x5f\x36\x6f\
x29\xdc\x6a\xb1\x51\xe4\x80\x13\x6d\x84\xd3\xd9\x56\xb1\x69\x5c\x87\xd9\xef\xd0\xa9\x81\xc9\x6c\x05\xf2\x4e\x3e\x21\x20\x5c\xff\x3f\xaa\xae\x63\xcd\x55\xa4\x59\x3e\x10\x8b\xc2\x43\x2d\x25\x21\xe1\x8d\xf0\xb0\xc3\x7b\xef\x79\xfa\xfb\xf5\xf9\xe7\xf4\xcc\x5d\xb7\x3e\x75\x43\x55\x66\x46\x64\x46\x46\xcf\x58\x36\x7e\xaa\x33\x5c\x77\xbd\xe7\x3b\x9c\xa6\x65\x5b\xfe\xf6\x88\x34\xaf\xee\x08\xe0\xae\xe5\xfa\x88\xe9\x48\x90\xe8\xdc\x01\xc0\xb9\x66\xc8\xbb\xb9\x40\x41\x4a\xdf\x81\xe8\xaf\xcb\xca\xff\xd1\x17\x73\x0f\x01\x32\xd1\xe6\xad\x64\xf8\xb6\xcc\x21\x43\x00\x03\xe9\x77\xc0\x5d\x1f\xdc\x7b\x1c\x1f\x29\x71\x08\xa2\xf7\xfb\xef\xe2\x2b\xe5\x91\x08\x7a\xbd\x33\x20\x5f\x69\x6c\xe7\x4f\x7c\x9d\x23\x0c\xd2\x6c\xe2\xd3\x48\x3d\x71\xa2\x25\xf6\x12\x9a\xb5\x4e\xe1\x56\x03\x21\x5f\x0b\x0b\x3c\xcc\x6a\xac\xcf\x35\x54\xe2\x65\xc0\x7a\x5d\xb7\x17\xe4\xa4\x6d\x56\xde\xa7\x6a\xcb\x6e\xb4\xe1\x44\x8c\xae\x72\x3d\x4d\x63\xe4\x62\xe7\x26\x1c\x64\xb0\x32\x7b\xca\x20\x60\xa7\x6a\x86\x61\x18\x74\x58\x76\xb8\xc7\xb8\x54\x39\xbd\x74\x7d\x9b\xa7\x5e\xa5\xdf\x08\xf8\xeb\x35\x9d\x65\x7a\xd7\xac\x57\xa0\x7c\xe1\xe4\xef\x63\xfe\x33\xf6\xf8\xdf\x99\x3c\xf6\x1f\x0c\x94\x22\x4c\x34\xb0\xf5\xf7\x63\xfa\xed\x84\xa5\xee\xb3\x4f\x19\x14\x2a\x76\xcc\xf4\x18\xb6\xdf\x6c\xd1\x1b\xfd\xbd\xb6\x8b\x36\x22\x20\xef\xe0\xce\x90\x02\x44\x88\x7c\x27\x26\xcc\x9b\x8e\x41\x7e\x15\xca\xb6\xf0\xd1\xea\x76\x9b\xac\xda\x0e\xaa\x0d\xf1\x0b\x52\xa3\x3c\xfa\x33\x39\x64\x84\xf9\xf5\x51\x7e\x7d\x91\xec\x2e\xcb\x15\xb9\xc7\x71\x4c\x21\x49\xaf\xdc\x5f\x42\xbf\x6f\x8c\xc8\x18\xc2\x42\x7d\x65\xbe\x11\xbc\x5b\xb1\x10\xd9\xd5\xb0\xd7\xf8\xb0\x89\x1f\xab\x55\x40\x23\xc3\x5e\x16\xbb\x37\x94\x64\xb3\xdf\x83\xd5\xb9\x08\x4b\x16\xf7\xaa\xd1\x9c\xe3\x4b\xf4\xf9\x3b\x4f\x62\xc5\x15\x22\x07\x40\x61\xee\x2b\x06\x4e\x91\x9b\x4e\x97\xcc\x17\x0c\x30\x1f\xbf\x91\x20\x6d\xf1\x46\xa6\x29\xbc\x33\x0c\xe6\x3e\x35\x42\x8e\xa8\x92\x5e\x42\xd9\xc4\x00\x42\xd7\xe1\x60\x48\x95\xa9\x1a\x26\xe5\x71\xcd\x88\xd5\x8c\x4a\x84\xaf\xdb\xea\x76\xfb\x57\x21\x8d\xf6\x7f\
xfb\x18\xc7\xf0\x79\x46\x2c\xd1\x3d\x83\x85\x88\xd7\x63\x70\x14\x9b\x42\x59\xdd\x46\x21\x13\xb0\xe5\x0f\x97\x20\x56\xfa\xa6\xa0\x25\x35\x57\x37\x4e\x59\x3f\xd2\x5f\xc1\x11\x24\x57\x6e\xc3\xcb\xf1\x84\xbd\x9b\x8d\x93\xdb\x20\xb6\x0d\x90\x0f\xf8\xe0\x91\xcb\xc5\xb3\xfc\xb7\x5f\x83\x1c\x38\x82\xef\xfb\x4c\x71\x4d\x65\x53\x25\xc3\x94\xc8\xb6\xc9\xd5\x30\xbe\x46\xe8\xd1\xac\xc1\x61\x3c\x8c\x62\x5b\x8e\x6c\x09\x65\xe8\x28\x28\x47\x04\x58\x12\x79\xa5\x38\x93\x0e\xa1\x70\x22\x79\x4f\xb1\xf9\x57\xc5\x24\xdb\xe9\xc6\x69\x22\x25\xa6\x70\x0b\x55\xaf\x08\x83\x78\x02\xf6\x89\x63\xf9\xe1\x8e\x74\xd6\xc5\x5a\x87\x6c\x7f\x94\x7f\xe5\x7c\xa1\xe7\x93\xfb\xa2\x1a\x93\x6e\xf8\x36\xd3\x5b\xae\x11\x10\xf1\x3f\x67\x32\x08\x36\x1f\x72\xb2\x56\x8b\x0c\x3d\x5f\x98\xaf\x72\x03\xbd\xf5\x4a\x5b\x4d\x0f\x96\xfc\xa6\xcf\x5f\x6d\x81\xfe\x64\x4e\x92\xce\x37\xa1\x3e\x9b\xfd\xae\x7f\x80\xeb\xff\x6a\xb0\x1b\x09\xde\xa0\x29\x57\x33\x5e\xc9\x6e\xe8\xc4\x3d\x2c\x06\x8b\x34\xcc\x53\xc3\x53\x92\x42\xde\x8c\x4b\xa7\xc6\x47\xb2\xdc\x6e\xb9\x9b\x02\x2f\xc5\x1f\xa8\x76\xaa\xa9\x90\x1b\x27\xf7\x76\x08\x50\x9f\x03\x5a\x66\x86\x79\x87\xbc\xdd\xd8\x76\x9e\x13\xda\x6c\xe1\x2b\x91\x32\x31\x34\x91\x7e\x64\x52\xe6\xc6\x91\xb9\x6c\x73\x5d\x78\x95\x81\x70\xbd\x1c\x93\xcf\xfa\xd1\x4d\x63\x9f\xed\x51\x4c\xd7\xcf\x73\x8c\xa2\x4e\x2a\xfc\x96\xd6\x51\x31\xf9\x8b\xcf\x0e\x35\x69\x2c\x7f\x1e\x5b\x37\xed\xc3\x2e\x6e\x04\x30\xe3\x38\x4e\x49\xb6\xd3\x48\x77\x1c\xf7\xd4\x16\x77\x14\x92\xe4\xb8\x00\x1e\x33\xad\xaa\x5c\x41\x23\x8b\xa7\x0b\x17\xe2\x4d\x93\x5c\x88\x95\xae\x79\x6a\xad\x58\x13\x3d\xcd\x86\x49\x8e\xf2\x2b\x81\x44\x77\x67\xd9\x9a\xa5\x62\x5e\x3d\x9d\x4f\x19\x74\x9f\x68\x26\xe2\xe7\xf9\x45\x0c\xaa\x5c\x99\xcc\xdf\x11\x8d\xdc\x5d\x9a\x3f\x97\x08\x22\x77\xd6\x50\x9e\xdb\x85\x9e\x2b\xf9\x54\xcc\x3d\x22\x0e\x47\x88\x91\xd6\x2f\x27\xc7\xeb\xb8\xe5\x09\x73\x18\x3c\xf6\x71\x29\xc5\xef\x9c\x49\xb8\x96\x29\x6f\x5f\x69\x80\xbe\x4a\x6c\x68\xa4\x2b\x4f\xb7\xce\x55\x64\x03\x79\xe5\x28\xa5\xd7\x75\x9f\xe5\xfa\x9d\x06\x04\x99\x9a\xa1\xdc\
x2c\x78\xae\x0b\x35\x76\x85\x3a\x5f\x99\x99\x98\x1b\xe6\x3b\x13\x36\x0a\xd5\xeb\x8d\xd9\x06\xe9\x3a\x3b\x10\x2e\x55\x83\x69\xb6\xdb\x7c\x98\x33\x9a\x5d\x8a\x00\xbe\xd7\xdb\x14\x19\xf1\x66\x7f\xd2\xe8\x03\xef\x70\x62\xc7\x89\x55\x28\x9b\xcc\x53\x5c\x76\x03\xb3\x36\xa1\x98\x64\x4f\xf4\x38\x13\x3c\x4d\xdc\xba\x16\xcd\x5a\x24\xb3\xb3\x22\x5e\xef\x3a\x7b\x3e\x3f\x45\x7a\x4a\xff\xe8\x19\xb8\x63\x8e\xe6\x29\x6a\x7c\x9e\xf0\x39\x8e\x0b\x4f\x24\x83\xb9\x37\xc7\x79\x92\x4d\x8b\xfa\xc0\x70\x36\x33\xf8\xd3\x26\xee\x6b\x34\xd3\xfc\xa3\xb0\x7d\x84\x45\x3b\xba\xed\x5c\x81\x23\xdb\x4f\xe2\xbb\x9a\x51\xf9\x12\xf7\xd3\x44\x43\x3e\xdf\x59\x01\x9c\xa2\xfb\xb1\x7c\xce\xe3\xd6\x2c\x07\xa8\xcd\xdd\x90\x5d\x62\xd7\x65\x25\x32\xe0\xc4\x00\x9b\xae\xbd\x15\x60\x3e\xc3\x6f\x0c\x93\x9c\x15\xad\xbf\xba\x85\xc7\x33\x5f\xda\x62\x69\x2f\x1a\xbb\x6c\xb8\x79\xed\x3c\x81\x1f\x58\x28\xc0\xc0\xd3\x0a\xa2\xdf\xcb\x6e\xac\x0e\xe1\xfb\xa7\xb5\x7e\x0f\x06\xc1\x90\x1c\x01\x08\x48\x4e\xc9\xe8\x1b\x98\x58\x89\x95\xdb\xba\xde\x73\x85\xef\x99\x01\x25\xb2\x3e\x58\x2f\x73\x38\x67\x35\x2c\xb0\x07\x0c\x41\x85\xfd\xac\xd2\xfa\xdd\xb0\x00\x4c\x1d\x01\x91\xc3\xdc\x5f\xc9\xec\xd2\x97\x58\x8b\xd7\x37\x87\xc7\x1d\x22\x0b\x2b\x14\x70\x7e\x0f\xfc\x07\xbf\xfe\xb9\x87\x52\xae\xf7\x32\x6e\x03\xf8\x39\x02\xef\xc8\xf2\x13\xe1\x8e\x9c\x7b\xaf\x14\xfd\x8d\xe3\x12\x66\x20\x45\x90\x59\x10\xca\x23\x12\x4c\x9d\x91\xa6\x65\x72\x9f\x83\x5a\x76\x7c\x40\xe9\x76\x37\x8e\x4a\x15\x60\x3e\x13\x22\x05\x1a\xbb\x13\x96\x49\x09\x0c\xb8\x21\xaf\x2f\x1f\x90\xcc\x02\x11\xb6\xd5\x4d\x9e\x56\x7c\x1a\x66\x8b\x5f\x11\x60\xe3\xa2\x78\xce\xdb\xd7\xeb\x58\xac\x22\x70\xff\xe9\x69\x72\x85\x85\xe7\x33\xbd\x39\xb9\xc6\x35\xd4\x14\xbf\x8e\x42\x8f\x40\x88\x74\x57\x68\x2e\x78\x3c\x9a\xeb\xa6\xc5\x1e\xd6\xb0\x10\xb6\xa5\x52\x31\x15\xd5\xa9\x9f\xc2\x53\xa8\x25\xb2\x9f\x1a\x1b\x0f\x58\x04\xca\x07\x6d\xe0\x08\x1c\xb0\xfd\x12\x9a\x0b\x11\x0c\xc3\xd8\x17\xc2\x26\x4e\x85\xd2\x90\xe4\xe7\xb3\x93\x4b\xef\x2e\xf2\x58\x49\xfe\x48\xa7\x93\x7f\x96\xfe\xdf\xfc\xf1\xb8\x89\x14\
x85\x03\xad\xd7\xec\x80\x30\xe1\xea\x4c\xcb\x22\x1a\x99\x71\xda\x35\xbb\x50\xc4\x9f\xf1\x7d\x0e\xe8\x59\xeb\xa3\x85\x15\x71\xea\xe5\xca\x54\x38\x6b\xd2\x5c\x74\x9f\xcb\x96\xa3\xa8\x29\x55\xa2\xce\xaf\x00\x1a\x3e\x61\xb7\x0b\x76\xd5\x7d\x4d\xa2\xf2\x7b\xcf\x73\x5e\xd1\xc6\x55\xbb\xc5\xf8\xa7\x0e\xb4\x16\x95\x8b\x06\x4e\x29\x11\xb2\xa3\x90\xfb\x2a\xef\x82\xff\x6d\x4a\x92\x9c\xd6\xd3\xa7\x1d\xe8\x66\x91\x0a\x2e\xe6\xb9\xcd\x7c\x1b\xd7\x83\x50\x70\x90\xa6\xd9\xde\x08\xda\xa7\x5b\x08\x62\x95\x31\x96\x4a\x73\xb9\xe9\x1e\xb8\x54\x0f\xe4\xa6\x5c\xac\x52\x2f\xdd\xea\x2b\xcd\x15\x29\x12\x22\x10\x73\xd3\x38\x69\x23\xcc\x3c\x32\xef\xb4\xd2\x83\x9d\x2d\x35\x67\xa2\xeb\x31\x1c\x52\x41\x14\x72\x8c\x32\x24\xea\xa6\xce\x35\xa8\xbf\x45\xf1\x9b\xff\x35\x70\x64\x33\xaa\xd5\x0e\x71\xd5\xd8\x46\x25\x85\x1b\x92\x4e\x46\x33\x79\xc0\x84\x4f\xc6\x24\xfd\x07\xcb\x06\xd8\xee\x8e\xf8\xf6\x9e\xd5\x72\xc2\x92\xcf\x81\xa9\x34\xf4\x67\x88\x56\x9e\xe2\xa5\x64\xde\x8e\x9d\xbb\x7a\xd5\x49\x48\xd8\x18\x77\x0c\xc2\xb6\x10\x58\xc9\x99\x52\xdc\xfb\x54\x23\xaf\x7d\xf5\x27\x9e\x49\x14\x2b\x3c\xf8\xcf\xef\x73\x3f\xb7\xb4\x77\x31\xdb\xc1\xd4\x8b\xa3\x67\x3b\x4a\x1a\xd9\x60\x19\xf9\x1a\xbd\x73\x89\xdf\x17\x87\x8c\xa8\x3e\x03\xec\xf6\x80\x87\xc1\x44\xc1\x25\xae\x19\x65\x9b\x05\xca\xf9\xe8\x61\xfc\xa9\xce\x60\x4a\x26\x0a\xfd\x6a\xc5\xc2\x9c\xef\xb9\xbd\x1d\x42\x23\x00\x76\x61\xf0\xd4\x9f\xc7\x4b\xeb\x6c\xf7\x2a\xfd\x8b\x83\xb3\x89\x5b\xfd\xb2\xd8\xc6\xa3\xa4\xfe\xf6\x65\x9f\x2c\x49\x5c\xdc\xa3\x5f\x78\x8b\x9c\x14\xf1\x12\xab\xc8\xa0\x04\xb4\x9b\x30\xf9\x5a\x5c\x9c\x7b\x43\xc3\x26\xe0\x1d\x41\xb8\xc7\xfa\xed\xb0\xaf\xec\xa9\xf2\xbd\xec\x1f\x45\xfa\xf0\x66\x97\x86\x66\xa8\x9d\x37\xcc\xfd\x1c\x49\xd7\xdd\xa2\xc4\xc6\x6b\x0c\x0b\x5d\xff\x30\x88\x46\x68\x2d\x6b\x54\xa3\x2e\x6c\x83\xfa\x51\xf8\xbf\xfc\x4b\xa3\xa6\x48\x61\x0b\x20\x7f\xfa\xd2\x47\x6a\x03\x9d\x3c\xb9\xe6\x96\x53\xcd\x79\x6c\x26\x4e\x2c\x17\xce\x64\x15\x83\xa5\xe3\x40\x1e\x89\x87\x4a\x3c\x7a\xed\x5a\x4e\xd5\x6a\x06\x6b\xb6\x26\x86\xb0\x28\x51\xca\x42\x32\
xc1\xe9\x4c\x9d\xd9\x2a\x31\xee\x60\x31\x01\x82\x10\x88\x98\x3d\xb5\x60\x58\x9e\xb6\x5e\x6e\xae\xce\x2f\x4a\xf7\xfc\xbb\x3b\xfb\x7a\x70\xc7\xa1\x72\x9f\xb1\xcd\xd1\x32\x0d\x1a\xe9\x95\xf6\x11\x1e\xc9\x28\xe9\x12\x50\xef\x79\x49\x6e\x85\xf5\x8a\x5c\xb8\x33\x90\x65\xd9\xc0\x16\xc8\xf3\x33\xa5\xf5\x37\xba\x13\x59\xae\x16\x0e\x9a\xed\x52\x11\x45\xfa\x2a\x48\x9d\x7b\x0e\xda\xb3\x17\x06\x47\xf7\x05\x66\x69\xd3\x3c\xcb\x7c\xd7\xc5\x48\x33\x0e\xc4\xe9\x5a\x56\x54\x1f\x09\xb5\x60\x42\x22\x55\xca\x44\xf9\x81\xac\x26\xf7\xdb\x43\x30\xfa\x57\x39\x3a\x3e\xf2\xfd\xfa\xea\xab\x61\x13\x03\xe7\x4f\x4f\xb5\x46\xe9\x52\xeb\x2f\xcb\x0b\x2d\x8c\x7d\x2c\x07\xcd\x79\xb3\x5e\x1c\x90\x2f\x48\x24\x3a\x14\x46\xe8\x79\x9d\xe4\xbc\x1c\xaa\x4f\xe8\x3e\xdf\x87\x7b\x75\xa7\x22\x7f\x35\xbd\x84\xe6\x4f\xd4\x07\xd7\x70\xb2\x44\xcf\xb1\x3b\x3e\x8b\x63\xeb\x13\xe1\xa6\x11\x5b\x45\xe5\x51\xc6\xa7\xef\xef\x7f\x38\x7b\xf1\x52\xa8\x4c\x69\xaf\x8e\x56\x01\xba\x73\x5f\xd4\xc8\x10\x04\x12\xea\xfd\x38\x4f\x6b\x95\xa6\xd5\xea\xae\xfe\x66\xd8\xbc\x42\x00\xa2\xc9\x4f\x90\x6f\x9f\x6e\x8a\xde\xa5\x68\xa1\xab\x66\x3e\xc0\x8b\x9c\xfd\xfa\x99\x83\x81\x78\xb8\x20\xae\x05\x40\x88\x8c\x1f\x28\xae\x2b\xf3\xc3\x8a\x3c\x94\x31\x9f\x71\x02\x30\x4c\xd2\x4b\x56\xf4\xa1\x9d\xf6\x93\x49\x3b\x52\x28\xce\xc4\xca\xdf\xd7\xfd\xab\xe1\x93\xaa\xb9\x0b\xbb\x90\x58\x49\xa4\xfb\xe6\xda\x95\xd0\x7a\x3d\x90\xde\x54\x80\x3c\x03\x12\xe3\x4e\x0b\xa6\x57\x21\x3f\xc9\x53\x35\x9c\xde\x46\x61\x00\x50\xdf\x94\x2b\x50\xed\x05\x1d\x0b\xf5\xd3\xd2\x9f\x5c\xeb\x58\x06\x36\xd3\x73\x85\x9e\x68\xf8\x39\x51\xeb\x26\x35\xb8\x47\x02\x77\x3c\x21\x00\x7b\xfb\x80\x57\x42\x48\x4d\x0e\x02\xe0\xec\x8b\x80\x66\x8f\x5f\xb9\x9c\x7a\xba\x26\x9e\xf5\x40\x4d\xd5\x8a\x0a\x5c\x63\xdf\x55\xf5\xf6\x5d\xcc\xfb\x94\x81\xf9\x36\xdf\x8e\xe8\x86\xc3\xe9\x20\x02\xef\x34\x94\x6e\x03\x50\x37\x0d\xaa\x60\xa3\xfc\x6d\x58\x43\x39\xab\x47\xc9\xed\x8d\x64\xf4\xfb\x29\x92\x88\x51\x13\x80\x05\xe0\x3c\xbc\x4f\x17\xa9\xda\x63\x76\xa7\x77\xf1\xec\x67\x48\xb3\x2a\xf7\x24\xe2\x15\xaf\x52\xd5\
x5e\xdd\x31\x75\xda\x63\x61\xe9\x67\x4d\x22\xda\x4b\xb0\x83\x4f\xae\x1f\xc4\xeb\xf5\x4f\xeb\x55\x3d\x4d\x0f\xdb\x98\xfb\xa5\x4f\xae\x3c\x59\x64\xe1\x57\xa9\xfc\x22\x56\x90\x01\xb0\x1b\x3e\x03\x9d\x0e\xdb\xbf\xa2\xf3\x86\x9a\x9d\xe0\x3a\x46\x64\x22\x4a\xc9\x24\xae\x55\x2a\x34\xec\x07\x52\x91\xb3\x3f\x3f\x1f\x47\x64\x34\x06\x72\x0b\x06\xb8\xcb\x1e\x4a\x06\x99\x46\xcc\xfb\xe1\x43\xa8\x26\x80\x37\xd9\xfc\x43\x27\xeb\xe8\x8c\xac\x54\xd9\xc0\x3b\xd4\xe3\xf5\xaf\xb6\x96\x8a\xba\x7d\x6f\x84\xfe\xcd\xe8\xd7\x32\xc9\xc5\x90\xd8\xf3\x51\x20\x00\x54\xac\x51\x6e\xc6\x0d\x70\x04\xc3\x07\x2f\x14\x1a\x61\x44\xd8\xd4\xb4\x7f\x6a\x07\x39\x5b\xed\xaa\x7c\xc9\x45\x61\x14\x4a\x03\x20\x3b\x7c\xa1\x3e\x89\x78\x25\xf2\xbc\x3f\xa3\xc0\x8e\x1f\x9f\x98\xd1\x25\x47\x8c\xdd\x68\x27\xcc\x1c\x9c\xdf\x5e\x89\x1b\x8a\xff\xc6\xb5\xaf\xec\x90\x07\x33\xfd\x28\xb8\xf2\x77\x6e\xfb\x0a\xde\x74\x6e\xeb\x76\x41\xca\x86\x5e\xb0\xe4\x20\x9f\x59\xff\x28\x72\x82\x99\xe8\x6d\x22\xf6\x2d\x1f\x6a\x3c\x8a\xd1\x49\xcd\xaa\xa0\x97\x08\x70\x62\x1f\xa3\x5a\x65\x6f\x58\xb8\xd7\x63\xff\x89\xc1\x0e\xdb\xe2\x81\x2c\xce\xb8\x4e\x99\x12\x00\xf6\x9d\x83\x9c\xfc\x68\x55\xec\x17\xd2\x95\xf5\x4f\xb9\x6d\x00\x43\xc1\x35\xf6\xb0\xa2\xb4\x6a\x67\x6a\xe4\xd7\xb1\x36\xc2\x6c\xcd\x13\xb5\x11\x96\x58\x19\x7f\xf3\xb4\xfa\x4a\x7e\xe8\x9d\x25\xa9\x0d\x1b\xc9\xef\x83\xcc\x62\xf5\x07\xad\xe4\x20\xec\x17\x0f\x1a\x29\xc9\x92\x0b\x62\x9c\x99\x31\x33\xc8\xe1\x8c\xe1\x02\x36\xe3\x8b\x6a\x5c\x20\x3a\x0d\x55\x05\xfc\x73\x9d\x5f\xa3\xbd\x2f\xeb\xfd\x47\x13\x24\xa3\xc5\x79\xdf\x80\xf0\xdf\xd0\xa8\xa9\x42\x31\xaa\x63\x99\xe5\x54\x18\xde\x37\x2e\xdd\x80\x5e\xd2\x7c\xfe\x44\x56\x20\xae\xfd\xcd\xc6\xfc\xa9\x86\x4b\x80\x83\xee\x29\x5f\xbf\x71\xac\x5a\x71\xbe\xd0\x6e\xa9\xda\xef\xf3\x72\x46\xb1\x68\x01\x78\xb1\x17\xab\x72\x8f\x8b\x05\x28\x42\xeb\xf5\xfe\xe9\x42\x1c\x21\x5a\xfa\x87\xa7\x8b\xaa\x4b\xef\x90\xee\x72\xf0\xb1\x12\xaa\xfc\x7a\x9f\x87\x2d\xa7\x54\xcf\x5e\x59\x2c\x74\x61\x15\x20\x06\xb1\xe7\x3b\xe8\x53\xf2\xb9\x25\x96\x54\xa0\x31\xf9\xf4\xb0\x35\
x4a\x37\xd4\xa1\x1c\x3e\x7d\xfe\x30\x9c\x12\xe8\xc4\xea\x6b\x9f\x5e\x16\xeb\xbf\xfa\x4c\xee\xd1\x1b\x36\x0e\xd5\x81\x1d\xec\x36\x1c\x56\xb8\xe5\x79\x3e\x8f\x1c\x38\xcb\x2f\xab\x51\x89\x17\x0a\xbd\x41\x44\x2d\xf4\x5d\x2c\x44\xe9\xcc\x95\xbf\x41\xa1\x0c\xd5\xd3\x3d\x87\xfd\xec\x7b\x82\x89\xb4\xdd\xed\x9f\x54\x41\x0a\x22\xa5\xb4\x17\x2d\xad\x7a\x10\x97\xe1\xc4\x10\x80\x21\x72\x62\x61\x85\xf2\x3d\x55\x41\x25\x5e\x6a\xf5\x45\x18\x1e\x65\x67\xf8\x7c\x68\x99\xfc\xef\xf9\xe4\x30\x67\x28\xc6\x2b\x62\x0a\xf1\xbb\x97\x6f\x2c\xca\x7d\xc3\x7d\x75\x07\xc0\x8a\x19\xa6\xc5\x7e\x15\xcb\xd9\x3a\x45\xa4\x17\x27\x02\x8b\x11\x00\xf1\xc6\xa7\x9c\xf4\xda\x8d\xf1\x93\x8b\x68\xb0\x4a\x7c\xfe\x0c\x67\x2d\x2c\xdf\xd3\xe2\x6e\xab\xa2\x30\x0c\xfa\x4e\xb6\xc5\x1a\xcd\x36\x9c\xdd\xe8\xe7\x5d\x94\xd2\x84\xec\x2b\x93\xf5\x1e\x02\x25\xa7\x38\x7f\xde\xc9\x37\x67\xf9\xc7\x7f\x3c\x05\x0e\xfd\xd4\x6c\x14\xc6\xca\x33\x0a\x9f\x2c\xa1\x5f\xa5\x52\xe2\x5d\xda\x85\x2d\xc5\x82\xc1\x31\x00\xd3\x7f\x82\xae\x5d\x3e\xbc\x28\x4c\x6c\x5e\xeb\x7e\x5f\x8d\x7c\x39\x8f\x13\x5f\x7e\xc3\x97\xf0\xd9\x66\x0c\xd9\xec\x26\x34\x78\x70\x1d\x66\xc8\xf2\x9f\x32\x88\x10\x16\x2a\x4a\x1a\xef\xe0\xe8\x4f\x16\xe4\xcd\xf5\xfd\x53\x3b\x2e\xa1\xe0\x38\xe6\x86\x9c\xfe\x72\x25\xd9\x6d\xad\xfe\xae\x0c\x02\x52\xba\xfa\x15\xfe\xf6\x19\x78\x0e\xa7\x34\x36\xe6\x9f\x4b\xcc\x9f\x68\xaa\x24\x32\xc6\xc0\x89\x62\x11\x15\xe1\x61\x75\x5b\x64\x61\x97\xf8\x36\x4d\x68\xca\x9b\x85\x99\x68\x0b\x60\x59\xaf\x7d\xca\xe5\xbb\xcf\x66\xfa\xa5\x70\xbc\x9c\x72\x53\xaa\x30\x00\x45\x9b\x1c\x99\xe8\xd9\x76\x13\x83\x3b\x2e\xd1\x91\x93\x8a\x20\x8a\x14\x4d\xfd\x20\x33\x7c\xe9\x5d\xbe\x87\x4b\xbd\xa5\xc6\x18\x24\x36\xe9\x65\xcb\x24\xcc\x57\xf2\xab\xdd\xe2\x42\xfe\x9c\x2c\xfd\xb4\x95\xb6\x27\x00\xc9\xe6\xd4\xa5\x12\x75\x8b\x60\x30\xa3\x01\x47\xba\x2f\xf6\xa8\x53\x7c\x7f\x09\xdd\x52\xb7\x27\x10\x2a\x83\x7f\x83\xf5\x3a\xa5\x4e\x0f\xda\x9c\x52\x68\x02\x92\xf4\x4f\xa2\x6d\x76\x20\xd1\x7b\xbc\x67\xf8\xd4\x92\xe5\x57\xe5\x1e\x74\x6a\xf4\x67\x78\xbf\x5b\x40\x22\xda\xf6\xe9\x0d\
x93\xce\x83\x79\xd9\x6f\xf5\x94\xd4\xb0\x97\x58\x4e\xf8\x95\xdf\xaa\x57\x06\xb4\xcd\x7e\xa6\xad\xe2\x61\xa9\x8f\x41\xc1\x64\x7d\x01\xa4\x9d\x54\x05\x4b\x3a\xcf\xd7\x1a\x07\x0e\x24\x20\xfb\xe6\x9e\xac\x99\x68\x2d\xbd\xbb\x14\xb7\xf8\xfb\x0d\xc9\x23\xe2\x44\xab\x39\x9d\xc0\x94\xd3\xed\x0e\x92\xc0\x05\x08\xab\xdc\x37\x0b\xd4\xfb\x55\xc6\xa4\xad\x01\x20\xab\xa4\x09\x20\xf8\xf2\x98\x66\x3b\xd0\xa8\x8f\xca\xbd\x0c\x78\x5e\x6c\x2d\xca\x3a\xe2\xbe\x9f\xfd\x3f\x58\xe8\x59\xc0\xed\x6b\x18\xb5\x78\xa3\x8d\x47\xb2\x5c\x9a\x11\x84\xd0\x2a\x44\xdd\xd1\x51\xe6\x12\x8e\x82\x6c\x10\x20\xf6\x5a\x2d\xea\xeb\x58\x62\x2d\xd7\x7b\x93\x36\x4c\xf0\xbe\x82\x4e\xaa\x9e\x5c\xb7\x51\x88\xd6\x84\xb2\xad\xe4\x2f\x8e\x37\xf1\x71\x8f\xee\xd5\xf7\x09\xe4\xa3\x9c\x45\x2e\x10\x60\xc5\xdc\xc4\x9d\x85\x27\x19\xbd\xeb\xc0\xa7\x0a\xf1\x75\xec\x77\x36\x63\x7b\xd5\x7e\xb2\xb0\x89\x78\xe2\x63\x70\xa0\xe7\xc5\xfa\x2f\x1f\xe0\x9e\x8c\x48\xeb\x17\xb5\xa9\x51\x6d\x0f\xa3\xec\xf4\xe7\x44\x04\x74\x1f\xe4\x59\x96\xe5\xb2\x7f\xab\xac\x61\xdc\x0c\x01\x30\x7d\xfa\xce\x46\x7f\x6f\x44\x84\xa4\x7a\xe3\xe6\x3a\xad\x97\x03\x96\x35\xd3\xdb\x1a\x25\x0b\x45\xcd\xcd\xe5\x47\xd9\x91\x1d\x08\xe2\x38\x06\xe0\x33\x83\x5e\xcb\x01\xd6\xc5\x82\xa9\x4e\xdf\x11\xb0\x9b\x9e\xeb\x12\xa6\x37\x51\x7a\x31\x98\x8f\x3a\x22\xe9\x4a\xf9\xf6\x6a\x41\xbb\x3e\xe6\xd2\xfb\x57\x77\xde\x59\x12\xdf\x56\x41\xa3\xe8\xef\x50\xb5\x9a\xfb\x1b\xd5\x99\xe8\xe7\xfb\x1d\x24\xf1\x9e\x82\x32\x03\x8c\xd4\x00\x63\x25\x32\xac\x0a\x41\x66\xd1\x3e\x0d\x01\x02\x53\xf0\xf6\x6f\x56\x80\x1b\x6b\x0f\xc1\xed\x60\x1f\xcb\xdc\xa8\xef\x64\xf6\x17\xc8\x21\xcc\xfb\xea\x8e\x31\xcd\x32\x72\xf0\xad\xcd\x64\x60\x92\xfc\x22\x17\x39\x91\x76\x2f\x2d\x02\x9d\x63\x37\x5f\x97\x5b\x8a\xff\x36\x32\xe7\x46\x9b\x93\xa0\xb6\xb4\x91\x19\x6a\x89\x95\xfa\x9b\x83\xd1\xc5\x7e\x97\x2f\x53\x4a\x06\x3a\x68\xa5\xa3\x39\x06\x37\x0f\x4d\xa5\x4f\xe7\x1d\x30\xf5\x0a\x24\x80\x94\xb7\x02\x49\x7a\xcb\x56\xc5\x22\xaa\x99\x02\x5c\x79\xd0\x4a\xa2\x9a\x72\xd2\xca\xd6\xf5\x41\xad\x21\x3e\x94\xcf\xc5\xa0\x70\x37\
xb3\xdb\xcd\x37\x82\xc9\xf7\xdb\xce\x1b\xa3\x71\x9b\x4c\xa8\x49\x81\x02\xac\xdf\x70\x89\x7a\xb0\xe2\x5b\x97\x67\x2c\x9b\xdf\xe5\xe3\x4b\x99\x8c\x19\xcc\xf1\x99\x7e\xbb\x7f\x75\xcb\x5e\x28\x02\x68\x72\xe5\x37\xf0\x2c\x49\x42\xe9\x2c\x17\xa7\x9d\xa9\x57\x74\x72\x01\x92\xe5\xfb\x45\x34\x3a\x62\x50\x10\xc1\x28\x12\xec\xdd\xc3\xe3\x10\xbb\xa1\xd4\x2a\x96\x53\xcd\x7e\x0f\xce\xda\xb8\xd3\x6a\x0f\x88\xd1\x32\xa7\xda\x90\x89\x41\x30\x98\x0f\x6e\x2e\x07\x88\x41\x4d\x41\x87\x7b\xe3\xad\x8f\x18\x00\x08\xb1\x7b\xaf\xeb\xb9\x38\x06\xd5\xd5\xf3\x78\x9e\xdb\xb8\xf3\x7b\x46\xfb\xa6\x6e\xa3\xe0\xae\x8b\xf6\x64\x18\xeb\x1f\x50\xf3\x93\x12\x0d\xef\x15\xb6\x42\x2b\xf4\x31\x89\xa6\x93\xae\xc1\xc3\x8e\x72\x79\x57\xb0\xfe\x66\x21\x80\xe3\x97\x5d\x43\x14\xc9\xf3\x91\x21\x08\xf4\x2a\xa4\xe8\xfb\x12\x3e\xe5\xae\x35\xcb\x51\xa8\x87\x41\x89\x94\xd2\xd0\xef\x07\xe9\xea\x42\x0d\x4f\xe8\xb2\xac\x7a\x1b\x37\x66\x41\x30\xc6\xdb\x6c\x91\xf1\xcb\x78\x32\x46\x0a\x00\x22\x20\xf3\x44\x17\x52\x7b\xf8\x29\x9e\x1a\x31\x01\xad\x36\x33\x07\x65\xd1\x30\x79\x66\x78\xd6\xfb\xaf\x67\xcd\xad\xc9\xce\x6d\xc5\x77\xa5\x87\xe4\x82\x48\xbe\xef\xfb\x34\x54\x7c\xa4\xf5\xb2\x3c\xcb\x02\xdf\x00\x08\xb5\x13\x99\xd9\xdf\x23\xc5\xb6\x7b\x8e\xc8\x71\xcc\x7c\xe9\x42\xf2\xcd\xa8\x78\xa7\x68\x34\xbb\x51\x73\xb5\xfc\x6d\xd0\xe2\xa2\xbf\xfa\x91\x4e\x62\xbe\x9c\x36\x3a\x82\x81\x22\x05\x7b\xbe\xf1\x8a\x60\xa2\x69\x5e\x11\x2a\xe5\x4b\x90\x61\xf6\x1c\x04\xab\x71\x95\xae\xe6\x86\xfe\x15\xdd\x4c\xa6\x18\x19\xbd\x2c\x3d\x12\x16\x9f\xc4\xf8\x3b\xbb\x56\x1f\x8b\xed\x60\xda\x65\x5f\x95\x6e\x51\x3a\x1b\x6d\x5b\x0f\xdd\x0e\x8b\x72\x40\x91\x00\x80\x37\x00\x27\xe2\x5e\x48\xd4\x4d\xc6\x0f\xca\x42\xb4\xd9\x27\x12\x52\x10\x79\xac\xfc\x8c\xb1\xb7\x7d\x41\x0f\xd0\x1a\x1b\xfd\x50\xe1\x43\x60\x7e\xea\x83\xcb\x72\xe3\x46\x24\xae\xa5\x85\xe7\x91\x40\x04\xa9\x0d\x0c\xe6\x3e\xb2\x81\x96\x65\xf1\x35\x27\x98\x94\xf0\xb1\xaa\x98\x22\x64\x76\x09\x2a\xbd\x72\x9a\xba\x4e\x9b\xd2\x1f\x26\xff\xb7\xcf\xdb\xbd\xbc\x98\xb0\x98\x3e\x79\x87\xf5\xf8\xa6\x7b\x07\x44\xd6\
xee\x32\x3b\x92\x1b\x7d\x08\x52\x22\x65\x18\x0c\xeb\x92\x0c\x60\xe6\x1e\xd3\x70\xb7\xa8\x29\xec\xf2\xde\x17\xc0\x5d\x64\x10\x42\x75\x7e\xe2\x75\x0b\xba\x74\xde\xf6\xd4\x43\x2e\x83\xb5\x22\x9b\x57\x70\xe9\x0e\x70\x77\x7f\x47\xe1\x48\xf4\xde\x44\x78\x37\x00\x6e\x48\x01\x8a\x84\x2a\x02\x58\x5c\xa2\xa6\xa8\xb3\xef\x9b\xbd\xe4\xd6\x1e\x0f\xe5\x5c\xdb\x66\x3f\xf8\xe6\x60\x2b\x48\xc2\x01\x72\xe1\x6f\x8d\x79\x86\xdd\x1c\x2b\xc8\x52\x60\x0e\x79\x5f\xa2\x17\xfa\x2e\x4c\x93\x62\x64\x3a\x6a\x8b\x84\xf5\x06\x24\xc3\x19\x46\xc6\x1d\x59\x0f\x4d\x3b\x8d\xe7\xa9\x09\x92\x2c\xdf\x09\x17\x3b\x24\xe2\x63\x3b\x84\x56\xb5\x59\x81\xf6\x4e\xfe\xb9\xde\x05\x86\x44\x1d\x9e\x80\xaf\x49\x1b\x14\x92\x1a\x46\xee\xfb\x3d\x80\x2f\x93\xd8\x18\x9b\x1c\x4f\xfd\x79\x01\x64\x73\x76\x03\x10\xe4\x08\x46\x16\xd8\x9d\x1d\x0b\x23\x9d\x95\x5f\xa3\xba\xd4\x5a\x64\xdc\xc8\x4b\x10\x6c\x65\x77\x26\xb0\xc6\xc3\xfe\x57\xe3\x7a\x6b\xba\xee\x40\xcf\x52\x3b\xa9\x1c\x4e\x0b\x50\x41\xb7\xcf\xc2\x07\x96\x59\x3f\xd3\x53\x08\x10\x46\x17\x56\x0e\x61\xf2\x15\x43\xac\xf1\xd3\x81\x7b\xf0\x3b\xaa\x83\x90\x25\x9b\x3f\xbd\x15\x3f\xc4\xd3\x47\x8c\xcb\x4a\x4b\x2d\x6e\x7c\xf0\xef\x30\xf6\xce\xf3\xeb\x89\xec\x2b\xda\xef\x5a\x70\xe9\x6a\x9c\xfc\x76\x40\x48\x03\x80\x09\x00\x6c\x05\xb9\x81\x2c\x7e\x62\x4f\xe9\xcc\x00\x1c\xef\x09\x82\x30\x36\x81\x00\x6c\xf9\x16\xa0\x4b\xd6\x58\x36\x83\xbc\x3a\x83\x5d\xc6\xa6\xa7\x35\x79\x72\x15\xba\x58\x64\x4f\x99\x9d\xa2\x87\x68\xa1\x8b\xf2\x8f\x3e\xe4\x26\xa9\xcb\xd4\x72\x59\xac\x86\xd1\x02\x92\xb8\xfb\xf1\xdd\x34\x30\x1d\x63\x1c\x00\x04\x81\x2c\x05\x33\x70\xcd\xf7\x5a\xaf\x0b\x11\xa7\x93\xd3\x49\x26\xda\xfa\xc0\x24\xfd\x47\x62\xf4\xe6\x40\x61\x0b\x13\x02\x3e\x70\xb8\x4f\x0a\x3f\x1f\x85\x62\x2c\xb4\xfe\x7c\x2a\xad\xca\xe5\xe8\x63\xe1\x0f\x65\xd7\xb4\xfa\x71\xa8\x46\xee\xb3\x7e\xc1\xce\x0c\x73\x5d\x03\xe3\xfb\x10\x39\x92\x4c\xf7\x3c\x26\x58\xfc\x38\x54\x07\xa6\x8f\x40\xb2\xa7\xbe\x8b\x31\xe9\x06\xa9\x80\xcd\xf4\x26\xe4\x4d\xf2\x53\xdf\xed\x45\x65\xb3\x5c\x0d\x8e\x12\xf0\x03\x7c\x93\x5a\x4d\x6e\x38\xf1\
xa8\x0c\x3e\x78\x20\x5c\xfd\xdf\x9c\x49\xb1\xf5\x63\xea\x4d\x8f\x2f\x03\x17\x4b\x89\x9c\x81\xab\x8b\x20\xb9\x96\x03\x0d\xe6\x40\x50\x90\xa2\x70\x95\xf6\x22\x57\xf9\x02\xc6\x9d\x60\x92\xed\xec\xcc\x57\xf8\x92\x7a\x3d\x47\x78\x2a\x98\x84\x66\x26\xbe\x8b\x61\xe3\x1e\xaa\x07\x87\xbf\x59\x76\xa4\x73\x6d\xb6\x3c\x8b\x5e\x65\x59\x0f\x6e\xf7\xc3\xde\x7e\x4e\xfc\xa0\x01\x28\xbc\x8c\x93\x31\x08\x26\xce\x89\x7d\xc7\x70\xff\x73\x27\x68\xe7\x2b\x6d\x75\xa6\x5e\x6b\x3a\xad\x84\xa7\x9f\xbc\xef\x5b\xa4\xff\x1f\x9f\x58\xe7\x08\x5f\xe8\x44\x77\x35\xb8\x35\xf3\x49\xa4\xd1\x8b\x40\xc3\x4e\x42\xe7\x4c\xb3\xd9\xfb\x00\xd4\x59\x89\xeb\x5f\x8d\xe4\x61\xe1\x64\x2e\x44\x12\x6c\x2d\xb1\x12\x4b\x0b\x50\x06\x8d\x18\x23\x02\xc6\x35\x9f\x63\x6f\x5c\x62\x7e\x5c\x80\xa6\x1a\x74\x3b\xae\xee\xc7\x72\xd4\x64\x1a\xdc\x6e\x5f\xd2\x71\x5d\xaf\x32\x6c\x8a\xbc\x10\xec\x37\x34\xbc\xeb\xf0\x42\x13\xbe\xe5\x44\xb1\x90\xfa\xa5\x99\x13\xe1\xd3\x94\x9b\xb1\x06\x81\x64\x5a\x7f\xb2\xf9\x07\x66\x20\xdf\x70\x66\x27\xd8\x9d\x45\x3b\xe7\xe4\x07\xec\xb3\xab\x4a\x99\x2a\xe3\xcf\xf9\xbd\xcf\x77\xfa\xe8\xa7\x57\xed\xba\xa1\xe0\xef\x6a\x62\xdf\x4f\x5d\xfe\xfe\x87\xe3\x49\x4e\x9f\xe5\xda\x59\xa8\xaf\x12\xb9\x7c\x95\xfa\x1a\xcd\xb4\xa5\xd8\x56\xb1\x6e\xb5\xb8\xe5\x2b\xbc\x04\x0c\x02\x67\xee\x45\x56\x59\x14\xf3\xed\xe7\x4f\xa8\x0e\x67\xb5\xba\xd3\xea\x76\x8b\xa8\xf8\x65\xb8\x55\xb3\x43\xef\x8a\x4a\xba\x8d\x52\x28\xe0\xcc\xdf\xff\xcb\x55\xc7\xeb\x96\x04\xa1\x09\x38\x61\xfe\x28\x77\xd7\x04\xdd\x1e\xd6\x22\x62\xc8\xee\xf6\x20\xd7\x0d\x2d\x23\x6c\xca\xf9\x2b\x81\x66\xe7\xf8\xa1\x43\x09\x54\xc2\xfe\xe0\xe6\xdf\x99\x29\xff\xf2\x24\xa1\xbf\x93\x10\xd3\x47\xe4\xc6\xec\xe6\x54\x2d\xfd\x7a\xc1\x87\xf1\x81\x07\x35\xaf\xee\xe4\x5a\x95\x1b\x79\xa7\x99\xb3\x2f\x49\xe5\xbe\x0b\xa2\x7f\x22\x9f\xd2\x7d\xfd\xe5\xf3\xe6\xdf\xbd\xc8\x70\x5f\xea\xd1\x9d\x3d\xe1\x9a\xdc\x9b\xcd\xa5\xd7\x8e\xd6\x4f\xdd\x78\x7d\x5f\xed\x18\x74\xf2\x0b\x66\xed\x6d\x4f\xa9\xcd\x06\x89\x4c\x0b\xd6\x25\xcc\xdd\xe2\x8e\x9f\xc7\x77\xe3\x40\x27\xbf\x3f\x7f\xb1\x33\xea\x15\
x68\x52\xd0\x7d\xeb\x67\x24\x47\xad\xa7\x3e\xb6\x68\x4e\x70\xba\xfe\xd6\x56\xf5\x9b\xca\x96\xad\xde\xdc\x1a\xf5\x39\x8c\x59\xfb\x5d\xad\xca\xec\xf1\xcf\xa7\x52\x05\xbd\xf3\x3f\x9f\x2e\x3e\x94\x44\x04\x20\x49\xe8\x46\x64\x08\x4b\x3f\xb3\x6b\xb0\x7d\x50\x39\x85\xc1\xea\x4e\x24\x8b\xb9\x6f\x5c\x50\xe6\x64\x6e\xc3\x1e\x50\x21\x7b\xd1\xed\x87\xb9\x96\x09\xdd\x6a\xac\xe6\xc6\xf7\x33\xfe\xc5\xad\x6b\x0d\xad\x6b\x3e\x38\x27\x6b\xec\x61\xc5\xb6\x65\x91\x8b\x38\xba\x1e\x14\x89\x28\x6f\xad\xbb\xa9\x41\xc7\x16\x5b\xdc\xb3\x07\xc1\x9c\x97\x70\xe5\xec\xfd\xfd\x3b\xc7\x65\xd4\x1a\x29\xb9\x6c\x5f\xd6\x6c\xc6\xbc\x29\x84\x3f\xe4\xf8\x93\x8f\x58\xee\x98\x2f\x8c\xb2\xf6\xea\xfb\xac\x29\xac\xbc\xbf\x79\x55\x94\xf9\x91\x0b\xa0\x30\xb2\x7f\xf3\xbb\x5a\xd6\x68\x52\x30\x7d\x6b\x99\xed\x8c\xa5\xca\xa9\xab\xb9\x3e\x11\x57\x6d\x3a\x65\x6d\x50\x85\xe5\x52\x12\x6b\xce\xaf\x03\xeb\x6f\xd0\xf6\xb7\xb5\x42\x35\xfc\x30\xc7\x73\xd0\x9f\x56\x90\x1d\xbf\xfa\x65\x45\x6b\x50\x75\x89\xb2\xb8\x80\xc2\xe4\xed\x2e\x8b\x34\x08\x69\x64\x83\x70\xd6\x86\x33\xed\x73\x81\xd8\xc6\x4b\x69\x48\xfa\xa4\x8c\xdb\xc1\x0d\xf9\xeb\xed\x12\x1d\x4b\xc7\xff\xab\x87\x21\x24\x11\xa6\xf5\x29\x27\x10\x4c\x95\x93\x8c\x8d\xbb\xdf\x2f\x4d\x8b\xd7\x74\xe8\xbd\xcf\x98\x2b\x67\xfa\x7d\x4b\x21\xa6\x7f\xce\xb7\xf9\x76\xe5\xb9\x02\x44\xfd\x5f\x0e\xd0\x76\x96\x9e\x3d\x2d\xe3\x85\x6b\x55\xb0\x9a\x53\x3e\x30\xb8\x11\x1a\xd2\xc8\x1f\x4f\x61\x80\xc2\x61\x68\xae\xa9\xd6\x82\x1d\xbf\x3b\xac\xcb\x0f\xc5\xaf\x29\x97\xca\xff\xac\x47\xba\x2c\xc7\xe8\xc5\x3f\xdf\x33\xc9\xd5\xb0\x7d\x93\xbb\x16\x6a\xc7\x08\x69\xd7\x9a\xdc\x97\xfa\xf1\x58\xb1\xb9\x3b\xca\x59\x0f\xfd\x73\xbc\xb4\xc2\x3b\xd9\x4b\x65\x5b\xd5\xfe\xb0\x39\xcd\xa9\xd3\x76\x2f\x15\xb3\x4d\x72\x3d\x50\x25\xf1\xfa\x6f\x1e\xb9\x05\x13\xf5\x45\x5d\x42\x65\xb1\x0b\xe5\xe8\x9a\x6f\x66\x9f\x96\x51\x4e\x06\x4f\xe4\x0b\x84\x19\x4b\x6f\x17\x43\xe7\x33\x1f\x60\x1b\x27\x4f\x5e\xb8\xa9\xe9\x7d\xcd\x44\xd3\x8f\x8a\xb0\xdf\xcf\xfe\x6f\x1c\x0d\x9b\xaf\x9d\x08\x86\x93\x8b\x58\x84\xdc\x23\xb8\xdf\xf3\x2e\x50\
x48\xd6\x97\xd1\xa1\x9e\xb4\x41\x09\x5f\xda\xcb\xb9\xa7\x74\x3b\x98\x9d\xbf\x23\xc6\x90\xb7\x2c\x8f\x8c\x2f\xfe\x1f\x6c\x1b\x4c\x7a\x63\x3b\xf9\x0a\xdb\xa5\x6a\xe1\x15\x76\x01\xcf\x22\x54\xcc\x9b\xbd\xbc\x12\xa6\x5c\xc8\x4e\x1b\x76\xef\xb0\x0b\x9b\xc5\xb1\x21\xea\xe6\x0d\x16\x53\x9b\x15\xe3\xed\xa8\x3d\xa7\x1a\x4d\x2a\xf6\x8b\xd5\xff\xfd\x3e\xa5\x76\xbd\xd1\x55\x44\xed\xcf\x33\xba\xf0\x7b\x28\x87\xe0\xa3\x5b\x3f\x16\xb6\x76\xe2\x98\xed\xb0\x83\xf7\x7e\x3a\x6e\x1b\x5a\xc9\xfd\x9a\x31\x36\x9c\x8d\x0f\xce\x69\x38\x62\xc6\x5e\x15\xd9\x56\x42\x11\xaf\xed\x19\xfd\xea\x5e\xf8\x50\x9a\xa0\x2f\x67\x89\x2d\x9f\xaa\xf5\x26\xc7\x34\xf9\x72\xcc\x16\x81\xe6\x83\x04\xb3\x2f\xaa\x8a\x85\x6c\xf5\x4b\xaa\xd2\x90\xa5\x4f\xaa\x07\x1a\xbd\x87\xea\xa5\x54\xf8\xdd\x45\x19\xfb\xed\xfe\xfd\xfb\xf0\x50\xbd\x11\xe5\x19\xb5\x5f\xf6\x5c\x08\xed\x12\x2b\xb1\xea\x84\x24\xef\x6a\x08\x8a\x4c\xc4\x5e\xd2\x66\x54\x0c\xd7\x51\x17\x12\xdc\xd7\x8c\x0d\xb1\x85\xae\x52\x15\xf0\xbc\x3b\x77\x12\x9d\xa5\x73\x9f\x2c\x05\xea\xcf\xe0\xcb\x3d\x87\xc7\xfc\xbb\x23\xaf\x5e\x59\xff\x86\xde\xaa\xb5\x4b\x3c\xa7\x31\x9e\xad\xee\x04\x9d\x76\x8c\x7c\x75\xa3\xe1\xa0\x49\xb3\xa5\x6e\x3e\x13\xc5\x9f\xe8\xcc\x2e\xe3\xf4\x24\x73\x17\xe3\x8b\xed\x45\x6f\xaa\x00\xe3\xb9\x6f\x33\x2a\xf6\xcc\xe6\x48\x7d\xf9\xd5\x78\x3f\x3b\xe5\x6a\x4e\x27\x61\xca\x73\x89\x85\x4f\x0c\x1b\xb2\x11\x73\xb9\x2a\x86\x37\xff\x06\x60\xad\x83\x4e\xaa\xe1\xdb\x13\x9e\x2d\xe5\xb9\x53\xea\xbf\x4d\x86\x66\x0a\xf7\x3b\xe4\x87\x1a\xf2\x94\x8b\x1d\x56\xd2\x49\x35\xb7\xbf\x9f\x0d\xf1\x77\x56\xc0\xbc\x9c\x5e\x62\x1b\xaa\x57\xaf\xf3\x6e\xd2\x68\xed\x8f\x90\x45\xc8\xd9\x3a\xdc\x61\x5c\xf2\x7a\x09\x26\x3a\xc7\xae\xf7\xd3\x84\xa3\xf6\xa2\x82\x45\x7c\x61\xf4\xac\xc6\x17\xa7\xb4\x18\xb8\x83\x4d\xbe\x67\x79\x6c\x85\xfd\x2d\x2d\xbf\x7c\x52\xeb\x6c\x3d\x2f\x02\xde\xc4\x4c\x20\x29\xca\xb5\x6c\x9a\xe3\x7e\xd4\xdd\x81\x8e\xdb\x06\x93\xbf\x18\x03\x2e\x15\x4d\x96\xf8\xcb\x0d\x6b\xb2\x7b\x99\xd2\xd5\x00\x7b\x6a\xbd\xc5\x0d\x81\xf8\x67\xf7\x4f\x3d\x9d\x5d\xbc\xcb\x20\x6f\xb7\xf1\
xf9\x36\xdf\x56\xd4\xde\xd8\x22\xcc\xc3\xaf\x81\xdb\x91\xfd\xc4\x4f\xc8\x9b\x4d\x2c\x6a\xda\x5d\xa0\xb3\x51\xb4\xd1\x6d\x45\x1f\x6b\xf3\x54\xe0\xde\xe1\xfd\x8d\x3e\x17\x15\x8d\x42\x11\x1e\x87\xb8\x4c\x03\xa8\x59\xa7\xaa\x07\x5a\x97\x5b\xe6\x33\x41\x6f\xf2\x26\x94\x15\xd9\xa6\xe7\x50\xe5\xc9\x46\x18\xd3\x11\xf7\xfb\x15\x0e\x9f\xdf\xdf\x21\xd1\xcd\x28\xc7\xb2\xa6\xd9\x5f\x52\x52\xc5\x96\x60\x9e\xc8\x03\xa8\x48\x8a\xef\xfe\x69\x12\xb9\x5a\x3c\x1e\xe2\xe3\x29\x99\xef\x8f\x33\xa7\xda\xa0\x89\x93\xaf\xda\x05\x2a\x09\xb2\x5f\x16\x96\xc8\x9c\x89\xa3\xa0\xa5\x66\x7b\xfe\xe7\x5e\x09\x9a\xcb\x16\x77\x46\x79\x4b\xe2\x7a\xa2\x66\x77\xea\x59\x08\x2a\xf7\x1b\xc7\x4f\x5d\xad\x15\x2b\x34\xd7\x39\x12\x6a\x91\x04\xef\x21\x5f\xda\x19\x9b\xc8\x93\x7f\x9c\x2a\x30\xcf\x15\x57\x4a\xb3\xe4\x14\xfa\x79\x84\x6f\x66\x14\x0b\x2a\x22\xf2\x8b\x4f\x5f\x09\xe5\x7d\x9b\x17\xf7\x52\xd0\x4b\xad\x07\x9c\xf2\x5f\x4c\x58\x2e\xbb\x7a\xf1\xdc\x24\xda\xf5\x3f\x33\x1a\xee\xd1\xf3\x66\x77\x95\x29\xd8\x34\x3c\x6b\x4c\xa1\xad\xaf\x07\xc1\x3f\xce\x19\xa3\xb1\x9a\xde\xf2\xd7\xe7\xb9\x31\x4b\x2f\xb7\x2f\xc9\x47\x21\xc8\x14\xe2\x55\x3e\xaf\x79\x1f\xa9\x71\x0d\x57\x41\x43\x49\x14\x25\xca\xec\x25\x37\xbb\xaa\x3d\xc6\x22\xfd\xed\xc9\x49\xb7\xb8\x73\xf8\xb1\xd1\xa9\x50\xca\x1b\x42\xb2\x24\xd3\x19\x13\x85\x2b\x67\xbb\x60\xd1\xec\xc6\x97\x7a\x25\xad\x34\x11\xee\x8b\x7a\xc7\x18\x34\x42\x7a\x4f\x06\x60\xcc\xd6\x30\xf1\x35\x02\x8c\x6e\xc1\x99\x8e\xf4\x83\x2b\x36\x1b\x68\xcc\xf8\x59\xf3\x01\xc1\x6f\x4a\x41\x8e\x25\xfd\xfa\x4f\x6c\x49\x3f\x75\xba\x63\xf1\x8b\x4b\x73\x17\xb3\x88\x97\x6b\xc1\x70\xab\xbf\xb7\x4a\x19\x61\x7f\xe2\xae\xd5\xfb\x42\xef\x67\xb2\x23\x36\xc5\x85\x34\x32\x63\xc4\xe3\xfb\xfb\xce\x50\xb3\xf1\x3e\x7d\x2c\xf8\x7e\x76\xa5\x7c\xb6\x0e\x93\x58\x49\x57\x2a\x94\xef\x81\xde\x98\xb1\xe1\xaf\x4d\xb9\x33\x55\x7c\xfe\xbb\x0b\x49\x5d\x0b\xa1\x35\x1e\xba\x24\x5f\x45\xdb\x10\x04\x96\x74\x97\xf7\x9e\xaf\xb4\xa4\x5b\xb8\x75\x25\x68\x16\x7c\x37\xe1\x82\xc7\x24\x2d\x55\x9e\x80\xdd\x0b\x09\x21\x44\xe7\x6f\xd9\xea\x23\xa1\x3d\
xce\xd2\x58\x13\x42\xbb\x3b\x7f\x12\x7f\x35\x25\xdc\xd3\x47\x6c\x99\x17\x65\xad\x0e\x30\xcd\x76\x2e\x29\x81\x19\x46\x10\xcf\x77\x2a\x24\xc4\x72\xb3\x0a\xfe\x3c\x59\x67\x7c\x2a\x27\x24\x11\xe1\x07\x33\x57\x2d\xdf\x1c\xde\x8b\x8c\xe0\xea\xb9\x1b\xbb\x8c\xb2\xd7\x12\xf1\x13\xd9\xbf\x95\x0b\x0d\x1b\xb5\x46\x89\x3d\x81\xdf\x34\x01\x5b\xbf\xa5\xbf\x1e\x09\x5c\x27\x57\xd0\xc0\xca\x57\x63\x51\x15\x99\x68\x0b\x4c\x6f\x83\xc1\xe8\xe9\x98\xf8\xfa\x63\x55\x7c\x76\xeb\x2d\xaa\xef\x6b\x4b\xbc\x17\x06\x39\xfc\x5c\x4e\x86\x0e\x12\xfb\xbe\x3b\xc8\x8e\x31\x3a\x3f\x43\x87\xe3\x70\xca\x9f\x48\x4f\x24\x95\x89\xa6\xdc\x55\xf9\xc1\xff\xb2\xd5\x4b\x37\xc7\xbc\xcd\xfb\xdf\x59\xd3\x4f\x79\x3c\x55\xf3\x8d\x07\x11\x83\x69\x41\x77\x89\xee\x9e\x03\xee\x39\xc9\xc5\xf0\x59\x1e\x18\x92\x9b\xbd\x51\x69\xa2\x94\xa1\xa9\x10\x8a\x4d\x95\x22\x27\x1a\xf3\x67\x12\xac\x9f\xf2\x3d\xab\x6c\x16\xad\x38\x13\x9d\xe5\x04\x25\xa7\x97\x2f\x51\xce\xd7\x92\xf4\x3e\x7e\x7c\x0c\x04\x8b\xdb\x3f\x67\xf3\xf9\x7b\x36\x8f\x47\xde\x7d\x52\x47\xd6\xd6\x40\x89\x50\xff\x19\xf7\x13\xad\xd7\xc3\x93\x09\xf7\x10\x44\x3b\x31\x28\x7f\x26\x35\x58\x31\x5a\xb6\xbc\x1c\x2f\xa4\xd9\xb7\xbe\x34\xdb\x8f\x98\x23\xd9\xb7\xff\xc9\xf5\x9e\xda\xfa\x0c\x23\x08\xc2\x6a\x8c\xe9\x4b\x72\xc8\xfe\x41\x6e\xfd\xde\x20\x88\x17\x8b\x65\x9f\x7f\xc4\x17\xf5\x3b\xcb\x32\xea\xc7\x2e\xb4\x9f\x65\xc0\xda\xc5\xad\x78\x13\x0d\xde\x5b\xce\xad\xdc\x8d\x0e\x14\x64\xa8\xaf\x91\xb5\x0a\x01\xa5\x9e\x97\x22\xe5\x08\x55\xa3\x62\xf4\x10\x95\xcf\x25\x7e\x1b\x06\x10\x45\x19\x34\x13\x95\xc5\x1d\xd1\xcf\x4c\x1c\x88\x4e\x42\x77\xab\x1d\xc7\xdc\x09\xdd\x09\xda\xf7\x95\x7c\xbf\xe2\xdf\x58\x2a\x1e\xb0\x09\x71\x5f\xda\xa2\xd5\xe9\x25\x1c\x3c\x07\xa3\x4c\x30\x40\x1a\x8f\xbd\x88\xa6\x3b\x5f\x78\xa1\x1c\x1a\xef\xf5\xe1\x54\x5a\xab\x03\x5c\x2b\xc9\x61\xf6\xd6\x39\x02\x29\x69\x88\xa2\xc3\x22\x1c\xc7\xd1\x5e\x33\x2a\xaf\x6c\x66\x43\x7f\xdf\x43\xb7\x76\x82\xda\xe2\x3e\x17\x51\xc7\x36\x0a\x65\xd5\xac\x54\xe5\x27\xe1\xfc\xce\xc9\xc5\x25\x57\xa0\x3f\x1d\x83\xc4\xe2\x41\x38\x47\x1e\xea\x52\
x70\x20\x62\xd8\x5a\x6e\xfb\x73\x11\xaa\xab\x12\x2b\x71\x3f\xc3\x91\x25\x4a\xc5\xc2\x97\x47\x9a\x80\x31\x15\x42\x34\xfd\x79\xc7\x42\x49\x22\x52\x86\xb0\xa5\xe7\xc1\x0f\x82\x32\xf3\x3c\xb7\x13\x0c\x44\xa7\x38\xfd\x09\xfa\xf7\x0a\x5f\x1d\x07\x81\x50\x2c\x8f\x47\xae\xe6\xea\x5f\x51\xaf\xfe\xb0\x25\xcb\xff\x2c\x03\xe1\x12\x2b\x55\xae\x70\xff\x6c\x79\xb8\xe7\x48\x1e\x17\xe5\x71\x2a\xf2\x0f\xff\x3f\xfb\x32\x1d\x51\x42\xe3\xbe\xfc\xa7\xd0\x3e\xf2\x5c\xf0\x30\xce\x17\x9d\xa6\xd3\xe8\x9e\xe4\xfb\xc9\x9b\x3e\x51\x57\x1e\x22\x3b\x28\x24\x52\x90\x37\x94\x6c\x2e\xc7\xe9\xf8\x0a\xfe\xd0\xc4\xcf\xeb\xd7\x43\x25\xe7\xb5\xcc\x71\x3f\x64\xa6\x14\x6c\x80\x76\x16\xa6\x71\x81\xa4\xd2\x18\x85\xd8\x5d\xbe\x2b\xa6\x89\x35\x9c\x40\x67\xe8\x6a\xd8\x19\x82\xb0\xf6\x48\xd1\x52\x15\xf4\xfa\x3c\x31\x0f\xba\xfb\x70\x01\xa5\x36\x56\xa6\x38\x2c\x89\xf6\xfd\x50\xc9\x55\x27\xad\x39\x75\x1b\x39\x22\xdc\xdf\x1a\x0f\x49\xed\xa6\x9e\x6b\xcf\x5b\x4d\x2b\xd9\xcd\x29\x06\x0f\xb4\x70\x7e\xe3\xf1\xc9\x7e\xe9\xdd\xbd\xf6\x86\x81\x59\x12\xf3\x83\x45\x83\x37\x8b\xc0\xae\x1c\x23\xab\x57\x48\x55\xad\xc4\x4b\x07\x08\x09\x72\x87\x6e\x1d\x72\x34\x51\x63\x7d\xe0\x35\x99\x7d\xca\xb4\x5f\xe1\x1c\xe3\x69\x2f\x4c\x96\x73\xaa\x40\x8f\x88\xbe\x7b\x52\x39\x6d\xcb\x6e\xe8\xc6\x2b\x8e\xda\xe3\x49\xef\x18\xdc\xbf\xb2\x86\x44\x0a\xa2\xe9\x95\xf8\x7c\xfd\xea\x07\xa5\xee\x42\x4b\x25\x3c\xbb\x71\x67\x18\x2e\x8c\x1f\x0a\x2c\x64\xcc\xcb\x11\x24\x07\x43\x2b\xa0\x3d\x11\x60\x0c\x83\xc0\xe0\x52\x39\xf9\x23\xeb\x3d\x82\x88\xe6\x83\x91\xdd\x39\xd4\x20\xc6\xb2\x74\xac\x75\x58\x5f\x0a\xf8\x03\xef\x00\xd8\x04\x40\x3d\x27\x49\xa4\xf5\x7a\xe9\x25\x3a\x6d\x78\x9a\x72\xce\x3a\x79\x1d\x8f\x74\x27\x98\xdf\xb3\x9d\xcd\xc0\xfb\xfc\xe4\x0b\x76\xb8\x5b\xe8\xcf\xf3\x4d\xa1\x3e\xbd\x2b\x40\x20\x76\x42\x30\x81\xef\x67\x39\xc8\x17\x3a\x51\x67\x6c\xc0\x78\x6f\xbe\x28\x25\xf9\x52\xf7\x5b\xb6\xbe\xa5\xdd\xb0\xc3\x23\xbc\xde\xcb\x7e\x6c\x8b\x90\xf9\xcb\xe8\xa7\x17\x15\x0b\x4c\x4d\x6e\x3b\x82\x2d\x45\x0a\xb3\x76\x9e\xbe\xef\xb7\xf4\xd2\xc7\xc7\x1b\x1d\x0f\x12\
xb3\xbc\x6d\xb1\x30\xc2\x63\x14\x77\xbb\x2d\xd8\x83\x4d\x38\x9e\xb5\x90\xd8\x56\x84\x86\xfd\xde\xf9\x0d\x84\xe0\x53\xc4\x96\xa1\x21\xd9\x7c\x1b\xbe\x8e\xd0\x02\x8e\xb0\x77\xa3\x59\xe7\x8c\x50\x1b\x40\xdc\xa9\xd2\xd0\xc6\x25\x70\x04\x27\x08\x40\x3f\xe1\xb1\x60\xa3\xdc\x8d\x53\x4e\x3f\x1f\xb6\xea\xb1\xc8\xab\xfc\x22\x2d\xea\x4d\x45\x09\x12\x37\x07\x9e\x3b\x47\xd7\xde\x23\x66\x77\x29\x06\x80\x06\xa0\x38\xf1\xf9\xf8\x46\xad\xcd\xc0\x0d\x25\x98\x4f\xa5\x59\x54\xea\xbd\xf6\xb8\x7e\x9c\xe9\x2f\xb7\x91\x9f\x65\xd8\xae\xd9\xa2\x6d\xb4\x0d\xe3\x15\x67\xcc\x68\x7d\xac\xe9\x8a\x1b\x2f\x89\x00\x4d\x13\xb2\x72\x32\x4f\xf4\xb6\x83\x7b\xde\x4f\x69\x75\x95\x15\x67\xee\xcf\x68\x78\xfa\x89\xbc\x79\x13\xdf\xa8\x58\x79\x46\xc2\x01\xf8\x1d\x80\x70\x57\xca\xc9\x04\x70\xee\x77\x50\x7d\xd2\x30\x4d\xc4\xd5\xfa\x36\x4b\xd7\x34\x71\x62\x29\x0c\x43\x1a\xe1\xc1\xab\xcf\xfc\xd7\xc3\x42\x8c\xd0\x5b\x7d\x1e\xa1\x60\x2e\xf1\x9b\x67\x6b\xf6\x99\x03\xf2\x81\x6c\xbb\x3e\x4c\xf7\x60\x4f\xf4\x36\xfe\xdc\x45\xa0\xea\x97\x39\xe7\xba\x73\x7b\x21\xc1\xc0\x76\x26\xa6\x79\xc3\x15\x57\x59\x5f\x6f\x4c\xb3\x73\x08\x6c\xc0\xee\xd6\xad\xf4\x86\x4e\x10\xc2\x3c\x33\xc4\x3d\x39\xb4\xac\x45\x3b\xf2\xec\xbb\xef\x15\x25\xd7\xe9\x7e\x1c\x8c\x2d\x83\xd1\x79\x72\xbf\xf8\xfb\x3b\x4d\xfb\x4c\x23\xf3\x74\xfb\x4b\xc2\xed\x1f\x1c\x95\xb6\xd5\x9d\x01\xed\x02\x64\x8b\x37\x66\xa3\x4e\xe3\x41\xe7\x36\x62\xc2\x1c\x60\xa3\x16\x07\xd8\xf0\xb8\xd7\x8b\xe2\x0b\x52\xe0\xc2\x3d\xc8\x77\xda\x6e\x30\xdd\x1e\x06\x7c\xaa\xb6\x7d\xdb\x46\x25\x07\x4c\x1f\xfa\xcc\xb8\x11\x75\x7a\xfa\xe0\x4e\x09\x76\x26\xd5\x27\xce\xf7\xa5\x11\x26\xfe\x15\x8f\xbb\x76\xd6\x83\x98\x5c\xbf\xd2\x28\x07\x97\x1d\x7e\x2d\xf3\x20\x47\x9e\x0f\x71\x7c\x7f\xe7\xaf\x84\x6f\x00\x05\x3b\x60\xb0\x73\x89\x79\xb6\x3e\x9e\xda\x9b\x60\x5a\x1a\x20\x11\x9e\xf6\x23\xf9\xac\x71\xa6\x65\x56\xd8\x2b\x32\xf2\x80\x62\xb0\x7f\xd8\x95\xd7\x04\x40\xa8\xe1\x0d\xe3\xb5\xcc\xe0\x3d\x31\xfa\x0c\x18\x66\xbf\x36\x59\x5c\xe7\x87\xb9\xe4\xfc\xb3\x08\x78\xce\xde\x13\x29\xcc\x96\xe0\xf0\xde\xf4\xbf\x5e\
x46\x32\xc3\xa1\xfe\x6c\xbb\xcb\x0b\x7f\x5f\x5d\x3a\x41\x93\x35\x71\x74\x10\xe4\x83\xcb\x10\x98\xe0\xf4\x36\x8f\x57\x47\x60\x1f\x27\x67\xca\x1e\x30\xe7\xb2\xfb\xf3\xd3\xbc\xe3\x53\xb7\x1b\x34\x32\x84\xfb\xc3\x94\x7e\xbc\x13\xe3\x47\x1c\x1f\xdf\x1c\x47\x28\x00\x08\x0d\xcd\x01\x56\x84\xfe\xfe\x89\xbc\x76\x46\xe8\x73\xcf\x93\xc7\x1e\x96\x02\x31\x63\x1d\xce\x0b\xd1\x86\xaf\x53\x84\x8d\x81\xe3\xbe\xe5\x90\xfb\xbd\x97\xdc\x53\x43\x21\x75\xa3\xee\xcc\x71\x73\xda\xd2\x4c\xb8\x68\xd5\x5a\x08\xde\x38\x47\x0c\xa0\x18\x8f\x25\x34\xce\x72\x6e\x68\xcd\xd3\x3e\x13\x00\x14\x06\x86\x9d\xcc\x1c\xd6\x1c\xce\x48\x6d\x74\x34\x8e\x6e\x46\x4a\xe5\xb7\xc6\xc5\xcb\x27\xd0\x3b\x7f\xef\x6d\x9f\xf0\x33\x1f\xcb\xbc\x6d\x8d\x88\x75\x67\x00\x54\x62\x90\x2b\x08\x17\x1c\x9b\x70\x78\x60\xc7\x79\x3f\x22\x61\x98\xa6\xe7\x2e\x0a\xea\xaf\xa7\xea\xc3\x21\xf6\x19\x6b\xd1\xce\x0a\xd5\xeb\x0c\xbb\xb0\xcb\x0e\x32\xfb\x28\xaa\xce\x39\x4b\xb4\xef\x3a\x64\xee\x00\xd3\xa8\x9c\x1b\xb5\x7d\x85\x13\x40\x10\x06\xa6\x2b\xce\x73\x9d\xaa\x2b\xe6\xa4\x31\x70\xa6\x5c\x7e\xf2\xb3\xa5\xf1\xbf\x19\x2a\x7a\x15\xe2\x2a\x39\xa0\xfa\x1e\x30\x4b\xc8\xce\xcd\x77\xcd\x53\x22\x27\x56\x86\xa0\xfb\x65\x96\xac\x5b\xff\xac\x39\xa1\x09\xfa\x88\x9e\x8c\x8e\xed\x14\x8f\x99\x83\xf8\xaf\xbe\xa8\x74\xd3\xd4\x4d\x7d\x93\xe8\x98\x80\x4c\x29\xf2\x0b\x4f\x78\xa4\x0e\xa6\xa5\x80\x01\x3b\xb2\xf8\xb3\xde\xad\x7c\x9e\x50\x20\x07\x18\x01\x01\x49\x0e\xf3\x7c\x9a\x78\xa9\xed\x39\x21\x7d\xf4\x11\xdd\x6a\xbc\x35\x6c\x94\x4c\x74\xe1\x0c\xf1\x4c\x02\x0c\xd0\x33\x00\xf9\x14\x84\xb3\xe7\x65\x7a\x85\xe6\x3b\x41\xc7\x04\xd3\xe3\x37\x9d\xdb\xec\x5d\x8e\xae\x3c\x32\x1e\xd8\x54\xfd\x3c\x5a\xfa\x62\xbc\x8f\xeb\x62\x35\x1d\xfc\x6a\xdd\xf9\xa7\xe9\x0b\x04\xe6\x06\x41\x1a\x11\xcf\x81\xd6\x6d\xc9\xef\x3e\x0a\x99\x1b\x14\xd8\xd7\x98\x80\xe7\x01\x82\xd0\xea\xb3\x9d\x1f\xa7\x78\x27\x80\xce\x69\x5f\x25\x7f\xa0\x45\xd7\xc0\x0d\xdf\xb1\x56\xd6\x7c\xb0\xae\x76\x8a\xfa\x66\x8a\x67\x42\x0a\x19\x9d\x02\x20\xce\x76\x7c\x23\x70\xa7\x26\x7a\x02\x80\x27\x07\xee\x1e\x80\x8e\x46\x91\
xc6\x49\x87\x2f\x93\x22\xb8\x7e\x7a\x0a\x2f\xbc\x37\x8f\xb2\xb6\x7d\x93\x4e\x97\xbb\xc2\xcf\xdf\xbb\xe4\x3d\x7f\xc8\x0c\xa6\xb4\xd7\x71\x32\xc2\x63\xdf\x67\x33\xa4\xf8\x71\xe8\xf6\xd0\x00\x40\xdc\x7b\xb0\xbf\x28\xdc\x0d\x95\x99\x97\xf6\x5d\x9e\x26\x84\xa1\xc8\xf8\xe7\xf3\xa4\x9b\x85\x5c\xbc\x76\x88\x61\x4e\x7a\xeb\x1a\xf1\x73\x88\xde\xb5\x49\x22\xb2\xa1\xca\xf3\xcc\xf4\x1d\x02\xf2\x4c\x04\x95\xb4\x7c\x5f\xf2\xb0\x1f\xc0\x07\xcc\x45\x80\x7b\x25\xc8\x99\xf5\x1b\xba\x75\x26\x04\x81\xf3\x58\x96\xd8\xc6\x84\xb3\xef\xd3\xb8\xef\x16\x68\xf7\xdb\x37\xfc\x0c\x9b\xe9\xb1\xa5\x01\x41\x92\xeb\x44\x98\xb0\xa2\xc4\x22\xb5\x9e\x23\xe9\x08\x80\x09\xcb\xed\xf4\x7b\xab\xe5\x1d\x02\x80\x84\x02\xcc\x4d\x63\xf0\xde\x20\x5a\xd5\xb7\x9a\xd5\x0c\x40\xce\x31\xb6\x32\x02\x81\x88\x26\x75\x4d\x9c\x7d\xbd\xeb\xf2\xe0\x1e\xe3\x19\x01\x51\xe2\xf6\x7d\xbf\x55\x2f\x2d\x42\xd6\x09\x10\x5e\xca\x30\x37\x6a\x2f\xc4\xab\x66\xf4\x9f\x73\x7c\x89\x39\x77\x96\xa7\xd2\x4c\xd6\x4c\x10\x9b\xfb\xb4\x1f\xeb\x2f\x97\xe2\xa5\x70\xf1\x4f\x2a\xf5\x3d\x2c\x08\xf2\x18\x6a\x92\x52\x2c\xd9\xe7\x33\xec\xed\xb2\x2c\xdb\x06\xc0\xf3\x49\xd3\xec\xb5\x9b\x27\x3a\xca\xad\xc5\x30\xf7\x9e\x41\x84\xea\x13\x20\x03\xea\xbb\xe2\x77\xfa\x19\xf6\x8f\x85\x00\xa8\xd9\xe2\x95\xed\x5c\x21\x26\x6f\x81\xf6\x23\x66\x07\xf3\x0e\x00\xd3\xee\xe0\x52\xde\x5b\x85\x2d\x9b\x40\xd3\x08\x04\xa4\x28\xb8\x11\x16\x28\xc6\x42\xbd\xc5\x5d\x88\x68\x9c\x0d\x32\x89\xd8\xb5\x5d\xa4\xb9\xf3\xfb\xab\x2d\x0f\x94\x8f\xd8\xcc\x76\x99\xa7\x49\x94\xce\xd5\x9f\x15\xd3\x92\x44\x44\x9f\x8a\xe4\x6a\x98\x01\x30\x66\x1f\xec\xf1\x0e\x16\xba\x11\xbe\xdd\xe2\x6e\x3b\xaf\xdf\x79\x06\xd8\x07\x27\x63\xd9\xfa\xee\x11\x14\xf5\x4a\xf4\x82\xe1\x3a\xe6\x83\x97\x11\xc2\x4d\x75\x07\x2e\x03\x92\x34\x80\x62\xd6\x90\x22\x00\x40\x7d\xdf\xb7\x4c\x2b\xdb\x17\x08\x76\x40\x00\x00\x77\x00\x46\x1f\x30\x21\x09\x78\xd1\x80\xd3\x8a\x93\xf7\x2c\x55\xa6\x84\x0e\xf1\x4c\x2c\x69\xf5\xc4\xd4\xdf\x99\xa3\x22\x2b\xee\x84\xe5\x2e\xa6\x7d\x83\x74\xae\x7b\xbf\x2f\xca\x86\xed\x82\xa1\x96\x89\x1c\xe1\xa0\x41\
xf4\x24\x69\xae\xdb\xae\x7d\x92\x37\x9c\x33\x62\xa7\x53\x86\x20\xf6\x9b\x01\x6c\x18\xe4\x5c\x10\x0c\xcf\x9d\xb1\x29\xca\xf9\x56\x4e\x34\xef\x33\x10\xce\x2a\xc4\x99\x74\x5c\xab\xe9\x4d\x68\x76\x0a\x10\xa2\xcf\x01\x3b\x01\x70\xb5\x17\x51\x5a\xa6\xb4\xed\x04\x64\x08\x40\xa2\x4e\xba\x6d\xb9\xe3\x39\xc7\x41\x7e\x1e\x43\xb8\xf9\x2d\x4b\x05\x73\xbb\xc6\x1e\x36\x0b\xdf\x43\xcb\x9b\xd9\x07\x8d\xec\xfe\x03\x91\x94\x27\xed\xa3\x44\x57\x50\xf4\xe3\x3c\xc9\xf9\x13\xf4\x11\x33\x47\xdd\x89\x9a\x96\x9b\xd8\x45\xca\xd3\x4c\x9e\xe8\x5d\xdb\x9e\x24\x93\x75\xe7\x95\xb4\xca\x81\xe6\x33\x04\x35\xa0\x9b\x1b\x20\x34\x03\xa8\x8e\x99\x68\x7d\xd6\x93\x0c\xd7\xea\xe0\x86\x0c\xdb\xf4\x65\x87\x30\x4d\xe8\x3d\x0b\x56\x93\x14\xac\xea\xe4\x60\x98\xcd\xb4\x82\x39\x98\xde\x0c\x33\xd4\x42\xd0\x64\xba\xf0\xe4\x01\x0f\x15\xb9\xb5\x76\x80\x44\x3b\xd8\x76\x41\xaf\xed\xf8\xd6\xc4\x9c\x3a\x6d\xc3\xe0\x0a\x52\xe7\x7b\x75\x99\xa0\x4b\x63\x14\x1a\x13\x1e\xe9\x5f\xe7\xa9\xfe\x6a\x19\x94\x17\x19\x75\xe3\xc4\x95\x28\x12\xac\x35\x47\xee\x83\xf0\x5c\xd3\x14\x3c\x29\x0d\x65\x4f\xab\x67\x26\x7a\xab\x17\x84\xdd\x78\xaf\x5d\x23\x84\x58\xe9\xb0\x33\x9b\xa7\x89\x94\xf8\x76\x03\xd8\x25\x80\x05\x36\x80\x24\x35\x61\xd1\xe5\x4a\xea\xca\x53\x10\xa9\x6b\x1b\x3d\x35\x3b\xcc\x71\xe4\xda\x5c\xab\x73\xae\xce\x73\x52\xc1\x0b\xc2\x45\xc5\xba\x0d\xc8\xd3\x44\xd3\x80\x0d\xa2\x9f\x52\x80\xec\x35\x79\x3a\x40\xf1\xb0\x35\xd8\x01\xe2\xe6\x40\xed\xfc\xbd\xb3\x9b\x13\x03\x97\x76\x17\x28\x34\x6c\x7b\xbf\xed\x25\xe1\x77\x6f\x56\x98\x05\x6d\xd6\x80\xc4\x9f\x78\xd3\x32\x51\x8a\xbd\xff\xf1\x69\x11\xc9\x24\x53\x9a\x3d\xcf\xe1\xa5\x70\x9c\x1f\x2b\x1b\x59\x40\xc8\x3e\xf4\x6e\xa2\xe4\x8d\xbe\x7e\x12\xa4\xfd\x00\x36\x8b\xe4\x46\x75\x88\x6f\x55\x7e\x3e\xc4\x98\x61\xee\xfb\x9e\xf7\x3d\x99\xf7\x74\x9c\xf6\x9e\xb4\x34\x41\xe8\xeb\x63\xf0\x25\x32\xc7\xba\x15\xab\x01\x5d\x5c\xd0\x71\x5d\x2f\x8c\xb7\x6f\x6f\x53\x0c\x8b\x7e\xc1\x9e\x11\x82\x3d\xf6\x5f\x5a\x49\xaa\xef\x36\x6f\x8c\x01\xba\x91\x60\x7a\xa4\x2c\x77\x35\xb5\xd6\x81\x29\x94\x0b\xf8\xfb\x66\xf8\x38\
x4d\x50\x35\x27\x07\xf3\x35\x12\x80\x2d\x09\x1a\xf4\x87\xa3\x87\xcd\xaf\xf7\xca\x71\xf4\x28\x40\x76\xfe\x24\xd9\x44\xd3\xe1\xbd\x03\x88\xe4\xb1\x6d\x4a\x2d\xe5\x11\x38\xe3\xde\x14\xa2\x1b\x46\x1e\x47\x13\xcd\xba\xaa\x6d\x26\x9b\xaf\xb4\xe5\x4d\xb1\xe7\x85\x13\xc4\x34\x4d\x53\xd9\xce\xdd\xfb\xe8\x76\x20\x68\x31\x4b\xa8\xb7\x14\x4b\x08\xbd\x61\x63\xe6\xdc\x3d\xa0\xf2\x71\x58\x51\x8a\xbc\xb4\xba\x26\x70\x5a\xd6\x41\x66\xcc\x7f\xdc\xae\x83\x01\x7b\x3a\xa1\x35\xcf\x4c\x4b\x93\x14\x46\xf8\xf3\xb9\xf5\x6e\xb7\xf3\xcc\x4f\x69\xb9\x7b\xfb\x1c\xf1\xcd\x48\xb3\x34\x73\x56\x0d\x82\x37\x3a\x04\xd6\xaf\x2e\x95\xe3\x5e\x97\x5a\x0f\x2b\xc3\xdc\x84\x77\xe0\x11\x7b\x97\xd1\x34\xcd\x3f\xb8\xf5\x66\xbb\x2b\xd6\x68\xf9\x4e\x6e\xd5\x30\x76\x55\xa5\xb6\x2c\x47\x84\xe2\x08\x74\x1e\xe4\x52\xe6\x62\x29\x01\xbe\xe8\x0f\xce\x64\xe2\x8c\x58\x99\x28\x40\x08\x8a\xa2\xe0\xdc\xac\x1e\x93\xb9\x98\xbb\x2f\x75\x01\x61\xa0\x11\x9f\x0f\xb5\x12\x40\x6a\x55\x6a\x75\x7d\xbf\xef\xf1\xce\xcb\xf0\x18\xae\x1b\xf5\xd2\x5c\x9d\x59\xe8\x98\xb9\xc7\x71\x5d\xb7\xed\xc6\xbe\xbc\xfd\xdc\x61\xa2\x94\xac\x00\x21\x49\xed\x67\xa6\x5d\xc1\x71\x37\x49\x5d\x03\x80\x8e\x20\xb8\x53\xdd\x13\xe2\xe2\xf1\xcb\xd1\xdf\x1f\x04\x6e\xa6\xad\x13\xf9\xa6\x6e\xe7\x81\x31\xc4\x8e\x17\x84\xbf\x47\xec\x9b\xae\x03\xd6\x30\x6e\xf9\xba\xe0\x4e\xc4\x52\xcb\x50\x34\xbc\x3c\xeb\x62\x35\xfe\x21\x09\x2e\x9a\x7e\x4e\x0a\x0b\x5d\x7f\xcf\xf3\x09\xf3\x56\xaf\x45\xb9\xcf\xb3\x08\x84\x1d\x24\xc0\xb8\x2a\xe2\x54\xb5\x6d\x05\x40\x12\x00\xfe\xe4\xed\xbd\xec\x5f\x6a\x54\xc7\xcc\x89\x6c\xeb\xbe\x1b\x0c\xbf\x76\x44\x1c\x96\x59\x2f\x32\x48\x25\xf3\xbb\x3f\xcf\x04\x7b\x55\x7b\x96\x42\x0c\xb3\xa8\x64\x53\x02\x21\xcc\x91\xca\x34\x89\x4d\xcd\x8a\xeb\x3a\x3d\x99\x02\x71\x18\xcf\x4c\xc9\x1a\xc1\x12\x59\x85\xf1\xab\x15\x17\xb5\xcf\x00\xf5\x1d\xc0\x00\x0b\x14\x3e\x56\x49\xcf\x0c\xc1\x26\xfa\x46\x9b\xda\xbe\x8b\x5e\xcf\xe0\x16\x48\xb0\xe3\x4c\xba\x03\x6c\x66\x20\xf2\xa7\x56\x0e\x68\x8f\x07\x0e\x37\x64\xf5\x1e\x63\x90\x8a\x46\x37\x82\xd1\xe6\xf9\xbb\x69\x86\x9e\xdb\x5a\
xae\x3d\x3a\x47\xc7\x22\xc4\x9e\x77\x10\x4d\xe5\x43\x25\x70\x71\x76\xaf\x1b\xe0\x1a\x46\xb4\xc9\x65\x0c\xc8\x4e\x37\x89\x2e\x40\x84\x49\x37\x02\xe4\x60\x50\xff\x78\xad\x0d\xa0\x6b\xbc\x17\x4d\x6c\xf4\x36\x27\x2c\x11\xad\xd1\xea\xcf\x0c\x33\x0c\xc3\x96\x28\x6d\x2e\x7d\xf4\xf9\x4b\x26\xaa\x01\x36\xb3\x3c\x0e\xca\xf7\xb0\x9d\x29\x31\xc0\xee\xef\x2e\x46\xbe\xcf\xfd\xb7\x1f\xc5\x71\x3a\xac\x53\xc2\x30\x7c\x54\x39\x0e\xc6\x7f\x6b\x10\x64\x00\x10\x1b\x12\x6d\xcb\x2e\x3f\x93\x5b\xbd\xa5\x83\x4d\x3c\xe9\x48\x84\x67\x93\xf1\xe6\x12\x73\xdf\x80\x53\x19\xf5\x16\x6d\xae\x71\xbf\x57\xe7\x14\x1d\xf4\x4e\xcf\x75\x5d\x0d\x45\x0e\xbd\x64\x41\xcf\x82\x1d\xba\xae\xeb\xfe\xb1\x78\xfb\x12\xd8\x6d\xa1\xab\x45\x05\x8b\x9c\xba\x37\x23\xba\x9a\xc2\x30\x34\xbe\xad\x2d\x72\x46\x4c\x82\x68\x02\x36\x9d\x99\x1f\x8a\xc0\xab\xbf\x96\xee\x0b\x55\xf8\x81\x39\xc3\xd6\xc7\xa5\x71\x5f\x54\xb5\xdf\xa7\x7e\xab\x37\x7f\x1c\xdd\xfb\x68\x9a\xa5\x6d\xe4\xd1\xb3\x42\xb7\x75\xdd\x04\xef\xb2\x18\x45\x7a\x98\xcf\x2c\xd8\x67\x1e\x99\x67\x1a\x99\x7c\xad\xa2\xd6\x08\x47\xf6\x89\x5e\xe6\x2b\x96\xde\xe2\x28\x00\xb0\x46\xc4\x81\xf0\x62\xe0\xe4\x2c\x91\x40\x64\x9b\xbd\xb6\xcc\x1c\x55\x0a\xb4\x7f\x9a\x57\xcf\x42\xf8\x10\xfe\x0e\xfa\x95\x21\x69\xb5\x7e\x96\xf8\x29\xef\x79\x06\x5d\x4d\x83\xd4\x83\x4a\xd6\x37\x41\xb7\x36\x84\x27\xe3\xdf\x6d\x2f\xd8\x51\xcc\x7c\x11\x04\x31\x1a\x79\xa7\x10\x7b\x70\xae\x6d\x7a\x00\x96\x92\x7f\xf0\x17\x19\xa5\x8b\xfd\x53\x5f\xd3\x3c\x4b\x79\x79\x80\x56\x30\x39\xff\x0b\x8a\x07\xc7\x59\x92\x84\x9e\x47\x02\x02\xc5\x89\x20\xdc\x87\x88\x46\x00\x9a\xee\xb1\x89\xf8\xca\xd7\x3e\xb2\x1c\xb9\x4f\x89\x1d\x59\x11\x7f\x54\x8d\x5f\x2f\xac\x95\xa8\xd9\xf9\xd8\xaa\x6e\x17\xee\x11\xe3\xcb\x4d\xc3\x37\x40\xe0\x1e\x8e\x94\xcc\xab\x38\x7f\xf7\x47\xde\xea\xea\xc7\x63\x03\x3c\xcf\xe7\xfb\xfe\x7d\x52\x0c\x39\x79\x1f\xed\xc6\x3e\x6f\xfb\x66\x30\xa8\x9f\x4b\xc3\x4d\x89\xb1\x62\xf9\x5d\x62\xdd\xb5\xe4\x24\x62\x54\xf7\x7d\x63\x3b\xf5\xfc\xec\xfb\xd6\xf3\xef\xbe\x78\x28\xc2\x43\xff\x8d\x61\xed\xb3\xed\xfa\x87\xee\x59\xd8\
xe3\x3a\xff\x3a\x32\xae\xb6\xc1\xe5\x80\x49\x6e\x99\x04\x4d\x75\x10\xe4\xb4\xfc\xd6\x93\x3b\x02\xc0\x50\xa4\x86\x21\x9c\x21\xf8\xb6\x0d\xdb\x09\x42\x4f\xa2\xad\x33\xc4\xe9\x62\x79\xd4\x16\x2a\x0a\xd6\x0a\x94\xfb\xa1\x7f\x67\x9f\xef\xd8\xd6\x74\x94\x63\x18\x90\x5e\x2f\x35\xd3\xfd\x79\xa1\xdc\x34\x1b\x73\xb9\x9d\x7a\x58\x90\x52\x6e\x18\xef\x25\xbd\x97\xb2\x83\x4e\x92\x65\xd7\x11\x84\x5c\x0a\xa3\x54\xc2\x40\x94\x00\x04\xf4\x01\xa2\x94\xa5\x89\x7d\x1d\x2a\x20\x8d\x3c\xe0\xe4\xe1\xb2\x7e\xb1\x6b\x35\xbd\x98\x09\xa8\xf0\x43\xa3\x18\x3b\x14\x7b\xec\x33\x14\xad\x7d\x6a\x57\x61\xd0\x52\x35\xca\xfe\xbe\x51\xeb\x15\x72\x27\x42\xe4\xef\xa1\x7d\x0b\xc4\x3d\x90\xfe\x97\xb6\xad\x56\xbe\xa9\x68\xf5\x0e\x80\x0a\x9c\x7d\x55\x5a\xba\x6f\x32\x3d\x4d\xc3\xfc\xff\xbf\x7f\x33\x70\x1c\x67\xb0\x2c\xc6\x78\x61\xe7\x4f\x89\xa2\x83\x6e\x9c\x2a\x76\x72\x9a\x86\x6d\x58\xa0\x96\xf5\xe0\xbe\x44\x67\xbe\x53\x52\xe7\xbe\x0a\xc1\x50\x9c\xe0\x0c\xaa\x95\x32\x19\x05\x41\x10\xea\xa6\x89\x9f\x53\xd4\xdf\xbb\xeb\xa6\x56\x16\x7e\xa7\x7f\xfd\x42\x85\x80\xc9\x03\x86\xd9\xab\x42\x7a\x77\xbc\x60\xbf\xbd\x0a\x02\x40\x1e\x29\xe5\x76\xc9\x98\x65\xf3\xe8\x03\x2b\xd5\x1b\x27\x25\x96\x5e\xf7\xe3\x8d\x88\x35\x4c\x16\x9c\x99\x3f\x55\xfb\xfd\x3d\x58\xd0\x2c\x68\x04\xef\xef\xc7\x4e\x55\x04\xd9\xde\x4c\xad\x9c\xdf\xc7\x11\xfc\x62\xc0\x8f\x8a\x5f\x75\x41\x73\x02\x28\x70\xd5\xee\x29\xf2\xad\x1a\xe1\x2d\x86\x55\xcb\xbe\xfc\xbe\xc7\x56\x17\x67\x20\xf6\x12\x5e\xc4\xde\xe1\x59\x62\x08\xf5\xf8\x16\x9c\xf1\x21\x3d\x65\x0a\xb0\x89\x71\x76\xd2\x29\x1e\x90\x46\x49\x94\xf8\xf3\x8f\x15\xfa\xcb\x51\x8d\x5f\x3f\xcf\x93\xf0\xe3\xf9\x8f\xfd\xbc\x13\x64\x4b\x04\x76\xec\x0d\x01\x98\xac\x8d\x3e\xed\x1c\x33\xe2\xdd\x60\xb8\xf1\x0c\xb3\xe1\x20\x0e\x16\xee\x85\xfd\x20\x75\x0e\x53\x05\xc7\x99\x7a\xe7\x71\xb0\x88\xc6\x39\x30\xce\xcc\xe7\xc6\x6c\xb4\xd2\x42\xc0\x27\xae\x10\x59\xea\x2f\xe7\x6d\x68\x71\x48\x21\x42\x39\xae\xeb\x8a\x07\xa0\xb1\x76\x67\xaa\x4f\xa8\xcd\x71\x46\x30\x64\x97\x83\xe0\xa0\xef\x8f\x3a\xb5\x39\x73\x3e\x6e\x24\x1c\x8d\x0e\x89\
x3c\xef\xc6\x93\x24\xcb\x69\x09\x04\x2a\x27\x16\xc4\x04\x89\x01\xb2\xc8\x5b\x75\x84\xcb\xfb\xd5\xda\x3b\xbc\x85\xad\xb6\x43\x10\x84\x55\x1f\xdf\x14\xc2\xd8\xf7\x7d\x62\x87\x69\x30\xee\xe6\x8e\x72\x8f\x1b\x22\xec\x2b\x1f\xb5\x37\x76\x25\xe5\x4b\x32\x75\x06\x92\x3e\x7e\x2f\x8c\xee\x1c\xbd\x89\xfa\x39\x18\x6f\x82\x42\x58\xed\x59\x8a\x07\x9c\x56\x77\x20\xf2\xcd\xe6\xec\x29\xb3\x91\xc9\x68\x7e\x67\xb5\xe2\xbc\xb3\xbb\xe7\xcf\xee\x75\x12\xfa\x8b\x33\x76\xf0\xaa\x7e\xa0\x14\xda\x9f\xb7\xe7\x92\xa1\x6e\x0a\xf0\x46\xbe\xf6\x2b\x18\xcd\x88\xea\x42\xd2\x20\x80\xf1\x42\x9c\x57\xf9\x96\x9d\xf3\x8d\x01\xaf\xef\x77\xe6\x43\xcc\x39\xce\xfb\x77\x12\xd1\x08\xb5\x4d\xf8\xda\xe6\x53\x2d\xfc\xf6\x54\x5f\x6f\x56\x7a\x1b\x04\xf6\x69\x91\x8f\x51\xe4\xbb\xa0\x84\x44\xbc\x93\x5f\xde\x18\xeb\x37\x62\xec\x0c\x87\x20\xb4\x71\x22\xba\x13\x74\x7c\x37\x4e\x21\xc8\x34\x00\x51\xaf\x72\x9c\xb1\xfe\x24\x2c\x02\x16\xde\xe8\x95\x41\x35\xf0\x55\xc8\xf6\x8d\xa7\x21\xf6\x61\xdb\x47\xff\xbb\xd7\xf4\xe6\x0f\x1c\xbf\x12\x24\x9f\xb3\x2e\xfa\x06\x39\xa4\x13\x90\x41\x85\x44\x1e\x8d\xa2\x72\xe2\xc5\x0e\x1a\x93\xa3\x15\xde\xd1\xd9\xc9\xe6\xef\x9c\x30\x76\x2e\xe1\x4a\x55\xfe\x5a\x3d\x0d\x86\xfb\xbe\x19\xe2\x1c\xa3\xf5\x59\x02\x9f\x01\x46\x9a\xa7\x29\x6f\x85\x72\xf0\x18\xbf\xd2\x6f\x2e\x7a\x95\x53\xd4\x0d\x08\x54\xdf\x06\x2f\x80\x44\xa2\x20\x9d\x2d\xbb\x37\xa7\x66\x1e\x3e\x74\xae\x00\x39\xcb\x2a\x94\x58\x0a\x2f\xf6\xa1\xc6\xfc\x39\x43\x70\x33\x04\x2a\x96\x59\x38\x6f\xab\x61\xec\x3e\xda\x4e\x44\xcc\x71\xf4\x36\x54\x45\x8e\xc0\x32\xe5\x86\x8c\x93\x1d\x8e\x6a\x14\x70\xb5\x11\xfc\x3e\xff\x79\x1e\x51\x1f\xa7\xe8\xd4\x25\x53\x00\xa0\x50\x05\xc1\xf0\x19\xb6\x7c\x99\xd2\x6c\xa1\xab\x2d\xf2\x0f\xa4\xa1\xd3\x6d\x83\x82\x81\x92\xcd\x7b\xb4\x2a\x87\x97\x4f\x4f\x05\xcf\x2f\x86\xa5\xbd\x43\x5b\x8e\xf1\x69\x16\xd8\xf4\xf9\x3e\xdd\x86\xbf\xe7\xea\xce\x7c\x53\xe2\x3e\x1e\x7f\x7e\x7e\x3c\xaf\x62\xfe\xab\xbd\x78\x28\x4f\x62\x65\x72\x94\x7b\x60\x9a\x8d\x1e\x41\xbe\x32\xd9\xae\x50\x1f\x31\x55\x2c\x02\xc5\xca\x87\x2c\x26\xba\x00\x18\xe9\
x7b\x20\xbd\x53\xb5\xb2\xe3\x56\x92\xa9\x4b\x8f\x9b\x15\xf0\xb5\x8d\x8c\x13\x55\xb1\x53\xfa\xc8\xa3\x49\x21\x94\xc6\x67\x05\x99\xa9\x06\xb0\x5c\x17\x3b\x91\xdb\x76\x7d\xf5\x70\x06\xed\x07\xa3\x63\xdf\x7f\x3c\xf3\x5e\x4e\xc6\xdd\x48\x9f\x01\xe4\x66\xed\x66\x34\x78\xc1\x9f\x79\x36\x43\x34\xc1\xa0\xe5\x49\x9e\xd5\xa2\xe8\x88\x78\x05\xe0\xea\xb2\x5c\x54\xca\xc8\x18\xa7\x96\x88\xd3\x5b\x6c\xbb\xcf\xf3\x27\x77\x22\x27\x24\x13\xbe\x1c\x70\xa3\xe4\x56\x26\x23\xf2\x7c\x04\x79\x30\x31\xc5\x55\x72\xbf\xf5\xe8\xf9\xd1\xec\xf7\xc9\x42\x02\x22\x6f\x71\xff\xd4\x1c\x40\xd1\xca\x55\xf2\x2d\xb3\x05\x22\xa3\x7c\xf5\x7c\xbd\x13\x14\xf6\x02\x37\xaf\x1b\xee\x04\xf3\x47\x6a\x76\x1b\xc1\x71\x04\x32\x9a\xa4\xc8\x8e\x7b\xd6\x01\x6e\x10\x00\xd4\x4f\x3c\xec\xaa\xf5\xe1\xef\xd3\x74\xb1\xbb\x9e\x23\x37\xce\x10\x09\xb9\x0c\xcf\x6b\x2f\xb2\xdf\x79\xd7\x09\xf3\x18\x23\xf2\xac\x16\xe8\xdc\x8e\x32\xd5\xf0\x5d\xa1\x04\x88\x2d\x18\x58\x18\x7b\xec\xb2\x7c\xde\xce\xdf\x9a\xaa\x0b\x77\x2b\xcb\xdf\xf3\x3e\x33\x7f\x8e\x99\x9e\x92\xa6\xdd\xb5\xaa\x4e\x26\xd7\xfa\x24\x49\x96\x45\xda\x82\xf5\x87\x6f\x87\x5c\xb4\x4f\x13\x3a\x80\xcc\xf6\xfc\x8a\xcf\xf0\x2b\x34\xbf\x9e\xfd\x15\xbf\xef\x1e\x58\xb2\x1c\x91\x7b\x33\xef\x5d\xd7\xf5\x5d\x8e\x00\x43\x41\xaf\xfe\x8c\x8b\x1e\xc7\x05\x3f\xf1\x55\x7c\x0f\x68\xd8\xef\xd3\x7a\x0d\x3a\xb6\x72\x76\xfe\x83\x0b\x56\xb8\x6a\x72\x70\x35\x34\x18\xe6\x99\x61\x70\x74\x8b\xfc\x8a\xf0\x4a\x04\xb9\x7a\x40\x03\xfe\x9c\x5c\xf5\xa9\x0e\xde\xaf\xa7\x10\x5f\xec\x3b\x50\xa6\xb1\x4c\x73\x79\x8d\xcd\x1f\xac\x0a\x12\xf4\x27\x45\x29\x6a\xa9\x95\xb5\x44\x67\x05\x0a\x58\x46\xcf\xa5\x50\x11\x8b\x19\x89\x2e\xc9\xa3\x92\x7d\x46\x1c\xba\x56\xdd\x24\x86\xf4\x2c\xec\xa7\xbf\x82\x9a\xf5\x6f\x36\x20\x12\x4c\xa9\x9c\x55\x73\x8a\x9f\x77\xc2\x56\x21\x9b\xe9\xfd\xd5\xc8\xef\x8c\xf0\x26\x8c\x20\x18\xf7\xa4\x82\xc1\xb5\x1f\xb6\xcc\xff\xed\xa1\xbd\x9f\xa8\x41\x80\x6b\x32\x51\x26\x3d\xfd\x99\x56\x31\xcd\xf6\xd3\x0d\x88\xb5\x41\xe1\x6a\xf5\x69\x9d\xae\x21\x7a\xa4\x74\xb0\x78\xed\xc8\x99\x51\x57\x84\xde\x15\x7f\xeb\
x26\xc1\xa2\x43\xd9\x80\xed\x19\x26\xe0\x4b\x0e\xb1\xbf\x54\x77\x9e\xec\x9a\x46\x5f\x08\xd3\xbe\x13\x4c\x99\xe3\xb2\x09\x79\x41\x20\x5a\x34\xe9\xe5\xc9\x7b\xe8\xe4\x2f\x26\x37\xf5\xa7\xfa\x2f\xa7\xfb\xf6\x11\x33\x73\x37\xb8\x68\x91\xcf\x0a\xd6\x89\x0a\x0d\xdb\x00\x7c\x08\x04\x03\x27\xca\x99\xbb\x60\xe4\xd5\x3a\x60\x01\xb2\xd6\xb5\x78\xe9\xf5\xd3\x07\x2d\xdf\x80\x83\xa3\x21\x73\x57\x93\xfb\xdd\xc7\x33\x9b\xe6\x29\xa3\xe1\xaa\xea\x61\xd0\x27\x0b\x6d\xf4\xb7\x50\x22\x15\xb6\xf4\xb5\xbf\x72\x19\x1e\x6b\x04\x4a\x0e\x5b\x68\x2b\x61\x10\x10\x75\xbe\xa6\x79\x92\x18\x28\x4f\x15\xf2\x77\x07\xb2\x3b\x63\x8f\xbf\xba\x38\x51\x7a\xb7\xd1\x3e\x5f\xcc\x4e\xc0\x22\xeb\x27\x5a\x7e\x68\xe2\x08\x55\x68\x7c\xff\xf8\x0b\x44\x78\x3b\xb8\xa6\xf7\xe1\x5f\x4d\xed\x43\x3b\x25\x18\xe1\x26\x1b\x27\x7b\x1d\xe9\xb3\xda\x05\xb2\x89\x72\x95\x0e\x9a\x9d\x45\xe9\x60\x6f\x55\xff\xba\x54\x72\x0e\x3b\x4d\x0f\x0e\x92\xd0\x6d\x0f\x83\x34\x06\xb5\xd4\x77\xdb\xd0\x73\x7b\xd9\xfb\x7c\xbe\x07\x9f\x03\xe5\x10\xef\x95\x60\xaa\xb9\x1a\x57\x5f\x51\x66\xa2\xc1\xda\x75\x0b\x2b\xf2\xf1\x02\xfb\x3f\x1a\xdf\x67\xd7\x38\x47\xc7\x30\x3d\xb1\x0f\xc2\xf3\xb0\x64\x7d\x88\x94\x57\xce\x26\x82\x01\x6b\x0f\x5b\x46\xf4\xa3\xda\x0b\xa3\xdf\x46\x4f\xcc\x34\xae\x08\x75\x45\x0c\xce\x7a\x11\x48\x78\xe7\xac\xd3\x1a\xd6\x38\x51\xee\x0d\x39\xab\x47\x2e\x12\xdd\xeb\xda\x9f\x95\x8d\x1c\xd7\xff\xa3\xea\xba\x76\xdd\xe6\x99\xed\x03\xe9\x42\xbd\x5d\xaa\xf7\xde\x75\xa7\x2e\x59\xbd\x59\xe5\xe9\x0f\x92\x2f\xd9\xf9\x8f\x01\x23\x31\xe0\x4d\x8a\xe6\x70\xca\x9a\xe1\x9a\xef\x79\x88\xe7\x81\x5a\x84\xb8\xfb\xd5\x37\x02\x69\xea\x18\xa6\x84\x14\x31\xa1\xa8\xae\xec\x8c\xb5\x13\x00\x33\xcf\xa6\x8a\x84\x21\xff\xdd\xbb\x95\xb4\x1b\x61\xc0\xf3\x1b\xd1\x7d\xaa\x79\xd8\x45\x7e\x30\xdb\xad\xcb\x5f\x72\xf0\x4b\x17\x4b\x61\x7f\xc5\x65\x85\xe6\x31\xfd\x42\xd8\xfe\x15\xab\xc5\x9d\xd8\x0a\x99\xb3\x63\x72\xd0\xca\xae\x8a\x33\x7c\xc2\x0d\xfc\xe2\xe5\x39\x26\x57\xb0\xd0\xa7\x7a\x03\x26\x00\x02\x08\xba\x85\x1d\x30\xd9\x06\x22\xd3\xb6\x0f\x41\x14\x40\xd7\xfe\x2d\x3c\x95\
x6e\x61\xef\xb9\xad\xb8\x5d\xdb\xf7\xb9\xd2\xb1\xf6\x61\xd8\xf7\xc7\x07\x95\x12\xef\x13\x64\xbf\x2c\x87\x7d\x53\xb9\xd4\x7d\x76\xe3\xa0\x0f\x18\x20\x10\x74\x1b\x16\xd1\x05\x35\x51\x62\xa1\x1c\x00\x27\x7b\x02\xe6\x5a\xe2\xf9\xbd\x53\xd7\x02\x3c\x14\xf3\x0b\xfa\x09\xf6\x64\x07\x62\x66\xfe\x83\x12\x37\xf8\xa2\xa2\x5c\x58\x1a\x5e\xa2\xdf\x2f\x08\x21\x1f\xaa\xe6\xb9\xf4\xd9\x72\x3e\x23\xf3\xfa\x8b\xf8\x1e\x7d\xfa\x6b\xe8\xdd\x2e\xac\xe2\xa4\x3a\x4c\xe0\x2b\xdc\x88\x9b\xa6\x83\xc6\x18\xd4\x0f\x07\xe2\x28\x5c\x84\xfc\x05\x01\xbe\x7a\x8a\x51\xc5\x76\xa0\xdf\xf2\x68\x38\x8e\xf3\x9b\x45\xc8\xb1\xa4\xea\x98\x95\x89\x6e\xe7\x1e\xbe\xd4\x35\xaa\x0b\x9f\xf1\x0d\x88\xcf\xd6\xf5\x40\xe5\x94\x22\x00\x41\x09\x2d\x77\x5a\x1d\x6e\x23\x42\xd8\xea\x16\x75\x1f\xc8\xf7\x0f\xe9\xfc\x9e\xf5\xa6\x69\x0f\x31\x71\x20\x48\x4d\xfe\x9b\x02\xd4\x31\x4d\x2d\x74\xd0\x1d\x44\x77\x54\xec\xa1\x47\x8e\xac\x25\x88\xb3\xb2\x21\x4f\xca\xbf\xb8\xc8\x5e\x45\x87\x50\x71\x92\xbc\xcf\x7a\xe4\x3e\x00\x49\x47\xbf\x1c\xcf\x81\x20\xc8\xba\x50\x51\x9f\x88\xdc\x4a\xef\xb8\xfc\xad\xc8\xe4\xac\xce\xe1\x4d\xcf\x0f\x14\x76\x6e\xda\x6f\xe0\x0c\x9c\xd5\x06\xca\xc6\xb8\xe2\xd0\x7b\xd3\x05\x05\xac\x77\x3e\xc9\x52\x00\xcb\xed\x17\x88\xc6\x11\xce\xf6\xd9\x44\xb5\x13\xef\xb2\x69\x22\xcf\xf3\xb7\x9f\x39\xef\xd7\xc8\xff\x3a\x33\x20\x5d\x0c\xf6\xaa\x6d\xda\x97\xe1\xa7\xad\x21\x7f\xf8\x7b\x8d\x6b\x95\x2f\x10\x7c\x1b\x74\x9f\xd8\xb2\x15\xf3\x4d\x4c\x48\x39\x27\xcd\xb0\x02\x69\x50\x87\x87\x5b\x00\xbf\x29\x72\x12\x50\x65\xc9\xbf\xce\x71\x7f\x88\x28\xa4\xe7\xa1\xff\xa5\xd6\xe2\xa8\x0d\xb8\x86\xd3\xd1\xb6\x36\x04\x20\xcc\x14\x0a\xb7\x4e\x94\x32\xe8\xab\xf3\x7e\xb9\x67\xd2\xb2\xe3\xcc\x0d\x9e\x50\xd3\x53\xe7\xb2\x6d\x24\x80\x27\x50\x41\x4c\xae\xd8\xda\xc6\x4e\x58\x1f\x12\xef\x12\xe0\x2e\xee\x0b\xd3\xff\x71\x09\x2e\x4b\x69\x2f\xf1\x4b\x92\xa0\x8e\xd3\x00\xa0\xde\x41\xba\x68\x27\x8d\x9c\x19\x41\x00\xc0\xa1\xea\xf0\x22\xf2\x53\x2c\x43\x84\x2e\xc4\x26\x42\x55\x52\x3b\x47\x29\xff\xd4\xdf\x5a\x1f\x8e\x73\xbe\x85\x3b\xbe\xf6\x12\xdd\x68\x30\x41\
xb9\xf7\xa4\xde\x31\x28\x61\x04\xc0\xb6\x55\x6a\x9b\xd0\xa4\xfd\xee\x8a\x2a\x10\x31\xf5\xf2\x44\x72\x88\x1a\x03\x4c\x31\x9e\xca\xbe\xe3\xa9\x2e\x81\x87\x20\x71\x49\x1d\xd9\xb7\xf9\xfe\xf3\x97\x86\x03\xcd\x89\xcf\x17\xa5\x9a\x9e\x1a\x0b\x5b\xee\x3b\x71\x3e\x55\xe3\xe5\xec\x1b\xb5\x0f\xb3\x02\x68\x80\x47\xbf\x5a\x55\xd1\xb8\xbe\x9e\xf2\x7b\x00\xa0\x1a\xc6\xf1\xe7\x5b\xa2\x0f\x18\x5c\xec\x46\x41\x30\x80\xa5\x8b\xd1\x8f\x9e\x61\x48\x29\x71\x00\x66\xae\x9b\xd8\x0c\xc2\x67\xac\x9f\x04\x40\x03\x1b\xac\x0f\xdd\xb5\xe0\xcf\xe1\x99\xec\x56\x1e\x5f\xd4\xd4\xc3\x13\x55\x5d\x45\x2d\x82\x7c\xa0\xeb\x35\x1b\xf1\x7d\xf0\xf2\x85\x02\xb3\xd0\xfc\x04\x47\x6a\x29\x1c\xfa\x53\xab\xa4\x7e\xec\xa7\x24\x80\x29\xff\x22\xaf\x0d\xe1\x23\x85\x22\xf4\x37\x95\x94\x51\x6a\xdd\xb3\x85\xac\x8e\xa0\x71\x55\xd7\x61\x18\x1c\xcb\xed\xab\x67\xe3\xe6\x11\xb5\x9b\x03\x60\xfa\x45\xf2\x05\xce\xa5\xbb\x78\x59\xb9\x3a\x23\x92\x44\xd6\x68\xf2\x45\xd9\xa8\xc8\xb2\xa8\x10\xbd\x30\x05\x1a\xcc\x23\xfc\xac\x4e\x20\x0c\xd1\x14\x2e\x83\xd5\x0b\xc5\x7c\x02\x2d\xbf\xc7\xd5\xec\x40\xc0\x7c\x47\xd3\x43\x5b\x92\x20\x8b\x84\x50\x9c\x6d\x39\xe6\x63\x36\x08\x73\x74\x69\x1b\xe8\x14\x31\x5a\x65\x0c\xfc\x27\x4f\x63\xa8\xc3\x65\xd9\x2f\xb5\xec\xe8\x42\x4b\x40\xa7\x89\x68\xec\xe3\xe8\xca\x5d\x19\x3f\x37\x70\x38\x9c\x11\x9c\xc1\xd9\x37\x46\xe9\xd4\x41\x61\xfa\x29\x68\x5d\x63\xfd\xec\x88\xbf\x36\xfd\x29\xd1\xd6\x30\xdb\xbe\xa8\x97\x12\xf2\x41\x5a\x49\x27\x51\x3a\x24\x4a\x9e\x2b\x39\x43\x41\x28\xcf\x36\x3f\x2e\xb2\x50\x0c\xc3\x78\x92\x71\x3c\xfa\xc6\x9b\x9e\xa7\xb9\x16\xd2\xd7\x79\x12\xf5\xa6\xa1\x25\x7d\x22\x64\x52\xd9\x32\x1b\x3e\xca\xba\xd9\x44\x7c\xf8\x61\x38\xe0\x68\x1c\xc7\x83\x59\xd1\x1c\xc2\x54\xff\xf4\x5a\xd6\x11\xe0\xaa\x8f\x30\x90\x8e\xcb\x02\x00\x24\x65\xf3\xb6\x6b\xfb\x10\xa0\x5b\xd5\x4b\x00\xdb\xa2\xc3\x2b\xb4\x3d\x25\x73\x8d\x14\x54\xd9\x28\xde\xc1\x00\x59\xb6\x18\x55\xbc\x51\xf6\x2e\xed\xe7\x7d\xbd\x2f\x1a\x97\xf0\x1d\x59\xdd\x4a\x1c\x2a\x98\x93\xf8\xa2\x5b\xce\x4b\x84\x63\xbb\x9e\xdb\x1a\x7a\xdf\x78\xcb\x4f\x4c\x65\
xc1\x46\x72\x56\xb1\x3c\xc1\xf4\x44\xbf\xe8\xd4\x0d\xae\xfe\xb6\x70\x19\x07\xb0\x59\xd5\x38\x6e\x48\xeb\x50\x1c\xb1\x6e\xd1\xa3\xec\xc8\xd6\x4f\x7f\x21\x9e\xf3\x99\x17\x91\xf9\x16\x27\x6e\xc4\x7d\x2b\xb9\xc5\x6a\xf1\x2d\x96\x07\x58\xd7\x6d\x79\xf0\x64\x86\x57\x38\x83\x8f\x3c\xd6\xf6\xf1\x45\x4e\xb7\xa9\x24\x7f\xb8\x66\xa9\xfe\xe5\x73\x09\xa8\x3e\x80\x49\x9c\xc0\x74\x54\xa0\xa0\xa1\x2d\x6a\x12\x94\x58\x86\xfb\xb9\x0f\x96\x15\xd4\xab\x5f\x49\xdc\xb2\x75\xfe\x46\xdb\x81\xc0\xc5\x1c\x8a\x6e\x10\xa6\x5f\x3a\xed\xf7\xa9\xef\x3d\x53\xa6\x6f\x0a\x2b\x2c\x7f\x36\x22\xf1\xcb\x06\xa3\x77\x48\xbe\xda\xf7\x5c\x43\x6d\x2f\xb8\x95\xe1\x8a\xc6\x7c\x69\x3a\xf2\xfb\x5e\xa1\xf3\xc8\x85\xf9\xc3\x1d\xc4\x5d\xe3\x42\x24\x48\x8e\x62\x70\x45\xc9\x75\xca\x15\xa3\x3a\x67\x72\x7b\xf9\x60\x5e\xd6\xed\xae\xd1\xc9\x77\x7a\xc3\x67\x89\x8e\xcd\x43\x46\x8e\x5a\x64\x17\x2a\x6c\x9a\xbd\x28\xc8\x34\x5e\xc3\x17\x17\x74\xb2\x97\x6e\x4b\x3f\x07\x01\x56\x0a\x90\x27\x7d\x3b\x0d\x04\x7e\x9c\x1c\x15\x1f\x4b\xed\x8f\x71\xae\xf5\x49\x19\x6b\x43\x76\x44\xdb\x97\x27\xc1\x2a\xc9\x2a\x6f\x0d\xad\x69\x83\x10\x1b\xe3\x59\x0c\xab\xac\x27\xcd\x5a\x50\x7c\x23\x0d\x8d\xf0\x33\x1d\x3f\x54\x49\x36\x19\x78\x9f\x26\x84\xa2\x79\xd7\xb2\x16\x69\x33\x49\xf8\x53\xdf\x62\x28\xe5\x6e\x7e\xbf\x14\xf7\x50\xa2\x43\x88\x28\x58\x6c\x29\x26\xbd\x20\x51\x5a\x23\x61\x87\xed\x41\x7f\x33\xe4\x37\x0b\xcb\x4c\x0c\x68\x88\x7d\x43\xbc\xbd\xf1\xc1\x50\x7b\xa0\x22\x79\x98\x20\xcd\xee\xc6\xa8\x5f\xfe\x4c\xaa\x63\xb2\xe7\x6d\x65\x0f\x22\x35\xbb\x5f\x5c\xd8\xad\xa0\x6f\xba\x70\x06\x54\x72\x3b\xac\x39\xe1\x66\xc8\xba\x0e\x68\x08\x95\x1c\x9e\xc3\xc7\x5b\xa8\xb2\xaa\x59\x2f\x76\x17\xd5\x9b\x50\x95\x2d\x7f\xc0\x1a\x99\xfc\x8d\xcc\x9c\x9a\x0b\x5a\x6f\x38\xf2\xd8\xc4\x62\x8e\x22\xe0\x12\xd1\xcf\x13\xcb\x2c\x78\x85\x5b\xb7\xf9\xa9\xfb\x34\x98\x02\x3d\x16\x10\x33\x3a\x37\x01\x6d\x0a\x1b\x1b\x08\x39\xea\x38\xc3\x96\x80\xc0\xde\x6c\x5c\xb6\x08\x5e\xf2\xff\xda\x4c\xf6\x89\xbb\x7f\x55\x41\x39\xe4\xda\x46\x39\xe2\x0b\x82\x01\x51\x5a\x93\xbf\xa0\x56\xca\x2b\x43\
xd9\x67\x47\xd6\x7e\x30\x44\x0b\x73\x97\x3d\x42\x4b\x92\x41\xa4\x39\x51\xd5\x37\xb5\x27\x07\x3e\x66\x80\xe4\x16\x20\x6b\x79\xcd\xa2\xbf\x4b\xcd\x84\x41\x70\x42\x2f\x1f\xf1\x3d\x23\xce\x3d\x5c\x8a\x06\xdc\x57\x72\xeb\x61\xf3\x77\x1f\x5d\xf2\x04\xbf\x36\x2a\x3f\x8b\x3e\x9d\x0c\x3d\x1d\xf4\x92\x45\x61\x8f\xfa\xf8\xd7\x16\xfb\xce\x2d\x4b\x00\x0d\xc3\x30\x0a\x75\xee\x64\x7e\x38\x6f\x24\x46\xa3\x80\x71\x2f\x66\x3c\xd0\x8e\x38\x87\xef\xd3\xbe\x8b\x17\xa3\xc0\x85\x42\x7f\xad\xa1\x0b\x4c\x3e\x38\x23\xd3\xf4\x43\xf8\xd8\x0e\x77\xf3\x33\x4f\xab\x7d\xe1\x36\xfc\x06\xb2\x79\x07\x32\x79\xd0\xf9\x15\x59\x5b\x1f\xd5\xa5\xb2\xda\x22\x32\xf1\xde\xb7\xdb\x9a\x3d\x0a\xb4\x50\xf8\x44\xe7\xec\x01\x50\x93\x6c\xf6\xe7\x82\x4c\x3a\x40\xcc\xcf\x2f\xa5\xe4\x40\x26\x9d\x88\xcc\x25\xad\xb0\x34\xf1\x45\xc1\x37\x94\xcd\x5f\x8f\xc9\x5f\x94\xc5\xb7\xd8\xb9\x51\xa0\x30\x8f\xd9\x1e\xcf\xd5\x87\xac\xf0\xd3\x1f\xa7\x6e\x9a\x0a\x72\x39\xce\x73\xc0\x90\xc5\xe8\x18\xec\x87\xaf\x99\xeb\x46\x94\x9e\x86\xbc\x04\x31\x60\xf2\x17\xcc\x64\x21\xb0\xe0\xa9\x78\xa6\xe1\x30\xfc\xed\xab\x36\x01\xd7\x18\x2d\x7f\x01\xdb\x05\x6c\x74\xbd\x94\xc2\xea\x1d\x3a\x13\xfb\x9c\xe3\xed\x94\xd8\x14\x7c\x82\x19\xef\xfe\x5a\xbf\x8b\x54\x7a\xdc\xfe\x08\xb7\xe9\x47\xe1\x9b\xa6\xb1\x77\x30\xc5\x68\x77\xf4\x0a\x53\x5e\xfa\xbe\x1f\xa5\xa2\x50\xf3\x00\x0a\x38\x0b\x9e\x9a\xfa\xea\xfb\x70\xff\x9a\xa5\x0c\xdd\x40\x61\xb9\x79\xd3\x40\x08\x40\xe9\x0b\xdf\x88\x6d\x6d\x56\xaa\xed\x83\xb6\x37\xdb\xae\x98\x52\xa4\xfc\xcd\xc6\xa6\xfe\x72\xa8\x9a\x42\xba\x01\x7b\xac\x6d\x00\x29\xc7\x75\x5d\xe7\xfc\x97\x1d\xa6\x14\x18\x8a\x1c\x79\xfc\xd7\x67\xc9\x45\x31\x8b\x6b\x9b\xab\xb0\x75\xf0\x93\xbb\x50\x26\xfd\xf2\x0f\xbd\x95\xda\xc3\xf5\x1b\x92\x71\x86\x4f\x1c\x91\xa3\x55\x76\x21\x08\x84\x8c\x44\xba\x74\xab\x9a\x6c\x65\x86\x4f\x99\x47\x6c\xe7\x49\x4c\xfd\x3a\x04\xeb\x10\x28\x8b\xa8\xa8\xac\x7e\x57\x9c\x92\x75\x58\xca\x61\xa1\xb3\xde\x71\x0f\xe3\xb7\x15\x7f\x73\xa9\xab\x07\xce\xb9\xca\x07\x04\x73\x58\x76\xdb\x35\x19\xe4\xb2\x3b\xe8\x6f\xa1\x6f\x75\x99\xc4\x75\
x42\xfa\x19\x19\x87\x76\x78\xc6\x66\xd5\xaf\x1e\x5e\x41\x71\x9a\xf3\x59\xfe\x7a\x99\xff\x44\xce\x3a\x04\xf3\x98\x80\x22\xa6\xb3\x99\xee\x72\x9a\xeb\xe9\x3e\x24\x7c\x58\xb5\x5f\x92\x3d\x72\x67\x85\x65\x73\x8a\xf4\xcb\xbd\x12\x69\xb1\x4d\xc1\x97\x1b\x23\xeb\x87\x88\x5d\x77\x1d\xcd\xf6\x29\xee\xa1\x07\x6a\xfa\x26\xf8\xf9\xee\xbe\x4c\x83\xdf\x67\xac\x10\x66\xe4\xc3\x7c\xe1\x6a\xf2\x6d\xad\x72\xc8\x40\xd9\xfa\x94\x84\x3e\xb2\x08\x40\x7d\xee\x89\xad\xb5\x6e\x45\x8e\xb7\xc0\x26\x87\xa0\xd0\xd1\xe5\x36\x10\x6c\xdb\xf3\xec\x89\x66\x7c\xda\xe3\x68\xff\xf0\x04\x3b\xc7\x89\x57\x08\x35\x92\xf0\x9b\xe3\xa0\x89\x96\xdc\xf3\x22\xd1\x1b\x08\x40\x95\x7c\xe4\x56\x20\xec\xea\x88\x9e\xd7\xa6\xb5\x7b\xb0\x3e\x4c\xed\xaf\xe5\x86\xe4\xba\x65\x1b\xe6\xdd\x51\xc1\x63\xfb\xe6\x4b\x9a\xed\x4d\xcc\xfd\xf3\x93\xbb\x60\xd9\x38\xa7\x81\x85\xaa\x25\xb5\xd9\x9e\x00\x8d\xc8\x2f\x21\x7e\x46\x2a\xee\x16\x6d\x27\x6f\x68\x11\xc2\x2e\x77\x33\x94\x97\x77\x09\x69\xb2\x71\x1d\x1a\xfd\xcb\x9d\x5a\xb7\x6f\x08\xfd\xcd\xc5\x20\x58\x1e\x6d\x2f\x09\xbe\x0f\xcc\x98\xf2\xcb\x8e\xa9\xff\xe1\xe8\xc2\x10\x10\x03\x0d\xa0\xbb\x06\x28\xda\xa3\xeb\x15\x72\x1c\x79\x08\x7b\x28\xb6\xa7\xc9\xec\x95\x52\x4f\xa6\xd1\x07\x0d\x82\xc2\xe1\x97\xad\xae\x64\x47\xea\x12\x4e\x44\x32\x38\x5a\xce\x6c\x5c\x8a\x3d\x0a\x37\x46\x83\x1b\x62\x2a\x89\xb7\xbc\x01\xd8\x0d\x47\x70\x7f\x9a\x3f\xb8\x37\x6b\x00\x17\x08\xa9\xac\xa6\x36\x1b\xbd\xfb\x01\x7c\x64\xc8\x91\xcf\x71\x5a\xcf\x1f\x77\xb7\x4a\x01\x97\x50\x23\x22\x80\x4f\xee\x77\x44\xde\xf9\x6b\xca\x86\x0f\x83\x46\x78\x7d\xac\xaf\x4f\x3e\x20\x85\xbd\x8e\xcc\x62\xa5\xe4\x52\xd5\x6c\xfc\x2f\x46\x20\x38\xaf\x41\x5a\xb4\x0a\xee\x11\x51\xc4\x65\x3e\x95\x53\x8a\xd7\x39\x99\xe5\xd6\xa8\xda\xd6\xb3\xec\xd6\xe6\x6d\xda\x1c\x99\x2a\x59\x45\x1f\x0d\x6f\xb8\x58\x60\x45\x37\x62\xdd\x89\xe0\xae\x14\xa0\x80\xb8\xac\x16\x83\x9a\xfd\x7f\x63\x7a\x24\x4d\x12\x61\x5c\x3c\x94\x3b\x48\xc1\xb1\xa5\x48\xb5\xa2\xcd\x47\x47\x69\xbd\xb0\xde\xfa\xb4\xec\x13\x9c\xe6\x6e\x6a\xb5\xa5\xc6\x30\x1d\x1e\x23\xe9\x3e\x73\xa9\xdd\xf3\
xad\x4c\x45\xde\x96\xb8\xb9\x64\x5d\xf9\xf0\xd4\xa2\xff\xe9\x57\xe5\xbc\xeb\xe4\x83\xae\xe1\xf7\x57\x52\xac\x5a\x36\x1e\x1b\x42\xc7\xa5\x01\x65\x0d\x59\x26\xd3\x88\xa4\x04\xca\xb2\x38\xa2\x9d\x7a\x8d\x96\x03\x4b\x4d\x6a\xed\x13\xe1\x83\x6c\xba\xd5\x5d\x73\x23\x1f\xbf\xac\x11\xe7\x54\x3e\xcc\xbe\x03\x27\xff\x60\x91\x11\xaf\xa7\xa4\x8d\xf1\x76\x92\x94\xef\x39\x40\x7a\xa8\xc6\x31\xfe\xb1\xba\x5a\xce\x12\xc4\x84\xdd\x96\xbc\xdb\xc0\xda\x0c\x78\xe2\xa4\xf4\x77\x7d\xc9\x2f\xfb\x81\xd0\x0d\xc9\x93\x07\x31\x04\x6b\xcf\xdb\x4e\xb5\xd8\xc6\xec\x0c\xc8\x7f\x22\xd1\x88\x9d\x25\x7f\xa0\xe8\x53\x64\x86\xad\x29\xfb\x03\xc1\xe6\x1b\xc0\xa6\x9f\x2b\x7b\x00\x52\x0f\x8d\xbc\x36\xb2\xde\x67\x60\x9c\x65\x53\x4d\x0c\xb8\x8b\x52\x36\xe3\x6f\xa6\x6e\x11\x7c\xa4\x55\x08\x97\xdf\xc2\xae\xa6\xef\x86\xfb\x3b\x69\xb2\xc4\x4c\x3f\xce\x3f\x59\x56\x94\x45\xc0\x6a\xcb\x4d\x02\x87\x3c\x68\xb8\x84\xca\x29\xc3\x51\x17\x1f\xe7\xf4\x31\xd9\x09\x26\x81\xe1\x34\xac\x32\x79\x6d\xb7\x74\x0b\xf5\xfa\x08\x4c\xfb\xfb\x3b\x31\x47\x7e\x78\x1c\x0d\xe9\xbc\x46\xe4\x88\xd1\x02\x89\xfc\xa9\xc1\x62\x3e\x93\x53\x3c\x5d\xe8\xb5\xf1\x46\x68\xeb\x7a\x6c\x11\x6c\xfa\xce\x1e\x95\xae\x8c\x05\xe2\x35\x5e\xc4\xe1\x8d\x41\x04\xa6\x86\xc0\x62\x55\x3c\xa9\xeb\x3e\x1f\x33\xe6\xc0\x52\x30\x2c\x79\xb4\x04\x7b\xde\xa1\x1f\xfa\xbf\xce\xbc\x7d\x99\x3c\xb8\x13\x0e\x9f\xef\x5f\xdd\xcf\x29\x31\xf7\xcd\x61\xda\x75\x69\x29\x0c\xc3\x70\xaf\xd3\xaf\x33\x7f\x5e\xc3\xc2\x31\x0c\xfd\xc4\x73\x8b\x75\x2b\xf4\xb5\x65\xf8\xe6\xd7\x58\x92\x50\x00\xfc\x12\xe7\xf6\x54\x1b\x2c\xca\xd7\x90\x3f\xa2\x78\xfe\xeb\x8f\xa7\x60\x90\x6c\x5c\x2b\xb0\x77\x1f\xc1\xf7\x0f\x3b\x47\xf8\x4f\xe6\x0d\x04\x1c\x7f\xd0\x52\x16\xb6\x5c\xac\x40\xa8\xac\x2b\xc8\x4f\xfc\x0d\xbf\x24\xed\x8e\x58\x57\xcc\x22\x7c\xc9\xa2\x65\x1d\xc6\x50\x6c\x6d\xd9\x78\x77\xa2\xb2\x0f\xd4\xfe\x6a\x2c\xb5\x1b\xd7\x1f\x8c\xdd\xb9\x8c\xd7\x20\x0d\xbe\xe9\x61\x95\x1f\xcd\x45\x23\xd1\x08\x1f\x17\xfa\xea\x21\x6a\xd5\x5b\xb8\x38\xa0\x79\x9f\xa0\x73\xd1\x5d\x38\xf5\x52\xb1\xd9\x87\x79\xd5\xba\x95\x38\xf3\
x05\x8c\x55\xda\x9b\xf9\xf6\xa2\x36\xe5\xb8\xd7\x5b\xb9\x98\x7f\x1c\xe2\xca\xaf\x73\xcc\xa9\x1f\xe5\xbf\x7d\xce\x2b\xc4\xec\x7a\x8d\x59\xde\x50\x4c\x91\x69\x9c\x73\xfe\xa1\xb5\x1d\x3b\xe2\xcf\x83\x9e\xf8\xe4\x25\xcd\x0e\x8f\x23\x79\x22\x8f\xae\xec\xc8\xf1\xe1\xb0\x98\xfd\x92\xcf\x34\x72\x52\xc4\xfd\xe4\x1e\xe4\xcb\x17\x70\x9b\x57\x08\x9b\xe3\x47\xfa\xfc\x20\x45\x23\x48\xdc\xf8\xf4\x65\x34\xe0\xe4\x57\x8b\x1f\x9d\x32\xc7\x24\xa1\x4e\xd5\xdf\x97\xfc\xed\x5c\x75\x5d\xc7\x45\xac\x9f\x57\xf3\x11\x99\x1f\xab\x37\xf2\x12\x1d\x4f\xfe\x16\xe6\xb3\x4e\xcd\x94\x04\xdf\xf4\x80\x33\x8c\xd1\xf0\xe0\x47\x2e\x1d\xdb\x41\x4f\xfb\x2b\x56\xa9\xc5\xfb\x1e\xc4\x97\x69\x5c\x7e\xe2\xda\x96\xaf\x75\x70\xa5\x65\x4b\x87\x07\x08\x1f\x3d\xfc\x46\x51\x58\xc6\x2a\x34\x7a\x8f\x65\x19\x77\xa4\x4c\xfe\x37\x79\xe2\xa7\x91\x7f\xee\x32\x71\x5a\xfe\x9f\x5d\xa5\xd0\x3b\xe4\xff\xd3\x39\x48\x49\xd7\x3e\x6a\x93\xde\xe5\xbf\x37\x9e\x44\x14\x37\xda\x8f\xc9\x82\x38\xe9\x6f\xa1\x07\xdb\xda\xb8\x22\x87\x1f\xc0\x26\x59\x25\x79\x95\x63\x37\x99\x63\x9e\x2d\x23\x82\x66\x15\x3b\xfb\xfc\xd1\x11\xf3\xbe\xe0\x7c\x0b\x41\x50\xd4\x8f\xde\x2f\x09\x3e\xb7\x95\x00\x63\x0c\x2c\xe0\xfa\x61\xc7\x84\x03\xae\x2a\xe7\xbe\xe1\xda\x06\x68\x9e\x64\x9a\xee\x0e\x61\x58\x86\x65\x7c\xd0\x74\x9d\x64\x6a\x7b\x23\xda\xdd\x93\x30\xed\xb3\xb4\x26\xff\xf5\x25\x35\x2e\x51\xbb\x6a\x72\xfb\x5a\xf6\xf9\x72\x0f\xc3\x28\x1f\x97\x2d\x47\xe8\xef\xc1\x37\x95\x1f\x99\x4b\x65\xe9\x0d\x31\x79\x30\x53\x4f\x2a\xb5\x59\x24\x56\x71\x28\xac\xf3\x45\xf5\x21\xaa\x77\x27\xad\x76\x2e\x24\x43\x7e\x67\xbc\xb6\xdc\x39\x3c\xab\x8a\xef\xe7\x9f\xfa\xaf\x59\xfd\x18\x47\x3c\x51\x32\x47\xc4\x27\x6a\xa6\x38\xad\x0d\xde\x1d\x8a\x89\xb7\xe8\x70\xec\x07\x47\x0e\xa3\x51\x8a\x7d\xe7\xf3\xeb\xed\xd8\xf7\x3a\x04\x40\x79\xa5\xec\x3d\xf2\x0c\x39\xb6\x0c\x19\xeb\xaa\x65\x4b\xf1\x97\x43\x83\x8a\xa2\xa5\x42\x7d\xf2\x57\x67\xce\xe3\x32\xa6\x48\x2a\xb9\x41\x6e\x83\xa2\x22\x32\x69\x5c\xa3\xd4\xb2\x66\xd2\x2e\xd4\xa7\x52\x88\x15\x38\x29\xd4\xd0\x7a\x0f\x50\x84\xf4\xd7\x43\x3f\x8c\xbb\x3c\
xa2\x71\xa6\xe2\x70\x66\xd2\x67\x26\xad\xf7\xa9\xf4\x7b\x04\xe1\xde\x92\xdf\x49\x63\x27\x2d\x30\xbf\x3f\x7d\x71\x5d\x99\xc7\x9f\x43\x70\x81\x93\x4c\x33\x02\xd8\x56\xbf\x2c\x6b\xc9\x36\x02\x0f\xe9\xbf\x32\x8c\xdc\x98\x44\xdb\x69\xe5\x9a\x2d\x9c\xba\xf7\xa4\x34\x5e\xa8\xc2\x65\x1c\xa2\x34\xa8\xad\xeb\xb9\x79\xa7\x24\x44\x9f\xde\x91\x80\xa6\x4a\xa2\xd4\xb9\xfe\xe5\x33\x45\x41\x59\x24\xb6\x29\x44\x0f\x98\x7b\xc8\x8f\x8e\x35\x1b\x56\x64\x33\xed\xa3\xfc\x58\xa0\x1a\xa5\x79\x4e\x95\x10\xac\x47\x17\x89\xa2\x1f\x57\x2c\x2e\x7f\x01\x09\xe8\xac\xd1\x10\x75\xe1\xb2\xac\x63\x93\xea\x9e\x58\x98\x41\x18\x10\x3e\x37\xde\x16\x03\xcf\xff\xd4\x69\x17\xee\x85\x57\xdb\xd5\x4f\xe0\x3b\x16\xfc\x27\x43\x8e\x25\x8f\xe8\xbd\xb0\xf5\xf5\x6d\x25\xef\x81\xcf\x5f\xff\xf5\xd8\x8a\x73\xdb\x0a\x80\x13\xf3\x0a\xf6\x3c\x22\xd0\x94\x1e\x9f\x77\xcd\x46\xb5\x47\x7d\xef\x04\xa6\x11\x27\x52\xa3\x32\xfd\x41\x11\x71\xa5\xfd\x7b\x7c\x5c\x56\x59\xe4\x99\xf4\xbe\xc3\x4d\xe2\xb8\xad\xe2\xc3\xf3\xae\xf9\x88\x7f\x4d\x00\x69\x80\x49\xb2\xc1\xc4\xb2\x99\x92\xe3\x63\x12\xdd\x96\xc7\x58\x84\x71\xaf\xd0\xa1\x7b\xe2\x87\x8e\x2b\xf4\x20\xa3\xf0\xd4\xd1\x8b\x6e\xb9\x4f\xab\xfe\xab\x6d\x57\x14\x5f\x45\xf7\x77\x0c\x5e\x15\xc4\x1b\x60\x22\x81\x6d\x25\xce\x7b\xcf\x85\xbe\xc4\xa3\x68\x8e\x4a\x12\x3e\x73\x88\x76\xcd\xf6\xc5\x08\xa0\x3c\x97\xa8\x6f\x2f\xcb\x45\x8e\x2d\x42\x0f\xb2\xa5\x8e\x10\xf8\x22\x81\xbb\xd7\x52\x3a\x56\xbc\xcf\x2f\xda\x4f\x6f\x38\x83\x9d\xa3\x10\x02\x8b\x4c\x43\x6b\x99\xac\xba\x79\x61\x42\xb8\x46\xa1\x35\xd4\xbd\x62\x32\xb7\x11\x78\x80\x83\x55\x2e\xf0\x01\xc1\x6d\xe4\x33\x52\x23\x5c\x13\xd6\x3e\x86\xdd\x66\xd2\xb0\x44\x79\xb6\x43\x80\x7d\x0f\x06\x6d\xd7\xb5\x8f\x15\x59\x52\xa9\x3f\x3d\x90\xd8\xe6\xb0\x49\x9f\x21\x45\xec\xab\xdd\x40\x2a\x8c\x1e\x33\xcb\xda\xb6\x92\xe7\xbd\xa7\xd2\x48\xa1\x92\x15\xb1\xd5\x0b\x46\xfb\x14\xd5\x3d\xea\x97\x69\x1d\xb5\x18\xe7\x06\x9b\x1a\x10\xd3\x4e\x44\x43\x59\x8e\x8f\xbf\x82\x71\x88\x60\x20\x00\x4c\x3e\x83\x0d\x7d\x3f\x19\xcc\x92\x94\xff\xe6\xd0\xf5\xb2\x7d\x5a\x15\x3a\xee\x8a\
xda\x73\x49\xdd\xce\xf2\x7b\x7a\xa3\x69\x00\x91\x16\x6f\xe7\x1b\x36\x8d\xb1\xd4\x3a\x5c\x6e\x19\x7c\x48\xf7\x6b\x7d\xe1\x61\x2c\x44\x63\x19\x8c\x28\x46\x4f\x16\x88\xf8\xbf\x3a\x35\xd1\x3a\xb5\x2b\xa6\x85\x48\xfb\x47\x87\x54\xd1\x0d\xc2\x29\x85\x4b\xdd\x23\xbe\x3e\x51\x79\x81\xa1\xa2\xb8\x2d\xeb\xef\xa5\x28\x15\xde\xf4\x6c\x95\xa3\x15\xfd\xcd\xac\x5f\xef\xfd\x5c\xc7\x62\xc0\x26\x7f\x79\x83\x32\x85\x7d\xa6\x8b\xa4\x7f\xe3\x7e\x3e\x2a\x84\x7b\x2d\x3f\x54\x1b\x59\x7d\x12\x78\x20\x2b\xac\x96\x31\x22\xc8\xd4\x56\x55\x11\x11\x23\xe1\x09\x61\x75\xaa\xed\xb7\x82\xaa\x63\xc1\x6a\x7d\x01\xf1\x16\xde\x46\xf7\xf0\x3a\xc5\x2b\xba\x23\xee\xb9\x2b\xf9\x6e\x0a\xa5\x21\x66\xc3\xfb\xb9\x87\xf3\x51\x4d\x45\x65\x32\x35\x44\x89\xfc\x01\xc2\x35\xff\xb5\xe9\xd6\xb3\xaf\x4c\xab\x08\x3b\xfd\x98\x4a\x81\xfa\x93\x23\x62\x18\x8e\x02\x23\x80\xd2\x71\x6a\xb8\xd0\x10\xbc\xc1\x1a\xe1\xd5\x2f\x51\x22\x4f\xbc\x76\xd9\xea\x73\x64\x9b\xc6\x43\x3f\xb1\x8d\xf2\xfa\x08\xba\xaf\x3d\x05\x7b\x2d\x59\xbd\xc8\x8d\x98\x9f\x04\xd5\x07\x02\x98\xd7\x6c\x0c\xd3\x24\xe2\x38\x8e\x08\x7b\xbf\x28\xa3\x29\xae\xc0\x67\x8d\xa0\x01\x03\xce\x59\xf1\xc6\x54\x1a\xb3\x3a\x86\xf9\xe2\x8c\x37\xa2\xfb\xe6\x30\x1b\x0e\x89\x46\xa1\xe5\x07\xab\x78\x26\x98\x1b\x6c\x56\x7f\xf2\x71\xfc\x67\xb2\xda\x81\x04\x24\x51\x53\x16\x2f\x84\x2b\xb4\xb6\xa7\x47\x0d\xce\x68\x9f\xc4\xb0\x90\xbf\x44\x04\x65\xfc\x77\xab\x41\xe0\x39\xe4\x81\xaa\x3c\xe9\x1b\xdc\x22\xfe\x6b\x4f\xfd\x00\x35\x11\xfa\xab\xaf\x04\x32\xce\x83\xbe\x7d\x9b\xb4\xbe\xfa\x01\x95\x02\xd1\x24\xff\xa0\xc0\x1c\xcf\x2b\x80\xe1\x76\xaa\x4b\x7c\xf9\xf3\x4b\x1c\xf9\x96\x46\xe1\x90\x22\x65\x1c\xc1\x07\xe9\x13\xf5\x87\x48\xb6\xe4\x50\x61\xa2\x1c\xab\xa3\xaa\x29\x0c\xf5\x83\xa3\x8e\x7b\x63\x5e\x3e\xf3\xb7\xcc\x14\x96\x4d\x8e\xf5\x79\xbf\x39\xca\x93\x60\xe9\xf7\x76\x8f\xc1\x71\x41\xf4\xd4\xe0\xb0\xb8\x63\xd8\xdb\x1f\xee\x03\x9e\x55\x72\x84\xaa\xdd\xa2\x53\x11\x00\x46\x4e\x1a\x59\x42\x59\x7d\x8a\x49\x5d\xb3\x6e\x2d\xfd\xc1\xf0\x67\xf7\xca\xd7\x77\xbf\xe3\xe3\xa8\xec\x78\x1b\x63\x18\xc3\x0e\x7e\
xf5\x53\xdf\xea\xb8\xa8\xcb\xc2\x61\x48\x45\xa7\x11\x52\x71\x1c\xa5\xcf\xa0\x2e\x83\xc2\x33\x90\x6d\xbe\x09\x12\x30\x6b\xc4\x7a\x6f\xf2\x1f\xbe\xc4\x74\xaa\x2b\xa9\x73\xa5\x38\x84\xae\x70\x6b\x36\x85\x30\xfd\xeb\xf7\x46\xda\x35\x9b\x32\x12\x75\x5d\x41\x33\x40\x68\x33\x63\x38\x46\xd0\x4b\xc6\x2f\x95\x64\x8f\x79\xee\x92\xae\x7f\x7c\x15\x07\x2d\xf8\xd8\x72\xf3\x08\x95\x56\x16\x81\xa4\x47\x61\x5a\x64\x84\xa1\x01\xd1\xbf\x79\xd2\x0f\xad\xa2\xeb\x0b\xaa\xfd\xfb\xb9\x95\xfe\xf4\x51\x00\x84\x81\xfa\xd1\xdc\x51\x5d\xb3\x49\x25\xaa\xf4\x73\xe2\x9c\x90\x8e\x60\xfc\x25\x47\x1a\xf4\x61\xa9\xba\x23\x93\xac\xf2\x26\xee\x20\xde\x39\x04\xa6\x11\x98\x25\xd2\x9c\x76\x8d\x36\x6c\x54\x9f\x98\xc1\xb0\x29\x23\x35\x26\xd4\x18\xb1\xf8\xd3\xb0\x97\x71\xab\x63\xf8\x46\x1c\x83\x8c\x04\x02\x7c\x57\xe9\xdc\x3e\x58\xc2\x0b\xb8\xed\x63\x21\xa7\x84\x68\xac\x05\xbb\x9b\xfb\x2b\x55\x1d\x91\x07\xb6\xf6\xe7\xc8\x21\x1f\x0e\x5d\xde\x14\xb3\x0d\xae\xea\x37\x0c\x1d\x01\xb0\x6e\x91\xa9\x46\x02\x3d\xbe\xf1\x81\x6e\xa1\xd7\xaf\xac\xd0\x33\x7f\x6c\x22\x9b\x94\xaa\xe5\x07\x94\xd8\x74\xfc\x1c\x40\xe2\x93\x73\x2a\x68\x0f\x10\x31\x2d\x44\xf5\xfb\x8d\x7d\x05\x2c\xf5\x46\x0d\x49\x3f\x1f\xba\x32\xcb\x52\x4c\xdc\x4f\x69\x2f\x5c\xe3\xac\xa0\x8d\x69\x2b\x1e\x23\xd7\x1c\xc7\x25\xb0\x41\xeb\x10\x6c\x55\x9c\x5c\xde\x28\xd0\xb8\x97\xbc\xff\x61\x35\xdc\x4d\x96\x2b\x4d\x53\xbe\x1a\x9f\xeb\xb5\x4a\xda\x8d\x18\xaf\x41\x1b\xaf\xe1\x73\x6b\x38\xb7\xb3\xd4\x3f\x57\x84\xb5\x21\x8b\x48\x17\xf2\xa5\xe9\x3a\x70\x65\xc7\x54\xe0\x55\xd7\x96\x6d\xdb\x88\x75\xdb\x32\xc9\xda\xf9\x1d\xe1\xa9\xff\x7c\x7e\xb1\x2d\xa2\x10\xfd\xca\x77\xd7\xf3\xf3\xd7\xc4\x62\x06\x93\x15\x92\x00\xe6\xf4\xb2\x78\x87\xb6\x7d\x83\xb4\x20\x9b\x2f\x29\xc3\xf9\xb4\x55\x39\x77\xbf\xed\x43\x37\xef\xfa\x52\x8a\x8a\x99\xe0\x17\x81\x06\x14\xf0\x19\x34\xac\x1a\x98\xff\xb8\x35\x0c\xae\x93\x5a\xac\xf2\xbc\xcc\x87\x0d\xe1\xca\xb7\x5a\x26\xeb\x64\x8f\xf3\x19\x92\x98\xd7\x7a\x8d\x47\x70\x7a\x89\x2b\x16\x67\x8d\xc2\x94\xb2\x17\xb0\x26\x51\x8b\xb4\x43\x9f\x4f\x14\x8b\x77\xf0\x51\
xd7\x71\x75\x18\x43\x9d\x81\xff\x61\x3c\x3a\x41\xfb\x4b\x3d\x2f\xcd\x97\x37\xcc\x67\xb3\xd3\x9c\x67\x30\xeb\x35\x5e\xe1\x22\x64\xae\x58\x5c\x69\x74\x53\x7a\x5a\xe9\x7a\x03\x6b\xb2\x4e\x15\xa1\x47\xae\xee\xb1\x4a\xbf\xa1\x0c\xfe\x13\xa0\x66\xa8\x68\x23\xfb\x53\xef\xd0\x08\x59\xe8\xad\x95\xdc\x0a\xd5\xe2\xb8\xbf\xf7\x19\xf8\x52\xf6\xbe\xed\xc7\xb0\x6b\xac\x57\x4c\x29\x64\xb6\x81\x96\xfa\x4a\xae\x5d\xb2\x13\xb8\xb5\x8c\xd7\x22\xf1\x7d\x01\x18\x11\xb0\x53\xc7\x4c\x7f\xa9\xbf\xea\x3d\xda\x24\xe7\x34\xfd\x7f\x12\xad\x1c\x53\x21\x73\x89\x26\x26\x4e\xb8\x7e\x43\x1c\xb4\xc9\x5a\xc6\x4b\x98\xb2\x5b\xc8\xba\x77\x83\xed\x0a\x3f\xab\xfd\x90\xc9\x38\xe8\xf5\x4d\x02\x8c\xf1\x9a\x27\xe2\x07\x0a\xe0\x96\x57\x10\x73\xc1\x0d\x75\xea\x03\xad\x7f\xcc\xbf\xba\x7d\x57\x3e\x04\x7b\x25\x73\x33\x36\xbd\x57\xab\xd8\x97\x25\x6c\x18\x51\x53\x69\x86\xa5\x86\x12\x1b\x83\xf5\x8c\x1b\x51\x03\xb6\xd7\xca\x25\x20\x26\x08\xf8\xc2\xd5\x06\x01\x23\x05\x4b\x4e\xe1\x05\x6f\x94\xc8\xec\xfc\x3b\xf7\x3b\xb3\xc5\xa3\xff\x8c\x2b\x76\xde\x5a\xfa\x33\xc2\xb7\xb0\xa0\x4e\x3e\x11\x3b\xc4\x14\x87\x70\x59\x0c\xbd\xd1\x3a\x5a\x1b\xa8\x2c\xb2\x78\x9f\x8e\x75\xc5\x22\xff\x96\xf2\x0c\xc6\x04\x18\x2b\xc6\x26\x85\x5c\xed\xd8\xbc\xca\xd9\x17\xfd\xc4\x72\x30\xff\xb7\x76\xb1\xe9\x9b\xcb\xe4\x55\x0d\x1e\xe7\x68\xc8\x7e\xab\xc0\x28\x1a\xb6\xf3\x74\x3d\xc9\x39\x35\x0c\x94\x31\xd8\x91\x3a\x08\xf1\x54\xa5\xe6\x89\x5a\xc4\x6c\x96\xb0\x21\x60\xec\x15\x2b\x79\xec\x0f\xe1\x7a\x9c\x9f\x60\x17\xf8\xfe\xf8\x3c\xdc\x15\x8d\xeb\x99\xf4\x9e\xab\xe6\x5b\x9d\x93\x71\x1e\xc3\x74\x35\x05\xeb\x74\x6d\x98\xeb\xad\xae\x2f\x3a\x8e\x9c\xe6\x7c\x86\x22\x80\x8d\x9c\x26\x74\x1e\xcf\xab\x8b\x86\xd7\xef\xdb\x0c\xfc\xde\xf8\x23\xd5\xb4\x88\xf5\x7e\xe2\x04\x21\x72\x7d\x2e\x97\xef\xae\x9a\x1b\x62\x5c\xd6\x61\x71\x83\x50\x74\x83\x48\xba\x23\x56\x53\x6f\xf1\xf6\xe4\x79\x14\xaa\x6c\x9e\xc4\x3e\xf7\x4d\xb5\xdc\xc8\x2f\xa1\x47\x70\x18\x72\x83\x92\xb1\x4d\x22\x3d\x45\xd9\xc6\xff\x7a\xd1\x8b\x86\xef\x50\xce\x64\x85\xc1\xa8\xbd\x48\x74\x21\x11\x12\x0e\x4b\x11\x76\
x9e\xe7\x78\x7d\xf0\x9b\xcb\x53\x50\x46\x19\x19\x2a\xb3\x7c\xef\x27\x7e\x7e\xe9\x6e\xf8\x3a\x8d\xaf\xec\x99\x3c\x76\x8f\xc1\x33\x98\x5c\x12\xe3\xbd\xf3\xa3\x4f\x44\xc5\xe4\xaf\xdb\x6b\x43\x03\x1e\xf0\x44\x64\xfe\x1b\x2f\xf3\xa1\xa8\xe9\x85\x79\x90\x66\x5c\xc4\x6c\x8d\xdc\xac\x6f\xb0\x14\xb8\xd2\x5b\x29\x6c\x2d\xb4\x01\xd3\x28\xa0\x39\xdf\x25\xc0\x36\x3d\x25\x4d\xde\x29\x84\xe3\x15\x88\x88\x91\x62\x49\xfc\x8f\x77\x86\x09\xe2\x2e\xf3\x25\x17\x4a\x17\x87\x1c\x1c\xd3\x80\xfc\xd2\xa6\xc0\x66\xd5\xef\x54\x37\xd3\xd5\x76\x3d\xdb\x8f\x33\xc3\xf4\xc2\xb5\x62\x21\x60\x80\x2c\xf5\x7e\x55\x61\xec\xd4\x31\x1c\x7a\xf9\x2a\x96\x7f\xf8\xc9\xfc\x55\x65\x77\x8f\x9d\xd1\xb9\x51\xed\x53\x86\x6b\x99\xef\x74\xbe\x95\x13\xb9\x02\x4b\xc8\xcd\xd9\xa7\x48\x3b\x28\x6a\x20\x69\xfe\x8a\xc6\x72\x79\x6a\xe0\x86\x61\x52\xf3\x86\x5b\x29\xd6\x7d\x59\xe6\x3c\xc6\x67\x29\xbb\x7b\x70\x8d\x17\x6a\x46\x49\x90\x16\xdf\x3f\x06\xa2\xf5\x4b\xa1\xb8\xc3\x44\xd7\xd6\x15\x98\x2e\x60\xa2\x28\x94\xa6\xcb\x40\x6b\x47\xf3\xae\xb4\x3b\x26\xc4\xfb\xcc\x87\x79\x3c\xd3\xcb\x62\x21\xeb\xa4\x4c\x56\x51\xf0\x77\x29\xed\x05\x2a\xa5\xbb\x9d\x53\xc2\x6b\x64\x1f\x0b\x67\xe9\x3f\x79\xea\xa8\x24\x5d\x37\x4c\x82\x83\xb0\xb6\x96\x71\x07\xc9\x95\x6c\xa8\x1e\x9a\xe4\xe4\x2b\x62\xb6\x8a\x2b\x37\x6b\xa9\x2e\x7f\x55\xcb\x0d\xe8\x9e\x6a\x0d\xc8\x09\x23\x23\x05\x9f\x07\xfd\xdd\xcb\xb9\x2e\xf3\xad\xcc\xb6\xd4\x11\xc5\x97\x81\x8c\x5e\x01\xf8\x8f\xf1\x53\x87\x23\x9a\x9f\x80\x96\x1d\x4a\x5e\x0b\xeb\xe4\xd9\x34\x3c\xee\x5f\x7f\x77\xd8\x5f\xe3\xbb\x86\xce\x3a\x06\xcf\x08\x21\x83\xd8\x07\x56\x1b\x70\x4d\xf2\x91\x85\x4e\xcf\x09\xfc\x46\x06\xe2\x1b\x12\x24\x9a\x92\x66\xda\x86\xe2\x51\x0e\x7b\xce\xd0\xf2\x36\x67\xf2\xe9\x41\x5a\x00\xa8\xdd\x9f\x63\x3b\x1b\xa2\xe2\xe9\xca\xb3\xfe\x7a\x45\xdd\x13\x91\x2b\x69\x9d\x4e\x41\x2d\x87\xbd\x14\x7c\x53\x70\x73\xcd\xcf\x21\x83\x09\xcd\x28\x5f\x80\x78\x59\x6c\xa6\x13\xf9\x62\x9d\x6e\xce\x64\x5b\x40\x4c\x01\x81\x22\x00\x7c\x83\x78\x86\x7f\x2b\x99\x4d\xa4\xf6\xca\x94\x66\x4d\x22\xb1\xdb\xcb\x8f\x41\x6a\x2f\x08\
x83\x48\xc0\xe8\x46\xf3\x3f\xeb\xba\x28\xa6\x15\x2f\x45\x57\xf7\xcf\x15\x7f\x86\x7d\x17\xfb\xd0\x05\x65\x02\x44\x01\x0a\x04\xae\x03\x8a\x13\x5a\x50\x16\x71\x36\x59\x4d\x75\xad\x52\x50\x0e\x31\xd1\xd9\x4c\x87\x32\x71\xf8\xac\xeb\x5a\x60\xa0\x59\xf2\x63\xe9\x2f\xb5\x8e\xc7\xba\x09\x16\x62\x7b\xe0\xc0\x42\x96\xe7\x9d\xde\x37\x1e\x83\xb7\xfc\xeb\x5c\x59\xc3\x8b\xfd\xf1\xab\xc4\x41\x68\xfa\x65\x2c\xc7\x95\x31\x22\x52\x84\xfd\x5f\xf1\xf7\x3b\x4e\x46\xd1\x27\xeb\x90\x2c\xd2\x4c\xff\x9e\xc3\xbc\x5b\x4b\xad\xf8\xa7\x1a\xa5\xe3\x92\x67\x50\xc6\x40\x31\xd0\x86\x20\x93\x28\x6a\xf8\xdd\xdf\x9c\xcf\xf2\x37\x92\x50\x04\xb8\x91\x93\x82\xc7\x9d\x1e\x00\xba\xbc\x8d\x96\xb2\x97\x92\xc3\x76\x22\x4c\xc3\xa4\xd3\xfb\x34\x6e\xb1\x54\x1e\x50\x15\x8d\x98\xc7\x4d\x4a\xef\x0f\x76\xac\x69\x9c\x0b\xf9\x22\x11\x3f\x4c\x0d\x64\x1e\xb4\xae\x1b\x51\xf3\x64\x6d\x01\x77\x03\xc7\x01\x3d\x06\xd7\x28\xcc\x93\x80\xda\xb4\x0e\xf6\x33\xf1\x1b\x70\xdf\x87\x40\x5b\x60\x6d\x29\xd5\x17\xc4\x45\x6b\xa1\xad\xd4\x37\xb2\xa4\x87\x47\x4f\x15\x42\x57\x14\x87\x28\x14\x7f\xfd\x1b\x86\x66\xf8\xfb\xe5\xc2\x79\x0f\x66\xf8\x32\x02\x6f\x68\x42\xaf\x6b\x46\xd8\xdd\xee\x32\x55\x54\xe9\x9a\x8d\xfd\x3a\x04\x93\x59\xf6\x0d\xf7\x62\x86\xf2\x47\x3f\x76\x3d\x54\x0a\x77\x37\xb3\x94\x66\x2c\x5d\x1b\x1d\xd3\x31\xa0\x36\x08\xef\xcf\xaf\x67\xea\xa1\x61\x32\x96\x01\xb2\xa6\x1d\x00\x86\x87\xdb\xce\xe3\xad\xf1\x0f\x39\x3e\xae\xda\xef\x41\xe7\xa5\xaa\xd8\xaf\x0f\x08\xf6\xe0\x86\x50\x9d\x36\x77\xab\xa6\x1b\xc7\x4b\xe6\x80\xee\x2f\x4a\x1f\xa5\xc9\x3b\x3a\xd7\xa4\xb8\xc9\xb8\x02\xc7\x27\x01\x4c\x7d\xf8\xb8\x0e\xb7\xb4\x0b\x94\x09\x7f\xfd\x2b\x4e\x69\x69\x79\x1a\x3c\xb7\xfc\xc8\xfa\xde\xbd\xfa\x3e\x24\xc0\x1d\x16\x21\x35\xc9\x7d\x82\xb8\xbf\x1b\xbd\xe3\xa3\x36\xfc\x29\xfc\x63\x9c\xa5\x37\xd9\xd1\xbe\x2d\x39\x64\xec\xd6\x23\x12\xb6\xba\x01\x66\x2d\x8d\x94\x8b\xa1\x52\xbc\xbb\x86\x7d\x7f\x38\x6a\x59\xc7\x18\xa9\xca\xb5\xf1\x48\x39\xe6\x8f\x88\x03\x89\x40\xb5\x86\xd5\x3e\xb6\xab\xc9\x11\xd3\xfd\xcf\xdd\xdc\x74\xf2\x8c\x29\xb1\x15\xf1\x16\x8f\x20\
xf9\xda\xaf\x33\xf5\x5f\x97\xbd\xe6\xdb\x1b\x9d\xbf\xf9\x5e\xae\x31\xd0\x48\x4b\x47\xb5\xeb\xf9\xbd\xf1\x85\x08\xba\x3b\x0a\x1c\xeb\xca\x3d\x6a\xf9\xe1\x94\x9d\x15\x7e\xf0\x40\x25\xb8\x8d\x17\xf1\xbd\x35\x9a\x19\x50\x6b\x1f\x5d\xe9\xbd\xd2\x6b\x23\xb3\xa3\x3e\xe0\x63\xf6\x09\xc8\x87\xff\x7c\x14\xd1\xf1\x32\x0e\xe4\x86\x5d\xee\xa8\xd5\x3c\xa9\x4f\x4d\xfe\x92\xcf\x47\x0d\x98\x2a\x61\xff\xd6\xae\x69\xcc\x75\xef\xe5\x62\xcf\x38\xd5\x17\x54\x34\x10\xca\x4c\x9a\xb4\xcc\xa4\x19\x6f\x44\x44\x7c\x7d\xfc\x7f\xf7\x57\x7d\xd1\xa2\x50\xb9\xff\xd4\x58\xe5\x66\xc5\xec\x33\xf2\x63\xfa\x29\x6b\x37\xa4\xff\xc3\x77\xc1\xfa\xcc\x6b\x5d\xfb\x5b\x54\xae\x8d\xa1\x34\x06\x5d\xec\x2b\x01\x5c\xcd\xbe\xe3\x4f\x2f\x58\xae\x63\x9e\xa4\x5f\xba\x99\xa7\xd6\x6c\x91\x84\x64\xa6\x58\x77\xc3\x2d\x5b\x09\xbb\xc0\xff\x50\x7e\x0b\x37\xdc\xf8\x13\xf3\x71\x23\xdb\xf5\x67\xb2\x6a\x0c\x4f\x7c\x5e\x43\xd4\xd4\x50\x6c\xd2\x73\x34\x98\x7f\x77\xd2\x99\x94\xb3\xe7\x87\x9f\xed\x87\x64\x44\x95\xb3\xe7\x9b\xd3\x99\x1e\xe8\x1b\xe1\xdf\x38\x54\x1b\xd9\xf7\x63\xb9\x9a\x1c\x32\x53\xeb\xa6\xc7\xb0\xa8\xda\xbc\x1a\xe0\x69\xcc\x9c\x44\x64\x57\x77\x27\xe5\xdf\x9e\x71\x95\xfa\xd2\x5c\xa3\x58\x03\xa5\x1b\xce\xb7\xc7\x9e\x4e\x18\xd3\x29\xcd\xf9\x54\x97\x3d\x06\xf8\xc1\x70\xf4\x5f\x46\x3b\x59\x98\xaf\x2e\x5f\xef\x20\xce\x5c\xa3\x9d\x60\xe7\x7e\x91\x0b\xb1\x5e\xe8\x75\x6e\xa8\xb1\x57\x1c\xfd\x63\x9b\x29\x3f\x40\xfc\x0f\x61\x30\xa6\x9d\xd9\x1d\x26\x1c\x38\xf7\x31\xa0\x9d\x3b\x02\xbc\x61\xff\x62\x65\x8a\x08\xfa\x10\x72\x79\x2e\x23\xce\x24\x4b\xc5\x29\xa7\x61\x41\x22\x72\xa3\xa4\x2e\x0d\xfb\xbf\xeb\x51\xb4\x36\xd4\x16\x51\x0e\x19\x70\x6e\x07\x37\xed\x95\x7c\xf0\x1a\xc1\x6c\x40\x7e\xf8\xa7\x2f\x71\x29\xf2\x58\xc6\x11\xe7\x71\x13\xf6\x59\x35\x58\xbe\x58\x03\x86\x6f\x0b\xb5\x23\x97\x9b\xa9\x7f\xe6\xe5\x28\xaa\x71\x20\x31\xc9\xc3\x4c\x0c\xc7\x91\xef\xd4\x06\x7c\x1b\xcd\x39\x6a\x98\x48\x87\x94\x68\x78\x37\xbc\x69\xed\x4f\x1c\xab\xdc\xd7\x7b\xb3\xd7\xca\xe2\xdd\x66\xb0\xc1\x72\x37\x15\x23\x69\xbd\x2b\xff\xc5\x72\x1e\xe6\x8b\x5b\xcc\xc2\
xcd\xe4\xa8\x19\xa2\x1b\x0e\x9e\xa7\x5a\x33\x23\x32\x67\xf4\xd3\xcf\x7e\x97\x39\xd7\x53\x7d\x64\xef\x1a\x85\x9a\xa4\x91\x21\x22\xb9\x66\xdf\xee\xa7\x16\x86\xbb\x8a\x09\x87\xe5\xd9\xe5\x15\xa3\x85\x66\xbb\x10\x1d\x4d\xfd\x9a\x41\x53\x29\xff\x2e\x6e\xb2\x17\x44\xda\x3b\x1f\xb8\xad\x60\x34\x9a\xd9\x22\x90\x22\x14\x6c\xaa\xdb\x69\xc2\xb3\x82\xec\x31\xcf\xcf\x1d\x59\x43\x11\x53\xcc\x08\x18\x27\x6c\x95\x9b\x9e\xac\x81\xd3\xd2\xa1\x1f\x15\x3e\xff\xe1\x3a\xba\x76\x48\xe2\x7c\xdc\x61\x94\x79\x8f\x08\xb8\x17\x19\xb5\xb5\x5e\x3a\x51\x07\x66\x8c\xe4\xfd\x8f\x37\x70\xfe\xf2\x65\x3f\x98\xd3\xc2\x8e\x8d\x41\x12\x32\xee\x09\x6f\x72\xf6\x8c\x71\x5c\xaa\xd9\x0e\xf2\xfe\xf0\x15\xb0\xce\x2a\x39\xc3\x50\x2e\x17\xa9\x4c\x10\xf2\x06\x8e\xd7\x3e\xba\xb6\x6b\xcc\x02\x3b\x36\x99\xd8\x17\xc9\xfe\xd4\xbe\xb3\x99\xb2\x6d\xca\x2c\xfe\x92\x8f\xb7\x59\x72\x4e\x94\x95\xc0\xfb\xfc\xe7\xd3\x40\xcd\x4f\x5f\x71\x83\xe1\xd8\xc4\x68\xbb\x99\xe3\x30\x69\x74\x94\x1e\xd3\xcc\x07\x76\x38\x7d\x3d\x21\x44\x11\xff\x71\x21\x33\x7b\xc0\x35\xc5\xd2\x34\x33\x67\xc0\x52\x7d\xc7\x6d\x61\x1a\x1c\x1b\xee\xbb\xd3\x5d\x91\xc0\xd6\x3f\x31\x48\x93\x34\x54\xc0\xb2\xd1\x0a\x47\x03\x9c\x87\x98\xa9\x6e\xdc\x0c\x73\x3a\xeb\x97\xbe\x52\x2b\x99\xcb\xfc\x89\x2b\x98\xdd\x57\xbf\x2a\xa3\x94\xbc\xdc\xa0\x03\xdb\x79\x54\x84\x0a\x8f\x78\xfd\xeb\x59\xac\x0b\x09\x2c\x19\xef\xfd\xf0\x49\xd8\x24\x38\xf0\xf4\xac\x2b\xf6\x51\x94\x5a\x1d\x0b\x5e\x2e\x63\xaf\x18\x87\xfd\xeb\x77\x7f\xb0\xc5\x1d\x04\xdd\x8c\x8e\x18\xa0\x5a\x7c\xf8\x55\x02\x87\x6f\x0b\xc5\xe9\x1c\xf1\x1f\xef\x5a\xeb\xf1\x5f\x47\x13\x9c\xab\x99\x29\xb3\x7c\x08\x87\x91\x7a\x08\x18\x30\x5b\xc0\x4a\x85\xfb\xb7\x7f\xce\xec\x7c\x70\xcb\x98\x79\x41\x4d\x8a\xac\x92\xe0\x4f\x31\xf7\x42\xd3\x7b\xad\xd0\xa8\x7f\x71\x5d\x83\xe5\x5c\xe3\x23\x07\x4e\xf7\xe8\xdc\xa9\x39\x2d\x2a\xdc\x1a\xa4\x8b\xcc\x8a\x86\x9c\xb0\xff\xc8\x8b\xa0\x38\xe5\xca\x34\x9e\xc4\x8b\x53\x0b\x67\x28\xc1\x5c\xa7\x16\x68\xc9\xbf\x1e\xda\x51\xd3\x80\xfa\xfc\xf0\xca\x0d\xc5\x12\x39\x61\x01\xb7\x95\x81\xe2\xbd\x3f\xba\xe9\x5d\
xb4\x25\xfc\x15\x13\x37\xa4\x32\xe1\x30\x15\xdb\x8c\xba\x7f\x4c\x56\x63\xd8\x73\x0d\xd4\xe4\x27\x16\x50\x6f\xd1\xf3\x17\x47\xe1\xf3\x2e\x24\xe5\x34\x8f\x33\x8d\x11\x13\xf3\x7f\x31\xdd\x77\xd5\x6e\x49\x08\x19\xb0\x73\xd5\xe4\x0b\x60\x98\xd4\x31\x0d\xa6\x29\x77\x65\x04\x4c\xa4\xfc\xf8\x7e\x38\x77\x45\xaa\x4b\xcb\x4c\xd6\xad\x5a\xdf\x7b\xa5\xcb\xa4\x60\x18\x3a\x82\x39\xec\x66\x8f\x0b\x7c\xf7\x8f\x07\xb3\x68\x33\x93\x0a\x06\x5c\x64\xf8\x74\x05\x1b\xc8\xcf\xcd\xac\x4b\x68\xa7\x53\x22\x5e\x22\x76\x47\x68\x8b\x3f\xb9\x1d\xde\xea\xf8\xc1\x11\x6c\x0f\x31\x9b\x28\xf7\x33\x43\xd1\x0c\x9e\x73\x22\xe3\xe4\x94\xbf\x38\x28\xe7\x18\x88\x62\x89\x8c\x4f\xde\x10\xf4\xf8\xe5\x2b\x39\x8c\xec\x8c\xcc\xd1\xfd\xc4\x51\x96\x67\x98\x9b\x32\x73\x8a\x1a\x17\xd0\xe5\x23\xe0\xed\x72\xfc\xd3\x09\xc8\xff\x7b\xfe\xca\x87\x19\x01\x17\x86\x30\x0c\xc7\x68\x4d\x6c\xec\x54\x10\x96\xe9\xa0\x02\xb7\x0c\xe7\x87\xbf\x80\x63\xd0\x5f\xbe\xc5\x10\x2c\x2e\x0b\x78\x2a\xde\x2d\x63\x92\x9b\x4b\xdf\xb9\xbf\x3f\xff\x7e\x7e\xf0\xdf\x77\x53\xf6\x9a\x39\x7d\x41\x46\x25\xe8\x3d\x3f\xc9\xcd\xd7\x72\x40\x7a\x63\xc1\x49\xfa\x37\xb9\xca\xe4\x46\x83\x05\xac\x08\x91\x3e\x1f\xa6\xb8\x65\x36\x64\xc0\x2e\xff\x9e\xdf\x19\xa7\x91\x07\x2d\x66\x63\x30\x8f\xa9\x4d\x8a\xee\x11\x17\x0a\x98\x78\xd0\x54\xd5\x20\xc2\x84\xfd\x27\xdb\xfa\xa0\xf6\x0a\xf5\xc9\xdf\xa7\x80\xe0\x37\x21\x15\x90\x22\xf2\xb5\xf2\xb9\x6c\x61\x20\x97\xff\xa7\x2b\x21\x96\x05\xf5\x49\x91\x28\x58\xcc\xbf\x65\xba\x05\x9e\xd2\xc9\x5f\xdc\xb3\x59\xaa\x91\xdb\x42\xea\xf9\xbf\x73\x3b\x05\xc7\x7e\xcd\x61\xd9\x46\x05\xb1\x35\xc5\x07\x2a\x1b\xd1\x1a\x57\xf8\xff\x63\x69\xf1\x0b\x92\x77\xc9\x2f\x25\x6d\x8b\xd3\x0e\x0f\x25\x0c\x1d\xe4\xcb\x44\x85\x31\x32\x16\x99\xf7\x8d\xf2\x73\x2f\x89\x3d\x85\xd3\xff\x3c\x0a\xd5\x7b\xf5\x04\xed\x14\x3e\xee\x50\x68\x23\x85\xe6\x76\x9c\xab\x2e\x3e\x3a\x16\x24\x3f\xc8\xcf\x55\x6b\xc2\xf4\x73\x1e\x51\x9d\x05\x65\x79\xf2\x4c\x0e\xa9\xd3\x23\xcb\x96\x26\xe2\xe7\xe0\x1a\x23\xe2\xbd\x7d\xe8\xb6\xac\x16\xb2\x20\x0c\x4d\xfb\xca\x4a\x5e\x5c\x9e\x8e\xcb\xf8\
x15\x22\xa4\xbe\x28\x08\x9d\x92\xfe\xc8\xcb\x08\x7b\x5a\xc3\x98\x78\x51\x79\xb4\x40\x4f\x5f\xfa\x82\x4c\xfe\x2e\x07\x4a\x13\x9d\xc8\x00\xd7\x6e\xd5\x92\xac\x78\xdb\x5d\x25\x43\xb8\x7c\xe5\x9a\x4f\xcd\x30\x0a\x53\xfc\xbc\xc4\xe7\x2a\xb5\x9f\xfe\xc5\x1c\x83\xb2\xec\xab\x04\x20\x46\xf6\x61\x18\x1e\xd1\x94\x21\x66\x87\x5d\x9e\x69\xab\xaa\xf5\x49\x10\x2b\x1c\x1c\xc9\xff\xc4\x03\xd4\x44\xc6\x09\x18\x1e\x8b\x27\x46\xe7\xf0\xff\xb8\x2c\x51\xcd\xf5\xb2\x64\x65\x6a\xfe\xb3\xb3\x49\x9b\xae\xc4\xbc\xa6\x48\x3a\x35\x94\xe4\x24\x85\x6d\xa5\x9b\xc1\x33\x98\x15\x36\xc9\x2c\x9a\x58\x71\x63\x78\x0b\x66\x80\x3a\x3b\x09\x46\x0d\x18\x05\x01\x7e\x27\xfc\xe4\xb0\x8d\xd6\xb4\x7a\x08\xf8\xcc\x91\x9f\x66\x5f\x73\x39\x63\xd0\x1a\xb2\xed\xc8\xd6\xa7\x98\xda\x5e\x95\xc8\xa0\x78\x28\xb9\xc5\x92\x5d\xec\x8f\xdf\xa5\x78\x9f\x7a\xd8\x49\x96\x96\xa7\xad\xa3\xb9\xe6\x7f\xce\xe8\xc2\x6a\x0b\x1a\x96\xcb\x85\x27\x86\x7e\xd2\x09\x3b\x6c\x6e\x12\xd6\xc3\x19\xb1\x4d\x21\x1a\xea\x66\x27\xd7\x8d\xd5\xd3\xd5\xc9\xca\x1b\x87\xb0\x97\x14\x05\x1c\xb5\xf1\xc1\xb6\x07\x74\x7a\x37\x3f\x1a\x6c\xf0\xf5\xe3\x2b\xee\x7e\x7c\x59\xcb\x69\xde\x0a\x03\x71\xa3\x4c\x4f\x1e\x55\xfa\xce\x0f\x43\xb1\x9a\x82\x75\xc4\xe3\x52\xd3\xd5\xbb\xd1\xfc\xe0\xbf\x7d\xfc\x50\xe5\xb1\x7d\x04\x52\x66\x08\xfb\x86\xad\x19\x26\xdd\x6c\xf3\xb3\xfc\x13\x1b\x09\x87\xe7\x1d\x06\x15\x0e\xff\xef\x9c\xc9\xa3\xf3\x2e\xec\x57\xef\x4a\x36\x7b\x5c\xac\x8e\x09\x54\xc0\x48\x55\x54\x7e\x85\xd6\xb9\x2a\x48\x93\xd0\xf4\xed\x5c\xca\x3a\x9b\xea\x7e\x80\xc0\x44\x91\x16\x11\xc1\xdc\x20\x92\x13\x99\x2e\x71\xb6\xe6\xfb\x52\x32\x33\xd0\x8f\x3f\xcd\x3e\xbd\x41\x38\xf9\xc8\xb7\xaa\xd0\xaf\x15\x03\x6e\xe3\x39\x67\x1d\x15\x3c\xa6\x1f\xd0\xd6\xbd\xef\x80\xc2\xbd\x48\xf5\x32\x98\xc8\x20\x23\x18\x12\x36\x13\x14\xa7\x3b\xc9\xb6\x52\x44\xb8\xd1\xc8\x25\xdc\xb4\xff\x64\x9e\xb1\x8c\xf3\x05\x9c\x9b\xdc\xcd\xe1\xcb\x4a\x70\x43\x4e\x0e\x61\x23\x08\x72\x47\x42\x2a\xb9\x80\x7d\x57\x92\x58\xd7\x12\x7b\x69\xea\x60\x78\x8b\x1a\xc4\x25\x67\x22\x52\x52\x50\xf8\xe8\x16\xf7\xca\x8b\x8e\x37\
xb4\xff\x74\x06\x4f\xcf\xd4\x47\xc6\x48\xd1\x0b\xcb\x2d\x5b\xe7\xc1\x4b\xb7\xdc\x84\x51\x06\x87\x09\xeb\x93\xc0\xb2\xd2\x4a\xfa\x1d\xea\x2d\x8d\x9c\xeb\xb3\x34\x87\x38\x9f\x9d\x66\xcd\xdd\x97\xbb\x13\x27\x61\xff\xdf\xbe\x38\xe4\xca\xda\x7c\x53\xbe\x7b\x88\x45\x01\xea\x83\x32\x51\xdf\x91\xaa\x2e\xa1\xc2\x37\x58\x88\x93\x63\x70\x49\xea\x5a\x8c\x9b\x9a\xe8\xba\xb2\x87\x70\x18\x1a\x67\x5e\xfb\x9c\x9a\x3f\x86\xa0\x35\xad\xca\xfd\xd3\x07\xf2\xc2\x7e\xb5\xbb\x32\xc5\xb5\x0c\xa3\x10\xfe\x65\xe1\xb9\xd7\xcd\x64\xb7\x37\x4d\x70\x8a\xbb\xc4\xeb\x2a\xc9\xed\xf3\x1e\x0c\xe1\x44\x4a\xc5\x1e\x41\xd2\x86\xef\xde\x70\x7c\x08\xbd\xc4\x05\x06\x87\xe4\x80\x5e\x81\x6f\xc0\x04\xca\xcf\x92\x71\x81\xb7\xd3\xae\xe7\xc4\x3d\x1d\xf7\x07\xae\x32\xee\xab\x17\x63\x38\x24\x16\x69\x43\xb4\xfd\xfb\x02\x05\x87\x12\x8b\xcf\xe5\xbe\xfd\x72\xc9\x1b\x25\xbf\xd6\xbc\xae\xb3\xa4\x71\x5f\xcb\xb4\x42\xee\xae\x6c\x7b\x2e\x55\xcd\x6e\xa0\x40\x58\x7e\xee\x1d\x73\x4e\x83\x3c\xd1\x16\xd3\xc9\x6c\xc4\xe7\x03\x0b\x29\xa7\x75\x9a\xf1\xad\x27\x2c\x95\x44\x1b\x2c\x9f\x42\x6a\xe6\x21\xd8\x4c\x30\x1f\x89\x73\xdd\x1a\xfe\xf6\xe8\x4b\x6d\x3f\x69\xdb\xbd\xcb\x3a\x69\x1b\x55\x18\xb8\x4e\x28\xbd\x42\xf3\xfa\x0f\x2f\x23\x53\xa4\x72\xd4\xf1\xde\xeb\x24\xdf\x7a\xe8\x39\x6f\x39\xb7\x65\x25\x77\x80\xd8\x1a\xca\xe0\x39\xb2\x49\x12\xee\x97\x9e\x70\x3d\xd9\x3f\x5f\x34\x23\xd1\xaa\x86\xa5\xcf\x34\x5e\x4e\x64\x55\xe5\xab\x18\xe1\x37\x71\x66\x9c\x12\x5b\xf9\x29\x9c\xee\x27\xe7\xd5\x33\x56\xc7\x7b\x4f\xc2\x14\x34\x4c\xdb\x03\x55\x2e\xc7\x41\xee\xc0\x52\xca\xcb\x95\xf0\xdc\x94\xe3\x94\xc9\xae\xd5\xb8\xec\x20\x75\xe4\xd9\x88\xcc\xdc\x23\x4a\xba\x11\x5c\x99\x3d\x06\x25\x94\x9c\x92\xce\xd9\xa8\xac\xb8\x9f\x1f\x5b\xcf\xef\x1f\x79\xff\x65\x28\xda\xc9\x1f\x34\x7b\xdc\x14\x06\x93\xe6\xd3\xfc\x28\xaf\x45\xdd\xdf\x0f\x55\xf8\x69\x0c\xdc\xc8\x79\xa1\xb9\x1a\x22\x98\x91\x9f\x86\x94\x5b\x3a\x58\x99\x8d\x10\x48\xcb\x4f\x4c\x66\x79\xdc\x14\xd3\x89\xa0\xd6\x7a\x56\x7a\x6c\xcc\x11\x31\x41\xdd\xf5\x82\x94\x63\x3a\xe6\x15\x98\x8f\xf1\x18\xdc\x63\x22\xf3\
x97\x2f\x2b\x93\x17\xaa\x66\x99\x47\xec\xed\x4d\xc1\x28\xfd\x1a\x93\x6d\xcb\xf7\x70\xba\x79\x57\xd6\xeb\x23\xfc\xc4\xae\x46\xc3\x5e\x4e\x12\xdd\x7b\x53\xde\x64\x00\xc3\x51\x01\xce\xb2\xda\x97\xbc\x13\x55\x78\x3d\xc5\x0d\x66\x80\x67\x8e\x0f\x6d\xa6\xdf\x55\xfd\x22\xd1\x93\xf5\x75\x80\xa3\x78\xfb\x51\x66\x20\xd5\x2b\xb7\x11\x02\xf1\x7f\x9f\x53\x28\x18\x43\x4f\x2a\xc7\x97\x23\x08\x19\xc1\x6a\x4f\x61\x7f\x3d\x3f\xd8\x65\xa9\xad\xb5\xd0\xe0\x42\x40\xa5\x7c\x13\xba\x4b\xa8\x23\x18\xad\x68\x44\xac\x8b\xd6\x00\xc7\xfb\xfd\x08\x76\xe7\xdf\xe3\x82\x1d\x94\xac\x5d\xad\xc6\xc4\x3f\x3a\x3d\x66\xd9\x43\x91\x13\x02\x47\xa8\x5f\x1f\x19\x2f\x70\xd9\x50\x2e\x91\x13\x42\xa2\xd4\xb3\xec\x48\x34\xcb\x34\x8f\x01\xf5\xeb\x2a\x0b\x6d\xf0\x0d\x64\x13\x80\x3b\x93\xaf\x81\x49\x0b\x91\x4d\x2f\x48\xc4\x4f\xcf\x35\x9a\x01\xf3\x2f\x1f\x8e\xf4\x0c\xf0\xa1\xe3\x0d\x4d\xfc\xef\xec\x37\x0d\xb4\x37\xa6\xbd\x18\xdf\x4e\xe9\x56\x45\xd0\xba\xcd\x02\x97\x32\xd1\xf9\xe4\xb1\xa6\xdc\x1a\xa8\xf8\x43\xfd\x3a\x63\xb1\x05\x1a\x21\xeb\x0e\xdc\x9a\x66\xd4\x7f\xb8\xd9\xb3\xf5\xa6\xd8\x4a\xc9\x4f\x5f\x20\x86\x4d\x62\x0b\xef\x68\x4b\x9b\x37\xfe\x46\x3d\x30\x36\x5e\xe3\x85\x5e\xe3\x49\xbf\x14\x46\xa3\xa0\x56\xf0\xe5\x16\x86\x39\x00\x77\xe9\x7e\xc7\x28\x9b\xc5\xe9\x9b\x33\xc6\xf3\x53\xc3\xd8\x2b\xd6\x3b\x75\x8c\xf2\xda\x56\xd1\xa7\xaf\x48\x18\x61\x7b\x84\xeb\xa6\xed\xf4\xf1\xf8\x10\x61\xb5\x39\x4d\xc7\xc6\x97\xbd\x32\x39\x58\xa7\x3a\xfb\xb2\xc7\x4e\x4f\x54\x3d\x50\x6e\x86\x44\xa7\x91\x1f\xbf\x6c\x9a\x18\xb3\xcc\xbe\x71\x8a\xc3\xfd\x8f\x6e\x17\x1a\x01\x2f\x4e\x17\xb2\x94\x05\x46\x06\x37\x00\xf8\xd0\xb4\xc0\xa3\x2e\x45\xa8\x96\xd9\x5b\x5b\x68\xe3\x33\x13\xe6\x64\xd3\xf4\xe3\xf7\x30\x5c\x96\xb2\x9e\x66\xca\x6d\xe1\xd0\xa9\x37\x1f\xa5\x72\xa5\xbb\xd8\x71\xe6\xdf\xb9\x64\x8b\x7a\x46\x02\x40\x08\xbc\x33\x16\xc2\xc3\xf9\xca\x08\x60\x7f\x4a\x7e\x2b\x61\xb4\x5c\xab\x69\x1b\x51\x59\x20\xa6\x88\x40\x05\x06\xe6\xb3\x71\x1b\x2d\x24\x9d\x82\xa2\xd8\x1f\x38\x23\xd1\x22\x49\x3f\xed\x15\x0b\xb5\x7a\x73\x72\x6a\x05\x8d\xf2\x9f\xef\x6e\
x3f\x77\xec\x7a\xb8\x36\x62\x00\xf9\x95\xb0\xc9\x21\x65\xab\x06\x12\x6f\xad\x6b\x70\x09\x60\xd3\x8f\xb8\xd0\x78\x3f\xd3\x09\x24\x19\xb6\xd3\x69\x5b\x1c\x8f\x2a\xbf\x4a\xaf\x64\x5c\x2e\x5e\xee\x9f\x71\x8a\x9d\x95\x31\x5c\xf0\xdd\x34\xd3\xbf\x7b\x84\x2d\xdf\xbc\x74\x10\xaf\x86\x9f\x6f\x10\x8e\xdd\xfb\x51\x07\x4a\xe6\xfb\xaf\xb2\x5e\xad\xc0\xbc\x7f\xe2\x29\xa6\x61\x81\xab\x76\x21\xbc\x60\x26\x67\x2f\xca\xcf\x11\xaf\xe0\x74\x8d\x25\x7c\x38\x5c\x79\xa3\x46\x5d\xc0\xb1\x7b\x03\x03\x64\x49\x6e\x8a\x7f\x5e\x19\x14\xc7\x63\x51\x05\x87\x54\x9f\xfe\x2f\x36\x05\x78\x86\xee\x40\x64\x91\x9c\xd7\x77\x5b\xc7\xf3\xbb\x73\xc7\x14\xb0\x62\x9b\xcd\xeb\xbc\x84\xa7\x3c\x9a\x91\xeb\x76\xd3\x03\x4c\xbd\x6d\xd4\x54\x9f\xd0\xfc\x29\x5b\x9e\xf0\x07\x2b\xc4\x7b\x6a\x44\x76\x8e\xf2\x52\xa5\xf7\x6c\xca\x0c\xc3\x60\xd3\xbe\xef\x64\x13\xe1\x70\x66\xeb\x1a\xec\xfc\x3a\x46\xb1\xd1\x7c\xde\x40\xd4\xd2\xd4\xd5\x3f\x2a\xb7\xcb\x97\x8d\xfc\x89\x73\xf9\xbe\x41\x76\x9e\x42\x2a\x18\x29\x02\x22\x59\xf7\x4d\xa3\x2e\x62\x5c\xd7\xc5\x43\x18\xe4\x58\xda\xe6\xd5\x8b\xce\xbd\xae\x8d\x93\xc3\x4c\x69\x71\xc3\xfb\xef\x4c\xf0\xfa\x86\xcb\x3d\x86\x37\xe8\x01\x7d\xd4\xd5\x7b\xf5\x21\x8d\xd8\x02\xc4\x5d\x88\x74\xb4\x2f\x0c\x26\xcc\x46\xf2\xa3\x18\xb5\xf7\x7a\xcd\xdd\x01\x9f\x38\x72\xc9\xda\xf5\x31\xaa\x61\xf1\xfe\x72\xd2\x83\xdc\xd8\x4b\xbf\xc6\xa0\xf7\x03\x96\x3f\x15\xba\x59\xe4\xb9\x2d\x1a\x25\xd4\xd6\x34\x82\x06\x9c\x69\xe6\xe8\x26\xdb\xc8\x15\x40\xe3\x38\xff\xe2\x68\x59\xc7\xcd\x50\x1f\x45\x68\x32\x3a\x42\x99\xcb\xed\x30\x57\x4e\x54\xfb\xc1\xf5\xe3\x38\x1e\xb7\x8b\x90\x2f\xe0\x7b\x73\x22\x2e\x05\xd8\x9d\x1e\xec\xde\xfe\xe6\xd1\x17\x7c\x4a\x7e\xb3\xe0\x4f\xa0\x7b\xd5\xc4\xda\x05\x8c\x6f\xb4\x50\xad\xbc\xa6\xbf\xd1\x29\xbe\xb0\x83\xea\x07\x08\xa2\x79\xf3\x0c\xa6\x73\x5d\x6e\x13\x3b\x5c\xfc\x7d\x4d\x3d\xcb\xfc\xc4\x72\xec\xe3\xe4\x23\x57\xb5\x26\xec\xc7\xe2\xe1\xe7\xb9\x98\xfc\x97\x5f\x12\x2e\xef\x15\x41\x3b\xca\xd0\x5d\xb2\xc6\xdb\xd9\xc6\xe6\x3d\x77\x81\xe5\xff\xc9\x0d\xb5\x7c\x0f\x48\x91\xde\xfd\x58\x8f\x7c\x29\xfc\
x8f\x2c\xe3\xbf\x9c\x13\x17\x47\xfd\x38\x82\xbb\x6a\x5e\xc0\xa5\x5d\xf6\x44\xc7\x37\x8f\xef\xb1\x9b\xff\x1e\x9a\xf3\x07\xff\xe4\xf1\x9e\x80\xa3\x95\x8e\xb2\x37\x4a\x16\xd6\x23\x19\x3c\xf6\x34\xd0\x7c\x75\xd5\x81\xcc\xe6\x03\x44\x10\xea\x7e\xca\xed\x28\x62\x76\xba\xb6\xa3\x01\xf9\x46\xba\xeb\xfb\x8f\xde\x60\x8f\x6c\xb3\xd8\xb2\xf2\x6f\x8e\x7c\x12\x24\xf6\x9b\x13\x5c\x9a\xc1\x8b\x15\x7c\x4a\x5f\x3c\xb1\x47\x86\xf8\x1f\x9f\xec\xba\x52\x26\x56\x9b\xde\x8b\xfd\x5c\x13\x2c\x0e\xbd\x3d\x56\x30\x76\x59\xce\xe7\x46\xfe\xfe\x8b\x2f\x4c\x01\xf9\x70\x7a\xc3\xb4\x94\xd1\x3e\xaf\x0a\x6b\x7c\x79\x1e\xf4\x69\x48\xa0\xd9\x92\xb6\xca\xda\x0d\xd9\xfe\xeb\xe9\xac\xf2\xf6\x8c\xe9\xc9\x4d\xdf\x55\xaa\x0e\x48\x25\x5e\x01\x53\x5d\xb5\xc1\x16\xff\x13\x57\xb5\xcc\xc3\x9c\xaf\x60\xbe\x43\xae\x95\xd5\x48\x98\x98\x88\x44\x3c\xa8\xde\x9c\x52\xfe\xf8\x11\xd2\xa5\x58\xb1\xc2\x97\xf9\x5a\x8b\x85\x39\x3d\x15\x7f\x05\x8c\x5b\x66\xf6\xd6\x60\x0e\xff\xfe\xff\x58\x86\xad\x1b\xd5\x13\x8e\x84\x58\x3b\x48\x52\x44\x11\xbe\x80\x53\x87\x34\xec\x7f\xd6\x3b\x23\x3d\xd2\xcd\x2c\x8a\x7d\x23\xef\x83\xd7\x28\x57\x30\xa6\x30\xdc\x0d\x90\x73\xee\x07\x32\xb0\x7f\x5c\xe6\x6f\x42\x56\xfe\xa7\xd2\xca\xb7\x01\x92\x15\x4f\xb0\x08\xd7\x3f\x8e\x5a\xe8\xd2\xaf\x78\xee\x7f\xd6\xfa\x51\x00\x9e\xb1\x1e\x26\x6f\xb3\x75\xe5\xb7\xb3\xb4\x4e\xfe\x48\xc5\x21\x12\x95\x80\xf8\x7f\xb1\xc9\xaf\x67\xb4\x40\x01\x07\x43\x8f\x95\x3f\xf3\xee\xd8\x11\xbf\x34\xf2\x0f\x7e\xc7\x97\x94\xc3\x92\x93\xfd\x8a\xf3\xd6\x68\xfa\x1d\x69\x77\xa5\x0f\x66\xf3\xb5\xdb\x1d\x2a\xfe\x5f\x1c\x6c\x2e\x87\xd9\x77\xfe\xda\xc8\xd6\x55\xb9\xf7\x1b\x0a\xe1\x6d\xb5\x6f\xc0\x54\xd5\x05\xa1\x8d\xec\x74\x7f\x79\x14\xf9\xde\x5d\x94\xde\x2f\xad\x99\xa1\xc8\x76\xc9\x76\xa9\x9e\x8c\xa7\xd0\x0d\x01\x27\x94\xdb\x2e\xfe\x27\xee\x64\xc7\xdc\x41\xe5\x53\xab\x44\x88\xb2\x1c\x7a\xb2\x64\xb8\xfa\x76\x01\x57\x90\x6d\xa1\x72\xb6\x03\xfd\x5b\xcb\xc4\x8a\x2f\xc6\xbe\xbb\x48\xe5\x69\x2a\x0e\x88\xb5\xa0\x01\x93\x03\x74\x21\xb3\x1f\xd1\xfb\x1f\x1c\x40\x79\x12\x06\xa9\x7c\xd5\x98\xb6\x39\xef\
xb5\x7a\xfe\xda\xf7\x03\xf9\xbb\xcc\x33\xec\x3f\xbc\x09\x9f\x49\x4a\x7b\xcf\x14\x39\x11\x37\x5d\x1a\x0c\x1b\xa9\xfa\x38\x86\x4d\x9d\x62\x1e\xb9\x76\x41\xfe\x3c\x4c\xb7\x0d\x7f\x71\x96\xe7\xe6\x5d\x76\xa2\x68\xb1\x60\x23\xfa\x73\x84\x33\x2d\x7f\x4a\x98\x61\x76\xd9\xd5\xe9\x24\x70\xfe\xd3\xfb\x8e\x46\x14\xfe\xa8\x58\xa0\x32\xef\x0f\x82\xa1\x86\xf7\xb1\x9a\x7b\x9f\x2c\xc1\x0c\x45\x31\x0c\x57\xc7\x10\xdc\xbf\xdc\x54\x46\x97\x3e\x54\xe4\xd3\x20\xb3\xc9\x33\x2d\xff\xfe\x6d\x6d\xb3\xd4\xc4\x47\x9b\x07\x46\x8e\xb8\x2b\x5a\xa8\x43\x14\x3c\x66\xfc\xc1\x8e\x0d\xa2\xf0\xbb\xca\xd5\xac\x1c\xab\x9c\xeb\x83\xb6\x83\x4d\x14\x4e\x61\xa2\x65\xa8\x78\xdb\x3f\xcc\xa7\x73\xd5\x79\x31\x76\x9d\xcd\xf4\x65\x9c\xbe\x25\x97\xf6\x3b\xdc\xbb\xee\xf2\x51\x53\x61\x21\xb9\xc6\xfb\xee\x34\x38\x61\x08\xc4\x75\x54\xdc\xa9\x2c\x20\xaf\xff\x83\xc5\x39\xc9\x1e\x46\x1c\x91\xd4\xc3\x7a\xc0\x03\x83\xc5\xe6\xdb\xaa\xa9\xad\x7e\xd8\xcf\x4d\x55\x8a\xfb\x35\x88\x6c\xf9\xf4\x45\x79\x60\x4b\xc1\x0f\xa9\xd1\xf1\x6b\x98\x3a\xde\x1f\x1d\x18\x35\x3a\xc3\xf7\x9d\x5b\x9e\xf2\x29\xe9\xc6\xd9\x06\xa6\xd4\x21\xc8\x85\x48\xd7\x23\x1d\xbc\x8a\xa9\x1a\xdf\x94\x75\x76\xb7\xee\x15\x75\xd8\x17\xc0\x6e\x26\xfc\xe1\xfd\xe3\xe4\x22\xff\x94\xa2\x26\x1b\xe7\x00\xda\x9c\x0f\xd3\x90\xd5\x3e\x6d\xc1\x75\xae\xc7\xa6\x43\x25\xd8\xfc\x85\xe2\x60\xe4\x79\x6f\x73\xd4\x29\x0e\x86\xd9\xe6\xb8\x8e\xce\x4e\x3d\x76\x33\xc1\xf0\xfc\x27\x07\x97\xa0\x62\x1c\xff\xd5\xa6\x03\x9c\x6a\xba\xfa\xae\xd9\xba\xcb\x6c\x53\x98\x3a\x2e\xd3\xc7\x07\x3b\xb5\x74\x11\x31\xf2\xeb\x61\x93\x52\xe6\x69\xb6\xb9\x98\x7d\xd0\xb6\x36\x76\xae\x8a\x8a\x9a\xb8\xff\xcf\xef\x81\x79\xf9\xa5\x8d\x80\x36\xef\xd7\x44\x9b\x0e\xf4\x56\xe1\xf3\x9b\xb3\xce\x1f\x91\x84\x6b\x20\xeb\x35\xf3\x30\x1c\x52\xfb\x30\xda\x4a\x45\xa7\xe2\xd9\x45\xb5\x78\xdb\xdd\x38\xa8\xb6\xe1\x3a\xec\xf8\xa3\x6b\xb9\x47\x68\x29\x84\xc1\x2b\x94\x9b\x8a\xda\x0e\x07\x38\x14\x13\x54\xbc\xf7\x5c\xfe\xb8\x43\xa2\x2c\x42\x22\xb7\xc4\xb2\xae\xf3\x7e\xe0\x58\xa5\x1c\x75\x4a\xc4\xee\x3c\xee\x19\xd9\x7e\x03\xee\x9f\xce\
x67\x1b\xc5\x75\xc5\xbd\xfc\x1e\xb0\x26\xee\x6f\x4c\xcb\x3b\xf7\x9f\xbd\x7c\x8a\x71\x49\xb8\x0e\x42\x34\x5b\x4b\x79\x9d\x66\x30\x4b\x9b\x07\x37\x08\x55\x3f\xff\x30\x6d\x85\xbf\xfa\xc2\x26\xe0\x8b\x7d\x4d\xe9\x7f\x7b\x97\x34\x65\xe3\x45\x7a\x5a\xf8\x6b\x35\xae\xb5\x0d\x02\xfb\xc2\x38\x62\x14\x60\x95\xe4\xba\x9c\x76\xaf\x8c\x91\xcb\xec\x62\x2d\xa1\x02\xc3\x61\x3c\x58\x1e\xd5\xc1\xf4\xf4\xa5\xa0\xd1\xbb\xe5\x53\xbe\x1d\xc5\xe1\x7f\x38\x42\x39\xf6\x2b\x62\xb6\xeb\xb5\xb6\xe8\x46\xf8\x91\x5a\x91\xaa\x8d\xc4\xd9\xad\xf9\x5b\x27\x36\x2f\x25\xcf\x18\x04\x19\x5c\x7e\x92\x8a\x73\xbc\xc2\x6f\x0a\xb9\x07\xd2\x69\xe9\x3b\x18\x1e\xa3\x34\x03\x64\x0d\xca\x53\xbd\x4d\xab\xae\x61\xdb\xff\xc5\xba\x6b\xfd\xa2\xb4\x64\x7d\x1a\x86\xcf\x70\xd0\x4e\xf3\x3c\x0d\xb4\x4f\x80\x99\xfa\x95\xcc\xd7\x2c\x35\xb8\xe9\x07\x77\xa6\x2f\x79\x36\x9e\xcb\x3d\xb9\x80\x43\x00\xcf\x81\x79\x5d\xa5\xb3\x1e\x53\xfc\xe0\xb4\x8c\x76\xab\x39\x43\xe9\x79\xe7\x96\x02\x6c\x1e\x45\xd8\x75\x81\xd3\x25\x88\x21\xd6\x73\xda\xdb\x2a\x96\xc8\xed\x08\x47\xdc\x5d\x7d\xa8\xba\x2d\x54\x1f\x67\x99\x21\x4f\x89\x9a\x4f\x72\x3b\xad\x98\x2a\x94\xe0\xd0\x9d\xa4\x18\xdc\x66\x5f\xf9\x87\x4d\x7f\xfb\x04\x3a\x1a\x20\x15\xfa\xb5\xd0\xc0\x4b\x44\x19\xfd\x77\xce\x58\x4d\x75\x35\x57\xd7\x6c\x4a\x31\x1b\xb4\xd1\x09\x92\x8f\x41\xaa\xc1\xcb\x02\xd7\x1c\x99\xf2\x70\xaa\xde\xd9\xdf\x7b\xd6\xf3\x04\x31\xe5\x1e\x6a\xe9\x36\x30\xcf\x9d\xf0\x6c\x5e\x53\x89\x34\xd5\xfd\xce\xf5\xc3\x40\xdf\x3c\x77\x66\xc1\x2a\x67\x29\xba\x99\x21\x4d\x55\x4b\x50\x25\xcf\x6d\x2d\xc6\xe8\x8f\x4e\x6d\x91\x57\xff\x8c\xa8\xea\x50\x16\x01\x81\x28\x10\xf0\x92\x26\xfb\x27\x52\xaa\xaa\x05\x7d\xbf\x70\x48\xf8\xca\x42\xdb\x2f\x50\x35\xbd\x5d\x57\x38\x0e\xf4\x4e\x86\x97\xd8\xb4\xd2\xe8\x59\xe8\xad\x21\x8c\x1e\x7c\xde\x95\xcd\x91\xb4\xc5\x9e\x77\xcb\xbf\x24\x26\x5c\xab\xc7\xbe\xcf\xdf\x7b\xe3\x0c\xcb\x7c\xda\x40\x69\x7b\xaa\x77\x5a\xe2\xe9\xa0\xa8\x49\xda\x79\xc2\x5f\x00\x42\x32\xc2\x55\x13\xd8\xf2\xad\x9b\x71\x16\xac\xec\xbf\xd9\xde\x8a\xcc\xec\xb2\x92\x00\x49\x92\xb3\x77\xaf\xe4\
x15\xcb\x0b\x5b\x2d\x6c\x2a\xf7\x18\xc6\x21\x5b\xb0\x9e\x63\xb5\xcf\x9f\x44\x1a\xc3\x29\xb2\xab\xd4\x48\x91\xf6\xa6\x91\xbf\x38\xaa\xe1\xb6\xae\x25\x26\x7b\x59\x30\x79\x08\x04\x27\x2f\xe5\xa7\x26\x6b\x9e\xe0\x14\xa9\xcb\x8e\xe1\x63\x48\x1c\xa7\xde\xf8\xd8\x69\x3a\x5c\x7d\x2e\x59\xd3\xa6\x75\x97\xaf\x72\x71\x50\xac\xf8\x33\x28\x93\xb4\xe5\xab\x45\x8b\xe2\x5e\x2b\x1e\x6b\x91\x76\x60\xc4\xf5\x46\x6b\xab\x29\x73\xd3\x89\xcc\x1c\x5a\xf4\xda\x49\xa5\x1f\x8f\xb4\x1c\xa1\x07\x89\x9b\xf4\xb8\x63\xe3\x48\xf8\x1f\x54\x77\x2f\xcb\x7b\x64\x8b\xca\xaa\x77\x71\xef\x69\xa6\x85\x24\x2a\x77\x2b\x0a\x34\xfb\x73\x8d\x8e\x79\xfa\x7f\x75\x05\xa3\xc8\x89\xa4\x33\xb1\xa9\xee\x39\x26\xce\xb4\x6c\x30\xaf\x7f\x5e\x9f\x12\x07\x2d\x1c\x2d\x90\x3b\xf7\x0a\x87\xe3\x3d\x1c\x4d\x45\x70\x7f\xee\x4d\x18\xed\x5a\x53\xb6\x95\xb4\x16\x2d\x4f\x39\xde\xf6\x21\x4a\xa6\xc8\xc6\x32\x8e\xc4\x1c\x94\x4a\xce\xf5\xe1\x54\x64\xca\x12\x9a\x9c\x3a\x8d\xbf\xcb\x71\x4e\xbc\xcd\xcd\x40\xcf\xe6\x68\xc8\xb6\xbb\x9a\xfc\x59\x4f\x73\x6b\x3e\x92\x83\x8c\xd0\x93\x84\xf9\xed\x94\x55\x03\x7c\xe9\x8c\x90\xd0\x33\xae\xc2\xf5\x10\x37\x2e\x56\x4d\xe1\x57\x84\x94\xec\xae\x8b\x59\xed\xbb\x93\xd6\x18\x3c\xe6\x66\x7f\x72\x41\x8b\xdf\xf5\x69\x43\x6e\x73\x97\x46\xe9\xf9\xef\x70\xff\x9d\xac\x53\x61\x6b\x23\x4c\x47\xeb\x04\x52\x62\x20\x69\x8f\x75\x47\x14\xc2\xf4\x35\xc1\x60\x97\xf0\xe7\xbc\x2c\xf6\xd2\x39\x04\x32\xf7\xad\xef\x18\xe5\x97\x6c\x24\xb3\xf8\x19\x42\x3d\x45\xca\x20\xe5\xed\xac\x57\x8d\xc6\xfc\x24\x50\x07\x77\x8f\x97\xab\x6f\x98\x6d\x61\xa1\xef\x25\xa9\xb5\x67\xc3\xb6\xa5\x22\x83\xfa\xdf\xba\x4a\x46\xe0\xcf\x6c\xfe\xb8\x19\x5f\x69\x0e\x15\x74\x9e\xa9\xac\x04\x7e\x94\xb3\x69\x7b\xa7\xd6\x20\xf2\x05\x46\x67\xbb\x0a\x31\x1a\xe1\xcf\xf9\x58\xb4\xd2\xd9\xbb\xb9\x9e\x98\x4e\xa7\x9d\x79\x54\x64\x6c\x8c\x5f\xb7\x24\x0b\xa8\xd3\xfc\x27\x5e\xe2\x5a\x2a\x91\x49\x80\x46\x85\xc6\x23\xc4\x61\xe0\x53\xe0\x89\xfe\x2f\x86\xc0\x70\xcc\xa9\xf1\x82\xf7\xc9\x1d\x22\xf6\x3b\x7c\x84\xd4\x66\x63\x45\x3c\x3c\x65\xb4\xb7\x3d\x02\x8c\x90\x03\x3d\x88\x1c\
x99\x21\x69\x09\x35\xb5\x22\xac\x27\x72\x9e\x23\xdc\x86\x6e\x05\x26\x3c\x66\xe1\x52\x4e\x09\xbb\x24\x67\xbc\x24\x52\x39\x2a\x69\x66\x4c\x02\xcf\x16\x78\x66\x0b\x21\xc7\xd6\x9c\xbf\xbe\x09\xa3\x70\x58\x67\x2a\xb1\xe0\xf9\xd6\xa4\xf0\xe1\x77\x58\x4d\x77\x0d\x3d\xfa\x20\x21\x20\x7e\xe8\xda\x63\x64\xac\xa1\x04\x69\x39\x81\xca\xb9\xd5\xfe\x93\x6d\xa6\x2b\x7d\x61\x64\xbd\x01\xfb\x05\xf0\x25\x8b\x70\xc5\x41\x25\xe6\x42\xcc\x4f\x9a\xea\x52\xa6\xcd\x9b\xd2\xff\xb2\xf7\xd8\xe1\x64\x76\xc7\x30\x0b\xc3\xe3\xc5\xdf\xa3\xa4\x34\x3a\xd1\xb9\xfd\xd7\x70\xc3\x8e\xcc\xd3\xdc\x14\x50\x84\xde\xe9\x78\xa5\xd1\x99\x29\x5f\xdb\x02\xd6\xf2\x1a\x39\x48\x3e\xed\x78\x05\xe2\xe7\xb6\x3d\x60\x06\xc6\x2c\xb7\x53\xd8\xdc\x32\xf8\x90\xd3\xb1\xfa\x5c\xd4\xe7\xae\x43\x80\xee\x24\x23\x4b\x76\xe1\xee\xc0\xb7\x25\x55\x96\x9d\x66\x5a\x72\x3a\xdb\x60\x15\x16\x7e\x4e\xff\x25\x50\xe6\x61\x4c\x4b\xb8\xd8\xbf\x1b\x29\x0b\xb7\x64\xbb\x33\x94\x4e\xe9\xcb\xa5\x7e\xb3\x65\x7b\xc8\xb5\x90\x35\xa2\x90\x9d\xc0\x66\xfe\xa5\x18\xdb\x73\x4b\x3a\xc3\x9c\x1b\x8c\xab\x85\xfb\x68\xd9\x47\x98\x68\x31\x7a\x79\xd3\x4e\x10\xd6\x31\xd8\xd6\xc8\xe2\xa1\xe4\x93\xa7\x09\xbc\x83\xe5\x00\x45\xee\x50\xce\x63\x2d\xd3\x80\xb6\xb2\xde\x28\x9b\xd4\x12\xbe\x25\xb9\xb1\x67\x81\x58\xf8\xd1\x21\x1c\x84\xfa\x56\x8b\x78\x31\x3b\xe8\x73\xc8\xcd\x70\xb6\x09\x79\x49\x8f\x47\xaa\xba\x38\x5c\x08\x4e\xcc\x87\x27\xd9\x02\xb6\xe7\xe5\xe3\x71\x01\xdf\x17\x24\xe4\xcd\x56\x07\x93\xe1\xe0\xce\x55\xa3\x15\x89\x0a\xbe\x2d\x42\x80\xca\xaa\x1e\xd4\xb1\x68\x39\xf3\x73\x3a\xc9\x21\x59\x86\x72\x16\x68\x72\x0c\xd3\xe5\x35\xd5\xd6\x52\x77\xe4\x4c\x1f\xcc\xef\x30\x9c\xb1\xfe\x57\xae\x2a\x76\x30\x3d\x45\xed\x75\xdd\x4e\x07\x93\x3f\xf6\xe7\xcd\x17\xcc\xac\x3b\xcb\x2c\xe8\x8c\x9c\xab\xf7\xaa\x72\xf2\x2b\x0b\x28\x64\x5f\x2f\xab\x46\xea\xba\xc2\xc0\x1e\x16\x53\x3f\x8b\xb2\x02\x9b\x5c\xda\x01\x34\x61\x7d\x8e\x0c\x31\x57\xb5\x06\x53\x7b\xef\x7c\x5b\xdf\x87\xf5\x10\x4f\xe0\xf1\x95\xf8\xd7\x9c\xd1\xff\x9b\x53\x1c\x8c\xb4\x71\x5a\x85\x1a\x7a\x63\x40\x3b\x08\x2f\xa2\x1d\x1f\
x47\xa2\x6a\xc9\x5a\xc6\x41\x88\xae\x69\xa5\x86\x81\x1a\xa4\x81\x7e\xc8\x74\xd5\x1f\x87\x15\x81\x10\x0d\x12\x1c\xb4\x44\xcb\xb7\x80\x3a\x1a\x98\x0d\x56\xd1\x5c\x98\xf8\xf8\xd9\x12\x6a\xae\xaf\x57\x0b\xae\x19\x05\xa1\xda\x97\xc0\xad\x3d\xe7\xa9\x57\x12\x41\x8d\x7d\x61\x7f\xb7\xfa\x36\x8e\xdd\x71\x95\xde\x81\xea\x68\x50\x1e\xf7\xe3\xfb\x3c\x29\x9b\xd3\x49\xd5\x1f\x2a\x6e\xf0\xcf\x7a\xa0\x66\xe0\xa3\x5d\xe7\x28\x4e\x47\xc4\xd2\xf5\x12\xb8\xe0\x32\x5b\x98\x6d\x70\xb4\x72\x8a\xb8\xe5\xc8\x76\x95\x72\xa4\x1e\xc3\x59\xe3\xd5\x52\x6c\x32\x43\xf8\xaa\x34\xc2\x51\x6a\xbf\xd1\xea\xa4\x29\x16\x16\xdb\x21\xce\x94\x48\xea\x36\xbb\x9a\x51\x97\xf9\x8a\x83\x35\x75\xd1\xfd\xcd\xad\x32\x8e\x7e\x43\x51\x87\x3e\x8a\x3a\xca\x69\x39\xf6\x47\x40\x41\x52\x02\xed\x0f\x98\xe1\x3b\x35\x1d\xdb\x8a\x6c\x38\xa1\x12\x6e\x90\xf4\x87\x1a\x0e\x35\x9a\xe7\xf5\x73\xa0\xe6\x1b\xe1\xcc\x25\xde\xd5\x14\x47\xe7\xbd\x70\xb3\xcb\xb2\x82\x90\xf1\x4e\x99\x7f\xdb\xa8\xcc\x27\x0e\x1e\x63\x85\xf3\x1a\x9f\xdb\x7b\x8e\xf8\x77\xa6\x19\xc6\xc5\xf8\x8d\xe0\xd9\xd7\x57\xed\xd4\x52\x85\xc9\x4b\xb7\x18\x3a\x43\xc3\x69\x25\xee\xca\x5f\x3c\x5b\x42\x75\x31\xad\xa5\xbf\x7b\xe1\x1c\xa0\x82\x80\x94\x9c\xc7\xe2\x35\xe6\x74\x88\x40\x2b\x7c\x9c\x28\x5a\x16\x18\xd7\x84\xee\x54\x0f\x26\x4b\x81\xe6\xe7\x0d\x96\x29\x91\x37\x8e\xe1\xaf\x88\x71\x3c\x06\xfc\x53\xaf\xc8\x30\x9c\x76\x8d\x2a\xa3\x3d\x5e\x63\x5c\x8f\x77\xb4\xfd\x28\xf8\x6b\xe9\x6b\x06\x5e\xb6\xc7\x59\x01\x62\x16\x81\xe5\xb8\x83\x0f\xca\xcb\xce\x2a\x12\x03\x64\x52\xd1\xab\x11\x00\x3c\x67\x08\x60\x83\xe6\xb1\x9b\xdf\x60\xe1\x54\x02\x2e\x8a\x34\xc3\xae\x97\x0a\x0c\x2e\x71\x96\x85\xa9\xdb\x1f\x03\x2f\x24\x7a\xd2\x88\xa9\x9a\x32\xf8\x26\xdc\x95\x45\x93\x56\xb5\xde\x1a\x1a\xff\x1f\x57\x6f\xb1\x2c\xbb\xb2\x73\x8d\x3e\x90\x1b\x66\x6a\x9a\x99\xd9\x3d\x97\x99\x19\x9f\xfe\xc6\x5a\x7b\x7f\xe7\x9c\xff\xc6\xec\xcd\x08\x57\x65\x65\x4a\x1a\x63\x28\x65\x89\x70\x1d\xad\xac\xb4\xee\xf6\x39\xe3\x8d\x5a\x03\x34\xaf\x89\x22\xa7\xe2\xfb\x89\x87\xfa\x30\x0a\x6d\xff\x98\x54\x1c\x91\x02\x3f\x95\x1d\
x5a\x87\x60\x1d\x83\xd2\xa4\x07\x5e\x1d\x8c\xe5\x30\x17\xc1\x29\x14\x8e\xfa\xef\x79\xd5\x4c\xbd\xaf\x61\xad\x72\xf7\xc4\x19\xca\xe7\x66\x42\x07\x50\xc4\x12\xac\xbd\x98\x99\xa9\xd2\x67\x25\x1c\x4b\xaf\xbb\x7c\x52\xe2\xdf\x1f\xe7\x56\x64\x09\x1f\xe8\x49\xdf\x00\x19\xbe\x95\x49\xfd\xe1\x17\x32\x83\xcb\xbf\x61\x3d\x86\xd3\xab\xea\x94\x7d\x27\x0d\x8a\x38\x1a\x27\xa4\xdc\xd2\x0c\x22\x73\xec\xe5\x3f\xe5\x23\x0c\xc3\x8b\x40\x3c\x68\xc1\x1b\xb9\x82\xcc\xab\xbc\xd7\x64\xb6\x0b\x3a\xc8\x0d\x7c\xd7\xa6\x43\x43\xff\x4d\xfc\x0c\x1c\xa2\xa9\x4f\x45\x1d\xb0\x75\x24\x5e\x33\x99\x0f\xaa\xa0\xb8\xdc\x5c\xf0\x5b\xc1\xf5\x79\x52\x4f\x52\x94\x85\xde\x1a\x7a\xb3\xb3\x60\x4d\x30\xd5\x2b\x87\xf0\x49\x83\x96\xff\x17\x80\x18\x46\xf0\xd8\x46\x68\x46\xcf\x8c\x5b\xca\x69\x53\x5f\xfa\x39\x13\x11\x9f\x65\xa5\xfe\xa2\x2a\x9d\x4c\x76\xe9\x8d\x61\xea\x51\x40\xbc\x4f\x93\x0c\x0d\xdb\x2d\xa7\x00\x3e\x0b\xc2\x99\x52\xca\xc1\x35\xfc\xe9\x53\xfc\x7d\x08\xbe\xe9\x10\xed\x89\x58\x46\xf6\xd8\xea\xbf\x18\xa9\x30\xd8\x1f\x3e\x90\xde\xd1\x63\x21\xa5\xfe\x70\xc6\x15\x7f\x3e\x6f\x9f\xa6\xd9\x92\x13\x3d\x71\x6d\x8a\x87\x22\x4e\x12\x3a\x44\x5c\x21\x7e\x95\xe4\x39\xd1\x44\x2a\xcd\xb8\xcc\x39\x63\xaf\x3e\xcd\xa0\xbf\xbc\xc5\xfb\x5c\xaa\xe7\x53\x51\xd5\x72\xfd\x3f\xee\xcc\xb7\x86\x7a\x99\xa6\x12\x55\x63\x39\x9a\x69\x38\x76\x77\xca\x67\x55\xc4\xce\xc8\x78\xbf\x6e\x41\x75\x3d\x11\x02\xdb\xab\x41\x52\x0f\x43\x12\x34\x56\x1f\x5d\xfd\x7e\xa8\x51\x83\x3d\x57\xef\xe5\xf8\x7b\x70\xa6\xd7\x52\x69\x38\x0a\xcd\x30\x20\xe2\x4b\x1d\x46\xdf\x6a\x8b\x39\x14\xe5\xd2\xff\x63\xcb\x8c\x35\xae\xaa\x91\x36\x76\xdb\x9b\xc9\x2e\x4e\xe6\x2a\x62\xb4\xec\xf2\x15\xaf\x32\x1d\x97\x6d\xa1\x96\x64\x54\xfb\xe3\xd3\x1f\x6d\xa0\x48\x31\x83\x31\xf1\xa0\xf3\xed\x7e\xf6\x09\x2c\x45\xdd\xf2\x10\xba\xda\x31\x01\x84\xef\x18\xb8\x65\x7b\xfb\xdd\x26\x3a\x2b\x0b\xf4\x05\x37\x4e\x6c\x77\x2a\x7f\xcc\xe8\x7f\x7c\x95\x77\x71\xb3\x39\x38\x41\x27\xa4\xb5\x1e\x8b\x4a\x73\x55\x73\xf8\x7d\x03\xb1\x84\xd6\x0a\x45\xe7\x20\x78\xd9\xe0\x10\xb2\x3b\x91\x02\xdd\xf5\xf4\x0e\
x3a\x1a\x6d\x85\x24\xb4\x3d\xb2\x44\x9b\xef\xa8\xf5\x83\xa7\xfe\xa2\x49\x92\x82\x4b\x81\x70\xa8\x90\x6f\xcf\x09\xff\xd7\xbe\x4c\xa1\xd6\x53\x3d\xda\xd8\x44\xc5\xbd\xc0\x2c\x37\x38\xd7\xdb\xca\xd2\xed\xfe\x10\x7e\xc9\x07\xc3\xbd\x5e\x9f\xdc\x5a\xa0\x32\x5d\x4e\x01\x62\xfe\x06\xba\x9a\x40\xc8\x8e\x7e\xbe\x54\x9e\xb0\x50\x96\x5f\xbf\x5f\x83\xb5\x40\x16\x49\xe5\xd0\x12\x3a\x2b\x80\x3f\x7f\x80\x1d\xc3\xff\xa3\x31\x58\xc6\xe4\xb2\x2e\xf0\xfe\xc4\x3f\x72\x59\x6d\xe0\x25\x54\xef\x43\xb2\x27\xdb\xc2\x6c\x08\x85\xf9\x08\xdb\x4f\x97\x66\x78\x82\xad\xde\xca\xd3\x77\x43\x9b\x90\xee\xa8\xaa\xfb\x55\x05\x36\x9f\x74\x23\x19\x69\x6f\xf4\xcb\x4d\x70\x41\x92\xe2\x57\xa8\x2c\x88\x66\x75\xcc\x7e\x2a\xb3\x02\xd5\xe4\x7f\xb9\x3e\xc7\x8c\xe7\x8d\x84\x29\x6d\x79\xac\x5a\x2c\x19\xed\xaf\x28\xfb\x92\xa6\x61\x0e\x46\x7a\x2c\xe3\x4c\x00\x0f\x50\xa1\x26\x8a\x16\x0d\x6d\x5f\x34\x58\x5d\x08\xf2\x47\x37\x35\xaa\xb9\x71\x61\xa2\xbe\x03\xc4\xe5\x51\x72\xdd\x8a\x23\x63\x39\x54\xa3\xff\xcf\x67\xab\x42\xe5\xf5\x41\xd6\x85\xeb\x10\x08\xc7\xfb\xd8\xdd\x30\x7b\x25\x92\x9c\x21\x85\x22\x53\x66\x3f\x80\xfd\xe4\xcd\x13\x93\xc8\xaa\x90\xfb\x5f\x3f\xcc\xf5\x2e\x4f\x68\x55\x2b\x18\x56\x33\x76\x2a\x6c\x87\xf3\xb7\xb0\x6c\x2d\x78\xa9\x82\xb9\xce\xb2\xb0\x77\x9e\xfd\xc7\x0d\x45\xf3\xf4\x6b\x4c\x53\x08\x27\xee\x5b\xf7\x08\x57\x0f\x6a\x76\xb8\x37\x75\x65\x47\x80\xca\x52\xb7\x92\x3c\x56\xa2\x6c\x81\x4d\x95\x12\x41\xe1\xe9\x22\xfd\x20\x8e\x79\xd8\xd1\x6c\xfa\x1a\x2f\x31\x06\xaa\xaf\xff\xe2\x5c\x2b\x8c\x4b\x68\x75\x30\xa1\x21\xb1\x7f\xb9\x84\x1c\xaa\x99\xe1\xb1\x2a\x34\xfe\x0e\x84\x16\x03\x41\x71\x6b\xc3\xec\x26\x7f\x0b\xb5\xa5\x90\x66\xe1\x27\x37\xc6\x72\x18\x60\x4e\x33\xb5\xea\x0f\x78\x99\xd4\x9c\xca\x22\xff\xc5\x6c\x86\xe9\x3d\x51\xec\xfb\x64\x6e\x37\xb5\xcd\x62\x6b\x2f\xf8\xb5\x70\x56\x01\xc3\x29\x73\x37\x1b\xa7\x68\xeb\x50\xed\x8f\x14\x48\x5f\x96\x79\x27\xf5\xcb\x6d\x4e\x36\xed\x3b\x77\x99\xff\x73\x24\x96\x89\x85\xd6\x5d\x58\xe9\x43\x72\x57\x39\xf2\x14\x8d\xba\x66\x3a\xa2\x88\xf6\xd7\xc2\x49\xe4\x01\xe8\x6c\x87\x98\
x42\x6d\x71\xd8\xcf\xab\x7b\xad\x31\xd8\xcd\x0d\xdd\xd8\xe3\xff\x87\x77\x2b\xbc\xf6\x21\x89\x4b\xc8\xd1\xc8\xa6\xe1\xf8\x71\x5e\x54\x62\x84\xea\xad\xbe\x93\x7d\x01\x0f\x7a\xb8\xd2\x0f\x67\x25\xbf\x09\x19\xd2\x48\xdf\x9a\x54\x69\x29\x02\x43\x38\xd3\x7f\x75\xd6\xa3\x14\x82\x17\xae\x37\x92\x3d\xf3\xd2\xcc\x20\x40\xb1\x8d\xa9\x8e\x86\x6e\xec\xb5\x6b\x8b\x8e\xb6\x84\x84\x1c\xce\xd9\xa2\x48\xd4\x25\xde\x02\x53\x28\xee\xff\xe8\x9a\xda\x9e\xf3\x8e\x87\x24\xc7\xd2\xff\xec\x9f\xb9\x0a\x49\x85\xfd\xe1\xdb\x35\x4c\x56\x20\x4a\x64\x4b\x0b\x8f\x06\xa0\xf8\x36\x7e\x99\xde\x20\x2a\x49\xd0\x67\xf7\xc7\xe0\xc5\xfa\x41\x0d\xe9\x09\xf5\xbd\xff\x87\x03\xea\x6f\x25\x7f\x1d\x9b\x98\xed\x51\xa8\xc2\x94\xed\x79\x5a\xd9\x6c\xb6\x2d\x31\xc5\xc1\x88\xf2\x28\xc4\xbe\x71\x73\xda\xe4\x5a\xca\x12\x7b\x41\x9d\x18\x2c\xa9\xb7\xa9\xf8\x5f\x63\xfc\x07\x9f\xec\x66\x6e\x77\x35\x73\xda\xfe\x93\xea\xe7\x1d\x9d\xe7\x11\x98\x83\x87\x50\x68\x3d\xf3\xa3\x37\x56\x83\xbd\x9c\x45\x4c\x3e\x25\x4d\xd3\xf6\x11\xe6\x74\x6b\x5a\x96\x56\xfc\x7f\xdf\x51\xfa\x3b\x1d\x59\x84\xda\xc8\x79\x78\xc9\x82\xa3\x86\x91\xf3\x74\x32\x97\xb3\xd9\xd4\xf5\x89\x38\x64\x25\x5b\xac\xee\xbd\xf4\x79\xfb\xf7\xd5\xe6\x4b\x7d\xf3\x87\x91\x3d\x33\x57\xbb\xff\x10\x69\x96\x51\x4e\x7f\xc0\x57\x0e\x6b\x78\x3f\xfd\xf9\x89\xd1\x95\x8d\xd2\x9f\x37\x4d\x10\x6c\xa0\x0d\xf2\x2f\x8e\xdd\xf5\x08\xd7\xc9\x81\xbd\xe1\x95\x32\x78\xe7\x5e\xdd\x12\xb8\xec\xde\xf3\xe6\xff\x3e\x45\x65\x7c\x7d\x9f\xb2\x9d\xc6\x9d\xf2\x08\x57\x51\x95\x2c\x4e\xb2\x5d\xe2\x0c\x1f\x4b\x50\xfc\x22\xed\x03\x48\xa8\xa5\xa9\x58\x7d\x75\x4c\x01\x5b\x11\x78\xe2\xee\xfe\x7d\x77\xef\x8f\x2f\xad\x01\xe3\xb9\x96\xaa\x04\xfd\xfb\x60\x04\x32\x31\x43\xbf\xc3\x23\x1c\xad\xb0\xb7\x06\xce\x2c\xd4\xd2\x50\x7b\x9e\x32\xb7\x8d\x6c\xaf\xcb\xe2\xf8\x9b\x9d\x24\xce\x7f\x21\x71\xf7\x9b\x57\x0b\x17\x69\x92\x1c\x7e\xbe\xd2\x8e\xcb\x64\x2e\x0b\xdd\x15\x35\xbf\x9c\x49\xda\x3a\x78\xa4\x19\xd2\x1a\x87\x7d\xfb\x1b\x21\xbe\x77\x60\x73\xb6\x50\xd2\xff\xda\x07\xe7\x68\xcc\xc7\xe3\x1c\x1b\x6b\xec\xf0\x9e\xf9\xb3\x9b\
xad\x60\xb5\x49\xfa\x39\xf3\x18\x8d\xb8\xc5\xa3\x6b\xe5\x61\xb1\xdf\x5a\xad\xa8\xfc\xfe\x87\x67\x38\x8c\x12\xe2\xfc\xee\xb0\xca\xa4\x38\x61\xe2\x1a\xb3\xca\x29\x7e\x9e\x76\x49\xcb\x8b\x26\x03\x4d\x25\xcd\xe4\x6e\xf8\xff\x7b\x46\x1f\xf0\x32\x7a\x79\xc5\xc0\x16\xf5\x73\x13\x91\x6b\x34\x3e\xed\xb9\xf5\x0a\xef\x55\x1b\x9b\xdb\xef\xf1\x33\xb9\x2d\x43\xe5\xc7\xde\x04\xfe\xe3\xd0\xac\xa6\x2c\xe7\x40\xe9\x9c\x68\x9b\x69\x3a\xfc\xfc\x64\xc3\x6a\x26\xad\x5f\x5f\xbd\xcc\x3b\x7f\x38\x79\x68\xeb\x35\xe7\x18\xad\xb7\x80\xff\xc9\x1d\xe8\x82\xc6\xe1\x22\xe3\xfd\xe4\x5c\xe6\x0f\x51\x71\x0c\x46\x0a\xa2\x21\x68\x7a\x93\xdd\x9d\xa6\x29\xdd\xb7\xf4\x48\x2b\xe8\x79\xe2\x26\xf2\xff\xf8\xad\xaa\xfe\xcc\x6c\x88\x96\x50\x69\xf8\xb5\xe9\xa0\x72\x0b\xb3\xce\x6f\x18\xb6\xc1\x7f\x3f\x31\x0d\xa2\x60\x6c\x98\x31\x07\xa2\x66\x61\xd8\x9e\xfd\xdf\x75\x9e\x82\x59\xd5\xcb\xdd\xe9\xc6\x94\x91\x7c\x2c\x31\x9d\xe0\xba\xa1\x84\x59\x70\x0b\x4c\xe4\xce\x59\x19\x66\xd4\x06\x86\xf5\x1c\xf3\x1f\x6a\xd3\x47\x83\xd8\x30\x89\xe6\x08\x5b\x8f\x9f\x59\xbb\x33\xcd\x60\x6c\x61\x92\x76\xc1\x4f\x50\xaa\x01\xb2\x56\x48\xed\x93\xd2\xa8\x0d\x5a\xf9\xbf\x87\x3c\xbe\x3d\xc2\xd1\x82\x23\x96\x93\x55\xde\xc8\xba\x68\x86\xb5\xe3\x60\x9a\xcd\xfe\x6d\xbc\x96\xf2\xb2\xa8\xec\x8b\xde\x2a\xb5\xc3\x1a\xb5\xfd\x5f\x8c\x50\x19\xc3\xd3\x92\x7e\x0c\xee\x71\xa7\xcf\xbd\x65\xcc\xde\x56\x95\x9e\x02\xd5\x37\x70\x6a\x72\x8c\x5a\xed\x1e\x42\xb6\xe7\xa5\xff\xd1\xd9\x6c\xff\xca\x39\x91\x5a\x6d\x3f\xbc\x77\xef\xf9\xab\x63\x24\xde\x3e\x87\xce\xac\x4b\xc9\x87\x2d\x0c\x54\x26\x18\xb5\x99\xa3\xdc\x29\xf2\xff\x38\xaa\xc3\xfb\xa1\x20\x12\xa6\x96\xea\x62\xa6\xbb\x99\xe8\x28\x0c\xe1\xd8\xde\x1d\xed\xb0\xb6\xb8\xec\xeb\xcd\x84\x87\x09\x82\xc4\xfc\xbf\xcf\xb9\xaa\x17\xf4\x6e\xbd\x30\x82\xb6\xc4\xda\x8f\xcf\x86\x8a\xcf\xb1\xae\xbf\x70\x7b\xf6\xf9\x24\x10\x1c\x63\x3a\x31\xa1\x2c\xca\x6e\x7b\xf2\xb1\xcf\x46\x0d\x10\xa0\x6c\x8b\x8b\xb4\x37\x72\x45\xb2\xeb\xf5\x3f\xfb\xcb\xaa\xc9\xc1\x6f\x81\x63\x34\x50\xd4\x66\xc5\xe9\x0f\xde\x28\xda\xa5\x8a\xd9\xf4\xe5\xf5\x45\
x03\x10\xe7\xb6\x7f\xd7\x5e\x1b\xd8\xf1\xea\x69\x38\x92\x68\x42\xf0\x8e\xa6\x85\x43\x93\x74\x9c\xca\xaa\xff\xab\x07\x71\x6e\xe0\x93\x36\x66\xa9\x49\xfd\x24\x25\x73\xef\x85\x54\xf0\x58\xbb\xe0\x67\xb7\x1a\xd8\x82\x99\x17\x3a\x16\x3a\x58\xc3\x3c\xb3\x2d\x4b\xc9\xfd\x0c\xf4\xa5\xca\x25\x64\x26\xf7\x1a\xe2\xb7\x60\xad\x12\x84\x98\x64\xdc\xed\xff\x2d\x90\x73\x8c\xdf\x4a\x9c\x17\x49\x83\x75\xa9\x56\x4b\x86\xa3\x24\xf6\xda\x1f\x55\x41\x09\x6e\x5e\x9d\x59\xe5\x3f\xe6\x15\x98\xba\x16\x95\xa1\x36\x9a\x1e\x96\x66\x18\xa2\xf7\xb2\x36\xc9\x3a\x93\x16\x14\xef\x4e\x07\x3c\x8a\x6a\x5b\xbc\x56\x5b\xce\x61\x97\xe4\x4a\xb0\xc2\xc5\xea\x0e\xa8\xe2\x18\x8f\x77\xdc\xdb\x11\x56\x99\x19\x13\xd0\x9a\x54\xb6\xf9\xcf\xef\x51\x78\x03\x2a\x26\x97\xaa\x4a\x10\xf4\x50\xf7\xfb\x54\xe2\x63\xbb\xfe\x41\x4c\xa9\x8f\x5c\x35\x2f\xe5\x46\x81\xa9\x46\xd4\x54\x5f\x6b\x1c\x64\x85\x43\xef\x7c\x3d\x7f\xf7\x3e\xfd\xd8\xbc\xfc\x18\xe6\xe3\x8d\x9f\xb2\x7a\xd9\xfb\xdc\xa2\x63\xd1\xeb\xd9\xc3\x92\x32\x4f\x70\xa9\x5f\x9d\x4e\x07\xe8\xc0\x0f\x20\x0d\xa6\xdd\x69\xd7\x75\x4f\xf8\x63\x8b\x27\xf2\xe3\x82\xff\x98\x31\xf3\x86\x6e\x16\xf4\xcf\x50\xf6\xd3\x8c\x6b\xe6\x82\x9c\xb2\xd2\x23\xdc\xf1\xcb\x2e\x11\xb5\xf0\x85\x04\xba\xdd\x37\xe7\x2f\xef\xbe\x6b\x9d\x8a\x54\x97\x92\x77\x24\x92\x14\x6b\x1e\x53\x5f\xaa\x01\x31\x45\x26\x98\x9e\xb1\xa7\x86\xd9\xe4\x18\xab\x97\xfe\xd7\x6e\xb9\x3e\x68\xd7\xb0\x1b\x9c\xa0\xde\x9b\xbd\xfc\x8d\xb8\x6e\x1c\x5a\x6e\x14\xf3\xd8\x65\xbf\x4f\x41\x4c\x3f\x18\xb7\x02\x90\xd6\x82\x16\x8c\x99\xae\x96\x7d\x9c\x39\xc5\xc0\x69\xc2\x0e\x89\xfa\x96\xd2\x58\x95\x4c\x8d\xa3\x45\x81\xe3\x3f\x85\xff\xaf\x89\xbe\x7e\xfa\xba\xec\xc8\x29\x9c\xa6\x23\xd9\xe7\xae\x5c\x44\x96\x6f\xe6\xf7\xd9\x6f\x29\x97\x52\xc7\x1f\xb3\x24\x09\x6d\x79\x4b\xf4\x03\x88\x30\x4a\xef\xf4\xf1\xe7\x3e\x3f\x21\xa4\x09\x2c\x1e\x53\xd8\x2c\xf4\x96\xc8\x0e\xf0\xd3\x69\xd3\xff\xf2\xdd\x5a\x72\x3e\x29\x81\xa2\xf6\x19\x1e\x41\x09\xb3\x2d\xc0\x5f\xb8\x9c\x1a\xa5\x9f\xbc\x6c\x93\x21\x5a\x71\x73\xde\x81\xd4\xca\xd3\xec\x87\xbe\x4e\x2a\xaa\xa1\xbf\
x24\x35\x09\x3e\x26\x96\x4f\x19\x67\x29\x17\x26\x22\x5a\x6a\xcc\xba\x11\x87\xc5\x73\xb5\x04\x12\xc6\xff\xe5\x14\xc1\xc8\xf7\xad\xac\x08\x4a\x8c\xed\xef\xb7\xbd\xf9\x1a\x81\x13\xba\xd2\xfe\x2a\x3a\x85\x5c\xa0\xe6\x57\xa2\xaa\x55\x2f\xa0\x8c\x83\x3d\x7d\x98\xfd\xe8\x22\x7a\x83\xdb\xc9\x72\xf3\x99\xeb\x4e\xc3\x18\x14\x65\x3c\x29\xc4\x3c\x7b\x6f\xaa\x79\xca\xee\xb0\x46\xa3\xff\x6f\x8c\xe1\xfe\xf8\x65\x17\x64\xbc\x65\xf8\x70\x8f\x13\xed\x64\x8f\x56\x21\xf6\xa5\x04\x8d\xd3\xb1\x65\x70\x11\x7b\x0a\x0a\x59\x1e\x46\x36\x44\x88\x19\x35\x27\xaf\xb4\xdf\x12\x71\x0a\x5a\xbb\x27\xbf\x79\x5c\xf6\x61\x4a\x86\x99\x5a\x09\xba\x6e\xaa\x9c\x17\x0b\xee\xfd\x71\x0a\x83\x38\xfd\x27\xdd\x7f\x04\x1b\x04\xd1\x64\x19\x0f\x70\x11\x87\x70\x88\x16\xbf\x8c\xb8\x6c\x1c\x94\xf1\x4a\x61\x59\x05\x08\xb1\xe3\xbc\x08\x29\x2f\x6d\x39\x89\x1d\x86\x91\xd8\xc4\x9d\xa1\x19\x25\x8f\xcb\x06\x14\x67\xa7\xdf\x7c\xe2\x7d\x8d\xcd\xae\x50\x9b\x61\x09\x2d\xe2\x10\x2d\xba\x40\x55\x2b\xdd\x35\x9f\xcf\x71\xf9\x35\x76\xa0\xfc\x81\x85\x20\xfd\x85\x7d\xf9\x26\x90\xb5\x63\xef\x19\xbc\xac\xc5\x5a\x02\x12\x39\x1a\xab\x28\x85\xec\x42\x58\x66\x78\xd2\xdc\x36\x10\x3e\x3b\x46\xf3\x97\xdb\xb2\x4c\xb9\xb6\xc7\x1a\x4e\x00\x54\x69\x47\x44\x56\x11\x7c\xb8\x4a\x6e\x17\xe3\xa7\xbe\xea\x3f\xc9\x87\x27\x0e\x42\x99\x9d\x3d\x53\x06\x6d\x8c\x71\x1a\x81\x6f\xe9\x71\x87\x90\xc9\xcf\x70\xfa\x08\xc7\x03\x3d\x5e\x2a\xe7\x8f\x2a\x3c\xb1\x55\xe7\xf6\x30\x03\x0a\xb2\xff\x5d\x3f\x12\x74\x5c\x76\x14\x1e\x49\x97\x25\xb3\x9b\x0c\x1a\xbb\x5e\xf8\x32\xdf\x3c\x10\x00\xdb\xc6\xb8\xce\x0f\x3e\x99\xd8\x15\x6e\x09\x35\xd5\x12\xb8\xd1\x99\xa0\xf0\xcf\xb1\xac\xd1\x71\xfe\x26\xb2\xa5\xf2\xe2\xba\x6c\x42\x2a\x68\x1a\x77\xf1\x4d\xcd\x1d\x86\xa9\xd9\x06\xec\x43\x3d\x78\x65\x35\x50\x12\x40\x0c\xd7\xc3\x7c\x70\xc5\xcd\xb7\x21\xdf\xe8\xdc\x65\x59\x41\x6b\x8b\x94\xdf\x0f\xb4\xa3\xe2\x9a\x62\x05\xe1\xe6\x25\x03\x19\x7d\x06\x15\xac\xf2\x53\xfa\xfe\xb5\x69\x90\xc5\x2e\x57\xe5\x99\x62\xed\x9b\x1b\x16\xea\x62\x7d\xad\x46\xed\xfe\x32\x26\x56\x41\x32\xa2\xb0\x10\x80\x04\xd2\
x14\xce\xe8\x5c\xad\x67\x76\xfe\xb3\x57\x78\xdb\x2f\x7a\xfa\xd3\x53\xa5\xe3\x77\xae\x0e\xda\xcd\x9f\xdf\x78\x88\xc2\xf9\x14\xe7\xb6\xe5\x14\x7d\x25\xab\x92\x1c\xf1\xae\x45\xf1\xcb\x26\xd1\x92\x1c\x37\xb2\xd8\xbb\xf1\x4e\xff\x7c\x77\xd2\xd4\xa3\x4d\x03\xac\xe7\xab\x79\x37\xdd\x72\xd9\x87\x4c\xba\x9b\xec\x1f\x1e\xe0\xe8\x33\x80\xfe\x36\x18\x73\xf2\xca\x59\x07\x81\x61\x78\x25\x61\x1e\x73\x20\xac\x9d\x8c\xb9\x5b\xb7\x6c\xa3\xc0\x99\x7e\xcc\xb0\xee\xeb\x98\xb1\xff\xe4\x97\x24\x33\xa4\x18\xa9\x1c\x45\x0d\xac\x9b\x2c\x5c\x6a\xd5\x8d\xb3\xdc\x77\xcc\xc5\xba\x95\x41\xd4\x52\x70\xbd\x4c\x3a\x0e\x8b\x79\x8a\x7b\x8d\x5e\x3e\x8b\x20\xf0\xea\xe0\x4f\x7c\x21\xaa\x0b\xf1\x7c\xc2\x53\x8d\xd2\x90\xe6\x1e\x73\xa4\x94\xff\x9b\x07\x12\xb8\x76\x5d\xd7\xb5\xba\xd0\x04\x23\x44\x87\xd3\x84\xbf\xf1\x37\x1c\xf0\x48\x1b\xcd\xf4\x91\x44\xe5\x02\x84\xe5\x5d\xc6\x6a\x02\xb0\xe1\xd8\xa1\x31\x00\x68\x3d\x93\x96\xf7\xa1\xd3\x25\x66\x39\xa1\x5c\xca\xcd\xc4\xe1\x63\x87\x17\x11\xf1\xcb\x12\xc7\x0c\xda\xce\x46\xd3\x87\x0b\xf4\x78\x6b\x35\xe6\x26\xd9\x4a\x22\x28\xea\x21\xe9\x52\xd6\xfe\xcd\x2f\xb3\x2d\xfc\x95\xc9\xbb\xfe\x83\xb3\xbf\x78\xe3\x28\x8a\xe0\xc4\xdd\xda\x7a\x39\x4f\xfc\x25\x21\x6c\x5a\x4b\x71\x4a\x5d\xca\xfb\x58\x52\xee\xa9\xba\x51\x00\xbe\x8a\x57\x2a\x54\xbd\x42\xe0\x86\xfd\x8e\xef\x2d\x78\x03\xe2\x7d\x65\x27\x8b\x83\x53\xf3\x5b\xbd\xe7\x2d\xd0\x1f\x12\x46\x43\x30\x0f\x21\x08\x09\xe7\x80\x9e\x34\x3a\x31\xd8\xa4\x6d\xf5\xa1\x7b\x79\xb8\x1e\x81\xe4\x82\x13\xcc\x6b\x7f\x78\x69\x27\x24\x65\x30\xbb\xcf\xf7\xcc\x58\xf1\x87\x4b\x3d\xff\xf2\x3b\x47\xd9\xa2\x2d\x84\x49\xd2\xac\x9f\xe7\xcf\x72\x74\x5d\x68\x59\x71\x30\x06\xa7\xb1\xf1\x32\x97\xf5\x23\xdb\x72\xc2\x80\x11\x05\x92\xe4\x6d\xfb\xe6\x82\x9b\xe3\x2d\xa6\xe4\x45\x39\xb6\x12\x2d\x49\xb4\x44\x97\x03\x19\xf1\x4e\xaf\x1d\x98\xa8\x89\xb3\x31\xbc\xb0\xa1\xe2\x95\x16\x85\xd8\x35\x55\xc8\xcc\x03\xec\x38\x69\x64\x84\x33\x6d\xa4\xbb\x1e\xa3\x26\xc5\x2c\x3b\xb4\x65\xd8\xde\xfe\x37\xc7\xbd\x44\xdb\xda\x33\xfd\xdf\x33\xd9\x6f\x5f\x37\x3c\xee\x6e\x64\xdc\x0f\
xb3\x7d\xbc\x5d\x35\x38\xb4\x63\x5a\x29\x70\x20\x90\x0f\x28\x7b\x0a\x9c\x8a\xaf\x44\x69\x93\x0c\x08\xd4\xd1\x9a\x40\x17\xe4\xd4\x56\x23\x35\x1d\x22\x78\xb1\xd4\xaf\xaf\xc1\x9e\xaf\x0b\xe9\xf8\xfd\x68\xf9\xee\x14\xfa\x7b\xbb\xe9\xa5\x06\xa7\xd5\x8b\xd8\xf6\xc4\x9a\xe2\x8f\x9e\xff\xe7\xbb\x79\x95\x28\xbf\xdd\x96\xbb\x5b\xb1\xfe\xe0\x22\x9b\x2e\xbf\x97\x0a\x15\x6c\xc0\x19\xe3\xfe\x83\x05\xfb\x37\xf1\xf1\xe2\x4a\x33\x36\x59\xfe\x96\x15\x67\xe8\xe2\x0a\x27\x59\xf9\x17\x40\xb4\x9e\x5d\xaa\xf0\x07\x1b\x22\xe5\x1a\xe9\xa8\x25\xa7\x02\xa2\x49\x17\x49\x47\x28\x93\x19\x8a\x24\x16\x34\xf2\x9e\x11\x0a\x0f\xd9\xeb\x26\x6b\x0d\x22\x6f\xe1\xed\xaa\x77\x04\xb1\x7d\x94\x1e\xc5\xa5\x09\x26\xe4\xd7\x92\x1c\xf1\xdf\x7c\x48\x8f\xd3\xed\x93\xbc\xe3\x5e\xd6\xfd\x9f\x58\xee\xf1\x86\x65\x48\x5d\xe0\xe5\x65\x63\x2f\xb4\x9d\x92\xbf\xcc\xa8\xa4\x81\x26\xa6\x00\x9a\xd6\x53\x5e\x38\x46\xa1\xab\xb8\xa2\x95\xb9\xe7\x0c\xb5\x37\x75\x45\xcd\x2c\x5c\x83\x48\x3f\x4d\x25\x7c\xff\x31\xb3\x54\x53\xbd\xda\x64\x57\xe9\xfa\xa6\x51\x1f\x9a\xdb\xcf\xf0\xf7\x0e\x42\x10\x96\xbc\x91\xd2\x42\x05\x48\xaf\x6b\x45\xe0\x2e\x19\x0e\xbb\x8f\x31\xa7\xdc\x7f\x48\x3c\xeb\x61\x08\xfe\x2a\x77\xcd\x30\x2a\x27\xbe\xfb\xa1\x05\x16\x51\xf3\x18\x2c\x61\x54\xa7\x7f\x85\x1b\x2f\x30\x33\x17\xa2\x90\xfe\xe1\x84\xc4\x80\x9c\x4e\xbc\x28\x66\x97\xc0\xae\x57\x20\x59\xba\x9d\xa4\x4f\x2d\xd6\x62\xab\x9e\x69\x42\x57\x7e\x89\xd4\x3d\x54\x47\xc1\x07\xb8\xfd\xc3\xc1\x5f\x26\x84\x55\x19\xe5\xcb\xe3\x77\xa3\x94\xdc\xb5\x32\xdd\x1f\x83\x21\xbd\xb3\xe9\xed\x22\x54\xf1\x4f\x91\xf7\xd6\x16\xa6\x99\x34\xea\x3f\x90\xc3\xaa\xae\x20\x06\x7f\xf4\x8c\x27\x60\x33\xac\xfc\xf9\x2f\xcb\x98\x17\xce\x2c\x59\xf7\x05\x16\x86\xb4\xfd\x3a\xbc\x53\xd5\xa7\x55\xba\x1d\x62\x7f\x40\x6e\x28\x3a\xad\x74\x5e\x07\x0e\xc3\x7d\x89\x1c\xbf\x90\xbc\x5c\xd4\xa5\xe1\xce\x48\xe7\x45\x0b\xc5\x95\x57\x85\x71\x7b\x0a\xdd\x17\x50\x05\x36\x39\x07\x62\x1a\xc2\x3e\x68\xb4\x83\xa2\x2e\x20\x7a\xaf\x94\x55\x75\x9f\xb4\x88\x49\x8b\x9e\x5b\x60\x5b\x38\xd6\x6e\xf9\xb3\xde\x3d\x32\x25\xc5\x21\
x1c\x37\xa4\x40\x81\xf9\x63\x3b\xdc\x12\x78\xeb\x53\x30\x18\xc3\xe8\x6a\x10\x44\x12\x6b\x6f\x16\x11\x18\xbf\xa7\x0a\xf8\xe5\x93\x4b\x25\x9e\xe1\x2c\x1b\xda\xc8\x74\x27\x9b\x10\xba\x85\x02\xcf\x1f\x2a\x23\xd7\x52\xd9\x78\x65\x40\x87\x5e\xc8\xe8\x2b\x79\x3b\x14\x35\x0a\xa1\x93\xcb\x5f\x52\x70\xc8\x05\x5a\xa0\x39\xaa\x6a\xf5\x02\xf2\x24\x18\xc0\xa7\x09\x8d\x9e\xb9\xee\x07\x1b\x8b\x67\xdf\x7b\xf9\x12\x30\xe7\xf0\x04\x18\x85\x0b\x6d\xf7\xd6\x97\x4e\x21\x77\xbc\xde\x4c\x0d\xfd\xeb\xe7\xb2\xfe\x1a\x13\x32\x48\x7f\x7b\x15\xf3\xd2\xe4\x53\x91\x13\x32\xab\xc8\xc4\x16\x68\x3c\xe5\x61\x5e\x7a\xd2\xc9\xe8\x1b\x9c\xc8\x81\xee\xd4\x34\xd2\x84\xa7\xa4\x7f\x7d\xdd\x24\xa1\x56\x21\x9e\x28\x7b\x54\x4b\x3e\xda\x3d\x9c\x95\x3a\x3a\xae\xcc\xea\x36\x81\xa4\x63\x35\x0f\xbf\xbe\x28\x27\xd8\xda\x86\x7c\x98\x15\xac\x24\x1c\x2f\x7d\x07\x47\x63\x7d\x0b\x94\x1e\x1c\xba\x61\x4e\x5f\x4a\x5b\xee\x7a\x8e\x45\x64\x45\xdb\xff\xd1\xa4\x9e\x41\x28\x82\x67\xf6\x46\x33\xb5\x7f\x82\x00\xe2\xf5\xe8\x83\x13\x0b\xe9\xb5\xb2\xa7\x89\x8e\xa5\xa2\x16\x69\x4c\x7e\xe0\xba\x83\x51\xe5\xc7\x8f\x46\xfb\xe2\xba\x68\x26\xd2\x98\xe4\x18\xb4\x25\xd4\x26\xd8\x4b\x1b\x94\x76\xd7\x21\xc8\x3a\xb5\x6e\xdb\x3d\x98\xfa\x78\x41\x38\xc5\xf4\x48\xa9\x6b\xad\xf9\xed\xb4\x6c\x13\xfb\x32\xbd\x07\x67\x1d\x02\x4b\x45\xd3\xa9\x55\x2a\x0b\x49\x82\x75\x13\xf9\x63\x97\x0c\x16\xaa\x91\xe9\x2c\x6d\x99\x1f\xbc\xa2\xd4\x85\x35\xb5\xa6\x71\x59\x33\x90\x27\xaa\x0f\xa0\xaa\x4b\x3e\x30\xfa\x60\x67\x93\x03\x69\x3a\x8a\xf2\xdd\x29\x9d\x37\xf2\xfd\xa5\x07\xc1\xc2\x3a\xa2\xe6\xcf\xd9\xeb\xba\xff\xd8\xf2\xb4\xc1\x27\xa8\xc5\xdf\x68\x1d\xc9\x3e\x87\xcc\xcc\x2d\x1f\x6e\xaa\x70\x11\x63\xa3\xe9\xf6\xe6\xaf\x44\xf7\x59\xb4\x56\x5a\xc5\xb5\x53\xf7\xb0\xb5\xe0\x4b\x94\x65\x54\x5a\xa0\x2d\x35\x43\xe2\x74\xa2\xe0\x95\x2c\x49\xa4\xc9\xed\xe6\x9c\x96\x38\x0f\xa5\x26\xe2\x02\x6c\x3c\x9e\xf3\xa2\x69\x5b\x50\x7c\x35\x15\x9e\xcc\x1a\x74\x2d\x4f\x13\x65\x08\xf5\xe5\x33\x80\x6b\x25\x28\xbb\xa1\xec\xb4\x00\xeb\x86\x96\x97\x22\x8b\x7a\xd5\x9e\x8d\x1a\x79\x19\x90\x5f\x18\
x8a\x05\x7f\x57\x0c\x2f\xee\x5e\xa2\xfe\xef\x8f\x35\x25\x9d\xe4\xe8\x29\x7d\x8e\x68\xf4\xfc\x0c\x87\xc5\x58\x11\x7e\x69\xee\x2f\x56\xea\x5a\xe4\xaf\xf9\xa6\x79\x3d\x10\x84\x51\xf1\xf3\xe8\x7c\x0d\x99\x40\xe4\xdb\x2f\xd7\x21\xc5\x4b\x15\x64\x25\x43\x5b\x85\x5e\x0b\x83\x88\x72\x3d\x05\x6c\xdb\x01\xfe\x08\x57\xd3\x5b\x07\x03\xd9\x48\x15\x8f\x35\x3a\x3e\xab\xa7\x14\xac\xa6\xd7\xce\x66\xce\xf9\xa5\x93\x81\xdf\x95\x84\xa6\xa4\x24\xcf\x08\xbd\xca\x4e\x78\x73\x68\xcc\x5c\x8a\xe2\x47\xec\x67\xe3\xcb\x95\x86\x0c\xe5\x1e\x23\xf6\x72\xfb\x39\x6c\xbf\x4a\xcf\x5c\x8a\x13\x4d\x7f\x24\xf6\x80\xc5\x11\xff\xcc\xa8\xf5\x65\x16\x2e\x9f\x94\x19\xff\xa0\xa7\xf3\xe8\x7f\xdb\x2d\x97\x39\x23\xff\xb5\xa1\xa8\x3c\xa9\x3d\xb3\xbe\xad\x58\x93\x60\x9d\x8c\xef\x74\xb1\x46\x20\xa6\xd0\xdb\xc8\xa3\xf6\xf7\x89\xf4\xcc\x54\x6e\xfd\x4b\xa2\xa3\xf6\x0d\x5e\xfd\x40\x0b\x7b\x2a\xbe\x1c\x7f\x7e\x14\x38\xd1\xbf\xa9\xfa\x60\x78\x47\x54\x74\x12\x16\xf7\x99\xd6\x1f\x94\xee\x04\x5c\x2c\xaf\x71\xc1\x08\xa9\xa3\xbd\x24\xff\x94\x1f\x65\x09\x3c\x47\x16\x2d\x80\x81\xe9\xef\x47\x9f\x74\xf6\x14\x9c\xe1\x7a\x64\x01\xc0\xef\x19\xbe\x0a\x73\xb3\xac\xa0\x80\x34\x30\x49\xf4\x9d\x30\x7f\x39\x69\xb1\xe8\x6a\x2a\xd9\x72\x67\xac\x25\x4c\x1c\xde\xea\xac\xef\x75\x71\xcf\x17\x6f\xe7\x79\x19\x52\xfd\x91\xb8\x0a\x61\x49\xd3\x49\x69\xa7\x8a\xe0\x41\x46\x70\xf9\xed\x44\x72\x60\xc1\x83\x4a\xe7\x86\x1e\xf5\x1f\x02\xec\xbc\x2b\x40\x0a\x3d\xf2\x2d\x5a\x1b\x72\xb5\x36\x89\x63\xa0\xac\xf9\x92\x5d\x2f\x46\x81\xb2\xcb\xd0\xbd\x0a\xc4\x12\xf3\xd8\x0b\x44\xd0\xa9\x2c\x4f\x04\xb6\x12\xbb\x3e\xbb\x6d\x9b\xf3\x2d\xb5\x49\xca\x82\xd5\x2c\x23\x08\xae\xe5\xef\xe4\x15\x83\x2f\xed\xae\x26\xab\xfd\x95\xa4\x67\x38\xa4\x41\xc6\xdb\xcb\x5e\x6a\x41\xd6\x39\x3f\x35\x4f\x94\x3a\xe4\xd6\x21\xec\xae\xc4\xc4\x81\xfe\xf8\xe9\xfb\x87\xdc\x2f\x1d\x26\xea\x0f\xf9\xcb\x8f\x58\x6d\xd0\xf7\x13\x2d\x9f\x62\x9d\xa3\x4c\xd8\xf1\x3f\x6b\x0b\x8c\x56\xe7\xea\x2b\x21\xa5\x26\xb2\x62\xb5\x2f\x33\x27\xd4\x03\x26\xfa\xa3\x85\x14\x45\x5e\xb3\xf1\xac\x4a\xfb\x8b\x41\x8f\x0e\x6b\
x4a\x3a\x22\x1d\xcb\x1d\xc7\x52\x2c\x19\xcc\x4f\xb3\x02\xa8\xaa\xe4\xea\x5a\x13\x98\x9a\x61\xf6\x86\x55\x05\xd5\x98\xfc\x96\x57\x79\x0e\x90\x57\xda\x5e\xfe\x68\x9a\x4e\x06\x52\x07\xf1\xbe\xa8\xce\xd5\xa6\x18\x76\xef\x08\x89\x78\x5e\x5b\x9d\x4d\x3f\xdc\x04\x9b\x5c\xef\x4a\x38\xe7\x3f\x50\x66\x50\xe8\xe8\x7a\xfa\xba\xc0\x11\x6e\xe2\x35\x74\x96\x98\x49\x1a\xc4\x6f\xf4\x2e\xb8\x47\xed\xbd\xf7\xd7\x6e\x07\x5a\x5a\xe1\xf2\xf2\xe7\xc0\x6d\xce\x57\xdc\x94\xee\x36\x7a\xc8\xa6\x49\xfa\x18\x14\x17\x55\x12\x71\x06\xe5\x1d\x95\x79\x76\x77\x58\x45\xd3\xa0\xc3\xf6\xcd\x82\x26\x00\xb2\xfa\xfd\xf0\x58\xc1\xe7\x5a\x60\x18\x73\x92\x7e\x7c\x12\x8a\x4f\x38\x52\xcc\x0e\x49\x09\x62\x5d\xe2\x6d\x14\x1c\x90\xfa\xfc\xcf\x71\x1d\x73\xcf\x87\x23\xa5\xc7\x1c\x92\x2c\xb7\xc2\xa1\x81\x9f\x43\x8e\x5e\x11\x31\xa9\x68\xb2\x44\x4f\x12\x2e\xa7\xe3\x5a\xb6\x9f\xb3\xe3\x7e\xa7\xf3\xc1\xfe\x3e\x49\x24\xb6\x75\xd0\xd6\x0f\xd0\xa5\xc6\xd8\x69\xc6\x92\x32\x75\x53\x22\x32\xc2\xbc\xbe\x56\x3b\x86\x6c\x1f\x20\x69\x95\x8a\x23\xa1\xfe\xc2\x2a\xed\x18\xac\xd3\x09\x14\x23\xad\xc1\xd9\x51\x59\x5c\xc3\x12\x85\x31\xa5\x5b\x8a\x52\x18\xcb\x30\x3c\x21\xc9\x15\x10\xc9\xa3\xa4\x1f\x04\x14\xb5\x30\x72\xde\x30\x8f\xf2\xf9\x1e\x7f\x15\x4f\xb2\xc0\x39\x68\xc0\x7b\x40\xc1\x70\xc4\x10\x10\x23\xc0\xef\x8b\xed\x17\xa8\xbe\x13\x5f\x7e\xe5\x88\xae\x85\x7e\xc6\x15\xa1\x39\x79\x4a\x0c\x4d\xa6\xaf\xf7\xad\x0d\xd1\xfa\xe9\x2f\xec\x4f\x6f\x3e\x99\x5f\xfe\xf1\x5a\x6f\xc6\x57\x9e\x38\x8c\x22\x88\xd1\x06\xbf\xaa\x62\xf8\x7b\x7b\x9b\x55\x28\x86\xe1\x90\x66\x91\xd2\x38\xcc\x15\xbb\x04\x98\x40\x28\x1a\xfb\x9d\xef\xe7\x6a\x7d\xf0\x0e\x24\xf3\x2b\x6e\x4c\xfe\x02\x81\x64\x15\x13\x15\xfa\xe4\x06\x2b\xd9\x49\x22\x1d\x1b\x41\xcd\xa1\xf6\xb0\x09\xc3\x70\x8e\xa5\x0e\x86\x39\x92\x79\x19\x5f\x08\xb9\x9f\xbd\x08\x45\x5e\x2a\x11\x56\x9c\x81\x71\x82\x7e\xe4\x8c\x4c\xd9\x8f\xcf\xe0\x43\x7d\xad\x6b\x30\x9a\x01\xda\x9b\x98\xfd\xa7\x4e\x05\x88\x5f\xe0\x35\x10\x84\x9e\x32\x11\x3b\x6e\x52\x3c\x15\x2c\xa4\x4c\xb0\x44\x55\xc9\x3d\xe8\xf5\xf0\x78\x8b\x48\xa2\x56\
x93\x0a\xa0\x1a\xcd\x2e\xf8\xbe\x22\xf2\x22\x25\x4d\x5b\x55\xa0\xd2\xae\xe2\x64\x5d\xf1\x29\xab\xe7\xda\x3f\x7e\x2f\x09\x96\xea\xee\x71\xa7\x7e\x28\xf8\x70\x96\xc2\xe4\x66\x5a\xc1\x0c\xf1\xb7\xef\xcb\xa3\x8f\x05\x46\x95\xd6\x3f\x79\x74\x76\x99\x7f\xed\xbc\xeb\x19\xa2\x4f\x1b\xfe\x0b\xd7\x2e\x2b\x56\x77\xc0\xcb\x25\x83\x45\x31\xf1\xb3\xd4\xb6\x90\x32\x10\x5d\x5f\x19\x0e\x6d\xc5\x45\x61\xa7\xcf\x5d\x85\xc0\x90\x36\xf1\xfd\x27\xeb\x3a\x5e\xa9\xac\x38\x1b\x7a\x3d\xb0\x15\x89\x2d\xe7\x2e\x39\xaf\x34\x8c\x91\x9b\x7e\x3e\x15\x1c\x58\x3c\x60\xa7\x86\x98\xf6\x84\x90\xe6\xc8\x16\x30\x3a\x0d\x5d\xc8\xcd\x03\xdb\x94\xaf\x6c\xad\x1b\xc3\x79\xc2\x07\x14\x5b\x36\x2e\x9c\x6c\x69\x0f\x7e\x5a\x76\xf0\x83\x8a\x7f\x52\x62\xda\xe8\xab\xa4\x62\xa8\xe3\x7d\x73\x87\xf7\x3e\x9d\x4a\xe5\x32\x1c\x45\xd2\x11\xae\x57\x98\x6d\xa1\x40\xf7\x48\x13\x58\x0d\x74\x1e\xc4\xaf\x6e\xf9\xe0\x84\x5f\x9e\xb6\x96\xfd\x27\xcd\x1a\xcb\x68\xb4\x89\x8c\x19\x79\xa2\x5f\xeb\xb7\x93\x42\x5c\x62\xbf\x94\xbf\xfe\xe9\x87\xcc\x78\xc6\x00\xed\x53\xfd\x98\x25\x47\xc6\x72\x85\x97\x33\xe4\x98\x9d\x86\x6f\xe0\x02\x2d\x2f\x20\xae\x4d\xec\xdf\x6b\x20\x56\x90\x66\xb8\xdc\xbe\x67\x7e\x2c\x1a\x2e\x9b\xa0\x5d\xf5\xf6\x9e\xbf\x23\x7d\x89\xd3\x76\x89\x2b\x50\x4e\xf5\x6e\x39\x55\x18\xc9\x87\x4d\x60\x84\x22\x76\x88\x85\xb4\xbc\x8a\xe1\xad\x16\x01\x49\x4f\xc9\x2b\x2a\x03\x8c\x64\x93\x4f\xf5\x02\xa9\x15\x35\x2b\xc7\xbd\x69\xb9\xc5\x6f\x7a\xb3\xe5\xb0\x89\xb0\x60\x02\x33\x8e\x35\x55\xb9\x47\xe6\x6f\x5d\x1a\xc7\xf1\xba\x4a\x31\x4a\x76\x26\x97\xd8\xb2\xff\xa6\x09\x95\x3e\x33\x34\xde\x4a\x3b\x6b\x24\x41\x74\xa5\xaf\x95\x67\xc6\x12\xc3\x8f\xc8\xd3\x60\x7b\x73\x08\xd4\x7d\xcf\xf0\xf1\x2c\x2a\x7b\x37\x63\xc0\x9e\xc1\x87\x80\x0a\x36\x7f\x53\x59\x35\xe0\x40\x5d\x67\x16\x19\xd8\xcc\xbd\xd1\x1f\x6e\xc7\x32\x13\x6f\x31\xde\x1a\xac\x7a\xbb\x18\x04\xd6\x9a\x72\x07\x6b\x0b\x1d\xb6\x1a\x23\x28\x7f\x74\x80\xd0\x7b\x1b\xe4\x31\x0a\xe5\x1f\x00\x28\x77\xc3\x00\x53\xed\xff\xf5\xc8\xce\x73\x29\x78\xcb\x31\x89\xdf\x54\x85\x46\xe2\x4b\x83\xc7\x6c\x56\xd7\x5d\
xe9\x30\xeb\x82\x44\xdb\xbc\x8f\xdc\xe1\xce\xa8\xb2\xb4\x2d\xc9\x4b\xc6\x40\x19\x41\xeb\x57\x74\xae\xe8\xfc\xa1\xcb\xeb\x0e\xed\x11\xd6\x93\xc5\x78\x93\x1c\xe5\x4b\x32\xb4\xd0\xaa\x9b\x5c\x8f\x23\x34\x38\xd3\xc2\x5d\x1e\x8e\xc2\x73\x2a\xc3\xda\x7a\xc7\x30\x43\x63\x2c\x47\xa8\x2d\x4e\x20\x1a\x92\xf7\x04\xfb\xff\x0d\x8c\x53\x20\x6b\x84\x70\x8c\x2a\x7a\x98\x24\x7d\xd3\x82\x20\x7c\xac\xe0\xf6\xaa\x02\xaa\x6b\x5e\xd5\x1c\xd1\x78\xd4\xe6\xf6\x1b\xaf\x31\x16\x46\xfa\x39\xaf\x8f\x66\x78\x91\x0b\x62\x6d\x08\xbc\x29\xc3\xb7\xd1\xef\x74\x59\x08\x5c\x7f\xfa\xe4\x30\xd5\xc7\x05\xce\x66\x69\x20\x90\xc3\x59\x2c\x11\x92\x29\x03\x26\x25\x4f\x60\x3c\x95\xff\x1e\x1c\xb1\x15\xc3\xbd\x30\x2c\x31\xe5\x10\xbb\xfe\xcf\x46\x04\xcf\x94\x06\x2d\x76\x9d\xa1\x14\x9d\x41\xcc\x43\xad\x93\xa0\x65\x48\x3c\x7f\x14\xd3\xcf\xc9\x22\x35\x33\xaf\xb6\xf4\xc4\x33\xc7\xb6\x55\x7a\x34\x97\x51\xe8\x69\xa4\xfc\x5c\x0f\xaf\x54\x58\xbb\xa4\x12\x93\xe9\x37\x5a\xb4\x3e\x43\xcb\xef\x7b\x30\xd2\x25\x42\x36\x65\x4b\xe6\xf7\x2a\x14\xaf\x1a\xeb\x30\xc0\x88\xa4\x18\xde\xb5\x10\xc5\xf4\x24\x73\xd8\x32\xff\xdc\xdf\xd0\xea\x8c\x14\xb1\x56\x5d\x9a\x03\xfd\x71\xbf\x79\xd7\xb8\xc3\x6c\xc2\x96\x11\xd5\xbe\x4b\x37\x14\x77\xd0\x07\x15\x26\x9f\x0e\xa1\x42\xf6\xa1\x33\x04\x36\x38\x6b\x03\xaf\x76\x8e\x00\xe9\xa1\xf1\xd4\x5a\x67\x0e\x39\xa5\x82\x8b\x23\x9c\x0f\x05\xca\x82\x24\xaf\x99\xb9\x15\xfe\x70\xb6\x39\x27\x00\x0a\x1a\x9d\x57\x71\xc8\x27\x82\xb3\x47\xfc\xb7\xc5\x4f\x2d\x62\xeb\x10\x6d\x5c\x29\x0c\xd5\x26\x0c\x6e\x28\xe2\xe5\x6f\x24\x2c\x39\x3f\x5e\x03\x3f\xac\x05\x0e\x56\x80\x48\x05\x2b\x21\xb7\x9c\xa2\x53\x03\x18\x9d\xc4\xd3\x87\x97\x41\x3d\x82\x04\xb3\xf4\x2d\x27\x3f\x45\x8c\xdb\xd1\x67\x80\x18\x4d\x81\x61\xfa\x56\xd0\x1a\xc0\x98\x8a\xe2\x35\x2c\x2a\x3b\xb7\x95\x3e\xe1\xec\xa9\xda\x7f\x73\xe4\x74\xfb\xe7\xc7\xe1\x1f\x0e\x1d\x37\x3a\x04\x4b\x1a\x8b\xde\x0c\x03\xfd\xed\x9b\x13\x93\x6e\x51\x41\x7d\x82\x54\xc3\xd8\xd8\xbf\x62\x24\x25\x54\xff\x16\x77\x5e\xc2\x78\xb9\x8d\x9e\x7e\x06\x40\x6a\xb3\x52\x01\xd5\x3c\xe1\xa0\x7f\xb0\x5f\
x3a\xcf\x2a\xcb\xce\xb0\xb2\x5c\xf7\x6b\x43\x4c\x38\xa7\x7f\x6a\xfd\xef\xa5\x1d\x7d\x15\xd9\x82\x6c\x9d\x8e\x3f\x08\xf1\xfe\xaa\x29\x5e\xc4\xee\x40\x24\x66\x9f\x5d\x6d\x29\xdc\xd5\x54\x0c\xaa\xd6\xb4\xf9\xb0\xae\x8a\xe3\x85\xce\xfb\xc3\x2d\x8a\x29\xd7\x5e\x22\x4c\x84\xab\xec\x4f\xa5\x4d\x49\x8c\x4f\x6e\xf7\x1e\x6a\x03\x76\x6a\xce\x09\x74\x1a\xa0\xf6\x31\xdd\x91\xa4\xe1\x27\x39\xbb\x46\xf8\xe7\x41\xbb\xfe\x7f\x76\x74\xb9\xad\x34\xfa\x38\xad\xf5\xa1\x8a\x87\xe5\x11\x73\x04\x48\x18\xfa\xd2\xbf\x99\xb1\x20\x53\x86\xe5\x0d\x9f\xa7\xf1\x9e\xfc\xcc\x94\x7d\x86\x6d\xa7\xc7\xc0\x5d\xa0\x9f\xe2\x62\x81\xeb\x06\x18\x45\xc0\x51\x0b\xaf\xc7\x75\xbe\x5e\x0b\x95\xa6\xcc\x49\x45\x8d\xb1\xd6\x82\x43\x58\xe2\x7e\x12\x67\xf1\xfb\x0f\x26\x04\x86\xe3\x5d\x94\x5e\xab\x12\x90\x71\x10\x9c\x58\x1a\x94\x42\x8d\xc5\xf2\x07\x45\xa0\x82\x9d\xfe\xdd\x6f\xde\xf5\x85\x5f\x07\x44\xb6\xec\x49\xa2\x4d\x4d\xea\xb7\x2d\xcf\x37\x7a\xc6\x75\x80\x79\xd9\x52\x61\x1e\x5a\x9d\xb0\x61\xec\xae\xa8\xb6\xca\x33\x3f\x3a\xaa\x00\x3d\x75\xfb\xef\x1c\xd4\xe1\xf3\x9d\x59\xf7\xd0\x88\xf8\x79\xee\xf8\xba\x6a\xd7\x1d\x20\xc9\x05\xf1\x87\xfb\x5c\xea\x61\x90\x1f\xd3\x1b\x52\x07\x98\xcc\x08\x06\x6b\x0f\x4f\x6d\x78\xe0\xe7\x75\xab\x8b\x56\x05\x5d\xe2\x24\x48\x6e\x54\xdb\xee\x7d\x84\xa5\x06\xd0\xe1\xd9\x31\x7f\x53\x99\xff\xeb\xf6\x9a\x66\xca\xe2\x58\x0e\xf8\xe9\xdf\x0d\x5c\x47\xf1\x4f\xc3\x95\xfe\xd3\xaf\x35\x8a\x39\xf2\xa2\x60\x2a\xeb\xb2\x6c\x3f\xc4\xce\x25\x51\x52\xb1\xc5\x4c\xc7\xb3\x37\x36\x86\xfa\x10\x87\x47\xea\x19\x08\x92\xae\x30\x45\x81\x3b\x6a\xd1\xc9\x9a\x8c\x43\x6b\xe7\xc3\x3b\xd1\xd3\x57\xe7\x2f\x26\x68\xc1\x65\x22\x96\xf9\xeb\x7b\xc2\xf0\x78\xf9\xe4\x5f\xe8\x31\xc2\xe9\x6a\x4f\x60\x67\xf6\x70\x51\x54\x55\x3c\x1b\x91\x1d\x40\xeb\x19\x69\x52\x70\xe8\x1e\x52\xfd\x0e\x44\xfb\xbf\xd9\x99\x32\x6d\x9c\xa3\xaa\xad\x62\x3a\xe3\xc5\x71\xe5\xda\x9d\xea\xd5\x15\xa6\x41\xbd\xa6\xed\x9a\x2e\x3b\x39\x5d\xd6\x24\x89\xb1\x46\xc6\xc4\x23\xe5\x9d\x54\x3e\xa3\x72\xea\x5c\x5d\x64\x53\x8e\x3a\xb4\xea\xab\x02\x52\x99\x17\xd7\x84\x9d\
x2f\xe9\xed\xfd\x07\x77\xf6\xc7\x67\x38\xfe\x16\xd5\xa8\xd6\x7c\x8f\x00\xe3\xea\x2c\xd7\x95\x84\x1f\x38\x2c\xdd\x31\x84\xcb\x18\x05\x28\x9b\x65\x74\x36\x11\x19\xa7\x31\xfd\x13\x01\x2c\xdb\x1e\xd2\x8f\x58\xdc\x24\x7a\xf7\x55\xbb\x36\xb7\x03\x68\x82\x56\x4b\x50\x98\x04\x7b\x78\x9f\xd2\xb8\x4e\xcf\xfb\x38\xfd\xdf\xeb\xad\xec\x25\x95\x38\xee\x9a\x03\x23\x92\xe3\xfd\x7d\x79\xc0\x65\x7e\xe7\xd2\x68\x3c\xc9\xee\xf9\x33\x2d\x51\x08\x42\xb1\x6b\x7f\x11\x84\xa1\x26\x59\xf6\x20\xbf\x83\xaa\x42\xa6\xa9\xad\x41\x25\xa4\xe4\x30\x7a\xda\x5f\x94\x8a\x11\xaf\xfc\xdc\x50\xb0\x5c\x02\x6f\x38\xa9\x60\xf3\xf3\x45\x34\xa0\xc8\xe9\x3d\x8a\x87\x22\x0e\x17\x5c\xc5\x65\x9e\xc9\xf8\x3f\x9c\x3a\x67\x15\xc6\xb9\xad\x3f\x9a\x21\x34\xd5\xd9\x54\x17\xb6\xc4\xa1\xc9\x8f\xc3\xf0\x48\xba\xbf\xfc\x1f\xdf\x91\xaa\x02\x86\x61\xc0\xa0\xc4\xaa\xf4\x24\x84\x01\xf5\xc8\x11\xba\x04\x7f\x01\x5c\x04\xf8\x14\x90\xc6\xdf\x4b\x2d\xb6\xbe\x45\x84\x79\xed\x0a\x21\xd3\x47\xa7\x75\xce\x94\xbb\xc0\x9b\xd5\x3c\xaa\x61\x08\xc8\x2c\x99\xb4\xd1\x09\x55\x4b\x7f\x99\xae\xd1\x6e\x1b\x5b\xd0\x1e\x9f\xd1\x16\xba\xdd\xc5\x25\xd4\xa2\xb0\x9a\xb0\x55\x7f\x17\x9c\x55\x3d\x55\x83\xa2\x16\x32\xbb\x00\x71\x47\x64\x85\xa5\x21\x37\xd4\x5b\xe4\x0b\x6d\xe3\x5c\x78\x6c\xec\xe0\x5e\x41\xc9\x55\x6b\xe3\xb1\x91\xc1\xd6\xf1\x4a\xe7\xea\xe9\xc3\x7b\x66\x9f\x0d\xe7\xfe\xca\x52\x13\xee\x5a\x9f\xd0\x9d\xb2\x0b\xd9\x85\x54\x49\xb0\x53\x5f\x4f\x9b\x02\xc8\xd4\x05\x1a\x9f\x9a\xf1\xec\x45\x61\x18\x96\x17\xf6\x35\x08\xb1\x09\x33\x2a\xa0\x5c\xc9\x87\x11\xf6\x98\x77\xef\xb1\xac\x80\x14\x4e\xc6\x27\x10\xe6\x48\x57\xfb\xeb\x3a\xaf\x07\xec\xfe\x99\x07\x6b\xba\x1a\xeb\xc5\x31\x2a\x88\xd8\xc2\x89\x7f\xe2\x23\x42\x6f\xd9\x4a\x2c\xb6\x7a\xb9\x29\x2c\x6d\xe1\xa2\xf4\x7e\x3e\xd9\xbb\x51\xbd\x87\xbb\x42\xa7\xfe\x9e\xcb\x9a\xa1\x28\x10\xdf\xc9\x86\xbd\x3d\x0c\x5c\x3f\xe4\x98\xf9\xdb\x63\xd5\x7e\x4a\x76\xb9\xa7\xee\xe6\xe2\xc7\xfc\x04\x67\x20\x82\x7a\x2a\x87\xf0\xe5\xe5\x97\x4c\x8b\xae\xdf\x6a\x5c\xc2\xa3\xe6\x3e\xdd\xd6\xf0\xb7\xe7\xa5\xeb\xc0\xa4\xa5\xbf\xe2\x66\
xe2\x5f\x13\xc5\x72\x87\x12\x70\x2a\x75\xcf\xa3\x12\xaf\x73\xa8\xfd\x61\x9c\xa3\xa6\x46\xeb\xc7\x86\x8c\x81\xa9\x2a\xfe\x47\x46\xd5\xd2\xdf\x19\xa4\x6e\xfa\xd4\x78\x91\x97\xeb\x60\x6d\x6f\xae\x24\x27\x11\x86\xd8\x4a\x3a\x32\x09\xbf\x43\x40\xdb\x3e\x94\x18\xff\xbc\x7f\x3a\xff\x68\x5d\xa0\xc2\x6c\x53\x03\xe1\xb7\x20\x03\xf1\xa9\x8d\x1b\x52\x04\xad\x6c\xbf\xbe\x64\x7c\xde\xd0\x82\xc1\x75\x33\x5d\xf2\xa1\x9f\x01\x47\xfd\x32\x2f\xe5\x82\x5b\x9d\x8e\xec\xad\x96\x78\xc4\xbe\xe0\x32\x6f\x7f\x23\x61\x39\x98\xe3\xb2\xef\x9d\xc6\xf2\x15\x26\x11\xb0\x0d\xdc\x2a\xa0\x4e\x00\x1b\xc1\x98\x96\x31\x86\x3a\x44\xd2\xb7\x16\x88\xe6\xba\xca\x0f\x23\xad\xd5\x1b\x0e\xbf\x25\x64\x92\x1c\x46\x61\x95\x44\x81\x2c\xd7\x72\xdb\xdf\x3b\x40\xc3\x93\xe8\xf2\x24\xcf\x6d\x11\x50\x8b\xa7\xd8\xca\x06\x01\x02\x16\xe7\xa4\xde\x55\xb6\xd4\x61\xe0\x2a\x76\x19\xa3\x9b\xfa\x9f\x0b\x31\xa0\x35\x55\x6a\x55\x2f\x3f\x38\x84\x20\xb9\x72\x54\x7d\xf1\x10\x7e\x10\x74\xe9\xdc\x83\x6d\x43\x6e\x52\x3f\x52\x09\x44\x7f\x4e\xd5\xbb\x8a\xf3\xec\x94\xee\xa9\x02\xec\x1e\xdc\x41\x15\x2c\x26\x89\xa4\x13\xa5\x70\x1b\x64\xae\xf3\xe4\x17\x66\x2b\x2d\xf4\x64\x77\x9b\x02\xe3\xb3\x82\x49\xb4\x90\x69\xed\x10\xbf\x39\xcc\xe3\x37\xe3\xa6\x3b\x3a\x18\x37\x53\xa6\x80\x47\x78\x9f\xda\xdb\x2f\xe6\x72\x18\x49\x7f\x87\x70\xf2\xea\x57\x17\x77\xf7\xd9\x67\x5d\xd5\xca\x9b\xa6\x73\xf5\x22\x74\x6f\x27\x86\x8c\x92\x28\x40\x2c\x30\x8a\x9a\x9c\xc2\xcf\xc2\xbc\x51\x74\xa1\xc4\x53\x05\xf4\xb8\xf4\xa1\x85\x3c\xf3\x0b\x0d\x37\xd2\x07\x84\xff\xe7\xf2\xd6\xe2\xf6\x94\xd7\xd3\x4e\x39\xc7\x1d\xd0\x86\x69\x0a\x08\x32\xbb\x53\x91\x22\x28\x42\xbc\x5a\x7c\xe3\xa0\xa8\x87\xf1\x28\xe8\x5f\x02\xb6\xb7\x7b\x3b\x64\xda\x7c\x2a\xe8\xb2\x84\xae\x3a\xed\xba\x02\x83\x5b\x02\xe0\xfc\x01\x02\xfa\x7b\x9c\xdb\x46\x5c\xe5\x37\xdf\x39\xe4\xf7\xb9\xe5\x0d\xb8\xe5\xca\x62\x1f\x25\x25\x82\x38\xb8\x5c\x0b\x5a\xe5\x04\x8d\x8d\xd7\xdc\x48\x05\x54\xdc\x9b\xec\x04\xa7\x59\x32\x78\xb3\xcb\x3e\x23\x3a\xe0\x7f\xb0\xfc\x8c\xbf\x1c\x28\xcb\x47\xe7\xae\xc8\xdc\xa1\x11\xc2\x7d\x55\xf8\x44\
x9b\x51\x92\xbf\xb1\xe5\x27\x57\x14\x60\xcb\xda\xdb\xcf\xae\xf9\x35\x20\x78\x7d\xf3\x08\x85\xa2\x9b\x1a\xa3\xff\xa8\x0f\x80\x47\xe1\x90\x97\xb6\x9c\xdb\xcc\xdf\x1a\x12\x83\x4d\x89\x38\x22\xb2\xc2\x1a\x83\x49\xb8\xa0\x90\x57\xf0\x3f\xda\x77\xa1\xc9\x27\x0a\xf0\x36\x1b\xdc\x9b\x67\x0d\x2f\x96\x1a\x34\x4a\xcd\x42\x83\xc2\xf1\x62\xc2\x5c\xa6\x66\x9e\x87\x2e\x11\xd8\x6c\xa1\xc5\x81\x12\x9a\xec\xe4\xfd\x9a\x3b\xbf\x5d\x0e\xdb\x26\x55\x75\xa5\xd5\xf4\x9f\x13\xab\x10\x44\x67\x3a\x5a\x9d\x9b\x8b\x47\x01\x95\x08\x66\xad\xa3\xc0\x0f\x9f\x2e\x4f\xf0\x2e\x0f\x5b\x5b\x77\x86\x76\x90\x22\xc2\x10\xf7\x22\xd1\x28\xc1\x2a\x0d\xb5\xb2\x90\x44\x88\x96\x7c\x01\x61\xb1\x1f\x64\xfd\xc1\x74\x5d\xcf\x8e\x44\x3b\xf5\xa8\xec\x8e\xcf\x1d\x4e\x2c\x53\x25\x0c\x20\x6f\xc1\xa3\xfe\x99\xef\x93\x48\x2c\x7e\xfa\xbd\xaf\xce\xaf\x0e\xe4\x54\xda\x34\x3d\x99\x5e\x2e\x7a\x75\x16\xba\xe9\x4e\x0f\xfd\x06\x6b\xa4\xe0\x2c\x18\xee\xbe\xa7\x7a\xef\x90\x91\x14\x12\xf5\x8c\xc2\xdb\x12\xfd\x64\x25\x01\x3c\x6e\x7d\x79\xc8\x16\x46\xc1\x88\x4d\x06\x58\xbf\x7c\x97\x49\xa6\xb9\x2c\x7c\x80\x63\xae\x8d\xdb\x32\xc8\x42\x6f\x58\xca\x50\x62\x10\xa9\xd1\xb1\xf3\x0c\x6f\xa0\x2c\x34\xe1\x8c\x8d\xf3\xa5\x68\xf4\x91\xbf\xa4\xff\xc9\xfb\xc6\xf6\xb1\xd6\x78\x73\xe5\x50\xb7\x6d\xb4\x36\x83\x78\x70\xc2\x73\x58\xb1\x43\x99\xe0\x17\xd8\x9b\x45\xb1\xcb\x2f\x9f\xbb\x1d\x18\x3d\xbe\xba\x9b\xdf\x5c\x25\x5d\xf1\x66\x4d\x27\xb9\xff\xee\x7f\xcf\x86\x79\xfb\x45\x4f\xda\x16\xf4\x68\x1b\x6a\x6d\xed\x5d\x49\x8c\x4e\x81\x37\x09\x9c\xc7\x54\x91\x22\x0e\xbd\x65\x28\xc3\x71\x79\xa3\x3a\xa8\xf2\xfd\x9d\xb3\xa0\xa7\x15\x97\xe2\x84\x58\x0a\x4b\xb1\x13\x27\x0c\x5f\x6a\x58\x91\x85\x13\xca\x76\xb5\xe2\xcf\xc3\xf6\x34\x59\x96\xaa\xb9\xf3\xab\xa6\x2b\xf9\xd5\x53\x2d\x18\x89\x90\x41\xcd\x4b\xdd\x4a\xec\xa8\x81\xa3\xfa\x9c\x64\x92\x4d\xd3\xa2\x94\x0d\x73\xd0\x3e\x57\x8c\xab\xf6\xc8\xfc\xb5\xe0\x2f\x43\x48\xf4\xd6\x91\xef\x4a\xc9\xce\x2b\xda\xd8\x08\x35\x80\x92\x32\xb0\x73\xa3\x6d\xd2\x38\xe5\x5f\x59\x78\x0a\xd0\xd2\xe0\x05\xa2\x1d\x9c\xd5\x77\xf5\x0f\xdf\x7a\
x5a\x50\xef\xbe\xc9\x20\xa9\xb9\xc2\x90\x65\xed\xbc\xfe\xd4\xaf\x98\x1f\xa6\xfb\xea\x3e\xbc\x52\xfb\x8d\x24\x7a\xcd\xd4\x9b\x97\x8d\xd0\xcc\x2b\xf8\x82\x8d\x64\x70\xc9\xc3\x68\xfe\x61\x25\x7b\x54\x42\xb8\x5b\x82\xc7\x2e\xb4\xfd\x9c\xc9\x55\xc7\xd6\x5e\x0b\xc5\x43\xa2\x1d\x20\x03\x43\x78\x9f\xfa\x0b\x2d\xc7\x1a\xb5\x70\xb6\xa9\x99\x7a\xc3\x18\xa5\xea\x29\x45\x65\xe5\x6a\xc6\x7d\xeb\x4d\xbe\x3a\xf4\x3b\xdc\x3f\x9e\xc3\xb6\x80\xeb\x4a\x1c\xc3\xbd\x48\xa1\x0d\x49\x93\xc6\x9a\x77\xaf\x63\xd7\x2d\xa4\x16\x4b\x38\x85\x54\x3e\x76\x12\x8c\x56\x70\xb0\x4a\xfc\xc3\x17\x55\xce\x5d\x7a\xa4\xa8\x0c\x4c\x10\x20\xd4\x4f\xa3\x60\x40\xc1\x09\x99\x8c\xd1\x57\xdb\x36\x6c\x1b\xff\xdc\x02\x62\xd2\x3d\xea\x82\xcb\x11\xa2\x22\x40\x53\x15\x8f\x83\x94\xfd\x9d\x63\x21\x41\x1c\x99\x4d\x01\x05\x4d\x0c\x82\x19\xa4\x89\x8b\x29\x1f\x69\xee\x14\x68\xbf\x94\xca\xb5\xe6\x92\x71\x05\x1a\x21\x74\x48\xbd\x68\xb1\x3d\x36\xd1\xad\x54\xd7\x17\x0c\x07\x00\x2d\x2d\x96\xeb\x98\x18\x0a\xa8\x80\xf7\xef\xfd\x4d\x5a\x37\xcc\x4f\x65\x2a\x96\x6a\xca\x87\xc4\x88\xe2\xea\x1c\xc5\x15\x50\x4b\x02\xcb\x4b\x91\x81\x17\xc9\xbe\x2f\xad\x71\x87\xf2\xa7\x75\x25\x56\x6b\xfa\xa3\x6a\x13\xf7\x9f\x73\xf8\x91\x4f\x70\xb8\xc6\x0b\x12\xc3\x21\x62\xf4\xae\xb3\xba\x71\x5a\xb2\x38\x00\xf0\xaf\x9a\xfc\x65\xbe\xe3\x35\x0c\xbc\x51\x92\xa5\x74\x3e\x75\x99\x21\x26\x87\x90\xda\xe8\x78\xda\x04\xae\x36\x16\x1f\x25\x67\xe5\x14\xe6\x6b\xf4\xb4\xfe\x7a\xde\xe9\x95\xa8\x15\xbc\xf6\x04\x51\x83\x04\x9f\xc7\x2d\xb7\x87\xcd\x71\xf0\xf6\x53\xf7\x0d\x1f\xdb\xc2\xaa\xfb\x1d\x5e\x1d\xcf\xd5\x76\x3f\xf5\xfd\xd3\xbf\x54\x54\x26\x38\xfa\xc1\xb1\xfe\x6c\xca\x7e\x5e\xa8\x87\x28\x2f\x8d\xce\x22\x94\x19\x99\xba\xc3\x5a\xed\x8e\xa2\xc0\xec\x31\x88\x29\xb5\xc3\x44\xd1\x95\x06\x9d\x14\x27\xc6\xd1\xef\xeb\x79\x5e\xe3\xea\xb0\xef\x93\x17\x36\x12\x7d\x91\x0a\x60\x21\x41\xa4\xc6\x28\x70\xae\x71\x77\xff\x8e\x8a\x44\x14\xba\x6c\x96\xfe\x8f\x0d\x98\xe3\x07\x1e\xa4\xf5\xed\x57\x68\x2d\x81\xea\xc5\x43\x9a\x45\xe2\x66\xec\x1a\x25\xd2\xe6\xd9\xc1\xd2\x16\x4b\x70\xc1\x27\x50\
x21\xb8\x37\x4e\x6d\x4d\x34\xff\x68\x44\x30\x2a\xa2\x56\x26\x44\x53\xe1\x86\x6d\x9e\xcd\x63\x5d\xd3\x92\x6d\xea\x1a\xd0\x51\x13\x21\x5a\x44\x81\x75\xf4\xd2\x6b\x23\xf8\x71\x5b\x91\x02\xd4\x1b\x35\x77\x6d\x5d\x06\x93\x26\xc8\x7c\xfb\x25\xc2\x6b\xd3\x6a\x17\x67\xfa\xd5\x4f\x53\xe3\x53\xf7\x60\xef\x8d\x87\x34\x53\x5e\x2d\x0b\x4b\x86\xbe\x9d\xb4\x36\x8c\x28\x56\x6d\x3c\x30\x74\x04\x68\x12\xd0\xda\x7b\x9f\x3a\x7e\xaf\x8c\x7b\xae\xe5\xa1\x06\xf9\x85\x1b\x40\x99\x04\x91\x2e\xdc\xdc\xe2\xab\x23\xff\xf8\x13\x33\x6a\xa4\x0e\xad\x05\xaf\xe3\x4a\x42\xb3\xd3\x98\x3a\x2a\x62\xe6\xbf\x31\x43\x62\xef\x7c\x9b\x28\x6c\xe8\xe3\x35\x74\xb5\xba\x65\x0d\xf5\xb4\x20\x8e\x48\xf3\xa2\x88\xc2\x19\xca\xcc\x5e\x4a\x5f\x5b\x51\x1b\xc2\xd0\x38\x90\x51\x27\x70\x62\xb6\x7e\xb1\x04\xcc\x62\xcb\x96\x10\xa5\x1c\x73\x47\x40\xbb\x2c\x26\x49\xb6\x06\x04\x6a\xc5\x76\xdb\x09\xf8\xc3\x2d\x78\x2d\x97\xc3\x86\xd0\xc7\x16\x92\x0d\x03\x2a\x26\x70\x43\x5c\x2e\xc7\x3f\x3b\x03\xac\x38\x60\x1c\xf4\x16\x45\x2e\x1c\x9e\x7c\xe4\xce\x34\x2d\xcd\x69\x29\xc2\x75\x81\xde\xb2\x34\x23\x14\xc7\xb0\xc2\x80\xd7\x26\x60\x19\xa1\x2f\x41\x65\xfc\x54\x5c\x18\x93\xc0\xd1\xdf\xea\x74\x6a\xd6\x91\xd3\xec\x40\xd3\xa3\x40\x4b\xb4\x65\x77\x2a\x43\x2a\xcb\x6e\x92\x3d\xb6\x1b\xb0\x02\x57\x3f\x32\xa8\x84\x9b\xc3\xa8\xd2\x7d\x8a\xee\xae\xeb\x96\xea\xbf\xfd\xf7\x6f\x8b\x0d\xbc\x33\xde\x23\xdf\xe2\x6c\x1f\x1e\xfb\x77\x34\x8b\xe1\xbc\x60\xa4\x87\x88\x3d\x19\x0f\xaa\xea\x44\xc7\x6d\x34\x99\x6e\x79\x89\xf6\x9a\xf5\x68\xab\x3e\x1d\x4e\x6d\x2d\xc6\x0e\x4d\x18\xcc\x5d\xab\xb8\x9d\x94\xe9\xd5\x94\x5a\x69\xa0\xe3\x31\xa7\x12\x68\xf4\xfa\xf6\xb9\xfd\x7e\x24\x4e\xf9\x2e\x8a\xd4\x74\x97\xec\x99\x62\xbb\xa7\x4d\x2b\x5d\x88\x78\xba\xfb\xa7\x18\x74\x1a\xef\x81\xba\x4d\x02\x68\x72\x16\x2c\x55\x83\xb1\xd9\x5e\x45\xf8\x8b\xd2\x6f\x61\xa6\x7f\xd1\xea\xe6\x63\x42\x8d\xbd\xd2\xc8\xdf\x31\xc2\x56\xe7\x2a\xcf\x9e\x49\x97\xfd\xe6\xfd\x60\x68\x09\x87\x25\x15\x84\x39\x6a\x9a\x69\xe3\x02\xbe\x90\x40\xe4\x50\x46\x12\xcb\xba\x29\x48\x89\xbb\xf1\x1e\xce\x57\x12\xff\xe5\
xc3\x0a\x63\x2c\x87\x38\x43\xed\x8e\xcb\xbd\xec\x17\x3a\xd6\x6d\x64\x3b\x3b\x1c\x11\xf3\xa5\x10\x57\x33\x50\x00\x68\x4e\xf9\x44\xee\x10\xf5\x8f\xd0\xf1\xf0\x9e\x3b\x1f\x22\x48\xad\x74\x89\x27\x6f\xfc\x8c\xf2\x1f\x08\xf8\x59\x0c\x58\x24\x3e\x60\x6d\xfa\x24\x0f\x30\x66\xbe\x49\xc2\x2b\xa1\x91\x5b\xd5\xfb\xc3\xc9\x2f\xc9\x3e\x27\x68\x54\xc2\x69\xc7\xdc\x3a\x2d\xda\x51\xda\xfb\x36\x2e\x6e\xc3\x5a\x60\x0d\xb3\xe5\xdd\x0f\x8d\xe4\xd3\xed\x6c\x7d\xf9\x42\xc7\xaf\x47\x3c\x40\xc1\xa8\xb6\x77\x31\x59\xb0\x0c\xb3\x71\x19\x26\xd5\xa4\xce\xc5\xa8\xd2\x8e\x97\xe0\x61\x20\x0b\x5a\xfa\x6f\xec\x89\xe9\x04\xbe\xf7\x85\x42\x0a\xad\xb3\x98\x20\x8d\xc2\xfa\xb8\x7f\x3c\x56\xe5\x53\x58\x5a\xe0\x3f\x58\xf6\x45\xd9\xb0\x1a\xb2\x97\x78\x29\xc0\xbc\x16\xf4\x54\xa6\xb0\x99\xbc\x07\x42\x84\xa7\x58\x1f\x55\x41\x0b\x29\x0f\x91\xd8\x81\x3e\x18\x43\xbf\x5b\x54\x52\xe0\x77\x2f\xd7\xc3\xf3\xac\x55\xc6\x37\x89\x8b\xdc\x07\x33\xa5\xe7\x9c\xc5\x93\xc9\xe0\xe0\x5c\x27\x3a\xed\xb3\xe2\x76\x20\xbf\xb7\x82\xdb\xa3\x98\x22\x08\x8e\x29\x40\x24\x35\xa9\xba\x37\x6f\x7d\xde\x4d\x12\xeb\xf1\xff\xe4\x9a\x15\x54\x88\xfd\x07\xf8\x76\x22\x87\xdc\xe4\x8c\x5a\xff\xcc\xe0\xf1\x2d\x99\x5e\xfa\xb7\xb0\xf9\x95\xfb\xd1\x2b\x96\x13\xde\xb4\x27\x58\x69\x29\x07\x1d\x41\xf8\x01\x52\xfc\x80\xfd\xae\x9b\x72\x89\x3f\x84\xc6\xb1\xe1\xad\xd3\x2d\x06\xde\x4f\x06\x7d\xdf\xce\xd9\xb0\xa9\xbb\x9f\x68\x18\x62\x7d\x3d\xaf\x98\x76\x28\x88\x14\x4c\xe5\xdf\x7b\xd1\x64\xe2\x94\x79\xfa\x4d\xbb\x08\xa7\xe9\x47\xc3\x8d\x14\x24\xd5\x50\xe7\xe2\x5e\xea\x0b\x89\xc1\xd5\x8c\xf3\x47\xaf\xa7\x9c\x54\x7d\x64\x01\xa0\xfe\xec\x9a\x68\x67\x7d\xcb\xe7\x3e\xf3\x5d\x18\xe8\x06\x65\xac\x54\x81\x3f\xa3\xb0\xe8\xe7\x34\xb1\xe4\x6f\x3d\x14\xa3\x6b\x6c\xcc\x25\x9b\xd6\xce\xb3\x2e\xf3\x5a\x2a\x23\xbf\x16\xd8\xf8\x13\x9c\x51\x9b\xbd\xcf\xe0\xac\x1b\xce\xc7\xd6\x34\x67\xc2\x66\x21\x7d\xea\x77\x4a\x94\x56\x50\xd5\xd9\x5e\x38\xc3\x9b\xfc\x2d\x28\x75\xc7\x49\xc7\xfd\xa2\x39\xf1\xe4\xab\x01\x76\x60\xf3\xc3\x8f\x97\xf5\xb8\x7e\xbf\x48\x03\xb5\xcd\x42\x81\x3a\x57\x60\x6b\
x8d\x02\x49\x86\xa9\x5f\xe2\xa3\x4b\x05\xb5\xf1\xc7\x7c\x6a\xc8\x5e\x3e\x76\x38\xea\xac\xd1\x5f\x79\x2c\x63\x38\x63\xb5\xf3\x9f\xe2\xe5\x5e\x08\x21\xeb\x44\x14\xaa\xd3\x7e\xb0\xab\x16\xbf\x3d\x16\xb6\x27\x58\xfc\x8a\x35\xc5\xe4\xa0\xc8\x36\x34\x01\x83\x71\x40\x98\x19\x4d\xb6\xe3\xc7\x4f\x20\x90\x80\x69\x4e\x59\xc7\x68\x6c\xe1\x7d\xbc\x14\xa6\xb6\xab\xc1\xb1\x6b\xc2\xcf\x12\x68\xfa\xfb\xcc\x35\x21\x68\x27\x05\xd6\x57\xa2\xea\x97\x53\x0c\x6e\xf0\x74\xe2\xb8\x7b\x9b\x05\xea\x5e\x10\x6a\xad\xb6\xed\x9a\x57\x8c\x53\x47\x0e\xee\x9e\x75\xf6\xd1\x52\x0a\x9f\x73\xe0\x67\x41\xd1\x0a\x2b\x27\xc9\x53\x67\xac\x97\x29\x29\xbf\x7f\xeb\x0d\x58\x86\x6e\x59\x15\x1a\x09\x74\x48\xcb\x90\x88\x03\xa2\x6e\xd9\xde\xd9\x0c\xa2\x12\xbb\xdc\x55\xa4\x30\x26\x7e\xd3\x71\xa3\x9b\xfe\xa2\xb9\x57\x3f\x66\xd3\x53\x54\xbd\x83\x5d\xdc\x82\x57\x99\xf3\x5a\x18\x75\x1c\x4b\xfb\xcd\x8d\x2e\x40\xf9\x92\x28\xb0\x76\x75\xec\x90\xa7\x7c\x7f\xae\x55\x30\x95\xd1\x7b\x93\x32\xdd\x7f\xa2\x92\x89\x87\xc4\xc4\xb9\xb4\xdd\xda\xf9\x06\x6d\xbb\xe8\xdc\x3d\x53\xf9\x20\xc2\x70\xac\x2e\xf3\x38\x04\x98\x4d\x1c\x63\x23\x76\xc7\xd3\x87\xdf\x28\xea\x03\xee\x0b\x84\x61\x1c\xbf\x68\x6b\x6f\x5e\x69\x6f\x28\x2e\xe0\x1f\xf8\xb7\xd6\x52\xe1\x76\x05\xb7\x1e\xd6\x89\x71\xd5\x0b\xc5\x70\x5a\xe3\xc8\x8c\x52\x05\x0e\xf6\xd0\xd0\x1e\xd9\x22\xe9\x24\x06\x43\x11\x58\x43\xe7\x8a\x1d\x19\xa2\xac\x9d\x9e\xa5\x85\x4d\x55\x4a\x88\x9a\x94\x42\x29\xe2\xc2\x87\xa0\x6f\x39\x03\x14\xb7\x77\xda\x4b\x98\x69\xc3\x14\x84\x7f\xe9\xaf\x5e\xf1\x76\xd2\x38\x16\x23\xe9\x5b\x94\x89\x3e\x24\x55\x17\xfd\x19\xea\xcc\x40\xcc\x85\xe7\x09\xbf\xef\xe3\xbb\x10\x2a\x84\x27\xd4\x00\xe9\x70\x8b\xd5\x9c\xab\x0a\x55\xd9\x27\xf5\x48\x14\xff\xee\xed\x30\x68\x5b\xc8\xa5\x6f\xf7\xb6\x3f\xa1\xae\x47\xdf\xa8\x67\xe5\x74\x0c\x30\x09\xea\x64\x1b\xa4\xe0\xa9\x80\xca\x66\x64\x43\xf0\xe4\x21\xa4\x23\x71\xda\xa0\x4b\xc5\x7f\x5f\x7d\x95\x86\xbe\x4d\xcd\x1d\xe7\x20\x31\x0a\xc2\xd3\xc8\x44\x7e\x75\x71\x03\x50\x68\x8d\x75\x07\x5b\xa1\xa0\x99\x04\xe8\xd1\x81\xd7\xe3\x72\x3f\xff\xb1\x7f\
xa5\x1e\x0b\x89\x8b\xd9\xaf\xc8\x93\x89\x0f\x6a\xf7\x3d\x33\x82\x53\xdb\x37\xe1\xb8\xbf\x95\x3c\xe9\xf2\x8a\x5e\xb0\x65\xc7\xa2\x32\x86\xf3\xea\x80\x20\xaf\x7a\xb1\xc1\x4a\x67\x09\x20\x3f\xef\x51\xa4\x22\x89\x73\x59\x73\xc1\xf9\x5b\xef\x2f\x30\x08\xb7\x05\x0e\xd8\x5a\xed\x4f\xe9\x27\xd8\x71\xd9\xc8\xee\x07\x63\x15\x9e\xa3\x45\xe6\x1b\x0f\x78\xd0\x2d\xd4\x46\x82\xa9\x27\x6f\xcb\x09\x48\x6f\xf2\xdd\xf8\x07\x6a\x9b\xe6\x65\x14\x77\xec\x62\x0a\x07\x22\xba\x7b\xb3\x9b\xa4\xf5\xa4\xad\x07\x9f\x83\x72\xf3\x92\xb7\xa6\xd3\x54\x59\x13\x63\xbf\x14\xcc\xaa\x24\x42\xae\xc4\xb1\x69\x81\x7a\x8b\xb5\xc0\x70\x73\x3c\x9c\x71\xd9\x42\x46\x2c\xc3\x89\x18\xce\xf2\x65\xe6\xa7\x2b\xe3\x4c\x81\xf4\x75\xd1\xca\x88\xdf\xe8\x75\x8b\x46\x05\x1e\x77\x1f\xe8\x6e\x71\xfb\x8e\xcb\xac\xe2\x68\xa8\xd6\x3f\x39\xe0\x7e\x49\xf0\xa3\xe1\x58\x2b\x77\x23\xcb\xd4\x4d\x61\xc5\xc3\x4a\xbe\x8c\x68\x30\x74\x75\xa7\x49\x5b\x58\x08\x0f\xd2\xc0\x03\x95\xcf\x40\xe0\xbb\x1f\x88\x95\x07\xaa\x2b\x14\xd9\x05\xc6\x32\xfa\x43\x6e\x51\xf6\x19\x70\x1a\xc6\x74\xb4\xef\x56\x65\x49\x25\x82\x60\x6e\x2d\xca\x08\x3c\x88\x8c\xca\xc7\xc6\x07\x1d\x3b\xd5\xa3\xd6\x9c\xc3\xf8\x2f\x76\xb3\x69\x6a\xf5\x9e\x91\x9c\x9b\xc8\x3a\x91\xbf\xcc\x94\x56\x26\xe0\x84\xc6\xcf\xf3\xf7\xcd\x7e\x9f\x8e\x7e\xf0\x1a\xfa\x4c\xf2\x70\xf5\xdf\xde\x1d\x8e\xf9\x5b\x90\xf6\x8a\x86\x69\x51\xb2\x9b\x44\xe3\xd1\x1d\x0a\xe4\x9e\xef\x3c\xf7\xb6\x18\x3d\x07\xe6\x34\x17\x1f\x01\x83\x39\xb6\x83\x42\xe6\x1e\x8d\x42\x9b\x42\x4c\x3c\x65\x0d\x9c\x2d\x58\x6d\xed\xd9\x44\x43\xb5\xc8\x9e\xd2\xe6\x92\xf8\xb5\x81\x2c\x9c\xb0\xa7\xab\x7c\xc3\x2e\x9c\x4e\xb2\x82\x54\x5f\xf5\xcc\xb8\x2c\x52\x33\x1c\x43\x86\x62\x1a\x4d\x5d\xdd\xa4\x83\x04\x4a\x7f\xb4\xd9\x6a\x2e\xe5\x41\x52\x80\xa7\x9f\x3e\x52\x4c\x38\xb9\xc2\xc4\xc7\xff\x13\xbc\x54\x16\x7e\xb4\x3d\xb8\x53\x81\x60\x40\x7a\x99\xd8\xd3\xe5\x3c\x98\xfe\x35\x99\x3c\x8d\xba\x7d\xc9\x46\x6d\x15\xcf\xcf\x34\x25\x53\x49\x83\x4c\x21\x69\xf8\x94\x69\x8e\x73\x84\x89\x7e\x4b\x0d\xb3\x71\xb2\xbf\x3b\xe1\x7a\x8e\x47\x1c\x7b\xdc\x01\x98\xbe\
xae\xb2\x9b\x11\x18\x87\x15\x84\x9b\x13\x44\x8e\xf2\xbf\xaa\xbc\x16\x28\x87\x4e\x1d\x06\xae\x83\x38\xbf\x9d\x3c\xff\xfc\x6d\x27\x71\xce\x1a\xcb\x28\x8d\xbe\x7b\xa2\xd3\x7d\x76\x03\x29\xcd\x54\x53\x1f\x6f\x18\xb7\x29\xa0\x3e\x6f\x9c\x2f\xaf\x5d\xd1\x2a\x06\xff\x0c\x43\x6e\x97\xd0\xda\x50\x2b\xe5\x9d\xcc\xf7\xb2\x0f\xc9\xdf\x57\xd7\x14\x6d\x0f\x87\x68\x0f\x7d\x61\xcb\xe3\x7c\xf4\xed\xac\xa8\xec\xe9\xaa\x64\x82\x78\x3e\xb9\xfd\x85\xb1\x18\x9c\x51\x2c\xdb\x87\xe9\x36\x5b\xab\xa0\x32\x86\x0c\x88\x49\x96\xc8\x74\xa5\xd8\x25\x12\xf6\x42\x9c\xdb\x0b\x54\x28\x10\xa3\xc0\xb4\x60\xf5\x03\x0a\xd4\x07\x1b\xc1\xfb\x53\xf6\x9b\x64\xdf\x33\xb8\xc7\x9e\x34\xf6\x95\xce\x66\xde\xc9\x78\x28\x72\x5e\x29\x41\xdc\xea\xad\x03\xae\xff\xa7\xff\x4f\x7f\x9d\xdf\x8e\x1b\x0a\xac\xac\xb0\x11\x8f\x98\x56\x11\xbd\xe2\x59\x10\x8d\xa6\xde\xc2\x0c\x60\x50\x36\xe8\x26\xb3\x44\x2b\xd3\x3a\x07\x43\x51\xb5\x44\xbf\xab\x16\x78\xed\x9d\xb2\x9c\xfb\xa6\x84\xf5\x1b\xa9\xb9\x40\xc7\x35\xe9\x11\x6e\x49\xc7\x99\x5a\xd8\xd9\x4d\x1a\xf8\xf2\xd8\x10\xd7\x76\xbf\xa3\x96\x82\xc7\x80\x18\x02\x49\x8a\x98\x4d\x89\xa9\x53\x6a\xb3\x71\x59\xd6\x0c\xd9\x36\x02\xb8\x56\x02\xb8\x36\x02\x58\x7f\xe3\xb2\x58\x07\xe9\x41\x07\x99\xeb\x75\xcb\xb7\x3b\xd2\xc2\xd1\x72\x15\xf8\xc4\xf7\xd6\xf3\x5a\x93\x79\xe7\x0d\x65\xba\x91\xe8\x05\x13\x4e\xdb\xfd\xd8\xf4\xcc\x6c\x2f\xe4\x49\x96\xbb\x88\x79\x93\x3f\x76\x91\xf7\x46\xe3\xcf\x1c\xab\x9f\xba\xf4\x70\x43\x16\xa5\x7f\x63\x99\x29\x77\xf3\x9d\xd9\xee\xdc\xf1\x67\x41\xb9\xb0\x57\xac\xfc\xf2\x60\x53\x67\xae\xe9\x46\xfc\x9d\x40\x78\xad\xe3\xb3\x67\xe5\xb4\x90\xd8\x81\xdc\xef\x58\x69\x85\xbf\xe6\x1f\x8a\x4b\xd5\x84\x03\x55\x0c\x3f\x5e\x30\xa8\x9e\x73\x18\x50\xf2\x4f\x3f\xac\xf6\x8f\x4d\x6c\xda\xdf\xf1\x95\xcb\x96\x01\x5b\x0f\x06\xf9\x65\x9b\x1c\xf4\xcb\x76\xa1\x77\x66\xab\x17\xe5\xcd\x75\x91\xe6\xc2\xf6\xee\x5d\x5f\x23\x73\xae\xd0\xfe\x88\xf4\x5a\x88\x3a\xe7\xec\x58\x1b\xea\xf6\x97\x25\x00\x19\x66\xa7\x1b\x94\xd9\x98\x31\x3a\xeb\x0a\xda\xc8\xd4\xbd\xae\x62\xc4\x45\x0b\x25\x70\x5c\x1f\x80\x81\xe0\
x41\x5d\x44\xbf\x0e\xd0\xb8\x7e\x48\x09\x23\x29\x26\x7d\xb7\x38\xd5\x3a\x77\xcf\x5a\xbb\x69\x5f\x8e\x0c\x9a\xe9\x16\x8c\xd1\x50\xaf\x02\xca\xad\x55\xe1\x18\x55\x5a\xb2\x4f\x23\xa0\xdd\x3d\x09\x55\x02\xd6\xf4\xa3\x00\x5b\x3e\xc9\xd1\xcd\x99\xe3\x97\x22\xc5\x94\x22\xe1\x00\xcf\xed\x46\xf8\x73\xc8\x05\xa1\x1b\x84\xe2\x12\x45\x83\x9a\x59\xe3\xcf\x18\x4d\xb1\x7b\x46\x86\x55\x40\xef\x29\x8f\x78\x75\x38\xa6\xc6\xea\x5f\x47\xe4\x77\xfa\x2b\xa5\xb6\xf1\xf4\xf1\xb9\x47\x6e\x15\xdb\x17\x37\x65\x59\xa6\x4a\xc5\x01\x01\x9e\x5e\x64\xbe\x79\x4f\x70\xe3\x1f\x0a\x30\xb9\xc6\xc1\xf2\x5f\xcc\xe1\x8f\x0f\x93\x48\x36\xfe\xd9\xae\x63\xb3\xc0\x4e\x14\x49\x14\x7d\xad\xf8\xed\x0f\xdd\x0b\x32\x53\xea\xdc\x1a\xa7\xb6\xef\x7b\x96\xcc\x96\x14\xf9\x6f\x55\x77\x6d\x49\xeb\x96\x8d\xcb\x9f\xc8\x8d\xed\xfe\x23\x50\x85\x2d\xf3\x34\xb2\xe9\x08\x7d\x55\x25\x28\xee\xb7\x26\x78\xfa\xf0\x50\xb1\x66\xdb\xe9\xf9\x18\xef\x05\x27\x3f\x94\xe7\x0f\x5b\xf9\x79\xdb\x8d\x87\x05\x6c\xf7\x49\x81\x0d\x4e\xb3\x5e\x1d\xe0\x65\xe5\x4b\xc2\x70\x3f\x41\xad\xcf\x35\x22\xe2\x0e\x9e\x60\xa1\x44\x82\xdb\xe7\x08\x53\x42\xb5\x01\xe0\xb1\x66\xe3\xb1\xa6\xe3\x07\x90\x05\x54\x80\xfd\x4b\x6d\xba\x3b\x63\x99\x19\xeb\xfd\xab\x4a\x9d\xbb\x44\xf1\x80\xd8\x0d\x47\x71\xf7\xa1\x7b\xd0\xa4\xdd\x40\x59\xa9\xfd\x51\x5c\xc7\x6f\x4a\xd0\x09\xc1\x17\x2b\x46\x89\x83\xa7\x1c\x8e\x49\xb8\x3a\xe0\x18\x1c\x1d\x53\x5f\x4b\x1d\x4d\x75\xb5\x25\x58\x8f\xe3\xb8\x0e\xcb\xef\xf1\x62\xdb\xe9\x0f\x10\x12\x23\x56\x85\x20\x14\x5d\xe8\x4a\x7c\xa0\x12\x1b\xca\xbe\xe8\xf8\x21\x29\x6a\xe7\x58\xb0\xba\x50\xd2\xa7\x5f\x40\xe7\x30\x24\x4f\x31\x96\xd1\xd8\x19\x2a\x6d\xb9\x83\x09\x53\xd6\xfc\xe5\x1c\x5b\x6c\xd5\x9e\x84\x2a\xac\x91\x6f\x9e\x77\x71\x16\x73\xdb\x8b\xe4\x2e\x74\xed\xf5\x83\x3f\xb1\x65\xd0\x06\x9f\xc3\x4e\x2d\xeb\x89\x42\x18\xaf\x0a\x5e\xe1\x82\x26\x6d\x92\xa4\x8b\xd9\x64\x1b\x48\x60\xfc\x86\x50\x85\xee\x44\x7f\x27\xb2\xc1\x24\x0d\x88\x7c\x03\xab\x30\xc4\x60\x52\x0d\xe5\x96\xfc\xe1\x2f\x20\xf3\xfd\x32\x10\xd7\xf4\xb9\x38\x75\x3a\x4b\x4f\xe4\x76\x21\x4d\xfb\
xdf\xb6\x00\xc2\x20\x38\xa1\x6b\x4c\xe9\x66\x7c\xe8\xfe\xb7\x1e\x5f\x27\x44\x67\xb5\x75\x5d\x07\xab\x03\x81\x2b\x3c\x2d\xa3\xe6\xdd\x57\xab\x9b\x09\x56\x5e\x86\x4d\x5c\x11\x69\xef\x99\x5b\xe5\x64\xed\x1d\x5e\x4f\x61\xea\x34\xf7\x57\xb9\x7a\x4b\xe4\xa2\x73\x8d\x25\x67\x05\x37\xca\xc1\xe9\x64\x43\xbb\x79\x02\x52\x1c\x3b\xdb\xcd\x8a\xd2\xdc\xe4\xa2\x45\x61\x04\x59\xe2\x88\x79\xeb\x07\x6a\xb2\x56\x53\xc5\xda\x7d\x10\xfa\xd5\xb3\x68\xd9\x52\x87\x53\x50\x56\x68\xfd\x9b\xda\xf2\xd4\xe5\xd1\x19\x6b\x0c\xd8\xcd\xa4\x75\x53\xa5\x2d\x36\x38\x65\xec\x3c\x00\xe6\x95\x8d\x12\xd4\x4a\x5c\xdb\x64\x01\x6f\xc2\xf3\x33\x1d\xcf\x6a\xbd\xb6\xa7\x35\x1a\xf0\x2e\x64\x9d\xe4\x87\x04\x6d\xb6\x4f\x42\x14\x60\x06\xae\x18\x55\x1a\x3e\x30\xe3\x7c\x75\x7d\xf9\xba\xae\x6b\x31\x7e\xbf\x6b\xb2\xa7\x14\xa6\x72\x40\xd0\x05\x67\x1f\xd5\x1b\x34\x29\x38\x19\xd7\xb5\x31\xc3\xf7\xa3\x33\xbf\xe1\x04\x08\x18\x73\x63\x8a\xd7\xc3\x9a\xfc\xa5\x47\xaa\x6d\x70\x56\x23\x63\x53\xc2\x38\x79\x16\xdc\xf3\xb4\x54\x21\xf4\xf7\x6d\xae\xf9\x65\x20\x13\x7f\x20\xe8\xc9\x36\xb6\xc2\x47\xc0\xd4\xbb\xbc\x85\x6b\x73\x9c\x17\x2b\x77\xd3\x16\x70\x75\xce\x4e\x16\x9a\xee\x00\xe8\xfb\x64\x71\x22\xe5\x77\x97\x6b\x89\x94\x30\x70\xd1\xa9\x69\x8d\xaf\x14\xf8\xe1\x7a\xc0\xf8\x1e\x7d\xe9\x0e\x00\xa9\xad\x05\x6f\xd5\x10\xff\xa0\xa7\xcb\xca\x3c\x0d\x54\x56\x69\x7d\xe8\xef\x04\xb5\xef\x7d\xa9\xa5\x36\xf8\x36\xb0\xa9\xfe\x69\xd1\x34\x94\xb9\x18\x73\xcd\xd1\x72\x4c\x45\x70\x4a\x4f\x04\x7b\x41\x35\x3c\xdc\x09\x94\xdc\x16\xc0\xe6\x55\xd9\x81\xd7\x1f\x0a\x14\xd8\x0b\x9c\x53\x6a\x17\x90\x03\xb4\x54\xbc\x7e\xd4\xba\xe4\xae\xe9\x46\xbb\x93\x75\x24\x1e\xdd\x06\x84\x2c\x00\xa8\xe5\x04\xad\x09\xc4\xfb\xfe\x15\x8b\x68\xbc\x04\x91\x19\x89\xd1\x52\x11\xe7\x39\x7a\xaa\xb3\xb7\x15\xa2\x78\x0a\x14\xa3\x25\x17\x27\xfa\xf3\x0f\x00\xd4\x9b\x04\x46\x16\xb4\x88\xe3\x03\xfd\x99\x13\x89\x63\xf4\x8f\x44\xf1\xa5\xcf\xba\xd1\x4f\x69\x64\x7e\xd3\xfd\x25\x70\x5a\x55\x05\xd8\x78\x98\x6a\x7a\x18\xe0\x8f\x8d\x91\x2c\x70\x93\xa9\x6d\xa3\x11\x89\x46\x13\xed\x9c\x7f\x02\xfb\
xb6\x12\x4b\x1a\x85\x3a\xb3\x5b\xe8\xf4\xe2\xca\x31\x7e\x6a\xfd\x64\x48\x7a\xfe\x46\xd2\x2b\x5a\x26\xe1\x8d\x6d\xc8\xf7\x77\x16\x59\x15\xa5\x7e\x8d\xa1\x6e\x17\x21\x75\x22\x99\x88\x38\xfd\xf9\xd1\x05\x5e\x41\x28\x14\x15\x12\xb6\x4f\x59\x5f\x0f\x26\xd6\xb3\x54\xd3\x27\xfa\x2b\x4e\x2c\xc9\x6d\xe4\x57\x9c\x43\x8e\xa2\x36\x26\x2e\x59\xd4\xce\x91\x4e\x0c\xc9\x25\x78\x17\x4c\x01\xb9\x19\x5d\x88\xfa\xb3\xc0\xa2\x94\x44\x4f\x75\xa6\xce\x63\x94\x3a\x60\x6a\x99\x21\x26\x12\xda\x6b\xcf\xd7\xf8\x12\xea\xc5\x9a\x92\xa0\x48\xcd\x6f\xc4\x2e\xf6\x66\x17\xe3\x59\xb9\xd3\xef\x67\x7a\x2b\x1d\x24\x31\xcb\x88\x1f\x68\x93\x34\x80\xfe\x40\x63\x1a\xb7\xf0\x44\x4c\x3e\x47\x79\xb0\xb2\x3b\x77\x61\xf3\x15\x50\x02\xe9\x14\x1c\x7a\xbb\xd1\xa5\x45\x67\x96\x5d\xbd\x40\x65\xb8\x58\x5b\x2a\x4d\x71\x29\x0f\x23\x82\x24\xb2\xd9\xb3\x69\x21\x1f\x8f\x9c\x53\xef\x1f\xa0\xc7\xbc\x79\x97\x99\x59\x3a\x22\x32\xdd\xd1\x9f\x26\xaa\xc6\xff\x47\xd2\x59\x2b\x49\xaf\x43\x41\xf8\x81\x1c\xc8\x0c\xe1\x18\x66\xcc\xcc\x99\x99\x99\xfd\xf4\xb7\xf6\xbf\xd1\xd6\x26\x2e\x59\x6a\x9d\xfe\x5a\x75\xc6\x1a\x6c\x09\xce\x0b\x53\x3c\xd1\x91\x3b\x47\xa3\xbe\x8c\x57\x8e\x60\xfc\x2c\xa9\xb9\xbc\x53\x4b\xf9\x5a\x8e\xf9\x60\x52\x06\x5e\x03\x2b\x0f\x4c\x65\x0f\x10\x51\x3b\xfa\x68\xea\xc5\x1a\xe3\x42\xee\x2b\x8e\x27\xfa\xef\xcd\xb7\xd2\xf8\x11\xc4\x43\x32\x94\xaa\xac\x2b\x85\x0c\xd7\x64\xc9\x53\x4d\xf5\xb8\xf0\xf8\x7d\xec\xa5\x08\x0a\xa2\x4d\x24\x18\x07\xde\xd5\x7a\x45\x61\x8f\x6b\xe0\x0c\x2a\x8d\xd1\xdd\x31\x70\x88\xdf\x2f\x4d\x53\x14\x75\x11\x19\x94\x17\xe6\x91\x29\x13\x23\x4c\x50\xad\x21\x07\x83\xa4\xfa\x00\xe2\x2d\xd0\xc5\xdc\xf7\xfd\x00\xd9\x53\x85\x75\x9c\x07\xf2\x7f\x13\xf5\x83\x57\xd1\x5d\x90\xcf\x3e\x84\xed\xa5\x8e\xf7\x28\x23\xfa\x18\x1f\xa1\xc8\xdb\xb5\x80\x15\xe6\x77\x13\xdc\xf9\xb5\x77\x7d\xd2\x45\x73\x78\xa7\x3e\xa3\x1a\x4e\xf1\xc8\xdc\xa0\xca\x9d\x1e\x91\xca\x8f\x04\x9e\xe4\x47\xe2\xce\xf4\x50\xe9\xed\x0d\x4b\xf9\x76\x9c\xfd\xc3\x21\x4a\x9f\xf4\x49\x22\xda\x42\xf5\xcb\x2d\xa6\x0d\x19\xe8\x7a\x8e\xe6\x9e\xe7\x45\xcd\x48\xeb\
xe4\xce\x34\x04\xf2\xc1\x78\x29\x9c\x2c\x56\xe7\xaf\x34\xfc\x81\x01\xb5\x3c\x5b\xc4\x73\xc9\xc4\x8b\xf0\xd3\x39\xce\xec\x73\x0b\x12\x2c\xe8\x3b\x29\x1c\xbc\xab\x36\x6d\x9b\x03\x4c\xe6\xc6\xee\x0f\x3b\x79\x46\x73\xa5\xa9\xf5\xc8\x40\x04\xdf\x47\x85\x21\xbe\x4c\x11\xfa\xa1\x0f\xe7\x62\xcd\x80\x9d\x18\x37\xe8\xd3\x78\xa5\xbb\x7f\x95\x59\x67\x2b\x4d\x4e\x82\x7e\x4e\x82\x9e\x20\x10\x80\xcd\xce\xec\xbd\x1a\x95\xd8\xb8\xc6\x9e\xe5\x81\xfa\x26\x11\x07\xff\xee\xf9\x42\xfc\x38\x14\x71\x7a\x88\xe0\xe2\xe5\x63\x5e\xd9\xc3\x75\xfb\xd9\x48\x21\xc1\xb8\xc0\xb3\x4c\x0c\x9d\xbf\xd4\x9d\x3b\xac\x30\x38\x82\x84\xc9\x55\x80\xdc\x4a\xad\xab\x8a\x50\x10\xd5\xa9\x75\xb7\xcb\x0c\xf0\x63\x2c\x61\x64\xbf\x63\x9d\x7f\x98\xf9\x76\x1c\xfb\xd6\x8a\xf8\xc7\x66\x81\x22\x39\x6e\x74\xa0\x66\xb0\xb8\xdf\x89\x94\x69\xe1\xb6\x04\xc9\x57\x2f\x66\xf5\xc3\xf0\x86\x99\x78\x7b\xb3\x23\x34\xce\x81\x3f\xc5\x94\x8f\x5b\xb4\xab\x95\x00\x3d\xd7\x77\x46\x29\x69\xa5\xbf\x7f\x7e\xf4\xc7\x51\xbd\x49\x9c\x55\x4e\x43\x34\x7d\xf9\x19\x49\x5a\xc4\x4a\xed\x10\xf6\x96\x68\xeb\xaf\xfa\x08\x1e\x27\x51\x5d\xfc\xc4\x88\x52\xa5\xa4\x14\x2a\x9d\x6d\x97\x28\x6a\x7d\xdf\xa6\xed\x5f\xfb\x37\x34\x0e\x9a\x7e\x54\x58\xb7\x73\xb1\x61\x16\xa4\xd8\xc1\xd0\xc6\xdb\xac\x8d\x2e\x9a\x9a\x0b\x19\xcf\xc5\xb6\x7a\x4a\x9e\x67\x05\x04\xf7\x3d\x82\x2f\x4f\x3f\xcc\x7f\x93\x87\x00\x70\xbf\x77\xa8\xf6\x17\xf4\xd8\x52\xd3\x2c\xf2\xf0\xc6\x3b\xf1\x52\x14\x66\x54\x95\xac\x9d\x9c\x8b\x33\x57\x56\x02\x53\xfc\x9a\x78\x83\x5f\x71\x5e\x96\x41\xf8\x10\xda\xd8\x50\x87\x6b\x4f\x70\x01\x0a\xb0\xe5\xd2\x40\x5d\xdb\xb6\x9d\x25\x94\xc6\x18\x2a\xc2\x1a\xb2\x87\x3e\x53\x2c\xbd\x73\x84\xbe\xf3\xea\x8f\x61\xa2\x87\x29\xb6\x2b\xb1\x63\x98\xed\xbf\xc4\x56\x77\xdb\x66\x31\x0f\xf8\xb6\x37\x4a\x02\xc6\x4d\xee\xfd\x4c\x80\x0b\x18\xf4\xdd\x1f\x42\xa8\xde\x01\x69\x90\xd4\xc4\x5a\x00\x4a\x8c\x62\x86\xd0\xcc\x09\x5c\x16\x9e\x34\xcb\xf6\x63\xdf\x1e\x12\x0a\xcc\x85\x0a\xfa\xdd\xa5\x21\x9c\xa2\xc8\x3b\x3d\x1e\x22\x57\xcf\xcd\x92\xdc\xa3\xf2\xbe\x37\x74\xdb\x01\x86\x61\xbd\x18\x05\
xdf\xfa\x6f\x8f\x91\xcb\xba\xe4\x0c\x01\x15\x8c\xf9\x6b\x3d\x63\xe3\x64\xf1\xe8\x58\x48\x2c\xd4\x36\xa7\x6a\xaf\x54\x7d\x08\xc9\x06\xbf\x8d\x50\x93\x0e\xb0\x13\x38\x3e\x73\xaf\x55\xad\x4a\x9f\x80\x95\x04\x3f\x59\x0f\xa4\xa1\xb0\x9f\x10\x4e\x2b\xb6\x70\x32\x75\xcc\x5b\x56\xe7\x5d\xc3\x26\x41\x7d\x23\x6e\xbd\x42\xaa\x69\xb1\x9c\x90\x28\xb9\xfa\x43\x56\xf9\x54\xab\x95\xa3\xb0\xeb\x1c\x5f\xf9\xdb\xa7\x70\xc6\x0b\x5a\xc6\xfc\xb4\x9f\xb5\x05\xea\x5c\xa1\xa5\x3b\x0d\x2e\xbd\xee\xa8\x4a\xb5\x67\x08\x18\x12\x18\x4c\xc1\x98\xc7\x8e\x7f\x04\x1a\xd2\xf2\xf0\x00\xca\xff\xf3\xf7\xfc\x41\xa3\xff\x70\x38\x41\x9f\xfa\x79\x00\x0c\x4d\x48\x88\xd2\x37\x72\x93\xf7\xc4\x14\xae\xcc\x10\xbf\x2d\x3d\x5e\x10\x96\xea\x78\xc6\x4d\xc1\x43\xa5\xe1\x6b\xaa\xcc\x4a\x90\x44\x6e\x60\x45\xfc\x16\xc9\xba\x52\xfd\x80\x9d\x37\x71\x84\x3d\xa1\x05\x60\x87\x49\x24\x1d\xb1\xf5\xd1\xa1\x3c\x3b\xef\xce\xa8\xa9\x7c\xc3\x56\xb9\x85\x70\x92\xa2\x28\x6c\x9c\xe7\xfd\xbc\xab\x5b\xdb\x7f\x9e\xef\xc0\x61\x16\x74\x5f\xb6\x2a\xfe\xef\x17\xb1\x27\x38\xd1\x83\x74\x43\x35\x43\xe4\xa7\x01\xb9\x1c\x34\xd5\xa9\x02\xd9\xe7\xf9\x82\x5b\x82\x47\xf2\xd4\x2b\x4d\x1e\x82\x6e\xe8\x04\x7b\x7b\x32\x68\xb9\x24\xef\x8b\xdf\x76\x26\x9e\xa0\x6d\x88\x14\xa2\x73\xcd\xd0\xf5\x87\xa4\xcb\xf9\x1b\x2c\x74\x80\xdf\x44\xcc\x40\x1a\xec\xba\x10\x55\xee\x0f\xad\x9e\xcc\x96\x19\x22\x5b\xe1\x9b\x23\x9f\x18\xf5\x34\xee\x33\x87\xab\xf9\xa5\xc1\x5d\x86\xd8\x4a\x60\x37\x43\x77\xf2\xb7\xd7\xaf\xc8\xe6\x3e\x76\xb0\xf6\xcd\xbf\x3e\xd7\x94\x5a\x09\x62\x06\x19\xc8\x4d\x4d\x51\x3a\x8f\x9b\x42\x88\x3e\x32\xa0\x82\xfe\xca\xbc\xe4\x10\xae\xcc\x42\x40\xba\xa3\x44\x4e\x1f\x62\x1b\xbe\x33\xdc\x9c\x3d\x33\x39\xf3\xbb\x77\x8a\xea\x6b\x8d\xa0\x03\x23\xc5\x77\x5a\x92\xc2\xd9\x0e\x3b\xeb\xb6\xed\x49\x0a\x1d\xb6\x9b\x1b\x5d\xfa\xf4\x6d\x9d\x58\x50\x10\xe3\x18\xec\xe6\x4e\x38\xdc\x97\x2b\x66\xfe\x86\xa6\x0c\xf9\x14\xa1\x51\x5c\xef\x17\x80\xf3\x5d\x89\x0d\xb8\x64\xe8\x44\x16\x3d\x86\xf0\xae\x3e\x27\xc5\x10\xc9\x06\x0e\xcc\x6a\xbe\x9c\x01\xe1\x12\x9c\x41\x85\x09\xf1\xae\xbd\
x61\xcc\xe9\xc2\xce\x6f\x15\x43\x33\x44\xde\x17\xa3\x28\xf3\x04\xd0\x9e\x31\x26\x38\xe3\x55\xda\xc7\xbb\x24\xe5\x8e\xf2\x79\xe5\x6a\x39\x99\xd8\x50\x50\x9c\x79\x8e\x6d\x94\x10\x14\x2f\xb2\xa7\xc1\xce\x10\x34\x4d\xa0\x18\x46\xe0\x77\x06\x45\xfb\xce\xc0\x89\x75\xff\xca\x9c\x21\x48\x88\xa4\xcf\xf3\x7d\x61\x18\xfb\xd5\x9e\x51\x9b\x25\xa8\x5b\x0b\x36\xf9\xea\xa2\x99\xda\xac\xc6\x77\x12\x78\x96\xb6\xf3\x60\x3c\x73\x20\x76\x32\xba\x07\xdf\xe6\x4e\x94\x81\x05\xa3\xf1\xa3\x08\x9b\x3e\x00\xf4\x82\xf2\xba\x5f\x49\xfb\xd2\x85\x69\xb6\xc1\x80\x82\x1d\x55\x71\x66\x0d\xf6\x1f\x8f\x20\x69\xc0\x1c\xeb\x4a\xbd\x0f\xd3\xef\x67\xe8\xe6\x27\xb6\xa1\x18\xf6\x7b\x5e\xf4\x15\xc4\x7a\x21\x9f\xed\x54\x6a\xe8\x66\x24\x50\xfb\x00\x5e\xbf\x00\x00\x9c\x4a\xd7\x93\x61\x30\x06\xcf\x32\x55\x68\x7c\x35\xdd\x21\xea\xa0\xa6\xf2\x28\x21\xfe\x53\x59\x7a\x6c\xbc\x53\x76\x53\x18\xf5\xb6\xbb\xd8\xde\x37\x81\x5b\xf4\x39\x8f\x27\xc5\x48\xa4\xd1\x56\xe5\x71\x5c\xa0\x5c\x3e\xc2\xef\x7b\x5d\xbf\x3f\x6f\xa2\x0b\xda\x04\x3d\x92\x7a\x8f\x49\xde\x63\x77\x9f\x75\x84\xec\xb1\xdc\x30\xe5\xd8\x39\x8a\x46\xaa\x9a\x74\x57\x8e\xc4\x0d\x7c\xe7\x48\x51\xe3\x19\xb7\x24\xd5\x8d\x4d\xda\xf2\x8a\xc6\x3f\x6d\x1d\xec\xb3\x9c\xc5\x00\xf3\xac\x41\x0f\x70\x53\x65\x4a\x59\x55\x29\x0c\x7a\x31\xa6\x28\x18\x04\xd1\x19\x86\x74\x60\xc2\xe0\x8f\xd8\x81\xeb\x0b\xe4\xc7\x3d\x32\xcc\x9b\x62\x3b\x69\x78\x9a\x3b\x6f\x37\x4c\x81\x33\x3c\x4e\x90\x1e\xa8\x63\xf6\x63\xf2\xec\x6b\x88\x75\x78\x94\x8d\x6e\x4c\xd1\x97\xa4\x99\xbd\x44\x47\x42\xae\x8d\x6e\x9f\x42\xd2\x77\x78\x0a\x53\xe4\x20\x6b\x2b\x40\x11\x9c\xf4\xd7\x83\x82\xb6\xca\xb0\x75\xee\x1f\x9c\x01\x90\xa2\x95\xe5\x49\x8b\xc3\x0b\x88\xd8\xa8\xc1\xf1\xc7\xc1\x73\x5e\x16\xcd\xf0\x4b\x90\x10\xa2\xe9\x1f\xc0\x30\x6c\x14\x3d\x0b\xca\x32\x3b\x37\x35\xf2\x59\xd5\x26\x37\x12\x3d\xd7\x82\xc2\x38\x7e\xa1\xde\x3f\x48\xde\x9c\xb3\x3d\x14\xee\x7b\x86\xe7\x09\x15\x63\x6e\x82\x17\xe2\xa9\x98\x81\x86\x21\x90\xb5\x9c\x82\xc2\x07\x62\x18\x86\x4a\xe9\x1f\x0d\x65\x34\x60\x90\x3b\x4d\x9e\x6d\xb8\x10\x2e\x33\x44\xb1\
x39\xc7\xf3\x78\x8b\xb4\x2b\x04\x3b\xf4\x99\x8c\x67\x18\x04\x30\x46\x6d\x3b\x05\xe2\x2b\xac\x4d\xd2\x26\x45\x6d\x6f\x47\x60\xc3\x79\x62\x2b\x0c\x4a\xa8\xad\xf7\x2f\xf1\x8a\xd6\xf5\xa3\xa8\x13\x8b\x41\xfd\xe3\x5d\x1e\x27\x40\x9d\xa6\x14\x4e\x50\xc4\xbc\x86\x58\x7b\x40\x9e\xd5\xfd\xe5\x67\x92\x8d\xda\x36\x7f\xe7\x2f\x8a\xce\x7b\xc7\x5d\x2c\x27\x72\x0c\xc6\x39\x38\xcf\x02\x59\x92\xc6\x3b\xe8\x45\x57\xea\xed\xfb\xbd\x1a\xe4\x7e\xa1\x9b\x9e\x71\xf2\xfe\xa0\xa3\x40\xfd\xbe\xd6\x0c\x9d\x00\x94\x22\xf6\x5c\x89\x49\x72\x33\x41\xd1\x6f\xd2\xbb\xf3\xcb\xe5\xdd\xc2\xdd\x53\x21\x16\x07\xc4\xb0\x59\x7b\x1b\xc3\x1b\x1f\xa7\x19\x0e\xc3\x0d\xe1\x14\x46\xca\x32\x9d\x53\x54\x79\xd2\x7f\xe4\x02\xe5\x58\x18\x8e\x1d\x52\x1f\xbb\xbb\x3d\x1b\x10\x2b\x52\xf4\xb1\x07\x2e\x75\x91\x66\x1d\x9a\x66\xb0\x87\x7f\xf0\x80\x30\xe7\xcf\xc9\xdf\x14\x4d\x38\x06\x38\xfe\x9e\xf5\x57\x33\xc7\xd7\x60\xdf\x82\xc2\x1c\x08\xa2\xcd\x23\x1a\xa1\xd3\x3f\x29\xc2\xf1\xfd\xe0\x3b\x70\x39\x67\xc3\xa5\xf8\xb7\x67\x9b\xb4\x65\xd1\x38\xcf\xa4\x01\x57\x0c\x62\xfe\xe3\xec\x65\x58\xf6\x2f\xf6\x63\x5d\x8f\xf9\xe3\xe8\xed\xe2\x5d\xf5\xf3\x45\x18\x9a\x8e\x0d\xaa\x1c\x5f\xcc\xc5\xc8\x44\x9d\x81\xc2\x65\xf2\x1d\x6a\x61\x23\x5b\xe7\xf0\x86\x60\xef\xfd\x82\x3a\xa8\xed\xd6\xac\xc8\xe6\x3f\x51\x09\x32\xef\x14\x92\xb0\x18\x51\x07\x14\x87\x15\x9a\x33\x63\x1a\xeb\x09\xf7\x2f\x00\x50\xb4\xf1\x21\xca\x4e\xd2\x99\x1e\x68\x69\x98\x27\x80\x99\xb2\x2c\x01\x4f\x31\x80\xce\xda\xfa\xba\x6b\xb1\x92\x04\xa8\x65\x08\x82\x84\x10\x02\x41\x5e\xea\xd9\x76\xe0\x06\x4d\x72\xef\x9b\x9f\xe2\xe4\xb9\xce\x1d\x9c\x78\x7e\x1a\x78\x92\x1b\xcd\x68\x39\xb7\x9c\xfa\xb7\x36\xfa\x57\x4d\x67\xff\x71\x37\x73\x7a\x99\xb1\x66\x28\xd9\xe8\x26\x62\xdf\x98\xc6\x3e\xdb\x51\xc8\x18\x20\x2e\x48\x5a\x16\xb9\xa9\x9f\x23\xb6\x2c\x5b\x22\xb0\xdd\x33\xdf\xac\xf0\xc5\x29\x5d\x14\x4d\x51\x80\xcd\x28\xdd\x51\xaa\x2c\xb3\xfc\xdc\x5b\x40\x3d\x3e\xe2\x8d\x70\x73\x97\x37\x26\x66\x3f\x12\x02\xe5\x56\xec\xfb\x85\x61\x1c\x3a\x05\x65\x4e\x6c\x81\x0a\xa0\xf9\xd8\xcf\xc2\x48\xe7\x99\x38\x92\
xa1\xe5\xc3\x9a\x54\x17\x5b\xf7\xce\xf2\xa4\x24\x06\x22\xf1\x0c\xca\xc7\xf1\xc2\x9e\x4d\x05\x80\x2a\xc0\x4d\x70\x1f\xbc\x1d\xcf\x7e\x8e\x33\x9b\xf2\xec\xe8\x0c\x15\xff\xf8\xa0\x0f\xd1\xa0\xd1\xc8\x7c\xf2\x71\x65\xaa\x8f\x37\x28\x85\xa5\x80\xa8\xeb\xbe\x73\xf7\x4d\xfe\xcf\x4b\x26\x66\x62\x7c\xc8\x63\x64\x54\x97\x41\xcb\xde\x92\xc4\xea\x04\x0e\x4a\xef\x4b\x43\x09\x00\x24\xc0\x3c\x23\xc7\xa4\x1f\xc4\x40\x49\x0c\x1b\x5b\x2c\x59\x81\xde\xe2\xf7\x71\x9e\x0f\x69\x8f\x6e\x0f\x51\x7a\xdb\x00\xec\x91\xac\x68\x44\xdf\x7d\x3f\x67\x68\x20\xdb\xcc\x10\xf9\x06\x00\x92\x33\x34\x49\x60\x01\x1e\xb2\x78\x92\x85\x4e\x5d\x9a\xc3\x09\x5e\xc7\xb9\xe1\x5c\x8c\x7a\xe8\x38\x52\xb4\x10\x41\x58\x82\x4a\x4e\x98\x44\xed\x6b\x68\x64\x45\x71\x99\x26\xd7\x56\x61\xf8\xb1\x16\x7b\x6a\x63\x0c\xdf\xf8\x06\x40\x9f\x3b\xb1\x1c\x4e\x22\x18\xc8\xad\x51\x23\x3f\xa2\xdc\x98\x9d\x4f\x7d\x9f\x26\xbd\xcc\xc1\x9a\xf6\x35\x0c\x0a\xa8\x2c\x2f\x21\xac\x9b\x54\x7a\x0e\x6f\xb5\x95\x37\x1f\x06\xd8\xa0\x74\x00\xa6\x1b\xd0\xb4\x22\x52\xe2\xdb\xa2\x88\x5f\xce\xd0\xb3\x2c\x0b\xb9\x03\xc8\x84\xcb\xd3\x1c\xc9\x1f\x6d\xe2\x84\xb6\xfc\x60\x17\x21\x19\xa3\x31\xcb\x93\xe7\xef\x9b\x61\x78\x15\x0a\x22\x8c\xef\x1e\x59\xa4\x4a\x2c\x2d\xcf\xb0\x2c\x3d\x04\xf1\xb1\x92\x01\xa7\x4f\x9d\x37\xb9\x87\x37\xe8\x3c\x94\xca\xcf\x95\x3c\x56\x8b\xac\xbf\x3b\x66\xcf\xbf\xd1\x1d\x5a\x23\x86\x8d\x06\x8a\xf2\xe9\x27\xfd\x98\x97\x40\x83\x42\x6f\x23\x34\xe2\x8a\x51\x22\x81\x07\x01\xe6\x7e\x77\x86\x79\x48\x9c\xda\xd1\xfb\xa7\x6f\xb4\x2b\xe2\x60\x00\x26\xc0\x66\xa6\xdf\x0f\x33\x65\x81\x49\xaa\x76\xcd\x80\xbf\x32\x40\x89\xcf\xc7\x28\x9d\x17\x7a\xf1\xcc\x09\x7c\x1e\x10\xf5\x7e\xc6\xfd\x21\xcf\xa2\xd9\x18\x06\xc5\x30\x0c\x61\xde\x2f\x02\x68\x1c\xc6\x3c\x6d\xd2\x26\x15\x98\x26\x00\x74\xdb\xef\x67\x01\x51\x71\x0c\x43\xa5\x39\xa3\x59\x4c\xde\x7b\xdb\x9c\x75\xa1\x2c\xb2\xd1\x57\x7d\xb4\x30\x15\x9c\x27\x94\x23\xf2\x7a\xb6\xa7\x8b\x94\x20\x4d\xea\x4c\xd9\xf7\x28\xf8\x8a\x8c\xa4\xd6\x57\xf1\x6d\x5e\x0e\x54\x53\x7f\xfb\x14\xe6\x36\xa2\x06\x52\xbb\x79\x5c\x83\x41\
xb4\xa6\x7f\xb9\x3b\x81\x68\x26\x4d\x03\x53\x6a\x8e\xb1\x2c\x4b\x3b\x67\x1b\xe1\xab\x3a\xf4\xe6\xb6\x2f\x80\xf0\x1b\xe4\x79\xc9\xa9\xae\xda\x5a\x1d\x64\xe0\x3e\xc2\x10\x44\x4e\x52\x14\xda\xaf\xc9\x7d\x2a\xf8\xab\xd1\x26\x43\x15\xd8\x49\x52\x33\x01\xa0\xf1\x3d\xc0\x70\x0f\xed\x00\xcb\x54\x7e\x40\x10\xc8\xe3\x74\xc4\xc0\x71\xb8\x24\x12\xf2\xf9\x8e\xb1\x36\x1f\xc4\x34\xc4\x0a\xdf\xe8\x44\xf1\xbc\x6a\x9e\x03\x94\x27\x8a\x44\x75\x82\x36\xf7\xfd\x37\xf5\xaa\xff\xc0\x8e\xe6\xec\x75\x66\xd6\xb3\xed\x47\xd6\x4c\x40\xca\x1c\x6d\x47\x48\xb1\xd7\xb4\x7e\x1d\x48\x02\xe7\xcf\x41\xc9\x63\x5d\x46\xe6\x45\x70\x86\x86\x18\xdb\x3a\xc4\xe9\x41\xf3\x6c\x0b\xfc\x6f\x3a\xbf\x10\xc0\x78\xf2\x78\x1d\x49\xc5\x4b\x95\xac\xfb\x1f\xa5\x5a\xc7\x19\xac\xde\x41\xd9\xa2\x2e\xda\x57\x6e\x44\x96\x0a\xc3\x79\x9a\x40\x7b\x73\x96\x63\x88\xb5\x18\xf3\xfc\x31\x5d\xcc\x53\x27\x5d\x2e\x38\x3c\x5e\xa7\x9f\xd5\x9b\xbd\x0b\x86\x15\x7c\xb9\x5d\xbe\x5a\x17\x9d\xee\xc5\x71\x9a\x9a\xbe\xdc\x04\x31\xb6\xe2\x9b\x7d\x9f\x9f\x27\x58\x36\x0b\x02\x47\xa2\x50\xb7\xdd\x10\x39\xaa\x04\xf9\xc4\x66\x50\x71\x6c\x0c\x4c\x06\xc8\xaa\xca\x20\xf4\x85\x63\x29\x1d\x76\xcb\xf8\x97\x7f\x98\xa4\x7a\xcc\xed\xca\xec\x17\x23\x8e\xfd\xc4\x54\xf6\x82\xd3\x05\x19\xe2\x24\x41\x29\x9d\x62\x20\x14\xc3\xda\xc4\xf3\x83\x20\x1e\x59\xa6\xa4\x73\xf1\xf9\xdd\xd1\x11\x9c\x94\x9d\x35\xb7\x2b\x1c\x65\xca\x65\x4c\xa8\x7e\xc9\xe7\xcf\x5f\x5a\x22\xda\x49\x0a\x79\x29\x82\xca\xf1\xee\x8d\xc3\x74\xb3\xa3\xf0\x27\x0f\x77\x3e\x04\x65\xb2\x2c\x24\x63\x5c\x03\x65\x0e\xfc\xe8\xa6\x6b\xa1\x16\xf7\xfd\x69\xdb\x40\x17\x79\x06\xa5\xc4\x1d\x2b\xcf\x92\x00\x20\xcf\x84\x85\x10\xde\xea\xe1\x24\xe5\x4e\x8e\x62\xf9\xd5\xd1\xd1\xb4\xf4\x1d\xa7\x07\x54\x9c\x56\xcb\x7d\x74\xda\xf0\x10\xc0\x02\x9a\x87\x3e\x6f\x35\x8c\x44\x18\xe2\x70\xff\xe8\x1d\xaa\xc1\xf9\x63\xc2\x96\x23\xed\xa3\xfa\x6d\xe6\xfd\x38\x44\xb1\xad\x27\x4c\x2e\xa1\x99\x97\x9f\xee\x40\xea\x60\x0a\x2e\xca\x42\x2f\xfc\x61\x27\x04\xbf\xfd\x50\x48\xc8\x69\xc6\xe7\xb9\x6f\xbe\xc6\x47\x65\xf9\xfb\xa9\xb8\xd2\x11\xfc\x4f\
xe2\x4b\x15\xc7\x49\x91\x54\x03\x6a\x40\x15\x3c\x0c\x26\x00\xf0\xf9\x48\x65\x5f\xb0\x96\x07\xdd\xcf\xe4\x8f\xdb\x70\xc2\xf8\x95\x32\x84\x2a\x1d\xfd\xa0\x19\x9d\x8f\x9c\xa6\x5c\x94\xde\x40\x38\x14\xa5\xe9\xf8\x22\xbd\x5f\xd2\xe5\xc8\xfc\xce\x8d\x0f\x60\xb2\xfc\xc7\x19\xcd\x0b\x68\xfa\x7b\x95\xe1\x3c\x4c\x57\x11\x90\x04\x99\x63\x26\x30\x61\x98\x40\xf3\x31\x6e\x57\xfc\x58\x37\x4a\x7c\xd1\x21\x07\x0c\x21\xe4\x18\x92\xbb\x9e\xf0\xb1\x6f\x48\xfe\xe0\x62\x3c\x18\x22\x3f\x70\xa8\xb4\x12\x1a\x73\xae\x58\xf9\xca\xf1\xbf\x33\x0c\x71\x43\xad\x84\x97\xac\x9e\x72\xfb\x9f\x77\x7c\x4e\x1f\x65\xce\x54\x47\xb1\xac\xe6\xba\xba\x4c\xbe\x16\xa9\xfa\x61\x78\x62\xc5\x98\x33\xdf\x91\xca\xa2\x5c\xb4\x61\xd3\x3c\x35\xea\x26\x69\x70\x09\x9e\xe6\x58\xd2\xb4\x4e\x89\x19\xd3\x62\xd3\xdd\xc8\x73\x96\x35\xa1\xf5\x0b\xaf\x39\x9f\x2b\xe9\xa0\xc1\xc7\xa9\x51\x4c\x99\xa1\xfa\x38\x90\x20\x6e\x8c\x4b\xd1\xb8\xa0\x9b\x00\x62\x4e\x0f\x0b\x38\xec\xa6\xbb\x93\x7c\x8c\x38\xf7\x51\xfb\x2d\x89\xeb\xdc\x9f\xaa\xe1\x60\x65\xeb\xf3\xab\x51\x2c\x6d\xa2\xac\x08\x8b\x65\x56\xa0\xa7\x02\x6e\xd2\xe0\xbb\x51\x88\x53\x1b\xf2\xb2\x60\x5e\xb7\x23\x12\x08\x05\xf3\x0e\xf9\xa5\x37\x1f\xa4\x9e\x03\xaf\x16\x8b\x63\x08\x55\x37\x2f\xcb\x8a\xf3\x4e\x3a\xd8\x37\x6b\x0a\xfd\xa9\x94\xc9\xd3\xa3\x69\x70\x36\x3c\xc3\x40\x25\x6d\x07\xa6\x11\x51\x14\x28\xb5\xf3\xdf\xfb\x5c\xd1\x1c\x13\x0e\x0b\x95\x82\x89\x8d\x27\xe8\xe7\xfd\xc0\xd6\x7d\xe4\xa1\x98\xc4\x8f\xb7\x3c\x01\x40\x03\xf4\x4f\xe2\x3b\x10\xc8\x33\x4e\x9c\x8f\x95\x88\x09\x25\xf8\xcd\xbd\x7e\x9f\x7b\x5a\x8a\xd1\xa6\x2d\xfd\xed\x98\x2b\xa6\x89\x09\xa1\xd1\xf4\x7e\xd9\x26\x3c\xfa\x60\x0b\xab\x7b\x9e\x5c\x41\x74\x19\x94\xd2\xa8\x34\x1d\x41\xcb\xe4\xc5\xfd\xe5\x85\xc2\xa0\xe2\xf3\x84\x82\x61\x20\x7d\xcb\x7a\xcc\x2f\x06\x34\x3a\xec\x9e\xdf\x86\x4e\x65\x70\x0c\x8c\x2d\xd2\xea\x0d\x7f\xb4\xdb\xd0\x67\x97\xd2\xe2\x3e\xc0\x29\x5e\xe6\xa6\x10\x4e\x29\x66\x47\xd0\x87\xc4\x52\xf3\x84\xe1\x3d\x3d\xee\xb2\xa3\x37\x8c\x80\xca\xc2\xcb\x21\xb4\xdb\x6c\x96\x13\x5c\x32\xdf\xb1\x1a\x86\x60\xff\xb6\x3a\
xfa\xfd\x51\xb2\xe6\x8b\xad\x2d\xaf\x1e\x2e\x14\xde\x6c\x35\x84\x49\x55\x3a\x7c\x26\x3b\x06\x64\xd3\x14\x49\xef\x6b\x76\x01\x05\x55\xcc\xfd\xf8\xa1\x59\x86\xeb\x42\x1e\x31\x01\x08\xcc\x85\xc1\x99\x1e\xf7\x64\x89\xaf\x1a\x24\x7b\x20\x45\x40\x37\xce\x60\x5d\xcf\x19\xaa\xb7\x67\x25\x79\x45\xe2\x89\x64\x3f\x5e\x68\x64\xb0\x91\x26\x30\xa6\x7a\x96\x96\x3e\xf4\x3d\x0d\x30\x0c\x0b\xcf\x73\x04\x14\x7e\xc3\x9f\xaa\x52\x87\x22\xe1\x61\xc6\x2c\x01\x7d\xbf\x8c\xb0\xca\x61\x5c\x2d\x46\xd4\xfc\x00\x35\x07\x4a\x91\x8f\x11\x39\xc6\xc6\xe8\x0a\x63\xf2\xce\xa7\x4e\x4c\xfb\xf8\xde\xf1\x5d\xfd\x65\x73\xbe\x8e\x5b\x3b\x50\xe1\x33\x3c\x50\x08\xd0\x2f\x56\x00\x1f\x87\x6f\x61\x32\x45\x1c\x8c\xbd\x44\x31\x38\xb9\x8d\x25\x6f\x3a\xf3\x78\x21\x52\xb7\xd9\x9a\xc1\x6d\x73\x2f\x6f\x2d\x17\xbd\xc6\xf5\x31\x85\x9a\x12\xf9\xcf\x6f\x1c\xff\x32\x6f\x66\x80\x9c\x79\x2d\x4c\x27\x73\xda\x65\xd2\x3d\x4a\x17\xb1\x08\x42\x3a\x40\xcf\x48\xb5\x4a\x68\x78\x94\xce\x16\x26\x8d\x15\x67\xc1\xa0\x29\xea\xf1\x64\x4d\x13\xc6\x79\xee\x12\x75\x77\x86\xd6\x3f\x9a\xbc\x78\xc5\xb6\xd0\xa4\xd0\xf8\x19\x50\x5a\x3e\x59\x51\x34\x27\xf1\x90\x30\xbc\xdf\x86\x79\x66\x67\x1d\xb8\xed\x29\x96\xe6\xb8\xef\xe7\x19\x60\x39\xba\x06\x51\xd6\x7e\x0d\x5e\x79\x70\x63\x2e\x82\x7e\x4e\x44\x11\x7b\xa7\x26\xcb\xca\x14\x05\xe6\xb1\x7a\x46\x81\xa5\x27\x38\x7f\x26\xd5\xae\xab\x9f\x17\x71\x77\x83\x12\xca\xfb\x71\x67\x10\xfa\x21\xc9\xfc\x78\xf9\x10\x46\xac\xb0\x39\x7e\x12\x61\x1a\x4c\x81\xee\x6b\x02\x87\x59\x24\x4a\x64\x06\x47\xee\x0e\x6f\xa3\x65\xb3\xea\xb7\x01\xec\x2d\x7f\xaa\x96\xce\x5f\x8c\xc2\xb0\xf3\x74\xc2\x13\xb7\x39\xc9\xb3\x79\x9b\x85\xce\xf0\xc4\x72\x03\xc2\xe9\xa2\x94\xbb\x7c\x25\xe8\x8a\xf9\x49\x46\xc8\x06\xc5\x88\x2d\x2b\x22\x9b\xee\x32\x1c\xba\x12\xba\x24\xc5\xf3\x9f\xdf\x20\x10\xa0\x1f\xcb\xf3\x84\x45\x04\xd0\x15\x6c\x3f\x9a\x8f\x40\xf4\x66\xea\x17\x59\x85\x5b\x80\x9b\x34\x34\x3d\x45\x01\x74\x81\x03\x9c\x39\x59\xb8\xb5\xd0\x41\x5a\xf6\x5e\xc5\xbd\xac\x2e\x1b\x9e\xb8\xf6\x93\x43\xc6\x8f\x6b\xfc\x30\xfc\x21\x4e\xf8\x90\x6a\x88\x86\xd0\
x31\x2a\x22\x83\x60\x6c\x3f\xc6\x33\x4d\x37\x7d\x44\x48\x0b\x06\xcd\xfb\x04\x2f\x3a\x97\xa9\xb1\x3c\xf4\x18\xba\x6c\xce\x12\x9c\x9e\x47\xf4\x09\xff\x2e\x37\x86\xb5\x77\x1a\xea\x96\xf2\x12\x49\x79\x6a\xba\x4e\xa6\xf4\xb0\x64\x9e\x71\xa6\xfe\x1f\x30\xd1\x42\xb6\xab\x0e\xc2\x50\x1b\x24\x00\xb1\xc2\x45\x91\x9a\x5f\x8c\x21\x70\x95\xa2\xd0\xc7\xbe\x70\x97\x17\xc0\x28\xac\x49\x8b\x5a\xa9\x9a\xc0\xd1\x32\x48\x01\xaa\xc7\xcc\x31\xaf\x6b\x37\x3e\x0e\x51\xc1\x85\x2f\xdb\x5e\xfc\x93\x7e\xc8\xfb\xd2\x59\x81\xaf\x82\xbd\x30\x07\x48\xf3\x67\x37\x92\x26\xd6\x52\x0f\xd3\x19\x0a\x50\xa7\x85\xad\x08\x5a\xd6\xc9\x43\x1f\x4a\x16\x09\x86\x72\x34\xf8\x6e\x8b\xf3\x62\x79\x4f\x84\xb3\x5f\xdb\x19\x50\x6f\x95\xf1\x41\xd1\xe4\xc1\x39\x23\x9e\xc7\x69\x49\x33\x57\xbf\x81\xc2\xaf\x4c\x31\x45\x2a\x6e\x5b\x1a\x48\x41\xc8\xe8\x23\x66\x00\x23\x46\xec\xd0\x3e\x60\x9b\xb6\x24\x1f\xc5\x27\x42\x09\x1a\xa4\x74\x0e\xff\xa0\x93\x1a\xb7\x34\xa2\xd7\xe2\x56\x4a\x46\xb3\x33\x1a\x66\x84\xa9\xcf\x7e\x8e\xac\x33\xa7\x6b\xed\xb8\xbd\x43\x2c\x1a\x46\xe1\xc8\xcc\x51\xc3\x44\x40\x34\xe5\xed\xc8\x07\x94\x02\x85\x10\xc6\xf9\x4c\x94\x3f\x9a\x0a\x27\x4a\x2f\x95\xdc\xfb\x4b\xb4\xc2\xb2\xf9\xc4\x77\x5a\x90\xe4\x38\x0e\x33\x40\x71\x82\xc9\xad\xf7\xcd\xc4\xb6\x74\xf1\xf5\x0a\x20\x7e\x33\x59\xca\x94\x2e\x00\x80\x3c\x8c\x23\x44\x2d\xe4\xb6\x3e\x9e\xee\xfa\x48\x8e\x31\x38\x01\xc1\xe3\x3a\x28\xa6\x81\xbd\xf8\x1f\x6b\x97\x25\xa0\x54\xe2\xdf\x79\xe1\x2d\x48\x65\xc7\x4a\x91\x95\x0a\x46\xe1\x6f\x68\xfa\xb7\x8f\xe1\x71\x7d\xf5\x41\x3c\x84\x53\x3a\xa8\x85\x44\xc6\x97\xd6\x92\x44\xcc\xb9\xf7\x8f\xef\x28\x8a\xc2\xc5\xbd\x10\x9c\xe7\x28\x5f\xb3\xa4\xa6\x08\x00\xec\x7e\xc1\x6d\xca\x8c\xaf\x62\xaf\xd2\x2b\x5d\x04\x52\x5d\x8c\x8f\xc5\xeb\x0d\xc0\x1f\xa6\xd8\x06\x71\xfa\x0d\x4c\x67\xf4\x96\xfe\xd7\xde\x64\x1e\x9e\x1b\x5d\x94\x52\xd7\xf3\x06\x47\xe9\x6e\x8e\x85\x98\xe9\x62\x14\x89\x46\x78\xee\xed\xc7\x6d\x7d\x00\xf1\x30\x99\x07\x0e\xbe\x3a\xa9\x41\xca\x99\xe2\x6e\x01\x92\xc3\x49\x49\x66\xd8\x19\x02\xb4\x3c\x57\xc3\x68\x9e\x75\x03\xe5\x52\x88\
x54\x45\xb0\x4e\xe7\x7f\x6d\xc1\x67\x79\xee\xde\x58\x57\xf8\x28\xe1\x19\x06\xa3\xa1\xef\xe9\x41\xf5\xa9\x3c\xdf\x05\xf5\xb9\x85\x8f\x67\xb3\x2c\x6b\x55\x5f\x76\xa8\x84\x7f\x1f\xc8\x6a\x5f\x26\x08\xed\xb2\x7e\xb5\x87\xd1\xa0\x92\x39\xcd\x34\x0e\xed\x08\xe4\x10\x35\x8e\x6b\x7f\x53\xe7\x50\xc0\x8c\x73\x6e\x70\x2d\x60\x22\xcc\x22\x3b\x17\xfb\x74\x41\x83\xb2\x6d\xe3\xe7\xf9\x74\x6f\x05\xff\x26\x8c\xc7\x83\x7a\x49\xe6\x2a\xfa\x51\xc1\xb5\xa5\xc9\xbd\xb7\xd6\xa3\x8d\x2f\xa5\xb7\x98\xf5\x42\xcc\x37\x3a\x94\x9d\xb8\x9e\x89\x4f\xba\x49\x08\x8f\x06\x0d\x7d\x3d\x10\x51\x7b\x3c\x8f\x93\x74\x2e\xdb\xf2\x6f\xf2\x36\xe5\x72\x7a\xeb\x6d\x2a\x14\x0e\xe4\x8f\x2f\xc5\x4f\x6c\x50\x15\x4f\x34\xcb\x6e\x66\xd3\xbe\x26\xae\xd3\x3b\x1b\x65\x60\x67\x03\x99\xe2\xf8\x12\x98\xf8\x98\xcf\x17\x22\x34\x43\xcc\x39\x81\xa2\xfa\x17\xa2\xd2\x70\xd1\x7f\x58\xca\x20\x9c\x42\x51\x17\x38\x31\x8a\xf4\x8d\x11\x39\x18\xc5\x39\x30\xba\xf8\xce\xc9\xb0\x42\x27\x46\x1a\x62\x20\x92\x92\x24\xd2\x6e\x0e\x45\x5b\xaa\x6c\x8a\xe3\x51\xc3\xaa\x41\x1f\xcb\xd2\xfc\x2f\x6b\x7d\xd8\x44\x6b\xa6\xf7\x43\x3f\x12\xfb\x91\xbe\xcd\x5b\x9c\xc1\xc0\xdb\xc7\xad\xc5\x77\x67\x88\x22\x0d\x99\xce\xec\x41\xc1\x62\x2f\x6c\xca\xa0\x94\xf2\x6c\x33\x4e\xaa\x10\xfb\x40\x21\x4a\xed\xba\x48\xd6\x0c\x6f\x79\x02\x9b\x6f\x8a\xf0\x1a\x7c\xb2\xdc\x05\x7f\x25\x7c\x10\x33\xa9\x07\x8f\x32\x9c\x6b\x76\x5b\xd3\x55\x6a\xb2\xc4\x49\x9c\x56\xec\x78\xbd\x4c\x98\x37\xbe\x14\x74\x0c\x53\xd1\xfc\x95\x4b\x60\x7c\x4c\x7c\x8a\x2a\x34\xb9\x73\xcc\xbf\xdd\x3f\x79\x77\xc0\x50\x06\xb9\xc2\xda\xee\x6e\x90\x8c\xcf\x31\x86\x50\xa4\x0f\x76\x62\x2f\xb1\x31\x09\x44\x6e\x26\x48\x0f\x6c\x0c\x09\x7c\x0b\xb4\x1d\x1a\xa3\xce\x63\x34\x8d\xad\xb4\x6f\xe0\xf7\xa1\x0f\xe7\xda\x6b\xba\x7d\x83\x6e\x6b\x02\x6f\xed\xe7\xd2\x11\x3d\xf4\x03\xee\x1a\x2c\xfb\x33\xb6\x04\x14\x69\x61\x98\xf4\x2f\xf5\xed\x6f\x88\x3f\x0b\x7b\xd0\xe0\xf0\xe4\xa0\x56\x5c\x5b\xbb\xd0\xb8\xb1\x3d\x49\x28\x15\xf1\x73\xd6\x09\xfc\x7e\x19\x64\x34\xbd\x7a\xf0\x7b\xd7\x55\xc4\xd5\x08\xd2\xef\x86\x26\x91\xfe\x83\x82\x9d\
x50\x42\x3b\xd0\xf7\x68\x20\xf6\x96\x2b\x47\xe0\x21\x48\x7e\x86\x29\x32\xac\xc3\x4b\xa0\xbc\xda\x06\x0c\x3e\x00\x02\x7d\x12\xf5\x1b\xc2\x3b\xa2\xc4\x57\x79\x62\xe1\x4b\x4c\xee\x27\x0b\x07\xe2\x08\x6b\x4b\x28\xac\x55\xcb\x7f\xd6\xeb\x7b\x9d\x29\x0f\xaf\xd9\xcd\x8a\x33\x62\x14\x63\x69\x5e\x29\x1b\x09\x74\xaa\xbd\x83\x5f\x16\xe9\xcb\x71\xe0\xf6\x2d\x7a\x5a\xaf\x96\x7e\xa7\xda\xb1\x89\xee\x64\x3b\xeb\xe0\xf0\xf6\xa6\xc1\xb3\x8d\x50\xb6\xf6\xd9\x8b\x08\x2d\x9d\xd6\xa0\x02\x9b\xc4\x23\xbe\xef\x8d\x8e\xca\x9c\x26\x76\x91\x6a\x4b\xdd\x9a\x8b\x0d\x3a\xb3\x98\xad\xfc\xa1\x13\xf3\xa2\x0d\xf1\x61\x7d\xda\x1c\x81\xb1\xea\xbd\xac\x22\xa4\x05\x79\x06\x54\x6a\x59\xb4\xfb\xee\x8d\xb2\x7e\xe0\x15\x63\x29\x23\x07\x54\x09\xb5\x36\x93\x34\x75\x9f\xdf\x6c\x1a\x5b\x80\x44\x8c\x7a\xea\x60\xd4\x9f\xee\x95\xd8\x8f\x53\xa0\x12\x9e\x74\x73\x55\x60\xe2\x49\xe4\xd0\xfc\x12\xaa\x39\x12\xf4\x20\x4b\x4e\x69\xac\x3d\x8a\x24\xf3\xec\x3d\xc9\x19\xae\x22\x9d\x08\xad\xb4\xc6\x1b\xb6\xb5\xd2\xbb\xe5\x4d\x03\xe7\xfc\x9c\x73\x5e\x6f\x07\xc8\xae\xda\x7a\x2b\xa1\xdb\xfa\x20\xd1\x9e\x66\xaf\xe6\x9a\x3e\x9c\xfb\x8e\xd7\x17\x07\xf8\xd7\xa3\xe3\x63\x24\x42\xc8\x7b\x40\xe5\xd0\xee\x4e\x6d\x4b\x90\x81\x75\x95\x24\xff\xb1\x59\x41\x28\x8d\xcd\xd5\x80\xde\xc5\x96\xc2\x49\x4e\xd8\x9a\x1d\x41\xb0\xdf\x5a\x2a\x81\x3e\xf6\x8f\xe3\x75\xb2\x3d\x30\x3c\x6d\x86\x98\x7d\x9b\xd0\xd9\x48\x9d\x1c\x0e\x26\x1d\x2d\x7a\xe3\x09\x6b\xb9\xb5\xf9\x81\x95\xbc\xd5\x19\x00\xf0\x86\x3b\x9d\x9b\x55\x1e\x2b\x8c\x34\x48\x5a\x0b\x52\x2d\xbb\xe4\xde\x4c\x8b\x32\xcf\x49\xb0\xad\x8a\xd3\xcd\xca\xf3\xb7\x5c\xd3\x22\x35\x35\xbd\xd7\x9a\xdf\x69\xf2\x64\x7e\x63\x5d\x9c\x73\xe5\x6b\x29\xac\x25\x43\xe1\x0d\x85\x01\x9c\xb7\xff\xee\xe3\xa8\x7d\xdf\x0f\xfb\x38\x08\xfa\x38\xf0\xfb\x39\x09\x7b\xa2\x08\xf1\xa5\x66\x67\x4e\xa2\x4b\x33\x5b\xe5\xc7\x12\xbc\xde\xbc\xbb\xf7\x5c\x34\x9a\xbd\xd8\xe2\x26\xcd\x76\xba\x37\x2c\x9d\x90\x7b\x1a\x60\x45\x6b\xcd\xf1\xed\x19\xbb\x8b\x7f\xec\xd9\x01\x65\x36\x65\x44\x4f\x03\x64\xe7\x3d\xff\xab\xfb\xbe\xef\xbb\xbe\x1f\x07\xf1\
x2a\x46\x2a\x9b\xae\x0e\x02\x49\xc7\x0a\xcd\x6b\xd2\x4c\x0a\x07\x9d\x0b\x57\x9d\x2a\xa0\xe8\x23\xe5\xaa\x4a\xbb\xf9\xaf\x4e\x32\xc5\x9b\x1d\x92\xc8\xbf\x48\x03\xef\xc1\x56\x7f\x15\x98\x06\x25\x00\x6a\xff\x50\x63\x35\xe3\xa6\x12\xa1\xc5\x79\x52\x82\x13\xeb\x12\x3d\xfc\xb6\x55\x81\x19\x7b\x7a\x24\xee\x03\x44\x1c\x28\x7e\x97\xe8\xa2\x97\xba\xd6\x93\x51\x46\x7a\x72\x96\x14\xd8\xeb\x15\x5d\x15\x3e\x98\x3b\xd9\x7a\x72\x7b\x43\x2e\xc0\x8c\xc0\x67\xbe\xa8\x3d\x4a\xc6\xad\x0c\xc9\xaf\xf9\x1b\x52\xa5\x24\xec\xa4\x38\xc7\xa4\x34\xfb\xc5\x56\x54\x9d\xa9\x73\x29\x47\xad\x10\xf1\x5c\xc2\x3b\xf6\x47\x08\xe9\x4f\xc8\x79\x1e\x57\x25\xdc\xb0\x2f\x49\x96\x23\xcb\x17\xa5\xeb\xba\xfa\x95\xed\x60\xf7\x04\xc3\x52\xa8\xf6\xe9\xd7\x73\xe2\xf0\x0e\x34\xbc\x2b\xf9\x1f\x49\xab\x69\xe6\xa1\xde\xf2\x36\xa5\x9f\xf5\xfc\xb6\x68\x90\x9b\x64\x98\xb7\xf4\x77\x4f\xf1\xef\xee\x52\xe1\x76\x72\x5c\xe8\x2e\x38\x69\xb3\xb8\xd1\xd2\x2e\x53\xa7\x52\xc5\x43\x89\xe0\xfa\xeb\xcb\x4f\x25\x33\x62\xd0\xb2\x2a\xf7\x46\x1d\x6f\xdc\x64\xb4\x7b\x62\x68\x77\x6c\x68\x42\x20\x1e\xff\xef\x4e\x32\x41\xb0\xf6\xf0\x4d\xba\xce\xc8\x2c\x8c\x09\x36\x8c\xbe\xf5\x38\xbc\x86\xf3\xab\xcf\xb7\x85\x0e\x24\xba\x4f\xa4\x31\xcc\x4b\xf2\x27\x99\x66\x5a\x9e\x6d\xd5\xd4\x7a\x53\x99\x9f\x81\x80\xcf\xf7\xaa\x04\xa0\x2e\xe4\xf0\x6f\x65\xd6\x87\x3e\x15\x85\x3e\xd5\x07\xde\x55\xc7\xc5\x83\xaf\xcf\x0e\x42\x7f\x56\xb4\x96\x7f\x40\x64\x9c\x68\x9a\xa1\x93\xbe\xb2\x02\x7d\x5a\x25\x6c\x32\x5a\x6a\x3c\xef\x0c\x83\x1e\x93\x0a\x0b\x37\x38\x1e\xba\x40\xb5\xd0\x4e\x99\x41\xed\xe7\x2a\x70\xa6\x68\xed\x15\x99\x1e\x07\x2a\xbf\xb9\x81\x45\x05\x6f\x6d\x2d\x6e\xe5\xae\xd7\x4b\x8f\xf6\x66\xaf\xde\x46\xb7\xe6\x0a\xb7\xd6\x46\xa8\xce\xab\x1e\xb2\xff\x69\x28\x1d\x32\xbe\xd6\x3b\xdb\x93\x58\x2d\xfc\xde\x9e\xff\xb5\x23\xef\xcb\xfe\xc9\xd4\xa6\x41\xa9\xbd\xc6\xbb\x51\xca\xab\xbd\xea\x43\x1b\xb4\x51\x2a\xe6\x70\x87\x8b\x0b\xa5\xb4\x3c\x95\xce\xdc\xc1\xe9\x65\x04\xab\x4f\x32\xd9\x58\x8a\xa2\x88\xb5\xa5\xca\x86\x45\x8c\xfa\xfa\xf0\x4e\xfb\x48\x58\xfd\x2b\xa4\xbf\xfb\x4f\
xfd\x47\x3a\x10\x43\x3a\xc4\x4b\x32\xcc\xeb\x4c\x16\xfd\x9c\x04\x13\x66\xfd\xed\x2a\xa8\x6c\xef\x08\x89\x87\xf8\xdf\x2e\xf9\xfb\x3b\x27\xc5\x99\x52\x0c\xe4\xc4\xeb\x97\x87\xc9\xdc\xf0\x9a\x58\x0f\x56\x1f\x6b\x29\xe3\xda\x68\x20\xfe\x88\x50\x0d\xd7\xef\x46\x15\x99\x2a\x90\xc7\xbc\x00\x33\x9a\x95\x66\x39\x82\xb4\x08\x1e\x28\x70\x88\x47\xd7\xc7\xb7\x6b\x65\x27\x24\x90\x6c\x0f\x0b\xd1\x92\xaa\xa1\x5e\x63\x50\xa6\x14\xc5\xb8\x0c\x30\xcd\xb1\xdd\x46\xd3\x81\x77\xd9\xf1\xbe\xfc\xe3\x35\x34\xee\x7c\x6b\x99\x54\x9b\xc7\xac\x59\x44\xb7\x3c\xfe\x73\xeb\xae\xf7\x26\x68\xbc\x45\x83\x52\xa5\x28\x5b\x25\x0a\xf7\x17\xd3\xfa\x35\x41\x3b\x46\x3a\x7b\xcc\xa0\x21\x21\x0b\xc7\xdf\x29\xb0\xcd\xb6\xcf\x07\xa9\xfd\x69\xe5\xaa\xfc\x64\x34\xe1\x7b\xd0\x15\x5b\xde\x85\x4f\xf5\x68\xab\xf6\x6a\xab\x06\xcc\xc9\xfd\xda\x9e\xff\xb7\x1a\xd6\xa3\xb5\xa6\x8b\x30\x66\x88\x30\x9f\x4b\xe3\x73\x0b\xd6\x0b\xff\x80\x6d\xc5\xb4\xd1\xdf\x15\xd4\x34\x52\x67\x4c\x95\xf1\xf3\xa9\xe2\xa1\x42\x88\xfc\xca\x20\x38\xa5\xd7\xd4\x43\x0f\xae\x3e\x20\x67\x89\x2a\x77\x56\x68\x23\x9d\x86\x8b\x99\x11\x42\x12\xf9\xae\x0f\x44\x22\xc3\x8c\xe8\xd5\xe5\xb7\x53\x4b\x11\xdf\x09\x80\xd3\x07\x18\xaa\xcb\x46\x34\x57\x40\x34\xd7\x43\xf5\x36\x42\x90\x5c\xf4\x52\xd3\x75\xd6\x59\x71\xba\x4d\xf9\x54\xf2\x53\x8e\xa0\x3c\xb1\xc3\xed\x08\x8a\x21\xfa\x79\x45\x88\x0b\x32\x6f\x47\xac\x2f\x1a\x93\x5f\xf9\x72\x8c\x80\x62\xee\x0a\xc3\x28\x66\xac\x0d\xd0\x8e\xaa\x4f\xe2\x11\x71\x33\x56\xf0\xad\xa6\x91\xda\x4b\xff\xeb\xc0\xb9\xf9\x2d\x39\xab\x93\x7d\xbe\x5f\x9d\xf2\x37\x1f\x69\x82\xe6\x5b\x81\xa6\xf1\x81\xa5\x8b\x1f\x95\xfd\x55\x09\xdc\x9d\x6e\xa7\xab\x10\xd9\xd9\x15\x96\x82\xa5\x01\x96\xee\x0b\x9a\xef\x11\x8a\xec\x11\xaa\x77\xf1\xcf\x36\xd4\x94\x8b\x78\x16\xce\x45\x1b\xce\x45\x07\x5a\xca\xb3\xa5\xc1\x9f\x48\x86\x79\x45\xa0\xf1\xec\x08\xbd\x3c\xbd\x45\x77\x03\x5f\x19\x68\x48\x08\xbf\x56\xf8\xbd\x34\x03\xbd\xd9\x60\xd6\x08\x12\xd2\xa0\xa8\xb2\xab\x72\x7d\x86\xf9\x48\x33\x34\xad\x6e\xac\xc7\x23\xd5\x96\x06\x2a\x43\x53\xb2\x04\x6b\xd0\x55\xbf\x80\x7b\
x82\x35\x41\x76\xe8\xc2\x8d\x34\x5f\xe0\x24\xf0\xd7\x85\x4c\x06\xb6\x51\xd6\xff\x3b\x91\x88\x02\xc0\x76\x1e\xf6\x64\xd1\xcd\xef\x43\x0f\xa1\x83\xc3\x85\x19\x3e\x51\xef\xa4\xc8\x1c\x8f\x3e\xc2\xa4\x6f\x9c\xdd\xd1\x19\xe4\x32\xb0\x44\x28\x6e\xdf\x29\xfd\x11\xbb\x1e\xbc\xa9\x39\xb8\x7e\xfa\xf6\x66\x17\x72\x04\x86\x43\xb1\xfe\xaf\xf1\xd4\x9c\xbd\x94\x8f\x29\x92\x8a\x77\x98\x8c\x0d\x37\x44\x73\xe5\x28\xc6\x9b\x48\xf6\x60\xc0\x8a\x63\x0a\x1d\x0d\x3e\x88\xd0\x20\xd1\xd1\xdd\x07\x77\x3e\x0c\x25\x94\x85\xbd\xeb\x35\x57\x7f\xb0\xfc\x61\x64\x73\x7a\x9c\x6e\x96\x77\x92\x49\xd0\x35\x40\xf7\xd5\x13\x4a\x6b\x0f\x48\xbd\xc1\xc1\x79\x08\xf8\x4c\x3d\x8a\x5d\x7f\x84\xd1\x22\x5d\x9c\x6f\xc1\x24\x7f\xc7\xbb\x54\x1f\x7a\x76\x3b\x42\x76\x3b\xc2\x9c\xb1\x32\x5c\x49\xa8\xd9\x83\x35\x77\x6d\x24\xf9\xd7\x78\xf5\x37\xbc\x75\x4d\x8e\x7d\x21\x4d\xc5\x39\x5d\xe0\xd4\xae\x15\x38\x17\x3a\xd0\x48\xb3\x31\xfd\xf6\x9d\x4e\xc6\x24\x56\xdc\x3e\x58\xc4\xd9\x96\x6d\x08\x01\x1d\x90\x67\x43\xd6\x73\x42\x70\x68\x30\x22\x87\x9b\x0e\x27\xf1\x59\x94\x86\x3c\x69\x10\x17\x77\xee\xb7\x1a\x33\xf9\x45\xc9\x0e\xfb\x33\xc0\x48\x26\x08\x9a\xa5\xfd\x68\xe8\xdc\x5f\x3e\x40\xfa\xb4\x04\xa8\xec\x70\x2e\xd9\x0d\xbd\x5c\xcc\x22\x4c\xe9\xa3\xdb\xaa\x59\x4c\x70\x3d\xa1\x38\xdb\x1f\x61\x21\xe8\x21\xf5\x25\xf0\xdf\x8c\xd6\x5e\x1a\x06\x16\x92\x87\xf9\x1e\x96\x26\xa3\x43\x23\x71\x94\xe3\xc3\x1c\x10\x86\x1f\x60\xc7\x54\x22\x3b\x25\xba\xcc\x33\x20\xf9\x7b\x12\xe4\x63\x82\xf6\x73\x14\xd4\x53\xfc\xb3\xbb\xe4\x97\x2c\xf3\xe2\x74\x8b\xd2\x4a\x11\xcf\x92\x26\x43\x0d\xb8\xc1\xc1\xbb\x59\x86\x08\x79\x65\x26\xb5\x64\xdd\x0c\xb7\x8f\xca\x22\x40\x07\x44\x6e\x57\x89\x6e\xea\xe0\x05\x2f\x29\xd2\x67\xa7\x02\xe4\xaa\x98\xf0\xb7\xba\x5c\xf2\xba\xf9\x81\xba\x1d\x61\x30\x07\xb6\x07\x54\xbc\xd8\x31\x51\x3b\xa3\x8f\xe4\xce\x28\x2b\x19\x54\x9a\xd8\x89\xe3\xda\x4f\xbd\xd9\xc2\x1b\x10\xf2\x89\x72\xf3\x97\xee\x28\x85\x63\x03\x0d\x47\x96\xc0\x42\x20\x3f\xad\xdc\xca\xf4\xa2\xc0\x6c\x98\xcc\x67\x9b\xf4\x3f\x74\xa2\xa9\x1f\x6f\xdd\xa0\x64\x49\xc6\x99\xcc\x2a\
xa9\x65\x63\xb9\x9b\x97\xc4\x38\x31\x6c\x49\x82\x7e\x0e\x1c\xdc\x29\xd6\xe4\xcd\xc2\xef\x9d\x05\x50\x19\x9e\x32\xc2\xa8\x01\xa2\x5b\x74\x01\x56\xf8\x5f\x9b\x1b\x0a\x4a\xc5\xa9\xa6\x4e\x91\xcc\x9c\x9f\x19\x30\x49\xa0\xbe\xf0\xd4\xae\x59\x01\x9d\x37\x2c\xa2\x30\xf6\xa8\xc9\x33\x50\x3d\x27\x1f\xee\x0f\x4c\x8f\xca\x73\xce\x2b\x5c\x48\xd8\x12\x2a\x49\xad\x06\xe8\xde\x93\x68\xd4\xf9\x6f\x03\xa5\x1d\x10\x2b\x44\x77\x83\x54\x47\x61\xf2\x87\x1d\x3f\xeb\x2c\x9a\x4a\x67\xa7\x9d\xfb\xf2\x35\x4d\x16\x06\x5e\xea\xf1\x76\x7e\xa8\xdd\xf5\x10\xe3\x2d\x76\x67\x7b\x4c\x41\x39\xfa\xf6\xaf\xea\x2f\xc5\x89\x59\xb6\x0a\xdc\xc7\x12\xfc\xef\xed\x57\x27\x75\xd1\xa5\xd9\xb5\xb7\x46\xf1\x71\x6a\x9e\x28\x53\xc3\x54\x5e\xac\xd1\x12\x1b\xc8\x9e\xfa\x51\xb2\x28\xdf\xde\x73\x1a\xee\x11\xe9\x3b\x95\xe3\x31\x16\xe3\x5d\x86\x9c\xb1\x99\x3e\x1f\xee\xf3\x71\x3c\x9b\xf5\x45\x41\x6e\x6f\xcd\x11\x5b\x08\xf5\x55\x3c\xc1\xf5\xd4\xeb\x0a\xf6\x3b\xa7\x4e\xcd\xdd\x21\xf1\xd9\xaa\x9c\xff\x4c\x72\xba\x0f\x0b\xe2\x32\x2f\x87\x2b\xe4\xd1\x4e\xd0\x49\x92\x47\xdb\x03\x11\x00\x19\x2b\x7b\x38\x0f\x1f\xbe\xa2\x41\xc7\xd1\xfd\x13\x91\xa2\x45\x56\xf4\xb7\xd2\xea\x6e\x23\xe6\x12\x68\xfe\xb0\x01\x40\x8b\x59\x61\x35\x63\xcb\x50\xfa\x79\x98\xc1\x9f\x55\xb9\x80\x10\x2d\xd8\xf7\x72\x67\x71\x26\x6f\x50\x65\x15\x78\x82\x95\x9e\x47\x9b\x32\x16\xfa\xda\x4c\x8e\x33\x43\x1d\x09\x9b\xb9\x48\x8a\xb0\x1c\x56\x60\xcf\x49\xd0\xbb\xb7\x52\xdc\x3a\xc4\x64\x0f\x80\xc4\xae\x7f\xda\xd1\x22\xcb\x8c\xa4\x8f\x02\x2f\xc4\xb1\x86\x40\xdb\xf5\xb2\xe3\xad\x7d\x83\x82\xef\xcd\x98\x70\x06\x81\xec\x0c\x17\x73\xc4\x28\xdd\x7c\x31\xd6\x52\x59\xaa\x47\x29\xb7\xbe\x28\x77\x59\xa8\x04\xdd\xd2\x1f\x5d\x1a\xed\x44\x1a\xed\x98\xee\x68\x1b\x5c\x8a\xe2\xe0\x8b\xd1\x6e\x68\x6a\xda\x9d\x2f\x38\x03\x29\xf8\x1a\x47\x5a\xda\x39\xde\xe8\xbe\x26\x45\x68\x0e\xe6\x5a\x83\x10\x99\x4a\x4b\xf8\xb2\x93\x7f\x62\xe9\x8e\x50\x8c\xc8\x6f\x98\x39\x84\x3d\x39\xcb\xa3\xb1\x2e\x64\x55\x62\x14\x03\x41\xb9\x7b\x42\xd4\x8e\x3e\x5b\x2b\x2b\xe2\x4f\xd5\xda\x37\xb3\x19\x54\xe4\x3f\x59\xe9\
x0f\x73\x13\xd5\x42\xfe\xed\x86\x1f\xb8\xcc\x6d\xa6\xf2\x87\x59\x50\x63\x74\xeb\xea\xea\x0e\x11\x21\x99\xd2\x2d\x09\x7a\xc1\x52\xc3\x9a\x97\x78\x30\xdf\x02\x2b\x4f\x2c\x7f\x74\x9d\x84\x30\x5f\x79\x96\x40\xed\x1f\x22\x3f\xb9\x66\xff\x15\x12\xfc\x1b\xb0\x60\xe4\x87\x42\x7a\x91\xde\x6c\x19\x43\x90\x6c\x49\x56\xe7\x0b\x9c\xaf\x8c\x3f\x65\x71\x8a\x5d\x6b\x22\x34\x93\xdc\x47\xea\x25\xba\xc8\xdb\x15\xde\x99\x8f\xfa\xf4\xd0\x38\x4b\x96\x50\x33\x0f\x51\xfa\x3b\x54\x34\xda\x33\x78\xef\xb8\xf6\x18\xf3\x01\x0f\x70\xf2\x54\xfa\xb0\xfd\xa9\xc1\x32\x6f\x0d\x44\x76\x22\x84\xeb\x3d\x9a\x59\xdf\xea\x8b\x03\x95\x1f\xf6\x60\x9d\x5d\x3f\x0f\xbf\x7d\x7c\x84\x87\x29\xee\x28\x99\x63\xe6\x86\x7d\x3b\x27\x05\xc5\xdb\xd7\x5c\x59\x2e\x81\x67\xeb\x43\xb1\x7d\xf0\x21\x11\xc1\x68\xc0\x2e\x31\xa5\x4e\x96\x7b\x18\xcd\x49\x65\x83\x4a\x65\x53\x7d\xe9\xac\x9c\xe9\x12\xbc\x1d\x37\xf9\xfc\x87\x00\xda\xf7\x53\x7d\xcb\xbf\xd8\x5d\x0b\x4a\x8d\x1b\x00\x70\x96\xc7\x55\x5e\x35\x09\xd3\x22\x58\xcb\xd7\x92\x59\x49\x06\x80\xa4\x1c\xce\x72\x84\xc8\x96\x64\x4e\x85\x7a\xf4\x63\x83\x02\x2a\x16\x40\x01\x18\xaa\xe0\xdf\x84\x18\x33\x00\x8c\x7e\x2f\xb9\xbd\x94\x2e\x69\xbd\xbf\x9e\x02\x53\x08\x28\x12\x6e\xa0\xf4\x09\xac\xeb\x27\xdd\x25\x15\x9d\x8e\xcd\xe2\x50\xc1\xb3\x80\x5a\x9f\xdf\xce\x26\xb1\xa1\x56\x68\xf6\xe3\xdb\x2f\x49\x83\x42\x0b\xa2\xa3\xd4\x44\xd5\x5f\x69\xec\x37\x8d\x7c\x82\xf6\x00\xbc\x83\x9d\x41\xa5\x26\xe3\xe3\x9a\x9a\xec\x43\x64\xe7\x8a\x40\x54\x7e\x86\xcf\xb7\x18\x83\x03\x7b\xe4\x99\x81\x46\xb7\xf4\x62\xe3\x8e\x4d\xb8\xa4\xf3\x14\x94\xa7\x78\xc1\x14\xd4\x58\x75\x15\x4a\xc8\x2f\x85\x3c\x11\x40\x56\x4f\xe5\xd7\x95\xa9\x24\x87\x36\x5d\x03\xc3\xea\xc0\x90\x0c\x04\xdc\x03\x66\xce\x0f\xac\x7e\xe6\x17\xfb\x89\x7c\xfd\x6a\x0f\xbd\xb9\x0c\x59\xee\xb0\x22\xf0\x7a\xa9\xb6\x26\x4b\x9c\xc8\x76\x81\xb1\x25\x08\xe9\xec\x75\xed\x6e\x34\xb5\x23\x8c\xf4\xc4\xf2\x50\x9d\x91\x04\x95\xd2\x2c\xe9\x2a\xe1\xd3\x1e\x18\xb5\x63\x74\xa6\x7e\x0f\xac\x3c\x3a\xd5\xf8\x22\x7d\x76\x99\x0c\x25\x04\x44\x17\xc3\x64\xc4\x73\x1d\x99\xeb\x1b\
x26\xf6\xb7\x1b\xa2\xfb\x9b\x85\x02\x83\xf1\x34\xff\xd5\xdd\x3f\xd7\x28\x8e\x10\xcf\x6e\xe9\x12\x1e\xed\xd5\x46\x37\xae\x23\x65\xf1\x6e\x12\xb3\xc8\x33\xda\xdd\x2f\x96\x99\xed\x43\x18\x6e\x2d\x43\xfb\xaf\xaa\xac\x45\xfc\x77\xee\xe2\xd9\x82\xcd\x40\x20\xef\xe9\xa9\xc1\xc8\xd1\x9d\xdb\x1e\xcd\x67\x97\x00\x62\xcb\x54\xe2\x7d\x51\x18\x91\xa0\xf3\x81\x91\xc1\x1f\x45\x31\xa5\xef\x7b\xbd\x83\xd0\x98\xed\xf9\x5f\x0f\xd1\x5d\x0e\xde\x54\xe5\x39\xd7\xb7\x23\xf2\x57\x7b\xb5\xc5\x0c\x89\x07\x9e\x55\x1c\x32\xcf\x33\x5c\x09\xfa\xc0\xd2\x1d\xa2\x1f\x6d\x5b\x95\x67\xb0\x5b\xa2\x3c\x43\x98\xf8\xfd\xea\x29\x26\x68\x28\xd5\x07\xe2\xc0\xb6\xd5\x47\xb2\x51\x76\xd8\x60\x5d\x6d\x01\x5f\x94\x67\x2b\xff\x81\x88\xec\x78\x5d\x49\xa1\x54\x7e\x60\x77\x16\x7e\xb7\x7e\xf6\xda\xb3\xbf\x0d\x47\xb8\xb5\x50\x7c\x6a\xa9\x73\xca\x61\x1e\x34\xb6\xc9\x0f\xcc\x2a\xcc\x9f\x23\x65\x4c\x71\x08\x03\xc6\xfa\xb2\xae\xcb\x1f\x7a\xa8\x3f\x98\x5a\x60\xaa\xce\xc5\x47\xcb\x7e\xcd\x16\x60\xe0\x81\x1c\xc5\xd6\x92\x6d\x46\x49\xe8\x90\xa2\xe8\xf0\xb8\x65\xfd\x8c\x24\x01\x91\x27\x91\x95\x63\xc7\x2c\x48\xda\xdc\xd9\x91\x0c\xbc\xea\x57\x81\xa1\x88\xf7\xb7\x4f\xe3\x44\xa2\xe8\x70\xb2\xf3\xb8\x40\x15\xe8\x4d\x88\xcb\x90\x9f\x62\x5d\xc8\x96\x25\x0e\x0c\x97\x22\x7b\x2f\xa1\x27\x14\xcc\xb0\x79\xd9\x98\xd5\xfd\x78\x76\xba\x7f\xbe\x15\xea\x0f\xb1\x20\xc5\xe1\xf0\xa5\xae\x8d\x14\x57\x9b\x82\x2b\xa3\x3c\xac\x4f\x0e\x07\xcc\x17\x3f\x80\x30\x2b\xd9\x78\x6a\x3b\xf1\x97\x14\x02\xff\xeb\x58\xe6\x8d\x25\xb4\xf9\xe2\x98\xbb\x63\x7e\x73\xa7\xee\x94\x2e\x01\xeb\xf9\x7d\x89\xf3\x50\x45\xe7\xe6\xfe\x72\x57\x25\x0b\x1e\xf7\xb1\x46\x7e\x26\xe8\x8d\x83\x40\xa0\x32\xeb\xdc\x3b\x51\x8b\x8f\x23\x0d\x95\x3a\x5a\x1c\x84\x6c\xb6\xc1\xbe\x26\x16\x9e\xe9\xa2\x1b\x6f\x83\x42\x16\xe2\xe8\x9b\x1f\xeb\xfa\x4d\xe4\xa1\x3a\x48\x16\xf6\xf8\xa2\x34\xa3\x7e\x67\xa9\x68\x6b\x7f\xa1\x3c\xac\x87\x9e\xc7\x3e\x32\xa6\x19\x22\xff\x48\xb2\xba\x0f\xf1\xcf\xf5\x9e\x5c\xb0\x33\x03\x93\xc6\xfc\xc0\x4c\x0c\x23\x67\x05\xdd\xdb\x40\x68\xfd\x96\xa0\x21\x9e\xdf\x77\x06\xea\xbb\
xfa\x0e\xbd\x5e\x4a\xb4\xe1\x9d\x38\x81\x31\xb5\x0c\x15\x2a\x49\x36\xe6\x77\xa3\xfe\x82\x97\x01\x95\x3d\x31\x25\x62\x4d\xe7\xbf\x1b\xae\xf0\x88\xda\x57\x82\x26\xe1\x9d\x77\x89\xc6\xeb\x5e\x8d\x32\xc9\x20\x30\x58\xeb\xcb\xfa\xbe\xef\xe7\x61\x8e\x5d\xa6\xf1\x22\x09\x93\xa3\xa9\xbe\x19\xbf\x4b\xa7\x78\xe8\x0d\x13\x5a\xde\xd9\x19\xf9\x79\x08\xeb\x01\x5d\x41\x5b\x46\x16\x6d\xc4\xd7\xd7\x12\xfa\x80\xd3\xa3\x70\xa0\xe3\xa7\xb6\x2a\x4f\x13\xa1\x0d\xbe\xe1\xa2\x85\x43\x70\x88\x51\x6f\x8d\xe9\xbf\xf3\x25\xab\xcc\x6f\xfe\x72\x5a\x17\x4c\xf8\x32\xd8\xc6\xd5\x22\x79\x18\x1e\xe4\x31\x52\x8c\xf3\x27\xec\xc0\xef\x65\x6f\xfc\x11\xf2\xa3\x36\xef\x81\x83\x8b\x2d\xe6\xf9\xf7\xdb\xd0\x91\x61\xbb\x71\xf0\x64\xc7\xef\xa3\x4c\xfb\xbd\xf3\xc2\xfc\x2a\x66\x71\x1f\x21\xeb\x2e\x09\x44\xbd\x80\x2d\x14\x00\x1d\xfc\xe5\x68\x75\x79\x78\xc8\xc1\x2f\x9b\xdd\x3e\x68\x72\x98\x98\x14\x3c\xef\x45\xf0\xad\x23\xfc\xa8\xfc\x54\xfb\xe9\xca\x94\xdf\x0a\x05\xcb\xba\x7c\x77\x33\x43\x62\x33\xf9\xd9\x64\x32\x8f\x49\x09\xe1\xe9\x2b\xdc\x9a\x3d\x1e\xb8\x4b\x4c\xb0\xc2\x01\xec\x86\xd4\x53\x1a\x39\x81\x76\xc6\x4c\x51\x49\xaa\x04\xa0\x1b\x24\xf5\x1a\x3f\xe4\x94\x27\xbc\x2c\x51\xf1\x8e\x32\x27\x46\x51\x8c\x48\xe9\x63\xe8\xd6\xc0\x6c\x25\x1a\xdf\x51\x2a\x47\x54\xe2\x27\x65\x86\x58\x5b\x12\x2b\x1b\x83\xab\x45\x11\xe8\x18\x98\x29\x9d\x5f\x41\x55\xa6\xcc\x3c\x00\x2c\xff\x7a\xc5\x97\xa5\x9a\x69\x2e\x31\x29\xf8\xf7\x73\x56\x53\xb3\x1a\xcf\x12\x99\x03\x36\xf9\x6a\x71\x8a\x75\x2d\xaf\x5d\x6e\x5a\x39\x0f\xff\x22\x53\x24\x6f\xa2\x56\x57\x9a\x8a\x32\xe7\xf7\x2e\xc2\x6f\xb6\x87\xb2\x8a\xe0\x52\x12\x99\x2e\x69\xda\x8f\x8c\x97\xdf\x6b\xf9\x36\x77\xb4\x95\xa6\x7b\x4d\x8e\x85\x25\xfb\xb2\x0d\x09\x54\xa6\x80\x66\x3f\x64\x0b\xed\xe6\xa8\x8f\x79\x32\xc1\x89\x2e\xb6\x36\xc5\xd0\xda\x8f\xdf\x74\xee\xaa\xb5\xc2\xa4\xcb\x6b\xd2\x6e\xcb\xff\xe6\x28\x93\x5d\xb9\xb5\x7d\x4f\x8c\xaa\xa7\xe4\x77\xb8\xf1\x65\xff\x0a\xe9\xe2\x5f\x1c\xbb\x20\xf3\x8d\x86\x78\x7f\x71\xda\xae\x08\x7c\x31\xb6\x5d\x74\x7f\xc8\x6f\x70\xc4\xbb\x38\xe6\x05\x87\xb2\xc3\x95\
xba\x44\x17\x97\xa4\x99\x3c\xad\x3a\xac\xc7\x68\x7f\xae\x3e\xb2\x95\xf5\xfb\xab\xcd\xe4\xee\xca\x1d\x14\xf6\x84\xad\xa6\x8c\x12\xa8\x74\x7f\x12\xcf\xef\x73\xf9\x5f\xdb\x2b\x9b\xde\xb3\x27\x3c\xc9\x0b\x35\xca\x32\xfe\xf3\xad\x96\x2f\x42\x32\x05\x84\x9a\xa8\xca\xdf\x73\xf9\x81\xc2\x07\x45\x9a\x60\x1e\x4d\x02\x81\xa0\xfc\x33\xa2\x1e\x16\x6e\x88\x36\xdc\x19\x9a\xd1\xc6\x4a\x2b\x35\xad\xd5\xef\x33\x6b\x7a\xdc\x1d\x58\x57\x7e\x8e\x50\x25\x9a\xcd\x2d\xe4\xe4\x98\x97\x08\xbb\xe9\x7a\x22\x8a\xd9\x69\xa6\x45\x82\x19\xa6\xaf\xb8\x5e\x44\xba\xa6\x21\xa0\xdc\x34\xc9\xee\x89\xd2\xc7\x8b\x9e\x1d\x40\xa1\xf6\x73\x26\xfd\x27\xdd\x47\x3c\x3d\xcd\x3c\x5f\x76\x19\xfd\xc4\xc0\x21\xa0\x1f\xcf\xe2\x5b\xa8\x9e\xab\xaf\x1a\x3f\x44\xa6\x2f\x48\x8c\x2b\x86\x12\x69\x62\x47\x6a\x97\x01\x80\xa0\xe3\x64\x0f\xce\x91\x84\xca\x74\x42\x4b\xad\x14\x8b\xef\xa4\x0a\x6a\xd8\x33\x3c\x22\x17\x13\x24\xac\xb7\x56\xd5\x1f\x47\xe4\x0d\x2e\xf5\x6a\x8e\x24\x1c\xaf\x17\x62\xbc\x98\xa8\x8b\x51\x6f\x8a\xa0\x29\x1b\x56\xf9\xf1\x25\x18\x15\xb4\x9a\x20\xba\xe3\xbf\x7b\x41\x3e\x97\x76\x75\x23\xfd\xef\x17\x7b\x4a\xcc\x9c\x03\xfd\x8e\x2f\x01\x40\xb0\x56\x93\xe4\x7c\xd9\xa3\x86\xda\x0c\x2a\x8d\xb0\xc5\xa1\xd1\x7e\xbf\xa8\x96\x06\xb7\xee\xc2\x97\xf2\xe3\xeb\x8b\x2e\x8c\x51\xc5\x37\xbe\x2e\x4c\xb4\x5e\x12\xf9\xc3\x98\x14\x41\x12\xd9\x9c\xea\x28\xa3\xa4\xa2\x39\x10\x47\xda\xca\xd3\x7b\x22\xe9\x86\x41\xe5\x8e\xe8\xf5\x2a\xbe\x33\x9c\x1b\x58\x0e\x0d\xb1\x0d\x47\x26\xcd\x72\x29\x83\x5b\x9a\x2b\xa0\x79\xf5\x6e\x06\x4d\xd7\xd4\x66\xca\x8c\x67\x18\x63\xb9\x2f\x0b\x74\xf2\x6f\x13\x4b\x6e\x44\xca\xcd\xe7\xea\xe4\x8b\xd1\x51\xe7\xf3\xc7\x8c\x46\x6a\x8e\xa7\x08\x40\x38\x4b\xa9\xef\xb3\x16\x6d\xa6\x37\x6d\x53\xeb\x44\x72\x95\xf5\x15\xff\x7d\x3b\xf9\x86\xa1\x4c\xd3\xd2\x54\x79\xab\x49\x31\x5f\x2f\x89\x65\xec\x96\x65\x73\x10\x9e\x61\xc7\x81\xb9\x2f\x48\xfa\x71\xfd\x36\x42\x25\xee\xae\x82\x65\x21\x8f\xf6\x2c\xd8\xcc\x10\xb3\xe2\xe4\x8b\xa9\xc4\x3f\xe0\x37\x6d\x15\x3d\xe9\x92\xcc\x2f\x78\x49\xd1\x82\xbe\xb9\x7d\xb6\x22\xf4\x29\x62\xbf\xb6\
xf1\xc5\x79\x8a\x7f\xba\xce\x18\x31\x07\xd7\xcd\x48\x79\xd9\x8c\xce\x76\x5d\x1b\x72\xe8\xd0\x25\xb8\x45\xd2\x9b\x84\x9b\x70\xab\xd8\xf5\x8f\xeb\x2a\x8d\xad\x39\x96\x2d\xac\x33\xb8\xbd\x01\x33\xbc\x10\x26\x0e\x7a\x40\x71\x88\x3c\xbd\xee\xd3\x3d\xd5\x02\xbe\x6b\x0a\xbb\x87\xcc\xcb\xc4\xee\xba\x29\xb2\x26\xae\xdc\x01\xa2\xc5\xad\xc4\x21\x98\x8a\xf4\x4d\xee\x6e\xa0\x95\xa9\x35\x57\x00\xe5\x8e\x96\xe7\x46\xfa\x9a\x9c\x62\x58\x89\xad\x48\xa6\x8e\x05\x8c\xdc\x8d\x16\x83\x91\xa4\x5b\x02\x9c\x63\xcb\x5e\x1d\x54\x9a\xdd\xb8\x27\xdd\xaf\x35\xc2\x72\x37\x46\x8a\x83\xd8\x68\x0f\xe0\x79\xc8\x20\x6d\x74\x57\x76\x4f\xd3\x7d\x7b\x72\xd5\x81\x7b\x39\xca\x42\x43\x25\x16\xdb\xe0\x21\x2c\xd9\x4a\x3d\xc3\xf8\x51\x4e\x85\x5d\x75\xd0\xb3\x18\x81\x61\x8e\xee\x6b\x1f\x30\x5d\xfe\x3c\xd2\x28\x49\x1f\x19\xde\xe8\xcf\xea\xb3\x33\x80\xce\x3e\xf6\xfc\xb1\x4f\x26\x77\xb8\x35\x5e\xe1\x92\x2d\xfd\xde\x35\x8e\xe7\x4e\x1a\xf0\x3a\x3b\xef\xbf\x5f\xab\x9c\x0b\x57\x7d\xd4\xb4\x11\x97\xd1\x6c\x86\x50\x0d\x1f\x6b\x70\x86\xfb\xfd\xcb\x85\xa8\xc9\xd7\x5d\xfe\xb5\xf1\x27\x37\xa6\x1f\xf1\x00\x6e\x57\x9c\x6e\x01\x2e\x01\x27\x62\xed\x11\x99\xfe\xa9\x8a\xb4\x8a\xbc\x88\xa8\xa9\x4d\xaf\x33\xa6\xa3\xc5\x03\x74\xb4\xad\x4f\x2a\x5f\x35\x9d\x53\x69\xc7\xaa\x2b\x24\x93\xec\xa2\x3b\xc3\xd6\x8f\xb6\x6d\x91\xc4\x33\x70\x18\xac\x4e\x61\x39\x04\x6c\x10\xda\x02\xa4\xd9\x72\x07\xd7\xde\xdd\xd5\xad\x10\xc4\x2b\x97\xec\x7c\x2d\xce\x66\xb5\xf4\x9c\xf0\xb1\xd9\xf8\x36\x15\xca\x6a\xdb\xc2\x73\x57\x07\x40\x89\x68\x42\x5c\x33\x98\xed\x7e\x8e\x8b\xe8\x62\x8f\x5c\x1d\xad\xbb\x29\x9d\xcc\x95\x26\x18\xf4\x60\xa2\xb7\xe7\x59\xe6\x29\x03\xf9\x53\xa4\x20\x32\x8b\x9d\x2d\x56\x4d\x1f\xde\x28\xe7\xab\xc9\x49\xa8\x60\xf6\x55\xee\xdd\x98\x83\x2a\x19\x80\x8e\xee\x9e\xec\xbf\xaa\xc6\x3d\xe4\xc4\xa8\x74\x77\x7f\xd7\x56\xaf\x2e\x9d\x19\xef\x04\x27\xbd\x5b\xce\x7a\x93\xed\x0f\xb1\x60\xba\xe5\x98\xdc\xa0\xc6\x5b\xdf\xdd\x2f\x70\xea\x6c\x9c\x21\xee\xf2\x3e\xbb\xca\x21\x6d\xcc\xd7\x30\xd9\xf1\x19\x28\x9b\x08\xfd\xae\x77\x4f\x7c\x98\x5a\x63\xeb\xf6\x54\
xe1\x66\x5a\xcc\xcc\x74\xdf\x6e\x8d\x29\xef\xca\x84\x9a\xd1\x5b\xbb\x08\x81\x31\x23\xd1\xf9\x59\x2c\xe8\x8f\x1f\x21\xc0\xd7\x1f\x92\xaf\x36\x43\xe4\x9d\xb4\x49\x2d\x3c\x32\xea\x18\xd1\xbf\x6c\x67\x57\x99\xd0\xf2\x93\x62\xb2\x5f\x94\xc9\x3a\xbf\x19\xd0\xbd\x4f\xb0\xad\x33\x8c\xd6\xac\xad\x0d\xde\x53\xbb\x7b\xbc\x28\x17\x63\x14\xf4\x0d\x4e\x64\x46\x53\xcd\x89\xd7\xb3\x75\x30\xe3\xa2\x42\x4c\x74\xf6\x41\xa0\x58\xa8\xa5\x3c\xf4\xfb\xa4\xd9\x29\x3e\x3b\x44\xaa\x2d\xfd\x28\x58\x95\xe4\x3c\x5f\x3a\x69\x1e\xf6\xb9\x8b\xc1\x95\xe9\x60\x59\xc9\xe4\x38\x07\x8a\x87\x4e\x2f\xd5\x09\x19\xc6\xe6\x60\x11\xd0\xdf\x3d\x50\x2c\xe5\xa8\x36\xb0\xf7\x0a\x48\xe8\x63\x08\xd8\x0a\xb1\x82\x89\x9c\x20\x59\x8a\x47\x43\xef\x64\xeb\xfd\xc9\xbe\x6c\xbb\x7e\xbb\x23\xe4\xeb\x0b\x7a\x01\xd1\xfa\xb7\x7c\x58\x88\x59\x68\x77\x54\xe0\xe0\x93\x98\x51\x49\x09\x1a\x72\xc0\xb8\xf7\x60\xd4\x8e\x74\xe8\xb7\x15\xdf\x19\xe5\x2f\x58\x0a\x55\xd0\xab\x0e\x2e\x9b\x0a\xa7\xd8\x89\x66\x90\x4c\xc4\xe6\x8e\x2d\x3a\xd7\x61\xde\xdf\x06\x89\x36\xd1\xa4\xf8\x93\x37\x48\x0b\x13\x9e\x6d\x5e\x18\x06\x67\x06\x2a\x3f\xfa\x90\x1d\x00\xb7\xfd\xd8\x2a\x12\xf9\x1a\x79\x28\xe3\x92\xe4\xa4\x59\x96\x7d\xf5\xef\x28\xe8\xc8\xe2\x33\xba\x04\xb1\x05\x6a\x5e\x7c\xea\x7d\x81\x13\xf5\xdb\xc5\x7c\xf5\xe7\x61\x4f\xc7\xfb\x51\x61\x54\x70\xd0\x1e\x62\x8d\x8f\x17\x0c\x42\x91\x34\x04\xef\x53\xc5\xe3\x76\xc7\x22\x8c\xe8\xaf\x8d\x33\xc5\x25\x29\xe6\xb8\xe2\x0a\x97\x8d\x2c\xa4\x89\xfd\xba\x63\x54\xfd\x58\x0f\x2f\x77\x55\xf3\xf1\xbe\x76\x38\xbe\x33\x83\xc9\x7c\x2d\x05\x7d\xde\x73\x39\x37\xa4\xf9\x61\x4d\xe8\x0f\xb8\x25\x9c\xe7\x9b\x24\xc3\x9d\x45\xdb\x38\xe2\xf9\x7d\xf4\xfb\x2a\xcf\xb4\x7c\x2a\x90\xdb\x8b\x5e\x3b\xde\x26\x7f\xec\x60\x53\x87\x6f\x1d\x5c\xe0\xe5\x34\x89\xae\x10\xe2\x48\x7b\x73\x6c\x2f\x7b\xc4\x48\x91\xaf\xff\x02\x74\x65\x14\x18\xbd\xee\x4d\xfe\xe3\x3b\x01\x2e\x04\x34\x5d\xb6\x03\xb3\xde\x2d\xbd\x94\xc4\x1d\x95\xc7\x44\xcd\xde\x9a\x7a\xdb\xf3\xb5\x27\xd1\x52\x03\x3f\xd8\xb6\x12\x8a\xcf\x37\x03\x42\x9d\x0e\x08\xaf\x8d\x3e\xbc\xaf\x29\xef\
xb6\x7d\x2c\xb5\xfd\x44\x1b\xfc\xb5\x08\xd5\x29\xa2\x90\xdc\xa6\x61\x4b\x83\x41\xfd\x36\x8f\x87\x6a\x95\x60\x73\x6b\x47\x18\x96\x00\x55\x79\x76\x60\xb3\xae\xf4\x67\x88\xe8\x76\xb6\xfd\xea\x9a\x54\x7f\xad\xd7\x33\xce\x92\x38\xdd\xfe\x0b\xd3\x7d\xfb\xf2\xf5\xc4\x4d\x45\xd9\xe3\x1b\xd3\x6f\x5a\x4f\x0a\xdb\x0e\x81\xf0\x65\xcf\x73\x6d\xcb\x9a\xff\x44\xed\x1f\xbb\xd7\x97\xcc\xa5\xc9\xbd\x65\xe2\x39\xc8\x49\x80\xec\x2b\x42\x12\xc9\x19\x4a\xbf\x2f\xa1\x3b\xcd\xa6\xf2\x57\x38\x5e\xf4\xc4\xf1\xb1\x3c\x9a\xc1\x8d\x1a\x02\x4b\x72\x89\xc3\x39\x4a\x3b\xbd\x9c\xf0\x51\x3b\x0b\xa1\x0a\x7a\x09\x55\x99\x37\x96\x83\x42\xf9\x8b\x12\xdc\xfb\x45\x7e\xad\xcc\x12\xfe\xdd\x69\xf5\x02\x95\x21\x5c\x47\x8b\x61\x85\x89\x23\x81\xc3\xde\xf8\x96\x42\x5d\x19\xa6\x3c\x44\x77\xaa\x1d\xf9\xd0\xc5\x59\x4d\xb2\xe3\x7d\xfd\xdd\x0c\xdf\xbb\xd0\x6d\x9b\x23\xf0\x63\xf5\x97\x47\xb1\x8f\xcf\x92\x00\x4b\xe3\xf2\x83\x8a\xd7\x04\xdd\xd5\xc8\xfc\xc1\x71\x23\x50\x0b\xbd\xef\xea\x0a\xbb\xfc\x79\x88\x63\x1c\x74\x4e\xc5\x55\x01\x0f\x61\xea\x78\x62\x2b\xca\x0b\xeb\x4b\xb4\x7c\xbf\x0f\xb0\x6c\x93\x1e\xed\x62\xaf\x95\xc0\xc1\xd5\x5f\x69\x60\x89\x04\xed\x7f\xed\x60\x7c\x89\xfa\xa6\x4d\xbe\x26\xea\xc8\x41\x8f\x53\x79\xb6\x35\xd6\x00\xfd\x11\xb9\xc4\xec\xa4\x15\x87\xcc\xea\x13\x52\x90\xb6\xa3\x74\x7f\xa2\x1d\x39\x16\x38\x74\x35\x1a\xad\x8c\xea\xb4\xfd\x84\xae\xc8\xc2\x6f\xc4\x7f\x02\x90\xfa\xf8\xb5\x38\xb2\x9b\x5b\xb7\xd6\xa6\x28\x73\x0a\x35\x27\xc9\x30\x54\xfe\x92\x61\x9e\x00\x41\xd3\x7f\x0f\x77\x49\x24\x85\x99\xd2\xc7\x28\x66\xa3\xca\x43\x12\x44\x9d\x0d\x47\x8c\x38\xd2\x2d\x37\xc4\x1f\xb3\xc6\x78\x1e\x78\x52\xf0\xc6\xa8\x4d\x6b\x43\xce\xa0\xfb\x98\x5e\x0c\x11\xdd\x5a\xf8\xd5\x03\xdc\xe2\x4b\x89\x29\x79\x02\xe0\x91\xf2\x25\x8a\xb0\xc7\x77\xdc\xea\xe8\x21\x33\x47\xf3\x37\x8e\x08\xc5\x5b\x97\x4e\x64\x79\xf1\x97\x47\xf9\x2b\x13\xa1\x72\xa4\xf0\x37\x50\x65\xe9\xbd\x99\x43\x7e\x8e\x88\xe3\x64\xc7\xeb\x61\x5f\xc4\xef\x8a\xd4\x60\x83\xad\x02\xe1\xcb\x5b\xb4\x01\x81\x72\xa9\x37\xab\x6a\x7c\xe4\xa1\x8a\xde\xd7\xf2\x57\x51\x7b\x70\
x21\x50\x9a\xcd\x5b\x7f\x73\x0b\xb4\xab\x1b\x3b\x67\x99\xa2\x11\x9b\x7a\x62\xb7\xf3\x97\xc1\xeb\xa7\x1b\x5c\xe6\x18\x7d\x53\x98\xb8\xd0\x7e\x9c\x41\xa3\xee\x40\x91\x49\xf0\xde\x2e\x44\xab\xb0\x63\xe1\xdd\x85\xc1\xf5\xc4\xcf\x31\xbf\x23\x67\xbc\x57\x13\x95\xb7\x76\x15\x2a\x6c\xc7\x6d\x9d\xca\x55\xeb\xf0\xce\x73\x1e\x20\x87\x9a\x39\x44\x2b\xf2\x3d\x5d\x18\xe4\x86\xda\x32\x8e\x9f\x64\x41\x6c\x68\x4a\xbd\xfb\x15\xe9\x14\x0d\x79\xb3\xe2\x0c\xce\x1b\xe7\x2f\x4e\x5b\x5b\xa8\xa2\xd2\x33\x44\x18\x85\xff\xe5\xbe\xe1\x0c\xd7\xf2\x74\x1e\x22\x87\x5d\x66\x4a\x11\x94\xc1\x87\xbf\x57\x79\x84\xdc\xc0\x3c\xc5\xc0\xc8\x2d\x4c\x77\x14\xfa\x84\xd4\x95\xa1\xda\xb8\x93\xd0\x07\xec\x0c\x28\x91\x4b\xad\x5f\x42\x88\x7a\x61\x73\x7f\x96\xf4\xd6\x92\x21\x8f\xef\x0f\x65\xd5\xe7\xcf\xe3\x17\xa2\x38\xa2\x85\x65\x35\xf5\x18\xd3\xdd\xe9\x66\xf3\x7b\xda\x1e\xff\xc1\x4d\xa6\xbe\x08\xb2\x08\x4f\x75\xca\x34\x8d\x1d\x4c\x85\xa2\x98\x1d\x4b\x7f\xf9\xce\x6d\x34\xf1\x99\x91\xbc\x0c\xea\x0b\x4f\xd9\xa4\xc6\x55\xe3\x59\x76\x71\x92\x1e\xfb\x54\x56\x42\x72\x04\xf6\x5b\xeb\x22\x03\xdd\x90\x18\x29\x56\xe1\x28\xdd\xac\x38\x3a\xd9\x9f\xd7\xcf\x8f\x55\x3d\xf9\xb9\xf1\xf6\xe3\xe3\x14\xf9\x72\x5f\xbe\x77\xd9\x18\x51\xf8\x12\x61\xe7\xdf\xa0\x80\x51\x69\xea\x03\x53\x2f\x79\x93\xb5\x8d\x1e\x4d\xe3\xd6\x92\xa2\xb1\x45\x55\xd2\x4a\xc7\xd9\x17\x45\x50\x28\xd3\xd9\xc2\x6b\x20\x5f\x0c\x0a\x1d\xbb\xe1\xd8\x3f\x62\x6b\x6d\xa3\x25\x84\x34\x1d\x39\x09\x66\xb3\x63\x23\x2a\xfa\x17\xe9\xe2\xf8\x60\x27\xf6\xa7\x1d\x4d\x56\x39\x02\x21\x99\x72\x45\x20\x8a\xef\xf4\x3c\x5e\x9a\x65\x99\x27\x90\x43\x24\x5b\x96\x5b\xbf\x21\x09\x65\x0e\x17\x5a\x40\x6a\xe7\x04\xa2\xd8\x52\x01\x92\xc0\x7b\x52\xe2\x91\x26\xa6\xf3\x4d\x93\xf1\x8f\x30\x88\xbf\xf1\x2f\x7a\x70\x6c\x69\xf2\x00\xc0\x1d\x04\x75\x86\xed\x3b\x93\xba\x81\xed\xc1\x2a\x6f\x91\x11\xac\x1d\x33\xc3\xd4\x26\x9d\x92\x4b\xf2\x2e\x99\x02\x06\x9c\xe7\xb2\x07\xbb\xac\x0b\x52\x0e\xd9\x22\x46\xed\x98\x0a\x40\x65\x7b\xec\x87\xc6\x2c\xdd\x00\x97\xc0\x47\x9a\xf6\x28\xad\xe4\x7c\xc8\x71\x36\xc5\x15\x27\
x0d\x47\x16\x20\xb5\x68\x66\xe3\xcd\x98\xf6\x8e\xa8\x04\xdd\x4f\x79\xdb\x13\xa3\x8d\xfe\xfe\x27\x91\x74\xa7\x93\xa9\x7b\xd7\x22\xc2\x90\xaa\xbe\xa2\xf0\x5b\x27\x0a\x74\xc7\x92\x5b\x35\x5f\xaa\x7d\x8e\x3f\x7f\x86\xe8\x6e\x63\x05\x11\x7a\x72\x6d\x74\x69\x2e\x5d\xde\x4f\xd4\x1a\xde\xea\xbf\x15\xf1\xc0\x45\xbd\x6b\xd1\xe7\x3a\x79\x6c\x7c\x89\x77\xab\xc6\xf6\x88\xdc\x78\x68\xe8\x98\x73\x12\xf7\x08\x44\x77\x6e\xe3\x1d\x45\xd5\xad\x89\xcf\x50\x71\x86\x54\x1f\x2e\x8f\x7c\xa2\x87\x3e\x9a\xd4\xc5\x14\x34\x14\x3f\xfb\x9e\xd6\x2a\xca\xd3\x63\xd0\xd3\x75\x6b\x64\xae\xfa\x85\xe1\x93\x44\x72\x94\xd4\xc6\x76\x3d\x51\xb2\x09\xd8\x34\x46\xd5\x86\xd6\x8f\x69\x95\x59\xbf\x8f\x83\x49\x06\x60\x8d\x0f\x4c\x56\xc1\xec\xaf\x09\x1a\x65\x85\xc9\x46\x78\x2a\x53\x42\x01\x4d\xe8\xd0\x10\xa6\x3b\x39\x86\xf8\x66\xaf\x56\x1e\x82\x11\x22\x3b\x75\x93\x64\xce\x38\x40\x91\xb5\xc5\x8c\xd1\x78\x68\x4e\x5f\x73\x93\xd2\x8d\x69\x28\xd6\xd0\xd3\x10\x95\xeb\x93\x54\xcd\x71\xfa\x97\x29\xf2\x64\x67\xd3\x44\x4c\x56\x35\x2b\x7c\x2c\xb6\xa0\xbf\xad\x36\x0f\x7c\x3d\xf3\x0c\xe4\x33\xb6\x97\xff\xf6\xf3\x3e\xaa\x48\x57\xd4\xef\xdb\x7e\x66\x69\xf0\xfa\x38\xd8\x03\x35\xd6\xca\x0f\x4c\x9e\xd0\xe7\xcd\x8f\x77\x58\xfd\xbd\xaa\x1f\xd9\xf9\xfe\x90\x9d\x4a\x61\x91\x59\x4f\x84\xc2\x99\xc0\x61\xf3\x3c\xcc\x61\xf6\x73\x95\x5c\x32\x25\xdf\xba\x1b\x20\xa5\x73\x51\xb1\xf9\x1e\xd8\x25\x43\x65\x5d\x31\x71\xf5\x1a\x8f\xe0\x59\x85\x63\xde\x85\xa3\xe0\xc4\xec\x96\xe5\xac\x6b\xe1\x74\x6d\x3f\xf3\x36\xf4\xef\x6e\x68\x22\x01\xd3\x64\x1a\xf4\x41\x15\xbb\x47\xf5\x45\x4c\x3a\xae\x69\x3d\xde\x50\x7c\x5f\x67\xcd\xa5\x30\x84\x4a\x8a\xa1\xa4\xa1\x44\x3d\x80\xf6\x6b\xbd\x48\xe5\x3e\x6f\x76\x0c\x06\x75\x4c\xd1\x60\x65\xa6\x41\x9f\xcc\xc7\xf1\xfa\xf9\x4d\x17\xe4\x84\xa1\x5c\x99\x7e\x02\x4e\xa4\xfa\x80\x40\x4c\x31\x05\x85\x47\x4c\x22\x03\x41\x3a\x7d\x01\xe9\xd5\x1e\x8f\x16\x93\xd8\x35\x12\xa1\xb1\x35\x53\x74\xc7\x6c\xfd\x76\xef\x32\xd6\x3d\xb6\xde\xc4\xa3\xff\x2e\x38\x30\x7f\xe0\x57\x9c\x5f\x27\x71\x49\xf7\xdb\xdc\x52\xef\xca\x10\x96\xee\x24\x00\x19\
x25\x77\xcd\xe4\x73\x78\x34\xf7\x0f\xd6\xd1\xe3\xd9\x18\xd4\x2a\xf9\xcf\x8b\x88\x6d\x66\x9d\x81\x66\xdc\xbb\x6a\xd1\x5f\xd1\x15\xae\x5d\xe2\x56\xe7\x53\xb3\xe5\x48\xfc\xf1\xe7\xe7\x76\x37\x6c\xd1\x28\xb9\x20\x44\x2c\x11\x45\xbc\xf3\x5b\x2e\x1c\x5d\x54\xbd\x6f\x9c\xcc\xcd\x91\x76\xc6\x17\x55\xee\xbe\x21\x4a\x73\x0c\x49\xfd\xf7\x92\xbe\x0e\xb9\xb1\x73\x04\xe0\x25\x10\x65\x9c\x9a\x30\x7c\x19\x10\x70\x65\x1a\xca\x6f\xb6\x10\xd9\x39\xbd\x3f\x7d\x0a\xae\xee\xcb\x47\xf9\x75\x2d\x4a\x33\x15\x63\x63\x5a\xc1\xc7\xb1\x75\x46\x75\xab\x62\x1b\xff\x8a\x1b\x79\x68\x94\x7f\x10\x34\x62\x46\x5d\xd0\xd3\x4d\x6b\x64\x36\xb9\x90\x42\xd6\xee\xdc\x1b\x79\xd7\x20\x3c\x70\xd3\x08\x1b\x7f\xb3\xd9\x02\x31\x20\xf4\x72\x31\x66\x06\x85\x08\x6b\x4a\x7a\x89\x60\x45\x56\x44\x08\x5d\x86\x2e\xb3\x49\x69\x96\x23\x4c\x99\x03\x9e\x5d\xb9\x60\x6f\x70\xf5\x15\xc0\x50\x98\xda\x97\x63\x5e\xa2\x1e\x51\x30\x1f\xc9\x43\x7a\x0f\xfa\xdb\xf7\xa3\xe0\xe7\x11\xf9\x9b\x15\xd9\xdf\xd8\xba\x53\x17\x87\x6f\x1d\x8b\xd5\x64\x8a\xae\x5d\x9b\x47\x3e\x37\xfc\x94\xbd\x19\x13\x95\x32\x4c\x96\xf4\x99\xde\xff\xd1\x74\x16\x4b\xd6\x32\xcd\x16\xbe\x20\x06\xb8\x0d\xb1\x8d\xbb\x33\xc3\xdd\x9d\xab\x3f\xd1\xef\x77\xfe\x01\x11\x1d\x1d\x7b\x40\x15\x59\x2b\xd7\x93\x50\x59\x40\x46\x84\x93\x7e\x0d\xc5\xa4\x40\xd3\x1f\x27\x2d\x23\xb6\xa3\xd6\xf4\xc1\x46\x10\x54\xe8\xf8\xc3\xeb\x1e\x89\xd1\x7a\x56\xd9\x7e\xcf\xd0\x34\xde\xac\x84\xf8\x92\xbd\x57\x4a\x38\x53\xf9\x99\x30\xbb\x1d\xcd\x40\xb0\x7b\xfa\x2f\xda\x50\xe1\x87\x81\x3d\xd6\x7f\xbb\x18\xff\x18\x86\xab\xb1\xcc\x6a\x6e\xf7\x7c\xb1\x25\x0d\x86\xf0\x3b\xc9\x1f\x36\xd4\x8e\x07\x45\x1a\xfc\xe6\xe6\x04\x11\x45\x63\xac\x92\x56\x5c\x68\x86\x43\x54\xa2\x61\xb4\x12\xfa\x83\x91\x9e\x11\x0d\x2a\xbb\x63\xa4\x60\xae\x67\xe9\x03\x17\x53\xe0\x6f\xc1\xa8\x79\xe9\x38\x34\x89\xe8\xf9\x27\x44\x5f\x88\x4a\x33\xee\x8f\xbd\xad\x2f\x47\x69\x2d\x79\x7e\xa6\x37\x45\xe4\x73\x33\x41\xf2\x35\x6a\x3e\x45\x2d\x63\x3b\x0c\xae\xb0\x0f\x22\x2f\xe9\xbb\x5f\x12\xff\xc8\xdd\x08\xf7\xeb\xc0\x12\x56\xef\x95\x17\x66\x80\x52\x24\
x7d\x0b\x4a\xd2\xe7\x81\x12\xa5\xf2\xe3\x05\xe5\xfe\xf6\xb4\x14\xa2\x52\x63\x1b\xa8\x98\x5c\x34\x3b\x86\x25\x09\x09\x2b\x03\xa1\xe9\x0f\xfd\x4c\xc9\xeb\xa9\x9c\xca\x74\x54\xb9\x73\x2a\x84\x06\xed\x2b\x50\x8a\x4a\x8d\xea\x85\x8b\x76\x8e\x3b\x3e\xfe\x99\x0b\x7a\xe0\x25\x04\xa4\x7a\x33\x22\x5d\xea\x0a\xba\x27\x3c\xa3\x97\x74\xc0\x66\x27\x10\x71\x6e\xeb\xbd\xab\xdc\xba\x6e\x2b\x39\xe4\xff\x5e\xc0\xaf\xeb\x4a\xec\xd4\x07\xae\x0e\x26\xe2\x3c\x82\xef\x88\x63\x63\xd5\xaf\xf0\x83\x51\x1f\xca\x23\x8c\x2d\x85\xf2\xd3\xee\xef\x67\x31\x5d\x9e\xc1\xa5\x15\x14\x5e\xec\x9c\x1c\x71\x3a\x29\x31\xfd\xd7\x93\xc5\xaa\xe8\x5c\xa3\x20\xee\x58\x04\x81\x13\x60\x56\x1f\x98\xab\xfa\xee\xa8\xc6\x9a\x9d\x6c\x54\x06\xef\xc3\x05\x00\x66\x2d\x35\x3d\x13\xc0\x87\x17\x08\x8e\x59\xb0\x3f\xa5\x5b\xfc\x40\xeb\xe6\x63\xfd\xec\xf4\x4e\x7e\xb3\x03\xa1\xda\x3c\x46\xba\xd3\x92\xba\xdd\x04\xb4\x3f\xcf\x93\x5a\x62\x27\x24\x9d\xe0\xdb\x26\x89\xb2\xaa\x0b\xcd\x0c\x9b\xc1\xd4\xc5\x87\xf0\x61\xf0\x31\x7b\x69\x19\x6c\x80\x60\x8e\xfa\x14\xb5\x5e\x3d\x6e\x3a\x3b\x9a\x29\xbf\x4d\x39\xe6\x6c\xfc\x3d\xa6\x59\xcb\xca\x9e\x67\xdd\x3c\x2e\x44\x29\xd6\x32\x99\x1e\x4f\x72\x2c\xa6\x53\xa0\xe4\xb1\x05\x2b\x50\x23\xd4\x02\xce\xc4\xb9\x9d\xa8\x66\x90\xc4\xe5\x2c\x83\x40\x98\x30\x54\xb0\x76\xfa\x4b\x76\x24\x3e\xc2\xcd\xbe\xf5\xcf\xf0\x02\xb8\x02\x3f\x54\xc1\x88\xb2\x92\x1a\x5f\x6b\xde\xd3\xdf\x9e\x9f\x07\x3c\x23\x73\xff\xad\x67\x74\x85\x49\x03\xfb\xe0\x65\xe5\xfa\x45\x73\x3a\x18\xc2\xf7\x43\x90\x4b\xec\x04\x4b\xcd\xad\x67\x86\xb5\x7b\x10\xf1\x4c\xe0\xf2\x56\xd4\x39\x91\x9f\xff\xf8\xac\x96\xc4\xa9\xfd\x91\x34\x80\x05\x11\x52\x06\x1a\x8c\x20\x21\xe9\xcc\xb5\x47\x82\x92\x50\x48\x0f\x01\x91\x47\x57\xb4\x61\x97\x73\x34\xd7\xc5\x92\x75\x97\xf2\x57\xcf\xa2\x0c\x1f\xac\x18\x0e\x4b\x26\x3c\x7a\xc6\xa5\x36\x16\x1b\x41\x50\x44\x0e\x42\x39\x8e\x81\xcd\x1a\x77\x0b\x81\x68\x7e\x88\xab\x53\x56\x21\x05\xcd\x6a\x4e\x0b\x5d\x4e\x19\x0c\x42\x8c\xc3\x39\xae\xe6\x50\x6f\xa0\x3e\xf4\x31\xcc\x30\x41\x16\x77\x6e\x4a\x85\xf4\x23\x10\x17\x68\xb7\x95\x00\
x12\x1a\x00\xb4\x9e\x6c\xb7\x32\xc9\x2f\x34\x29\xfc\x40\x70\xce\x19\xa5\x58\xa5\x38\xd1\x18\x2b\x2d\x20\xab\x31\x8e\xb7\x94\xbe\x06\xad\x07\x4b\x84\x6a\x1a\x3f\x67\x7f\xda\x1c\xa4\x30\x8c\x02\xaa\xea\xa8\x37\x3e\x2f\x92\x30\x1a\xb6\xe2\x44\xd2\xf0\xd8\xe6\x27\xe8\x65\x2e\x26\xc4\x81\x33\x84\x45\xc7\x2f\x70\x62\x6a\x2c\x37\x19\xc7\xd1\x3d\x88\x06\xfb\xaa\xe6\x30\x60\x8d\x5c\x01\x36\x3a\xc7\xcb\xca\x3b\x28\xa2\x02\xa1\x59\x30\xf1\x52\xb6\x95\x0b\x4d\xfc\x76\xfa\x04\xf2\x3b\x37\x35\xdc\x0f\xc1\x5a\x60\x77\x59\x5e\xde\xbc\xb7\xa4\x3f\x0b\x88\x02\x01\x44\x14\xe6\x9f\x32\xdd\x30\xbe\x55\xd2\x43\x41\xbf\x5a\xe8\x7d\x53\xb5\x7f\x20\x58\x00\xa4\x8b\x57\xfb\xb8\x50\x9d\x25\xda\x0a\xb1\x62\xb4\xd0\x08\x79\xaf\xb8\xa1\xe2\xd1\xc0\xb3\x03\x16\x12\x79\xe7\x13\xa9\xc6\x93\xf3\x6d\xee\x05\x3b\x42\xa3\x0e\xfc\x2b\xa8\xf1\xdd\x37\xf5\x18\xbf\xb4\xf9\xc5\x1d\xf8\xa4\x6c\x0e\x64\x06\x9a\x35\x78\x4f\xac\xb1\x08\xa8\x9e\xf4\x4e\xd3\xf3\xd1\x20\x38\x3d\x35\xfa\x61\x4f\x1b\x53\xa9\xe5\xcc\x52\x5e\xab\xed\x4a\x94\x68\x76\xea\xe2\x18\x0c\xab\x1d\xa0\x2f\x05\x5a\x17\x4a\xee\x96\x2a\xb4\xc1\x9b\x7c\xb0\x58\x92\x27\xdf\xed\xbb\xdc\xc9\x2d\xd3\x5b\xdb\x40\xb7\x54\xee\x18\x3b\x0b\xbe\x17\x61\x76\x33\xc9\x23\x37\x4a\x79\x02\x2b\xc0\xa7\xa9\xa2\xd9\x61\x8c\x4f\xfa\xe4\xc0\xd0\xb7\xbf\x61\x85\xd3\x22\x52\xa0\xc6\xf0\x75\x6b\xba\x85\xae\x82\x86\x92\x7f\x4a\xc0\x9c\xbc\x5d\xd9\x68\xd4\x78\x6f\xbe\xc6\x59\x9c\xb5\xe2\x01\x81\x4c\xc9\xf9\x6c\x68\x00\x86\x29\x83\x68\xed\x00\xd8\x79\xe3\xee\xdc\x00\x4a\xab\xd4\x3c\x92\x4e\x69\x00\x48\x8f\xf0\xeb\x73\x7e\xd8\x8a\x94\xad\xf5\x67\x59\xe0\xf4\xce\x75\x89\xe3\x00\x2f\x78\xc9\xb2\x45\x01\x72\x40\x20\xf1\xea\x45\x0e\x3a\xb5\x59\x61\xec\xc8\xf4\x20\xd6\x9d\xa6\x07\x2d\x4e\x14\xc6\xcb\xa8\xc5\x4c\x3c\xc5\x4e\xad\x33\x26\x30\x16\x24\x51\xc9\xc2\x2d\x38\xac\xc9\xc3\x8c\x2a\x61\xce\xdd\x13\x1e\x7b\x15\x6c\x5f\x61\xd6\xfe\x9b\xb6\x18\x61\x6b\x48\x0f\x17\x8e\xe0\xeb\xae\xbc\x22\x17\x3a\x34\x3d\x9b\xd3\x57\x89\xb4\xe0\x66\xa5\xc5\x48\xa4\x20\x08\x60\x84\xc2\x0a\xa7\x4f\
xc4\xd4\x52\x2f\x68\x5f\x11\x38\x4e\x0c\x91\x1e\x76\x63\x98\x9b\xd2\x96\xb9\xda\xbf\xe4\xe9\x83\xc9\xa1\x6d\xcb\x0d\x6f\x77\xa1\xd1\x03\x3e\xda\xcd\x85\xb6\x96\xea\xc2\x81\x94\x7b\xb9\x97\xcb\x61\xc3\x0b\x99\x74\x47\x32\x03\x2a\xb4\xea\xf1\xd3\xae\xc8\x4a\xa6\xa0\xb6\x5b\x63\x07\x08\xc8\xd4\xb7\x9f\x9f\x36\xaa\x4e\x68\x8c\x8c\x73\x8b\x62\xed\xc1\x7a\xfc\xa2\xd3\x73\x73\x50\xe9\x13\x10\x8b\x6e\xb1\xaa\x36\xad\x07\x32\xb7\x3e\x05\xb0\x92\xac\xa5\x26\xb6\xa1\x83\xe2\x34\xd8\x02\x4f\x90\x08\x31\xb2\x28\xaf\x08\xa6\x30\x0d\x4f\x9f\x52\xcd\x8c\xb1\x85\xab\xd3\x56\x90\x84\xcd\xde\x26\x10\x3b\x07\x36\xe6\x5e\xb5\x76\xd9\xb1\xd5\x8f\xf2\x64\x12\xf1\xcc\x0a\xff\x0c\x07\xbb\x99\x40\x3d\x8f\xcd\x83\x49\xbc\xbe\xc5\x90\x31\x69\x64\x83\xbf\xe5\x6b\xfd\x9f\x63\xfb\xc7\xf8\x65\xe9\x28\xb2\xf3\x8d\xa5\xad\x6f\x4a\x7c\x7b\x84\x5b\x68\xdf\xa2\xd8\xfe\x1a\xed\x77\xa1\x24\x71\x4c\x0e\xb2\x27\x17\xe7\xc8\xbc\x8b\xd4\x76\x2d\xd4\xff\xf4\xf4\xb7\x16\x27\xe8\xdc\x8f\x93\x59\x9c\x28\x4b\x0a\x1d\x30\x46\xe4\xc3\x1a\x63\x9e\x9d\x14\x1f\x1a\xc3\x06\xa2\xa5\x12\xcf\x36\x03\x60\x75\xa2\x59\x03\x44\xfe\xd7\x92\x0e\x94\xc3\x23\x98\xd7\xfa\xf0\xa8\x62\x0d\x21\x97\x1a\xa8\xf0\x4b\xb4\xe1\x81\xa1\x7b\x69\x49\x37\x55\x59\xb2\x0d\xc8\xb7\xc3\xaa\x5c\xbf\x6c\xc1\xfa\x95\x07\x51\x55\x88\xf2\x04\x11\xd2\x8f\x0e\x58\xed\xc8\x89\xba\x9c\xed\x3e\xb8\x54\xee\x62\xdf\x69\x9d\x1e\x49\x28\x75\x46\x32\xbf\x2d\x84\xf7\x43\xbd\x03\x8d\xb9\xbc\xf5\xc7\xfd\x4d\x50\x24\x9f\x94\x5b\xe7\x6d\x88\x65\x53\xb3\x9b\x3b\x89\x11\x7b\x45\x28\xad\x4e\xea\x1e\xb0\xd9\xa0\x04\x02\xfe\xdd\xc3\x89\xa6\xb1\x6e\x1a\x98\x5f\x5a\xd1\x65\x02\xa2\x3e\x02\x2d\x8b\xdf\xda\x69\x5b\x64\x03\x5a\x9f\x0f\xff\x62\xf8\xe0\xcc\xca\x9a\xe2\xb2\x08\x7d\xc8\x6e\x7e\x00\x63\x3d\xab\x26\x4b\xb2\x36\x10\x3b\xb3\x72\xb1\xcf\x15\x92\xeb\xd4\x7c\x96\xf6\xb4\xe3\x07\x82\x69\x39\xb6\xda\x78\x27\x4a\x3e\x9c\x57\xca\x7b\xa8\x79\xc9\x85\xc6\xca\xc2\x04\x2d\x60\xfc\x96\x05\x26\x88\x62\xb4\x1c\x6b\xfa\xe6\xa1\x3b\x73\xac\xf9\x2d\x31\x42\xb7\x82\xc4\x40\x1e\x81\xc8\
xbf\xb6\x55\x61\x50\xea\x38\x8c\x2a\xad\x87\x37\x80\xf4\xf8\x55\x15\x3e\x26\x63\x29\x36\x3f\x87\x16\x9d\x3e\x2d\x65\x52\x7c\x4e\xd4\x95\xdb\xf9\xe6\x02\x51\x7f\x47\x5b\x97\x78\x42\x9d\x7f\x9e\x9d\x66\x49\x4d\x26\x62\x19\xa3\xca\x9b\x37\x37\x55\xc6\xf2\x47\x4f\xfa\xa6\xbe\x0b\x5f\x0b\x92\x30\x83\x5f\x06\xd3\x08\x59\x9c\x84\xc4\x7b\x61\xd5\xaf\x3f\xa1\x9c\xd5\x7d\xeb\xe5\xac\xc6\xcd\xa7\xcf\x6c\x23\x21\xdc\xd8\x6d\x33\x4e\xb8\x64\x51\x68\xe2\x8c\xe3\xba\x05\x29\xc6\xcf\x99\x6b\xff\x5f\x7d\xb2\xc5\x64\xee\x07\x73\xc7\xa9\xd6\xb1\x16\x42\x82\xb2\xa6\x63\x2d\xae\xe9\xb8\x2c\x2f\x21\xfe\xf6\x42\x78\x44\x67\x8e\xb1\xc1\x8a\x60\xda\x2b\x2e\x84\x58\xeb\x59\xe6\x58\xae\x26\xb4\xa7\x58\xb1\x24\xd2\x3f\x2a\x74\xa1\x74\x5c\x56\xfa\xfc\xd2\xc6\xcd\x34\x1d\xc0\x3a\x8b\xc1\x24\xc9\x13\x10\xa3\x8f\xb7\xab\x2f\x56\xa0\x92\x8a\x13\x4d\x35\x1b\xcd\xe6\x5d\x14\xcf\x2b\xf8\x9e\x53\x9e\xb3\x08\xa5\xc0\x8b\x7d\xd4\xa9\xa6\xf1\x32\x63\xaf\x21\x5b\x60\x12\x22\x97\x91\xf2\x1b\x1b\xab\x38\xad\x6e\xd5\x4e\xee\x04\xdd\xda\x71\x05\xc3\x0a\xed\x85\x16\x26\xc9\x0b\x53\x6c\xed\x8f\x6e\x9f\x94\xa3\xfc\x1d\xa8\x4c\xa2\xe0\x1e\x19\x9c\xeb\x38\x67\xb5\x57\x04\x8b\x4b\x19\x93\x31\x9e\x44\x70\xb9\x33\x98\x00\xc0\x8f\x5a\x39\xb8\xf2\x3e\x1e\x62\x3b\xc1\x77\x4b\x3f\x48\x42\x76\xf0\xb4\xd2\xda\xc3\xbf\x71\x2f\x6b\x3a\xfa\x26\xec\x1e\x99\x0b\xe1\x16\xef\x1c\xe9\xba\x12\x27\xc7\x32\x88\x46\xd4\xe3\x57\x5c\x3d\xac\xb9\xf7\xd9\x05\xc2\x3b\xaf\x6a\xb3\xa3\x59\x12\xd8\x90\xc1\x4b\xa6\x12\x69\x03\xad\xb8\x3e\x2e\xf6\x40\x68\x5f\xe8\xb9\x7e\x0a\x68\xf5\x47\x01\xf8\x20\xa0\x14\x96\xd6\xd1\x80\x76\xb5\xa5\x4e\xa6\x61\x32\x1a\xe3\x65\x66\xfc\x3b\x0b\x3d\x1b\x04\x51\x54\xcf\x82\x34\xd7\x98\xab\x74\xf1\xf4\xfb\xd3\x7f\x3f\x28\x22\x12\xe8\x55\xc0\xc3\x76\xf0\xc4\x0b\x4a\x88\x88\xbd\x77\x86\xa4\x0c\x97\xfe\xad\xd8\xe4\x4f\x10\xcb\x8a\x36\x1d\xf0\x68\xb2\x17\x82\x80\x7e\x0b\x08\xd0\x85\x8a\x1f\xab\xbe\xae\x92\xbe\x74\x84\x98\xc1\x66\xaf\xc2\x6d\xf0\x75\xdc\x55\x10\x3e\x17\xcc\x16\x0e\x2b\x9a\x1d\x68\x39\x7b\x50\xda\xbe\xfc\
xdb\x69\x04\xcb\xa4\x7c\xfa\x17\x9b\x90\xa1\x88\xb7\xf9\xbb\xe7\xdf\xba\xdf\x1c\x79\xcf\xf5\x26\x75\x0f\xb6\x69\x73\x52\x9d\x10\x37\x41\x78\x79\x85\xe3\x68\xfa\x48\xf5\xeb\x54\xd1\xe9\x93\x5a\x9c\xb0\x45\x90\x3b\x29\x58\x5b\xa5\x7f\x8e\x2d\x35\xbb\x7a\xff\x49\xe7\x0a\xea\x15\xee\xb9\x4b\x66\x39\x73\xc8\xae\xa4\x92\xf0\x6a\xb4\x05\x2f\xcb\x67\x4c\xff\x9c\x5c\x63\xd3\x8c\xe0\xba\xb2\xcd\xe9\x44\x69\xd9\xfb\x9c\x8a\xce\x8e\xea\xa3\x43\xad\x46\x18\x57\x87\xcc\x31\x2e\x37\x8a\xdd\x6e\x5f\xc4\xd5\x28\x14\x11\x05\x78\xc9\xaf\xe7\xb2\xa8\x2e\x54\x5c\x10\x61\x49\x21\x3f\x13\x62\xb3\x47\x1a\xac\x2e\x6f\xac\x88\x3f\x0a\xac\xe0\x86\x75\xe9\xea\xbc\xbe\xe6\xbe\x73\x88\x3e\x68\x0c\x34\x1b\xc7\xed\x67\x75\x53\x13\x50\x66\x53\x4b\xbe\x9b\xd2\xfe\x7c\x06\x81\x7d\xd0\x8d\x6a\xce\xfc\x60\xb9\x9a\xf8\x13\x69\x02\xe1\x21\x44\xbe\x26\x9f\x44\x21\xd2\xf6\x37\xcb\x35\x53\xcb\xfc\x94\x20\x85\xee\xd5\x43\x2d\xa1\x24\x3d\x98\x93\xe7\x98\x52\xbe\x6b\xdd\xf3\x02\x5e\x7f\xb0\x0d\x10\x4a\xf5\x2c\x88\x52\x3d\x93\x0c\x3f\x85\xb6\x9b\xba\xb6\x04\xd1\xc0\x50\x7d\xd0\xeb\x18\x77\x18\x52\x95\x84\xdc\xa1\xea\x0f\x7b\x26\x3e\xf5\xf2\xea\xad\x05\x54\xc8\x16\x1c\xbf\x5b\xd1\xf1\x86\x5a\x19\x2d\x44\x0f\x64\x58\xd2\xb0\x96\xd5\x69\x1e\x95\x9f\x5d\x0b\xa2\x08\x56\x3e\x6c\x78\x39\xa8\x09\x5c\xc4\x27\x48\x64\x4d\xb7\x95\x46\x8e\x46\xba\xdf\x81\x01\x32\xcf\xd8\x55\xc2\xf4\x60\xf1\x11\x77\x80\x25\x40\xf4\x00\xa1\x50\xcb\xe2\x75\x5c\xb5\x4c\xf3\xa9\x88\x37\x50\x11\xfd\xd8\x18\xfe\x09\xba\x1d\x6b\x33\xc8\xe0\xa0\x69\x49\x1e\xa2\x0e\x5d\xcc\xf5\x7a\x0f\x19\xec\x7a\x04\x00\x58\x52\x63\x52\x38\x52\x8d\x95\x3f\x41\xb3\x1d\xae\x30\xd1\xb2\xd4\xc5\xbe\x75\xd6\x4c\x10\x4d\x08\x76\xfa\xcf\x29\x01\xef\x16\x32\x27\x92\x46\xee\x43\x2a\x3d\xd2\xac\x66\x23\x1c\x2d\x3e\x36\xf7\x6d\x1b\xef\xc5\x00\xb6\x1b\x98\x96\xd3\x79\xfd\x13\x3f\xfa\x61\x1a\xdc\xe4\x0a\xf3\xf1\xdf\x1c\xb2\x43\x8d\x81\x3b\xff\x1b\xf3\x87\xad\xef\x4a\x6b\x3a\xf3\x56\x4a\xb7\xb2\x26\x84\x28\x2a\x3d\x7c\x8e\x0b\x9d\xe4\x53\x53\x31\xe9\xb4\x0c\xe2\x28\xc1\x9c\x65\
xee\xb2\x3d\x00\x8d\xb7\x3e\x86\x41\x9f\x47\x13\xea\x94\xc7\x4a\x5d\x43\x85\x1e\x24\xc7\x27\xff\xf3\x9e\xe5\xb5\xd3\x6a\x3e\x29\x5c\x4c\x55\x37\xb2\x11\x40\x75\x36\xbb\xdd\x72\x02\xe3\x39\xeb\xe0\x6f\xb6\x16\xc9\x14\xd3\xdd\xae\x3d\xbd\xff\x9e\x5f\x93\x86\xbf\x1e\xf2\x89\x72\x6b\xe3\x9a\x4f\x75\xb9\xa2\x60\xc3\xf3\x27\xfe\x36\x23\xda\x6d\xe7\x54\x7c\x12\xab\x7f\x79\x61\x57\x76\x84\x77\xec\x0e\xa2\xad\x88\x4f\x4a\x50\x7a\x36\x18\x00\x9d\xe9\x18\xc4\x0f\xa6\xc1\x0a\x92\xf8\xfb\x41\x25\x0c\x0f\x0d\x96\x55\x70\xaa\xfa\x28\xd0\xb8\xd0\xec\x56\xb4\xee\x40\xc1\xee\xdb\x75\x97\xc6\xc3\x2d\x59\xb9\x64\x08\x6f\x1e\x63\x40\x08\x2f\xb3\x93\xec\x1c\x9c\x4d\x18\x75\xb1\xf0\xf5\xb5\x10\x8c\x07\xdc\x7e\x51\x6b\x59\x71\x7a\xaf\x1d\xa8\xbb\x64\x50\xa4\xdc\x56\x62\xbf\x42\xcd\xb6\x3b\xc7\xdf\x78\x26\xe9\x88\x78\x1b\x42\x8e\xaa\x5d\x3b\xb6\xc4\x54\x3d\x36\x1b\x9a\xc3\xdf\x91\xb5\x58\x28\x59\xb7\x4b\x95\xc6\x67\xbf\x6a\x63\xa1\x24\x3e\x6b\xb2\x5c\xb3\x82\x08\x1d\xe1\xae\x95\x6f\x52\x9d\xa3\xbd\x4c\x14\x2d\xfd\xf9\x60\x35\x03\x69\x78\xb1\xc1\x6f\xb4\x5f\x05\x42\x12\x4d\x04\xc6\x29\x1e\x15\xb0\xfa\x70\x9d\xec\xf6\x7b\x0f\xb5\xc4\xcc\xf0\xde\x97\x67\xf5\xdd\xaf\x30\x23\x74\x6f\x2e\x2c\x54\x0e\x7f\x8d\xaa\xd2\x7a\x7f\x4f\x0e\x55\xd9\x33\x22\x1c\x76\x01\xc2\x6f\xf7\xba\xdf\x0c\x54\xb5\x83\xdd\x2e\xcb\x0a\x13\x89\xd3\x7f\x50\x7a\x8b\xfa\x52\x86\x4c\x26\x8a\xe3\x9c\x12\xdc\x00\x42\x89\xee\x31\x8f\x89\x5f\x37\xfa\x63\x6d\x1f\xb5\x36\x94\x7d\x1c\xaa\x5f\x38\xc6\xf0\xc8\x2a\xe7\x40\x00\xbd\x2e\x94\x44\x04\x78\x81\x88\x4a\x59\xd3\x87\x14\x1a\xfa\xd3\x51\xe3\x01\x37\x62\xae\x29\xb7\xa6\x82\xb6\xac\x06\x8c\x81\x29\x6e\x5e\x35\xcd\xed\x2f\x07\x01\x97\x3e\x09\xa3\xe2\xaa\x43\x0a\x1e\xab\x1d\xd1\xa4\xb6\xb4\x70\x41\x90\x5d\x15\xd1\x82\xed\x94\xa2\x81\xbd\x19\x46\x66\x58\xc5\x11\x7e\x3e\xfb\x9e\x71\xbf\xbc\x54\x71\xf9\xa9\x61\x25\xd1\x5f\x1c\x40\x8b\x47\x82\x96\xe0\x29\x3b\x8b\xc4\xab\x2d\x60\xa5\xd4\x60\xca\xca\xb3\x35\x8d\x10\x45\x6c\x30\xb0\xbc\x9a\x7c\x8d\x75\x6c\xb7\x70\x2e\x4e\x12\x40\x66\xdc\xbb\
xc2\x1d\xbf\x0a\x99\x8d\xdf\xcc\xc4\x22\x8b\xa0\xd5\xe9\xff\x0c\xcf\xfe\x0a\x80\x8b\x7b\x80\xad\xdb\xdf\xbd\x19\xd3\xb7\xb3\x6b\xbe\x4d\x39\xe1\x19\x13\x26\x8b\x3e\xb7\xa7\x35\x10\xfd\x0c\x2d\xba\x0b\x30\xd4\x05\xa6\x97\x72\x31\xf9\x74\xc0\x33\xea\x90\x78\x65\x47\xd5\x66\x55\xf9\x5c\xc0\xa6\x5a\x19\xe3\xaf\x9d\x3e\xa1\xcc\xdd\x3f\x29\x22\xe5\x0e\x06\x98\x8b\x10\x1d\xa1\x9a\x6b\xbd\xb8\x69\x4b\xe4\x2f\xe7\x8f\xd4\x7b\x42\x4d\xa8\x39\x2c\xec\x5e\xe3\xe2\x80\xce\xd2\x77\xd8\x3c\x78\xe6\x14\xc2\x17\x18\x0f\x47\x69\xe4\xed\x13\x11\x3f\xbd\x71\xc7\xb7\x11\x3b\x39\x24\x33\x08\x13\xbd\xa8\x95\x73\xec\xeb\x40\xc8\xe2\x7a\x58\x2a\xd7\x27\x57\x96\x64\x3c\xaf\x2e\xef\x16\xa2\xb6\xe9\xb0\x54\x64\xd7\x24\x5f\xf4\xd3\x47\x51\xd2\x83\xe8\x52\x82\x4b\xef\x03\x8a\x9f\xe3\x97\xed\xd1\xa9\x62\x3b\x6f\xca\x43\x11\x89\xe2\xbf\x99\x97\xf2\x0e\x84\xc1\xa5\x93\x4a\x24\x15\x4d\x0f\x65\xfb\x02\xbb\x99\xd9\xac\x87\x06\x54\x48\xde\x6d\x0b\xf3\xcf\xf2\x20\x7a\x15\x23\x92\x67\x62\x7e\x40\x2e\x5a\x24\x8b\x13\x6d\xef\x53\xab\x63\x7f\x90\x51\x65\xb2\x82\x25\x3b\xa4\x1c\x3d\xb0\x92\xae\xc5\x71\x59\x63\x31\x06\x28\x4d\xfa\x70\xea\x94\xc0\xcb\x3a\xdd\x5f\x03\x11\x15\x0d\x14\x94\xf1\x4b\xda\xdd\x98\xf6\x30\x33\xa6\xd9\xfe\x8b\x8f\x6b\xe8\x9a\xf9\x13\x87\xf8\x03\x0d\x6f\x59\xd7\x79\xa5\xad\x58\x63\xab\x09\xff\xbd\x74\x5e\x1b\x7e\x8e\x7d\xbe\x68\x82\x7e\xc3\xfd\xd4\x4a\x5a\x31\xfa\xf6\x02\x6b\xfa\x5e\x38\xbf\x7a\x55\x62\x22\xd0\x45\x19\x6c\x12\x21\xf2\xa8\xa9\xc4\x3c\xdd\x28\xeb\x7c\x9e\xa9\xf5\x48\x1b\xb0\xbd\x41\x93\x5f\xab\xfe\xb0\xca\x4c\x77\x0b\x76\x82\xc0\x13\xfc\x25\x6e\xb9\x7a\xd7\xf2\x32\xb2\xd8\x24\x34\xbb\x18\x64\x21\x45\xd8\xa1\x8c\xc6\x28\x95\xb7\x05\x87\x82\x50\x71\x9e\xc6\x87\x02\x38\x78\x67\x5c\x56\x15\xc7\xad\x23\x43\x1e\x2f\x0d\xf7\x31\xba\x18\x31\xe7\xec\x53\x90\xd7\x82\xd0\xb9\x95\x1b\x14\xb8\x54\x68\xf4\x4c\x60\x6b\xf9\x61\xe1\xa2\x21\x09\x8a\x2b\x92\x7f\x7f\x73\x80\x53\xa4\x78\x17\xc9\x42\x37\xd4\x7d\xeb\x9b\xca\xcd\x48\xa0\xfc\xe0\x22\x62\x97\xa8\xc4\x7c\xa8\x5b\xc4\xce\xd9\x25\xd3\
x71\xeb\x4b\xc3\x28\xbd\x83\xd8\xee\x42\x79\x0a\x91\xa4\xd8\xd8\xb0\x55\x6d\xb7\x53\x6a\xfe\xd3\x07\xba\x9c\x82\x93\x2c\xe2\x6c\xbd\x5a\x6c\x4d\x4c\xa0\x44\xbd\x05\x02\xe7\x5c\xb7\x9a\x9c\xaf\xb5\xdf\x4b\x97\x57\x94\x18\x36\xbd\x90\xc7\xf8\x05\xe9\x09\xb0\x60\xa4\xc5\xc5\x72\xd7\xad\xac\x72\xf5\xd5\x92\x05\x22\xc0\x64\x89\x1e\x00\x39\xe2\xb1\xc3\xb9\x58\x98\x95\x6f\xd1\x27\xc1\x66\xce\xab\x50\xe7\xd0\x2d\x38\x6b\xc1\x7b\x70\x15\xdc\x39\xcb\xfa\x75\x8f\x01\x7d\xf0\x00\x97\xba\xae\xc4\x2e\xd1\x00\xc9\xa5\x31\x8a\x4a\x77\x87\x83\xf2\x46\xab\xe5\x9d\x4b\x02\x8b\x66\xf0\x8a\x3a\x1f\x0b\xb8\x53\x82\xd2\x0a\x27\xb0\xfe\x8f\x0b\xff\xbd\xa7\x24\xc3\x48\x05\x1e\xc0\xf1\xb3\x7a\xd0\x09\xd0\x8c\xc0\x53\x52\x99\x74\xa4\x90\xd2\x94\x7a\xf7\x6c\x17\x7c\xe9\x89\xaa\x39\xaf\x5d\x91\xd3\x77\xdf\x34\x92\x33\x59\x8a\xc0\xa6\xee\x41\x7f\x05\x07\x09\x76\x92\x86\xc3\x1f\x67\xee\xca\xdc\x39\xd3\xbc\xaa\xed\xda\x1e\x77\xcb\xc1\x7a\xe5\x42\x9b\x0a\x97\x0a\x6e\x4f\x1d\x4c\xe2\x9d\x25\x12\xe0\x0a\xa7\x48\xf7\x6a\xb4\xb8\x15\x00\xe8\xf5\xce\x6f\x83\x57\x74\xc3\x81\x0c\x01\xdd\x6e\xc7\x77\x24\xeb\x9e\x99\x2e\x0f\x80\x05\x7d\xa1\xb9\xa3\x04\x29\x2e\x65\x81\xca\x40\x1b\x10\x5e\xab\xc3\x1e\x12\xa7\x24\x94\x6d\xd0\x4b\xfa\x67\xd2\xd9\xf6\x1e\x9b\xfc\x8a\x04\xa7\x5b\x6d\xa2\xf0\x16\x8a\x4a\xf2\x2d\xa5\xcb\xdd\xff\xb1\x9e\xdd\xa5\x19\x33\x18\x6d\x48\x1e\x2f\xc0\x57\x47\xbe\x10\xeb\xca\x1d\x07\x24\x8a\xbd\x10\x25\x6f\x61\x1b\xc9\x5d\x94\x9f\x2f\xe4\x28\x25\x67\x92\xa3\xbb\xaa\xc3\xfd\xd8\x9b\xb0\xd8\x46\x1f\x7b\xbf\xa6\x84\x58\x9f\x9c\x39\xff\x31\xf6\x20\xf2\xef\x0a\x97\xcb\x9a\xf6\x88\xd1\x25\xaf\x34\xff\xac\xe9\xfb\x8c\x97\xda\xf9\x9f\xe4\xa1\x94\xc1\x7b\x10\x65\x5e\xc7\xe9\x3e\x47\x16\xc0\x7f\x3a\x57\xa9\x76\x43\x8d\x80\x4e\x55\xb2\x6a\xa1\x61\xfa\x00\x06\xcb\x39\x71\xf4\xc3\x01\xa5\xad\xfc\xdc\x08\x34\xb3\xf3\x60\x9a\x98\x1c\xb8\xf0\xc2\x5a\x56\x2a\x4d\xf2\xe0\x91\x17\xe2\xbd\x6e\xf9\x0a\xfb\x86\x15\xd7\xcf\x48\xa3\xc2\x26\xfa\x29\xee\x7e\xf1\x16\x5b\xc0\x64\x61\xd1\x1d\x3d\x3e\x34\xa1\x65\x9c\x5e\
xbf\x86\x33\x59\x07\x5e\x6e\xe9\x8b\xfb\x75\x6b\x69\x2f\xb5\xa8\xae\x30\xe0\xf0\x91\xac\xe9\xb8\xad\x22\xbb\xe2\x3f\xa5\xee\x5a\x61\x72\xcf\x1d\x01\xc0\xaa\x85\xb3\xc3\x57\x3a\x37\x56\xf1\xb3\x1a\xed\xd9\x37\x9a\x11\xcb\xaa\x0b\x2d\xaf\x4d\xbd\x75\x00\x28\xec\xdf\xe3\x07\x43\xe9\x6f\xc1\x37\xe3\x26\x21\x04\x9e\x5f\xbc\x74\x69\x45\x5b\xad\x82\xb2\x43\xe1\x69\xe3\x02\xd2\x22\x10\x92\x77\x20\xc1\xcf\x88\x32\x42\x95\xfb\x45\x76\xe9\xbe\x23\xe4\xe8\x06\x40\xf8\xc9\x25\x95\xab\xe7\x44\x74\x66\xc2\xec\x36\x63\x86\x51\x5d\x2f\xe8\x19\x26\x12\xc6\x41\xf4\x97\x3b\xa3\xad\xd7\x57\x3d\x69\xf7\xec\x93\x80\x95\xdb\xa0\x5d\xbe\x6e\x96\x36\x7a\xb6\xfd\x43\x1e\x10\xe6\x5e\x58\x76\x6e\xba\x1c\x3c\x13\x81\xbc\xb7\xd4\xbe\xfb\x5f\x45\x39\xce\x4d\xf1\x8e\x49\xef\x09\x58\x41\x19\x9b\x3f\xbf\x85\xd2\xf9\xc2\xa0\xda\x86\xaf\xa8\x33\xb5\x16\x4a\xd2\x70\x14\x06\xa3\x92\x5d\xd4\xbf\x9d\x8c\x4a\x1b\x8f\xc1\x91\x85\xe7\xc4\x7c\xbb\xdb\x18\xcf\x71\xea\x87\x66\x43\xb6\x20\x88\xf5\xf7\x6b\xa0\xde\x86\x86\xc1\x16\x3a\xe6\x14\xa4\xa4\x70\x03\x38\x97\xa6\x75\xe0\x15\x3d\x8d\x9a\xd3\x8c\x25\xb9\xe0\xec\x6b\x78\x28\x33\x5e\x25\x06\x9f\x16\xe3\x8d\x7c\x45\x60\x8d\x65\xa7\xe7\x7d\x08\x40\xf3\x2f\xcb\xdd\x26\x4b\xa1\xd4\xd4\x5c\x51\x86\x7e\x00\xc8\xf6\x10\xf7\xb1\x7e\x73\xa2\x2e\x44\x5b\x1b\x56\x3b\x78\x96\x35\x2f\x91\xa9\xb9\x45\x60\x72\x93\xcb\x19\x8c\x50\x0c\x77\x25\x5f\x4e\x99\xca\x3b\x68\x02\x7c\xc4\xe1\xda\xf6\x21\xd4\xb0\xd4\x33\xe3\x83\xde\x0a\x11\x71\x06\x20\x28\x75\x00\x2a\x0d\xf8\xd1\xd9\xa3\xba\xad\xd0\xc1\x3a\x76\xaf\x0f\x7d\x79\x1d\x58\x4c\xdc\x2d\x67\x29\xd7\x82\xb5\x4c\x1e\xff\x5a\xd4\x76\xad\xe7\xde\x89\x7c\xf4\xb0\x5e\x15\x24\xfa\xa4\x2e\xd5\x1d\xd8\x0f\xff\x89\x39\x38\xb9\x4a\xd0\xb1\xe8\x84\x12\x31\x47\x77\xfb\x2a\x6e\xeb\xbd\x03\x2e\xf0\xa9\x78\x7e\xcc\x1a\x1b\xb1\xcd\x69\xd5\x2e\x57\x83\xd3\xc5\x60\x73\x96\x22\xbf\x81\xbb\x3d\x1a\x00\xd6\xa1\xdb\x6d\x5c\x9b\x2a\x1a\x78\x3c\x7f\xc0\xd2\x70\x59\x81\x6b\x23\xe7\x18\x02\xbd\xda\x11\x18\x08\x1c\x25\xfe\xe9\x5b\xf1\x26\x8d\x5d\xf6\xf6\xb3\
x70\x8b\x28\x82\x81\xac\xf9\xa8\xe6\x65\x3f\x1c\x03\x9a\x08\xe6\x6b\x2a\xea\xa0\x8e\xee\xba\x07\xd1\x94\xbe\x3b\x45\x3f\xb1\x36\x10\x60\x98\x8c\x4b\xe7\x5c\x62\x77\x01\x41\x55\xd9\x61\x64\xbe\x01\x48\xb6\xdd\x93\x8b\x62\x19\xd4\x7d\xb0\xc7\xe4\x64\x0b\x34\x77\x53\xe2\xf9\x18\xd1\x1f\x0d\x09\xf6\x4d\x7d\x65\x2c\x37\xf9\xa7\xc7\x52\x43\xe4\xd8\x4a\x7a\xa8\x8c\xfd\x20\x47\xc0\xb1\x9c\x28\x10\x8c\x5d\x0b\x7e\x2b\xfe\x3c\x6f\xe4\xfa\xcd\x75\xe1\xc0\x6e\xfa\x91\xf6\xc8\xde\x4b\x5d\x1a\x14\xd4\xaa\x9d\x56\x9e\x02\x02\x22\x9f\x57\xbf\x58\x7d\x7a\xe3\xb2\xee\xdb\x36\x62\xbb\x7c\x41\xd7\xc1\xfc\x1f\xeb\x86\x5b\xf0\x39\xa7\xa4\x43\x35\xf5\x8b\x2f\xd1\x55\xda\x79\x56\x91\xab\xa6\x91\x12\xac\x2c\x14\x7d\x7f\x3f\xde\x5e\x82\x1d\x1d\x93\x4f\x0f\x13\xd0\x8c\xb2\x39\xae\x33\x9c\xed\xe2\xd5\x27\xde\xb1\x38\x10\x9a\x45\xc7\x2a\x3b\x10\xfa\xca\x46\xc2\x8c\x10\x74\xc1\x4e\x6f\x4f\x44\xb6\xae\x79\x5c\xa6\x9a\x5e\x3c\x4e\xda\x2b\xa6\x0c\x81\xdd\x23\xc2\x0b\x4b\xa7\x80\xca\xfc\x3d\xb6\x7f\xf3\x78\xd1\x59\xd1\xcf\x80\x62\xb9\xbe\x3e\x09\xa2\xb6\x5e\x0d\xf4\x7e\x27\xd0\x14\x39\x96\x3f\x46\x35\xa6\x6b\x92\x6a\x09\x3f\x55\x3c\xbf\x2e\x74\x1b\xdc\x3e\xf8\xb1\x0e\xc6\x2d\xdc\x8f\x02\xad\xee\xc9\xaa\x25\xb3\x05\x49\xfa\x16\xf4\xb9\x16\x24\x17\xd9\x3a\x3e\x25\x6f\x63\xdb\x4e\xc6\x41\xbd\x42\x25\x80\x79\x06\xf9\x97\x84\xd1\xd0\x25\x3b\x0a\xd8\x55\x4d\xbc\x07\xa6\x28\x76\x55\xdd\x5c\x2a\x21\x60\xb2\xb7\xbe\xf9\xb9\x7a\xc0\xd7\x98\x59\xbd\x70\x41\xad\xfb\x92\x77\x6b\x61\x9d\xa0\x05\x14\xf5\x4e\x14\x80\xdc\xcd\x84\xa8\x39\x72\x8e\x77\x78\x2e\x4d\x77\xc6\x6f\x6b\x46\xd2\x04\x5d\x82\x19\xb4\x16\xb7\x88\x15\x08\x59\x40\x03\x99\x2a\xac\xa8\xa3\x5e\xac\xf3\x0e\x1b\x2f\x6d\x3c\x6a\x1d\x42\xd4\xe1\x26\xda\x32\xda\x2a\x4d\xda\xc8\xa1\x78\xd7\x1b\x52\x98\xf0\x82\xac\x77\xe2\xb4\x23\x27\xf3\xc0\x94\x91\x00\x30\xb9\x36\x79\x11\x65\xbf\x98\x4e\x8e\x6a\xd3\x8f\x7b\x4a\x9c\x2e\x7b\x32\x2e\x3e\x0c\xcd\x0e\xe4\xc9\x2b\x29\xc6\xe8\x17\xe7\x2a\xa1\xa6\x09\x48\xb8\xbd\xc6\x74\xb4\x4d\xc8\x27\xad\x55\xc4\xf2\x66\x68\xba\
x00\x88\x7a\x85\x1d\x7c\xef\x6a\xbf\xd0\x5d\x3c\x85\x4b\xc8\x6e\x1e\x59\xee\x80\xb2\xc7\x4d\x6f\xc4\x75\x5e\x35\x1a\x10\x63\x18\x2a\x1f\xdd\x6e\xc2\xaf\x0c\xbd\x50\x92\x66\x57\xca\xfd\x0d\x86\x17\xa2\x1d\xde\x23\xf1\x72\xf5\x28\x71\x6e\xeb\x37\xff\x10\x7a\xe1\x8f\x7f\x75\x66\x4f\xe9\x39\xf3\x95\xa7\xb5\x93\x55\xab\xd7\xef\xab\x48\xc2\x98\x9b\xb2\xc6\x0f\x86\x84\x38\xb7\xab\x0b\x94\xda\xe4\x9b\xfa\x1d\x21\x60\x9a\x6b\xca\xbb\x39\x90\x9b\xa0\xb0\x65\xea\x73\x5a\x88\xa8\x38\x51\x3c\x46\x45\x84\xce\x2f\x10\x03\xd4\xfb\x08\x21\xba\xaa\x24\x40\xf7\xec\x84\x13\x75\x13\xb4\x80\x40\x4d\x86\xdd\x9b\x28\xdc\x28\x2f\xa6\xd6\xb8\x04\xa2\x29\x8c\x02\xb6\x0d\xa1\x9c\x99\x4e\xce\xe8\xc1\x01\x75\xf2\xb4\x47\xa9\x28\xa9\xa6\x2c\x8e\x94\x03\x80\x2c\xeb\xfc\xb5\xc4\x67\xcf\xe2\x47\xc8\xc6\xe8\x77\xe7\x32\x77\x90\x13\x7d\x83\x24\x99\x1f\x27\xd0\x09\x0a\x94\x4b\x31\x20\xb0\xca\x99\x8c\xa9\x10\xc4\xbd\xec\x7a\x54\x67\xd9\xd2\x21\xe8\xca\xa4\x5f\x17\x4a\x60\x89\xd0\xc9\x21\x9a\x1e\x35\x44\x56\xfb\x67\xa1\x30\xed\xf8\x95\x30\x5d\x54\x32\xee\x58\x6e\x48\x28\xf6\x82\xbc\x0b\xd0\xd5\x82\xcb\x2b\xe3\x17\x82\x6b\x65\x02\x38\x6e\x32\xb0\x8e\xc3\x7a\xa1\x1b\xc4\xb2\x21\x58\x32\x0a\xe1\xba\x8d\x5f\x5a\x28\xa6\xaf\x46\xda\x25\x10\x18\xc7\xe5\x21\xcf\xb4\x14\xc2\x5b\x72\x65\x18\x05\xb8\xcc\xa3\xb9\xd4\x79\x6a\x2d\x71\x8f\xbb\xf4\x06\x90\x5c\x8a\xcd\xcd\xa9\x74\x8b\xe7\x48\x91\x77\x72\x5d\xbb\x8c\xfe\x18\x5d\x1a\x86\x4a\x19\x54\x90\x3e\xff\xc4\x94\xaa\xa2\xfd\x24\x8f\x6f\xf4\x92\x7d\xec\x1a\xb0\x5a\xa1\xc2\xf2\xd0\xd2\x2a\x23\x6b\x24\x0f\x84\xc6\xc1\x17\xcf\xdf\x49\x79\x83\x20\x08\xa2\xb6\xc7\x59\xb3\xf1\x95\x0e\x03\xf9\xb1\x96\x8d\xb8\xeb\x17\x8a\x65\xee\x9c\x5b\x97\x30\x1c\xd6\x74\x5a\x52\x71\x4b\xc4\x54\x1f\xe8\x12\x91\x8a\xf3\x31\xb2\x5f\x1c\xfb\x4b\x2d\x42\xfd\xd5\xdf\x7c\x20\xf1\x33\xb6\xaa\xe7\x86\xe1\x42\xa7\x10\xeb\xba\x0e\x06\x00\x52\x68\xf6\x80\xd7\x89\x66\xbb\x45\x1e\xb4\xe5\x45\xb0\x17\xfd\x1c\xdf\x0f\x24\x16\x8b\x58\xa8\x90\xec\x7d\x4b\x6f\xa2\x38\xb7\x19\x59\xf7\x48\xda\x25\x50\xe1\x67\
xb7\xec\xe4\x89\x56\x9e\x4d\x3f\xf5\xe7\x3e\x2d\x0e\xa8\x0c\x3b\x9d\x02\xb8\x08\x8b\x36\x06\xd1\x7e\xa3\xbe\x1b\xca\x0d\xa9\x16\x57\x1a\x00\x33\x96\xfc\x02\x9c\x30\x90\x89\xc4\x4d\x41\x01\x5a\xfe\xb1\x18\x8b\x97\xb9\x03\x90\x48\x98\x1a\x22\x0e\xa3\xf2\xc2\x9c\x52\x5f\xb8\xf5\xca\x42\x57\xbf\x5f\x64\x6e\x05\xfa\x08\x93\x01\xa7\x75\x59\xf9\x7e\xa6\x8e\x4c\x42\x9b\xe2\x3b\x20\xeb\x1e\xc5\x9a\x69\x46\xd6\xbb\x7f\xdb\x37\xbe\x0d\xa7\x09\x3a\x8b\x24\x62\xdd\xd6\x67\xff\xd7\x89\x60\xc4\xdb\x78\x64\xeb\x54\x74\xe6\x54\x3c\xb7\x85\x28\xa7\x99\xd8\x15\x93\x82\x2c\xf9\x59\x2e\x3f\xd0\x39\x54\xfe\xec\xf4\x4b\x61\xb0\x42\xdd\x00\xb7\x9c\x8f\x20\x0b\xf4\x7b\x48\x26\xcc\x48\x6b\x7a\x8b\x34\x0c\xce\x6e\x77\x5d\xe1\x0b\x33\xf0\x8c\x18\x96\x64\x77\x07\x97\x0b\x19\xeb\x37\x9c\xaa\x83\xbe\xda\x1e\xb2\x7b\x70\x0c\x11\x5a\x73\x5d\xb6\xb4\x96\x05\x9e\xb2\xaf\x3c\xe5\xe5\x4f\xc7\x29\xba\xbc\xe3\x99\x30\xd4\x63\x24\xd2\x70\xb8\xee\x18\xd1\x16\x43\x44\xf6\x6c\xfc\xca\x65\x0d\x38\x4e\x0f\x24\xe7\x9a\x70\x60\x67\x21\xf7\xd5\xe9\xef\xe4\x67\xdf\xf0\x42\xb8\x9b\x54\xef\x55\x75\x61\xdb\xe0\xdc\x20\xf0\x8a\x47\x1b\xad\xab\xee\x3c\x84\xaf\x8c\x6f\x20\x52\x99\xb9\xf3\xb5\x6b\x40\x68\x6d\x10\xa8\x04\x9b\x30\xb5\xc7\x18\xd1\x0c\xd9\x8a\xab\xc3\x45\x45\xd6\xeb\x6b\x0f\xbd\x44\x5f\xa8\x27\x9a\xc3\x5f\x93\x7a\x74\xc3\xf9\x83\xe2\xfa\x83\x92\xd2\xe5\xb5\x6e\xa0\xfd\xac\xc6\xb2\x8a\xcd\xfc\xdf\x5e\xe4\x69\xf9\x77\x10\x84\xb9\xcd\xc4\xdf\x64\x9d\xd3\x4a\x98\xc7\x96\x22\x1d\x05\x9a\xbf\xeb\xae\xe1\xec\x58\x2b\xdb\x49\xad\x3f\x70\x44\xf6\x4e\xe6\x7e\xd0\x60\x66\x7f\xd3\x9a\x1d\x48\x2d\x98\xef\xb4\x01\xa4\xd2\x4f\xc5\xef\xf8\x16\x73\x98\xd3\xf0\xbc\x50\x92\x03\x8a\x64\x47\x52\xd9\x17\x04\xe9\x00\x2e\x4d\x7d\xf5\x4e\xff\x04\x68\x35\x9c\xd5\x62\xbf\x73\xf9\x09\xac\xec\x0a\x8f\x6f\xa8\x5f\xa3\x83\x67\xdc\x96\xf3\x77\x87\xda\x41\xf0\x89\x97\x36\x37\x55\x91\x07\x42\xe2\xc0\xb7\x6b\x3c\x25\x26\x63\x30\x30\x51\x86\x11\xa5\x37\xb7\x0c\x72\xb9\x71\x98\x8e\xc3\xba\x05\x2b\x5c\x5e\xd3\xab\x04\x7d\xab\xd7\x92\xc8\xee\xd6\
xf4\x34\xb2\x2b\x0c\xd0\xa0\x50\x13\x47\x4e\x8a\x9a\xf3\xe2\x5e\xd8\x0e\x0f\x84\xfc\x6b\xca\x42\xdb\x6d\x0b\x96\x1d\xc8\x5b\xea\xd3\xcf\x2d\x22\x1f\x6e\x3c\x02\x71\x39\x7b\xd5\x5a\xec\xd4\x5a\x1c\x00\xab\x0d\xa6\x41\x3a\x08\x83\x21\x09\xe0\x22\x0a\xa1\x42\x0a\xe0\x2a\xba\x75\x96\xf9\xff\x56\x02\x95\xf7\x77\x41\x94\x55\xac\xb2\x25\x77\x78\xad\x12\xe7\x2d\x2d\xc2\x5a\x98\x60\x29\x79\x0c\xc8\x1f\x67\xcb\x62\xbf\x16\x98\xbf\x65\x64\xdf\x48\x14\x35\xab\x23\xf2\x5e\xb9\x8d\x70\xfd\x0d\x49\xf8\x17\x93\x26\xdb\xd8\x88\x1b\x23\xc7\x92\xde\x06\x56\xb3\x82\x10\x8b\xcd\xa6\xbe\xa1\x67\x02\x97\xf3\xe5\xf4\x89\x57\x7e\xf1\x73\xec\xd0\x8f\x25\x47\x58\x7e\xdf\xf0\xc0\x54\x7f\xd5\xac\x4f\x00\xaa\x20\xa0\xb8\xec\x3c\xf9\xa4\x50\xaf\xe5\x3e\xb2\xc4\x62\x39\x92\x15\x73\xaf\xf8\xe2\xb9\x32\x3b\x20\x6c\xce\x42\x3c\x69\x72\xdc\x04\x55\x1a\x9f\x0b\x35\xf2\x34\x73\x8c\x0c\x75\x4d\x7a\x88\xed\x13\x1f\x91\x6a\x01\x95\xf1\xbb\x6b\x61\xdf\x7e\xd2\xcf\xf1\x83\xbf\x4b\x74\xfa\x04\x05\x3f\xca\xa5\x43\x7c\xda\x08\xa0\x4f\xfe\xfe\x23\x3a\x73\x22\x35\x03\x0a\xda\x10\x5d\x49\x73\x5d\xbe\x7d\xf5\xad\xbf\x94\x4b\xf7\xfb\x28\xe5\x75\x64\x73\xb8\x9a\xb2\xe0\x44\xed\x75\xa0\x81\xcb\x5d\x66\x95\x65\x02\x38\x8f\x3a\xd7\x0f\x0a\x7e\xe6\xbd\x84\x89\xe3\x1d\xb0\xfe\x22\x87\x21\x16\x98\x90\xed\x8d\x66\x85\x04\xff\xe3\x2e\x22\x1c\x96\x98\xe3\x6c\xc0\x19\xf5\xb1\xa0\xcf\xdf\x77\xfe\xad\x29\x6f\xa1\x08\xde\x0d\xb0\xbf\x1c\xdd\x3d\x14\x48\x9c\xd7\xf6\x62\x23\xab\xba\x93\x52\x8b\x6b\x61\x7e\x57\x1b\xe0\xc7\x6e\x4a\xd9\x52\x59\x3c\xc3\x54\x0e\xdb\x5f\xb5\xc4\xee\x1f\x10\x3f\x69\x1d\x4b\x0f\x55\xfd\x96\x74\x5c\xc2\x60\xb0\xdf\x6c\x58\xc2\x20\xc8\x00\x58\xab\x5b\x80\xea\x0a\x8b\xe9\x05\xdb\xfe\xbd\x44\x51\x1d\x1d\xbd\x7e\x86\xf8\x95\x8a\xc3\xc9\x09\xba\x23\x57\x50\xe6\xcb\x5b\x01\x98\x82\xd8\xef\xf8\x6f\x0f\x7f\x3c\x2a\x3b\x92\x1a\x9e\xfe\x18\x9e\xf0\xe9\x9f\x0e\x1b\x45\x14\x40\x94\xc5\xf4\x6c\x32\x2c\x69\xc8\x58\x11\xa5\xf3\x36\x6a\x7c\x39\xea\x04\xfe\x52\xc7\xe3\xb2\xc6\xa3\xea\x4e\x09\x64\x28\x49\x1b\x0e\x73\x86\xe7\
xbe\x5b\x0a\xd1\x01\x80\x3f\x54\x34\x21\x0e\xe6\x1b\xe8\xb6\x30\x52\x36\x8d\x9f\xf3\xaf\x3e\x7c\xdf\x89\xbc\xd0\x26\x7f\x55\x37\xa3\xbf\x16\x87\x45\xdc\xa8\x76\xf3\x9b\xd1\x46\xef\x58\xa8\x3f\x01\x10\xb3\x95\xce\x81\x60\xa3\x32\x69\x01\xb8\x60\xeb\x21\x7e\x49\xbf\x5f\x03\x35\xd5\xd4\x21\x28\xad\x29\x55\x47\x3a\xc2\x07\x58\x66\x86\xf8\x36\x6c\xef\xd7\xbe\xd0\xb8\x54\xba\xc3\xab\xe8\xc5\x87\xc6\xd9\xfe\x31\x88\x40\x9f\xfd\x1e\xf2\x23\x68\xdd\x99\x1d\x56\xec\x55\xf9\xbb\x69\xc7\x55\xda\xf7\x8d\x13\x0f\x97\x40\x81\x27\xaa\x09\x3f\xbd\x1e\x3f\x3d\x0a\xe0\x01\x30\xa5\x40\x5c\x52\xf1\x65\x47\xd2\x31\x39\xb3\x10\x5e\xd2\x70\x58\x92\x70\x68\xe2\x58\x4f\xf2\x78\x75\x87\xc5\x16\xd7\xa2\xec\x62\xc6\x45\x06\x81\xc1\x0a\xb3\xe6\xc7\x87\xb6\x3c\xe8\x74\xd6\xdb\x73\xb9\x4a\xb6\x62\x0a\xb0\xaa\x8d\x0e\x56\x43\xb2\x09\x34\x25\x0a\xc9\xc1\x3e\x47\x4e\xf0\x01\x17\x1d\x80\xbf\x1d\x5f\xe3\xb6\x16\xbb\xf2\xa5\xcd\xfe\xe6\x32\x33\xbf\x9a\xaa\x2a\x59\x70\x2f\x56\xcf\xcc\x57\x70\x1d\x3d\xe1\x8d\xa1\xc2\x5a\xdb\x6d\x0b\x62\xbc\xb0\xc0\x4d\x91\x2b\xfa\x6d\x60\xaa\xc8\x1e\x1b\x6b\xce\x8e\x38\x2a\x12\xbb\xd1\x3b\x81\xf5\x0f\xe0\xe8\x74\x67\x59\x9b\xb2\x56\x38\xa5\xa7\x4b\xc1\xc7\x78\xdc\xd7\x3e\xb2\xfd\x1f\xeb\xe4\x57\x24\xf3\x01\xcd\xf6\xe4\xaf\xdb\x79\x99\xe9\xdc\x82\x7f\xa4\xb2\x91\x5b\x28\xc9\x8c\x11\xf0\x6f\xa7\xc2\x68\x8b\x25\x2f\x2f\xdc\xd6\x91\xf6\x7c\x17\x04\xc9\x36\x53\xdf\x7d\x0e\x07\x27\xde\x43\x2d\x38\xa1\xcc\x18\xdf\xc0\xdf\x3f\xfd\x33\x20\xd7\xff\x18\x7c\xaf\x32\xa4\x38\xbb\xc5\x71\xdb\x29\x42\xc8\x51\x6c\x6f\x28\x8c\x96\xb4\x1c\x66\xd5\xd5\x2c\x57\x65\x01\x89\xbb\x41\x74\x7d\x12\xfd\xf5\x07\xc5\x7b\xc6\x96\xb9\x3d\xc9\x86\xa8\x31\x32\x31\x50\x40\xc7\x4d\x06\x25\x8e\x01\x19\x50\x6a\x9e\xbc\x12\x0d\x82\x2e\x48\x1a\xe8\x3b\x8a\x24\x07\xfc\x8c\xb9\xa1\x09\xd1\x6c\xb6\xf0\x3c\xd1\xdc\x73\x84\xb2\x4f\xa6\x35\xd1\x28\xca\xcb\x7f\x33\x23\x3a\xab\xc1\xf6\x07\x95\xb5\x55\x1b\x53\xf8\x5f\x0b\xe2\xee\x9b\x1d\xe6\x44\x42\xa2\x02\x5a\xec\xe3\xf9\xf0\x11\x97\x86\x54\xdb\x4f\x2b\x72\x32\xc7\x62\
x5d\xb3\x2e\x73\x3a\x3e\xf9\x35\xaa\xe3\x32\xa7\x85\xa9\x67\x7f\x7c\x2a\x2c\xd4\x54\xad\x28\xe9\x07\x8a\xbb\x6f\xe9\x93\x16\x27\x92\x79\x17\x59\x8c\xe8\x41\x5a\x5c\x23\x2d\x9c\x7e\x20\x14\x0b\x17\x2b\x9c\x62\x65\xe8\xd7\x5d\x4c\x3f\xc3\x65\x87\xba\x62\xbb\xad\x54\x5a\x56\x37\x7d\x34\xc8\x2e\xaf\x05\x53\x2b\xf2\x73\xb0\xdb\x91\xd3\x65\x4d\x47\x3c\x67\xf0\x57\x5f\xcb\x71\x59\xd3\xa6\x0f\x7e\x67\xb6\x92\x79\x38\x2e\x4b\x92\x86\xfb\xc6\x3c\xd8\x6f\x5e\xf5\x56\x71\x25\x50\x6a\xc2\xa4\x4d\xeb\x71\x0b\x0d\x23\x22\xc8\x37\x1e\xe1\xc3\x46\x52\x59\xfc\xcd\x86\xb2\x56\x17\x0d\xaa\xb8\xbb\x33\x86\x4b\x97\x27\xcf\xfe\x31\x33\x3d\x06\x97\x12\x0d\x49\x2a\x36\xbb\xc7\x10\x3b\xfe\x1c\xa7\xf6\x84\x4a\x23\x71\xb2\xcc\xf4\xd1\x65\x4d\x90\xce\x33\xe8\xf4\x25\xb8\x17\xfc\x5c\x21\xbb\x5e\xe1\xdd\x37\xf5\x5f\xaf\xe7\xe9\x22\x0c\xce\x8d\x67\xc3\x5a\x86\x21\x09\xe9\x2b\x43\xa6\xec\x80\xc8\x83\x7b\x64\x9c\x08\xc7\xbb\xab\x77\x9c\x48\xc7\x65\x6d\x7a\xe7\x97\xba\xad\x04\xd1\x55\x14\xc0\x45\xa0\xb9\xfd\xc2\xc0\x96\xd6\x9b\x22\xc0\xc8\xb2\x8b\x51\x80\x85\x4b\x1f\xc5\xad\xc7\x08\xa5\xb2\x20\xf1\xca\xaf\xd9\x06\x84\x0e\x07\xbc\xe4\xdb\x26\x8e\x7e\xdf\x4e\x94\x04\xfb\xd4\xda\x4b\x9a\x93\x57\x00\x60\xf9\x56\x9f\x3f\xb6\x2f\x86\x0d\x71\xda\x15\x29\x55\x46\xa6\x2e\x49\x4a\x1b\x87\x22\xab\xf4\x75\x79\x80\x24\xcd\x03\x18\x48\xb1\xd2\xc9\xce\x93\x85\x3f\x94\xc1\x27\x3b\x5d\x60\x84\x2e\x46\x11\x01\x9b\xd3\x4a\x8c\x0b\x63\xff\xf3\x90\xb9\x75\x71\x71\xcb\x31\xaa\xbe\xa5\xf0\xe1\x0b\xdd\x1d\x61\x7b\x74\x83\x89\x3c\x88\xa2\x60\x9f\x62\x94\xb4\xef\xe4\x2d\x64\x9f\x6f\x63\x21\xb1\x48\x71\x7e\x1b\x5a\x48\xff\xce\xc9\xf9\xe5\xcf\xc2\x46\x23\x56\x0e\x8a\x3b\x0c\x6e\x44\xb8\x5e\xff\xe7\x0d\x5d\xa9\xa3\xc4\xbe\xd4\x9e\x52\xa1\xc0\x8a\xce\x6c\x48\x60\x23\xa1\x1a\x84\x8b\x85\x62\x0b\xd3\xc6\x87\x42\xf5\x5c\x91\x5f\x79\x3f\x04\x07\x0a\x3b\xce\xb1\x31\x4f\x11\x1d\x7d\x03\x2c\x09\x94\x01\x5b\xe8\xff\x10\x73\x56\xb9\xa6\x46\x14\xce\x4b\xc6\xcc\x18\x85\x7b\x04\x5a\xb6\x11\x99\x57\xec\x00\x3a\x0e\x75\xe4\x4b\x28\xcc\xff\x3d\
x95\xd6\xf7\x3b\x25\xb5\x20\x45\x8b\x43\xec\xe5\x80\x23\x83\x3f\xa8\xe1\x38\xd9\x34\xf8\x00\xa2\x4f\xb2\x42\x69\x26\xe6\x38\xd3\x40\x3a\x01\xa9\x48\x64\x0b\x8e\x2d\x20\xa8\x84\x30\x12\x10\x3c\x1e\xfc\x40\xc8\xb2\x11\x91\x4d\xa9\x74\x99\x95\x35\xee\xee\xe5\xd9\xf0\x0f\x4d\x25\x5d\x3f\x1f\x9a\xc4\x76\x30\xf8\xd6\x0b\x86\x55\x85\x7c\xd0\x57\x6b\xc4\xaf\x86\xf1\x8d\x7e\xc8\x8a\x0b\x7d\x53\xba\x94\x05\xa0\x57\xc0\x0f\xa7\x48\xad\x52\x2a\x85\x47\x87\xba\x70\xeb\x71\x69\xe3\x58\x27\xdc\xdf\x1d\x32\xb1\xc8\xb6\x35\x8f\x2b\xd3\x6b\x70\x4d\x78\x6c\xe9\x6e\x8d\x3f\x81\x2a\xcd\x17\x57\x7e\x7f\xbe\xef\x4f\x31\x6b\x59\x56\xb9\x7a\xed\x9c\x19\xe4\x10\x41\xb7\x9d\xdf\x6d\xc1\x91\x47\x2c\xdc\xe8\xd1\x0f\x19\x6b\xa0\xb9\xf1\x7a\x20\x44\x01\x5a\x6c\xc9\x8e\xe2\x14\x50\x18\x04\x57\x21\x58\x06\x13\xa4\x60\xe5\xb5\xbe\xbc\x3a\x25\x55\xaa\x51\xa3\xcd\xdb\x0d\xe8\xc6\x24\x7b\x0f\x54\x7a\xc3\xbb\x8f\x84\x5f\x89\xab\x05\x1e\x04\x5a\x0f\x78\xdf\x7e\x12\x05\x4f\x6e\x89\x9c\xba\x3d\x22\xff\x98\x42\x38\xaf\x6a\x8b\x15\x96\xd8\x66\x26\xd5\x61\xea\x90\x45\x0f\xe8\x6d\x84\x23\x4a\xa0\xd5\x41\x33\x6f\x24\x33\xf8\x88\xb5\xb9\x89\x8e\x9f\x70\xc5\xd9\xb8\xd8\xfc\x37\x5f\xdd\x83\x91\x6c\x5a\x63\x99\x35\x3c\x81\x67\xc3\x1d\x0f\x09\xf6\x1e\x2a\x53\xf2\x14\x08\x68\xb4\x42\xeb\x58\x49\xc6\xa7\x04\xe4\x75\xea\xbb\xaf\xad\xdc\xaa\xb4\x83\x01\x89\x3e\x4e\x2f\x1d\x5e\x11\xcb\xc8\x6c\x79\xc9\x9e\x35\xa1\x04\xab\x36\x63\x2a\x77\xc6\x43\x8c\xd2\x9c\x0a\xcd\xcc\x6e\x75\x5e\xdb\xc2\xec\x7c\xed\x17\x03\x51\xc5\x11\x6c\xbf\xa8\xf4\x1d\xe8\x32\xb5\x84\xe9\xb8\xcc\x1a\x4e\x01\x1d\x0f\xdb\x8d\xda\x6b\x62\x48\xdc\x8c\x31\x6e\x36\x4c\x57\x52\xcd\x09\x1f\x5b\x27\x58\xad\xba\xfc\x83\xd5\xce\x2e\xc3\xa1\xfa\xee\x9b\x87\x53\x94\xe2\x5c\xe3\xc4\xd3\xe8\xbf\xfd\x79\x3f\xce\x7e\x42\x05\x07\xa2\xa6\x83\x39\x41\x96\x6b\x31\xf6\xcc\x14\xe3\x4a\xc3\x6a\x12\xf0\xc4\x81\x54\x24\x83\x34\xbd\x37\x73\xf2\x9e\x0a\x38\x43\x1e\xeb\x55\xb1\x91\xdd\xdf\xdf\x3a\x98\x5f\x06\x60\xc5\xfd\xc3\x68\x04\x69\xe4\x24\x31\x7c\x06\x8c\xfb\x74\xbd\x57\x30\
xb7\x9b\xdf\x2a\xd3\xf8\x52\x04\x75\x86\x90\x85\xe6\x3d\xe0\x8d\xc9\x9b\x91\x9f\xd1\x8b\xcf\xab\xcc\x55\x29\xa3\x6e\x30\x00\x7a\x13\x11\x74\x5a\x95\xf2\xfe\x2f\xe2\xe9\x8a\xee\xd9\xb9\x90\xbc\x87\x81\xfa\x1f\x6b\x87\x52\xaf\x9a\x2c\x22\x11\x85\x35\xc5\x72\x91\x72\xfd\x71\xf7\x0a\x1f\xdb\x11\xc9\x03\xed\x35\x13\x26\xef\xc7\x79\xdd\xea\xf6\xad\xf3\x4e\x7e\x4d\xca\xcf\xfd\xb5\x73\x4f\xea\x5f\xe7\x27\xbc\xfd\x2c\x86\xed\x4b\xec\x34\x20\x64\x73\x53\x49\x79\x7a\x37\x96\x6f\xaa\xdb\x79\xbd\x88\x70\xdf\x58\x34\x48\x87\x6d\x33\x51\xf6\xec\x34\xda\xff\x7c\x87\x0e\x68\x66\x87\xe9\x07\x5d\x44\xc1\x53\xfd\xbb\xe7\xbf\xb5\xda\x70\x85\xdd\xba\x53\x3a\x15\x4a\x1a\x0c\xb5\x7c\x26\x41\xc4\x37\x75\xd6\x3b\x3d\x54\xf0\x38\xef\x8b\x7c\x30\x1e\xaa\xda\xca\xe7\xc3\xef\xf5\x9b\x8f\x23\x59\x9e\x96\x00\xa8\xf7\x61\xf7\xfa\xaf\xee\xac\xe8\xbb\xd1\x86\x17\x04\xef\x0d\x7f\x43\x96\xd1\x23\x62\x94\x1f\xb6\x57\x18\xcf\x9e\x2c\x43\x72\x49\x89\x31\xe4\x8e\x7c\xf4\x4e\x29\x4c\xd7\xdc\xa0\x06\xb2\x6f\x1d\xa1\xbe\xe8\xf6\x4b\x12\x3a\x63\x37\xc8\xf3\x3d\xfa\xf7\xa0\xb8\xe1\xdd\x72\x1b\x0b\xf2\x75\x7b\x61\x35\x69\xc1\x98\x48\xd7\xaf\xe5\x3a\x89\xe0\xf4\x83\xe6\xa2\x17\x4c\x35\x11\x61\x73\xc8\x19\x6d\x7d\xab\x13\x30\x70\xdd\x22\x62\xf4\x45\xf0\xf3\xc1\x62\x61\xd2\x6b\x0b\x14\xc1\xf1\xa3\x1f\x9a\xeb\x7c\x70\x59\xd8\x5e\x9a\xf0\x91\x85\xfe\xd1\xfa\xbd\xca\x36\xe1\x2e\x54\xd6\xd4\xc6\xa3\xc9\xd5\xb1\xed\x70\x4b\x1c\x0e\xcb\x19\x79\xf8\xbe\xb6\x31\x21\xf3\x8c\xfd\xfb\x34\xa8\xf1\xd4\x97\xda\x2d\xd2\xb8\x2e\xa2\xc4\x1f\xac\xba\x7f\x85\xa9\x6b\x49\x1a\xd4\x80\x93\xe6\x50\xff\x2b\x18\x99\x2a\x62\x26\xb6\xfe\x97\xf7\x7e\x11\x8f\xcb\x4f\x10\xfc\xbc\x27\x6d\x85\x97\x7b\x4a\xc3\x6f\xf4\xaa\x9b\xe2\x53\xeb\xa9\xa1\x58\x6f\xef\xa0\xd4\x76\x5d\xe6\xcd\xb1\xc7\xc4\x0f\x2c\x86\x71\x1d\xfd\xca\x5e\xca\x6c\x6e\x7d\x0e\x2d\x6f\x81\xe8\xfe\x4b\x88\x66\x73\x75\xad\x7b\xde\x7e\x51\x63\x8e\x33\xd8\xcd\xf8\x68\xaa\x92\x6f\x98\x15\xd4\xb1\x5b\x89\x1c\x4d\xf3\xad\x47\xb4\xf6\x1b\xc9\xe2\x28\x4a\x34\x68\xa9\x6b\x98\xdb\xd2\x98\x09\
xae\x8e\xad\xc9\x5d\x62\x09\x60\x5c\xc0\xeb\x0f\xc6\xb8\x95\xaa\x0d\x79\x83\x8d\x6c\xce\x8a\x42\xe8\xd8\x76\x9a\xc1\xd9\xea\xc1\xf7\x9c\xfd\xe8\x72\x05\x98\xd7\x7a\x39\x99\x92\x4e\xf0\x7d\xfc\x6b\x66\x9e\x50\xe9\xe6\x35\xfc\x92\x5a\x20\x03\x4a\x1f\xc1\x83\xbf\x75\x69\xca\x81\xb1\x5d\x53\x1f\x2e\xf5\xda\x0e\x75\xa4\x9b\x43\x91\xd1\x12\xb9\xf7\x84\xe6\xe6\xd4\xa7\xc6\x5f\xbc\xd8\x44\x64\xd7\x0b\xd9\x66\x39\x49\xdb\xbf\xb2\x20\xcf\x6f\xbf\xc4\xac\x40\xfa\x23\xda\x88\xa7\xb8\x08\x12\xac\x19\xf1\xde\xf1\x6d\x68\x00\x27\xda\xde\xd4\x9c\x48\x16\xa6\x9e\x18\xcf\xc8\xff\xea\xdd\x4a\x1a\xd6\xb3\xcc\x3e\x4d\x55\x7d\x45\x14\x50\x34\x6b\x3a\x28\xb2\x43\x41\x4e\x46\x92\xf4\x8a\x0e\xe2\x3a\x2f\x9e\x9f\x58\xe8\x98\xed\xa2\x06\x63\xd5\xf7\xb2\x2f\x21\xf4\xbc\x5b\xe3\x30\x74\x7b\x50\xc7\xea\xe8\x8a\x4f\xae\x88\x3d\xac\x89\x0f\x9a\xef\x99\x76\x59\xfc\x43\x01\x91\xb6\x60\x7d\xbc\x84\x97\x80\x2b\x65\xf5\xcd\x42\xfa\x8e\x11\xc6\x80\x4f\xd3\x72\x32\xe7\x0a\x37\x20\xfd\x51\x60\x71\x4e\x5f\xf2\xb8\x67\xc4\x3f\x10\x6e\x49\xd2\xab\x58\x17\x42\x56\xe2\x49\x24\xb9\xcd\xcd\xe1\x5f\xfe\x84\xc4\xd2\x0c\x46\xa5\x9e\x8f\x37\xf3\xe1\x59\x0a\x64\x40\xa6\xe5\x24\xba\x70\xea\x67\x4a\xd1\x8b\xf1\x2c\xc1\xf9\x0c\xf9\xe0\x7b\x85\x57\x3c\xa6\x5c\xa0\x2e\xdb\x0d\xde\x1f\x76\x92\x78\x73\xab\xf7\xba\xb0\x4c\xb6\x0c\xfc\xa9\xcb\xb4\x2d\x0a\x5f\xe3\x6d\xdf\x6d\x04\x70\x6d\x34\x6a\x3c\xb5\x91\x8b\xdb\xce\xf9\x3c\xa0\x97\x48\x8a\x54\xd1\x36\x40\x64\xc7\x0b\x82\xed\x77\xb4\x21\x10\xd3\x81\x90\x9e\x32\xf1\xf9\xa9\xa1\x24\x0d\x23\x78\x35\xa2\x28\xda\x21\xde\x02\x7d\x0b\x71\xaa\xed\x9c\x8a\x2c\xde\xb9\x02\x69\x7e\xbb\xea\xe8\x13\x59\xac\x8f\xff\xd6\xfe\x7b\x4a\x27\x83\xc4\x5f\xe8\xbf\x54\x9a\xe6\x6b\xa6\xd5\xd2\xd6\x6d\x7a\xf7\x5a\x2f\x28\xd6\x3b\x7a\xe9\x70\xa4\x10\xbf\x37\xf4\x6a\xc8\x57\xfa\xce\xf0\x7c\x58\xf1\xfa\xc4\x72\xd9\xf3\x78\x81\x22\x3d\xe7\x55\x68\x34\x17\x3a\xc3\xa2\xde\x10\x22\x56\xcc\x05\x31\xf3\xde\x5a\x9f\x49\x96\xa2\xe0\x9c\x3a\x1c\xa0\x51\xac\x7a\xf0\x5b\xa5\xcb\x60\xcc\xc6\xbf\x9c\x10\x49\x17\x3c\
x2f\x8c\x42\x92\xe6\xbf\x6f\xbf\xb9\xf4\x5a\x5a\x1c\x95\x04\xdb\x6d\x7e\xbf\xfb\x84\x89\x1f\xcf\x62\xb9\x29\x65\x24\x95\x22\xf8\x99\xf5\xcb\x9b\x1f\x6b\x76\xf0\xc5\x85\x6e\x0f\x4e\x11\xd9\x0c\xbd\x25\x44\x18\xe0\x19\xc5\x80\xd8\xb8\xfe\xa4\x10\x74\x79\x42\x59\x89\xb6\xbb\xdc\xb8\x2a\xa5\x40\xce\x9c\x33\x74\xce\x6c\x03\xf5\x61\x76\x1d\x3d\x6c\x57\x35\x2f\x02\xba\xfe\x18\x3a\x7c\x22\xdf\xa9\xf6\xbc\xbd\x39\x51\x80\xd5\xc5\xbd\x22\xf1\x85\xb9\xac\x98\x3a\x33\xd1\x2f\x94\x0d\x26\xbf\x18\xa6\xb7\x25\x2f\xd9\x61\xe2\xdc\x96\xf2\x44\xe7\xc0\xe7\x75\x4c\x55\x9a\x14\xa8\xc4\x1b\xe2\xf9\x78\x40\x8d\x30\x18\x32\xf3\xdb\xb1\x38\xb2\x5b\xce\x67\xf3\x4e\x7e\xf5\x4e\x26\xac\xb2\xaa\x16\x0a\x28\x2a\x6b\xfa\x80\xe9\xf2\x07\x92\x7e\xb1\x4e\x7d\xaf\x2d\xca\x53\xb2\x78\xfa\x44\x71\xfd\x81\x30\x94\x68\xea\x70\x1c\x3b\xf3\xaf\xd8\x11\xe2\x48\xd1\xec\x50\xc4\xea\xb2\x53\x2a\xa3\x0a\xd2\xde\xde\x4a\x21\x79\x83\x6f\xb5\xaa\xe5\x25\x42\xb4\x6f\xb1\xf3\x63\xaf\x0d\xd9\x32\xad\x34\xdc\xe4\x6a\xa6\x77\xc7\x75\x25\xf6\x2b\xa2\x34\x65\x52\xec\xc9\xe8\x36\xd1\x7a\x59\x66\x18\xc8\x5a\x77\x5a\x1b\xbe\xa3\x3d\xf6\x8b\x97\x62\x38\x87\xb5\x40\xa6\x36\xaf\x68\xeb\xa5\x42\x6f\x04\x81\x72\x63\x98\x0e\x48\xca\x72\x8b\x49\x81\xb6\xbd\xea\x7f\xec\x9d\xfb\xda\x0f\x61\xe0\x3d\xaa\x73\xdb\x61\x12\x1b\x32\xf8\x19\x37\xba\x05\x22\xcc\x29\x23\x81\x45\x7d\x77\xcd\x32\x8a\xa4\xc7\xcd\x62\xc0\xcf\x68\x4a\x90\xe2\xef\xef\x11\x37\xb7\xee\x9e\x3e\x7b\x57\xb9\x35\xd1\xc7\xed\xc1\xde\x00\xac\xbb\xfc\x90\x39\x7c\x37\x60\xb8\xed\x8e\x26\x05\x93\xa8\x06\x3b\x04\x46\x04\x69\x10\x2f\x11\x90\xfe\xbc\x1f\xac\xed\x7b\xa5\x00\xca\xcb\xb6\x91\x36\x10\xfb\xa6\x3e\x82\x44\x34\x66\x35\xe1\x00\x68\x20\xa4\x76\x20\x04\x16\xa1\x48\x07\x81\x05\x20\xae\x1e\x6a\x72\xf0\xe0\x3a\x94\x1a\xa5\x40\x09\xe6\xe7\x2c\x66\x76\xef\x1a\xfe\x1b\x03\x95\x29\xc5\xb9\xd1\xb8\x46\x17\xc3\x24\x0d\x8c\x46\x1e\xfb\x75\x2b\xbe\xdd\xba\x6e\x29\xbc\xe2\x47\x48\x9e\x08\x1d\xd1\x71\x18\x8c\xc9\x80\x9f\xd9\x84\x83\x20\x55\x5a\x12\xc8\xd1\x20\x80\x37\x55\xaf\x80\x20\
x51\xe8\x53\xe9\xd6\xd8\x4a\x9c\x5b\x48\x26\x7d\x1a\xfe\x9a\x39\x07\xe3\xbd\x92\x6d\x06\x7a\xb1\xe7\x30\xe1\x52\xa0\xe5\x30\x9c\x1b\x82\xca\x9f\xbf\x9c\xaa\x3d\x9e\xda\x21\xa6\xcb\x26\xc2\x32\xaf\x66\x0b\x31\x5e\x22\xf3\x72\xeb\x2f\xd9\x8b\x41\x8b\xea\xce\xec\xfa\xaf\xd6\xc2\xfd\x7a\xb6\xa6\xec\xb2\x62\x2c\x5e\xe6\x8d\xee\x2d\xe9\xd2\x92\xa0\xb7\xb0\x10\xfe\x84\x95\xeb\x34\x86\x24\xf4\x1f\x28\xe7\x8b\x67\xcf\x08\xa0\x50\x66\x83\xb9\x57\x56\x90\x7a\xe1\x5f\x5d\xef\xd8\xd2\xf7\xd8\x08\x84\x06\xb4\x46\x55\xda\xf9\xd4\xb8\x7b\xd7\xac\xc7\x0f\x7f\x4d\x02\xa8\xd7\x06\xad\x19\x77\x53\x86\x39\xfd\xeb\x97\xd0\xf8\xe1\x4f\x22\x01\x90\x1d\x6e\x3e\x31\x20\x21\x1e\x84\x13\xcd\x76\xfc\x17\x7d\x4f\x0f\xe4\xb2\x65\x4a\x22\xc9\xa7\x07\xa1\x52\x92\x46\x01\x81\x86\x54\xcd\x77\x20\x1a\x1f\x67\xa0\xef\xf5\xbf\x4b\x1a\x1f\xb8\xdf\x83\x29\x80\x8d\xb8\xf1\x15\x8f\xe4\xe5\x4f\x13\x08\xdc\xf5\x61\xa3\x5c\x08\xd9\xd8\x29\xb3\x6b\x6e\xf2\xcb\x23\x41\xef\x64\x7a\x8a\x25\x3f\xb2\x41\xa9\x59\x5c\x77\x10\x39\xa5\x5f\xd4\xf7\xe0\x05\xa8\xe1\x88\x2f\x39\x24\x7e\xc6\x2c\x61\xc0\x98\xb5\x4e\x24\xd6\x11\xc0\xeb\xc1\x00\x75\xf2\x7c\xab\x54\x23\x9a\x77\x20\xa7\xc7\x52\x96\x15\x7f\xb2\x5c\xbb\x12\xfc\xc7\x79\xff\x1d\xec\xd9\x64\xce\xbc\x47\x28\x88\x47\x16\xfe\xe4\xe1\xcf\xb4\x23\xd0\x42\xa3\x85\x52\xac\x32\x1a\x70\xb3\x98\x92\x33\x1b\xf1\x3f\x89\x0a\x8c\xaf\xb2\xef\xdc\x82\xce\x16\xca\x4b\x4b\x7a\xc1\xa4\x09\x82\x24\x6c\x16\x8d\x2b\xae\xcc\x5f\x32\xdb\x27\x65\x41\x3b\x06\x59\xb5\xd3\xf6\xf7\x97\xb3\x23\x0e\x8b\x88\x63\x24\x37\xf6\xad\x30\x47\x4a\x64\x7b\xd5\x1e\xc4\x97\xe7\x80\x89\x25\x27\xa2\x01\xd2\x5d\xf6\x4d\x20\xff\xd8\xb8\x16\x97\x21\x49\xea\x6a\xc0\x2b\x32\x4d\xbd\x85\x65\x6c\xf1\x3e\x33\x49\xfa\x60\x6a\x54\xd4\x1c\xd5\x94\x40\xa5\xb5\x53\x9f\x41\xc9\x44\x51\xc2\x50\xdf\x71\x20\x73\x94\x54\x9e\x86\x15\x04\x1b\xf1\x29\x55\x57\x7a\xb1\x99\x63\xb1\x99\xb3\x0a\x24\x4a\x0e\xb4\xbe\x7d\x52\xde\xfc\x4f\xa3\x69\x7a\xba\xd0\x31\xec\x17\x22\x9a\x70\xa0\xf0\xe2\x35\x37\xc9\x0e\xa8\x37\xf5\xa6\xd2\xcb\x99\x45\
x0f\xbf\xd3\x73\x59\x73\xbe\x3a\x46\x11\x64\x01\x74\xe0\x3c\x1f\x32\xfe\x9e\x17\x0d\xcb\x73\x30\xf9\x7f\xc3\xe5\xea\x5a\x8d\x29\xd4\x5f\xa6\x95\x72\xc4\x54\x5c\xd7\x73\x53\x97\xfc\x9a\xeb\xf7\x79\xe8\xfd\x4d\xdc\x40\x1f\x5f\x28\x11\x9b\x1d\x1e\xd5\xb7\x5f\x6c\x2f\x92\xb4\x17\x3b\x9f\x8b\xa9\x75\x2a\xf8\x95\x32\x2b\x3b\x72\x69\x4a\x21\x9c\x21\x24\x4e\x00\xf6\x62\xeb\x40\x23\x43\x72\x62\x88\xd0\x48\xbe\x9f\xe4\x40\x69\x58\xb3\xe1\xb1\xa6\xe3\xc6\x5b\xc5\xfd\x0c\x0d\x23\xd8\x29\x6f\xc3\x85\xe5\xc1\x85\xf1\xe5\x12\x8f\x01\x8b\xf5\xef\xd5\xe6\xa1\x79\xd0\x61\xb9\x7a\x27\x7f\x3e\x6c\xd8\xe8\x9a\x8e\x3d\x19\xbe\x40\x55\xee\x48\x15\xa1\x34\xd9\xc1\xc4\x6b\x6e\x0e\x93\x3f\x23\x75\x4d\xd4\x87\x53\x60\x8a\x34\x73\xac\x3f\x5e\x8b\xab\xae\xb0\x33\x97\x6c\x66\x92\x10\x0d\xe2\x65\xf4\x79\x10\x84\x77\x46\x57\xd3\x87\x83\x4f\xca\x89\xef\x7f\xda\xc5\x47\x7d\x6b\xf4\x27\x07\xe0\xc5\x15\xac\xd6\x97\xa3\x48\x51\x17\xbc\x0d\x19\xab\xf5\x61\xcd\x4d\x55\x3a\xff\xc9\x9f\x5e\x5a\x12\x2d\x08\xfe\x29\x54\x02\xcf\xa2\x74\xf3\x30\xb7\x77\xa2\x19\x52\x52\x5f\xd8\x9c\x50\xf2\x0a\xe6\xbb\xdb\x9c\x80\xe5\xe6\x4d\x6c\x84\x37\xbc\x8a\x52\x69\x8f\xcf\x72\x02\x63\x3b\xd2\x3b\xaf\xea\x95\x1d\x08\x7d\xa2\x20\x79\x80\x01\x02\x1f\x64\x79\xfa\xf0\x4b\x57\xca\x06\x13\x40\xa5\x0c\x50\x7f\xb8\x41\x92\x6b\x4d\xae\x0e\xd5\x0b\xe1\x42\xf3\xe8\xdf\x79\xd4\x10\x26\x19\xc9\x99\x0d\x78\xf8\x03\x26\x49\xeb\xf1\x32\x61\x9d\xc5\x26\xf0\x3d\x77\x80\xd8\x3f\x83\x29\x08\x82\x8c\x85\x81\x6b\xc2\x78\x19\x94\xb9\xee\xe7\x40\x61\xcb\x91\xac\x10\x88\x6e\xff\x01\x84\xda\xad\xf7\x81\x12\x3b\x29\x21\x1c\xf3\x49\xa0\xf0\x63\xf7\xe1\x26\x8a\xcc\x80\x0e\xd9\xf5\x7f\x4c\x2f\xcc\xbe\xc0\xe2\xbf\x2f\x79\x81\xdc\xba\x81\xcc\x19\x7e\x7e\xc0\xde\x78\xd9\x38\xa2\x10\xcb\x75\xcb\x39\xcd\x4e\x0f\x07\x59\x2c\x44\x19\x9d\x57\xb4\xed\x88\x71\xe2\x7b\x66\x7c\xc9\xe3\x5f\x00\x71\x9c\xd7\x46\xae\x84\xd9\xf4\x80\x34\x83\x12\x80\x5a\xa5\xa5\x76\x2a\x4f\xaa\xcd\x9c\xf6\xca\x6a\xc5\x8d\x76\x83\xdd\x18\x29\x14\xc0\xb0\x38\xcc\x7b\x3e\x65\xfb\x8a\xbc\x01\
xb2\xf9\x51\x26\x7d\xc5\x4a\x4a\xdf\x36\x52\xf2\xc1\x0a\xff\xcb\xdb\xac\x84\xb8\xd6\x63\x96\x4a\x46\x03\xc0\x10\xc3\xb4\xee\xf1\x76\x83\x6b\xea\x4b\xa1\xbf\xbf\x71\x94\x55\x5d\x3b\x3c\x49\x31\xb5\x52\xb3\x29\x5d\x9e\xc9\xa2\xbd\x23\xdd\x86\x62\x1d\x6f\x26\x6f\x63\xb9\xe1\x07\x81\xff\x34\xec\x5f\x8c\xb9\xf2\xb4\x90\xe5\xac\x8b\x7c\x40\x3c\x69\x51\x49\x2a\x5e\x70\x9d\x83\x02\x95\x79\x44\x25\xba\xe0\xe6\x02\x5d\x8f\x4c\xc2\x20\x88\x92\x03\xd5\xf8\x2f\x91\xa5\xc0\xd5\x11\x75\x78\x65\x64\xc6\x3c\xf5\xbb\x88\x05\x1f\x4d\x3e\xcd\xa0\x10\x14\x28\x42\xd6\xfc\xae\xea\x09\x0e\x29\x84\xe6\xb5\xd4\x5a\x6c\xfb\x1b\x7f\xaa\x35\x79\xae\xcf\x9e\x7b\x3f\xa5\xa1\x2b\x87\x15\x23\x6c\x53\x64\x9e\x8d\x9b\xde\xc8\x80\xc5\x1f\xf3\x6a\xd3\xb7\x50\x13\x43\xc6\xb1\xc8\x36\x9c\xfb\x97\x5c\xe6\x45\x90\x9b\xd5\x68\xf6\x9f\x68\xff\xd1\xf7\xc4\x84\xc2\x3d\x2d\x68\x5a\xa1\x75\x84\x92\x8c\x42\x23\x2c\xa3\xb0\xae\xc4\xdf\x77\x1f\x6a\xae\x16\x66\x06\x02\xe6\xaf\xbe\xe9\x2c\xeb\xf4\x7b\xd0\x95\x56\x87\xc5\xd6\xdd\x72\xb2\xe9\xb1\xc8\x9e\xa5\xc8\x9e\x10\x9b\x42\x5f\x79\x20\x39\x5f\xb2\xa3\x1b\x0c\x90\xe0\xf9\x2d\x3e\xf6\x63\x64\x25\xd4\xbc\xde\xc1\x00\xa6\xb1\xbc\x67\xf8\x42\x61\x50\xfe\x14\x9a\x0f\x67\x2a\x46\x17\x42\xac\xe3\x03\xf7\x1b\xcb\xbd\x0b\x5f\xff\xec\xd7\xee\x77\xe7\xf4\x43\x0a\x88\x04\xda\xf2\xa0\x59\x4d\x7f\xf6\xa8\x84\x01\x9b\x5d\xb3\x28\x08\xbd\x27\x06\x03\x9e\x44\x5a\xaf\xe8\x78\x14\x9d\xf8\x36\xb4\x30\x37\xc3\x87\x21\xef\xa1\xa6\x3d\x08\xed\xae\x3e\xb2\xfb\xda\x8f\x48\xcc\x89\x48\xeb\x59\x60\xe1\x10\x7a\x74\x2f\xe5\xfa\x92\x7f\xca\x6c\xc4\xce\x6b\xf7\x1b\x05\x01\x3d\x94\x90\x15\x45\x76\xc4\x19\x9c\x08\x20\x85\x44\x2b\x2b\x2a\x53\x2a\x00\x50\xb6\x95\x73\xe2\x8e\x1e\x5f\x4c\x57\x62\xe4\xcd\xe4\x98\xda\xfe\x63\x34\xff\xa9\xfe\x74\xc4\x6f\xe5\x65\x49\xc3\x13\x2b\xa5\x13\xab\xa4\x01\xef\xe9\x3c\xac\x3a\x7f\x51\x3d\xa5\xba\x13\xfa\xfc\x52\xb2\x78\xe1\x15\x13\xd9\x3a\x7e\xf0\x14\x61\x6b\x46\xf1\x2b\x0f\x3d\x79\x20\x92\xd6\x3e\xc1\xc4\xe2\xf0\xe6\x82\xdf\xf6\xf6\x5f\x9d\x21\x6b\xa3\xb2\x16\x00\
x00\x55\x4b\xe5\x64\xd9\xfb\x6d\x0c\xb6\x32\x27\xa0\x7a\x4a\x99\xe3\x6c\x2c\xb7\x1c\xb8\xcc\xea\x6b\x97\x79\x88\x83\xdf\x7d\x53\xdb\x27\x3f\xdf\x99\x39\x92\x31\x19\x6b\x71\xb9\x20\x5a\x7f\xa0\xbd\x56\xa4\x3f\x34\x1b\x79\xdb\xe6\x51\xdb\x24\xda\x3f\x8f\xd8\xc5\x7e\xa5\xec\x98\xae\x74\x0f\xc8\x27\x7d\x1c\xf3\x7a\x12\x5b\x30\xa0\x4b\x8c\xc3\x0a\x8a\xda\x6d\x2b\x79\xea\xa1\x22\xf8\x42\x77\xef\x89\xa5\xfb\xd4\xac\x32\x0c\x53\x33\xc2\x20\xd8\x81\x3d\xa1\x42\x81\xa9\xdc\xbc\xba\xe3\xe7\x05\x41\x10\xa4\x70\x79\xf5\x1e\xac\x31\xf6\x01\xe4\xa8\xf2\xae\xeb\xba\xea\xc2\xbc\xb0\x32\x28\x31\xf0\xf3\xac\xaa\xb9\xbd\x08\x73\x43\xbf\xe0\xbf\xd8\x1d\x6e\xcb\xe4\x01\x73\x08\x31\xf9\x88\x7f\xc7\x36\x2a\x6b\x7a\x6e\x35\x26\x73\xa7\x51\xfb\x12\x7b\x57\x5e\xb2\x43\xff\xce\xac\x90\x47\x8f\xf9\x1d\xc6\xf8\xd9\x0c\x39\x6f\x0f\x05\x38\x72\xd7\x25\x5f\x3d\xbb\xc2\xc1\x33\x19\xcf\x92\x4b\xe0\x8a\x8f\xee\x09\x83\xc2\xf9\x03\x22\x68\x0f\x86\x6a\x41\xa4\xc1\xef\x54\xb7\x8c\x6c\xb7\xc2\xae\x71\xdf\xf8\x25\x34\x48\x5f\x2f\xbe\x16\xe6\xe4\x86\x0f\x0e\x11\xa6\xc4\x53\x40\x65\xbe\x74\xe7\x2d\x6b\x32\x42\x3f\x85\x8b\x23\x6e\x51\xed\x9e\x17\xb0\x15\xcd\xef\x49\xb1\x05\x44\xe3\xe5\x2f\xc5\xd2\x50\xbe\x54\xfd\x2f\xff\xb2\x75\x2c\xd6\xc2\x18\xc2\x6f\xbf\x94\x3a\x63\xbb\x03\x40\x43\xbd\xb8\x19\x71\x6e\x4a\x58\x7a\x1a\x0d\x8c\x16\x68\xc1\x1c\x99\x0b\x1d\x4a\xe5\x5f\x04\xec\x07\x3f\x5b\xd0\x84\x40\x71\xa7\x97\x8d\xfd\xc4\xec\x07\xf2\xef\xfe\x2f\x1b\x8b\x0d\xb8\xf0\x7c\x96\xf5\x7a\x2a\xaa\xa7\xae\x81\xa6\x9b\x20\x68\xaf\x1f\x70\xd0\xac\x9d\x99\xe3\x0c\xe7\xb2\x60\x9a\x32\x63\xdb\x61\x84\xd9\x79\x54\xb7\x9f\x81\x98\x2a\xc4\x09\x1b\x8b\x0b\xe1\xd8\x84\x9d\x49\xd2\x38\xbf\x85\x48\x47\x67\x89\xa9\x43\x73\x21\x66\x70\x48\xf0\xbc\x93\xa6\x0d\x7f\xae\xf4\x96\xae\xb1\xc4\x76\xc3\xab\x72\x41\x52\xa6\x52\x69\x6c\xf6\x99\x71\xea\x67\xf6\x38\xb6\x33\x21\x33\x70\x50\xef\xf0\xce\xb2\x35\xcd\x07\x4b\x67\xdc\x14\xc7\xfc\xe3\x7b\x91\xed\x1d\xd5\x36\x91\x43\x6c\x1a\xe6\x0d\x83\x7e\x91\x39\x93\xb1\x83\xda\x95\x41\x8b\x04\xea\x99\
x61\x69\x99\x83\x72\x7e\x2c\xf5\xe1\x29\xf7\x64\x74\x04\x47\x0c\x86\xa5\x70\x50\x3e\x7b\xb0\x4b\x3e\xbd\x1e\xf7\x03\xeb\xf8\xd7\xaf\xc4\xfe\xd9\x29\xaf\x17\x0f\x5d\x0a\x0e\x04\x5b\x6e\xa2\xc3\x8e\x1f\xe8\x2e\xb1\xc5\x2a\x57\x0f\xfa\xe4\x51\xab\x5b\xc2\x59\x38\x5b\xd3\xb3\xe0\x75\xb5\x3c\x9a\x35\x75\xd8\x2f\xd0\x2c\xe1\x7c\x07\xe1\x5a\x45\xad\xbc\x2d\x43\x4b\x16\xde\x0d\x02\x3f\xd0\x1e\x92\xbe\x09\x4b\xf4\x95\xdf\x9f\xdf\xd9\x06\x84\x66\xdb\x2e\x9e\x91\xc2\x2a\xa2\x00\x36\xb4\x6c\x59\xd5\x76\xa6\x4d\xd2\x13\x61\xfe\x06\xb6\x60\x4b\x7c\x96\xcf\x57\x57\xf6\xe2\x3d\xf4\x7f\x32\x6c\x78\xfe\xb5\xcb\x6e\x5e\xe9\x4c\x48\x71\x5a\x7b\xeb\xc6\xc7\x8a\x6f\x3a\xab\x6a\x2b\x5f\x9f\x9f\x96\xd3\x09\x5b\xe7\xbe\xfd\x76\x34\xa3\x94\x15\xa8\x23\x2c\x26\xed\xfa\x6f\x3c\x92\x9b\x7a\x44\x19\x1d\x92\x83\x05\x48\x04\x32\x8c\xf6\x3b\x55\xde\xc6\xfe\xd8\xc3\x13\x13\x4a\xff\x84\xc7\x0f\x58\xbb\xfe\x29\x9a\xdb\xaf\xeb\xaa\x76\x4c\xaa\xda\xc2\xdf\x9a\x69\xe3\xb1\xa6\x4b\x23\x1c\xce\x7a\x95\x77\x40\xb6\x3c\xfa\xcc\x0e\x2c\xf7\x19\xb4\x4a\x5e\xd9\xc7\x73\x19\xc7\xf4\x31\xbf\x49\x0e\xe7\x9a\x8f\xb7\x83\x73\xff\x6f\x7f\x00\x4c\x0e\xe7\x5a\x1e\xe2\xe4\xb1\x03\x7f\x80\x1b\x4c\x8a\x62\xd2\x33\x37\xa5\x02\xc1\xcf\x0f\x2c\x3b\x37\x1c\x25\xa4\x00\x57\x11\x48\x26\x9a\x61\x6a\xd0\x78\x47\x57\x59\x9b\xe3\xb6\x64\x83\x29\xd2\xfc\x6a\xa2\x63\x7d\xc1\x76\xf8\xc4\x0e\xe0\xa2\x3a\xa7\x2f\x81\x4f\x34\x3b\x34\x17\x7e\x32\x57\x69\xe7\x55\x5d\xf5\x46\x0e\x3d\xcd\x40\xf6\x38\x75\xe7\xcd\xfa\xd7\x3c\xe0\x01\xbb\x9e\x95\xa9\xca\xe2\xea\x6d\xc5\x4c\x77\x46\x79\x2c\x69\xb3\x91\x30\xb7\x96\x7f\xf8\xa5\x5a\x9e\xba\xe5\x4f\x94\x2a\x3d\xc3\x57\x94\x58\x39\xb7\x51\x69\x63\x53\xf2\xe6\x5a\xc5\x39\x28\x5b\x0c\x91\x6e\x61\x20\xe3\xf8\x50\x0d\x38\x15\x3e\x32\x65\xd1\x95\xe2\xdc\xc4\x80\xfb\x14\x81\x59\x7f\xdd\x83\xb7\xa0\x57\xff\x44\x1e\x26\xfe\xd5\xf0\x56\xb6\x76\xfd\x51\xf9\x09\xb4\xcd\xda\xfe\xef\xbf\xef\xa7\x23\x01\x9b\x84\x7f\x03\x98\xb1\xd3\x83\x40\xd1\x53\xbe\x19\x37\xe7\x45\xb1\x3e\x99\x62\x26\x6e\x9d\x0b\xf7\x49\xe9\xdc\
xf4\x4d\xf4\x9c\x8a\x3d\xae\x13\xaf\x26\x17\x84\xec\x05\x1a\xdc\x82\x99\xb5\x2e\xa8\xe1\x58\xdf\x29\x51\x82\x72\xe2\x33\xac\xc5\xc5\x12\x1a\xa5\x27\xfc\x55\x7c\x99\xfe\x12\x06\xba\x4f\x44\x9b\xe1\x4e\xa5\xf7\x26\x3e\xb7\xbc\x9b\x81\x33\xf5\xdb\x1f\x0a\xa8\x84\x66\xb1\xbf\x3a\xef\x6a\x38\xe7\x40\x0d\xa2\x2d\x2f\x47\x7f\xb7\xae\x83\xda\x4d\xeb\xa3\x47\x5d\xca\x4b\xcb\x12\xb5\xaa\xeb\xc2\x47\x7a\xc5\xaf\x07\x4e\x9a\x8d\x93\xc6\x90\x60\x4a\xd2\x6e\x8a\x5c\x23\x40\x82\x0d\x77\x31\x12\x25\x6f\xae\x0b\x98\xcd\x0a\xbf\x9e\xff\x7c\xf8\x28\x21\xb2\x40\x3b\xbc\x94\x0f\xfe\xd2\xfe\xf8\x68\x0b\xd6\x4f\xeb\x7e\xe1\x03\x80\x3a\x79\x98\x9f\x8c\xdc\x83\x3d\xfe\x13\x4c\x04\x8e\x61\x8a\x2c\xa5\x01\xfe\xe1\xec\xf7\xd0\x70\xb1\x64\x2b\xb1\x6f\xcc\x3b\x04\xc3\x90\x70\xde\xc9\x31\x82\xd0\x33\x56\xd3\x7f\xbd\xc8\xb3\x84\xb9\xed\x96\xb7\x91\x77\x92\x2d\xec\xf0\x03\xdd\xb1\x90\xbc\x03\x61\x06\x97\xcc\xaf\xfe\x6d\x9a\x01\x3f\xe5\x57\xaa\xbf\x05\x03\xc0\xf6\x46\xc2\x2f\x5e\x04\x2e\x37\xa4\x85\x19\x92\x30\x78\x7c\x9b\x1f\x08\x92\x04\xaa\xa3\x69\x38\x46\xae\xf7\x7a\x35\xd8\xe5\x00\xe6\x3b\xd9\x81\x4c\x5f\x8e\xdf\x4c\x31\xf4\xdc\x07\xc2\x1b\xe3\x6e\x31\x39\x48\xc1\x2f\x59\xbc\xa6\xeb\xca\x2c\x94\x5f\xcf\xec\x47\x53\x58\x81\x08\xe4\x53\x9c\xc8\x1d\x3e\x50\x4b\x62\x35\x6d\x41\x44\x07\x05\x5b\x82\xd4\xb2\x1a\x6d\xf0\xfb\xb3\xa9\xcc\x6e\x25\xf5\xdd\xd7\x33\xc2\xcc\xfc\x33\x46\xda\xb9\x81\xfc\x63\x46\x73\x5e\x95\xf6\xcf\x2b\xdb\xba\x84\x9d\x68\xfe\xc7\x94\x0b\xf9\x7f\x2c\x9d\xb5\xd6\xb4\xcc\x12\x85\x2f\x88\x00\xb7\x10\x67\x70\xb7\x0c\x77\x77\xae\xfe\xac\xf7\xfb\xcf\x24\x93\x0c\x2c\x56\x4f\x57\xed\x67\x37\xd5\xd5\xfe\x20\x3b\xd4\x6b\xbd\x6f\xcc\xd5\x25\x3f\xd2\xce\x35\x33\x70\x64\x43\xc7\x2f\x35\x18\x0f\x9b\xc2\x66\x76\xb9\x66\x17\x40\x4e\x12\x64\x72\x2d\xe4\x00\x36\x52\xae\x5e\xd3\x71\x49\x09\xba\x94\x38\xe1\x0f\x34\xdf\x2f\x6f\x9c\x19\xc9\xac\xc9\xd6\xcf\x11\x6f\xe3\x50\x74\x7c\x85\xdd\x43\xd5\xdd\xb7\x10\x5e\x38\x6f\x48\xa9\xdc\xc2\x38\xb6\x21\x95\x3a\x84\xe3\x07\xb1\x5f\x84\x74\x8e\x96\xc0\xda\xbc\
xc6\x0b\xe6\x38\x99\xa9\x32\x10\x8c\x8a\x8d\x4c\x93\x38\xff\xd7\x7d\x38\x45\xb2\xdb\xc0\xf3\x6f\xf2\xb4\xcc\x94\xa8\x0a\x0e\x5a\x1d\x86\x98\x73\x66\x37\x59\x71\x22\x6c\xfa\x2e\x15\x7e\x01\x46\xe7\xc3\x62\x6e\xea\xbc\xf1\x6f\xbd\x6a\xdf\x98\x87\x9e\x59\xb6\xab\x22\xb8\x31\x21\x6d\xc4\xcb\xa4\xbc\x26\x47\xe9\xb7\x8b\xc1\x40\xd1\x9c\x3e\xcc\x58\x81\xa9\x33\xe1\x38\x17\x63\xff\x57\xb7\xec\x90\x84\x21\x9a\x1d\x18\x75\x5a\x20\xb3\x36\xfe\x36\xbc\x4f\x79\xc9\xe4\x71\x5e\x30\x49\xc3\x08\x95\xab\x62\x1c\x1c\xea\x32\x48\xa6\xb0\x20\x05\x13\x73\xdc\xbe\x0c\xcc\xfc\xef\x37\xfb\x13\xb3\xc6\x71\x90\xa6\xc4\xb3\x0f\xf8\x52\x1d\x7f\xbb\xc2\xd1\xc4\xa1\x4c\xa1\x51\xc3\xdc\x5e\x72\x20\x6d\xbd\xa8\xa9\xf4\xb4\xca\xdd\xb5\xd5\x83\x85\x34\x17\x48\xab\x3f\x28\x36\x55\x5a\x3a\xd3\x35\x30\x23\x22\xc2\x30\x2b\x6d\x3c\xca\xde\x82\x38\x62\x25\xd6\x59\x68\x1c\x21\xbc\xbd\xba\xff\x6f\x5f\xf5\x20\x13\xc9\x83\xa8\x4f\xac\xe7\xed\x5b\xfc\x88\xa0\x9f\x3d\xaf\x42\xf1\xbd\x52\xa5\x3a\x16\x59\x14\xc5\xe9\xd8\x0b\xf8\x1a\xd2\xfb\xb2\x1d\xf5\xae\xc5\x0e\xd5\x16\x7c\xd8\x88\x02\xd9\x63\xef\x9f\x1e\x4b\x75\xcb\xdd\x96\x0c\x02\x95\x49\xee\xa6\x6d\x11\x67\x8e\x54\x26\x00\xde\xb3\x6f\x3c\x09\x45\x97\xd7\x64\xe3\x66\x3b\xaf\x84\xde\x3a\x89\x7f\x86\xfc\x79\x83\x3f\x83\x01\x31\x2b\x3e\x26\x11\x9b\x59\x37\x31\xc6\xe2\xc5\xdd\xa7\x88\xbd\xa2\x86\xaf\x43\x1d\x56\xe8\x88\x6c\x3e\x6c\x7c\xb6\x76\xe4\x59\x74\x1c\xf8\x8f\x97\x8d\x26\xa1\x7a\xac\xa6\x62\xab\x1e\x18\xdf\x2d\x3d\x99\x2c\xd1\xc3\x74\x6e\x73\xe6\xf8\x60\xfd\x7e\x8e\x99\x6d\x4a\x75\xc5\xbb\xfe\xe9\x9e\x2f\xf3\xb3\xce\xdd\xa5\xf8\x9a\xb7\xff\xcd\x1a\x77\x5f\x17\xaa\x66\x88\x80\x3b\x9a\xf1\x14\xcc\x91\x9f\x91\x36\x04\x41\x10\x4a\xb1\x9b\x54\x39\x6a\x7c\x72\xf7\x10\x0d\x8f\x7b\x3c\x4c\x53\xd5\xae\xf9\xb3\xed\x30\x89\xe4\xf4\x2e\xf1\x62\xab\xc0\x34\xa6\xcc\xef\xe6\x0a\x4c\x03\x69\xc3\x99\x8f\x51\x75\xab\x3c\x5e\x18\x4c\x38\x4c\x68\x62\xff\x16\xfa\x25\x5b\x24\xbd\x51\xc3\xd4\xdb\xad\x5f\xa9\x5b\x00\x5a\xe3\xc7\x4c\x37\x45\x81\xe6\xd7\x67\x9e\xa2\xbf\x6e\xe2\x5f\
x01\x6c\xd8\x0c\x1b\x50\xa3\x52\x44\x21\xbc\xaf\x92\x99\xa2\xd9\x21\x0a\x3c\x99\xb3\x38\xbd\xdf\x89\x3c\x03\xb2\xef\x17\xdc\xbf\x16\x1c\xac\xd1\x7e\x03\xf4\x58\x18\x56\x0b\xfb\x68\xed\xf4\xb0\x97\x34\xdb\x9c\x56\xd5\x62\xc4\x54\x22\x6c\xe9\x08\x08\xbf\x19\x62\xbf\xc3\x3b\xcf\xd6\x34\x1b\x50\x85\x44\x7a\x0f\x15\x88\x4e\xc0\xdc\x6e\xab\x06\xd0\x4b\x9e\x5a\xcd\x24\x2b\xb7\xae\xeb\x66\x3e\xba\xd3\x8e\xc2\x18\xf9\x6f\x61\xb5\xbd\xdd\xd2\x3f\xf1\x1d\x95\x31\xa1\xcb\x2e\x11\xe0\xf8\x23\x2c\xc6\x79\xa3\x0f\xe3\x80\xca\xc2\x81\x6a\x62\xee\x75\x5d\xd3\xdd\xca\x1f\xc0\xf4\x63\xa1\xe6\x58\x07\x3f\xb3\x09\x88\x51\x28\xb3\xda\x7d\x50\xbc\x1e\xbf\x8d\xea\xa2\x57\x38\x4b\x15\xa8\x87\x0a\xcf\x9f\x2c\x8d\xcc\x30\x8e\xfb\xd3\xfe\x37\xa5\xcb\x37\x8e\x71\x83\xdd\x0c\x2e\xf3\x44\x77\xf8\x0b\x0f\x5b\x18\x16\xce\xeb\x33\x63\xfa\x9a\x7f\xc7\x85\xa8\x52\x33\x27\x63\x72\x66\x33\x29\xfc\xec\xa5\xd2\x93\x91\x89\x6c\x50\xfe\xe3\xaf\x39\x52\x4b\xab\x57\x90\x17\x3a\x7e\x19\xfc\x4a\x24\x32\x45\x1b\x3a\xc0\xbc\x7d\x9b\x86\xe8\x50\x39\x5f\x17\xc8\x5d\x91\x9d\x83\x51\x73\x14\x10\xfd\xba\xc6\x12\x42\x81\xd3\x63\xc9\x9d\x29\xb0\x5c\xbb\xb8\xe1\x70\xfe\x36\xee\xdc\x5a\xf9\x61\x18\x04\x21\x49\xba\xbc\x22\xfe\x8d\xdc\xdd\xc4\xe7\x94\x6d\x77\x4c\x6f\x9c\x1f\x08\x5c\x57\xb5\xeb\xed\xab\x24\xd8\xed\x03\xfa\xfd\x34\x31\xd3\x72\x4c\xab\xba\xf3\xdd\xfb\x62\x7e\x3b\xc2\xdb\x72\x82\xc3\x88\x9d\x74\x99\x66\x59\xee\x27\x11\xc3\x0a\x74\xcf\xf3\x8f\x3b\x4d\x57\x5f\xd7\x1f\xa7\xd6\x5c\xe3\xb0\xae\x6d\xf1\xb2\x5c\x57\x27\x9b\x93\x30\x8e\x16\x4e\xb0\x95\xdb\x42\x82\x1b\x65\xe1\xdf\x64\x99\x41\x57\x80\x41\x62\x14\x68\x00\x07\x19\xb4\xaa\xed\x32\x0b\x35\xcb\xac\x7b\x0e\x14\x95\xea\x4f\x00\x72\x0f\x89\xeb\x6b\xe2\x7a\xf7\x02\x94\x15\x3c\xcf\xfd\xf8\x1c\xfd\x21\x46\x57\xd7\x91\xf4\xec\xe9\xab\x74\x2d\x3d\x45\xa7\xa7\x55\x2c\x74\x73\xe3\x9b\x66\x5e\x0d\x43\x7d\x2f\x9c\x4a\x86\x5b\xd1\xda\x85\xdf\xc4\xb0\xb7\x59\x94\x56\x27\xdb\xc3\x25\x36\x09\x04\xd6\x5a\x08\xdc\xbc\x11\xd9\x04\xe8\xc9\x99\x22\x77\xca\xe7\xef\x6c\x47\xb0\x1c\
xf7\x83\x39\xf8\xc8\x02\x1d\xc4\x1b\xf9\x58\xcf\x62\x4a\x60\xc3\x38\x7a\x02\x54\x3c\x66\x07\xaa\x9b\x90\xec\x53\x45\xf6\x0d\x1b\xce\x4b\xee\x8e\x37\x30\xd6\xa9\xaa\x2a\x53\xce\x34\xb2\x53\xc7\x75\xad\xb1\x4a\xb7\xb5\x83\xb1\xd9\x5f\x98\x19\xc4\x97\x17\xc4\xe2\xaa\xbe\xcb\xb9\x5b\x40\xe0\xaa\xd7\xe3\x12\xcf\x4d\x3c\x06\x0a\x94\xe5\x41\x8a\xb8\xb7\x29\x50\xea\x39\xbf\x97\x8b\x7d\xe7\xc7\x3c\x9c\x3b\x6e\x46\x43\x1c\xc4\x2d\x1a\xbb\x72\x13\x06\x43\x92\x08\xc7\x0e\x83\x20\x58\xd7\x3b\xf3\x2b\x5f\x2a\x12\x3c\xe1\x31\x5f\xf7\x4b\xd5\x76\x9e\xcf\xdf\xae\xc7\x90\x78\xfc\x38\xbd\x1f\xd3\xb1\x99\x11\x15\xa7\x4b\xc8\x09\x90\xf9\x35\xba\x12\x51\xc8\xba\x80\x4d\xf7\xcd\x21\x6f\xcf\xe8\xb3\x4b\x7a\xe0\xa6\x3e\xd7\x94\x3b\x84\x51\x4f\xff\xf6\xc5\xa7\xac\x4c\x74\x0f\xf3\x90\x81\x2c\x94\x06\xf7\x3a\x04\x4a\x4b\x68\x95\x36\x91\xd6\xb3\xa0\xb0\xab\x56\xf6\x3a\x24\x4a\xb4\xb5\x51\xbb\xd6\x18\xd3\x4b\x66\xac\x91\x2f\xfc\x77\x2c\xdb\x4b\x55\x16\x9a\x16\x76\xdd\x0d\xd5\x06\x11\xd0\x7e\x1f\xf0\x20\x39\x6d\xb2\xfd\x56\xb3\x9e\x0d\x3e\x43\x92\x61\x1f\xfd\xd2\x92\x39\x68\x51\x7d\x69\x9b\xb1\xe9\xa5\x2e\x95\x07\x6e\xc0\xd6\x0a\x8b\xbb\x77\x2d\x47\xd9\xf7\x61\x4b\xd6\x36\x3a\xd2\xc4\x22\xee\x87\x12\x8f\xc5\xda\x4e\x7d\xd9\xa0\x0d\x5f\xc6\xf4\xfd\x39\xee\xba\x0e\x5b\xfd\xcd\x07\x4e\xca\x96\xcc\x9c\x57\xb3\x1b\xbb\xe4\xd2\xbb\x38\x4d\xc7\x61\x45\xc9\x03\x52\xc4\xab\x6e\x74\xdc\x5a\x10\xf2\xf7\xc1\x86\xd0\x39\x20\x27\xdd\xa8\x7b\x90\xe2\x0a\x67\x87\x29\x65\xca\xe0\x4e\xfe\xaf\x94\x9d\x40\x2c\xc6\xb5\xec\x17\xd5\x85\xdf\xe6\x3a\x18\x9b\xf9\x99\x11\xb6\x07\x00\xcd\x52\x29\x58\x4d\x28\x88\x4e\xb0\xd0\xef\xeb\xb9\xfa\x56\x76\xac\x8e\x0a\x80\x5d\xc3\xe0\x27\x07\x11\x34\x68\x91\x3f\x51\x6c\xfc\x50\xee\x06\x4a\x52\x11\x53\xe9\x89\x82\xd7\x3b\xe3\x73\xa3\x22\xe9\x58\x5c\x36\x59\x70\x65\xf7\x8a\x84\x02\xc2\x22\x3e\x6b\xfa\x28\x79\xea\x10\x59\xad\x1d\xe2\xb7\xed\x71\x97\x83\xd3\x79\x63\x69\xab\x36\xee\x0c\x55\x96\x3c\x7a\x68\x69\xd3\x63\x29\xab\x59\x9d\x7a\x19\xba\x09\x9d\x43\x6d\xea\x7b\xf0\xee\xaa\xfc\xf1\
x0b\x0c\xc9\xfd\x18\x22\x3a\x1f\xa1\x18\xbd\xd7\x31\xd5\xfd\x1e\xdd\x15\x56\x07\x3a\xf6\x42\xc9\x70\x01\xbf\xf5\x40\x74\xa0\x5c\xfe\x6c\x86\x69\xf5\x4e\xff\x7e\xaf\xf1\xcb\x78\x9a\xd6\x7d\x5b\x93\xce\x71\x10\x8c\x8c\x85\x15\x71\x48\xca\x48\x78\xe8\x7b\x50\x85\x50\x0b\xce\x74\x4c\x46\x47\xab\x2c\xea\xcc\x39\x9d\x96\x5d\xc8\x5c\x26\xed\xcd\x7b\xc5\x14\x7a\x6c\xb6\xcf\xea\x03\xc0\x4b\xaf\xb9\x87\x4d\x1b\x1e\xad\x5a\xac\x8d\x47\xe5\xb4\xa3\xa6\x71\xf6\xa9\x6b\xa8\xa8\xf5\xdd\xd9\x49\xb9\xec\xde\x35\xee\x44\xab\x53\xe0\xde\x3b\x44\x8b\x1e\x5f\x6d\x93\x4a\x29\x67\x83\x09\xba\x5c\x4e\xff\x31\x5e\xe6\x90\xd0\x68\x1b\x27\x6f\xa9\x6d\xed\xd3\x2f\xcd\x44\xbd\xfa\x27\xce\xe9\x10\x25\x27\x4a\xc9\x73\x15\x91\x35\x07\x15\xb2\x43\x1f\xe3\x97\xb7\x61\x59\x7d\xb3\x1e\x8e\x79\xba\xab\x99\x01\x8b\xd6\xf4\xfc\x76\x5f\x10\xd9\x3b\xac\xf4\x54\xb3\x59\x86\x63\x06\xec\x14\xf9\xdf\x19\x08\x56\x84\xa8\x49\x0e\xfb\xfb\x96\x22\xca\xd0\xa4\xd5\x80\x01\x23\xf6\xfb\x0a\xab\xbe\xf7\x3a\x08\x83\x20\x38\x50\x8d\x61\x37\x9f\x1c\x8f\x91\x2c\xed\xe9\xe5\xfc\x2a\x2f\x2d\x8f\x3f\x18\x93\xd1\xe4\xc5\x97\x73\xd4\x1f\x2c\x66\x70\x14\xf6\x57\x00\xb2\x0e\xa6\x8d\x3c\x2b\xdd\xbd\x6f\xea\xba\xb6\x23\xc7\xd6\x34\x5a\x08\x81\xbf\x96\x51\x83\x51\x72\xdd\x67\x18\xcd\x8a\x98\x1a\xcb\x0e\xd7\x96\x14\x7e\x9b\xca\xf3\x25\x14\x00\x54\x60\x5e\x6a\x31\x94\x76\xaf\xde\x91\xf4\x0b\x74\x5e\x7b\x37\x0d\xb8\xa8\xa5\x87\x92\x07\x36\x8c\x90\xfa\xf1\x97\xac\x60\x24\x4c\xd0\xc8\xe6\x2e\x4c\x9f\x32\x0c\x67\x0b\x4d\x9e\xb9\x84\x9b\xaf\xeb\xbc\x12\x2e\x2a\xd4\x11\xd5\x02\xef\x3d\xab\x48\x15\xcc\x7b\x06\x30\x77\xfc\x6b\x7c\xf5\x4d\x3c\x42\xab\x9a\xb8\x14\xe7\x54\x52\x20\x03\x25\xe9\x17\xaf\x00\xaa\xa4\x42\x55\xee\x9c\xd6\x44\x18\x85\xb2\xcb\x09\x3b\x7f\xe7\xf6\x6f\x1d\xf8\xe8\xdf\x1a\x33\x39\xa1\xd1\x3d\x3f\x91\xf5\x20\x58\x52\xb5\x49\xb5\x30\x4a\x9c\x01\xfa\x9d\xfd\x79\x73\xbf\x4e\x9f\x70\x67\xce\x24\xcd\x41\x0b\xce\x89\x31\x2d\xdc\x02\xe8\xd0\x5c\xbc\xcf\x04\xa3\x60\x19\x6a\x18\x89\xf4\x3d\xb7\x83\x78\x53\xc7\x48\xa5\xda\x5f\xd5\x76\
x45\xb2\x62\x54\x4d\x8d\x55\x60\x84\x2c\xbe\x5f\x33\xfa\xeb\xba\xac\xae\x00\x34\xde\x4d\x6b\x25\x5a\xa2\xd9\x2e\x70\x80\xf3\x9e\xa9\x41\x57\x81\x4c\x32\x02\x63\xdf\x3e\x08\x8f\x4e\x9f\xb4\x5c\x31\x48\x63\xb3\x09\xef\x7b\x74\x22\x72\xcd\xb5\xca\xc6\x3f\x6e\xe3\x40\xb1\x05\xe9\x0e\x2f\x21\xdc\xe4\x8f\xb5\xf4\x58\x3b\x12\x43\xb4\xb8\x7e\x01\xb6\x07\x49\x18\x4c\x09\x52\xd4\xf5\x75\xb3\xa5\xdf\xcd\xf4\x96\x4a\x1b\x42\x18\x23\x9f\x29\xbd\xdb\x86\xde\xf7\xcc\x08\x7c\x64\xe1\xec\xde\x99\xf2\xed\x18\x50\x9d\x5f\xbc\x58\x82\x77\x4e\xd5\x82\xac\x07\xd9\xe1\x64\x71\x4e\xd5\x01\xcb\x13\xd6\xab\x52\x66\x8c\x2f\x55\x80\xc5\xe9\xf9\x90\x31\x13\xd4\xf9\xc7\x6c\x35\x52\x47\x48\x88\xdc\x12\x3d\xeb\xb4\xf7\x4a\xa5\xc2\xef\x65\x52\x06\x60\x14\x0d\xdc\x64\x76\x73\x4a\x9c\x64\xb7\xda\x14\xa3\xd6\xe2\x4b\x97\x13\x7f\x90\x62\x0b\x1b\xc8\x95\x9d\xe8\x62\xb2\x03\x9a\x85\x54\x15\x7d\x4a\x70\xfd\xeb\xd5\x63\xa1\x28\xe5\x6a\x3c\xd3\xbf\x85\xd9\x4f\x48\x5a\x00\x9b\xff\xe6\x1a\xc7\x0c\xbe\xc5\x30\xce\x1f\x14\x38\x6a\xbf\xf8\x1d\x0f\x81\x6b\x6a\xee\x9b\xf0\xde\x87\xa5\xbd\xf9\x92\x89\x09\xe0\xfd\xfa\xe8\x06\x26\xe4\x4d\x9e\x3c\x12\x57\xb8\x3c\xed\xe3\xf4\xda\x28\xd8\x60\x68\xe8\x06\xf0\x75\xa2\x85\xf8\x69\x39\x5f\x51\x6e\x0d\x4c\xc7\x51\x53\xc7\x21\x5c\x32\x09\x03\x83\xc3\x30\xe6\xea\x0a\x91\xfa\xee\x78\x7e\x45\xde\xd2\xef\xd6\x09\x58\xd1\xd1\xc5\xa3\x0a\x14\x42\xbe\xaa\xef\xe5\x41\xb4\xc1\xd7\xb3\x1e\x7d\xca\x0e\x66\x7a\x67\xef\x22\x23\x69\x00\xa3\x72\x6e\x9a\xcc\x73\x65\xe1\x8f\x5e\x0d\x98\x5d\x41\xd9\xa0\x89\x30\x5c\x44\xf1\x4f\x64\xeb\x5c\xd4\xbf\xb9\x9e\x0e\xbc\x4c\xac\x32\xc7\x22\x06\x03\xc0\x6c\x77\xf2\x5f\xb5\x46\xdb\x03\x5a\x13\x7e\x0b\xbc\x50\x47\x30\x61\x7e\x7b\x26\x35\xb3\xab\x69\x1c\x71\x5e\x5b\xdf\x3a\x66\xef\x5e\x17\x05\x54\x56\x70\x81\x58\x5b\x52\x43\xe7\xca\x94\xcb\x30\x0c\x1e\xc0\x41\xe0\x44\xcc\x17\x81\xa9\xb1\x17\x4a\x52\x44\x3e\x7c\x17\x0a\x66\x31\x61\xf8\xe4\x65\xed\x26\x3a\x50\x55\xbb\xad\x7d\x31\xa1\xd6\x6b\xa7\x1d\xb6\x76\x45\xae\x42\xfe\x54\xca\xec\x21\x9d\xaf\x2e\x98\xc4\
xc0\x9b\xb9\x1e\xb0\xbb\x4d\xb5\xa3\x2b\x69\x8e\x91\x60\x70\xa2\x27\xae\x41\xea\xf4\xa8\x41\xb1\xa3\xc0\x0d\x4b\xf8\xc8\x22\xb9\x7b\xa0\xcc\x9a\xbc\x98\x0e\xaf\xe8\xa2\x81\x09\x48\xb8\xe4\x0c\x49\x63\xac\xa3\x0f\x5f\x42\x7f\xe0\xf9\x53\xaf\x77\x95\xdb\x6b\xe0\xd6\x16\x84\x20\x12\xa3\x60\xfa\x06\x94\xd9\x6a\x2c\x73\xa4\xe4\x79\x96\x39\x00\xb0\x7c\x72\x4f\xa0\xac\xbc\xe6\x9d\x05\x9a\x3d\x8c\x59\xf9\x97\xba\xb4\xe1\xc5\x53\xaf\xca\x47\x56\x83\xec\x16\x4a\xe7\x40\xc5\x47\xe0\x57\x97\x40\x11\xeb\xed\xb4\xf6\xaf\xc5\xbd\x39\x6c\xbe\x3d\x74\x4b\xd3\xcb\x65\x4d\x6a\x20\xa3\x97\x1f\xe1\x16\xf0\xd7\x5f\xbc\x20\xf1\x9b\xbe\x3a\xad\x10\xf6\x11\x59\xfb\xa8\x8f\xfb\xbc\xbe\xe7\xad\xf5\x29\x50\xc9\xa0\x88\x6b\xce\x6d\x26\x34\x50\x5e\x99\x24\x14\x27\x22\xcb\xa0\x8c\x90\xc5\x75\x84\x64\xa1\xcc\x80\xfd\x5c\xa3\xd9\x43\xfe\x8f\x19\x2d\x44\x72\x2a\xa8\x71\x30\xa8\xf4\xb2\x4f\x7f\xf7\x4d\x15\xf3\x33\xec\x51\x28\x61\x30\x55\x6d\xe7\x75\xf4\x92\x91\xc8\x28\xb0\x94\x71\x3a\xcb\x4b\xf0\xbc\x22\x11\x00\xf7\x40\x74\xf2\x97\x80\x2e\xde\xd6\x35\x95\xaf\xf2\x4e\x57\x7d\x93\x67\xb7\xd3\x89\x92\x3b\x16\x76\x6f\xd7\x14\x04\x00\x2f\x3c\xd7\x96\x62\x1f\x43\xb0\xc5\x50\xb0\xda\x1d\xd3\xf0\x9c\x25\xdc\x82\x46\xc9\x4d\x1f\x83\x7e\x88\xb6\xbe\x57\x62\xb8\x44\x30\x24\x81\x80\xcf\x7e\x20\x3e\x45\xc5\x33\x7d\x53\x92\x62\x57\xe9\x65\xfd\xd4\x12\xb3\xb1\xab\xd2\x48\xec\xd2\xc4\xa1\xc8\x71\xc3\xf0\x4a\x58\xae\xc0\x8c\xb3\x73\xb4\xe0\xec\x99\x74\x45\x1b\xaa\x0c\xf8\x5e\x44\x11\xdc\xcf\xa1\x46\x5d\xd6\x0b\x31\xf5\xf2\x63\x55\x45\xdc\xf1\xea\xf7\x43\x60\xf5\x5f\x3d\x3a\x71\x6e\x73\x7d\xdf\x62\xd1\xeb\x3c\x59\x22\xe4\x88\xd6\xb1\xc0\x52\xa1\xea\x09\x8d\x39\xf4\x99\xcf\xb8\xa2\x63\xf4\x2b\x68\x11\x7f\x41\xaf\x80\x74\x43\x8c\xcb\x1a\x19\xe3\x97\x12\xfb\x26\x79\xf6\x13\x10\x94\xcb\xff\xbe\x05\x92\x3c\xe5\x84\x8a\x3e\x19\xbc\xa4\x99\xe1\xf4\x70\x97\xfc\x2d\x39\xb6\x20\x80\x22\xb9\x9c\x0d\x02\xc2\x61\x49\x8a\x13\xf5\x87\xe8\xfc\xf0\x5b\xe7\xb3\x03\x21\x9a\x2b\x11\x82\xdc\xd2\xfa\xfb\xce\x2d\x81\xc9\x43\x0c\xb4\x70\xb4\
x23\x3a\x8e\x07\xb9\xba\x37\x00\x30\xb3\x20\x20\x67\x99\x7a\xd8\xc3\x9c\xaf\xf3\xf4\x39\xb6\xe5\xc6\x0a\xdf\xd4\x19\x5b\x2a\x2b\x6b\x82\x39\x81\xeb\x3e\xe2\xf0\xb7\x7a\x0d\x16\x82\x64\xbf\xb5\x24\x31\xae\x34\x15\x12\xd2\x4c\x2c\x62\xdf\x1c\xd5\x23\x2d\x78\x7f\x70\x02\xba\xc1\x70\x64\x27\xa2\x25\x54\x0d\x24\x63\xf1\x9b\x2b\xf3\xc5\x76\x7b\x56\xb9\xba\x7e\xff\x3f\x0e\x26\x06\x4a\x94\x33\x0d\xda\x0c\x38\xff\xf6\xb5\x9f\xd1\x2d\x0e\xfe\xbc\x09\x36\x55\x79\xcb\x7c\xcf\x05\x5e\x3c\x4b\x7e\x45\x0f\x46\x96\x8b\xbd\x88\xe0\x05\xbd\x81\xf9\x7e\x1f\xdb\xe3\x8b\x8d\x77\xaf\xc8\x73\x4a\x12\x06\x81\x1f\x14\xed\xe8\x43\xb6\x90\x18\x88\xc4\x19\x46\xd6\x6e\x01\x42\xe3\x27\x9a\x1d\x48\x73\x2d\xfc\x6b\x76\xc6\x8b\x26\x2f\xd8\xde\xf2\xca\xa3\x05\x94\x19\x52\xc3\xec\x3a\xf7\xee\xdb\xef\x4b\x28\x92\x72\xeb\x3e\x10\x9d\x90\xc5\xef\xe6\x2a\x3d\x50\x82\x9f\x02\xe6\x6d\x06\xa2\x5f\x0c\x97\x7c\xa9\xde\x16\xf0\x53\x15\xa5\x4e\x63\x28\x35\x42\x65\x18\x7e\xaf\xde\xe5\xd3\x29\x56\x83\x98\x09\xdf\x3e\x0e\xe1\x06\xd9\xc4\x90\x5e\x57\x98\xc5\x54\xfb\x5b\x1d\x67\xdf\x46\x11\xc8\x59\xb6\x7e\xd8\xa6\x96\x01\xf7\x4b\x12\x3e\x9b\x75\x99\x00\xa6\xcd\x25\xac\xde\x73\x76\xa4\xb7\x90\x92\xd7\xc0\x16\x35\xc6\x8f\x02\x2a\x53\xc0\x2e\xf1\x82\xb5\x1f\x11\xb3\x1e\x52\x4c\x49\xa7\x75\x65\x2f\x71\x2e\xe7\x74\x78\x65\xe1\x4c\x62\xa6\xbe\xe1\xe7\x93\x82\xe9\xa3\xe7\xd7\x88\x70\x68\x19\x3f\xf0\x3a\x74\xa8\xef\xf1\x59\x38\x9c\x2c\xed\x1e\x75\x27\xc9\x0f\xf1\xe6\x0f\x42\x3a\x6e\x2b\xb4\x07\x9a\x22\xb8\x51\x67\xeb\xb4\xea\xee\x5b\x07\x7a\xaf\x22\x5e\x63\x93\xf3\xdf\x92\x86\x03\x5d\x91\x93\x08\x19\x5c\x3b\xb7\x67\xdf\x09\xaf\x71\x92\xce\xfe\x56\xab\x54\xb3\x33\x79\x59\xd3\x07\x3d\x78\x1c\x4a\xc4\x64\x3e\x7b\x3d\x1c\x75\x8a\x0c\xcb\xdf\x7d\xf6\x2f\x08\xb1\xb7\x30\x05\x66\x15\xa1\xb6\xc3\x91\x23\xdc\x00\xb0\xa2\xdb\x22\x9e\x95\xb4\xb0\x26\x6f\xae\x24\xae\x1d\xee\xb5\xb2\x46\x41\xa4\x6e\xe8\x66\x8a\x85\x37\x00\xad\x6d\xe7\x53\xfb\x9b\x3b\x83\x1f\x29\xa2\x93\x47\x94\x19\x99\xcf\x2d\xa0\x63\x3a\x2e\xeb\x1f\x17\xed\xa3\x95\xd1\
x03\x55\xcd\x6f\x7f\x69\x24\xf8\x6b\x38\x59\x04\xb5\x97\x62\xc1\xea\x12\xde\xf7\x76\xe9\xf2\x44\xba\x99\x48\x85\xfd\xc5\x21\x1c\xe2\xc0\xf6\x5f\x4f\x41\x6b\xda\x04\xe2\xc7\xea\x23\xef\x28\xd2\xba\xc3\x3b\x69\xbe\xf8\x4e\x24\x2d\xe8\x84\x17\x3a\x77\xef\x0e\x54\x56\xa4\x87\x02\xae\x67\x36\x08\xe9\xb2\x97\xea\xa4\x7a\x5e\x2e\x67\xab\xe7\xc4\x35\xd5\x45\x87\xbf\x45\xca\xf9\xae\x11\xe9\x5a\xa1\x9d\x19\x94\xc2\xe7\x66\x74\x46\xf3\x4c\xb4\xd1\x85\xc8\x6e\x5a\x89\x3f\xf3\x40\xff\xf4\x27\x93\xdd\xfe\xd0\x5a\xb4\x46\xc8\x17\x30\xe5\x6e\x78\xad\x8b\xa1\xa4\xd6\x79\x60\x02\x2f\xd1\x0a\x95\xb5\xb6\x0f\xc8\xc3\x6d\x6a\xc2\xf4\x04\xec\xa7\x53\x2a\xdd\x0f\x9b\x16\x3e\x45\x14\x78\xfc\x62\xcd\xe0\x47\x81\x46\xff\x25\x4d\x1c\x89\x84\x81\x24\x69\x88\xad\x52\x87\x74\x4a\x1f\x7d\x8d\x41\x46\x07\x01\x16\x18\x08\x04\x0f\x94\x4a\xfd\x8c\xe9\x44\x2e\xeb\x93\xe5\xde\x3d\x87\xda\xf1\xaa\xb6\xa3\x4b\xd2\xf1\x6d\xb6\xaa\xd4\x60\x09\xd7\xd8\x44\xe0\xe4\xd7\xb7\xc1\xf7\x35\x7e\x79\x25\xe0\xe8\xbc\x69\x2e\x52\x98\x11\x2a\x3d\x37\x55\x09\xcc\x86\xcf\x69\x3a\x1a\xe3\x87\xe5\x9a\x32\x4c\x1d\x4c\x2f\x13\x5e\xad\x2b\x09\xd0\x4e\x95\x10\x89\x41\x94\xd3\xcc\x47\x4c\x5a\x0f\xb5\xc4\x08\x29\x49\x17\x4e\xc0\xf6\x7c\xea\xb8\xac\xa3\x1b\x06\x42\x4b\xe9\x25\x3f\x5f\x9f\x77\x4b\x89\x1e\xb8\xf4\xec\x89\xad\xa2\x24\x0e\x13\x40\x74\xe7\xbc\xa3\xd5\x27\x59\x9e\x99\x31\xc2\xe1\xca\x45\x18\x0f\x78\x83\xe8\xe4\x45\x9a\x23\x76\xfd\xde\x5a\xf5\xcb\xe4\x37\x7f\x81\xc0\x1c\xbb\x27\x71\xe7\x70\x5a\x54\x4f\x1f\x3b\x89\xcf\xe4\x79\x06\x04\xf1\xc2\x4e\x43\xee\x40\x05\x07\xad\xfa\xb9\x3d\xad\x52\x32\xa0\x03\xef\xd2\x82\x7e\x67\xc4\x29\xc9\x18\x4b\x7c\x4d\x98\xed\x0f\xef\x6d\x39\x1c\x96\x14\x8d\x2e\xe8\x27\x36\x35\xae\x1a\xb2\x27\x02\xac\x89\x66\x94\xc9\x40\x06\x1f\x8e\x3e\xc8\xd7\x6f\x7e\x46\xaf\x18\x36\x30\x1d\xb0\x49\xe8\x0d\x28\x7c\xbe\xfd\x23\xb8\x82\x7d\x10\x3a\x6d\xfd\x7c\x8e\xb1\x05\x8e\xfa\xac\x8f\xa4\xfd\x0d\xfe\x3c\x05\x03\xaa\x9f\xf5\x84\x16\x35\x67\x40\xaa\x33\x35\x37\xb4\xf1\xa8\x95\x4a\xeb\x37\xfc\xa0\xc0\xb8\xc1\
x36\xbd\xdf\x08\xbb\xce\x8f\x5a\xb2\x7e\x26\xf2\xa7\x59\xb3\x26\xb6\x88\x0f\xf5\xeb\x8e\x1c\x5b\xfa\xa7\xe1\x68\xf6\x3c\xcc\x8b\x78\xb0\x70\xb5\x7b\x1b\x68\x4b\x52\x91\xc5\xef\xbd\x36\x52\xf6\xad\x25\x66\x76\x86\xb5\x9a\xbd\x3a\x91\x15\xd9\x66\xdc\x84\x9c\x9a\xad\x2e\x74\xa6\x13\x3a\xdb\xdf\x7d\x51\x61\x5e\x0a\xec\x59\xe1\x95\x99\xa8\x7a\x25\x0c\x9c\xec\xeb\x2f\xe3\xc9\xcd\x01\xc4\x30\xaa\xfb\x75\x0f\x05\x0a\xed\x73\x0b\xbc\xf2\x41\xf3\x24\x0b\x0c\xc0\x2d\x7e\xdf\xf7\xf3\xc5\x7e\x06\x21\x46\x82\xc1\xc7\xf8\x03\xd1\x7b\xd7\xe6\x7c\x8d\x6e\x03\x5d\x9d\xe8\x6c\x84\x07\x6c\xad\xe9\xbb\x4f\x3c\x3d\x91\xf3\x1e\x69\x0f\xa3\x9a\xa9\xbb\xac\xde\x14\x4f\x15\x4b\xcc\xb0\xce\x2e\x28\x8e\x28\x79\x49\x9f\x68\xe1\xf9\xb0\xae\x4c\xb3\x19\x0e\x4d\xf6\x10\xf7\x8f\xe9\x15\x32\x45\x8e\x99\x07\x6e\xaa\xe6\x0f\xd5\x1b\x94\x46\xa8\x44\x6b\x84\x57\x38\xdb\x3f\xfc\xe4\x6b\xcc\x64\xe0\x27\x5f\x44\x17\x73\x38\x25\xd9\xc3\x0f\x8f\xe0\x0a\xb0\xaf\x8b\x72\x6c\xf0\x44\x61\xea\xce\x50\x3c\x08\x92\xd0\x7f\xbb\xfc\xd1\x2b\x19\xa6\x21\x33\x67\x32\x82\x57\xe8\x8b\xff\x70\x08\x8c\xa0\x41\x71\xfd\x55\xb4\x50\x10\xc9\xac\x4c\x0f\xcb\x68\xc0\xd3\x66\xe4\xe6\x4e\xc4\x06\x7f\x0b\x46\x32\xdd\x05\x51\x45\x5f\x5b\x6d\xcb\x8c\xfc\xb5\x84\x40\xc8\xcc\x34\x51\x54\xd5\xa8\x9e\x5b\x18\x2a\xe1\x39\xbb\x27\x3c\x26\xc7\xc6\xa9\xd4\x74\x0a\x2d\xe2\x52\x73\x63\x49\x12\x06\x87\xb6\x56\xfc\xbd\x6b\xcc\xdf\x23\x47\xf0\x91\xb6\x64\x85\xfc\xfe\x38\xa4\x8c\xac\xb1\x17\x11\x98\xa1\xbe\xd6\xc8\xd7\x2c\x93\xa8\xd5\x7c\xaf\xcd\xbd\xba\x61\x02\xe7\xe9\xc1\xa8\x42\x41\x0f\xe7\x86\x4a\x33\x96\x65\xd3\x60\xd8\x7a\xb1\x2b\x94\xc4\x0d\xc3\xb9\x9d\x2a\x10\x1d\x7f\x13\x5b\xa4\x33\x3f\x3f\xec\x60\x1a\xcc\x63\x89\xbd\x7e\xf6\xe5\xf4\x5f\xf2\x39\x33\x24\xb0\x6a\xe3\xef\x7d\xdf\x85\x5b\x70\x04\x17\xee\x4c\xfe\x19\x4d\xdd\xb3\xfe\x44\x8e\xf0\xd1\x33\xb1\x34\xdc\xc5\xf3\x4b\xa8\x53\x29\xb5\x19\x44\xf0\x24\x0c\x30\x8c\xf1\xcb\x60\xb9\x0b\xd4\xf2\x0c\x64\x07\x0a\x8c\xe9\x5b\x8f\x43\xb3\x7d\xc3\x85\x03\xdd\x13\x9e\xf9\x7b\x69\x70\x5b\x5e\x27\x5a\
x65\x5f\xd4\x3d\xe8\xca\xde\x1c\x35\xbe\xdf\x83\x01\xca\x5f\x6c\xbc\x78\xd1\x2b\xa3\x97\x1d\xaf\xe2\xb5\xb2\xf5\x52\x08\x5d\x80\xf9\xb9\x3f\x54\x69\x19\x24\xd5\xd8\x07\x66\x49\x3d\xed\x70\x84\xdc\x10\x74\x7e\xd5\x0a\xd9\x57\xd4\xb4\x07\x35\x55\x52\x99\xfe\x2f\x26\x58\xef\xdd\x44\xa9\x73\x76\x77\x6a\x16\x08\xab\xae\x4f\xbe\x60\x81\x6e\x31\xc2\xac\x40\x24\x25\x04\xb3\x02\x35\x72\x65\x18\xc6\x0f\x35\x1a\x89\x5c\x82\x1e\x48\xe8\x85\xcd\xd4\x90\x60\xc3\xf5\xf7\x36\x44\xd2\x40\x13\xbf\x51\x99\xe6\x74\xc4\xd7\xdf\xd0\x12\x41\x54\xf5\xb0\xf1\xe5\x19\x2c\x41\x58\x6e\x61\x84\x14\xd6\x54\x63\xe7\xc2\x36\xb4\x48\x84\x6e\xff\xf5\xc9\x78\xf4\xd6\xa1\x27\xf5\x0e\x18\xad\x16\x41\xb1\xaf\xea\x14\x00\x27\x4c\xf2\x92\xe5\xb7\x81\x2f\x86\x18\xf5\xd2\xa3\xa4\xf6\x51\x07\x9f\xf8\x07\xff\xc7\x64\x64\x47\x23\x5e\x82\x61\x6d\xf9\x9d\xbf\x01\x6c\xf2\x6d\x22\xba\x2a\x58\x24\xcc\xbd\x5d\xcc\xfe\x72\x72\xee\xf4\xb7\xa2\x4f\xad\x25\xd6\xb5\x9d\x05\xee\xbe\x09\xfd\x27\x2e\x7b\xd9\x3b\xf1\x1e\x92\x0d\x56\x42\xed\x05\xfe\xe9\x1a\x05\x64\xfa\x16\x00\xa5\xcd\x0f\x8e\x2f\x32\x77\x59\xaa\x3d\x61\x36\x73\xc3\x28\x52\xf7\x0c\x46\x31\x2c\xb6\xd3\x67\x52\x57\xee\xe0\xb9\xea\x33\x95\x4f\xdd\xdf\xf5\xc4\xb9\xad\xab\x18\x33\xb1\xa9\x76\x65\x49\xec\x48\xd0\xb1\x9e\x91\x2e\xf5\x8f\xd7\x2c\x72\x5c\x09\xbf\x80\x08\x1a\x37\xaa\x51\x8b\x8f\x60\x47\xd7\x90\xac\xc3\xe0\xc8\x4e\xb4\x42\xa7\x1d\xca\x0c\x68\x5e\x8e\xd7\xc9\xc8\x62\xa4\x4b\xda\x92\xba\x40\x2d\x84\xd1\x39\x63\x0f\x1f\x90\xc1\x15\xa4\x93\x01\xdf\x08\xa7\x22\x71\x48\xca\x6a\x54\xf1\x42\xcd\x1a\xbd\x6b\x72\xf6\xe7\x0a\xd7\x43\x83\x81\x56\x5e\x50\x41\xf7\x4f\x04\x9a\x07\x19\x25\xa8\xc7\xbc\x89\xe5\x56\xf5\x9a\x8e\x4a\xf3\x53\x9d\xbe\xee\x53\x8c\x41\x8a\xa0\x67\xeb\x58\xea\x7a\xdc\x4c\x45\x18\xfa\x3d\xb6\x5a\x7e\x3b\x29\xbc\xdf\x0d\x64\xd6\x94\x86\xc3\xe2\x31\xa0\x06\x9b\x93\x27\x75\xc5\xa9\x69\xe8\x2b\x43\x81\x00\xf2\xc9\xe3\x0f\xa2\x23\x04\x5b\xfd\xe7\x2d\x29\x49\xe6\x10\x59\x50\x38\xbb\xff\xb1\x11\x83\xc9\x3f\x7d\xf4\xf2\xae\x9c\xbf\x3a\xff\xe6\xa1\x7b\x50\
x62\x1e\xc0\x68\xfc\x0a\x7d\x52\xc4\xfc\x56\xb9\x7e\x29\xa3\x85\x9c\x7d\x8c\xad\x55\xc1\x15\x1a\x35\xd4\x06\x82\xf3\x7d\xc5\xdd\x06\x80\x3c\xae\xd3\x8a\x72\x57\x83\x0b\xd9\x49\x4b\xab\xfe\x89\xa5\x33\xf1\x01\x79\xac\x52\x33\x07\xb6\x25\x38\xb7\x6f\x49\xa0\x29\x6b\x62\xdc\xc9\x19\x39\xbd\xf3\xaa\x2e\xf1\x19\xf5\x3f\xea\x00\x7b\x10\x80\xdd\x0d\x26\xf0\x20\x10\x1d\xe1\x66\x64\x40\xc0\x28\x53\x41\xe4\xea\xbc\xfc\xdf\x87\x8b\xae\xe5\x3b\x7f\xc3\x8d\xed\xa5\x59\xc6\xf2\x11\xda\xfd\xa2\xb9\x5f\x71\x21\x9d\x67\xa2\x1f\x85\x9b\xc9\xc8\xdd\x68\x74\x21\xb1\x89\xcc\x71\x43\x89\x42\x8f\xc8\x40\x5f\x1b\x88\x37\x2e\xdc\xf8\x86\xc4\xe7\xc3\x47\x2c\x45\x77\xd3\x96\xfd\x2d\x30\xbb\xdc\x2e\xfe\x89\x45\x2a\x09\xf1\x8d\x0c\x02\x9d\x0f\xe4\x16\xee\x65\xd2\xb7\x87\x3c\x08\x76\xbf\x5b\x6f\x18\x15\x90\x4c\x6b\x72\xd1\x14\xb1\x23\xe5\xa5\x66\x7a\x5f\xe4\x93\xa3\xd5\xb3\x20\x20\x36\x92\xc3\xfb\x65\x6b\x56\xa7\x2f\x9e\xad\x89\x50\x1b\x10\x2e\x3b\x45\x1a\xc5\xc1\xec\x80\xdd\x23\x72\x5d\x7d\xfa\x6c\x26\x12\x6f\x5f\x64\xb3\x5e\x5d\x7a\xa6\x0e\xb5\x00\xe9\x32\x57\x76\x5b\xbd\x74\xae\xd6\xa2\x7c\x78\x02\x3b\xbb\x5e\xab\x0d\x22\x0a\x0b\xdb\x4b\xe3\x44\xb7\xba\x21\x6d\xc3\x18\xbd\x2c\x40\x8d\x3b\x67\xec\x0a\x13\xbc\x37\x9d\xf4\xaa\x57\x03\x43\xf7\x7c\x64\x28\xe7\xd6\xed\x17\x2d\xd1\x91\xbc\x00\x49\x59\xf1\x2d\xa6\x80\x09\xba\x90\x3f\x1c\x0a\x2d\x4e\xfc\x49\xeb\xe8\x54\x3c\x0e\xfe\x04\x9e\xa5\x7f\x88\xd4\xca\xec\x1d\xdb\x17\x5c\x23\x89\x19\x8e\x6b\x59\x59\x63\xc2\x1b\x6e\xb4\x0d\xd4\x68\x87\x59\x43\x3a\x1b\x8a\x92\x31\x25\x89\xae\x95\xbb\xf2\xa7\x3f\x0f\x38\x5a\x0a\x79\x69\x56\x2a\x49\x4d\x1c\x18\xe0\x08\x3f\xb6\xff\x63\xed\xdb\x00\x92\x71\x59\x10\xd2\x4f\xb0\xb5\x6a\x9f\x19\x2a\x41\xf0\xce\x13\xa2\x1c\x50\x92\xbe\x9b\x52\xb0\x45\x36\x0e\x1c\xab\x23\x66\xcf\x66\xeb\xf3\x38\xd1\xac\x45\xf2\xeb\xe6\xb3\xc7\x82\xd3\x00\x9c\xd4\x7f\x3a\x9b\x09\x97\x8e\x57\x9a\xec\xba\x11\x09\x7f\xc6\xf8\x95\x95\x1e\x9e\xe3\xf1\x03\xdc\x71\xd9\x56\x12\x38\xe5\x48\x8b\x2a\xe1\x8a\xa0\xb5\x04\x67\xdd\x1f\x95\xb6\xf8\x01\xbe\
x44\x4f\xf8\x23\x74\xce\xab\xd5\x0c\x0f\xfc\xb4\xd3\xea\xfe\xbd\xfb\xa7\xcb\xd3\x0f\x92\x70\x38\x17\x47\x61\x35\xe5\xea\x60\x52\xa4\xde\xac\xc3\xde\x82\xae\xf4\xd1\xba\x08\xb4\x4d\xcc\x50\x83\x91\xd4\x41\xe4\x74\xcc\x48\x1a\xe8\x17\xb8\x01\x97\xaa\x4f\x4c\x8f\x7a\xeb\xb6\x1e\x39\x76\xf2\x16\xf4\x59\x6d\xdc\xbc\xf4\xda\x9c\x83\x5c\x61\x95\xe2\x42\xd3\x82\x0d\xa5\xc4\x9c\x10\x62\x4c\xc6\x24\x60\x68\x27\x1c\x66\x22\xa3\xc8\x9c\x01\x2f\xd4\x45\x74\x4e\x88\x0d\xff\xdf\xba\xe1\xe6\x2c\x68\x32\xc3\x41\x10\x04\x00\x8f\x72\x16\x38\x11\x00\x2e\x1c\x94\x15\x6e\x30\x2c\x5a\x13\x4a\x4e\x3c\x44\x94\xa6\x20\x23\x31\x3a\x5c\x09\x52\xa5\xd6\x65\xb3\x8c\x4c\xaa\x35\x78\xa1\x0a\x76\x67\x03\x6e\x7a\x35\xcc\xfb\x36\xc5\x2b\xfd\x5f\x62\x4b\x14\x17\xda\x15\x73\xf2\x66\xd8\xf0\x54\xf7\xa3\x18\x53\xf6\xd2\x3d\x02\x41\x00\xea\x70\xd5\xf5\x27\x05\xd4\xa6\xaf\x3a\x80\xc7\x27\x96\x45\x79\xc6\x69\x59\xb3\xf1\xb6\x48\x5a\x27\x2a\x4b\xe5\xe6\x7d\xb0\xd2\xe7\xe3\x50\x7d\x78\xf1\xaa\x59\x6a\x1e\xe4\xeb\xb2\xf2\x49\x11\xda\x97\xf5\xdb\x5e\x5c\x64\xbf\x19\x3d\x10\x12\x6d\x75\x7a\xfa\x68\xb2\x3b\x37\x30\x77\x28\xb0\x12\x5b\xa2\xfc\x8d\xe0\x46\x03\x1a\x8e\x7b\x92\xe7\xd6\x3e\xd8\x25\x03\x37\xa2\xbc\x30\xc5\xcb\x60\x7e\x9c\x1b\x7a\xe3\xa2\x8d\x00\x58\xad\xcb\x9a\xf6\x8e\xc6\x33\x98\xfa\xab\xc5\x38\x10\x5d\x7f\x98\x07\xd0\x06\x72\xc3\xf3\x61\xe7\xc6\x8a\xa9\x5b\xf0\xfd\xf8\xc6\x10\x24\x31\x2c\x37\x30\x90\x14\x76\x44\x89\xe5\x45\xaf\xa4\x67\x4f\x95\x82\x49\x35\x12\xba\x1b\x10\xfc\x70\x0a\xa6\x23\xb0\x55\x30\x8d\x23\x22\x93\x5e\x50\xde\x9c\x1d\x46\xac\x3b\xdd\x95\x9d\x0c\xfb\xae\x1d\xa9\x2c\xe1\x37\x57\x0a\x2e\xfe\xe0\x95\xce\x0e\xf0\xb4\xa2\x42\xcd\x44\x4f\x5c\x3f\xa9\xc1\xbb\x8b\x6b\x95\x78\x3f\x34\xee\xed\xab\xc4\xfb\xe3\xa1\x6f\x81\x00\x8b\xae\x2c\xcd\x15\x68\x79\x05\x92\xdb\x14\x87\x24\x4c\xf4\xd1\xe3\x9f\x97\xe0\x19\xd1\x7c\x29\xa5\xc9\x15\x23\x24\xd3\x79\x55\xeb\x9a\x64\x18\xb0\xeb\xcb\x6d\x7f\xa6\x1e\x3f\xb3\x96\xe8\x35\xe3\x0a\xa7\x15\xd7\x47\x12\xa4\x9b\x20\x0c\x46\xc5\xb9\x58\xfa\x5f\x9d\x34\x59\
x5d\x12\x3f\x50\x8f\x0e\x65\x06\xd1\x56\xa7\x37\xd0\xf8\xfc\x4a\xbc\x73\xb9\xfa\xa7\xc2\x47\x16\x36\x73\x2a\x08\x1c\xf2\xe5\xa7\x26\x7a\xcd\x8d\xcf\xf9\x64\x8f\x02\xd3\x11\xf4\x49\xcb\xbb\xce\xa0\x54\x25\x28\x19\x4d\xdc\x3f\x5e\x0d\x87\x25\xf3\x2b\x93\xa5\xc4\x0b\xde\x07\xa5\x42\xc1\xe8\x42\x71\x15\x3d\xa2\x6c\xa0\xca\xca\x9e\x65\xc4\xeb\x1a\x0b\xec\x4d\x3a\xad\x80\x1f\x78\x26\xc1\x97\x5f\x20\x5e\x2d\x10\x50\xfc\xb6\xd3\x9f\x03\x6b\xfa\x8e\x7a\x10\x6e\x3e\xa6\x3b\x35\x18\xbf\xe3\xa0\x16\xfb\x8f\x17\x3b\x13\xfc\x55\x7d\x72\x66\x26\x32\x71\x8f\xc1\x69\x03\xe1\xa0\x2d\x1f\x9b\xfc\x95\x21\xf4\xf5\xcb\x13\x29\x18\x70\xf7\xcf\x9b\x0f\x5b\x0a\x1b\xf7\xa6\xbb\x82\xc3\x82\x97\xff\xe7\x19\x13\xed\x33\x52\xa6\xb9\x5d\xa9\xb3\x34\xb4\x8d\x47\x7d\x31\xd9\xb4\xfa\x60\x80\xac\x16\x04\xca\x49\x81\xdb\x19\xc0\xca\xda\x1c\xaa\x00\x91\xe3\x3a\x7c\xcc\xbb\x36\x1f\x97\x35\xad\x15\x7d\xba\xfc\x82\xb0\xe6\xd3\x74\xba\x0b\x0b\xe0\x21\x09\x83\x1d\xcd\x40\xf1\x96\xd0\x0f\xf9\xd8\x31\xa9\x84\xe6\x26\x0f\x53\xfe\xf6\x7e\xfd\xd3\xaf\x84\xd3\xbf\x05\xfc\x1e\x89\x57\x85\xec\x27\xef\xa3\xa0\x99\x73\xc5\xf0\x09\x8a\x03\xe0\xf5\xe2\x14\x9c\x1e\xa1\x38\xb8\x7e\xfb\xb3\xbc\x94\xb8\x77\x99\x67\x2f\x6b\xdc\x02\x82\x70\x7b\xe0\x14\x23\xc4\xb9\x32\x08\xa4\x8e\x8a\xae\x16\x9c\x89\xc4\xa7\xa8\xb8\x6d\xf8\xd1\x3d\x07\x54\x1b\x31\xdb\x36\x5b\x4f\xf8\x9a\x8e\x8b\x25\x7b\xd8\x9d\xe0\x95\x79\x7c\x18\x6c\x2b\xef\xaf\xbd\xf7\x4a\x0b\x89\xc5\xc1\x26\x15\x87\x88\xc4\xb2\x89\x3a\x33\x10\x82\x2a\xaa\x03\x49\xcf\x38\x1a\xf0\x32\x02\x49\xf9\xa3\x41\x19\x88\xff\xf8\xe9\xb4\x6f\x98\xa5\xc0\x4a\xe9\x08\x04\x70\x0c\xe9\x4b\x6c\x78\x31\xab\x03\x61\x80\xe4\x80\x58\x98\xb1\x8e\x67\x2f\x83\x21\x09\x51\xf2\x68\xcc\x2f\x2f\x74\x7e\x2e\xbd\x3e\x11\x9e\xa2\x52\x7d\x23\x3d\xc2\x17\x40\x38\x30\x02\x82\x7e\x51\xdf\xe4\x08\xd7\xb9\xc4\x4a\x91\x22\xaa\x01\x0e\x42\xa9\x38\xc6\x2f\x6c\x81\x38\x48\x7e\x82\x29\xeb\xd3\x4f\x97\x62\xf4\xcd\x19\xf7\xde\x35\x71\x45\x32\x61\xbc\x6f\xf3\x8a\xb6\xcd\x18\x51\xd9\x62\xe7\x3e\x44\x8e\x2d\xf3\x65\xf3\
xbc\x54\xb8\x90\xf8\xe6\x32\xbb\x1a\xcd\xaf\x57\xa7\x34\xe9\xeb\x77\x53\x46\xc9\x83\x14\xbd\x9b\x06\x64\x4b\x2c\x5e\x96\x16\x2d\x79\xcd\x06\xcb\x0b\x40\xb4\x06\x74\x91\xd9\xc5\x46\xa7\xdc\x66\x97\x9b\x27\x11\x48\x02\x63\x18\xcd\x75\x27\xe5\xa6\xe2\xe2\xc3\xfe\x3c\x94\xd8\xf8\xc1\xcf\x59\x2a\xd5\xa5\x2e\x94\x5c\xf0\x97\x7e\x92\x9d\x08\x8b\xea\x78\xf1\x8e\x34\x8e\x1d\x89\x7f\xe6\xaf\xe2\xd3\x22\xa5\x90\x74\x86\x57\xb4\xa7\xda\x3e\x15\xa2\x3f\xd5\x2f\xad\xe1\xaa\x4e\x2b\x0a\x6d\x92\x47\x9d\xd0\x0f\xc4\x27\xb6\x68\xe0\x7b\x84\x6e\x7e\xc7\xbc\xf7\x26\x09\xa6\x8b\x83\x92\x18\x28\xc3\x58\x02\x33\xe4\x7b\xb2\x3c\x98\xce\x8a\x1d\x85\xb2\x24\xe9\xc7\xbf\xb8\x8a\x6e\xca\x94\xcb\xfa\xc6\x8b\xd3\x9d\x27\x29\x8f\x07\xa8\xee\x0b\x84\xac\xfe\xf2\x4d\x7b\x94\xbf\x3e\xa9\x78\x8d\xda\xef\x31\x4e\x9f\x36\x9f\x90\xc9\x3b\xfb\x2b\x41\x9f\x8d\xea\x94\x6f\x26\xf4\x8e\x73\xe2\x58\xad\xcc\x29\xe1\x92\xfe\x9d\xf2\xd6\xc7\x1a\xed\xa5\xae\x0b\xd5\x70\xbd\xdb\xdb\x99\x70\xfb\x43\x9a\x36\x71\x5f\xd2\x7c\xc4\x4f\xf2\x52\x07\x97\x28\xcc\x69\x51\xcb\x61\xcf\xd4\x03\x68\x15\x8c\x2a\xaf\x28\x43\x46\xdf\x9a\x95\xea\xc7\x0e\x13\xbf\x40\xbb\x26\x3d\xbf\xe2\xf0\xcc\x29\x83\x10\xb1\xbc\xaa\x9f\x3c\x31\xf5\x08\x42\xa1\xfc\x9a\x0c\x77\x47\x21\x7c\x64\x65\x3b\xf8\x0c\x80\xde\x71\x6e\xc9\x13\x57\x5a\x37\xc3\x30\x1c\xc3\xb8\xbe\xc3\x06\xdc\xbc\xa9\x90\xc1\x10\xdd\x43\x53\x87\x97\x41\xc0\xc5\x01\x95\xe5\xa2\xb3\xc2\xab\xac\x67\x8e\xbc\xaf\x31\x15\xd0\x86\x60\x75\x42\x48\x65\xce\xe6\x73\xc3\x45\x14\x30\x6e\x61\xf1\x0d\xf5\xd2\xfa\x36\xbc\xc4\x57\x65\x67\x24\xd1\xce\xc5\x37\x37\x46\xe6\x67\x7d\x0f\xbe\xa1\x57\xd5\x05\x83\x21\xaa\x26\xbc\xb3\xd7\xd6\xd6\x40\x95\x09\x8d\x5d\x71\x5d\xd3\x37\x70\x2d\x11\xb9\x74\xef\x3c\xa1\xf6\x4c\xe0\x85\xf0\x95\x1e\xe7\xa7\x4c\x95\x99\x9a\xa5\x9e\xc2\x79\x53\x86\x60\xb1\x29\xa3\x4c\x89\x6b\xc1\x7a\xeb\xfc\xbc\xba\x5b\x6a\xad\x44\x62\x8c\x00\x59\x9e\x44\xc1\xd6\x77\x02\x75\x47\x3b\xb4\x78\xf5\x23\x5b\x01\x43\x58\x24\x31\x9d\xb1\x3b\x50\xe3\xe2\x6d\xc8\xa8\xeb\xf0\x55\x4b\xb3\xdd\x56\
xf2\x5c\x5a\x24\x5b\xf3\x5b\x12\x1e\x28\xc0\xbd\xc8\xcd\x81\x6b\xd4\x0a\xb5\xcc\x05\x37\x41\x8d\x2f\xff\x9b\x67\x5d\x33\x0d\xc2\xb3\x7b\x2d\x4e\x65\x30\x03\x3e\x6a\xe9\xa0\xd9\x01\x02\x00\x56\xce\xed\x2b\x34\x8e\xef\x82\xa8\xd1\xc5\x08\x5f\x13\xe6\xaa\xb5\x8c\xc0\x94\x83\x92\x86\x72\xf7\xdc\xb7\x6e\x1c\xb9\xce\x2b\x3d\x11\x0e\x4b\xf5\xcb\xdb\xf9\xd5\x3b\x6b\xf8\xf0\xbf\x41\xbb\x02\x3e\x69\x63\xb1\x01\x8f\xe3\x1d\x1d\xdd\xb5\x36\xd5\x15\x68\x0b\x44\xc5\x4e\x2d\xc9\x79\x1b\x08\xfc\x3d\x2e\x8f\x00\x09\x1a\x20\x6e\x3d\x2d\xdd\xce\x41\x0c\xaa\x52\xe2\x23\xf4\x15\x44\xa3\x33\x10\x56\x83\x0d\x06\xfc\x2d\x20\x14\x85\xa1\xed\xac\x23\x93\x09\xde\x8c\x68\x43\xfa\x6f\x19\x0c\x0a\x52\xde\x7c\x52\x91\xec\xb8\xef\x84\xe4\x48\xbe\xf2\xc3\x39\x10\x15\x37\x3f\xa7\xf6\x29\xa8\xd1\x9b\x7e\x99\x69\x76\x75\x6d\xeb\xb8\xe9\xf5\xaf\xf5\xfa\x99\x84\xef\x7c\x73\xff\x65\x37\xf7\xf7\x4d\xd2\xb6\xa6\xbd\xf2\xc3\xd7\xc4\x87\xe9\x4a\x46\x4f\x41\x8f\xff\x38\x50\xd5\xd6\xf7\x62\x99\xc1\xf0\x7c\x9c\x02\x4b\x8a\x48\x01\xcd\x99\x91\x48\xc3\xdf\x25\xf4\xb7\x22\xeb\x73\xbe\x0b\xe0\xc2\x8f\xbe\x85\xaa\x88\x13\x7f\x72\x8b\x62\x7b\xbf\x13\x50\x83\x09\xc4\x27\xa2\xcf\xa8\xaa\xf4\x1f\xd5\xcc\x49\xc1\xf5\xb9\xaf\x55\x7d\x84\xca\x1d\xaa\x7f\xfa\xf7\xb3\xae\xd6\x94\xbf\xa3\xa4\x69\x6a\x10\xf6\x69\xf1\x89\x4a\x99\x3a\x66\xec\x6e\x93\xb9\xb8\x7e\x56\x8d\x8e\x7e\x5e\x0c\x85\x31\x91\x2c\x4e\xd0\xfc\x72\x24\x38\xa8\x0b\xa4\x4c\x99\x47\x8e\x60\xb3\x7e\xa5\x35\x73\xdf\x52\x1e\x27\x92\xc5\xb9\x2e\xc7\xab\x44\x43\x94\xa7\xf4\x58\xae\x47\xbf\xfe\xb1\x0b\x1a\x04\xc8\x02\xd4\x86\xef\x01\x37\x7c\x0f\x47\xdf\xe2\x7e\x6d\xbc\xab\x18\x95\x19\xe3\x27\x3c\xba\xab\x7a\x18\x64\x42\x85\x5c\x14\x17\xaa\x72\xf5\xfc\xe3\x11\x5e\x64\xaf\x8a\x5f\xab\x49\x29\xd1\x83\x6c\xfc\xca\xec\x6a\x3b\x5e\xad\xf0\x2b\xd4\x6a\xe2\xc1\x3a\x97\x4f\xb2\x37\x25\xbe\x88\x02\x38\x0c\x92\x2d\x3e\x84\xb9\xcf\x1d\x21\x2a\x4e\x14\xed\x7d\xc1\x95\xbc\x47\xb9\x2a\x68\xc5\x69\x00\x82\x33\x63\x3d\xd7\x75\xdf\x56\x96\x9b\x57\x75\xdf\xd4\x77\xdf\x04\xec\x52\x55\x6e\
x27\x50\x85\x96\xe5\x67\x4f\x5d\x30\xc9\x6a\xc2\x74\x7d\xb7\x73\x8a\x36\x5e\xd5\xb6\x67\x83\xc0\x65\x6c\x48\x1e\x2d\x69\xdc\xfc\xbf\xf4\x3f\x2c\x69\xd8\xe6\xc1\x80\x97\x3e\x8c\x33\x71\x1e\xd2\x23\x6c\x4e\x66\x8a\x5b\xbc\x8d\x78\xc6\x24\x75\x9e\x0d\xec\x58\x45\xb1\xb6\x1f\x88\x41\xf0\xaf\x96\x9a\xf9\xb1\x43\x78\x6c\xc4\x5a\x4c\x1b\x4e\x7a\x09\x45\xc6\x67\x04\xc9\x83\x9d\x1f\x9a\x0b\xd1\x95\xd9\xfd\xbb\xd6\xfa\xc5\x26\x55\x69\xc8\x72\xf5\xe9\xa0\x82\x3d\x5e\x86\x05\x57\xd7\x3f\xce\x5e\xc7\x52\x0b\xe1\x73\x9b\x89\xdb\xa5\x54\xb3\xe8\x71\xb5\x04\x2b\x98\xc8\xaf\x79\x6d\xe7\x75\x9d\x57\x81\x69\xfa\xd2\xa2\xa7\x89\x96\x80\x18\xfe\xbb\xbd\x74\xdf\xd2\x11\x6d\x37\x0e\x49\x84\xbb\xcc\x8e\x90\x48\xfe\x80\xb8\x8b\xfb\xb4\xfe\x68\x4d\x1d\xec\x24\xf1\x5d\x8b\xf3\xa5\xfd\x8d\x45\x3c\x09\x70\x0c\x17\x19\x3e\x61\xa3\xd1\x30\x4b\xef\x77\x6b\x41\x9b\x1d\xeb\xe2\x91\x13\x0c\xc9\xac\xfe\x77\xbc\xf8\xa2\xba\xc2\xd1\x42\xdb\xbf\x1e\x59\x18\x50\x99\xb2\xb5\x16\x4a\x79\x46\x76\x49\xf2\x68\x02\xe5\xa3\x46\x2b\x19\xa0\xb7\xdf\x41\x13\x97\x47\x97\x60\x39\x4f\xcc\xfa\xe7\x74\xf1\xf8\xf2\xd7\x43\x2b\xae\xd1\x0b\xa6\x62\x18\x13\xd3\x72\x5c\x55\x8f\xe4\xaa\x42\x57\xba\x7a\xf1\x1c\x83\x0d\x4f\x48\x0d\xe9\x08\xd1\xea\x03\x24\x74\xde\x7b\x61\x1f\x0d\x82\xce\x79\x6f\x79\x74\xaf\x27\xc2\xd7\x9e\x62\x84\x3e\x2e\x24\x4b\x02\x3f\x10\x1c\x2d\x19\xe8\x7c\x52\x5f\xa9\xf8\xbc\x58\x9c\xad\x46\xb8\xc0\x2e\xa1\x01\x94\x9c\x3e\xde\x1c\x16\xb7\x03\x1f\x68\x27\x27\x13\x8d\x66\x5d\xe6\xed\x5b\xc7\x8b\xb3\xe3\xe9\x1c\x7e\xc6\xe4\xcc\x78\xe0\xa6\x4a\x4b\xab\x2b\xe9\xd1\xb7\x9b\x44\xc5\x8b\x8a\x44\x38\x8c\x9c\xbe\x0d\x28\x6b\x8d\xaa\xec\xb7\x6a\x6d\xfb\xeb\x7e\x88\x51\x3e\xda\x43\xd2\x2f\x39\x80\xd1\x21\x7d\xdf\x47\x07\x90\xda\x40\xfa\x97\xff\x8b\x24\x9d\x2c\xaf\xd1\x9c\xba\x65\x4d\xc7\xf5\x01\x1b\xc3\x0b\xe1\x3d\x13\x1f\xdf\xa7\xcb\x64\xf5\x07\xc5\x7b\xc1\x0b\x95\xbb\x87\x4e\x8c\x1a\x88\x3e\x20\xcf\x24\xe7\xcf\x6d\x5c\x19\x62\xd4\x58\x51\x3e\x75\x1c\x8a\x2c\x02\xeb\xf4\x69\x81\x0c\xe6\x9b\x72\x22\x2b\xf3\xdf\x38\xe3\
x1f\x69\xda\x58\x6e\xcc\x28\xa5\x2e\x43\x7e\x45\x38\x66\x59\x32\x48\xc2\x2d\xba\x1e\x60\xfc\x1a\xbc\x9b\xeb\xe1\x35\x5e\xfc\xf0\x11\x8e\x6e\x88\x8e\x1f\x08\x8f\xee\xf9\x9f\xf9\xe1\x2d\x92\x16\x40\x17\xef\xfe\xe8\x29\x67\x1d\xc2\xf0\x61\x78\x39\x6c\x77\xaf\x50\x59\xe8\x5b\x83\x44\x7f\x46\x19\xa9\x17\x25\xcb\x04\x6c\x4f\x2f\xa9\xdb\x3b\x3c\x4e\x88\x16\x45\x58\x6d\x34\x54\xd1\x92\x9e\xcd\x73\xda\x6d\x6d\x1d\x5a\x08\x7b\x39\xac\x7b\x3e\x6a\x20\x5d\x0b\xe5\x91\xf3\xfa\x83\xe1\xf5\x58\xb9\x69\x2d\xb6\x12\x87\x95\x8d\x21\x3e\x25\x21\x5b\x63\xa6\xb6\x22\x6b\x71\x4d\x44\x22\x39\xe7\xad\x01\x5a\xb8\xc1\xd0\xef\x8a\xa2\x8d\xa4\x5b\x34\xb0\x1f\x3c\x27\xd7\xf8\x18\xc9\x03\x21\xb0\xb8\x02\xc0\xaa\x9a\x8b\xf4\x8c\x26\x1d\xb7\xc4\x27\x54\x08\xae\x99\x4b\xb9\x25\xce\xed\xc4\x42\xbc\xcf\xf4\xd7\x9f\x65\x94\x00\x2c\xb1\x7d\xf5\xf0\x86\x0c\x9e\x02\xe5\x27\x99\x7c\x6b\x83\x57\x04\x25\x8b\x97\x55\xdd\xa4\x42\x43\xb4\x5c\x08\xb7\x5f\xd2\x38\x14\x9b\x34\x1c\xf0\x50\x7b\x4a\x75\x4d\xfb\x85\xd4\x5e\x91\xbd\xd3\xaf\x8a\x13\xca\x9f\xe2\xf8\xcf\xb2\x18\x5f\xfe\x19\xe1\x56\x9c\x64\xd9\xf5\x8a\x71\x86\x9f\x14\x08\x47\x88\x96\x27\x2f\x60\xaa\xd8\x22\x38\xcd\xd5\x76\x6c\x94\x57\x74\xa1\x84\x11\x7e\x5e\xb2\x23\xb4\x02\x50\x40\x45\x9f\x7c\x1e\x89\x50\x20\x3a\xbe\x0f\x74\x53\x87\xd3\xee\x8e\x20\xd9\x15\xd6\xab\x5a\x63\x31\x9d\xe9\x73\x6e\x4a\x7e\x34\xd9\x9d\xe5\x41\xf6\x9d\x5b\x6f\xe8\xe8\xee\x90\x61\x95\x4c\xa0\xe4\x81\x4a\x5d\x73\x5c\xc9\x48\x4a\x6f\x7d\xee\xab\x5a\xff\x2c\xf9\x3a\xef\xd9\xd7\x44\x13\x84\x95\xe1\x08\xe1\x23\x06\x66\x28\x27\x69\x54\x1a\x71\x60\x3a\x10\x2d\xa3\x51\x63\xfc\xf4\xee\x66\xd3\xe7\xda\x18\x17\x3b\xff\x34\x1e\xa2\x35\x77\x1e\x82\x9e\xab\x77\x6d\x83\x11\xba\xec\xfa\xdd\xe2\xfa\xc5\x75\xe3\x29\x96\x9e\x76\x1f\xab\x0a\x9d\x35\xaa\xfa\xe3\xfb\xcc\x39\xd0\x15\xdd\x49\x21\xc3\x01\xd9\x3b\xe0\x26\xa0\x1f\x74\x43\xbe\xd9\xe0\x8d\x11\x8d\x6d\xcb\xf4\x60\xc3\x8a\x78\x12\x8b\xeb\x26\x62\x0d\xd9\x23\xe1\x13\xf1\x14\x88\xa0\xda\x98\x96\x55\xd3\x55\xf7\x4d\x79\x37\x9a\xa0\x41\xbd\x34\xf4\
xcf\x86\x12\xab\x0c\xa0\xc9\xa0\x40\xe6\xca\x92\x1d\x25\x68\x73\xf2\xb6\x68\x47\x19\x2c\x01\xd5\x16\x5b\xd5\x69\xc3\x30\x5d\x36\xa4\xaf\xec\xa6\x8e\x08\x60\xfb\x3e\x2c\x14\x04\xdb\x8c\x36\xee\x15\x24\x1d\xe4\x4b\x0e\x7e\x9a\xef\xda\x4f\xa6\x9a\x8b\x5e\x04\x4d\xd6\x21\x04\xc1\x0b\xfe\x13\xe4\xc3\x2e\xca\x25\xca\x03\xd1\xc9\x8a\x13\xcd\x70\x1c\x03\x57\x0d\x7f\xe9\x3f\xa0\xef\x4b\x2e\x31\xbd\x9e\xc8\x8e\x91\x64\xfb\x4b\x20\xf2\x4c\x7c\x82\x17\xaf\xaa\xe6\x4e\x46\x37\xb0\xe6\x5b\xdc\x7f\x0d\xf3\x9b\xac\x0b\xa1\x2a\xeb\x4b\x91\xa7\xfe\x29\x17\xc9\xc3\x3a\x90\x52\x0d\xd5\x9b\xbe\x96\x85\xe8\x91\x1d\x08\x69\xa2\xc1\x9d\x1e\xe7\x29\x7b\xc8\x14\x00\xde\x84\x93\x80\xde\x62\xa7\xd1\xc5\x30\x2e\x24\x66\xdc\x0b\xfc\xf7\x66\x06\x01\x5c\x73\x66\x8c\xf8\x0e\x2f\x2f\x61\x26\xc4\x1f\x9f\xca\xfc\x83\x11\xc5\x65\xcf\xbc\x5b\x39\x3e\xc4\xb2\x76\x2e\x6b\x08\x7a\xb9\xb8\x41\xc7\x46\x53\x3b\xc2\x63\x7e\xf9\x31\xbc\x67\x04\x85\xc8\x71\x65\x1b\x4c\x90\xce\x50\x58\x1d\x96\x4d\xc7\x84\x6e\xf0\xce\x7e\x75\xf6\xa3\xd9\x8f\xa8\x48\xa0\xeb\x11\x0d\x26\x3b\x0a\x2f\x4e\xaa\x41\x50\x3c\xe4\xea\x59\x95\xe9\x07\xcc\xaf\xe8\x9a\xde\x52\x9f\xf4\xed\x16\xc7\x64\x4d\xc9\x63\x44\x03\xf8\x90\x71\x04\x76\x85\x05\x95\x1f\x10\x8c\xe8\x12\xb4\xe3\x3c\x5c\x87\xfc\x4b\xf5\x5f\x3e\xae\xeb\xb9\xa1\x76\x89\x57\x15\x49\x8d\x39\x36\x99\xd1\x75\x4e\xa6\xd0\x39\x2b\xf7\xa6\x9c\x8f\x1a\x68\x35\x50\x4d\xf4\x97\x35\x7e\x4d\xf1\xb5\x14\x38\x01\x18\x95\x11\x40\xb5\x7f\xa9\x98\xd9\x1f\xa1\xe1\x80\x55\x47\xce\x9e\x49\x93\xb7\xc0\xf9\xb9\xc5\x77\x41\x96\x53\x3b\x76\x31\xab\x2a\xb8\x15\x6e\x30\x2d\x3b\x7e\x22\x38\x1d\x4c\x80\x55\x8b\x66\x4c\xee\x9a\x93\xb7\x1c\x28\xd9\xf3\x16\x30\x57\x0a\x7d\x0d\x00\x0a\x80\x1c\x19\x1c\x08\x99\x78\x38\x45\x14\x27\xea\x86\x70\xc8\x27\xc1\x87\xd3\xc0\xe5\xdd\xe0\x6c\xfe\x9a\x1b\xe8\x66\xc2\x5a\x74\x45\xb3\x67\xa4\xb2\x8e\xf2\x8a\xe2\x38\x66\x1b\xca\x50\x3c\xe1\xcb\xf7\x28\xc8\x3c\xe1\xd6\xed\x5d\xe0\xda\x79\x55\xb1\x37\xd5\xe5\x0f\x1e\x79\xcb\xa7\x05\x8e\xe0\x97\x0b\xbd\x10\xd2\x42\x01\x4d\xec\x0e\
x73\xc6\x8c\x11\x74\x2e\x01\xca\x9a\x27\x3d\xc2\x2d\xd9\x91\x40\xf9\x43\xd3\x30\x81\x15\xd7\xbf\x7e\x88\xfa\x47\x58\xee\xd4\x78\xbe\x92\x47\xec\x73\xe0\xd8\x04\xfa\x40\x7e\x46\xc9\x9f\x86\x66\xc3\xa5\x13\xeb\xba\xce\x43\xfb\x24\xde\x50\x97\xd2\x71\xac\x48\x96\x1d\x03\xf0\xa5\xac\xad\x73\xe6\x7b\xf7\x16\x47\x9f\x58\x35\x1b\x54\x75\xf1\x98\x3d\x1c\x59\x09\x5e\xa6\xec\xa5\xc8\xd1\xd9\xd4\x01\xa0\xd9\x51\x87\xa5\xe5\x97\xb2\x0e\xc8\x4f\xa9\xc3\xd9\x21\xb3\x50\x21\x7d\xd9\x35\xa0\xda\xd0\x22\x0b\x11\xb0\xaa\xa2\xa8\x91\x06\x53\x40\x8b\x2d\x00\x70\x79\xca\x4b\xb6\xd1\x3e\xc1\x1a\x2e\x33\x98\xd8\xc4\xe1\x09\x56\xc7\x96\x5d\x8f\x3e\x2a\xd4\xa4\x5b\x0d\x61\x5e\x1b\x49\xaf\xe4\x82\x4c\xde\x72\xd6\x2f\xb8\xbe\xdf\x42\x81\x3b\x2a\xc8\x55\x6e\x0b\x87\x96\xc2\x1a\x16\x4a\x31\x0d\xd9\xdb\x9b\x4f\xca\x4e\x64\xd6\x6c\x0d\x93\x98\xb2\xd2\x01\x90\x05\x8c\xc0\xb1\x07\xca\x63\x83\x3b\xcd\x7b\x75\x18\x4a\x53\xb4\xe6\xde\x3b\x69\x7e\x07\x59\x7e\x47\x26\xc1\x47\xea\x29\x2d\x20\x04\x67\x36\x92\xdc\xf8\x55\x0f\x55\xc9\xcf\x06\x93\xf2\x93\x87\xca\x13\xfe\x88\x49\x09\x8d\x2e\x47\x00\x06\x3c\x12\x8a\x5a\xf8\x31\xd7\x65\xfe\x81\x88\x52\x61\x80\x1f\x14\x8e\xcb\x9a\x1d\xc8\x56\x6c\x68\x9a\x18\xd2\x9d\xcb\xed\x95\x6b\x9a\x3b\xaf\xe5\x45\xee\x48\x7c\x4d\x1f\x61\x76\xf3\x01\x7a\xa9\x73\xcc\x22\x5c\x45\x4a\x7e\x86\xd4\x54\x53\x86\x91\x6b\x38\x05\xfe\x59\x33\xfe\x9b\x73\x0e\x11\xf2\x71\x42\x01\xec\x33\xe6\x10\x07\xba\xe6\x07\x5e\x20\xa8\x98\x92\x42\x65\x43\x3e\x23\x7a\x97\x22\xc7\x96\xdf\x87\xf6\x5f\x05\xa9\xfe\xfd\xf9\xc7\x34\x82\xce\xd4\xbc\x77\x9e\xa1\x35\x0f\xa2\x2d\x17\xea\x2c\x57\x87\x8d\x32\x38\xa0\xe3\xb0\xa6\x0f\x7b\xb2\x69\xef\xfe\x64\x7a\xcc\x8c\x91\x8f\xec\xcc\xb1\xc4\x67\x78\x9f\xb4\x30\xff\xbc\xc9\x73\x9a\xec\x63\x96\x50\x25\x4f\x11\x48\x7d\xb3\x32\x11\x27\x9c\x1d\x78\x7c\x46\xad\xf6\xe9\x7e\x4e\x40\x86\xe7\x5b\x32\x8f\xf5\xbc\x70\xd0\xc0\x97\x5d\x00\x68\xcd\x50\x69\x42\xe6\xfa\xaf\x39\x64\xa6\xec\xf0\x8e\xc0\x63\x8a\x18\x5d\x8a\x0c\x03\x92\x39\x8b\x0f\x2b\x3c\xc3\xfb\xb0\x91\xf4\x7f\x3c\
xa6\xb8\x95\xfc\x50\x21\xd8\xa2\xaf\x4b\x95\xbe\xf1\xc0\x3b\x7a\xa5\xc6\xf8\xe2\x59\x5c\x20\x24\xb0\x07\x48\xa4\x02\x7c\x2b\xdf\xb4\x1c\xc1\x1f\xfd\x92\x6a\x8b\x74\x35\x0e\x10\x1e\x30\x5c\xda\x97\x67\x25\x57\xcf\xbb\xc2\x95\x0b\x59\x2e\x06\x0d\x50\x60\x84\xda\x5f\xe4\x3c\x44\x69\xc9\x54\x45\x00\xf9\x71\xcb\x70\x58\x8f\x30\x41\x97\x04\xd2\x81\x5e\x62\xd0\xb9\x89\x51\x95\x74\xc1\x9c\x9f\xec\x1b\x07\xe9\x7c\x8e\x1a\x9f\x0d\x19\x52\xe7\x3e\x3a\xcf\xd4\xda\xcf\x02\xbf\x29\x41\x9c\x3e\xf9\x23\x14\x29\x1c\x97\xb4\x04\xcb\x2b\x3a\xa3\x49\x7c\x6c\x5f\xe0\x50\x81\xf7\x1f\x83\x7f\x30\x40\xe1\x90\xa3\x4b\x80\xd7\x0a\x6e\xd3\x7b\xa8\x9b\xdc\xe0\x82\x3d\x3e\x5c\x56\x71\x2f\x46\xf4\xbf\x79\xb4\x85\xc8\xbe\x85\xf0\xee\x09\xb0\xd1\x16\xc5\x15\x0d\x78\x99\xf6\x8b\x1a\x8f\x4a\x1b\xf7\x4a\x6b\xf7\x0a\x57\x0e\x73\xca\xfb\x45\xfa\xa7\x29\x93\xc7\x3e\xbf\x61\x5e\x1a\x4a\x57\x4c\x59\x7c\x69\x02\x2f\x5a\x46\xdf\xe8\x12\x8d\x0a\x3a\x4c\x8d\xc4\x2b\x63\xa7\x46\xb7\x7e\xb3\x4c\xd0\x07\x55\x77\x5e\x8d\x2e\xfe\x77\x00\x5a\x66\xb8\x73\xef\xe3\xbc\xfc\xe7\xbb\x1d\xa8\x90\x6d\xd8\xf0\xbc\x7f\x5b\x35\xe0\x22\x15\xed\xd5\x6a\xc0\xaa\x1c\xa6\xe0\x5f\x8b\xaa\xbf\x4f\x11\xb5\x54\xf4\xa7\x52\x2d\x76\x2a\x6d\xef\xd7\xbd\xb0\x44\x3d\x5e\x66\x23\x2e\x79\x53\x62\x56\xfe\x5f\x8e\x88\xa3\x85\x3f\x48\x13\x0d\x49\xf1\x55\xaa\x90\x24\x6c\x5f\x66\xff\x38\x81\xe9\xd1\xb1\xa5\x46\x4f\xd9\x66\x0b\x8d\xf7\x9d\x06\x7b\x0b\x85\x52\xbc\x1c\x82\x70\x58\x55\x64\x7d\xba\x38\x14\x1f\xd2\xb4\x95\xb2\x27\x61\x73\xba\x88\xcf\xa4\xc8\x82\xd6\x90\x39\xb7\x26\xea\x8f\x64\x92\x3d\x95\xbc\xe0\xdc\x54\xe0\x5f\x03\xde\x85\x06\xea\x3a\x49\xa5\x66\x4d\xa5\x65\x4f\x23\x13\xcd\x0a\x9a\x70\xd3\x71\x59\x89\x92\x0c\x67\xb2\xc1\x0e\xa1\xf1\x7d\xd1\x74\x7a\x1c\x70\xa8\x6f\x9f\x88\x41\x09\xc3\x61\x59\x9d\x69\x10\x93\xd0\x9f\xf8\x14\xf8\xc2\xab\xb8\xd6\x23\xbc\x40\x6b\x28\x23\xbb\xb4\x3a\x99\x21\xe5\x37\x1f\x16\x9c\xc0\x40\x8a\xb7\xc0\x30\x73\x8b\x90\xe0\xc6\x65\x8a\x17\x4b\xb8\x67\x51\x55\xcd\x54\x55\x91\x63\x4a\x29\x90\x6e\xa2\x0d\xc6\x1a\xd1\
x62\xd1\x1c\xa8\xac\xf9\x6b\x34\x81\x0f\x63\xab\x23\x01\xef\xbb\x4b\x0f\xfe\x33\x5e\x74\x15\x11\x65\xd3\xfd\x3b\x14\x20\x45\xfe\x7d\xcf\xe9\xd8\xf4\x5c\xbd\xb6\x89\xef\xc3\x86\xe7\x3f\xfe\x2f\x60\x6c\xce\x5a\x80\x6b\x56\x59\x9b\xc6\xe9\x0a\x5c\x61\xe1\xb7\x0e\xa4\xf4\x42\x97\xf6\x52\x97\x46\x50\xf3\xf6\xd6\x8b\xea\x48\x41\x2b\x2c\xc2\x6f\x11\x7f\x81\xd8\xd8\xa1\xf8\xaf\xea\xcb\x9e\xf0\x3f\xc7\x13\x06\x43\x02\x54\x1e\xa8\x1b\x16\xdf\xe7\xdd\x42\x00\xe0\x02\x92\x20\xe1\x65\x9a\x7c\x8c\x6d\xb4\x11\x85\x21\x71\x8d\x67\x57\x15\xcb\xa8\x8d\x8f\x18\x1d\x73\x1b\xbc\x73\x2d\xc6\xb8\xc5\xbb\x24\xf3\xcd\x8c\x40\x87\x66\x43\x06\x1f\xf9\x7f\x8f\xdf\xf8\xbf\x21\x09\x03\xd1\xc9\xfd\xcb\xcb\x90\xb3\x0f\x8a\x30\xf8\x35\xaa\x7b\x6e\x04\x25\x04\xa2\x93\xd0\x9f\x75\x16\x7f\xe3\xab\x7c\xd7\x74\x63\xbe\x41\x84\xcd\x66\xa0\xef\x85\x57\x16\xd1\x2a\xf1\x51\x44\x22\xea\x58\x5d\x9f\x7f\xb3\x45\xd2\x40\x65\x4d\x71\xa1\x8f\xdc\x8b\x94\xe6\xe8\xda\x21\x7e\x0e\x2c\x62\xbe\x94\x27\x3d\xbf\x9a\xa4\x7d\x1a\x4e\xc2\x65\x4b\x81\xa8\x0f\x5e\xc8\xe6\xb8\x44\x9f\xf8\xce\x7d\x71\x8b\xb8\x62\x47\xf8\x2d\x92\x8c\x20\x9c\xe3\xfe\x5c\xf2\x63\xab\x3f\x8f\x3a\xd5\x98\x01\x15\x51\x98\x94\xf7\xa0\x89\x2f\x80\x97\xa5\x2e\xad\x97\x76\x25\x3b\x92\x58\xa1\xd6\xbb\xf2\x5a\x22\x18\xba\x19\x74\xba\x87\x7d\x64\xed\xa5\x5e\x95\xa3\xf3\x9e\xc1\x7d\x0e\x84\xa4\x89\x71\x6c\xc8\x1f\x4e\x92\xcb\x7b\x91\x1b\xaa\x4b\x0d\x38\xe5\x7c\x9b\x1d\x08\x2e\x9e\x3b\x51\x05\xc1\x92\x12\x67\x4c\xf4\xbf\x94\x56\x16\xea\x99\x89\x42\x9f\xe2\x59\x9d\x67\x50\xc3\x4a\x71\x0b\x56\x58\x92\x4d\x66\xfb\xb7\x07\x27\xc9\xcf\x3f\x96\xd8\x81\x97\x40\xf8\x1c\x88\x6a\x4a\xe5\xea\x15\x3e\xb2\x68\x0f\xb1\x70\x59\x89\x93\x24\x29\x1e\x50\xea\xc7\xca\xfc\x59\x26\x69\x82\x1e\x3f\xf8\x0b\xba\xec\xd3\xbf\xef\x29\xf9\x3b\x3c\x64\xb3\xf1\xb5\xa6\x81\xa8\x04\x21\x8b\xb3\x4b\xa2\x1f\x26\x6b\x62\x6c\x19\x5e\xc1\xbc\x57\x89\x08\xb0\xe7\xbe\xd7\x1f\x7c\x0e\xa5\x43\x25\x67\x16\x70\x27\x98\x1d\xf4\x43\x11\xab\x5a\x5e\x1f\x86\xad\x1b\x31\x2e\x7d\x62\xb1\x54\x4e\x14\x26\x4e\
xd1\x32\xd0\x2d\xa4\x1b\x7a\x01\x0f\x34\xc3\x8b\x37\xb9\x75\x6f\xff\xfa\xf3\x52\x44\x95\x00\x24\x8e\x23\xce\x84\x92\x84\x88\x46\x2a\x50\x5c\x9b\x9f\x16\x5b\xe9\x2f\x1f\x69\x9a\x60\x7a\x42\xa5\x77\x2f\x36\x9f\xf0\x32\x24\x27\x9a\x86\xfb\x44\x61\x8d\xa8\x79\x69\xfd\xee\x6a\x06\xa2\x9e\x4c\x5f\x9d\x3a\xe8\x42\xb0\x91\xe5\x9d\x3d\x47\x36\xa9\x22\xc1\x90\xf8\xb4\xe5\xd1\xb6\xb9\x15\x4c\xf3\x73\x2e\xbb\x17\xea\x9e\xab\x29\xc3\x7e\x9e\xd8\x75\x55\x67\x97\xe8\xfb\x92\xe5\xee\x99\x2e\x00\xfd\x55\x6c\xee\x6a\xfa\x97\x23\xd6\xf4\x7d\xce\xb3\x4b\x2c\xe1\x54\x5c\x3d\x25\x67\x32\xe2\x00\xad\x3b\x0f\xff\xc3\xc1\xbc\x24\xf5\xd0\x7e\xcd\x1d\xc6\x9d\x38\x14\x31\x49\xf6\x69\x41\x4d\x4e\x2b\x78\xd0\x05\x4d\x66\x02\x63\xc2\xfb\xae\x09\xde\x39\x4f\x94\x6c\x1a\x02\x23\x8f\x72\x5a\x88\xbd\x5b\xe5\x91\xce\x9f\x17\x6f\x8e\x38\x1b\x3e\xfc\x26\x10\xb9\x02\x26\x52\x7d\x77\xc4\x68\x62\xe4\x2f\xa9\x24\x68\x01\x85\x86\xfc\xa7\xf1\xbe\x68\xf9\x02\xb3\x8a\x23\x99\xec\x52\x19\x75\x37\x46\x6a\x6d\x8c\x28\x4e\xb1\x31\xe5\xc5\x43\xf4\xa0\x38\x7e\x2a\x07\x6a\xd6\xeb\x85\x1e\x9c\xd4\xe6\xba\xd3\x92\x86\x93\xf2\xea\xa6\xce\x81\xe7\x14\x7d\xca\xfc\x63\x3a\x89\x1d\xb3\x89\xbc\x5f\x49\xe8\x69\x2e\xf9\xb9\xd0\xb1\xc1\x38\x59\x1c\x22\x92\xab\x6d\x41\xd0\xe7\x02\x03\x66\xed\x18\xe6\x0b\x9e\xa8\xf7\xfc\xe6\x5f\xcd\x23\x64\x39\xab\x98\x7c\x9f\xbc\x5f\x89\x4b\x1a\x6e\x17\x42\xb2\xd3\x80\x51\x26\xc7\x45\xc2\xfe\x9c\xd5\xd7\xdc\x7f\xa2\xdb\x35\x95\x4e\x97\x57\xe8\x60\xd2\x19\x0d\xf8\x9f\x58\xa4\xad\x59\x81\x1b\x46\x85\x86\xe9\xd5\x38\xef\x7d\x18\xa2\x34\x31\x0c\xa8\x74\x3b\x6d\x40\x4d\x01\x7d\x62\x48\xc3\x0b\x18\x9d\x83\xee\xa4\xb9\x72\xc3\xa2\x7f\x3e\x6c\x45\xda\xae\xea\x3b\xa2\xfc\x46\xa3\xf5\x65\x03\x5e\x2d\x0d\xe7\x13\x4c\x57\x51\x82\xed\xf0\x1d\x08\x33\x7e\x1e\xa8\x50\x5e\xa7\xdb\x0b\xc1\xf2\x92\xe4\x13\x1d\xbb\xea\x1e\x03\xa0\x9b\x36\xcc\x3d\xaf\x70\xab\xf2\xca\x32\xb8\x3b\xf6\x4d\xa1\x6e\x6f\x91\x8d\x83\x24\x47\xd2\x2b\xd2\x60\xf5\xc7\x32\xb5\x74\x38\xe8\x98\x26\xe6\x48\x26\x00\x76\xc7\x02\x08\xd0\xae\x3f\xa8\
xef\xa8\x81\x07\x0f\x3e\x36\xff\xda\xc0\xb5\xae\x6b\xbb\x2c\x9d\x9e\x1d\xd9\x6d\x5a\xcf\xd2\x13\xd5\x31\x92\x3c\xdb\x3a\xe8\xbc\x96\x48\x56\x9c\x25\xc7\xb4\xb1\x8e\x50\x67\xe2\xec\x52\xe5\x1f\x63\xd2\xc6\xa3\x34\xd1\x9a\xd3\xc7\x92\x4d\x51\x54\x4f\x8a\x93\x76\x41\x84\x97\x56\xb8\xe4\xce\x9b\x4a\xd0\xc5\xf5\x67\x5d\xb4\xa1\xca\x1b\x9b\xcf\x33\x98\xa0\xbf\xe5\x05\x95\xd6\xd9\x3d\x59\xdb\xda\x9d\xb6\x1c\x24\x08\x6f\xb0\xb4\x56\x6b\xa6\x50\x6b\x23\xcb\x57\x9f\xa4\x11\x2a\x51\xb6\xc7\x16\x41\x45\x5f\xd7\x71\x79\x9c\x06\x76\xf7\x6d\xff\xb8\xb8\xd7\x8c\x7d\x9e\xb6\xfb\x27\x91\x34\x00\x0f\x28\x94\xe1\xb4\x21\x5d\x15\x48\x79\x8c\x5b\x1e\x59\x7a\x3b\xdc\x3e\xc7\x12\xdb\x51\x65\xc8\x83\xab\x21\xc5\xfe\xc5\xc1\x05\xdc\x3d\x4a\x57\xae\x2b\xb1\x83\x95\xd5\x30\x5f\x26\xc7\x94\x31\x52\x09\x01\x94\x4c\x58\x98\x08\x7d\x65\x33\x30\xd9\x0e\x60\x49\x9d\x57\xec\xc8\xd6\x90\x28\x8c\x7d\xf9\xa7\x13\x93\x40\x29\x45\x43\x82\xc6\x20\x3e\x7a\x60\x59\xb2\x11\x11\x7f\xfe\xfa\xfb\x75\xfa\xce\xca\xfc\x1f\x6a\xc0\xba\x67\x0b\x62\x20\x0d\x7f\x5a\x3d\xbf\x89\x4e\x17\x47\x6c\x16\xba\x00\x7e\x37\x01\x15\x51\x40\x96\xe0\x7b\x33\xc4\xc4\x8c\xd1\xfb\xa0\x9d\x79\xc7\xb0\xdd\x55\xb3\x1a\x4b\x14\x1a\x9d\xc1\x5f\xb2\x1f\x92\xc0\xd9\xdf\xb7\x9e\x55\xe6\x2e\xaa\x15\x5a\xf2\x33\x52\xdd\x7a\xfe\xb1\x8b\x0b\x17\xde\x8f\xb0\x78\x67\xc6\xf1\x32\x1a\x3a\xd6\xbb\xe5\xc9\x9a\x60\x73\xea\xc8\x60\x1e\x03\xad\x31\xd5\x6a\xb7\x74\x0c\x58\x3b\xf7\x56\x94\x21\xa9\x36\x02\xb8\x76\x93\x54\xa8\x3b\x75\xd5\xea\x83\xaa\xad\x77\x61\x43\x4b\xd1\x45\x47\x72\x81\x1d\xd0\x19\xca\xf4\x69\xcb\x16\x63\x55\x96\x2b\x2e\x89\x8b\x3d\x22\xb5\xee\x56\x78\xcf\x7c\xc3\x94\x11\x35\xe8\x76\x50\x57\x90\xe4\xac\x46\x27\x86\x72\xfa\xca\xe0\x18\x21\xef\x80\x52\xb9\x6b\xf9\x24\xe0\x9a\x5c\x2c\x8b\xb2\x05\x39\x22\xff\x63\xbe\xc0\xce\x68\x59\x9d\xf0\x84\xb2\x60\x52\xbb\x77\x32\x8a\xa1\xd4\xf0\x35\x43\x5f\x10\xe4\xb8\xe2\xea\x44\x4b\xe3\x5e\xf0\x42\xd4\xf0\xe1\x53\x6a\x38\xa7\xaa\xcb\x44\x03\xb8\x70\x04\x43\x75\x04\xb5\xc9\x8f\x68\x1b\xda\x2f\x5b\x51\x3d\
x00\x04\x1e\x52\x4d\xcb\x1d\x04\xab\xe1\xd1\x6c\x9b\x24\x3c\xb2\x80\x27\xbe\xa9\xdc\x1c\x9d\x33\x8e\x31\x49\x61\x75\xae\xce\x59\x10\x35\xba\xba\x7d\xc5\x12\x3d\x70\x08\xf9\x74\x16\xa0\xb9\x47\xef\x6a\x82\x4f\xe4\x8c\x74\x4e\x27\x0e\xbc\xd0\xc4\x44\x66\x97\x3a\x05\x1b\x21\xf2\x38\x75\x98\x2e\xca\xaa\x7e\x0d\xa9\xeb\x1e\x04\xdb\xda\x61\x70\xc2\x6c\xfc\x0a\x00\xb0\xa2\x60\x11\x46\x0f\xdd\x09\x3a\x89\xb0\x74\xfe\x44\x5c\x66\x28\x33\xe6\xd8\x50\x39\x3c\xf1\xf8\xd6\x71\x27\x4b\xd4\x42\x85\xf5\x56\xc5\x23\xc4\x5c\x2e\xee\xb1\xc2\x5c\x6e\xc5\x32\x73\xa4\xe4\x99\xb1\x6f\xac\xe8\xf4\xcf\x48\xc7\xe4\x89\xdd\x5a\xec\xc7\x7a\xd5\x9c\x86\x43\xe3\xf2\x3f\xdf\x01\x82\xb5\x16\xb6\x63\xf5\x87\x7f\xef\xa4\xe2\x91\xa9\x99\xd1\x53\x46\x45\x36\xdd\x75\x59\xd5\x06\x2b\xa7\x5e\x6c\x6c\xc7\x1a\xf9\xd6\x99\x1a\x0a\x97\x9e\x3d\x8e\xf9\x92\xa8\x88\x70\x5e\xf5\x2e\x2f\x6e\x43\x73\xdf\xcb\xc7\x34\xe0\x3d\xc6\xac\xfc\x66\xae\x30\xa7\x1c\x28\x3f\xd2\x46\x73\x9d\x07\xcb\x8f\x80\x0a\x2b\x42\xc9\x60\xa2\x89\x1b\xdf\xe1\x09\x04\x34\xf3\xbd\xf7\x34\xa2\xbd\x4a\x82\x21\x00\xb1\x48\xe9\x2f\xa6\x91\xd4\x10\x38\xd1\x17\x79\x71\x89\x43\x8e\x51\xa8\x96\x48\x43\xb1\xbd\xa6\x76\x4b\x56\x38\x5e\x7e\x66\xb6\x12\xe7\x1e\x8f\xca\x58\x4f\x81\xa9\x7d\x40\x46\x1b\x63\xf6\x5d\xb7\x4c\xea\xf5\xf2\x0b\xfe\x3c\x8a\xe3\xef\x21\x49\xaf\x1f\x72\x33\x8a\x93\x77\x45\x1a\x94\x47\x98\xc1\x30\xf0\x65\xba\x54\x2d\x56\x72\xa6\xb1\xc8\xed\xd7\xc1\x75\x7b\x9d\x31\x77\xf6\xcf\x18\x68\x20\xa2\x09\x93\x87\xd3\x2b\x9a\xcd\xb0\xb4\x98\x10\x67\xbb\x9c\xef\x61\xc0\x2f\x95\xa2\x64\x7f\x53\xb7\x32\x70\xc5\xc7\x0a\x73\xe5\x00\x70\x1f\x81\x22\xbc\x46\x72\x43\x67\x58\x17\x14\xa3\xaa\xaf\x03\xf5\xcb\x0f\x97\x92\x3e\x10\x3d\x1f\x3e\xef\xd4\x94\x2a\xf6\xa6\xe3\x70\x58\x5b\x79\x02\x57\x31\x92\xfc\x8e\x02\x70\x0a\x13\x0f\x84\x0c\x3a\x30\x0f\xd7\x95\x38\xd1\x7d\x3c\x45\xf7\x02\x44\xf8\x1e\xbf\xed\x40\x88\xf7\x62\x04\xe7\xba\x88\xb9\xc9\xe6\x35\x46\x78\x06\x4c\xda\x66\x04\x91\x53\xb9\xf3\x30\x1a\x0a\xd9\x5b\x60\x45\x54\x4c\x1a\xc7\x68\x6f\xa1\x68\x1d\
x45\x47\x5e\xf8\xe4\xb9\x5c\x92\x64\x22\x31\xc4\x10\x9e\x6b\x23\xe2\x80\x13\xbf\xac\x38\x61\xf3\x31\xb0\x6f\x02\xf3\x90\x7f\xd8\xea\x92\xe1\x2f\x44\x6f\x4c\x67\x34\x1c\xbc\x10\x02\x2b\x66\x90\xa9\x35\xee\x3e\xf7\x12\xd7\x3d\xd9\xee\xe9\x35\x02\xe7\x81\xea\xc2\x1f\x3b\x67\x12\x40\x91\xbb\x0c\x1e\xc0\x14\x61\x53\xdb\x0f\xe3\xb2\xa6\xd5\x45\x02\x22\x65\x70\x0b\x44\x54\xf0\xca\xb2\x0c\xb8\x56\x94\xc4\x58\x05\xed\xa5\x16\x39\x90\x25\xcf\x63\x5b\x9a\x74\x0e\x75\x27\x40\x52\xc8\xae\x5e\x9a\x9d\x42\xf6\x49\x58\x4c\xc5\xf7\xd5\xb8\xf9\xe2\xf9\x9f\x87\x12\xb9\x0c\x7c\xa0\xb3\x24\x77\x8f\xb1\x45\x6f\x81\x68\xcf\x0f\x24\xa7\x01\x0e\x38\x8f\x02\x71\x5e\xc4\x0a\x37\xaf\x49\x21\x0e\x1e\xa1\xb7\xf4\x98\x8a\xbd\x7b\xd7\x5a\x81\x83\x00\x4e\x08\xba\x6c\x05\x93\x52\x29\x89\x6f\x36\xe1\x3d\xcf\x77\xd7\xb8\xfb\x68\xa5\x5b\x59\x87\x0d\x26\x30\x95\xab\xff\x15\xfc\xb2\xbf\x5c\xe4\x9b\x1b\xe5\x73\x68\x21\xfe\xbc\xe6\xd2\xbf\x45\x79\x29\x3e\xc3\x5a\x91\xbb\xa9\xc1\xbe\x69\x6f\xf3\xeb\xbd\x0f\xbf\x49\x84\x0c\x32\x03\x72\x3b\x79\xd8\x12\xc4\x81\xe8\x3c\xfa\x9c\x7e\x0f\xff\xfc\xc4\xef\x99\xa9\x0c\xfb\xcd\x47\x6a\x7a\x5c\x2c\x57\x2b\xbb\x3e\x18\xc9\xe0\x43\x86\x54\x53\xfe\x54\xa6\x87\xf7\x44\xe0\xd6\xfe\x3c\x4a\x4e\x3c\x3a\xbd\x21\x83\x4d\x7d\xf0\x1a\xe6\x18\xe0\x48\xb9\xf5\x48\xb6\x7c\x0d\x59\x7f\x7f\x9f\x5d\x45\x60\xb4\x0f\xe1\x36\x6d\x30\xa1\x5f\x15\x25\x3a\xb6\xff\xcc\x58\xd6\xa0\x1b\xa5\xb6\xb1\xa7\xf4\x70\x0b\x0c\xd1\x5d\x6a\x5f\xf3\x83\x62\x21\xb2\xbf\x87\xb6\xd4\x36\xfc\x7e\x3c\x70\xb0\x0b\x22\xf3\x0b\x35\x46\x24\xff\x60\x90\x8e\x97\x11\x19\xe8\xb0\xc1\x7b\x12\x60\x8d\x2d\x31\x01\x4e\x96\x19\x23\xe9\xc0\x83\xfe\xf8\x3e\xec\x17\x1f\x78\x5a\x12\xb6\xc4\x1a\x48\x7b\xd2\x1d\xcb\x22\x81\x5b\x12\x68\xa9\xad\xd8\xec\xd9\xf8\x82\x0f\x54\xf0\xfe\xc4\x1f\x20\xba\x0c\x5f\xf2\x37\x2f\xa8\xaa\xa3\x0a\x16\x7b\xcd\x2d\xdd\x8a\x33\xc9\x91\x82\x7e\xe6\x27\x8c\xff\xc7\xde\x75\xb4\xde\x8f\x73\xe7\xfd\xfb\x29\xc2\x64\x15\x0c\x71\x6f\x33\x24\x60\xdf\xeb\x5e\xaf\xbb\x4d\x36\xee\xbd\x77\x87\xf7\xbb\x87\xfb\x1b\
x08\xff\xc9\x2e\xfb\x11\x78\x61\x21\x74\x2c\xe9\x41\xe7\x3c\xb2\xa4\x47\x30\x10\x5f\x09\x9f\x58\x97\x5b\x8a\x0e\xb7\x03\x00\x89\x93\x1a\x15\x01\x79\x03\xb3\x16\xeb\x02\x41\xa7\x47\xbe\x30\x63\x68\x62\x68\xa7\x99\xc1\x46\xe6\x82\xe1\x3b\x15\xbd\x02\x9d\xbb\x8b\xbc\x17\xef\x81\x36\xec\xe5\x6b\x1c\x15\xbd\x89\x90\x13\x8a\x49\x2d\xa9\x52\x59\xb1\xdb\x79\x5e\x66\x5a\x94\x88\xb1\x50\xf1\x24\x42\x33\xf9\xbc\x62\x3a\x49\x81\x42\x5f\xfb\xc7\x6b\xb0\xf5\x96\xbc\x21\x42\xb0\x98\xaf\xbe\x6c\x74\xe6\x20\xa3\x22\x8c\x76\x52\xad\x0d\x00\x43\x01\x29\xa1\xb0\xb0\x7c\xe4\xf1\xaf\xe2\x10\x1b\x80\xc8\xb5\xc1\x13\xca\x2c\x78\x00\xf0\x38\x50\x62\xdd\xf5\xb8\x93\x7d\x6f\x70\x46\x8c\xce\xc1\xe2\x20\x2c\x8e\x36\xc9\x0c\xa0\x97\xd8\xdf\xac\x43\x8c\xf9\x0f\xf1\xa6\x11\x52\x5f\x60\x02\x1f\xe1\x18\x8c\x55\x2b\x0f\x06\x3c\x74\x39\xf6\x31\xbb\x73\x2f\x8c\x01\xa1\x87\x04\x4b\x05\xca\x38\x20\xba\x70\x8f\x0a\xcb\x9c\x82\x67\x1c\x0e\x37\x70\x0a\x48\x1b\x40\xb4\xfb\x39\x1e\x64\x1c\x34\x9a\xa1\x0d\x2b\xde\x7c\xf1\xdf\x66\x6d\x13\x44\xfa\xef\x7b\x81\x32\x47\x53\x5f\xd8\x4e\xae\x86\xf8\x06\x03\x12\x47\x4f\xa0\xa0\x11\x81\x0e\x26\x22\xef\x40\xba\x13\x17\x88\xd4\x45\x75\x0f\xb2\x2f\xa1\x9b\xe3\x7a\x24\x34\x15\xd8\xf3\xf0\xe8\x63\x3a\xde\xa8\xab\xa5\x85\xe6\x08\x21\xf9\xae\xc6\x58\x24\x39\x86\x6a\x42\x40\x47\xef\xb4\xe3\x2d\xd7\x95\x6e\xb5\x18\x09\xa3\xd2\x86\x46\xee\x80\x78\xf6\xbc\xaa\x8c\x96\x1b\x60\x3f\xca\xdb\xfb\x18\x07\x0b\x7e\x52\x89\x0d\x8f\x22\x4f\x8a\x0d\xe5\xf8\xae\x6c\x8d\x3d\x78\x23\xe7\xeb\x1c\x15\x67\xc2\x30\x2e\x38\xab\x59\xbd\x29\x51\x41\xb7\x89\x26\x82\x80\x89\x89\xbc\x3c\xa3\xb0\xaf\xc6\xdb\xb4\x96\xdc\x0a\x33\x33\x38\xfa\x45\x9a\x95\x57\xd3\x1f\x10\xb9\xe4\x85\x5a\xec\xa8\xd7\x9d\xaa\xeb\xf1\x56\x6e\xd2\x17\x11\x0f\x13\x60\x0c\xc8\x02\x8b\xcd\x89\x24\xfa\x22\xdf\x0a\x15\xbb\x9d\x6c\xbd\xd9\x93\xcc\xf7\x00\x21\xa7\x39\x12\x86\x79\x0b\x6d\xb7\x9b\x6e\xfd\xf5\xa3\x4f\x36\x49\xe1\x8c\xd6\x61\x07\x04\xb5\x19\xae\x36\x0c\x60\x67\x6e\xfa\x41\xec\x99\x8f\x66\x47\xed\x99\x0c\x57\x9a\xbc\x6e\xcf\
x82\x6f\x20\x5d\x5d\x86\x27\x6e\x2e\x0a\x19\xef\xc6\xa3\x62\x47\x47\x2f\x0d\x5f\x04\xb9\x2b\xfd\xe2\xad\xc4\x1a\xa0\x74\xe4\xe1\x39\xda\x94\x20\x08\x01\x05\xd1\x98\x51\xb8\x12\x54\x9e\x1a\xc2\x1b\x3c\x0c\x91\xb0\x87\x29\x82\xf0\x22\x4f\x14\x3c\xdd\x30\x80\x28\x7f\x48\x64\x80\xd2\xa0\x55\xd3\x58\xe7\x52\x76\x6d\x7f\x28\xd5\x78\xb0\x2f\x4f\x24\xc0\xe3\xaa\x63\x08\xda\x92\x0f\x75\x38\x97\x98\x0f\x8b\x72\x67\xb1\x04\x2d\x11\x4d\xd0\x09\x93\x62\x82\xc4\x80\xfb\x38\x2b\xcd\x0a\x1a\x3a\x9a\x23\xb3\x63\x0f\x6a\x47\x49\x3a\x52\x0c\x24\x50\x15\xe6\x50\xde\x14\xc7\x5b\xa1\x3f\x6e\x1d\xcd\x62\xa0\x18\xd3\xfb\x3e\x84\xfb\x62\x63\x40\x6e\x46\xbe\xdb\xf1\xd5\x13\x58\x9c\xc5\xa5\xab\xaf\x5e\x82\x7a\x05\x72\xb2\x42\x93\x62\xb7\xbb\xaf\xfa\x9e\x43\x8f\x20\x05\x9a\x27\xbe\x22\x31\x58\x87\x40\x80\x5f\xc0\x4a\x50\x2f\xb6\xc6\xc3\x35\x3a\xf2\x17\x44\xda\xd0\xc6\x9a\x62\xd3\x21\x64\xde\x42\x86\x77\x2a\xd3\xac\x94\x5a\x5a\x1d\x2d\x99\x9b\x0f\x6c\xdb\x70\x40\xd9\x2e\x15\xf0\x66\x42\x50\x91\x86\x14\xf5\xee\x3e\x9f\x11\xab\xc4\xf5\x6e\xb5\x39\x21\x80\x62\x7b\xf3\x9c\x72\x4b\x11\xf6\xb3\x47\x6c\xeb\x9f\x90\x67\x4f\xb7\xe1\x45\xd9\x50\x58\x7b\x0f\x96\xdd\xbf\x97\x2b\xd4\x26\xdb\x6a\x1d\x9e\xbe\x37\xbb\x10\x6a\x75\xa0\x82\xea\xc1\x57\x28\xd3\x06\x67\x6c\xc0\xeb\xfa\xb8\x22\x0b\xa4\x6f\xa6\x55\xde\x65\x39\x4a\x2f\x5d\x81\x0a\x58\x91\xf9\x17\x4c\x83\x07\x85\x67\x7b\xcd\xb8\x0f\xef\x39\x2e\x6a\x3e\xd7\x0a\x3b\x8c\x9d\x3a\xdc\x25\x59\xef\x00\xa6\x0b\x97\x3e\x16\x4a\xdc\xc9\xb5\xea\xec\x2d\xc9\xcd\x60\x33\xc4\x06\x60\x3b\x67\x5a\xc7\x39\xee\xbf\x3c\x20\x9f\x53\x43\xb4\x4b\x40\xed\xb2\xbd\x87\x79\xde\x4a\x7d\xef\x86\x0b\x63\x70\x7a\xa7\x9f\x66\x85\x3e\x52\x4f\x7b\x8c\x07\x2c\x35\x81\x66\x8f\xcf\x01\x0b\x8d\xb5\x3f\x2b\xab\xe4\x43\x42\xcf\xa7\xe6\x8c\x84\x5f\x62\xcb\x86\xfb\x19\x9a\xca\x7c\x4d\xd7\x68\x98\xf0\x9c\xd4\x74\x10\x66\xbc\x27\x97\x28\xda\x54\x9c\x8d\x66\x24\x8d\x4e\x18\x1c\x76\x29\xb5\x56\x36\x97\x27\x25\x0d\x73\x28\x90\xc4\x68\x78\xfb\x76\x3b\xb9\x90\xcf\x23\x5b\x62\xa4\x35\xc4\xf7\xb5\x12\
x89\x5c\x42\xdc\x94\xd8\x05\x7a\xa5\x01\x4c\x83\xf4\x76\xe2\xb7\x59\xb6\xef\x58\x17\x01\xfb\xd3\xb9\x87\x0d\x00\x13\xed\xa8\x12\xbf\x27\x1b\x81\x51\x69\x61\xa0\xa8\x7e\xa7\x15\xa3\x00\xb9\xd3\xe2\xaa\x55\x42\xa9\x2a\xb0\x65\x72\xa7\x54\xcf\x57\xa1\xb7\x78\x33\xa2\xe4\x6c\x73\x49\x28\xbd\x46\x82\x0d\x45\x66\x7b\x1b\xe4\x5e\x20\xa0\xac\x4a\x93\x98\xee\x01\x44\xca\xef\x06\x03\x12\x73\x10\xb4\x05\xe7\x5b\x8f\xbf\xbe\xfe\xb1\x84\x0a\x23\x96\x71\x0a\x88\x25\xb6\x5f\xa9\xa2\x50\xa1\xc7\x18\x1c\xe7\xc2\xc0\x6e\x05\xbc\x51\x8d\x91\xad\x36\x68\x53\x46\x49\x9a\xe3\x63\xa6\xee\xe5\xdc\xac\xce\x9b\x48\xf4\x6f\xf4\xff\x62\x3c\x42\x52\x81\x57\x8d\x14\xcf\x8a\x14\xe6\xa7\xa6\xf3\x7d\x38\x86\x77\x17\xd3\x7d\xd4\x47\x74\xfe\x78\x33\x94\x1f\xc3\x9e\xf4\xf8\x20\xdb\xb4\xcd\xb3\x67\x48\xd5\xd2\x6e\x55\xea\xd2\x2b\xf5\x38\x39\xd5\x49\xee\x9f\xe7\x4c\xa3\xc8\xf6\x1b\x6b\x2e\x4c\x94\x32\xde\xe5\x4c\xa4\x39\x4c\x44\x7d\xd8\x13\xc0\xee\xde\x40\x10\x23\xfa\x4c\xec\xea\x8c\xbd\xab\x73\xac\xce\x03\xb2\xdb\x4d\x5d\x30\xab\x78\x2f\xd3\x59\xa6\x41\x61\x28\xf0\xb6\xbc\x11\xe1\xcd\x0e\x25\x56\x2b\x74\x23\xf6\x90\x68\xd1\x81\xcb\xb3\x23\x94\x28\xfc\x0c\x1c\x4c\x51\x9a\x84\x84\x08\x65\xa7\xdf\x40\x16\x1c\xd7\x39\xba\xae\xc8\x55\x89\xff\xac\x2d\xcd\xca\xbd\xfd\xf0\x06\xea\xd9\xf6\x8d\xf0\x15\x55\x88\x57\xba\x10\x68\x57\x9c\x9f\xd7\xe8\x67\xc6\x90\x21\x22\x20\x57\x09\x42\x1d\xc3\x25\x60\x19\x58\xec\x50\xaa\x8b\x5c\x25\xd9\xc7\xe1\x8a\xc0\xb1\x10\xe8\xac\x0b\xf9\xe7\xcc\xa8\x7a\x55\xc3\x46\xba\xb5\x9a\x10\x2b\x0f\x20\xf3\xc5\x03\x7a\x72\x3e\xbb\x48\xb8\xa8\xcf\x2b\x60\x42\x33\x45\x55\x1b\xde\x2c\x4e\xf7\x8e\x05\xa2\x72\x93\x6f\xa5\xed\x3b\x5f\xe3\x40\x31\xdc\xa9\x0a\xbd\xca\x98\x35\x51\xf2\xdb\x37\xd7\x9a\x08\xd7\x07\xa0\x65\xdf\xf3\x02\x18\xc6\xdd\x25\x8c\xb2\x57\x40\xd2\x40\xe3\x62\x2a\x47\xf4\xcb\x8c\x16\x13\xb2\x06\xd9\x49\xe8\xdd\x0c\xc7\xb7\x51\xe6\x5a\x03\xd2\x66\xec\x0f\xd3\x16\x6f\xc1\xb7\x9f\x8a\x03\x25\x36\xef\xe1\x2b\xad\x68\x10\x27\x7e\xc8\xec\xc6\x6d\x8c\xce\x4d\x17\xd6\x1c\xc3\x79\x9c\x6a\
x2c\x44\x90\xda\x8d\xa2\xcb\xa8\xe5\x30\x87\x07\x50\xaf\x5c\xb9\xb5\x46\x74\x32\xa2\x0a\xdc\x4e\xf6\x5d\x58\x77\x3c\x2f\xf0\xa0\x8c\xbb\xb6\xe7\x85\x6e\xaa\x72\x6f\x89\xd7\x41\x77\x88\x83\x77\xfb\x68\x8f\x36\x69\x85\x48\xef\xe0\x8f\x1e\x6c\x27\x4d\x02\xb6\x28\x37\xb5\x70\x97\x2d\x21\xe2\xce\xdb\xc3\xc9\xf3\x96\xe7\x65\x3e\x17\xb7\x4a\x05\xe9\x8e\xfb\x68\xaa\x7d\x2e\x51\xa4\x5d\x50\x26\x66\x37\x1d\x3c\x17\x28\x0e\xe3\x44\x6b\xe4\x86\x10\x97\xac\xbe\x38\xf7\x75\x1c\x30\xf9\x80\xd2\x93\xa2\x0a\xd2\x2e\xf1\x05\x4d\xaa\x4d\xe8\x7d\x65\x3a\x24\x6f\xf6\x0a\xd3\x7d\xbd\xbd\xcf\x6e\x4b\x6c\x7c\xa1\xd6\x27\x3d\x9e\x38\x7a\x82\x1b\xaf\xfa\x94\xae\x09\x42\xe5\x27\x25\x3c\xba\xd3\x84\x37\x1a\xe6\xba\x20\xe1\xa3\x06\x67\xec\xc7\xe5\xa9\x42\x7c\x6e\x84\x4c\x27\x12\xd6\xb9\xb1\x57\xae\x9d\x3b\x57\xb5\x0d\x84\x9c\x48\x9a\x74\x69\x14\x65\x1d\x5f\x65\x3c\xc7\xfd\x3c\x03\x45\xaf\x7d\xf9\xb5\x6a\x43\xdf\x21\x39\xd6\x92\x72\xaa\x54\x57\x1d\xa8\x96\x1a\xe9\x32\x73\x99\x75\xda\x9f\x3b\x81\xeb\x71\x52\x90\x6d\x89\xe1\x2d\xf6\x97\x68\x87\x88\x58\xff\x5c\xfb\x8b\xc5\x66\xb9\x4e\x06\x04\xa6\x83\x64\x43\xa8\xe0\x2e\x04\x64\xa0\x97\x18\xd9\x60\x92\x1e\xd8\x2b\x4b\x7d\x2e\x75\x3a\x2b\xf0\x60\x1a\x07\xe3\x2c\x86\xf3\xc3\x1c\x09\x2d\x32\xfc\xa6\xd2\x8e\x6e\xeb\x1f\x6f\xee\x40\xb0\x18\x50\x92\x8e\xb5\x7b\xb3\x59\x8a\x48\x6c\xaa\x38\xdc\x19\x0f\x60\xc5\xef\xa6\xf8\xe3\x0a\x80\xcf\x5f\xbe\x5c\x45\x81\xe7\xa5\x95\x76\x38\x6b\xe9\x7c\x2e\xfd\xfd\x79\x34\xd2\x38\x57\xf5\x75\x6f\x53\x82\x33\xe5\xae\x35\x28\xb5\xc9\xcf\x41\x7e\x19\xcd\x86\x90\x79\x61\x06\x33\x1f\xee\xef\x91\x10\x2a\x28\x23\xad\xd5\x33\x87\xa7\x82\xc8\xcc\xb7\x2e\x7b\x18\x99\x96\xd5\xeb\xb5\x68\xfc\xc7\x5b\xd1\xa5\xa3\x41\xbe\x82\xb6\xd6\x79\xdf\x1f\xac\xae\xc7\x5e\xfd\xd2\xb3\x14\x00\x9d\x09\x4e\xd6\x20\xda\xc6\x17\xc3\xbc\xa5\xfa\x47\x6b\xb7\xc6\x76\xa9\x40\x45\xfa\x06\x33\x88\x06\xb8\x95\x5c\x88\xbc\x83\x49\x5a\x68\x72\x0c\xdc\xe9\x9f\x73\x56\x0a\x6a\x9a\xf7\x80\xcd\x4a\xed\x93\xd9\x1e\x90\x34\xa2\x3a\x61\x5f\x55\x90\x53\xd4\xd0\xa6\x16\
x07\x44\xe6\xc7\xd7\x13\x03\x47\x43\xce\x3c\xc6\x5e\xda\x9b\x04\x1c\xa7\xc5\x0d\x8b\x33\xa5\x0b\x17\xad\x86\x50\x9b\x3e\x11\xd5\x25\x7b\x8d\xd9\x0b\xc8\xf2\x15\x79\xf7\x68\xbc\xf5\xb0\x78\x26\x16\xc7\x30\xef\xf9\x1b\x47\xa3\x2a\xfc\xd0\x20\x41\xe7\x14\x50\x8f\xf7\xba\x28\x40\x07\x9e\xa5\xc2\x8e\x82\x31\x7c\x7d\x63\x96\x1f\xe9\xf4\x39\xfa\x9e\x61\x18\x46\xab\x1f\x5a\xb5\x21\x19\x03\x0a\x8b\xf3\x78\x33\x00\x91\x46\x84\x71\xaf\xb3\x5c\xcf\x78\x36\x32\x5b\x49\x23\x48\x46\x48\x13\x7f\xd6\x14\xde\x1e\x4d\x90\xb8\x60\x8a\x0d\xa2\xe4\xe6\x07\xd3\x26\xf9\x7a\x22\xd2\xc4\xe9\x9f\xc3\xbb\x5d\x8c\xe7\x7b\x90\x1c\x8a\x97\x62\x02\x63\x26\x2c\x60\x56\x60\x0b\x95\x6d\x9a\x08\x17\xe4\x89\xd6\x17\x59\x0b\x0c\xd0\x4a\xbd\x2e\x21\x35\x47\x14\x5a\x52\xdc\x65\x38\xd1\x8a\x24\x3a\x02\x5a\x63\x1b\xa8\x3f\x4b\xf9\x47\x89\xbd\x4f\xf6\x1e\x97\x9b\x02\x8d\xde\x91\x21\xa0\x30\x45\xf4\x06\x44\x5e\xb0\xca\x78\x7b\x5a\x98\xfd\xa4\xe2\x17\x86\xec\x99\x0a\xd5\xe2\x6a\x09\x7b\x16\x1a\x5f\xa6\x0c\x23\xbd\xc0\x99\xe8\xa7\x3a\x04\xcc\x06\xdb\xf5\xa2\x40\x67\x38\xdd\xb1\x8e\xba\xf4\x6e\xde\xa7\xc9\xf7\x86\x88\x2e\x94\x65\xfa\x91\x1e\xd2\x17\x1b\x29\x1e\xa8\xe8\xce\x25\x93\x64\x59\x0d\x7b\xbd\x19\x1e\x9c\xa0\x73\x53\xa4\xe0\x74\x0b\x36\x0d\x32\x2f\xc0\xbc\x1e\x0d\xd5\x7d\x32\x9b\xd0\x64\xd2\x7b\x30\x36\x9a\xa9\x29\x99\x95\x31\x04\x16\x04\x09\xef\x40\x10\x32\x3b\x02\x2a\x11\xae\xb4\x10\xe5\xdb\xed\xa6\xd9\xef\x21\x7f\x31\x55\xd6\xf5\x78\x4f\xfb\xf3\xe0\x3c\xb4\x2e\xf6\xb9\xa9\x36\xb4\xa9\x36\x7d\xaf\xcc\xe7\x75\xaf\x8b\xfc\x35\x65\x51\x80\xca\xdf\x64\x8d\xc1\xba\xe5\xf6\xf6\x53\x79\xbc\xee\xac\xdf\x98\x46\x40\xfd\x45\x20\x51\x77\xe6\x3f\xb5\x38\xb2\x0c\xf3\x62\x44\x5c\x70\x86\x02\x25\xe8\x7c\x47\x13\x9a\xea\x65\x3b\xec\x0e\x14\x49\x86\x08\xd6\x1f\x32\xc3\xd4\x29\x5f\x66\x62\x5b\xe2\x3e\xea\x63\x64\x5b\x22\x1f\xee\x42\xc4\x78\x0b\x89\xf7\x31\x61\xcf\xf3\x62\x3a\x3f\xc6\x5b\xad\xa1\x4d\xf9\x06\x1e\xb6\xf9\x60\x58\xa8\xaf\x08\x79\xf7\xdd\x14\x6f\x4d\x88\xc0\x8d\x8b\xa8\x5d\x8d\xae\x3e\x99\x09\x7d\x75\x32\xf4\
x2a\x62\x80\x02\xe6\x5b\x06\xd0\x39\x5e\x50\x74\x0f\xc7\x7e\x37\xf5\x56\x1f\xf5\x3f\xbf\xff\xea\x10\xe6\xa8\xf3\x3b\x8b\xc2\xb1\xa0\x7b\x6c\xc9\x30\x2c\x87\xcb\x16\xc7\xbb\x59\x10\xc0\xb4\x69\xc7\x42\xd5\xc2\xba\x3f\xcd\xb1\xde\xb8\xb7\x5e\x4a\x02\x0c\xb1\x8c\x78\x15\x79\xd0\xe1\x9f\x56\x7d\x21\x94\xdd\xe2\xc6\x17\xf4\x36\x1a\xc7\xdd\x04\x75\x8d\xf4\x25\x01\x88\xde\x30\xa3\xd0\x39\x9f\x4b\x73\x5c\xe4\x8b\xa2\x35\x89\x6f\xbd\x09\xcf\x4f\xf1\x85\x71\xd1\xc1\xb2\xed\x76\xb3\xdd\x4e\x82\xd5\x47\x82\x35\x4c\x84\x3f\x4c\xdf\xef\xfa\xd6\xc6\x7e\x3e\x82\xb7\x80\x11\x65\x76\x74\xe0\x91\xf9\xf5\xc9\x4d\xa1\xf9\x40\xb1\x8e\xe0\x2b\xa2\x3b\x1f\x44\x73\x38\x44\xaf\x42\x89\x7d\x95\xe5\xf8\xc2\xb0\x5d\xad\x27\xcd\xe1\x32\x00\xcc\x7f\x94\xd8\xe2\xbc\x8b\x08\x7b\xc0\x01\xa7\xb5\x15\x90\x9d\x89\x7d\x11\xae\x36\xf9\x51\xf0\xbb\xd6\xa4\xc7\xeb\xd8\xe7\xb7\x04\x83\x56\x93\x31\xb6\xaa\x90\x6f\x0a\x54\x6f\x6a\xd0\x80\x22\x7d\x68\x10\x3c\x4e\x5c\x7f\xa8\x43\x7d\x52\xd4\xbb\x3f\x76\x57\xe0\xe6\x8f\x74\xa4\xeb\xe1\x79\x11\x14\xe9\xbb\x4c\xc5\x11\x18\x46\xe5\x55\xfe\x08\x57\x2b\x8d\x84\x83\xba\xc7\x32\xb2\x57\xba\xaf\xf2\x67\xdf\x9e\x52\xcf\xb3\xdf\xcd\x71\x93\x7c\x92\x8d\x65\x66\x4c\xe8\xbb\x2e\x7e\xb3\xdd\xac\x67\xe2\xb7\x7f\x9d\x0f\x75\x32\x8c\xc4\x49\x2d\xd7\x7b\x7d\x24\x3e\x04\xfd\x6a\x07\xe3\x4e\x49\xf9\x59\x41\x95\x04\xc6\x55\x7d\x9d\x2b\xcf\xba\x6e\x27\xf5\xfc\x94\x7b\xdd\x8f\xc3\x5b\xdb\x76\x9a\x63\xa1\x26\x82\x99\xb0\x5b\x4f\xcc\x9b\x3f\x17\x40\x65\x1b\xda\xde\x2e\xa4\xff\x09\x57\x68\x73\xbe\x8f\x0b\xeb\x89\xaf\xf9\x55\x0d\xe9\xce\xc7\xe5\x33\xf3\x8d\x71\xfc\x77\x0c\xee\xce\xf2\xbc\x3c\x68\x71\xc1\xea\xa2\xef\x80\x56\xae\xf7\x0d\xe0\xe0\xc0\x1b\x5e\xed\xc6\x49\x03\x1f\xf9\x51\x17\x06\x66\x5e\x7c\xab\xcf\x2c\x24\xd9\x17\x5c\x47\x13\x60\xcb\xaf\xcd\x73\xf9\x77\x49\x81\x83\xa2\x0c\xbe\xeb\x79\x96\xfb\x6d\xfb\xcf\xdd\x2a\x1e\x9b\x05\x1e\x9c\xcd\x2f\x7c\x85\x14\x96\x3d\xa1\x58\xcf\x44\x0b\x12\x1a\x6f\xfe\x3a\xa2\x26\x86\xc8\x4f\x98\x28\x37\xb5\x3f\xeb\xa2\xce\x56\x07\xdf\xe9\x22\x3f\x26\
x90\xfa\xbc\xe5\x2b\xd6\xcb\xed\xa4\x4e\xcc\x28\x6e\xeb\xa2\xd8\xe3\xa4\xb9\xff\x73\x83\xe2\xbc\x2c\xd7\x61\xe2\xe0\x8b\x7e\xab\xdf\xd7\x56\x85\x8d\x56\x86\xb3\x10\x11\xde\xd6\x8a\x40\x3d\xea\xae\xbc\xd0\x58\x25\x94\x10\xf9\xb0\xc4\xfe\x34\x4b\xab\xc4\x50\x65\xd2\x60\x27\xf3\xbf\xe9\xe4\x14\x78\x4b\x02\x6c\x60\x34\x06\xc5\x8a\x2b\xfd\x30\xbf\xa4\xd7\xc7\x7a\x6e\x8e\xf9\x4b\x7a\xb7\x9c\x96\x88\xcc\xaf\xe5\x38\x26\x0f\x83\x33\xfd\x4b\x31\xe3\x63\x5d\x6b\xca\xfe\x9a\xf5\x61\xf1\xaa\x60\xcf\xbf\x14\xc3\x59\x51\xb8\xce\x5f\x4d\xb0\x8c\xd8\xf3\x94\xf8\x7f\x6d\xa6\x7f\xdb\xfc\xdb\xe6\xff\xc3\xe6\x01\x15\x0b\x50\x75\x81\xb1\x89\x3d\xd2\xfe\xe4\xda\xae\x67\x58\x0a\xfe\x0a\x25\xe9\x3f\x7e\xfb\xb7\x3f\xfe\xf1\xcf\x7f\xfc\x7b\xb2\xe4\x71\x96\x2e\x7b\x9f\xfc\x67\x57\x03\x5d\xfd\x7b\x92\x17\xe3\x92\xff\x77\x3a\x0e\x5b\x3e\x6c\xbf\xff\x06\xfe\x17\x04\xc5\xd0\x6f\x7f\x4c\x71\x96\xd5\x43\xf9\x3b\xf4\x2f\xf8\x74\xfd\x91\x8e\xdd\xb8\xfc\xfe\xaf\x69\x9a\xfe\xf1\xcf\xff\x09\x00\x00\xff\xff\xaf\xa5\x1a\xb1\x3d\x07\x02\x00") func pagesAssetsStylesContainersCssBytes() ([]byte, error) { return bindataRead( diff --git a/vendor/github.com/google/cadvisor/pages/templates.go b/vendor/github.com/google/cadvisor/pages/templates.go index 45f1c184002..b4821d4ae44 100644 --- a/vendor/github.com/google/cadvisor/pages/templates.go +++ b/vendor/github.com/google/cadvisor/pages/templates.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/BUILD index 78d32f02804..aafb2ae03ed 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/BUILD @@ -3,7 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "console.go", "container.go", "error.go", "factory.go", @@ -12,13 +11,8 @@ go_library( "stats.go", "sync.go", ] + select({ - "@io_bazel_rules_go//go/platform:freebsd": [ - "console_freebsd.go", - "stats_freebsd.go", - ], "@io_bazel_rules_go//go/platform:linux": [ "capabilities_linux.go", - "compat_1.5_linux.go", "console_linux.go", "container_linux.go", "criu_opts_linux.go", @@ -30,23 +24,11 @@ go_library( "process_linux.go", "restored_process.go", "rootfs_linux.go", - "setgroups_linux.go", "setns_init_linux.go", "standard_init_linux.go", "state_linux.go", "stats_linux.go", ], - "@io_bazel_rules_go//go/platform:solaris": [ - "console_solaris.go", - "container_solaris.go", - "stats_solaris.go", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "console_windows.go", - "container_windows.go", - "criu_opts_windows.go", - "stats_windows.go", - ], "//conditions:default": [], }), importpath = "github.com/opencontainers/runc/libcontainer", @@ -57,18 +39,19 @@ go_library( "//vendor/github.com/opencontainers/runc/libcontainer/utils:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/docker/pkg/mount:go_default_library", - "//vendor/github.com/docker/docker/pkg/symlink:go_default_library", + "//vendor/github.com/containerd/console:go_default_library", + "//vendor/github.com/cyphar/filepath-securejoin:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", "//vendor/github.com/mrunalp/fileutils:go_default_library", 
"//vendor/github.com/opencontainers/runc/libcontainer/apparmor:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs/validate:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/criurpc:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/intelrdt:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/keys:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/mount:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/seccomp:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/system:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", @@ -98,7 +81,9 @@ filegroup( "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/configs:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/criurpc:all-srcs", + "//vendor/github.com/opencontainers/runc/libcontainer/intelrdt:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/keys:all-srcs", + "//vendor/github.com/opencontainers/runc/libcontainer/mount:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/seccomp:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/stacktrace:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/system:all-srcs", diff --git a/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md b/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md index e5894c6429d..4363b6f9f4e 100644 --- 
a/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md +++ b/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md @@ -154,6 +154,90 @@ that no processes or threads escape the cgroups. This sync is done via a pipe ( specified in the runtime section below ) that the container's init process will block waiting for the parent to finish setup. +### IntelRdt + +Intel platforms with new Xeon CPU support Intel Resource Director Technology +(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which +currently supports L3 cache resource allocation. + +This feature provides a way for the software to restrict cache allocation to a +defined 'subset' of L3 cache which may be overlapping with other 'subsets'. +The different subsets are identified by class of service (CLOS) and each CLOS +has a capacity bitmask (CBM). + +It can be used to handle L3 cache resource allocation for containers if +hardware and kernel support Intel RDT/CAT. + +In Linux 4.10 kernel or newer, the interface is defined and exposed via +"resource control" filesystem, which is a "cgroup-like" interface. + +Comparing with cgroups, it has similar process management lifecycle and +interfaces in a container. But unlike cgroups' hierarchy, it has single level +filesystem layout. + +Intel RDT "resource control" filesystem hierarchy: +``` +mount -t resctrl resctrl /sys/fs/resctrl +tree /sys/fs/resctrl +/sys/fs/resctrl/ +|-- info +| |-- L3 +| |-- cbm_mask +| |-- min_cbm_bits +| |-- num_closids +|-- cpus +|-- schemata +|-- tasks +|-- + |-- cpus + |-- schemata + |-- tasks + +``` + +For runc, we can make use of `tasks` and `schemata` configuration for L3 cache +resource constraints. + +The file `tasks` has a list of tasks that belongs to this group (e.g., +" group). Tasks can be added to a group by writing the task ID +to the "tasks" file (which will automatically remove them from the previous +group to which they belonged). 
New tasks created by fork(2) and clone(2) are +added to the same group as their parent. If a pid is not in any sub group, it +is in root group. + +The file `schemata` has allocation masks/values for L3 cache on each socket, +which contains L3 cache id and capacity bitmask (CBM). +``` + Format: "L3:=;=;..." +``` +For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0` +Which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0. + +The valid L3 cache CBM is a *contiguous bits set* and number of bits that can +be set is less than the max bit. The max bits in the CBM is varied among +supported Intel Xeon platforms. In Intel RDT "resource control" filesystem +layout, the CBM in a group should be a subset of the CBM in root. Kernel will +check if it is valid when writing. e.g., 0xfffff in root indicates the max bits +of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM +values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc. + +For more information about Intel RDT/CAT kernel interface: +https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt + +An example for runc: +``` +Consider a two-socket machine with two L3 caches where the default CBM is +0xfffff and the max CBM length is 20 bits. With this configuration, tasks +inside the container only have access to the "upper" 80% of L3 cache id 0 and +the "lower" 50% L3 cache id 1: + +"linux": { + "intelRdt": { + "l3CacheSchema": "L3:0=ffff0;1=3ff" + } +} +``` + ### Security The standard set of Linux capabilities that are set in a container @@ -306,7 +390,7 @@ a container. | Exec | Execute a new process inside of the container ( requires setns ) | | Set | Setup configs of the container after it's created | -### Execute a new process inside of a running container. +### Execute a new process inside of a running container User can execute a new process inside of a running container. Any binaries to be executed must be accessible within the container's rootfs. 
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD index db8484c7c8c..5e06bf7d2e5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/BUILD @@ -38,7 +38,6 @@ go_library( ], "//conditions:default": [], }), - cgo = True, importpath = "github.com/opencontainers/runc/libcontainer/apparmor", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go index 82ed1a68a69..7fff0627fa1 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go @@ -2,15 +2,10 @@ package apparmor -// #cgo LDFLAGS: -lapparmor -// #include -// #include -import "C" import ( "fmt" "io/ioutil" "os" - "unsafe" ) // IsEnabled returns true if apparmor is enabled for the host. @@ -24,16 +19,36 @@ func IsEnabled() bool { return false } +func setprocattr(attr, value string) error { + // Under AppArmor you can only change your own attr, so use /proc/self/ + // instead of /proc// like libapparmor does + path := fmt.Sprintf("/proc/self/attr/%s", attr) + + f, err := os.OpenFile(path, os.O_WRONLY, 0) + if err != nil { + return err + } + defer f.Close() + + _, err = fmt.Fprintf(f, "%s", value) + return err +} + +// changeOnExec reimplements aa_change_onexec from libapparmor in Go +func changeOnExec(name string) error { + value := "exec " + name + if err := setprocattr("exec", value); err != nil { + return fmt.Errorf("apparmor failed to apply profile: %s", err) + } + return nil +} + // ApplyProfile will apply the profile with the specified name to the process after // the next exec. 
func ApplyProfile(name string) error { if name == "" { return nil } - cName := C.CString(name) - defer C.free(unsafe.Pointer(cName)) - if _, err := C.aa_change_onexec(cName); err != nil { - return fmt.Errorf("apparmor failed to apply profile: %s", err) - } - return nil + + return changeOnExec(name) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD index 95dd26783e7..b6fdcc6e0d3 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/BUILD @@ -63,7 +63,6 @@ filegroup( srcs = [ ":package-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:all-srcs", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless:all-srcs", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go index 22d82acb4e2..43bdccf3e9d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go @@ -145,8 +145,17 @@ func (m *Manager) Apply(pid int) (err error) { m.Paths[sys.Name()] = p if err := sys.Apply(d); err != nil { + if os.IsPermission(err) && m.Cgroups.Path == "" { + // If we didn't set a cgroup path, then let's defer the error here + // until we know whether we have set limits or not. + // If we hadn't set limits, then it's ok that we couldn't join this cgroup, because + // it will have the same limits as its parent. 
+ delete(m.Paths, sys.Name()) + continue + } return err } + } return nil } @@ -198,6 +207,10 @@ func (m *Manager) Set(container *configs.Config) error { for _, sys := range subsystems { path := paths[sys.Name()] if err := sys.Set(path, container.Cgroups); err != nil { + if path == "" { + // cgroup never applied + return fmt.Errorf("cannot set limits on the %s cgroup, as the container has not joined it", sys.Name()) + } return err } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go index e70dfe3b950..4b19f8a970d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go @@ -29,11 +29,15 @@ func (s *FreezerGroup) Apply(d *cgroupData) error { func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error { switch cgroup.Resources.Freezer { case configs.Frozen, configs.Thawed: - if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil { - return err - } - for { + // In case this loop does not exit because it doesn't get the expected + // state, let's write again this state, hoping it's going to be properly + // set this time. Otherwise, this loop could run infinitely, waiting for + // a state change that would never happen. 
+ if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil { + return err + } + state, err := readFile(path, "freezer.state") if err != nil { return err @@ -41,6 +45,7 @@ func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error { if strings.TrimSpace(state) == string(cgroup.Resources.Freezer) { break } + time.Sleep(1 * time.Millisecond) } case configs.Undefined: diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go deleted file mode 100644 index b1efbfd9997..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go +++ /dev/null @@ -1,128 +0,0 @@ -// +build linux - -package rootless - -import ( - "fmt" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/configs/validate" -) - -// TODO: This is copied from libcontainer/cgroups/fs, which duplicates this code -// needlessly. We should probably export this list. - -var subsystems = []subsystem{ - &fs.CpusetGroup{}, - &fs.DevicesGroup{}, - &fs.MemoryGroup{}, - &fs.CpuGroup{}, - &fs.CpuacctGroup{}, - &fs.PidsGroup{}, - &fs.BlkioGroup{}, - &fs.HugetlbGroup{}, - &fs.NetClsGroup{}, - &fs.NetPrioGroup{}, - &fs.PerfEventGroup{}, - &fs.FreezerGroup{}, - &fs.NameGroup{GroupName: "name=systemd"}, -} - -type subsystem interface { - // Name returns the name of the subsystem. - Name() string - - // Returns the stats, as 'stats', corresponding to the cgroup under 'path'. - GetStats(path string, stats *cgroups.Stats) error -} - -// The noop cgroup manager is used for rootless containers, because we currently -// cannot manage cgroups if we are in a rootless setup. This manager is chosen -// by factory if we are in rootless mode. 
We error out if any cgroup options are -// set in the config -- this may change in the future with upcoming kernel features -// like the cgroup namespace. - -type Manager struct { - Cgroups *configs.Cgroup - Paths map[string]string -} - -func (m *Manager) Apply(pid int) error { - // If there are no cgroup settings, there's nothing to do. - if m.Cgroups == nil { - return nil - } - - // We can't set paths. - // TODO(cyphar): Implement the case where the runner of a rootless container - // owns their own cgroup, which would allow us to set up a - // cgroup for each path. - if m.Cgroups.Paths != nil { - return fmt.Errorf("cannot change cgroup path in rootless container") - } - - // We load the paths into the manager. - paths := make(map[string]string) - for _, sys := range subsystems { - name := sys.Name() - - path, err := cgroups.GetOwnCgroupPath(name) - if err != nil { - // Ignore paths we couldn't resolve. - continue - } - - paths[name] = path - } - - m.Paths = paths - return nil -} - -func (m *Manager) GetPaths() map[string]string { - return m.Paths -} - -func (m *Manager) Set(container *configs.Config) error { - // We have to re-do the validation here, since someone might decide to - // update a rootless container. - return validate.New().Validate(container) -} - -func (m *Manager) GetPids() ([]int, error) { - dir, err := cgroups.GetOwnCgroupPath("devices") - if err != nil { - return nil, err - } - return cgroups.GetPids(dir) -} - -func (m *Manager) GetAllPids() ([]int, error) { - dir, err := cgroups.GetOwnCgroupPath("devices") - if err != nil { - return nil, err - } - return cgroups.GetAllPids(dir) -} - -func (m *Manager) GetStats() (*cgroups.Stats, error) { - // TODO(cyphar): We can make this work if we figure out a way to allow usage - // of cgroups with a rootless container. While this doesn't - // actually require write access to a cgroup directory, the - // statistics are not useful if they can be affected by - // non-container processes. 
- return nil, fmt.Errorf("cannot get cgroup stats in rootless container") -} - -func (m *Manager) Freeze(state configs.FreezerState) error { - // TODO(cyphar): We can make this work if we figure out a way to allow usage - // of cgroups with a rootless container. - return fmt.Errorf("cannot use freezer cgroup in rootless container") -} - -func (m *Manager) Destroy() error { - // We don't have to do anything here because we didn't do any setup. - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go index 7de9ae6050b..a65d8e4432d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go @@ -1,4 +1,4 @@ -// +build !linux +// +build !linux static_build package systemd @@ -43,7 +43,7 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) { } func (m *Manager) Set(container *configs.Config) error { - return nil, fmt.Errorf("Systemd not supported") + return fmt.Errorf("Systemd not supported") } func (m *Manager) Freeze(state configs.FreezerState) error { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go index de89ccbedaa..45bd3acce71 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,!static_build package systemd @@ -271,6 +271,13 @@ func (m *Manager) Apply(pid int) error { // cpu.cfs_quota_us and cpu.cfs_period_us are controlled by systemd. 
if c.Resources.CpuQuota != 0 && c.Resources.CpuPeriod != 0 { cpuQuotaPerSecUSec := uint64(c.Resources.CpuQuota*1000000) / c.Resources.CpuPeriod + // systemd converts CPUQuotaPerSecUSec (microseconds per CPU second) to CPUQuota + // (integer percentage of CPU) internally. This means that if a fractional percent of + // CPU is indicated by Resources.CpuQuota, we need to round up to the nearest + // 10ms (1% of a second) such that child cgroups can set the cpu.cfs_quota_us they expect. + if cpuQuotaPerSecUSec%10000 != 0 { + cpuQuotaPerSecUSec = ((cpuQuotaPerSecUSec / 10000) + 1) * 10000 + } properties = append(properties, newProp("CPUQuotaPerSecUSec", cpuQuotaPerSecUSec)) } @@ -288,10 +295,13 @@ func (m *Manager) Apply(pid int) error { } } - if _, err := theConn.StartTransientUnit(unitName, "replace", properties, nil); err != nil && !isUnitExists(err) { + statusChan := make(chan string) + if _, err := theConn.StartTransientUnit(unitName, "replace", properties, statusChan); err != nil && !isUnitExists(err) { return err } + <-statusChan + if err := joinCgroups(c, pid); err != nil { return err } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go deleted file mode 100644 index c7bdf1f60a0..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build linux,!go1.5 - -package libcontainer - -import "syscall" - -// GidMappingsEnableSetgroups was added in Go 1.5, so do nothing when building -// with earlier versions -func enableSetgroups(sys *syscall.SysProcAttr) { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD index 01bd8e8c03b..67df01f41ca 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/BUILD @@ -7,28 +7,25 @@ 
go_library( "config.go", "device.go", "hugepage_limit.go", + "intelrdt.go", "interface_priority_map.go", "mount.go", "namespaces.go", "network.go", ] + select({ "@io_bazel_rules_go//go/platform:android": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:darwin": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:dragonfly": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:freebsd": [ - "device_defaults.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], @@ -40,27 +37,22 @@ go_library( "namespaces_syscall.go", ], "@io_bazel_rules_go//go/platform:nacl": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:netbsd": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:openbsd": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:plan9": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], "@io_bazel_rules_go//go/platform:solaris": [ - "cgroup_unsupported.go", "namespaces_syscall_unsupported.go", "namespaces_unsupported.go", ], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go deleted file mode 100644 index 95e2830a436..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows,!linux,!freebsd - -package configs - -type Cgroup struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go 
b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index 269fffff357..3cae4fd8d96 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -187,6 +187,10 @@ type Config struct { // Rootless specifies whether the container is a rootless container. Rootless bool `json:"rootless"` + + // IntelRdt specifies settings for Intel RDT/CAT group that the container is placed into + // to limit the resources (e.g., L3 cache) the container has available + IntelRdt *IntelRdt `json:"intel_rdt,omitempty"` } type Hooks struct { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go index 4d348d217ec..e4f423c523f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go @@ -1,4 +1,4 @@ -// +build linux freebsd +// +build linux package configs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go new file mode 100644 index 00000000000..36bd5f96a11 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go @@ -0,0 +1,7 @@ +package configs + +type IntelRdt struct { + // The schema for L3 cache id and capacity bitmask (CBM) + // Format: "L3:=;=;..." 
+ L3CacheSchema string `json:"l3_cache_schema,omitempty"` +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/BUILD index 282deb77d09..f564a88af5b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/BUILD @@ -10,6 +10,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + "//vendor/github.com/opencontainers/runc/libcontainer/intelrdt:go_default_library", "//vendor/github.com/opencontainers/selinux/go-selinux:go_default_library", ], ) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go index 0cebfaf801a..7a9f33b7114 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go @@ -21,13 +21,6 @@ func (v *ConfigValidator) rootless(config *configs.Config) error { if err := rootlessMount(config); err != nil { return err } - // Currently, cgroups cannot effectively be used in rootless containers. - // The new cgroup namespace doesn't really help us either because it doesn't - // have nice interactions with the user namespace (we're working with upstream - // to fix this). - if err := rootlessCgroup(config); err != nil { - return err - } // XXX: We currently can't verify the user config at all, because // configs.Config doesn't store the user-related configs. 
So this @@ -36,37 +29,27 @@ func (v *ConfigValidator) rootless(config *configs.Config) error { return nil } -func rootlessMappings(config *configs.Config) error { - rootuid, err := config.HostRootUID() - if err != nil { - return fmt.Errorf("failed to get root uid from uidMappings: %v", err) +func hasIDMapping(id int, mappings []configs.IDMap) bool { + for _, m := range mappings { + if id >= m.ContainerID && id < m.ContainerID+m.Size { + return true + } } + return false +} + +func rootlessMappings(config *configs.Config) error { if euid := geteuid(); euid != 0 { if !config.Namespaces.Contains(configs.NEWUSER) { return fmt.Errorf("rootless containers require user namespaces") } - if rootuid != euid { - return fmt.Errorf("rootless containers cannot map container root to a different host user") - } } - rootgid, err := config.HostRootGID() - if err != nil { - return fmt.Errorf("failed to get root gid from gidMappings: %v", err) + if len(config.UidMappings) == 0 { + return fmt.Errorf("rootless containers requires at least one UID mapping") } - - // Similar to the above test, we need to make sure that we aren't trying to - // map to a group ID that we don't have the right to be. - if rootgid != getegid() { - return fmt.Errorf("rootless containers cannot map container root to a different host group") - } - - // We can only map one user and group inside a container (our own). - if len(config.UidMappings) != 1 || config.UidMappings[0].Size != 1 { - return fmt.Errorf("rootless containers cannot map more than one user") - } - if len(config.GidMappings) != 1 || config.GidMappings[0].Size != 1 { - return fmt.Errorf("rootless containers cannot map more than one group") + if len(config.GidMappings) == 0 { + return fmt.Errorf("rootless containers requires at least one UID mapping") } return nil @@ -104,11 +87,28 @@ func rootlessMount(config *configs.Config) error { // Check that the options list doesn't contain any uid= or gid= entries // that don't resolve to root. 
for _, opt := range strings.Split(mount.Data, ",") { - if strings.HasPrefix(opt, "uid=") && opt != "uid=0" { - return fmt.Errorf("cannot specify uid= mount options in rootless containers where argument isn't 0") + if strings.HasPrefix(opt, "uid=") { + var uid int + n, err := fmt.Sscanf(opt, "uid=%d", &uid) + if n != 1 || err != nil { + // Ignore unknown mount options. + continue + } + if !hasIDMapping(uid, config.UidMappings) { + return fmt.Errorf("cannot specify uid= mount options for unmapped uid in rootless containers") + } } - if strings.HasPrefix(opt, "gid=") && opt != "gid=0" { - return fmt.Errorf("cannot specify gid= mount options in rootless containers where argument isn't 0") + + if strings.HasPrefix(opt, "gid=") { + var gid int + n, err := fmt.Sscanf(opt, "gid=%d", &gid) + if n != 1 || err != nil { + // Ignore unknown mount options. + continue + } + if !hasIDMapping(gid, config.GidMappings) { + return fmt.Errorf("cannot specify gid= mount options for unmapped gid in rootless containers") + } } } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go index 8284345442c..cbbba9a03a2 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/intelrdt" selinux "github.com/opencontainers/selinux/go-selinux" ) @@ -40,6 +41,9 @@ func (v *ConfigValidator) Validate(config *configs.Config) error { if err := v.sysctl(config); err != nil { return err } + if err := v.intelrdt(config); err != nil { + return err + } if config.Rootless { if err := v.rootless(config); err != nil { return err @@ -153,6 +157,19 @@ func (v *ConfigValidator) sysctl(config *configs.Config) error { return nil } +func (v 
*ConfigValidator) intelrdt(config *configs.Config) error { + if config.IntelRdt != nil { + if !intelrdt.IsEnabled() { + return fmt.Errorf("intelRdt is specified in config, but Intel RDT feature is not supported or enabled") + } + if config.IntelRdt.L3CacheSchema == "" { + return fmt.Errorf("intelRdt is specified in config, but intelRdt.l3CacheSchema is empty") + } + } + + return nil +} + func isSymbolicLink(path string) (bool, error) { fi, err := os.Lstat(path) if err != nil { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console.go b/vendor/github.com/opencontainers/runc/libcontainer/console.go deleted file mode 100644 index 917acc702f3..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console.go +++ /dev/null @@ -1,17 +0,0 @@ -package libcontainer - -import ( - "io" - "os" -) - -// Console represents a pseudo TTY. -type Console interface { - io.ReadWriteCloser - - // Path returns the filesystem path to the slave side of the pty. - Path() string - - // Fd returns the fd for the master of the pty. - File() *os.File -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go deleted file mode 100644 index b7166a31f06..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build freebsd - -package libcontainer - -import ( - "errors" -) - -// newConsole returns an initialized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. 
-func newConsole() (Console, error) { - return nil, errors.New("libcontainer console is not supported on FreeBSD") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go index f70de384812..9997e93ed4f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go @@ -1,71 +1,14 @@ package libcontainer import ( - "fmt" "os" - "unsafe" "golang.org/x/sys/unix" ) -func ConsoleFromFile(f *os.File) Console { - return &linuxConsole{ - master: f, - } -} - -// newConsole returns an initialized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. -func newConsole() (Console, error) { - master, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - console, err := ptsname(master) - if err != nil { - return nil, err - } - if err := unlockpt(master); err != nil { - return nil, err - } - return &linuxConsole{ - slavePath: console, - master: master, - }, nil -} - -// linuxConsole is a linux pseudo TTY for use within a container. -type linuxConsole struct { - master *os.File - slavePath string -} - -func (c *linuxConsole) File() *os.File { - return c.master -} - -func (c *linuxConsole) Path() string { - return c.slavePath -} - -func (c *linuxConsole) Read(b []byte) (int, error) { - return c.master.Read(b) -} - -func (c *linuxConsole) Write(b []byte) (int, error) { - return c.master.Write(b) -} - -func (c *linuxConsole) Close() error { - if m := c.master; m != nil { - return m.Close() - } - return nil -} - // mount initializes the console inside the rootfs mounting with the specified mount label // and applying the correct ownership of the console. 
-func (c *linuxConsole) mount() error { +func mountConsole(slavePath string) error { oldMask := unix.Umask(0000) defer unix.Umask(oldMask) f, err := os.Create("/dev/console") @@ -75,17 +18,20 @@ func (c *linuxConsole) mount() error { if f != nil { f.Close() } - return unix.Mount(c.slavePath, "/dev/console", "bind", unix.MS_BIND, "") + return unix.Mount(slavePath, "/dev/console", "bind", unix.MS_BIND, "") } // dupStdio opens the slavePath for the console and dups the fds to the current // processes stdio, fd 0,1,2. -func (c *linuxConsole) dupStdio() error { - slave, err := c.open(unix.O_RDWR) +func dupStdio(slavePath string) error { + fd, err := unix.Open(slavePath, unix.O_RDWR, 0) if err != nil { - return err + return &os.PathError{ + Op: "open", + Path: slavePath, + Err: err, + } } - fd := int(slave.Fd()) for _, i := range []int{0, 1, 2} { if err := unix.Dup3(fd, i, 0); err != nil { return err @@ -93,60 +39,3 @@ func (c *linuxConsole) dupStdio() error { } return nil } - -// open is a clone of os.OpenFile without the O_CLOEXEC used to open the pty slave. -func (c *linuxConsole) open(flag int) (*os.File, error) { - r, e := unix.Open(c.slavePath, flag, 0) - if e != nil { - return nil, &os.PathError{ - Op: "open", - Path: c.slavePath, - Err: e, - } - } - return os.NewFile(uintptr(r), c.slavePath), nil -} - -func ioctl(fd uintptr, flag, data uintptr) error { - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { - return err - } - return nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - var u int32 - return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) -} - -// ptsname retrieves the name of the first available pts for the given master. 
-func ptsname(f *os.File) (string, error) { - n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) - if err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} - -// SaneTerminal sets the necessary tty_ioctl(4)s to ensure that a pty pair -// created by us acts normally. In particular, a not-very-well-known default of -// Linux unix98 ptys is that they have +onlcr by default. While this isn't a -// problem for terminal emulators, because we relay data from the terminal we -// also relay that funky line discipline. -func SaneTerminal(terminal *os.File) error { - termios, err := unix.IoctlGetTermios(int(terminal.Fd()), unix.TCGETS) - if err != nil { - return fmt.Errorf("ioctl(tty, tcgets): %s", err.Error()) - } - - // Set -onlcr so we don't have to deal with \r. - termios.Oflag &^= unix.ONLCR - - if err := unix.IoctlSetTermios(int(terminal.Fd()), unix.TCSETS, termios); err != nil { - return fmt.Errorf("ioctl(tty, tcsets): %s", err.Error()) - } - - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go deleted file mode 100644 index e5ca54599c2..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package libcontainer - -import ( - "errors" -) - -// newConsole returns an initialized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. 
-func newConsole() (Console, error) { - return nil, errors.New("libcontainer console is not supported on Solaris") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go deleted file mode 100644 index c61e866a5d5..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package libcontainer - -// newConsole returns an initialized console that can be used within a container -func newConsole() (Console, error) { - return &windowsConsole{}, nil -} - -// windowsConsole is a Windows pseudo TTY for use within a container. -type windowsConsole struct { -} - -func (c *windowsConsole) Fd() uintptr { - return 0 -} - -func (c *windowsConsole) Path() string { - return "" -} - -func (c *windowsConsole) Read(b []byte) (int, error) { - return 0, nil -} - -func (c *windowsConsole) Write(b []byte) (int, error) { - return 0, nil -} - -func (c *windowsConsole) Close() error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go index 9e1b74d77af..1ac74b1bf82 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go @@ -21,6 +21,7 @@ import ( "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/criurpc" + "github.com/opencontainers/runc/libcontainer/intelrdt" "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/runc/libcontainer/utils" @@ -38,10 +39,14 @@ type linuxContainer struct { root string config *configs.Config cgroupManager cgroups.Manager + intelRdtManager intelrdt.Manager + initPath string initArgs []string initProcess parentProcess initProcessStartTime uint64 criuPath 
string + newuidmapPath string + newgidmapPath string m sync.Mutex criuVersion int state containerState @@ -67,6 +72,9 @@ type State struct { // Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore ExternalDescriptors []string `json:"external_descriptors,omitempty"` + + // Intel RDT "resource control" filesystem path + IntelRdtPath string `json:"intel_rdt_path"` } // Container is a libcontainer container object. @@ -163,6 +171,11 @@ func (c *linuxContainer) Stats() (*Stats, error) { if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil { return stats, newSystemErrorWithCause(err, "getting container stats from cgroups") } + if c.intelRdtManager != nil { + if stats.IntelRdtStats, err = c.intelRdtManager.GetStats(); err != nil { + return stats, newSystemErrorWithCause(err, "getting container's Intel RDT stats") + } + } for _, iface := range c.config.Networks { switch iface.Type { case "veth": @@ -193,6 +206,15 @@ func (c *linuxContainer) Set(config configs.Config) error { } return err } + if c.intelRdtManager != nil { + if err := c.intelRdtManager.Set(&config); err != nil { + // Set configs back + if err2 := c.intelRdtManager.Set(c.config); err2 != nil { + logrus.Warnf("Setting back intelrdt configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2) + } + return err + } + } // After config setting succeed, update config and states c.config = &config _, err = c.updateState(nil) @@ -268,7 +290,7 @@ func (c *linuxContainer) start(process *Process, isInit bool) error { } if err := parent.start(); err != nil { // terminate the process to ensure that it properly is reaped. 
- if err := parent.terminate(); err != nil { + if err := ignoreTerminateErrors(parent.terminate()); err != nil { logrus.Warn(err) } return newSystemErrorWithCause(err, "starting container process") @@ -294,7 +316,7 @@ func (c *linuxContainer) start(process *Process, isInit bool) error { } for i, hook := range c.config.Hooks.Poststart { if err := hook.Run(s); err != nil { - if err := parent.terminate(); err != nil { + if err := ignoreTerminateErrors(parent.terminate()); err != nil { logrus.Warn(err) } return newSystemErrorWithCausef(err, "running poststart hook %d", i) @@ -392,7 +414,8 @@ func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProces } func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.Cmd, error) { - cmd := exec.Command(c.initArgs[0], c.initArgs[1:]...) + cmd := exec.Command(c.initPath, c.initArgs[1:]...) + cmd.Args[0] = c.initArgs[0] cmd.Stdin = p.Stdin cmd.Stdout = p.Stdout cmd.Stderr = p.Stderr @@ -434,15 +457,16 @@ func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, c return nil, err } return &initProcess{ - cmd: cmd, - childPipe: childPipe, - parentPipe: parentPipe, - manager: c.cgroupManager, - config: c.newInitConfig(p), - container: c, - process: p, - bootstrapData: data, - sharePidns: sharePidns, + cmd: cmd, + childPipe: childPipe, + parentPipe: parentPipe, + manager: c.cgroupManager, + intelRdtManager: c.intelRdtManager, + config: c.newInitConfig(p), + container: c, + process: p, + bootstrapData: data, + sharePidns: sharePidns, }, nil } @@ -461,6 +485,7 @@ func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, return &setnsProcess{ cmd: cmd, cgroupPaths: c.cgroupManager.GetPaths(), + intelRdtPath: state.IntelRdtPath, childPipe: childPipe, parentPipe: parentPipe, config: c.newInitConfig(p), @@ -499,6 +524,8 @@ func (c *linuxContainer) newInitConfig(process *Process) *initConfig { cfg.Rlimits = process.Rlimits } cfg.CreateConsole = 
process.ConsoleSocket != nil + cfg.ConsoleWidth = process.ConsoleWidth + cfg.ConsoleHeight = process.ConsoleHeight return cfg } @@ -600,9 +627,24 @@ func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc. logrus.Debugf("Feature check says: %s", criuFeatures) missingFeatures := false - if *criuFeat.MemTrack && !*criuFeatures.MemTrack { - missingFeatures = true - logrus.Debugf("CRIU does not support MemTrack") + // The outer if checks if the fields actually exist + if (criuFeat.MemTrack != nil) && + (criuFeatures.MemTrack != nil) { + // The inner if checks if they are set to true + if *criuFeat.MemTrack && !*criuFeatures.MemTrack { + missingFeatures = true + logrus.Debugf("CRIU does not support MemTrack") + } + } + + // This needs to be repeated for every new feature check. + // Is there a way to put this in a function. Reflection? + if (criuFeat.LazyPages != nil) && + (criuFeatures.LazyPages != nil) { + if *criuFeat.LazyPages && !*criuFeatures.LazyPages { + missingFeatures = true + logrus.Debugf("CRIU does not support LazyPages") + } } if missingFeatures { @@ -632,9 +674,9 @@ func parseCriuVersion(path string) (int, error) { return 0, fmt.Errorf("Unable to parse the CRIU version: %s", path) } - n, err := fmt.Sscanf(string(version), "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2 + n, err := fmt.Sscanf(version, "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2 if err != nil { - n, err = fmt.Sscanf(string(version), "GitID: v%d.%d", &x, &y) // 1.6 + n, err = fmt.Sscanf(version, "GitID: v%d.%d", &x, &y) // 1.6 y++ } else { z++ @@ -758,6 +800,25 @@ func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error { } req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) } + return nil +} + +func waitForCriuLazyServer(r *os.File, status string) error { + + data := make([]byte, 1) + _, err := r.Read(data) + if err != nil { + return err + } + fd, err := os.OpenFile(status, os.O_TRUNC|os.O_WRONLY, os.ModeAppend) + if err != nil { + return err + } + _, err = 
fd.Write(data) + if err != nil { + return err + } + fd.Close() return nil } @@ -825,6 +886,7 @@ func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { EmptyNs: proto.Uint32(criuOpts.EmptyNs), OrphanPtsMaster: proto.Bool(true), AutoDedup: proto.Bool(criuOpts.AutoDedup), + LazyPages: proto.Bool(criuOpts.LazyPages), } fcg := c.cgroupManager.GetPaths()["freezer"] @@ -875,6 +937,24 @@ func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { Opts: &rpcOpts, } + if criuOpts.LazyPages { + // lazy migration requested; check if criu supports it + feat := criurpc.CriuFeatures{ + LazyPages: proto.Bool(true), + } + + if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil { + return err + } + + statusRead, statusWrite, err := os.Pipe() + if err != nil { + return err + } + rpcOpts.StatusFd = proto.Int32(int32(statusWrite.Fd())) + go waitForCriuLazyServer(statusRead, criuOpts.StatusFd) + } + //no need to dump these information in pre-dump if !criuOpts.PreDump { for _, m := range c.config.Mounts { @@ -1027,6 +1107,7 @@ func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error { EmptyNs: proto.Uint32(criuOpts.EmptyNs), OrphanPtsMaster: proto.Bool(true), AutoDedup: proto.Bool(criuOpts.AutoDedup), + LazyPages: proto.Bool(criuOpts.LazyPages), }, } @@ -1404,7 +1485,7 @@ func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Proc defer master.Close() // While we can access console.master, using the API is a good idea. 
- if err := utils.SendFd(process.ConsoleSocket, master); err != nil { + if err := utils.SendFd(process.ConsoleSocket, master.Name(), master.Fd()); err != nil { return err } } @@ -1519,6 +1600,10 @@ func (c *linuxContainer) currentState() (*State, error) { startTime, _ = c.initProcess.startTime() externalDescriptors = c.initProcess.externalDescriptors() } + intelRdtPath, err := intelrdt.GetIntelRdtPath(c.ID()) + if err != nil { + intelRdtPath = "" + } state := &State{ BaseState: BaseState{ ID: c.ID(), @@ -1529,6 +1614,7 @@ func (c *linuxContainer) currentState() (*State, error) { }, Rootless: c.config.Rootless, CgroupPaths: c.cgroupManager.GetPaths(), + IntelRdtPath: intelRdtPath, NamespacePaths: make(map[configs.NamespaceType]string), ExternalDescriptors: externalDescriptors, } @@ -1627,6 +1713,12 @@ func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.Na if !joinExistingUser { // write uid mappings if len(c.config.UidMappings) > 0 { + if c.config.Rootless && c.newuidmapPath != "" { + r.AddData(&Bytemsg{ + Type: UidmapPathAttr, + Value: []byte(c.newuidmapPath), + }) + } b, err := encodeIDMapping(c.config.UidMappings) if err != nil { return nil, err @@ -1647,6 +1739,12 @@ func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.Na Type: GidmapAttr, Value: b, }) + if c.config.Rootless && c.newgidmapPath != "" { + r.AddData(&Bytemsg{ + Type: GidmapPathAttr, + Value: []byte(c.newgidmapPath), + }) + } // The following only applies if we are root. if !c.config.Rootless { // check if we have CAP_SETGID to setgroup properly @@ -1678,3 +1776,18 @@ func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.Na return bytes.NewReader(r.Serialize()), nil } + +// ignoreTerminateErrors returns nil if the given err matches an error known +// to indicate that the terminate occurred successfully or err was nil, otherwise +// err is returned unaltered. 
+func ignoreTerminateErrors(err error) error { + if err == nil { + return nil + } + s := err.Error() + switch { + case strings.Contains(s, "process already finished"), strings.Contains(s, "Wait was already called"): + return nil + } + return err +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go deleted file mode 100644 index bb84ff7402b..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainer - -// State represents a running container's state -type State struct { - BaseState - - // Platform specific fields below here -} - -// A libcontainer container object. -// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. -type Container interface { - BaseContainer - - // Methods below here are platform specific - -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go deleted file mode 100644 index bb84ff7402b..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainer - -// State represents a running container's state -type State struct { - BaseState - - // Platform specific fields below here -} - -// A libcontainer container object. -// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. 
-type Container interface { - BaseContainer - - // Methods below here are platform specific - -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go index 8f142c9fa5c..a2e344fc4b6 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go @@ -23,7 +23,7 @@ type VethPairName struct { type CriuOpts struct { ImagesDirectory string // directory for storing image files WorkDirectory string // directory to cd and write logs/pidfiles/stats to - ParentImage string // direcotry for storing parent image files in pre-dump and dump + ParentImage string // directory for storing parent image files in pre-dump and dump LeaveRunning bool // leave container in running state after checkpoint TcpEstablished bool // checkpoint/restore established TCP connections ExternalUnixConnections bool // allow external unix connections @@ -35,4 +35,6 @@ type CriuOpts struct { ManageCgroupsMode cgMode // dump or restore cgroup mode EmptyNs uint32 // don't c/r properties for namespace from this mask AutoDedup bool // auto deduplication for incremental dumps + LazyPages bool // restore memory pages lazily using userfaultfd + StatusFd string // fd for feedback when lazy server is ready } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go deleted file mode 100644 index bc9207703a1..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package libcontainer - -// TODO Windows: This can ultimately be entirely factored out as criu is -// a Unix concept not relevant on Windows. 
-type CriuOpts struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go index 947bdea1ceb..7d53d5e04d8 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go @@ -11,13 +11,13 @@ import ( "runtime/debug" "strconv" - "github.com/docker/docker/pkg/mount" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/cgroups/rootless" "github.com/opencontainers/runc/libcontainer/cgroups/systemd" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/configs/validate" + "github.com/opencontainers/runc/libcontainer/intelrdt" + "github.com/opencontainers/runc/libcontainer/mount" "github.com/opencontainers/runc/libcontainer/utils" "golang.org/x/sys/unix" @@ -72,15 +72,15 @@ func Cgroupfs(l *LinuxFactory) error { return nil } -// RootlessCgroups is an options func to configure a LinuxFactory to -// return containers that use the "rootless" cgroup manager, which will -// fail to do any operations not possible to do with an unprivileged user. -// It should only be used in conjunction with rootless containers. -func RootlessCgroups(l *LinuxFactory) error { - l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { - return &rootless.Manager{ - Cgroups: config, - Paths: paths, +// IntelRdtfs is an options func to configure a LinuxFactory to return +// containers that use the Intel RDT "resource control" filesystem to +// create and manage Intel Xeon platform shared resources (e.g., L3 cache). 
+func IntelRdtFs(l *LinuxFactory) error { + l.NewIntelRdtManager = func(config *configs.Config, id string, path string) intelrdt.Manager { + return &intelrdt.IntelRdtManager{ + Config: config, + Id: id, + Path: path, } } return nil @@ -119,12 +119,16 @@ func New(root string, options ...func(*LinuxFactory) error) (Factory, error) { } l := &LinuxFactory{ Root: root, - InitArgs: []string{"/proc/self/exe", "init"}, + InitPath: "/proc/self/exe", + InitArgs: []string{os.Args[0], "init"}, Validator: validate.New(), CriuPath: "criu", } Cgroupfs(l) for _, opt := range options { + if opt == nil { + continue + } if err := opt(l); err != nil { return nil, err } @@ -137,6 +141,10 @@ type LinuxFactory struct { // Root directory for the factory to store state. Root string + // InitPath is the path for calling the init responsibilities for spawning + // a container. + InitPath string + // InitArgs are arguments for calling the init responsibilities for spawning // a container. InitArgs []string @@ -145,11 +153,19 @@ type LinuxFactory struct { // containers. CriuPath string + // New{u,g}uidmapPath is the path to the binaries used for mapping with + // rootless containers. + NewuidmapPath string + NewgidmapPath string + // Validator provides validation to container configurations. Validator validate.Validator // NewCgroupsManager returns an initialized cgroups manager for a single container. NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager + + // NewIntelRdtManager returns an initialized Intel RDT manager for a single container. 
+ NewIntelRdtManager func(config *configs.Config, id string, path string) intelrdt.Manager } func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) { @@ -174,17 +190,20 @@ func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, err if err := os.Chown(containerRoot, unix.Geteuid(), unix.Getegid()); err != nil { return nil, newGenericError(err, SystemError) } - if config.Rootless { - RootlessCgroups(l) - } c := &linuxContainer{ id: id, root: containerRoot, config: config, + initPath: l.InitPath, initArgs: l.InitArgs, criuPath: l.CriuPath, + newuidmapPath: l.NewuidmapPath, + newgidmapPath: l.NewgidmapPath, cgroupManager: l.NewCgroupsManager(config.Cgroups, nil), } + if intelrdt.IsEnabled() { + c.intelRdtManager = l.NewIntelRdtManager(config, id, "") + } c.state = &stoppedState{c: c} return c, nil } @@ -203,17 +222,16 @@ func (l *LinuxFactory) Load(id string) (Container, error) { processStartTime: state.InitProcessStartTime, fds: state.ExternalDescriptors, } - // We have to use the RootlessManager. - if state.Rootless { - RootlessCgroups(l) - } c := &linuxContainer{ initProcess: r, initProcessStartTime: state.InitProcessStartTime, id: id, config: &state.Config, + initPath: l.InitPath, initArgs: l.InitArgs, criuPath: l.CriuPath, + newuidmapPath: l.NewuidmapPath, + newgidmapPath: l.NewgidmapPath, cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths), root: containerRoot, created: state.Created, @@ -222,6 +240,9 @@ func (l *LinuxFactory) Load(id string) (Container, error) { if err := c.refreshState(); err != nil { return nil, err } + if intelrdt.IsEnabled() { + c.intelRdtManager = l.NewIntelRdtManager(&state.Config, id, state.IntelRdtPath) + } return c, nil } @@ -323,3 +344,21 @@ func (l *LinuxFactory) validateID(id string) error { return nil } + +// NewuidmapPath returns an option func to configure a LinuxFactory with the +// provided .. 
+func NewuidmapPath(newuidmapPath string) func(*LinuxFactory) error { + return func(l *LinuxFactory) error { + l.NewuidmapPath = newuidmapPath + return nil + } +} + +// NewgidmapPath returns an option func to configure a LinuxFactory with the +// provided .. +func NewgidmapPath(newgidmapPath string) func(*LinuxFactory) error { + return func(l *LinuxFactory) error { + l.NewgidmapPath = newgidmapPath + return nil + } +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go index 2020bb7a5aa..2770be30718 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go @@ -12,15 +12,16 @@ import ( "syscall" // only for Errno "unsafe" + "golang.org/x/sys/unix" + + "github.com/containerd/console" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/runc/libcontainer/user" "github.com/opencontainers/runc/libcontainer/utils" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" - "golang.org/x/sys/unix" ) type initType string @@ -61,6 +62,8 @@ type initConfig struct { ContainerId string `json:"containerid"` Rlimits []configs.Rlimit `json:"rlimits"` CreateConsole bool `json:"create_console"` + ConsoleWidth uint16 `json:"console_width"` + ConsoleHeight uint16 `json:"console_height"` Rootless bool `json:"rootless"` } @@ -170,29 +173,38 @@ func setupConsole(socket *os.File, config *initConfig, mount bool) error { // however, that setupUser (specifically fixStdioPermissions) *will* change // the UID owner of the console to be the user the process will run as (so // they can actually control their console). 
- console, err := newConsole() + + pty, slavePath, err := console.NewPty() if err != nil { return err } - // After we return from here, we don't need the console anymore. - defer console.Close() - linuxConsole, ok := console.(*linuxConsole) - if !ok { - return fmt.Errorf("failed to cast console to *linuxConsole") + if config.ConsoleHeight != 0 && config.ConsoleWidth != 0 { + err = pty.Resize(console.WinSize{ + Height: config.ConsoleHeight, + Width: config.ConsoleWidth, + }) + + if err != nil { + return err + } } + + // After we return from here, we don't need the console anymore. + defer pty.Close() + // Mount the console inside our rootfs. if mount { - if err := linuxConsole.mount(); err != nil { + if err := mountConsole(slavePath); err != nil { return err } } // While we can access console.master, using the API is a good idea. - if err := utils.SendFd(socket, linuxConsole.File()); err != nil { + if err := utils.SendFd(socket, pty.Name(), pty.Fd()); err != nil { return err } // Now, dup over all the things. - return linuxConsole.dupStdio() + return dupStdio(slavePath) } // syncParentReady sends to the given pipe a JSON payload which indicates that @@ -261,25 +273,27 @@ func setupUser(config *initConfig) error { } } + // Rather than just erroring out later in setuid(2) and setgid(2), check + // that the user is mapped here. + if _, err := config.Config.HostUID(execUser.Uid); err != nil { + return fmt.Errorf("cannot set uid to unmapped user in user namespace") + } + if _, err := config.Config.HostGID(execUser.Gid); err != nil { + return fmt.Errorf("cannot set gid to unmapped user in user namespace") + } + if config.Rootless { - if execUser.Uid != 0 { - return fmt.Errorf("cannot run as a non-root user in a rootless container") - } - - if execUser.Gid != 0 { - return fmt.Errorf("cannot run as a non-root group in a rootless container") - } - - // We cannot set any additional groups in a rootless container and thus we - // bail if the user asked us to do so. 
TODO: We currently can't do this - // earlier, but if libcontainer.Process.User was typesafe this might work. + // We cannot set any additional groups in a rootless container and thus + // we bail if the user asked us to do so. TODO: We currently can't do + // this check earlier, but if libcontainer.Process.User was typesafe + // this might work. if len(addGroups) > 0 { return fmt.Errorf("cannot set any additional groups in a rootless container") } } - // before we change to the container's user make sure that the processes STDIO - // is correctly owned by the user that we are switching to. + // Before we change to the container's user make sure that the processes + // STDIO is correctly owned by the user that we are switching to. if err := fixStdioPermissions(config, execUser); err != nil { return err } @@ -298,7 +312,6 @@ func setupUser(config *initConfig) error { if err := system.Setgid(execUser.Gid); err != nil { return err } - if err := system.Setuid(execUser.Uid); err != nil { return err } @@ -335,14 +348,6 @@ func fixStdioPermissions(config *initConfig, u *user.ExecUser) error { continue } - // Skip chown if s.Gid is actually an unmapped gid in the host. While - // this is a bit dodgy if it just so happens that the console _is_ - // owned by overflow_gid, there's no way for us to disambiguate this as - // a userspace program. - if _, err := config.Config.HostGID(int(s.Gid)); err != nil { - continue - } - // We only change the uid owner (as it is possible for the mount to // prefer a different gid, and there's no reason for us to change it). // The reason why we don't just leave the default uid=X mount setup is @@ -350,6 +355,15 @@ func fixStdioPermissions(config *initConfig, u *user.ExecUser) error { // this code, you couldn't effectively run as a non-root user inside a // container and also have a console set up. if err := unix.Fchown(int(fd), u.Uid, int(s.Gid)); err != nil { + // If we've hit an EINVAL then s.Gid isn't mapped in the user + // namespace. 
If we've hit an EPERM then the inode's current owner + // is not mapped in our user namespace (in particular, + // privileged_wrt_inode_uidgid() has failed). In either case, we + // are in a configuration where it's better for us to just not + // touch the stdio rather than bail at this point. + if err == unix.EINVAL || err == unix.EPERM { + continue + } return err } } @@ -480,6 +494,16 @@ func signalAllProcesses(m cgroups.Manager, s os.Signal) error { logrus.Warn(err) } + subreaper, err := system.GetSubreaper() + if err != nil { + // The error here means that PR_GET_CHILD_SUBREAPER is not + // supported because this code might run on a kernel older + // than 3.4. We don't want to throw an error in that case, + // and we simplify things, considering there is no subreaper + // set. + subreaper = 0 + } + for _, p := range procs { if s != unix.SIGKILL { if ok, err := isWaitable(p.Pid); err != nil { @@ -493,9 +517,16 @@ func signalAllProcesses(m cgroups.Manager, s os.Signal) error { } } - if _, err := p.Wait(); err != nil { - if !isNoChildren(err) { - logrus.Warn("wait: ", err) + // In case a subreaper has been setup, this code must not + // wait for the process. Otherwise, we cannot be sure the + // current process will be reaped by the subreaper, while + // the subreaper might be waiting for this process in order + // to retrieve its exit code. 
+ if subreaper == 0 { + if _, err := p.Wait(); err != nil { + if !isNoChildren(err) { + logrus.Warn("wait: ", err) + } } } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/BUILD similarity index 65% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/BUILD rename to vendor/github.com/opencontainers/runc/libcontainer/intelrdt/BUILD index 82406c88c1a..4c530fdd02f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/BUILD @@ -4,18 +4,16 @@ go_library( name = "go_default_library", srcs = select({ "@io_bazel_rules_go//go/platform:linux": [ - "rootless.go", + "intelrdt.go", + "stats.go", ], "//conditions:default": [], }), - importpath = "github.com/opencontainers/runc/libcontainer/cgroups/rootless", + importpath = "github.com/opencontainers/runc/libcontainer/intelrdt", visibility = ["//visibility:public"], deps = select({ "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/configs/validate:go_default_library", ], "//conditions:default": [], }), diff --git a/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/intelrdt.go new file mode 100644 index 00000000000..487c630af61 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/intelrdt.go @@ -0,0 +1,553 @@ +// +build linux + +package intelrdt + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + + 
"github.com/opencontainers/runc/libcontainer/configs" +) + +/* + * About Intel RDT/CAT feature: + * Intel platforms with new Xeon CPU support Resource Director Technology (RDT). + * Intel Cache Allocation Technology (CAT) is a sub-feature of RDT. Currently L3 + * Cache is the only resource that is supported in RDT. + * + * This feature provides a way for the software to restrict cache allocation to a + * defined 'subset' of L3 cache which may be overlapping with other 'subsets'. + * The different subsets are identified by class of service (CLOS) and each CLOS + * has a capacity bitmask (CBM). + * + * For more information about Intel RDT/CAT can be found in the section 17.17 + * of Intel Software Developer Manual. + * + * About Intel RDT/CAT kernel interface: + * In Linux 4.10 kernel or newer, the interface is defined and exposed via + * "resource control" filesystem, which is a "cgroup-like" interface. + * + * Comparing with cgroups, it has similar process management lifecycle and + * interfaces in a container. But unlike cgroups' hierarchy, it has single level + * filesystem layout. + * + * Intel RDT "resource control" filesystem hierarchy: + * mount -t resctrl resctrl /sys/fs/resctrl + * tree /sys/fs/resctrl + * /sys/fs/resctrl/ + * |-- info + * | |-- L3 + * | |-- cbm_mask + * | |-- min_cbm_bits + * | |-- num_closids + * |-- cpus + * |-- schemata + * |-- tasks + * |-- + * |-- cpus + * |-- schemata + * |-- tasks + * + * For runc, we can make use of `tasks` and `schemata` configuration for L3 cache + * resource constraints. + * + * The file `tasks` has a list of tasks that belongs to this group (e.g., + * " group). Tasks can be added to a group by writing the task ID + * to the "tasks" file (which will automatically remove them from the previous + * group to which they belonged). New tasks created by fork(2) and clone(2) are + * added to the same group as their parent. If a pid is not in any sub group, it is + * in root group. 
+ * + * The file `schemata` has allocation bitmasks/values for L3 cache on each socket, + * which contains L3 cache id and capacity bitmask (CBM). + * Format: "L3:=;=;..." + * For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0` + * which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0. + * + * The valid L3 cache CBM is a *contiguous bits set* and number of bits that can + * be set is less than the max bit. The max bits in the CBM is varied among + * supported Intel Xeon platforms. In Intel RDT "resource control" filesystem + * layout, the CBM in a group should be a subset of the CBM in root. Kernel will + * check if it is valid when writing. e.g., 0xfffff in root indicates the max bits + * of CBM is 20 bits, which mapping to entire L3 cache capacity. Some valid CBM + * values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc. + * + * For more information about Intel RDT/CAT kernel interface: + * https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt + * + * An example for runc: + * Consider a two-socket machine with two L3 caches where the default CBM is + * 0xfffff and the max CBM length is 20 bits. With this configuration, tasks + * inside the container only have access to the "upper" 80% of L3 cache id 0 and + * the "lower" 50% L3 cache id 1: + * + * "linux": { + * "intelRdt": { + * "l3CacheSchema": "L3:0=ffff0;1=3ff" + * } + * } + */ + +type Manager interface { + // Applies Intel RDT configuration to the process with the specified pid + Apply(pid int) error + + // Returns statistics for Intel RDT + GetStats() (*Stats, error) + + // Destroys the Intel RDT 'container_id' group + Destroy() error + + // Returns Intel RDT path to save in a state file and to be able to + // restore the object later + GetPath() string + + // Set Intel RDT "resource control" filesystem as configured. 
+ Set(container *configs.Config) error +} + +// This implements interface Manager +type IntelRdtManager struct { + mu sync.Mutex + Config *configs.Config + Id string + Path string +} + +const ( + IntelRdtTasks = "tasks" +) + +var ( + // The absolute root path of the Intel RDT "resource control" filesystem + intelRdtRoot string + intelRdtRootLock sync.Mutex + + // The flag to indicate if Intel RDT is supported + isEnabled bool +) + +type intelRdtData struct { + root string + config *configs.Config + pid int +} + +// Check if Intel RDT is enabled in init() +func init() { + // 1. Check if hardware and kernel support Intel RDT/CAT feature + // "cat_l3" flag is set if supported + isFlagSet, err := parseCpuInfoFile("/proc/cpuinfo") + if !isFlagSet || err != nil { + isEnabled = false + return + } + + // 2. Check if Intel RDT "resource control" filesystem is mounted + // The user guarantees to mount the filesystem + isEnabled = isIntelRdtMounted() +} + +// Return the mount point path of Intel RDT "resource control" filesysem +func findIntelRdtMountpointDir() (string, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + text := s.Text() + fields := strings.Split(text, " ") + // Safe as mountinfo encodes mountpoints with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + numPostFields := len(postSeparatorFields) + + // This is an error as we can't detect if the mount is for "Intel RDT" + if numPostFields == 0 { + return "", fmt.Errorf("Found no fields post '-' in %q", text) + } + + if postSeparatorFields[0] == "resctrl" { + // Check that the mount is properly formated. 
+ if numPostFields < 3 { + return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + return fields[4], nil + } + } + if err := s.Err(); err != nil { + return "", err + } + + return "", NewNotFoundError("Intel RDT") +} + +// Gets the root path of Intel RDT "resource control" filesystem +func getIntelRdtRoot() (string, error) { + intelRdtRootLock.Lock() + defer intelRdtRootLock.Unlock() + + if intelRdtRoot != "" { + return intelRdtRoot, nil + } + + root, err := findIntelRdtMountpointDir() + if err != nil { + return "", err + } + + if _, err := os.Stat(root); err != nil { + return "", err + } + + intelRdtRoot = root + return intelRdtRoot, nil +} + +func isIntelRdtMounted() bool { + _, err := getIntelRdtRoot() + if err != nil { + return false + } + + return true +} + +func parseCpuInfoFile(path string) (bool, error) { + f, err := os.Open(path) + if err != nil { + return false, err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return false, err + } + + text := s.Text() + flags := strings.Split(text, " ") + + // "cat_l3" flag is set if Intel RDT/CAT is supported + for _, flag := range flags { + if flag == "cat_l3" { + return true, nil + } + } + } + return false, nil +} + +func parseUint(s string, base, bitSize int) (uint64, error) { + value, err := strconv.ParseUint(s, base, bitSize) + if err != nil { + intValue, intErr := strconv.ParseInt(s, base, bitSize) + // 1. Handle negative values greater than MinInt64 (and) + // 2. Handle negative values lesser than MinInt64 + if intErr == nil && intValue < 0 { + return 0, nil + } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 { + return 0, nil + } + + return value, err + } + + return value, nil +} + +// Gets a single uint64 value from the specified file. 
+func getIntelRdtParamUint(path, file string) (uint64, error) { + fileName := filepath.Join(path, file) + contents, err := ioutil.ReadFile(fileName) + if err != nil { + return 0, err + } + + res, err := parseUint(strings.TrimSpace(string(contents)), 10, 64) + if err != nil { + return res, fmt.Errorf("unable to parse %q as a uint from file %q", string(contents), fileName) + } + return res, nil +} + +// Gets a string value from the specified file +func getIntelRdtParamString(path, file string) (string, error) { + contents, err := ioutil.ReadFile(filepath.Join(path, file)) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(contents)), nil +} + +func readTasksFile(dir string) ([]int, error) { + f, err := os.Open(filepath.Join(dir, IntelRdtTasks)) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + s = bufio.NewScanner(f) + out = []int{} + ) + + for s.Scan() { + if t := s.Text(); t != "" { + pid, err := strconv.Atoi(t) + if err != nil { + return nil, err + } + out = append(out, pid) + } + } + return out, nil +} + +func writeFile(dir, file, data string) error { + if dir == "" { + return fmt.Errorf("no such directory for %s", file) + } + if err := ioutil.WriteFile(filepath.Join(dir, file), []byte(data+"\n"), 0700); err != nil { + return fmt.Errorf("failed to write %v to %v: %v", data, file, err) + } + return nil +} + +func getIntelRdtData(c *configs.Config, pid int) (*intelRdtData, error) { + rootPath, err := getIntelRdtRoot() + if err != nil { + return nil, err + } + return &intelRdtData{ + root: rootPath, + config: c, + pid: pid, + }, nil +} + +// Get the read-only L3 cache information +func getL3CacheInfo() (*L3CacheInfo, error) { + l3CacheInfo := &L3CacheInfo{} + + rootPath, err := getIntelRdtRoot() + if err != nil { + return l3CacheInfo, err + } + + path := filepath.Join(rootPath, "info", "L3") + cbmMask, err := getIntelRdtParamString(path, "cbm_mask") + if err != nil { + return l3CacheInfo, err + } + minCbmBits, err := 
getIntelRdtParamUint(path, "min_cbm_bits") + if err != nil { + return l3CacheInfo, err + } + numClosids, err := getIntelRdtParamUint(path, "num_closids") + if err != nil { + return l3CacheInfo, err + } + + l3CacheInfo.CbmMask = cbmMask + l3CacheInfo.MinCbmBits = minCbmBits + l3CacheInfo.NumClosids = numClosids + + return l3CacheInfo, nil +} + +// WriteIntelRdtTasks writes the specified pid into the "tasks" file +func WriteIntelRdtTasks(dir string, pid int) error { + if dir == "" { + return fmt.Errorf("no such directory for %s", IntelRdtTasks) + } + + // Dont attach any pid if -1 is specified as a pid + if pid != -1 { + if err := ioutil.WriteFile(filepath.Join(dir, IntelRdtTasks), []byte(strconv.Itoa(pid)), 0700); err != nil { + return fmt.Errorf("failed to write %v to %v: %v", pid, IntelRdtTasks, err) + } + } + return nil +} + +// Check if Intel RDT is enabled +func IsEnabled() bool { + return isEnabled +} + +// Get the 'container_id' path in Intel RDT "resource control" filesystem +func GetIntelRdtPath(id string) (string, error) { + rootPath, err := getIntelRdtRoot() + if err != nil { + return "", err + } + + path := filepath.Join(rootPath, id) + return path, nil +} + +// Applies Intel RDT configuration to the process with the specified pid +func (m *IntelRdtManager) Apply(pid int) (err error) { + // If intelRdt is not specified in config, we do nothing + if m.Config.IntelRdt == nil { + return nil + } + d, err := getIntelRdtData(m.Config, pid) + if err != nil && !IsNotFound(err) { + return err + } + + m.mu.Lock() + defer m.mu.Unlock() + path, err := d.join(m.Id) + if err != nil { + return err + } + + m.Path = path + return nil +} + +// Destroys the Intel RDT 'container_id' group +func (m *IntelRdtManager) Destroy() error { + m.mu.Lock() + defer m.mu.Unlock() + if err := os.RemoveAll(m.Path); err != nil { + return err + } + m.Path = "" + return nil +} + +// Returns Intel RDT path to save in a state file and to be able to +// restore the object later +func (m 
*IntelRdtManager) GetPath() string { + if m.Path == "" { + m.Path, _ = GetIntelRdtPath(m.Id) + } + return m.Path +} + +// Returns statistics for Intel RDT +func (m *IntelRdtManager) GetStats() (*Stats, error) { + // If intelRdt is not specified in config + if m.Config.IntelRdt == nil { + return nil, nil + } + + m.mu.Lock() + defer m.mu.Unlock() + stats := NewStats() + + // The read-only L3 cache information + l3CacheInfo, err := getL3CacheInfo() + if err != nil { + return nil, err + } + stats.L3CacheInfo = l3CacheInfo + + // The read-only L3 cache schema in root + rootPath, err := getIntelRdtRoot() + if err != nil { + return nil, err + } + tmpRootStrings, err := getIntelRdtParamString(rootPath, "schemata") + if err != nil { + return nil, err + } + // L3 cache schema is in the first line + schemaRootStrings := strings.Split(tmpRootStrings, "\n") + stats.L3CacheSchemaRoot = schemaRootStrings[0] + + // The L3 cache schema in 'container_id' group + tmpStrings, err := getIntelRdtParamString(m.GetPath(), "schemata") + if err != nil { + return nil, err + } + // L3 cache schema is in the first line + schemaStrings := strings.Split(tmpStrings, "\n") + stats.L3CacheSchema = schemaStrings[0] + + return stats, nil +} + +// Set Intel RDT "resource control" filesystem as configured. +func (m *IntelRdtManager) Set(container *configs.Config) error { + path := m.GetPath() + + // About L3 cache schema file: + // The schema has allocation masks/values for L3 cache on each socket, + // which contains L3 cache id and capacity bitmask (CBM). + // Format: "L3:=;=;..." + // For example, on a two-socket machine, L3's schema line could be: + // L3:0=ff;1=c0 + // Which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0. + // + // About L3 cache CBM validity: + // The valid L3 cache CBM is a *contiguous bits set* and number of + // bits that can be set is less than the max bit. The max bits in the + // CBM is varied among supported Intel Xeon platforms. 
In Intel RDT + // "resource control" filesystem layout, the CBM in a group should + // be a subset of the CBM in root. Kernel will check if it is valid + // when writing. + // e.g., 0xfffff in root indicates the max bits of CBM is 20 bits, + // which mapping to entire L3 cache capacity. Some valid CBM values + // to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00 and etc. + if container.IntelRdt != nil { + l3CacheSchema := container.IntelRdt.L3CacheSchema + if l3CacheSchema != "" { + if err := writeFile(path, "schemata", l3CacheSchema); err != nil { + return err + } + } + } + + return nil +} + +func (raw *intelRdtData) join(id string) (string, error) { + path := filepath.Join(raw.root, id) + if err := os.MkdirAll(path, 0755); err != nil { + return "", err + } + + if err := WriteIntelRdtTasks(path, raw.pid); err != nil { + return "", err + } + return path, nil +} + +type NotFoundError struct { + ResourceControl string +} + +func (e *NotFoundError) Error() string { + return fmt.Sprintf("mountpoint for %s not found", e.ResourceControl) +} + +func NewNotFoundError(res string) error { + return &NotFoundError{ + ResourceControl: res, + } +} + +func IsNotFound(err error) bool { + if err == nil { + return false + } + _, ok := err.(*NotFoundError) + return ok +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/stats.go new file mode 100644 index 00000000000..095c0a380cd --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/intelrdt/stats.go @@ -0,0 +1,24 @@ +// +build linux + +package intelrdt + +type L3CacheInfo struct { + CbmMask string `json:"cbm_mask,omitempty"` + MinCbmBits uint64 `json:"min_cbm_bits,omitempty"` + NumClosids uint64 `json:"num_closids,omitempty"` +} + +type Stats struct { + // The read-only L3 cache information + L3CacheInfo *L3CacheInfo `json:"l3_cache_info,omitempty"` + + // The read-only L3 cache schema in root + L3CacheSchemaRoot string 
`json:"l3_cache_schema_root,omitempty"` + + // The L3 cache schema in 'container_id' group + L3CacheSchema string `json:"l3_cache_schema,omitempty"` +} + +func NewStats() *Stats { + return &Stats{} +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go b/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go index 82ffa7a8849..ce8b4e6b040 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go @@ -29,7 +29,7 @@ func ModKeyringPerm(ringId KeySerial, mask, setbits uint32) error { return err } - res := strings.Split(string(dest), ";") + res := strings.Split(dest, ";") if len(res) < 5 { return fmt.Errorf("Destination buffer for key description is too small") } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go index 8829b71ad85..ab453cde912 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go @@ -18,6 +18,8 @@ const ( SetgroupAttr uint16 = 27285 OomScoreAdjAttr uint16 = 27286 RootlessAttr uint16 = 27287 + UidmapPathAttr uint16 = 27288 + GidmapPathAttr uint16 = 27289 ) type Int32msg struct { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/mount/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/mount/BUILD new file mode 100644 index 00000000000..211d28b0f6b --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/mount/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "mount.go", + "mountinfo.go", + ] + select({ + "@io_bazel_rules_go//go/platform:linux": [ + "mount_linux.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/opencontainers/runc/libcontainer/mount", + visibility = 
["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/mount/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/mount/mount.go new file mode 100644 index 00000000000..e8965e081bb --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/mount/mount.go @@ -0,0 +1,23 @@ +package mount + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted looks at /proc/self/mountinfo to determine of the specified +// mountpoint has been mounted +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/mount/mount_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/mount/mount_linux.go new file mode 100644 index 00000000000..1e5191928de --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/mount/mount_linux.go @@ -0,0 +1,82 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative 
to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/mount/mountinfo.go b/vendor/github.com/opencontainers/runc/libcontainer/mount/mountinfo.go new file mode 100644 index 00000000000..e3fc3535e93 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/mount/mountinfo.go @@ -0,0 +1,40 @@ +package mount + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. 
+ VfsOpts string +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process.go b/vendor/github.com/opencontainers/runc/libcontainer/process.go index f1ad0814912..86bf7387f8c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/process.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/process.go @@ -47,6 +47,10 @@ type Process struct { // ExtraFiles specifies additional open files to be inherited by the container ExtraFiles []*os.File + // Initial sizings for the console + ConsoleWidth uint16 + ConsoleHeight uint16 + // Capabilities specify the capabilities to keep when executing the process inside the container // All capabilities not specified will be dropped from the processes capability mask Capabilities *configs.Capabilities diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go index 50f9af574c4..149b1126652 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go @@ -15,6 +15,7 @@ import ( "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/intelrdt" "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/runc/libcontainer/utils" @@ -49,6 +50,7 @@ type setnsProcess struct { parentPipe *os.File childPipe *os.File cgroupPaths map[string]string + intelRdtPath string config *initConfig fds []string process *Process @@ -83,12 +85,20 @@ func (p *setnsProcess) start() (err error) { if err = p.execSetns(); err != nil { return newSystemErrorWithCause(err, "executing setns process") } - // We can't join cgroups if we're in a rootless container. 
- if !p.config.Rootless && len(p.cgroupPaths) > 0 { + if len(p.cgroupPaths) > 0 { if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil { return newSystemErrorWithCausef(err, "adding pid %d to cgroups", p.pid()) } } + if p.intelRdtPath != "" { + // if Intel RDT "resource control" filesystem path exists + _, err := os.Stat(p.intelRdtPath) + if err == nil { + if err := intelrdt.WriteIntelRdtTasks(p.intelRdtPath, p.pid()); err != nil { + return newSystemErrorWithCausef(err, "adding pid %d to Intel RDT resource control filesystem", p.pid()) + } + } + } // set rlimits, this has to be done here because we lose permissions // to raise the limits once we enter a user-namespace if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil { @@ -193,16 +203,17 @@ func (p *setnsProcess) setExternalDescriptors(newFds []string) { } type initProcess struct { - cmd *exec.Cmd - parentPipe *os.File - childPipe *os.File - config *initConfig - manager cgroups.Manager - container *linuxContainer - fds []string - process *Process - bootstrapData io.Reader - sharePidns bool + cmd *exec.Cmd + parentPipe *os.File + childPipe *os.File + config *initConfig + manager cgroups.Manager + intelRdtManager intelrdt.Manager + container *linuxContainer + fds []string + process *Process + bootstrapData io.Reader + sharePidns bool } func (p *initProcess) pid() int { @@ -261,12 +272,35 @@ func (p *initProcess) start() error { p.process.ops = nil return newSystemErrorWithCause(err, "starting init process command") } + // Do this before syncing with child so that no children can escape the + // cgroup. We don't need to worry about not doing this and not being root + // because we'd be using the rootless cgroup manager in that case. 
+ if err := p.manager.Apply(p.pid()); err != nil { + return newSystemErrorWithCause(err, "applying cgroup configuration for process") + } + if p.intelRdtManager != nil { + if err := p.intelRdtManager.Apply(p.pid()); err != nil { + return newSystemErrorWithCause(err, "applying Intel RDT configuration for process") + } + } + defer func() { + if err != nil { + // TODO: should not be the responsibility to call here + p.manager.Destroy() + if p.intelRdtManager != nil { + p.intelRdtManager.Destroy() + } + } + }() + if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil { return newSystemErrorWithCause(err, "copying bootstrap data to pipe") } + if err := p.execSetns(); err != nil { return newSystemErrorWithCause(err, "running exec setns process for init") } + // Save the standard descriptor names before the container process // can potentially move them (e.g., via dup2()). If we don't do this now, // we won't know at checkpoint time which file descriptor to look up. @@ -275,18 +309,6 @@ func (p *initProcess) start() error { return newSystemErrorWithCausef(err, "getting pipe fds for pid %d", p.pid()) } p.setExternalDescriptors(fds) - // Do this before syncing with child so that no children can escape the - // cgroup. We don't need to worry about not doing this and not being root - // because we'd be using the rootless cgroup manager in that case. 
- if err := p.manager.Apply(p.pid()); err != nil { - return newSystemErrorWithCause(err, "applying cgroup configuration for process") - } - defer func() { - if err != nil { - // TODO: should not be the responsibility to call here - p.manager.Destroy() - } - }() if err := p.createNetworkInterfaces(); err != nil { return newSystemErrorWithCause(err, "creating network interfaces") } @@ -312,6 +334,11 @@ func (p *initProcess) start() error { if err := p.manager.Set(p.config.Config); err != nil { return newSystemErrorWithCause(err, "setting cgroup config for ready process") } + if p.intelRdtManager != nil { + if err := p.intelRdtManager.Set(p.config.Config); err != nil { + return newSystemErrorWithCause(err, "setting Intel RDT config for ready process") + } + } if p.config.Config.Hooks != nil { s := configs.HookState{ @@ -337,6 +364,11 @@ func (p *initProcess) start() error { if err := p.manager.Set(p.config.Config); err != nil { return newSystemErrorWithCause(err, "setting cgroup config for procHooks process") } + if p.intelRdtManager != nil { + if err := p.intelRdtManager.Set(p.config.Config); err != nil { + return newSystemErrorWithCause(err, "setting Intel RDT config for procHooks process") + } + } if p.config.Config.Hooks != nil { s := configs.HookState{ Version: p.container.config.Version, diff --git a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go index e2e734a8566..eb9e0253b9a 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go @@ -13,11 +13,11 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/symlink" + "github.com/cyphar/filepath-securejoin" "github.com/mrunalp/fileutils" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" + 
"github.com/opencontainers/runc/libcontainer/mount" "github.com/opencontainers/runc/libcontainer/system" libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" "github.com/opencontainers/selinux/go-selinux/label" @@ -40,7 +40,8 @@ func needsSetupDev(config *configs.Config) bool { // prepareRootfs sets up the devices, mount points, and filesystems for use // inside a new mount namespace. It doesn't set anything as ro. You must call // finalizeRootfs after this function to finish setting up the rootfs. -func prepareRootfs(pipe io.ReadWriter, config *configs.Config) (err error) { +func prepareRootfs(pipe io.ReadWriter, iConfig *initConfig) (err error) { + config := iConfig.Config if err := prepareRoot(config); err != nil { return newSystemErrorWithCause(err, "preparing rootfs") } @@ -80,6 +81,7 @@ func prepareRootfs(pipe io.ReadWriter, config *configs.Config) (err error) { // The hooks are run after the mounts are setup, but before we switch to the new // root, so that the old root is still available in the hooks for any mount // manipulations. + // Note that iConfig.Cwd is not guaranteed to exist here. if err := syncParentHooks(pipe); err != nil { return err } @@ -111,6 +113,14 @@ func prepareRootfs(pipe io.ReadWriter, config *configs.Config) (err error) { } } + if cwd := iConfig.Cwd; cwd != "" { + // Note that spec.Process.Cwd can contain unclean value like "../../../../foo/bar...". + // However, we are safe to call MkDirAll directly because we are in the jail here. + if err := os.MkdirAll(cwd, 0755); err != nil { + return err + } + } + return nil } @@ -230,7 +240,7 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error { // any previous mounts can invalidate the next mount's destination. // this can happen when a user specifies mounts within other mounts to cause breakouts or other // evil stuff to try to escape the container's rootfs. 
- if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil { + if dest, err = securejoin.SecureJoin(rootfs, m.Destination); err != nil { return err } if err := checkMountDestination(rootfs, dest); err != nil { @@ -318,7 +328,7 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error { // this can happen when a user specifies mounts within other mounts to cause breakouts or other // evil stuff to try to escape the container's rootfs. var err error - if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil { + if dest, err = securejoin.SecureJoin(rootfs, m.Destination); err != nil { return err } if err := checkMountDestination(rootfs, dest); err != nil { @@ -668,9 +678,12 @@ func pivotRoot(rootfs string) error { return err } - // Make oldroot rprivate to make sure our unmounts don't propagate to the - // host (and thus bork the machine). - if err := unix.Mount("", ".", "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + // Make oldroot rslave to make sure our unmounts don't propagate to the + // host (and thus bork the machine). We don't use rprivate because this is + // known to cause issues due to races where we still have a reference to a + // mount while a process in the host namespace are trying to operate on + // something they think has no mounts (devicemapper in particular). + if err := unix.Mount("", ".", "", unix.MS_SLAVE|unix.MS_REC, ""); err != nil { return err } // Preform the unmount. MNT_DETACH allows us to unmount /proc/self/cwd. @@ -733,7 +746,14 @@ func remountReadonly(m *configs.Mount) error { flags = m.Flags ) for i := 0; i < 5; i++ { - if err := unix.Mount("", dest, "", uintptr(flags|unix.MS_REMOUNT|unix.MS_RDONLY), ""); err != nil { + // There is a special case in the kernel for + // MS_REMOUNT | MS_BIND, which allows us to change only the + // flags even as an unprivileged user (i.e. user namespace) + // assuming we don't drop any security related flags (nodev, + // nosuid, etc.). 
So, let's use that case so that we can do + // this re-mount without failing in a userns. + flags |= unix.MS_REMOUNT | unix.MS_BIND | unix.MS_RDONLY + if err := unix.Mount("", dest, "", uintptr(flags), ""); err != nil { switch err { case unix.EBUSY: time.Sleep(100 * time.Millisecond) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go index 2523cbf9901..d99f3fe640c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go @@ -22,6 +22,11 @@ var ( actErrno = libseccomp.ActErrno.SetReturnCode(int16(unix.EPERM)) ) +const ( + // Linux system calls can have at most 6 arguments + syscallMaxArguments int = 6 +) + // Filters given syscalls in a container, preventing them from being used // Started in the container init process, and carried over to all child processes // Setns calls, however, require a separate invocation, as they are not children @@ -45,11 +50,11 @@ func InitSeccomp(config *configs.Seccomp) error { for _, arch := range config.Architectures { scmpArch, err := libseccomp.GetArchFromString(arch) if err != nil { - return err + return fmt.Errorf("error validating Seccomp architecture: %s", err) } if err := filter.AddArch(scmpArch); err != nil { - return err + return fmt.Errorf("error adding architecture to seccomp filter: %s", err) } } @@ -170,29 +175,55 @@ func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error { // Convert the call's action to the libseccomp equivalent callAct, err := getAction(call.Action) if err != nil { - return err + return fmt.Errorf("action in seccomp profile is invalid: %s", err) } // Unconditional match - just add the rule if len(call.Args) == 0 { if err = filter.AddRule(callNum, callAct); err != nil { - return err + return fmt.Errorf("error adding seccomp filter rule for syscall %s: %s", 
call.Name, err) } } else { - // Conditional match - convert the per-arg rules into library format + // If two or more arguments have the same condition, + // Revert to old behavior, adding each condition as a separate rule + argCounts := make([]uint, syscallMaxArguments) conditions := []libseccomp.ScmpCondition{} for _, cond := range call.Args { newCond, err := getCondition(cond) if err != nil { - return err + return fmt.Errorf("error creating seccomp syscall condition for syscall %s: %s", call.Name, err) } + argCounts[cond.Index] += 1 + conditions = append(conditions, newCond) } - if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil { - return err + hasMultipleArgs := false + for _, count := range argCounts { + if count > 1 { + hasMultipleArgs = true + break + } + } + + if hasMultipleArgs { + // Revert to old behavior + // Add each condition attached to a separate rule + for _, cond := range conditions { + condArr := []libseccomp.ScmpCondition{cond} + + if err = filter.AddRuleConditional(callNum, callAct, condArr); err != nil { + return fmt.Errorf("error adding seccomp rule for syscall %s: %s", call.Name, err) + } + } + } else { + // No conditions share same argument + // Use new, proper behavior + if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil { + return fmt.Errorf("error adding seccomp rule for syscall %s: %s", call.Name, err) + } } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go deleted file mode 100644 index c7bdb605aa8..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux,go1.5 - -package libcontainer - -import "syscall" - -// Set the GidMappingsEnableSetgroups member to true, so the process's -// setgroups proc entry wont be set to 'deny' if GidMappings are set -func enableSetgroups(sys *syscall.SysProcAttr) { - 
sys.GidMappingsEnableSetgroups = true -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go index 35b84219c5d..096c601e767 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go @@ -47,7 +47,10 @@ func (l *linuxSetnsInit) Init() error { return err } } - if l.config.Config.Seccomp != nil { + // Without NoNewPrivileges seccomp is a privileged operation, so we need to + // do this before dropping capabilities; otherwise do it as late as possible + // just before execve so as few syscalls take place after it as possible. + if l.config.Config.Seccomp != nil && !l.config.NoNewPrivileges { if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { return err } @@ -61,5 +64,13 @@ func (l *linuxSetnsInit) Init() error { if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil { return err } + // Set seccomp as close to execve as possible, so as few syscalls take + // place afterward (reducing the amount of syscalls that users need to + // enable in their seccomp profiles). 
+ if l.config.Config.Seccomp != nil && l.config.NoNewPrivileges { + if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { + return newSystemErrorWithCause(err, "init seccomp") + } + } return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ()) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go index fbcf3a6ac02..8a544ed5be7 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go @@ -30,15 +30,15 @@ func (l *linuxStandardInit) getSessionRingParams() (string, uint32, uint32) { var newperms uint32 if l.config.Config.Namespaces.Contains(configs.NEWUSER) { - // with user ns we need 'other' search permissions + // With user ns we need 'other' search permissions. newperms = 0x8 } else { - // without user ns we need 'UID' search permissions + // Without user ns we need 'UID' search permissions. newperms = 0x80000 } - // create a unique per session container name that we can - // join in setns; however, other containers can also join it + // Create a unique per session container name that we can join in setns; + // However, other containers can also join it. return fmt.Sprintf("_ses.%s", l.config.ContainerId), 0xffffffff, newperms } @@ -46,12 +46,12 @@ func (l *linuxStandardInit) Init() error { if !l.config.Config.NoNewKeyring { ringname, keepperms, newperms := l.getSessionRingParams() - // do not inherit the parent's session keyring + // Do not inherit the parent's session keyring. sessKeyId, err := keys.JoinSessionKeyring(ringname) if err != nil { return err } - // make session keyring searcheable + // Make session keyring searcheable. 
if err := keys.ModKeyringPerm(sessKeyId, keepperms, newperms); err != nil { return err } @@ -68,7 +68,7 @@ func (l *linuxStandardInit) Init() error { // prepareRootfs() can be executed only for a new mount namespace. if l.config.Config.Namespaces.Contains(configs.NEWNS) { - if err := prepareRootfs(l.pipe, l.config.Config); err != nil { + if err := prepareRootfs(l.pipe, l.config); err != nil { return err } } @@ -150,19 +150,20 @@ func (l *linuxStandardInit) Init() error { if err := pdeath.Restore(); err != nil { return err } - // compare the parent from the initial start of the init process and make sure that it did not change. - // if the parent changes that means it died and we were reparented to something else so we should - // just kill ourself and not cause problems for someone else. + // Compare the parent from the initial start of the init process and make + // sure that it did not change. if the parent changes that means it died + // and we were reparented to something else so we should just kill ourself + // and not cause problems for someone else. if unix.Getppid() != l.parentPid { return unix.Kill(unix.Getpid(), unix.SIGKILL) } - // check for the arg before waiting to make sure it exists and it is returned - // as a create time error. + // Check for the arg before waiting to make sure it exists and it is + // returned as a create time error. name, err := exec.LookPath(l.config.Args[0]) if err != nil { return err } - // close the pipe to signal that we have completed our init. + // Close the pipe to signal that we have completed our init. l.pipe.Close() // Wait for the FIFO to be opened on the other side before exec-ing the // user process. We open it through /proc/self/fd/$fd, because the fd that @@ -170,19 +171,26 @@ func (l *linuxStandardInit) Init() error { // re-open an O_PATH fd through /proc. 
fd, err := unix.Open(fmt.Sprintf("/proc/self/fd/%d", l.fifoFd), unix.O_WRONLY|unix.O_CLOEXEC, 0) if err != nil { - return newSystemErrorWithCause(err, "openat exec fifo") + return newSystemErrorWithCause(err, "open exec fifo") } if _, err := unix.Write(fd, []byte("0")); err != nil { return newSystemErrorWithCause(err, "write 0 exec fifo") } + // Close the O_PATH fifofd fd before exec because the kernel resets + // dumpable in the wrong order. This has been fixed in newer kernels, but + // we keep this to ensure CVE-2016-9962 doesn't re-emerge on older kernels. + // N.B. the core issue itself (passing dirfds to the host filesystem) has + // since been resolved. + // https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318 + unix.Close(l.fifoFd) + // Set seccomp as close to execve as possible, so as few syscalls take + // place afterward (reducing the amount of syscalls that users need to + // enable in their seccomp profiles). if l.config.Config.Seccomp != nil && l.config.NoNewPrivileges { if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { return newSystemErrorWithCause(err, "init seccomp") } } - // close the statedir fd before exec because the kernel resets dumpable in the wrong order - // https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318 - unix.Close(l.fifoFd) if err := syscall.Exec(name, l.config.Args[0:], os.Environ()); err != nil { return newSystemErrorWithCause(err, "exec user process") } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go index 44fa6b43a8d..1f8c5e71e41 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go @@ -45,6 +45,11 @@ func destroy(c *linuxContainer) error { } } err := c.cgroupManager.Destroy() + if c.intelRdtManager != nil { + if ierr := c.intelRdtManager.Destroy(); err == nil { + err = ierr + } + } if 
rerr := os.RemoveAll(c.root); err == nil { err = rerr } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go deleted file mode 100644 index f8d1d689cee..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainer - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go index c629dc67de9..29fd641e9dd 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go @@ -1,8 +1,10 @@ package libcontainer import "github.com/opencontainers/runc/libcontainer/cgroups" +import "github.com/opencontainers/runc/libcontainer/intelrdt" type Stats struct { - Interfaces []*NetworkInterface - CgroupStats *cgroups.Stats + Interfaces []*NetworkInterface + CgroupStats *cgroups.Stats + IntelRdtStats *intelrdt.Stats } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go deleted file mode 100644 index da78c1c2e15..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package libcontainer - -// Solaris - TODO - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go deleted file mode 100644 index f8d1d689cee..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainer - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD 
b/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD index 7ba719fd195..0e7fc01da98 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/BUILD @@ -18,7 +18,6 @@ go_library( "unsupported.go", ], "@io_bazel_rules_go//go/platform:freebsd": [ - "sysconfig.go", "sysconfig_notcgo.go", "unsupported.go", ], @@ -55,17 +54,29 @@ go_library( "//conditions:default": [], }) + select({ "@io_bazel_rules_go//go/platform:linux_386": [ - "syscall_linux_386.go", + "syscall_linux_32.go", ], "@io_bazel_rules_go//go/platform:linux_amd64": [ "syscall_linux_64.go", ], "@io_bazel_rules_go//go/platform:linux_arm": [ - "syscall_linux_arm.go", + "syscall_linux_32.go", ], "@io_bazel_rules_go//go/platform:linux_arm64": [ "syscall_linux_64.go", ], + "@io_bazel_rules_go//go/platform:linux_mips": [ + "syscall_linux_64.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips64": [ + "syscall_linux_64.go", + ], + "@io_bazel_rules_go//go/platform:linux_mips64le": [ + "syscall_linux_64.go", + ], + "@io_bazel_rules_go//go/platform:linux_mipsle": [ + "syscall_linux_64.go", + ], "@io_bazel_rules_go//go/platform:linux_ppc64": [ "syscall_linux_64.go", ], diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index 4837085a7fd..5f124cd8bbc 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -134,3 +134,14 @@ func RunningInUserNS() bool { func SetSubreaper(i int) error { return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) } + +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + var i uintptr + + if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil { + return -1, err + } + + return int(i), nil +} diff 
--git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go similarity index 93% rename from vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go rename to vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go index 31ff3deb135..c5ca5d86235 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go @@ -1,4 +1,5 @@ -// +build linux,arm +// +build linux +// +build 386 arm package system diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go deleted file mode 100644 index 3f7235ed154..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,386 - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. 
-func Setgid(gid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go index d7891a2ffa0..11c3faafbf0 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go @@ -1,4 +1,5 @@ -// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x +// +build linux +// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le s390x package system diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go index b3a07cba3ef..b8434f10500 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go @@ -1,4 +1,4 @@ -// +build cgo,linux cgo,freebsd +// +build cgo,linux package system diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD b/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD index d70f0eabb1d..966c1074fb7 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/BUILD @@ -6,9 +6,6 @@ go_library( "lookup.go", "user.go", ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "lookup_unsupported.go", - ], "@io_bazel_rules_go//go/platform:darwin": [ "lookup_unix.go", ], @@ -21,24 +18,15 @@ go_library( "@io_bazel_rules_go//go/platform:linux": [ "lookup_unix.go", ], - "@io_bazel_rules_go//go/platform:nacl": [ - "lookup_unsupported.go", - ], "@io_bazel_rules_go//go/platform:netbsd": [ "lookup_unix.go", ], "@io_bazel_rules_go//go/platform:openbsd": [ "lookup_unix.go", ], - 
"@io_bazel_rules_go//go/platform:plan9": [ - "lookup_unsupported.go", - ], "@io_bazel_rules_go//go/platform:solaris": [ "lookup_unix.go", ], - "@io_bazel_rules_go//go/platform:windows": [ - "lookup_unsupported.go", - ], "//conditions:default": [], }), importpath = "github.com/opencontainers/runc/libcontainer/user", diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 4a8d00acbd5..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import ( - "io" - "syscall" -) - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(syscall.Getuid()) -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. 
-func CurrentGroup() (Group, error) { - return LookupGid(syscall.Getgid()) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go index 2cbb6491a70..c8a9364d54d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go @@ -84,12 +84,10 @@ func RecvFd(socket *os.File) (*os.File, error) { // addition, the file.Name() of the given file will also be sent as // non-auxiliary data in the same payload (allowing to send contextual // information for a file descriptor). -func SendFd(socket, file *os.File) error { - name := []byte(file.Name()) +func SendFd(socket *os.File, name string, fd uintptr) error { if len(name) >= MaxNameLen { - return fmt.Errorf("sendfd: filename too long: %s", file.Name()) + return fmt.Errorf("sendfd: filename too long: %s", name) } - oob := unix.UnixRights(int(file.Fd())) - - return unix.Sendmsg(int(socket.Fd()), name, oob, nil, 0) + oob := unix.UnixRights(int(fd)) + return unix.Sendmsg(int(socket.Fd()), []byte(name), oob, nil, 0) } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/README b/vendor/k8s.io/kube-openapi/pkg/generators/README index 35660a40da7..feb19b401a9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/README +++ b/vendor/k8s.io/kube-openapi/pkg/generators/README @@ -11,5 +11,36 @@ escape or quote the value string. Extensions can be used to pass more informatio documentation generators. For example a type might have a friendly name to be displayed in documentation or being used in a client's fluent interface. +# Custom OpenAPI type definitions + +Custom types which otherwise don't map directly to OpenAPI can override their +OpenAPI definition by implementing a function named "OpenAPIDefinition" with +the following signature: + + import openapi "k8s.io/kube-openapi/pkg/common" + + // ... 
+ + type Time struct { + time.Time + } + + func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "date-time", + }, + }, + } + } + +Alternatively, the type can avoid the "openapi" import by defining the following +methods. The following example produces the same OpenAPI definition as the +example above: + + func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + func (_ Time) OpenAPISchemaFormat() string { return "date-time" } TODO(mehdy): Make k8s:openapi-gen a parameter to the generator now that OpenAPI has its own repo. diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index 5efb3f45c6f..d9b0980abb4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -118,35 +118,13 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat `)...) - outputPath := arguments.OutputPackagePath - - if err := context.AddDir(outputPath); err != nil { - glog.Fatalf("Failed to load output package: %v", err) - } - - // Compute the canonical output path to allow retrieval of the - // package for a vendored output path. - const vendorPath = "/vendor/" - canonicalOutputPath := outputPath - if strings.Contains(outputPath, vendorPath) { - canonicalOutputPath = outputPath[strings.Index(outputPath, vendorPath)+len(vendorPath):] - } - - // The package for outputPath is mapped to the canonical path - pkg := context.Universe[canonicalOutputPath] - if pkg == nil { - glog.Fatalf("Got nil output package: %v", err) - } return generator.Packages{ &generator.DefaultPackage{ - PackageName: strings.Split(filepath.Base(pkg.Path), ".")[0], - // Use the supplied output path rather than the canonical - // one to allow generation into the path of a - // vendored package. 
- PackagePath: outputPath, + PackageName: filepath.Base(arguments.OutputPackagePath), + PackagePath: arguments.OutputPackagePath, HeaderText: header, GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{NewOpenAPIGen(arguments.OutputFileBaseName, pkg, context)} + return []generator.Generator{NewOpenAPIGen(arguments.OutputFileBaseName, arguments.OutputPackagePath, context)} }, FilterFunc: func(c *generator.Context, t *types.Type) bool { // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen @@ -175,12 +153,12 @@ const ( type openAPIGen struct { generator.DefaultGen // TargetPackage is the package that will get GetOpenAPIDefinitions function returns all open API definitions. - targetPackage *types.Package + targetPackage string imports namer.ImportTracker context *generator.Context } -func NewOpenAPIGen(sanitizedName string, targetPackage *types.Package, context *generator.Context) generator.Generator { +func NewOpenAPIGen(sanitizedName string, targetPackage string, context *generator.Context) generator.Generator { return &openAPIGen{ DefaultGen: generator.DefaultGen{ OptionalName: sanitizedName, @@ -194,7 +172,7 @@ func NewOpenAPIGen(sanitizedName string, targetPackage *types.Package, context * func (g *openAPIGen) Namers(c *generator.Context) namer.NameSystems { // Have the raw namer for this file track what it imports. 
return namer.NameSystems{ - "raw": namer.NewRawNamer(g.targetPackage.Path, g.imports), + "raw": namer.NewRawNamer(g.targetPackage, g.imports), } } @@ -207,10 +185,10 @@ func (g *openAPIGen) Filter(c *generator.Context, t *types.Type) bool { } func (g *openAPIGen) isOtherPackage(pkg string) bool { - if pkg == g.targetPackage.Path { + if pkg == g.targetPackage { return false } - if strings.HasSuffix(pkg, "\""+g.targetPackage.Path+"\"") { + if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") { return false } return true @@ -300,23 +278,37 @@ func newOpenAPITypeWriter(sw *generator.SnippetWriter) openAPITypeWriter { } } +func methodReturnsValue(mt *types.Type, pkg, name string) bool { + if len(mt.Signature.Parameters) != 0 || len(mt.Signature.Results) != 1 { + return false + } + r := mt.Signature.Results[0] + return r.Name.Name == name && r.Name.Package == pkg +} + func hasOpenAPIDefinitionMethod(t *types.Type) bool { for mn, mt := range t.Methods { if mn != "OpenAPIDefinition" { continue } - if len(mt.Signature.Parameters) != 0 || len(mt.Signature.Results) != 1 { - return false - } - r := mt.Signature.Results[0] - if r.Name.Name != "OpenAPIDefinition" || r.Name.Package != openAPICommonPackagePath { - return false - } - return true + return methodReturnsValue(mt, openAPICommonPackagePath, "OpenAPIDefinition") } return false } +func hasOpenAPIDefinitionMethods(t *types.Type) bool { + var hasSchemaTypeMethod, hasOpenAPISchemaFormat bool + for mn, mt := range t.Methods { + switch mn { + case "OpenAPISchemaType": + hasSchemaTypeMethod = methodReturnsValue(mt, "", "[]string") + case "OpenAPISchemaFormat": + hasOpenAPISchemaFormat = methodReturnsValue(mt, "", "string") + } + } + return hasSchemaTypeMethod && hasOpenAPISchemaFormat +} + // typeShortName returns short package name (e.g. the name x appears in package x definition) dot type name. func typeShortName(t *types.Type) string { return filepath.Base(t.Name.Package) + "." 
+ t.Name.Name @@ -360,6 +352,28 @@ func (g openAPITypeWriter) generate(t *types.Type) error { g.Do("$.type|raw${}.OpenAPIDefinition(),\n", args) return nil } + if hasOpenAPIDefinitionMethods(t) { + // Since this generated snippet is part of a map: + // + // map[string]common.OpenAPIDefinition: { + // "TYPE_NAME": { + // Schema: spec.Schema{ ... }, + // }, + // } + // + // For compliance with gofmt -s it's important we elide the + // struct type. The type is implied by the map and will be + // removed otherwise. + g.Do("{\n"+ + "Schema: spec.Schema{\n"+ + "SchemaProps: spec.SchemaProps{\n"+ + "Type:$.type|raw${}.OpenAPISchemaType(),\n"+ + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "},\n", args) + return nil + } g.Do("{\nSchema: spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) g.generateDescription(t.CommentLines) g.Do("Properties: map[string]$.SpecSchemaType|raw${\n", args) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 5f607c76701..61dbf4fc0e4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -210,11 +210,18 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error }, nil } +func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + return &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + }, nil +} + // ParseSchema creates a walkable Schema from an openapi schema. While // this function is public, it doesn't leak through the interface. 
func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { - if len(s.GetType().GetValue()) == 1 { - t := s.GetType().GetValue()[0] + objectTypes := s.GetType().GetValue() + if len(objectTypes) == 1 { + t := objectTypes[0] switch t { case object: return d.parseMap(s, path) @@ -229,6 +236,9 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err if s.GetProperties() != nil { return d.parseKind(s, path) } + if len(objectTypes) == 0 || (len(objectTypes) == 1 && objectTypes[0] == "") { + return d.parseArbitrary(s, path) + } return d.parsePrimitive(s, path) } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go index 02ab06d6d53..b48e62c3bf9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -58,6 +58,14 @@ type SchemaVisitor interface { VisitReference(Reference) } +// SchemaVisitorArbitrary is an additional visitor interface which handles +// arbitrary types. For backwards compatability, it's a separate interface +// which is checked for at runtime. +type SchemaVisitorArbitrary interface { + SchemaVisitor + VisitArbitrary(*Arbitrary) +} + // Schema is the base definition of an openapi type. type Schema interface { // Giving a visitor here will let you visit the actual type. @@ -242,6 +250,23 @@ func (p *Primitive) GetName() string { return fmt.Sprintf("%s (%s)", p.Type, p.Format) } +// Arbitrary is a value of any type (primitive, object or array) +type Arbitrary struct { + BaseSchema +} + +var _ Schema = &Arbitrary{} + +func (a *Arbitrary) Accept(v SchemaVisitor) { + if visitor, ok := v.(SchemaVisitorArbitrary); ok { + visitor.VisitArbitrary(a) + } +} + +func (a *Arbitrary) GetName() string { + return "Arbitrary value (primitive, object or array)" +} + // Reference implementation depends on the type of document. 
type Reference interface { Schema diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go index 0be7a5302f1..bbbdd4f61c9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go @@ -127,6 +127,9 @@ func (item *mapItem) VisitKind(schema *proto.Kind) { } } +func (item *mapItem) VisitArbitrary(schema *proto.Arbitrary) { +} + func (item *mapItem) VisitReference(schema proto.Reference) { // passthrough schema.SubSchema().Accept(item) @@ -163,11 +166,14 @@ func (item *arrayItem) VisitArray(schema *proto.Array) { } func (item *arrayItem) VisitMap(schema *proto.Map) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) } func (item *arrayItem) VisitKind(schema *proto.Kind) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) +} + +func (item *arrayItem) VisitArbitrary(schema *proto.Arbitrary) { } func (item *arrayItem) VisitReference(schema proto.Reference) { @@ -226,6 +232,9 @@ func (item *primitiveItem) VisitKind(schema *proto.Kind) { item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) } +func (item *primitiveItem) VisitArbitrary(schema *proto.Arbitrary) { +} + func (item *primitiveItem) VisitReference(schema proto.Reference) { // passthrough schema.SubSchema().Accept(item) From 65046435e772f7e0e521fced59737bca11980bcb Mon Sep 17 00:00:00 2001 From: Mik Vyatskov Date: Fri, 12 Jan 2018 20:26:37 +0100 Subject: [PATCH 755/794] Adjust the Stackdriver Logging length test Signed-off-by: Mik Vyatskov --- 
test/e2e/instrumentation/logging/stackdrvier/basic.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/instrumentation/logging/stackdrvier/basic.go b/test/e2e/instrumentation/logging/stackdrvier/basic.go index 7bc5e682b5f..7b0f6ff593a 100644 --- a/test/e2e/instrumentation/logging/stackdrvier/basic.go +++ b/test/e2e/instrumentation/logging/stackdrvier/basic.go @@ -108,7 +108,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd }) ginkgo.By("Checking that too long lines are trimmed", func() { - maxLength := 100000 + maxLength := 100 * 1024 cmd := []string{ "/bin/sh", "-c", From c1554d08d8e5af81eb3f10868be9a5652b85d089 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Fri, 12 Jan 2018 12:27:34 -0800 Subject: [PATCH 756/794] Install gazelle from bazelbuild/bazel-gazelle instead of rules_go --- hack/update-bazel.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/update-bazel.sh b/hack/update-bazel.sh index e569d94e9c7..1690c32f8f5 100755 --- a/hack/update-bazel.sh +++ b/hack/update-bazel.sh @@ -31,8 +31,8 @@ kube::util::go_install_from_commit \ github.com/kubernetes/repo-infra/kazel \ ae4e9a3906ace4ba657b7a09242610c6266e832c kube::util::go_install_from_commit \ - github.com/bazelbuild/rules_go/go/tools/gazelle/gazelle \ - 737df20c53499fd84b67f04c6ca9ccdee2e77089 + github.com/bazelbuild/bazel-gazelle/cmd/gazelle \ + 31ce76e3acc34a22434d1a783bb9b3cae790d108 # 0.8.0 touch "${KUBE_ROOT}/vendor/BUILD" From 0d5eb00a39293bad4237bd4556f67ff959e13c49 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Fri, 12 Jan 2018 12:27:49 -0800 Subject: [PATCH 757/794] Revert "Rewrite go_install_from_commit to handle pkgs that aren't in HEAD" This reverts commit e55938940d2d95e9cb1ff1def63cc54d7725f774. 
--- hack/lib/util.sh | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/hack/lib/util.sh b/hack/lib/util.sh index 0035ced815f..f699a295267 100755 --- a/hack/lib/util.sh +++ b/hack/lib/util.sh @@ -476,15 +476,10 @@ kube::util::go_install_from_commit() { kube::util::ensure-temp-dir mkdir -p "${KUBE_TEMP}/go/src" - # TODO(spiffxp): remove this brittle workaround for go getting a package that doesn't exist at HEAD - repo=$(echo ${pkg} | cut -d/ -f1-3) - git clone "https://${repo}" "${KUBE_TEMP}/go/src/${repo}" - # GOPATH="${KUBE_TEMP}/go" go get -d -u "${pkg}" + GOPATH="${KUBE_TEMP}/go" go get -d -u "${pkg}" ( - cd "${KUBE_TEMP}/go/src/${repo}" - git fetch # TODO(spiffxp): workaround + cd "${KUBE_TEMP}/go/src/${pkg}" git checkout -q "${commit}" - GOPATH="${KUBE_TEMP}/go" go get -d "${pkg}" #TODO(spiffxp): workaround GOPATH="${KUBE_TEMP}/go" go install "${pkg}" ) PATH="${KUBE_TEMP}/go/bin:${PATH}" From d8f6febc7d2ea5c045dd93ff9ec7a06f11cdf2a5 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Fri, 12 Jan 2018 10:08:23 -0800 Subject: [PATCH 758/794] Use the bazel version check function from bazel-skylib --- build/root/WORKSPACE | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index c6ba9c71059..a3bf016e7f1 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -12,6 +12,13 @@ http_archive( urls = ["https://github.com/kubernetes/repo-infra/archive/1fb0a3ff0cc5308a6d8e2f3f9c57d1f2f940354e.tar.gz"], ) +http_archive( + name = "bazel_skylib", + sha256 = "bbccf674aa441c266df9894182d80de104cabd19be98be002f6d478aaa31574d", + strip_prefix = "bazel-skylib-2169ae1c374aab4a09aa90e65efe1a3aad4e279b", + urls = ["https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"], +) + ETCD_VERSION = "3.1.10" new_http_archive( @@ -39,9 +46,9 @@ http_archive( urls = 
["https://github.com/bazelbuild/rules_docker/archive/8bbe2a8abd382641e65ff7127a3700a8530f02ce.tar.gz"], ) -load("@io_kubernetes_build//defs:bazel_version.bzl", "check_version") +load("@bazel_skylib//:lib.bzl", "versions") -check_version("0.8.0") +versions.check(minimum_bazel_version = "0.8.0") load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains", "go_download_sdk") load("@io_bazel_rules_docker//docker:docker.bzl", "docker_repositories", "docker_pull") From b96c383ef7982f526db190ebba76730fd6c818df Mon Sep 17 00:00:00 2001 From: Penghao Cen Date: Sat, 13 Jan 2018 05:47:49 +0800 Subject: [PATCH 759/794] Check grpc server ready properly --- pkg/kubelet/cm/deviceplugin/device_plugin_stub.go | 2 +- pkg/kubelet/cm/deviceplugin/endpoint_test.go | 12 ++---------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go index 9969e99989b..5e39dd00853 100644 --- a/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go +++ b/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go @@ -70,7 +70,7 @@ func (m *Stub) Start() error { // Wait till grpc server is ready. for i := 0; i < 10; i++ { services := m.server.GetServiceInfo() - if len(services) > 1 { + if len(services) > 0 { break } time.Sleep(1 * time.Second) diff --git a/pkg/kubelet/cm/deviceplugin/endpoint_test.go b/pkg/kubelet/cm/deviceplugin/endpoint_test.go index f4634db85f4..6005310181a 100644 --- a/pkg/kubelet/cm/deviceplugin/endpoint_test.go +++ b/pkg/kubelet/cm/deviceplugin/endpoint_test.go @@ -90,20 +90,12 @@ func TestRun(t *testing.T) { go e.run() // Wait for the first callback to be issued. - select { - case <-callbackChan: - break - } + <-callbackChan p.Update(updated) // Wait for the second callback to be issued. 
- select { - case <-callbackChan: - break - } - - time.Sleep(time.Second) + <-callbackChan e.mutex.Lock() defer e.mutex.Unlock() From 7064f4856ab1addbc612fdc7f5aa2957207232fd Mon Sep 17 00:00:00 2001 From: Robert Bailey Date: Wed, 10 Jan 2018 16:47:04 -0800 Subject: [PATCH 760/794] Remove salt support for providers that no longer exist. --- cluster/get-kube.sh | 8 - cluster/saltbase/salt/base.sls | 14 -- cluster/saltbase/salt/cni/init.sls | 15 -- cluster/saltbase/salt/docker/init.sls | 42 ----- cluster/saltbase/salt/generate-cert/init.sls | 15 +- .../salt/generate-cert/make-ca-cert.sh | 18 --- cluster/saltbase/salt/helpers/init.sls | 14 -- .../salt/helpers/safe_format_and_mount | 144 ------------------ cluster/saltbase/salt/kube-addons/init.sls | 2 +- cluster/saltbase/salt/kube-apiserver/init.sls | 2 +- .../kube-apiserver/kube-apiserver.manifest | 18 +-- .../kube-controller-manager.manifest | 16 +- .../saltbase/salt/kube-node-unpacker/init.sls | 4 - .../salt/kube-proxy/kube-proxy.manifest | 2 +- cluster/saltbase/salt/kubelet/default | 7 +- cluster/saltbase/salt/top.sls | 8 +- 16 files changed, 15 insertions(+), 314 deletions(-) delete mode 100644 cluster/saltbase/salt/helpers/init.sls delete mode 100644 cluster/saltbase/salt/helpers/safe_format_and_mount diff --git a/cluster/get-kube.sh b/cluster/get-kube.sh index f0492d45ac6..b733bd2187e 100755 --- a/cluster/get-kube.sh +++ b/cluster/get-kube.sh @@ -24,14 +24,6 @@ # Set KUBERNETES_PROVIDER to choose between different providers: # Google Compute Engine [default] # * export KUBERNETES_PROVIDER=gce; wget -q -O - https://get.k8s.io | bash -# Libvirt (with CoreOS as a guest operating system) -# * export KUBERNETES_PROVIDER=libvirt-coreos; wget -q -O - https://get.k8s.io | bash -# Vagrant (local virtual machines) -# * export KUBERNETES_PROVIDER=vagrant; wget -q -O - https://get.k8s.io | bash -# VMWare Photon Controller -# * export KUBERNETES_PROVIDER=photon-controller; wget -q -O - https://get.k8s.io | bash -# 
OpenStack-Heat -# * export KUBERNETES_PROVIDER=openstack-heat; wget -q -O - https://get.k8s.io | bash # # Set KUBERNETES_RELEASE to choose a specific release instead of the current # stable release, (e.g. 'v1.3.7'). diff --git a/cluster/saltbase/salt/base.sls b/cluster/saltbase/salt/base.sls index 91639730dd0..c9ccdf5a84c 100644 --- a/cluster/saltbase/salt/base.sls +++ b/cluster/saltbase/salt/base.sls @@ -24,20 +24,6 @@ pkg-core: - git {% endif %} -# Fix ARP cache issues on AWS by setting net.ipv4.neigh.default.gc_thresh1=0 -# See issue #23395 -{% if grains.get('cloud') == 'aws' %} -# Work around Salt #18089: https://github.com/saltstack/salt/issues/18089 -# (we also have to give it a different id from the same fix elsewhere) -99-salt-conf-with-a-different-id: - file.touch: - - name: /etc/sysctl.d/99-salt.conf - -net.ipv4.neigh.default.gc_thresh1: - sysctl.present: - - value: 0 -{% endif %} - /usr/local/share/doc/kubernetes: file.directory: - user: root diff --git a/cluster/saltbase/salt/cni/init.sls b/cluster/saltbase/salt/cni/init.sls index a1d1060d6bd..14f26927fef 100644 --- a/cluster/saltbase/salt/cni/init.sls +++ b/cluster/saltbase/salt/cni/init.sls @@ -24,18 +24,3 @@ cni-tar: - source_hash: md5=9534876FAE7DBE813CDAB404DC1F9219 - archive_format: tar - if_missing: /home/kubernetes/bin - -{% if grains['cloud'] is defined and grains.cloud in [ 'vagrant' ] %} -# Install local CNI network plugins in a Vagrant environment -cmd-local-cni-plugins: - cmd.run: - - name: | - cp -v /vagrant/cluster/network-plugins/cni/bin/* /home/kubernetes/bin/. - chmod +x /home/kubernetes/bin/* -cmd-local-cni-config: - cmd.run: - - name: | - cp -v /vagrant/cluster/network-plugins/cni/config/* /etc/cni/net.d/. 
- chown root:root /etc/cni/net.d/* - chmod 744 /etc/cni/net.d/* -{% endif -%} diff --git a/cluster/saltbase/salt/docker/init.sls b/cluster/saltbase/salt/docker/init.sls index ed1b9186d5a..23ab4cfba1b 100644 --- a/cluster/saltbase/salt/docker/init.sls +++ b/cluster/saltbase/salt/docker/init.sls @@ -344,37 +344,6 @@ net.ipv4.ip_forward: {% set override_deb_sha1='' %} {% set override_docker_ver='' %} -{% elif grains.get('cloud', '') == 'aws' - and grains.get('os_family', '') == 'Debian' - and grains.get('oscodename', '') == 'jessie' -%} -# TODO: Get from google storage? -{% set docker_pkg_name='docker-engine' %} -{% set override_docker_ver='1.11.2-0~jessie' %} -{% set override_deb='docker-engine_1.11.2-0~jessie_amd64.deb' %} -{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~jessie_amd64.deb' %} -{% set override_deb_sha1='c312f1f6fa0b34df4589bb812e4f7af8e28fd51d' %} - -# Ubuntu presents as os_family=Debian, osfullname=Ubuntu -{% elif grains.get('cloud', '') == 'aws' - and grains.get('os_family', '') == 'Debian' - and grains.get('oscodename', '') == 'trusty' -%} -# TODO: Get from google storage? -{% set docker_pkg_name='docker-engine' %} -{% set override_docker_ver='1.11.2-0~trusty' %} -{% set override_deb='docker-engine_1.11.2-0~trusty_amd64.deb' %} -{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~trusty_amd64.deb' %} -{% set override_deb_sha1='022dee31e68c6d572eaac750915786e4a6729d2a' %} - -{% elif grains.get('cloud', '') == 'aws' - and grains.get('os_family', '') == 'Debian' - and grains.get('oscodename', '') == 'wily' -%} -# TODO: Get from google storage? 
-{% set docker_pkg_name='docker-engine' %} -{% set override_docker_ver='1.11.2-0~wily' %} -{% set override_deb='docker-engine_1.11.2-0~wily_amd64.deb' %} -{% set override_deb_url='http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.11.2-0~wily_amd64.deb' %} -{% set override_deb_sha1='3e02f51fe18aa777eeb1676c3d9a75e5ea6d96c9' %} - {% else %} {% set docker_pkg_name='lxc-docker-1.7.1' %} {% set override_docker_ver='1.7.1' %} @@ -502,19 +471,8 @@ fix-systemd-docker-healthcheck-service: {% endif %} docker: -# Starting Docker is racy on aws for some reason. To be honest, since Monit -# is managing Docker restart we should probably just delete this whole thing -# but the kubernetes components use salt 'require' to set up a dag, and that -# complicated and scary to unwind. -# On AWS, we use a trick now... We don't start the docker service through Salt. -# Kubelet or our health checker will start it. But we use service.enabled, -# so we still have a `service: docker` node for our DAG. -{% if grains.cloud is defined and grains.cloud == 'aws' %} - service.enabled: -{% else %} service.running: - enable: True -{% endif %} # If we put a watch on this, salt will try to start the service. 
# We put the watch on the fixer instead {% if not pillar.get('is_systemd') %} diff --git a/cluster/saltbase/salt/generate-cert/init.sls b/cluster/saltbase/salt/generate-cert/init.sls index 436e5af7f71..4543239afe1 100644 --- a/cluster/saltbase/salt/generate-cert/init.sls +++ b/cluster/saltbase/salt/generate-cert/init.sls @@ -1,17 +1,6 @@ {% set master_extra_sans=grains.get('master_extra_sans', '') %} -{% if grains.cloud is defined %} - {% if grains.cloud == 'gce' %} - {% set cert_ip='_use_gce_external_ip_' %} - {% endif %} - {% if grains.cloud == 'aws' %} - {% set cert_ip='_use_aws_external_ip_' %} - {% endif %} - {% if grains.cloud == 'azure-legacy' %} - {% set cert_ip='_use_azure_dns_name_' %} - {% endif %} - {% if grains.cloud == 'photon-controller' %} - {% set cert_ip=grains.ip_interfaces.eth0[0] %} - {% endif %} +{% if grains.cloud is defined and grains.cloud == 'gce' %} + {% set cert_ip='_use_gce_external_ip_' %} {% endif %} # If there is a pillar defined, override any defaults. diff --git a/cluster/saltbase/salt/generate-cert/make-ca-cert.sh b/cluster/saltbase/salt/generate-cert/make-ca-cert.sh index f4e23a81f9e..41531209ed5 100755 --- a/cluster/saltbase/salt/generate-cert/make-ca-cert.sh +++ b/cluster/saltbase/salt/generate-cert/make-ca-cert.sh @@ -38,24 +38,6 @@ if [ "$cert_ip" == "_use_gce_external_ip_" ]; then cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip) fi -if [ "$cert_ip" == "_use_aws_external_ip_" ]; then - # If there's no public IP assigned (e.g. this host is running on an internal subnet in a VPC), then - # curl will happily spit out the contents of AWS's 404 page and an exit code of zero. - # - # The string containing the 404 page trips up one of easyrsa's calls to openssl later; whichever - # one creates the CA certificate, because the 404 page is > 64 characters. 
- if cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/public-ipv4); then - : - else - cert_ip=$(curl -f -s http://169.254.169.254/latest/meta-data/local-ipv4) - fi -fi - -if [ "$cert_ip" == "_use_azure_dns_name_" ]; then - cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net - use_cn=true -fi - sans="IP:${cert_ip}" if [[ -n "${extra_sans}" ]]; then sans="${sans},${extra_sans}" diff --git a/cluster/saltbase/salt/helpers/init.sls b/cluster/saltbase/salt/helpers/init.sls deleted file mode 100644 index 7e5ad435d6e..00000000000 --- a/cluster/saltbase/salt/helpers/init.sls +++ /dev/null @@ -1,14 +0,0 @@ -{% if grains['cloud'] is defined and grains['cloud'] == 'aws' %} -/usr/share/google: - file.directory: - - user: root - - group: root - - dir_mode: 755 - -/usr/share/google/safe_format_and_mount: - file.managed: - - source: salt://helpers/safe_format_and_mount - - user: root - - group: root - - mode: 755 -{% endif %} diff --git a/cluster/saltbase/salt/helpers/safe_format_and_mount b/cluster/saltbase/salt/helpers/safe_format_and_mount deleted file mode 100644 index 53cfe6cc815..00000000000 --- a/cluster/saltbase/salt/helpers/safe_format_and_mount +++ /dev/null @@ -1,144 +0,0 @@ -#! /bin/bash -# Copyright 2013 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Mount a disk, formatting it if necessary. If the disk looks like it may -# have been formatted before, we will not format it. 
-# -# This script uses blkid and file to search for magic "formatted" bytes -# at the beginning of the disk. Furthermore, it attempts to use fsck to -# repair the filesystem before formatting it. - -FSCK=fsck.ext4 -MOUNT_OPTIONS="discard,defaults" -MKFS="mkfs.ext4 -F" -if [ -e /etc/redhat-release ]; then - if grep -q '7\..' /etc/redhat-release; then - FSCK=fsck.xfs - MKFS=mkfs.xfs - fi -fi - -LOGTAG=safe_format_and_mount -LOGFACILITY=user - -function log() { - local readonly severity=$1; shift; - logger -t ${LOGTAG} -p ${LOGFACILITY}.${severity} -s "$@" -} - -function log_command() { - local readonly log_file=$(mktemp) - local readonly retcode - log info "Running: $*" - $* > ${log_file} 2>&1 - retcode=$? - # only return the last 1000 lines of the logfile, just in case it's HUGE. - tail -1000 ${log_file} | logger -t ${LOGTAG} -p ${LOGFACILITY}.info -s - rm -f ${log_file} - return ${retcode} -} - -function help() { - cat >&2 < -EOF - exit 0 -} - -while getopts ":hf:o:m:" opt; do - case $opt in - h) help;; - f) FSCK=$OPTARG;; - o) MOUNT_OPTIONS=$OPTARG;; - m) MKFS=$OPTARG;; - -) break;; - \?) log error "Invalid option: -${OPTARG}"; exit 1;; - :) log "Option -${OPTARG} requires an argument."; exit 1;; - esac -done - -shift $(($OPTIND - 1)) -readonly DISK=$1 -readonly MOUNTPOINT=$2 - -[[ -z ${DISK} ]] && help -[[ -z ${MOUNTPOINT} ]] && help - -function disk_looks_unformatted() { - blkid ${DISK} - if [[ $? == 0 ]]; then - return 0 - fi - - local readonly file_type=$(file --special-files ${DISK}) - case ${file_type} in - *filesystem*) - return 0;; - esac - - return 1 -} - -function format_disk() { - log_command ${MKFS} ${DISK} -} - -function try_repair_disk() { - log_command ${FSCK} -a ${DISK} - local readonly fsck_return=$? 
- if [[ ${fsck_return} -ge 8 ]]; then - log error "Fsck could not correct errors on ${DISK}" - return 1 - fi - if [[ ${fsck_return} -gt 0 ]]; then - log warning "Fsck corrected errors on ${DISK}" - fi - return 0 -} - -function try_mount() { - local mount_retcode - try_repair_disk - - log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT} - mount_retcode=$? - if [[ ${mount_retcode} == 0 ]]; then - return 0 - fi - - # Check to see if it looks like a filesystem before formatting it. - disk_looks_unformatted ${DISK} - if [[ $? == 0 ]]; then - log error "Disk ${DISK} looks formatted but won't mount. Giving up." - return ${mount_retcode} - fi - - # The disk looks like it's not been formatted before. - format_disk - if [[ $? != 0 ]]; then - log error "Format of ${DISK} failed." - fi - - log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT} - mount_retcode=$? - if [[ ${mount_retcode} == 0 ]]; then - return 0 - fi - log error "Tried everything we could, but could not mount ${DISK}." - return ${mount_retcode} -} - -try_mount -exit $? 
diff --git a/cluster/saltbase/salt/kube-addons/init.sls b/cluster/saltbase/salt/kube-addons/init.sls index 3171cb6ca61..923a711a6b8 100644 --- a/cluster/saltbase/salt/kube-addons/init.sls +++ b/cluster/saltbase/salt/kube-addons/init.sls @@ -204,7 +204,7 @@ addon-dir-create: - group: root - mode: 755 -{% if pillar.get('enable_default_storage_class', '').lower() == 'true' and grains['cloud'] is defined and grains['cloud'] in ['aws', 'gce', 'openstack'] %} +{% if pillar.get('enable_default_storage_class', '').lower() == 'true' and grains['cloud'] is defined and grains['cloud'] == 'gce' %} /etc/kubernetes/addons/storage-class/default.yaml: file.managed: - source: salt://kube-addons/storage-class/{{ grains['cloud'] }}/default.yaml diff --git a/cluster/saltbase/salt/kube-apiserver/init.sls b/cluster/saltbase/salt/kube-apiserver/init.sls index f22067b877c..261fd53ef35 100644 --- a/cluster/saltbase/salt/kube-apiserver/init.sls +++ b/cluster/saltbase/salt/kube-apiserver/init.sls @@ -1,4 +1,4 @@ -{% if grains['cloud'] is defined and grains.cloud in ['aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %} +{% if grains['cloud'] is defined and grains.cloud == 'gce' %} # TODO: generate and distribute tokens on other cloud providers. 
/srv/kubernetes/known_tokens.csv: file.managed: diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest index 878b10f43bf..34e75ac7a89 100644 --- a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest +++ b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest @@ -14,24 +14,14 @@ {% set srv_sshproxy_path = "/srv/sshproxy" -%} {% if grains.cloud is defined -%} - {% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%} - {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} - {% endif -%} + {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} - {% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%} - {% set cloud_config = "--cloud-config=" + grains.cloud_config -%} - {% endif -%} - - {% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%} + {% if grains.cloud == 'gce' and grains.cloud_config is defined -%} {% set cloud_config = "--cloud-config=" + grains.cloud_config -%} {% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%} {% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%} {% endif -%} - {% if grains.cloud in ['openstack'] -%} - {% set cloud_config_mount = "{\"name\": \"instanceid\",\"mountPath\": \"/var/lib/cloud/data/instance-id\",\"readOnly\": true}," -%} - {% set cloud_config_volume = "{\"name\": \"instanceid\",\"hostPath\": {\"path\": \"/var/lib/cloud/data/instance-id\"}}," -%} - {% endif -%} {% endif -%} {% set advertise_address = "" -%} @@ -99,7 +89,7 @@ {% set client_ca_file = "" -%} {% set secure_port = "6443" -%} -{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %} +{% if grains['cloud'] is defined and grains.cloud == 'gce' %} {% set 
secure_port = "443" -%} {% set client_ca_file = "--client-ca-file=/srv/kubernetes/ca.crt" -%} {% endif -%} @@ -113,7 +103,7 @@ {% set basic_auth_file = "" -%} {% set authz_mode = "" -%} {% set abac_policy_file = "" -%} -{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %} +{% if grains['cloud'] is defined and grains.cloud == 'gce' %} {% set token_auth_file = " --token-auth-file=/srv/kubernetes/known_tokens.csv" -%} {% set basic_auth_file = " --basic-auth-file=/srv/kubernetes/basic_auth.csv" -%} {% set authz_mode = " --authorization-mode=ABAC" -%} diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest index c287b29652e..74353d07f6f 100644 --- a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest +++ b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest @@ -40,30 +40,20 @@ {% flex_vol_plugin_dir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec" -%} {% if grains.cloud is defined -%} - {% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%} - {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} - {% endif -%} + {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} {% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%} - {% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%} - {% set cloud_config = "--cloud-config=" + grains.cloud_config -%} - {% endif -%} - - {% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%} + {% if grains.cloud == 'gce' and grains.cloud_config is defined -%} {% set cloud_config = "--cloud-config=" + grains.cloud_config -%} {% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%} {% set cloud_config_volume 
= "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\", \"type\": \"FileOrCreate\"}}," -%} {% endif -%} - {% if grains.cloud in ['openstack'] -%} - {% set cloud_config_mount = "{\"name\": \"instanceid\",\"mountPath\": \"/var/lib/cloud/data/instance-id\",\"readOnly\": true}," -%} - {% set cloud_config_volume = "{\"name\": \"instanceid\",\"hostPath\": {\"path\": \"/var/lib/cloud/data/instance-id\"}}," -%} - {% endif -%} {% endif -%} {% set root_ca_file = "" -%} -{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] %} +{% if grains.cloud is defined and grains.cloud == 'gce' %} {% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%} {% endif -%} diff --git a/cluster/saltbase/salt/kube-node-unpacker/init.sls b/cluster/saltbase/salt/kube-node-unpacker/init.sls index 03495d7fe18..7c9d47eccc0 100644 --- a/cluster/saltbase/salt/kube-node-unpacker/init.sls +++ b/cluster/saltbase/salt/kube-node-unpacker/init.sls @@ -24,10 +24,6 @@ kube-proxy-tar: {% set is_helium = '0' %} # Super annoying, the salt version on GCE is old enough that 'salt.cmd.run' # isn't supported -{% if grains.cloud is defined and grains.cloud == 'aws' %} - # Salt has terrible problems with systemd on AWS too - {% set is_helium = '0' %} -{% endif %} # Salt Helium doesn't support systemd modules for service running {% if pillar.get('is_systemd') and is_helium == '0' %} diff --git a/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest b/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest index d35692a3fd4..6e9af81b78f 100644 --- a/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest +++ b/cluster/saltbase/salt/kube-proxy/kube-proxy.manifest @@ -8,7 +8,7 @@ {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%} {% set api_servers = "--master=https://" + ips[0][0] -%} {% endif -%} -{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 
'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy' ] %} +{% if grains['cloud'] is defined and grains.cloud == 'gce' %} {% set api_servers_with_port = api_servers -%} {% else -%} {% set api_servers_with_port = api_servers + ":6443" -%} diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default index 27511061e84..c2aff4694a7 100644 --- a/cluster/saltbase/salt/kubelet/default +++ b/cluster/saltbase/salt/kubelet/default @@ -22,7 +22,7 @@ {% set debugging_handlers = "--enable-debugging-handlers=true" -%} {% if grains['roles'][0] == 'kubernetes-master' -%} - {% if grains.cloud in ['aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] -%} + {% if grains.cloud == 'gce' -%} # Unless given a specific directive, disable registration for the kubelet # running on the master. {% if kubeconfig != "" -%} @@ -37,14 +37,11 @@ {% endif -%} {% set cloud_provider = "" -%} -{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%} +{% if grains.cloud is defined -%} {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} {% endif -%} {% set cloud_config = "" -%} -{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%} - {% set cloud_config = "--cloud-config=" + grains.cloud_config -%} -{% endif -%} {% set config = "--pod-manifest-path=/etc/kubernetes/manifests" -%} diff --git a/cluster/saltbase/salt/top.sls b/cluster/saltbase/salt/top.sls index e517778a77e..51c3a347cca 100644 --- a/cluster/saltbase/salt/top.sls +++ b/cluster/saltbase/salt/top.sls @@ -3,9 +3,6 @@ base: - base - debian-auto-upgrades - salt-helpers -{% if grains.get('cloud') == 'aws' %} - - ntp -{% endif %} {% if pillar.get('e2e_storage_test_environment', '').lower() == 'true' %} - e2e {% endif %} @@ -20,7 +17,6 @@ base: {% elif pillar.get('network_provider', '').lower() == 'cni' %} - cni {% endif %} - - helpers - kube-client-tools - kube-node-unpacker - kubelet @@ -60,11 +56,9 @@ 
base: - kube-client-tools - kube-master-addons - kube-admission-controls -{% if grains['cloud'] is defined and grains['cloud'] != 'vagrant' %} - logrotate -{% endif %} - kube-addons -{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'photon-controller', 'openstack', 'azure-legacy'] %} +{% if grains['cloud'] is defined and grains['cloud'] == 'gce' %} - docker - kubelet {% endif %} From 63826000c57c84ec086215e465dcae1e703f1cb4 Mon Sep 17 00:00:00 2001 From: Balu Dontu Date: Fri, 12 Jan 2018 16:40:08 -0800 Subject: [PATCH 761/794] Remove vmUUID check in VSphere cloud provider --- .../providers/vsphere/vsphere.go | 8 ------ .../providers/vsphere/vsphere_util.go | 25 ------------------- 2 files changed, 33 deletions(-) diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 5707dd3d06c..42400682cb7 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -375,14 +375,6 @@ func newControllerNode(cfg VSphereConfig) (*VSphere, error) { if cfg.Global.VCenterPort == "" { cfg.Global.VCenterPort = "443" } - if cfg.Global.VMUUID == "" { - // This needs root privileges on the host, and will fail otherwise. - cfg.Global.VMUUID, err = getvmUUID() - if err != nil { - glog.Errorf("Failed to get VM UUID. 
err: %+v", err) - return nil, err - } - } vsphereInstanceMap, err := populateVsphereInstanceMap(&cfg) if err != nil { return nil, err diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/pkg/cloudprovider/providers/vsphere/vsphere_util.go index 0f4edb155ae..45a35c71d41 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -19,7 +19,6 @@ package vsphere import ( "context" "errors" - "io/ioutil" "os" "regexp" "runtime" @@ -128,30 +127,6 @@ func GetgovmomiClient(conn *vclib.VSphereConnection) (*govmomi.Client, error) { return client, err } -// getvmUUID gets the BIOS UUID via the sys interface. This UUID is known by vsphere -func getvmUUID() (string, error) { - id, err := ioutil.ReadFile(UUIDPath) - if err != nil { - return "", fmt.Errorf("error retrieving vm uuid: %s", err) - } - uuidFromFile := string(id[:]) - //strip leading and trailing white space and new line char - uuid := strings.TrimSpace(uuidFromFile) - // check the uuid starts with "VMware-" - if !strings.HasPrefix(uuid, UUIDPrefix) { - return "", fmt.Errorf("Failed to match Prefix, UUID read from the file is %v", uuidFromFile) - } - // Strip the prefix and while spaces and - - uuid = strings.Replace(uuid[len(UUIDPrefix):(len(uuid))], " ", "", -1) - uuid = strings.Replace(uuid, "-", "", -1) - if len(uuid) != 32 { - return "", fmt.Errorf("Length check failed, UUID read from the file is %v", uuidFromFile) - } - // need to add dashes, e.g. "564d395e-d807-e18a-cb25-b79f65eb2b9f" - uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32]) - return uuid, nil -} - // Returns the accessible datastores for the given node VM. 
func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) { accessibleDatastores, err := nodeVmDetail.vm.GetAllAccessibleDatastores(ctx) From 16ff0c2dda5672dd0078226507bbbcd959d61246 Mon Sep 17 00:00:00 2001 From: ravisantoshgudimetla Date: Wed, 10 Jan 2018 14:19:45 +0530 Subject: [PATCH 762/794] Improved readability for messages being logged --- pkg/scheduler/schedulercache/cache.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/scheduler/schedulercache/cache.go b/pkg/scheduler/schedulercache/cache.go index f891707d505..04ccc88eef2 100644 --- a/pkg/scheduler/schedulercache/cache.go +++ b/pkg/scheduler/schedulercache/cache.go @@ -131,7 +131,7 @@ func (cache *schedulerCache) AssumePod(pod *v1.Pod) error { cache.mu.Lock() defer cache.mu.Unlock() if _, ok := cache.podStates[key]; ok { - return fmt.Errorf("pod %v state wasn't initial but get assumed", key) + return fmt.Errorf("pod %v is not in the cache, so can't be assumed", key) } cache.addPod(pod) @@ -178,7 +178,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { currState, ok := cache.podStates[key] if ok && currState.pod.Spec.NodeName != pod.Spec.NodeName { - return fmt.Errorf("pod %v state was assumed on a different node", key) + return fmt.Errorf("pod %v was assumed on %v but assigned to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) } switch { @@ -191,7 +191,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { delete(cache.assumedPods, key) delete(cache.podStates, key) default: - return fmt.Errorf("pod %v state wasn't assumed but get forgotten", key) + return fmt.Errorf("pod %v wasn't assumed so cannot be forgotten", key) } return nil } @@ -241,7 +241,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error { case ok && cache.assumedPods[key]: if currState.pod.Spec.NodeName != pod.Spec.NodeName { // The pod was added to a different node than it was assumed 
to. - glog.Warningf("Pod %v assumed to a different node than added to.", key) + glog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) // Clean this up. cache.removePod(currState.pod) cache.addPod(pod) @@ -257,7 +257,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error { } cache.podStates[key] = ps default: - return fmt.Errorf("pod was already in added state. Pod key: %v", key) + return fmt.Errorf("pod %v was already in added state.", key) } return nil } @@ -284,7 +284,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { return err } default: - return fmt.Errorf("pod %v state wasn't added but get updated", key) + return fmt.Errorf("pod %v is not added to scheduler cache, so cannot be updated", key) } return nil } @@ -304,7 +304,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { // before Remove event, in which case the state would change from Assumed to Added. case ok && !cache.assumedPods[key]: if currState.pod.Spec.NodeName != pod.Spec.NodeName { - glog.Errorf("Pod %v removed from a different node than previously added to.", key) + glog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") } err := cache.removePod(currState.pod) @@ -313,7 +313,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { } delete(cache.podStates, key) default: - return fmt.Errorf("pod state wasn't added but get removed. 
Pod key: %v", key) + return fmt.Errorf("pod %v is not found in scheduler cache, so cannot be removed from it", key) } return nil } @@ -345,7 +345,7 @@ func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) { podState, ok := cache.podStates[key] if !ok { - return nil, fmt.Errorf("pod %v does not exist", key) + return nil, fmt.Errorf("pod %v does not exist in scheduler cache", key) } return podState.pod, nil From 8aebf3554c7534300f08a7646748a1afe91b1812 Mon Sep 17 00:00:00 2001 From: ravisantoshgudimetla Date: Sat, 13 Jan 2018 10:21:06 +0530 Subject: [PATCH 763/794] Added metrics for preemption victims, pods preempted and duration of preemption --- pkg/scheduler/metrics/metrics.go | 24 ++++++++++++++++++++++++ pkg/scheduler/scheduler.go | 8 ++++++-- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/pkg/scheduler/metrics/metrics.go b/pkg/scheduler/metrics/metrics.go index c0a87f319ae..7d329ba5ac0 100644 --- a/pkg/scheduler/metrics/metrics.go +++ b/pkg/scheduler/metrics/metrics.go @@ -59,6 +59,14 @@ var ( Buckets: prometheus.ExponentialBuckets(1000, 2, 15), }, ) + SchedulingAlgorithmPremptionEvaluationDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Subsystem: schedulerSubsystem, + Name: "scheduling_algorithm_preemption_evaluation", + Help: "Scheduling algorithm preemption evaluation duration", + Buckets: prometheus.ExponentialBuckets(1000, 2, 15), + }, + ) BindingLatency = prometheus.NewHistogram( prometheus.HistogramOpts{ Subsystem: schedulerSubsystem, @@ -67,6 +75,18 @@ var ( Buckets: prometheus.ExponentialBuckets(1000, 2, 15), }, ) + PreemptionVictims = prometheus.NewGauge( + prometheus.GaugeOpts{ + Subsystem: schedulerSubsystem, + Name: "pod_preemption_victims", + Help: "Number of selected preemption victims", + }) + PreemptionAttempts = prometheus.NewCounter( + prometheus.CounterOpts{ + Subsystem: schedulerSubsystem, + Name: "total_preemption_attempts", + Help: "Total preemption attempts in the cluster till now", + }) ) 
var registerMetrics sync.Once @@ -78,8 +98,12 @@ func Register() { prometheus.MustRegister(E2eSchedulingLatency) prometheus.MustRegister(SchedulingAlgorithmLatency) prometheus.MustRegister(BindingLatency) + prometheus.MustRegister(SchedulingAlgorithmPredicateEvaluationDuration) prometheus.MustRegister(SchedulingAlgorithmPriorityEvaluationDuration) + prometheus.MustRegister(SchedulingAlgorithmPremptionEvaluationDuration) + prometheus.MustRegister(PreemptionVictims) + prometheus.MustRegister(PreemptionAttempts) }) } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 9fae7d117f7..fee845f34e8 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -216,7 +216,9 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e glog.Errorf("Error getting the updated preemptor pod object: %v", err) return "", err } + node, victims, nominatedPodsToClear, err := sched.config.Algorithm.Preempt(preemptor, sched.config.NodeLister, scheduleErr) + metrics.PreemptionVictims.Set(float64(len(victims))) if err != nil { glog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name) return "", err @@ -440,18 +442,20 @@ func (sched *Scheduler) scheduleOne() { // Synchronously attempt to find a fit for the pod. start := time.Now() suggestedHost, err := sched.schedule(pod) - metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start)) if err != nil { // schedule() may have failed because the pod would not fit on any host, so we try to // preempt, with the expectation that the next time the pod is tried for scheduling it // will fit due to the preemption. It is also possible that a different pod will schedule // into the resources that were preempted, but this is harmless. 
if fitError, ok := err.(*core.FitError); ok { + preemptionStartTime := time.Now() sched.preempt(pod, fitError) + metrics.PreemptionAttempts.Inc() + metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime)) } return } - + metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start)) // Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet. // This allows us to keep scheduling without waiting on binding to occur. assumedPod := pod.DeepCopy() From b8526cd0777af6717d986942ccb6de6d23eca1bb Mon Sep 17 00:00:00 2001 From: Wang Guoliang Date: Thu, 21 Dec 2017 23:03:46 +0800 Subject: [PATCH 764/794] -Add scheduler optimization options, short circuit all predicates if one predicate fails --- ...scheduler-policy-config-with-extender.json | 3 +- examples/scheduler-policy-config.json | 3 +- pkg/scheduler/api/types.go | 6 ++ pkg/scheduler/api/v1/types.go | 6 ++ pkg/scheduler/core/extender_test.go | 2 +- pkg/scheduler/core/generic_scheduler.go | 64 +++++++++++-------- pkg/scheduler/core/generic_scheduler_test.go | 45 +++++++++---- pkg/scheduler/factory/factory.go | 11 +++- pkg/scheduler/scheduler_test.go | 6 +- 9 files changed, 98 insertions(+), 48 deletions(-) diff --git a/examples/scheduler-policy-config-with-extender.json b/examples/scheduler-policy-config-with-extender.json index 996e6efc828..cd566fb314c 100644 --- a/examples/scheduler-policy-config-with-extender.json +++ b/examples/scheduler-policy-config-with-extender.json @@ -26,5 +26,6 @@ "nodeCacheCapable": false } ], -"hardPodAffinitySymmetricWeight" : 10 +"hardPodAffinitySymmetricWeight" : 10, +"alwaysCheckAllPredicates" : false } diff --git a/examples/scheduler-policy-config.json b/examples/scheduler-policy-config.json index b0fecffab23..048299e5e36 100644 --- a/examples/scheduler-policy-config.json +++ b/examples/scheduler-policy-config.json @@ -15,5 +15,6 @@ {"name" : "ServiceSpreadingPriority", 
"weight" : 1}, {"name" : "EqualPriority", "weight" : 1} ], -"hardPodAffinitySymmetricWeight" : 10 +"hardPodAffinitySymmetricWeight" : 10, +"alwaysCheckAllPredicates" : false } diff --git a/pkg/scheduler/api/types.go b/pkg/scheduler/api/types.go index 080fc386db5..28b095f3348 100644 --- a/pkg/scheduler/api/types.go +++ b/pkg/scheduler/api/types.go @@ -47,6 +47,12 @@ type Policy struct { // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100. HardPodAffinitySymmetricWeight int32 + + // When AlwaysCheckAllPredicates is set to true, scheduler checks all + // the configured predicates even after one or more of them fails. + // When the flag is set to false, scheduler skips checking the rest + // of the predicates after it finds one predicate that failed. + AlwaysCheckAllPredicates bool } type PredicatePolicy struct { diff --git a/pkg/scheduler/api/v1/types.go b/pkg/scheduler/api/v1/types.go index 3f6684a5f3c..14e2f06b1e1 100644 --- a/pkg/scheduler/api/v1/types.go +++ b/pkg/scheduler/api/v1/types.go @@ -39,6 +39,12 @@ type Policy struct { // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100. HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` + + // When AlwaysCheckAllPredicates is set to true, scheduler checks all + // the configured predicates even after one or more of them fails. + // When the flag is set to false, scheduler skips checking the rest + // of the predicates after it finds one predicate that failed. 
+ AlwaysCheckAllPredicates bool `json:"alwaysCheckAllPredicates"` } type PredicatePolicy struct { diff --git a/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go index 23551a2415c..09e136d38b6 100644 --- a/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -317,7 +317,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { } queue := NewSchedulingQueue() scheduler := NewGenericScheduler( - cache, nil, queue, test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}) + cache, nil, queue, test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) podIgnored := &v1.Pod{} machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes))) if test.expectsErr { diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index f147d534958..2009b7af895 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -90,16 +90,17 @@ func (f *FitError) Error() string { } type genericScheduler struct { - cache schedulercache.Cache - equivalenceCache *EquivalenceCache - schedulingQueue SchedulingQueue - predicates map[string]algorithm.FitPredicate - priorityMetaProducer algorithm.MetadataProducer - predicateMetaProducer algorithm.PredicateMetadataProducer - prioritizers []algorithm.PriorityConfig - extenders []algorithm.SchedulerExtender - lastNodeIndexLock sync.Mutex - lastNodeIndex uint64 + cache schedulercache.Cache + equivalenceCache *EquivalenceCache + schedulingQueue SchedulingQueue + predicates map[string]algorithm.FitPredicate + priorityMetaProducer algorithm.MetadataProducer + predicateMetaProducer algorithm.PredicateMetadataProducer + prioritizers 
[]algorithm.PriorityConfig + extenders []algorithm.SchedulerExtender + lastNodeIndexLock sync.Mutex + lastNodeIndex uint64 + alwaysCheckAllPredicates bool cachedNodeInfoMap map[string]*schedulercache.NodeInfo volumeBinder *volumebinder.VolumeBinder @@ -133,7 +134,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister trace.Step("Computing predicates") startPredicateEvalTime := time.Now() - filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.cachedNodeInfoMap, nodes, g.predicates, g.extenders, g.predicateMetaProducer, g.equivalenceCache, g.schedulingQueue) + filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.cachedNodeInfoMap, nodes, g.predicates, g.extenders, g.predicateMetaProducer, g.equivalenceCache, g.schedulingQueue, g.alwaysCheckAllPredicates) if err != nil { return "", err } @@ -295,6 +296,7 @@ func findNodesThatFit( metadataProducer algorithm.PredicateMetadataProducer, ecache *EquivalenceCache, schedulingQueue SchedulingQueue, + alwaysCheckAllPredicates bool, ) ([]*v1.Node, FailedPredicateMap, error) { var filtered []*v1.Node failedPredicateMap := FailedPredicateMap{} @@ -313,7 +315,7 @@ func findNodesThatFit( meta := metadataProducer(pod, nodeNameToInfo) checkNode := func(i int) { nodeName := nodes[i].Name - fits, failedPredicates, err := podFitsOnNode(pod, meta, nodeNameToInfo[nodeName], predicateFuncs, ecache, schedulingQueue) + fits, failedPredicates, err := podFitsOnNode(pod, meta, nodeNameToInfo[nodeName], predicateFuncs, ecache, schedulingQueue, alwaysCheckAllPredicates) if err != nil { predicateResultLock.Lock() errs[err.Error()]++ @@ -402,6 +404,7 @@ func podFitsOnNode( predicateFuncs map[string]algorithm.FitPredicate, ecache *EquivalenceCache, queue SchedulingQueue, + alwaysCheckAllPredicates bool, ) (bool, []algorithm.PredicateFailureReason, error) { var ( equivalenceHash uint64 @@ -457,8 +460,6 @@ func podFitsOnNode( fit, reasons, invalid = ecache.PredicateWithECache(pod.GetName(), 
info.Node().GetName(), predicateKey, equivalenceHash) } - // TODO(bsalamat): When one predicate fails and fit is false, why do we continue - // checking other predicates? if !eCacheAvailable || invalid { // we need to execute predicate functions since equivalence cache does not work fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse) @@ -479,6 +480,11 @@ func podFitsOnNode( if !fit { // eCache is available and valid, and predicates result is unfit, record the fail reasons failedPredicates = append(failedPredicates, reasons...) + // if alwaysCheckAllPredicates is false, short circuit all predicates when one predicate fails. + if !alwaysCheckAllPredicates { + glog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate evaluation is short circuited and there are chances of other predicates failing as well.") + break + } } } } @@ -917,7 +923,7 @@ func selectVictimsOnNode( // that we should check is if the "pod" is failing to schedule due to pod affinity // failure. // TODO(bsalamat): Consider checking affinity to lower priority pods if feasible with reasonable performance. 
- if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue); !fits { + if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false); !fits { if err != nil { glog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err) } @@ -931,7 +937,7 @@ func selectVictimsOnNode( violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims.Items, pdbs) reprievePod := func(p *v1.Pod) bool { addPod(p) - fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue) + fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false) if !fits { removePod(p) victims = append(victims, p) @@ -1045,18 +1051,20 @@ func NewGenericScheduler( priorityMetaProducer algorithm.MetadataProducer, extenders []algorithm.SchedulerExtender, volumeBinder *volumebinder.VolumeBinder, - pvcLister corelisters.PersistentVolumeClaimLister) algorithm.ScheduleAlgorithm { + pvcLister corelisters.PersistentVolumeClaimLister, + alwaysCheckAllPredicates bool) algorithm.ScheduleAlgorithm { return &genericScheduler{ - cache: cache, - equivalenceCache: eCache, - schedulingQueue: podQueue, - predicates: predicates, - predicateMetaProducer: predicateMetaProducer, - prioritizers: prioritizers, - priorityMetaProducer: priorityMetaProducer, - extenders: extenders, - cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo), - volumeBinder: volumeBinder, - pvcLister: pvcLister, + cache: cache, + equivalenceCache: eCache, + schedulingQueue: podQueue, + predicates: predicates, + predicateMetaProducer: predicateMetaProducer, + prioritizers: prioritizers, + priorityMetaProducer: priorityMetaProducer, + extenders: extenders, + cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo), + volumeBinder: volumeBinder, + pvcLister: pvcLister, + alwaysCheckAllPredicates: alwaysCheckAllPredicates, } } diff --git a/pkg/scheduler/core/generic_scheduler_test.go 
b/pkg/scheduler/core/generic_scheduler_test.go index cdfc6b20fe5..55fede23c4a 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -187,16 +187,17 @@ func TestSelectHost(t *testing.T) { func TestGenericScheduler(t *testing.T) { predicates.SetPredicatesOrdering(order) tests := []struct { - name string - predicates map[string]algorithm.FitPredicate - prioritizers []algorithm.PriorityConfig - nodes []string - pvcs []*v1.PersistentVolumeClaim - pod *v1.Pod - pods []*v1.Pod - expectedHosts sets.String - expectsErr bool - wErr error + name string + predicates map[string]algorithm.FitPredicate + prioritizers []algorithm.PriorityConfig + alwaysCheckAllPredicates bool + nodes []string + pvcs []*v1.PersistentVolumeClaim + pod *v1.Pod + pods []*v1.Pod + expectedHosts sets.String + expectsErr bool + wErr error }{ { predicates: map[string]algorithm.FitPredicate{"false": falsePredicate}, @@ -377,6 +378,22 @@ func TestGenericScheduler(t *testing.T) { expectsErr: true, wErr: fmt.Errorf("persistentvolumeclaim \"existingPVC\" is being deleted"), }, + { + // alwaysCheckAllPredicates is true + predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate, "false": falsePredicate}, + prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, + alwaysCheckAllPredicates: true, + nodes: []string{"1"}, + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, + name: "test alwaysCheckAllPredicates is true", + wErr: &FitError{ + Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, + NumAllNodes: 1, + FailedPredicates: FailedPredicateMap{ + "1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate, algorithmpredicates.ErrFakePredicate}, + }, + }, + }, } for _, test := range tests { cache := schedulercache.New(time.Duration(0), wait.NeverStop) @@ -393,7 +410,7 @@ func TestGenericScheduler(t *testing.T) { pvcLister := 
schedulertesting.FakePersistentVolumeClaimLister(pvcs) scheduler := NewGenericScheduler( - cache, nil, NewSchedulingQueue(), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, pvcLister) + cache, nil, NewSchedulingQueue(), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, pvcLister, test.alwaysCheckAllPredicates) machine, err := scheduler.Schedule(test.pod, schedulertesting.FakeNodeLister(makeNodeList(test.nodes))) if !reflect.DeepEqual(err, test.wErr) { @@ -414,7 +431,7 @@ func TestFindFitAllError(t *testing.T) { "2": schedulercache.NewNodeInfo(), "1": schedulercache.NewNodeInfo(), } - _, predicateMap, err := findNodesThatFit(&v1.Pod{}, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyPredicateMetadataProducer, nil, nil) + _, predicateMap, err := findNodesThatFit(&v1.Pod{}, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyPredicateMetadataProducer, nil, nil, false) if err != nil { t.Errorf("unexpected error: %v", err) @@ -449,7 +466,7 @@ func TestFindFitSomeError(t *testing.T) { nodeNameToInfo[name].SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}}) } - _, predicateMap, err := findNodesThatFit(pod, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyPredicateMetadataProducer, nil, nil) + _, predicateMap, err := findNodesThatFit(pod, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyPredicateMetadataProducer, nil, nil, false) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1276,7 +1293,7 @@ func TestPreempt(t *testing.T) { extenders = append(extenders, extender) } scheduler := NewGenericScheduler( - cache, nil, NewSchedulingQueue(), map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, 
[]algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}) + cache, nil, NewSchedulingQueue(), map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) // Call Preempt and check the expected results. node, victims, _, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap})) if err != nil { diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index 04b7a585913..fca4f46e3dd 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -130,6 +130,9 @@ type configFactory struct { // Handles volume binding decisions volumeBinder *volumebinder.VolumeBinder + + // always check all predicates even if the middle of one predicate fails. + alwaysCheckAllPredicates bool } // NewConfigFactory initializes the default implementation of a Configurator To encourage eventual privatization of the struct type, we only @@ -880,6 +883,12 @@ func (f *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler if policy.HardPodAffinitySymmetricWeight != 0 { f.hardPodAffinitySymmetricWeight = policy.HardPodAffinitySymmetricWeight } + // When AlwaysCheckAllPredicates is set to true, scheduler checks all the configured + // predicates even after one or more of them fails. 
+ if policy.AlwaysCheckAllPredicates { + f.alwaysCheckAllPredicates = policy.AlwaysCheckAllPredicates + } + return f.CreateFromKeys(predicateKeys, priorityKeys, extenders) } @@ -933,7 +942,7 @@ func (f *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, glog.Info("Created equivalence class cache") } - algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder, f.pVCLister) + algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder, f.pVCLister, f.alwaysCheckAllPredicates) podBackoff := util.CreateDefaultPodBackoff() return &scheduler.Config{ diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 73d4abcc280..ed36792156b 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -533,7 +533,8 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache. 
algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, - schedulertesting.FakePersistentVolumeClaimLister{}) + schedulertesting.FakePersistentVolumeClaimLister{}, + false) bindingChan := make(chan *v1.Binding, 1) errChan := make(chan error, 1) configurator := &FakeConfigurator{ @@ -577,7 +578,8 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, - schedulertesting.FakePersistentVolumeClaimLister{}) + schedulertesting.FakePersistentVolumeClaimLister{}, + false) bindingChan := make(chan *v1.Binding, 2) configurator := &FakeConfigurator{ Config: &Config{ From 22592c8cdae6a4e013f26d5e1634f7e1cb5e760c Mon Sep 17 00:00:00 2001 From: zhengchuan hu Date: Thu, 11 Jan 2018 01:15:10 +0800 Subject: [PATCH 765/794] fix typeos in cloud-controller-manager --- pkg/cloudprovider/plugins.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/plugins.go b/pkg/cloudprovider/plugins.go index 888532717aa..739c0961339 100644 --- a/pkg/cloudprovider/plugins.go +++ b/pkg/cloudprovider/plugins.go @@ -64,7 +64,7 @@ func IsCloudProvider(name string) bool { // the name is unknown. The error return is only used if the named provider // was known but failed to initialize. The config parameter specifies the // io.Reader handler of the configuration file for the cloud provider, or nil -// for no configuation. +// for no configuration. 
func GetCloudProvider(name string, config io.Reader) (Interface, error) { providersMutex.Lock() defer providersMutex.Unlock() From 9ac650c437206c018d98f892ff35d63ce80f6039 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 13 Jan 2018 08:08:48 -0800 Subject: [PATCH 766/794] cluster: remove kube-push --- cluster/gce/util.sh | 60 ------------------------- cluster/kube-push.sh | 96 ---------------------------------------- cluster/skeleton/util.sh | 5 --- 3 files changed, 161 deletions(-) delete mode 100755 cluster/kube-push.sh diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 5c4f48d12cd..0bbd740d864 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -2065,66 +2065,6 @@ function prepare-push() { fi } -# Push binaries to kubernetes master -function push-master() { - echo "Updating master metadata ..." - write-master-env - prepare-startup-script - add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml" "startup-script=${KUBE_TEMP}/configure-vm.sh" - - echo "Pushing to master (log at ${OUTPUT}/push-${KUBE_MASTER}.log) ..." - cat ${KUBE_TEMP}/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${KUBE_MASTER}" --command "sudo bash -s -- --push" &> ${OUTPUT}/push-"${KUBE_MASTER}".log -} - -# Push binaries to kubernetes node -function push-node() { - node=${1} - - echo "Updating node ${node} metadata... " - prepare-startup-script - add-instance-metadata-from-file "${node}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" "startup-script=${KUBE_TEMP}/configure-vm.sh" - - echo "Start upgrading node ${node} (log at ${OUTPUT}/push-${node}.log) ..." - cat ${KUBE_TEMP}/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${node}" --command "sudo bash -s -- --push" &> ${OUTPUT}/push-"${node}".log -} - -# Push binaries to kubernetes cluster -function kube-push() { - # Disable this until it's fixed. 
- # See https://github.com/kubernetes/kubernetes/issues/17397 - echo "./cluster/kube-push.sh is currently not supported in GCE." - echo "Please use ./cluster/gce/upgrade.sh." - exit 1 - - prepare-push true - - push-master - - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - push-node "${NODE_NAMES[$i]}" & - done - - kube::util::wait-for-jobs || { - echo -e "${color_red}Some commands failed.${color_norm}" >&2 - } - - # TODO(zmerlynn): Re-create instance-template with the new - # node-kube-env. This isn't important until the node-ip-range issue - # is solved (because that's blocking automatic dynamic nodes from - # working). The node-kube-env has to be composed with the KUBELET_TOKEN - # and KUBE_PROXY_TOKEN. Ideally we would have - # http://issue.k8s.io/3168 - # implemented before then, though, so avoiding this mess until then. - - echo - echo "Kubernetes cluster is running. The master is running at:" - echo - echo " https://${KUBE_MASTER_IP}" - echo - echo "The user name and password to use is located in ~/.kube/config" - echo -} - # ----------------------------------------------------------------------------- # Cluster specific test helpers used from hack/e2e.go diff --git a/cluster/kube-push.sh b/cluster/kube-push.sh deleted file mode 100755 index aa84f902fa4..00000000000 --- a/cluster/kube-push.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Push a new release to the cluster. 
-# -# This will find the release tar, cause it to be downloaded, unpacked, installed -# and enacted. - -set -o errexit -set -o nounset -set -o pipefail - -echo "kube-push.sh is currently broken; see https://github.com/kubernetes/kubernetes/issues/17397" -exit 1 - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. - -if [ -f "${KUBE_ROOT}/cluster/env.sh" ]; then - source "${KUBE_ROOT}/cluster/env.sh" -fi - -source "${KUBE_ROOT}/cluster/kube-util.sh" - -function usage() { - echo "${0} [-m|-n ] " - echo " Updates Kubernetes binaries. Can be done for all components (by default), master(-m) or specified node(-n)." - echo " If the version is not specified will try to use local binaries." - echo " Warning: upgrading single node is experimental" -} - -push_to_master=false -push_to_node=false - -while getopts "mn:h" opt; do - case ${opt} in - m) - push_to_master=true;; - n) - push_to_node=true - node_id="$OPTARG";; - h) - usage - exit 0;; - \?) - echo "Invalid option: -$OPTARG" >&2 - usage - exit 1;; - esac -done -shift $((OPTIND-1)) - -if [[ "${push_to_master}" == "true" ]] && [[ "${push_to_node}" == "true" ]]; then - echo "Only one of options -m -n should be specified" - usage - exit 1 -fi - -verify-prereqs -verify-kube-binaries -KUBE_VERSION=${1-} - -if [[ "${push_to_master}" == "false" ]] && [[ "${push_to_node}" == "false" ]]; then - echo "Updating cluster using provider: $KUBERNETES_PROVIDER" - kube-push -fi - -if [[ "${push_to_master}" == "true" ]]; then - echo "Updating master to version ${KUBE_VERSION:-"dev"}" - prepare-push false - push-master -fi - -if [[ "${push_to_node}" == "true" ]]; then - echo "Updating node $node_id to version ${KUBE_VERSION:-"dev"}" - prepare-push true - push-node $node_id -fi - -echo "Validating cluster post-push..." 
- -"${KUBE_ROOT}/cluster/validate-cluster.sh" - -echo "Done" diff --git a/cluster/skeleton/util.sh b/cluster/skeleton/util.sh index 28d82d07d77..0cd4756101a 100644 --- a/cluster/skeleton/util.sh +++ b/cluster/skeleton/util.sh @@ -55,11 +55,6 @@ function kube-down { echo "Skeleton Provider: kube-down not implemented" 1>&2 } -# Update a kubernetes cluster -function kube-push { - echo "Skeleton Provider: kube-push not implemented" 1>&2 -} - # Prepare update a kubernetes component function prepare-push { echo "Skeleton Provider: prepare-push not implemented" 1>&2 From 6387c7b5b31c78af4d5dc5c7b036f7a69f04b7ac Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 13 Jan 2018 08:11:57 -0800 Subject: [PATCH 767/794] cluster: remove support for cvm from gce kube-up --- cluster/gce/BUILD | 7 +- cluster/gce/configure-vm.sh | 932 ------------------------------ cluster/gce/debian/node-helper.sh | 32 - cluster/gce/util.sh | 11 +- 4 files changed, 4 insertions(+), 978 deletions(-) delete mode 100755 cluster/gce/configure-vm.sh delete mode 100755 cluster/gce/debian/node-helper.sh diff --git a/cluster/gce/BUILD b/cluster/gce/BUILD index 00a2b7663ec..e297c36ef6f 100644 --- a/cluster/gce/BUILD +++ b/cluster/gce/BUILD @@ -38,13 +38,12 @@ filegroup( tags = ["automanaged"], ) -# Having the configure-vm.sh script and and trusty code from the GCE cluster -# deploy hosted with the release is useful for GKE. -# This list should match the list in kubernetes/release/lib/releaselib.sh. +# Having the COS code from the GCE cluster deploy hosted with the release is +# useful for GKE. This list should match the list in +# kubernetes/release/lib/releaselib.sh. 
release_filegroup( name = "gcs-release-artifacts", srcs = [ - "configure-vm.sh", "gci/configure.sh", "gci/master.yaml", "gci/node.yaml", diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh deleted file mode 100755 index c1b66bab0e3..00000000000 --- a/cluster/gce/configure-vm.sh +++ /dev/null @@ -1,932 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# If we have any arguments at all, this is a push and not just setup. -is_push=$@ - -function ensure-basic-networking() { - # Deal with GCE networking bring-up race. (We rely on DNS for a lot, - # and it's just not worth doing a whole lot of startup work if this - # isn't ready yet.) - until getent hosts metadata.google.internal &>/dev/null; do - echo 'Waiting for functional DNS (trying to resolve metadata.google.internal)...' - sleep 3 - done - until getent hosts $(hostname -f || echo _error_) &>/dev/null; do - echo 'Waiting for functional DNS (trying to resolve my own FQDN)...' - sleep 3 - done - until getent hosts $(hostname -i || echo _error_) &>/dev/null; do - echo 'Waiting for functional DNS (trying to resolve my own IP)...' 
- sleep 3 - done - - echo "Networking functional on $(hostname) ($(hostname -i))" -} - -# A hookpoint for installing any needed packages -ensure-packages() { - : -} - -function create-node-pki { - echo "Creating node pki files" - - local -r pki_dir="/etc/kubernetes/pki" - mkdir -p "${pki_dir}" - - if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then - CA_CERT_BUNDLE="${CA_CERT}" - fi - - CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt" - echo "${CA_CERT_BUNDLE}" | base64 --decode > "${CA_CERT_BUNDLE_PATH}" - - if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then - KUBELET_CERT_PATH="${pki_dir}/kubelet.crt" - echo "${KUBELET_CERT}" | base64 --decode > "${KUBELET_CERT_PATH}" - - KUBELET_KEY_PATH="${pki_dir}/kubelet.key" - echo "${KUBELET_KEY}" | base64 --decode > "${KUBELET_KEY_PATH}" - fi -} - -# A hookpoint for setting up local devices -ensure-local-disks() { - for ssd in /dev/disk/by-id/google-local-ssd-*; do - if [ -e "$ssd" ]; then - ssdnum=`echo $ssd | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/'` - echo "Formatting and mounting local SSD $ssd to /mnt/disks/ssd$ssdnum" - mkdir -p /mnt/disks/ssd$ssdnum - /usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${ssd}" /mnt/disks/ssd$ssdnum &>/var/log/local-ssd-$ssdnum-mount.log || \ - { echo "Local SSD $ssdnum mount failed, review /var/log/local-ssd-$ssdnum-mount.log"; return 1; } - else - echo "No local SSD disks found." - fi - done -} - -function config-ip-firewall { - echo "Configuring IP firewall rules" - - # Do not consider loopback addresses as martian source or destination while - # routing. This enables the use of 127/8 for local routing purposes. - sysctl -w net.ipv4.conf.all.route_localnet=1 - - # We need to add rules to accept all TCP/UDP/ICMP packets. 
- if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then - echo "Add rules to accept all inbound TCP/UDP/ICMP packets" - iptables -A INPUT -p TCP -j ACCEPT - iptables -A INPUT -p UDP -j ACCEPT - iptables -A INPUT -p ICMP -j ACCEPT - fi - if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then - echo "Add rules to accept all forwarded TCP/UDP/ICMP packets" - iptables -A FORWARD -p TCP -j ACCEPT - iptables -A FORWARD -p UDP -j ACCEPT - iptables -A FORWARD -p ICMP -j ACCEPT - fi - - # Flush iptables nat table - iptables -t nat -F || true - - if [[ "${NON_MASQUERADE_CIDR:-}" == "0.0.0.0/0" ]]; then - echo "Add rules for ip masquerade" - iptables -t nat -N IP-MASQ - iptables -t nat -A POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! --dst-type LOCAL -j IP-MASQ - iptables -t nat -A IP-MASQ -d 169.254.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN - iptables -t nat -A IP-MASQ -d 10.0.0.0/8 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN - iptables -t nat -A IP-MASQ -d 172.16.0.0/12 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN - iptables -t nat -A IP-MASQ -d 192.168.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN - iptables -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE - fi - - if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then - echo "Add rule for metadata concealment" - iptables -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988 - fi -} - -function ensure-install-dir() { - INSTALL_DIR="/var/cache/kubernetes-install" - mkdir -p 
${INSTALL_DIR} - cd ${INSTALL_DIR} -} - -function salt-apiserver-timeout-grain() { - cat <>/etc/salt/minion.d/grains.conf - minRequestTimeout: '$1' -EOF -} - -function set-broken-motd() { - echo -e '\nBroken (or in progress) Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd -} - -function reset-motd() { - # kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl) - local -r version="$(/usr/local/bin/kubelet --version=true | cut -f2 -d " ")" - # This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1), - # or the git hash that's in the build info. - local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")" - local devel="" - if [[ "${gitref}" != "${version}" ]]; then - devel=" -Note: This looks like a development version, which might not be present on GitHub. -If it isn't, the closest tag is at: - https://github.com/kubernetes/kubernetes/tree/${gitref} -" - gitref="${version//*+/}" - fi - cat > /etc/motd < "${kube_env_yaml}"; do - echo 'Waiting for kube-env...' - sleep 3 - done - - # kube-env has all the environment variables we care about, in a flat yaml format - eval "$(python -c ' -import pipes,sys,yaml - -for k,v in yaml.load(sys.stdin).iteritems(): - print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v)))) - print("""export {var}""".format(var = k)) - ' < """${kube_env_yaml}""")" - ) -} - -function remove-docker-artifacts() { - echo "== Deleting docker0 ==" - apt-get-install bridge-utils - - # Remove docker artifacts on minion nodes, if present - ifconfig docker0 down || true - brctl delbr docker0 || true - echo "== Finished deleting docker0 ==" -} - -# Retry a download until we get it. Takes a hash and a set of URLs. -# -# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown. -# $2+ are the URLs to download. 
-download-or-bust() { - local -r hash="$1" - shift 1 - - urls=( $* ) - while true; do - for url in "${urls[@]}"; do - local file="${url##*/}" - rm -f "${file}" - if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then - echo "== Failed to download ${url}. Retrying. ==" - elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then - echo "== Hash validation of ${url} failed. Retrying. ==" - else - if [[ -n "${hash}" ]]; then - echo "== Downloaded ${url} (SHA1 = ${hash}) ==" - else - echo "== Downloaded ${url} ==" - fi - return - fi - done - done -} - -validate-hash() { - local -r file="$1" - local -r expected="$2" - local actual - - actual=$(sha1sum ${file} | awk '{ print $1 }') || true - if [[ "${actual}" != "${expected}" ]]; then - echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} ==" - return 1 - fi -} - -apt-get-install() { - local -r packages=( $@ ) - installed=true - for package in "${packages[@]}"; do - if ! dpkg -s "${package}" &>/dev/null; then - installed=false - break - fi - done - if [[ "${installed}" == "true" ]]; then - echo "== ${packages[@]} already installed, skipped apt-get install ${packages[@]} ==" - return - fi - - apt-get-update - - # Forcibly install packages (options borrowed from Salt logs). 
- until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install $@; do - echo "== install of packages $@ failed, retrying ==" - sleep 5 - done -} - -apt-get-update() { - echo "== Refreshing package database ==" - until apt-get update; do - echo "== apt-get update failed, retrying ==" - sleep 5 - done -} - -# Restart any services that need restarting due to a library upgrade -# Uses needrestart -restart-updated-services() { - # We default to restarting services, because this is only done as part of an update - if [[ "${AUTO_RESTART_SERVICES:-true}" != "true" ]]; then - echo "Auto restart of services prevented by AUTO_RESTART_SERVICES=${AUTO_RESTART_SERVICES}" - return - fi - echo "Restarting services with updated libraries (needrestart -r a)" - # The pipes make sure that needrestart doesn't think it is running with a TTY - # Debian bug #803249; fixed but not necessarily in package repos yet - echo "" | needrestart -r a 2>&1 | tee /dev/null -} - -# Reboot the machine if /var/run/reboot-required exists -reboot-if-required() { - if [[ ! 
-e "/var/run/reboot-required" ]]; then - return - fi - - echo "Reboot is required (/var/run/reboot-required detected)" - if [[ -e "/var/run/reboot-required.pkgs" ]]; then - echo "Packages that triggered reboot:" - cat /var/run/reboot-required.pkgs - fi - - # We default to rebooting the machine because this is only done as part of an update - if [[ "${AUTO_REBOOT:-true}" != "true" ]]; then - echo "Reboot prevented by AUTO_REBOOT=${AUTO_REBOOT}" - return - fi - - rm -f /var/run/reboot-required - rm -f /var/run/reboot-required.pkgs - echo "Triggering reboot" - init 6 -} - -# Install upgrades using unattended-upgrades, then reboot or restart services -auto-upgrade() { - # We default to not installing upgrades - if [[ "${AUTO_UPGRADE:-false}" != "true" ]]; then - echo "AUTO_UPGRADE not set to true; won't auto-upgrade" - return - fi - apt-get-install unattended-upgrades needrestart - unattended-upgrade --debug - reboot-if-required # We may reboot the machine right here - restart-updated-services -} - -# -# Install salt from GCS. See README.md for instructions on how to update these -# debs. -install-salt() { - if dpkg -s salt-minion &>/dev/null; then - echo "== SaltStack already installed, skipping install step ==" - return - fi - - echo "== Refreshing package database ==" - until apt-get update; do - echo "== apt-get update failed, retrying ==" - sleep 5 - done - - mkdir -p /var/cache/salt-install - cd /var/cache/salt-install - - DEBS=( - libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb - python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb - salt-common_2014.1.13+ds-1~bpo70+1_all.deb - salt-minion_2014.1.13+ds-1~bpo70+1_all.deb - ) - URL_BASE="https://storage.googleapis.com/kubernetes-release/salt" - - for deb in "${DEBS[@]}"; do - if [ ! -e "${deb}" ]; then - download-or-bust "" "${URL_BASE}/${deb}" - fi - done - - # Based on - # https://major.io/2014/06/26/install-debian-packages-without-starting-daemons/ - # We do this to prevent Salt from starting the salt-minion - # daemon. 
The other packages don't have relevant daemons. (If you - # add a package that needs a daemon started, add it to a different - # list.) - cat > /usr/sbin/policy-rc.d <&2 -exit 101 -EOF - chmod 0755 /usr/sbin/policy-rc.d - - for deb in "${DEBS[@]}"; do - echo "== Installing ${deb}, ignore dependency complaints (will fix later) ==" - dpkg --skip-same-version --force-depends -i "${deb}" - done - - # This will install any of the unmet dependencies from above. - echo "== Installing unmet dependencies ==" - until apt-get install -f -y; do - echo "== apt-get install failed, retrying ==" - sleep 5 - done - - rm /usr/sbin/policy-rc.d - - # Log a timestamp - echo "== Finished installing Salt ==" -} - -# Ensure salt-minion isn't running and never runs -stop-salt-minion() { - if [[ -e /etc/init/salt-minion.override ]]; then - # Assume this has already run (upgrade, or baked into containervm) - return - fi - - # This ensures it on next reboot - echo manual > /etc/init/salt-minion.override - update-rc.d salt-minion disable - - while service salt-minion status >/dev/null; do - echo "salt-minion found running, stopping" - service salt-minion stop - sleep 1 - done -} - -# Finds the master PD device; returns it in MASTER_PD_DEVICE -find-master-pd() { - MASTER_PD_DEVICE="" - if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then - return - fi - device_info=$(ls -l /dev/disk/by-id/google-master-pd) - relative_path=${device_info##* } - MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}" -} - -# Create the overlay files for the salt tree. We create these in a separate -# place so that we can blow away the rest of the salt configs on a kube-push and -# re-apply these. 
-function create-salt-pillar() { - # Always overwrite the cluster-params.sls (even on a push, we have - # these variables) - mkdir -p /srv/salt-overlay/pillar - cat </srv/salt-overlay/pillar/cluster-params.sls -instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' -node_tags: '$(echo "$NODE_TAGS" | sed -e "s/'/''/g")' -node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")' -cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' -allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")' -non_masquerade_cidr: '$(echo "$NON_MASQUERADE_CIDR" | sed -e "s/'/''/g")' -service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' -enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' -enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")' -enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")' -enable_node_problem_detector: '$(echo "$ENABLE_NODE_PROBLEM_DETECTOR" | sed -e "s/'/''/g")' -enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")' -enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")' -enable_metadata_proxy: '$(echo "$ENABLE_METADATA_CONCEALMENT" | sed -e "s/'/''/g")' -enable_metrics_server: '$(echo "$ENABLE_METRICS_SERVER" | sed -e "s/'/''/g")' -enable_pod_security_policy: '$(echo "$ENABLE_POD_SECURITY_POLICY" | sed -e "s/'/''/g")' -enable_rescheduler: '$(echo "$ENABLE_RESCHEDULER" | sed -e "s/'/''/g")' -logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")' -elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")' -enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")' -cluster_dns_core_dns: '$(echo "$CLUSTER_DNS_CORE_DNS" | sed -e "s/'/''/g")' -enable_cluster_registry: '$(echo "$ENABLE_CLUSTER_REGISTRY" | sed -e "s/'/''/g")' -dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")' -dns_domain: 
'$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")' -enable_dns_horizontal_autoscaler: '$(echo "$ENABLE_DNS_HORIZONTAL_AUTOSCALER" | sed -e "s/'/''/g")' -admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")' -network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")' -prepull_e2e_images: '$(echo "$PREPULL_E2E_IMAGES" | sed -e "s/'/''/g")' -hairpin_mode: '$(echo "$HAIRPIN_MODE" | sed -e "s/'/''/g")' -softlockup_panic: '$(echo "$SOFTLOCKUP_PANIC" | sed -e "s/'/''/g")' -opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")' -opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")' -opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")' -network_policy_provider: '$(echo "$NETWORK_POLICY_PROVIDER" | sed -e "s/'/''/g")' -enable_manifest_url: '$(echo "${ENABLE_MANIFEST_URL:-}" | sed -e "s/'/''/g")' -manifest_url: '$(echo "${MANIFEST_URL:-}" | sed -e "s/'/''/g")' -manifest_url_header: '$(echo "${MANIFEST_URL_HEADER:-}" | sed -e "s/'/''/g")' -num_nodes: $(echo "${NUM_NODES:-}" | sed -e "s/'/''/g") -e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")' -kube_uid: '$(echo "${KUBE_UID}" | sed -e "s/'/''/g")' -initial_etcd_cluster: '$(echo "${INITIAL_ETCD_CLUSTER:-}" | sed -e "s/'/''/g")' -initial_etcd_cluster_state: '$(echo "${INITIAL_ETCD_CLUSTER_STATE:-}" | sed -e "s/'/''/g")' -ca_cert_bundle_path: '$(echo "${CA_CERT_BUNDLE_PATH:-}" | sed -e "s/'/''/g")' -hostname: '$(echo "${ETCD_HOSTNAME:-$(hostname -s)}" | sed -e "s/'/''/g")' -enable_pod_priority: '$(echo "${ENABLE_POD_PRIORITY:-}" | sed -e "s/'/''/g")' -enable_default_storage_class: '$(echo "$ENABLE_DEFAULT_STORAGE_CLASS" | sed -e "s/'/''/g")' -kube_proxy_daemonset: '$(echo "$KUBE_PROXY_DAEMONSET" | sed -e "s/'/''/g")' -EOF - if [ -n "${STORAGE_BACKEND:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -storage_backend: '$(echo "$STORAGE_BACKEND" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${STORAGE_MEDIA_TYPE:-}" ]; 
then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -storage_media_type: '$(echo "$STORAGE_MEDIA_TYPE" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kube_apiserver_request_timeout_sec: '$(echo "$KUBE_APISERVER_REQUEST_TIMEOUT_SEC" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_liveness_probe_initial_delay: '$(echo "$ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kube_apiserver_liveness_probe_initial_delay: '$(echo "$KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ADMISSION_CONTROL:-}" ] && [ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -admission-control-config-file: /etc/admission_controller.config -EOF - fi - if [ -n "${KUBELET_PORT:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kubelet_port: '$(echo "$KUBELET_PORT" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ETCD_IMAGE:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_docker_tag: '$(echo "$ETCD_IMAGE" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ETCD_DOCKER_REPOSITORY:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_docker_repository: '$(echo "$ETCD_DOCKER_REPOSITORY" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ETCD_VERSION:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_version: '$(echo "$ETCD_VERSION" | sed -e "s/'/''/g")' -EOF - fi - if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_over_ssl: 'true' -EOF - else - cat 
<>/srv/salt-overlay/pillar/cluster-params.sls -etcd_over_ssl: 'false' -EOF - fi - if [ -n "${ETCD_QUORUM_READ:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -etcd_quorum_read: '$(echo "${ETCD_QUORUM_READ}" | sed -e "s/'/''/g")' -EOF - fi - # Configuration changes for test clusters - if [ -n "${APISERVER_TEST_ARGS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -apiserver_test_args: '$(echo "$APISERVER_TEST_ARGS" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -api_server_test_log_level: '$(echo "$API_SERVER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBELET_TEST_ARGS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kubelet_test_args: '$(echo "$KUBELET_TEST_ARGS" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kubelet_test_log_level: '$(echo "$KUBELET_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -docker_test_log_level: '$(echo "$DOCKER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -controller_manager_test_args: '$(echo "$CONTROLLER_MANAGER_TEST_ARGS" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -controller_manager_test_log_level: '$(echo "$CONTROLLER_MANAGER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -scheduler_test_args: '$(echo "$SCHEDULER_TEST_ARGS" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -scheduler_test_log_level: '$(echo 
"$SCHEDULER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kubeproxy_test_args: '$(echo "$KUBEPROXY_TEST_ARGS" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -kubeproxy_test_log_level: '$(echo "$KUBEPROXY_TEST_LOG_LEVEL" | sed -e "s/'/''/g")' -EOF - fi - # TODO: Replace this with a persistent volume (and create it). - if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -cluster_registry_disk_type: gce -cluster_registry_disk_size: $(echo $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE}) | sed -e "s/'/''/g") -cluster_registry_disk_name: $(echo ${CLUSTER_REGISTRY_DISK} | sed -e "s/'/''/g") -EOF - fi - if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -terminated_pod_gc_threshold: '$(echo "${TERMINATED_POD_GC_THRESHOLD}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ENABLE_CUSTOM_METRICS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -enable_custom_metrics: '$(echo "${ENABLE_CUSTOM_METRICS}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${NODE_LABELS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -node_labels: '$(echo "${NODE_LABELS}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${NON_MASTER_NODE_LABELS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -non_master_node_labels: '$(echo "${NON_MASTER_NODE_LABELS}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${NODE_TAINTS:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -node_taints: '$(echo "${NODE_TAINTS}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${EVICTION_HARD:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")' -EOF - fi - if [[ 
"${ENABLE_CLUSTER_AUTOSCALER:-false}" == "true" ]]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -enable_cluster_autoscaler: '$(echo "${ENABLE_CLUSTER_AUTOSCALER}" | sed -e "s/'/''/g")' -autoscaler_mig_config: '$(echo "${AUTOSCALER_MIG_CONFIG}" | sed -e "s/'/''/g")' -autoscaler_expander_config: '$(echo "${AUTOSCALER_EXPANDER_CONFIG}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -scheduling_algorithm_provider: '$(echo "${SCHEDULING_ALGORITHM_PROVIDER}" | sed -e "s/'/''/g")' -EOF - fi - if [ -n "${ENABLE_IP_ALIASES:-}" ]; then - cat <>/srv/salt-overlay/pillar/cluster-params.sls -enable_ip_aliases: '$(echo "$ENABLE_IP_ALIASES" | sed -e "s/'/''/g")' -EOF - fi -} - -# The job of this function is simple, but the basic regular expression syntax makes -# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc -# into [0-9]+, Ki, Mi, Gi, etc. -# This is done in two steps: -# 1. Convert from [0-9]+X?i?B into [0-9]X? (X denotes the prefix, ? means the field -# is optional. -# 2. Attach an 'i' to the end of the string if we find a letter. -# The two step process is needed to handle the edge case in which we want to convert -# a raw byte count, as the result should be a simple number (e.g. 5B -> 5). -function convert-bytes-gce-kube() { - local -r storage_space=$1 - echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/' -} - -# This should happen both on cluster initialization and node upgrades. -# -# - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and -# KUBELET_KEY to generate a kubeconfig file for the kubelet to securely -# connect to the apiserver. - -function create-salt-kubelet-auth() { - local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig" - if [ ! 
-e "${kubelet_kubeconfig_file}" ]; then - mkdir -p /srv/salt-overlay/salt/kubelet - (umask 077; - cat > "${kubelet_kubeconfig_file}" < "${kube_proxy_kubeconfig_file}" < /dev/null -} - -function download-release() { - # In case of failure checking integrity of release, retry. - until try-download-release; do - sleep 15 - echo "Couldn't download release. Retrying..." - done - - echo "Running release install script" - kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}" -} - -function fix-apt-sources() { - sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list - sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list -} - -function salt-run-local() { - cat </etc/salt/minion.d/local.conf -file_client: local -file_roots: - base: - - /srv/salt -EOF -} - -function salt-debug-log() { - cat </etc/salt/minion.d/log-level-debug.conf -log_level: debug -log_level_logfile: debug -EOF -} - -function salt-node-role() { - local -r kubelet_bootstrap_kubeconfig="/srv/salt-overlay/salt/kubelet/bootstrap-kubeconfig" - local -r kubelet_kubeconfig="/srv/salt-overlay/salt/kubelet/kubeconfig" - cat </etc/salt/minion.d/grains.conf -grains: - roles: - - kubernetes-pool - cloud: gce - api_servers: '${KUBERNETES_MASTER_NAME}' - kubelet_bootstrap_kubeconfig: /var/lib/kubelet/bootstrap-kubeconfig - kubelet_kubeconfig: /var/lib/kubelet/kubeconfig -EOF -} - -function env-to-grains { - local key=$1 - local env_key=`echo $key | tr '[:lower:]' '[:upper:]'` - local value=${!env_key:-} - if [[ -n "${value}" ]]; then - # Note this is yaml, so indentation matters - cat <>/etc/salt/minion.d/grains.conf - ${key}: '$(echo "${value}" | sed -e "s/'/''/g")' -EOF - fi -} - -function node-docker-opts() { - if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then - DOCKER_OPTS="${DOCKER_OPTS:-} ${EXTRA_DOCKER_OPTS}" - fi - - # Decide whether to enable a docker registry mirror. This is taken from - # the "kube-env" metadata value. 
- if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then - echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}" - DOCKER_OPTS="${DOCKER_OPTS:-} --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}" - fi -} - -function salt-grains() { - env-to-grains "docker_opts" - env-to-grains "docker_root" - env-to-grains "kubelet_root" - env-to-grains "feature_gates" -} - -function configure-salt() { - mkdir -p /etc/salt/minion.d - salt-run-local - salt-node-role - node-docker-opts - salt-grains - install-salt - stop-salt-minion -} - -function run-salt() { - echo "== Calling Salt ==" - local rc=0 - for i in {0..6}; do - salt-call --retcode-passthrough --local state.highstate && rc=0 || rc=$? - if [[ "${rc}" == 0 ]]; then - return 0 - fi - done - echo "Salt failed to run repeatedly" >&2 - return "${rc}" -} - -function run-user-script() { - if curl-metadata k8s-user-startup-script > "${INSTALL_DIR}/k8s-user-script.sh"; then - user_script=$(cat "${INSTALL_DIR}/k8s-user-script.sh") - fi - if [[ ! 
-z ${user_script:-} ]]; then - chmod u+x "${INSTALL_DIR}/k8s-user-script.sh" - echo "== running user startup script ==" - "${INSTALL_DIR}/k8s-user-script.sh" - fi -} - -if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then - echo "Support for debian master has been removed" - exit 1 -fi - -if [[ -z "${is_push}" ]]; then - echo "== kube-up node config starting ==" - set-broken-motd - ensure-basic-networking - fix-apt-sources - ensure-install-dir - ensure-packages - set-kube-env - auto-upgrade - ensure-local-disks - create-node-pki - create-salt-pillar - create-salt-kubelet-auth - if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then - create-salt-kubeproxy-auth - fi - download-release - configure-salt - remove-docker-artifacts - config-ip-firewall - run-salt - reset-motd - - run-user-script - echo "== kube-up node config done ==" -else - echo "== kube-push node config starting ==" - ensure-basic-networking - ensure-install-dir - set-kube-env - create-salt-pillar - download-release - reset-motd - run-salt - echo "== kube-push node config done ==" -fi diff --git a/cluster/gce/debian/node-helper.sh b/cluster/gce/debian/node-helper.sh deleted file mode 100755 index b62930f0e34..00000000000 --- a/cluster/gce/debian/node-helper.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# A library of helper functions and constant for debian os distro - -function get-node-instance-metadata { - local metadata="" - metadata+="startup-script=${KUBE_TEMP}/configure-vm.sh," - metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml," - metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt" - echo "${metadata}" -} - -# $1: template name (required) -function create-node-instance-template { - local template_name="$1" - prepare-startup-script - create-node-template "$template_name" "${scope_flags}" "$(get-node-instance-metadata)" -} diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 5c4f48d12cd..97c25c527bf 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -25,7 +25,7 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}" source "${KUBE_ROOT}/cluster/common.sh" source "${KUBE_ROOT}/hack/lib/util.sh" -if [[ "${NODE_OS_DISTRIBUTION}" == "debian" || "${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then +if [[ "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]]; then source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh" else echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2 @@ -2233,12 +2233,3 @@ function ssh-to-node() { function prepare-e2e() { detect-project } - -# Writes configure-vm.sh to a temporary location with comments stripped. GCE -# limits the size of metadata fields to 32K, and stripping comments is the -# easiest way to buy us a little more room. -function prepare-startup-script() { - # Find a standard sed instance (and ensure that the command works as expected on a Mac). 
- kube::util::ensure-gnu-sed - ${SED} '/^\s*#\([^!].*\)*$/ d' ${KUBE_ROOT}/cluster/gce/configure-vm.sh > ${KUBE_TEMP}/configure-vm.sh -} From 410b4016fd3dc97cdaf0a8e2bc20726900db772e Mon Sep 17 00:00:00 2001 From: ilackarms Date: Sat, 13 Jan 2018 13:14:31 -0500 Subject: [PATCH 768/794] periodically flush writer --- staging/src/k8s.io/apiserver/pkg/server/filters/compression.go | 1 + 1 file changed, 1 insertion(+) diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/compression.go b/staging/src/k8s.io/apiserver/pkg/server/filters/compression.go index 6303ab54a5f..6bedfadea73 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/compression.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/compression.go @@ -136,6 +136,7 @@ func (c *compressionResponseWriter) Write(p []byte) (int, error) { return -1, errors.New("compressing error: tried to write data using closed compressor") } c.Header().Set(headerContentEncoding, c.encoding) + defer c.compressor.Flush() return c.compressor.Write(p) } From 1e2b644260cf6643f89502b953912b581cc689a0 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 13 Jan 2018 16:25:18 -0800 Subject: [PATCH 769/794] cluster: move logging library to hack/ it's used once in cluster and used a bunch in hack/ and build/ --- cluster/BUILD | 4 ---- cluster/common.sh | 3 +-- cluster/lib/BUILD | 25 ------------------------- hack/generate-bindata.sh | 2 +- hack/lib/BUILD | 4 +--- hack/lib/init.sh | 2 +- {cluster => hack}/lib/logging.sh | 0 test/e2e_node/gubernator.sh | 2 +- 8 files changed, 5 insertions(+), 37 deletions(-) delete mode 100644 cluster/lib/BUILD rename {cluster => hack}/lib/logging.sh (100%) diff --git a/cluster/BUILD b/cluster/BUILD index 1f55e38dc54..9d3ad2c9744 100644 --- a/cluster/BUILD +++ b/cluster/BUILD @@ -20,7 +20,6 @@ filegroup( "//cluster/images/etcd/rollback:all-srcs", "//cluster/images/hyperkube:all-srcs", "//cluster/images/kubemark:all-srcs", - "//cluster/lib:all-srcs", "//cluster/saltbase:all-srcs", ], tags 
= ["automanaged"], @@ -55,7 +54,6 @@ sh_test( name = "common_test", srcs = ["common.sh"], deps = [ - "//cluster/lib", "//hack/lib", ], ) @@ -64,7 +62,6 @@ sh_test( name = "clientbin_test", srcs = ["clientbin.sh"], deps = [ - "//cluster/lib", "//hack/lib", ], ) @@ -73,7 +70,6 @@ sh_test( name = "kube-util_test", srcs = ["kube-util.sh"], deps = [ - "//cluster/lib", "//hack/lib", ], ) diff --git a/cluster/common.sh b/cluster/common.sh index 2aa73622a64..cdc2300612c 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -25,7 +25,6 @@ KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd) DEFAULT_KUBECONFIG="${HOME:-.}/.kube/config" source "${KUBE_ROOT}/hack/lib/util.sh" -source "${KUBE_ROOT}/cluster/lib/logging.sh" # KUBE_RELEASE_VERSION_REGEX matches things like "v1.2.3" or "v1.2.3-alpha.4" # # NOTE This must match the version_regex in build/common.sh @@ -499,7 +498,7 @@ function stage-images() { done kube::util::wait-for-jobs || { - kube::log::error "unable to push images. See ${temp_dir}/*.log for more info." + echo "!!! unable to push images. See ${temp_dir}/*.log for more info." 1>&2 return 1 } diff --git a/cluster/lib/BUILD b/cluster/lib/BUILD deleted file mode 100644 index 9634d17f6f4..00000000000 --- a/cluster/lib/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -sh_library( - name = "lib", - srcs = [ - "logging.sh", - ], - visibility = [ - "//build/visible_to:COMMON_testing", - "//build/visible_to:cluster", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = [ - "//build/visible_to:cluster", - ], -) diff --git a/hack/generate-bindata.sh b/hack/generate-bindata.sh index 40605fb419d..6d2ec0a5d71 100755 --- a/hack/generate-bindata.sh +++ b/hack/generate-bindata.sh @@ -22,7 +22,7 @@ if [[ -z "${KUBE_ROOT:-}" ]]; then KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. 
fi -source "${KUBE_ROOT}/cluster/lib/logging.sh" +source "${KUBE_ROOT}/hack/lib/logging.sh" if [[ ! -d "${KUBE_ROOT}/examples" ]]; then echo "${KUBE_ROOT}/examples not detected. This script should be run from a location where the source dirs are available." diff --git a/hack/lib/BUILD b/hack/lib/BUILD index 4c3122fb09f..bfce52eb23a 100644 --- a/hack/lib/BUILD +++ b/hack/lib/BUILD @@ -6,14 +6,12 @@ sh_library( "etcd.sh", "golang.sh", "init.sh", + "logging.sh", "swagger.sh", "test.sh", "util.sh", "version.sh", ], - deps = [ - "//cluster/lib", - ], ) filegroup( diff --git a/hack/lib/init.sh b/hack/lib/init.sh index cbff854137b..d141d168c71 100755 --- a/hack/lib/init.sh +++ b/hack/lib/init.sh @@ -37,7 +37,7 @@ export no_proxy=127.0.0.1,localhost THIS_PLATFORM_BIN="${KUBE_ROOT}/_output/bin" source "${KUBE_ROOT}/hack/lib/util.sh" -source "${KUBE_ROOT}/cluster/lib/logging.sh" +source "${KUBE_ROOT}/hack/lib/logging.sh" kube::log::install_errexit diff --git a/cluster/lib/logging.sh b/hack/lib/logging.sh similarity index 100% rename from cluster/lib/logging.sh rename to hack/lib/logging.sh diff --git a/test/e2e_node/gubernator.sh b/test/e2e_node/gubernator.sh index 6a41900c81b..97f3da8ce38 100755 --- a/test/e2e_node/gubernator.sh +++ b/test/e2e_node/gubernator.sh @@ -22,7 +22,7 @@ set -o errexit set -o nounset set -o pipefail -source cluster/lib/logging.sh +source hack/lib/logging.sh if [[ $# -eq 0 || ! 
$1 =~ ^[Yy]$ ]]; then From 5deb5f4913ab9ea6a469c5a4c601f82a9e30a1c4 Mon Sep 17 00:00:00 2001 From: junxu Date: Thu, 11 Jan 2018 22:36:28 -0500 Subject: [PATCH 770/794] Rename func name according TODO --- pkg/scheduler/algorithm/priorities/metadata.go | 4 ++-- pkg/scheduler/algorithm/types.go | 9 ++++----- pkg/scheduler/algorithm/types_test.go | 8 ++++---- .../algorithmprovider/defaults/defaults.go | 2 +- pkg/scheduler/core/extender_test.go | 2 +- pkg/scheduler/core/generic_scheduler.go | 11 +++++------ pkg/scheduler/core/generic_scheduler_test.go | 4 ++-- pkg/scheduler/factory/factory.go | 2 +- pkg/scheduler/factory/plugins.go | 13 ++++++------- pkg/scheduler/scheduler.go | 2 +- pkg/scheduler/scheduler_test.go | 4 ++-- pkg/scheduler/testutil.go | 2 +- 12 files changed, 30 insertions(+), 33 deletions(-) diff --git a/pkg/scheduler/algorithm/priorities/metadata.go b/pkg/scheduler/algorithm/priorities/metadata.go index fe9dce79f47..b949ad7d9bf 100644 --- a/pkg/scheduler/algorithm/priorities/metadata.go +++ b/pkg/scheduler/algorithm/priorities/metadata.go @@ -32,7 +32,7 @@ type PriorityMetadataFactory struct { statefulSetLister algorithm.StatefulSetLister } -func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, statefulSetLister algorithm.StatefulSetLister) algorithm.MetadataProducer { +func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, statefulSetLister algorithm.StatefulSetLister) algorithm.PriorityMetadataProducer { factory := &PriorityMetadataFactory{ serviceLister: serviceLister, controllerLister: controllerLister, @@ -52,7 +52,7 @@ type priorityMetadata struct { podFirstServiceSelector labels.Selector } -// PriorityMetadata is a MetadataProducer. Node info can be nil. +// PriorityMetadata is a PriorityMetadataProducer. Node info can be nil. 
func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { // If we cannot compute metadata, just return nil if pod == nil { diff --git a/pkg/scheduler/algorithm/types.go b/pkg/scheduler/algorithm/types.go index 5fb2981f110..f6ff3b49427 100644 --- a/pkg/scheduler/algorithm/types.go +++ b/pkg/scheduler/algorithm/types.go @@ -43,10 +43,9 @@ type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo m // PredicateMetadataProducer is a function that computes predicate metadata for a given pod. type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) PredicateMetadata -// MetadataProducer is a function that computes metadata for a given pod. This +// PriorityMetadataProducer is a function that computes metadata for a given pod. This // is now used for only for priority functions. For predicates please use PredicateMetadataProducer. -// TODO: Rename this once we have a specific type for priority metadata producer. -type MetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} +type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} // DEPRECATED // Use Map-Reduce pattern for priority functions. @@ -67,8 +66,8 @@ func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*sche return nil } -// EmptyMetadataProducer returns a no-op MetadataProducer type. -func EmptyMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { +// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type. 
+func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { return nil } diff --git a/pkg/scheduler/algorithm/types_test.go b/pkg/scheduler/algorithm/types_test.go index 862425f7218..58ead064d99 100644 --- a/pkg/scheduler/algorithm/types_test.go +++ b/pkg/scheduler/algorithm/types_test.go @@ -24,8 +24,8 @@ import ( "k8s.io/kubernetes/pkg/scheduler/schedulercache" ) -// EmptyMetadataProducer should returns a no-op MetadataProducer type. -func TestEmptyMetadataProducer(t *testing.T) { +// EmptyPriorityMetadataProducer should returns a no-op PriorityMetadataProducer type. +func TestEmptyPriorityMetadataProducer(t *testing.T) { fakePod := new(v1.Pod) fakeLabelSelector := labels.SelectorFromSet(labels.Set{"foo": "bar"}) @@ -33,8 +33,8 @@ func TestEmptyMetadataProducer(t *testing.T) { "2": schedulercache.NewNodeInfo(fakePod), "1": schedulercache.NewNodeInfo(), } - // Test EmptyMetadataProducer - metadata := EmptyMetadataProducer(fakePod, nodeNameToInfo) + // Test EmptyPriorityMetadataProducer + metadata := EmptyPriorityMetadataProducer(fakePod, nodeNameToInfo) if metadata != nil { t.Errorf("failed to produce empty metadata: got %v, expected nil", metadata) } diff --git a/pkg/scheduler/algorithmprovider/defaults/defaults.go b/pkg/scheduler/algorithmprovider/defaults/defaults.go index 6cbc772ac99..11b0a54042a 100644 --- a/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -42,7 +42,7 @@ func init() { return predicates.NewPredicateMetadataFactory(args.PodLister) }) factory.RegisterPriorityMetadataProducerFactory( - func(args factory.PluginFactoryArgs) algorithm.MetadataProducer { + func(args factory.PluginFactoryArgs) algorithm.PriorityMetadataProducer { return priorities.NewPriorityMetadataFactory(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister, args.StatefulSetLister) }) diff --git a/pkg/scheduler/core/extender_test.go 
b/pkg/scheduler/core/extender_test.go index 09e136d38b6..69cf8c54d15 100644 --- a/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -317,7 +317,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { } queue := NewSchedulingQueue() scheduler := NewGenericScheduler( - cache, nil, queue, test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) + cache, nil, queue, test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyPriorityMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) podIgnored := &v1.Pod{} machine, err := scheduler.Schedule(podIgnored, schedulertesting.FakeNodeLister(makeNodeList(test.nodes))) if test.expectsErr { diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index 2009b7af895..da04ff45ad6 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -94,17 +94,16 @@ type genericScheduler struct { equivalenceCache *EquivalenceCache schedulingQueue SchedulingQueue predicates map[string]algorithm.FitPredicate - priorityMetaProducer algorithm.MetadataProducer + priorityMetaProducer algorithm.PriorityMetadataProducer predicateMetaProducer algorithm.PredicateMetadataProducer prioritizers []algorithm.PriorityConfig extenders []algorithm.SchedulerExtender lastNodeIndexLock sync.Mutex lastNodeIndex uint64 alwaysCheckAllPredicates bool - - cachedNodeInfoMap map[string]*schedulercache.NodeInfo - volumeBinder *volumebinder.VolumeBinder - pvcLister corelisters.PersistentVolumeClaimLister + cachedNodeInfoMap map[string]*schedulercache.NodeInfo + volumeBinder *volumebinder.VolumeBinder + pvcLister corelisters.PersistentVolumeClaimLister } // Schedule tries to schedule the given pod to one of node in the node list. 
@@ -1048,7 +1047,7 @@ func NewGenericScheduler( predicates map[string]algorithm.FitPredicate, predicateMetaProducer algorithm.PredicateMetadataProducer, prioritizers []algorithm.PriorityConfig, - priorityMetaProducer algorithm.MetadataProducer, + priorityMetaProducer algorithm.PriorityMetadataProducer, extenders []algorithm.SchedulerExtender, volumeBinder *volumebinder.VolumeBinder, pvcLister corelisters.PersistentVolumeClaimLister, diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go index 55fede23c4a..70802239d78 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -410,7 +410,7 @@ func TestGenericScheduler(t *testing.T) { pvcLister := schedulertesting.FakePersistentVolumeClaimLister(pvcs) scheduler := NewGenericScheduler( - cache, nil, NewSchedulingQueue(), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}, nil, pvcLister, test.alwaysCheckAllPredicates) + cache, nil, NewSchedulingQueue(), test.predicates, algorithm.EmptyPredicateMetadataProducer, test.prioritizers, algorithm.EmptyPriorityMetadataProducer, []algorithm.SchedulerExtender{}, nil, pvcLister, test.alwaysCheckAllPredicates) machine, err := scheduler.Schedule(test.pod, schedulertesting.FakeNodeLister(makeNodeList(test.nodes))) if !reflect.DeepEqual(err, test.wErr) { @@ -1293,7 +1293,7 @@ func TestPreempt(t *testing.T) { extenders = append(extenders, extender) } scheduler := NewGenericScheduler( - cache, nil, NewSchedulingQueue(), map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) + cache, nil, NewSchedulingQueue(), 
map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyPriorityMetadataProducer, extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, false) // Call Preempt and check the expected results. node, victims, _, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap})) if err != nil { diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index fca4f46e3dd..cfde827ca9b 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -983,7 +983,7 @@ func (f *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([] return getPriorityFunctionConfigs(priorityKeys, *pluginArgs) } -func (f *configFactory) GetPriorityMetadataProducer() (algorithm.MetadataProducer, error) { +func (f *configFactory) GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) { pluginArgs, err := f.getPluginArgs() if err != nil { return nil, err diff --git a/pkg/scheduler/factory/plugins.go b/pkg/scheduler/factory/plugins.go index b8733d2961d..1447d9487c2 100644 --- a/pkg/scheduler/factory/plugins.go +++ b/pkg/scheduler/factory/plugins.go @@ -49,9 +49,8 @@ type PluginFactoryArgs struct { HardPodAffinitySymmetricWeight int32 } -// MetadataProducerFactory produces MetadataProducer from the given args. -// TODO: Rename this to PriorityMetadataProducerFactory. -type MetadataProducerFactory func(PluginFactoryArgs) algorithm.MetadataProducer +// PriorityMetadataProducerFactory produces PriorityMetadataProducer from the given args. +type PriorityMetadataProducerFactory func(PluginFactoryArgs) algorithm.PriorityMetadataProducer // PredicateMetadataProducerFactory produces PredicateMetadataProducer from the given args. 
type PredicateMetadataProducerFactory func(PluginFactoryArgs) algorithm.PredicateMetadataProducer @@ -89,7 +88,7 @@ var ( algorithmProviderMap = make(map[string]AlgorithmProviderConfig) // Registered metadata producers - priorityMetadataProducer MetadataProducerFactory + priorityMetadataProducer PriorityMetadataProducerFactory predicateMetadataProducer PredicateMetadataProducerFactory // get equivalence pod function @@ -245,7 +244,7 @@ func IsFitPredicateRegistered(name string) bool { return ok } -func RegisterPriorityMetadataProducerFactory(factory MetadataProducerFactory) { +func RegisterPriorityMetadataProducerFactory(factory PriorityMetadataProducerFactory) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() priorityMetadataProducer = factory @@ -404,12 +403,12 @@ func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[st return predicates, nil } -func getPriorityMetadataProducer(args PluginFactoryArgs) (algorithm.MetadataProducer, error) { +func getPriorityMetadataProducer(args PluginFactoryArgs) (algorithm.PriorityMetadataProducer, error) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() if priorityMetadataProducer == nil { - return algorithm.EmptyMetadataProducer, nil + return algorithm.EmptyPriorityMetadataProducer, nil } return priorityMetadataProducer(args), nil } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index fee845f34e8..788647618ef 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -78,7 +78,7 @@ func (sched *Scheduler) StopEverything() { // factory.go. 
type Configurator interface { GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) - GetPriorityMetadataProducer() (algorithm.MetadataProducer, error) + GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) GetHardPodAffinitySymmetricWeight() int32 diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index ed36792156b..d2a5d14772e 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -530,7 +530,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache. predicateMap, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{}, - algorithm.EmptyMetadataProducer, + algorithm.EmptyPriorityMetadataProducer, []algorithm.SchedulerExtender{}, nil, schedulertesting.FakePersistentVolumeClaimLister{}, @@ -575,7 +575,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc predicateMap, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{}, - algorithm.EmptyMetadataProducer, + algorithm.EmptyPriorityMetadataProducer, []algorithm.SchedulerExtender{}, nil, schedulertesting.FakePersistentVolumeClaimLister{}, diff --git a/pkg/scheduler/testutil.go b/pkg/scheduler/testutil.go index 249ced16cd8..0e0c2eae7ce 100644 --- a/pkg/scheduler/testutil.go +++ b/pkg/scheduler/testutil.go @@ -40,7 +40,7 @@ func (fc *FakeConfigurator) GetPriorityFunctionConfigs(priorityKeys sets.String) } // GetPriorityMetadataProducer is not implemented yet. 
-func (fc *FakeConfigurator) GetPriorityMetadataProducer() (algorithm.MetadataProducer, error) { +func (fc *FakeConfigurator) GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) { return nil, fmt.Errorf("not implemented") } From cd02f168e6b476ed475228e725008ab948dfadc5 Mon Sep 17 00:00:00 2001 From: wackxu Date: Tue, 14 Nov 2017 20:07:24 +0800 Subject: [PATCH 771/794] use shared informers for TokenCleaner controller --- cmd/kube-controller-manager/app/bootstrap.go | 1 + pkg/controller/bootstrap/BUILD | 3 - pkg/controller/bootstrap/tokencleaner.go | 129 ++++++++++++++---- pkg/controller/bootstrap/tokencleaner_test.go | 24 ++-- 4 files changed, 116 insertions(+), 41 deletions(-) diff --git a/cmd/kube-controller-manager/app/bootstrap.go b/cmd/kube-controller-manager/app/bootstrap.go index 38e066523fd..aeb8405612f 100644 --- a/cmd/kube-controller-manager/app/bootstrap.go +++ b/cmd/kube-controller-manager/app/bootstrap.go @@ -39,6 +39,7 @@ func startBootstrapSignerController(ctx ControllerContext) (bool, error) { func startTokenCleanerController(ctx ControllerContext) (bool, error) { tcc, err := bootstrap.NewTokenCleaner( ctx.ClientBuilder.ClientGoClientOrDie("token-cleaner"), + ctx.InformerFactory.Core().V1().Secrets(), bootstrap.DefaultTokenCleanerOptions(), ) if err != nil { diff --git a/pkg/controller/bootstrap/BUILD b/pkg/controller/bootstrap/BUILD index e4ef02a20b5..f5850a6fba9 100644 --- a/pkg/controller/bootstrap/BUILD +++ b/pkg/controller/bootstrap/BUILD @@ -54,12 +54,9 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", diff --git a/pkg/controller/bootstrap/tokencleaner.go b/pkg/controller/bootstrap/tokencleaner.go index 6c099a4c733..34a91e492c8 100644 --- a/pkg/controller/bootstrap/tokencleaner.go +++ b/pkg/controller/bootstrap/tokencleaner.go @@ -17,21 +17,23 @@ limitations under the License. package bootstrap import ( + "fmt" "time" "github.com/golang/glog" - "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" + coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" api "k8s.io/kubernetes/pkg/apis/core" bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -59,57 +61,128 @@ type TokenCleaner struct { client clientset.Interface - secrets cache.Store - secretsController cache.Controller + // secretLister is able to list/get secrets and is populated by the shared informer passed to NewTokenCleaner. + secretLister corelisters.SecretLister + + // secretSynced returns true if the secret shared informer has been synced at least once. + secretSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface } // NewTokenCleaner returns a new *NewTokenCleaner. 
-// -// TODO: Switch to shared informers -func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) (*TokenCleaner, error) { +func NewTokenCleaner(cl clientset.Interface, secrets coreinformers.SecretInformer, options TokenCleanerOptions) (*TokenCleaner, error) { e := &TokenCleaner{ client: cl, + secretLister: secrets.Lister(), + secretSynced: secrets.Informer().HasSynced, tokenSecretNamespace: options.TokenSecretNamespace, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "token_cleaner"), } + if cl.CoreV1().RESTClient().GetRateLimiter() != nil { if err := metrics.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil { return nil, err } } - secretSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(bootstrapapi.SecretTypeBootstrapToken)}) - e.secrets, e.secretsController = cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { - lo.FieldSelector = secretSelector.String() - return e.client.CoreV1().Secrets(e.tokenSecretNamespace).List(lo) + secrets.Informer().AddEventHandlerWithResyncPeriod( + cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + switch t := obj.(type) { + case *v1.Secret: + return t.Type == bootstrapapi.SecretTypeBootstrapToken && t.Namespace == e.tokenSecretNamespace + default: + utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj)) + return false + } }, - WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { - lo.FieldSelector = secretSelector.String() - return e.client.CoreV1().Secrets(e.tokenSecretNamespace).Watch(lo) + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: e.enqueueSecrets, + UpdateFunc: func(oldSecret, newSecret interface{}) { e.enqueueSecrets(newSecret) }, }, }, - &v1.Secret{}, options.SecretResync, - cache.ResourceEventHandlerFuncs{ - AddFunc: e.evalSecret, - 
UpdateFunc: func(oldSecret, newSecret interface{}) { e.evalSecret(newSecret) }, - }, ) + return e, nil } // Run runs controller loops and returns when they are done func (tc *TokenCleaner) Run(stopCh <-chan struct{}) { - go tc.secretsController.Run(stopCh) - go wait.Until(tc.evalSecrets, 10*time.Second, stopCh) + defer utilruntime.HandleCrash() + defer tc.queue.ShutDown() + + glog.Infof("Starting token cleaner controller") + defer glog.Infof("Shutting down token cleaner controller") + + if !controller.WaitForCacheSync("token_cleaner", stopCh, tc.secretSynced) { + return + } + + go wait.Until(tc.worker, 10*time.Second, stopCh) + <-stopCh } -func (tc *TokenCleaner) evalSecrets() { - for _, obj := range tc.secrets.List() { - tc.evalSecret(obj) +func (tc *TokenCleaner) enqueueSecrets(obj interface{}) { + key, err := controller.KeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return } + tc.queue.Add(key) +} + +// worker runs a thread that dequeues secrets, handles them, and marks them done. +func (tc *TokenCleaner) worker() { + for tc.processNextWorkItem() { + } +} + +// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit. 
+func (tc *TokenCleaner) processNextWorkItem() bool { + key, quit := tc.queue.Get() + if quit { + return false + } + defer tc.queue.Done(key) + + if err := tc.syncFunc(key.(string)); err != nil { + tc.queue.AddRateLimited(key) + utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", key, err)) + return true + } + + tc.queue.Forget(key) + return true +} + +func (tc *TokenCleaner) syncFunc(key string) error { + startTime := time.Now() + defer func() { + glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Now().Sub(startTime)) + }() + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + + ret, err := tc.secretLister.Secrets(namespace).Get(name) + if apierrors.IsNotFound(err) { + glog.V(3).Infof("secret has been deleted: %v", key) + return nil + } + + if err != nil { + return err + } + + if ret.Type == bootstrapapi.SecretTypeBootstrapToken { + tc.evalSecret(ret) + } + return nil } func (tc *TokenCleaner) evalSecret(o interface{}) { diff --git a/pkg/controller/bootstrap/tokencleaner_test.go b/pkg/controller/bootstrap/tokencleaner_test.go index 47059dd4d19..5fddd7980f6 100644 --- a/pkg/controller/bootstrap/tokencleaner_test.go +++ b/pkg/controller/bootstrap/tokencleaner_test.go @@ -23,6 +23,8 @@ import ( "github.com/davecgh/go-spew/spew" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" api "k8s.io/kubernetes/pkg/apis/core" @@ -32,24 +34,26 @@ func init() { spew.Config.DisableMethods = true } -func newTokenCleaner() (*TokenCleaner, *fake.Clientset, error) { +func newTokenCleaner() (*TokenCleaner, *fake.Clientset, coreinformers.SecretInformer, error) { options := DefaultTokenCleanerOptions() cl := fake.NewSimpleClientset() - tcc, err := NewTokenCleaner(cl, options) + informerFactory := informers.NewSharedInformerFactory(cl, options.SecretResync) + secrets := 
informerFactory.Core().V1().Secrets() + tcc, err := NewTokenCleaner(cl, secrets, options) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - return tcc, cl, nil + return tcc, cl, secrets, nil } func TestCleanerNoExpiration(t *testing.T) { - cleaner, cl, err := newTokenCleaner() + cleaner, cl, secrets, err := newTokenCleaner() if err != nil { t.Fatalf("error creating TokenCleaner: %v", err) } secret := newTokenSecret("tokenID", "tokenSecret") - cleaner.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) cleaner.evalSecret(secret) @@ -59,14 +63,14 @@ func TestCleanerNoExpiration(t *testing.T) { } func TestCleanerExpired(t *testing.T) { - cleaner, cl, err := newTokenCleaner() + cleaner, cl, secrets, err := newTokenCleaner() if err != nil { t.Fatalf("error creating TokenCleaner: %v", err) } secret := newTokenSecret("tokenID", "tokenSecret") addSecretExpiration(secret, timeString(-time.Hour)) - cleaner.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) cleaner.evalSecret(secret) @@ -81,14 +85,14 @@ func TestCleanerExpired(t *testing.T) { } func TestCleanerNotExpired(t *testing.T) { - cleaner, cl, err := newTokenCleaner() + cleaner, cl, secrets, err := newTokenCleaner() if err != nil { t.Fatalf("error creating TokenCleaner: %v", err) } secret := newTokenSecret("tokenID", "tokenSecret") addSecretExpiration(secret, timeString(time.Hour)) - cleaner.secrets.Add(secret) + secrets.Informer().GetIndexer().Add(secret) cleaner.evalSecret(secret) From 24762b9f436faa5ecf59eb8de6ccd59cf143de0d Mon Sep 17 00:00:00 2001 From: Cosmin Cojocar Date: Mon, 15 Jan 2018 10:02:00 +0100 Subject: [PATCH 772/794] Extend the ListNextResults methods with the resource group and instrument them --- .../providers/azure/azure_backoff.go | 6 +-- .../providers/azure/azure_client.go | 45 ++++++++++++------- .../providers/azure/azure_fakes.go | 10 ++--- .../providers/azure/azure_util_vmss.go | 4 +- 4 files changed, 40 insertions(+), 25 deletions(-) diff 
--git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index ff0e16bfd7d..9e4ee788d45 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -89,7 +89,7 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - result, retryErr = az.VirtualMachinesClient.ListNextResults(result) + result, retryErr = az.VirtualMachinesClient.ListNextResults(az.ResourceGroup, result) if retryErr != nil { glog.Errorf("VirtualMachinesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, retryErr) @@ -176,7 +176,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - result, retryErr = az.LoadBalancerClient.ListNextResults(result) + result, retryErr = az.LoadBalancerClient.ListNextResults(az.ResourceGroup, result) if retryErr != nil { glog.Errorf("LoadBalancerClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, @@ -225,7 +225,7 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd if result.NextLink != nil { err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error - result, retryErr = az.PublicIPAddressesClient.ListNextResults(result) + result, retryErr = az.PublicIPAddressesClient.ListNextResults(az.ResourceGroup, result) if retryErr != nil { glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", pipResourceGroup, diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index e0e2697aef8..a3bce657b90 100644 --- 
a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -35,7 +35,7 @@ type VirtualMachinesClient interface { CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) - ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) + ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) } // InterfacesClient defines needed functions for azure network.InterfacesClient @@ -51,7 +51,7 @@ type LoadBalancersClient interface { Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) - ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) + ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) } // PublicIPAddressesClient defines needed functions for azure network.PublicIPAddressesClient @@ -60,7 +60,7 @@ type PublicIPAddressesClient interface { Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) - 
ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) + ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) } // SubnetsClient defines needed functions for azure network.SubnetsClient @@ -84,7 +84,7 @@ type VirtualMachineScaleSetsClient interface { CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) - ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) + ListNextResults(resourceGroupName string, astResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) } @@ -93,7 +93,7 @@ type VirtualMachineScaleSetVMsClient interface { Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) - ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) + ListNextResults(resourceGroupName string, 
lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) } // RoutesClient defines needed functions for azure network.RoutesClient @@ -193,14 +193,17 @@ func (az *azVirtualMachinesClient) List(resourceGroupName string) (result comput return } -func (az *azVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { +func (az *azVirtualMachinesClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { az.rateLimiter.Accept() glog.V(10).Infof("azVirtualMachinesClient.ListNextResults(%q): start", lastResults) defer func() { glog.V(10).Infof("azVirtualMachinesClient.ListNextResults(%q): end", lastResults) }() - return az.client.ListNextResults(lastResults) + mc := newMetricContext("vm", "list_next_results", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListNextResults(lastResults) + mc.Observe(err) + return } // azInterfacesClient implements InterfacesClient. @@ -341,14 +344,17 @@ func (az *azLoadBalancersClient) List(resourceGroupName string) (result network. 
return } -func (az *azLoadBalancersClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { +func (az *azLoadBalancersClient) ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { az.rateLimiter.Accept() glog.V(10).Infof("azLoadBalancersClient.ListNextResults(%q): start", lastResult) defer func() { glog.V(10).Infof("azLoadBalancersClient.ListNextResults(%q): end", lastResult) }() - return az.client.ListNextResults(lastResult) + mc := newMetricContext("load_balancers", "list_next_results", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListNextResults(lastResult) + mc.Observe(err) + return } // azPublicIPAddressesClient implements PublicIPAddressesClient. @@ -428,14 +434,17 @@ func (az *azPublicIPAddressesClient) List(resourceGroupName string) (result netw return } -func (az *azPublicIPAddressesClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { +func (az *azPublicIPAddressesClient) ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { az.rateLimiter.Accept() glog.V(10).Infof("azPublicIPAddressesClient.ListNextResults(%q): start", lastResults) defer func() { glog.V(10).Infof("azPublicIPAddressesClient.ListNextResults(%q): end", lastResults) }() - return az.client.ListNextResults(lastResults) + mc := newMetricContext("public_ip_addresses", "list_next_results", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListNextResults(lastResults) + mc.Observe(err) + return } // azSubnetsClient implements SubnetsClient. 
@@ -653,14 +662,17 @@ func (az *azVirtualMachineScaleSetsClient) List(resourceGroupName string) (resul return } -func (az *azVirtualMachineScaleSetsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { +func (az *azVirtualMachineScaleSetsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { az.rateLimiter.Accept() glog.V(10).Infof("azVirtualMachineScaleSetsClient.ListNextResults(%q): start", lastResults) defer func() { glog.V(10).Infof("azVirtualMachineScaleSetsClient.ListNextResults(%q): end", lastResults) }() - return az.client.ListNextResults(lastResults) + mc := newMetricContext("vmss", "list_next_results", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListNextResults(lastResults) + mc.Observe(err) + return } func (az *azVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) { @@ -737,14 +749,17 @@ func (az *azVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virt return } -func (az *azVirtualMachineScaleSetVMsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { +func (az *azVirtualMachineScaleSetVMsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { az.rateLimiter.Accept() glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.ListNextResults(%q,%q,%q): start", lastResults) defer func() { glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.ListNextResults(%q,%q,%q): end", lastResults) }() - return az.client.ListNextResults(lastResults) + 
mc := newMetricContext("vmssvm", "list_next_results", resourceGroupName, az.client.SubscriptionID) + result, err = az.client.ListNextResults(lastResults) + mc.Observe(err) + return } // azRoutesClient implements RoutesClient. diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index dd66d509f17..ec58e57da44 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -144,7 +144,7 @@ func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.Loa return result, nil } -func (fLBC fakeAzureLBClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { +func (fLBC fakeAzureLBClient) ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() result.Response.Response = &http.Response{ @@ -264,7 +264,7 @@ func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName } } -func (fAPC fakeAzurePIPClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { +func (fAPC fakeAzurePIPClient) ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() return network.PublicIPAddressListResult{}, nil @@ -411,7 +411,7 @@ func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (resul result.Value = &value return result, nil } -func (fVMC fakeAzureVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { +func (fVMC fakeAzureVirtualMachinesClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result 
compute.VirtualMachineListResult, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() return compute.VirtualMachineListResult{}, nil @@ -659,7 +659,7 @@ func (fVMC fakeVirtualMachineScaleSetVMsClient) List(resourceGroupName string, v return result, nil } -func (fVMC fakeVirtualMachineScaleSetVMsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { +func (fVMC fakeVirtualMachineScaleSetVMsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { return result, nil } @@ -764,7 +764,7 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) ( return result, nil } -func (fVMSSC fakeVirtualMachineScaleSetsClient) ListNextResults(lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { +func (fVMSSC fakeVirtualMachineScaleSetsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { return result, nil } diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index 2116e4f0dc7..48bd3adcd8f 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -422,7 +422,7 @@ func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) { if result.NextLink != nil { backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - result, err = ss.VirtualMachineScaleSetsClient.ListNextResults(result) + result, err = ss.VirtualMachineScaleSetsClient.ListNextResults(ss.ResourceGroup, result) if err != nil { glog.Errorf("VirtualMachineScaleSetsClient.ListNextResults for %v failed: %v", ss.ResourceGroup, err) return false, err @@ -468,7 
+468,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir if result.NextLink != nil { backoffError := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - result, err = ss.VirtualMachineScaleSetVMsClient.ListNextResults(result) + result, err = ss.VirtualMachineScaleSetVMsClient.ListNextResults(ss.ResourceGroup, result) if err != nil { glog.Errorf("VirtualMachineScaleSetVMsClient.ListNextResults for %v failed: %v", scaleSetName, err) return false, err From eb1650ce567e0bf19f310817502a7a4fe3049a11 Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Fri, 12 Jan 2018 17:22:33 +0800 Subject: [PATCH 773/794] remove invalid and useless functions from unit test --- .../admission/plugin/webhook/initializer/initializer_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer_test.go index 553690d9a58..bc05b9c5db2 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer_test.go @@ -26,9 +26,7 @@ import ( type doNothingAdmission struct{} -func (doNothingAdmission) Admit(a admission.Attributes) error { return nil } func (doNothingAdmission) Handles(o admission.Operation) bool { return false } -func (doNothingAdmission) Validate() error { return nil } type fakeServiceResolver struct{} From 4139594e663e6f4c696145724fe4fa358f556338 Mon Sep 17 00:00:00 2001 From: Nikhita Raghunath Date: Fri, 12 Jan 2018 18:57:07 +0530 Subject: [PATCH 774/794] unstructured helpers: print path in error --- .../pkg/apis/meta/v1/unstructured/helpers.go | 56 ++++++++++--------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go 
b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index fdc688f0732..08705ac8410 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -43,14 +43,15 @@ func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, func nestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { var val interface{} = obj - for _, field := range fields { + + for i, field := range fields { if m, ok := val.(map[string]interface{}); ok { val, ok = m[field] if !ok { return nil, false, nil } } else { - return nil, false, fmt.Errorf("%v is of the type %T, expected map[string]interface{}", val, val) + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields[:i+1]), val, val) } } return val, true, nil @@ -65,7 +66,7 @@ func NestedString(obj map[string]interface{}, fields ...string) (string, bool, e } s, ok := val.(string) if !ok { - return "", false, fmt.Errorf("%v is of the type %T, expected string", val, val) + return "", false, fmt.Errorf("%v accessor error: %v is of the type %T, expected string", jsonPath(fields), val, val) } return s, true, nil } @@ -79,7 +80,7 @@ func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error } b, ok := val.(bool) if !ok { - return false, false, fmt.Errorf("%v is of the type %T, expected bool", val, val) + return false, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected bool", jsonPath(fields), val, val) } return b, true, nil } @@ -93,7 +94,7 @@ func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, } f, ok := val.(float64) if !ok { - return 0, false, fmt.Errorf("%v is of the type %T, expected float64", val, val) + return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64", jsonPath(fields), val, val) } return f, 
true, nil } @@ -107,7 +108,7 @@ func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, err } i, ok := val.(int64) if !ok { - return 0, false, fmt.Errorf("%v is of the type %T, expected int64", val, val) + return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected int64", jsonPath(fields), val, val) } return i, true, nil } @@ -121,14 +122,14 @@ func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, } m, ok := val.([]interface{}) if !ok { - return nil, false, fmt.Errorf("%v is of the type %T, expected []interface{}", val, val) + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val) } strSlice := make([]string, 0, len(m)) for _, v := range m { if str, ok := v.(string); ok { strSlice = append(strSlice, str) } else { - return nil, false, fmt.Errorf("contains non-string key in the slice: %v is of the type %T, expected string", v, v) + return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the slice: %v is of the type %T, expected string", jsonPath(fields), v, v) } } return strSlice, true, nil @@ -143,7 +144,7 @@ func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, b } _, ok := val.([]interface{}) if !ok { - return nil, false, fmt.Errorf("%v is of the type %T, expected []interface{}", val, val) + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val) } return runtime.DeepCopyJSONValue(val).([]interface{}), true, nil } @@ -160,7 +161,7 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s if str, ok := v.(string); ok { strMap[k] = str } else { - return nil, false, fmt.Errorf("contains non-string key in the map: %v is of the type %T, expected string", v, v) + return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", 
jsonPath(fields), v, v) } } return strMap, true, nil @@ -185,25 +186,26 @@ func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]i } m, ok := val.(map[string]interface{}) if !ok { - return nil, false, fmt.Errorf("%v is of the type %T, expected map[string]interface{}", val, val) + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val) } return m, true, nil } // SetNestedField sets the value of a nested field to a deep copy of the value provided. -// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. -func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) bool { +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) error { return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...) } -func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) bool { +func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) error { m := obj - for _, field := range fields[:len(fields)-1] { + + for i, field := range fields[:len(fields)-1] { if val, ok := m[field]; ok { if valMap, ok := val.(map[string]interface{}); ok { m = valMap } else { - return false + return fmt.Errorf("value cannot be set because %v is not a map[string]interface{}", jsonPath(fields[:i+1])) } } else { newVal := make(map[string]interface{}) @@ -212,12 +214,12 @@ func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields } } m[fields[len(fields)-1]] = value - return true + return nil } // SetNestedStringSlice sets the string slice value of a nested field. -// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. 
-func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) bool { +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) error { m := make([]interface{}, 0, len(value)) // convert []string into []interface{} for _, v := range value { m = append(m, v) @@ -226,14 +228,14 @@ func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ... } // SetNestedSlice sets the slice value of a nested field. -// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. -func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) bool { +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) error { return SetNestedField(obj, value, fields...) } // SetNestedStringMap sets the map[string]string value of a nested field. -// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. -func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) bool { +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) error { m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{} for k, v := range value { m[k] = v @@ -242,8 +244,8 @@ func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fie } // SetNestedMap sets the map[string]interface{} value of a nested field. -// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. 
-func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) bool { +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error { return SetNestedField(obj, value, fields...) } @@ -268,6 +270,10 @@ func getNestedString(obj map[string]interface{}, fields ...string) string { return val } +func jsonPath(fields []string) string { + return "." + strings.Join(fields, ".") +} + func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference { // though this field is a *bool, but when decoded from JSON, it's // unmarshalled as bool. From 32520e09853289a22656b540ed4141e72926a108 Mon Sep 17 00:00:00 2001 From: Cosmin Cojocar Date: Mon, 15 Jan 2018 13:32:42 +0100 Subject: [PATCH 775/794] Review fixes --- pkg/cloudprovider/providers/azure/azure_client.go | 2 +- pkg/cloudprovider/providers/azure/azure_metrics.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index a3bce657b90..a8bf5a2eac6 100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -84,7 +84,7 @@ type VirtualMachineScaleSetsClient interface { CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) - ListNextResults(resourceGroupName string, astResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) + ListNextResults(resourceGroupName string, lastResults 
compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) } diff --git a/pkg/cloudprovider/providers/azure/azure_metrics.go b/pkg/cloudprovider/providers/azure/azure_metrics.go index 2ef21bb5a5c..908ce7a5944 100644 --- a/pkg/cloudprovider/providers/azure/azure_metrics.go +++ b/pkg/cloudprovider/providers/azure/azure_metrics.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From 1bdc99d2ecc406aa1d3128bc008fcbbe80d58807 Mon Sep 17 00:00:00 2001 From: Shyam Jeedigunta Date: Thu, 11 Jan 2018 13:25:57 +0100 Subject: [PATCH 776/794] Add script to run integration benchmark tests in dockerized env --- hack/jenkins/benchmark-dockerized.sh | 51 ++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100755 hack/jenkins/benchmark-dockerized.sh diff --git a/hack/jenkins/benchmark-dockerized.sh b/hack/jenkins/benchmark-dockerized.sh new file mode 100755 index 00000000000..994189dea0e --- /dev/null +++ b/hack/jenkins/benchmark-dockerized.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail +set -o xtrace + +retry() { + for i in {1..5}; do + "$@" && return 0 || sleep $i + done + "$@" +} + +# Runs benchmark integration tests, producing JUnit-style XML test +# reports in ${WORKSPACE}/artifacts. This script is intended to be run from +# kubekins-test container with a kubernetes repo mounted (at the path +# /go/src/k8s.io/kubernetes). See k8s.io/test-infra/scenarios/kubernetes_verify.py. + +export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH} + +retry go get github.com/tools/godep && godep version +retry go get github.com/jstemmer/go-junit-report + +# Disable the Go race detector. +export KUBE_RACE=" " +# Disable coverage report +export KUBE_COVER="n" +# Produce a JUnit-style XML test report. +export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/artifacts +export ARTIFACTS_DIR=${WORKSPACE}/artifacts + +cd /go/src/k8s.io/kubernetes + +./hack/install-etcd.sh + +make test-integration WHAT="$*" KUBE_TEST_ARGS="-run='XXX' -bench=. -benchmem" From 8f9cddda32b65aaaf3325c3dd1c36ee6ebdeaf45 Mon Sep 17 00:00:00 2001 From: Slava Semushin Date: Wed, 10 Jan 2018 20:14:48 +0100 Subject: [PATCH 777/794] cmd/kube-apiserver/app/aggregator.go: add comments for explaining the group/version fields. --- cmd/kube-apiserver/app/aggregator.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/kube-apiserver/app/aggregator.go b/cmd/kube-apiserver/app/aggregator.go index 6d6e469434b..623fcad8158 100644 --- a/cmd/kube-apiserver/app/aggregator.go +++ b/cmd/kube-apiserver/app/aggregator.go @@ -187,8 +187,12 @@ func makeAPIServiceAvailableHealthzCheck(name string, apiServices []*apiregistra }) } +// priority defines group priority that is used in discovery. This controls +// group position in the kubectl output. 
type priority struct { - group int32 + // group indicates the order of the group relative to other groups. + group int32 + // version indicates the relative order of the version inside of its group. version int32 } @@ -229,6 +233,9 @@ var apiVersionPriorities = map[schema.GroupVersion]priority{ {Group: "admissionregistration.k8s.io", Version: "v1beta1"}: {group: 16700, version: 12}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1"}: {group: 16700, version: 9}, {Group: "scheduling.k8s.io", Version: "v1alpha1"}: {group: 16600, version: 9}, + // Append a new group to the end of the list if unsure. + // You can use min(existing group)-100 as the initial value for a group. + // Version can be set to 9 (to have space around) for a new group. } func apiServicesToRegister(delegateAPIServer genericapiserver.DelegationTarget, registration autoregister.AutoAPIServiceRegistration) []*apiregistration.APIService { From 1a552bbe149373c056ee004304d7e5abaa89f4c6 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Mon, 27 Nov 2017 14:44:04 +0100 Subject: [PATCH 778/794] admission: do not leak admission config types outside of the plugins --- .../pkg/admission/eventratelimit/admission.go | 5 - .../podtolerationrestriction/admission.go | 4 - .../pkg/admission/resourcequota/admission.go | 5 - .../src/k8s.io/apiserver/pkg/admission/BUILD | 1 + .../k8s.io/apiserver/pkg/admission/config.go | 16 +-- .../apiserver/pkg/admission/config_test.go | 100 +++++++++++++++++- .../admission/plugin/webhook/validating/BUILD | 2 - .../plugin/webhook/validating/admission.go | 5 - .../k8s.io/apiserver/pkg/admission/plugins.go | 10 +- .../apiserver/pkg/apis/apiserver/types.go | 2 +- .../pkg/apis/apiserver/v1alpha1/conversion.go | 88 --------------- .../pkg/apis/apiserver/v1alpha1/types.go | 2 +- .../apiserver/pkg/server/options/admission.go | 11 +- vendor/github.com/jmespath/go-jmespath/BUILD | 5 +- 14 files changed, 119 insertions(+), 137 deletions(-) delete mode 100644 staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go diff --git a/plugin/pkg/admission/eventratelimit/admission.go b/plugin/pkg/admission/eventratelimit/admission.go index 8cd64ebe587..7e025319690 100644 --- a/plugin/pkg/admission/eventratelimit/admission.go +++ b/plugin/pkg/admission/eventratelimit/admission.go @@ -23,7 +23,6 @@ import ( "k8s.io/client-go/util/flowcontrol" api "k8s.io/kubernetes/pkg/apis/core" eventratelimitapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit" - eventratelimitapiv1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1" "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation" ) @@ -44,10 +43,6 @@ func Register(plugins *admission.Plugins) { } return newEventRateLimit(configuration, realClock{}) }) - - // add our config types - eventratelimitapi.AddToScheme(plugins.ConfigScheme) - eventratelimitapiv1alpha1.AddToScheme(plugins.ConfigScheme) } // Plugin implements an 
admission controller that can enforce event rate limits diff --git a/plugin/pkg/admission/podtolerationrestriction/admission.go b/plugin/pkg/admission/podtolerationrestriction/admission.go index 3318e221b2c..0bfe76696b9 100644 --- a/plugin/pkg/admission/podtolerationrestriction/admission.go +++ b/plugin/pkg/admission/podtolerationrestriction/admission.go @@ -38,7 +38,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/util/tolerations" pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" - pluginapiv1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1" ) // Register registers a plugin @@ -50,9 +49,6 @@ func Register(plugins *admission.Plugins) { } return NewPodTolerationsPlugin(pluginConfig), nil }) - // add our config types - pluginapi.AddToScheme(plugins.ConfigScheme) - pluginapiv1alpha1.AddToScheme(plugins.ConfigScheme) } // The annotation keys for default and whitelist of tolerations diff --git a/plugin/pkg/admission/resourcequota/admission.go b/plugin/pkg/admission/resourcequota/admission.go index 24f8b6354b9..c6e89aad806 100644 --- a/plugin/pkg/admission/resourcequota/admission.go +++ b/plugin/pkg/admission/resourcequota/admission.go @@ -28,7 +28,6 @@ import ( kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" "k8s.io/kubernetes/pkg/quota" resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" - resourcequotaapiv1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1" "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation" ) @@ -49,10 +48,6 @@ func Register(plugins *admission.Plugins) { } return NewResourceQuota(configuration, 5, make(chan struct{})) }) - - // add our config types - resourcequotaapi.AddToScheme(plugins.ConfigScheme) - resourcequotaapiv1alpha1.AddToScheme(plugins.ConfigScheme) } // 
QuotaAdmission implements an admission controller that can enforce quota constraints diff --git a/staging/src/k8s.io/apiserver/pkg/admission/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/BUILD index aab87e45791..4af97de951a 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/BUILD @@ -20,6 +20,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", ], diff --git a/staging/src/k8s.io/apiserver/pkg/admission/config.go b/staging/src/k8s.io/apiserver/pkg/admission/config.go index eb979861207..e716e62238a 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/config.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/config.go @@ -126,16 +126,10 @@ type configProvider struct { } // GetAdmissionPluginConfigurationFor returns a reader that holds the admission plugin configuration. 
-func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfiguration, scheme *runtime.Scheme) (io.Reader, error) { - // if there is nothing nested in the object, we return the named location - obj := pluginCfg.Configuration - if obj != nil { - // serialize the configuration and build a reader for it - content, err := writeYAML(obj, scheme) - if err != nil { - return nil, err - } - return bytes.NewBuffer(content), nil +func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfiguration) (io.Reader, error) { + // if there is a nest object, return it directly + if pluginCfg.Configuration != nil { + return bytes.NewBuffer(pluginCfg.Configuration.Raw), nil } // there is nothing nested, so we delegate to path if pluginCfg.Path != "" { @@ -162,7 +156,7 @@ func (p configProvider) ConfigFor(pluginName string) (io.Reader, error) { if pluginName != pluginCfg.Name { continue } - pluginConfig, err := GetAdmissionPluginConfigurationFor(pluginCfg, p.scheme) + pluginConfig, err := GetAdmissionPluginConfigurationFor(pluginCfg) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/config_test.go b/staging/src/k8s.io/apiserver/pkg/admission/config_test.go index debde2463d2..67d8a1a5625 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/config_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/config_test.go @@ -23,6 +23,7 @@ import ( "testing" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" "k8s.io/apiserver/pkg/apis/apiserver" apiserverapi "k8s.io/apiserver/pkg/apis/apiserver" apiserverapiv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" @@ -49,7 +50,7 @@ func TestReadAdmissionConfiguration(t *testing.T) { ExpectedAdmissionConfig *apiserver.AdmissionConfiguration PluginNames []string }{ - "v1Alpha1 configuration - path fixup": { + "v1alpha1 configuration - path fixup": { ConfigBody: `{ "apiVersion": "apiserver.k8s.io/v1alpha1", "kind": "AdmissionConfiguration", @@ 
-70,7 +71,7 @@ func TestReadAdmissionConfiguration(t *testing.T) { }, PluginNames: []string{}, }, - "v1Alpha1 configuration - abspath": { + "v1alpha1 configuration - abspath": { ConfigBody: `{ "apiVersion": "apiserver.k8s.io/v1alpha1", "kind": "AdmissionConfiguration", @@ -153,3 +154,98 @@ func TestReadAdmissionConfiguration(t *testing.T) { } } } + +func TestEmbeddedConfiguration(t *testing.T) { + // create a place holder file to hold per test config + configFile, err := ioutil.TempFile("", "admission-plugin-config") + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if err = configFile.Close(); err != nil { + t.Fatalf("unexpected err: %v", err) + } + configFileName := configFile.Name() + + testCases := map[string]struct { + ConfigBody string + ExpectedConfig string + }{ + "versioned configuration": { + ConfigBody: `{ + "apiVersion": "apiserver.k8s.io/v1alpha1", + "kind": "AdmissionConfiguration", + "plugins": [ + { + "name": "Foo", + "configuration": { + "apiVersion": "foo.admission.k8s.io/v1alpha1", + "kind": "Configuration", + "foo": "bar" + } + } + ]}`, + ExpectedConfig: `{ + "apiVersion": "foo.admission.k8s.io/v1alpha1", + "kind": "Configuration", + "foo": "bar" + }`, + }, + "legacy configuration": { + ConfigBody: `{ + "apiVersion": "apiserver.k8s.io/v1alpha1", + "kind": "AdmissionConfiguration", + "plugins": [ + { + "name": "Foo", + "configuration": { + "foo": "bar" + } + } + ]}`, + ExpectedConfig: `{ + "foo": "bar" + }`, + }, + } + + for desc, test := range testCases { + scheme := runtime.NewScheme() + apiserverapi.AddToScheme(scheme) + apiserverapiv1alpha1.AddToScheme(scheme) + + if err = ioutil.WriteFile(configFileName, []byte(test.ConfigBody), 0644); err != nil { + t.Errorf("[%s] unexpected err writing temp file: %v", desc, err) + continue + } + config, err := ReadAdmissionConfiguration([]string{"Foo"}, configFileName, scheme) + if err != nil { + t.Errorf("[%s] unexpected err: %v", desc, err) + continue + } + r, err := config.ConfigFor("Foo") + 
if err != nil { + t.Errorf("[%s] Failed to get Foo config: %v", desc, err) + continue + } + bs, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("[%s] Failed to read Foo config data: %v", desc, err) + continue + } + + if !equalJSON(test.ExpectedConfig, string(bs)) { + t.Errorf("Unexpected config: expected=%q got=%q", test.ExpectedConfig, string(bs)) + } + } +} + +func equalJSON(a, b string) bool { + var x, y interface{} + if err := json.Unmarshal([]byte(a), &x); err != nil { + return false + } + if err := json.Unmarshal([]byte(b), &y); err != nil { + return false + } + return reflect.DeepEqual(x, y) +} diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD index 5ab45072db6..4226a13912c 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD @@ -23,8 +23,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/metrics:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go index b88556631cf..f68e46fa585 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go @@ -40,8 +40,6 @@ import ( genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer" admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" "k8s.io/apiserver/pkg/admission/plugin/webhook/config" - webhookadmissionapi "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" - webhookadmissionapiv1alpha1 "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1" webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace" "k8s.io/apiserver/pkg/admission/plugin/webhook/request" @@ -66,9 +64,6 @@ func Register(plugins *admission.Plugins) { return plugin, nil }) - // add our config types - webhookadmissionapi.AddToScheme(plugins.ConfigScheme) - webhookadmissionapiv1alpha1.AddToScheme(plugins.ConfigScheme) } // WebhookSource can list dynamic webhook plugins. diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugins.go b/staging/src/k8s.io/apiserver/pkg/admission/plugins.go index 3ede44a173f..05e321ffc17 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugins.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugins.go @@ -25,8 +25,6 @@ import ( "sort" "sync" - "k8s.io/apimachinery/pkg/runtime" - "github.com/golang/glog" ) @@ -39,16 +37,10 @@ type Factory func(config io.Reader) (Interface, error) type Plugins struct { lock sync.Mutex registry map[string]Factory - - // ConfigScheme is used to parse the admission plugin config file. - // It is exposed to act as a hook for extending server providing their own config. - ConfigScheme *runtime.Scheme } func NewPlugins() *Plugins { - return &Plugins{ - ConfigScheme: runtime.NewScheme(), - } + return &Plugins{} } // All registered admission options. 
diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go index f84fd04a340..e55da95f95d 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -46,5 +46,5 @@ type AdmissionPluginConfiguration struct { // Configuration is an embedded configuration object to be used as the plugin's // configuration. If present, it will be used instead of the path to the configuration file. // +optional - Configuration runtime.Object + Configuration *runtime.Unknown } diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go deleted file mode 100644 index 378cc080d3a..00000000000 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -var _ runtime.NestedObjectDecoder = &AdmissionConfiguration{} - -// DecodeNestedObjects handles encoding RawExtensions on the AdmissionConfiguration, ensuring the -// objects are decoded with the provided decoder. 
-func (c *AdmissionConfiguration) DecodeNestedObjects(d runtime.Decoder) error { - // decoding failures result in a runtime.Unknown object being created in Object and passed - // to conversion - for k, v := range c.Plugins { - decodeNestedRawExtensionOrUnknown(d, &v.Configuration) - c.Plugins[k] = v - } - return nil -} - -var _ runtime.NestedObjectEncoder = &AdmissionConfiguration{} - -// EncodeNestedObjects handles encoding RawExtensions on the AdmissionConfiguration, ensuring the -// objects are encoded with the provided encoder. -func (c *AdmissionConfiguration) EncodeNestedObjects(e runtime.Encoder) error { - for k, v := range c.Plugins { - if err := encodeNestedRawExtension(e, &v.Configuration); err != nil { - return err - } - c.Plugins[k] = v - } - return nil -} - -// decodeNestedRawExtensionOrUnknown decodes the raw extension into an object once. If called -// On a RawExtension that has already been decoded (has an object), it will not run again. -func decodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) { - if ext.Raw == nil || ext.Object != nil { - return - } - obj, gvk, err := d.Decode(ext.Raw, nil, nil) - if err != nil { - unk := &runtime.Unknown{Raw: ext.Raw} - if runtime.IsNotRegisteredError(err) { - if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil { - unk.APIVersion = gvk.GroupVersion().String() - unk.Kind = gvk.Kind - ext.Object = unk - return - } - } - // TODO: record mime-type with the object - if gvk != nil { - unk.APIVersion = gvk.GroupVersion().String() - unk.Kind = gvk.Kind - } - obj = unk - } - ext.Object = obj -} - -func encodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error { - if ext.Raw != nil || ext.Object == nil { - return nil - } - data, err := runtime.Encode(e, ext.Object) - if err != nil { - return err - } - ext.Raw = data - return nil -} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go 
b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go index 522c41c4143..239b8e20e04 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -46,5 +46,5 @@ type AdmissionPluginConfiguration struct { // Configuration is an embedded configuration object to be used as the plugin's // configuration. If present, it will be used instead of the path to the configuration file. // +optional - Configuration runtime.RawExtension `json:"configuration"` + Configuration *runtime.Unknown `json:"configuration"` } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go index aa180378dc5..66b0b97ba63 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go @@ -38,6 +38,13 @@ import ( "k8s.io/client-go/rest" ) +var scheme = runtime.NewScheme() + +func init() { + apiserverapi.AddToScheme(scheme) + apiserverapiv1alpha1.AddToScheme(scheme) +} + // AdmissionOptions holds the admission options type AdmissionOptions struct { // RecommendedPluginOrder holds an ordered list of plugin names we recommend to use by default @@ -69,8 +76,6 @@ func NewAdmissionOptions() *AdmissionOptions { RecommendedPluginOrder: []string{lifecycle.PluginName, initialization.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName}, DefaultOffPlugins: []string{initialization.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName}, } - apiserverapi.AddToScheme(options.Plugins.ConfigScheme) - apiserverapiv1alpha1.AddToScheme(options.Plugins.ConfigScheme) server.RegisterAllAdmissionPlugins(options.Plugins) return options } @@ -120,7 +125,7 @@ func (a *AdmissionOptions) ApplyTo( pluginNames = a.enabledPluginNames() } - pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(pluginNames, a.ConfigFile, 
a.Plugins.ConfigScheme) + pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(pluginNames, a.ConfigFile, scheme) if err != nil { return fmt.Errorf("failed to read plugin config: %v", err) } diff --git a/vendor/github.com/jmespath/go-jmespath/BUILD b/vendor/github.com/jmespath/go-jmespath/BUILD index f4c95791b8b..a3dbf5f5c3a 100644 --- a/vendor/github.com/jmespath/go-jmespath/BUILD +++ b/vendor/github.com/jmespath/go-jmespath/BUILD @@ -25,7 +25,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//vendor/github.com/jmespath/go-jmespath/cmd/jpgo:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) From 83268fa9a8642c9754eeadca76c1b572c4c0ec43 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Thu, 11 Jan 2018 17:17:27 +0100 Subject: [PATCH 779/794] Update generated files --- .../pkg/apis/apiserver/v1alpha1/BUILD | 1 - .../v1alpha1/zz_generated.conversion.go | 34 ++++--------------- .../v1alpha1/zz_generated.deepcopy.go | 10 +++++- .../apis/apiserver/zz_generated.deepcopy.go | 12 ++++--- vendor/github.com/jmespath/go-jmespath/BUILD | 5 +-- 5 files changed, 24 insertions(+), 38 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD index 2640ff64b64..3075c3bbec8 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD @@ -8,7 +8,6 @@ load( go_library( name = "go_default_library", srcs = [ - "conversion.go", "doc.go", "register.go", "types.go", diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go index b1af97ec392..d9668e99029 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go +++ 
b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -21,6 +21,8 @@ limitations under the License. package v1alpha1 import ( + unsafe "unsafe" + conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" apiserver "k8s.io/apiserver/pkg/apis/apiserver" @@ -42,17 +44,7 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error { - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = make([]apiserver.AdmissionPluginConfiguration, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Plugins = nil - } + out.Plugins = *(*[]apiserver.AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) return nil } @@ -62,17 +54,7 @@ func Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration } func autoConvert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error { - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = make([]AdmissionPluginConfiguration, len(*in)) - for i := range *in { - if err := Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Plugins = nil - } + out.Plugins = *(*[]AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) return nil } @@ -84,9 +66,7 @@ func Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration func autoConvert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out 
*apiserver.AdmissionPluginConfiguration, s conversion.Scope) error { out.Name = in.Name out.Path = in.Path - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Configuration, &out.Configuration, s); err != nil { - return err - } + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) return nil } @@ -98,9 +78,7 @@ func Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginC func autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error { out.Name = in.Name out.Path = in.Path - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Configuration, &out.Configuration, s); err != nil { - return err - } + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) return nil } diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go index c8b46fac5d8..d795781ff22 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -60,7 +60,15 @@ func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { *out = *in - in.Configuration.DeepCopyInto(&out.Configuration) + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + if *in == nil { + *out = nil + } else { + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go index 7e5fb6edb45..431abf61d68 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -60,10 +60,14 @@ func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { *out = *in - if in.Configuration == nil { - out.Configuration = nil - } else { - out.Configuration = in.Configuration.DeepCopyObject() + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + if *in == nil { + *out = nil + } else { + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } } return } diff --git a/vendor/github.com/jmespath/go-jmespath/BUILD b/vendor/github.com/jmespath/go-jmespath/BUILD index a3dbf5f5c3a..f4c95791b8b 100644 --- a/vendor/github.com/jmespath/go-jmespath/BUILD +++ b/vendor/github.com/jmespath/go-jmespath/BUILD @@ -25,10 +25,7 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//vendor/github.com/jmespath/go-jmespath/cmd/jpgo:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], visibility = ["//visibility:public"], ) From 7e33b128567700ef114fe15ae43f5e2e662b29cf Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Sun, 14 Jan 2018 11:38:00 -0500 Subject: [PATCH 780/794] Return the 
correct set of supported mime types for non-streaming requests --- .../pkg/endpoints/handlers/create.go | 2 +- .../pkg/endpoints/handlers/delete.go | 4 +-- .../handlers/negotiation/negotiate.go | 27 +++++++++---------- .../pkg/endpoints/handlers/update.go | 2 +- 4 files changed, 17 insertions(+), 18 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go index dc3560623ea..1d474267dfd 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -61,7 +61,7 @@ func createHandler(r rest.NamedCreater, scope RequestScope, typer runtime.Object ctx = request.WithNamespace(ctx, namespace) gv := scope.Kind.GroupVersion() - s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) if err != nil { scope.err(err, w, req) return diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go index 0bc5a659b55..b8ac281fa76 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -60,7 +60,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestSco return } if len(body) > 0 { - s, err := negotiation.NegotiateInputSerializer(req, metainternalversion.Codecs) + s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversion.Codecs) if err != nil { scope.err(err, w, req) return @@ -228,7 +228,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestSco return } if len(body) > 0 { - s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) if err != nil { scope.err(err, w, req) return diff --git 
a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go index 7f4225a5b93..3edfa675bf8 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go @@ -73,31 +73,30 @@ func NegotiateOutputStreamSerializer(req *http.Request, ns runtime.NegotiatedSer } // NegotiateInputSerializer returns the input serializer for the provided request. -func NegotiateInputSerializer(req *http.Request, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { +func NegotiateInputSerializer(req *http.Request, streaming bool, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { mediaType := req.Header.Get("Content-Type") - return NegotiateInputSerializerForMediaType(mediaType, ns) + return NegotiateInputSerializerForMediaType(mediaType, streaming, ns) } // NegotiateInputSerializerForMediaType returns the appropriate serializer for the given media type or an error. 
-func NegotiateInputSerializerForMediaType(mediaType string, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { +func NegotiateInputSerializerForMediaType(mediaType string, streaming bool, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { mediaTypes := ns.SupportedMediaTypes() if len(mediaType) == 0 { mediaType = mediaTypes[0].MediaType } - mediaType, _, err := mime.ParseMediaType(mediaType) - if err != nil { - _, supported := MediaTypesForSerializer(ns) - return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(supported) - } - - for _, info := range mediaTypes { - if info.MediaType != mediaType { - continue + if mediaType, _, err := mime.ParseMediaType(mediaType); err == nil { + for _, info := range mediaTypes { + if info.MediaType != mediaType { + continue + } + return info, nil } - return info, nil } - _, supported := MediaTypesForSerializer(ns) + supported, streamingSupported := MediaTypesForSerializer(ns) + if streaming { + return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(streamingSupported) + } return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(supported) } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go index 319bfd51b7c..0eac36660a4 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -56,7 +56,7 @@ func UpdateResource(r rest.Updater, scope RequestScope, typer runtime.ObjectType return } - s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) + s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer) if err != nil { scope.err(err, w, req) return From fc37221db5d08eb9553273ad6202a2557cfde131 Mon Sep 17 00:00:00 2001 From: Spyros Trigazis Date: Fri, 12 Jan 2018 09:56:50 +0000 Subject: [PATCH 781/794] Fix comparison of golang versions Change hack/lib/golang.sh to compare golang 
version properly with "sort -s -t. -k 1,1 -k 2,2n -k 3,3n", which sorts key by key and not as strings. --- hack/lib/golang.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 3e12b3170a4..589e965afb4 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -323,7 +323,7 @@ EOF go_version=($(go version)) local minimum_go_version minimum_go_version=go1.9.1 - if [[ "${go_version[2]}" < "${minimum_go_version}" && "${go_version[2]}" != "devel" ]]; then + if [[ "${minimum_go_version}" != $(echo -e "${minimum_go_version}\n${go_version[2]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then kube::log::usage_from_stdin < Date: Mon, 15 Jan 2018 14:41:42 -0500 Subject: [PATCH 782/794] Limit all category to apps group for ds/deployment/replicaset --- hack/make-rules/test-cmd-util.sh | 8 ++++++-- pkg/registry/extensions/daemonset/storage/storage.go | 10 ++++++++-- pkg/registry/extensions/deployment/storage/storage.go | 10 ++++++++-- pkg/registry/extensions/replicaset/storage/storage.go | 10 ++++++++-- pkg/registry/extensions/rest/storage_extensions.go | 6 +++--- 5 files changed, 33 insertions(+), 11 deletions(-) diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index aaed0980807..0120083319d 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -1397,11 +1397,15 @@ run_kubectl_get_tests() { kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK" kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK" kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK" + kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/daemonsets 200 OK" + kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/deployments 200 OK" + kube::test::if_has_string 
"${output_message}" "/apis/apps/v1/namespaces/default/replicasets 200 OK" kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/statefulsets 200 OK" kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200" kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK" - kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK" - kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK" + kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/daemonsets 200 OK" + kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK" + kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK" ### Test kubectl get chunk size output_message=$(kubectl --v=6 get clusterrole --chunk-size=10 2>&1 "${kube_flags[@]}") diff --git a/pkg/registry/extensions/daemonset/storage/storage.go b/pkg/registry/extensions/daemonset/storage/storage.go index 461e5a97ff2..3291ba23d97 100644 --- a/pkg/registry/extensions/daemonset/storage/storage.go +++ b/pkg/registry/extensions/daemonset/storage/storage.go @@ -33,6 +33,7 @@ import ( // rest implements a RESTStorage for DaemonSets type REST struct { *genericregistry.Store + categories []string } // NewREST returns a RESTStorage object that will work against DaemonSets. 
@@ -56,7 +57,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { statusStore := *store statusStore.UpdateStrategy = daemonset.StatusStrategy - return &REST{store}, &StatusREST{store: &statusStore} + return &REST{store, []string{"all"}}, &StatusREST{store: &statusStore} } // Implement ShortNamesProvider @@ -71,7 +72,12 @@ var _ rest.CategoriesProvider = &REST{} // Categories implements the CategoriesProvider interface. Returns a list of categories a resource is part of. func (r *REST) Categories() []string { - return []string{"all"} + return r.categories +} + +func (r *REST) WithCategories(categories []string) *REST { + r.categories = categories + return r } // StatusREST implements the REST endpoint for changing the status of a daemonset diff --git a/pkg/registry/extensions/deployment/storage/storage.go b/pkg/registry/extensions/deployment/storage/storage.go index 6cf80f8dcc7..1601c5b4f59 100644 --- a/pkg/registry/extensions/deployment/storage/storage.go +++ b/pkg/registry/extensions/deployment/storage/storage.go @@ -63,6 +63,7 @@ func NewStorage(optsGetter generic.RESTOptionsGetter) DeploymentStorage { type REST struct { *genericregistry.Store + categories []string } // NewREST returns a RESTStorage object that will work against deployments. @@ -83,7 +84,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Rollbac statusStore := *store statusStore.UpdateStrategy = deployment.StatusStrategy - return &REST{store}, &StatusREST{store: &statusStore}, &RollbackREST{store: store} + return &REST{store, []string{"all"}}, &StatusREST{store: &statusStore}, &RollbackREST{store: store} } // Implement ShortNamesProvider @@ -99,7 +100,12 @@ var _ rest.CategoriesProvider = &REST{} // Categories implements the CategoriesProvider interface. Returns a list of categories a resource is part of. 
func (r *REST) Categories() []string { - return []string{"all"} + return r.categories +} + +func (r *REST) WithCategories(categories []string) *REST { + r.categories = categories + return r } // StatusREST implements the REST endpoint for changing the status of a deployment diff --git a/pkg/registry/extensions/replicaset/storage/storage.go b/pkg/registry/extensions/replicaset/storage/storage.go index 893e66390da..faa0021df53 100644 --- a/pkg/registry/extensions/replicaset/storage/storage.go +++ b/pkg/registry/extensions/replicaset/storage/storage.go @@ -62,6 +62,7 @@ func NewStorage(optsGetter generic.RESTOptionsGetter) ReplicaSetStorage { type REST struct { *genericregistry.Store + categories []string } // NewREST returns a RESTStorage object that will work against ReplicaSet. @@ -86,7 +87,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { statusStore := *store statusStore.UpdateStrategy = replicaset.StatusStrategy - return &REST{store}, &StatusREST{store: &statusStore} + return &REST{store, []string{"all"}}, &StatusREST{store: &statusStore} } // Implement ShortNamesProvider @@ -102,7 +103,12 @@ var _ rest.CategoriesProvider = &REST{} // Categories implements the CategoriesProvider interface. Returns a list of categories a resource is part of. 
func (r *REST) Categories() []string { - return []string{"all"} + return r.categories +} + +func (r *REST) WithCategories(categories []string) *REST { + r.categories = categories + return r } // StatusREST implements the REST endpoint for changing the status of a ReplicaSet diff --git a/pkg/registry/extensions/rest/storage_extensions.go b/pkg/registry/extensions/rest/storage_extensions.go index 6de5b96233a..f879b87d57d 100644 --- a/pkg/registry/extensions/rest/storage_extensions.go +++ b/pkg/registry/extensions/rest/storage_extensions.go @@ -62,12 +62,12 @@ func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorag if apiResourceConfigSource.ResourceEnabled(version.WithResource("daemonsets")) { daemonSetStorage, daemonSetStatusStorage := daemonstore.NewREST(restOptionsGetter) - storage["daemonsets"] = daemonSetStorage + storage["daemonsets"] = daemonSetStorage.WithCategories(nil) storage["daemonsets/status"] = daemonSetStatusStorage } if apiResourceConfigSource.ResourceEnabled(version.WithResource("deployments")) { deploymentStorage := deploymentstore.NewStorage(restOptionsGetter) - storage["deployments"] = deploymentStorage.Deployment + storage["deployments"] = deploymentStorage.Deployment.WithCategories(nil) storage["deployments/status"] = deploymentStorage.Status storage["deployments/rollback"] = deploymentStorage.Rollback storage["deployments/scale"] = deploymentStorage.Scale @@ -83,7 +83,7 @@ func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorag } if apiResourceConfigSource.ResourceEnabled(version.WithResource("replicasets")) { replicaSetStorage := replicasetstore.NewStorage(restOptionsGetter) - storage["replicasets"] = replicaSetStorage.ReplicaSet + storage["replicasets"] = replicaSetStorage.ReplicaSet.WithCategories(nil) storage["replicasets/status"] = replicaSetStorage.Status storage["replicasets/scale"] = replicaSetStorage.Scale } From aeb7428c895b5fe4757a8c1a5d47592ae8708a49 Mon Sep 17 00:00:00 2001 From: 
Davanum Srinivas Date: Mon, 15 Jan 2018 15:06:43 -0500 Subject: [PATCH 783/794] Log message at a better level We don't really need to log this meessage at level 1. --- pkg/cloudprovider/providers/openstack/openstack_instances.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_instances.go b/pkg/cloudprovider/providers/openstack/openstack_instances.go index 981ff7b9f89..c1031e4f5e9 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_instances.go +++ b/pkg/cloudprovider/providers/openstack/openstack_instances.go @@ -43,7 +43,7 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) { return nil, false } - glog.V(1).Info("Claiming to support Instances") + glog.V(4).Info("Claiming to support Instances") return &Instances{ compute: compute, From ef93e0f4267e0f5c9e396bedd1213ec695c4eba0 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 15 Jan 2018 14:23:47 +0800 Subject: [PATCH 784/794] Convert nodeName to lower case for vmss instances This is because Kubelet always converts hostname to lower case. 
--- pkg/cloudprovider/providers/azure/azure_util_vmss.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_util_vmss.go index 48bd3adcd8f..34585208977 100644 --- a/pkg/cloudprovider/providers/azure/azure_util_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -121,7 +121,7 @@ func (ss *scaleSet) updateCache() error { for _, vm := range vms { nodeName := "" if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil { - nodeName = *vm.OsProfile.ComputerName + nodeName = strings.ToLower(*vm.OsProfile.ComputerName) } vmSize := "" From 79da10fb903b03d502e0eb23e9cb455db13f4f25 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 15 Jan 2018 14:26:29 +0800 Subject: [PATCH 785/794] Rename filenames for clear --- .../providers/azure/{azure_util_cache.go => azure_cache.go} | 0 .../azure/{azure_util_cache_test.go => azure_cache_test.go} | 0 .../providers/azure/{azure_util.go => azure_standard.go} | 0 .../azure/{azure_util_test.go => azure_standard_test.go} | 0 .../providers/azure/{azure_util_vmss.go => azure_vmss.go} | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename pkg/cloudprovider/providers/azure/{azure_util_cache.go => azure_cache.go} (100%) rename pkg/cloudprovider/providers/azure/{azure_util_cache_test.go => azure_cache_test.go} (100%) rename pkg/cloudprovider/providers/azure/{azure_util.go => azure_standard.go} (100%) rename pkg/cloudprovider/providers/azure/{azure_util_test.go => azure_standard_test.go} (100%) rename pkg/cloudprovider/providers/azure/{azure_util_vmss.go => azure_vmss.go} (100%) diff --git a/pkg/cloudprovider/providers/azure/azure_util_cache.go b/pkg/cloudprovider/providers/azure/azure_cache.go similarity index 100% rename from pkg/cloudprovider/providers/azure/azure_util_cache.go rename to pkg/cloudprovider/providers/azure/azure_cache.go diff --git a/pkg/cloudprovider/providers/azure/azure_util_cache_test.go 
b/pkg/cloudprovider/providers/azure/azure_cache_test.go similarity index 100% rename from pkg/cloudprovider/providers/azure/azure_util_cache_test.go rename to pkg/cloudprovider/providers/azure/azure_cache_test.go diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_standard.go similarity index 100% rename from pkg/cloudprovider/providers/azure/azure_util.go rename to pkg/cloudprovider/providers/azure/azure_standard.go diff --git a/pkg/cloudprovider/providers/azure/azure_util_test.go b/pkg/cloudprovider/providers/azure/azure_standard_test.go similarity index 100% rename from pkg/cloudprovider/providers/azure/azure_util_test.go rename to pkg/cloudprovider/providers/azure/azure_standard_test.go diff --git a/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go similarity index 100% rename from pkg/cloudprovider/providers/azure/azure_util_vmss.go rename to pkg/cloudprovider/providers/azure/azure_vmss.go From 66b023110fb80134ecfc7d937f90f27ea3d3205d Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 15 Jan 2018 15:43:48 +0800 Subject: [PATCH 786/794] Fix azure fake clients: use pointers --- .../providers/azure/azure_fakes.go | 155 ++++++++++-------- 1 file changed, 84 insertions(+), 71 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index ec58e57da44..72aca359194 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -37,14 +37,14 @@ type fakeAzureLBClient struct { FakeStore map[string]map[string]network.LoadBalancer } -func newFakeAzureLBClient() fakeAzureLBClient { - fLBC := fakeAzureLBClient{} +func newFakeAzureLBClient() *fakeAzureLBClient { + fLBC := &fakeAzureLBClient{} fLBC.FakeStore = make(map[string]map[string]network.LoadBalancer) fLBC.mutex = &sync.Mutex{} return fLBC } -func (fLBC fakeAzureLBClient) 
CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) { +func (fLBC *fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() resultChan := make(chan network.LoadBalancer, 1) @@ -80,7 +80,7 @@ func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalan return resultChan, errChan } -func (fLBC fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { +func (fLBC *fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() respChan := make(chan autorest.Response, 1) @@ -113,7 +113,7 @@ func (fLBC fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName return respChan, errChan } -func (fLBC fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) { +func (fLBC *fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() if _, ok := fLBC.FakeStore[resourceGroupName]; ok { @@ -127,7 +127,7 @@ func (fLBC fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName str } } -func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) { +func (fLBC *fakeAzureLBClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() var value []network.LoadBalancer @@ -144,7 +144,7 @@ func (fLBC fakeAzureLBClient) List(resourceGroupName string) 
(result network.Loa return result, nil } -func (fLBC fakeAzureLBClient) ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { +func (fLBC *fakeAzureLBClient) ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { fLBC.mutex.Lock() defer fLBC.mutex.Unlock() result.Response.Response = &http.Response{ @@ -172,15 +172,15 @@ func getpublicIPAddressID(subscriptionID string, resourceGroupName, pipName stri pipName) } -func newFakeAzurePIPClient(subscriptionID string) fakeAzurePIPClient { - fAPC := fakeAzurePIPClient{} +func newFakeAzurePIPClient(subscriptionID string) *fakeAzurePIPClient { + fAPC := &fakeAzurePIPClient{} fAPC.FakeStore = make(map[string]map[string]network.PublicIPAddress) fAPC.SubscriptionID = subscriptionID fAPC.mutex = &sync.Mutex{} return fAPC } -func (fAPC fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) { +func (fAPC *fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() resultChan := make(chan network.PublicIPAddress, 1) @@ -217,7 +217,7 @@ func (fAPC fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIP return resultChan, errChan } -func (fAPC fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { +func (fAPC *fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() respChan := make(chan autorest.Response, 1) @@ 
-250,7 +250,7 @@ func (fAPC fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressN return respChan, errChan } -func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { +func (fAPC *fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() if _, ok := fAPC.FakeStore[resourceGroupName]; ok { @@ -264,13 +264,13 @@ func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName } } -func (fAPC fakeAzurePIPClient) ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { +func (fAPC *fakeAzurePIPClient) ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() return network.PublicIPAddressListResult{}, nil } -func (fAPC fakeAzurePIPClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) { +func (fAPC *fakeAzurePIPClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) { fAPC.mutex.Lock() defer fAPC.mutex.Unlock() var value []network.PublicIPAddress @@ -292,15 +292,15 @@ type fakeAzureInterfacesClient struct { FakeStore map[string]map[string]network.Interface } -func newFakeAzureInterfacesClient() fakeAzureInterfacesClient { - fIC := fakeAzureInterfacesClient{} +func newFakeAzureInterfacesClient() *fakeAzureInterfacesClient { + fIC := &fakeAzureInterfacesClient{} fIC.FakeStore = make(map[string]map[string]network.Interface) fIC.mutex = &sync.Mutex{} return fIC } -func (fIC fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan 
network.Interface, <-chan error) { +func (fIC *fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) { fIC.mutex.Lock() defer fIC.mutex.Unlock() resultChan := make(chan network.Interface, 1) @@ -326,7 +326,7 @@ func (fIC fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, ne return resultChan, errChan } -func (fIC fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { +func (fIC *fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { fIC.mutex.Lock() defer fIC.mutex.Unlock() if _, ok := fIC.FakeStore[resourceGroupName]; ok { @@ -340,7 +340,7 @@ func (fIC fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterf } } -func (fIC fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { +func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { return result, nil } @@ -349,14 +349,14 @@ type fakeAzureVirtualMachinesClient struct { FakeStore map[string]map[string]compute.VirtualMachine } -func newFakeAzureVirtualMachinesClient() fakeAzureVirtualMachinesClient { - fVMC := fakeAzureVirtualMachinesClient{} +func newFakeAzureVirtualMachinesClient() *fakeAzureVirtualMachinesClient { + fVMC := &fakeAzureVirtualMachinesClient{} fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachine) fVMC.mutex = &sync.Mutex{} return fVMC } -func (fVMC 
fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) { +func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() resultChan := make(chan compute.VirtualMachine, 1) @@ -381,7 +381,7 @@ func (fVMC fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName stri return resultChan, errChan } -func (fVMC fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { +func (fVMC *fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() if _, ok := fVMC.FakeStore[resourceGroupName]; ok { @@ -395,7 +395,7 @@ func (fVMC fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName } } -func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) { +func (fVMC *fakeAzureVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() var value []compute.VirtualMachine @@ -411,7 +411,7 @@ func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (resul result.Value = &value return result, nil } -func (fVMC fakeAzureVirtualMachinesClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { +func (fVMC *fakeAzureVirtualMachinesClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result 
compute.VirtualMachineListResult, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() return compute.VirtualMachineListResult{}, nil @@ -422,14 +422,14 @@ type fakeAzureSubnetsClient struct { FakeStore map[string]map[string]network.Subnet } -func newFakeAzureSubnetsClient() fakeAzureSubnetsClient { - fASC := fakeAzureSubnetsClient{} +func newFakeAzureSubnetsClient() *fakeAzureSubnetsClient { + fASC := &fakeAzureSubnetsClient{} fASC.FakeStore = make(map[string]map[string]network.Subnet) fASC.mutex = &sync.Mutex{} return fASC } -func (fASC fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) { +func (fASC *fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) { fASC.mutex.Lock() defer fASC.mutex.Unlock() resultChan := make(chan network.Subnet, 1) @@ -455,7 +455,7 @@ func (fASC fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virt return resultChan, errChan } -func (fASC fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { +func (fASC *fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { fASC.mutex.Lock() defer fASC.mutex.Unlock() respChan := make(chan autorest.Response, 1) @@ -489,7 +489,7 @@ func (fASC fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetwo } return respChan, errChan } -func (fASC fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { +func (fASC *fakeAzureSubnetsClient) 
Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { fASC.mutex.Lock() defer fASC.mutex.Unlock() rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND") @@ -503,7 +503,7 @@ func (fASC fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkN Message: "Not such Subnet", } } -func (fASC fakeAzureSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) { +func (fASC *fakeAzureSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) { fASC.mutex.Lock() defer fASC.mutex.Unlock() rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND") @@ -526,14 +526,14 @@ type fakeAzureNSGClient struct { FakeStore map[string]map[string]network.SecurityGroup } -func newFakeAzureNSGClient() fakeAzureNSGClient { - fNSG := fakeAzureNSGClient{} +func newFakeAzureNSGClient() *fakeAzureNSGClient { + fNSG := &fakeAzureNSGClient{} fNSG.FakeStore = make(map[string]map[string]network.SecurityGroup) fNSG.mutex = &sync.Mutex{} return fNSG } -func (fNSG fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) { +func (fNSG *fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) { fNSG.mutex.Lock() defer fNSG.mutex.Unlock() resultChan := make(chan network.SecurityGroup, 1) @@ -558,7 +558,7 @@ func (fNSG fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkS return resultChan, errChan } -func (fNSG fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { +func (fNSG 
*fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { fNSG.mutex.Lock() defer fNSG.mutex.Unlock() respChan := make(chan autorest.Response, 1) @@ -591,7 +591,7 @@ func (fNSG fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityG return respChan, errChan } -func (fNSG fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { +func (fNSG *fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { fNSG.mutex.Lock() defer fNSG.mutex.Unlock() if _, ok := fNSG.FakeStore[resourceGroupName]; ok { @@ -605,7 +605,7 @@ func (fNSG fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGrou } } -func (fNSG fakeAzureNSGClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) { +func (fNSG *fakeAzureNSGClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) { fNSG.mutex.Lock() defer fNSG.mutex.Unlock() var value []network.SecurityGroup @@ -632,15 +632,22 @@ type fakeVirtualMachineScaleSetVMsClient struct { FakeStore map[string]map[string]compute.VirtualMachineScaleSetVM } -func newFakeVirtualMachineScaleSetVMsClient() fakeVirtualMachineScaleSetVMsClient { - fVMC := fakeVirtualMachineScaleSetVMsClient{} +func newFakeVirtualMachineScaleSetVMsClient() *fakeVirtualMachineScaleSetVMsClient { + fVMC := &fakeVirtualMachineScaleSetVMsClient{} fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachineScaleSetVM) fVMC.mutex = &sync.Mutex{} return fVMC } -func (fVMC fakeVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) { +func (fVMC 
*fakeVirtualMachineScaleSetVMsClient) setFakeStore(store map[string]map[string]compute.VirtualMachineScaleSetVM) { + fVMC.mutex.Lock() + defer fVMC.mutex.Unlock() + + fVMC.FakeStore = store +} + +func (fVMC *fakeVirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResult, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() @@ -659,11 +666,11 @@ func (fVMC fakeVirtualMachineScaleSetVMsClient) List(resourceGroupName string, v return result, nil } -func (fVMC fakeVirtualMachineScaleSetVMsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { +func (fVMC *fakeVirtualMachineScaleSetVMsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { return result, nil } -func (fVMC fakeVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) { +func (fVMC *fakeVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() @@ -680,7 +687,7 @@ func (fVMC fakeVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VM } } -func (fVMC fakeVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) { +func (fVMC *fakeVirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) { _, err = fVMC.Get(resourceGroupName, VMScaleSetName, 
instanceID) if err != nil { return result, err @@ -694,15 +701,22 @@ type fakeVirtualMachineScaleSetsClient struct { FakeStore map[string]map[string]compute.VirtualMachineScaleSet } -func newFakeVirtualMachineScaleSetsClient() fakeVirtualMachineScaleSetsClient { - fVMSSC := fakeVirtualMachineScaleSetsClient{} +func newFakeVirtualMachineScaleSetsClient() *fakeVirtualMachineScaleSetsClient { + fVMSSC := &fakeVirtualMachineScaleSetsClient{} fVMSSC.FakeStore = make(map[string]map[string]compute.VirtualMachineScaleSet) fVMSSC.mutex = &sync.Mutex{} return fVMSSC } -func (fVMSSC fakeVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) { +func (fVMSSC *fakeVirtualMachineScaleSetsClient) setFakeStore(store map[string]map[string]compute.VirtualMachineScaleSet) { + fVMSSC.mutex.Lock() + defer fVMSSC.mutex.Unlock() + + fVMSSC.FakeStore = store +} + +func (fVMSSC *fakeVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) { fVMSSC.mutex.Lock() defer fVMSSC.mutex.Unlock() @@ -729,7 +743,7 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName return resultChan, errChan } -func (fVMSSC fakeVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { +func (fVMSSC *fakeVirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { fVMSSC.mutex.Lock() defer fVMSSC.mutex.Unlock() @@ -745,7 +759,7 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) Get(resourceGroupName string, VM } } -func (fVMSSC fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) (result 
compute.VirtualMachineScaleSetListResult, err error) { +func (fVMSSC *fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) (result compute.VirtualMachineScaleSetListResult, err error) { fVMSSC.mutex.Lock() defer fVMSSC.mutex.Unlock() @@ -755,7 +769,6 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) ( value = append(value, v) } } - result.Response.Response = &http.Response{ StatusCode: http.StatusOK, } @@ -764,11 +777,11 @@ func (fVMSSC fakeVirtualMachineScaleSetsClient) List(resourceGroupName string) ( return result, nil } -func (fVMSSC fakeVirtualMachineScaleSetsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { +func (fVMSSC *fakeVirtualMachineScaleSetsClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { return result, nil } -func (fVMSSC fakeVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) { +func (fVMSSC *fakeVirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) { resultChan := make(chan compute.OperationStatusResponse, 1) errChan := make(chan error, 1) var result compute.OperationStatusResponse @@ -792,14 +805,14 @@ type fakeRoutesClient struct { FakeStore map[string]map[string]network.Route } -func newFakeRoutesClient() fakeRoutesClient { - fRC := fakeRoutesClient{} +func newFakeRoutesClient() *fakeRoutesClient { + fRC := &fakeRoutesClient{} fRC.FakeStore = make(map[string]map[string]network.Route) fRC.mutex = 
&sync.Mutex{} return fRC } -func (fRC fakeRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) { +func (fRC *fakeRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) { fRC.mutex.Lock() defer fRC.mutex.Unlock() @@ -826,7 +839,7 @@ func (fRC fakeRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableN return resultChan, errChan } -func (fRC fakeRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { +func (fRC *fakeRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { fRC.mutex.Lock() defer fRC.mutex.Unlock() @@ -866,14 +879,14 @@ type fakeRouteTablesClient struct { FakeStore map[string]map[string]network.RouteTable } -func newFakeRouteTablesClient() fakeRouteTablesClient { - fRTC := fakeRouteTablesClient{} +func newFakeRouteTablesClient() *fakeRouteTablesClient { + fRTC := &fakeRouteTablesClient{} fRTC.FakeStore = make(map[string]map[string]network.RouteTable) fRTC.mutex = &sync.Mutex{} return fRTC } -func (fRTC fakeRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) { +func (fRTC *fakeRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) { fRTC.mutex.Lock() defer fRTC.mutex.Unlock() @@ -900,7 +913,7 @@ func (fRTC fakeRouteTablesClient) CreateOrUpdate(resourceGroupName string, route return resultChan, errChan } -func (fRTC fakeRouteTablesClient) 
Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { +func (fRTC *fakeRouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { fRTC.mutex.Lock() defer fRTC.mutex.Unlock() if _, ok := fRTC.FakeStore[resourceGroupName]; ok { @@ -919,14 +932,14 @@ type fakeStorageAccountClient struct { FakeStore map[string]map[string]storage.Account } -func newFakeStorageAccountClient() fakeStorageAccountClient { - fSAC := fakeStorageAccountClient{} +func newFakeStorageAccountClient() *fakeStorageAccountClient { + fSAC := &fakeStorageAccountClient{} fSAC.FakeStore = make(map[string]map[string]storage.Account) fSAC.mutex = &sync.Mutex{} return fSAC } -func (fSAC fakeStorageAccountClient) Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) { +func (fSAC *fakeStorageAccountClient) Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) { fSAC.mutex.Lock() defer fSAC.mutex.Unlock() @@ -961,7 +974,7 @@ func (fSAC fakeStorageAccountClient) Create(resourceGroupName string, accountNam return resultChan, errChan } -func (fSAC fakeStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { +func (fSAC *fakeStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { fSAC.mutex.Lock() defer fSAC.mutex.Unlock() @@ -985,15 +998,15 @@ func (fSAC fakeStorageAccountClient) Delete(resourceGroupName string, accountNam return result, err } -func (fSAC fakeStorageAccountClient) ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) { +func (fSAC *fakeStorageAccountClient) ListKeys(resourceGroupName string, 
accountName string) (result storage.AccountListKeysResult, err error) { return storage.AccountListKeysResult{}, nil } -func (fSAC fakeStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) { +func (fSAC *fakeStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) { return storage.AccountListResult{}, nil } -func (fSAC fakeStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) { +func (fSAC *fakeStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) { fSAC.mutex.Lock() defer fSAC.mutex.Unlock() @@ -1014,14 +1027,14 @@ type fakeDisksClient struct { FakeStore map[string]map[string]disk.Model } -func newFakeDisksClient() fakeDisksClient { - fDC := fakeDisksClient{} +func newFakeDisksClient() *fakeDisksClient { + fDC := &fakeDisksClient{} fDC.FakeStore = make(map[string]map[string]disk.Model) fDC.mutex = &sync.Mutex{} return fDC } -func (fDC fakeDisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) { +func (fDC *fakeDisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) { fDC.mutex.Lock() defer fDC.mutex.Unlock() @@ -1048,7 +1061,7 @@ func (fDC fakeDisksClient) CreateOrUpdate(resourceGroupName string, diskName str return resultChan, errChan } -func (fDC fakeDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) { +func (fDC *fakeDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) { fDC.mutex.Lock() defer fDC.mutex.Unlock() @@ -1087,7 +1100,7 @@ func (fDC 
fakeDisksClient) Delete(resourceGroupName string, diskName string, can return respChan, errChan } -func (fDC fakeDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) { +func (fDC *fakeDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) { fDC.mutex.Lock() defer fDC.mutex.Unlock() From 2e646b0e1264c35c35230a61b69f997239a0555a Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 15 Jan 2018 15:44:02 +0800 Subject: [PATCH 787/794] Add more unit tests --- pkg/cloudprovider/providers/azure/BUILD | 10 +- .../providers/azure/azure_standard_test.go | 53 ------- .../providers/azure/azure_vmss.go | 2 + .../providers/azure/azure_vmss_test.go | 149 ++++++++++++++++++ 4 files changed, 156 insertions(+), 58 deletions(-) delete mode 100644 pkg/cloudprovider/providers/azure/azure_standard_test.go create mode 100644 pkg/cloudprovider/providers/azure/azure_vmss_test.go diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index 8272b20219d..bd65300c2ed 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -12,6 +12,7 @@ go_library( "azure.go", "azure_backoff.go", "azure_blobDiskController.go", + "azure_cache.go", "azure_client.go", "azure_controllerCommon.go", "azure_fakes.go", @@ -21,12 +22,11 @@ go_library( "azure_managedDiskController.go", "azure_metrics.go", "azure_routes.go", + "azure_standard.go", "azure_storage.go", "azure_storageaccount.go", - "azure_util.go", - "azure_util_cache.go", - "azure_util_vmss.go", "azure_vmsets.go", + "azure_vmss.go", "azure_wrap.go", "azure_zones.go", ], @@ -64,11 +64,11 @@ go_library( go_test( name = "go_default_test", srcs = [ + "azure_cache_test.go", "azure_loadbalancer_test.go", "azure_metrics_test.go", "azure_test.go", - "azure_util_cache_test.go", - "azure_util_test.go", + "azure_vmss_test.go", "azure_wrap_test.go", ], embed = [":go_default_library"], diff --git 
a/pkg/cloudprovider/providers/azure/azure_standard_test.go b/pkg/cloudprovider/providers/azure/azure_standard_test.go deleted file mode 100644 index cac803c2eb0..00000000000 --- a/pkg/cloudprovider/providers/azure/azure_standard_test.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package azure - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGetScaleSetVMInstanceID(t *testing.T) { - tests := []struct { - msg string - machineName string - expectError bool - expectedInstanceID string - }{{ - msg: "invalid vmss instance name", - machineName: "vmvm", - expectError: true, - }, - { - msg: "valid vmss instance name", - machineName: "vm00000Z", - expectError: false, - expectedInstanceID: "35", - }, - } - - for i, test := range tests { - instanceID, err := getScaleSetVMInstanceID(test.machineName) - if test.expectError { - assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) - } else { - assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) - } - } -} diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go index 34585208977..a5ad369dc52 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -190,10 +190,12 @@ func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, er } // Update 
cache and try again. + glog.V(10).Infof("vmss cache before updateCache: %v", ss.cache) if err := ss.updateCache(); err != nil { glog.Errorf("updateCache failed with error: %v", err) return scaleSetVMInfo{}, err } + glog.V(10).Infof("vmss cache after updateCache: %v", ss.cache) vm, found = getVMFromCache(nodeName) if found { return vm, nil diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_test.go b/pkg/cloudprovider/providers/azure/azure_vmss_test.go new file mode 100644 index 00000000000..7830eab783d --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_vmss_test.go @@ -0,0 +1,149 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/stretchr/testify/assert" +) + +func newTestScaleSet() *scaleSet { + ss := newScaleSet(getTestCloud()) + return ss.(*scaleSet) +} + +func setTestVirtualMachineScaleSets(ss *scaleSet, scaleSetName string, vmList []string) { + virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient() + scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet) + scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{ + scaleSetName: { + Name: &scaleSetName, + }, + } + virtualMachineScaleSetsClient.setFakeStore(scaleSets) + + virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient() + ssVMs := make(map[string]map[string]compute.VirtualMachineScaleSetVM) + ssVMs["rg"] = make(map[string]compute.VirtualMachineScaleSetVM) + for i := range vmList { + ID := fmt.Sprintf("azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i) + nodeName := vmList[i] + instanceID := fmt.Sprintf("%d", i) + vmKey := fmt.Sprintf("%s-%s", scaleSetName, nodeName) + networkInterfaces := []compute.NetworkInterfaceReference{ + { + ID: &nodeName, + }, + } + ssVMs["rg"][vmKey] = compute.VirtualMachineScaleSetVM{ + VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + OsProfile: &compute.OSProfile{ + ComputerName: &nodeName, + }, + NetworkProfile: &compute.NetworkProfile{ + NetworkInterfaces: &networkInterfaces, + }, + }, + ID: &ID, + InstanceID: &instanceID, + Location: &ss.Cloud.Location, + } + } + virtualMachineScaleSetVMsClient.setFakeStore(ssVMs) + + ss.Cloud.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient + ss.Cloud.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient +} + +func TestGetScaleSetVMInstanceID(t *testing.T) { + tests := []struct { + msg string + machineName string + expectError bool + 
expectedInstanceID string + }{{ + msg: "invalid vmss instance name", + machineName: "vmvm", + expectError: true, + }, + { + msg: "valid vmss instance name", + machineName: "vm00000Z", + expectError: false, + expectedInstanceID: "35", + }, + } + + for i, test := range tests { + instanceID, err := getScaleSetVMInstanceID(test.machineName) + if test.expectError { + assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) + } else { + assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) + } + } +} + +func TestGetInstanceIDByNodeName(t *testing.T) { + ss := newTestScaleSet() + + testCases := []struct { + description string + scaleSet string + vmList []string + nodeName string + expected string + expectError bool + }{ + { + description: "scaleSet should get instance by node name", + scaleSet: "ss", + vmList: []string{"vm1", "vm2"}, + nodeName: "vm1", + expected: "azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0", + }, + { + description: "scaleSet should get instance by node name with upper cases hostname", + scaleSet: "ss", + vmList: []string{"VM1", "vm2"}, + nodeName: "vm1", + expected: "azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0", + }, + { + description: "scaleSet should not get instance for non-exist nodes", + scaleSet: "ss", + vmList: []string{"VM1", "vm2"}, + nodeName: "vm3", + expectError: true, + }, + } + + for _, test := range testCases { + setTestVirtualMachineScaleSets(ss, test.scaleSet, test.vmList) + real, err := ss.GetInstanceIDByNodeName(test.nodeName) + if test.expectError { + assert.Error(t, err, test.description) + continue + } + + assert.NoError(t, err, test.description) + assert.Equal(t, test.expected, real, test.description) + } +} From 037eec3b9a32c766d965090dd719d772f7247130 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 15 Jan 2018 
20:42:12 -0500 Subject: [PATCH 788/794] Add error helpers and constants for NotAcceptable and UnsupportedMediaType --- .../apimachinery/pkg/api/errors/errors.go | 18 ++++++++++++++++++ .../apimachinery/pkg/apis/meta/v1/types.go | 12 ++++++++++++ .../endpoints/handlers/negotiation/errors.go | 4 ++-- 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go b/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go index 9960600be33..3a2c9549ba2 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -352,6 +352,14 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr reason = metav1.StatusReasonForbidden // the server message has details about who is trying to perform what action. Keep its message. message = serverMessage + case http.StatusNotAcceptable: + reason = metav1.StatusReasonNotAcceptable + // the server message has details about what types are acceptable + message = serverMessage + case http.StatusUnsupportedMediaType: + reason = metav1.StatusReasonUnsupportedMediaType + // the server message has details about what types are acceptable + message = serverMessage case http.StatusMethodNotAllowed: reason = metav1.StatusReasonMethodNotAllowed message = "the server does not allow this method on the requested resource" @@ -434,6 +442,16 @@ func IsResourceExpired(err error) bool { return ReasonForError(err) == metav1.StatusReasonExpired } +// IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header +func IsNotAcceptable(err error) bool { + return ReasonForError(err) == metav1.StatusReasonNotAcceptable +} + +// IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header +func IsUnsupportedMediaType(err error) bool { + return ReasonForError(err) == 
metav1.StatusReasonUnsupportedMediaType +} + // IsMethodNotSupported determines if the err is an error which indicates the provided action could not // be performed because it is not supported by the server. func IsMethodNotSupported(err error) bool { diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index c8ee4e5d65b..750080770c4 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -651,6 +651,18 @@ const ( // can only be created. API calls that return MethodNotAllowed can never succeed. StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" + // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable + // to the server - for instance, attempting to receive protobuf for a resource that supports only json and yaml. + // API calls that return NotAcceptable can never succeed. + // Status code 406 + StatusReasonNotAcceptable StatusReason = "NotAcceptable" + + // StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable + // to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml. + // API calls that return UnsupportedMediaType can never succeed. + // Status code 415 + StatusReasonUnsupportedMediaType StatusReason = "UnsupportedMediaType" + // StatusReasonInternalError indicates that an internal error occurred, it is unexpected // and the outcome of the call is unknown. 
// Details (optional): diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go index 07bc8e280f4..93b17cfb097 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go @@ -41,7 +41,7 @@ func (e errNotAcceptable) Status() metav1.Status { return metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusNotAcceptable, - Reason: metav1.StatusReason("NotAcceptable"), + Reason: metav1.StatusReasonNotAcceptable, Message: e.Error(), } } @@ -63,7 +63,7 @@ func (e errUnsupportedMediaType) Status() metav1.Status { return metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusUnsupportedMediaType, - Reason: metav1.StatusReason("UnsupportedMediaType"), + Reason: metav1.StatusReasonUnsupportedMediaType, Message: e.Error(), } } From aa504ccd57f38bfc23248c68019b7685fb14e668 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 15 Jan 2018 20:42:47 -0500 Subject: [PATCH 789/794] Return correct error when submitting patch in unsupported format --- .../pkg/apiserver/customresource_handler.go | 6 ++++- .../apiserver/pkg/endpoints/handlers/BUILD | 1 + .../apiserver/pkg/endpoints/handlers/patch.go | 27 ++++++++++++------- .../apiserver/pkg/endpoints/installer.go | 11 +++++--- 4 files changed, 32 insertions(+), 13 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 3112b9353fd..17d4329e33a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -227,7 +227,11 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { http.Error(w, fmt.Sprintf("%v not allowed while 
CustomResourceDefinition is terminating", requestInfo.Verb), http.StatusMethodNotAllowed) return } - handler := handlers.PatchResource(storage, requestScope, r.admission, unstructured.UnstructuredObjectConverter{}) + supportedTypes := []string{ + string(types.JSONPatchType), + string(types.MergePatchType), + } + handler := handlers.PatchResource(storage, requestScope, r.admission, unstructured.UnstructuredObjectConverter{}, supportedTypes) handler(w, req) return case "delete": diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD index 16901d6a6b0..2bf780bd6b5 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD @@ -71,6 +71,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/proxy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 1ac736d09dd..a54054127bf 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -32,17 +32,34 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" 
) // PatchResource returns a function that will handle a resource patch // TODO: Eventually PatchResource should just use GuaranteedUpdate and this routine should be a bit cleaner -func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface, converter runtime.ObjectConvertor) http.HandlerFunc { +func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface, converter runtime.ObjectConvertor, patchTypes []string) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { + // Do this first, otherwise name extraction can fail for unrecognized content types + // TODO: handle this in negotiation + contentType := req.Header.Get("Content-Type") + // Remove "; charset=" if included in header. + if idx := strings.Index(contentType, ";"); idx > 0 { + contentType = contentType[:idx] + } + patchType := types.PatchType(contentType) + + // Ensure the patchType is one we support + if !sets.NewString(patchTypes...).Has(contentType) { + scope.err(negotiation.NewUnsupportedMediaTypeError(patchTypes), w, req) + return + } + // TODO: we either want to remove timeout or document it (if we // document, move timeout out of this function and declare it in // api_installer) @@ -63,14 +80,6 @@ func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface return } - // TODO: handle this in negotiation - contentType := req.Header.Get("Content-Type") - // Remove "; charset=" if included in header. 
- if idx := strings.Index(contentType, ";"); idx > 0 { - contentType = contentType[:idx] - } - patchType := types.PatchType(contentType) - patchJS, err := readBody(req) if err != nil { scope.err(err, w, req) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go b/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go index 87bf1700d2b..11f658ee82a 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go @@ -690,7 +690,12 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if hasSubresource { doc = "partially update " + subresource + " of the specified " + kind } - handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulPatchResource(patcher, reqScope, admit, mapping.ObjectConvertor)) + supportedTypes := []string{ + string(types.JSONPatchType), + string(types.MergePatchType), + string(types.StrategicMergePatchType), + } + handler := metrics.InstrumentRouteFunc(action.Verb, resource, subresource, requestScope, restfulPatchResource(patcher, reqScope, admit, mapping.ObjectConvertor, supportedTypes)) route := ws.PATCH(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). 
@@ -1099,9 +1104,9 @@ func restfulUpdateResource(r rest.Updater, scope handlers.RequestScope, typer ru } } -func restfulPatchResource(r rest.Patcher, scope handlers.RequestScope, admit admission.Interface, converter runtime.ObjectConvertor) restful.RouteFunction { +func restfulPatchResource(r rest.Patcher, scope handlers.RequestScope, admit admission.Interface, converter runtime.ObjectConvertor, supportedTypes []string) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { - handlers.PatchResource(r, scope, admit, converter)(res.ResponseWriter, req.Request) + handlers.PatchResource(r, scope, admit, converter, supportedTypes)(res.ResponseWriter, req.Request) } } From 59c305b59083a8e731c76eb859e77222402d1448 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Sun, 14 Jan 2018 13:22:41 -0500 Subject: [PATCH 790/794] Add support for submitting/receiving CRD objects as yaml --- .../pkg/apiserver/customresource_handler.go | 49 +-- .../test/integration/BUILD | 2 + .../test/integration/yaml_test.go | 361 ++++++++++++++++++ 3 files changed, 372 insertions(+), 40 deletions(-) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 17d4329e33a..f87718d7621 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -18,7 +18,6 @@ package apiserver import ( "fmt" - "io" "net/http" "path" "sync" @@ -475,27 +474,20 @@ func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.Serial Framer: json.Framer, }, }, + { + MediaType: "application/yaml", + EncodesAsText: true, + Serializer: json.NewYAMLSerializer(json.DefaultMetaFactory, s.creator, s.typer), + }, } } -func (s 
unstructuredNegotiatedSerializer) EncoderForVersion(serializer runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { - return versioning.NewDefaultingCodecForScheme(Scheme, crEncoderInstance, nil, gv, nil) +func (s unstructuredNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + return versioning.NewDefaultingCodecForScheme(Scheme, encoder, nil, gv, nil) } -func (s unstructuredNegotiatedSerializer) DecoderToVersion(serializer runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { - return unstructuredDecoder{delegate: Codecs.DecoderToVersion(serializer, gv)} -} - -type unstructuredDecoder struct { - delegate runtime.Decoder -} - -func (d unstructuredDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - // Delegate for things other than Unstructured. - if _, ok := into.(runtime.Unstructured); !ok && into != nil { - return d.delegate.Decode(data, defaults, into) - } - return unstructured.UnstructuredJSONScheme.Decode(data, defaults, into) +func (s unstructuredNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { + return versioning.NewDefaultingCodecForScheme(Scheme, nil, decoder, nil, gv) } type unstructuredObjectTyper struct { @@ -515,29 +507,6 @@ func (t unstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { return t.delegate.Recognizes(gvk) || t.unstructuredTyper.Recognizes(gvk) } -var crEncoderInstance = crEncoder{} - -// crEncoder *usually* encodes using the unstructured.UnstructuredJSONScheme, but if the type is Status or WatchEvent -// it will serialize them out using the converting codec. 
-type crEncoder struct{} - -func (crEncoder) Encode(obj runtime.Object, w io.Writer) error { - switch t := obj.(type) { - case *metav1.Status, *metav1.WatchEvent: - for _, info := range Codecs.SupportedMediaTypes() { - // we are always json - if info.MediaType == "application/json" { - return info.Serializer.Encode(obj, w) - } - } - - return fmt.Errorf("unable to find json serializer for %T", t) - - default: - return unstructured.UnstructuredJSONScheme.Encode(obj, w) - } -} - type unstructuredCreator struct{} func (c unstructuredCreator) New(kind schema.GroupVersionKind) (runtime.Object, error) { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD index 1aca59abf8e..5d26af605a7 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD @@ -12,11 +12,13 @@ go_test( "finalization_test.go", "registration_test.go", "validation_test.go", + "yaml_test.go", ], importpath = "k8s.io/apiextensions-apiserver/test/integration", tags = ["integration"], deps = [ "//vendor/github.com/coreos/etcd/clientv3:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apiserver:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go new file mode 100644 index 00000000000..b6410123896 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go @@ -0,0 +1,361 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "encoding/json" + "fmt" + "net/http" + "testing" + + "github.com/ghodss/yaml" + + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/test/integration/testserver" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" +) + +func TestYAML(t *testing.T) { + config, err := testserver.DefaultServerConfig() + if err != nil { + t.Fatal(err) + } + + stopCh, apiExtensionClient, clientPool, err := testserver.StartServer(config) + if err != nil { + t.Fatal(err) + } + defer close(stopCh) + + noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) + _, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, clientPool) + if err != nil { + t.Fatal(err) + } + + kind := noxuDefinition.Spec.Names.Kind + listKind := noxuDefinition.Spec.Names.ListKind + apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Version + + rest := apiExtensionClient.Discovery().RESTClient() + + // Discovery + { + result, err := rest.Get(). + SetHeader("Accept", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version). 
+ DoRaw() + if err != nil { + t.Fatal(err, string(result)) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetAPIVersion() != "v1" || obj.GetKind() != "APIResourceList" { + t.Fatalf("unexpected discovery kind: %s", string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "groupVersion"); v != apiVersion || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } + + // Error + { + result, err := rest.Get(). + SetHeader("Accept", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural, "missingname"). + DoRaw() + if !errors.IsNotFound(err) { + t.Fatalf("expected not found, got %v", err) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetAPIVersion() != "v1" || obj.GetKind() != "Status" { + t.Fatalf("unexpected discovery kind: %s", string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "reason"); v != "NotFound" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } + + uid := types.UID("") + resourceVersion := "" + + // Create + { + yamlBody := []byte(fmt.Sprintf(` +apiVersion: %s +kind: %s +metadata: + name: mytest +values: + numVal: 1 + boolVal: true + stringVal: "1"`, apiVersion, kind)) + + result, err := rest.Post(). + SetHeader("Accept", "application/yaml"). + SetHeader("Content-Type", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural). + Body(yamlBody). 
+ DoRaw() + if err != nil { + t.Fatal(err, string(result)) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetName() != "mytest" { + t.Fatalf("expected mytest, got %s", obj.GetName()) + } + if obj.GetAPIVersion() != apiVersion { + t.Fatalf("expected %s, got %s", apiVersion, obj.GetAPIVersion()) + } + if obj.GetKind() != kind { + t.Fatalf("expected %s, got %s", kind, obj.GetKind()) + } + if v, ok, err := unstructured.NestedFloat64(obj.Object, "values", "numVal"); v != 1 || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedBool(obj.Object, "values", "boolVal"); v != true || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "values", "stringVal"); v != "1" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + uid = obj.GetUID() + resourceVersion = obj.GetResourceVersion() + } + + // Get + { + result, err := rest.Get(). + SetHeader("Accept", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural, "mytest"). 
+ DoRaw() + if err != nil { + t.Fatal(err) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err, string(result)) + } + if obj.GetName() != "mytest" { + t.Fatalf("expected mytest, got %s", obj.GetName()) + } + if obj.GetAPIVersion() != apiVersion { + t.Fatalf("expected %s, got %s", apiVersion, obj.GetAPIVersion()) + } + if obj.GetKind() != kind { + t.Fatalf("expected %s, got %s", kind, obj.GetKind()) + } + if v, ok, err := unstructured.NestedFloat64(obj.Object, "values", "numVal"); v != 1 || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedBool(obj.Object, "values", "boolVal"); v != true || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "values", "stringVal"); v != "1" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } + + // List + { + result, err := rest.Get(). + SetHeader("Accept", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural). 
+ DoRaw() + if err != nil { + t.Fatal(err, string(result)) + } + listObj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if listObj.GetAPIVersion() != apiVersion { + t.Fatalf("expected %s, got %s", apiVersion, listObj.GetAPIVersion()) + } + if listObj.GetKind() != listKind { + t.Fatalf("expected %s, got %s", kind, listObj.GetKind()) + } + items, ok, err := unstructured.NestedSlice(listObj.Object, "items") + if !ok || err != nil || len(items) != 1 { + t.Fatalf("expected one item, got %v %v %v", items, ok, err) + } + obj := unstructured.Unstructured{Object: items[0].(map[string]interface{})} + if obj.GetName() != "mytest" { + t.Fatalf("expected mytest, got %s", obj.GetName()) + } + if obj.GetAPIVersion() != apiVersion { + t.Fatalf("expected %s, got %s", apiVersion, obj.GetAPIVersion()) + } + if obj.GetKind() != kind { + t.Fatalf("expected %s, got %s", kind, obj.GetKind()) + } + if v, ok, err := unstructured.NestedFloat64(obj.Object, "values", "numVal"); v != 1 || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedBool(obj.Object, "values", "boolVal"); v != true || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "values", "stringVal"); v != "1" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } + + // Watch rejects yaml (no streaming support) + { + result, err := rest.Get(). + SetHeader("Accept", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural). + Param("watch", "true"). 
+ DoRaw() + if !errors.IsNotAcceptable(err) { + t.Fatal("expected not acceptable error, got %v (%s)", err, string(result)) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetAPIVersion() != "v1" || obj.GetKind() != "Status" { + t.Fatalf("unexpected result: %s", string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "reason"); v != "NotAcceptable" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedFloat64(obj.Object, "code"); v != http.StatusNotAcceptable || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } + + // Update + { + yamlBody := []byte(fmt.Sprintf(` +apiVersion: %s +kind: %s +metadata: + name: mytest + uid: %s + resourceVersion: "%s" +values: + numVal: 2 + boolVal: false + stringVal: "2"`, apiVersion, kind, uid, resourceVersion)) + result, err := rest.Put(). + SetHeader("Accept", "application/yaml"). + SetHeader("Content-Type", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural, "mytest"). + Body(yamlBody). 
+ DoRaw() + if err != nil { + t.Fatal(err, string(result)) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetName() != "mytest" { + t.Fatalf("expected mytest, got %s", obj.GetName()) + } + if obj.GetAPIVersion() != apiVersion { + t.Fatalf("expected %s, got %s", apiVersion, obj.GetAPIVersion()) + } + if obj.GetKind() != kind { + t.Fatalf("expected %s, got %s", kind, obj.GetKind()) + } + if v, ok, err := unstructured.NestedFloat64(obj.Object, "values", "numVal"); v != 2 || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedBool(obj.Object, "values", "boolVal"); v != false || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "values", "stringVal"); v != "2" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + if obj.GetUID() != uid { + t.Fatal("uid changed: %v vs %v", uid, obj.GetUID()) + } + } + + // Patch rejects yaml requests (only JSON mime types are allowed) + { + yamlBody := []byte(fmt.Sprintf(` +values: + numVal: 3`, apiVersion, kind, uid, resourceVersion)) + result, err := rest.Patch(types.MergePatchType). + SetHeader("Accept", "application/yaml"). + SetHeader("Content-Type", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural, "mytest"). + Body(yamlBody). + DoRaw() + if !errors.IsUnsupportedMediaType(err) { + t.Fatalf("Expected bad request, got %v\n%s", err, string(result)) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetAPIVersion() != "v1" || obj.GetKind() != "Status" { + t.Fatalf("expected %s %s, got %s %s", "v1", "Status", obj.GetAPIVersion(), obj.GetKind()) + } + if v, ok, err := unstructured.NestedString(obj.Object, "reason"); v != "UnsupportedMediaType" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } + + // Delete + { + result, err := rest.Delete(). 
+ SetHeader("Accept", "application/yaml"). + AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural, "mytest"). + DoRaw() + if err != nil { + t.Fatal(err, string(result)) + } + obj, err := decodeYAML(result) + if err != nil { + t.Fatal(err) + } + if obj.GetAPIVersion() != "v1" || obj.GetKind() != "Status" { + t.Fatalf("unexpected response: %s", string(result)) + } + if v, ok, err := unstructured.NestedString(obj.Object, "status"); v != "Success" || !ok || err != nil { + t.Fatal(v, ok, err, string(result)) + } + } +} + +func decodeYAML(data []byte) (*unstructured.Unstructured, error) { + retval := &unstructured.Unstructured{Object: map[string]interface{}{}} + // ensure this isn't JSON + if json.Unmarshal(data, &retval.Object) == nil { + return nil, fmt.Errorf("data is JSON, not YAML: %s", string(data)) + } + // ensure it is YAML + retval.Object = map[string]interface{}{} + if err := yaml.Unmarshal(data, &retval.Object); err != nil { + return nil, fmt.Errorf("error decoding YAML: %v\noriginal YAML: %s", err, string(data)) + } + return retval, nil +} From 631119a7d65e01e48b5d8a46d7300b20c65262e1 Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 16 Jan 2018 15:28:16 +0800 Subject: [PATCH 791/794] move prometheus init to k8s.io/apiserver/pkg/endpoints/metrics/metrics.go --- .../src/k8s.io/apiserver/pkg/endpoints/BUILD | 1 - .../apiserver/pkg/endpoints/apiserver.go | 25 ------------------- .../pkg/endpoints/metrics/metrics.go | 4 +-- 3 files changed, 2 insertions(+), 28 deletions(-) delete mode 100644 staging/src/k8s.io/apiserver/pkg/endpoints/apiserver.go diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD index f2c1ce34509..39fe40b5d9a 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD @@ -60,7 +60,6 @@ go_test( go_library( name = "go_default_library", srcs = [ - "apiserver.go", "doc.go", 
"groupversion.go", "installer.go", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver.go b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver.go deleted file mode 100644 index 933363bffeb..00000000000 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package endpoints - -import ( - "k8s.io/apiserver/pkg/endpoints/metrics" -) - -func init() { - metrics.Register() -} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 65e651a3317..8a3b20d0d86 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -81,8 +81,8 @@ var ( kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`) ) -// Register all metrics. -func Register() { +func init() { + // Register all metrics. 
prometheus.MustRegister(requestCounter) prometheus.MustRegister(longRunningRequestGauge) prometheus.MustRegister(requestLatencies) From d77d20dd54777f95cbe88815ea47ec0bdf31332e Mon Sep 17 00:00:00 2001 From: hzxuzhonghu Date: Tue, 16 Jan 2018 17:01:48 +0800 Subject: [PATCH 792/794] fix azure TestGetInstanceIDByNodeName data race --- .../providers/azure/azure_vmss_test.go | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_test.go b/pkg/cloudprovider/providers/azure/azure_vmss_test.go index 7830eab783d..08afdd56908 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss_test.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss_test.go @@ -24,12 +24,15 @@ import ( "github.com/stretchr/testify/assert" ) -func newTestScaleSet() *scaleSet { - ss := newScaleSet(getTestCloud()) +func newTestScaleSet(scaleSetName string, vmList []string) *scaleSet { + cloud := getTestCloud() + setTestVirtualMachineCloud(cloud, scaleSetName, vmList) + ss := newScaleSet(cloud) + return ss.(*scaleSet) } -func setTestVirtualMachineScaleSets(ss *scaleSet, scaleSetName string, vmList []string) { +func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) { virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient() scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet) scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{ @@ -63,13 +66,13 @@ func setTestVirtualMachineScaleSets(ss *scaleSet, scaleSetName string, vmList [] }, ID: &ID, InstanceID: &instanceID, - Location: &ss.Cloud.Location, + Location: &ss.Location, } } virtualMachineScaleSetVMsClient.setFakeStore(ssVMs) - ss.Cloud.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient - ss.Cloud.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient + ss.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient + ss.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient } 
func TestGetScaleSetVMInstanceID(t *testing.T) { @@ -102,8 +105,6 @@ func TestGetScaleSetVMInstanceID(t *testing.T) { } func TestGetInstanceIDByNodeName(t *testing.T) { - ss := newTestScaleSet() - testCases := []struct { description string scaleSet string @@ -136,7 +137,8 @@ func TestGetInstanceIDByNodeName(t *testing.T) { } for _, test := range testCases { - setTestVirtualMachineScaleSets(ss, test.scaleSet, test.vmList) + ss := newTestScaleSet(test.scaleSet, test.vmList) + real, err := ss.GetInstanceIDByNodeName(test.nodeName) if test.expectError { assert.Error(t, err, test.description) From f57cc0b22d282bc8fe68faf91529e7175bc3918a Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Thu, 21 Dec 2017 16:50:16 +0800 Subject: [PATCH 793/794] fix(fakeclient): write event to watch channel on add/update/delete fix races with watch call add test for non-namespace resource watch add matching for all-namespace-watch fix delete namespace watch & restrict test fix multiple invocation on same resource & namespace add descriptive doc for tracker.watchers --- staging/src/k8s.io/client-go/testing/BUILD | 19 ++ .../src/k8s.io/client-go/testing/fixture.go | 60 +++++- .../k8s.io/client-go/testing/fixture_test.go | 192 ++++++++++++++++++ .../fake/generator_fake_for_clientset.go | 10 +- 4 files changed, 277 insertions(+), 4 deletions(-) create mode 100644 staging/src/k8s.io/client-go/testing/fixture_test.go diff --git a/staging/src/k8s.io/client-go/testing/BUILD b/staging/src/k8s.io/client-go/testing/BUILD index b26e662876d..666a449bd44 100644 --- a/staging/src/k8s.io/client-go/testing/BUILD +++ b/staging/src/k8s.io/client-go/testing/BUILD @@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -28,6 +29,24 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = [ + "fixture_test.go", + ], + embed = [":go_default_library"], + importpath = 
"k8s.io/client-go/testing", + deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/staging/src/k8s.io/client-go/testing/fixture.go b/staging/src/k8s.io/client-go/testing/fixture.go index 08379fb0897..ba8ee508f46 100644 --- a/staging/src/k8s.io/client-go/testing/fixture.go +++ b/staging/src/k8s.io/client-go/testing/fixture.go @@ -29,6 +29,11 @@ import ( restclient "k8s.io/client-go/rest" ) +// FakeWatchBufferSize is the max num of watch event can be buffered in the +// watch channel. Note that when watch event overflows or exceed this buffer +// size, manipulations via fake client may be blocked. +const FakeWatchBufferSize = 128 + // ObjectTracker keeps track of objects. It is intended to be used to // fake calls to a server by returning objects based on their kind, // namespace and name. @@ -54,6 +59,10 @@ type ObjectTracker interface { // didn't exist in the tracker prior to deletion, Delete returns // no error. Delete(gvr schema.GroupVersionResource, ns, name string) error + + // Watch watches objects from the tracker. Watch returns a channel + // which will push added / modified / deleted object. + Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) } // ObjectScheme abstracts the implementation of common operations on objects. 
@@ -132,6 +141,13 @@ type tracker struct { decoder runtime.Decoder lock sync.RWMutex objects map[schema.GroupVersionResource][]runtime.Object + // The value type of watchers is a map of which the key is either a namespace or + // all/non namespace aka "" and its value is list of fake watchers. Each of + // fake watcher holds a buffered channel of size "FakeWatchBufferSize" which + // is default to 128. Manipulations on resources will broadcast the notification + // events into the watchers' channel and note that too many unhandled event may + // potentially block the tracker. + watchers map[schema.GroupVersionResource]map[string][]*watch.FakeWatcher } var _ ObjectTracker = &tracker{} @@ -140,9 +156,10 @@ var _ ObjectTracker = &tracker{} // of objects for the fake clientset. Mostly useful for unit tests. func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { return &tracker{ - scheme: scheme, - decoder: decoder, - objects: make(map[schema.GroupVersionResource][]runtime.Object), + scheme: scheme, + decoder: decoder, + objects: make(map[schema.GroupVersionResource][]runtime.Object), + watchers: make(map[schema.GroupVersionResource]map[string][]*watch.FakeWatcher), } } @@ -185,6 +202,19 @@ func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionK return list.DeepCopyObject(), nil } +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { + t.lock.Lock() + defer t.lock.Unlock() + + fakewatcher := watch.NewFakeWithChanSize(FakeWatchBufferSize, true) + + if _, exists := t.watchers[gvr]; !exists { + t.watchers[gvr] = make(map[string][]*watch.FakeWatcher) + } + t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) + return fakewatcher, nil +} + func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { errNotFound := errors.NewNotFound(gvr.GroupResource(), name) @@ -263,6 +293,19 @@ func (t *tracker) Update(gvr 
schema.GroupVersionResource, obj runtime.Object, ns return t.add(gvr, obj, ns, true) } +func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.FakeWatcher { + watches := []*watch.FakeWatcher{} + if t.watchers[gvr] != nil { + if w := t.watchers[gvr][ns]; w != nil { + watches = append(watches, w...) + } + if w := t.watchers[gvr][""]; w != nil { + watches = append(watches, w...) + } + } + return watches +} + func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { t.lock.Lock() defer t.lock.Unlock() @@ -296,6 +339,9 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st } if oldMeta.GetNamespace() == newMeta.GetNamespace() && oldMeta.GetName() == newMeta.GetName() { if replaceExisting { + for _, w := range t.getWatches(gvr, ns) { + w.Modify(obj) + } t.objects[gvr][i] = obj return nil } @@ -310,6 +356,10 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st t.objects[gvr] = append(t.objects[gvr], obj) + for _, w := range t.getWatches(gvr, ns) { + w.Add(obj) + } + return nil } @@ -342,7 +392,11 @@ func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error return err } if objMeta.GetNamespace() == ns && objMeta.GetName() == name { + obj := t.objects[gvr][i] t.objects[gvr] = append(t.objects[gvr][:i], t.objects[gvr][i+1:]...) + for _, w := range t.getWatches(gvr, ns) { + w.Delete(obj) + } found = true break } diff --git a/staging/src/k8s.io/client-go/testing/fixture_test.go b/staging/src/k8s.io/client-go/testing/fixture_test.go new file mode 100644 index 00000000000..967e0aefa93 --- /dev/null +++ b/staging/src/k8s.io/client-go/testing/fixture_test.go @@ -0,0 +1,192 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "math/rand" + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" +) + +func getArbitraryResource(s schema.GroupVersionResource, name, namespace string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": s.Resource, + "apiVersion": s.Version, + "metadata": map[string]interface{}{ + "name": name, + "namespace": namespace, + "generateName": "test_generateName", + "uid": "test_uid", + "resourceVersion": "test_resourceVersion", + "selfLink": "test_selfLink", + }, + "data": strconv.Itoa(rand.Int()), + }, + } +} + +func TestWatchCallNonNamespace(t *testing.T) { + testResource := schema.GroupVersionResource{Group: "", Version: "test_version", Resource: "test_kind"} + testObj := getArbitraryResource(testResource, "test_name", "test_namespace") + accessor, err := meta.Accessor(testObj) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + ns := accessor.GetNamespace() + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + o := NewObjectTracker(scheme, codecs.UniversalDecoder()) + watch, err := o.Watch(testResource, ns) + go func() { + err := o.Create(testResource, testObj, ns) + if err != nil { + t.Errorf("test resource creation failed: %v", err) + } + }() 
+ out := <-watch.ResultChan() + assert.Equal(t, testObj, out.Object, "watched object mismatch") +} + +func TestWatchCallAllNamespace(t *testing.T) { + testResource := schema.GroupVersionResource{Group: "", Version: "test_version", Resource: "test_kind"} + testObj := getArbitraryResource(testResource, "test_name", "test_namespace") + accessor, err := meta.Accessor(testObj) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + ns := accessor.GetNamespace() + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + o := NewObjectTracker(scheme, codecs.UniversalDecoder()) + w, err := o.Watch(testResource, "test_namespace") + wAll, err := o.Watch(testResource, "") + go func() { + err := o.Create(testResource, testObj, ns) + assert.NoError(t, err, "test resource creation failed") + }() + out := <-w.ResultChan() + outAll := <-wAll.ResultChan() + assert.Equal(t, watch.Added, out.Type, "watch event mismatch") + assert.Equal(t, watch.Added, outAll.Type, "watch event mismatch") + assert.Equal(t, testObj, out.Object, "watched created object mismatch") + assert.Equal(t, testObj, outAll.Object, "watched created object mismatch") + go func() { + err := o.Update(testResource, testObj, ns) + assert.NoError(t, err, "test resource updating failed") + }() + out = <-w.ResultChan() + outAll = <-wAll.ResultChan() + assert.Equal(t, watch.Modified, out.Type, "watch event mismatch") + assert.Equal(t, watch.Modified, outAll.Type, "watch event mismatch") + assert.Equal(t, testObj, out.Object, "watched updated object mismatch") + assert.Equal(t, testObj, outAll.Object, "watched updated object mismatch") + go func() { + err := o.Delete(testResource, "test_namespace", "test_name") + assert.NoError(t, err, "test resource deletion failed") + }() + out = <-w.ResultChan() + outAll = <-wAll.ResultChan() + assert.Equal(t, watch.Deleted, out.Type, "watch event mismatch") + assert.Equal(t, watch.Deleted, outAll.Type, "watch event mismatch") + assert.Equal(t, testObj, 
out.Object, "watched deleted object mismatch") + assert.Equal(t, testObj, outAll.Object, "watched deleted object mismatch") +} + +func TestWatchCallMultipleInvocation(t *testing.T) { + cases := []struct { + name string + op watch.EventType + }{ + { + "foo", + watch.Added, + }, + { + "bar", + watch.Added, + }, + { + "bar", + watch.Modified, + }, + { + "foo", + watch.Deleted, + }, + { + "bar", + watch.Deleted, + }, + } + + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + testResource := schema.GroupVersionResource{Group: "", Version: "test_version", Resource: "test_kind"} + + o := NewObjectTracker(scheme, codecs.UniversalDecoder()) + watchNamespaces := []string{ + "", + "", + "test_namespace", + "test_namespace", + } + var wg sync.WaitGroup + wg.Add(len(watchNamespaces)) + for idx, watchNamespace := range watchNamespaces { + i := idx + w, err := o.Watch(testResource, watchNamespace) + go func() { + assert.NoError(t, err, "watch invocation failed") + for _, c := range cases { + fmt.Printf("%#v %#v\n", c, i) + event := <-w.ResultChan() + accessor, err := meta.Accessor(event.Object) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + assert.Equal(t, c.op, event.Type, "watch event mismatched") + assert.Equal(t, c.name, accessor.GetName(), "watched object mismatch") + } + wg.Done() + }() + } + for _, c := range cases { + switch c.op { + case watch.Added: + obj := getArbitraryResource(testResource, c.name, "test_namespace") + o.Create(testResource, obj, "test_namespace") + case watch.Modified: + obj := getArbitraryResource(testResource, c.name, "test_namespace") + o.Update(testResource, obj, "test_namespace") + case watch.Deleted: + o.Delete(testResource, "test_namespace", c.name) + } + } + wg.Wait() +} diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go index 
ea9ed8deb45..f77ab057008 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -127,7 +127,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } From 6f381ab2cd351c96a28b7ccde704ea96c38612dd Mon Sep 17 00:00:00 2001 From: yue9944882 <291271447@qq.com> Date: Mon, 25 Dec 2017 11:46:56 +0800 Subject: [PATCH 794/794] Regenerating code of fake clientset --- .../internalclientset/fake/clientset_generated.go | 10 +++++++++- .../clientset/clientset/fake/clientset_generated.go | 10 +++++++++- .../internalclientset/fake/clientset_generated.go | 10 +++++++++- .../client-go/kubernetes/fake/clientset_generated.go | 10 +++++++++- .../internalversion/fake/clientset_generated.go | 10 +++++++++- .../clientset/versioned/fake/clientset_generated.go | 10 +++++++++- .../clientset/versioned/fake/clientset_generated.go | 10 +++++++++- .../clientset/fake/clientset_generated.go | 10 +++++++++- .../internalclientset/fake/clientset_generated.go | 10 +++++++++- .../clientset/fake/clientset_generated.go | 10 +++++++++- .../internalversion/fake/clientset_generated.go | 10 +++++++++- .../clientset/versioned/fake/clientset_generated.go | 10 +++++++++- .../clientset/versioned/fake/clientset_generated.go | 10 +++++++++- 13 files changed, 117 insertions(+), 13 deletions(-) diff --git 
a/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go b/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go index 28efaac5a73..1db023eb600 100644 --- a/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go +++ b/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go @@ -71,7 +71,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go index 473f88f47da..4f2448fce5d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, 
&fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go index fdc0beee577..7647ef17ce4 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 12cfac0a8f2..982d7420b89 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -95,7 +95,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return 
true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go index 8b474ba6d7e..af74a734dd8 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go @@ -43,7 +43,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go index 5cadbe26140..df811fcb30f 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go @@ -43,7 +43,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret 
watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go index 04135606b8c..0428810e8fb 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go @@ -43,7 +43,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go index bae14284200..5852846aca5 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - 
fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go index d938683de71..48f7226cd82 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go index a3e65fa49f1..8dfdbfd4119 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake/clientset_generated.go @@ 
-43,7 +43,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go index b4030b659e1..89b726e1178 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go index 6b627a3c7a3..503ab9b931b 100644 --- 
a/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} } diff --git a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go index 864cfe59b70..6a89220a266 100644 --- a/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -41,7 +41,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} }